From: John Crispin
Date: Tue, 18 Mar 2014 19:21:56 +0000 (+0000)
Subject: ralink: refresh patches
X-Git-Url: http://git.openwrt.org/?p=openwrt%2Fsvn-archive%2Farchive.git;a=commitdiff_plain;h=30b84b0bd566b21a9f3e634c500bca0cd5094516

ralink: refresh patches

Signed-off-by: John Crispin

SVN-Revision: 39949
---

diff --git a/target/linux/ramips/dts/mt7621.dtsi b/target/linux/ramips/dts/mt7621.dtsi
index 9fa9d0a5fa..83df878e9c 100644
--- a/target/linux/ramips/dts/mt7621.dtsi
+++ b/target/linux/ramips/dts/mt7621.dtsi
@@ -72,8 +72,8 @@
 		compatible = "ns16550a";
 		reg = <0xc00 0x100>;
 
-/*		interrupt-parent = <&gic>;
-		interrupts = <26>;*/
+		interrupt-parent = <&gic>;
+		interrupts = <26>;
 
 		reg-shift = <2>;
 		reg-io-width = <4>;
diff --git a/target/linux/ramips/files/drivers/usb/host/mtk-phy-7621.c b/target/linux/ramips/files/drivers/usb/host/mtk-phy-7621.c
deleted file mode 100644
index 4e9c0d7a8d..0000000000
--- a/target/linux/ramips/files/drivers/usb/host/mtk-phy-7621.c
+++ /dev/null
@@ -1,445 +0,0 @@
-#include "mtk-phy.h"
-
-#ifdef CONFIG_PROJECT_7621
-#include "mtk-phy-7621.h"
-
-//not used on SoC
-PHY_INT32 phy_init(struct u3phy_info *info){
-	return PHY_TRUE;
-}
-
-//not used on SoC
-PHY_INT32 phy_change_pipe_phase(struct u3phy_info *info, PHY_INT32 phy_drv, PHY_INT32 pipe_phase){
-	return PHY_TRUE;
-}
-
-//--------------------------------------------------------
-// Function    : fgEyeScanHelper_CheckPtInRegion()
-// Description : Check if the test point is in a rectangle region.
-//               If it is in the rectangle, also check if this point
-//               is on the multiple of deltaX and deltaY.
-// Parameter   : strucScanRegion * prEye - the region
-//               BYTE bX
-//               BYTE bY
-// Return      : BYTE - TRUE : This point needs to be tested
-//                      FALSE: This point will be omitted
-// Note        : First check within the rectangle.
-//               Secondly, use modulous to check if the point will be tested.
-//--------------------------------------------------------
-static PHY_INT8 fgEyeScanHelper_CheckPtInRegion(struct strucScanRegion * prEye, PHY_INT8 bX, PHY_INT8 bY)
-{
-	PHY_INT8 fgValid = true;
-
-
-	/// Be careful, the axis origin is on the TOP-LEFT corner.
-	/// Therefore the top-left point has the minimum X and Y
-	/// Botton-right point is the maximum X and Y
-	if ( (prEye->bX_tl <= bX) && (bX <= prEye->bX_br)
-		&& (prEye->bY_tl <= bY) && (bY <= prEye->bX_br))
-	{
-		// With the region, now check whether or not the input test point is
-		// on the multiples of X and Y
-		// Do not have to worry about negative value, because we have already
-		// check the input bX, and bY is within the region.
- if ( ((bX - prEye->bX_tl) % (prEye->bDeltaX)) - || ((bY - prEye->bY_tl) % (prEye->bDeltaY)) ) - { - // if the division will have remainder, that means - // the input test point is on the multiples of X and Y - fgValid = false; - } - else - { - } - } - else - { - - fgValid = false; - } - return fgValid; -} - -//-------------------------------------------------------- -// Function : EyeScanHelper_RunTest() -// Description : Enable the test, and wait til it is completed -// Parameter : None -// Return : None -// Note : None -//-------------------------------------------------------- -static void EyeScanHelper_RunTest(struct u3phy_info *info) -{ - DRV_UDELAY(100); - // Disable the test - U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0) - , RG_SSUSB_EQ_EYE_CNT_EN_OFST, RG_SSUSB_EQ_EYE_CNT_EN, 0); //RG_SSUSB_RX_EYE_CNT_EN = 0 - DRV_UDELAY(100); - // Run the test - U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0) - , RG_SSUSB_EQ_EYE_CNT_EN_OFST, RG_SSUSB_EQ_EYE_CNT_EN, 1); //RG_SSUSB_RX_EYE_CNT_EN = 1 - DRV_UDELAY(100); - // Wait til it's done - //RGS_SSUSB_RX_EYE_CNT_RDY - while(!U3PhyReadField32(((PHY_UINT32)&info->u3phyd_regs->phya_rx_mon5) - , RGS_SSUSB_EQ_EYE_CNT_RDY_OFST, RGS_SSUSB_EQ_EYE_CNT_RDY)); -} - -//-------------------------------------------------------- -// Function : fgEyeScanHelper_CalNextPoint() -// Description : Calcualte the test point for the measurement -// Parameter : None -// Return : BOOL - TRUE : the next point is within the -// boundaryof HW limit -// FALSE: the next point is out of the HW limit -// Note : The next point is obtained by calculating -// from the bottom left of the region rectangle -// and then scanning up until it reaches the upper -// limit. At this time, the x will increment, and -// start scanning downwards until the y hits the -// zero. -//-------------------------------------------------------- -static PHY_INT8 fgEyeScanHelper_CalNextPoint(void) -{ - if ( ((_bYcurr == MAX_Y) && (_eScanDir == SCAN_DN)) - || ((_bYcurr == MIN_Y) && (_eScanDir == SCAN_UP)) - ) - { - /// Reaches the limit of Y axis - /// Increment X - _bXcurr++; - _fgXChged = true; - _eScanDir = (_eScanDir == SCAN_UP) ? SCAN_DN : SCAN_UP; - - if (_bXcurr > MAX_X) - { - return false; - } - } - else - { - _bYcurr = (_eScanDir == SCAN_DN) ? 
_bYcurr + 1 : _bYcurr - 1; - _fgXChged = false; - } - return PHY_TRUE; -} - -PHY_INT32 eyescan_init(struct u3phy_info *info){ - //initial PHY setting - U3PhyWriteField32(((PHY_UINT32)&info->u3phya_regs->rega) - , RG_SSUSB_CDR_EPEN_OFST, RG_SSUSB_CDR_EPEN, 1); - U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->phyd_mix3) - , RG_SSUSB_FORCE_CDR_PI_PWD_OFST, RG_SSUSB_FORCE_CDR_PI_PWD, 1); - U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_bank2_regs->b2_phyd_misc0) - , RG_SSUSB_RX_PI_CAL_EN_SEL_OFST, RG_SSUSB_RX_PI_CAL_EN_SEL, 1); //RG_SSUSB_RX_PI_CAL_MANUAL_SEL = 1 - U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_bank2_regs->b2_phyd_misc0) - , RG_SSUSB_RX_PI_CAL_EN_OFST, RG_SSUSB_RX_PI_CAL_EN, 1); //RG_SSUSB_RX_PI_CAL_MANUAL_EN = 1 - return PHY_TRUE; -} - -PHY_INT32 phy_eyescan(struct u3phy_info *info, PHY_INT32 x_t1, PHY_INT32 y_t1, PHY_INT32 x_br, PHY_INT32 y_br, PHY_INT32 delta_x, PHY_INT32 delta_y - , PHY_INT32 eye_cnt, PHY_INT32 num_cnt, PHY_INT32 PI_cal_en, PHY_INT32 num_ignore_cnt){ - PHY_INT32 cOfst = 0; - PHY_UINT8 bIdxX = 0; - PHY_UINT8 bIdxY = 0; - //PHY_INT8 bCnt = 0; - PHY_UINT8 bIdxCycCnt = 0; - PHY_INT8 fgValid; - PHY_INT8 cX; - PHY_INT8 cY; - PHY_UINT8 bExtendCnt; - PHY_INT8 isContinue; - //PHY_INT8 isBreak; - PHY_UINT32 wErr0 = 0, wErr1 = 0; - //PHY_UINT32 temp; - - PHY_UINT32 pwErrCnt0[CYCLE_COUNT_MAX][ERRCNT_MAX][ERRCNT_MAX]; - PHY_UINT32 pwErrCnt1[CYCLE_COUNT_MAX][ERRCNT_MAX][ERRCNT_MAX]; - - _rEye1.bX_tl = x_t1; - _rEye1.bY_tl = y_t1; - _rEye1.bX_br = x_br; - _rEye1.bY_br = y_br; - _rEye1.bDeltaX = delta_x; - _rEye1.bDeltaY = delta_y; - - _rEye2.bX_tl = x_t1; - _rEye2.bY_tl = y_t1; - _rEye2.bX_br = x_br; - _rEye2.bY_br = y_br; - _rEye2.bDeltaX = delta_x; - _rEye2.bDeltaY = delta_y; - - _rTestCycle.wEyeCnt = eye_cnt; - _rTestCycle.bNumOfEyeCnt = num_cnt; - _rTestCycle.bNumOfIgnoreCnt = num_ignore_cnt; - _rTestCycle.bPICalEn = PI_cal_en; - - _bXcurr = 0; - _bYcurr = 0; - _eScanDir = SCAN_DN; - _fgXChged = false; - - printk("x_t1: %x, y_t1: %x, x_br: %x, y_br: %x, delta_x: %x, delta_y: %x, \ - eye_cnt: %x, num_cnt: %x, PI_cal_en: %x, num_ignore_cnt: %x\n", \ - x_t1, y_t1, x_br, y_br, delta_x, delta_y, eye_cnt, num_cnt, PI_cal_en, num_ignore_cnt); - - //force SIGDET to OFF - U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_bank2_regs->b2_phyd_misc0) - , RG_SSUSB_RX_SIGDET_EN_SEL_OFST, RG_SSUSB_RX_SIGDET_EN_SEL, 1); //RG_SSUSB_RX_SIGDET_SEL = 1 - U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_bank2_regs->b2_phyd_misc0) - , RG_SSUSB_RX_SIGDET_EN_OFST, RG_SSUSB_RX_SIGDET_EN, 0); //RG_SSUSB_RX_SIGDET_EN = 0 - U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye1) - , RG_SSUSB_EQ_SIGDET_OFST, RG_SSUSB_EQ_SIGDET, 0); //RG_SSUSB_RX_SIGDET = 0 - - // RX_TRI_DET_EN to Disable - U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq3) - , RG_SSUSB_EQ_TRI_DET_EN_OFST, RG_SSUSB_EQ_TRI_DET_EN, 0); //RG_SSUSB_RX_TRI_DET_EN = 0 - - U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0) - , RG_SSUSB_EQ_EYE_MON_EN_OFST, RG_SSUSB_EQ_EYE_MON_EN, 1); //RG_SSUSB_EYE_MON_EN = 1 - U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0) - , RG_SSUSB_EQ_EYE_XOFFSET_OFST, RG_SSUSB_EQ_EYE_XOFFSET, 0); //RG_SSUSB_RX_EYE_XOFFSET = 0 - U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0) - , RG_SSUSB_EQ_EYE0_Y_OFST, RG_SSUSB_EQ_EYE0_Y, 0); //RG_SSUSB_RX_EYE0_Y = 0 - U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0) - , RG_SSUSB_EQ_EYE1_Y_OFST, RG_SSUSB_EQ_EYE1_Y, 0); //RG_SSUSB_RX_EYE1_Y = 0 - - - if (PI_cal_en){ - // PI Calibration - 
U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_bank2_regs->b2_phyd_misc0) - , RG_SSUSB_RX_PI_CAL_EN_SEL_OFST, RG_SSUSB_RX_PI_CAL_EN_SEL, 1); //RG_SSUSB_RX_PI_CAL_MANUAL_SEL = 1 - U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_bank2_regs->b2_phyd_misc0) - , RG_SSUSB_RX_PI_CAL_EN_OFST, RG_SSUSB_RX_PI_CAL_EN, 0); //RG_SSUSB_RX_PI_CAL_MANUAL_EN = 0 - U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_bank2_regs->b2_phyd_misc0) - , RG_SSUSB_RX_PI_CAL_EN_OFST, RG_SSUSB_RX_PI_CAL_EN, 1); //RG_SSUSB_RX_PI_CAL_MANUAL_EN = 1 - - DRV_UDELAY(20); - - U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_bank2_regs->b2_phyd_misc0) - , RG_SSUSB_RX_PI_CAL_EN_OFST, RG_SSUSB_RX_PI_CAL_EN, 0); //RG_SSUSB_RX_PI_CAL_MANUAL_EN = 0 - _bPIResult = U3PhyReadField32(((PHY_UINT32)&info->u3phyd_regs->phya_rx_mon5) - , RGS_SSUSB_EQ_PILPO_OFST, RGS_SSUSB_EQ_PILPO); //read RGS_SSUSB_RX_PILPO - - printk(KERN_ERR "PI result: %d\n", _bPIResult); - } - // Read Initial DAC - // Set CYCLE - U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye3) - ,RG_SSUSB_EQ_EYE_CNT_OFST, RG_SSUSB_EQ_EYE_CNT, eye_cnt); //RG_SSUSB_RX_EYE_CNT - - // Eye Monitor Feature - U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye1) - , RG_SSUSB_EQ_EYE_MASK_OFST, RG_SSUSB_EQ_EYE_MASK, 0x3ff); //RG_SSUSB_RX_EYE_MASK = 0x3ff - U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0) - , RG_SSUSB_EQ_EYE_MON_EN_OFST, RG_SSUSB_EQ_EYE_MON_EN, 1); //RG_SSUSB_EYE_MON_EN = 1 - - // Move X,Y to the top-left corner - for (cOfst = 0; cOfst >= -64; cOfst--) - { - U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0) - ,RG_SSUSB_EQ_EYE_XOFFSET_OFST, RG_SSUSB_EQ_EYE_XOFFSET, cOfst); //RG_SSUSB_RX_EYE_XOFFSET - } - for (cOfst = 0; cOfst < 64; cOfst++) - { - U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0) - , RG_SSUSB_EQ_EYE0_Y_OFST, RG_SSUSB_EQ_EYE0_Y, cOfst); //RG_SSUSB_RX_EYE0_Y - U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0) - , RG_SSUSB_EQ_EYE1_Y_OFST, RG_SSUSB_EQ_EYE1_Y, cOfst); //RG_SSUSB_RX_EYE1_Y - } - //ClearErrorResult - for(bIdxCycCnt = 0; bIdxCycCnt < CYCLE_COUNT_MAX; bIdxCycCnt++){ - for(bIdxX = 0; bIdxX < ERRCNT_MAX; bIdxX++) - { - for(bIdxY = 0; bIdxY < ERRCNT_MAX; bIdxY++){ - pwErrCnt0[bIdxCycCnt][bIdxX][bIdxY] = 0; - pwErrCnt1[bIdxCycCnt][bIdxX][bIdxY] = 0; - } - } - } - isContinue = true; - while(isContinue){ - //printk(KERN_ERR "_bXcurr: %d, _bYcurr: %d\n", _bXcurr, _bYcurr); - // The point is within the boundary, then let's check if it is within - // the testing region. - // The point is only test-able if one of the eye region - // includes this point. - fgValid = fgEyeScanHelper_CheckPtInRegion(&_rEye1, _bXcurr, _bYcurr) - || fgEyeScanHelper_CheckPtInRegion(&_rEye2, _bXcurr, _bYcurr); - // Translate bX and bY to 2's complement from where the origin was on the - // top left corner. - // 0x40 and 0x3F needs a bit of thinking!!!! >"< - cX = (_bXcurr ^ 0x40); - cY = (_bYcurr ^ 0x3F); - - // Set X if necessary - if (_fgXChged == true) - { - U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0) - , RG_SSUSB_EQ_EYE_XOFFSET_OFST, RG_SSUSB_EQ_EYE_XOFFSET, cX); //RG_SSUSB_RX_EYE_XOFFSET - } - // Set Y - U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0) - , RG_SSUSB_EQ_EYE0_Y_OFST, RG_SSUSB_EQ_EYE0_Y, cY); //RG_SSUSB_RX_EYE0_Y - U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0) - , RG_SSUSB_EQ_EYE1_Y_OFST, RG_SSUSB_EQ_EYE1_Y, cY); //RG_SSUSB_RX_EYE1_Y - - /// Test this point! 
- if (fgValid){ - for (bExtendCnt = 0; bExtendCnt < num_ignore_cnt; bExtendCnt++) - { - //run test - EyeScanHelper_RunTest(info); - } - for (bExtendCnt = 0; bExtendCnt < num_cnt; bExtendCnt++) - { - EyeScanHelper_RunTest(info); - wErr0 = U3PhyReadField32(((PHY_UINT32)&info->u3phyd_regs->phya_rx_mon3) - , RGS_SSUSB_EQ_EYE_MONITOR_ERRCNT_0_OFST, RGS_SSUSB_EQ_EYE_MONITOR_ERRCNT_0); - wErr1 = U3PhyReadField32(((PHY_UINT32)&info->u3phyd_regs->phya_rx_mon4) - , RGS_SSUSB_EQ_EYE_MONITOR_ERRCNT_1_OFST, RGS_SSUSB_EQ_EYE_MONITOR_ERRCNT_1); - - pwErrCnt0[bExtendCnt][_bXcurr][_bYcurr] = wErr0; - pwErrCnt1[bExtendCnt][_bXcurr][_bYcurr] = wErr1; - - //EyeScanHelper_GetResult(&_rRes.pwErrCnt0[bCnt], &_rRes.pwErrCnt1[bCnt]); -// printk(KERN_ERR "cnt[%d] cur_x,y [0x%x][0x%x], cX,cY [0x%x][0x%x], ErrCnt[%d][%d]\n" -// , bExtendCnt, _bXcurr, _bYcurr, cX, cY, pwErrCnt0[bExtendCnt][_bXcurr][_bYcurr], pwErrCnt1[bExtendCnt][_bXcurr][_bYcurr]); - } - //printk(KERN_ERR "cur_x,y [0x%x][0x%x], cX,cY [0x%x][0x%x], ErrCnt[%d][%d]\n", _bXcurr, _bYcurr, cX, cY, pwErrCnt0[0][_bXcurr][_bYcurr], pwErrCnt1[0][_bXcurr][_bYcurr]); - } - else{ - - } - if (fgEyeScanHelper_CalNextPoint() == false){ -#if 0 - printk(KERN_ERR "Xcurr [0x%x] Ycurr [0x%x]\n", _bXcurr, _bYcurr); - printk(KERN_ERR "XcurrREG [0x%x] YcurrREG [0x%x]\n", cX, cY); -#endif - printk(KERN_ERR "end of eye scan\n"); - isContinue = false; - } - } - printk(KERN_ERR "CurX [0x%x] CurY [0x%x]\n" - , U3PhyReadField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0), RG_SSUSB_EQ_EYE_XOFFSET_OFST, RG_SSUSB_EQ_EYE_XOFFSET) - , U3PhyReadField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0), RG_SSUSB_EQ_EYE0_Y_OFST, RG_SSUSB_EQ_EYE0_Y)); - - // Move X,Y to the top-left corner - for (cOfst = 63; cOfst >= 0; cOfst--) - { - U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0) - , RG_SSUSB_EQ_EYE_XOFFSET_OFST, RG_SSUSB_EQ_EYE_XOFFSET, cOfst); //RG_SSUSB_RX_EYE_XOFFSET - } - for (cOfst = 63; cOfst >= 0; cOfst--) - { - U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0) - , RG_SSUSB_EQ_EYE0_Y_OFST, RG_SSUSB_EQ_EYE0_Y, cOfst); - U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0) - , RG_SSUSB_EQ_EYE1_Y_OFST, RG_SSUSB_EQ_EYE1_Y, cOfst); - - } - printk(KERN_ERR "CurX [0x%x] CurY [0x%x]\n" - , U3PhyReadField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0), RG_SSUSB_EQ_EYE_XOFFSET_OFST, RG_SSUSB_EQ_EYE_XOFFSET) - , U3PhyReadField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0), RG_SSUSB_EQ_EYE0_Y_OFST, RG_SSUSB_EQ_EYE0_Y)); - - printk(KERN_ERR "PI result: %d\n", _bPIResult); - printk(KERN_ERR "pwErrCnt0 addr: 0x%x\n", (PHY_UINT32)pwErrCnt0); - printk(KERN_ERR "pwErrCnt1 addr: 0x%x\n", (PHY_UINT32)pwErrCnt1); - - return PHY_TRUE; -} - -//not used on SoC -PHY_INT32 u2_save_cur_en(struct u3phy_info *info){ - return PHY_TRUE; -} - -//not used on SoC -PHY_INT32 u2_save_cur_re(struct u3phy_info *info){ - return PHY_TRUE; -} - -PHY_INT32 u2_slew_rate_calibration(struct u3phy_info *info){ - PHY_INT32 i=0; - //PHY_INT32 j=0; - //PHY_INT8 u1SrCalVal = 0; - //PHY_INT8 u1Reg_addr_HSTX_SRCAL_EN; - PHY_INT32 fgRet = 0; - PHY_INT32 u4FmOut = 0; - PHY_INT32 u4Tmp = 0; - //PHY_INT32 temp; - - // => RG_USB20_HSTX_SRCAL_EN = 1 - // enable HS TX SR calibration - U3PhyWriteField32(((PHY_UINT32)&info->u2phy_regs->u2phyacr0) - , RG_USB20_HSTX_SRCAL_EN_OFST, RG_USB20_HSTX_SRCAL_EN, 0x1); - DRV_MSLEEP(1); - - // => RG_FRCK_EN = 1 - // Enable free run clock - U3PhyWriteField32(((PHY_UINT32)&info->sifslv_fm_regs->fmmonr1) - , RG_FRCK_EN_OFST, RG_FRCK_EN, 1); - - // MT6290 HS signal quality patch - // => 
RG_CYCLECNT = 400 - // Setting cyclecnt =400 - U3PhyWriteField32(((PHY_UINT32)&info->sifslv_fm_regs->fmcr0) - , RG_CYCLECNT_OFST, RG_CYCLECNT, 0x400); - - // => RG_FREQDET_EN = 1 - // Enable frequency meter - U3PhyWriteField32(((PHY_UINT32)&info->sifslv_fm_regs->fmcr0) - , RG_FREQDET_EN_OFST, RG_FREQDET_EN, 0x1); - - // wait for FM detection done, set 10ms timeout - for(i=0; i<10; i++){ - // => u4FmOut = USB_FM_OUT - // read FM_OUT - u4FmOut = U3PhyReadReg32(((PHY_UINT32)&info->sifslv_fm_regs->fmmonr0)); - printk("FM_OUT value: u4FmOut = %d(0x%08X)\n", u4FmOut, u4FmOut); - - // check if FM detection done - if (u4FmOut != 0) - { - fgRet = 0; - printk("FM detection done! loop = %d\n", i); - - break; - } - - fgRet = 1; - DRV_MSLEEP(1); - } - // => RG_FREQDET_EN = 0 - // disable frequency meter - U3PhyWriteField32(((PHY_UINT32)&info->sifslv_fm_regs->fmcr0) - , RG_FREQDET_EN_OFST, RG_FREQDET_EN, 0); - - // => RG_FRCK_EN = 0 - // disable free run clock - U3PhyWriteField32(((PHY_UINT32)&info->sifslv_fm_regs->fmmonr1) - , RG_FRCK_EN_OFST, RG_FRCK_EN, 0); - - // => RG_USB20_HSTX_SRCAL_EN = 0 - // disable HS TX SR calibration - U3PhyWriteField32(((PHY_UINT32)&info->u2phy_regs->u2phyacr0) - , RG_USB20_HSTX_SRCAL_EN_OFST, RG_USB20_HSTX_SRCAL_EN, 0); - DRV_MSLEEP(1); - - if(u4FmOut == 0){ - U3PhyWriteField32(((PHY_UINT32)&info->u2phy_regs->u2phyacr0) - , RG_USB20_HSTX_SRCTRL_OFST, RG_USB20_HSTX_SRCTRL, 0x4); - - fgRet = 1; - } - else{ - // set reg = (1024/FM_OUT) * 25 * 0.028 (round to the nearest digits) - u4Tmp = (((1024 * 25 * U2_SR_COEF_7621) / u4FmOut) + 500) / 1000; - printk("SR calibration value u1SrCalVal = %d\n", (PHY_UINT8)u4Tmp); - U3PhyWriteField32(((PHY_UINT32)&info->u2phy_regs->u2phyacr0) - , RG_USB20_HSTX_SRCTRL_OFST, RG_USB20_HSTX_SRCTRL, u4Tmp); - } - return fgRet; -} - -#endif diff --git a/target/linux/ramips/files/drivers/usb/host/mtk-phy-7621.h b/target/linux/ramips/files/drivers/usb/host/mtk-phy-7621.h deleted file mode 100644 index 41b0c7744b..0000000000 --- a/target/linux/ramips/files/drivers/usb/host/mtk-phy-7621.h +++ /dev/null @@ -1,2871 +0,0 @@ -#ifdef CONFIG_PROJECT_7621 -#ifndef __MTK_PHY_7621_H -#define __MTK_PHY_7621_H - -#define U2_SR_COEF_7621 28 - -/////////////////////////////////////////////////////////////////////////////// - -struct u2phy_reg { - //0x0 - PHY_LE32 u2phyac0; - PHY_LE32 u2phyac1; - PHY_LE32 u2phyac2; - PHY_LE32 reserve0; - //0x10 - PHY_LE32 u2phyacr0; - PHY_LE32 u2phyacr1; - PHY_LE32 u2phyacr2; - PHY_LE32 u2phyacr3; - //0x20 - PHY_LE32 u2phyacr4; - PHY_LE32 u2phyamon0; - PHY_LE32 reserve1[2]; - //0x30~0x50 - PHY_LE32 reserve2[12]; - //0x60 - PHY_LE32 u2phydcr0; - PHY_LE32 u2phydcr1; - PHY_LE32 u2phydtm0; - PHY_LE32 u2phydtm1; - //0x70 - PHY_LE32 u2phydmon0; - PHY_LE32 u2phydmon1; - PHY_LE32 u2phydmon2; - PHY_LE32 u2phydmon3; - //0x80 - PHY_LE32 u2phybc12c; - PHY_LE32 u2phybc12c1; - PHY_LE32 reserve3[2]; - //0x90~0xe0 - PHY_LE32 reserve4[24]; - //0xf0 - PHY_LE32 reserve6[3]; - PHY_LE32 regfcom; -}; - -//U3D_U2PHYAC0 -#define RG_USB20_USBPLL_DIVEN (0x7<<28) //30:28 -#define RG_USB20_USBPLL_CKCTRL (0x3<<26) //27:26 -#define RG_USB20_USBPLL_PREDIV (0x3<<24) //25:24 -#define RG_USB20_USBPLL_FORCE_ON (0x1<<23) //23:23 -#define RG_USB20_USBPLL_FBDIV (0x7f<<16) //22:16 -#define RG_USB20_REF_EN (0x1<<15) //15:15 -#define RG_USB20_INTR_EN (0x1<<14) //14:14 -#define RG_USB20_BG_TRIM (0xf<<8) //11:8 -#define RG_USB20_BG_RBSEL (0x3<<6) //7:6 -#define RG_USB20_BG_RASEL (0x3<<4) //5:4 -#define RG_USB20_BGR_DIV (0x3<<2) //3:2 -#define RG_SIFSLV_CHP_EN (0x1<<1) //1:1 
-#define RG_SIFSLV_BGR_EN (0x1<<0) //0:0 - -//U3D_U2PHYAC1 -#define RG_USB20_VRT_VREF_SEL (0x7<<28) //30:28 -#define RG_USB20_TERM_VREF_SEL (0x7<<24) //26:24 -#define RG_USB20_MPX_SEL (0xff<<16) //23:16 -#define RG_USB20_MPX_OUT_SEL (0x3<<12) //13:12 -#define RG_USB20_TX_PH_ROT_SEL (0x7<<8) //10:8 -#define RG_USB20_USBPLL_ACCEN (0x1<<3) //3:3 -#define RG_USB20_USBPLL_LF (0x1<<2) //2:2 -#define RG_USB20_USBPLL_BR (0x1<<1) //1:1 -#define RG_USB20_USBPLL_BP (0x1<<0) //0:0 - -//U3D_U2PHYAC2 -#define RG_SIFSLV_MAC_BANDGAP_EN (0x1<<17) //17:17 -#define RG_SIFSLV_MAC_CHOPPER_EN (0x1<<16) //16:16 -#define RG_USB20_CLKREF_REV (0xff<<0) //7:0 - -//U3D_U2PHYACR0 -#define RG_USB20_ICUSB_EN (0x1<<24) //24:24 -#define RG_USB20_HSTX_SRCAL_EN (0x1<<23) //23:23 -#define RG_USB20_HSTX_SRCTRL (0x7<<16) //18:16 -#define RG_USB20_LS_CR (0x7<<12) //14:12 -#define RG_USB20_FS_CR (0x7<<8) //10:8 -#define RG_USB20_LS_SR (0x7<<4) //6:4 -#define RG_USB20_FS_SR (0x7<<0) //2:0 - -//U3D_U2PHYACR1 -#define RG_USB20_INIT_SQ_EN_DG (0x3<<28) //29:28 -#define RG_USB20_SQD (0x3<<24) //25:24 -#define RG_USB20_HSTX_TMODE_SEL (0x3<<20) //21:20 -#define RG_USB20_HSTX_TMODE_EN (0x1<<19) //19:19 -#define RG_USB20_PHYD_MONEN (0x1<<18) //18:18 -#define RG_USB20_INLPBK_EN (0x1<<17) //17:17 -#define RG_USB20_CHIRP_EN (0x1<<16) //16:16 -#define RG_USB20_DM_ABIST_SOURCE_EN (0x1<<15) //15:15 -#define RG_USB20_DM_ABIST_SELE (0xf<<8) //11:8 -#define RG_USB20_DP_ABIST_SOURCE_EN (0x1<<7) //7:7 -#define RG_USB20_DP_ABIST_SELE (0xf<<0) //3:0 - -//U3D_U2PHYACR2 -#define RG_USB20_OTG_ABIST_SELE (0x7<<29) //31:29 -#define RG_USB20_OTG_ABIST_EN (0x1<<28) //28:28 -#define RG_USB20_OTG_VBUSCMP_EN (0x1<<27) //27:27 -#define RG_USB20_OTG_VBUSTH (0x7<<24) //26:24 -#define RG_USB20_DISC_FIT_EN (0x1<<22) //22:22 -#define RG_USB20_DISCD (0x3<<20) //21:20 -#define RG_USB20_DISCTH (0xf<<16) //19:16 -#define RG_USB20_SQCAL_EN (0x1<<15) //15:15 -#define RG_USB20_SQCAL (0xf<<8) //11:8 -#define RG_USB20_SQTH (0xf<<0) //3:0 - -//U3D_U2PHYACR3 -#define RG_USB20_HSTX_DBIST (0xf<<28) //31:28 -#define RG_USB20_HSTX_BIST_EN (0x1<<26) //26:26 -#define RG_USB20_HSTX_I_EN_MODE (0x3<<24) //25:24 -#define RG_USB20_HSRX_TMODE_EN (0x1<<23) //23:23 -#define RG_USB20_HSRX_BIAS_EN_SEL (0x3<<20) //21:20 -#define RG_USB20_USB11_TMODE_EN (0x1<<19) //19:19 -#define RG_USB20_TMODE_FS_LS_TX_EN (0x1<<18) //18:18 -#define RG_USB20_TMODE_FS_LS_RCV_EN (0x1<<17) //17:17 -#define RG_USB20_TMODE_FS_LS_MODE (0x1<<16) //16:16 -#define RG_USB20_HS_TERM_EN_MODE (0x3<<13) //14:13 -#define RG_USB20_PUPD_BIST_EN (0x1<<12) //12:12 -#define RG_USB20_EN_PU_DM (0x1<<11) //11:11 -#define RG_USB20_EN_PD_DM (0x1<<10) //10:10 -#define RG_USB20_EN_PU_DP (0x1<<9) //9:9 -#define RG_USB20_EN_PD_DP (0x1<<8) //8:8 -#define RG_USB20_PHY_REV (0xff<<0) //7:0 - -//U3D_U2PHYACR4 -#define RG_USB20_DP_100K_MODE (0x1<<18) //18:18 -#define RG_USB20_DM_100K_EN (0x1<<17) //17:17 -#define USB20_DP_100K_EN (0x1<<16) //16:16 -#define USB20_GPIO_DM_I (0x1<<15) //15:15 -#define USB20_GPIO_DP_I (0x1<<14) //14:14 -#define USB20_GPIO_DM_OE (0x1<<13) //13:13 -#define USB20_GPIO_DP_OE (0x1<<12) //12:12 -#define RG_USB20_GPIO_CTL (0x1<<9) //9:9 -#define USB20_GPIO_MODE (0x1<<8) //8:8 -#define RG_USB20_TX_BIAS_EN (0x1<<5) //5:5 -#define RG_USB20_TX_VCMPDN_EN (0x1<<4) //4:4 -#define RG_USB20_HS_SQ_EN_MODE (0x3<<2) //3:2 -#define RG_USB20_HS_RCV_EN_MODE (0x3<<0) //1:0 - -//U3D_U2PHYAMON0 -#define RGO_USB20_GPIO_DM_O (0x1<<1) //1:1 -#define RGO_USB20_GPIO_DP_O (0x1<<0) //0:0 - -//U3D_U2PHYDCR0 -#define RG_USB20_CDR_TST (0x3<<30) 
//31:30 -#define RG_USB20_GATED_ENB (0x1<<29) //29:29 -#define RG_USB20_TESTMODE (0x3<<26) //27:26 -#define RG_USB20_PLL_STABLE (0x1<<25) //25:25 -#define RG_USB20_PLL_FORCE_ON (0x1<<24) //24:24 -#define RG_USB20_PHYD_RESERVE (0xffff<<8) //23:8 -#define RG_USB20_EBTHRLD (0x1<<7) //7:7 -#define RG_USB20_EARLY_HSTX_I (0x1<<6) //6:6 -#define RG_USB20_TX_TST (0x1<<5) //5:5 -#define RG_USB20_NEGEDGE_ENB (0x1<<4) //4:4 -#define RG_USB20_CDR_FILT (0xf<<0) //3:0 - -//U3D_U2PHYDCR1 -#define RG_USB20_PROBE_SEL (0xff<<24) //31:24 -#define RG_USB20_DRVVBUS (0x1<<23) //23:23 -#define RG_DEBUG_EN (0x1<<22) //22:22 -#define RG_USB20_OTG_PROBE (0x3<<20) //21:20 -#define RG_USB20_SW_PLLMODE (0x3<<18) //19:18 -#define RG_USB20_BERTH (0x3<<16) //17:16 -#define RG_USB20_LBMODE (0x3<<13) //14:13 -#define RG_USB20_FORCE_TAP (0x1<<12) //12:12 -#define RG_USB20_TAPSEL (0xfff<<0) //11:0 - -//U3D_U2PHYDTM0 -#define RG_UART_MODE (0x3<<30) //31:30 -#define FORCE_UART_I (0x1<<29) //29:29 -#define FORCE_UART_BIAS_EN (0x1<<28) //28:28 -#define FORCE_UART_TX_OE (0x1<<27) //27:27 -#define FORCE_UART_EN (0x1<<26) //26:26 -#define FORCE_USB_CLKEN (0x1<<25) //25:25 -#define FORCE_DRVVBUS (0x1<<24) //24:24 -#define FORCE_DATAIN (0x1<<23) //23:23 -#define FORCE_TXVALID (0x1<<22) //22:22 -#define FORCE_DM_PULLDOWN (0x1<<21) //21:21 -#define FORCE_DP_PULLDOWN (0x1<<20) //20:20 -#define FORCE_XCVRSEL (0x1<<19) //19:19 -#define FORCE_SUSPENDM (0x1<<18) //18:18 -#define FORCE_TERMSEL (0x1<<17) //17:17 -#define FORCE_OPMODE (0x1<<16) //16:16 -#define UTMI_MUXSEL (0x1<<15) //15:15 -#define RG_RESET (0x1<<14) //14:14 -#define RG_DATAIN (0xf<<10) //13:10 -#define RG_TXVALIDH (0x1<<9) //9:9 -#define RG_TXVALID (0x1<<8) //8:8 -#define RG_DMPULLDOWN (0x1<<7) //7:7 -#define RG_DPPULLDOWN (0x1<<6) //6:6 -#define RG_XCVRSEL (0x3<<4) //5:4 -#define RG_SUSPENDM (0x1<<3) //3:3 -#define RG_TERMSEL (0x1<<2) //2:2 -#define RG_OPMODE (0x3<<0) //1:0 - -//U3D_U2PHYDTM1 -#define RG_USB20_PRBS7_EN (0x1<<31) //31:31 -#define RG_USB20_PRBS7_BITCNT (0x3f<<24) //29:24 -#define RG_USB20_CLK48M_EN (0x1<<23) //23:23 -#define RG_USB20_CLK60M_EN (0x1<<22) //22:22 -#define RG_UART_I (0x1<<19) //19:19 -#define RG_UART_BIAS_EN (0x1<<18) //18:18 -#define RG_UART_TX_OE (0x1<<17) //17:17 -#define RG_UART_EN (0x1<<16) //16:16 -#define FORCE_VBUSVALID (0x1<<13) //13:13 -#define FORCE_SESSEND (0x1<<12) //12:12 -#define FORCE_BVALID (0x1<<11) //11:11 -#define FORCE_AVALID (0x1<<10) //10:10 -#define FORCE_IDDIG (0x1<<9) //9:9 -#define FORCE_IDPULLUP (0x1<<8) //8:8 -#define RG_VBUSVALID (0x1<<5) //5:5 -#define RG_SESSEND (0x1<<4) //4:4 -#define RG_BVALID (0x1<<3) //3:3 -#define RG_AVALID (0x1<<2) //2:2 -#define RG_IDDIG (0x1<<1) //1:1 -#define RG_IDPULLUP (0x1<<0) //0:0 - -//U3D_U2PHYDMON0 -#define RG_USB20_PRBS7_BERTH (0xff<<0) //7:0 - -//U3D_U2PHYDMON1 -#define USB20_UART_O (0x1<<31) //31:31 -#define RGO_USB20_LB_PASS (0x1<<30) //30:30 -#define RGO_USB20_LB_DONE (0x1<<29) //29:29 -#define AD_USB20_BVALID (0x1<<28) //28:28 -#define USB20_IDDIG (0x1<<27) //27:27 -#define AD_USB20_VBUSVALID (0x1<<26) //26:26 -#define AD_USB20_SESSEND (0x1<<25) //25:25 -#define AD_USB20_AVALID (0x1<<24) //24:24 -#define USB20_LINE_STATE (0x3<<22) //23:22 -#define USB20_HST_DISCON (0x1<<21) //21:21 -#define USB20_TX_READY (0x1<<20) //20:20 -#define USB20_RX_ERROR (0x1<<19) //19:19 -#define USB20_RX_ACTIVE (0x1<<18) //18:18 -#define USB20_RX_VALIDH (0x1<<17) //17:17 -#define USB20_RX_VALID (0x1<<16) //16:16 -#define USB20_DATA_OUT (0xffff<<0) //15:0 - -//U3D_U2PHYDMON2 -#define RGO_TXVALID_CNT 
(0xff<<24) //31:24 -#define RGO_RXACTIVE_CNT (0xff<<16) //23:16 -#define RGO_USB20_LB_BERCNT (0xff<<8) //15:8 -#define USB20_PROBE_OUT (0xff<<0) //7:0 - -//U3D_U2PHYDMON3 -#define RGO_USB20_PRBS7_ERRCNT (0xffff<<16) //31:16 -#define RGO_USB20_PRBS7_DONE (0x1<<3) //3:3 -#define RGO_USB20_PRBS7_LOCK (0x1<<2) //2:2 -#define RGO_USB20_PRBS7_PASS (0x1<<1) //1:1 -#define RGO_USB20_PRBS7_PASSTH (0x1<<0) //0:0 - -//U3D_U2PHYBC12C -#define RG_SIFSLV_CHGDT_DEGLCH_CNT (0xf<<28) //31:28 -#define RG_SIFSLV_CHGDT_CTRL_CNT (0xf<<24) //27:24 -#define RG_SIFSLV_CHGDT_FORCE_MODE (0x1<<16) //16:16 -#define RG_CHGDT_ISRC_LEV (0x3<<14) //15:14 -#define RG_CHGDT_VDATSRC (0x1<<13) //13:13 -#define RG_CHGDT_BGVREF_SEL (0x7<<10) //12:10 -#define RG_CHGDT_RDVREF_SEL (0x3<<8) //9:8 -#define RG_CHGDT_ISRC_DP (0x1<<7) //7:7 -#define RG_SIFSLV_CHGDT_OPOUT_DM (0x1<<6) //6:6 -#define RG_CHGDT_VDAT_DM (0x1<<5) //5:5 -#define RG_CHGDT_OPOUT_DP (0x1<<4) //4:4 -#define RG_SIFSLV_CHGDT_VDAT_DP (0x1<<3) //3:3 -#define RG_SIFSLV_CHGDT_COMP_EN (0x1<<2) //2:2 -#define RG_SIFSLV_CHGDT_OPDRV_EN (0x1<<1) //1:1 -#define RG_CHGDT_EN (0x1<<0) //0:0 - -//U3D_U2PHYBC12C1 -#define RG_CHGDT_REV (0xff<<0) //7:0 - -//U3D_REGFCOM -#define RG_PAGE (0xff<<24) //31:24 -#define I2C_MODE (0x1<<16) //16:16 - - -/* OFFSET */ - -//U3D_U2PHYAC0 -#define RG_USB20_USBPLL_DIVEN_OFST (28) -#define RG_USB20_USBPLL_CKCTRL_OFST (26) -#define RG_USB20_USBPLL_PREDIV_OFST (24) -#define RG_USB20_USBPLL_FORCE_ON_OFST (23) -#define RG_USB20_USBPLL_FBDIV_OFST (16) -#define RG_USB20_REF_EN_OFST (15) -#define RG_USB20_INTR_EN_OFST (14) -#define RG_USB20_BG_TRIM_OFST (8) -#define RG_USB20_BG_RBSEL_OFST (6) -#define RG_USB20_BG_RASEL_OFST (4) -#define RG_USB20_BGR_DIV_OFST (2) -#define RG_SIFSLV_CHP_EN_OFST (1) -#define RG_SIFSLV_BGR_EN_OFST (0) - -//U3D_U2PHYAC1 -#define RG_USB20_VRT_VREF_SEL_OFST (28) -#define RG_USB20_TERM_VREF_SEL_OFST (24) -#define RG_USB20_MPX_SEL_OFST (16) -#define RG_USB20_MPX_OUT_SEL_OFST (12) -#define RG_USB20_TX_PH_ROT_SEL_OFST (8) -#define RG_USB20_USBPLL_ACCEN_OFST (3) -#define RG_USB20_USBPLL_LF_OFST (2) -#define RG_USB20_USBPLL_BR_OFST (1) -#define RG_USB20_USBPLL_BP_OFST (0) - -//U3D_U2PHYAC2 -#define RG_SIFSLV_MAC_BANDGAP_EN_OFST (17) -#define RG_SIFSLV_MAC_CHOPPER_EN_OFST (16) -#define RG_USB20_CLKREF_REV_OFST (0) - -//U3D_U2PHYACR0 -#define RG_USB20_ICUSB_EN_OFST (24) -#define RG_USB20_HSTX_SRCAL_EN_OFST (23) -#define RG_USB20_HSTX_SRCTRL_OFST (16) -#define RG_USB20_LS_CR_OFST (12) -#define RG_USB20_FS_CR_OFST (8) -#define RG_USB20_LS_SR_OFST (4) -#define RG_USB20_FS_SR_OFST (0) - -//U3D_U2PHYACR1 -#define RG_USB20_INIT_SQ_EN_DG_OFST (28) -#define RG_USB20_SQD_OFST (24) -#define RG_USB20_HSTX_TMODE_SEL_OFST (20) -#define RG_USB20_HSTX_TMODE_EN_OFST (19) -#define RG_USB20_PHYD_MONEN_OFST (18) -#define RG_USB20_INLPBK_EN_OFST (17) -#define RG_USB20_CHIRP_EN_OFST (16) -#define RG_USB20_DM_ABIST_SOURCE_EN_OFST (15) -#define RG_USB20_DM_ABIST_SELE_OFST (8) -#define RG_USB20_DP_ABIST_SOURCE_EN_OFST (7) -#define RG_USB20_DP_ABIST_SELE_OFST (0) - -//U3D_U2PHYACR2 -#define RG_USB20_OTG_ABIST_SELE_OFST (29) -#define RG_USB20_OTG_ABIST_EN_OFST (28) -#define RG_USB20_OTG_VBUSCMP_EN_OFST (27) -#define RG_USB20_OTG_VBUSTH_OFST (24) -#define RG_USB20_DISC_FIT_EN_OFST (22) -#define RG_USB20_DISCD_OFST (20) -#define RG_USB20_DISCTH_OFST (16) -#define RG_USB20_SQCAL_EN_OFST (15) -#define RG_USB20_SQCAL_OFST (8) -#define RG_USB20_SQTH_OFST (0) - -//U3D_U2PHYACR3 -#define RG_USB20_HSTX_DBIST_OFST (28) -#define RG_USB20_HSTX_BIST_EN_OFST (26) -#define 
RG_USB20_HSTX_I_EN_MODE_OFST (24) -#define RG_USB20_HSRX_TMODE_EN_OFST (23) -#define RG_USB20_HSRX_BIAS_EN_SEL_OFST (20) -#define RG_USB20_USB11_TMODE_EN_OFST (19) -#define RG_USB20_TMODE_FS_LS_TX_EN_OFST (18) -#define RG_USB20_TMODE_FS_LS_RCV_EN_OFST (17) -#define RG_USB20_TMODE_FS_LS_MODE_OFST (16) -#define RG_USB20_HS_TERM_EN_MODE_OFST (13) -#define RG_USB20_PUPD_BIST_EN_OFST (12) -#define RG_USB20_EN_PU_DM_OFST (11) -#define RG_USB20_EN_PD_DM_OFST (10) -#define RG_USB20_EN_PU_DP_OFST (9) -#define RG_USB20_EN_PD_DP_OFST (8) -#define RG_USB20_PHY_REV_OFST (0) - -//U3D_U2PHYACR4 -#define RG_USB20_DP_100K_MODE_OFST (18) -#define RG_USB20_DM_100K_EN_OFST (17) -#define USB20_DP_100K_EN_OFST (16) -#define USB20_GPIO_DM_I_OFST (15) -#define USB20_GPIO_DP_I_OFST (14) -#define USB20_GPIO_DM_OE_OFST (13) -#define USB20_GPIO_DP_OE_OFST (12) -#define RG_USB20_GPIO_CTL_OFST (9) -#define USB20_GPIO_MODE_OFST (8) -#define RG_USB20_TX_BIAS_EN_OFST (5) -#define RG_USB20_TX_VCMPDN_EN_OFST (4) -#define RG_USB20_HS_SQ_EN_MODE_OFST (2) -#define RG_USB20_HS_RCV_EN_MODE_OFST (0) - -//U3D_U2PHYAMON0 -#define RGO_USB20_GPIO_DM_O_OFST (1) -#define RGO_USB20_GPIO_DP_O_OFST (0) - -//U3D_U2PHYDCR0 -#define RG_USB20_CDR_TST_OFST (30) -#define RG_USB20_GATED_ENB_OFST (29) -#define RG_USB20_TESTMODE_OFST (26) -#define RG_USB20_PLL_STABLE_OFST (25) -#define RG_USB20_PLL_FORCE_ON_OFST (24) -#define RG_USB20_PHYD_RESERVE_OFST (8) -#define RG_USB20_EBTHRLD_OFST (7) -#define RG_USB20_EARLY_HSTX_I_OFST (6) -#define RG_USB20_TX_TST_OFST (5) -#define RG_USB20_NEGEDGE_ENB_OFST (4) -#define RG_USB20_CDR_FILT_OFST (0) - -//U3D_U2PHYDCR1 -#define RG_USB20_PROBE_SEL_OFST (24) -#define RG_USB20_DRVVBUS_OFST (23) -#define RG_DEBUG_EN_OFST (22) -#define RG_USB20_OTG_PROBE_OFST (20) -#define RG_USB20_SW_PLLMODE_OFST (18) -#define RG_USB20_BERTH_OFST (16) -#define RG_USB20_LBMODE_OFST (13) -#define RG_USB20_FORCE_TAP_OFST (12) -#define RG_USB20_TAPSEL_OFST (0) - -//U3D_U2PHYDTM0 -#define RG_UART_MODE_OFST (30) -#define FORCE_UART_I_OFST (29) -#define FORCE_UART_BIAS_EN_OFST (28) -#define FORCE_UART_TX_OE_OFST (27) -#define FORCE_UART_EN_OFST (26) -#define FORCE_USB_CLKEN_OFST (25) -#define FORCE_DRVVBUS_OFST (24) -#define FORCE_DATAIN_OFST (23) -#define FORCE_TXVALID_OFST (22) -#define FORCE_DM_PULLDOWN_OFST (21) -#define FORCE_DP_PULLDOWN_OFST (20) -#define FORCE_XCVRSEL_OFST (19) -#define FORCE_SUSPENDM_OFST (18) -#define FORCE_TERMSEL_OFST (17) -#define FORCE_OPMODE_OFST (16) -#define UTMI_MUXSEL_OFST (15) -#define RG_RESET_OFST (14) -#define RG_DATAIN_OFST (10) -#define RG_TXVALIDH_OFST (9) -#define RG_TXVALID_OFST (8) -#define RG_DMPULLDOWN_OFST (7) -#define RG_DPPULLDOWN_OFST (6) -#define RG_XCVRSEL_OFST (4) -#define RG_SUSPENDM_OFST (3) -#define RG_TERMSEL_OFST (2) -#define RG_OPMODE_OFST (0) - -//U3D_U2PHYDTM1 -#define RG_USB20_PRBS7_EN_OFST (31) -#define RG_USB20_PRBS7_BITCNT_OFST (24) -#define RG_USB20_CLK48M_EN_OFST (23) -#define RG_USB20_CLK60M_EN_OFST (22) -#define RG_UART_I_OFST (19) -#define RG_UART_BIAS_EN_OFST (18) -#define RG_UART_TX_OE_OFST (17) -#define RG_UART_EN_OFST (16) -#define FORCE_VBUSVALID_OFST (13) -#define FORCE_SESSEND_OFST (12) -#define FORCE_BVALID_OFST (11) -#define FORCE_AVALID_OFST (10) -#define FORCE_IDDIG_OFST (9) -#define FORCE_IDPULLUP_OFST (8) -#define RG_VBUSVALID_OFST (5) -#define RG_SESSEND_OFST (4) -#define RG_BVALID_OFST (3) -#define RG_AVALID_OFST (2) -#define RG_IDDIG_OFST (1) -#define RG_IDPULLUP_OFST (0) - -//U3D_U2PHYDMON0 -#define RG_USB20_PRBS7_BERTH_OFST (0) - -//U3D_U2PHYDMON1 
-#define USB20_UART_O_OFST (31) -#define RGO_USB20_LB_PASS_OFST (30) -#define RGO_USB20_LB_DONE_OFST (29) -#define AD_USB20_BVALID_OFST (28) -#define USB20_IDDIG_OFST (27) -#define AD_USB20_VBUSVALID_OFST (26) -#define AD_USB20_SESSEND_OFST (25) -#define AD_USB20_AVALID_OFST (24) -#define USB20_LINE_STATE_OFST (22) -#define USB20_HST_DISCON_OFST (21) -#define USB20_TX_READY_OFST (20) -#define USB20_RX_ERROR_OFST (19) -#define USB20_RX_ACTIVE_OFST (18) -#define USB20_RX_VALIDH_OFST (17) -#define USB20_RX_VALID_OFST (16) -#define USB20_DATA_OUT_OFST (0) - -//U3D_U2PHYDMON2 -#define RGO_TXVALID_CNT_OFST (24) -#define RGO_RXACTIVE_CNT_OFST (16) -#define RGO_USB20_LB_BERCNT_OFST (8) -#define USB20_PROBE_OUT_OFST (0) - -//U3D_U2PHYDMON3 -#define RGO_USB20_PRBS7_ERRCNT_OFST (16) -#define RGO_USB20_PRBS7_DONE_OFST (3) -#define RGO_USB20_PRBS7_LOCK_OFST (2) -#define RGO_USB20_PRBS7_PASS_OFST (1) -#define RGO_USB20_PRBS7_PASSTH_OFST (0) - -//U3D_U2PHYBC12C -#define RG_SIFSLV_CHGDT_DEGLCH_CNT_OFST (28) -#define RG_SIFSLV_CHGDT_CTRL_CNT_OFST (24) -#define RG_SIFSLV_CHGDT_FORCE_MODE_OFST (16) -#define RG_CHGDT_ISRC_LEV_OFST (14) -#define RG_CHGDT_VDATSRC_OFST (13) -#define RG_CHGDT_BGVREF_SEL_OFST (10) -#define RG_CHGDT_RDVREF_SEL_OFST (8) -#define RG_CHGDT_ISRC_DP_OFST (7) -#define RG_SIFSLV_CHGDT_OPOUT_DM_OFST (6) -#define RG_CHGDT_VDAT_DM_OFST (5) -#define RG_CHGDT_OPOUT_DP_OFST (4) -#define RG_SIFSLV_CHGDT_VDAT_DP_OFST (3) -#define RG_SIFSLV_CHGDT_COMP_EN_OFST (2) -#define RG_SIFSLV_CHGDT_OPDRV_EN_OFST (1) -#define RG_CHGDT_EN_OFST (0) - -//U3D_U2PHYBC12C1 -#define RG_CHGDT_REV_OFST (0) - -//U3D_REGFCOM -#define RG_PAGE_OFST (24) -#define I2C_MODE_OFST (16) - - -/////////////////////////////////////////////////////////////////////////////// - -struct u3phya_reg { - //0x0 - PHY_LE32 reg0; - PHY_LE32 reg1; - PHY_LE32 reg2; - PHY_LE32 reg3; - //0x10 - PHY_LE32 reg4; - PHY_LE32 reg5; - PHY_LE32 reg6; - PHY_LE32 reg7; - //0x20 - PHY_LE32 reg8; - PHY_LE32 reg9; - PHY_LE32 rega; - PHY_LE32 regb; - //0x30 - PHY_LE32 regc; - PHY_LE32 regd; - PHY_LE32 rege; -}; - -//U3D_reg0 -#define RG_SSUSB_BGR_EN (0x1<<31) //31:31 -#define RG_SSUSB_CHPEN (0x1<<30) //30:30 -#define RG_SSUSB_BG_DIV (0x3<<28) //29:28 -#define RG_SSUSB_INTR_EN (0x1<<26) //26:26 -#define RG_SSUSB_MPX_OUT_SEL (0x3<<24) //25:24 -#define RG_SSUSB_MPX_SEL (0xff<<16) //23:16 -#define RG_SSUSB_REF_EN (0x1<<15) //15:15 -#define RG_SSUSB_VRT_VREF_SEL (0xf<<11) //14:11 -#define RG_SSUSB_BG_RASEL (0x3<<9) //10:9 -#define RG_SSUSB_BG_RBSEL (0x3<<7) //8:7 -#define RG_SSUSB_BG_MONEN (0x1<<6) //6:6 -#define RG_PCIE_CLKDRV_OFFSET (0x3<<0) //1:0 - -//U3D_reg1 -#define RG_PCIE_CLKDRV_SLEW (0x3<<30) //31:30 -#define RG_PCIE_CLKDRV_AMP (0x7<<27) //29:27 -#define RG_SSUSB_XTAL_TST_A2DCK_EN (0x1<<26) //26:26 -#define RG_SSUSB_XTAL_MON_EN (0x1<<25) //25:25 -#define RG_SSUSB_XTAL_HYS (0x1<<24) //24:24 -#define RG_SSUSB_XTAL_TOP_RESERVE (0xffff<<8) //23:8 -#define RG_SSUSB_SYSPLL_RESERVE (0xf<<4) //7:4 -#define RG_SSUSB_SYSPLL_FBSEL (0x3<<2) //3:2 -#define RG_SSUSB_SYSPLL_PREDIV (0x3<<0) //1:0 - -//U3D_reg2 -#define RG_SSUSB_SYSPLL_LF (0x1<<31) //31:31 -#define RG_SSUSB_SYSPLL_FBDIV (0x7f<<24) //30:24 -#define RG_SSUSB_SYSPLL_POSDIV (0x3<<22) //23:22 -#define RG_SSUSB_SYSPLL_VCO_DIV_SEL (0x1<<21) //21:21 -#define RG_SSUSB_SYSPLL_BLP (0x1<<20) //20:20 -#define RG_SSUSB_SYSPLL_BP (0x1<<19) //19:19 -#define RG_SSUSB_SYSPLL_BR (0x1<<18) //18:18 -#define RG_SSUSB_SYSPLL_BC (0x1<<17) //17:17 -#define RG_SSUSB_SYSPLL_DIVEN (0x7<<14) //16:14 -#define RG_SSUSB_SYSPLL_FPEN 
(0x1<<13) //13:13 -#define RG_SSUSB_SYSPLL_MONCK_EN (0x1<<12) //12:12 -#define RG_SSUSB_SYSPLL_MONVC_EN (0x1<<11) //11:11 -#define RG_SSUSB_SYSPLL_MONREF_EN (0x1<<10) //10:10 -#define RG_SSUSB_SYSPLL_VOD_EN (0x1<<9) //9:9 -#define RG_SSUSB_SYSPLL_CK_SEL (0x1<<8) //8:8 - -//U3D_reg3 -#define RG_SSUSB_SYSPLL_TOP_RESERVE (0xffff<<16) //31:16 - -//U3D_reg4 -#define RG_SSUSB_SYSPLL_PCW_NCPO (0x7fffffff<<1) //31:1 - -//U3D_reg5 -#define RG_SSUSB_SYSPLL_DDS_PI_C (0x7<<29) //31:29 -#define RG_SSUSB_SYSPLL_DDS_HF_EN (0x1<<28) //28:28 -#define RG_SSUSB_SYSPLL_DDS_PREDIV2 (0x1<<27) //27:27 -#define RG_SSUSB_SYSPLL_DDS_POSTDIV2 (0x1<<26) //26:26 -#define RG_SSUSB_SYSPLL_DDS_PI_PL_EN (0x1<<25) //25:25 -#define RG_SSUSB_SYSPLL_DDS_PI_RST_SEL (0x1<<24) //24:24 -#define RG_SSUSB_SYSPLL_DDS_MONEN (0x1<<23) //23:23 -#define RG_SSUSB_SYSPLL_DDS_LPF_EN (0x1<<22) //22:22 -#define RG_SSUSB_SYSPLL_CLK_PH_INV (0x1<<21) //21:21 -#define RG_SSUSB_SYSPLL_DDS_SEL_EXT (0x1<<20) //20:20 -#define RG_SSUSB_SYSPLL_DDS_DMY (0xffff<<0) //15:0 - -//U3D_reg6 -#define RG_SSUSB_TX250MCK_INVB (0x1<<31) //31:31 -#define RG_SSUSB_IDRV_ITAILOP_EN (0x1<<30) //30:30 -#define RG_SSUSB_IDRV_CALIB (0x3f<<24) //29:24 -#define RG_SSUSB_TX_R50_FON (0x1<<23) //23:23 -#define RG_SSUSB_TX_SR (0x7<<20) //22:20 -#define RG_SSUSB_TX_EIDLE_CM (0xf<<16) //19:16 -#define RG_SSUSB_RXDET_RSEL (0x3<<14) //15:14 -#define RG_SSUSB_RXDET_VTHSEL (0x3<<12) //13:12 -#define RG_SSUSB_CKMON_EN (0x1<<11) //11:11 -#define RG_SSUSB_CKMON_SEL (0x7<<8) //10:8 -#define RG_SSUSB_TX_VLMON_EN (0x1<<7) //7:7 -#define RG_SSUSB_TX_VLMON_SEL (0x1<<6) //6:6 -#define RG_SSUSB_RXLBTX_EN (0x1<<5) //5:5 -#define RG_SSUSB_TXLBRX_EN (0x1<<4) //4:4 - -//U3D_reg7 -#define RG_SSUSB_RESERVE (0xfffff<<12) //31:12 -#define RG_SSUSB_PLL_CKCTRL (0x3<<10) //11:10 -#define RG_SSUSB_PLL_POSDIV (0x3<<8) //9:8 -#define RG_SSUSB_PLL_AUTOK_LOAD (0x1<<7) //7:7 -#define RG_SSUSB_PLL_LOAD_RSTB (0x1<<6) //6:6 -#define RG_SSUSB_PLL_EP_EN (0x1<<5) //5:5 -#define RG_SSUSB_PLL_VOD_EN (0x1<<4) //4:4 -#define RG_SSUSB_PLL_V11_EN (0x1<<3) //3:3 -#define RG_SSUSB_PLL_MONREF_EN (0x1<<2) //2:2 -#define RG_SSUSB_PLL_MONCK_EN (0x1<<1) //1:1 -#define RG_SSUSB_PLL_MONVC_EN (0x1<<0) //0:0 - -//U3D_reg8 -#define RG_SSUSB_PLL_RESERVE (0xffff<<0) //15:0 - -//U3D_reg9 -#define RG_SSUSB_PLL_DDS_DMY (0xffff<<16) //31:16 -#define RG_SSUSB_PLL_SSC_PRD (0xffff<<0) //15:0 - -//U3D_regA -#define RG_SSUSB_PLL_SSC_PHASE_INI (0x1<<31) //31:31 -#define RG_SSUSB_PLL_SSC_TRI_EN (0x1<<30) //30:30 -#define RG_SSUSB_PLL_CLK_PH_INV (0x1<<29) //29:29 -#define RG_SSUSB_PLL_DDS_LPF_EN (0x1<<28) //28:28 -#define RG_SSUSB_PLL_DDS_VADJ (0x7<<21) //23:21 -#define RG_SSUSB_PLL_DDS_MONEN (0x1<<20) //20:20 -#define RG_SSUSB_PLL_DDS_PS_VADJ (0x7<<17) //19:17 -#define RG_SSUSB_PLL_DDS_SEL_EXT (0x1<<16) //16:16 -#define RG_SSUSB_CDR_PD_DIV_BYPASS (0x1<<15) //15:15 -#define RG_SSUSB_CDR_PD_DIV_SEL (0x1<<14) //14:14 -#define RG_SSUSB_CDR_CPBIAS_SEL (0x1<<13) //13:13 -#define RG_SSUSB_CDR_OSCDET_EN (0x1<<12) //12:12 -#define RG_SSUSB_CDR_MONMUX (0x1<<11) //11:11 -#define RG_SSUSB_CDR_CKCTRL (0x3<<9) //10:9 -#define RG_SSUSB_CDR_ACCEN (0x1<<8) //8:8 -#define RG_SSUSB_CDR_BYPASS (0x3<<6) //7:6 -#define RG_SSUSB_CDR_PI_SLEW (0x3<<4) //5:4 -#define RG_SSUSB_CDR_EPEN (0x1<<3) //3:3 -#define RG_SSUSB_CDR_AUTOK_LOAD (0x1<<2) //2:2 -#define RG_SSUSB_CDR_LOAD_RSTB (0x1<<1) //1:1 -#define RG_SSUSB_CDR_MONEN (0x1<<0) //0:0 - -//U3D_regB -#define RG_SSUSB_CDR_MONEN_DIG (0x1<<31) //31:31 -#define RG_SSUSB_CDR_REGOD (0x3<<29) //30:29 -#define RG_SSUSB_RX_DAC_EN 
(0x1<<26) //26:26 -#define RG_SSUSB_RX_DAC_PWD (0x1<<25) //25:25 -#define RG_SSUSB_EQ_CURSEL (0x1<<24) //24:24 -#define RG_SSUSB_RX_DAC_MUX (0x1f<<19) //23:19 -#define RG_SSUSB_RX_R2T_EN (0x1<<18) //18:18 -#define RG_SSUSB_RX_T2R_EN (0x1<<17) //17:17 -#define RG_SSUSB_RX_50_LOWER (0x7<<14) //16:14 -#define RG_SSUSB_RX_50_TAR (0x3<<12) //13:12 -#define RG_SSUSB_RX_SW_CTRL (0xf<<7) //10:7 -#define RG_PCIE_SIGDET_VTH (0x3<<5) //6:5 -#define RG_PCIE_SIGDET_LPF (0x3<<3) //4:3 -#define RG_SSUSB_LFPS_MON_EN (0x1<<2) //2:2 - -//U3D_regC -#define RG_SSUSB_RXAFE_DCMON_SEL (0xf<<28) //31:28 -#define RG_SSUSB_CDR_RESERVE (0xff<<16) //23:16 -#define RG_SSUSB_RXAFE_RESERVE (0xff<<8) //15:8 -#define RG_PCIE_RX_RESERVE (0xff<<0) //7:0 - -//U3D_redD -#define RGS_SSUSB_CDR_NO_OSC (0x1<<8) //8:8 -#define RGS_SSUSB_RX_DEBUG_RESERVE (0xff<<0) //7:0 - -//U3D_regE -#define RG_SSUSB_INT_BIAS_SEL (0x1<<4) //4:4 -#define RG_SSUSB_EXT_BIAS_SEL (0x1<<3) //3:3 -#define RG_SSUSB_RX_P1_ENTRY_PASS (0x1<<2) //2:2 -#define RG_SSUSB_RX_PD_RST (0x1<<1) //1:1 -#define RG_SSUSB_RX_PD_RST_PASS (0x1<<0) //0:0 - - -/* OFFSET */ - -//U3D_reg0 -#define RG_SSUSB_BGR_EN_OFST (31) -#define RG_SSUSB_CHPEN_OFST (30) -#define RG_SSUSB_BG_DIV_OFST (28) -#define RG_SSUSB_INTR_EN_OFST (26) -#define RG_SSUSB_MPX_OUT_SEL_OFST (24) -#define RG_SSUSB_MPX_SEL_OFST (16) -#define RG_SSUSB_REF_EN_OFST (15) -#define RG_SSUSB_VRT_VREF_SEL_OFST (11) -#define RG_SSUSB_BG_RASEL_OFST (9) -#define RG_SSUSB_BG_RBSEL_OFST (7) -#define RG_SSUSB_BG_MONEN_OFST (6) -#define RG_PCIE_CLKDRV_OFFSET_OFST (0) - -//U3D_reg1 -#define RG_PCIE_CLKDRV_SLEW_OFST (30) -#define RG_PCIE_CLKDRV_AMP_OFST (27) -#define RG_SSUSB_XTAL_TST_A2DCK_EN_OFST (26) -#define RG_SSUSB_XTAL_MON_EN_OFST (25) -#define RG_SSUSB_XTAL_HYS_OFST (24) -#define RG_SSUSB_XTAL_TOP_RESERVE_OFST (8) -#define RG_SSUSB_SYSPLL_RESERVE_OFST (4) -#define RG_SSUSB_SYSPLL_FBSEL_OFST (2) -#define RG_SSUSB_SYSPLL_PREDIV_OFST (0) - -//U3D_reg2 -#define RG_SSUSB_SYSPLL_LF_OFST (31) -#define RG_SSUSB_SYSPLL_FBDIV_OFST (24) -#define RG_SSUSB_SYSPLL_POSDIV_OFST (22) -#define RG_SSUSB_SYSPLL_VCO_DIV_SEL_OFST (21) -#define RG_SSUSB_SYSPLL_BLP_OFST (20) -#define RG_SSUSB_SYSPLL_BP_OFST (19) -#define RG_SSUSB_SYSPLL_BR_OFST (18) -#define RG_SSUSB_SYSPLL_BC_OFST (17) -#define RG_SSUSB_SYSPLL_DIVEN_OFST (14) -#define RG_SSUSB_SYSPLL_FPEN_OFST (13) -#define RG_SSUSB_SYSPLL_MONCK_EN_OFST (12) -#define RG_SSUSB_SYSPLL_MONVC_EN_OFST (11) -#define RG_SSUSB_SYSPLL_MONREF_EN_OFST (10) -#define RG_SSUSB_SYSPLL_VOD_EN_OFST (9) -#define RG_SSUSB_SYSPLL_CK_SEL_OFST (8) - -//U3D_reg3 -#define RG_SSUSB_SYSPLL_TOP_RESERVE_OFST (16) - -//U3D_reg4 -#define RG_SSUSB_SYSPLL_PCW_NCPO_OFST (1) - -//U3D_reg5 -#define RG_SSUSB_SYSPLL_DDS_PI_C_OFST (29) -#define RG_SSUSB_SYSPLL_DDS_HF_EN_OFST (28) -#define RG_SSUSB_SYSPLL_DDS_PREDIV2_OFST (27) -#define RG_SSUSB_SYSPLL_DDS_POSTDIV2_OFST (26) -#define RG_SSUSB_SYSPLL_DDS_PI_PL_EN_OFST (25) -#define RG_SSUSB_SYSPLL_DDS_PI_RST_SEL_OFST (24) -#define RG_SSUSB_SYSPLL_DDS_MONEN_OFST (23) -#define RG_SSUSB_SYSPLL_DDS_LPF_EN_OFST (22) -#define RG_SSUSB_SYSPLL_CLK_PH_INV_OFST (21) -#define RG_SSUSB_SYSPLL_DDS_SEL_EXT_OFST (20) -#define RG_SSUSB_SYSPLL_DDS_DMY_OFST (0) - -//U3D_reg6 -#define RG_SSUSB_TX250MCK_INVB_OFST (31) -#define RG_SSUSB_IDRV_ITAILOP_EN_OFST (30) -#define RG_SSUSB_IDRV_CALIB_OFST (24) -#define RG_SSUSB_TX_R50_FON_OFST (23) -#define RG_SSUSB_TX_SR_OFST (20) -#define RG_SSUSB_TX_EIDLE_CM_OFST (16) -#define RG_SSUSB_RXDET_RSEL_OFST (14) -#define RG_SSUSB_RXDET_VTHSEL_OFST (12) -#define 
RG_SSUSB_CKMON_EN_OFST (11) -#define RG_SSUSB_CKMON_SEL_OFST (8) -#define RG_SSUSB_TX_VLMON_EN_OFST (7) -#define RG_SSUSB_TX_VLMON_SEL_OFST (6) -#define RG_SSUSB_RXLBTX_EN_OFST (5) -#define RG_SSUSB_TXLBRX_EN_OFST (4) - -//U3D_reg7 -#define RG_SSUSB_RESERVE_OFST (12) -#define RG_SSUSB_PLL_CKCTRL_OFST (10) -#define RG_SSUSB_PLL_POSDIV_OFST (8) -#define RG_SSUSB_PLL_AUTOK_LOAD_OFST (7) -#define RG_SSUSB_PLL_LOAD_RSTB_OFST (6) -#define RG_SSUSB_PLL_EP_EN_OFST (5) -#define RG_SSUSB_PLL_VOD_EN_OFST (4) -#define RG_SSUSB_PLL_V11_EN_OFST (3) -#define RG_SSUSB_PLL_MONREF_EN_OFST (2) -#define RG_SSUSB_PLL_MONCK_EN_OFST (1) -#define RG_SSUSB_PLL_MONVC_EN_OFST (0) - -//U3D_reg8 -#define RG_SSUSB_PLL_RESERVE_OFST (0) - -//U3D_reg9 -#define RG_SSUSB_PLL_DDS_DMY_OFST (16) -#define RG_SSUSB_PLL_SSC_PRD_OFST (0) - -//U3D_regA -#define RG_SSUSB_PLL_SSC_PHASE_INI_OFST (31) -#define RG_SSUSB_PLL_SSC_TRI_EN_OFST (30) -#define RG_SSUSB_PLL_CLK_PH_INV_OFST (29) -#define RG_SSUSB_PLL_DDS_LPF_EN_OFST (28) -#define RG_SSUSB_PLL_DDS_VADJ_OFST (21) -#define RG_SSUSB_PLL_DDS_MONEN_OFST (20) -#define RG_SSUSB_PLL_DDS_PS_VADJ_OFST (17) -#define RG_SSUSB_PLL_DDS_SEL_EXT_OFST (16) -#define RG_SSUSB_CDR_PD_DIV_BYPASS_OFST (15) -#define RG_SSUSB_CDR_PD_DIV_SEL_OFST (14) -#define RG_SSUSB_CDR_CPBIAS_SEL_OFST (13) -#define RG_SSUSB_CDR_OSCDET_EN_OFST (12) -#define RG_SSUSB_CDR_MONMUX_OFST (11) -#define RG_SSUSB_CDR_CKCTRL_OFST (9) -#define RG_SSUSB_CDR_ACCEN_OFST (8) -#define RG_SSUSB_CDR_BYPASS_OFST (6) -#define RG_SSUSB_CDR_PI_SLEW_OFST (4) -#define RG_SSUSB_CDR_EPEN_OFST (3) -#define RG_SSUSB_CDR_AUTOK_LOAD_OFST (2) -#define RG_SSUSB_CDR_LOAD_RSTB_OFST (1) -#define RG_SSUSB_CDR_MONEN_OFST (0) - -//U3D_regB -#define RG_SSUSB_CDR_MONEN_DIG_OFST (31) -#define RG_SSUSB_CDR_REGOD_OFST (29) -#define RG_SSUSB_RX_DAC_EN_OFST (26) -#define RG_SSUSB_RX_DAC_PWD_OFST (25) -#define RG_SSUSB_EQ_CURSEL_OFST (24) -#define RG_SSUSB_RX_DAC_MUX_OFST (19) -#define RG_SSUSB_RX_R2T_EN_OFST (18) -#define RG_SSUSB_RX_T2R_EN_OFST (17) -#define RG_SSUSB_RX_50_LOWER_OFST (14) -#define RG_SSUSB_RX_50_TAR_OFST (12) -#define RG_SSUSB_RX_SW_CTRL_OFST (7) -#define RG_PCIE_SIGDET_VTH_OFST (5) -#define RG_PCIE_SIGDET_LPF_OFST (3) -#define RG_SSUSB_LFPS_MON_EN_OFST (2) - -//U3D_regC -#define RG_SSUSB_RXAFE_DCMON_SEL_OFST (28) -#define RG_SSUSB_CDR_RESERVE_OFST (16) -#define RG_SSUSB_RXAFE_RESERVE_OFST (8) -#define RG_PCIE_RX_RESERVE_OFST (0) - -//U3D_redD -#define RGS_SSUSB_CDR_NO_OSC_OFST (8) -#define RGS_SSUSB_RX_DEBUG_RESERVE_OFST (0) - -//U3D_regE -#define RG_SSUSB_INT_BIAS_SEL_OFST (4) -#define RG_SSUSB_EXT_BIAS_SEL_OFST (3) -#define RG_SSUSB_RX_P1_ENTRY_PASS_OFST (2) -#define RG_SSUSB_RX_PD_RST_OFST (1) -#define RG_SSUSB_RX_PD_RST_PASS_OFST (0) - -/////////////////////////////////////////////////////////////////////////////// - -struct u3phya_da_reg { - //0x0 - PHY_LE32 reg0; - PHY_LE32 reg1; - PHY_LE32 reg4; - PHY_LE32 reg5; - //0x10 - PHY_LE32 reg6; - PHY_LE32 reg7; - PHY_LE32 reg8; - PHY_LE32 reg9; - //0x20 - PHY_LE32 reg10; - PHY_LE32 reg12; - PHY_LE32 reg13; - PHY_LE32 reg14; - //0x30 - PHY_LE32 reg15; - PHY_LE32 reg16; - PHY_LE32 reg19; - PHY_LE32 reg20; - //0x40 - PHY_LE32 reg21; - PHY_LE32 reg23; - PHY_LE32 reg25; - PHY_LE32 reg26; - //0x50 - PHY_LE32 reg28; - PHY_LE32 reg29; - PHY_LE32 reg30; - PHY_LE32 reg31; - //0x60 - PHY_LE32 reg32; - PHY_LE32 reg33; -}; - -//U3D_reg0 -#define RG_PCIE_SPEED_PE2D (0x1<<24) //24:24 -#define RG_PCIE_SPEED_PE2H (0x1<<23) //23:23 -#define RG_PCIE_SPEED_PE1D (0x1<<22) //22:22 -#define RG_PCIE_SPEED_PE1H 
(0x1<<21) //21:21 -#define RG_PCIE_SPEED_U3 (0x1<<20) //20:20 -#define RG_SSUSB_XTAL_EXT_EN_PE2D (0x3<<18) //19:18 -#define RG_SSUSB_XTAL_EXT_EN_PE2H (0x3<<16) //17:16 -#define RG_SSUSB_XTAL_EXT_EN_PE1D (0x3<<14) //15:14 -#define RG_SSUSB_XTAL_EXT_EN_PE1H (0x3<<12) //13:12 -#define RG_SSUSB_XTAL_EXT_EN_U3 (0x3<<10) //11:10 -#define RG_SSUSB_CDR_REFCK_SEL_PE2D (0x3<<8) //9:8 -#define RG_SSUSB_CDR_REFCK_SEL_PE2H (0x3<<6) //7:6 -#define RG_SSUSB_CDR_REFCK_SEL_PE1D (0x3<<4) //5:4 -#define RG_SSUSB_CDR_REFCK_SEL_PE1H (0x3<<2) //3:2 -#define RG_SSUSB_CDR_REFCK_SEL_U3 (0x3<<0) //1:0 - -//U3D_reg1 -#define RG_USB20_REFCK_SEL_PE2D (0x1<<30) //30:30 -#define RG_USB20_REFCK_SEL_PE2H (0x1<<29) //29:29 -#define RG_USB20_REFCK_SEL_PE1D (0x1<<28) //28:28 -#define RG_USB20_REFCK_SEL_PE1H (0x1<<27) //27:27 -#define RG_USB20_REFCK_SEL_U3 (0x1<<26) //26:26 -#define RG_PCIE_REFCK_DIV4_PE2D (0x1<<25) //25:25 -#define RG_PCIE_REFCK_DIV4_PE2H (0x1<<24) //24:24 -#define RG_PCIE_REFCK_DIV4_PE1D (0x1<<18) //18:18 -#define RG_PCIE_REFCK_DIV4_PE1H (0x1<<17) //17:17 -#define RG_PCIE_REFCK_DIV4_U3 (0x1<<16) //16:16 -#define RG_PCIE_MODE_PE2D (0x1<<8) //8:8 -#define RG_PCIE_MODE_PE2H (0x1<<3) //3:3 -#define RG_PCIE_MODE_PE1D (0x1<<2) //2:2 -#define RG_PCIE_MODE_PE1H (0x1<<1) //1:1 -#define RG_PCIE_MODE_U3 (0x1<<0) //0:0 - -//U3D_reg4 -#define RG_SSUSB_PLL_DIVEN_PE2D (0x7<<22) //24:22 -#define RG_SSUSB_PLL_DIVEN_PE2H (0x7<<19) //21:19 -#define RG_SSUSB_PLL_DIVEN_PE1D (0x7<<16) //18:16 -#define RG_SSUSB_PLL_DIVEN_PE1H (0x7<<13) //15:13 -#define RG_SSUSB_PLL_DIVEN_U3 (0x7<<10) //12:10 -#define RG_SSUSB_PLL_BC_PE2D (0x3<<8) //9:8 -#define RG_SSUSB_PLL_BC_PE2H (0x3<<6) //7:6 -#define RG_SSUSB_PLL_BC_PE1D (0x3<<4) //5:4 -#define RG_SSUSB_PLL_BC_PE1H (0x3<<2) //3:2 -#define RG_SSUSB_PLL_BC_U3 (0x3<<0) //1:0 - -//U3D_reg5 -#define RG_SSUSB_PLL_BR_PE2D (0x7<<27) //29:27 -#define RG_SSUSB_PLL_BR_PE2H (0x7<<24) //26:24 -#define RG_SSUSB_PLL_BR_PE1D (0x7<<21) //23:21 -#define RG_SSUSB_PLL_BR_PE1H (0x7<<18) //20:18 -#define RG_SSUSB_PLL_BR_U3 (0x7<<15) //17:15 -#define RG_SSUSB_PLL_IC_PE2D (0x7<<12) //14:12 -#define RG_SSUSB_PLL_IC_PE2H (0x7<<9) //11:9 -#define RG_SSUSB_PLL_IC_PE1D (0x7<<6) //8:6 -#define RG_SSUSB_PLL_IC_PE1H (0x7<<3) //5:3 -#define RG_SSUSB_PLL_IC_U3 (0x7<<0) //2:0 - -//U3D_reg6 -#define RG_SSUSB_PLL_IR_PE2D (0xf<<24) //27:24 -#define RG_SSUSB_PLL_IR_PE2H (0xf<<16) //19:16 -#define RG_SSUSB_PLL_IR_PE1D (0xf<<8) //11:8 -#define RG_SSUSB_PLL_IR_PE1H (0xf<<4) //7:4 -#define RG_SSUSB_PLL_IR_U3 (0xf<<0) //3:0 - -//U3D_reg7 -#define RG_SSUSB_PLL_BP_PE2D (0xf<<24) //27:24 -#define RG_SSUSB_PLL_BP_PE2H (0xf<<16) //19:16 -#define RG_SSUSB_PLL_BP_PE1D (0xf<<8) //11:8 -#define RG_SSUSB_PLL_BP_PE1H (0xf<<4) //7:4 -#define RG_SSUSB_PLL_BP_U3 (0xf<<0) //3:0 - -//U3D_reg8 -#define RG_SSUSB_PLL_FBKSEL_PE2D (0x3<<24) //25:24 -#define RG_SSUSB_PLL_FBKSEL_PE2H (0x3<<16) //17:16 -#define RG_SSUSB_PLL_FBKSEL_PE1D (0x3<<8) //9:8 -#define RG_SSUSB_PLL_FBKSEL_PE1H (0x3<<2) //3:2 -#define RG_SSUSB_PLL_FBKSEL_U3 (0x3<<0) //1:0 - -//U3D_reg9 -#define RG_SSUSB_PLL_FBKDIV_PE2H (0x7f<<24) //30:24 -#define RG_SSUSB_PLL_FBKDIV_PE1D (0x7f<<16) //22:16 -#define RG_SSUSB_PLL_FBKDIV_PE1H (0x7f<<8) //14:8 -#define RG_SSUSB_PLL_FBKDIV_U3 (0x7f<<0) //6:0 - -//U3D_reg10 -#define RG_SSUSB_PLL_PREDIV_PE2D (0x3<<26) //27:26 -#define RG_SSUSB_PLL_PREDIV_PE2H (0x3<<24) //25:24 -#define RG_SSUSB_PLL_PREDIV_PE1D (0x3<<18) //19:18 -#define RG_SSUSB_PLL_PREDIV_PE1H (0x3<<16) //17:16 -#define RG_SSUSB_PLL_PREDIV_U3 (0x3<<8) //9:8 -#define RG_SSUSB_PLL_FBKDIV_PE2D 
(0x7f<<0) //6:0 - -//U3D_reg12 -#define RG_SSUSB_PLL_PCW_NCPO_U3 (0x7fffffff<<0) //30:0 - -//U3D_reg13 -#define RG_SSUSB_PLL_PCW_NCPO_PE1H (0x7fffffff<<0) //30:0 - -//U3D_reg14 -#define RG_SSUSB_PLL_PCW_NCPO_PE1D (0x7fffffff<<0) //30:0 - -//U3D_reg15 -#define RG_SSUSB_PLL_PCW_NCPO_PE2H (0x7fffffff<<0) //30:0 - -//U3D_reg16 -#define RG_SSUSB_PLL_PCW_NCPO_PE2D (0x7fffffff<<0) //30:0 - -//U3D_reg19 -#define RG_SSUSB_PLL_SSC_DELTA1_PE1H (0xffff<<16) //31:16 -#define RG_SSUSB_PLL_SSC_DELTA1_U3 (0xffff<<0) //15:0 - -//U3D_reg20 -#define RG_SSUSB_PLL_SSC_DELTA1_PE2H (0xffff<<16) //31:16 -#define RG_SSUSB_PLL_SSC_DELTA1_PE1D (0xffff<<0) //15:0 - -//U3D_reg21 -#define RG_SSUSB_PLL_SSC_DELTA_U3 (0xffff<<16) //31:16 -#define RG_SSUSB_PLL_SSC_DELTA1_PE2D (0xffff<<0) //15:0 - -//U3D_reg23 -#define RG_SSUSB_PLL_SSC_DELTA_PE1D (0xffff<<16) //31:16 -#define RG_SSUSB_PLL_SSC_DELTA_PE1H (0xffff<<0) //15:0 - -//U3D_reg25 -#define RG_SSUSB_PLL_SSC_DELTA_PE2D (0xffff<<16) //31:16 -#define RG_SSUSB_PLL_SSC_DELTA_PE2H (0xffff<<0) //15:0 - -//U3D_reg26 -#define RG_SSUSB_PLL_REFCKDIV_PE2D (0x1<<25) //25:25 -#define RG_SSUSB_PLL_REFCKDIV_PE2H (0x1<<24) //24:24 -#define RG_SSUSB_PLL_REFCKDIV_PE1D (0x1<<16) //16:16 -#define RG_SSUSB_PLL_REFCKDIV_PE1H (0x1<<8) //8:8 -#define RG_SSUSB_PLL_REFCKDIV_U3 (0x1<<0) //0:0 - -//U3D_reg28 -#define RG_SSUSB_CDR_BPA_PE2D (0x3<<24) //25:24 -#define RG_SSUSB_CDR_BPA_PE2H (0x3<<16) //17:16 -#define RG_SSUSB_CDR_BPA_PE1D (0x3<<10) //11:10 -#define RG_SSUSB_CDR_BPA_PE1H (0x3<<8) //9:8 -#define RG_SSUSB_CDR_BPA_U3 (0x3<<0) //1:0 - -//U3D_reg29 -#define RG_SSUSB_CDR_BPB_PE2D (0x7<<24) //26:24 -#define RG_SSUSB_CDR_BPB_PE2H (0x7<<16) //18:16 -#define RG_SSUSB_CDR_BPB_PE1D (0x7<<6) //8:6 -#define RG_SSUSB_CDR_BPB_PE1H (0x7<<3) //5:3 -#define RG_SSUSB_CDR_BPB_U3 (0x7<<0) //2:0 - -//U3D_reg30 -#define RG_SSUSB_CDR_BR_PE2D (0x7<<24) //26:24 -#define RG_SSUSB_CDR_BR_PE2H (0x7<<16) //18:16 -#define RG_SSUSB_CDR_BR_PE1D (0x7<<6) //8:6 -#define RG_SSUSB_CDR_BR_PE1H (0x7<<3) //5:3 -#define RG_SSUSB_CDR_BR_U3 (0x7<<0) //2:0 - -//U3D_reg31 -#define RG_SSUSB_CDR_FBDIV_PE2H (0x7f<<24) //30:24 -#define RG_SSUSB_CDR_FBDIV_PE1D (0x7f<<16) //22:16 -#define RG_SSUSB_CDR_FBDIV_PE1H (0x7f<<8) //14:8 -#define RG_SSUSB_CDR_FBDIV_U3 (0x7f<<0) //6:0 - -//U3D_reg32 -#define RG_SSUSB_EQ_RSTEP1_PE2D (0x3<<30) //31:30 -#define RG_SSUSB_EQ_RSTEP1_PE2H (0x3<<28) //29:28 -#define RG_SSUSB_EQ_RSTEP1_PE1D (0x3<<26) //27:26 -#define RG_SSUSB_EQ_RSTEP1_PE1H (0x3<<24) //25:24 -#define RG_SSUSB_EQ_RSTEP1_U3 (0x3<<22) //23:22 -#define RG_SSUSB_LFPS_DEGLITCH_PE2D (0x3<<20) //21:20 -#define RG_SSUSB_LFPS_DEGLITCH_PE2H (0x3<<18) //19:18 -#define RG_SSUSB_LFPS_DEGLITCH_PE1D (0x3<<16) //17:16 -#define RG_SSUSB_LFPS_DEGLITCH_PE1H (0x3<<14) //15:14 -#define RG_SSUSB_LFPS_DEGLITCH_U3 (0x3<<12) //13:12 -#define RG_SSUSB_CDR_KVSEL_PE2D (0x1<<11) //11:11 -#define RG_SSUSB_CDR_KVSEL_PE2H (0x1<<10) //10:10 -#define RG_SSUSB_CDR_KVSEL_PE1D (0x1<<9) //9:9 -#define RG_SSUSB_CDR_KVSEL_PE1H (0x1<<8) //8:8 -#define RG_SSUSB_CDR_KVSEL_U3 (0x1<<7) //7:7 -#define RG_SSUSB_CDR_FBDIV_PE2D (0x7f<<0) //6:0 - -//U3D_reg33 -#define RG_SSUSB_RX_CMPWD_PE2D (0x1<<26) //26:26 -#define RG_SSUSB_RX_CMPWD_PE2H (0x1<<25) //25:25 -#define RG_SSUSB_RX_CMPWD_PE1D (0x1<<24) //24:24 -#define RG_SSUSB_RX_CMPWD_PE1H (0x1<<23) //23:23 -#define RG_SSUSB_RX_CMPWD_U3 (0x1<<16) //16:16 -#define RG_SSUSB_EQ_RSTEP2_PE2D (0x3<<8) //9:8 -#define RG_SSUSB_EQ_RSTEP2_PE2H (0x3<<6) //7:6 -#define RG_SSUSB_EQ_RSTEP2_PE1D (0x3<<4) //5:4 -#define RG_SSUSB_EQ_RSTEP2_PE1H (0x3<<2) 
//3:2 -#define RG_SSUSB_EQ_RSTEP2_U3 (0x3<<0) //1:0 - - -/* OFFSET */ - -//U3D_reg0 -#define RG_PCIE_SPEED_PE2D_OFST (24) -#define RG_PCIE_SPEED_PE2H_OFST (23) -#define RG_PCIE_SPEED_PE1D_OFST (22) -#define RG_PCIE_SPEED_PE1H_OFST (21) -#define RG_PCIE_SPEED_U3_OFST (20) -#define RG_SSUSB_XTAL_EXT_EN_PE2D_OFST (18) -#define RG_SSUSB_XTAL_EXT_EN_PE2H_OFST (16) -#define RG_SSUSB_XTAL_EXT_EN_PE1D_OFST (14) -#define RG_SSUSB_XTAL_EXT_EN_PE1H_OFST (12) -#define RG_SSUSB_XTAL_EXT_EN_U3_OFST (10) -#define RG_SSUSB_CDR_REFCK_SEL_PE2D_OFST (8) -#define RG_SSUSB_CDR_REFCK_SEL_PE2H_OFST (6) -#define RG_SSUSB_CDR_REFCK_SEL_PE1D_OFST (4) -#define RG_SSUSB_CDR_REFCK_SEL_PE1H_OFST (2) -#define RG_SSUSB_CDR_REFCK_SEL_U3_OFST (0) - -//U3D_reg1 -#define RG_USB20_REFCK_SEL_PE2D_OFST (30) -#define RG_USB20_REFCK_SEL_PE2H_OFST (29) -#define RG_USB20_REFCK_SEL_PE1D_OFST (28) -#define RG_USB20_REFCK_SEL_PE1H_OFST (27) -#define RG_USB20_REFCK_SEL_U3_OFST (26) -#define RG_PCIE_REFCK_DIV4_PE2D_OFST (25) -#define RG_PCIE_REFCK_DIV4_PE2H_OFST (24) -#define RG_PCIE_REFCK_DIV4_PE1D_OFST (18) -#define RG_PCIE_REFCK_DIV4_PE1H_OFST (17) -#define RG_PCIE_REFCK_DIV4_U3_OFST (16) -#define RG_PCIE_MODE_PE2D_OFST (8) -#define RG_PCIE_MODE_PE2H_OFST (3) -#define RG_PCIE_MODE_PE1D_OFST (2) -#define RG_PCIE_MODE_PE1H_OFST (1) -#define RG_PCIE_MODE_U3_OFST (0) - -//U3D_reg4 -#define RG_SSUSB_PLL_DIVEN_PE2D_OFST (22) -#define RG_SSUSB_PLL_DIVEN_PE2H_OFST (19) -#define RG_SSUSB_PLL_DIVEN_PE1D_OFST (16) -#define RG_SSUSB_PLL_DIVEN_PE1H_OFST (13) -#define RG_SSUSB_PLL_DIVEN_U3_OFST (10) -#define RG_SSUSB_PLL_BC_PE2D_OFST (8) -#define RG_SSUSB_PLL_BC_PE2H_OFST (6) -#define RG_SSUSB_PLL_BC_PE1D_OFST (4) -#define RG_SSUSB_PLL_BC_PE1H_OFST (2) -#define RG_SSUSB_PLL_BC_U3_OFST (0) - -//U3D_reg5 -#define RG_SSUSB_PLL_BR_PE2D_OFST (27) -#define RG_SSUSB_PLL_BR_PE2H_OFST (24) -#define RG_SSUSB_PLL_BR_PE1D_OFST (21) -#define RG_SSUSB_PLL_BR_PE1H_OFST (18) -#define RG_SSUSB_PLL_BR_U3_OFST (15) -#define RG_SSUSB_PLL_IC_PE2D_OFST (12) -#define RG_SSUSB_PLL_IC_PE2H_OFST (9) -#define RG_SSUSB_PLL_IC_PE1D_OFST (6) -#define RG_SSUSB_PLL_IC_PE1H_OFST (3) -#define RG_SSUSB_PLL_IC_U3_OFST (0) - -//U3D_reg6 -#define RG_SSUSB_PLL_IR_PE2D_OFST (24) -#define RG_SSUSB_PLL_IR_PE2H_OFST (16) -#define RG_SSUSB_PLL_IR_PE1D_OFST (8) -#define RG_SSUSB_PLL_IR_PE1H_OFST (4) -#define RG_SSUSB_PLL_IR_U3_OFST (0) - -//U3D_reg7 -#define RG_SSUSB_PLL_BP_PE2D_OFST (24) -#define RG_SSUSB_PLL_BP_PE2H_OFST (16) -#define RG_SSUSB_PLL_BP_PE1D_OFST (8) -#define RG_SSUSB_PLL_BP_PE1H_OFST (4) -#define RG_SSUSB_PLL_BP_U3_OFST (0) - -//U3D_reg8 -#define RG_SSUSB_PLL_FBKSEL_PE2D_OFST (24) -#define RG_SSUSB_PLL_FBKSEL_PE2H_OFST (16) -#define RG_SSUSB_PLL_FBKSEL_PE1D_OFST (8) -#define RG_SSUSB_PLL_FBKSEL_PE1H_OFST (2) -#define RG_SSUSB_PLL_FBKSEL_U3_OFST (0) - -//U3D_reg9 -#define RG_SSUSB_PLL_FBKDIV_PE2H_OFST (24) -#define RG_SSUSB_PLL_FBKDIV_PE1D_OFST (16) -#define RG_SSUSB_PLL_FBKDIV_PE1H_OFST (8) -#define RG_SSUSB_PLL_FBKDIV_U3_OFST (0) - -//U3D_reg10 -#define RG_SSUSB_PLL_PREDIV_PE2D_OFST (26) -#define RG_SSUSB_PLL_PREDIV_PE2H_OFST (24) -#define RG_SSUSB_PLL_PREDIV_PE1D_OFST (18) -#define RG_SSUSB_PLL_PREDIV_PE1H_OFST (16) -#define RG_SSUSB_PLL_PREDIV_U3_OFST (8) -#define RG_SSUSB_PLL_FBKDIV_PE2D_OFST (0) - -//U3D_reg12 -#define RG_SSUSB_PLL_PCW_NCPO_U3_OFST (0) - -//U3D_reg13 -#define RG_SSUSB_PLL_PCW_NCPO_PE1H_OFST (0) - -//U3D_reg14 -#define RG_SSUSB_PLL_PCW_NCPO_PE1D_OFST (0) - -//U3D_reg15 -#define RG_SSUSB_PLL_PCW_NCPO_PE2H_OFST (0) - -//U3D_reg16 -#define 
RG_SSUSB_PLL_PCW_NCPO_PE2D_OFST (0) - -//U3D_reg19 -#define RG_SSUSB_PLL_SSC_DELTA1_PE1H_OFST (16) -#define RG_SSUSB_PLL_SSC_DELTA1_U3_OFST (0) - -//U3D_reg20 -#define RG_SSUSB_PLL_SSC_DELTA1_PE2H_OFST (16) -#define RG_SSUSB_PLL_SSC_DELTA1_PE1D_OFST (0) - -//U3D_reg21 -#define RG_SSUSB_PLL_SSC_DELTA_U3_OFST (16) -#define RG_SSUSB_PLL_SSC_DELTA1_PE2D_OFST (0) - -//U3D_reg23 -#define RG_SSUSB_PLL_SSC_DELTA_PE1D_OFST (16) -#define RG_SSUSB_PLL_SSC_DELTA_PE1H_OFST (0) - -//U3D_reg25 -#define RG_SSUSB_PLL_SSC_DELTA_PE2D_OFST (16) -#define RG_SSUSB_PLL_SSC_DELTA_PE2H_OFST (0) - -//U3D_reg26 -#define RG_SSUSB_PLL_REFCKDIV_PE2D_OFST (25) -#define RG_SSUSB_PLL_REFCKDIV_PE2H_OFST (24) -#define RG_SSUSB_PLL_REFCKDIV_PE1D_OFST (16) -#define RG_SSUSB_PLL_REFCKDIV_PE1H_OFST (8) -#define RG_SSUSB_PLL_REFCKDIV_U3_OFST (0) - -//U3D_reg28 -#define RG_SSUSB_CDR_BPA_PE2D_OFST (24) -#define RG_SSUSB_CDR_BPA_PE2H_OFST (16) -#define RG_SSUSB_CDR_BPA_PE1D_OFST (10) -#define RG_SSUSB_CDR_BPA_PE1H_OFST (8) -#define RG_SSUSB_CDR_BPA_U3_OFST (0) - -//U3D_reg29 -#define RG_SSUSB_CDR_BPB_PE2D_OFST (24) -#define RG_SSUSB_CDR_BPB_PE2H_OFST (16) -#define RG_SSUSB_CDR_BPB_PE1D_OFST (6) -#define RG_SSUSB_CDR_BPB_PE1H_OFST (3) -#define RG_SSUSB_CDR_BPB_U3_OFST (0) - -//U3D_reg30 -#define RG_SSUSB_CDR_BR_PE2D_OFST (24) -#define RG_SSUSB_CDR_BR_PE2H_OFST (16) -#define RG_SSUSB_CDR_BR_PE1D_OFST (6) -#define RG_SSUSB_CDR_BR_PE1H_OFST (3) -#define RG_SSUSB_CDR_BR_U3_OFST (0) - -//U3D_reg31 -#define RG_SSUSB_CDR_FBDIV_PE2H_OFST (24) -#define RG_SSUSB_CDR_FBDIV_PE1D_OFST (16) -#define RG_SSUSB_CDR_FBDIV_PE1H_OFST (8) -#define RG_SSUSB_CDR_FBDIV_U3_OFST (0) - -//U3D_reg32 -#define RG_SSUSB_EQ_RSTEP1_PE2D_OFST (30) -#define RG_SSUSB_EQ_RSTEP1_PE2H_OFST (28) -#define RG_SSUSB_EQ_RSTEP1_PE1D_OFST (26) -#define RG_SSUSB_EQ_RSTEP1_PE1H_OFST (24) -#define RG_SSUSB_EQ_RSTEP1_U3_OFST (22) -#define RG_SSUSB_LFPS_DEGLITCH_PE2D_OFST (20) -#define RG_SSUSB_LFPS_DEGLITCH_PE2H_OFST (18) -#define RG_SSUSB_LFPS_DEGLITCH_PE1D_OFST (16) -#define RG_SSUSB_LFPS_DEGLITCH_PE1H_OFST (14) -#define RG_SSUSB_LFPS_DEGLITCH_U3_OFST (12) -#define RG_SSUSB_CDR_KVSEL_PE2D_OFST (11) -#define RG_SSUSB_CDR_KVSEL_PE2H_OFST (10) -#define RG_SSUSB_CDR_KVSEL_PE1D_OFST (9) -#define RG_SSUSB_CDR_KVSEL_PE1H_OFST (8) -#define RG_SSUSB_CDR_KVSEL_U3_OFST (7) -#define RG_SSUSB_CDR_FBDIV_PE2D_OFST (0) - -//U3D_reg33 -#define RG_SSUSB_RX_CMPWD_PE2D_OFST (26) -#define RG_SSUSB_RX_CMPWD_PE2H_OFST (25) -#define RG_SSUSB_RX_CMPWD_PE1D_OFST (24) -#define RG_SSUSB_RX_CMPWD_PE1H_OFST (23) -#define RG_SSUSB_RX_CMPWD_U3_OFST (16) -#define RG_SSUSB_EQ_RSTEP2_PE2D_OFST (8) -#define RG_SSUSB_EQ_RSTEP2_PE2H_OFST (6) -#define RG_SSUSB_EQ_RSTEP2_PE1D_OFST (4) -#define RG_SSUSB_EQ_RSTEP2_PE1H_OFST (2) -#define RG_SSUSB_EQ_RSTEP2_U3_OFST (0) - - -/////////////////////////////////////////////////////////////////////////////// - -struct u3phyd_reg { - //0x0 - PHY_LE32 phyd_mix0; - PHY_LE32 phyd_mix1; - PHY_LE32 phyd_lfps0; - PHY_LE32 phyd_lfps1; - //0x10 - PHY_LE32 phyd_impcal0; - PHY_LE32 phyd_impcal1; - PHY_LE32 phyd_txpll0; - PHY_LE32 phyd_txpll1; - //0x20 - PHY_LE32 phyd_txpll2; - PHY_LE32 phyd_fl0; - PHY_LE32 phyd_mix2; - PHY_LE32 phyd_rx0; - //0x30 - PHY_LE32 phyd_t2rlb; - PHY_LE32 phyd_cppat; - PHY_LE32 phyd_mix3; - PHY_LE32 phyd_ebufctl; - //0x40 - PHY_LE32 phyd_pipe0; - PHY_LE32 phyd_pipe1; - PHY_LE32 phyd_mix4; - PHY_LE32 phyd_ckgen0; - //0x50 - PHY_LE32 phyd_mix5; - PHY_LE32 phyd_reserved; - PHY_LE32 phyd_cdr0; - PHY_LE32 phyd_cdr1; - //0x60 - PHY_LE32 phyd_pll_0; - PHY_LE32 phyd_pll_1; 
- PHY_LE32 phyd_bcn_det_1; - PHY_LE32 phyd_bcn_det_2; - //0x70 - PHY_LE32 eq0; - PHY_LE32 eq1; - PHY_LE32 eq2; - PHY_LE32 eq3; - //0x80 - PHY_LE32 eq_eye0; - PHY_LE32 eq_eye1; - PHY_LE32 eq_eye2; - PHY_LE32 eq_dfe0; - //0x90 - PHY_LE32 eq_dfe1; - PHY_LE32 eq_dfe2; - PHY_LE32 eq_dfe3; - PHY_LE32 reserve0; - //0xa0 - PHY_LE32 phyd_mon0; - PHY_LE32 phyd_mon1; - PHY_LE32 phyd_mon2; - PHY_LE32 phyd_mon3; - //0xb0 - PHY_LE32 phyd_mon4; - PHY_LE32 phyd_mon5; - PHY_LE32 phyd_mon6; - PHY_LE32 phyd_mon7; - //0xc0 - PHY_LE32 phya_rx_mon0; - PHY_LE32 phya_rx_mon1; - PHY_LE32 phya_rx_mon2; - PHY_LE32 phya_rx_mon3; - //0xd0 - PHY_LE32 phya_rx_mon4; - PHY_LE32 phya_rx_mon5; - PHY_LE32 phyd_cppat2; - PHY_LE32 eq_eye3; - //0xe0 - PHY_LE32 kband_out; - PHY_LE32 kband_out1; -}; - -//U3D_PHYD_MIX0 -#define RG_SSUSB_P_P3_TX_NG (0x1<<31) //31:31 -#define RG_SSUSB_TSEQ_EN (0x1<<30) //30:30 -#define RG_SSUSB_TSEQ_POLEN (0x1<<29) //29:29 -#define RG_SSUSB_TSEQ_POL (0x1<<28) //28:28 -#define RG_SSUSB_P_P3_PCLK_NG (0x1<<27) //27:27 -#define RG_SSUSB_TSEQ_TH (0x7<<24) //26:24 -#define RG_SSUSB_PRBS_BERTH (0xff<<16) //23:16 -#define RG_SSUSB_DISABLE_PHY_U2_ON (0x1<<15) //15:15 -#define RG_SSUSB_DISABLE_PHY_U2_OFF (0x1<<14) //14:14 -#define RG_SSUSB_PRBS_EN (0x1<<13) //13:13 -#define RG_SSUSB_BPSLOCK (0x1<<12) //12:12 -#define RG_SSUSB_RTCOMCNT (0xf<<8) //11:8 -#define RG_SSUSB_COMCNT (0xf<<4) //7:4 -#define RG_SSUSB_PRBSEL_CALIB (0xf<<0) //3:0 - -//U3D_PHYD_MIX1 -#define RG_SSUSB_SLEEP_EN (0x1<<31) //31:31 -#define RG_SSUSB_PRBSEL_PCS (0x7<<28) //30:28 -#define RG_SSUSB_TXLFPS_PRD (0xf<<24) //27:24 -#define RG_SSUSB_P_RX_P0S_CK (0x1<<23) //23:23 -#define RG_SSUSB_P_TX_P0S_CK (0x1<<22) //22:22 -#define RG_SSUSB_PDNCTL (0x3f<<16) //21:16 -#define RG_SSUSB_TX_DRV_EN (0x1<<15) //15:15 -#define RG_SSUSB_TX_DRV_SEL (0x1<<14) //14:14 -#define RG_SSUSB_TX_DRV_DLY (0x3f<<8) //13:8 -#define RG_SSUSB_BERT_EN (0x1<<7) //7:7 -#define RG_SSUSB_SCP_TH (0x7<<4) //6:4 -#define RG_SSUSB_SCP_EN (0x1<<3) //3:3 -#define RG_SSUSB_RXANSIDEC_TEST (0x7<<0) //2:0 - -//U3D_PHYD_LFPS0 -#define RG_SSUSB_LFPS_PWD (0x1<<30) //30:30 -#define RG_SSUSB_FORCE_LFPS_PWD (0x1<<29) //29:29 -#define RG_SSUSB_RXLFPS_OVF (0x1f<<24) //28:24 -#define RG_SSUSB_P3_ENTRY_SEL (0x1<<23) //23:23 -#define RG_SSUSB_P3_ENTRY (0x1<<22) //22:22 -#define RG_SSUSB_RXLFPS_CDRSEL (0x3<<20) //21:20 -#define RG_SSUSB_RXLFPS_CDRTH (0xf<<16) //19:16 -#define RG_SSUSB_LOCK5G_BLOCK (0x1<<15) //15:15 -#define RG_SSUSB_TFIFO_EXT_D_SEL (0x1<<14) //14:14 -#define RG_SSUSB_TFIFO_NO_EXTEND (0x1<<13) //13:13 -#define RG_SSUSB_RXLFPS_LOB (0x1f<<8) //12:8 -#define RG_SSUSB_TXLFPS_EN (0x1<<7) //7:7 -#define RG_SSUSB_TXLFPS_SEL (0x1<<6) //6:6 -#define RG_SSUSB_RXLFPS_CDRLOCK (0x1<<5) //5:5 -#define RG_SSUSB_RXLFPS_UPB (0x1f<<0) //4:0 - -//U3D_PHYD_LFPS1 -#define RG_SSUSB_RX_IMP_BIAS (0xf<<28) //31:28 -#define RG_SSUSB_TX_IMP_BIAS (0xf<<24) //27:24 -#define RG_SSUSB_FWAKE_TH (0x3f<<16) //21:16 -#define RG_SSUSB_RXLFPS_UDF (0x1f<<8) //12:8 -#define RG_SSUSB_RXLFPS_P0IDLETH (0xff<<0) //7:0 - -//U3D_PHYD_IMPCAL0 -#define RG_SSUSB_FORCE_TX_IMPSEL (0x1<<31) //31:31 -#define RG_SSUSB_TX_IMPCAL_EN (0x1<<30) //30:30 -#define RG_SSUSB_FORCE_TX_IMPCAL_EN (0x1<<29) //29:29 -#define RG_SSUSB_TX_IMPSEL (0x1f<<24) //28:24 -#define RG_SSUSB_TX_IMPCAL_CALCYC (0x3f<<16) //21:16 -#define RG_SSUSB_TX_IMPCAL_STBCYC (0x1f<<10) //14:10 -#define RG_SSUSB_TX_IMPCAL_CYCCNT (0x3ff<<0) //9:0 - -//U3D_PHYD_IMPCAL1 -#define RG_SSUSB_FORCE_RX_IMPSEL (0x1<<31) //31:31 -#define RG_SSUSB_RX_IMPCAL_EN (0x1<<30) //30:30 
-#define RG_SSUSB_FORCE_RX_IMPCAL_EN (0x1<<29) //29:29 -#define RG_SSUSB_RX_IMPSEL (0x1f<<24) //28:24 -#define RG_SSUSB_RX_IMPCAL_CALCYC (0x3f<<16) //21:16 -#define RG_SSUSB_RX_IMPCAL_STBCYC (0x1f<<10) //14:10 -#define RG_SSUSB_RX_IMPCAL_CYCCNT (0x3ff<<0) //9:0 - -//U3D_PHYD_TXPLL0 -#define RG_SSUSB_TXPLL_DDSEN_CYC (0x1f<<27) //31:27 -#define RG_SSUSB_TXPLL_ON (0x1<<26) //26:26 -#define RG_SSUSB_FORCE_TXPLLON (0x1<<25) //25:25 -#define RG_SSUSB_TXPLL_STBCYC (0x1ff<<16) //24:16 -#define RG_SSUSB_TXPLL_NCPOCHG_CYC (0xf<<12) //15:12 -#define RG_SSUSB_TXPLL_NCPOEN_CYC (0x3<<10) //11:10 -#define RG_SSUSB_TXPLL_DDSRSTB_CYC (0x7<<0) //2:0 - -//U3D_PHYD_TXPLL1 -#define RG_SSUSB_PLL_NCPO_EN (0x1<<31) //31:31 -#define RG_SSUSB_PLL_FIFO_START_MAN (0x1<<30) //30:30 -#define RG_SSUSB_PLL_NCPO_CHG (0x1<<28) //28:28 -#define RG_SSUSB_PLL_DDS_RSTB (0x1<<27) //27:27 -#define RG_SSUSB_PLL_DDS_PWDB (0x1<<26) //26:26 -#define RG_SSUSB_PLL_DDSEN (0x1<<25) //25:25 -#define RG_SSUSB_PLL_AUTOK_VCO (0x1<<24) //24:24 -#define RG_SSUSB_PLL_PWD (0x1<<23) //23:23 -#define RG_SSUSB_RX_AFE_PWD (0x1<<22) //22:22 -#define RG_SSUSB_PLL_TCADJ (0x3f<<16) //21:16 -#define RG_SSUSB_FORCE_CDR_TCADJ (0x1<<15) //15:15 -#define RG_SSUSB_FORCE_CDR_AUTOK_VCO (0x1<<14) //14:14 -#define RG_SSUSB_FORCE_CDR_PWD (0x1<<13) //13:13 -#define RG_SSUSB_FORCE_PLL_NCPO_EN (0x1<<12) //12:12 -#define RG_SSUSB_FORCE_PLL_FIFO_START_MAN (0x1<<11) //11:11 -#define RG_SSUSB_FORCE_PLL_NCPO_CHG (0x1<<9) //9:9 -#define RG_SSUSB_FORCE_PLL_DDS_RSTB (0x1<<8) //8:8 -#define RG_SSUSB_FORCE_PLL_DDS_PWDB (0x1<<7) //7:7 -#define RG_SSUSB_FORCE_PLL_DDSEN (0x1<<6) //6:6 -#define RG_SSUSB_FORCE_PLL_TCADJ (0x1<<5) //5:5 -#define RG_SSUSB_FORCE_PLL_AUTOK_VCO (0x1<<4) //4:4 -#define RG_SSUSB_FORCE_PLL_PWD (0x1<<3) //3:3 -#define RG_SSUSB_FLT_1_DISPERR_B (0x1<<2) //2:2 - -//U3D_PHYD_TXPLL2 -#define RG_SSUSB_TX_LFPS_EN (0x1<<31) //31:31 -#define RG_SSUSB_FORCE_TX_LFPS_EN (0x1<<30) //30:30 -#define RG_SSUSB_TX_LFPS (0x1<<29) //29:29 -#define RG_SSUSB_FORCE_TX_LFPS (0x1<<28) //28:28 -#define RG_SSUSB_RXPLL_STB (0x1<<27) //27:27 -#define RG_SSUSB_TXPLL_STB (0x1<<26) //26:26 -#define RG_SSUSB_FORCE_RXPLL_STB (0x1<<25) //25:25 -#define RG_SSUSB_FORCE_TXPLL_STB (0x1<<24) //24:24 -#define RG_SSUSB_RXPLL_REFCKSEL (0x1<<16) //16:16 -#define RG_SSUSB_RXPLL_STBMODE (0x1<<11) //11:11 -#define RG_SSUSB_RXPLL_ON (0x1<<10) //10:10 -#define RG_SSUSB_FORCE_RXPLLON (0x1<<9) //9:9 -#define RG_SSUSB_FORCE_RX_AFE_PWD (0x1<<8) //8:8 -#define RG_SSUSB_CDR_AUTOK_VCO (0x1<<7) //7:7 -#define RG_SSUSB_CDR_PWD (0x1<<6) //6:6 -#define RG_SSUSB_CDR_TCADJ (0x3f<<0) //5:0 - -//U3D_PHYD_FL0 -#define RG_SSUSB_RX_FL_TARGET (0xffff<<16) //31:16 -#define RG_SSUSB_RX_FL_CYCLECNT (0xffff<<0) //15:0 - -//U3D_PHYD_MIX2 -#define RG_SSUSB_RX_EQ_RST (0x1<<31) //31:31 -#define RG_SSUSB_RX_EQ_RST_SEL (0x1<<30) //30:30 -#define RG_SSUSB_RXVAL_RST (0x1<<29) //29:29 -#define RG_SSUSB_RXVAL_CNT (0x1f<<24) //28:24 -#define RG_SSUSB_CDROS_EN (0x1<<18) //18:18 -#define RG_SSUSB_CDR_LCKOP (0x3<<16) //17:16 -#define RG_SSUSB_RX_FL_LOCKTH (0xf<<8) //11:8 -#define RG_SSUSB_RX_FL_OFFSET (0xff<<0) //7:0 - -//U3D_PHYD_RX0 -#define RG_SSUSB_T2RLB_BERTH (0xff<<24) //31:24 -#define RG_SSUSB_T2RLB_PAT (0xff<<16) //23:16 -#define RG_SSUSB_T2RLB_EN (0x1<<15) //15:15 -#define RG_SSUSB_T2RLB_BPSCRAMB (0x1<<14) //14:14 -#define RG_SSUSB_T2RLB_SERIAL (0x1<<13) //13:13 -#define RG_SSUSB_T2RLB_MODE (0x3<<11) //12:11 -#define RG_SSUSB_RX_SAOSC_EN (0x1<<10) //10:10 -#define RG_SSUSB_RX_SAOSC_EN_SEL (0x1<<9) //9:9 -#define 
RG_SSUSB_RX_DFE_OPTION (0x1<<8) //8:8 -#define RG_SSUSB_RX_DFE_EN (0x1<<7) //7:7 -#define RG_SSUSB_RX_DFE_EN_SEL (0x1<<6) //6:6 -#define RG_SSUSB_RX_EQ_EN (0x1<<5) //5:5 -#define RG_SSUSB_RX_EQ_EN_SEL (0x1<<4) //4:4 -#define RG_SSUSB_RX_SAOSC_RST (0x1<<3) //3:3 -#define RG_SSUSB_RX_SAOSC_RST_SEL (0x1<<2) //2:2 -#define RG_SSUSB_RX_DFE_RST (0x1<<1) //1:1 -#define RG_SSUSB_RX_DFE_RST_SEL (0x1<<0) //0:0 - -//U3D_PHYD_T2RLB -#define RG_SSUSB_EQTRAIN_CH_MODE (0x1<<28) //28:28 -#define RG_SSUSB_PRB_OUT_CPPAT (0x1<<27) //27:27 -#define RG_SSUSB_BPANSIENC (0x1<<26) //26:26 -#define RG_SSUSB_VALID_EN (0x1<<25) //25:25 -#define RG_SSUSB_EBUF_SRST (0x1<<24) //24:24 -#define RG_SSUSB_K_EMP (0xf<<20) //23:20 -#define RG_SSUSB_K_FUL (0xf<<16) //19:16 -#define RG_SSUSB_T2RLB_BDATRST (0xf<<12) //15:12 -#define RG_SSUSB_P_T2RLB_SKP_EN (0x1<<10) //10:10 -#define RG_SSUSB_T2RLB_PATMODE (0x3<<8) //9:8 -#define RG_SSUSB_T2RLB_TSEQCNT (0xff<<0) //7:0 - -//U3D_PHYD_CPPAT -#define RG_SSUSB_CPPAT_PROGRAM_EN (0x1<<24) //24:24 -#define RG_SSUSB_CPPAT_TOZ (0x3<<21) //22:21 -#define RG_SSUSB_CPPAT_PRBS_EN (0x1<<20) //20:20 -#define RG_SSUSB_CPPAT_OUT_TMP2 (0xf<<16) //19:16 -#define RG_SSUSB_CPPAT_OUT_TMP1 (0xff<<8) //15:8 -#define RG_SSUSB_CPPAT_OUT_TMP0 (0xff<<0) //7:0 - -//U3D_PHYD_MIX3 -#define RG_SSUSB_CDR_TCADJ_MINUS (0x1<<31) //31:31 -#define RG_SSUSB_P_CDROS_EN (0x1<<30) //30:30 -#define RG_SSUSB_P_P2_TX_DRV_DIS (0x1<<28) //28:28 -#define RG_SSUSB_CDR_TCADJ_OFFSET (0x7<<24) //26:24 -#define RG_SSUSB_PLL_TCADJ_MINUS (0x1<<23) //23:23 -#define RG_SSUSB_FORCE_PLL_BIAS_LPF_EN (0x1<<20) //20:20 -#define RG_SSUSB_PLL_BIAS_LPF_EN (0x1<<19) //19:19 -#define RG_SSUSB_PLL_TCADJ_OFFSET (0x7<<16) //18:16 -#define RG_SSUSB_FORCE_PLL_SSCEN (0x1<<15) //15:15 -#define RG_SSUSB_PLL_SSCEN (0x1<<14) //14:14 -#define RG_SSUSB_FORCE_CDR_PI_PWD (0x1<<13) //13:13 -#define RG_SSUSB_CDR_PI_PWD (0x1<<12) //12:12 -#define RG_SSUSB_CDR_PI_MODE (0x1<<11) //11:11 -#define RG_SSUSB_TXPLL_SSCEN_CYC (0x3ff<<0) //9:0 - -//U3D_PHYD_EBUFCTL -#define RG_SSUSB_EBUFCTL (0xffffffff<<0) //31:0 - -//U3D_PHYD_PIPE0 -#define RG_SSUSB_RXTERMINATION (0x1<<30) //30:30 -#define RG_SSUSB_RXEQTRAINING (0x1<<29) //29:29 -#define RG_SSUSB_RXPOLARITY (0x1<<28) //28:28 -#define RG_SSUSB_TXDEEMPH (0x3<<26) //27:26 -#define RG_SSUSB_POWERDOWN (0x3<<24) //25:24 -#define RG_SSUSB_TXONESZEROS (0x1<<23) //23:23 -#define RG_SSUSB_TXELECIDLE (0x1<<22) //22:22 -#define RG_SSUSB_TXDETECTRX (0x1<<21) //21:21 -#define RG_SSUSB_PIPE_SEL (0x1<<20) //20:20 -#define RG_SSUSB_TXDATAK (0xf<<16) //19:16 -#define RG_SSUSB_CDR_STABLE_SEL (0x1<<15) //15:15 -#define RG_SSUSB_CDR_STABLE (0x1<<14) //14:14 -#define RG_SSUSB_CDR_RSTB_SEL (0x1<<13) //13:13 -#define RG_SSUSB_CDR_RSTB (0x1<<12) //12:12 -#define RG_SSUSB_P_ERROR_SEL (0x3<<4) //5:4 -#define RG_SSUSB_TXMARGIN (0x7<<1) //3:1 -#define RG_SSUSB_TXCOMPLIANCE (0x1<<0) //0:0 - -//U3D_PHYD_PIPE1 -#define RG_SSUSB_TXDATA (0xffffffff<<0) //31:0 - -//U3D_PHYD_MIX4 -#define RG_SSUSB_CDROS_CNT (0x3f<<24) //29:24 -#define RG_SSUSB_T2RLB_BER_EN (0x1<<16) //16:16 -#define RG_SSUSB_T2RLB_BER_RATE (0xffff<<0) //15:0 - -//U3D_PHYD_CKGEN0 -#define RG_SSUSB_RFIFO_IMPLAT (0x1<<27) //27:27 -#define RG_SSUSB_TFIFO_PSEL (0x7<<24) //26:24 -#define RG_SSUSB_CKGEN_PSEL (0x3<<8) //9:8 -#define RG_SSUSB_RXCK_INV (0x1<<0) //0:0 - -//U3D_PHYD_MIX5 -#define RG_SSUSB_PRB_SEL (0xffff<<16) //31:16 -#define RG_SSUSB_RXPLL_STBCYC (0x7ff<<0) //10:0 - -//U3D_PHYD_RESERVED -#define RG_SSUSB_PHYD_RESERVE (0xffffffff<<0) //31:0 -//#define RG_SSUSB_RX_SIGDET_SEL (0x1<<11) 
-//#define RG_SSUSB_RX_SIGDET_EN (0x1<<12) -//#define RG_SSUSB_RX_PI_CAL_MANUAL_SEL (0x1<<9) -//#define RG_SSUSB_RX_PI_CAL_MANUAL_EN (0x1<<10) - -//U3D_PHYD_CDR0 -#define RG_SSUSB_CDR_BIC_LTR (0xf<<28) //31:28 -#define RG_SSUSB_CDR_BIC_LTD0 (0xf<<24) //27:24 -#define RG_SSUSB_CDR_BC_LTD1 (0x1f<<16) //20:16 -#define RG_SSUSB_CDR_BC_LTR (0x1f<<8) //12:8 -#define RG_SSUSB_CDR_BC_LTD0 (0x1f<<0) //4:0 - -//U3D_PHYD_CDR1 -#define RG_SSUSB_CDR_BIR_LTD1 (0x1f<<24) //28:24 -#define RG_SSUSB_CDR_BIR_LTR (0x1f<<16) //20:16 -#define RG_SSUSB_CDR_BIR_LTD0 (0x1f<<8) //12:8 -#define RG_SSUSB_CDR_BW_SEL (0x3<<6) //7:6 -#define RG_SSUSB_CDR_BIC_LTD1 (0xf<<0) //3:0 - -//U3D_PHYD_PLL_0 -#define RG_SSUSB_FORCE_CDR_BAND_5G (0x1<<28) //28:28 -#define RG_SSUSB_FORCE_CDR_BAND_2P5G (0x1<<27) //27:27 -#define RG_SSUSB_FORCE_PLL_BAND_5G (0x1<<26) //26:26 -#define RG_SSUSB_FORCE_PLL_BAND_2P5G (0x1<<25) //25:25 -#define RG_SSUSB_P_EQ_T_SEL (0x3ff<<15) //24:15 -#define RG_SSUSB_PLL_ISO_EN_CYC (0x3ff<<5) //14:5 -#define RG_SSUSB_PLLBAND_RECAL (0x1<<4) //4:4 -#define RG_SSUSB_PLL_DDS_ISO_EN (0x1<<3) //3:3 -#define RG_SSUSB_FORCE_PLL_DDS_ISO_EN (0x1<<2) //2:2 -#define RG_SSUSB_PLL_DDS_PWR_ON (0x1<<1) //1:1 -#define RG_SSUSB_FORCE_PLL_DDS_PWR_ON (0x1<<0) //0:0 - -//U3D_PHYD_PLL_1 -#define RG_SSUSB_CDR_BAND_5G (0xff<<24) //31:24 -#define RG_SSUSB_CDR_BAND_2P5G (0xff<<16) //23:16 -#define RG_SSUSB_PLL_BAND_5G (0xff<<8) //15:8 -#define RG_SSUSB_PLL_BAND_2P5G (0xff<<0) //7:0 - -//U3D_PHYD_BCN_DET_1 -#define RG_SSUSB_P_BCN_OBS_PRD (0xffff<<16) //31:16 -#define RG_SSUSB_U_BCN_OBS_PRD (0xffff<<0) //15:0 - -//U3D_PHYD_BCN_DET_2 -#define RG_SSUSB_P_BCN_OBS_SEL (0xfff<<16) //27:16 -#define RG_SSUSB_BCN_DET_DIS (0x1<<12) //12:12 -#define RG_SSUSB_U_BCN_OBS_SEL (0xfff<<0) //11:0 - -//U3D_EQ0 -#define RG_SSUSB_EQ_DLHL_LFI (0x7f<<24) //30:24 -#define RG_SSUSB_EQ_DHHL_LFI (0x7f<<16) //22:16 -#define RG_SSUSB_EQ_DD0HOS_LFI (0x7f<<8) //14:8 -#define RG_SSUSB_EQ_DD0LOS_LFI (0x7f<<0) //6:0 - -//U3D_EQ1 -#define RG_SSUSB_EQ_DD1HOS_LFI (0x7f<<24) //30:24 -#define RG_SSUSB_EQ_DD1LOS_LFI (0x7f<<16) //22:16 -#define RG_SSUSB_EQ_DE0OS_LFI (0x7f<<8) //14:8 -#define RG_SSUSB_EQ_DE1OS_LFI (0x7f<<0) //6:0 - -//U3D_EQ2 -#define RG_SSUSB_EQ_DLHLOS_LFI (0x7f<<24) //30:24 -#define RG_SSUSB_EQ_DHHLOS_LFI (0x7f<<16) //22:16 -#define RG_SSUSB_EQ_STOPTIME (0x1<<14) //14:14 -#define RG_SSUSB_EQ_DHHL_LF_SEL (0x7<<11) //13:11 -#define RG_SSUSB_EQ_DSAOS_LF_SEL (0x7<<8) //10:8 -#define RG_SSUSB_EQ_STARTTIME (0x3<<6) //7:6 -#define RG_SSUSB_EQ_DLEQ_LF_SEL (0x7<<3) //5:3 -#define RG_SSUSB_EQ_DLHL_LF_SEL (0x7<<0) //2:0 - -//U3D_EQ3 -#define RG_SSUSB_EQ_DLEQ_LFI_GEN2 (0xf<<28) //31:28 -#define RG_SSUSB_EQ_DLEQ_LFI_GEN1 (0xf<<24) //27:24 -#define RG_SSUSB_EQ_DEYE0OS_LFI (0x7f<<16) //22:16 -#define RG_SSUSB_EQ_DEYE1OS_LFI (0x7f<<8) //14:8 -#define RG_SSUSB_EQ_TRI_DET_EN (0x1<<7) //7:7 -#define RG_SSUSB_EQ_TRI_DET_TH (0x7f<<0) //6:0 - -//U3D_EQ_EYE0 -#define RG_SSUSB_EQ_EYE_XOFFSET (0x7f<<25) //31:25 -#define RG_SSUSB_EQ_EYE_MON_EN (0x1<<24) //24:24 -#define RG_SSUSB_EQ_EYE0_Y (0x7f<<16) //22:16 -#define RG_SSUSB_EQ_EYE1_Y (0x7f<<8) //14:8 -#define RG_SSUSB_EQ_PILPO_ROUT (0x1<<7) //7:7 -#define RG_SSUSB_EQ_PI_KPGAIN (0x7<<4) //6:4 -#define RG_SSUSB_EQ_EYE_CNT_EN (0x1<<3) //3:3 - -//U3D_EQ_EYE1 -#define RG_SSUSB_EQ_SIGDET (0x7f<<24) //30:24 -#define RG_SSUSB_EQ_EYE_MASK (0x3ff<<7) //16:7 - -//U3D_EQ_EYE2 -#define RG_SSUSB_EQ_RX500M_CK_SEL (0x1<<31) //31:31 -#define RG_SSUSB_EQ_SD_CNT1 (0x3f<<24) //29:24 -#define RG_SSUSB_EQ_ISIFLAG_SEL (0x3<<22) //23:22 -#define 
RG_SSUSB_EQ_SD_CNT0 (0x3f<<16) //21:16 - -//U3D_EQ_DFE0 -#define RG_SSUSB_EQ_LEQMAX (0xf<<28) //31:28 -#define RG_SSUSB_EQ_DFEX_EN (0x1<<27) //27:27 -#define RG_SSUSB_EQ_DFEX_LF_SEL (0x7<<24) //26:24 -#define RG_SSUSB_EQ_CHK_EYE_H (0x1<<23) //23:23 -#define RG_SSUSB_EQ_PIEYE_INI (0x7f<<16) //22:16 -#define RG_SSUSB_EQ_PI90_INI (0x7f<<8) //14:8 -#define RG_SSUSB_EQ_PI0_INI (0x7f<<0) //6:0 - -//U3D_EQ_DFE1 -#define RG_SSUSB_EQ_REV (0xffff<<16) //31:16 -#define RG_SSUSB_EQ_DFEYEN_DUR (0x7<<12) //14:12 -#define RG_SSUSB_EQ_DFEXEN_DUR (0x7<<8) //10:8 -#define RG_SSUSB_EQ_DFEX_RST (0x1<<7) //7:7 -#define RG_SSUSB_EQ_GATED_RXD_B (0x1<<6) //6:6 -#define RG_SSUSB_EQ_PI90CK_SEL (0x3<<4) //5:4 -#define RG_SSUSB_EQ_DFEX_DIS (0x1<<2) //2:2 -#define RG_SSUSB_EQ_DFEYEN_STOP_DIS (0x1<<1) //1:1 -#define RG_SSUSB_EQ_DFEXEN_SEL (0x1<<0) //0:0 - -//U3D_EQ_DFE2 -#define RG_SSUSB_EQ_MON_SEL (0x1f<<24) //28:24 -#define RG_SSUSB_EQ_LEQOSC_DLYCNT (0x7<<16) //18:16 -#define RG_SSUSB_EQ_DLEQOS_LFI (0x1f<<8) //12:8 -#define RG_SSUSB_EQ_LEQ_STOP_TO (0x3<<0) //1:0 - -//U3D_EQ_DFE3 -#define RG_SSUSB_EQ_RESERVED (0xffffffff<<0) //31:0 - -//U3D_PHYD_MON0 -#define RGS_SSUSB_BERT_BERC (0xffff<<16) //31:16 -#define RGS_SSUSB_LFPS (0xf<<12) //15:12 -#define RGS_SSUSB_TRAINDEC (0x7<<8) //10:8 -#define RGS_SSUSB_SCP_PAT (0xff<<0) //7:0 - -//U3D_PHYD_MON1 -#define RGS_SSUSB_RX_FL_OUT (0xffff<<0) //15:0 - -//U3D_PHYD_MON2 -#define RGS_SSUSB_T2RLB_ERRCNT (0xffff<<16) //31:16 -#define RGS_SSUSB_RETRACK (0xf<<12) //15:12 -#define RGS_SSUSB_RXPLL_LOCK (0x1<<10) //10:10 -#define RGS_SSUSB_CDR_VCOCAL_CPLT_D (0x1<<9) //9:9 -#define RGS_SSUSB_PLL_VCOCAL_CPLT_D (0x1<<8) //8:8 -#define RGS_SSUSB_PDNCTL (0xff<<0) //7:0 - -//U3D_PHYD_MON3 -#define RGS_SSUSB_TSEQ_ERRCNT (0xffff<<16) //31:16 -#define RGS_SSUSB_PRBS_ERRCNT (0xffff<<0) //15:0 - -//U3D_PHYD_MON4 -#define RGS_SSUSB_RX_LSLOCK_CNT (0xf<<24) //27:24 -#define RGS_SSUSB_SCP_DETCNT (0xff<<16) //23:16 -#define RGS_SSUSB_TSEQ_DETCNT (0xffff<<0) //15:0 - -//U3D_PHYD_MON5 -#define RGS_SSUSB_EBUFMSG (0xffff<<16) //31:16 -#define RGS_SSUSB_BERT_LOCK (0x1<<15) //15:15 -#define RGS_SSUSB_SCP_DET (0x1<<14) //14:14 -#define RGS_SSUSB_TSEQ_DET (0x1<<13) //13:13 -#define RGS_SSUSB_EBUF_UDF (0x1<<12) //12:12 -#define RGS_SSUSB_EBUF_OVF (0x1<<11) //11:11 -#define RGS_SSUSB_PRBS_PASSTH (0x1<<10) //10:10 -#define RGS_SSUSB_PRBS_PASS (0x1<<9) //9:9 -#define RGS_SSUSB_PRBS_LOCK (0x1<<8) //8:8 -#define RGS_SSUSB_T2RLB_ERR (0x1<<6) //6:6 -#define RGS_SSUSB_T2RLB_PASSTH (0x1<<5) //5:5 -#define RGS_SSUSB_T2RLB_PASS (0x1<<4) //4:4 -#define RGS_SSUSB_T2RLB_LOCK (0x1<<3) //3:3 -#define RGS_SSUSB_RX_IMPCAL_DONE (0x1<<2) //2:2 -#define RGS_SSUSB_TX_IMPCAL_DONE (0x1<<1) //1:1 -#define RGS_SSUSB_RXDETECTED (0x1<<0) //0:0 - -//U3D_PHYD_MON6 -#define RGS_SSUSB_SIGCAL_DONE (0x1<<30) //30:30 -#define RGS_SSUSB_SIGCAL_CAL_OUT (0x1<<29) //29:29 -#define RGS_SSUSB_SIGCAL_OFFSET (0x1f<<24) //28:24 -#define RGS_SSUSB_RX_IMP_SEL (0x1f<<16) //20:16 -#define RGS_SSUSB_TX_IMP_SEL (0x1f<<8) //12:8 -#define RGS_SSUSB_TFIFO_MSG (0xf<<4) //7:4 -#define RGS_SSUSB_RFIFO_MSG (0xf<<0) //3:0 - -//U3D_PHYD_MON7 -#define RGS_SSUSB_FT_OUT (0xff<<8) //15:8 -#define RGS_SSUSB_PRB_OUT (0xff<<0) //7:0 - -//U3D_PHYA_RX_MON0 -#define RGS_SSUSB_EQ_DCLEQ (0xf<<24) //27:24 -#define RGS_SSUSB_EQ_DCD0H (0x7f<<16) //22:16 -#define RGS_SSUSB_EQ_DCD0L (0x7f<<8) //14:8 -#define RGS_SSUSB_EQ_DCD1H (0x7f<<0) //6:0 - -//U3D_PHYA_RX_MON1 -#define RGS_SSUSB_EQ_DCD1L (0x7f<<24) //30:24 -#define RGS_SSUSB_EQ_DCE0 (0x7f<<16) //22:16 -#define RGS_SSUSB_EQ_DCE1 
(0x7f<<8) //14:8 -#define RGS_SSUSB_EQ_DCHHL (0x7f<<0) //6:0 - -//U3D_PHYA_RX_MON2 -#define RGS_SSUSB_EQ_LEQ_STOP (0x1<<31) //31:31 -#define RGS_SSUSB_EQ_DCLHL (0x7f<<24) //30:24 -#define RGS_SSUSB_EQ_STATUS (0xff<<16) //23:16 -#define RGS_SSUSB_EQ_DCEYE0 (0x7f<<8) //14:8 -#define RGS_SSUSB_EQ_DCEYE1 (0x7f<<0) //6:0 - -//U3D_PHYA_RX_MON3 -#define RGS_SSUSB_EQ_EYE_MONITOR_ERRCNT_0 (0xfffff<<0) //19:0 - -//U3D_PHYA_RX_MON4 -#define RGS_SSUSB_EQ_EYE_MONITOR_ERRCNT_1 (0xfffff<<0) //19:0 - -//U3D_PHYA_RX_MON5 -#define RGS_SSUSB_EQ_DCLEQOS (0x1f<<8) //12:8 -#define RGS_SSUSB_EQ_EYE_CNT_RDY (0x1<<7) //7:7 -#define RGS_SSUSB_EQ_PILPO (0x7f<<0) //6:0 - -//U3D_PHYD_CPPAT2 -#define RG_SSUSB_CPPAT_OUT_H_TMP2 (0xf<<16) //19:16 -#define RG_SSUSB_CPPAT_OUT_H_TMP1 (0xff<<8) //15:8 -#define RG_SSUSB_CPPAT_OUT_H_TMP0 (0xff<<0) //7:0 - -//U3D_EQ_EYE3 -#define RG_SSUSB_EQ_LEQ_SHIFT (0x7<<24) //26:24 -#define RG_SSUSB_EQ_EYE_CNT (0xfffff<<0) //19:0 - -//U3D_KBAND_OUT -#define RGS_SSUSB_CDR_BAND_5G (0xff<<24) //31:24 -#define RGS_SSUSB_CDR_BAND_2P5G (0xff<<16) //23:16 -#define RGS_SSUSB_PLL_BAND_5G (0xff<<8) //15:8 -#define RGS_SSUSB_PLL_BAND_2P5G (0xff<<0) //7:0 - -//U3D_KBAND_OUT1 -#define RGS_SSUSB_CDR_VCOCAL_FAIL (0x1<<24) //24:24 -#define RGS_SSUSB_CDR_VCOCAL_STATE (0xff<<16) //23:16 -#define RGS_SSUSB_PLL_VCOCAL_FAIL (0x1<<8) //8:8 -#define RGS_SSUSB_PLL_VCOCAL_STATE (0xff<<0) //7:0 - - -/* OFFSET */ - -//U3D_PHYD_MIX0 -#define RG_SSUSB_P_P3_TX_NG_OFST (31) -#define RG_SSUSB_TSEQ_EN_OFST (30) -#define RG_SSUSB_TSEQ_POLEN_OFST (29) -#define RG_SSUSB_TSEQ_POL_OFST (28) -#define RG_SSUSB_P_P3_PCLK_NG_OFST (27) -#define RG_SSUSB_TSEQ_TH_OFST (24) -#define RG_SSUSB_PRBS_BERTH_OFST (16) -#define RG_SSUSB_DISABLE_PHY_U2_ON_OFST (15) -#define RG_SSUSB_DISABLE_PHY_U2_OFF_OFST (14) -#define RG_SSUSB_PRBS_EN_OFST (13) -#define RG_SSUSB_BPSLOCK_OFST (12) -#define RG_SSUSB_RTCOMCNT_OFST (8) -#define RG_SSUSB_COMCNT_OFST (4) -#define RG_SSUSB_PRBSEL_CALIB_OFST (0) - -//U3D_PHYD_MIX1 -#define RG_SSUSB_SLEEP_EN_OFST (31) -#define RG_SSUSB_PRBSEL_PCS_OFST (28) -#define RG_SSUSB_TXLFPS_PRD_OFST (24) -#define RG_SSUSB_P_RX_P0S_CK_OFST (23) -#define RG_SSUSB_P_TX_P0S_CK_OFST (22) -#define RG_SSUSB_PDNCTL_OFST (16) -#define RG_SSUSB_TX_DRV_EN_OFST (15) -#define RG_SSUSB_TX_DRV_SEL_OFST (14) -#define RG_SSUSB_TX_DRV_DLY_OFST (8) -#define RG_SSUSB_BERT_EN_OFST (7) -#define RG_SSUSB_SCP_TH_OFST (4) -#define RG_SSUSB_SCP_EN_OFST (3) -#define RG_SSUSB_RXANSIDEC_TEST_OFST (0) - -//U3D_PHYD_LFPS0 -#define RG_SSUSB_LFPS_PWD_OFST (30) -#define RG_SSUSB_FORCE_LFPS_PWD_OFST (29) -#define RG_SSUSB_RXLFPS_OVF_OFST (24) -#define RG_SSUSB_P3_ENTRY_SEL_OFST (23) -#define RG_SSUSB_P3_ENTRY_OFST (22) -#define RG_SSUSB_RXLFPS_CDRSEL_OFST (20) -#define RG_SSUSB_RXLFPS_CDRTH_OFST (16) -#define RG_SSUSB_LOCK5G_BLOCK_OFST (15) -#define RG_SSUSB_TFIFO_EXT_D_SEL_OFST (14) -#define RG_SSUSB_TFIFO_NO_EXTEND_OFST (13) -#define RG_SSUSB_RXLFPS_LOB_OFST (8) -#define RG_SSUSB_TXLFPS_EN_OFST (7) -#define RG_SSUSB_TXLFPS_SEL_OFST (6) -#define RG_SSUSB_RXLFPS_CDRLOCK_OFST (5) -#define RG_SSUSB_RXLFPS_UPB_OFST (0) - -//U3D_PHYD_LFPS1 -#define RG_SSUSB_RX_IMP_BIAS_OFST (28) -#define RG_SSUSB_TX_IMP_BIAS_OFST (24) -#define RG_SSUSB_FWAKE_TH_OFST (16) -#define RG_SSUSB_RXLFPS_UDF_OFST (8) -#define RG_SSUSB_RXLFPS_P0IDLETH_OFST (0) - -//U3D_PHYD_IMPCAL0 -#define RG_SSUSB_FORCE_TX_IMPSEL_OFST (31) -#define RG_SSUSB_TX_IMPCAL_EN_OFST (30) -#define RG_SSUSB_FORCE_TX_IMPCAL_EN_OFST (29) -#define RG_SSUSB_TX_IMPSEL_OFST (24) -#define RG_SSUSB_TX_IMPCAL_CALCYC_OFST (16) 
-#define RG_SSUSB_TX_IMPCAL_STBCYC_OFST (10) -#define RG_SSUSB_TX_IMPCAL_CYCCNT_OFST (0) - -//U3D_PHYD_IMPCAL1 -#define RG_SSUSB_FORCE_RX_IMPSEL_OFST (31) -#define RG_SSUSB_RX_IMPCAL_EN_OFST (30) -#define RG_SSUSB_FORCE_RX_IMPCAL_EN_OFST (29) -#define RG_SSUSB_RX_IMPSEL_OFST (24) -#define RG_SSUSB_RX_IMPCAL_CALCYC_OFST (16) -#define RG_SSUSB_RX_IMPCAL_STBCYC_OFST (10) -#define RG_SSUSB_RX_IMPCAL_CYCCNT_OFST (0) - -//U3D_PHYD_TXPLL0 -#define RG_SSUSB_TXPLL_DDSEN_CYC_OFST (27) -#define RG_SSUSB_TXPLL_ON_OFST (26) -#define RG_SSUSB_FORCE_TXPLLON_OFST (25) -#define RG_SSUSB_TXPLL_STBCYC_OFST (16) -#define RG_SSUSB_TXPLL_NCPOCHG_CYC_OFST (12) -#define RG_SSUSB_TXPLL_NCPOEN_CYC_OFST (10) -#define RG_SSUSB_TXPLL_DDSRSTB_CYC_OFST (0) - -//U3D_PHYD_TXPLL1 -#define RG_SSUSB_PLL_NCPO_EN_OFST (31) -#define RG_SSUSB_PLL_FIFO_START_MAN_OFST (30) -#define RG_SSUSB_PLL_NCPO_CHG_OFST (28) -#define RG_SSUSB_PLL_DDS_RSTB_OFST (27) -#define RG_SSUSB_PLL_DDS_PWDB_OFST (26) -#define RG_SSUSB_PLL_DDSEN_OFST (25) -#define RG_SSUSB_PLL_AUTOK_VCO_OFST (24) -#define RG_SSUSB_PLL_PWD_OFST (23) -#define RG_SSUSB_RX_AFE_PWD_OFST (22) -#define RG_SSUSB_PLL_TCADJ_OFST (16) -#define RG_SSUSB_FORCE_CDR_TCADJ_OFST (15) -#define RG_SSUSB_FORCE_CDR_AUTOK_VCO_OFST (14) -#define RG_SSUSB_FORCE_CDR_PWD_OFST (13) -#define RG_SSUSB_FORCE_PLL_NCPO_EN_OFST (12) -#define RG_SSUSB_FORCE_PLL_FIFO_START_MAN_OFST (11) -#define RG_SSUSB_FORCE_PLL_NCPO_CHG_OFST (9) -#define RG_SSUSB_FORCE_PLL_DDS_RSTB_OFST (8) -#define RG_SSUSB_FORCE_PLL_DDS_PWDB_OFST (7) -#define RG_SSUSB_FORCE_PLL_DDSEN_OFST (6) -#define RG_SSUSB_FORCE_PLL_TCADJ_OFST (5) -#define RG_SSUSB_FORCE_PLL_AUTOK_VCO_OFST (4) -#define RG_SSUSB_FORCE_PLL_PWD_OFST (3) -#define RG_SSUSB_FLT_1_DISPERR_B_OFST (2) - -//U3D_PHYD_TXPLL2 -#define RG_SSUSB_TX_LFPS_EN_OFST (31) -#define RG_SSUSB_FORCE_TX_LFPS_EN_OFST (30) -#define RG_SSUSB_TX_LFPS_OFST (29) -#define RG_SSUSB_FORCE_TX_LFPS_OFST (28) -#define RG_SSUSB_RXPLL_STB_OFST (27) -#define RG_SSUSB_TXPLL_STB_OFST (26) -#define RG_SSUSB_FORCE_RXPLL_STB_OFST (25) -#define RG_SSUSB_FORCE_TXPLL_STB_OFST (24) -#define RG_SSUSB_RXPLL_REFCKSEL_OFST (16) -#define RG_SSUSB_RXPLL_STBMODE_OFST (11) -#define RG_SSUSB_RXPLL_ON_OFST (10) -#define RG_SSUSB_FORCE_RXPLLON_OFST (9) -#define RG_SSUSB_FORCE_RX_AFE_PWD_OFST (8) -#define RG_SSUSB_CDR_AUTOK_VCO_OFST (7) -#define RG_SSUSB_CDR_PWD_OFST (6) -#define RG_SSUSB_CDR_TCADJ_OFST (0) - -//U3D_PHYD_FL0 -#define RG_SSUSB_RX_FL_TARGET_OFST (16) -#define RG_SSUSB_RX_FL_CYCLECNT_OFST (0) - -//U3D_PHYD_MIX2 -#define RG_SSUSB_RX_EQ_RST_OFST (31) -#define RG_SSUSB_RX_EQ_RST_SEL_OFST (30) -#define RG_SSUSB_RXVAL_RST_OFST (29) -#define RG_SSUSB_RXVAL_CNT_OFST (24) -#define RG_SSUSB_CDROS_EN_OFST (18) -#define RG_SSUSB_CDR_LCKOP_OFST (16) -#define RG_SSUSB_RX_FL_LOCKTH_OFST (8) -#define RG_SSUSB_RX_FL_OFFSET_OFST (0) - -//U3D_PHYD_RX0 -#define RG_SSUSB_T2RLB_BERTH_OFST (24) -#define RG_SSUSB_T2RLB_PAT_OFST (16) -#define RG_SSUSB_T2RLB_EN_OFST (15) -#define RG_SSUSB_T2RLB_BPSCRAMB_OFST (14) -#define RG_SSUSB_T2RLB_SERIAL_OFST (13) -#define RG_SSUSB_T2RLB_MODE_OFST (11) -#define RG_SSUSB_RX_SAOSC_EN_OFST (10) -#define RG_SSUSB_RX_SAOSC_EN_SEL_OFST (9) -#define RG_SSUSB_RX_DFE_OPTION_OFST (8) -#define RG_SSUSB_RX_DFE_EN_OFST (7) -#define RG_SSUSB_RX_DFE_EN_SEL_OFST (6) -#define RG_SSUSB_RX_EQ_EN_OFST (5) -#define RG_SSUSB_RX_EQ_EN_SEL_OFST (4) -#define RG_SSUSB_RX_SAOSC_RST_OFST (3) -#define RG_SSUSB_RX_SAOSC_RST_SEL_OFST (2) -#define RG_SSUSB_RX_DFE_RST_OFST (1) -#define RG_SSUSB_RX_DFE_RST_SEL_OFST (0) - 
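
Each field in this header is described by a pair of macros: a mask macro with the field width already shifted into position, and a matching *_OFST macro giving the bit position, mirroring the //hi:lo comments. The generic accessors in mtk-phy.c (also removed by this commit) take exactly this (address, offset, mask) triple and do a read-modify-write of one 32-bit PHY register. A minimal sketch of how such a pair would be used, assuming the u3phyd bank is reachable through struct u3phy_info as elsewhere in this driver; the helper name example_force_rx_impsel is illustrative only and not part of the removed code:

#include "mtk-phy.h"
#include "mtk-phy-7621.h"

/* Sketch only: override the RX impedance code in U3D_PHYD_IMPCAL1 by writing
 * the 5-bit RG_SSUSB_RX_IMPSEL field and then asserting the force bit, via
 * the read-modify-write field accessor U3PhyWriteField32() from mtk-phy.c. */
static void example_force_rx_impsel(struct u3phy_info *info, PHY_UINT32 code)
{
	PHY_UINT32 addr = (PHY_UINT32)&info->u3phyd_regs->phyd_impcal1;

	/* bits 28:24 - manual RX impedance code */
	U3PhyWriteField32(addr, RG_SSUSB_RX_IMPSEL_OFST, RG_SSUSB_RX_IMPSEL, code);
	/* bit 31 - use the manual code instead of the calibrated result */
	U3PhyWriteField32(addr, RG_SSUSB_FORCE_RX_IMPSEL_OFST, RG_SSUSB_FORCE_RX_IMPSEL, 1);
}
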
-//U3D_PHYD_T2RLB -#define RG_SSUSB_EQTRAIN_CH_MODE_OFST (28) -#define RG_SSUSB_PRB_OUT_CPPAT_OFST (27) -#define RG_SSUSB_BPANSIENC_OFST (26) -#define RG_SSUSB_VALID_EN_OFST (25) -#define RG_SSUSB_EBUF_SRST_OFST (24) -#define RG_SSUSB_K_EMP_OFST (20) -#define RG_SSUSB_K_FUL_OFST (16) -#define RG_SSUSB_T2RLB_BDATRST_OFST (12) -#define RG_SSUSB_P_T2RLB_SKP_EN_OFST (10) -#define RG_SSUSB_T2RLB_PATMODE_OFST (8) -#define RG_SSUSB_T2RLB_TSEQCNT_OFST (0) - -//U3D_PHYD_CPPAT -#define RG_SSUSB_CPPAT_PROGRAM_EN_OFST (24) -#define RG_SSUSB_CPPAT_TOZ_OFST (21) -#define RG_SSUSB_CPPAT_PRBS_EN_OFST (20) -#define RG_SSUSB_CPPAT_OUT_TMP2_OFST (16) -#define RG_SSUSB_CPPAT_OUT_TMP1_OFST (8) -#define RG_SSUSB_CPPAT_OUT_TMP0_OFST (0) - -//U3D_PHYD_MIX3 -#define RG_SSUSB_CDR_TCADJ_MINUS_OFST (31) -#define RG_SSUSB_P_CDROS_EN_OFST (30) -#define RG_SSUSB_P_P2_TX_DRV_DIS_OFST (28) -#define RG_SSUSB_CDR_TCADJ_OFFSET_OFST (24) -#define RG_SSUSB_PLL_TCADJ_MINUS_OFST (23) -#define RG_SSUSB_FORCE_PLL_BIAS_LPF_EN_OFST (20) -#define RG_SSUSB_PLL_BIAS_LPF_EN_OFST (19) -#define RG_SSUSB_PLL_TCADJ_OFFSET_OFST (16) -#define RG_SSUSB_FORCE_PLL_SSCEN_OFST (15) -#define RG_SSUSB_PLL_SSCEN_OFST (14) -#define RG_SSUSB_FORCE_CDR_PI_PWD_OFST (13) -#define RG_SSUSB_CDR_PI_PWD_OFST (12) -#define RG_SSUSB_CDR_PI_MODE_OFST (11) -#define RG_SSUSB_TXPLL_SSCEN_CYC_OFST (0) - -//U3D_PHYD_EBUFCTL -#define RG_SSUSB_EBUFCTL_OFST (0) - -//U3D_PHYD_PIPE0 -#define RG_SSUSB_RXTERMINATION_OFST (30) -#define RG_SSUSB_RXEQTRAINING_OFST (29) -#define RG_SSUSB_RXPOLARITY_OFST (28) -#define RG_SSUSB_TXDEEMPH_OFST (26) -#define RG_SSUSB_POWERDOWN_OFST (24) -#define RG_SSUSB_TXONESZEROS_OFST (23) -#define RG_SSUSB_TXELECIDLE_OFST (22) -#define RG_SSUSB_TXDETECTRX_OFST (21) -#define RG_SSUSB_PIPE_SEL_OFST (20) -#define RG_SSUSB_TXDATAK_OFST (16) -#define RG_SSUSB_CDR_STABLE_SEL_OFST (15) -#define RG_SSUSB_CDR_STABLE_OFST (14) -#define RG_SSUSB_CDR_RSTB_SEL_OFST (13) -#define RG_SSUSB_CDR_RSTB_OFST (12) -#define RG_SSUSB_P_ERROR_SEL_OFST (4) -#define RG_SSUSB_TXMARGIN_OFST (1) -#define RG_SSUSB_TXCOMPLIANCE_OFST (0) - -//U3D_PHYD_PIPE1 -#define RG_SSUSB_TXDATA_OFST (0) - -//U3D_PHYD_MIX4 -#define RG_SSUSB_CDROS_CNT_OFST (24) -#define RG_SSUSB_T2RLB_BER_EN_OFST (16) -#define RG_SSUSB_T2RLB_BER_RATE_OFST (0) - -//U3D_PHYD_CKGEN0 -#define RG_SSUSB_RFIFO_IMPLAT_OFST (27) -#define RG_SSUSB_TFIFO_PSEL_OFST (24) -#define RG_SSUSB_CKGEN_PSEL_OFST (8) -#define RG_SSUSB_RXCK_INV_OFST (0) - -//U3D_PHYD_MIX5 -#define RG_SSUSB_PRB_SEL_OFST (16) -#define RG_SSUSB_RXPLL_STBCYC_OFST (0) - -//U3D_PHYD_RESERVED -#define RG_SSUSB_PHYD_RESERVE_OFST (0) -//#define RG_SSUSB_RX_SIGDET_SEL_OFST (11) -//#define RG_SSUSB_RX_SIGDET_EN_OFST (12) -//#define RG_SSUSB_RX_PI_CAL_MANUAL_SEL_OFST (9) -//#define RG_SSUSB_RX_PI_CAL_MANUAL_EN_OFST (10) - -//U3D_PHYD_CDR0 -#define RG_SSUSB_CDR_BIC_LTR_OFST (28) -#define RG_SSUSB_CDR_BIC_LTD0_OFST (24) -#define RG_SSUSB_CDR_BC_LTD1_OFST (16) -#define RG_SSUSB_CDR_BC_LTR_OFST (8) -#define RG_SSUSB_CDR_BC_LTD0_OFST (0) - -//U3D_PHYD_CDR1 -#define RG_SSUSB_CDR_BIR_LTD1_OFST (24) -#define RG_SSUSB_CDR_BIR_LTR_OFST (16) -#define RG_SSUSB_CDR_BIR_LTD0_OFST (8) -#define RG_SSUSB_CDR_BW_SEL_OFST (6) -#define RG_SSUSB_CDR_BIC_LTD1_OFST (0) - -//U3D_PHYD_PLL_0 -#define RG_SSUSB_FORCE_CDR_BAND_5G_OFST (28) -#define RG_SSUSB_FORCE_CDR_BAND_2P5G_OFST (27) -#define RG_SSUSB_FORCE_PLL_BAND_5G_OFST (26) -#define RG_SSUSB_FORCE_PLL_BAND_2P5G_OFST (25) -#define RG_SSUSB_P_EQ_T_SEL_OFST (15) -#define RG_SSUSB_PLL_ISO_EN_CYC_OFST (5) -#define 
RG_SSUSB_PLLBAND_RECAL_OFST (4) -#define RG_SSUSB_PLL_DDS_ISO_EN_OFST (3) -#define RG_SSUSB_FORCE_PLL_DDS_ISO_EN_OFST (2) -#define RG_SSUSB_PLL_DDS_PWR_ON_OFST (1) -#define RG_SSUSB_FORCE_PLL_DDS_PWR_ON_OFST (0) - -//U3D_PHYD_PLL_1 -#define RG_SSUSB_CDR_BAND_5G_OFST (24) -#define RG_SSUSB_CDR_BAND_2P5G_OFST (16) -#define RG_SSUSB_PLL_BAND_5G_OFST (8) -#define RG_SSUSB_PLL_BAND_2P5G_OFST (0) - -//U3D_PHYD_BCN_DET_1 -#define RG_SSUSB_P_BCN_OBS_PRD_OFST (16) -#define RG_SSUSB_U_BCN_OBS_PRD_OFST (0) - -//U3D_PHYD_BCN_DET_2 -#define RG_SSUSB_P_BCN_OBS_SEL_OFST (16) -#define RG_SSUSB_BCN_DET_DIS_OFST (12) -#define RG_SSUSB_U_BCN_OBS_SEL_OFST (0) - -//U3D_EQ0 -#define RG_SSUSB_EQ_DLHL_LFI_OFST (24) -#define RG_SSUSB_EQ_DHHL_LFI_OFST (16) -#define RG_SSUSB_EQ_DD0HOS_LFI_OFST (8) -#define RG_SSUSB_EQ_DD0LOS_LFI_OFST (0) - -//U3D_EQ1 -#define RG_SSUSB_EQ_DD1HOS_LFI_OFST (24) -#define RG_SSUSB_EQ_DD1LOS_LFI_OFST (16) -#define RG_SSUSB_EQ_DE0OS_LFI_OFST (8) -#define RG_SSUSB_EQ_DE1OS_LFI_OFST (0) - -//U3D_EQ2 -#define RG_SSUSB_EQ_DLHLOS_LFI_OFST (24) -#define RG_SSUSB_EQ_DHHLOS_LFI_OFST (16) -#define RG_SSUSB_EQ_STOPTIME_OFST (14) -#define RG_SSUSB_EQ_DHHL_LF_SEL_OFST (11) -#define RG_SSUSB_EQ_DSAOS_LF_SEL_OFST (8) -#define RG_SSUSB_EQ_STARTTIME_OFST (6) -#define RG_SSUSB_EQ_DLEQ_LF_SEL_OFST (3) -#define RG_SSUSB_EQ_DLHL_LF_SEL_OFST (0) - -//U3D_EQ3 -#define RG_SSUSB_EQ_DLEQ_LFI_GEN2_OFST (28) -#define RG_SSUSB_EQ_DLEQ_LFI_GEN1_OFST (24) -#define RG_SSUSB_EQ_DEYE0OS_LFI_OFST (16) -#define RG_SSUSB_EQ_DEYE1OS_LFI_OFST (8) -#define RG_SSUSB_EQ_TRI_DET_EN_OFST (7) -#define RG_SSUSB_EQ_TRI_DET_TH_OFST (0) - -//U3D_EQ_EYE0 -#define RG_SSUSB_EQ_EYE_XOFFSET_OFST (25) -#define RG_SSUSB_EQ_EYE_MON_EN_OFST (24) -#define RG_SSUSB_EQ_EYE0_Y_OFST (16) -#define RG_SSUSB_EQ_EYE1_Y_OFST (8) -#define RG_SSUSB_EQ_PILPO_ROUT_OFST (7) -#define RG_SSUSB_EQ_PI_KPGAIN_OFST (4) -#define RG_SSUSB_EQ_EYE_CNT_EN_OFST (3) - -//U3D_EQ_EYE1 -#define RG_SSUSB_EQ_SIGDET_OFST (24) -#define RG_SSUSB_EQ_EYE_MASK_OFST (7) - -//U3D_EQ_EYE2 -#define RG_SSUSB_EQ_RX500M_CK_SEL_OFST (31) -#define RG_SSUSB_EQ_SD_CNT1_OFST (24) -#define RG_SSUSB_EQ_ISIFLAG_SEL_OFST (22) -#define RG_SSUSB_EQ_SD_CNT0_OFST (16) - -//U3D_EQ_DFE0 -#define RG_SSUSB_EQ_LEQMAX_OFST (28) -#define RG_SSUSB_EQ_DFEX_EN_OFST (27) -#define RG_SSUSB_EQ_DFEX_LF_SEL_OFST (24) -#define RG_SSUSB_EQ_CHK_EYE_H_OFST (23) -#define RG_SSUSB_EQ_PIEYE_INI_OFST (16) -#define RG_SSUSB_EQ_PI90_INI_OFST (8) -#define RG_SSUSB_EQ_PI0_INI_OFST (0) - -//U3D_EQ_DFE1 -#define RG_SSUSB_EQ_REV_OFST (16) -#define RG_SSUSB_EQ_DFEYEN_DUR_OFST (12) -#define RG_SSUSB_EQ_DFEXEN_DUR_OFST (8) -#define RG_SSUSB_EQ_DFEX_RST_OFST (7) -#define RG_SSUSB_EQ_GATED_RXD_B_OFST (6) -#define RG_SSUSB_EQ_PI90CK_SEL_OFST (4) -#define RG_SSUSB_EQ_DFEX_DIS_OFST (2) -#define RG_SSUSB_EQ_DFEYEN_STOP_DIS_OFST (1) -#define RG_SSUSB_EQ_DFEXEN_SEL_OFST (0) - -//U3D_EQ_DFE2 -#define RG_SSUSB_EQ_MON_SEL_OFST (24) -#define RG_SSUSB_EQ_LEQOSC_DLYCNT_OFST (16) -#define RG_SSUSB_EQ_DLEQOS_LFI_OFST (8) -#define RG_SSUSB_EQ_LEQ_STOP_TO_OFST (0) - -//U3D_EQ_DFE3 -#define RG_SSUSB_EQ_RESERVED_OFST (0) - -//U3D_PHYD_MON0 -#define RGS_SSUSB_BERT_BERC_OFST (16) -#define RGS_SSUSB_LFPS_OFST (12) -#define RGS_SSUSB_TRAINDEC_OFST (8) -#define RGS_SSUSB_SCP_PAT_OFST (0) - -//U3D_PHYD_MON1 -#define RGS_SSUSB_RX_FL_OUT_OFST (0) - -//U3D_PHYD_MON2 -#define RGS_SSUSB_T2RLB_ERRCNT_OFST (16) -#define RGS_SSUSB_RETRACK_OFST (12) -#define RGS_SSUSB_RXPLL_LOCK_OFST (10) -#define RGS_SSUSB_CDR_VCOCAL_CPLT_D_OFST (9) -#define 
RGS_SSUSB_PLL_VCOCAL_CPLT_D_OFST (8) -#define RGS_SSUSB_PDNCTL_OFST (0) - -//U3D_PHYD_MON3 -#define RGS_SSUSB_TSEQ_ERRCNT_OFST (16) -#define RGS_SSUSB_PRBS_ERRCNT_OFST (0) - -//U3D_PHYD_MON4 -#define RGS_SSUSB_RX_LSLOCK_CNT_OFST (24) -#define RGS_SSUSB_SCP_DETCNT_OFST (16) -#define RGS_SSUSB_TSEQ_DETCNT_OFST (0) - -//U3D_PHYD_MON5 -#define RGS_SSUSB_EBUFMSG_OFST (16) -#define RGS_SSUSB_BERT_LOCK_OFST (15) -#define RGS_SSUSB_SCP_DET_OFST (14) -#define RGS_SSUSB_TSEQ_DET_OFST (13) -#define RGS_SSUSB_EBUF_UDF_OFST (12) -#define RGS_SSUSB_EBUF_OVF_OFST (11) -#define RGS_SSUSB_PRBS_PASSTH_OFST (10) -#define RGS_SSUSB_PRBS_PASS_OFST (9) -#define RGS_SSUSB_PRBS_LOCK_OFST (8) -#define RGS_SSUSB_T2RLB_ERR_OFST (6) -#define RGS_SSUSB_T2RLB_PASSTH_OFST (5) -#define RGS_SSUSB_T2RLB_PASS_OFST (4) -#define RGS_SSUSB_T2RLB_LOCK_OFST (3) -#define RGS_SSUSB_RX_IMPCAL_DONE_OFST (2) -#define RGS_SSUSB_TX_IMPCAL_DONE_OFST (1) -#define RGS_SSUSB_RXDETECTED_OFST (0) - -//U3D_PHYD_MON6 -#define RGS_SSUSB_SIGCAL_DONE_OFST (30) -#define RGS_SSUSB_SIGCAL_CAL_OUT_OFST (29) -#define RGS_SSUSB_SIGCAL_OFFSET_OFST (24) -#define RGS_SSUSB_RX_IMP_SEL_OFST (16) -#define RGS_SSUSB_TX_IMP_SEL_OFST (8) -#define RGS_SSUSB_TFIFO_MSG_OFST (4) -#define RGS_SSUSB_RFIFO_MSG_OFST (0) - -//U3D_PHYD_MON7 -#define RGS_SSUSB_FT_OUT_OFST (8) -#define RGS_SSUSB_PRB_OUT_OFST (0) - -//U3D_PHYA_RX_MON0 -#define RGS_SSUSB_EQ_DCLEQ_OFST (24) -#define RGS_SSUSB_EQ_DCD0H_OFST (16) -#define RGS_SSUSB_EQ_DCD0L_OFST (8) -#define RGS_SSUSB_EQ_DCD1H_OFST (0) - -//U3D_PHYA_RX_MON1 -#define RGS_SSUSB_EQ_DCD1L_OFST (24) -#define RGS_SSUSB_EQ_DCE0_OFST (16) -#define RGS_SSUSB_EQ_DCE1_OFST (8) -#define RGS_SSUSB_EQ_DCHHL_OFST (0) - -//U3D_PHYA_RX_MON2 -#define RGS_SSUSB_EQ_LEQ_STOP_OFST (31) -#define RGS_SSUSB_EQ_DCLHL_OFST (24) -#define RGS_SSUSB_EQ_STATUS_OFST (16) -#define RGS_SSUSB_EQ_DCEYE0_OFST (8) -#define RGS_SSUSB_EQ_DCEYE1_OFST (0) - -//U3D_PHYA_RX_MON3 -#define RGS_SSUSB_EQ_EYE_MONITOR_ERRCNT_0_OFST (0) - -//U3D_PHYA_RX_MON4 -#define RGS_SSUSB_EQ_EYE_MONITOR_ERRCNT_1_OFST (0) - -//U3D_PHYA_RX_MON5 -#define RGS_SSUSB_EQ_DCLEQOS_OFST (8) -#define RGS_SSUSB_EQ_EYE_CNT_RDY_OFST (7) -#define RGS_SSUSB_EQ_PILPO_OFST (0) - -//U3D_PHYD_CPPAT2 -#define RG_SSUSB_CPPAT_OUT_H_TMP2_OFST (16) -#define RG_SSUSB_CPPAT_OUT_H_TMP1_OFST (8) -#define RG_SSUSB_CPPAT_OUT_H_TMP0_OFST (0) - -//U3D_EQ_EYE3 -#define RG_SSUSB_EQ_LEQ_SHIFT_OFST (24) -#define RG_SSUSB_EQ_EYE_CNT_OFST (0) - -//U3D_KBAND_OUT -#define RGS_SSUSB_CDR_BAND_5G_OFST (24) -#define RGS_SSUSB_CDR_BAND_2P5G_OFST (16) -#define RGS_SSUSB_PLL_BAND_5G_OFST (8) -#define RGS_SSUSB_PLL_BAND_2P5G_OFST (0) - -//U3D_KBAND_OUT1 -#define RGS_SSUSB_CDR_VCOCAL_FAIL_OFST (24) -#define RGS_SSUSB_CDR_VCOCAL_STATE_OFST (16) -#define RGS_SSUSB_PLL_VCOCAL_FAIL_OFST (8) -#define RGS_SSUSB_PLL_VCOCAL_STATE_OFST (0) - - -/////////////////////////////////////////////////////////////////////////////// - -struct u3phyd_bank2_reg { - //0x0 - PHY_LE32 b2_phyd_top1; - PHY_LE32 b2_phyd_top2; - PHY_LE32 b2_phyd_top3; - PHY_LE32 b2_phyd_top4; - //0x10 - PHY_LE32 b2_phyd_top5; - PHY_LE32 b2_phyd_top6; - PHY_LE32 b2_phyd_top7; - PHY_LE32 b2_phyd_p_sigdet1; - //0x20 - PHY_LE32 b2_phyd_p_sigdet2; - PHY_LE32 b2_phyd_p_sigdet_cal1; - PHY_LE32 b2_phyd_rxdet1; - PHY_LE32 b2_phyd_rxdet2; - //0x30 - PHY_LE32 b2_phyd_misc0; - PHY_LE32 b2_phyd_misc2; - PHY_LE32 b2_phyd_misc3; - PHY_LE32 reserve0; - //0x40 - PHY_LE32 b2_rosc_0; - PHY_LE32 b2_rosc_1; - PHY_LE32 b2_rosc_2; - PHY_LE32 b2_rosc_3; - //0x50 - PHY_LE32 b2_rosc_4; - PHY_LE32 b2_rosc_5; 
- PHY_LE32 b2_rosc_6; - PHY_LE32 b2_rosc_7; - //0x60 - PHY_LE32 b2_rosc_8; - PHY_LE32 b2_rosc_9; - PHY_LE32 b2_rosc_a; - PHY_LE32 reserve1; - //0x70~0xd0 - PHY_LE32 reserve2[28]; - //0xe0 - PHY_LE32 phyd_version; - PHY_LE32 phyd_model; -}; - -//U3D_B2_PHYD_TOP1 -#define RG_SSUSB_PCIE2_K_EMP (0xf<<28) //31:28 -#define RG_SSUSB_PCIE2_K_FUL (0xf<<24) //27:24 -#define RG_SSUSB_TX_EIDLE_LP_EN (0x1<<17) //17:17 -#define RG_SSUSB_FORCE_TX_EIDLE_LP_EN (0x1<<16) //16:16 -#define RG_SSUSB_SIGDET_EN (0x1<<15) //15:15 -#define RG_SSUSB_FORCE_SIGDET_EN (0x1<<14) //14:14 -#define RG_SSUSB_CLKRX_EN (0x1<<13) //13:13 -#define RG_SSUSB_FORCE_CLKRX_EN (0x1<<12) //12:12 -#define RG_SSUSB_CLKTX_EN (0x1<<11) //11:11 -#define RG_SSUSB_FORCE_CLKTX_EN (0x1<<10) //10:10 -#define RG_SSUSB_CLK_REQ_N_I (0x1<<9) //9:9 -#define RG_SSUSB_FORCE_CLK_REQ_N_I (0x1<<8) //8:8 -#define RG_SSUSB_RATE (0x1<<6) //6:6 -#define RG_SSUSB_FORCE_RATE (0x1<<5) //5:5 -#define RG_SSUSB_PCIE_MODE_SEL (0x1<<4) //4:4 -#define RG_SSUSB_FORCE_PCIE_MODE_SEL (0x1<<3) //3:3 -#define RG_SSUSB_PHY_MODE (0x3<<1) //2:1 -#define RG_SSUSB_FORCE_PHY_MODE (0x1<<0) //0:0 - -//U3D_B2_PHYD_TOP2 -#define RG_SSUSB_FORCE_IDRV_6DB (0x1<<30) //30:30 -#define RG_SSUSB_IDRV_6DB (0x3f<<24) //29:24 -#define RG_SSUSB_FORCE_IDEM_3P5DB (0x1<<22) //22:22 -#define RG_SSUSB_IDEM_3P5DB (0x3f<<16) //21:16 -#define RG_SSUSB_FORCE_IDRV_3P5DB (0x1<<14) //14:14 -#define RG_SSUSB_IDRV_3P5DB (0x3f<<8) //13:8 -#define RG_SSUSB_FORCE_IDRV_0DB (0x1<<6) //6:6 -#define RG_SSUSB_IDRV_0DB (0x3f<<0) //5:0 - -//U3D_B2_PHYD_TOP3 -#define RG_SSUSB_TX_BIASI (0x7<<25) //27:25 -#define RG_SSUSB_FORCE_TX_BIASI_EN (0x1<<24) //24:24 -#define RG_SSUSB_TX_BIASI_EN (0x1<<16) //16:16 -#define RG_SSUSB_FORCE_TX_BIASI (0x1<<13) //13:13 -#define RG_SSUSB_FORCE_IDEM_6DB (0x1<<8) //8:8 -#define RG_SSUSB_IDEM_6DB (0x3f<<0) //5:0 - -//U3D_B2_PHYD_TOP4 -#define RG_SSUSB_G1_CDR_BIC_LTR (0xf<<28) //31:28 -#define RG_SSUSB_G1_CDR_BIC_LTD0 (0xf<<24) //27:24 -#define RG_SSUSB_G1_CDR_BC_LTD1 (0x1f<<16) //20:16 -#define RG_SSUSB_G1_CDR_BC_LTR (0x1f<<8) //12:8 -#define RG_SSUSB_G1_CDR_BC_LTD0 (0x1f<<0) //4:0 - -//U3D_B2_PHYD_TOP5 -#define RG_SSUSB_G1_CDR_BIR_LTD1 (0x1f<<24) //28:24 -#define RG_SSUSB_G1_CDR_BIR_LTR (0x1f<<16) //20:16 -#define RG_SSUSB_G1_CDR_BIR_LTD0 (0x1f<<8) //12:8 -#define RG_SSUSB_G1_CDR_BIC_LTD1 (0xf<<0) //3:0 - -//U3D_B2_PHYD_TOP6 -#define RG_SSUSB_G2_CDR_BIC_LTR (0xf<<28) //31:28 -#define RG_SSUSB_G2_CDR_BIC_LTD0 (0xf<<24) //27:24 -#define RG_SSUSB_G2_CDR_BC_LTD1 (0x1f<<16) //20:16 -#define RG_SSUSB_G2_CDR_BC_LTR (0x1f<<8) //12:8 -#define RG_SSUSB_G2_CDR_BC_LTD0 (0x1f<<0) //4:0 - -//U3D_B2_PHYD_TOP7 -#define RG_SSUSB_G2_CDR_BIR_LTD1 (0x1f<<24) //28:24 -#define RG_SSUSB_G2_CDR_BIR_LTR (0x1f<<16) //20:16 -#define RG_SSUSB_G2_CDR_BIR_LTD0 (0x1f<<8) //12:8 -#define RG_SSUSB_G2_CDR_BIC_LTD1 (0xf<<0) //3:0 - -//U3D_B2_PHYD_P_SIGDET1 -#define RG_SSUSB_P_SIGDET_FLT_DIS (0x1<<31) //31:31 -#define RG_SSUSB_P_SIGDET_FLT_G2_DEAST_SEL (0x7f<<24) //30:24 -#define RG_SSUSB_P_SIGDET_FLT_G1_DEAST_SEL (0x7f<<16) //22:16 -#define RG_SSUSB_P_SIGDET_FLT_P2_AST_SEL (0x7f<<8) //14:8 -#define RG_SSUSB_P_SIGDET_FLT_PX_AST_SEL (0x7f<<0) //6:0 - -//U3D_B2_PHYD_P_SIGDET2 -#define RG_SSUSB_P_SIGDET_RX_VAL_S (0x1<<29) //29:29 -#define RG_SSUSB_P_SIGDET_L0S_DEAS_SEL (0x1<<28) //28:28 -#define RG_SSUSB_P_SIGDET_L0_EXIT_S (0x1<<27) //27:27 -#define RG_SSUSB_P_SIGDET_L0S_EXIT_T_S (0x3<<25) //26:25 -#define RG_SSUSB_P_SIGDET_L0S_EXIT_S (0x1<<24) //24:24 -#define RG_SSUSB_P_SIGDET_L0S_ENTRY_S (0x1<<16) //16:16 -#define 
RG_SSUSB_P_SIGDET_PRB_SEL (0x1<<10) //10:10 -#define RG_SSUSB_P_SIGDET_BK_SIG_T (0x3<<8) //9:8 -#define RG_SSUSB_P_SIGDET_P2_RXLFPS (0x1<<6) //6:6 -#define RG_SSUSB_P_SIGDET_NON_BK_AD (0x1<<5) //5:5 -#define RG_SSUSB_P_SIGDET_BK_B_RXEQ (0x1<<4) //4:4 -#define RG_SSUSB_P_SIGDET_G2_KO_SEL (0x3<<2) //3:2 -#define RG_SSUSB_P_SIGDET_G1_KO_SEL (0x3<<0) //1:0 - -//U3D_B2_PHYD_P_SIGDET_CAL1 -#define RG_SSUSB_P_SIGDET_CAL_OFFSET (0x1f<<24) //28:24 -#define RG_SSUSB_P_FORCE_SIGDET_CAL_OFFSET (0x1<<16) //16:16 -#define RG_SSUSB_P_SIGDET_CAL_EN (0x1<<8) //8:8 -#define RG_SSUSB_P_FORCE_SIGDET_CAL_EN (0x1<<3) //3:3 -#define RG_SSUSB_P_SIGDET_FLT_EN (0x1<<2) //2:2 -#define RG_SSUSB_P_SIGDET_SAMPLE_PRD (0x1<<1) //1:1 -#define RG_SSUSB_P_SIGDET_REK (0x1<<0) //0:0 - -//U3D_B2_PHYD_RXDET1 -#define RG_SSUSB_RXDET_PRB_SEL (0x1<<31) //31:31 -#define RG_SSUSB_FORCE_CMDET (0x1<<30) //30:30 -#define RG_SSUSB_RXDET_EN (0x1<<29) //29:29 -#define RG_SSUSB_FORCE_RXDET_EN (0x1<<28) //28:28 -#define RG_SSUSB_RXDET_K_TWICE (0x1<<27) //27:27 -#define RG_SSUSB_RXDET_STB3_SET (0x1ff<<18) //26:18 -#define RG_SSUSB_RXDET_STB2_SET (0x1ff<<9) //17:9 -#define RG_SSUSB_RXDET_STB1_SET (0x1ff<<0) //8:0 - -//U3D_B2_PHYD_RXDET2 -#define RG_SSUSB_PHYD_TRAINDEC_FORCE_CGEN (0x1<<31) //31:31 -#define RG_SSUSB_PHYD_BERTLB_FORCE_CGEN (0x1<<30) //30:30 -#define RG_SSUSB_PHYD_T2RLB_FORCE_CGEN (0x1<<29) //29:29 -#define RG_SSUSB_PDN_T_SEL (0x3<<18) //19:18 -#define RG_SSUSB_RXDET_STB3_SET_P3 (0x1ff<<9) //17:9 -#define RG_SSUSB_RXDET_STB2_SET_P3 (0x1ff<<0) //8:0 - -//U3D_B2_PHYD_MISC0 -#define RG_SSUSB_FORCE_PLL_DDS_HF_EN (0x1<<22) //22:22 -#define RG_SSUSB_PLL_DDS_HF_EN_MAN (0x1<<21) //21:21 -#define RG_SSUSB_RXLFPS_ENTXDRV (0x1<<20) //20:20 -#define RG_SSUSB_RX_FL_UNLOCKTH (0xf<<16) //19:16 -#define RG_SSUSB_LFPS_PSEL (0x1<<15) //15:15 -#define RG_SSUSB_RX_SIGDET_EN (0x1<<14) //14:14 -#define RG_SSUSB_RX_SIGDET_EN_SEL (0x1<<13) //13:13 -#define RG_SSUSB_RX_PI_CAL_EN (0x1<<12) //12:12 -#define RG_SSUSB_RX_PI_CAL_EN_SEL (0x1<<11) //11:11 -#define RG_SSUSB_P3_CLS_CK_SEL (0x1<<10) //10:10 -#define RG_SSUSB_T2RLB_PSEL (0x3<<8) //9:8 -#define RG_SSUSB_PPCTL_PSEL (0x7<<5) //7:5 -#define RG_SSUSB_PHYD_TX_DATA_INV (0x1<<4) //4:4 -#define RG_SSUSB_BERTLB_PSEL (0x3<<2) //3:2 -#define RG_SSUSB_RETRACK_DIS (0x1<<1) //1:1 -#define RG_SSUSB_PPERRCNT_CLR (0x1<<0) //0:0 - -//U3D_B2_PHYD_MISC2 -#define RG_SSUSB_FRC_PLL_DDS_PREDIV2 (0x1<<31) //31:31 -#define RG_SSUSB_FRC_PLL_DDS_IADJ (0xf<<27) //30:27 -#define RG_SSUSB_P_SIGDET_125FILTER (0x1<<26) //26:26 -#define RG_SSUSB_P_SIGDET_RST_FILTER (0x1<<25) //25:25 -#define RG_SSUSB_P_SIGDET_EID_USE_RAW (0x1<<24) //24:24 -#define RG_SSUSB_P_SIGDET_LTD_USE_RAW (0x1<<23) //23:23 -#define RG_SSUSB_EIDLE_BF_RXDET (0x1<<22) //22:22 -#define RG_SSUSB_EIDLE_LP_STBCYC (0x1ff<<13) //21:13 -#define RG_SSUSB_TX_EIDLE_LP_POSTDLY (0x3f<<7) //12:7 -#define RG_SSUSB_TX_EIDLE_LP_PREDLY (0x3f<<1) //6:1 -#define RG_SSUSB_TX_EIDLE_LP_EN_ADV (0x1<<0) //0:0 - -//U3D_B2_PHYD_MISC3 -#define RGS_SSUSB_DDS_CALIB_C_STATE (0x7<<16) //18:16 -#define RGS_SSUSB_PPERRCNT (0xffff<<0) //15:0 - -//U3D_B2_ROSC_0 -#define RG_SSUSB_RING_OSC_CNTEND (0x1ff<<23) //31:23 -#define RG_SSUSB_XTAL_OSC_CNTEND (0x7f<<16) //22:16 -#define RG_SSUSB_RING_OSC_EN (0x1<<3) //3:3 -#define RG_SSUSB_RING_OSC_FORCE_EN (0x1<<2) //2:2 -#define RG_SSUSB_FRC_RING_BYPASS_DET (0x1<<1) //1:1 -#define RG_SSUSB_RING_BYPASS_DET (0x1<<0) //0:0 - -//U3D_B2_ROSC_1 -#define RG_SSUSB_RING_OSC_FRC_P3 (0x1<<20) //20:20 -#define RG_SSUSB_RING_OSC_P3 (0x1<<19) //19:19 -#define 
RG_SSUSB_RING_OSC_FRC_RECAL (0x3<<17) //18:17 -#define RG_SSUSB_RING_OSC_RECAL (0x1<<16) //16:16 -#define RG_SSUSB_RING_OSC_SEL (0xff<<8) //15:8 -#define RG_SSUSB_RING_OSC_FRC_SEL (0x1<<0) //0:0 - -//U3D_B2_ROSC_2 -#define RG_SSUSB_RING_DET_STRCYC2 (0xffff<<16) //31:16 -#define RG_SSUSB_RING_DET_STRCYC1 (0xffff<<0) //15:0 - -//U3D_B2_ROSC_3 -#define RG_SSUSB_RING_DET_DETWIN1 (0xffff<<16) //31:16 -#define RG_SSUSB_RING_DET_STRCYC3 (0xffff<<0) //15:0 - -//U3D_B2_ROSC_4 -#define RG_SSUSB_RING_DET_DETWIN3 (0xffff<<16) //31:16 -#define RG_SSUSB_RING_DET_DETWIN2 (0xffff<<0) //15:0 - -//U3D_B2_ROSC_5 -#define RG_SSUSB_RING_DET_LBOND1 (0xffff<<16) //31:16 -#define RG_SSUSB_RING_DET_UBOND1 (0xffff<<0) //15:0 - -//U3D_B2_ROSC_6 -#define RG_SSUSB_RING_DET_LBOND2 (0xffff<<16) //31:16 -#define RG_SSUSB_RING_DET_UBOND2 (0xffff<<0) //15:0 - -//U3D_B2_ROSC_7 -#define RG_SSUSB_RING_DET_LBOND3 (0xffff<<16) //31:16 -#define RG_SSUSB_RING_DET_UBOND3 (0xffff<<0) //15:0 - -//U3D_B2_ROSC_8 -#define RG_SSUSB_RING_RESERVE (0xffff<<16) //31:16 -#define RG_SSUSB_ROSC_PROB_SEL (0xf<<2) //5:2 -#define RG_SSUSB_RING_FREQMETER_EN (0x1<<1) //1:1 -#define RG_SSUSB_RING_DET_BPS_UBOND (0x1<<0) //0:0 - -//U3D_B2_ROSC_9 -#define RGS_FM_RING_CNT (0xffff<<16) //31:16 -#define RGS_SSUSB_RING_OSC_STATE (0x3<<10) //11:10 -#define RGS_SSUSB_RING_OSC_STABLE (0x1<<9) //9:9 -#define RGS_SSUSB_RING_OSC_CAL_FAIL (0x1<<8) //8:8 -#define RGS_SSUSB_RING_OSC_CAL (0xff<<0) //7:0 - -//U3D_B2_ROSC_A -#define RGS_SSUSB_ROSC_PROB_OUT (0xff<<0) //7:0 - -//U3D_PHYD_VERSION -#define RGS_SSUSB_PHYD_VERSION (0xffffffff<<0) //31:0 - -//U3D_PHYD_MODEL -#define RGS_SSUSB_PHYD_MODEL (0xffffffff<<0) //31:0 - - -/* OFFSET */ - -//U3D_B2_PHYD_TOP1 -#define RG_SSUSB_PCIE2_K_EMP_OFST (28) -#define RG_SSUSB_PCIE2_K_FUL_OFST (24) -#define RG_SSUSB_TX_EIDLE_LP_EN_OFST (17) -#define RG_SSUSB_FORCE_TX_EIDLE_LP_EN_OFST (16) -#define RG_SSUSB_SIGDET_EN_OFST (15) -#define RG_SSUSB_FORCE_SIGDET_EN_OFST (14) -#define RG_SSUSB_CLKRX_EN_OFST (13) -#define RG_SSUSB_FORCE_CLKRX_EN_OFST (12) -#define RG_SSUSB_CLKTX_EN_OFST (11) -#define RG_SSUSB_FORCE_CLKTX_EN_OFST (10) -#define RG_SSUSB_CLK_REQ_N_I_OFST (9) -#define RG_SSUSB_FORCE_CLK_REQ_N_I_OFST (8) -#define RG_SSUSB_RATE_OFST (6) -#define RG_SSUSB_FORCE_RATE_OFST (5) -#define RG_SSUSB_PCIE_MODE_SEL_OFST (4) -#define RG_SSUSB_FORCE_PCIE_MODE_SEL_OFST (3) -#define RG_SSUSB_PHY_MODE_OFST (1) -#define RG_SSUSB_FORCE_PHY_MODE_OFST (0) - -//U3D_B2_PHYD_TOP2 -#define RG_SSUSB_FORCE_IDRV_6DB_OFST (30) -#define RG_SSUSB_IDRV_6DB_OFST (24) -#define RG_SSUSB_FORCE_IDEM_3P5DB_OFST (22) -#define RG_SSUSB_IDEM_3P5DB_OFST (16) -#define RG_SSUSB_FORCE_IDRV_3P5DB_OFST (14) -#define RG_SSUSB_IDRV_3P5DB_OFST (8) -#define RG_SSUSB_FORCE_IDRV_0DB_OFST (6) -#define RG_SSUSB_IDRV_0DB_OFST (0) - -//U3D_B2_PHYD_TOP3 -#define RG_SSUSB_TX_BIASI_OFST (25) -#define RG_SSUSB_FORCE_TX_BIASI_EN_OFST (24) -#define RG_SSUSB_TX_BIASI_EN_OFST (16) -#define RG_SSUSB_FORCE_TX_BIASI_OFST (13) -#define RG_SSUSB_FORCE_IDEM_6DB_OFST (8) -#define RG_SSUSB_IDEM_6DB_OFST (0) - -//U3D_B2_PHYD_TOP4 -#define RG_SSUSB_G1_CDR_BIC_LTR_OFST (28) -#define RG_SSUSB_G1_CDR_BIC_LTD0_OFST (24) -#define RG_SSUSB_G1_CDR_BC_LTD1_OFST (16) -#define RG_SSUSB_G1_CDR_BC_LTR_OFST (8) -#define RG_SSUSB_G1_CDR_BC_LTD0_OFST (0) - -//U3D_B2_PHYD_TOP5 -#define RG_SSUSB_G1_CDR_BIR_LTD1_OFST (24) -#define RG_SSUSB_G1_CDR_BIR_LTR_OFST (16) -#define RG_SSUSB_G1_CDR_BIR_LTD0_OFST (8) -#define RG_SSUSB_G1_CDR_BIC_LTD1_OFST (0) - -//U3D_B2_PHYD_TOP6 -#define RG_SSUSB_G2_CDR_BIC_LTR_OFST (28) 
-#define RG_SSUSB_G2_CDR_BIC_LTD0_OFST (24) -#define RG_SSUSB_G2_CDR_BC_LTD1_OFST (16) -#define RG_SSUSB_G2_CDR_BC_LTR_OFST (8) -#define RG_SSUSB_G2_CDR_BC_LTD0_OFST (0) - -//U3D_B2_PHYD_TOP7 -#define RG_SSUSB_G2_CDR_BIR_LTD1_OFST (24) -#define RG_SSUSB_G2_CDR_BIR_LTR_OFST (16) -#define RG_SSUSB_G2_CDR_BIR_LTD0_OFST (8) -#define RG_SSUSB_G2_CDR_BIC_LTD1_OFST (0) - -//U3D_B2_PHYD_P_SIGDET1 -#define RG_SSUSB_P_SIGDET_FLT_DIS_OFST (31) -#define RG_SSUSB_P_SIGDET_FLT_G2_DEAST_SEL_OFST (24) -#define RG_SSUSB_P_SIGDET_FLT_G1_DEAST_SEL_OFST (16) -#define RG_SSUSB_P_SIGDET_FLT_P2_AST_SEL_OFST (8) -#define RG_SSUSB_P_SIGDET_FLT_PX_AST_SEL_OFST (0) - -//U3D_B2_PHYD_P_SIGDET2 -#define RG_SSUSB_P_SIGDET_RX_VAL_S_OFST (29) -#define RG_SSUSB_P_SIGDET_L0S_DEAS_SEL_OFST (28) -#define RG_SSUSB_P_SIGDET_L0_EXIT_S_OFST (27) -#define RG_SSUSB_P_SIGDET_L0S_EXIT_T_S_OFST (25) -#define RG_SSUSB_P_SIGDET_L0S_EXIT_S_OFST (24) -#define RG_SSUSB_P_SIGDET_L0S_ENTRY_S_OFST (16) -#define RG_SSUSB_P_SIGDET_PRB_SEL_OFST (10) -#define RG_SSUSB_P_SIGDET_BK_SIG_T_OFST (8) -#define RG_SSUSB_P_SIGDET_P2_RXLFPS_OFST (6) -#define RG_SSUSB_P_SIGDET_NON_BK_AD_OFST (5) -#define RG_SSUSB_P_SIGDET_BK_B_RXEQ_OFST (4) -#define RG_SSUSB_P_SIGDET_G2_KO_SEL_OFST (2) -#define RG_SSUSB_P_SIGDET_G1_KO_SEL_OFST (0) - -//U3D_B2_PHYD_P_SIGDET_CAL1 -#define RG_SSUSB_P_SIGDET_CAL_OFFSET_OFST (24) -#define RG_SSUSB_P_FORCE_SIGDET_CAL_OFFSET_OFST (16) -#define RG_SSUSB_P_SIGDET_CAL_EN_OFST (8) -#define RG_SSUSB_P_FORCE_SIGDET_CAL_EN_OFST (3) -#define RG_SSUSB_P_SIGDET_FLT_EN_OFST (2) -#define RG_SSUSB_P_SIGDET_SAMPLE_PRD_OFST (1) -#define RG_SSUSB_P_SIGDET_REK_OFST (0) - -//U3D_B2_PHYD_RXDET1 -#define RG_SSUSB_RXDET_PRB_SEL_OFST (31) -#define RG_SSUSB_FORCE_CMDET_OFST (30) -#define RG_SSUSB_RXDET_EN_OFST (29) -#define RG_SSUSB_FORCE_RXDET_EN_OFST (28) -#define RG_SSUSB_RXDET_K_TWICE_OFST (27) -#define RG_SSUSB_RXDET_STB3_SET_OFST (18) -#define RG_SSUSB_RXDET_STB2_SET_OFST (9) -#define RG_SSUSB_RXDET_STB1_SET_OFST (0) - -//U3D_B2_PHYD_RXDET2 -#define RG_SSUSB_PHYD_TRAINDEC_FORCE_CGEN_OFST (31) -#define RG_SSUSB_PHYD_BERTLB_FORCE_CGEN_OFST (30) -#define RG_SSUSB_PHYD_T2RLB_FORCE_CGEN_OFST (29) -#define RG_SSUSB_PDN_T_SEL_OFST (18) -#define RG_SSUSB_RXDET_STB3_SET_P3_OFST (9) -#define RG_SSUSB_RXDET_STB2_SET_P3_OFST (0) - -//U3D_B2_PHYD_MISC0 -#define RG_SSUSB_FORCE_PLL_DDS_HF_EN_OFST (22) -#define RG_SSUSB_PLL_DDS_HF_EN_MAN_OFST (21) -#define RG_SSUSB_RXLFPS_ENTXDRV_OFST (20) -#define RG_SSUSB_RX_FL_UNLOCKTH_OFST (16) -#define RG_SSUSB_LFPS_PSEL_OFST (15) -#define RG_SSUSB_RX_SIGDET_EN_OFST (14) -#define RG_SSUSB_RX_SIGDET_EN_SEL_OFST (13) -#define RG_SSUSB_RX_PI_CAL_EN_OFST (12) -#define RG_SSUSB_RX_PI_CAL_EN_SEL_OFST (11) -#define RG_SSUSB_P3_CLS_CK_SEL_OFST (10) -#define RG_SSUSB_T2RLB_PSEL_OFST (8) -#define RG_SSUSB_PPCTL_PSEL_OFST (5) -#define RG_SSUSB_PHYD_TX_DATA_INV_OFST (4) -#define RG_SSUSB_BERTLB_PSEL_OFST (2) -#define RG_SSUSB_RETRACK_DIS_OFST (1) -#define RG_SSUSB_PPERRCNT_CLR_OFST (0) - -//U3D_B2_PHYD_MISC2 -#define RG_SSUSB_FRC_PLL_DDS_PREDIV2_OFST (31) -#define RG_SSUSB_FRC_PLL_DDS_IADJ_OFST (27) -#define RG_SSUSB_P_SIGDET_125FILTER_OFST (26) -#define RG_SSUSB_P_SIGDET_RST_FILTER_OFST (25) -#define RG_SSUSB_P_SIGDET_EID_USE_RAW_OFST (24) -#define RG_SSUSB_P_SIGDET_LTD_USE_RAW_OFST (23) -#define RG_SSUSB_EIDLE_BF_RXDET_OFST (22) -#define RG_SSUSB_EIDLE_LP_STBCYC_OFST (13) -#define RG_SSUSB_TX_EIDLE_LP_POSTDLY_OFST (7) -#define RG_SSUSB_TX_EIDLE_LP_PREDLY_OFST (1) -#define RG_SSUSB_TX_EIDLE_LP_EN_ADV_OFST (0) - -//U3D_B2_PHYD_MISC3 
-#define RGS_SSUSB_DDS_CALIB_C_STATE_OFST (16) -#define RGS_SSUSB_PPERRCNT_OFST (0) - -//U3D_B2_ROSC_0 -#define RG_SSUSB_RING_OSC_CNTEND_OFST (23) -#define RG_SSUSB_XTAL_OSC_CNTEND_OFST (16) -#define RG_SSUSB_RING_OSC_EN_OFST (3) -#define RG_SSUSB_RING_OSC_FORCE_EN_OFST (2) -#define RG_SSUSB_FRC_RING_BYPASS_DET_OFST (1) -#define RG_SSUSB_RING_BYPASS_DET_OFST (0) - -//U3D_B2_ROSC_1 -#define RG_SSUSB_RING_OSC_FRC_P3_OFST (20) -#define RG_SSUSB_RING_OSC_P3_OFST (19) -#define RG_SSUSB_RING_OSC_FRC_RECAL_OFST (17) -#define RG_SSUSB_RING_OSC_RECAL_OFST (16) -#define RG_SSUSB_RING_OSC_SEL_OFST (8) -#define RG_SSUSB_RING_OSC_FRC_SEL_OFST (0) - -//U3D_B2_ROSC_2 -#define RG_SSUSB_RING_DET_STRCYC2_OFST (16) -#define RG_SSUSB_RING_DET_STRCYC1_OFST (0) - -//U3D_B2_ROSC_3 -#define RG_SSUSB_RING_DET_DETWIN1_OFST (16) -#define RG_SSUSB_RING_DET_STRCYC3_OFST (0) - -//U3D_B2_ROSC_4 -#define RG_SSUSB_RING_DET_DETWIN3_OFST (16) -#define RG_SSUSB_RING_DET_DETWIN2_OFST (0) - -//U3D_B2_ROSC_5 -#define RG_SSUSB_RING_DET_LBOND1_OFST (16) -#define RG_SSUSB_RING_DET_UBOND1_OFST (0) - -//U3D_B2_ROSC_6 -#define RG_SSUSB_RING_DET_LBOND2_OFST (16) -#define RG_SSUSB_RING_DET_UBOND2_OFST (0) - -//U3D_B2_ROSC_7 -#define RG_SSUSB_RING_DET_LBOND3_OFST (16) -#define RG_SSUSB_RING_DET_UBOND3_OFST (0) - -//U3D_B2_ROSC_8 -#define RG_SSUSB_RING_RESERVE_OFST (16) -#define RG_SSUSB_ROSC_PROB_SEL_OFST (2) -#define RG_SSUSB_RING_FREQMETER_EN_OFST (1) -#define RG_SSUSB_RING_DET_BPS_UBOND_OFST (0) - -//U3D_B2_ROSC_9 -#define RGS_FM_RING_CNT_OFST (16) -#define RGS_SSUSB_RING_OSC_STATE_OFST (10) -#define RGS_SSUSB_RING_OSC_STABLE_OFST (9) -#define RGS_SSUSB_RING_OSC_CAL_FAIL_OFST (8) -#define RGS_SSUSB_RING_OSC_CAL_OFST (0) - -//U3D_B2_ROSC_A -#define RGS_SSUSB_ROSC_PROB_OUT_OFST (0) - -//U3D_PHYD_VERSION -#define RGS_SSUSB_PHYD_VERSION_OFST (0) - -//U3D_PHYD_MODEL -#define RGS_SSUSB_PHYD_MODEL_OFST (0) - - -/////////////////////////////////////////////////////////////////////////////// - -struct sifslv_chip_reg { - PHY_LE32 xtalbias; - PHY_LE32 syspll1; - PHY_LE32 gpio_ctla; - PHY_LE32 gpio_ctlb; - PHY_LE32 gpio_ctlc; -}; - -//U3D_GPIO_CTLA -#define RG_C60802_GPIO_CTLA (0xffffffff<<0) //31:0 - -//U3D_GPIO_CTLB -#define RG_C60802_GPIO_CTLB (0xffffffff<<0) //31:0 - -//U3D_GPIO_CTLC -#define RG_C60802_GPIO_CTLC (0xffffffff<<0) //31:0 - -/* OFFSET */ - -//U3D_GPIO_CTLA -#define RG_C60802_GPIO_CTLA_OFST (0) - -//U3D_GPIO_CTLB -#define RG_C60802_GPIO_CTLB_OFST (0) - -//U3D_GPIO_CTLC -#define RG_C60802_GPIO_CTLC_OFST (0) - -/////////////////////////////////////////////////////////////////////////////// - -struct sifslv_fm_feg { - //0x0 - PHY_LE32 fmcr0; - PHY_LE32 fmcr1; - PHY_LE32 fmcr2; - PHY_LE32 fmmonr0; - //0x10 - PHY_LE32 fmmonr1; -}; - -//U3D_FMCR0 -#define RG_LOCKTH (0xf<<28) //31:28 -#define RG_MONCLK_SEL (0x3<<26) //27:26 -#define RG_FM_MODE (0x1<<25) //25:25 -#define RG_FREQDET_EN (0x1<<24) //24:24 -#define RG_CYCLECNT (0xffffff<<0) //23:0 - -//U3D_FMCR1 -#define RG_TARGET (0xffffffff<<0) //31:0 - -//U3D_FMCR2 -#define RG_OFFSET (0xffffffff<<0) //31:0 - -//U3D_FMMONR0 -#define USB_FM_OUT (0xffffffff<<0) //31:0 - -//U3D_FMMONR1 -#define RG_MONCLK_SEL_3 (0x1<<9) //9:9 -#define RG_FRCK_EN (0x1<<8) //8:8 -#define USBPLL_LOCK (0x1<<1) //1:1 -#define USB_FM_VLD (0x1<<0) //0:0 - - -/* OFFSET */ - -//U3D_FMCR0 -#define RG_LOCKTH_OFST (28) -#define RG_MONCLK_SEL_OFST (26) -#define RG_FM_MODE_OFST (25) -#define RG_FREQDET_EN_OFST (24) -#define RG_CYCLECNT_OFST (0) - -//U3D_FMCR1 -#define RG_TARGET_OFST (0) - -//U3D_FMCR2 -#define 
RG_OFFSET_OFST (0) - -//U3D_FMMONR0 -#define USB_FM_OUT_OFST (0) - -//U3D_FMMONR1 -#define RG_MONCLK_SEL_3_OFST (9) -#define RG_FRCK_EN_OFST (8) -#define USBPLL_LOCK_OFST (1) -#define USB_FM_VLD_OFST (0) - - -/////////////////////////////////////////////////////////////////////////////// - -PHY_INT32 phy_init(struct u3phy_info *info); -PHY_INT32 phy_change_pipe_phase(struct u3phy_info *info, PHY_INT32 phy_drv, PHY_INT32 pipe_phase); -PHY_INT32 eyescan_init(struct u3phy_info *info); -PHY_INT32 phy_eyescan(struct u3phy_info *info, PHY_INT32 x_t1, PHY_INT32 y_t1, PHY_INT32 x_br, PHY_INT32 y_br, PHY_INT32 delta_x, PHY_INT32 delta_y - , PHY_INT32 eye_cnt, PHY_INT32 num_cnt, PHY_INT32 PI_cal_en, PHY_INT32 num_ignore_cnt); -PHY_INT32 u2_save_cur_en(struct u3phy_info *info); -PHY_INT32 u2_save_cur_re(struct u3phy_info *info); -PHY_INT32 u2_slew_rate_calibration(struct u3phy_info *info); - -#endif -#endif diff --git a/target/linux/ramips/files/drivers/usb/host/mtk-phy-ahb.c b/target/linux/ramips/files/drivers/usb/host/mtk-phy-ahb.c deleted file mode 100644 index ebaf7c8b15..0000000000 --- a/target/linux/ramips/files/drivers/usb/host/mtk-phy-ahb.c +++ /dev/null @@ -1,58 +0,0 @@ -#include "mtk-phy.h" -#ifdef CONFIG_U3D_HAL_SUPPORT -#include "mu3d_hal_osal.h" -#endif - -#ifdef CONFIG_U3_PHY_AHB_SUPPORT -#include -#include -#include - -#ifndef CONFIG_U3D_HAL_SUPPORT -#define os_writel(addr,data) {\ - (*((volatile PHY_UINT32*)(addr)) = data);\ - } -#define os_readl(addr) *((volatile PHY_UINT32*)(addr)) -#define os_writelmsk(addr, data, msk) \ - { os_writel(addr, ((os_readl(addr) & ~(msk)) | ((data) & (msk)))); \ - } -#define os_setmsk(addr, msk) \ - { os_writel(addr, os_readl(addr) | msk); \ - } -#define os_clrmsk(addr, msk) \ - { os_writel(addr, os_readl(addr) &~ msk); \ - } -/*msk the data first, then umsk with the umsk.*/ -#define os_writelmskumsk(addr, data, msk, umsk) \ -{\ - os_writel(addr, ((os_readl(addr) & ~(msk)) | ((data) & (msk))) & (umsk));\ -} - -#endif - -PHY_INT32 U3PhyWriteReg32(PHY_UINT32 addr, PHY_UINT32 data) -{ - os_writel(addr, data); - - return 0; -} - -PHY_INT32 U3PhyReadReg32(PHY_UINT32 addr) -{ - return os_readl(addr); -} - -PHY_INT32 U3PhyWriteReg8(PHY_UINT32 addr, PHY_UINT8 data) -{ - os_writelmsk(addr&0xfffffffc, data<<((addr%4)*8), 0xff<<((addr%4)*8)); - - return 0; -} - -PHY_INT8 U3PhyReadReg8(PHY_UINT32 addr) -{ - return ((os_readl(addr)>>((addr%4)*8))&0xff); -} - -#endif - diff --git a/target/linux/ramips/files/drivers/usb/host/mtk-phy.c b/target/linux/ramips/files/drivers/usb/host/mtk-phy.c deleted file mode 100644 index 7ed8f015b8..0000000000 --- a/target/linux/ramips/files/drivers/usb/host/mtk-phy.c +++ /dev/null @@ -1,102 +0,0 @@ -#include -#include -#include -#define U3_PHY_LIB -#include "mtk-phy.h" -#ifdef CONFIG_PROJECT_7621 -#include "mtk-phy-7621.h" -#endif -#ifdef CONFIG_PROJECT_PHY -static struct u3phy_operator project_operators = { - .init = phy_init, - .change_pipe_phase = phy_change_pipe_phase, - .eyescan_init = eyescan_init, - .eyescan = phy_eyescan, - .u2_slew_rate_calibration = u2_slew_rate_calibration, -}; -#endif - - -PHY_INT32 u3phy_init(){ -#ifndef CONFIG_PROJECT_PHY - PHY_INT32 u3phy_version; -#endif - - if(u3phy != NULL){ - return PHY_TRUE; - } - - u3phy = kmalloc(sizeof(struct u3phy_info), GFP_NOIO); -#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) - u3phy_p1 = kmalloc(sizeof(struct u3phy_info), GFP_NOIO); -#endif -#ifdef CONFIG_U3_PHY_GPIO_SUPPORT - u3phy->phyd_version_addr = 0x2000e4; -#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) - 
u3phy_p1->phyd_version_addr = 0x2000e4; -#endif -#else - u3phy->phyd_version_addr = U3_PHYD_B2_BASE + 0xe4; -#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) - u3phy_p1->phyd_version_addr = U3_PHYD_B2_BASE_P1 + 0xe4; -#endif -#endif - -#ifdef CONFIG_PROJECT_PHY - - u3phy->u2phy_regs = (struct u2phy_reg *)U2_PHY_BASE; - u3phy->u3phyd_regs = (struct u3phyd_reg *)U3_PHYD_BASE; - u3phy->u3phyd_bank2_regs = (struct u3phyd_bank2_reg *)U3_PHYD_B2_BASE; - u3phy->u3phya_regs = (struct u3phya_reg *)U3_PHYA_BASE; - u3phy->u3phya_da_regs = (struct u3phya_da_reg *)U3_PHYA_DA_BASE; - u3phy->sifslv_chip_regs = (struct sifslv_chip_reg *)SIFSLV_CHIP_BASE; - u3phy->sifslv_fm_regs = (struct sifslv_fm_feg *)SIFSLV_FM_FEG_BASE; - u3phy_ops = &project_operators; - -#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) - u3phy_p1->u2phy_regs = (struct u2phy_reg *)U2_PHY_BASE_P1; - u3phy_p1->u3phyd_regs = (struct u3phyd_reg *)U3_PHYD_BASE_P1; - u3phy_p1->u3phyd_bank2_regs = (struct u3phyd_bank2_reg *)U3_PHYD_B2_BASE_P1; - u3phy_p1->u3phya_regs = (struct u3phya_reg *)U3_PHYA_BASE_P1; - u3phy_p1->u3phya_da_regs = (struct u3phya_da_reg *)U3_PHYA_DA_BASE_P1; - u3phy_p1->sifslv_chip_regs = (struct sifslv_chip_reg *)SIFSLV_CHIP_BASE; - u3phy_p1->sifslv_fm_regs = (struct sifslv_fm_feg *)SIFSLV_FM_FEG_BASE; -#endif -#endif - - return PHY_TRUE; -} - -PHY_INT32 U3PhyWriteField8(PHY_INT32 addr, PHY_INT32 offset, PHY_INT32 mask, PHY_INT32 value){ - PHY_INT8 cur_value; - PHY_INT8 new_value; - - cur_value = U3PhyReadReg8(addr); - new_value = (cur_value & (~mask)) | (value << offset); - //udelay(i2cdelayus); - U3PhyWriteReg8(addr, new_value); - return PHY_TRUE; -} - -PHY_INT32 U3PhyWriteField32(PHY_INT32 addr, PHY_INT32 offset, PHY_INT32 mask, PHY_INT32 value){ - PHY_INT32 cur_value; - PHY_INT32 new_value; - - cur_value = U3PhyReadReg32(addr); - new_value = (cur_value & (~mask)) | ((value << offset) & mask); - U3PhyWriteReg32(addr, new_value); - //DRV_MDELAY(100); - - return PHY_TRUE; -} - -PHY_INT32 U3PhyReadField8(PHY_INT32 addr,PHY_INT32 offset,PHY_INT32 mask){ - - return ((U3PhyReadReg8(addr) & mask) >> offset); -} - -PHY_INT32 U3PhyReadField32(PHY_INT32 addr, PHY_INT32 offset, PHY_INT32 mask){ - - return ((U3PhyReadReg32(addr) & mask) >> offset); -} - diff --git a/target/linux/ramips/files/drivers/usb/host/mtk-phy.h b/target/linux/ramips/files/drivers/usb/host/mtk-phy.h deleted file mode 100644 index 07ed410412..0000000000 --- a/target/linux/ramips/files/drivers/usb/host/mtk-phy.h +++ /dev/null @@ -1,179 +0,0 @@ -#ifndef __MTK_PHY_NEW_H -#define __MTK_PHY_NEW_H - -//#define CONFIG_U3D_HAL_SUPPORT - -/* include system library */ -#include -#include -#include -#include - -/* Choose PHY R/W implementation */ -//#define CONFIG_U3_PHY_GPIO_SUPPORT //SW I2C implemented by GPIO -#define CONFIG_U3_PHY_AHB_SUPPORT //AHB, only on SoC - -/* Choose PHY version */ -//Select your project by defining one of the followings -#define CONFIG_PROJECT_7621 //7621 -#define CONFIG_PROJECT_PHY - -/* BASE ADDRESS DEFINE, should define this on ASIC */ -#define PHY_BASE 0xBE1D0000 -#define SIFSLV_FM_FEG_BASE (PHY_BASE+0x100) -#define SIFSLV_CHIP_BASE (PHY_BASE+0x700) -#define U2_PHY_BASE (PHY_BASE+0x800) -#define U3_PHYD_BASE (PHY_BASE+0x900) -#define U3_PHYD_B2_BASE (PHY_BASE+0xa00) -#define U3_PHYA_BASE (PHY_BASE+0xb00) -#define U3_PHYA_DA_BASE (PHY_BASE+0xc00) - -#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) -#define SIFSLV_FM_FEG_BASE_P1 (PHY_BASE+0x100) -#define SIFSLV_CHIP_BASE_P1 (PHY_BASE+0x700) -#define U2_PHY_BASE_P1 (PHY_BASE+0x1000) -#define 
U3_PHYD_BASE_P1 (PHY_BASE+0x1100) -#define U3_PHYD_B2_BASE_P1 (PHY_BASE+0x1200) -#define U3_PHYA_BASE_P1 (PHY_BASE+0x1300) -#define U3_PHYA_DA_BASE_P1 (PHY_BASE+0x1400) -#endif - -/* - -0x00000100 MODULE ssusb_sifslv_fmreg ssusb_sifslv_fmreg -0x00000700 MODULE ssusb_sifslv_ippc ssusb_sifslv_ippc -0x00000800 MODULE ssusb_sifslv_u2phy_com ssusb_sifslv_u2_phy_com_T28 -0x00000900 MODULE ssusb_sifslv_u3phyd ssusb_sifslv_u3phyd_T28 -0x00000a00 MODULE ssusb_sifslv_u3phyd_bank2 ssusb_sifslv_u3phyd_bank2_T28 -0x00000b00 MODULE ssusb_sifslv_u3phya ssusb_sifslv_u3phya_T28 -0x00000c00 MODULE ssusb_sifslv_u3phya_da ssusb_sifslv_u3phya_da_T28 -*/ - - -/* TYPE DEFINE */ -typedef unsigned int PHY_UINT32; -typedef int PHY_INT32; -typedef unsigned short PHY_UINT16; -typedef short PHY_INT16; -typedef unsigned char PHY_UINT8; -typedef char PHY_INT8; - -typedef PHY_UINT32 __bitwise PHY_LE32; - -/* CONSTANT DEFINE */ -#define PHY_FALSE 0 -#define PHY_TRUE 1 - -/* MACRO DEFINE */ -#define DRV_WriteReg32(addr,data) ((*(volatile PHY_UINT32 *)(addr)) = (unsigned long)(data)) -#define DRV_Reg32(addr) (*(volatile PHY_UINT32 *)(addr)) - -#define DRV_MDELAY mdelay -#define DRV_MSLEEP msleep -#define DRV_UDELAY udelay -#define DRV_USLEEP usleep - -/* PHY FUNCTION DEFINE, implemented in platform files, ex. ahb, gpio */ -PHY_INT32 U3PhyWriteReg32(PHY_UINT32 addr, PHY_UINT32 data); -PHY_INT32 U3PhyReadReg32(PHY_UINT32 addr); -PHY_INT32 U3PhyWriteReg8(PHY_UINT32 addr, PHY_UINT8 data); -PHY_INT8 U3PhyReadReg8(PHY_UINT32 addr); - -/* PHY GENERAL USAGE FUNC, implemented in mtk-phy.c */ -PHY_INT32 U3PhyWriteField8(PHY_INT32 addr, PHY_INT32 offset, PHY_INT32 mask, PHY_INT32 value); -PHY_INT32 U3PhyWriteField32(PHY_INT32 addr, PHY_INT32 offset, PHY_INT32 mask, PHY_INT32 value); -PHY_INT32 U3PhyReadField8(PHY_INT32 addr, PHY_INT32 offset, PHY_INT32 mask); -PHY_INT32 U3PhyReadField32(PHY_INT32 addr, PHY_INT32 offset, PHY_INT32 mask); - -struct u3phy_info { - PHY_INT32 phy_version; - PHY_INT32 phyd_version_addr; - -#ifdef CONFIG_PROJECT_PHY - struct u2phy_reg *u2phy_regs; - struct u3phya_reg *u3phya_regs; - struct u3phya_da_reg *u3phya_da_regs; - struct u3phyd_reg *u3phyd_regs; - struct u3phyd_bank2_reg *u3phyd_bank2_regs; - struct sifslv_chip_reg *sifslv_chip_regs; - struct sifslv_fm_feg *sifslv_fm_regs; -#endif -}; - -struct u3phy_operator { - PHY_INT32 (*init) (struct u3phy_info *info); - PHY_INT32 (*change_pipe_phase) (struct u3phy_info *info, PHY_INT32 phy_drv, PHY_INT32 pipe_phase); - PHY_INT32 (*eyescan_init) (struct u3phy_info *info); - PHY_INT32 (*eyescan) (struct u3phy_info *info, PHY_INT32 x_t1, PHY_INT32 y_t1, PHY_INT32 x_br, PHY_INT32 y_br, PHY_INT32 delta_x, PHY_INT32 delta_y, PHY_INT32 eye_cnt, PHY_INT32 num_cnt, PHY_INT32 PI_cal_en, PHY_INT32 num_ignore_cnt); - PHY_INT32 (*u2_save_current_entry) (struct u3phy_info *info); - PHY_INT32 (*u2_save_current_recovery) (struct u3phy_info *info); - PHY_INT32 (*u2_slew_rate_calibration) (struct u3phy_info *info); -}; - -#ifdef U3_PHY_LIB -#define AUTOEXT -#else -#define AUTOEXT extern -#endif - -AUTOEXT struct u3phy_info *u3phy; -AUTOEXT struct u3phy_info *u3phy_p1; -AUTOEXT struct u3phy_operator *u3phy_ops; - -/*********eye scan required*********/ - -#define LO_BYTE(x) ((PHY_UINT8)((x) & 0xFF)) -#define HI_BYTE(x) ((PHY_UINT8)(((x) & 0xFF00) >> 8)) - -typedef enum -{ - SCAN_UP, - SCAN_DN -} enumScanDir; - -struct strucScanRegion -{ - PHY_INT8 bX_tl; - PHY_INT8 bY_tl; - PHY_INT8 bX_br; - PHY_INT8 bY_br; - PHY_INT8 bDeltaX; - PHY_INT8 bDeltaY; -}; - -struct strucTestCycle -{ - 
PHY_UINT16 wEyeCnt; - PHY_INT8 bNumOfEyeCnt; - PHY_INT8 bPICalEn; - PHY_INT8 bNumOfIgnoreCnt; -}; - -#define ERRCNT_MAX 128 -#define CYCLE_COUNT_MAX 15 - -/// the map resolution is 128 x 128 pts -#define MAX_X 127 -#define MAX_Y 127 -#define MIN_X 0 -#define MIN_Y 0 - -PHY_INT32 u3phy_init(void); - -AUTOEXT struct strucScanRegion _rEye1; -AUTOEXT struct strucScanRegion _rEye2; -AUTOEXT struct strucTestCycle _rTestCycle; -AUTOEXT PHY_UINT8 _bXcurr; -AUTOEXT PHY_UINT8 _bYcurr; -AUTOEXT enumScanDir _eScanDir; -AUTOEXT PHY_INT8 _fgXChged; -AUTOEXT PHY_INT8 _bPIResult; -/* use local variable instead to save memory use */ -#if 0 -AUTOEXT PHY_UINT32 pwErrCnt0[CYCLE_COUNT_MAX][ERRCNT_MAX][ERRCNT_MAX]; -AUTOEXT PHY_UINT32 pwErrCnt1[CYCLE_COUNT_MAX][ERRCNT_MAX][ERRCNT_MAX]; -#endif - -/***********************************/ -#endif - diff --git a/target/linux/ramips/files/drivers/usb/host/xhci-mtk-power.c b/target/linux/ramips/files/drivers/usb/host/xhci-mtk-power.c deleted file mode 100644 index 9d9352678e..0000000000 --- a/target/linux/ramips/files/drivers/usb/host/xhci-mtk-power.c +++ /dev/null @@ -1,115 +0,0 @@ -#include "xhci-mtk.h" -#include "xhci-mtk-power.h" -#include "xhci.h" -#include /* printk() */ -#include -#include - -static int g_num_u3_port; -static int g_num_u2_port; - - -void enableXhciAllPortPower(struct xhci_hcd *xhci){ - int i; - u32 port_id, temp; - u32 __iomem *addr; - - g_num_u3_port = SSUSB_U3_PORT_NUM(readl(SSUSB_IP_CAP)); - g_num_u2_port = SSUSB_U2_PORT_NUM(readl(SSUSB_IP_CAP)); - - for(i=1; i<=g_num_u3_port; i++){ - port_id=i; - addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*(port_id-1 & 0xff); - temp = xhci_readl(xhci, addr); - temp = xhci_port_state_to_neutral(temp); - temp |= PORT_POWER; - xhci_writel(xhci, temp, addr); - } - for(i=1; i<=g_num_u2_port; i++){ - port_id=i+g_num_u3_port; - addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*(port_id-1 & 0xff); - temp = xhci_readl(xhci, addr); - temp = xhci_port_state_to_neutral(temp); - temp |= PORT_POWER; - xhci_writel(xhci, temp, addr); - } -} - -void enableAllClockPower(){ - - int i; - u32 temp; - - g_num_u3_port = SSUSB_U3_PORT_NUM(readl(SSUSB_IP_CAP)); - g_num_u2_port = SSUSB_U2_PORT_NUM(readl(SSUSB_IP_CAP)); - - //2. Enable xHC - writel(readl(SSUSB_IP_PW_CTRL) | (SSUSB_IP_SW_RST), SSUSB_IP_PW_CTRL); - writel(readl(SSUSB_IP_PW_CTRL) & (~SSUSB_IP_SW_RST), SSUSB_IP_PW_CTRL); - writel(readl(SSUSB_IP_PW_CTRL_1) & (~SSUSB_IP_PDN), SSUSB_IP_PW_CTRL_1); - - //1. 
Enable target ports - for(i=0; i -#include "xhci.h" -#include "xhci-mtk.h" - -void enableXhciAllPortPower(struct xhci_hcd *xhci); -void enableAllClockPower(void); -void disablePortClockPower(void); -void enablePortClockPower(int port_index, int port_rev); - -#endif diff --git a/target/linux/ramips/files/drivers/usb/host/xhci-mtk-scheduler.c b/target/linux/ramips/files/drivers/usb/host/xhci-mtk-scheduler.c deleted file mode 100644 index bf6a8bdc19..0000000000 --- a/target/linux/ramips/files/drivers/usb/host/xhci-mtk-scheduler.c +++ /dev/null @@ -1,608 +0,0 @@ -#include "xhci-mtk-scheduler.h" -#include /* printk() */ - -static struct sch_ep **ss_out_eps[MAX_EP_NUM]; -static struct sch_ep **ss_in_eps[MAX_EP_NUM]; -static struct sch_ep **hs_eps[MAX_EP_NUM]; //including tt isoc -static struct sch_ep **tt_intr_eps[MAX_EP_NUM]; - - -int mtk_xhci_scheduler_init(void){ - int i; - - for(i=0; idev_speed = dev_speed; - tmp_ep->isTT = isTT; - tmp_ep->is_in = is_in; - tmp_ep->ep_type = ep_type; - tmp_ep->maxp = maxp; - tmp_ep->interval = interval; - tmp_ep->burst = burst; - tmp_ep->mult = mult; - tmp_ep->offset = offset; - tmp_ep->repeat = repeat; - tmp_ep->pkts = pkts; - tmp_ep->cs_count = cs_count; - tmp_ep->burst_mode = burst_mode; - tmp_ep->bw_cost = bw_cost; - tmp_ep->ep = ep; - ep_array[i] = tmp_ep; - return SCH_SUCCESS; - } - } - return SCH_FAIL; -} - -int count_ss_bw(int is_in, int ep_type, int maxp, int interval, int burst, int mult, int offset, int repeat - , int td_size){ - int i, j, k; - int bw_required[3]; - int final_bw_required; - int bw_required_per_repeat; - int tmp_bw_required; - struct sch_ep *cur_sch_ep; - struct sch_ep **ep_array; - int cur_offset; - int cur_ep_offset; - int tmp_offset; - int tmp_interval; - int ep_offset; - int ep_interval; - int ep_repeat; - int ep_mult; - - if(is_in){ - ep_array = (struct sch_ep **)ss_in_eps; - } - else{ - ep_array = (struct sch_ep **)ss_out_eps; - } - - bw_required[0] = 0; - bw_required[1] = 0; - bw_required[2] = 0; - - if(repeat == 0){ - final_bw_required = 0; - for(i=0; iinterval; - ep_offset = cur_sch_ep->offset; - if(cur_sch_ep->repeat == 0){ - if(ep_interval >= interval){ - tmp_offset = ep_offset + ep_interval - offset; - tmp_interval = interval; - } - else{ - tmp_offset = offset + interval - ep_offset; - tmp_interval = ep_interval; - } - if(tmp_offset % tmp_interval == 0){ - final_bw_required += cur_sch_ep->bw_cost; - } - } - else{ - ep_repeat = cur_sch_ep->repeat; - ep_mult = cur_sch_ep->mult; - for(k=0; k<=ep_mult; k++){ - cur_ep_offset = ep_offset+(k*ep_mult); - if(ep_interval >= interval){ - tmp_offset = cur_ep_offset + ep_interval - offset; - tmp_interval = interval; - } - else{ - tmp_offset = offset + interval - cur_ep_offset; - tmp_interval = ep_interval; - } - if(tmp_offset % tmp_interval == 0){ - final_bw_required += cur_sch_ep->bw_cost; - break; - } - } - } - } - final_bw_required += td_size; - } - else{ - bw_required_per_repeat = maxp * (burst+1); - for(j=0; j<=mult; j++){ - tmp_bw_required = 0; - cur_offset = offset+(j*repeat); - for(i=0; iinterval; - ep_offset = cur_sch_ep->offset; - if(cur_sch_ep->repeat == 0){ - if(ep_interval >= interval){ - tmp_offset = ep_offset + ep_interval - cur_offset; - tmp_interval = interval; - } - else{ - tmp_offset = cur_offset + interval - ep_offset; - tmp_interval = ep_interval; - } - if(tmp_offset % tmp_interval == 0){ - tmp_bw_required += cur_sch_ep->bw_cost; - } - } - else{ - ep_repeat = cur_sch_ep->repeat; - ep_mult = cur_sch_ep->mult; - for(k=0; k<=ep_mult; k++){ - cur_ep_offset = 
ep_offset+(k*ep_repeat); - if(ep_interval >= interval){ - tmp_offset = cur_ep_offset + ep_interval - cur_offset; - tmp_interval = interval; - } - else{ - tmp_offset = cur_offset + interval - cur_ep_offset; - tmp_interval = ep_interval; - } - if(tmp_offset % tmp_interval == 0){ - tmp_bw_required += cur_sch_ep->bw_cost; - break; - } - } - } - } - bw_required[j] = tmp_bw_required; - } - final_bw_required = SS_BW_BOUND; - for(j=0; j<=mult; j++){ - if(bw_required[j] < final_bw_required){ - final_bw_required = bw_required[j]; - } - } - final_bw_required += bw_required_per_repeat; - } - return final_bw_required; -} - -int count_hs_bw(int ep_type, int maxp, int interval, int offset, int td_size){ - int i; - int bw_required; - struct sch_ep *cur_sch_ep; - int tmp_offset; - int tmp_interval; - int ep_offset; - int ep_interval; - int cur_tt_isoc_interval; //for isoc tt check - - bw_required = 0; - for(i=0; ioffset; - ep_interval = cur_sch_ep->interval; - - if(cur_sch_ep->isTT && cur_sch_ep->ep_type == USB_EP_ISOC){ - cur_tt_isoc_interval = ep_interval<<3; - if(ep_interval >= interval){ - tmp_offset = ep_offset + cur_tt_isoc_interval - offset; - tmp_interval = interval; - } - else{ - tmp_offset = offset + interval - ep_offset; - tmp_interval = cur_tt_isoc_interval; - } - if(cur_sch_ep->is_in){ - if((tmp_offset%tmp_interval >=2) && (tmp_offset%tmp_interval <= cur_sch_ep->cs_count)){ - bw_required += 188; - } - } - else{ - if(tmp_offset%tmp_interval <= cur_sch_ep->cs_count){ - bw_required += 188; - } - } - } - else{ - if(ep_interval >= interval){ - tmp_offset = ep_offset + ep_interval - offset; - tmp_interval = interval; - } - else{ - tmp_offset = offset + interval - ep_offset; - tmp_interval = ep_interval; - } - if(tmp_offset%tmp_interval == 0){ - bw_required += cur_sch_ep->bw_cost; - } - } - } - bw_required += td_size; - return bw_required; -} - -int count_tt_isoc_bw(int is_in, int maxp, int interval, int offset, int td_size){ - char is_cs; - int mframe_idx, frame_idx, s_frame, s_mframe, cur_mframe; - int bw_required, max_bw; - int ss_cs_count; - int cs_mframe; - int max_frame; - int i,j; - struct sch_ep *cur_sch_ep; - int ep_offset; - int ep_interval; - int ep_cs_count; - int tt_isoc_interval; //for isoc tt check - int cur_tt_isoc_interval; //for isoc tt check - int tmp_offset; - int tmp_interval; - - is_cs = 0; - - tt_isoc_interval = interval<<3; //frame to mframe - if(is_in){ - is_cs = 1; - } - s_frame = offset/8; - s_mframe = offset%8; - ss_cs_count = (maxp + (188 - 1))/188; - if(is_cs){ - cs_mframe = offset%8 + 2 + ss_cs_count; - if (cs_mframe <= 6) - ss_cs_count += 2; - else if (cs_mframe == 7) - ss_cs_count++; - else if (cs_mframe > 8) - return -1; - } - max_bw = 0; - if(is_in){ - i=2; - } - for(cur_mframe = offset+i; ioffset; - ep_interval = cur_sch_ep->interval; - if(cur_sch_ep->isTT && cur_sch_ep->ep_type == USB_EP_ISOC){ - //isoc tt - //check if mframe offset overlap - //if overlap, add 188 to the bw - cur_tt_isoc_interval = ep_interval<<3; - if(cur_tt_isoc_interval >= tt_isoc_interval){ - tmp_offset = (ep_offset+cur_tt_isoc_interval) - cur_mframe; - tmp_interval = tt_isoc_interval; - } - else{ - tmp_offset = (cur_mframe+tt_isoc_interval) - ep_offset; - tmp_interval = cur_tt_isoc_interval; - } - if(cur_sch_ep->is_in){ - if((tmp_offset%tmp_interval >=2) && (tmp_offset%tmp_interval <= cur_sch_ep->cs_count)){ - bw_required += 188; - } - } - else{ - if(tmp_offset%tmp_interval <= cur_sch_ep->cs_count){ - bw_required += 188; - } - } - - } - else if(cur_sch_ep->ep_type == USB_EP_INT || 
cur_sch_ep->ep_type == USB_EP_ISOC){ - //check if mframe - if(ep_interval >= tt_isoc_interval){ - tmp_offset = (ep_offset+ep_interval) - cur_mframe; - tmp_interval = tt_isoc_interval; - } - else{ - tmp_offset = (cur_mframe+tt_isoc_interval) - ep_offset; - tmp_interval = ep_interval; - } - if(tmp_offset%tmp_interval == 0){ - bw_required += cur_sch_ep->bw_cost; - } - } - } - bw_required += 188; - if(bw_required > max_bw){ - max_bw = bw_required; - } - } - return max_bw; -} - -int count_tt_intr_bw(int interval, int frame_offset){ - //check all eps in tt_intr_eps - int ret; - int i,j; - int ep_offset; - int ep_interval; - int tmp_offset; - int tmp_interval; - ret = SCH_SUCCESS; - struct sch_ep *cur_sch_ep; - - for(i=0; ioffset; - ep_interval = cur_sch_ep->interval; - if(ep_interval >= interval){ - tmp_offset = ep_offset + ep_interval - frame_offset; - tmp_interval = interval; - } - else{ - tmp_offset = frame_offset + interval - ep_offset; - tmp_interval = ep_interval; - } - - if(tmp_offset%tmp_interval==0){ - return SCH_FAIL; - } - } - return SCH_SUCCESS; -} - -struct sch_ep * mtk_xhci_scheduler_remove_ep(int dev_speed, int is_in, int isTT, int ep_type, mtk_u32 *ep){ - int i; - struct sch_ep **ep_array; - struct sch_ep *cur_ep; - - if (is_in && dev_speed == USB_SPEED_SUPER) { - ep_array = (struct sch_ep **)ss_in_eps; - } - else if (dev_speed == USB_SPEED_SUPER) { - ep_array = (struct sch_ep **)ss_out_eps; - } - else if (dev_speed == USB_SPEED_HIGH || (isTT && ep_type == USB_EP_ISOC)) { - ep_array = (struct sch_ep **)hs_eps; - } - else { - ep_array = (struct sch_ep **)tt_intr_eps; - } - for (i = 0; i < MAX_EP_NUM; i++) { - cur_ep = (struct sch_ep *)ep_array[i]; - if(cur_ep != NULL && cur_ep->ep == ep){ - ep_array[i] = NULL; - return cur_ep; - } - } - return NULL; -} - -int mtk_xhci_scheduler_add_ep(int dev_speed, int is_in, int isTT, int ep_type, int maxp, int interval, int burst - , int mult, mtk_u32 *ep, mtk_u32 *ep_ctx, struct sch_ep *sch_ep){ - mtk_u32 bPkts = 0; - mtk_u32 bCsCount = 0; - mtk_u32 bBm = 1; - mtk_u32 bOffset = 0; - mtk_u32 bRepeat = 0; - int ret; - struct mtk_xhci_ep_ctx *temp_ep_ctx; - int td_size; - int mframe_idx, frame_idx; - int bw_cost; - int cur_bw, best_bw, best_bw_idx,repeat, max_repeat, best_bw_repeat; - int cur_offset, cs_mframe; - int break_out; - int frame_interval; - - printk(KERN_ERR "add_ep parameters, dev_speed %d, is_in %d, isTT %d, ep_type %d, maxp %d, interval %d, burst %d, mult %d, ep 0x%x, ep_ctx 0x%x, sch_ep 0x%x\n", dev_speed, is_in, isTT, ep_type, maxp - , interval, burst, mult, ep, ep_ctx, sch_ep); - if(isTT && ep_type == USB_EP_INT && ((dev_speed == USB_SPEED_LOW) || (dev_speed == USB_SPEED_FULL))){ - frame_interval = interval >> 3; - for(frame_idx=0; frame_idx>3; - for(frame_idx=0; frame_idx 0 && cur_bw < best_bw){ - best_bw_idx = cur_offset; - best_bw = cur_bw; - if(cur_bw == td_size || cur_bw < (HS_BW_BOUND>>1)){ - break_out = 1; - break; - } - } - } - } - if(best_bw_idx == -1){ - return SCH_FAIL; - } - else{ - bOffset = best_bw_idx; - bPkts = 1; - bCsCount = (maxp + (188 - 1)) / 188; - if(is_in){ - cs_mframe = bOffset%8 + 2 + bCsCount; - if (cs_mframe <= 6) - bCsCount += 2; - else if (cs_mframe == 7) - bCsCount++; - } - bw_cost = 188; - bRepeat = 0; - if(add_sch_ep( dev_speed, is_in, isTT, ep_type, maxp, interval, burst, mult - , bOffset, bRepeat, bPkts, bCsCount, bBm, bw_cost, ep, sch_ep) == SCH_FAIL){ - return SCH_FAIL; - } - ret = SCH_SUCCESS; - } - } - else if((dev_speed == USB_SPEED_FULL || dev_speed == USB_SPEED_LOW) && ep_type == 
USB_EP_INT){ - bPkts = 1; - ret = SCH_SUCCESS; - } - else if(dev_speed == USB_SPEED_FULL && ep_type == USB_EP_ISOC){ - bPkts = 1; - ret = SCH_SUCCESS; - } - else if(dev_speed == USB_SPEED_HIGH && (ep_type == USB_EP_INT || ep_type == USB_EP_ISOC)){ - best_bw = HS_BW_BOUND; - best_bw_idx = -1; - cur_bw = 0; - td_size = maxp*(burst+1); - for(cur_offset = 0; cur_offset 0 && cur_bw < best_bw){ - best_bw_idx = cur_offset; - best_bw = cur_bw; - if(cur_bw == td_size || cur_bw < (HS_BW_BOUND>>1)){ - break; - } - } - } - if(best_bw_idx == -1){ - return SCH_FAIL; - } - else{ - bOffset = best_bw_idx; - bPkts = burst + 1; - bCsCount = 0; - bw_cost = td_size; - bRepeat = 0; - if(add_sch_ep(dev_speed, is_in, isTT, ep_type, maxp, interval, burst, mult - , bOffset, bRepeat, bPkts, bCsCount, bBm, bw_cost, ep, sch_ep) == SCH_FAIL){ - return SCH_FAIL; - } - ret = SCH_SUCCESS; - } - } - else if(dev_speed == USB_SPEED_SUPER && (ep_type == USB_EP_INT || ep_type == USB_EP_ISOC)){ - best_bw = SS_BW_BOUND; - best_bw_idx = -1; - cur_bw = 0; - td_size = maxp * (mult+1) * (burst+1); - if(mult == 0){ - max_repeat = 0; - } - else{ - max_repeat = (interval-1)/(mult+1); - } - break_out = 0; - for(frame_idx = 0; (frame_idx < interval) && !break_out; frame_idx++){ - for(repeat = max_repeat; repeat >= 0; repeat--){ - cur_bw = count_ss_bw(is_in, ep_type, maxp, interval, burst, mult, frame_idx - , repeat, td_size); - printk(KERN_ERR "count_ss_bw, frame_idx %d, repeat %d, td_size %d, result bw %d\n" - , frame_idx, repeat, td_size, cur_bw); - if(cur_bw > 0 && cur_bw < best_bw){ - best_bw_idx = frame_idx; - best_bw_repeat = repeat; - best_bw = cur_bw; - if(cur_bw <= td_size || cur_bw < (HS_BW_BOUND>>1)){ - break_out = 1; - break; - } - } - } - } - printk(KERN_ERR "final best idx %d, best repeat %d\n", best_bw_idx, best_bw_repeat); - if(best_bw_idx == -1){ - return SCH_FAIL; - } - else{ - bOffset = best_bw_idx; - bCsCount = 0; - bRepeat = best_bw_repeat; - if(bRepeat == 0){ - bw_cost = (burst+1)*(mult+1)*maxp; - bPkts = (burst+1)*(mult+1); - } - else{ - bw_cost = (burst+1)*maxp; - bPkts = (burst+1); - } - if(add_sch_ep(dev_speed, is_in, isTT, ep_type, maxp, interval, burst, mult - , bOffset, bRepeat, bPkts, bCsCount, bBm, bw_cost, ep, sch_ep) == SCH_FAIL){ - return SCH_FAIL; - } - ret = SCH_SUCCESS; - } - } - else{ - bPkts = 1; - ret = SCH_SUCCESS; - } - if(ret == SCH_SUCCESS){ - temp_ep_ctx = (struct mtk_xhci_ep_ctx *)ep_ctx; - temp_ep_ctx->reserved[0] |= (BPKTS(bPkts) | BCSCOUNT(bCsCount) | BBM(bBm)); - temp_ep_ctx->reserved[1] |= (BOFFSET(bOffset) | BREPEAT(bRepeat)); - - printk(KERN_DEBUG "[DBG] BPKTS: %x, BCSCOUNT: %x, BBM: %x\n", bPkts, bCsCount, bBm); - printk(KERN_DEBUG "[DBG] BOFFSET: %x, BREPEAT: %x\n", bOffset, bRepeat); - return SCH_SUCCESS; - } - else{ - return SCH_FAIL; - } -} diff --git a/target/linux/ramips/files/drivers/usb/host/xhci-mtk-scheduler.h b/target/linux/ramips/files/drivers/usb/host/xhci-mtk-scheduler.h deleted file mode 100644 index c55dfb10da..0000000000 --- a/target/linux/ramips/files/drivers/usb/host/xhci-mtk-scheduler.h +++ /dev/null @@ -1,77 +0,0 @@ -#ifndef _XHCI_MTK_SCHEDULER_H -#define _XHCI_MTK_SCHEDULER_H - -#define MTK_SCH_NEW 1 - -#define SCH_SUCCESS 1 -#define SCH_FAIL 0 - -#define MAX_EP_NUM 64 -#define SS_BW_BOUND 51000 -#define HS_BW_BOUND 6144 - -#define USB_EP_CONTROL 0 -#define USB_EP_ISOC 1 -#define USB_EP_BULK 2 -#define USB_EP_INT 3 - -#define USB_SPEED_LOW 1 -#define USB_SPEED_FULL 2 -#define USB_SPEED_HIGH 3 -#define USB_SPEED_SUPER 5 - -/* mtk scheduler bitmasks */ -#define 
BPKTS(p) ((p) & 0x3f) -#define BCSCOUNT(p) (((p) & 0x7) << 8) -#define BBM(p) ((p) << 11) -#define BOFFSET(p) ((p) & 0x3fff) -#define BREPEAT(p) (((p) & 0x7fff) << 16) - - -#if 1 -typedef unsigned int mtk_u32; -typedef unsigned long long mtk_u64; -#endif - -#define NULL ((void *)0) - -struct mtk_xhci_ep_ctx { - mtk_u32 ep_info; - mtk_u32 ep_info2; - mtk_u64 deq; - mtk_u32 tx_info; - /* offset 0x14 - 0x1f reserved for HC internal use */ - mtk_u32 reserved[3]; -}; - - -struct sch_ep -{ - //device info - int dev_speed; - int isTT; - //ep info - int is_in; - int ep_type; - int maxp; - int interval; - int burst; - int mult; - //scheduling info - int offset; - int repeat; - int pkts; - int cs_count; - int burst_mode; - //other - int bw_cost; //bandwidth cost in each repeat; including overhead - mtk_u32 *ep; //address of usb_endpoint pointer -}; - -int mtk_xhci_scheduler_init(void); -int mtk_xhci_scheduler_add_ep(int dev_speed, int is_in, int isTT, int ep_type, int maxp, int interval, int burst - , int mult, mtk_u32 *ep, mtk_u32 *ep_ctx, struct sch_ep *sch_ep); -struct sch_ep * mtk_xhci_scheduler_remove_ep(int dev_speed, int is_in, int isTT, int ep_type, mtk_u32 *ep); - - -#endif diff --git a/target/linux/ramips/files/drivers/usb/host/xhci-mtk.c b/target/linux/ramips/files/drivers/usb/host/xhci-mtk.c deleted file mode 100644 index 2eed0a174a..0000000000 --- a/target/linux/ramips/files/drivers/usb/host/xhci-mtk.c +++ /dev/null @@ -1,265 +0,0 @@ -#include "xhci-mtk.h" -#include "xhci-mtk-power.h" -#include "xhci.h" -#include "mtk-phy.h" -#ifdef CONFIG_C60802_SUPPORT -#include "mtk-phy-c60802.h" -#endif -#include "xhci-mtk-scheduler.h" -#include /* printk() */ -#include -#include -#include -#include -#include - -void setInitialReg(void ) -{ - __u32 __iomem *addr; - u32 temp; - - /* set SSUSB DMA burst size to 128B */ - addr = SSUSB_U3_XHCI_BASE + SSUSB_HDMA_CFG; - temp = SSUSB_HDMA_CFG_MT7621_VALUE; - writel(temp, addr); - - /* extend U3 LTSSM Polling.LFPS timeout value */ - addr = SSUSB_U3_XHCI_BASE + U3_LTSSM_TIMING_PARAMETER3; - temp = U3_LTSSM_TIMING_PARAMETER3_VALUE; - writel(temp, addr); - - /* EOF */ - addr = SSUSB_U3_XHCI_BASE + SYNC_HS_EOF; - temp = SYNC_HS_EOF_VALUE; - writel(temp, addr); - -#if defined (CONFIG_PERIODIC_ENP) - /* HSCH_CFG1: SCH2_FIFO_DEPTH */ - addr = SSUSB_U3_XHCI_BASE + HSCH_CFG1; - temp = readl(addr); - temp &= ~(0x3 << SCH2_FIFO_DEPTH_OFFSET); - writel(temp, addr); -#endif - - /* Doorbell handling */ - addr = SIFSLV_IPPC + SSUSB_IP_SPAR0; - temp = 0x1; - writel(temp, addr); - - /* Set SW PLL Stable mode to 1 for U2 LPM device remote wakeup */ - /* Port 0 */ - addr = U2_PHY_BASE + U2_PHYD_CR1; - temp = readl(addr); - temp &= ~(0x3 << 18); - temp |= (1 << 18); - writel(temp, addr); - - /* Port 1 */ - addr = U2_PHY_BASE_P1 + U2_PHYD_CR1; - temp = readl(addr); - temp &= ~(0x3 << 18); - temp |= (1 << 18); - writel(temp, addr); -} - - -void setLatchSel(void){ - __u32 __iomem *latch_sel_addr; - u32 latch_sel_value; - latch_sel_addr = U3_PIPE_LATCH_SEL_ADD; - latch_sel_value = ((U3_PIPE_LATCH_TX)<<2) | (U3_PIPE_LATCH_RX); - writel(latch_sel_value, latch_sel_addr); -} - -void reinitIP(void){ - __u32 __iomem *ip_reset_addr; - u32 ip_reset_value; - - enableAllClockPower(); - mtk_xhci_scheduler_init(); -} - -void dbg_prb_out(void){ - mtk_probe_init(0x0f0f0f0f); - mtk_probe_out(0xffffffff); - mtk_probe_out(0x01010101); - mtk_probe_out(0x02020202); - mtk_probe_out(0x04040404); - mtk_probe_out(0x08080808); - mtk_probe_out(0x10101010); - mtk_probe_out(0x20202020); - 
mtk_probe_out(0x40404040); - mtk_probe_out(0x80808080); - mtk_probe_out(0x55555555); - mtk_probe_out(0xaaaaaaaa); -} - - - -/////////////////////////////////////////////////////////////////////////////// - -#define RET_SUCCESS 0 -#define RET_FAIL 1 - -static int dbg_u3w(int argc, char**argv) -{ - int u4TimingValue; - char u1TimingValue; - int u4TimingAddress; - - if (argc<3) - { - printk(KERN_ERR "Arg: address value\n"); - return RET_FAIL; - } - u3phy_init(); - - u4TimingAddress = (int)simple_strtol(argv[1], &argv[1], 16); - u4TimingValue = (int)simple_strtol(argv[2], &argv[2], 16); - u1TimingValue = u4TimingValue & 0xff; - /* access MMIO directly */ - writel(u1TimingValue, u4TimingAddress); - printk(KERN_ERR "Write done\n"); - return RET_SUCCESS; - -} - -static int dbg_u3r(int argc, char**argv) -{ - char u1ReadTimingValue; - int u4TimingAddress; - if (argc<2) - { - printk(KERN_ERR "Arg: address\n"); - return 0; - } - u3phy_init(); - mdelay(500); - u4TimingAddress = (int)simple_strtol(argv[1], &argv[1], 16); - /* access MMIO directly */ - u1ReadTimingValue = readl(u4TimingAddress); - printk(KERN_ERR "Value = 0x%x\n", u1ReadTimingValue); - return 0; -} - -static int dbg_u3init(int argc, char**argv) -{ - int ret; - ret = u3phy_init(); - printk(KERN_ERR "phy registers and operations initial done\n"); - if(u3phy_ops->u2_slew_rate_calibration){ - u3phy_ops->u2_slew_rate_calibration(u3phy); - } - else{ - printk(KERN_ERR "WARN: PHY doesn't implement u2 slew rate calibration function\n"); - } - if(u3phy_ops->init(u3phy) == PHY_TRUE) - return RET_SUCCESS; - return RET_FAIL; -} - -void dbg_setU1U2(int argc, char**argv){ - struct xhci_hcd *xhci; - int u1_value; - int u2_value; - u32 port_id, temp; - u32 __iomem *addr; - - if (argc<3) - { - printk(KERN_ERR "Arg: u1value u2value\n"); - return RET_FAIL; - } - - u1_value = (int)simple_strtol(argv[1], &argv[1], 10); - u2_value = (int)simple_strtol(argv[2], &argv[2], 10); - addr = (SSUSB_U3_XHCI_BASE + 0x424); - temp = readl(addr); - temp = temp & (~(0x0000ffff)); - temp = temp | u1_value | (u2_value<<8); - writel(temp, addr); -} -/////////////////////////////////////////////////////////////////////////////// - -int call_function(char *buf) -{ - int i; - int argc; - char *argv[80]; - - argc = 0; - do - { - argv[argc] = strsep(&buf, " "); - printk(KERN_DEBUG "[%d] %s\r\n", argc, argv[argc]); - argc++; - } while (buf); - if (!strcmp("dbg.r", argv[0])) - dbg_prb_out(); - else if (!strcmp("dbg.u3w", argv[0])) - dbg_u3w(argc, argv); - else if (!strcmp("dbg.u3r", argv[0])) - dbg_u3r(argc, argv); - else if (!strcmp("dbg.u3i", argv[0])) - dbg_u3init(argc, argv); - else if (!strcmp("pw.u1u2", argv[0])) - dbg_setU1U2(argc, argv); - return 0; -} - -long xhci_mtk_test_unlock_ioctl(struct file *file, unsigned int cmd, unsigned long arg) -{ - char w_buf[200]; - char r_buf[200] = "this is a test"; - int len = 200; - - switch (cmd) { - case IOCTL_READ: - copy_to_user((char *) arg, r_buf, len); - printk(KERN_DEBUG "IOCTL_READ: %s\r\n", r_buf); - break; - case IOCTL_WRITE: - copy_from_user(w_buf, (char *) arg, len); - printk(KERN_DEBUG "IOCTL_WRITE: %s\r\n", w_buf); - - //invoke function - return call_function(w_buf); - break; - default: - return -ENOTTY; - } - - return len; -} - -int xhci_mtk_test_open(struct inode *inode, struct file *file) -{ - - printk(KERN_DEBUG "xhci_mtk_test open: successful\n"); - return 0; -} - -int xhci_mtk_test_release(struct inode *inode, struct file *file) -{ - - printk(KERN_DEBUG "xhci_mtk_test release: successful\n"); - return 0; -} - 
-ssize_t xhci_mtk_test_read(struct file *file, char *buf, size_t count, loff_t *ptr) -{ - - printk(KERN_DEBUG "xhci_mtk_test read: returning zero bytes\n"); - return 0; -} - -ssize_t xhci_mtk_test_write(struct file *file, const char *buf, size_t count, loff_t * ppos) -{ - - printk(KERN_DEBUG "xhci_mtk_test write: accepting zero bytes\n"); - return 0; -} - - - - diff --git a/target/linux/ramips/files/drivers/usb/host/xhci-mtk.h b/target/linux/ramips/files/drivers/usb/host/xhci-mtk.h deleted file mode 100644 index 0f2d5e834c..0000000000 --- a/target/linux/ramips/files/drivers/usb/host/xhci-mtk.h +++ /dev/null @@ -1,120 +0,0 @@ -#ifndef _XHCI_MTK_H -#define _XHCI_MTK_H - -#include -#include "xhci.h" - -#define SSUSB_U3_XHCI_BASE 0xBE1C0000 -#define SSUSB_U3_MAC_BASE 0xBE1C2400 -#define SSUSB_U3_SYS_BASE 0xBE1C2600 -#define SSUSB_U2_SYS_BASE 0xBE1C3400 -#define SSUB_SIF_SLV_TOP 0xBE1D0000 -#define SIFSLV_IPPC (SSUB_SIF_SLV_TOP + 0x700) - -#define U3_PIPE_LATCH_SEL_ADD SSUSB_U3_MAC_BASE + 0x130 -#define U3_PIPE_LATCH_TX 0 -#define U3_PIPE_LATCH_RX 0 - -#define U3_UX_EXIT_LFPS_TIMING_PAR 0xa0 -#define U3_REF_CK_PAR 0xb0 -#define U3_RX_UX_EXIT_LFPS_REF_OFFSET 8 -#define U3_RX_UX_EXIT_LFPS_REF 3 -#define U3_REF_CK_VAL 10 - -#define U3_TIMING_PULSE_CTRL 0xb4 -#define CNT_1US_VALUE 63 //62.5MHz:63, 70MHz:70, 80MHz:80, 100MHz:100, 125MHz:125 - -#define USB20_TIMING_PARAMETER 0x40 -#define TIME_VALUE_1US 63 //62.5MHz:63, 80MHz:80, 100MHz:100, 125MHz:125 - -#define LINK_PM_TIMER 0x8 -#define PM_LC_TIMEOUT_VALUE 3 - -#define XHCI_IMOD 0x624 -#define XHCI_IMOD_MT7621_VALUE 0x10 - -#define SSUSB_HDMA_CFG 0x950 -#define SSUSB_HDMA_CFG_MT7621_VALUE 0x10E0E0C - -#define U3_LTSSM_TIMING_PARAMETER3 0x2514 -#define U3_LTSSM_TIMING_PARAMETER3_VALUE 0x3E8012C - -#define U2_PHYD_CR1 0x64 - -#define SSUSB_IP_SPAR0 0xC8 - -#define SYNC_HS_EOF 0x938 -#define SYNC_HS_EOF_VALUE 0x201F3 - -#define HSCH_CFG1 0x960 -#define SCH2_FIFO_DEPTH_OFFSET 16 - - -#define SSUSB_IP_PW_CTRL (SIFSLV_IPPC+0x0) -#define SSUSB_IP_SW_RST (1<<0) -#define SSUSB_IP_PW_CTRL_1 (SIFSLV_IPPC+0x4) -#define SSUSB_IP_PDN (1<<0) -#define SSUSB_U3_CTRL(p) (SIFSLV_IPPC+0x30+(p*0x08)) -#define SSUSB_U3_PORT_DIS (1<<0) -#define SSUSB_U3_PORT_PDN (1<<1) -#define SSUSB_U3_PORT_HOST_SEL (1<<2) -#define SSUSB_U3_PORT_CKBG_EN (1<<3) -#define SSUSB_U3_PORT_MAC_RST (1<<4) -#define SSUSB_U3_PORT_PHYD_RST (1<<5) -#define SSUSB_U2_CTRL(p) (SIFSLV_IPPC+(0x50)+(p*0x08)) -#define SSUSB_U2_PORT_DIS (1<<0) -#define SSUSB_U2_PORT_PDN (1<<1) -#define SSUSB_U2_PORT_HOST_SEL (1<<2) -#define SSUSB_U2_PORT_CKBG_EN (1<<3) -#define SSUSB_U2_PORT_MAC_RST (1<<4) -#define SSUSB_U2_PORT_PHYD_RST (1<<5) -#define SSUSB_IP_CAP (SIFSLV_IPPC+0x024) - -#define SSUSB_U3_PORT_NUM(p) (p & 0xff) -#define SSUSB_U2_PORT_NUM(p) ((p>>8) & 0xff) - - -#define XHCI_MTK_TEST_MAJOR 234 -#define DEVICE_NAME "xhci_mtk_test" - -#define CLI_MAGIC 'CLI' -#define IOCTL_READ _IOR(CLI_MAGIC, 0, int) -#define IOCTL_WRITE _IOW(CLI_MAGIC, 1, int) - -void reinitIP(void); -void setInitialReg(void); -void dbg_prb_out(void); -int call_function(char *buf); - -long xhci_mtk_test_unlock_ioctl(struct file *file, unsigned int cmd, unsigned long arg); -int xhci_mtk_test_open(struct inode *inode, struct file *file); -int xhci_mtk_test_release(struct inode *inode, struct file *file); -ssize_t xhci_mtk_test_read(struct file *file, char *buf, size_t count, loff_t *ptr); -ssize_t xhci_mtk_test_write(struct file *file, const char *buf, size_t count, loff_t * ppos); - -/* - mediatek probe out -*/ 
-/************************************************************************************/ - -#define SW_PRB_OUT_ADDR (SIFSLV_IPPC+0xc0) -#define PRB_MODULE_SEL_ADDR (SIFSLV_IPPC+0xbc) - -static inline void mtk_probe_init(const u32 byte){ - __u32 __iomem *ptr = (__u32 __iomem *) PRB_MODULE_SEL_ADDR; - writel(byte, ptr); -} - -static inline void mtk_probe_out(const u32 value){ - __u32 __iomem *ptr = (__u32 __iomem *) SW_PRB_OUT_ADDR; - writel(value, ptr); -} - -static inline u32 mtk_probe_value(void){ - __u32 __iomem *ptr = (__u32 __iomem *) SW_PRB_OUT_ADDR; - - return readl(ptr); -} - - -#endif diff --git a/target/linux/ramips/mt7621/config-3.10 b/target/linux/ramips/mt7621/config-3.10 index 194bb4a1cc..86bc7bef4f 100644 --- a/target/linux/ramips/mt7621/config-3.10 +++ b/target/linux/ramips/mt7621/config-3.10 @@ -1,6 +1,8 @@ +# CONFIG_32B_DESC is not set CONFIG_ARCH_BINFMT_ELF_RANDOMIZE_PIE=y CONFIG_ARCH_DISCARD_MEMBLOCK=y CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE=y +CONFIG_ARCH_HAS_RESET_CONTROLLER=y CONFIG_ARCH_HAVE_CUSTOM_GPIO_H=y CONFIG_ARCH_REQUIRE_GPIOLIB=y CONFIG_ARCH_WANT_IPC_PARSE_VERSION=y @@ -35,6 +37,14 @@ CONFIG_DMA_NONCOHERENT=y CONFIG_DTB_RT_NONE=y CONFIG_DTC=y CONFIG_EARLY_PRINTK=y +CONFIG_ESW_DOUBLE_VLAN_TAG=y +# CONFIG_GE1_MII_AN is not set +# CONFIG_GE1_MII_FORCE_100 is not set +# CONFIG_GE1_RGMII_AN is not set +CONFIG_GE1_RGMII_FORCE_1000=y +# CONFIG_GE1_RGMII_NONE is not set +# CONFIG_GE1_RVMII_FORCE_100 is not set +# CONFIG_GE1_TRGMII_FORCE_1200 is not set CONFIG_GENERIC_ATOMIC64=y CONFIG_GENERIC_CLOCKEVENTS=y CONFIG_GENERIC_CLOCKEVENTS_BUILD=y @@ -88,6 +98,7 @@ CONFIG_IRQ_DOMAIN=y CONFIG_IRQ_FORCED_THREADING=y CONFIG_IRQ_GIC=y CONFIG_IRQ_WORK=y +# CONFIG_LAN_WAN_SUPPORT is not set CONFIG_M25PXX_USE_FAST_READ=y CONFIG_MDIO_BOARDINFO=y # CONFIG_MII is not set @@ -105,6 +116,7 @@ CONFIG_MIPS_MT_SMP=y CONFIG_MIPS_PERF_SHARED_TC_COUNTERS=y # CONFIG_MIPS_VPE_LOADER is not set CONFIG_MODULES_USE_ELF_REL=y +CONFIG_MT7621_ASIC=y # CONFIG_MT7621_WDT is not set # CONFIG_MTD_CFI_INTELEXT is not set CONFIG_MTD_CMDLINE_PARTS=y @@ -137,6 +149,7 @@ CONFIG_PAGEFLAGS_EXTENDED=y CONFIG_PCI=y CONFIG_PCI_DISABLE_COMMON_QUIRKS=y CONFIG_PCI_DOMAINS=y +CONFIG_PDMA_NEW=y CONFIG_PERF_USE_VMALLOC=y CONFIG_PHYLIB=y # CONFIG_PINCONF is not set @@ -145,12 +158,30 @@ CONFIG_PINCTRL_RT2880=y # CONFIG_PINCTRL_SINGLE is not set CONFIG_PINMUX=y # CONFIG_PREEMPT_RCU is not set +CONFIG_RAETH=y +CONFIG_RAETH_CHECKSUM_OFFLOAD=y +# CONFIG_RAETH_GMAC2 is not set +# CONFIG_RAETH_HW_VLAN_RX is not set +# CONFIG_RAETH_HW_VLAN_TX is not set +# CONFIG_RAETH_LRO is not set +# CONFIG_RAETH_NAPI is not set +# CONFIG_RAETH_QDMA is not set +CONFIG_RAETH_SCATTER_GATHER_RX_DMA=y +# CONFIG_RAETH_SKB_RECYCLE_2K is not set +# CONFIG_RAETH_SPECIAL_TAG is not set +# CONFIG_RAETH_TSO is not set CONFIG_RALINK=y +CONFIG_RALINK_MT7621=y CONFIG_RALINK_USBPHY=y +# CONFIG_RALINK_WDT is not set +CONFIG_RA_NAT_NONE=y +# CONFIG_RA_NETWORK_TASKLET_BH is not set +CONFIG_RA_NETWORK_WORKQUEUE_BH=y CONFIG_RCU_STALL_COMMON=y CONFIG_RESET_CONTROLLER=y CONFIG_RFS_ACCEL=y CONFIG_RPS=y +CONFIG_RT_3052_ESW=y # CONFIG_SAMSUNG_USB2PHY is not set # CONFIG_SAMSUNG_USB3PHY is not set # CONFIG_SAMSUNG_USBPHY is not set @@ -199,6 +230,8 @@ CONFIG_USB_XHCI_HCD=m CONFIG_USB_XHCI_PLATFORM=y CONFIG_USE_GENERIC_SMP_HELPERS=y CONFIG_USE_OF=y +CONFIG_WAN_AT_P0=y +# CONFIG_WAN_AT_P4 is not set CONFIG_WATCHDOG_CORE=y CONFIG_WEAK_ORDERING=y CONFIG_XPS=y diff --git a/target/linux/ramips/mt7621/profiles/00-default.mk 
b/target/linux/ramips/mt7621/profiles/00-default.mk index a905d1ed80..fb04ba788c 100644 --- a/target/linux/ramips/mt7621/profiles/00-default.mk +++ b/target/linux/ramips/mt7621/profiles/00-default.mk @@ -8,7 +8,7 @@ define Profile/Default NAME:=Default Profile PACKAGES:=\ - kmod-usb-core kmod-usb-dwc2 \ + kmod-usb-core kmod-usb3 \ kmod-ledtrig-usbdev endef diff --git a/target/linux/ramips/patches-3.10/0100-MIPS-use-set_mode-to-enable-disable-the-cevt-r4k-irq.patch b/target/linux/ramips/patches-3.10/0100-MIPS-use-set_mode-to-enable-disable-the-cevt-r4k-irq.patch index e87eb4d394..1d2e728cff 100644 --- a/target/linux/ramips/patches-3.10/0100-MIPS-use-set_mode-to-enable-disable-the-cevt-r4k-irq.patch +++ b/target/linux/ramips/patches-3.10/0100-MIPS-use-set_mode-to-enable-disable-the-cevt-r4k-irq.patch @@ -1,7 +1,7 @@ -From cdc1b12b3debaf5b3894fd146e73221a8acd0152 Mon Sep 17 00:00:00 2001 +From 1be15a87eea5f26fb24b6aac332530cd3e2d984e Mon Sep 17 00:00:00 2001 From: John Crispin Date: Sun, 14 Jul 2013 23:08:11 +0200 -Subject: [PATCH 20/25] MIPS: use set_mode() to enable/disable the cevt-r4k +Subject: [PATCH 100/133] MIPS: use set_mode() to enable/disable the cevt-r4k irq Signed-off-by: John Crispin @@ -24,13 +24,14 @@ Signed-off-by: John Crispin DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device); int cp0_timer_irq_installed; -@@ -90,6 +84,32 @@ struct irqaction c0_compare_irqaction = +@@ -90,9 +84,38 @@ struct irqaction c0_compare_irqaction = .name = "timer", }; +void mips_set_clock_mode(enum clock_event_mode mode, + struct clock_event_device *evt) +{ ++#ifdef CONFIG_CEVT_SYSTICK_QUIRK + switch (mode) { + case CLOCK_EVT_MODE_ONESHOT: + if (cp0_timer_irq_installed) @@ -53,21 +54,27 @@ Signed-off-by: John Crispin + pr_err("Unhandeled mips clock_mode\n"); + break; + } ++#endif +} void mips_event_handler(struct clock_event_device *dev) { -@@ -215,13 +235,6 @@ int __cpuinit r4k_clockevent_init(void) ++ + } + + /* +@@ -215,12 +238,14 @@ int __cpuinit r4k_clockevent_init(void) #endif clockevents_register_device(cd); -- if (cp0_timer_irq_installed) -- return 0; -- -- cp0_timer_irq_installed = 1; -- -- setup_irq(irq, &c0_compare_irqaction); -- ++#ifndef CONFIG_CEVT_SYSTICK_QUIRK + if (cp0_timer_irq_installed) + return 0; + + cp0_timer_irq_installed = 1; + + setup_irq(irq, &c0_compare_irqaction); ++#endif + return 0; } - diff --git a/target/linux/ramips/patches-3.10/0101-MIPS-ralink-add-verbose-pmu-info.patch b/target/linux/ramips/patches-3.10/0101-MIPS-ralink-add-verbose-pmu-info.patch new file mode 100644 index 0000000000..5a7d5f9997 --- /dev/null +++ b/target/linux/ramips/patches-3.10/0101-MIPS-ralink-add-verbose-pmu-info.patch @@ -0,0 +1,59 @@ +From 5689333e7e4396a827a2cb6fa1242159e9af56de Mon Sep 17 00:00:00 2001 +From: John Crispin +Date: Mon, 20 May 2013 20:57:09 +0200 +Subject: [PATCH 101/133] MIPS: ralink: add verbose pmu info + +Print the PMU and LDO settings on boot. 
+ +Signed-off-by: John Crispin +--- + arch/mips/ralink/mt7620.c | 26 ++++++++++++++++++++++++++ + 1 file changed, 26 insertions(+) + +--- a/arch/mips/ralink/mt7620.c ++++ b/arch/mips/ralink/mt7620.c +@@ -26,6 +26,22 @@ + #define CLKCFG_FFRAC_MASK 0x001f + #define CLKCFG_FFRAC_USB_VAL 0x0003 + ++/* analog */ ++#define PMU0_CFG 0x88 ++#define PMU_SW_SET BIT(28) ++#define A_DCDC_EN BIT(24) ++#define A_SSC_PERI BIT(19) ++#define A_SSC_GEN BIT(18) ++#define A_SSC_M 0x3 ++#define A_SSC_S 16 ++#define A_DLY_M 0x7 ++#define A_DLY_S 8 ++#define A_VTUNE_M 0xff ++ ++/* digital */ ++#define PMU1_CFG 0x8C ++#define DIG_SW_SEL BIT(25) ++ + /* does the board have sdram or ddram */ + static int dram_type; + +@@ -208,6 +224,8 @@ void prom_soc_init(struct ralink_soc_inf + u32 n1; + u32 rev; + u32 cfg0; ++ u32 pmu0; ++ u32 pmu1; + + n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0); + n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1); +@@ -255,4 +273,12 @@ void prom_soc_init(struct ralink_soc_inf + BUG(); + } + soc_info->mem_base = MT7620_DRAM_BASE; ++ ++ pmu0 = __raw_readl(sysc + PMU0_CFG); ++ pmu1 = __raw_readl(sysc + PMU1_CFG); ++ ++ pr_info("Analog PMU set to %s control\n", ++ (pmu0 & PMU_SW_SET) ? ("sw") : ("hw")); ++ pr_info("Digital PMU set to %s control\n", ++ (pmu1 & DIG_SW_SEL) ? ("sw") : ("hw")); + } diff --git a/target/linux/ramips/patches-3.10/0102-MIPS-ralink-add-verbose-pmu-info.patch b/target/linux/ramips/patches-3.10/0102-MIPS-ralink-add-verbose-pmu-info.patch deleted file mode 100644 index 687b883fe3..0000000000 --- a/target/linux/ramips/patches-3.10/0102-MIPS-ralink-add-verbose-pmu-info.patch +++ /dev/null @@ -1,59 +0,0 @@ -From 74339d6eab7a37f7c629b737bf686d30e5014ce2 Mon Sep 17 00:00:00 2001 -From: John Crispin -Date: Mon, 20 May 2013 20:57:09 +0200 -Subject: [PATCH 06/33] MIPS: ralink: add verbose pmu info - -Print the PMU and LDO settings on boot. - -Signed-off-by: John Crispin ---- - arch/mips/ralink/mt7620.c | 26 ++++++++++++++++++++++++++ - 1 file changed, 26 insertions(+) - ---- a/arch/mips/ralink/mt7620.c -+++ b/arch/mips/ralink/mt7620.c -@@ -26,6 +26,22 @@ - #define CLKCFG_FFRAC_MASK 0x001f - #define CLKCFG_FFRAC_USB_VAL 0x0003 - -+/* analog */ -+#define PMU0_CFG 0x88 -+#define PMU_SW_SET BIT(28) -+#define A_DCDC_EN BIT(24) -+#define A_SSC_PERI BIT(19) -+#define A_SSC_GEN BIT(18) -+#define A_SSC_M 0x3 -+#define A_SSC_S 16 -+#define A_DLY_M 0x7 -+#define A_DLY_S 8 -+#define A_VTUNE_M 0xff -+ -+/* digital */ -+#define PMU1_CFG 0x8C -+#define DIG_SW_SEL BIT(25) -+ - /* does the board have sdram or ddram */ - static int dram_type; - -@@ -208,6 +224,8 @@ void prom_soc_init(struct ralink_soc_inf - u32 n1; - u32 rev; - u32 cfg0; -+ u32 pmu0; -+ u32 pmu1; - - n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0); - n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1); -@@ -255,4 +273,12 @@ void prom_soc_init(struct ralink_soc_inf - BUG(); - } - soc_info->mem_base = MT7620_DRAM_BASE; -+ -+ pmu0 = __raw_readl(sysc + PMU0_CFG); -+ pmu1 = __raw_readl(sysc + PMU1_CFG); -+ -+ pr_info("Analog PMU set to %s control\n", -+ (pmu0 & PMU_SW_SET) ? ("sw") : ("hw")); -+ pr_info("Digital PMU set to %s control\n", -+ (pmu1 & DIG_SW_SEL) ? 
("sw") : ("hw")); - } diff --git a/target/linux/ramips/patches-3.10/0102-MIPS-ralink-adds-a-bootrom-dumper-module.patch b/target/linux/ramips/patches-3.10/0102-MIPS-ralink-adds-a-bootrom-dumper-module.patch new file mode 100644 index 0000000000..e295a4aa4d --- /dev/null +++ b/target/linux/ramips/patches-3.10/0102-MIPS-ralink-adds-a-bootrom-dumper-module.patch @@ -0,0 +1,75 @@ +From 23d18a1b3d0a7e5faa08b6bece6692667c930975 Mon Sep 17 00:00:00 2001 +From: John Crispin +Date: Tue, 21 May 2013 15:50:31 +0200 +Subject: [PATCH 102/133] MIPS: ralink: adds a bootrom dumper module + +This patch adds a trivial driver that allows userland to extract the bootrom of +a SoC via debugfs. + +Signed-off-by: John Crispin +--- + arch/mips/ralink/Makefile | 2 ++ + arch/mips/ralink/bootrom.c | 48 ++++++++++++++++++++++++++++++++++++++++++++ + 2 files changed, 50 insertions(+) + create mode 100644 arch/mips/ralink/bootrom.c + +--- a/arch/mips/ralink/Makefile ++++ b/arch/mips/ralink/Makefile +@@ -17,4 +17,6 @@ obj-$(CONFIG_SOC_MT7620) += mt7620.o + + obj-$(CONFIG_EARLY_PRINTK) += early_printk.o + ++obj-$(CONFIG_DEBUG_FS) += bootrom.o ++ + obj-y += dts/ +--- /dev/null ++++ b/arch/mips/ralink/bootrom.c +@@ -0,0 +1,48 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published ++ * by the Free Software Foundation. ++ * ++ * Copyright (C) 2013 John Crispin ++ */ ++ ++#include ++#include ++ ++#define BOOTROM_OFFSET 0x10118000 ++#define BOOTROM_SIZE 0x8000 ++ ++static void __iomem *membase = (void __iomem*) KSEG1ADDR(BOOTROM_OFFSET); ++ ++static int bootrom_show(struct seq_file *s, void *unused) ++{ ++ seq_write(s, membase, BOOTROM_SIZE); ++ ++ return 0; ++} ++ ++static int bootrom_open(struct inode *inode, struct file *file) ++{ ++ return single_open(file, bootrom_show, NULL); ++} ++ ++static const struct file_operations bootrom_file_ops = { ++ .open = bootrom_open, ++ .read = seq_read, ++ .llseek = seq_lseek, ++ .release = single_release, ++}; ++ ++static int bootrom_setup(void) ++{ ++ if (!debugfs_create_file("bootrom", 0444, ++ NULL, NULL, &bootrom_file_ops)) { ++ pr_err("Failed to create bootrom debugfs file\n"); ++ ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++postcore_initcall(bootrom_setup); diff --git a/target/linux/ramips/patches-3.10/0103-MIPS-ralink-add-illegal-access-driver.patch b/target/linux/ramips/patches-3.10/0103-MIPS-ralink-add-illegal-access-driver.patch new file mode 100644 index 0000000000..4dfa7b5372 --- /dev/null +++ b/target/linux/ramips/patches-3.10/0103-MIPS-ralink-add-illegal-access-driver.patch @@ -0,0 +1,113 @@ +From c5fe00f24f56b15f982dda355089986d57488b36 Mon Sep 17 00:00:00 2001 +From: John Crispin +Date: Thu, 16 May 2013 23:28:23 +0200 +Subject: [PATCH 103/133] MIPS: ralink: add illegal access driver + +Signed-off-by: John Crispin +--- + arch/mips/ralink/Makefile | 2 + + arch/mips/ralink/ill_acc.c | 87 ++++++++++++++++++++++++++++++++++++++++++++ + 2 files changed, 89 insertions(+) + create mode 100644 arch/mips/ralink/ill_acc.c + +--- a/arch/mips/ralink/Makefile ++++ b/arch/mips/ralink/Makefile +@@ -10,6 +10,8 @@ obj-y := prom.o of.o reset.o clk.o irq.o + + obj-$(CONFIG_CLKEVT_RT3352) += cevt-rt3352.o + ++obj-$(CONFIG_RALINK_ILL_ACC) += ill_acc.o ++ + obj-$(CONFIG_SOC_RT288X) += rt288x.o + obj-$(CONFIG_SOC_RT305X) += rt305x.o + obj-$(CONFIG_SOC_RT3883) += rt3883.o +--- /dev/null ++++ b/arch/mips/ralink/ill_acc.c +@@ -0,0 +1,87 @@ ++/* ++ * This program is free software; you can 
redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published ++ * by the Free Software Foundation. ++ * ++ * Copyright (C) 2013 John Crispin ++ */ ++ ++#include ++#include ++#include ++ ++#include ++ ++#define REG_ILL_ACC_ADDR 0x10 ++#define REG_ILL_ACC_TYPE 0x14 ++ ++#define ILL_INT_STATUS BIT(31) ++#define ILL_ACC_WRITE BIT(30) ++#define ILL_ACC_LEN_M 0xff ++#define ILL_ACC_OFF_M 0xf ++#define ILL_ACC_OFF_S 16 ++#define ILL_ACC_ID_M 0x7 ++#define ILL_ACC_ID_S 8 ++ ++#define DRV_NAME "ill_acc" ++ ++static const char *ill_acc_ids[] = { ++ "cpu", "dma", "ppe", "pdma rx","pdma tx", "pci/e", "wmac", "usb", ++}; ++ ++static irqreturn_t ill_acc_irq_handler(int irq, void *_priv) ++{ ++ struct device *dev = (struct device *) _priv; ++ u32 addr = rt_memc_r32(REG_ILL_ACC_ADDR); ++ u32 type = rt_memc_r32(REG_ILL_ACC_TYPE); ++ ++ dev_err(dev, "illegal %s access from %s - addr:0x%08x offset:%d len:%d\n", ++ (type & ILL_ACC_WRITE) ? ("write") : ("read"), ++ ill_acc_ids[(type >> ILL_ACC_ID_S) & ILL_ACC_ID_M], ++ addr, (type >> ILL_ACC_OFF_S) & ILL_ACC_OFF_M, ++ type & ILL_ACC_LEN_M); ++ ++ rt_memc_w32(REG_ILL_ACC_TYPE, REG_ILL_ACC_TYPE); ++ ++ return IRQ_HANDLED; ++} ++ ++static int __init ill_acc_of_setup(void) ++{ ++ struct platform_device *pdev; ++ struct device_node *np; ++ int irq; ++ ++ /* somehow this driver breaks on RT5350 */ ++ if (of_machine_is_compatible("ralink,rt5350-soc")) ++ return -EINVAL; ++ ++ np = of_find_compatible_node(NULL, NULL, "ralink,rt3050-memc"); ++ if (!np) ++ return -EINVAL; ++ ++ pdev = of_find_device_by_node(np); ++ if (!pdev) { ++ pr_err("%s: failed to lookup pdev\n", np->name); ++ return -EINVAL; ++ } ++ ++ irq = irq_of_parse_and_map(np, 0); ++ if (!irq) { ++ dev_err(&pdev->dev, "failed to get irq\n"); ++ return -EINVAL; ++ } ++ ++ if (request_irq(irq, ill_acc_irq_handler, 0, "ill_acc", &pdev->dev)) { ++ dev_err(&pdev->dev, "failed to request irq\n"); ++ return -EINVAL; ++ } ++ ++ rt_memc_w32(ILL_INT_STATUS, REG_ILL_ACC_TYPE); ++ ++ dev_info(&pdev->dev, "irq registered\n"); ++ ++ return 0; ++} ++ ++arch_initcall(ill_acc_of_setup); diff --git a/target/linux/ramips/patches-3.10/0103-MIPS-ralink-adds-a-bootrom-dumper-module.patch b/target/linux/ramips/patches-3.10/0103-MIPS-ralink-adds-a-bootrom-dumper-module.patch deleted file mode 100644 index c0910326c9..0000000000 --- a/target/linux/ramips/patches-3.10/0103-MIPS-ralink-adds-a-bootrom-dumper-module.patch +++ /dev/null @@ -1,75 +0,0 @@ -From 71409a190a0c8e3597cae7d46321742e29d8994b Mon Sep 17 00:00:00 2001 -From: John Crispin -Date: Tue, 21 May 2013 15:50:31 +0200 -Subject: [PATCH 07/33] MIPS: ralink: adds a bootrom dumper module - -This patch adds a trivial driver that allows userland to extract the bootrom of -a SoC via debugfs. - -Signed-off-by: John Crispin ---- - arch/mips/ralink/Makefile | 2 ++ - arch/mips/ralink/bootrom.c | 48 ++++++++++++++++++++++++++++++++++++++++++++ - 2 files changed, 50 insertions(+) - create mode 100644 arch/mips/ralink/bootrom.c - ---- a/arch/mips/ralink/Makefile -+++ b/arch/mips/ralink/Makefile -@@ -17,4 +17,6 @@ obj-$(CONFIG_SOC_MT7620) += mt7620.o - - obj-$(CONFIG_EARLY_PRINTK) += early_printk.o - -+obj-$(CONFIG_DEBUG_FS) += bootrom.o -+ - obj-y += dts/ ---- /dev/null -+++ b/arch/mips/ralink/bootrom.c -@@ -0,0 +1,48 @@ -+/* -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation. 
-+ * -+ * Copyright (C) 2013 John Crispin -+ */ -+ -+#include -+#include -+ -+#define BOOTROM_OFFSET 0x10118000 -+#define BOOTROM_SIZE 0x8000 -+ -+static void __iomem *membase = (void __iomem*) KSEG1ADDR(BOOTROM_OFFSET); -+ -+static int bootrom_show(struct seq_file *s, void *unused) -+{ -+ seq_write(s, membase, BOOTROM_SIZE); -+ -+ return 0; -+} -+ -+static int bootrom_open(struct inode *inode, struct file *file) -+{ -+ return single_open(file, bootrom_show, NULL); -+} -+ -+static const struct file_operations bootrom_file_ops = { -+ .open = bootrom_open, -+ .read = seq_read, -+ .llseek = seq_lseek, -+ .release = single_release, -+}; -+ -+static int bootrom_setup(void) -+{ -+ if (!debugfs_create_file("bootrom", 0444, -+ NULL, NULL, &bootrom_file_ops)) { -+ pr_err("Failed to create bootrom debugfs file\n"); -+ -+ return -EINVAL; -+ } -+ -+ return 0; -+} -+ -+postcore_initcall(bootrom_setup); diff --git a/target/linux/ramips/patches-3.10/0104-MIPS-ralink-add-illegal-access-driver.patch b/target/linux/ramips/patches-3.10/0104-MIPS-ralink-add-illegal-access-driver.patch deleted file mode 100644 index 082c324531..0000000000 --- a/target/linux/ramips/patches-3.10/0104-MIPS-ralink-add-illegal-access-driver.patch +++ /dev/null @@ -1,113 +0,0 @@ -From 46446fcfc6e823005ebe71357b5995524e75542c Mon Sep 17 00:00:00 2001 -From: John Crispin -Date: Thu, 16 May 2013 23:28:23 +0200 -Subject: [PATCH 08/33] MIPS: ralink: add illegal access driver - -Signed-off-by: John Crispin ---- - arch/mips/ralink/Makefile | 2 + - arch/mips/ralink/ill_acc.c | 87 ++++++++++++++++++++++++++++++++++++++++++++ - 2 files changed, 89 insertions(+) - create mode 100644 arch/mips/ralink/ill_acc.c - ---- a/arch/mips/ralink/Makefile -+++ b/arch/mips/ralink/Makefile -@@ -10,6 +10,8 @@ obj-y := prom.o of.o reset.o clk.o irq.o - - obj-$(CONFIG_CLKEVT_RT3352) += cevt-rt3352.o - -+obj-$(CONFIG_RALINK_ILL_ACC) += ill_acc.o -+ - obj-$(CONFIG_SOC_RT288X) += rt288x.o - obj-$(CONFIG_SOC_RT305X) += rt305x.o - obj-$(CONFIG_SOC_RT3883) += rt3883.o ---- /dev/null -+++ b/arch/mips/ralink/ill_acc.c -@@ -0,0 +1,87 @@ -+/* -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation. -+ * -+ * Copyright (C) 2013 John Crispin -+ */ -+ -+#include -+#include -+#include -+ -+#include -+ -+#define REG_ILL_ACC_ADDR 0x10 -+#define REG_ILL_ACC_TYPE 0x14 -+ -+#define ILL_INT_STATUS BIT(31) -+#define ILL_ACC_WRITE BIT(30) -+#define ILL_ACC_LEN_M 0xff -+#define ILL_ACC_OFF_M 0xf -+#define ILL_ACC_OFF_S 16 -+#define ILL_ACC_ID_M 0x7 -+#define ILL_ACC_ID_S 8 -+ -+#define DRV_NAME "ill_acc" -+ -+static const char *ill_acc_ids[] = { -+ "cpu", "dma", "ppe", "pdma rx","pdma tx", "pci/e", "wmac", "usb", -+}; -+ -+static irqreturn_t ill_acc_irq_handler(int irq, void *_priv) -+{ -+ struct device *dev = (struct device *) _priv; -+ u32 addr = rt_memc_r32(REG_ILL_ACC_ADDR); -+ u32 type = rt_memc_r32(REG_ILL_ACC_TYPE); -+ -+ dev_err(dev, "illegal %s access from %s - addr:0x%08x offset:%d len:%d\n", -+ (type & ILL_ACC_WRITE) ? 
("write") : ("read"), -+ ill_acc_ids[(type >> ILL_ACC_ID_S) & ILL_ACC_ID_M], -+ addr, (type >> ILL_ACC_OFF_S) & ILL_ACC_OFF_M, -+ type & ILL_ACC_LEN_M); -+ -+ rt_memc_w32(REG_ILL_ACC_TYPE, REG_ILL_ACC_TYPE); -+ -+ return IRQ_HANDLED; -+} -+ -+static int __init ill_acc_of_setup(void) -+{ -+ struct platform_device *pdev; -+ struct device_node *np; -+ int irq; -+ -+ /* somehow this driver breaks on RT5350 */ -+ if (of_machine_is_compatible("ralink,rt5350-soc")) -+ return -EINVAL; -+ -+ np = of_find_compatible_node(NULL, NULL, "ralink,rt3050-memc"); -+ if (!np) -+ return -EINVAL; -+ -+ pdev = of_find_device_by_node(np); -+ if (!pdev) { -+ pr_err("%s: failed to lookup pdev\n", np->name); -+ return -EINVAL; -+ } -+ -+ irq = irq_of_parse_and_map(np, 0); -+ if (!irq) { -+ dev_err(&pdev->dev, "failed to get irq\n"); -+ return -EINVAL; -+ } -+ -+ if (request_irq(irq, ill_acc_irq_handler, 0, "ill_acc", &pdev->dev)) { -+ dev_err(&pdev->dev, "failed to request irq\n"); -+ return -EINVAL; -+ } -+ -+ rt_memc_w32(ILL_INT_STATUS, REG_ILL_ACC_TYPE); -+ -+ dev_info(&pdev->dev, "irq registered\n"); -+ -+ return 0; -+} -+ -+arch_initcall(ill_acc_of_setup); diff --git a/target/linux/ramips/patches-3.10/0104-MIPS-ralink-workaround-DTB-memory-issue.patch b/target/linux/ramips/patches-3.10/0104-MIPS-ralink-workaround-DTB-memory-issue.patch new file mode 100644 index 0000000000..c9a12b3676 --- /dev/null +++ b/target/linux/ramips/patches-3.10/0104-MIPS-ralink-workaround-DTB-memory-issue.patch @@ -0,0 +1,24 @@ +From b83808826ac7a5c727f5314b5a3bf07fcd6ec929 Mon Sep 17 00:00:00 2001 +From: John Crispin +Date: Thu, 23 May 2013 18:50:56 +0200 +Subject: [PATCH 104/133] MIPS: ralink: workaround DTB memory issue + +If the DTB is too big a bug happens on boot when init ram is freed. +This is a temporary fix until the real cause is found. 
+ +Signed-off-by: John Crispin +--- + arch/mips/ralink/of.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/arch/mips/ralink/of.c ++++ b/arch/mips/ralink/of.c +@@ -74,7 +74,7 @@ void __init device_tree_init(void) + unflatten_device_tree(); + + /* free the space reserved for the dt blob */ +- free_bootmem(base, size); ++ //free_bootmem(base, size); + } + + void __init plat_mem_setup(void) diff --git a/target/linux/ramips/patches-3.10/0105-MIPS-ralink-add-missing-clk_set_rate-to-clk.c.patch b/target/linux/ramips/patches-3.10/0105-MIPS-ralink-add-missing-clk_set_rate-to-clk.c.patch new file mode 100644 index 0000000000..e9b30e03e3 --- /dev/null +++ b/target/linux/ramips/patches-3.10/0105-MIPS-ralink-add-missing-clk_set_rate-to-clk.c.patch @@ -0,0 +1,25 @@ +From 6f72aea69951479b7daad1d38b506ede4f8a1676 Mon Sep 17 00:00:00 2001 +From: John Crispin +Date: Sun, 16 Mar 2014 04:38:07 +0000 +Subject: [PATCH 105/133] MIPS: ralink: add missing clk_set_rate() to clk.c + +Signed-off-by: John Crispin +--- + arch/mips/ralink/clk.c | 6 ++++++ + 1 file changed, 6 insertions(+) + +--- a/arch/mips/ralink/clk.c ++++ b/arch/mips/ralink/clk.c +@@ -56,6 +56,12 @@ unsigned long clk_get_rate(struct clk *c + } + EXPORT_SYMBOL_GPL(clk_get_rate); + ++int clk_set_rate(struct clk *clk, unsigned long rate) ++{ ++ return -1; ++} ++EXPORT_SYMBOL_GPL(clk_set_rate); ++ + void __init plat_time_init(void) + { + struct clk *clk; diff --git a/target/linux/ramips/patches-3.10/0105-MIPS-ralink-workaround-DTB-memory-issue.patch b/target/linux/ramips/patches-3.10/0105-MIPS-ralink-workaround-DTB-memory-issue.patch deleted file mode 100644 index 662b356304..0000000000 --- a/target/linux/ramips/patches-3.10/0105-MIPS-ralink-workaround-DTB-memory-issue.patch +++ /dev/null @@ -1,24 +0,0 @@ -From 070a389ae536a75b9184784f625949c215c533b6 Mon Sep 17 00:00:00 2001 -From: John Crispin -Date: Thu, 23 May 2013 18:50:56 +0200 -Subject: [PATCH 09/33] MIPS: ralink: workaround DTB memory issue - -If the DTB is too big a bug happens on boot when init ram is freed. -This is a temporary fix until the real cause is found. - -Signed-off-by: John Crispin ---- - arch/mips/ralink/of.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/arch/mips/ralink/of.c -+++ b/arch/mips/ralink/of.c -@@ -74,7 +74,7 @@ void __init device_tree_init(void) - unflatten_device_tree(); - - /* free the space reserved for the dt blob */ -- free_bootmem(base, size); -+ //free_bootmem(base, size); - } - - void __init plat_mem_setup(void) diff --git a/target/linux/ramips/patches-3.10/0106-MIPS-ralink-add-support-for-MT7620n.patch b/target/linux/ramips/patches-3.10/0106-MIPS-ralink-add-support-for-MT7620n.patch new file mode 100644 index 0000000000..bb1ff7dc61 --- /dev/null +++ b/target/linux/ramips/patches-3.10/0106-MIPS-ralink-add-support-for-MT7620n.patch @@ -0,0 +1,66 @@ +From 45ba0675286e2a71f6a577833ab13b951bb7e31a Mon Sep 17 00:00:00 2001 +From: John Crispin +Date: Sun, 16 Mar 2014 04:40:02 +0000 +Subject: [PATCH 106/133] MIPS: ralink: add support for MT7620n + +This is the small version of MT7620a. 
+ +Signed-off-by: John Crispin +--- + arch/mips/include/asm/mach-ralink/mt7620.h | 7 ++----- + arch/mips/ralink/mt7620.c | 19 ++++++++++++------- + 2 files changed, 14 insertions(+), 12 deletions(-) + +--- a/arch/mips/include/asm/mach-ralink/mt7620.h ++++ b/arch/mips/include/asm/mach-ralink/mt7620.h +@@ -24,11 +24,8 @@ + #define SYSC_REG_CPLL_CONFIG0 0x54 + #define SYSC_REG_CPLL_CONFIG1 0x58 + +-#define MT7620N_CHIP_NAME0 0x33365452 +-#define MT7620N_CHIP_NAME1 0x20203235 +- +-#define MT7620A_CHIP_NAME0 0x3637544d +-#define MT7620A_CHIP_NAME1 0x20203032 ++#define MT7620_CHIP_NAME0 0x3637544d ++#define MT7620_CHIP_NAME1 0x20203032 + + #define CHIP_REV_PKG_MASK 0x1 + #define CHIP_REV_PKG_SHIFT 16 +--- a/arch/mips/ralink/mt7620.c ++++ b/arch/mips/ralink/mt7620.c +@@ -226,22 +226,27 @@ void prom_soc_init(struct ralink_soc_inf + u32 cfg0; + u32 pmu0; + u32 pmu1; ++ u32 bga; + + n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0); + n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1); ++ rev = __raw_readl(sysc + SYSC_REG_CHIP_REV); ++ bga = (rev >> CHIP_REV_PKG_SHIFT) & CHIP_REV_PKG_MASK; + +- if (n0 == MT7620N_CHIP_NAME0 && n1 == MT7620N_CHIP_NAME1) { +- name = "MT7620N"; +- soc_info->compatible = "ralink,mt7620n-soc"; +- } else if (n0 == MT7620A_CHIP_NAME0 && n1 == MT7620A_CHIP_NAME1) { ++ if (n0 != MT7620_CHIP_NAME0 || n1 != MT7620_CHIP_NAME1) ++ panic("mt7620: unknown SoC, n0:%08x n1:%08x\n", n0, n1); ++ ++ if (bga) { + name = "MT7620A"; + soc_info->compatible = "ralink,mt7620a-soc"; + } else { +- panic("mt7620: unknown SoC, n0:%08x n1:%08x\n", n0, n1); ++ name = "MT7620N"; ++ soc_info->compatible = "ralink,mt7620n-soc"; ++#ifdef CONFIG_PCI ++ panic("mt7620n is only supported for non pci kernels"); ++#endif + } + +- rev = __raw_readl(sysc + SYSC_REG_CHIP_REV); +- + snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN, + "Ralink %s ver:%u eco:%u", + name, diff --git a/target/linux/ramips/patches-3.10/0106-USB-dwc2.patch b/target/linux/ramips/patches-3.10/0106-USB-dwc2.patch deleted file mode 100644 index 90e8ee5e6b..0000000000 --- a/target/linux/ramips/patches-3.10/0106-USB-dwc2.patch +++ /dev/null @@ -1,19 +0,0 @@ ---- a/drivers/staging/dwc2/hcd.c -+++ b/drivers/staging/dwc2/hcd.c -@@ -47,6 +47,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -2712,6 +2713,8 @@ int dwc2_hcd_init(struct dwc2_hsotg *hso - - dev_dbg(hsotg->dev, "DWC OTG HCD INIT\n"); - -+ device_reset(hsotg->dev); -+ - /* - * Attempt to ensure this device is really a DWC_otg Controller. - * Read and verify the GSNPSID register contents. The value should be diff --git a/target/linux/ramips/patches-3.10/0107-MIPS-ralink-allow-manual-memory-override.patch b/target/linux/ramips/patches-3.10/0107-MIPS-ralink-allow-manual-memory-override.patch new file mode 100644 index 0000000000..4fb2b2be37 --- /dev/null +++ b/target/linux/ramips/patches-3.10/0107-MIPS-ralink-allow-manual-memory-override.patch @@ -0,0 +1,45 @@ +From ee46d05eefefb0fb40b5682b4f6f3876b496044b Mon Sep 17 00:00:00 2001 +From: John Crispin +Date: Sun, 16 Mar 2014 04:40:48 +0000 +Subject: [PATCH 107/133] MIPS: ralink: allow manual memory override + +RT5350 relies on the bootloader setting up the memc correctly. +On sme boards the setup is incorrect leading to 32 MB being available but only 16 being recognized. Allow these boards to manually override the memory range +. 
+Signed-off-by: John Crispin +--- + arch/mips/ralink/of.c | 16 +++++++++++++++- + 1 file changed, 15 insertions(+), 1 deletion(-) + +--- a/arch/mips/ralink/of.c ++++ b/arch/mips/ralink/of.c +@@ -77,6 +77,17 @@ void __init device_tree_init(void) + //free_bootmem(base, size); + } + ++static int memory_dtb; ++ ++static int __init early_init_dt_find_memory(unsigned long node, const char *uname, ++ int depth, void *data) ++{ ++ if (depth == 1 && !strcmp(uname, "memory@0")) ++ memory_dtb = 1; ++ ++ return 0; ++} ++ + void __init plat_mem_setup(void) + { + set_io_port_base(KSEG1); +@@ -87,7 +98,10 @@ void __init plat_mem_setup(void) + */ + __dt_setup_arch(&__dtb_start); + +- if (soc_info.mem_size) ++ of_scan_flat_dt(early_init_dt_find_memory, NULL); ++ if (memory_dtb) ++ of_scan_flat_dt(early_init_dt_scan_memory, NULL); ++ else if (soc_info.mem_size) + add_memory_region(soc_info.mem_base, soc_info.mem_size * SZ_1M, + BOOT_MEM_RAM); + else diff --git a/target/linux/ramips/patches-3.10/0107-PCI-MIPS-adds-rt2880-pci-support.patch b/target/linux/ramips/patches-3.10/0107-PCI-MIPS-adds-rt2880-pci-support.patch deleted file mode 100644 index 7136f15852..0000000000 --- a/target/linux/ramips/patches-3.10/0107-PCI-MIPS-adds-rt2880-pci-support.patch +++ /dev/null @@ -1,319 +0,0 @@ -From 5d57ace094803c95230643941a47d749ff81d022 Mon Sep 17 00:00:00 2001 -From: John Crispin -Date: Thu, 21 Mar 2013 18:27:29 +0100 -Subject: [PATCH 11/33] PCI: MIPS: adds rt2880 pci support - -Add support for the pci found on the rt2880 SoC. - -Signed-off-by: John Crispin ---- - arch/mips/pci/Makefile | 1 + - arch/mips/pci/pci-rt2880.c | 281 ++++++++++++++++++++++++++++++++++++++++++++ - arch/mips/ralink/Kconfig | 1 + - 3 files changed, 283 insertions(+) - create mode 100644 arch/mips/pci/pci-rt2880.c - ---- a/arch/mips/pci/Makefile -+++ b/arch/mips/pci/Makefile -@@ -41,6 +41,7 @@ obj-$(CONFIG_SIBYTE_BCM1x80) += pci-bcm1 - obj-$(CONFIG_SNI_RM) += fixup-sni.o ops-sni.o - obj-$(CONFIG_LANTIQ) += fixup-lantiq.o - obj-$(CONFIG_PCI_LANTIQ) += pci-lantiq.o ops-lantiq.o -+obj-$(CONFIG_SOC_RT2880) += pci-rt2880.o - obj-$(CONFIG_SOC_RT3883) += pci-rt3883.o - obj-$(CONFIG_TANBAC_TB0219) += fixup-tb0219.o - obj-$(CONFIG_TANBAC_TB0226) += fixup-tb0226.o ---- /dev/null -+++ b/arch/mips/pci/pci-rt2880.c -@@ -0,0 +1,281 @@ -+/* -+ * Ralink RT288x SoC PCI register definitions -+ * -+ * Copyright (C) 2009 John Crispin -+ * Copyright (C) 2009 Gabor Juhos -+ * -+ * Parts of this file are based on Ralink's 2.6.21 BSP -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+ -+#define RT2880_PCI_BASE 0x00440000 -+#define RT288X_CPU_IRQ_PCI 4 -+ -+#define RT2880_PCI_MEM_BASE 0x20000000 -+#define RT2880_PCI_MEM_SIZE 0x10000000 -+#define RT2880_PCI_IO_BASE 0x00460000 -+#define RT2880_PCI_IO_SIZE 0x00010000 -+ -+#define RT2880_PCI_REG_PCICFG_ADDR 0x00 -+#define RT2880_PCI_REG_PCIMSK_ADDR 0x0c -+#define RT2880_PCI_REG_BAR0SETUP_ADDR 0x10 -+#define RT2880_PCI_REG_IMBASEBAR0_ADDR 0x18 -+#define RT2880_PCI_REG_CONFIG_ADDR 0x20 -+#define RT2880_PCI_REG_CONFIG_DATA 0x24 -+#define RT2880_PCI_REG_MEMBASE 0x28 -+#define RT2880_PCI_REG_IOBASE 0x2c -+#define RT2880_PCI_REG_ID 0x30 -+#define RT2880_PCI_REG_CLASS 0x34 -+#define RT2880_PCI_REG_SUBID 0x38 -+#define RT2880_PCI_REG_ARBCTL 0x80 -+ -+static void __iomem *rt2880_pci_base; -+static DEFINE_SPINLOCK(rt2880_pci_lock); -+ -+static u32 rt2880_pci_reg_read(u32 reg) -+{ -+ return readl(rt2880_pci_base + reg); -+} -+ -+static void rt2880_pci_reg_write(u32 val, u32 reg) -+{ -+ writel(val, rt2880_pci_base + reg); -+} -+ -+static inline u32 rt2880_pci_get_cfgaddr(unsigned int bus, unsigned int slot, -+ unsigned int func, unsigned int where) -+{ -+ return ((bus << 16) | (slot << 11) | (func << 8) | (where & 0xfc) | -+ 0x80000000); -+} -+ -+static int rt2880_pci_config_read(struct pci_bus *bus, unsigned int devfn, -+ int where, int size, u32 *val) -+{ -+ unsigned long flags; -+ u32 address; -+ u32 data; -+ -+ address = rt2880_pci_get_cfgaddr(bus->number, PCI_SLOT(devfn), -+ PCI_FUNC(devfn), where); -+ -+ spin_lock_irqsave(&rt2880_pci_lock, flags); -+ rt2880_pci_reg_write(address, RT2880_PCI_REG_CONFIG_ADDR); -+ data = rt2880_pci_reg_read(RT2880_PCI_REG_CONFIG_DATA); -+ spin_unlock_irqrestore(&rt2880_pci_lock, flags); -+ -+ switch (size) { -+ case 1: -+ *val = (data >> ((where & 3) << 3)) & 0xff; -+ break; -+ case 2: -+ *val = (data >> ((where & 3) << 3)) & 0xffff; -+ break; -+ case 4: -+ *val = data; -+ break; -+ } -+ -+ return PCIBIOS_SUCCESSFUL; -+} -+ -+static int rt2880_pci_config_write(struct pci_bus *bus, unsigned int devfn, -+ int where, int size, u32 val) -+{ -+ unsigned long flags; -+ u32 address; -+ u32 data; -+ -+ address = rt2880_pci_get_cfgaddr(bus->number, PCI_SLOT(devfn), -+ PCI_FUNC(devfn), where); -+ -+ spin_lock_irqsave(&rt2880_pci_lock, flags); -+ rt2880_pci_reg_write(address, RT2880_PCI_REG_CONFIG_ADDR); -+ data = rt2880_pci_reg_read(RT2880_PCI_REG_CONFIG_DATA); -+ -+ switch (size) { -+ case 1: -+ data = (data & ~(0xff << ((where & 3) << 3))) | -+ (val << ((where & 3) << 3)); -+ break; -+ case 2: -+ data = (data & ~(0xffff << ((where & 3) << 3))) | -+ (val << ((where & 3) << 3)); -+ break; -+ case 4: -+ data = val; -+ break; -+ } -+ -+ rt2880_pci_reg_write(data, RT2880_PCI_REG_CONFIG_DATA); -+ spin_unlock_irqrestore(&rt2880_pci_lock, flags); -+ -+ return PCIBIOS_SUCCESSFUL; -+} -+ -+static struct pci_ops rt2880_pci_ops = { -+ .read = rt2880_pci_config_read, -+ .write = rt2880_pci_config_write, -+}; -+ -+static struct resource rt2880_pci_mem_resource = { -+ .name = "PCI MEM space", -+ .start = RT2880_PCI_MEM_BASE, -+ .end = RT2880_PCI_MEM_BASE + RT2880_PCI_MEM_SIZE - 1, -+ .flags = IORESOURCE_MEM, -+}; -+ -+static struct resource rt2880_pci_io_resource = { -+ .name = "PCI IO space", -+ .start = RT2880_PCI_IO_BASE, -+ .end = RT2880_PCI_IO_BASE + RT2880_PCI_IO_SIZE - 1, -+ .flags = IORESOURCE_IO, -+}; -+ -+static struct pci_controller rt2880_pci_controller = { -+ .pci_ops = &rt2880_pci_ops, -+ 
.mem_resource = &rt2880_pci_mem_resource, -+ .io_resource = &rt2880_pci_io_resource, -+}; -+ -+static inline u32 rt2880_pci_read_u32(unsigned long reg) -+{ -+ unsigned long flags; -+ u32 address; -+ u32 ret; -+ -+ address = rt2880_pci_get_cfgaddr(0, 0, 0, reg); -+ -+ spin_lock_irqsave(&rt2880_pci_lock, flags); -+ rt2880_pci_reg_write(address, RT2880_PCI_REG_CONFIG_ADDR); -+ ret = rt2880_pci_reg_read(RT2880_PCI_REG_CONFIG_DATA); -+ spin_unlock_irqrestore(&rt2880_pci_lock, flags); -+ -+ return ret; -+} -+ -+static inline void rt2880_pci_write_u32(unsigned long reg, u32 val) -+{ -+ unsigned long flags; -+ u32 address; -+ -+ address = rt2880_pci_get_cfgaddr(0, 0, 0, reg); -+ -+ spin_lock_irqsave(&rt2880_pci_lock, flags); -+ rt2880_pci_reg_write(address, RT2880_PCI_REG_CONFIG_ADDR); -+ rt2880_pci_reg_write(val, RT2880_PCI_REG_CONFIG_DATA); -+ spin_unlock_irqrestore(&rt2880_pci_lock, flags); -+} -+ -+int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) -+{ -+ u16 cmd; -+ int irq = -1; -+ -+ if (dev->bus->number != 0) -+ return irq; -+ -+ switch (PCI_SLOT(dev->devfn)) { -+ case 0x00: -+ rt2880_pci_write_u32(PCI_BASE_ADDRESS_0, 0x08000000); -+ (void) rt2880_pci_read_u32(PCI_BASE_ADDRESS_0); -+ break; -+ case 0x11: -+ irq = RT288X_CPU_IRQ_PCI; -+ break; -+ default: -+ printk("%s:%s[%d] trying to alloc unknown pci irq\n", -+ __FILE__, __func__, __LINE__); -+ BUG(); -+ break; -+ } -+ -+ pci_write_config_byte((struct pci_dev*)dev, PCI_CACHE_LINE_SIZE, 0x14); -+ pci_write_config_byte((struct pci_dev*)dev, PCI_LATENCY_TIMER, 0xFF); -+ pci_read_config_word((struct pci_dev*)dev, PCI_COMMAND, &cmd); -+ cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY | -+ PCI_COMMAND_INVALIDATE | PCI_COMMAND_FAST_BACK | -+ PCI_COMMAND_SERR | PCI_COMMAND_WAIT | PCI_COMMAND_PARITY; -+ pci_write_config_word((struct pci_dev*)dev, PCI_COMMAND, cmd); -+ pci_write_config_byte((struct pci_dev*)dev, PCI_INTERRUPT_LINE, -+ dev->irq); -+ return irq; -+} -+ -+static int rt288x_pci_probe(struct platform_device *pdev) -+{ -+ void __iomem *io_map_base; -+ int i; -+ -+ rt2880_pci_base = ioremap_nocache(RT2880_PCI_BASE, PAGE_SIZE); -+ -+ io_map_base = ioremap(RT2880_PCI_IO_BASE, RT2880_PCI_IO_SIZE); -+ rt2880_pci_controller.io_map_base = (unsigned long) io_map_base; -+ set_io_port_base((unsigned long) io_map_base); -+ -+ ioport_resource.start = RT2880_PCI_IO_BASE; -+ ioport_resource.end = RT2880_PCI_IO_BASE + RT2880_PCI_IO_SIZE - 1; -+ -+ rt2880_pci_reg_write(0, RT2880_PCI_REG_PCICFG_ADDR); -+ for(i = 0; i < 0xfffff; i++) {} -+ -+ rt2880_pci_reg_write(0x79, RT2880_PCI_REG_ARBCTL); -+ rt2880_pci_reg_write(0x07FF0001, RT2880_PCI_REG_BAR0SETUP_ADDR); -+ rt2880_pci_reg_write(RT2880_PCI_MEM_BASE, RT2880_PCI_REG_MEMBASE); -+ rt2880_pci_reg_write(RT2880_PCI_IO_BASE, RT2880_PCI_REG_IOBASE); -+ rt2880_pci_reg_write(0x08000000, RT2880_PCI_REG_IMBASEBAR0_ADDR); -+ rt2880_pci_reg_write(0x08021814, RT2880_PCI_REG_ID); -+ rt2880_pci_reg_write(0x00800001, RT2880_PCI_REG_CLASS); -+ rt2880_pci_reg_write(0x28801814, RT2880_PCI_REG_SUBID); -+ rt2880_pci_reg_write(0x000c0000, RT2880_PCI_REG_PCIMSK_ADDR); -+ -+ rt2880_pci_write_u32(PCI_BASE_ADDRESS_0, 0x08000000); -+ (void) rt2880_pci_read_u32(PCI_BASE_ADDRESS_0); -+ -+ register_pci_controller(&rt2880_pci_controller); -+ return 0; -+} -+ -+int pcibios_plat_dev_init(struct pci_dev *dev) -+{ -+ return 0; -+} -+ -+static const struct of_device_id rt288x_pci_match[] = { -+ { .compatible = "ralink,rt288x-pci" }, -+ {}, -+}; -+MODULE_DEVICE_TABLE(of, rt288x_pci_match); -+ -+static 
struct platform_driver rt288x_pci_driver = { -+ .probe = rt288x_pci_probe, -+ .driver = { -+ .name = "rt288x-pci", -+ .owner = THIS_MODULE, -+ .of_match_table = rt288x_pci_match, -+ }, -+}; -+ -+int __init pcibios_init(void) -+{ -+ int ret = platform_driver_register(&rt288x_pci_driver); -+ if (ret) -+ pr_info("rt288x-pci: Error registering platform driver!"); -+ return ret; -+} -+ -+arch_initcall(pcibios_init); ---- a/arch/mips/ralink/Kconfig -+++ b/arch/mips/ralink/Kconfig -@@ -15,6 +15,7 @@ choice - - config SOC_RT288X - bool "RT288x" -+ select HW_HAS_PCI - - config SOC_RT305X - bool "RT305x" diff --git a/target/linux/ramips/patches-3.10/0108-MIPS-ralink-add-rt_sysc_m32-helper.patch b/target/linux/ramips/patches-3.10/0108-MIPS-ralink-add-rt_sysc_m32-helper.patch new file mode 100644 index 0000000000..6d9c0d3a5d --- /dev/null +++ b/target/linux/ramips/patches-3.10/0108-MIPS-ralink-add-rt_sysc_m32-helper.patch @@ -0,0 +1,26 @@ +From 1fe4d719d1c973c01f4b6a4c0de47bfac77e3eca Mon Sep 17 00:00:00 2001 +From: John Crispin +Date: Sun, 19 May 2013 00:42:23 +0200 +Subject: [PATCH 108/133] MIPS: ralink: add rt_sysc_m32 helper + +Signed-off-by: John Crispin +--- + arch/mips/include/asm/mach-ralink/ralink_regs.h | 7 +++++++ + 1 file changed, 7 insertions(+) + +--- a/arch/mips/include/asm/mach-ralink/ralink_regs.h ++++ b/arch/mips/include/asm/mach-ralink/ralink_regs.h +@@ -26,6 +26,13 @@ static inline u32 rt_sysc_r32(unsigned r + return __raw_readl(rt_sysc_membase + reg); + } + ++static inline void rt_sysc_m32(u32 clr, u32 set, unsigned reg) ++{ ++ u32 val = rt_sysc_r32(reg) & ~clr; ++ ++ __raw_writel(val | set, rt_sysc_membase + reg); ++} ++ + static inline void rt_memc_w32(u32 val, unsigned reg) + { + __raw_writel(val, rt_memc_membase + reg); diff --git a/target/linux/ramips/patches-3.10/0108-PCI-MIPS-adds-mt7620a-pcie-driver.patch b/target/linux/ramips/patches-3.10/0108-PCI-MIPS-adds-mt7620a-pcie-driver.patch deleted file mode 100644 index 2f883b302d..0000000000 --- a/target/linux/ramips/patches-3.10/0108-PCI-MIPS-adds-mt7620a-pcie-driver.patch +++ /dev/null @@ -1,399 +0,0 @@ -From ded577553b06a85c12a89b8fbcfa2b51f30bc037 Mon Sep 17 00:00:00 2001 -From: John Crispin -Date: Sat, 18 May 2013 22:06:15 +0200 -Subject: [PATCH 13/33] PCI: MIPS: adds mt7620a pcie driver - -Signed-off-by: John Crispin ---- - arch/mips/pci/Makefile | 1 + - arch/mips/pci/pci-mt7620a.c | 363 +++++++++++++++++++++++++++++++++++++++++++ - arch/mips/ralink/Kconfig | 1 + - 3 files changed, 365 insertions(+) - create mode 100644 arch/mips/pci/pci-mt7620a.c - ---- /dev/null -+++ b/arch/mips/pci/pci-mt7620a.c -@@ -0,0 +1,363 @@ -+/* -+ * Ralink MT7620A SoC PCI support -+ * -+ * Copyright (C) 2007-2013 Bruce Chang -+ * Copyright (C) 2013 John Crispin -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation. 
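A short worked example of the configuration-cycle address format used in this series (illustration only, not part of either driver): the rt2880 driver above packs bus/slot/function/register into one word with the enable bit set, and the mt7620a driver below uses the same layout plus the extended-register bits of "where" in bits 27:24.

    /* Worked example, mirroring rt2880_pci_get_cfgaddr() above. */
    static inline unsigned int cfg_addr(unsigned int bus, unsigned int slot,
                                        unsigned int func, unsigned int where)
    {
            return 0x80000000 | (bus << 16) | (slot << 11) |
                   (func << 8) | (where & 0xfc);
    }
    /* cfg_addr(0, 0x11, 0, 0x3c) == 0x8000883c, the value written to
     * RT2880_PCI_REG_CONFIG_ADDR before the data register is accessed.
     */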
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+ -+#define RALINK_PCI_MM_MAP_BASE 0x20000000 -+#define RALINK_PCI_IO_MAP_BASE 0x10160000 -+ -+#define RALINK_INT_PCIE0 4 -+#define RALINK_SYSTEM_CONTROL_BASE 0xb0000000 -+#define RALINK_SYSCFG1 0x14 -+#define RALINK_CLKCFG1 0x30 -+#define RALINK_GPIOMODE 0x60 -+#define RALINK_PCIE_CLK_GEN 0x7c -+#define RALINK_PCIE_CLK_GEN1 0x80 -+#define PCIEPHY0_CFG 0x90 -+#define PPLL_CFG1 0x9c -+#define PPLL_DRV 0xa0 -+#define RALINK_PCI_HOST_MODE_EN (1<<7) -+#define RALINK_PCIE_RC_MODE_EN (1<<8) -+#define RALINK_PCIE_RST (1<<23) -+#define RALINK_PCI_RST (1<<24) -+#define RALINK_PCI_CLK_EN (1<<19) -+#define RALINK_PCIE_CLK_EN (1<<21) -+#define PCI_SLOTx2 (1<<11) -+#define PCI_SLOTx1 (2<<11) -+#define PDRV_SW_SET (1<<31) -+#define LC_CKDRVPD_ (1<<19) -+ -+#define RALINK_PCI_CONFIG_ADDR 0x20 -+#define RALINK_PCI_CONFIG_DATA_VIRTUAL_REG 0x24 -+#define MEMORY_BASE 0x0 -+#define RALINK_PCIE0_RST (1<<26) -+#define RALINK_PCI_BASE 0xB0140000 -+#define RALINK_PCI_MEMBASE 0x28 -+#define RALINK_PCI_IOBASE 0x2C -+ -+#define RT6855_PCIE0_OFFSET 0x2000 -+ -+#define RALINK_PCI_PCICFG_ADDR 0x00 -+#define RALINK_PCI0_BAR0SETUP_ADDR 0x10 -+#define RALINK_PCI0_IMBASEBAR0_ADDR 0x18 -+#define RALINK_PCI0_ID 0x30 -+#define RALINK_PCI0_CLASS 0x34 -+#define RALINK_PCI0_SUBID 0x38 -+#define RALINK_PCI0_STATUS 0x50 -+#define RALINK_PCI_PCIMSK_ADDR 0x0C -+ -+#define RALINK_PCIE0_CLK_EN (1 << 26) -+ -+#define BUSY 0x80000000 -+#define WAITRETRY_MAX 10 -+#define WRITE_MODE (1UL << 23) -+#define DATA_SHIFT 0 -+#define ADDR_SHIFT 8 -+ -+ -+static void __iomem *bridge_base; -+static void __iomem *pcie_base; -+ -+static struct reset_control *rstpcie0; -+ -+static inline void bridge_w32(u32 val, unsigned reg) -+{ -+ iowrite32(val, bridge_base + reg); -+} -+ -+static inline u32 bridge_r32(unsigned reg) -+{ -+ return ioread32(bridge_base + reg); -+} -+ -+static inline void pcie_w32(u32 val, unsigned reg) -+{ -+ iowrite32(val, pcie_base + reg); -+} -+ -+static inline u32 pcie_r32(unsigned reg) -+{ -+ return ioread32(pcie_base + reg); -+} -+ -+static inline void pcie_m32(u32 clr, u32 set, unsigned reg) -+{ -+ u32 val = pcie_r32(reg); -+ val &= ~clr; -+ val |= set; -+ pcie_w32(val, reg); -+} -+ -+int wait_pciephy_busy(void) -+{ -+ unsigned long reg_value = 0x0, retry = 0; -+ -+ while (1) { -+ //reg_value = rareg(READMODE, PCIEPHY0_CFG, 0); -+ reg_value = pcie_r32(PCIEPHY0_CFG); -+ -+ if (reg_value & BUSY) -+ mdelay(100); -+ else -+ break; -+ if (retry++ > WAITRETRY_MAX){ -+ printk("PCIE-PHY retry failed.\n"); -+ return -1; -+ } -+ } -+ return 0; -+} -+ -+static void pcie_phy(unsigned long addr, unsigned long val) -+{ -+ wait_pciephy_busy(); -+ pcie_w32(WRITE_MODE | (val << DATA_SHIFT) | (addr << ADDR_SHIFT), PCIEPHY0_CFG); -+ mdelay(1); -+ wait_pciephy_busy(); -+} -+ -+static int pci_config_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 * val) -+{ -+ unsigned int slot = PCI_SLOT(devfn); -+ u8 func = PCI_FUNC(devfn); -+ u32 address; -+ u32 data; -+ -+ address = (((where & 0xF00) >> 8) << 24) | (bus->number << 16) | (slot << 11) | (func << 8) | (where & 0xfc) | 0x80000000; -+ bridge_w32(address, RALINK_PCI_CONFIG_ADDR); -+ data = bridge_r32(RALINK_PCI_CONFIG_DATA_VIRTUAL_REG); -+ -+ switch (size) { -+ case 1: -+ *val = (data >> ((where & 3) << 3)) & 0xff; -+ break; -+ case 2: -+ *val = (data >> ((where & 3) << 3)) & 0xffff; -+ break; -+ case 4: -+ *val = data; -+ 
break; -+ } -+ -+ return PCIBIOS_SUCCESSFUL; -+} -+ -+static int pci_config_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) -+{ -+ unsigned int slot = PCI_SLOT(devfn); -+ u8 func = PCI_FUNC(devfn); -+ u32 address; -+ u32 data; -+ -+ address = (((where & 0xF00) >> 8) << 24) | (bus->number << 16) | (slot << 11) | (func << 8) | (where & 0xfc) | 0x80000000; -+ bridge_w32(address, RALINK_PCI_CONFIG_ADDR); -+ data = bridge_r32(RALINK_PCI_CONFIG_DATA_VIRTUAL_REG); -+ -+ switch (size) { -+ case 1: -+ data = (data & ~(0xff << ((where & 3) << 3))) | -+ (val << ((where & 3) << 3)); -+ break; -+ case 2: -+ data = (data & ~(0xffff << ((where & 3) << 3))) | -+ (val << ((where & 3) << 3)); -+ break; -+ case 4: -+ data = val; -+ break; -+ } -+ -+ bridge_w32(data, RALINK_PCI_CONFIG_DATA_VIRTUAL_REG); -+ -+ return PCIBIOS_SUCCESSFUL; -+} -+ -+struct pci_ops mt7620a_pci_ops= { -+ .read = pci_config_read, -+ .write = pci_config_write, -+}; -+ -+static struct resource mt7620a_res_pci_mem1 = { -+ .name = "pci memory", -+ .start = RALINK_PCI_MM_MAP_BASE, -+ .end = (u32) ((RALINK_PCI_MM_MAP_BASE + (unsigned char *)0x0fffffff)), -+ .flags = IORESOURCE_MEM, -+}; -+static struct resource mt7620a_res_pci_io1 = { -+ .name = "pci io", -+ .start = RALINK_PCI_IO_MAP_BASE, -+ .end = (u32) ((RALINK_PCI_IO_MAP_BASE + (unsigned char *)0x0ffff)), -+ .flags = IORESOURCE_IO, -+}; -+ -+struct pci_controller mt7620a_controller = { -+ .pci_ops = &mt7620a_pci_ops, -+ .mem_resource = &mt7620a_res_pci_mem1, -+ .io_resource = &mt7620a_res_pci_io1, -+ .mem_offset = 0x00000000UL, -+ .io_offset = 0x00000000UL, -+ .io_map_base = 0xa0000000, -+}; -+ -+static int mt7620a_pci_probe(struct platform_device *pdev) -+{ -+ struct resource *bridge_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -+ struct resource *pcie_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); -+ -+ rstpcie0 = devm_reset_control_get(&pdev->dev, "pcie0"); -+ if (IS_ERR(rstpcie0)) -+ return PTR_ERR(rstpcie0); -+ -+ bridge_base = devm_request_and_ioremap(&pdev->dev, bridge_res); -+ if (!bridge_base) -+ return -ENOMEM; -+ -+ pcie_base = devm_request_and_ioremap(&pdev->dev, pcie_res); -+ if (!pcie_base) -+ return -ENOMEM; -+ -+ iomem_resource.start = 0; -+ iomem_resource.end= ~0; -+ ioport_resource.start= 0; -+ ioport_resource.end = ~0; -+ -+ /* PCIE: bypass PCIe DLL */ -+ pcie_phy(0x0, 0x80); -+ pcie_phy(0x1, 0x04); -+ /* PCIE: Elastic buffer control */ -+ pcie_phy(0x68, 0xB4); -+ -+ reset_control_assert(rstpcie0); -+ rt_sysc_m32(RALINK_PCIE0_CLK_EN, 0, RALINK_CLKCFG1); -+ rt_sysc_m32(1<<19, 1<<31, PPLL_DRV); -+ rt_sysc_m32(0x3 << 16, 0, RALINK_GPIOMODE); -+ -+ reset_control_deassert(rstpcie0); -+ rt_sysc_m32(0, RALINK_PCIE0_CLK_EN, RALINK_CLKCFG1); -+ -+ mdelay(100); -+ -+ if (!(rt_sysc_r32(PPLL_CFG1) & 1<<23)) { -+ printk("MT7620 PPLL unlock\n"); -+ reset_control_assert(rstpcie0); -+ rt_sysc_m32(BIT(26), 0, RALINK_CLKCFG1); -+ return 0; -+ } -+ rt_sysc_m32((0x1<<18) | (0x1<<17), (0x1 << 19) | (0x1 << 31), PPLL_DRV); -+ -+ mdelay(100); -+ reset_control_assert(rstpcie0); -+ rt_sysc_m32(0x30, 2 << 4, RALINK_SYSCFG1); -+ -+ rt_sysc_m32(~0x7fffffff, 0x80000000, RALINK_PCIE_CLK_GEN); -+ rt_sysc_m32(~0x80ffffff, 0xa << 24, RALINK_PCIE_CLK_GEN1); -+ -+ mdelay(50); -+ reset_control_deassert(rstpcie0); -+ pcie_m32(BIT(1), 0, RALINK_PCI_PCICFG_ADDR); -+ mdelay(100); -+ -+ if (( pcie_r32(RALINK_PCI0_STATUS) & 0x1) == 0) { -+ reset_control_assert(rstpcie0); -+ rt_sysc_m32(RALINK_PCIE0_CLK_EN, 0, RALINK_CLKCFG1); -+ rt_sysc_m32(LC_CKDRVPD_, 
PDRV_SW_SET, PPLL_DRV); -+ printk("PCIE0 no card, disable it(RST&CLK)\n"); -+ } -+ -+ bridge_w32(0xffffffff, RALINK_PCI_MEMBASE); -+ bridge_w32(RALINK_PCI_IO_MAP_BASE, RALINK_PCI_IOBASE); -+ -+ pcie_w32(0x7FFF0000, RALINK_PCI0_BAR0SETUP_ADDR); -+ pcie_w32(MEMORY_BASE, RALINK_PCI0_IMBASEBAR0_ADDR); -+ pcie_w32(0x08021814, RALINK_PCI0_ID); -+ pcie_w32(0x06040001, RALINK_PCI0_CLASS); -+ pcie_w32(0x28801814, RALINK_PCI0_SUBID); -+ pcie_m32(0, BIT(20), RALINK_PCI_PCIMSK_ADDR); -+ -+ register_pci_controller(&mt7620a_controller); -+ -+ return 0; -+} -+ -+int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) -+{ -+ const struct resource *res; -+ u16 cmd; -+ u32 val; -+ int i, irq = 0; -+ -+ if ((dev->bus->number == 0) && (slot == 0)) { -+ pcie_w32(0x7FFF0001, RALINK_PCI0_BAR0SETUP_ADDR); //open 7FFF:2G; ENABLE -+ pci_config_write(dev->bus, 0, PCI_BASE_ADDRESS_0, 4, MEMORY_BASE); -+ pci_config_read(dev->bus, 0, PCI_BASE_ADDRESS_0, 4, &val); -+ } else if ((dev->bus->number == 1) && (slot == 0x0)) { -+ irq = RALINK_INT_PCIE0; -+ } else { -+ printk("bus=0x%x, slot = 0x%x\n", dev->bus->number, slot); -+ return 0; -+ } -+ -+ for (i = 0; i < 6; i++) { -+ res = &dev->resource[i]; -+ } -+ -+ pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, 0x14); //configure cache line size 0x14 -+ pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0xFF); //configure latency timer 0x10 -+ pci_read_config_word(dev, PCI_COMMAND, &cmd); -+ -+ // FIXME -+ cmd = cmd | PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY; -+ pci_write_config_word(dev, PCI_COMMAND, cmd); -+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq); -+ //pci_write_config_byte(dev, PCI_INTERRUPT_PIN, dev->irq); -+ -+ return irq; -+} -+ -+int pcibios_plat_dev_init(struct pci_dev *dev) -+{ -+ return 0; -+} -+ -+static const struct of_device_id mt7620a_pci_ids[] = { -+ { .compatible = "ralink,mt7620a-pci" }, -+ {}, -+}; -+MODULE_DEVICE_TABLE(of, mt7620a_pci_ids); -+ -+static struct platform_driver mt7620a_pci_driver = { -+ .probe = mt7620a_pci_probe, -+ .driver = { -+ .name = "mt7620a-pci", -+ .owner = THIS_MODULE, -+ .of_match_table = of_match_ptr(mt7620a_pci_ids), -+ }, -+}; -+ -+static int __init mt7620a_pci_init(void) -+{ -+ return platform_driver_register(&mt7620a_pci_driver); -+} -+ -+arch_initcall(mt7620a_pci_init); ---- a/arch/mips/ralink/Kconfig -+++ b/arch/mips/ralink/Kconfig -@@ -33,6 +33,7 @@ choice - bool "MT7620" - select USB_ARCH_HAS_OHCI - select USB_ARCH_HAS_EHCI -+ select HW_HAS_PCI - - endchoice - ---- a/arch/mips/pci/Makefile -+++ b/arch/mips/pci/Makefile -@@ -41,6 +41,7 @@ obj-$(CONFIG_SIBYTE_BCM1x80) += pci-bcm1 - obj-$(CONFIG_SNI_RM) += fixup-sni.o ops-sni.o - obj-$(CONFIG_LANTIQ) += fixup-lantiq.o - obj-$(CONFIG_PCI_LANTIQ) += pci-lantiq.o ops-lantiq.o -+obj-$(CONFIG_SOC_MT7620) += pci-mt7620a.o - obj-$(CONFIG_SOC_RT2880) += pci-rt2880.o - obj-$(CONFIG_SOC_RT3883) += pci-rt3883.o - obj-$(CONFIG_TANBAC_TB0219) += fixup-tb0219.o diff --git a/target/linux/ramips/patches-3.10/0109-MIPS-ralink-add-pseudo-pwm-led-trigger-based-on-time.patch b/target/linux/ramips/patches-3.10/0109-MIPS-ralink-add-pseudo-pwm-led-trigger-based-on-time.patch new file mode 100644 index 0000000000..193374e91c --- /dev/null +++ b/target/linux/ramips/patches-3.10/0109-MIPS-ralink-add-pseudo-pwm-led-trigger-based-on-time.patch @@ -0,0 +1,301 @@ +From ca21f813087ca5a8b02ec00efcd9c3f3fbf3bc1f Mon Sep 17 00:00:00 2001 +From: John Crispin +Date: Sun, 24 Mar 2013 17:17:17 +0100 +Subject: [PATCH 109/133] MIPS: ralink: add pseudo pwm led trigger based 
on + timer0 + +Signed-off-by: John Crispin +--- + arch/mips/ralink/timer.c | 213 ++++++++++++++++++++++++++++++++++++++++++---- + 1 file changed, 197 insertions(+), 16 deletions(-) + +--- a/arch/mips/ralink/timer.c ++++ b/arch/mips/ralink/timer.c +@@ -12,6 +12,8 @@ + #include + #include + #include ++#include ++#include + + #include + +@@ -23,16 +25,34 @@ + + #define TMR0CTL_ENABLE BIT(7) + #define TMR0CTL_MODE_PERIODIC BIT(4) +-#define TMR0CTL_PRESCALER 1 ++#define TMR0CTL_PRESCALER 2 + #define TMR0CTL_PRESCALE_VAL (0xf - TMR0CTL_PRESCALER) + #define TMR0CTL_PRESCALE_DIV (65536 / BIT(TMR0CTL_PRESCALER)) + ++struct rt_timer_gpio { ++ struct list_head list; ++ struct led_classdev *led; ++}; ++ + struct rt_timer { +- struct device *dev; +- void __iomem *membase; +- int irq; +- unsigned long timer_freq; +- unsigned long timer_div; ++ struct device *dev; ++ void __iomem *membase; ++ int irq; ++ ++ unsigned long timer_freq; ++ unsigned long timer_div; ++ ++ struct list_head gpios; ++ struct led_trigger led_trigger; ++ unsigned int duty_cycle; ++ unsigned int duty; ++ ++ unsigned int fade; ++ unsigned int fade_min; ++ unsigned int fade_max; ++ unsigned int fade_speed; ++ unsigned int fade_dir; ++ unsigned int fade_count; + }; + + static inline void rt_timer_w32(struct rt_timer *rt, u8 reg, u32 val) +@@ -48,18 +68,46 @@ static inline u32 rt_timer_r32(struct rt + static irqreturn_t rt_timer_irq(int irq, void *_rt) + { + struct rt_timer *rt = (struct rt_timer *) _rt; ++ struct rt_timer_gpio *gpio; ++ unsigned int val; + +- rt_timer_w32(rt, TIMER_REG_TMR0LOAD, rt->timer_freq / rt->timer_div); ++ if (rt->fade && (rt->fade_count++ > rt->fade_speed)) { ++ rt->fade_count = 0; ++ if (rt->duty_cycle <= rt->fade_min) ++ rt->fade_dir = 1; ++ else if (rt->duty_cycle >= rt->fade_max) ++ rt->fade_dir = 0; ++ ++ if (rt->fade_dir) ++ rt->duty_cycle += 1; ++ else ++ rt->duty_cycle -= 1; ++ ++ } ++ ++ val = rt->timer_freq / rt->timer_div; ++ if (rt->duty) ++ val *= rt->duty_cycle; ++ else ++ val *= (100 - rt->duty_cycle); ++ val /= 100; ++ ++ if (!list_empty(&rt->gpios)) ++ list_for_each_entry(gpio, &rt->gpios, list) ++ led_set_brightness(gpio->led, !!rt->duty); ++ ++ rt->duty = !rt->duty; ++ ++ rt_timer_w32(rt, TIMER_REG_TMR0LOAD, val + 1); + rt_timer_w32(rt, TIMER_REG_TMRSTAT, TMRSTAT_TMR0INT); + + return IRQ_HANDLED; + } + +- + static int rt_timer_request(struct rt_timer *rt) + { +- int err = request_irq(rt->irq, rt_timer_irq, IRQF_DISABLED, +- dev_name(rt->dev), rt); ++ int err = devm_request_irq(rt->dev, rt->irq, rt_timer_irq, ++ IRQF_DISABLED, dev_name(rt->dev), rt); + if (err) { + dev_err(rt->dev, "failed to request irq\n"); + } else { +@@ -81,8 +129,6 @@ static int rt_timer_config(struct rt_tim + else + rt->timer_div = divisor; + +- rt_timer_w32(rt, TIMER_REG_TMR0LOAD, rt->timer_freq / rt->timer_div); +- + return 0; + } + +@@ -108,11 +154,128 @@ static void rt_timer_disable(struct rt_t + rt_timer_w32(rt, TIMER_REG_TMR0CTL, t); + } + ++static ssize_t led_fade_show(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ struct led_classdev *led_cdev = dev_get_drvdata(dev); ++ struct rt_timer *rt = container_of(led_cdev->trigger, struct rt_timer, led_trigger); ++ ++ return sprintf(buf, "speed: %d, min: %d, max: %d\n", rt->fade_speed, rt->fade_min, rt->fade_max); ++} ++ ++static ssize_t led_fade_store(struct device *dev, ++ struct device_attribute *attr, const char *buf, size_t size) ++{ ++ struct led_classdev *led_cdev = dev_get_drvdata(dev); ++ struct rt_timer *rt = 
container_of(led_cdev->trigger, struct rt_timer, led_trigger); ++ unsigned int speed = 0, min = 0, max = 0; ++ ssize_t ret = -EINVAL; ++ ++ ret = sscanf(buf, "%u %u %u", &speed, &min, &max); ++ ++ if (ret == 3) { ++ rt->fade_speed = speed; ++ rt->fade_min = min; ++ rt->fade_max = max; ++ rt->fade = 1; ++ } else { ++ rt->fade = 0; ++ } ++ ++ return size; ++} ++ ++static DEVICE_ATTR(fade, 0644, led_fade_show, led_fade_store); ++ ++static ssize_t led_duty_cycle_show(struct device *dev, ++ struct device_attribute *attr, char *buf) ++{ ++ struct led_classdev *led_cdev = dev_get_drvdata(dev); ++ struct rt_timer *rt = container_of(led_cdev->trigger, struct rt_timer, led_trigger); ++ ++ return sprintf(buf, "%u\n", rt->duty_cycle); ++} ++ ++static ssize_t led_duty_cycle_store(struct device *dev, ++ struct device_attribute *attr, const char *buf, size_t size) ++{ ++ struct led_classdev *led_cdev = dev_get_drvdata(dev); ++ struct rt_timer *rt = container_of(led_cdev->trigger, struct rt_timer, led_trigger); ++ unsigned long state; ++ ssize_t ret = -EINVAL; ++ ++ ret = kstrtoul(buf, 10, &state); ++ if (ret) ++ return ret; ++ ++ if (state <= 100) ++ rt->duty_cycle = state; ++ else ++ rt->duty_cycle = 100; ++ ++ rt->fade = 0; ++ ++ return size; ++} ++ ++static DEVICE_ATTR(duty_cycle, 0644, led_duty_cycle_show, led_duty_cycle_store); ++ ++static void rt_timer_trig_activate(struct led_classdev *led_cdev) ++{ ++ struct rt_timer *rt = container_of(led_cdev->trigger, struct rt_timer, led_trigger); ++ struct rt_timer_gpio *gpio_data; ++ int rc; ++ ++ led_cdev->trigger_data = NULL; ++ gpio_data = kzalloc(sizeof(*gpio_data), GFP_KERNEL); ++ if (!gpio_data) ++ return; ++ ++ rc = device_create_file(led_cdev->dev, &dev_attr_duty_cycle); ++ if (rc) ++ goto err_gpio; ++ rc = device_create_file(led_cdev->dev, &dev_attr_fade); ++ if (rc) ++ goto err_out_duty_cycle; ++ ++ led_cdev->activated = true; ++ led_cdev->trigger_data = gpio_data; ++ gpio_data->led = led_cdev; ++ list_add(&gpio_data->list, &rt->gpios); ++ led_cdev->trigger_data = gpio_data; ++ rt_timer_enable(rt); ++ return; ++ ++err_out_duty_cycle: ++ device_remove_file(led_cdev->dev, &dev_attr_duty_cycle); ++ ++err_gpio: ++ kfree(gpio_data); ++} ++ ++static void rt_timer_trig_deactivate(struct led_classdev *led_cdev) ++{ ++ struct rt_timer *rt = container_of(led_cdev->trigger, struct rt_timer, led_trigger); ++ struct rt_timer_gpio *gpio_data = (struct rt_timer_gpio*) led_cdev->trigger_data; ++ ++ if (led_cdev->activated) { ++ device_remove_file(led_cdev->dev, &dev_attr_duty_cycle); ++ device_remove_file(led_cdev->dev, &dev_attr_fade); ++ led_cdev->activated = false; ++ } ++ ++ list_del(&gpio_data->list); ++ rt_timer_disable(rt); ++ led_set_brightness(led_cdev, LED_OFF); ++} ++ + static int rt_timer_probe(struct platform_device *pdev) + { + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ const __be32 *divisor; + struct rt_timer *rt; + struct clk *clk; ++ int ret; + + rt = devm_kzalloc(&pdev->dev, sizeof(*rt), GFP_KERNEL); + if (!rt) { +@@ -140,12 +303,29 @@ static int rt_timer_probe(struct platfor + if (!rt->timer_freq) + return -EINVAL; + ++ rt->duty_cycle = 100; + rt->dev = &pdev->dev; + platform_set_drvdata(pdev, rt); + +- rt_timer_request(rt); +- rt_timer_config(rt, 2); +- rt_timer_enable(rt); ++ ret = rt_timer_request(rt); ++ if (ret) ++ return ret; ++ ++ divisor = of_get_property(pdev->dev.of_node, "ralink,divisor", NULL); ++ if (divisor) ++ rt_timer_config(rt, be32_to_cpu(*divisor)); ++ else ++ rt_timer_config(rt, 200); ++ ++ 
rt->led_trigger.name = "pwmtimer", ++ rt->led_trigger.activate = rt_timer_trig_activate, ++ rt->led_trigger.deactivate = rt_timer_trig_deactivate, ++ ++ ret = led_trigger_register(&rt->led_trigger); ++ if (ret) ++ return ret; ++ ++ INIT_LIST_HEAD(&rt->gpios); + + dev_info(&pdev->dev, "maximum frequncy is %luHz\n", rt->timer_freq); + +@@ -156,6 +336,7 @@ static int rt_timer_remove(struct platfo + { + struct rt_timer *rt = platform_get_drvdata(pdev); + ++ led_trigger_unregister(&rt->led_trigger); + rt_timer_disable(rt); + rt_timer_free(rt); + +@@ -180,6 +361,6 @@ static struct platform_driver rt_timer_d + + module_platform_driver(rt_timer_driver); + +-MODULE_DESCRIPTION("Ralink RT2880 timer"); ++MODULE_DESCRIPTION("Ralink RT2880 timer / pseudo pwm"); + MODULE_AUTHOR("John Crispin -Date: Sat, 11 May 2013 23:40:19 +0200 -Subject: [PATCH 14/33] NET: multi phy support - -Signed-off-by: John Crispin ---- - drivers/net/phy/phy.c | 9 ++++++--- - include/linux/phy.h | 2 +- - 2 files changed, 7 insertions(+), 4 deletions(-) - ---- a/drivers/net/phy/phy.c -+++ b/drivers/net/phy/phy.c -@@ -820,7 +820,8 @@ void phy_state_machine(struct work_struc - * negotiation for now */ - if (!phydev->link) { - phydev->state = PHY_NOLINK; -- netif_carrier_off(phydev->attached_dev); -+ if (!phydev->no_auto_carrier_off) -+ netif_carrier_off(phydev->attached_dev); - phydev->adjust_link(phydev->attached_dev); - break; - } -@@ -890,7 +891,8 @@ void phy_state_machine(struct work_struc - netif_carrier_on(phydev->attached_dev); - } else { - phydev->state = PHY_NOLINK; -- netif_carrier_off(phydev->attached_dev); -+ if (!phydev->no_auto_carrier_off) -+ netif_carrier_off(phydev->attached_dev); - } - - phydev->adjust_link(phydev->attached_dev); -@@ -902,7 +904,8 @@ void phy_state_machine(struct work_struc - case PHY_HALTED: - if (phydev->link) { - phydev->link = 0; -- netif_carrier_off(phydev->attached_dev); -+ if (!phydev->no_auto_carrier_off) -+ netif_carrier_off(phydev->attached_dev); - phydev->adjust_link(phydev->attached_dev); - } - break; ---- a/include/linux/phy.h -+++ b/include/linux/phy.h -@@ -298,7 +298,7 @@ struct phy_device { - - struct phy_c45_device_ids c45_ids; - bool is_c45; -- -+ bool no_auto_carrier_off; - enum phy_state state; - - u32 dev_flags; diff --git a/target/linux/ramips/patches-3.10/0110-MIPS-ralink-add-a-helper-for-reading-the-ECO-version.patch b/target/linux/ramips/patches-3.10/0110-MIPS-ralink-add-a-helper-for-reading-the-ECO-version.patch new file mode 100644 index 0000000000..7a646400c4 --- /dev/null +++ b/target/linux/ramips/patches-3.10/0110-MIPS-ralink-add-a-helper-for-reading-the-ECO-version.patch @@ -0,0 +1,23 @@ +From f57edea9db0f7f437bc4f2ae408f6dd8bfbb9062 Mon Sep 17 00:00:00 2001 +From: John Crispin +Date: Sun, 16 Mar 2014 04:53:02 +0000 +Subject: [PATCH 110/133] MIPS: ralink: add a helper for reading the ECO + version + +Signed-off-by: John Crispin +--- + arch/mips/include/asm/mach-ralink/mt7620.h | 5 +++++ + 1 file changed, 5 insertions(+) + +--- a/arch/mips/include/asm/mach-ralink/mt7620.h ++++ b/arch/mips/include/asm/mach-ralink/mt7620.h +@@ -79,4 +79,9 @@ + #define MT7620_GPIO_MODE_EPHY BIT(15) + #define MT7620_GPIO_MODE_WDT BIT(22) + ++static inline int mt7620_get_eco(void) ++{ ++ return rt_sysc_r32(SYSC_REG_CHIP_REV) & CHIP_REV_ECO_MASK; ++} ++ + #endif diff --git a/target/linux/ramips/patches-3.10/0110-NET-add-of_get_mac_address_mtd.patch b/target/linux/ramips/patches-3.10/0110-NET-add-of_get_mac_address_mtd.patch deleted file mode 100644 index 408326d40b..0000000000 --- 
a/target/linux/ramips/patches-3.10/0110-NET-add-of_get_mac_address_mtd.patch +++ /dev/null @@ -1,76 +0,0 @@ -From 2a41724b2d0af9b4444572c4302570a3af377715 Mon Sep 17 00:00:00 2001 -From: John Crispin -Date: Sun, 14 Jul 2013 23:26:15 +0200 -Subject: [PATCH 15/33] NET: add of_get_mac_address_mtd() - -Many embedded devices have information such as mac addresses stored inside mtd -devices. This patch allows us to add a property inside a node describing a -network interface. The new property points at a mtd partition with an offset -where the mac address can be found. - -Signed-off-by: John Crispin ---- - drivers/of/of_net.c | 37 +++++++++++++++++++++++++++++++++++++ - include/linux/of_net.h | 1 + - 2 files changed, 38 insertions(+) - ---- a/drivers/of/of_net.c -+++ b/drivers/of/of_net.c -@@ -10,6 +10,7 @@ - #include - #include - #include -+#include - - /** - * It maps 'enum phy_interface_t' found in include/linux/phy.h -@@ -92,3 +93,39 @@ const void *of_get_mac_address(struct de - return NULL; - } - EXPORT_SYMBOL(of_get_mac_address); -+ -+int of_get_mac_address_mtd(struct device_node *np, void *mac) -+{ -+ struct device_node *mtd_np = NULL; -+ size_t retlen; -+ int size, ret; -+ struct mtd_info *mtd; -+ const char *part; -+ const __be32 *list; -+ phandle phandle; -+ -+ list = of_get_property(np, "mtd-mac-address", &size); -+ if (!list || (size != (2 * sizeof(*list)))) -+ return -ENOENT; -+ -+ phandle = be32_to_cpup(list++); -+ if (phandle) -+ mtd_np = of_find_node_by_phandle(phandle); -+ -+ if (!mtd_np) -+ return -ENOENT; -+ -+ part = of_get_property(mtd_np, "label", NULL); -+ if (!part) -+ part = mtd_np->name; -+ -+ mtd = get_mtd_device_nm(part); -+ if (IS_ERR(mtd)) -+ return PTR_ERR(mtd); -+ -+ ret = mtd_read(mtd, be32_to_cpup(list), 6, &retlen, (u_char *) mac); -+ put_mtd_device(mtd); -+ -+ return ret; -+} -+EXPORT_SYMBOL_GPL(of_get_mac_address_mtd); ---- a/include/linux/of_net.h -+++ b/include/linux/of_net.h -@@ -11,6 +11,7 @@ - #include - extern const int of_get_phy_mode(struct device_node *np); - extern const void *of_get_mac_address(struct device_node *np); -+extern int of_get_mac_address_mtd(struct device_node *np, void *mac); - #else - static inline const int of_get_phy_mode(struct device_node *np) - { diff --git a/target/linux/ramips/patches-3.10/0111-DMA-ralink-add-rt2880-dma-engine.patch b/target/linux/ramips/patches-3.10/0111-DMA-ralink-add-rt2880-dma-engine.patch new file mode 100644 index 0000000000..2aafd00d19 --- /dev/null +++ b/target/linux/ramips/patches-3.10/0111-DMA-ralink-add-rt2880-dma-engine.patch @@ -0,0 +1,662 @@ +From 2d7e32d4825e20e9db4f0dff6b3e3c25c8c7ad7d Mon Sep 17 00:00:00 2001 +From: John Crispin +Date: Tue, 3 Dec 2013 17:05:05 +0100 +Subject: [PATCH 111/133] DMA: ralink: add rt2880 dma engine + +Signed-off-by: John Crispin +--- + drivers/dma/Kconfig | 6 + + drivers/dma/Makefile | 1 + + drivers/dma/dmaengine.c | 26 ++ + drivers/dma/ralink-gdma.c | 577 +++++++++++++++++++++++++++++++++++++++++++++ + include/linux/dmaengine.h | 1 + + 5 files changed, 611 insertions(+) + create mode 100644 drivers/dma/ralink-gdma.c + +--- a/drivers/dma/Kconfig ++++ b/drivers/dma/Kconfig +@@ -312,6 +312,12 @@ config MMP_PDMA + help + Support the MMP PDMA engine for PXA and MMP platfrom. 
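As an aside on the of_get_mac_address_mtd() helper shown in the patch above (illustration only, not part of any patch here): a network driver would typically try the MTD-backed lookup first and fall back to of_get_mac_address(). The node pointer and the fallback policy below are assumptions for the sketch:

    #include <linux/types.h>
    #include <linux/of.h>
    #include <linux/of_net.h>
    #include <linux/string.h>

    /* Sketch of a consumer.  of_get_mac_address_mtd() follows the
     * "mtd-mac-address" property (phandle to an MTD partition plus a
     * byte offset) and reads 6 bytes; it returns 0 on success.
     */
    static void example_fetch_mac(struct device_node *np, u8 *mac)
    {
            const void *addr;

            if (!of_get_mac_address_mtd(np, mac))
                    return;                         /* read from flash */

            addr = of_get_mac_address(np);          /* plain DT property */
            if (addr)
                    memcpy(mac, addr, 6);
    }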
+ ++config DMA_RALINK ++ tristate "RALINK DMA support" ++ depends on RALINK && SOC_MT7620 ++ select DMA_ENGINE ++ select DMA_VIRTUAL_CHANNELS ++ + config DMA_ENGINE + bool + +--- a/drivers/dma/Makefile ++++ b/drivers/dma/Makefile +@@ -38,3 +38,4 @@ obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o + obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o + obj-$(CONFIG_DMA_OMAP) += omap-dma.o + obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o ++obj-$(CONFIG_DMA_RALINK) += ralink-gdma.o +--- a/drivers/dma/dmaengine.c ++++ b/drivers/dma/dmaengine.c +@@ -504,6 +504,32 @@ static struct dma_chan *private_candidat + } + + /** ++ * dma_request_slave_channel - try to get specific channel exclusively ++ * @chan: target channel ++ */ ++struct dma_chan *dma_get_slave_channel(struct dma_chan *chan) ++{ ++ int err = -EBUSY; ++ ++ /* lock against __dma_request_channel */ ++ mutex_lock(&dma_list_mutex); ++ ++ if (chan->client_count == 0) { ++ err = dma_chan_get(chan); ++ if (err) ++ pr_debug("%s: failed to get %s: (%d)\n", ++ __func__, dma_chan_name(chan), err); ++ } else ++ chan = NULL; ++ ++ mutex_unlock(&dma_list_mutex); ++ ++ return chan; ++} ++EXPORT_SYMBOL_GPL(dma_get_slave_channel); ++ ++ ++/** + * dma_request_channel - try to allocate an exclusive channel + * @mask: capabilities that the channel must satisfy + * @fn: optional callback to disposition available channels +--- /dev/null ++++ b/drivers/dma/ralink-gdma.c +@@ -0,0 +1,577 @@ ++/* ++ * Copyright (C) 2013, Lars-Peter Clausen ++ * GDMA4740 DMAC support ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 675 Mass Ave, Cambridge, MA 02139, USA. 
++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "virt-dma.h" ++ ++#define GDMA_NR_CHANS 16 ++ ++#define GDMA_REG_SRC_ADDR(x) (0x00 + (x) * 0x10) ++#define GDMA_REG_DST_ADDR(x) (0x04 + (x) * 0x10) ++ ++#define GDMA_REG_CTRL0(x) (0x08 + (x) * 0x10) ++#define GDMA_REG_CTRL0_TX_MASK 0xffff ++#define GDMA_REG_CTRL0_TX_SHIFT 16 ++#define GDMA_REG_CTRL0_CURR_MASK 0xff ++#define GDMA_REG_CTRL0_CURR_SHIFT 8 ++#define GDMA_REG_CTRL0_SRC_ADDR_FIXED BIT(7) ++#define GDMA_REG_CTRL0_DST_ADDR_FIXED BIT(6) ++#define GDMA_REG_CTRL0_BURST_MASK 0x7 ++#define GDMA_REG_CTRL0_BURST_SHIFT 3 ++#define GDMA_REG_CTRL0_DONE_INT BIT(2) ++#define GDMA_REG_CTRL0_ENABLE BIT(1) ++#define GDMA_REG_CTRL0_HW_MODE 0 ++ ++#define GDMA_REG_CTRL1(x) (0x0c + (x) * 0x10) ++#define GDMA_REG_CTRL1_SEG_MASK 0xf ++#define GDMA_REG_CTRL1_SEG_SHIFT 22 ++#define GDMA_REG_CTRL1_REQ_MASK 0x3f ++#define GDMA_REG_CTRL1_SRC_REQ_SHIFT 16 ++#define GDMA_REG_CTRL1_DST_REQ_SHIFT 8 ++#define GDMA_REG_CTRL1_CONTINOUS BIT(14) ++#define GDMA_REG_CTRL1_NEXT_MASK 0x1f ++#define GDMA_REG_CTRL1_NEXT_SHIFT 3 ++#define GDMA_REG_CTRL1_COHERENT BIT(2) ++#define GDMA_REG_CTRL1_FAIL BIT(1) ++#define GDMA_REG_CTRL1_MASK BIT(0) ++ ++#define GDMA_REG_UNMASK_INT 0x200 ++#define GDMA_REG_DONE_INT 0x204 ++ ++#define GDMA_REG_GCT 0x220 ++#define GDMA_REG_GCT_CHAN_MASK 0x3 ++#define GDMA_REG_GCT_CHAN_SHIFT 3 ++#define GDMA_REG_GCT_VER_MASK 0x3 ++#define GDMA_REG_GCT_VER_SHIFT 1 ++#define GDMA_REG_GCT_ARBIT_RR BIT(0) ++ ++enum gdma_dma_transfer_size { ++ GDMA_TRANSFER_SIZE_4BYTE = 0, ++ GDMA_TRANSFER_SIZE_8BYTE = 1, ++ GDMA_TRANSFER_SIZE_16BYTE = 2, ++ GDMA_TRANSFER_SIZE_32BYTE = 3, ++}; ++ ++struct gdma_dma_sg { ++ dma_addr_t addr; ++ unsigned int len; ++}; ++ ++struct gdma_dma_desc { ++ struct virt_dma_desc vdesc; ++ ++ enum dma_transfer_direction direction; ++ bool cyclic; ++ ++ unsigned int num_sgs; ++ struct gdma_dma_sg sg[]; ++}; ++ ++struct gdma_dmaengine_chan { ++ struct virt_dma_chan vchan; ++ unsigned int id; ++ ++ dma_addr_t fifo_addr; ++ unsigned int transfer_shift; ++ ++ struct gdma_dma_desc *desc; ++ unsigned int next_sg; ++}; ++ ++struct gdma_dma_dev { ++ struct dma_device ddev; ++ void __iomem *base; ++ struct clk *clk; ++ ++ struct gdma_dmaengine_chan chan[GDMA_NR_CHANS]; ++}; ++ ++static struct gdma_dma_dev *gdma_dma_chan_get_dev( ++ struct gdma_dmaengine_chan *chan) ++{ ++ return container_of(chan->vchan.chan.device, struct gdma_dma_dev, ++ ddev); ++} ++ ++static struct gdma_dmaengine_chan *to_gdma_dma_chan(struct dma_chan *c) ++{ ++ return container_of(c, struct gdma_dmaengine_chan, vchan.chan); ++} ++ ++static struct gdma_dma_desc *to_gdma_dma_desc(struct virt_dma_desc *vdesc) ++{ ++ return container_of(vdesc, struct gdma_dma_desc, vdesc); ++} ++ ++static inline uint32_t gdma_dma_read(struct gdma_dma_dev *dma_dev, ++ unsigned int reg) ++{ ++ return readl(dma_dev->base + reg); ++} ++ ++static inline void gdma_dma_write(struct gdma_dma_dev *dma_dev, ++ unsigned reg, uint32_t val) ++{ ++ //printk("gdma --> %p = 0x%08X\n", dma_dev->base + reg, val); ++ writel(val, dma_dev->base + reg); ++} ++ ++static inline void gdma_dma_write_mask(struct gdma_dma_dev *dma_dev, ++ unsigned int reg, uint32_t val, uint32_t mask) ++{ ++ uint32_t tmp; ++ ++ tmp = gdma_dma_read(dma_dev, reg); ++ tmp &= ~mask; ++ tmp |= val; ++ gdma_dma_write(dma_dev, reg, tmp); ++} ++ ++static struct gdma_dma_desc *gdma_dma_alloc_desc(unsigned int num_sgs) ++{ ++ return 
kzalloc(sizeof(struct gdma_dma_desc) + ++ sizeof(struct gdma_dma_sg) * num_sgs, GFP_ATOMIC); ++} ++ ++static enum gdma_dma_transfer_size gdma_dma_maxburst(u32 maxburst) ++{ ++ if (maxburst <= 7) ++ return GDMA_TRANSFER_SIZE_4BYTE; ++ else if (maxburst <= 15) ++ return GDMA_TRANSFER_SIZE_8BYTE; ++ else if (maxburst <= 31) ++ return GDMA_TRANSFER_SIZE_16BYTE; ++ ++ return GDMA_TRANSFER_SIZE_32BYTE; ++} ++ ++static int gdma_dma_slave_config(struct dma_chan *c, ++ const struct dma_slave_config *config) ++{ ++ struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c); ++ struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan); ++ enum gdma_dma_transfer_size transfer_size; ++ uint32_t flags; ++ uint32_t ctrl0, ctrl1; ++ ++ switch (config->direction) { ++ case DMA_MEM_TO_DEV: ++ ctrl1 = 32 << GDMA_REG_CTRL1_SRC_REQ_SHIFT; ++ ctrl1 |= config->slave_id << GDMA_REG_CTRL1_DST_REQ_SHIFT; ++ flags = GDMA_REG_CTRL0_DST_ADDR_FIXED; ++ transfer_size = gdma_dma_maxburst(config->dst_maxburst); ++ chan->fifo_addr = config->dst_addr; ++ break; ++ ++ case DMA_DEV_TO_MEM: ++ ctrl1 = config->slave_id << GDMA_REG_CTRL1_SRC_REQ_SHIFT; ++ ctrl1 |= 32 << GDMA_REG_CTRL1_DST_REQ_SHIFT; ++ flags = GDMA_REG_CTRL0_SRC_ADDR_FIXED; ++ transfer_size = gdma_dma_maxburst(config->src_maxburst); ++ chan->fifo_addr = config->src_addr; ++ break; ++ ++ default: ++ return -EINVAL; ++ } ++ ++ chan->transfer_shift = 1 + transfer_size; ++ ++ ctrl0 = flags | GDMA_REG_CTRL0_HW_MODE; ++ ctrl0 |= GDMA_REG_CTRL0_DONE_INT; ++ ++ ctrl1 &= ~(GDMA_REG_CTRL1_NEXT_MASK << GDMA_REG_CTRL1_NEXT_SHIFT); ++ ctrl1 |= chan->id << GDMA_REG_CTRL1_NEXT_SHIFT; ++ ctrl1 |= GDMA_REG_CTRL1_FAIL; ++ ctrl1 &= ~GDMA_REG_CTRL1_CONTINOUS; ++ gdma_dma_write(dma_dev, GDMA_REG_CTRL0(chan->id), ctrl0); ++ gdma_dma_write(dma_dev, GDMA_REG_CTRL1(chan->id), ctrl1); ++ ++ return 0; ++} ++ ++static int gdma_dma_terminate_all(struct dma_chan *c) ++{ ++ struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c); ++ struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan); ++ unsigned long flags; ++ LIST_HEAD(head); ++ ++ spin_lock_irqsave(&chan->vchan.lock, flags); ++ gdma_dma_write_mask(dma_dev, GDMA_REG_CTRL0(chan->id), 0, ++ GDMA_REG_CTRL0_ENABLE); ++ chan->desc = NULL; ++ vchan_get_all_descriptors(&chan->vchan, &head); ++ spin_unlock_irqrestore(&chan->vchan.lock, flags); ++ ++ vchan_dma_desc_free_list(&chan->vchan, &head); ++ ++ return 0; ++} ++ ++static int gdma_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, ++ unsigned long arg) ++{ ++ struct dma_slave_config *config = (struct dma_slave_config *)arg; ++ ++ switch (cmd) { ++ case DMA_SLAVE_CONFIG: ++ return gdma_dma_slave_config(chan, config); ++ case DMA_TERMINATE_ALL: ++ return gdma_dma_terminate_all(chan); ++ default: ++ return -ENOSYS; ++ } ++} ++ ++static int gdma_dma_start_transfer(struct gdma_dmaengine_chan *chan) ++{ ++ struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan); ++ dma_addr_t src_addr, dst_addr; ++ struct virt_dma_desc *vdesc; ++ struct gdma_dma_sg *sg; ++ ++ gdma_dma_write_mask(dma_dev, GDMA_REG_CTRL0(chan->id), 0, ++ GDMA_REG_CTRL0_ENABLE); ++ ++ if (!chan->desc) { ++ vdesc = vchan_next_desc(&chan->vchan); ++ if (!vdesc) ++ return 0; ++ chan->desc = to_gdma_dma_desc(vdesc); ++ chan->next_sg = 0; ++ } ++ ++ if (chan->next_sg == chan->desc->num_sgs) ++ chan->next_sg = 0; ++ ++ sg = &chan->desc->sg[chan->next_sg]; ++ ++ if (chan->desc->direction == DMA_MEM_TO_DEV) { ++ src_addr = sg->addr; ++ dst_addr = chan->fifo_addr; ++ } else { ++ src_addr = chan->fifo_addr; ++ dst_addr = sg->addr; 
++ } ++ gdma_dma_write(dma_dev, GDMA_REG_SRC_ADDR(chan->id), src_addr); ++ gdma_dma_write(dma_dev, GDMA_REG_DST_ADDR(chan->id), dst_addr); ++ gdma_dma_write_mask(dma_dev, GDMA_REG_CTRL0(chan->id), ++ (sg->len << GDMA_REG_CTRL0_TX_SHIFT) | GDMA_REG_CTRL0_ENABLE, ++ GDMA_REG_CTRL0_TX_MASK << GDMA_REG_CTRL0_TX_SHIFT); ++ chan->next_sg++; ++ gdma_dma_write_mask(dma_dev, GDMA_REG_CTRL1(chan->id), 0, GDMA_REG_CTRL1_MASK); ++ ++ return 0; ++} ++ ++static void gdma_dma_chan_irq(struct gdma_dmaengine_chan *chan) ++{ ++ spin_lock(&chan->vchan.lock); ++ if (chan->desc) { ++ if (chan->desc && chan->desc->cyclic) { ++ vchan_cyclic_callback(&chan->desc->vdesc); ++ } else { ++ if (chan->next_sg == chan->desc->num_sgs) { ++ chan->desc = NULL; ++ vchan_cookie_complete(&chan->desc->vdesc); ++ } ++ } ++ } ++ gdma_dma_start_transfer(chan); ++ spin_unlock(&chan->vchan.lock); ++} ++ ++static irqreturn_t gdma_dma_irq(int irq, void *devid) ++{ ++ struct gdma_dma_dev *dma_dev = devid; ++ uint32_t unmask, done; ++ unsigned int i; ++ ++ unmask = gdma_dma_read(dma_dev, GDMA_REG_UNMASK_INT); ++ gdma_dma_write(dma_dev, GDMA_REG_UNMASK_INT, unmask); ++ done = gdma_dma_read(dma_dev, GDMA_REG_DONE_INT); ++ ++ for (i = 0; i < GDMA_NR_CHANS; ++i) ++ if (done & BIT(i)) ++ gdma_dma_chan_irq(&dma_dev->chan[i]); ++ gdma_dma_write(dma_dev, GDMA_REG_DONE_INT, done); ++ ++ return IRQ_HANDLED; ++} ++ ++static void gdma_dma_issue_pending(struct dma_chan *c) ++{ ++ struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c); ++ unsigned long flags; ++ ++ spin_lock_irqsave(&chan->vchan.lock, flags); ++ if (vchan_issue_pending(&chan->vchan) && !chan->desc) ++ gdma_dma_start_transfer(chan); ++ spin_unlock_irqrestore(&chan->vchan.lock, flags); ++} ++ ++static struct dma_async_tx_descriptor *gdma_dma_prep_slave_sg( ++ struct dma_chan *c, struct scatterlist *sgl, ++ unsigned int sg_len, enum dma_transfer_direction direction, ++ unsigned long flags, void *context) ++{ ++ struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c); ++ struct gdma_dma_desc *desc; ++ struct scatterlist *sg; ++ unsigned int i; ++ ++ desc = gdma_dma_alloc_desc(sg_len); ++ if (!desc) ++ return NULL; ++ ++ for_each_sg(sgl, sg, sg_len, i) { ++ desc->sg[i].addr = sg_dma_address(sg); ++ desc->sg[i].len = sg_dma_len(sg); ++ } ++ ++ desc->num_sgs = sg_len; ++ desc->direction = direction; ++ desc->cyclic = false; ++ ++ return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); ++} ++ ++static struct dma_async_tx_descriptor *gdma_dma_prep_dma_cyclic( ++ struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len, ++ size_t period_len, enum dma_transfer_direction direction, ++ unsigned long flags, void *context) ++{ ++ struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c); ++ struct gdma_dma_desc *desc; ++ unsigned int num_periods, i; ++ ++ if (buf_len % period_len) ++ return NULL; ++ ++ num_periods = buf_len / period_len; ++ ++ desc = gdma_dma_alloc_desc(num_periods); ++ if (!desc) ++ return NULL; ++ ++ for (i = 0; i < num_periods; i++) { ++ desc->sg[i].addr = buf_addr; ++ desc->sg[i].len = period_len; ++ buf_addr += period_len; ++ } ++ ++ desc->num_sgs = num_periods; ++ desc->direction = direction; ++ desc->cyclic = true; ++ ++ return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); ++} ++ ++static size_t gdma_dma_desc_residue(struct gdma_dmaengine_chan *chan, ++ struct gdma_dma_desc *desc, unsigned int next_sg) ++{ ++ struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan); ++ unsigned int residue, count; ++ unsigned int i; ++ ++ residue = 0; ++ ++ for (i = next_sg; i < 
desc->num_sgs; i++) ++ residue += desc->sg[i].len; ++ ++ if (next_sg != 0) { ++ count = gdma_dma_read(dma_dev, GDMA_REG_CTRL0(chan->id)); ++ count >>= GDMA_REG_CTRL0_CURR_SHIFT; ++ count &= GDMA_REG_CTRL0_CURR_MASK; ++ residue += count << chan->transfer_shift; ++ } ++ ++ return residue; ++} ++ ++static enum dma_status gdma_dma_tx_status(struct dma_chan *c, ++ dma_cookie_t cookie, struct dma_tx_state *state) ++{ ++ struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c); ++ struct virt_dma_desc *vdesc; ++ enum dma_status status; ++ unsigned long flags; ++ ++ status = dma_cookie_status(c, cookie, state); ++ if (status == DMA_SUCCESS || !state) ++ return status; ++ ++ spin_lock_irqsave(&chan->vchan.lock, flags); ++ vdesc = vchan_find_desc(&chan->vchan, cookie); ++ if (cookie == chan->desc->vdesc.tx.cookie) { ++ state->residue = gdma_dma_desc_residue(chan, chan->desc, ++ chan->next_sg); ++ } else if (vdesc) { ++ state->residue = gdma_dma_desc_residue(chan, ++ to_gdma_dma_desc(vdesc), 0); ++ } else { ++ state->residue = 0; ++ } ++ spin_unlock_irqrestore(&chan->vchan.lock, flags); ++ ++ return status; ++} ++ ++static int gdma_dma_alloc_chan_resources(struct dma_chan *c) ++{ ++ return 0; ++} ++ ++static void gdma_dma_free_chan_resources(struct dma_chan *c) ++{ ++ vchan_free_chan_resources(to_virt_chan(c)); ++} ++ ++static void gdma_dma_desc_free(struct virt_dma_desc *vdesc) ++{ ++ kfree(container_of(vdesc, struct gdma_dma_desc, vdesc)); ++} ++ ++static struct dma_chan * ++of_dma_xlate_by_chan_id(struct of_phandle_args *dma_spec, ++ struct of_dma *ofdma) ++{ ++ struct gdma_dma_dev *dma_dev = ofdma->of_dma_data; ++ unsigned int request = dma_spec->args[0]; ++ ++ if (request >= GDMA_NR_CHANS) ++ return NULL; ++ ++ return dma_get_slave_channel(&(dma_dev->chan[request].vchan.chan)); ++} ++ ++static int gdma_dma_probe(struct platform_device *pdev) ++{ ++ struct gdma_dmaengine_chan *chan; ++ struct gdma_dma_dev *dma_dev; ++ struct dma_device *dd; ++ unsigned int i; ++ struct resource *res; ++ uint32_t gct; ++ int ret; ++ int irq; ++ ++ ++ dma_dev = devm_kzalloc(&pdev->dev, sizeof(*dma_dev), GFP_KERNEL); ++ if (!dma_dev) ++ return -EINVAL; ++ ++ dd = &dma_dev->ddev; ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ dma_dev->base = devm_ioremap_resource(&pdev->dev, res); ++ if (IS_ERR(dma_dev->base)) ++ return PTR_ERR(dma_dev->base); ++ ++ dma_cap_set(DMA_SLAVE, dd->cap_mask); ++ dma_cap_set(DMA_CYCLIC, dd->cap_mask); ++ dd->device_alloc_chan_resources = gdma_dma_alloc_chan_resources; ++ dd->device_free_chan_resources = gdma_dma_free_chan_resources; ++ dd->device_tx_status = gdma_dma_tx_status; ++ dd->device_issue_pending = gdma_dma_issue_pending; ++ dd->device_prep_slave_sg = gdma_dma_prep_slave_sg; ++ dd->device_prep_dma_cyclic = gdma_dma_prep_dma_cyclic; ++ dd->device_control = gdma_dma_control; ++ dd->dev = &pdev->dev; ++ dd->chancnt = GDMA_NR_CHANS; ++ INIT_LIST_HEAD(&dd->channels); ++ ++ for (i = 0; i < dd->chancnt; i++) { ++ chan = &dma_dev->chan[i]; ++ chan->id = i; ++ chan->vchan.desc_free = gdma_dma_desc_free; ++ vchan_init(&chan->vchan, dd); ++ } ++ ++ ret = dma_async_device_register(dd); ++ if (ret) ++ return ret; ++ ++ ret = of_dma_controller_register(pdev->dev.of_node, ++ of_dma_xlate_by_chan_id, dma_dev); ++ if (ret) ++ goto err_unregister; ++ ++ irq = platform_get_irq(pdev, 0); ++ ret = request_irq(irq, gdma_dma_irq, 0, dev_name(&pdev->dev), dma_dev); ++ if (ret) ++ goto err_unregister; ++ ++ gdma_dma_write(dma_dev, GDMA_REG_UNMASK_INT, 0); ++ gdma_dma_write(dma_dev, 
GDMA_REG_DONE_INT, BIT(dd->chancnt) - 1); ++ ++ gct = gdma_dma_read(dma_dev, GDMA_REG_GCT); ++ dev_info(&pdev->dev, "revision: %d, channels: %d\n", ++ (gct >> GDMA_REG_GCT_VER_SHIFT) & GDMA_REG_GCT_VER_MASK, ++ 8 << ((gct >> GDMA_REG_GCT_CHAN_SHIFT) & GDMA_REG_GCT_CHAN_MASK)); ++ platform_set_drvdata(pdev, dma_dev); ++ ++ gdma_dma_write(dma_dev, GDMA_REG_GCT, GDMA_REG_GCT_ARBIT_RR); ++ ++ return 0; ++ ++err_unregister: ++ dma_async_device_unregister(dd); ++ return ret; ++} ++ ++static int gdma_dma_remove(struct platform_device *pdev) ++{ ++ struct gdma_dma_dev *dma_dev = platform_get_drvdata(pdev); ++ int irq = platform_get_irq(pdev, 0); ++ ++ free_irq(irq, dma_dev); ++ of_dma_controller_free(pdev->dev.of_node); ++ dma_async_device_unregister(&dma_dev->ddev); ++ ++ return 0; ++} ++ ++static const struct of_device_id gdma_of_match_table[] = { ++ { .compatible = "ralink,rt2880-gdma" }, ++ { }, ++}; ++ ++static struct platform_driver gdma_dma_driver = { ++ .probe = gdma_dma_probe, ++ .remove = gdma_dma_remove, ++ .driver = { ++ .name = "gdma-rt2880", ++ .owner = THIS_MODULE, ++ .of_match_table = gdma_of_match_table, ++ }, ++}; ++module_platform_driver(gdma_dma_driver); ++ ++MODULE_AUTHOR("Lars-Peter Clausen "); ++MODULE_DESCRIPTION("GDMA4740 DMA driver"); ++MODULE_LICENSE("GPLv2"); +--- a/include/linux/dmaengine.h ++++ b/include/linux/dmaengine.h +@@ -999,6 +999,7 @@ static inline void dma_release_channel(s + int dma_async_device_register(struct dma_device *device); + void dma_async_device_unregister(struct dma_device *device); + void dma_run_dependencies(struct dma_async_tx_descriptor *tx); ++struct dma_chan *dma_get_slave_channel(struct dma_chan *chan); + struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type); + struct dma_chan *net_dma_find_channel(void); + #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y) diff --git a/target/linux/ramips/patches-3.10/0111-NET-MIPS-add-ralink-SoC-ethernet-driver.patch b/target/linux/ramips/patches-3.10/0111-NET-MIPS-add-ralink-SoC-ethernet-driver.patch deleted file mode 100644 index 4d12424e91..0000000000 --- a/target/linux/ramips/patches-3.10/0111-NET-MIPS-add-ralink-SoC-ethernet-driver.patch +++ /dev/null @@ -1,4947 +0,0 @@ -From ad11aedcc16574c0b3d3f5e40c67227d1846b94e Mon Sep 17 00:00:00 2001 -From: John Crispin -Date: Mon, 22 Apr 2013 23:20:03 +0200 -Subject: [PATCH 16/33] NET: MIPS: add ralink SoC ethernet driver - -Add support for Ralink FE and ESW. 
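Stepping back to the GDMA engine added above (illustration only, not part of any patch here): a peripheral driver would consume it through the generic dmaengine client API. The sketch below assumes a consumer node wired up roughly as dmas = <&gdma 4>; dma-names = "rx"; so that of_dma_xlate_by_chan_id() hands out channel 4; the request line in slave_id, the burst size and the error handling are likewise assumptions.

    #include <linux/dmaengine.h>
    #include <linux/device.h>
    #include <linux/errno.h>

    /* Minimal dmaengine client sketch for the GDMA driver above.
     * Channel number, request line and burst size are assumptions.
     */
    static int example_start_rx(struct device *dev, dma_addr_t buf,
                                size_t len, dma_addr_t fifo)
    {
            struct dma_slave_config cfg = {
                    .direction    = DMA_DEV_TO_MEM,
                    .src_addr     = fifo,
                    .src_maxburst = 16,
                    .slave_id     = 0,      /* hardware request line */
            };
            struct dma_async_tx_descriptor *desc;
            struct dma_chan *chan;

            chan = dma_request_slave_channel(dev, "rx");
            if (!chan)
                    return -ENODEV;

            dmaengine_slave_config(chan, &cfg);

            desc = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM,
                                               DMA_PREP_INTERRUPT);
            if (!desc) {
                    dma_release_channel(chan);
                    return -ENOMEM;
            }

            dmaengine_submit(desc);
            dma_async_issue_pending(chan);
            return 0;
    }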
- -Signed-off-by: John Crispin ---- - .../include/asm/mach-ralink/rt305x_esw_platform.h | 27 + - arch/mips/ralink/rt305x.c | 1 + - drivers/net/ethernet/Kconfig | 1 + - drivers/net/ethernet/Makefile | 1 + - drivers/net/ethernet/ralink/Kconfig | 31 + - drivers/net/ethernet/ralink/Makefile | 18 + - drivers/net/ethernet/ralink/esw_rt3052.c | 1463 ++++++++++++++++++++ - drivers/net/ethernet/ralink/esw_rt3052.h | 32 + - drivers/net/ethernet/ralink/gsw_mt7620a.c | 1027 ++++++++++++++ - drivers/net/ethernet/ralink/gsw_mt7620a.h | 29 + - drivers/net/ethernet/ralink/mdio.c | 245 ++++ - drivers/net/ethernet/ralink/mdio.h | 29 + - drivers/net/ethernet/ralink/mdio_rt2880.c | 232 ++++ - drivers/net/ethernet/ralink/mdio_rt2880.h | 26 + - drivers/net/ethernet/ralink/ralink_soc_eth.c | 735 ++++++++++ - drivers/net/ethernet/ralink/ralink_soc_eth.h | 374 +++++ - drivers/net/ethernet/ralink/soc_mt7620.c | 111 ++ - drivers/net/ethernet/ralink/soc_rt2880.c | 51 + - drivers/net/ethernet/ralink/soc_rt305x.c | 113 ++ - drivers/net/ethernet/ralink/soc_rt3883.c | 60 + - 20 files changed, 4606 insertions(+) - create mode 100644 arch/mips/include/asm/mach-ralink/rt305x_esw_platform.h - create mode 100644 drivers/net/ethernet/ralink/Kconfig - create mode 100644 drivers/net/ethernet/ralink/Makefile - create mode 100644 drivers/net/ethernet/ralink/esw_rt3052.c - create mode 100644 drivers/net/ethernet/ralink/esw_rt3052.h - create mode 100644 drivers/net/ethernet/ralink/gsw_mt7620a.c - create mode 100644 drivers/net/ethernet/ralink/gsw_mt7620a.h - create mode 100644 drivers/net/ethernet/ralink/mdio.c - create mode 100644 drivers/net/ethernet/ralink/mdio.h - create mode 100644 drivers/net/ethernet/ralink/mdio_rt2880.c - create mode 100644 drivers/net/ethernet/ralink/mdio_rt2880.h - create mode 100644 drivers/net/ethernet/ralink/ralink_soc_eth.c - create mode 100644 drivers/net/ethernet/ralink/ralink_soc_eth.h - create mode 100644 drivers/net/ethernet/ralink/soc_mt7620.c - create mode 100644 drivers/net/ethernet/ralink/soc_rt2880.c - create mode 100644 drivers/net/ethernet/ralink/soc_rt305x.c - create mode 100644 drivers/net/ethernet/ralink/soc_rt3883.c - ---- /dev/null -+++ b/arch/mips/include/asm/mach-ralink/rt305x_esw_platform.h -@@ -0,0 +1,27 @@ -+/* -+ * Ralink RT305x SoC platform device registration -+ * -+ * Copyright (C) 2010 Gabor Juhos -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation. 
-+ */ -+ -+#ifndef _RT305X_ESW_PLATFORM_H -+#define _RT305X_ESW_PLATFORM_H -+ -+enum { -+ RT305X_ESW_VLAN_CONFIG_NONE = 0, -+ RT305X_ESW_VLAN_CONFIG_LLLLW, -+ RT305X_ESW_VLAN_CONFIG_WLLLL, -+}; -+ -+struct rt305x_esw_platform_data -+{ -+ u8 vlan_config; -+ u32 reg_initval_fct2; -+ u32 reg_initval_fpa2; -+}; -+ -+#endif /* _RT305X_ESW_PLATFORM_H */ ---- a/arch/mips/ralink/rt305x.c -+++ b/arch/mips/ralink/rt305x.c -@@ -221,6 +221,7 @@ void __init ralink_clk_init(void) - } - - ralink_clk_add("cpu", cpu_rate); -+ ralink_clk_add("sys", sys_rate); - ralink_clk_add("10000b00.spi", sys_rate); - ralink_clk_add("10000100.timer", wdt_rate); - ralink_clk_add("10000120.watchdog", wdt_rate); ---- a/drivers/net/ethernet/Kconfig -+++ b/drivers/net/ethernet/Kconfig -@@ -135,6 +135,7 @@ config ETHOC - source "drivers/net/ethernet/packetengines/Kconfig" - source "drivers/net/ethernet/pasemi/Kconfig" - source "drivers/net/ethernet/qlogic/Kconfig" -+source "drivers/net/ethernet/ralink/Kconfig" - source "drivers/net/ethernet/realtek/Kconfig" - source "drivers/net/ethernet/renesas/Kconfig" - source "drivers/net/ethernet/rdc/Kconfig" ---- a/drivers/net/ethernet/Makefile -+++ b/drivers/net/ethernet/Makefile -@@ -53,6 +53,7 @@ obj-$(CONFIG_ETHOC) += ethoc.o - obj-$(CONFIG_NET_PACKET_ENGINE) += packetengines/ - obj-$(CONFIG_NET_VENDOR_PASEMI) += pasemi/ - obj-$(CONFIG_NET_VENDOR_QLOGIC) += qlogic/ -+obj-$(CONFIG_NET_RALINK) += ralink/ - obj-$(CONFIG_NET_VENDOR_REALTEK) += realtek/ - obj-$(CONFIG_SH_ETH) += renesas/ - obj-$(CONFIG_NET_VENDOR_RDC) += rdc/ ---- /dev/null -+++ b/drivers/net/ethernet/ralink/Kconfig -@@ -0,0 +1,32 @@ -+config NET_RALINK -+ tristate "Ralink RT288X/RT3X5X/RT3662/RT3883/MT7620 ethernet driver" -+ depends on RALINK -+ help -+ This driver supports the ethernet mac inside the ralink wisocs -+ -+if NET_RALINK -+ -+config NET_RALINK_MDIO -+ def_bool NET_RALINK -+ depends on (SOC_RT288X || SOC_RT3883 || SOC_MT7620) -+ select PHYLIB -+ -+config NET_RALINK_MDIO_RT2880 -+ def_bool NET_RALINK -+ depends on (SOC_RT288X || SOC_RT3883) -+ select NET_RALINK_MDIO -+ -+config NET_RALINK_ESW_RT3052 -+ def_bool NET_RALINK -+ depends on SOC_RT305X -+ select PHYLIB -+ select SWCONFIG -+ -+config NET_RALINK_GSW_MT7620 -+ def_bool NET_RALINK -+ depends on SOC_MT7620 -+ select INET_LRO -+ select NET_RALINK_MDIO -+ select PHYLIB -+ select SWCONFIG -+endif ---- /dev/null -+++ b/drivers/net/ethernet/ralink/Makefile -@@ -0,0 +1,18 @@ -+# -+# Makefile for the Ralink SoCs built-in ethernet macs -+# -+ -+ralink-eth-y += ralink_soc_eth.o -+ -+ralink-eth-$(CONFIG_NET_RALINK_MDIO) += mdio.o -+ralink-eth-$(CONFIG_NET_RALINK_MDIO_RT2880) += mdio_rt2880.o -+ -+ralink-eth-$(CONFIG_NET_RALINK_ESW_RT3052) += esw_rt3052.o -+ralink-eth-$(CONFIG_NET_RALINK_GSW_MT7620) += gsw_mt7620a.o mt7530.o -+ -+ralink-eth-$(CONFIG_SOC_RT288X) += soc_rt2880.o -+ralink-eth-$(CONFIG_SOC_RT305X) += soc_rt305x.o -+ralink-eth-$(CONFIG_SOC_RT3883) += soc_rt3883.o -+ralink-eth-$(CONFIG_SOC_MT7620) += soc_mt7620.o -+ -+obj-$(CONFIG_NET_RALINK) += ralink-eth.o ---- /dev/null -+++ b/drivers/net/ethernet/ralink/esw_rt3052.c -@@ -0,0 +1,1463 @@ -+/* -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; version 2 of the License -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. -+ * -+ * Copyright (C) 2009-2013 John Crispin -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+ -+#include "ralink_soc_eth.h" -+ -+#include -+#include -+#include -+ -+#include -+#include -+#include -+ -+/* -+ * HW limitations for this switch: -+ * - No large frame support (PKT_MAX_LEN at most 1536) -+ * - Can't have untagged vlan and tagged vlan on one port at the same time, -+ * though this might be possible using the undocumented PPE. -+ */ -+ -+#define RT305X_ESW_REG_ISR 0x00 -+#define RT305X_ESW_REG_IMR 0x04 -+#define RT305X_ESW_REG_FCT0 0x08 -+#define RT305X_ESW_REG_PFC1 0x14 -+#define RT305X_ESW_REG_ATS 0x24 -+#define RT305X_ESW_REG_ATS0 0x28 -+#define RT305X_ESW_REG_ATS1 0x2c -+#define RT305X_ESW_REG_ATS2 0x30 -+#define RT305X_ESW_REG_PVIDC(_n) (0x40 + 4 * (_n)) -+#define RT305X_ESW_REG_VLANI(_n) (0x50 + 4 * (_n)) -+#define RT305X_ESW_REG_VMSC(_n) (0x70 + 4 * (_n)) -+#define RT305X_ESW_REG_POA 0x80 -+#define RT305X_ESW_REG_FPA 0x84 -+#define RT305X_ESW_REG_SOCPC 0x8c -+#define RT305X_ESW_REG_POC0 0x90 -+#define RT305X_ESW_REG_POC1 0x94 -+#define RT305X_ESW_REG_POC2 0x98 -+#define RT305X_ESW_REG_SGC 0x9c -+#define RT305X_ESW_REG_STRT 0xa0 -+#define RT305X_ESW_REG_PCR0 0xc0 -+#define RT305X_ESW_REG_PCR1 0xc4 -+#define RT305X_ESW_REG_FPA2 0xc8 -+#define RT305X_ESW_REG_FCT2 0xcc -+#define RT305X_ESW_REG_SGC2 0xe4 -+#define RT305X_ESW_REG_P0LED 0xa4 -+#define RT305X_ESW_REG_P1LED 0xa8 -+#define RT305X_ESW_REG_P2LED 0xac -+#define RT305X_ESW_REG_P3LED 0xb0 -+#define RT305X_ESW_REG_P4LED 0xb4 -+#define RT305X_ESW_REG_PXPC(_x) (0xe8 + (4 * _x)) -+#define RT305X_ESW_REG_P1PC 0xec -+#define RT305X_ESW_REG_P2PC 0xf0 -+#define RT305X_ESW_REG_P3PC 0xf4 -+#define RT305X_ESW_REG_P4PC 0xf8 -+#define RT305X_ESW_REG_P5PC 0xfc -+ -+#define RT305X_ESW_LED_LINK 0 -+#define RT305X_ESW_LED_100M 1 -+#define RT305X_ESW_LED_DUPLEX 2 -+#define RT305X_ESW_LED_ACTIVITY 3 -+#define RT305X_ESW_LED_COLLISION 4 -+#define RT305X_ESW_LED_LINKACT 5 -+#define RT305X_ESW_LED_DUPLCOLL 6 -+#define RT305X_ESW_LED_10MACT 7 -+#define RT305X_ESW_LED_100MACT 8 -+/* Additional led states not in datasheet: */ -+#define RT305X_ESW_LED_BLINK 10 -+#define RT305X_ESW_LED_ON 12 -+ -+#define RT305X_ESW_LINK_S 25 -+#define RT305X_ESW_DUPLEX_S 9 -+#define RT305X_ESW_SPD_S 0 -+ -+#define RT305X_ESW_PCR0_WT_NWAY_DATA_S 16 -+#define RT305X_ESW_PCR0_WT_PHY_CMD BIT(13) -+#define RT305X_ESW_PCR0_CPU_PHY_REG_S 8 -+ -+#define RT305X_ESW_PCR1_WT_DONE BIT(0) -+ -+#define RT305X_ESW_ATS_TIMEOUT (5 * HZ) -+#define RT305X_ESW_PHY_TIMEOUT (5 * HZ) -+ -+#define RT305X_ESW_PVIDC_PVID_M 0xfff -+#define RT305X_ESW_PVIDC_PVID_S 12 -+ -+#define RT305X_ESW_VLANI_VID_M 0xfff -+#define RT305X_ESW_VLANI_VID_S 12 -+ -+#define RT305X_ESW_VMSC_MSC_M 0xff -+#define RT305X_ESW_VMSC_MSC_S 8 -+ -+#define RT305X_ESW_SOCPC_DISUN2CPU_S 0 -+#define RT305X_ESW_SOCPC_DISMC2CPU_S 8 -+#define RT305X_ESW_SOCPC_DISBC2CPU_S 16 -+#define RT305X_ESW_SOCPC_CRC_PADDING BIT(25) -+ -+#define RT305X_ESW_POC0_EN_BP_S 0 -+#define RT305X_ESW_POC0_EN_FC_S 8 -+#define RT305X_ESW_POC0_DIS_RMC2CPU_S 16 -+#define RT305X_ESW_POC0_DIS_PORT_M 0x7f -+#define RT305X_ESW_POC0_DIS_PORT_S 23 -+ -+#define 
RT305X_ESW_POC2_UNTAG_EN_M 0xff -+#define RT305X_ESW_POC2_UNTAG_EN_S 0 -+#define RT305X_ESW_POC2_ENAGING_S 8 -+#define RT305X_ESW_POC2_DIS_UC_PAUSE_S 16 -+ -+#define RT305X_ESW_SGC2_DOUBLE_TAG_M 0x7f -+#define RT305X_ESW_SGC2_DOUBLE_TAG_S 0 -+#define RT305X_ESW_SGC2_LAN_PMAP_M 0x3f -+#define RT305X_ESW_SGC2_LAN_PMAP_S 24 -+ -+#define RT305X_ESW_PFC1_EN_VLAN_M 0xff -+#define RT305X_ESW_PFC1_EN_VLAN_S 16 -+#define RT305X_ESW_PFC1_EN_TOS_S 24 -+ -+#define RT305X_ESW_VLAN_NONE 0xfff -+ -+#define RT305X_ESW_GSC_BC_STROM_MASK 0x3 -+#define RT305X_ESW_GSC_BC_STROM_SHIFT 4 -+ -+#define RT305X_ESW_GSC_LED_FREQ_MASK 0x3 -+#define RT305X_ESW_GSC_LED_FREQ_SHIFT 23 -+ -+#define RT305X_ESW_POA_LINK_MASK 0x1f -+#define RT305X_ESW_POA_LINK_SHIFT 25 -+ -+#define RT305X_ESW_PORT_ST_CHG BIT(26) -+#define RT305X_ESW_PORT0 0 -+#define RT305X_ESW_PORT1 1 -+#define RT305X_ESW_PORT2 2 -+#define RT305X_ESW_PORT3 3 -+#define RT305X_ESW_PORT4 4 -+#define RT305X_ESW_PORT5 5 -+#define RT305X_ESW_PORT6 6 -+ -+#define RT305X_ESW_PORTS_NONE 0 -+ -+#define RT305X_ESW_PMAP_LLLLLL 0x3f -+#define RT305X_ESW_PMAP_LLLLWL 0x2f -+#define RT305X_ESW_PMAP_WLLLLL 0x3e -+ -+#define RT305X_ESW_PORTS_INTERNAL \ -+ (BIT(RT305X_ESW_PORT0) | BIT(RT305X_ESW_PORT1) | \ -+ BIT(RT305X_ESW_PORT2) | BIT(RT305X_ESW_PORT3) | \ -+ BIT(RT305X_ESW_PORT4)) -+ -+#define RT305X_ESW_PORTS_NOCPU \ -+ (RT305X_ESW_PORTS_INTERNAL | BIT(RT305X_ESW_PORT5)) -+ -+#define RT305X_ESW_PORTS_CPU BIT(RT305X_ESW_PORT6) -+ -+#define RT305X_ESW_PORTS_ALL \ -+ (RT305X_ESW_PORTS_NOCPU | RT305X_ESW_PORTS_CPU) -+ -+#define RT305X_ESW_NUM_VLANS 16 -+#define RT305X_ESW_NUM_VIDS 4096 -+#define RT305X_ESW_NUM_PORTS 7 -+#define RT305X_ESW_NUM_LANWAN 6 -+#define RT305X_ESW_NUM_LEDS 5 -+ -+#define RT5350_ESW_REG_PXTPC(_x) (0x150 + (4 * _x)) -+#define RT5350_EWS_REG_LED_POLARITY 0x168 -+#define RT5350_RESET_EPHY BIT(24) -+#define SYSC_REG_RESET_CTRL 0x34 -+ -+enum { -+ /* Global attributes. */ -+ RT305X_ESW_ATTR_ENABLE_VLAN, -+ RT305X_ESW_ATTR_ALT_VLAN_DISABLE, -+ RT305X_ESW_ATTR_BC_STATUS, -+ RT305X_ESW_ATTR_LED_FREQ, -+ /* Port attributes. */ -+ RT305X_ESW_ATTR_PORT_DISABLE, -+ RT305X_ESW_ATTR_PORT_DOUBLETAG, -+ RT305X_ESW_ATTR_PORT_UNTAG, -+ RT305X_ESW_ATTR_PORT_LED, -+ RT305X_ESW_ATTR_PORT_LAN, -+ RT305X_ESW_ATTR_PORT_RECV_BAD, -+ RT305X_ESW_ATTR_PORT_RECV_GOOD, -+ RT5350_ESW_ATTR_PORT_TR_BAD, -+ RT5350_ESW_ATTR_PORT_TR_GOOD, -+}; -+ -+struct esw_port { -+ bool disable; -+ bool doubletag; -+ bool untag; -+ u8 led; -+ u16 pvid; -+}; -+ -+struct esw_vlan { -+ u8 ports; -+ u16 vid; -+}; -+ -+struct rt305x_esw { -+ struct device *dev; -+ void __iomem *base; -+ int irq; -+ const struct rt305x_esw_platform_data *pdata; -+ /* Protects against concurrent register rmw operations. 
*/ -+ spinlock_t reg_rw_lock; -+ -+ unsigned char port_map; -+ unsigned int reg_initval_fct2; -+ unsigned int reg_initval_fpa2; -+ unsigned int reg_led_polarity; -+ -+ -+ struct switch_dev swdev; -+ bool global_vlan_enable; -+ bool alt_vlan_disable; -+ int bc_storm_protect; -+ int led_frequency; -+ struct esw_vlan vlans[RT305X_ESW_NUM_VLANS]; -+ struct esw_port ports[RT305X_ESW_NUM_PORTS]; -+ -+}; -+ -+static inline void esw_w32(struct rt305x_esw *esw, u32 val, unsigned reg) -+{ -+ __raw_writel(val, esw->base + reg); -+} -+ -+static inline u32 esw_r32(struct rt305x_esw *esw, unsigned reg) -+{ -+ return __raw_readl(esw->base + reg); -+} -+ -+static inline void esw_rmw_raw(struct rt305x_esw *esw, unsigned reg, unsigned long mask, -+ unsigned long val) -+{ -+ unsigned long t; -+ -+ t = __raw_readl(esw->base + reg) & ~mask; -+ __raw_writel(t | val, esw->base + reg); -+} -+ -+static void esw_rmw(struct rt305x_esw *esw, unsigned reg, unsigned long mask, -+ unsigned long val) -+{ -+ unsigned long flags; -+ -+ spin_lock_irqsave(&esw->reg_rw_lock, flags); -+ esw_rmw_raw(esw, reg, mask, val); -+ spin_unlock_irqrestore(&esw->reg_rw_lock, flags); -+} -+ -+static u32 rt305x_mii_write(struct rt305x_esw *esw, u32 phy_addr, u32 phy_register, -+ u32 write_data) -+{ -+ unsigned long t_start = jiffies; -+ int ret = 0; -+ -+ while (1) { -+ if (!(esw_r32(esw, RT305X_ESW_REG_PCR1) & -+ RT305X_ESW_PCR1_WT_DONE)) -+ break; -+ if (time_after(jiffies, t_start + RT305X_ESW_PHY_TIMEOUT)) { -+ ret = 1; -+ goto out; -+ } -+ } -+ -+ write_data &= 0xffff; -+ esw_w32(esw, -+ (write_data << RT305X_ESW_PCR0_WT_NWAY_DATA_S) | -+ (phy_register << RT305X_ESW_PCR0_CPU_PHY_REG_S) | -+ (phy_addr) | RT305X_ESW_PCR0_WT_PHY_CMD, -+ RT305X_ESW_REG_PCR0); -+ -+ t_start = jiffies; -+ while (1) { -+ if (esw_r32(esw, RT305X_ESW_REG_PCR1) & -+ RT305X_ESW_PCR1_WT_DONE) -+ break; -+ -+ if (time_after(jiffies, t_start + RT305X_ESW_PHY_TIMEOUT)) { -+ ret = 1; -+ break; -+ } -+ } -+out: -+ if (ret) -+ printk(KERN_ERR "ramips_eth: MDIO timeout\n"); -+ return ret; -+} -+ -+static unsigned esw_get_vlan_id(struct rt305x_esw *esw, unsigned vlan) -+{ -+ unsigned s; -+ unsigned val; -+ -+ s = RT305X_ESW_VLANI_VID_S * (vlan % 2); -+ val = esw_r32(esw, RT305X_ESW_REG_VLANI(vlan / 2)); -+ val = (val >> s) & RT305X_ESW_VLANI_VID_M; -+ -+ return val; -+} -+ -+static void esw_set_vlan_id(struct rt305x_esw *esw, unsigned vlan, unsigned vid) -+{ -+ unsigned s; -+ -+ s = RT305X_ESW_VLANI_VID_S * (vlan % 2); -+ esw_rmw(esw, -+ RT305X_ESW_REG_VLANI(vlan / 2), -+ RT305X_ESW_VLANI_VID_M << s, -+ (vid & RT305X_ESW_VLANI_VID_M) << s); -+} -+ -+static unsigned esw_get_pvid(struct rt305x_esw *esw, unsigned port) -+{ -+ unsigned s, val; -+ -+ s = RT305X_ESW_PVIDC_PVID_S * (port % 2); -+ val = esw_r32(esw, RT305X_ESW_REG_PVIDC(port / 2)); -+ return (val >> s) & RT305X_ESW_PVIDC_PVID_M; -+} -+ -+static void esw_set_pvid(struct rt305x_esw *esw, unsigned port, unsigned pvid) -+{ -+ unsigned s; -+ -+ s = RT305X_ESW_PVIDC_PVID_S * (port % 2); -+ esw_rmw(esw, -+ RT305X_ESW_REG_PVIDC(port / 2), -+ RT305X_ESW_PVIDC_PVID_M << s, -+ (pvid & RT305X_ESW_PVIDC_PVID_M) << s); -+} -+ -+static unsigned esw_get_vmsc(struct rt305x_esw *esw, unsigned vlan) -+{ -+ unsigned s, val; -+ -+ s = RT305X_ESW_VMSC_MSC_S * (vlan % 4); -+ val = esw_r32(esw, RT305X_ESW_REG_VMSC(vlan / 4)); -+ val = (val >> s) & RT305X_ESW_VMSC_MSC_M; -+ -+ return val; -+} -+ -+static void esw_set_vmsc(struct rt305x_esw *esw, unsigned vlan, unsigned msc) -+{ -+ unsigned s; -+ -+ s = RT305X_ESW_VMSC_MSC_S * (vlan % 
4); -+ esw_rmw(esw, -+ RT305X_ESW_REG_VMSC(vlan / 4), -+ RT305X_ESW_VMSC_MSC_M << s, -+ (msc & RT305X_ESW_VMSC_MSC_M) << s); -+} -+ -+static unsigned esw_get_port_disable(struct rt305x_esw *esw) -+{ -+ unsigned reg; -+ reg = esw_r32(esw, RT305X_ESW_REG_POC0); -+ return (reg >> RT305X_ESW_POC0_DIS_PORT_S) & -+ RT305X_ESW_POC0_DIS_PORT_M; -+} -+ -+static void esw_set_port_disable(struct rt305x_esw *esw, unsigned disable_mask) -+{ -+ unsigned old_mask; -+ unsigned enable_mask; -+ unsigned changed; -+ int i; -+ -+ old_mask = esw_get_port_disable(esw); -+ changed = old_mask ^ disable_mask; -+ enable_mask = old_mask & disable_mask; -+ -+ /* enable before writing to MII */ -+ esw_rmw(esw, RT305X_ESW_REG_POC0, -+ (RT305X_ESW_POC0_DIS_PORT_M << -+ RT305X_ESW_POC0_DIS_PORT_S), -+ enable_mask << RT305X_ESW_POC0_DIS_PORT_S); -+ -+ for (i = 0; i < RT305X_ESW_NUM_LEDS; i++) { -+ if (!(changed & (1 << i))) -+ continue; -+ if (disable_mask & (1 << i)) { -+ /* disable */ -+ rt305x_mii_write(esw, i, MII_BMCR, -+ BMCR_PDOWN); -+ } else { -+ /* enable */ -+ rt305x_mii_write(esw, i, MII_BMCR, -+ BMCR_FULLDPLX | -+ BMCR_ANENABLE | -+ BMCR_ANRESTART | -+ BMCR_SPEED100); -+ } -+ } -+ -+ /* disable after writing to MII */ -+ esw_rmw(esw, RT305X_ESW_REG_POC0, -+ (RT305X_ESW_POC0_DIS_PORT_M << -+ RT305X_ESW_POC0_DIS_PORT_S), -+ disable_mask << RT305X_ESW_POC0_DIS_PORT_S); -+} -+ -+static void esw_set_gsc(struct rt305x_esw *esw) -+{ -+ esw_rmw(esw, RT305X_ESW_REG_SGC, -+ RT305X_ESW_GSC_BC_STROM_MASK << RT305X_ESW_GSC_BC_STROM_SHIFT, -+ esw->bc_storm_protect << RT305X_ESW_GSC_BC_STROM_SHIFT); -+ esw_rmw(esw, RT305X_ESW_REG_SGC, -+ RT305X_ESW_GSC_LED_FREQ_MASK << RT305X_ESW_GSC_LED_FREQ_SHIFT, -+ esw->led_frequency << RT305X_ESW_GSC_LED_FREQ_SHIFT); -+} -+ -+static int esw_apply_config(struct switch_dev *dev); -+ -+static void esw_hw_init(struct rt305x_esw *esw) -+{ -+ int i; -+ u8 port_disable = 0; -+ u8 port_map = RT305X_ESW_PMAP_LLLLLL; -+ -+ /* vodoo from original driver */ -+ esw_w32(esw, 0xC8A07850, RT305X_ESW_REG_FCT0); -+ esw_w32(esw, 0x00000000, RT305X_ESW_REG_SGC2); -+ /* Port priority 1 for all ports, vlan enabled. */ -+ esw_w32(esw, 0x00005555 | -+ (RT305X_ESW_PORTS_ALL << RT305X_ESW_PFC1_EN_VLAN_S), -+ RT305X_ESW_REG_PFC1); -+ -+ /* Enable Back Pressure, and Flow Control */ -+ esw_w32(esw, -+ ((RT305X_ESW_PORTS_ALL << RT305X_ESW_POC0_EN_BP_S) | -+ (RT305X_ESW_PORTS_ALL << RT305X_ESW_POC0_EN_FC_S)), -+ RT305X_ESW_REG_POC0); -+ -+ /* Enable Aging, and VLAN TAG removal */ -+ esw_w32(esw, -+ ((RT305X_ESW_PORTS_ALL << RT305X_ESW_POC2_ENAGING_S) | -+ (RT305X_ESW_PORTS_NOCPU << RT305X_ESW_POC2_UNTAG_EN_S)), -+ RT305X_ESW_REG_POC2); -+ -+ if (esw->reg_initval_fct2) -+ esw_w32(esw, esw->reg_initval_fct2, RT305X_ESW_REG_FCT2); -+ else -+ esw_w32(esw, esw->pdata->reg_initval_fct2, RT305X_ESW_REG_FCT2); -+ -+ /* -+ * 300s aging timer, max packet len 1536, broadcast storm prevention -+ * disabled, disable collision abort, mac xor48 hash, 10 packet back -+ * pressure jam, GMII disable was_transmit, back pressure disabled, -+ * 30ms led flash, unmatched IGMP as broadcast, rmc tb fault to all -+ * ports. 
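-+ * (Editorial note, not in the original patch: the magic 0x0008a301 value
-+ * written below packs the fields listed above into a single SGC write;
-+ * the RT305X_ESW_GSC_* masks and shifts defined earlier cover the
-+ * broadcast-storm and LED-frequency fields, which esw_set_gsc() later
-+ * rewrites individually when a configuration is applied.)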
-+ */ -+ esw_w32(esw, 0x0008a301, RT305X_ESW_REG_SGC); -+ -+ /* Setup SoC Port control register */ -+ esw_w32(esw, -+ (RT305X_ESW_SOCPC_CRC_PADDING | -+ (RT305X_ESW_PORTS_CPU << RT305X_ESW_SOCPC_DISUN2CPU_S) | -+ (RT305X_ESW_PORTS_CPU << RT305X_ESW_SOCPC_DISMC2CPU_S) | -+ (RT305X_ESW_PORTS_CPU << RT305X_ESW_SOCPC_DISBC2CPU_S)), -+ RT305X_ESW_REG_SOCPC); -+ -+ if (esw->reg_initval_fpa2) -+ esw_w32(esw, esw->reg_initval_fpa2, RT305X_ESW_REG_FPA2); -+ else -+ esw_w32(esw, esw->pdata->reg_initval_fpa2, RT305X_ESW_REG_FPA2); -+ esw_w32(esw, 0x00000000, RT305X_ESW_REG_FPA); -+ -+ /* Force Link/Activity on ports */ -+ esw_w32(esw, 0x00000005, RT305X_ESW_REG_P0LED); -+ esw_w32(esw, 0x00000005, RT305X_ESW_REG_P1LED); -+ esw_w32(esw, 0x00000005, RT305X_ESW_REG_P2LED); -+ esw_w32(esw, 0x00000005, RT305X_ESW_REG_P3LED); -+ esw_w32(esw, 0x00000005, RT305X_ESW_REG_P4LED); -+ -+ /* Copy disabled port configuration from bootloader setup */ -+ port_disable = esw_get_port_disable(esw); -+ for (i = 0; i < 6; i++) -+ esw->ports[i].disable = (port_disable & (1 << i)) != 0; -+ -+ if (soc_is_rt3352()) { -+ /* reset EPHY */ -+ u32 val = rt_sysc_r32(SYSC_REG_RESET_CTRL); -+ rt_sysc_w32(val | RT5350_RESET_EPHY, SYSC_REG_RESET_CTRL); -+ rt_sysc_w32(val, SYSC_REG_RESET_CTRL); -+ -+ rt305x_mii_write(esw, 0, 31, 0x8000); -+ for (i = 0; i < 5; i++) { -+ if (esw->ports[i].disable) { -+ rt305x_mii_write(esw, i, MII_BMCR, BMCR_PDOWN); -+ } else { -+ rt305x_mii_write(esw, i, MII_BMCR, -+ BMCR_FULLDPLX | -+ BMCR_ANENABLE | -+ BMCR_SPEED100); -+ } -+ /* TX10 waveform coefficient LSB=0 disable PHY */ -+ rt305x_mii_write(esw, i, 26, 0x1601); -+ /* TX100/TX10 AD/DA current bias */ -+ rt305x_mii_write(esw, i, 29, 0x7016); -+ /* TX100 slew rate control */ -+ rt305x_mii_write(esw, i, 30, 0x0038); -+ } -+ -+ /* select global register */ -+ rt305x_mii_write(esw, 0, 31, 0x0); -+ /* enlarge agcsel threshold 3 and threshold 2 */ -+ rt305x_mii_write(esw, 0, 1, 0x4a40); -+ /* enlarge agcsel threshold 5 and threshold 4 */ -+ rt305x_mii_write(esw, 0, 2, 0x6254); -+ /* enlarge agcsel threshold */ -+ rt305x_mii_write(esw, 0, 3, 0xa17f); -+ rt305x_mii_write(esw, 0,12, 0x7eaa); -+ /* longer TP_IDL tail length */ -+ rt305x_mii_write(esw, 0, 14, 0x65); -+ /* increased squelch pulse count threshold. 
*/ -+ rt305x_mii_write(esw, 0, 16, 0x0684); -+ /* set TX10 signal amplitude threshold to minimum */ -+ rt305x_mii_write(esw, 0, 17, 0x0fe0); -+ /* set squelch amplitude to higher threshold */ -+ rt305x_mii_write(esw, 0, 18, 0x40ba); -+ /* tune TP_IDL tail and head waveform, enable power down slew rate control */ -+ rt305x_mii_write(esw, 0, 22, 0x253f); -+ /* set PLL/Receive bias current are calibrated */ -+ rt305x_mii_write(esw, 0, 27, 0x2fda); -+ /* change PLL/Receive bias current to internal(RT3350) */ -+ rt305x_mii_write(esw, 0, 28, 0xc410); -+ /* change PLL bias current to internal(RT3052_MP3) */ -+ rt305x_mii_write(esw, 0, 29, 0x598b); -+ /* select local register */ -+ rt305x_mii_write(esw, 0, 31, 0x8000); -+ } else if (soc_is_rt5350()) { -+ /* reset EPHY */ -+ u32 val = rt_sysc_r32(SYSC_REG_RESET_CTRL); -+ rt_sysc_w32(val | RT5350_RESET_EPHY, SYSC_REG_RESET_CTRL); -+ rt_sysc_w32(val, SYSC_REG_RESET_CTRL); -+ -+ /* set the led polarity */ -+ esw_w32(esw, esw->reg_led_polarity & 0x1F, RT5350_EWS_REG_LED_POLARITY); -+ -+ /* local registers */ -+ rt305x_mii_write(esw, 0, 31, 0x8000); -+ for (i = 0; i < 5; i++) { -+ if (esw->ports[i].disable) { -+ rt305x_mii_write(esw, i, MII_BMCR, BMCR_PDOWN); -+ } else { -+ rt305x_mii_write(esw, i, MII_BMCR, -+ BMCR_FULLDPLX | -+ BMCR_ANENABLE | -+ BMCR_SPEED100); -+ } -+ /* TX10 waveform coefficient LSB=0 disable PHY */ -+ rt305x_mii_write(esw, i, 26, 0x1601); -+ /* TX100/TX10 AD/DA current bias */ -+ rt305x_mii_write(esw, i, 29, 0x7015); -+ /* TX100 slew rate control */ -+ rt305x_mii_write(esw, i, 30, 0x0038); -+ } -+ -+ /* global registers */ -+ rt305x_mii_write(esw, 0, 31, 0x0); -+ /* enlarge agcsel threshold 3 and threshold 2 */ -+ rt305x_mii_write(esw, 0, 1, 0x4a40); -+ /* enlarge agcsel threshold 5 and threshold 4 */ -+ rt305x_mii_write(esw, 0, 2, 0x6254); -+ /* enlarge agcsel threshold 6 */ -+ rt305x_mii_write(esw, 0, 3, 0xa17f); -+ rt305x_mii_write(esw, 0, 12, 0x7eaa); -+ /* longer TP_IDL tail length */ -+ rt305x_mii_write(esw, 0, 14, 0x65); -+ /* increased squelch pulse count threshold. 
*/ -+ rt305x_mii_write(esw, 0, 16, 0x0684); -+ /* set TX10 signal amplitude threshold to minimum */ -+ rt305x_mii_write(esw, 0, 17, 0x0fe0); -+ /* set squelch amplitude to higher threshold */ -+ rt305x_mii_write(esw, 0, 18, 0x40ba); -+ /* tune TP_IDL tail and head waveform, enable power down slew rate control */ -+ rt305x_mii_write(esw, 0, 22, 0x253f); -+ /* set PLL/Receive bias current are calibrated */ -+ rt305x_mii_write(esw, 0, 27, 0x2fda); -+ /* change PLL/Receive bias current to internal(RT3350) */ -+ rt305x_mii_write(esw, 0, 28, 0xc410); -+ /* change PLL bias current to internal(RT3052_MP3) */ -+ rt305x_mii_write(esw, 0, 29, 0x598b); -+ /* select local register */ -+ rt305x_mii_write(esw, 0, 31, 0x8000); -+ } else { -+ rt305x_mii_write(esw, 0, 31, 0x8000); -+ for (i = 0; i < 5; i++) { -+ if (esw->ports[i].disable) { -+ rt305x_mii_write(esw, i, MII_BMCR, BMCR_PDOWN); -+ } else { -+ rt305x_mii_write(esw, i, MII_BMCR, -+ BMCR_FULLDPLX | -+ BMCR_ANENABLE | -+ BMCR_SPEED100); -+ } -+ /* TX10 waveform coefficient */ -+ rt305x_mii_write(esw, i, 26, 0x1601); -+ /* TX100/TX10 AD/DA current bias */ -+ rt305x_mii_write(esw, i, 29, 0x7058); -+ /* TX100 slew rate control */ -+ rt305x_mii_write(esw, i, 30, 0x0018); -+ } -+ -+ /* PHY IOT */ -+ /* select global register */ -+ rt305x_mii_write(esw, 0, 31, 0x0); -+ /* tune TP_IDL tail and head waveform */ -+ rt305x_mii_write(esw, 0, 22, 0x052f); -+ /* set TX10 signal amplitude threshold to minimum */ -+ rt305x_mii_write(esw, 0, 17, 0x0fe0); -+ /* set squelch amplitude to higher threshold */ -+ rt305x_mii_write(esw, 0, 18, 0x40ba); -+ /* longer TP_IDL tail length */ -+ rt305x_mii_write(esw, 0, 14, 0x65); -+ /* select local register */ -+ rt305x_mii_write(esw, 0, 31, 0x8000); -+ } -+ -+ if (esw->port_map) -+ port_map = esw->port_map; -+ else -+ port_map = RT305X_ESW_PMAP_LLLLLL; -+ -+ /* -+ * Unused HW feature, but still nice to be consistent here... -+ * This is also exported to userspace ('lan' attribute) so it's -+ * conveniently usable to decide which ports go into the wan vlan by -+ * default. -+ */ -+ esw_rmw(esw, RT305X_ESW_REG_SGC2, -+ RT305X_ESW_SGC2_LAN_PMAP_M << RT305X_ESW_SGC2_LAN_PMAP_S, -+ port_map << RT305X_ESW_SGC2_LAN_PMAP_S); -+ -+ /* make the switch leds blink */ -+ for (i = 0; i < RT305X_ESW_NUM_LEDS; i++) -+ esw->ports[i].led = 0x05; -+ -+ /* Apply the empty config. 
*/ -+ esw_apply_config(&esw->swdev); -+ -+ /* Only unmask the port change interrupt */ -+ esw_w32(esw, ~RT305X_ESW_PORT_ST_CHG, RT305X_ESW_REG_IMR); -+} -+ -+static irqreturn_t esw_interrupt(int irq, void *_esw) -+{ -+ struct rt305x_esw *esw = (struct rt305x_esw *) _esw; -+ u32 status; -+ -+ status = esw_r32(esw, RT305X_ESW_REG_ISR); -+ if (status & RT305X_ESW_PORT_ST_CHG) { -+ u32 link = esw_r32(esw, RT305X_ESW_REG_POA); -+ link >>= RT305X_ESW_POA_LINK_SHIFT; -+ link &= RT305X_ESW_POA_LINK_MASK; -+ dev_info(esw->dev, "link changed 0x%02X\n", link); -+ } -+ esw_w32(esw, status, RT305X_ESW_REG_ISR); -+ -+ return IRQ_HANDLED; -+} -+ -+static int esw_apply_config(struct switch_dev *dev) -+{ -+ struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); -+ int i; -+ u8 disable = 0; -+ u8 doubletag = 0; -+ u8 en_vlan = 0; -+ u8 untag = 0; -+ -+ for (i = 0; i < RT305X_ESW_NUM_VLANS; i++) { -+ u32 vid, vmsc; -+ if (esw->global_vlan_enable) { -+ vid = esw->vlans[i].vid; -+ vmsc = esw->vlans[i].ports; -+ } else { -+ vid = RT305X_ESW_VLAN_NONE; -+ vmsc = RT305X_ESW_PORTS_NONE; -+ } -+ esw_set_vlan_id(esw, i, vid); -+ esw_set_vmsc(esw, i, vmsc); -+ } -+ -+ for (i = 0; i < RT305X_ESW_NUM_PORTS; i++) { -+ u32 pvid; -+ disable |= esw->ports[i].disable << i; -+ if (esw->global_vlan_enable) { -+ doubletag |= esw->ports[i].doubletag << i; -+ en_vlan |= 1 << i; -+ untag |= esw->ports[i].untag << i; -+ pvid = esw->ports[i].pvid; -+ } else { -+ int x = esw->alt_vlan_disable ? 0 : 1; -+ doubletag |= x << i; -+ en_vlan |= x << i; -+ untag |= x << i; -+ pvid = 0; -+ } -+ esw_set_pvid(esw, i, pvid); -+ if (i < RT305X_ESW_NUM_LEDS) -+ esw_w32(esw, esw->ports[i].led, -+ RT305X_ESW_REG_P0LED + 4*i); -+ } -+ -+ esw_set_gsc(esw); -+ esw_set_port_disable(esw, disable); -+ esw_rmw(esw, RT305X_ESW_REG_SGC2, -+ (RT305X_ESW_SGC2_DOUBLE_TAG_M << -+ RT305X_ESW_SGC2_DOUBLE_TAG_S), -+ doubletag << RT305X_ESW_SGC2_DOUBLE_TAG_S); -+ esw_rmw(esw, RT305X_ESW_REG_PFC1, -+ RT305X_ESW_PFC1_EN_VLAN_M << RT305X_ESW_PFC1_EN_VLAN_S, -+ en_vlan << RT305X_ESW_PFC1_EN_VLAN_S); -+ esw_rmw(esw, RT305X_ESW_REG_POC2, -+ RT305X_ESW_POC2_UNTAG_EN_M << RT305X_ESW_POC2_UNTAG_EN_S, -+ untag << RT305X_ESW_POC2_UNTAG_EN_S); -+ -+ if (!esw->global_vlan_enable) { -+ /* -+ * Still need to put all ports into vlan 0 or they'll be -+ * isolated. 
-+ * NOTE: vlan 0 is special, no vlan tag is prepended -+ */ -+ esw_set_vlan_id(esw, 0, 0); -+ esw_set_vmsc(esw, 0, RT305X_ESW_PORTS_ALL); -+ } -+ -+ return 0; -+} -+ -+static int esw_reset_switch(struct switch_dev *dev) -+{ -+ struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); -+ -+ esw->global_vlan_enable = 0; -+ memset(esw->ports, 0, sizeof(esw->ports)); -+ memset(esw->vlans, 0, sizeof(esw->vlans)); -+ esw_hw_init(esw); -+ -+ return 0; -+} -+ -+static int esw_get_vlan_enable(struct switch_dev *dev, -+ const struct switch_attr *attr, -+ struct switch_val *val) -+{ -+ struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); -+ -+ val->value.i = esw->global_vlan_enable; -+ -+ return 0; -+} -+ -+static int esw_set_vlan_enable(struct switch_dev *dev, -+ const struct switch_attr *attr, -+ struct switch_val *val) -+{ -+ struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); -+ -+ esw->global_vlan_enable = val->value.i != 0; -+ -+ return 0; -+} -+ -+static int esw_get_alt_vlan_disable(struct switch_dev *dev, -+ const struct switch_attr *attr, -+ struct switch_val *val) -+{ -+ struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); -+ -+ val->value.i = esw->alt_vlan_disable; -+ -+ return 0; -+} -+ -+static int esw_set_alt_vlan_disable(struct switch_dev *dev, -+ const struct switch_attr *attr, -+ struct switch_val *val) -+{ -+ struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); -+ -+ esw->alt_vlan_disable = val->value.i != 0; -+ -+ return 0; -+} -+ -+static int -+rt305x_esw_set_bc_status(struct switch_dev *dev, -+ const struct switch_attr *attr, -+ struct switch_val *val) -+{ -+ struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); -+ -+ esw->bc_storm_protect = val->value.i & RT305X_ESW_GSC_BC_STROM_MASK; -+ -+ return 0; -+} -+ -+static int -+rt305x_esw_get_bc_status(struct switch_dev *dev, -+ const struct switch_attr *attr, -+ struct switch_val *val) -+{ -+ struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); -+ -+ val->value.i = esw->bc_storm_protect; -+ -+ return 0; -+} -+ -+static int -+rt305x_esw_set_led_freq(struct switch_dev *dev, -+ const struct switch_attr *attr, -+ struct switch_val *val) -+{ -+ struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); -+ -+ esw->led_frequency = val->value.i & RT305X_ESW_GSC_LED_FREQ_MASK; -+ -+ return 0; -+} -+ -+static int -+rt305x_esw_get_led_freq(struct switch_dev *dev, -+ const struct switch_attr *attr, -+ struct switch_val *val) -+{ -+ struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); -+ -+ val->value.i = esw->led_frequency; -+ -+ return 0; -+} -+ -+static int esw_get_port_link(struct switch_dev *dev, -+ int port, -+ struct switch_port_link *link) -+{ -+ struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); -+ u32 speed, poa; -+ -+ if (port < 0 || port >= RT305X_ESW_NUM_PORTS) -+ return -EINVAL; -+ -+ poa = esw_r32(esw, RT305X_ESW_REG_POA) >> port; -+ -+ link->link = (poa >> RT305X_ESW_LINK_S) & 1; -+ link->duplex = (poa >> RT305X_ESW_DUPLEX_S) & 1; -+ if (port < RT305X_ESW_NUM_LEDS) { -+ speed = (poa >> RT305X_ESW_SPD_S) & 1; -+ } else { -+ if (port == RT305X_ESW_NUM_PORTS - 1) -+ poa >>= 1; -+ speed = (poa >> RT305X_ESW_SPD_S) & 3; -+ } -+ switch (speed) { -+ case 0: -+ link->speed = SWITCH_PORT_SPEED_10; -+ break; -+ case 1: -+ link->speed = SWITCH_PORT_SPEED_100; -+ break; -+ case 2: -+ case 3: /* forced gige speed can be 2 or 3 */ -+ link->speed = SWITCH_PORT_SPEED_1000; -+ break; -+ 
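-+ /* Editorial note, not in the original patch: speed is masked with
-+ * either 0x1 or 0x3 above, so this default arm is an unreachable
-+ * defensive fallback rather than a real hardware state. */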
default: -+ link->speed = SWITCH_PORT_SPEED_UNKNOWN; -+ break; -+ } -+ -+ return 0; -+} -+ -+static int esw_get_port_bool(struct switch_dev *dev, -+ const struct switch_attr *attr, -+ struct switch_val *val) -+{ -+ struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); -+ int idx = val->port_vlan; -+ u32 x, reg, shift; -+ -+ if (idx < 0 || idx >= RT305X_ESW_NUM_PORTS) -+ return -EINVAL; -+ -+ switch (attr->id) { -+ case RT305X_ESW_ATTR_PORT_DISABLE: -+ reg = RT305X_ESW_REG_POC0; -+ shift = RT305X_ESW_POC0_DIS_PORT_S; -+ break; -+ case RT305X_ESW_ATTR_PORT_DOUBLETAG: -+ reg = RT305X_ESW_REG_SGC2; -+ shift = RT305X_ESW_SGC2_DOUBLE_TAG_S; -+ break; -+ case RT305X_ESW_ATTR_PORT_UNTAG: -+ reg = RT305X_ESW_REG_POC2; -+ shift = RT305X_ESW_POC2_UNTAG_EN_S; -+ break; -+ case RT305X_ESW_ATTR_PORT_LAN: -+ reg = RT305X_ESW_REG_SGC2; -+ shift = RT305X_ESW_SGC2_LAN_PMAP_S; -+ if (idx >= RT305X_ESW_NUM_LANWAN) -+ return -EINVAL; -+ break; -+ default: -+ return -EINVAL; -+ } -+ -+ x = esw_r32(esw, reg); -+ val->value.i = (x >> (idx + shift)) & 1; -+ -+ return 0; -+} -+ -+static int esw_set_port_bool(struct switch_dev *dev, -+ const struct switch_attr *attr, -+ struct switch_val *val) -+{ -+ struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); -+ int idx = val->port_vlan; -+ -+ if (idx < 0 || idx >= RT305X_ESW_NUM_PORTS || -+ val->value.i < 0 || val->value.i > 1) -+ return -EINVAL; -+ -+ switch (attr->id) { -+ case RT305X_ESW_ATTR_PORT_DISABLE: -+ esw->ports[idx].disable = val->value.i; -+ break; -+ case RT305X_ESW_ATTR_PORT_DOUBLETAG: -+ esw->ports[idx].doubletag = val->value.i; -+ break; -+ case RT305X_ESW_ATTR_PORT_UNTAG: -+ esw->ports[idx].untag = val->value.i; -+ break; -+ default: -+ return -EINVAL; -+ } -+ -+ return 0; -+} -+ -+static int esw_get_port_recv_badgood(struct switch_dev *dev, -+ const struct switch_attr *attr, -+ struct switch_val *val) -+{ -+ struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); -+ int idx = val->port_vlan; -+ int shift = attr->id == RT305X_ESW_ATTR_PORT_RECV_GOOD ? 0 : 16; -+ u32 reg; -+ -+ if (idx < 0 || idx >= RT305X_ESW_NUM_LANWAN) -+ return -EINVAL; -+ reg = esw_r32(esw, RT305X_ESW_REG_PXPC(idx)); -+ val->value.i = (reg >> shift) & 0xffff; -+ -+ return 0; -+} -+ -+static int -+esw_get_port_tr_badgood(struct switch_dev *dev, -+ const struct switch_attr *attr, -+ struct switch_val *val) -+{ -+ struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); -+ -+ int idx = val->port_vlan; -+ int shift = attr->id == RT5350_ESW_ATTR_PORT_TR_GOOD ? 
0 : 16; -+ u32 reg; -+ -+ if (!soc_is_rt5350()) -+ return -EINVAL; -+ -+ if (idx < 0 || idx >= RT305X_ESW_NUM_LANWAN) -+ return -EINVAL; -+ -+ reg = esw_r32(esw, RT5350_ESW_REG_PXTPC(idx)); -+ val->value.i = (reg >> shift) & 0xffff; -+ -+ return 0; -+} -+ -+static int esw_get_port_led(struct switch_dev *dev, -+ const struct switch_attr *attr, -+ struct switch_val *val) -+{ -+ struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); -+ int idx = val->port_vlan; -+ -+ if (idx < 0 || idx >= RT305X_ESW_NUM_PORTS || -+ idx >= RT305X_ESW_NUM_LEDS) -+ return -EINVAL; -+ -+ val->value.i = esw_r32(esw, RT305X_ESW_REG_P0LED + 4*idx); -+ -+ return 0; -+} -+ -+static int esw_set_port_led(struct switch_dev *dev, -+ const struct switch_attr *attr, -+ struct switch_val *val) -+{ -+ struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); -+ int idx = val->port_vlan; -+ -+ if (idx < 0 || idx >= RT305X_ESW_NUM_LEDS) -+ return -EINVAL; -+ -+ esw->ports[idx].led = val->value.i; -+ -+ return 0; -+} -+ -+static int esw_get_port_pvid(struct switch_dev *dev, int port, int *val) -+{ -+ struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); -+ -+ if (port >= RT305X_ESW_NUM_PORTS) -+ return -EINVAL; -+ -+ *val = esw_get_pvid(esw, port); -+ -+ return 0; -+} -+ -+static int esw_set_port_pvid(struct switch_dev *dev, int port, int val) -+{ -+ struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); -+ -+ if (port >= RT305X_ESW_NUM_PORTS) -+ return -EINVAL; -+ -+ esw->ports[port].pvid = val; -+ -+ return 0; -+} -+ -+static int esw_get_vlan_ports(struct switch_dev *dev, struct switch_val *val) -+{ -+ struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); -+ u32 vmsc, poc2; -+ int vlan_idx = -1; -+ int i; -+ -+ val->len = 0; -+ -+ if (val->port_vlan < 0 || val->port_vlan >= RT305X_ESW_NUM_VIDS) -+ return -EINVAL; -+ -+ /* valid vlan? */ -+ for (i = 0; i < RT305X_ESW_NUM_VLANS; i++) { -+ if (esw_get_vlan_id(esw, i) == val->port_vlan && -+ esw_get_vmsc(esw, i) != RT305X_ESW_PORTS_NONE) { -+ vlan_idx = i; -+ break; -+ } -+ } -+ -+ if (vlan_idx == -1) -+ return -EINVAL; -+ -+ vmsc = esw_get_vmsc(esw, vlan_idx); -+ poc2 = esw_r32(esw, RT305X_ESW_REG_POC2); -+ -+ for (i = 0; i < RT305X_ESW_NUM_PORTS; i++) { -+ struct switch_port *p; -+ int port_mask = 1 << i; -+ -+ if (!(vmsc & port_mask)) -+ continue; -+ -+ p = &val->value.ports[val->len++]; -+ p->id = i; -+ if (poc2 & (port_mask << RT305X_ESW_POC2_UNTAG_EN_S)) -+ p->flags = 0; -+ else -+ p->flags = 1 << SWITCH_PORT_FLAG_TAGGED; -+ } -+ -+ return 0; -+} -+ -+static int esw_set_vlan_ports(struct switch_dev *dev, struct switch_val *val) -+{ -+ struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); -+ int ports; -+ int vlan_idx = -1; -+ int i; -+ -+ if (val->port_vlan < 0 || val->port_vlan >= RT305X_ESW_NUM_VIDS || -+ val->len > RT305X_ESW_NUM_PORTS) -+ return -EINVAL; -+ -+ /* one of the already defined vlans? 
*/ -+ for (i = 0; i < RT305X_ESW_NUM_VLANS; i++) { -+ if (esw->vlans[i].vid == val->port_vlan && -+ esw->vlans[i].ports != RT305X_ESW_PORTS_NONE) { -+ vlan_idx = i; -+ break; -+ } -+ } -+ -+ /* select a free slot */ -+ for (i = 0; vlan_idx == -1 && i < RT305X_ESW_NUM_VLANS; i++) { -+ if (esw->vlans[i].ports == RT305X_ESW_PORTS_NONE) -+ vlan_idx = i; -+ } -+ -+ /* bail if all slots are in use */ -+ if (vlan_idx == -1) -+ return -EINVAL; -+ -+ ports = RT305X_ESW_PORTS_NONE; -+ for (i = 0; i < val->len; i++) { -+ struct switch_port *p = &val->value.ports[i]; -+ int port_mask = 1 << p->id; -+ bool untagged = !(p->flags & (1 << SWITCH_PORT_FLAG_TAGGED)); -+ -+ if (p->id >= RT305X_ESW_NUM_PORTS) -+ return -EINVAL; -+ -+ ports |= port_mask; -+ esw->ports[p->id].untag = untagged; -+ } -+ esw->vlans[vlan_idx].ports = ports; -+ if (ports == RT305X_ESW_PORTS_NONE) -+ esw->vlans[vlan_idx].vid = RT305X_ESW_VLAN_NONE; -+ else -+ esw->vlans[vlan_idx].vid = val->port_vlan; -+ -+ return 0; -+} -+ -+static const struct switch_attr esw_global[] = { -+ { -+ .type = SWITCH_TYPE_INT, -+ .name = "enable_vlan", -+ .description = "VLAN mode (1:enabled)", -+ .max = 1, -+ .id = RT305X_ESW_ATTR_ENABLE_VLAN, -+ .get = esw_get_vlan_enable, -+ .set = esw_set_vlan_enable, -+ }, -+ { -+ .type = SWITCH_TYPE_INT, -+ .name = "alternate_vlan_disable", -+ .description = "Use en_vlan instead of doubletag to disable" -+ " VLAN mode", -+ .max = 1, -+ .id = RT305X_ESW_ATTR_ALT_VLAN_DISABLE, -+ .get = esw_get_alt_vlan_disable, -+ .set = esw_set_alt_vlan_disable, -+ }, -+ { -+ .type = SWITCH_TYPE_INT, -+ .name = "bc_storm_protect", -+ .description = "Global broadcast storm protection (0:Disable, 1:64 blocks, 2:96 blocks, 3:128 blocks)", -+ .max = 3, -+ .id = RT305X_ESW_ATTR_BC_STATUS, -+ .get = rt305x_esw_get_bc_status, -+ .set = rt305x_esw_set_bc_status, -+ }, -+ { -+ .type = SWITCH_TYPE_INT, -+ .name = "led_frequency", -+ .description = "LED Flash frequency (0:30mS, 1:60mS, 2:240mS, 3:480mS)", -+ .max = 3, -+ .id = RT305X_ESW_ATTR_LED_FREQ, -+ .get = rt305x_esw_get_led_freq, -+ .set = rt305x_esw_set_led_freq, -+ } -+}; -+ -+static const struct switch_attr esw_port[] = { -+ { -+ .type = SWITCH_TYPE_INT, -+ .name = "disable", -+ .description = "Port state (1:disabled)", -+ .max = 1, -+ .id = RT305X_ESW_ATTR_PORT_DISABLE, -+ .get = esw_get_port_bool, -+ .set = esw_set_port_bool, -+ }, -+ { -+ .type = SWITCH_TYPE_INT, -+ .name = "doubletag", -+ .description = "Double tagging for incoming vlan packets " -+ "(1:enabled)", -+ .max = 1, -+ .id = RT305X_ESW_ATTR_PORT_DOUBLETAG, -+ .get = esw_get_port_bool, -+ .set = esw_set_port_bool, -+ }, -+ { -+ .type = SWITCH_TYPE_INT, -+ .name = "untag", -+ .description = "Untag (1:strip outgoing vlan tag)", -+ .max = 1, -+ .id = RT305X_ESW_ATTR_PORT_UNTAG, -+ .get = esw_get_port_bool, -+ .set = esw_set_port_bool, -+ }, -+ { -+ .type = SWITCH_TYPE_INT, -+ .name = "led", -+ .description = "LED mode (0:link, 1:100m, 2:duplex, 3:activity," -+ " 4:collision, 5:linkact, 6:duplcoll, 7:10mact," -+ " 8:100mact, 10:blink, 11:off, 12:on)", -+ .max = 15, -+ .id = RT305X_ESW_ATTR_PORT_LED, -+ .get = esw_get_port_led, -+ .set = esw_set_port_led, -+ }, -+ { -+ .type = SWITCH_TYPE_INT, -+ .name = "lan", -+ .description = "HW port group (0:wan, 1:lan)", -+ .max = 1, -+ .id = RT305X_ESW_ATTR_PORT_LAN, -+ .get = esw_get_port_bool, -+ }, -+ { -+ .type = SWITCH_TYPE_INT, -+ .name = "recv_bad", -+ .description = "Receive bad packet counter", -+ .id = RT305X_ESW_ATTR_PORT_RECV_BAD, -+ .get = esw_get_port_recv_badgood, -+ 
}, -+ { -+ .type = SWITCH_TYPE_INT, -+ .name = "recv_good", -+ .description = "Receive good packet counter", -+ .id = RT305X_ESW_ATTR_PORT_RECV_GOOD, -+ .get = esw_get_port_recv_badgood, -+ }, -+ { -+ .type = SWITCH_TYPE_INT, -+ .name = "tr_bad", -+ -+ .description = "Transmit bad packet counter. rt5350 only", -+ .id = RT5350_ESW_ATTR_PORT_TR_BAD, -+ .get = esw_get_port_tr_badgood, -+ }, -+ { -+ .type = SWITCH_TYPE_INT, -+ .name = "tr_good", -+ -+ .description = "Transmit good packet counter. rt5350 only", -+ .id = RT5350_ESW_ATTR_PORT_TR_GOOD, -+ .get = esw_get_port_tr_badgood, -+ }, -+}; -+ -+static const struct switch_attr esw_vlan[] = { -+}; -+ -+static const struct switch_dev_ops esw_ops = { -+ .attr_global = { -+ .attr = esw_global, -+ .n_attr = ARRAY_SIZE(esw_global), -+ }, -+ .attr_port = { -+ .attr = esw_port, -+ .n_attr = ARRAY_SIZE(esw_port), -+ }, -+ .attr_vlan = { -+ .attr = esw_vlan, -+ .n_attr = ARRAY_SIZE(esw_vlan), -+ }, -+ .get_vlan_ports = esw_get_vlan_ports, -+ .set_vlan_ports = esw_set_vlan_ports, -+ .get_port_pvid = esw_get_port_pvid, -+ .set_port_pvid = esw_set_port_pvid, -+ .get_port_link = esw_get_port_link, -+ .apply_config = esw_apply_config, -+ .reset_switch = esw_reset_switch, -+}; -+ -+static struct rt305x_esw_platform_data rt3050_esw_data = { -+ /* All ports are LAN ports. */ -+ .vlan_config = RT305X_ESW_VLAN_CONFIG_NONE, -+ .reg_initval_fct2 = 0x00d6500c, -+ /* -+ * ext phy base addr 31, enable port 5 polling, rx/tx clock skew 1, -+ * turbo mii off, rgmi 3.3v off -+ * port5: disabled -+ * port6: enabled, gige, full-duplex, rx/tx-flow-control -+ */ -+ .reg_initval_fpa2 = 0x3f502b28, -+}; -+ -+static const struct of_device_id ralink_esw_match[] = { -+ { .compatible = "ralink,rt3050-esw", .data = &rt3050_esw_data }, -+ {}, -+}; -+MODULE_DEVICE_TABLE(of, ralink_esw_match); -+ -+static int esw_probe(struct platform_device *pdev) -+{ -+ struct device_node *np = pdev->dev.of_node; -+ const struct rt305x_esw_platform_data *pdata; -+ const __be32 *port_map, *reg_init; -+ struct rt305x_esw *esw; -+ struct switch_dev *swdev; -+ struct resource *res, *irq; -+ int err; -+ -+ pdata = pdev->dev.platform_data; -+ if (!pdata) { -+ const struct of_device_id *match; -+ match = of_match_device(ralink_esw_match, &pdev->dev); -+ if (match) -+ pdata = (struct rt305x_esw_platform_data *) match->data; -+ } -+ if (!pdata) -+ return -EINVAL; -+ -+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -+ if (!res) { -+ dev_err(&pdev->dev, "no memory resource found\n"); -+ return -ENOMEM; -+ } -+ -+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); -+ if (!irq) { -+ dev_err(&pdev->dev, "no irq resource found\n"); -+ return -ENOMEM; -+ } -+ -+ esw = kzalloc(sizeof(struct rt305x_esw), GFP_KERNEL); -+ if (!esw) { -+ dev_err(&pdev->dev, "no memory for private data\n"); -+ return -ENOMEM; -+ } -+ -+ esw->dev = &pdev->dev; -+ esw->irq = irq->start; -+ esw->base = ioremap(res->start, resource_size(res)); -+ if (!esw->base) { -+ dev_err(&pdev->dev, "ioremap failed\n"); -+ err = -ENOMEM; -+ goto free_esw; -+ } -+ -+ port_map = of_get_property(np, "ralink,portmap", NULL); -+ if (port_map) -+ esw->port_map = be32_to_cpu(*port_map); -+ -+ reg_init = of_get_property(np, "ralink,fct2", NULL); -+ if (reg_init) -+ esw->reg_initval_fct2 = be32_to_cpu(*reg_init); -+ -+ reg_init = of_get_property(np, "ralink,fpa2", NULL); -+ if (reg_init) -+ esw->reg_initval_fpa2 = be32_to_cpu(*reg_init); -+ -+ reg_init = of_get_property(np, "ralink,led_polarity", NULL); -+ if (reg_init) -+ esw->reg_led_polarity = 
be32_to_cpu(*reg_init); -+ -+ swdev = &esw->swdev; -+ swdev->of_node = pdev->dev.of_node; -+ swdev->name = "rt305x-esw"; -+ swdev->alias = "rt305x"; -+ swdev->cpu_port = RT305X_ESW_PORT6; -+ swdev->ports = RT305X_ESW_NUM_PORTS; -+ swdev->vlans = RT305X_ESW_NUM_VIDS; -+ swdev->ops = &esw_ops; -+ -+ err = register_switch(swdev, NULL); -+ if (err < 0) { -+ dev_err(&pdev->dev, "register_switch failed\n"); -+ goto unmap_base; -+ } -+ -+ platform_set_drvdata(pdev, esw); -+ -+ esw->pdata = pdata; -+ spin_lock_init(&esw->reg_rw_lock); -+ -+ esw_hw_init(esw); -+ -+ esw_w32(esw, RT305X_ESW_PORT_ST_CHG, RT305X_ESW_REG_ISR); -+ esw_w32(esw, ~RT305X_ESW_PORT_ST_CHG, RT305X_ESW_REG_IMR); -+ request_irq(esw->irq, esw_interrupt, 0, "esw", esw); -+ -+ return 0; -+ -+unmap_base: -+ iounmap(esw->base); -+free_esw: -+ kfree(esw); -+ return err; -+} -+ -+static int esw_remove(struct platform_device *pdev) -+{ -+ struct rt305x_esw *esw; -+ -+ esw = platform_get_drvdata(pdev); -+ if (esw) { -+ unregister_switch(&esw->swdev); -+ platform_set_drvdata(pdev, NULL); -+ iounmap(esw->base); -+ kfree(esw); -+ } -+ -+ return 0; -+} -+ -+static struct platform_driver esw_driver = { -+ .probe = esw_probe, -+ .remove = esw_remove, -+ .driver = { -+ .name = "rt305x-esw", -+ .owner = THIS_MODULE, -+ .of_match_table = ralink_esw_match, -+ }, -+}; -+ -+int __init rtesw_init(void) -+{ -+ return platform_driver_register(&esw_driver); -+} -+ -+void rtesw_exit(void) -+{ -+ platform_driver_unregister(&esw_driver); -+} ---- /dev/null -+++ b/drivers/net/ethernet/ralink/esw_rt3052.h -@@ -0,0 +1,32 @@ -+/* -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; version 2 of the License -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. -+ * -+ * Copyright (C) 2009-2013 John Crispin -+ */ -+ -+#ifndef _RALINK_ESW_RT3052_H__ -+#define _RALINK_ESW_RT3052_H__ -+ -+#ifdef CONFIG_NET_RALINK_ESW_RT3052 -+ -+int __init rtesw_init(void); -+void rtesw_exit(void); -+ -+#else -+ -+static inline int __init rtesw_init(void) { return 0; } -+static inline void rtesw_exit(void) { } -+ -+#endif -+#endif ---- /dev/null -+++ b/drivers/net/ethernet/ralink/gsw_mt7620a.c -@@ -0,0 +1,566 @@ -+/* -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; version 2 of the License -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. 
-+ * -+ * Copyright (C) 2009-2013 John Crispin -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+ -+#include "ralink_soc_eth.h" -+ -+#include -+#include -+#include -+ -+#include -+#include -+ -+#include "ralink_soc_eth.h" -+#include "gsw_mt7620a.h" -+#include "mt7530.h" -+#include "mdio.h" -+ -+#define GSW_REG_PHY_TIMEOUT (5 * HZ) -+ -+#define MT7620A_GSW_REG_PIAC 0x7004 -+ -+#define GSW_NUM_VLANS 16 -+#define GSW_NUM_VIDS 4096 -+#define GSW_NUM_PORTS 7 -+#define GSW_PORT6 6 -+ -+#define GSW_MDIO_ACCESS BIT(31) -+#define GSW_MDIO_READ BIT(19) -+#define GSW_MDIO_WRITE BIT(18) -+#define GSW_MDIO_START BIT(16) -+#define GSW_MDIO_ADDR_SHIFT 20 -+#define GSW_MDIO_REG_SHIFT 25 -+ -+#define GSW_REG_PORT_PMCR(x) (0x3000 + (x * 0x100)) -+#define GSW_REG_PORT_STATUS(x) (0x3008 + (x * 0x100)) -+#define GSW_REG_SMACCR0 0x3fE4 -+#define GSW_REG_SMACCR1 0x3fE8 -+#define GSW_REG_CKGCR 0x3ff0 -+ -+#define GSW_REG_IMR 0x7008 -+#define GSW_REG_ISR 0x700c -+ -+#define SYSC_REG_CFG1 0x14 -+ -+#define PORT_IRQ_ST_CHG 0x7f -+ -+#define SYSCFG1 0x14 -+ -+#define ESW_PHY_POLLING 0x7000 -+ -+#define PMCR_IPG BIT(18) -+#define PMCR_MAC_MODE BIT(16) -+#define PMCR_FORCE BIT(15) -+#define PMCR_TX_EN BIT(14) -+#define PMCR_RX_EN BIT(13) -+#define PMCR_BACKOFF BIT(9) -+#define PMCR_BACKPRES BIT(8) -+#define PMCR_RX_FC BIT(5) -+#define PMCR_TX_FC BIT(4) -+#define PMCR_SPEED(_x) (_x << 2) -+#define PMCR_DUPLEX BIT(1) -+#define PMCR_LINK BIT(0) -+ -+#define PHY_AN_EN BIT(31) -+#define PHY_PRE_EN BIT(30) -+#define PMY_MDC_CONF(_x) ((_x & 0x3f) << 24) -+ -+enum { -+ /* Global attributes. */ -+ GSW_ATTR_ENABLE_VLAN, -+ /* Port attributes. 
*/ -+ GSW_ATTR_PORT_UNTAG, -+}; -+ -+enum { -+ PORT4_EPHY = 0, -+ PORT4_EXT, -+}; -+ -+struct mt7620_gsw { -+ struct device *dev; -+ void __iomem *base; -+ int irq; -+ int port4; -+ long unsigned int autopoll; -+}; -+ -+static inline void gsw_w32(struct mt7620_gsw *gsw, u32 val, unsigned reg) -+{ -+ iowrite32(val, gsw->base + reg); -+} -+ -+static inline u32 gsw_r32(struct mt7620_gsw *gsw, unsigned reg) -+{ -+ return ioread32(gsw->base + reg); -+} -+ -+static int mt7620_mii_busy_wait(struct mt7620_gsw *gsw) -+{ -+ unsigned long t_start = jiffies; -+ -+ while (1) { -+ if (!(gsw_r32(gsw, MT7620A_GSW_REG_PIAC) & GSW_MDIO_ACCESS)) -+ return 0; -+ if (time_after(jiffies, t_start + GSW_REG_PHY_TIMEOUT)) { -+ break; -+ } -+ } -+ -+ printk(KERN_ERR "mdio: MDIO timeout\n"); -+ return -1; -+} -+ -+static u32 _mt7620_mii_write(struct mt7620_gsw *gsw, u32 phy_addr, u32 phy_register, -+ u32 write_data) -+{ -+ if (mt7620_mii_busy_wait(gsw)) -+ return -1; -+ -+ write_data &= 0xffff; -+ -+ gsw_w32(gsw, GSW_MDIO_ACCESS | GSW_MDIO_START | GSW_MDIO_WRITE | -+ (phy_register << GSW_MDIO_REG_SHIFT) | -+ (phy_addr << GSW_MDIO_ADDR_SHIFT) | write_data, -+ MT7620A_GSW_REG_PIAC); -+ -+ if (mt7620_mii_busy_wait(gsw)) -+ return -1; -+ -+ return 0; -+} -+ -+static u32 _mt7620_mii_read(struct mt7620_gsw *gsw, int phy_addr, int phy_reg) -+{ -+ u32 d; -+ -+ if (mt7620_mii_busy_wait(gsw)) -+ return 0xffff; -+ -+ gsw_w32(gsw, GSW_MDIO_ACCESS | GSW_MDIO_START | GSW_MDIO_READ | -+ (phy_reg << GSW_MDIO_REG_SHIFT) | -+ (phy_addr << GSW_MDIO_ADDR_SHIFT), -+ MT7620A_GSW_REG_PIAC); -+ -+ if (mt7620_mii_busy_wait(gsw)) -+ return 0xffff; -+ -+ d = gsw_r32(gsw, MT7620A_GSW_REG_PIAC) & 0xffff; -+ -+ return d; -+} -+ -+int mt7620_mdio_write(struct mii_bus *bus, int phy_addr, int phy_reg, u16 val) -+{ -+ struct fe_priv *priv = bus->priv; -+ struct mt7620_gsw *gsw = (struct mt7620_gsw *) priv->soc->swpriv; -+ -+ return _mt7620_mii_write(gsw, phy_addr, phy_reg, val); -+} -+ -+int mt7620_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg) -+{ -+ struct fe_priv *priv = bus->priv; -+ struct mt7620_gsw *gsw = (struct mt7620_gsw *) priv->soc->swpriv; -+ -+ return _mt7620_mii_read(gsw, phy_addr, phy_reg); -+} -+ -+static unsigned char *fe_speed_str(int speed) -+{ -+ switch (speed) { -+ case 2: -+ case SPEED_1000: -+ return "1000"; -+ case 1: -+ case SPEED_100: -+ return "100"; -+ case 0: -+ case SPEED_10: -+ return "10"; -+ } -+ -+ return "? "; -+} -+ -+int mt7620a_has_carrier(struct fe_priv *priv) -+{ -+ struct mt7620_gsw *gsw = (struct mt7620_gsw *) priv->soc->swpriv; -+ int i; -+ -+ for (i = 0; i < GSW_PORT6; i++) -+ if (gsw_r32(gsw, GSW_REG_PORT_STATUS(i)) & 0x1) -+ return 1; -+ return 0; -+} -+ -+static void mt7620a_handle_carrier(struct fe_priv *priv) -+{ -+ if (!priv->phy) -+ return; -+ -+ if (mt7620a_has_carrier(priv)) -+ netif_carrier_on(priv->netdev); -+ else -+ netif_carrier_off(priv->netdev); -+} -+ -+void mt7620_mdio_link_adjust(struct fe_priv *priv, int port) -+{ -+ if (priv->link[port]) -+ netdev_info(priv->netdev, "port %d link up (%sMbps/%s duplex)\n", -+ port, fe_speed_str(priv->phy->speed[port]), -+ (DUPLEX_FULL == priv->phy->duplex[port]) ? "Full" : "Half"); -+ else -+ netdev_info(priv->netdev, "port %d link down\n", port); -+ mt7620a_handle_carrier(priv); -+} -+ -+static irqreturn_t gsw_interrupt(int irq, void *_priv) -+{ -+ struct fe_priv *priv = (struct fe_priv *) _priv; -+ struct mt7620_gsw *gsw = (struct mt7620_gsw *) priv->soc->swpriv; -+ u32 status; -+ int i, max = (gsw->port4 == PORT4_EPHY) ? 
(4) : (3); -+ -+ status = gsw_r32(gsw, GSW_REG_ISR); -+ if (status & PORT_IRQ_ST_CHG) -+ for (i = 0; i <= max; i++) { -+ u32 status = gsw_r32(gsw, GSW_REG_PORT_STATUS(i)); -+ int link = status & 0x1; -+ -+ if (link != priv->link[i]) { -+ if (link) -+ netdev_info(priv->netdev, "port %d link up (%sMbps/%s duplex)\n", -+ i, fe_speed_str((status >> 2) & 3), -+ (status & 0x2) ? "Full" : "Half"); -+ else -+ netdev_info(priv->netdev, "port %d link down\n", i); -+ } -+ -+ priv->link[i] = link; -+ } -+ mt7620a_handle_carrier(priv); -+ -+ gsw_w32(gsw, status, GSW_REG_ISR); -+ -+ return IRQ_HANDLED; -+} -+ -+static int mt7620_is_bga(void) -+{ -+ u32 bga = rt_sysc_r32(0x0c); -+ -+ return (bga >> 16) & 1; -+} -+ -+static void gsw_auto_poll(struct mt7620_gsw *gsw) -+{ -+ int phy; -+ int lsb = -1, msb = 0; -+ -+ for_each_set_bit(phy, &gsw->autopoll, 32) { -+ if (lsb < 0) -+ lsb = phy; -+ msb = phy; -+ } -+ -+ gsw_w32(gsw, PHY_AN_EN | PHY_PRE_EN | PMY_MDC_CONF(5) | (msb << 8) | lsb, ESW_PHY_POLLING); -+} -+ -+void mt7620_port_init(struct fe_priv *priv, struct device_node *np) -+{ -+ struct mt7620_gsw *gsw = (struct mt7620_gsw *) priv->soc->swpriv; -+ const __be32 *_id = of_get_property(np, "reg", NULL); -+ int phy_mode, size, id; -+ int shift = 12; -+ u32 val, mask = 0; -+ int min = (gsw->port4 == PORT4_EPHY) ? (5) : (4); -+ -+ if (!_id || (be32_to_cpu(*_id) < min) || (be32_to_cpu(*_id) > 5)) { -+ if (_id) -+ pr_err("%s: invalid port id %d\n", np->name, be32_to_cpu(*_id)); -+ else -+ pr_err("%s: invalid port id\n", np->name); -+ return; -+ } -+ -+ id = be32_to_cpu(*_id); -+ -+ if (id == 4) -+ shift = 14; -+ -+ priv->phy->phy_fixed[id] = of_get_property(np, "ralink,fixed-link", &size); -+ if (priv->phy->phy_fixed[id] && (size != (4 * sizeof(*priv->phy->phy_fixed[id])))) { -+ pr_err("%s: invalid fixed link property\n", np->name); -+ priv->phy->phy_fixed[id] = NULL; -+ return; -+ } -+ -+ phy_mode = of_get_phy_mode(np); -+ switch (phy_mode) { -+ case PHY_INTERFACE_MODE_RGMII: -+ mask = 0; -+ break; -+ case PHY_INTERFACE_MODE_MII: -+ mask = 1; -+ break; -+ case PHY_INTERFACE_MODE_RMII: -+ mask = 2; -+ break; -+ default: -+ dev_err(priv->device, "port %d - invalid phy mode\n", id); -+ return; -+ } -+ -+ priv->phy->phy_node[id] = of_parse_phandle(np, "phy-handle", 0); -+ if (!priv->phy->phy_node[id] && !priv->phy->phy_fixed[id]) -+ return; -+ -+ val = rt_sysc_r32(SYSCFG1); -+ val &= ~(3 << shift); -+ val |= mask << shift; -+ rt_sysc_w32(val, SYSCFG1); -+ -+ if (priv->phy->phy_fixed[id]) { -+ const __be32 *link = priv->phy->phy_fixed[id]; -+ int tx_fc, rx_fc; -+ u32 val = 0; -+ -+ priv->phy->speed[id] = be32_to_cpup(link++); -+ tx_fc = be32_to_cpup(link++); -+ rx_fc = be32_to_cpup(link++); -+ priv->phy->duplex[id] = be32_to_cpup(link++); -+ priv->link[id] = 1; -+ -+ switch (priv->phy->speed[id]) { -+ case SPEED_10: -+ val = 0; -+ break; -+ case SPEED_100: -+ val = 1; -+ break; -+ case SPEED_1000: -+ val = 2; -+ break; -+ default: -+ dev_err(priv->device, "invalid link speed: %d\n", priv->phy->speed[id]); -+ priv->phy->phy_fixed[id] = 0; -+ return; -+ } -+ val = PMCR_SPEED(val); -+ val |= PMCR_LINK | PMCR_BACKPRES | PMCR_BACKOFF | PMCR_RX_EN | -+ PMCR_TX_EN | PMCR_FORCE | PMCR_MAC_MODE | PMCR_IPG; -+ if (tx_fc) -+ val |= PMCR_TX_FC; -+ if (rx_fc) -+ val |= PMCR_RX_FC; -+ if (priv->phy->duplex[id]) -+ val |= PMCR_DUPLEX; -+ gsw_w32(gsw, val, GSW_REG_PORT_PMCR(id)); -+ dev_info(priv->device, "using fixed link parameters\n"); -+ return; -+ } -+ -+ if (priv->phy->phy_node[id] && priv->mii_bus->phy_map[id]) { -+ u32 
val = PMCR_BACKPRES | PMCR_BACKOFF | PMCR_RX_EN | -+ PMCR_TX_EN | PMCR_MAC_MODE | PMCR_IPG; -+ -+ gsw_w32(gsw, val, GSW_REG_PORT_PMCR(id)); -+ fe_connect_phy_node(priv, priv->phy->phy_node[id]); -+ gsw->autopoll |= BIT(id); -+ gsw_auto_poll(gsw); -+ return; -+ } -+} -+ -+static void gsw_hw_init(struct mt7620_gsw *gsw) -+{ -+ u32 is_BGA = mt7620_is_bga(); -+ -+ rt_sysc_w32(rt_sysc_r32(SYSC_REG_CFG1) | BIT(8), SYSC_REG_CFG1); -+ gsw_w32(gsw, gsw_r32(gsw, GSW_REG_CKGCR) & ~(0x3 << 4), GSW_REG_CKGCR); -+ -+ /*correct PHY setting L3.0 BGA*/ -+ _mt7620_mii_write(gsw, 1, 31, 0x4000); //global, page 4 -+ -+ _mt7620_mii_write(gsw, 1, 17, 0x7444); -+ if (is_BGA) -+ _mt7620_mii_write(gsw, 1, 19, 0x0114); -+ else -+ _mt7620_mii_write(gsw, 1, 19, 0x0117); -+ -+ _mt7620_mii_write(gsw, 1, 22, 0x10cf); -+ _mt7620_mii_write(gsw, 1, 25, 0x6212); -+ _mt7620_mii_write(gsw, 1, 26, 0x0777); -+ _mt7620_mii_write(gsw, 1, 29, 0x4000); -+ _mt7620_mii_write(gsw, 1, 28, 0xc077); -+ _mt7620_mii_write(gsw, 1, 24, 0x0000); -+ -+ _mt7620_mii_write(gsw, 1, 31, 0x3000); //global, page 3 -+ _mt7620_mii_write(gsw, 1, 17, 0x4838); -+ -+ _mt7620_mii_write(gsw, 1, 31, 0x2000); //global, page 2 -+ if (is_BGA) { -+ _mt7620_mii_write(gsw, 1, 21, 0x0515); -+ _mt7620_mii_write(gsw, 1, 22, 0x0053); -+ _mt7620_mii_write(gsw, 1, 23, 0x00bf); -+ _mt7620_mii_write(gsw, 1, 24, 0x0aaf); -+ _mt7620_mii_write(gsw, 1, 25, 0x0fad); -+ _mt7620_mii_write(gsw, 1, 26, 0x0fc1); -+ } else { -+ _mt7620_mii_write(gsw, 1, 21, 0x0517); -+ _mt7620_mii_write(gsw, 1, 22, 0x0fd2); -+ _mt7620_mii_write(gsw, 1, 23, 0x00bf); -+ _mt7620_mii_write(gsw, 1, 24, 0x0aab); -+ _mt7620_mii_write(gsw, 1, 25, 0x00ae); -+ _mt7620_mii_write(gsw, 1, 26, 0x0fff); -+ } -+ _mt7620_mii_write(gsw, 1, 31, 0x1000); //global, page 1 -+ _mt7620_mii_write(gsw, 1, 17, 0xe7f8); -+ -+ _mt7620_mii_write(gsw, 1, 31, 0x8000); //local, page 0 -+ _mt7620_mii_write(gsw, 0, 30, 0xa000); -+ _mt7620_mii_write(gsw, 1, 30, 0xa000); -+ _mt7620_mii_write(gsw, 2, 30, 0xa000); -+ _mt7620_mii_write(gsw, 3, 30, 0xa000); -+ -+ _mt7620_mii_write(gsw, 0, 4, 0x05e1); -+ _mt7620_mii_write(gsw, 1, 4, 0x05e1); -+ _mt7620_mii_write(gsw, 2, 4, 0x05e1); -+ _mt7620_mii_write(gsw, 3, 4, 0x05e1); -+ _mt7620_mii_write(gsw, 1, 31, 0xa000); //local, page 2 -+ _mt7620_mii_write(gsw, 0, 16, 0x1111); -+ _mt7620_mii_write(gsw, 1, 16, 0x1010); -+ _mt7620_mii_write(gsw, 2, 16, 0x1515); -+ _mt7620_mii_write(gsw, 3, 16, 0x0f0f); -+ -+ /* CPU Port6 Force Link 1G, FC ON */ -+ gsw_w32(gsw, 0x5e33b, GSW_REG_PORT_PMCR(6)); -+ /* Set Port6 CPU Port */ -+ gsw_w32(gsw, 0x7f7f7fe0, 0x0010); -+ -+ /* setup port 4 */ -+ if (gsw->port4 == PORT4_EPHY) { -+ u32 val = rt_sysc_r32(SYSCFG1); -+ val |= 3 << 14; -+ rt_sysc_w32(val, SYSCFG1); -+ _mt7620_mii_write(gsw, 4, 30, 0xa000); -+ _mt7620_mii_write(gsw, 4, 4, 0x05e1); -+ _mt7620_mii_write(gsw, 4, 16, 0x1313); -+ pr_info("gsw: setting port4 to ephy mode\n"); -+ } -+} -+ -+void mt7620_set_mac(struct fe_priv *priv, unsigned char *mac) -+{ -+ struct mt7620_gsw *gsw = (struct mt7620_gsw *) priv->soc->swpriv; -+ unsigned long flags; -+ -+ spin_lock_irqsave(&priv->page_lock, flags); -+ gsw_w32(gsw, (mac[0] << 8) | mac[1], GSW_REG_SMACCR1); -+ gsw_w32(gsw, (mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5], -+ GSW_REG_SMACCR0); -+ spin_unlock_irqrestore(&priv->page_lock, flags); -+} -+ -+static struct of_device_id gsw_match[] = { -+ { .compatible = "ralink,mt7620a-gsw" }, -+ {} -+}; -+ -+int mt7620_gsw_config(struct fe_priv *priv) -+{ -+ struct mt7620_gsw *gsw = (struct mt7620_gsw *) 
priv->soc->swpriv; -+ -+ /* is the mt7530 internal or external */ -+ if ((_mt7620_mii_read(gsw, 0x1f, 2) == 1) && (_mt7620_mii_read(gsw, 0x1f, 3) == 0xbeef)) -+ mt7530_probe(priv->device, NULL, priv->mii_bus); -+ else -+ mt7530_probe(priv->device, gsw->base, NULL); -+ -+ return 0; -+} -+ -+int mt7620_gsw_probe(struct fe_priv *priv) -+{ -+ struct mt7620_gsw *gsw; -+ struct device_node *np; -+ const char *port4 = NULL; -+ -+ np = of_find_matching_node(NULL, gsw_match); -+ if (!np) { -+ dev_err(priv->device, "no gsw node found\n"); -+ return -EINVAL; -+ } -+ np = of_node_get(np); -+ -+ gsw = devm_kzalloc(priv->device, sizeof(struct mt7620_gsw), GFP_KERNEL); -+ if (!gsw) { -+ dev_err(priv->device, "no gsw memory for private data\n"); -+ return -ENOMEM; -+ } -+ -+ gsw->irq = irq_of_parse_and_map(np, 0); -+ if (!gsw->irq) { -+ dev_err(priv->device, "no gsw irq resource found\n"); -+ return -ENOMEM; -+ } -+ -+ gsw->base = of_iomap(np, 0); -+ if (!gsw->base) { -+ dev_err(priv->device, "gsw ioremap failed\n"); -+ return -ENOMEM; -+ } -+ -+ gsw->dev = priv->device; -+ priv->soc->swpriv = gsw; -+ -+ of_property_read_string(np, "ralink,port4", &port4); -+ if (port4 && !strcmp(port4, "ephy")) -+ gsw->port4 = PORT4_EPHY; -+ else if (port4 && !strcmp(port4, "gmac")) -+ gsw->port4 = PORT4_EXT; -+ else -+ WARN_ON(port4); -+ -+ gsw_hw_init(gsw); -+ -+ gsw_w32(gsw, ~PORT_IRQ_ST_CHG, GSW_REG_IMR); -+ request_irq(gsw->irq, gsw_interrupt, 0, "gsw", priv); -+ -+ return 0; -+} ---- /dev/null -+++ b/drivers/net/ethernet/ralink/gsw_mt7620a.h -@@ -0,0 +1,30 @@ -+/* -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; version 2 of the License -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. -+ * -+ * Copyright (C) 2009-2013 John Crispin -+ */ -+ -+#ifndef _RALINK_GSW_MT7620_H__ -+#define _RALINK_GSW_MT7620_H__ -+ -+extern int mt7620_gsw_config(struct fe_priv *priv); -+extern int mt7620_gsw_probe(struct fe_priv *priv); -+extern void mt7620_set_mac(struct fe_priv *priv, unsigned char *mac); -+extern int mt7620_mdio_write(struct mii_bus *bus, int phy_addr, int phy_reg, u16 val); -+extern int mt7620_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg); -+extern void mt7620_mdio_link_adjust(struct fe_priv *priv, int port); -+extern void mt7620_port_init(struct fe_priv *priv, struct device_node *np); -+extern int mt7620a_has_carrier(struct fe_priv *priv); -+ -+#endif ---- /dev/null -+++ b/drivers/net/ethernet/ralink/mdio.c -@@ -0,0 +1,244 @@ -+/* -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; version 2 of the License -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. -+ * -+ * Copyright (C) 2009-2013 John Crispin -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "ralink_soc_eth.h" -+#include "mdio.h" -+ -+static int fe_mdio_reset(struct mii_bus *bus) -+{ -+ /* TODO */ -+ return 0; -+} -+ -+static void fe_phy_link_adjust(struct net_device *dev) -+{ -+ struct fe_priv *priv = netdev_priv(dev); -+ unsigned long flags; -+ int i; -+ -+ spin_lock_irqsave(&priv->phy->lock, flags); -+ for (i = 0; i < 8; i++) { -+ if (priv->phy->phy_node[i]) { -+ struct phy_device *phydev = priv->phy->phy[i]; -+ int status_change = 0; -+ -+ if (phydev->link) -+ if (priv->phy->duplex[i] != phydev->duplex || -+ priv->phy->speed[i] != phydev->speed) -+ status_change = 1; -+ -+ if (phydev->link != priv->link[i]) -+ status_change = 1; -+ -+ switch (phydev->speed) { -+ case SPEED_1000: -+ case SPEED_100: -+ case SPEED_10: -+ priv->link[i] = phydev->link; -+ priv->phy->duplex[i] = phydev->duplex; -+ priv->phy->speed[i] = phydev->speed; -+ -+ if (status_change && priv->soc->mdio_adjust_link) -+ priv->soc->mdio_adjust_link(priv, i); -+ break; -+ } -+ } -+ } -+} -+ -+int fe_connect_phy_node(struct fe_priv *priv, struct device_node *phy_node) -+{ -+ const __be32 *_port = NULL; -+ struct phy_device *phydev; -+ int phy_mode, port; -+ -+ _port = of_get_property(phy_node, "reg", NULL); -+ -+ if (!_port || (be32_to_cpu(*_port) >= 0x20)) { -+ pr_err("%s: invalid port id\n", phy_node->name); -+ return -EINVAL; -+ } -+ port = be32_to_cpu(*_port); -+ phy_mode = of_get_phy_mode(phy_node); -+ if (phy_mode < 0) { -+ dev_err(priv->device, "incorrect phy-mode %d\n", phy_mode); -+ priv->phy->phy_node[port] = NULL; -+ return -EINVAL; -+ } -+ -+ phydev = of_phy_connect(priv->netdev, phy_node, fe_phy_link_adjust, -+ 0, phy_mode); -+ if (IS_ERR(phydev)) { -+ dev_err(priv->device, "could not connect to PHY\n"); -+ priv->phy->phy_node[port] = NULL; -+ return PTR_ERR(phydev); -+ } -+ -+ phydev->supported &= PHY_GBIT_FEATURES; -+ phydev->advertising = phydev->supported; -+ phydev->no_auto_carrier_off = 1; -+ -+ dev_info(priv->device, -+ "connected port %d to PHY at %s [uid=%08x, driver=%s]\n", -+ port, dev_name(&phydev->dev), phydev->phy_id, -+ phydev->drv->name); -+ -+ priv->phy->phy[port] = phydev; -+ priv->link[port] = 0; -+ -+ return 0; -+} -+ -+static int fe_phy_connect(struct fe_priv *priv) -+{ -+ return 0; -+} -+ -+static void fe_phy_disconnect(struct fe_priv *priv) -+{ -+ unsigned long flags; -+ int i; -+ -+ for (i = 0; i < 8; i++) -+ if (priv->phy->phy_fixed[i]) { -+ spin_lock_irqsave(&priv->phy->lock, flags); -+ priv->link[i] = 0; -+ if (priv->soc->mdio_adjust_link) -+ priv->soc->mdio_adjust_link(priv, i); -+ spin_unlock_irqrestore(&priv->phy->lock, flags); -+ } else if (priv->phy->phy[i]) { -+ phy_disconnect(priv->phy->phy[i]); -+ } -+} -+ -+static void fe_phy_start(struct fe_priv *priv) -+{ -+ unsigned long flags; -+ int i; -+ -+ for (i = 0; i < 8; i++) { -+ if (priv->phy->phy_fixed[i]) { -+ spin_lock_irqsave(&priv->phy->lock, flags); -+ priv->link[i] = 1; -+ if (priv->soc->mdio_adjust_link) -+ priv->soc->mdio_adjust_link(priv, i); -+ spin_unlock_irqrestore(&priv->phy->lock, flags); -+ } else if (priv->phy->phy[i]) { -+ phy_start(priv->phy->phy[i]); -+ } -+ } 
-+} -+ -+static void fe_phy_stop(struct fe_priv *priv) -+{ -+ unsigned long flags; -+ int i; -+ -+ for (i = 0; i < 8; i++) -+ if (priv->phy->phy_fixed[i]) { -+ spin_lock_irqsave(&priv->phy->lock, flags); -+ priv->link[i] = 0; -+ if (priv->soc->mdio_adjust_link) -+ priv->soc->mdio_adjust_link(priv, i); -+ spin_unlock_irqrestore(&priv->phy->lock, flags); -+ } else if (priv->phy->phy[i]) { -+ phy_stop(priv->phy->phy[i]); -+ } -+} -+ -+static struct fe_phy phy_ralink = { -+ .connect = fe_phy_connect, -+ .disconnect = fe_phy_disconnect, -+ .start = fe_phy_start, -+ .stop = fe_phy_stop, -+}; -+ -+int fe_mdio_init(struct fe_priv *priv) -+{ -+ struct device_node *mii_np; -+ int err; -+ -+ if (!priv->soc->mdio_read || !priv->soc->mdio_write) -+ return 0; -+ -+ spin_lock_init(&phy_ralink.lock); -+ priv->phy = &phy_ralink; -+ -+ mii_np = of_get_child_by_name(priv->device->of_node, "mdio-bus"); -+ if (!mii_np) { -+ dev_err(priv->device, "no %s child node found", "mdio-bus"); -+ return -ENODEV; -+ } -+ -+ if (!of_device_is_available(mii_np)) { -+ err = 0; -+ goto err_put_node; -+ } -+ -+ priv->mii_bus = mdiobus_alloc(); -+ if (priv->mii_bus == NULL) { -+ err = -ENOMEM; -+ goto err_put_node; -+ } -+ -+ priv->mii_bus->name = "mdio"; -+ priv->mii_bus->read = priv->soc->mdio_read; -+ priv->mii_bus->write = priv->soc->mdio_write; -+ priv->mii_bus->reset = fe_mdio_reset; -+ priv->mii_bus->irq = priv->mii_irq; -+ priv->mii_bus->priv = priv; -+ priv->mii_bus->parent = priv->device; -+ -+ snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s", mii_np->name); -+ err = of_mdiobus_register(priv->mii_bus, mii_np); -+ if (err) -+ goto err_free_bus; -+ -+ return 0; -+ -+err_free_bus: -+ kfree(priv->mii_bus); -+err_put_node: -+ of_node_put(mii_np); -+ priv->mii_bus = NULL; -+ return err; -+} -+ -+void fe_mdio_cleanup(struct fe_priv *priv) -+{ -+ if (!priv->mii_bus) -+ return; -+ -+ mdiobus_unregister(priv->mii_bus); -+ of_node_put(priv->mii_bus->dev.of_node); -+ kfree(priv->mii_bus); -+} ---- /dev/null -+++ b/drivers/net/ethernet/ralink/mdio.h -@@ -0,0 +1,29 @@ -+/* -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; version 2 of the License -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. 
-+ * -+ * Copyright (C) 2009-2013 John Crispin -+ */ -+ -+#ifndef _RALINK_MDIO_H__ -+#define _RALINK_MDIO_H__ -+ -+#ifdef CONFIG_NET_RALINK_MDIO -+extern int fe_mdio_init(struct fe_priv *priv); -+extern void fe_mdio_cleanup(struct fe_priv *priv); -+extern int fe_connect_phy_node(struct fe_priv *priv, struct device_node *phy_node); -+#else -+static inline int fe_mdio_init(struct fe_priv *priv) { return 0; } -+static inline void fe_mdio_cleanup(struct fe_priv *priv) {} -+#endif -+#endif ---- /dev/null -+++ b/drivers/net/ethernet/ralink/mdio_rt2880.c -@@ -0,0 +1,232 @@ -+/* -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; version 2 of the License -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. -+ * -+ * Copyright (C) 2009-2013 John Crispin -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "ralink_soc_eth.h" -+#include "mdio_rt2880.h" -+#include "mdio.h" -+ -+#define FE_MDIO_RETRY 1000 -+ -+static unsigned char *rt2880_speed_str(struct fe_priv *priv) -+{ -+ switch (priv->phy->speed[0]) { -+ case SPEED_1000: -+ return "1000"; -+ case SPEED_100: -+ return "100"; -+ case SPEED_10: -+ return "10"; -+ } -+ -+ return "?"; -+} -+ -+void rt2880_mdio_link_adjust(struct fe_priv *priv, int port) -+{ -+ u32 mdio_cfg; -+ -+ if (!priv->link[0]) { -+ netif_carrier_off(priv->netdev); -+ netdev_info(priv->netdev, "link down\n"); -+ return; -+ } -+ -+ mdio_cfg = FE_MDIO_CFG_TX_CLK_SKEW_200 | -+ FE_MDIO_CFG_RX_CLK_SKEW_200 | -+ FE_MDIO_CFG_GP1_FRC_EN; -+ -+ if (priv->phy->duplex[0] == DUPLEX_FULL) -+ mdio_cfg |= FE_MDIO_CFG_GP1_DUPLEX; -+ -+ if (priv->phy->tx_fc[0]) -+ mdio_cfg |= FE_MDIO_CFG_GP1_FC_TX; -+ -+ if (priv->phy->rx_fc[0]) -+ mdio_cfg |= FE_MDIO_CFG_GP1_FC_RX; -+ -+ switch (priv->phy->speed[0]) { -+ case SPEED_10: -+ mdio_cfg |= FE_MDIO_CFG_GP1_SPEED_10; -+ break; -+ case SPEED_100: -+ mdio_cfg |= FE_MDIO_CFG_GP1_SPEED_100; -+ break; -+ case SPEED_1000: -+ mdio_cfg |= FE_MDIO_CFG_GP1_SPEED_1000; -+ break; -+ default: -+ BUG(); -+ } -+ -+ fe_w32(mdio_cfg, FE_MDIO_CFG); -+ -+ netif_carrier_on(priv->netdev); -+ netdev_info(priv->netdev, "link up (%sMbps/%s duplex)\n", -+ rt2880_speed_str(priv), -+ (DUPLEX_FULL == priv->phy->duplex[0]) ? 
"Full" : "Half"); -+} -+ -+static int rt2880_mdio_wait_ready(struct fe_priv *priv) -+{ -+ int retries; -+ -+ retries = FE_MDIO_RETRY; -+ while (1) { -+ u32 t; -+ -+ t = fe_r32(FE_MDIO_ACCESS); -+ if ((t & (0x1 << 31)) == 0) -+ return 0; -+ -+ if (retries-- == 0) -+ break; -+ -+ udelay(1); -+ } -+ -+ dev_err(priv->device, "MDIO operation timed out\n"); -+ return -ETIMEDOUT; -+} -+ -+int rt2880_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg) -+{ -+ struct fe_priv *priv = bus->priv; -+ int err; -+ u32 t; -+ -+ err = rt2880_mdio_wait_ready(priv); -+ if (err) -+ return 0xffff; -+ -+ t = (phy_addr << 24) | (phy_reg << 16); -+ fe_w32(t, FE_MDIO_ACCESS); -+ t |= (1 << 31); -+ fe_w32(t, FE_MDIO_ACCESS); -+ -+ err = rt2880_mdio_wait_ready(priv); -+ if (err) -+ return 0xffff; -+ -+ pr_info("%s: addr=%04x, reg=%04x, value=%04x\n", __func__, -+ phy_addr, phy_reg, fe_r32(FE_MDIO_ACCESS) & 0xffff); -+ -+ return fe_r32(FE_MDIO_ACCESS) & 0xffff; -+} -+ -+int rt2880_mdio_write(struct mii_bus *bus, int phy_addr, int phy_reg, u16 val) -+{ -+ struct fe_priv *priv = bus->priv; -+ int err; -+ u32 t; -+ -+ pr_info("%s: addr=%04x, reg=%04x, value=%04x\n", __func__, -+ phy_addr, phy_reg, fe_r32(FE_MDIO_ACCESS) & 0xffff); -+ -+ err = rt2880_mdio_wait_ready(priv); -+ if (err) -+ return err; -+ -+ t = (1 << 30) | (phy_addr << 24) | (phy_reg << 16) | val; -+ fe_w32(t, FE_MDIO_ACCESS); -+ t |= (1 << 31); -+ fe_w32(t, FE_MDIO_ACCESS); -+ -+ return rt2880_mdio_wait_ready(priv); -+} -+ -+void rt2880_port_init(struct fe_priv *priv, struct device_node *np) -+{ -+ const __be32 *id = of_get_property(np, "reg", NULL); -+ const __be32 *link; -+ int size; -+ int phy_mode; -+ -+ if (!id || (be32_to_cpu(*id) != 0)) { -+ pr_err("%s: invalid port id\n", np->name); -+ return; -+ } -+ -+ priv->phy->phy_fixed[0] = of_get_property(np, "ralink,fixed-link", &size); -+ if (priv->phy->phy_fixed[0] && (size != (4 * sizeof(*priv->phy->phy_fixed[0])))) { -+ pr_err("%s: invalid fixed link property\n", np->name); -+ priv->phy->phy_fixed[0] = NULL; -+ return; -+ } -+ -+ phy_mode = of_get_phy_mode(np); -+ switch (phy_mode) { -+ case PHY_INTERFACE_MODE_RGMII: -+ break; -+ case PHY_INTERFACE_MODE_MII: -+ break; -+ case PHY_INTERFACE_MODE_RMII: -+ break; -+ default: -+ if (!priv->phy->phy_fixed[0]) -+ dev_err(priv->device, "port %d - invalid phy mode\n", priv->phy->speed[0]); -+ break; -+ } -+ -+ priv->phy->phy_node[0] = of_parse_phandle(np, "phy-handle", 0); -+ if (!priv->phy->phy_node[0] && !priv->phy->phy_fixed[0]) -+ return; -+ -+ if (priv->phy->phy_fixed[0]) { -+ link = priv->phy->phy_fixed[0]; -+ priv->phy->speed[0] = be32_to_cpup(link++); -+ priv->phy->duplex[0] = be32_to_cpup(link++); -+ priv->phy->tx_fc[0] = be32_to_cpup(link++); -+ priv->phy->rx_fc[0] = be32_to_cpup(link++); -+ -+ priv->link[0] = 1; -+ switch (priv->phy->speed[0]) { -+ case SPEED_10: -+ break; -+ case SPEED_100: -+ break; -+ case SPEED_1000: -+ break; -+ default: -+ dev_err(priv->device, "invalid link speed: %d\n", priv->phy->speed[0]); -+ priv->phy->phy_fixed[0] = 0; -+ return; -+ } -+ dev_info(priv->device, "using fixed link parameters\n"); -+ rt2880_mdio_link_adjust(priv, 0); -+ return; -+ } -+ if (priv->phy->phy_node[0] && priv->mii_bus->phy_map[0]) { -+ fe_connect_phy_node(priv, priv->phy->phy_node[0]); -+ } -+ -+ return; -+} ---- /dev/null -+++ b/drivers/net/ethernet/ralink/mdio_rt2880.h -@@ -0,0 +1,26 @@ -+/* -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ 
* the Free Software Foundation; version 2 of the License -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. -+ * -+ * Copyright (C) 2009-2013 John Crispin -+ */ -+ -+#ifndef _RALINK_MDIO_RT2880_H__ -+#define _RALINK_MDIO_RT2880_H__ -+ -+void rt2880_mdio_link_adjust(struct fe_priv *priv, int port); -+int rt2880_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg); -+int rt2880_mdio_write(struct mii_bus *bus, int phy_addr, int phy_reg, u16 val); -+void rt2880_port_init(struct fe_priv *priv, struct device_node *np); -+ -+#endif ---- /dev/null -+++ b/drivers/net/ethernet/ralink/ralink_soc_eth.c -@@ -0,0 +1,846 @@ -+/* -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; version 2 of the License -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. -+ * -+ * Copyright (C) 2009-2013 John Crispin -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+ -+#include "ralink_soc_eth.h" -+#include "esw_rt3052.h" -+#include "mdio.h" -+ -+#define TX_TIMEOUT (2 * HZ) -+#define MAX_RX_LENGTH 1536 -+#define DMA_DUMMY_DESC 0xffffffff -+ -+static const u32 fe_reg_table_default[FE_REG_COUNT] = { -+ [FE_REG_PDMA_GLO_CFG] = FE_PDMA_GLO_CFG, -+ [FE_REG_PDMA_RST_CFG] = FE_PDMA_RST_CFG, -+ [FE_REG_DLY_INT_CFG] = FE_DLY_INT_CFG, -+ [FE_REG_TX_BASE_PTR0] = FE_TX_BASE_PTR0, -+ [FE_REG_TX_MAX_CNT0] = FE_TX_MAX_CNT0, -+ [FE_REG_TX_CTX_IDX0] = FE_TX_CTX_IDX0, -+ [FE_REG_RX_BASE_PTR0] = FE_RX_BASE_PTR0, -+ [FE_REG_RX_MAX_CNT0] = FE_RX_MAX_CNT0, -+ [FE_REG_RX_CALC_IDX0] = FE_RX_CALC_IDX0, -+ [FE_REG_FE_INT_ENABLE] = FE_FE_INT_ENABLE, -+ [FE_REG_FE_INT_STATUS] = FE_FE_INT_STATUS, -+}; -+ -+static const u32 *fe_reg_table = fe_reg_table_default; -+ -+static void __iomem *fe_base = 0; -+ -+void fe_w32(u32 val, unsigned reg) -+{ -+ __raw_writel(val, fe_base + reg); -+} -+ -+u32 fe_r32(unsigned reg) -+{ -+ return __raw_readl(fe_base + reg); -+} -+ -+static inline void fe_reg_w32(u32 val, enum fe_reg reg) -+{ -+ fe_w32(val, fe_reg_table[reg]); -+} -+ -+static inline u32 fe_reg_r32(enum fe_reg reg) -+{ -+ return fe_r32(fe_reg_table[reg]); -+} -+ -+static inline void fe_int_disable(u32 mask) -+{ -+ fe_reg_w32(fe_reg_r32(FE_REG_FE_INT_ENABLE) & ~mask, -+ FE_REG_FE_INT_ENABLE); -+ /* flush write */ -+ fe_reg_r32(FE_REG_FE_INT_ENABLE); -+} -+ -+static inline void fe_int_enable(u32 mask) -+{ -+ fe_reg_w32(fe_reg_r32(FE_REG_FE_INT_ENABLE) | mask, -+ FE_REG_FE_INT_ENABLE); -+ /* flush write */ -+ fe_reg_r32(FE_REG_FE_INT_ENABLE); -+} -+ 
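For reference, a minimal user-space sketch of the masked interrupt enable/disable pattern used by the helpers above. This is illustrative only and not part of the patch: the FE_INT_ENABLE offset and the array-backed register file are assumptions standing in for the driver's fe_reg_table lookup and the ioremapped fe_base; the point being shown is the read-back after each write, which flushes the posted write before interrupt state is relied upon.

/* Sketch only -- not part of the patch. Models fe_int_enable/fe_int_disable
 * with a plain array in place of the memory-mapped frame engine registers. */
#include <stdint.h>
#include <stdio.h>

#define FE_INT_ENABLE 0x104                    /* assumed offset, for illustration */

static volatile uint32_t regs[0x1000 / 4];     /* fake register file */

static void fe_w32(uint32_t val, unsigned reg) { regs[reg / 4] = val; }
static uint32_t fe_r32(unsigned reg) { return regs[reg / 4]; }

static void fe_int_disable(uint32_t mask)
{
	fe_w32(fe_r32(FE_INT_ENABLE) & ~mask, FE_INT_ENABLE);
	(void)fe_r32(FE_INT_ENABLE);           /* read back to flush the write */
}

static void fe_int_enable(uint32_t mask)
{
	fe_w32(fe_r32(FE_INT_ENABLE) | mask, FE_INT_ENABLE);
	(void)fe_r32(FE_INT_ENABLE);           /* read back to flush the write */
}

int main(void)
{
	fe_int_enable(0x3);                    /* unmask two interrupt sources */
	fe_int_disable(0x1);                   /* mask one of them again */
	printf("enable mask: 0x%x\n", fe_r32(FE_INT_ENABLE));
	return 0;
}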
-+static inline void fe_hw_set_macaddr(struct fe_priv *priv, unsigned char *mac) -+{ -+ unsigned long flags; -+ -+ spin_lock_irqsave(&priv->page_lock, flags); -+ fe_w32((mac[0] << 8) | mac[1], FE_GDMA1_MAC_ADRH); -+ fe_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5], -+ FE_GDMA1_MAC_ADRL); -+ spin_unlock_irqrestore(&priv->page_lock, flags); -+} -+ -+static int fe_set_mac_address(struct net_device *dev, void *p) -+{ -+ int ret = eth_mac_addr(dev, p); -+ -+ if (!ret) { -+ struct fe_priv *priv = netdev_priv(dev); -+ -+ if (priv->soc->set_mac) -+ priv->soc->set_mac(priv, dev->dev_addr); -+ else -+ fe_hw_set_macaddr(priv, p); -+ } -+ -+ return ret; -+} -+ -+static struct sk_buff* fe_alloc_skb(struct fe_priv *priv) -+{ -+ struct sk_buff *skb; -+ -+ skb = netdev_alloc_skb(priv->netdev, MAX_RX_LENGTH + NET_IP_ALIGN); -+ if (!skb) -+ return NULL; -+ -+ skb_reserve(skb, NET_IP_ALIGN); -+ -+ return skb; -+} -+ -+static int fe_alloc_rx(struct fe_priv *priv) -+{ -+ int size = NUM_DMA_DESC * sizeof(struct fe_rx_dma); -+ int i; -+ -+ priv->rx_dma = dma_alloc_coherent(&priv->netdev->dev, size, -+ &priv->rx_phys, GFP_ATOMIC); -+ if (!priv->rx_dma) -+ return -ENOMEM; -+ -+ memset(priv->rx_dma, 0, size); -+ -+ for (i = 0; i < NUM_DMA_DESC; i++) { -+ priv->rx_skb[i] = fe_alloc_skb(priv); -+ if (!priv->rx_skb[i]) -+ return -ENOMEM; -+ } -+ -+ for (i = 0; i < NUM_DMA_DESC; i++) { -+ dma_addr_t dma_addr = dma_map_single(&priv->netdev->dev, -+ priv->rx_skb[i]->data, -+ MAX_RX_LENGTH, -+ DMA_FROM_DEVICE); -+ priv->rx_dma[i].rxd1 = (unsigned int) dma_addr; -+ -+ if (priv->soc->rx_dma) -+ priv->soc->rx_dma(priv, i, MAX_RX_LENGTH); -+ else -+ priv->rx_dma[i].rxd2 = RX_DMA_LSO; -+ } -+ wmb(); -+ -+ fe_reg_w32(priv->rx_phys, FE_REG_RX_BASE_PTR0); -+ fe_reg_w32(NUM_DMA_DESC, FE_REG_RX_MAX_CNT0); -+ fe_reg_w32((NUM_DMA_DESC - 1), FE_REG_RX_CALC_IDX0); -+ fe_reg_w32(FE_PST_DRX_IDX0, FE_REG_PDMA_RST_CFG); -+ -+ return 0; -+} -+ -+static int fe_alloc_tx(struct fe_priv *priv) -+{ -+ int size = NUM_DMA_DESC * sizeof(struct fe_tx_dma); -+ int i; -+ -+ priv->tx_free_idx = 0; -+ -+ priv->tx_dma = dma_alloc_coherent(&priv->netdev->dev, size, -+ &priv->tx_phys, GFP_ATOMIC); -+ if (!priv->tx_dma) -+ return -ENOMEM; -+ -+ memset(priv->tx_dma, 0, size); -+ -+ for (i = 0; i < NUM_DMA_DESC; i++) { -+ if (priv->soc->tx_dma) { -+ priv->soc->tx_dma(priv, i, NULL); -+ continue; -+ } -+ -+ priv->tx_dma[i].txd2 = TX_DMA_LSO | TX_DMA_DONE; -+ priv->tx_dma[i].txd4 = TX_DMA_QN(3) | TX_DMA_PN(1); -+ } -+ -+ fe_reg_w32(priv->tx_phys, FE_REG_TX_BASE_PTR0); -+ fe_reg_w32(NUM_DMA_DESC, FE_REG_TX_MAX_CNT0); -+ fe_reg_w32(0, FE_REG_TX_CTX_IDX0); -+ fe_reg_w32(FE_PST_DTX_IDX0, FE_REG_PDMA_RST_CFG); -+ -+ return 0; -+} -+ -+static void fe_free_dma(struct fe_priv *priv) -+{ -+ int i; -+ -+ for (i = 0; i < NUM_DMA_DESC; i++) { -+ if (priv->rx_skb[i]) { -+ dma_unmap_single(&priv->netdev->dev, priv->rx_dma[i].rxd1, -+ MAX_RX_LENGTH, DMA_FROM_DEVICE); -+ dev_kfree_skb_any(priv->rx_skb[i]); -+ priv->rx_skb[i] = NULL; -+ } -+ -+ if (priv->tx_skb[i]) { -+ dev_kfree_skb_any(priv->tx_skb[i]); -+ priv->tx_skb[i] = NULL; -+ } -+ } -+ -+ if (priv->rx_dma) { -+ int size = NUM_DMA_DESC * sizeof(struct fe_rx_dma); -+ dma_free_coherent(&priv->netdev->dev, size, priv->rx_dma, -+ priv->rx_phys); -+ } -+ -+ if (priv->tx_dma) { -+ int size = NUM_DMA_DESC * sizeof(struct fe_tx_dma); -+ dma_free_coherent(&priv->netdev->dev, size, priv->tx_dma, -+ priv->tx_phys); -+ } -+ -+ netdev_reset_queue(priv->netdev); -+} -+ -+static void fe_start_tso(struct sk_buff *skb, 
struct net_device *dev, unsigned int nr_frags, int idx) -+{ -+ struct fe_priv *priv = netdev_priv(dev); -+ struct skb_frag_struct *frag; -+ int i; -+ -+ for (i = 0; i < nr_frags; i++) { -+ dma_addr_t mapped_addr; -+ -+ frag = &skb_shinfo(skb)->frags[i]; -+ mapped_addr = skb_frag_dma_map(&dev->dev, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); -+ if (i % 2) { -+ idx = (idx + 1) % NUM_DMA_DESC; -+ priv->tx_dma[idx].txd1 = mapped_addr; -+ if (i == nr_frags - 1) -+ priv->tx_dma[idx].txd2 = TX_DMA_LSO | TX_DMA_PLEN0(frag->size); -+ else -+ priv->tx_dma[idx].txd2 = TX_DMA_PLEN0(frag->size); -+ } else { -+ priv->tx_dma[idx].txd3 = mapped_addr; -+ if (i == nr_frags - 1) -+ priv->tx_dma[idx].txd2 |= TX_DMA_LS1 | TX_DMA_PLEN1(frag->size); -+ else -+ priv->tx_dma[idx].txd2 |= TX_DMA_PLEN1(frag->size); -+ } -+ } -+} -+ -+static int fe_start_xmit(struct sk_buff *skb, struct net_device *dev) -+{ -+ unsigned int nr_frags = skb_shinfo(skb)->nr_frags; -+ struct fe_priv *priv = netdev_priv(dev); -+ dma_addr_t mapped_addr; -+ u32 tx_next, tx, tx_num = 1; -+ int i; -+ -+ if (priv->soc->min_pkt_len) { -+ if (skb->len < priv->soc->min_pkt_len) { -+ if (skb_padto(skb, priv->soc->min_pkt_len)) { -+ printk(KERN_ERR -+ "fe_eth: skb_padto failed\n"); -+ kfree_skb(skb); -+ return 0; -+ } -+ skb_put(skb, priv->soc->min_pkt_len - skb->len); -+ } -+ } -+ -+ dev->trans_start = jiffies; -+ mapped_addr = dma_map_single(&priv->netdev->dev, skb->data, -+ skb->len, DMA_TO_DEVICE); -+ -+ spin_lock(&priv->page_lock); -+ -+ tx = fe_reg_r32(FE_REG_TX_CTX_IDX0); -+ if (priv->soc->tso && nr_frags) -+ tx_num += nr_frags >> 1; -+ tx_next = (tx + tx_num) % NUM_DMA_DESC; -+ if ((priv->tx_skb[tx]) || (priv->tx_skb[tx_next]) || -+ !(priv->tx_dma[tx].txd2 & TX_DMA_DONE) || -+ !(priv->tx_dma[tx_next].txd2 & TX_DMA_DONE)) -+ { -+ spin_unlock(&priv->page_lock); -+ dev->stats.tx_dropped++; -+ kfree_skb(skb); -+ -+ return NETDEV_TX_OK; -+ } -+ -+ if (priv->soc->tso) { -+ int t = tx_num; -+ -+ priv->tx_skb[(tx + t - 1) % NUM_DMA_DESC] = skb; -+ while (--t) -+ priv->tx_skb[(tx + t - 1) % NUM_DMA_DESC] = (struct sk_buff *) DMA_DUMMY_DESC; -+ } else { -+ priv->tx_skb[tx] = skb; -+ } -+ priv->tx_dma[tx].txd1 = (unsigned int) mapped_addr; -+ wmb(); -+ -+ priv->tx_dma[tx].txd4 &= ~0x80; -+ if (priv->soc->tx_dma) -+ priv->soc->tx_dma(priv, tx, skb); -+ else -+ priv->tx_dma[tx].txd2 = TX_DMA_LSO | TX_DMA_PLEN0(skb->len); -+ -+ if (skb->ip_summed == CHECKSUM_PARTIAL) -+ priv->tx_dma[tx].txd4 |= TX_DMA_CHKSUM; -+ else -+ priv->tx_dma[tx].txd4 &= ~TX_DMA_CHKSUM; -+ -+ if (priv->soc->tso) -+ fe_start_tso(skb, dev, nr_frags, tx); -+ -+ if (skb_shinfo(skb)->gso_segs > 1) { -+ struct iphdr *iph = NULL; -+ struct tcphdr *th = NULL; -+ struct ipv6hdr *ip6h = NULL; -+ -+ ip6h = (struct ipv6hdr *) skb_network_header(skb); -+ iph = (struct iphdr *) skb_network_header(skb); -+ if ((iph->version == 4) && (iph->protocol == IPPROTO_TCP)) { -+ th = (struct tcphdr *)skb_transport_header(skb); -+ priv->tx_dma[tx].txd4 |= BIT(28); -+ th->check = htons(skb_shinfo(skb)->gso_size); -+ dma_cache_sync(NULL, th, sizeof(struct tcphdr), DMA_TO_DEVICE); -+ } else if ((ip6h->version == 6) && (ip6h->nexthdr == NEXTHDR_TCP)) { -+ th = (struct tcphdr *)skb_transport_header(skb); -+ priv->tx_dma[tx].txd4 |= BIT(28); -+ th->check = htons(skb_shinfo(skb)->gso_size); -+ dma_cache_sync(NULL, th, sizeof(struct tcphdr), DMA_TO_DEVICE); -+ } -+ } -+ -+ for (i = 0; i < tx_num; i++) -+ dma_cache_sync(NULL, &priv->tx_dma[tx + i], sizeof(struct fe_tx_dma), DMA_TO_DEVICE); -+ -+ 
dev->stats.tx_packets++; -+ dev->stats.tx_bytes += skb->len; -+ -+ wmb(); -+ fe_reg_w32(tx_next, FE_REG_TX_CTX_IDX0); -+ netdev_sent_queue(dev, skb->len); -+ -+ spin_unlock(&priv->page_lock); -+ -+ return NETDEV_TX_OK; -+} -+ -+static int fe_poll_rx(struct napi_struct *napi, int budget) -+{ -+ struct fe_priv *priv = container_of(napi, struct fe_priv, rx_napi); -+ int idx = fe_reg_r32(FE_REG_RX_CALC_IDX0); -+ int complete = 0; -+ int rx = 0; -+ -+ while ((rx < budget) && !complete) { -+ idx = (idx + 1) % NUM_DMA_DESC; -+ -+ if (priv->rx_dma[idx].rxd2 & RX_DMA_DONE) { -+ struct sk_buff *new_skb = fe_alloc_skb(priv); -+ -+ if (new_skb) { -+ int pktlen = RX_DMA_PLEN0(priv->rx_dma[idx].rxd2); -+ dma_addr_t dma_addr; -+ -+ dma_unmap_single(&priv->netdev->dev, priv->rx_dma[idx].rxd1, -+ MAX_RX_LENGTH, DMA_FROM_DEVICE); -+ -+ skb_put(priv->rx_skb[idx], pktlen); -+ priv->rx_skb[idx]->dev = priv->netdev; -+ priv->rx_skb[idx]->protocol = eth_type_trans(priv->rx_skb[idx], priv->netdev); -+ if (priv->rx_dma[idx].rxd4 & priv->soc->checksum_bit) -+ priv->rx_skb[idx]->ip_summed = CHECKSUM_UNNECESSARY; -+ else -+ priv->rx_skb[idx]->ip_summed = CHECKSUM_NONE; -+ priv->netdev->stats.rx_packets++; -+ priv->netdev->stats.rx_bytes += pktlen; -+ -+#ifdef CONFIG_INET_LRO -+ if (priv->soc->get_skb_header && priv->rx_skb[idx]->ip_summed == CHECKSUM_UNNECESSARY) -+ lro_receive_skb(&priv->lro_mgr, priv->rx_skb[idx], NULL); -+ else -+#endif -+ netif_receive_skb(priv->rx_skb[idx]); -+ -+ priv->rx_skb[idx] = new_skb; -+ -+ dma_addr = dma_map_single(&priv->netdev->dev, -+ new_skb->data, -+ MAX_RX_LENGTH, -+ DMA_FROM_DEVICE); -+ priv->rx_dma[idx].rxd1 = (unsigned int) dma_addr; -+ wmb(); -+ } else { -+ priv->netdev->stats.rx_dropped++; -+ } -+ -+ if (priv->soc->rx_dma) -+ priv->soc->rx_dma(priv, idx, MAX_RX_LENGTH); -+ else -+ priv->rx_dma[idx].rxd2 = RX_DMA_LSO; -+ fe_reg_w32(idx, FE_REG_RX_CALC_IDX0); -+ -+ rx++; -+ } else { -+ complete = 1; -+ } -+ } -+ -+#ifdef CONFIG_INET_LRO -+ if (priv->soc->get_skb_header) -+ lro_flush_all(&priv->lro_mgr); -+#endif -+ if (complete) { -+ napi_complete(&priv->rx_napi); -+ fe_int_enable(priv->soc->rx_dly_int); -+ } -+ -+ return rx; -+} -+ -+static void fe_tx_housekeeping(unsigned long ptr) -+{ -+ struct net_device *dev = (struct net_device*)ptr; -+ struct fe_priv *priv = netdev_priv(dev); -+ unsigned int bytes_compl = 0; -+ unsigned int pkts_compl = 0; -+ -+ spin_lock(&priv->page_lock); -+ while (1) { -+ struct fe_tx_dma *txd; -+ -+ txd = &priv->tx_dma[priv->tx_free_idx]; -+ -+ if (!(txd->txd2 & TX_DMA_DONE) || !(priv->tx_skb[priv->tx_free_idx])) -+ break; -+ -+ if (priv->tx_skb[priv->tx_free_idx] != (struct sk_buff *) DMA_DUMMY_DESC) { -+ bytes_compl += priv->tx_skb[priv->tx_free_idx]->len; -+ dev_kfree_skb_irq(priv->tx_skb[priv->tx_free_idx]); -+ } -+ pkts_compl++; -+ priv->tx_skb[priv->tx_free_idx] = NULL; -+ priv->tx_free_idx++; -+ if (priv->tx_free_idx >= NUM_DMA_DESC) -+ priv->tx_free_idx = 0; -+ } -+ -+ netdev_completed_queue(priv->netdev, pkts_compl, bytes_compl); -+ spin_unlock(&priv->page_lock); -+ -+ fe_int_enable(priv->soc->tx_dly_int); -+} -+ -+static void fe_tx_timeout(struct net_device *dev) -+{ -+ struct fe_priv *priv = netdev_priv(dev); -+ -+ tasklet_schedule(&priv->tx_tasklet); -+ priv->netdev->stats.tx_errors++; -+ netdev_err(dev, "transmit timed out, waking up the queue\n"); -+ netif_wake_queue(dev); -+} -+ -+static irqreturn_t fe_handle_irq(int irq, void *dev) -+{ -+ struct fe_priv *priv = netdev_priv(dev); -+ unsigned int status; -+ unsigned int mask; -+ -+ 
status = fe_reg_r32(FE_REG_FE_INT_STATUS); -+ mask = fe_reg_r32(FE_REG_FE_INT_ENABLE); -+ -+ if (!(status & mask)) -+ return IRQ_NONE; -+ -+ if (status & priv->soc->rx_dly_int) { -+ fe_int_disable(priv->soc->rx_dly_int); -+ napi_schedule(&priv->rx_napi); -+ } -+ -+ if (status & priv->soc->tx_dly_int) { -+ fe_int_disable(priv->soc->tx_dly_int); -+ tasklet_schedule(&priv->tx_tasklet); -+ } -+ -+ fe_reg_w32(status, FE_REG_FE_INT_STATUS); -+ -+ return IRQ_HANDLED; -+} -+ -+static int fe_hw_init(struct net_device *dev) -+{ -+ struct fe_priv *priv = netdev_priv(dev); -+ int err; -+ -+ err = devm_request_irq(priv->device, dev->irq, fe_handle_irq, 0, -+ dev_name(priv->device), dev); -+ if (err) -+ return err; -+ -+ err = fe_alloc_rx(priv); -+ if (!err) -+ err = fe_alloc_tx(priv); -+ if (err) -+ return err; -+ -+ if (priv->soc->set_mac) -+ priv->soc->set_mac(priv, dev->dev_addr); -+ else -+ fe_hw_set_macaddr(priv, dev->dev_addr); -+ -+ fe_reg_w32(FE_DELAY_INIT, FE_REG_DLY_INT_CFG); -+ -+ fe_int_disable(priv->soc->tx_dly_int | priv->soc->rx_dly_int); -+ -+ tasklet_init(&priv->tx_tasklet, fe_tx_housekeeping, (unsigned long)dev); -+ -+ if (priv->soc->fwd_config) { -+ priv->soc->fwd_config(priv); -+ } else { -+ unsigned long sysclk = priv->sysclk; -+ -+ if (!sysclk) { -+ netdev_err(dev, "unable to get clock\n"); -+ return -EINVAL; -+ } -+ -+ sysclk /= FE_US_CYC_CNT_DIVISOR; -+ sysclk <<= FE_US_CYC_CNT_SHIFT; -+ -+ fe_w32((fe_r32(FE_FE_GLO_CFG) & -+ ~(FE_US_CYC_CNT_MASK << FE_US_CYC_CNT_SHIFT)) | sysclk, -+ FE_FE_GLO_CFG); -+ -+ fe_w32(fe_r32(FE_GDMA1_FWD_CFG) & ~0xffff, FE_GDMA1_FWD_CFG); -+ fe_w32(fe_r32(FE_GDMA1_FWD_CFG) | (FE_GDM1_ICS_EN | FE_GDM1_TCS_EN | FE_GDM1_UCS_EN), -+ FE_GDMA1_FWD_CFG); -+ fe_w32(fe_r32(FE_CDMA_CSG_CFG) | (FE_ICS_GEN_EN | FE_TCS_GEN_EN | FE_UCS_GEN_EN), -+ FE_CDMA_CSG_CFG); -+ fe_w32(FE_PSE_FQFC_CFG_INIT, FE_PSE_FQ_CFG); -+ } -+ -+ fe_w32(1, FE_FE_RST_GL); -+ fe_w32(0, FE_FE_RST_GL); -+ -+ return 0; -+} -+ -+static int fe_open(struct net_device *dev) -+{ -+ struct fe_priv *priv = netdev_priv(dev); -+ unsigned long flags; -+ u32 val; -+ -+ spin_lock_irqsave(&priv->page_lock, flags); -+ napi_enable(&priv->rx_napi); -+ -+ val = FE_TX_WB_DDONE | FE_RX_DMA_EN | FE_TX_DMA_EN; -+ val |= priv->soc->pdma_glo_cfg; -+ fe_reg_w32(val, FE_REG_PDMA_GLO_CFG); -+ -+ spin_unlock_irqrestore(&priv->page_lock, flags); -+ -+ if (priv->phy) -+ priv->phy->start(priv); -+ -+ if (priv->soc->has_carrier && priv->soc->has_carrier(priv)) -+ netif_carrier_on(dev); -+ -+ netif_start_queue(dev); -+ fe_int_enable(priv->soc->tx_dly_int | priv->soc->rx_dly_int); -+ -+ return 0; -+} -+ -+static int fe_stop(struct net_device *dev) -+{ -+ struct fe_priv *priv = netdev_priv(dev); -+ unsigned long flags; -+ -+ fe_int_disable(priv->soc->tx_dly_int | priv->soc->rx_dly_int); -+ -+ netif_stop_queue(dev); -+ -+ if (priv->phy) -+ priv->phy->stop(priv); -+ -+ spin_lock_irqsave(&priv->page_lock, flags); -+ napi_disable(&priv->rx_napi); -+ -+ fe_reg_w32(fe_reg_r32(FE_REG_PDMA_GLO_CFG) & -+ ~(FE_TX_WB_DDONE | FE_RX_DMA_EN | FE_TX_DMA_EN), -+ FE_REG_PDMA_GLO_CFG); -+ spin_unlock_irqrestore(&priv->page_lock, flags); -+ -+ return 0; -+} -+ -+static int __init fe_init(struct net_device *dev) -+{ -+ struct fe_priv *priv = netdev_priv(dev); -+ struct device_node *port; -+ int err; -+ -+ BUG_ON(!priv->soc->reset_fe); -+ priv->soc->reset_fe(); -+ -+ if (priv->soc->switch_init) -+ priv->soc->switch_init(priv); -+ -+ net_srandom(jiffies); -+ memcpy(dev->dev_addr, priv->soc->mac, ETH_ALEN); -+ 
of_get_mac_address_mtd(priv->device->of_node, dev->dev_addr); -+ -+ err = fe_mdio_init(priv); -+ if (err) -+ return err; -+ -+ if (priv->phy) { -+ err = priv->phy->connect(priv); -+ if (err) -+ goto err_mdio_cleanup; -+ } -+ -+ if (priv->soc->port_init) -+ for_each_child_of_node(priv->device->of_node, port) -+ if (of_device_is_compatible(port, "ralink,eth-port") && of_device_is_available(port)) -+ priv->soc->port_init(priv, port); -+ -+ err = fe_hw_init(dev); -+ if (err) -+ goto err_phy_disconnect; -+ -+ if (priv->soc->switch_config) -+ priv->soc->switch_config(priv); -+ -+ return 0; -+ -+err_phy_disconnect: -+ if (priv->phy) -+ priv->phy->disconnect(priv); -+err_mdio_cleanup: -+ fe_mdio_cleanup(priv); -+ -+ return err; -+} -+ -+static void fe_uninit(struct net_device *dev) -+{ -+ struct fe_priv *priv = netdev_priv(dev); -+ -+ tasklet_kill(&priv->tx_tasklet); -+ -+ if (priv->phy) -+ priv->phy->disconnect(priv); -+ fe_mdio_cleanup(priv); -+ -+ fe_reg_w32(0, FE_REG_FE_INT_ENABLE); -+ free_irq(dev->irq, dev); -+ -+ fe_free_dma(priv); -+} -+ -+static const struct net_device_ops fe_netdev_ops = { -+ .ndo_init = fe_init, -+ .ndo_uninit = fe_uninit, -+ .ndo_open = fe_open, -+ .ndo_stop = fe_stop, -+ .ndo_start_xmit = fe_start_xmit, -+ .ndo_tx_timeout = fe_tx_timeout, -+ .ndo_set_mac_address = fe_set_mac_address, -+ .ndo_change_mtu = eth_change_mtu, -+ .ndo_validate_addr = eth_validate_addr, -+}; -+ -+static int fe_probe(struct platform_device *pdev) -+{ -+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -+ const struct of_device_id *match; -+ struct fe_soc_data *soc = NULL; -+ struct net_device *netdev; -+ struct fe_priv *priv; -+ struct clk *sysclk; -+ int err; -+ -+ device_reset(&pdev->dev); -+ -+ match = of_match_device(of_fe_match, &pdev->dev); -+ soc = (struct fe_soc_data *) match->data; -+ -+ if (soc->init_data) -+ soc->init_data(soc); -+ if (soc->reg_table) -+ fe_reg_table = soc->reg_table; -+ -+ fe_base = devm_request_and_ioremap(&pdev->dev, res); -+ if (!fe_base) -+ return -ENOMEM; -+ -+ netdev = alloc_etherdev(sizeof(struct fe_priv)); -+ if (!netdev) { -+ dev_err(&pdev->dev, "alloc_etherdev failed\n"); -+ return -ENOMEM; -+ } -+ -+ strcpy(netdev->name, "eth%d"); -+ netdev->netdev_ops = &fe_netdev_ops; -+ netdev->base_addr = (unsigned long) fe_base; -+ netdev->watchdog_timeo = TX_TIMEOUT; -+ netdev->features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM; -+ -+ if (fe_reg_table[FE_REG_FE_DMA_VID_BASE]) -+ netdev->features |= NETIF_F_HW_VLAN_CTAG_TX; -+ -+ if (soc->tso) { -+ dev_info(&pdev->dev, "Enabling TSO\n"); -+ netdev->features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_IPV6_CSUM; -+ } -+ -+ netdev->hw_features = netdev->vlan_features = netdev->features; -+ -+ netdev->irq = platform_get_irq(pdev, 0); -+ if (netdev->irq < 0) { -+ dev_err(&pdev->dev, "no IRQ resource found\n"); -+ kfree(netdev); -+ return -ENXIO; -+ } -+ -+ priv = netdev_priv(netdev); -+ memset(priv, 0, sizeof(struct fe_priv)); -+ spin_lock_init(&priv->page_lock); -+ -+ sysclk = devm_clk_get(&pdev->dev, NULL); -+ if (!IS_ERR(sysclk)) -+ priv->sysclk = clk_get_rate(sysclk); -+ -+ priv->netdev = netdev; -+ priv->device = &pdev->dev; -+ priv->soc = soc; -+ -+ err = register_netdev(netdev); -+ if (err) { -+ dev_err(&pdev->dev, "error bringing up device\n"); -+ kfree(netdev); -+ return err; -+ } -+ netif_napi_add(netdev, &priv->rx_napi, fe_poll_rx, 32); -+ -+#ifdef CONFIG_INET_LRO -+ if (priv->soc->get_skb_header) { -+ priv->lro_mgr.dev = netdev; -+ memset(&priv->lro_mgr.stats, 0, 
sizeof(priv->lro_mgr.stats)); -+ priv->lro_mgr.features = LRO_F_NAPI; -+ priv->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY; -+ priv->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY; -+ priv->lro_mgr.max_desc = ARRAY_SIZE(priv->lro_arr); -+ priv->lro_mgr.max_aggr = 64; -+ priv->lro_mgr.frag_align_pad = 0; -+ priv->lro_mgr.lro_arr = priv->lro_arr; -+ priv->lro_mgr.get_skb_header = priv->soc->get_skb_header; -+ } -+#endif -+ -+ platform_set_drvdata(pdev, netdev); -+ -+ netdev_info(netdev, "done loading\n"); -+ -+ return 0; -+} -+ -+static int fe_remove(struct platform_device *pdev) -+{ -+ struct net_device *dev = platform_get_drvdata(pdev); -+ struct fe_priv *priv = netdev_priv(dev); -+ -+ netif_stop_queue(dev); -+ netif_napi_del(&priv->rx_napi); -+ -+ unregister_netdev(dev); -+ free_netdev(dev); -+ -+ return 0; -+} -+ -+static struct platform_driver fe_driver = { -+ .probe = fe_probe, -+ .remove = fe_remove, -+ .driver = { -+ .name = "ralink_soc_eth", -+ .owner = THIS_MODULE, -+ .of_match_table = of_fe_match, -+ }, -+}; -+ -+static int __init init_rtfe(void) -+{ -+ int ret; -+ -+ ret = rtesw_init(); -+ if (ret) -+ return ret; -+ -+ ret = platform_driver_register(&fe_driver); -+ if (ret) -+ rtesw_exit(); -+ -+ return ret; -+} -+ -+static void __exit exit_rtfe(void) -+{ -+ platform_driver_unregister(&fe_driver); -+ rtesw_exit(); -+} -+ -+module_init(init_rtfe); -+module_exit(exit_rtfe); -+ -+MODULE_LICENSE("GPL"); -+MODULE_AUTHOR("John Crispin "); -+MODULE_DESCRIPTION("Ethernet driver for Ralink SoC"); ---- /dev/null -+++ b/drivers/net/ethernet/ralink/ralink_soc_eth.h -@@ -0,0 +1,384 @@ -+/* -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; version 2 of the License -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. 
-+ * -+ * based on Ralink SDK3.3 -+ * Copyright (C) 2009-2013 John Crispin -+ */ -+ -+#ifndef FE_ETH_H -+#define FE_ETH_H -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+ -+enum fe_reg { -+ FE_REG_PDMA_GLO_CFG = 0, -+ FE_REG_PDMA_RST_CFG, -+ FE_REG_DLY_INT_CFG, -+ FE_REG_TX_BASE_PTR0, -+ FE_REG_TX_MAX_CNT0, -+ FE_REG_TX_CTX_IDX0, -+ FE_REG_RX_BASE_PTR0, -+ FE_REG_RX_MAX_CNT0, -+ FE_REG_RX_CALC_IDX0, -+ FE_REG_FE_INT_ENABLE, -+ FE_REG_FE_INT_STATUS, -+ FE_REG_FE_DMA_VID_BASE, -+ FE_REG_COUNT -+}; -+ -+#define NUM_DMA_DESC 0x100 -+ -+#define FE_DELAY_EN_INT 0x80 -+#define FE_DELAY_MAX_INT 0x04 -+#define FE_DELAY_MAX_TOUT 0x04 -+#define FE_DELAY_CHAN (((FE_DELAY_EN_INT | FE_DELAY_MAX_INT) << 8) | FE_DELAY_MAX_TOUT) -+#define FE_DELAY_INIT ((FE_DELAY_CHAN << 16) | FE_DELAY_CHAN) -+#define FE_PSE_FQFC_CFG_INIT 0x80504000 -+ -+/* interrupt bits */ -+#define FE_CNT_PPE_AF BIT(31) -+#define FE_CNT_GDM_AF BIT(29) -+#define FE_PSE_P2_FC BIT(26) -+#define FE_PSE_BUF_DROP BIT(24) -+#define FE_GDM_OTHER_DROP BIT(23) -+#define FE_PSE_P1_FC BIT(22) -+#define FE_PSE_P0_FC BIT(21) -+#define FE_PSE_FQ_EMPTY BIT(20) -+#define FE_GE1_STA_CHG BIT(18) -+#define FE_TX_COHERENT BIT(17) -+#define FE_RX_COHERENT BIT(16) -+#define FE_TX_DONE_INT3 BIT(11) -+#define FE_TX_DONE_INT2 BIT(10) -+#define FE_TX_DONE_INT1 BIT(9) -+#define FE_TX_DONE_INT0 BIT(8) -+#define FE_RX_DONE_INT0 BIT(2) -+#define FE_TX_DLY_INT BIT(1) -+#define FE_RX_DLY_INT BIT(0) -+ -+#define RT5350_RX_DLY_INT BIT(30) -+#define RT5350_TX_DLY_INT BIT(28) -+ -+/* registers */ -+#define FE_FE_OFFSET 0x0000 -+#define FE_GDMA_OFFSET 0x0020 -+#define FE_PSE_OFFSET 0x0040 -+#define FE_GDMA2_OFFSET 0x0060 -+#define FE_CDMA_OFFSET 0x0080 -+#define FE_DMA_VID0 0x00a8 -+#define FE_PDMA_OFFSET 0x0100 -+#define FE_PPE_OFFSET 0x0200 -+#define FE_CMTABLE_OFFSET 0x0400 -+#define FE_POLICYTABLE_OFFSET 0x1000 -+ -+#define RT5350_PDMA_OFFSET 0x0800 -+#define RT5350_SDM_OFFSET 0x0c00 -+ -+#define FE_MDIO_ACCESS (FE_FE_OFFSET + 0x00) -+#define FE_MDIO_CFG (FE_FE_OFFSET + 0x04) -+#define FE_FE_GLO_CFG (FE_FE_OFFSET + 0x08) -+#define FE_FE_RST_GL (FE_FE_OFFSET + 0x0C) -+#define FE_FE_INT_STATUS (FE_FE_OFFSET + 0x10) -+#define FE_FE_INT_ENABLE (FE_FE_OFFSET + 0x14) -+#define FE_MDIO_CFG2 (FE_FE_OFFSET + 0x18) -+#define FE_FOC_TS_T (FE_FE_OFFSET + 0x1C) -+ -+#define FE_GDMA1_FWD_CFG (FE_GDMA_OFFSET + 0x00) -+#define FE_GDMA1_SCH_CFG (FE_GDMA_OFFSET + 0x04) -+#define FE_GDMA1_SHPR_CFG (FE_GDMA_OFFSET + 0x08) -+#define FE_GDMA1_MAC_ADRL (FE_GDMA_OFFSET + 0x0C) -+#define FE_GDMA1_MAC_ADRH (FE_GDMA_OFFSET + 0x10) -+ -+#define FE_GDMA2_FWD_CFG (FE_GDMA2_OFFSET + 0x00) -+#define FE_GDMA2_SCH_CFG (FE_GDMA2_OFFSET + 0x04) -+#define FE_GDMA2_SHPR_CFG (FE_GDMA2_OFFSET + 0x08) -+#define FE_GDMA2_MAC_ADRL (FE_GDMA2_OFFSET + 0x0C) -+#define FE_GDMA2_MAC_ADRH (FE_GDMA2_OFFSET + 0x10) -+ -+#define FE_PSE_FQ_CFG (FE_PSE_OFFSET + 0x00) -+#define FE_CDMA_FC_CFG (FE_PSE_OFFSET + 0x04) -+#define FE_GDMA1_FC_CFG (FE_PSE_OFFSET + 0x08) -+#define FE_GDMA2_FC_CFG (FE_PSE_OFFSET + 0x0C) -+ -+#define FE_CDMA_CSG_CFG (FE_CDMA_OFFSET + 0x00) -+#define FE_CDMA_SCH_CFG (FE_CDMA_OFFSET + 0x04) -+ -+#define MT7620A_GDMA_OFFSET 0x0600 -+#define MT7620A_GDMA1_FWD_CFG (MT7620A_GDMA_OFFSET + 0x00) -+#define MT7620A_FE_GDMA1_SCH_CFG (MT7620A_GDMA_OFFSET + 0x04) -+#define MT7620A_FE_GDMA1_SHPR_CFG (MT7620A_GDMA_OFFSET + 0x08) -+#define MT7620A_FE_GDMA1_MAC_ADRL (MT7620A_GDMA_OFFSET + 0x0C) -+#define MT7620A_FE_GDMA1_MAC_ADRH (MT7620A_GDMA_OFFSET + 0x10) -+ -+#define RT5350_TX_BASE_PTR0 
(RT5350_PDMA_OFFSET + 0x00) -+#define RT5350_TX_MAX_CNT0 (RT5350_PDMA_OFFSET + 0x04) -+#define RT5350_TX_CTX_IDX0 (RT5350_PDMA_OFFSET + 0x08) -+#define RT5350_TX_DTX_IDX0 (RT5350_PDMA_OFFSET + 0x0C) -+#define RT5350_TX_BASE_PTR1 (RT5350_PDMA_OFFSET + 0x10) -+#define RT5350_TX_MAX_CNT1 (RT5350_PDMA_OFFSET + 0x14) -+#define RT5350_TX_CTX_IDX1 (RT5350_PDMA_OFFSET + 0x18) -+#define RT5350_TX_DTX_IDX1 (RT5350_PDMA_OFFSET + 0x1C) -+#define RT5350_TX_BASE_PTR2 (RT5350_PDMA_OFFSET + 0x20) -+#define RT5350_TX_MAX_CNT2 (RT5350_PDMA_OFFSET + 0x24) -+#define RT5350_TX_CTX_IDX2 (RT5350_PDMA_OFFSET + 0x28) -+#define RT5350_TX_DTX_IDX2 (RT5350_PDMA_OFFSET + 0x2C) -+#define RT5350_TX_BASE_PTR3 (RT5350_PDMA_OFFSET + 0x30) -+#define RT5350_TX_MAX_CNT3 (RT5350_PDMA_OFFSET + 0x34) -+#define RT5350_TX_CTX_IDX3 (RT5350_PDMA_OFFSET + 0x38) -+#define RT5350_TX_DTX_IDX3 (RT5350_PDMA_OFFSET + 0x3C) -+#define RT5350_RX_BASE_PTR0 (RT5350_PDMA_OFFSET + 0x100) -+#define RT5350_RX_MAX_CNT0 (RT5350_PDMA_OFFSET + 0x104) -+#define RT5350_RX_CALC_IDX0 (RT5350_PDMA_OFFSET + 0x108) -+#define RT5350_RX_DRX_IDX0 (RT5350_PDMA_OFFSET + 0x10C) -+#define RT5350_RX_BASE_PTR1 (RT5350_PDMA_OFFSET + 0x110) -+#define RT5350_RX_MAX_CNT1 (RT5350_PDMA_OFFSET + 0x114) -+#define RT5350_RX_CALC_IDX1 (RT5350_PDMA_OFFSET + 0x118) -+#define RT5350_RX_DRX_IDX1 (RT5350_PDMA_OFFSET + 0x11C) -+#define RT5350_PDMA_GLO_CFG (RT5350_PDMA_OFFSET + 0x204) -+#define RT5350_PDMA_RST_CFG (RT5350_PDMA_OFFSET + 0x208) -+#define RT5350_DLY_INT_CFG (RT5350_PDMA_OFFSET + 0x20c) -+#define RT5350_FE_INT_STATUS (RT5350_PDMA_OFFSET + 0x220) -+#define RT5350_FE_INT_ENABLE (RT5350_PDMA_OFFSET + 0x228) -+#define RT5350_PDMA_SCH_CFG (RT5350_PDMA_OFFSET + 0x280) -+ -+#define FE_PDMA_GLO_CFG (FE_PDMA_OFFSET + 0x00) -+#define FE_PDMA_RST_CFG (FE_PDMA_OFFSET + 0x04) -+#define FE_PDMA_SCH_CFG (FE_PDMA_OFFSET + 0x08) -+#define FE_DLY_INT_CFG (FE_PDMA_OFFSET + 0x0C) -+#define FE_TX_BASE_PTR0 (FE_PDMA_OFFSET + 0x10) -+#define FE_TX_MAX_CNT0 (FE_PDMA_OFFSET + 0x14) -+#define FE_TX_CTX_IDX0 (FE_PDMA_OFFSET + 0x18) -+#define FE_TX_DTX_IDX0 (FE_PDMA_OFFSET + 0x1C) -+#define FE_TX_BASE_PTR1 (FE_PDMA_OFFSET + 0x20) -+#define FE_TX_MAX_CNT1 (FE_PDMA_OFFSET + 0x24) -+#define FE_TX_CTX_IDX1 (FE_PDMA_OFFSET + 0x28) -+#define FE_TX_DTX_IDX1 (FE_PDMA_OFFSET + 0x2C) -+#define FE_RX_BASE_PTR0 (FE_PDMA_OFFSET + 0x30) -+#define FE_RX_MAX_CNT0 (FE_PDMA_OFFSET + 0x34) -+#define FE_RX_CALC_IDX0 (FE_PDMA_OFFSET + 0x38) -+#define FE_RX_DRX_IDX0 (FE_PDMA_OFFSET + 0x3C) -+#define FE_TX_BASE_PTR2 (FE_PDMA_OFFSET + 0x40) -+#define FE_TX_MAX_CNT2 (FE_PDMA_OFFSET + 0x44) -+#define FE_TX_CTX_IDX2 (FE_PDMA_OFFSET + 0x48) -+#define FE_TX_DTX_IDX2 (FE_PDMA_OFFSET + 0x4C) -+#define FE_TX_BASE_PTR3 (FE_PDMA_OFFSET + 0x50) -+#define FE_TX_MAX_CNT3 (FE_PDMA_OFFSET + 0x54) -+#define FE_TX_CTX_IDX3 (FE_PDMA_OFFSET + 0x58) -+#define FE_TX_DTX_IDX3 (FE_PDMA_OFFSET + 0x5C) -+#define FE_RX_BASE_PTR1 (FE_PDMA_OFFSET + 0x60) -+#define FE_RX_MAX_CNT1 (FE_PDMA_OFFSET + 0x64) -+#define FE_RX_CALC_IDX1 (FE_PDMA_OFFSET + 0x68) -+#define FE_RX_DRX_IDX1 (FE_PDMA_OFFSET + 0x6C) -+ -+#define RT5350_SDM_CFG (RT5350_SDM_OFFSET + 0x00) //Switch DMA configuration -+#define RT5350_SDM_RRING (RT5350_SDM_OFFSET + 0x04) //Switch DMA Rx Ring -+#define RT5350_SDM_TRING (RT5350_SDM_OFFSET + 0x08) //Switch DMA Tx Ring -+#define RT5350_SDM_MAC_ADRL (RT5350_SDM_OFFSET + 0x0C) //Switch MAC address LSB -+#define RT5350_SDM_MAC_ADRH (RT5350_SDM_OFFSET + 0x10) //Switch MAC Address MSB -+#define RT5350_SDM_TPCNT (RT5350_SDM_OFFSET + 0x100) //Switch 
DMA Tx packet count -+#define RT5350_SDM_TBCNT (RT5350_SDM_OFFSET + 0x104) //Switch DMA Tx byte count -+#define RT5350_SDM_RPCNT (RT5350_SDM_OFFSET + 0x108) //Switch DMA rx packet count -+#define RT5350_SDM_RBCNT (RT5350_SDM_OFFSET + 0x10C) //Switch DMA rx byte count -+#define RT5350_SDM_CS_ERR (RT5350_SDM_OFFSET + 0x110) //Switch DMA rx checksum error count -+ -+#define RT5350_SDM_ICS_EN BIT(16) -+#define RT5350_SDM_TCS_EN BIT(17) -+#define RT5350_SDM_UCS_EN BIT(18) -+ -+ -+/* MDIO_CFG register bits */ -+#define FE_MDIO_CFG_AUTO_POLL_EN BIT(29) -+#define FE_MDIO_CFG_GP1_BP_EN BIT(16) -+#define FE_MDIO_CFG_GP1_FRC_EN BIT(15) -+#define FE_MDIO_CFG_GP1_SPEED_10 (0 << 13) -+#define FE_MDIO_CFG_GP1_SPEED_100 (1 << 13) -+#define FE_MDIO_CFG_GP1_SPEED_1000 (2 << 13) -+#define FE_MDIO_CFG_GP1_DUPLEX BIT(12) -+#define FE_MDIO_CFG_GP1_FC_TX BIT(11) -+#define FE_MDIO_CFG_GP1_FC_RX BIT(10) -+#define FE_MDIO_CFG_GP1_LNK_DWN BIT(9) -+#define FE_MDIO_CFG_GP1_AN_FAIL BIT(8) -+#define FE_MDIO_CFG_MDC_CLK_DIV_1 (0 << 6) -+#define FE_MDIO_CFG_MDC_CLK_DIV_2 (1 << 6) -+#define FE_MDIO_CFG_MDC_CLK_DIV_4 (2 << 6) -+#define FE_MDIO_CFG_MDC_CLK_DIV_8 (3 << 6) -+#define FE_MDIO_CFG_TURBO_MII_FREQ BIT(5) -+#define FE_MDIO_CFG_TURBO_MII_MODE BIT(4) -+#define FE_MDIO_CFG_RX_CLK_SKEW_0 (0 << 2) -+#define FE_MDIO_CFG_RX_CLK_SKEW_200 (1 << 2) -+#define FE_MDIO_CFG_RX_CLK_SKEW_400 (2 << 2) -+#define FE_MDIO_CFG_RX_CLK_SKEW_INV (3 << 2) -+#define FE_MDIO_CFG_TX_CLK_SKEW_0 0 -+#define FE_MDIO_CFG_TX_CLK_SKEW_200 1 -+#define FE_MDIO_CFG_TX_CLK_SKEW_400 2 -+#define FE_MDIO_CFG_TX_CLK_SKEW_INV 3 -+ -+/* uni-cast port */ -+#define FE_GDM1_ICS_EN BIT(22) -+#define FE_GDM1_TCS_EN BIT(21) -+#define FE_GDM1_UCS_EN BIT(20) -+#define FE_GDM1_JMB_EN BIT(19) -+#define FE_GDM1_STRPCRC BIT(16) -+#define FE_GDM1_UFRC_P_CPU (0 << 12) -+#define FE_GDM1_UFRC_P_GDMA1 (1 << 12) -+#define FE_GDM1_UFRC_P_PPE (6 << 12) -+ -+/* checksums */ -+#define FE_ICS_GEN_EN BIT(2) -+#define FE_UCS_GEN_EN BIT(1) -+#define FE_TCS_GEN_EN BIT(0) -+ -+/* dma ring */ -+#define FE_PST_DRX_IDX0 BIT(16) -+#define FE_PST_DTX_IDX3 BIT(3) -+#define FE_PST_DTX_IDX2 BIT(2) -+#define FE_PST_DTX_IDX1 BIT(1) -+#define FE_PST_DTX_IDX0 BIT(0) -+ -+#define FE_TX_WB_DDONE BIT(6) -+#define FE_RX_DMA_BUSY BIT(3) -+#define FE_TX_DMA_BUSY BIT(1) -+#define FE_RX_DMA_EN BIT(2) -+#define FE_TX_DMA_EN BIT(0) -+ -+#define FE_PDMA_SIZE_4DWORDS (0 << 4) -+#define FE_PDMA_SIZE_8DWORDS (1 << 4) -+#define FE_PDMA_SIZE_16DWORDS (2 << 4) -+ -+#define FE_US_CYC_CNT_MASK 0xff -+#define FE_US_CYC_CNT_SHIFT 0x8 -+#define FE_US_CYC_CNT_DIVISOR 1000000 -+ -+#define RX_DMA_PLEN0(_x) (((_x) >> 16) & 0x3fff) -+#define RX_DMA_LSO BIT(30) -+#define RX_DMA_DONE BIT(31) -+#define RX_DMA_L4VALID BIT(30) -+ -+struct fe_rx_dma { -+ unsigned int rxd1; -+ unsigned int rxd2; -+ unsigned int rxd3; -+ unsigned int rxd4; -+} __packed __aligned(4); -+ -+#define TX_DMA_PLEN0_MASK ((0x3fff) << 16) -+#define TX_DMA_PLEN0(_x) (((_x) & 0x3fff) << 16) -+#define TX_DMA_PLEN1(_x) ((_x) & 0x3fff) -+#define TX_DMA_LS1 BIT(14) -+#define TX_DMA_LSO BIT(30) -+#define TX_DMA_DONE BIT(31) -+#define TX_DMA_QN(_x) ((_x) << 16) -+#define TX_DMA_PN(_x) ((_x) << 24) -+#define TX_DMA_QN_MASK TX_DMA_QN(0x7) -+#define TX_DMA_PN_MASK TX_DMA_PN(0x7) -+#define TX_DMA_CHKSUM (0x7 << 29) -+ -+struct fe_tx_dma { -+ unsigned int txd1; -+ unsigned int txd2; -+ unsigned int txd3; -+ unsigned int txd4; -+} __packed __aligned(4); -+ -+struct fe_priv; -+ -+struct fe_phy { -+ struct phy_device *phy[8]; -+ struct device_node *phy_node[8]; -+ const 
__be32 *phy_fixed[8]; -+ int duplex[8]; -+ int speed[8]; -+ int tx_fc[8]; -+ int rx_fc[8]; -+ spinlock_t lock; -+ -+ int (*connect)(struct fe_priv *priv); -+ void (*disconnect)(struct fe_priv *priv); -+ void (*start)(struct fe_priv *priv); -+ void (*stop)(struct fe_priv *priv); -+}; -+ -+struct fe_soc_data -+{ -+ unsigned char mac[6]; -+ const u32 *reg_table; -+ -+ void (*init_data)(struct fe_soc_data *data); -+ void (*reset_fe)(void); -+ void (*set_mac)(struct fe_priv *priv, unsigned char *mac); -+ void (*fwd_config)(struct fe_priv *priv); -+ void (*tx_dma)(struct fe_priv *priv, int idx, struct sk_buff *skb); -+ void (*rx_dma)(struct fe_priv *priv, int idx, int len); -+ int (*switch_init)(struct fe_priv *priv); -+ int (*switch_config)(struct fe_priv *priv); -+ void (*port_init)(struct fe_priv *priv, struct device_node *port); -+ int (*has_carrier)(struct fe_priv *priv); -+ int (*mdio_init)(struct fe_priv *priv); -+ void (*mdio_cleanup)(struct fe_priv *priv); -+ int (*mdio_write)(struct mii_bus *bus, int phy_addr, int phy_reg, u16 val); -+ int (*mdio_read)(struct mii_bus *bus, int phy_addr, int phy_reg); -+ void (*mdio_adjust_link)(struct fe_priv *priv, int port); -+ int (*get_skb_header)(struct sk_buff *skb, void **iphdr, void **tcph, u64 *hdr_flags, void *priv); -+ -+ void *swpriv; -+ u32 pdma_glo_cfg; -+ u32 rx_dly_int; -+ u32 tx_dly_int; -+ u32 checksum_bit; -+ u32 tso; -+ -+ int min_pkt_len; -+}; -+ -+struct fe_priv -+{ -+ spinlock_t page_lock; -+ -+ struct fe_soc_data *soc; -+ struct net_device *netdev; -+ struct device *device; -+ unsigned long sysclk; -+ -+ struct fe_rx_dma *rx_dma; -+ struct napi_struct rx_napi; -+ struct sk_buff *rx_skb[NUM_DMA_DESC]; -+ dma_addr_t rx_phys; -+ -+ struct fe_tx_dma *tx_dma; -+ struct tasklet_struct tx_tasklet; -+ struct sk_buff *tx_skb[NUM_DMA_DESC]; -+ dma_addr_t tx_phys; -+ unsigned int tx_free_idx; -+ -+ struct fe_phy *phy; -+ struct mii_bus *mii_bus; -+ int mii_irq[PHY_MAX_ADDR]; -+ -+ int link[8]; -+ -+ struct net_lro_mgr lro_mgr; -+ struct net_lro_desc lro_arr[8]; -+}; -+ -+extern const struct of_device_id of_fe_match[]; -+ -+void fe_w32(u32 val, unsigned reg); -+u32 fe_r32(unsigned reg); -+ -+#endif /* FE_ETH_H */ ---- /dev/null -+++ b/drivers/net/ethernet/ralink/soc_mt7620.c -@@ -0,0 +1,172 @@ -+/* -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; version 2 of the License -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. 
-+ * -+ * Copyright (C) 2009-2013 John Crispin -+ */ -+ -+#include -+#include -+#include -+ -+#include -+ -+#include -+#include "ralink_soc_eth.h" -+#include "gsw_mt7620a.h" -+ -+#define MT7620A_CDMA_CSG_CFG 0x400 -+#define MT7620_DMA_VID (MT7620A_CDMA_CSG_CFG | 0x30) -+#define MT7620A_DMA_2B_OFFSET BIT(31) -+#define MT7620A_RESET_FE BIT(21) -+#define MT7620A_RESET_ESW BIT(23) -+#define MT7620_L4_VALID BIT(23) -+ -+#define SYSC_REG_RESET_CTRL 0x34 -+#define MAX_RX_LENGTH 1536 -+ -+#define CDMA_ICS_EN BIT(2) -+#define CDMA_UCS_EN BIT(1) -+#define CDMA_TCS_EN BIT(0) -+ -+#define GDMA_ICS_EN BIT(22) -+#define GDMA_TCS_EN BIT(21) -+#define GDMA_UCS_EN BIT(20) -+ -+static const u32 rt5350_reg_table[FE_REG_COUNT] = { -+ [FE_REG_PDMA_GLO_CFG] = RT5350_PDMA_GLO_CFG, -+ [FE_REG_PDMA_RST_CFG] = RT5350_PDMA_RST_CFG, -+ [FE_REG_DLY_INT_CFG] = RT5350_DLY_INT_CFG, -+ [FE_REG_TX_BASE_PTR0] = RT5350_TX_BASE_PTR0, -+ [FE_REG_TX_MAX_CNT0] = RT5350_TX_MAX_CNT0, -+ [FE_REG_TX_CTX_IDX0] = RT5350_TX_CTX_IDX0, -+ [FE_REG_RX_BASE_PTR0] = RT5350_RX_BASE_PTR0, -+ [FE_REG_RX_MAX_CNT0] = RT5350_RX_MAX_CNT0, -+ [FE_REG_RX_CALC_IDX0] = RT5350_RX_CALC_IDX0, -+ [FE_REG_FE_INT_ENABLE] = RT5350_FE_INT_ENABLE, -+ [FE_REG_FE_INT_STATUS] = RT5350_FE_INT_STATUS, -+ [FE_REG_FE_DMA_VID_BASE] = MT7620_DMA_VID, -+}; -+ -+static void mt7620_fe_reset(void) -+{ -+ rt_sysc_w32(MT7620A_RESET_FE | MT7620A_RESET_ESW, SYSC_REG_RESET_CTRL); -+ rt_sysc_w32(0, SYSC_REG_RESET_CTRL); -+} -+ -+static void mt7620_fwd_config(struct fe_priv *priv) -+{ -+ int i; -+ -+ /* the frame engine pushes the VLAN tag according to the VIDX field in the Tx descriptor */ -+ for (i = 0; i < 16; i += 2) -+ fe_w32(((i + 1) << 16) + i, MT7620_DMA_VID + (i * 2)); -+ -+ fe_w32(fe_r32(MT7620A_GDMA1_FWD_CFG) & ~7, MT7620A_GDMA1_FWD_CFG); -+ fe_w32(fe_r32(MT7620A_GDMA1_FWD_CFG) | (GDMA_ICS_EN | GDMA_TCS_EN | GDMA_UCS_EN), MT7620A_GDMA1_FWD_CFG); -+ fe_w32(fe_r32(MT7620A_CDMA_CSG_CFG) | (CDMA_ICS_EN | CDMA_UCS_EN | CDMA_TCS_EN), MT7620A_CDMA_CSG_CFG); -+} -+ -+static void mt7620_tx_dma(struct fe_priv *priv, int idx, struct sk_buff *skb) -+{ -+ unsigned int nr_frags = 0; -+ unsigned int len = 0; -+ -+ if (skb) { -+ nr_frags = skb_shinfo(skb)->nr_frags; -+ len = skb->len - skb->data_len; -+ } -+ -+ if (!skb) -+ priv->tx_dma[idx].txd2 = TX_DMA_LSO | TX_DMA_DONE; -+ else if (!nr_frags) -+ priv->tx_dma[idx].txd2 = TX_DMA_LSO | TX_DMA_PLEN0(len); -+ else -+ priv->tx_dma[idx].txd2 = TX_DMA_PLEN0(len); -+ -+ if(skb && vlan_tx_tag_present(skb)) -+ priv->tx_dma[idx].txd4 = 0x80 | (vlan_tx_tag_get(skb) >> 13) << 4 | (vlan_tx_tag_get(skb) & 0xF); -+ else -+ priv->tx_dma[idx].txd4 = 0; -+} -+ -+static void mt7620_rx_dma(struct fe_priv *priv, int idx, int len) -+{ -+ priv->rx_dma[idx].rxd2 = RX_DMA_PLEN0(len); -+} -+ -+#ifdef CONFIG_INET_LRO -+static int -+mt7620_get_skb_header(struct sk_buff *skb, void **iphdr, void **tcph, -+ u64 *hdr_flags, void *_priv) -+{ -+ struct iphdr *iph = NULL; -+ int vhdr_len = 0; -+ -+ /* -+ * Make sure that this packet is Ethernet II, is not VLAN -+ * tagged, is IPv4, has a valid IP header, and is TCP.
-+ */ -+ if (skb->protocol == 0x0081) -+ vhdr_len = VLAN_HLEN; -+ -+ iph = (struct iphdr *)(skb->data + vhdr_len); -+ if(iph->protocol != IPPROTO_TCP) -+ return -1; -+ -+ *iphdr = iph; -+ *tcph = skb->data + (iph->ihl << 2) + vhdr_len; -+ *hdr_flags = LRO_IPV4 | LRO_TCP; -+ -+ return 0; -+} -+#endif -+ -+static void mt7620_init_data(struct fe_soc_data *data) -+{ -+ if (mt7620_get_eco() >= 5) -+ data->tso = 1; -+} -+ -+static struct fe_soc_data mt7620_data = { -+ .mac = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 }, -+ .init_data = mt7620_init_data, -+ .reset_fe = mt7620_fe_reset, -+ .set_mac = mt7620_set_mac, -+ .fwd_config = mt7620_fwd_config, -+ .tx_dma = mt7620_tx_dma, -+ .rx_dma = mt7620_rx_dma, -+ .switch_init = mt7620_gsw_probe, -+ .switch_config = mt7620_gsw_config, -+ .port_init = mt7620_port_init, -+ .min_pkt_len = 0, -+ .reg_table = rt5350_reg_table, -+ .pdma_glo_cfg = FE_PDMA_SIZE_16DWORDS | MT7620A_DMA_2B_OFFSET, -+ .rx_dly_int = RT5350_RX_DLY_INT, -+ .tx_dly_int = RT5350_TX_DLY_INT, -+ .checksum_bit = MT7620_L4_VALID, -+ .has_carrier = mt7620a_has_carrier, -+ .mdio_read = mt7620_mdio_read, -+ .mdio_write = mt7620_mdio_write, -+ .mdio_adjust_link = mt7620_mdio_link_adjust, -+#ifdef CONFIG_INET_LRO -+ .get_skb_header = mt7620_get_skb_header, -+#endif -+}; -+ -+const struct of_device_id of_fe_match[] = { -+ { .compatible = "ralink,mt7620a-eth", .data = &mt7620_data }, -+ {}, -+}; -+ -+MODULE_DEVICE_TABLE(of, of_fe_match); ---- /dev/null -+++ b/drivers/net/ethernet/ralink/soc_rt2880.c -@@ -0,0 +1,51 @@ -+/* -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; version 2 of the License -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. 
-+ * -+ * Copyright (C) 2009-2013 John Crispin -+ */ -+ -+#include -+ -+#include -+ -+#include "ralink_soc_eth.h" -+#include "mdio_rt2880.h" -+ -+#define SYSC_REG_RESET_CTRL 0x034 -+#define RT2880_RESET_FE BIT(18) -+ -+void rt2880_fe_reset(void) -+{ -+ rt_sysc_w32(RT2880_RESET_FE, SYSC_REG_RESET_CTRL); -+} -+ -+struct fe_soc_data rt2880_data = { -+ .mac = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 }, -+ .reset_fe = rt2880_fe_reset, -+ .min_pkt_len = 64, -+ .pdma_glo_cfg = FE_PDMA_SIZE_4DWORDS, -+ .checksum_bit = RX_DMA_L4VALID, -+ .rx_dly_int = FE_RX_DLY_INT, -+ .tx_dly_int = FE_TX_DLY_INT, -+ .mdio_read = rt2880_mdio_read, -+ .mdio_write = rt2880_mdio_write, -+ .mdio_adjust_link = rt2880_mdio_link_adjust, -+}; -+ -+const struct of_device_id of_fe_match[] = { -+ { .compatible = "ralink,rt2880-eth", .data = &rt2880_data }, -+ {}, -+}; -+ -+MODULE_DEVICE_TABLE(of, of_fe_match); ---- /dev/null -+++ b/drivers/net/ethernet/ralink/soc_rt305x.c -@@ -0,0 +1,113 @@ -+/* -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; version 2 of the License -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. -+ * -+ * Copyright (C) 2009-2013 John Crispin -+ */ -+ -+#include -+ -+#include -+ -+#include "ralink_soc_eth.h" -+ -+#define RT305X_RESET_FE BIT(21) -+#define RT305X_RESET_ESW BIT(23) -+#define SYSC_REG_RESET_CTRL 0x034 -+ -+static const u32 rt5350_reg_table[FE_REG_COUNT] = { -+ [FE_REG_PDMA_GLO_CFG] = RT5350_PDMA_GLO_CFG, -+ [FE_REG_PDMA_RST_CFG] = RT5350_PDMA_RST_CFG, -+ [FE_REG_DLY_INT_CFG] = RT5350_DLY_INT_CFG, -+ [FE_REG_TX_BASE_PTR0] = RT5350_TX_BASE_PTR0, -+ [FE_REG_TX_MAX_CNT0] = RT5350_TX_MAX_CNT0, -+ [FE_REG_TX_CTX_IDX0] = RT5350_TX_CTX_IDX0, -+ [FE_REG_RX_BASE_PTR0] = RT5350_RX_BASE_PTR0, -+ [FE_REG_RX_MAX_CNT0] = RT5350_RX_MAX_CNT0, -+ [FE_REG_RX_CALC_IDX0] = RT5350_RX_CALC_IDX0, -+ [FE_REG_FE_INT_ENABLE] = RT5350_FE_INT_ENABLE, -+ [FE_REG_FE_INT_STATUS] = RT5350_FE_INT_STATUS, -+ [FE_REG_FE_DMA_VID_BASE] = 0, -+}; -+ -+static void rt305x_fe_reset(void) -+{ -+ rt_sysc_w32(RT305X_RESET_FE, SYSC_REG_RESET_CTRL); -+ rt_sysc_w32(0, SYSC_REG_RESET_CTRL); -+} -+ -+static void rt5350_set_mac(struct fe_priv *priv, unsigned char *mac) -+{ -+ unsigned long flags; -+ -+ spin_lock_irqsave(&priv->page_lock, flags); -+ fe_w32((mac[0] << 8) | mac[1], RT5350_SDM_MAC_ADRH); -+ fe_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5], -+ RT5350_SDM_MAC_ADRL); -+ spin_unlock_irqrestore(&priv->page_lock, flags); -+} -+ -+static void rt5350_fwd_config(struct fe_priv *priv) -+{ -+ unsigned long sysclk = priv->sysclk; -+ -+ if (sysclk) { -+ sysclk /= FE_US_CYC_CNT_DIVISOR; -+ sysclk <<= FE_US_CYC_CNT_SHIFT; -+ -+ fe_w32((fe_r32(FE_FE_GLO_CFG) & -+ ~(FE_US_CYC_CNT_MASK << FE_US_CYC_CNT_SHIFT)) | sysclk, -+ FE_FE_GLO_CFG); -+ } -+ -+ fe_w32(fe_r32(RT5350_SDM_CFG) & ~0xffff, RT5350_SDM_CFG); -+ fe_w32(fe_r32(RT5350_SDM_CFG) | RT5350_SDM_ICS_EN | RT5350_SDM_TCS_EN | RT5350_SDM_UCS_EN, -+ RT5350_SDM_CFG); -+} -+ -+static void rt5350_fe_reset(void) -+{ -+ 
rt_sysc_w32(RT305X_RESET_FE | RT305X_RESET_ESW, SYSC_REG_RESET_CTRL); -+ rt_sysc_w32(0, SYSC_REG_RESET_CTRL); -+} -+ -+static struct fe_soc_data rt3050_data = { -+ .mac = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 }, -+ .reset_fe = rt305x_fe_reset, -+ .min_pkt_len = 64, -+ .pdma_glo_cfg = FE_PDMA_SIZE_4DWORDS, -+ .checksum_bit = RX_DMA_L4VALID, -+ .rx_dly_int = FE_RX_DLY_INT, -+ .tx_dly_int = FE_TX_DLY_INT, -+}; -+ -+static struct fe_soc_data rt5350_data = { -+ .mac = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 }, -+ .reg_table = rt5350_reg_table, -+ .reset_fe = rt5350_fe_reset, -+ .set_mac = rt5350_set_mac, -+ .fwd_config = rt5350_fwd_config, -+ .min_pkt_len = 64, -+ .pdma_glo_cfg = FE_PDMA_SIZE_4DWORDS, -+ .checksum_bit = RX_DMA_L4VALID, -+ .rx_dly_int = RT5350_RX_DLY_INT, -+ .tx_dly_int = RT5350_TX_DLY_INT, -+}; -+ -+const struct of_device_id of_fe_match[] = { -+ { .compatible = "ralink,rt3050-eth", .data = &rt3050_data }, -+ { .compatible = "ralink,rt5350-eth", .data = &rt5350_data }, -+ {}, -+}; -+ -+MODULE_DEVICE_TABLE(of, of_fe_match); ---- /dev/null -+++ b/drivers/net/ethernet/ralink/soc_rt3883.c -@@ -0,0 +1,60 @@ -+/* -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; version 2 of the License -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. -+ * -+ * Copyright (C) 2009-2013 John Crispin -+ */ -+ -+#include -+ -+#include -+ -+#include "ralink_soc_eth.h" -+#include "mdio_rt2880.h" -+ -+#define RT3883_SYSC_REG_RSTCTRL 0x34 -+#define RT3883_RSTCTRL_FE BIT(21) -+ -+static void rt3883_fe_reset(void) -+{ -+ u32 t; -+ -+ t = rt_sysc_r32(RT3883_SYSC_REG_RSTCTRL); -+ t |= RT3883_RSTCTRL_FE; -+ rt_sysc_w32(t , RT3883_SYSC_REG_RSTCTRL); -+ -+ t &= ~RT3883_RSTCTRL_FE; -+ rt_sysc_w32(t, RT3883_SYSC_REG_RSTCTRL); -+} -+ -+static struct fe_soc_data rt3883_data = { -+ .mac = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 }, -+ .reset_fe = rt3883_fe_reset, -+ .min_pkt_len = 64, -+ .pdma_glo_cfg = FE_PDMA_SIZE_4DWORDS, -+ .rx_dly_int = FE_RX_DLY_INT, -+ .tx_dly_int = FE_TX_DLY_INT, -+ .checksum_bit = RX_DMA_L4VALID, -+ .mdio_read = rt2880_mdio_read, -+ .mdio_write = rt2880_mdio_write, -+ .mdio_adjust_link = rt2880_mdio_link_adjust, -+ .port_init = rt2880_port_init, -+}; -+ -+const struct of_device_id of_fe_match[] = { -+ { .compatible = "ralink,rt3883-eth", .data = &rt3883_data }, -+ {}, -+}; -+ -+MODULE_DEVICE_TABLE(of, of_fe_match); -+ ---- /dev/null -+++ b/drivers/net/ethernet/ralink/mt7530.c -@@ -0,0 +1,467 @@ -+/* -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version 2 -+ * of the License, or (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * Copyright (C) 2013 John Crispin -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "mt7530.h" -+ -+#define MT7530_CPU_PORT 6 -+#define MT7530_NUM_PORTS 7 -+#define MT7530_NUM_VLANS 16 -+#define MT7530_NUM_VIDS 16 -+ -+#define REG_ESW_VLAN_VTCR 0x90 -+#define REG_ESW_VLAN_VAWD1 0x94 -+#define REG_ESW_VLAN_VAWD2 0x98 -+ -+enum { -+ /* Global attributes. */ -+ MT7530_ATTR_ENABLE_VLAN, -+}; -+ -+struct mt7530_port { -+ u16 pvid; -+}; -+ -+struct mt7530_vlan { -+ u8 ports; -+}; -+ -+struct mt7530_priv { -+ void __iomem *base; -+ struct mii_bus *bus; -+ struct switch_dev swdev; -+ -+ bool global_vlan_enable; -+ struct mt7530_vlan vlans[MT7530_NUM_VLANS]; -+ struct mt7530_port ports[MT7530_NUM_PORTS]; -+}; -+ -+struct mt7530_mapping { -+ char *name; -+ u8 pvids[6]; -+ u8 vlans[8]; -+} mt7530_defaults[] = { -+ { -+ .name = "llllw", -+ .pvids = { 1, 1, 1, 1, 2, 1 }, -+ .vlans = { 0, 0x6f, 0x50 }, -+ }, { -+ .name = "wllll", -+ .pvids = { 2, 1, 1, 1, 1, 1 }, -+ .vlans = { 0, 0x7e, 0x41 }, -+ }, -+}; -+ -+struct mt7530_mapping* -+mt7530_find_mapping(struct device_node *np) -+{ -+ const char *map; -+ int i; -+ -+ if (of_property_read_string(np, "ralink,port-map", &map)) -+ return NULL; -+ -+ for (i = 0; i < ARRAY_SIZE(mt7530_defaults); i++) -+ if (!strcmp(map, mt7530_defaults[i].name)) -+ return &mt7530_defaults[i]; -+ -+ return NULL; -+} -+ -+static void -+mt7530_apply_mapping(struct mt7530_priv *mt7530, struct mt7530_mapping *map) -+{ -+ int i = 0; -+ -+ mt7530->global_vlan_enable = 1; -+ -+ for (i = 0; i < 6; i++) -+ mt7530->ports[i].pvid = map->pvids[i]; -+ for (i = 0; i < 8; i++) -+ mt7530->vlans[i].ports = map->vlans[i]; -+} -+ -+static int -+mt7530_reset_switch(struct switch_dev *dev) -+{ -+ struct mt7530_priv *priv = container_of(dev, struct mt7530_priv, swdev); -+ -+ memset(priv->ports, 0, sizeof(priv->ports)); -+ memset(priv->vlans, 0, sizeof(priv->vlans)); -+ -+ return 0; -+} -+ -+static int -+mt7530_get_vlan_enable(struct switch_dev *dev, -+ const struct switch_attr *attr, -+ struct switch_val *val) -+{ -+ struct mt7530_priv *priv = container_of(dev, struct mt7530_priv, swdev); -+ -+ val->value.i = priv->global_vlan_enable; -+ -+ return 0; -+} -+ -+static int -+mt7530_set_vlan_enable(struct switch_dev *dev, -+ const struct switch_attr *attr, -+ struct switch_val *val) -+{ -+ struct mt7530_priv *priv = container_of(dev, struct mt7530_priv, swdev); -+ -+ priv->global_vlan_enable = val->value.i != 0; -+ -+ return 0; -+} -+ -+static u32 -+mt7530_r32(struct mt7530_priv *priv, u32 reg) -+{ -+ if (priv->bus) { -+ u16 high, low; -+ -+ mdiobus_write(priv->bus, 0x1f, 0x1f, (reg >> 6) & 0x3ff); -+ low = mdiobus_read(priv->bus, 0x1f, (reg >> 2) & 0xf); -+ high = mdiobus_read(priv->bus, 0x1f, 0x10); -+ -+ return (high << 16) | (low & 0xffff); -+ } -+ -+ return ioread32(priv->base + reg); -+} -+ -+static void -+mt7530_w32(struct mt7530_priv *priv, u32 reg, u32 val) -+{ -+ if (priv->bus) { -+ mdiobus_write(priv->bus, 0x1f, 0x1f, (reg >> 6) & 0x3ff); -+ mdiobus_write(priv->bus, 0x1f, (reg >> 2) & 0xf, val & 0xffff); -+ mdiobus_write(priv->bus, 0x1f, 0x10, val >> 16); -+ return; -+ } -+ -+ iowrite32(val, priv->base + reg); -+} -+ -+static void -+mt7530_vtcr(struct mt7530_priv *priv, u32 cmd, u32 val) -+{ -+ int i; -+ -+ mt7530_w32(priv, REG_ESW_VLAN_VTCR, BIT(31) | (cmd << 12) | val); -+ -+ for (i = 0; i < 20; i++) { -+ u32 val = 
mt7530_r32(priv, REG_ESW_VLAN_VTCR); -+ -+ if ((val & BIT(31)) == 0) -+ break; -+ -+ udelay(1000); -+ } -+ if (i == 20) -+ printk("mt7530: vtcr timeout\n"); -+} -+ -+static int -+mt7530_get_port_pvid(struct switch_dev *dev, int port, int *val) -+{ -+ struct mt7530_priv *priv = container_of(dev, struct mt7530_priv, swdev); -+ -+ if (port >= MT7530_NUM_PORTS) -+ return -EINVAL; -+ -+ *val = mt7530_r32(priv, 0x2014 + (0x100 * port)); -+ *val &= 0xff; -+ -+ return 0; -+} -+ -+static int -+mt7530_set_port_pvid(struct switch_dev *dev, int port, int pvid) -+{ -+ struct mt7530_priv *priv = container_of(dev, struct mt7530_priv, swdev); -+ -+ if (port >= MT7530_NUM_PORTS) -+ return -1; -+ -+ priv->ports[port].pvid = pvid; -+ -+ return 0; -+} -+ -+static int -+mt7530_get_vlan_ports(struct switch_dev *dev, struct switch_val *val) -+{ -+ struct mt7530_priv *priv = container_of(dev, struct mt7530_priv, swdev); -+ u32 member; -+ int i; -+ -+ val->len = 0; -+ -+ if (val->port_vlan < 0 || val->port_vlan >= MT7530_NUM_VIDS) -+ return -EINVAL; -+ -+ mt7530_vtcr(priv, 0, val->port_vlan); -+ member = mt7530_r32(priv, REG_ESW_VLAN_VAWD1); -+ member >>= 16; -+ member &= 0xff; -+ -+ for (i = 0; i < MT7530_NUM_PORTS; i++) { -+ struct switch_port *p; -+ if (!(member & BIT(i))) -+ continue; -+ -+ p = &val->value.ports[val->len++]; -+ p->id = i; -+ p->flags = 0; -+ } -+ -+ return 0; -+} -+ -+static int -+mt7530_set_vlan_ports(struct switch_dev *dev, struct switch_val *val) -+{ -+ struct mt7530_priv *priv = container_of(dev, struct mt7530_priv, swdev); -+ int ports = 0; -+ int i; -+ -+ if (val->port_vlan < 0 || val->port_vlan >= MT7530_NUM_VIDS || -+ val->len > MT7530_NUM_PORTS) -+ return -EINVAL; -+ -+ for (i = 0; i < val->len; i++) { -+ struct switch_port *p = &val->value.ports[i]; -+ -+ if (p->id >= MT7530_NUM_PORTS) -+ return -EINVAL; -+ -+ ports |= BIT(p->id); -+ } -+ priv->vlans[val->port_vlan].ports = ports; -+ -+ return 0; -+} -+ -+static int -+mt7530_apply_config(struct switch_dev *dev) -+{ -+ struct mt7530_priv *priv = container_of(dev, struct mt7530_priv, swdev); -+ int i; -+ -+ if (!priv->global_vlan_enable) { -+ mt7530_w32(priv, 0x2004, 0xff000); -+ mt7530_w32(priv, 0x2104, 0xff000); -+ mt7530_w32(priv, 0x2204, 0xff000); -+ mt7530_w32(priv, 0x2304, 0xff000); -+ mt7530_w32(priv, 0x2404, 0xff000); -+ mt7530_w32(priv, 0x2504, 0xff000); -+ mt7530_w32(priv, 0x2604, 0xff000); -+ mt7530_w32(priv, 0x2010, 0x810000c); -+ mt7530_w32(priv, 0x2110, 0x810000c); -+ mt7530_w32(priv, 0x2210, 0x810000c); -+ mt7530_w32(priv, 0x2310, 0x810000c); -+ mt7530_w32(priv, 0x2410, 0x810000c); -+ mt7530_w32(priv, 0x2510, 0x810000c); -+ mt7530_w32(priv, 0x2610, 0x810000c); -+ return 0; -+ } -+ -+ // LAN/WAN ports as security mode -+ mt7530_w32(priv, 0x2004, 0xff0003); -+ mt7530_w32(priv, 0x2104, 0xff0003); -+ mt7530_w32(priv, 0x2204, 0xff0003); -+ mt7530_w32(priv, 0x2304, 0xff0003); -+ mt7530_w32(priv, 0x2404, 0xff0003); -+ mt7530_w32(priv, 0x2504, 0xff0003); -+ // LAN/WAN ports as transparent port -+ mt7530_w32(priv, 0x2010, 0x810000c0); -+ mt7530_w32(priv, 0x2110, 0x810000c0); -+ mt7530_w32(priv, 0x2210, 0x810000c0); -+ mt7530_w32(priv, 0x2310, 0x810000c0); -+ mt7530_w32(priv, 0x2410, 0x810000c0); -+ mt7530_w32(priv, 0x2510, 0x810000c0); -+ -+ // set CPU/P7 port as user port -+ mt7530_w32(priv, 0x2610, 0x81000000); -+ mt7530_w32(priv, 0x2710, 0x81000000); -+ -+ mt7530_w32(priv, 0x2604, 0x20ff0003); -+ mt7530_w32(priv, 0x2704, 0x20ff0003); -+ mt7530_w32(priv, 0x2610, 0x81000000); -+ -+ for (i = 0; i < MT7530_NUM_VLANS; i++) { -+ u8 
ports = priv->vlans[i].ports; -+ u32 val = mt7530_r32(priv, 0x100 + 4 * (i / 2)); -+ -+ if (i % 2 == 0) { -+ val &= 0xfff000; -+ val |= i; -+ } else { -+ val &= 0xfff; -+ val |= (i << 12); -+ } -+ mt7530_w32(priv, 0x100 + 4 * (i / 2), val); -+ -+ if (ports) -+ mt7530_w32(priv, REG_ESW_VLAN_VAWD1, BIT(30) | (ports << 16) | BIT(0)); -+ else -+ mt7530_w32(priv, REG_ESW_VLAN_VAWD1, 0); -+ -+ mt7530_vtcr(priv, 1, i); -+ } -+ -+ for (i = 0; i < MT7530_NUM_PORTS; i++) -+ mt7530_w32(priv, 0x2014 + (0x100 * i), 0x10000 | priv->ports[i].pvid); -+ -+ return 0; -+} -+ -+static int -+mt7530_get_port_link(struct switch_dev *dev, int port, -+ struct switch_port_link *link) -+{ -+ struct mt7530_priv *priv = container_of(dev, struct mt7530_priv, swdev); -+ u32 speed, pmsr; -+ -+ if (port < 0 || port >= MT7530_NUM_PORTS) -+ return -EINVAL; -+ -+ pmsr = mt7530_r32(priv, 0x3008 + (0x100 * port)); -+ -+ link->link = pmsr & 1; -+ link->duplex = (pmsr >> 1) & 1; -+ speed = (pmsr >> 2) & 3; -+ -+ switch (speed) { -+ case 0: -+ link->speed = SWITCH_PORT_SPEED_10; -+ break; -+ case 1: -+ link->speed = SWITCH_PORT_SPEED_100; -+ break; -+ case 2: -+ case 3: /* forced gige speed can be 2 or 3 */ -+ link->speed = SWITCH_PORT_SPEED_1000; -+ break; -+ default: -+ link->speed = SWITCH_PORT_SPEED_UNKNOWN; -+ break; -+ } -+ -+ return 0; -+} -+ -+static const struct switch_attr mt7530_global[] = { -+ { -+ .type = SWITCH_TYPE_INT, -+ .name = "enable_vlan", -+ .description = "VLAN mode (1:enabled)", -+ .max = 1, -+ .id = MT7530_ATTR_ENABLE_VLAN, -+ .get = mt7530_get_vlan_enable, -+ .set = mt7530_set_vlan_enable, -+ }, -+}; -+ -+static const struct switch_attr mt7530_port[] = { -+}; -+ -+static const struct switch_attr mt7530_vlan[] = { -+}; -+ -+static const struct switch_dev_ops mt7530_ops = { -+ .attr_global = { -+ .attr = mt7530_global, -+ .n_attr = ARRAY_SIZE(mt7530_global), -+ }, -+ .attr_port = { -+ .attr = mt7530_port, -+ .n_attr = ARRAY_SIZE(mt7530_port), -+ }, -+ .attr_vlan = { -+ .attr = mt7530_vlan, -+ .n_attr = ARRAY_SIZE(mt7530_vlan), -+ }, -+ .get_vlan_ports = mt7530_get_vlan_ports, -+ .set_vlan_ports = mt7530_set_vlan_ports, -+ .get_port_pvid = mt7530_get_port_pvid, -+ .set_port_pvid = mt7530_set_port_pvid, -+ .get_port_link = mt7530_get_port_link, -+ .apply_config = mt7530_apply_config, -+ .reset_switch = mt7530_reset_switch, -+}; -+ -+int -+mt7530_probe(struct device *dev, void __iomem *base, struct mii_bus *bus) -+{ -+ struct switch_dev *swdev; -+ struct mt7530_priv *mt7530; -+ struct mt7530_mapping *map; -+ int ret; -+ -+ if (bus && bus->phy_map[0x1f]->phy_id != 0x1beef) -+ return 0; -+ -+ mt7530 = devm_kzalloc(dev, sizeof(struct mt7530_priv), GFP_KERNEL); -+ if (!mt7530) -+ return -ENOMEM; -+ -+ mt7530->base = base; -+ mt7530->bus = bus; -+ mt7530->global_vlan_enable = 1; -+ -+ swdev = &mt7530->swdev; -+ swdev->name = "mt7530"; -+ swdev->alias = "mt7530"; -+ swdev->cpu_port = MT7530_CPU_PORT; -+ swdev->ports = MT7530_NUM_PORTS; -+ swdev->vlans = MT7530_NUM_VLANS; -+ swdev->ops = &mt7530_ops; -+ -+ ret = register_switch(swdev, NULL); -+ if (ret) { -+ dev_err(dev, "failed to register mt7530\n"); -+ return ret; -+ } -+ -+ dev_info(dev, "loaded mt7530 driver\n"); -+ -+ map = mt7530_find_mapping(dev->of_node); -+ if (map) -+ mt7530_apply_mapping(mt7530, map); -+ mt7530_apply_config(swdev); -+ -+ return 0; -+} ---- /dev/null -+++ b/drivers/net/ethernet/ralink/mt7530.h -@@ -0,0 +1,20 @@ -+/* -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public 
License -+ * as published by the Free Software Foundation; either version 2 -+ * of the License, or (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * Copyright (C) 2013 John Crispin -+ */ -+ -+#ifndef _MT7530_H__ -+#define _MT7530_H__ -+ -+int mt7530_probe(struct device *dev, void __iomem *base, struct mii_bus *bus); -+ -+#endif diff --git a/target/linux/ramips/patches-3.10/0112-USB-phy-add-ralink-SoC-driver.patch b/target/linux/ramips/patches-3.10/0112-USB-phy-add-ralink-SoC-driver.patch deleted file mode 100644 index ddfc856687..0000000000 --- a/target/linux/ramips/patches-3.10/0112-USB-phy-add-ralink-SoC-driver.patch +++ /dev/null @@ -1,229 +0,0 @@ -From c5f51197b13fd312324ac0486a46e530e163eade Mon Sep 17 00:00:00 2001 -From: John Crispin -Date: Sun, 14 Jul 2013 23:31:19 +0200 -Subject: [PATCH 18/33] USB: phy: add ralink SoC driver - -Signed-off-by: John Crispin ---- - drivers/usb/phy/Kconfig | 8 ++ - drivers/usb/phy/Makefile | 1 + - drivers/usb/phy/ralink-phy.c | 191 ++++++++++++++++++++++++++++++++++++++++++ - 3 files changed, 200 insertions(+) - create mode 100644 drivers/usb/phy/ralink-phy.c - ---- a/drivers/usb/phy/Kconfig -+++ b/drivers/usb/phy/Kconfig -@@ -210,4 +210,12 @@ config USB_ULPI_VIEWPORT - Provides read/write operations to the ULPI phy register set for - controllers with a viewport register (e.g. Chipidea/ARC controllers). - -+config RALINK_USBPHY -+ bool "Ralink USB PHY controller Driver" -+ depends on MIPS && RALINK -+ select USB_OTG_UTILS -+ help -+ Enable this to support ralink USB phy controller for ralink -+ SoCs. -+ - endif # USB_PHY ---- a/drivers/usb/phy/Makefile -+++ b/drivers/usb/phy/Makefile -@@ -31,3 +31,4 @@ obj-$(CONFIG_USB_MXS_PHY) += phy-mxs-us - obj-$(CONFIG_USB_RCAR_PHY) += phy-rcar-usb.o - obj-$(CONFIG_USB_ULPI) += phy-ulpi.o - obj-$(CONFIG_USB_ULPI_VIEWPORT) += phy-ulpi-viewport.o -+obj-$(CONFIG_RALINK_USBPHY) += ralink-phy.o ---- /dev/null -+++ b/drivers/usb/phy/ralink-phy.c -@@ -0,0 +1,191 @@ -+/* -+ * Copyright (C) 2013 John Crispin -+ * -+ * based on: Renesas R-Car USB phy driver -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+ -+#define RT_SYSC_REG_SYSCFG1 0x014 -+#define RT_SYSC_REG_CLKCFG1 0x030 -+#define RT_SYSC_REG_USB_PHY_CFG 0x05c -+ -+#define RT_RSTCTRL_UDEV BIT(25) -+#define RT_RSTCTRL_UHST BIT(22) -+#define RT_SYSCFG1_USB0_HOST_MODE BIT(10) -+ -+#define MT7620_CLKCFG1_UPHY0_CLK_EN BIT(25) -+#define RT_CLKCFG1_UPHY1_CLK_EN BIT(20) -+#define RT_CLKCFG1_UPHY0_CLK_EN BIT(18) -+ -+#define USB_PHY_UTMI_8B60M BIT(1) -+#define UDEV_WAKEUP BIT(0) -+ -+static atomic_t usb_pwr_ref = ATOMIC_INIT(0); -+static struct reset_control *rstdev; -+static struct reset_control *rsthost; -+static u32 phy_clk; -+ -+static void usb_phy_enable(int state) -+{ -+ if (state) -+ rt_sysc_m32(0, phy_clk, RT_SYSC_REG_CLKCFG1); -+ else -+ rt_sysc_m32(phy_clk, 0, RT_SYSC_REG_CLKCFG1); -+ mdelay(100); -+} -+ -+static int usb_power_on(struct usb_phy *phy) -+{ -+ if (atomic_inc_return(&usb_pwr_ref) == 1) { -+ u32 t; -+ -+ usb_phy_enable(1); -+ -+// reset_control_assert(rstdev); -+// reset_control_assert(rsthost); -+ -+ if (OTG_STATE_B_HOST) { -+ rt_sysc_m32(0, RT_SYSCFG1_USB0_HOST_MODE, RT_SYSC_REG_SYSCFG1); -+ reset_control_deassert(rsthost); -+ } else { -+ rt_sysc_m32(RT_SYSCFG1_USB0_HOST_MODE, 0, RT_SYSC_REG_SYSCFG1); -+ reset_control_deassert(rstdev); -+ } -+ mdelay(100); -+ -+ t = rt_sysc_r32(RT_SYSC_REG_USB_PHY_CFG); -+ dev_info(phy->dev, "remote usb device wakeup %s\n", -+ (t & UDEV_WAKEUP) ? ("enabbled") : ("disabled")); -+ if (t & USB_PHY_UTMI_8B60M) -+ dev_info(phy->dev, "UTMI 8bit 60MHz\n"); -+ else -+ dev_info(phy->dev, "UTMI 16bit 30MHz\n"); -+ } -+ -+ return 0; -+} -+ -+static void usb_power_off(struct usb_phy *phy) -+{ -+ if (atomic_dec_return(&usb_pwr_ref) == 0) { -+ usb_phy_enable(0); -+ reset_control_assert(rstdev); -+ reset_control_assert(rsthost); -+ } -+} -+ -+static int usb_set_host(struct usb_otg *otg, struct usb_bus *host) -+{ -+ otg->gadget = NULL; -+ otg->host = host; -+ -+ return 0; -+} -+ -+static int usb_set_peripheral(struct usb_otg *otg, -+ struct usb_gadget *gadget) -+{ -+ otg->host = NULL; -+ otg->gadget = gadget; -+ -+ return 0; -+} -+ -+static const struct of_device_id ralink_usbphy_dt_match[] = { -+ { .compatible = "ralink,rt3xxx-usbphy", .data = (void *) (RT_CLKCFG1_UPHY1_CLK_EN | RT_CLKCFG1_UPHY0_CLK_EN) }, -+ { .compatible = "ralink,mt7620a-usbphy", .data = (void *) MT7620_CLKCFG1_UPHY0_CLK_EN }, -+ {}, -+}; -+MODULE_DEVICE_TABLE(of, ralink_usbphy_dt_match); -+ -+static int usb_phy_probe(struct platform_device *pdev) -+{ -+ const struct of_device_id *match; -+ struct device *dev = &pdev->dev; -+ struct usb_otg *otg; -+ struct usb_phy *phy; -+ int ret; -+ -+ match = of_match_device(ralink_usbphy_dt_match, &pdev->dev); -+ phy_clk = (int) match->data; -+ -+ rsthost = devm_reset_control_get(&pdev->dev, "host"); -+ if (IS_ERR(rsthost)) -+ return PTR_ERR(rsthost); -+ -+ rstdev = devm_reset_control_get(&pdev->dev, "device"); -+ if (IS_ERR(rstdev)) -+ return PTR_ERR(rstdev); -+ -+ phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL); -+ if (!phy) { -+ dev_err(&pdev->dev, "unable to allocate memory for USB PHY\n"); -+ return -ENOMEM; -+ } -+ -+ otg = devm_kzalloc(&pdev->dev, sizeof(*otg), GFP_KERNEL); -+ if (!otg) { -+ dev_err(&pdev->dev, "unable to allocate memory for USB OTG\n"); -+ return -ENOMEM; -+ } -+ -+ phy->dev = dev; -+ phy->label = dev_name(dev); -+ phy->init = usb_power_on; -+ phy->shutdown = usb_power_off; -+ otg->set_host = usb_set_host; -+ otg->set_peripheral = usb_set_peripheral; -+ 
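	/*
	 * Link the phy and otg structures both ways before registering the
	 * phy with usb_add_phy(): the EHCI/OHCI platform glue in this same
	 * series looks the phy up via devm_usb_get_phy() and then reaches the
	 * set_host()/set_peripheral() callbacks installed above through
	 * phy->otg.
	 */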
otg->phy = phy; -+ phy->otg = otg; -+ ret = usb_add_phy(phy, USB_PHY_TYPE_USB2); -+ -+ if (ret < 0) { -+ dev_err(dev, "usb phy addition error\n"); -+ return ret; -+ } -+ -+ platform_set_drvdata(pdev, phy); -+ -+ dev_info(&pdev->dev, "loaded\n"); -+ -+ return ret; -+} -+ -+static int usb_phy_remove(struct platform_device *pdev) -+{ -+ struct usb_phy *phy = platform_get_drvdata(pdev); -+ -+ usb_remove_phy(phy); -+ -+ return 0; -+} -+ -+static struct platform_driver usb_phy_driver = { -+ .driver = { -+ .owner = THIS_MODULE, -+ .name = "rt3xxx-usbphy", -+ .of_match_table = of_match_ptr(ralink_usbphy_dt_match), -+ }, -+ .probe = usb_phy_probe, -+ .remove = usb_phy_remove, -+}; -+ -+module_platform_driver(usb_phy_driver); -+ -+MODULE_LICENSE("GPL v2"); -+MODULE_DESCRIPTION("Ralink USB phy"); -+MODULE_AUTHOR("John Crispin "); diff --git a/target/linux/ramips/patches-3.10/0112-asoc-add-mt7620-support.patch b/target/linux/ramips/patches-3.10/0112-asoc-add-mt7620-support.patch new file mode 100644 index 0000000000..469d2e404f --- /dev/null +++ b/target/linux/ramips/patches-3.10/0112-asoc-add-mt7620-support.patch @@ -0,0 +1,711 @@ +From d4398d880eba386cb85d0a1a2ba39a336876dc0a Mon Sep 17 00:00:00 2001 +From: John Crispin +Date: Tue, 3 Dec 2013 20:18:13 +0100 +Subject: [PATCH 112/133] asoc: add mt7620 support + +Signed-off-by: John Crispin +--- + arch/mips/ralink/of.c | 2 + + sound/soc/Kconfig | 1 + + sound/soc/Makefile | 1 + + sound/soc/ralink/Kconfig | 15 ++ + sound/soc/ralink/Makefile | 11 + + sound/soc/ralink/mt7620-i2s.c | 466 ++++++++++++++++++++++++++++++++++++++ + sound/soc/ralink/mt7620-wm8960.c | 125 ++++++++++ + sound/soc/soc-io.c | 10 - + 8 files changed, 621 insertions(+), 10 deletions(-) + create mode 100644 sound/soc/ralink/Kconfig + create mode 100644 sound/soc/ralink/Makefile + create mode 100644 sound/soc/ralink/mt7620-i2s.c + create mode 100644 sound/soc/ralink/mt7620-wm8960.c + +--- a/arch/mips/ralink/of.c ++++ b/arch/mips/ralink/of.c +@@ -15,6 +15,7 @@ + #include + #include + #include ++#include + #include + #include + +@@ -25,6 +26,7 @@ + #include "common.h" + + __iomem void *rt_sysc_membase; ++EXPORT_SYMBOL(rt_sysc_membase); + __iomem void *rt_memc_membase; + + extern struct boot_param_header __dtb_start; +--- a/sound/soc/Kconfig ++++ b/sound/soc/Kconfig +@@ -48,6 +48,7 @@ source "sound/soc/kirkwood/Kconfig" + source "sound/soc/mid-x86/Kconfig" + source "sound/soc/mxs/Kconfig" + source "sound/soc/pxa/Kconfig" ++source "sound/soc/ralink/Kconfig" + source "sound/soc/samsung/Kconfig" + source "sound/soc/s6000/Kconfig" + source "sound/soc/sh/Kconfig" +--- a/sound/soc/Makefile ++++ b/sound/soc/Makefile +@@ -26,6 +26,7 @@ obj-$(CONFIG_SND_SOC) += nuc900/ + obj-$(CONFIG_SND_SOC) += omap/ + obj-$(CONFIG_SND_SOC) += kirkwood/ + obj-$(CONFIG_SND_SOC) += pxa/ ++obj-$(CONFIG_SND_SOC) += ralink/ + obj-$(CONFIG_SND_SOC) += samsung/ + obj-$(CONFIG_SND_SOC) += s6000/ + obj-$(CONFIG_SND_SOC) += sh/ +--- /dev/null ++++ b/sound/soc/ralink/Kconfig +@@ -0,0 +1,15 @@ ++config SND_MT7620_SOC_I2S ++ depends on SOC_MT7620 && SND_SOC ++ select SND_SOC_GENERIC_DMAENGINE_PCM ++ tristate "SoC Audio (I2S protocol) for Ralink MT7620 SoC" ++ help ++ Say Y if you want to use I2S protocol and I2S codec on Ingenic MT7620 ++ based boards. ++ ++config SND_MT7620_SOC_WM8960 ++ tristate "SoC Audio support for Ralink WM8960" ++ select SND_MT7620_SOC_I2S ++ select SND_SOC_WM8960 ++ help ++ Say Y if you want to add support for ASoC audio on the Qi LB60 board ++ a.k.a Qi Ben NanoNote. 
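# Both options build drivers that bind via device tree: the I2S platform
# driver matches "ralink,mt7620a-i2s", and the WM8960 machine driver matches
# "ralink,wm8960-audio" and resolves its "cpu-dai" and "codec-dai" phandles
# (see mt7620-i2s.c and mt7620-wm8960.c added below).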
+--- /dev/null ++++ b/sound/soc/ralink/Makefile +@@ -0,0 +1,11 @@ ++# ++# Jz4740 Platform Support ++# ++snd-soc-mt7620-i2s-objs := mt7620-i2s.o ++ ++obj-$(CONFIG_SND_MT7620_SOC_I2S) += snd-soc-mt7620-i2s.o ++ ++# Jz4740 Machine Support ++snd-soc-mt7620-wm8960-objs := mt7620-wm8960.o ++ ++obj-$(CONFIG_SND_MT7620_SOC_WM8960) += snd-soc-mt7620-wm8960.o +--- /dev/null ++++ b/sound/soc/ralink/mt7620-i2s.c +@@ -0,0 +1,466 @@ ++/* ++ * Copyright (C) 2010, Lars-Peter Clausen ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 675 Mass Ave, Cambridge, MA 02139, USA. ++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#define I2S_REG_CFG0 0x00 ++#define I2S_REG_CFG0_EN BIT(31) ++#define I2S_REG_CFG0_DMA_EN BIT(30) ++#define I2S_REG_CFG0_BYTE_SWAP BIT(28) ++#define I2S_REG_CFG0_TX_EN BIT(24) ++#define I2S_REG_CFG0_RX_EN BIT(20) ++#define I2S_REG_CFG0_SLAVE BIT(16) ++#define I2S_REG_CFG0_RX_THRES 12 ++#define I2S_REG_CFG0_TX_THRES 4 ++#define I2S_REG_CFG0_DFT_THRES (4 << I2S_REG_CFG0_RX_THRES) | \ ++ (4 << I2S_REG_CFG0_TX_THRES) ++ ++#define I2S_REG_INT_STATUS 0x04 ++#define I2S_REG_INT_EN 0x08 ++#define I2S_REG_FF_STATUS 0x0c ++#define I2S_REG_WREG 0x10 ++#define I2S_REG_RREG 0x14 ++#define I2S_REG_CFG1 0x18 ++ ++#define I2S_REG_DIVCMP 0x20 ++#define I2S_REG_DIVINT 0x24 ++#define I2S_REG_CLK_EN BIT(31) ++ ++struct mt7620_i2s { ++ struct resource *mem; ++ void __iomem *base; ++ dma_addr_t phys_base; ++ ++ struct snd_dmaengine_dai_dma_data playback_dma_data; ++ struct snd_dmaengine_dai_dma_data capture_dma_data; ++}; ++ ++static inline uint32_t mt7620_i2s_read(const struct mt7620_i2s *i2s, ++ unsigned int reg) ++{ ++ return readl(i2s->base + reg); ++} ++ ++static inline void mt7620_i2s_write(const struct mt7620_i2s *i2s, ++ unsigned int reg, uint32_t value) ++{ ++ //printk("i2s --> %p = 0x%08X\n", i2s->base + reg, value); ++ writel(value, i2s->base + reg); ++} ++ ++static int mt7620_i2s_startup(struct snd_pcm_substream *substream, ++ struct snd_soc_dai *dai) ++{ ++ struct mt7620_i2s *i2s = snd_soc_dai_get_drvdata(dai); ++ uint32_t cfg; ++ ++ if (dai->active) ++ return 0; ++ ++ cfg = mt7620_i2s_read(i2s, I2S_REG_CFG0); ++ cfg |= I2S_REG_CFG0_EN; ++ mt7620_i2s_write(i2s, I2S_REG_CFG0, cfg); ++ ++ return 0; ++} ++ ++static void mt7620_i2s_shutdown(struct snd_pcm_substream *substream, ++ struct snd_soc_dai *dai) ++{ ++ struct mt7620_i2s *i2s = snd_soc_dai_get_drvdata(dai); ++ uint32_t cfg; ++ ++ if (dai->active) ++ return; ++ ++ cfg = mt7620_i2s_read(i2s, I2S_REG_CFG0); ++ cfg &= ~I2S_REG_CFG0_EN; ++ mt7620_i2s_write(i2s, I2S_REG_CFG0, cfg); ++} ++ ++static int mt7620_i2s_trigger(struct snd_pcm_substream *substream, int cmd, ++ struct snd_soc_dai *dai) ++{ ++ struct mt7620_i2s *i2s = snd_soc_dai_get_drvdata(dai); ++ ++ uint32_t cfg; ++ uint32_t mask; ++ ++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ++ mask = I2S_REG_CFG0_TX_EN; ++ else ++ mask = I2S_REG_CFG0_RX_EN; ++ ++ cfg = mt7620_i2s_read(i2s, I2S_REG_CFG0); ++ ++ switch (cmd) { ++ case SNDRV_PCM_TRIGGER_START: ++ case 
SNDRV_PCM_TRIGGER_RESUME: ++ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: ++ cfg |= mask; ++ break; ++ case SNDRV_PCM_TRIGGER_STOP: ++ case SNDRV_PCM_TRIGGER_SUSPEND: ++ case SNDRV_PCM_TRIGGER_PAUSE_PUSH: ++ cfg &= ~mask; ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ if (cfg & (I2S_REG_CFG0_TX_EN | I2S_REG_CFG0_RX_EN)) ++ cfg |= I2S_REG_CFG0_DMA_EN; ++ else ++ cfg &= ~I2S_REG_CFG0_DMA_EN; ++ ++ mt7620_i2s_write(i2s, I2S_REG_CFG0, cfg); ++ ++ return 0; ++} ++ ++static int mt7620_i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) ++{ ++ struct mt7620_i2s *i2s = snd_soc_dai_get_drvdata(dai); ++ uint32_t cfg; ++ ++ cfg = mt7620_i2s_read(i2s, I2S_REG_CFG0); ++ ++ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { ++ case SND_SOC_DAIFMT_CBS_CFS: ++ cfg |= I2S_REG_CFG0_SLAVE; ++ break; ++ case SND_SOC_DAIFMT_CBM_CFM: ++ cfg &= ~I2S_REG_CFG0_SLAVE; ++ break; ++ case SND_SOC_DAIFMT_CBM_CFS: ++ default: ++ return -EINVAL; ++ } ++ ++ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { ++ case SND_SOC_DAIFMT_I2S: ++ case SND_SOC_DAIFMT_MSB: ++ cfg &= ~I2S_REG_CFG0_BYTE_SWAP; ++ break; ++ case SND_SOC_DAIFMT_LSB: ++ cfg |= I2S_REG_CFG0_BYTE_SWAP; ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ switch (fmt & SND_SOC_DAIFMT_INV_MASK) { ++ case SND_SOC_DAIFMT_NB_NF: ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ mt7620_i2s_write(i2s, I2S_REG_CFG0, cfg); ++ ++ return 0; ++} ++ ++static int mt7620_i2s_hw_params(struct snd_pcm_substream *substream, ++ struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) ++{ ++ ++ return 0; ++} ++ ++unsigned long i2sMaster_inclk_int[11] = { ++ 78, 56, 52, 39, 28, 26, 19, 14, 13, 9, 6}; ++unsigned long i2sMaster_inclk_comp[11] = { ++ 64, 352, 42, 32, 176, 21, 272, 88, 10, 455, 261}; ++ ++ ++static int mt7620_i2s_set_sysclk(struct snd_soc_dai *dai, int clk_id, ++ unsigned int freq, int dir) ++{ ++ struct mt7620_i2s *i2s = snd_soc_dai_get_drvdata(dai); ++ ++ printk("Internal REFCLK with fractional division\n"); ++ ++ mt7620_i2s_write(i2s, I2S_REG_DIVINT, i2sMaster_inclk_int[7]); ++ mt7620_i2s_write(i2s, I2S_REG_DIVCMP, ++ i2sMaster_inclk_comp[7] | I2S_REG_CLK_EN); ++ ++/* struct mt7620_i2s *i2s = snd_soc_dai_get_drvdata(dai); ++ struct clk *parent; ++ int ret = 0; ++ ++ switch (clk_id) { ++ case JZ4740_I2S_CLKSRC_EXT: ++ parent = clk_get(NULL, "ext"); ++ clk_set_parent(i2s->clk_i2s, parent); ++ break; ++ case JZ4740_I2S_CLKSRC_PLL: ++ parent = clk_get(NULL, "pll half"); ++ clk_set_parent(i2s->clk_i2s, parent); ++ ret = clk_set_rate(i2s->clk_i2s, freq); ++ break; ++ default: ++ return -EINVAL; ++ } ++ clk_put(parent); ++ ++ return ret;*/ ++ return 0; ++} ++ ++static int mt7620_i2s_suspend(struct snd_soc_dai *dai) ++{ ++ struct mt7620_i2s *i2s = snd_soc_dai_get_drvdata(dai); ++ uint32_t cfg; ++ ++ if (dai->active) { ++ cfg = mt7620_i2s_read(i2s, I2S_REG_CFG0); ++ cfg &= ~I2S_REG_CFG0_TX_EN; ++ mt7620_i2s_write(i2s, I2S_REG_CFG0, cfg); ++ } ++ ++ return 0; ++} ++ ++static int mt7620_i2s_resume(struct snd_soc_dai *dai) ++{ ++ struct mt7620_i2s *i2s = snd_soc_dai_get_drvdata(dai); ++ uint32_t cfg; ++ ++ if (dai->active) { ++ cfg = mt7620_i2s_read(i2s, I2S_REG_CFG0); ++ cfg |= I2S_REG_CFG0_TX_EN; ++ mt7620_i2s_write(i2s, I2S_REG_CFG0, cfg); ++ } ++ ++ return 0; ++} ++ ++static void mt7620_i2c_init_pcm_config(struct mt7620_i2s *i2s) ++{ ++ struct snd_dmaengine_dai_dma_data *dma_data; ++ ++ /* Playback */ ++ dma_data = &i2s->playback_dma_data; ++ dma_data->maxburst = 16; ++ dma_data->slave_id = 2; //JZ4740_DMA_TYPE_AIC_TRANSMIT; ++ dma_data->addr = i2s->phys_base + I2S_REG_WREG; ++ 
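	/*
	 * Capture mirrors the playback setup: the same 16-word burst size,
	 * but the DMA address points at the RX FIFO register (I2S_REG_RREG)
	 * instead of the TX FIFO (I2S_REG_WREG). The slave_id values are
	 * carried over from the jz4740 driver this file is based on.
	 */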
++ /* Capture */ ++ dma_data = &i2s->capture_dma_data; ++ dma_data->maxburst = 16; ++ dma_data->slave_id = 3; //JZ4740_DMA_TYPE_AIC_RECEIVE; ++ dma_data->addr = i2s->phys_base + I2S_REG_RREG; ++} ++ ++static int mt7620_i2s_dai_probe(struct snd_soc_dai *dai) ++{ ++ struct mt7620_i2s *i2s = snd_soc_dai_get_drvdata(dai); ++ uint32_t data; ++ ++ mt7620_i2c_init_pcm_config(i2s); ++ dai->playback_dma_data = &i2s->playback_dma_data; ++ dai->capture_dma_data = &i2s->capture_dma_data; ++ ++ /* set share pins to i2s/gpio mode and i2c mode */ ++ data = rt_sysc_r32(0x60); ++ data &= 0xFFFFFFE2; ++ data |= 0x00000018; ++ rt_sysc_w32(data, 0x60); ++ ++ printk("Internal REFCLK with fractional division\n"); ++ ++ mt7620_i2s_write(i2s, I2S_REG_CFG0, I2S_REG_CFG0_DFT_THRES); ++ mt7620_i2s_write(i2s, I2S_REG_CFG1, 0); ++ mt7620_i2s_write(i2s, I2S_REG_INT_EN, 0); ++ ++ mt7620_i2s_write(i2s, I2S_REG_DIVINT, i2sMaster_inclk_int[7]); ++ mt7620_i2s_write(i2s, I2S_REG_DIVCMP, ++ i2sMaster_inclk_comp[7] | I2S_REG_CLK_EN); ++ ++ return 0; ++} ++ ++static int mt7620_i2s_dai_remove(struct snd_soc_dai *dai) ++{ ++ return 0; ++} ++ ++static const struct snd_soc_dai_ops mt7620_i2s_dai_ops = { ++ .startup = mt7620_i2s_startup, ++ .shutdown = mt7620_i2s_shutdown, ++ .trigger = mt7620_i2s_trigger, ++ .hw_params = mt7620_i2s_hw_params, ++ .set_fmt = mt7620_i2s_set_fmt, ++ .set_sysclk = mt7620_i2s_set_sysclk, ++}; ++ ++#define JZ4740_I2S_FMTS (SNDRV_PCM_FMTBIT_S8 | \ ++ SNDRV_PCM_FMTBIT_S16_LE) ++ ++static struct snd_soc_dai_driver mt7620_i2s_dai = { ++ .probe = mt7620_i2s_dai_probe, ++ .remove = mt7620_i2s_dai_remove, ++ .playback = { ++ .channels_min = 1, ++ .channels_max = 2, ++ .rates = SNDRV_PCM_RATE_8000_48000, ++ .formats = JZ4740_I2S_FMTS, ++ }, ++ .capture = { ++ .channels_min = 2, ++ .channels_max = 2, ++ .rates = SNDRV_PCM_RATE_8000_48000, ++ .formats = JZ4740_I2S_FMTS, ++ }, ++ .symmetric_rates = 1, ++ .ops = &mt7620_i2s_dai_ops, ++ .suspend = mt7620_i2s_suspend, ++ .resume = mt7620_i2s_resume, ++}; ++ ++static const struct snd_pcm_hardware mt7620_pcm_hardware = { ++ .info = SNDRV_PCM_INFO_MMAP | ++ SNDRV_PCM_INFO_MMAP_VALID | ++ SNDRV_PCM_INFO_INTERLEAVED | ++ SNDRV_PCM_INFO_BLOCK_TRANSFER, ++ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8, ++ .period_bytes_min = PAGE_SIZE, ++ .period_bytes_max = 64 * 1024, ++ .periods_min = 2, ++ .periods_max = 128, ++ .buffer_bytes_max = 128 * 1024, ++ .fifo_size = 32, ++}; ++ ++static const struct snd_dmaengine_pcm_config mt7620_dmaengine_pcm_config = { ++ .prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config, ++ .pcm_hardware = &mt7620_pcm_hardware, ++ .prealloc_buffer_size = 256 * PAGE_SIZE, ++}; ++ ++static const struct snd_soc_component_driver mt7620_i2s_component = { ++ .name = "mt7620-i2s", ++}; ++ ++static int mt7620_i2s_dev_probe(struct platform_device *pdev) ++{ ++ struct mt7620_i2s *i2s; ++ int ret; ++ ++ snd_dmaengine_pcm_register(&pdev->dev, ++ &mt7620_dmaengine_pcm_config, ++ SND_DMAENGINE_PCM_FLAG_COMPAT); ++ ++ i2s = kzalloc(sizeof(*i2s), GFP_KERNEL); ++ if (!i2s) ++ return -ENOMEM; ++ ++ i2s->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (!i2s->mem) { ++ ret = -ENOENT; ++ goto err_free; ++ } ++ ++ i2s->mem = request_mem_region(i2s->mem->start, resource_size(i2s->mem), ++ pdev->name); ++ if (!i2s->mem) { ++ ret = -EBUSY; ++ goto err_free; ++ } ++ ++ i2s->base = ioremap_nocache(i2s->mem->start, resource_size(i2s->mem)); ++ if (!i2s->base) { ++ ret = -EBUSY; ++ goto err_release_mem_region; ++ } ++ ++ i2s->phys_base = 
i2s->mem->start; ++ ++ platform_set_drvdata(pdev, i2s); ++ ret = snd_soc_register_component(&pdev->dev, &mt7620_i2s_component, ++ &mt7620_i2s_dai, 1); ++ ++ if (!ret) { ++ dev_err(&pdev->dev, "loaded\n"); ++ return ret; ++ } ++ ++ dev_err(&pdev->dev, "Failed to register DAI\n"); ++ iounmap(i2s->base); ++ ++err_release_mem_region: ++ release_mem_region(i2s->mem->start, resource_size(i2s->mem)); ++err_free: ++ kfree(i2s); ++ ++ return ret; ++} ++ ++static int mt7620_i2s_dev_remove(struct platform_device *pdev) ++{ ++ struct mt7620_i2s *i2s = platform_get_drvdata(pdev); ++ ++ snd_soc_unregister_component(&pdev->dev); ++ ++ iounmap(i2s->base); ++ release_mem_region(i2s->mem->start, resource_size(i2s->mem)); ++ ++ kfree(i2s); ++ ++ snd_dmaengine_pcm_unregister(&pdev->dev); ++ ++ return 0; ++} ++ ++static const struct of_device_id mt7620_i2s_match[] = { ++ { .compatible = "ralink,mt7620a-i2s" }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(of, mt7620_i2s_match); ++ ++static struct platform_driver mt7620_i2s_driver = { ++ .probe = mt7620_i2s_dev_probe, ++ .remove = mt7620_i2s_dev_remove, ++ .driver = { ++ .name = "mt7620-i2s", ++ .owner = THIS_MODULE, ++ .of_match_table = mt7620_i2s_match, ++ }, ++}; ++ ++module_platform_driver(mt7620_i2s_driver); ++ ++MODULE_AUTHOR("Lars-Peter Clausen, "); ++MODULE_DESCRIPTION("Ingenic JZ4740 SoC I2S driver"); ++MODULE_LICENSE("GPL"); ++MODULE_ALIAS("platform:mt7620-i2s"); +--- /dev/null ++++ b/sound/soc/ralink/mt7620-wm8960.c +@@ -0,0 +1,125 @@ ++/* ++ * Copyright (C) 2009, Lars-Peter Clausen ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 675 Mass Ave, Cambridge, MA 02139, USA. 
++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++ ++static const struct snd_soc_dapm_widget mt7620_wm8960_widgets[] = { ++ SND_SOC_DAPM_SPK("Speaker", NULL), ++}; ++ ++static const struct snd_soc_dapm_route mt7620_wm8960_routes[] = { ++ {"Speaker", NULL, "HP_L"}, ++ {"Speaker", NULL, "HP_R"}, ++}; ++ ++#define MT7620_DAIFMT (SND_SOC_DAIFMT_I2S | \ ++ SND_SOC_DAIFMT_NB_NF | \ ++ SND_SOC_DAIFMT_CBM_CFM) ++ ++static int mt7620_wm8960_codec_init(struct snd_soc_pcm_runtime *rtd) ++{ ++ struct snd_soc_codec *codec = rtd->codec; ++ struct snd_soc_dai *cpu_dai = rtd->cpu_dai; ++ struct snd_soc_dapm_context *dapm = &codec->dapm; ++ int ret; ++ ++ snd_soc_dapm_enable_pin(dapm, "HP_L"); ++ snd_soc_dapm_enable_pin(dapm, "HP_R"); ++ ++ ret = snd_soc_dai_set_fmt(cpu_dai, MT7620_DAIFMT); ++ if (ret < 0) { ++ dev_err(codec->dev, "Failed to set cpu dai format: %d\n", ret); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static struct snd_soc_dai_link mt7620_wm8960_dai = { ++ .name = "mt7620", ++ .stream_name = "mt7620", ++ .init = mt7620_wm8960_codec_init, ++ .codec_dai_name = "wm8960-hifi", ++}; ++ ++static struct snd_soc_card mt7620_wm8960 = { ++ .name = "mt7620-wm8960", ++ .owner = THIS_MODULE, ++ .dai_link = &mt7620_wm8960_dai, ++ .num_links = 1, ++ ++ .dapm_widgets = mt7620_wm8960_widgets, ++ .num_dapm_widgets = ARRAY_SIZE(mt7620_wm8960_widgets), ++ .dapm_routes = mt7620_wm8960_routes, ++ .num_dapm_routes = ARRAY_SIZE(mt7620_wm8960_routes), ++}; ++ ++static int mt7620_wm8960_probe(struct platform_device *pdev) ++{ ++ struct device_node *np = pdev->dev.of_node; ++ struct snd_soc_card *card = &mt7620_wm8960; ++ int ret; ++ ++ card->dev = &pdev->dev; ++ ++ mt7620_wm8960_dai.cpu_of_node = of_parse_phandle(np, "cpu-dai", 0); ++ mt7620_wm8960_dai.codec_of_node = of_parse_phandle(np, "codec-dai", 0); ++ mt7620_wm8960_dai.platform_of_node = mt7620_wm8960_dai.cpu_of_node; ++ ++ ret = snd_soc_register_card(card); ++ if (ret) { ++ dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n", ++ ret); ++ } ++ return ret; ++} ++ ++static int mt7620_wm8960_remove(struct platform_device *pdev) ++{ ++ struct snd_soc_card *card = platform_get_drvdata(pdev); ++ ++ snd_soc_unregister_card(card); ++ return 0; ++} ++ ++static const struct of_device_id mt7620_audio_match[] = { ++ { .compatible = "ralink,wm8960-audio" }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(of, mt7620_audio_match); ++ ++static struct platform_driver mt7620_wm8960_driver = { ++ .driver = { ++ .name = "wm8960-audio", ++ .owner = THIS_MODULE, ++ .of_match_table = mt7620_audio_match, ++ }, ++ .probe = mt7620_wm8960_probe, ++ .remove = mt7620_wm8960_remove, ++}; ++ ++module_platform_driver(mt7620_wm8960_driver); ++ ++MODULE_AUTHOR("Lars-Peter Clausen "); ++MODULE_DESCRIPTION("ALSA SoC QI LB60 Audio support"); ++MODULE_LICENSE("GPL v2"); ++MODULE_ALIAS("platform:qi-lb60-audio"); +--- a/sound/soc/soc-io.c ++++ b/sound/soc/soc-io.c +@@ -19,7 +19,6 @@ + + #include + +-#ifdef CONFIG_REGMAP + static int hw_write(struct snd_soc_codec *codec, unsigned int reg, + unsigned int value) + { +@@ -161,12 +160,3 @@ int snd_soc_codec_set_cache_io(struct sn + return PTR_RET(codec->control_data); + } + EXPORT_SYMBOL_GPL(snd_soc_codec_set_cache_io); +-#else +-int snd_soc_codec_set_cache_io(struct snd_soc_codec *codec, +- int addr_bits, int data_bits, +- enum snd_soc_control_type control) +-{ +- return -ENOTSUPP; +-} +-EXPORT_SYMBOL_GPL(snd_soc_codec_set_cache_io); +-#endif diff --git 
a/target/linux/ramips/patches-3.10/0113-USB-add-OHCI-EHCI-OF-binding.patch b/target/linux/ramips/patches-3.10/0113-USB-add-OHCI-EHCI-OF-binding.patch deleted file mode 100644 index f963139a2a..0000000000 --- a/target/linux/ramips/patches-3.10/0113-USB-add-OHCI-EHCI-OF-binding.patch +++ /dev/null @@ -1,175 +0,0 @@ -From 40b9d3026ed0b3bcd59f90391195df5b2adabad2 Mon Sep 17 00:00:00 2001 -From: John Crispin -Date: Sun, 14 Jul 2013 23:34:53 +0200 -Subject: [PATCH 19/33] USB: add OHCI/EHCI OF binding - -based on f3bc64d6d1f21c1b92d75f233a37b75d77af6963 - -Signed-off-by: John Crispin ---- - arch/mips/ralink/Kconfig | 2 ++ - drivers/usb/Makefile | 3 ++- - drivers/usb/host/ehci-platform.c | 19 +++++++++++++++---- - drivers/usb/host/ohci-platform.c | 37 ++++++++++++++++++++++++++++++++----- - 4 files changed, 51 insertions(+), 10 deletions(-) - ---- a/drivers/usb/Makefile -+++ b/drivers/usb/Makefile -@@ -10,6 +10,8 @@ obj-$(CONFIG_USB_DWC3) += dwc3/ - - obj-$(CONFIG_USB_MON) += mon/ - -+obj-$(CONFIG_USB_PHY) += phy/ -+ - obj-$(CONFIG_PCI) += host/ - obj-$(CONFIG_USB_EHCI_HCD) += host/ - obj-$(CONFIG_USB_ISP116X_HCD) += host/ -@@ -44,7 +46,6 @@ obj-$(CONFIG_USB_MICROTEK) += image/ - obj-$(CONFIG_USB_SERIAL) += serial/ - - obj-$(CONFIG_USB) += misc/ --obj-$(CONFIG_USB_PHY) += phy/ - obj-$(CONFIG_EARLY_PRINTK_DBGP) += early/ - - obj-$(CONFIG_USB_ATM) += atm/ ---- a/drivers/usb/host/ehci-platform.c -+++ b/drivers/usb/host/ehci-platform.c -@@ -29,6 +29,8 @@ - #include - #include - #include -+#include -+#include - - #include "ehci.h" - -@@ -118,6 +120,15 @@ static int ehci_platform_probe(struct pl - hcd->rsrc_start = res_mem->start; - hcd->rsrc_len = resource_size(res_mem); - -+#ifdef CONFIG_USB_PHY -+ hcd->phy = devm_usb_get_phy(&dev->dev, USB_PHY_TYPE_USB2); -+ if (!IS_ERR_OR_NULL(hcd->phy)) { -+ otg_set_host(hcd->phy->otg, -+ &hcd->self); -+ usb_phy_init(hcd->phy); -+ } -+#endif -+ - hcd->regs = devm_ioremap_resource(&dev->dev, res_mem); - if (IS_ERR(hcd->regs)) { - err = PTR_ERR(hcd->regs); -@@ -155,6 +166,9 @@ static int ehci_platform_remove(struct p - if (pdata == &ehci_platform_defaults) - dev->dev.platform_data = NULL; - -+ if (pdata == &ehci_platform_defaults) -+ dev->dev.platform_data = NULL; -+ - return 0; - } - -@@ -199,9 +213,8 @@ static int ehci_platform_resume(struct d - #define ehci_platform_resume NULL - #endif /* CONFIG_PM */ - --static const struct of_device_id vt8500_ehci_ids[] = { -- { .compatible = "via,vt8500-ehci", }, -- { .compatible = "wm,prizm-ehci", }, -+static const struct of_device_id ralink_ehci_ids[] = { -+ { .compatible = "ralink,rt3xxx-ehci", }, - {} - }; - -@@ -225,7 +238,7 @@ static struct platform_driver ehci_platf - .owner = THIS_MODULE, - .name = "ehci-platform", - .pm = &ehci_platform_pm_ops, -- .of_match_table = of_match_ptr(vt8500_ehci_ids), -+ .of_match_table = of_match_ptr(ralink_ehci_ids), - } - }; - ---- a/drivers/usb/host/ohci-platform.c -+++ b/drivers/usb/host/ohci-platform.c -@@ -16,6 +16,10 @@ - #include - #include - #include -+#include -+#include -+ -+static struct usb_ohci_pdata ohci_platform_defaults; - - static int ohci_platform_reset(struct usb_hcd *hcd) - { -@@ -88,14 +92,22 @@ static int ohci_platform_probe(struct pl - { - struct usb_hcd *hcd; - struct resource *res_mem; -- struct usb_ohci_pdata *pdata = dev->dev.platform_data; -+ struct usb_ohci_pdata *pdata; - int irq; - int err = -ENOMEM; - -- if (!pdata) { -- WARN_ON(1); -- return -ENODEV; -- } -+ /* -+ * use reasonable defaults so platforms don't have to provide these. 
-+ * with DT probing on ARM, none of these are set. -+ */ -+ if (!dev->dev.platform_data) -+ dev->dev.platform_data = &ohci_platform_defaults; -+ if (!dev->dev.dma_mask) -+ dev->dev.dma_mask = &dev->dev.coherent_dma_mask; -+ if (!dev->dev.coherent_dma_mask) -+ dev->dev.coherent_dma_mask = DMA_BIT_MASK(32); -+ -+ pdata = dev->dev.platform_data; - - if (usb_disabled()) - return -ENODEV; -@@ -128,6 +140,12 @@ static int ohci_platform_probe(struct pl - hcd->rsrc_start = res_mem->start; - hcd->rsrc_len = resource_size(res_mem); - -+#ifdef CONFIG_USB_PHY -+ hcd->phy = devm_usb_get_phy(&dev->dev, USB_PHY_TYPE_USB2); -+ if (!IS_ERR_OR_NULL(hcd->phy)) -+ usb_phy_init(hcd->phy); -+#endif -+ - hcd->regs = devm_ioremap_resource(&dev->dev, res_mem); - if (IS_ERR(hcd->regs)) { - err = PTR_ERR(hcd->regs); -@@ -162,6 +180,9 @@ static int ohci_platform_remove(struct p - if (pdata->power_off) - pdata->power_off(dev); - -+ if (pdata == &ohci_platform_defaults) -+ dev->dev.platform_data = NULL; -+ - return 0; - } - -@@ -201,6 +222,11 @@ static int ohci_platform_resume(struct d - #define ohci_platform_resume NULL - #endif /* CONFIG_PM */ - -+static const struct of_device_id ralink_ohci_ids[] = { -+ { .compatible = "ralink,rt3xxx-ohci", }, -+ {} -+}; -+ - static const struct platform_device_id ohci_platform_table[] = { - { "ohci-platform", 0 }, - { } -@@ -221,5 +247,6 @@ static struct platform_driver ohci_platf - .owner = THIS_MODULE, - .name = "ohci-platform", - .pm = &ohci_platform_pm_ops, -+ .of_match_table = of_match_ptr(ralink_ohci_ids), - } - }; diff --git a/target/linux/ramips/patches-3.10/0113-pinctrl-ralink-add-pinctrl-driver.patch b/target/linux/ramips/patches-3.10/0113-pinctrl-ralink-add-pinctrl-driver.patch new file mode 100644 index 0000000000..b3750590e7 --- /dev/null +++ b/target/linux/ramips/patches-3.10/0113-pinctrl-ralink-add-pinctrl-driver.patch @@ -0,0 +1,1311 @@ +From 47bbf432252b39361728c7685292dc9f889e6537 Mon Sep 17 00:00:00 2001 +From: John Crispin +Date: Mon, 19 Aug 2013 13:49:52 +0200 +Subject: [PATCH 113/133] pinctrl: ralink: add pinctrl driver + +Signed-off-by: John Crispin +--- + arch/mips/Kconfig | 2 + + arch/mips/include/asm/mach-ralink/mt7620.h | 41 ++- + arch/mips/include/asm/mach-ralink/pinmux.h | 53 ++++ + arch/mips/include/asm/mach-ralink/rt305x.h | 34 +- + arch/mips/include/asm/mach-ralink/rt3883.h | 16 +- + arch/mips/ralink/common.h | 19 -- + arch/mips/ralink/mt7620.c | 161 ++++------ + arch/mips/ralink/rt305x.c | 146 ++++----- + arch/mips/ralink/rt3883.c | 173 +++-------- + drivers/pinctrl/Kconfig | 5 + + drivers/pinctrl/Makefile | 1 + + drivers/pinctrl/pinctrl-rt2880.c | 467 ++++++++++++++++++++++++++++ + 12 files changed, 740 insertions(+), 378 deletions(-) + create mode 100644 arch/mips/include/asm/mach-ralink/pinmux.h + create mode 100644 drivers/pinctrl/pinctrl-rt2880.c + +--- a/arch/mips/Kconfig ++++ b/arch/mips/Kconfig +@@ -446,6 +446,8 @@ config RALINK + select HAVE_MACH_CLKDEV + select CLKDEV_LOOKUP + select ARCH_REQUIRE_GPIOLIB ++ select PINCTRL ++ select PINCTRL_RT2880 + + config SGI_IP22 + bool "SGI IP22 (Indy/Indigo2)" +--- a/arch/mips/include/asm/mach-ralink/mt7620.h ++++ b/arch/mips/include/asm/mach-ralink/mt7620.h +@@ -56,7 +56,6 @@ + #define MT7620_DDR2_SIZE_MIN 32 + #define MT7620_DDR2_SIZE_MAX 256 + +-#define MT7620_GPIO_MODE_I2C BIT(0) + #define MT7620_GPIO_MODE_UART0_SHIFT 2 + #define MT7620_GPIO_MODE_UART0_MASK 0x7 + #define MT7620_GPIO_MODE_UART0(x) ((x) << MT7620_GPIO_MODE_UART0_SHIFT) +@@ -68,16 +67,36 @@ + #define MT7620_GPIO_MODE_GPIO_UARTF 0x5 
+ #define MT7620_GPIO_MODE_GPIO_I2S 0x6 + #define MT7620_GPIO_MODE_GPIO 0x7 +-#define MT7620_GPIO_MODE_UART1 BIT(5) +-#define MT7620_GPIO_MODE_MDIO BIT(8) +-#define MT7620_GPIO_MODE_RGMII1 BIT(9) +-#define MT7620_GPIO_MODE_RGMII2 BIT(10) +-#define MT7620_GPIO_MODE_SPI BIT(11) +-#define MT7620_GPIO_MODE_SPI_REF_CLK BIT(12) +-#define MT7620_GPIO_MODE_WLED BIT(13) +-#define MT7620_GPIO_MODE_JTAG BIT(15) +-#define MT7620_GPIO_MODE_EPHY BIT(15) +-#define MT7620_GPIO_MODE_WDT BIT(22) ++ ++#define MT7620_GPIO_MODE_NAND 0 ++#define MT7620_GPIO_MODE_SD 1 ++#define MT7620_GPIO_MODE_ND_SD_GPIO 2 ++#define MT7620_GPIO_MODE_ND_SD_MASK 0x3 ++#define MT7620_GPIO_MODE_ND_SD_SHIFT 18 ++ ++#define MT7620_GPIO_MODE_PCIE_RST 0 ++#define MT7620_GPIO_MODE_PCIE_REF 1 ++#define MT7620_GPIO_MODE_PCIE_GPIO 2 ++#define MT7620_GPIO_MODE_PCIE_MASK 0x3 ++#define MT7620_GPIO_MODE_PCIE_SHIFT 16 ++ ++#define MT7620_GPIO_MODE_WDT_RST 0 ++#define MT7620_GPIO_MODE_WDT_REF 1 ++#define MT7620_GPIO_MODE_WDT_GPIO 2 ++#define MT7620_GPIO_MODE_WDT_MASK 0x3 ++#define MT7620_GPIO_MODE_WDT_SHIFT 21 ++ ++#define MT7620_GPIO_MODE_I2C 0 ++#define MT7620_GPIO_MODE_UART1 5 ++#define MT7620_GPIO_MODE_MDIO 8 ++#define MT7620_GPIO_MODE_RGMII1 9 ++#define MT7620_GPIO_MODE_RGMII2 10 ++#define MT7620_GPIO_MODE_SPI 11 ++#define MT7620_GPIO_MODE_SPI_REF_CLK 12 ++#define MT7620_GPIO_MODE_WLED 13 ++#define MT7620_GPIO_MODE_JTAG 15 ++#define MT7620_GPIO_MODE_EPHY 15 ++#define MT7620_GPIO_MODE_PA 20 + + static inline int mt7620_get_eco(void) + { +--- /dev/null ++++ b/arch/mips/include/asm/mach-ralink/pinmux.h +@@ -0,0 +1,53 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * publishhed by the Free Software Foundation. 
++ * ++ * Copyright (C) 2012 John Crispin ++ */ ++ ++#ifndef _RT288X_PINMUX_H__ ++#define _RT288X_PINMUX_H__ ++ ++#define FUNC(name, value, pin_first, pin_count) { name, value, pin_first, pin_count } ++#define GRP(_name, _func, _mask, _shift) \ ++ { .name = _name, .mask = _mask, .shift = _shift, \ ++ .func = _func, .gpio = _mask, \ ++ .func_count = ARRAY_SIZE(_func) } ++ ++#define GRP_G(_name, _func, _mask, _gpio, _shift) \ ++ { .name = _name, .mask = _mask, .shift = _shift, \ ++ .func = _func, .gpio = _gpio, \ ++ .func_count = ARRAY_SIZE(_func) } ++ ++struct rt2880_pmx_group; ++ ++struct rt2880_pmx_func { ++ const char *name; ++ const char value; ++ ++ int pin_first; ++ int pin_count; ++ int *pins; ++ ++ int *groups; ++ int group_count; ++ ++ int enabled; ++}; ++ ++struct rt2880_pmx_group { ++ const char *name; ++ int enabled; ++ ++ const u32 shift; ++ const char mask; ++ const char gpio; ++ ++ struct rt2880_pmx_func *func; ++ int func_count; ++}; ++ ++extern struct rt2880_pmx_group *rt2880_pinmux_data; ++ ++#endif +--- a/arch/mips/include/asm/mach-ralink/rt305x.h ++++ b/arch/mips/include/asm/mach-ralink/rt305x.h +@@ -125,24 +125,28 @@ static inline int soc_is_rt5350(void) + #define RT305X_GPIO_GE0_TXD0 40 + #define RT305X_GPIO_GE0_RXCLK 51 + +-#define RT305X_GPIO_MODE_I2C BIT(0) +-#define RT305X_GPIO_MODE_SPI BIT(1) + #define RT305X_GPIO_MODE_UART0_SHIFT 2 + #define RT305X_GPIO_MODE_UART0_MASK 0x7 + #define RT305X_GPIO_MODE_UART0(x) ((x) << RT305X_GPIO_MODE_UART0_SHIFT) +-#define RT305X_GPIO_MODE_UARTF 0x0 +-#define RT305X_GPIO_MODE_PCM_UARTF 0x1 +-#define RT305X_GPIO_MODE_PCM_I2S 0x2 +-#define RT305X_GPIO_MODE_I2S_UARTF 0x3 +-#define RT305X_GPIO_MODE_PCM_GPIO 0x4 +-#define RT305X_GPIO_MODE_GPIO_UARTF 0x5 +-#define RT305X_GPIO_MODE_GPIO_I2S 0x6 +-#define RT305X_GPIO_MODE_GPIO 0x7 +-#define RT305X_GPIO_MODE_UART1 BIT(5) +-#define RT305X_GPIO_MODE_JTAG BIT(6) +-#define RT305X_GPIO_MODE_MDIO BIT(7) +-#define RT305X_GPIO_MODE_SDRAM BIT(8) +-#define RT305X_GPIO_MODE_RGMII BIT(9) ++#define RT305X_GPIO_MODE_UARTF 0 ++#define RT305X_GPIO_MODE_PCM_UARTF 1 ++#define RT305X_GPIO_MODE_PCM_I2S 2 ++#define RT305X_GPIO_MODE_I2S_UARTF 3 ++#define RT305X_GPIO_MODE_PCM_GPIO 4 ++#define RT305X_GPIO_MODE_GPIO_UARTF 5 ++#define RT305X_GPIO_MODE_GPIO_I2S 6 ++#define RT305X_GPIO_MODE_GPIO 7 ++ ++#define RT305X_GPIO_MODE_I2C 0 ++#define RT305X_GPIO_MODE_SPI 1 ++#define RT305X_GPIO_MODE_UART1 5 ++#define RT305X_GPIO_MODE_JTAG 6 ++#define RT305X_GPIO_MODE_MDIO 7 ++#define RT305X_GPIO_MODE_SDRAM 8 ++#define RT305X_GPIO_MODE_RGMII 9 ++#define RT5350_GPIO_MODE_PHY_LED 14 ++#define RT3352_GPIO_MODE_LNA 18 ++#define RT3352_GPIO_MODE_PA 20 + + #define RT3352_SYSC_REG_SYSCFG0 0x010 + #define RT3352_SYSC_REG_SYSCFG1 0x014 +--- a/arch/mips/include/asm/mach-ralink/rt3883.h ++++ b/arch/mips/include/asm/mach-ralink/rt3883.h +@@ -112,8 +112,6 @@ + #define RT3883_CLKCFG1_PCI_CLK_EN BIT(19) + #define RT3883_CLKCFG1_UPHY0_CLK_EN BIT(18) + +-#define RT3883_GPIO_MODE_I2C BIT(0) +-#define RT3883_GPIO_MODE_SPI BIT(1) + #define RT3883_GPIO_MODE_UART0_SHIFT 2 + #define RT3883_GPIO_MODE_UART0_MASK 0x7 + #define RT3883_GPIO_MODE_UART0(x) ((x) << RT3883_GPIO_MODE_UART0_SHIFT) +@@ -125,11 +123,15 @@ + #define RT3883_GPIO_MODE_GPIO_UARTF 0x5 + #define RT3883_GPIO_MODE_GPIO_I2S 0x6 + #define RT3883_GPIO_MODE_GPIO 0x7 +-#define RT3883_GPIO_MODE_UART1 BIT(5) +-#define RT3883_GPIO_MODE_JTAG BIT(6) +-#define RT3883_GPIO_MODE_MDIO BIT(7) +-#define RT3883_GPIO_MODE_GE1 BIT(9) +-#define RT3883_GPIO_MODE_GE2 BIT(10) ++ ++#define 
RT3883_GPIO_MODE_I2C 0 ++#define RT3883_GPIO_MODE_SPI 1 ++#define RT3883_GPIO_MODE_UART1 5 ++#define RT3883_GPIO_MODE_JTAG 6 ++#define RT3883_GPIO_MODE_MDIO 7 ++#define RT3883_GPIO_MODE_GE1 9 ++#define RT3883_GPIO_MODE_GE2 10 ++ + #define RT3883_GPIO_MODE_PCI_SHIFT 11 + #define RT3883_GPIO_MODE_PCI_MASK 0x7 + #define RT3883_GPIO_MODE_PCI (RT3883_GPIO_MODE_PCI_MASK << RT3883_GPIO_MODE_PCI_SHIFT) +--- a/arch/mips/ralink/common.h ++++ b/arch/mips/ralink/common.h +@@ -11,25 +11,6 @@ + + #define RAMIPS_SYS_TYPE_LEN 32 + +-struct ralink_pinmux_grp { +- const char *name; +- u32 mask; +- int gpio_first; +- int gpio_last; +-}; +- +-struct ralink_pinmux { +- struct ralink_pinmux_grp *mode; +- struct ralink_pinmux_grp *uart; +- int uart_shift; +- u32 uart_mask; +- void (*wdt_reset)(void); +- struct ralink_pinmux_grp *pci; +- int pci_shift; +- u32 pci_mask; +-}; +-extern struct ralink_pinmux rt_gpio_pinmux; +- + struct ralink_soc_info { + unsigned char sys_type[RAMIPS_SYS_TYPE_LEN]; + unsigned char *compatible; +--- a/arch/mips/ralink/mt7620.c ++++ b/arch/mips/ralink/mt7620.c +@@ -17,6 +17,7 @@ + #include + #include + #include ++#include + + #include "common.h" + +@@ -48,118 +49,58 @@ static int dram_type; + /* the pll dividers */ + static u32 mt7620_clk_divider[] = { 2, 3, 4, 8 }; + +-static struct ralink_pinmux_grp mode_mux[] = { +- { +- .name = "i2c", +- .mask = MT7620_GPIO_MODE_I2C, +- .gpio_first = 1, +- .gpio_last = 2, +- }, { +- .name = "spi", +- .mask = MT7620_GPIO_MODE_SPI, +- .gpio_first = 3, +- .gpio_last = 6, +- }, { +- .name = "uartlite", +- .mask = MT7620_GPIO_MODE_UART1, +- .gpio_first = 15, +- .gpio_last = 16, +- }, { +- .name = "wdt", +- .mask = MT7620_GPIO_MODE_WDT, +- .gpio_first = 17, +- .gpio_last = 17, +- }, { +- .name = "mdio", +- .mask = MT7620_GPIO_MODE_MDIO, +- .gpio_first = 22, +- .gpio_last = 23, +- }, { +- .name = "rgmii1", +- .mask = MT7620_GPIO_MODE_RGMII1, +- .gpio_first = 24, +- .gpio_last = 35, +- }, { +- .name = "spi refclk", +- .mask = MT7620_GPIO_MODE_SPI_REF_CLK, +- .gpio_first = 37, +- .gpio_last = 39, +- }, { +- .name = "jtag", +- .mask = MT7620_GPIO_MODE_JTAG, +- .gpio_first = 40, +- .gpio_last = 44, +- }, { +- /* shared lines with jtag */ +- .name = "ephy", +- .mask = MT7620_GPIO_MODE_EPHY, +- .gpio_first = 40, +- .gpio_last = 44, +- }, { +- .name = "nand", +- .mask = MT7620_GPIO_MODE_JTAG, +- .gpio_first = 45, +- .gpio_last = 59, +- }, { +- .name = "rgmii2", +- .mask = MT7620_GPIO_MODE_RGMII2, +- .gpio_first = 60, +- .gpio_last = 71, +- }, { +- .name = "wled", +- .mask = MT7620_GPIO_MODE_WLED, +- .gpio_first = 72, +- .gpio_last = 72, +- }, {0} ++static struct rt2880_pmx_func i2c_grp[] = { FUNC("i2c", 0, 1, 2) }; ++static struct rt2880_pmx_func spi_grp[] = { FUNC("spi", 0, 3, 4) }; ++static struct rt2880_pmx_func uartlite_grp[] = { FUNC("uartlite", 0, 15, 2) }; ++static struct rt2880_pmx_func mdio_grp[] = { FUNC("mdio", 0, 22, 2) }; ++static struct rt2880_pmx_func rgmii1_grp[] = { FUNC("rgmii1", 0, 24, 12) }; ++static struct rt2880_pmx_func refclk_grp[] = { FUNC("spi refclk", 0, 37, 3) }; ++static struct rt2880_pmx_func ephy_grp[] = { FUNC("ephy", 0, 40, 5) }; ++static struct rt2880_pmx_func rgmii2_grp[] = { FUNC("rgmii2", 0, 60, 12) }; ++static struct rt2880_pmx_func wled_grp[] = { FUNC("wled", 0, 72, 1) }; ++static struct rt2880_pmx_func pa_grp[] = { FUNC("pa", 0, 18, 4) }; ++static struct rt2880_pmx_func uartf_grp[] = { ++ FUNC("uartf", MT7620_GPIO_MODE_UARTF, 7, 8), ++ FUNC("pcm uartf", MT7620_GPIO_MODE_PCM_UARTF, 7, 8), ++ FUNC("pcm i2s", 
MT7620_GPIO_MODE_PCM_I2S, 7, 8), ++ FUNC("i2s uartf", MT7620_GPIO_MODE_I2S_UARTF, 7, 8), ++ FUNC("pcm gpio", MT7620_GPIO_MODE_PCM_GPIO, 11, 4), ++ FUNC("gpio uartf", MT7620_GPIO_MODE_GPIO_UARTF, 7, 4), ++ FUNC("gpio i2s", MT7620_GPIO_MODE_GPIO_I2S, 7, 4), + }; +- +-static struct ralink_pinmux_grp uart_mux[] = { +- { +- .name = "uartf", +- .mask = MT7620_GPIO_MODE_UARTF, +- .gpio_first = 7, +- .gpio_last = 14, +- }, { +- .name = "pcm uartf", +- .mask = MT7620_GPIO_MODE_PCM_UARTF, +- .gpio_first = 7, +- .gpio_last = 14, +- }, { +- .name = "pcm i2s", +- .mask = MT7620_GPIO_MODE_PCM_I2S, +- .gpio_first = 7, +- .gpio_last = 14, +- }, { +- .name = "i2s uartf", +- .mask = MT7620_GPIO_MODE_I2S_UARTF, +- .gpio_first = 7, +- .gpio_last = 14, +- }, { +- .name = "pcm gpio", +- .mask = MT7620_GPIO_MODE_PCM_GPIO, +- .gpio_first = 11, +- .gpio_last = 14, +- }, { +- .name = "gpio uartf", +- .mask = MT7620_GPIO_MODE_GPIO_UARTF, +- .gpio_first = 7, +- .gpio_last = 10, +- }, { +- .name = "gpio i2s", +- .mask = MT7620_GPIO_MODE_GPIO_I2S, +- .gpio_first = 7, +- .gpio_last = 10, +- }, { +- .name = "gpio", +- .mask = MT7620_GPIO_MODE_GPIO, +- }, {0} ++static struct rt2880_pmx_func wdt_grp[] = { ++ FUNC("wdt rst", 0, 17, 1), ++ FUNC("wdt refclk", 0, 17, 1), ++ }; ++static struct rt2880_pmx_func pcie_rst_grp[] = { ++ FUNC("pcie rst", MT7620_GPIO_MODE_PCIE_RST, 36, 1), ++ FUNC("pcie refclk", MT7620_GPIO_MODE_PCIE_REF, 36, 1) ++}; ++static struct rt2880_pmx_func nd_sd_grp[] = { ++ FUNC("nand", MT7620_GPIO_MODE_NAND, 45, 15), ++ FUNC("sd", MT7620_GPIO_MODE_SD, 45, 15) + }; + +-struct ralink_pinmux rt_gpio_pinmux = { +- .mode = mode_mux, +- .uart = uart_mux, +- .uart_shift = MT7620_GPIO_MODE_UART0_SHIFT, +- .uart_mask = MT7620_GPIO_MODE_UART0_MASK, ++static struct rt2880_pmx_group mt7620a_pinmux_data[] = { ++ GRP("i2c", i2c_grp, 1, MT7620_GPIO_MODE_I2C), ++ GRP("uartf", uartf_grp, MT7620_GPIO_MODE_UART0_MASK, ++ MT7620_GPIO_MODE_UART0_SHIFT), ++ GRP("spi", spi_grp, 1, MT7620_GPIO_MODE_SPI), ++ GRP("uartlite", uartlite_grp, 1, MT7620_GPIO_MODE_UART1), ++ GRP_G("wdt", wdt_grp, MT7620_GPIO_MODE_WDT_MASK, ++ MT7620_GPIO_MODE_WDT_GPIO, MT7620_GPIO_MODE_WDT_SHIFT), ++ GRP("mdio", mdio_grp, 1, MT7620_GPIO_MODE_MDIO), ++ GRP("rgmii1", rgmii1_grp, 1, MT7620_GPIO_MODE_RGMII1), ++ GRP("spi refclk", refclk_grp, 1, MT7620_GPIO_MODE_SPI_REF_CLK), ++ GRP_G("pcie", pcie_rst_grp, MT7620_GPIO_MODE_PCIE_MASK, ++ MT7620_GPIO_MODE_PCIE_GPIO, MT7620_GPIO_MODE_PCIE_SHIFT), ++ GRP_G("nd_sd", nd_sd_grp, MT7620_GPIO_MODE_ND_SD_MASK, ++ MT7620_GPIO_MODE_ND_SD_GPIO, MT7620_GPIO_MODE_ND_SD_SHIFT), ++ GRP("rgmii2", rgmii2_grp, 1, MT7620_GPIO_MODE_RGMII2), ++ GRP("wled", wled_grp, 1, MT7620_GPIO_MODE_WLED), ++ GRP("ephy", ephy_grp, 1, MT7620_GPIO_MODE_EPHY), ++ GRP("pa", pa_grp, 1, MT7620_GPIO_MODE_PA), ++ { 0 } + }; + + void __init ralink_clk_init(void) +@@ -286,4 +227,6 @@ void prom_soc_init(struct ralink_soc_inf + (pmu0 & PMU_SW_SET) ? ("sw") : ("hw")); + pr_info("Digital PMU set to %s control\n", + (pmu1 & DIG_SW_SEL) ? 
("sw") : ("hw")); ++ ++ rt2880_pinmux_data = mt7620a_pinmux_data; + } +--- a/arch/mips/ralink/rt305x.c ++++ b/arch/mips/ralink/rt305x.c +@@ -17,90 +17,71 @@ + #include + #include + #include ++#include + + #include "common.h" + + enum rt305x_soc_type rt305x_soc; + +-static struct ralink_pinmux_grp mode_mux[] = { +- { +- .name = "i2c", +- .mask = RT305X_GPIO_MODE_I2C, +- .gpio_first = RT305X_GPIO_I2C_SD, +- .gpio_last = RT305X_GPIO_I2C_SCLK, +- }, { +- .name = "spi", +- .mask = RT305X_GPIO_MODE_SPI, +- .gpio_first = RT305X_GPIO_SPI_EN, +- .gpio_last = RT305X_GPIO_SPI_CLK, +- }, { +- .name = "uartlite", +- .mask = RT305X_GPIO_MODE_UART1, +- .gpio_first = RT305X_GPIO_UART1_TXD, +- .gpio_last = RT305X_GPIO_UART1_RXD, +- }, { +- .name = "jtag", +- .mask = RT305X_GPIO_MODE_JTAG, +- .gpio_first = RT305X_GPIO_JTAG_TDO, +- .gpio_last = RT305X_GPIO_JTAG_TDI, +- }, { +- .name = "mdio", +- .mask = RT305X_GPIO_MODE_MDIO, +- .gpio_first = RT305X_GPIO_MDIO_MDC, +- .gpio_last = RT305X_GPIO_MDIO_MDIO, +- }, { +- .name = "sdram", +- .mask = RT305X_GPIO_MODE_SDRAM, +- .gpio_first = RT305X_GPIO_SDRAM_MD16, +- .gpio_last = RT305X_GPIO_SDRAM_MD31, +- }, { +- .name = "rgmii", +- .mask = RT305X_GPIO_MODE_RGMII, +- .gpio_first = RT305X_GPIO_GE0_TXD0, +- .gpio_last = RT305X_GPIO_GE0_RXCLK, +- }, {0} ++static struct rt2880_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) }; ++static struct rt2880_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) }; ++static struct rt2880_pmx_func uartf_func[] = { ++ FUNC("uartf", RT305X_GPIO_MODE_UARTF, 7, 8), ++ FUNC("pcm uartf", RT305X_GPIO_MODE_PCM_UARTF, 7, 8), ++ FUNC("pcm i2s", RT305X_GPIO_MODE_PCM_I2S, 7, 8), ++ FUNC("i2s uartf", RT305X_GPIO_MODE_I2S_UARTF, 7, 8), ++ FUNC("pcm gpio", RT305X_GPIO_MODE_PCM_GPIO, 11, 4), ++ FUNC("gpio uartf", RT305X_GPIO_MODE_GPIO_UARTF, 7, 4), ++ FUNC("gpio i2s", RT305X_GPIO_MODE_GPIO_I2S, 7, 4), ++}; ++static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) }; ++static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) }; ++static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) }; ++static struct rt2880_pmx_func rt5350_led_func[] = { FUNC("led", 0, 22, 5) }; ++static struct rt2880_pmx_func sdram_func[] = { FUNC("sdram", 0, 24, 16) }; ++static struct rt2880_pmx_func rt3352_rgmii_func[] = { FUNC("rgmii", 0, 24, 12) }; ++static struct rt2880_pmx_func rgmii_func[] = { FUNC("rgmii", 0, 40, 12) }; ++static struct rt2880_pmx_func rt3352_lna_func[] = { FUNC("lna", 0, 36, 2) }; ++static struct rt2880_pmx_func rt3352_pa_func[] = { FUNC("pa", 0, 38, 2) }; ++static struct rt2880_pmx_func rt3352_led_func[] = { FUNC("led", 0, 40, 5) }; ++ ++static struct rt2880_pmx_group rt3050_pinmux_data[] = { ++ GRP("i2c", i2c_func, 1, RT305X_GPIO_MODE_I2C), ++ GRP("spi", spi_func, 1, RT305X_GPIO_MODE_SPI), ++ GRP("uartf", uartf_func, RT305X_GPIO_MODE_UART0_MASK, ++ RT305X_GPIO_MODE_UART0_SHIFT), ++ GRP("uartlite", uartlite_func, 1, RT305X_GPIO_MODE_UART1), ++ GRP("jtag", jtag_func, 1, RT305X_GPIO_MODE_JTAG), ++ GRP("mdio", mdio_func, 1, RT305X_GPIO_MODE_MDIO), ++ GRP("rgmii", rgmii_func, 1, RT305X_GPIO_MODE_RGMII), ++ GRP("sdram", sdram_func, 1, RT305X_GPIO_MODE_SDRAM), ++ { 0 } ++}; ++ ++static struct rt2880_pmx_group rt3352_pinmux_data[] = { ++ GRP("i2c", i2c_func, 1, RT305X_GPIO_MODE_I2C), ++ GRP("spi", spi_func, 1, RT305X_GPIO_MODE_SPI), ++ GRP("uartf", uartf_func, RT305X_GPIO_MODE_UART0_MASK, ++ RT305X_GPIO_MODE_UART0_SHIFT), ++ GRP("uartlite", uartlite_func, 1, RT305X_GPIO_MODE_UART1), ++ GRP("jtag", jtag_func, 1, 
RT305X_GPIO_MODE_JTAG), ++ GRP("mdio", mdio_func, 1, RT305X_GPIO_MODE_MDIO), ++ GRP("rgmii", rt3352_rgmii_func, 1, RT305X_GPIO_MODE_RGMII), ++ GRP("lna", rt3352_lna_func, 1, RT3352_GPIO_MODE_LNA), ++ GRP("pa", rt3352_pa_func, 1, RT3352_GPIO_MODE_PA), ++ GRP("led", rt3352_led_func, 1, RT5350_GPIO_MODE_PHY_LED), ++ { 0 } + }; + +-static struct ralink_pinmux_grp uart_mux[] = { +- { +- .name = "uartf", +- .mask = RT305X_GPIO_MODE_UARTF, +- .gpio_first = RT305X_GPIO_7, +- .gpio_last = RT305X_GPIO_14, +- }, { +- .name = "pcm uartf", +- .mask = RT305X_GPIO_MODE_PCM_UARTF, +- .gpio_first = RT305X_GPIO_7, +- .gpio_last = RT305X_GPIO_14, +- }, { +- .name = "pcm i2s", +- .mask = RT305X_GPIO_MODE_PCM_I2S, +- .gpio_first = RT305X_GPIO_7, +- .gpio_last = RT305X_GPIO_14, +- }, { +- .name = "i2s uartf", +- .mask = RT305X_GPIO_MODE_I2S_UARTF, +- .gpio_first = RT305X_GPIO_7, +- .gpio_last = RT305X_GPIO_14, +- }, { +- .name = "pcm gpio", +- .mask = RT305X_GPIO_MODE_PCM_GPIO, +- .gpio_first = RT305X_GPIO_10, +- .gpio_last = RT305X_GPIO_14, +- }, { +- .name = "gpio uartf", +- .mask = RT305X_GPIO_MODE_GPIO_UARTF, +- .gpio_first = RT305X_GPIO_7, +- .gpio_last = RT305X_GPIO_10, +- }, { +- .name = "gpio i2s", +- .mask = RT305X_GPIO_MODE_GPIO_I2S, +- .gpio_first = RT305X_GPIO_7, +- .gpio_last = RT305X_GPIO_10, +- }, { +- .name = "gpio", +- .mask = RT305X_GPIO_MODE_GPIO, +- }, {0} ++static struct rt2880_pmx_group rt5350_pinmux_data[] = { ++ GRP("i2c", i2c_func, 1, RT305X_GPIO_MODE_I2C), ++ GRP("spi", spi_func, 1, RT305X_GPIO_MODE_SPI), ++ GRP("uartf", uartf_func, RT305X_GPIO_MODE_UART0_MASK, ++ RT305X_GPIO_MODE_UART0_SHIFT), ++ GRP("uartlite", uartlite_func, 1, RT305X_GPIO_MODE_UART1), ++ GRP("jtag", jtag_func, 1, RT305X_GPIO_MODE_JTAG), ++ GRP("led", rt5350_led_func, 1, RT5350_GPIO_MODE_PHY_LED), ++ { 0 } + }; + + static void rt305x_wdt_reset(void) +@@ -114,14 +95,6 @@ static void rt305x_wdt_reset(void) + rt_sysc_w32(t, SYSC_REG_SYSTEM_CONFIG); + } + +-struct ralink_pinmux rt_gpio_pinmux = { +- .mode = mode_mux, +- .uart = uart_mux, +- .uart_shift = RT305X_GPIO_MODE_UART0_SHIFT, +- .uart_mask = RT305X_GPIO_MODE_UART0_MASK, +- .wdt_reset = rt305x_wdt_reset, +-}; +- + static unsigned long rt5350_get_mem_size(void) + { + void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT305X_SYSC_BASE); +@@ -290,11 +263,14 @@ void prom_soc_init(struct ralink_soc_inf + soc_info->mem_base = RT305X_SDRAM_BASE; + if (soc_is_rt5350()) { + soc_info->mem_size = rt5350_get_mem_size(); ++ rt2880_pinmux_data = rt5350_pinmux_data; + } else if (soc_is_rt305x() || soc_is_rt3350()) { + soc_info->mem_size_min = RT305X_MEM_SIZE_MIN; + soc_info->mem_size_max = RT305X_MEM_SIZE_MAX; ++ rt2880_pinmux_data = rt3050_pinmux_data; + } else if (soc_is_rt3352()) { + soc_info->mem_size_min = RT3352_MEM_SIZE_MIN; + soc_info->mem_size_max = RT3352_MEM_SIZE_MAX; ++ rt2880_pinmux_data = rt3352_pinmux_data; + } + } +--- a/arch/mips/ralink/rt3883.c ++++ b/arch/mips/ralink/rt3883.c +@@ -17,132 +17,50 @@ + #include + #include + #include ++#include + + #include "common.h" + +-static struct ralink_pinmux_grp mode_mux[] = { +- { +- .name = "i2c", +- .mask = RT3883_GPIO_MODE_I2C, +- .gpio_first = RT3883_GPIO_I2C_SD, +- .gpio_last = RT3883_GPIO_I2C_SCLK, +- }, { +- .name = "spi", +- .mask = RT3883_GPIO_MODE_SPI, +- .gpio_first = RT3883_GPIO_SPI_CS0, +- .gpio_last = RT3883_GPIO_SPI_MISO, +- }, { +- .name = "uartlite", +- .mask = RT3883_GPIO_MODE_UART1, +- .gpio_first = RT3883_GPIO_UART1_TXD, +- .gpio_last = RT3883_GPIO_UART1_RXD, +- }, { +- .name = "jtag", +- .mask = 
RT3883_GPIO_MODE_JTAG, +- .gpio_first = RT3883_GPIO_JTAG_TDO, +- .gpio_last = RT3883_GPIO_JTAG_TCLK, +- }, { +- .name = "mdio", +- .mask = RT3883_GPIO_MODE_MDIO, +- .gpio_first = RT3883_GPIO_MDIO_MDC, +- .gpio_last = RT3883_GPIO_MDIO_MDIO, +- }, { +- .name = "ge1", +- .mask = RT3883_GPIO_MODE_GE1, +- .gpio_first = RT3883_GPIO_GE1_TXD0, +- .gpio_last = RT3883_GPIO_GE1_RXCLK, +- }, { +- .name = "ge2", +- .mask = RT3883_GPIO_MODE_GE2, +- .gpio_first = RT3883_GPIO_GE2_TXD0, +- .gpio_last = RT3883_GPIO_GE2_RXCLK, +- }, { +- .name = "pci", +- .mask = RT3883_GPIO_MODE_PCI, +- .gpio_first = RT3883_GPIO_PCI_AD0, +- .gpio_last = RT3883_GPIO_PCI_AD31, +- }, { +- .name = "lna a", +- .mask = RT3883_GPIO_MODE_LNA_A, +- .gpio_first = RT3883_GPIO_LNA_PE_A0, +- .gpio_last = RT3883_GPIO_LNA_PE_A2, +- }, { +- .name = "lna g", +- .mask = RT3883_GPIO_MODE_LNA_G, +- .gpio_first = RT3883_GPIO_LNA_PE_G0, +- .gpio_last = RT3883_GPIO_LNA_PE_G2, +- }, {0} ++static struct rt2880_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) }; ++static struct rt2880_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) }; ++static struct rt2880_pmx_func uartf_func[] = { ++ FUNC("uartf", RT3883_GPIO_MODE_UARTF, 7, 8), ++ FUNC("pcm uartf", RT3883_GPIO_MODE_PCM_UARTF, 7, 8), ++ FUNC("pcm i2s", RT3883_GPIO_MODE_PCM_I2S, 7, 8), ++ FUNC("i2s uartf", RT3883_GPIO_MODE_I2S_UARTF, 7, 8), ++ FUNC("pcm gpio", RT3883_GPIO_MODE_PCM_GPIO, 11, 4), ++ FUNC("gpio uartf", RT3883_GPIO_MODE_GPIO_UARTF, 7, 4), ++ FUNC("gpio i2s", RT3883_GPIO_MODE_GPIO_I2S, 7, 4), + }; +- +-static struct ralink_pinmux_grp uart_mux[] = { +- { +- .name = "uartf", +- .mask = RT3883_GPIO_MODE_UARTF, +- .gpio_first = RT3883_GPIO_7, +- .gpio_last = RT3883_GPIO_14, +- }, { +- .name = "pcm uartf", +- .mask = RT3883_GPIO_MODE_PCM_UARTF, +- .gpio_first = RT3883_GPIO_7, +- .gpio_last = RT3883_GPIO_14, +- }, { +- .name = "pcm i2s", +- .mask = RT3883_GPIO_MODE_PCM_I2S, +- .gpio_first = RT3883_GPIO_7, +- .gpio_last = RT3883_GPIO_14, +- }, { +- .name = "i2s uartf", +- .mask = RT3883_GPIO_MODE_I2S_UARTF, +- .gpio_first = RT3883_GPIO_7, +- .gpio_last = RT3883_GPIO_14, +- }, { +- .name = "pcm gpio", +- .mask = RT3883_GPIO_MODE_PCM_GPIO, +- .gpio_first = RT3883_GPIO_11, +- .gpio_last = RT3883_GPIO_14, +- }, { +- .name = "gpio uartf", +- .mask = RT3883_GPIO_MODE_GPIO_UARTF, +- .gpio_first = RT3883_GPIO_7, +- .gpio_last = RT3883_GPIO_10, +- }, { +- .name = "gpio i2s", +- .mask = RT3883_GPIO_MODE_GPIO_I2S, +- .gpio_first = RT3883_GPIO_7, +- .gpio_last = RT3883_GPIO_10, +- }, { +- .name = "gpio", +- .mask = RT3883_GPIO_MODE_GPIO, +- }, {0} ++static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) }; ++static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) }; ++static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) }; ++static struct rt2880_pmx_func lna_a_func[] = { FUNC("lna a", 0, 32, 3) }; ++static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna a", 0, 35, 3) }; ++static struct rt2880_pmx_func pci_func[] = { ++ FUNC("pci-dev", 0, 40, 32), ++ FUNC("pci-host2", 1, 40, 32), ++ FUNC("pci-host1", 2, 40, 32), ++ FUNC("pci-fnc", 3, 40, 32) + }; ++static struct rt2880_pmx_func ge1_func[] = { FUNC("ge1", 0, 72, 12) }; ++static struct rt2880_pmx_func ge2_func[] = { FUNC("ge1", 0, 84, 12) }; + +-static struct ralink_pinmux_grp pci_mux[] = { +- { +- .name = "pci-dev", +- .mask = 0, +- .gpio_first = RT3883_GPIO_PCI_AD0, +- .gpio_last = RT3883_GPIO_PCI_AD31, +- }, { +- .name = "pci-host2", +- .mask = 1, +- .gpio_first = RT3883_GPIO_PCI_AD0, +- .gpio_last = 
RT3883_GPIO_PCI_AD31, +- }, { +- .name = "pci-host1", +- .mask = 2, +- .gpio_first = RT3883_GPIO_PCI_AD0, +- .gpio_last = RT3883_GPIO_PCI_AD31, +- }, { +- .name = "pci-fnc", +- .mask = 3, +- .gpio_first = RT3883_GPIO_PCI_AD0, +- .gpio_last = RT3883_GPIO_PCI_AD31, +- }, { +- .name = "pci-gpio", +- .mask = 7, +- .gpio_first = RT3883_GPIO_PCI_AD0, +- .gpio_last = RT3883_GPIO_PCI_AD31, +- }, {0} ++static struct rt2880_pmx_group rt3883_pinmux_data[] = { ++ GRP("i2c", i2c_func, 1, RT3883_GPIO_MODE_I2C), ++ GRP("spi", spi_func, 1, RT3883_GPIO_MODE_SPI), ++ GRP("uartf", uartf_func, RT3883_GPIO_MODE_UART0_MASK, ++ RT3883_GPIO_MODE_UART0_SHIFT), ++ GRP("uartlite", uartlite_func, 1, RT3883_GPIO_MODE_UART1), ++ GRP("jtag", jtag_func, 1, RT3883_GPIO_MODE_JTAG), ++ GRP("mdio", mdio_func, 1, RT3883_GPIO_MODE_MDIO), ++ GRP("lna a", lna_a_func, 1, RT3883_GPIO_MODE_LNA_A), ++ GRP("lna g", lna_g_func, 1, RT3883_GPIO_MODE_LNA_G), ++ GRP("pci", pci_func, RT3883_GPIO_MODE_PCI_MASK, ++ RT3883_GPIO_MODE_PCI_SHIFT), ++ GRP("ge1", ge1_func, 1, RT3883_GPIO_MODE_GE1), ++ GRP("ge2", ge2_func, 1, RT3883_GPIO_MODE_GE2), ++ { 0 } + }; + + static void rt3883_wdt_reset(void) +@@ -155,17 +73,6 @@ static void rt3883_wdt_reset(void) + rt_sysc_w32(t, RT3883_SYSC_REG_SYSCFG1); + } + +-struct ralink_pinmux rt_gpio_pinmux = { +- .mode = mode_mux, +- .uart = uart_mux, +- .uart_shift = RT3883_GPIO_MODE_UART0_SHIFT, +- .uart_mask = RT3883_GPIO_MODE_UART0_MASK, +- .wdt_reset = rt3883_wdt_reset, +- .pci = pci_mux, +- .pci_shift = RT3883_GPIO_MODE_PCI_SHIFT, +- .pci_mask = RT3883_GPIO_MODE_PCI_MASK, +-}; +- + void __init ralink_clk_init(void) + { + unsigned long cpu_rate, sys_rate; +@@ -243,4 +150,6 @@ void prom_soc_init(struct ralink_soc_inf + soc_info->mem_base = RT3883_SDRAM_BASE; + soc_info->mem_size_min = RT3883_MEM_SIZE_MIN; + soc_info->mem_size_max = RT3883_MEM_SIZE_MAX; ++ ++ rt2880_pinmux_data = rt3883_pinmux_data; + } +--- a/drivers/pinctrl/Kconfig ++++ b/drivers/pinctrl/Kconfig +@@ -114,6 +114,11 @@ config PINCTRL_LANTIQ + select PINMUX + select PINCONF + ++config PINCTRL_RT2880 ++ bool ++ depends on RALINK ++ select PINMUX ++ + config PINCTRL_FALCON + bool + depends on SOC_FALCON +--- a/drivers/pinctrl/Makefile ++++ b/drivers/pinctrl/Makefile +@@ -45,6 +45,7 @@ obj-$(CONFIG_PINCTRL_EXYNOS5440) += pinc + obj-$(CONFIG_PINCTRL_S3C64XX) += pinctrl-s3c64xx.o + obj-$(CONFIG_PINCTRL_XWAY) += pinctrl-xway.o + obj-$(CONFIG_PINCTRL_LANTIQ) += pinctrl-lantiq.o ++obj-$(CONFIG_PINCTRL_RT2880) += pinctrl-rt2880.o + + obj-$(CONFIG_PLAT_ORION) += mvebu/ + obj-$(CONFIG_ARCH_SHMOBILE) += sh-pfc/ +--- /dev/null ++++ b/drivers/pinctrl/pinctrl-rt2880.c +@@ -0,0 +1,467 @@ ++/* ++ * linux/drivers/pinctrl/pinctrl-rt2880.c ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * publishhed by the Free Software Foundation. 
++ * ++ * Copyright (C) 2013 John Crispin ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++ ++#include "core.h" ++ ++#define SYSC_REG_GPIO_MODE 0x60 ++ ++struct rt2880_priv { ++ struct device *dev; ++ ++ struct pinctrl_pin_desc *pads; ++ struct pinctrl_desc *desc; ++ ++ struct rt2880_pmx_func **func; ++ int func_count; ++ ++ struct rt2880_pmx_group *groups; ++ const char **group_names; ++ int group_count; ++ ++ uint8_t *gpio; ++ int max_pins; ++}; ++ ++struct rt2880_pmx_group *rt2880_pinmux_data = NULL; ++ ++static int rt2880_get_group_count(struct pinctrl_dev *pctrldev) ++{ ++ struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev); ++ ++ return p->group_count; ++} ++ ++static const char *rt2880_get_group_name(struct pinctrl_dev *pctrldev, ++ unsigned group) ++{ ++ struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev); ++ ++ if (group >= p->group_count) ++ return NULL; ++ ++ return p->group_names[group]; ++} ++ ++static int rt2880_get_group_pins(struct pinctrl_dev *pctrldev, ++ unsigned group, ++ const unsigned **pins, ++ unsigned *num_pins) ++{ ++ struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev); ++ ++ if (group >= p->group_count) ++ return -EINVAL; ++ ++ *pins = p->groups[group].func[0].pins; ++ *num_pins = p->groups[group].func[0].pin_count; ++ ++ return 0; ++} ++ ++static void rt2880_pinctrl_dt_free_map(struct pinctrl_dev *pctrldev, ++ struct pinctrl_map *map, unsigned num_maps) ++{ ++ int i; ++ ++ for (i = 0; i < num_maps; i++) ++ if (map[i].type == PIN_MAP_TYPE_CONFIGS_PIN || ++ map[i].type == PIN_MAP_TYPE_CONFIGS_GROUP) ++ kfree(map[i].data.configs.configs); ++ kfree(map); ++} ++ ++static void rt2880_pinctrl_pin_dbg_show(struct pinctrl_dev *pctrldev, ++ struct seq_file *s, ++ unsigned offset) ++{ ++ seq_printf(s, "ralink pio"); ++} ++ ++static void rt2880_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctrldev, ++ struct device_node *np, ++ struct pinctrl_map **map) ++{ ++ const char *function; ++ int func = of_property_read_string(np, "ralink,function", &function); ++ int grps = of_property_count_strings(np, "ralink,group"); ++ int i; ++ ++ if (func || !grps) ++ return; ++ ++ for (i = 0; i < grps; i++) { ++ const char *group; ++ ++ of_property_read_string_index(np, "ralink,group", i, &group); ++ ++ (*map)->type = PIN_MAP_TYPE_MUX_GROUP; ++ (*map)->name = function; ++ (*map)->data.mux.group = group; ++ (*map)->data.mux.function = function; ++ (*map)++; ++ } ++} ++ ++static int rt2880_pinctrl_dt_node_to_map(struct pinctrl_dev *pctrldev, ++ struct device_node *np_config, ++ struct pinctrl_map **map, ++ unsigned *num_maps) ++{ ++ int max_maps = 0; ++ struct pinctrl_map *tmp; ++ struct device_node *np; ++ ++ for_each_child_of_node(np_config, np) { ++ int ret = of_property_count_strings(np, "ralink,group"); ++ ++ if (ret >= 0) ++ max_maps += ret; ++ } ++ ++ if (!max_maps) ++ return max_maps; ++ ++ *map = kzalloc(max_maps * sizeof(struct pinctrl_map), GFP_KERNEL); ++ if (!*map) ++ return -ENOMEM; ++ ++ tmp = *map; ++ ++ for_each_child_of_node(np_config, np) ++ rt2880_pinctrl_dt_subnode_to_map(pctrldev, np, &tmp); ++ *num_maps = max_maps; ++ ++ return 0; ++} ++ ++static const struct pinctrl_ops rt2880_pctrl_ops = { ++ .get_groups_count = rt2880_get_group_count, ++ .get_group_name = rt2880_get_group_name, ++ .get_group_pins = rt2880_get_group_pins, ++ .pin_dbg_show = rt2880_pinctrl_pin_dbg_show, ++ .dt_node_to_map = rt2880_pinctrl_dt_node_to_map, ++ 
.dt_free_map = rt2880_pinctrl_dt_free_map, ++}; ++ ++static int rt2880_pmx_func_count(struct pinctrl_dev *pctrldev) ++{ ++ struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev); ++ ++ return p->func_count; ++} ++ ++static const char *rt2880_pmx_func_name(struct pinctrl_dev *pctrldev, ++ unsigned func) ++{ ++ struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev); ++ ++ return p->func[func]->name; ++} ++ ++static int rt2880_pmx_group_get_groups(struct pinctrl_dev *pctrldev, ++ unsigned func, ++ const char * const **groups, ++ unsigned * const num_groups) ++{ ++ struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev); ++ ++ if (p->func[func]->group_count == 1) ++ *groups = &p->group_names[p->func[func]->groups[0]]; ++ else ++ *groups = p->group_names; ++ ++ *num_groups = p->func[func]->group_count; ++ ++ return 0; ++} ++ ++static int rt2880_pmx_group_enable(struct pinctrl_dev *pctrldev, ++ unsigned func, ++ unsigned group) ++{ ++ struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev); ++ u32 mode = 0; ++ int i; ++ ++ /* dont allow double use */ ++ if (p->groups[group].enabled) { ++ dev_err(p->dev, "%s is already enabled\n", p->groups[group].name); ++ return -EBUSY; ++ } ++ ++ p->groups[group].enabled = 1; ++ p->func[func]->enabled = 1; ++ ++ mode = rt_sysc_r32(SYSC_REG_GPIO_MODE); ++ mode &= ~(p->groups[group].mask << p->groups[group].shift); ++ ++ /* mark the pins as gpio */ ++ for (i = 0; i < p->groups[group].func[0].pin_count; i++) ++ p->gpio[p->groups[group].func[0].pins[i]] = 1; ++ ++ /* function 0 is gpio and needs special handling */ ++ if (func == 0) { ++ mode |= p->groups[group].gpio << p->groups[group].shift; ++ } else { ++ for (i = 0; i < p->func[func]->pin_count; i++) ++ p->gpio[p->func[func]->pins[i]] = 0; ++ mode |= p->func[func]->value << p->groups[group].shift; ++ } ++ rt_sysc_w32(mode, SYSC_REG_GPIO_MODE); ++ ++ ++ return 0; ++} ++ ++static int rt2880_pmx_group_gpio_request_enable(struct pinctrl_dev *pctrldev, ++ struct pinctrl_gpio_range *range, ++ unsigned pin) ++{ ++ struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev); ++ ++ if (!p->gpio[pin]) { ++ dev_err(p->dev, "pin %d is not set to gpio mux\n", pin); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static const struct pinmux_ops rt2880_pmx_group_ops = { ++ .get_functions_count = rt2880_pmx_func_count, ++ .get_function_name = rt2880_pmx_func_name, ++ .get_function_groups = rt2880_pmx_group_get_groups, ++ .enable = rt2880_pmx_group_enable, ++ .gpio_request_enable = rt2880_pmx_group_gpio_request_enable, ++}; ++ ++static struct pinctrl_desc rt2880_pctrl_desc = { ++ .owner = THIS_MODULE, ++ .name = "rt2880-pinmux", ++ .pctlops = &rt2880_pctrl_ops, ++ .pmxops = &rt2880_pmx_group_ops, ++}; ++ ++static struct rt2880_pmx_func gpio_func = { ++ .name = "gpio", ++}; ++ ++static int rt2880_pinmux_index(struct rt2880_priv *p) ++{ ++ struct rt2880_pmx_func **f; ++ struct rt2880_pmx_group *mux = p->groups; ++ int i, j, c = 0; ++ ++ /* count the mux functions */ ++ while (mux->name) { ++ p->group_count++; ++ mux++; ++ } ++ ++ /* allocate the group names array needed by the gpio function */ ++ p->group_names = devm_kzalloc(p->dev, sizeof(char *) * p->group_count, GFP_KERNEL); ++ if (!p->group_names) ++ return -1; ++ ++ for (i = 0; i < p->group_count; i++) { ++ p->group_names[i] = p->groups[i].name; ++ p->func_count += p->groups[i].func_count; ++ } ++ ++ /* we have a dummy function[0] for gpio */ ++ p->func_count++; ++ ++ /* allocate our function and group mapping index buffers */ ++ f = p->func = devm_kzalloc(p->dev, 
sizeof(struct rt2880_pmx_func) * p->func_count, GFP_KERNEL); ++ gpio_func.groups = devm_kzalloc(p->dev, sizeof(int) * p->group_count, GFP_KERNEL); ++ if (!f || !gpio_func.groups) ++ return -1; ++ ++ /* add a backpointer to the function so it knows its group */ ++ gpio_func.group_count = p->group_count; ++ for (i = 0; i < gpio_func.group_count; i++) ++ gpio_func.groups[i] = i; ++ ++ f[c] = &gpio_func; ++ c++; ++ ++ /* add remaining functions */ ++ for (i = 0; i < p->group_count; i++) { ++ for (j = 0; j < p->groups[i].func_count; j++) { ++ f[c] = &p->groups[i].func[j]; ++ f[c]->groups = devm_kzalloc(p->dev, sizeof(int), GFP_KERNEL); ++ f[c]->groups[0] = i; ++ f[c]->group_count = 1; ++ c++; ++ } ++ } ++ return 0; ++} ++ ++static int rt2880_pinmux_pins(struct rt2880_priv *p) ++{ ++ int i, j; ++ ++ /* loop over the functions and initialize the pins array. also work out the highest pin used */ ++ for (i = 0; i < p->func_count; i++) { ++ int pin; ++ ++ if (!p->func[i]->pin_count) ++ continue; ++ ++ p->func[i]->pins = devm_kzalloc(p->dev, sizeof(int) * p->func[i]->pin_count, GFP_KERNEL); ++ for (j = 0; j < p->func[i]->pin_count; j++) ++ p->func[i]->pins[j] = p->func[i]->pin_first + j; ++ ++ pin = p->func[i]->pin_first + p->func[i]->pin_count; ++ if (pin > p->max_pins) ++ p->max_pins = pin; ++ } ++ ++ /* the buffer that tells us which pins are gpio */ ++ p->gpio = devm_kzalloc(p->dev,sizeof(uint8_t) * p->max_pins, ++ GFP_KERNEL); ++ /* the pads needed to tell pinctrl about our pins */ ++ p->pads = devm_kzalloc(p->dev, ++ sizeof(struct pinctrl_pin_desc) * p->max_pins, ++ GFP_KERNEL); ++ if (!p->pads || !p->gpio ) { ++ dev_err(p->dev, "Failed to allocate gpio data\n"); ++ return -ENOMEM; ++ } ++ ++ memset(p->gpio, 1, sizeof(uint8_t) * p->max_pins); ++ for (i = 0; i < p->func_count; i++) { ++ if (!p->func[i]->pin_count) ++ continue; ++ ++ for (j = 0; j < p->func[i]->pin_count; j++) ++ p->gpio[p->func[i]->pins[j]] = 0; ++ } ++ ++ /* pin 0 is always a gpio */ ++ p->gpio[0] = 1; ++ ++ /* set the pads */ ++ for (i = 0; i < p->max_pins; i++) { ++ /* strlen("ioXY") + 1 = 5 */ ++ char *name = devm_kzalloc(p->dev, 5, GFP_KERNEL); ++ ++ if (!name) { ++ dev_err(p->dev, "Failed to allocate pad name\n"); ++ return -ENOMEM; ++ } ++ snprintf(name, 5, "io%d", i); ++ p->pads[i].number = i; ++ p->pads[i].name = name; ++ } ++ p->desc->pins = p->pads; ++ p->desc->npins = p->max_pins; ++ ++ return 0; ++} ++ ++static int rt2880_pinmux_probe(struct platform_device *pdev) ++{ ++ struct rt2880_priv *p; ++ struct pinctrl_dev *dev; ++ struct device_node *np; ++ ++ if (!rt2880_pinmux_data) ++ return -ENOSYS; ++ ++ /* setup the private data */ ++ p = devm_kzalloc(&pdev->dev, sizeof(struct rt2880_priv), GFP_KERNEL); ++ if (!p) ++ return -ENOMEM; ++ ++ p->dev = &pdev->dev; ++ p->desc = &rt2880_pctrl_desc; ++ p->groups = rt2880_pinmux_data; ++ platform_set_drvdata(pdev, p); ++ ++ /* init the device */ ++ if (rt2880_pinmux_index(p)) { ++ dev_err(&pdev->dev, "failed to load index\n"); ++ return -EINVAL; ++ } ++ if (rt2880_pinmux_pins(p)) { ++ dev_err(&pdev->dev, "failed to load pins\n"); ++ return -EINVAL; ++ } ++ dev = pinctrl_register(p->desc, &pdev->dev, p); ++ if (IS_ERR(dev)) ++ return PTR_ERR(dev); ++ ++ /* finalize by adding gpio ranges for enables gpio controllers */ ++ for_each_compatible_node(np, NULL, "ralink,rt2880-gpio") { ++ const __be32 *ngpio, *gpiobase; ++ struct pinctrl_gpio_range *range; ++ char *name; ++ ++ if (!of_device_is_available(np)) ++ continue; ++ ++ ngpio = of_get_property(np, "ralink,num-gpios", NULL); 
++ gpiobase = of_get_property(np, "ralink,gpio-base", NULL); ++ if (!ngpio || !gpiobase) { ++ dev_err(&pdev->dev, "failed to load chip info\n"); ++ return -EINVAL; ++ } ++ ++ range = devm_kzalloc(p->dev, sizeof(struct pinctrl_gpio_range) + 4, GFP_KERNEL); ++ range->name = name = (char *) &range[1]; ++ sprintf(name, "pio"); ++ range->npins = __be32_to_cpu(*ngpio); ++ range->base = __be32_to_cpu(*gpiobase); ++ range->pin_base = range->base; ++ pinctrl_add_gpio_range(dev, range); ++ } ++ ++ return 0; ++} ++ ++static const struct of_device_id rt2880_pinmux_match[] = { ++ { .compatible = "ralink,rt2880-pinmux" }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(of, rt2880_pinmux_match); ++ ++static struct platform_driver rt2880_pinmux_driver = { ++ .probe = rt2880_pinmux_probe, ++ .driver = { ++ .name = "rt2880-pinmux", ++ .owner = THIS_MODULE, ++ .of_match_table = rt2880_pinmux_match, ++ }, ++}; ++ ++int __init rt2880_pinmux_init(void) ++{ ++ return platform_driver_register(&rt2880_pinmux_driver); ++} ++ ++core_initcall_sync(rt2880_pinmux_init); diff --git a/target/linux/ramips/patches-3.10/0114-PCI-MIPS-adds-rt2880-pci-support.patch b/target/linux/ramips/patches-3.10/0114-PCI-MIPS-adds-rt2880-pci-support.patch new file mode 100644 index 0000000000..521651f67a --- /dev/null +++ b/target/linux/ramips/patches-3.10/0114-PCI-MIPS-adds-rt2880-pci-support.patch @@ -0,0 +1,319 @@ +From b7040c3ad7b8daf8309d083e9248cfa577075cfb Mon Sep 17 00:00:00 2001 +From: John Crispin +Date: Thu, 21 Mar 2013 18:27:29 +0100 +Subject: [PATCH 114/133] PCI: MIPS: adds rt2880 pci support + +Add support for the pci found on the rt2880 SoC. + +Signed-off-by: John Crispin +--- + arch/mips/pci/Makefile | 1 + + arch/mips/pci/pci-rt2880.c | 281 ++++++++++++++++++++++++++++++++++++++++++++ + arch/mips/ralink/Kconfig | 1 + + 3 files changed, 283 insertions(+) + create mode 100644 arch/mips/pci/pci-rt2880.c + +--- a/arch/mips/pci/Makefile ++++ b/arch/mips/pci/Makefile +@@ -41,6 +41,7 @@ obj-$(CONFIG_SIBYTE_BCM1x80) += pci-bcm1 + obj-$(CONFIG_SNI_RM) += fixup-sni.o ops-sni.o + obj-$(CONFIG_LANTIQ) += fixup-lantiq.o + obj-$(CONFIG_PCI_LANTIQ) += pci-lantiq.o ops-lantiq.o ++obj-$(CONFIG_SOC_RT2880) += pci-rt2880.o + obj-$(CONFIG_SOC_RT3883) += pci-rt3883.o + obj-$(CONFIG_TANBAC_TB0219) += fixup-tb0219.o + obj-$(CONFIG_TANBAC_TB0226) += fixup-tb0226.o +--- /dev/null ++++ b/arch/mips/pci/pci-rt2880.c +@@ -0,0 +1,281 @@ ++/* ++ * Ralink RT288x SoC PCI register definitions ++ * ++ * Copyright (C) 2009 John Crispin ++ * Copyright (C) 2009 Gabor Juhos ++ * ++ * Parts of this file are based on Ralink's 2.6.21 BSP ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published ++ * by the Free Software Foundation. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#define RT2880_PCI_BASE 0x00440000 ++#define RT288X_CPU_IRQ_PCI 4 ++ ++#define RT2880_PCI_MEM_BASE 0x20000000 ++#define RT2880_PCI_MEM_SIZE 0x10000000 ++#define RT2880_PCI_IO_BASE 0x00460000 ++#define RT2880_PCI_IO_SIZE 0x00010000 ++ ++#define RT2880_PCI_REG_PCICFG_ADDR 0x00 ++#define RT2880_PCI_REG_PCIMSK_ADDR 0x0c ++#define RT2880_PCI_REG_BAR0SETUP_ADDR 0x10 ++#define RT2880_PCI_REG_IMBASEBAR0_ADDR 0x18 ++#define RT2880_PCI_REG_CONFIG_ADDR 0x20 ++#define RT2880_PCI_REG_CONFIG_DATA 0x24 ++#define RT2880_PCI_REG_MEMBASE 0x28 ++#define RT2880_PCI_REG_IOBASE 0x2c ++#define RT2880_PCI_REG_ID 0x30 ++#define RT2880_PCI_REG_CLASS 0x34 ++#define RT2880_PCI_REG_SUBID 0x38 ++#define RT2880_PCI_REG_ARBCTL 0x80 ++ ++static void __iomem *rt2880_pci_base; ++static DEFINE_SPINLOCK(rt2880_pci_lock); ++ ++static u32 rt2880_pci_reg_read(u32 reg) ++{ ++ return readl(rt2880_pci_base + reg); ++} ++ ++static void rt2880_pci_reg_write(u32 val, u32 reg) ++{ ++ writel(val, rt2880_pci_base + reg); ++} ++ ++static inline u32 rt2880_pci_get_cfgaddr(unsigned int bus, unsigned int slot, ++ unsigned int func, unsigned int where) ++{ ++ return ((bus << 16) | (slot << 11) | (func << 8) | (where & 0xfc) | ++ 0x80000000); ++} ++ ++static int rt2880_pci_config_read(struct pci_bus *bus, unsigned int devfn, ++ int where, int size, u32 *val) ++{ ++ unsigned long flags; ++ u32 address; ++ u32 data; ++ ++ address = rt2880_pci_get_cfgaddr(bus->number, PCI_SLOT(devfn), ++ PCI_FUNC(devfn), where); ++ ++ spin_lock_irqsave(&rt2880_pci_lock, flags); ++ rt2880_pci_reg_write(address, RT2880_PCI_REG_CONFIG_ADDR); ++ data = rt2880_pci_reg_read(RT2880_PCI_REG_CONFIG_DATA); ++ spin_unlock_irqrestore(&rt2880_pci_lock, flags); ++ ++ switch (size) { ++ case 1: ++ *val = (data >> ((where & 3) << 3)) & 0xff; ++ break; ++ case 2: ++ *val = (data >> ((where & 3) << 3)) & 0xffff; ++ break; ++ case 4: ++ *val = data; ++ break; ++ } ++ ++ return PCIBIOS_SUCCESSFUL; ++} ++ ++static int rt2880_pci_config_write(struct pci_bus *bus, unsigned int devfn, ++ int where, int size, u32 val) ++{ ++ unsigned long flags; ++ u32 address; ++ u32 data; ++ ++ address = rt2880_pci_get_cfgaddr(bus->number, PCI_SLOT(devfn), ++ PCI_FUNC(devfn), where); ++ ++ spin_lock_irqsave(&rt2880_pci_lock, flags); ++ rt2880_pci_reg_write(address, RT2880_PCI_REG_CONFIG_ADDR); ++ data = rt2880_pci_reg_read(RT2880_PCI_REG_CONFIG_DATA); ++ ++ switch (size) { ++ case 1: ++ data = (data & ~(0xff << ((where & 3) << 3))) | ++ (val << ((where & 3) << 3)); ++ break; ++ case 2: ++ data = (data & ~(0xffff << ((where & 3) << 3))) | ++ (val << ((where & 3) << 3)); ++ break; ++ case 4: ++ data = val; ++ break; ++ } ++ ++ rt2880_pci_reg_write(data, RT2880_PCI_REG_CONFIG_DATA); ++ spin_unlock_irqrestore(&rt2880_pci_lock, flags); ++ ++ return PCIBIOS_SUCCESSFUL; ++} ++ ++static struct pci_ops rt2880_pci_ops = { ++ .read = rt2880_pci_config_read, ++ .write = rt2880_pci_config_write, ++}; ++ ++static struct resource rt2880_pci_mem_resource = { ++ .name = "PCI MEM space", ++ .start = RT2880_PCI_MEM_BASE, ++ .end = RT2880_PCI_MEM_BASE + RT2880_PCI_MEM_SIZE - 1, ++ .flags = IORESOURCE_MEM, ++}; ++ ++static struct resource rt2880_pci_io_resource = { ++ .name = "PCI IO space", ++ .start = RT2880_PCI_IO_BASE, ++ .end = RT2880_PCI_IO_BASE + RT2880_PCI_IO_SIZE - 1, ++ .flags = IORESOURCE_IO, ++}; ++ ++static struct pci_controller rt2880_pci_controller = { ++ .pci_ops = &rt2880_pci_ops, ++ 
.mem_resource = &rt2880_pci_mem_resource, ++ .io_resource = &rt2880_pci_io_resource, ++}; ++ ++static inline u32 rt2880_pci_read_u32(unsigned long reg) ++{ ++ unsigned long flags; ++ u32 address; ++ u32 ret; ++ ++ address = rt2880_pci_get_cfgaddr(0, 0, 0, reg); ++ ++ spin_lock_irqsave(&rt2880_pci_lock, flags); ++ rt2880_pci_reg_write(address, RT2880_PCI_REG_CONFIG_ADDR); ++ ret = rt2880_pci_reg_read(RT2880_PCI_REG_CONFIG_DATA); ++ spin_unlock_irqrestore(&rt2880_pci_lock, flags); ++ ++ return ret; ++} ++ ++static inline void rt2880_pci_write_u32(unsigned long reg, u32 val) ++{ ++ unsigned long flags; ++ u32 address; ++ ++ address = rt2880_pci_get_cfgaddr(0, 0, 0, reg); ++ ++ spin_lock_irqsave(&rt2880_pci_lock, flags); ++ rt2880_pci_reg_write(address, RT2880_PCI_REG_CONFIG_ADDR); ++ rt2880_pci_reg_write(val, RT2880_PCI_REG_CONFIG_DATA); ++ spin_unlock_irqrestore(&rt2880_pci_lock, flags); ++} ++ ++int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) ++{ ++ u16 cmd; ++ int irq = -1; ++ ++ if (dev->bus->number != 0) ++ return irq; ++ ++ switch (PCI_SLOT(dev->devfn)) { ++ case 0x00: ++ rt2880_pci_write_u32(PCI_BASE_ADDRESS_0, 0x08000000); ++ (void) rt2880_pci_read_u32(PCI_BASE_ADDRESS_0); ++ break; ++ case 0x11: ++ irq = RT288X_CPU_IRQ_PCI; ++ break; ++ default: ++ printk("%s:%s[%d] trying to alloc unknown pci irq\n", ++ __FILE__, __func__, __LINE__); ++ BUG(); ++ break; ++ } ++ ++ pci_write_config_byte((struct pci_dev*)dev, PCI_CACHE_LINE_SIZE, 0x14); ++ pci_write_config_byte((struct pci_dev*)dev, PCI_LATENCY_TIMER, 0xFF); ++ pci_read_config_word((struct pci_dev*)dev, PCI_COMMAND, &cmd); ++ cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY | ++ PCI_COMMAND_INVALIDATE | PCI_COMMAND_FAST_BACK | ++ PCI_COMMAND_SERR | PCI_COMMAND_WAIT | PCI_COMMAND_PARITY; ++ pci_write_config_word((struct pci_dev*)dev, PCI_COMMAND, cmd); ++ pci_write_config_byte((struct pci_dev*)dev, PCI_INTERRUPT_LINE, ++ dev->irq); ++ return irq; ++} ++ ++static int rt288x_pci_probe(struct platform_device *pdev) ++{ ++ void __iomem *io_map_base; ++ int i; ++ ++ rt2880_pci_base = ioremap_nocache(RT2880_PCI_BASE, PAGE_SIZE); ++ ++ io_map_base = ioremap(RT2880_PCI_IO_BASE, RT2880_PCI_IO_SIZE); ++ rt2880_pci_controller.io_map_base = (unsigned long) io_map_base; ++ set_io_port_base((unsigned long) io_map_base); ++ ++ ioport_resource.start = RT2880_PCI_IO_BASE; ++ ioport_resource.end = RT2880_PCI_IO_BASE + RT2880_PCI_IO_SIZE - 1; ++ ++ rt2880_pci_reg_write(0, RT2880_PCI_REG_PCICFG_ADDR); ++ for(i = 0; i < 0xfffff; i++) {} ++ ++ rt2880_pci_reg_write(0x79, RT2880_PCI_REG_ARBCTL); ++ rt2880_pci_reg_write(0x07FF0001, RT2880_PCI_REG_BAR0SETUP_ADDR); ++ rt2880_pci_reg_write(RT2880_PCI_MEM_BASE, RT2880_PCI_REG_MEMBASE); ++ rt2880_pci_reg_write(RT2880_PCI_IO_BASE, RT2880_PCI_REG_IOBASE); ++ rt2880_pci_reg_write(0x08000000, RT2880_PCI_REG_IMBASEBAR0_ADDR); ++ rt2880_pci_reg_write(0x08021814, RT2880_PCI_REG_ID); ++ rt2880_pci_reg_write(0x00800001, RT2880_PCI_REG_CLASS); ++ rt2880_pci_reg_write(0x28801814, RT2880_PCI_REG_SUBID); ++ rt2880_pci_reg_write(0x000c0000, RT2880_PCI_REG_PCIMSK_ADDR); ++ ++ rt2880_pci_write_u32(PCI_BASE_ADDRESS_0, 0x08000000); ++ (void) rt2880_pci_read_u32(PCI_BASE_ADDRESS_0); ++ ++ register_pci_controller(&rt2880_pci_controller); ++ return 0; ++} ++ ++int pcibios_plat_dev_init(struct pci_dev *dev) ++{ ++ return 0; ++} ++ ++static const struct of_device_id rt288x_pci_match[] = { ++ { .compatible = "ralink,rt288x-pci" }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(of, rt288x_pci_match); ++ ++static 
struct platform_driver rt288x_pci_driver = { ++ .probe = rt288x_pci_probe, ++ .driver = { ++ .name = "rt288x-pci", ++ .owner = THIS_MODULE, ++ .of_match_table = rt288x_pci_match, ++ }, ++}; ++ ++int __init pcibios_init(void) ++{ ++ int ret = platform_driver_register(&rt288x_pci_driver); ++ if (ret) ++ pr_info("rt288x-pci: Error registering platform driver!"); ++ return ret; ++} ++ ++arch_initcall(pcibios_init); +--- a/arch/mips/ralink/Kconfig ++++ b/arch/mips/ralink/Kconfig +@@ -15,6 +15,7 @@ choice + + config SOC_RT288X + bool "RT288x" ++ select HW_HAS_PCI + + config SOC_RT305X + bool "RT305x" diff --git a/target/linux/ramips/patches-3.10/0114-serial-ralink-adds-mt7620-serial.patch b/target/linux/ramips/patches-3.10/0114-serial-ralink-adds-mt7620-serial.patch deleted file mode 100644 index 2772aef738..0000000000 --- a/target/linux/ramips/patches-3.10/0114-serial-ralink-adds-mt7620-serial.patch +++ /dev/null @@ -1,23 +0,0 @@ -From 629a2ca61e0fbf331f88692038391d22f21b7c70 Mon Sep 17 00:00:00 2001 -From: John Crispin -Date: Fri, 15 Mar 2013 18:16:01 +0100 -Subject: [PATCH 20/33] serial: ralink: adds mt7620 serial - -Add the config symbol for Mediatek7620 SoC to SERIAL_8250_RT288X - -Signed-off-by: John Crispin ---- - drivers/tty/serial/8250/Kconfig | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/tty/serial/8250/Kconfig -+++ b/drivers/tty/serial/8250/Kconfig -@@ -300,7 +300,7 @@ config SERIAL_8250_EM - - config SERIAL_8250_RT288X - bool "Ralink RT288x/RT305x/RT3662/RT3883 serial port support" -- depends on SERIAL_8250 && (SOC_RT288X || SOC_RT305X || SOC_RT3883) -+ depends on SERIAL_8250 && (SOC_RT288X || SOC_RT305X || SOC_RT3883 || SOC_MT7620) - help - If you have a Ralink RT288x/RT305x SoC based board and want to use the - serial port, say Y to this option. The driver can handle up to 2 serial diff --git a/target/linux/ramips/patches-3.10/0115-PCI-MIPS-adds-mt7620a-pcie-driver.patch b/target/linux/ramips/patches-3.10/0115-PCI-MIPS-adds-mt7620a-pcie-driver.patch new file mode 100644 index 0000000000..cff4017a45 --- /dev/null +++ b/target/linux/ramips/patches-3.10/0115-PCI-MIPS-adds-mt7620a-pcie-driver.patch @@ -0,0 +1,399 @@ +From 686f5642c74323f7e7eafb93c2b85df589cbf66e Mon Sep 17 00:00:00 2001 +From: John Crispin +Date: Sat, 18 May 2013 22:06:15 +0200 +Subject: [PATCH 115/133] PCI: MIPS: adds mt7620a pcie driver + +Signed-off-by: John Crispin +--- + arch/mips/pci/Makefile | 1 + + arch/mips/pci/pci-mt7620a.c | 363 +++++++++++++++++++++++++++++++++++++++++++ + arch/mips/ralink/Kconfig | 1 + + 3 files changed, 365 insertions(+) + create mode 100644 arch/mips/pci/pci-mt7620a.c + +--- a/arch/mips/pci/Makefile ++++ b/arch/mips/pci/Makefile +@@ -41,6 +41,7 @@ obj-$(CONFIG_SIBYTE_BCM1x80) += pci-bcm1 + obj-$(CONFIG_SNI_RM) += fixup-sni.o ops-sni.o + obj-$(CONFIG_LANTIQ) += fixup-lantiq.o + obj-$(CONFIG_PCI_LANTIQ) += pci-lantiq.o ops-lantiq.o ++obj-$(CONFIG_SOC_MT7620) += pci-mt7620a.o + obj-$(CONFIG_SOC_RT2880) += pci-rt2880.o + obj-$(CONFIG_SOC_RT3883) += pci-rt3883.o + obj-$(CONFIG_TANBAC_TB0219) += fixup-tb0219.o +--- /dev/null ++++ b/arch/mips/pci/pci-mt7620a.c +@@ -0,0 +1,363 @@ ++/* ++ * Ralink MT7620A SoC PCI support ++ * ++ * Copyright (C) 2007-2013 Bruce Chang ++ * Copyright (C) 2013 John Crispin ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published ++ * by the Free Software Foundation. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#define RALINK_PCI_MM_MAP_BASE 0x20000000 ++#define RALINK_PCI_IO_MAP_BASE 0x10160000 ++ ++#define RALINK_INT_PCIE0 4 ++#define RALINK_SYSTEM_CONTROL_BASE 0xb0000000 ++#define RALINK_SYSCFG1 0x14 ++#define RALINK_CLKCFG1 0x30 ++#define RALINK_GPIOMODE 0x60 ++#define RALINK_PCIE_CLK_GEN 0x7c ++#define RALINK_PCIE_CLK_GEN1 0x80 ++#define PCIEPHY0_CFG 0x90 ++#define PPLL_CFG1 0x9c ++#define PPLL_DRV 0xa0 ++#define RALINK_PCI_HOST_MODE_EN (1<<7) ++#define RALINK_PCIE_RC_MODE_EN (1<<8) ++#define RALINK_PCIE_RST (1<<23) ++#define RALINK_PCI_RST (1<<24) ++#define RALINK_PCI_CLK_EN (1<<19) ++#define RALINK_PCIE_CLK_EN (1<<21) ++#define PCI_SLOTx2 (1<<11) ++#define PCI_SLOTx1 (2<<11) ++#define PDRV_SW_SET (1<<31) ++#define LC_CKDRVPD_ (1<<19) ++ ++#define RALINK_PCI_CONFIG_ADDR 0x20 ++#define RALINK_PCI_CONFIG_DATA_VIRTUAL_REG 0x24 ++#define MEMORY_BASE 0x0 ++#define RALINK_PCIE0_RST (1<<26) ++#define RALINK_PCI_BASE 0xB0140000 ++#define RALINK_PCI_MEMBASE 0x28 ++#define RALINK_PCI_IOBASE 0x2C ++ ++#define RT6855_PCIE0_OFFSET 0x2000 ++ ++#define RALINK_PCI_PCICFG_ADDR 0x00 ++#define RALINK_PCI0_BAR0SETUP_ADDR 0x10 ++#define RALINK_PCI0_IMBASEBAR0_ADDR 0x18 ++#define RALINK_PCI0_ID 0x30 ++#define RALINK_PCI0_CLASS 0x34 ++#define RALINK_PCI0_SUBID 0x38 ++#define RALINK_PCI0_STATUS 0x50 ++#define RALINK_PCI_PCIMSK_ADDR 0x0C ++ ++#define RALINK_PCIE0_CLK_EN (1 << 26) ++ ++#define BUSY 0x80000000 ++#define WAITRETRY_MAX 10 ++#define WRITE_MODE (1UL << 23) ++#define DATA_SHIFT 0 ++#define ADDR_SHIFT 8 ++ ++ ++static void __iomem *bridge_base; ++static void __iomem *pcie_base; ++ ++static struct reset_control *rstpcie0; ++ ++static inline void bridge_w32(u32 val, unsigned reg) ++{ ++ iowrite32(val, bridge_base + reg); ++} ++ ++static inline u32 bridge_r32(unsigned reg) ++{ ++ return ioread32(bridge_base + reg); ++} ++ ++static inline void pcie_w32(u32 val, unsigned reg) ++{ ++ iowrite32(val, pcie_base + reg); ++} ++ ++static inline u32 pcie_r32(unsigned reg) ++{ ++ return ioread32(pcie_base + reg); ++} ++ ++static inline void pcie_m32(u32 clr, u32 set, unsigned reg) ++{ ++ u32 val = pcie_r32(reg); ++ val &= ~clr; ++ val |= set; ++ pcie_w32(val, reg); ++} ++ ++int wait_pciephy_busy(void) ++{ ++ unsigned long reg_value = 0x0, retry = 0; ++ ++ while (1) { ++ //reg_value = rareg(READMODE, PCIEPHY0_CFG, 0); ++ reg_value = pcie_r32(PCIEPHY0_CFG); ++ ++ if (reg_value & BUSY) ++ mdelay(100); ++ else ++ break; ++ if (retry++ > WAITRETRY_MAX){ ++ printk("PCIE-PHY retry failed.\n"); ++ return -1; ++ } ++ } ++ return 0; ++} ++ ++static void pcie_phy(unsigned long addr, unsigned long val) ++{ ++ wait_pciephy_busy(); ++ pcie_w32(WRITE_MODE | (val << DATA_SHIFT) | (addr << ADDR_SHIFT), PCIEPHY0_CFG); ++ mdelay(1); ++ wait_pciephy_busy(); ++} ++ ++static int pci_config_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 * val) ++{ ++ unsigned int slot = PCI_SLOT(devfn); ++ u8 func = PCI_FUNC(devfn); ++ u32 address; ++ u32 data; ++ ++ address = (((where & 0xF00) >> 8) << 24) | (bus->number << 16) | (slot << 11) | (func << 8) | (where & 0xfc) | 0x80000000; ++ bridge_w32(address, RALINK_PCI_CONFIG_ADDR); ++ data = bridge_r32(RALINK_PCI_CONFIG_DATA_VIRTUAL_REG); ++ ++ switch (size) { ++ case 1: ++ *val = (data >> ((where & 3) << 3)) & 0xff; ++ break; ++ case 2: ++ *val = (data >> ((where & 3) << 3)) & 0xffff; ++ break; ++ case 4: ++ *val = data; ++ 
break; ++ } ++ ++ return PCIBIOS_SUCCESSFUL; ++} ++ ++static int pci_config_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) ++{ ++ unsigned int slot = PCI_SLOT(devfn); ++ u8 func = PCI_FUNC(devfn); ++ u32 address; ++ u32 data; ++ ++ address = (((where & 0xF00) >> 8) << 24) | (bus->number << 16) | (slot << 11) | (func << 8) | (where & 0xfc) | 0x80000000; ++ bridge_w32(address, RALINK_PCI_CONFIG_ADDR); ++ data = bridge_r32(RALINK_PCI_CONFIG_DATA_VIRTUAL_REG); ++ ++ switch (size) { ++ case 1: ++ data = (data & ~(0xff << ((where & 3) << 3))) | ++ (val << ((where & 3) << 3)); ++ break; ++ case 2: ++ data = (data & ~(0xffff << ((where & 3) << 3))) | ++ (val << ((where & 3) << 3)); ++ break; ++ case 4: ++ data = val; ++ break; ++ } ++ ++ bridge_w32(data, RALINK_PCI_CONFIG_DATA_VIRTUAL_REG); ++ ++ return PCIBIOS_SUCCESSFUL; ++} ++ ++struct pci_ops mt7620a_pci_ops= { ++ .read = pci_config_read, ++ .write = pci_config_write, ++}; ++ ++static struct resource mt7620a_res_pci_mem1 = { ++ .name = "pci memory", ++ .start = RALINK_PCI_MM_MAP_BASE, ++ .end = (u32) ((RALINK_PCI_MM_MAP_BASE + (unsigned char *)0x0fffffff)), ++ .flags = IORESOURCE_MEM, ++}; ++static struct resource mt7620a_res_pci_io1 = { ++ .name = "pci io", ++ .start = RALINK_PCI_IO_MAP_BASE, ++ .end = (u32) ((RALINK_PCI_IO_MAP_BASE + (unsigned char *)0x0ffff)), ++ .flags = IORESOURCE_IO, ++}; ++ ++struct pci_controller mt7620a_controller = { ++ .pci_ops = &mt7620a_pci_ops, ++ .mem_resource = &mt7620a_res_pci_mem1, ++ .io_resource = &mt7620a_res_pci_io1, ++ .mem_offset = 0x00000000UL, ++ .io_offset = 0x00000000UL, ++ .io_map_base = 0xa0000000, ++}; ++ ++static int mt7620a_pci_probe(struct platform_device *pdev) ++{ ++ struct resource *bridge_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ struct resource *pcie_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); ++ ++ rstpcie0 = devm_reset_control_get(&pdev->dev, "pcie0"); ++ if (IS_ERR(rstpcie0)) ++ return PTR_ERR(rstpcie0); ++ ++ bridge_base = devm_request_and_ioremap(&pdev->dev, bridge_res); ++ if (!bridge_base) ++ return -ENOMEM; ++ ++ pcie_base = devm_request_and_ioremap(&pdev->dev, pcie_res); ++ if (!pcie_base) ++ return -ENOMEM; ++ ++ iomem_resource.start = 0; ++ iomem_resource.end= ~0; ++ ioport_resource.start= 0; ++ ioport_resource.end = ~0; ++ ++ /* PCIE: bypass PCIe DLL */ ++ pcie_phy(0x0, 0x80); ++ pcie_phy(0x1, 0x04); ++ /* PCIE: Elastic buffer control */ ++ pcie_phy(0x68, 0xB4); ++ ++ reset_control_assert(rstpcie0); ++ rt_sysc_m32(RALINK_PCIE0_CLK_EN, 0, RALINK_CLKCFG1); ++ rt_sysc_m32(1<<19, 1<<31, PPLL_DRV); ++ rt_sysc_m32(0x3 << 16, 0, RALINK_GPIOMODE); ++ ++ reset_control_deassert(rstpcie0); ++ rt_sysc_m32(0, RALINK_PCIE0_CLK_EN, RALINK_CLKCFG1); ++ ++ mdelay(100); ++ ++ if (!(rt_sysc_r32(PPLL_CFG1) & 1<<23)) { ++ printk("MT7620 PPLL unlock\n"); ++ reset_control_assert(rstpcie0); ++ rt_sysc_m32(BIT(26), 0, RALINK_CLKCFG1); ++ return 0; ++ } ++ rt_sysc_m32((0x1<<18) | (0x1<<17), (0x1 << 19) | (0x1 << 31), PPLL_DRV); ++ ++ mdelay(100); ++ reset_control_assert(rstpcie0); ++ rt_sysc_m32(0x30, 2 << 4, RALINK_SYSCFG1); ++ ++ rt_sysc_m32(~0x7fffffff, 0x80000000, RALINK_PCIE_CLK_GEN); ++ rt_sysc_m32(~0x80ffffff, 0xa << 24, RALINK_PCIE_CLK_GEN1); ++ ++ mdelay(50); ++ reset_control_deassert(rstpcie0); ++ pcie_m32(BIT(1), 0, RALINK_PCI_PCICFG_ADDR); ++ mdelay(100); ++ ++ if (( pcie_r32(RALINK_PCI0_STATUS) & 0x1) == 0) { ++ reset_control_assert(rstpcie0); ++ rt_sysc_m32(RALINK_PCIE0_CLK_EN, 0, RALINK_CLKCFG1); ++ rt_sysc_m32(LC_CKDRVPD_, 
PDRV_SW_SET, PPLL_DRV); ++ printk("PCIE0 no card, disable it(RST&CLK)\n"); ++ } ++ ++ bridge_w32(0xffffffff, RALINK_PCI_MEMBASE); ++ bridge_w32(RALINK_PCI_IO_MAP_BASE, RALINK_PCI_IOBASE); ++ ++ pcie_w32(0x7FFF0000, RALINK_PCI0_BAR0SETUP_ADDR); ++ pcie_w32(MEMORY_BASE, RALINK_PCI0_IMBASEBAR0_ADDR); ++ pcie_w32(0x08021814, RALINK_PCI0_ID); ++ pcie_w32(0x06040001, RALINK_PCI0_CLASS); ++ pcie_w32(0x28801814, RALINK_PCI0_SUBID); ++ pcie_m32(0, BIT(20), RALINK_PCI_PCIMSK_ADDR); ++ ++ register_pci_controller(&mt7620a_controller); ++ ++ return 0; ++} ++ ++int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) ++{ ++ const struct resource *res; ++ u16 cmd; ++ u32 val; ++ int i, irq = 0; ++ ++ if ((dev->bus->number == 0) && (slot == 0)) { ++ pcie_w32(0x7FFF0001, RALINK_PCI0_BAR0SETUP_ADDR); //open 7FFF:2G; ENABLE ++ pci_config_write(dev->bus, 0, PCI_BASE_ADDRESS_0, 4, MEMORY_BASE); ++ pci_config_read(dev->bus, 0, PCI_BASE_ADDRESS_0, 4, &val); ++ } else if ((dev->bus->number == 1) && (slot == 0x0)) { ++ irq = RALINK_INT_PCIE0; ++ } else { ++ printk("bus=0x%x, slot = 0x%x\n", dev->bus->number, slot); ++ return 0; ++ } ++ ++ for (i = 0; i < 6; i++) { ++ res = &dev->resource[i]; ++ } ++ ++ pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, 0x14); //configure cache line size 0x14 ++ pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0xFF); //configure latency timer 0x10 ++ pci_read_config_word(dev, PCI_COMMAND, &cmd); ++ ++ // FIXME ++ cmd = cmd | PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY; ++ pci_write_config_word(dev, PCI_COMMAND, cmd); ++ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq); ++ //pci_write_config_byte(dev, PCI_INTERRUPT_PIN, dev->irq); ++ ++ return irq; ++} ++ ++int pcibios_plat_dev_init(struct pci_dev *dev) ++{ ++ return 0; ++} ++ ++static const struct of_device_id mt7620a_pci_ids[] = { ++ { .compatible = "ralink,mt7620a-pci" }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(of, mt7620a_pci_ids); ++ ++static struct platform_driver mt7620a_pci_driver = { ++ .probe = mt7620a_pci_probe, ++ .driver = { ++ .name = "mt7620a-pci", ++ .owner = THIS_MODULE, ++ .of_match_table = of_match_ptr(mt7620a_pci_ids), ++ }, ++}; ++ ++static int __init mt7620a_pci_init(void) ++{ ++ return platform_driver_register(&mt7620a_pci_driver); ++} ++ ++arch_initcall(mt7620a_pci_init); +--- a/arch/mips/ralink/Kconfig ++++ b/arch/mips/ralink/Kconfig +@@ -33,6 +33,7 @@ choice + bool "MT7620" + select USB_ARCH_HAS_OHCI + select USB_ARCH_HAS_EHCI ++ select HW_HAS_PCI + + endchoice + diff --git a/target/linux/ramips/patches-3.10/0115-serial-of-allow-au1x00-and-rt288x-to-load-from-OF.patch b/target/linux/ramips/patches-3.10/0115-serial-of-allow-au1x00-and-rt288x-to-load-from-OF.patch deleted file mode 100644 index b12817e000..0000000000 --- a/target/linux/ramips/patches-3.10/0115-serial-of-allow-au1x00-and-rt288x-to-load-from-OF.patch +++ /dev/null @@ -1,27 +0,0 @@ -From 53b934f796611b9a27b698429f1aaec0fe678693 Mon Sep 17 00:00:00 2001 -From: John Crispin -Date: Sun, 14 Jul 2013 23:18:57 +0200 -Subject: [PATCH 21/33] serial: of: allow au1x00 and rt288x to load from OF - -In order to make serial_8250 loadable via OF on Au1x00 and Ralink WiSoC we need -to default the iotype to UPIO_AU. 
- -Signed-off-by: John Crispin ---- - drivers/tty/serial/of_serial.c | 5 ++++- - 1 file changed, 4 insertions(+), 1 deletion(-) - ---- a/drivers/tty/serial/of_serial.c -+++ b/drivers/tty/serial/of_serial.c -@@ -103,7 +103,10 @@ static int of_platform_serial_setup(stru - port->fifosize = prop; - - port->irq = irq_of_parse_and_map(np, 0); -- port->iotype = UPIO_MEM; -+ if (of_device_is_compatible(np, "ralink,rt2880-uart")) -+ port->iotype = UPIO_AU; -+ else -+ port->iotype = UPIO_MEM; - if (of_property_read_u32(np, "reg-io-width", &prop) == 0) { - switch (prop) { - case 1: diff --git a/target/linux/ramips/patches-3.10/0116-NET-multi-phy-support.patch b/target/linux/ramips/patches-3.10/0116-NET-multi-phy-support.patch new file mode 100644 index 0000000000..218d0828dd --- /dev/null +++ b/target/linux/ramips/patches-3.10/0116-NET-multi-phy-support.patch @@ -0,0 +1,54 @@ +From bed88d4cb806d2738528cb7d368d6df79d9c1424 Mon Sep 17 00:00:00 2001 +From: John Crispin +Date: Sat, 11 May 2013 23:40:19 +0200 +Subject: [PATCH 116/133] NET: multi phy support + +Signed-off-by: John Crispin +--- + drivers/net/phy/phy.c | 9 ++++++--- + include/linux/phy.h | 2 +- + 2 files changed, 7 insertions(+), 4 deletions(-) + +--- a/drivers/net/phy/phy.c ++++ b/drivers/net/phy/phy.c +@@ -820,7 +820,8 @@ void phy_state_machine(struct work_struc + * negotiation for now */ + if (!phydev->link) { + phydev->state = PHY_NOLINK; +- netif_carrier_off(phydev->attached_dev); ++ if (!phydev->no_auto_carrier_off) ++ netif_carrier_off(phydev->attached_dev); + phydev->adjust_link(phydev->attached_dev); + break; + } +@@ -890,7 +891,8 @@ void phy_state_machine(struct work_struc + netif_carrier_on(phydev->attached_dev); + } else { + phydev->state = PHY_NOLINK; +- netif_carrier_off(phydev->attached_dev); ++ if (!phydev->no_auto_carrier_off) ++ netif_carrier_off(phydev->attached_dev); + } + + phydev->adjust_link(phydev->attached_dev); +@@ -902,7 +904,8 @@ void phy_state_machine(struct work_struc + case PHY_HALTED: + if (phydev->link) { + phydev->link = 0; +- netif_carrier_off(phydev->attached_dev); ++ if (!phydev->no_auto_carrier_off) ++ netif_carrier_off(phydev->attached_dev); + phydev->adjust_link(phydev->attached_dev); + } + break; +--- a/include/linux/phy.h ++++ b/include/linux/phy.h +@@ -298,7 +298,7 @@ struct phy_device { + + struct phy_c45_device_ids c45_ids; + bool is_c45; +- ++ bool no_auto_carrier_off; + enum phy_state state; + + u32 dev_flags; diff --git a/target/linux/ramips/patches-3.10/0116-i2c-MIPS-adds-ralink-I2C-driver.patch b/target/linux/ramips/patches-3.10/0116-i2c-MIPS-adds-ralink-I2C-driver.patch deleted file mode 100644 index f8f86c00ff..0000000000 --- a/target/linux/ramips/patches-3.10/0116-i2c-MIPS-adds-ralink-I2C-driver.patch +++ /dev/null @@ -1,345 +0,0 @@ -From 4596818bca07e0928168970839e08875cf51b4cc Mon Sep 17 00:00:00 2001 -From: John Crispin -Date: Mon, 29 Apr 2013 14:40:43 +0200 -Subject: [PATCH 26/33] i2c: MIPS: adds ralink I2C driver - -Signed-off-by: John Crispin ---- - .../devicetree/bindings/i2c/i2c-ralink.txt | 27 ++ - drivers/i2c/busses/Kconfig | 4 + - drivers/i2c/busses/Makefile | 1 + - drivers/i2c/busses/i2c-ralink.c | 274 ++++++++++++++++++++ - 4 files changed, 306 insertions(+) - create mode 100644 Documentation/devicetree/bindings/i2c/i2c-ralink.txt - create mode 100644 drivers/i2c/busses/i2c-ralink.c - ---- /dev/null -+++ b/Documentation/devicetree/bindings/i2c/i2c-ralink.txt -@@ -0,0 +1,27 @@ -+I2C for Ralink platforms -+ -+Required properties : -+- compatible : Must be "link,rt3052-i2c" -+- 
reg: physical base address of the controller and length of memory mapped -+ region. -+- #address-cells = <1>; -+- #size-cells = <0>; -+ -+Optional properties: -+- Child nodes conforming to i2c bus binding -+ -+Example : -+ -+palmbus@10000000 { -+ i2c@900 { -+ compatible = "link,rt3052-i2c"; -+ reg = <0x900 0x100>; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ -+ hwmon@4b { -+ compatible = "national,lm92"; -+ reg = <0x4b>; -+ }; -+ }; -+}; ---- a/drivers/i2c/busses/Kconfig -+++ b/drivers/i2c/busses/Kconfig -@@ -630,6 +630,10 @@ config I2C_PXA_SLAVE - is necessary for systems where the PXA may be a target on the - I2C bus. - -+config I2C_RALINK -+ tristate "Ralink I2C Controller" -+ select OF_I2C -+ - config HAVE_S3C2410_I2C - bool - help ---- a/drivers/i2c/busses/Makefile -+++ b/drivers/i2c/busses/Makefile -@@ -62,6 +62,7 @@ obj-$(CONFIG_I2C_PNX) += i2c-pnx.o - obj-$(CONFIG_I2C_PUV3) += i2c-puv3.o - obj-$(CONFIG_I2C_PXA) += i2c-pxa.o - obj-$(CONFIG_I2C_PXA_PCI) += i2c-pxa-pci.o -+obj-$(CONFIG_I2C_RALINK) += i2c-ralink.o - obj-$(CONFIG_I2C_S3C2410) += i2c-s3c2410.o - obj-$(CONFIG_I2C_S6000) += i2c-s6000.o - obj-$(CONFIG_I2C_SH7760) += i2c-sh7760.o ---- /dev/null -+++ b/drivers/i2c/busses/i2c-ralink.c -@@ -0,0 +1,274 @@ -+/* -+ * drivers/i2c/busses/i2c-ralink.c -+ * -+ * Copyright (C) 2013 Steven Liu -+ * -+ * This software is licensed under the terms of the GNU General Public -+ * License version 2, as published by the Free Software Foundation, and -+ * may be copied, distributed, and modified under those terms. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+ -+#define REG_CONFIG_REG 0x00 -+#define REG_CLKDIV_REG 0x04 -+#define REG_DEVADDR_REG 0x08 -+#define REG_ADDR_REG 0x0C -+#define REG_DATAOUT_REG 0x10 -+#define REG_DATAIN_REG 0x14 -+#define REG_STATUS_REG 0x18 -+#define REG_STARTXFR_REG 0x1C -+#define REG_BYTECNT_REG 0x20 -+ -+#define I2C_STARTERR BIT(4) -+#define I2C_ACKERR BIT(3) -+#define I2C_DATARDY BIT(2) -+#define I2C_SDOEMPTY BIT(1) -+#define I2C_BUSY BIT(0) -+ -+#define I2C_DEVADLEN_7 (6 << 2) -+#define I2C_ADDRDIS BIT(1) -+ -+#define I2C_RETRY 0x400 -+ -+#define CLKDIV_VALUE 200 // clock rate is 40M, 40M / (200*2) = 100k (standard i2c bus rate). -+//#define CLKDIV_VALUE 50 // clock rate is 40M, 40M / (50*2) = 400k (fast i2c bus rate). 
-+ -+#define READ_CMD 0x01 -+#define WRITE_CMD 0x00 -+#define READ_BLOCK 64 -+ -+static void __iomem *membase; -+static struct i2c_adapter *adapter; -+ -+static void rt_i2c_w32(u32 val, unsigned reg) -+{ -+ iowrite32(val, membase + reg); -+} -+ -+static u32 rt_i2c_r32(unsigned reg) -+{ -+ return ioread32(membase + reg); -+} -+ -+static inline int rt_i2c_wait_rx_done(void) -+{ -+ int retries = I2C_RETRY; -+ -+ do { -+ if (!retries--) -+ break; -+ } while(!(rt_i2c_r32(REG_STATUS_REG) & I2C_DATARDY)); -+ -+ return (retries < 0); -+} -+ -+static inline int rt_i2c_wait_idle(void) -+{ -+ int retries = I2C_RETRY; -+ -+ do { -+ if (!retries--) -+ break; -+ } while(rt_i2c_r32(REG_STATUS_REG) & I2C_BUSY); -+ -+ return (retries < 0); -+} -+ -+static inline int rt_i2c_wait_tx_done(void) -+{ -+ int retries = I2C_RETRY; -+ -+ do { -+ if (!retries--) -+ break; -+ } while(!(rt_i2c_r32(REG_STATUS_REG) & I2C_SDOEMPTY)); -+ -+ return (retries < 0); -+} -+ -+static int rt_i2c_handle_msg(struct i2c_adapter *a, struct i2c_msg* msg) -+{ -+ int i = 0, j = 0, pos = 0; -+ int nblock = msg->len / READ_BLOCK; -+ int rem = msg->len % READ_BLOCK; -+ -+ if (msg->flags & I2C_M_TEN) { -+ printk("10 bits addr not supported\n"); -+ return -EINVAL; -+ } -+ -+ if (msg->flags & I2C_M_RD) { -+ for (i = 0; i < nblock; i++) { -+ rt_i2c_wait_idle(); -+ rt_i2c_w32(READ_BLOCK - 1, REG_BYTECNT_REG); -+ rt_i2c_w32(READ_CMD, REG_STARTXFR_REG); -+ for (j = 0; j < READ_BLOCK; j++) { -+ if (rt_i2c_wait_rx_done()) -+ return -1; -+ msg->buf[pos++] = rt_i2c_r32(REG_DATAIN_REG); -+ } -+ } -+ -+ rt_i2c_wait_idle(); -+ rt_i2c_w32(rem - 1, REG_BYTECNT_REG); -+ rt_i2c_w32(READ_CMD, REG_STARTXFR_REG); -+ for (i = 0; i < rem; i++) { -+ if (rt_i2c_wait_rx_done()) -+ return -1; -+ msg->buf[pos++] = rt_i2c_r32(REG_DATAIN_REG); -+ } -+ } else { -+ rt_i2c_wait_idle(); -+ rt_i2c_w32(msg->len - 1, REG_BYTECNT_REG); -+ for (i = 0; i < msg->len; i++) { -+ rt_i2c_w32(msg->buf[i], REG_DATAOUT_REG); -+ rt_i2c_w32(WRITE_CMD, REG_STARTXFR_REG); -+ if (rt_i2c_wait_tx_done()) -+ return -1; -+ } -+ } -+ -+ return 0; -+} -+ -+static int rt_i2c_master_xfer(struct i2c_adapter *a, struct i2c_msg *m, int n) -+{ -+ int i = 0; -+ int ret = 0; -+ -+ if (rt_i2c_wait_idle()) { -+ printk("i2c transfer failed\n"); -+ return 0; -+ } -+ -+ device_reset(a->dev.parent); -+ -+ rt_i2c_w32(m->addr, REG_DEVADDR_REG); -+ rt_i2c_w32(I2C_DEVADLEN_7 | I2C_ADDRDIS, REG_CONFIG_REG); -+ rt_i2c_w32(CLKDIV_VALUE, REG_CLKDIV_REG); -+ -+ for (i = 0; i < n && !ret; i++) -+ ret = rt_i2c_handle_msg(a, &m[i]); -+ -+ if (ret) { -+ printk("i2c transfer failed\n"); -+ return 0; -+ } -+ -+ return n; -+} -+ -+static u32 rt_i2c_func(struct i2c_adapter *a) -+{ -+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; -+} -+ -+static const struct i2c_algorithm rt_i2c_algo = { -+ .master_xfer = rt_i2c_master_xfer, -+ .functionality = rt_i2c_func, -+}; -+ -+static int rt_i2c_probe(struct platform_device *pdev) -+{ -+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -+ int ret; -+ -+ if (!res) { -+ dev_err(&pdev->dev, "no memory resource found\n"); -+ return -ENODEV; -+ } -+ -+ adapter = devm_kzalloc(&pdev->dev, sizeof(struct i2c_adapter), GFP_KERNEL); -+ if (!adapter) { -+ dev_err(&pdev->dev, "failed to allocate i2c_adapter\n"); -+ return -ENOMEM; -+ } -+ -+ membase = devm_request_and_ioremap(&pdev->dev, res); -+ if (IS_ERR(membase)) -+ return PTR_ERR(membase); -+ -+ strlcpy(adapter->name, dev_name(&pdev->dev), sizeof(adapter->name)); -+ adapter->owner = THIS_MODULE; -+ adapter->nr = pdev->id; -+ 
adapter->timeout = HZ; -+ adapter->algo = &rt_i2c_algo; -+ adapter->class = I2C_CLASS_HWMON | I2C_CLASS_SPD; -+ adapter->dev.parent = &pdev->dev; -+ adapter->dev.of_node = pdev->dev.of_node; -+ -+ ret = i2c_add_numbered_adapter(adapter); -+ if (ret) -+ return ret; -+ -+ of_i2c_register_devices(adapter); -+ -+ platform_set_drvdata(pdev, adapter); -+ -+ dev_info(&pdev->dev, "loaded\n"); -+ -+ return 0; -+} -+ -+static int rt_i2c_remove(struct platform_device *pdev) -+{ -+ platform_set_drvdata(pdev, NULL); -+ -+ return 0; -+} -+ -+static const struct of_device_id i2c_rt_dt_ids[] = { -+ { .compatible = "ralink,rt2880-i2c", }, -+ { /* sentinel */ } -+}; -+ -+MODULE_DEVICE_TABLE(of, i2c_rt_dt_ids); -+ -+static struct platform_driver rt_i2c_driver = { -+ .probe = rt_i2c_probe, -+ .remove = rt_i2c_remove, -+ .driver = { -+ .owner = THIS_MODULE, -+ .name = "i2c-ralink", -+ .of_match_table = i2c_rt_dt_ids, -+ }, -+}; -+ -+static int __init i2c_rt_init (void) -+{ -+ return platform_driver_register(&rt_i2c_driver); -+} -+subsys_initcall(i2c_rt_init); -+ -+static void __exit i2c_rt_exit (void) -+{ -+ platform_driver_unregister(&rt_i2c_driver); -+} -+ -+module_exit (i2c_rt_exit); -+ -+MODULE_AUTHOR("Steven Liu "); -+MODULE_DESCRIPTION("Ralink I2c host driver"); -+MODULE_LICENSE("GPL"); -+MODULE_ALIAS("platform:Ralink-I2C"); diff --git a/target/linux/ramips/patches-3.10/0117-NET-add-of_get_mac_address_mtd.patch b/target/linux/ramips/patches-3.10/0117-NET-add-of_get_mac_address_mtd.patch new file mode 100644 index 0000000000..a48bd4b48d --- /dev/null +++ b/target/linux/ramips/patches-3.10/0117-NET-add-of_get_mac_address_mtd.patch @@ -0,0 +1,76 @@ +From 1282a0da09e059288eb8b576998ea001680f6628 Mon Sep 17 00:00:00 2001 +From: John Crispin +Date: Sun, 14 Jul 2013 23:26:15 +0200 +Subject: [PATCH 117/133] NET: add of_get_mac_address_mtd() + +Many embedded devices have information such as mac addresses stored inside mtd +devices. This patch allows us to add a property inside a node describing a +network interface. The new property points at a mtd partition with an offset +where the mac address can be found. 
+ +Signed-off-by: John Crispin +--- + drivers/of/of_net.c | 37 +++++++++++++++++++++++++++++++++++++ + include/linux/of_net.h | 1 + + 2 files changed, 38 insertions(+) + +--- a/drivers/of/of_net.c ++++ b/drivers/of/of_net.c +@@ -10,6 +10,7 @@ + #include + #include + #include ++#include + + /** + * It maps 'enum phy_interface_t' found in include/linux/phy.h +@@ -92,3 +93,39 @@ const void *of_get_mac_address(struct de + return NULL; + } + EXPORT_SYMBOL(of_get_mac_address); ++ ++int of_get_mac_address_mtd(struct device_node *np, void *mac) ++{ ++ struct device_node *mtd_np = NULL; ++ size_t retlen; ++ int size, ret; ++ struct mtd_info *mtd; ++ const char *part; ++ const __be32 *list; ++ phandle phandle; ++ ++ list = of_get_property(np, "mtd-mac-address", &size); ++ if (!list || (size != (2 * sizeof(*list)))) ++ return -ENOENT; ++ ++ phandle = be32_to_cpup(list++); ++ if (phandle) ++ mtd_np = of_find_node_by_phandle(phandle); ++ ++ if (!mtd_np) ++ return -ENOENT; ++ ++ part = of_get_property(mtd_np, "label", NULL); ++ if (!part) ++ part = mtd_np->name; ++ ++ mtd = get_mtd_device_nm(part); ++ if (IS_ERR(mtd)) ++ return PTR_ERR(mtd); ++ ++ ret = mtd_read(mtd, be32_to_cpup(list), 6, &retlen, (u_char *) mac); ++ put_mtd_device(mtd); ++ ++ return ret; ++} ++EXPORT_SYMBOL_GPL(of_get_mac_address_mtd); +--- a/include/linux/of_net.h ++++ b/include/linux/of_net.h +@@ -11,6 +11,7 @@ + #include + extern const int of_get_phy_mode(struct device_node *np); + extern const void *of_get_mac_address(struct device_node *np); ++extern int of_get_mac_address_mtd(struct device_node *np, void *mac); + #else + static inline const int of_get_phy_mode(struct device_node *np) + { diff --git a/target/linux/ramips/patches-3.10/0117-mmc-MIPS-ralink-add-sdhci-for-mt7620a-SoC.patch b/target/linux/ramips/patches-3.10/0117-mmc-MIPS-ralink-add-sdhci-for-mt7620a-SoC.patch deleted file mode 100644 index 4950a701fb..0000000000 --- a/target/linux/ramips/patches-3.10/0117-mmc-MIPS-ralink-add-sdhci-for-mt7620a-SoC.patch +++ /dev/null @@ -1,3433 +0,0 @@ -From de1defdad7554d6ba885a6d3dc55105e01e9a07e Mon Sep 17 00:00:00 2001 -From: John Crispin -Date: Thu, 2 May 2013 14:59:01 +0200 -Subject: [PATCH 27/33] mmc: MIPS: ralink: add sdhci for mt7620a SoC - -Signed-off-by: John Crispin ---- - drivers/mmc/host/Kconfig | 11 + - drivers/mmc/host/Makefile | 1 + - drivers/mmc/host/mt6575_sd.h | 1068 ++++++++++++++++++ - drivers/mmc/host/sdhci-mt7620.c | 2314 +++++++++++++++++++++++++++++++++++++++ - 4 files changed, 3394 insertions(+) - create mode 100644 drivers/mmc/host/mt6575_sd.h - create mode 100644 drivers/mmc/host/sdhci-mt7620.c - ---- a/drivers/mmc/host/Kconfig -+++ b/drivers/mmc/host/Kconfig -@@ -260,6 +260,17 @@ config MMC_SDHCI_BCM2835 - - If unsure, say N. - -+config MMC_SDHCI_MT7620 -+ tristate "SDHCI platform support for the MT7620 SD/MMC Controller" -+ depends on SOC_MT7620 -+ depends on MMC_SDHCI_PLTFM -+ select MMC_SDHCI_IO_ACCESSORS -+ help -+ This selects the BCM2835 SD/MMC controller. If you have a BCM2835 -+ platform with SD or MMC devices, say Y or M here. -+ -+ If unsure, say N. 
-+ - config MMC_OMAP - tristate "TI OMAP Multimedia Card Interface support" - depends on ARCH_OMAP ---- a/drivers/mmc/host/Makefile -+++ b/drivers/mmc/host/Makefile -@@ -62,6 +62,7 @@ obj-$(CONFIG_MMC_SDHCI_TEGRA) += sdhci- - obj-$(CONFIG_MMC_SDHCI_OF_ESDHC) += sdhci-of-esdhc.o - obj-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o - obj-$(CONFIG_MMC_SDHCI_BCM2835) += sdhci-bcm2835.o -+obj-$(CONFIG_MMC_SDHCI_MT7620) += sdhci-mt7620.o - - ifeq ($(CONFIG_CB710_DEBUG),y) - CFLAGS-cb710-mmc += -DDEBUG ---- /dev/null -+++ b/drivers/mmc/host/mt6575_sd.h -@@ -0,0 +1,1068 @@ -+/* Copyright Statement: -+ * -+ * This software/firmware and related documentation ("MediaTek Software") are -+ * protected under relevant copyright laws. The information contained herein -+ * is confidential and proprietary to MediaTek Inc. and/or its licensors. -+ * Without the prior written permission of MediaTek inc. and/or its licensors, -+ * any reproduction, modification, use or disclosure of MediaTek Software, -+ * and information contained herein, in whole or in part, shall be strictly prohibited. -+ */ -+/* MediaTek Inc. (C) 2010. All rights reserved. -+ * -+ * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES -+ * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE") -+ * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON -+ * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES, -+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF -+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT. -+ * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE -+ * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR -+ * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH -+ * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES -+ * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES -+ * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK -+ * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR -+ * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND -+ * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE, -+ * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE, -+ * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO -+ * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE. -+ * -+ * The following software/firmware and/or related documentation ("MediaTek Software") -+ * have been modified by MediaTek Inc. All revisions are subject to any receiver's -+ * applicable license agreements with MediaTek Inc. 
-+ */ -+ -+#ifndef MT6575_SD_H -+#define MT6575_SD_H -+ -+#include -+#include -+ -+// #include /* --- by chhung */ -+ -+typedef void (*sdio_irq_handler_t)(void*); /* external irq handler */ -+typedef void (*pm_callback_t)(pm_message_t state, void *data); -+ -+#define MSDC_CD_PIN_EN (1 << 0) /* card detection pin is wired */ -+#define MSDC_WP_PIN_EN (1 << 1) /* write protection pin is wired */ -+#define MSDC_RST_PIN_EN (1 << 2) /* emmc reset pin is wired */ -+#define MSDC_SDIO_IRQ (1 << 3) /* use internal sdio irq (bus) */ -+#define MSDC_EXT_SDIO_IRQ (1 << 4) /* use external sdio irq */ -+#define MSDC_REMOVABLE (1 << 5) /* removable slot */ -+#define MSDC_SYS_SUSPEND (1 << 6) /* suspended by system */ -+#define MSDC_HIGHSPEED (1 << 7) /* high-speed mode support */ -+#define MSDC_UHS1 (1 << 8) /* uhs-1 mode support */ -+#define MSDC_DDR (1 << 9) /* ddr mode support */ -+#define MSDC_SPE (1 << 10) /* special support */ -+#define MSDC_INTERNAL_CLK (1 << 11) /* Force Internal clock */ -+#define MSDC_TABDRV (1 << 12) /* TABLET */ -+ -+ -+#define MSDC_SMPL_RISING (0) -+#define MSDC_SMPL_FALLING (1) -+ -+#define MSDC_CMD_PIN (0) -+#define MSDC_DAT_PIN (1) -+#define MSDC_CD_PIN (2) -+#define MSDC_WP_PIN (3) -+#define MSDC_RST_PIN (4) -+ -+enum { -+ MSDC_CLKSRC_26MHZ = 0, -+ MSDC_CLKSRC_197MHZ = 1, -+ MSDC_CLKSRC_208MHZ = 2 -+}; -+ -+struct msdc_hw { -+ unsigned char clk_src; /* host clock source */ -+ unsigned char cmd_edge; /* command latch edge */ -+ unsigned char data_edge; /* data latch edge */ -+ unsigned char clk_drv; /* clock pad driving */ -+ unsigned char cmd_drv; /* command pad driving */ -+ unsigned char dat_drv; /* data pad driving */ -+ unsigned long flags; /* hardware capability flags */ -+ unsigned long data_pins; /* data pins */ -+ unsigned long data_offset; /* data address offset */ -+ -+ /* config gpio pull mode */ -+ void (*config_gpio_pin)(int type, int pull); -+ -+ /* external power control for card */ -+ void (*ext_power_on)(void); -+ void (*ext_power_off)(void); -+ -+ /* external sdio irq operations */ -+ void (*request_sdio_eirq)(sdio_irq_handler_t sdio_irq_handler, void *data); -+ void (*enable_sdio_eirq)(void); -+ void (*disable_sdio_eirq)(void); -+ -+ /* external cd irq operations */ -+ void (*request_cd_eirq)(sdio_irq_handler_t cd_irq_handler, void *data); -+ void (*enable_cd_eirq)(void); -+ void (*disable_cd_eirq)(void); -+ int (*get_cd_status)(void); -+ -+ /* power management callback for external module */ -+ void (*register_pm)(pm_callback_t pm_cb, void *data); -+}; -+ -+extern struct msdc_hw msdc0_hw; -+extern struct msdc_hw msdc1_hw; -+extern struct msdc_hw msdc2_hw; -+extern struct msdc_hw msdc3_hw; -+ -+ -+/*--------------------------------------------------------------------------*/ -+/* Common Macro */ -+/*--------------------------------------------------------------------------*/ -+#define REG_ADDR(x) ((volatile u32*)(base + OFFSET_##x)) -+ -+/*--------------------------------------------------------------------------*/ -+/* Common Definition */ -+/*--------------------------------------------------------------------------*/ -+#define MSDC_FIFO_SZ (128) -+#define MSDC_FIFO_THD (64) // (128) -+#define MSDC_NUM (4) -+ -+#define MSDC_MS (0) -+#define MSDC_SDMMC (1) -+ -+#define MSDC_MODE_UNKNOWN (0) -+#define MSDC_MODE_PIO (1) -+#define MSDC_MODE_DMA_BASIC (2) -+#define MSDC_MODE_DMA_DESC (3) -+#define MSDC_MODE_DMA_ENHANCED (4) -+#define MSDC_MODE_MMC_STREAM (5) -+ -+#define MSDC_BUS_1BITS (0) -+#define MSDC_BUS_4BITS (1) -+#define MSDC_BUS_8BITS (2) -+ 
-+#define MSDC_BRUST_8B (3) -+#define MSDC_BRUST_16B (4) -+#define MSDC_BRUST_32B (5) -+#define MSDC_BRUST_64B (6) -+ -+#define MSDC_PIN_PULL_NONE (0) -+#define MSDC_PIN_PULL_DOWN (1) -+#define MSDC_PIN_PULL_UP (2) -+#define MSDC_PIN_KEEP (3) -+ -+#define MSDC_MAX_SCLK (48000000) /* +/- by chhung */ -+#define MSDC_MIN_SCLK (260000) -+ -+#define MSDC_AUTOCMD12 (0x0001) -+#define MSDC_AUTOCMD23 (0x0002) -+#define MSDC_AUTOCMD19 (0x0003) -+ -+#define MSDC_EMMC_BOOTMODE0 (0) /* Pull low CMD mode */ -+#define MSDC_EMMC_BOOTMODE1 (1) /* Reset CMD mode */ -+ -+enum { -+ RESP_NONE = 0, -+ RESP_R1, -+ RESP_R2, -+ RESP_R3, -+ RESP_R4, -+ RESP_R5, -+ RESP_R6, -+ RESP_R7, -+ RESP_R1B -+}; -+ -+/*--------------------------------------------------------------------------*/ -+/* Register Offset */ -+/*--------------------------------------------------------------------------*/ -+#define OFFSET_MSDC_CFG (0x0) -+#define OFFSET_MSDC_IOCON (0x04) -+#define OFFSET_MSDC_PS (0x08) -+#define OFFSET_MSDC_INT (0x0c) -+#define OFFSET_MSDC_INTEN (0x10) -+#define OFFSET_MSDC_FIFOCS (0x14) -+#define OFFSET_MSDC_TXDATA (0x18) -+#define OFFSET_MSDC_RXDATA (0x1c) -+#define OFFSET_SDC_CFG (0x30) -+#define OFFSET_SDC_CMD (0x34) -+#define OFFSET_SDC_ARG (0x38) -+#define OFFSET_SDC_STS (0x3c) -+#define OFFSET_SDC_RESP0 (0x40) -+#define OFFSET_SDC_RESP1 (0x44) -+#define OFFSET_SDC_RESP2 (0x48) -+#define OFFSET_SDC_RESP3 (0x4c) -+#define OFFSET_SDC_BLK_NUM (0x50) -+#define OFFSET_SDC_CSTS (0x58) -+#define OFFSET_SDC_CSTS_EN (0x5c) -+#define OFFSET_SDC_DCRC_STS (0x60) -+#define OFFSET_EMMC_CFG0 (0x70) -+#define OFFSET_EMMC_CFG1 (0x74) -+#define OFFSET_EMMC_STS (0x78) -+#define OFFSET_EMMC_IOCON (0x7c) -+#define OFFSET_SDC_ACMD_RESP (0x80) -+#define OFFSET_SDC_ACMD19_TRG (0x84) -+#define OFFSET_SDC_ACMD19_STS (0x88) -+#define OFFSET_MSDC_DMA_SA (0x90) -+#define OFFSET_MSDC_DMA_CA (0x94) -+#define OFFSET_MSDC_DMA_CTRL (0x98) -+#define OFFSET_MSDC_DMA_CFG (0x9c) -+#define OFFSET_MSDC_DBG_SEL (0xa0) -+#define OFFSET_MSDC_DBG_OUT (0xa4) -+#define OFFSET_MSDC_PATCH_BIT (0xb0) -+#define OFFSET_MSDC_PATCH_BIT1 (0xb4) -+#define OFFSET_MSDC_PAD_CTL0 (0xe0) -+#define OFFSET_MSDC_PAD_CTL1 (0xe4) -+#define OFFSET_MSDC_PAD_CTL2 (0xe8) -+#define OFFSET_MSDC_PAD_TUNE (0xec) -+#define OFFSET_MSDC_DAT_RDDLY0 (0xf0) -+#define OFFSET_MSDC_DAT_RDDLY1 (0xf4) -+#define OFFSET_MSDC_HW_DBG (0xf8) -+#define OFFSET_MSDC_VERSION (0x100) -+#define OFFSET_MSDC_ECO_VER (0x104) -+ -+/*--------------------------------------------------------------------------*/ -+/* Register Address */ -+/*--------------------------------------------------------------------------*/ -+ -+/* common register */ -+#define MSDC_CFG REG_ADDR(MSDC_CFG) -+#define MSDC_IOCON REG_ADDR(MSDC_IOCON) -+#define MSDC_PS REG_ADDR(MSDC_PS) -+#define MSDC_INT REG_ADDR(MSDC_INT) -+#define MSDC_INTEN REG_ADDR(MSDC_INTEN) -+#define MSDC_FIFOCS REG_ADDR(MSDC_FIFOCS) -+#define MSDC_TXDATA REG_ADDR(MSDC_TXDATA) -+#define MSDC_RXDATA REG_ADDR(MSDC_RXDATA) -+#define MSDC_PATCH_BIT0 REG_ADDR(MSDC_PATCH_BIT) -+ -+/* sdmmc register */ -+#define SDC_CFG REG_ADDR(SDC_CFG) -+#define SDC_CMD REG_ADDR(SDC_CMD) -+#define SDC_ARG REG_ADDR(SDC_ARG) -+#define SDC_STS REG_ADDR(SDC_STS) -+#define SDC_RESP0 REG_ADDR(SDC_RESP0) -+#define SDC_RESP1 REG_ADDR(SDC_RESP1) -+#define SDC_RESP2 REG_ADDR(SDC_RESP2) -+#define SDC_RESP3 REG_ADDR(SDC_RESP3) -+#define SDC_BLK_NUM REG_ADDR(SDC_BLK_NUM) -+#define SDC_CSTS REG_ADDR(SDC_CSTS) -+#define SDC_CSTS_EN REG_ADDR(SDC_CSTS_EN) -+#define SDC_DCRC_STS REG_ADDR(SDC_DCRC_STS) -+ 
-+/* emmc register*/ -+#define EMMC_CFG0 REG_ADDR(EMMC_CFG0) -+#define EMMC_CFG1 REG_ADDR(EMMC_CFG1) -+#define EMMC_STS REG_ADDR(EMMC_STS) -+#define EMMC_IOCON REG_ADDR(EMMC_IOCON) -+ -+/* auto command register */ -+#define SDC_ACMD_RESP REG_ADDR(SDC_ACMD_RESP) -+#define SDC_ACMD19_TRG REG_ADDR(SDC_ACMD19_TRG) -+#define SDC_ACMD19_STS REG_ADDR(SDC_ACMD19_STS) -+ -+/* dma register */ -+#define MSDC_DMA_SA REG_ADDR(MSDC_DMA_SA) -+#define MSDC_DMA_CA REG_ADDR(MSDC_DMA_CA) -+#define MSDC_DMA_CTRL REG_ADDR(MSDC_DMA_CTRL) -+#define MSDC_DMA_CFG REG_ADDR(MSDC_DMA_CFG) -+ -+/* pad ctrl register */ -+#define MSDC_PAD_CTL0 REG_ADDR(MSDC_PAD_CTL0) -+#define MSDC_PAD_CTL1 REG_ADDR(MSDC_PAD_CTL1) -+#define MSDC_PAD_CTL2 REG_ADDR(MSDC_PAD_CTL2) -+ -+/* data read delay */ -+#define MSDC_DAT_RDDLY0 REG_ADDR(MSDC_DAT_RDDLY0) -+#define MSDC_DAT_RDDLY1 REG_ADDR(MSDC_DAT_RDDLY1) -+ -+/* debug register */ -+#define MSDC_DBG_SEL REG_ADDR(MSDC_DBG_SEL) -+#define MSDC_DBG_OUT REG_ADDR(MSDC_DBG_OUT) -+ -+/* misc register */ -+#define MSDC_PATCH_BIT REG_ADDR(MSDC_PATCH_BIT) -+#define MSDC_PATCH_BIT1 REG_ADDR(MSDC_PATCH_BIT1) -+#define MSDC_PAD_TUNE REG_ADDR(MSDC_PAD_TUNE) -+#define MSDC_HW_DBG REG_ADDR(MSDC_HW_DBG) -+#define MSDC_VERSION REG_ADDR(MSDC_VERSION) -+#define MSDC_ECO_VER REG_ADDR(MSDC_ECO_VER) /* ECO Version */ -+ -+/*--------------------------------------------------------------------------*/ -+/* Register Mask */ -+/*--------------------------------------------------------------------------*/ -+ -+/* MSDC_CFG mask */ -+#define MSDC_CFG_MODE (0x1 << 0) /* RW */ -+#define MSDC_CFG_CKPDN (0x1 << 1) /* RW */ -+#define MSDC_CFG_RST (0x1 << 2) /* RW */ -+#define MSDC_CFG_PIO (0x1 << 3) /* RW */ -+#define MSDC_CFG_CKDRVEN (0x1 << 4) /* RW */ -+#define MSDC_CFG_BV18SDT (0x1 << 5) /* RW */ -+#define MSDC_CFG_BV18PSS (0x1 << 6) /* R */ -+#define MSDC_CFG_CKSTB (0x1 << 7) /* R */ -+#define MSDC_CFG_CKDIV (0xff << 8) /* RW */ -+#define MSDC_CFG_CKMOD (0x3 << 16) /* RW */ -+ -+/* MSDC_IOCON mask */ -+#define MSDC_IOCON_SDR104CKS (0x1 << 0) /* RW */ -+#define MSDC_IOCON_RSPL (0x1 << 1) /* RW */ -+#define MSDC_IOCON_DSPL (0x1 << 2) /* RW */ -+#define MSDC_IOCON_DDLSEL (0x1 << 3) /* RW */ -+#define MSDC_IOCON_DDR50CKD (0x1 << 4) /* RW */ -+#define MSDC_IOCON_DSPLSEL (0x1 << 5) /* RW */ -+#define MSDC_IOCON_D0SPL (0x1 << 16) /* RW */ -+#define MSDC_IOCON_D1SPL (0x1 << 17) /* RW */ -+#define MSDC_IOCON_D2SPL (0x1 << 18) /* RW */ -+#define MSDC_IOCON_D3SPL (0x1 << 19) /* RW */ -+#define MSDC_IOCON_D4SPL (0x1 << 20) /* RW */ -+#define MSDC_IOCON_D5SPL (0x1 << 21) /* RW */ -+#define MSDC_IOCON_D6SPL (0x1 << 22) /* RW */ -+#define MSDC_IOCON_D7SPL (0x1 << 23) /* RW */ -+#define MSDC_IOCON_RISCSZ (0x3 << 24) /* RW */ -+ -+/* MSDC_PS mask */ -+#define MSDC_PS_CDEN (0x1 << 0) /* RW */ -+#define MSDC_PS_CDSTS (0x1 << 1) /* R */ -+#define MSDC_PS_CDDEBOUNCE (0xf << 12) /* RW */ -+#define MSDC_PS_DAT (0xff << 16) /* R */ -+#define MSDC_PS_CMD (0x1 << 24) /* R */ -+#define MSDC_PS_WP (0x1UL<< 31) /* R */ -+ -+/* MSDC_INT mask */ -+#define MSDC_INT_MMCIRQ (0x1 << 0) /* W1C */ -+#define MSDC_INT_CDSC (0x1 << 1) /* W1C */ -+#define MSDC_INT_ACMDRDY (0x1 << 3) /* W1C */ -+#define MSDC_INT_ACMDTMO (0x1 << 4) /* W1C */ -+#define MSDC_INT_ACMDCRCERR (0x1 << 5) /* W1C */ -+#define MSDC_INT_DMAQ_EMPTY (0x1 << 6) /* W1C */ -+#define MSDC_INT_SDIOIRQ (0x1 << 7) /* W1C */ -+#define MSDC_INT_CMDRDY (0x1 << 8) /* W1C */ -+#define MSDC_INT_CMDTMO (0x1 << 9) /* W1C */ -+#define MSDC_INT_RSPCRCERR (0x1 << 10) /* W1C */ -+#define MSDC_INT_CSTA 
(0x1 << 11) /* R */ -+#define MSDC_INT_XFER_COMPL (0x1 << 12) /* W1C */ -+#define MSDC_INT_DXFER_DONE (0x1 << 13) /* W1C */ -+#define MSDC_INT_DATTMO (0x1 << 14) /* W1C */ -+#define MSDC_INT_DATCRCERR (0x1 << 15) /* W1C */ -+#define MSDC_INT_ACMD19_DONE (0x1 << 16) /* W1C */ -+ -+/* MSDC_INTEN mask */ -+#define MSDC_INTEN_MMCIRQ (0x1 << 0) /* RW */ -+#define MSDC_INTEN_CDSC (0x1 << 1) /* RW */ -+#define MSDC_INTEN_ACMDRDY (0x1 << 3) /* RW */ -+#define MSDC_INTEN_ACMDTMO (0x1 << 4) /* RW */ -+#define MSDC_INTEN_ACMDCRCERR (0x1 << 5) /* RW */ -+#define MSDC_INTEN_DMAQ_EMPTY (0x1 << 6) /* RW */ -+#define MSDC_INTEN_SDIOIRQ (0x1 << 7) /* RW */ -+#define MSDC_INTEN_CMDRDY (0x1 << 8) /* RW */ -+#define MSDC_INTEN_CMDTMO (0x1 << 9) /* RW */ -+#define MSDC_INTEN_RSPCRCERR (0x1 << 10) /* RW */ -+#define MSDC_INTEN_CSTA (0x1 << 11) /* RW */ -+#define MSDC_INTEN_XFER_COMPL (0x1 << 12) /* RW */ -+#define MSDC_INTEN_DXFER_DONE (0x1 << 13) /* RW */ -+#define MSDC_INTEN_DATTMO (0x1 << 14) /* RW */ -+#define MSDC_INTEN_DATCRCERR (0x1 << 15) /* RW */ -+#define MSDC_INTEN_ACMD19_DONE (0x1 << 16) /* RW */ -+ -+/* MSDC_FIFOCS mask */ -+#define MSDC_FIFOCS_RXCNT (0xff << 0) /* R */ -+#define MSDC_FIFOCS_TXCNT (0xff << 16) /* R */ -+#define MSDC_FIFOCS_CLR (0x1UL<< 31) /* RW */ -+ -+/* SDC_CFG mask */ -+#define SDC_CFG_SDIOINTWKUP (0x1 << 0) /* RW */ -+#define SDC_CFG_INSWKUP (0x1 << 1) /* RW */ -+#define SDC_CFG_BUSWIDTH (0x3 << 16) /* RW */ -+#define SDC_CFG_SDIO (0x1 << 19) /* RW */ -+#define SDC_CFG_SDIOIDE (0x1 << 20) /* RW */ -+#define SDC_CFG_INTATGAP (0x1 << 21) /* RW */ -+#define SDC_CFG_DTOC (0xffUL << 24) /* RW */ -+ -+/* SDC_CMD mask */ -+#define SDC_CMD_OPC (0x3f << 0) /* RW */ -+#define SDC_CMD_BRK (0x1 << 6) /* RW */ -+#define SDC_CMD_RSPTYP (0x7 << 7) /* RW */ -+#define SDC_CMD_DTYP (0x3 << 11) /* RW */ -+#define SDC_CMD_DTYP (0x3 << 11) /* RW */ -+#define SDC_CMD_RW (0x1 << 13) /* RW */ -+#define SDC_CMD_STOP (0x1 << 14) /* RW */ -+#define SDC_CMD_GOIRQ (0x1 << 15) /* RW */ -+#define SDC_CMD_BLKLEN (0xfff<< 16) /* RW */ -+#define SDC_CMD_AUTOCMD (0x3 << 28) /* RW */ -+#define SDC_CMD_VOLSWTH (0x1 << 30) /* RW */ -+ -+/* SDC_STS mask */ -+#define SDC_STS_SDCBUSY (0x1 << 0) /* RW */ -+#define SDC_STS_CMDBUSY (0x1 << 1) /* RW */ -+#define SDC_STS_SWR_COMPL (0x1 << 31) /* RW */ -+ -+/* SDC_DCRC_STS mask */ -+#define SDC_DCRC_STS_NEG (0xf << 8) /* RO */ -+#define SDC_DCRC_STS_POS (0xff << 0) /* RO */ -+ -+/* EMMC_CFG0 mask */ -+#define EMMC_CFG0_BOOTSTART (0x1 << 0) /* W */ -+#define EMMC_CFG0_BOOTSTOP (0x1 << 1) /* W */ -+#define EMMC_CFG0_BOOTMODE (0x1 << 2) /* RW */ -+#define EMMC_CFG0_BOOTACKDIS (0x1 << 3) /* RW */ -+#define EMMC_CFG0_BOOTWDLY (0x7 << 12) /* RW */ -+#define EMMC_CFG0_BOOTSUPP (0x1 << 15) /* RW */ -+ -+/* EMMC_CFG1 mask */ -+#define EMMC_CFG1_BOOTDATTMC (0xfffff << 0) /* RW */ -+#define EMMC_CFG1_BOOTACKTMC (0xfffUL << 20) /* RW */ -+ -+/* EMMC_STS mask */ -+#define EMMC_STS_BOOTCRCERR (0x1 << 0) /* W1C */ -+#define EMMC_STS_BOOTACKERR (0x1 << 1) /* W1C */ -+#define EMMC_STS_BOOTDATTMO (0x1 << 2) /* W1C */ -+#define EMMC_STS_BOOTACKTMO (0x1 << 3) /* W1C */ -+#define EMMC_STS_BOOTUPSTATE (0x1 << 4) /* R */ -+#define EMMC_STS_BOOTACKRCV (0x1 << 5) /* W1C */ -+#define EMMC_STS_BOOTDATRCV (0x1 << 6) /* R */ -+ -+/* EMMC_IOCON mask */ -+#define EMMC_IOCON_BOOTRST (0x1 << 0) /* RW */ -+ -+/* SDC_ACMD19_TRG mask */ -+#define SDC_ACMD19_TRG_TUNESEL (0xf << 0) /* RW */ -+ -+/* MSDC_DMA_CTRL mask */ -+#define MSDC_DMA_CTRL_START (0x1 << 0) /* W */ -+#define MSDC_DMA_CTRL_STOP (0x1 << 1) /* 
W */ -+#define MSDC_DMA_CTRL_RESUME (0x1 << 2) /* W */ -+#define MSDC_DMA_CTRL_MODE (0x1 << 8) /* RW */ -+#define MSDC_DMA_CTRL_LASTBUF (0x1 << 10) /* RW */ -+#define MSDC_DMA_CTRL_BRUSTSZ (0x7 << 12) /* RW */ -+#define MSDC_DMA_CTRL_XFERSZ (0xffffUL << 16)/* RW */ -+ -+/* MSDC_DMA_CFG mask */ -+#define MSDC_DMA_CFG_STS (0x1 << 0) /* R */ -+#define MSDC_DMA_CFG_DECSEN (0x1 << 1) /* RW */ -+#define MSDC_DMA_CFG_BDCSERR (0x1 << 4) /* R */ -+#define MSDC_DMA_CFG_GPDCSERR (0x1 << 5) /* R */ -+ -+/* MSDC_PATCH_BIT mask */ -+#define MSDC_PATCH_BIT_WFLSMODE (0x1 << 0) /* RW */ -+#define MSDC_PATCH_BIT_ODDSUPP (0x1 << 1) /* RW */ -+#define MSDC_PATCH_BIT_CKGEN_CK (0x1 << 6) /* E2: Fixed to 1 */ -+#define MSDC_PATCH_BIT_IODSSEL (0x1 << 16) /* RW */ -+#define MSDC_PATCH_BIT_IOINTSEL (0x1 << 17) /* RW */ -+#define MSDC_PATCH_BIT_BUSYDLY (0xf << 18) /* RW */ -+#define MSDC_PATCH_BIT_WDOD (0xf << 22) /* RW */ -+#define MSDC_PATCH_BIT_IDRTSEL (0x1 << 26) /* RW */ -+#define MSDC_PATCH_BIT_CMDFSEL (0x1 << 27) /* RW */ -+#define MSDC_PATCH_BIT_INTDLSEL (0x1 << 28) /* RW */ -+#define MSDC_PATCH_BIT_SPCPUSH (0x1 << 29) /* RW */ -+#define MSDC_PATCH_BIT_DECRCTMO (0x1 << 30) /* RW */ -+ -+/* MSDC_PATCH_BIT1 mask */ -+#define MSDC_PATCH_BIT1_WRDAT_CRCS (0x7 << 3) -+#define MSDC_PATCH_BIT1_CMD_RSP (0x7 << 0) -+ -+/* MSDC_PAD_CTL0 mask */ -+#define MSDC_PAD_CTL0_CLKDRVN (0x7 << 0) /* RW */ -+#define MSDC_PAD_CTL0_CLKDRVP (0x7 << 4) /* RW */ -+#define MSDC_PAD_CTL0_CLKSR (0x1 << 8) /* RW */ -+#define MSDC_PAD_CTL0_CLKPD (0x1 << 16) /* RW */ -+#define MSDC_PAD_CTL0_CLKPU (0x1 << 17) /* RW */ -+#define MSDC_PAD_CTL0_CLKSMT (0x1 << 18) /* RW */ -+#define MSDC_PAD_CTL0_CLKIES (0x1 << 19) /* RW */ -+#define MSDC_PAD_CTL0_CLKTDSEL (0xf << 20) /* RW */ -+#define MSDC_PAD_CTL0_CLKRDSEL (0xffUL<< 24) /* RW */ -+ -+/* MSDC_PAD_CTL1 mask */ -+#define MSDC_PAD_CTL1_CMDDRVN (0x7 << 0) /* RW */ -+#define MSDC_PAD_CTL1_CMDDRVP (0x7 << 4) /* RW */ -+#define MSDC_PAD_CTL1_CMDSR (0x1 << 8) /* RW */ -+#define MSDC_PAD_CTL1_CMDPD (0x1 << 16) /* RW */ -+#define MSDC_PAD_CTL1_CMDPU (0x1 << 17) /* RW */ -+#define MSDC_PAD_CTL1_CMDSMT (0x1 << 18) /* RW */ -+#define MSDC_PAD_CTL1_CMDIES (0x1 << 19) /* RW */ -+#define MSDC_PAD_CTL1_CMDTDSEL (0xf << 20) /* RW */ -+#define MSDC_PAD_CTL1_CMDRDSEL (0xffUL<< 24) /* RW */ -+ -+/* MSDC_PAD_CTL2 mask */ -+#define MSDC_PAD_CTL2_DATDRVN (0x7 << 0) /* RW */ -+#define MSDC_PAD_CTL2_DATDRVP (0x7 << 4) /* RW */ -+#define MSDC_PAD_CTL2_DATSR (0x1 << 8) /* RW */ -+#define MSDC_PAD_CTL2_DATPD (0x1 << 16) /* RW */ -+#define MSDC_PAD_CTL2_DATPU (0x1 << 17) /* RW */ -+#define MSDC_PAD_CTL2_DATIES (0x1 << 19) /* RW */ -+#define MSDC_PAD_CTL2_DATSMT (0x1 << 18) /* RW */ -+#define MSDC_PAD_CTL2_DATTDSEL (0xf << 20) /* RW */ -+#define MSDC_PAD_CTL2_DATRDSEL (0xffUL<< 24) /* RW */ -+ -+/* MSDC_PAD_TUNE mask */ -+#define MSDC_PAD_TUNE_DATWRDLY (0x1F << 0) /* RW */ -+#define MSDC_PAD_TUNE_DATRRDLY (0x1F << 8) /* RW */ -+#define MSDC_PAD_TUNE_CMDRDLY (0x1F << 16) /* RW */ -+#define MSDC_PAD_TUNE_CMDRRDLY (0x1FUL << 22) /* RW */ -+#define MSDC_PAD_TUNE_CLKTXDLY (0x1FUL << 27) /* RW */ -+ -+/* MSDC_DAT_RDDLY0/1 mask */ -+#define MSDC_DAT_RDDLY0_D0 (0x1F << 0) /* RW */ -+#define MSDC_DAT_RDDLY0_D1 (0x1F << 8) /* RW */ -+#define MSDC_DAT_RDDLY0_D2 (0x1F << 16) /* RW */ -+#define MSDC_DAT_RDDLY0_D3 (0x1F << 24) /* RW */ -+ -+#define MSDC_DAT_RDDLY1_D4 (0x1F << 0) /* RW */ -+#define MSDC_DAT_RDDLY1_D5 (0x1F << 8) /* RW */ -+#define MSDC_DAT_RDDLY1_D6 (0x1F << 16) /* RW */ -+#define MSDC_DAT_RDDLY1_D7 (0x1F << 24) /* RW */ 
-+ -+#define MSDC_CKGEN_MSDC_DLY_SEL (0x1F<<10) -+#define MSDC_INT_DAT_LATCH_CK_SEL (0x7<<7) -+#define MSDC_CKGEN_MSDC_CK_SEL (0x1<<6) -+#define CARD_READY_FOR_DATA (1<<8) -+#define CARD_CURRENT_STATE(x) ((x&0x00001E00)>>9) -+ -+/*--------------------------------------------------------------------------*/ -+/* Descriptor Structure */ -+/*--------------------------------------------------------------------------*/ -+typedef struct { -+ u32 hwo:1; /* could be changed by hw */ -+ u32 bdp:1; -+ u32 rsv0:6; -+ u32 chksum:8; -+ u32 intr:1; -+ u32 rsv1:15; -+ void *next; -+ void *ptr; -+ u32 buflen:16; -+ u32 extlen:8; -+ u32 rsv2:8; -+ u32 arg; -+ u32 blknum; -+ u32 cmd; -+} gpd_t; -+ -+typedef struct { -+ u32 eol:1; -+ u32 rsv0:7; -+ u32 chksum:8; -+ u32 rsv1:1; -+ u32 blkpad:1; -+ u32 dwpad:1; -+ u32 rsv2:13; -+ void *next; -+ void *ptr; -+ u32 buflen:16; -+ u32 rsv3:16; -+} bd_t; -+ -+/*--------------------------------------------------------------------------*/ -+/* Register Debugging Structure */ -+/*--------------------------------------------------------------------------*/ -+ -+typedef struct { -+ u32 msdc:1; -+ u32 ckpwn:1; -+ u32 rst:1; -+ u32 pio:1; -+ u32 ckdrven:1; -+ u32 start18v:1; -+ u32 pass18v:1; -+ u32 ckstb:1; -+ u32 ckdiv:8; -+ u32 ckmod:2; -+ u32 pad:14; -+} msdc_cfg_reg; -+typedef struct { -+ u32 sdr104cksel:1; -+ u32 rsmpl:1; -+ u32 dsmpl:1; -+ u32 ddlysel:1; -+ u32 ddr50ckd:1; -+ u32 dsplsel:1; -+ u32 pad1:10; -+ u32 d0spl:1; -+ u32 d1spl:1; -+ u32 d2spl:1; -+ u32 d3spl:1; -+ u32 d4spl:1; -+ u32 d5spl:1; -+ u32 d6spl:1; -+ u32 d7spl:1; -+ u32 riscsz:1; -+ u32 pad2:7; -+} msdc_iocon_reg; -+typedef struct { -+ u32 cden:1; -+ u32 cdsts:1; -+ u32 pad1:10; -+ u32 cddebounce:4; -+ u32 dat:8; -+ u32 cmd:1; -+ u32 pad2:6; -+ u32 wp:1; -+} msdc_ps_reg; -+typedef struct { -+ u32 mmcirq:1; -+ u32 cdsc:1; -+ u32 pad1:1; -+ u32 atocmdrdy:1; -+ u32 atocmdtmo:1; -+ u32 atocmdcrc:1; -+ u32 dmaqempty:1; -+ u32 sdioirq:1; -+ u32 cmdrdy:1; -+ u32 cmdtmo:1; -+ u32 rspcrc:1; -+ u32 csta:1; -+ u32 xfercomp:1; -+ u32 dxferdone:1; -+ u32 dattmo:1; -+ u32 datcrc:1; -+ u32 atocmd19done:1; -+ u32 pad2:15; -+} msdc_int_reg; -+typedef struct { -+ u32 mmcirq:1; -+ u32 cdsc:1; -+ u32 pad1:1; -+ u32 atocmdrdy:1; -+ u32 atocmdtmo:1; -+ u32 atocmdcrc:1; -+ u32 dmaqempty:1; -+ u32 sdioirq:1; -+ u32 cmdrdy:1; -+ u32 cmdtmo:1; -+ u32 rspcrc:1; -+ u32 csta:1; -+ u32 xfercomp:1; -+ u32 dxferdone:1; -+ u32 dattmo:1; -+ u32 datcrc:1; -+ u32 atocmd19done:1; -+ u32 pad2:15; -+} msdc_inten_reg; -+typedef struct { -+ u32 rxcnt:8; -+ u32 pad1:8; -+ u32 txcnt:8; -+ u32 pad2:7; -+ u32 clr:1; -+} msdc_fifocs_reg; -+typedef struct { -+ u32 val; -+} msdc_txdat_reg; -+typedef struct { -+ u32 val; -+} msdc_rxdat_reg; -+typedef struct { -+ u32 sdiowkup:1; -+ u32 inswkup:1; -+ u32 pad1:14; -+ u32 buswidth:2; -+ u32 pad2:1; -+ u32 sdio:1; -+ u32 sdioide:1; -+ u32 intblkgap:1; -+ u32 pad4:2; -+ u32 dtoc:8; -+} sdc_cfg_reg; -+typedef struct { -+ u32 cmd:6; -+ u32 brk:1; -+ u32 rsptyp:3; -+ u32 pad1:1; -+ u32 dtype:2; -+ u32 rw:1; -+ u32 stop:1; -+ u32 goirq:1; -+ u32 blklen:12; -+ u32 atocmd:2; -+ u32 volswth:1; -+ u32 pad2:1; -+} sdc_cmd_reg; -+typedef struct { -+ u32 arg; -+} sdc_arg_reg; -+typedef struct { -+ u32 sdcbusy:1; -+ u32 cmdbusy:1; -+ u32 pad:29; -+ u32 swrcmpl:1; -+} sdc_sts_reg; -+typedef struct { -+ u32 val; -+} sdc_resp0_reg; -+typedef struct { -+ u32 val; -+} sdc_resp1_reg; -+typedef struct { -+ u32 val; -+} sdc_resp2_reg; -+typedef struct { -+ u32 val; -+} sdc_resp3_reg; -+typedef struct { -+ u32 num; -+} 
sdc_blknum_reg; -+typedef struct { -+ u32 sts; -+} sdc_csts_reg; -+typedef struct { -+ u32 sts; -+} sdc_cstsen_reg; -+typedef struct { -+ u32 datcrcsts:8; -+ u32 ddrcrcsts:4; -+ u32 pad:20; -+} sdc_datcrcsts_reg; -+typedef struct { -+ u32 bootstart:1; -+ u32 bootstop:1; -+ u32 bootmode:1; -+ u32 pad1:9; -+ u32 bootwaidly:3; -+ u32 bootsupp:1; -+ u32 pad2:16; -+} emmc_cfg0_reg; -+typedef struct { -+ u32 bootcrctmc:16; -+ u32 pad:4; -+ u32 bootacktmc:12; -+} emmc_cfg1_reg; -+typedef struct { -+ u32 bootcrcerr:1; -+ u32 bootackerr:1; -+ u32 bootdattmo:1; -+ u32 bootacktmo:1; -+ u32 bootupstate:1; -+ u32 bootackrcv:1; -+ u32 bootdatrcv:1; -+ u32 pad:25; -+} emmc_sts_reg; -+typedef struct { -+ u32 bootrst:1; -+ u32 pad:31; -+} emmc_iocon_reg; -+typedef struct { -+ u32 val; -+} msdc_acmd_resp_reg; -+typedef struct { -+ u32 tunesel:4; -+ u32 pad:28; -+} msdc_acmd19_trg_reg; -+typedef struct { -+ u32 val; -+} msdc_acmd19_sts_reg; -+typedef struct { -+ u32 addr; -+} msdc_dma_sa_reg; -+typedef struct { -+ u32 addr; -+} msdc_dma_ca_reg; -+typedef struct { -+ u32 start:1; -+ u32 stop:1; -+ u32 resume:1; -+ u32 pad1:5; -+ u32 mode:1; -+ u32 pad2:1; -+ u32 lastbuf:1; -+ u32 pad3:1; -+ u32 brustsz:3; -+ u32 pad4:1; -+ u32 xfersz:16; -+} msdc_dma_ctrl_reg; -+typedef struct { -+ u32 status:1; -+ u32 decsen:1; -+ u32 pad1:2; -+ u32 bdcsen:1; -+ u32 gpdcsen:1; -+ u32 pad2:26; -+} msdc_dma_cfg_reg; -+typedef struct { -+ u32 sel:16; -+ u32 pad2:16; -+} msdc_dbg_sel_reg; -+typedef struct { -+ u32 val; -+} msdc_dbg_out_reg; -+typedef struct { -+ u32 clkdrvn:3; -+ u32 rsv0:1; -+ u32 clkdrvp:3; -+ u32 rsv1:1; -+ u32 clksr:1; -+ u32 rsv2:7; -+ u32 clkpd:1; -+ u32 clkpu:1; -+ u32 clksmt:1; -+ u32 clkies:1; -+ u32 clktdsel:4; -+ u32 clkrdsel:8; -+} msdc_pad_ctl0_reg; -+typedef struct { -+ u32 cmddrvn:3; -+ u32 rsv0:1; -+ u32 cmddrvp:3; -+ u32 rsv1:1; -+ u32 cmdsr:1; -+ u32 rsv2:7; -+ u32 cmdpd:1; -+ u32 cmdpu:1; -+ u32 cmdsmt:1; -+ u32 cmdies:1; -+ u32 cmdtdsel:4; -+ u32 cmdrdsel:8; -+} msdc_pad_ctl1_reg; -+typedef struct { -+ u32 datdrvn:3; -+ u32 rsv0:1; -+ u32 datdrvp:3; -+ u32 rsv1:1; -+ u32 datsr:1; -+ u32 rsv2:7; -+ u32 datpd:1; -+ u32 datpu:1; -+ u32 datsmt:1; -+ u32 daties:1; -+ u32 dattdsel:4; -+ u32 datrdsel:8; -+} msdc_pad_ctl2_reg; -+typedef struct { -+ u32 wrrxdly:3; -+ u32 pad1:5; -+ u32 rdrxdly:8; -+ u32 pad2:16; -+} msdc_pad_tune_reg; -+typedef struct { -+ u32 dat0:5; -+ u32 rsv0:3; -+ u32 dat1:5; -+ u32 rsv1:3; -+ u32 dat2:5; -+ u32 rsv2:3; -+ u32 dat3:5; -+ u32 rsv3:3; -+} msdc_dat_rddly0; -+typedef struct { -+ u32 dat4:5; -+ u32 rsv4:3; -+ u32 dat5:5; -+ u32 rsv5:3; -+ u32 dat6:5; -+ u32 rsv6:3; -+ u32 dat7:5; -+ u32 rsv7:3; -+} msdc_dat_rddly1; -+typedef struct { -+ u32 dbg0sel:8; -+ u32 dbg1sel:6; -+ u32 pad1:2; -+ u32 dbg2sel:6; -+ u32 pad2:2; -+ u32 dbg3sel:6; -+ u32 pad3:2; -+} msdc_hw_dbg_reg; -+typedef struct { -+ u32 val; -+} msdc_version_reg; -+typedef struct { -+ u32 val; -+} msdc_eco_ver_reg; -+ -+struct msdc_regs { -+ msdc_cfg_reg msdc_cfg; /* base+0x00h */ -+ msdc_iocon_reg msdc_iocon; /* base+0x04h */ -+ msdc_ps_reg msdc_ps; /* base+0x08h */ -+ msdc_int_reg msdc_int; /* base+0x0ch */ -+ msdc_inten_reg msdc_inten; /* base+0x10h */ -+ msdc_fifocs_reg msdc_fifocs; /* base+0x14h */ -+ msdc_txdat_reg msdc_txdat; /* base+0x18h */ -+ msdc_rxdat_reg msdc_rxdat; /* base+0x1ch */ -+ u32 rsv1[4]; -+ sdc_cfg_reg sdc_cfg; /* base+0x30h */ -+ sdc_cmd_reg sdc_cmd; /* base+0x34h */ -+ sdc_arg_reg sdc_arg; /* base+0x38h */ -+ sdc_sts_reg sdc_sts; /* base+0x3ch */ -+ sdc_resp0_reg sdc_resp0; /* 
base+0x40h */ -+ sdc_resp1_reg sdc_resp1; /* base+0x44h */ -+ sdc_resp2_reg sdc_resp2; /* base+0x48h */ -+ sdc_resp3_reg sdc_resp3; /* base+0x4ch */ -+ sdc_blknum_reg sdc_blknum; /* base+0x50h */ -+ u32 rsv2[1]; -+ sdc_csts_reg sdc_csts; /* base+0x58h */ -+ sdc_cstsen_reg sdc_cstsen; /* base+0x5ch */ -+ sdc_datcrcsts_reg sdc_dcrcsta; /* base+0x60h */ -+ u32 rsv3[3]; -+ emmc_cfg0_reg emmc_cfg0; /* base+0x70h */ -+ emmc_cfg1_reg emmc_cfg1; /* base+0x74h */ -+ emmc_sts_reg emmc_sts; /* base+0x78h */ -+ emmc_iocon_reg emmc_iocon; /* base+0x7ch */ -+ msdc_acmd_resp_reg acmd_resp; /* base+0x80h */ -+ msdc_acmd19_trg_reg acmd19_trg; /* base+0x84h */ -+ msdc_acmd19_sts_reg acmd19_sts; /* base+0x88h */ -+ u32 rsv4[1]; -+ msdc_dma_sa_reg dma_sa; /* base+0x90h */ -+ msdc_dma_ca_reg dma_ca; /* base+0x94h */ -+ msdc_dma_ctrl_reg dma_ctrl; /* base+0x98h */ -+ msdc_dma_cfg_reg dma_cfg; /* base+0x9ch */ -+ msdc_dbg_sel_reg dbg_sel; /* base+0xa0h */ -+ msdc_dbg_out_reg dbg_out; /* base+0xa4h */ -+ u32 rsv5[2]; -+ u32 patch0; /* base+0xb0h */ -+ u32 patch1; /* base+0xb4h */ -+ u32 rsv6[10]; -+ msdc_pad_ctl0_reg pad_ctl0; /* base+0xe0h */ -+ msdc_pad_ctl1_reg pad_ctl1; /* base+0xe4h */ -+ msdc_pad_ctl2_reg pad_ctl2; /* base+0xe8h */ -+ msdc_pad_tune_reg pad_tune; /* base+0xech */ -+ msdc_dat_rddly0 dat_rddly0; /* base+0xf0h */ -+ msdc_dat_rddly1 dat_rddly1; /* base+0xf4h */ -+ msdc_hw_dbg_reg hw_dbg; /* base+0xf8h */ -+ u32 rsv7[1]; -+ msdc_version_reg version; /* base+0x100h */ -+ msdc_eco_ver_reg eco_ver; /* base+0x104h */ -+}; -+ -+struct scatterlist_ex { -+ u32 cmd; -+ u32 arg; -+ u32 sglen; -+ struct scatterlist *sg; -+}; -+ -+#define DMA_FLAG_NONE (0x00000000) -+#define DMA_FLAG_EN_CHKSUM (0x00000001) -+#define DMA_FLAG_PAD_BLOCK (0x00000002) -+#define DMA_FLAG_PAD_DWORD (0x00000004) -+ -+struct msdc_dma { -+ u32 flags; /* flags */ -+ u32 xfersz; /* xfer size in bytes */ -+ u32 sglen; /* size of scatter list */ -+ u32 blklen; /* block size */ -+ struct scatterlist *sg; /* I/O scatter list */ -+ struct scatterlist_ex *esg; /* extended I/O scatter list */ -+ u8 mode; /* dma mode */ -+ u8 burstsz; /* burst size */ -+ u8 intr; /* dma done interrupt */ -+ u8 padding; /* padding */ -+ u32 cmd; /* enhanced mode command */ -+ u32 arg; /* enhanced mode arg */ -+ u32 rsp; /* enhanced mode command response */ -+ u32 autorsp; /* auto command response */ -+ -+ gpd_t *gpd; /* pointer to gpd array */ -+ bd_t *bd; /* pointer to bd array */ -+ dma_addr_t gpd_addr; /* the physical address of gpd array */ -+ dma_addr_t bd_addr; /* the physical address of bd array */ -+ u32 used_gpd; /* the number of used gpd elements */ -+ u32 used_bd; /* the number of used bd elements */ -+}; -+ -+struct msdc_host -+{ -+ struct msdc_hw *hw; -+ -+ struct mmc_host *mmc; /* mmc structure */ -+ struct mmc_command *cmd; -+ struct mmc_data *data; -+ struct mmc_request *mrq; -+ int cmd_rsp; -+ int cmd_rsp_done; -+ int cmd_r1b_done; -+ -+ int error; -+ spinlock_t lock; /* mutex */ -+ struct semaphore sem; -+ -+ u32 blksz; /* host block size */ -+ u32 base; /* host base address */ -+ int id; /* host id */ -+ int pwr_ref; /* core power reference count */ -+ -+ u32 xfer_size; /* total transferred size */ -+ -+ struct msdc_dma dma; /* dma channel */ -+ u32 dma_addr; /* dma transfer address */ -+ u32 dma_left_size; /* dma transfer left size */ -+ u32 dma_xfer_size; /* dma transfer size in bytes */ -+ int dma_xfer; /* dma transfer mode */ -+ -+ u32 timeout_ns; /* data timeout ns */ -+ u32 timeout_clks; /* data timeout clks */ -+ -+ atomic_t abort; /* 
abort transfer */ -+ -+ int irq; /* host interrupt */ -+ -+ struct tasklet_struct card_tasklet; -+ -+ struct completion cmd_done; -+ struct completion xfer_done; -+ struct pm_message pm_state; -+ -+ u32 mclk; /* mmc subsystem clock */ -+ u32 hclk; /* host clock speed */ -+ u32 sclk; /* SD/MS clock speed */ -+ u8 core_clkon; /* Host core clock on ? */ -+ u8 card_clkon; /* Card clock on ? */ -+ u8 core_power; /* core power */ -+ u8 power_mode; /* host power mode */ -+ u8 card_inserted; /* card inserted ? */ -+ u8 suspend; /* host suspended ? */ -+ u8 reserved; -+ u8 app_cmd; /* for app command */ -+ u32 app_cmd_arg; -+ u64 starttime; -+}; -+ -+static inline unsigned int uffs(unsigned int x) -+{ -+ unsigned int r = 1; -+ -+ if (!x) -+ return 0; -+ if (!(x & 0xffff)) { -+ x >>= 16; -+ r += 16; -+ } -+ if (!(x & 0xff)) { -+ x >>= 8; -+ r += 8; -+ } -+ if (!(x & 0xf)) { -+ x >>= 4; -+ r += 4; -+ } -+ if (!(x & 3)) { -+ x >>= 2; -+ r += 2; -+ } -+ if (!(x & 1)) { -+ x >>= 1; -+ r += 1; -+ } -+ return r; -+} -+#define sdr_read8(reg) __raw_readb(reg) -+#define sdr_read16(reg) __raw_readw(reg) -+#define sdr_read32(reg) __raw_readl(reg) -+#define sdr_write8(reg,val) __raw_writeb(val,reg) -+#define sdr_write16(reg,val) __raw_writew(val,reg) -+#define sdr_write32(reg,val) __raw_writel(val,reg) -+ -+#define sdr_set_bits(reg,bs) ((*(volatile u32*)(reg)) |= (u32)(bs)) -+#define sdr_clr_bits(reg,bs) ((*(volatile u32*)(reg)) &= ~((u32)(bs))) -+ -+#define sdr_set_field(reg,field,val) \ -+ do { \ -+ volatile unsigned int tv = sdr_read32(reg); \ -+ tv &= ~(field); \ -+ tv |= ((val) << (uffs((unsigned int)field) - 1)); \ -+ sdr_write32(reg,tv); \ -+ } while(0) -+#define sdr_get_field(reg,field,val) \ -+ do { \ -+ volatile unsigned int tv = sdr_read32(reg); \ -+ val = ((tv & (field)) >> (uffs((unsigned int)field) - 1)); \ -+ } while(0) -+ -+#endif -+ ---- /dev/null -+++ b/drivers/mmc/host/sdhci-mt7620.c -@@ -0,0 +1,2314 @@ -+/* Copyright Statement: -+ * -+ * This software/firmware and related documentation ("MediaTek Software") are -+ * protected under relevant copyright laws. The information contained herein -+ * is confidential and proprietary to MediaTek Inc. and/or its licensors. -+ * Without the prior written permission of MediaTek inc. and/or its licensors, -+ * any reproduction, modification, use or disclosure of MediaTek Software, -+ * and information contained herein, in whole or in part, shall be strictly prohibited. -+ * -+ * MediaTek Inc. (C) 2010. All rights reserved. -+ * -+ * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES -+ * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE") -+ * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON -+ * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES, -+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF -+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT. -+ * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE -+ * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR -+ * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH -+ * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES -+ * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES -+ * CONTAINED IN MEDIATEK SOFTWARE. 
MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK -+ * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR -+ * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND -+ * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE, -+ * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE, -+ * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO -+ * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE. -+ * -+ * The following software/firmware and/or related documentation ("MediaTek Software") -+ * have been modified by MediaTek Inc. All revisions are subject to any receiver's -+ * applicable license agreements with MediaTek Inc. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+#include -+ -+#define MSDC_SMPL_FALLING (1) -+#define MSDC_CD_PIN_EN (1 << 0) /* card detection pin is wired */ -+#define MSDC_WP_PIN_EN (1 << 1) /* write protection pin is wired */ -+#define MSDC_REMOVABLE (1 << 5) /* removable slot */ -+#define MSDC_SYS_SUSPEND (1 << 6) /* suspended by system */ -+#define MSDC_HIGHSPEED (1 << 7) -+ -+#define IRQ_SDC 22 -+ -+#include -+ -+#include "mt6575_sd.h" -+ -+#define DRV_NAME "mtk-sd" -+ -+#define HOST_MAX_NUM (1) /* +/- by chhung */ -+ -+#define HOST_MAX_MCLK (48000000) /* +/- by chhung */ -+#define HOST_MIN_MCLK (260000) -+ -+#define HOST_MAX_BLKSZ (2048) -+ -+#define MSDC_OCR_AVAIL (MMC_VDD_28_29 | MMC_VDD_29_30 | MMC_VDD_30_31 | MMC_VDD_31_32 | MMC_VDD_32_33) -+ -+#define GPIO_PULL_DOWN (0) -+#define GPIO_PULL_UP (1) -+ -+#define DEFAULT_DEBOUNCE (8) /* 8 cycles */ -+#define DEFAULT_DTOC (40) /* data timeout counter. 65536x40 sclk. 
*/ -+ -+#define CMD_TIMEOUT (HZ/10) /* 100ms */ -+#define DAT_TIMEOUT (HZ/2 * 5) /* 500ms x5 */ -+ -+#define MAX_DMA_CNT (64 * 1024 - 512) /* a single transaction for WIFI may be 50K*/ -+ -+#define MAX_GPD_NUM (1 + 1) /* one null gpd */ -+#define MAX_BD_NUM (1024) -+#define MAX_BD_PER_GPD (MAX_BD_NUM) -+ -+#define MAX_HW_SGMTS (MAX_BD_NUM) -+#define MAX_PHY_SGMTS (MAX_BD_NUM) -+#define MAX_SGMT_SZ (MAX_DMA_CNT) -+#define MAX_REQ_SZ (MAX_SGMT_SZ * 8) -+ -+#ifdef MT6575_SD_DEBUG -+static struct msdc_regs *msdc_reg[HOST_MAX_NUM]; -+#endif -+ -+//================================= -+#define PERI_MSDC0_PDN (15) -+//#define PERI_MSDC1_PDN (16) -+//#define PERI_MSDC2_PDN (17) -+//#define PERI_MSDC3_PDN (18) -+ -+struct msdc_host *msdc_6575_host[] = {NULL,NULL,NULL,NULL}; -+ -+struct msdc_hw msdc0_hw = { -+ .clk_src = 0, -+ .cmd_edge = MSDC_SMPL_FALLING, -+ .data_edge = MSDC_SMPL_FALLING, -+ .clk_drv = 4, -+ .cmd_drv = 4, -+ .dat_drv = 4, -+ .data_pins = 4, -+ .data_offset = 0, -+ .flags = MSDC_SYS_SUSPEND | MSDC_WP_PIN_EN | MSDC_CD_PIN_EN | MSDC_REMOVABLE | MSDC_HIGHSPEED, -+}; -+ -+static struct resource mtk_sd_resources[] = { -+ [0] = { -+ .start = 0xb0130000, -+ .end = 0xb0133fff, -+ .flags = IORESOURCE_MEM, -+ }, -+ [1] = { -+ .start = IRQ_SDC, /*FIXME*/ -+ .end = IRQ_SDC, /*FIXME*/ -+ .flags = IORESOURCE_IRQ, -+ }, -+}; -+ -+static struct platform_device mtk_sd_device = { -+ .name = "mtk-sd", -+ .id = 0, -+ .num_resources = ARRAY_SIZE(mtk_sd_resources), -+ .resource = mtk_sd_resources, -+}; -+/* end of +++ */ -+ -+static int msdc_rsp[] = { -+ 0, /* RESP_NONE */ -+ 1, /* RESP_R1 */ -+ 2, /* RESP_R2 */ -+ 3, /* RESP_R3 */ -+ 4, /* RESP_R4 */ -+ 1, /* RESP_R5 */ -+ 1, /* RESP_R6 */ -+ 1, /* RESP_R7 */ -+ 7, /* RESP_R1b */ -+}; -+ -+/* For Inhanced DMA */ -+#define msdc_init_gpd_ex(gpd,extlen,cmd,arg,blknum) \ -+ do { \ -+ ((gpd_t*)gpd)->extlen = extlen; \ -+ ((gpd_t*)gpd)->cmd = cmd; \ -+ ((gpd_t*)gpd)->arg = arg; \ -+ ((gpd_t*)gpd)->blknum = blknum; \ -+ }while(0) -+ -+#define msdc_init_bd(bd, blkpad, dwpad, dptr, dlen) \ -+ do { \ -+ BUG_ON(dlen > 0xFFFFUL); \ -+ ((bd_t*)bd)->blkpad = blkpad; \ -+ ((bd_t*)bd)->dwpad = dwpad; \ -+ ((bd_t*)bd)->ptr = (void*)dptr; \ -+ ((bd_t*)bd)->buflen = dlen; \ -+ }while(0) -+ -+#define msdc_txfifocnt() ((sdr_read32(MSDC_FIFOCS) & MSDC_FIFOCS_TXCNT) >> 16) -+#define msdc_rxfifocnt() ((sdr_read32(MSDC_FIFOCS) & MSDC_FIFOCS_RXCNT) >> 0) -+#define msdc_fifo_write32(v) sdr_write32(MSDC_TXDATA, (v)) -+#define msdc_fifo_write8(v) sdr_write8(MSDC_TXDATA, (v)) -+#define msdc_fifo_read32() sdr_read32(MSDC_RXDATA) -+#define msdc_fifo_read8() sdr_read8(MSDC_RXDATA) -+ -+ -+#define msdc_dma_on() sdr_clr_bits(MSDC_CFG, MSDC_CFG_PIO) -+#define msdc_dma_off() sdr_set_bits(MSDC_CFG, MSDC_CFG_PIO) -+ -+#define msdc_retry(expr,retry,cnt) \ -+ do { \ -+ int backup = cnt; \ -+ while (retry) { \ -+ if (!(expr)) break; \ -+ if (cnt-- == 0) { \ -+ retry--; mdelay(1); cnt = backup; \ -+ } \ -+ } \ -+ WARN_ON(retry == 0); \ -+ } while(0) -+ -+#if 0 /* +/- chhung */ -+#define msdc_reset() \ -+ do { \ -+ int retry = 3, cnt = 1000; \ -+ sdr_set_bits(MSDC_CFG, MSDC_CFG_RST); \ -+ dsb(); \ -+ msdc_retry(sdr_read32(MSDC_CFG) & MSDC_CFG_RST, retry, cnt); \ -+ } while(0) -+#else -+#define msdc_reset() \ -+ do { \ -+ int retry = 3, cnt = 1000; \ -+ sdr_set_bits(MSDC_CFG, MSDC_CFG_RST); \ -+ msdc_retry(sdr_read32(MSDC_CFG) & MSDC_CFG_RST, retry, cnt); \ -+ } while(0) -+#endif /* end of +/- */ -+ -+#define msdc_clr_int() \ -+ do { \ -+ volatile u32 val = sdr_read32(MSDC_INT); \ -+ 
sdr_write32(MSDC_INT, val); \ -+ } while(0) -+ -+#define msdc_clr_fifo() \ -+ do { \ -+ int retry = 3, cnt = 1000; \ -+ sdr_set_bits(MSDC_FIFOCS, MSDC_FIFOCS_CLR); \ -+ msdc_retry(sdr_read32(MSDC_FIFOCS) & MSDC_FIFOCS_CLR, retry, cnt); \ -+ } while(0) -+ -+#define msdc_irq_save(val) \ -+ do { \ -+ val = sdr_read32(MSDC_INTEN); \ -+ sdr_clr_bits(MSDC_INTEN, val); \ -+ } while(0) -+ -+#define msdc_irq_restore(val) \ -+ do { \ -+ sdr_set_bits(MSDC_INTEN, val); \ -+ } while(0) -+ -+/* clock source for host: global */ -+static u32 hclks[] = {48000000}; /* +/- by chhung */ -+ -+//============================================ -+// the power for msdc host controller: global -+// always keep the VMC on. -+//============================================ -+#define msdc_vcore_on(host) \ -+ do { \ -+ printk("[+]VMC ref. count<%d>\n", ++host->pwr_ref); \ -+ (void)hwPowerOn(MT65XX_POWER_LDO_VMC, VOL_3300, "SD"); \ -+ } while (0) -+#define msdc_vcore_off(host) \ -+ do { \ -+ printk("[-]VMC ref. count<%d>\n", --host->pwr_ref); \ -+ (void)hwPowerDown(MT65XX_POWER_LDO_VMC, "SD"); \ -+ } while (0) -+ -+//==================================== -+// the vdd output for card: global -+// always keep the VMCH on. -+//==================================== -+#define msdc_vdd_on(host) \ -+ do { \ -+ (void)hwPowerOn(MT65XX_POWER_LDO_VMCH, VOL_3300, "SD"); \ -+ } while (0) -+#define msdc_vdd_off(host) \ -+ do { \ -+ (void)hwPowerDown(MT65XX_POWER_LDO_VMCH, "SD"); \ -+ } while (0) -+ -+#define sdc_is_busy() (sdr_read32(SDC_STS) & SDC_STS_SDCBUSY) -+#define sdc_is_cmd_busy() (sdr_read32(SDC_STS) & SDC_STS_CMDBUSY) -+ -+#define sdc_send_cmd(cmd,arg) \ -+ do { \ -+ sdr_write32(SDC_ARG, (arg)); \ -+ sdr_write32(SDC_CMD, (cmd)); \ -+ } while(0) -+ -+// can modify to read h/w register. -+//#define is_card_present(h) ((sdr_read32(MSDC_PS) & MSDC_PS_CDSTS) ? 
0 : 1); -+#define is_card_present(h) (((struct msdc_host*)(h))->card_inserted) -+ -+/* +++ chhung */ -+#ifndef __ASSEMBLY__ -+#define PHYSADDR(a) (((unsigned long)(a)) & 0x1fffffff) -+#else -+#define PHYSADDR(a) ((a) & 0x1fffffff) -+#endif -+/* end of +++ */ -+static unsigned int msdc_do_command(struct msdc_host *host, -+ struct mmc_command *cmd, -+ int tune, -+ unsigned long timeout); -+ -+static int msdc_tune_cmdrsp(struct msdc_host*host,struct mmc_command *cmd); -+ -+#ifdef MT6575_SD_DEBUG -+static void msdc_dump_card_status(struct msdc_host *host, u32 status) -+{ -+ static char *state[] = { -+ "Idle", /* 0 */ -+ "Ready", /* 1 */ -+ "Ident", /* 2 */ -+ "Stby", /* 3 */ -+ "Tran", /* 4 */ -+ "Data", /* 5 */ -+ "Rcv", /* 6 */ -+ "Prg", /* 7 */ -+ "Dis", /* 8 */ -+ "Reserved", /* 9 */ -+ "Reserved", /* 10 */ -+ "Reserved", /* 11 */ -+ "Reserved", /* 12 */ -+ "Reserved", /* 13 */ -+ "Reserved", /* 14 */ -+ "I/O mode", /* 15 */ -+ }; -+ if (status & R1_OUT_OF_RANGE) -+ printk("[CARD_STATUS] Out of Range\n"); -+ if (status & R1_ADDRESS_ERROR) -+ printk("[CARD_STATUS] Address Error\n"); -+ if (status & R1_BLOCK_LEN_ERROR) -+ printk("[CARD_STATUS] Block Len Error\n"); -+ if (status & R1_ERASE_SEQ_ERROR) -+ printk("[CARD_STATUS] Erase Seq Error\n"); -+ if (status & R1_ERASE_PARAM) -+ printk("[CARD_STATUS] Erase Param\n"); -+ if (status & R1_WP_VIOLATION) -+ printk("[CARD_STATUS] WP Violation\n"); -+ if (status & R1_CARD_IS_LOCKED) -+ printk("[CARD_STATUS] Card is Locked\n"); -+ if (status & R1_LOCK_UNLOCK_FAILED) -+ printk("[CARD_STATUS] Lock/Unlock Failed\n"); -+ if (status & R1_COM_CRC_ERROR) -+ printk("[CARD_STATUS] Command CRC Error\n"); -+ if (status & R1_ILLEGAL_COMMAND) -+ printk("[CARD_STATUS] Illegal Command\n"); -+ if (status & R1_CARD_ECC_FAILED) -+ printk("[CARD_STATUS] Card ECC Failed\n"); -+ if (status & R1_CC_ERROR) -+ printk("[CARD_STATUS] CC Error\n"); -+ if (status & R1_ERROR) -+ printk("[CARD_STATUS] Error\n"); -+ if (status & R1_UNDERRUN) -+ printk("[CARD_STATUS] Underrun\n"); -+ if (status & R1_OVERRUN) -+ printk("[CARD_STATUS] Overrun\n"); -+ if (status & R1_CID_CSD_OVERWRITE) -+ printk("[CARD_STATUS] CID/CSD Overwrite\n"); -+ if (status & R1_WP_ERASE_SKIP) -+ printk("[CARD_STATUS] WP Eraser Skip\n"); -+ if (status & R1_CARD_ECC_DISABLED) -+ printk("[CARD_STATUS] Card ECC Disabled\n"); -+ if (status & R1_ERASE_RESET) -+ printk("[CARD_STATUS] Erase Reset\n"); -+ if (status & R1_READY_FOR_DATA) -+ printk("[CARD_STATUS] Ready for Data\n"); -+ if (status & R1_SWITCH_ERROR) -+ printk("[CARD_STATUS] Switch error\n"); -+ if (status & R1_APP_CMD) -+ printk("[CARD_STATUS] App Command\n"); -+ -+ printk("[CARD_STATUS] '%s' State\n", state[R1_CURRENT_STATE(status)]); -+} -+ -+static void msdc_dump_ocr_reg(struct msdc_host *host, u32 resp) -+{ -+ if (resp & (1 << 7)) -+ printk("[OCR] Low Voltage Range\n"); -+ if (resp & (1 << 15)) -+ printk("[OCR] 2.7-2.8 volt\n"); -+ if (resp & (1 << 16)) -+ printk("[OCR] 2.8-2.9 volt\n"); -+ if (resp & (1 << 17)) -+ printk("[OCR] 2.9-3.0 volt\n"); -+ if (resp & (1 << 18)) -+ printk("[OCR] 3.0-3.1 volt\n"); -+ if (resp & (1 << 19)) -+ printk("[OCR] 3.1-3.2 volt\n"); -+ if (resp & (1 << 20)) -+ printk("[OCR] 3.2-3.3 volt\n"); -+ if (resp & (1 << 21)) -+ printk("[OCR] 3.3-3.4 volt\n"); -+ if (resp & (1 << 22)) -+ printk("[OCR] 3.4-3.5 volt\n"); -+ if (resp & (1 << 23)) -+ printk("[OCR] 3.5-3.6 volt\n"); -+ if (resp & (1 << 24)) -+ printk("[OCR] Switching to 1.8V Accepted (S18A)\n"); -+ if (resp & (1 << 30)) -+ printk("[OCR] Card Capacity Status (CCS)\n"); 
-+ if (resp & (1 << 31)) -+ printk("[OCR] Card Power Up Status (Idle)\n"); -+ else -+ printk("[OCR] Card Power Up Status (Busy)\n"); -+} -+ -+static void msdc_dump_rca_resp(struct msdc_host *host, u32 resp) -+{ -+ u32 status = (((resp >> 15) & 0x1) << 23) | -+ (((resp >> 14) & 0x1) << 22) | -+ (((resp >> 13) & 0x1) << 19) | -+ (resp & 0x1fff); -+ -+ printk("[RCA] 0x%.4x\n", resp >> 16); -+ -+ msdc_dump_card_status(host, status); -+} -+ -+static void msdc_dump_io_resp(struct msdc_host *host, u32 resp) -+{ -+ u32 flags = (resp >> 8) & 0xFF; -+ char *state[] = {"DIS", "CMD", "TRN", "RFU"}; -+ -+ if (flags & (1 << 7)) -+ printk("[IO] COM_CRC_ERR\n"); -+ if (flags & (1 << 6)) -+ printk("[IO] Illgal command\n"); -+ if (flags & (1 << 3)) -+ printk("[IO] Error\n"); -+ if (flags & (1 << 2)) -+ printk("[IO] RFU\n"); -+ if (flags & (1 << 1)) -+ printk("[IO] Function number error\n"); -+ if (flags & (1 << 0)) -+ printk("[IO] Out of range\n"); -+ -+ printk("[IO] State: %s, Data:0x%x\n", state[(resp >> 12) & 0x3], resp & 0xFF); -+} -+#endif -+ -+static void msdc_set_timeout(struct msdc_host *host, u32 ns, u32 clks) -+{ -+ u32 base = host->base; -+ u32 timeout, clk_ns; -+ -+ host->timeout_ns = ns; -+ host->timeout_clks = clks; -+ -+ clk_ns = 1000000000UL / host->sclk; -+ timeout = ns / clk_ns + clks; -+ timeout = timeout >> 16; /* in 65536 sclk cycle unit */ -+ timeout = timeout > 1 ? timeout - 1 : 0; -+ timeout = timeout > 255 ? 255 : timeout; -+ -+ sdr_set_field(SDC_CFG, SDC_CFG_DTOC, timeout); -+ -+/* printk("Set read data timeout: %dns %dclks -> %d x 65536 cycles\n", -+ ns, clks, timeout + 1);*/ -+} -+ -+static void msdc_eirq_sdio(void *data) -+{ -+ struct msdc_host *host = (struct msdc_host *)data; -+ -+// printk("SDIO EINT\n"); -+ -+ mmc_signal_sdio_irq(host->mmc); -+} -+ -+static void msdc_eirq_cd(void *data) -+{ -+ struct msdc_host *host = (struct msdc_host *)data; -+ -+// printk("CD EINT\n"); -+ -+ tasklet_hi_schedule(&host->card_tasklet); -+} -+ -+static void msdc_tasklet_card(unsigned long arg) -+{ -+ struct msdc_host *host = (struct msdc_host *)arg; -+ struct msdc_hw *hw = host->hw; -+ u32 base = host->base; -+ u32 inserted; -+ u32 status = 0; -+ -+ spin_lock(&host->lock); -+ -+ if (hw->get_cd_status) { -+ inserted = hw->get_cd_status(); -+ } else { -+ status = sdr_read32(MSDC_PS); -+ inserted = (status & MSDC_PS_CDSTS) ? 0 : 1; -+ } -+ -+ host->card_inserted = inserted; -+ -+ if (!host->suspend) { -+ host->mmc->f_max = HOST_MAX_MCLK; -+ mmc_detect_change(host->mmc, msecs_to_jiffies(20)); -+ } -+ -+// printk("card found<%s>\n", inserted ? 
"inserted" : "removed"); -+ -+ spin_unlock(&host->lock); -+} -+ -+static void msdc_set_mclk(struct msdc_host *host, int ddr, unsigned int hz) -+{ -+ u32 base = host->base; -+ u32 hclk = host->hclk; -+ u32 mode, flags, div, sclk; -+ -+ if (!hz) { -+// printk("set mclk to 0!!!\n"); -+ msdc_reset(); -+ return; -+ } -+ -+ msdc_irq_save(flags); -+ -+ if (ddr) { -+ mode = 0x2; -+ if (hz >= (hclk >> 2)) { -+ div = 1; -+ sclk = hclk >> 2; -+ } else { -+ div = (hclk + ((hz << 2) - 1)) / (hz << 2); -+ sclk = (hclk >> 2) / div; -+ } -+ } else if (hz >= hclk) { -+ mode = 0x1; -+ div = 0; -+ sclk = hclk; -+ } else { -+ mode = 0x0; -+ if (hz >= (hclk >> 1)) { -+ div = 0; -+ sclk = hclk >> 1; -+ } else { -+ div = (hclk + ((hz << 2) - 1)) / (hz << 2); -+ sclk = (hclk >> 2) / div; -+ } -+ } -+ -+ sdr_set_field(MSDC_CFG, MSDC_CFG_CKMOD, mode); -+ sdr_set_field(MSDC_CFG, MSDC_CFG_CKDIV, div); -+ -+ while (!(sdr_read32(MSDC_CFG) & MSDC_CFG_CKSTB)); -+ -+ host->sclk = sclk; -+ host->mclk = hz; -+ msdc_set_timeout(host, host->timeout_ns, host->timeout_clks); -+ -+/* printk("!!! Set<%dKHz> Source<%dKHz> -> sclk<%dKHz>\n", -+ hz / 1000, hclk / 1000, sclk / 1000); -+*/ -+ msdc_irq_restore(flags); -+} -+ -+static void msdc_abort_data(struct msdc_host *host) -+{ -+ u32 base = host->base; -+ struct mmc_command *stop = host->mrq->stop; -+ -+// printk("Need to Abort. dma<%d>\n", host->dma_xfer); -+ -+ msdc_reset(); -+ msdc_clr_fifo(); -+ msdc_clr_int(); -+ -+ if (stop) { -+// printk("stop when abort CMD<%d>\n", stop->opcode); -+ msdc_do_command(host, stop, 0, CMD_TIMEOUT); -+ } -+} -+ -+static unsigned int msdc_command_start(struct msdc_host *host, -+ struct mmc_command *cmd, int tune, unsigned long timeout) -+{ -+ u32 wints = MSDC_INT_CMDRDY | MSDC_INT_RSPCRCERR | MSDC_INT_CMDTMO | -+ MSDC_INT_ACMDRDY | MSDC_INT_ACMDCRCERR | MSDC_INT_ACMDTMO | -+ MSDC_INT_ACMD19_DONE; -+ u32 base = host->base; -+ u32 opcode = cmd->opcode; -+ u32 rawcmd; -+ u32 resp; -+ unsigned long tmo; -+ -+ if (opcode == MMC_SEND_OP_COND || opcode == SD_APP_OP_COND) -+ resp = RESP_R3; -+ else if (opcode == MMC_SET_RELATIVE_ADDR || opcode == SD_SEND_RELATIVE_ADDR) -+ resp = (mmc_cmd_type(cmd) == MMC_CMD_BCR) ? RESP_R6 : RESP_R1; -+ else if (opcode == MMC_FAST_IO) -+ resp = RESP_R4; -+ else if (opcode == MMC_GO_IRQ_STATE) -+ resp = RESP_R5; -+ else if (opcode == MMC_SELECT_CARD) -+ resp = (cmd->arg != 0) ? 
RESP_R1B : RESP_NONE; -+ else if (opcode == SD_IO_RW_DIRECT || opcode == SD_IO_RW_EXTENDED) -+ resp = RESP_R1; -+ else if (opcode == SD_SEND_IF_COND && (mmc_cmd_type(cmd) == MMC_CMD_BCR)) -+ resp = RESP_R1; -+ else { -+ switch (mmc_resp_type(cmd)) { -+ case MMC_RSP_R1: -+ resp = RESP_R1; -+ break; -+ case MMC_RSP_R1B: -+ resp = RESP_R1B; -+ break; -+ case MMC_RSP_R2: -+ resp = RESP_R2; -+ break; -+ case MMC_RSP_R3: -+ resp = RESP_R3; -+ break; -+ case MMC_RSP_NONE: -+ default: -+ resp = RESP_NONE; -+ break; -+ } -+ } -+ -+ cmd->error = 0; -+ rawcmd = opcode | msdc_rsp[resp] << 7 | host->blksz << 16; -+ -+ if (opcode == MMC_READ_MULTIPLE_BLOCK) { -+ rawcmd |= (2 << 11); -+ } else if (opcode == MMC_READ_SINGLE_BLOCK) { -+ rawcmd |= (1 << 11); -+ } else if (opcode == MMC_WRITE_MULTIPLE_BLOCK) { -+ rawcmd |= ((2 << 11) | (1 << 13)); -+ } else if (opcode == MMC_WRITE_BLOCK) { -+ rawcmd |= ((1 << 11) | (1 << 13)); -+ } else if (opcode == SD_IO_RW_EXTENDED) { -+ if (cmd->data->flags & MMC_DATA_WRITE) -+ rawcmd |= (1 << 13); -+ if (cmd->data->blocks > 1) -+ rawcmd |= (2 << 11); -+ else -+ rawcmd |= (1 << 11); -+ } else if (opcode == SD_IO_RW_DIRECT && cmd->flags == (unsigned int)-1) { -+ rawcmd |= (1 << 14); -+ } else if ((opcode == SD_APP_SEND_SCR) || -+ (opcode == SD_APP_SEND_NUM_WR_BLKS) || -+ (opcode == SD_SWITCH && (mmc_cmd_type(cmd) == MMC_CMD_ADTC)) || -+ (opcode == SD_APP_SD_STATUS && (mmc_cmd_type(cmd) == MMC_CMD_ADTC)) || -+ (opcode == MMC_SEND_EXT_CSD && (mmc_cmd_type(cmd) == MMC_CMD_ADTC))) { -+ rawcmd |= (1 << 11); -+ } else if (opcode == MMC_STOP_TRANSMISSION) { -+ rawcmd |= (1 << 14); -+ rawcmd &= ~(0x0FFF << 16); -+ } -+ -+// printk("CMD<%d><0x%.8x> Arg<0x%.8x>\n", opcode , rawcmd, cmd->arg); -+ -+ tmo = jiffies + timeout; -+ -+ if (opcode == MMC_SEND_STATUS) { -+ for (;;) { -+ if (!sdc_is_cmd_busy()) -+ break; -+ -+ if (time_after(jiffies, tmo)) { -+ //printk("XXX cmd_busy timeout: before CMD<%d>\n", opcode); -+ cmd->error = (unsigned int)-ETIMEDOUT; -+ msdc_reset(); -+ goto end; -+ } -+ } -+ } else { -+ for (;;) { -+ if (!sdc_is_busy()) -+ break; -+ if (time_after(jiffies, tmo)) { -+ //printk("XXX sdc_busy timeout: before CMD<%d>\n", opcode); -+ cmd->error = (unsigned int)-ETIMEDOUT; -+ msdc_reset(); -+ goto end; -+ } -+ } -+ } -+ -+ //BUG_ON(in_interrupt()); -+ host->cmd = cmd; -+ host->cmd_rsp = resp; -+ init_completion(&host->cmd_done); -+ sdr_set_bits(MSDC_INTEN, wints); -+ sdc_send_cmd(rawcmd, cmd->arg); -+ -+end: -+ return cmd->error; -+} -+ -+static unsigned int msdc_command_resp(struct msdc_host *host, struct mmc_command *cmd, -+ int tune, unsigned long timeout) -+{ -+ u32 base = host->base; -+ //u32 opcode = cmd->opcode; -+ u32 resp; -+ u32 wints = MSDC_INT_CMDRDY | MSDC_INT_RSPCRCERR | MSDC_INT_CMDTMO | -+ MSDC_INT_ACMDRDY | MSDC_INT_ACMDCRCERR | MSDC_INT_ACMDTMO | -+ MSDC_INT_ACMD19_DONE; -+ -+ resp = host->cmd_rsp; -+ -+ BUG_ON(in_interrupt()); -+ spin_unlock(&host->lock); -+ if (!wait_for_completion_timeout(&host->cmd_done, 10*timeout)) { -+ //printk("XXX CMD<%d> wait_for_completion timeout ARG<0x%.8x>\n", opcode, cmd->arg); -+ cmd->error = (unsigned int)-ETIMEDOUT; -+ msdc_reset(); -+ } -+ spin_lock(&host->lock); -+ -+ sdr_clr_bits(MSDC_INTEN, wints); -+ host->cmd = NULL; -+ -+ if (!tune) -+ return cmd->error; -+ -+ /* memory card CRC */ -+ if (host->hw->flags & MSDC_REMOVABLE && cmd->error == (unsigned int)(-EIO) ) { -+ if (sdr_read32(SDC_CMD) & 0x1800) { -+ msdc_abort_data(host); -+ } else { -+ msdc_reset(); -+ msdc_clr_fifo(); -+ msdc_clr_int(); -+ } -+ 
cmd->error = msdc_tune_cmdrsp(host,cmd); -+ } -+ -+ return cmd->error; -+} -+ -+static unsigned int msdc_do_command(struct msdc_host *host, struct mmc_command *cmd, -+ int tune, unsigned long timeout) -+{ -+ if (!msdc_command_start(host, cmd, tune, timeout)) -+ msdc_command_resp(host, cmd, tune, timeout); -+ -+ //printk(" return<%d> resp<0x%.8x>\n", cmd->error, cmd->resp[0]); -+ return cmd->error; -+} -+ -+static int msdc_pio_abort(struct msdc_host *host, struct mmc_data *data, unsigned long tmo) -+{ -+ u32 base = host->base; -+ int ret = 0; -+ -+ if (atomic_read(&host->abort)) -+ ret = 1; -+ -+ if (time_after(jiffies, tmo)) { -+ data->error = (unsigned int)-ETIMEDOUT; -+ //printk("XXX PIO Data Timeout: CMD<%d>\n", host->mrq->cmd->opcode); -+ ret = 1; -+ } -+ -+ if (ret) { -+ msdc_reset(); -+ msdc_clr_fifo(); -+ msdc_clr_int(); -+ //printk("msdc pio find abort\n"); -+ } -+ -+ return ret; -+} -+ -+static int msdc_pio_read(struct msdc_host *host, struct mmc_data *data) -+{ -+ struct scatterlist *sg = data->sg; -+ u32 base = host->base; -+ u32 num = data->sg_len; -+ u32 *ptr; -+ u8 *u8ptr; -+ u32 left; -+ u32 count, size = 0; -+ u32 wints = MSDC_INTEN_DATTMO | MSDC_INTEN_DATCRCERR; -+ unsigned long tmo = jiffies + DAT_TIMEOUT; -+ -+ sdr_set_bits(MSDC_INTEN, wints); -+ while (num) { -+ left = sg_dma_len(sg); -+ ptr = sg_virt(sg); -+ while (left) { -+ if ((left >= MSDC_FIFO_THD) && (msdc_rxfifocnt() >= MSDC_FIFO_THD)) { -+ count = MSDC_FIFO_THD >> 2; -+ do { -+ *ptr++ = msdc_fifo_read32(); -+ } while (--count); -+ left -= MSDC_FIFO_THD; -+ } else if ((left < MSDC_FIFO_THD) && msdc_rxfifocnt() >= left) { -+ while (left > 3) { -+ *ptr++ = msdc_fifo_read32(); -+ left -= 4; -+ } -+ -+ u8ptr = (u8 *)ptr; -+ while(left) { -+ * u8ptr++ = msdc_fifo_read8(); -+ left--; -+ } -+ } -+ -+ if (msdc_pio_abort(host, data, tmo)) -+ goto end; -+ } -+ size += sg_dma_len(sg); -+ sg = sg_next(sg); num--; -+ } -+end: -+ data->bytes_xfered += size; -+ //printk(" PIO Read<%d>bytes\n", size); -+ -+ sdr_clr_bits(MSDC_INTEN, wints); -+ if(data->error) -+ printk("read pio data->error<%d> left<%d> size<%d>\n", data->error, left, size); -+ -+ return data->error; -+} -+ -+static int msdc_pio_write(struct msdc_host* host, struct mmc_data *data) -+{ -+ u32 base = host->base; -+ struct scatterlist *sg = data->sg; -+ u32 num = data->sg_len; -+ u32 *ptr; -+ u8 *u8ptr; -+ u32 left; -+ u32 count, size = 0; -+ u32 wints = MSDC_INTEN_DATTMO | MSDC_INTEN_DATCRCERR; -+ unsigned long tmo = jiffies + DAT_TIMEOUT; -+ -+ sdr_set_bits(MSDC_INTEN, wints); -+ while (num) { -+ left = sg_dma_len(sg); -+ ptr = sg_virt(sg); -+ -+ while (left) { -+ if (left >= MSDC_FIFO_SZ && msdc_txfifocnt() == 0) { -+ count = MSDC_FIFO_SZ >> 2; -+ do { -+ msdc_fifo_write32(*ptr); ptr++; -+ } while (--count); -+ left -= MSDC_FIFO_SZ; -+ } else if (left < MSDC_FIFO_SZ && msdc_txfifocnt() == 0) { -+ while (left > 3) { -+ msdc_fifo_write32(*ptr); ptr++; -+ left -= 4; -+ } -+ -+ u8ptr = (u8*)ptr; -+ while( left) { -+ msdc_fifo_write8(*u8ptr); -+ u8ptr++; -+ left--; -+ } -+ } -+ -+ if (msdc_pio_abort(host, data, tmo)) -+ goto end; -+ } -+ size += sg_dma_len(sg); -+ sg = sg_next(sg); num--; -+ } -+end: -+ data->bytes_xfered += size; -+ //printk(" PIO Write<%d>bytes\n", size); -+ if(data->error) -+ printk("write pio data->error<%d>\n", data->error); -+ -+ sdr_clr_bits(MSDC_INTEN, wints); -+ -+ return data->error; -+} -+ -+static void msdc_dma_start(struct msdc_host *host) -+{ -+ u32 base = host->base; -+ u32 wints = MSDC_INTEN_XFER_COMPL | MSDC_INTEN_DATTMO | 
MSDC_INTEN_DATCRCERR; -+ -+ sdr_set_bits(MSDC_INTEN, wints); -+ sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_START, 1); -+ -+ //printk("DMA start\n"); -+} -+ -+static void msdc_dma_stop(struct msdc_host *host) -+{ -+ u32 base = host->base; -+ u32 wints = MSDC_INTEN_XFER_COMPL | MSDC_INTEN_DATTMO | MSDC_INTEN_DATCRCERR; -+ -+ //printk("DMA status: 0x%.8x\n",sdr_read32(MSDC_DMA_CFG)); -+ -+ sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_STOP, 1); -+ while (sdr_read32(MSDC_DMA_CFG) & MSDC_DMA_CFG_STS); -+ sdr_clr_bits(MSDC_INTEN, wints); /* Not just xfer_comp */ -+ -+ //printk("DMA stop\n"); -+} -+ -+static u8 msdc_dma_calcs(u8 *buf, u32 len) -+{ -+ u32 i, sum = 0; -+ -+ for (i = 0; i < len; i++) -+ sum += buf[i]; -+ -+ return 0xFF - (u8)sum; -+} -+ -+static int msdc_dma_config(struct msdc_host *host, struct msdc_dma *dma) -+{ -+ u32 base = host->base; -+ u32 sglen = dma->sglen; -+ u32 j, num, bdlen; -+ u8 blkpad, dwpad, chksum; -+ struct scatterlist *sg = dma->sg; -+ gpd_t *gpd; -+ bd_t *bd; -+ -+ switch (dma->mode) { -+ case MSDC_MODE_DMA_BASIC: -+ BUG_ON(dma->xfersz > 65535); -+ BUG_ON(dma->sglen != 1); -+ sdr_write32(MSDC_DMA_SA, PHYSADDR(sg_dma_address(sg))); -+ sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_LASTBUF, 1); -+ sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_XFERSZ, sg_dma_len(sg)); -+ sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_BRUSTSZ, dma->burstsz); -+ sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_MODE, 0); -+ break; -+ -+ case MSDC_MODE_DMA_DESC: -+ blkpad = (dma->flags & DMA_FLAG_PAD_BLOCK) ? 1 : 0; -+ dwpad = (dma->flags & DMA_FLAG_PAD_DWORD) ? 1 : 0; -+ chksum = (dma->flags & DMA_FLAG_EN_CHKSUM) ? 1 : 0; -+ -+ num = (sglen + MAX_BD_PER_GPD - 1) / MAX_BD_PER_GPD; -+ BUG_ON(num !=1 ); -+ -+ gpd = dma->gpd; -+ bd = dma->bd; -+ bdlen = sglen; -+ -+ gpd->hwo = 1; /* hw will clear it */ -+ gpd->bdp = 1; -+ gpd->chksum = 0; /* need to clear first. */ -+ gpd->chksum = (chksum ? msdc_dma_calcs((u8 *)gpd, 16) : 0); -+ -+ for (j = 0; j < bdlen; j++) { -+ msdc_init_bd(&bd[j], blkpad, dwpad, sg_dma_address(sg), sg_dma_len(sg)); -+ if( j == bdlen - 1) -+ bd[j].eol = 1; -+ else -+ bd[j].eol = 0; -+ bd[j].chksum = 0; /* checksume need to clear first */ -+ bd[j].chksum = (chksum ? 
msdc_dma_calcs((u8 *)(&bd[j]), 16) : 0); -+ sg++; -+ } -+ -+ dma->used_gpd += 2; -+ dma->used_bd += bdlen; -+ -+ sdr_set_field(MSDC_DMA_CFG, MSDC_DMA_CFG_DECSEN, chksum); -+ sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_BRUSTSZ, dma->burstsz); -+ sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_MODE, 1); -+ sdr_write32(MSDC_DMA_SA, PHYSADDR((u32)dma->gpd_addr)); -+ break; -+ } -+ -+// printk("DMA_CTRL = 0x%x\n", sdr_read32(MSDC_DMA_CTRL)); -+// printk("DMA_CFG = 0x%x\n", sdr_read32(MSDC_DMA_CFG)); -+// printk("DMA_SA = 0x%x\n", sdr_read32(MSDC_DMA_SA)); -+ -+ return 0; -+} -+ -+static void msdc_dma_setup(struct msdc_host *host, struct msdc_dma *dma, -+ struct scatterlist *sg, unsigned int sglen) -+{ -+ BUG_ON(sglen > MAX_BD_NUM); -+ -+ dma->sg = sg; -+ dma->flags = DMA_FLAG_EN_CHKSUM; -+ dma->sglen = sglen; -+ dma->xfersz = host->xfer_size; -+ dma->burstsz = MSDC_BRUST_64B; -+ -+ if (sglen == 1 && sg_dma_len(sg) <= MAX_DMA_CNT) -+ dma->mode = MSDC_MODE_DMA_BASIC; -+ else -+ dma->mode = MSDC_MODE_DMA_DESC; -+ -+// printk("DMA mode<%d> sglen<%d> xfersz<%d>\n", dma->mode, dma->sglen, dma->xfersz); -+ -+ msdc_dma_config(host, dma); -+} -+ -+static void msdc_set_blknum(struct msdc_host *host, u32 blknum) -+{ -+ u32 base = host->base; -+ -+ sdr_write32(SDC_BLK_NUM, blknum); -+} -+ -+static int msdc_do_request(struct mmc_host*mmc, struct mmc_request*mrq) -+{ -+ struct msdc_host *host = mmc_priv(mmc); -+ struct mmc_command *cmd; -+ struct mmc_data *data; -+ u32 base = host->base; -+ unsigned int left=0; -+ int dma = 0, read = 1, dir = DMA_FROM_DEVICE, send_type=0; -+ -+#define SND_DAT 0 -+#define SND_CMD 1 -+ -+ BUG_ON(mmc == NULL); -+ BUG_ON(mrq == NULL); -+ -+ host->error = 0; -+ atomic_set(&host->abort, 0); -+ -+ cmd = mrq->cmd; -+ data = mrq->cmd->data; -+ -+ if (!data) { -+ send_type = SND_CMD; -+ if (msdc_do_command(host, cmd, 1, CMD_TIMEOUT) != 0) -+ goto done; -+ } else { -+ BUG_ON(data->blksz > HOST_MAX_BLKSZ); -+ send_type=SND_DAT; -+ -+ data->error = 0; -+ read = data->flags & MMC_DATA_READ ? 1 : 0; -+ host->data = data; -+ host->xfer_size = data->blocks * data->blksz; -+ host->blksz = data->blksz; -+ -+ host->dma_xfer = dma = ((host->xfer_size >= 512) ? 1 : 0); -+ -+ if (read) -+ if ((host->timeout_ns != data->timeout_ns) || -+ (host->timeout_clks != data->timeout_clks)) -+ msdc_set_timeout(host, data->timeout_ns, data->timeout_clks); -+ -+ msdc_set_blknum(host, data->blocks); -+ -+ if (dma) { -+ msdc_dma_on(); -+ init_completion(&host->xfer_done); -+ -+ if (msdc_command_start(host, cmd, 1, CMD_TIMEOUT) != 0) -+ goto done; -+ -+ dir = read ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE; -+ dma_map_sg(mmc_dev(mmc), data->sg, data->sg_len, dir); -+ msdc_dma_setup(host, &host->dma, data->sg, data->sg_len); -+ -+ if (msdc_command_resp(host, cmd, 1, CMD_TIMEOUT) != 0) -+ goto done; -+ -+ msdc_dma_start(host); -+ -+ spin_unlock(&host->lock); -+ if (!wait_for_completion_timeout(&host->xfer_done, DAT_TIMEOUT)) { -+ /*printk("XXX CMD<%d> wait xfer_done<%d> timeout!!\n", cmd->opcode, data->blocks * data->blksz); -+ printk(" DMA_SA = 0x%x\n", sdr_read32(MSDC_DMA_SA)); -+ printk(" DMA_CA = 0x%x\n", sdr_read32(MSDC_DMA_CA)); -+ printk(" DMA_CTRL = 0x%x\n", sdr_read32(MSDC_DMA_CTRL)); -+ printk(" DMA_CFG = 0x%x\n", sdr_read32(MSDC_DMA_CFG));*/ -+ data->error = (unsigned int)-ETIMEDOUT; -+ -+ msdc_reset(); -+ msdc_clr_fifo(); -+ msdc_clr_int(); -+ } -+ spin_lock(&host->lock); -+ msdc_dma_stop(host); -+ } else { -+ if (msdc_do_command(host, cmd, 1, CMD_TIMEOUT) != 0) -+ goto done; -+ -+ if (read) { -+ if (msdc_pio_read(host, data)) -+ goto done; -+ } else { -+ if (msdc_pio_write(host, data)) -+ goto done; -+ } -+ -+ if (!read) { -+ while (1) { -+ left = msdc_txfifocnt(); -+ if (left == 0) { -+ break; -+ } -+ if (msdc_pio_abort(host, data, jiffies + DAT_TIMEOUT)) { -+ break; -+ /* Fix me: what about if data error, when stop ? how to? */ -+ } -+ } -+ } else { -+ /* Fix me: read case: need to check CRC error */ -+ } -+ -+ /* For write case: SDCBUSY and Xfer_Comp will assert when DAT0 not busy. -+ For read case : SDCBUSY and Xfer_Comp will assert when last byte read out from FIFO. -+ */ -+ -+ /* try not to wait xfer_comp interrupt. -+ the next command will check SDC_BUSY. -+ SDC_BUSY means xfer_comp assert -+ */ -+ -+ } // PIO mode -+ -+ /* Last: stop transfer */ -+ if (data->stop){ -+ if (msdc_do_command(host, data->stop, 0, CMD_TIMEOUT) != 0) { -+ goto done; -+ } -+ } -+ } -+ -+done: -+ if (data != NULL) { -+ host->data = NULL; -+ host->dma_xfer = 0; -+ if (dma != 0) { -+ msdc_dma_off(); -+ host->dma.used_bd = 0; -+ host->dma.used_gpd = 0; -+ dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len, dir); -+ } -+ host->blksz = 0; -+ -+ // printk("CMD<%d> data<%s %s> blksz<%d> block<%d> error<%d>",cmd->opcode, (dma? "dma":"pio\n"), -+ // (read ? 
"read ":"write") ,data->blksz, data->blocks, data->error); -+ } -+ -+ if (mrq->cmd->error) host->error = 0x001; -+ if (mrq->data && mrq->data->error) host->error |= 0x010; -+ if (mrq->stop && mrq->stop->error) host->error |= 0x100; -+ -+ //if (host->error) printk("host->error<%d>\n", host->error); -+ -+ return host->error; -+} -+ -+static int msdc_app_cmd(struct mmc_host *mmc, struct msdc_host *host) -+{ -+ struct mmc_command cmd; -+ struct mmc_request mrq; -+ u32 err; -+ -+ memset(&cmd, 0, sizeof(struct mmc_command)); -+ cmd.opcode = MMC_APP_CMD; -+#if 0 /* bug: we meet mmc->card is null when ACMD6 */ -+ cmd.arg = mmc->card->rca << 16; -+#else -+ cmd.arg = host->app_cmd_arg; -+#endif -+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; -+ -+ memset(&mrq, 0, sizeof(struct mmc_request)); -+ mrq.cmd = &cmd; cmd.mrq = &mrq; -+ cmd.data = NULL; -+ -+ err = msdc_do_command(host, &cmd, 0, CMD_TIMEOUT); -+ return err; -+} -+ -+static int msdc_tune_cmdrsp(struct msdc_host*host, struct mmc_command *cmd) -+{ -+ int result = -1; -+ u32 base = host->base; -+ u32 rsmpl, cur_rsmpl, orig_rsmpl; -+ u32 rrdly, cur_rrdly = 0, orig_rrdly; -+ u32 skip = 1; -+ -+ /* ==== don't support 3.0 now ==== -+ 1: R_SMPL[1] -+ 2: PAD_CMD_RESP_RXDLY[26:22] -+ ==========================*/ -+ -+ // save the previous tune result -+ sdr_get_field(MSDC_IOCON, MSDC_IOCON_RSPL, orig_rsmpl); -+ sdr_get_field(MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRRDLY, orig_rrdly); -+ -+ rrdly = 0; -+ do { -+ for (rsmpl = 0; rsmpl < 2; rsmpl++) { -+ /* Lv1: R_SMPL[1] */ -+ cur_rsmpl = (orig_rsmpl + rsmpl) % 2; -+ if (skip == 1) { -+ skip = 0; -+ continue; -+ } -+ sdr_set_field(MSDC_IOCON, MSDC_IOCON_RSPL, cur_rsmpl); -+ -+ if (host->app_cmd) { -+ result = msdc_app_cmd(host->mmc, host); -+ if (result) { -+ //printk("TUNE_CMD app_cmd<%d> failed: RESP_RXDLY<%d>,R_SMPL<%d>\n", -+ // host->mrq->cmd->opcode, cur_rrdly, cur_rsmpl); -+ continue; -+ } -+ } -+ result = msdc_do_command(host, cmd, 0, CMD_TIMEOUT); // not tune. -+ //printk("TUNE_CMD<%d> %s PAD_CMD_RESP_RXDLY[26:22]<%d> R_SMPL[1]<%d>\n", cmd->opcode, -+// (result == 0) ? "PASS" : "FAIL", cur_rrdly, cur_rsmpl); -+ -+ if (result == 0) { -+ return 0; -+ } -+ if (result != (unsigned int)(-EIO)) { -+ // printk("TUNE_CMD<%d> Error<%d> not -EIO\n", cmd->opcode, result); -+ return result; -+ } -+ -+ /* should be EIO */ -+ if (sdr_read32(SDC_CMD) & 0x1800) { /* check if has data phase */ -+ msdc_abort_data(host); -+ } -+ } -+ -+ /* Lv2: PAD_CMD_RESP_RXDLY[26:22] */ -+ cur_rrdly = (orig_rrdly + rrdly + 1) % 32; -+ sdr_set_field(MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRRDLY, cur_rrdly); -+ }while (++rrdly < 32); -+ -+ return result; -+} -+ -+/* Support SD2.0 Only */ -+static int msdc_tune_bread(struct mmc_host *mmc, struct mmc_request *mrq) -+{ -+ struct msdc_host *host = mmc_priv(mmc); -+ u32 base = host->base; -+ u32 ddr=0; -+ u32 dcrc = 0; -+ u32 rxdly, cur_rxdly0, cur_rxdly1; -+ u32 dsmpl, cur_dsmpl, orig_dsmpl; -+ u32 cur_dat0, cur_dat1, cur_dat2, cur_dat3; -+ u32 cur_dat4, cur_dat5, cur_dat6, cur_dat7; -+ u32 orig_dat0, orig_dat1, orig_dat2, orig_dat3; -+ u32 orig_dat4, orig_dat5, orig_dat6, orig_dat7; -+ int result = -1; -+ u32 skip = 1; -+ -+ sdr_get_field(MSDC_IOCON, MSDC_IOCON_DSPL, orig_dsmpl); -+ -+ /* Tune Method 2. 
*/ -+ sdr_set_field(MSDC_IOCON, MSDC_IOCON_DDLSEL, 1); -+ -+ rxdly = 0; -+ do { -+ for (dsmpl = 0; dsmpl < 2; dsmpl++) { -+ cur_dsmpl = (orig_dsmpl + dsmpl) % 2; -+ if (skip == 1) { -+ skip = 0; -+ continue; -+ } -+ sdr_set_field(MSDC_IOCON, MSDC_IOCON_DSPL, cur_dsmpl); -+ -+ if (host->app_cmd) { -+ result = msdc_app_cmd(host->mmc, host); -+ if (result) { -+ //printk("TUNE_BREAD app_cmd<%d> failed\n", host->mrq->cmd->opcode); -+ continue; -+ } -+ } -+ result = msdc_do_request(mmc,mrq); -+ -+ sdr_get_field(SDC_DCRC_STS, SDC_DCRC_STS_POS|SDC_DCRC_STS_NEG, dcrc); /* RO */ -+ if (!ddr) dcrc &= ~SDC_DCRC_STS_NEG; -+ //printk("TUNE_BREAD<%s> dcrc<0x%x> DATRDDLY0/1<0x%x><0x%x> dsmpl<0x%x>\n", -+ // (result == 0 && dcrc == 0) ? "PASS" : "FAIL", dcrc, -+ // sdr_read32(MSDC_DAT_RDDLY0), sdr_read32(MSDC_DAT_RDDLY1), cur_dsmpl); -+ -+ /* Fix me: result is 0, but dcrc is still exist */ -+ if (result == 0 && dcrc == 0) { -+ goto done; -+ } else { -+ /* there is a case: command timeout, and data phase not processed */ -+ if (mrq->data->error != 0 && mrq->data->error != (unsigned int)(-EIO)) { -+ //printk("TUNE_READ: result<0x%x> cmd_error<%d> data_error<%d>\n", -+ // result, mrq->cmd->error, mrq->data->error); -+ goto done; -+ } -+ } -+ } -+ -+ cur_rxdly0 = sdr_read32(MSDC_DAT_RDDLY0); -+ cur_rxdly1 = sdr_read32(MSDC_DAT_RDDLY1); -+ -+ /* E1 ECO. YD: Reverse */ -+ if (sdr_read32(MSDC_ECO_VER) >= 4) { -+ orig_dat0 = (cur_rxdly0 >> 24) & 0x1F; -+ orig_dat1 = (cur_rxdly0 >> 16) & 0x1F; -+ orig_dat2 = (cur_rxdly0 >> 8) & 0x1F; -+ orig_dat3 = (cur_rxdly0 >> 0) & 0x1F; -+ orig_dat4 = (cur_rxdly1 >> 24) & 0x1F; -+ orig_dat5 = (cur_rxdly1 >> 16) & 0x1F; -+ orig_dat6 = (cur_rxdly1 >> 8) & 0x1F; -+ orig_dat7 = (cur_rxdly1 >> 0) & 0x1F; -+ } else { -+ orig_dat0 = (cur_rxdly0 >> 0) & 0x1F; -+ orig_dat1 = (cur_rxdly0 >> 8) & 0x1F; -+ orig_dat2 = (cur_rxdly0 >> 16) & 0x1F; -+ orig_dat3 = (cur_rxdly0 >> 24) & 0x1F; -+ orig_dat4 = (cur_rxdly1 >> 0) & 0x1F; -+ orig_dat5 = (cur_rxdly1 >> 8) & 0x1F; -+ orig_dat6 = (cur_rxdly1 >> 16) & 0x1F; -+ orig_dat7 = (cur_rxdly1 >> 24) & 0x1F; -+ } -+ -+ if (ddr) { -+ cur_dat0 = (dcrc & (1 << 0) || dcrc & (1 << 8)) ? ((orig_dat0 + 1) % 32) : orig_dat0; -+ cur_dat1 = (dcrc & (1 << 1) || dcrc & (1 << 9)) ? ((orig_dat1 + 1) % 32) : orig_dat1; -+ cur_dat2 = (dcrc & (1 << 2) || dcrc & (1 << 10)) ? ((orig_dat2 + 1) % 32) : orig_dat2; -+ cur_dat3 = (dcrc & (1 << 3) || dcrc & (1 << 11)) ? ((orig_dat3 + 1) % 32) : orig_dat3; -+ } else { -+ cur_dat0 = (dcrc & (1 << 0)) ? ((orig_dat0 + 1) % 32) : orig_dat0; -+ cur_dat1 = (dcrc & (1 << 1)) ? ((orig_dat1 + 1) % 32) : orig_dat1; -+ cur_dat2 = (dcrc & (1 << 2)) ? ((orig_dat2 + 1) % 32) : orig_dat2; -+ cur_dat3 = (dcrc & (1 << 3)) ? ((orig_dat3 + 1) % 32) : orig_dat3; -+ } -+ cur_dat4 = (dcrc & (1 << 4)) ? ((orig_dat4 + 1) % 32) : orig_dat4; -+ cur_dat5 = (dcrc & (1 << 5)) ? ((orig_dat5 + 1) % 32) : orig_dat5; -+ cur_dat6 = (dcrc & (1 << 6)) ? ((orig_dat6 + 1) % 32) : orig_dat6; -+ cur_dat7 = (dcrc & (1 << 7)) ? 
((orig_dat7 + 1) % 32) : orig_dat7; -+ -+ cur_rxdly0 = (cur_dat0 << 24) | (cur_dat1 << 16) | (cur_dat2 << 8) | (cur_dat3 << 0); -+ cur_rxdly1 = (cur_dat4 << 24) | (cur_dat5 << 16) | (cur_dat6 << 8) | (cur_dat7 << 0); -+ -+ sdr_write32(MSDC_DAT_RDDLY0, cur_rxdly0); -+ sdr_write32(MSDC_DAT_RDDLY1, cur_rxdly1); -+ -+ } while (++rxdly < 32); -+ -+done: -+ return result; -+} -+ -+static int msdc_tune_bwrite(struct mmc_host *mmc,struct mmc_request *mrq) -+{ -+ struct msdc_host *host = mmc_priv(mmc); -+ u32 base = host->base; -+ -+ u32 wrrdly, cur_wrrdly = 0, orig_wrrdly; -+ u32 dsmpl, cur_dsmpl, orig_dsmpl; -+ u32 rxdly, cur_rxdly0; -+ u32 orig_dat0, orig_dat1, orig_dat2, orig_dat3; -+ u32 cur_dat0, cur_dat1, cur_dat2, cur_dat3; -+ int result = -1; -+ u32 skip = 1; -+ -+ // MSDC_IOCON_DDR50CKD need to check. [Fix me] -+ -+ sdr_get_field(MSDC_PAD_TUNE, MSDC_PAD_TUNE_DATWRDLY, orig_wrrdly); -+ sdr_get_field(MSDC_IOCON, MSDC_IOCON_DSPL, orig_dsmpl ); -+ -+ /* Tune Method 2. just DAT0 */ -+ sdr_set_field(MSDC_IOCON, MSDC_IOCON_DDLSEL, 1); -+ cur_rxdly0 = sdr_read32(MSDC_DAT_RDDLY0); -+ -+ /* E1 ECO. YD: Reverse */ -+ if (sdr_read32(MSDC_ECO_VER) >= 4) { -+ orig_dat0 = (cur_rxdly0 >> 24) & 0x1F; -+ orig_dat1 = (cur_rxdly0 >> 16) & 0x1F; -+ orig_dat2 = (cur_rxdly0 >> 8) & 0x1F; -+ orig_dat3 = (cur_rxdly0 >> 0) & 0x1F; -+ } else { -+ orig_dat0 = (cur_rxdly0 >> 0) & 0x1F; -+ orig_dat1 = (cur_rxdly0 >> 8) & 0x1F; -+ orig_dat2 = (cur_rxdly0 >> 16) & 0x1F; -+ orig_dat3 = (cur_rxdly0 >> 24) & 0x1F; -+ } -+ -+ rxdly = 0; -+ do { -+ wrrdly = 0; -+ do { -+ for (dsmpl = 0; dsmpl < 2; dsmpl++) { -+ cur_dsmpl = (orig_dsmpl + dsmpl) % 2; -+ if (skip == 1) { -+ skip = 0; -+ continue; -+ } -+ sdr_set_field(MSDC_IOCON, MSDC_IOCON_DSPL, cur_dsmpl); -+ -+ if (host->app_cmd) { -+ result = msdc_app_cmd(host->mmc, host); -+ if (result) { -+ //printk("TUNE_BWRITE app_cmd<%d> failed\n", host->mrq->cmd->opcode); -+ continue; -+ } -+ } -+ result = msdc_do_request(mmc,mrq); -+ -+ //printk("TUNE_BWRITE<%s> DSPL<%d> DATWRDLY<%d> MSDC_DAT_RDDLY0<0x%x>\n", -+ // result == 0 ? 
"PASS" : "FAIL", -+ // cur_dsmpl, cur_wrrdly, cur_rxdly0); -+ -+ if (result == 0) { -+ goto done; -+ } -+ else { -+ /* there is a case: command timeout, and data phase not processed */ -+ if (mrq->data->error != (unsigned int)(-EIO)) { -+ //printk("TUNE_READ: result<0x%x> cmd_error<%d> data_error<%d>\n", -+ // && result, mrq->cmd->error, mrq->data->error); -+ goto done; -+ } -+ } -+ } -+ cur_wrrdly = (orig_wrrdly + wrrdly + 1) % 32; -+ sdr_set_field(MSDC_PAD_TUNE, MSDC_PAD_TUNE_DATWRDLY, cur_wrrdly); -+ } while (++wrrdly < 32); -+ -+ cur_dat0 = (orig_dat0 + rxdly) % 32; /* only adjust bit-1 for crc */ -+ cur_dat1 = orig_dat1; -+ cur_dat2 = orig_dat2; -+ cur_dat3 = orig_dat3; -+ -+ cur_rxdly0 = (cur_dat0 << 24) | (cur_dat1 << 16) | (cur_dat2 << 8) | (cur_dat3 << 0); -+ sdr_write32(MSDC_DAT_RDDLY0, cur_rxdly0); -+ } while (++rxdly < 32); -+ -+done: -+ return result; -+} -+ -+static int msdc_get_card_status(struct mmc_host *mmc, struct msdc_host *host, u32 *status) -+{ -+ struct mmc_command cmd; -+ struct mmc_request mrq; -+ u32 err; -+ -+ memset(&cmd, 0, sizeof(struct mmc_command)); -+ cmd.opcode = MMC_SEND_STATUS; -+ if (mmc->card) { -+ cmd.arg = mmc->card->rca << 16; -+ } else { -+ //printk("cmd13 mmc card is null\n"); -+ cmd.arg = host->app_cmd_arg; -+ } -+ cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC; -+ -+ memset(&mrq, 0, sizeof(struct mmc_request)); -+ mrq.cmd = &cmd; cmd.mrq = &mrq; -+ cmd.data = NULL; -+ -+ err = msdc_do_command(host, &cmd, 1, CMD_TIMEOUT); -+ -+ if (status) -+ *status = cmd.resp[0]; -+ -+ return err; -+} -+ -+static int msdc_check_busy(struct mmc_host *mmc, struct msdc_host *host) -+{ -+ u32 err = 0; -+ u32 status = 0; -+ -+ do { -+ err = msdc_get_card_status(mmc, host, &status); -+ if (err) -+ return err; -+ /* need cmd12? */ -+ //printk("cmd<13> resp<0x%x>\n", status); -+ } while (R1_CURRENT_STATE(status) == 7); -+ -+ return err; -+} -+ -+static int msdc_tune_request(struct mmc_host *mmc, struct mmc_request *mrq) -+{ -+ struct msdc_host *host = mmc_priv(mmc); -+ struct mmc_command *cmd; -+ struct mmc_data *data; -+ int ret=0, read; -+ -+ cmd = mrq->cmd; -+ data = mrq->cmd->data; -+ -+ read = data->flags & MMC_DATA_READ ? 1 : 0; -+ -+ if (read) { -+ if (data->error == (unsigned int)(-EIO)) -+ ret = msdc_tune_bread(mmc,mrq); -+ } else { -+ ret = msdc_check_busy(mmc, host); -+ if (ret){ -+ //printk("XXX cmd13 wait program done failed\n"); -+ return ret; -+ } -+ /* CRC and TO */ -+ /* Fix me: don't care card status? 
*/ -+ ret = msdc_tune_bwrite(mmc,mrq); -+ } -+ -+ return ret; -+} -+ -+static void msdc_ops_request(struct mmc_host *mmc,struct mmc_request *mrq) -+{ -+ struct msdc_host *host = mmc_priv(mmc); -+ -+ if (host->mrq) { -+ //printk("XXX host->mrq<0x%.8x>\n", (int)host->mrq); -+ BUG(); -+ } -+ if (!is_card_present(host) || host->power_mode == MMC_POWER_OFF) { -+ //printk("cmd<%d> card<%d> power<%d>\n", mrq->cmd->opcode, is_card_present(host), host->power_mode); -+ mrq->cmd->error = (unsigned int)-ENOMEDIUM; -+ mrq->done(mrq); -+ return; -+ } -+ spin_lock(&host->lock); -+ -+ host->mrq = mrq; -+ -+ if (msdc_do_request(mmc,mrq)) -+ if(host->hw->flags & MSDC_REMOVABLE && mrq->data && mrq->data->error) -+ msdc_tune_request(mmc,mrq); -+ -+ if (mrq->cmd->opcode == MMC_APP_CMD) { -+ host->app_cmd = 1; -+ host->app_cmd_arg = mrq->cmd->arg; /* save the RCA */ -+ } else { -+ host->app_cmd = 0; -+ } -+ -+ host->mrq = NULL; -+ -+ spin_unlock(&host->lock); -+ -+ mmc_request_done(mmc, mrq); -+} -+ -+/* called by ops.set_ios */ -+static void msdc_set_buswidth(struct msdc_host *host, u32 width) -+{ -+ u32 base = host->base; -+ u32 val = sdr_read32(SDC_CFG); -+ -+ val &= ~SDC_CFG_BUSWIDTH; -+ -+ switch (width) { -+ default: -+ case MMC_BUS_WIDTH_1: -+ width = 1; -+ val |= (MSDC_BUS_1BITS << 16); -+ break; -+ case MMC_BUS_WIDTH_4: -+ val |= (MSDC_BUS_4BITS << 16); -+ break; -+ case MMC_BUS_WIDTH_8: -+ val |= (MSDC_BUS_8BITS << 16); -+ break; -+ } -+ -+ sdr_write32(SDC_CFG, val); -+ -+ //printk("Bus Width = %d\n", width); -+} -+ -+/* ops.set_ios */ -+static void msdc_ops_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) -+{ -+ struct msdc_host *host = mmc_priv(mmc); -+ struct msdc_hw *hw=host->hw; -+ u32 base = host->base; -+ u32 ddr = 0; -+ -+#ifdef MT6575_SD_DEBUG -+ static char *vdd[] = { -+ "1.50v", "1.55v", "1.60v", "1.65v", "1.70v", "1.80v", "1.90v", -+ "2.00v", "2.10v", "2.20v", "2.30v", "2.40v", "2.50v", "2.60v", -+ "2.70v", "2.80v", "2.90v", "3.00v", "3.10v", "3.20v", "3.30v", -+ "3.40v", "3.50v", "3.60v" -+ }; -+ static char *power_mode[] = { -+ "OFF", "UP", "ON" -+ }; -+ static char *bus_mode[] = { -+ "UNKNOWN", "OPENDRAIN", "PUSHPULL" -+ }; -+ static char *timing[] = { -+ "LEGACY", "MMC_HS", "SD_HS" -+ }; -+ -+ /*printk("SET_IOS: CLK(%dkHz), BUS(%s), BW(%u), PWR(%s), VDD(%s), TIMING(%s)\n", -+ ios->clock / 1000, bus_mode[ios->bus_mode], -+ (ios->bus_width == MMC_BUS_WIDTH_4) ? 4 : 1, -+ power_mode[ios->power_mode], vdd[ios->vdd], timing[ios->timing]);*/ -+#endif -+ -+ msdc_set_buswidth(host, ios->bus_width); -+ -+ /* Power control ??? 
*/ -+ switch (ios->power_mode) { -+ case MMC_POWER_OFF: -+ case MMC_POWER_UP: -+ // msdc_set_power_mode(host, ios->power_mode); /* --- by chhung */ -+ break; -+ case MMC_POWER_ON: -+ host->power_mode = MMC_POWER_ON; -+ break; -+ default: -+ break; -+ } -+ -+ /* Clock control */ -+ if (host->mclk != ios->clock) { -+ if(ios->clock > 25000000) { -+ //printk("SD data latch edge<%d>\n", hw->data_edge); -+ sdr_set_field(MSDC_IOCON, MSDC_IOCON_RSPL, hw->cmd_edge); -+ sdr_set_field(MSDC_IOCON, MSDC_IOCON_DSPL, hw->data_edge); -+ } else { -+ sdr_write32(MSDC_IOCON, 0x00000000); -+ sdr_write32(MSDC_DAT_RDDLY0, 0x10101010); // for MT7620 E2 and afterward -+ sdr_write32(MSDC_DAT_RDDLY1, 0x00000000); -+ sdr_write32(MSDC_PAD_TUNE, 0x84101010); // for MT7620 E2 and afterward -+ } -+ msdc_set_mclk(host, ddr, ios->clock); -+ } -+} -+ -+/* ops.get_ro */ -+static int msdc_ops_get_ro(struct mmc_host *mmc) -+{ -+ struct msdc_host *host = mmc_priv(mmc); -+ u32 base = host->base; -+ unsigned long flags; -+ int ro = 0; -+ -+ if (host->hw->flags & MSDC_WP_PIN_EN) { /* set for card */ -+ spin_lock_irqsave(&host->lock, flags); -+ ro = (sdr_read32(MSDC_PS) >> 31); -+ spin_unlock_irqrestore(&host->lock, flags); -+ } -+ return ro; -+} -+ -+/* ops.get_cd */ -+static int msdc_ops_get_cd(struct mmc_host *mmc) -+{ -+ struct msdc_host *host = mmc_priv(mmc); -+ u32 base = host->base; -+ unsigned long flags; -+ int present = 1; -+ -+ /* for sdio, MSDC_REMOVABLE not set, always return 1 */ -+ if (!(host->hw->flags & MSDC_REMOVABLE)) { -+ /* For sdio, read H/W always get<1>, but may timeout some times */ -+#if 1 -+ host->card_inserted = 1; -+ return 1; -+#else -+ host->card_inserted = (host->pm_state.event == PM_EVENT_USER_RESUME) ? 1 : 0; -+ printk("sdio ops_get_cd<%d>\n", host->card_inserted); -+ return host->card_inserted; -+#endif -+ } -+ -+ /* MSDC_CD_PIN_EN set for card */ -+ if (host->hw->flags & MSDC_CD_PIN_EN) { -+ spin_lock_irqsave(&host->lock, flags); -+#if 0 -+ present = host->card_inserted; /* why not read from H/W: Fix me*/ -+#else -+ present = (sdr_read32(MSDC_PS) & MSDC_PS_CDSTS) ? 0 : 1; -+ host->card_inserted = present; -+#endif -+ spin_unlock_irqrestore(&host->lock, flags); -+ } else { -+ present = 0; /* TODO? Check DAT3 pins for card detection */ -+ } -+ -+ //printk("ops_get_cd return<%d>\n", present); -+ return present; -+} -+ -+/* ops.enable_sdio_irq */ -+static void msdc_ops_enable_sdio_irq(struct mmc_host *mmc, int enable) -+{ -+ struct msdc_host *host = mmc_priv(mmc); -+ struct msdc_hw *hw = host->hw; -+ u32 base = host->base; -+ u32 tmp; -+ -+ if (hw->flags & MSDC_EXT_SDIO_IRQ) { /* yes for sdio */ -+ if (enable) { -+ hw->enable_sdio_eirq(); /* combo_sdio_enable_eirq */ -+ } else { -+ hw->disable_sdio_eirq(); /* combo_sdio_disable_eirq */ -+ } -+ } else { -+ //printk("XXX \n"); /* so never enter here */ -+ tmp = sdr_read32(SDC_CFG); -+ /* FIXME. 
Need to interrupt gap detection */ -+ if (enable) { -+ tmp |= (SDC_CFG_SDIOIDE | SDC_CFG_SDIOINTWKUP); -+ } else { -+ tmp &= ~(SDC_CFG_SDIOIDE | SDC_CFG_SDIOINTWKUP); -+ } -+ sdr_write32(SDC_CFG, tmp); -+ } -+} -+ -+static struct mmc_host_ops mt_msdc_ops = { -+ .request = msdc_ops_request, -+ .set_ios = msdc_ops_set_ios, -+ .get_ro = msdc_ops_get_ro, -+ .get_cd = msdc_ops_get_cd, -+ .enable_sdio_irq = msdc_ops_enable_sdio_irq, -+}; -+ -+/*--------------------------------------------------------------------------*/ -+/* interrupt handler */ -+/*--------------------------------------------------------------------------*/ -+static irqreturn_t msdc_irq(int irq, void *dev_id) -+{ -+ struct msdc_host *host = (struct msdc_host *)dev_id; -+ struct mmc_data *data = host->data; -+ struct mmc_command *cmd = host->cmd; -+ u32 base = host->base; -+ -+ u32 cmdsts = MSDC_INT_RSPCRCERR | MSDC_INT_CMDTMO | MSDC_INT_CMDRDY | -+ MSDC_INT_ACMDCRCERR | MSDC_INT_ACMDTMO | MSDC_INT_ACMDRDY | -+ MSDC_INT_ACMD19_DONE; -+ u32 datsts = MSDC_INT_DATCRCERR |MSDC_INT_DATTMO; -+ -+ u32 intsts = sdr_read32(MSDC_INT); -+ u32 inten = sdr_read32(MSDC_INTEN); inten &= intsts; -+ -+ sdr_write32(MSDC_INT, intsts); /* clear interrupts */ -+ /* MSG will cause fatal error */ -+ -+ /* card change interrupt */ -+ if (intsts & MSDC_INT_CDSC){ -+ //printk("MSDC_INT_CDSC irq<0x%.8x>\n", intsts); -+ tasklet_hi_schedule(&host->card_tasklet); -+ /* tuning when plug card ? */ -+ } -+ -+ /* sdio interrupt */ -+ if (intsts & MSDC_INT_SDIOIRQ){ -+ //printk("XXX MSDC_INT_SDIOIRQ\n"); /* seems not sdio irq */ -+ //mmc_signal_sdio_irq(host->mmc); -+ } -+ -+ /* transfer complete interrupt */ -+ if (data != NULL) { -+ if (inten & MSDC_INT_XFER_COMPL) { -+ data->bytes_xfered = host->dma.xfersz; -+ complete(&host->xfer_done); -+ } -+ -+ if (intsts & datsts) { -+ /* do basic reset, or stop command will sdc_busy */ -+ msdc_reset(); -+ msdc_clr_fifo(); -+ msdc_clr_int(); -+ atomic_set(&host->abort, 1); /* For PIO mode exit */ -+ -+ if (intsts & MSDC_INT_DATTMO){ -+ //printk("XXX CMD<%d> MSDC_INT_DATTMO\n", host->mrq->cmd->opcode); -+ data->error = (unsigned int)-ETIMEDOUT; -+ } -+ else if (intsts & MSDC_INT_DATCRCERR){ -+ //printk("XXX CMD<%d> MSDC_INT_DATCRCERR, SDC_DCRC_STS<0x%x>\n", host->mrq->cmd->opcode, sdr_read32(SDC_DCRC_STS)); -+ data->error = (unsigned int)-EIO; -+ } -+ -+ //if(sdr_read32(MSDC_INTEN) & MSDC_INT_XFER_COMPL) { -+ if (host->dma_xfer) { -+ complete(&host->xfer_done); /* Read CRC come fast, XFER_COMPL not enabled */ -+ } /* PIO mode can't do complete, because not init */ -+ } -+ } -+ -+ /* command interrupts */ -+ if ((cmd != NULL) && (intsts & cmdsts)) { -+ if ((intsts & MSDC_INT_CMDRDY) || (intsts & MSDC_INT_ACMDRDY) || -+ (intsts & MSDC_INT_ACMD19_DONE)) { -+ u32 *rsp = &cmd->resp[0]; -+ -+ switch (host->cmd_rsp) { -+ case RESP_NONE: -+ break; -+ case RESP_R2: -+ *rsp++ = sdr_read32(SDC_RESP3); *rsp++ = sdr_read32(SDC_RESP2); -+ *rsp++ = sdr_read32(SDC_RESP1); *rsp++ = sdr_read32(SDC_RESP0); -+ break; -+ default: /* Response types 1, 3, 4, 5, 6, 7(1b) */ -+ if ((intsts & MSDC_INT_ACMDRDY) || (intsts & MSDC_INT_ACMD19_DONE)) { -+ *rsp = sdr_read32(SDC_ACMD_RESP); -+ } else { -+ *rsp = sdr_read32(SDC_RESP0); -+ } -+ break; -+ } -+ } else if ((intsts & MSDC_INT_RSPCRCERR) || (intsts & MSDC_INT_ACMDCRCERR)) { -+ if(intsts & MSDC_INT_ACMDCRCERR){ -+ //printk("XXX CMD<%d> MSDC_INT_ACMDCRCERR\n",cmd->opcode); -+ } -+ else { -+ //printk("XXX CMD<%d> MSDC_INT_RSPCRCERR\n",cmd->opcode); -+ } -+ cmd->error = (unsigned int)-EIO; -+ } else 
if ((intsts & MSDC_INT_CMDTMO) || (intsts & MSDC_INT_ACMDTMO)) { -+ if(intsts & MSDC_INT_ACMDTMO){ -+ //printk("XXX CMD<%d> MSDC_INT_ACMDTMO\n",cmd->opcode); -+ } -+ else { -+ //printk("XXX CMD<%d> MSDC_INT_CMDTMO\n",cmd->opcode); -+ } -+ cmd->error = (unsigned int)-ETIMEDOUT; -+ msdc_reset(); -+ msdc_clr_fifo(); -+ msdc_clr_int(); -+ } -+ complete(&host->cmd_done); -+ } -+ -+ /* mmc irq interrupts */ -+ if (intsts & MSDC_INT_MMCIRQ) { -+ //printk(KERN_INFO "msdc[%d] MMCIRQ: SDC_CSTS=0x%.8x\r\n", host->id, sdr_read32(SDC_CSTS)); -+ } -+ -+#ifdef MT6575_SD_DEBUG -+ { -+ msdc_int_reg *int_reg = (msdc_int_reg*)&intsts; -+ /*printk("IRQ_EVT(0x%x): MMCIRQ(%d) CDSC(%d), ACRDY(%d), ACTMO(%d), ACCRE(%d) AC19DN(%d)\n", -+ intsts, -+ int_reg->mmcirq, -+ int_reg->cdsc, -+ int_reg->atocmdrdy, -+ int_reg->atocmdtmo, -+ int_reg->atocmdcrc, -+ int_reg->atocmd19done); -+ printk("IRQ_EVT(0x%x): SDIO(%d) CMDRDY(%d), CMDTMO(%d), RSPCRC(%d), CSTA(%d)\n", -+ intsts, -+ int_reg->sdioirq, -+ int_reg->cmdrdy, -+ int_reg->cmdtmo, -+ int_reg->rspcrc, -+ int_reg->csta); -+ printk("IRQ_EVT(0x%x): XFCMP(%d) DXDONE(%d), DATTMO(%d), DATCRC(%d), DMAEMP(%d)\n", -+ intsts, -+ int_reg->xfercomp, -+ int_reg->dxferdone, -+ int_reg->dattmo, -+ int_reg->datcrc, -+ int_reg->dmaqempty);*/ -+ -+ } -+#endif -+ -+ return IRQ_HANDLED; -+} -+ -+/*--------------------------------------------------------------------------*/ -+/* platform_driver members */ -+/*--------------------------------------------------------------------------*/ -+/* called by msdc_drv_probe/remove */ -+static void msdc_enable_cd_irq(struct msdc_host *host, int enable) -+{ -+ struct msdc_hw *hw = host->hw; -+ u32 base = host->base; -+ -+ /* for sdio, not set */ -+ if ((hw->flags & MSDC_CD_PIN_EN) == 0) { -+ /* Pull down card detection pin since it is not avaiable */ -+ /* -+ if (hw->config_gpio_pin) -+ hw->config_gpio_pin(MSDC_CD_PIN, GPIO_PULL_DOWN); -+ */ -+ sdr_clr_bits(MSDC_PS, MSDC_PS_CDEN); -+ sdr_clr_bits(MSDC_INTEN, MSDC_INTEN_CDSC); -+ sdr_clr_bits(SDC_CFG, SDC_CFG_INSWKUP); -+ return; -+ } -+ -+ //printk("CD IRQ Eanable(%d)\n", enable); -+ -+ if (enable) { -+ if (hw->enable_cd_eirq) { /* not set, never enter */ -+ hw->enable_cd_eirq(); -+ } else { -+ /* card detection circuit relies on the core power so that the core power -+ * shouldn't be turned off. Here adds a reference count to keep -+ * the core power alive. -+ */ -+ //msdc_vcore_on(host); //did in msdc_init_hw() -+ -+ if (hw->config_gpio_pin) /* NULL */ -+ hw->config_gpio_pin(MSDC_CD_PIN, GPIO_PULL_UP); -+ -+ sdr_set_field(MSDC_PS, MSDC_PS_CDDEBOUNCE, DEFAULT_DEBOUNCE); -+ sdr_set_bits(MSDC_PS, MSDC_PS_CDEN); -+ sdr_set_bits(MSDC_INTEN, MSDC_INTEN_CDSC); -+ sdr_set_bits(SDC_CFG, SDC_CFG_INSWKUP); /* not in document! Fix me */ -+ } -+ } else { -+ if (hw->disable_cd_eirq) { -+ hw->disable_cd_eirq(); -+ } else { -+ if (hw->config_gpio_pin) /* NULL */ -+ hw->config_gpio_pin(MSDC_CD_PIN, GPIO_PULL_DOWN); -+ -+ sdr_clr_bits(SDC_CFG, SDC_CFG_INSWKUP); -+ sdr_clr_bits(MSDC_PS, MSDC_PS_CDEN); -+ sdr_clr_bits(MSDC_INTEN, MSDC_INTEN_CDSC); -+ -+ /* Here decreases a reference count to core power since card -+ * detection circuit is shutdown. 
-+ */ -+ //msdc_vcore_off(host); -+ } -+ } -+} -+ -+/* called by msdc_drv_probe */ -+static void msdc_init_hw(struct msdc_host *host) -+{ -+ u32 base = host->base; -+ struct msdc_hw *hw = host->hw; -+ -+#ifdef MT6575_SD_DEBUG -+ msdc_reg[host->id] = (struct msdc_regs *)host->base; -+#endif -+ -+ /* Power on */ -+#if 0 /* --- chhung */ -+ msdc_vcore_on(host); -+ msdc_pin_reset(host, MSDC_PIN_PULL_UP); -+ msdc_select_clksrc(host, hw->clk_src); -+ enable_clock(PERI_MSDC0_PDN + host->id, "SD"); -+ msdc_vdd_on(host); -+#endif /* end of --- */ -+ /* Configure to MMC/SD mode */ -+ sdr_set_field(MSDC_CFG, MSDC_CFG_MODE, MSDC_SDMMC); -+ -+ /* Reset */ -+ msdc_reset(); -+ msdc_clr_fifo(); -+ -+ /* Disable card detection */ -+ sdr_clr_bits(MSDC_PS, MSDC_PS_CDEN); -+ -+ /* Disable and clear all interrupts */ -+ sdr_clr_bits(MSDC_INTEN, sdr_read32(MSDC_INTEN)); -+ sdr_write32(MSDC_INT, sdr_read32(MSDC_INT)); -+ -+#if 1 -+ /* reset tuning parameter */ -+ sdr_write32(MSDC_PAD_CTL0, 0x00090000); -+ sdr_write32(MSDC_PAD_CTL1, 0x000A0000); -+ sdr_write32(MSDC_PAD_CTL2, 0x000A0000); -+ // sdr_write32(MSDC_PAD_TUNE, 0x00000000); -+ sdr_write32(MSDC_PAD_TUNE, 0x84101010); // for MT7620 E2 and afterward -+ // sdr_write32(MSDC_DAT_RDDLY0, 0x00000000); -+ sdr_write32(MSDC_DAT_RDDLY0, 0x10101010); // for MT7620 E2 and afterward -+ sdr_write32(MSDC_DAT_RDDLY1, 0x00000000); -+ sdr_write32(MSDC_IOCON, 0x00000000); -+#if 0 // use MT7620 default value: 0x403c004f -+ sdr_write32(MSDC_PATCH_BIT0, 0x003C000F); /* bit0 modified: Rx Data Clock Source: 1 -> 2.0*/ -+#endif -+ -+ if (sdr_read32(MSDC_ECO_VER) >= 4) { -+ if (host->id == 1) { -+ sdr_set_field(MSDC_PATCH_BIT1, MSDC_PATCH_BIT1_WRDAT_CRCS, 1); -+ sdr_set_field(MSDC_PATCH_BIT1, MSDC_PATCH_BIT1_CMD_RSP, 1); -+ -+ /* internal clock: latch read data */ -+ sdr_set_bits(MSDC_PATCH_BIT0, MSDC_PATCH_BIT_CKGEN_CK); -+ } -+ } -+#endif -+ -+ /* for safety, should clear SDC_CFG.SDIO_INT_DET_EN & set SDC_CFG.SDIO in -+ pre-loader,uboot,kernel drivers. and SDC_CFG.SDIO_INT_DET_EN will be only -+ set when kernel driver wants to use SDIO bus interrupt */ -+ /* Configure to enable SDIO mode. 
it's must otherwise sdio cmd5 failed */ -+ sdr_set_bits(SDC_CFG, SDC_CFG_SDIO); -+ -+ /* disable detect SDIO device interupt function */ -+ sdr_clr_bits(SDC_CFG, SDC_CFG_SDIOIDE); -+ -+ /* eneable SMT for glitch filter */ -+ sdr_set_bits(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKSMT); -+ sdr_set_bits(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDSMT); -+ sdr_set_bits(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATSMT); -+ -+#if 1 -+ /* set clk, cmd, dat pad driving */ -+ sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKDRVN, hw->clk_drv); -+ sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKDRVP, hw->clk_drv); -+ sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDDRVN, hw->cmd_drv); -+ sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDDRVP, hw->cmd_drv); -+ sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATDRVN, hw->dat_drv); -+ sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATDRVP, hw->dat_drv); -+#else -+ sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKDRVN, 0); -+ sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKDRVP, 0); -+ sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDDRVN, 0); -+ sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDDRVP, 0); -+ sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATDRVN, 0); -+ sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATDRVP, 0); -+#endif -+ -+ /* set sampling edge */ -+ -+ /* write crc timeout detection */ -+ sdr_set_field(MSDC_PATCH_BIT0, 1 << 30, 1); -+ -+ /* Configure to default data timeout */ -+ sdr_set_field(SDC_CFG, SDC_CFG_DTOC, DEFAULT_DTOC); -+ -+ msdc_set_buswidth(host, MMC_BUS_WIDTH_1); -+ -+ //printk("init hardware done!\n"); -+} -+ -+/* called by msdc_drv_remove */ -+static void msdc_deinit_hw(struct msdc_host *host) -+{ -+ u32 base = host->base; -+ -+ /* Disable and clear all interrupts */ -+ sdr_clr_bits(MSDC_INTEN, sdr_read32(MSDC_INTEN)); -+ sdr_write32(MSDC_INT, sdr_read32(MSDC_INT)); -+ -+ /* Disable card detection */ -+ msdc_enable_cd_irq(host, 0); -+ // msdc_set_power_mode(host, MMC_POWER_OFF); /* make sure power down */ /* --- by chhung */ -+} -+ -+/* init gpd and bd list in msdc_drv_probe */ -+static void msdc_init_gpd_bd(struct msdc_host *host, struct msdc_dma *dma) -+{ -+ gpd_t *gpd = dma->gpd; -+ bd_t *bd = dma->bd; -+ bd_t *ptr, *prev; -+ -+ /* we just support one gpd */ -+ int bdlen = MAX_BD_PER_GPD; -+ -+ /* init the 2 gpd */ -+ memset(gpd, 0, sizeof(gpd_t) * 2); -+ //gpd->next = (void *)virt_to_phys(gpd + 1); /* pointer to a null gpd, bug! 
kmalloc <-> virt_to_phys */ -+ //gpd->next = (dma->gpd_addr + 1); /* bug */ -+ gpd->next = (void *)((u32)dma->gpd_addr + sizeof(gpd_t)); -+ -+ //gpd->intr = 0; -+ gpd->bdp = 1; /* hwo, cs, bd pointer */ -+ //gpd->ptr = (void*)virt_to_phys(bd); -+ gpd->ptr = (void *)dma->bd_addr; /* physical address */ -+ -+ memset(bd, 0, sizeof(bd_t) * bdlen); -+ ptr = bd + bdlen - 1; -+ //ptr->eol = 1; /* 0 or 1 [Fix me]*/ -+ //ptr->next = 0; -+ -+ while (ptr != bd) { -+ prev = ptr - 1; -+ prev->next = (void *)(dma->bd_addr + sizeof(bd_t) *(ptr - bd)); -+ ptr = prev; -+ } -+} -+ -+static int msdc_drv_probe(struct platform_device *pdev) -+{ -+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -+ __iomem void *base; -+ struct mmc_host *mmc; -+ struct resource *mem; -+ struct msdc_host *host; -+ struct msdc_hw *hw; -+ int ret, irq; -+ pdev->dev.platform_data = &msdc0_hw; -+ -+ /* Allocate MMC host for this device */ -+ mmc = mmc_alloc_host(sizeof(struct msdc_host), &pdev->dev); -+ if (!mmc) return -ENOMEM; -+ -+ hw = (struct msdc_hw*)pdev->dev.platform_data; -+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); -+ irq = platform_get_irq(pdev, 0); -+ -+ //BUG_ON((!hw) || (!mem) || (irq < 0)); /* --- by chhung */ -+ -+ base = devm_request_and_ioremap(&pdev->dev, res); -+ if (IS_ERR(base)) -+ return PTR_ERR(base); -+ -+/* mem = request_mem_region(mem->start - 0xa0000000, (mem->end - mem->start + 1) - 0xa0000000, dev_name(&pdev->dev)); -+ if (mem == NULL) { -+ mmc_free_host(mmc); -+ return -EBUSY; -+ } -+*/ -+ /* Set host parameters to mmc */ -+ mmc->ops = &mt_msdc_ops; -+ mmc->f_min = HOST_MIN_MCLK; -+ mmc->f_max = HOST_MAX_MCLK; -+ mmc->ocr_avail = MSDC_OCR_AVAIL; -+ -+ /* For sd card: MSDC_SYS_SUSPEND | MSDC_WP_PIN_EN | MSDC_CD_PIN_EN | MSDC_REMOVABLE | MSDC_HIGHSPEED, -+ For sdio : MSDC_EXT_SDIO_IRQ | MSDC_HIGHSPEED */ -+ if (hw->flags & MSDC_HIGHSPEED) { -+ mmc->caps = MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED; -+ } -+ if (hw->data_pins == 4) { /* current data_pins are all 4*/ -+ mmc->caps |= MMC_CAP_4_BIT_DATA; -+ } else if (hw->data_pins == 8) { -+ mmc->caps |= MMC_CAP_8_BIT_DATA; -+ } -+ if ((hw->flags & MSDC_SDIO_IRQ) || (hw->flags & MSDC_EXT_SDIO_IRQ)) -+ mmc->caps |= MMC_CAP_SDIO_IRQ; /* yes for sdio */ -+ -+ /* MMC core transfer sizes tunable parameters */ -+ // mmc->max_hw_segs = MAX_HW_SGMTS; -+// mmc->max_phys_segs = MAX_PHY_SGMTS; -+ mmc->max_seg_size = MAX_SGMT_SZ; -+ mmc->max_blk_size = HOST_MAX_BLKSZ; -+ mmc->max_req_size = MAX_REQ_SZ; -+ mmc->max_blk_count = mmc->max_req_size; -+ -+ host = mmc_priv(mmc); -+ host->hw = hw; -+ host->mmc = mmc; -+ host->id = pdev->id; -+ host->error = 0; -+ host->irq = irq; -+ host->base = (unsigned long) base; -+ host->mclk = 0; /* mclk: the request clock of mmc sub-system */ -+ host->hclk = hclks[hw->clk_src]; /* hclk: clock of clock source to msdc controller */ -+ host->sclk = 0; /* sclk: the really clock after divition */ -+ host->pm_state = PMSG_RESUME; -+ host->suspend = 0; -+ host->core_clkon = 0; -+ host->card_clkon = 0; -+ host->core_power = 0; -+ host->power_mode = MMC_POWER_OFF; -+// host->card_inserted = hw->flags & MSDC_REMOVABLE ? 
0 : 1;
-+	host->timeout_ns = 0;
-+	host->timeout_clks = DEFAULT_DTOC * 65536;
-+
-+	host->mrq = NULL;
-+	//init_MUTEX(&host->sem); /* we don't need to support multiple threads access */
-+
-+	host->dma.used_gpd = 0;
-+	host->dma.used_bd = 0;
-+
-+	/* using dma_alloc_coherent*/ /* todo: using 1, for all 4 slots */
-+	host->dma.gpd = dma_alloc_coherent(NULL, MAX_GPD_NUM * sizeof(gpd_t), &host->dma.gpd_addr, GFP_KERNEL);
-+	host->dma.bd = dma_alloc_coherent(NULL, MAX_BD_NUM * sizeof(bd_t), &host->dma.bd_addr, GFP_KERNEL);
-+	BUG_ON((!host->dma.gpd) || (!host->dma.bd));
-+	msdc_init_gpd_bd(host, &host->dma);
-+	/*for emmc*/
-+	msdc_6575_host[pdev->id] = host;
-+
-+	tasklet_init(&host->card_tasklet, msdc_tasklet_card, (ulong)host);
-+	spin_lock_init(&host->lock);
-+	msdc_init_hw(host);
-+
-+	ret = request_irq((unsigned int)irq, msdc_irq, IRQF_TRIGGER_LOW, dev_name(&pdev->dev), host);
-+	if (ret) goto release;
-+	// mt65xx_irq_unmask(irq); /* --- by chhung */
-+
-+	if (hw->flags & MSDC_CD_PIN_EN) { /* not set for sdio */
-+		if (hw->request_cd_eirq) { /* not set for MT6575 */
-+			hw->request_cd_eirq(msdc_eirq_cd, (void*)host); /* msdc_eirq_cd will not be used! */
-+		}
-+	}
-+
-+	if (hw->request_sdio_eirq) /* set to combo_sdio_request_eirq() for WIFI */
-+		hw->request_sdio_eirq(msdc_eirq_sdio, (void*)host); /* msdc_eirq_sdio() will be called when EIRQ */
-+
-+	if (hw->register_pm) {/* yes for sdio */
-+		if(hw->flags & MSDC_SYS_SUSPEND) { /* will not set for WIFI */
-+			//printk("MSDC_SYS_SUSPEND and register_pm both set\n");
-+		}
-+		//mmc->pm_flags |= MMC_PM_IGNORE_PM_NOTIFY; /* pm not controlled by system but by client. */ /* --- by chhung */
-+	}
-+
-+	platform_set_drvdata(pdev, mmc);
-+
-+	ret = mmc_add_host(mmc);
-+	if (ret) goto free_irq;
-+
-+	/* Config card detection pin and enable interrupts */
-+	if (hw->flags & MSDC_CD_PIN_EN) { /* set for card */
-+		msdc_enable_cd_irq(host, 1);
-+	} else {
-+		msdc_enable_cd_irq(host, 0);
-+	}
-+
-+	return 0;
-+
-+free_irq:
-+	free_irq(irq, host);
-+release:
-+	platform_set_drvdata(pdev, NULL);
-+	msdc_deinit_hw(host);
-+
-+	tasklet_kill(&host->card_tasklet);
-+
-+/*	if (mem)
-+		release_mem_region(mem->start, mem->end - mem->start + 1);
-+*/
-+	mmc_free_host(mmc);
-+
-+	return ret;
-+}
-+
-+/* 4 device share one driver, using "drvdata" to show difference */
-+static int msdc_drv_remove(struct platform_device *pdev)
-+{
-+	struct mmc_host *mmc;
-+	struct msdc_host *host;
-+	struct resource *mem;
-+
-+
-+	mmc = platform_get_drvdata(pdev);
-+	BUG_ON(!mmc);
-+
-+	host = mmc_priv(mmc);
-+	BUG_ON(!host);
-+
-+	//printk("removed !!!\n");
-+
-+	platform_set_drvdata(pdev, NULL);
-+	mmc_remove_host(host->mmc);
-+	msdc_deinit_hw(host);
-+
-+	tasklet_kill(&host->card_tasklet);
-+	free_irq(host->irq, host);
-+
-+	dma_free_coherent(NULL, MAX_GPD_NUM * sizeof(gpd_t), host->dma.gpd, host->dma.gpd_addr);
-+	dma_free_coherent(NULL, MAX_BD_NUM * sizeof(bd_t), host->dma.bd, host->dma.bd_addr);
-+
-+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-+
-+	if (mem)
-+		release_mem_region(mem->start, mem->end - mem->start + 1);
-+
-+	mmc_free_host(host->mmc);
-+
-+	return 0;
-+}
-+
-+static const struct of_device_id mt7620a_sdhci_match[] = {
-+	{ .compatible = "ralink,mt7620a-sdhci" },
-+	{},
-+};
-+MODULE_DEVICE_TABLE(of, mt7620a_sdhci_match);
-+
-+/* Fix me: Power Flow */
-+static struct platform_driver mt_msdc_driver = {
-+	.probe = msdc_drv_probe,
-+	.remove = msdc_drv_remove,
-+	.driver = {
-+		.name = DRV_NAME,
-+		.owner = THIS_MODULE,
-+		.of_match_table =
mt7620a_sdhci_match, -+ -+ }, -+}; -+ -+static int __init mt_msdc_init(void) -+{ -+ int ret; -+/* +++ chhung */ -+ unsigned int reg; -+ -+ mtk_sd_device.dev.platform_data = &msdc0_hw; -+ printk("MTK MSDC device init.\n"); -+ reg = sdr_read32((__iomem void *) 0xb0000060) & ~(0x3<<18); -+ reg |= 0x1 << 18; -+ sdr_write32((__iomem void *) 0xb0000060, reg); -+/* end of +++ */ -+ ret = platform_driver_register(&mt_msdc_driver); -+ if (ret) { -+ printk(KERN_ERR DRV_NAME ": Can't register driver"); -+ return ret; -+ } -+ printk(KERN_INFO DRV_NAME ": MediaTek MT6575 MSDC Driver\n"); -+ -+ //msdc_debug_proc_init(); -+ return 0; -+} -+ -+static void __exit mt_msdc_exit(void) -+{ -+ platform_driver_unregister(&mt_msdc_driver); -+} -+ -+module_init(mt_msdc_init); -+module_exit(mt_msdc_exit); -+MODULE_LICENSE("GPL"); -+MODULE_DESCRIPTION("MediaTek MT6575 SD/MMC Card Driver"); -+MODULE_AUTHOR("Infinity Chen "); -+ -+EXPORT_SYMBOL(msdc_6575_host); diff --git a/target/linux/ramips/patches-3.10/0118-NET-MIPS-add-ralink-SoC-ethernet-driver.patch b/target/linux/ramips/patches-3.10/0118-NET-MIPS-add-ralink-SoC-ethernet-driver.patch new file mode 100644 index 0000000000..103c8180f4 --- /dev/null +++ b/target/linux/ramips/patches-3.10/0118-NET-MIPS-add-ralink-SoC-ethernet-driver.patch @@ -0,0 +1,4950 @@ +From b7d9374aee4b47a76dadaf1fe7f6838087c9c62d Mon Sep 17 00:00:00 2001 +From: John Crispin +Date: Mon, 22 Apr 2013 23:20:03 +0200 +Subject: [PATCH 118/133] NET: MIPS: add ralink SoC ethernet driver + +Add support for Ralink FE and ESW. + +Signed-off-by: John Crispin +--- + .../include/asm/mach-ralink/rt305x_esw_platform.h | 27 + + arch/mips/ralink/rt305x.c | 1 + + drivers/net/ethernet/Kconfig | 1 + + drivers/net/ethernet/Makefile | 1 + + drivers/net/ethernet/ralink/Kconfig | 32 + + drivers/net/ethernet/ralink/Makefile | 18 + + drivers/net/ethernet/ralink/esw_rt3052.c | 1463 ++++++++++++++++++++ + drivers/net/ethernet/ralink/esw_rt3052.h | 32 + + drivers/net/ethernet/ralink/gsw_mt7620a.c | 566 ++++++++ + drivers/net/ethernet/ralink/gsw_mt7620a.h | 30 + + drivers/net/ethernet/ralink/mdio.c | 244 ++++ + drivers/net/ethernet/ralink/mdio.h | 29 + + drivers/net/ethernet/ralink/mdio_rt2880.c | 232 ++++ + drivers/net/ethernet/ralink/mdio_rt2880.h | 26 + + drivers/net/ethernet/ralink/mt7530.c | 467 +++++++ + drivers/net/ethernet/ralink/mt7530.h | 20 + + drivers/net/ethernet/ralink/ralink_soc_eth.c | 845 +++++++++++ + drivers/net/ethernet/ralink/ralink_soc_eth.h | 384 +++++ + drivers/net/ethernet/ralink/soc_mt7620.c | 172 +++ + drivers/net/ethernet/ralink/soc_rt2880.c | 51 + + drivers/net/ethernet/ralink/soc_rt305x.c | 113 ++ + drivers/net/ethernet/ralink/soc_rt3883.c | 60 + + 22 files changed, 4814 insertions(+) + create mode 100644 arch/mips/include/asm/mach-ralink/rt305x_esw_platform.h + create mode 100644 drivers/net/ethernet/ralink/Kconfig + create mode 100644 drivers/net/ethernet/ralink/Makefile + create mode 100644 drivers/net/ethernet/ralink/esw_rt3052.c + create mode 100644 drivers/net/ethernet/ralink/esw_rt3052.h + create mode 100644 drivers/net/ethernet/ralink/gsw_mt7620a.c + create mode 100644 drivers/net/ethernet/ralink/gsw_mt7620a.h + create mode 100644 drivers/net/ethernet/ralink/mdio.c + create mode 100644 drivers/net/ethernet/ralink/mdio.h + create mode 100644 drivers/net/ethernet/ralink/mdio_rt2880.c + create mode 100644 drivers/net/ethernet/ralink/mdio_rt2880.h + create mode 100644 drivers/net/ethernet/ralink/mt7530.c + create mode 100644 drivers/net/ethernet/ralink/mt7530.h + create mode 100644 
drivers/net/ethernet/ralink/ralink_soc_eth.c + create mode 100644 drivers/net/ethernet/ralink/ralink_soc_eth.h + create mode 100644 drivers/net/ethernet/ralink/soc_mt7620.c + create mode 100644 drivers/net/ethernet/ralink/soc_rt2880.c + create mode 100644 drivers/net/ethernet/ralink/soc_rt305x.c + create mode 100644 drivers/net/ethernet/ralink/soc_rt3883.c + +--- /dev/null ++++ b/arch/mips/include/asm/mach-ralink/rt305x_esw_platform.h +@@ -0,0 +1,27 @@ ++/* ++ * Ralink RT305x SoC platform device registration ++ * ++ * Copyright (C) 2010 Gabor Juhos ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published ++ * by the Free Software Foundation. ++ */ ++ ++#ifndef _RT305X_ESW_PLATFORM_H ++#define _RT305X_ESW_PLATFORM_H ++ ++enum { ++ RT305X_ESW_VLAN_CONFIG_NONE = 0, ++ RT305X_ESW_VLAN_CONFIG_LLLLW, ++ RT305X_ESW_VLAN_CONFIG_WLLLL, ++}; ++ ++struct rt305x_esw_platform_data ++{ ++ u8 vlan_config; ++ u32 reg_initval_fct2; ++ u32 reg_initval_fpa2; ++}; ++ ++#endif /* _RT305X_ESW_PLATFORM_H */ +--- a/arch/mips/ralink/rt305x.c ++++ b/arch/mips/ralink/rt305x.c +@@ -194,6 +194,7 @@ void __init ralink_clk_init(void) + } + + ralink_clk_add("cpu", cpu_rate); ++ ralink_clk_add("sys", sys_rate); + ralink_clk_add("10000b00.spi", sys_rate); + ralink_clk_add("10000100.timer", wdt_rate); + ralink_clk_add("10000120.watchdog", wdt_rate); +--- a/drivers/net/ethernet/Kconfig ++++ b/drivers/net/ethernet/Kconfig +@@ -135,6 +135,7 @@ config ETHOC + source "drivers/net/ethernet/packetengines/Kconfig" + source "drivers/net/ethernet/pasemi/Kconfig" + source "drivers/net/ethernet/qlogic/Kconfig" ++source "drivers/net/ethernet/ralink/Kconfig" + source "drivers/net/ethernet/realtek/Kconfig" + source "drivers/net/ethernet/renesas/Kconfig" + source "drivers/net/ethernet/rdc/Kconfig" +--- a/drivers/net/ethernet/Makefile ++++ b/drivers/net/ethernet/Makefile +@@ -53,6 +53,7 @@ obj-$(CONFIG_ETHOC) += ethoc.o + obj-$(CONFIG_NET_PACKET_ENGINE) += packetengines/ + obj-$(CONFIG_NET_VENDOR_PASEMI) += pasemi/ + obj-$(CONFIG_NET_VENDOR_QLOGIC) += qlogic/ ++obj-$(CONFIG_NET_RALINK) += ralink/ + obj-$(CONFIG_NET_VENDOR_REALTEK) += realtek/ + obj-$(CONFIG_SH_ETH) += renesas/ + obj-$(CONFIG_NET_VENDOR_RDC) += rdc/ +--- /dev/null ++++ b/drivers/net/ethernet/ralink/Kconfig +@@ -0,0 +1,32 @@ ++config NET_RALINK ++ tristate "Ralink RT288X/RT3X5X/RT3662/RT3883/MT7620 ethernet driver" ++ depends on RALINK ++ help ++ This driver supports the ethernet mac inside the ralink wisocs ++ ++if NET_RALINK ++ ++config NET_RALINK_MDIO ++ def_bool NET_RALINK ++ depends on (SOC_RT288X || SOC_RT3883 || SOC_MT7620) ++ select PHYLIB ++ ++config NET_RALINK_MDIO_RT2880 ++ def_bool NET_RALINK ++ depends on (SOC_RT288X || SOC_RT3883) ++ select NET_RALINK_MDIO ++ ++config NET_RALINK_ESW_RT3052 ++ def_bool NET_RALINK ++ depends on SOC_RT305X ++ select PHYLIB ++ select SWCONFIG ++ ++config NET_RALINK_GSW_MT7620 ++ def_bool NET_RALINK ++ depends on SOC_MT7620 ++ select INET_LRO ++ select NET_RALINK_MDIO ++ select PHYLIB ++ select SWCONFIG ++endif +--- /dev/null ++++ b/drivers/net/ethernet/ralink/Makefile +@@ -0,0 +1,18 @@ ++# ++# Makefile for the Ralink SoCs built-in ethernet macs ++# ++ ++ralink-eth-y += ralink_soc_eth.o ++ ++ralink-eth-$(CONFIG_NET_RALINK_MDIO) += mdio.o ++ralink-eth-$(CONFIG_NET_RALINK_MDIO_RT2880) += mdio_rt2880.o ++ ++ralink-eth-$(CONFIG_NET_RALINK_ESW_RT3052) += esw_rt3052.o ++ralink-eth-$(CONFIG_NET_RALINK_GSW_MT7620) += gsw_mt7620a.o mt7530.o 
++ ++ralink-eth-$(CONFIG_SOC_RT288X) += soc_rt2880.o ++ralink-eth-$(CONFIG_SOC_RT305X) += soc_rt305x.o ++ralink-eth-$(CONFIG_SOC_RT3883) += soc_rt3883.o ++ralink-eth-$(CONFIG_SOC_MT7620) += soc_mt7620.o ++ ++obj-$(CONFIG_NET_RALINK) += ralink-eth.o +--- /dev/null ++++ b/drivers/net/ethernet/ralink/esw_rt3052.c +@@ -0,0 +1,1463 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; version 2 of the License ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. ++ * ++ * Copyright (C) 2009-2013 John Crispin ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#include "ralink_soc_eth.h" ++ ++#include ++#include ++#include ++ ++#include ++#include ++#include ++ ++/* ++ * HW limitations for this switch: ++ * - No large frame support (PKT_MAX_LEN at most 1536) ++ * - Can't have untagged vlan and tagged vlan on one port at the same time, ++ * though this might be possible using the undocumented PPE. ++ */ ++ ++#define RT305X_ESW_REG_ISR 0x00 ++#define RT305X_ESW_REG_IMR 0x04 ++#define RT305X_ESW_REG_FCT0 0x08 ++#define RT305X_ESW_REG_PFC1 0x14 ++#define RT305X_ESW_REG_ATS 0x24 ++#define RT305X_ESW_REG_ATS0 0x28 ++#define RT305X_ESW_REG_ATS1 0x2c ++#define RT305X_ESW_REG_ATS2 0x30 ++#define RT305X_ESW_REG_PVIDC(_n) (0x40 + 4 * (_n)) ++#define RT305X_ESW_REG_VLANI(_n) (0x50 + 4 * (_n)) ++#define RT305X_ESW_REG_VMSC(_n) (0x70 + 4 * (_n)) ++#define RT305X_ESW_REG_POA 0x80 ++#define RT305X_ESW_REG_FPA 0x84 ++#define RT305X_ESW_REG_SOCPC 0x8c ++#define RT305X_ESW_REG_POC0 0x90 ++#define RT305X_ESW_REG_POC1 0x94 ++#define RT305X_ESW_REG_POC2 0x98 ++#define RT305X_ESW_REG_SGC 0x9c ++#define RT305X_ESW_REG_STRT 0xa0 ++#define RT305X_ESW_REG_PCR0 0xc0 ++#define RT305X_ESW_REG_PCR1 0xc4 ++#define RT305X_ESW_REG_FPA2 0xc8 ++#define RT305X_ESW_REG_FCT2 0xcc ++#define RT305X_ESW_REG_SGC2 0xe4 ++#define RT305X_ESW_REG_P0LED 0xa4 ++#define RT305X_ESW_REG_P1LED 0xa8 ++#define RT305X_ESW_REG_P2LED 0xac ++#define RT305X_ESW_REG_P3LED 0xb0 ++#define RT305X_ESW_REG_P4LED 0xb4 ++#define RT305X_ESW_REG_PXPC(_x) (0xe8 + (4 * _x)) ++#define RT305X_ESW_REG_P1PC 0xec ++#define RT305X_ESW_REG_P2PC 0xf0 ++#define RT305X_ESW_REG_P3PC 0xf4 ++#define RT305X_ESW_REG_P4PC 0xf8 ++#define RT305X_ESW_REG_P5PC 0xfc ++ ++#define RT305X_ESW_LED_LINK 0 ++#define RT305X_ESW_LED_100M 1 ++#define RT305X_ESW_LED_DUPLEX 2 ++#define RT305X_ESW_LED_ACTIVITY 3 ++#define RT305X_ESW_LED_COLLISION 4 ++#define RT305X_ESW_LED_LINKACT 5 ++#define RT305X_ESW_LED_DUPLCOLL 6 ++#define RT305X_ESW_LED_10MACT 7 ++#define RT305X_ESW_LED_100MACT 8 ++/* Additional led states not in datasheet: */ ++#define RT305X_ESW_LED_BLINK 10 ++#define RT305X_ESW_LED_ON 12 ++ ++#define RT305X_ESW_LINK_S 25 ++#define RT305X_ESW_DUPLEX_S 9 ++#define RT305X_ESW_SPD_S 0 ++ ++#define RT305X_ESW_PCR0_WT_NWAY_DATA_S 16 ++#define RT305X_ESW_PCR0_WT_PHY_CMD BIT(13) ++#define RT305X_ESW_PCR0_CPU_PHY_REG_S 8 ++ 
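
Note on the register constants in esw_rt3052.c: the _S suffix is a field's bit shift and _M its mask, and multi-field registers are updated with a read-modify-write sequence, which the driver wraps in the esw_rmw_raw()/esw_rmw() helpers further down. A minimal standalone sketch of that pattern, assuming base is the ioremapped switch register window; example_rmw is a hypothetical name, and unlike esw_rmw_raw() (which takes a pre-shifted mask) the shift is applied explicitly here.

#include <linux/io.h>

/* Update one field of a memory-mapped register: read it, clear the
 * field (mask shifted into position), OR in the new value, write back.
 * A variant of esw_rmw_raw(), without the spinlock that esw_rmw()
 * wraps around it. */
static void example_rmw(void __iomem *base, unsigned reg,
			unsigned long mask, unsigned shift, unsigned long val)
{
	unsigned long t;

	t = __raw_readl(base + reg) & ~(mask << shift);
	__raw_writel(t | ((val & mask) << shift), base + reg);
}

/* Usage, e.g. rewriting the LAN port map field of SGC2 (the mask/shift
 * pair and the port map value are defined below in this file):
 *   example_rmw(base, RT305X_ESW_REG_SGC2,
 *               RT305X_ESW_SGC2_LAN_PMAP_M, RT305X_ESW_SGC2_LAN_PMAP_S,
 *               RT305X_ESW_PMAP_LLLLLL);
 */
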
++#define RT305X_ESW_PCR1_WT_DONE BIT(0) ++ ++#define RT305X_ESW_ATS_TIMEOUT (5 * HZ) ++#define RT305X_ESW_PHY_TIMEOUT (5 * HZ) ++ ++#define RT305X_ESW_PVIDC_PVID_M 0xfff ++#define RT305X_ESW_PVIDC_PVID_S 12 ++ ++#define RT305X_ESW_VLANI_VID_M 0xfff ++#define RT305X_ESW_VLANI_VID_S 12 ++ ++#define RT305X_ESW_VMSC_MSC_M 0xff ++#define RT305X_ESW_VMSC_MSC_S 8 ++ ++#define RT305X_ESW_SOCPC_DISUN2CPU_S 0 ++#define RT305X_ESW_SOCPC_DISMC2CPU_S 8 ++#define RT305X_ESW_SOCPC_DISBC2CPU_S 16 ++#define RT305X_ESW_SOCPC_CRC_PADDING BIT(25) ++ ++#define RT305X_ESW_POC0_EN_BP_S 0 ++#define RT305X_ESW_POC0_EN_FC_S 8 ++#define RT305X_ESW_POC0_DIS_RMC2CPU_S 16 ++#define RT305X_ESW_POC0_DIS_PORT_M 0x7f ++#define RT305X_ESW_POC0_DIS_PORT_S 23 ++ ++#define RT305X_ESW_POC2_UNTAG_EN_M 0xff ++#define RT305X_ESW_POC2_UNTAG_EN_S 0 ++#define RT305X_ESW_POC2_ENAGING_S 8 ++#define RT305X_ESW_POC2_DIS_UC_PAUSE_S 16 ++ ++#define RT305X_ESW_SGC2_DOUBLE_TAG_M 0x7f ++#define RT305X_ESW_SGC2_DOUBLE_TAG_S 0 ++#define RT305X_ESW_SGC2_LAN_PMAP_M 0x3f ++#define RT305X_ESW_SGC2_LAN_PMAP_S 24 ++ ++#define RT305X_ESW_PFC1_EN_VLAN_M 0xff ++#define RT305X_ESW_PFC1_EN_VLAN_S 16 ++#define RT305X_ESW_PFC1_EN_TOS_S 24 ++ ++#define RT305X_ESW_VLAN_NONE 0xfff ++ ++#define RT305X_ESW_GSC_BC_STROM_MASK 0x3 ++#define RT305X_ESW_GSC_BC_STROM_SHIFT 4 ++ ++#define RT305X_ESW_GSC_LED_FREQ_MASK 0x3 ++#define RT305X_ESW_GSC_LED_FREQ_SHIFT 23 ++ ++#define RT305X_ESW_POA_LINK_MASK 0x1f ++#define RT305X_ESW_POA_LINK_SHIFT 25 ++ ++#define RT305X_ESW_PORT_ST_CHG BIT(26) ++#define RT305X_ESW_PORT0 0 ++#define RT305X_ESW_PORT1 1 ++#define RT305X_ESW_PORT2 2 ++#define RT305X_ESW_PORT3 3 ++#define RT305X_ESW_PORT4 4 ++#define RT305X_ESW_PORT5 5 ++#define RT305X_ESW_PORT6 6 ++ ++#define RT305X_ESW_PORTS_NONE 0 ++ ++#define RT305X_ESW_PMAP_LLLLLL 0x3f ++#define RT305X_ESW_PMAP_LLLLWL 0x2f ++#define RT305X_ESW_PMAP_WLLLLL 0x3e ++ ++#define RT305X_ESW_PORTS_INTERNAL \ ++ (BIT(RT305X_ESW_PORT0) | BIT(RT305X_ESW_PORT1) | \ ++ BIT(RT305X_ESW_PORT2) | BIT(RT305X_ESW_PORT3) | \ ++ BIT(RT305X_ESW_PORT4)) ++ ++#define RT305X_ESW_PORTS_NOCPU \ ++ (RT305X_ESW_PORTS_INTERNAL | BIT(RT305X_ESW_PORT5)) ++ ++#define RT305X_ESW_PORTS_CPU BIT(RT305X_ESW_PORT6) ++ ++#define RT305X_ESW_PORTS_ALL \ ++ (RT305X_ESW_PORTS_NOCPU | RT305X_ESW_PORTS_CPU) ++ ++#define RT305X_ESW_NUM_VLANS 16 ++#define RT305X_ESW_NUM_VIDS 4096 ++#define RT305X_ESW_NUM_PORTS 7 ++#define RT305X_ESW_NUM_LANWAN 6 ++#define RT305X_ESW_NUM_LEDS 5 ++ ++#define RT5350_ESW_REG_PXTPC(_x) (0x150 + (4 * _x)) ++#define RT5350_EWS_REG_LED_POLARITY 0x168 ++#define RT5350_RESET_EPHY BIT(24) ++#define SYSC_REG_RESET_CTRL 0x34 ++ ++enum { ++ /* Global attributes. */ ++ RT305X_ESW_ATTR_ENABLE_VLAN, ++ RT305X_ESW_ATTR_ALT_VLAN_DISABLE, ++ RT305X_ESW_ATTR_BC_STATUS, ++ RT305X_ESW_ATTR_LED_FREQ, ++ /* Port attributes. */ ++ RT305X_ESW_ATTR_PORT_DISABLE, ++ RT305X_ESW_ATTR_PORT_DOUBLETAG, ++ RT305X_ESW_ATTR_PORT_UNTAG, ++ RT305X_ESW_ATTR_PORT_LED, ++ RT305X_ESW_ATTR_PORT_LAN, ++ RT305X_ESW_ATTR_PORT_RECV_BAD, ++ RT305X_ESW_ATTR_PORT_RECV_GOOD, ++ RT5350_ESW_ATTR_PORT_TR_BAD, ++ RT5350_ESW_ATTR_PORT_TR_GOOD, ++}; ++ ++struct esw_port { ++ bool disable; ++ bool doubletag; ++ bool untag; ++ u8 led; ++ u16 pvid; ++}; ++ ++struct esw_vlan { ++ u8 ports; ++ u16 vid; ++}; ++ ++struct rt305x_esw { ++ struct device *dev; ++ void __iomem *base; ++ int irq; ++ const struct rt305x_esw_platform_data *pdata; ++ /* Protects against concurrent register rmw operations. 
*/ ++ spinlock_t reg_rw_lock; ++ ++ unsigned char port_map; ++ unsigned int reg_initval_fct2; ++ unsigned int reg_initval_fpa2; ++ unsigned int reg_led_polarity; ++ ++ ++ struct switch_dev swdev; ++ bool global_vlan_enable; ++ bool alt_vlan_disable; ++ int bc_storm_protect; ++ int led_frequency; ++ struct esw_vlan vlans[RT305X_ESW_NUM_VLANS]; ++ struct esw_port ports[RT305X_ESW_NUM_PORTS]; ++ ++}; ++ ++static inline void esw_w32(struct rt305x_esw *esw, u32 val, unsigned reg) ++{ ++ __raw_writel(val, esw->base + reg); ++} ++ ++static inline u32 esw_r32(struct rt305x_esw *esw, unsigned reg) ++{ ++ return __raw_readl(esw->base + reg); ++} ++ ++static inline void esw_rmw_raw(struct rt305x_esw *esw, unsigned reg, unsigned long mask, ++ unsigned long val) ++{ ++ unsigned long t; ++ ++ t = __raw_readl(esw->base + reg) & ~mask; ++ __raw_writel(t | val, esw->base + reg); ++} ++ ++static void esw_rmw(struct rt305x_esw *esw, unsigned reg, unsigned long mask, ++ unsigned long val) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&esw->reg_rw_lock, flags); ++ esw_rmw_raw(esw, reg, mask, val); ++ spin_unlock_irqrestore(&esw->reg_rw_lock, flags); ++} ++ ++static u32 rt305x_mii_write(struct rt305x_esw *esw, u32 phy_addr, u32 phy_register, ++ u32 write_data) ++{ ++ unsigned long t_start = jiffies; ++ int ret = 0; ++ ++ while (1) { ++ if (!(esw_r32(esw, RT305X_ESW_REG_PCR1) & ++ RT305X_ESW_PCR1_WT_DONE)) ++ break; ++ if (time_after(jiffies, t_start + RT305X_ESW_PHY_TIMEOUT)) { ++ ret = 1; ++ goto out; ++ } ++ } ++ ++ write_data &= 0xffff; ++ esw_w32(esw, ++ (write_data << RT305X_ESW_PCR0_WT_NWAY_DATA_S) | ++ (phy_register << RT305X_ESW_PCR0_CPU_PHY_REG_S) | ++ (phy_addr) | RT305X_ESW_PCR0_WT_PHY_CMD, ++ RT305X_ESW_REG_PCR0); ++ ++ t_start = jiffies; ++ while (1) { ++ if (esw_r32(esw, RT305X_ESW_REG_PCR1) & ++ RT305X_ESW_PCR1_WT_DONE) ++ break; ++ ++ if (time_after(jiffies, t_start + RT305X_ESW_PHY_TIMEOUT)) { ++ ret = 1; ++ break; ++ } ++ } ++out: ++ if (ret) ++ printk(KERN_ERR "ramips_eth: MDIO timeout\n"); ++ return ret; ++} ++ ++static unsigned esw_get_vlan_id(struct rt305x_esw *esw, unsigned vlan) ++{ ++ unsigned s; ++ unsigned val; ++ ++ s = RT305X_ESW_VLANI_VID_S * (vlan % 2); ++ val = esw_r32(esw, RT305X_ESW_REG_VLANI(vlan / 2)); ++ val = (val >> s) & RT305X_ESW_VLANI_VID_M; ++ ++ return val; ++} ++ ++static void esw_set_vlan_id(struct rt305x_esw *esw, unsigned vlan, unsigned vid) ++{ ++ unsigned s; ++ ++ s = RT305X_ESW_VLANI_VID_S * (vlan % 2); ++ esw_rmw(esw, ++ RT305X_ESW_REG_VLANI(vlan / 2), ++ RT305X_ESW_VLANI_VID_M << s, ++ (vid & RT305X_ESW_VLANI_VID_M) << s); ++} ++ ++static unsigned esw_get_pvid(struct rt305x_esw *esw, unsigned port) ++{ ++ unsigned s, val; ++ ++ s = RT305X_ESW_PVIDC_PVID_S * (port % 2); ++ val = esw_r32(esw, RT305X_ESW_REG_PVIDC(port / 2)); ++ return (val >> s) & RT305X_ESW_PVIDC_PVID_M; ++} ++ ++static void esw_set_pvid(struct rt305x_esw *esw, unsigned port, unsigned pvid) ++{ ++ unsigned s; ++ ++ s = RT305X_ESW_PVIDC_PVID_S * (port % 2); ++ esw_rmw(esw, ++ RT305X_ESW_REG_PVIDC(port / 2), ++ RT305X_ESW_PVIDC_PVID_M << s, ++ (pvid & RT305X_ESW_PVIDC_PVID_M) << s); ++} ++ ++static unsigned esw_get_vmsc(struct rt305x_esw *esw, unsigned vlan) ++{ ++ unsigned s, val; ++ ++ s = RT305X_ESW_VMSC_MSC_S * (vlan % 4); ++ val = esw_r32(esw, RT305X_ESW_REG_VMSC(vlan / 4)); ++ val = (val >> s) & RT305X_ESW_VMSC_MSC_M; ++ ++ return val; ++} ++ ++static void esw_set_vmsc(struct rt305x_esw *esw, unsigned vlan, unsigned msc) ++{ ++ unsigned s; ++ ++ s = RT305X_ESW_VMSC_MSC_S * (vlan % 
4); ++ esw_rmw(esw, ++ RT305X_ESW_REG_VMSC(vlan / 4), ++ RT305X_ESW_VMSC_MSC_M << s, ++ (msc & RT305X_ESW_VMSC_MSC_M) << s); ++} ++ ++static unsigned esw_get_port_disable(struct rt305x_esw *esw) ++{ ++ unsigned reg; ++ reg = esw_r32(esw, RT305X_ESW_REG_POC0); ++ return (reg >> RT305X_ESW_POC0_DIS_PORT_S) & ++ RT305X_ESW_POC0_DIS_PORT_M; ++} ++ ++static void esw_set_port_disable(struct rt305x_esw *esw, unsigned disable_mask) ++{ ++ unsigned old_mask; ++ unsigned enable_mask; ++ unsigned changed; ++ int i; ++ ++ old_mask = esw_get_port_disable(esw); ++ changed = old_mask ^ disable_mask; ++ enable_mask = old_mask & disable_mask; ++ ++ /* enable before writing to MII */ ++ esw_rmw(esw, RT305X_ESW_REG_POC0, ++ (RT305X_ESW_POC0_DIS_PORT_M << ++ RT305X_ESW_POC0_DIS_PORT_S), ++ enable_mask << RT305X_ESW_POC0_DIS_PORT_S); ++ ++ for (i = 0; i < RT305X_ESW_NUM_LEDS; i++) { ++ if (!(changed & (1 << i))) ++ continue; ++ if (disable_mask & (1 << i)) { ++ /* disable */ ++ rt305x_mii_write(esw, i, MII_BMCR, ++ BMCR_PDOWN); ++ } else { ++ /* enable */ ++ rt305x_mii_write(esw, i, MII_BMCR, ++ BMCR_FULLDPLX | ++ BMCR_ANENABLE | ++ BMCR_ANRESTART | ++ BMCR_SPEED100); ++ } ++ } ++ ++ /* disable after writing to MII */ ++ esw_rmw(esw, RT305X_ESW_REG_POC0, ++ (RT305X_ESW_POC0_DIS_PORT_M << ++ RT305X_ESW_POC0_DIS_PORT_S), ++ disable_mask << RT305X_ESW_POC0_DIS_PORT_S); ++} ++ ++static void esw_set_gsc(struct rt305x_esw *esw) ++{ ++ esw_rmw(esw, RT305X_ESW_REG_SGC, ++ RT305X_ESW_GSC_BC_STROM_MASK << RT305X_ESW_GSC_BC_STROM_SHIFT, ++ esw->bc_storm_protect << RT305X_ESW_GSC_BC_STROM_SHIFT); ++ esw_rmw(esw, RT305X_ESW_REG_SGC, ++ RT305X_ESW_GSC_LED_FREQ_MASK << RT305X_ESW_GSC_LED_FREQ_SHIFT, ++ esw->led_frequency << RT305X_ESW_GSC_LED_FREQ_SHIFT); ++} ++ ++static int esw_apply_config(struct switch_dev *dev); ++ ++static void esw_hw_init(struct rt305x_esw *esw) ++{ ++ int i; ++ u8 port_disable = 0; ++ u8 port_map = RT305X_ESW_PMAP_LLLLLL; ++ ++ /* vodoo from original driver */ ++ esw_w32(esw, 0xC8A07850, RT305X_ESW_REG_FCT0); ++ esw_w32(esw, 0x00000000, RT305X_ESW_REG_SGC2); ++ /* Port priority 1 for all ports, vlan enabled. */ ++ esw_w32(esw, 0x00005555 | ++ (RT305X_ESW_PORTS_ALL << RT305X_ESW_PFC1_EN_VLAN_S), ++ RT305X_ESW_REG_PFC1); ++ ++ /* Enable Back Pressure, and Flow Control */ ++ esw_w32(esw, ++ ((RT305X_ESW_PORTS_ALL << RT305X_ESW_POC0_EN_BP_S) | ++ (RT305X_ESW_PORTS_ALL << RT305X_ESW_POC0_EN_FC_S)), ++ RT305X_ESW_REG_POC0); ++ ++ /* Enable Aging, and VLAN TAG removal */ ++ esw_w32(esw, ++ ((RT305X_ESW_PORTS_ALL << RT305X_ESW_POC2_ENAGING_S) | ++ (RT305X_ESW_PORTS_NOCPU << RT305X_ESW_POC2_UNTAG_EN_S)), ++ RT305X_ESW_REG_POC2); ++ ++ if (esw->reg_initval_fct2) ++ esw_w32(esw, esw->reg_initval_fct2, RT305X_ESW_REG_FCT2); ++ else ++ esw_w32(esw, esw->pdata->reg_initval_fct2, RT305X_ESW_REG_FCT2); ++ ++ /* ++ * 300s aging timer, max packet len 1536, broadcast storm prevention ++ * disabled, disable collision abort, mac xor48 hash, 10 packet back ++ * pressure jam, GMII disable was_transmit, back pressure disabled, ++ * 30ms led flash, unmatched IGMP as broadcast, rmc tb fault to all ++ * ports. 
++ */ ++ esw_w32(esw, 0x0008a301, RT305X_ESW_REG_SGC); ++ ++ /* Setup SoC Port control register */ ++ esw_w32(esw, ++ (RT305X_ESW_SOCPC_CRC_PADDING | ++ (RT305X_ESW_PORTS_CPU << RT305X_ESW_SOCPC_DISUN2CPU_S) | ++ (RT305X_ESW_PORTS_CPU << RT305X_ESW_SOCPC_DISMC2CPU_S) | ++ (RT305X_ESW_PORTS_CPU << RT305X_ESW_SOCPC_DISBC2CPU_S)), ++ RT305X_ESW_REG_SOCPC); ++ ++ if (esw->reg_initval_fpa2) ++ esw_w32(esw, esw->reg_initval_fpa2, RT305X_ESW_REG_FPA2); ++ else ++ esw_w32(esw, esw->pdata->reg_initval_fpa2, RT305X_ESW_REG_FPA2); ++ esw_w32(esw, 0x00000000, RT305X_ESW_REG_FPA); ++ ++ /* Force Link/Activity on ports */ ++ esw_w32(esw, 0x00000005, RT305X_ESW_REG_P0LED); ++ esw_w32(esw, 0x00000005, RT305X_ESW_REG_P1LED); ++ esw_w32(esw, 0x00000005, RT305X_ESW_REG_P2LED); ++ esw_w32(esw, 0x00000005, RT305X_ESW_REG_P3LED); ++ esw_w32(esw, 0x00000005, RT305X_ESW_REG_P4LED); ++ ++ /* Copy disabled port configuration from bootloader setup */ ++ port_disable = esw_get_port_disable(esw); ++ for (i = 0; i < 6; i++) ++ esw->ports[i].disable = (port_disable & (1 << i)) != 0; ++ ++ if (soc_is_rt3352()) { ++ /* reset EPHY */ ++ u32 val = rt_sysc_r32(SYSC_REG_RESET_CTRL); ++ rt_sysc_w32(val | RT5350_RESET_EPHY, SYSC_REG_RESET_CTRL); ++ rt_sysc_w32(val, SYSC_REG_RESET_CTRL); ++ ++ rt305x_mii_write(esw, 0, 31, 0x8000); ++ for (i = 0; i < 5; i++) { ++ if (esw->ports[i].disable) { ++ rt305x_mii_write(esw, i, MII_BMCR, BMCR_PDOWN); ++ } else { ++ rt305x_mii_write(esw, i, MII_BMCR, ++ BMCR_FULLDPLX | ++ BMCR_ANENABLE | ++ BMCR_SPEED100); ++ } ++ /* TX10 waveform coefficient LSB=0 disable PHY */ ++ rt305x_mii_write(esw, i, 26, 0x1601); ++ /* TX100/TX10 AD/DA current bias */ ++ rt305x_mii_write(esw, i, 29, 0x7016); ++ /* TX100 slew rate control */ ++ rt305x_mii_write(esw, i, 30, 0x0038); ++ } ++ ++ /* select global register */ ++ rt305x_mii_write(esw, 0, 31, 0x0); ++ /* enlarge agcsel threshold 3 and threshold 2 */ ++ rt305x_mii_write(esw, 0, 1, 0x4a40); ++ /* enlarge agcsel threshold 5 and threshold 4 */ ++ rt305x_mii_write(esw, 0, 2, 0x6254); ++ /* enlarge agcsel threshold */ ++ rt305x_mii_write(esw, 0, 3, 0xa17f); ++ rt305x_mii_write(esw, 0,12, 0x7eaa); ++ /* longer TP_IDL tail length */ ++ rt305x_mii_write(esw, 0, 14, 0x65); ++ /* increased squelch pulse count threshold. 
*/ ++ rt305x_mii_write(esw, 0, 16, 0x0684); ++ /* set TX10 signal amplitude threshold to minimum */ ++ rt305x_mii_write(esw, 0, 17, 0x0fe0); ++ /* set squelch amplitude to higher threshold */ ++ rt305x_mii_write(esw, 0, 18, 0x40ba); ++ /* tune TP_IDL tail and head waveform, enable power down slew rate control */ ++ rt305x_mii_write(esw, 0, 22, 0x253f); ++ /* set PLL/Receive bias current are calibrated */ ++ rt305x_mii_write(esw, 0, 27, 0x2fda); ++ /* change PLL/Receive bias current to internal(RT3350) */ ++ rt305x_mii_write(esw, 0, 28, 0xc410); ++ /* change PLL bias current to internal(RT3052_MP3) */ ++ rt305x_mii_write(esw, 0, 29, 0x598b); ++ /* select local register */ ++ rt305x_mii_write(esw, 0, 31, 0x8000); ++ } else if (soc_is_rt5350()) { ++ /* reset EPHY */ ++ u32 val = rt_sysc_r32(SYSC_REG_RESET_CTRL); ++ rt_sysc_w32(val | RT5350_RESET_EPHY, SYSC_REG_RESET_CTRL); ++ rt_sysc_w32(val, SYSC_REG_RESET_CTRL); ++ ++ /* set the led polarity */ ++ esw_w32(esw, esw->reg_led_polarity & 0x1F, RT5350_EWS_REG_LED_POLARITY); ++ ++ /* local registers */ ++ rt305x_mii_write(esw, 0, 31, 0x8000); ++ for (i = 0; i < 5; i++) { ++ if (esw->ports[i].disable) { ++ rt305x_mii_write(esw, i, MII_BMCR, BMCR_PDOWN); ++ } else { ++ rt305x_mii_write(esw, i, MII_BMCR, ++ BMCR_FULLDPLX | ++ BMCR_ANENABLE | ++ BMCR_SPEED100); ++ } ++ /* TX10 waveform coefficient LSB=0 disable PHY */ ++ rt305x_mii_write(esw, i, 26, 0x1601); ++ /* TX100/TX10 AD/DA current bias */ ++ rt305x_mii_write(esw, i, 29, 0x7015); ++ /* TX100 slew rate control */ ++ rt305x_mii_write(esw, i, 30, 0x0038); ++ } ++ ++ /* global registers */ ++ rt305x_mii_write(esw, 0, 31, 0x0); ++ /* enlarge agcsel threshold 3 and threshold 2 */ ++ rt305x_mii_write(esw, 0, 1, 0x4a40); ++ /* enlarge agcsel threshold 5 and threshold 4 */ ++ rt305x_mii_write(esw, 0, 2, 0x6254); ++ /* enlarge agcsel threshold 6 */ ++ rt305x_mii_write(esw, 0, 3, 0xa17f); ++ rt305x_mii_write(esw, 0, 12, 0x7eaa); ++ /* longer TP_IDL tail length */ ++ rt305x_mii_write(esw, 0, 14, 0x65); ++ /* increased squelch pulse count threshold. 
*/ ++ rt305x_mii_write(esw, 0, 16, 0x0684); ++ /* set TX10 signal amplitude threshold to minimum */ ++ rt305x_mii_write(esw, 0, 17, 0x0fe0); ++ /* set squelch amplitude to higher threshold */ ++ rt305x_mii_write(esw, 0, 18, 0x40ba); ++ /* tune TP_IDL tail and head waveform, enable power down slew rate control */ ++ rt305x_mii_write(esw, 0, 22, 0x253f); ++ /* set PLL/Receive bias current are calibrated */ ++ rt305x_mii_write(esw, 0, 27, 0x2fda); ++ /* change PLL/Receive bias current to internal(RT3350) */ ++ rt305x_mii_write(esw, 0, 28, 0xc410); ++ /* change PLL bias current to internal(RT3052_MP3) */ ++ rt305x_mii_write(esw, 0, 29, 0x598b); ++ /* select local register */ ++ rt305x_mii_write(esw, 0, 31, 0x8000); ++ } else { ++ rt305x_mii_write(esw, 0, 31, 0x8000); ++ for (i = 0; i < 5; i++) { ++ if (esw->ports[i].disable) { ++ rt305x_mii_write(esw, i, MII_BMCR, BMCR_PDOWN); ++ } else { ++ rt305x_mii_write(esw, i, MII_BMCR, ++ BMCR_FULLDPLX | ++ BMCR_ANENABLE | ++ BMCR_SPEED100); ++ } ++ /* TX10 waveform coefficient */ ++ rt305x_mii_write(esw, i, 26, 0x1601); ++ /* TX100/TX10 AD/DA current bias */ ++ rt305x_mii_write(esw, i, 29, 0x7058); ++ /* TX100 slew rate control */ ++ rt305x_mii_write(esw, i, 30, 0x0018); ++ } ++ ++ /* PHY IOT */ ++ /* select global register */ ++ rt305x_mii_write(esw, 0, 31, 0x0); ++ /* tune TP_IDL tail and head waveform */ ++ rt305x_mii_write(esw, 0, 22, 0x052f); ++ /* set TX10 signal amplitude threshold to minimum */ ++ rt305x_mii_write(esw, 0, 17, 0x0fe0); ++ /* set squelch amplitude to higher threshold */ ++ rt305x_mii_write(esw, 0, 18, 0x40ba); ++ /* longer TP_IDL tail length */ ++ rt305x_mii_write(esw, 0, 14, 0x65); ++ /* select local register */ ++ rt305x_mii_write(esw, 0, 31, 0x8000); ++ } ++ ++ if (esw->port_map) ++ port_map = esw->port_map; ++ else ++ port_map = RT305X_ESW_PMAP_LLLLLL; ++ ++ /* ++ * Unused HW feature, but still nice to be consistent here... ++ * This is also exported to userspace ('lan' attribute) so it's ++ * conveniently usable to decide which ports go into the wan vlan by ++ * default. ++ */ ++ esw_rmw(esw, RT305X_ESW_REG_SGC2, ++ RT305X_ESW_SGC2_LAN_PMAP_M << RT305X_ESW_SGC2_LAN_PMAP_S, ++ port_map << RT305X_ESW_SGC2_LAN_PMAP_S); ++ ++ /* make the switch leds blink */ ++ for (i = 0; i < RT305X_ESW_NUM_LEDS; i++) ++ esw->ports[i].led = 0x05; ++ ++ /* Apply the empty config. 
*/ ++ esw_apply_config(&esw->swdev); ++ ++ /* Only unmask the port change interrupt */ ++ esw_w32(esw, ~RT305X_ESW_PORT_ST_CHG, RT305X_ESW_REG_IMR); ++} ++ ++static irqreturn_t esw_interrupt(int irq, void *_esw) ++{ ++ struct rt305x_esw *esw = (struct rt305x_esw *) _esw; ++ u32 status; ++ ++ status = esw_r32(esw, RT305X_ESW_REG_ISR); ++ if (status & RT305X_ESW_PORT_ST_CHG) { ++ u32 link = esw_r32(esw, RT305X_ESW_REG_POA); ++ link >>= RT305X_ESW_POA_LINK_SHIFT; ++ link &= RT305X_ESW_POA_LINK_MASK; ++ dev_info(esw->dev, "link changed 0x%02X\n", link); ++ } ++ esw_w32(esw, status, RT305X_ESW_REG_ISR); ++ ++ return IRQ_HANDLED; ++} ++ ++static int esw_apply_config(struct switch_dev *dev) ++{ ++ struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); ++ int i; ++ u8 disable = 0; ++ u8 doubletag = 0; ++ u8 en_vlan = 0; ++ u8 untag = 0; ++ ++ for (i = 0; i < RT305X_ESW_NUM_VLANS; i++) { ++ u32 vid, vmsc; ++ if (esw->global_vlan_enable) { ++ vid = esw->vlans[i].vid; ++ vmsc = esw->vlans[i].ports; ++ } else { ++ vid = RT305X_ESW_VLAN_NONE; ++ vmsc = RT305X_ESW_PORTS_NONE; ++ } ++ esw_set_vlan_id(esw, i, vid); ++ esw_set_vmsc(esw, i, vmsc); ++ } ++ ++ for (i = 0; i < RT305X_ESW_NUM_PORTS; i++) { ++ u32 pvid; ++ disable |= esw->ports[i].disable << i; ++ if (esw->global_vlan_enable) { ++ doubletag |= esw->ports[i].doubletag << i; ++ en_vlan |= 1 << i; ++ untag |= esw->ports[i].untag << i; ++ pvid = esw->ports[i].pvid; ++ } else { ++ int x = esw->alt_vlan_disable ? 0 : 1; ++ doubletag |= x << i; ++ en_vlan |= x << i; ++ untag |= x << i; ++ pvid = 0; ++ } ++ esw_set_pvid(esw, i, pvid); ++ if (i < RT305X_ESW_NUM_LEDS) ++ esw_w32(esw, esw->ports[i].led, ++ RT305X_ESW_REG_P0LED + 4*i); ++ } ++ ++ esw_set_gsc(esw); ++ esw_set_port_disable(esw, disable); ++ esw_rmw(esw, RT305X_ESW_REG_SGC2, ++ (RT305X_ESW_SGC2_DOUBLE_TAG_M << ++ RT305X_ESW_SGC2_DOUBLE_TAG_S), ++ doubletag << RT305X_ESW_SGC2_DOUBLE_TAG_S); ++ esw_rmw(esw, RT305X_ESW_REG_PFC1, ++ RT305X_ESW_PFC1_EN_VLAN_M << RT305X_ESW_PFC1_EN_VLAN_S, ++ en_vlan << RT305X_ESW_PFC1_EN_VLAN_S); ++ esw_rmw(esw, RT305X_ESW_REG_POC2, ++ RT305X_ESW_POC2_UNTAG_EN_M << RT305X_ESW_POC2_UNTAG_EN_S, ++ untag << RT305X_ESW_POC2_UNTAG_EN_S); ++ ++ if (!esw->global_vlan_enable) { ++ /* ++ * Still need to put all ports into vlan 0 or they'll be ++ * isolated. 
++ * NOTE: vlan 0 is special, no vlan tag is prepended ++ */ ++ esw_set_vlan_id(esw, 0, 0); ++ esw_set_vmsc(esw, 0, RT305X_ESW_PORTS_ALL); ++ } ++ ++ return 0; ++} ++ ++static int esw_reset_switch(struct switch_dev *dev) ++{ ++ struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); ++ ++ esw->global_vlan_enable = 0; ++ memset(esw->ports, 0, sizeof(esw->ports)); ++ memset(esw->vlans, 0, sizeof(esw->vlans)); ++ esw_hw_init(esw); ++ ++ return 0; ++} ++ ++static int esw_get_vlan_enable(struct switch_dev *dev, ++ const struct switch_attr *attr, ++ struct switch_val *val) ++{ ++ struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); ++ ++ val->value.i = esw->global_vlan_enable; ++ ++ return 0; ++} ++ ++static int esw_set_vlan_enable(struct switch_dev *dev, ++ const struct switch_attr *attr, ++ struct switch_val *val) ++{ ++ struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); ++ ++ esw->global_vlan_enable = val->value.i != 0; ++ ++ return 0; ++} ++ ++static int esw_get_alt_vlan_disable(struct switch_dev *dev, ++ const struct switch_attr *attr, ++ struct switch_val *val) ++{ ++ struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); ++ ++ val->value.i = esw->alt_vlan_disable; ++ ++ return 0; ++} ++ ++static int esw_set_alt_vlan_disable(struct switch_dev *dev, ++ const struct switch_attr *attr, ++ struct switch_val *val) ++{ ++ struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); ++ ++ esw->alt_vlan_disable = val->value.i != 0; ++ ++ return 0; ++} ++ ++static int ++rt305x_esw_set_bc_status(struct switch_dev *dev, ++ const struct switch_attr *attr, ++ struct switch_val *val) ++{ ++ struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); ++ ++ esw->bc_storm_protect = val->value.i & RT305X_ESW_GSC_BC_STROM_MASK; ++ ++ return 0; ++} ++ ++static int ++rt305x_esw_get_bc_status(struct switch_dev *dev, ++ const struct switch_attr *attr, ++ struct switch_val *val) ++{ ++ struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); ++ ++ val->value.i = esw->bc_storm_protect; ++ ++ return 0; ++} ++ ++static int ++rt305x_esw_set_led_freq(struct switch_dev *dev, ++ const struct switch_attr *attr, ++ struct switch_val *val) ++{ ++ struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); ++ ++ esw->led_frequency = val->value.i & RT305X_ESW_GSC_LED_FREQ_MASK; ++ ++ return 0; ++} ++ ++static int ++rt305x_esw_get_led_freq(struct switch_dev *dev, ++ const struct switch_attr *attr, ++ struct switch_val *val) ++{ ++ struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); ++ ++ val->value.i = esw->led_frequency; ++ ++ return 0; ++} ++ ++static int esw_get_port_link(struct switch_dev *dev, ++ int port, ++ struct switch_port_link *link) ++{ ++ struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); ++ u32 speed, poa; ++ ++ if (port < 0 || port >= RT305X_ESW_NUM_PORTS) ++ return -EINVAL; ++ ++ poa = esw_r32(esw, RT305X_ESW_REG_POA) >> port; ++ ++ link->link = (poa >> RT305X_ESW_LINK_S) & 1; ++ link->duplex = (poa >> RT305X_ESW_DUPLEX_S) & 1; ++ if (port < RT305X_ESW_NUM_LEDS) { ++ speed = (poa >> RT305X_ESW_SPD_S) & 1; ++ } else { ++ if (port == RT305X_ESW_NUM_PORTS - 1) ++ poa >>= 1; ++ speed = (poa >> RT305X_ESW_SPD_S) & 3; ++ } ++ switch (speed) { ++ case 0: ++ link->speed = SWITCH_PORT_SPEED_10; ++ break; ++ case 1: ++ link->speed = SWITCH_PORT_SPEED_100; ++ break; ++ case 2: ++ case 3: /* forced gige speed can be 2 or 3 */ ++ link->speed = SWITCH_PORT_SPEED_1000; ++ break; ++ 
default: ++ link->speed = SWITCH_PORT_SPEED_UNKNOWN; ++ break; ++ } ++ ++ return 0; ++} ++ ++static int esw_get_port_bool(struct switch_dev *dev, ++ const struct switch_attr *attr, ++ struct switch_val *val) ++{ ++ struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); ++ int idx = val->port_vlan; ++ u32 x, reg, shift; ++ ++ if (idx < 0 || idx >= RT305X_ESW_NUM_PORTS) ++ return -EINVAL; ++ ++ switch (attr->id) { ++ case RT305X_ESW_ATTR_PORT_DISABLE: ++ reg = RT305X_ESW_REG_POC0; ++ shift = RT305X_ESW_POC0_DIS_PORT_S; ++ break; ++ case RT305X_ESW_ATTR_PORT_DOUBLETAG: ++ reg = RT305X_ESW_REG_SGC2; ++ shift = RT305X_ESW_SGC2_DOUBLE_TAG_S; ++ break; ++ case RT305X_ESW_ATTR_PORT_UNTAG: ++ reg = RT305X_ESW_REG_POC2; ++ shift = RT305X_ESW_POC2_UNTAG_EN_S; ++ break; ++ case RT305X_ESW_ATTR_PORT_LAN: ++ reg = RT305X_ESW_REG_SGC2; ++ shift = RT305X_ESW_SGC2_LAN_PMAP_S; ++ if (idx >= RT305X_ESW_NUM_LANWAN) ++ return -EINVAL; ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ x = esw_r32(esw, reg); ++ val->value.i = (x >> (idx + shift)) & 1; ++ ++ return 0; ++} ++ ++static int esw_set_port_bool(struct switch_dev *dev, ++ const struct switch_attr *attr, ++ struct switch_val *val) ++{ ++ struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); ++ int idx = val->port_vlan; ++ ++ if (idx < 0 || idx >= RT305X_ESW_NUM_PORTS || ++ val->value.i < 0 || val->value.i > 1) ++ return -EINVAL; ++ ++ switch (attr->id) { ++ case RT305X_ESW_ATTR_PORT_DISABLE: ++ esw->ports[idx].disable = val->value.i; ++ break; ++ case RT305X_ESW_ATTR_PORT_DOUBLETAG: ++ esw->ports[idx].doubletag = val->value.i; ++ break; ++ case RT305X_ESW_ATTR_PORT_UNTAG: ++ esw->ports[idx].untag = val->value.i; ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static int esw_get_port_recv_badgood(struct switch_dev *dev, ++ const struct switch_attr *attr, ++ struct switch_val *val) ++{ ++ struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); ++ int idx = val->port_vlan; ++ int shift = attr->id == RT305X_ESW_ATTR_PORT_RECV_GOOD ? 0 : 16; ++ u32 reg; ++ ++ if (idx < 0 || idx >= RT305X_ESW_NUM_LANWAN) ++ return -EINVAL; ++ reg = esw_r32(esw, RT305X_ESW_REG_PXPC(idx)); ++ val->value.i = (reg >> shift) & 0xffff; ++ ++ return 0; ++} ++ ++static int ++esw_get_port_tr_badgood(struct switch_dev *dev, ++ const struct switch_attr *attr, ++ struct switch_val *val) ++{ ++ struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); ++ ++ int idx = val->port_vlan; ++ int shift = attr->id == RT5350_ESW_ATTR_PORT_TR_GOOD ? 
0 : 16; ++ u32 reg; ++ ++ if (!soc_is_rt5350()) ++ return -EINVAL; ++ ++ if (idx < 0 || idx >= RT305X_ESW_NUM_LANWAN) ++ return -EINVAL; ++ ++ reg = esw_r32(esw, RT5350_ESW_REG_PXTPC(idx)); ++ val->value.i = (reg >> shift) & 0xffff; ++ ++ return 0; ++} ++ ++static int esw_get_port_led(struct switch_dev *dev, ++ const struct switch_attr *attr, ++ struct switch_val *val) ++{ ++ struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); ++ int idx = val->port_vlan; ++ ++ if (idx < 0 || idx >= RT305X_ESW_NUM_PORTS || ++ idx >= RT305X_ESW_NUM_LEDS) ++ return -EINVAL; ++ ++ val->value.i = esw_r32(esw, RT305X_ESW_REG_P0LED + 4*idx); ++ ++ return 0; ++} ++ ++static int esw_set_port_led(struct switch_dev *dev, ++ const struct switch_attr *attr, ++ struct switch_val *val) ++{ ++ struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); ++ int idx = val->port_vlan; ++ ++ if (idx < 0 || idx >= RT305X_ESW_NUM_LEDS) ++ return -EINVAL; ++ ++ esw->ports[idx].led = val->value.i; ++ ++ return 0; ++} ++ ++static int esw_get_port_pvid(struct switch_dev *dev, int port, int *val) ++{ ++ struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); ++ ++ if (port >= RT305X_ESW_NUM_PORTS) ++ return -EINVAL; ++ ++ *val = esw_get_pvid(esw, port); ++ ++ return 0; ++} ++ ++static int esw_set_port_pvid(struct switch_dev *dev, int port, int val) ++{ ++ struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); ++ ++ if (port >= RT305X_ESW_NUM_PORTS) ++ return -EINVAL; ++ ++ esw->ports[port].pvid = val; ++ ++ return 0; ++} ++ ++static int esw_get_vlan_ports(struct switch_dev *dev, struct switch_val *val) ++{ ++ struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); ++ u32 vmsc, poc2; ++ int vlan_idx = -1; ++ int i; ++ ++ val->len = 0; ++ ++ if (val->port_vlan < 0 || val->port_vlan >= RT305X_ESW_NUM_VIDS) ++ return -EINVAL; ++ ++ /* valid vlan? */ ++ for (i = 0; i < RT305X_ESW_NUM_VLANS; i++) { ++ if (esw_get_vlan_id(esw, i) == val->port_vlan && ++ esw_get_vmsc(esw, i) != RT305X_ESW_PORTS_NONE) { ++ vlan_idx = i; ++ break; ++ } ++ } ++ ++ if (vlan_idx == -1) ++ return -EINVAL; ++ ++ vmsc = esw_get_vmsc(esw, vlan_idx); ++ poc2 = esw_r32(esw, RT305X_ESW_REG_POC2); ++ ++ for (i = 0; i < RT305X_ESW_NUM_PORTS; i++) { ++ struct switch_port *p; ++ int port_mask = 1 << i; ++ ++ if (!(vmsc & port_mask)) ++ continue; ++ ++ p = &val->value.ports[val->len++]; ++ p->id = i; ++ if (poc2 & (port_mask << RT305X_ESW_POC2_UNTAG_EN_S)) ++ p->flags = 0; ++ else ++ p->flags = 1 << SWITCH_PORT_FLAG_TAGGED; ++ } ++ ++ return 0; ++} ++ ++static int esw_set_vlan_ports(struct switch_dev *dev, struct switch_val *val) ++{ ++ struct rt305x_esw *esw = container_of(dev, struct rt305x_esw, swdev); ++ int ports; ++ int vlan_idx = -1; ++ int i; ++ ++ if (val->port_vlan < 0 || val->port_vlan >= RT305X_ESW_NUM_VIDS || ++ val->len > RT305X_ESW_NUM_PORTS) ++ return -EINVAL; ++ ++ /* one of the already defined vlans? 
*/ ++ for (i = 0; i < RT305X_ESW_NUM_VLANS; i++) { ++ if (esw->vlans[i].vid == val->port_vlan && ++ esw->vlans[i].ports != RT305X_ESW_PORTS_NONE) { ++ vlan_idx = i; ++ break; ++ } ++ } ++ ++ /* select a free slot */ ++ for (i = 0; vlan_idx == -1 && i < RT305X_ESW_NUM_VLANS; i++) { ++ if (esw->vlans[i].ports == RT305X_ESW_PORTS_NONE) ++ vlan_idx = i; ++ } ++ ++ /* bail if all slots are in use */ ++ if (vlan_idx == -1) ++ return -EINVAL; ++ ++ ports = RT305X_ESW_PORTS_NONE; ++ for (i = 0; i < val->len; i++) { ++ struct switch_port *p = &val->value.ports[i]; ++ int port_mask = 1 << p->id; ++ bool untagged = !(p->flags & (1 << SWITCH_PORT_FLAG_TAGGED)); ++ ++ if (p->id >= RT305X_ESW_NUM_PORTS) ++ return -EINVAL; ++ ++ ports |= port_mask; ++ esw->ports[p->id].untag = untagged; ++ } ++ esw->vlans[vlan_idx].ports = ports; ++ if (ports == RT305X_ESW_PORTS_NONE) ++ esw->vlans[vlan_idx].vid = RT305X_ESW_VLAN_NONE; ++ else ++ esw->vlans[vlan_idx].vid = val->port_vlan; ++ ++ return 0; ++} ++ ++static const struct switch_attr esw_global[] = { ++ { ++ .type = SWITCH_TYPE_INT, ++ .name = "enable_vlan", ++ .description = "VLAN mode (1:enabled)", ++ .max = 1, ++ .id = RT305X_ESW_ATTR_ENABLE_VLAN, ++ .get = esw_get_vlan_enable, ++ .set = esw_set_vlan_enable, ++ }, ++ { ++ .type = SWITCH_TYPE_INT, ++ .name = "alternate_vlan_disable", ++ .description = "Use en_vlan instead of doubletag to disable" ++ " VLAN mode", ++ .max = 1, ++ .id = RT305X_ESW_ATTR_ALT_VLAN_DISABLE, ++ .get = esw_get_alt_vlan_disable, ++ .set = esw_set_alt_vlan_disable, ++ }, ++ { ++ .type = SWITCH_TYPE_INT, ++ .name = "bc_storm_protect", ++ .description = "Global broadcast storm protection (0:Disable, 1:64 blocks, 2:96 blocks, 3:128 blocks)", ++ .max = 3, ++ .id = RT305X_ESW_ATTR_BC_STATUS, ++ .get = rt305x_esw_get_bc_status, ++ .set = rt305x_esw_set_bc_status, ++ }, ++ { ++ .type = SWITCH_TYPE_INT, ++ .name = "led_frequency", ++ .description = "LED Flash frequency (0:30mS, 1:60mS, 2:240mS, 3:480mS)", ++ .max = 3, ++ .id = RT305X_ESW_ATTR_LED_FREQ, ++ .get = rt305x_esw_get_led_freq, ++ .set = rt305x_esw_set_led_freq, ++ } ++}; ++ ++static const struct switch_attr esw_port[] = { ++ { ++ .type = SWITCH_TYPE_INT, ++ .name = "disable", ++ .description = "Port state (1:disabled)", ++ .max = 1, ++ .id = RT305X_ESW_ATTR_PORT_DISABLE, ++ .get = esw_get_port_bool, ++ .set = esw_set_port_bool, ++ }, ++ { ++ .type = SWITCH_TYPE_INT, ++ .name = "doubletag", ++ .description = "Double tagging for incoming vlan packets " ++ "(1:enabled)", ++ .max = 1, ++ .id = RT305X_ESW_ATTR_PORT_DOUBLETAG, ++ .get = esw_get_port_bool, ++ .set = esw_set_port_bool, ++ }, ++ { ++ .type = SWITCH_TYPE_INT, ++ .name = "untag", ++ .description = "Untag (1:strip outgoing vlan tag)", ++ .max = 1, ++ .id = RT305X_ESW_ATTR_PORT_UNTAG, ++ .get = esw_get_port_bool, ++ .set = esw_set_port_bool, ++ }, ++ { ++ .type = SWITCH_TYPE_INT, ++ .name = "led", ++ .description = "LED mode (0:link, 1:100m, 2:duplex, 3:activity," ++ " 4:collision, 5:linkact, 6:duplcoll, 7:10mact," ++ " 8:100mact, 10:blink, 11:off, 12:on)", ++ .max = 15, ++ .id = RT305X_ESW_ATTR_PORT_LED, ++ .get = esw_get_port_led, ++ .set = esw_set_port_led, ++ }, ++ { ++ .type = SWITCH_TYPE_INT, ++ .name = "lan", ++ .description = "HW port group (0:wan, 1:lan)", ++ .max = 1, ++ .id = RT305X_ESW_ATTR_PORT_LAN, ++ .get = esw_get_port_bool, ++ }, ++ { ++ .type = SWITCH_TYPE_INT, ++ .name = "recv_bad", ++ .description = "Receive bad packet counter", ++ .id = RT305X_ESW_ATTR_PORT_RECV_BAD, ++ .get = esw_get_port_recv_badgood, ++ 
}, ++ { ++ .type = SWITCH_TYPE_INT, ++ .name = "recv_good", ++ .description = "Receive good packet counter", ++ .id = RT305X_ESW_ATTR_PORT_RECV_GOOD, ++ .get = esw_get_port_recv_badgood, ++ }, ++ { ++ .type = SWITCH_TYPE_INT, ++ .name = "tr_bad", ++ ++ .description = "Transmit bad packet counter. rt5350 only", ++ .id = RT5350_ESW_ATTR_PORT_TR_BAD, ++ .get = esw_get_port_tr_badgood, ++ }, ++ { ++ .type = SWITCH_TYPE_INT, ++ .name = "tr_good", ++ ++ .description = "Transmit good packet counter. rt5350 only", ++ .id = RT5350_ESW_ATTR_PORT_TR_GOOD, ++ .get = esw_get_port_tr_badgood, ++ }, ++}; ++ ++static const struct switch_attr esw_vlan[] = { ++}; ++ ++static const struct switch_dev_ops esw_ops = { ++ .attr_global = { ++ .attr = esw_global, ++ .n_attr = ARRAY_SIZE(esw_global), ++ }, ++ .attr_port = { ++ .attr = esw_port, ++ .n_attr = ARRAY_SIZE(esw_port), ++ }, ++ .attr_vlan = { ++ .attr = esw_vlan, ++ .n_attr = ARRAY_SIZE(esw_vlan), ++ }, ++ .get_vlan_ports = esw_get_vlan_ports, ++ .set_vlan_ports = esw_set_vlan_ports, ++ .get_port_pvid = esw_get_port_pvid, ++ .set_port_pvid = esw_set_port_pvid, ++ .get_port_link = esw_get_port_link, ++ .apply_config = esw_apply_config, ++ .reset_switch = esw_reset_switch, ++}; ++ ++static struct rt305x_esw_platform_data rt3050_esw_data = { ++ /* All ports are LAN ports. */ ++ .vlan_config = RT305X_ESW_VLAN_CONFIG_NONE, ++ .reg_initval_fct2 = 0x00d6500c, ++ /* ++ * ext phy base addr 31, enable port 5 polling, rx/tx clock skew 1, ++ * turbo mii off, rgmi 3.3v off ++ * port5: disabled ++ * port6: enabled, gige, full-duplex, rx/tx-flow-control ++ */ ++ .reg_initval_fpa2 = 0x3f502b28, ++}; ++ ++static const struct of_device_id ralink_esw_match[] = { ++ { .compatible = "ralink,rt3050-esw", .data = &rt3050_esw_data }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(of, ralink_esw_match); ++ ++static int esw_probe(struct platform_device *pdev) ++{ ++ struct device_node *np = pdev->dev.of_node; ++ const struct rt305x_esw_platform_data *pdata; ++ const __be32 *port_map, *reg_init; ++ struct rt305x_esw *esw; ++ struct switch_dev *swdev; ++ struct resource *res, *irq; ++ int err; ++ ++ pdata = pdev->dev.platform_data; ++ if (!pdata) { ++ const struct of_device_id *match; ++ match = of_match_device(ralink_esw_match, &pdev->dev); ++ if (match) ++ pdata = (struct rt305x_esw_platform_data *) match->data; ++ } ++ if (!pdata) ++ return -EINVAL; ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (!res) { ++ dev_err(&pdev->dev, "no memory resource found\n"); ++ return -ENOMEM; ++ } ++ ++ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); ++ if (!irq) { ++ dev_err(&pdev->dev, "no irq resource found\n"); ++ return -ENOMEM; ++ } ++ ++ esw = kzalloc(sizeof(struct rt305x_esw), GFP_KERNEL); ++ if (!esw) { ++ dev_err(&pdev->dev, "no memory for private data\n"); ++ return -ENOMEM; ++ } ++ ++ esw->dev = &pdev->dev; ++ esw->irq = irq->start; ++ esw->base = ioremap(res->start, resource_size(res)); ++ if (!esw->base) { ++ dev_err(&pdev->dev, "ioremap failed\n"); ++ err = -ENOMEM; ++ goto free_esw; ++ } ++ ++ port_map = of_get_property(np, "ralink,portmap", NULL); ++ if (port_map) ++ esw->port_map = be32_to_cpu(*port_map); ++ ++ reg_init = of_get_property(np, "ralink,fct2", NULL); ++ if (reg_init) ++ esw->reg_initval_fct2 = be32_to_cpu(*reg_init); ++ ++ reg_init = of_get_property(np, "ralink,fpa2", NULL); ++ if (reg_init) ++ esw->reg_initval_fpa2 = be32_to_cpu(*reg_init); ++ ++ reg_init = of_get_property(np, "ralink,led_polarity", NULL); ++ if (reg_init) ++ esw->reg_led_polarity = 
be32_to_cpu(*reg_init); ++ ++ swdev = &esw->swdev; ++ swdev->of_node = pdev->dev.of_node; ++ swdev->name = "rt305x-esw"; ++ swdev->alias = "rt305x"; ++ swdev->cpu_port = RT305X_ESW_PORT6; ++ swdev->ports = RT305X_ESW_NUM_PORTS; ++ swdev->vlans = RT305X_ESW_NUM_VIDS; ++ swdev->ops = &esw_ops; ++ ++ err = register_switch(swdev, NULL); ++ if (err < 0) { ++ dev_err(&pdev->dev, "register_switch failed\n"); ++ goto unmap_base; ++ } ++ ++ platform_set_drvdata(pdev, esw); ++ ++ esw->pdata = pdata; ++ spin_lock_init(&esw->reg_rw_lock); ++ ++ esw_hw_init(esw); ++ ++ esw_w32(esw, RT305X_ESW_PORT_ST_CHG, RT305X_ESW_REG_ISR); ++ esw_w32(esw, ~RT305X_ESW_PORT_ST_CHG, RT305X_ESW_REG_IMR); ++ request_irq(esw->irq, esw_interrupt, 0, "esw", esw); ++ ++ return 0; ++ ++unmap_base: ++ iounmap(esw->base); ++free_esw: ++ kfree(esw); ++ return err; ++} ++ ++static int esw_remove(struct platform_device *pdev) ++{ ++ struct rt305x_esw *esw; ++ ++ esw = platform_get_drvdata(pdev); ++ if (esw) { ++ unregister_switch(&esw->swdev); ++ platform_set_drvdata(pdev, NULL); ++ iounmap(esw->base); ++ kfree(esw); ++ } ++ ++ return 0; ++} ++ ++static struct platform_driver esw_driver = { ++ .probe = esw_probe, ++ .remove = esw_remove, ++ .driver = { ++ .name = "rt305x-esw", ++ .owner = THIS_MODULE, ++ .of_match_table = ralink_esw_match, ++ }, ++}; ++ ++int __init rtesw_init(void) ++{ ++ return platform_driver_register(&esw_driver); ++} ++ ++void rtesw_exit(void) ++{ ++ platform_driver_unregister(&esw_driver); ++} +--- /dev/null ++++ b/drivers/net/ethernet/ralink/esw_rt3052.h +@@ -0,0 +1,32 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; version 2 of the License ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. ++ * ++ * Copyright (C) 2009-2013 John Crispin ++ */ ++ ++#ifndef _RALINK_ESW_RT3052_H__ ++#define _RALINK_ESW_RT3052_H__ ++ ++#ifdef CONFIG_NET_RALINK_ESW_RT3052 ++ ++int __init rtesw_init(void); ++void rtesw_exit(void); ++ ++#else ++ ++static inline int __init rtesw_init(void) { return 0; } ++static inline void rtesw_exit(void) { } ++ ++#endif ++#endif +--- /dev/null ++++ b/drivers/net/ethernet/ralink/gsw_mt7620a.c +@@ -0,0 +1,566 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; version 2 of the License ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. 
++ * ++ * Copyright (C) 2009-2013 John Crispin ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#include "ralink_soc_eth.h" ++ ++#include ++#include ++#include ++ ++#include ++#include ++ ++#include "ralink_soc_eth.h" ++#include "gsw_mt7620a.h" ++#include "mt7530.h" ++#include "mdio.h" ++ ++#define GSW_REG_PHY_TIMEOUT (5 * HZ) ++ ++#define MT7620A_GSW_REG_PIAC 0x7004 ++ ++#define GSW_NUM_VLANS 16 ++#define GSW_NUM_VIDS 4096 ++#define GSW_NUM_PORTS 7 ++#define GSW_PORT6 6 ++ ++#define GSW_MDIO_ACCESS BIT(31) ++#define GSW_MDIO_READ BIT(19) ++#define GSW_MDIO_WRITE BIT(18) ++#define GSW_MDIO_START BIT(16) ++#define GSW_MDIO_ADDR_SHIFT 20 ++#define GSW_MDIO_REG_SHIFT 25 ++ ++#define GSW_REG_PORT_PMCR(x) (0x3000 + (x * 0x100)) ++#define GSW_REG_PORT_STATUS(x) (0x3008 + (x * 0x100)) ++#define GSW_REG_SMACCR0 0x3fE4 ++#define GSW_REG_SMACCR1 0x3fE8 ++#define GSW_REG_CKGCR 0x3ff0 ++ ++#define GSW_REG_IMR 0x7008 ++#define GSW_REG_ISR 0x700c ++ ++#define SYSC_REG_CFG1 0x14 ++ ++#define PORT_IRQ_ST_CHG 0x7f ++ ++#define SYSCFG1 0x14 ++ ++#define ESW_PHY_POLLING 0x7000 ++ ++#define PMCR_IPG BIT(18) ++#define PMCR_MAC_MODE BIT(16) ++#define PMCR_FORCE BIT(15) ++#define PMCR_TX_EN BIT(14) ++#define PMCR_RX_EN BIT(13) ++#define PMCR_BACKOFF BIT(9) ++#define PMCR_BACKPRES BIT(8) ++#define PMCR_RX_FC BIT(5) ++#define PMCR_TX_FC BIT(4) ++#define PMCR_SPEED(_x) (_x << 2) ++#define PMCR_DUPLEX BIT(1) ++#define PMCR_LINK BIT(0) ++ ++#define PHY_AN_EN BIT(31) ++#define PHY_PRE_EN BIT(30) ++#define PMY_MDC_CONF(_x) ((_x & 0x3f) << 24) ++ ++enum { ++ /* Global attributes. */ ++ GSW_ATTR_ENABLE_VLAN, ++ /* Port attributes. 
*/ ++ GSW_ATTR_PORT_UNTAG, ++}; ++ ++enum { ++ PORT4_EPHY = 0, ++ PORT4_EXT, ++}; ++ ++struct mt7620_gsw { ++ struct device *dev; ++ void __iomem *base; ++ int irq; ++ int port4; ++ long unsigned int autopoll; ++}; ++ ++static inline void gsw_w32(struct mt7620_gsw *gsw, u32 val, unsigned reg) ++{ ++ iowrite32(val, gsw->base + reg); ++} ++ ++static inline u32 gsw_r32(struct mt7620_gsw *gsw, unsigned reg) ++{ ++ return ioread32(gsw->base + reg); ++} ++ ++static int mt7620_mii_busy_wait(struct mt7620_gsw *gsw) ++{ ++ unsigned long t_start = jiffies; ++ ++ while (1) { ++ if (!(gsw_r32(gsw, MT7620A_GSW_REG_PIAC) & GSW_MDIO_ACCESS)) ++ return 0; ++ if (time_after(jiffies, t_start + GSW_REG_PHY_TIMEOUT)) { ++ break; ++ } ++ } ++ ++ printk(KERN_ERR "mdio: MDIO timeout\n"); ++ return -1; ++} ++ ++static u32 _mt7620_mii_write(struct mt7620_gsw *gsw, u32 phy_addr, u32 phy_register, ++ u32 write_data) ++{ ++ if (mt7620_mii_busy_wait(gsw)) ++ return -1; ++ ++ write_data &= 0xffff; ++ ++ gsw_w32(gsw, GSW_MDIO_ACCESS | GSW_MDIO_START | GSW_MDIO_WRITE | ++ (phy_register << GSW_MDIO_REG_SHIFT) | ++ (phy_addr << GSW_MDIO_ADDR_SHIFT) | write_data, ++ MT7620A_GSW_REG_PIAC); ++ ++ if (mt7620_mii_busy_wait(gsw)) ++ return -1; ++ ++ return 0; ++} ++ ++static u32 _mt7620_mii_read(struct mt7620_gsw *gsw, int phy_addr, int phy_reg) ++{ ++ u32 d; ++ ++ if (mt7620_mii_busy_wait(gsw)) ++ return 0xffff; ++ ++ gsw_w32(gsw, GSW_MDIO_ACCESS | GSW_MDIO_START | GSW_MDIO_READ | ++ (phy_reg << GSW_MDIO_REG_SHIFT) | ++ (phy_addr << GSW_MDIO_ADDR_SHIFT), ++ MT7620A_GSW_REG_PIAC); ++ ++ if (mt7620_mii_busy_wait(gsw)) ++ return 0xffff; ++ ++ d = gsw_r32(gsw, MT7620A_GSW_REG_PIAC) & 0xffff; ++ ++ return d; ++} ++ ++int mt7620_mdio_write(struct mii_bus *bus, int phy_addr, int phy_reg, u16 val) ++{ ++ struct fe_priv *priv = bus->priv; ++ struct mt7620_gsw *gsw = (struct mt7620_gsw *) priv->soc->swpriv; ++ ++ return _mt7620_mii_write(gsw, phy_addr, phy_reg, val); ++} ++ ++int mt7620_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg) ++{ ++ struct fe_priv *priv = bus->priv; ++ struct mt7620_gsw *gsw = (struct mt7620_gsw *) priv->soc->swpriv; ++ ++ return _mt7620_mii_read(gsw, phy_addr, phy_reg); ++} ++ ++static unsigned char *fe_speed_str(int speed) ++{ ++ switch (speed) { ++ case 2: ++ case SPEED_1000: ++ return "1000"; ++ case 1: ++ case SPEED_100: ++ return "100"; ++ case 0: ++ case SPEED_10: ++ return "10"; ++ } ++ ++ return "? "; ++} ++ ++int mt7620a_has_carrier(struct fe_priv *priv) ++{ ++ struct mt7620_gsw *gsw = (struct mt7620_gsw *) priv->soc->swpriv; ++ int i; ++ ++ for (i = 0; i < GSW_PORT6; i++) ++ if (gsw_r32(gsw, GSW_REG_PORT_STATUS(i)) & 0x1) ++ return 1; ++ return 0; ++} ++ ++static void mt7620a_handle_carrier(struct fe_priv *priv) ++{ ++ if (!priv->phy) ++ return; ++ ++ if (mt7620a_has_carrier(priv)) ++ netif_carrier_on(priv->netdev); ++ else ++ netif_carrier_off(priv->netdev); ++} ++ ++void mt7620_mdio_link_adjust(struct fe_priv *priv, int port) ++{ ++ if (priv->link[port]) ++ netdev_info(priv->netdev, "port %d link up (%sMbps/%s duplex)\n", ++ port, fe_speed_str(priv->phy->speed[port]), ++ (DUPLEX_FULL == priv->phy->duplex[port]) ? "Full" : "Half"); ++ else ++ netdev_info(priv->netdev, "port %d link down\n", port); ++ mt7620a_handle_carrier(priv); ++} ++ ++static irqreturn_t gsw_interrupt(int irq, void *_priv) ++{ ++ struct fe_priv *priv = (struct fe_priv *) _priv; ++ struct mt7620_gsw *gsw = (struct mt7620_gsw *) priv->soc->swpriv; ++ u32 status; ++ int i, max = (gsw->port4 == PORT4_EPHY) ? 
(4) : (3); ++ ++ status = gsw_r32(gsw, GSW_REG_ISR); ++ if (status & PORT_IRQ_ST_CHG) ++ for (i = 0; i <= max; i++) { ++ u32 status = gsw_r32(gsw, GSW_REG_PORT_STATUS(i)); ++ int link = status & 0x1; ++ ++ if (link != priv->link[i]) { ++ if (link) ++ netdev_info(priv->netdev, "port %d link up (%sMbps/%s duplex)\n", ++ i, fe_speed_str((status >> 2) & 3), ++ (status & 0x2) ? "Full" : "Half"); ++ else ++ netdev_info(priv->netdev, "port %d link down\n", i); ++ } ++ ++ priv->link[i] = link; ++ } ++ mt7620a_handle_carrier(priv); ++ ++ gsw_w32(gsw, status, GSW_REG_ISR); ++ ++ return IRQ_HANDLED; ++} ++ ++static int mt7620_is_bga(void) ++{ ++ u32 bga = rt_sysc_r32(0x0c); ++ ++ return (bga >> 16) & 1; ++} ++ ++static void gsw_auto_poll(struct mt7620_gsw *gsw) ++{ ++ int phy; ++ int lsb = -1, msb = 0; ++ ++ for_each_set_bit(phy, &gsw->autopoll, 32) { ++ if (lsb < 0) ++ lsb = phy; ++ msb = phy; ++ } ++ ++ gsw_w32(gsw, PHY_AN_EN | PHY_PRE_EN | PMY_MDC_CONF(5) | (msb << 8) | lsb, ESW_PHY_POLLING); ++} ++ ++void mt7620_port_init(struct fe_priv *priv, struct device_node *np) ++{ ++ struct mt7620_gsw *gsw = (struct mt7620_gsw *) priv->soc->swpriv; ++ const __be32 *_id = of_get_property(np, "reg", NULL); ++ int phy_mode, size, id; ++ int shift = 12; ++ u32 val, mask = 0; ++ int min = (gsw->port4 == PORT4_EPHY) ? (5) : (4); ++ ++ if (!_id || (be32_to_cpu(*_id) < min) || (be32_to_cpu(*_id) > 5)) { ++ if (_id) ++ pr_err("%s: invalid port id %d\n", np->name, be32_to_cpu(*_id)); ++ else ++ pr_err("%s: invalid port id\n", np->name); ++ return; ++ } ++ ++ id = be32_to_cpu(*_id); ++ ++ if (id == 4) ++ shift = 14; ++ ++ priv->phy->phy_fixed[id] = of_get_property(np, "ralink,fixed-link", &size); ++ if (priv->phy->phy_fixed[id] && (size != (4 * sizeof(*priv->phy->phy_fixed[id])))) { ++ pr_err("%s: invalid fixed link property\n", np->name); ++ priv->phy->phy_fixed[id] = NULL; ++ return; ++ } ++ ++ phy_mode = of_get_phy_mode(np); ++ switch (phy_mode) { ++ case PHY_INTERFACE_MODE_RGMII: ++ mask = 0; ++ break; ++ case PHY_INTERFACE_MODE_MII: ++ mask = 1; ++ break; ++ case PHY_INTERFACE_MODE_RMII: ++ mask = 2; ++ break; ++ default: ++ dev_err(priv->device, "port %d - invalid phy mode\n", id); ++ return; ++ } ++ ++ priv->phy->phy_node[id] = of_parse_phandle(np, "phy-handle", 0); ++ if (!priv->phy->phy_node[id] && !priv->phy->phy_fixed[id]) ++ return; ++ ++ val = rt_sysc_r32(SYSCFG1); ++ val &= ~(3 << shift); ++ val |= mask << shift; ++ rt_sysc_w32(val, SYSCFG1); ++ ++ if (priv->phy->phy_fixed[id]) { ++ const __be32 *link = priv->phy->phy_fixed[id]; ++ int tx_fc, rx_fc; ++ u32 val = 0; ++ ++ priv->phy->speed[id] = be32_to_cpup(link++); ++ tx_fc = be32_to_cpup(link++); ++ rx_fc = be32_to_cpup(link++); ++ priv->phy->duplex[id] = be32_to_cpup(link++); ++ priv->link[id] = 1; ++ ++ switch (priv->phy->speed[id]) { ++ case SPEED_10: ++ val = 0; ++ break; ++ case SPEED_100: ++ val = 1; ++ break; ++ case SPEED_1000: ++ val = 2; ++ break; ++ default: ++ dev_err(priv->device, "invalid link speed: %d\n", priv->phy->speed[id]); ++ priv->phy->phy_fixed[id] = 0; ++ return; ++ } ++ val = PMCR_SPEED(val); ++ val |= PMCR_LINK | PMCR_BACKPRES | PMCR_BACKOFF | PMCR_RX_EN | ++ PMCR_TX_EN | PMCR_FORCE | PMCR_MAC_MODE | PMCR_IPG; ++ if (tx_fc) ++ val |= PMCR_TX_FC; ++ if (rx_fc) ++ val |= PMCR_RX_FC; ++ if (priv->phy->duplex[id]) ++ val |= PMCR_DUPLEX; ++ gsw_w32(gsw, val, GSW_REG_PORT_PMCR(id)); ++ dev_info(priv->device, "using fixed link parameters\n"); ++ return; ++ } ++ ++ if (priv->phy->phy_node[id] && priv->mii_bus->phy_map[id]) { ++ u32 
val = PMCR_BACKPRES | PMCR_BACKOFF | PMCR_RX_EN | ++ PMCR_TX_EN | PMCR_MAC_MODE | PMCR_IPG; ++ ++ gsw_w32(gsw, val, GSW_REG_PORT_PMCR(id)); ++ fe_connect_phy_node(priv, priv->phy->phy_node[id]); ++ gsw->autopoll |= BIT(id); ++ gsw_auto_poll(gsw); ++ return; ++ } ++} ++ ++static void gsw_hw_init(struct mt7620_gsw *gsw) ++{ ++ u32 is_BGA = mt7620_is_bga(); ++ ++ rt_sysc_w32(rt_sysc_r32(SYSC_REG_CFG1) | BIT(8), SYSC_REG_CFG1); ++ gsw_w32(gsw, gsw_r32(gsw, GSW_REG_CKGCR) & ~(0x3 << 4), GSW_REG_CKGCR); ++ ++ /*correct PHY setting L3.0 BGA*/ ++ _mt7620_mii_write(gsw, 1, 31, 0x4000); //global, page 4 ++ ++ _mt7620_mii_write(gsw, 1, 17, 0x7444); ++ if (is_BGA) ++ _mt7620_mii_write(gsw, 1, 19, 0x0114); ++ else ++ _mt7620_mii_write(gsw, 1, 19, 0x0117); ++ ++ _mt7620_mii_write(gsw, 1, 22, 0x10cf); ++ _mt7620_mii_write(gsw, 1, 25, 0x6212); ++ _mt7620_mii_write(gsw, 1, 26, 0x0777); ++ _mt7620_mii_write(gsw, 1, 29, 0x4000); ++ _mt7620_mii_write(gsw, 1, 28, 0xc077); ++ _mt7620_mii_write(gsw, 1, 24, 0x0000); ++ ++ _mt7620_mii_write(gsw, 1, 31, 0x3000); //global, page 3 ++ _mt7620_mii_write(gsw, 1, 17, 0x4838); ++ ++ _mt7620_mii_write(gsw, 1, 31, 0x2000); //global, page 2 ++ if (is_BGA) { ++ _mt7620_mii_write(gsw, 1, 21, 0x0515); ++ _mt7620_mii_write(gsw, 1, 22, 0x0053); ++ _mt7620_mii_write(gsw, 1, 23, 0x00bf); ++ _mt7620_mii_write(gsw, 1, 24, 0x0aaf); ++ _mt7620_mii_write(gsw, 1, 25, 0x0fad); ++ _mt7620_mii_write(gsw, 1, 26, 0x0fc1); ++ } else { ++ _mt7620_mii_write(gsw, 1, 21, 0x0517); ++ _mt7620_mii_write(gsw, 1, 22, 0x0fd2); ++ _mt7620_mii_write(gsw, 1, 23, 0x00bf); ++ _mt7620_mii_write(gsw, 1, 24, 0x0aab); ++ _mt7620_mii_write(gsw, 1, 25, 0x00ae); ++ _mt7620_mii_write(gsw, 1, 26, 0x0fff); ++ } ++ _mt7620_mii_write(gsw, 1, 31, 0x1000); //global, page 1 ++ _mt7620_mii_write(gsw, 1, 17, 0xe7f8); ++ ++ _mt7620_mii_write(gsw, 1, 31, 0x8000); //local, page 0 ++ _mt7620_mii_write(gsw, 0, 30, 0xa000); ++ _mt7620_mii_write(gsw, 1, 30, 0xa000); ++ _mt7620_mii_write(gsw, 2, 30, 0xa000); ++ _mt7620_mii_write(gsw, 3, 30, 0xa000); ++ ++ _mt7620_mii_write(gsw, 0, 4, 0x05e1); ++ _mt7620_mii_write(gsw, 1, 4, 0x05e1); ++ _mt7620_mii_write(gsw, 2, 4, 0x05e1); ++ _mt7620_mii_write(gsw, 3, 4, 0x05e1); ++ _mt7620_mii_write(gsw, 1, 31, 0xa000); //local, page 2 ++ _mt7620_mii_write(gsw, 0, 16, 0x1111); ++ _mt7620_mii_write(gsw, 1, 16, 0x1010); ++ _mt7620_mii_write(gsw, 2, 16, 0x1515); ++ _mt7620_mii_write(gsw, 3, 16, 0x0f0f); ++ ++ /* CPU Port6 Force Link 1G, FC ON */ ++ gsw_w32(gsw, 0x5e33b, GSW_REG_PORT_PMCR(6)); ++ /* Set Port6 CPU Port */ ++ gsw_w32(gsw, 0x7f7f7fe0, 0x0010); ++ ++ /* setup port 4 */ ++ if (gsw->port4 == PORT4_EPHY) { ++ u32 val = rt_sysc_r32(SYSCFG1); ++ val |= 3 << 14; ++ rt_sysc_w32(val, SYSCFG1); ++ _mt7620_mii_write(gsw, 4, 30, 0xa000); ++ _mt7620_mii_write(gsw, 4, 4, 0x05e1); ++ _mt7620_mii_write(gsw, 4, 16, 0x1313); ++ pr_info("gsw: setting port4 to ephy mode\n"); ++ } ++} ++ ++void mt7620_set_mac(struct fe_priv *priv, unsigned char *mac) ++{ ++ struct mt7620_gsw *gsw = (struct mt7620_gsw *) priv->soc->swpriv; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&priv->page_lock, flags); ++ gsw_w32(gsw, (mac[0] << 8) | mac[1], GSW_REG_SMACCR1); ++ gsw_w32(gsw, (mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5], ++ GSW_REG_SMACCR0); ++ spin_unlock_irqrestore(&priv->page_lock, flags); ++} ++ ++static struct of_device_id gsw_match[] = { ++ { .compatible = "ralink,mt7620a-gsw" }, ++ {} ++}; ++ ++int mt7620_gsw_config(struct fe_priv *priv) ++{ ++ struct mt7620_gsw *gsw = (struct mt7620_gsw *) 
priv->soc->swpriv; ++ ++ /* is the mt7530 internal or external */ ++ if ((_mt7620_mii_read(gsw, 0x1f, 2) == 1) && (_mt7620_mii_read(gsw, 0x1f, 3) == 0xbeef)) ++ mt7530_probe(priv->device, NULL, priv->mii_bus); ++ else ++ mt7530_probe(priv->device, gsw->base, NULL); ++ ++ return 0; ++} ++ ++int mt7620_gsw_probe(struct fe_priv *priv) ++{ ++ struct mt7620_gsw *gsw; ++ struct device_node *np; ++ const char *port4 = NULL; ++ ++ np = of_find_matching_node(NULL, gsw_match); ++ if (!np) { ++ dev_err(priv->device, "no gsw node found\n"); ++ return -EINVAL; ++ } ++ np = of_node_get(np); ++ ++ gsw = devm_kzalloc(priv->device, sizeof(struct mt7620_gsw), GFP_KERNEL); ++ if (!gsw) { ++ dev_err(priv->device, "no gsw memory for private data\n"); ++ return -ENOMEM; ++ } ++ ++ gsw->irq = irq_of_parse_and_map(np, 0); ++ if (!gsw->irq) { ++ dev_err(priv->device, "no gsw irq resource found\n"); ++ return -ENOMEM; ++ } ++ ++ gsw->base = of_iomap(np, 0); ++ if (!gsw->base) { ++ dev_err(priv->device, "gsw ioremap failed\n"); ++ return -ENOMEM; ++ } ++ ++ gsw->dev = priv->device; ++ priv->soc->swpriv = gsw; ++ ++ of_property_read_string(np, "ralink,port4", &port4); ++ if (port4 && !strcmp(port4, "ephy")) ++ gsw->port4 = PORT4_EPHY; ++ else if (port4 && !strcmp(port4, "gmac")) ++ gsw->port4 = PORT4_EXT; ++ else ++ WARN_ON(port4); ++ ++ gsw_hw_init(gsw); ++ ++ gsw_w32(gsw, ~PORT_IRQ_ST_CHG, GSW_REG_IMR); ++ request_irq(gsw->irq, gsw_interrupt, 0, "gsw", priv); ++ ++ return 0; ++} +--- /dev/null ++++ b/drivers/net/ethernet/ralink/gsw_mt7620a.h +@@ -0,0 +1,30 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; version 2 of the License ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. ++ * ++ * Copyright (C) 2009-2013 John Crispin ++ */ ++ ++#ifndef _RALINK_GSW_MT7620_H__ ++#define _RALINK_GSW_MT7620_H__ ++ ++extern int mt7620_gsw_config(struct fe_priv *priv); ++extern int mt7620_gsw_probe(struct fe_priv *priv); ++extern void mt7620_set_mac(struct fe_priv *priv, unsigned char *mac); ++extern int mt7620_mdio_write(struct mii_bus *bus, int phy_addr, int phy_reg, u16 val); ++extern int mt7620_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg); ++extern void mt7620_mdio_link_adjust(struct fe_priv *priv, int port); ++extern void mt7620_port_init(struct fe_priv *priv, struct device_node *np); ++extern int mt7620a_has_carrier(struct fe_priv *priv); ++ ++#endif +--- /dev/null ++++ b/drivers/net/ethernet/ralink/mdio.c +@@ -0,0 +1,244 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; version 2 of the License ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. ++ * ++ * Copyright (C) 2009-2013 John Crispin ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "ralink_soc_eth.h" ++#include "mdio.h" ++ ++static int fe_mdio_reset(struct mii_bus *bus) ++{ ++ /* TODO */ ++ return 0; ++} ++ ++static void fe_phy_link_adjust(struct net_device *dev) ++{ ++ struct fe_priv *priv = netdev_priv(dev); ++ unsigned long flags; ++ int i; ++ ++ spin_lock_irqsave(&priv->phy->lock, flags); ++ for (i = 0; i < 8; i++) { ++ if (priv->phy->phy_node[i]) { ++ struct phy_device *phydev = priv->phy->phy[i]; ++ int status_change = 0; ++ ++ if (phydev->link) ++ if (priv->phy->duplex[i] != phydev->duplex || ++ priv->phy->speed[i] != phydev->speed) ++ status_change = 1; ++ ++ if (phydev->link != priv->link[i]) ++ status_change = 1; ++ ++ switch (phydev->speed) { ++ case SPEED_1000: ++ case SPEED_100: ++ case SPEED_10: ++ priv->link[i] = phydev->link; ++ priv->phy->duplex[i] = phydev->duplex; ++ priv->phy->speed[i] = phydev->speed; ++ ++ if (status_change && priv->soc->mdio_adjust_link) ++ priv->soc->mdio_adjust_link(priv, i); ++ break; ++ } ++ } ++ } ++} ++ ++int fe_connect_phy_node(struct fe_priv *priv, struct device_node *phy_node) ++{ ++ const __be32 *_port = NULL; ++ struct phy_device *phydev; ++ int phy_mode, port; ++ ++ _port = of_get_property(phy_node, "reg", NULL); ++ ++ if (!_port || (be32_to_cpu(*_port) >= 0x20)) { ++ pr_err("%s: invalid port id\n", phy_node->name); ++ return -EINVAL; ++ } ++ port = be32_to_cpu(*_port); ++ phy_mode = of_get_phy_mode(phy_node); ++ if (phy_mode < 0) { ++ dev_err(priv->device, "incorrect phy-mode %d\n", phy_mode); ++ priv->phy->phy_node[port] = NULL; ++ return -EINVAL; ++ } ++ ++ phydev = of_phy_connect(priv->netdev, phy_node, fe_phy_link_adjust, ++ 0, phy_mode); ++ if (IS_ERR(phydev)) { ++ dev_err(priv->device, "could not connect to PHY\n"); ++ priv->phy->phy_node[port] = NULL; ++ return PTR_ERR(phydev); ++ } ++ ++ phydev->supported &= PHY_GBIT_FEATURES; ++ phydev->advertising = phydev->supported; ++ phydev->no_auto_carrier_off = 1; ++ ++ dev_info(priv->device, ++ "connected port %d to PHY at %s [uid=%08x, driver=%s]\n", ++ port, dev_name(&phydev->dev), phydev->phy_id, ++ phydev->drv->name); ++ ++ priv->phy->phy[port] = phydev; ++ priv->link[port] = 0; ++ ++ return 0; ++} ++ ++static int fe_phy_connect(struct fe_priv *priv) ++{ ++ return 0; ++} ++ ++static void fe_phy_disconnect(struct fe_priv *priv) ++{ ++ unsigned long flags; ++ int i; ++ ++ for (i = 0; i < 8; i++) ++ if (priv->phy->phy_fixed[i]) { ++ spin_lock_irqsave(&priv->phy->lock, flags); ++ priv->link[i] = 0; ++ if (priv->soc->mdio_adjust_link) ++ priv->soc->mdio_adjust_link(priv, i); ++ spin_unlock_irqrestore(&priv->phy->lock, flags); ++ } else if (priv->phy->phy[i]) { ++ phy_disconnect(priv->phy->phy[i]); ++ } ++} ++ ++static void fe_phy_start(struct fe_priv *priv) ++{ ++ unsigned long flags; ++ int i; ++ ++ for (i = 0; i < 8; i++) { ++ if (priv->phy->phy_fixed[i]) { ++ spin_lock_irqsave(&priv->phy->lock, flags); ++ priv->link[i] = 1; ++ if (priv->soc->mdio_adjust_link) ++ priv->soc->mdio_adjust_link(priv, i); ++ spin_unlock_irqrestore(&priv->phy->lock, flags); ++ } else if (priv->phy->phy[i]) { ++ phy_start(priv->phy->phy[i]); ++ } ++ } 
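fe_phy_link_adjust() above only calls the SoC's mdio_adjust_link hook when the PHY state really differs from the cached per-port copy. A minimal sketch of that comparison (struct and helper names are invented for the example):

#include <stdio.h>

/* Cached per-port state, as fe_phy_link_adjust() keeps it in priv->phy. */
struct port_state {
        int link;
        int speed;
        int duplex;
};

/* Mirror of the status_change test: a change of speed or duplex while the
 * link is up, or any change of the link bit itself, triggers an update. */
static int link_state_changed(const struct port_state *cached,
                              int link, int speed, int duplex)
{
        if (link && (cached->duplex != duplex || cached->speed != speed))
                return 1;
        return link != cached->link;
}

int main(void)
{
        struct port_state cached = { .link = 1, .speed = 100, .duplex = 1 };

        printf("renegotiated to 1000/full: %d\n",
               link_state_changed(&cached, 1, 1000, 1)); /* 1: reprogram the MAC */
        printf("unchanged 100/full:        %d\n",
               link_state_changed(&cached, 1, 100, 1));  /* 0: nothing to do */
        return 0;
}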
++} ++ ++static void fe_phy_stop(struct fe_priv *priv) ++{ ++ unsigned long flags; ++ int i; ++ ++ for (i = 0; i < 8; i++) ++ if (priv->phy->phy_fixed[i]) { ++ spin_lock_irqsave(&priv->phy->lock, flags); ++ priv->link[i] = 0; ++ if (priv->soc->mdio_adjust_link) ++ priv->soc->mdio_adjust_link(priv, i); ++ spin_unlock_irqrestore(&priv->phy->lock, flags); ++ } else if (priv->phy->phy[i]) { ++ phy_stop(priv->phy->phy[i]); ++ } ++} ++ ++static struct fe_phy phy_ralink = { ++ .connect = fe_phy_connect, ++ .disconnect = fe_phy_disconnect, ++ .start = fe_phy_start, ++ .stop = fe_phy_stop, ++}; ++ ++int fe_mdio_init(struct fe_priv *priv) ++{ ++ struct device_node *mii_np; ++ int err; ++ ++ if (!priv->soc->mdio_read || !priv->soc->mdio_write) ++ return 0; ++ ++ spin_lock_init(&phy_ralink.lock); ++ priv->phy = &phy_ralink; ++ ++ mii_np = of_get_child_by_name(priv->device->of_node, "mdio-bus"); ++ if (!mii_np) { ++ dev_err(priv->device, "no %s child node found", "mdio-bus"); ++ return -ENODEV; ++ } ++ ++ if (!of_device_is_available(mii_np)) { ++ err = 0; ++ goto err_put_node; ++ } ++ ++ priv->mii_bus = mdiobus_alloc(); ++ if (priv->mii_bus == NULL) { ++ err = -ENOMEM; ++ goto err_put_node; ++ } ++ ++ priv->mii_bus->name = "mdio"; ++ priv->mii_bus->read = priv->soc->mdio_read; ++ priv->mii_bus->write = priv->soc->mdio_write; ++ priv->mii_bus->reset = fe_mdio_reset; ++ priv->mii_bus->irq = priv->mii_irq; ++ priv->mii_bus->priv = priv; ++ priv->mii_bus->parent = priv->device; ++ ++ snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s", mii_np->name); ++ err = of_mdiobus_register(priv->mii_bus, mii_np); ++ if (err) ++ goto err_free_bus; ++ ++ return 0; ++ ++err_free_bus: ++ kfree(priv->mii_bus); ++err_put_node: ++ of_node_put(mii_np); ++ priv->mii_bus = NULL; ++ return err; ++} ++ ++void fe_mdio_cleanup(struct fe_priv *priv) ++{ ++ if (!priv->mii_bus) ++ return; ++ ++ mdiobus_unregister(priv->mii_bus); ++ of_node_put(priv->mii_bus->dev.of_node); ++ kfree(priv->mii_bus); ++} +--- /dev/null ++++ b/drivers/net/ethernet/ralink/mdio.h +@@ -0,0 +1,29 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; version 2 of the License ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. 
++ * ++ * Copyright (C) 2009-2013 John Crispin ++ */ ++ ++#ifndef _RALINK_MDIO_H__ ++#define _RALINK_MDIO_H__ ++ ++#ifdef CONFIG_NET_RALINK_MDIO ++extern int fe_mdio_init(struct fe_priv *priv); ++extern void fe_mdio_cleanup(struct fe_priv *priv); ++extern int fe_connect_phy_node(struct fe_priv *priv, struct device_node *phy_node); ++#else ++static inline int fe_mdio_init(struct fe_priv *priv) { return 0; } ++static inline void fe_mdio_cleanup(struct fe_priv *priv) {} ++#endif ++#endif +--- /dev/null ++++ b/drivers/net/ethernet/ralink/mdio_rt2880.c +@@ -0,0 +1,232 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; version 2 of the License ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. ++ * ++ * Copyright (C) 2009-2013 John Crispin ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "ralink_soc_eth.h" ++#include "mdio_rt2880.h" ++#include "mdio.h" ++ ++#define FE_MDIO_RETRY 1000 ++ ++static unsigned char *rt2880_speed_str(struct fe_priv *priv) ++{ ++ switch (priv->phy->speed[0]) { ++ case SPEED_1000: ++ return "1000"; ++ case SPEED_100: ++ return "100"; ++ case SPEED_10: ++ return "10"; ++ } ++ ++ return "?"; ++} ++ ++void rt2880_mdio_link_adjust(struct fe_priv *priv, int port) ++{ ++ u32 mdio_cfg; ++ ++ if (!priv->link[0]) { ++ netif_carrier_off(priv->netdev); ++ netdev_info(priv->netdev, "link down\n"); ++ return; ++ } ++ ++ mdio_cfg = FE_MDIO_CFG_TX_CLK_SKEW_200 | ++ FE_MDIO_CFG_RX_CLK_SKEW_200 | ++ FE_MDIO_CFG_GP1_FRC_EN; ++ ++ if (priv->phy->duplex[0] == DUPLEX_FULL) ++ mdio_cfg |= FE_MDIO_CFG_GP1_DUPLEX; ++ ++ if (priv->phy->tx_fc[0]) ++ mdio_cfg |= FE_MDIO_CFG_GP1_FC_TX; ++ ++ if (priv->phy->rx_fc[0]) ++ mdio_cfg |= FE_MDIO_CFG_GP1_FC_RX; ++ ++ switch (priv->phy->speed[0]) { ++ case SPEED_10: ++ mdio_cfg |= FE_MDIO_CFG_GP1_SPEED_10; ++ break; ++ case SPEED_100: ++ mdio_cfg |= FE_MDIO_CFG_GP1_SPEED_100; ++ break; ++ case SPEED_1000: ++ mdio_cfg |= FE_MDIO_CFG_GP1_SPEED_1000; ++ break; ++ default: ++ BUG(); ++ } ++ ++ fe_w32(mdio_cfg, FE_MDIO_CFG); ++ ++ netif_carrier_on(priv->netdev); ++ netdev_info(priv->netdev, "link up (%sMbps/%s duplex)\n", ++ rt2880_speed_str(priv), ++ (DUPLEX_FULL == priv->phy->duplex[0]) ? 
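rt2880_mdio_link_adjust() above rebuilds FE_MDIO_CFG from the negotiated speed, duplex and flow-control state whenever the link comes up. A stand-alone sketch of the bit composition (macro names are shortened; the bit values are taken from ralink_soc_eth.h further down in this patch):

#include <stdio.h>

#define GP1_FRC_EN       (1u << 15)
#define GP1_SPEED_100    (1u << 13)
#define GP1_SPEED_1000   (2u << 13)
#define GP1_DUPLEX       (1u << 12)
#define GP1_FC_TX        (1u << 11)
#define GP1_FC_RX        (1u << 10)
#define RX_CLK_SKEW_200  (1u << 2)
#define TX_CLK_SKEW_200  (1u << 0)

static unsigned int build_mdio_cfg(int speed, int full_duplex, int tx_fc, int rx_fc)
{
        unsigned int cfg = TX_CLK_SKEW_200 | RX_CLK_SKEW_200 | GP1_FRC_EN;

        if (full_duplex)
                cfg |= GP1_DUPLEX;
        if (tx_fc)
                cfg |= GP1_FC_TX;
        if (rx_fc)
                cfg |= GP1_FC_RX;
        if (speed == 1000)
                cfg |= GP1_SPEED_1000;
        else if (speed == 100)
                cfg |= GP1_SPEED_100;
        /* 10 Mbit/s is the all-zero speed encoding, nothing to OR in */

        return cfg;
}

int main(void)
{
        printf("1000/full, fc both ways: 0x%08x\n", build_mdio_cfg(1000, 1, 1, 1));
        printf("100/half, no fc:         0x%08x\n", build_mdio_cfg(100, 0, 0, 0));
        return 0;
}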
"Full" : "Half"); ++} ++ ++static int rt2880_mdio_wait_ready(struct fe_priv *priv) ++{ ++ int retries; ++ ++ retries = FE_MDIO_RETRY; ++ while (1) { ++ u32 t; ++ ++ t = fe_r32(FE_MDIO_ACCESS); ++ if ((t & (0x1 << 31)) == 0) ++ return 0; ++ ++ if (retries-- == 0) ++ break; ++ ++ udelay(1); ++ } ++ ++ dev_err(priv->device, "MDIO operation timed out\n"); ++ return -ETIMEDOUT; ++} ++ ++int rt2880_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg) ++{ ++ struct fe_priv *priv = bus->priv; ++ int err; ++ u32 t; ++ ++ err = rt2880_mdio_wait_ready(priv); ++ if (err) ++ return 0xffff; ++ ++ t = (phy_addr << 24) | (phy_reg << 16); ++ fe_w32(t, FE_MDIO_ACCESS); ++ t |= (1 << 31); ++ fe_w32(t, FE_MDIO_ACCESS); ++ ++ err = rt2880_mdio_wait_ready(priv); ++ if (err) ++ return 0xffff; ++ ++ pr_info("%s: addr=%04x, reg=%04x, value=%04x\n", __func__, ++ phy_addr, phy_reg, fe_r32(FE_MDIO_ACCESS) & 0xffff); ++ ++ return fe_r32(FE_MDIO_ACCESS) & 0xffff; ++} ++ ++int rt2880_mdio_write(struct mii_bus *bus, int phy_addr, int phy_reg, u16 val) ++{ ++ struct fe_priv *priv = bus->priv; ++ int err; ++ u32 t; ++ ++ pr_info("%s: addr=%04x, reg=%04x, value=%04x\n", __func__, ++ phy_addr, phy_reg, fe_r32(FE_MDIO_ACCESS) & 0xffff); ++ ++ err = rt2880_mdio_wait_ready(priv); ++ if (err) ++ return err; ++ ++ t = (1 << 30) | (phy_addr << 24) | (phy_reg << 16) | val; ++ fe_w32(t, FE_MDIO_ACCESS); ++ t |= (1 << 31); ++ fe_w32(t, FE_MDIO_ACCESS); ++ ++ return rt2880_mdio_wait_ready(priv); ++} ++ ++void rt2880_port_init(struct fe_priv *priv, struct device_node *np) ++{ ++ const __be32 *id = of_get_property(np, "reg", NULL); ++ const __be32 *link; ++ int size; ++ int phy_mode; ++ ++ if (!id || (be32_to_cpu(*id) != 0)) { ++ pr_err("%s: invalid port id\n", np->name); ++ return; ++ } ++ ++ priv->phy->phy_fixed[0] = of_get_property(np, "ralink,fixed-link", &size); ++ if (priv->phy->phy_fixed[0] && (size != (4 * sizeof(*priv->phy->phy_fixed[0])))) { ++ pr_err("%s: invalid fixed link property\n", np->name); ++ priv->phy->phy_fixed[0] = NULL; ++ return; ++ } ++ ++ phy_mode = of_get_phy_mode(np); ++ switch (phy_mode) { ++ case PHY_INTERFACE_MODE_RGMII: ++ break; ++ case PHY_INTERFACE_MODE_MII: ++ break; ++ case PHY_INTERFACE_MODE_RMII: ++ break; ++ default: ++ if (!priv->phy->phy_fixed[0]) ++ dev_err(priv->device, "port %d - invalid phy mode\n", priv->phy->speed[0]); ++ break; ++ } ++ ++ priv->phy->phy_node[0] = of_parse_phandle(np, "phy-handle", 0); ++ if (!priv->phy->phy_node[0] && !priv->phy->phy_fixed[0]) ++ return; ++ ++ if (priv->phy->phy_fixed[0]) { ++ link = priv->phy->phy_fixed[0]; ++ priv->phy->speed[0] = be32_to_cpup(link++); ++ priv->phy->duplex[0] = be32_to_cpup(link++); ++ priv->phy->tx_fc[0] = be32_to_cpup(link++); ++ priv->phy->rx_fc[0] = be32_to_cpup(link++); ++ ++ priv->link[0] = 1; ++ switch (priv->phy->speed[0]) { ++ case SPEED_10: ++ break; ++ case SPEED_100: ++ break; ++ case SPEED_1000: ++ break; ++ default: ++ dev_err(priv->device, "invalid link speed: %d\n", priv->phy->speed[0]); ++ priv->phy->phy_fixed[0] = 0; ++ return; ++ } ++ dev_info(priv->device, "using fixed link parameters\n"); ++ rt2880_mdio_link_adjust(priv, 0); ++ return; ++ } ++ if (priv->phy->phy_node[0] && priv->mii_bus->phy_map[0]) { ++ fe_connect_phy_node(priv, priv->phy->phy_node[0]); ++ } ++ ++ return; ++} +--- /dev/null ++++ b/drivers/net/ethernet/ralink/mdio_rt2880.h +@@ -0,0 +1,26 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ 
* the Free Software Foundation; version 2 of the License ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. ++ * ++ * Copyright (C) 2009-2013 John Crispin ++ */ ++ ++#ifndef _RALINK_MDIO_RT2880_H__ ++#define _RALINK_MDIO_RT2880_H__ ++ ++void rt2880_mdio_link_adjust(struct fe_priv *priv, int port); ++int rt2880_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg); ++int rt2880_mdio_write(struct mii_bus *bus, int phy_addr, int phy_reg, u16 val); ++void rt2880_port_init(struct fe_priv *priv, struct device_node *np); ++ ++#endif +--- /dev/null ++++ b/drivers/net/ethernet/ralink/mt7530.c +@@ -0,0 +1,467 @@ ++/* ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License ++ * as published by the Free Software Foundation; either version 2 ++ * of the License, or (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * Copyright (C) 2013 John Crispin ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "mt7530.h" ++ ++#define MT7530_CPU_PORT 6 ++#define MT7530_NUM_PORTS 7 ++#define MT7530_NUM_VLANS 16 ++#define MT7530_NUM_VIDS 16 ++ ++#define REG_ESW_VLAN_VTCR 0x90 ++#define REG_ESW_VLAN_VAWD1 0x94 ++#define REG_ESW_VLAN_VAWD2 0x98 ++ ++enum { ++ /* Global attributes. 
*/ ++ MT7530_ATTR_ENABLE_VLAN, ++}; ++ ++struct mt7530_port { ++ u16 pvid; ++}; ++ ++struct mt7530_vlan { ++ u8 ports; ++}; ++ ++struct mt7530_priv { ++ void __iomem *base; ++ struct mii_bus *bus; ++ struct switch_dev swdev; ++ ++ bool global_vlan_enable; ++ struct mt7530_vlan vlans[MT7530_NUM_VLANS]; ++ struct mt7530_port ports[MT7530_NUM_PORTS]; ++}; ++ ++struct mt7530_mapping { ++ char *name; ++ u8 pvids[6]; ++ u8 vlans[8]; ++} mt7530_defaults[] = { ++ { ++ .name = "llllw", ++ .pvids = { 1, 1, 1, 1, 2, 1 }, ++ .vlans = { 0, 0x6f, 0x50 }, ++ }, { ++ .name = "wllll", ++ .pvids = { 2, 1, 1, 1, 1, 1 }, ++ .vlans = { 0, 0x7e, 0x41 }, ++ }, ++}; ++ ++struct mt7530_mapping* ++mt7530_find_mapping(struct device_node *np) ++{ ++ const char *map; ++ int i; ++ ++ if (of_property_read_string(np, "ralink,port-map", &map)) ++ return NULL; ++ ++ for (i = 0; i < ARRAY_SIZE(mt7530_defaults); i++) ++ if (!strcmp(map, mt7530_defaults[i].name)) ++ return &mt7530_defaults[i]; ++ ++ return NULL; ++} ++ ++static void ++mt7530_apply_mapping(struct mt7530_priv *mt7530, struct mt7530_mapping *map) ++{ ++ int i = 0; ++ ++ mt7530->global_vlan_enable = 1; ++ ++ for (i = 0; i < 6; i++) ++ mt7530->ports[i].pvid = map->pvids[i]; ++ for (i = 0; i < 8; i++) ++ mt7530->vlans[i].ports = map->vlans[i]; ++} ++ ++static int ++mt7530_reset_switch(struct switch_dev *dev) ++{ ++ struct mt7530_priv *priv = container_of(dev, struct mt7530_priv, swdev); ++ ++ memset(priv->ports, 0, sizeof(priv->ports)); ++ memset(priv->vlans, 0, sizeof(priv->vlans)); ++ ++ return 0; ++} ++ ++static int ++mt7530_get_vlan_enable(struct switch_dev *dev, ++ const struct switch_attr *attr, ++ struct switch_val *val) ++{ ++ struct mt7530_priv *priv = container_of(dev, struct mt7530_priv, swdev); ++ ++ val->value.i = priv->global_vlan_enable; ++ ++ return 0; ++} ++ ++static int ++mt7530_set_vlan_enable(struct switch_dev *dev, ++ const struct switch_attr *attr, ++ struct switch_val *val) ++{ ++ struct mt7530_priv *priv = container_of(dev, struct mt7530_priv, swdev); ++ ++ priv->global_vlan_enable = val->value.i != 0; ++ ++ return 0; ++} ++ ++static u32 ++mt7530_r32(struct mt7530_priv *priv, u32 reg) ++{ ++ if (priv->bus) { ++ u16 high, low; ++ ++ mdiobus_write(priv->bus, 0x1f, 0x1f, (reg >> 6) & 0x3ff); ++ low = mdiobus_read(priv->bus, 0x1f, (reg >> 2) & 0xf); ++ high = mdiobus_read(priv->bus, 0x1f, 0x10); ++ ++ return (high << 16) | (low & 0xffff); ++ } ++ ++ return ioread32(priv->base + reg); ++} ++ ++static void ++mt7530_w32(struct mt7530_priv *priv, u32 reg, u32 val) ++{ ++ if (priv->bus) { ++ mdiobus_write(priv->bus, 0x1f, 0x1f, (reg >> 6) & 0x3ff); ++ mdiobus_write(priv->bus, 0x1f, (reg >> 2) & 0xf, val & 0xffff); ++ mdiobus_write(priv->bus, 0x1f, 0x10, val >> 16); ++ return; ++ } ++ ++ iowrite32(val, priv->base + reg); ++} ++ ++static void ++mt7530_vtcr(struct mt7530_priv *priv, u32 cmd, u32 val) ++{ ++ int i; ++ ++ mt7530_w32(priv, REG_ESW_VLAN_VTCR, BIT(31) | (cmd << 12) | val); ++ ++ for (i = 0; i < 20; i++) { ++ u32 val = mt7530_r32(priv, REG_ESW_VLAN_VTCR); ++ ++ if ((val & BIT(31)) == 0) ++ break; ++ ++ udelay(1000); ++ } ++ if (i == 20) ++ printk("mt7530: vtcr timeout\n"); ++} ++ ++static int ++mt7530_get_port_pvid(struct switch_dev *dev, int port, int *val) ++{ ++ struct mt7530_priv *priv = container_of(dev, struct mt7530_priv, swdev); ++ ++ if (port >= MT7530_NUM_PORTS) ++ return -EINVAL; ++ ++ *val = mt7530_r32(priv, 0x2014 + (0x100 * port)); ++ *val &= 0xff; ++ ++ return 0; ++} ++ ++static int ++mt7530_set_port_pvid(struct switch_dev 
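mt7530_r32()/mt7530_w32() above reach the registers of an MDIO-attached MT7530 indirectly: the 32-bit register offset is turned into a page number (written to pseudo-register 0x1f) plus a low register number, and the high 16 bits of the data travel via register 0x10. A sketch of that address split (register offsets as used elsewhere in this file):

#include <stdio.h>
#include <stdint.h>

static void mt7530_split_reg(uint32_t reg, unsigned int *page, unsigned int *low)
{
        *page = (reg >> 6) & 0x3ff;
        *low = (reg >> 2) & 0xf;
}

int main(void)
{
        unsigned int page, low;

        mt7530_split_reg(0x2004, &page, &low);  /* a port control register from apply_config() */
        printf("reg 0x2004 -> page 0x%03x, low reg 0x%x\n", page, low);

        mt7530_split_reg(0x3008, &page, &low);  /* port 0 status word read in get_port_link() */
        printf("reg 0x3008 -> page 0x%03x, low reg 0x%x\n", page, low);
        return 0;
}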
*dev, int port, int pvid) ++{ ++ struct mt7530_priv *priv = container_of(dev, struct mt7530_priv, swdev); ++ ++ if (port >= MT7530_NUM_PORTS) ++ return -1; ++ ++ priv->ports[port].pvid = pvid; ++ ++ return 0; ++} ++ ++static int ++mt7530_get_vlan_ports(struct switch_dev *dev, struct switch_val *val) ++{ ++ struct mt7530_priv *priv = container_of(dev, struct mt7530_priv, swdev); ++ u32 member; ++ int i; ++ ++ val->len = 0; ++ ++ if (val->port_vlan < 0 || val->port_vlan >= MT7530_NUM_VIDS) ++ return -EINVAL; ++ ++ mt7530_vtcr(priv, 0, val->port_vlan); ++ member = mt7530_r32(priv, REG_ESW_VLAN_VAWD1); ++ member >>= 16; ++ member &= 0xff; ++ ++ for (i = 0; i < MT7530_NUM_PORTS; i++) { ++ struct switch_port *p; ++ if (!(member & BIT(i))) ++ continue; ++ ++ p = &val->value.ports[val->len++]; ++ p->id = i; ++ p->flags = 0; ++ } ++ ++ return 0; ++} ++ ++static int ++mt7530_set_vlan_ports(struct switch_dev *dev, struct switch_val *val) ++{ ++ struct mt7530_priv *priv = container_of(dev, struct mt7530_priv, swdev); ++ int ports = 0; ++ int i; ++ ++ if (val->port_vlan < 0 || val->port_vlan >= MT7530_NUM_VIDS || ++ val->len > MT7530_NUM_PORTS) ++ return -EINVAL; ++ ++ for (i = 0; i < val->len; i++) { ++ struct switch_port *p = &val->value.ports[i]; ++ ++ if (p->id >= MT7530_NUM_PORTS) ++ return -EINVAL; ++ ++ ports |= BIT(p->id); ++ } ++ priv->vlans[val->port_vlan].ports = ports; ++ ++ return 0; ++} ++ ++static int ++mt7530_apply_config(struct switch_dev *dev) ++{ ++ struct mt7530_priv *priv = container_of(dev, struct mt7530_priv, swdev); ++ int i; ++ ++ if (!priv->global_vlan_enable) { ++ mt7530_w32(priv, 0x2004, 0xff000); ++ mt7530_w32(priv, 0x2104, 0xff000); ++ mt7530_w32(priv, 0x2204, 0xff000); ++ mt7530_w32(priv, 0x2304, 0xff000); ++ mt7530_w32(priv, 0x2404, 0xff000); ++ mt7530_w32(priv, 0x2504, 0xff000); ++ mt7530_w32(priv, 0x2604, 0xff000); ++ mt7530_w32(priv, 0x2010, 0x810000c); ++ mt7530_w32(priv, 0x2110, 0x810000c); ++ mt7530_w32(priv, 0x2210, 0x810000c); ++ mt7530_w32(priv, 0x2310, 0x810000c); ++ mt7530_w32(priv, 0x2410, 0x810000c); ++ mt7530_w32(priv, 0x2510, 0x810000c); ++ mt7530_w32(priv, 0x2610, 0x810000c); ++ return 0; ++ } ++ ++ // LAN/WAN ports as security mode ++ mt7530_w32(priv, 0x2004, 0xff0003); ++ mt7530_w32(priv, 0x2104, 0xff0003); ++ mt7530_w32(priv, 0x2204, 0xff0003); ++ mt7530_w32(priv, 0x2304, 0xff0003); ++ mt7530_w32(priv, 0x2404, 0xff0003); ++ mt7530_w32(priv, 0x2504, 0xff0003); ++ // LAN/WAN ports as transparent port ++ mt7530_w32(priv, 0x2010, 0x810000c0); ++ mt7530_w32(priv, 0x2110, 0x810000c0); ++ mt7530_w32(priv, 0x2210, 0x810000c0); ++ mt7530_w32(priv, 0x2310, 0x810000c0); ++ mt7530_w32(priv, 0x2410, 0x810000c0); ++ mt7530_w32(priv, 0x2510, 0x810000c0); ++ ++ // set CPU/P7 port as user port ++ mt7530_w32(priv, 0x2610, 0x81000000); ++ mt7530_w32(priv, 0x2710, 0x81000000); ++ ++ mt7530_w32(priv, 0x2604, 0x20ff0003); ++ mt7530_w32(priv, 0x2704, 0x20ff0003); ++ mt7530_w32(priv, 0x2610, 0x81000000); ++ ++ for (i = 0; i < MT7530_NUM_VLANS; i++) { ++ u8 ports = priv->vlans[i].ports; ++ u32 val = mt7530_r32(priv, 0x100 + 4 * (i / 2)); ++ ++ if (i % 2 == 0) { ++ val &= 0xfff000; ++ val |= i; ++ } else { ++ val &= 0xfff; ++ val |= (i << 12); ++ } ++ mt7530_w32(priv, 0x100 + 4 * (i / 2), val); ++ ++ if (ports) ++ mt7530_w32(priv, REG_ESW_VLAN_VAWD1, BIT(30) | (ports << 16) | BIT(0)); ++ else ++ mt7530_w32(priv, REG_ESW_VLAN_VAWD1, 0); ++ ++ mt7530_vtcr(priv, 1, i); ++ } ++ ++ for (i = 0; i < MT7530_NUM_PORTS; i++) ++ mt7530_w32(priv, 0x2014 + (0x100 * i), 0x10000 | 
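mt7530_apply_config() above writes one VLAN table entry per VID: the member bitmap is staged in VAWD1 and a self-clearing busy bit in VTCR commits it, which mt7530_vtcr() then polls. A sketch of the two words as the code composes them (the bit-field interpretation follows this driver, not a datasheet):

#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1u << (n))

/* VAWD1 carries the member bitmap in bits 23..16 plus two flag bits the
 * driver sets for a valid entry; VTCR carries the busy bit, a command
 * nibble and the VLAN index, and is polled until bit 31 clears. */
static uint32_t vawd1_entry(uint8_t member_ports)
{
        if (!member_ports)
                return 0;                               /* unused entry */
        return BIT(30) | ((uint32_t)member_ports << 16) | BIT(0);
}

static uint32_t vtcr_cmd(unsigned int cmd, unsigned int vid)
{
        return BIT(31) | (cmd << 12) | vid;
}

int main(void)
{
        uint8_t members = 0x6f;         /* vlan 1 of the "llllw" mapping above */

        printf("VAWD1 = 0x%08x\n", (unsigned int)vawd1_entry(members));
        printf("VTCR  = 0x%08x (cmd 1 = write entry, vid 1)\n",
               (unsigned int)vtcr_cmd(1, 1));
        return 0;
}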
priv->ports[i].pvid); ++ ++ return 0; ++} ++ ++static int ++mt7530_get_port_link(struct switch_dev *dev, int port, ++ struct switch_port_link *link) ++{ ++ struct mt7530_priv *priv = container_of(dev, struct mt7530_priv, swdev); ++ u32 speed, pmsr; ++ ++ if (port < 0 || port >= MT7530_NUM_PORTS) ++ return -EINVAL; ++ ++ pmsr = mt7530_r32(priv, 0x3008 + (0x100 * port)); ++ ++ link->link = pmsr & 1; ++ link->duplex = (pmsr >> 1) & 1; ++ speed = (pmsr >> 2) & 3; ++ ++ switch (speed) { ++ case 0: ++ link->speed = SWITCH_PORT_SPEED_10; ++ break; ++ case 1: ++ link->speed = SWITCH_PORT_SPEED_100; ++ break; ++ case 2: ++ case 3: /* forced gige speed can be 2 or 3 */ ++ link->speed = SWITCH_PORT_SPEED_1000; ++ break; ++ default: ++ link->speed = SWITCH_PORT_SPEED_UNKNOWN; ++ break; ++ } ++ ++ return 0; ++} ++ ++static const struct switch_attr mt7530_global[] = { ++ { ++ .type = SWITCH_TYPE_INT, ++ .name = "enable_vlan", ++ .description = "VLAN mode (1:enabled)", ++ .max = 1, ++ .id = MT7530_ATTR_ENABLE_VLAN, ++ .get = mt7530_get_vlan_enable, ++ .set = mt7530_set_vlan_enable, ++ }, ++}; ++ ++static const struct switch_attr mt7530_port[] = { ++}; ++ ++static const struct switch_attr mt7530_vlan[] = { ++}; ++ ++static const struct switch_dev_ops mt7530_ops = { ++ .attr_global = { ++ .attr = mt7530_global, ++ .n_attr = ARRAY_SIZE(mt7530_global), ++ }, ++ .attr_port = { ++ .attr = mt7530_port, ++ .n_attr = ARRAY_SIZE(mt7530_port), ++ }, ++ .attr_vlan = { ++ .attr = mt7530_vlan, ++ .n_attr = ARRAY_SIZE(mt7530_vlan), ++ }, ++ .get_vlan_ports = mt7530_get_vlan_ports, ++ .set_vlan_ports = mt7530_set_vlan_ports, ++ .get_port_pvid = mt7530_get_port_pvid, ++ .set_port_pvid = mt7530_set_port_pvid, ++ .get_port_link = mt7530_get_port_link, ++ .apply_config = mt7530_apply_config, ++ .reset_switch = mt7530_reset_switch, ++}; ++ ++int ++mt7530_probe(struct device *dev, void __iomem *base, struct mii_bus *bus) ++{ ++ struct switch_dev *swdev; ++ struct mt7530_priv *mt7530; ++ struct mt7530_mapping *map; ++ int ret; ++ ++ if (bus && bus->phy_map[0x1f]->phy_id != 0x1beef) ++ return 0; ++ ++ mt7530 = devm_kzalloc(dev, sizeof(struct mt7530_priv), GFP_KERNEL); ++ if (!mt7530) ++ return -ENOMEM; ++ ++ mt7530->base = base; ++ mt7530->bus = bus; ++ mt7530->global_vlan_enable = 1; ++ ++ swdev = &mt7530->swdev; ++ swdev->name = "mt7530"; ++ swdev->alias = "mt7530"; ++ swdev->cpu_port = MT7530_CPU_PORT; ++ swdev->ports = MT7530_NUM_PORTS; ++ swdev->vlans = MT7530_NUM_VLANS; ++ swdev->ops = &mt7530_ops; ++ ++ ret = register_switch(swdev, NULL); ++ if (ret) { ++ dev_err(dev, "failed to register mt7530\n"); ++ return ret; ++ } ++ ++ dev_info(dev, "loaded mt7530 driver\n"); ++ ++ map = mt7530_find_mapping(dev->of_node); ++ if (map) ++ mt7530_apply_mapping(mt7530, map); ++ mt7530_apply_config(swdev); ++ ++ return 0; ++} +--- /dev/null ++++ b/drivers/net/ethernet/ralink/mt7530.h +@@ -0,0 +1,20 @@ ++/* ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License ++ * as published by the Free Software Foundation; either version 2 ++ * of the License, or (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
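mt7530_get_port_link() above derives link, duplex and speed from a single port status word. A small decoder with the same bit layout (the sample words are made up):

#include <stdio.h>
#include <stdint.h>

/* Bit 0 = link, bit 1 = duplex, bits 3:2 = speed code; codes 2 and 3 both
 * mean gigabit, as noted in the switch above. */
static void decode_pmsr(uint32_t pmsr)
{
        static const int speeds[4] = { 10, 100, 1000, 1000 };

        printf("link=%u duplex=%s speed=%d\n",
               (unsigned int)(pmsr & 1),
               (pmsr >> 1) & 1 ? "full" : "half",
               speeds[(pmsr >> 2) & 3]);
}

int main(void)
{
        decode_pmsr(0x0b);      /* link up, full duplex, speed code 2 -> 1000 */
        decode_pmsr(0x04);      /* link down, speed code 1 -> 100 */
        return 0;
}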
++ * ++ * Copyright (C) 2013 John Crispin ++ */ ++ ++#ifndef _MT7530_H__ ++#define _MT7530_H__ ++ ++int mt7530_probe(struct device *dev, void __iomem *base, struct mii_bus *bus); ++ ++#endif +--- /dev/null ++++ b/drivers/net/ethernet/ralink/ralink_soc_eth.c +@@ -0,0 +1,845 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; version 2 of the License ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. ++ * ++ * Copyright (C) 2009-2013 John Crispin ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#include "ralink_soc_eth.h" ++#include "esw_rt3052.h" ++#include "mdio.h" ++ ++#define TX_TIMEOUT (2 * HZ) ++#define MAX_RX_LENGTH 1536 ++#define DMA_DUMMY_DESC 0xffffffff ++ ++static const u32 fe_reg_table_default[FE_REG_COUNT] = { ++ [FE_REG_PDMA_GLO_CFG] = FE_PDMA_GLO_CFG, ++ [FE_REG_PDMA_RST_CFG] = FE_PDMA_RST_CFG, ++ [FE_REG_DLY_INT_CFG] = FE_DLY_INT_CFG, ++ [FE_REG_TX_BASE_PTR0] = FE_TX_BASE_PTR0, ++ [FE_REG_TX_MAX_CNT0] = FE_TX_MAX_CNT0, ++ [FE_REG_TX_CTX_IDX0] = FE_TX_CTX_IDX0, ++ [FE_REG_RX_BASE_PTR0] = FE_RX_BASE_PTR0, ++ [FE_REG_RX_MAX_CNT0] = FE_RX_MAX_CNT0, ++ [FE_REG_RX_CALC_IDX0] = FE_RX_CALC_IDX0, ++ [FE_REG_FE_INT_ENABLE] = FE_FE_INT_ENABLE, ++ [FE_REG_FE_INT_STATUS] = FE_FE_INT_STATUS, ++}; ++ ++static const u32 *fe_reg_table = fe_reg_table_default; ++ ++static void __iomem *fe_base = 0; ++ ++void fe_w32(u32 val, unsigned reg) ++{ ++ __raw_writel(val, fe_base + reg); ++} ++ ++u32 fe_r32(unsigned reg) ++{ ++ return __raw_readl(fe_base + reg); ++} ++ ++static inline void fe_reg_w32(u32 val, enum fe_reg reg) ++{ ++ fe_w32(val, fe_reg_table[reg]); ++} ++ ++static inline u32 fe_reg_r32(enum fe_reg reg) ++{ ++ return fe_r32(fe_reg_table[reg]); ++} ++ ++static inline void fe_int_disable(u32 mask) ++{ ++ fe_reg_w32(fe_reg_r32(FE_REG_FE_INT_ENABLE) & ~mask, ++ FE_REG_FE_INT_ENABLE); ++ /* flush write */ ++ fe_reg_r32(FE_REG_FE_INT_ENABLE); ++} ++ ++static inline void fe_int_enable(u32 mask) ++{ ++ fe_reg_w32(fe_reg_r32(FE_REG_FE_INT_ENABLE) | mask, ++ FE_REG_FE_INT_ENABLE); ++ /* flush write */ ++ fe_reg_r32(FE_REG_FE_INT_ENABLE); ++} ++ ++static inline void fe_hw_set_macaddr(struct fe_priv *priv, unsigned char *mac) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&priv->page_lock, flags); ++ fe_w32((mac[0] << 8) | mac[1], FE_GDMA1_MAC_ADRH); ++ fe_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5], ++ FE_GDMA1_MAC_ADRL); ++ spin_unlock_irqrestore(&priv->page_lock, flags); ++} ++ ++static int fe_set_mac_address(struct net_device *dev, void *p) ++{ ++ int ret = eth_mac_addr(dev, p); ++ ++ if (!ret) { ++ struct fe_priv *priv = netdev_priv(dev); ++ ++ if (priv->soc->set_mac) ++ priv->soc->set_mac(priv, dev->dev_addr); ++ else ++ fe_hw_set_macaddr(priv, p); ++ } ++ ++ return ret; ++} ++ ++static struct sk_buff* fe_alloc_skb(struct fe_priv *priv) ++{ ++ struct sk_buff *skb; ++ ++ skb = 
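The fe_reg_table indirection above lets the same datapath code run on SoCs whose DMA registers live at different offsets; fe_probe() further down swaps in a per-SoC table when one is provided. A minimal sketch of the idea (the offsets are copied from ralink_soc_eth.h in this patch; the demo names are invented):

#include <stdio.h>
#include <stdint.h>

enum demo_reg { DEMO_TX_BASE_PTR0, DEMO_RX_BASE_PTR0, DEMO_REG_COUNT };

static const uint32_t default_table[DEMO_REG_COUNT] = {
        [DEMO_TX_BASE_PTR0] = 0x0110,   /* FE_TX_BASE_PTR0 (FE_PDMA_OFFSET + 0x10) */
        [DEMO_RX_BASE_PTR0] = 0x0130,   /* FE_RX_BASE_PTR0 (FE_PDMA_OFFSET + 0x30) */
};

static const uint32_t rt5350_table[DEMO_REG_COUNT] = {
        [DEMO_TX_BASE_PTR0] = 0x0800,   /* RT5350_TX_BASE_PTR0 */
        [DEMO_RX_BASE_PTR0] = 0x0900,   /* RT5350_RX_BASE_PTR0 (0x0800 + 0x100) */
};

int main(void)
{
        const uint32_t *table = rt5350_table;   /* picked at probe time */

        printf("TX ring base register at offset 0x%04x\n",
               (unsigned int)table[DEMO_TX_BASE_PTR0]);
        printf("RX ring base register at offset 0x%04x\n",
               (unsigned int)table[DEMO_RX_BASE_PTR0]);
        return 0;
}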
netdev_alloc_skb(priv->netdev, MAX_RX_LENGTH + NET_IP_ALIGN); ++ if (!skb) ++ return NULL; ++ ++ skb_reserve(skb, NET_IP_ALIGN); ++ ++ return skb; ++} ++ ++static int fe_alloc_rx(struct fe_priv *priv) ++{ ++ int size = NUM_DMA_DESC * sizeof(struct fe_rx_dma); ++ int i; ++ ++ priv->rx_dma = dma_alloc_coherent(&priv->netdev->dev, size, ++ &priv->rx_phys, GFP_ATOMIC); ++ if (!priv->rx_dma) ++ return -ENOMEM; ++ ++ memset(priv->rx_dma, 0, size); ++ ++ for (i = 0; i < NUM_DMA_DESC; i++) { ++ priv->rx_skb[i] = fe_alloc_skb(priv); ++ if (!priv->rx_skb[i]) ++ return -ENOMEM; ++ } ++ ++ for (i = 0; i < NUM_DMA_DESC; i++) { ++ dma_addr_t dma_addr = dma_map_single(&priv->netdev->dev, ++ priv->rx_skb[i]->data, ++ MAX_RX_LENGTH, ++ DMA_FROM_DEVICE); ++ priv->rx_dma[i].rxd1 = (unsigned int) dma_addr; ++ ++ if (priv->soc->rx_dma) ++ priv->soc->rx_dma(priv, i, MAX_RX_LENGTH); ++ else ++ priv->rx_dma[i].rxd2 = RX_DMA_LSO; ++ } ++ wmb(); ++ ++ fe_reg_w32(priv->rx_phys, FE_REG_RX_BASE_PTR0); ++ fe_reg_w32(NUM_DMA_DESC, FE_REG_RX_MAX_CNT0); ++ fe_reg_w32((NUM_DMA_DESC - 1), FE_REG_RX_CALC_IDX0); ++ fe_reg_w32(FE_PST_DRX_IDX0, FE_REG_PDMA_RST_CFG); ++ ++ return 0; ++} ++ ++static int fe_alloc_tx(struct fe_priv *priv) ++{ ++ int size = NUM_DMA_DESC * sizeof(struct fe_tx_dma); ++ int i; ++ ++ priv->tx_free_idx = 0; ++ ++ priv->tx_dma = dma_alloc_coherent(&priv->netdev->dev, size, ++ &priv->tx_phys, GFP_ATOMIC); ++ if (!priv->tx_dma) ++ return -ENOMEM; ++ ++ memset(priv->tx_dma, 0, size); ++ ++ for (i = 0; i < NUM_DMA_DESC; i++) { ++ if (priv->soc->tx_dma) { ++ priv->soc->tx_dma(priv, i, NULL); ++ continue; ++ } ++ ++ priv->tx_dma[i].txd2 = TX_DMA_LSO | TX_DMA_DONE; ++ priv->tx_dma[i].txd4 = TX_DMA_QN(3) | TX_DMA_PN(1); ++ } ++ ++ fe_reg_w32(priv->tx_phys, FE_REG_TX_BASE_PTR0); ++ fe_reg_w32(NUM_DMA_DESC, FE_REG_TX_MAX_CNT0); ++ fe_reg_w32(0, FE_REG_TX_CTX_IDX0); ++ fe_reg_w32(FE_PST_DTX_IDX0, FE_REG_PDMA_RST_CFG); ++ ++ return 0; ++} ++ ++static void fe_free_dma(struct fe_priv *priv) ++{ ++ int i; ++ ++ for (i = 0; i < NUM_DMA_DESC; i++) { ++ if (priv->rx_skb[i]) { ++ dma_unmap_single(&priv->netdev->dev, priv->rx_dma[i].rxd1, ++ MAX_RX_LENGTH, DMA_FROM_DEVICE); ++ dev_kfree_skb_any(priv->rx_skb[i]); ++ priv->rx_skb[i] = NULL; ++ } ++ ++ if (priv->tx_skb[i]) { ++ dev_kfree_skb_any(priv->tx_skb[i]); ++ priv->tx_skb[i] = NULL; ++ } ++ } ++ ++ if (priv->rx_dma) { ++ int size = NUM_DMA_DESC * sizeof(struct fe_rx_dma); ++ dma_free_coherent(&priv->netdev->dev, size, priv->rx_dma, ++ priv->rx_phys); ++ } ++ ++ if (priv->tx_dma) { ++ int size = NUM_DMA_DESC * sizeof(struct fe_tx_dma); ++ dma_free_coherent(&priv->netdev->dev, size, priv->tx_dma, ++ priv->tx_phys); ++ } ++ ++ netdev_reset_queue(priv->netdev); ++} ++ ++static void fe_start_tso(struct sk_buff *skb, struct net_device *dev, unsigned int nr_frags, int idx) ++{ ++ struct fe_priv *priv = netdev_priv(dev); ++ struct skb_frag_struct *frag; ++ int i; ++ ++ for (i = 0; i < nr_frags; i++) { ++ dma_addr_t mapped_addr; ++ ++ frag = &skb_shinfo(skb)->frags[i]; ++ mapped_addr = skb_frag_dma_map(&dev->dev, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); ++ if (i % 2) { ++ idx = (idx + 1) % NUM_DMA_DESC; ++ priv->tx_dma[idx].txd1 = mapped_addr; ++ if (i == nr_frags - 1) ++ priv->tx_dma[idx].txd2 = TX_DMA_LSO | TX_DMA_PLEN0(frag->size); ++ else ++ priv->tx_dma[idx].txd2 = TX_DMA_PLEN0(frag->size); ++ } else { ++ priv->tx_dma[idx].txd3 = mapped_addr; ++ if (i == nr_frags - 1) ++ priv->tx_dma[idx].txd2 |= TX_DMA_LS1 | TX_DMA_PLEN1(frag->size); ++ else ++ priv->tx_dma[idx].txd2 
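fe_alloc_rx() above hands every RX descriptor to the hardware with only the LSO bit set in rxd2 (when the SoC supplies no rx_dma hook); the DONE bit written back by the hardware is what later tells fe_poll_rx() that software owns the slot again. A small model of that ownership handshake (descriptor layout reduced to what the sketch needs):

#include <stdio.h>
#include <stdint.h>

#define BIT(n)      (1u << (n))
#define RX_DMA_LSO  BIT(30)     /* value written when the slot is handed to HW */
#define RX_DMA_DONE BIT(31)     /* set by HW once a frame has been received */
#define NUM_DESC    0x100       /* NUM_DMA_DESC in the driver */

struct rx_desc { uint32_t rxd1, rxd2, rxd3, rxd4; };

static int sw_owns(const struct rx_desc *d)
{
        return (d->rxd2 & RX_DMA_DONE) != 0;
}

static void give_back_to_hw(struct rx_desc *d)
{
        d->rxd2 = RX_DMA_LSO;
}

int main(void)
{
        struct rx_desc ring[NUM_DESC] = { 0 };
        unsigned int idx = 0;

        ring[1].rxd2 = RX_DMA_DONE | 64 << 16;  /* pretend HW landed a 64-byte frame */

        idx = (idx + 1) % NUM_DESC;             /* advance the way fe_poll_rx() does */
        printf("idx %u owned by %s\n", idx, sw_owns(&ring[idx]) ? "sw" : "hw");
        give_back_to_hw(&ring[idx]);
        printf("idx %u owned by %s\n", idx, sw_owns(&ring[idx]) ? "sw" : "hw");
        return 0;
}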
|= TX_DMA_PLEN1(frag->size); ++ } ++ } ++} ++ ++static int fe_start_xmit(struct sk_buff *skb, struct net_device *dev) ++{ ++ unsigned int nr_frags = skb_shinfo(skb)->nr_frags; ++ struct fe_priv *priv = netdev_priv(dev); ++ dma_addr_t mapped_addr; ++ u32 tx_next, tx, tx_num = 1; ++ int i; ++ ++ if (priv->soc->min_pkt_len) { ++ if (skb->len < priv->soc->min_pkt_len) { ++ if (skb_padto(skb, priv->soc->min_pkt_len)) { ++ printk(KERN_ERR ++ "fe_eth: skb_padto failed\n"); ++ kfree_skb(skb); ++ return 0; ++ } ++ skb_put(skb, priv->soc->min_pkt_len - skb->len); ++ } ++ } ++ ++ dev->trans_start = jiffies; ++ mapped_addr = dma_map_single(&priv->netdev->dev, skb->data, ++ skb->len, DMA_TO_DEVICE); ++ ++ spin_lock(&priv->page_lock); ++ ++ tx = fe_reg_r32(FE_REG_TX_CTX_IDX0); ++ if (priv->soc->tso && nr_frags) ++ tx_num += nr_frags >> 1; ++ tx_next = (tx + tx_num) % NUM_DMA_DESC; ++ if ((priv->tx_skb[tx]) || (priv->tx_skb[tx_next]) || ++ !(priv->tx_dma[tx].txd2 & TX_DMA_DONE) || ++ !(priv->tx_dma[tx_next].txd2 & TX_DMA_DONE)) ++ { ++ spin_unlock(&priv->page_lock); ++ dev->stats.tx_dropped++; ++ kfree_skb(skb); ++ ++ return NETDEV_TX_OK; ++ } ++ ++ if (priv->soc->tso) { ++ int t = tx_num; ++ ++ priv->tx_skb[(tx + t - 1) % NUM_DMA_DESC] = skb; ++ while (--t) ++ priv->tx_skb[(tx + t - 1) % NUM_DMA_DESC] = (struct sk_buff *) DMA_DUMMY_DESC; ++ } else { ++ priv->tx_skb[tx] = skb; ++ } ++ priv->tx_dma[tx].txd1 = (unsigned int) mapped_addr; ++ wmb(); ++ ++ priv->tx_dma[tx].txd4 &= ~0x80; ++ if (priv->soc->tx_dma) ++ priv->soc->tx_dma(priv, tx, skb); ++ else ++ priv->tx_dma[tx].txd2 = TX_DMA_LSO | TX_DMA_PLEN0(skb->len); ++ ++ if (skb->ip_summed == CHECKSUM_PARTIAL) ++ priv->tx_dma[tx].txd4 |= TX_DMA_CHKSUM; ++ else ++ priv->tx_dma[tx].txd4 &= ~TX_DMA_CHKSUM; ++ ++ if (priv->soc->tso) ++ fe_start_tso(skb, dev, nr_frags, tx); ++ ++ if (priv->soc->tso && (skb_shinfo(skb)->gso_segs > 1)) { ++ struct iphdr *iph = NULL; ++ struct tcphdr *th = NULL; ++ struct ipv6hdr *ip6h = NULL; ++ ++ ip6h = (struct ipv6hdr *) skb_network_header(skb); ++ iph = (struct iphdr *) skb_network_header(skb); ++ if ((iph->version == 4) && (iph->protocol == IPPROTO_TCP)) { ++ th = (struct tcphdr *)skb_transport_header(skb); ++ priv->tx_dma[tx].txd4 |= BIT(28); ++ th->check = htons(skb_shinfo(skb)->gso_size); ++ dma_cache_sync(NULL, th, sizeof(struct tcphdr), DMA_TO_DEVICE); ++ } else if ((ip6h->version == 6) && (ip6h->nexthdr == NEXTHDR_TCP)) { ++ th = (struct tcphdr *)skb_transport_header(skb); ++ priv->tx_dma[tx].txd4 |= BIT(28); ++ th->check = htons(skb_shinfo(skb)->gso_size); ++ dma_cache_sync(NULL, th, sizeof(struct tcphdr), DMA_TO_DEVICE); ++ } ++ } ++ ++ for (i = 0; i < tx_num; i++) ++ dma_cache_sync(NULL, &priv->tx_dma[tx + i], sizeof(struct fe_tx_dma), DMA_TO_DEVICE); ++ ++ dev->stats.tx_packets++; ++ dev->stats.tx_bytes += skb->len; ++ ++ wmb(); ++ fe_reg_w32(tx_next, FE_REG_TX_CTX_IDX0); ++ netdev_sent_queue(dev, skb->len); ++ ++ spin_unlock(&priv->page_lock); ++ ++ return NETDEV_TX_OK; ++} ++ ++static int fe_poll_rx(struct napi_struct *napi, int budget) ++{ ++ struct fe_priv *priv = container_of(napi, struct fe_priv, rx_napi); ++ int idx = fe_reg_r32(FE_REG_RX_CALC_IDX0); ++ int complete = 0; ++ int rx = 0; ++ ++ while ((rx < budget) && !complete) { ++ idx = (idx + 1) % NUM_DMA_DESC; ++ ++ if (priv->rx_dma[idx].rxd2 & RX_DMA_DONE) { ++ struct sk_buff *new_skb = fe_alloc_skb(priv); ++ ++ if (new_skb) { ++ int pktlen = RX_DMA_PLEN0(priv->rx_dma[idx].rxd2); ++ dma_addr_t dma_addr; ++ ++ dma_unmap_single(&priv->netdev->dev, 
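fe_start_xmit() above may need more than one descriptor per frame once TSO fragments are attached (two fragments share a descriptor, one buffer in txd1 and one in txd3), so it checks both the first slot it would use and the slot the CTX index will advance to before touching the ring. A sketch of that arithmetic (values are examples):

#include <stdio.h>

#define NUM_DESC 0x100          /* NUM_DMA_DESC in the driver */

/* One descriptor for the linear part and, with TSO, one more per pair of
 * fragments, mirroring the tx_num computation above. */
static unsigned int descs_needed(unsigned int nr_frags, int tso)
{
        unsigned int n = 1;

        if (tso && nr_frags)
                n += nr_frags >> 1;
        return n;
}

int main(void)
{
        unsigned int tx = 0xfe;                         /* current CTX index */
        unsigned int tx_num = descs_needed(5, 1);       /* 5 frags -> 3 descriptors */
        unsigned int tx_next = (tx + tx_num) % NUM_DESC;

        printf("need %u descriptors, first slot %u, next CTX index %u\n",
               tx_num, tx, tx_next);
        return 0;
}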
priv->rx_dma[idx].rxd1, ++ MAX_RX_LENGTH, DMA_FROM_DEVICE); ++ ++ skb_put(priv->rx_skb[idx], pktlen); ++ priv->rx_skb[idx]->dev = priv->netdev; ++ priv->rx_skb[idx]->protocol = eth_type_trans(priv->rx_skb[idx], priv->netdev); ++ if (priv->rx_dma[idx].rxd4 & priv->soc->checksum_bit) ++ priv->rx_skb[idx]->ip_summed = CHECKSUM_UNNECESSARY; ++ else ++ priv->rx_skb[idx]->ip_summed = CHECKSUM_NONE; ++ priv->netdev->stats.rx_packets++; ++ priv->netdev->stats.rx_bytes += pktlen; ++ ++#ifdef CONFIG_INET_LRO ++ if (priv->soc->get_skb_header && priv->rx_skb[idx]->ip_summed == CHECKSUM_UNNECESSARY) ++ lro_receive_skb(&priv->lro_mgr, priv->rx_skb[idx], NULL); ++ else ++#endif ++ netif_receive_skb(priv->rx_skb[idx]); ++ ++ priv->rx_skb[idx] = new_skb; ++ ++ dma_addr = dma_map_single(&priv->netdev->dev, ++ new_skb->data, ++ MAX_RX_LENGTH, ++ DMA_FROM_DEVICE); ++ priv->rx_dma[idx].rxd1 = (unsigned int) dma_addr; ++ wmb(); ++ } else { ++ priv->netdev->stats.rx_dropped++; ++ } ++ ++ if (priv->soc->rx_dma) ++ priv->soc->rx_dma(priv, idx, MAX_RX_LENGTH); ++ else ++ priv->rx_dma[idx].rxd2 = RX_DMA_LSO; ++ fe_reg_w32(idx, FE_REG_RX_CALC_IDX0); ++ ++ rx++; ++ } else { ++ complete = 1; ++ } ++ } ++ ++#ifdef CONFIG_INET_LRO ++ if (priv->soc->get_skb_header) ++ lro_flush_all(&priv->lro_mgr); ++#endif ++ if (complete) { ++ napi_complete(&priv->rx_napi); ++ fe_int_enable(priv->soc->rx_dly_int); ++ } ++ ++ return rx; ++} ++ ++static void fe_tx_housekeeping(unsigned long ptr) ++{ ++ struct net_device *dev = (struct net_device*)ptr; ++ struct fe_priv *priv = netdev_priv(dev); ++ unsigned int bytes_compl = 0; ++ unsigned int pkts_compl = 0; ++ ++ spin_lock(&priv->page_lock); ++ while (1) { ++ struct fe_tx_dma *txd; ++ ++ txd = &priv->tx_dma[priv->tx_free_idx]; ++ ++ if (!(txd->txd2 & TX_DMA_DONE) || !(priv->tx_skb[priv->tx_free_idx])) ++ break; ++ ++ if (priv->tx_skb[priv->tx_free_idx] != (struct sk_buff *) DMA_DUMMY_DESC) { ++ bytes_compl += priv->tx_skb[priv->tx_free_idx]->len; ++ dev_kfree_skb_irq(priv->tx_skb[priv->tx_free_idx]); ++ } ++ pkts_compl++; ++ priv->tx_skb[priv->tx_free_idx] = NULL; ++ priv->tx_free_idx++; ++ if (priv->tx_free_idx >= NUM_DMA_DESC) ++ priv->tx_free_idx = 0; ++ } ++ ++ netdev_completed_queue(priv->netdev, pkts_compl, bytes_compl); ++ spin_unlock(&priv->page_lock); ++ ++ fe_int_enable(priv->soc->tx_dly_int); ++} ++ ++static void fe_tx_timeout(struct net_device *dev) ++{ ++ struct fe_priv *priv = netdev_priv(dev); ++ ++ tasklet_schedule(&priv->tx_tasklet); ++ priv->netdev->stats.tx_errors++; ++ netdev_err(dev, "transmit timed out, waking up the queue\n"); ++ netif_wake_queue(dev); ++} ++ ++static irqreturn_t fe_handle_irq(int irq, void *dev) ++{ ++ struct fe_priv *priv = netdev_priv(dev); ++ unsigned int status; ++ unsigned int mask; ++ ++ status = fe_reg_r32(FE_REG_FE_INT_STATUS); ++ mask = fe_reg_r32(FE_REG_FE_INT_ENABLE); ++ ++ if (!(status & mask)) ++ return IRQ_NONE; ++ ++ if (status & priv->soc->rx_dly_int) { ++ fe_int_disable(priv->soc->rx_dly_int); ++ napi_schedule(&priv->rx_napi); ++ } ++ ++ if (status & priv->soc->tx_dly_int) { ++ fe_int_disable(priv->soc->tx_dly_int); ++ tasklet_schedule(&priv->tx_tasklet); ++ } ++ ++ fe_reg_w32(status, FE_REG_FE_INT_STATUS); ++ ++ return IRQ_HANDLED; ++} ++ ++static int fe_hw_init(struct net_device *dev) ++{ ++ struct fe_priv *priv = netdev_priv(dev); ++ int err; ++ ++ err = devm_request_irq(priv->device, dev->irq, fe_handle_irq, 0, ++ dev_name(priv->device), dev); ++ if (err) ++ return err; ++ ++ err = fe_alloc_rx(priv); ++ if (!err) ++ err = 
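fe_handle_irq() above never processes packets itself: it masks the delay-interrupt cause and defers to the NAPI poller or the TX tasklet, which re-enable the interrupt once the ring is drained. A tiny model of that mask/poll/unmask cycle (bit values follow the non-RT5350 layout in ralink_soc_eth.h):

#include <stdio.h>
#include <stdint.h>

#define BIT(n)     (1u << (n))
#define RX_DLY_INT BIT(0)       /* FE_RX_DLY_INT */
#define TX_DLY_INT BIT(1)       /* FE_TX_DLY_INT */

static uint32_t int_enable = RX_DLY_INT | TX_DLY_INT;

static void hard_irq(uint32_t status)
{
        if (status & int_enable & RX_DLY_INT) {
                int_enable &= ~RX_DLY_INT;      /* fe_int_disable() */
                printf("irq: rx work deferred to napi poll\n");
        }
}

static void napi_poll(void)
{
        printf("poll: ring drained\n");
        int_enable |= RX_DLY_INT;               /* fe_int_enable() on napi_complete */
}

int main(void)
{
        hard_irq(RX_DLY_INT);
        printf("rx interrupt now %s\n", int_enable & RX_DLY_INT ? "on" : "off");
        napi_poll();
        printf("rx interrupt now %s\n", int_enable & RX_DLY_INT ? "on" : "off");
        return 0;
}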
fe_alloc_tx(priv); ++ if (err) ++ return err; ++ ++ if (priv->soc->set_mac) ++ priv->soc->set_mac(priv, dev->dev_addr); ++ else ++ fe_hw_set_macaddr(priv, dev->dev_addr); ++ ++ fe_reg_w32(FE_DELAY_INIT, FE_REG_DLY_INT_CFG); ++ ++ fe_int_disable(priv->soc->tx_dly_int | priv->soc->rx_dly_int); ++ ++ tasklet_init(&priv->tx_tasklet, fe_tx_housekeeping, (unsigned long)dev); ++ ++ if (priv->soc->fwd_config) { ++ priv->soc->fwd_config(priv); ++ } else { ++ unsigned long sysclk = priv->sysclk; ++ ++ if (!sysclk) { ++ netdev_err(dev, "unable to get clock\n"); ++ return -EINVAL; ++ } ++ ++ sysclk /= FE_US_CYC_CNT_DIVISOR; ++ sysclk <<= FE_US_CYC_CNT_SHIFT; ++ ++ fe_w32((fe_r32(FE_FE_GLO_CFG) & ++ ~(FE_US_CYC_CNT_MASK << FE_US_CYC_CNT_SHIFT)) | sysclk, ++ FE_FE_GLO_CFG); ++ ++ fe_w32(fe_r32(FE_GDMA1_FWD_CFG) & ~0xffff, FE_GDMA1_FWD_CFG); ++ fe_w32(fe_r32(FE_GDMA1_FWD_CFG) | (FE_GDM1_ICS_EN | FE_GDM1_TCS_EN | FE_GDM1_UCS_EN), ++ FE_GDMA1_FWD_CFG); ++ fe_w32(fe_r32(FE_CDMA_CSG_CFG) | (FE_ICS_GEN_EN | FE_TCS_GEN_EN | FE_UCS_GEN_EN), ++ FE_CDMA_CSG_CFG); ++ fe_w32(FE_PSE_FQFC_CFG_INIT, FE_PSE_FQ_CFG); ++ } ++ ++ fe_w32(1, FE_FE_RST_GL); ++ fe_w32(0, FE_FE_RST_GL); ++ ++ return 0; ++} ++ ++static int fe_open(struct net_device *dev) ++{ ++ struct fe_priv *priv = netdev_priv(dev); ++ unsigned long flags; ++ u32 val; ++ ++ spin_lock_irqsave(&priv->page_lock, flags); ++ napi_enable(&priv->rx_napi); ++ ++ val = FE_TX_WB_DDONE | FE_RX_DMA_EN | FE_TX_DMA_EN; ++ val |= priv->soc->pdma_glo_cfg; ++ fe_reg_w32(val, FE_REG_PDMA_GLO_CFG); ++ ++ spin_unlock_irqrestore(&priv->page_lock, flags); ++ ++ if (priv->phy) ++ priv->phy->start(priv); ++ ++ if (priv->soc->has_carrier && priv->soc->has_carrier(priv)) ++ netif_carrier_on(dev); ++ ++ netif_start_queue(dev); ++ fe_int_enable(priv->soc->tx_dly_int | priv->soc->rx_dly_int); ++ ++ return 0; ++} ++ ++static int fe_stop(struct net_device *dev) ++{ ++ struct fe_priv *priv = netdev_priv(dev); ++ unsigned long flags; ++ ++ fe_int_disable(priv->soc->tx_dly_int | priv->soc->rx_dly_int); ++ ++ netif_stop_queue(dev); ++ ++ if (priv->phy) ++ priv->phy->stop(priv); ++ ++ spin_lock_irqsave(&priv->page_lock, flags); ++ napi_disable(&priv->rx_napi); ++ ++ fe_reg_w32(fe_reg_r32(FE_REG_PDMA_GLO_CFG) & ++ ~(FE_TX_WB_DDONE | FE_RX_DMA_EN | FE_TX_DMA_EN), ++ FE_REG_PDMA_GLO_CFG); ++ spin_unlock_irqrestore(&priv->page_lock, flags); ++ ++ return 0; ++} ++ ++static int __init fe_init(struct net_device *dev) ++{ ++ struct fe_priv *priv = netdev_priv(dev); ++ struct device_node *port; ++ int err; ++ ++ BUG_ON(!priv->soc->reset_fe); ++ priv->soc->reset_fe(); ++ ++ if (priv->soc->switch_init) ++ priv->soc->switch_init(priv); ++ ++ net_srandom(jiffies); ++ memcpy(dev->dev_addr, priv->soc->mac, ETH_ALEN); ++ of_get_mac_address_mtd(priv->device->of_node, dev->dev_addr); ++ ++ err = fe_mdio_init(priv); ++ if (err) ++ return err; ++ ++ if (priv->phy) { ++ err = priv->phy->connect(priv); ++ if (err) ++ goto err_mdio_cleanup; ++ } ++ ++ if (priv->soc->port_init) ++ for_each_child_of_node(priv->device->of_node, port) ++ if (of_device_is_compatible(port, "ralink,eth-port") && of_device_is_available(port)) ++ priv->soc->port_init(priv, port); ++ ++ err = fe_hw_init(dev); ++ if (err) ++ goto err_phy_disconnect; ++ ++ if (priv->soc->switch_config) ++ priv->soc->switch_config(priv); ++ ++ return 0; ++ ++err_phy_disconnect: ++ if (priv->phy) ++ priv->phy->disconnect(priv); ++err_mdio_cleanup: ++ fe_mdio_cleanup(priv); ++ ++ return err; ++} ++ ++static void fe_uninit(struct net_device *dev) ++{ ++ struct 
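When no fwd_config hook is present, fe_hw_init() above folds the system clock into FE_FE_GLO_CFG as a cycles-per-microsecond count. A sketch of that arithmetic (the 200 MHz input is only an example; the driver assumes the result fits the 8-bit field):

#include <stdio.h>
#include <stdint.h>

#define US_CYC_CNT_MASK     0xff        /* FE_US_CYC_CNT_* from ralink_soc_eth.h */
#define US_CYC_CNT_SHIFT    0x8
#define US_CYC_CNT_DIVISOR  1000000

static uint32_t fold_us_cycles(uint32_t glo_cfg, unsigned long sysclk_hz)
{
        unsigned long cycles = sysclk_hz / US_CYC_CNT_DIVISOR; /* cycles per us */

        glo_cfg &= ~(US_CYC_CNT_MASK << US_CYC_CNT_SHIFT);
        return glo_cfg | (cycles << US_CYC_CNT_SHIFT);
}

int main(void)
{
        /* A 200 MHz clock gives 200 cycles per microsecond (0xc8). */
        printf("FE_FE_GLO_CFG -> 0x%08x\n",
               (unsigned int)fold_us_cycles(0, 200000000UL));
        return 0;
}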
fe_priv *priv = netdev_priv(dev); ++ ++ tasklet_kill(&priv->tx_tasklet); ++ ++ if (priv->phy) ++ priv->phy->disconnect(priv); ++ fe_mdio_cleanup(priv); ++ ++ fe_reg_w32(0, FE_REG_FE_INT_ENABLE); ++ free_irq(dev->irq, dev); ++ ++ fe_free_dma(priv); ++} ++ ++static const struct net_device_ops fe_netdev_ops = { ++ .ndo_init = fe_init, ++ .ndo_uninit = fe_uninit, ++ .ndo_open = fe_open, ++ .ndo_stop = fe_stop, ++ .ndo_start_xmit = fe_start_xmit, ++ .ndo_tx_timeout = fe_tx_timeout, ++ .ndo_set_mac_address = fe_set_mac_address, ++ .ndo_change_mtu = eth_change_mtu, ++ .ndo_validate_addr = eth_validate_addr, ++}; ++ ++static int fe_probe(struct platform_device *pdev) ++{ ++ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ const struct of_device_id *match; ++ struct fe_soc_data *soc = NULL; ++ struct net_device *netdev; ++ struct fe_priv *priv; ++ struct clk *sysclk; ++ int err; ++ ++ device_reset(&pdev->dev); ++ ++ match = of_match_device(of_fe_match, &pdev->dev); ++ soc = (struct fe_soc_data *) match->data; ++ ++ if (soc->init_data) ++ soc->init_data(soc); ++ if (soc->reg_table) ++ fe_reg_table = soc->reg_table; ++ ++ fe_base = devm_request_and_ioremap(&pdev->dev, res); ++ if (!fe_base) ++ return -ENOMEM; ++ ++ netdev = alloc_etherdev(sizeof(struct fe_priv)); ++ if (!netdev) { ++ dev_err(&pdev->dev, "alloc_etherdev failed\n"); ++ return -ENOMEM; ++ } ++ ++ strcpy(netdev->name, "eth%d"); ++ netdev->netdev_ops = &fe_netdev_ops; ++ netdev->base_addr = (unsigned long) fe_base; ++ netdev->watchdog_timeo = TX_TIMEOUT; ++ netdev->features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM; ++ ++ if (fe_reg_table[FE_REG_FE_DMA_VID_BASE]) ++ netdev->features |= NETIF_F_HW_VLAN_CTAG_TX; ++ ++ if (soc->tso) { ++ dev_info(&pdev->dev, "Enabling TSO\n"); ++ netdev->features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_IPV6_CSUM; ++ } ++ netdev->hw_features = netdev->features; ++ ++ netdev->irq = platform_get_irq(pdev, 0); ++ if (netdev->irq < 0) { ++ dev_err(&pdev->dev, "no IRQ resource found\n"); ++ kfree(netdev); ++ return -ENXIO; ++ } ++ ++ priv = netdev_priv(netdev); ++ memset(priv, 0, sizeof(struct fe_priv)); ++ spin_lock_init(&priv->page_lock); ++ ++ sysclk = devm_clk_get(&pdev->dev, NULL); ++ if (!IS_ERR(sysclk)) ++ priv->sysclk = clk_get_rate(sysclk); ++ ++ priv->netdev = netdev; ++ priv->device = &pdev->dev; ++ priv->soc = soc; ++ ++ err = register_netdev(netdev); ++ if (err) { ++ dev_err(&pdev->dev, "error bringing up device\n"); ++ kfree(netdev); ++ return err; ++ } ++ netif_napi_add(netdev, &priv->rx_napi, fe_poll_rx, 32); ++ ++#ifdef CONFIG_INET_LRO ++ if (priv->soc->get_skb_header) { ++ priv->lro_mgr.dev = netdev; ++ memset(&priv->lro_mgr.stats, 0, sizeof(priv->lro_mgr.stats)); ++ priv->lro_mgr.features = LRO_F_NAPI; ++ priv->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY; ++ priv->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY; ++ priv->lro_mgr.max_desc = ARRAY_SIZE(priv->lro_arr); ++ priv->lro_mgr.max_aggr = 64; ++ priv->lro_mgr.frag_align_pad = 0; ++ priv->lro_mgr.lro_arr = priv->lro_arr; ++ priv->lro_mgr.get_skb_header = priv->soc->get_skb_header; ++ } ++#endif ++ ++ platform_set_drvdata(pdev, netdev); ++ ++ netdev_info(netdev, "done loading\n"); ++ ++ return 0; ++} ++ ++static int fe_remove(struct platform_device *pdev) ++{ ++ struct net_device *dev = platform_get_drvdata(pdev); ++ struct fe_priv *priv = netdev_priv(dev); ++ ++ netif_stop_queue(dev); ++ netif_napi_del(&priv->rx_napi); ++ ++ unregister_netdev(dev); ++ free_netdev(dev); ++ ++ return 0; ++} ++ ++static struct 
platform_driver fe_driver = { ++ .probe = fe_probe, ++ .remove = fe_remove, ++ .driver = { ++ .name = "ralink_soc_eth", ++ .owner = THIS_MODULE, ++ .of_match_table = of_fe_match, ++ }, ++}; ++ ++static int __init init_rtfe(void) ++{ ++ int ret; ++ ++ ret = rtesw_init(); ++ if (ret) ++ return ret; ++ ++ ret = platform_driver_register(&fe_driver); ++ if (ret) ++ rtesw_exit(); ++ ++ return ret; ++} ++ ++static void __exit exit_rtfe(void) ++{ ++ platform_driver_unregister(&fe_driver); ++ rtesw_exit(); ++} ++ ++module_init(init_rtfe); ++module_exit(exit_rtfe); ++ ++MODULE_LICENSE("GPL"); ++MODULE_AUTHOR("John Crispin "); ++MODULE_DESCRIPTION("Ethernet driver for Ralink SoC"); +--- /dev/null ++++ b/drivers/net/ethernet/ralink/ralink_soc_eth.h +@@ -0,0 +1,384 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; version 2 of the License ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. ++ * ++ * based on Ralink SDK3.3 ++ * Copyright (C) 2009-2013 John Crispin ++ */ ++ ++#ifndef FE_ETH_H ++#define FE_ETH_H ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++ ++enum fe_reg { ++ FE_REG_PDMA_GLO_CFG = 0, ++ FE_REG_PDMA_RST_CFG, ++ FE_REG_DLY_INT_CFG, ++ FE_REG_TX_BASE_PTR0, ++ FE_REG_TX_MAX_CNT0, ++ FE_REG_TX_CTX_IDX0, ++ FE_REG_RX_BASE_PTR0, ++ FE_REG_RX_MAX_CNT0, ++ FE_REG_RX_CALC_IDX0, ++ FE_REG_FE_INT_ENABLE, ++ FE_REG_FE_INT_STATUS, ++ FE_REG_FE_DMA_VID_BASE, ++ FE_REG_COUNT ++}; ++ ++#define NUM_DMA_DESC 0x100 ++ ++#define FE_DELAY_EN_INT 0x80 ++#define FE_DELAY_MAX_INT 0x04 ++#define FE_DELAY_MAX_TOUT 0x04 ++#define FE_DELAY_CHAN (((FE_DELAY_EN_INT | FE_DELAY_MAX_INT) << 8) | FE_DELAY_MAX_TOUT) ++#define FE_DELAY_INIT ((FE_DELAY_CHAN << 16) | FE_DELAY_CHAN) ++#define FE_PSE_FQFC_CFG_INIT 0x80504000 ++ ++/* interrupt bits */ ++#define FE_CNT_PPE_AF BIT(31) ++#define FE_CNT_GDM_AF BIT(29) ++#define FE_PSE_P2_FC BIT(26) ++#define FE_PSE_BUF_DROP BIT(24) ++#define FE_GDM_OTHER_DROP BIT(23) ++#define FE_PSE_P1_FC BIT(22) ++#define FE_PSE_P0_FC BIT(21) ++#define FE_PSE_FQ_EMPTY BIT(20) ++#define FE_GE1_STA_CHG BIT(18) ++#define FE_TX_COHERENT BIT(17) ++#define FE_RX_COHERENT BIT(16) ++#define FE_TX_DONE_INT3 BIT(11) ++#define FE_TX_DONE_INT2 BIT(10) ++#define FE_TX_DONE_INT1 BIT(9) ++#define FE_TX_DONE_INT0 BIT(8) ++#define FE_RX_DONE_INT0 BIT(2) ++#define FE_TX_DLY_INT BIT(1) ++#define FE_RX_DLY_INT BIT(0) ++ ++#define RT5350_RX_DLY_INT BIT(30) ++#define RT5350_TX_DLY_INT BIT(28) ++ ++/* registers */ ++#define FE_FE_OFFSET 0x0000 ++#define FE_GDMA_OFFSET 0x0020 ++#define FE_PSE_OFFSET 0x0040 ++#define FE_GDMA2_OFFSET 0x0060 ++#define FE_CDMA_OFFSET 0x0080 ++#define FE_DMA_VID0 0x00a8 ++#define FE_PDMA_OFFSET 0x0100 ++#define FE_PPE_OFFSET 0x0200 ++#define FE_CMTABLE_OFFSET 0x0400 ++#define FE_POLICYTABLE_OFFSET 0x1000 ++ ++#define RT5350_PDMA_OFFSET 0x0800 ++#define RT5350_SDM_OFFSET 0x0c00 ++ ++#define FE_MDIO_ACCESS (FE_FE_OFFSET + 0x00) ++#define FE_MDIO_CFG (FE_FE_OFFSET + 0x04) ++#define FE_FE_GLO_CFG (FE_FE_OFFSET + 
0x08) ++#define FE_FE_RST_GL (FE_FE_OFFSET + 0x0C) ++#define FE_FE_INT_STATUS (FE_FE_OFFSET + 0x10) ++#define FE_FE_INT_ENABLE (FE_FE_OFFSET + 0x14) ++#define FE_MDIO_CFG2 (FE_FE_OFFSET + 0x18) ++#define FE_FOC_TS_T (FE_FE_OFFSET + 0x1C) ++ ++#define FE_GDMA1_FWD_CFG (FE_GDMA_OFFSET + 0x00) ++#define FE_GDMA1_SCH_CFG (FE_GDMA_OFFSET + 0x04) ++#define FE_GDMA1_SHPR_CFG (FE_GDMA_OFFSET + 0x08) ++#define FE_GDMA1_MAC_ADRL (FE_GDMA_OFFSET + 0x0C) ++#define FE_GDMA1_MAC_ADRH (FE_GDMA_OFFSET + 0x10) ++ ++#define FE_GDMA2_FWD_CFG (FE_GDMA2_OFFSET + 0x00) ++#define FE_GDMA2_SCH_CFG (FE_GDMA2_OFFSET + 0x04) ++#define FE_GDMA2_SHPR_CFG (FE_GDMA2_OFFSET + 0x08) ++#define FE_GDMA2_MAC_ADRL (FE_GDMA2_OFFSET + 0x0C) ++#define FE_GDMA2_MAC_ADRH (FE_GDMA2_OFFSET + 0x10) ++ ++#define FE_PSE_FQ_CFG (FE_PSE_OFFSET + 0x00) ++#define FE_CDMA_FC_CFG (FE_PSE_OFFSET + 0x04) ++#define FE_GDMA1_FC_CFG (FE_PSE_OFFSET + 0x08) ++#define FE_GDMA2_FC_CFG (FE_PSE_OFFSET + 0x0C) ++ ++#define FE_CDMA_CSG_CFG (FE_CDMA_OFFSET + 0x00) ++#define FE_CDMA_SCH_CFG (FE_CDMA_OFFSET + 0x04) ++ ++#define MT7620A_GDMA_OFFSET 0x0600 ++#define MT7620A_GDMA1_FWD_CFG (MT7620A_GDMA_OFFSET + 0x00) ++#define MT7620A_FE_GDMA1_SCH_CFG (MT7620A_GDMA_OFFSET + 0x04) ++#define MT7620A_FE_GDMA1_SHPR_CFG (MT7620A_GDMA_OFFSET + 0x08) ++#define MT7620A_FE_GDMA1_MAC_ADRL (MT7620A_GDMA_OFFSET + 0x0C) ++#define MT7620A_FE_GDMA1_MAC_ADRH (MT7620A_GDMA_OFFSET + 0x10) ++ ++#define RT5350_TX_BASE_PTR0 (RT5350_PDMA_OFFSET + 0x00) ++#define RT5350_TX_MAX_CNT0 (RT5350_PDMA_OFFSET + 0x04) ++#define RT5350_TX_CTX_IDX0 (RT5350_PDMA_OFFSET + 0x08) ++#define RT5350_TX_DTX_IDX0 (RT5350_PDMA_OFFSET + 0x0C) ++#define RT5350_TX_BASE_PTR1 (RT5350_PDMA_OFFSET + 0x10) ++#define RT5350_TX_MAX_CNT1 (RT5350_PDMA_OFFSET + 0x14) ++#define RT5350_TX_CTX_IDX1 (RT5350_PDMA_OFFSET + 0x18) ++#define RT5350_TX_DTX_IDX1 (RT5350_PDMA_OFFSET + 0x1C) ++#define RT5350_TX_BASE_PTR2 (RT5350_PDMA_OFFSET + 0x20) ++#define RT5350_TX_MAX_CNT2 (RT5350_PDMA_OFFSET + 0x24) ++#define RT5350_TX_CTX_IDX2 (RT5350_PDMA_OFFSET + 0x28) ++#define RT5350_TX_DTX_IDX2 (RT5350_PDMA_OFFSET + 0x2C) ++#define RT5350_TX_BASE_PTR3 (RT5350_PDMA_OFFSET + 0x30) ++#define RT5350_TX_MAX_CNT3 (RT5350_PDMA_OFFSET + 0x34) ++#define RT5350_TX_CTX_IDX3 (RT5350_PDMA_OFFSET + 0x38) ++#define RT5350_TX_DTX_IDX3 (RT5350_PDMA_OFFSET + 0x3C) ++#define RT5350_RX_BASE_PTR0 (RT5350_PDMA_OFFSET + 0x100) ++#define RT5350_RX_MAX_CNT0 (RT5350_PDMA_OFFSET + 0x104) ++#define RT5350_RX_CALC_IDX0 (RT5350_PDMA_OFFSET + 0x108) ++#define RT5350_RX_DRX_IDX0 (RT5350_PDMA_OFFSET + 0x10C) ++#define RT5350_RX_BASE_PTR1 (RT5350_PDMA_OFFSET + 0x110) ++#define RT5350_RX_MAX_CNT1 (RT5350_PDMA_OFFSET + 0x114) ++#define RT5350_RX_CALC_IDX1 (RT5350_PDMA_OFFSET + 0x118) ++#define RT5350_RX_DRX_IDX1 (RT5350_PDMA_OFFSET + 0x11C) ++#define RT5350_PDMA_GLO_CFG (RT5350_PDMA_OFFSET + 0x204) ++#define RT5350_PDMA_RST_CFG (RT5350_PDMA_OFFSET + 0x208) ++#define RT5350_DLY_INT_CFG (RT5350_PDMA_OFFSET + 0x20c) ++#define RT5350_FE_INT_STATUS (RT5350_PDMA_OFFSET + 0x220) ++#define RT5350_FE_INT_ENABLE (RT5350_PDMA_OFFSET + 0x228) ++#define RT5350_PDMA_SCH_CFG (RT5350_PDMA_OFFSET + 0x280) ++ ++#define FE_PDMA_GLO_CFG (FE_PDMA_OFFSET + 0x00) ++#define FE_PDMA_RST_CFG (FE_PDMA_OFFSET + 0x04) ++#define FE_PDMA_SCH_CFG (FE_PDMA_OFFSET + 0x08) ++#define FE_DLY_INT_CFG (FE_PDMA_OFFSET + 0x0C) ++#define FE_TX_BASE_PTR0 (FE_PDMA_OFFSET + 0x10) ++#define FE_TX_MAX_CNT0 (FE_PDMA_OFFSET + 0x14) ++#define FE_TX_CTX_IDX0 (FE_PDMA_OFFSET + 0x18) ++#define FE_TX_DTX_IDX0 
(FE_PDMA_OFFSET + 0x1C) ++#define FE_TX_BASE_PTR1 (FE_PDMA_OFFSET + 0x20) ++#define FE_TX_MAX_CNT1 (FE_PDMA_OFFSET + 0x24) ++#define FE_TX_CTX_IDX1 (FE_PDMA_OFFSET + 0x28) ++#define FE_TX_DTX_IDX1 (FE_PDMA_OFFSET + 0x2C) ++#define FE_RX_BASE_PTR0 (FE_PDMA_OFFSET + 0x30) ++#define FE_RX_MAX_CNT0 (FE_PDMA_OFFSET + 0x34) ++#define FE_RX_CALC_IDX0 (FE_PDMA_OFFSET + 0x38) ++#define FE_RX_DRX_IDX0 (FE_PDMA_OFFSET + 0x3C) ++#define FE_TX_BASE_PTR2 (FE_PDMA_OFFSET + 0x40) ++#define FE_TX_MAX_CNT2 (FE_PDMA_OFFSET + 0x44) ++#define FE_TX_CTX_IDX2 (FE_PDMA_OFFSET + 0x48) ++#define FE_TX_DTX_IDX2 (FE_PDMA_OFFSET + 0x4C) ++#define FE_TX_BASE_PTR3 (FE_PDMA_OFFSET + 0x50) ++#define FE_TX_MAX_CNT3 (FE_PDMA_OFFSET + 0x54) ++#define FE_TX_CTX_IDX3 (FE_PDMA_OFFSET + 0x58) ++#define FE_TX_DTX_IDX3 (FE_PDMA_OFFSET + 0x5C) ++#define FE_RX_BASE_PTR1 (FE_PDMA_OFFSET + 0x60) ++#define FE_RX_MAX_CNT1 (FE_PDMA_OFFSET + 0x64) ++#define FE_RX_CALC_IDX1 (FE_PDMA_OFFSET + 0x68) ++#define FE_RX_DRX_IDX1 (FE_PDMA_OFFSET + 0x6C) ++ ++#define RT5350_SDM_CFG (RT5350_SDM_OFFSET + 0x00) //Switch DMA configuration ++#define RT5350_SDM_RRING (RT5350_SDM_OFFSET + 0x04) //Switch DMA Rx Ring ++#define RT5350_SDM_TRING (RT5350_SDM_OFFSET + 0x08) //Switch DMA Tx Ring ++#define RT5350_SDM_MAC_ADRL (RT5350_SDM_OFFSET + 0x0C) //Switch MAC address LSB ++#define RT5350_SDM_MAC_ADRH (RT5350_SDM_OFFSET + 0x10) //Switch MAC Address MSB ++#define RT5350_SDM_TPCNT (RT5350_SDM_OFFSET + 0x100) //Switch DMA Tx packet count ++#define RT5350_SDM_TBCNT (RT5350_SDM_OFFSET + 0x104) //Switch DMA Tx byte count ++#define RT5350_SDM_RPCNT (RT5350_SDM_OFFSET + 0x108) //Switch DMA rx packet count ++#define RT5350_SDM_RBCNT (RT5350_SDM_OFFSET + 0x10C) //Switch DMA rx byte count ++#define RT5350_SDM_CS_ERR (RT5350_SDM_OFFSET + 0x110) //Switch DMA rx checksum error count ++ ++#define RT5350_SDM_ICS_EN BIT(16) ++#define RT5350_SDM_TCS_EN BIT(17) ++#define RT5350_SDM_UCS_EN BIT(18) ++ ++ ++/* MDIO_CFG register bits */ ++#define FE_MDIO_CFG_AUTO_POLL_EN BIT(29) ++#define FE_MDIO_CFG_GP1_BP_EN BIT(16) ++#define FE_MDIO_CFG_GP1_FRC_EN BIT(15) ++#define FE_MDIO_CFG_GP1_SPEED_10 (0 << 13) ++#define FE_MDIO_CFG_GP1_SPEED_100 (1 << 13) ++#define FE_MDIO_CFG_GP1_SPEED_1000 (2 << 13) ++#define FE_MDIO_CFG_GP1_DUPLEX BIT(12) ++#define FE_MDIO_CFG_GP1_FC_TX BIT(11) ++#define FE_MDIO_CFG_GP1_FC_RX BIT(10) ++#define FE_MDIO_CFG_GP1_LNK_DWN BIT(9) ++#define FE_MDIO_CFG_GP1_AN_FAIL BIT(8) ++#define FE_MDIO_CFG_MDC_CLK_DIV_1 (0 << 6) ++#define FE_MDIO_CFG_MDC_CLK_DIV_2 (1 << 6) ++#define FE_MDIO_CFG_MDC_CLK_DIV_4 (2 << 6) ++#define FE_MDIO_CFG_MDC_CLK_DIV_8 (3 << 6) ++#define FE_MDIO_CFG_TURBO_MII_FREQ BIT(5) ++#define FE_MDIO_CFG_TURBO_MII_MODE BIT(4) ++#define FE_MDIO_CFG_RX_CLK_SKEW_0 (0 << 2) ++#define FE_MDIO_CFG_RX_CLK_SKEW_200 (1 << 2) ++#define FE_MDIO_CFG_RX_CLK_SKEW_400 (2 << 2) ++#define FE_MDIO_CFG_RX_CLK_SKEW_INV (3 << 2) ++#define FE_MDIO_CFG_TX_CLK_SKEW_0 0 ++#define FE_MDIO_CFG_TX_CLK_SKEW_200 1 ++#define FE_MDIO_CFG_TX_CLK_SKEW_400 2 ++#define FE_MDIO_CFG_TX_CLK_SKEW_INV 3 ++ ++/* uni-cast port */ ++#define FE_GDM1_ICS_EN BIT(22) ++#define FE_GDM1_TCS_EN BIT(21) ++#define FE_GDM1_UCS_EN BIT(20) ++#define FE_GDM1_JMB_EN BIT(19) ++#define FE_GDM1_STRPCRC BIT(16) ++#define FE_GDM1_UFRC_P_CPU (0 << 12) ++#define FE_GDM1_UFRC_P_GDMA1 (1 << 12) ++#define FE_GDM1_UFRC_P_PPE (6 << 12) ++ ++/* checksums */ ++#define FE_ICS_GEN_EN BIT(2) ++#define FE_UCS_GEN_EN BIT(1) ++#define FE_TCS_GEN_EN BIT(0) ++ ++/* dma ring */ ++#define FE_PST_DRX_IDX0 BIT(16) ++#define 
FE_PST_DTX_IDX3 BIT(3) ++#define FE_PST_DTX_IDX2 BIT(2) ++#define FE_PST_DTX_IDX1 BIT(1) ++#define FE_PST_DTX_IDX0 BIT(0) ++ ++#define FE_TX_WB_DDONE BIT(6) ++#define FE_RX_DMA_BUSY BIT(3) ++#define FE_TX_DMA_BUSY BIT(1) ++#define FE_RX_DMA_EN BIT(2) ++#define FE_TX_DMA_EN BIT(0) ++ ++#define FE_PDMA_SIZE_4DWORDS (0 << 4) ++#define FE_PDMA_SIZE_8DWORDS (1 << 4) ++#define FE_PDMA_SIZE_16DWORDS (2 << 4) ++ ++#define FE_US_CYC_CNT_MASK 0xff ++#define FE_US_CYC_CNT_SHIFT 0x8 ++#define FE_US_CYC_CNT_DIVISOR 1000000 ++ ++#define RX_DMA_PLEN0(_x) (((_x) >> 16) & 0x3fff) ++#define RX_DMA_LSO BIT(30) ++#define RX_DMA_DONE BIT(31) ++#define RX_DMA_L4VALID BIT(30) ++ ++struct fe_rx_dma { ++ unsigned int rxd1; ++ unsigned int rxd2; ++ unsigned int rxd3; ++ unsigned int rxd4; ++} __packed __aligned(4); ++ ++#define TX_DMA_PLEN0_MASK ((0x3fff) << 16) ++#define TX_DMA_PLEN0(_x) (((_x) & 0x3fff) << 16) ++#define TX_DMA_PLEN1(_x) ((_x) & 0x3fff) ++#define TX_DMA_LS1 BIT(14) ++#define TX_DMA_LSO BIT(30) ++#define TX_DMA_DONE BIT(31) ++#define TX_DMA_QN(_x) ((_x) << 16) ++#define TX_DMA_PN(_x) ((_x) << 24) ++#define TX_DMA_QN_MASK TX_DMA_QN(0x7) ++#define TX_DMA_PN_MASK TX_DMA_PN(0x7) ++#define TX_DMA_CHKSUM (0x7 << 29) ++ ++struct fe_tx_dma { ++ unsigned int txd1; ++ unsigned int txd2; ++ unsigned int txd3; ++ unsigned int txd4; ++} __packed __aligned(4); ++ ++struct fe_priv; ++ ++struct fe_phy { ++ struct phy_device *phy[8]; ++ struct device_node *phy_node[8]; ++ const __be32 *phy_fixed[8]; ++ int duplex[8]; ++ int speed[8]; ++ int tx_fc[8]; ++ int rx_fc[8]; ++ spinlock_t lock; ++ ++ int (*connect)(struct fe_priv *priv); ++ void (*disconnect)(struct fe_priv *priv); ++ void (*start)(struct fe_priv *priv); ++ void (*stop)(struct fe_priv *priv); ++}; ++ ++struct fe_soc_data ++{ ++ unsigned char mac[6]; ++ const u32 *reg_table; ++ ++ void (*init_data)(struct fe_soc_data *data); ++ void (*reset_fe)(void); ++ void (*set_mac)(struct fe_priv *priv, unsigned char *mac); ++ void (*fwd_config)(struct fe_priv *priv); ++ void (*tx_dma)(struct fe_priv *priv, int idx, struct sk_buff *skb); ++ void (*rx_dma)(struct fe_priv *priv, int idx, int len); ++ int (*switch_init)(struct fe_priv *priv); ++ int (*switch_config)(struct fe_priv *priv); ++ void (*port_init)(struct fe_priv *priv, struct device_node *port); ++ int (*has_carrier)(struct fe_priv *priv); ++ int (*mdio_init)(struct fe_priv *priv); ++ void (*mdio_cleanup)(struct fe_priv *priv); ++ int (*mdio_write)(struct mii_bus *bus, int phy_addr, int phy_reg, u16 val); ++ int (*mdio_read)(struct mii_bus *bus, int phy_addr, int phy_reg); ++ void (*mdio_adjust_link)(struct fe_priv *priv, int port); ++ int (*get_skb_header)(struct sk_buff *skb, void **iphdr, void **tcph, u64 *hdr_flags, void *priv); ++ ++ void *swpriv; ++ u32 pdma_glo_cfg; ++ u32 rx_dly_int; ++ u32 tx_dly_int; ++ u32 checksum_bit; ++ u32 tso; ++ ++ int min_pkt_len; ++}; ++ ++struct fe_priv ++{ ++ spinlock_t page_lock; ++ ++ struct fe_soc_data *soc; ++ struct net_device *netdev; ++ struct device *device; ++ unsigned long sysclk; ++ ++ struct fe_rx_dma *rx_dma; ++ struct napi_struct rx_napi; ++ struct sk_buff *rx_skb[NUM_DMA_DESC]; ++ dma_addr_t rx_phys; ++ ++ struct fe_tx_dma *tx_dma; ++ struct tasklet_struct tx_tasklet; ++ struct sk_buff *tx_skb[NUM_DMA_DESC]; ++ dma_addr_t tx_phys; ++ unsigned int tx_free_idx; ++ ++ struct fe_phy *phy; ++ struct mii_bus *mii_bus; ++ int mii_irq[PHY_MAX_ADDR]; ++ ++ int link[8]; ++ ++ struct net_lro_mgr lro_mgr; ++ struct net_lro_desc lro_arr[8]; ++}; ++ ++extern const struct 
of_device_id of_fe_match[]; ++ ++void fe_w32(u32 val, unsigned reg); ++u32 fe_r32(unsigned reg); ++ ++#endif /* FE_ETH_H */ +--- /dev/null ++++ b/drivers/net/ethernet/ralink/soc_mt7620.c +@@ -0,0 +1,172 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; version 2 of the License ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. ++ * ++ * Copyright (C) 2009-2013 John Crispin ++ */ ++ ++#include ++#include ++#include ++ ++#include ++ ++#include ++#include "ralink_soc_eth.h" ++#include "gsw_mt7620a.h" ++ ++#define MT7620A_CDMA_CSG_CFG 0x400 ++#define MT7620_DMA_VID (MT7620A_CDMA_CSG_CFG | 0x30) ++#define MT7620A_DMA_2B_OFFSET BIT(31) ++#define MT7620A_RESET_FE BIT(21) ++#define MT7620A_RESET_ESW BIT(23) ++#define MT7620_L4_VALID BIT(23) ++ ++#define SYSC_REG_RESET_CTRL 0x34 ++#define MAX_RX_LENGTH 1536 ++ ++#define CDMA_ICS_EN BIT(2) ++#define CDMA_UCS_EN BIT(1) ++#define CDMA_TCS_EN BIT(0) ++ ++#define GDMA_ICS_EN BIT(22) ++#define GDMA_TCS_EN BIT(21) ++#define GDMA_UCS_EN BIT(20) ++ ++static const u32 rt5350_reg_table[FE_REG_COUNT] = { ++ [FE_REG_PDMA_GLO_CFG] = RT5350_PDMA_GLO_CFG, ++ [FE_REG_PDMA_RST_CFG] = RT5350_PDMA_RST_CFG, ++ [FE_REG_DLY_INT_CFG] = RT5350_DLY_INT_CFG, ++ [FE_REG_TX_BASE_PTR0] = RT5350_TX_BASE_PTR0, ++ [FE_REG_TX_MAX_CNT0] = RT5350_TX_MAX_CNT0, ++ [FE_REG_TX_CTX_IDX0] = RT5350_TX_CTX_IDX0, ++ [FE_REG_RX_BASE_PTR0] = RT5350_RX_BASE_PTR0, ++ [FE_REG_RX_MAX_CNT0] = RT5350_RX_MAX_CNT0, ++ [FE_REG_RX_CALC_IDX0] = RT5350_RX_CALC_IDX0, ++ [FE_REG_FE_INT_ENABLE] = RT5350_FE_INT_ENABLE, ++ [FE_REG_FE_INT_STATUS] = RT5350_FE_INT_STATUS, ++ [FE_REG_FE_DMA_VID_BASE] = MT7620_DMA_VID, ++}; ++ ++static void mt7620_fe_reset(void) ++{ ++ rt_sysc_w32(MT7620A_RESET_FE | MT7620A_RESET_ESW, SYSC_REG_RESET_CTRL); ++ rt_sysc_w32(0, SYSC_REG_RESET_CTRL); ++} ++ ++static void mt7620_fwd_config(struct fe_priv *priv) ++{ ++ int i; ++ ++ /* frame engine will push VLAN tag regarding to VIDX feild in Tx desc. 
*/ ++ for (i = 0; i < 16; i += 2) ++ fe_w32(((i + 1) << 16) + i, MT7620_DMA_VID + (i * 2)); ++ ++ fe_w32(fe_r32(MT7620A_GDMA1_FWD_CFG) & ~7, MT7620A_GDMA1_FWD_CFG); ++ fe_w32(fe_r32(MT7620A_GDMA1_FWD_CFG) | (GDMA_ICS_EN | GDMA_TCS_EN | GDMA_UCS_EN), MT7620A_GDMA1_FWD_CFG); ++ fe_w32(fe_r32(MT7620A_CDMA_CSG_CFG) | (CDMA_ICS_EN | CDMA_UCS_EN | CDMA_TCS_EN), MT7620A_CDMA_CSG_CFG); ++} ++ ++static void mt7620_tx_dma(struct fe_priv *priv, int idx, struct sk_buff *skb) ++{ ++ unsigned int nr_frags = 0; ++ unsigned int len = 0; ++ ++ if (skb) { ++ nr_frags = skb_shinfo(skb)->nr_frags; ++ len = skb->len - skb->data_len; ++ } ++ ++ if (!skb) ++ priv->tx_dma[idx].txd2 = TX_DMA_LSO | TX_DMA_DONE; ++ else if (!nr_frags) ++ priv->tx_dma[idx].txd2 = TX_DMA_LSO | TX_DMA_PLEN0(len); ++ else ++ priv->tx_dma[idx].txd2 = TX_DMA_PLEN0(len); ++ ++ if(skb && vlan_tx_tag_present(skb)) ++ priv->tx_dma[idx].txd4 = 0x80 | (vlan_tx_tag_get(skb) >> 13) << 4 | (vlan_tx_tag_get(skb) & 0xF); ++ else ++ priv->tx_dma[idx].txd4 = 0; ++} ++ ++static void mt7620_rx_dma(struct fe_priv *priv, int idx, int len) ++{ ++ priv->rx_dma[idx].rxd2 = RX_DMA_PLEN0(len); ++} ++ ++#ifdef CONFIG_INET_LRO ++static int ++mt7620_get_skb_header(struct sk_buff *skb, void **iphdr, void **tcph, ++ u64 *hdr_flags, void *_priv) ++{ ++ struct iphdr *iph = NULL; ++ int vhdr_len = 0; ++ ++ /* ++ * Make sure that this packet is Ethernet II, is not VLAN ++ * tagged, is IPv4, has a valid IP header, and is TCP. ++ */ ++ if (skb->protocol == 0x0081) ++ vhdr_len = VLAN_HLEN; ++ ++ iph = (struct iphdr *)(skb->data + vhdr_len); ++ if(iph->protocol != IPPROTO_TCP) ++ return -1; ++ ++ *iphdr = iph; ++ *tcph = skb->data + (iph->ihl << 2) + vhdr_len; ++ *hdr_flags = LRO_IPV4 | LRO_TCP; ++ ++ return 0; ++} ++#endif ++ ++static void mt7620_init_data(struct fe_soc_data *data) ++{ ++ if (mt7620_get_eco() >= 5) ++ data->tso = 1; ++} ++ ++static struct fe_soc_data mt7620_data = { ++ .mac = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 }, ++ .init_data = mt7620_init_data, ++ .reset_fe = mt7620_fe_reset, ++ .set_mac = mt7620_set_mac, ++ .fwd_config = mt7620_fwd_config, ++ .tx_dma = mt7620_tx_dma, ++ .rx_dma = mt7620_rx_dma, ++ .switch_init = mt7620_gsw_probe, ++ .switch_config = mt7620_gsw_config, ++ .port_init = mt7620_port_init, ++ .min_pkt_len = 0, ++ .reg_table = rt5350_reg_table, ++ .pdma_glo_cfg = FE_PDMA_SIZE_16DWORDS | MT7620A_DMA_2B_OFFSET, ++ .rx_dly_int = RT5350_RX_DLY_INT, ++ .tx_dly_int = RT5350_TX_DLY_INT, ++ .checksum_bit = MT7620_L4_VALID, ++ .has_carrier = mt7620a_has_carrier, ++ .mdio_read = mt7620_mdio_read, ++ .mdio_write = mt7620_mdio_write, ++ .mdio_adjust_link = mt7620_mdio_link_adjust, ++#ifdef CONFIG_INET_LRO ++ .get_skb_header = mt7620_get_skb_header, ++#endif ++}; ++ ++const struct of_device_id of_fe_match[] = { ++ { .compatible = "ralink,mt7620a-eth", .data = &mt7620_data }, ++ {}, ++}; ++ ++MODULE_DEVICE_TABLE(of, of_fe_match); +--- /dev/null ++++ b/drivers/net/ethernet/ralink/soc_rt2880.c +@@ -0,0 +1,51 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; version 2 of the License ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. ++ * ++ * Copyright (C) 2009-2013 John Crispin ++ */ ++ ++#include ++ ++#include ++ ++#include "ralink_soc_eth.h" ++#include "mdio_rt2880.h" ++ ++#define SYSC_REG_RESET_CTRL 0x034 ++#define RT2880_RESET_FE BIT(18) ++ ++void rt2880_fe_reset(void) ++{ ++ rt_sysc_w32(RT2880_RESET_FE, SYSC_REG_RESET_CTRL); ++} ++ ++struct fe_soc_data rt2880_data = { ++ .mac = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 }, ++ .reset_fe = rt2880_fe_reset, ++ .min_pkt_len = 64, ++ .pdma_glo_cfg = FE_PDMA_SIZE_4DWORDS, ++ .checksum_bit = RX_DMA_L4VALID, ++ .rx_dly_int = FE_RX_DLY_INT, ++ .tx_dly_int = FE_TX_DLY_INT, ++ .mdio_read = rt2880_mdio_read, ++ .mdio_write = rt2880_mdio_write, ++ .mdio_adjust_link = rt2880_mdio_link_adjust, ++}; ++ ++const struct of_device_id of_fe_match[] = { ++ { .compatible = "ralink,rt2880-eth", .data = &rt2880_data }, ++ {}, ++}; ++ ++MODULE_DEVICE_TABLE(of, of_fe_match); +--- /dev/null ++++ b/drivers/net/ethernet/ralink/soc_rt305x.c +@@ -0,0 +1,113 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; version 2 of the License ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. 
++ * ++ * Copyright (C) 2009-2013 John Crispin ++ */ ++ ++#include ++ ++#include ++ ++#include "ralink_soc_eth.h" ++ ++#define RT305X_RESET_FE BIT(21) ++#define RT305X_RESET_ESW BIT(23) ++#define SYSC_REG_RESET_CTRL 0x034 ++ ++static const u32 rt5350_reg_table[FE_REG_COUNT] = { ++ [FE_REG_PDMA_GLO_CFG] = RT5350_PDMA_GLO_CFG, ++ [FE_REG_PDMA_RST_CFG] = RT5350_PDMA_RST_CFG, ++ [FE_REG_DLY_INT_CFG] = RT5350_DLY_INT_CFG, ++ [FE_REG_TX_BASE_PTR0] = RT5350_TX_BASE_PTR0, ++ [FE_REG_TX_MAX_CNT0] = RT5350_TX_MAX_CNT0, ++ [FE_REG_TX_CTX_IDX0] = RT5350_TX_CTX_IDX0, ++ [FE_REG_RX_BASE_PTR0] = RT5350_RX_BASE_PTR0, ++ [FE_REG_RX_MAX_CNT0] = RT5350_RX_MAX_CNT0, ++ [FE_REG_RX_CALC_IDX0] = RT5350_RX_CALC_IDX0, ++ [FE_REG_FE_INT_ENABLE] = RT5350_FE_INT_ENABLE, ++ [FE_REG_FE_INT_STATUS] = RT5350_FE_INT_STATUS, ++ [FE_REG_FE_DMA_VID_BASE] = 0, ++}; ++ ++static void rt305x_fe_reset(void) ++{ ++ rt_sysc_w32(RT305X_RESET_FE, SYSC_REG_RESET_CTRL); ++ rt_sysc_w32(0, SYSC_REG_RESET_CTRL); ++} ++ ++static void rt5350_set_mac(struct fe_priv *priv, unsigned char *mac) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&priv->page_lock, flags); ++ fe_w32((mac[0] << 8) | mac[1], RT5350_SDM_MAC_ADRH); ++ fe_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5], ++ RT5350_SDM_MAC_ADRL); ++ spin_unlock_irqrestore(&priv->page_lock, flags); ++} ++ ++static void rt5350_fwd_config(struct fe_priv *priv) ++{ ++ unsigned long sysclk = priv->sysclk; ++ ++ if (sysclk) { ++ sysclk /= FE_US_CYC_CNT_DIVISOR; ++ sysclk <<= FE_US_CYC_CNT_SHIFT; ++ ++ fe_w32((fe_r32(FE_FE_GLO_CFG) & ++ ~(FE_US_CYC_CNT_MASK << FE_US_CYC_CNT_SHIFT)) | sysclk, ++ FE_FE_GLO_CFG); ++ } ++ ++ fe_w32(fe_r32(RT5350_SDM_CFG) & ~0xffff, RT5350_SDM_CFG); ++ fe_w32(fe_r32(RT5350_SDM_CFG) | RT5350_SDM_ICS_EN | RT5350_SDM_TCS_EN | RT5350_SDM_UCS_EN, ++ RT5350_SDM_CFG); ++} ++ ++static void rt5350_fe_reset(void) ++{ ++ rt_sysc_w32(RT305X_RESET_FE | RT305X_RESET_ESW, SYSC_REG_RESET_CTRL); ++ rt_sysc_w32(0, SYSC_REG_RESET_CTRL); ++} ++ ++static struct fe_soc_data rt3050_data = { ++ .mac = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 }, ++ .reset_fe = rt305x_fe_reset, ++ .min_pkt_len = 64, ++ .pdma_glo_cfg = FE_PDMA_SIZE_4DWORDS, ++ .checksum_bit = RX_DMA_L4VALID, ++ .rx_dly_int = FE_RX_DLY_INT, ++ .tx_dly_int = FE_TX_DLY_INT, ++}; ++ ++static struct fe_soc_data rt5350_data = { ++ .mac = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 }, ++ .reg_table = rt5350_reg_table, ++ .reset_fe = rt5350_fe_reset, ++ .set_mac = rt5350_set_mac, ++ .fwd_config = rt5350_fwd_config, ++ .min_pkt_len = 64, ++ .pdma_glo_cfg = FE_PDMA_SIZE_4DWORDS, ++ .checksum_bit = RX_DMA_L4VALID, ++ .rx_dly_int = RT5350_RX_DLY_INT, ++ .tx_dly_int = RT5350_TX_DLY_INT, ++}; ++ ++const struct of_device_id of_fe_match[] = { ++ { .compatible = "ralink,rt3050-eth", .data = &rt3050_data }, ++ { .compatible = "ralink,rt5350-eth", .data = &rt5350_data }, ++ {}, ++}; ++ ++MODULE_DEVICE_TABLE(of, of_fe_match); +--- /dev/null ++++ b/drivers/net/ethernet/ralink/soc_rt3883.c +@@ -0,0 +1,60 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; version 2 of the License ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. ++ * ++ * Copyright (C) 2009-2013 John Crispin ++ */ ++ ++#include ++ ++#include ++ ++#include "ralink_soc_eth.h" ++#include "mdio_rt2880.h" ++ ++#define RT3883_SYSC_REG_RSTCTRL 0x34 ++#define RT3883_RSTCTRL_FE BIT(21) ++ ++static void rt3883_fe_reset(void) ++{ ++ u32 t; ++ ++ t = rt_sysc_r32(RT3883_SYSC_REG_RSTCTRL); ++ t |= RT3883_RSTCTRL_FE; ++ rt_sysc_w32(t , RT3883_SYSC_REG_RSTCTRL); ++ ++ t &= ~RT3883_RSTCTRL_FE; ++ rt_sysc_w32(t, RT3883_SYSC_REG_RSTCTRL); ++} ++ ++static struct fe_soc_data rt3883_data = { ++ .mac = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 }, ++ .reset_fe = rt3883_fe_reset, ++ .min_pkt_len = 64, ++ .pdma_glo_cfg = FE_PDMA_SIZE_4DWORDS, ++ .rx_dly_int = FE_RX_DLY_INT, ++ .tx_dly_int = FE_TX_DLY_INT, ++ .checksum_bit = RX_DMA_L4VALID, ++ .mdio_read = rt2880_mdio_read, ++ .mdio_write = rt2880_mdio_write, ++ .mdio_adjust_link = rt2880_mdio_link_adjust, ++ .port_init = rt2880_port_init, ++}; ++ ++const struct of_device_id of_fe_match[] = { ++ { .compatible = "ralink,rt3883-eth", .data = &rt3883_data }, ++ {}, ++}; ++ ++MODULE_DEVICE_TABLE(of, of_fe_match); ++ diff --git a/target/linux/ramips/patches-3.10/0118-mtd-fix-cfi-cmdset-0002-erase-status-check.patch b/target/linux/ramips/patches-3.10/0118-mtd-fix-cfi-cmdset-0002-erase-status-check.patch deleted file mode 100644 index 709d1d8271..0000000000 --- a/target/linux/ramips/patches-3.10/0118-mtd-fix-cfi-cmdset-0002-erase-status-check.patch +++ /dev/null @@ -1,29 +0,0 @@ -From 413b2ed67d8e4dc1242edb9286ea3f634d10a6ba Mon Sep 17 00:00:00 2001 -From: John Crispin -Date: Mon, 15 Jul 2013 00:38:51 +0200 -Subject: [PATCH 32/33] mtd: fix cfi cmdset 0002 erase status check - ---- - drivers/mtd/chips/cfi_cmdset_0002.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - ---- a/drivers/mtd/chips/cfi_cmdset_0002.c -+++ b/drivers/mtd/chips/cfi_cmdset_0002.c -@@ -1957,7 +1957,7 @@ static int __xipram do_erase_chip(struct - chip->erase_suspended = 0; - } - -- if (chip_ready(map, adr)) -+ if (chip_good(map, adr, map_word_ff(map))) - break; - - if (time_after(jiffies, timeo)) { -@@ -2046,7 +2046,7 @@ static int __xipram do_erase_oneblock(st - chip->erase_suspended = 0; - } - -- if (chip_ready(map, adr)) { -+ if (chip_good(map, adr, map_word_ff(map))) { - xip_enable(map, chip, adr); - break; - } diff --git a/target/linux/ramips/patches-3.10/0119-USB-phy-add-ralink-SoC-driver.patch b/target/linux/ramips/patches-3.10/0119-USB-phy-add-ralink-SoC-driver.patch new file mode 100644 index 0000000000..fbec32cacb --- /dev/null +++ b/target/linux/ramips/patches-3.10/0119-USB-phy-add-ralink-SoC-driver.patch @@ -0,0 +1,229 @@ +From 71e09658d3544143e46ae76e76da8a322cd73e1d Mon Sep 17 00:00:00 2001 +From: John Crispin +Date: Sun, 14 Jul 2013 23:31:19 +0200 +Subject: [PATCH 119/133] USB: phy: add ralink SoC driver + +Signed-off-by: John Crispin +--- + drivers/usb/phy/Kconfig | 8 ++ + drivers/usb/phy/Makefile | 1 + + drivers/usb/phy/ralink-phy.c | 191 ++++++++++++++++++++++++++++++++++++++++++ + 3 files changed, 200 insertions(+) + create mode 100644 drivers/usb/phy/ralink-phy.c + +--- a/drivers/usb/phy/Kconfig ++++ b/drivers/usb/phy/Kconfig +@@ -210,4 +210,12 @@ config USB_ULPI_VIEWPORT + Provides read/write operations to the ULPI phy register set for + controllers with a viewport register (e.g. Chipidea/ARC controllers). 
+ ++config RALINK_USBPHY ++ bool "Ralink USB PHY controller Driver" ++ depends on MIPS && RALINK ++ select USB_OTG_UTILS ++ help ++ Enable this to support ralink USB phy controller for ralink ++ SoCs. ++ + endif # USB_PHY +--- a/drivers/usb/phy/Makefile ++++ b/drivers/usb/phy/Makefile +@@ -31,3 +31,4 @@ obj-$(CONFIG_USB_MXS_PHY) += phy-mxs-us + obj-$(CONFIG_USB_RCAR_PHY) += phy-rcar-usb.o + obj-$(CONFIG_USB_ULPI) += phy-ulpi.o + obj-$(CONFIG_USB_ULPI_VIEWPORT) += phy-ulpi-viewport.o ++obj-$(CONFIG_RALINK_USBPHY) += ralink-phy.o +--- /dev/null ++++ b/drivers/usb/phy/ralink-phy.c +@@ -0,0 +1,191 @@ ++/* ++ * Copyright (C) 2013 John Crispin ++ * ++ * based on: Renesas R-Car USB phy driver ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#define RT_SYSC_REG_SYSCFG1 0x014 ++#define RT_SYSC_REG_CLKCFG1 0x030 ++#define RT_SYSC_REG_USB_PHY_CFG 0x05c ++ ++#define RT_RSTCTRL_UDEV BIT(25) ++#define RT_RSTCTRL_UHST BIT(22) ++#define RT_SYSCFG1_USB0_HOST_MODE BIT(10) ++ ++#define MT7620_CLKCFG1_UPHY0_CLK_EN BIT(25) ++#define RT_CLKCFG1_UPHY1_CLK_EN BIT(20) ++#define RT_CLKCFG1_UPHY0_CLK_EN BIT(18) ++ ++#define USB_PHY_UTMI_8B60M BIT(1) ++#define UDEV_WAKEUP BIT(0) ++ ++static atomic_t usb_pwr_ref = ATOMIC_INIT(0); ++static struct reset_control *rstdev; ++static struct reset_control *rsthost; ++static u32 phy_clk; ++ ++static void usb_phy_enable(int state) ++{ ++ if (state) ++ rt_sysc_m32(0, phy_clk, RT_SYSC_REG_CLKCFG1); ++ else ++ rt_sysc_m32(phy_clk, 0, RT_SYSC_REG_CLKCFG1); ++ mdelay(100); ++} ++ ++static int usb_power_on(struct usb_phy *phy) ++{ ++ if (atomic_inc_return(&usb_pwr_ref) == 1) { ++ u32 t; ++ ++ usb_phy_enable(1); ++ ++// reset_control_assert(rstdev); ++// reset_control_assert(rsthost); ++ ++ if (OTG_STATE_B_HOST) { ++ rt_sysc_m32(0, RT_SYSCFG1_USB0_HOST_MODE, RT_SYSC_REG_SYSCFG1); ++ reset_control_deassert(rsthost); ++ } else { ++ rt_sysc_m32(RT_SYSCFG1_USB0_HOST_MODE, 0, RT_SYSC_REG_SYSCFG1); ++ reset_control_deassert(rstdev); ++ } ++ mdelay(100); ++ ++ t = rt_sysc_r32(RT_SYSC_REG_USB_PHY_CFG); ++ dev_info(phy->dev, "remote usb device wakeup %s\n", ++ (t & UDEV_WAKEUP) ? 
("enabbled") : ("disabled")); ++ if (t & USB_PHY_UTMI_8B60M) ++ dev_info(phy->dev, "UTMI 8bit 60MHz\n"); ++ else ++ dev_info(phy->dev, "UTMI 16bit 30MHz\n"); ++ } ++ ++ return 0; ++} ++ ++static void usb_power_off(struct usb_phy *phy) ++{ ++ if (atomic_dec_return(&usb_pwr_ref) == 0) { ++ usb_phy_enable(0); ++ reset_control_assert(rstdev); ++ reset_control_assert(rsthost); ++ } ++} ++ ++static int usb_set_host(struct usb_otg *otg, struct usb_bus *host) ++{ ++ otg->gadget = NULL; ++ otg->host = host; ++ ++ return 0; ++} ++ ++static int usb_set_peripheral(struct usb_otg *otg, ++ struct usb_gadget *gadget) ++{ ++ otg->host = NULL; ++ otg->gadget = gadget; ++ ++ return 0; ++} ++ ++static const struct of_device_id ralink_usbphy_dt_match[] = { ++ { .compatible = "ralink,rt3xxx-usbphy", .data = (void *) (RT_CLKCFG1_UPHY1_CLK_EN | RT_CLKCFG1_UPHY0_CLK_EN) }, ++ { .compatible = "ralink,mt7620a-usbphy", .data = (void *) MT7620_CLKCFG1_UPHY0_CLK_EN }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(of, ralink_usbphy_dt_match); ++ ++static int usb_phy_probe(struct platform_device *pdev) ++{ ++ const struct of_device_id *match; ++ struct device *dev = &pdev->dev; ++ struct usb_otg *otg; ++ struct usb_phy *phy; ++ int ret; ++ ++ match = of_match_device(ralink_usbphy_dt_match, &pdev->dev); ++ phy_clk = (int) match->data; ++ ++ rsthost = devm_reset_control_get(&pdev->dev, "host"); ++ if (IS_ERR(rsthost)) ++ return PTR_ERR(rsthost); ++ ++ rstdev = devm_reset_control_get(&pdev->dev, "device"); ++ if (IS_ERR(rstdev)) ++ return PTR_ERR(rstdev); ++ ++ phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL); ++ if (!phy) { ++ dev_err(&pdev->dev, "unable to allocate memory for USB PHY\n"); ++ return -ENOMEM; ++ } ++ ++ otg = devm_kzalloc(&pdev->dev, sizeof(*otg), GFP_KERNEL); ++ if (!otg) { ++ dev_err(&pdev->dev, "unable to allocate memory for USB OTG\n"); ++ return -ENOMEM; ++ } ++ ++ phy->dev = dev; ++ phy->label = dev_name(dev); ++ phy->init = usb_power_on; ++ phy->shutdown = usb_power_off; ++ otg->set_host = usb_set_host; ++ otg->set_peripheral = usb_set_peripheral; ++ otg->phy = phy; ++ phy->otg = otg; ++ ret = usb_add_phy(phy, USB_PHY_TYPE_USB2); ++ ++ if (ret < 0) { ++ dev_err(dev, "usb phy addition error\n"); ++ return ret; ++ } ++ ++ platform_set_drvdata(pdev, phy); ++ ++ dev_info(&pdev->dev, "loaded\n"); ++ ++ return ret; ++} ++ ++static int usb_phy_remove(struct platform_device *pdev) ++{ ++ struct usb_phy *phy = platform_get_drvdata(pdev); ++ ++ usb_remove_phy(phy); ++ ++ return 0; ++} ++ ++static struct platform_driver usb_phy_driver = { ++ .driver = { ++ .owner = THIS_MODULE, ++ .name = "rt3xxx-usbphy", ++ .of_match_table = of_match_ptr(ralink_usbphy_dt_match), ++ }, ++ .probe = usb_phy_probe, ++ .remove = usb_phy_remove, ++}; ++ ++module_platform_driver(usb_phy_driver); ++ ++MODULE_LICENSE("GPL v2"); ++MODULE_DESCRIPTION("Ralink USB phy"); ++MODULE_AUTHOR("John Crispin "); diff --git a/target/linux/ramips/patches-3.10/0119-mtd-cfi-cmdset-0002-force-word-write.patch b/target/linux/ramips/patches-3.10/0119-mtd-cfi-cmdset-0002-force-word-write.patch deleted file mode 100644 index 73edcb470e..0000000000 --- a/target/linux/ramips/patches-3.10/0119-mtd-cfi-cmdset-0002-force-word-write.patch +++ /dev/null @@ -1,70 +0,0 @@ -From d5b094ea6d435817d295d554d652a97a5014c64f Mon Sep 17 00:00:00 2001 -From: John Crispin -Date: Mon, 15 Jul 2013 00:39:21 +0200 -Subject: [PATCH 33/33] mtd: cfi cmdset 0002 force word write - ---- - drivers/mtd/chips/cfi_cmdset_0002.c | 9 +++++++-- - 1 file changed, 7 insertions(+), 2 deletions(-) - ---- 
a/drivers/mtd/chips/cfi_cmdset_0002.c -+++ b/drivers/mtd/chips/cfi_cmdset_0002.c -@@ -41,7 +41,7 @@ - #include - - #define AMD_BOOTLOC_BUG --#define FORCE_WORD_WRITE 0 -+#define FORCE_WORD_WRITE 1 - - #define MAX_WORD_RETRIES 3 - -@@ -52,7 +52,9 @@ - - static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); - static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); -+#if !FORCE_WORD_WRITE - static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); -+#endif - static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *); - static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *); - static void cfi_amdstd_sync (struct mtd_info *); -@@ -192,6 +194,7 @@ static void fixup_amd_bootblock(struct m - } - #endif - -+#if !FORCE_WORD_WRITE - static void fixup_use_write_buffers(struct mtd_info *mtd) - { - struct map_info *map = mtd->priv; -@@ -201,6 +204,7 @@ static void fixup_use_write_buffers(stru - mtd->_write = cfi_amdstd_write_buffers; - } - } -+#endif /* !FORCE_WORD_WRITE */ - - /* Atmel chips don't use the same PRI format as AMD chips */ - static void fixup_convert_atmel_pri(struct mtd_info *mtd) -@@ -1461,6 +1465,7 @@ static int cfi_amdstd_write_words(struct - /* - * FIXME: interleaved mode not tested, and probably not supported! - */ -+#if !FORCE_WORD_WRITE - static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, - unsigned long adr, const u_char *buf, - int len) -@@ -1585,7 +1590,6 @@ static int __xipram do_write_buffer(stru - return ret; - } - -- - static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len, - size_t *retlen, const u_char *buf) - { -@@ -1660,6 +1664,7 @@ static int cfi_amdstd_write_buffers(stru - - return 0; - } -+#endif /* !FORCE_WORD_WRITE */ - - /* - * Wait for the flash chip to become ready to write data diff --git a/target/linux/ramips/patches-3.10/0120-USB-add-OHCI-EHCI-OF-binding.patch b/target/linux/ramips/patches-3.10/0120-USB-add-OHCI-EHCI-OF-binding.patch new file mode 100644 index 0000000000..6d18894b62 --- /dev/null +++ b/target/linux/ramips/patches-3.10/0120-USB-add-OHCI-EHCI-OF-binding.patch @@ -0,0 +1,174 @@ +From 08d438b69f3023f16b044b07eebee6b9c2302f60 Mon Sep 17 00:00:00 2001 +From: John Crispin +Date: Sun, 14 Jul 2013 23:34:53 +0200 +Subject: [PATCH 120/133] USB: add OHCI/EHCI OF binding + +based on f3bc64d6d1f21c1b92d75f233a37b75d77af6963 + +Signed-off-by: John Crispin +--- + drivers/usb/Makefile | 3 ++- + drivers/usb/host/ehci-platform.c | 21 +++++++++++++++++---- + drivers/usb/host/ohci-platform.c | 37 ++++++++++++++++++++++++++++++++----- + 3 files changed, 51 insertions(+), 10 deletions(-) + +--- a/drivers/usb/Makefile ++++ b/drivers/usb/Makefile +@@ -10,6 +10,8 @@ obj-$(CONFIG_USB_DWC3) += dwc3/ + + obj-$(CONFIG_USB_MON) += mon/ + ++obj-$(CONFIG_USB_PHY) += phy/ ++ + obj-$(CONFIG_PCI) += host/ + obj-$(CONFIG_USB_EHCI_HCD) += host/ + obj-$(CONFIG_USB_ISP116X_HCD) += host/ +@@ -44,7 +46,6 @@ obj-$(CONFIG_USB_MICROTEK) += image/ + obj-$(CONFIG_USB_SERIAL) += serial/ + + obj-$(CONFIG_USB) += misc/ +-obj-$(CONFIG_USB_PHY) += phy/ + obj-$(CONFIG_EARLY_PRINTK_DBGP) += early/ + + obj-$(CONFIG_USB_ATM) += atm/ +--- a/drivers/usb/host/ehci-platform.c ++++ b/drivers/usb/host/ehci-platform.c +@@ -29,6 +29,8 @@ + #include + #include + #include ++#include ++#include + + #include "ehci.h" + +@@ -118,6 +120,15 @@ static int ehci_platform_probe(struct pl + hcd->rsrc_start = 
res_mem->start; + hcd->rsrc_len = resource_size(res_mem); + ++#ifdef CONFIG_USB_PHY ++ hcd->phy = devm_usb_get_phy(&dev->dev, USB_PHY_TYPE_USB2); ++ if (!IS_ERR_OR_NULL(hcd->phy)) { ++ otg_set_host(hcd->phy->otg, ++ &hcd->self); ++ usb_phy_init(hcd->phy); ++ } ++#endif ++ + hcd->regs = devm_ioremap_resource(&dev->dev, res_mem); + if (IS_ERR(hcd->regs)) { + err = PTR_ERR(hcd->regs); +@@ -155,6 +166,9 @@ static int ehci_platform_remove(struct p + if (pdata == &ehci_platform_defaults) + dev->dev.platform_data = NULL; + ++ if (pdata == &ehci_platform_defaults) ++ dev->dev.platform_data = NULL; ++ + return 0; + } + +@@ -199,9 +213,8 @@ static int ehci_platform_resume(struct d + #define ehci_platform_resume NULL + #endif /* CONFIG_PM */ + +-static const struct of_device_id vt8500_ehci_ids[] = { +- { .compatible = "via,vt8500-ehci", }, +- { .compatible = "wm,prizm-ehci", }, ++static const struct of_device_id ralink_ehci_ids[] = { ++ { .compatible = "ralink,rt3xxx-ehci", }, + {} + }; + +@@ -225,7 +238,7 @@ static struct platform_driver ehci_platf + .owner = THIS_MODULE, + .name = "ehci-platform", + .pm = &ehci_platform_pm_ops, +- .of_match_table = of_match_ptr(vt8500_ehci_ids), ++ .of_match_table = of_match_ptr(ralink_ehci_ids), + } + }; + +--- a/drivers/usb/host/ohci-platform.c ++++ b/drivers/usb/host/ohci-platform.c +@@ -16,6 +16,10 @@ + #include + #include + #include ++#include ++#include ++ ++static struct usb_ohci_pdata ohci_platform_defaults; + + static int ohci_platform_reset(struct usb_hcd *hcd) + { +@@ -88,14 +92,22 @@ static int ohci_platform_probe(struct pl + { + struct usb_hcd *hcd; + struct resource *res_mem; +- struct usb_ohci_pdata *pdata = dev->dev.platform_data; ++ struct usb_ohci_pdata *pdata; + int irq; + int err = -ENOMEM; + +- if (!pdata) { +- WARN_ON(1); +- return -ENODEV; +- } ++ /* ++ * use reasonable defaults so platforms don't have to provide these. ++ * with DT probing on ARM, none of these are set. 
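 * (Annotation, not part of the patch: the fallback taken below is the zeroed
 * ohci_platform_defaults declared near the top of this hunk, mirroring the
 * ehci_platform_defaults handling in the EHCI half of this patch.)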
++ */ ++ if (!dev->dev.platform_data) ++ dev->dev.platform_data = &ohci_platform_defaults; ++ if (!dev->dev.dma_mask) ++ dev->dev.dma_mask = &dev->dev.coherent_dma_mask; ++ if (!dev->dev.coherent_dma_mask) ++ dev->dev.coherent_dma_mask = DMA_BIT_MASK(32); ++ ++ pdata = dev->dev.platform_data; + + if (usb_disabled()) + return -ENODEV; +@@ -128,6 +140,12 @@ static int ohci_platform_probe(struct pl + hcd->rsrc_start = res_mem->start; + hcd->rsrc_len = resource_size(res_mem); + ++#ifdef CONFIG_USB_PHY ++ hcd->phy = devm_usb_get_phy(&dev->dev, USB_PHY_TYPE_USB2); ++ if (!IS_ERR_OR_NULL(hcd->phy)) ++ usb_phy_init(hcd->phy); ++#endif ++ + hcd->regs = devm_ioremap_resource(&dev->dev, res_mem); + if (IS_ERR(hcd->regs)) { + err = PTR_ERR(hcd->regs); +@@ -162,6 +180,9 @@ static int ohci_platform_remove(struct p + if (pdata->power_off) + pdata->power_off(dev); + ++ if (pdata == &ohci_platform_defaults) ++ dev->dev.platform_data = NULL; ++ + return 0; + } + +@@ -201,6 +222,11 @@ static int ohci_platform_resume(struct d + #define ohci_platform_resume NULL + #endif /* CONFIG_PM */ + ++static const struct of_device_id ralink_ohci_ids[] = { ++ { .compatible = "ralink,rt3xxx-ohci", }, ++ {} ++}; ++ + static const struct platform_device_id ohci_platform_table[] = { + { "ohci-platform", 0 }, + { } +@@ -221,5 +247,6 @@ static struct platform_driver ohci_platf + .owner = THIS_MODULE, + .name = "ohci-platform", + .pm = &ohci_platform_pm_ops, ++ .of_match_table = of_match_ptr(ralink_ohci_ids), + } + }; diff --git a/target/linux/ramips/patches-3.10/0120-spi-introduce-macros-to-set-bits_per_word_mask.patch b/target/linux/ramips/patches-3.10/0120-spi-introduce-macros-to-set-bits_per_word_mask.patch deleted file mode 100644 index a163d497f8..0000000000 --- a/target/linux/ramips/patches-3.10/0120-spi-introduce-macros-to-set-bits_per_word_mask.patch +++ /dev/null @@ -1,29 +0,0 @@ -From 2922a8de996956893bb98e4aa91be9774c958336 Mon Sep 17 00:00:00 2001 -From: Stephen Warren -Date: Tue, 21 May 2013 20:36:34 -0600 -Subject: [PATCH] spi: introduce macros to set bits_per_word_mask - -Introduce two macros to make setting up spi_master.bits_per_word_mask -easier, and avoid mistakes like writing BIT(n) instead of BIT(n - 1). - -SPI_BPW_MASK is for a single supported value of bits_per_word_mask. - -SPI_BPW_RANGE_MASK represents a contiguous set of bit lengths. 
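(A minimal usage sketch, not taken from either patch — "master" here stands for
a controller driver's struct spi_master, and the widths are invented for
illustration:

	master->bits_per_word_mask = SPI_BPW_MASK(8);           /* 8-bit words only */
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 16); /* contiguous range */

either assignment replaces hand-written BIT(n - 1) arithmetic.)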
- -Signed-off-by: Stephen Warren -Signed-off-by: Mark Brown ---- - include/linux/spi/spi.h | 2 ++ - 1 file changed, 2 insertions(+) - ---- a/include/linux/spi/spi.h -+++ b/include/linux/spi/spi.h -@@ -308,6 +308,8 @@ struct spi_master { - - /* bitmask of supported bits_per_word for transfers */ - u32 bits_per_word_mask; -+#define SPI_BPW_MASK(bits) BIT((bits) - 1) -+#define SPI_BPW_RANGE_MASK(min, max) ((BIT(max) - 1) - (BIT(min) - 1)) - - /* other constraints relevant to this driver */ - u16 flags; diff --git a/target/linux/ramips/patches-3.10/0121-MIPS-ralink-add-rt_sysc_m32-helper.patch b/target/linux/ramips/patches-3.10/0121-MIPS-ralink-add-rt_sysc_m32-helper.patch deleted file mode 100644 index 915c5bed5c..0000000000 --- a/target/linux/ramips/patches-3.10/0121-MIPS-ralink-add-rt_sysc_m32-helper.patch +++ /dev/null @@ -1,26 +0,0 @@ -From 3af962f91035ae4500e63c758c49f1c067bdae09 Mon Sep 17 00:00:00 2001 -From: John Crispin -Date: Sun, 19 May 2013 00:42:23 +0200 -Subject: [PATCH 04/33] MIPS: ralink: add rt_sysc_m32 helper - -Signed-off-by: John Crispin ---- - arch/mips/include/asm/mach-ralink/ralink_regs.h | 7 +++++++ - 1 file changed, 7 insertions(+) - ---- a/arch/mips/include/asm/mach-ralink/ralink_regs.h -+++ b/arch/mips/include/asm/mach-ralink/ralink_regs.h -@@ -26,6 +26,13 @@ static inline u32 rt_sysc_r32(unsigned r - return __raw_readl(rt_sysc_membase + reg); - } - -+static inline void rt_sysc_m32(u32 clr, u32 set, unsigned reg) -+{ -+ u32 val = rt_sysc_r32(reg) & ~clr; -+ -+ __raw_writel(val | set, rt_sysc_membase + reg); -+} -+ - static inline void rt_memc_w32(u32 val, unsigned reg) - { - __raw_writel(val, rt_memc_membase + reg); diff --git a/target/linux/ramips/patches-3.10/0121-USB-adds-dwc_otg.patch b/target/linux/ramips/patches-3.10/0121-USB-adds-dwc_otg.patch new file mode 100644 index 0000000000..8267452c0d --- /dev/null +++ b/target/linux/ramips/patches-3.10/0121-USB-adds-dwc_otg.patch @@ -0,0 +1,24517 @@ +From b74db0e9bae6bbe14e9f725db855621db22e9984 Mon Sep 17 00:00:00 2001 +From: John Crispin +Date: Fri, 15 Mar 2013 20:58:18 +0100 +Subject: [PATCH 121/133] USB: adds dwc_otg + +Signed-off-by: John Crispin +--- + drivers/usb/Kconfig | 2 + + drivers/usb/Makefile | 1 + + drivers/usb/dwc_otg/Kconfig | 24 + + drivers/usb/dwc_otg/Makefile | 25 + + drivers/usb/dwc_otg/dummy_audio.c | 1575 +++++++++++++ + drivers/usb/dwc_otg/dwc_otg_attr.c | 966 ++++++++ + drivers/usb/dwc_otg/dwc_otg_attr.h | 67 + + drivers/usb/dwc_otg/dwc_otg_cil.c | 3692 ++++++++++++++++++++++++++++++ + drivers/usb/dwc_otg/dwc_otg_cil.h | 1098 +++++++++ + drivers/usb/dwc_otg/dwc_otg_cil_intr.c | 750 ++++++ + drivers/usb/dwc_otg/dwc_otg_driver.c | 1273 ++++++++++ + drivers/usb/dwc_otg/dwc_otg_driver.h | 83 + + drivers/usb/dwc_otg/dwc_otg_hcd.c | 2852 +++++++++++++++++++++++ + drivers/usb/dwc_otg/dwc_otg_hcd.h | 668 ++++++ + drivers/usb/dwc_otg/dwc_otg_hcd_intr.c | 1873 +++++++++++++++ + drivers/usb/dwc_otg/dwc_otg_hcd_queue.c | 684 ++++++ + drivers/usb/dwc_otg/dwc_otg_pcd.c | 2523 ++++++++++++++++++++ + drivers/usb/dwc_otg/dwc_otg_pcd.h | 248 ++ + drivers/usb/dwc_otg/dwc_otg_pcd_intr.c | 3654 +++++++++++++++++++++++++++++ + drivers/usb/dwc_otg/dwc_otg_regs.h | 2075 +++++++++++++++++ + drivers/usb/dwc_otg/linux/dwc_otg_plat.h | 260 +++ + 21 files changed, 24393 insertions(+) + create mode 100644 drivers/usb/dwc_otg/Kconfig + create mode 100644 drivers/usb/dwc_otg/Makefile + create mode 100644 drivers/usb/dwc_otg/dummy_audio.c + create mode 100644 drivers/usb/dwc_otg/dwc_otg_attr.c + create mode 100644 
drivers/usb/dwc_otg/dwc_otg_attr.h + create mode 100644 drivers/usb/dwc_otg/dwc_otg_cil.c + create mode 100644 drivers/usb/dwc_otg/dwc_otg_cil.h + create mode 100644 drivers/usb/dwc_otg/dwc_otg_cil_intr.c + create mode 100644 drivers/usb/dwc_otg/dwc_otg_driver.c + create mode 100644 drivers/usb/dwc_otg/dwc_otg_driver.h + create mode 100644 drivers/usb/dwc_otg/dwc_otg_hcd.c + create mode 100644 drivers/usb/dwc_otg/dwc_otg_hcd.h + create mode 100644 drivers/usb/dwc_otg/dwc_otg_hcd_intr.c + create mode 100644 drivers/usb/dwc_otg/dwc_otg_hcd_queue.c + create mode 100644 drivers/usb/dwc_otg/dwc_otg_pcd.c + create mode 100644 drivers/usb/dwc_otg/dwc_otg_pcd.h + create mode 100644 drivers/usb/dwc_otg/dwc_otg_pcd_intr.c + create mode 100644 drivers/usb/dwc_otg/dwc_otg_regs.h + create mode 100644 drivers/usb/dwc_otg/linux/dwc_otg_plat.h + +--- a/drivers/usb/Kconfig ++++ b/drivers/usb/Kconfig +@@ -126,6 +126,8 @@ if USB + + source "drivers/usb/core/Kconfig" + ++source "drivers/usb/dwc_otg/Kconfig" ++ + source "drivers/usb/mon/Kconfig" + + source "drivers/usb/wusbcore/Kconfig" +--- a/drivers/usb/Makefile ++++ b/drivers/usb/Makefile +@@ -7,6 +7,7 @@ + obj-$(CONFIG_USB) += core/ + + obj-$(CONFIG_USB_DWC3) += dwc3/ ++obj-$(CONFIG_DWC_OTG) += dwc_otg/ + + obj-$(CONFIG_USB_MON) += mon/ + +--- /dev/null ++++ b/drivers/usb/dwc_otg/Kconfig +@@ -0,0 +1,24 @@ ++config DWC_OTG ++ tristate "Ralink RT305X DWC_OTG support" ++ depends on SOC_RT305X ++ ---help--- ++ This driver supports Ralink DWC_OTG ++ ++choice ++ prompt "USB Operation Mode" ++ depends on DWC_OTG ++ default DWC_OTG_HOST_ONLY ++ ++config DWC_OTG_HOST_ONLY ++ bool "HOST ONLY MODE" ++ depends on DWC_OTG ++ ++config DWC_OTG_DEVICE_ONLY ++ bool "DEVICE ONLY MODE" ++ depends on DWC_OTG ++ ++endchoice ++ ++config DWC_OTG_DEBUG ++ bool "Enable debug mode" ++ depends on DWC_OTG +--- /dev/null ++++ b/drivers/usb/dwc_otg/Makefile +@@ -0,0 +1,25 @@ ++# ++# Makefile for DWC_otg Highspeed USB controller driver ++# ++ ++ifeq ($(CONFIG_DWC_OTG_DEBUG),y) ++EXTRA_CFLAGS += -DDEBUG ++endif ++ ++# Use one of the following flags to compile the software in host-only or ++# device-only mode. ++ifeq ($(CONFIG_DWC_OTG_HOST_ONLY),y) ++EXTRA_CFLAGS += -DDWC_HOST_ONLY ++EXTRA_CFLAGS += -DDWC_EN_ISOC ++endif ++ ++ifeq ($(CONFIG_DWC_OTG_DEVICE_ONLY),y) ++EXTRA_CFLAGS += -DDWC_DEVICE_ONLY ++endif ++ ++obj-$(CONFIG_DWC_OTG) := dwc_otg.o ++ ++dwc_otg-objs := dwc_otg_driver.o dwc_otg_attr.o ++dwc_otg-objs += dwc_otg_cil.o dwc_otg_cil_intr.o ++dwc_otg-objs += dwc_otg_pcd.o dwc_otg_pcd_intr.o ++dwc_otg-objs += dwc_otg_hcd.o dwc_otg_hcd_intr.o dwc_otg_hcd_queue.o +--- /dev/null ++++ b/drivers/usb/dwc_otg/dummy_audio.c +@@ -0,0 +1,1575 @@ ++/* ++ * zero.c -- Gadget Zero, for USB development ++ * ++ * Copyright (C) 2003-2004 David Brownell ++ * All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * 1. Redistributions of source code must retain the above copyright ++ * notice, this list of conditions, and the following disclaimer, ++ * without modification. ++ * 2. Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * 3. The names of the above-listed copyright holders may not be used ++ * to endorse or promote products derived from this software without ++ * specific prior written permission. 
++ * ++ * ALTERNATIVELY, this software may be distributed under the terms of the ++ * GNU General Public License ("GPL") as published by the Free Software ++ * Foundation, either version 2 of that License or (at your option) any ++ * later version. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS ++ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, ++ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR ++ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR ++ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, ++ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, ++ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ++ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ++ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ++ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ++ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++ ++/* ++ * Gadget Zero only needs two bulk endpoints, and is an example of how you ++ * can write a hardware-agnostic gadget driver running inside a USB device. ++ * ++ * Hardware details are visible (see CONFIG_USB_ZERO_* below) but don't ++ * affect most of the driver. ++ * ++ * Use it with the Linux host/master side "usbtest" driver to get a basic ++ * functional test of your device-side usb stack, or with "usb-skeleton". ++ * ++ * It supports two similar configurations. One sinks whatever the usb host ++ * writes, and in return sources zeroes. The other loops whatever the host ++ * writes back, so the host can read it. Module options include: ++ * ++ * buflen=N default N=4096, buffer size used ++ * qlen=N default N=32, how many buffers in the loopback queue ++ * loopdefault default false, list loopback config first ++ * ++ * Many drivers will only have one configuration, letting them be much ++ * simpler if they also don't support high speed operation (like this ++ * driver does). ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21) ++# include ++#else ++# include ++#endif ++ ++#include ++ ++ ++/*-------------------------------------------------------------------------*/ ++/*-------------------------------------------------------------------------*/ ++ ++ ++static int utf8_to_utf16le(const char *s, u16 *cp, unsigned len) ++{ ++ int count = 0; ++ u8 c; ++ u16 uchar; ++ ++ /* this insists on correct encodings, though not minimal ones. ++ * BUT it currently rejects legit 4-byte UTF-8 code points, ++ * which need surrogate pairs. (Unicode 3.1 can use them.) 
++ */ ++ while (len != 0 && (c = (u8) *s++) != 0) { ++ if (unlikely(c & 0x80)) { ++ // 2-byte sequence: ++ // 00000yyyyyxxxxxx = 110yyyyy 10xxxxxx ++ if ((c & 0xe0) == 0xc0) { ++ uchar = (c & 0x1f) << 6; ++ ++ c = (u8) *s++; ++ if ((c & 0xc0) != 0xc0) ++ goto fail; ++ c &= 0x3f; ++ uchar |= c; ++ ++ // 3-byte sequence (most CJKV characters): ++ // zzzzyyyyyyxxxxxx = 1110zzzz 10yyyyyy 10xxxxxx ++ } else if ((c & 0xf0) == 0xe0) { ++ uchar = (c & 0x0f) << 12; ++ ++ c = (u8) *s++; ++ if ((c & 0xc0) != 0xc0) ++ goto fail; ++ c &= 0x3f; ++ uchar |= c << 6; ++ ++ c = (u8) *s++; ++ if ((c & 0xc0) != 0xc0) ++ goto fail; ++ c &= 0x3f; ++ uchar |= c; ++ ++ /* no bogus surrogates */ ++ if (0xd800 <= uchar && uchar <= 0xdfff) ++ goto fail; ++ ++ // 4-byte sequence (surrogate pairs, currently rare): ++ // 11101110wwwwzzzzyy + 110111yyyyxxxxxx ++ // = 11110uuu 10uuzzzz 10yyyyyy 10xxxxxx ++ // (uuuuu = wwww + 1) ++ // FIXME accept the surrogate code points (only) ++ ++ } else ++ goto fail; ++ } else ++ uchar = c; ++ put_unaligned (cpu_to_le16 (uchar), cp++); ++ count++; ++ len--; ++ } ++ return count; ++fail: ++ return -1; ++} ++ ++ ++/** ++ * usb_gadget_get_string - fill out a string descriptor ++ * @table: of c strings encoded using UTF-8 ++ * @id: string id, from low byte of wValue in get string descriptor ++ * @buf: at least 256 bytes ++ * ++ * Finds the UTF-8 string matching the ID, and converts it into a ++ * string descriptor in utf16-le. ++ * Returns length of descriptor (always even) or negative errno ++ * ++ * If your driver needs stings in multiple languages, you'll probably ++ * "switch (wIndex) { ... }" in your ep0 string descriptor logic, ++ * using this routine after choosing which set of UTF-8 strings to use. ++ * Note that US-ASCII is a strict subset of UTF-8; any string bytes with ++ * the eighth bit set will be multibyte UTF-8 characters, not ISO-8859/1 ++ * characters (which are also widely used in C strings). ++ */ ++int ++usb_gadget_get_string (struct usb_gadget_strings *table, int id, u8 *buf) ++{ ++ struct usb_string *s; ++ int len; ++ ++ /* descriptor 0 has the language id */ ++ if (id == 0) { ++ buf [0] = 4; ++ buf [1] = USB_DT_STRING; ++ buf [2] = (u8) table->language; ++ buf [3] = (u8) (table->language >> 8); ++ return 4; ++ } ++ for (s = table->strings; s && s->s; s++) ++ if (s->id == id) ++ break; ++ ++ /* unrecognized: stall. */ ++ if (!s || !s->s) ++ return -EINVAL; ++ ++ /* string descriptors have length, tag, then UTF16-LE text */ ++ len = min ((size_t) 126, strlen (s->s)); ++ memset (buf + 2, 0, 2 * len); /* zero all the bytes */ ++ len = utf8_to_utf16le(s->s, (u16 *)&buf[2], len); ++ if (len < 0) ++ return -EINVAL; ++ buf [0] = (len + 1) * 2; ++ buf [1] = USB_DT_STRING; ++ return buf [0]; ++} ++ ++ ++/*-------------------------------------------------------------------------*/ ++/*-------------------------------------------------------------------------*/ ++ ++ ++/** ++ * usb_descriptor_fillbuf - fill buffer with descriptors ++ * @buf: Buffer to be filled ++ * @buflen: Size of buf ++ * @src: Array of descriptor pointers, terminated by null pointer. ++ * ++ * Copies descriptors into the buffer, returning the length or a ++ * negative error code if they can't all be copied. Useful when ++ * assembling descriptors for an associated set of interfaces used ++ * as part of configuring a composite device; or in other cases where ++ * sets of descriptors need to be marshaled. 
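 * Example (condensed from usb_gadget_config_buf() and config_buf() later in
 * this file, shown here only for orientation):
 *
 *	len = usb_gadget_config_buf(&z_config, buf, USB_BUFSIZ, z_function);
 *
 * which reaches this helper with @buf pointing USB_DT_CONFIG_SIZE bytes into
 * the caller's buffer and @src set to the z_function[] vector.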
++ */ ++int ++usb_descriptor_fillbuf(void *buf, unsigned buflen, ++ const struct usb_descriptor_header **src) ++{ ++ u8 *dest = buf; ++ ++ if (!src) ++ return -EINVAL; ++ ++ /* fill buffer from src[] until null descriptor ptr */ ++ for (; 0 != *src; src++) { ++ unsigned len = (*src)->bLength; ++ ++ if (len > buflen) ++ return -EINVAL; ++ memcpy(dest, *src, len); ++ buflen -= len; ++ dest += len; ++ } ++ return dest - (u8 *)buf; ++} ++ ++ ++/** ++ * usb_gadget_config_buf - builts a complete configuration descriptor ++ * @config: Header for the descriptor, including characteristics such ++ * as power requirements and number of interfaces. ++ * @desc: Null-terminated vector of pointers to the descriptors (interface, ++ * endpoint, etc) defining all functions in this device configuration. ++ * @buf: Buffer for the resulting configuration descriptor. ++ * @length: Length of buffer. If this is not big enough to hold the ++ * entire configuration descriptor, an error code will be returned. ++ * ++ * This copies descriptors into the response buffer, building a descriptor ++ * for that configuration. It returns the buffer length or a negative ++ * status code. The config.wTotalLength field is set to match the length ++ * of the result, but other descriptor fields (including power usage and ++ * interface count) must be set by the caller. ++ * ++ * Gadget drivers could use this when constructing a config descriptor ++ * in response to USB_REQ_GET_DESCRIPTOR. They will need to patch the ++ * resulting bDescriptorType value if USB_DT_OTHER_SPEED_CONFIG is needed. ++ */ ++int usb_gadget_config_buf( ++ const struct usb_config_descriptor *config, ++ void *buf, ++ unsigned length, ++ const struct usb_descriptor_header **desc ++) ++{ ++ struct usb_config_descriptor *cp = buf; ++ int len; ++ ++ /* config descriptor first */ ++ if (length < USB_DT_CONFIG_SIZE || !desc) ++ return -EINVAL; ++ *cp = *config; ++ ++ /* then interface/endpoint/class/vendor/... */ ++ len = usb_descriptor_fillbuf(USB_DT_CONFIG_SIZE + (u8*)buf, ++ length - USB_DT_CONFIG_SIZE, desc); ++ if (len < 0) ++ return len; ++ len += USB_DT_CONFIG_SIZE; ++ if (len > 0xffff) ++ return -EINVAL; ++ ++ /* patch up the config descriptor */ ++ cp->bLength = USB_DT_CONFIG_SIZE; ++ cp->bDescriptorType = USB_DT_CONFIG; ++ cp->wTotalLength = cpu_to_le16(len); ++ cp->bmAttributes |= USB_CONFIG_ATT_ONE; ++ return len; ++} ++ ++/*-------------------------------------------------------------------------*/ ++/*-------------------------------------------------------------------------*/ ++ ++ ++#define RBUF_LEN (1024*1024) ++static int rbuf_start; ++static int rbuf_len; ++static __u8 rbuf[RBUF_LEN]; ++ ++/*-------------------------------------------------------------------------*/ ++ ++#define DRIVER_VERSION "St Patrick's Day 2004" ++ ++static const char shortname [] = "zero"; ++static const char longname [] = "YAMAHA YST-MS35D USB Speaker "; ++ ++static const char source_sink [] = "source and sink data"; ++static const char loopback [] = "loop input to output"; ++ ++/*-------------------------------------------------------------------------*/ ++ ++/* ++ * driver assumes self-powered hardware, and ++ * has no way for users to trigger remote wakeup. ++ * ++ * this version autoconfigures as much as possible, ++ * which is reasonable for most "bulk-only" drivers. 
++ */ ++static const char *EP_IN_NAME; /* source */ ++static const char *EP_OUT_NAME; /* sink */ ++ ++/*-------------------------------------------------------------------------*/ ++ ++/* big enough to hold our biggest descriptor */ ++#define USB_BUFSIZ 512 ++ ++struct zero_dev { ++ spinlock_t lock; ++ struct usb_gadget *gadget; ++ struct usb_request *req; /* for control responses */ ++ ++ /* when configured, we have one of two configs: ++ * - source data (in to host) and sink it (out from host) ++ * - or loop it back (out from host back in to host) ++ */ ++ u8 config; ++ struct usb_ep *in_ep, *out_ep; ++ ++ /* autoresume timer */ ++ struct timer_list resume; ++}; ++ ++#define xprintk(d,level,fmt,args...) \ ++ dev_printk(level , &(d)->gadget->dev , fmt , ## args) ++ ++#ifdef DEBUG ++#define DBG(dev,fmt,args...) \ ++ xprintk(dev , KERN_DEBUG , fmt , ## args) ++#else ++#define DBG(dev,fmt,args...) \ ++ do { } while (0) ++#endif /* DEBUG */ ++ ++#ifdef VERBOSE ++#define VDBG DBG ++#else ++#define VDBG(dev,fmt,args...) \ ++ do { } while (0) ++#endif /* VERBOSE */ ++ ++#define ERROR(dev,fmt,args...) \ ++ xprintk(dev , KERN_ERR , fmt , ## args) ++#define WARN(dev,fmt,args...) \ ++ xprintk(dev , KERN_WARNING , fmt , ## args) ++#define INFO(dev,fmt,args...) \ ++ xprintk(dev , KERN_INFO , fmt , ## args) ++ ++/*-------------------------------------------------------------------------*/ ++ ++static unsigned buflen = 4096; ++static unsigned qlen = 32; ++static unsigned pattern = 0; ++ ++module_param (buflen, uint, S_IRUGO|S_IWUSR); ++module_param (qlen, uint, S_IRUGO|S_IWUSR); ++module_param (pattern, uint, S_IRUGO|S_IWUSR); ++ ++/* ++ * if it's nonzero, autoresume says how many seconds to wait ++ * before trying to wake up the host after suspend. ++ */ ++static unsigned autoresume = 0; ++module_param (autoresume, uint, 0); ++ ++/* ++ * Normally the "loopback" configuration is second (index 1) so ++ * it's not the default. Here's where to change that order, to ++ * work better with hosts where config changes are problematic. ++ * Or controllers (like superh) that only support one config. ++ */ ++static int loopdefault = 0; ++ ++module_param (loopdefault, bool, S_IRUGO|S_IWUSR); ++ ++/*-------------------------------------------------------------------------*/ ++ ++/* Thanks to NetChip Technologies for donating this product ID. ++ * ++ * DO NOT REUSE THESE IDs with a protocol-incompatible driver!! Ever!! ++ * Instead: allocate your own, using normal USB-IF procedures. ++ */ ++#ifndef CONFIG_USB_ZERO_HNPTEST ++#define DRIVER_VENDOR_NUM 0x0525 /* NetChip */ ++#define DRIVER_PRODUCT_NUM 0xa4a0 /* Linux-USB "Gadget Zero" */ ++#else ++#define DRIVER_VENDOR_NUM 0x1a0a /* OTG test device IDs */ ++#define DRIVER_PRODUCT_NUM 0xbadd ++#endif ++ ++/*-------------------------------------------------------------------------*/ ++ ++/* ++ * DESCRIPTORS ... most are static, but strings and (full) ++ * configuration descriptors are built on demand. ++ */ ++ ++/* ++#define STRING_MANUFACTURER 25 ++#define STRING_PRODUCT 42 ++#define STRING_SERIAL 101 ++*/ ++#define STRING_MANUFACTURER 1 ++#define STRING_PRODUCT 2 ++#define STRING_SERIAL 3 ++ ++#define STRING_SOURCE_SINK 250 ++#define STRING_LOOPBACK 251 ++ ++/* ++ * This device advertises two configurations; these numbers work ++ * on a pxa250 as well as more flexible hardware. 
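 * (In this audio adaptation the device descriptor below actually exposes a
 * single configuration, bNumConfigurations = 1; the two-configuration wording
 * is carried over from the original gadget zero source.)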
++ */ ++#define CONFIG_SOURCE_SINK 3 ++#define CONFIG_LOOPBACK 2 ++ ++/* ++static struct usb_device_descriptor ++device_desc = { ++ .bLength = sizeof device_desc, ++ .bDescriptorType = USB_DT_DEVICE, ++ ++ .bcdUSB = __constant_cpu_to_le16 (0x0200), ++ .bDeviceClass = USB_CLASS_VENDOR_SPEC, ++ ++ .idVendor = __constant_cpu_to_le16 (DRIVER_VENDOR_NUM), ++ .idProduct = __constant_cpu_to_le16 (DRIVER_PRODUCT_NUM), ++ .iManufacturer = STRING_MANUFACTURER, ++ .iProduct = STRING_PRODUCT, ++ .iSerialNumber = STRING_SERIAL, ++ .bNumConfigurations = 2, ++}; ++*/ ++static struct usb_device_descriptor ++device_desc = { ++ .bLength = sizeof device_desc, ++ .bDescriptorType = USB_DT_DEVICE, ++ .bcdUSB = __constant_cpu_to_le16 (0x0100), ++ .bDeviceClass = USB_CLASS_PER_INTERFACE, ++ .bDeviceSubClass = 0, ++ .bDeviceProtocol = 0, ++ .bMaxPacketSize0 = 64, ++ .bcdDevice = __constant_cpu_to_le16 (0x0100), ++ .idVendor = __constant_cpu_to_le16 (0x0499), ++ .idProduct = __constant_cpu_to_le16 (0x3002), ++ .iManufacturer = STRING_MANUFACTURER, ++ .iProduct = STRING_PRODUCT, ++ .iSerialNumber = STRING_SERIAL, ++ .bNumConfigurations = 1, ++}; ++ ++static struct usb_config_descriptor ++z_config = { ++ .bLength = sizeof z_config, ++ .bDescriptorType = USB_DT_CONFIG, ++ ++ /* compute wTotalLength on the fly */ ++ .bNumInterfaces = 2, ++ .bConfigurationValue = 1, ++ .iConfiguration = 0, ++ .bmAttributes = 0x40, ++ .bMaxPower = 0, /* self-powered */ ++}; ++ ++ ++static struct usb_otg_descriptor ++otg_descriptor = { ++ .bLength = sizeof otg_descriptor, ++ .bDescriptorType = USB_DT_OTG, ++ ++ .bmAttributes = USB_OTG_SRP, ++}; ++ ++/* one interface in each configuration */ ++#ifdef CONFIG_USB_GADGET_DUALSPEED ++ ++/* ++ * usb 2.0 devices need to expose both high speed and full speed ++ * descriptors, unless they only run at full speed. ++ * ++ * that means alternate endpoint descriptors (bigger packets) ++ * and a "device qualifier" ... plus more construction options ++ * for the config descriptor. 
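 * Concretely, that means the dev_qualifier descriptor just below, plus the
 * ep_desc() macro defined after the descriptor tables, which selects the
 * high-speed or full-speed endpoint descriptor based on gadget->speed.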
++ */ ++ ++static struct usb_qualifier_descriptor ++dev_qualifier = { ++ .bLength = sizeof dev_qualifier, ++ .bDescriptorType = USB_DT_DEVICE_QUALIFIER, ++ ++ .bcdUSB = __constant_cpu_to_le16 (0x0200), ++ .bDeviceClass = USB_CLASS_VENDOR_SPEC, ++ ++ .bNumConfigurations = 2, ++}; ++ ++ ++struct usb_cs_as_general_descriptor { ++ __u8 bLength; ++ __u8 bDescriptorType; ++ ++ __u8 bDescriptorSubType; ++ __u8 bTerminalLink; ++ __u8 bDelay; ++ __u16 wFormatTag; ++} __attribute__ ((packed)); ++ ++struct usb_cs_as_format_descriptor { ++ __u8 bLength; ++ __u8 bDescriptorType; ++ ++ __u8 bDescriptorSubType; ++ __u8 bFormatType; ++ __u8 bNrChannels; ++ __u8 bSubframeSize; ++ __u8 bBitResolution; ++ __u8 bSamfreqType; ++ __u8 tLowerSamFreq[3]; ++ __u8 tUpperSamFreq[3]; ++} __attribute__ ((packed)); ++ ++static const struct usb_interface_descriptor ++z_audio_control_if_desc = { ++ .bLength = sizeof z_audio_control_if_desc, ++ .bDescriptorType = USB_DT_INTERFACE, ++ .bInterfaceNumber = 0, ++ .bAlternateSetting = 0, ++ .bNumEndpoints = 0, ++ .bInterfaceClass = USB_CLASS_AUDIO, ++ .bInterfaceSubClass = 0x1, ++ .bInterfaceProtocol = 0, ++ .iInterface = 0, ++}; ++ ++static const struct usb_interface_descriptor ++z_audio_if_desc = { ++ .bLength = sizeof z_audio_if_desc, ++ .bDescriptorType = USB_DT_INTERFACE, ++ .bInterfaceNumber = 1, ++ .bAlternateSetting = 0, ++ .bNumEndpoints = 0, ++ .bInterfaceClass = USB_CLASS_AUDIO, ++ .bInterfaceSubClass = 0x2, ++ .bInterfaceProtocol = 0, ++ .iInterface = 0, ++}; ++ ++static const struct usb_interface_descriptor ++z_audio_if_desc2 = { ++ .bLength = sizeof z_audio_if_desc, ++ .bDescriptorType = USB_DT_INTERFACE, ++ .bInterfaceNumber = 1, ++ .bAlternateSetting = 1, ++ .bNumEndpoints = 1, ++ .bInterfaceClass = USB_CLASS_AUDIO, ++ .bInterfaceSubClass = 0x2, ++ .bInterfaceProtocol = 0, ++ .iInterface = 0, ++}; ++ ++static const struct usb_cs_as_general_descriptor ++z_audio_cs_as_if_desc = { ++ .bLength = 7, ++ .bDescriptorType = 0x24, ++ ++ .bDescriptorSubType = 0x01, ++ .bTerminalLink = 0x01, ++ .bDelay = 0x0, ++ .wFormatTag = __constant_cpu_to_le16 (0x0001) ++}; ++ ++ ++static const struct usb_cs_as_format_descriptor ++z_audio_cs_as_format_desc = { ++ .bLength = 0xe, ++ .bDescriptorType = 0x24, ++ ++ .bDescriptorSubType = 2, ++ .bFormatType = 1, ++ .bNrChannels = 1, ++ .bSubframeSize = 1, ++ .bBitResolution = 8, ++ .bSamfreqType = 0, ++ .tLowerSamFreq = {0x7e, 0x13, 0x00}, ++ .tUpperSamFreq = {0xe2, 0xd6, 0x00}, ++}; ++ ++static const struct usb_endpoint_descriptor ++z_iso_ep = { ++ .bLength = 0x09, ++ .bDescriptorType = 0x05, ++ .bEndpointAddress = 0x04, ++ .bmAttributes = 0x09, ++ .wMaxPacketSize = 0x0038, ++ .bInterval = 0x01, ++ .bRefresh = 0x00, ++ .bSynchAddress = 0x00, ++}; ++ ++static char z_iso_ep2[] = {0x07, 0x25, 0x01, 0x00, 0x02, 0x00, 0x02}; ++ ++// 9 bytes ++static char z_ac_interface_header_desc[] = ++{ 0x09, 0x24, 0x01, 0x00, 0x01, 0x2b, 0x00, 0x01, 0x01 }; ++ ++// 12 bytes ++static char z_0[] = {0x0c, 0x24, 0x02, 0x01, 0x01, 0x01, 0x00, 0x02, ++ 0x03, 0x00, 0x00, 0x00}; ++// 13 bytes ++static char z_1[] = {0x0d, 0x24, 0x06, 0x02, 0x01, 0x02, 0x15, 0x00, ++ 0x02, 0x00, 0x02, 0x00, 0x00}; ++// 9 bytes ++static char z_2[] = {0x09, 0x24, 0x03, 0x03, 0x01, 0x03, 0x00, 0x02, ++ 0x00}; ++ ++static char za_0[] = {0x09, 0x04, 0x01, 0x02, 0x01, 0x01, 0x02, 0x00, ++ 0x00}; ++ ++static char za_1[] = {0x07, 0x24, 0x01, 0x01, 0x00, 0x01, 0x00}; ++ ++static char za_2[] = {0x0e, 0x24, 0x02, 0x01, 0x02, 0x01, 0x08, 0x00, ++ 0x7e, 0x13, 0x00, 0xe2, 0xd6, 0x00}; ++ ++static 
char za_3[] = {0x09, 0x05, 0x04, 0x09, 0x70, 0x00, 0x01, 0x00, ++ 0x00}; ++ ++static char za_4[] = {0x07, 0x25, 0x01, 0x00, 0x02, 0x00, 0x02}; ++ ++static char za_5[] = {0x09, 0x04, 0x01, 0x03, 0x01, 0x01, 0x02, 0x00, ++ 0x00}; ++ ++static char za_6[] = {0x07, 0x24, 0x01, 0x01, 0x00, 0x01, 0x00}; ++ ++static char za_7[] = {0x0e, 0x24, 0x02, 0x01, 0x01, 0x02, 0x10, 0x00, ++ 0x7e, 0x13, 0x00, 0xe2, 0xd6, 0x00}; ++ ++static char za_8[] = {0x09, 0x05, 0x04, 0x09, 0x70, 0x00, 0x01, 0x00, ++ 0x00}; ++ ++static char za_9[] = {0x07, 0x25, 0x01, 0x00, 0x02, 0x00, 0x02}; ++ ++static char za_10[] = {0x09, 0x04, 0x01, 0x04, 0x01, 0x01, 0x02, 0x00, ++ 0x00}; ++ ++static char za_11[] = {0x07, 0x24, 0x01, 0x01, 0x00, 0x01, 0x00}; ++ ++static char za_12[] = {0x0e, 0x24, 0x02, 0x01, 0x02, 0x02, 0x10, 0x00, ++ 0x73, 0x13, 0x00, 0xe2, 0xd6, 0x00}; ++ ++static char za_13[] = {0x09, 0x05, 0x04, 0x09, 0xe0, 0x00, 0x01, 0x00, ++ 0x00}; ++ ++static char za_14[] = {0x07, 0x25, 0x01, 0x00, 0x02, 0x00, 0x02}; ++ ++static char za_15[] = {0x09, 0x04, 0x01, 0x05, 0x01, 0x01, 0x02, 0x00, ++ 0x00}; ++ ++static char za_16[] = {0x07, 0x24, 0x01, 0x01, 0x00, 0x01, 0x00}; ++ ++static char za_17[] = {0x0e, 0x24, 0x02, 0x01, 0x01, 0x03, 0x14, 0x00, ++ 0x7e, 0x13, 0x00, 0xe2, 0xd6, 0x00}; ++ ++static char za_18[] = {0x09, 0x05, 0x04, 0x09, 0xa8, 0x00, 0x01, 0x00, ++ 0x00}; ++ ++static char za_19[] = {0x07, 0x25, 0x01, 0x00, 0x02, 0x00, 0x02}; ++ ++static char za_20[] = {0x09, 0x04, 0x01, 0x06, 0x01, 0x01, 0x02, 0x00, ++ 0x00}; ++ ++static char za_21[] = {0x07, 0x24, 0x01, 0x01, 0x00, 0x01, 0x00}; ++ ++static char za_22[] = {0x0e, 0x24, 0x02, 0x01, 0x02, 0x03, 0x14, 0x00, ++ 0x7e, 0x13, 0x00, 0xe2, 0xd6, 0x00}; ++ ++static char za_23[] = {0x09, 0x05, 0x04, 0x09, 0x50, 0x01, 0x01, 0x00, ++ 0x00}; ++ ++static char za_24[] = {0x07, 0x25, 0x01, 0x00, 0x02, 0x00, 0x02}; ++ ++ ++ ++static const struct usb_descriptor_header *z_function [] = { ++ (struct usb_descriptor_header *) &z_audio_control_if_desc, ++ (struct usb_descriptor_header *) &z_ac_interface_header_desc, ++ (struct usb_descriptor_header *) &z_0, ++ (struct usb_descriptor_header *) &z_1, ++ (struct usb_descriptor_header *) &z_2, ++ (struct usb_descriptor_header *) &z_audio_if_desc, ++ (struct usb_descriptor_header *) &z_audio_if_desc2, ++ (struct usb_descriptor_header *) &z_audio_cs_as_if_desc, ++ (struct usb_descriptor_header *) &z_audio_cs_as_format_desc, ++ (struct usb_descriptor_header *) &z_iso_ep, ++ (struct usb_descriptor_header *) &z_iso_ep2, ++ (struct usb_descriptor_header *) &za_0, ++ (struct usb_descriptor_header *) &za_1, ++ (struct usb_descriptor_header *) &za_2, ++ (struct usb_descriptor_header *) &za_3, ++ (struct usb_descriptor_header *) &za_4, ++ (struct usb_descriptor_header *) &za_5, ++ (struct usb_descriptor_header *) &za_6, ++ (struct usb_descriptor_header *) &za_7, ++ (struct usb_descriptor_header *) &za_8, ++ (struct usb_descriptor_header *) &za_9, ++ (struct usb_descriptor_header *) &za_10, ++ (struct usb_descriptor_header *) &za_11, ++ (struct usb_descriptor_header *) &za_12, ++ (struct usb_descriptor_header *) &za_13, ++ (struct usb_descriptor_header *) &za_14, ++ (struct usb_descriptor_header *) &za_15, ++ (struct usb_descriptor_header *) &za_16, ++ (struct usb_descriptor_header *) &za_17, ++ (struct usb_descriptor_header *) &za_18, ++ (struct usb_descriptor_header *) &za_19, ++ (struct usb_descriptor_header *) &za_20, ++ (struct usb_descriptor_header *) &za_21, ++ (struct usb_descriptor_header *) &za_22, ++ (struct usb_descriptor_header *) 
&za_23, ++ (struct usb_descriptor_header *) &za_24, ++ NULL, ++}; ++ ++/* maxpacket and other transfer characteristics vary by speed. */ ++#define ep_desc(g,hs,fs) (((g)->speed==USB_SPEED_HIGH)?(hs):(fs)) ++ ++#else ++ ++/* if there's no high speed support, maxpacket doesn't change. */ ++#define ep_desc(g,hs,fs) fs ++ ++#endif /* !CONFIG_USB_GADGET_DUALSPEED */ ++ ++static char manufacturer [40]; ++//static char serial [40]; ++static char serial [] = "Ser 00 em"; ++ ++/* static strings, in UTF-8 */ ++static struct usb_string strings [] = { ++ { STRING_MANUFACTURER, manufacturer, }, ++ { STRING_PRODUCT, longname, }, ++ { STRING_SERIAL, serial, }, ++ { STRING_LOOPBACK, loopback, }, ++ { STRING_SOURCE_SINK, source_sink, }, ++ { } /* end of list */ ++}; ++ ++static struct usb_gadget_strings stringtab = { ++ .language = 0x0409, /* en-us */ ++ .strings = strings, ++}; ++ ++/* ++ * config descriptors are also handcrafted. these must agree with code ++ * that sets configurations, and with code managing interfaces and their ++ * altsettings. other complexity may come from: ++ * ++ * - high speed support, including "other speed config" rules ++ * - multiple configurations ++ * - interfaces with alternate settings ++ * - embedded class or vendor-specific descriptors ++ * ++ * this handles high speed, and has a second config that could as easily ++ * have been an alternate interface setting (on most hardware). ++ * ++ * NOTE: to demonstrate (and test) more USB capabilities, this driver ++ * should include an altsetting to test interrupt transfers, including ++ * high bandwidth modes at high speed. (Maybe work like Intel's test ++ * device?) ++ */ ++static int ++config_buf (struct usb_gadget *gadget, u8 *buf, u8 type, unsigned index) ++{ ++ int len; ++ const struct usb_descriptor_header **function; ++ ++ function = z_function; ++ len = usb_gadget_config_buf (&z_config, buf, USB_BUFSIZ, function); ++ if (len < 0) ++ return len; ++ ((struct usb_config_descriptor *) buf)->bDescriptorType = type; ++ return len; ++} ++ ++/*-------------------------------------------------------------------------*/ ++ ++static struct usb_request * ++alloc_ep_req (struct usb_ep *ep, unsigned length) ++{ ++ struct usb_request *req; ++ ++ req = usb_ep_alloc_request (ep, GFP_ATOMIC); ++ if (req) { ++ req->length = length; ++ req->buf = usb_ep_alloc_buffer (ep, length, ++ &req->dma, GFP_ATOMIC); ++ if (!req->buf) { ++ usb_ep_free_request (ep, req); ++ req = NULL; ++ } ++ } ++ return req; ++} ++ ++static void free_ep_req (struct usb_ep *ep, struct usb_request *req) ++{ ++ if (req->buf) ++ usb_ep_free_buffer (ep, req->buf, req->dma, req->length); ++ usb_ep_free_request (ep, req); ++} ++ ++/*-------------------------------------------------------------------------*/ ++ ++/* optionally require specific source/sink data patterns */ ++ ++static int ++check_read_data ( ++ struct zero_dev *dev, ++ struct usb_ep *ep, ++ struct usb_request *req ++) ++{ ++ unsigned i; ++ u8 *buf = req->buf; ++ ++ for (i = 0; i < req->actual; i++, buf++) { ++ switch (pattern) { ++ /* all-zeroes has no synchronization issues */ ++ case 0: ++ if (*buf == 0) ++ continue; ++ break; ++ /* mod63 stays in sync with short-terminated transfers, ++ * or otherwise when host and gadget agree on how large ++ * each usb transfer request should be. resync is done ++ * with set_interface or set_config. 
++ */
++        case 1:
++            if (*buf == (u8)(i % 63))
++                continue;
++            break;
++        }
++        ERROR (dev, "bad OUT byte, buf [%d] = %d\n", i, *buf);
++        usb_ep_set_halt (ep);
++        return -EINVAL;
++    }
++    return 0;
++}
++
++/*-------------------------------------------------------------------------*/
++
++static void zero_reset_config (struct zero_dev *dev)
++{
++    if (dev->config == 0)
++        return;
++
++    DBG (dev, "reset config\n");
++
++    /* just disable endpoints, forcing completion of pending i/o.
++     * all our completion handlers free their requests in this case.
++     */
++    if (dev->in_ep) {
++        usb_ep_disable (dev->in_ep);
++        dev->in_ep = NULL;
++    }
++    if (dev->out_ep) {
++        usb_ep_disable (dev->out_ep);
++        dev->out_ep = NULL;
++    }
++    dev->config = 0;
++    del_timer (&dev->resume);
++}
++
++#define _write(f, buf, sz) (f->f_op->write(f, buf, sz, &f->f_pos))
++
++static void
++zero_isoc_complete (struct usb_ep *ep, struct usb_request *req)
++{
++    struct zero_dev *dev = ep->driver_data;
++    int status = req->status;
++    int i, j;
++
++    switch (status) {
++
++    case 0:            /* normal completion? */
++        //printk ("\nzero ---------------> isoc normal completion %d bytes\n", req->actual);
++        for (i=0, j=rbuf_start; i<req->actual; i++) {
++            //printk ("%02x ", ((__u8*)req->buf)[i]);
++            rbuf[j] = ((__u8*)req->buf)[i];
++            j++;
++            if (j >= RBUF_LEN) j=0;
++        }
++        rbuf_start = j;
++        //printk ("\n\n");
++
++        if (rbuf_len < RBUF_LEN) {
++            rbuf_len += req->actual;
++            if (rbuf_len > RBUF_LEN) {
++                rbuf_len = RBUF_LEN;
++            }
++        }
++
++        break;
++
++    /* this endpoint is normally active while we're configured */
++    case -ECONNABORTED:        /* hardware forced ep reset */
++    case -ECONNRESET:        /* request dequeued */
++    case -ESHUTDOWN:        /* disconnect from host */
++        VDBG (dev, "%s gone (%d), %d/%d\n", ep->name, status,
++                req->actual, req->length);
++        if (ep == dev->out_ep)
++            check_read_data (dev, ep, req);
++        free_ep_req (ep, req);
++        return;
++
++    case -EOVERFLOW:        /* buffer overrun on read means that
++                     * we didn't provide a big enough
++                     * buffer.
++                     */
++    default:
++#if 1
++        DBG (dev, "%s complete --> %d, %d/%d\n", ep->name,
++                status, req->actual, req->length);
++#endif
++    case -EREMOTEIO:        /* short read */
++        break;
++    }
++
++    status = usb_ep_queue (ep, req, GFP_ATOMIC);
++    if (status) {
++        ERROR (dev, "kill %s: resubmit %d bytes --> %d\n",
++                ep->name, req->length, status);
++        usb_ep_set_halt (ep);
++        /* FIXME recover later ... somehow */
++    }
++}
++
++static struct usb_request *
++zero_start_isoc_ep (struct usb_ep *ep, int gfp_flags)
++{
++    struct usb_request *req;
++    int status;
++
++    req = alloc_ep_req (ep, 512);
++    if (!req)
++        return NULL;
++
++    req->complete = zero_isoc_complete;
++
++    status = usb_ep_queue (ep, req, gfp_flags);
++    if (status) {
++        struct zero_dev *dev = ep->driver_data;
++
++        ERROR (dev, "start %s --> %d\n", ep->name, status);
++        free_ep_req (ep, req);
++        req = NULL;
++    }
++
++    return req;
++}
++
++/* change our operational config. this code must agree with the code
++ * that returns config descriptors, and altsetting code.
++ *
++ * it's also responsible for power management interactions. some
++ * configurations might not work with our current power sources.
++ *
++ * note that some device controller hardware will constrain what this
++ * code can do, perhaps by disallowing more than one configuration or
++ * by limiting configuration choices (like the pxa2xx).
++ */ ++static int ++zero_set_config (struct zero_dev *dev, unsigned number, int gfp_flags) ++{ ++ int result = 0; ++ struct usb_gadget *gadget = dev->gadget; ++ const struct usb_endpoint_descriptor *d; ++ struct usb_ep *ep; ++ ++ if (number == dev->config) ++ return 0; ++ ++ zero_reset_config (dev); ++ ++ gadget_for_each_ep (ep, gadget) { ++ ++ if (strcmp (ep->name, "ep4") == 0) { ++ ++ d = (struct usb_endpoint_descripter *)&za_23; // isoc ep desc for audio i/f alt setting 6 ++ result = usb_ep_enable (ep, d); ++ ++ if (result == 0) { ++ ep->driver_data = dev; ++ dev->in_ep = ep; ++ ++ if (zero_start_isoc_ep (ep, gfp_flags) != 0) { ++ ++ dev->in_ep = ep; ++ continue; ++ } ++ ++ usb_ep_disable (ep); ++ result = -EIO; ++ } ++ } ++ ++ } ++ ++ dev->config = number; ++ return result; ++} ++ ++/*-------------------------------------------------------------------------*/ ++ ++static void zero_setup_complete (struct usb_ep *ep, struct usb_request *req) ++{ ++ if (req->status || req->actual != req->length) ++ DBG ((struct zero_dev *) ep->driver_data, ++ "setup complete --> %d, %d/%d\n", ++ req->status, req->actual, req->length); ++} ++ ++/* ++ * The setup() callback implements all the ep0 functionality that's ++ * not handled lower down, in hardware or the hardware driver (like ++ * device and endpoint feature flags, and their status). It's all ++ * housekeeping for the gadget function we're implementing. Most of ++ * the work is in config-specific setup. ++ */ ++static int ++zero_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) ++{ ++ struct zero_dev *dev = get_gadget_data (gadget); ++ struct usb_request *req = dev->req; ++ int value = -EOPNOTSUPP; ++ ++ /* usually this stores reply data in the pre-allocated ep0 buffer, ++ * but config change events will reconfigure hardware. ++ */ ++ req->zero = 0; ++ switch (ctrl->bRequest) { ++ ++ case USB_REQ_GET_DESCRIPTOR: ++ ++ switch (ctrl->wValue >> 8) { ++ ++ case USB_DT_DEVICE: ++ value = min (ctrl->wLength, (u16) sizeof device_desc); ++ memcpy (req->buf, &device_desc, value); ++ break; ++#ifdef CONFIG_USB_GADGET_DUALSPEED ++ case USB_DT_DEVICE_QUALIFIER: ++ if (!gadget->is_dualspeed) ++ break; ++ value = min (ctrl->wLength, (u16) sizeof dev_qualifier); ++ memcpy (req->buf, &dev_qualifier, value); ++ break; ++ ++ case USB_DT_OTHER_SPEED_CONFIG: ++ if (!gadget->is_dualspeed) ++ break; ++ // FALLTHROUGH ++#endif /* CONFIG_USB_GADGET_DUALSPEED */ ++ case USB_DT_CONFIG: ++ value = config_buf (gadget, req->buf, ++ ctrl->wValue >> 8, ++ ctrl->wValue & 0xff); ++ if (value >= 0) ++ value = min (ctrl->wLength, (u16) value); ++ break; ++ ++ case USB_DT_STRING: ++ /* wIndex == language code. ++ * this driver only handles one language, you can ++ * add string tables for other languages, using ++ * any UTF-8 characters ++ */ ++ value = usb_gadget_get_string (&stringtab, ++ ctrl->wValue & 0xff, req->buf); ++ if (value >= 0) { ++ value = min (ctrl->wLength, (u16) value); ++ } ++ break; ++ } ++ break; ++ ++ /* currently two configs, two speeds */ ++ case USB_REQ_SET_CONFIGURATION: ++ if (ctrl->bRequestType != 0) ++ goto unknown; ++ ++ spin_lock (&dev->lock); ++ value = zero_set_config (dev, ctrl->wValue, GFP_ATOMIC); ++ spin_unlock (&dev->lock); ++ break; ++ case USB_REQ_GET_CONFIGURATION: ++ if (ctrl->bRequestType != USB_DIR_IN) ++ goto unknown; ++ *(u8 *)req->buf = dev->config; ++ value = min (ctrl->wLength, (u16) 1); ++ break; ++ ++ /* until we add altsetting support, or other interfaces, ++ * only 0/0 are possible. 
pxa2xx only supports 0/0 (poorly) ++ * and already killed pending endpoint I/O. ++ */ ++ case USB_REQ_SET_INTERFACE: ++ ++ if (ctrl->bRequestType != USB_RECIP_INTERFACE) ++ goto unknown; ++ spin_lock (&dev->lock); ++ if (dev->config) { ++ u8 config = dev->config; ++ ++ /* resets interface configuration, forgets about ++ * previous transaction state (queued bufs, etc) ++ * and re-inits endpoint state (toggle etc) ++ * no response queued, just zero status == success. ++ * if we had more than one interface we couldn't ++ * use this "reset the config" shortcut. ++ */ ++ zero_reset_config (dev); ++ zero_set_config (dev, config, GFP_ATOMIC); ++ value = 0; ++ } ++ spin_unlock (&dev->lock); ++ break; ++ case USB_REQ_GET_INTERFACE: ++ if ((ctrl->bRequestType == 0x21) && (ctrl->wIndex == 0x02)) { ++ value = ctrl->wLength; ++ break; ++ } ++ else { ++ if (ctrl->bRequestType != (USB_DIR_IN|USB_RECIP_INTERFACE)) ++ goto unknown; ++ if (!dev->config) ++ break; ++ if (ctrl->wIndex != 0) { ++ value = -EDOM; ++ break; ++ } ++ *(u8 *)req->buf = 0; ++ value = min (ctrl->wLength, (u16) 1); ++ } ++ break; ++ ++ /* ++ * These are the same vendor-specific requests supported by ++ * Intel's USB 2.0 compliance test devices. We exceed that ++ * device spec by allowing multiple-packet requests. ++ */ ++ case 0x5b: /* control WRITE test -- fill the buffer */ ++ if (ctrl->bRequestType != (USB_DIR_OUT|USB_TYPE_VENDOR)) ++ goto unknown; ++ if (ctrl->wValue || ctrl->wIndex) ++ break; ++ /* just read that many bytes into the buffer */ ++ if (ctrl->wLength > USB_BUFSIZ) ++ break; ++ value = ctrl->wLength; ++ break; ++ case 0x5c: /* control READ test -- return the buffer */ ++ if (ctrl->bRequestType != (USB_DIR_IN|USB_TYPE_VENDOR)) ++ goto unknown; ++ if (ctrl->wValue || ctrl->wIndex) ++ break; ++ /* expect those bytes are still in the buffer; send back */ ++ if (ctrl->wLength > USB_BUFSIZ ++ || ctrl->wLength != req->length) ++ break; ++ value = ctrl->wLength; ++ break; ++ ++ case 0x01: // SET_CUR ++ case 0x02: ++ case 0x03: ++ case 0x04: ++ case 0x05: ++ value = ctrl->wLength; ++ break; ++ case 0x81: ++ switch (ctrl->wValue) { ++ case 0x0201: ++ case 0x0202: ++ ((u8*)req->buf)[0] = 0x00; ++ ((u8*)req->buf)[1] = 0xe3; ++ break; ++ case 0x0300: ++ case 0x0500: ++ ((u8*)req->buf)[0] = 0x00; ++ break; ++ } ++ //((u8*)req->buf)[0] = 0x81; ++ //((u8*)req->buf)[1] = 0x81; ++ value = ctrl->wLength; ++ break; ++ case 0x82: ++ switch (ctrl->wValue) { ++ case 0x0201: ++ case 0x0202: ++ ((u8*)req->buf)[0] = 0x00; ++ ((u8*)req->buf)[1] = 0xc3; ++ break; ++ case 0x0300: ++ case 0x0500: ++ ((u8*)req->buf)[0] = 0x00; ++ break; ++ } ++ //((u8*)req->buf)[0] = 0x82; ++ //((u8*)req->buf)[1] = 0x82; ++ value = ctrl->wLength; ++ break; ++ case 0x83: ++ switch (ctrl->wValue) { ++ case 0x0201: ++ case 0x0202: ++ ((u8*)req->buf)[0] = 0x00; ++ ((u8*)req->buf)[1] = 0x00; ++ break; ++ case 0x0300: ++ ((u8*)req->buf)[0] = 0x60; ++ break; ++ case 0x0500: ++ ((u8*)req->buf)[0] = 0x18; ++ break; ++ } ++ //((u8*)req->buf)[0] = 0x83; ++ //((u8*)req->buf)[1] = 0x83; ++ value = ctrl->wLength; ++ break; ++ case 0x84: ++ switch (ctrl->wValue) { ++ case 0x0201: ++ case 0x0202: ++ ((u8*)req->buf)[0] = 0x00; ++ ((u8*)req->buf)[1] = 0x01; ++ break; ++ case 0x0300: ++ case 0x0500: ++ ((u8*)req->buf)[0] = 0x08; ++ break; ++ } ++ //((u8*)req->buf)[0] = 0x84; ++ //((u8*)req->buf)[1] = 0x84; ++ value = ctrl->wLength; ++ break; ++ case 0x85: ++ ((u8*)req->buf)[0] = 0x85; ++ ((u8*)req->buf)[1] = 0x85; ++ value = ctrl->wLength; ++ break; ++ ++ ++ default: ++unknown: ++ 
printk("unknown control req%02x.%02x v%04x i%04x l%d\n", ++ ctrl->bRequestType, ctrl->bRequest, ++ ctrl->wValue, ctrl->wIndex, ctrl->wLength); ++ } ++ ++ /* respond with data transfer before status phase? */ ++ if (value >= 0) { ++ req->length = value; ++ req->zero = value < ctrl->wLength ++ && (value % gadget->ep0->maxpacket) == 0; ++ value = usb_ep_queue (gadget->ep0, req, GFP_ATOMIC); ++ if (value < 0) { ++ DBG (dev, "ep_queue < 0 --> %d\n", value); ++ req->status = 0; ++ zero_setup_complete (gadget->ep0, req); ++ } ++ } ++ ++ /* device either stalls (value < 0) or reports success */ ++ return value; ++} ++ ++static void ++zero_disconnect (struct usb_gadget *gadget) ++{ ++ struct zero_dev *dev = get_gadget_data (gadget); ++ unsigned long flags; ++ ++ spin_lock_irqsave (&dev->lock, flags); ++ zero_reset_config (dev); ++ ++ /* a more significant application might have some non-usb ++ * activities to quiesce here, saving resources like power ++ * or pushing the notification up a network stack. ++ */ ++ spin_unlock_irqrestore (&dev->lock, flags); ++ ++ /* next we may get setup() calls to enumerate new connections; ++ * or an unbind() during shutdown (including removing module). ++ */ ++} ++ ++static void ++zero_autoresume (unsigned long _dev) ++{ ++ struct zero_dev *dev = (struct zero_dev *) _dev; ++ int status; ++ ++ /* normally the host would be woken up for something ++ * more significant than just a timer firing... ++ */ ++ if (dev->gadget->speed != USB_SPEED_UNKNOWN) { ++ status = usb_gadget_wakeup (dev->gadget); ++ DBG (dev, "wakeup --> %d\n", status); ++ } ++} ++ ++/*-------------------------------------------------------------------------*/ ++ ++static void ++zero_unbind (struct usb_gadget *gadget) ++{ ++ struct zero_dev *dev = get_gadget_data (gadget); ++ ++ DBG (dev, "unbind\n"); ++ ++ /* we've already been disconnected ... no i/o is active */ ++ if (dev->req) ++ free_ep_req (gadget->ep0, dev->req); ++ del_timer_sync (&dev->resume); ++ kfree (dev); ++ set_gadget_data (gadget, NULL); ++} ++ ++static int ++zero_bind (struct usb_gadget *gadget) ++{ ++ struct zero_dev *dev; ++ //struct usb_ep *ep; ++ ++ printk("binding\n"); ++ /* ++ * DRIVER POLICY CHOICE: you may want to do this differently. ++ * One thing to avoid is reusing a bcdDevice revision code ++ * with different host-visible configurations or behavior ++ * restrictions -- using ep1in/ep2out vs ep1out/ep3in, etc ++ */ ++ //device_desc.bcdDevice = __constant_cpu_to_le16 (0x0201); ++ ++ ++ /* ok, we made sense of the hardware ... */ ++ dev = kmalloc (sizeof *dev, SLAB_KERNEL); ++ if (!dev) ++ return -ENOMEM; ++ memset (dev, 0, sizeof *dev); ++ spin_lock_init (&dev->lock); ++ dev->gadget = gadget; ++ set_gadget_data (gadget, dev); ++ ++ /* preallocate control response and buffer */ ++ dev->req = usb_ep_alloc_request (gadget->ep0, GFP_KERNEL); ++ if (!dev->req) ++ goto enomem; ++ dev->req->buf = usb_ep_alloc_buffer (gadget->ep0, USB_BUFSIZ, ++ &dev->req->dma, GFP_KERNEL); ++ if (!dev->req->buf) ++ goto enomem; ++ ++ dev->req->complete = zero_setup_complete; ++ ++ device_desc.bMaxPacketSize0 = gadget->ep0->maxpacket; ++ ++#ifdef CONFIG_USB_GADGET_DUALSPEED ++ /* assume ep0 uses the same value for both speeds ... 
*/ ++ dev_qualifier.bMaxPacketSize0 = device_desc.bMaxPacketSize0; ++ ++ /* and that all endpoints are dual-speed */ ++ //hs_source_desc.bEndpointAddress = fs_source_desc.bEndpointAddress; ++ //hs_sink_desc.bEndpointAddress = fs_sink_desc.bEndpointAddress; ++#endif ++ ++ usb_gadget_set_selfpowered (gadget); ++ ++ init_timer (&dev->resume); ++ dev->resume.function = zero_autoresume; ++ dev->resume.data = (unsigned long) dev; ++ ++ gadget->ep0->driver_data = dev; ++ ++ INFO (dev, "%s, version: " DRIVER_VERSION "\n", longname); ++ INFO (dev, "using %s, OUT %s IN %s\n", gadget->name, ++ EP_OUT_NAME, EP_IN_NAME); ++ ++ snprintf (manufacturer, sizeof manufacturer, ++ UTS_SYSNAME " " UTS_RELEASE " with %s", ++ gadget->name); ++ ++ return 0; ++ ++enomem: ++ zero_unbind (gadget); ++ return -ENOMEM; ++} ++ ++/*-------------------------------------------------------------------------*/ ++ ++static void ++zero_suspend (struct usb_gadget *gadget) ++{ ++ struct zero_dev *dev = get_gadget_data (gadget); ++ ++ if (gadget->speed == USB_SPEED_UNKNOWN) ++ return; ++ ++ if (autoresume) { ++ mod_timer (&dev->resume, jiffies + (HZ * autoresume)); ++ DBG (dev, "suspend, wakeup in %d seconds\n", autoresume); ++ } else ++ DBG (dev, "suspend\n"); ++} ++ ++static void ++zero_resume (struct usb_gadget *gadget) ++{ ++ struct zero_dev *dev = get_gadget_data (gadget); ++ ++ DBG (dev, "resume\n"); ++ del_timer (&dev->resume); ++} ++ ++ ++/*-------------------------------------------------------------------------*/ ++ ++static struct usb_gadget_driver zero_driver = { ++#ifdef CONFIG_USB_GADGET_DUALSPEED ++ .speed = USB_SPEED_HIGH, ++#else ++ .speed = USB_SPEED_FULL, ++#endif ++ .function = (char *) longname, ++ .bind = zero_bind, ++ .unbind = zero_unbind, ++ ++ .setup = zero_setup, ++ .disconnect = zero_disconnect, ++ ++ .suspend = zero_suspend, ++ .resume = zero_resume, ++ ++ .driver = { ++ .name = (char *) shortname, ++ // .shutdown = ... ++ // .suspend = ... ++ // .resume = ... 
++ }, ++}; ++ ++MODULE_AUTHOR ("David Brownell"); ++MODULE_LICENSE ("Dual BSD/GPL"); ++ ++static struct proc_dir_entry *pdir, *pfile; ++ ++static int isoc_read_data (char *page, char **start, ++ off_t off, int count, ++ int *eof, void *data) ++{ ++ int i; ++ static int c = 0; ++ static int done = 0; ++ static int s = 0; ++ ++/* ++ printk ("\ncount: %d\n", count); ++ printk ("rbuf_start: %d\n", rbuf_start); ++ printk ("rbuf_len: %d\n", rbuf_len); ++ printk ("off: %d\n", off); ++ printk ("start: %p\n\n", *start); ++*/ ++ if (done) { ++ c = 0; ++ done = 0; ++ *eof = 1; ++ return 0; ++ } ++ ++ if (c == 0) { ++ if (rbuf_len == RBUF_LEN) ++ s = rbuf_start; ++ else s = 0; ++ } ++ ++ for (i=0; i= rbuf_len) { ++ *eof = 1; ++ done = 1; ++ } ++ ++ ++ return i; ++} ++ ++static int __init init (void) ++{ ++ ++ int retval = 0; ++ ++ pdir = proc_mkdir("isoc_test", NULL); ++ if(pdir == NULL) { ++ retval = -ENOMEM; ++ printk("Error creating dir\n"); ++ goto done; ++ } ++ pdir->owner = THIS_MODULE; ++ ++ pfile = create_proc_read_entry("isoc_data", ++ 0444, pdir, ++ isoc_read_data, ++ NULL); ++ if (pfile == NULL) { ++ retval = -ENOMEM; ++ printk("Error creating file\n"); ++ goto no_file; ++ } ++ pfile->owner = THIS_MODULE; ++ ++ return usb_gadget_register_driver (&zero_driver); ++ ++ no_file: ++ remove_proc_entry("isoc_data", NULL); ++ done: ++ return retval; ++} ++module_init (init); ++ ++static void __exit cleanup (void) ++{ ++ ++ usb_gadget_unregister_driver (&zero_driver); ++ ++ remove_proc_entry("isoc_data", pdir); ++ remove_proc_entry("isoc_test", NULL); ++} ++module_exit (cleanup); +--- /dev/null ++++ b/drivers/usb/dwc_otg/dwc_otg_attr.c +@@ -0,0 +1,966 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_attr.c $ ++ * $Revision: 1.2 $ ++ * $Date: 2008-11-21 05:39:15 $ ++ * $Change: 1064918 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. 
IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
++ * DAMAGE.
++ * ========================================================================== */
++
++/** @file
++ *
++ * The diagnostic interface will provide access to the controller for
++ * bringing up the hardware and testing. The Linux driver attributes
++ * feature will be used to provide the Linux Diagnostic
++ * Interface. These attributes are accessed through sysfs.
++ */
++
++/** @page "Linux Module Attributes"
++ *
++ * The Linux module attributes feature is used to provide the Linux
++ * Diagnostic Interface. These attributes are accessed through sysfs.
++ * The diagnostic interface will provide access to the controller for
++ * bringing up the hardware and testing.
++
++
++ The following table shows the attributes (access mode in parentheses).
++
++ mode (Read): Returns the current mode: 0 for device mode, 1 for host mode
++ hnpcapable (Read/Write): Gets or sets the "HNP-capable" bit in the Core USB Configuration Register. Read returns the current value.
++ srpcapable (Read/Write): Gets or sets the "SRP-capable" bit in the Core USB Configuration Register. Read returns the current value.
++ hnp (Read/Write): Initiates the Host Negotiation Protocol. Read returns the status.
++ srp (Read/Write): Initiates the Session Request Protocol. Read returns the status.
++ buspower (Read/Write): Gets or sets the Power State of the bus (0 - Off or 1 - On)
++ bussuspend (Read/Write): Suspends the USB bus.
++ busconnected (Read): Gets the connection status of the bus
++ gotgctl (Read/Write): Gets or sets the Core Control Status Register.
++ gusbcfg (Read/Write): Gets or sets the Core USB Configuration Register
++ grxfsiz (Read/Write): Gets or sets the Receive FIFO Size Register
++ gnptxfsiz (Read/Write): Gets or sets the non-periodic Transmit Size Register
++ gpvndctl (Read/Write): Gets or sets the PHY Vendor Control Register
++ ggpio (Read/Write): Gets the value in the lower 16-bits of the General Purpose IO Register or sets the upper 16 bits.
++ guid (Read/Write): Gets or sets the value of the User ID Register
++ gsnpsid (Read): Gets the value of the Synopsys ID Register
++ devspeed (Read/Write): Gets or sets the device speed setting in the DCFG register
++ enumspeed (Read): Gets the device enumeration Speed.
++ hptxfsiz (Read): Gets the value of the Host Periodic Transmit FIFO
++ hprt0 (Read/Write): Gets or sets the value in the Host Port Control and Status Register
++ regoffset (Read/Write): Sets the register offset for the next Register Access
++ regvalue (Read/Write): Gets or sets the value of the register at the offset in the regoffset attribute.
++ remote_wakeup (Read/Write): On read, shows the status of Remote Wakeup. On write, initiates a remote wakeup of the host. When bit 0 is 1 and Remote Wakeup is enabled, the Remote Wakeup signalling bit in the Device Control Register is set for 1 milli-second.
++ regdump (Read): Dumps the contents of core registers.
++ spramdump (Read): Dumps the contents of the SPRAM.
++ hcddump (Read): Dumps the current HCD state.
++ hcd_frrem (Read): Shows the average value of the Frame Remaining field in the Host Frame Number/Frame Remaining register when an SOF interrupt occurs. This can be used to determine the average interrupt latency. Also shows the average Frame Remaining value for start_transfer and the "a" and "b" sample points. The "a" and "b" sample points may be used during debugging to determine how long it takes to execute a section of the HCD code.
++ rd_reg_test (Read): Displays the time required to read the GNPTXFSIZ register many times (the output shows the number of times the register is read).
++ wr_reg_test (Read): Displays the time required to write the GNPTXFSIZ register many times (the output shows the number of times the register is written).
++ ++ Example usage: ++ To get the current mode: ++ cat /sys/devices/lm0/mode ++ ++ To power down the USB: ++ echo 0 > /sys/devices/lm0/buspower ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include /* permission constants */ ++#include ++ ++#include ++ ++#include "linux/dwc_otg_plat.h" ++#include "dwc_otg_attr.h" ++#include "dwc_otg_driver.h" ++#include "dwc_otg_pcd.h" ++#include "dwc_otg_hcd.h" ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++/* ++ * MACROs for defining sysfs attribute ++ */ ++#define DWC_OTG_DEVICE_ATTR_BITFIELD_SHOW(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \ ++static ssize_t _otg_attr_name_##_show (struct device *_dev, struct device_attribute *attr, char *buf) \ ++{ \ ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); \ ++ uint32_t val; \ ++ val = dwc_read_reg32 (_addr_); \ ++ val = (val & (_mask_)) >> _shift_; \ ++ return sprintf (buf, "%s = 0x%x\n", _string_, val); \ ++} ++#define DWC_OTG_DEVICE_ATTR_BITFIELD_STORE(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \ ++static ssize_t _otg_attr_name_##_store (struct device *_dev, struct device_attribute *attr, \ ++ const char *buf, size_t count) \ ++{ \ ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); \ ++ uint32_t set = simple_strtoul(buf, NULL, 16); \ ++ uint32_t clear = set; \ ++ clear = ((~clear) << _shift_) & _mask_; \ ++ set = (set << _shift_) & _mask_; \ ++ dev_dbg(_dev, "Storing Address=0x%08x Set=0x%08x Clear=0x%08x\n", (uint32_t)_addr_, set, clear); \ ++ dwc_modify_reg32(_addr_, clear, set); \ ++ return count; \ ++} ++ ++/* ++ * MACROs for defining sysfs attribute for 32-bit registers ++ */ ++#define DWC_OTG_DEVICE_ATTR_REG_SHOW(_otg_attr_name_,_addr_,_string_) \ ++static ssize_t _otg_attr_name_##_show (struct device *_dev, struct device_attribute *attr, char *buf) \ ++{ \ ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); \ ++ uint32_t val; \ ++ val = dwc_read_reg32 (_addr_); \ ++ return sprintf (buf, "%s = 0x%08x\n", _string_, val); \ ++} ++#define DWC_OTG_DEVICE_ATTR_REG_STORE(_otg_attr_name_,_addr_,_string_) \ ++static ssize_t _otg_attr_name_##_store (struct device *_dev, struct device_attribute *attr, \ ++ const char *buf, size_t count) \ ++{ \ ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); \ ++ uint32_t val = simple_strtoul(buf, NULL, 16); \ ++ dev_dbg(_dev, "Storing Address=0x%08x Val=0x%08x\n", (uint32_t)_addr_, val); \ ++ dwc_write_reg32(_addr_, val); \ ++ return count; \ ++} ++ ++#else ++ ++/* ++ * MACROs for defining sysfs attribute ++ */ ++#define DWC_OTG_DEVICE_ATTR_BITFIELD_SHOW(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \ ++static ssize_t _otg_attr_name_##_show (struct device *_dev, char *buf) \ ++{ \ ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);\ ++ uint32_t val; \ ++ val = dwc_read_reg32 (_addr_); \ ++ val = (val & (_mask_)) >> _shift_; \ ++ return sprintf (buf, "%s = 0x%x\n", _string_, val); \ ++} ++#define DWC_OTG_DEVICE_ATTR_BITFIELD_STORE(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \ ++static ssize_t _otg_attr_name_##_store (struct device *_dev, const char *buf, size_t count) \ ++{ \ ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);\ ++ uint32_t set = simple_strtoul(buf, NULL, 16); \ ++ uint32_t clear = set; \ ++ clear = ((~clear) << _shift_) & _mask_; \ ++ set = (set << _shift_) & _mask_; \ ++ dev_dbg(_dev, "Storing Address=0x%08x Set=0x%08x Clear=0x%08x\n", (uint32_t)_addr_, set, clear); \ ++ dwc_modify_reg32(_addr_, clear, set); \ ++ return count; \ ++} ++ ++/* ++ * MACROs for defining sysfs 
attribute for 32-bit registers ++ */ ++#define DWC_OTG_DEVICE_ATTR_REG_SHOW(_otg_attr_name_,_addr_,_string_) \ ++static ssize_t _otg_attr_name_##_show (struct device *_dev, char *buf) \ ++{ \ ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);\ ++ uint32_t val; \ ++ val = dwc_read_reg32 (_addr_); \ ++ return sprintf (buf, "%s = 0x%08x\n", _string_, val); \ ++} ++#define DWC_OTG_DEVICE_ATTR_REG_STORE(_otg_attr_name_,_addr_,_string_) \ ++static ssize_t _otg_attr_name_##_store (struct device *_dev, const char *buf, size_t count) \ ++{ \ ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);\ ++ uint32_t val = simple_strtoul(buf, NULL, 16); \ ++ dev_dbg(_dev, "Storing Address=0x%08x Val=0x%08x\n", (uint32_t)_addr_, val); \ ++ dwc_write_reg32(_addr_, val); \ ++ return count; \ ++} ++ ++#endif ++ ++#define DWC_OTG_DEVICE_ATTR_BITFIELD_RW(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \ ++DWC_OTG_DEVICE_ATTR_BITFIELD_SHOW(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \ ++DWC_OTG_DEVICE_ATTR_BITFIELD_STORE(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \ ++DEVICE_ATTR(_otg_attr_name_,0644,_otg_attr_name_##_show,_otg_attr_name_##_store); ++ ++#define DWC_OTG_DEVICE_ATTR_BITFIELD_RO(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \ ++DWC_OTG_DEVICE_ATTR_BITFIELD_SHOW(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \ ++DEVICE_ATTR(_otg_attr_name_,0444,_otg_attr_name_##_show,NULL); ++ ++#define DWC_OTG_DEVICE_ATTR_REG32_RW(_otg_attr_name_,_addr_,_string_) \ ++DWC_OTG_DEVICE_ATTR_REG_SHOW(_otg_attr_name_,_addr_,_string_) \ ++DWC_OTG_DEVICE_ATTR_REG_STORE(_otg_attr_name_,_addr_,_string_) \ ++DEVICE_ATTR(_otg_attr_name_,0644,_otg_attr_name_##_show,_otg_attr_name_##_store); ++ ++#define DWC_OTG_DEVICE_ATTR_REG32_RO(_otg_attr_name_,_addr_,_string_) \ ++DWC_OTG_DEVICE_ATTR_REG_SHOW(_otg_attr_name_,_addr_,_string_) \ ++DEVICE_ATTR(_otg_attr_name_,0444,_otg_attr_name_##_show,NULL); ++ ++ ++/** @name Functions for Show/Store of Attributes */ ++/**@{*/ ++ ++/** ++ * Show the register offset of the Register Access. ++ */ ++static ssize_t regoffset_show( struct device *_dev, ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct device_attribute *attr, ++#endif ++ char *buf) ++{ ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++ return snprintf(buf, sizeof("0xFFFFFFFF\n")+1,"0x%08x\n", otg_dev->reg_offset); ++} ++ ++/** ++ * Set the register offset for the next Register Access Read/Write ++ */ ++static ssize_t regoffset_store( struct device *_dev, ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct device_attribute *attr, ++#endif ++ const char *buf, ++ size_t count ) ++{ ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++ ++ uint32_t offset = simple_strtoul(buf, NULL, 16); ++ //dev_dbg(_dev, "Offset=0x%08x\n", offset); ++ if (offset < 0x00040000 ) { ++ otg_dev->reg_offset = offset; ++ } ++ else { ++ dev_err( _dev, "invalid offset\n" ); ++ } ++ ++ return count; ++} ++DEVICE_ATTR(regoffset, S_IRUGO|S_IWUSR, (void *)regoffset_show, regoffset_store); ++ ++ ++/** ++ * Show the value of the register at the offset in the reg_offset ++ * attribute. 
++ */ ++static ssize_t regvalue_show( struct device *_dev, ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct device_attribute *attr, ++#endif ++ char *buf) ++{ ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++ ++ uint32_t val; ++ volatile uint32_t *addr; ++ ++ if (otg_dev->reg_offset != 0xFFFFFFFF && ++ 0 != otg_dev->base) { ++ /* Calculate the address */ ++ addr = (uint32_t*)(otg_dev->reg_offset + ++ (uint8_t*)otg_dev->base); ++ //dev_dbg(_dev, "@0x%08x\n", (unsigned)addr); ++ val = dwc_read_reg32( addr ); ++ return snprintf(buf, sizeof("Reg@0xFFFFFFFF = 0xFFFFFFFF\n")+1, ++ "Reg@0x%06x = 0x%08x\n", ++ otg_dev->reg_offset, val); ++ } ++ else { ++ dev_err(_dev, "Invalid offset (0x%0x)\n", ++ otg_dev->reg_offset); ++ return sprintf(buf, "invalid offset\n" ); ++ } ++} ++ ++/** ++ * Store the value in the register at the offset in the reg_offset ++ * attribute. ++ * ++ */ ++static ssize_t regvalue_store( struct device *_dev, ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct device_attribute *attr, ++#endif ++ const char *buf, ++ size_t count ) ++{ ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++ ++ volatile uint32_t * addr; ++ uint32_t val = simple_strtoul(buf, NULL, 16); ++ //dev_dbg(_dev, "Offset=0x%08x Val=0x%08x\n", otg_dev->reg_offset, val); ++ if (otg_dev->reg_offset != 0xFFFFFFFF && 0 != otg_dev->base) { ++ /* Calculate the address */ ++ addr = (uint32_t*)(otg_dev->reg_offset + ++ (uint8_t*)otg_dev->base); ++ //dev_dbg(_dev, "@0x%08x\n", (unsigned)addr); ++ dwc_write_reg32( addr, val ); ++ } ++ else { ++ dev_err(_dev, "Invalid Register Offset (0x%08x)\n", ++ otg_dev->reg_offset); ++ } ++ return count; ++} ++DEVICE_ATTR(regvalue, S_IRUGO|S_IWUSR, regvalue_show, regvalue_store); ++ ++/* ++ * Attributes ++ */ ++DWC_OTG_DEVICE_ATTR_BITFIELD_RO(mode,&(otg_dev->core_if->core_global_regs->gotgctl),(1<<20),20,"Mode"); ++DWC_OTG_DEVICE_ATTR_BITFIELD_RW(hnpcapable,&(otg_dev->core_if->core_global_regs->gusbcfg),(1<<9),9,"Mode"); ++DWC_OTG_DEVICE_ATTR_BITFIELD_RW(srpcapable,&(otg_dev->core_if->core_global_regs->gusbcfg),(1<<8),8,"Mode"); ++ ++//DWC_OTG_DEVICE_ATTR_BITFIELD_RW(buspower,&(otg_dev->core_if->core_global_regs->gotgctl),(1<<8),8,"Mode"); ++//DWC_OTG_DEVICE_ATTR_BITFIELD_RW(bussuspend,&(otg_dev->core_if->core_global_regs->gotgctl),(1<<8),8,"Mode"); ++DWC_OTG_DEVICE_ATTR_BITFIELD_RO(busconnected,otg_dev->core_if->host_if->hprt0,0x01,0,"Bus Connected"); ++ ++DWC_OTG_DEVICE_ATTR_REG32_RW(gotgctl,&(otg_dev->core_if->core_global_regs->gotgctl),"GOTGCTL"); ++DWC_OTG_DEVICE_ATTR_REG32_RW(gusbcfg,&(otg_dev->core_if->core_global_regs->gusbcfg),"GUSBCFG"); ++DWC_OTG_DEVICE_ATTR_REG32_RW(grxfsiz,&(otg_dev->core_if->core_global_regs->grxfsiz),"GRXFSIZ"); ++DWC_OTG_DEVICE_ATTR_REG32_RW(gnptxfsiz,&(otg_dev->core_if->core_global_regs->gnptxfsiz),"GNPTXFSIZ"); ++DWC_OTG_DEVICE_ATTR_REG32_RW(gpvndctl,&(otg_dev->core_if->core_global_regs->gpvndctl),"GPVNDCTL"); ++DWC_OTG_DEVICE_ATTR_REG32_RW(ggpio,&(otg_dev->core_if->core_global_regs->ggpio),"GGPIO"); ++DWC_OTG_DEVICE_ATTR_REG32_RW(guid,&(otg_dev->core_if->core_global_regs->guid),"GUID"); ++DWC_OTG_DEVICE_ATTR_REG32_RO(gsnpsid,&(otg_dev->core_if->core_global_regs->gsnpsid),"GSNPSID"); ++DWC_OTG_DEVICE_ATTR_BITFIELD_RW(devspeed,&(otg_dev->core_if->dev_if->dev_global_regs->dcfg),0x3,0,"Device Speed"); ++DWC_OTG_DEVICE_ATTR_BITFIELD_RO(enumspeed,&(otg_dev->core_if->dev_if->dev_global_regs->dsts),0x6,1,"Device Enumeration Speed"); ++ 
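++/*
++ * For reference, each DWC_OTG_DEVICE_ATTR_* line here expands into a
++ * show/store pair plus a DEVICE_ATTR() registration.  The register-address
++ * argument is pasted textually into the generated functions, where the
++ * local otg_dev fetched by dev_get_drvdata() is in scope -- which is why
++ * the invocations can name otg_dev at file scope.  As an illustrative
++ * sketch (following the >= 2.6.20 branch of the macros above, with the
++ * dev_dbg() call omitted), DWC_OTG_DEVICE_ATTR_BITFIELD_RW(hnpcapable, ...)
++ * becomes roughly:
++ *
++ *    static ssize_t hnpcapable_show(struct device *_dev,
++ *                                   struct device_attribute *attr, char *buf)
++ *    {
++ *        dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);
++ *        uint32_t val;
++ *        val = dwc_read_reg32(&(otg_dev->core_if->core_global_regs->gusbcfg));
++ *        val = (val & (1 << 9)) >> 9;
++ *        return sprintf(buf, "%s = 0x%x\n", "Mode", val);
++ *    }
++ *
++ *    static ssize_t hnpcapable_store(struct device *_dev,
++ *                                    struct device_attribute *attr,
++ *                                    const char *buf, size_t count)
++ *    {
++ *        dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);
++ *        uint32_t set = simple_strtoul(buf, NULL, 16);
++ *        uint32_t clear = set;
++ *        clear = ((~clear) << 9) & (1 << 9);
++ *        set = (set << 9) & (1 << 9);
++ *        dwc_modify_reg32(&(otg_dev->core_if->core_global_regs->gusbcfg),
++ *                         clear, set);
++ *        return count;
++ *    }
++ *
++ *    DEVICE_ATTR(hnpcapable, 0644, hnpcapable_show, hnpcapable_store);
++ *
++ * Note that the label string passed for hnpcapable/srpcapable really is
++ * "Mode" in this driver, so reading those attributes prints "Mode = 0x...".
++ */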
++DWC_OTG_DEVICE_ATTR_REG32_RO(hptxfsiz,&(otg_dev->core_if->core_global_regs->hptxfsiz),"HPTXFSIZ"); ++DWC_OTG_DEVICE_ATTR_REG32_RW(hprt0,otg_dev->core_if->host_if->hprt0,"HPRT0"); ++ ++ ++/** ++ * @todo Add code to initiate the HNP. ++ */ ++/** ++ * Show the HNP status bit ++ */ ++static ssize_t hnp_show( struct device *_dev, ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct device_attribute *attr, ++#endif ++ char *buf) ++{ ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++ ++ gotgctl_data_t val; ++ val.d32 = dwc_read_reg32 (&(otg_dev->core_if->core_global_regs->gotgctl)); ++ return sprintf (buf, "HstNegScs = 0x%x\n", val.b.hstnegscs); ++} ++ ++/** ++ * Set the HNP Request bit ++ */ ++static ssize_t hnp_store( struct device *_dev, ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct device_attribute *attr, ++#endif ++ const char *buf, ++ size_t count ) ++{ ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++ ++ uint32_t in = simple_strtoul(buf, NULL, 16); ++ uint32_t *addr = (uint32_t *)&(otg_dev->core_if->core_global_regs->gotgctl); ++ gotgctl_data_t mem; ++ mem.d32 = dwc_read_reg32(addr); ++ mem.b.hnpreq = in; ++ dev_dbg(_dev, "Storing Address=0x%08x Data=0x%08x\n", (uint32_t)addr, mem.d32); ++ dwc_write_reg32(addr, mem.d32); ++ return count; ++} ++DEVICE_ATTR(hnp, 0644, hnp_show, hnp_store); ++ ++/** ++ * @todo Add code to initiate the SRP. ++ */ ++/** ++ * Show the SRP status bit ++ */ ++static ssize_t srp_show( struct device *_dev, ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct device_attribute *attr, ++#endif ++ char *buf) ++{ ++#ifndef DWC_HOST_ONLY ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++ ++ gotgctl_data_t val; ++ val.d32 = dwc_read_reg32 (&(otg_dev->core_if->core_global_regs->gotgctl)); ++ return sprintf (buf, "SesReqScs = 0x%x\n", val.b.sesreqscs); ++#else ++ return sprintf(buf, "Host Only Mode!\n"); ++#endif ++} ++ ++ ++ ++/** ++ * Set the SRP Request bit ++ */ ++static ssize_t srp_store( struct device *_dev, ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct device_attribute *attr, ++#endif ++ const char *buf, ++ size_t count ) ++{ ++#ifndef DWC_HOST_ONLY ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++ ++ dwc_otg_pcd_initiate_srp(otg_dev->pcd); ++#endif ++ return count; ++} ++DEVICE_ATTR(srp, 0644, srp_show, srp_store); ++ ++/** ++ * @todo Need to do more for power on/off? 
++ */ ++/** ++ * Show the Bus Power status ++ */ ++static ssize_t buspower_show( struct device *_dev, ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct device_attribute *attr, ++#endif ++ char *buf) ++{ ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++ ++ hprt0_data_t val; ++ val.d32 = dwc_read_reg32 (otg_dev->core_if->host_if->hprt0); ++ return sprintf (buf, "Bus Power = 0x%x\n", val.b.prtpwr); ++} ++ ++ ++/** ++ * Set the Bus Power status ++ */ ++static ssize_t buspower_store( struct device *_dev, ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct device_attribute *attr, ++#endif ++ const char *buf, ++ size_t count ) ++{ ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++ ++ uint32_t on = simple_strtoul(buf, NULL, 16); ++ uint32_t *addr = (uint32_t *)otg_dev->core_if->host_if->hprt0; ++ hprt0_data_t mem; ++ ++ mem.d32 = dwc_read_reg32(addr); ++ mem.b.prtpwr = on; ++ ++ //dev_dbg(_dev, "Storing Address=0x%08x Data=0x%08x\n", (uint32_t)addr, mem.d32); ++ dwc_write_reg32(addr, mem.d32); ++ ++ return count; ++} ++DEVICE_ATTR(buspower, 0644, buspower_show, buspower_store); ++ ++/** ++ * @todo Need to do more for suspend? ++ */ ++/** ++ * Show the Bus Suspend status ++ */ ++static ssize_t bussuspend_show( struct device *_dev, ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct device_attribute *attr, ++#endif ++ char *buf) ++{ ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++ ++ hprt0_data_t val; ++ val.d32 = dwc_read_reg32 (otg_dev->core_if->host_if->hprt0); ++ return sprintf (buf, "Bus Suspend = 0x%x\n", val.b.prtsusp); ++} ++ ++/** ++ * Set the Bus Suspend status ++ */ ++static ssize_t bussuspend_store( struct device *_dev, ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct device_attribute *attr, ++#endif ++ const char *buf, ++ size_t count ) ++{ ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++ ++ uint32_t in = simple_strtoul(buf, NULL, 16); ++ uint32_t *addr = (uint32_t *)otg_dev->core_if->host_if->hprt0; ++ hprt0_data_t mem; ++ mem.d32 = dwc_read_reg32(addr); ++ mem.b.prtsusp = in; ++ dev_dbg(_dev, "Storing Address=0x%08x Data=0x%08x\n", (uint32_t)addr, mem.d32); ++ dwc_write_reg32(addr, mem.d32); ++ return count; ++} ++DEVICE_ATTR(bussuspend, 0644, bussuspend_show, bussuspend_store); ++ ++/** ++ * Show the status of Remote Wakeup. ++ */ ++static ssize_t remote_wakeup_show( struct device *_dev, ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct device_attribute *attr, ++#endif ++ char *buf) ++{ ++#ifndef DWC_HOST_ONLY ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++ ++ dctl_data_t val; ++ val.d32 = ++ dwc_read_reg32( &otg_dev->core_if->dev_if->dev_global_regs->dctl); ++ return sprintf( buf, "Remote Wakeup = %d Enabled = %d\n", ++ val.b.rmtwkupsig, otg_dev->pcd->remote_wakeup_enable); ++#else ++ return sprintf(buf, "Host Only Mode!\n"); ++#endif ++} ++/** ++ * Initiate a remote wakeup of the host. The Device control register ++ * Remote Wakeup Signal bit is written if the PCD Remote wakeup enable ++ * flag is set. 
++ * ++ */ ++static ssize_t remote_wakeup_store( struct device *_dev, ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct device_attribute *attr, ++#endif ++ const char *buf, ++ size_t count ) ++{ ++#ifndef DWC_HOST_ONLY ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++ ++ uint32_t val = simple_strtoul(buf, NULL, 16); ++ if (val&1) { ++ dwc_otg_pcd_remote_wakeup(otg_dev->pcd, 1); ++ } ++ else { ++ dwc_otg_pcd_remote_wakeup(otg_dev->pcd, 0); ++ } ++#endif ++ return count; ++} ++DEVICE_ATTR(remote_wakeup, S_IRUGO|S_IWUSR, remote_wakeup_show, ++ remote_wakeup_store); ++ ++/** ++ * Dump global registers and either host or device registers (depending on the ++ * current mode of the core). ++ */ ++static ssize_t regdump_show( struct device *_dev, ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct device_attribute *attr, ++#endif ++ char *buf) ++{ ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++ ++ dwc_otg_dump_global_registers( otg_dev->core_if); ++ if (dwc_otg_is_host_mode(otg_dev->core_if)) { ++ dwc_otg_dump_host_registers( otg_dev->core_if); ++ } else { ++ dwc_otg_dump_dev_registers( otg_dev->core_if); ++ ++ } ++ return sprintf( buf, "Register Dump\n" ); ++} ++ ++DEVICE_ATTR(regdump, S_IRUGO|S_IWUSR, regdump_show, 0); ++ ++/** ++ * Dump global registers and either host or device registers (depending on the ++ * current mode of the core). ++ */ ++static ssize_t spramdump_show( struct device *_dev, ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct device_attribute *attr, ++#endif ++ char *buf) ++{ ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++ ++ dwc_otg_dump_spram( otg_dev->core_if); ++ ++ return sprintf( buf, "SPRAM Dump\n" ); ++} ++ ++DEVICE_ATTR(spramdump, S_IRUGO|S_IWUSR, spramdump_show, 0); ++ ++/** ++ * Dump the current hcd state. ++ */ ++static ssize_t hcddump_show( struct device *_dev, ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct device_attribute *attr, ++#endif ++ char *buf) ++{ ++#ifndef DWC_DEVICE_ONLY ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++ ++ dwc_otg_hcd_dump_state(otg_dev->hcd); ++#endif ++ return sprintf( buf, "HCD Dump\n" ); ++} ++ ++DEVICE_ATTR(hcddump, S_IRUGO|S_IWUSR, hcddump_show, 0); ++ ++/** ++ * Dump the average frame remaining at SOF. This can be used to ++ * determine average interrupt latency. Frame remaining is also shown for ++ * start transfer and two additional sample points. ++ */ ++static ssize_t hcd_frrem_show( struct device *_dev, ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct device_attribute *attr, ++#endif ++ char *buf) ++{ ++#ifndef DWC_DEVICE_ONLY ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++ ++ dwc_otg_hcd_dump_frrem(otg_dev->hcd); ++#endif ++ return sprintf( buf, "HCD Dump Frame Remaining\n" ); ++} ++ ++DEVICE_ATTR(hcd_frrem, S_IRUGO|S_IWUSR, hcd_frrem_show, 0); ++ ++/** ++ * Displays the time required to read the GNPTXFSIZ register many times (the ++ * output shows the number of times the register is read). 
++ */ ++#define RW_REG_COUNT 10000000 ++#define MSEC_PER_JIFFIE 1000/HZ ++static ssize_t rd_reg_test_show( struct device *_dev, ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct device_attribute *attr, ++#endif ++ char *buf) ++{ ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++ ++ int i; ++ int time; ++ int start_jiffies; ++ ++ printk("HZ %d, MSEC_PER_JIFFIE %d, loops_per_jiffy %lu\n", ++ HZ, MSEC_PER_JIFFIE, loops_per_jiffy); ++ start_jiffies = jiffies; ++ for (i = 0; i < RW_REG_COUNT; i++) { ++ dwc_read_reg32(&otg_dev->core_if->core_global_regs->gnptxfsiz); ++ } ++ time = jiffies - start_jiffies; ++ return sprintf( buf, "Time to read GNPTXFSIZ reg %d times: %d msecs (%d jiffies)\n", ++ RW_REG_COUNT, time * MSEC_PER_JIFFIE, time ); ++} ++ ++DEVICE_ATTR(rd_reg_test, S_IRUGO|S_IWUSR, rd_reg_test_show, 0); ++ ++/** ++ * Displays the time required to write the GNPTXFSIZ register many times (the ++ * output shows the number of times the register is written). ++ */ ++static ssize_t wr_reg_test_show( struct device *_dev, ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct device_attribute *attr, ++#endif ++ char *buf) ++{ ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); ++ ++ uint32_t reg_val; ++ int i; ++ int time; ++ int start_jiffies; ++ ++ printk("HZ %d, MSEC_PER_JIFFIE %d, loops_per_jiffy %lu\n", ++ HZ, MSEC_PER_JIFFIE, loops_per_jiffy); ++ reg_val = dwc_read_reg32(&otg_dev->core_if->core_global_regs->gnptxfsiz); ++ start_jiffies = jiffies; ++ for (i = 0; i < RW_REG_COUNT; i++) { ++ dwc_write_reg32(&otg_dev->core_if->core_global_regs->gnptxfsiz, reg_val); ++ } ++ time = jiffies - start_jiffies; ++ return sprintf( buf, "Time to write GNPTXFSIZ reg %d times: %d msecs (%d jiffies)\n", ++ RW_REG_COUNT, time * MSEC_PER_JIFFIE, time); ++} ++ ++DEVICE_ATTR(wr_reg_test, S_IRUGO|S_IWUSR, wr_reg_test_show, 0); ++/**@}*/ ++ ++/** ++ * Create the device files ++ */ ++void dwc_otg_attr_create (struct device *dev) ++{ ++ int error; ++ ++ error = device_create_file(dev, &dev_attr_regoffset); ++ error = device_create_file(dev, &dev_attr_regvalue); ++ error = device_create_file(dev, &dev_attr_mode); ++ error = device_create_file(dev, &dev_attr_hnpcapable); ++ error = device_create_file(dev, &dev_attr_srpcapable); ++ error = device_create_file(dev, &dev_attr_hnp); ++ error = device_create_file(dev, &dev_attr_srp); ++ error = device_create_file(dev, &dev_attr_buspower); ++ error = device_create_file(dev, &dev_attr_bussuspend); ++ error = device_create_file(dev, &dev_attr_busconnected); ++ error = device_create_file(dev, &dev_attr_gotgctl); ++ error = device_create_file(dev, &dev_attr_gusbcfg); ++ error = device_create_file(dev, &dev_attr_grxfsiz); ++ error = device_create_file(dev, &dev_attr_gnptxfsiz); ++ error = device_create_file(dev, &dev_attr_gpvndctl); ++ error = device_create_file(dev, &dev_attr_ggpio); ++ error = device_create_file(dev, &dev_attr_guid); ++ error = device_create_file(dev, &dev_attr_gsnpsid); ++ error = device_create_file(dev, &dev_attr_devspeed); ++ error = device_create_file(dev, &dev_attr_enumspeed); ++ error = device_create_file(dev, &dev_attr_hptxfsiz); ++ error = device_create_file(dev, &dev_attr_hprt0); ++ error = device_create_file(dev, &dev_attr_remote_wakeup); ++ error = device_create_file(dev, &dev_attr_regdump); ++ error = device_create_file(dev, &dev_attr_spramdump); ++ error = device_create_file(dev, &dev_attr_hcddump); ++ error = device_create_file(dev, &dev_attr_hcd_frrem); ++ error = device_create_file(dev, &dev_attr_rd_reg_test); ++ 
error = device_create_file(dev, &dev_attr_wr_reg_test); ++} ++ ++/** ++ * Remove the device files ++ */ ++void dwc_otg_attr_remove (struct device *dev) ++{ ++ device_remove_file(dev, &dev_attr_regoffset); ++ device_remove_file(dev, &dev_attr_regvalue); ++ device_remove_file(dev, &dev_attr_mode); ++ device_remove_file(dev, &dev_attr_hnpcapable); ++ device_remove_file(dev, &dev_attr_srpcapable); ++ device_remove_file(dev, &dev_attr_hnp); ++ device_remove_file(dev, &dev_attr_srp); ++ device_remove_file(dev, &dev_attr_buspower); ++ device_remove_file(dev, &dev_attr_bussuspend); ++ device_remove_file(dev, &dev_attr_busconnected); ++ device_remove_file(dev, &dev_attr_gotgctl); ++ device_remove_file(dev, &dev_attr_gusbcfg); ++ device_remove_file(dev, &dev_attr_grxfsiz); ++ device_remove_file(dev, &dev_attr_gnptxfsiz); ++ device_remove_file(dev, &dev_attr_gpvndctl); ++ device_remove_file(dev, &dev_attr_ggpio); ++ device_remove_file(dev, &dev_attr_guid); ++ device_remove_file(dev, &dev_attr_gsnpsid); ++ device_remove_file(dev, &dev_attr_devspeed); ++ device_remove_file(dev, &dev_attr_enumspeed); ++ device_remove_file(dev, &dev_attr_hptxfsiz); ++ device_remove_file(dev, &dev_attr_hprt0); ++ device_remove_file(dev, &dev_attr_remote_wakeup); ++ device_remove_file(dev, &dev_attr_regdump); ++ device_remove_file(dev, &dev_attr_spramdump); ++ device_remove_file(dev, &dev_attr_hcddump); ++ device_remove_file(dev, &dev_attr_hcd_frrem); ++ device_remove_file(dev, &dev_attr_rd_reg_test); ++ device_remove_file(dev, &dev_attr_wr_reg_test); ++} +--- /dev/null ++++ b/drivers/usb/dwc_otg/dwc_otg_attr.h +@@ -0,0 +1,67 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_attr.h $ ++ * $Revision: 1.2 $ ++ * $Date: 2008-11-21 05:39:15 $ ++ * $Change: 477051 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. 
IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++ ++#if !defined(__DWC_OTG_ATTR_H__) ++#define __DWC_OTG_ATTR_H__ ++ ++/** @file ++ * This file contains the interface to the Linux device attributes. ++ */ ++extern struct device_attribute dev_attr_regoffset; ++extern struct device_attribute dev_attr_regvalue; ++ ++extern struct device_attribute dev_attr_mode; ++extern struct device_attribute dev_attr_hnpcapable; ++extern struct device_attribute dev_attr_srpcapable; ++extern struct device_attribute dev_attr_hnp; ++extern struct device_attribute dev_attr_srp; ++extern struct device_attribute dev_attr_buspower; ++extern struct device_attribute dev_attr_bussuspend; ++extern struct device_attribute dev_attr_busconnected; ++extern struct device_attribute dev_attr_gotgctl; ++extern struct device_attribute dev_attr_gusbcfg; ++extern struct device_attribute dev_attr_grxfsiz; ++extern struct device_attribute dev_attr_gnptxfsiz; ++extern struct device_attribute dev_attr_gpvndctl; ++extern struct device_attribute dev_attr_ggpio; ++extern struct device_attribute dev_attr_guid; ++extern struct device_attribute dev_attr_gsnpsid; ++extern struct device_attribute dev_attr_devspeed; ++extern struct device_attribute dev_attr_enumspeed; ++extern struct device_attribute dev_attr_hptxfsiz; ++extern struct device_attribute dev_attr_hprt0; ++ ++void dwc_otg_attr_create (struct device *dev); ++void dwc_otg_attr_remove (struct device *dev); ++ ++#endif +--- /dev/null ++++ b/drivers/usb/dwc_otg/dwc_otg_cil.c +@@ -0,0 +1,3692 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_cil.c $ ++ * $Revision: 1.7 $ ++ * $Date: 2008-12-22 11:43:05 $ ++ * $Change: 1117667 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. 
IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++ ++/** @file ++ * ++ * The Core Interface Layer provides basic services for accessing and ++ * managing the DWC_otg hardware. These services are used by both the ++ * Host Controller Driver and the Peripheral Controller Driver. ++ * ++ * The CIL manages the memory map for the core so that the HCD and PCD ++ * don't have to do this separately. It also handles basic tasks like ++ * reading/writing the registers and data FIFOs in the controller. ++ * Some of the data access functions provide encapsulation of several ++ * operations required to perform a task, such as writing multiple ++ * registers to start a transfer. Finally, the CIL performs basic ++ * services that are not specific to either the host or device modes ++ * of operation. These services include management of the OTG Host ++ * Negotiation Protocol (HNP) and Session Request Protocol (SRP). A ++ * Diagnostic API is also provided to allow testing of the controller ++ * hardware. ++ * ++ * The Core Interface Layer has the following requirements: ++ * - Provides basic controller operations. ++ * - Minimal use of OS services. ++ * - The OS services used will be abstracted by using inline functions ++ * or macros. ++ * ++ */ ++#include ++#include ++#ifdef DEBUG ++#include ++#endif ++ ++#include "linux/dwc_otg_plat.h" ++#include "dwc_otg_regs.h" ++#include "dwc_otg_cil.h" ++ ++/* Included only to access hc->qh for non-dword buffer handling ++ * TODO: account it ++ */ ++#include "dwc_otg_hcd.h" ++ ++/** ++ * This function is called to initialize the DWC_otg CSR data ++ * structures. The register addresses in the device and host ++ * structures are initialized from the base address supplied by the ++ * caller. The calling function must make the OS calls to get the ++ * base address of the DWC_otg controller registers. The core_params ++ * argument holds the parameters that specify how the core should be ++ * configured. ++ * ++ * @param[in] reg_base_addr Base address of DWC_otg core registers ++ * @param[in] core_params Pointer to the core configuration parameters ++ * ++ */ ++dwc_otg_core_if_t *dwc_otg_cil_init(const uint32_t *reg_base_addr, ++ dwc_otg_core_params_t *core_params) ++{ ++ dwc_otg_core_if_t *core_if = 0; ++ dwc_otg_dev_if_t *dev_if = 0; ++ dwc_otg_host_if_t *host_if = 0; ++ uint8_t *reg_base = (uint8_t *)reg_base_addr; ++ int i = 0; ++ ++ DWC_DEBUGPL(DBG_CILV, "%s(%p,%p)\n", __func__, reg_base_addr, core_params); ++ ++ core_if = kmalloc(sizeof(dwc_otg_core_if_t), GFP_KERNEL); ++ ++ if (core_if == 0) { ++ DWC_DEBUGPL(DBG_CIL, "Allocation of dwc_otg_core_if_t failed\n"); ++ return 0; ++ } ++ ++ memset(core_if, 0, sizeof(dwc_otg_core_if_t)); ++ ++ core_if->core_params = core_params; ++ core_if->core_global_regs = (dwc_otg_core_global_regs_t *)reg_base; ++ ++ /* ++ * Allocate the Device Mode structures. 
++ */ ++ dev_if = kmalloc(sizeof(dwc_otg_dev_if_t), GFP_KERNEL); ++ ++ if (dev_if == 0) { ++ DWC_DEBUGPL(DBG_CIL, "Allocation of dwc_otg_dev_if_t failed\n"); ++ kfree(core_if); ++ return 0; ++ } ++ ++ dev_if->dev_global_regs = ++ (dwc_otg_device_global_regs_t *)(reg_base + DWC_DEV_GLOBAL_REG_OFFSET); ++ ++ for (i=0; iin_ep_regs[i] = (dwc_otg_dev_in_ep_regs_t *) ++ (reg_base + DWC_DEV_IN_EP_REG_OFFSET + ++ (i * DWC_EP_REG_OFFSET)); ++ ++ dev_if->out_ep_regs[i] = (dwc_otg_dev_out_ep_regs_t *) ++ (reg_base + DWC_DEV_OUT_EP_REG_OFFSET + ++ (i * DWC_EP_REG_OFFSET)); ++ DWC_DEBUGPL(DBG_CILV, "in_ep_regs[%d]->diepctl=%p\n", ++ i, &dev_if->in_ep_regs[i]->diepctl); ++ DWC_DEBUGPL(DBG_CILV, "out_ep_regs[%d]->doepctl=%p\n", ++ i, &dev_if->out_ep_regs[i]->doepctl); ++ } ++ ++ dev_if->speed = 0; // unknown ++ ++ core_if->dev_if = dev_if; ++ ++ /* ++ * Allocate the Host Mode structures. ++ */ ++ host_if = kmalloc(sizeof(dwc_otg_host_if_t), GFP_KERNEL); ++ ++ if (host_if == 0) { ++ DWC_DEBUGPL(DBG_CIL, "Allocation of dwc_otg_host_if_t failed\n"); ++ kfree(dev_if); ++ kfree(core_if); ++ return 0; ++ } ++ ++ host_if->host_global_regs = (dwc_otg_host_global_regs_t *) ++ (reg_base + DWC_OTG_HOST_GLOBAL_REG_OFFSET); ++ ++ host_if->hprt0 = (uint32_t*)(reg_base + DWC_OTG_HOST_PORT_REGS_OFFSET); ++ ++ for (i=0; ihc_regs[i] = (dwc_otg_hc_regs_t *) ++ (reg_base + DWC_OTG_HOST_CHAN_REGS_OFFSET + ++ (i * DWC_OTG_CHAN_REGS_OFFSET)); ++ DWC_DEBUGPL(DBG_CILV, "hc_reg[%d]->hcchar=%p\n", ++ i, &host_if->hc_regs[i]->hcchar); ++ } ++ ++ host_if->num_host_channels = MAX_EPS_CHANNELS; ++ core_if->host_if = host_if; ++ ++ for (i=0; idata_fifo[i] = ++ (uint32_t *)(reg_base + DWC_OTG_DATA_FIFO_OFFSET + ++ (i * DWC_OTG_DATA_FIFO_SIZE)); ++ DWC_DEBUGPL(DBG_CILV, "data_fifo[%d]=0x%08x\n", ++ i, (unsigned)core_if->data_fifo[i]); ++ } ++ ++ core_if->pcgcctl = (uint32_t*)(reg_base + DWC_OTG_PCGCCTL_OFFSET); ++ ++ /* ++ * Store the contents of the hardware configuration registers here for ++ * easy access later. 
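The register-pointer setup above is plain address arithmetic on reg_base: each device endpoint register bank, each host channel register bank and each data FIFO sits at a fixed base offset plus a fixed per-index stride. The offset constants are defined in dwc_otg_regs.h / dwc_otg_cil.h, which are not part of this hunk, so the values below are placeholders for illustration only; a minimal standalone sketch of the same calculation:

	#include <stdint.h>
	#include <stdio.h>

	/* Placeholder offsets: the real values live in dwc_otg_regs.h / dwc_otg_cil.h. */
	#define DWC_DEV_IN_EP_REG_OFFSET	0x900u
	#define DWC_EP_REG_OFFSET		0x20u
	#define DWC_OTG_HOST_CHAN_REGS_OFFSET	0x500u
	#define DWC_OTG_CHAN_REGS_OFFSET	0x20u
	#define DWC_OTG_DATA_FIFO_OFFSET	0x1000u
	#define DWC_OTG_DATA_FIFO_SIZE		0x1000u

	int main(void)
	{
		uintptr_t reg_base = 0x10000000u;	/* hypothetical ioremapped base */
		int i;

		for (i = 0; i < 4; i++) {
			/* Same arithmetic as dwc_otg_cil_init(): base + bank offset + i * stride. */
			printf("in_ep_regs[%d] = %#lx\n", i,
			       (unsigned long)(reg_base + DWC_DEV_IN_EP_REG_OFFSET + i * DWC_EP_REG_OFFSET));
			printf("hc_regs[%d]    = %#lx\n", i,
			       (unsigned long)(reg_base + DWC_OTG_HOST_CHAN_REGS_OFFSET + i * DWC_OTG_CHAN_REGS_OFFSET));
			printf("data_fifo[%d]  = %#lx\n", i,
			       (unsigned long)(reg_base + DWC_OTG_DATA_FIFO_OFFSET + i * DWC_OTG_DATA_FIFO_SIZE));
		}
		return 0;
	}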
++ */ ++ core_if->hwcfg1.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg1); ++ core_if->hwcfg2.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg2); ++ core_if->hwcfg3.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg3); ++ core_if->hwcfg4.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg4); ++ ++ DWC_DEBUGPL(DBG_CILV,"hwcfg1=%08x\n",core_if->hwcfg1.d32); ++ DWC_DEBUGPL(DBG_CILV,"hwcfg2=%08x\n",core_if->hwcfg2.d32); ++ DWC_DEBUGPL(DBG_CILV,"hwcfg3=%08x\n",core_if->hwcfg3.d32); ++ DWC_DEBUGPL(DBG_CILV,"hwcfg4=%08x\n",core_if->hwcfg4.d32); ++ ++ core_if->hcfg.d32 = dwc_read_reg32(&core_if->host_if->host_global_regs->hcfg); ++ core_if->dcfg.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dcfg); ++ ++ DWC_DEBUGPL(DBG_CILV,"hcfg=%08x\n",core_if->hcfg.d32); ++ DWC_DEBUGPL(DBG_CILV,"dcfg=%08x\n",core_if->dcfg.d32); ++ ++ DWC_DEBUGPL(DBG_CILV,"op_mode=%0x\n",core_if->hwcfg2.b.op_mode); ++ DWC_DEBUGPL(DBG_CILV,"arch=%0x\n",core_if->hwcfg2.b.architecture); ++ DWC_DEBUGPL(DBG_CILV,"num_dev_ep=%d\n",core_if->hwcfg2.b.num_dev_ep); ++ DWC_DEBUGPL(DBG_CILV,"num_host_chan=%d\n",core_if->hwcfg2.b.num_host_chan); ++ DWC_DEBUGPL(DBG_CILV,"nonperio_tx_q_depth=0x%0x\n",core_if->hwcfg2.b.nonperio_tx_q_depth); ++ DWC_DEBUGPL(DBG_CILV,"host_perio_tx_q_depth=0x%0x\n",core_if->hwcfg2.b.host_perio_tx_q_depth); ++ DWC_DEBUGPL(DBG_CILV,"dev_token_q_depth=0x%0x\n",core_if->hwcfg2.b.dev_token_q_depth); ++ ++ DWC_DEBUGPL(DBG_CILV,"Total FIFO SZ=%d\n", core_if->hwcfg3.b.dfifo_depth); ++ DWC_DEBUGPL(DBG_CILV,"xfer_size_cntr_width=%0x\n", core_if->hwcfg3.b.xfer_size_cntr_width); ++ ++ /* ++ * Set the SRP sucess bit for FS-I2c ++ */ ++ core_if->srp_success = 0; ++ core_if->srp_timer_started = 0; ++ ++ ++ /* ++ * Create new workqueue and init works ++ */ ++ core_if->wq_otg = create_singlethread_workqueue("dwc_otg"); ++ if(core_if->wq_otg == 0) { ++ DWC_DEBUGPL(DBG_CIL, "Creation of wq_otg failed\n"); ++ kfree(host_if); ++ kfree(dev_if); ++ kfree(core_if); ++ return 0 * HZ; ++ } ++ ++ ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ ++ INIT_WORK(&core_if->w_conn_id, w_conn_id_status_change, core_if); ++ INIT_WORK(&core_if->w_wkp, w_wakeup_detected, core_if); ++ ++#else ++ ++ INIT_WORK(&core_if->w_conn_id, w_conn_id_status_change); ++ INIT_DELAYED_WORK(&core_if->w_wkp, w_wakeup_detected); ++ ++#endif ++ return core_if; ++} ++ ++/** ++ * This function frees the structures allocated by dwc_otg_cil_init(). ++ * ++ * @param[in] core_if The core interface pointer returned from ++ * dwc_otg_cil_init(). ++ * ++ */ ++void dwc_otg_cil_remove(dwc_otg_core_if_t *core_if) ++{ ++ /* Disable all interrupts */ ++ dwc_modify_reg32(&core_if->core_global_regs->gahbcfg, 1, 0); ++ dwc_write_reg32(&core_if->core_global_regs->gintmsk, 0); ++ ++ if (core_if->wq_otg) { ++ destroy_workqueue(core_if->wq_otg); ++ } ++ if (core_if->dev_if) { ++ kfree(core_if->dev_if); ++ } ++ if (core_if->host_if) { ++ kfree(core_if->host_if); ++ } ++ kfree(core_if); ++} ++ ++/** ++ * This function enables the controller's Global Interrupt in the AHB Config ++ * register. ++ * ++ * @param[in] core_if Programming view of DWC_otg controller. ++ */ ++void dwc_otg_enable_global_interrupts(dwc_otg_core_if_t *core_if) ++{ ++ gahbcfg_data_t ahbcfg = { .d32 = 0}; ++ ahbcfg.b.glblintrmsk = 1; /* Enable interrupts */ ++ dwc_modify_reg32(&core_if->core_global_regs->gahbcfg, 0, ahbcfg.d32); ++} ++ ++/** ++ * This function disables the controller's Global Interrupt in the AHB Config ++ * register. 
++ * ++ * @param[in] core_if Programming view of DWC_otg controller. ++ */ ++void dwc_otg_disable_global_interrupts(dwc_otg_core_if_t *core_if) ++{ ++ gahbcfg_data_t ahbcfg = { .d32 = 0}; ++ ahbcfg.b.glblintrmsk = 1; /* Enable interrupts */ ++ dwc_modify_reg32(&core_if->core_global_regs->gahbcfg, ahbcfg.d32, 0); ++} ++ ++/** ++ * This function initializes the commmon interrupts, used in both ++ * device and host modes. ++ * ++ * @param[in] core_if Programming view of the DWC_otg controller ++ * ++ */ ++static void dwc_otg_enable_common_interrupts(dwc_otg_core_if_t *core_if) ++{ ++ dwc_otg_core_global_regs_t *global_regs = ++ core_if->core_global_regs; ++ gintmsk_data_t intr_mask = { .d32 = 0}; ++ ++ /* Clear any pending OTG Interrupts */ ++ dwc_write_reg32(&global_regs->gotgint, 0xFFFFFFFF); ++ ++ /* Clear any pending interrupts */ ++ dwc_write_reg32(&global_regs->gintsts, 0xFFFFFFFF); ++ ++ /* ++ * Enable the interrupts in the GINTMSK. ++ */ ++ intr_mask.b.modemismatch = 1; ++ intr_mask.b.otgintr = 1; ++ ++ if (!core_if->dma_enable) { ++ intr_mask.b.rxstsqlvl = 1; ++ } ++ ++ intr_mask.b.conidstschng = 1; ++ intr_mask.b.wkupintr = 1; ++ intr_mask.b.disconnect = 1; ++ intr_mask.b.usbsuspend = 1; ++ intr_mask.b.sessreqintr = 1; ++ dwc_write_reg32(&global_regs->gintmsk, intr_mask.d32); ++} ++ ++/** ++ * Initializes the FSLSPClkSel field of the HCFG register depending on the PHY ++ * type. ++ */ ++static void init_fslspclksel(dwc_otg_core_if_t *core_if) ++{ ++ uint32_t val; ++ hcfg_data_t hcfg; ++ ++ if (((core_if->hwcfg2.b.hs_phy_type == 2) && ++ (core_if->hwcfg2.b.fs_phy_type == 1) && ++ (core_if->core_params->ulpi_fs_ls)) || ++ (core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS)) { ++ /* Full speed PHY */ ++ val = DWC_HCFG_48_MHZ; ++ } ++ else { ++ /* High speed PHY running at full speed or high speed */ ++ val = DWC_HCFG_30_60_MHZ; ++ } ++ ++ DWC_DEBUGPL(DBG_CIL, "Initializing HCFG.FSLSPClkSel to 0x%1x\n", val); ++ hcfg.d32 = dwc_read_reg32(&core_if->host_if->host_global_regs->hcfg); ++ hcfg.b.fslspclksel = val; ++ dwc_write_reg32(&core_if->host_if->host_global_regs->hcfg, hcfg.d32); ++} ++ ++/** ++ * Initializes the DevSpd field of the DCFG register depending on the PHY type ++ * and the enumeration speed of the device. 
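Every register access in this file goes through a union type that overlays the raw 32-bit word (.d32) with named bitfields (.b), and dwc_modify_reg32(), declared in dwc_otg_plat.h rather than in this hunk, appears from its call sites to be a read-modify-write taking a clear mask and a set mask, which is why enabling the global interrupt passes (0, ahbcfg.d32) while disabling passes (ahbcfg.d32, 0). A minimal standalone sketch of that idiom, with a made-up one-bit register layout:

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical register union, mirroring the gahbcfg_data_t style used above. */
	typedef union demo_reg {
		uint32_t d32;
		struct {
			unsigned glblintrmsk:1;
			unsigned reserved:31;
		} b;
	} demo_reg_t;

	static uint32_t fake_reg;	/* stands in for a memory-mapped register */

	static uint32_t demo_read_reg32(volatile uint32_t *reg)
	{
		return *reg;
	}

	static void demo_write_reg32(volatile uint32_t *reg, uint32_t value)
	{
		*reg = value;
	}

	/* Read-modify-write: clear the bits in clear_mask, then set the bits in set_mask. */
	static void demo_modify_reg32(volatile uint32_t *reg, uint32_t clear_mask, uint32_t set_mask)
	{
		demo_write_reg32(reg, (demo_read_reg32(reg) & ~clear_mask) | set_mask);
	}

	int main(void)
	{
		demo_reg_t ahbcfg = { .d32 = 0 };

		ahbcfg.b.glblintrmsk = 1;

		demo_modify_reg32(&fake_reg, 0, ahbcfg.d32);	/* enable: set the bit */
		printf("after enable:  %#x\n", (unsigned)fake_reg);

		demo_modify_reg32(&fake_reg, ahbcfg.d32, 0);	/* disable: clear the bit */
		printf("after disable: %#x\n", (unsigned)fake_reg);
		return 0;
	}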
++ */ ++static void init_devspd(dwc_otg_core_if_t *core_if) ++{ ++ uint32_t val; ++ dcfg_data_t dcfg; ++ ++ if (((core_if->hwcfg2.b.hs_phy_type == 2) && ++ (core_if->hwcfg2.b.fs_phy_type == 1) && ++ (core_if->core_params->ulpi_fs_ls)) || ++ (core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS)) { ++ /* Full speed PHY */ ++ val = 0x3; ++ } ++ else if (core_if->core_params->speed == DWC_SPEED_PARAM_FULL) { ++ /* High speed PHY running at full speed */ ++ val = 0x1; ++ } ++ else { ++ /* High speed PHY running at high speed */ ++ val = 0x0; ++ } ++ ++ DWC_DEBUGPL(DBG_CIL, "Initializing DCFG.DevSpd to 0x%1x\n", val); ++ ++ dcfg.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dcfg); ++ dcfg.b.devspd = val; ++ dwc_write_reg32(&core_if->dev_if->dev_global_regs->dcfg, dcfg.d32); ++} ++ ++/** ++ * This function calculates the number of IN EPS ++ * using GHWCFG1 and GHWCFG2 registers values ++ * ++ * @param core_if Programming view of the DWC_otg controller ++ */ ++static uint32_t calc_num_in_eps(dwc_otg_core_if_t *core_if) ++{ ++ uint32_t num_in_eps = 0; ++ uint32_t num_eps = core_if->hwcfg2.b.num_dev_ep; ++ uint32_t hwcfg1 = core_if->hwcfg1.d32 >> 3; ++ uint32_t num_tx_fifos = core_if->hwcfg4.b.num_in_eps; ++ int i; ++ ++ ++ for(i = 0; i < num_eps; ++i) ++ { ++ if(!(hwcfg1 & 0x1)) ++ num_in_eps++; ++ ++ hwcfg1 >>= 2; ++ } ++ ++ if(core_if->hwcfg4.b.ded_fifo_en) { ++ num_in_eps = (num_in_eps > num_tx_fifos) ? num_tx_fifos : num_in_eps; ++ } ++ ++ return num_in_eps; ++} ++ ++ ++/** ++ * This function calculates the number of OUT EPS ++ * using GHWCFG1 and GHWCFG2 registers values ++ * ++ * @param core_if Programming view of the DWC_otg controller ++ */ ++static uint32_t calc_num_out_eps(dwc_otg_core_if_t *core_if) ++{ ++ uint32_t num_out_eps = 0; ++ uint32_t num_eps = core_if->hwcfg2.b.num_dev_ep; ++ uint32_t hwcfg1 = core_if->hwcfg1.d32 >> 2; ++ int i; ++ ++ for(i = 0; i < num_eps; ++i) ++ { ++ if(!(hwcfg1 & 0x2)) ++ num_out_eps++; ++ ++ hwcfg1 >>= 2; ++ } ++ return num_out_eps; ++} ++/** ++ * This function initializes the DWC_otg controller registers and ++ * prepares the core for device mode or host mode operation. ++ * ++ * @param core_if Programming view of the DWC_otg controller ++ * ++ */ ++void dwc_otg_core_init(dwc_otg_core_if_t *core_if) ++{ ++ int i = 0; ++ dwc_otg_core_global_regs_t *global_regs = ++ core_if->core_global_regs; ++ dwc_otg_dev_if_t *dev_if = core_if->dev_if; ++ gahbcfg_data_t ahbcfg = { .d32 = 0 }; ++ gusbcfg_data_t usbcfg = { .d32 = 0 }; ++ gi2cctl_data_t i2cctl = { .d32 = 0 }; ++ ++ DWC_DEBUGPL(DBG_CILV, "dwc_otg_core_init(%p)\n", core_if); ++ ++ /* Common Initialization */ ++ ++ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg); ++ ++// usbcfg.b.tx_end_delay = 1; ++ /* Program the ULPI External VBUS bit if needed */ ++ usbcfg.b.ulpi_ext_vbus_drv = ++ (core_if->core_params->phy_ulpi_ext_vbus == DWC_PHY_ULPI_EXTERNAL_VBUS) ? 1 : 0; ++ ++ /* Set external TS Dline pulsing */ ++ usbcfg.b.term_sel_dl_pulse = (core_if->core_params->ts_dline == 1) ? 1 : 0; ++ dwc_write_reg32 (&global_regs->gusbcfg, usbcfg.d32); ++ ++ ++ /* Reset the Controller */ ++ dwc_otg_core_reset(core_if); ++ ++ /* Initialize parameters from Hardware configuration registers. 
*/ ++ dev_if->num_in_eps = calc_num_in_eps(core_if); ++ dev_if->num_out_eps = calc_num_out_eps(core_if); ++ ++ ++ DWC_DEBUGPL(DBG_CIL, "num_dev_perio_in_ep=%d\n", core_if->hwcfg4.b.num_dev_perio_in_ep); ++ ++ for (i=0; i < core_if->hwcfg4.b.num_dev_perio_in_ep; i++) ++ { ++ dev_if->perio_tx_fifo_size[i] = ++ dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i]) >> 16; ++ DWC_DEBUGPL(DBG_CIL, "Periodic Tx FIFO SZ #%d=0x%0x\n", ++ i, dev_if->perio_tx_fifo_size[i]); ++ } ++ ++ for (i=0; i < core_if->hwcfg4.b.num_in_eps; i++) ++ { ++ dev_if->tx_fifo_size[i] = ++ dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i]) >> 16; ++ DWC_DEBUGPL(DBG_CIL, "Tx FIFO SZ #%d=0x%0x\n", ++ i, dev_if->perio_tx_fifo_size[i]); ++ } ++ ++ core_if->total_fifo_size = core_if->hwcfg3.b.dfifo_depth; ++ core_if->rx_fifo_size = ++ dwc_read_reg32(&global_regs->grxfsiz); ++ core_if->nperio_tx_fifo_size = ++ dwc_read_reg32(&global_regs->gnptxfsiz) >> 16; ++ ++ DWC_DEBUGPL(DBG_CIL, "Total FIFO SZ=%d\n", core_if->total_fifo_size); ++ DWC_DEBUGPL(DBG_CIL, "Rx FIFO SZ=%d\n", core_if->rx_fifo_size); ++ DWC_DEBUGPL(DBG_CIL, "NP Tx FIFO SZ=%d\n", core_if->nperio_tx_fifo_size); ++ ++ /* This programming sequence needs to happen in FS mode before any other ++ * programming occurs */ ++ if ((core_if->core_params->speed == DWC_SPEED_PARAM_FULL) && ++ (core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS)) { ++ /* If FS mode with FS PHY */ ++ ++ /* core_init() is now called on every switch so only call the ++ * following for the first time through. */ ++ if (!core_if->phy_init_done) { ++ core_if->phy_init_done = 1; ++ DWC_DEBUGPL(DBG_CIL, "FS_PHY detected\n"); ++ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg); ++ usbcfg.b.physel = 1; ++ dwc_write_reg32 (&global_regs->gusbcfg, usbcfg.d32); ++ ++ /* Reset after a PHY select */ ++ dwc_otg_core_reset(core_if); ++ } ++ ++ /* Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48Mhz in FS. Also ++ * do this on HNP Dev/Host mode switches (done in dev_init and ++ * host_init). */ ++ if (dwc_otg_is_host_mode(core_if)) { ++ init_fslspclksel(core_if); ++ } ++ else { ++ init_devspd(core_if); ++ } ++ ++ if (core_if->core_params->i2c_enable) { ++ DWC_DEBUGPL(DBG_CIL, "FS_PHY Enabling I2c\n"); ++ /* Program GUSBCFG.OtgUtmifsSel to I2C */ ++ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg); ++ usbcfg.b.otgutmifssel = 1; ++ dwc_write_reg32 (&global_regs->gusbcfg, usbcfg.d32); ++ ++ /* Program GI2CCTL.I2CEn */ ++ i2cctl.d32 = dwc_read_reg32(&global_regs->gi2cctl); ++ i2cctl.b.i2cdevaddr = 1; ++ i2cctl.b.i2cen = 0; ++ dwc_write_reg32 (&global_regs->gi2cctl, i2cctl.d32); ++ i2cctl.b.i2cen = 1; ++ dwc_write_reg32 (&global_regs->gi2cctl, i2cctl.d32); ++ } ++ ++ } /* endif speed == DWC_SPEED_PARAM_FULL */ ++ ++ else { ++ /* High speed PHY. */ ++ if (!core_if->phy_init_done) { ++ core_if->phy_init_done = 1; ++ /* HS PHY parameters. These parameters are preserved ++ * during soft reset so only program the first time. Do ++ * a soft reset immediately after setting phyif. 
*/ ++ usbcfg.b.ulpi_utmi_sel = core_if->core_params->phy_type; ++ if (usbcfg.b.ulpi_utmi_sel == 1) { ++ /* ULPI interface */ ++ usbcfg.b.phyif = 0; ++ usbcfg.b.ddrsel = core_if->core_params->phy_ulpi_ddr; ++ } ++ else { ++ /* UTMI+ interface */ ++ if (core_if->core_params->phy_utmi_width == 16) { ++ usbcfg.b.phyif = 1; ++ } ++ else { ++ usbcfg.b.phyif = 0; ++ } ++ } ++ ++ dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32); ++ ++ /* Reset after setting the PHY parameters */ ++ dwc_otg_core_reset(core_if); ++ } ++ } ++ ++ if ((core_if->hwcfg2.b.hs_phy_type == 2) && ++ (core_if->hwcfg2.b.fs_phy_type == 1) && ++ (core_if->core_params->ulpi_fs_ls)) { ++ DWC_DEBUGPL(DBG_CIL, "Setting ULPI FSLS\n"); ++ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg); ++ usbcfg.b.ulpi_fsls = 1; ++ usbcfg.b.ulpi_clk_sus_m = 1; ++ dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32); ++ } ++ else { ++ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg); ++ usbcfg.b.ulpi_fsls = 0; ++ usbcfg.b.ulpi_clk_sus_m = 0; ++ dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32); ++ } ++ ++ /* Program the GAHBCFG Register.*/ ++ switch (core_if->hwcfg2.b.architecture) { ++ ++ case DWC_SLAVE_ONLY_ARCH: ++ DWC_DEBUGPL(DBG_CIL, "Slave Only Mode\n"); ++ ahbcfg.b.nptxfemplvl_txfemplvl = DWC_GAHBCFG_TXFEMPTYLVL_HALFEMPTY; ++ ahbcfg.b.ptxfemplvl = DWC_GAHBCFG_TXFEMPTYLVL_HALFEMPTY; ++ core_if->dma_enable = 0; ++ core_if->dma_desc_enable = 0; ++ break; ++ ++ case DWC_EXT_DMA_ARCH: ++ DWC_DEBUGPL(DBG_CIL, "External DMA Mode\n"); ++ ahbcfg.b.hburstlen = core_if->core_params->dma_burst_size; ++ core_if->dma_enable = (core_if->core_params->dma_enable != 0); ++ core_if->dma_desc_enable = (core_if->core_params->dma_desc_enable != 0); ++ break; ++ ++ case DWC_INT_DMA_ARCH: ++ DWC_DEBUGPL(DBG_CIL, "Internal DMA Mode\n"); ++ ahbcfg.b.hburstlen = DWC_GAHBCFG_INT_DMA_BURST_INCR; ++ core_if->dma_enable = (core_if->core_params->dma_enable != 0); ++ core_if->dma_desc_enable = (core_if->core_params->dma_desc_enable != 0); ++ break; ++ ++ } ++ ahbcfg.b.dmaenable = core_if->dma_enable; ++ dwc_write_reg32(&global_regs->gahbcfg, ahbcfg.d32); ++ ++ core_if->en_multiple_tx_fifo = core_if->hwcfg4.b.ded_fifo_en; ++ ++ core_if->pti_enh_enable = core_if->core_params->pti_enable != 0; ++ core_if->multiproc_int_enable = core_if->core_params->mpi_enable; ++ DWC_PRINT("Periodic Transfer Interrupt Enhancement - %s\n", ((core_if->pti_enh_enable) ? "enabled": "disabled")); ++ DWC_PRINT("Multiprocessor Interrupt Enhancement - %s\n", ((core_if->multiproc_int_enable) ? "enabled": "disabled")); ++ ++ /* ++ * Program the GUSBCFG register. 
++ */ ++ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg); ++ ++ switch (core_if->hwcfg2.b.op_mode) { ++ case DWC_MODE_HNP_SRP_CAPABLE: ++ usbcfg.b.hnpcap = (core_if->core_params->otg_cap == ++ DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE); ++ usbcfg.b.srpcap = (core_if->core_params->otg_cap != ++ DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE); ++ break; ++ ++ case DWC_MODE_SRP_ONLY_CAPABLE: ++ usbcfg.b.hnpcap = 0; ++ usbcfg.b.srpcap = (core_if->core_params->otg_cap != ++ DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE); ++ break; ++ ++ case DWC_MODE_NO_HNP_SRP_CAPABLE: ++ usbcfg.b.hnpcap = 0; ++ usbcfg.b.srpcap = 0; ++ break; ++ ++ case DWC_MODE_SRP_CAPABLE_DEVICE: ++ usbcfg.b.hnpcap = 0; ++ usbcfg.b.srpcap = (core_if->core_params->otg_cap != ++ DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE); ++ break; ++ ++ case DWC_MODE_NO_SRP_CAPABLE_DEVICE: ++ usbcfg.b.hnpcap = 0; ++ usbcfg.b.srpcap = 0; ++ break; ++ ++ case DWC_MODE_SRP_CAPABLE_HOST: ++ usbcfg.b.hnpcap = 0; ++ usbcfg.b.srpcap = (core_if->core_params->otg_cap != ++ DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE); ++ break; ++ ++ case DWC_MODE_NO_SRP_CAPABLE_HOST: ++ usbcfg.b.hnpcap = 0; ++ usbcfg.b.srpcap = 0; ++ break; ++ } ++ ++ dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32); ++ ++ /* Enable common interrupts */ ++ dwc_otg_enable_common_interrupts(core_if); ++ ++ /* Do device or host intialization based on mode during PCD ++ * and HCD initialization */ ++ if (dwc_otg_is_host_mode(core_if)) { ++ DWC_DEBUGPL(DBG_ANY, "Host Mode\n"); ++ core_if->op_state = A_HOST; ++ } ++ else { ++ DWC_DEBUGPL(DBG_ANY, "Device Mode\n"); ++ core_if->op_state = B_PERIPHERAL; ++#ifdef DWC_DEVICE_ONLY ++ dwc_otg_core_dev_init(core_if); ++#endif ++ } ++} ++ ++ ++/** ++ * This function enables the Device mode interrupts. ++ * ++ * @param core_if Programming view of DWC_otg controller ++ */ ++void dwc_otg_enable_device_interrupts(dwc_otg_core_if_t *core_if) ++{ ++ gintmsk_data_t intr_mask = { .d32 = 0}; ++ dwc_otg_core_global_regs_t *global_regs = ++ core_if->core_global_regs; ++ ++ DWC_DEBUGPL(DBG_CIL, "%s()\n", __func__); ++ ++ /* Disable all interrupts. */ ++ dwc_write_reg32(&global_regs->gintmsk, 0); ++ ++ /* Clear any pending interrupts */ ++ dwc_write_reg32(&global_regs->gintsts, 0xFFFFFFFF); ++ ++ /* Enable the common interrupts */ ++ dwc_otg_enable_common_interrupts(core_if); ++ ++ /* Enable interrupts */ ++ intr_mask.b.usbreset = 1; ++ intr_mask.b.enumdone = 1; ++ ++ if(!core_if->multiproc_int_enable) { ++ intr_mask.b.inepintr = 1; ++ intr_mask.b.outepintr = 1; ++ } ++ ++ intr_mask.b.erlysuspend = 1; ++ ++ if(core_if->en_multiple_tx_fifo == 0) { ++ intr_mask.b.epmismatch = 1; ++ } ++ ++ ++#ifdef DWC_EN_ISOC ++ if(core_if->dma_enable) { ++ if(core_if->dma_desc_enable == 0) { ++ if(core_if->pti_enh_enable) { ++ dctl_data_t dctl = { .d32 = 0 }; ++ dctl.b.ifrmnum = 1; ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dctl, 0, dctl.d32); ++ } else { ++ intr_mask.b.incomplisoin = 1; ++ intr_mask.b.incomplisoout = 1; ++ } ++ } ++ } else { ++ intr_mask.b.incomplisoin = 1; ++ intr_mask.b.incomplisoout = 1; ++ } ++#endif // DWC_EN_ISOC ++ ++/** @todo NGS: Should this be a module parameter? 
*/ ++#ifdef USE_PERIODIC_EP ++ intr_mask.b.isooutdrop = 1; ++ intr_mask.b.eopframe = 1; ++ intr_mask.b.incomplisoin = 1; ++ intr_mask.b.incomplisoout = 1; ++#endif ++ ++ dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, intr_mask.d32); ++ ++ DWC_DEBUGPL(DBG_CIL, "%s() gintmsk=%0x\n", __func__, ++ dwc_read_reg32(&global_regs->gintmsk)); ++} ++ ++/** ++ * This function initializes the DWC_otg controller registers for ++ * device mode. ++ * ++ * @param core_if Programming view of DWC_otg controller ++ * ++ */ ++void dwc_otg_core_dev_init(dwc_otg_core_if_t *core_if) ++{ ++ int i; ++ dwc_otg_core_global_regs_t *global_regs = ++ core_if->core_global_regs; ++ dwc_otg_dev_if_t *dev_if = core_if->dev_if; ++ dwc_otg_core_params_t *params = core_if->core_params; ++ dcfg_data_t dcfg = { .d32 = 0}; ++ grstctl_t resetctl = { .d32 = 0 }; ++ uint32_t rx_fifo_size; ++ fifosize_data_t nptxfifosize; ++ fifosize_data_t txfifosize; ++ dthrctl_data_t dthrctl; ++ fifosize_data_t ptxfifosize; ++ ++ /* Restart the Phy Clock */ ++ dwc_write_reg32(core_if->pcgcctl, 0); ++ ++ /* Device configuration register */ ++ init_devspd(core_if); ++ dcfg.d32 = dwc_read_reg32(&dev_if->dev_global_regs->dcfg); ++ dcfg.b.descdma = (core_if->dma_desc_enable) ? 1 : 0; ++ dcfg.b.perfrint = DWC_DCFG_FRAME_INTERVAL_80; ++ ++ dwc_write_reg32(&dev_if->dev_global_regs->dcfg, dcfg.d32); ++ ++ /* Configure data FIFO sizes */ ++ if (core_if->hwcfg2.b.dynamic_fifo && params->enable_dynamic_fifo) { ++ DWC_DEBUGPL(DBG_CIL, "Total FIFO Size=%d\n", core_if->total_fifo_size); ++ DWC_DEBUGPL(DBG_CIL, "Rx FIFO Size=%d\n", params->dev_rx_fifo_size); ++ DWC_DEBUGPL(DBG_CIL, "NP Tx FIFO Size=%d\n", params->dev_nperio_tx_fifo_size); ++ ++ /* Rx FIFO */ ++ DWC_DEBUGPL(DBG_CIL, "initial grxfsiz=%08x\n", ++ dwc_read_reg32(&global_regs->grxfsiz)); ++ ++ rx_fifo_size = params->dev_rx_fifo_size; ++ dwc_write_reg32(&global_regs->grxfsiz, rx_fifo_size); ++ ++ DWC_DEBUGPL(DBG_CIL, "new grxfsiz=%08x\n", ++ dwc_read_reg32(&global_regs->grxfsiz)); ++ ++ /** Set Periodic Tx FIFO Mask all bits 0 */ ++ core_if->p_tx_msk = 0; ++ ++ /** Set Tx FIFO Mask all bits 0 */ ++ core_if->tx_msk = 0; ++ ++ if(core_if->en_multiple_tx_fifo == 0) { ++ /* Non-periodic Tx FIFO */ ++ DWC_DEBUGPL(DBG_CIL, "initial gnptxfsiz=%08x\n", ++ dwc_read_reg32(&global_regs->gnptxfsiz)); ++ ++ nptxfifosize.b.depth = params->dev_nperio_tx_fifo_size; ++ nptxfifosize.b.startaddr = params->dev_rx_fifo_size; ++ ++ dwc_write_reg32(&global_regs->gnptxfsiz, nptxfifosize.d32); ++ ++ DWC_DEBUGPL(DBG_CIL, "new gnptxfsiz=%08x\n", ++ dwc_read_reg32(&global_regs->gnptxfsiz)); ++ ++ /**@todo NGS: Fix Periodic FIFO Sizing! */ ++ /* ++ * Periodic Tx FIFOs These FIFOs are numbered from 1 to 15. ++ * Indexes of the FIFO size module parameters in the ++ * dev_perio_tx_fifo_size array and the FIFO size registers in ++ * the dptxfsiz array run from 0 to 14. 
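The FIFO programming in dwc_otg_core_dev_init() is a linear carve-up of the shared FIFO RAM: the Rx FIFO starts at offset 0, the non-periodic Tx FIFO starts where the Rx FIFO ends, and each periodic (or dedicated) Tx FIFO starts where the previous one ends. A small standalone sketch with hypothetical sizes (the real values come from the module parameters held in core_params):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical module-parameter values, in FIFO entries. */
		uint32_t dev_rx_fifo_size = 1064;
		uint32_t dev_nperio_tx_fifo_size = 1024;
		uint32_t dev_perio_tx_fifo_size[3] = { 256, 256, 256 };
		uint32_t startaddr;
		int i;

		printf("GRXFSIZ      depth=%u (starts at 0)\n", (unsigned)dev_rx_fifo_size);

		/* GNPTXFSIZ: starts right after the Rx FIFO, as in dwc_otg_core_dev_init(). */
		startaddr = dev_rx_fifo_size;
		printf("GNPTXFSIZ    start=%u depth=%u\n",
		       (unsigned)startaddr, (unsigned)dev_nperio_tx_fifo_size);

		/* Periodic/dedicated Tx FIFOs are stacked after the non-periodic one. */
		startaddr += dev_nperio_tx_fifo_size;
		for (i = 0; i < 3; i++) {
			printf("DPTXFSIZ[%d]  start=%u depth=%u\n",
			       i, (unsigned)startaddr, (unsigned)dev_perio_tx_fifo_size[i]);
			startaddr += dev_perio_tx_fifo_size[i];
		}
		return 0;
	}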
++ */ ++ /** @todo Finish debug of this */ ++ ptxfifosize.b.startaddr = nptxfifosize.b.startaddr + nptxfifosize.b.depth; ++ for (i=0; i < core_if->hwcfg4.b.num_dev_perio_in_ep; i++) ++ { ++ ptxfifosize.b.depth = params->dev_perio_tx_fifo_size[i]; ++ DWC_DEBUGPL(DBG_CIL, "initial dptxfsiz_dieptxf[%d]=%08x\n", i, ++ dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i])); ++ dwc_write_reg32(&global_regs->dptxfsiz_dieptxf[i], ++ ptxfifosize.d32); ++ DWC_DEBUGPL(DBG_CIL, "new dptxfsiz_dieptxf[%d]=%08x\n", i, ++ dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i])); ++ ptxfifosize.b.startaddr += ptxfifosize.b.depth; ++ } ++ } ++ else { ++ /* ++ * Tx FIFOs These FIFOs are numbered from 1 to 15. ++ * Indexes of the FIFO size module parameters in the ++ * dev_tx_fifo_size array and the FIFO size registers in ++ * the dptxfsiz_dieptxf array run from 0 to 14. ++ */ ++ ++ ++ /* Non-periodic Tx FIFO */ ++ DWC_DEBUGPL(DBG_CIL, "initial gnptxfsiz=%08x\n", ++ dwc_read_reg32(&global_regs->gnptxfsiz)); ++ ++ nptxfifosize.b.depth = params->dev_nperio_tx_fifo_size; ++ nptxfifosize.b.startaddr = params->dev_rx_fifo_size; ++ ++ dwc_write_reg32(&global_regs->gnptxfsiz, nptxfifosize.d32); ++ ++ DWC_DEBUGPL(DBG_CIL, "new gnptxfsiz=%08x\n", ++ dwc_read_reg32(&global_regs->gnptxfsiz)); ++ ++ txfifosize.b.startaddr = nptxfifosize.b.startaddr + nptxfifosize.b.depth; ++ /* ++ Modify by kaiker ,for RT3052 device mode config ++ ++ In RT3052,Since the _core_if->hwcfg4.b.num_dev_perio_in_ep is ++ configed to 0 so these TX_FIF0 not config.IN EP will can't ++ more than 1 if not modify it. ++ ++ */ ++#if 1 ++ for (i=1 ; i <= dev_if->num_in_eps; i++) ++#else ++ for (i=1; i < _core_if->hwcfg4.b.num_dev_perio_in_ep; i++) ++#endif ++ { ++ ++ txfifosize.b.depth = params->dev_tx_fifo_size[i]; ++ ++ DWC_DEBUGPL(DBG_CIL, "initial dptxfsiz_dieptxf[%d]=%08x\n", i, ++ dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i])); ++ ++ dwc_write_reg32(&global_regs->dptxfsiz_dieptxf[i-1], ++ txfifosize.d32); ++ ++ DWC_DEBUGPL(DBG_CIL, "new dptxfsiz_dieptxf[%d]=%08x\n", i, ++ dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i-1])); ++ ++ txfifosize.b.startaddr += txfifosize.b.depth; ++ } ++ } ++ } ++ /* Flush the FIFOs */ ++ dwc_otg_flush_tx_fifo(core_if, 0x10); /* all Tx FIFOs */ ++ dwc_otg_flush_rx_fifo(core_if); ++ ++ /* Flush the Learning Queue. */ ++ resetctl.b.intknqflsh = 1; ++ dwc_write_reg32(&core_if->core_global_regs->grstctl, resetctl.d32); ++ ++ /* Clear all pending Device Interrupts */ ++ ++ if(core_if->multiproc_int_enable) { ++ } ++ ++ /** @todo - if the condition needed to be checked ++ * or in any case all pending interrutps should be cleared? 
++ */ ++ if(core_if->multiproc_int_enable) { ++ for(i = 0; i < core_if->dev_if->num_in_eps; ++i) { ++ dwc_write_reg32(&dev_if->dev_global_regs->diepeachintmsk[i], 0); ++ } ++ ++ for(i = 0; i < core_if->dev_if->num_out_eps; ++i) { ++ dwc_write_reg32(&dev_if->dev_global_regs->doepeachintmsk[i], 0); ++ } ++ ++ dwc_write_reg32(&dev_if->dev_global_regs->deachint, 0xFFFFFFFF); ++ dwc_write_reg32(&dev_if->dev_global_regs->deachintmsk, 0); ++ } else { ++ dwc_write_reg32(&dev_if->dev_global_regs->diepmsk, 0); ++ dwc_write_reg32(&dev_if->dev_global_regs->doepmsk, 0); ++ dwc_write_reg32(&dev_if->dev_global_regs->daint, 0xFFFFFFFF); ++ dwc_write_reg32(&dev_if->dev_global_regs->daintmsk, 0); ++ } ++ ++ for (i=0; i <= dev_if->num_in_eps; i++) ++ { ++ depctl_data_t depctl; ++ depctl.d32 = dwc_read_reg32(&dev_if->in_ep_regs[i]->diepctl); ++ if (depctl.b.epena) { ++ depctl.d32 = 0; ++ depctl.b.epdis = 1; ++ depctl.b.snak = 1; ++ } ++ else { ++ depctl.d32 = 0; ++ } ++ ++ dwc_write_reg32(&dev_if->in_ep_regs[i]->diepctl, depctl.d32); ++ ++ ++ dwc_write_reg32(&dev_if->in_ep_regs[i]->dieptsiz, 0); ++ dwc_write_reg32(&dev_if->in_ep_regs[i]->diepdma, 0); ++ dwc_write_reg32(&dev_if->in_ep_regs[i]->diepint, 0xFF); ++ } ++ ++ for (i=0; i <= dev_if->num_out_eps; i++) ++ { ++ depctl_data_t depctl; ++ depctl.d32 = dwc_read_reg32(&dev_if->out_ep_regs[i]->doepctl); ++ if (depctl.b.epena) { ++ depctl.d32 = 0; ++ depctl.b.epdis = 1; ++ depctl.b.snak = 1; ++ } ++ else { ++ depctl.d32 = 0; ++ } ++ ++ dwc_write_reg32(&dev_if->out_ep_regs[i]->doepctl, depctl.d32); ++ ++ dwc_write_reg32(&dev_if->out_ep_regs[i]->doeptsiz, 0); ++ dwc_write_reg32(&dev_if->out_ep_regs[i]->doepdma, 0); ++ dwc_write_reg32(&dev_if->out_ep_regs[i]->doepint, 0xFF); ++ } ++ ++ if(core_if->en_multiple_tx_fifo && core_if->dma_enable) { ++ dev_if->non_iso_tx_thr_en = params->thr_ctl & 0x1; ++ dev_if->iso_tx_thr_en = (params->thr_ctl >> 1) & 0x1; ++ dev_if->rx_thr_en = (params->thr_ctl >> 2) & 0x1; ++ ++ dev_if->rx_thr_length = params->rx_thr_length; ++ dev_if->tx_thr_length = params->tx_thr_length; ++ ++ dev_if->setup_desc_index = 0; ++ ++ dthrctl.d32 = 0; ++ dthrctl.b.non_iso_thr_en = dev_if->non_iso_tx_thr_en; ++ dthrctl.b.iso_thr_en = dev_if->iso_tx_thr_en; ++ dthrctl.b.tx_thr_len = dev_if->tx_thr_length; ++ dthrctl.b.rx_thr_en = dev_if->rx_thr_en; ++ dthrctl.b.rx_thr_len = dev_if->rx_thr_length; ++ ++ dwc_write_reg32(&dev_if->dev_global_regs->dtknqr3_dthrctl, dthrctl.d32); ++ ++ DWC_DEBUGPL(DBG_CIL, "Non ISO Tx Thr - %d\nISO Tx Thr - %d\nRx Thr - %d\nTx Thr Len - %d\nRx Thr Len - %d\n", ++ dthrctl.b.non_iso_thr_en, dthrctl.b.iso_thr_en, dthrctl.b.rx_thr_en, dthrctl.b.tx_thr_len, dthrctl.b.rx_thr_len); ++ ++ } ++ ++ dwc_otg_enable_device_interrupts(core_if); ++ ++ { ++ diepmsk_data_t msk = { .d32 = 0 }; ++ msk.b.txfifoundrn = 1; ++ if(core_if->multiproc_int_enable) { ++ dwc_modify_reg32(&dev_if->dev_global_regs->diepeachintmsk[0], msk.d32, msk.d32); ++ } else { ++ dwc_modify_reg32(&dev_if->dev_global_regs->diepmsk, msk.d32, msk.d32); ++ } ++ } ++ ++ ++ if(core_if->multiproc_int_enable) { ++ /* Set NAK on Babble */ ++ dctl_data_t dctl = { .d32 = 0}; ++ dctl.b.nakonbble = 1; ++ dwc_modify_reg32(&dev_if->dev_global_regs->dctl, 0, dctl.d32); ++ } ++} ++ ++/** ++ * This function enables the Host mode interrupts. 
++ * ++ * @param core_if Programming view of DWC_otg controller ++ */ ++void dwc_otg_enable_host_interrupts(dwc_otg_core_if_t *core_if) ++{ ++ dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs; ++ gintmsk_data_t intr_mask = { .d32 = 0 }; ++ ++ DWC_DEBUGPL(DBG_CIL, "%s()\n", __func__); ++ ++ /* Disable all interrupts. */ ++ dwc_write_reg32(&global_regs->gintmsk, 0); ++ ++ /* Clear any pending interrupts. */ ++ dwc_write_reg32(&global_regs->gintsts, 0xFFFFFFFF); ++ ++ /* Enable the common interrupts */ ++ dwc_otg_enable_common_interrupts(core_if); ++ ++ /* ++ * Enable host mode interrupts without disturbing common ++ * interrupts. ++ */ ++ intr_mask.b.sofintr = 1; ++ intr_mask.b.portintr = 1; ++ intr_mask.b.hcintr = 1; ++ ++ dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, intr_mask.d32); ++} ++ ++/** ++ * This function disables the Host Mode interrupts. ++ * ++ * @param core_if Programming view of DWC_otg controller ++ */ ++void dwc_otg_disable_host_interrupts(dwc_otg_core_if_t *core_if) ++{ ++ dwc_otg_core_global_regs_t *global_regs = ++ core_if->core_global_regs; ++ gintmsk_data_t intr_mask = { .d32 = 0 }; ++ ++ DWC_DEBUGPL(DBG_CILV, "%s()\n", __func__); ++ ++ /* ++ * Disable host mode interrupts without disturbing common ++ * interrupts. ++ */ ++ intr_mask.b.sofintr = 1; ++ intr_mask.b.portintr = 1; ++ intr_mask.b.hcintr = 1; ++ intr_mask.b.ptxfempty = 1; ++ intr_mask.b.nptxfempty = 1; ++ ++ dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, 0); ++} ++ ++/** ++ * This function initializes the DWC_otg controller registers for ++ * host mode. ++ * ++ * This function flushes the Tx and Rx FIFOs and it flushes any entries in the ++ * request queues. Host channels are reset to ensure that they are ready for ++ * performing transfers. 
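Host channel interrupts are gated at three levels: GINTMSK.hcintr (set just above) admits host-channel interrupts at all, HAINTMSK carries one bit per channel, and each channel's HCINTMSKn selects the individual events; dwc_otg_hc_init() further down programs the lower two levels per transfer. A compact sketch of the chain, reusing this driver's own types and register accessors (the register layouts come from dwc_otg_regs.h, which is not shown here), intended only as an illustration:

	/* Sketch only: unmask the "channel halted" interrupt for one host channel.
	 * dwc_otg_core_if_t, hcintmsk_data_t, gintmsk_data_t and the register
	 * structures are the ones declared elsewhere in this driver. */
	static void demo_enable_chhltd_irq(dwc_otg_core_if_t *core_if, int hc_num)
	{
		hcintmsk_data_t hcintmsk = { .d32 = 0 };
		gintmsk_data_t gintmsk = { .d32 = 0 };

		/* Level 3: per-event mask for this channel (HCINTMSKn). */
		hcintmsk.b.chhltd = 1;
		dwc_write_reg32(&core_if->host_if->hc_regs[hc_num]->hcintmsk, hcintmsk.d32);

		/* Level 2: per-channel bit in HAINTMSK. */
		dwc_modify_reg32(&core_if->host_if->host_global_regs->haintmsk, 0, 1 << hc_num);

		/* Level 1: global host-channel interrupt enable in GINTMSK. */
		gintmsk.b.hcintr = 1;
		dwc_modify_reg32(&core_if->core_global_regs->gintmsk, 0, gintmsk.d32);
	}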
++ * ++ * @param core_if Programming view of DWC_otg controller ++ * ++ */ ++void dwc_otg_core_host_init(dwc_otg_core_if_t *core_if) ++{ ++ dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs; ++ dwc_otg_host_if_t *host_if = core_if->host_if; ++ dwc_otg_core_params_t *params = core_if->core_params; ++ hprt0_data_t hprt0 = { .d32 = 0 }; ++ fifosize_data_t nptxfifosize; ++ fifosize_data_t ptxfifosize; ++ int i; ++ hcchar_data_t hcchar; ++ hcfg_data_t hcfg; ++ dwc_otg_hc_regs_t *hc_regs; ++ int num_channels; ++ gotgctl_data_t gotgctl = { .d32 = 0 }; ++ ++ DWC_DEBUGPL(DBG_CILV,"%s(%p)\n", __func__, core_if); ++ ++ /* Restart the Phy Clock */ ++ dwc_write_reg32(core_if->pcgcctl, 0); ++ ++ /* Initialize Host Configuration Register */ ++ init_fslspclksel(core_if); ++ if (core_if->core_params->speed == DWC_SPEED_PARAM_FULL) ++ { ++ hcfg.d32 = dwc_read_reg32(&host_if->host_global_regs->hcfg); ++ hcfg.b.fslssupp = 1; ++ dwc_write_reg32(&host_if->host_global_regs->hcfg, hcfg.d32); ++ } ++ ++ /* Configure data FIFO sizes */ ++ if (core_if->hwcfg2.b.dynamic_fifo && params->enable_dynamic_fifo) { ++ DWC_DEBUGPL(DBG_CIL,"Total FIFO Size=%d\n", core_if->total_fifo_size); ++ DWC_DEBUGPL(DBG_CIL,"Rx FIFO Size=%d\n", params->host_rx_fifo_size); ++ DWC_DEBUGPL(DBG_CIL,"NP Tx FIFO Size=%d\n", params->host_nperio_tx_fifo_size); ++ DWC_DEBUGPL(DBG_CIL,"P Tx FIFO Size=%d\n", params->host_perio_tx_fifo_size); ++ ++ /* Rx FIFO */ ++ DWC_DEBUGPL(DBG_CIL,"initial grxfsiz=%08x\n", dwc_read_reg32(&global_regs->grxfsiz)); ++ dwc_write_reg32(&global_regs->grxfsiz, params->host_rx_fifo_size); ++ DWC_DEBUGPL(DBG_CIL,"new grxfsiz=%08x\n", dwc_read_reg32(&global_regs->grxfsiz)); ++ ++ /* Non-periodic Tx FIFO */ ++ DWC_DEBUGPL(DBG_CIL,"initial gnptxfsiz=%08x\n", dwc_read_reg32(&global_regs->gnptxfsiz)); ++ nptxfifosize.b.depth = params->host_nperio_tx_fifo_size; ++ nptxfifosize.b.startaddr = params->host_rx_fifo_size; ++ dwc_write_reg32(&global_regs->gnptxfsiz, nptxfifosize.d32); ++ DWC_DEBUGPL(DBG_CIL,"new gnptxfsiz=%08x\n", dwc_read_reg32(&global_regs->gnptxfsiz)); ++ ++ /* Periodic Tx FIFO */ ++ DWC_DEBUGPL(DBG_CIL,"initial hptxfsiz=%08x\n", dwc_read_reg32(&global_regs->hptxfsiz)); ++ ptxfifosize.b.depth = params->host_perio_tx_fifo_size; ++ ptxfifosize.b.startaddr = nptxfifosize.b.startaddr + nptxfifosize.b.depth; ++ dwc_write_reg32(&global_regs->hptxfsiz, ptxfifosize.d32); ++ DWC_DEBUGPL(DBG_CIL,"new hptxfsiz=%08x\n", dwc_read_reg32(&global_regs->hptxfsiz)); ++ } ++ ++ /* Clear Host Set HNP Enable in the OTG Control Register */ ++ gotgctl.b.hstsethnpen = 1; ++ dwc_modify_reg32(&global_regs->gotgctl, gotgctl.d32, 0); ++ ++ /* Make sure the FIFOs are flushed. */ ++ dwc_otg_flush_tx_fifo(core_if, 0x10 /* all Tx FIFOs */); ++ dwc_otg_flush_rx_fifo(core_if); ++ ++ /* Flush out any leftover queued requests. */ ++ num_channels = core_if->core_params->host_channels; ++ for (i = 0; i < num_channels; i++) ++ { ++ hc_regs = core_if->host_if->hc_regs[i]; ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hcchar.b.chen = 0; ++ hcchar.b.chdis = 1; ++ hcchar.b.epdir = 0; ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++ } ++ ++ /* Halt all channels to put them into a known state. 
*/ ++ for (i = 0; i < num_channels; i++) ++ { ++ int count = 0; ++ hc_regs = core_if->host_if->hc_regs[i]; ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hcchar.b.chen = 1; ++ hcchar.b.chdis = 1; ++ hcchar.b.epdir = 0; ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++ DWC_DEBUGPL(DBG_HCDV, "%s: Halt channel %d\n", __func__, i); ++ do { ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ if (++count > 1000) ++ { ++ DWC_ERROR("%s: Unable to clear halt on channel %d\n", ++ __func__, i); ++ break; ++ } ++ } ++ while (hcchar.b.chen); ++ } ++ ++ /* Turn on the vbus power. */ ++ DWC_PRINT("Init: Port Power? op_state=%d\n", core_if->op_state); ++ if (core_if->op_state == A_HOST) { ++ hprt0.d32 = dwc_otg_read_hprt0(core_if); ++ DWC_PRINT("Init: Power Port (%d)\n", hprt0.b.prtpwr); ++ if (hprt0.b.prtpwr == 0) { ++ hprt0.b.prtpwr = 1; ++ dwc_write_reg32(host_if->hprt0, hprt0.d32); ++ } ++ } ++ ++ dwc_otg_enable_host_interrupts(core_if); ++} ++ ++/** ++ * Prepares a host channel for transferring packets to/from a specific ++ * endpoint. The HCCHARn register is set up with the characteristics specified ++ * in _hc. Host channel interrupts that may need to be serviced while this ++ * transfer is in progress are enabled. ++ * ++ * @param core_if Programming view of DWC_otg controller ++ * @param hc Information needed to initialize the host channel ++ */ ++void dwc_otg_hc_init(dwc_otg_core_if_t *core_if, dwc_hc_t *hc) ++{ ++ uint32_t intr_enable; ++ hcintmsk_data_t hc_intr_mask; ++ gintmsk_data_t gintmsk = { .d32 = 0 }; ++ hcchar_data_t hcchar; ++ hcsplt_data_t hcsplt; ++ ++ uint8_t hc_num = hc->hc_num; ++ dwc_otg_host_if_t *host_if = core_if->host_if; ++ dwc_otg_hc_regs_t *hc_regs = host_if->hc_regs[hc_num]; ++ ++ /* Clear old interrupt conditions for this host channel. */ ++ hc_intr_mask.d32 = 0xFFFFFFFF; ++ hc_intr_mask.b.reserved = 0; ++ dwc_write_reg32(&hc_regs->hcint, hc_intr_mask.d32); ++ ++ /* Enable channel interrupts required for this transfer. 
*/ ++ hc_intr_mask.d32 = 0; ++ hc_intr_mask.b.chhltd = 1; ++ if (core_if->dma_enable) { ++ hc_intr_mask.b.ahberr = 1; ++ if (hc->error_state && !hc->do_split && ++ hc->ep_type != DWC_OTG_EP_TYPE_ISOC) { ++ hc_intr_mask.b.ack = 1; ++ if (hc->ep_is_in) { ++ hc_intr_mask.b.datatglerr = 1; ++ if (hc->ep_type != DWC_OTG_EP_TYPE_INTR) { ++ hc_intr_mask.b.nak = 1; ++ } ++ } ++ } ++ } ++ else { ++ switch (hc->ep_type) { ++ case DWC_OTG_EP_TYPE_CONTROL: ++ case DWC_OTG_EP_TYPE_BULK: ++ hc_intr_mask.b.xfercompl = 1; ++ hc_intr_mask.b.stall = 1; ++ hc_intr_mask.b.xacterr = 1; ++ hc_intr_mask.b.datatglerr = 1; ++ if (hc->ep_is_in) { ++ hc_intr_mask.b.bblerr = 1; ++ } ++ else { ++ hc_intr_mask.b.nak = 1; ++ hc_intr_mask.b.nyet = 1; ++ if (hc->do_ping) { ++ hc_intr_mask.b.ack = 1; ++ } ++ } ++ ++ if (hc->do_split) { ++ hc_intr_mask.b.nak = 1; ++ if (hc->complete_split) { ++ hc_intr_mask.b.nyet = 1; ++ } ++ else { ++ hc_intr_mask.b.ack = 1; ++ } ++ } ++ ++ if (hc->error_state) { ++ hc_intr_mask.b.ack = 1; ++ } ++ break; ++ case DWC_OTG_EP_TYPE_INTR: ++ hc_intr_mask.b.xfercompl = 1; ++ hc_intr_mask.b.nak = 1; ++ hc_intr_mask.b.stall = 1; ++ hc_intr_mask.b.xacterr = 1; ++ hc_intr_mask.b.datatglerr = 1; ++ hc_intr_mask.b.frmovrun = 1; ++ ++ if (hc->ep_is_in) { ++ hc_intr_mask.b.bblerr = 1; ++ } ++ if (hc->error_state) { ++ hc_intr_mask.b.ack = 1; ++ } ++ if (hc->do_split) { ++ if (hc->complete_split) { ++ hc_intr_mask.b.nyet = 1; ++ } ++ else { ++ hc_intr_mask.b.ack = 1; ++ } ++ } ++ break; ++ case DWC_OTG_EP_TYPE_ISOC: ++ hc_intr_mask.b.xfercompl = 1; ++ hc_intr_mask.b.frmovrun = 1; ++ hc_intr_mask.b.ack = 1; ++ ++ if (hc->ep_is_in) { ++ hc_intr_mask.b.xacterr = 1; ++ hc_intr_mask.b.bblerr = 1; ++ } ++ break; ++ } ++ } ++ dwc_write_reg32(&hc_regs->hcintmsk, hc_intr_mask.d32); ++ ++// if(hc->ep_type == DWC_OTG_EP_TYPE_BULK && !hc->ep_is_in) ++// hc->max_packet = 512; ++ /* Enable the top level host channel interrupt. */ ++ intr_enable = (1 << hc_num); ++ dwc_modify_reg32(&host_if->host_global_regs->haintmsk, 0, intr_enable); ++ ++ /* Make sure host channel interrupts are enabled. */ ++ gintmsk.b.hcintr = 1; ++ dwc_modify_reg32(&core_if->core_global_regs->gintmsk, 0, gintmsk.d32); ++ ++ /* ++ * Program the HCCHARn register with the endpoint characteristics for ++ * the current transfer. ++ */ ++ hcchar.d32 = 0; ++ hcchar.b.devaddr = hc->dev_addr; ++ hcchar.b.epnum = hc->ep_num; ++ hcchar.b.epdir = hc->ep_is_in; ++ hcchar.b.lspddev = (hc->speed == DWC_OTG_EP_SPEED_LOW); ++ hcchar.b.eptype = hc->ep_type; ++ hcchar.b.mps = hc->max_packet; ++ ++ dwc_write_reg32(&host_if->hc_regs[hc_num]->hcchar, hcchar.d32); ++ ++ DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num); ++ DWC_DEBUGPL(DBG_HCDV, " Dev Addr: %d\n", hcchar.b.devaddr); ++ DWC_DEBUGPL(DBG_HCDV, " Ep Num: %d\n", hcchar.b.epnum); ++ DWC_DEBUGPL(DBG_HCDV, " Is In: %d\n", hcchar.b.epdir); ++ DWC_DEBUGPL(DBG_HCDV, " Is Low Speed: %d\n", hcchar.b.lspddev); ++ DWC_DEBUGPL(DBG_HCDV, " Ep Type: %d\n", hcchar.b.eptype); ++ DWC_DEBUGPL(DBG_HCDV, " Max Pkt: %d\n", hcchar.b.mps); ++ DWC_DEBUGPL(DBG_HCDV, " Multi Cnt: %d\n", hcchar.b.multicnt); ++ ++ /* ++ * Program the HCSPLIT register for SPLITs ++ */ ++ hcsplt.d32 = 0; ++ if (hc->do_split) { ++ DWC_DEBUGPL(DBG_HCDV, "Programming HC %d with split --> %s\n", hc->hc_num, ++ hc->complete_split ? 
"CSPLIT" : "SSPLIT"); ++ hcsplt.b.compsplt = hc->complete_split; ++ hcsplt.b.xactpos = hc->xact_pos; ++ hcsplt.b.hubaddr = hc->hub_addr; ++ hcsplt.b.prtaddr = hc->port_addr; ++ DWC_DEBUGPL(DBG_HCDV, " comp split %d\n", hc->complete_split); ++ DWC_DEBUGPL(DBG_HCDV, " xact pos %d\n", hc->xact_pos); ++ DWC_DEBUGPL(DBG_HCDV, " hub addr %d\n", hc->hub_addr); ++ DWC_DEBUGPL(DBG_HCDV, " port addr %d\n", hc->port_addr); ++ DWC_DEBUGPL(DBG_HCDV, " is_in %d\n", hc->ep_is_in); ++ DWC_DEBUGPL(DBG_HCDV, " Max Pkt: %d\n", hcchar.b.mps); ++ DWC_DEBUGPL(DBG_HCDV, " xferlen: %d\n", hc->xfer_len); ++ } ++ dwc_write_reg32(&host_if->hc_regs[hc_num]->hcsplt, hcsplt.d32); ++ ++} ++ ++/** ++ * Attempts to halt a host channel. This function should only be called in ++ * Slave mode or to abort a transfer in either Slave mode or DMA mode. Under ++ * normal circumstances in DMA mode, the controller halts the channel when the ++ * transfer is complete or a condition occurs that requires application ++ * intervention. ++ * ++ * In slave mode, checks for a free request queue entry, then sets the Channel ++ * Enable and Channel Disable bits of the Host Channel Characteristics ++ * register of the specified channel to intiate the halt. If there is no free ++ * request queue entry, sets only the Channel Disable bit of the HCCHARn ++ * register to flush requests for this channel. In the latter case, sets a ++ * flag to indicate that the host channel needs to be halted when a request ++ * queue slot is open. ++ * ++ * In DMA mode, always sets the Channel Enable and Channel Disable bits of the ++ * HCCHARn register. The controller ensures there is space in the request ++ * queue before submitting the halt request. ++ * ++ * Some time may elapse before the core flushes any posted requests for this ++ * host channel and halts. The Channel Halted interrupt handler completes the ++ * deactivation of the host channel. ++ * ++ * @param core_if Controller register interface. ++ * @param hc Host channel to halt. ++ * @param halt_status Reason for halting the channel. ++ */ ++void dwc_otg_hc_halt(dwc_otg_core_if_t *core_if, ++ dwc_hc_t *hc, ++ dwc_otg_halt_status_e halt_status) ++{ ++ gnptxsts_data_t nptxsts; ++ hptxsts_data_t hptxsts; ++ hcchar_data_t hcchar; ++ dwc_otg_hc_regs_t *hc_regs; ++ dwc_otg_core_global_regs_t *global_regs; ++ dwc_otg_host_global_regs_t *host_global_regs; ++ ++ hc_regs = core_if->host_if->hc_regs[hc->hc_num]; ++ global_regs = core_if->core_global_regs; ++ host_global_regs = core_if->host_if->host_global_regs; ++ ++ WARN_ON(halt_status == DWC_OTG_HC_XFER_NO_HALT_STATUS); ++ ++ if (halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE || ++ halt_status == DWC_OTG_HC_XFER_AHB_ERR) { ++ /* ++ * Disable all channel interrupts except Ch Halted. The QTD ++ * and QH state associated with this transfer has been cleared ++ * (in the case of URB_DEQUEUE), so the channel needs to be ++ * shut down carefully to prevent crashes. ++ */ ++ hcintmsk_data_t hcintmsk; ++ hcintmsk.d32 = 0; ++ hcintmsk.b.chhltd = 1; ++ dwc_write_reg32(&hc_regs->hcintmsk, hcintmsk.d32); ++ ++ /* ++ * Make sure no other interrupts besides halt are currently ++ * pending. Handling another interrupt could cause a crash due ++ * to the QTD and QH state. ++ */ ++ dwc_write_reg32(&hc_regs->hcint, ~hcintmsk.d32); ++ ++ /* ++ * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR ++ * even if the channel was already halted for some other ++ * reason. 
++ */ ++ hc->halt_status = halt_status; ++ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ if (hcchar.b.chen == 0) { ++ /* ++ * The channel is either already halted or it hasn't ++ * started yet. In DMA mode, the transfer may halt if ++ * it finishes normally or a condition occurs that ++ * requires driver intervention. Don't want to halt ++ * the channel again. In either Slave or DMA mode, ++ * it's possible that the transfer has been assigned ++ * to a channel, but not started yet when an URB is ++ * dequeued. Don't want to halt a channel that hasn't ++ * started yet. ++ */ ++ return; ++ } ++ } ++ ++ if (hc->halt_pending) { ++ /* ++ * A halt has already been issued for this channel. This might ++ * happen when a transfer is aborted by a higher level in ++ * the stack. ++ */ ++#ifdef DEBUG ++ DWC_PRINT("*** %s: Channel %d, _hc->halt_pending already set ***\n", ++ __func__, hc->hc_num); ++ ++/* dwc_otg_dump_global_registers(core_if); */ ++/* dwc_otg_dump_host_registers(core_if); */ ++#endif ++ return; ++ } ++ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hcchar.b.chen = 1; ++ hcchar.b.chdis = 1; ++ ++ if (!core_if->dma_enable) { ++ /* Check for space in the request queue to issue the halt. */ ++ if (hc->ep_type == DWC_OTG_EP_TYPE_CONTROL || ++ hc->ep_type == DWC_OTG_EP_TYPE_BULK) { ++ nptxsts.d32 = dwc_read_reg32(&global_regs->gnptxsts); ++ if (nptxsts.b.nptxqspcavail == 0) { ++ hcchar.b.chen = 0; ++ } ++ } ++ else { ++ hptxsts.d32 = dwc_read_reg32(&host_global_regs->hptxsts); ++ if ((hptxsts.b.ptxqspcavail == 0) || (core_if->queuing_high_bandwidth)) { ++ hcchar.b.chen = 0; ++ } ++ } ++ } ++ ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++ ++ hc->halt_status = halt_status; ++ ++ if (hcchar.b.chen) { ++ hc->halt_pending = 1; ++ hc->halt_on_queue = 0; ++ } ++ else { ++ hc->halt_on_queue = 1; ++ } ++ ++ DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num); ++ DWC_DEBUGPL(DBG_HCDV, " hcchar: 0x%08x\n", hcchar.d32); ++ DWC_DEBUGPL(DBG_HCDV, " halt_pending: %d\n", hc->halt_pending); ++ DWC_DEBUGPL(DBG_HCDV, " halt_on_queue: %d\n", hc->halt_on_queue); ++ DWC_DEBUGPL(DBG_HCDV, " halt_status: %d\n", hc->halt_status); ++ ++ return; ++} ++ ++/** ++ * Clears the transfer state for a host channel. This function is normally ++ * called after a transfer is done and the host channel is being released. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param hc Identifies the host channel to clean up. ++ */ ++void dwc_otg_hc_cleanup(dwc_otg_core_if_t *core_if, dwc_hc_t *hc) ++{ ++ dwc_otg_hc_regs_t *hc_regs; ++ ++ hc->xfer_started = 0; ++ ++ /* ++ * Clear channel interrupt enables and any unhandled channel interrupt ++ * conditions. ++ */ ++ hc_regs = core_if->host_if->hc_regs[hc->hc_num]; ++ dwc_write_reg32(&hc_regs->hcintmsk, 0); ++ dwc_write_reg32(&hc_regs->hcint, 0xFFFFFFFF); ++ ++#ifdef DEBUG ++ del_timer(&core_if->hc_xfer_timer[hc->hc_num]); ++ { ++ hcchar_data_t hcchar; ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ if (hcchar.b.chdis) { ++ DWC_WARN("%s: chdis set, channel %d, hcchar 0x%08x\n", ++ __func__, hc->hc_num, hcchar.d32); ++ } ++ } ++#endif ++} ++ ++/** ++ * Sets the channel property that indicates in which frame a periodic transfer ++ * should occur. This is always set to the _next_ frame. This function has no ++ * effect on non-periodic transfers. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param hc Identifies the host channel to set up and its properties. 
++ * @param hcchar Current value of the HCCHAR register for the specified host ++ * channel. ++ */ ++static inline void hc_set_even_odd_frame(dwc_otg_core_if_t *core_if, ++ dwc_hc_t *hc, ++ hcchar_data_t *hcchar) ++{ ++ if (hc->ep_type == DWC_OTG_EP_TYPE_INTR || ++ hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { ++ hfnum_data_t hfnum; ++ hfnum.d32 = dwc_read_reg32(&core_if->host_if->host_global_regs->hfnum); ++ ++ /* 1 if _next_ frame is odd, 0 if it's even */ ++ hcchar->b.oddfrm = (hfnum.b.frnum & 0x1) ? 0 : 1; ++#ifdef DEBUG ++ if (hc->ep_type == DWC_OTG_EP_TYPE_INTR && hc->do_split && !hc->complete_split) { ++ switch (hfnum.b.frnum & 0x7) { ++ case 7: ++ core_if->hfnum_7_samples++; ++ core_if->hfnum_7_frrem_accum += hfnum.b.frrem; ++ break; ++ case 0: ++ core_if->hfnum_0_samples++; ++ core_if->hfnum_0_frrem_accum += hfnum.b.frrem; ++ break; ++ default: ++ core_if->hfnum_other_samples++; ++ core_if->hfnum_other_frrem_accum += hfnum.b.frrem; ++ break; ++ } ++ } ++#endif ++ } ++} ++ ++#ifdef DEBUG ++static void hc_xfer_timeout(unsigned long ptr) ++{ ++ hc_xfer_info_t *xfer_info = (hc_xfer_info_t *)ptr; ++ int hc_num = xfer_info->hc->hc_num; ++ DWC_WARN("%s: timeout on channel %d\n", __func__, hc_num); ++ DWC_WARN(" start_hcchar_val 0x%08x\n", xfer_info->core_if->start_hcchar_val[hc_num]); ++} ++#endif ++ ++/* ++ * This function does the setup for a data transfer for a host channel and ++ * starts the transfer. May be called in either Slave mode or DMA mode. In ++ * Slave mode, the caller must ensure that there is sufficient space in the ++ * request queue and Tx Data FIFO. ++ * ++ * For an OUT transfer in Slave mode, it loads a data packet into the ++ * appropriate FIFO. If necessary, additional data packets will be loaded in ++ * the Host ISR. ++ * ++ * For an IN transfer in Slave mode, a data packet is requested. The data ++ * packets are unloaded from the Rx FIFO in the Host ISR. If necessary, ++ * additional data packets are requested in the Host ISR. ++ * ++ * For a PING transfer in Slave mode, the Do Ping bit is set in the egards, ++ * ++ * Steven ++ * ++ * register along with a packet count of 1 and the channel is enabled. This ++ * causes a single PING transaction to occur. Other fields in HCTSIZ are ++ * simply set to 0 since no data transfer occurs in this case. ++ * ++ * For a PING transfer in DMA mode, the HCTSIZ register is initialized with ++ * all the information required to perform the subsequent data transfer. In ++ * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the ++ * controller performs the entire PING protocol, then starts the data ++ * transfer. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param hc Information needed to initialize the host channel. The xfer_len ++ * value may be reduced to accommodate the max widths of the XferSize and ++ * PktCnt fields in the HCTSIZn register. The multi_count value may be changed ++ * to reflect the final xfer_len value. 
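The XferSize/PktCnt setup that follows is a round-up to whole max-packet units, clamped to what the HCTSIZ fields can hold, with IN transfers always programmed as an integral number of packets and a zero-length transfer still counted as one packet. A standalone worked example of that arithmetic (the 511-packet clamp is an assumed PktCnt limit; the real one comes from core_params->max_packet_count):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical transfer; the real values come from the URB/QTD. */
		uint32_t xfer_len = 1000;
		uint16_t max_packet = 64;
		uint16_t max_hc_pkt_count = 511;	/* assumed HCTSIZ.PktCnt limit */
		uint16_t num_packets;
		int ep_is_in = 1;

		if (xfer_len > 0) {
			/* Round up to whole packets, then clamp to what PktCnt can hold. */
			num_packets = (xfer_len + max_packet - 1) / max_packet;
			if (num_packets > max_hc_pkt_count) {
				num_packets = max_hc_pkt_count;
				xfer_len = num_packets * max_packet;
			}
		} else {
			num_packets = 1;	/* a zero-length transfer still needs one packet */
		}

		if (ep_is_in)
			xfer_len = num_packets * max_packet;	/* IN always programs whole packets */

		/* Prints: PktCnt=16 XferSize=1024 */
		printf("PktCnt=%u XferSize=%u\n", (unsigned)num_packets, (unsigned)xfer_len);
		return 0;
	}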
++ */ ++void dwc_otg_hc_start_transfer(dwc_otg_core_if_t *core_if, dwc_hc_t *hc) ++{ ++ hcchar_data_t hcchar; ++ hctsiz_data_t hctsiz; ++ uint16_t num_packets; ++ uint32_t max_hc_xfer_size = core_if->core_params->max_transfer_size; ++ uint16_t max_hc_pkt_count = core_if->core_params->max_packet_count; ++ dwc_otg_hc_regs_t *hc_regs = core_if->host_if->hc_regs[hc->hc_num]; ++ ++ hctsiz.d32 = 0; ++ ++ if (hc->do_ping) { ++ if (!core_if->dma_enable) { ++ dwc_otg_hc_do_ping(core_if, hc); ++ hc->xfer_started = 1; ++ return; ++ } ++ else { ++ hctsiz.b.dopng = 1; ++ } ++ } ++ ++ if (hc->do_split) { ++ num_packets = 1; ++ ++ if (hc->complete_split && !hc->ep_is_in) { ++ /* For CSPLIT OUT Transfer, set the size to 0 so the ++ * core doesn't expect any data written to the FIFO */ ++ hc->xfer_len = 0; ++ } ++ else if (hc->ep_is_in || (hc->xfer_len > hc->max_packet)) { ++ hc->xfer_len = hc->max_packet; ++ } ++ else if (!hc->ep_is_in && (hc->xfer_len > 188)) { ++ hc->xfer_len = 188; ++ } ++ ++ hctsiz.b.xfersize = hc->xfer_len; ++ } ++ else { ++ /* ++ * Ensure that the transfer length and packet count will fit ++ * in the widths allocated for them in the HCTSIZn register. ++ */ ++ if (hc->ep_type == DWC_OTG_EP_TYPE_INTR || ++ hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { ++ /* ++ * Make sure the transfer size is no larger than one ++ * (micro)frame's worth of data. (A check was done ++ * when the periodic transfer was accepted to ensure ++ * that a (micro)frame's worth of data can be ++ * programmed into a channel.) ++ */ ++ uint32_t max_periodic_len = hc->multi_count * hc->max_packet; ++ if (hc->xfer_len > max_periodic_len) { ++ hc->xfer_len = max_periodic_len; ++ } ++ else { ++ } ++ ++ } ++ else if (hc->xfer_len > max_hc_xfer_size) { ++ /* Make sure that xfer_len is a multiple of max packet size. */ ++ hc->xfer_len = max_hc_xfer_size - hc->max_packet + 1; ++ } ++ ++ if (hc->xfer_len > 0) { ++ num_packets = (hc->xfer_len + hc->max_packet - 1) / hc->max_packet; ++ if (num_packets > max_hc_pkt_count) { ++ num_packets = max_hc_pkt_count; ++ hc->xfer_len = num_packets * hc->max_packet; ++ } ++ } ++ else { ++ /* Need 1 packet for transfer length of 0. */ ++ num_packets = 1; ++ } ++ ++ if (hc->ep_is_in) { ++ /* Always program an integral # of max packets for IN transfers. */ ++ hc->xfer_len = num_packets * hc->max_packet; ++ } ++ ++ if (hc->ep_type == DWC_OTG_EP_TYPE_INTR || ++ hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { ++ /* ++ * Make sure that the multi_count field matches the ++ * actual transfer length. ++ */ ++ hc->multi_count = num_packets; ++ } ++ ++ if (hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { ++ /* Set up the initial PID for the transfer. 
*/ ++ if (hc->speed == DWC_OTG_EP_SPEED_HIGH) { ++ if (hc->ep_is_in) { ++ if (hc->multi_count == 1) { ++ hc->data_pid_start = DWC_OTG_HC_PID_DATA0; ++ } ++ else if (hc->multi_count == 2) { ++ hc->data_pid_start = DWC_OTG_HC_PID_DATA1; ++ } ++ else { ++ hc->data_pid_start = DWC_OTG_HC_PID_DATA2; ++ } ++ } ++ else { ++ if (hc->multi_count == 1) { ++ hc->data_pid_start = DWC_OTG_HC_PID_DATA0; ++ } ++ else { ++ hc->data_pid_start = DWC_OTG_HC_PID_MDATA; ++ } ++ } ++ } ++ else { ++ hc->data_pid_start = DWC_OTG_HC_PID_DATA0; ++ } ++ } ++ ++ hctsiz.b.xfersize = hc->xfer_len; ++ } ++ ++ hc->start_pkt_count = num_packets; ++ hctsiz.b.pktcnt = num_packets; ++ hctsiz.b.pid = hc->data_pid_start; ++ dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32); ++ ++ DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num); ++ DWC_DEBUGPL(DBG_HCDV, " Xfer Size: %d\n", hctsiz.b.xfersize); ++ DWC_DEBUGPL(DBG_HCDV, " Num Pkts: %d\n", hctsiz.b.pktcnt); ++ DWC_DEBUGPL(DBG_HCDV, " Start PID: %d\n", hctsiz.b.pid); ++ ++ if (core_if->dma_enable) { ++#if defined (CONFIG_DWC_OTG_HOST_ONLY) ++ if ((uint32_t)hc->xfer_buff & 0x3) { ++ /* non DWORD-aligned buffer case*/ ++ if(!hc->qh->dw_align_buf) { ++ hc->qh->dw_align_buf = ++ dma_alloc_coherent(NULL, ++ core_if->core_params->max_transfer_size, ++ &hc->qh->dw_align_buf_dma, ++ GFP_ATOMIC | GFP_DMA); ++ if (!hc->qh->dw_align_buf) { ++ ++ DWC_ERROR("%s: Failed to allocate memory to handle " ++ "non-dword aligned buffer case\n", __func__); ++ return; ++ } ++ ++ } ++ if (!hc->ep_is_in) { ++ memcpy(hc->qh->dw_align_buf, phys_to_virt((uint32_t)hc->xfer_buff), hc->xfer_len); ++ } ++ ++ dwc_write_reg32(&hc_regs->hcdma, hc->qh->dw_align_buf_dma); ++ } ++ else ++#endif ++ dwc_write_reg32(&hc_regs->hcdma, (uint32_t)hc->xfer_buff); ++ } ++ ++ /* Start the split */ ++ if (hc->do_split) { ++ hcsplt_data_t hcsplt; ++ hcsplt.d32 = dwc_read_reg32 (&hc_regs->hcsplt); ++ hcsplt.b.spltena = 1; ++ dwc_write_reg32(&hc_regs->hcsplt, hcsplt.d32); ++ } ++ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hcchar.b.multicnt = hc->multi_count; ++ hc_set_even_odd_frame(core_if, hc, &hcchar); ++#ifdef DEBUG ++ core_if->start_hcchar_val[hc->hc_num] = hcchar.d32; ++ if (hcchar.b.chdis) { ++ DWC_WARN("%s: chdis set, channel %d, hcchar 0x%08x\n", ++ __func__, hc->hc_num, hcchar.d32); ++ } ++#endif ++ ++ /* Set host channel enable after all other setup is complete. */ ++ hcchar.b.chen = 1; ++ hcchar.b.chdis = 0; ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++ ++ hc->xfer_started = 1; ++ hc->requests++; ++ ++ if (!core_if->dma_enable && ++ !hc->ep_is_in && hc->xfer_len > 0) { ++ /* Load OUT packet into the appropriate Tx FIFO. */ ++ dwc_otg_hc_write_packet(core_if, hc); ++ } ++ ++#ifdef DEBUG ++ /* Start a timer for this transfer. */ ++ core_if->hc_xfer_timer[hc->hc_num].function = hc_xfer_timeout; ++ core_if->hc_xfer_info[hc->hc_num].core_if = core_if; ++ core_if->hc_xfer_info[hc->hc_num].hc = hc; ++ core_if->hc_xfer_timer[hc->hc_num].data = (unsigned long)(&core_if->hc_xfer_info[hc->hc_num]); ++ core_if->hc_xfer_timer[hc->hc_num].expires = jiffies + (HZ*10); ++ add_timer(&core_if->hc_xfer_timer[hc->hc_num]); ++#endif ++} ++ ++/** ++ * This function continues a data transfer that was started by previous call ++ * to dwc_otg_hc_start_transfer. The caller must ensure there is ++ * sufficient space in the request queue and Tx Data FIFO. This function ++ * should only be called in Slave mode. In DMA mode, the controller acts ++ * autonomously to complete transfers programmed to a host channel. 
++ * ++ * For an OUT transfer, a new data packet is loaded into the appropriate FIFO ++ * if there is any data remaining to be queued. For an IN transfer, another ++ * data packet is always requested. For the SETUP phase of a control transfer, ++ * this function does nothing. ++ * ++ * @return 1 if a new request is queued, 0 if no more requests are required ++ * for this transfer. ++ */ ++int dwc_otg_hc_continue_transfer(dwc_otg_core_if_t *core_if, dwc_hc_t *hc) ++{ ++ DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num); ++ ++ if (hc->do_split) { ++ /* SPLITs always queue just once per channel */ ++ return 0; ++ } ++ else if (hc->data_pid_start == DWC_OTG_HC_PID_SETUP) { ++ /* SETUPs are queued only once since they can't be NAKed. */ ++ return 0; ++ } ++ else if (hc->ep_is_in) { ++ /* ++ * Always queue another request for other IN transfers. If ++ * back-to-back INs are issued and NAKs are received for both, ++ * the driver may still be processing the first NAK when the ++ * second NAK is received. When the interrupt handler clears ++ * the NAK interrupt for the first NAK, the second NAK will ++ * not be seen. So we can't depend on the NAK interrupt ++ * handler to requeue a NAKed request. Instead, IN requests ++ * are issued each time this function is called. When the ++ * transfer completes, the extra requests for the channel will ++ * be flushed. ++ */ ++ hcchar_data_t hcchar; ++ dwc_otg_hc_regs_t *hc_regs = core_if->host_if->hc_regs[hc->hc_num]; ++ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hc_set_even_odd_frame(core_if, hc, &hcchar); ++ hcchar.b.chen = 1; ++ hcchar.b.chdis = 0; ++ DWC_DEBUGPL(DBG_HCDV, " IN xfer: hcchar = 0x%08x\n", hcchar.d32); ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++ hc->requests++; ++ return 1; ++ } ++ else { ++ /* OUT transfers. */ ++ if (hc->xfer_count < hc->xfer_len) { ++ if (hc->ep_type == DWC_OTG_EP_TYPE_INTR || ++ hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { ++ hcchar_data_t hcchar; ++ dwc_otg_hc_regs_t *hc_regs; ++ hc_regs = core_if->host_if->hc_regs[hc->hc_num]; ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hc_set_even_odd_frame(core_if, hc, &hcchar); ++ } ++ ++ /* Load OUT packet into the appropriate Tx FIFO. */ ++ dwc_otg_hc_write_packet(core_if, hc); ++ hc->requests++; ++ return 1; ++ } ++ else { ++ return 0; ++ } ++ } ++} ++ ++/** ++ * Starts a PING transfer. This function should only be called in Slave mode. ++ * The Do Ping bit is set in the HCTSIZ register, then the channel is enabled. ++ */ ++void dwc_otg_hc_do_ping(dwc_otg_core_if_t *core_if, dwc_hc_t *hc) ++{ ++ hcchar_data_t hcchar; ++ hctsiz_data_t hctsiz; ++ dwc_otg_hc_regs_t *hc_regs = core_if->host_if->hc_regs[hc->hc_num]; ++ ++ DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num); ++ ++ hctsiz.d32 = 0; ++ hctsiz.b.dopng = 1; ++ hctsiz.b.pktcnt = 1; ++ dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32); ++ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hcchar.b.chen = 1; ++ hcchar.b.chdis = 0; ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++} ++ ++/* ++ * This function writes a packet into the Tx FIFO associated with the Host ++ * Channel. For a channel associated with a non-periodic EP, the non-periodic ++ * Tx FIFO is written. For a channel associated with a periodic EP, the ++ * periodic Tx FIFO is written. This function should only be called in Slave ++ * mode. ++ * ++ * Upon return the xfer_buff and xfer_count fields in _hc are incremented by ++ * then number of bytes written to the Tx FIFO. 
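Slave-mode FIFO accesses in the function that follows are always whole 32-bit words, so the byte count of each packet is rounded up before writing. A minimal sketch of that rounding (the helper name is illustrative, not part of the patch):

/* Illustrative sketch only: number of 32-bit FIFO writes needed for one
 * packet, as computed in dwc_otg_hc_write_packet() below. */
#include <stdint.h>

static inline uint32_t packet_dwords(uint32_t byte_count)
{
	return (byte_count + 3) / 4;	/* round up to whole DWORDs */
}

/* packet_dwords(188) == 47, packet_dwords(189) == 48 -- a short final
 * packet is padded to the next word boundary on the FIFO side. */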
++ */ ++void dwc_otg_hc_write_packet(dwc_otg_core_if_t *core_if, dwc_hc_t *hc) ++{ ++ uint32_t i; ++ uint32_t remaining_count; ++ uint32_t byte_count; ++ uint32_t dword_count; ++ ++ uint32_t *data_buff = (uint32_t *)(hc->xfer_buff); ++ uint32_t *data_fifo = core_if->data_fifo[hc->hc_num]; ++ ++ remaining_count = hc->xfer_len - hc->xfer_count; ++ if (remaining_count > hc->max_packet) { ++ byte_count = hc->max_packet; ++ } ++ else { ++ byte_count = remaining_count; ++ } ++ ++ dword_count = (byte_count + 3) / 4; ++ ++ if ((((unsigned long)data_buff) & 0x3) == 0) { ++ /* xfer_buff is DWORD aligned. */ ++ for (i = 0; i < dword_count; i++, data_buff++) ++ { ++ dwc_write_reg32(data_fifo, *data_buff); ++ } ++ } ++ else { ++ /* xfer_buff is not DWORD aligned. */ ++ for (i = 0; i < dword_count; i++, data_buff++) ++ { ++ dwc_write_reg32(data_fifo, get_unaligned(data_buff)); ++ } ++ } ++ ++ hc->xfer_count += byte_count; ++ hc->xfer_buff += byte_count; ++} ++ ++/** ++ * Gets the current USB frame number. This is the frame number from the last ++ * SOF packet. ++ */ ++uint32_t dwc_otg_get_frame_number(dwc_otg_core_if_t *core_if) ++{ ++ dsts_data_t dsts; ++ dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts); ++ ++ /* read current frame/microframe number from DSTS register */ ++ return dsts.b.soffn; ++} ++ ++/** ++ * This function reads a setup packet from the Rx FIFO into the destination ++ * buffer. This function is called from the Rx Status Queue Level (RxStsQLvl) ++ * Interrupt routine when a SETUP packet has been received in Slave mode. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param dest Destination buffer for packet data. ++ */ ++void dwc_otg_read_setup_packet(dwc_otg_core_if_t *core_if, uint32_t *dest) ++{ ++ /* Get the 8 bytes of a setup transaction data */ ++ ++ /* Pop 2 DWORDS off the receive data FIFO into memory */ ++ dest[0] = dwc_read_reg32(core_if->data_fifo[0]); ++ dest[1] = dwc_read_reg32(core_if->data_fifo[0]); ++} ++ ++ ++/** ++ * This function enables EP0 OUT to receive SETUP packets and configures EP0 ++ * IN for transmitting packets. It is normally called when the ++ * "Enumeration Done" interrupt occurs. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param ep The EP0 data. 
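In dwc_otg_ep0_activate() below, the enumerated speed read from DSTS only affects the EP0 maximum packet size. Condensed into a small lookup for reference (a sketch that reuses the DWC_DSTS_ENUMSPD_* and DWC_DEP0CTL_MPS_* macros defined elsewhere in this driver; the helper itself is not part of the patch):

/* Illustrative sketch only: EP0 MPS selection per DSTS.EnumSpd, matching
 * the switch statement in dwc_otg_ep0_activate() below. */
static inline uint32_t ep0_mps_for_speed(uint32_t enumspd)
{
	if (enumspd == DWC_DSTS_ENUMSPD_LS_PHY_6MHZ)
		return DWC_DEP0CTL_MPS_8;	/* low speed: 8-byte EP0 */
	return DWC_DEP0CTL_MPS_64;		/* high/full speed: 64-byte EP0 */
}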
++ */
++void dwc_otg_ep0_activate(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
++{
++	dwc_otg_dev_if_t *dev_if = core_if->dev_if;
++	dsts_data_t dsts;
++	depctl_data_t diepctl;
++	depctl_data_t doepctl;
++	dctl_data_t dctl = { .d32 = 0 };
++
++	/* Read the Device Status and Endpoint 0 Control registers */
++	dsts.d32 = dwc_read_reg32(&dev_if->dev_global_regs->dsts);
++	diepctl.d32 = dwc_read_reg32(&dev_if->in_ep_regs[0]->diepctl);
++	doepctl.d32 = dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl);
++
++	/* Set the MPS of the IN EP based on the enumeration speed */
++	switch (dsts.b.enumspd) {
++	case DWC_DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ:
++	case DWC_DSTS_ENUMSPD_FS_PHY_30MHZ_OR_60MHZ:
++	case DWC_DSTS_ENUMSPD_FS_PHY_48MHZ:
++		diepctl.b.mps = DWC_DEP0CTL_MPS_64;
++		break;
++	case DWC_DSTS_ENUMSPD_LS_PHY_6MHZ:
++		diepctl.b.mps = DWC_DEP0CTL_MPS_8;
++		break;
++	}
++
++	dwc_write_reg32(&dev_if->in_ep_regs[0]->diepctl, diepctl.d32);
++
++	/* Enable OUT EP for receive */
++	doepctl.b.epena = 1;
++	dwc_write_reg32(&dev_if->out_ep_regs[0]->doepctl, doepctl.d32);
++
++#ifdef VERBOSE
++	DWC_DEBUGPL(DBG_PCDV,"doepctl0=%0x\n",
++		dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl));
++	DWC_DEBUGPL(DBG_PCDV,"diepctl0=%0x\n",
++		dwc_read_reg32(&dev_if->in_ep_regs[0]->diepctl));
++#endif
++	dctl.b.cgnpinnak = 1;
++
++	dwc_modify_reg32(&dev_if->dev_global_regs->dctl, dctl.d32, dctl.d32);
++	DWC_DEBUGPL(DBG_PCDV,"dctl=%0x\n",
++		dwc_read_reg32(&dev_if->dev_global_regs->dctl));
++}
++
++/**
++ * This function activates an EP. The Device EP control register for
++ * the EP is configured as defined in the ep structure. Note: This
++ * function is not used for EP0.
++ *
++ * @param core_if Programming view of DWC_otg controller.
++ * @param ep The EP to activate.
++ */
++void dwc_otg_ep_activate(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
++{
++	dwc_otg_dev_if_t *dev_if = core_if->dev_if;
++	depctl_data_t depctl;
++	volatile uint32_t *addr;
++	daint_data_t daintmsk = { .d32 = 0 };
++
++	DWC_DEBUGPL(DBG_PCDV, "%s() EP%d-%s\n", __func__, ep->num,
++		(ep->is_in?"IN":"OUT"));
++
++	/* Read DEPCTLn register */
++	if (ep->is_in == 1) {
++		addr = &dev_if->in_ep_regs[ep->num]->diepctl;
++		daintmsk.ep.in = 1 << ep->num;
++	}
++	else {
++		addr = &dev_if->out_ep_regs[ep->num]->doepctl;
++		daintmsk.ep.out = 1 << ep->num;
++	}
++
++	/* If the EP is already active don't change the EP Control
++	 * register. */
++	depctl.d32 = dwc_read_reg32(addr);
++	if (!depctl.b.usbactep) {
++		depctl.b.mps = ep->maxpacket;
++		depctl.b.eptype = ep->type;
++		depctl.b.txfnum = ep->tx_fifo_num;
++
++		if (ep->type == DWC_OTG_EP_TYPE_ISOC) {
++			depctl.b.setd0pid = 1; // ???
++		}
++		else {
++			depctl.b.setd0pid = 1;
++		}
++		depctl.b.usbactep = 1;
++
++		dwc_write_reg32(addr, depctl.d32);
++		DWC_DEBUGPL(DBG_PCDV,"DEPCTL=%08x\n", dwc_read_reg32(addr));
++	}
++
++	/* Enable the Interrupt for this EP */
++	if(core_if->multiproc_int_enable) {
++		if (ep->is_in == 1) {
++			diepmsk_data_t diepmsk = { .d32 = 0};
++			diepmsk.b.xfercompl = 1;
++			diepmsk.b.timeout = 1;
++			diepmsk.b.epdisabled = 1;
++			diepmsk.b.ahberr = 1;
++			diepmsk.b.intknepmis = 1;
++			diepmsk.b.txfifoundrn = 1; //?????
++
++
++			if(core_if->dma_desc_enable) {
++				diepmsk.b.bna = 1;
++			}
++/*
++			if(core_if->dma_enable) {
++				doepmsk.b.nak = 1;
++			}
++*/
++			dwc_write_reg32(&dev_if->dev_global_regs->diepeachintmsk[ep->num], diepmsk.d32);
++
++		} else {
++			doepmsk_data_t doepmsk = { .d32 = 0};
++			doepmsk.b.xfercompl = 1;
++			doepmsk.b.ahberr = 1;
++			doepmsk.b.epdisabled = 1;
++
++
++			if(core_if->dma_desc_enable) {
++				doepmsk.b.bna = 1;
++			}
++/*
++			doepmsk.b.babble = 1;
++			doepmsk.b.nyet = 1;
++			doepmsk.b.nak = 1;
++*/
++			dwc_write_reg32(&dev_if->dev_global_regs->doepeachintmsk[ep->num], doepmsk.d32);
++		}
++		dwc_modify_reg32(&dev_if->dev_global_regs->deachintmsk,
++			0, daintmsk.d32);
++	} else {
++		dwc_modify_reg32(&dev_if->dev_global_regs->daintmsk,
++			0, daintmsk.d32);
++	}
++
++	DWC_DEBUGPL(DBG_PCDV,"DAINTMSK=%0x\n",
++		dwc_read_reg32(&dev_if->dev_global_regs->daintmsk));
++
++	ep->stall_clear_flag = 0;
++	return;
++}
++
++/**
++ * This function deactivates an EP. This is done by clearing the USB Active
++ * EP bit in the Device EP control register. Note: This function is not used
++ * for EP0. EP0 cannot be deactivated.
++ *
++ * @param core_if Programming view of DWC_otg controller.
++ * @param ep The EP to deactivate.
++ */
++void dwc_otg_ep_deactivate(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
++{
++	depctl_data_t depctl = { .d32 = 0 };
++	volatile uint32_t *addr;
++	daint_data_t daintmsk = { .d32 = 0};
++
++	/* Read DEPCTLn register */
++	if (ep->is_in == 1) {
++		addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl;
++		daintmsk.ep.in = 1 << ep->num;
++	}
++	else {
++		addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl;
++		daintmsk.ep.out = 1 << ep->num;
++	}
++
++	depctl.b.usbactep = 0;
++
++	if(core_if->dma_desc_enable)
++		depctl.b.epdis = 1;
++
++	dwc_write_reg32(addr, depctl.d32);
++
++	/* Disable the Interrupt for this EP */
++	if(core_if->multiproc_int_enable) {
++		dwc_modify_reg32(&core_if->dev_if->dev_global_regs->deachintmsk,
++			daintmsk.d32, 0);
++
++		if (ep->is_in == 1) {
++			dwc_write_reg32(&core_if->dev_if->dev_global_regs->diepeachintmsk[ep->num], 0);
++		} else {
++			dwc_write_reg32(&core_if->dev_if->dev_global_regs->doepeachintmsk[ep->num], 0);
++		}
++	} else {
++		dwc_modify_reg32(&core_if->dev_if->dev_global_regs->daintmsk,
++			daintmsk.d32, 0);
++	}
++}
++
++/**
++ * This function initializes the DMA descriptor chain used for a data
++ * transfer on an EP. It is called before the transfer is started.
++ *
++ * @param core_if Programming view of DWC_otg controller.
++ * @param ep The EP to start the transfer on.
++ */
++static void init_dma_desc_chain(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
++{
++	dwc_otg_dma_desc_t* dma_desc;
++	uint32_t offset;
++	uint32_t xfer_est;
++	int i;
++
++	ep->desc_cnt = ( ep->total_len / ep->maxxfer) +
++		((ep->total_len % ep->maxxfer) ?
1 : 0); ++ if(!ep->desc_cnt) ++ ep->desc_cnt = 1; ++ ++ dma_desc = ep->desc_addr; ++ xfer_est = ep->total_len; ++ offset = 0; ++ for( i = 0; i < ep->desc_cnt; ++i) { ++ /** DMA Descriptor Setup */ ++ if(xfer_est > ep->maxxfer) { ++ dma_desc->status.b.bs = BS_HOST_BUSY; ++ dma_desc->status.b.l = 0; ++ dma_desc->status.b.ioc = 0; ++ dma_desc->status.b.sp = 0; ++ dma_desc->status.b.bytes = ep->maxxfer; ++ dma_desc->buf = ep->dma_addr + offset; ++ dma_desc->status.b.bs = BS_HOST_READY; ++ ++ xfer_est -= ep->maxxfer; ++ offset += ep->maxxfer; ++ } else { ++ dma_desc->status.b.bs = BS_HOST_BUSY; ++ dma_desc->status.b.l = 1; ++ dma_desc->status.b.ioc = 1; ++ if(ep->is_in) { ++ dma_desc->status.b.sp = (xfer_est % ep->maxpacket) ? ++ 1 : ((ep->sent_zlp) ? 1 : 0); ++ dma_desc->status.b.bytes = xfer_est; ++ } else { ++ dma_desc->status.b.bytes = xfer_est + ((4 - (xfer_est & 0x3)) & 0x3) ; ++ } ++ ++ dma_desc->buf = ep->dma_addr + offset; ++ dma_desc->status.b.bs = BS_HOST_READY; ++ } ++ dma_desc ++; ++ } ++} ++ ++/** ++ * This function does the setup for a data transfer for an EP and ++ * starts the transfer. For an IN transfer, the packets will be ++ * loaded into the appropriate Tx FIFO in the ISR. For OUT transfers, ++ * the packets are unloaded from the Rx FIFO in the ISR. the ISR. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param ep The EP to start the transfer on. ++ */ ++ ++void dwc_otg_ep_start_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) ++{ ++ depctl_data_t depctl; ++ deptsiz_data_t deptsiz; ++ gintmsk_data_t intr_mask = { .d32 = 0}; ++ ++ DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s()\n", __func__); ++ ++ DWC_DEBUGPL(DBG_PCD, "ep%d-%s xfer_len=%d xfer_cnt=%d " ++ "xfer_buff=%p start_xfer_buff=%p\n", ++ ep->num, (ep->is_in?"IN":"OUT"), ep->xfer_len, ++ ep->xfer_count, ep->xfer_buff, ep->start_xfer_buff); ++ ++ /* IN endpoint */ ++ if (ep->is_in == 1) { ++ dwc_otg_dev_in_ep_regs_t *in_regs = ++ core_if->dev_if->in_ep_regs[ep->num]; ++ ++ gnptxsts_data_t gtxstatus; ++ ++ gtxstatus.d32 = ++ dwc_read_reg32(&core_if->core_global_regs->gnptxsts); ++ ++ if(core_if->en_multiple_tx_fifo == 0 && gtxstatus.b.nptxqspcavail == 0) { ++#ifdef DEBUG ++ DWC_PRINT("TX Queue Full (0x%0x)\n", gtxstatus.d32); ++#endif ++ return; ++ } ++ ++ depctl.d32 = dwc_read_reg32(&(in_regs->diepctl)); ++ deptsiz.d32 = dwc_read_reg32(&(in_regs->dieptsiz)); ++ ++ ep->xfer_len += (ep->maxxfer < (ep->total_len - ep->xfer_len)) ? ++ ep->maxxfer : (ep->total_len - ep->xfer_len); ++ ++ /* Zero Length Packet? */ ++ if ((ep->xfer_len - ep->xfer_count) == 0) { ++ deptsiz.b.xfersize = 0; ++ deptsiz.b.pktcnt = 1; ++ } ++ else { ++ /* Program the transfer size and packet count ++ * as follows: xfersize = N * maxpacket + ++ * short_packet pktcnt = N + (short_packet ++ * exist ? 
1 : 0) ++ */ ++ deptsiz.b.xfersize = ep->xfer_len - ep->xfer_count; ++ deptsiz.b.pktcnt = ++ (ep->xfer_len - ep->xfer_count - 1 + ep->maxpacket) / ++ ep->maxpacket; ++ } ++ ++ ++ /* Write the DMA register */ ++ if (core_if->dma_enable) { ++ if (core_if->dma_desc_enable == 0) { ++ dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32); ++ dwc_write_reg32 (&(in_regs->diepdma), ++ (uint32_t)ep->dma_addr); ++ } ++ else { ++ init_dma_desc_chain(core_if, ep); ++ /** DIEPDMAn Register write */ ++ dwc_write_reg32(&in_regs->diepdma, ep->dma_desc_addr); ++ } ++ } ++ else { ++ dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32); ++ if(ep->type != DWC_OTG_EP_TYPE_ISOC) { ++ /** ++ * Enable the Non-Periodic Tx FIFO empty interrupt, ++ * or the Tx FIFO epmty interrupt in dedicated Tx FIFO mode, ++ * the data will be written into the fifo by the ISR. ++ */ ++ if(core_if->en_multiple_tx_fifo == 0) { ++ intr_mask.b.nptxfempty = 1; ++ dwc_modify_reg32(&core_if->core_global_regs->gintmsk, ++ intr_mask.d32, intr_mask.d32); ++ } ++ else { ++ /* Enable the Tx FIFO Empty Interrupt for this EP */ ++ if(ep->xfer_len > 0) { ++ uint32_t fifoemptymsk = 0; ++ fifoemptymsk = 1 << ep->num; ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk, ++ 0, fifoemptymsk); ++ ++ } ++ } ++ } ++ } ++ ++ /* EP enable, IN data in FIFO */ ++ depctl.b.cnak = 1; ++ depctl.b.epena = 1; ++ dwc_write_reg32(&in_regs->diepctl, depctl.d32); ++ ++ depctl.d32 = dwc_read_reg32 (&core_if->dev_if->in_ep_regs[0]->diepctl); ++ depctl.b.nextep = ep->num; ++ dwc_write_reg32 (&core_if->dev_if->in_ep_regs[0]->diepctl, depctl.d32); ++ ++ } ++ else { ++ /* OUT endpoint */ ++ dwc_otg_dev_out_ep_regs_t *out_regs = ++ core_if->dev_if->out_ep_regs[ep->num]; ++ ++ depctl.d32 = dwc_read_reg32(&(out_regs->doepctl)); ++ deptsiz.d32 = dwc_read_reg32(&(out_regs->doeptsiz)); ++ ++ ep->xfer_len += (ep->maxxfer < (ep->total_len - ep->xfer_len)) ? 
++ ep->maxxfer : (ep->total_len - ep->xfer_len); ++ ++ /* Program the transfer size and packet count as follows: ++ * ++ * pktcnt = N ++ * xfersize = N * maxpacket ++ */ ++ if ((ep->xfer_len - ep->xfer_count) == 0) { ++ /* Zero Length Packet */ ++ deptsiz.b.xfersize = ep->maxpacket; ++ deptsiz.b.pktcnt = 1; ++ } ++ else { ++ deptsiz.b.pktcnt = ++ (ep->xfer_len - ep->xfer_count + (ep->maxpacket - 1)) / ++ ep->maxpacket; ++ ep->xfer_len = deptsiz.b.pktcnt * ep->maxpacket + ep->xfer_count; ++ deptsiz.b.xfersize = ep->xfer_len - ep->xfer_count; ++ } ++ ++ DWC_DEBUGPL(DBG_PCDV, "ep%d xfersize=%d pktcnt=%d\n", ++ ep->num, ++ deptsiz.b.xfersize, deptsiz.b.pktcnt); ++ ++ if (core_if->dma_enable) { ++ if (!core_if->dma_desc_enable) { ++ dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32); ++ ++ dwc_write_reg32 (&(out_regs->doepdma), ++ (uint32_t)ep->dma_addr); ++ } ++ else { ++ init_dma_desc_chain(core_if, ep); ++ ++ /** DOEPDMAn Register write */ ++ dwc_write_reg32(&out_regs->doepdma, ep->dma_desc_addr); ++ } ++ } ++ else { ++ dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32); ++ } ++ ++ /* EP enable */ ++ depctl.b.cnak = 1; ++ depctl.b.epena = 1; ++ ++ dwc_write_reg32(&out_regs->doepctl, depctl.d32); ++ ++ DWC_DEBUGPL(DBG_PCD, "DOEPCTL=%08x DOEPTSIZ=%08x\n", ++ dwc_read_reg32(&out_regs->doepctl), ++ dwc_read_reg32(&out_regs->doeptsiz)); ++ DWC_DEBUGPL(DBG_PCD, "DAINTMSK=%08x GINTMSK=%08x\n", ++ dwc_read_reg32(&core_if->dev_if->dev_global_regs->daintmsk), ++ dwc_read_reg32(&core_if->core_global_regs->gintmsk)); ++ } ++} ++ ++/** ++ * This function setup a zero length transfer in Buffer DMA and ++ * Slave modes for usb requests with zero field set ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param ep The EP to start the transfer on. ++ * ++ */ ++void dwc_otg_ep_start_zl_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) ++{ ++ ++ depctl_data_t depctl; ++ deptsiz_data_t deptsiz; ++ gintmsk_data_t intr_mask = { .d32 = 0}; ++ ++ DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s()\n", __func__); ++ ++ /* IN endpoint */ ++ if (ep->is_in == 1) { ++ dwc_otg_dev_in_ep_regs_t *in_regs = ++ core_if->dev_if->in_ep_regs[ep->num]; ++ ++ depctl.d32 = dwc_read_reg32(&(in_regs->diepctl)); ++ deptsiz.d32 = dwc_read_reg32(&(in_regs->dieptsiz)); ++ ++ deptsiz.b.xfersize = 0; ++ deptsiz.b.pktcnt = 1; ++ ++ ++ /* Write the DMA register */ ++ if (core_if->dma_enable) { ++ if (core_if->dma_desc_enable == 0) { ++ dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32); ++ dwc_write_reg32 (&(in_regs->diepdma), ++ (uint32_t)ep->dma_addr); ++ } ++ } ++ else { ++ dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32); ++ /** ++ * Enable the Non-Periodic Tx FIFO empty interrupt, ++ * or the Tx FIFO epmty interrupt in dedicated Tx FIFO mode, ++ * the data will be written into the fifo by the ISR. 
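For reference, the OUT sizing in dwc_otg_ep_start_transfer() above always programs whole packets: DOEPTSIZ.PktCnt is rounded up and XferSize is then recomputed from it, which is why ep->xfer_len is bumped to the rounded size. A small worked sketch (the helper name is illustrative, not part of the patch):

/* Illustrative sketch only: OUT DOEPTSIZ rounding used by
 * dwc_otg_ep_start_transfer() above. */
#include <stdint.h>

static void out_ep_sizing(uint32_t remaining, uint32_t maxpacket,
			  uint32_t *pktcnt, uint32_t *xfersize)
{
	*pktcnt = (remaining + maxpacket - 1) / maxpacket;
	*xfersize = *pktcnt * maxpacket;	/* whole packets only */
}

/* remaining=1000, maxpacket=64 -> pktcnt=16, xfersize=1024; the buffer
 * must have room for the rounded-up size in case the final packet is full. */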
++ */ ++ if(core_if->en_multiple_tx_fifo == 0) { ++ intr_mask.b.nptxfempty = 1; ++ dwc_modify_reg32(&core_if->core_global_regs->gintmsk, ++ intr_mask.d32, intr_mask.d32); ++ } ++ else { ++ /* Enable the Tx FIFO Empty Interrupt for this EP */ ++ if(ep->xfer_len > 0) { ++ uint32_t fifoemptymsk = 0; ++ fifoemptymsk = 1 << ep->num; ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk, ++ 0, fifoemptymsk); ++ } ++ } ++ } ++ ++ /* EP enable, IN data in FIFO */ ++ depctl.b.cnak = 1; ++ depctl.b.epena = 1; ++ dwc_write_reg32(&in_regs->diepctl, depctl.d32); ++ ++ depctl.d32 = dwc_read_reg32 (&core_if->dev_if->in_ep_regs[0]->diepctl); ++ depctl.b.nextep = ep->num; ++ dwc_write_reg32 (&core_if->dev_if->in_ep_regs[0]->diepctl, depctl.d32); ++ ++ } ++ else { ++ /* OUT endpoint */ ++ dwc_otg_dev_out_ep_regs_t *out_regs = ++ core_if->dev_if->out_ep_regs[ep->num]; ++ ++ depctl.d32 = dwc_read_reg32(&(out_regs->doepctl)); ++ deptsiz.d32 = dwc_read_reg32(&(out_regs->doeptsiz)); ++ ++ /* Zero Length Packet */ ++ deptsiz.b.xfersize = ep->maxpacket; ++ deptsiz.b.pktcnt = 1; ++ ++ if (core_if->dma_enable) { ++ if (!core_if->dma_desc_enable) { ++ dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32); ++ ++ dwc_write_reg32 (&(out_regs->doepdma), ++ (uint32_t)ep->dma_addr); ++ } ++ } ++ else { ++ dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32); ++ } ++ ++ /* EP enable */ ++ depctl.b.cnak = 1; ++ depctl.b.epena = 1; ++ ++ dwc_write_reg32(&out_regs->doepctl, depctl.d32); ++ ++ } ++} ++ ++/** ++ * This function does the setup for a data transfer for EP0 and starts ++ * the transfer. For an IN transfer, the packets will be loaded into ++ * the appropriate Tx FIFO in the ISR. For OUT transfers, the packets are ++ * unloaded from the Rx FIFO in the ISR. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param ep The EP0 data. ++ */ ++void dwc_otg_ep0_start_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) ++{ ++ depctl_data_t depctl; ++ deptsiz0_data_t deptsiz; ++ gintmsk_data_t intr_mask = { .d32 = 0}; ++ dwc_otg_dma_desc_t* dma_desc; ++ ++ DWC_DEBUGPL(DBG_PCD, "ep%d-%s xfer_len=%d xfer_cnt=%d " ++ "xfer_buff=%p start_xfer_buff=%p \n", ++ ep->num, (ep->is_in?"IN":"OUT"), ep->xfer_len, ++ ep->xfer_count, ep->xfer_buff, ep->start_xfer_buff); ++ ++ ep->total_len = ep->xfer_len; ++ ++ /* IN endpoint */ ++ if (ep->is_in == 1) { ++ dwc_otg_dev_in_ep_regs_t *in_regs = ++ core_if->dev_if->in_ep_regs[0]; ++ ++ gnptxsts_data_t gtxstatus; ++ ++ gtxstatus.d32 = ++ dwc_read_reg32(&core_if->core_global_regs->gnptxsts); ++ ++ if(core_if->en_multiple_tx_fifo == 0 && gtxstatus.b.nptxqspcavail == 0) { ++#ifdef DEBUG ++ deptsiz.d32 = dwc_read_reg32(&in_regs->dieptsiz); ++ DWC_DEBUGPL(DBG_PCD,"DIEPCTL0=%0x\n", ++ dwc_read_reg32(&in_regs->diepctl)); ++ DWC_DEBUGPL(DBG_PCD, "DIEPTSIZ0=%0x (sz=%d, pcnt=%d)\n", ++ deptsiz.d32, ++ deptsiz.b.xfersize, deptsiz.b.pktcnt); ++ DWC_PRINT("TX Queue or FIFO Full (0x%0x)\n", ++ gtxstatus.d32); ++#endif ++ return; ++ } ++ ++ ++ depctl.d32 = dwc_read_reg32(&in_regs->diepctl); ++ deptsiz.d32 = dwc_read_reg32(&in_regs->dieptsiz); ++ ++ /* Zero Length Packet? */ ++ if (ep->xfer_len == 0) { ++ deptsiz.b.xfersize = 0; ++ deptsiz.b.pktcnt = 1; ++ } ++ else { ++ /* Program the transfer size and packet count ++ * as follows: xfersize = N * maxpacket + ++ * short_packet pktcnt = N + (short_packet ++ * exist ? 
1 : 0) ++ */ ++ if (ep->xfer_len > ep->maxpacket) { ++ ep->xfer_len = ep->maxpacket; ++ deptsiz.b.xfersize = ep->maxpacket; ++ } ++ else { ++ deptsiz.b.xfersize = ep->xfer_len; ++ } ++ deptsiz.b.pktcnt = 1; ++ ++ } ++ DWC_DEBUGPL(DBG_PCDV, "IN len=%d xfersize=%d pktcnt=%d [%08x]\n", ++ ep->xfer_len, ++ deptsiz.b.xfersize, deptsiz.b.pktcnt, deptsiz.d32); ++ ++ /* Write the DMA register */ ++ if (core_if->dma_enable) { ++ if(core_if->dma_desc_enable == 0) { ++ dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32); ++ ++ dwc_write_reg32 (&(in_regs->diepdma), ++ (uint32_t)ep->dma_addr); ++ } ++ else { ++ dma_desc = core_if->dev_if->in_desc_addr; ++ ++ /** DMA Descriptor Setup */ ++ dma_desc->status.b.bs = BS_HOST_BUSY; ++ dma_desc->status.b.l = 1; ++ dma_desc->status.b.ioc = 1; ++ dma_desc->status.b.sp = (ep->xfer_len == ep->maxpacket) ? 0 : 1; ++ dma_desc->status.b.bytes = ep->xfer_len; ++ dma_desc->buf = ep->dma_addr; ++ dma_desc->status.b.bs = BS_HOST_READY; ++ ++ /** DIEPDMA0 Register write */ ++ dwc_write_reg32(&in_regs->diepdma, core_if->dev_if->dma_in_desc_addr); ++ } ++ } ++ else { ++ dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32); ++ } ++ ++ /* EP enable, IN data in FIFO */ ++ depctl.b.cnak = 1; ++ depctl.b.epena = 1; ++ dwc_write_reg32(&in_regs->diepctl, depctl.d32); ++ ++ /** ++ * Enable the Non-Periodic Tx FIFO empty interrupt, the ++ * data will be written into the fifo by the ISR. ++ */ ++ if (!core_if->dma_enable) { ++ if(core_if->en_multiple_tx_fifo == 0) { ++ intr_mask.b.nptxfempty = 1; ++ dwc_modify_reg32(&core_if->core_global_regs->gintmsk, ++ intr_mask.d32, intr_mask.d32); ++ } ++ else { ++ /* Enable the Tx FIFO Empty Interrupt for this EP */ ++ if(ep->xfer_len > 0) { ++ uint32_t fifoemptymsk = 0; ++ fifoemptymsk |= 1 << ep->num; ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk, ++ 0, fifoemptymsk); ++ } ++ } ++ } ++ } ++ else { ++ /* OUT endpoint */ ++ dwc_otg_dev_out_ep_regs_t *out_regs = ++ core_if->dev_if->out_ep_regs[0]; ++ ++ depctl.d32 = dwc_read_reg32(&out_regs->doepctl); ++ deptsiz.d32 = dwc_read_reg32(&out_regs->doeptsiz); ++ ++ /* Program the transfer size and packet count as follows: ++ * xfersize = N * (maxpacket + 4 - (maxpacket % 4)) ++ * pktcnt = N */ ++ /* Zero Length Packet */ ++ deptsiz.b.xfersize = ep->maxpacket; ++ deptsiz.b.pktcnt = 1; ++ ++ DWC_DEBUGPL(DBG_PCDV, "len=%d xfersize=%d pktcnt=%d\n", ++ ep->xfer_len, ++ deptsiz.b.xfersize, deptsiz.b.pktcnt); ++ ++ if (core_if->dma_enable) { ++ if(!core_if->dma_desc_enable) { ++ dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32); ++ ++ dwc_write_reg32 (&(out_regs->doepdma), ++ (uint32_t)ep->dma_addr); ++ } ++ else { ++ dma_desc = core_if->dev_if->out_desc_addr; ++ ++ /** DMA Descriptor Setup */ ++ dma_desc->status.b.bs = BS_HOST_BUSY; ++ dma_desc->status.b.l = 1; ++ dma_desc->status.b.ioc = 1; ++ dma_desc->status.b.bytes = ep->maxpacket; ++ dma_desc->buf = ep->dma_addr; ++ dma_desc->status.b.bs = BS_HOST_READY; ++ ++ /** DOEPDMA0 Register write */ ++ dwc_write_reg32(&out_regs->doepdma, core_if->dev_if->dma_out_desc_addr); ++ } ++ } ++ else { ++ dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32); ++ } ++ ++ /* EP enable */ ++ depctl.b.cnak = 1; ++ depctl.b.epena = 1; ++ dwc_write_reg32 (&(out_regs->doepctl), depctl.d32); ++ } ++} ++ ++/** ++ * This function continues control IN transfers started by ++ * dwc_otg_ep0_start_transfer, when the transfer does not fit in a ++ * single packet. NOTE: The DIEPCTL0/DOEPCTL0 registers only have one ++ * bit for the packet count. 
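dwc_otg_ep0_start_transfer() above clamps each IN chunk to a single max-packet, and dwc_otg_ep0_continue_transfer() below handles the remainder, so the number of programming steps for an EP0 IN data stage follows directly. A sketch (the helper name is made up, not part of the patch):

/* Illustrative sketch only: number of start/continue steps for an EP0 IN
 * data stage, given the one-packet-per-programming clamp above. */
static inline unsigned int ep0_in_chunks(unsigned int total_len,
					 unsigned int maxpacket)
{
	return total_len ? (total_len + maxpacket - 1) / maxpacket : 1;
}

/* ep0_in_chunks(100, 64) == 2: one call to dwc_otg_ep0_start_transfer()
 * plus one follow-up via dwc_otg_ep0_continue_transfer(). */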
++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param ep The EP0 data. ++ */ ++void dwc_otg_ep0_continue_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) ++{ ++ depctl_data_t depctl; ++ deptsiz0_data_t deptsiz; ++ gintmsk_data_t intr_mask = { .d32 = 0}; ++ dwc_otg_dma_desc_t* dma_desc; ++ ++ if (ep->is_in == 1) { ++ dwc_otg_dev_in_ep_regs_t *in_regs = ++ core_if->dev_if->in_ep_regs[0]; ++ gnptxsts_data_t tx_status = { .d32 = 0 }; ++ ++ tx_status.d32 = dwc_read_reg32(&core_if->core_global_regs->gnptxsts); ++ /** @todo Should there be check for room in the Tx ++ * Status Queue. If not remove the code above this comment. */ ++ ++ depctl.d32 = dwc_read_reg32(&in_regs->diepctl); ++ deptsiz.d32 = dwc_read_reg32(&in_regs->dieptsiz); ++ ++ /* Program the transfer size and packet count ++ * as follows: xfersize = N * maxpacket + ++ * short_packet pktcnt = N + (short_packet ++ * exist ? 1 : 0) ++ */ ++ ++ ++ if(core_if->dma_desc_enable == 0) { ++ deptsiz.b.xfersize = (ep->total_len - ep->xfer_count) > ep->maxpacket ? ep->maxpacket : ++ (ep->total_len - ep->xfer_count); ++ deptsiz.b.pktcnt = 1; ++ if(core_if->dma_enable == 0) { ++ ep->xfer_len += deptsiz.b.xfersize; ++ } else { ++ ep->xfer_len = deptsiz.b.xfersize; ++ } ++ dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32); ++ } ++ else { ++ ep->xfer_len = (ep->total_len - ep->xfer_count) > ep->maxpacket ? ep->maxpacket : ++ (ep->total_len - ep->xfer_count); ++ ++ dma_desc = core_if->dev_if->in_desc_addr; ++ ++ /** DMA Descriptor Setup */ ++ dma_desc->status.b.bs = BS_HOST_BUSY; ++ dma_desc->status.b.l = 1; ++ dma_desc->status.b.ioc = 1; ++ dma_desc->status.b.sp = (ep->xfer_len == ep->maxpacket) ? 0 : 1; ++ dma_desc->status.b.bytes = ep->xfer_len; ++ dma_desc->buf = ep->dma_addr; ++ dma_desc->status.b.bs = BS_HOST_READY; ++ ++ /** DIEPDMA0 Register write */ ++ dwc_write_reg32(&in_regs->diepdma, core_if->dev_if->dma_in_desc_addr); ++ } ++ ++ ++ DWC_DEBUGPL(DBG_PCDV, "IN len=%d xfersize=%d pktcnt=%d [%08x]\n", ++ ep->xfer_len, ++ deptsiz.b.xfersize, deptsiz.b.pktcnt, deptsiz.d32); ++ ++ /* Write the DMA register */ ++ if (core_if->hwcfg2.b.architecture == DWC_INT_DMA_ARCH) { ++ if(core_if->dma_desc_enable == 0) ++ dwc_write_reg32 (&(in_regs->diepdma), (uint32_t)ep->dma_addr); ++ } ++ ++ /* EP enable, IN data in FIFO */ ++ depctl.b.cnak = 1; ++ depctl.b.epena = 1; ++ dwc_write_reg32(&in_regs->diepctl, depctl.d32); ++ ++ /** ++ * Enable the Non-Periodic Tx FIFO empty interrupt, the ++ * data will be written into the fifo by the ISR. ++ */ ++ if (!core_if->dma_enable) { ++ if(core_if->en_multiple_tx_fifo == 0) { ++ /* First clear it from GINTSTS */ ++ intr_mask.b.nptxfempty = 1; ++ dwc_modify_reg32(&core_if->core_global_regs->gintmsk, ++ intr_mask.d32, intr_mask.d32); ++ ++ } ++ else { ++ /* Enable the Tx FIFO Empty Interrupt for this EP */ ++ if(ep->xfer_len > 0) { ++ uint32_t fifoemptymsk = 0; ++ fifoemptymsk |= 1 << ep->num; ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk, ++ 0, fifoemptymsk); ++ } ++ } ++ } ++ } ++ else { ++ dwc_otg_dev_out_ep_regs_t *out_regs = ++ core_if->dev_if->out_ep_regs[0]; ++ ++ ++ depctl.d32 = dwc_read_reg32(&out_regs->doepctl); ++ deptsiz.d32 = dwc_read_reg32(&out_regs->doeptsiz); ++ ++ /* Program the transfer size and packet count ++ * as follows: xfersize = N * maxpacket + ++ * short_packet pktcnt = N + (short_packet ++ * exist ? 
1 : 0) ++ */ ++ deptsiz.b.xfersize = ep->maxpacket; ++ deptsiz.b.pktcnt = 1; ++ ++ ++ if(core_if->dma_desc_enable == 0) { ++ dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32); ++ } ++ else { ++ dma_desc = core_if->dev_if->out_desc_addr; ++ ++ /** DMA Descriptor Setup */ ++ dma_desc->status.b.bs = BS_HOST_BUSY; ++ dma_desc->status.b.l = 1; ++ dma_desc->status.b.ioc = 1; ++ dma_desc->status.b.bytes = ep->maxpacket; ++ dma_desc->buf = ep->dma_addr; ++ dma_desc->status.b.bs = BS_HOST_READY; ++ ++ /** DOEPDMA0 Register write */ ++ dwc_write_reg32(&out_regs->doepdma, core_if->dev_if->dma_out_desc_addr); ++ } ++ ++ ++ DWC_DEBUGPL(DBG_PCDV, "IN len=%d xfersize=%d pktcnt=%d [%08x]\n", ++ ep->xfer_len, ++ deptsiz.b.xfersize, deptsiz.b.pktcnt, deptsiz.d32); ++ ++ /* Write the DMA register */ ++ if (core_if->hwcfg2.b.architecture == DWC_INT_DMA_ARCH) { ++ if(core_if->dma_desc_enable == 0) ++ dwc_write_reg32 (&(out_regs->doepdma), (uint32_t)ep->dma_addr); ++ } ++ ++ /* EP enable, IN data in FIFO */ ++ depctl.b.cnak = 1; ++ depctl.b.epena = 1; ++ dwc_write_reg32(&out_regs->doepctl, depctl.d32); ++ ++ } ++} ++ ++#ifdef DEBUG ++void dump_msg(const u8 *buf, unsigned int length) ++{ ++ unsigned int start, num, i; ++ char line[52], *p; ++ ++ if (length >= 512) ++ return; ++ start = 0; ++ while (length > 0) { ++ num = min(length, 16u); ++ p = line; ++ for (i = 0; i < num; ++i) ++ { ++ if (i == 8) ++ *p++ = ' '; ++ sprintf(p, " %02x", buf[i]); ++ p += 3; ++ } ++ *p = 0; ++ DWC_PRINT("%6x: %s\n", start, line); ++ buf += num; ++ start += num; ++ length -= num; ++ } ++} ++#else ++static inline void dump_msg(const u8 *buf, unsigned int length) ++{ ++} ++#endif ++ ++/** ++ * This function writes a packet into the Tx FIFO associated with the ++ * EP. For non-periodic EPs the non-periodic Tx FIFO is written. For ++ * periodic EPs the periodic Tx FIFO associated with the EP is written ++ * with all packets for the next micro-frame. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param ep The EP to write packet for. ++ * @param dma Indicates if DMA is being used. ++ */ ++void dwc_otg_ep_write_packet(dwc_otg_core_if_t *core_if, dwc_ep_t *ep, int dma) ++{ ++ /** ++ * The buffer is padded to DWORD on a per packet basis in ++ * slave/dma mode if the MPS is not DWORD aligned. The last ++ * packet, if short, is also padded to a multiple of DWORD. ++ * ++ * ep->xfer_buff always starts DWORD aligned in memory and is a ++ * multiple of DWORD in length ++ * ++ * ep->xfer_len can be any number of bytes ++ * ++ * ep->xfer_count is a multiple of ep->maxpacket until the last ++ * packet ++ * ++ * FIFO access is DWORD */ ++ ++ uint32_t i; ++ uint32_t byte_count; ++ uint32_t dword_count; ++ uint32_t *fifo; ++ uint32_t *data_buff = (uint32_t *)ep->xfer_buff; ++ ++ DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s(%p,%p)\n", __func__, core_if, ep); ++ if (ep->xfer_count >= ep->xfer_len) { ++ DWC_WARN("%s() No data for EP%d!!!\n", __func__, ep->num); ++ return; ++ } ++ ++ /* Find the byte length of the packet either short packet or MPS */ ++ if ((ep->xfer_len - ep->xfer_count) < ep->maxpacket) { ++ byte_count = ep->xfer_len - ep->xfer_count; ++ } ++ else { ++ byte_count = ep->maxpacket; ++ } ++ ++ /* Find the DWORD length, padded by extra bytes as neccessary if MPS ++ * is not a multiple of DWORD */ ++ dword_count = (byte_count + 3) / 4; ++ ++#ifdef VERBOSE ++ dump_msg(ep->xfer_buff, byte_count); ++#endif ++ ++ /**@todo NGS Where are the Periodic Tx FIFO addresses ++ * intialized? What should this be? 
*/ ++ ++ fifo = core_if->data_fifo[ep->num]; ++ ++ ++ DWC_DEBUGPL((DBG_PCDV|DBG_CILV), "fifo=%p buff=%p *p=%08x bc=%d\n", fifo, data_buff, *data_buff, byte_count); ++ ++ if (!dma) { ++ for (i=0; ixfer_count += byte_count; ++ ep->xfer_buff += byte_count; ++ ep->dma_addr += byte_count; ++} ++ ++/** ++ * Set the EP STALL. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param ep The EP to set the stall on. ++ */ ++void dwc_otg_ep_set_stall(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) ++{ ++ depctl_data_t depctl; ++ volatile uint32_t *depctl_addr; ++ ++ DWC_DEBUGPL(DBG_PCD, "%s ep%d-%s\n", __func__, ep->num, ++ (ep->is_in?"IN":"OUT")); ++ ++ DWC_PRINT("%s ep%d-%s\n", __func__, ep->num, ++ (ep->is_in?"in":"out")); ++ ++ if (ep->is_in == 1) { ++ depctl_addr = &(core_if->dev_if->in_ep_regs[ep->num]->diepctl); ++ depctl.d32 = dwc_read_reg32(depctl_addr); ++ ++ /* set the disable and stall bits */ ++ if (depctl.b.epena) { ++ depctl.b.epdis = 1; ++ } ++ depctl.b.stall = 1; ++ dwc_write_reg32(depctl_addr, depctl.d32); ++ } ++ else { ++ depctl_addr = &(core_if->dev_if->out_ep_regs[ep->num]->doepctl); ++ depctl.d32 = dwc_read_reg32(depctl_addr); ++ ++ /* set the stall bit */ ++ depctl.b.stall = 1; ++ dwc_write_reg32(depctl_addr, depctl.d32); ++ } ++ ++ DWC_DEBUGPL(DBG_PCD,"DEPCTL=%0x\n",dwc_read_reg32(depctl_addr)); ++ ++ return; ++} ++ ++/** ++ * Clear the EP STALL. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param ep The EP to clear stall from. ++ */ ++void dwc_otg_ep_clear_stall(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) ++{ ++ depctl_data_t depctl; ++ volatile uint32_t *depctl_addr; ++ ++ DWC_DEBUGPL(DBG_PCD, "%s ep%d-%s\n", __func__, ep->num, ++ (ep->is_in?"IN":"OUT")); ++ ++ if (ep->is_in == 1) { ++ depctl_addr = &(core_if->dev_if->in_ep_regs[ep->num]->diepctl); ++ } ++ else { ++ depctl_addr = &(core_if->dev_if->out_ep_regs[ep->num]->doepctl); ++ } ++ ++ depctl.d32 = dwc_read_reg32(depctl_addr); ++ ++ /* clear the stall bits */ ++ depctl.b.stall = 0; ++ ++ /* ++ * USB Spec 9.4.5: For endpoints using data toggle, regardless ++ * of whether an endpoint has the Halt feature set, a ++ * ClearFeature(ENDPOINT_HALT) request always results in the ++ * data toggle being reinitialized to DATA0. ++ */ ++ if (ep->type == DWC_OTG_EP_TYPE_INTR || ++ ep->type == DWC_OTG_EP_TYPE_BULK) { ++ depctl.b.setd0pid = 1; /* DATA0 */ ++ } ++ ++ dwc_write_reg32(depctl_addr, depctl.d32); ++ DWC_DEBUGPL(DBG_PCD,"DEPCTL=%0x\n",dwc_read_reg32(depctl_addr)); ++ return; ++} ++ ++/** ++ * This function reads a packet from the Rx FIFO into the destination ++ * buffer. To read SETUP data use dwc_otg_read_setup_packet. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param dest Destination buffer for the packet. ++ * @param bytes Number of bytes to copy to the destination. ++ */ ++void dwc_otg_read_packet(dwc_otg_core_if_t *core_if, ++ uint8_t *dest, ++ uint16_t bytes) ++{ ++ int i; ++ int word_count = (bytes + 3) / 4; ++ ++ volatile uint32_t *fifo = core_if->data_fifo[0]; ++ uint32_t *data_buff = (uint32_t *)dest; ++ ++ /** ++ * @todo Account for the case where _dest is not dword aligned. This ++ * requires reading data from the FIFO into a uint32_t temp buffer, ++ * then moving it into the data buffer. 
++
++	DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s(%p,%p,%d)\n", __func__,
++		core_if, dest, bytes);
++
++	for (i=0; i<word_count; i++, data_buff++)
++	{
++		*data_buff = dwc_read_reg32(fifo);
++	}
++
++	return;
++}
++
++
++/**
++ * This function reads the device registers and prints them
++ *
++ * @param core_if Programming view of DWC_otg controller.
++ */
++void dwc_otg_dump_dev_registers(dwc_otg_core_if_t *core_if)
++{
++	int i;
++	volatile uint32_t *addr;
++
++	DWC_PRINT("Device Global Registers\n");
++	addr=&core_if->dev_if->dev_global_regs->dcfg;
++	DWC_PRINT("DCFG @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++	addr=&core_if->dev_if->dev_global_regs->dctl;
++	DWC_PRINT("DCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++	addr=&core_if->dev_if->dev_global_regs->dsts;
++	DWC_PRINT("DSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++	addr=&core_if->dev_if->dev_global_regs->diepmsk;
++	DWC_PRINT("DIEPMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++	addr=&core_if->dev_if->dev_global_regs->doepmsk;
++	DWC_PRINT("DOEPMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++	addr=&core_if->dev_if->dev_global_regs->daint;
++	DWC_PRINT("DAINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++	addr=&core_if->dev_if->dev_global_regs->daintmsk;
++	DWC_PRINT("DAINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++	addr=&core_if->dev_if->dev_global_regs->dtknqr1;
++	DWC_PRINT("DTKNQR1 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++	if (core_if->hwcfg2.b.dev_token_q_depth > 6) {
++		addr=&core_if->dev_if->dev_global_regs->dtknqr2;
++		DWC_PRINT("DTKNQR2 @0x%08X : 0x%08X\n",
++			(uint32_t)addr,dwc_read_reg32(addr));
++	}
++
++	addr=&core_if->dev_if->dev_global_regs->dvbusdis;
++	DWC_PRINT("DVBUSID @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++
++	addr=&core_if->dev_if->dev_global_regs->dvbuspulse;
++	DWC_PRINT("DVBUSPULSE @0x%08X : 0x%08X\n",
++		(uint32_t)addr,dwc_read_reg32(addr));
++
++	if (core_if->hwcfg2.b.dev_token_q_depth > 14) {
++		addr=&core_if->dev_if->dev_global_regs->dtknqr3_dthrctl;
++		DWC_PRINT("DTKNQR3_DTHRCTL @0x%08X : 0x%08X\n",
++			(uint32_t)addr, dwc_read_reg32(addr));
++	}
++/*
++	if (core_if->hwcfg2.b.dev_token_q_depth > 22) {
++		addr=&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk;
++		DWC_PRINT("DTKNQR4 @0x%08X : 0x%08X\n",
++			(uint32_t)addr, dwc_read_reg32(addr));
++	}
++*/
++	addr=&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk;
++	DWC_PRINT("FIFOEMPMSK @0x%08X : 0x%08X\n", (uint32_t)addr, dwc_read_reg32(addr));
++
++	addr=&core_if->dev_if->dev_global_regs->deachint;
++	DWC_PRINT("DEACHINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++	addr=&core_if->dev_if->dev_global_regs->deachintmsk;
++	DWC_PRINT("DEACHINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++
++	for (i=0; i<= core_if->dev_if->num_in_eps; i++) {
++		addr=&core_if->dev_if->dev_global_regs->diepeachintmsk[i];
++		DWC_PRINT("DIEPEACHINTMSK[%d] @0x%08X : 0x%08X\n", i, (uint32_t)addr, dwc_read_reg32(addr));
++	}
++
++
++	for (i=0; i<= core_if->dev_if->num_out_eps; i++) {
++		addr=&core_if->dev_if->dev_global_regs->doepeachintmsk[i];
++		DWC_PRINT("DOEPEACHINTMSK[%d] @0x%08X : 0x%08X\n", i, (uint32_t)addr, dwc_read_reg32(addr));
++	}
++
++	for (i=0; i<= core_if->dev_if->num_in_eps; i++) {
++		DWC_PRINT("Device IN EP %d Registers\n", i);
++		addr=&core_if->dev_if->in_ep_regs[i]->diepctl;
++		DWC_PRINT("DIEPCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++		addr=&core_if->dev_if->in_ep_regs[i]->diepint;
++		DWC_PRINT("DIEPINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++		addr=&core_if->dev_if->in_ep_regs[i]->dieptsiz;
++		DWC_PRINT("DIETSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++		addr=&core_if->dev_if->in_ep_regs[i]->diepdma;
++		DWC_PRINT("DIEPDMA @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++
addr=&core_if->dev_if->in_ep_regs[i]->dtxfsts; ++ DWC_PRINT("DTXFSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->dev_if->in_ep_regs[i]->diepdmab; ++ DWC_PRINT("DIEPDMAB @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ } ++ ++ ++ for (i=0; i<= core_if->dev_if->num_out_eps; i++) { ++ DWC_PRINT("Device OUT EP %d Registers\n", i); ++ addr=&core_if->dev_if->out_ep_regs[i]->doepctl; ++ DWC_PRINT("DOEPCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->dev_if->out_ep_regs[i]->doepfn; ++ DWC_PRINT("DOEPFN @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->dev_if->out_ep_regs[i]->doepint; ++ DWC_PRINT("DOEPINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->dev_if->out_ep_regs[i]->doeptsiz; ++ DWC_PRINT("DOETSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->dev_if->out_ep_regs[i]->doepdma; ++ DWC_PRINT("DOEPDMA @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ addr=&core_if->dev_if->out_ep_regs[i]->doepdmab; ++ DWC_PRINT("DOEPDMAB @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); ++ ++ } ++ ++ ++ ++ return; ++} ++ ++/** ++ * This functions reads the SPRAM and prints its content ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ */ ++void dwc_otg_dump_spram(dwc_otg_core_if_t *core_if) ++{ ++ volatile uint8_t *addr, *start_addr, *end_addr; ++ ++ DWC_PRINT("SPRAM Data:\n"); ++ start_addr = (void*)core_if->core_global_regs; ++ DWC_PRINT("Base Address: 0x%8X\n", (uint32_t)start_addr); ++ start_addr += 0x00028000; ++ end_addr=(void*)core_if->core_global_regs; ++ end_addr += 0x000280e0; ++ ++ for(addr = start_addr; addr < end_addr; addr+=16) ++ { ++ DWC_PRINT("0x%8X:\t%2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X\n", (uint32_t)addr, ++ addr[0], ++ addr[1], ++ addr[2], ++ addr[3], ++ addr[4], ++ addr[5], ++ addr[6], ++ addr[7], ++ addr[8], ++ addr[9], ++ addr[10], ++ addr[11], ++ addr[12], ++ addr[13], ++ addr[14], ++ addr[15] ++ ); ++ } ++ ++ return; ++} ++/** ++ * This function reads the host registers and prints them ++ * ++ * @param core_if Programming view of DWC_otg controller. 
++ */
++void dwc_otg_dump_host_registers(dwc_otg_core_if_t *core_if)
++{
++	int i;
++	volatile uint32_t *addr;
++
++	DWC_PRINT("Host Global Registers\n");
++	addr=&core_if->host_if->host_global_regs->hcfg;
++	DWC_PRINT("HCFG @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++	addr=&core_if->host_if->host_global_regs->hfir;
++	DWC_PRINT("HFIR @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++	addr=&core_if->host_if->host_global_regs->hfnum;
++	DWC_PRINT("HFNUM @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++	addr=&core_if->host_if->host_global_regs->hptxsts;
++	DWC_PRINT("HPTXSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++	addr=&core_if->host_if->host_global_regs->haint;
++	DWC_PRINT("HAINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++	addr=&core_if->host_if->host_global_regs->haintmsk;
++	DWC_PRINT("HAINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++	addr=core_if->host_if->hprt0;
++	DWC_PRINT("HPRT0 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++
++	for (i=0; i<core_if->core_params->host_channels; i++)
++	{
++		DWC_PRINT("Host Channel %d Specific Registers\n", i);
++		addr=&core_if->host_if->hc_regs[i]->hcchar;
++		DWC_PRINT("HCCHAR @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++		addr=&core_if->host_if->hc_regs[i]->hcsplt;
++		DWC_PRINT("HCSPLT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++		addr=&core_if->host_if->hc_regs[i]->hcint;
++		DWC_PRINT("HCINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++		addr=&core_if->host_if->hc_regs[i]->hcintmsk;
++		DWC_PRINT("HCINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++		addr=&core_if->host_if->hc_regs[i]->hctsiz;
++		DWC_PRINT("HCTSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++		addr=&core_if->host_if->hc_regs[i]->hcdma;
++		DWC_PRINT("HCDMA @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++	}
++	return;
++}
++
++/**
++ * This function reads the core global registers and prints them
++ *
++ * @param core_if Programming view of DWC_otg controller.
++ */
++void dwc_otg_dump_global_registers(dwc_otg_core_if_t *core_if)
++{
++	int i;
++	volatile uint32_t *addr;
++
++	DWC_PRINT("Core Global Registers\n");
++	addr=&core_if->core_global_regs->gotgctl;
++	DWC_PRINT("GOTGCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++	addr=&core_if->core_global_regs->gotgint;
++	DWC_PRINT("GOTGINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++	addr=&core_if->core_global_regs->gahbcfg;
++	DWC_PRINT("GAHBCFG @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++	addr=&core_if->core_global_regs->gusbcfg;
++	DWC_PRINT("GUSBCFG @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++	addr=&core_if->core_global_regs->grstctl;
++	DWC_PRINT("GRSTCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++	addr=&core_if->core_global_regs->gintsts;
++	DWC_PRINT("GINTSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++	addr=&core_if->core_global_regs->gintmsk;
++	DWC_PRINT("GINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++	addr=&core_if->core_global_regs->grxstsr;
++	DWC_PRINT("GRXSTSR @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++	//addr=&core_if->core_global_regs->grxstsp;
++	//DWC_PRINT("GRXSTSP @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++	addr=&core_if->core_global_regs->grxfsiz;
++	DWC_PRINT("GRXFSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++	addr=&core_if->core_global_regs->gnptxfsiz;
++	DWC_PRINT("GNPTXFSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++	addr=&core_if->core_global_regs->gnptxsts;
++	DWC_PRINT("GNPTXSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++	addr=&core_if->core_global_regs->gi2cctl;
++	DWC_PRINT("GI2CCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++	addr=&core_if->core_global_regs->gpvndctl;
++	DWC_PRINT("GPVNDCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++	addr=&core_if->core_global_regs->ggpio;
++	DWC_PRINT("GGPIO @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++	addr=&core_if->core_global_regs->guid;
++	DWC_PRINT("GUID @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++	addr=&core_if->core_global_regs->gsnpsid;
++	DWC_PRINT("GSNPSID @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++	addr=&core_if->core_global_regs->ghwcfg1;
++	DWC_PRINT("GHWCFG1 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++	addr=&core_if->core_global_regs->ghwcfg2;
++	DWC_PRINT("GHWCFG2 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++	addr=&core_if->core_global_regs->ghwcfg3;
++	DWC_PRINT("GHWCFG3 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++	addr=&core_if->core_global_regs->ghwcfg4;
++	DWC_PRINT("GHWCFG4 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++	addr=&core_if->core_global_regs->hptxfsiz;
++	DWC_PRINT("HPTXFSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr));
++
++	for (i=0; i<core_if->hwcfg4.b.num_dev_perio_in_ep; i++)
++	{
++		addr=&core_if->core_global_regs->dptxfsiz_dieptxf[i];
++		DWC_PRINT("DPTXFSIZ[%d] @0x%08X : 0x%08X\n",i,(uint32_t)addr,dwc_read_reg32(addr));
++	}
++}
++
++/**
++ * Flush a Tx FIFO.
++ *
++ * @param core_if Programming view of DWC_otg controller.
++ * @param num Tx FIFO to flush.
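A note on the num parameter of the flush routine that follows: individual Tx FIFOs are selected by number, and the value 0x10 is the GRSTCTL.TxFNum "flush all transmit FIFOs" encoding (an assumption taken from the Synopsys register description, not shown in this patch). Typical CIL usage after reconfiguring the FIFO layout would then be:

/* Illustrative sketch only: flush everything after a FIFO re-size.
 * The 0x10 "flush all Tx FIFOs" value is assumed from the databook. */
static void flush_all_fifos(dwc_otg_core_if_t *core_if)
{
	dwc_otg_flush_tx_fifo(core_if, 0x10);	/* all transmit FIFOs */
	dwc_otg_flush_rx_fifo(core_if);		/* receive FIFO */
}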
++ */ ++void dwc_otg_flush_tx_fifo(dwc_otg_core_if_t *core_if, ++ const int num) ++{ ++ dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs; ++ volatile grstctl_t greset = { .d32 = 0}; ++ int count = 0; ++ ++ DWC_DEBUGPL((DBG_CIL|DBG_PCDV), "Flush Tx FIFO %d\n", num); ++ ++ greset.b.txfflsh = 1; ++ greset.b.txfnum = num; ++ dwc_write_reg32(&global_regs->grstctl, greset.d32); ++ ++ do { ++ greset.d32 = dwc_read_reg32(&global_regs->grstctl); ++ if (++count > 10000) { ++ DWC_WARN("%s() HANG! GRSTCTL=%0x GNPTXSTS=0x%08x\n", ++ __func__, greset.d32, ++ dwc_read_reg32(&global_regs->gnptxsts)); ++ break; ++ } ++ } ++ while (greset.b.txfflsh == 1); ++ ++ /* Wait for 3 PHY Clocks*/ ++ UDELAY(1); ++} ++ ++/** ++ * Flush Rx FIFO. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ */ ++void dwc_otg_flush_rx_fifo(dwc_otg_core_if_t *core_if) ++{ ++ dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs; ++ volatile grstctl_t greset = { .d32 = 0}; ++ int count = 0; ++ ++ DWC_DEBUGPL((DBG_CIL|DBG_PCDV), "%s\n", __func__); ++ /* ++ * ++ */ ++ greset.b.rxfflsh = 1; ++ dwc_write_reg32(&global_regs->grstctl, greset.d32); ++ ++ do { ++ greset.d32 = dwc_read_reg32(&global_regs->grstctl); ++ if (++count > 10000) { ++ DWC_WARN("%s() HANG! GRSTCTL=%0x\n", __func__, ++ greset.d32); ++ break; ++ } ++ } ++ while (greset.b.rxfflsh == 1); ++ ++ /* Wait for 3 PHY Clocks*/ ++ UDELAY(1); ++} ++ ++/** ++ * Do core a soft reset of the core. Be careful with this because it ++ * resets all the internal state machines of the core. ++ */ ++void dwc_otg_core_reset(dwc_otg_core_if_t *core_if) ++{ ++ dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs; ++ volatile grstctl_t greset = { .d32 = 0}; ++ int count = 0; ++ ++ DWC_DEBUGPL(DBG_CILV, "%s\n", __func__); ++ /* Wait for AHB master IDLE state. */ ++ do { ++ UDELAY(10); ++ greset.d32 = dwc_read_reg32(&global_regs->grstctl); ++ if (++count > 100000) { ++ DWC_WARN("%s() HANG! AHB Idle GRSTCTL=%0x\n", __func__, ++ greset.d32); ++ return; ++ } ++ } ++ while (greset.b.ahbidle == 0); ++ ++ /* Core Soft Reset */ ++ count = 0; ++ greset.b.csftrst = 1; ++ dwc_write_reg32(&global_regs->grstctl, greset.d32); ++ do { ++ greset.d32 = dwc_read_reg32(&global_regs->grstctl); ++ if (++count > 10000) { ++ DWC_WARN("%s() HANG! Soft Reset GRSTCTL=%0x\n", __func__, ++ greset.d32); ++ break; ++ } ++ } ++ while (greset.b.csftrst == 1); ++ ++ /* Wait for 3 PHY Clocks*/ ++ MDELAY(100); ++} ++ ++ ++ ++/** ++ * Register HCD callbacks. The callbacks are used to start and stop ++ * the HCD for interrupt processing. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param cb the HCD callback structure. ++ * @param p pointer to be passed to callback function (usb_hcd*). ++ */ ++void dwc_otg_cil_register_hcd_callbacks(dwc_otg_core_if_t *core_if, ++ dwc_otg_cil_callbacks_t *cb, ++ void *p) ++{ ++ core_if->hcd_cb = cb; ++ cb->p = p; ++} ++ ++/** ++ * Register PCD callbacks. The callbacks are used to start and stop ++ * the PCD for interrupt processing. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param cb the PCD callback structure. ++ * @param p pointer to be passed to callback function (pcd*). 
++ */ ++void dwc_otg_cil_register_pcd_callbacks(dwc_otg_core_if_t *core_if, ++ dwc_otg_cil_callbacks_t *cb, ++ void *p) ++{ ++ core_if->pcd_cb = cb; ++ cb->p = p; ++} ++ ++#ifdef DWC_EN_ISOC ++ ++/** ++ * This function writes isoc data per 1 (micro)frame into tx fifo ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param ep The EP to start the transfer on. ++ * ++ */ ++void write_isoc_frame_data(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) ++{ ++ dwc_otg_dev_in_ep_regs_t *ep_regs; ++ dtxfsts_data_t txstatus = {.d32 = 0}; ++ uint32_t len = 0; ++ uint32_t dwords; ++ ++ ep->xfer_len = ep->data_per_frame; ++ ep->xfer_count = 0; ++ ++ ep_regs = core_if->dev_if->in_ep_regs[ep->num]; ++ ++ len = ep->xfer_len - ep->xfer_count; ++ ++ if (len > ep->maxpacket) { ++ len = ep->maxpacket; ++ } ++ ++ dwords = (len + 3)/4; ++ ++ /* While there is space in the queue and space in the FIFO and ++ * More data to tranfer, Write packets to the Tx FIFO */ ++ txstatus.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dtxfsts); ++ DWC_DEBUGPL(DBG_PCDV, "b4 dtxfsts[%d]=0x%08x\n",ep->num,txstatus.d32); ++ ++ while (txstatus.b.txfspcavail > dwords && ++ ep->xfer_count < ep->xfer_len && ++ ep->xfer_len != 0) { ++ /* Write the FIFO */ ++ dwc_otg_ep_write_packet(core_if, ep, 0); ++ ++ len = ep->xfer_len - ep->xfer_count; ++ if (len > ep->maxpacket) { ++ len = ep->maxpacket; ++ } ++ ++ dwords = (len + 3)/4; ++ txstatus.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dtxfsts); ++ DWC_DEBUGPL(DBG_PCDV,"dtxfsts[%d]=0x%08x\n", ep->num, txstatus.d32); ++ } ++} ++ ++ ++/** ++ * This function initializes a descriptor chain for Isochronous transfer ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param ep The EP to start the transfer on. ++ * ++ */ ++void dwc_otg_iso_ep_start_frm_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) ++{ ++ deptsiz_data_t deptsiz = { .d32 = 0 }; ++ depctl_data_t depctl = { .d32 = 0 }; ++ dsts_data_t dsts = { .d32 = 0 }; ++ volatile uint32_t *addr; ++ ++ if(ep->is_in) { ++ addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl; ++ } else { ++ addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl; ++ } ++ ++ ep->xfer_len = ep->data_per_frame; ++ ep->xfer_count = 0; ++ ep->xfer_buff = ep->cur_pkt_addr; ++ ep->dma_addr = ep->cur_pkt_dma_addr; ++ ++ if(ep->is_in) { ++ /* Program the transfer size and packet count ++ * as follows: xfersize = N * maxpacket + ++ * short_packet pktcnt = N + (short_packet ++ * exist ? 
1 : 0) ++ */ ++ deptsiz.b.xfersize = ep->xfer_len; ++ deptsiz.b.pktcnt = ++ (ep->xfer_len - 1 + ep->maxpacket) / ++ ep->maxpacket; ++ deptsiz.b.mc = deptsiz.b.pktcnt; ++ dwc_write_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dieptsiz, deptsiz.d32); ++ ++ /* Write the DMA register */ ++ if (core_if->dma_enable) { ++ dwc_write_reg32 (&(core_if->dev_if->in_ep_regs[ep->num]->diepdma), (uint32_t)ep->dma_addr); ++ } ++ } else { ++ deptsiz.b.pktcnt = ++ (ep->xfer_len + (ep->maxpacket - 1)) / ++ ep->maxpacket; ++ deptsiz.b.xfersize = deptsiz.b.pktcnt * ep->maxpacket; ++ ++ dwc_write_reg32(&core_if->dev_if->out_ep_regs[ep->num]->doeptsiz, deptsiz.d32); ++ ++ if (core_if->dma_enable) { ++ dwc_write_reg32 (&(core_if->dev_if->out_ep_regs[ep->num]->doepdma), ++ (uint32_t)ep->dma_addr); ++ } ++ } ++ ++ ++ /** Enable endpoint, clear nak */ ++ ++ depctl.d32 = 0; ++ if(ep->bInterval == 1) { ++ dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts); ++ ep->next_frame = dsts.b.soffn + ep->bInterval; ++ ++ if(ep->next_frame & 0x1) { ++ depctl.b.setd1pid = 1; ++ } else { ++ depctl.b.setd0pid = 1; ++ } ++ } else { ++ ep->next_frame += ep->bInterval; ++ ++ if(ep->next_frame & 0x1) { ++ depctl.b.setd1pid = 1; ++ } else { ++ depctl.b.setd0pid = 1; ++ } ++ } ++ depctl.b.epena = 1; ++ depctl.b.cnak = 1; ++ ++ dwc_modify_reg32(addr, 0, depctl.d32); ++ depctl.d32 = dwc_read_reg32(addr); ++ ++ if(ep->is_in && core_if->dma_enable == 0) { ++ write_isoc_frame_data(core_if, ep); ++ } ++ ++} ++ ++#endif //DWC_EN_ISOC +--- /dev/null ++++ b/drivers/usb/dwc_otg/dwc_otg_cil.h +@@ -0,0 +1,1098 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_cil.h $ ++ * $Revision: 1.2 $ ++ * $Date: 2008-11-21 05:39:15 $ ++ * $Change: 1099526 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. 
++ * ========================================================================== */ ++ ++#if !defined(__DWC_CIL_H__) ++#define __DWC_CIL_H__ ++ ++#include ++#include ++#include ++ ++#include "linux/dwc_otg_plat.h" ++#include "dwc_otg_regs.h" ++#ifdef DEBUG ++#include "linux/timer.h" ++#endif ++ ++/** ++ * @file ++ * This file contains the interface to the Core Interface Layer. ++ */ ++ ++ ++/** Macros defined for DWC OTG HW Release verison */ ++#define OTG_CORE_REV_2_00 0x4F542000 ++#define OTG_CORE_REV_2_60a 0x4F54260A ++#define OTG_CORE_REV_2_71a 0x4F54271A ++#define OTG_CORE_REV_2_72a 0x4F54272A ++ ++/** ++*/ ++typedef struct iso_pkt_info ++{ ++ uint32_t offset; ++ uint32_t length; ++ int32_t status; ++} iso_pkt_info_t; ++/** ++ * The dwc_ep structure represents the state of a single ++ * endpoint when acting in device mode. It contains the data items ++ * needed for an endpoint to be activated and transfer packets. ++ */ ++typedef struct dwc_ep ++{ ++ /** EP number used for register address lookup */ ++ uint8_t num; ++ /** EP direction 0 = OUT */ ++ unsigned is_in : 1; ++ /** EP active. */ ++ unsigned active : 1; ++ ++ /** Periodic Tx FIFO # for IN EPs For INTR EP set to 0 to use non-periodic Tx FIFO ++ If dedicated Tx FIFOs are enabled for all IN Eps - Tx FIFO # FOR IN EPs*/ ++ unsigned tx_fifo_num : 4; ++ /** EP type: 0 - Control, 1 - ISOC, 2 - BULK, 3 - INTR */ ++ unsigned type : 2; ++#define DWC_OTG_EP_TYPE_CONTROL 0 ++#define DWC_OTG_EP_TYPE_ISOC 1 ++#define DWC_OTG_EP_TYPE_BULK 2 ++#define DWC_OTG_EP_TYPE_INTR 3 ++ ++ /** DATA start PID for INTR and BULK EP */ ++ unsigned data_pid_start : 1; ++ /** Frame (even/odd) for ISOC EP */ ++ unsigned even_odd_frame : 1; ++ /** Max Packet bytes */ ++ unsigned maxpacket : 11; ++ ++ /** Max Transfer size */ ++ unsigned maxxfer : 16; ++ ++ /** @name Transfer state */ ++ /** @{ */ ++ ++ /** ++ * Pointer to the beginning of the transfer buffer -- do not modify ++ * during transfer. ++ */ ++ ++ uint32_t dma_addr; ++ ++ uint32_t dma_desc_addr; ++ dwc_otg_dma_desc_t* desc_addr; ++ ++ ++ uint8_t *start_xfer_buff; ++ /** pointer to the transfer buffer */ ++ uint8_t *xfer_buff; ++ /** Number of bytes to transfer */ ++ unsigned xfer_len : 19; ++ /** Number of bytes transferred. 
*/ ++ unsigned xfer_count : 19; ++ /** Sent ZLP */ ++ unsigned sent_zlp : 1; ++ /** Total len for control transfer */ ++ unsigned total_len : 19; ++ ++ /** stall clear flag */ ++ unsigned stall_clear_flag : 1; ++ ++ /** Allocated DMA Desc count */ ++ uint32_t desc_cnt; ++ ++#ifdef DWC_EN_ISOC ++ /** ++ * Variables specific for ISOC EPs ++ * ++ */ ++ /** DMA addresses of ISOC buffers */ ++ uint32_t dma_addr0; ++ uint32_t dma_addr1; ++ ++ uint32_t iso_dma_desc_addr; ++ dwc_otg_dma_desc_t* iso_desc_addr; ++ ++ /** pointer to the transfer buffers */ ++ uint8_t *xfer_buff0; ++ uint8_t *xfer_buff1; ++ ++ /** number of ISOC Buffer is processing */ ++ uint32_t proc_buf_num; ++ /** Interval of ISOC Buffer processing */ ++ uint32_t buf_proc_intrvl; ++ /** Data size for regular frame */ ++ uint32_t data_per_frame; ++ ++ /* todo - pattern data support is to be implemented in the future */ ++ /** Data size for pattern frame */ ++ uint32_t data_pattern_frame; ++ /** Frame number of pattern data */ ++ uint32_t sync_frame; ++ ++ /** bInterval */ ++ uint32_t bInterval; ++ /** ISO Packet number per frame */ ++ uint32_t pkt_per_frm; ++ /** Next frame num for which will be setup DMA Desc */ ++ uint32_t next_frame; ++ /** Number of packets per buffer processing */ ++ uint32_t pkt_cnt; ++ /** Info for all isoc packets */ ++ iso_pkt_info_t *pkt_info; ++ /** current pkt number */ ++ uint32_t cur_pkt; ++ /** current pkt number */ ++ uint8_t *cur_pkt_addr; ++ /** current pkt number */ ++ uint32_t cur_pkt_dma_addr; ++#endif //DWC_EN_ISOC ++/** @} */ ++} dwc_ep_t; ++ ++/* ++ * Reasons for halting a host channel. ++ */ ++typedef enum dwc_otg_halt_status ++{ ++ DWC_OTG_HC_XFER_NO_HALT_STATUS, ++ DWC_OTG_HC_XFER_COMPLETE, ++ DWC_OTG_HC_XFER_URB_COMPLETE, ++ DWC_OTG_HC_XFER_ACK, ++ DWC_OTG_HC_XFER_NAK, ++ DWC_OTG_HC_XFER_NYET, ++ DWC_OTG_HC_XFER_STALL, ++ DWC_OTG_HC_XFER_XACT_ERR, ++ DWC_OTG_HC_XFER_FRAME_OVERRUN, ++ DWC_OTG_HC_XFER_BABBLE_ERR, ++ DWC_OTG_HC_XFER_DATA_TOGGLE_ERR, ++ DWC_OTG_HC_XFER_AHB_ERR, ++ DWC_OTG_HC_XFER_PERIODIC_INCOMPLETE, ++ DWC_OTG_HC_XFER_URB_DEQUEUE ++} dwc_otg_halt_status_e; ++ ++/** ++ * Host channel descriptor. This structure represents the state of a single ++ * host channel when acting in host mode. It contains the data items needed to ++ * transfer packets to an endpoint via a host channel. ++ */ ++typedef struct dwc_hc ++{ ++ /** Host channel number used for register address lookup */ ++ uint8_t hc_num; ++ ++ /** Device to access */ ++ unsigned dev_addr : 7; ++ ++ /** EP to access */ ++ unsigned ep_num : 4; ++ ++ /** EP direction. 0: OUT, 1: IN */ ++ unsigned ep_is_in : 1; ++ ++ /** ++ * EP speed. ++ * One of the following values: ++ * - DWC_OTG_EP_SPEED_LOW ++ * - DWC_OTG_EP_SPEED_FULL ++ * - DWC_OTG_EP_SPEED_HIGH ++ */ ++ unsigned speed : 2; ++#define DWC_OTG_EP_SPEED_LOW 0 ++#define DWC_OTG_EP_SPEED_FULL 1 ++#define DWC_OTG_EP_SPEED_HIGH 2 ++ ++ /** ++ * Endpoint type. ++ * One of the following values: ++ * - DWC_OTG_EP_TYPE_CONTROL: 0 ++ * - DWC_OTG_EP_TYPE_ISOC: 1 ++ * - DWC_OTG_EP_TYPE_BULK: 2 ++ * - DWC_OTG_EP_TYPE_INTR: 3 ++ */ ++ unsigned ep_type : 2; ++ ++ /** Max packet size in bytes */ ++ unsigned max_packet : 11; ++ ++ /** ++ * PID for initial transaction. ++ * 0: DATA0,
++ * 1: DATA2,
++ * 2: DATA1,
++ * 3: MDATA (non-Control EP), ++ * SETUP (Control EP) ++ */ ++ unsigned data_pid_start : 2; ++#define DWC_OTG_HC_PID_DATA0 0 ++#define DWC_OTG_HC_PID_DATA2 1 ++#define DWC_OTG_HC_PID_DATA1 2 ++#define DWC_OTG_HC_PID_MDATA 3 ++#define DWC_OTG_HC_PID_SETUP 3 ++ ++ /** Number of periodic transactions per (micro)frame */ ++ unsigned multi_count: 2; ++ ++ /** @name Transfer State */ ++ /** @{ */ ++ ++ /** Pointer to the current transfer buffer position. */ ++ uint8_t *xfer_buff; ++ /** Total number of bytes to transfer. */ ++ uint32_t xfer_len; ++ /** Number of bytes transferred so far. */ ++ uint32_t xfer_count; ++ /** Packet count at start of transfer.*/ ++ uint16_t start_pkt_count; ++ ++ /** ++ * Flag to indicate whether the transfer has been started. Set to 1 if ++ * it has been started, 0 otherwise. ++ */ ++ uint8_t xfer_started; ++ ++ /** ++ * Set to 1 to indicate that a PING request should be issued on this ++ * channel. If 0, process normally. ++ */ ++ uint8_t do_ping; ++ ++ /** ++ * Set to 1 to indicate that the error count for this transaction is ++ * non-zero. Set to 0 if the error count is 0. ++ */ ++ uint8_t error_state; ++ ++ /** ++ * Set to 1 to indicate that this channel should be halted the next ++ * time a request is queued for the channel. This is necessary in ++ * slave mode if no request queue space is available when an attempt ++ * is made to halt the channel. ++ */ ++ uint8_t halt_on_queue; ++ ++ /** ++ * Set to 1 if the host channel has been halted, but the core is not ++ * finished flushing queued requests. Otherwise 0. ++ */ ++ uint8_t halt_pending; ++ ++ /** ++ * Reason for halting the host channel. ++ */ ++ dwc_otg_halt_status_e halt_status; ++ ++ /* ++ * Split settings for the host channel ++ */ ++ uint8_t do_split; /**< Enable split for the channel */ ++ uint8_t complete_split; /**< Enable complete split */ ++ uint8_t hub_addr; /**< Address of high speed hub */ ++ ++ uint8_t port_addr; /**< Port of the low/full speed device */ ++ /** Split transaction position ++ * One of the following values: ++ * - DWC_HCSPLIT_XACTPOS_MID ++ * - DWC_HCSPLIT_XACTPOS_BEGIN ++ * - DWC_HCSPLIT_XACTPOS_END ++ * - DWC_HCSPLIT_XACTPOS_ALL */ ++ uint8_t xact_pos; ++ ++ /** Set when the host channel does a short read. */ ++ uint8_t short_read; ++ ++ /** ++ * Number of requests issued for this channel since it was assigned to ++ * the current transfer (not counting PINGs). ++ */ ++ uint8_t requests; ++ ++ /** ++ * Queue Head for the transfer being processed by this channel. ++ */ ++ struct dwc_otg_qh *qh; ++ ++ /** @} */ ++ ++ /** Entry in list of host channels. */ ++ struct list_head hc_list_entry; ++} dwc_hc_t; ++ ++/** ++ * The following parameters may be specified when starting the module. These ++ * parameters define how the DWC_otg controller should be configured. ++ * Parameter values are passed to the CIL initialization function ++ * dwc_otg_cil_init. ++ */ ++typedef struct dwc_otg_core_params ++{ ++ int32_t opt; ++#define dwc_param_opt_default 1 ++ ++ /** ++ * Specifies the OTG capabilities. The driver will automatically ++ * detect the value for this parameter if none is specified. 
++ * 0 - HNP and SRP capable (default) ++ * 1 - SRP Only capable ++ * 2 - No HNP/SRP capable ++ */ ++ int32_t otg_cap; ++#define DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE 0 ++#define DWC_OTG_CAP_PARAM_SRP_ONLY_CAPABLE 1 ++#define DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE 2 ++#define dwc_param_otg_cap_default DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE ++ ++ /** ++ * Specifies whether to use slave or DMA mode for accessing the data ++ * FIFOs. The driver will automatically detect the value for this ++ * parameter if none is specified. ++ * 0 - Slave ++ * 1 - DMA (default, if available) ++ */ ++ int32_t dma_enable; ++#define dwc_param_dma_enable_default 1 ++ ++ /** ++ * When DMA mode is enabled specifies whether to use address DMA or DMA Descritor mode for accessing the data ++ * FIFOs in device mode. The driver will automatically detect the value for this ++ * parameter if none is specified. ++ * 0 - address DMA ++ * 1 - DMA Descriptor(default, if available) ++ */ ++ int32_t dma_desc_enable; ++#define dwc_param_dma_desc_enable_default 0 ++ /** The DMA Burst size (applicable only for External DMA ++ * Mode). 1, 4, 8 16, 32, 64, 128, 256 (default 32) ++ */ ++ int32_t dma_burst_size; /* Translate this to GAHBCFG values */ ++#define dwc_param_dma_burst_size_default 32 ++ ++ /** ++ * Specifies the maximum speed of operation in host and device mode. ++ * The actual speed depends on the speed of the attached device and ++ * the value of phy_type. The actual speed depends on the speed of the ++ * attached device. ++ * 0 - High Speed (default) ++ * 1 - Full Speed ++ */ ++ int32_t speed; ++#define dwc_param_speed_default 0 ++#define DWC_SPEED_PARAM_HIGH 0 ++#define DWC_SPEED_PARAM_FULL 1 ++ ++ /** Specifies whether low power mode is supported when attached ++ * to a Full Speed or Low Speed device in host mode. ++ * 0 - Don't support low power mode (default) ++ * 1 - Support low power mode ++ */ ++ int32_t host_support_fs_ls_low_power; ++#define dwc_param_host_support_fs_ls_low_power_default 0 ++ ++ /** Specifies the PHY clock rate in low power mode when connected to a ++ * Low Speed device in host mode. This parameter is applicable only if ++ * HOST_SUPPORT_FS_LS_LOW_POWER is enabled. If PHY_TYPE is set to FS ++ * then defaults to 6 MHZ otherwise 48 MHZ. ++ * ++ * 0 - 48 MHz ++ * 1 - 6 MHz ++ */ ++ int32_t host_ls_low_power_phy_clk; ++#define dwc_param_host_ls_low_power_phy_clk_default 0 ++#define DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ 0 ++#define DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ 1 ++ ++ /** ++ * 0 - Use cC FIFO size parameters ++ * 1 - Allow dynamic FIFO sizing (default) ++ */ ++ int32_t enable_dynamic_fifo; ++#define dwc_param_enable_dynamic_fifo_default 1 ++ ++ /** Total number of 4-byte words in the data FIFO memory. This ++ * memory includes the Rx FIFO, non-periodic Tx FIFO, and periodic ++ * Tx FIFOs. ++ * 32 to 32768 (default 8192) ++ * Note: The total FIFO memory depth in the FPGA configuration is 8192. ++ */ ++ int32_t data_fifo_size; ++#define dwc_param_data_fifo_size_default 8192 ++ ++ /** Number of 4-byte words in the Rx FIFO in device mode when dynamic ++ * FIFO sizing is enabled. ++ * 16 to 32768 (default 1064) ++ */ ++ int32_t dev_rx_fifo_size; ++#define dwc_param_dev_rx_fifo_size_default 1064 ++ ++ /** Number of 4-byte words in the non-periodic Tx FIFO in device mode ++ * when dynamic FIFO sizing is enabled. 
++ * 16 to 32768 (default 1024) ++ */ ++ int32_t dev_nperio_tx_fifo_size; ++#define dwc_param_dev_nperio_tx_fifo_size_default 1024 ++ ++ /** Number of 4-byte words in each of the periodic Tx FIFOs in device ++ * mode when dynamic FIFO sizing is enabled. ++ * 4 to 768 (default 256) ++ */ ++ uint32_t dev_perio_tx_fifo_size[MAX_PERIO_FIFOS]; ++#define dwc_param_dev_perio_tx_fifo_size_default 256 ++ ++ /** Number of 4-byte words in the Rx FIFO in host mode when dynamic ++ * FIFO sizing is enabled. ++ * 16 to 32768 (default 1024) ++ */ ++ int32_t host_rx_fifo_size; ++#define dwc_param_host_rx_fifo_size_default 1024 ++ ++ /** Number of 4-byte words in the non-periodic Tx FIFO in host mode ++ * when Dynamic FIFO sizing is enabled in the core. ++ * 16 to 32768 (default 1024) ++ */ ++ int32_t host_nperio_tx_fifo_size; ++#define dwc_param_host_nperio_tx_fifo_size_default 1024 ++ ++ /** Number of 4-byte words in the host periodic Tx FIFO when dynamic ++ * FIFO sizing is enabled. ++ * 16 to 32768 (default 1024) ++ */ ++ int32_t host_perio_tx_fifo_size; ++#define dwc_param_host_perio_tx_fifo_size_default 1024 ++ ++ /** The maximum transfer size supported in bytes. ++ * 2047 to 65,535 (default 65,535) ++ */ ++ int32_t max_transfer_size; ++#define dwc_param_max_transfer_size_default 65535 ++ ++ /** The maximum number of packets in a transfer. ++ * 15 to 511 (default 511) ++ */ ++ int32_t max_packet_count; ++#define dwc_param_max_packet_count_default 511 ++ ++ /** The number of host channel registers to use. ++ * 1 to 16 (default 12) ++ * Note: The FPGA configuration supports a maximum of 12 host channels. ++ */ ++ int32_t host_channels; ++#define dwc_param_host_channels_default 12 ++ ++ /** The number of endpoints in addition to EP0 available for device ++ * mode operations. ++ * 1 to 15 (default 6 IN and OUT) ++ * Note: The FPGA configuration supports a maximum of 6 IN and OUT ++ * endpoints in addition to EP0. ++ */ ++ int32_t dev_endpoints; ++#define dwc_param_dev_endpoints_default 6 ++ ++ /** ++ * Specifies the type of PHY interface to use. By default, the driver ++ * will automatically detect the phy_type. ++ * ++ * 0 - Full Speed PHY ++ * 1 - UTMI+ (default) ++ * 2 - ULPI ++ */ ++ int32_t phy_type; ++#define DWC_PHY_TYPE_PARAM_FS 0 ++#define DWC_PHY_TYPE_PARAM_UTMI 1 ++#define DWC_PHY_TYPE_PARAM_ULPI 2 ++#define dwc_param_phy_type_default DWC_PHY_TYPE_PARAM_UTMI ++ ++ /** ++ * Specifies the UTMI+ Data Width. This parameter is ++ * applicable for a PHY_TYPE of UTMI+ or ULPI. (For a ULPI ++ * PHY_TYPE, this parameter indicates the data width between ++ * the MAC and the ULPI Wrapper.) Also, this parameter is ++ * applicable only if the OTG_HSPHY_WIDTH cC parameter was set ++ * to "8 and 16 bits", meaning that the core has been ++ * configured to work at either data path width. ++ * ++ * 8 or 16 bits (default 16) ++ */ ++ int32_t phy_utmi_width; ++#define dwc_param_phy_utmi_width_default 16 ++ ++ /** ++ * Specifies whether the ULPI operates at double or single ++ * data rate. This parameter is only applicable if PHY_TYPE is ++ * ULPI. ++ * ++ * 0 - single data rate ULPI interface with 8 bit wide data ++ * bus (default) ++ * 1 - double data rate ULPI interface with 4 bit wide data ++ * bus ++ */ ++ int32_t phy_ulpi_ddr; ++#define dwc_param_phy_ulpi_ddr_default 0 ++ ++ /** ++ * Specifies whether to use the internal or external supply to ++ * drive the vbus with a ULPI phy. 
++ */ ++ int32_t phy_ulpi_ext_vbus; ++#define DWC_PHY_ULPI_INTERNAL_VBUS 0 ++#define DWC_PHY_ULPI_EXTERNAL_VBUS 1 ++#define dwc_param_phy_ulpi_ext_vbus_default DWC_PHY_ULPI_INTERNAL_VBUS ++ ++ /** ++ * Specifies whether to use the I2Cinterface for full speed PHY. This ++ * parameter is only applicable if PHY_TYPE is FS. ++ * 0 - No (default) ++ * 1 - Yes ++ */ ++ int32_t i2c_enable; ++#define dwc_param_i2c_enable_default 0 ++ ++ int32_t ulpi_fs_ls; ++#define dwc_param_ulpi_fs_ls_default 0 ++ ++ int32_t ts_dline; ++#define dwc_param_ts_dline_default 0 ++ ++ /** ++ * Specifies whether dedicated transmit FIFOs are ++ * enabled for non periodic IN endpoints in device mode ++ * 0 - No ++ * 1 - Yes ++ */ ++ int32_t en_multiple_tx_fifo; ++#define dwc_param_en_multiple_tx_fifo_default 1 ++ ++ /** Number of 4-byte words in each of the Tx FIFOs in device ++ * mode when dynamic FIFO sizing is enabled. ++ * 4 to 768 (default 256) ++ */ ++ uint32_t dev_tx_fifo_size[MAX_TX_FIFOS]; ++#define dwc_param_dev_tx_fifo_size_default 256 ++ ++ /** Thresholding enable flag- ++ * bit 0 - enable non-ISO Tx thresholding ++ * bit 1 - enable ISO Tx thresholding ++ * bit 2 - enable Rx thresholding ++ */ ++ uint32_t thr_ctl; ++#define dwc_param_thr_ctl_default 0 ++ ++ /** Thresholding length for Tx ++ * FIFOs in 32 bit DWORDs ++ */ ++ uint32_t tx_thr_length; ++#define dwc_param_tx_thr_length_default 64 ++ ++ /** Thresholding length for Rx ++ * FIFOs in 32 bit DWORDs ++ */ ++ uint32_t rx_thr_length; ++#define dwc_param_rx_thr_length_default 64 ++ ++ /** Per Transfer Interrupt ++ * mode enable flag ++ * 1 - Enabled ++ * 0 - Disabled ++ */ ++ uint32_t pti_enable; ++#define dwc_param_pti_enable_default 0 ++ ++ /** Molti Processor Interrupt ++ * mode enable flag ++ * 1 - Enabled ++ * 0 - Disabled ++ */ ++ uint32_t mpi_enable; ++#define dwc_param_mpi_enable_default 0 ++ ++} dwc_otg_core_params_t; ++ ++#ifdef DEBUG ++struct dwc_otg_core_if; ++typedef struct hc_xfer_info ++{ ++ struct dwc_otg_core_if *core_if; ++ dwc_hc_t *hc; ++} hc_xfer_info_t; ++#endif ++ ++/** ++ * The dwc_otg_core_if structure contains information needed to manage ++ * the DWC_otg controller acting in either host or device mode. It ++ * represents the programming view of the controller as a whole. ++ */ ++typedef struct dwc_otg_core_if ++{ ++ /** Parameters that define how the core should be configured.*/ ++ dwc_otg_core_params_t *core_params; ++ ++ /** Core Global registers starting at offset 000h. */ ++ dwc_otg_core_global_regs_t *core_global_regs; ++ ++ /** Device-specific information */ ++ dwc_otg_dev_if_t *dev_if; ++ /** Host-specific information */ ++ dwc_otg_host_if_t *host_if; ++ ++ /** Value from SNPSID register */ ++ uint32_t snpsid; ++ ++ /* ++ * Set to 1 if the core PHY interface bits in USBCFG have been ++ * initialized. 
++ */ ++ uint8_t phy_init_done; ++ ++ /* ++ * SRP Success flag, set by srp success interrupt in FS I2C mode ++ */ ++ uint8_t srp_success; ++ uint8_t srp_timer_started; ++ ++ /* Common configuration information */ ++ /** Power and Clock Gating Control Register */ ++ volatile uint32_t *pcgcctl; ++#define DWC_OTG_PCGCCTL_OFFSET 0xE00 ++ ++ /** Push/pop addresses for endpoints or host channels.*/ ++ uint32_t *data_fifo[MAX_EPS_CHANNELS]; ++#define DWC_OTG_DATA_FIFO_OFFSET 0x1000 ++#define DWC_OTG_DATA_FIFO_SIZE 0x1000 ++ ++ /** Total RAM for FIFOs (Bytes) */ ++ uint16_t total_fifo_size; ++ /** Size of Rx FIFO (Bytes) */ ++ uint16_t rx_fifo_size; ++ /** Size of Non-periodic Tx FIFO (Bytes) */ ++ uint16_t nperio_tx_fifo_size; ++ ++ ++ /** 1 if DMA is enabled, 0 otherwise. */ ++ uint8_t dma_enable; ++ ++ /** 1 if Descriptor DMA mode is enabled, 0 otherwise. */ ++ uint8_t dma_desc_enable; ++ ++ /** 1 if PTI Enhancement mode is enabled, 0 otherwise. */ ++ uint8_t pti_enh_enable; ++ ++ /** 1 if MPI Enhancement mode is enabled, 0 otherwise. */ ++ uint8_t multiproc_int_enable; ++ ++ /** 1 if dedicated Tx FIFOs are enabled, 0 otherwise. */ ++ uint8_t en_multiple_tx_fifo; ++ ++ /** Set to 1 if multiple packets of a high-bandwidth transfer is in ++ * process of being queued */ ++ uint8_t queuing_high_bandwidth; ++ ++ /** Hardware Configuration -- stored here for convenience.*/ ++ hwcfg1_data_t hwcfg1; ++ hwcfg2_data_t hwcfg2; ++ hwcfg3_data_t hwcfg3; ++ hwcfg4_data_t hwcfg4; ++ ++ /** Host and Device Configuration -- stored here for convenience.*/ ++ hcfg_data_t hcfg; ++ dcfg_data_t dcfg; ++ ++ /** The operational State, during transations ++ * (a_host>>a_peripherial and b_device=>b_host) this may not ++ * match the core but allows the software to determine ++ * transitions. ++ */ ++ uint8_t op_state; ++ ++ /** ++ * Set to 1 if the HCD needs to be restarted on a session request ++ * interrupt. This is required if no connector ID status change has ++ * occurred since the HCD was last disconnected. ++ */ ++ uint8_t restart_hcd_on_session_req; ++ ++ /** HCD callbacks */ ++ /** A-Device is a_host */ ++#define A_HOST (1) ++ /** A-Device is a_suspend */ ++#define A_SUSPEND (2) ++ /** A-Device is a_peripherial */ ++#define A_PERIPHERAL (3) ++ /** B-Device is operating as a Peripheral. */ ++#define B_PERIPHERAL (4) ++ /** B-Device is operating as a Host. 
*/ ++#define B_HOST (5) ++ ++ /** HCD callbacks */ ++ struct dwc_otg_cil_callbacks *hcd_cb; ++ /** PCD callbacks */ ++ struct dwc_otg_cil_callbacks *pcd_cb; ++ ++ /** Device mode Periodic Tx FIFO Mask */ ++ uint32_t p_tx_msk; ++ /** Device mode Periodic Tx FIFO Mask */ ++ uint32_t tx_msk; ++ ++ /** Workqueue object used for handling several interrupts */ ++ struct workqueue_struct *wq_otg; ++ ++ /** Work object used for handling "Connector ID Status Change" Interrupt */ ++ struct work_struct w_conn_id; ++ ++ /** Work object used for handling "Wakeup Detected" Interrupt */ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ struct work_struct w_wkp; ++#else ++ struct delayed_work w_wkp; ++#endif ++ ++#ifdef DEBUG ++ uint32_t start_hcchar_val[MAX_EPS_CHANNELS]; ++ ++ hc_xfer_info_t hc_xfer_info[MAX_EPS_CHANNELS]; ++ struct timer_list hc_xfer_timer[MAX_EPS_CHANNELS]; ++ ++ uint32_t hfnum_7_samples; ++ uint64_t hfnum_7_frrem_accum; ++ uint32_t hfnum_0_samples; ++ uint64_t hfnum_0_frrem_accum; ++ uint32_t hfnum_other_samples; ++ uint64_t hfnum_other_frrem_accum; ++#endif ++ ++ ++} dwc_otg_core_if_t; ++ ++/*We must clear S3C24XX_EINTPEND external interrupt register ++ * because after clearing in this register trigerred IRQ from ++ * H/W core in kernel interrupt can be occured again before OTG ++ * handlers clear all IRQ sources of Core registers because of ++ * timing latencies and Low Level IRQ Type. ++ */ ++ ++#ifdef CONFIG_MACH_IPMATE ++#define S3C2410X_CLEAR_EINTPEND() \ ++do { \ ++ if (!dwc_otg_read_core_intr(core_if)) { \ ++ __raw_writel(1UL << 11,S3C24XX_EINTPEND); \ ++ } \ ++} while (0) ++#else ++#define S3C2410X_CLEAR_EINTPEND() do { } while (0) ++#endif ++ ++/* ++ * The following functions are functions for works ++ * using during handling some interrupts ++ */ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ ++extern void w_conn_id_status_change(void *p); ++extern void w_wakeup_detected(void *p); ++ ++#else ++ ++extern void w_conn_id_status_change(struct work_struct *p); ++extern void w_wakeup_detected(struct work_struct *p); ++ ++#endif ++ ++ ++/* ++ * The following functions support initialization of the CIL driver component ++ * and the DWC_otg controller. ++ */ ++extern dwc_otg_core_if_t *dwc_otg_cil_init(const uint32_t *_reg_base_addr, ++ dwc_otg_core_params_t *_core_params); ++extern void dwc_otg_cil_remove(dwc_otg_core_if_t *_core_if); ++extern void dwc_otg_core_init(dwc_otg_core_if_t *_core_if); ++extern void dwc_otg_core_host_init(dwc_otg_core_if_t *_core_if); ++extern void dwc_otg_core_dev_init(dwc_otg_core_if_t *_core_if); ++extern void dwc_otg_enable_global_interrupts( dwc_otg_core_if_t *_core_if ); ++extern void dwc_otg_disable_global_interrupts( dwc_otg_core_if_t *_core_if ); ++ ++/** @name Device CIL Functions ++ * The following functions support managing the DWC_otg controller in device ++ * mode. 
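/*
 * A minimal bring-up/tear-down sketch for the CIL entry points declared
 * above, assuming a hypothetical bus-glue probe path that has already
 * mapped the register window ("reg_base") and filled a parameter block
 * ("params"); both names are illustrative and not part of this patch.
 */
#include "dwc_otg_cil.h"

static dwc_otg_core_if_t *example_cil_bring_up(const uint32_t *reg_base,
					       dwc_otg_core_params_t *params)
{
	dwc_otg_core_if_t *core_if;

	/* Build the core_if from the mapped register window (returning NULL
	 * on failure is an assumption about dwc_otg_cil_init's error path). */
	core_if = dwc_otg_cil_init(reg_base, params);
	if (!core_if)
		return NULL;

	/* Common, mode-independent core setup. */
	dwc_otg_core_init(core_if);

	/* Only now let the core raise its global interrupt. */
	dwc_otg_enable_global_interrupts(core_if);

	return core_if;
}

static void example_cil_tear_down(dwc_otg_core_if_t *core_if)
{
	dwc_otg_disable_global_interrupts(core_if);
	dwc_otg_cil_remove(core_if);
}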
++ */ ++/**@{*/ ++extern void dwc_otg_wakeup(dwc_otg_core_if_t *_core_if); ++extern void dwc_otg_read_setup_packet (dwc_otg_core_if_t *_core_if, uint32_t *_dest); ++extern uint32_t dwc_otg_get_frame_number(dwc_otg_core_if_t *_core_if); ++extern void dwc_otg_ep0_activate(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); ++extern void dwc_otg_ep_activate(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); ++extern void dwc_otg_ep_deactivate(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); ++extern void dwc_otg_ep_start_transfer(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); ++extern void dwc_otg_ep_start_zl_transfer(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); ++extern void dwc_otg_ep0_start_transfer(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); ++extern void dwc_otg_ep0_continue_transfer(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); ++extern void dwc_otg_ep_write_packet(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep, int _dma); ++extern void dwc_otg_ep_set_stall(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); ++extern void dwc_otg_ep_clear_stall(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); ++extern void dwc_otg_enable_device_interrupts(dwc_otg_core_if_t *_core_if); ++extern void dwc_otg_dump_dev_registers(dwc_otg_core_if_t *_core_if); ++extern void dwc_otg_dump_spram(dwc_otg_core_if_t *_core_if); ++#ifdef DWC_EN_ISOC ++extern void dwc_otg_iso_ep_start_frm_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep); ++extern void dwc_otg_iso_ep_start_buf_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep); ++#endif //DWC_EN_ISOC ++/**@}*/ ++ ++/** @name Host CIL Functions ++ * The following functions support managing the DWC_otg controller in host ++ * mode. ++ */ ++/**@{*/ ++extern void dwc_otg_hc_init(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc); ++extern void dwc_otg_hc_halt(dwc_otg_core_if_t *_core_if, ++ dwc_hc_t *_hc, ++ dwc_otg_halt_status_e _halt_status); ++extern void dwc_otg_hc_cleanup(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc); ++extern void dwc_otg_hc_start_transfer(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc); ++extern int dwc_otg_hc_continue_transfer(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc); ++extern void dwc_otg_hc_do_ping(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc); ++extern void dwc_otg_hc_write_packet(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc); ++extern void dwc_otg_enable_host_interrupts(dwc_otg_core_if_t *_core_if); ++extern void dwc_otg_disable_host_interrupts(dwc_otg_core_if_t *_core_if); ++ ++/** ++ * This function Reads HPRT0 in preparation to modify. It keeps the ++ * WC bits 0 so that if they are read as 1, they won't clear when you ++ * write it back ++ */ ++static inline uint32_t dwc_otg_read_hprt0(dwc_otg_core_if_t *_core_if) ++{ ++ hprt0_data_t hprt0; ++ hprt0.d32 = dwc_read_reg32(_core_if->host_if->hprt0); ++ hprt0.b.prtena = 0; ++ hprt0.b.prtconndet = 0; ++ hprt0.b.prtenchng = 0; ++ hprt0.b.prtovrcurrchng = 0; ++ return hprt0.d32; ++} ++ ++extern void dwc_otg_dump_host_registers(dwc_otg_core_if_t *_core_if); ++/**@}*/ ++ ++/** @name Common CIL Functions ++ * The following functions support managing the DWC_otg controller in either ++ * device or host mode. 
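/*
 * dwc_otg_read_hprt0() above forces the write-1-to-clear status bits to 0
 * so a read-modify-write cannot acknowledge them by accident.  A short
 * sketch of the intended pattern (the function name is hypothetical; the
 * prtpwr write mirrors the session-request handler later in this patch):
 */
static void example_port_power_on(dwc_otg_core_if_t *core_if)
{
	hprt0_data_t hprt0;

	hprt0.d32 = dwc_otg_read_hprt0(core_if);	/* WC bits already 0 */
	hprt0.b.prtpwr = 1;				/* turn on port power */
	dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
}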
++ */ ++/**@{*/ ++ ++extern void dwc_otg_read_packet(dwc_otg_core_if_t *core_if, ++ uint8_t *dest, ++ uint16_t bytes); ++ ++extern void dwc_otg_dump_global_registers(dwc_otg_core_if_t *_core_if); ++ ++extern void dwc_otg_flush_tx_fifo( dwc_otg_core_if_t *_core_if, ++ const int _num ); ++extern void dwc_otg_flush_rx_fifo( dwc_otg_core_if_t *_core_if ); ++extern void dwc_otg_core_reset( dwc_otg_core_if_t *_core_if ); ++ ++extern dwc_otg_dma_desc_t* dwc_otg_ep_alloc_desc_chain(uint32_t * dma_desc_addr, uint32_t count); ++extern void dwc_otg_ep_free_desc_chain(dwc_otg_dma_desc_t* desc_addr, uint32_t dma_desc_addr, uint32_t count); ++ ++/** ++ * This function returns the Core Interrupt register. ++ */ ++static inline uint32_t dwc_otg_read_core_intr(dwc_otg_core_if_t *_core_if) ++{ ++ return (dwc_read_reg32(&_core_if->core_global_regs->gintsts) & ++ dwc_read_reg32(&_core_if->core_global_regs->gintmsk)); ++} ++ ++/** ++ * This function returns the OTG Interrupt register. ++ */ ++static inline uint32_t dwc_otg_read_otg_intr (dwc_otg_core_if_t *_core_if) ++{ ++ return (dwc_read_reg32 (&_core_if->core_global_regs->gotgint)); ++} ++ ++/** ++ * This function reads the Device All Endpoints Interrupt register and ++ * returns the IN endpoint interrupt bits. ++ */ ++static inline uint32_t dwc_otg_read_dev_all_in_ep_intr(dwc_otg_core_if_t *core_if) ++{ ++ uint32_t v; ++ ++ if(core_if->multiproc_int_enable) { ++ v = dwc_read_reg32(&core_if->dev_if->dev_global_regs->deachint) & ++ dwc_read_reg32(&core_if->dev_if->dev_global_regs->deachintmsk); ++ } else { ++ v = dwc_read_reg32(&core_if->dev_if->dev_global_regs->daint) & ++ dwc_read_reg32(&core_if->dev_if->dev_global_regs->daintmsk); ++ } ++ return (v & 0xffff); ++ ++} ++ ++/** ++ * This function reads the Device All Endpoints Interrupt register and ++ * returns the OUT endpoint interrupt bits. 
++ */ ++static inline uint32_t dwc_otg_read_dev_all_out_ep_intr(dwc_otg_core_if_t *core_if) ++{ ++ uint32_t v; ++ ++ if(core_if->multiproc_int_enable) { ++ v = dwc_read_reg32(&core_if->dev_if->dev_global_regs->deachint) & ++ dwc_read_reg32(&core_if->dev_if->dev_global_regs->deachintmsk); ++ } else { ++ v = dwc_read_reg32(&core_if->dev_if->dev_global_regs->daint) & ++ dwc_read_reg32(&core_if->dev_if->dev_global_regs->daintmsk); ++ } ++ ++ return ((v & 0xffff0000) >> 16); ++} ++ ++/** ++ * This function returns the Device IN EP Interrupt register ++ */ ++static inline uint32_t dwc_otg_read_dev_in_ep_intr(dwc_otg_core_if_t *core_if, ++ dwc_ep_t *ep) ++{ ++ dwc_otg_dev_if_t *dev_if = core_if->dev_if; ++ uint32_t v, msk, emp; ++ ++ if(core_if->multiproc_int_enable) { ++ msk = dwc_read_reg32(&dev_if->dev_global_regs->diepeachintmsk[ep->num]); ++ emp = dwc_read_reg32(&dev_if->dev_global_regs->dtknqr4_fifoemptymsk); ++ msk |= ((emp >> ep->num) & 0x1) << 7; ++ v = dwc_read_reg32(&dev_if->in_ep_regs[ep->num]->diepint) & msk; ++ } else { ++ msk = dwc_read_reg32(&dev_if->dev_global_regs->diepmsk); ++ emp = dwc_read_reg32(&dev_if->dev_global_regs->dtknqr4_fifoemptymsk); ++ msk |= ((emp >> ep->num) & 0x1) << 7; ++ v = dwc_read_reg32(&dev_if->in_ep_regs[ep->num]->diepint) & msk; ++ } ++ ++ ++ return v; ++} ++/** ++ * This function returns the Device OUT EP Interrupt register ++ */ ++static inline uint32_t dwc_otg_read_dev_out_ep_intr(dwc_otg_core_if_t *_core_if, ++ dwc_ep_t *_ep) ++{ ++ dwc_otg_dev_if_t *dev_if = _core_if->dev_if; ++ uint32_t v; ++ doepmsk_data_t msk = { .d32 = 0 }; ++ ++ if(_core_if->multiproc_int_enable) { ++ msk.d32 = dwc_read_reg32(&dev_if->dev_global_regs->doepeachintmsk[_ep->num]); ++ if(_core_if->pti_enh_enable) { ++ msk.b.pktdrpsts = 1; ++ } ++ v = dwc_read_reg32( &dev_if->out_ep_regs[_ep->num]->doepint) & msk.d32; ++ } else { ++ msk.d32 = dwc_read_reg32(&dev_if->dev_global_regs->doepmsk); ++ if(_core_if->pti_enh_enable) { ++ msk.b.pktdrpsts = 1; ++ } ++ v = dwc_read_reg32( &dev_if->out_ep_regs[_ep->num]->doepint) & msk.d32; ++ } ++ return v; ++} ++ ++/** ++ * This function returns the Host All Channel Interrupt register ++ */ ++static inline uint32_t dwc_otg_read_host_all_channels_intr (dwc_otg_core_if_t *_core_if) ++{ ++ return (dwc_read_reg32 (&_core_if->host_if->host_global_regs->haint)); ++} ++ ++static inline uint32_t dwc_otg_read_host_channel_intr (dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc) ++{ ++ return (dwc_read_reg32 (&_core_if->host_if->hc_regs[_hc->hc_num]->hcint)); ++} ++ ++ ++/** ++ * This function returns the mode of the operation, host or device. ++ * ++ * @return 0 - Device Mode, 1 - Host Mode ++ */ ++static inline uint32_t dwc_otg_mode(dwc_otg_core_if_t *_core_if) ++{ ++ return (dwc_read_reg32( &_core_if->core_global_regs->gintsts ) & 0x1); ++} ++ ++static inline uint8_t dwc_otg_is_device_mode(dwc_otg_core_if_t *_core_if) ++{ ++ return (dwc_otg_mode(_core_if) != DWC_HOST_MODE); ++} ++static inline uint8_t dwc_otg_is_host_mode(dwc_otg_core_if_t *_core_if) ++{ ++ return (dwc_otg_mode(_core_if) == DWC_HOST_MODE); ++} ++ ++extern int32_t dwc_otg_handle_common_intr( dwc_otg_core_if_t *_core_if ); ++ ++ ++/**@}*/ ++ ++/** ++ * DWC_otg CIL callback structure. This structure allows the HCD and ++ * PCD to register functions used for starting and stopping the PCD ++ * and HCD for role change on for a DRD. 
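/*
 * dwc_otg_handle_common_intr(), declared above, is what a top-level
 * interrupt handler calls for the mode-independent sources.  A hedged
 * sketch of such a handler; the function name, the dev_id convention and
 * the post-2.6.19 irq handler signature are assumptions, not part of
 * this patch.
 */
#include <linux/interrupt.h>

static irqreturn_t example_otg_common_irq(int irq, void *dev_id)
{
	dwc_otg_core_if_t *core_if = dev_id;

	/* Non-zero means at least one shared interrupt source was serviced. */
	return dwc_otg_handle_common_intr(core_if) ? IRQ_HANDLED : IRQ_NONE;
}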
++ */ ++typedef struct dwc_otg_cil_callbacks ++{ ++ /** Start function for role change */ ++ int (*start) (void *_p); ++ /** Stop Function for role change */ ++ int (*stop) (void *_p); ++ /** Disconnect Function for role change */ ++ int (*disconnect) (void *_p); ++ /** Resume/Remote wakeup Function */ ++ int (*resume_wakeup) (void *_p); ++ /** Suspend function */ ++ int (*suspend) (void *_p); ++ /** Session Start (SRP) */ ++ int (*session_start) (void *_p); ++ /** Pointer passed to start() and stop() */ ++ void *p; ++} dwc_otg_cil_callbacks_t; ++ ++extern void dwc_otg_cil_register_pcd_callbacks( dwc_otg_core_if_t *_core_if, ++ dwc_otg_cil_callbacks_t *_cb, ++ void *_p); ++extern void dwc_otg_cil_register_hcd_callbacks( dwc_otg_core_if_t *_core_if, ++ dwc_otg_cil_callbacks_t *_cb, ++ void *_p); ++ ++#endif ++ +--- /dev/null ++++ b/drivers/usb/dwc_otg/dwc_otg_cil_intr.c +@@ -0,0 +1,750 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_cil_intr.c $ ++ * $Revision: 1.2 $ ++ * $Date: 2008-11-21 05:39:15 $ ++ * $Change: 1065567 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++ ++/** @file ++ * ++ * The Core Interface Layer provides basic services for accessing and ++ * managing the DWC_otg hardware. These services are used by both the ++ * Host Controller Driver and the Peripheral Controller Driver. ++ * ++ * This file contains the Common Interrupt handlers. 
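/*
 * The dwc_otg_cil_callbacks structure defined near the end of
 * dwc_otg_cil.h above is how the PCD hooks into the CIL; registration
 * just stores the pointer (see dwc_otg_cil_register_pcd_callbacks earlier
 * in this patch).  A sketch with hypothetical PCD handlers; unset members
 * are simply never called, since the CIL helpers test each pointer first.
 */
static int example_pcd_start(void *p)   { return 0; }	/* (re)start gadget */
static int example_pcd_stop(void *p)    { return 0; }	/* quiesce gadget */
static int example_pcd_suspend(void *p) { return 0; }	/* bus went idle */

static dwc_otg_cil_callbacks_t example_pcd_callbacks = {
	.start   = example_pcd_start,
	.stop    = example_pcd_stop,
	.suspend = example_pcd_suspend,
};

static void example_register_pcd(dwc_otg_core_if_t *core_if, void *pcd)
{
	/* 'pcd' ends up in example_pcd_callbacks.p and is handed back to
	 * the handlers as their only argument. */
	dwc_otg_cil_register_pcd_callbacks(core_if, &example_pcd_callbacks, pcd);
}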
++ */ ++#include "linux/dwc_otg_plat.h" ++#include "dwc_otg_regs.h" ++#include "dwc_otg_cil.h" ++ ++#ifdef DEBUG ++inline const char *op_state_str(dwc_otg_core_if_t *core_if) ++{ ++ return (core_if->op_state==A_HOST?"a_host": ++ (core_if->op_state==A_SUSPEND?"a_suspend": ++ (core_if->op_state==A_PERIPHERAL?"a_peripheral": ++ (core_if->op_state==B_PERIPHERAL?"b_peripheral": ++ (core_if->op_state==B_HOST?"b_host": ++ "unknown"))))); ++} ++#endif ++ ++/** This function will log a debug message ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ */ ++int32_t dwc_otg_handle_mode_mismatch_intr (dwc_otg_core_if_t *core_if) ++{ ++ gintsts_data_t gintsts; ++ DWC_WARN("Mode Mismatch Interrupt: currently in %s mode\n", ++ dwc_otg_mode(core_if) ? "Host" : "Device"); ++ ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.modemismatch = 1; ++ dwc_write_reg32 (&core_if->core_global_regs->gintsts, gintsts.d32); ++ return 1; ++} ++ ++/** Start the HCD. Helper function for using the HCD callbacks. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ */ ++static inline void hcd_start(dwc_otg_core_if_t *core_if) ++{ ++ if (core_if->hcd_cb && core_if->hcd_cb->start) { ++ core_if->hcd_cb->start(core_if->hcd_cb->p); ++ } ++} ++/** Stop the HCD. Helper function for using the HCD callbacks. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ */ ++static inline void hcd_stop(dwc_otg_core_if_t *core_if) ++{ ++ if (core_if->hcd_cb && core_if->hcd_cb->stop) { ++ core_if->hcd_cb->stop(core_if->hcd_cb->p); ++ } ++} ++/** Disconnect the HCD. Helper function for using the HCD callbacks. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ */ ++static inline void hcd_disconnect(dwc_otg_core_if_t *core_if) ++{ ++ if (core_if->hcd_cb && core_if->hcd_cb->disconnect) { ++ core_if->hcd_cb->disconnect(core_if->hcd_cb->p); ++ } ++} ++/** Inform the HCD the a New Session has begun. Helper function for ++ * using the HCD callbacks. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ */ ++static inline void hcd_session_start(dwc_otg_core_if_t *core_if) ++{ ++ if (core_if->hcd_cb && core_if->hcd_cb->session_start) { ++ core_if->hcd_cb->session_start(core_if->hcd_cb->p); ++ } ++} ++ ++/** Start the PCD. Helper function for using the PCD callbacks. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ */ ++static inline void pcd_start(dwc_otg_core_if_t *core_if) ++{ ++ if (core_if->pcd_cb && core_if->pcd_cb->start) { ++ core_if->pcd_cb->start(core_if->pcd_cb->p); ++ } ++} ++/** Stop the PCD. Helper function for using the PCD callbacks. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ */ ++static inline void pcd_stop(dwc_otg_core_if_t *core_if) ++{ ++ if (core_if->pcd_cb && core_if->pcd_cb->stop) { ++ core_if->pcd_cb->stop(core_if->pcd_cb->p); ++ } ++} ++/** Suspend the PCD. Helper function for using the PCD callbacks. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ */ ++static inline void pcd_suspend(dwc_otg_core_if_t *core_if) ++{ ++ if (core_if->pcd_cb && core_if->pcd_cb->suspend) { ++ core_if->pcd_cb->suspend(core_if->pcd_cb->p); ++ } ++} ++/** Resume the PCD. Helper function for using the PCD callbacks. ++ * ++ * @param core_if Programming view of DWC_otg controller. 
++ */ ++static inline void pcd_resume(dwc_otg_core_if_t *core_if) ++{ ++ if (core_if->pcd_cb && core_if->pcd_cb->resume_wakeup) { ++ core_if->pcd_cb->resume_wakeup(core_if->pcd_cb->p); ++ } ++} ++ ++/** ++ * This function handles the OTG Interrupts. It reads the OTG ++ * Interrupt Register (GOTGINT) to determine what interrupt has ++ * occurred. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ */ ++int32_t dwc_otg_handle_otg_intr(dwc_otg_core_if_t *core_if) ++{ ++ dwc_otg_core_global_regs_t *global_regs = ++ core_if->core_global_regs; ++ gotgint_data_t gotgint; ++ gotgctl_data_t gotgctl; ++ gintmsk_data_t gintmsk; ++ ++ gotgint.d32 = dwc_read_reg32(&global_regs->gotgint); ++ gotgctl.d32 = dwc_read_reg32(&global_regs->gotgctl); ++ DWC_DEBUGPL(DBG_CIL, "gotgctl=%08x\n", gotgctl.d32); ++ ++ if (gotgint.b.sesenddet) { ++ DWC_DEBUGPL(DBG_ANY, "OTG Interrupt: " ++ "Session End Detected++ (%s)\n", ++ op_state_str(core_if)); ++ gotgctl.d32 = dwc_read_reg32(&global_regs->gotgctl); ++ ++ if (core_if->op_state == B_HOST) { ++ pcd_start(core_if); ++ core_if->op_state = B_PERIPHERAL; ++ } else { ++ /* If not B_HOST and Device HNP still set. HNP ++ * Did not succeed!*/ ++ if (gotgctl.b.devhnpen) { ++ DWC_DEBUGPL(DBG_ANY, "Session End Detected\n"); ++ DWC_ERROR("Device Not Connected/Responding!\n"); ++ } ++ ++ /* If Session End Detected the B-Cable has ++ * been disconnected. */ ++ /* Reset PCD and Gadget driver to a ++ * clean state. */ ++ pcd_stop(core_if); ++ } ++ gotgctl.d32 = 0; ++ gotgctl.b.devhnpen = 1; ++ dwc_modify_reg32(&global_regs->gotgctl, ++ gotgctl.d32, 0); ++ } ++ if (gotgint.b.sesreqsucstschng) { ++ DWC_DEBUGPL(DBG_ANY, " OTG Interrupt: " ++ "Session Reqeust Success Status Change++\n"); ++ gotgctl.d32 = dwc_read_reg32(&global_regs->gotgctl); ++ if (gotgctl.b.sesreqscs) { ++ if ((core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS) && ++ (core_if->core_params->i2c_enable)) { ++ core_if->srp_success = 1; ++ } ++ else { ++ pcd_resume(core_if); ++ /* Clear Session Request */ ++ gotgctl.d32 = 0; ++ gotgctl.b.sesreq = 1; ++ dwc_modify_reg32(&global_regs->gotgctl, ++ gotgctl.d32, 0); ++ } ++ } ++ } ++ if (gotgint.b.hstnegsucstschng) { ++ /* Print statements during the HNP interrupt handling ++ * can cause it to fail.*/ ++ gotgctl.d32 = dwc_read_reg32(&global_regs->gotgctl); ++ if (gotgctl.b.hstnegscs) { ++ if (dwc_otg_is_host_mode(core_if)) { ++ core_if->op_state = B_HOST; ++ /* ++ * Need to disable SOF interrupt immediately. ++ * When switching from device to host, the PCD ++ * interrupt handler won't handle the ++ * interrupt if host mode is already set. The ++ * HCD interrupt handler won't get called if ++ * the HCD state is HALT. This means that the ++ * interrupt does not get handled and Linux ++ * complains loudly. ++ */ ++ gintmsk.d32 = 0; ++ gintmsk.b.sofintr = 1; ++ dwc_modify_reg32(&global_regs->gintmsk, ++ gintmsk.d32, 0); ++ pcd_stop(core_if); ++ /* ++ * Initialize the Core for Host mode. ++ */ ++ hcd_start(core_if); ++ core_if->op_state = B_HOST; ++ } ++ } else { ++ gotgctl.d32 = 0; ++ gotgctl.b.hnpreq = 1; ++ gotgctl.b.devhnpen = 1; ++ dwc_modify_reg32(&global_regs->gotgctl, ++ gotgctl.d32, 0); ++ DWC_DEBUGPL(DBG_ANY, "HNP Failed\n"); ++ DWC_ERROR("Device Not Connected/Responding\n"); ++ } ++ } ++ if (gotgint.b.hstnegdet) { ++ /* The disconnect interrupt is set at the same time as ++ * Host Negotiation Detected. During the mode ++ * switch all interrupts are cleared so the disconnect ++ * interrupt handler will not get executed. 
++ */ ++ DWC_DEBUGPL(DBG_ANY, " ++OTG Interrupt: " ++ "Host Negotiation Detected++ (%s)\n", ++ (dwc_otg_is_host_mode(core_if)?"Host":"Device")); ++ if (dwc_otg_is_device_mode(core_if)){ ++ DWC_DEBUGPL(DBG_ANY, "a_suspend->a_peripheral (%d)\n", core_if->op_state); ++ hcd_disconnect(core_if); ++ pcd_start(core_if); ++ core_if->op_state = A_PERIPHERAL; ++ } else { ++ /* ++ * Need to disable SOF interrupt immediately. When ++ * switching from device to host, the PCD interrupt ++ * handler won't handle the interrupt if host mode is ++ * already set. The HCD interrupt handler won't get ++ * called if the HCD state is HALT. This means that ++ * the interrupt does not get handled and Linux ++ * complains loudly. ++ */ ++ gintmsk.d32 = 0; ++ gintmsk.b.sofintr = 1; ++ dwc_modify_reg32(&global_regs->gintmsk, ++ gintmsk.d32, 0); ++ pcd_stop(core_if); ++ hcd_start(core_if); ++ core_if->op_state = A_HOST; ++ } ++ } ++ if (gotgint.b.adevtoutchng) { ++ DWC_DEBUGPL(DBG_ANY, " ++OTG Interrupt: " ++ "A-Device Timeout Change++\n"); ++ } ++ if (gotgint.b.debdone) { ++ DWC_DEBUGPL(DBG_ANY, " ++OTG Interrupt: " ++ "Debounce Done++\n"); ++ } ++ ++ /* Clear GOTGINT */ ++ dwc_write_reg32 (&core_if->core_global_regs->gotgint, gotgint.d32); ++ ++ return 1; ++} ++ ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ ++void w_conn_id_status_change(void *p) ++{ ++ dwc_otg_core_if_t *core_if = p; ++ ++#else ++ ++void w_conn_id_status_change(struct work_struct *p) ++{ ++ dwc_otg_core_if_t *core_if = container_of(p, dwc_otg_core_if_t, w_conn_id); ++ ++#endif ++ ++ ++ uint32_t count = 0; ++ gotgctl_data_t gotgctl = { .d32 = 0 }; ++ ++ gotgctl.d32 = dwc_read_reg32(&core_if->core_global_regs->gotgctl); ++ DWC_DEBUGPL(DBG_CIL, "gotgctl=%0x\n", gotgctl.d32); ++ DWC_DEBUGPL(DBG_CIL, "gotgctl.b.conidsts=%d\n", gotgctl.b.conidsts); ++ ++ /* B-Device connector (Device Mode) */ ++ if (gotgctl.b.conidsts) { ++ /* Wait for switch to device mode. */ ++ while (!dwc_otg_is_device_mode(core_if)){ ++ DWC_PRINT("Waiting for Peripheral Mode, Mode=%s\n", ++ (dwc_otg_is_host_mode(core_if)?"Host":"Peripheral")); ++ MDELAY(100); ++ if (++count > 10000) *(uint32_t*)NULL=0; ++ } ++ core_if->op_state = B_PERIPHERAL; ++ dwc_otg_core_init(core_if); ++ dwc_otg_enable_global_interrupts(core_if); ++ pcd_start(core_if); ++ } else { ++ /* A-Device connector (Host Mode) */ ++ while (!dwc_otg_is_host_mode(core_if)) { ++ DWC_PRINT("Waiting for Host Mode, Mode=%s\n", ++ (dwc_otg_is_host_mode(core_if)?"Host":"Peripheral")); ++ MDELAY(100); ++ if (++count > 10000) *(uint32_t*)NULL=0; ++ } ++ core_if->op_state = A_HOST; ++ /* ++ * Initialize the Core for Host mode. ++ */ ++ dwc_otg_core_init(core_if); ++ dwc_otg_enable_global_interrupts(core_if); ++ hcd_start(core_if); ++ } ++} ++ ++ ++/** ++ * This function handles the Connector ID Status Change Interrupt. It ++ * reads the OTG Interrupt Register (GOTCTL) to determine whether this ++ * is a Device to Host Mode transition or a Host Mode to Device ++ * Transition. ++ * ++ * This only occurs when the cable is connected/removed from the PHY ++ * connector. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ */ ++int32_t dwc_otg_handle_conn_id_status_change_intr(dwc_otg_core_if_t *core_if) ++{ ++ ++ /* ++ * Need to disable SOF interrupt immediately. If switching from device ++ * to host, the PCD interrupt handler won't handle the interrupt if ++ * host mode is already set. The HCD interrupt handler won't get ++ * called if the HCD state is HALT. 
This means that the interrupt does ++ * not get handled and Linux complains loudly. ++ */ ++ gintmsk_data_t gintmsk = { .d32 = 0 }; ++ gintsts_data_t gintsts = { .d32 = 0 }; ++ ++ gintmsk.b.sofintr = 1; ++ dwc_modify_reg32(&core_if->core_global_regs->gintmsk, gintmsk.d32, 0); ++ ++ DWC_DEBUGPL(DBG_CIL, " ++Connector ID Status Change Interrupt++ (%s)\n", ++ (dwc_otg_is_host_mode(core_if)?"Host":"Device")); ++ ++ /* ++ * Need to schedule a work, as there are possible DELAY function calls ++ */ ++ queue_work(core_if->wq_otg, &core_if->w_conn_id); ++ ++ /* Set flag and clear interrupt */ ++ gintsts.b.conidstschng = 1; ++ dwc_write_reg32 (&core_if->core_global_regs->gintsts, gintsts.d32); ++ ++ return 1; ++} ++ ++/** ++ * This interrupt indicates that a device is initiating the Session ++ * Request Protocol to request the host to turn on bus power so a new ++ * session can begin. The handler responds by turning on bus power. If ++ * the DWC_otg controller is in low power mode, the handler brings the ++ * controller out of low power mode before turning on bus power. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ */ ++int32_t dwc_otg_handle_session_req_intr(dwc_otg_core_if_t *core_if) ++{ ++ gintsts_data_t gintsts; ++ ++#ifndef DWC_HOST_ONLY ++ hprt0_data_t hprt0; ++ DWC_DEBUGPL(DBG_ANY, "++Session Request Interrupt++\n"); ++ ++ if (dwc_otg_is_device_mode(core_if)) { ++ DWC_PRINT("SRP: Device mode\n"); ++ } else { ++ DWC_PRINT("SRP: Host mode\n"); ++ ++ /* Turn on the port power bit. */ ++ hprt0.d32 = dwc_otg_read_hprt0(core_if); ++ hprt0.b.prtpwr = 1; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ ++ /* Start the Connection timer. So a message can be displayed ++ * if connect does not occur within 10 seconds. */ ++ hcd_session_start(core_if); ++ } ++#endif ++ ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.sessreqintr = 1; ++ dwc_write_reg32 (&core_if->core_global_regs->gintsts, gintsts.d32); ++ ++ return 1; ++} ++ ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++void w_wakeup_detected(void *p) ++{ ++ dwc_otg_core_if_t* core_if = p; ++ ++#else ++ ++void w_wakeup_detected(struct work_struct *p) ++{ ++ struct delayed_work *dw = container_of(p, struct delayed_work, work); ++ dwc_otg_core_if_t *core_if = container_of(dw, dwc_otg_core_if_t, w_wkp); ++ ++#endif ++ /* ++ * Clear the Resume after 70ms. (Need 20 ms minimum. Use 70 ms ++ * so that OPT tests pass with all PHYs). ++ */ ++ hprt0_data_t hprt0 = {.d32=0}; ++#if 0 ++ pcgcctl_data_t pcgcctl = {.d32=0}; ++ /* Restart the Phy Clock */ ++ pcgcctl.b.stoppclk = 1; ++ dwc_modify_reg32(core_if->pcgcctl, pcgcctl.d32, 0); ++ UDELAY(10); ++#endif //0 ++ hprt0.d32 = dwc_otg_read_hprt0(core_if); ++ DWC_DEBUGPL(DBG_ANY,"Resume: HPRT0=%0x\n", hprt0.d32); ++// MDELAY(70); ++ hprt0.b.prtres = 0; /* Resume */ ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ DWC_DEBUGPL(DBG_ANY,"Clear Resume: HPRT0=%0x\n", dwc_read_reg32(core_if->host_if->hprt0)); ++} ++/** ++ * This interrupt indicates that the DWC_otg controller has detected a ++ * resume or remote wakeup sequence. If the DWC_otg controller is in ++ * low power mode, the handler must brings the controller out of low ++ * power mode. The controller automatically begins resume ++ * signaling. The handler schedules a time to stop resume signaling. 
++ */ ++int32_t dwc_otg_handle_wakeup_detected_intr(dwc_otg_core_if_t *core_if) ++{ ++ gintsts_data_t gintsts; ++ ++ DWC_DEBUGPL(DBG_ANY, "++Resume and Remote Wakeup Detected Interrupt++\n"); ++ ++ if (dwc_otg_is_device_mode(core_if)) { ++ dctl_data_t dctl = {.d32=0}; ++ DWC_DEBUGPL(DBG_PCD, "DSTS=0x%0x\n", ++ dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts)); ++#ifdef PARTIAL_POWER_DOWN ++ if (core_if->hwcfg4.b.power_optimiz) { ++ pcgcctl_data_t power = {.d32=0}; ++ ++ power.d32 = dwc_read_reg32(core_if->pcgcctl); ++ DWC_DEBUGPL(DBG_CIL, "PCGCCTL=%0x\n", power.d32); ++ ++ power.b.stoppclk = 0; ++ dwc_write_reg32(core_if->pcgcctl, power.d32); ++ ++ power.b.pwrclmp = 0; ++ dwc_write_reg32(core_if->pcgcctl, power.d32); ++ ++ power.b.rstpdwnmodule = 0; ++ dwc_write_reg32(core_if->pcgcctl, power.d32); ++ } ++#endif ++ /* Clear the Remote Wakeup Signalling */ ++ dctl.b.rmtwkupsig = 1; ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dctl, ++ dctl.d32, 0); ++ ++ if (core_if->pcd_cb && core_if->pcd_cb->resume_wakeup) { ++ core_if->pcd_cb->resume_wakeup(core_if->pcd_cb->p); ++ } ++ ++ } else { ++ pcgcctl_data_t pcgcctl = {.d32=0}; ++ ++ /* Restart the Phy Clock */ ++ pcgcctl.b.stoppclk = 1; ++ dwc_modify_reg32(core_if->pcgcctl, pcgcctl.d32, 0); ++ ++ queue_delayed_work(core_if->wq_otg, &core_if->w_wkp, ((70 * HZ / 1000) + 1)); ++ } ++ ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.wkupintr = 1; ++ dwc_write_reg32 (&core_if->core_global_regs->gintsts, gintsts.d32); ++ ++ return 1; ++} ++ ++/** ++ * This interrupt indicates that a device has been disconnected from ++ * the root port. ++ */ ++int32_t dwc_otg_handle_disconnect_intr(dwc_otg_core_if_t *core_if) ++{ ++ gintsts_data_t gintsts; ++ ++ DWC_DEBUGPL(DBG_ANY, "++Disconnect Detected Interrupt++ (%s) %s\n", ++ (dwc_otg_is_host_mode(core_if)?"Host":"Device"), ++ op_state_str(core_if)); ++ ++/** @todo Consolidate this if statement. */ ++#ifndef DWC_HOST_ONLY ++ if (core_if->op_state == B_HOST) { ++ /* If in device mode Disconnect and stop the HCD, then ++ * start the PCD. */ ++ hcd_disconnect(core_if); ++ pcd_start(core_if); ++ core_if->op_state = B_PERIPHERAL; ++ } else if (dwc_otg_is_device_mode(core_if)) { ++ gotgctl_data_t gotgctl = { .d32 = 0 }; ++ gotgctl.d32 = dwc_read_reg32(&core_if->core_global_regs->gotgctl); ++ if (gotgctl.b.hstsethnpen==1) { ++ /* Do nothing, if HNP in process the OTG ++ * interrupt "Host Negotiation Detected" ++ * interrupt will do the mode switch. ++ */ ++ } else if (gotgctl.b.devhnpen == 0) { ++ /* If in device mode Disconnect and stop the HCD, then ++ * start the PCD. */ ++ hcd_disconnect(core_if); ++ pcd_start(core_if); ++ core_if->op_state = B_PERIPHERAL; ++ } else { ++ DWC_DEBUGPL(DBG_ANY,"!a_peripheral && !devhnpen\n"); ++ } ++ } else { ++ if (core_if->op_state == A_HOST) { ++ /* A-Cable still connected but device disconnected. */ ++ hcd_disconnect(core_if); ++ } ++ } ++#endif ++ ++ gintsts.d32 = 0; ++ gintsts.b.disconnect = 1; ++ dwc_write_reg32 (&core_if->core_global_regs->gintsts, gintsts.d32); ++ return 1; ++} ++/** ++ * This interrupt indicates that SUSPEND state has been detected on ++ * the USB. ++ * ++ * For HNP the USB Suspend interrupt signals the change from ++ * "a_peripheral" to "a_host". ++ * ++ * When power management is enabled the core will be put in low power ++ * mode. 
++ */ ++int32_t dwc_otg_handle_usb_suspend_intr(dwc_otg_core_if_t *core_if) ++{ ++ dsts_data_t dsts; ++ gintsts_data_t gintsts; ++ ++ DWC_DEBUGPL(DBG_ANY,"USB SUSPEND\n"); ++ ++ if (dwc_otg_is_device_mode(core_if)) { ++ /* Check the Device status register to determine if the Suspend ++ * state is active. */ ++ dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts); ++ DWC_DEBUGPL(DBG_PCD, "DSTS=0x%0x\n", dsts.d32); ++ DWC_DEBUGPL(DBG_PCD, "DSTS.Suspend Status=%d " ++ "HWCFG4.power Optimize=%d\n", ++ dsts.b.suspsts, core_if->hwcfg4.b.power_optimiz); ++ ++ ++#ifdef PARTIAL_POWER_DOWN ++/** @todo Add a module parameter for power management. */ ++ ++ if (dsts.b.suspsts && core_if->hwcfg4.b.power_optimiz) { ++ pcgcctl_data_t power = {.d32=0}; ++ DWC_DEBUGPL(DBG_CIL, "suspend\n"); ++ ++ power.b.pwrclmp = 1; ++ dwc_write_reg32(core_if->pcgcctl, power.d32); ++ ++ power.b.rstpdwnmodule = 1; ++ dwc_modify_reg32(core_if->pcgcctl, 0, power.d32); ++ ++ power.b.stoppclk = 1; ++ dwc_modify_reg32(core_if->pcgcctl, 0, power.d32); ++ ++ } else { ++ DWC_DEBUGPL(DBG_ANY,"disconnect?\n"); ++ } ++#endif ++ /* PCD callback for suspend. */ ++ pcd_suspend(core_if); ++ } else { ++ if (core_if->op_state == A_PERIPHERAL) { ++ DWC_DEBUGPL(DBG_ANY,"a_peripheral->a_host\n"); ++ /* Clear the a_peripheral flag, back to a_host. */ ++ pcd_stop(core_if); ++ hcd_start(core_if); ++ core_if->op_state = A_HOST; ++ } ++ } ++ ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.usbsuspend = 1; ++ dwc_write_reg32(&core_if->core_global_regs->gintsts, gintsts.d32); ++ ++ return 1; ++} ++ ++ ++/** ++ * This function returns the Core Interrupt register. ++ */ ++static inline uint32_t dwc_otg_read_common_intr(dwc_otg_core_if_t *core_if) ++{ ++ gintsts_data_t gintsts; ++ gintmsk_data_t gintmsk; ++ gintmsk_data_t gintmsk_common = {.d32=0}; ++ gintmsk_common.b.wkupintr = 1; ++ gintmsk_common.b.sessreqintr = 1; ++ gintmsk_common.b.conidstschng = 1; ++ gintmsk_common.b.otgintr = 1; ++ gintmsk_common.b.modemismatch = 1; ++ gintmsk_common.b.disconnect = 1; ++ gintmsk_common.b.usbsuspend = 1; ++ /** @todo: The port interrupt occurs while in device ++ * mode. Added code to CIL to clear the interrupt for now! ++ */ ++ gintmsk_common.b.portintr = 1; ++ ++ gintsts.d32 = dwc_read_reg32(&core_if->core_global_regs->gintsts); ++ gintmsk.d32 = dwc_read_reg32(&core_if->core_global_regs->gintmsk); ++#ifdef DEBUG ++ /* if any common interrupts set */ ++ if (gintsts.d32 & gintmsk_common.d32) { ++ DWC_DEBUGPL(DBG_ANY, "gintsts=%08x gintmsk=%08x\n", ++ gintsts.d32, gintmsk.d32); ++ } ++#endif ++ ++ return ((gintsts.d32 & gintmsk.d32) & gintmsk_common.d32); ++ ++} ++ ++/** ++ * Common interrupt handler. ++ * ++ * The common interrupts are those that occur in both Host and Device mode. ++ * This handler handles the following interrupts: ++ * - Mode Mismatch Interrupt ++ * - Disconnect Interrupt ++ * - OTG Interrupt ++ * - Connector ID Status Change Interrupt ++ * - Session Request Interrupt. ++ * - Resume / Remote Wakeup Detected Interrupt. 
++ * ++ */ ++int32_t dwc_otg_handle_common_intr(dwc_otg_core_if_t *core_if) ++{ ++ int retval = 0; ++ gintsts_data_t gintsts; ++ ++ gintsts.d32 = dwc_otg_read_common_intr(core_if); ++ ++ if (gintsts.b.modemismatch) { ++ retval |= dwc_otg_handle_mode_mismatch_intr(core_if); ++ } ++ if (gintsts.b.otgintr) { ++ retval |= dwc_otg_handle_otg_intr(core_if); ++ } ++ if (gintsts.b.conidstschng) { ++ retval |= dwc_otg_handle_conn_id_status_change_intr(core_if); ++ } ++ if (gintsts.b.disconnect) { ++ retval |= dwc_otg_handle_disconnect_intr(core_if); ++ } ++ if (gintsts.b.sessreqintr) { ++ retval |= dwc_otg_handle_session_req_intr(core_if); ++ } ++ if (gintsts.b.wkupintr) { ++ retval |= dwc_otg_handle_wakeup_detected_intr(core_if); ++ } ++ if (gintsts.b.usbsuspend) { ++ retval |= dwc_otg_handle_usb_suspend_intr(core_if); ++ } ++ if (gintsts.b.portintr && dwc_otg_is_device_mode(core_if)) { ++ /* The port interrupt occurs while in device mode with HPRT0 ++ * Port Enable/Disable. ++ */ ++ gintsts.d32 = 0; ++ gintsts.b.portintr = 1; ++ dwc_write_reg32(&core_if->core_global_regs->gintsts, ++ gintsts.d32); ++ retval |= 1; ++ ++ } ++ ++ S3C2410X_CLEAR_EINTPEND(); ++ ++ return retval; ++} +--- /dev/null ++++ b/drivers/usb/dwc_otg/dwc_otg_driver.c +@@ -0,0 +1,1273 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_driver.c $ ++ * $Revision: 1.7 $ ++ * $Date: 2008-11-21 05:39:15 $ ++ * $Change: 791271 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++ ++/** @file ++ * The dwc_otg_driver module provides the initialization and cleanup entry ++ * points for the DWC_otg driver. This module will be dynamically installed ++ * after Linux is booted using the insmod command. 
When the module is ++ * installed, the dwc_otg_driver_init function is called. When the module is ++ * removed (using rmmod), the dwc_otg_driver_cleanup function is called. ++ * ++ * This module also defines a data structure for the dwc_otg_driver, which is ++ * used in conjunction with the standard ARM platform_device structure. These ++ * structures allow the OTG driver to comply with the standard Linux driver ++ * model in which devices and drivers are registered with a bus driver. This ++ * has the benefit that Linux can expose attributes of the driver and device ++ * in its special sysfs file system. Users can then read or write files in ++ * this file system to perform diagnostics on the driver components or the ++ * device. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include /* permission constants */ ++#include ++#include ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++# include ++#endif ++ ++#include ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++# include ++#endif ++ ++#include "linux/dwc_otg_plat.h" ++#include "dwc_otg_attr.h" ++#include "dwc_otg_driver.h" ++#include "dwc_otg_cil.h" ++#include "dwc_otg_pcd.h" ++#include "dwc_otg_hcd.h" ++ ++#define DWC_DRIVER_VERSION "2.72a 24-JUN-2008" ++#define DWC_DRIVER_DESC "HS OTG USB Controller driver" ++ ++static const char dwc_driver_name[] = "dwc_otg"; ++ ++/*-------------------------------------------------------------------------*/ ++/* Encapsulate the module parameter settings */ ++ ++static dwc_otg_core_params_t dwc_otg_module_params = { ++ .opt = -1, ++ .otg_cap = -1, ++ .dma_enable = -1, ++ .dma_desc_enable = -1, ++ .dma_burst_size = -1, ++ .speed = -1, ++ .host_support_fs_ls_low_power = -1, ++ .host_ls_low_power_phy_clk = -1, ++ .enable_dynamic_fifo = -1, ++ .data_fifo_size = -1, ++ .dev_rx_fifo_size = -1, ++ .dev_nperio_tx_fifo_size = -1, ++ .dev_perio_tx_fifo_size = { ++ /* dev_perio_tx_fifo_size_1 */ ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1 ++ /* 15 */ ++ }, ++ .host_rx_fifo_size = -1, ++ .host_nperio_tx_fifo_size = -1, ++ .host_perio_tx_fifo_size = -1, ++ .max_transfer_size = -1, ++ .max_packet_count = -1, ++ .host_channels = -1, ++ .dev_endpoints = -1, ++ .phy_type = -1, ++ .phy_utmi_width = -1, ++ .phy_ulpi_ddr = -1, ++ .phy_ulpi_ext_vbus = -1, ++ .i2c_enable = -1, ++ .ulpi_fs_ls = -1, ++ .ts_dline = -1, ++ .en_multiple_tx_fifo = -1, ++ .dev_tx_fifo_size = { ++ /* dev_tx_fifo_size */ ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1, ++ -1 ++ /* 15 */ ++ }, ++ .thr_ctl = -1, ++ .tx_thr_length = -1, ++ .rx_thr_length = -1, ++ .pti_enable = -1, ++ .mpi_enable = -1, ++}; ++ ++/** ++ * This function shows the Driver Version. ++ */ ++static ssize_t version_show(struct device_driver *dev, char *buf) ++{ ++ return snprintf(buf, sizeof(DWC_DRIVER_VERSION)+2, "%s\n", ++ DWC_DRIVER_VERSION); ++} ++static DRIVER_ATTR(version, S_IRUGO, version_show, NULL); ++ ++/** ++ * Global Debug Level Mask. ++ */ ++uint32_t g_dbg_lvl = 0; /* OFF */ ++ ++/** ++ * This function shows the driver Debug Level. ++ */ ++static ssize_t dbg_level_show(struct device_driver *drv, char *buf) ++{ ++ return sprintf(buf, "0x%0x\n", g_dbg_lvl); ++} ++ ++/** ++ * This function stores the driver Debug Level. 
++ */ ++static ssize_t dbg_level_store(struct device_driver *drv, const char *buf, ++ size_t count) ++{ ++ g_dbg_lvl = simple_strtoul(buf, NULL, 16); ++ return count; ++} ++static DRIVER_ATTR(debuglevel, S_IRUGO|S_IWUSR, dbg_level_show, dbg_level_store); ++ ++/** ++ * This function is called during module intialization to verify that ++ * the module parameters are in a valid state. ++ */ ++static int check_parameters(dwc_otg_core_if_t *core_if) ++{ ++ int i; ++ int retval = 0; ++ ++/* Checks if the parameter is outside of its valid range of values */ ++#define DWC_OTG_PARAM_TEST(_param_, _low_, _high_) \ ++ ((dwc_otg_module_params._param_ < (_low_)) || \ ++ (dwc_otg_module_params._param_ > (_high_))) ++ ++/* If the parameter has been set by the user, check that the parameter value is ++ * within the value range of values. If not, report a module error. */ ++#define DWC_OTG_PARAM_ERR(_param_, _low_, _high_, _string_) \ ++ do { \ ++ if (dwc_otg_module_params._param_ != -1) { \ ++ if (DWC_OTG_PARAM_TEST(_param_, (_low_), (_high_))) { \ ++ DWC_ERROR("`%d' invalid for parameter `%s'\n", \ ++ dwc_otg_module_params._param_, _string_); \ ++ dwc_otg_module_params._param_ = dwc_param_##_param_##_default; \ ++ retval++; \ ++ } \ ++ } \ ++ } while (0) ++ ++ DWC_OTG_PARAM_ERR(opt,0,1,"opt"); ++ DWC_OTG_PARAM_ERR(otg_cap,0,2,"otg_cap"); ++ DWC_OTG_PARAM_ERR(dma_enable,0,1,"dma_enable"); ++ DWC_OTG_PARAM_ERR(dma_desc_enable,0,1,"dma_desc_enable"); ++ DWC_OTG_PARAM_ERR(speed,0,1,"speed"); ++ DWC_OTG_PARAM_ERR(host_support_fs_ls_low_power,0,1,"host_support_fs_ls_low_power"); ++ DWC_OTG_PARAM_ERR(host_ls_low_power_phy_clk,0,1,"host_ls_low_power_phy_clk"); ++ DWC_OTG_PARAM_ERR(enable_dynamic_fifo,0,1,"enable_dynamic_fifo"); ++ DWC_OTG_PARAM_ERR(data_fifo_size,32,32768,"data_fifo_size"); ++ DWC_OTG_PARAM_ERR(dev_rx_fifo_size,16,32768,"dev_rx_fifo_size"); ++ DWC_OTG_PARAM_ERR(dev_nperio_tx_fifo_size,16,32768,"dev_nperio_tx_fifo_size"); ++ DWC_OTG_PARAM_ERR(host_rx_fifo_size,16,32768,"host_rx_fifo_size"); ++ DWC_OTG_PARAM_ERR(host_nperio_tx_fifo_size,16,32768,"host_nperio_tx_fifo_size"); ++ DWC_OTG_PARAM_ERR(host_perio_tx_fifo_size,16,32768,"host_perio_tx_fifo_size"); ++ DWC_OTG_PARAM_ERR(max_transfer_size,2047,524288,"max_transfer_size"); ++ DWC_OTG_PARAM_ERR(max_packet_count,15,511,"max_packet_count"); ++ DWC_OTG_PARAM_ERR(host_channels,1,16,"host_channels"); ++ DWC_OTG_PARAM_ERR(dev_endpoints,1,15,"dev_endpoints"); ++ DWC_OTG_PARAM_ERR(phy_type,0,2,"phy_type"); ++ DWC_OTG_PARAM_ERR(phy_ulpi_ddr,0,1,"phy_ulpi_ddr"); ++ DWC_OTG_PARAM_ERR(phy_ulpi_ext_vbus,0,1,"phy_ulpi_ext_vbus"); ++ DWC_OTG_PARAM_ERR(i2c_enable,0,1,"i2c_enable"); ++ DWC_OTG_PARAM_ERR(ulpi_fs_ls,0,1,"ulpi_fs_ls"); ++ DWC_OTG_PARAM_ERR(ts_dline,0,1,"ts_dline"); ++ ++ if (dwc_otg_module_params.dma_burst_size != -1) { ++ if (DWC_OTG_PARAM_TEST(dma_burst_size,1,1) && ++ DWC_OTG_PARAM_TEST(dma_burst_size,4,4) && ++ DWC_OTG_PARAM_TEST(dma_burst_size,8,8) && ++ DWC_OTG_PARAM_TEST(dma_burst_size,16,16) && ++ DWC_OTG_PARAM_TEST(dma_burst_size,32,32) && ++ DWC_OTG_PARAM_TEST(dma_burst_size,64,64) && ++ DWC_OTG_PARAM_TEST(dma_burst_size,128,128) && ++ DWC_OTG_PARAM_TEST(dma_burst_size,256,256)) { ++ DWC_ERROR("`%d' invalid for parameter `dma_burst_size'\n", ++ dwc_otg_module_params.dma_burst_size); ++ dwc_otg_module_params.dma_burst_size = 32; ++ retval++; ++ } ++ ++ { ++ uint8_t brst_sz = 0; ++ while(dwc_otg_module_params.dma_burst_size > 1) { ++ brst_sz ++; ++ dwc_otg_module_params.dma_burst_size >>= 1; ++ } ++ dwc_otg_module_params.dma_burst_size = 
brst_sz; ++ } ++ } ++ ++ if (dwc_otg_module_params.phy_utmi_width != -1) { ++ if (DWC_OTG_PARAM_TEST(phy_utmi_width, 8, 8) && ++ DWC_OTG_PARAM_TEST(phy_utmi_width, 16, 16)) { ++ DWC_ERROR("`%d' invalid for parameter `phy_utmi_width'\n", ++ dwc_otg_module_params.phy_utmi_width); ++ dwc_otg_module_params.phy_utmi_width = 16; ++ retval++; ++ } ++ } ++ ++ for (i = 0; i < 15; i++) { ++ /** @todo should be like above */ ++ //DWC_OTG_PARAM_ERR(dev_perio_tx_fifo_size[i], 4, 768, "dev_perio_tx_fifo_size"); ++ if (dwc_otg_module_params.dev_perio_tx_fifo_size[i] != -1) { ++ if (DWC_OTG_PARAM_TEST(dev_perio_tx_fifo_size[i], 4, 768)) { ++ DWC_ERROR("`%d' invalid for parameter `%s_%d'\n", ++ dwc_otg_module_params.dev_perio_tx_fifo_size[i], "dev_perio_tx_fifo_size", i); ++ dwc_otg_module_params.dev_perio_tx_fifo_size[i] = dwc_param_dev_perio_tx_fifo_size_default; ++ retval++; ++ } ++ } ++ } ++ ++ DWC_OTG_PARAM_ERR(en_multiple_tx_fifo, 0, 1, "en_multiple_tx_fifo"); ++ ++ for (i = 0; i < 15; i++) { ++ /** @todo should be like above */ ++ //DWC_OTG_PARAM_ERR(dev_tx_fifo_size[i], 4, 768, "dev_tx_fifo_size"); ++ if (dwc_otg_module_params.dev_tx_fifo_size[i] != -1) { ++ if (DWC_OTG_PARAM_TEST(dev_tx_fifo_size[i], 4, 768)) { ++ DWC_ERROR("`%d' invalid for parameter `%s_%d'\n", ++ dwc_otg_module_params.dev_tx_fifo_size[i], "dev_tx_fifo_size", i); ++ dwc_otg_module_params.dev_tx_fifo_size[i] = dwc_param_dev_tx_fifo_size_default; ++ retval++; ++ } ++ } ++ } ++ ++ DWC_OTG_PARAM_ERR(thr_ctl, 0, 7, "thr_ctl"); ++ DWC_OTG_PARAM_ERR(tx_thr_length, 8, 128, "tx_thr_length"); ++ DWC_OTG_PARAM_ERR(rx_thr_length, 8, 128, "rx_thr_length"); ++ ++ DWC_OTG_PARAM_ERR(pti_enable,0,1,"pti_enable"); ++ DWC_OTG_PARAM_ERR(mpi_enable,0,1,"mpi_enable"); ++ ++ /* At this point, all module parameters that have been set by the user ++ * are valid, and those that have not are left unset. Now set their ++ * default values and/or check the parameters against the hardware ++ * configurations of the OTG core. */ ++ ++/* This sets the parameter to the default value if it has not been set by the ++ * user */ ++#define DWC_OTG_PARAM_SET_DEFAULT(_param_) \ ++ ({ \ ++ int changed = 1; \ ++ if (dwc_otg_module_params._param_ == -1) { \ ++ changed = 0; \ ++ dwc_otg_module_params._param_ = dwc_param_##_param_##_default; \ ++ } \ ++ changed; \ ++ }) ++ ++/* This checks the macro agains the hardware configuration to see if it is ++ * valid. It is possible that the default value could be invalid. In this ++ * case, it will report a module error if the user touched the parameter. ++ * Otherwise it will adjust the value without any error. */ ++#define DWC_OTG_PARAM_CHECK_VALID(_param_, _str_, _is_valid_, _set_valid_) \ ++ ({ \ ++ int changed = DWC_OTG_PARAM_SET_DEFAULT(_param_); \ ++ int error = 0; \ ++ if (!(_is_valid_)) { \ ++ if (changed) { \ ++ DWC_ERROR("`%d' invalid for parameter `%s'. 
Check HW configuration.\n", dwc_otg_module_params._param_, _str_); \ ++ error = 1; \ ++ } \ ++ dwc_otg_module_params._param_ = (_set_valid_); \ ++ } \ ++ error; \ ++ }) ++ ++ /* OTG Cap */ ++ retval += DWC_OTG_PARAM_CHECK_VALID(otg_cap, "otg_cap", ++ ({ ++ int valid; ++ valid = 1; ++ switch (dwc_otg_module_params.otg_cap) { ++ case DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE: ++ if (core_if->hwcfg2.b.op_mode != DWC_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG) ++ valid = 0; ++ break; ++ case DWC_OTG_CAP_PARAM_SRP_ONLY_CAPABLE: ++ if ((core_if->hwcfg2.b.op_mode != DWC_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG) && ++ (core_if->hwcfg2.b.op_mode != DWC_HWCFG2_OP_MODE_SRP_ONLY_CAPABLE_OTG) && ++ (core_if->hwcfg2.b.op_mode != DWC_HWCFG2_OP_MODE_SRP_CAPABLE_DEVICE) && ++ (core_if->hwcfg2.b.op_mode != DWC_HWCFG2_OP_MODE_SRP_CAPABLE_HOST)) { ++ valid = 0; ++ } ++ break; ++ case DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE: ++ /* always valid */ ++ break; ++ } ++ valid; ++ }), ++ (((core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG) || ++ (core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_SRP_ONLY_CAPABLE_OTG) || ++ (core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_SRP_CAPABLE_DEVICE) || ++ (core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_SRP_CAPABLE_HOST)) ? ++ DWC_OTG_CAP_PARAM_SRP_ONLY_CAPABLE : ++ DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE)); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(dma_enable, "dma_enable", ++ ((dwc_otg_module_params.dma_enable == 1) && (core_if->hwcfg2.b.architecture == 0)) ? 0 : 1, ++ 0); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(dma_desc_enable, "dma_desc_enable", ++ ((dwc_otg_module_params.dma_desc_enable == 1) && ++ ((dwc_otg_module_params.dma_enable == 0) || (core_if->hwcfg4.b.desc_dma == 0))) ? 0 : 1, ++ 0); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(opt, "opt", 1, 0); ++ ++ DWC_OTG_PARAM_SET_DEFAULT(dma_burst_size); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(host_support_fs_ls_low_power, ++ "host_support_fs_ls_low_power", ++ 1, 0); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(enable_dynamic_fifo, ++ "enable_dynamic_fifo", ++ ((dwc_otg_module_params.enable_dynamic_fifo == 0) || ++ (core_if->hwcfg2.b.dynamic_fifo == 1)), 0); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(data_fifo_size, ++ "data_fifo_size", ++ (dwc_otg_module_params.data_fifo_size <= core_if->hwcfg3.b.dfifo_depth), ++ core_if->hwcfg3.b.dfifo_depth); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(dev_rx_fifo_size, ++ "dev_rx_fifo_size", ++ (dwc_otg_module_params.dev_rx_fifo_size <= dwc_read_reg32(&core_if->core_global_regs->grxfsiz)), ++ dwc_read_reg32(&core_if->core_global_regs->grxfsiz)); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(dev_nperio_tx_fifo_size, ++ "dev_nperio_tx_fifo_size", ++ (dwc_otg_module_params.dev_nperio_tx_fifo_size <= (dwc_read_reg32(&core_if->core_global_regs->gnptxfsiz) >> 16)), ++ (dwc_read_reg32(&core_if->core_global_regs->gnptxfsiz) >> 16)); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(host_rx_fifo_size, ++ "host_rx_fifo_size", ++ (dwc_otg_module_params.host_rx_fifo_size <= dwc_read_reg32(&core_if->core_global_regs->grxfsiz)), ++ dwc_read_reg32(&core_if->core_global_regs->grxfsiz)); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(host_nperio_tx_fifo_size, ++ "host_nperio_tx_fifo_size", ++ (dwc_otg_module_params.host_nperio_tx_fifo_size <= (dwc_read_reg32(&core_if->core_global_regs->gnptxfsiz) >> 16)), ++ (dwc_read_reg32(&core_if->core_global_regs->gnptxfsiz) >> 16)); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(host_perio_tx_fifo_size, ++ "host_perio_tx_fifo_size", ++ (dwc_otg_module_params.host_perio_tx_fifo_size <= 
((dwc_read_reg32(&core_if->core_global_regs->hptxfsiz) >> 16))), ++ ((dwc_read_reg32(&core_if->core_global_regs->hptxfsiz) >> 16))); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(max_transfer_size, ++ "max_transfer_size", ++ (dwc_otg_module_params.max_transfer_size < (1 << (core_if->hwcfg3.b.xfer_size_cntr_width + 11))), ++ ((1 << (core_if->hwcfg3.b.xfer_size_cntr_width + 11)) - 1)); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(max_packet_count, ++ "max_packet_count", ++ (dwc_otg_module_params.max_packet_count < (1 << (core_if->hwcfg3.b.packet_size_cntr_width + 4))), ++ ((1 << (core_if->hwcfg3.b.packet_size_cntr_width + 4)) - 1)); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(host_channels, ++ "host_channels", ++ (dwc_otg_module_params.host_channels <= (core_if->hwcfg2.b.num_host_chan + 1)), ++ (core_if->hwcfg2.b.num_host_chan + 1)); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(dev_endpoints, ++ "dev_endpoints", ++ (dwc_otg_module_params.dev_endpoints <= (core_if->hwcfg2.b.num_dev_ep)), ++ core_if->hwcfg2.b.num_dev_ep); ++ ++/* ++ * Define the following to disable the FS PHY Hardware checking. This is for ++ * internal testing only. ++ * ++ * #define NO_FS_PHY_HW_CHECKS ++ */ ++ ++#ifdef NO_FS_PHY_HW_CHECKS ++ retval += DWC_OTG_PARAM_CHECK_VALID(phy_type, ++ "phy_type", 1, 0); ++#else ++ retval += DWC_OTG_PARAM_CHECK_VALID(phy_type, ++ "phy_type", ++ ({ ++ int valid = 0; ++ if ((dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_UTMI) && ++ ((core_if->hwcfg2.b.hs_phy_type == 1) || ++ (core_if->hwcfg2.b.hs_phy_type == 3))) { ++ valid = 1; ++ } ++ else if ((dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_ULPI) && ++ ((core_if->hwcfg2.b.hs_phy_type == 2) || ++ (core_if->hwcfg2.b.hs_phy_type == 3))) { ++ valid = 1; ++ } ++ else if ((dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_FS) && ++ (core_if->hwcfg2.b.fs_phy_type == 1)) { ++ valid = 1; ++ } ++ valid; ++ }), ++ ({ ++ int set = DWC_PHY_TYPE_PARAM_FS; ++ if (core_if->hwcfg2.b.hs_phy_type) { ++ if ((core_if->hwcfg2.b.hs_phy_type == 3) || ++ (core_if->hwcfg2.b.hs_phy_type == 1)) { ++ set = DWC_PHY_TYPE_PARAM_UTMI; ++ } ++ else { ++ set = DWC_PHY_TYPE_PARAM_ULPI; ++ } ++ } ++ set; ++ })); ++#endif ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(speed, "speed", ++ (dwc_otg_module_params.speed == 0) && (dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_FS) ? 0 : 1, ++ dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_FS ? 1 : 0); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(host_ls_low_power_phy_clk, ++ "host_ls_low_power_phy_clk", ++ ((dwc_otg_module_params.host_ls_low_power_phy_clk == DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ) && (dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_FS) ? 0 : 1), ++ ((dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_FS) ? DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ : DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ)); ++ ++ DWC_OTG_PARAM_SET_DEFAULT(phy_ulpi_ddr); ++ DWC_OTG_PARAM_SET_DEFAULT(phy_ulpi_ext_vbus); ++ DWC_OTG_PARAM_SET_DEFAULT(phy_utmi_width); ++ DWC_OTG_PARAM_SET_DEFAULT(ulpi_fs_ls); ++ DWC_OTG_PARAM_SET_DEFAULT(ts_dline); ++ ++#ifdef NO_FS_PHY_HW_CHECKS ++ retval += DWC_OTG_PARAM_CHECK_VALID(i2c_enable, "i2c_enable", 1, 0); ++#else ++ retval += DWC_OTG_PARAM_CHECK_VALID(i2c_enable, ++ "i2c_enable", ++ (dwc_otg_module_params.i2c_enable == 1) && (core_if->hwcfg3.b.i2c == 0) ? 
0 : 1, ++ 0); ++#endif ++ ++ for (i = 0; i < 15; i++) { ++ int changed = 1; ++ int error = 0; ++ ++ if (dwc_otg_module_params.dev_perio_tx_fifo_size[i] == -1) { ++ changed = 0; ++ dwc_otg_module_params.dev_perio_tx_fifo_size[i] = dwc_param_dev_perio_tx_fifo_size_default; ++ } ++ if (!(dwc_otg_module_params.dev_perio_tx_fifo_size[i] <= (dwc_read_reg32(&core_if->core_global_regs->dptxfsiz_dieptxf[i])))) { ++ if (changed) { ++ DWC_ERROR("`%d' invalid for parameter `dev_perio_fifo_size_%d'. Check HW configuration.\n", dwc_otg_module_params.dev_perio_tx_fifo_size[i], i); ++ error = 1; ++ } ++ dwc_otg_module_params.dev_perio_tx_fifo_size[i] = dwc_read_reg32(&core_if->core_global_regs->dptxfsiz_dieptxf[i]); ++ } ++ retval += error; ++ } ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(en_multiple_tx_fifo, "en_multiple_tx_fifo", ++ ((dwc_otg_module_params.en_multiple_tx_fifo == 1) && (core_if->hwcfg4.b.ded_fifo_en == 0)) ? 0 : 1, ++ 0); ++ ++ for (i = 0; i < 15; i++) { ++ int changed = 1; ++ int error = 0; ++ ++ if (dwc_otg_module_params.dev_tx_fifo_size[i] == -1) { ++ changed = 0; ++ dwc_otg_module_params.dev_tx_fifo_size[i] = dwc_param_dev_tx_fifo_size_default; ++ } ++ if (!(dwc_otg_module_params.dev_tx_fifo_size[i] <= (dwc_read_reg32(&core_if->core_global_regs->dptxfsiz_dieptxf[i])))) { ++ if (changed) { ++ DWC_ERROR("%d' invalid for parameter `dev_perio_fifo_size_%d'. Check HW configuration.\n", dwc_otg_module_params.dev_tx_fifo_size[i], i); ++ error = 1; ++ } ++ dwc_otg_module_params.dev_tx_fifo_size[i] = dwc_read_reg32(&core_if->core_global_regs->dptxfsiz_dieptxf[i]); ++ } ++ retval += error; ++ } ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(thr_ctl, "thr_ctl", ++ ((dwc_otg_module_params.thr_ctl != 0) && ((dwc_otg_module_params.dma_enable == 0) || (core_if->hwcfg4.b.ded_fifo_en == 0))) ? 0 : 1, ++ 0); ++ ++ DWC_OTG_PARAM_SET_DEFAULT(tx_thr_length); ++ DWC_OTG_PARAM_SET_DEFAULT(rx_thr_length); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(pti_enable, "pti_enable", ++ ((dwc_otg_module_params.pti_enable == 0) || ((dwc_otg_module_params.pti_enable == 1) && (core_if->snpsid >= 0x4F54272A))) ? 1 : 0, ++ 0); ++ ++ retval += DWC_OTG_PARAM_CHECK_VALID(mpi_enable, "mpi_enable", ++ ((dwc_otg_module_params.mpi_enable == 0) || ((dwc_otg_module_params.mpi_enable == 1) && (core_if->hwcfg2.b.multi_proc_int == 1))) ? 1 : 0, ++ 0); ++ return retval; ++} ++ ++/** ++ * This function is the top level interrupt handler for the Common ++ * (Device and host modes) interrupts. ++ */ ++static irqreturn_t dwc_otg_common_irq(int irq, void *dev ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) ++ , struct pt_regs *r ++#endif ++ ) ++{ ++ dwc_otg_device_t *otg_dev = dev; ++ int32_t retval = IRQ_NONE; ++ ++ retval = dwc_otg_handle_common_intr(otg_dev->core_if); ++ return IRQ_RETVAL(retval); ++} ++ ++/** ++ * This function is called when a platform_device is unregistered with the ++ * dwc_otg_driver. This happens, for example, when the rmmod command is ++ * executed. The device may or may not be electrically present. If it is ++ * present, the driver stops device processing. Any resources used on behalf ++ * of this device are freed. ++ * ++ * @param[in] pdev ++ */ ++static int dwc_otg_driver_remove(struct platform_device *pdev) ++{ ++ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); ++ DWC_DEBUGPL(DBG_ANY, "%s(%p)\n", __func__, pdev); ++ ++ if (!otg_dev) { ++ /* Memory allocation for the dwc_otg_device failed. 
*/ ++ DWC_DEBUGPL(DBG_ANY, "%s: otg_dev NULL!\n", __func__); ++ return 0; ++ } ++ ++ /* ++ * Free the IRQ ++ */ ++ if (otg_dev->common_irq_installed) { ++ free_irq(otg_dev->irq, otg_dev); ++ } ++ ++#ifndef DWC_DEVICE_ONLY ++ if (otg_dev->hcd) { ++ dwc_otg_hcd_remove(&pdev->dev); ++ } else { ++ DWC_DEBUGPL(DBG_ANY, "%s: otg_dev->hcd NULL!\n", __func__); ++ return 0; ++ } ++#endif ++ ++#ifndef DWC_HOST_ONLY ++ if (otg_dev->pcd) { ++ dwc_otg_pcd_remove(&pdev->dev); ++ } ++#endif ++ if (otg_dev->core_if) { ++ dwc_otg_cil_remove(otg_dev->core_if); ++ } ++ ++ /* ++ * Remove the device attributes ++ */ ++ dwc_otg_attr_remove(otg_dev->parent); ++ ++ /* Disable USB port */ ++ dwc_write_reg32((uint32_t *)((uint8_t *)otg_dev->base + 0xe00), 0xf); ++ ++ /* ++ * Return the memory. ++ */ ++ if (otg_dev->base) { ++ iounmap(otg_dev->base); ++ } ++ ++ if (otg_dev->phys_addr != 0) { ++ release_mem_region(otg_dev->phys_addr, otg_dev->base_len); ++ } ++ ++ kfree(otg_dev); ++ ++ /* ++ * Clear the drvdata pointer. ++ */ ++ platform_set_drvdata(pdev, NULL); ++ ++ return 0; ++} ++ ++/** ++ * This function is called when an platform_device is bound to a ++ * dwc_otg_driver. It creates the driver components required to ++ * control the device (CIL, HCD, and PCD) and it initializes the ++ * device. The driver components are stored in a dwc_otg_device ++ * structure. A reference to the dwc_otg_device is saved in the ++ * platform_device. This allows the driver to access the dwc_otg_device ++ * structure on subsequent calls to driver methods for this device. ++ * ++ * @param[in] pdev platform_device definition ++ */ ++static int dwc_otg_driver_probe(struct platform_device *pdev) ++{ ++ int retval = 0; ++ uint32_t snpsid; ++ dwc_otg_device_t *otg_dev; ++ struct resource *res; ++ ++ dev_dbg(&pdev->dev, "dwc_otg_driver_probe(%p)\n", pdev); ++ ++ otg_dev= kzalloc(sizeof(dwc_otg_device_t), GFP_KERNEL); ++ if (!otg_dev) { ++ dev_err(&pdev->dev, "kmalloc of dwc_otg_device failed\n"); ++ retval = -ENOMEM; ++ goto fail; ++ } ++ ++ otg_dev->reg_offset = 0xFFFFFFFF; ++ ++ /* ++ * Retrieve the memory and IRQ resources. ++ */ ++ otg_dev->irq = platform_get_irq(pdev, 0); ++ if (otg_dev->irq <= 0) { ++ dev_err(&pdev->dev, "no device irq\n"); ++ retval = -EINVAL; ++ goto fail; ++ } ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (res == NULL) { ++ dev_err(&pdev->dev, "no CSR address\n"); ++ retval = -EINVAL; ++ goto fail; ++ } ++ ++ otg_dev->parent = &pdev->dev; ++ otg_dev->phys_addr = res->start; ++ otg_dev->base_len = res->end - res->start + 1; ++ if (request_mem_region(otg_dev->phys_addr, ++ otg_dev->base_len, ++ dwc_driver_name) == NULL) { ++ dev_err(&pdev->dev, "request_mem_region failed\n"); ++ retval = -EBUSY; ++ goto fail; ++ } ++ ++ /* ++ * Map the DWC_otg Core memory into virtual address space. ++ */ ++ otg_dev->base = ioremap(otg_dev->phys_addr, otg_dev->base_len); ++ if (!otg_dev->base) { ++ dev_err(&pdev->dev, "ioremap() failed\n"); ++ retval = -ENOMEM; ++ goto fail; ++ } ++ dev_dbg(&pdev->dev, "mapped base=0x%08x\n", (unsigned) otg_dev->base); ++ ++ /* Enable USB Port */ ++ dwc_write_reg32((uint32_t *)((uint8_t *)otg_dev->base + 0xe00), 0); ++ ++ /* ++ * Attempt to ensure this device is really a DWC_otg Controller. ++ * Read and verify the SNPSID register contents. The value should be ++ * 0x45F42XXX, which corresponds to "OT2", as in "OTG version 2.XX". 
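The probe path above claims its resources in the usual order: platform_get_irq(), platform_get_resource(IORESOURCE_MEM, 0), request_mem_region(), then ioremap(). A condensed sketch of that order, with hypothetical my_* names, shortened error handling, and no claim to match this driver's exact fields:

/* Hedged sketch of the classic resource-claiming order used above
 * (hypothetical my_probe/my_dev names; pre-devm era, like this patch). */
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct my_dev {
	int irq;
	resource_size_t start;
	resource_size_t len;
	void __iomem *base;
};

static int my_probe(struct platform_device *pdev)
{
	struct my_dev *d = kzalloc(sizeof(*d), GFP_KERNEL);
	struct resource *res;

	if (!d)
		return -ENOMEM;

	d->irq = platform_get_irq(pdev, 0);			/* IRQ first */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (d->irq <= 0 || !res)
		goto err_free;

	d->start = res->start;
	d->len = resource_size(res);
	if (!request_mem_region(d->start, d->len, "my_dev"))	/* claim window */
		goto err_free;

	d->base = ioremap(d->start, d->len);			/* then map it */
	if (!d->base)
		goto err_release;

	platform_set_drvdata(pdev, d);
	return 0;

err_release:
	release_mem_region(d->start, d->len);
err_free:
	kfree(d);
	return -ENODEV;
}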
++ */ ++ snpsid = dwc_read_reg32((uint32_t *)((uint8_t *)otg_dev->base + 0x40)); ++ ++ if ((snpsid & 0xFFFFF000) != OTG_CORE_REV_2_00) { ++ dev_err(&pdev->dev, "Bad value for SNPSID: 0x%08x\n", snpsid); ++ retval = -EINVAL; ++ goto fail; ++ } ++ ++ DWC_PRINT("Core Release: %x.%x%x%x\n", ++ (snpsid >> 12 & 0xF), ++ (snpsid >> 8 & 0xF), ++ (snpsid >> 4 & 0xF), ++ (snpsid & 0xF)); ++ ++ /* ++ * Initialize driver data to point to the global DWC_otg ++ * Device structure. ++ */ ++ platform_set_drvdata(pdev, otg_dev); ++ dev_dbg(&pdev->dev, "dwc_otg_device=0x%p\n", otg_dev); ++ ++ ++ otg_dev->core_if = dwc_otg_cil_init(otg_dev->base, ++ &dwc_otg_module_params); ++ ++ otg_dev->core_if->snpsid = snpsid; ++ ++ if (!otg_dev->core_if) { ++ dev_err(&pdev->dev, "CIL initialization failed!\n"); ++ retval = -ENOMEM; ++ goto fail; ++ } ++ ++ /* ++ * Validate parameter values. ++ */ ++ if (check_parameters(otg_dev->core_if)) { ++ retval = -EINVAL; ++ goto fail; ++ } ++ ++ /* ++ * Create Device Attributes in sysfs ++ */ ++ //dwc_otg_attr_create(&pdev->dev); ++ ++ /* ++ * Disable the global interrupt until all the interrupt ++ * handlers are installed. ++ */ ++ dwc_otg_disable_global_interrupts(otg_dev->core_if); ++ ++ /* ++ * Install the interrupt handler for the common interrupts before ++ * enabling common interrupts in core_init below. ++ */ ++ DWC_DEBUGPL(DBG_CIL, "registering (common) handler for irq%d\n", ++ otg_dev->irq); ++ retval = request_irq(otg_dev->irq, dwc_otg_common_irq, ++ IRQF_SHARED, "dwc_otg", otg_dev); ++ if (retval) { ++ DWC_ERROR("request of irq%d failed\n", otg_dev->irq); ++ retval = -EBUSY; ++ goto fail; ++ } else { ++ otg_dev->common_irq_installed = 1; ++ } ++ ++ /* ++ * Initialize the DWC_otg core. ++ */ ++ dwc_otg_core_init(otg_dev->core_if); ++ ++#ifndef DWC_HOST_ONLY ++ /* ++ * Initialize the PCD ++ */ ++ retval = dwc_otg_pcd_init(&pdev->dev); ++ if (retval != 0) { ++ DWC_ERROR("dwc_otg_pcd_init failed\n"); ++ otg_dev->pcd = NULL; ++ goto fail; ++ } ++#endif ++#ifndef DWC_DEVICE_ONLY ++ /* ++ * Initialize the HCD ++ */ ++ retval = dwc_otg_hcd_init(&pdev->dev); ++ if (retval != 0) { ++ DWC_ERROR("dwc_otg_hcd_init failed\n"); ++ otg_dev->hcd = NULL; ++ goto fail; ++ } ++#endif ++ ++ /* ++ * Enable the global interrupt after all the interrupt ++ * handlers are installed. ++ */ ++ dwc_otg_enable_global_interrupts(otg_dev->core_if); ++ ++ return 0; ++ ++ fail: ++ dwc_otg_driver_remove(pdev); ++ return retval; ++} ++ ++/** ++ * This structure defines the methods to be called by a bus driver ++ * during the lifecycle of a device on that bus. Both drivers and ++ * devices are registered with a bus driver. The bus driver matches ++ * devices to drivers based on information in the device and driver ++ * structures. ++ * ++ * The probe function is called when the bus driver matches a device ++ * to this driver. The remove function is called when a device is ++ * unregistered with the bus driver. ++ */ ++ ++static const struct of_device_id ralink_otg_match[] = { ++ { .compatible = "ralink,rt3050-otg" }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(of, ralink_otg_match); ++ ++static struct platform_driver dwc_otg_driver = { ++ .driver = { ++ .name = (char *)dwc_driver_name, ++ .of_match_table = ralink_otg_match, ++ }, ++ .probe = dwc_otg_driver_probe, ++ .remove = dwc_otg_driver_remove, ++}; ++ ++/** ++ * This function is called when the dwc_otg_driver is installed with the ++ * insmod command. It registers the dwc_otg_driver structure with the ++ * appropriate bus driver. 
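The registration flow this comment describes is the standard platform-driver skeleton; the init routine shown next additionally creates the version and debuglevel driver attributes. A generic sketch, with hypothetical my_* names rather than the dwc_otg symbols:

/* Generic sketch of the register/unregister lifecycle described above
 * (hypothetical my_* names, not the dwc_otg code itself). */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>

static const struct of_device_id my_match[] = {
	{ .compatible = "vendor,my-otg" },	/* matched against the DTS node */
	{ }
};
MODULE_DEVICE_TABLE(of, my_match);

static int my_probe(struct platform_device *pdev) { return 0; }
static int my_remove(struct platform_device *pdev) { return 0; }

static struct platform_driver my_driver = {
	.driver = {
		.name = "my-otg",
		.of_match_table = my_match,
	},
	.probe  = my_probe,
	.remove = my_remove,
};

static int __init my_init(void)
{
	return platform_driver_register(&my_driver);	/* probe runs on match */
}
module_init(my_init);

static void __exit my_exit(void)
{
	platform_driver_unregister(&my_driver);
}
module_exit(my_exit);

MODULE_LICENSE("GPL");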
This will cause the dwc_otg_driver_probe function ++ * to be called. In addition, the bus driver will automatically expose ++ * attributes defined for the device and driver in the special sysfs file ++ * system. ++ * ++ * @return ++ */ ++static int __init dwc_otg_driver_init(void) ++{ ++ int retval = 0; ++ int error; ++ ++ printk(KERN_INFO "%s: version %s\n", dwc_driver_name, DWC_DRIVER_VERSION); ++ ++ retval = platform_driver_register(&dwc_otg_driver); ++ if (retval) { ++ printk(KERN_ERR "%s retval=%d\n", __func__, retval); ++ return retval; ++ } ++ ++ error = driver_create_file(&dwc_otg_driver.driver, &driver_attr_version); ++ error = driver_create_file(&dwc_otg_driver.driver, &driver_attr_debuglevel); ++ ++ return retval; ++} ++module_init(dwc_otg_driver_init); ++ ++/** ++ * This function is called when the driver is removed from the kernel ++ * with the rmmod command. The driver unregisters itself with its bus ++ * driver. ++ * ++ */ ++static void __exit dwc_otg_driver_cleanup(void) ++{ ++ printk(KERN_DEBUG "dwc_otg_driver_cleanup()\n"); ++ ++ driver_remove_file(&dwc_otg_driver.driver, &driver_attr_debuglevel); ++ driver_remove_file(&dwc_otg_driver.driver, &driver_attr_version); ++ ++ platform_driver_unregister(&dwc_otg_driver); ++ ++ printk(KERN_INFO "%s module removed\n", dwc_driver_name); ++} ++module_exit(dwc_otg_driver_cleanup); ++ ++MODULE_DESCRIPTION(DWC_DRIVER_DESC); ++MODULE_AUTHOR("Synopsys Inc."); ++MODULE_LICENSE("GPL"); ++ ++module_param_named(otg_cap, dwc_otg_module_params.otg_cap, int, 0444); ++MODULE_PARM_DESC(otg_cap, "OTG Capabilities 0=HNP&SRP 1=SRP Only 2=None"); ++module_param_named(opt, dwc_otg_module_params.opt, int, 0444); ++MODULE_PARM_DESC(opt, "OPT Mode"); ++module_param_named(dma_enable, dwc_otg_module_params.dma_enable, int, 0444); ++MODULE_PARM_DESC(dma_enable, "DMA Mode 0=Slave 1=DMA enabled"); ++ ++module_param_named(dma_desc_enable, dwc_otg_module_params.dma_desc_enable, int, 0444); ++MODULE_PARM_DESC(dma_desc_enable, "DMA Desc Mode 0=Address DMA 1=DMA Descriptor enabled"); ++ ++module_param_named(dma_burst_size, dwc_otg_module_params.dma_burst_size, int, 0444); ++MODULE_PARM_DESC(dma_burst_size, "DMA Burst Size 1, 4, 8, 16, 32, 64, 128, 256"); ++module_param_named(speed, dwc_otg_module_params.speed, int, 0444); ++MODULE_PARM_DESC(speed, "Speed 0=High Speed 1=Full Speed"); ++module_param_named(host_support_fs_ls_low_power, dwc_otg_module_params.host_support_fs_ls_low_power, int, 0444); ++MODULE_PARM_DESC(host_support_fs_ls_low_power, "Support Low Power w/FS or LS 0=Support 1=Don't Support"); ++module_param_named(host_ls_low_power_phy_clk, dwc_otg_module_params.host_ls_low_power_phy_clk, int, 0444); ++MODULE_PARM_DESC(host_ls_low_power_phy_clk, "Low Speed Low Power Clock 0=48Mhz 1=6Mhz"); ++module_param_named(enable_dynamic_fifo, dwc_otg_module_params.enable_dynamic_fifo, int, 0444); ++MODULE_PARM_DESC(enable_dynamic_fifo, "0=cC Setting 1=Allow Dynamic Sizing"); ++module_param_named(data_fifo_size, dwc_otg_module_params.data_fifo_size, int, 0444); ++MODULE_PARM_DESC(data_fifo_size, "Total number of words in the data FIFO memory 32-32768"); ++module_param_named(dev_rx_fifo_size, dwc_otg_module_params.dev_rx_fifo_size, int, 0444); ++MODULE_PARM_DESC(dev_rx_fifo_size, "Number of words in the Rx FIFO 16-32768"); ++module_param_named(dev_nperio_tx_fifo_size, dwc_otg_module_params.dev_nperio_tx_fifo_size, int, 0444); ++MODULE_PARM_DESC(dev_nperio_tx_fifo_size, "Number of words in the non-periodic Tx FIFO 16-32768"); 
++module_param_named(dev_perio_tx_fifo_size_1, dwc_otg_module_params.dev_perio_tx_fifo_size[0], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_1, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_2, dwc_otg_module_params.dev_perio_tx_fifo_size[1], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_2, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_3, dwc_otg_module_params.dev_perio_tx_fifo_size[2], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_3, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_4, dwc_otg_module_params.dev_perio_tx_fifo_size[3], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_4, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_5, dwc_otg_module_params.dev_perio_tx_fifo_size[4], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_5, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_6, dwc_otg_module_params.dev_perio_tx_fifo_size[5], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_6, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_7, dwc_otg_module_params.dev_perio_tx_fifo_size[6], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_7, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_8, dwc_otg_module_params.dev_perio_tx_fifo_size[7], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_8, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_9, dwc_otg_module_params.dev_perio_tx_fifo_size[8], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_9, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_10, dwc_otg_module_params.dev_perio_tx_fifo_size[9], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_10, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_11, dwc_otg_module_params.dev_perio_tx_fifo_size[10], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_11, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_12, dwc_otg_module_params.dev_perio_tx_fifo_size[11], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_12, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_13, dwc_otg_module_params.dev_perio_tx_fifo_size[12], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_13, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_14, dwc_otg_module_params.dev_perio_tx_fifo_size[13], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_14, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(dev_perio_tx_fifo_size_15, dwc_otg_module_params.dev_perio_tx_fifo_size[14], int, 0444); ++MODULE_PARM_DESC(dev_perio_tx_fifo_size_15, "Number of words in the periodic Tx FIFO 4-768"); ++module_param_named(host_rx_fifo_size, dwc_otg_module_params.host_rx_fifo_size, int, 0444); ++MODULE_PARM_DESC(host_rx_fifo_size, "Number of words in the Rx FIFO 16-32768"); ++module_param_named(host_nperio_tx_fifo_size, dwc_otg_module_params.host_nperio_tx_fifo_size, int, 0444); ++MODULE_PARM_DESC(host_nperio_tx_fifo_size, "Number of words in the non-periodic Tx FIFO 16-32768"); ++module_param_named(host_perio_tx_fifo_size, dwc_otg_module_params.host_perio_tx_fifo_size, int, 0444); 
++MODULE_PARM_DESC(host_perio_tx_fifo_size, "Number of words in the host periodic Tx FIFO 16-32768"); ++module_param_named(max_transfer_size, dwc_otg_module_params.max_transfer_size, int, 0444); ++/** @todo Set the max to 512K, modify checks */ ++MODULE_PARM_DESC(max_transfer_size, "The maximum transfer size supported in bytes 2047-65535"); ++module_param_named(max_packet_count, dwc_otg_module_params.max_packet_count, int, 0444); ++MODULE_PARM_DESC(max_packet_count, "The maximum number of packets in a transfer 15-511"); ++module_param_named(host_channels, dwc_otg_module_params.host_channels, int, 0444); ++MODULE_PARM_DESC(host_channels, "The number of host channel registers to use 1-16"); ++module_param_named(dev_endpoints, dwc_otg_module_params.dev_endpoints, int, 0444); ++MODULE_PARM_DESC(dev_endpoints, "The number of endpoints in addition to EP0 available for device mode 1-15"); ++module_param_named(phy_type, dwc_otg_module_params.phy_type, int, 0444); ++MODULE_PARM_DESC(phy_type, "0=Reserved 1=UTMI+ 2=ULPI"); ++module_param_named(phy_utmi_width, dwc_otg_module_params.phy_utmi_width, int, 0444); ++MODULE_PARM_DESC(phy_utmi_width, "Specifies the UTMI+ Data Width 8 or 16 bits"); ++module_param_named(phy_ulpi_ddr, dwc_otg_module_params.phy_ulpi_ddr, int, 0444); ++MODULE_PARM_DESC(phy_ulpi_ddr, "ULPI at double or single data rate 0=Single 1=Double"); ++module_param_named(phy_ulpi_ext_vbus, dwc_otg_module_params.phy_ulpi_ext_vbus, int, 0444); ++MODULE_PARM_DESC(phy_ulpi_ext_vbus, "ULPI PHY using internal or external vbus 0=Internal"); ++module_param_named(i2c_enable, dwc_otg_module_params.i2c_enable, int, 0444); ++MODULE_PARM_DESC(i2c_enable, "FS PHY Interface"); ++module_param_named(ulpi_fs_ls, dwc_otg_module_params.ulpi_fs_ls, int, 0444); ++MODULE_PARM_DESC(ulpi_fs_ls, "ULPI PHY FS/LS mode only"); ++module_param_named(ts_dline, dwc_otg_module_params.ts_dline, int, 0444); ++MODULE_PARM_DESC(ts_dline, "Term select Dline pulsing for all PHYs"); ++module_param_named(debug, g_dbg_lvl, int, 0444); ++MODULE_PARM_DESC(debug, ""); ++ ++module_param_named(en_multiple_tx_fifo, dwc_otg_module_params.en_multiple_tx_fifo, int, 0444); ++MODULE_PARM_DESC(en_multiple_tx_fifo, "Dedicated Non Periodic Tx FIFOs 0=disabled 1=enabled"); ++module_param_named(dev_tx_fifo_size_1, dwc_otg_module_params.dev_tx_fifo_size[0], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_1, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_2, dwc_otg_module_params.dev_tx_fifo_size[1], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_2, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_3, dwc_otg_module_params.dev_tx_fifo_size[2], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_3, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_4, dwc_otg_module_params.dev_tx_fifo_size[3], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_4, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_5, dwc_otg_module_params.dev_tx_fifo_size[4], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_5, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_6, dwc_otg_module_params.dev_tx_fifo_size[5], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_6, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_7, dwc_otg_module_params.dev_tx_fifo_size[6], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_7, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_8, 
dwc_otg_module_params.dev_tx_fifo_size[7], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_8, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_9, dwc_otg_module_params.dev_tx_fifo_size[8], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_9, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_10, dwc_otg_module_params.dev_tx_fifo_size[9], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_10, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_11, dwc_otg_module_params.dev_tx_fifo_size[10], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_11, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_12, dwc_otg_module_params.dev_tx_fifo_size[11], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_12, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_13, dwc_otg_module_params.dev_tx_fifo_size[12], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_13, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_14, dwc_otg_module_params.dev_tx_fifo_size[13], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_14, "Number of words in the Tx FIFO 4-768"); ++module_param_named(dev_tx_fifo_size_15, dwc_otg_module_params.dev_tx_fifo_size[14], int, 0444); ++MODULE_PARM_DESC(dev_tx_fifo_size_15, "Number of words in the Tx FIFO 4-768"); ++ ++module_param_named(thr_ctl, dwc_otg_module_params.thr_ctl, int, 0444); ++MODULE_PARM_DESC(thr_ctl, "Thresholding enable flag bit 0 - non ISO Tx thr., 1 - ISO Tx thr., 2 - Rx thr.- bit 0=disabled 1=enabled"); ++module_param_named(tx_thr_length, dwc_otg_module_params.tx_thr_length, int, 0444); ++MODULE_PARM_DESC(tx_thr_length, "Tx Threshold length in 32 bit DWORDs"); ++module_param_named(rx_thr_length, dwc_otg_module_params.rx_thr_length, int, 0444); ++MODULE_PARM_DESC(rx_thr_length, "Rx Threshold length in 32 bit DWORDs"); ++ ++module_param_named(pti_enable, dwc_otg_module_params.pti_enable, int, 0444); ++MODULE_PARM_DESC(pti_enable, "Per Transfer Interrupt mode 0=disabled 1=enabled"); ++ ++module_param_named(mpi_enable, dwc_otg_module_params.mpi_enable, int, 0444); ++MODULE_PARM_DESC(mpi_enable, "Multiprocessor Interrupt mode 0=disabled 1=enabled"); ++ ++/** @page "Module Parameters" ++ * ++ * The following parameters may be specified when starting the module. ++ * These parameters define how the DWC_otg controller should be ++ * configured. Parameter values are passed to the CIL initialization ++ * function dwc_otg_cil_init ++ * ++ * Example: modprobe dwc_otg speed=1 otg_cap=1 ++ * ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++ ++*/ +--- /dev/null ++++ b/drivers/usb/dwc_otg/dwc_otg_driver.h +@@ -0,0 +1,83 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_driver.h $ ++ * $Revision: 1.2 $ ++ * $Date: 2008-11-21 05:39:15 $ ++ * $Change: 1064918 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. 
++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++ ++#ifndef __DWC_OTG_DRIVER_H__ ++#define __DWC_OTG_DRIVER_H__ ++ ++/** @file ++ * This file contains the interface to the Linux driver. ++ */ ++#include "dwc_otg_cil.h" ++ ++/* Type declarations */ ++struct dwc_otg_pcd; ++struct dwc_otg_hcd; ++ ++/** ++ * This structure is a wrapper that encapsulates the driver components used to ++ * manage a single DWC_otg controller. ++ */ ++typedef struct dwc_otg_device { ++ /** Base address returned from ioremap() */ ++ void *base; ++ ++ struct device *parent; ++ ++ /** Pointer to the core interface structure. */ ++ dwc_otg_core_if_t *core_if; ++ ++ /** Register offset for Diagnostic API. */ ++ uint32_t reg_offset; ++ ++ /** Pointer to the PCD structure. */ ++ struct dwc_otg_pcd *pcd; ++ ++ /** Pointer to the HCD structure. */ ++ struct dwc_otg_hcd *hcd; ++ ++ /** Flag to indicate whether the common IRQ handler is installed. */ ++ uint8_t common_irq_installed; ++ ++ /* Interrupt request number. */ ++ unsigned int irq; ++ ++ /* Physical address of Control and Status registers, used by ++ * release_mem_region(). ++ */ ++ resource_size_t phys_addr; ++ ++ /* Length of memory region, used by release_mem_region(). */ ++ unsigned long base_len; ++} dwc_otg_device_t; ++ ++#endif +--- /dev/null ++++ b/drivers/usb/dwc_otg/dwc_otg_hcd.c +@@ -0,0 +1,2852 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_hcd.c $ ++ * $Revision: 1.4 $ ++ * $Date: 2008-11-21 05:39:15 $ ++ * $Change: 1064940 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. 
You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++#ifndef DWC_DEVICE_ONLY ++ ++/** ++ * @file ++ * ++ * This file contains the implementation of the HCD. In Linux, the HCD ++ * implements the hc_driver API. ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "dwc_otg_driver.h" ++#include "dwc_otg_hcd.h" ++#include "dwc_otg_regs.h" ++ ++static const char dwc_otg_hcd_name[] = "dwc_otg"; ++ ++static const struct hc_driver dwc_otg_hc_driver = { ++ ++ .description = dwc_otg_hcd_name, ++ .product_desc = "DWC OTG Controller", ++ .hcd_priv_size = sizeof(dwc_otg_hcd_t), ++ ++ .irq = dwc_otg_hcd_irq, ++ ++ .flags = HCD_MEMORY | HCD_USB2, ++ ++ //.reset = ++ .start = dwc_otg_hcd_start, ++ //.suspend = ++ //.resume = ++ .stop = dwc_otg_hcd_stop, ++ ++ .urb_enqueue = dwc_otg_hcd_urb_enqueue, ++ .urb_dequeue = dwc_otg_hcd_urb_dequeue, ++ .endpoint_disable = dwc_otg_hcd_endpoint_disable, ++ ++ .get_frame_number = dwc_otg_hcd_get_frame_number, ++ ++ .hub_status_data = dwc_otg_hcd_hub_status_data, ++ .hub_control = dwc_otg_hcd_hub_control, ++ //.hub_suspend = ++ //.hub_resume = ++}; ++ ++/** ++ * Work queue function for starting the HCD when A-Cable is connected. ++ * The dwc_otg_hcd_start() must be called in a process context. ++ */ ++static void hcd_start_func( ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ void *_vp ++#else ++ struct work_struct *_work ++#endif ++ ) ++{ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ struct usb_hcd *usb_hcd = (struct usb_hcd *)_vp; ++#else ++ struct delayed_work *dw = container_of(_work, struct delayed_work, work); ++ struct dwc_otg_hcd *otg_hcd = container_of(dw, struct dwc_otg_hcd, start_work); ++ struct usb_hcd *usb_hcd = container_of((void *)otg_hcd, struct usb_hcd, hcd_priv); ++#endif ++ DWC_DEBUGPL(DBG_HCDV, "%s() %p\n", __func__, usb_hcd); ++ if (usb_hcd) { ++ dwc_otg_hcd_start(usb_hcd); ++ } ++} ++ ++/** ++ * HCD Callback function for starting the HCD when A-Cable is ++ * connected. 
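On newer kernels, hcd_start_func() above receives no context pointer at all; it recovers its context with two container_of() steps, from the work_struct to the delayed_work to the dwc_otg_hcd. A stripped-down sketch of that pattern, assuming the 2.6.20+ workqueue API, a hypothetical my_hcd type, and the system workqueue instead of the driver's private wq_otg:

/* Sketch of the container_of chain used by hcd_start_func()
 * (hypothetical my_hcd; assumes the >= 2.6.20 work_struct API). */
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>

struct my_hcd {
	struct delayed_work start_work;
	int started;
};

static void my_start_func(struct work_struct *work)
{
	struct delayed_work *dw = container_of(work, struct delayed_work, work);
	struct my_hcd *hcd = container_of(dw, struct my_hcd, start_work);

	hcd->started = 1;	/* process context: safe to sleep here */
}

static void my_schedule_start(struct my_hcd *hcd)
{
	INIT_DELAYED_WORK(&hcd->start_work, my_start_func);
	/* queue with a short delay, as the start callback below does */
	schedule_delayed_work(&hcd->start_work, msecs_to_jiffies(50));
}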
++ * ++ * @param p void pointer to the struct usb_hcd ++ */ ++static int32_t dwc_otg_hcd_start_cb(void *p) ++{ ++ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(p); ++ dwc_otg_core_if_t *core_if = dwc_otg_hcd->core_if; ++ hprt0_data_t hprt0; ++ ++ if (core_if->op_state == B_HOST) { ++ /* ++ * Reset the port. During a HNP mode switch the reset ++ * needs to occur within 1ms and have a duration of at ++ * least 50ms. ++ */ ++ hprt0.d32 = dwc_otg_read_hprt0(core_if); ++ hprt0.b.prtrst = 1; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ ((struct usb_hcd *)p)->self.is_b_host = 1; ++ } else { ++ ((struct usb_hcd *)p)->self.is_b_host = 0; ++ } ++ ++ /* Need to start the HCD in a non-interrupt context. */ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ INIT_WORK(&dwc_otg_hcd->start_work, hcd_start_func, p); ++// INIT_DELAYED_WORK(&dwc_otg_hcd->start_work, hcd_start_func, p); ++#else ++// INIT_WORK(&dwc_otg_hcd->start_work, hcd_start_func); ++ INIT_DELAYED_WORK(&dwc_otg_hcd->start_work, hcd_start_func); ++#endif ++// schedule_work(&dwc_otg_hcd->start_work); ++ queue_delayed_work(core_if->wq_otg, &dwc_otg_hcd->start_work, 50 * HZ / 1000); ++ ++ return 1; ++} ++ ++/** ++ * HCD Callback function for stopping the HCD. ++ * ++ * @param p void pointer to the struct usb_hcd ++ */ ++static int32_t dwc_otg_hcd_stop_cb(void *p) ++{ ++ struct usb_hcd *usb_hcd = (struct usb_hcd *)p; ++ DWC_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, p); ++ dwc_otg_hcd_stop(usb_hcd); ++ return 1; ++} ++ ++static void del_xfer_timers(dwc_otg_hcd_t *hcd) ++{ ++#ifdef DEBUG ++ int i; ++ int num_channels = hcd->core_if->core_params->host_channels; ++ for (i = 0; i < num_channels; i++) { ++ del_timer(&hcd->core_if->hc_xfer_timer[i]); ++ } ++#endif ++} ++ ++static void del_timers(dwc_otg_hcd_t *hcd) ++{ ++ del_xfer_timers(hcd); ++ del_timer(&hcd->conn_timer); ++} ++ ++/** ++ * Processes all the URBs in a single list of QHs. Completes them with ++ * -ETIMEDOUT and frees the QTD. ++ */ ++static void kill_urbs_in_qh_list(dwc_otg_hcd_t *hcd, struct list_head *qh_list) ++{ ++ struct list_head *qh_item; ++ dwc_otg_qh_t *qh; ++ struct list_head *qtd_item; ++ dwc_otg_qtd_t *qtd; ++ ++ list_for_each(qh_item, qh_list) { ++ qh = list_entry(qh_item, dwc_otg_qh_t, qh_list_entry); ++ for (qtd_item = qh->qtd_list.next; ++ qtd_item != &qh->qtd_list; ++ qtd_item = qh->qtd_list.next) { ++ qtd = list_entry(qtd_item, dwc_otg_qtd_t, qtd_list_entry); ++ if (qtd->urb != NULL) { ++ dwc_otg_hcd_complete_urb(hcd, qtd->urb, ++ -ETIMEDOUT); ++ } ++ dwc_otg_hcd_qtd_remove_and_free(hcd, qtd); ++ } ++ } ++} ++ ++/** ++ * Responds with an error status of ETIMEDOUT to all URBs in the non-periodic ++ * and periodic schedules. The QTD associated with each URB is removed from ++ * the schedule and freed. This function may be called when a disconnect is ++ * detected or when the HCD is being stopped. ++ */ ++static void kill_all_urbs(dwc_otg_hcd_t *hcd) ++{ ++ kill_urbs_in_qh_list(hcd, &hcd->non_periodic_sched_inactive); ++ kill_urbs_in_qh_list(hcd, &hcd->non_periodic_sched_active); ++ kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_inactive); ++ kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_ready); ++ kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_assigned); ++ kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_queued); ++} ++ ++/** ++ * HCD Callback function for disconnect of the HCD. 
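kill_urbs_in_qh_list() above frees QTDs while walking qh->qtd_list, which is why it re-reads qtd_list.next on every pass rather than using plain list_for_each(). The same removal-safe walk is often written with list_for_each_entry_safe(); a sketch with a hypothetical my_qtd type:

/* Equivalent removal-safe walk using list_for_each_entry_safe()
 * (hypothetical my_qtd type; not the driver's own helper). */
#include <linux/list.h>
#include <linux/slab.h>

struct my_qtd {
	struct list_head entry;
	void *urb;		/* stand-in for the URB pointer */
};

static void kill_all(struct list_head *qtd_list)
{
	struct my_qtd *qtd, *tmp;

	list_for_each_entry_safe(qtd, tmp, qtd_list, entry) {
		/* complete qtd->urb with -ETIMEDOUT here, as the HCD does */
		list_del(&qtd->entry);	/* safe: tmp already caches the next node */
		kfree(qtd);
	}
}

Either form is correct; the _safe variant simply caches the next node before the current one is deleted.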
++ * ++ * @param p void pointer to the struct usb_hcd ++ */ ++static int32_t dwc_otg_hcd_disconnect_cb(void *p) ++{ ++ gintsts_data_t intr; ++ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(p); ++ ++ //DWC_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, p); ++ ++ /* ++ * Set status flags for the hub driver. ++ */ ++ dwc_otg_hcd->flags.b.port_connect_status_change = 1; ++ dwc_otg_hcd->flags.b.port_connect_status = 0; ++ ++ /* ++ * Shutdown any transfers in process by clearing the Tx FIFO Empty ++ * interrupt mask and status bits and disabling subsequent host ++ * channel interrupts. ++ */ ++ intr.d32 = 0; ++ intr.b.nptxfempty = 1; ++ intr.b.ptxfempty = 1; ++ intr.b.hcintr = 1; ++ dwc_modify_reg32(&dwc_otg_hcd->core_if->core_global_regs->gintmsk, intr.d32, 0); ++ dwc_modify_reg32(&dwc_otg_hcd->core_if->core_global_regs->gintsts, intr.d32, 0); ++ ++ del_timers(dwc_otg_hcd); ++ ++ /* ++ * Turn off the vbus power only if the core has transitioned to device ++ * mode. If still in host mode, need to keep power on to detect a ++ * reconnection. ++ */ ++ if (dwc_otg_is_device_mode(dwc_otg_hcd->core_if)) { ++ if (dwc_otg_hcd->core_if->op_state != A_SUSPEND) { ++ hprt0_data_t hprt0 = { .d32=0 }; ++ DWC_PRINT("Disconnect: PortPower off\n"); ++ hprt0.b.prtpwr = 0; ++ dwc_write_reg32(dwc_otg_hcd->core_if->host_if->hprt0, hprt0.d32); ++ } ++ ++ dwc_otg_disable_host_interrupts(dwc_otg_hcd->core_if); ++ } ++ ++ /* Respond with an error status to all URBs in the schedule. */ ++ kill_all_urbs(dwc_otg_hcd); ++ ++ if (dwc_otg_is_host_mode(dwc_otg_hcd->core_if)) { ++ /* Clean up any host channels that were in use. */ ++ int num_channels; ++ int i; ++ dwc_hc_t *channel; ++ dwc_otg_hc_regs_t *hc_regs; ++ hcchar_data_t hcchar; ++ ++ num_channels = dwc_otg_hcd->core_if->core_params->host_channels; ++ ++ if (!dwc_otg_hcd->core_if->dma_enable) { ++ /* Flush out any channel requests in slave mode. */ ++ for (i = 0; i < num_channels; i++) { ++ channel = dwc_otg_hcd->hc_ptr_array[i]; ++ if (list_empty(&channel->hc_list_entry)) { ++ hc_regs = dwc_otg_hcd->core_if->host_if->hc_regs[i]; ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ if (hcchar.b.chen) { ++ hcchar.b.chen = 0; ++ hcchar.b.chdis = 1; ++ hcchar.b.epdir = 0; ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++ } ++ } ++ } ++ } ++ ++ for (i = 0; i < num_channels; i++) { ++ channel = dwc_otg_hcd->hc_ptr_array[i]; ++ if (list_empty(&channel->hc_list_entry)) { ++ hc_regs = dwc_otg_hcd->core_if->host_if->hc_regs[i]; ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ if (hcchar.b.chen) { ++ /* Halt the channel. */ ++ hcchar.b.chdis = 1; ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++ } ++ ++ dwc_otg_hc_cleanup(dwc_otg_hcd->core_if, channel); ++ list_add_tail(&channel->hc_list_entry, ++ &dwc_otg_hcd->free_hc_list); ++ } ++ } ++ } ++ ++ /* A disconnect will end the session so the B-Device is no ++ * longer a B-host. */ ++ ((struct usb_hcd *)p)->self.is_b_host = 0; ++ return 1; ++} ++ ++/** ++ * Connection timeout function. An OTG host is required to display a ++ * message if the device does not connect within 10 seconds. ++ */ ++void dwc_otg_hcd_connect_timeout(unsigned long ptr) ++{ ++ DWC_DEBUGPL(DBG_HCDV, "%s(%x)\n", __func__, (int)ptr); ++ DWC_PRINT("Connect Timeout\n"); ++ DWC_ERROR("Device Not Connected/Responding\n"); ++} ++ ++/** ++ * Start the connection timer. An OTG host is required to display a ++ * message if the device does not connect within 10 seconds. The ++ * timer is deleted if a port connect interrupt occurs before the ++ * timer expires. 
++ */ ++static void dwc_otg_hcd_start_connect_timer(dwc_otg_hcd_t *hcd) ++{ ++ init_timer(&hcd->conn_timer); ++ hcd->conn_timer.function = dwc_otg_hcd_connect_timeout; ++ hcd->conn_timer.data = 0; ++ hcd->conn_timer.expires = jiffies + (HZ * 10); ++ add_timer(&hcd->conn_timer); ++} ++ ++/** ++ * HCD Callback function for disconnect of the HCD. ++ * ++ * @param p void pointer to the struct usb_hcd ++ */ ++static int32_t dwc_otg_hcd_session_start_cb(void *p) ++{ ++ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(p); ++ DWC_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, p); ++ dwc_otg_hcd_start_connect_timer(dwc_otg_hcd); ++ return 1; ++} ++ ++/** ++ * HCD Callback structure for handling mode switching. ++ */ ++static dwc_otg_cil_callbacks_t hcd_cil_callbacks = { ++ .start = dwc_otg_hcd_start_cb, ++ .stop = dwc_otg_hcd_stop_cb, ++ .disconnect = dwc_otg_hcd_disconnect_cb, ++ .session_start = dwc_otg_hcd_session_start_cb, ++ .p = 0, ++}; ++ ++/** ++ * Reset tasklet function ++ */ ++static void reset_tasklet_func(unsigned long data) ++{ ++ dwc_otg_hcd_t *dwc_otg_hcd = (dwc_otg_hcd_t *)data; ++ dwc_otg_core_if_t *core_if = dwc_otg_hcd->core_if; ++ hprt0_data_t hprt0; ++ ++ DWC_DEBUGPL(DBG_HCDV, "USB RESET tasklet called\n"); ++ ++ hprt0.d32 = dwc_otg_read_hprt0(core_if); ++ hprt0.b.prtrst = 1; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ mdelay(60); ++ ++ hprt0.b.prtrst = 0; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ dwc_otg_hcd->flags.b.port_reset_change = 1; ++} ++ ++static struct tasklet_struct reset_tasklet = { ++ .next = NULL, ++ .state = 0, ++ .count = ATOMIC_INIT(0), ++ .func = reset_tasklet_func, ++ .data = 0, ++}; ++ ++/** ++ * Initializes the HCD. This function allocates memory for and initializes the ++ * static parts of the usb_hcd and dwc_otg_hcd structures. It also registers the ++ * USB bus with the core and calls the hc_driver->start() function. It returns ++ * a negative error on failure. ++ */ ++int dwc_otg_hcd_init(struct device *dev) ++{ ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(dev); ++ struct usb_hcd *hcd = NULL; ++ dwc_otg_hcd_t *dwc_otg_hcd = NULL; ++ ++ int num_channels; ++ int i; ++ dwc_hc_t *channel; ++ ++ int retval = 0; ++ ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD INIT\n"); ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ /* 2.6.20+ requires dev.dma_mask to be set prior to calling usb_create_hcd() */ ++ ++ /* Set device flags indicating whether the HCD supports DMA. */ ++ if (otg_dev->core_if->dma_enable) { ++ DWC_PRINT("Using DMA mode\n"); ++ dev->dma_mask = (void *)~0; ++ dev->coherent_dma_mask = ~0; ++ ++ if (otg_dev->core_if->dma_desc_enable) { ++ DWC_PRINT("Device using Descriptor DMA mode\n"); ++ } else { ++ DWC_PRINT("Device using Buffer DMA mode\n"); ++ } ++ } else { ++ DWC_PRINT("Using Slave mode\n"); ++ dev->dma_mask = (void *)0; ++ dev->coherent_dma_mask = 0; ++ } ++#endif ++ /* ++ * Allocate memory for the base HCD plus the DWC OTG HCD. ++ * Initialize the base HCD. ++ */ ++ hcd = usb_create_hcd(&dwc_otg_hc_driver, dev, dev_name(dev)); ++ if (!hcd) { ++ retval = -ENOMEM; ++ goto error1; ++ } ++ ++ dev_set_drvdata(dev, otg_dev); ++ hcd->regs = otg_dev->base; ++ hcd->rsrc_start = otg_dev->phys_addr; ++ hcd->rsrc_len = otg_dev->base_len; ++ hcd->self.otg_port = 1; ++ hcd->has_tt = 1; ++ ++ /* Initialize the DWC OTG HCD. 
*/ ++ dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); ++ dwc_otg_hcd->core_if = otg_dev->core_if; ++ otg_dev->hcd = dwc_otg_hcd; ++ ++ /* */ ++ spin_lock_init(&dwc_otg_hcd->lock); ++ ++ /* Register the HCD CIL Callbacks */ ++ dwc_otg_cil_register_hcd_callbacks(otg_dev->core_if, ++ &hcd_cil_callbacks, hcd); ++ ++ /* Initialize the non-periodic schedule. */ ++ INIT_LIST_HEAD(&dwc_otg_hcd->non_periodic_sched_inactive); ++ INIT_LIST_HEAD(&dwc_otg_hcd->non_periodic_sched_active); ++ ++ /* Initialize the periodic schedule. */ ++ INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_inactive); ++ INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_ready); ++ INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_assigned); ++ INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_queued); ++ ++ /* ++ * Create a host channel descriptor for each host channel implemented ++ * in the controller. Initialize the channel descriptor array. ++ */ ++ INIT_LIST_HEAD(&dwc_otg_hcd->free_hc_list); ++ num_channels = dwc_otg_hcd->core_if->core_params->host_channels; ++ memset(dwc_otg_hcd->hc_ptr_array, 0, sizeof(dwc_otg_hcd->hc_ptr_array)); ++ for (i = 0; i < num_channels; i++) { ++ channel = kmalloc(sizeof(dwc_hc_t), GFP_KERNEL); ++ if (channel == NULL) { ++ retval = -ENOMEM; ++ DWC_ERROR("%s: host channel allocation failed\n", __func__); ++ goto error2; ++ } ++ memset(channel, 0, sizeof(dwc_hc_t)); ++ channel->hc_num = i; ++ dwc_otg_hcd->hc_ptr_array[i] = channel; ++#ifdef DEBUG ++ init_timer(&dwc_otg_hcd->core_if->hc_xfer_timer[i]); ++#endif ++ DWC_DEBUGPL(DBG_HCDV, "HCD Added channel #%d, hc=%p\n", i, channel); ++ } ++ ++ /* Initialize the Connection timeout timer. */ ++ init_timer(&dwc_otg_hcd->conn_timer); ++ ++ /* Initialize reset tasklet. */ ++ reset_tasklet.data = (unsigned long) dwc_otg_hcd; ++ dwc_otg_hcd->reset_tasklet = &reset_tasklet; ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ /* Set device flags indicating whether the HCD supports DMA. */ ++ if (otg_dev->core_if->dma_enable) { ++ DWC_PRINT("Using DMA mode\n"); ++ dev->dma_mask = (void *)~0; ++ dev->coherent_dma_mask = ~0; ++ ++ if (otg_dev->core_if->dma_desc_enable){ ++ DWC_PRINT("Device using Descriptor DMA mode\n"); ++ } else { ++ DWC_PRINT("Device using Buffer DMA mode\n"); ++ } ++ } else { ++ DWC_PRINT("Using Slave mode\n"); ++ dev->dma_mask = (void *)0; ++ dev->dev.coherent_dma_mask = 0; ++ } ++#endif ++ /* ++ * Finish generic HCD initialization and start the HCD. This function ++ * allocates the DMA buffer pool, registers the USB bus, requests the ++ * IRQ line, and calls dwc_otg_hcd_start method. ++ */ ++ retval = usb_add_hcd(hcd, otg_dev->irq, IRQF_SHARED); ++ if (retval < 0) { ++ goto error2; ++ } ++ ++ /* ++ * Allocate space for storing data on status transactions. Normally no ++ * data is sent, but this space acts as a bit bucket. This must be ++ * done after usb_add_hcd since that function allocates the DMA buffer ++ * pool. 
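++	 * (assign_and_init_hc() later points the host channel at this buffer
++	 * for the status phase of control transfers, with xfer_len set to 0,
++	 * so its contents are never interpreted.)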
++ */ ++ if (otg_dev->core_if->dma_enable) { ++ dwc_otg_hcd->status_buf = ++ dma_alloc_coherent(dev, ++ DWC_OTG_HCD_STATUS_BUF_SIZE, ++ &dwc_otg_hcd->status_buf_dma, ++ GFP_KERNEL | GFP_DMA); ++ } else { ++ dwc_otg_hcd->status_buf = kmalloc(DWC_OTG_HCD_STATUS_BUF_SIZE, ++ GFP_KERNEL); ++ } ++ if (!dwc_otg_hcd->status_buf) { ++ retval = -ENOMEM; ++ DWC_ERROR("%s: status_buf allocation failed\n", __func__); ++ goto error3; ++ } ++ ++ dwc_otg_hcd->otg_dev = otg_dev; ++ ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Initialized HCD, bus=%s, usbbus=%d\n", ++ dev_name(dev), hcd->self.busnum); ++ ++ return 0; ++ ++ /* Error conditions */ ++ error3: ++ usb_remove_hcd(hcd); ++ error2: ++ dwc_otg_hcd_free(hcd); ++ usb_put_hcd(hcd); ++ ++ /* FIXME: 2008/05/03 by Steven ++ * write back to device: ++ * dwc_otg_hcd has already been released by dwc_otg_hcd_free() ++ */ ++ dev_set_drvdata(dev, otg_dev); ++ ++ error1: ++ return retval; ++} ++ ++/** ++ * Removes the HCD. ++ * Frees memory and resources associated with the HCD and deregisters the bus. ++ */ ++void dwc_otg_hcd_remove(struct device *dev) ++{ ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(dev); ++ dwc_otg_hcd_t *dwc_otg_hcd; ++ struct usb_hcd *hcd; ++ ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD REMOVE\n"); ++ ++ if (!otg_dev) { ++ DWC_DEBUGPL(DBG_ANY, "%s: otg_dev NULL!\n", __func__); ++ return; ++ } ++ ++ dwc_otg_hcd = otg_dev->hcd; ++ ++ if (!dwc_otg_hcd) { ++ DWC_DEBUGPL(DBG_ANY, "%s: otg_dev->hcd NULL!\n", __func__); ++ return; ++ } ++ ++ hcd = dwc_otg_hcd_to_hcd(dwc_otg_hcd); ++ ++ if (!hcd) { ++ DWC_DEBUGPL(DBG_ANY, "%s: dwc_otg_hcd_to_hcd(dwc_otg_hcd) NULL!\n", __func__); ++ return; ++ } ++ ++ /* Turn off all interrupts */ ++ dwc_write_reg32(&dwc_otg_hcd->core_if->core_global_regs->gintmsk, 0); ++ dwc_modify_reg32(&dwc_otg_hcd->core_if->core_global_regs->gahbcfg, 1, 0); ++ ++ usb_remove_hcd(hcd); ++ dwc_otg_hcd_free(hcd); ++ usb_put_hcd(hcd); ++} ++ ++/* ========================================================================= ++ * Linux HC Driver Functions ++ * ========================================================================= */ ++ ++/** ++ * Initializes dynamic portions of the DWC_otg HCD state. ++ */ ++static void hcd_reinit(dwc_otg_hcd_t *hcd) ++{ ++ struct list_head *item; ++ int num_channels; ++ int i; ++ dwc_hc_t *channel; ++ ++ hcd->flags.d32 = 0; ++ ++ hcd->non_periodic_qh_ptr = &hcd->non_periodic_sched_active; ++ hcd->non_periodic_channels = 0; ++ hcd->periodic_channels = 0; ++ ++ /* ++ * Put all channels in the free channel list and clean up channel ++ * states. ++ */ ++ item = hcd->free_hc_list.next; ++ while (item != &hcd->free_hc_list) { ++ list_del(item); ++ item = hcd->free_hc_list.next; ++ } ++ num_channels = hcd->core_if->core_params->host_channels; ++ for (i = 0; i < num_channels; i++) { ++ channel = hcd->hc_ptr_array[i]; ++ list_add_tail(&channel->hc_list_entry, &hcd->free_hc_list); ++ dwc_otg_hc_cleanup(hcd->core_if, channel); ++ } ++ ++ /* Initialize the DWC core for host mode operation. */ ++ dwc_otg_core_host_init(hcd->core_if); ++} ++ ++/** Initializes the DWC_otg controller and its root hub and prepares it for host ++ * mode operation. Activates the root port. Returns 0 on success and a negative ++ * error code on failure. 
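++ * If the core is still in device mode the function returns without touching
++ * the host hardware; otherwise hcd_reinit() below rebuilds the free channel
++ * list and re-runs dwc_otg_core_host_init().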
*/ ++int dwc_otg_hcd_start(struct usb_hcd *hcd) ++{ ++ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); ++ dwc_otg_core_if_t *core_if = dwc_otg_hcd->core_if; ++ struct usb_bus *bus; ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ struct usb_device *udev; ++ int retval; ++#endif ++ ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD START\n"); ++ ++ bus = hcd_to_bus(hcd); ++ ++ /* Initialize the bus state. If the core is in Device Mode ++ * HALT the USB bus and return. */ ++ if (dwc_otg_is_device_mode(core_if)) { ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ hcd->state = HC_STATE_HALT; ++#else ++ hcd->state = HC_STATE_RUNNING; ++#endif ++ return 0; ++ } ++ hcd->state = HC_STATE_RUNNING; ++ ++ /* Initialize and connect root hub if one is not already attached */ ++ if (bus->root_hub) { ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Has Root Hub\n"); ++ /* Inform the HUB driver to resume. */ ++ usb_hcd_resume_root_hub(hcd); ++ } ++ else { ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Does Not Have Root Hub\n"); ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ udev = usb_alloc_dev(NULL, bus, 0); ++ udev->speed = USB_SPEED_HIGH; ++ if (!udev) { ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Error udev alloc\n"); ++ return -ENODEV; ++ } ++ if ((retval = usb_hcd_register_root_hub(udev, hcd)) != 0) { ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Error registering %d\n", retval); ++ return -ENODEV; ++ } ++#endif ++ } ++ ++ hcd_reinit(dwc_otg_hcd); ++ ++ return 0; ++} ++ ++static void qh_list_free(dwc_otg_hcd_t *hcd, struct list_head *qh_list) ++{ ++ struct list_head *item; ++ dwc_otg_qh_t *qh; ++ ++ if (!qh_list->next) { ++ /* The list hasn't been initialized yet. */ ++ return; ++ } ++ ++ /* Ensure there are no QTDs or URBs left. */ ++ kill_urbs_in_qh_list(hcd, qh_list); ++ ++ for (item = qh_list->next; item != qh_list; item = qh_list->next) { ++ qh = list_entry(item, dwc_otg_qh_t, qh_list_entry); ++ dwc_otg_hcd_qh_remove_and_free(hcd, qh); ++ } ++} ++ ++/** ++ * Halts the DWC_otg host mode operations in a clean manner. USB transfers are ++ * stopped. ++ */ ++void dwc_otg_hcd_stop(struct usb_hcd *hcd) ++{ ++ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); ++ hprt0_data_t hprt0 = { .d32=0 }; ++ ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD STOP\n"); ++ ++ /* Turn off all host-specific interrupts. */ ++ dwc_otg_disable_host_interrupts(dwc_otg_hcd->core_if); ++ ++ /* ++ * The root hub should be disconnected before this function is called. ++ * The disconnect will clear the QTD lists (via ..._hcd_urb_dequeue) ++ * and the QH lists (via ..._hcd_endpoint_disable). ++ */ ++ ++ /* Turn off the vbus power */ ++ DWC_PRINT("PortPower off\n"); ++ hprt0.b.prtpwr = 0; ++ dwc_write_reg32(dwc_otg_hcd->core_if->host_if->hprt0, hprt0.d32); ++} ++ ++/** Returns the current frame number. */ ++int dwc_otg_hcd_get_frame_number(struct usb_hcd *hcd) ++{ ++ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); ++ hfnum_data_t hfnum; ++ ++ hfnum.d32 = dwc_read_reg32(&dwc_otg_hcd->core_if-> ++ host_if->host_global_regs->hfnum); ++ ++#ifdef DEBUG_SOF ++ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD GET FRAME NUMBER %d\n", hfnum.b.frnum); ++#endif ++ return hfnum.b.frnum; ++} ++ ++/** ++ * Frees secondary storage associated with the dwc_otg_hcd structure contained ++ * in the struct usb_hcd field. 
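++ * This covers the QH/QTD schedule lists, the dwc_hc_t entries in
++ * hc_ptr_array[], and the status buffer allocated in dwc_otg_hcd_init().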
++ */ ++void dwc_otg_hcd_free(struct usb_hcd *hcd) ++{ ++ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); ++ int i; ++ ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD FREE\n"); ++ ++ del_timers(dwc_otg_hcd); ++ ++ /* Free memory for QH/QTD lists */ ++ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->non_periodic_sched_inactive); ++ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->non_periodic_sched_active); ++ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_inactive); ++ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_ready); ++ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_assigned); ++ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_queued); ++ ++ /* Free memory for the host channels. */ ++ for (i = 0; i < MAX_EPS_CHANNELS; i++) { ++ dwc_hc_t *hc = dwc_otg_hcd->hc_ptr_array[i]; ++ if (hc != NULL) { ++ DWC_DEBUGPL(DBG_HCDV, "HCD Free channel #%i, hc=%p\n", i, hc); ++ kfree(hc); ++ } ++ } ++ ++ if (dwc_otg_hcd->core_if->dma_enable) { ++ if (dwc_otg_hcd->status_buf_dma) { ++ dma_free_coherent(hcd->self.controller, ++ DWC_OTG_HCD_STATUS_BUF_SIZE, ++ dwc_otg_hcd->status_buf, ++ dwc_otg_hcd->status_buf_dma); ++ } ++ } else if (dwc_otg_hcd->status_buf != NULL) { ++ kfree(dwc_otg_hcd->status_buf); ++ } ++} ++ ++#ifdef DEBUG ++static void dump_urb_info(struct urb *urb, char* fn_name) ++{ ++ DWC_PRINT("%s, urb %p\n", fn_name, urb); ++ DWC_PRINT(" Device address: %d\n", usb_pipedevice(urb->pipe)); ++ DWC_PRINT(" Endpoint: %d, %s\n", usb_pipeendpoint(urb->pipe), ++ (usb_pipein(urb->pipe) ? "IN" : "OUT")); ++ DWC_PRINT(" Endpoint type: %s\n", ++ ({char *pipetype; ++ switch (usb_pipetype(urb->pipe)) { ++ case PIPE_CONTROL: pipetype = "CONTROL"; break; ++ case PIPE_BULK: pipetype = "BULK"; break; ++ case PIPE_INTERRUPT: pipetype = "INTERRUPT"; break; ++ case PIPE_ISOCHRONOUS: pipetype = "ISOCHRONOUS"; break; ++ default: pipetype = "UNKNOWN"; break; ++ }; pipetype;})); ++ DWC_PRINT(" Speed: %s\n", ++ ({char *speed; ++ switch (urb->dev->speed) { ++ case USB_SPEED_HIGH: speed = "HIGH"; break; ++ case USB_SPEED_FULL: speed = "FULL"; break; ++ case USB_SPEED_LOW: speed = "LOW"; break; ++ default: speed = "UNKNOWN"; break; ++ }; speed;})); ++ DWC_PRINT(" Max packet size: %d\n", ++ usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe))); ++ DWC_PRINT(" Data buffer length: %d\n", urb->transfer_buffer_length); ++ DWC_PRINT(" Transfer buffer: %p, Transfer DMA: %p\n", ++ urb->transfer_buffer, (void *)urb->transfer_dma); ++ DWC_PRINT(" Setup buffer: %p, Setup DMA: %p\n", ++ urb->setup_packet, (void *)urb->setup_dma); ++ DWC_PRINT(" Interval: %d\n", urb->interval); ++ if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { ++ int i; ++ for (i = 0; i < urb->number_of_packets; i++) { ++ DWC_PRINT(" ISO Desc %d:\n", i); ++ DWC_PRINT(" offset: %d, length %d\n", ++ urb->iso_frame_desc[i].offset, ++ urb->iso_frame_desc[i].length); ++ } ++ } ++} ++ ++static void dump_channel_info(dwc_otg_hcd_t *hcd, ++ dwc_otg_qh_t *qh) ++{ ++ if (qh->channel != NULL) { ++ dwc_hc_t *hc = qh->channel; ++ struct list_head *item; ++ dwc_otg_qh_t *qh_item; ++ int num_channels = hcd->core_if->core_params->host_channels; ++ int i; ++ ++ dwc_otg_hc_regs_t *hc_regs; ++ hcchar_data_t hcchar; ++ hcsplt_data_t hcsplt; ++ hctsiz_data_t hctsiz; ++ uint32_t hcdma; ++ ++ hc_regs = hcd->core_if->host_if->hc_regs[hc->hc_num]; ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hcsplt.d32 = dwc_read_reg32(&hc_regs->hcsplt); ++ hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz); ++ hcdma = dwc_read_reg32(&hc_regs->hcdma); ++ ++ DWC_PRINT(" Assigned to 
channel %p:\n", hc); ++ DWC_PRINT(" hcchar 0x%08x, hcsplt 0x%08x\n", hcchar.d32, hcsplt.d32); ++ DWC_PRINT(" hctsiz 0x%08x, hcdma 0x%08x\n", hctsiz.d32, hcdma); ++ DWC_PRINT(" dev_addr: %d, ep_num: %d, ep_is_in: %d\n", ++ hc->dev_addr, hc->ep_num, hc->ep_is_in); ++ DWC_PRINT(" ep_type: %d\n", hc->ep_type); ++ DWC_PRINT(" max_packet: %d\n", hc->max_packet); ++ DWC_PRINT(" data_pid_start: %d\n", hc->data_pid_start); ++ DWC_PRINT(" xfer_started: %d\n", hc->xfer_started); ++ DWC_PRINT(" halt_status: %d\n", hc->halt_status); ++ DWC_PRINT(" xfer_buff: %p\n", hc->xfer_buff); ++ DWC_PRINT(" xfer_len: %d\n", hc->xfer_len); ++ DWC_PRINT(" qh: %p\n", hc->qh); ++ DWC_PRINT(" NP inactive sched:\n"); ++ list_for_each(item, &hcd->non_periodic_sched_inactive) { ++ qh_item = list_entry(item, dwc_otg_qh_t, qh_list_entry); ++ DWC_PRINT(" %p\n", qh_item); ++ } ++ DWC_PRINT(" NP active sched:\n"); ++ list_for_each(item, &hcd->non_periodic_sched_active) { ++ qh_item = list_entry(item, dwc_otg_qh_t, qh_list_entry); ++ DWC_PRINT(" %p\n", qh_item); ++ } ++ DWC_PRINT(" Channels: \n"); ++ for (i = 0; i < num_channels; i++) { ++ dwc_hc_t *hc = hcd->hc_ptr_array[i]; ++ DWC_PRINT(" %2d: %p\n", i, hc); ++ } ++ } ++} ++#endif ++ ++/** Starts processing a USB transfer request specified by a USB Request Block ++ * (URB). mem_flags indicates the type of memory allocation to use while ++ * processing this URB. */ ++int dwc_otg_hcd_urb_enqueue(struct usb_hcd *hcd, ++ struct urb *urb, ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ int mem_flags ++#else ++ gfp_t mem_flags ++#endif ++ ) ++{ ++ int retval = 0; ++ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); ++ dwc_otg_qtd_t *qtd; ++ ++#ifdef DEBUG ++ if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) { ++ dump_urb_info(urb, "dwc_otg_hcd_urb_enqueue"); ++ } ++#endif ++ if (!dwc_otg_hcd->flags.b.port_connect_status) { ++ /* No longer connected. */ ++ return -ENODEV; ++ } ++ ++ qtd = dwc_otg_hcd_qtd_create(urb); ++ if (qtd == NULL) { ++ DWC_ERROR("DWC OTG HCD URB Enqueue failed creating QTD\n"); ++ return -ENOMEM; ++ } ++ ++ retval = dwc_otg_hcd_qtd_add(qtd, dwc_otg_hcd); ++ if (retval < 0) { ++ DWC_ERROR("DWC OTG HCD URB Enqueue failed adding QTD. " ++ "Error status %d\n", retval); ++ dwc_otg_hcd_qtd_free(qtd); ++ } ++ ++ return retval; ++} ++ ++/** Aborts/cancels a USB transfer request. Always returns 0 to indicate ++ * success. */ ++int dwc_otg_hcd_urb_dequeue(struct usb_hcd *hcd, ++ struct urb *urb, ++ int status) ++{ ++ unsigned long flags; ++ dwc_otg_hcd_t *dwc_otg_hcd; ++ dwc_otg_qtd_t *urb_qtd; ++ dwc_otg_qh_t *qh; ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ struct usb_host_endpoint *ep = dwc_urb_to_endpoint(urb); ++#endif ++ ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD URB Dequeue\n"); ++ ++ dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); ++ ++ SPIN_LOCK_IRQSAVE(&dwc_otg_hcd->lock, flags); ++ ++ urb_qtd = (dwc_otg_qtd_t *)urb->hcpriv; ++ qh = (dwc_otg_qh_t *)ep->hcpriv; ++ ++#ifdef DEBUG ++ if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) { ++ dump_urb_info(urb, "dwc_otg_hcd_urb_dequeue"); ++ if (urb_qtd == qh->qtd_in_process) { ++ dump_channel_info(dwc_otg_hcd, qh); ++ } ++ } ++#endif ++ ++ if (urb_qtd == qh->qtd_in_process) { ++ /* The QTD is in process (it has been assigned to a channel). */ ++ ++ if (dwc_otg_hcd->flags.b.port_connect_status) { ++ /* ++ * If still connected (i.e. in host mode), halt the ++ * channel so it can be used for other transfers. 
If ++ * no longer connected, the host registers can't be ++ * written to halt the channel since the core is in ++ * device mode. ++ */ ++ dwc_otg_hc_halt(dwc_otg_hcd->core_if, qh->channel, ++ DWC_OTG_HC_XFER_URB_DEQUEUE); ++ } ++ } ++ ++ /* ++ * Free the QTD and clean up the associated QH. Leave the QH in the ++ * schedule if it has any remaining QTDs. ++ */ ++ dwc_otg_hcd_qtd_remove_and_free(dwc_otg_hcd, urb_qtd); ++ if (urb_qtd == qh->qtd_in_process) { ++ dwc_otg_hcd_qh_deactivate(dwc_otg_hcd, qh, 0); ++ qh->channel = NULL; ++ qh->qtd_in_process = NULL; ++ } else if (list_empty(&qh->qtd_list)) { ++ dwc_otg_hcd_qh_remove(dwc_otg_hcd, qh); ++ } ++ ++ SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags); ++ ++ urb->hcpriv = NULL; ++ ++ /* Higher layer software sets URB status. */ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ usb_hcd_giveback_urb(hcd, urb, status); ++#else ++ usb_hcd_giveback_urb(hcd, urb, NULL); ++#endif ++ if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) { ++ DWC_PRINT("Called usb_hcd_giveback_urb()\n"); ++ DWC_PRINT(" urb->status = %d\n", urb->status); ++ } ++ ++ return 0; ++} ++ ++/** Frees resources in the DWC_otg controller related to a given endpoint. Also ++ * clears state in the HCD related to the endpoint. Any URBs for the endpoint ++ * must already be dequeued. */ ++void dwc_otg_hcd_endpoint_disable(struct usb_hcd *hcd, ++ struct usb_host_endpoint *ep) ++{ ++ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); ++ dwc_otg_qh_t *qh; ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ unsigned long flags; ++ int retry = 0; ++#endif ++ ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD EP DISABLE: _bEndpointAddress=0x%02x, " ++ "endpoint=%d\n", ep->desc.bEndpointAddress, ++ dwc_ep_addr_to_endpoint(ep->desc.bEndpointAddress)); ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++rescan: ++ SPIN_LOCK_IRQSAVE(&dwc_otg_hcd->lock, flags); ++ qh = (dwc_otg_qh_t *)(ep->hcpriv); ++ if (!qh) ++ goto done; ++ ++ /** Check that the QTD list is really empty */ ++ if (!list_empty(&qh->qtd_list)) { ++ if (retry++ < 250) { ++ SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags); ++ schedule_timeout_uninterruptible(1); ++ goto rescan; ++ } ++ ++ DWC_WARN("DWC OTG HCD EP DISABLE:" ++ " QTD List for this endpoint is not empty\n"); ++ } ++ ++ dwc_otg_hcd_qh_remove_and_free(dwc_otg_hcd, qh); ++ ep->hcpriv = NULL; ++done: ++ SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags); ++ ++#else // LINUX_VERSION_CODE ++ ++ qh = (dwc_otg_qh_t *)(ep->hcpriv); ++ if (qh != NULL) { ++#ifdef DEBUG ++ /** Check that the QTD list is really empty */ ++ if (!list_empty(&qh->qtd_list)) { ++ DWC_WARN("DWC OTG HCD EP DISABLE:" ++ " QTD List for this endpoint is not empty\n"); ++ } ++#endif ++ dwc_otg_hcd_qh_remove_and_free(dwc_otg_hcd, qh); ++ ep->hcpriv = NULL; ++ } ++#endif // LINUX_VERSION_CODE ++} ++ ++/** Handles host mode interrupts for the DWC_otg controller. Returns IRQ_NONE if ++ * there was no interrupt to handle. Returns IRQ_HANDLED if there was a valid ++ * interrupt. ++ * ++ * This function is called by the USB core when an interrupt occurs */ ++irqreturn_t dwc_otg_hcd_irq(struct usb_hcd *hcd ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) ++ , struct pt_regs *regs ++#endif ++ ) ++{ ++ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); ++ return IRQ_RETVAL(dwc_otg_hcd_handle_intr(dwc_otg_hcd)); ++} ++ ++/** Creates Status Change bitmap for the root hub and root port. The bitmap is ++ * returned in buf. Bit 0 is the status change indicator for the root hub. 
Bit 1 ++ * is the status change indicator for the single root port. Returns 1 if either ++ * change indicator is 1, otherwise returns 0. */ ++int dwc_otg_hcd_hub_status_data(struct usb_hcd *hcd, char *buf) ++{ ++ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); ++ ++ buf[0] = 0; ++ buf[0] |= (dwc_otg_hcd->flags.b.port_connect_status_change || ++ dwc_otg_hcd->flags.b.port_reset_change || ++ dwc_otg_hcd->flags.b.port_enable_change || ++ dwc_otg_hcd->flags.b.port_suspend_change || ++ dwc_otg_hcd->flags.b.port_over_current_change) << 1; ++ ++#ifdef DEBUG ++ if (buf[0]) { ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB STATUS DATA:" ++ " Root port status changed\n"); ++ DWC_DEBUGPL(DBG_HCDV, " port_connect_status_change: %d\n", ++ dwc_otg_hcd->flags.b.port_connect_status_change); ++ DWC_DEBUGPL(DBG_HCDV, " port_reset_change: %d\n", ++ dwc_otg_hcd->flags.b.port_reset_change); ++ DWC_DEBUGPL(DBG_HCDV, " port_enable_change: %d\n", ++ dwc_otg_hcd->flags.b.port_enable_change); ++ DWC_DEBUGPL(DBG_HCDV, " port_suspend_change: %d\n", ++ dwc_otg_hcd->flags.b.port_suspend_change); ++ DWC_DEBUGPL(DBG_HCDV, " port_over_current_change: %d\n", ++ dwc_otg_hcd->flags.b.port_over_current_change); ++ } ++#endif ++ return (buf[0] != 0); ++} ++ ++#ifdef DWC_HS_ELECT_TST ++/* ++ * Quick and dirty hack to implement the HS Electrical Test ++ * SINGLE_STEP_GET_DEVICE_DESCRIPTOR feature. ++ * ++ * This code was copied from our userspace app "hset". It sends a ++ * Get Device Descriptor control sequence in two parts, first the ++ * Setup packet by itself, followed some time later by the In and ++ * Ack packets. Rather than trying to figure out how to add this ++ * functionality to the normal driver code, we just hijack the ++ * hardware, using these two function to drive the hardware ++ * directly. 
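++ * do_setup() pushes the 8-byte Get Device Descriptor SETUP packet by hand
++ * through the channel registers at offset 0x500, and do_in_ack() then drives
++ * the DATA1 IN phase and the zero-length OUT handshake, busy-polling GINTSTS
++ * rather than relying on the interrupt handler.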
++ */ ++ ++dwc_otg_core_global_regs_t *global_regs; ++dwc_otg_host_global_regs_t *hc_global_regs; ++dwc_otg_hc_regs_t *hc_regs; ++uint32_t *data_fifo; ++ ++static void do_setup(void) ++{ ++ gintsts_data_t gintsts; ++ hctsiz_data_t hctsiz; ++ hcchar_data_t hcchar; ++ haint_data_t haint; ++ hcint_data_t hcint; ++ ++ /* Enable HAINTs */ ++ dwc_write_reg32(&hc_global_regs->haintmsk, 0x0001); ++ ++ /* Enable HCINTs */ ++ dwc_write_reg32(&hc_regs->hcintmsk, 0x04a3); ++ ++ /* Read GINTSTS */ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); ++ ++ /* Read HAINT */ ++ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); ++ //fprintf(stderr, "HAINT: %08x\n", haint.d32); ++ ++ /* Read HCINT */ ++ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); ++ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); ++ ++ /* Read HCCHAR */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); ++ ++ /* Clear HCINT */ ++ dwc_write_reg32(&hc_regs->hcint, hcint.d32); ++ ++ /* Clear HAINT */ ++ dwc_write_reg32(&hc_global_regs->haint, haint.d32); ++ ++ /* Clear GINTSTS */ ++ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); ++ ++ /* Read GINTSTS */ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); ++ ++ /* ++ * Send Setup packet (Get Device Descriptor) ++ */ ++ ++ /* Make sure channel is disabled */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ if (hcchar.b.chen) { ++ //fprintf(stderr, "Channel already enabled 1, HCCHAR = %08x\n", hcchar.d32); ++ hcchar.b.chdis = 1; ++// hcchar.b.chen = 1; ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++ //sleep(1); ++ mdelay(1000); ++ ++ /* Read GINTSTS */ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); ++ ++ /* Read HAINT */ ++ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); ++ //fprintf(stderr, "HAINT: %08x\n", haint.d32); ++ ++ /* Read HCINT */ ++ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); ++ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); ++ ++ /* Read HCCHAR */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); ++ ++ /* Clear HCINT */ ++ dwc_write_reg32(&hc_regs->hcint, hcint.d32); ++ ++ /* Clear HAINT */ ++ dwc_write_reg32(&hc_global_regs->haint, haint.d32); ++ ++ /* Clear GINTSTS */ ++ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); ++ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ //if (hcchar.b.chen) { ++ // fprintf(stderr, "** Channel _still_ enabled 1, HCCHAR = %08x **\n", hcchar.d32); ++ //} ++ } ++ ++ /* Set HCTSIZ */ ++ hctsiz.d32 = 0; ++ hctsiz.b.xfersize = 8; ++ hctsiz.b.pktcnt = 1; ++ hctsiz.b.pid = DWC_OTG_HC_PID_SETUP; ++ dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32); ++ ++ /* Set HCCHAR */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hcchar.b.eptype = DWC_OTG_EP_TYPE_CONTROL; ++ hcchar.b.epdir = 0; ++ hcchar.b.epnum = 0; ++ hcchar.b.mps = 8; ++ hcchar.b.chen = 1; ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++ ++ /* Fill FIFO with Setup data for Get Device Descriptor */ ++ data_fifo = (uint32_t *)((char *)global_regs + 0x1000); ++ dwc_write_reg32(data_fifo++, 0x01000680); ++ dwc_write_reg32(data_fifo++, 0x00080000); ++ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "Waiting for HCINTR intr 1, GINTSTS = %08x\n", gintsts.d32); ++ ++ /* Wait for host channel interrupt */ ++ do { ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); 
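++		/* GINTMSK was cleared by the caller, so poll the raw status
++		 * until the host channel interrupt bit asserts. */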
++ } while (gintsts.b.hcintr == 0); ++ ++ //fprintf(stderr, "Got HCINTR intr 1, GINTSTS = %08x\n", gintsts.d32); ++ ++ /* Disable HCINTs */ ++ dwc_write_reg32(&hc_regs->hcintmsk, 0x0000); ++ ++ /* Disable HAINTs */ ++ dwc_write_reg32(&hc_global_regs->haintmsk, 0x0000); ++ ++ /* Read HAINT */ ++ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); ++ //fprintf(stderr, "HAINT: %08x\n", haint.d32); ++ ++ /* Read HCINT */ ++ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); ++ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); ++ ++ /* Read HCCHAR */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); ++ ++ /* Clear HCINT */ ++ dwc_write_reg32(&hc_regs->hcint, hcint.d32); ++ ++ /* Clear HAINT */ ++ dwc_write_reg32(&hc_global_regs->haint, haint.d32); ++ ++ /* Clear GINTSTS */ ++ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); ++ ++ /* Read GINTSTS */ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); ++} ++ ++static void do_in_ack(void) ++{ ++ gintsts_data_t gintsts; ++ hctsiz_data_t hctsiz; ++ hcchar_data_t hcchar; ++ haint_data_t haint; ++ hcint_data_t hcint; ++ host_grxsts_data_t grxsts; ++ ++ /* Enable HAINTs */ ++ dwc_write_reg32(&hc_global_regs->haintmsk, 0x0001); ++ ++ /* Enable HCINTs */ ++ dwc_write_reg32(&hc_regs->hcintmsk, 0x04a3); ++ ++ /* Read GINTSTS */ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); ++ ++ /* Read HAINT */ ++ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); ++ //fprintf(stderr, "HAINT: %08x\n", haint.d32); ++ ++ /* Read HCINT */ ++ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); ++ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); ++ ++ /* Read HCCHAR */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); ++ ++ /* Clear HCINT */ ++ dwc_write_reg32(&hc_regs->hcint, hcint.d32); ++ ++ /* Clear HAINT */ ++ dwc_write_reg32(&hc_global_regs->haint, haint.d32); ++ ++ /* Clear GINTSTS */ ++ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); ++ ++ /* Read GINTSTS */ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); ++ ++ /* ++ * Receive Control In packet ++ */ ++ ++ /* Make sure channel is disabled */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ if (hcchar.b.chen) { ++ //fprintf(stderr, "Channel already enabled 2, HCCHAR = %08x\n", hcchar.d32); ++ hcchar.b.chdis = 1; ++ hcchar.b.chen = 1; ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++ //sleep(1); ++ mdelay(1000); ++ ++ /* Read GINTSTS */ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); ++ ++ /* Read HAINT */ ++ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); ++ //fprintf(stderr, "HAINT: %08x\n", haint.d32); ++ ++ /* Read HCINT */ ++ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); ++ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); ++ ++ /* Read HCCHAR */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); ++ ++ /* Clear HCINT */ ++ dwc_write_reg32(&hc_regs->hcint, hcint.d32); ++ ++ /* Clear HAINT */ ++ dwc_write_reg32(&hc_global_regs->haint, haint.d32); ++ ++ /* Clear GINTSTS */ ++ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); ++ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ //if (hcchar.b.chen) { ++ // fprintf(stderr, "** Channel _still_ enabled 2, HCCHAR = %08x **\n", hcchar.d32); ++ //} ++ } ++ ++ /* Set HCTSIZ */ 
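++	/* One DATA1 packet of at most 8 bytes for the IN data phase. */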
++ hctsiz.d32 = 0; ++ hctsiz.b.xfersize = 8; ++ hctsiz.b.pktcnt = 1; ++ hctsiz.b.pid = DWC_OTG_HC_PID_DATA1; ++ dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32); ++ ++ /* Set HCCHAR */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hcchar.b.eptype = DWC_OTG_EP_TYPE_CONTROL; ++ hcchar.b.epdir = 1; ++ hcchar.b.epnum = 0; ++ hcchar.b.mps = 8; ++ hcchar.b.chen = 1; ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "Waiting for RXSTSQLVL intr 1, GINTSTS = %08x\n", gintsts.d32); ++ ++ /* Wait for receive status queue interrupt */ ++ do { ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ } while (gintsts.b.rxstsqlvl == 0); ++ ++ //fprintf(stderr, "Got RXSTSQLVL intr 1, GINTSTS = %08x\n", gintsts.d32); ++ ++ /* Read RXSTS */ ++ grxsts.d32 = dwc_read_reg32(&global_regs->grxstsp); ++ //fprintf(stderr, "GRXSTS: %08x\n", grxsts.d32); ++ ++ /* Clear RXSTSQLVL in GINTSTS */ ++ gintsts.d32 = 0; ++ gintsts.b.rxstsqlvl = 1; ++ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); ++ ++ switch (grxsts.b.pktsts) { ++ case DWC_GRXSTS_PKTSTS_IN: ++ /* Read the data into the host buffer */ ++ if (grxsts.b.bcnt > 0) { ++ int i; ++ int word_count = (grxsts.b.bcnt + 3) / 4; ++ ++ data_fifo = (uint32_t *)((char *)global_regs + 0x1000); ++ ++ for (i = 0; i < word_count; i++) { ++ (void)dwc_read_reg32(data_fifo++); ++ } ++ } ++ ++ //fprintf(stderr, "Received %u bytes\n", (unsigned)grxsts.b.bcnt); ++ break; ++ ++ default: ++ //fprintf(stderr, "** Unexpected GRXSTS packet status 1 **\n"); ++ break; ++ } ++ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "Waiting for RXSTSQLVL intr 2, GINTSTS = %08x\n", gintsts.d32); ++ ++ /* Wait for receive status queue interrupt */ ++ do { ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ } while (gintsts.b.rxstsqlvl == 0); ++ ++ //fprintf(stderr, "Got RXSTSQLVL intr 2, GINTSTS = %08x\n", gintsts.d32); ++ ++ /* Read RXSTS */ ++ grxsts.d32 = dwc_read_reg32(&global_regs->grxstsp); ++ //fprintf(stderr, "GRXSTS: %08x\n", grxsts.d32); ++ ++ /* Clear RXSTSQLVL in GINTSTS */ ++ gintsts.d32 = 0; ++ gintsts.b.rxstsqlvl = 1; ++ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); ++ ++ switch (grxsts.b.pktsts) { ++ case DWC_GRXSTS_PKTSTS_IN_XFER_COMP: ++ break; ++ ++ default: ++ //fprintf(stderr, "** Unexpected GRXSTS packet status 2 **\n"); ++ break; ++ } ++ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "Waiting for HCINTR intr 2, GINTSTS = %08x\n", gintsts.d32); ++ ++ /* Wait for host channel interrupt */ ++ do { ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ } while (gintsts.b.hcintr == 0); ++ ++ //fprintf(stderr, "Got HCINTR intr 2, GINTSTS = %08x\n", gintsts.d32); ++ ++ /* Read HAINT */ ++ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); ++ //fprintf(stderr, "HAINT: %08x\n", haint.d32); ++ ++ /* Read HCINT */ ++ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); ++ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); ++ ++ /* Read HCCHAR */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); ++ ++ /* Clear HCINT */ ++ dwc_write_reg32(&hc_regs->hcint, hcint.d32); ++ ++ /* Clear HAINT */ ++ dwc_write_reg32(&hc_global_regs->haint, haint.d32); ++ ++ /* Clear GINTSTS */ ++ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); ++ ++ /* Read GINTSTS */ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); ++ ++// usleep(100000); 
++// mdelay(100); ++ mdelay(1); ++ ++ /* ++ * Send handshake packet ++ */ ++ ++ /* Read HAINT */ ++ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); ++ //fprintf(stderr, "HAINT: %08x\n", haint.d32); ++ ++ /* Read HCINT */ ++ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); ++ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); ++ ++ /* Read HCCHAR */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); ++ ++ /* Clear HCINT */ ++ dwc_write_reg32(&hc_regs->hcint, hcint.d32); ++ ++ /* Clear HAINT */ ++ dwc_write_reg32(&hc_global_regs->haint, haint.d32); ++ ++ /* Clear GINTSTS */ ++ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); ++ ++ /* Read GINTSTS */ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); ++ ++ /* Make sure channel is disabled */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ if (hcchar.b.chen) { ++ //fprintf(stderr, "Channel already enabled 3, HCCHAR = %08x\n", hcchar.d32); ++ hcchar.b.chdis = 1; ++ hcchar.b.chen = 1; ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++ //sleep(1); ++ mdelay(1000); ++ ++ /* Read GINTSTS */ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); ++ ++ /* Read HAINT */ ++ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); ++ //fprintf(stderr, "HAINT: %08x\n", haint.d32); ++ ++ /* Read HCINT */ ++ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); ++ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); ++ ++ /* Read HCCHAR */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); ++ ++ /* Clear HCINT */ ++ dwc_write_reg32(&hc_regs->hcint, hcint.d32); ++ ++ /* Clear HAINT */ ++ dwc_write_reg32(&hc_global_regs->haint, haint.d32); ++ ++ /* Clear GINTSTS */ ++ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); ++ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ //if (hcchar.b.chen) { ++ // fprintf(stderr, "** Channel _still_ enabled 3, HCCHAR = %08x **\n", hcchar.d32); ++ //} ++ } ++ ++ /* Set HCTSIZ */ ++ hctsiz.d32 = 0; ++ hctsiz.b.xfersize = 0; ++ hctsiz.b.pktcnt = 1; ++ hctsiz.b.pid = DWC_OTG_HC_PID_DATA1; ++ dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32); ++ ++ /* Set HCCHAR */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hcchar.b.eptype = DWC_OTG_EP_TYPE_CONTROL; ++ hcchar.b.epdir = 0; ++ hcchar.b.epnum = 0; ++ hcchar.b.mps = 8; ++ hcchar.b.chen = 1; ++ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); ++ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "Waiting for HCINTR intr 3, GINTSTS = %08x\n", gintsts.d32); ++ ++ /* Wait for host channel interrupt */ ++ do { ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ } while (gintsts.b.hcintr == 0); ++ ++ //fprintf(stderr, "Got HCINTR intr 3, GINTSTS = %08x\n", gintsts.d32); ++ ++ /* Disable HCINTs */ ++ dwc_write_reg32(&hc_regs->hcintmsk, 0x0000); ++ ++ /* Disable HAINTs */ ++ dwc_write_reg32(&hc_global_regs->haintmsk, 0x0000); ++ ++ /* Read HAINT */ ++ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); ++ //fprintf(stderr, "HAINT: %08x\n", haint.d32); ++ ++ /* Read HCINT */ ++ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); ++ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); ++ ++ /* Read HCCHAR */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); ++ ++ /* Clear HCINT */ ++ dwc_write_reg32(&hc_regs->hcint, hcint.d32); ++ ++ /* Clear HAINT */ ++ dwc_write_reg32(&hc_global_regs->haint, haint.d32); ++ ++ /* 
Clear GINTSTS */ ++ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); ++ ++ /* Read GINTSTS */ ++ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); ++ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); ++} ++#endif /* DWC_HS_ELECT_TST */ ++ ++/** Handles hub class-specific requests. */ ++int dwc_otg_hcd_hub_control(struct usb_hcd *hcd, ++ u16 typeReq, ++ u16 wValue, ++ u16 wIndex, ++ char *buf, ++ u16 wLength) ++{ ++ int retval = 0; ++ ++ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); ++ dwc_otg_core_if_t *core_if = hcd_to_dwc_otg_hcd(hcd)->core_if; ++ struct usb_hub_descriptor *desc; ++ hprt0_data_t hprt0 = {.d32 = 0}; ++ ++ uint32_t port_status; ++ ++ switch (typeReq) { ++ case ClearHubFeature: ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "ClearHubFeature 0x%x\n", wValue); ++ switch (wValue) { ++ case C_HUB_LOCAL_POWER: ++ case C_HUB_OVER_CURRENT: ++ /* Nothing required here */ ++ break; ++ default: ++ retval = -EINVAL; ++ DWC_ERROR("DWC OTG HCD - " ++ "ClearHubFeature request %xh unknown\n", wValue); ++ } ++ break; ++ case ClearPortFeature: ++ if (!wIndex || wIndex > 1) ++ goto error; ++ ++ switch (wValue) { ++ case USB_PORT_FEAT_ENABLE: ++ DWC_DEBUGPL(DBG_ANY, "DWC OTG HCD HUB CONTROL - " ++ "ClearPortFeature USB_PORT_FEAT_ENABLE\n"); ++ hprt0.d32 = dwc_otg_read_hprt0(core_if); ++ hprt0.b.prtena = 1; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ break; ++ case USB_PORT_FEAT_SUSPEND: ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "ClearPortFeature USB_PORT_FEAT_SUSPEND\n"); ++ hprt0.d32 = dwc_otg_read_hprt0(core_if); ++ hprt0.b.prtres = 1; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ /* Clear Resume bit */ ++ mdelay(100); ++ hprt0.b.prtres = 0; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ break; ++ case USB_PORT_FEAT_POWER: ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "ClearPortFeature USB_PORT_FEAT_POWER\n"); ++ hprt0.d32 = dwc_otg_read_hprt0(core_if); ++ hprt0.b.prtpwr = 0; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ break; ++ case USB_PORT_FEAT_INDICATOR: ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "ClearPortFeature USB_PORT_FEAT_INDICATOR\n"); ++ /* Port inidicator not supported */ ++ break; ++ case USB_PORT_FEAT_C_CONNECTION: ++ /* Clears drivers internal connect status change ++ * flag */ ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "ClearPortFeature USB_PORT_FEAT_C_CONNECTION\n"); ++ dwc_otg_hcd->flags.b.port_connect_status_change = 0; ++ break; ++ case USB_PORT_FEAT_C_RESET: ++ /* Clears the driver's internal Port Reset Change ++ * flag */ ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "ClearPortFeature USB_PORT_FEAT_C_RESET\n"); ++ dwc_otg_hcd->flags.b.port_reset_change = 0; ++ break; ++ case USB_PORT_FEAT_C_ENABLE: ++ /* Clears the driver's internal Port ++ * Enable/Disable Change flag */ ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "ClearPortFeature USB_PORT_FEAT_C_ENABLE\n"); ++ dwc_otg_hcd->flags.b.port_enable_change = 0; ++ break; ++ case USB_PORT_FEAT_C_SUSPEND: ++ /* Clears the driver's internal Port Suspend ++ * Change flag, which is set when resume signaling on ++ * the host port is complete */ ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "ClearPortFeature USB_PORT_FEAT_C_SUSPEND\n"); ++ dwc_otg_hcd->flags.b.port_suspend_change = 0; ++ break; ++ case USB_PORT_FEAT_C_OVER_CURRENT: ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "ClearPortFeature USB_PORT_FEAT_C_OVER_CURRENT\n"); ++ 
dwc_otg_hcd->flags.b.port_over_current_change = 0; ++ break; ++ default: ++ retval = -EINVAL; ++ DWC_ERROR("DWC OTG HCD - " ++ "ClearPortFeature request %xh " ++ "unknown or unsupported\n", wValue); ++ } ++ break; ++ case GetHubDescriptor: ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "GetHubDescriptor\n"); ++ desc = (struct usb_hub_descriptor *)buf; ++ desc->bDescLength = 9; ++ desc->bDescriptorType = 0x29; ++ desc->bNbrPorts = 1; ++ desc->wHubCharacteristics = 0x08; ++ desc->bPwrOn2PwrGood = 1; ++ desc->bHubContrCurrent = 0; ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39) ++ desc->u.hs.DeviceRemovable[0] = 0; ++ desc->u.hs.DeviceRemovable[1] = 0xff; ++#endif ++ break; ++ case GetHubStatus: ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "GetHubStatus\n"); ++ memset(buf, 0, 4); ++ break; ++ case GetPortStatus: ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "GetPortStatus\n"); ++ ++ if (!wIndex || wIndex > 1) ++ goto error; ++ ++ port_status = 0; ++ ++ if (dwc_otg_hcd->flags.b.port_connect_status_change) ++ port_status |= (1 << USB_PORT_FEAT_C_CONNECTION); ++ ++ if (dwc_otg_hcd->flags.b.port_enable_change) ++ port_status |= (1 << USB_PORT_FEAT_C_ENABLE); ++ ++ if (dwc_otg_hcd->flags.b.port_suspend_change) ++ port_status |= (1 << USB_PORT_FEAT_C_SUSPEND); ++ ++ if (dwc_otg_hcd->flags.b.port_reset_change) ++ port_status |= (1 << USB_PORT_FEAT_C_RESET); ++ ++ if (dwc_otg_hcd->flags.b.port_over_current_change) { ++ DWC_ERROR("Device Not Supported\n"); ++ port_status |= (1 << USB_PORT_FEAT_C_OVER_CURRENT); ++ } ++ ++ if (!dwc_otg_hcd->flags.b.port_connect_status) { ++ /* ++ * The port is disconnected, which means the core is ++ * either in device mode or it soon will be. Just ++ * return 0's for the remainder of the port status ++ * since the port register can't be read if the core ++ * is in device mode. ++ */ ++ *((__le32 *) buf) = cpu_to_le32(port_status); ++ break; ++ } ++ ++ hprt0.d32 = dwc_read_reg32(core_if->host_if->hprt0); ++ DWC_DEBUGPL(DBG_HCDV, " HPRT0: 0x%08x\n", hprt0.d32); ++ ++ if (hprt0.b.prtconnsts) ++ port_status |= (1 << USB_PORT_FEAT_CONNECTION); ++ ++ if (hprt0.b.prtena) ++ port_status |= (1 << USB_PORT_FEAT_ENABLE); ++ ++ if (hprt0.b.prtsusp) ++ port_status |= (1 << USB_PORT_FEAT_SUSPEND); ++ ++ if (hprt0.b.prtovrcurract) ++ port_status |= (1 << USB_PORT_FEAT_OVER_CURRENT); ++ ++ if (hprt0.b.prtrst) ++ port_status |= (1 << USB_PORT_FEAT_RESET); ++ ++ if (hprt0.b.prtpwr) ++ port_status |= (1 << USB_PORT_FEAT_POWER); ++ ++ if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_HIGH_SPEED) ++ port_status |= USB_PORT_STAT_HIGH_SPEED; ++ else if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_LOW_SPEED) ++ port_status |= USB_PORT_STAT_LOW_SPEED; ++ ++ if (hprt0.b.prttstctl) ++ port_status |= (1 << USB_PORT_FEAT_TEST); ++ ++ /* USB_PORT_FEAT_INDICATOR unsupported always 0 */ ++ ++ *((__le32 *) buf) = cpu_to_le32(port_status); ++ ++ break; ++ case SetHubFeature: ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "SetHubFeature\n"); ++ /* No HUB features supported */ ++ break; ++ case SetPortFeature: ++ if (wValue != USB_PORT_FEAT_TEST && (!wIndex || wIndex > 1)) ++ goto error; ++ ++ if (!dwc_otg_hcd->flags.b.port_connect_status) { ++ /* ++ * The port is disconnected, which means the core is ++ * either in device mode or it soon will be. Just ++ * return without doing anything since the port ++ * register can't be written if the core is in device ++ * mode. 
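++			 * (GetPortStatus above applies the same guard and
++			 * reports zeroes instead of reading HPRT0.)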
++ */ ++ break; ++ } ++ ++ switch (wValue) { ++ case USB_PORT_FEAT_SUSPEND: ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "SetPortFeature - USB_PORT_FEAT_SUSPEND\n"); ++ if (hcd->self.otg_port == wIndex && ++ hcd->self.b_hnp_enable) { ++ gotgctl_data_t gotgctl = {.d32=0}; ++ gotgctl.b.hstsethnpen = 1; ++ dwc_modify_reg32(&core_if->core_global_regs->gotgctl, ++ 0, gotgctl.d32); ++ core_if->op_state = A_SUSPEND; ++ } ++ hprt0.d32 = dwc_otg_read_hprt0(core_if); ++ hprt0.b.prtsusp = 1; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ //DWC_PRINT("SUSPEND: HPRT0=%0x\n", hprt0.d32); ++ /* Suspend the Phy Clock */ ++ { ++ pcgcctl_data_t pcgcctl = {.d32=0}; ++ pcgcctl.b.stoppclk = 1; ++ dwc_write_reg32(core_if->pcgcctl, pcgcctl.d32); ++ } ++ ++ /* For HNP the bus must be suspended for at least 200ms. */ ++ if (hcd->self.b_hnp_enable) { ++ mdelay(200); ++ //DWC_PRINT("SUSPEND: wait complete! (%d)\n", _hcd->state); ++ } ++ break; ++ case USB_PORT_FEAT_POWER: ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "SetPortFeature - USB_PORT_FEAT_POWER\n"); ++ hprt0.d32 = dwc_otg_read_hprt0(core_if); ++ hprt0.b.prtpwr = 1; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ break; ++ case USB_PORT_FEAT_RESET: ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "SetPortFeature - USB_PORT_FEAT_RESET\n"); ++ hprt0.d32 = dwc_otg_read_hprt0(core_if); ++ /* When B-Host the Port reset bit is set in ++ * the Start HCD Callback function, so that ++ * the reset is started within 1ms of the HNP ++ * success interrupt. */ ++ if (!hcd->self.is_b_host) { ++ hprt0.b.prtrst = 1; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ } ++ /* Clear reset bit in 10ms (FS/LS) or 50ms (HS) */ ++ MDELAY(60); ++ hprt0.b.prtrst = 0; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ break; ++ ++#ifdef DWC_HS_ELECT_TST ++ case USB_PORT_FEAT_TEST: ++ { ++ uint32_t t; ++ gintmsk_data_t gintmsk; ++ ++ t = (wIndex >> 8); /* MSB wIndex USB */ ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "SetPortFeature - USB_PORT_FEAT_TEST %d\n", t); ++ warn("USB_PORT_FEAT_TEST %d\n", t); ++ if (t < 6) { ++ hprt0.d32 = dwc_otg_read_hprt0(core_if); ++ hprt0.b.prttstctl = t; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ } else { ++ /* Setup global vars with reg addresses (quick and ++ * dirty hack, should be cleaned up) ++ */ ++ global_regs = core_if->core_global_regs; ++ hc_global_regs = core_if->host_if->host_global_regs; ++ hc_regs = (dwc_otg_hc_regs_t *)((char *)global_regs + 0x500); ++ data_fifo = (uint32_t *)((char *)global_regs + 0x1000); ++ ++ if (t == 6) { /* HS_HOST_PORT_SUSPEND_RESUME */ ++ /* Save current interrupt mask */ ++ gintmsk.d32 = dwc_read_reg32(&global_regs->gintmsk); ++ ++ /* Disable all interrupts while we muck with ++ * the hardware directly ++ */ ++ dwc_write_reg32(&global_regs->gintmsk, 0); ++ ++ /* 15 second delay per the test spec */ ++ mdelay(15000); ++ ++ /* Drive suspend on the root port */ ++ hprt0.d32 = dwc_otg_read_hprt0(core_if); ++ hprt0.b.prtsusp = 1; ++ hprt0.b.prtres = 0; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ ++ /* 15 second delay per the test spec */ ++ mdelay(15000); ++ ++ /* Drive resume on the root port */ ++ hprt0.d32 = dwc_otg_read_hprt0(core_if); ++ hprt0.b.prtsusp = 0; ++ hprt0.b.prtres = 1; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ mdelay(100); ++ ++ /* Clear the resume bit */ ++ hprt0.b.prtres = 0; ++ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); ++ ++ /* Restore interrupts */ ++ 
dwc_write_reg32(&global_regs->gintmsk, gintmsk.d32); ++ } else if (t == 7) { /* SINGLE_STEP_GET_DEVICE_DESCRIPTOR setup */ ++ /* Save current interrupt mask */ ++ gintmsk.d32 = dwc_read_reg32(&global_regs->gintmsk); ++ ++ /* Disable all interrupts while we muck with ++ * the hardware directly ++ */ ++ dwc_write_reg32(&global_regs->gintmsk, 0); ++ ++ /* 15 second delay per the test spec */ ++ mdelay(15000); ++ ++ /* Send the Setup packet */ ++ do_setup(); ++ ++ /* 15 second delay so nothing else happens for awhile */ ++ mdelay(15000); ++ ++ /* Restore interrupts */ ++ dwc_write_reg32(&global_regs->gintmsk, gintmsk.d32); ++ } else if (t == 8) { /* SINGLE_STEP_GET_DEVICE_DESCRIPTOR execute */ ++ /* Save current interrupt mask */ ++ gintmsk.d32 = dwc_read_reg32(&global_regs->gintmsk); ++ ++ /* Disable all interrupts while we muck with ++ * the hardware directly ++ */ ++ dwc_write_reg32(&global_regs->gintmsk, 0); ++ ++ /* Send the Setup packet */ ++ do_setup(); ++ ++ /* 15 second delay so nothing else happens for awhile */ ++ mdelay(15000); ++ ++ /* Send the In and Ack packets */ ++ do_in_ack(); ++ ++ /* 15 second delay so nothing else happens for awhile */ ++ mdelay(15000); ++ ++ /* Restore interrupts */ ++ dwc_write_reg32(&global_regs->gintmsk, gintmsk.d32); ++ } ++ } ++ break; ++ } ++#endif /* DWC_HS_ELECT_TST */ ++ ++ case USB_PORT_FEAT_INDICATOR: ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " ++ "SetPortFeature - USB_PORT_FEAT_INDICATOR\n"); ++ /* Not supported */ ++ break; ++ default: ++ retval = -EINVAL; ++ DWC_ERROR("DWC OTG HCD - " ++ "SetPortFeature request %xh " ++ "unknown or unsupported\n", wValue); ++ break; ++ } ++ break; ++ default: ++ error: ++ retval = -EINVAL; ++ DWC_WARN("DWC OTG HCD - " ++ "Unknown hub control request type or invalid typeReq: %xh wIndex: %xh wValue: %xh\n", ++ typeReq, wIndex, wValue); ++ break; ++ } ++ ++ return retval; ++} ++ ++/** ++ * Assigns transactions from a QTD to a free host channel and initializes the ++ * host channel to perform the transactions. The host channel is removed from ++ * the free list. ++ * ++ * @param hcd The HCD state structure. ++ * @param qh Transactions from the first QTD for this QH are selected and ++ * assigned to a free host channel. ++ */ ++static void assign_and_init_hc(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh) ++{ ++ dwc_hc_t *hc; ++ dwc_otg_qtd_t *qtd; ++ struct urb *urb; ++ ++ DWC_DEBUGPL(DBG_HCDV, "%s(%p,%p)\n", __func__, hcd, qh); ++ ++ hc = list_entry(hcd->free_hc_list.next, dwc_hc_t, hc_list_entry); ++ ++ /* Remove the host channel from the free list. */ ++ list_del_init(&hc->hc_list_entry); ++ ++ qtd = list_entry(qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry); ++ urb = qtd->urb; ++ qh->channel = hc; ++ qh->qtd_in_process = qtd; ++ ++ /* ++ * Use usb_pipedevice to determine device address. This address is ++ * 0 before the SET_ADDRESS command and the correct address afterward. 
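++	 * (The remainder of this function copies endpoint, speed, split and
++	 * transfer-type specific state from the URB, QH and QTD into the
++	 * dwc_hc_t before dwc_otg_hc_init() programs the channel registers.)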
++ */ ++ hc->dev_addr = usb_pipedevice(urb->pipe); ++ hc->ep_num = usb_pipeendpoint(urb->pipe); ++ ++ if (urb->dev->speed == USB_SPEED_LOW) { ++ hc->speed = DWC_OTG_EP_SPEED_LOW; ++ } else if (urb->dev->speed == USB_SPEED_FULL) { ++ hc->speed = DWC_OTG_EP_SPEED_FULL; ++ } else { ++ hc->speed = DWC_OTG_EP_SPEED_HIGH; ++ } ++ ++ hc->max_packet = dwc_max_packet(qh->maxp); ++ ++ hc->xfer_started = 0; ++ hc->halt_status = DWC_OTG_HC_XFER_NO_HALT_STATUS; ++ hc->error_state = (qtd->error_count > 0); ++ hc->halt_on_queue = 0; ++ hc->halt_pending = 0; ++ hc->requests = 0; ++ ++ /* ++ * The following values may be modified in the transfer type section ++ * below. The xfer_len value may be reduced when the transfer is ++ * started to accommodate the max widths of the XferSize and PktCnt ++ * fields in the HCTSIZn register. ++ */ ++ hc->do_ping = qh->ping_state; ++ hc->ep_is_in = (usb_pipein(urb->pipe) != 0); ++ hc->data_pid_start = qh->data_toggle; ++ hc->multi_count = 1; ++ ++ if (hcd->core_if->dma_enable) { ++ hc->xfer_buff = (uint8_t *)urb->transfer_dma + urb->actual_length; ++ } else { ++ hc->xfer_buff = (uint8_t *)urb->transfer_buffer + urb->actual_length; ++ } ++ hc->xfer_len = urb->transfer_buffer_length - urb->actual_length; ++ hc->xfer_count = 0; ++ ++ /* ++ * Set the split attributes ++ */ ++ hc->do_split = 0; ++ if (qh->do_split) { ++ hc->do_split = 1; ++ hc->xact_pos = qtd->isoc_split_pos; ++ hc->complete_split = qtd->complete_split; ++ hc->hub_addr = urb->dev->tt->hub->devnum; ++ hc->port_addr = urb->dev->ttport; ++ } ++ ++ switch (usb_pipetype(urb->pipe)) { ++ case PIPE_CONTROL: ++ hc->ep_type = DWC_OTG_EP_TYPE_CONTROL; ++ switch (qtd->control_phase) { ++ case DWC_OTG_CONTROL_SETUP: ++ DWC_DEBUGPL(DBG_HCDV, " Control setup transaction\n"); ++ hc->do_ping = 0; ++ hc->ep_is_in = 0; ++ hc->data_pid_start = DWC_OTG_HC_PID_SETUP; ++ if (hcd->core_if->dma_enable) { ++ hc->xfer_buff = (uint8_t *)urb->setup_dma; ++ } else { ++ hc->xfer_buff = (uint8_t *)urb->setup_packet; ++ } ++ hc->xfer_len = 8; ++ break; ++ case DWC_OTG_CONTROL_DATA: ++ DWC_DEBUGPL(DBG_HCDV, " Control data transaction\n"); ++ hc->data_pid_start = qtd->data_toggle; ++ break; ++ case DWC_OTG_CONTROL_STATUS: ++ /* ++ * Direction is opposite of data direction or IN if no ++ * data. 
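++			 * For example, GET_DESCRIPTOR (IN data stage) ends with
++			 * an OUT status stage, while SET_ADDRESS (no data stage)
++			 * ends with an IN status stage.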
++ */ ++ DWC_DEBUGPL(DBG_HCDV, " Control status transaction\n"); ++ if (urb->transfer_buffer_length == 0) { ++ hc->ep_is_in = 1; ++ } else { ++ hc->ep_is_in = (usb_pipein(urb->pipe) != USB_DIR_IN); ++ } ++ if (hc->ep_is_in) { ++ hc->do_ping = 0; ++ } ++ hc->data_pid_start = DWC_OTG_HC_PID_DATA1; ++ hc->xfer_len = 0; ++ if (hcd->core_if->dma_enable) { ++ hc->xfer_buff = (uint8_t *)hcd->status_buf_dma; ++ } else { ++ hc->xfer_buff = (uint8_t *)hcd->status_buf; ++ } ++ break; ++ } ++ break; ++ case PIPE_BULK: ++ hc->ep_type = DWC_OTG_EP_TYPE_BULK; ++ break; ++ case PIPE_INTERRUPT: ++ hc->ep_type = DWC_OTG_EP_TYPE_INTR; ++ break; ++ case PIPE_ISOCHRONOUS: ++ { ++ struct usb_iso_packet_descriptor *frame_desc; ++ frame_desc = &urb->iso_frame_desc[qtd->isoc_frame_index]; ++ hc->ep_type = DWC_OTG_EP_TYPE_ISOC; ++ if (hcd->core_if->dma_enable) { ++ hc->xfer_buff = (uint8_t *)urb->transfer_dma; ++ } else { ++ hc->xfer_buff = (uint8_t *)urb->transfer_buffer; ++ } ++ hc->xfer_buff += frame_desc->offset + qtd->isoc_split_offset; ++ hc->xfer_len = frame_desc->length - qtd->isoc_split_offset; ++ ++ if (hc->xact_pos == DWC_HCSPLIT_XACTPOS_ALL) { ++ if (hc->xfer_len <= 188) { ++ hc->xact_pos = DWC_HCSPLIT_XACTPOS_ALL; ++ } ++ else { ++ hc->xact_pos = DWC_HCSPLIT_XACTPOS_BEGIN; ++ } ++ } ++ } ++ break; ++ } ++ ++ if (hc->ep_type == DWC_OTG_EP_TYPE_INTR || ++ hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { ++ /* ++ * This value may be modified when the transfer is started to ++ * reflect the actual transfer length. ++ */ ++ hc->multi_count = dwc_hb_mult(qh->maxp); ++ } ++ ++ dwc_otg_hc_init(hcd->core_if, hc); ++ hc->qh = qh; ++} ++ ++/** ++ * This function selects transactions from the HCD transfer schedule and ++ * assigns them to available host channels. It is called from HCD interrupt ++ * handler functions. ++ * ++ * @param hcd The HCD state structure. ++ * ++ * @return The types of new transactions that were assigned to host channels. ++ */ ++dwc_otg_transaction_type_e dwc_otg_hcd_select_transactions(dwc_otg_hcd_t *hcd) ++{ ++ struct list_head *qh_ptr; ++ dwc_otg_qh_t *qh; ++ int num_channels; ++ dwc_otg_transaction_type_e ret_val = DWC_OTG_TRANSACTION_NONE; ++ ++#ifdef DEBUG_SOF ++ DWC_DEBUGPL(DBG_HCD, " Select Transactions\n"); ++#endif ++ ++ /* Process entries in the periodic ready list. */ ++ qh_ptr = hcd->periodic_sched_ready.next; ++ while (qh_ptr != &hcd->periodic_sched_ready && ++ !list_empty(&hcd->free_hc_list)) { ++ ++ qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list_entry); ++ assign_and_init_hc(hcd, qh); ++ ++ /* ++ * Move the QH from the periodic ready schedule to the ++ * periodic assigned schedule. ++ */ ++ qh_ptr = qh_ptr->next; ++ list_move(&qh->qh_list_entry, &hcd->periodic_sched_assigned); ++ ++ ret_val = DWC_OTG_TRANSACTION_PERIODIC; ++ } ++ ++ /* ++ * Process entries in the inactive portion of the non-periodic ++ * schedule. Some free host channels may not be used if they are ++ * reserved for periodic transfers. ++ */ ++ qh_ptr = hcd->non_periodic_sched_inactive.next; ++ num_channels = hcd->core_if->core_params->host_channels; ++ while (qh_ptr != &hcd->non_periodic_sched_inactive && ++ (hcd->non_periodic_channels < ++ num_channels - hcd->periodic_channels) && ++ !list_empty(&hcd->free_hc_list)) { ++ ++ qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list_entry); ++ assign_and_init_hc(hcd, qh); ++ ++ /* ++ * Move the QH from the non-periodic inactive schedule to the ++ * non-periodic active schedule. 
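++		 * (qh_ptr is advanced first so that list_move() below does not
++		 * disturb the walk over the inactive list.)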
++ */ ++ qh_ptr = qh_ptr->next; ++ list_move(&qh->qh_list_entry, &hcd->non_periodic_sched_active); ++ ++ if (ret_val == DWC_OTG_TRANSACTION_NONE) { ++ ret_val = DWC_OTG_TRANSACTION_NON_PERIODIC; ++ } else { ++ ret_val = DWC_OTG_TRANSACTION_ALL; ++ } ++ ++ hcd->non_periodic_channels++; ++ } ++ ++ return ret_val; ++} ++ ++/** ++ * Attempts to queue a single transaction request for a host channel ++ * associated with either a periodic or non-periodic transfer. This function ++ * assumes that there is space available in the appropriate request queue. For ++ * an OUT transfer or SETUP transaction in Slave mode, it checks whether space ++ * is available in the appropriate Tx FIFO. ++ * ++ * @param hcd The HCD state structure. ++ * @param hc Host channel descriptor associated with either a periodic or ++ * non-periodic transfer. ++ * @param fifo_dwords_avail Number of DWORDs available in the periodic Tx ++ * FIFO for periodic transfers or the non-periodic Tx FIFO for non-periodic ++ * transfers. ++ * ++ * @return 1 if a request is queued and more requests may be needed to ++ * complete the transfer, 0 if no more requests are required for this ++ * transfer, -1 if there is insufficient space in the Tx FIFO. ++ */ ++static int queue_transaction(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ uint16_t fifo_dwords_avail) ++{ ++ int retval; ++ ++ if (hcd->core_if->dma_enable) { ++ if (!hc->xfer_started) { ++ dwc_otg_hc_start_transfer(hcd->core_if, hc); ++ hc->qh->ping_state = 0; ++ } ++ retval = 0; ++ } else if (hc->halt_pending) { ++ /* Don't queue a request if the channel has been halted. */ ++ retval = 0; ++ } else if (hc->halt_on_queue) { ++ dwc_otg_hc_halt(hcd->core_if, hc, hc->halt_status); ++ retval = 0; ++ } else if (hc->do_ping) { ++ if (!hc->xfer_started) { ++ dwc_otg_hc_start_transfer(hcd->core_if, hc); ++ } ++ retval = 0; ++ } else if (!hc->ep_is_in || ++ hc->data_pid_start == DWC_OTG_HC_PID_SETUP) { ++ if ((fifo_dwords_avail * 4) >= hc->max_packet) { ++ if (!hc->xfer_started) { ++ dwc_otg_hc_start_transfer(hcd->core_if, hc); ++ retval = 1; ++ } else { ++ retval = dwc_otg_hc_continue_transfer(hcd->core_if, hc); ++ } ++ } else { ++ retval = -1; ++ } ++ } else { ++ if (!hc->xfer_started) { ++ dwc_otg_hc_start_transfer(hcd->core_if, hc); ++ retval = 1; ++ } else { ++ retval = dwc_otg_hc_continue_transfer(hcd->core_if, hc); ++ } ++ } ++ ++ return retval; ++} ++ ++/** ++ * Processes active non-periodic channels and queues transactions for these ++ * channels to the DWC_otg controller. After queueing transactions, the NP Tx ++ * FIFO Empty interrupt is enabled if there are more transactions to queue as ++ * NP Tx FIFO or request queue space becomes available. Otherwise, the NP Tx ++ * FIFO Empty interrupt is disabled. ++ */ ++static void process_non_periodic_channels(dwc_otg_hcd_t *hcd) ++{ ++ gnptxsts_data_t tx_status; ++ struct list_head *orig_qh_ptr; ++ dwc_otg_qh_t *qh; ++ int status; ++ int no_queue_space = 0; ++ int no_fifo_space = 0; ++ int more_to_do = 0; ++ ++ dwc_otg_core_global_regs_t *global_regs = hcd->core_if->core_global_regs; ++ ++ DWC_DEBUGPL(DBG_HCDV, "Queue non-periodic transactions\n"); ++#ifdef DEBUG ++ tx_status.d32 = dwc_read_reg32(&global_regs->gnptxsts); ++ DWC_DEBUGPL(DBG_HCDV, " NP Tx Req Queue Space Avail (before queue): %d\n", ++ tx_status.b.nptxqspcavail); ++ DWC_DEBUGPL(DBG_HCDV, " NP Tx FIFO Space Avail (before queue): %d\n", ++ tx_status.b.nptxfspcavail); ++#endif ++ /* ++ * Keep track of the starting point. Skip over the start-of-list ++ * entry. 
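++	 * non_periodic_qh_ptr persists in the HCD state between calls, so each
++	 * pass resumes where the previous one stopped and the active QHs are
++	 * serviced round-robin until the walk wraps back to orig_qh_ptr or the
++	 * request queue / Tx FIFO fills.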
++ */ ++ if (hcd->non_periodic_qh_ptr == &hcd->non_periodic_sched_active) { ++ hcd->non_periodic_qh_ptr = hcd->non_periodic_qh_ptr->next; ++ } ++ orig_qh_ptr = hcd->non_periodic_qh_ptr; ++ ++ /* ++ * Process once through the active list or until no more space is ++ * available in the request queue or the Tx FIFO. ++ */ ++ do { ++ tx_status.d32 = dwc_read_reg32(&global_regs->gnptxsts); ++ if (!hcd->core_if->dma_enable && tx_status.b.nptxqspcavail == 0) { ++ no_queue_space = 1; ++ break; ++ } ++ ++ qh = list_entry(hcd->non_periodic_qh_ptr, dwc_otg_qh_t, qh_list_entry); ++ status = queue_transaction(hcd, qh->channel, tx_status.b.nptxfspcavail); ++ ++ if (status > 0) { ++ more_to_do = 1; ++ } else if (status < 0) { ++ no_fifo_space = 1; ++ break; ++ } ++ ++ /* Advance to next QH, skipping start-of-list entry. */ ++ hcd->non_periodic_qh_ptr = hcd->non_periodic_qh_ptr->next; ++ if (hcd->non_periodic_qh_ptr == &hcd->non_periodic_sched_active) { ++ hcd->non_periodic_qh_ptr = hcd->non_periodic_qh_ptr->next; ++ } ++ ++ } while (hcd->non_periodic_qh_ptr != orig_qh_ptr); ++ ++ if (!hcd->core_if->dma_enable) { ++ gintmsk_data_t intr_mask = {.d32 = 0}; ++ intr_mask.b.nptxfempty = 1; ++ ++#ifdef DEBUG ++ tx_status.d32 = dwc_read_reg32(&global_regs->gnptxsts); ++ DWC_DEBUGPL(DBG_HCDV, " NP Tx Req Queue Space Avail (after queue): %d\n", ++ tx_status.b.nptxqspcavail); ++ DWC_DEBUGPL(DBG_HCDV, " NP Tx FIFO Space Avail (after queue): %d\n", ++ tx_status.b.nptxfspcavail); ++#endif ++ if (more_to_do || no_queue_space || no_fifo_space) { ++ /* ++ * May need to queue more transactions as the request ++ * queue or Tx FIFO empties. Enable the non-periodic ++ * Tx FIFO empty interrupt. (Always use the half-empty ++ * level to ensure that new requests are loaded as ++ * soon as possible.) ++ */ ++ dwc_modify_reg32(&global_regs->gintmsk, 0, intr_mask.d32); ++ } else { ++ /* ++ * Disable the Tx FIFO empty interrupt since there are ++ * no more transactions that need to be queued right ++ * now. This function is called from interrupt ++ * handlers to queue more transactions as transfer ++ * states change. ++ */ ++ dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, 0); ++ } ++ } ++} ++ ++/** ++ * Processes periodic channels for the next frame and queues transactions for ++ * these channels to the DWC_otg controller. After queueing transactions, the ++ * Periodic Tx FIFO Empty interrupt is enabled if there are more transactions ++ * to queue as Periodic Tx FIFO or request queue space becomes available. ++ * Otherwise, the Periodic Tx FIFO Empty interrupt is disabled. 
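Both the non-periodic path above and the periodic path below gate their Tx FIFO Empty interrupt through dwc_modify_reg32(); judging from the enable/disable call sites, its argument order is (register, bits to clear, bits to set). A hedged sketch of the read-modify-write it presumably performs (the real helper uses the driver's own register accessors):

#include <stdint.h>

static inline void modify_reg32_sketch(volatile uint32_t *reg,
				       uint32_t clear_mask, uint32_t set_mask)
{
	uint32_t val = *reg;			/* dwc_read_reg32()  */

	*reg = (val & ~clear_mask) | set_mask;	/* dwc_write_reg32() */
}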
++ */ ++static void process_periodic_channels(dwc_otg_hcd_t *hcd) ++{ ++ hptxsts_data_t tx_status; ++ struct list_head *qh_ptr; ++ dwc_otg_qh_t *qh; ++ int status; ++ int no_queue_space = 0; ++ int no_fifo_space = 0; ++ ++ dwc_otg_host_global_regs_t *host_regs; ++ host_regs = hcd->core_if->host_if->host_global_regs; ++ ++ DWC_DEBUGPL(DBG_HCDV, "Queue periodic transactions\n"); ++#ifdef DEBUG ++ tx_status.d32 = dwc_read_reg32(&host_regs->hptxsts); ++ DWC_DEBUGPL(DBG_HCDV, " P Tx Req Queue Space Avail (before queue): %d\n", ++ tx_status.b.ptxqspcavail); ++ DWC_DEBUGPL(DBG_HCDV, " P Tx FIFO Space Avail (before queue): %d\n", ++ tx_status.b.ptxfspcavail); ++#endif ++ ++ qh_ptr = hcd->periodic_sched_assigned.next; ++ while (qh_ptr != &hcd->periodic_sched_assigned) { ++ tx_status.d32 = dwc_read_reg32(&host_regs->hptxsts); ++ if (tx_status.b.ptxqspcavail == 0) { ++ no_queue_space = 1; ++ break; ++ } ++ ++ qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list_entry); ++ ++ /* ++ * Set a flag if we're queuing high-bandwidth in slave mode. ++ * The flag prevents any halts to get into the request queue in ++ * the middle of multiple high-bandwidth packets getting queued. ++ */ ++ if (!hcd->core_if->dma_enable && ++ qh->channel->multi_count > 1) ++ { ++ hcd->core_if->queuing_high_bandwidth = 1; ++ } ++ ++ status = queue_transaction(hcd, qh->channel, tx_status.b.ptxfspcavail); ++ if (status < 0) { ++ no_fifo_space = 1; ++ break; ++ } ++ ++ /* ++ * In Slave mode, stay on the current transfer until there is ++ * nothing more to do or the high-bandwidth request count is ++ * reached. In DMA mode, only need to queue one request. The ++ * controller automatically handles multiple packets for ++ * high-bandwidth transfers. ++ */ ++ if (hcd->core_if->dma_enable || status == 0 || ++ qh->channel->requests == qh->channel->multi_count) { ++ qh_ptr = qh_ptr->next; ++ /* ++ * Move the QH from the periodic assigned schedule to ++ * the periodic queued schedule. ++ */ ++ list_move(&qh->qh_list_entry, &hcd->periodic_sched_queued); ++ ++ /* done queuing high bandwidth */ ++ hcd->core_if->queuing_high_bandwidth = 0; ++ } ++ } ++ ++ if (!hcd->core_if->dma_enable) { ++ dwc_otg_core_global_regs_t *global_regs; ++ gintmsk_data_t intr_mask = {.d32 = 0}; ++ ++ global_regs = hcd->core_if->core_global_regs; ++ intr_mask.b.ptxfempty = 1; ++#ifdef DEBUG ++ tx_status.d32 = dwc_read_reg32(&host_regs->hptxsts); ++ DWC_DEBUGPL(DBG_HCDV, " P Tx Req Queue Space Avail (after queue): %d\n", ++ tx_status.b.ptxqspcavail); ++ DWC_DEBUGPL(DBG_HCDV, " P Tx FIFO Space Avail (after queue): %d\n", ++ tx_status.b.ptxfspcavail); ++#endif ++ if (!list_empty(&hcd->periodic_sched_assigned) || ++ no_queue_space || no_fifo_space) { ++ /* ++ * May need to queue more transactions as the request ++ * queue or Tx FIFO empties. Enable the periodic Tx ++ * FIFO empty interrupt. (Always use the half-empty ++ * level to ensure that new requests are loaded as ++ * soon as possible.) ++ */ ++ dwc_modify_reg32(&global_regs->gintmsk, 0, intr_mask.d32); ++ } else { ++ /* ++ * Disable the Tx FIFO empty interrupt since there are ++ * no more transactions that need to be queued right ++ * now. This function is called from interrupt ++ * handlers to queue more transactions as transfer ++ * states change. ++ */ ++ dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, 0); ++ } ++ } ++} ++ ++/** ++ * This function processes the currently active host channels and queues ++ * transactions for these channels to the DWC_otg controller. 
It is called ++ * from HCD interrupt handler functions. ++ * ++ * @param hcd The HCD state structure. ++ * @param tr_type The type(s) of transactions to queue (non-periodic, ++ * periodic, or both). ++ */ ++void dwc_otg_hcd_queue_transactions(dwc_otg_hcd_t *hcd, ++ dwc_otg_transaction_type_e tr_type) ++{ ++#ifdef DEBUG_SOF ++ DWC_DEBUGPL(DBG_HCD, "Queue Transactions\n"); ++#endif ++ /* Process host channels associated with periodic transfers. */ ++ if ((tr_type == DWC_OTG_TRANSACTION_PERIODIC || ++ tr_type == DWC_OTG_TRANSACTION_ALL) && ++ !list_empty(&hcd->periodic_sched_assigned)) { ++ ++ process_periodic_channels(hcd); ++ } ++ ++ /* Process host channels associated with non-periodic transfers. */ ++ if (tr_type == DWC_OTG_TRANSACTION_NON_PERIODIC || ++ tr_type == DWC_OTG_TRANSACTION_ALL) { ++ if (!list_empty(&hcd->non_periodic_sched_active)) { ++ process_non_periodic_channels(hcd); ++ } else { ++ /* ++ * Ensure NP Tx FIFO empty interrupt is disabled when ++ * there are no non-periodic transfers to process. ++ */ ++ gintmsk_data_t gintmsk = {.d32 = 0}; ++ gintmsk.b.nptxfempty = 1; ++ dwc_modify_reg32(&hcd->core_if->core_global_regs->gintmsk, ++ gintmsk.d32, 0); ++ } ++ } ++} ++ ++/** ++ * Sets the final status of an URB and returns it to the device driver. Any ++ * required cleanup of the URB is performed. ++ */ ++void dwc_otg_hcd_complete_urb(dwc_otg_hcd_t *hcd, struct urb *urb, int status) ++{ ++#ifdef DEBUG ++ if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) { ++ DWC_PRINT("%s: urb %p, device %d, ep %d %s, status=%d\n", ++ __func__, urb, usb_pipedevice(urb->pipe), ++ usb_pipeendpoint(urb->pipe), ++ usb_pipein(urb->pipe) ? "IN" : "OUT", status); ++ if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { ++ int i; ++ for (i = 0; i < urb->number_of_packets; i++) { ++ DWC_PRINT(" ISO Desc %d status: %d\n", ++ i, urb->iso_frame_desc[i].status); ++ } ++ } ++ } ++#endif ++ ++ urb->status = status; ++ urb->hcpriv = NULL; ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ usb_hcd_giveback_urb(dwc_otg_hcd_to_hcd(hcd), urb, status); ++#else ++ usb_hcd_giveback_urb(dwc_otg_hcd_to_hcd(hcd), urb, NULL); ++#endif ++} ++ ++/* ++ * Returns the Queue Head for an URB. ++ */ ++dwc_otg_qh_t *dwc_urb_to_qh(struct urb *urb) ++{ ++ struct usb_host_endpoint *ep = dwc_urb_to_endpoint(urb); ++ return (dwc_otg_qh_t *)ep->hcpriv; ++} ++ ++#ifdef DEBUG ++void dwc_print_setup_data(uint8_t *setup) ++{ ++ int i; ++ if (CHK_DEBUG_LEVEL(DBG_HCD)){ ++ DWC_PRINT("Setup Data = MSB "); ++ for (i = 7; i >= 0; i--) DWC_PRINT("%02x ", setup[i]); ++ DWC_PRINT("\n"); ++ DWC_PRINT(" bmRequestType Tranfer = %s\n", (setup[0] & 0x80) ? 
"Device-to-Host" : "Host-to-Device"); ++ DWC_PRINT(" bmRequestType Type = "); ++ switch ((setup[0] & 0x60) >> 5) { ++ case 0: DWC_PRINT("Standard\n"); break; ++ case 1: DWC_PRINT("Class\n"); break; ++ case 2: DWC_PRINT("Vendor\n"); break; ++ case 3: DWC_PRINT("Reserved\n"); break; ++ } ++ DWC_PRINT(" bmRequestType Recipient = "); ++ switch (setup[0] & 0x1f) { ++ case 0: DWC_PRINT("Device\n"); break; ++ case 1: DWC_PRINT("Interface\n"); break; ++ case 2: DWC_PRINT("Endpoint\n"); break; ++ case 3: DWC_PRINT("Other\n"); break; ++ default: DWC_PRINT("Reserved\n"); break; ++ } ++ DWC_PRINT(" bRequest = 0x%0x\n", setup[1]); ++ DWC_PRINT(" wValue = 0x%0x\n", *((uint16_t *)&setup[2])); ++ DWC_PRINT(" wIndex = 0x%0x\n", *((uint16_t *)&setup[4])); ++ DWC_PRINT(" wLength = 0x%0x\n\n", *((uint16_t *)&setup[6])); ++ } ++} ++#endif ++ ++void dwc_otg_hcd_dump_frrem(dwc_otg_hcd_t *hcd) { ++#if defined(DEBUG) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ DWC_PRINT("Frame remaining at SOF:\n"); ++ DWC_PRINT(" samples %u, accum %llu, avg %llu\n", ++ hcd->frrem_samples, hcd->frrem_accum, ++ (hcd->frrem_samples > 0) ? ++ hcd->frrem_accum/hcd->frrem_samples : 0); ++ ++ DWC_PRINT("\n"); ++ DWC_PRINT("Frame remaining at start_transfer (uframe 7):\n"); ++ DWC_PRINT(" samples %u, accum %llu, avg %llu\n", ++ hcd->core_if->hfnum_7_samples, hcd->core_if->hfnum_7_frrem_accum, ++ (hcd->core_if->hfnum_7_samples > 0) ? ++ hcd->core_if->hfnum_7_frrem_accum/hcd->core_if->hfnum_7_samples : 0); ++ DWC_PRINT("Frame remaining at start_transfer (uframe 0):\n"); ++ DWC_PRINT(" samples %u, accum %llu, avg %llu\n", ++ hcd->core_if->hfnum_0_samples, hcd->core_if->hfnum_0_frrem_accum, ++ (hcd->core_if->hfnum_0_samples > 0) ? ++ hcd->core_if->hfnum_0_frrem_accum/hcd->core_if->hfnum_0_samples : 0); ++ DWC_PRINT("Frame remaining at start_transfer (uframe 1-6):\n"); ++ DWC_PRINT(" samples %u, accum %llu, avg %llu\n", ++ hcd->core_if->hfnum_other_samples, hcd->core_if->hfnum_other_frrem_accum, ++ (hcd->core_if->hfnum_other_samples > 0) ? ++ hcd->core_if->hfnum_other_frrem_accum/hcd->core_if->hfnum_other_samples : 0); ++ ++ DWC_PRINT("\n"); ++ DWC_PRINT("Frame remaining at sample point A (uframe 7):\n"); ++ DWC_PRINT(" samples %u, accum %llu, avg %llu\n", ++ hcd->hfnum_7_samples_a, hcd->hfnum_7_frrem_accum_a, ++ (hcd->hfnum_7_samples_a > 0) ? ++ hcd->hfnum_7_frrem_accum_a/hcd->hfnum_7_samples_a : 0); ++ DWC_PRINT("Frame remaining at sample point A (uframe 0):\n"); ++ DWC_PRINT(" samples %u, accum %llu, avg %llu\n", ++ hcd->hfnum_0_samples_a, hcd->hfnum_0_frrem_accum_a, ++ (hcd->hfnum_0_samples_a > 0) ? ++ hcd->hfnum_0_frrem_accum_a/hcd->hfnum_0_samples_a : 0); ++ DWC_PRINT("Frame remaining at sample point A (uframe 1-6):\n"); ++ DWC_PRINT(" samples %u, accum %llu, avg %llu\n", ++ hcd->hfnum_other_samples_a, hcd->hfnum_other_frrem_accum_a, ++ (hcd->hfnum_other_samples_a > 0) ? ++ hcd->hfnum_other_frrem_accum_a/hcd->hfnum_other_samples_a : 0); ++ ++ DWC_PRINT("\n"); ++ DWC_PRINT("Frame remaining at sample point B (uframe 7):\n"); ++ DWC_PRINT(" samples %u, accum %llu, avg %llu\n", ++ hcd->hfnum_7_samples_b, hcd->hfnum_7_frrem_accum_b, ++ (hcd->hfnum_7_samples_b > 0) ? ++ hcd->hfnum_7_frrem_accum_b/hcd->hfnum_7_samples_b : 0); ++ DWC_PRINT("Frame remaining at sample point B (uframe 0):\n"); ++ DWC_PRINT(" samples %u, accum %llu, avg %llu\n", ++ hcd->hfnum_0_samples_b, hcd->hfnum_0_frrem_accum_b, ++ (hcd->hfnum_0_samples_b > 0) ? 
++ hcd->hfnum_0_frrem_accum_b/hcd->hfnum_0_samples_b : 0); ++ DWC_PRINT("Frame remaining at sample point B (uframe 1-6):\n"); ++ DWC_PRINT(" samples %u, accum %llu, avg %llu\n", ++ hcd->hfnum_other_samples_b, hcd->hfnum_other_frrem_accum_b, ++ (hcd->hfnum_other_samples_b > 0) ? ++ hcd->hfnum_other_frrem_accum_b/hcd->hfnum_other_samples_b : 0); ++#endif ++} ++ ++void dwc_otg_hcd_dump_state(dwc_otg_hcd_t *hcd) ++{ ++#ifdef DEBUG ++ int num_channels; ++ int i; ++ gnptxsts_data_t np_tx_status; ++ hptxsts_data_t p_tx_status; ++ ++ num_channels = hcd->core_if->core_params->host_channels; ++ DWC_PRINT("\n"); ++ DWC_PRINT("************************************************************\n"); ++ DWC_PRINT("HCD State:\n"); ++ DWC_PRINT(" Num channels: %d\n", num_channels); ++ for (i = 0; i < num_channels; i++) { ++ dwc_hc_t *hc = hcd->hc_ptr_array[i]; ++ DWC_PRINT(" Channel %d:\n", i); ++ DWC_PRINT(" dev_addr: %d, ep_num: %d, ep_is_in: %d\n", ++ hc->dev_addr, hc->ep_num, hc->ep_is_in); ++ DWC_PRINT(" speed: %d\n", hc->speed); ++ DWC_PRINT(" ep_type: %d\n", hc->ep_type); ++ DWC_PRINT(" max_packet: %d\n", hc->max_packet); ++ DWC_PRINT(" data_pid_start: %d\n", hc->data_pid_start); ++ DWC_PRINT(" multi_count: %d\n", hc->multi_count); ++ DWC_PRINT(" xfer_started: %d\n", hc->xfer_started); ++ DWC_PRINT(" xfer_buff: %p\n", hc->xfer_buff); ++ DWC_PRINT(" xfer_len: %d\n", hc->xfer_len); ++ DWC_PRINT(" xfer_count: %d\n", hc->xfer_count); ++ DWC_PRINT(" halt_on_queue: %d\n", hc->halt_on_queue); ++ DWC_PRINT(" halt_pending: %d\n", hc->halt_pending); ++ DWC_PRINT(" halt_status: %d\n", hc->halt_status); ++ DWC_PRINT(" do_split: %d\n", hc->do_split); ++ DWC_PRINT(" complete_split: %d\n", hc->complete_split); ++ DWC_PRINT(" hub_addr: %d\n", hc->hub_addr); ++ DWC_PRINT(" port_addr: %d\n", hc->port_addr); ++ DWC_PRINT(" xact_pos: %d\n", hc->xact_pos); ++ DWC_PRINT(" requests: %d\n", hc->requests); ++ DWC_PRINT(" qh: %p\n", hc->qh); ++ if (hc->xfer_started) { ++ hfnum_data_t hfnum; ++ hcchar_data_t hcchar; ++ hctsiz_data_t hctsiz; ++ hcint_data_t hcint; ++ hcintmsk_data_t hcintmsk; ++ hfnum.d32 = dwc_read_reg32(&hcd->core_if->host_if->host_global_regs->hfnum); ++ hcchar.d32 = dwc_read_reg32(&hcd->core_if->host_if->hc_regs[i]->hcchar); ++ hctsiz.d32 = dwc_read_reg32(&hcd->core_if->host_if->hc_regs[i]->hctsiz); ++ hcint.d32 = dwc_read_reg32(&hcd->core_if->host_if->hc_regs[i]->hcint); ++ hcintmsk.d32 = dwc_read_reg32(&hcd->core_if->host_if->hc_regs[i]->hcintmsk); ++ DWC_PRINT(" hfnum: 0x%08x\n", hfnum.d32); ++ DWC_PRINT(" hcchar: 0x%08x\n", hcchar.d32); ++ DWC_PRINT(" hctsiz: 0x%08x\n", hctsiz.d32); ++ DWC_PRINT(" hcint: 0x%08x\n", hcint.d32); ++ DWC_PRINT(" hcintmsk: 0x%08x\n", hcintmsk.d32); ++ } ++ if (hc->xfer_started && hc->qh && hc->qh->qtd_in_process) { ++ dwc_otg_qtd_t *qtd; ++ struct urb *urb; ++ qtd = hc->qh->qtd_in_process; ++ urb = qtd->urb; ++ DWC_PRINT(" URB Info:\n"); ++ DWC_PRINT(" qtd: %p, urb: %p\n", qtd, urb); ++ if (urb) { ++ DWC_PRINT(" Dev: %d, EP: %d %s\n", ++ usb_pipedevice(urb->pipe), usb_pipeendpoint(urb->pipe), ++ usb_pipein(urb->pipe) ? 
"IN" : "OUT"); ++ DWC_PRINT(" Max packet size: %d\n", ++ usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe))); ++ DWC_PRINT(" transfer_buffer: %p\n", urb->transfer_buffer); ++ DWC_PRINT(" transfer_dma: %p\n", (void *)urb->transfer_dma); ++ DWC_PRINT(" transfer_buffer_length: %d\n", urb->transfer_buffer_length); ++ DWC_PRINT(" actual_length: %d\n", urb->actual_length); ++ } ++ } ++ } ++ DWC_PRINT(" non_periodic_channels: %d\n", hcd->non_periodic_channels); ++ DWC_PRINT(" periodic_channels: %d\n", hcd->periodic_channels); ++ DWC_PRINT(" periodic_usecs: %d\n", hcd->periodic_usecs); ++ np_tx_status.d32 = dwc_read_reg32(&hcd->core_if->core_global_regs->gnptxsts); ++ DWC_PRINT(" NP Tx Req Queue Space Avail: %d\n", np_tx_status.b.nptxqspcavail); ++ DWC_PRINT(" NP Tx FIFO Space Avail: %d\n", np_tx_status.b.nptxfspcavail); ++ p_tx_status.d32 = dwc_read_reg32(&hcd->core_if->host_if->host_global_regs->hptxsts); ++ DWC_PRINT(" P Tx Req Queue Space Avail: %d\n", p_tx_status.b.ptxqspcavail); ++ DWC_PRINT(" P Tx FIFO Space Avail: %d\n", p_tx_status.b.ptxfspcavail); ++ dwc_otg_hcd_dump_frrem(hcd); ++ dwc_otg_dump_global_registers(hcd->core_if); ++ dwc_otg_dump_host_registers(hcd->core_if); ++ DWC_PRINT("************************************************************\n"); ++ DWC_PRINT("\n"); ++#endif ++} ++#endif /* DWC_DEVICE_ONLY */ +--- /dev/null ++++ b/drivers/usb/dwc_otg/dwc_otg_hcd.h +@@ -0,0 +1,668 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_hcd.h $ ++ * $Revision: 1.3 $ ++ * $Date: 2008-12-15 06:51:32 $ ++ * $Change: 1064918 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. 
++ * ========================================================================== */ ++#ifndef DWC_DEVICE_ONLY ++#ifndef __DWC_HCD_H__ ++#define __DWC_HCD_H__ ++ ++#include ++#include ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35) ++#include ++#else ++#include <../drivers/usb/core/hcd.h> ++#endif ++ ++struct dwc_otg_device; ++ ++#include "dwc_otg_cil.h" ++ ++/** ++ * @file ++ * ++ * This file contains the structures, constants, and interfaces for ++ * the Host Contoller Driver (HCD). ++ * ++ * The Host Controller Driver (HCD) is responsible for translating requests ++ * from the USB Driver into the appropriate actions on the DWC_otg controller. ++ * It isolates the USBD from the specifics of the controller by providing an ++ * API to the USBD. ++ */ ++ ++/** ++ * Phases for control transfers. ++ */ ++typedef enum dwc_otg_control_phase { ++ DWC_OTG_CONTROL_SETUP, ++ DWC_OTG_CONTROL_DATA, ++ DWC_OTG_CONTROL_STATUS ++} dwc_otg_control_phase_e; ++ ++/** Transaction types. */ ++typedef enum dwc_otg_transaction_type { ++ DWC_OTG_TRANSACTION_NONE, ++ DWC_OTG_TRANSACTION_PERIODIC, ++ DWC_OTG_TRANSACTION_NON_PERIODIC, ++ DWC_OTG_TRANSACTION_ALL ++} dwc_otg_transaction_type_e; ++ ++/** ++ * A Queue Transfer Descriptor (QTD) holds the state of a bulk, control, ++ * interrupt, or isochronous transfer. A single QTD is created for each URB ++ * (of one of these types) submitted to the HCD. The transfer associated with ++ * a QTD may require one or multiple transactions. ++ * ++ * A QTD is linked to a Queue Head, which is entered in either the ++ * non-periodic or periodic schedule for execution. When a QTD is chosen for ++ * execution, some or all of its transactions may be executed. After ++ * execution, the state of the QTD is updated. The QTD may be retired if all ++ * its transactions are complete or if an error occurred. Otherwise, it ++ * remains in the schedule so more transactions can be executed later. ++ */ ++typedef struct dwc_otg_qtd { ++ /** ++ * Determines the PID of the next data packet for the data phase of ++ * control transfers. Ignored for other transfer types.
++ * One of the following values: ++ * - DWC_OTG_HC_PID_DATA0 ++ * - DWC_OTG_HC_PID_DATA1 ++ */ ++ uint8_t data_toggle; ++ ++ /** Current phase for control transfers (Setup, Data, or Status). */ ++ dwc_otg_control_phase_e control_phase; ++ ++ /** Keep track of the current split type ++ * for FS/LS endpoints on a HS Hub */ ++ uint8_t complete_split; ++ ++ /** How many bytes transferred during SSPLIT OUT */ ++ uint32_t ssplit_out_xfer_count; ++ ++ /** ++ * Holds the number of bus errors that have occurred for a transaction ++ * within this transfer. ++ */ ++ uint8_t error_count; ++ ++ /** ++ * Index of the next frame descriptor for an isochronous transfer. A ++ * frame descriptor describes the buffer position and length of the ++ * data to be transferred in the next scheduled (micro)frame of an ++ * isochronous transfer. It also holds status for that transaction. ++ * The frame index starts at 0. ++ */ ++ int isoc_frame_index; ++ ++ /** Position of the ISOC split on full/low speed */ ++ uint8_t isoc_split_pos; ++ ++ /** Position of the ISOC split in the buffer for the current frame */ ++ uint16_t isoc_split_offset; ++ ++ /** URB for this transfer */ ++ struct urb *urb; ++ ++ /** This list of QTDs */ ++ struct list_head qtd_list_entry; ++ ++} dwc_otg_qtd_t; ++ ++/** ++ * A Queue Head (QH) holds the static characteristics of an endpoint and ++ * maintains a list of transfers (QTDs) for that endpoint. A QH structure may ++ * be entered in either the non-periodic or periodic schedule. ++ */ ++typedef struct dwc_otg_qh { ++ /** ++ * Endpoint type. ++ * One of the following values: ++ * - USB_ENDPOINT_XFER_CONTROL ++ * - USB_ENDPOINT_XFER_ISOC ++ * - USB_ENDPOINT_XFER_BULK ++ * - USB_ENDPOINT_XFER_INT ++ */ ++ uint8_t ep_type; ++ uint8_t ep_is_in; ++ ++ /** wMaxPacketSize Field of Endpoint Descriptor. */ ++ uint16_t maxp; ++ ++ /** ++ * Determines the PID of the next data packet for non-control ++ * transfers. Ignored for control transfers.
++ * One of the following values: ++ * - DWC_OTG_HC_PID_DATA0 ++ * - DWC_OTG_HC_PID_DATA1 ++ */ ++ uint8_t data_toggle; ++ ++ /** Ping state if 1. */ ++ uint8_t ping_state; ++ ++ /** ++ * List of QTDs for this QH. ++ */ ++ struct list_head qtd_list; ++ ++ /** Host channel currently processing transfers for this QH. */ ++ dwc_hc_t *channel; ++ ++ /** QTD currently assigned to a host channel for this QH. */ ++ dwc_otg_qtd_t *qtd_in_process; ++ ++ /** Full/low speed endpoint on high-speed hub requires split. */ ++ uint8_t do_split; ++ ++ /** @name Periodic schedule information */ ++ /** @{ */ ++ ++ /** Bandwidth in microseconds per (micro)frame. */ ++ uint8_t usecs; ++ ++ /** Interval between transfers in (micro)frames. */ ++ uint16_t interval; ++ ++ /** ++ * (micro)frame to initialize a periodic transfer. The transfer ++ * executes in the following (micro)frame. ++ */ ++ uint16_t sched_frame; ++ ++ /** (micro)frame at which last start split was initialized. */ ++ uint16_t start_split_frame; ++ ++ /** @} */ ++ ++ /** Entry for QH in either the periodic or non-periodic schedule. */ ++ struct list_head qh_list_entry; ++ ++ /* For non-dword aligned buffer support */ ++ uint8_t *dw_align_buf; ++ dma_addr_t dw_align_buf_dma; ++} dwc_otg_qh_t; ++ ++/** ++ * This structure holds the state of the HCD, including the non-periodic and ++ * periodic schedules. ++ */ ++typedef struct dwc_otg_hcd { ++ /** The DWC otg device pointer */ ++ struct dwc_otg_device *otg_dev; ++ ++ /** DWC OTG Core Interface Layer */ ++ dwc_otg_core_if_t *core_if; ++ ++ /** Internal DWC HCD Flags */ ++ volatile union dwc_otg_hcd_internal_flags { ++ uint32_t d32; ++ struct { ++ unsigned port_connect_status_change : 1; ++ unsigned port_connect_status : 1; ++ unsigned port_reset_change : 1; ++ unsigned port_enable_change : 1; ++ unsigned port_suspend_change : 1; ++ unsigned port_over_current_change : 1; ++ unsigned reserved : 27; ++ } b; ++ } flags; ++ ++ /** ++ * Inactive items in the non-periodic schedule. This is a list of ++ * Queue Heads. Transfers associated with these Queue Heads are not ++ * currently assigned to a host channel. ++ */ ++ struct list_head non_periodic_sched_inactive; ++ ++ /** ++ * Active items in the non-periodic schedule. This is a list of ++ * Queue Heads. Transfers associated with these Queue Heads are ++ * currently assigned to a host channel. ++ */ ++ struct list_head non_periodic_sched_active; ++ ++ /** ++ * Pointer to the next Queue Head to process in the active ++ * non-periodic schedule. ++ */ ++ struct list_head *non_periodic_qh_ptr; ++ ++ /** ++ * Inactive items in the periodic schedule. This is a list of QHs for ++ * periodic transfers that are _not_ scheduled for the next frame. ++ * Each QH in the list has an interval counter that determines when it ++ * needs to be scheduled for execution. This scheduling mechanism ++ * allows only a simple calculation for periodic bandwidth used (i.e. ++ * must assume that all periodic transfers may need to execute in the ++ * same frame). However, it greatly simplifies scheduling and should ++ * be sufficient for the vast majority of OTG hosts, which need to ++ * connect to a small number of peripherals at one time. ++ * ++ * Items move from this list to periodic_sched_ready when the QH ++ * interval counter is 0 at SOF. ++ */ ++ struct list_head periodic_sched_inactive; ++ ++ /** ++ * List of periodic QHs that are ready for execution in the next ++ * frame, but have not yet been assigned to host channels. 
++ * ++ * Items move from this list to periodic_sched_assigned as host ++ * channels become available during the current frame. ++ */ ++ struct list_head periodic_sched_ready; ++ ++ /** ++ * List of periodic QHs to be executed in the next frame that are ++ * assigned to host channels. ++ * ++ * Items move from this list to periodic_sched_queued as the ++ * transactions for the QH are queued to the DWC_otg controller. ++ */ ++ struct list_head periodic_sched_assigned; ++ ++ /** ++ * List of periodic QHs that have been queued for execution. ++ * ++ * Items move from this list to either periodic_sched_inactive or ++ * periodic_sched_ready when the channel associated with the transfer ++ * is released. If the interval for the QH is 1, the item moves to ++ * periodic_sched_ready because it must be rescheduled for the next ++ * frame. Otherwise, the item moves to periodic_sched_inactive. ++ */ ++ struct list_head periodic_sched_queued; ++ ++ /** ++ * Total bandwidth claimed so far for periodic transfers. This value ++ * is in microseconds per (micro)frame. The assumption is that all ++ * periodic transfers may occur in the same (micro)frame. ++ */ ++ uint16_t periodic_usecs; ++ ++ /** ++ * Frame number read from the core at SOF. The value ranges from 0 to ++ * DWC_HFNUM_MAX_FRNUM. ++ */ ++ uint16_t frame_number; ++ ++ /** ++ * Free host channels in the controller. This is a list of ++ * dwc_hc_t items. ++ */ ++ struct list_head free_hc_list; ++ ++ /** ++ * Number of host channels assigned to periodic transfers. Currently ++ * assuming that there is a dedicated host channel for each periodic ++ * transaction and at least one host channel available for ++ * non-periodic transactions. ++ */ ++ int periodic_channels; ++ ++ /** ++ * Number of host channels assigned to non-periodic transfers. ++ */ ++ int non_periodic_channels; ++ ++ /** ++ * Array of pointers to the host channel descriptors. Allows accessing ++ * a host channel descriptor given the host channel number. This is ++ * useful in interrupt handlers. ++ */ ++ dwc_hc_t *hc_ptr_array[MAX_EPS_CHANNELS]; ++ ++ /** ++ * Buffer to use for any data received during the status phase of a ++ * control transfer. Normally no data is transferred during the status ++ * phase. This buffer is used as a bit bucket. ++ */ ++ uint8_t *status_buf; ++ ++ /** ++ * DMA address for status_buf. ++ */ ++ dma_addr_t status_buf_dma; ++#define DWC_OTG_HCD_STATUS_BUF_SIZE 64 ++ ++ /** ++ * Structure to allow starting the HCD in a non-interrupt context ++ * during an OTG role change. ++ */ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ struct work_struct start_work; ++#else ++ struct delayed_work start_work; ++#endif ++ ++ /** ++ * Connection timer. An OTG host must display a message if the device ++ * does not connect. Started when the VBus power is turned on via ++ * sysfs attribute "buspower". 
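The four periodic schedule lists described above imply a simple per-QH lifecycle, determined entirely by which list the QH currently sits on. Naming the stages makes the transitions easier to follow; this enum is purely illustrative and does not exist in the driver:

enum periodic_qh_stage {
	PERIODIC_QH_INACTIVE,	/* interval not yet expired                 */
	PERIODIC_QH_READY,	/* due this (micro)frame, needs a channel   */
	PERIODIC_QH_ASSIGNED,	/* host channel assigned, not yet queued    */
	PERIODIC_QH_QUEUED,	/* transactions queued to the controller    */
};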
++ */ ++ struct timer_list conn_timer; ++ ++ /* Tasket to do a reset */ ++ struct tasklet_struct *reset_tasklet; ++ ++ /* */ ++ spinlock_t lock; ++ ++#ifdef DEBUG ++ uint32_t frrem_samples; ++ uint64_t frrem_accum; ++ ++ uint32_t hfnum_7_samples_a; ++ uint64_t hfnum_7_frrem_accum_a; ++ uint32_t hfnum_0_samples_a; ++ uint64_t hfnum_0_frrem_accum_a; ++ uint32_t hfnum_other_samples_a; ++ uint64_t hfnum_other_frrem_accum_a; ++ ++ uint32_t hfnum_7_samples_b; ++ uint64_t hfnum_7_frrem_accum_b; ++ uint32_t hfnum_0_samples_b; ++ uint64_t hfnum_0_frrem_accum_b; ++ uint32_t hfnum_other_samples_b; ++ uint64_t hfnum_other_frrem_accum_b; ++#endif ++} dwc_otg_hcd_t; ++ ++/** Gets the dwc_otg_hcd from a struct usb_hcd */ ++static inline dwc_otg_hcd_t *hcd_to_dwc_otg_hcd(struct usb_hcd *hcd) ++{ ++ return (dwc_otg_hcd_t *)(hcd->hcd_priv); ++} ++ ++/** Gets the struct usb_hcd that contains a dwc_otg_hcd_t. */ ++static inline struct usb_hcd *dwc_otg_hcd_to_hcd(dwc_otg_hcd_t *dwc_otg_hcd) ++{ ++ return container_of((void *)dwc_otg_hcd, struct usb_hcd, hcd_priv); ++} ++ ++/** @name HCD Create/Destroy Functions */ ++/** @{ */ ++extern int dwc_otg_hcd_init(struct device *dev); ++extern void dwc_otg_hcd_remove(struct device *dev); ++/** @} */ ++ ++/** @name Linux HC Driver API Functions */ ++/** @{ */ ++ ++extern int dwc_otg_hcd_start(struct usb_hcd *hcd); ++extern void dwc_otg_hcd_stop(struct usb_hcd *hcd); ++extern int dwc_otg_hcd_get_frame_number(struct usb_hcd *hcd); ++extern void dwc_otg_hcd_free(struct usb_hcd *hcd); ++extern int dwc_otg_hcd_urb_enqueue(struct usb_hcd *hcd, ++ struct urb *urb, ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ int mem_flags ++#else ++ gfp_t mem_flags ++#endif ++ ); ++extern int dwc_otg_hcd_urb_dequeue(struct usb_hcd *hcd, ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++#endif ++ struct urb *urb, int status); ++extern void dwc_otg_hcd_endpoint_disable(struct usb_hcd *hcd, ++ struct usb_host_endpoint *ep); ++extern irqreturn_t dwc_otg_hcd_irq(struct usb_hcd *hcd ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ , struct pt_regs *regs ++#endif ++ ); ++extern int dwc_otg_hcd_hub_status_data(struct usb_hcd *hcd, ++ char *buf); ++extern int dwc_otg_hcd_hub_control(struct usb_hcd *hcd, ++ u16 typeReq, ++ u16 wValue, ++ u16 wIndex, ++ char *buf, ++ u16 wLength); ++ ++/** @} */ ++ ++/** @name Transaction Execution Functions */ ++/** @{ */ ++extern dwc_otg_transaction_type_e dwc_otg_hcd_select_transactions(dwc_otg_hcd_t *hcd); ++extern void dwc_otg_hcd_queue_transactions(dwc_otg_hcd_t *hcd, ++ dwc_otg_transaction_type_e tr_type); ++extern void dwc_otg_hcd_complete_urb(dwc_otg_hcd_t *_hcd, struct urb *urb, ++ int status); ++/** @} */ ++ ++/** @name Interrupt Handler Functions */ ++/** @{ */ ++extern int32_t dwc_otg_hcd_handle_intr(dwc_otg_hcd_t *dwc_otg_hcd); ++extern int32_t dwc_otg_hcd_handle_sof_intr(dwc_otg_hcd_t *dwc_otg_hcd); ++extern int32_t dwc_otg_hcd_handle_rx_status_q_level_intr(dwc_otg_hcd_t *dwc_otg_hcd); ++extern int32_t dwc_otg_hcd_handle_np_tx_fifo_empty_intr(dwc_otg_hcd_t *dwc_otg_hcd); ++extern int32_t dwc_otg_hcd_handle_perio_tx_fifo_empty_intr(dwc_otg_hcd_t *dwc_otg_hcd); ++extern int32_t dwc_otg_hcd_handle_incomplete_periodic_intr(dwc_otg_hcd_t *dwc_otg_hcd); ++extern int32_t dwc_otg_hcd_handle_port_intr(dwc_otg_hcd_t *dwc_otg_hcd); ++extern int32_t dwc_otg_hcd_handle_conn_id_status_change_intr(dwc_otg_hcd_t *dwc_otg_hcd); ++extern int32_t dwc_otg_hcd_handle_disconnect_intr(dwc_otg_hcd_t *dwc_otg_hcd); ++extern int32_t 
dwc_otg_hcd_handle_hc_intr(dwc_otg_hcd_t *dwc_otg_hcd); ++extern int32_t dwc_otg_hcd_handle_hc_n_intr(dwc_otg_hcd_t *dwc_otg_hcd, uint32_t num); ++extern int32_t dwc_otg_hcd_handle_session_req_intr(dwc_otg_hcd_t *dwc_otg_hcd); ++extern int32_t dwc_otg_hcd_handle_wakeup_detected_intr(dwc_otg_hcd_t *dwc_otg_hcd); ++/** @} */ ++ ++ ++/** @name Schedule Queue Functions */ ++/** @{ */ ++ ++/* Implemented in dwc_otg_hcd_queue.c */ ++extern dwc_otg_qh_t *dwc_otg_hcd_qh_create(dwc_otg_hcd_t *hcd, struct urb *urb); ++extern void dwc_otg_hcd_qh_init(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh, struct urb *urb); ++extern void dwc_otg_hcd_qh_free(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh); ++extern int dwc_otg_hcd_qh_add(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh); ++extern void dwc_otg_hcd_qh_remove(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh); ++extern void dwc_otg_hcd_qh_deactivate(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh, int sched_csplit); ++ ++/** Remove and free a QH */ ++static inline void dwc_otg_hcd_qh_remove_and_free(dwc_otg_hcd_t *hcd, ++ dwc_otg_qh_t *qh) ++{ ++ dwc_otg_hcd_qh_remove(hcd, qh); ++ dwc_otg_hcd_qh_free(hcd, qh); ++} ++ ++/** Allocates memory for a QH structure. ++ * @return Returns the memory allocate or NULL on error. */ ++static inline dwc_otg_qh_t *dwc_otg_hcd_qh_alloc(void) ++{ ++ return (dwc_otg_qh_t *) kmalloc(sizeof(dwc_otg_qh_t), GFP_KERNEL); ++} ++ ++extern dwc_otg_qtd_t *dwc_otg_hcd_qtd_create(struct urb *urb); ++extern void dwc_otg_hcd_qtd_init(dwc_otg_qtd_t *qtd, struct urb *urb); ++extern int dwc_otg_hcd_qtd_add(dwc_otg_qtd_t *qtd, dwc_otg_hcd_t *dwc_otg_hcd); ++ ++/** Allocates memory for a QTD structure. ++ * @return Returns the memory allocate or NULL on error. */ ++static inline dwc_otg_qtd_t *dwc_otg_hcd_qtd_alloc(void) ++{ ++ return (dwc_otg_qtd_t *) kmalloc(sizeof(dwc_otg_qtd_t), GFP_KERNEL); ++} ++ ++/** Frees the memory for a QTD structure. QTD should already be removed from ++ * list. ++ * @param[in] qtd QTD to free.*/ ++static inline void dwc_otg_hcd_qtd_free(dwc_otg_qtd_t *qtd) ++{ ++ kfree(qtd); ++} ++ ++/** Removes a QTD from list. ++ * @param[in] hcd HCD instance. ++ * @param[in] qtd QTD to remove from list. */ ++static inline void dwc_otg_hcd_qtd_remove(dwc_otg_hcd_t *hcd, dwc_otg_qtd_t *qtd) ++{ ++ unsigned long flags; ++ SPIN_LOCK_IRQSAVE(&hcd->lock, flags); ++ list_del(&qtd->qtd_list_entry); ++ SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags); ++} ++ ++/** Remove and free a QTD */ ++static inline void dwc_otg_hcd_qtd_remove_and_free(dwc_otg_hcd_t *hcd, dwc_otg_qtd_t *qtd) ++{ ++ dwc_otg_hcd_qtd_remove(hcd, qtd); ++ dwc_otg_hcd_qtd_free(qtd); ++} ++ ++/** @} */ ++ ++ ++/** @name Internal Functions */ ++/** @{ */ ++dwc_otg_qh_t *dwc_urb_to_qh(struct urb *urb); ++void dwc_otg_hcd_dump_frrem(dwc_otg_hcd_t *hcd); ++void dwc_otg_hcd_dump_state(dwc_otg_hcd_t *hcd); ++/** @} */ ++ ++/** Gets the usb_host_endpoint associated with an URB. */ ++static inline struct usb_host_endpoint *dwc_urb_to_endpoint(struct urb *urb) ++{ ++ struct usb_device *dev = urb->dev; ++ int ep_num = usb_pipeendpoint(urb->pipe); ++ ++ if (usb_pipein(urb->pipe)) ++ return dev->ep_in[ep_num]; ++ else ++ return dev->ep_out[ep_num]; ++} ++ ++/** ++ * Gets the endpoint number from a _bEndpointAddress argument. The endpoint is ++ * qualified with its direction (possible 32 endpoints per device). 
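The QTD helpers declared above combine into a short submission pattern. A hedged sketch of roughly how they are expected to be used on the URB enqueue path; the surrounding function, the error codes, and the assumption that a non-zero return from dwc_otg_hcd_qtd_add() signals failure are all illustrative, and the real flow lives in dwc_otg_hcd_urb_enqueue():

static int enqueue_sketch(dwc_otg_hcd_t *hcd, struct urb *urb)
{
	dwc_otg_qtd_t *qtd = dwc_otg_hcd_qtd_create(urb);

	if (qtd == NULL)
		return -ENOMEM;

	/* Queue the QTD on the endpoint's QH; assumed to return
	 * non-zero on failure. */
	if (dwc_otg_hcd_qtd_add(qtd, hcd) != 0) {
		dwc_otg_hcd_qtd_free(qtd);
		return -EINVAL;
	}

	return 0;
}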
++ */ ++#define dwc_ep_addr_to_endpoint(_bEndpointAddress_) ((_bEndpointAddress_ & USB_ENDPOINT_NUMBER_MASK) | \ ++ ((_bEndpointAddress_ & USB_DIR_IN) != 0) << 4) ++ ++/** Gets the QH that contains the list_head */ ++#define dwc_list_to_qh(_list_head_ptr_) container_of(_list_head_ptr_, dwc_otg_qh_t, qh_list_entry) ++ ++/** Gets the QTD that contains the list_head */ ++#define dwc_list_to_qtd(_list_head_ptr_) container_of(_list_head_ptr_, dwc_otg_qtd_t, qtd_list_entry) ++ ++/** Check if QH is non-periodic */ ++#define dwc_qh_is_non_per(_qh_ptr_) ((_qh_ptr_->ep_type == USB_ENDPOINT_XFER_BULK) || \ ++ (_qh_ptr_->ep_type == USB_ENDPOINT_XFER_CONTROL)) ++ ++/** High bandwidth multiplier as encoded in highspeed endpoint descriptors */ ++#define dwc_hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03)) ++ ++/** Packet size for any kind of endpoint descriptor */ ++#define dwc_max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff) ++ ++/** ++ * Returns true if _frame1 is less than or equal to _frame2. The comparison is ++ * done modulo DWC_HFNUM_MAX_FRNUM. This accounts for the rollover of the ++ * frame number when the max frame number is reached. ++ */ ++static inline int dwc_frame_num_le(uint16_t frame1, uint16_t frame2) ++{ ++ return ((frame2 - frame1) & DWC_HFNUM_MAX_FRNUM) <= ++ (DWC_HFNUM_MAX_FRNUM >> 1); ++} ++ ++/** ++ * Returns true if _frame1 is greater than _frame2. The comparison is done ++ * modulo DWC_HFNUM_MAX_FRNUM. This accounts for the rollover of the frame ++ * number when the max frame number is reached. ++ */ ++static inline int dwc_frame_num_gt(uint16_t frame1, uint16_t frame2) ++{ ++ return (frame1 != frame2) && ++ (((frame1 - frame2) & DWC_HFNUM_MAX_FRNUM) < ++ (DWC_HFNUM_MAX_FRNUM >> 1)); ++} ++ ++/** ++ * Increments _frame by the amount specified by _inc. The addition is done ++ * modulo DWC_HFNUM_MAX_FRNUM. Returns the incremented value. ++ */ ++static inline uint16_t dwc_frame_num_inc(uint16_t frame, uint16_t inc) ++{ ++ return (frame + inc) & DWC_HFNUM_MAX_FRNUM; ++} ++ ++static inline uint16_t dwc_full_frame_num(uint16_t frame) ++{ ++ return (frame & DWC_HFNUM_MAX_FRNUM) >> 3; ++} ++ ++static inline uint16_t dwc_micro_frame_num(uint16_t frame) ++{ ++ return frame & 0x7; ++} ++ ++#ifdef DEBUG ++/** ++ * Macro to sample the remaining PHY clocks left in the current frame. This ++ * may be used during debugging to determine the average time it takes to ++ * execute sections of code. There are two possible sample points, "a" and ++ * "b", so the _letter argument must be one of these values. ++ * ++ * To dump the average sample times, read the "hcd_frrem" sysfs attribute. For ++ * example, "cat /sys/devices/lm0/hcd_frrem". 
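As a concrete check of the dwc_hb_mult()/dwc_max_packet() macros above: a high-speed, high-bandwidth isochronous endpoint advertising wMaxPacketSize = 0x1400 encodes "2 additional transactions per microframe" in bits 12:11, so the multiplier is 3 and the per-transaction packet size is 0x400 (1024) bytes. A self-contained restatement of the arithmetic (the macros are re-declared locally so the example stands alone):

#include <assert.h>
#include <stdint.h>

#define hb_mult(wmaxp)		(1 + (((wmaxp) >> 11) & 0x03))
#define max_packet_bytes(wmaxp)	((wmaxp) & 0x07ff)

int main(void)
{
	uint16_t wMaxPacketSize = 0x1400;	/* high-bandwidth ISOC endpoint */

	assert(hb_mult(wMaxPacketSize) == 3);	/* 3 transactions per uframe */
	assert(max_packet_bytes(wMaxPacketSize) == 1024);
	return 0;
}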
++ */ ++#define dwc_sample_frrem(_hcd, _qh, _letter) \ ++{ \ ++ hfnum_data_t hfnum; \ ++ dwc_otg_qtd_t *qtd; \ ++ qtd = list_entry(_qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry); \ ++ if (usb_pipeint(qtd->urb->pipe) && _qh->start_split_frame != 0 && !qtd->complete_split) { \ ++ hfnum.d32 = dwc_read_reg32(&_hcd->core_if->host_if->host_global_regs->hfnum); \ ++ switch (hfnum.b.frnum & 0x7) { \ ++ case 7: \ ++ _hcd->hfnum_7_samples_##_letter++; \ ++ _hcd->hfnum_7_frrem_accum_##_letter += hfnum.b.frrem; \ ++ break; \ ++ case 0: \ ++ _hcd->hfnum_0_samples_##_letter++; \ ++ _hcd->hfnum_0_frrem_accum_##_letter += hfnum.b.frrem; \ ++ break; \ ++ default: \ ++ _hcd->hfnum_other_samples_##_letter++; \ ++ _hcd->hfnum_other_frrem_accum_##_letter += hfnum.b.frrem; \ ++ break; \ ++ } \ ++ } \ ++} ++#else ++#define dwc_sample_frrem(_hcd, _qh, _letter) ++#endif ++#endif ++#endif /* DWC_DEVICE_ONLY */ +--- /dev/null ++++ b/drivers/usb/dwc_otg/dwc_otg_hcd_intr.c +@@ -0,0 +1,1873 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_hcd_intr.c $ ++ * $Revision: 1.6.2.1 $ ++ * $Date: 2009-04-22 03:48:22 $ ++ * $Change: 1117667 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++#ifndef DWC_DEVICE_ONLY ++ ++#include ++ ++#include "dwc_otg_driver.h" ++#include "dwc_otg_hcd.h" ++#include "dwc_otg_regs.h" ++ ++/** @file ++ * This file contains the implementation of the HCD Interrupt handlers. ++ */ ++ ++/** This function handles interrupts for the HCD. 
*/ ++int32_t dwc_otg_hcd_handle_intr(dwc_otg_hcd_t *dwc_otg_hcd) ++{ ++ int retval = 0; ++ ++ dwc_otg_core_if_t *core_if = dwc_otg_hcd->core_if; ++ gintsts_data_t gintsts; ++#ifdef DEBUG ++ dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs; ++#endif ++ ++ /* Check if HOST Mode */ ++ if (dwc_otg_is_host_mode(core_if)) { ++ gintsts.d32 = dwc_otg_read_core_intr(core_if); ++ if (!gintsts.d32) { ++ return 0; ++ } ++ ++#ifdef DEBUG ++ /* Don't print debug message in the interrupt handler on SOF */ ++# ifndef DEBUG_SOF ++ if (gintsts.d32 != DWC_SOF_INTR_MASK) ++# endif ++ DWC_DEBUGPL(DBG_HCD, "\n"); ++#endif ++ ++#ifdef DEBUG ++# ifndef DEBUG_SOF ++ if (gintsts.d32 != DWC_SOF_INTR_MASK) ++# endif ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Interrupt Detected gintsts&gintmsk=0x%08x\n", gintsts.d32); ++#endif ++ if (gintsts.b.usbreset) { ++ DWC_PRINT("Usb Reset In Host Mode\n"); ++ } ++ ++ ++ if (gintsts.b.sofintr) { ++ retval |= dwc_otg_hcd_handle_sof_intr(dwc_otg_hcd); ++ } ++ if (gintsts.b.rxstsqlvl) { ++ retval |= dwc_otg_hcd_handle_rx_status_q_level_intr(dwc_otg_hcd); ++ } ++ if (gintsts.b.nptxfempty) { ++ retval |= dwc_otg_hcd_handle_np_tx_fifo_empty_intr(dwc_otg_hcd); ++ } ++ if (gintsts.b.i2cintr) { ++ /** @todo Implement i2cintr handler. */ ++ } ++ if (gintsts.b.portintr) { ++ retval |= dwc_otg_hcd_handle_port_intr(dwc_otg_hcd); ++ } ++ if (gintsts.b.hcintr) { ++ retval |= dwc_otg_hcd_handle_hc_intr(dwc_otg_hcd); ++ } ++ if (gintsts.b.ptxfempty) { ++ retval |= dwc_otg_hcd_handle_perio_tx_fifo_empty_intr(dwc_otg_hcd); ++ } ++#ifdef DEBUG ++# ifndef DEBUG_SOF ++ if (gintsts.d32 != DWC_SOF_INTR_MASK) ++# endif ++ { ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Finished Servicing Interrupts\n"); ++ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD gintsts=0x%08x\n", ++ dwc_read_reg32(&global_regs->gintsts)); ++ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD gintmsk=0x%08x\n", ++ dwc_read_reg32(&global_regs->gintmsk)); ++ } ++#endif ++ ++#ifdef DEBUG ++# ifndef DEBUG_SOF ++ if (gintsts.d32 != DWC_SOF_INTR_MASK) ++# endif ++ DWC_DEBUGPL(DBG_HCD, "\n"); ++#endif ++ ++ } ++ ++ S3C2410X_CLEAR_EINTPEND(); ++ ++ return retval; ++} ++ ++#ifdef DWC_TRACK_MISSED_SOFS ++#warning Compiling code to track missed SOFs ++#define FRAME_NUM_ARRAY_SIZE 1000 ++/** ++ * This function is for debug only. ++ */ ++static inline void track_missed_sofs(uint16_t curr_frame_number) ++{ ++ static uint16_t frame_num_array[FRAME_NUM_ARRAY_SIZE]; ++ static uint16_t last_frame_num_array[FRAME_NUM_ARRAY_SIZE]; ++ static int frame_num_idx = 0; ++ static uint16_t last_frame_num = DWC_HFNUM_MAX_FRNUM; ++ static int dumped_frame_num_array = 0; ++ ++ if (frame_num_idx < FRAME_NUM_ARRAY_SIZE) { ++ if (((last_frame_num + 1) & DWC_HFNUM_MAX_FRNUM) != curr_frame_number) { ++ frame_num_array[frame_num_idx] = curr_frame_number; ++ last_frame_num_array[frame_num_idx++] = last_frame_num; ++ } ++ } else if (!dumped_frame_num_array) { ++ int i; ++ printk(KERN_EMERG USB_DWC "Frame Last Frame\n"); ++ printk(KERN_EMERG USB_DWC "----- ----------\n"); ++ for (i = 0; i < FRAME_NUM_ARRAY_SIZE; i++) { ++ printk(KERN_EMERG USB_DWC "0x%04x 0x%04x\n", ++ frame_num_array[i], last_frame_num_array[i]); ++ } ++ dumped_frame_num_array = 1; ++ } ++ last_frame_num = curr_frame_number; ++} ++#endif ++ ++/** ++ * Handles the start-of-frame interrupt in host mode. Non-periodic ++ * transactions may be queued to the DWC_otg controller for the current ++ * (micro)frame. Periodic transactions may be queued to the controller for the ++ * next (micro)frame. 
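The "should this periodic QH run now" decision in the SOF handler below uses dwc_frame_num_le() from dwc_otg_hcd.h, whose modulo arithmetic keeps working across the rollover of the frame counter. A standalone restatement with worked numbers; the mask value 0x3FFF is assumed here for DWC_HFNUM_MAX_FRNUM, the actual constant comes from the register header:

#include <assert.h>
#include <stdint.h>

#define MAX_FRNUM 0x3FFF	/* assumed value of DWC_HFNUM_MAX_FRNUM */

static int frame_num_le(uint16_t frame1, uint16_t frame2)
{
	return ((frame2 - frame1) & MAX_FRNUM) <= (MAX_FRNUM >> 1);
}

int main(void)
{
	/* A QH scheduled for frame 0x3FFE is still "due" once the counter
	 * has wrapped to 0x0001: the modulo distance is only 3 frames. */
	assert(frame_num_le(0x3FFE, 0x0001));

	/* A QH scheduled three frames in the future is not yet due. */
	assert(!frame_num_le(0x0004, 0x0001));
	return 0;
}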
++ */ ++int32_t dwc_otg_hcd_handle_sof_intr(dwc_otg_hcd_t *hcd) ++{ ++ hfnum_data_t hfnum; ++ struct list_head *qh_entry; ++ dwc_otg_qh_t *qh; ++ dwc_otg_transaction_type_e tr_type; ++ gintsts_data_t gintsts = {.d32 = 0}; ++ ++ hfnum.d32 = dwc_read_reg32(&hcd->core_if->host_if->host_global_regs->hfnum); ++ ++#ifdef DEBUG_SOF ++ DWC_DEBUGPL(DBG_HCD, "--Start of Frame Interrupt--\n"); ++#endif ++ hcd->frame_number = hfnum.b.frnum; ++ ++#ifdef DEBUG ++ hcd->frrem_accum += hfnum.b.frrem; ++ hcd->frrem_samples++; ++#endif ++ ++#ifdef DWC_TRACK_MISSED_SOFS ++ track_missed_sofs(hcd->frame_number); ++#endif ++ ++ /* Determine whether any periodic QHs should be executed. */ ++ qh_entry = hcd->periodic_sched_inactive.next; ++ while (qh_entry != &hcd->periodic_sched_inactive) { ++ qh = list_entry(qh_entry, dwc_otg_qh_t, qh_list_entry); ++ qh_entry = qh_entry->next; ++ if (dwc_frame_num_le(qh->sched_frame, hcd->frame_number)) { ++ /* ++ * Move QH to the ready list to be executed next ++ * (micro)frame. ++ */ ++ list_move(&qh->qh_list_entry, &hcd->periodic_sched_ready); ++ } ++ } ++ ++ tr_type = dwc_otg_hcd_select_transactions(hcd); ++ if (tr_type != DWC_OTG_TRANSACTION_NONE) { ++ dwc_otg_hcd_queue_transactions(hcd, tr_type); ++ } ++ ++ /* Clear interrupt */ ++ gintsts.b.sofintr = 1; ++ dwc_write_reg32(&hcd->core_if->core_global_regs->gintsts, gintsts.d32); ++ ++ return 1; ++} ++ ++/** Handles the Rx Status Queue Level Interrupt, which indicates that there is at ++ * least one packet in the Rx FIFO. The packets are moved from the FIFO to ++ * memory if the DWC_otg controller is operating in Slave mode. */ ++int32_t dwc_otg_hcd_handle_rx_status_q_level_intr(dwc_otg_hcd_t *dwc_otg_hcd) ++{ ++ host_grxsts_data_t grxsts; ++ dwc_hc_t *hc = NULL; ++ ++ DWC_DEBUGPL(DBG_HCD, "--RxStsQ Level Interrupt--\n"); ++ ++ grxsts.d32 = dwc_read_reg32(&dwc_otg_hcd->core_if->core_global_regs->grxstsp); ++ ++ hc = dwc_otg_hcd->hc_ptr_array[grxsts.b.chnum]; ++ ++ /* Packet Status */ ++ DWC_DEBUGPL(DBG_HCDV, " Ch num = %d\n", grxsts.b.chnum); ++ DWC_DEBUGPL(DBG_HCDV, " Count = %d\n", grxsts.b.bcnt); ++ DWC_DEBUGPL(DBG_HCDV, " DPID = %d, hc.dpid = %d\n", grxsts.b.dpid, hc->data_pid_start); ++ DWC_DEBUGPL(DBG_HCDV, " PStatus = %d\n", grxsts.b.pktsts); ++ ++ switch (grxsts.b.pktsts) { ++ case DWC_GRXSTS_PKTSTS_IN: ++ /* Read the data into the host buffer. */ ++ if (grxsts.b.bcnt > 0) { ++ dwc_otg_read_packet(dwc_otg_hcd->core_if, ++ hc->xfer_buff, ++ grxsts.b.bcnt); ++ ++ /* Update the HC fields for the next packet received. */ ++ hc->xfer_count += grxsts.b.bcnt; ++ hc->xfer_buff += grxsts.b.bcnt; ++ } ++ ++ case DWC_GRXSTS_PKTSTS_IN_XFER_COMP: ++ case DWC_GRXSTS_PKTSTS_DATA_TOGGLE_ERR: ++ case DWC_GRXSTS_PKTSTS_CH_HALTED: ++ /* Handled in interrupt, just ignore data */ ++ break; ++ default: ++ DWC_ERROR("RX_STS_Q Interrupt: Unknown status %d\n", grxsts.b.pktsts); ++ break; ++ } ++ ++ return 1; ++} ++ ++/** This interrupt occurs when the non-periodic Tx FIFO is half-empty. More ++ * data packets may be written to the FIFO for OUT transfers. More requests ++ * may be written to the non-periodic request queue for IN transfers. This ++ * interrupt is enabled only in Slave mode. */ ++int32_t dwc_otg_hcd_handle_np_tx_fifo_empty_intr(dwc_otg_hcd_t *dwc_otg_hcd) ++{ ++ DWC_DEBUGPL(DBG_HCD, "--Non-Periodic TxFIFO Empty Interrupt--\n"); ++ dwc_otg_hcd_queue_transactions(dwc_otg_hcd, ++ DWC_OTG_TRANSACTION_NON_PERIODIC); ++ return 1; ++} ++ ++/** This interrupt occurs when the periodic Tx FIFO is half-empty. 
More data ++ * packets may be written to the FIFO for OUT transfers. More requests may be ++ * written to the periodic request queue for IN transfers. This interrupt is ++ * enabled only in Slave mode. */ ++int32_t dwc_otg_hcd_handle_perio_tx_fifo_empty_intr(dwc_otg_hcd_t *dwc_otg_hcd) ++{ ++ DWC_DEBUGPL(DBG_HCD, "--Periodic TxFIFO Empty Interrupt--\n"); ++ dwc_otg_hcd_queue_transactions(dwc_otg_hcd, ++ DWC_OTG_TRANSACTION_PERIODIC); ++ return 1; ++} ++ ++/** There are multiple conditions that can cause a port interrupt. This function ++ * determines which interrupt conditions have occurred and handles them ++ * appropriately. */ ++int32_t dwc_otg_hcd_handle_port_intr(dwc_otg_hcd_t *dwc_otg_hcd) ++{ ++ int retval = 0; ++ hprt0_data_t hprt0; ++ hprt0_data_t hprt0_modify; ++ ++ hprt0.d32 = dwc_read_reg32(dwc_otg_hcd->core_if->host_if->hprt0); ++ hprt0_modify.d32 = dwc_read_reg32(dwc_otg_hcd->core_if->host_if->hprt0); ++ ++ /* Clear appropriate bits in HPRT0 to clear the interrupt bit in ++ * GINTSTS */ ++ ++ hprt0_modify.b.prtena = 0; ++ hprt0_modify.b.prtconndet = 0; ++ hprt0_modify.b.prtenchng = 0; ++ hprt0_modify.b.prtovrcurrchng = 0; ++ ++ /* Port Connect Detected ++ * Set flag and clear if detected */ ++ if (hprt0.b.prtconndet) { ++ DWC_DEBUGPL(DBG_HCD, "--Port Interrupt HPRT0=0x%08x " ++ "Port Connect Detected--\n", hprt0.d32); ++ dwc_otg_hcd->flags.b.port_connect_status_change = 1; ++ dwc_otg_hcd->flags.b.port_connect_status = 1; ++ hprt0_modify.b.prtconndet = 1; ++ ++ /* B-Device has connected, Delete the connection timer. */ ++ del_timer( &dwc_otg_hcd->conn_timer ); ++ ++ /* The Hub driver asserts a reset when it sees port connect ++ * status change flag */ ++ retval |= 1; ++ } ++ ++ /* Port Enable Changed ++ * Clear if detected - Set internal flag if disabled */ ++ if (hprt0.b.prtenchng) { ++ DWC_DEBUGPL(DBG_HCD, " --Port Interrupt HPRT0=0x%08x " ++ "Port Enable Changed--\n", hprt0.d32); ++ hprt0_modify.b.prtenchng = 1; ++ if (hprt0.b.prtena == 1) { ++ int do_reset = 0; ++ dwc_otg_core_params_t *params = dwc_otg_hcd->core_if->core_params; ++ dwc_otg_core_global_regs_t *global_regs = dwc_otg_hcd->core_if->core_global_regs; ++ dwc_otg_host_if_t *host_if = dwc_otg_hcd->core_if->host_if; ++ ++ /* Check if we need to adjust the PHY clock speed for ++ * low power and adjust it */ ++ if (params->host_support_fs_ls_low_power) { ++ gusbcfg_data_t usbcfg; ++ ++ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg); ++ ++ if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_LOW_SPEED || ++ hprt0.b.prtspd == DWC_HPRT0_PRTSPD_FULL_SPEED) { ++ /* ++ * Low power ++ */ ++ hcfg_data_t hcfg; ++ if (usbcfg.b.phylpwrclksel == 0) { ++ /* Set PHY low power clock select for FS/LS devices */ ++ usbcfg.b.phylpwrclksel = 1; ++ dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32); ++ do_reset = 1; ++ } ++ ++ hcfg.d32 = dwc_read_reg32(&host_if->host_global_regs->hcfg); ++ ++ if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_LOW_SPEED && ++ params->host_ls_low_power_phy_clk == ++ DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ) { ++ /* 6 MHZ */ ++ DWC_DEBUGPL(DBG_CIL, "FS_PHY programming HCFG to 6 MHz (Low Power)\n"); ++ if (hcfg.b.fslspclksel != DWC_HCFG_6_MHZ) { ++ hcfg.b.fslspclksel = DWC_HCFG_6_MHZ; ++ dwc_write_reg32(&host_if->host_global_regs->hcfg, ++ hcfg.d32); ++ do_reset = 1; ++ } ++ } else { ++ /* 48 MHZ */ ++ DWC_DEBUGPL(DBG_CIL, "FS_PHY programming HCFG to 48 MHz ()\n"); ++ if (hcfg.b.fslspclksel != DWC_HCFG_48_MHZ) { ++ hcfg.b.fslspclksel = DWC_HCFG_48_MHZ; ++ dwc_write_reg32(&host_if->host_global_regs->hcfg, ++ hcfg.d32); ++ 
do_reset = 1; ++ } ++ } ++ } else { ++ /* ++ * Not low power ++ */ ++ if (usbcfg.b.phylpwrclksel == 1) { ++ usbcfg.b.phylpwrclksel = 0; ++ dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32); ++ do_reset = 1; ++ } ++ } ++ ++ if (do_reset) { ++ tasklet_schedule(dwc_otg_hcd->reset_tasklet); ++ } ++ } ++ ++ if (!do_reset) { ++ /* Port has been enabled set the reset change flag */ ++ dwc_otg_hcd->flags.b.port_reset_change = 1; ++ } ++ } else { ++ dwc_otg_hcd->flags.b.port_enable_change = 1; ++ } ++ retval |= 1; ++ } ++ ++ /** Overcurrent Change Interrupt */ ++ if (hprt0.b.prtovrcurrchng) { ++ DWC_DEBUGPL(DBG_HCD, " --Port Interrupt HPRT0=0x%08x " ++ "Port Overcurrent Changed--\n", hprt0.d32); ++ dwc_otg_hcd->flags.b.port_over_current_change = 1; ++ hprt0_modify.b.prtovrcurrchng = 1; ++ retval |= 1; ++ } ++ ++ /* Clear Port Interrupts */ ++ dwc_write_reg32(dwc_otg_hcd->core_if->host_if->hprt0, hprt0_modify.d32); ++ ++ return retval; ++} ++ ++/** This interrupt indicates that one or more host channels has a pending ++ * interrupt. There are multiple conditions that can cause each host channel ++ * interrupt. This function determines which conditions have occurred for each ++ * host channel interrupt and handles them appropriately. */ ++int32_t dwc_otg_hcd_handle_hc_intr(dwc_otg_hcd_t *dwc_otg_hcd) ++{ ++ int i; ++ int retval = 0; ++ haint_data_t haint; ++ ++ /* Clear appropriate bits in HCINTn to clear the interrupt bit in ++ * GINTSTS */ ++ ++ haint.d32 = dwc_otg_read_host_all_channels_intr(dwc_otg_hcd->core_if); ++ ++ for (i = 0; i < dwc_otg_hcd->core_if->core_params->host_channels; i++) { ++ if (haint.b2.chint & (1 << i)) { ++ retval |= dwc_otg_hcd_handle_hc_n_intr(dwc_otg_hcd, i); ++ } ++ } ++ ++ return retval; ++} ++ ++/* Macro used to clear one channel interrupt */ ++#define clear_hc_int(_hc_regs_, _intr_) \ ++do { \ ++ hcint_data_t hcint_clear = {.d32 = 0}; \ ++ hcint_clear.b._intr_ = 1; \ ++ dwc_write_reg32(&(_hc_regs_)->hcint, hcint_clear.d32); \ ++} while (0) ++ ++/* ++ * Macro used to disable one channel interrupt. Channel interrupts are ++ * disabled when the channel is halted or released by the interrupt handler. ++ * There is no need to handle further interrupts of that type until the ++ * channel is re-assigned. In fact, subsequent handling may cause crashes ++ * because the channel structures are cleaned up when the channel is released. ++ */ ++#define disable_hc_int(_hc_regs_, _intr_) \ ++do { \ ++ hcintmsk_data_t hcintmsk = {.d32 = 0}; \ ++ hcintmsk.b._intr_ = 1; \ ++ dwc_modify_reg32(&(_hc_regs_)->hcintmsk, hcintmsk.d32, 0); \ ++} while (0) ++ ++/** ++ * Gets the actual length of a transfer after the transfer halts. _halt_status ++ * holds the reason for the halt. ++ * ++ * For IN transfers where halt_status is DWC_OTG_HC_XFER_COMPLETE, ++ * *short_read is set to 1 upon return if less than the requested ++ * number of bytes were transferred. Otherwise, *short_read is set to 0 upon ++ * return. short_read may also be NULL on entry, in which case it remains ++ * unchanged. 
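To make the IN-transfer case of the helper documented above concrete, here is the arithmetic with sample numbers only; this simply restates the hc->xfer_len - hctsiz.xfersize calculation, it is not driver code:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* IN transfer completed: 512 bytes were programmed into the channel
	 * and the controller reports 112 bytes still outstanding. */
	uint32_t xfer_len = 512;	/* hc->xfer_len      */
	uint32_t xfersize = 112;	/* hctsiz.b.xfersize */
	uint32_t actual   = xfer_len - xfersize;
	int short_read    = (xfersize != 0);

	assert(actual == 400);		/* bytes actually received   */
	assert(short_read == 1);	/* fewer than requested      */
	return 0;
}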
++ */ ++static uint32_t get_actual_xfer_length(dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd, ++ dwc_otg_halt_status_e halt_status, ++ int *short_read) ++{ ++ hctsiz_data_t hctsiz; ++ uint32_t length; ++ ++ if (short_read != NULL) { ++ *short_read = 0; ++ } ++ hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz); ++ ++ if (halt_status == DWC_OTG_HC_XFER_COMPLETE) { ++ if (hc->ep_is_in) { ++ length = hc->xfer_len - hctsiz.b.xfersize; ++ if (short_read != NULL) { ++ *short_read = (hctsiz.b.xfersize != 0); ++ } ++ } else if (hc->qh->do_split) { ++ length = qtd->ssplit_out_xfer_count; ++ } else { ++ length = hc->xfer_len; ++ } ++ } else { ++ /* ++ * Must use the hctsiz.pktcnt field to determine how much data ++ * has been transferred. This field reflects the number of ++ * packets that have been transferred via the USB. This is ++ * always an integral number of packets if the transfer was ++ * halted before its normal completion. (Can't use the ++ * hctsiz.xfersize field because that reflects the number of ++ * bytes transferred via the AHB, not the USB). ++ */ ++ length = (hc->start_pkt_count - hctsiz.b.pktcnt) * hc->max_packet; ++ } ++ ++ return length; ++} ++ ++/** ++ * Updates the state of the URB after a Transfer Complete interrupt on the ++ * host channel. Updates the actual_length field of the URB based on the ++ * number of bytes transferred via the host channel. Sets the URB status ++ * if the data transfer is finished. ++ * ++ * @return 1 if the data transfer specified by the URB is completely finished, ++ * 0 otherwise. ++ */ ++static int update_urb_state_xfer_comp(dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ struct urb *urb, ++ dwc_otg_qtd_t *qtd) ++{ ++ int xfer_done = 0; ++ int short_read = 0; ++ int overflow_read=0; ++ uint32_t len = 0; ++ int max_packet; ++ ++ len = get_actual_xfer_length(hc, hc_regs, qtd, ++ DWC_OTG_HC_XFER_COMPLETE, ++ &short_read); ++ ++ /* Data overflow case: by Steven */ ++ if (len > urb->transfer_buffer_length) { ++ len = urb->transfer_buffer_length; ++ overflow_read = 1; ++ } ++ ++ /* non DWORD-aligned buffer case handling. */ ++ if (((uint32_t)hc->xfer_buff & 0x3) && len && hc->qh->dw_align_buf && hc->ep_is_in) { ++ memcpy(urb->transfer_buffer + urb->actual_length, hc->qh->dw_align_buf, len); ++ } ++ urb->actual_length +=len; ++ ++ max_packet = usb_maxpacket(urb->dev, urb->pipe, !usb_pipein(urb->pipe)); ++ if((len) && usb_pipebulk(urb->pipe) && ++ (urb->transfer_flags & URB_ZERO_PACKET) && ++ (urb->actual_length == urb->transfer_buffer_length) && ++ (!(urb->transfer_buffer_length % max_packet))) { ++ } else if (short_read || urb->actual_length == urb->transfer_buffer_length) { ++ xfer_done = 1; ++ if (short_read && (urb->transfer_flags & URB_SHORT_NOT_OK)) { ++ urb->status = -EREMOTEIO; ++ } else if (overflow_read) { ++ urb->status = -EOVERFLOW; ++ } else { ++ urb->status = 0; ++ } ++ } ++ ++#ifdef DEBUG ++ { ++ hctsiz_data_t hctsiz; ++ hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz); ++ DWC_DEBUGPL(DBG_HCDV, "DWC_otg: %s: %s, channel %d\n", ++ __func__, (hc->ep_is_in ? 
"IN" : "OUT"), hc->hc_num); ++ DWC_DEBUGPL(DBG_HCDV, " hc->xfer_len %d\n", hc->xfer_len); ++ DWC_DEBUGPL(DBG_HCDV, " hctsiz.xfersize %d\n", hctsiz.b.xfersize); ++ DWC_DEBUGPL(DBG_HCDV, " urb->transfer_buffer_length %d\n", ++ urb->transfer_buffer_length); ++ DWC_DEBUGPL(DBG_HCDV, " urb->actual_length %d\n", urb->actual_length); ++ DWC_DEBUGPL(DBG_HCDV, " short_read %d, xfer_done %d\n", ++ short_read, xfer_done); ++ } ++#endif ++ ++ return xfer_done; ++} ++ ++/* ++ * Save the starting data toggle for the next transfer. The data toggle is ++ * saved in the QH for non-control transfers and it's saved in the QTD for ++ * control transfers. ++ */ ++static void save_data_toggle(dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd) ++{ ++ hctsiz_data_t hctsiz; ++ hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz); ++ ++ if (hc->ep_type != DWC_OTG_EP_TYPE_CONTROL) { ++ dwc_otg_qh_t *qh = hc->qh; ++ if (hctsiz.b.pid == DWC_HCTSIZ_DATA0) { ++ qh->data_toggle = DWC_OTG_HC_PID_DATA0; ++ } else { ++ qh->data_toggle = DWC_OTG_HC_PID_DATA1; ++ } ++ } else { ++ if (hctsiz.b.pid == DWC_HCTSIZ_DATA0) { ++ qtd->data_toggle = DWC_OTG_HC_PID_DATA0; ++ } else { ++ qtd->data_toggle = DWC_OTG_HC_PID_DATA1; ++ } ++ } ++} ++ ++/** ++ * Frees the first QTD in the QH's list if free_qtd is 1. For non-periodic ++ * QHs, removes the QH from the active non-periodic schedule. If any QTDs are ++ * still linked to the QH, the QH is added to the end of the inactive ++ * non-periodic schedule. For periodic QHs, removes the QH from the periodic ++ * schedule if no more QTDs are linked to the QH. ++ */ ++static void deactivate_qh(dwc_otg_hcd_t *hcd, ++ dwc_otg_qh_t *qh, ++ int free_qtd) ++{ ++ int continue_split = 0; ++ dwc_otg_qtd_t *qtd; ++ ++ DWC_DEBUGPL(DBG_HCDV, " %s(%p,%p,%d)\n", __func__, hcd, qh, free_qtd); ++ ++ qtd = list_entry(qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry); ++ ++ if (qtd->complete_split) { ++ continue_split = 1; ++ } else if (qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_MID || ++ qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_END) { ++ continue_split = 1; ++ } ++ ++ if (free_qtd) { ++ dwc_otg_hcd_qtd_remove_and_free(hcd, qtd); ++ continue_split = 0; ++ } ++ ++ qh->channel = NULL; ++ qh->qtd_in_process = NULL; ++ dwc_otg_hcd_qh_deactivate(hcd, qh, continue_split); ++} ++ ++/** ++ * Updates the state of an Isochronous URB when the transfer is stopped for ++ * any reason. The fields of the current entry in the frame descriptor array ++ * are set based on the transfer state and the input _halt_status. Completes ++ * the Isochronous URB if all the URB frames have been completed. ++ * ++ * @return DWC_OTG_HC_XFER_COMPLETE if there are more frames remaining to be ++ * transferred in the URB. Otherwise return DWC_OTG_HC_XFER_URB_COMPLETE. ++ */ ++static dwc_otg_halt_status_e ++update_isoc_urb_state(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd, ++ dwc_otg_halt_status_e halt_status) ++{ ++ struct urb *urb = qtd->urb; ++ dwc_otg_halt_status_e ret_val = halt_status; ++ struct usb_iso_packet_descriptor *frame_desc; ++ ++ frame_desc = &urb->iso_frame_desc[qtd->isoc_frame_index]; ++ switch (halt_status) { ++ case DWC_OTG_HC_XFER_COMPLETE: ++ frame_desc->status = 0; ++ frame_desc->actual_length = ++ get_actual_xfer_length(hc, hc_regs, qtd, ++ halt_status, NULL); ++ ++ /* non DWORD-aligned buffer case handling. 
*/ ++ if (frame_desc->actual_length && ((uint32_t)hc->xfer_buff & 0x3) && ++ hc->qh->dw_align_buf && hc->ep_is_in) { ++ memcpy(urb->transfer_buffer + frame_desc->offset + qtd->isoc_split_offset, ++ hc->qh->dw_align_buf, frame_desc->actual_length); ++ ++ } ++ ++ break; ++ case DWC_OTG_HC_XFER_FRAME_OVERRUN: ++ printk("DWC_OTG_HC_XFER_FRAME_OVERRUN: %d\n", halt_status); ++ urb->error_count++; ++ if (hc->ep_is_in) { ++ frame_desc->status = -ENOSR; ++ } else { ++ frame_desc->status = -ECOMM; ++ } ++ frame_desc->actual_length = 0; ++ break; ++ case DWC_OTG_HC_XFER_BABBLE_ERR: ++ printk("DWC_OTG_HC_XFER_BABBLE_ERR: %d\n", halt_status); ++ urb->error_count++; ++ frame_desc->status = -EOVERFLOW; ++ /* Don't need to update actual_length in this case. */ ++ break; ++ case DWC_OTG_HC_XFER_XACT_ERR: ++ printk("DWC_OTG_HC_XFER_XACT_ERR: %d\n", halt_status); ++ urb->error_count++; ++ frame_desc->status = -EPROTO; ++ frame_desc->actual_length = ++ get_actual_xfer_length(hc, hc_regs, qtd, ++ halt_status, NULL); ++ ++ /* non DWORD-aligned buffer case handling. */ ++ if (frame_desc->actual_length && ((uint32_t)hc->xfer_buff & 0x3) && ++ hc->qh->dw_align_buf && hc->ep_is_in) { ++ memcpy(urb->transfer_buffer + frame_desc->offset + qtd->isoc_split_offset, ++ hc->qh->dw_align_buf, frame_desc->actual_length); ++ ++ } ++ break; ++ default: ++ ++ DWC_ERROR("%s: Unhandled _halt_status (%d)\n", __func__, ++ halt_status); ++ BUG(); ++ break; ++ } ++ ++ if (++qtd->isoc_frame_index == urb->number_of_packets) { ++ /* ++ * urb->status is not used for isoc transfers. ++ * The individual frame_desc statuses are used instead. ++ */ ++ dwc_otg_hcd_complete_urb(hcd, urb, 0); ++ ret_val = DWC_OTG_HC_XFER_URB_COMPLETE; ++ } else { ++ ret_val = DWC_OTG_HC_XFER_COMPLETE; ++ } ++ ++ return ret_val; ++} ++ ++/** ++ * Releases a host channel for use by other transfers. Attempts to select and ++ * queue more transactions since at least one host channel is available. ++ * ++ * @param hcd The HCD state structure. ++ * @param hc The host channel to release. ++ * @param qtd The QTD associated with the host channel. This QTD may be freed ++ * if the transfer is complete or an error has occurred. ++ * @param halt_status Reason the channel is being released. This status ++ * determines the actions taken by this function. ++ */ ++static void release_channel(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_qtd_t *qtd, ++ dwc_otg_halt_status_e halt_status) ++{ ++ dwc_otg_transaction_type_e tr_type; ++ int free_qtd; ++ ++ DWC_DEBUGPL(DBG_HCDV, " %s: channel %d, halt_status %d\n", ++ __func__, hc->hc_num, halt_status); ++ ++ switch (halt_status) { ++ case DWC_OTG_HC_XFER_URB_COMPLETE: ++ free_qtd = 1; ++ break; ++ case DWC_OTG_HC_XFER_AHB_ERR: ++ case DWC_OTG_HC_XFER_STALL: ++ case DWC_OTG_HC_XFER_BABBLE_ERR: ++ free_qtd = 1; ++ break; ++ case DWC_OTG_HC_XFER_XACT_ERR: ++ if (qtd->error_count >= 3) { ++ DWC_DEBUGPL(DBG_HCDV, " Complete URB with transaction error\n"); ++ free_qtd = 1; ++ qtd->urb->status = -EPROTO; ++ dwc_otg_hcd_complete_urb(hcd, qtd->urb, -EPROTO); ++ } else { ++ free_qtd = 0; ++ } ++ break; ++ case DWC_OTG_HC_XFER_URB_DEQUEUE: ++ /* ++ * The QTD has already been removed and the QH has been ++ * deactivated. Don't want to do anything except release the ++ * host channel and try to queue more transfers. 
++ */ ++ goto cleanup; ++ case DWC_OTG_HC_XFER_NO_HALT_STATUS: ++ DWC_ERROR("%s: No halt_status, channel %d\n", __func__, hc->hc_num); ++ free_qtd = 0; ++ break; ++ default: ++ free_qtd = 0; ++ break; ++ } ++ ++ deactivate_qh(hcd, hc->qh, free_qtd); ++ ++ cleanup: ++ /* ++ * Release the host channel for use by other transfers. The cleanup ++ * function clears the channel interrupt enables and conditions, so ++ * there's no need to clear the Channel Halted interrupt separately. ++ */ ++ dwc_otg_hc_cleanup(hcd->core_if, hc); ++ list_add_tail(&hc->hc_list_entry, &hcd->free_hc_list); ++ ++ switch (hc->ep_type) { ++ case DWC_OTG_EP_TYPE_CONTROL: ++ case DWC_OTG_EP_TYPE_BULK: ++ hcd->non_periodic_channels--; ++ break; ++ ++ default: ++ /* ++ * Don't release reservations for periodic channels here. ++ * That's done when a periodic transfer is descheduled (i.e. ++ * when the QH is removed from the periodic schedule). ++ */ ++ break; ++ } ++ ++ /* Try to queue more transfers now that there's a free channel. */ ++ tr_type = dwc_otg_hcd_select_transactions(hcd); ++ if (tr_type != DWC_OTG_TRANSACTION_NONE) { ++ dwc_otg_hcd_queue_transactions(hcd, tr_type); ++ } ++} ++ ++/** ++ * Halts a host channel. If the channel cannot be halted immediately because ++ * the request queue is full, this function ensures that the FIFO empty ++ * interrupt for the appropriate queue is enabled so that the halt request can ++ * be queued when there is space in the request queue. ++ * ++ * This function may also be called in DMA mode. In that case, the channel is ++ * simply released since the core always halts the channel automatically in ++ * DMA mode. ++ */ ++static void halt_channel(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_qtd_t *qtd, ++ dwc_otg_halt_status_e halt_status) ++{ ++ if (hcd->core_if->dma_enable) { ++ release_channel(hcd, hc, qtd, halt_status); ++ return; ++ } ++ ++ /* Slave mode processing... */ ++ dwc_otg_hc_halt(hcd->core_if, hc, halt_status); ++ ++ if (hc->halt_on_queue) { ++ gintmsk_data_t gintmsk = {.d32 = 0}; ++ dwc_otg_core_global_regs_t *global_regs; ++ global_regs = hcd->core_if->core_global_regs; ++ ++ if (hc->ep_type == DWC_OTG_EP_TYPE_CONTROL || ++ hc->ep_type == DWC_OTG_EP_TYPE_BULK) { ++ /* ++ * Make sure the Non-periodic Tx FIFO empty interrupt ++ * is enabled so that the non-periodic schedule will ++ * be processed. ++ */ ++ gintmsk.b.nptxfempty = 1; ++ dwc_modify_reg32(&global_regs->gintmsk, 0, gintmsk.d32); ++ } else { ++ /* ++ * Move the QH from the periodic queued schedule to ++ * the periodic assigned schedule. This allows the ++ * halt to be queued when the periodic schedule is ++ * processed. ++ */ ++ list_move(&hc->qh->qh_list_entry, ++ &hcd->periodic_sched_assigned); ++ ++ /* ++ * Make sure the Periodic Tx FIFO Empty interrupt is ++ * enabled so that the periodic schedule will be ++ * processed. ++ */ ++ gintmsk.b.ptxfempty = 1; ++ dwc_modify_reg32(&global_regs->gintmsk, 0, gintmsk.d32); ++ } ++ } ++} ++ ++/** ++ * Performs common cleanup for non-periodic transfers after a Transfer ++ * Complete interrupt. This function should be called after any endpoint type ++ * specific handling is finished to release the host channel. 
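++ * ++ * In Slave mode an IN channel needs an explicit disable to halt, while ++ * the core disables an OUT channel by itself; in DMA mode both paths ++ * simply release the channel because the core halts it automatically.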
++ */ ++static void complete_non_periodic_xfer(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd, ++ dwc_otg_halt_status_e halt_status) ++{ ++ hcint_data_t hcint; ++ ++ qtd->error_count = 0; ++ ++ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); ++ if (hcint.b.nyet) { ++ /* ++ * Got a NYET on the last transaction of the transfer. This ++ * means that the endpoint should be in the PING state at the ++ * beginning of the next transfer. ++ */ ++ hc->qh->ping_state = 1; ++ clear_hc_int(hc_regs, nyet); ++ } ++ ++ /* ++ * Always halt and release the host channel to make it available for ++ * more transfers. There may still be more phases for a control ++ * transfer or more data packets for a bulk transfer at this point, ++ * but the host channel is still halted. A channel will be reassigned ++ * to the transfer when the non-periodic schedule is processed after ++ * the channel is released. This allows transactions to be queued ++ * properly via dwc_otg_hcd_queue_transactions, which also enables the ++ * Tx FIFO Empty interrupt if necessary. ++ */ ++ if (hc->ep_is_in) { ++ /* ++ * IN transfers in Slave mode require an explicit disable to ++ * halt the channel. (In DMA mode, this call simply releases ++ * the channel.) ++ */ ++ halt_channel(hcd, hc, qtd, halt_status); ++ } else { ++ /* ++ * The channel is automatically disabled by the core for OUT ++ * transfers in Slave mode. ++ */ ++ release_channel(hcd, hc, qtd, halt_status); ++ } ++} ++ ++/** ++ * Performs common cleanup for periodic transfers after a Transfer Complete ++ * interrupt. This function should be called after any endpoint type specific ++ * handling is finished to release the host channel. ++ */ ++static void complete_periodic_xfer(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd, ++ dwc_otg_halt_status_e halt_status) ++{ ++ hctsiz_data_t hctsiz; ++ qtd->error_count = 0; ++ ++ hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz); ++ if (!hc->ep_is_in || hctsiz.b.pktcnt == 0) { ++ /* Core halts channel in these cases. */ ++ release_channel(hcd, hc, qtd, halt_status); ++ } else { ++ /* Flush any outstanding requests from the Tx queue. */ ++ halt_channel(hcd, hc, qtd, halt_status); ++ } ++} ++ ++/** ++ * Handles a host channel Transfer Complete interrupt. This handler may be ++ * called in either DMA mode or Slave mode. ++ */ ++static int32_t handle_hc_xfercomp_intr(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd) ++{ ++ int urb_xfer_done; ++ dwc_otg_halt_status_e halt_status = DWC_OTG_HC_XFER_COMPLETE; ++ struct urb *urb = qtd->urb; ++ int pipe_type = usb_pipetype(urb->pipe); ++ ++ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " ++ "Transfer Complete--\n", hc->hc_num); ++ ++ /* ++ * Handle xfer complete on CSPLIT. ++ */ ++ if (hc->qh->do_split) { ++ qtd->complete_split = 0; ++ } ++ ++ /* Update the QTD and URB states. 
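++ * Control transfers step through the SETUP, DATA and STATUS phases ++ * tracked in qtd->control_phase; bulk URBs complete once all of their ++ * data has transferred, interrupt URBs complete on the first Transfer ++ * Complete, and isochronous URBs complete only when the last frame ++ * descriptor has been filled in.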
*/ ++ switch (pipe_type) { ++ case PIPE_CONTROL: ++ switch (qtd->control_phase) { ++ case DWC_OTG_CONTROL_SETUP: ++ if (urb->transfer_buffer_length > 0) { ++ qtd->control_phase = DWC_OTG_CONTROL_DATA; ++ } else { ++ qtd->control_phase = DWC_OTG_CONTROL_STATUS; ++ } ++ DWC_DEBUGPL(DBG_HCDV, " Control setup transaction done\n"); ++ halt_status = DWC_OTG_HC_XFER_COMPLETE; ++ break; ++ case DWC_OTG_CONTROL_DATA: { ++ urb_xfer_done = update_urb_state_xfer_comp(hc, hc_regs, urb, qtd); ++ if (urb_xfer_done) { ++ qtd->control_phase = DWC_OTG_CONTROL_STATUS; ++ DWC_DEBUGPL(DBG_HCDV, " Control data transfer done\n"); ++ } else { ++ save_data_toggle(hc, hc_regs, qtd); ++ } ++ halt_status = DWC_OTG_HC_XFER_COMPLETE; ++ break; ++ } ++ case DWC_OTG_CONTROL_STATUS: ++ DWC_DEBUGPL(DBG_HCDV, " Control transfer complete\n"); ++ if (urb->status == -EINPROGRESS) { ++ urb->status = 0; ++ } ++ dwc_otg_hcd_complete_urb(hcd, urb, urb->status); ++ halt_status = DWC_OTG_HC_XFER_URB_COMPLETE; ++ break; ++ } ++ ++ complete_non_periodic_xfer(hcd, hc, hc_regs, qtd, halt_status); ++ break; ++ case PIPE_BULK: ++ DWC_DEBUGPL(DBG_HCDV, " Bulk transfer complete\n"); ++ urb_xfer_done = update_urb_state_xfer_comp(hc, hc_regs, urb, qtd); ++ if (urb_xfer_done) { ++ dwc_otg_hcd_complete_urb(hcd, urb, urb->status); ++ halt_status = DWC_OTG_HC_XFER_URB_COMPLETE; ++ } else { ++ halt_status = DWC_OTG_HC_XFER_COMPLETE; ++ } ++ ++ save_data_toggle(hc, hc_regs, qtd); ++ complete_non_periodic_xfer(hcd, hc, hc_regs, qtd, halt_status); ++ break; ++ case PIPE_INTERRUPT: ++ DWC_DEBUGPL(DBG_HCDV, " Interrupt transfer complete\n"); ++ update_urb_state_xfer_comp(hc, hc_regs, urb, qtd); ++ ++ /* ++ * Interrupt URB is done on the first transfer complete ++ * interrupt. ++ */ ++ dwc_otg_hcd_complete_urb(hcd, urb, urb->status); ++ save_data_toggle(hc, hc_regs, qtd); ++ complete_periodic_xfer(hcd, hc, hc_regs, qtd, ++ DWC_OTG_HC_XFER_URB_COMPLETE); ++ break; ++ case PIPE_ISOCHRONOUS: ++ DWC_DEBUGPL(DBG_HCDV, " Isochronous transfer complete\n"); ++ if (qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_ALL) { ++ halt_status = update_isoc_urb_state(hcd, hc, hc_regs, qtd, ++ DWC_OTG_HC_XFER_COMPLETE); ++ } ++ complete_periodic_xfer(hcd, hc, hc_regs, qtd, halt_status); ++ break; ++ } ++ ++ disable_hc_int(hc_regs, xfercompl); ++ ++ return 1; ++} ++ ++/** ++ * Handles a host channel STALL interrupt. This handler may be called in ++ * either DMA mode or Slave mode. ++ */ ++static int32_t handle_hc_stall_intr(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd) ++{ ++ struct urb *urb = qtd->urb; ++ int pipe_type = usb_pipetype(urb->pipe); ++ ++ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " ++ "STALL Received--\n", hc->hc_num); ++ ++ if (pipe_type == PIPE_CONTROL) { ++ dwc_otg_hcd_complete_urb(hcd, urb, -EPIPE); ++ } ++ ++ if (pipe_type == PIPE_BULK || pipe_type == PIPE_INTERRUPT) { ++ dwc_otg_hcd_complete_urb(hcd, urb, -EPIPE); ++ /* ++ * USB protocol requires resetting the data toggle for bulk ++ * and interrupt endpoints when a CLEAR_FEATURE(ENDPOINT_HALT) ++ * setup command is issued to the endpoint. Anticipate the ++ * CLEAR_FEATURE command since a STALL has occurred and reset ++ * the data toggle now. ++ */ ++ hc->qh->data_toggle = 0; ++ } ++ ++ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_STALL); ++ ++ disable_hc_int(hc_regs, stall); ++ ++ return 1; ++} ++ ++/* ++ * Updates the state of the URB when a transfer has been stopped due to an ++ * abnormal condition before the transfer completes. 
Modifies the ++ * actual_length field of the URB to reflect the number of bytes that have ++ * actually been transferred via the host channel. ++ */ ++static void update_urb_state_xfer_intr(dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ struct urb *urb, ++ dwc_otg_qtd_t *qtd, ++ dwc_otg_halt_status_e halt_status) ++{ ++ uint32_t bytes_transferred = get_actual_xfer_length(hc, hc_regs, qtd, ++ halt_status, NULL); ++ urb->actual_length += bytes_transferred; ++ ++#ifdef DEBUG ++ { ++ hctsiz_data_t hctsiz; ++ hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz); ++ DWC_DEBUGPL(DBG_HCDV, "DWC_otg: %s: %s, channel %d\n", ++ __func__, (hc->ep_is_in ? "IN" : "OUT"), hc->hc_num); ++ DWC_DEBUGPL(DBG_HCDV, " hc->start_pkt_count %d\n", hc->start_pkt_count); ++ DWC_DEBUGPL(DBG_HCDV, " hctsiz.pktcnt %d\n", hctsiz.b.pktcnt); ++ DWC_DEBUGPL(DBG_HCDV, " hc->max_packet %d\n", hc->max_packet); ++ DWC_DEBUGPL(DBG_HCDV, " bytes_transferred %d\n", bytes_transferred); ++ DWC_DEBUGPL(DBG_HCDV, " urb->actual_length %d\n", urb->actual_length); ++ DWC_DEBUGPL(DBG_HCDV, " urb->transfer_buffer_length %d\n", ++ urb->transfer_buffer_length); ++ } ++#endif ++} ++ ++/** ++ * Handles a host channel NAK interrupt. This handler may be called in either ++ * DMA mode or Slave mode. ++ */ ++static int32_t handle_hc_nak_intr(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd) ++{ ++ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " ++ "NAK Received--\n", hc->hc_num); ++ ++ /* ++ * Handle NAK for IN/OUT SSPLIT/CSPLIT transfers, bulk, control, and ++ * interrupt. Re-start the SSPLIT transfer. ++ */ ++ if (hc->do_split) { ++ if (hc->complete_split) { ++ qtd->error_count = 0; ++ } ++ qtd->complete_split = 0; ++ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NAK); ++ goto handle_nak_done; ++ } ++ ++ switch (usb_pipetype(qtd->urb->pipe)) { ++ case PIPE_CONTROL: ++ case PIPE_BULK: ++ if (hcd->core_if->dma_enable && hc->ep_is_in) { ++ /* ++ * NAK interrupts are enabled on bulk/control IN ++ * transfers in DMA mode for the sole purpose of ++ * resetting the error count after a transaction error ++ * occurs. The core will continue transferring data. ++ */ ++ qtd->error_count = 0; ++ goto handle_nak_done; ++ } ++ ++ /* ++ * NAK interrupts normally occur during OUT transfers in DMA ++ * or Slave mode. For IN transfers, more requests will be ++ * queued as request queue space is available. ++ */ ++ qtd->error_count = 0; ++ ++ if (!hc->qh->ping_state) { ++ update_urb_state_xfer_intr(hc, hc_regs, qtd->urb, ++ qtd, DWC_OTG_HC_XFER_NAK); ++ save_data_toggle(hc, hc_regs, qtd); ++ if (qtd->urb->dev->speed == USB_SPEED_HIGH) { ++ hc->qh->ping_state = 1; ++ } ++ } ++ ++ /* ++ * Halt the channel so the transfer can be re-started from ++ * the appropriate point or the PING protocol will ++ * start/continue. ++ */ ++ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NAK); ++ break; ++ case PIPE_INTERRUPT: ++ qtd->error_count = 0; ++ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NAK); ++ break; ++ case PIPE_ISOCHRONOUS: ++ /* Should never get called for isochronous transfers. */ ++ BUG(); ++ break; ++ } ++ ++ handle_nak_done: ++ disable_hc_int(hc_regs, nak); ++ ++ return 1; ++} ++ ++/** ++ * Handles a host channel ACK interrupt. This interrupt is enabled when ++ * performing the PING protocol in Slave mode, when errors occur during ++ * either Slave mode or DMA mode, and during Start Split transactions. 
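++ * ++ * An ACK on a Start Split schedules the Complete Split (isochronous OUT ++ * needs no CSPLIT and instead advances its split position); outside of ++ * split handling it clears the error count and, when the PING state is ++ * set, halts the channel so the transfer can restart.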
++ */ ++static int32_t handle_hc_ack_intr(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd) ++{ ++ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " ++ "ACK Received--\n", hc->hc_num); ++ ++ if (hc->do_split) { ++ /* ++ * Handle ACK on SSPLIT. ++ * ACK should not occur in CSPLIT. ++ */ ++ if (!hc->ep_is_in && hc->data_pid_start != DWC_OTG_HC_PID_SETUP) { ++ qtd->ssplit_out_xfer_count = hc->xfer_len; ++ } ++ if (!(hc->ep_type == DWC_OTG_EP_TYPE_ISOC && !hc->ep_is_in)) { ++ /* Don't need complete for isochronous out transfers. */ ++ qtd->complete_split = 1; ++ } ++ ++ /* ISOC OUT */ ++ if (hc->ep_type == DWC_OTG_EP_TYPE_ISOC && !hc->ep_is_in) { ++ switch (hc->xact_pos) { ++ case DWC_HCSPLIT_XACTPOS_ALL: ++ break; ++ case DWC_HCSPLIT_XACTPOS_END: ++ qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_ALL; ++ qtd->isoc_split_offset = 0; ++ break; ++ case DWC_HCSPLIT_XACTPOS_BEGIN: ++ case DWC_HCSPLIT_XACTPOS_MID: ++ /* ++ * For BEGIN or MID, calculate the length for ++ * the next microframe to determine the correct ++ * SSPLIT token, either MID or END. ++ */ ++ { ++ struct usb_iso_packet_descriptor *frame_desc; ++ ++ frame_desc = &qtd->urb->iso_frame_desc[qtd->isoc_frame_index]; ++ qtd->isoc_split_offset += 188; ++ ++ if ((frame_desc->length - qtd->isoc_split_offset) <= 188) { ++ qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_END; ++ } else { ++ qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_MID; ++ } ++ ++ } ++ break; ++ } ++ } else { ++ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_ACK); ++ } ++ } else { ++ qtd->error_count = 0; ++ ++ if (hc->qh->ping_state) { ++ hc->qh->ping_state = 0; ++ /* ++ * Halt the channel so the transfer can be re-started ++ * from the appropriate point. This only happens in ++ * Slave mode. In DMA mode, the ping_state is cleared ++ * when the transfer is started because the core ++ * automatically executes the PING, then the transfer. ++ */ ++ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_ACK); ++ } ++ } ++ ++ /* ++ * If the ACK occurred when _not_ in the PING state, let the channel ++ * continue transferring data after clearing the error count. ++ */ ++ ++ disable_hc_int(hc_regs, ack); ++ ++ return 1; ++} ++ ++/** ++ * Handles a host channel NYET interrupt. This interrupt should only occur on ++ * Bulk and Control OUT endpoints and for complete split transactions. If a ++ * NYET occurs at the same time as a Transfer Complete interrupt, it is ++ * handled in the xfercomp interrupt handler, not here. This handler may be ++ * called in either DMA mode or Slave mode. ++ */ ++static int32_t handle_hc_nyet_intr(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd) ++{ ++ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " ++ "NYET Received--\n", hc->hc_num); ++ ++ /* ++ * NYET on CSPLIT ++ * re-do the CSPLIT immediately on non-periodic ++ */ ++ if (hc->do_split && hc->complete_split) { ++ if (hc->ep_type == DWC_OTG_EP_TYPE_INTR || ++ hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { ++ int frnum = dwc_otg_hcd_get_frame_number(dwc_otg_hcd_to_hcd(hcd)); ++ ++ if (dwc_full_frame_num(frnum) != ++ dwc_full_frame_num(hc->qh->sched_frame)) { ++ /* ++ * No longer in the same full speed frame. ++ * Treat this as a transaction error. ++ */ ++#if 0 ++ /** @todo Fix system performance so this can ++ * be treated as an error. Right now complete ++ * splits cannot be scheduled precisely enough ++ * due to other system activity, so this error ++ * occurs regularly in Slave mode. 
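++ * (A complete split has to run in the same full-speed frame as its ++ * start split; dwc_full_frame_num() compares frame numbers with the ++ * microframe bits stripped, so a mismatch means the CSPLIT slipped into ++ * a later frame and the transfer is retried as a transaction error.)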
++ */ ++ qtd->error_count++; ++#endif ++ qtd->complete_split = 0; ++ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_XACT_ERR); ++ /** @todo add support for isoc release */ ++ goto handle_nyet_done; ++ } ++ } ++ ++ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NYET); ++ goto handle_nyet_done; ++ } ++ ++ hc->qh->ping_state = 1; ++ qtd->error_count = 0; ++ ++ update_urb_state_xfer_intr(hc, hc_regs, qtd->urb, qtd, ++ DWC_OTG_HC_XFER_NYET); ++ save_data_toggle(hc, hc_regs, qtd); ++ ++ /* ++ * Halt the channel and re-start the transfer so the PING ++ * protocol will start. ++ */ ++ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NYET); ++ ++handle_nyet_done: ++ disable_hc_int(hc_regs, nyet); ++ return 1; ++} ++ ++/** ++ * Handles a host channel babble interrupt. This handler may be called in ++ * either DMA mode or Slave mode. ++ */ ++static int32_t handle_hc_babble_intr(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd) ++{ ++ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " ++ "Babble Error--\n", hc->hc_num); ++ if (hc->ep_type != DWC_OTG_EP_TYPE_ISOC) { ++ dwc_otg_hcd_complete_urb(hcd, qtd->urb, -EOVERFLOW); ++ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_BABBLE_ERR); ++ } else { ++ dwc_otg_halt_status_e halt_status; ++ halt_status = update_isoc_urb_state(hcd, hc, hc_regs, qtd, ++ DWC_OTG_HC_XFER_BABBLE_ERR); ++ halt_channel(hcd, hc, qtd, halt_status); ++ } ++ disable_hc_int(hc_regs, bblerr); ++ return 1; ++} ++ ++/** ++ * Handles a host channel AHB error interrupt. This handler is only called in ++ * DMA mode. ++ */ ++static int32_t handle_hc_ahberr_intr(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd) ++{ ++ hcchar_data_t hcchar; ++ hcsplt_data_t hcsplt; ++ hctsiz_data_t hctsiz; ++ uint32_t hcdma; ++ struct urb *urb = qtd->urb; ++ ++ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " ++ "AHB Error--\n", hc->hc_num); ++ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hcsplt.d32 = dwc_read_reg32(&hc_regs->hcsplt); ++ hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz); ++ hcdma = dwc_read_reg32(&hc_regs->hcdma); ++ ++ DWC_ERROR("AHB ERROR, Channel %d\n", hc->hc_num); ++ DWC_ERROR(" hcchar 0x%08x, hcsplt 0x%08x\n", hcchar.d32, hcsplt.d32); ++ DWC_ERROR(" hctsiz 0x%08x, hcdma 0x%08x\n", hctsiz.d32, hcdma); ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD URB Enqueue\n"); ++ DWC_ERROR(" Device address: %d\n", usb_pipedevice(urb->pipe)); ++ DWC_ERROR(" Endpoint: %d, %s\n", usb_pipeendpoint(urb->pipe), ++ (usb_pipein(urb->pipe) ? 
"IN" : "OUT")); ++ DWC_ERROR(" Endpoint type: %s\n", ++ ({char *pipetype; ++ switch (usb_pipetype(urb->pipe)) { ++ case PIPE_CONTROL: pipetype = "CONTROL"; break; ++ case PIPE_BULK: pipetype = "BULK"; break; ++ case PIPE_INTERRUPT: pipetype = "INTERRUPT"; break; ++ case PIPE_ISOCHRONOUS: pipetype = "ISOCHRONOUS"; break; ++ default: pipetype = "UNKNOWN"; break; ++ }; pipetype;})); ++ DWC_ERROR(" Speed: %s\n", ++ ({char *speed; ++ switch (urb->dev->speed) { ++ case USB_SPEED_HIGH: speed = "HIGH"; break; ++ case USB_SPEED_FULL: speed = "FULL"; break; ++ case USB_SPEED_LOW: speed = "LOW"; break; ++ default: speed = "UNKNOWN"; break; ++ }; speed;})); ++ DWC_ERROR(" Max packet size: %d\n", ++ usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe))); ++ DWC_ERROR(" Data buffer length: %d\n", urb->transfer_buffer_length); ++ DWC_ERROR(" Transfer buffer: %p, Transfer DMA: %p\n", ++ urb->transfer_buffer, (void *)urb->transfer_dma); ++ DWC_ERROR(" Setup buffer: %p, Setup DMA: %p\n", ++ urb->setup_packet, (void *)urb->setup_dma); ++ DWC_ERROR(" Interval: %d\n", urb->interval); ++ ++ dwc_otg_hcd_complete_urb(hcd, urb, -EIO); ++ ++ /* ++ * Force a channel halt. Don't call halt_channel because that won't ++ * write to the HCCHARn register in DMA mode to force the halt. ++ */ ++ dwc_otg_hc_halt(hcd->core_if, hc, DWC_OTG_HC_XFER_AHB_ERR); ++ ++ disable_hc_int(hc_regs, ahberr); ++ return 1; ++} ++ ++/** ++ * Handles a host channel transaction error interrupt. This handler may be ++ * called in either DMA mode or Slave mode. ++ */ ++static int32_t handle_hc_xacterr_intr(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd) ++{ ++ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " ++ "Transaction Error--\n", hc->hc_num); ++ ++ switch (usb_pipetype(qtd->urb->pipe)) { ++ case PIPE_CONTROL: ++ case PIPE_BULK: ++ qtd->error_count++; ++ if (!hc->qh->ping_state) { ++ update_urb_state_xfer_intr(hc, hc_regs, qtd->urb, ++ qtd, DWC_OTG_HC_XFER_XACT_ERR); ++ save_data_toggle(hc, hc_regs, qtd); ++ if (!hc->ep_is_in && qtd->urb->dev->speed == USB_SPEED_HIGH) { ++ hc->qh->ping_state = 1; ++ } ++ } ++ ++ /* ++ * Halt the channel so the transfer can be re-started from ++ * the appropriate point or the PING protocol will start. ++ */ ++ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_XACT_ERR); ++ break; ++ case PIPE_INTERRUPT: ++ qtd->error_count++; ++ if (hc->do_split && hc->complete_split) { ++ qtd->complete_split = 0; ++ } ++ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_XACT_ERR); ++ break; ++ case PIPE_ISOCHRONOUS: ++ { ++ dwc_otg_halt_status_e halt_status; ++ halt_status = update_isoc_urb_state(hcd, hc, hc_regs, qtd, ++ DWC_OTG_HC_XFER_XACT_ERR); ++ ++ halt_channel(hcd, hc, qtd, halt_status); ++ } ++ break; ++ } ++ ++ disable_hc_int(hc_regs, xacterr); ++ ++ return 1; ++} ++ ++/** ++ * Handles a host channel frame overrun interrupt. This handler may be called ++ * in either DMA mode or Slave mode. 
++ */ ++static int32_t handle_hc_frmovrun_intr(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd) ++{ ++ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " ++ "Frame Overrun--\n", hc->hc_num); ++ ++ switch (usb_pipetype(qtd->urb->pipe)) { ++ case PIPE_CONTROL: ++ case PIPE_BULK: ++ break; ++ case PIPE_INTERRUPT: ++ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_FRAME_OVERRUN); ++ break; ++ case PIPE_ISOCHRONOUS: ++ { ++ dwc_otg_halt_status_e halt_status; ++ halt_status = update_isoc_urb_state(hcd, hc, hc_regs, qtd, ++ DWC_OTG_HC_XFER_FRAME_OVERRUN); ++ ++ halt_channel(hcd, hc, qtd, halt_status); ++ } ++ break; ++ } ++ ++ disable_hc_int(hc_regs, frmovrun); ++ ++ return 1; ++} ++ ++/** ++ * Handles a host channel data toggle error interrupt. This handler may be ++ * called in either DMA mode or Slave mode. ++ */ ++static int32_t handle_hc_datatglerr_intr(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd) ++{ ++ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " ++ "Data Toggle Error--\n", hc->hc_num); ++ ++ if (hc->ep_is_in) { ++ qtd->error_count = 0; ++ } else { ++ DWC_ERROR("Data Toggle Error on OUT transfer," ++ "channel %d\n", hc->hc_num); ++ } ++ ++ disable_hc_int(hc_regs, datatglerr); ++ ++ return 1; ++} ++ ++#ifdef DEBUG ++/** ++ * This function is for debug only. It checks that a valid halt status is set ++ * and that HCCHARn.chdis is clear. If there's a problem, corrective action is ++ * taken and a warning is issued. ++ * @return 1 if halt status is ok, 0 otherwise. ++ */ ++static inline int halt_status_ok(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd) ++{ ++ hcchar_data_t hcchar; ++ hctsiz_data_t hctsiz; ++ hcint_data_t hcint; ++ hcintmsk_data_t hcintmsk; ++ hcsplt_data_t hcsplt; ++ ++ if (hc->halt_status == DWC_OTG_HC_XFER_NO_HALT_STATUS) { ++ /* ++ * This code is here only as a check. This condition should ++ * never happen. Ignore the halt if it does occur. ++ */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz); ++ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); ++ hcintmsk.d32 = dwc_read_reg32(&hc_regs->hcintmsk); ++ hcsplt.d32 = dwc_read_reg32(&hc_regs->hcsplt); ++ DWC_WARN("%s: hc->halt_status == DWC_OTG" ++ "channel %d, hcchar 0x%08x, hctsiz 0x%08x, " ++ "hcint 0x%08x, hcintmsk 0x%08x, " ++ "hcsplt 0x%08x, qtd->complete_split %d\n", ++ __func__, hc->hc_num, hcchar.d32, hctsiz.d32, ++ hcint.d32, hcintmsk.d32, ++ hcsplt.d32, qtd->complete_split); ++ ++ DWC_WARN("%s: no halt status, channel %d, ignoring interrupt\n", ++ __func__, hc->hc_num); ++ DWC_WARN("\n"); ++ clear_hc_int(hc_regs, chhltd); ++ return 0; ++ } ++ ++ /* ++ * This code is here only as a check. hcchar.chdis should ++ * never be set when the halt interrupt occurs. Halt the ++ * channel again if it does occur. ++ */ ++ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); ++ if (hcchar.b.chdis) { ++ DWC_WARN("%s: hcchar.chdis set unexpectedly, " ++ "hcchar 0x%08x, trying to halt again\n", ++ __func__, hcchar.d32); ++ clear_hc_int(hc_regs, chhltd); ++ hc->halt_pending = 0; ++ halt_channel(hcd, hc, qtd, hc->halt_status); ++ return 0; ++ } ++ ++ return 1; ++} ++#endif ++ ++/** ++ * Handles a host Channel Halted interrupt in DMA mode. This handler ++ * determines the reason the channel halted and proceeds accordingly. 
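++ * ++ * In DMA mode the core halts the channel itself when a transfer ++ * finishes or fails, so this routine decodes HCINT in priority order: ++ * Transfer Complete, then STALL, then transaction error, and finally ++ * NYET/babble/frame overrun/NAK/ACK on cores without the OUT NAK ++ * enhancement.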
++ */ ++static void handle_hc_chhltd_intr_dma(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd) ++{ ++ hcint_data_t hcint; ++ hcintmsk_data_t hcintmsk; ++ int out_nak_enh = 0; ++ ++ /* For core with OUT NAK enhancement, the flow for high- ++ * speed CONTROL/BULK OUT is handled a little differently. ++ */ ++ if (hcd->core_if->snpsid >= 0x4F54271A) { ++ if (hc->speed == DWC_OTG_EP_SPEED_HIGH && !hc->ep_is_in && ++ (hc->ep_type == DWC_OTG_EP_TYPE_CONTROL || ++ hc->ep_type == DWC_OTG_EP_TYPE_BULK)) { ++ printk(KERN_DEBUG "OUT NAK enhancement enabled\n"); ++ out_nak_enh = 1; ++ } else { ++ printk(KERN_DEBUG "OUT NAK enhancement disabled, not HS Ctrl/Bulk OUT EP\n"); ++ } ++ } else { ++// printk(KERN_DEBUG "OUT NAK enhancement disabled, no core support\n"); ++ } ++ ++ if (hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE || ++ hc->halt_status == DWC_OTG_HC_XFER_AHB_ERR) { ++ /* ++ * Just release the channel. A dequeue can happen on a ++ * transfer timeout. In the case of an AHB Error, the channel ++ * was forced to halt because there's no way to gracefully ++ * recover. ++ */ ++ release_channel(hcd, hc, qtd, hc->halt_status); ++ return; ++ } ++ ++ /* Read the HCINTn register to determine the cause for the halt. */ ++ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); ++ hcintmsk.d32 = dwc_read_reg32(&hc_regs->hcintmsk); ++ ++ if (hcint.b.xfercomp) { ++ /** @todo This is here because of a possible hardware bug. Spec ++ * says that on SPLIT-ISOC OUT transfers in DMA mode that a HALT ++ * interrupt w/ACK bit set should occur, but I only see the ++ * XFERCOMP bit, even with it masked out. This is a workaround ++ * for that behavior. Should fix this when hardware is fixed. ++ */ ++ if (hc->ep_type == DWC_OTG_EP_TYPE_ISOC && !hc->ep_is_in) { ++ handle_hc_ack_intr(hcd, hc, hc_regs, qtd); ++ } ++ handle_hc_xfercomp_intr(hcd, hc, hc_regs, qtd); ++ } else if (hcint.b.stall) { ++ handle_hc_stall_intr(hcd, hc, hc_regs, qtd); ++ } else if (hcint.b.xacterr) { ++ if (out_nak_enh) { ++ if (hcint.b.nyet || hcint.b.nak || hcint.b.ack) { ++ printk(KERN_DEBUG "XactErr with NYET/NAK/ACK\n"); ++ qtd->error_count = 0; ++ } else { ++ printk(KERN_DEBUG "XactErr without NYET/NAK/ACK\n"); ++ } ++ } ++ ++ /* ++ * Must handle xacterr before nak or ack. Could get a xacterr ++ * at the same time as either of these on a BULK/CONTROL OUT ++ * that started with a PING. The xacterr takes precedence. ++ */ ++ handle_hc_xacterr_intr(hcd, hc, hc_regs, qtd); ++ } else if (!out_nak_enh) { ++ if (hcint.b.nyet) { ++ /* ++ * Must handle nyet before nak or ack. Could get a nyet at the ++ * same time as either of those on a BULK/CONTROL OUT that ++ * started with a PING. The nyet takes precedence. ++ */ ++ handle_hc_nyet_intr(hcd, hc, hc_regs, qtd); ++ } else if (hcint.b.bblerr) { ++ handle_hc_babble_intr(hcd, hc, hc_regs, qtd); ++ } else if (hcint.b.frmovrun) { ++ handle_hc_frmovrun_intr(hcd, hc, hc_regs, qtd); ++ } else if (hcint.b.nak && !hcintmsk.b.nak) { ++ /* ++ * If nak is not masked, it's because a non-split IN transfer ++ * is in an error state. In that case, the nak is handled by ++ * the nak interrupt handler, not here. Handle nak here for ++ * BULK/CONTROL OUT transfers, which halt on a NAK to allow ++ * rewinding the buffer pointer. ++ */ ++ handle_hc_nak_intr(hcd, hc, hc_regs, qtd); ++ } else if (hcint.b.ack && !hcintmsk.b.ack) { ++ /* ++ * If ack is not masked, it's because a non-split IN transfer ++ * is in an error state. 
In that case, the ack is handled by ++ * the ack interrupt handler, not here. Handle ack here for ++ * split transfers. Start splits halt on ACK. ++ */ ++ handle_hc_ack_intr(hcd, hc, hc_regs, qtd); ++ } else { ++ if (hc->ep_type == DWC_OTG_EP_TYPE_INTR || ++ hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { ++ /* ++ * A periodic transfer halted with no other channel ++ * interrupts set. Assume it was halted by the core ++ * because it could not be completed in its scheduled ++ * (micro)frame. ++ */ ++#ifdef DEBUG ++ DWC_PRINT("%s: Halt channel %d (assume incomplete periodic transfer)\n", ++ __func__, hc->hc_num); ++#endif ++ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_PERIODIC_INCOMPLETE); ++ } else { ++ DWC_ERROR("%s: Channel %d, DMA Mode -- ChHltd set, but reason " ++ "for halting is unknown, hcint 0x%08x, intsts 0x%08x\n", ++ __func__, hc->hc_num, hcint.d32, ++ dwc_read_reg32(&hcd->core_if->core_global_regs->gintsts)); ++ } ++ } ++ } else { ++ printk(KERN_DEBUG "NYET/NAK/ACK/other in non-error case, 0x%08x\n", hcint.d32); ++ } ++} ++ ++/** ++ * Handles a host channel Channel Halted interrupt. ++ * ++ * In slave mode, this handler is called only when the driver specifically ++ * requests a halt. This occurs during handling other host channel interrupts ++ * (e.g. nak, xacterr, stall, nyet, etc.). ++ * ++ * In DMA mode, this is the interrupt that occurs when the core has finished ++ * processing a transfer on a channel. Other host channel interrupts (except ++ * ahberr) are disabled in DMA mode. ++ */ ++static int32_t handle_hc_chhltd_intr(dwc_otg_hcd_t *hcd, ++ dwc_hc_t *hc, ++ dwc_otg_hc_regs_t *hc_regs, ++ dwc_otg_qtd_t *qtd) ++{ ++ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " ++ "Channel Halted--\n", hc->hc_num); ++ ++ if (hcd->core_if->dma_enable) { ++ handle_hc_chhltd_intr_dma(hcd, hc, hc_regs, qtd); ++ } else { ++#ifdef DEBUG ++ if (!halt_status_ok(hcd, hc, hc_regs, qtd)) { ++ return 1; ++ } ++#endif ++ release_channel(hcd, hc, qtd, hc->halt_status); ++ } ++ ++ return 1; ++} ++ ++/** Handles interrupt for a specific Host Channel */ ++int32_t dwc_otg_hcd_handle_hc_n_intr(dwc_otg_hcd_t *dwc_otg_hcd, uint32_t num) ++{ ++ int retval = 0; ++ hcint_data_t hcint; ++ hcintmsk_data_t hcintmsk; ++ dwc_hc_t *hc; ++ dwc_otg_hc_regs_t *hc_regs; ++ dwc_otg_qtd_t *qtd; ++ ++ DWC_DEBUGPL(DBG_HCDV, "--Host Channel Interrupt--, Channel %d\n", num); ++ ++ hc = dwc_otg_hcd->hc_ptr_array[num]; ++ hc_regs = dwc_otg_hcd->core_if->host_if->hc_regs[num]; ++ qtd = list_entry(hc->qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry); ++ ++ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); ++ hcintmsk.d32 = dwc_read_reg32(&hc_regs->hcintmsk); ++ DWC_DEBUGPL(DBG_HCDV, " hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n", ++ hcint.d32, hcintmsk.d32, (hcint.d32 & hcintmsk.d32)); ++ hcint.d32 = hcint.d32 & hcintmsk.d32; ++ ++ if (!dwc_otg_hcd->core_if->dma_enable) { ++ if (hcint.b.chhltd && hcint.d32 != 0x2) { ++ hcint.b.chhltd = 0; ++ } ++ } ++ ++ if (hcint.b.xfercomp) { ++ retval |= handle_hc_xfercomp_intr(dwc_otg_hcd, hc, hc_regs, qtd); ++ /* ++ * If NYET occurred at same time as Xfer Complete, the NYET is ++ * handled by the Xfer Complete interrupt handler. Don't want ++ * to call the NYET interrupt handler in this case. 
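++ * ++ * (Only bits that survived the HCINTMSK masking above are dispatched ++ * here; in Slave mode the Channel Halted bit was dropped earlier unless ++ * it was the only bit set, so the individual handlers run first and the ++ * requested halt is finished through hc->halt_status.)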
++ */ ++ hcint.b.nyet = 0; ++ } ++ if (hcint.b.chhltd) { ++ retval |= handle_hc_chhltd_intr(dwc_otg_hcd, hc, hc_regs, qtd); ++ } ++ if (hcint.b.ahberr) { ++ retval |= handle_hc_ahberr_intr(dwc_otg_hcd, hc, hc_regs, qtd); ++ } ++ if (hcint.b.stall) { ++ retval |= handle_hc_stall_intr(dwc_otg_hcd, hc, hc_regs, qtd); ++ } ++ if (hcint.b.nak) { ++ retval |= handle_hc_nak_intr(dwc_otg_hcd, hc, hc_regs, qtd); ++ } ++ if (hcint.b.ack) { ++ retval |= handle_hc_ack_intr(dwc_otg_hcd, hc, hc_regs, qtd); ++ } ++ if (hcint.b.nyet) { ++ retval |= handle_hc_nyet_intr(dwc_otg_hcd, hc, hc_regs, qtd); ++ } ++ if (hcint.b.xacterr) { ++ retval |= handle_hc_xacterr_intr(dwc_otg_hcd, hc, hc_regs, qtd); ++ } ++ if (hcint.b.bblerr) { ++ retval |= handle_hc_babble_intr(dwc_otg_hcd, hc, hc_regs, qtd); ++ } ++ if (hcint.b.frmovrun) { ++ retval |= handle_hc_frmovrun_intr(dwc_otg_hcd, hc, hc_regs, qtd); ++ } ++ if (hcint.b.datatglerr) { ++ retval |= handle_hc_datatglerr_intr(dwc_otg_hcd, hc, hc_regs, qtd); ++ } ++ ++ return retval; ++} ++ ++#endif /* DWC_DEVICE_ONLY */ +--- /dev/null ++++ b/drivers/usb/dwc_otg/dwc_otg_hcd_queue.c +@@ -0,0 +1,684 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_hcd_queue.c $ ++ * $Revision: 1.5 $ ++ * $Date: 2008-12-15 06:51:32 $ ++ * $Change: 537387 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++#ifndef DWC_DEVICE_ONLY ++ ++/** ++ * @file ++ * ++ * This file contains the functions to manage Queue Heads and Queue ++ * Transfer Descriptors. 
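++ * ++ * A Queue Head (QH) describes one endpoint and carries its scheduling ++ * state, while each Queue Transfer Descriptor (QTD) represents a single ++ * URB queued to that endpoint; the QTDs are kept on a per-QH list.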
++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "dwc_otg_driver.h" ++#include "dwc_otg_hcd.h" ++#include "dwc_otg_regs.h" ++ ++/** ++ * This function allocates and initializes a QH. ++ * ++ * @param hcd The HCD state structure for the DWC OTG controller. ++ * @param[in] urb Holds the information about the device/endpoint that we need ++ * to initialize the QH. ++ * ++ * @return Returns pointer to the newly allocated QH, or NULL on error. */ ++dwc_otg_qh_t *dwc_otg_hcd_qh_create (dwc_otg_hcd_t *hcd, struct urb *urb) ++{ ++ dwc_otg_qh_t *qh; ++ ++ /* Allocate memory */ ++ /** @todo add memflags argument */ ++ qh = dwc_otg_hcd_qh_alloc (); ++ if (qh == NULL) { ++ return NULL; ++ } ++ ++ dwc_otg_hcd_qh_init (hcd, qh, urb); ++ return qh; ++} ++ ++/** Free each QTD in the QH's QTD-list then free the QH. QH should already be ++ * removed from a list. QTD list should already be empty if called from URB ++ * Dequeue. ++ * ++ * @param[in] hcd HCD instance. ++ * @param[in] qh The QH to free. ++ */ ++void dwc_otg_hcd_qh_free (dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh) ++{ ++ dwc_otg_qtd_t *qtd; ++ struct list_head *pos; ++ unsigned long flags; ++ ++ /* Free each QTD in the QTD list */ ++ SPIN_LOCK_IRQSAVE(&hcd->lock, flags) ++ for (pos = qh->qtd_list.next; ++ pos != &qh->qtd_list; ++ pos = qh->qtd_list.next) ++ { ++ list_del (pos); ++ qtd = dwc_list_to_qtd (pos); ++ dwc_otg_hcd_qtd_free (qtd); ++ } ++ SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags) ++ ++ if (qh->dw_align_buf) { ++ dma_free_coherent((dwc_otg_hcd_to_hcd(hcd))->self.controller, ++ hcd->core_if->core_params->max_transfer_size, ++ qh->dw_align_buf, ++ qh->dw_align_buf_dma); ++ } ++ ++ kfree (qh); ++ return; ++} ++ ++/** Initializes a QH structure. ++ * ++ * @param[in] hcd The HCD state structure for the DWC OTG controller. ++ * @param[in] qh The QH to init. ++ * @param[in] urb Holds the information about the device/endpoint that we need ++ * to initialize the QH. */ ++#define SCHEDULE_SLOP 10 ++void dwc_otg_hcd_qh_init(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh, struct urb *urb) ++{ ++ char *speed, *type; ++ memset (qh, 0, sizeof (dwc_otg_qh_t)); ++ ++ /* Initialize QH */ ++ switch (usb_pipetype(urb->pipe)) { ++ case PIPE_CONTROL: ++ qh->ep_type = USB_ENDPOINT_XFER_CONTROL; ++ break; ++ case PIPE_BULK: ++ qh->ep_type = USB_ENDPOINT_XFER_BULK; ++ break; ++ case PIPE_ISOCHRONOUS: ++ qh->ep_type = USB_ENDPOINT_XFER_ISOC; ++ break; ++ case PIPE_INTERRUPT: ++ qh->ep_type = USB_ENDPOINT_XFER_INT; ++ break; ++ } ++ ++ qh->ep_is_in = usb_pipein(urb->pipe) ? 1 : 0; ++ ++ qh->data_toggle = DWC_OTG_HC_PID_DATA0; ++ qh->maxp = usb_maxpacket(urb->dev, urb->pipe, !(usb_pipein(urb->pipe))); ++ INIT_LIST_HEAD(&qh->qtd_list); ++ INIT_LIST_HEAD(&qh->qh_list_entry); ++ qh->channel = NULL; ++ ++ /* FS/LS Enpoint on HS Hub ++ * NOT virtual root hub */ ++ qh->do_split = 0; ++ if (((urb->dev->speed == USB_SPEED_LOW) || ++ (urb->dev->speed == USB_SPEED_FULL)) && ++ (urb->dev->tt) && (urb->dev->tt->hub) && (urb->dev->tt->hub->devnum != 1)) ++ { ++ DWC_DEBUGPL(DBG_HCD, "QH init: EP %d: TT found at hub addr %d, for port %d\n", ++ usb_pipeendpoint(urb->pipe), urb->dev->tt->hub->devnum, ++ urb->dev->ttport); ++ qh->do_split = 1; ++ } ++ ++ if (qh->ep_type == USB_ENDPOINT_XFER_INT || ++ qh->ep_type == USB_ENDPOINT_XFER_ISOC) { ++ /* Compute scheduling parameters once and save them. */ ++ hprt0_data_t hprt; ++ ++ /** @todo Account for split transfers in the bus time. 
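++ * The value charged against the periodic budget is the bus time of one ++ * maximum-size transaction per interval: usb_calc_bus_time() returns ++ * nanoseconds, which are converted to microseconds and later checked in ++ * check_periodic_bandwidth().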
*/ ++ int bytecount = dwc_hb_mult(qh->maxp) * dwc_max_packet(qh->maxp); ++ ++ /* FIXME: work-around patch by Steven */ ++ qh->usecs = NS_TO_US(usb_calc_bus_time(urb->dev->speed, ++ usb_pipein(urb->pipe), ++ (qh->ep_type == USB_ENDPOINT_XFER_ISOC), ++ bytecount)); ++ ++ /* Start in a slightly future (micro)frame. */ ++ qh->sched_frame = dwc_frame_num_inc(hcd->frame_number, ++ SCHEDULE_SLOP); ++ qh->interval = urb->interval; ++#if 0 ++ /* Increase interrupt polling rate for debugging. */ ++ if (qh->ep_type == USB_ENDPOINT_XFER_INT) { ++ qh->interval = 8; ++ } ++#endif ++ hprt.d32 = dwc_read_reg32(hcd->core_if->host_if->hprt0); ++ if ((hprt.b.prtspd == DWC_HPRT0_PRTSPD_HIGH_SPEED) && ++ ((urb->dev->speed == USB_SPEED_LOW) || ++ (urb->dev->speed == USB_SPEED_FULL))) { ++ qh->interval *= 8; ++ qh->sched_frame |= 0x7; ++ qh->start_split_frame = qh->sched_frame; ++ } ++ ++ } ++ ++ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD QH Initialized\n"); ++ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - qh = %p\n", qh); ++ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Device Address = %d\n", ++ urb->dev->devnum); ++ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Endpoint %d, %s\n", ++ usb_pipeendpoint(urb->pipe), ++ usb_pipein(urb->pipe) == USB_DIR_IN ? "IN" : "OUT"); ++ ++ switch(urb->dev->speed) { ++ case USB_SPEED_LOW: ++ speed = "low"; ++ break; ++ case USB_SPEED_FULL: ++ speed = "full"; ++ break; ++ case USB_SPEED_HIGH: ++ speed = "high"; ++ break; ++ default: ++ speed = "?"; ++ break; ++ } ++ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Speed = %s\n", speed); ++ ++ switch (qh->ep_type) { ++ case USB_ENDPOINT_XFER_ISOC: ++ type = "isochronous"; ++ break; ++ case USB_ENDPOINT_XFER_INT: ++ type = "interrupt"; ++ break; ++ case USB_ENDPOINT_XFER_CONTROL: ++ type = "control"; ++ break; ++ case USB_ENDPOINT_XFER_BULK: ++ type = "bulk"; ++ break; ++ default: ++ type = "?"; ++ break; ++ } ++ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Type = %s\n",type); ++ ++#ifdef DEBUG ++ if (qh->ep_type == USB_ENDPOINT_XFER_INT) { ++ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - usecs = %d\n", ++ qh->usecs); ++ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - interval = %d\n", ++ qh->interval); ++ } ++#endif ++ qh->dw_align_buf = NULL; ++ return; ++} ++ ++/** ++ * Checks that a channel is available for a periodic transfer. ++ * ++ * @return 0 if successful, negative error code otherise. ++ */ ++static int periodic_channel_available(dwc_otg_hcd_t *hcd) ++{ ++ /* ++ * Currently assuming that there is a dedicated host channnel for each ++ * periodic transaction plus at least one host channel for ++ * non-periodic transactions. ++ */ ++ int status; ++ int num_channels; ++ ++ num_channels = hcd->core_if->core_params->host_channels; ++ if ((hcd->periodic_channels + hcd->non_periodic_channels < num_channels) && ++ (hcd->periodic_channels < num_channels - 1)) { ++ status = 0; ++ } ++ else { ++ DWC_NOTICE("%s: Total channels: %d, Periodic: %d, Non-periodic: %d\n", ++ __func__, num_channels, hcd->periodic_channels, ++ hcd->non_periodic_channels); ++ status = -ENOSPC; ++ } ++ ++ return status; ++} ++ ++/** ++ * Checks that there is sufficient bandwidth for the specified QH in the ++ * periodic schedule. For simplicity, this calculation assumes that all the ++ * transfers in the periodic schedule may occur in the same (micro)frame. ++ * ++ * @param hcd The HCD state structure for the DWC OTG controller. ++ * @param qh QH containing periodic bandwidth required. ++ * ++ * @return 0 if successful, negative error code otherwise. 
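++ * ++ * The budget is 80% of a 125 us microframe (100 us) in high-speed mode ++ * and 90% of a 1 ms frame (900 us) in full-speed mode. For example, a ++ * 40 us interrupt endpoint is admitted in high-speed mode only while ++ * the microseconds already claimed do not exceed 60.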
++ */ ++static int check_periodic_bandwidth(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh) ++{ ++ int status; ++ uint16_t max_claimed_usecs; ++ ++ status = 0; ++ ++ if (hcd->core_if->core_params->speed == DWC_SPEED_PARAM_HIGH) { ++ /* ++ * High speed mode. ++ * Max periodic usecs is 80% x 125 usec = 100 usec. ++ */ ++ max_claimed_usecs = 100 - qh->usecs; ++ } else { ++ /* ++ * Full speed mode. ++ * Max periodic usecs is 90% x 1000 usec = 900 usec. ++ */ ++ max_claimed_usecs = 900 - qh->usecs; ++ } ++ ++ if (hcd->periodic_usecs > max_claimed_usecs) { ++ DWC_NOTICE("%s: already claimed usecs %d, required usecs %d\n", ++ __func__, hcd->periodic_usecs, qh->usecs); ++ status = -ENOSPC; ++ } ++ ++ return status; ++} ++ ++/** ++ * Checks that the max transfer size allowed in a host channel is large enough ++ * to handle the maximum data transfer in a single (micro)frame for a periodic ++ * transfer. ++ * ++ * @param hcd The HCD state structure for the DWC OTG controller. ++ * @param qh QH for a periodic endpoint. ++ * ++ * @return 0 if successful, negative error code otherwise. ++ */ ++static int check_max_xfer_size(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh) ++{ ++ int status; ++ uint32_t max_xfer_size; ++ uint32_t max_channel_xfer_size; ++ ++ status = 0; ++ ++ max_xfer_size = dwc_max_packet(qh->maxp) * dwc_hb_mult(qh->maxp); ++ max_channel_xfer_size = hcd->core_if->core_params->max_transfer_size; ++ ++ if (max_xfer_size > max_channel_xfer_size) { ++ DWC_NOTICE("%s: Periodic xfer length %d > " ++ "max xfer length for channel %d\n", ++ __func__, max_xfer_size, max_channel_xfer_size); ++ status = -ENOSPC; ++ } ++ ++ return status; ++} ++ ++/** ++ * Schedules an interrupt or isochronous transfer in the periodic schedule. ++ * ++ * @param hcd The HCD state structure for the DWC OTG controller. ++ * @param qh QH for the periodic transfer. The QH should already contain the ++ * scheduling information. ++ * ++ * @return 0 if successful, negative error code otherwise. ++ */ ++static int schedule_periodic(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh) ++{ ++ int status = 0; ++ ++ status = periodic_channel_available(hcd); ++ if (status) { ++ DWC_NOTICE("%s: No host channel available for periodic " ++ "transfer.\n", __func__); ++ return status; ++ } ++ ++ status = check_periodic_bandwidth(hcd, qh); ++ if (status) { ++ DWC_NOTICE("%s: Insufficient periodic bandwidth for " ++ "periodic transfer.\n", __func__); ++ return status; ++ } ++ ++ status = check_max_xfer_size(hcd, qh); ++ if (status) { ++ DWC_NOTICE("%s: Channel max transfer size too small " ++ "for periodic transfer.\n", __func__); ++ return status; ++ } ++ ++ /* Always start in the inactive schedule. */ ++ list_add_tail(&qh->qh_list_entry, &hcd->periodic_sched_inactive); ++ ++ /* Reserve the periodic channel. */ ++ hcd->periodic_channels++; ++ ++ /* Update claimed usecs per (micro)frame. */ ++ hcd->periodic_usecs += qh->usecs; ++ ++ /* Update average periodic bandwidth claimed and # periodic reqs for usbfs. 
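++ * The figure reported to usbfs is usecs divided by interval, i.e. the ++ * average cost per (micro)frame, since the endpoint is serviced once ++ * every qh->interval (micro)frames.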
*/ ++ hcd_to_bus(dwc_otg_hcd_to_hcd(hcd))->bandwidth_allocated += qh->usecs / qh->interval; ++ if (qh->ep_type == USB_ENDPOINT_XFER_INT) { ++ hcd_to_bus(dwc_otg_hcd_to_hcd(hcd))->bandwidth_int_reqs++; ++ DWC_DEBUGPL(DBG_HCD, "Scheduled intr: qh %p, usecs %d, period %d\n", ++ qh, qh->usecs, qh->interval); ++ } else { ++ hcd_to_bus(dwc_otg_hcd_to_hcd(hcd))->bandwidth_isoc_reqs++; ++ DWC_DEBUGPL(DBG_HCD, "Scheduled isoc: qh %p, usecs %d, period %d\n", ++ qh, qh->usecs, qh->interval); ++ } ++ ++ return status; ++} ++ ++/** ++ * This function adds a QH to either the non periodic or periodic schedule if ++ * it is not already in the schedule. If the QH is already in the schedule, no ++ * action is taken. ++ * ++ * @return 0 if successful, negative error code otherwise. ++ */ ++int dwc_otg_hcd_qh_add (dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh) ++{ ++ unsigned long flags; ++ int status = 0; ++ ++ SPIN_LOCK_IRQSAVE(&hcd->lock, flags) ++ ++ if (!list_empty(&qh->qh_list_entry)) { ++ /* QH already in a schedule. */ ++ goto done; ++ } ++ ++ /* Add the new QH to the appropriate schedule */ ++ if (dwc_qh_is_non_per(qh)) { ++ /* Always start in the inactive schedule. */ ++ list_add_tail(&qh->qh_list_entry, &hcd->non_periodic_sched_inactive); ++ } else { ++ status = schedule_periodic(hcd, qh); ++ } ++ ++ done: ++ SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags) ++ ++ return status; ++} ++ ++/** ++ * Removes an interrupt or isochronous transfer from the periodic schedule. ++ * ++ * @param hcd The HCD state structure for the DWC OTG controller. ++ * @param qh QH for the periodic transfer. ++ */ ++static void deschedule_periodic(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh) ++{ ++ list_del_init(&qh->qh_list_entry); ++ ++ /* Release the periodic channel reservation. */ ++ hcd->periodic_channels--; ++ ++ /* Update claimed usecs per (micro)frame. */ ++ hcd->periodic_usecs -= qh->usecs; ++ ++ /* Update average periodic bandwidth claimed and # periodic reqs for usbfs. */ ++ hcd_to_bus(dwc_otg_hcd_to_hcd(hcd))->bandwidth_allocated -= qh->usecs / qh->interval; ++ ++ if (qh->ep_type == USB_ENDPOINT_XFER_INT) { ++ hcd_to_bus(dwc_otg_hcd_to_hcd(hcd))->bandwidth_int_reqs--; ++ DWC_DEBUGPL(DBG_HCD, "Descheduled intr: qh %p, usecs %d, period %d\n", ++ qh, qh->usecs, qh->interval); ++ } else { ++ hcd_to_bus(dwc_otg_hcd_to_hcd(hcd))->bandwidth_isoc_reqs--; ++ DWC_DEBUGPL(DBG_HCD, "Descheduled isoc: qh %p, usecs %d, period %d\n", ++ qh, qh->usecs, qh->interval); ++ } ++} ++ ++/** ++ * Removes a QH from either the non-periodic or periodic schedule. Memory is ++ * not freed. ++ * ++ * @param[in] hcd The HCD state structure. ++ * @param[in] qh QH to remove from schedule. */ ++void dwc_otg_hcd_qh_remove (dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh) ++{ ++ unsigned long flags; ++ ++ SPIN_LOCK_IRQSAVE(&hcd->lock, flags); ++ ++ if (list_empty(&qh->qh_list_entry)) { ++ /* QH is not in a schedule. */ ++ goto done; ++ } ++ ++ if (dwc_qh_is_non_per(qh)) { ++ if (hcd->non_periodic_qh_ptr == &qh->qh_list_entry) { ++ hcd->non_periodic_qh_ptr = hcd->non_periodic_qh_ptr->next; ++ } ++ list_del_init(&qh->qh_list_entry); ++ } else { ++ deschedule_periodic(hcd, qh); ++ } ++ ++ done: ++ SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags) ++} ++ ++/** ++ * Deactivates a QH. For non-periodic QHs, removes the QH from the active ++ * non-periodic schedule. The QH is added to the inactive non-periodic ++ * schedule if any QTDs are still attached to the QH. ++ * ++ * For periodic QHs, the QH is removed from the periodic queued schedule. 
If ++ * there are any QTDs still attached to the QH, the QH is added to either the ++ * periodic inactive schedule or the periodic ready schedule and its next ++ * scheduled frame is calculated. The QH is placed in the ready schedule if ++ * the scheduled frame has been reached already. Otherwise it's placed in the ++ * inactive schedule. If there are no QTDs attached to the QH, the QH is ++ * completely removed from the periodic schedule. ++ */ ++void dwc_otg_hcd_qh_deactivate(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh, int sched_next_periodic_split) ++{ ++ unsigned long flags; ++ SPIN_LOCK_IRQSAVE(&hcd->lock, flags); ++ ++ if (dwc_qh_is_non_per(qh)) { ++ dwc_otg_hcd_qh_remove(hcd, qh); ++ if (!list_empty(&qh->qtd_list)) { ++ /* Add back to inactive non-periodic schedule. */ ++ dwc_otg_hcd_qh_add(hcd, qh); ++ } ++ } else { ++ uint16_t frame_number = dwc_otg_hcd_get_frame_number(dwc_otg_hcd_to_hcd(hcd)); ++ ++ if (qh->do_split) { ++ /* Schedule the next continuing periodic split transfer */ ++ if (sched_next_periodic_split) { ++ ++ qh->sched_frame = frame_number; ++ if (dwc_frame_num_le(frame_number, ++ dwc_frame_num_inc(qh->start_split_frame, 1))) { ++ /* ++ * Allow one frame to elapse after start ++ * split microframe before scheduling ++ * complete split, but DONT if we are ++ * doing the next start split in the ++ * same frame for an ISOC out. ++ */ ++ if ((qh->ep_type != USB_ENDPOINT_XFER_ISOC) || (qh->ep_is_in != 0)) { ++ qh->sched_frame = dwc_frame_num_inc(qh->sched_frame, 1); ++ } ++ } ++ } else { ++ qh->sched_frame = dwc_frame_num_inc(qh->start_split_frame, ++ qh->interval); ++ if (dwc_frame_num_le(qh->sched_frame, frame_number)) { ++ qh->sched_frame = frame_number; ++ } ++ qh->sched_frame |= 0x7; ++ qh->start_split_frame = qh->sched_frame; ++ } ++ } else { ++ qh->sched_frame = dwc_frame_num_inc(qh->sched_frame, qh->interval); ++ if (dwc_frame_num_le(qh->sched_frame, frame_number)) { ++ qh->sched_frame = frame_number; ++ } ++ } ++ ++ if (list_empty(&qh->qtd_list)) { ++ dwc_otg_hcd_qh_remove(hcd, qh); ++ } else { ++ /* ++ * Remove from periodic_sched_queued and move to ++ * appropriate queue. ++ */ ++ if (qh->sched_frame == frame_number) { ++ list_move(&qh->qh_list_entry, ++ &hcd->periodic_sched_ready); ++ } else { ++ list_move(&qh->qh_list_entry, ++ &hcd->periodic_sched_inactive); ++ } ++ } ++ } ++ ++ SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags); ++} ++ ++/** ++ * This function allocates and initializes a QTD. ++ * ++ * @param[in] urb The URB to create a QTD from. Each URB-QTD pair will end up ++ * pointing to each other so each pair should have a unique correlation. ++ * ++ * @return Returns pointer to the newly allocated QTD, or NULL on error. */ ++dwc_otg_qtd_t *dwc_otg_hcd_qtd_create (struct urb *urb) ++{ ++ dwc_otg_qtd_t *qtd; ++ ++ qtd = dwc_otg_hcd_qtd_alloc (); ++ if (qtd == NULL) { ++ return NULL; ++ } ++ ++ dwc_otg_hcd_qtd_init (qtd, urb); ++ return qtd; ++} ++ ++/** ++ * Initializes a QTD structure. ++ * ++ * @param[in] qtd The QTD to initialize. ++ * @param[in] urb The URB to use for initialization. */ ++void dwc_otg_hcd_qtd_init (dwc_otg_qtd_t *qtd, struct urb *urb) ++{ ++ memset (qtd, 0, sizeof (dwc_otg_qtd_t)); ++ qtd->urb = urb; ++ if (usb_pipecontrol(urb->pipe)) { ++ /* ++ * The only time the QTD data toggle is used is on the data ++ * phase of control transfers. This phase always starts with ++ * DATA1. 
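++ * ++ * (Per the USB 2.0 spec the data stage begins with DATA1 and toggles on ++ * each packet, and the status stage always uses DATA1; for other ++ * transfer types the toggle lives in the QH instead, see ++ * save_data_toggle().)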
++ */ ++ qtd->data_toggle = DWC_OTG_HC_PID_DATA1; ++ qtd->control_phase = DWC_OTG_CONTROL_SETUP; ++ } ++ ++ /* start split */ ++ qtd->complete_split = 0; ++ qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_ALL; ++ qtd->isoc_split_offset = 0; ++ ++ /* Store the qtd ptr in the urb to reference what QTD. */ ++ urb->hcpriv = qtd; ++ return; ++} ++ ++/** ++ * This function adds a QTD to the QTD-list of a QH. It will find the correct ++ * QH to place the QTD into. If it does not find a QH, then it will create a ++ * new QH. If the QH to which the QTD is added is not currently scheduled, it ++ * is placed into the proper schedule based on its EP type. ++ * ++ * @param[in] qtd The QTD to add ++ * @param[in] dwc_otg_hcd The DWC HCD structure ++ * ++ * @return 0 if successful, negative error code otherwise. ++ */ ++int dwc_otg_hcd_qtd_add (dwc_otg_qtd_t *qtd, ++ dwc_otg_hcd_t *dwc_otg_hcd) ++{ ++ struct usb_host_endpoint *ep; ++ dwc_otg_qh_t *qh; ++ unsigned long flags; ++ int retval = 0; ++ ++ struct urb *urb = qtd->urb; ++ ++ SPIN_LOCK_IRQSAVE(&dwc_otg_hcd->lock, flags); ++ ++ /* ++ * Get the QH which holds the QTD-list to insert to. Create QH if it ++ * doesn't exist. ++ */ ++ ep = dwc_urb_to_endpoint(urb); ++ qh = (dwc_otg_qh_t *)ep->hcpriv; ++ if (qh == NULL) { ++ qh = dwc_otg_hcd_qh_create (dwc_otg_hcd, urb); ++ if (qh == NULL) { ++ goto done; ++ } ++ ep->hcpriv = qh; ++ } ++ ++ retval = dwc_otg_hcd_qh_add(dwc_otg_hcd, qh); ++ if (retval == 0) { ++ list_add_tail(&qtd->qtd_list_entry, &qh->qtd_list); ++ } ++ ++ done: ++ SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags); ++ ++ return retval; ++} ++ ++#endif /* DWC_DEVICE_ONLY */ +--- /dev/null ++++ b/drivers/usb/dwc_otg/dwc_otg_pcd.c +@@ -0,0 +1,2523 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_pcd.c $ ++ * $Revision: 1.5 $ ++ * $Date: 2008-11-27 09:21:25 $ ++ * $Change: 1115682 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. 
IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++#ifndef DWC_HOST_ONLY ++ ++/** @file ++ * This file implements the Peripheral Controller Driver. ++ * ++ * The Peripheral Controller Driver (PCD) is responsible for ++ * translating requests from the Function Driver into the appropriate ++ * actions on the DWC_otg controller. It isolates the Function Driver ++ * from the specifics of the controller by providing an API to the ++ * Function Driver. ++ * ++ * The Peripheral Controller Driver for Linux will implement the ++ * Gadget API, so that the existing Gadget drivers can be used. ++ * (Gadget Driver is the Linux terminology for a Function Driver.) ++ * ++ * The Linux Gadget API is defined in the header file ++ * . The USB EP operations API is ++ * defined in the structure usb_ep_ops and the USB ++ * Controller API is defined in the structure ++ * usb_gadget_ops. ++ * ++ * An important function of the PCD is managing interrupts generated ++ * by the DWC_otg controller. The implementation of the DWC_otg device ++ * mode interrupt service routines is in dwc_otg_pcd_intr.c. ++ * ++ * @todo Add Device Mode test modes (Test J mode, Test K mode, etc). ++ * @todo Does it work when the request size is greater than DEPTSIZ ++ * transfer size ++ * ++ */ ++ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21) ++# include ++#else ++# include ++#endif ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24) ++#include ++#else ++#include ++#endif ++ ++#include "dwc_otg_driver.h" ++#include "dwc_otg_pcd.h" ++ ++ ++/** ++ * Static PCD pointer for use in usb_gadget_register_driver and ++ * usb_gadget_unregister_driver. Initialized in dwc_otg_pcd_init. ++ */ ++static dwc_otg_pcd_t *s_pcd = 0; ++ ++ ++/* Display the contents of the buffer */ ++extern void dump_msg(const u8 *buf, unsigned int length); ++ ++ ++/** ++ * This function completes a request. It call's the request call back. ++ */ ++void dwc_otg_request_done(dwc_otg_pcd_ep_t *ep, dwc_otg_pcd_request_t *req, ++ int status) ++{ ++ unsigned stopped = ep->stopped; ++ ++ DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, ep); ++ list_del_init(&req->queue); ++ ++ if (req->req.status == -EINPROGRESS) { ++ req->req.status = status; ++ } else { ++ status = req->req.status; ++ } ++ ++ /* don't modify queue heads during completion callback */ ++ ep->stopped = 1; ++ SPIN_UNLOCK(&ep->pcd->lock); ++ req->req.complete(&ep->ep, &req->req); ++ SPIN_LOCK(&ep->pcd->lock); ++ ++ if (ep->pcd->request_pending > 0) { ++ --ep->pcd->request_pending; ++ } ++ ++ ep->stopped = stopped; ++} ++ ++/** ++ * This function terminates all the requsts in the EP request queue. ++ */ ++void dwc_otg_request_nuke(dwc_otg_pcd_ep_t *ep) ++{ ++ dwc_otg_pcd_request_t *req; ++ ++ ep->stopped = 1; ++ ++ /* called with irqs blocked?? 
*/ ++ while (!list_empty(&ep->queue)) { ++ req = list_entry(ep->queue.next, dwc_otg_pcd_request_t, ++ queue); ++ dwc_otg_request_done(ep, req, -ESHUTDOWN); ++ } ++} ++ ++/* USB Endpoint Operations */ ++/* ++ * The following sections briefly describe the behavior of the Gadget ++ * API endpoint operations implemented in the DWC_otg driver ++ * software. Detailed descriptions of the generic behavior of each of ++ * these functions can be found in the Linux header file ++ * include/linux/usb_gadget.h. ++ * ++ * The Gadget API provides wrapper functions for each of the function ++ * pointers defined in usb_ep_ops. The Gadget Driver calls the wrapper ++ * function, which then calls the underlying PCD function. The ++ * following sections are named according to the wrapper ++ * functions. Within each section, the corresponding DWC_otg PCD ++ * function name is specified. ++ * ++ */ ++ ++/** ++ * This function assigns periodic Tx FIFO to an periodic EP ++ * in shared Tx FIFO mode ++ */ ++static uint32_t assign_perio_tx_fifo(dwc_otg_core_if_t *core_if) ++{ ++ uint32_t PerTxMsk = 1; ++ int i; ++ for(i = 0; i < core_if->hwcfg4.b.num_dev_perio_in_ep; ++i) ++ { ++ if((PerTxMsk & core_if->p_tx_msk) == 0) { ++ core_if->p_tx_msk |= PerTxMsk; ++ return i + 1; ++ } ++ PerTxMsk <<= 1; ++ } ++ return 0; ++} ++/** ++ * This function releases periodic Tx FIFO ++ * in shared Tx FIFO mode ++ */ ++static void release_perio_tx_fifo(dwc_otg_core_if_t *core_if, uint32_t fifo_num) ++{ ++ core_if->p_tx_msk = (core_if->p_tx_msk & (1 << (fifo_num - 1))) ^ core_if->p_tx_msk; ++} ++/** ++ * This function assigns periodic Tx FIFO to an periodic EP ++ * in shared Tx FIFO mode ++ */ ++static uint32_t assign_tx_fifo(dwc_otg_core_if_t *core_if) ++{ ++ uint32_t TxMsk = 1; ++ int i; ++ ++ for(i = 0; i < core_if->hwcfg4.b.num_in_eps; ++i) ++ { ++ if((TxMsk & core_if->tx_msk) == 0) { ++ core_if->tx_msk |= TxMsk; ++ return i + 1; ++ } ++ TxMsk <<= 1; ++ } ++ return 0; ++} ++/** ++ * This function releases periodic Tx FIFO ++ * in shared Tx FIFO mode ++ */ ++static void release_tx_fifo(dwc_otg_core_if_t *core_if, uint32_t fifo_num) ++{ ++ core_if->tx_msk = (core_if->tx_msk & (1 << (fifo_num - 1))) ^ core_if->tx_msk; ++} ++ ++/** ++ * This function is called by the Gadget Driver for each EP to be ++ * configured for the current configuration (SET_CONFIGURATION). ++ * ++ * This function initializes the dwc_otg_ep_t data structure, and then ++ * calls dwc_otg_ep_activate. ++ */ ++static int dwc_otg_pcd_ep_enable(struct usb_ep *usb_ep, ++ const struct usb_endpoint_descriptor *ep_desc) ++{ ++ dwc_otg_pcd_ep_t *ep = 0; ++ dwc_otg_pcd_t *pcd = 0; ++ unsigned long flags; ++ ++ DWC_DEBUGPL(DBG_PCDV,"%s(%p,%p)\n", __func__, usb_ep, ep_desc); ++ ++ ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep); ++ if (!usb_ep || !ep_desc || ep->desc || ++ ep_desc->bDescriptorType != USB_DT_ENDPOINT) { ++ DWC_WARN("%s, bad ep or descriptor\n", __func__); ++ return -EINVAL; ++ } ++ if (ep == &ep->pcd->ep0) { ++ DWC_WARN("%s, bad ep(0)\n", __func__); ++ return -EINVAL; ++ } ++ ++ /* Check FIFO size? 
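assign_perio_tx_fifo() and assign_tx_fifo() above hand out Tx FIFO numbers by scanning a bitmask of already-claimed FIFOs for the first clear bit, and the release_*() helpers clear that bit again (the XOR-after-AND form used above is equivalent to clearing the bit when it is set). A minimal stand-alone model of that allocator follows; the function names and the FIFO count are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Claim the first free FIFO out of num_fifos; returns a 1-based FIFO
 * number, or 0 when every FIFO is already taken. */
static uint32_t claim_fifo(uint32_t *mask, int num_fifos)
{
	uint32_t bit = 1;
	int i;

	for (i = 0; i < num_fifos; ++i) {
		if ((*mask & bit) == 0) {
			*mask |= bit;
			return i + 1;
		}
		bit <<= 1;
	}
	return 0;
}

/* Release a previously claimed FIFO (fifo_num == 0 means "none"). */
static void release_fifo(uint32_t *mask, uint32_t fifo_num)
{
	if (fifo_num)
		*mask &= ~(1u << (fifo_num - 1));
}

int main(void)
{
	uint32_t mask = 0;
	uint32_t a = claim_fifo(&mask, 4);	/* 1 */
	uint32_t b = claim_fifo(&mask, 4);	/* 2 */

	release_fifo(&mask, a);
	printf("a=%u b=%u next=%u mask=0x%x\n", a, b, claim_fifo(&mask, 4), mask);
	return 0;
}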
*/ ++ if (!ep_desc->wMaxPacketSize) { ++ DWC_WARN("%s, bad %s maxpacket\n", __func__, usb_ep->name); ++ return -ERANGE; ++ } ++ ++ pcd = ep->pcd; ++ if (!pcd->driver || pcd->gadget.speed == USB_SPEED_UNKNOWN) { ++ DWC_WARN("%s, bogus device state\n", __func__); ++ return -ESHUTDOWN; ++ } ++ ++ SPIN_LOCK_IRQSAVE(&pcd->lock, flags); ++ ++ ep->desc = ep_desc; ++ ep->ep.maxpacket = le16_to_cpu (ep_desc->wMaxPacketSize); ++ ++ /* ++ * Activate the EP ++ */ ++ ep->stopped = 0; ++ ++ ep->dwc_ep.is_in = (USB_DIR_IN & ep_desc->bEndpointAddress) != 0; ++ ep->dwc_ep.maxpacket = ep->ep.maxpacket; ++ ++ ep->dwc_ep.type = ep_desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; ++ ++ if(ep->dwc_ep.is_in) { ++ if(!pcd->otg_dev->core_if->en_multiple_tx_fifo) { ++ ep->dwc_ep.tx_fifo_num = 0; ++ ++ if (ep->dwc_ep.type == USB_ENDPOINT_XFER_ISOC) { ++ /* ++ * if ISOC EP then assign a Periodic Tx FIFO. ++ */ ++ ep->dwc_ep.tx_fifo_num = assign_perio_tx_fifo(pcd->otg_dev->core_if); ++ } ++ } else { ++ /* ++ * if Dedicated FIFOs mode is on then assign a Tx FIFO. ++ */ ++ ep->dwc_ep.tx_fifo_num = assign_tx_fifo(pcd->otg_dev->core_if); ++ ++ } ++ } ++ /* Set initial data PID. */ ++ if (ep->dwc_ep.type == USB_ENDPOINT_XFER_BULK) { ++ ep->dwc_ep.data_pid_start = 0; ++ } ++ ++ DWC_DEBUGPL(DBG_PCD, "Activate %s-%s: type=%d, mps=%d desc=%p\n", ++ ep->ep.name, (ep->dwc_ep.is_in ?"IN":"OUT"), ++ ep->dwc_ep.type, ep->dwc_ep.maxpacket, ep->desc); ++ ++ if(ep->dwc_ep.type != USB_ENDPOINT_XFER_ISOC) { ++ ep->dwc_ep.desc_addr = dwc_otg_ep_alloc_desc_chain(&ep->dwc_ep.dma_desc_addr, MAX_DMA_DESC_CNT); ++ } ++ ++ dwc_otg_ep_activate(GET_CORE_IF(pcd), &ep->dwc_ep); ++ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags); ++ ++ return 0; ++} ++ ++/** ++ * This function is called when an EP is disabled due to disconnect or ++ * change in configuration. Any pending requests will terminate with a ++ * status of -ESHUTDOWN. ++ * ++ * This function modifies the dwc_otg_ep_t data structure for this EP, ++ * and then calls dwc_otg_ep_deactivate. ++ */ ++static int dwc_otg_pcd_ep_disable(struct usb_ep *usb_ep) ++{ ++ dwc_otg_pcd_ep_t *ep; ++ dwc_otg_pcd_t *pcd = 0; ++ unsigned long flags; ++ ++ DWC_DEBUGPL(DBG_PCDV,"%s(%p)\n", __func__, usb_ep); ++ ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep); ++ if (!usb_ep || !ep->desc) { ++ DWC_DEBUGPL(DBG_PCD, "%s, %s not enabled\n", __func__, ++ usb_ep ? ep->ep.name : NULL); ++ return -EINVAL; ++ } ++ ++ SPIN_LOCK_IRQSAVE(&ep->pcd->lock, flags); ++ ++ dwc_otg_request_nuke(ep); ++ ++ dwc_otg_ep_deactivate(GET_CORE_IF(ep->pcd), &ep->dwc_ep); ++ ep->desc = 0; ++ ep->stopped = 1; ++ ++ if(ep->dwc_ep.is_in) { ++ dwc_otg_flush_tx_fifo(GET_CORE_IF(ep->pcd), ep->dwc_ep.tx_fifo_num); ++ release_perio_tx_fifo(GET_CORE_IF(ep->pcd), ep->dwc_ep.tx_fifo_num); ++ release_tx_fifo(GET_CORE_IF(ep->pcd), ep->dwc_ep.tx_fifo_num); ++ } ++ ++ /* Free DMA Descriptors */ ++ pcd = ep->pcd; ++ ++ SPIN_UNLOCK_IRQRESTORE(&ep->pcd->lock, flags); ++ ++ if(ep->dwc_ep.type != USB_ENDPOINT_XFER_ISOC && ep->dwc_ep.desc_addr) { ++ dwc_otg_ep_free_desc_chain(ep->dwc_ep.desc_addr, ep->dwc_ep.dma_desc_addr, MAX_DMA_DESC_CNT); ++ } ++ ++ DWC_DEBUGPL(DBG_PCD, "%s disabled\n", usb_ep->name); ++ return 0; ++} ++ ++ ++/** ++ * This function allocates a request object to use with the specified ++ * endpoint. ++ * ++ * @param ep The endpoint to be used with with the request ++ * @param gfp_flags the GFP_* flags to use. 
++ */ ++static struct usb_request *dwc_otg_pcd_alloc_request(struct usb_ep *ep, ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ int gfp_flags ++#else ++ gfp_t gfp_flags ++#endif ++ ) ++{ ++ dwc_otg_pcd_request_t *req; ++ ++ DWC_DEBUGPL(DBG_PCDV,"%s(%p,%d)\n", __func__, ep, gfp_flags); ++ if (0 == ep) { ++ DWC_WARN("%s() %s\n", __func__, "Invalid EP!\n"); ++ return 0; ++ } ++ req = kmalloc(sizeof(dwc_otg_pcd_request_t), gfp_flags); ++ if (0 == req) { ++ DWC_WARN("%s() %s\n", __func__, ++ "request allocation failed!\n"); ++ return 0; ++ } ++ memset(req, 0, sizeof(dwc_otg_pcd_request_t)); ++ req->req.dma = DMA_ADDR_INVALID; ++ INIT_LIST_HEAD(&req->queue); ++ return &req->req; ++} ++ ++/** ++ * This function frees a request object. ++ * ++ * @param ep The endpoint associated with the request ++ * @param req The request being freed ++ */ ++static void dwc_otg_pcd_free_request(struct usb_ep *ep, ++ struct usb_request *req) ++{ ++ dwc_otg_pcd_request_t *request; ++ DWC_DEBUGPL(DBG_PCDV,"%s(%p,%p)\n", __func__, ep, req); ++ ++ if (0 == ep || 0 == req) { ++ DWC_WARN("%s() %s\n", __func__, ++ "Invalid ep or req argument!\n"); ++ return; ++ } ++ ++ request = container_of(req, dwc_otg_pcd_request_t, req); ++ kfree(request); ++} ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) ++/** ++ * This function allocates an I/O buffer to be used for a transfer ++ * to/from the specified endpoint. ++ * ++ * @param usb_ep The endpoint to be used with with the request ++ * @param bytes The desired number of bytes for the buffer ++ * @param dma Pointer to the buffer's DMA address; must be valid ++ * @param gfp_flags the GFP_* flags to use. ++ * @return address of a new buffer or null is buffer could not be allocated. ++ */ ++static void *dwc_otg_pcd_alloc_buffer(struct usb_ep *usb_ep, unsigned bytes, ++ dma_addr_t *dma, ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ int gfp_flags ++#else ++ gfp_t gfp_flags ++#endif ++ ) ++{ ++ void *buf; ++ dwc_otg_pcd_ep_t *ep; ++ dwc_otg_pcd_t *pcd = 0; ++ ++ ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep); ++ pcd = ep->pcd; ++ ++ DWC_DEBUGPL(DBG_PCDV,"%s(%p,%d,%p,%0x)\n", __func__, usb_ep, bytes, ++ dma, gfp_flags); ++ ++ /* Check dword alignment */ ++ if ((bytes & 0x3UL) != 0) { ++ DWC_WARN("%s() Buffer size is not a multiple of" ++ "DWORD size (%d)",__func__, bytes); ++ } ++ ++ if (GET_CORE_IF(pcd)->dma_enable) { ++ buf = dma_alloc_coherent (NULL, bytes, dma, gfp_flags); ++ } ++ else { ++ buf = kmalloc(bytes, gfp_flags); ++ } ++ ++ /* Check dword alignment */ ++ if (((int)buf & 0x3UL) != 0) { ++ DWC_WARN("%s() Buffer is not DWORD aligned (%p)", ++ __func__, buf); ++ } ++ ++ return buf; ++} ++ ++/** ++ * This function frees an I/O buffer that was allocated by alloc_buffer. ++ * ++ * @param usb_ep the endpoint associated with the buffer ++ * @param buf address of the buffer ++ * @param dma The buffer's DMA address ++ * @param bytes The number of bytes of the buffer ++ */ ++static void dwc_otg_pcd_free_buffer(struct usb_ep *usb_ep, void *buf, ++ dma_addr_t dma, unsigned bytes) ++{ ++ dwc_otg_pcd_ep_t *ep; ++ dwc_otg_pcd_t *pcd = 0; ++ ++ ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep); ++ pcd = ep->pcd; ++ ++ DWC_DEBUGPL(DBG_PCDV,"%s(%p,%p,%0x,%d)\n", __func__, ep, buf, dma, bytes); ++ ++ if (GET_CORE_IF(pcd)->dma_enable) { ++ dma_free_coherent (NULL, bytes, buf, dma); ++ } ++ else { ++ kfree(buf); ++ } ++} ++#endif ++ ++ ++/** ++ * This function is used to submit an I/O Request to an EP. 
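dwc_otg_pcd_alloc_request()/free_request() above follow the usual gadget-stack idiom: the generic struct usb_request is embedded in a driver-private request together with a list head for per-endpoint queuing, and container_of() recovers the wrapper from the pointer the gadget driver hands back. The stand-alone sketch below shows only that layout; the structure and field names are stand-ins, not the driver's definitions.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Reduced stand-ins for the kernel types involved. */
struct usb_request { unsigned length; int status; };
struct list_head   { struct list_head *next, *prev; };

/* Driver-private wrapper: the gadget layer only ever sees &req->req. */
struct pcd_request {
	struct usb_request req;		/* generic part returned by alloc_request */
	struct list_head   queue;	/* per-endpoint queue linkage */
};

int main(void)
{
	struct pcd_request *priv = calloc(1, sizeof(*priv));
	struct usb_request *pub  = &priv->req;

	/* What queue/dequeue/free_request do to get the wrapper back: */
	struct pcd_request *again = container_of(pub, struct pcd_request, req);

	printf("round-trip ok: %d\n", again == priv);
	free(priv);
	return 0;
}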
++ * ++ * - When the request completes the request's completion callback ++ * is called to return the request to the driver. ++ * - An EP, except control EPs, may have multiple requests ++ * pending. ++ * - Once submitted the request cannot be examined or modified. ++ * - Each request is turned into one or more packets. ++ * - A BULK EP can queue any amount of data; the transfer is ++ * packetized. ++ * - Zero length Packets are specified with the request 'zero' ++ * flag. ++ */ ++static int dwc_otg_pcd_ep_queue(struct usb_ep *usb_ep, ++ struct usb_request *usb_req, ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ int gfp_flags ++#else ++ gfp_t gfp_flags ++#endif ++ ) ++{ ++ int prevented = 0; ++ dwc_otg_pcd_request_t *req; ++ dwc_otg_pcd_ep_t *ep; ++ dwc_otg_pcd_t *pcd; ++ unsigned long flags = 0; ++ dwc_otg_core_if_t *_core_if; ++ ++ DWC_DEBUGPL(DBG_PCDV,"%s(%p,%p,%d)\n", ++ __func__, usb_ep, usb_req, gfp_flags); ++ ++ req = container_of(usb_req, dwc_otg_pcd_request_t, req); ++ if (!usb_req || !usb_req->complete || !usb_req->buf || ++ !list_empty(&req->queue)) { ++ DWC_WARN("%s, bad params\n", __func__); ++ return -EINVAL; ++ } ++ ++ ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep); ++ if (!usb_ep || (!ep->desc && ep->dwc_ep.num != 0)/* || ep->stopped != 0*/) { ++ DWC_WARN("%s, bad ep\n", __func__); ++ return -EINVAL; ++ } ++ ++ pcd = ep->pcd; ++ if (!pcd->driver || pcd->gadget.speed == USB_SPEED_UNKNOWN) { ++ DWC_DEBUGPL(DBG_PCDV, "gadget.speed=%d\n", pcd->gadget.speed); ++ DWC_WARN("%s, bogus device state\n", __func__); ++ return -ESHUTDOWN; ++ } ++ ++ ++ DWC_DEBUGPL(DBG_PCD, "%s queue req %p, len %d buf %p\n", ++ usb_ep->name, usb_req, usb_req->length, usb_req->buf); ++ ++ if (!GET_CORE_IF(pcd)->core_params->opt) { ++ if (ep->dwc_ep.num != 0) { ++ DWC_ERROR("%s queue req %p, len %d buf %p\n", ++ usb_ep->name, usb_req, usb_req->length, usb_req->buf); ++ } ++ } ++ ++ SPIN_LOCK_IRQSAVE(&ep->pcd->lock, flags); ++ ++ ++ /************************************************** ++ New add by kaiker ,for DMA mode bug ++ ************************************************/ ++ //by kaiker ,for RT3052 USB OTG device mode ++ ++ _core_if = GET_CORE_IF(pcd); ++ ++ if (_core_if->dma_enable) ++ { ++ usb_req->dma = virt_to_phys((void *)usb_req->buf); ++ ++ if(ep->dwc_ep.is_in) ++ { ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)) || defined(CONFIG_MIPS) ++ if(usb_req->length) ++ dma_cache_wback_inv((unsigned long)usb_req->buf, usb_req->length + 2); ++#endif ++ } ++ } ++ ++ ++ ++#if defined(DEBUG) & defined(VERBOSE) ++ dump_msg(usb_req->buf, usb_req->length); ++#endif ++ ++ usb_req->status = -EINPROGRESS; ++ usb_req->actual = 0; ++ ++ /* ++ * For EP0 IN without premature status, zlp is required? ++ */ ++ if (ep->dwc_ep.num == 0 && ep->dwc_ep.is_in) { ++ DWC_DEBUGPL(DBG_PCDV, "%s-OUT ZLP\n", usb_ep->name); ++ //_req->zero = 1; ++ } ++ ++ /* Start the transfer */ ++ if (list_empty(&ep->queue) && !ep->stopped) { ++ /* EP0 Transfer? 
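The queue path above derives the device address with virt_to_phys() and an explicit MIPS cache write-back before starting an IN transfer. For contrast, the generic kernel idiom for the same step is the streaming DMA API, sketched below; dev, buf and is_in are placeholders supplied by the caller, and this is an illustration of the alternative, not what the driver does.

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Map a request buffer for the controller; direction follows the
 * endpoint: IN transfers are read by the device, OUT transfers are
 * written by it. */
static dma_addr_t map_req_buffer(struct device *dev, void *buf,
				 size_t len, int is_in)
{
	return dma_map_single(dev, buf, len,
			      is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
}

static void unmap_req_buffer(struct device *dev, dma_addr_t dma,
			     size_t len, int is_in)
{
	dma_unmap_single(dev, dma, len,
			 is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
}

The explicit dma_cache_wback_inv() used above serves the same purpose on MIPS but ties the code to one architecture; the mapping API keeps the cache handling inside the DMA layer.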
*/ ++ if (ep->dwc_ep.num == 0) { ++ switch (pcd->ep0state) { ++ case EP0_IN_DATA_PHASE: ++ DWC_DEBUGPL(DBG_PCD, ++ "%s ep0: EP0_IN_DATA_PHASE\n", ++ __func__); ++ break; ++ ++ case EP0_OUT_DATA_PHASE: ++ DWC_DEBUGPL(DBG_PCD, ++ "%s ep0: EP0_OUT_DATA_PHASE\n", ++ __func__); ++ if (pcd->request_config) { ++ /* Complete STATUS PHASE */ ++ ep->dwc_ep.is_in = 1; ++ pcd->ep0state = EP0_IN_STATUS_PHASE; ++ } ++ break; ++ ++ case EP0_IN_STATUS_PHASE: ++ DWC_DEBUGPL(DBG_PCD, ++ "%s ep0: EP0_IN_STATUS_PHASE\n", ++ __func__); ++ break; ++ ++ default: ++ DWC_DEBUGPL(DBG_ANY, "ep0: odd state %d\n", ++ pcd->ep0state); ++ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags); ++ return -EL2HLT; ++ } ++ ep->dwc_ep.dma_addr = usb_req->dma; ++ ep->dwc_ep.start_xfer_buff = usb_req->buf; ++ ep->dwc_ep.xfer_buff = usb_req->buf; ++ ep->dwc_ep.xfer_len = usb_req->length; ++ ep->dwc_ep.xfer_count = 0; ++ ep->dwc_ep.sent_zlp = 0; ++ ep->dwc_ep.total_len = ep->dwc_ep.xfer_len; ++ ++ if(usb_req->zero) { ++ if((ep->dwc_ep.xfer_len % ep->dwc_ep.maxpacket == 0) ++ && (ep->dwc_ep.xfer_len != 0)) { ++ ep->dwc_ep.sent_zlp = 1; ++ } ++ ++ } ++ ++ dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd), &ep->dwc_ep); ++ } ++ else { ++ ++ uint32_t max_transfer = GET_CORE_IF(ep->pcd)->core_params->max_transfer_size; ++ ++ /* Setup and start the Transfer */ ++ ep->dwc_ep.dma_addr = usb_req->dma; ++ ep->dwc_ep.start_xfer_buff = usb_req->buf; ++ ep->dwc_ep.xfer_buff = usb_req->buf; ++ ep->dwc_ep.sent_zlp = 0; ++ ep->dwc_ep.total_len = usb_req->length; ++ ep->dwc_ep.xfer_len = 0; ++ ep->dwc_ep.xfer_count = 0; ++ ++ if(max_transfer > MAX_TRANSFER_SIZE) { ++ ep->dwc_ep.maxxfer = max_transfer - (max_transfer % ep->dwc_ep.maxpacket); ++ } else { ++ ep->dwc_ep.maxxfer = max_transfer; ++ } ++ ++ if(usb_req->zero) { ++ if((ep->dwc_ep.total_len % ep->dwc_ep.maxpacket == 0) ++ && (ep->dwc_ep.total_len != 0)) { ++ ep->dwc_ep.sent_zlp = 1; ++ } ++ ++ } ++ dwc_otg_ep_start_transfer(GET_CORE_IF(pcd), &ep->dwc_ep); ++ } ++ } ++ ++ if ((req != 0) || prevented) { ++ ++pcd->request_pending; ++ list_add_tail(&req->queue, &ep->queue); ++ if (ep->dwc_ep.is_in && ep->stopped && !(GET_CORE_IF(pcd)->dma_enable)) { ++ /** @todo NGS Create a function for this. */ ++ diepmsk_data_t diepmsk = { .d32 = 0}; ++ diepmsk.b.intktxfemp = 1; ++ if(&GET_CORE_IF(pcd)->multiproc_int_enable) { ++ dwc_modify_reg32(&GET_CORE_IF(pcd)->dev_if->dev_global_regs->diepeachintmsk[ep->dwc_ep.num], ++ 0, diepmsk.d32); ++ } else { ++ dwc_modify_reg32(&GET_CORE_IF(pcd)->dev_if->dev_global_regs->diepmsk, 0, diepmsk.d32); ++ } ++ } ++ } ++ ++ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags); ++ return 0; ++} ++ ++/** ++ * This function cancels an I/O request from an EP. ++ */ ++static int dwc_otg_pcd_ep_dequeue(struct usb_ep *usb_ep, ++ struct usb_request *usb_req) ++{ ++ dwc_otg_pcd_request_t *req; ++ dwc_otg_pcd_ep_t *ep; ++ dwc_otg_pcd_t *pcd; ++ unsigned long flags; ++ ++ DWC_DEBUGPL(DBG_PCDV,"%s(%p,%p)\n", __func__, usb_ep, usb_req); ++ ++ ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep); ++ if (!usb_ep || !usb_req || (!ep->desc && ep->dwc_ep.num != 0)) { ++ DWC_WARN("%s, bad argument\n", __func__); ++ return -EINVAL; ++ } ++ pcd = ep->pcd; ++ if (!pcd->driver || pcd->gadget.speed == USB_SPEED_UNKNOWN) { ++ DWC_WARN("%s, bogus device state\n", __func__); ++ return -ESHUTDOWN; ++ } ++ ++ SPIN_LOCK_IRQSAVE(&pcd->lock, flags); ++ DWC_DEBUGPL(DBG_PCDV, "%s %s %s %p\n", __func__, usb_ep->name, ++ ep->dwc_ep.is_in ? 
"IN" : "OUT", ++ usb_req); ++ ++ /* make sure it's actually queued on this endpoint */ ++ list_for_each_entry(req, &ep->queue, queue) ++ { ++ if (&req->req == usb_req) { ++ break; ++ } ++ } ++ ++ if (&req->req != usb_req) { ++ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags); ++ return -EINVAL; ++ } ++ ++ if (!list_empty(&req->queue)) { ++ dwc_otg_request_done(ep, req, -ECONNRESET); ++ } ++ else { ++ req = 0; ++ } ++ ++ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags); ++ ++ return req ? 0 : -EOPNOTSUPP; ++} ++ ++/** ++ * usb_ep_set_halt stalls an endpoint. ++ * ++ * usb_ep_clear_halt clears an endpoint halt and resets its data ++ * toggle. ++ * ++ * Both of these functions are implemented with the same underlying ++ * function. The behavior depends on the value argument. ++ * ++ * @param[in] usb_ep the Endpoint to halt or clear halt. ++ * @param[in] value ++ * - 0 means clear_halt. ++ * - 1 means set_halt, ++ * - 2 means clear stall lock flag. ++ * - 3 means set stall lock flag. ++ */ ++static int dwc_otg_pcd_ep_set_halt(struct usb_ep *usb_ep, int value) ++{ ++ int retval = 0; ++ unsigned long flags; ++ dwc_otg_pcd_ep_t *ep = 0; ++ ++ ++ DWC_DEBUGPL(DBG_PCD,"HALT %s %d\n", usb_ep->name, value); ++ ++ ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep); ++ ++ if (!usb_ep || (!ep->desc && ep != &ep->pcd->ep0) || ++ ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) { ++ DWC_WARN("%s, bad ep\n", __func__); ++ return -EINVAL; ++ } ++ ++ SPIN_LOCK_IRQSAVE(&ep->pcd->lock, flags); ++ if (!list_empty(&ep->queue)) { ++ DWC_WARN("%s() %s XFer In process\n", __func__, usb_ep->name); ++ retval = -EAGAIN; ++ } ++ else if (value == 0) { ++ dwc_otg_ep_clear_stall(ep->pcd->otg_dev->core_if, ++ &ep->dwc_ep); ++ } ++ else if(value == 1) { ++ if (ep->dwc_ep.is_in == 1 && ep->pcd->otg_dev->core_if->dma_desc_enable) { ++ dtxfsts_data_t txstatus; ++ fifosize_data_t txfifosize; ++ ++ txfifosize.d32 = dwc_read_reg32(&ep->pcd->otg_dev->core_if->core_global_regs->dptxfsiz_dieptxf[ep->dwc_ep.tx_fifo_num]); ++ txstatus.d32 = dwc_read_reg32(&ep->pcd->otg_dev->core_if->dev_if->in_ep_regs[ep->dwc_ep.num]->dtxfsts); ++ ++ if(txstatus.b.txfspcavail < txfifosize.b.depth) { ++ DWC_WARN("%s() %s Data In Tx Fifo\n", __func__, usb_ep->name); ++ retval = -EAGAIN; ++ } ++ else { ++ if (ep->dwc_ep.num == 0) { ++ ep->pcd->ep0state = EP0_STALL; ++ } ++ ++ ep->stopped = 1; ++ dwc_otg_ep_set_stall(ep->pcd->otg_dev->core_if, ++ &ep->dwc_ep); ++ } ++ } ++ else { ++ if (ep->dwc_ep.num == 0) { ++ ep->pcd->ep0state = EP0_STALL; ++ } ++ ++ ep->stopped = 1; ++ dwc_otg_ep_set_stall(ep->pcd->otg_dev->core_if, ++ &ep->dwc_ep); ++ } ++ } ++ else if (value == 2) { ++ ep->dwc_ep.stall_clear_flag = 0; ++ } ++ else if (value == 3) { ++ ep->dwc_ep.stall_clear_flag = 1; ++ } ++ ++ SPIN_UNLOCK_IRQRESTORE(&ep->pcd->lock, flags); ++ return retval; ++} ++ ++/** ++ * This function allocates a DMA Descriptor chain for the Endpoint ++ * buffer to be used for a transfer to/from the specified endpoint. ++ */ ++dwc_otg_dma_desc_t* dwc_otg_ep_alloc_desc_chain(uint32_t * dma_desc_addr, uint32_t count) ++{ ++ ++ return dma_alloc_coherent(NULL, count * sizeof(dwc_otg_dma_desc_t), dma_desc_addr, GFP_KERNEL); ++} ++ ++/** ++ * This function frees a DMA Descriptor chain that was allocated by ep_alloc_desc. 
++ */ ++void dwc_otg_ep_free_desc_chain(dwc_otg_dma_desc_t* desc_addr, uint32_t dma_desc_addr, uint32_t count) ++{ ++ dma_free_coherent(NULL, count * sizeof(dwc_otg_dma_desc_t), desc_addr, dma_desc_addr); ++} ++ ++#ifdef DWC_EN_ISOC ++ ++/** ++ * This function initializes a descriptor chain for Isochronous transfer ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param dwc_ep The EP to start the transfer on. ++ * ++ */ ++void dwc_otg_iso_ep_start_ddma_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *dwc_ep) ++{ ++ ++ dsts_data_t dsts = { .d32 = 0}; ++ depctl_data_t depctl = { .d32 = 0 }; ++ volatile uint32_t *addr; ++ int i, j; ++ ++ if(dwc_ep->is_in) ++ dwc_ep->desc_cnt = dwc_ep->buf_proc_intrvl / dwc_ep->bInterval; ++ else ++ dwc_ep->desc_cnt = dwc_ep->buf_proc_intrvl * dwc_ep->pkt_per_frm / dwc_ep->bInterval; ++ ++ ++ /** Allocate descriptors for double buffering */ ++ dwc_ep->iso_desc_addr = dwc_otg_ep_alloc_desc_chain(&dwc_ep->iso_dma_desc_addr,dwc_ep->desc_cnt*2); ++ if(dwc_ep->desc_addr) { ++ DWC_WARN("%s, can't allocate DMA descriptor chain\n", __func__); ++ return; ++ } ++ ++ dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts); ++ ++ /** ISO OUT EP */ ++ if(dwc_ep->is_in == 0) { ++ desc_sts_data_t sts = { .d32 =0 }; ++ dwc_otg_dma_desc_t* dma_desc = dwc_ep->iso_desc_addr; ++ dma_addr_t dma_ad; ++ uint32_t data_per_desc; ++ dwc_otg_dev_out_ep_regs_t *out_regs = ++ core_if->dev_if->out_ep_regs[dwc_ep->num]; ++ int offset; ++ ++ addr = &core_if->dev_if->out_ep_regs[dwc_ep->num]->doepctl; ++ dma_ad = (dma_addr_t)dwc_read_reg32(&(out_regs->doepdma)); ++ ++ /** Buffer 0 descriptors setup */ ++ dma_ad = dwc_ep->dma_addr0; ++ ++ sts.b_iso_out.bs = BS_HOST_READY; ++ sts.b_iso_out.rxsts = 0; ++ sts.b_iso_out.l = 0; ++ sts.b_iso_out.sp = 0; ++ sts.b_iso_out.ioc = 0; ++ sts.b_iso_out.pid = 0; ++ sts.b_iso_out.framenum = 0; ++ ++ offset = 0; ++ for(i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm; i+= dwc_ep->pkt_per_frm) ++ { ++ ++ for(j = 0; j < dwc_ep->pkt_per_frm; ++j) ++ { ++ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? ++ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; ++ ++ data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0; ++ sts.b_iso_out.rxbytes = data_per_desc; ++ writel((uint32_t)dma_ad, &dma_desc->buf); ++ writel(sts.d32, &dma_desc->status); ++ ++ offset += data_per_desc; ++ dma_desc ++; ++ (uint32_t)dma_ad += data_per_desc; ++ } ++ } ++ ++ for(j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) ++ { ++ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? ++ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; ++ data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0; ++ sts.b_iso_out.rxbytes = data_per_desc; ++ writel((uint32_t)dma_ad, &dma_desc->buf); ++ writel(sts.d32, &dma_desc->status); ++ ++ offset += data_per_desc; ++ dma_desc ++; ++ (uint32_t)dma_ad += data_per_desc; ++ } ++ ++ sts.b_iso_out.ioc = 1; ++ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? ++ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; ++ data_per_desc += (data_per_desc % 4) ? 
(4 - data_per_desc % 4):0; ++ sts.b_iso_out.rxbytes = data_per_desc; ++ ++ writel((uint32_t)dma_ad, &dma_desc->buf); ++ writel(sts.d32, &dma_desc->status); ++ dma_desc ++; ++ ++ /** Buffer 1 descriptors setup */ ++ sts.b_iso_out.ioc = 0; ++ dma_ad = dwc_ep->dma_addr1; ++ ++ offset = 0; ++ for(i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm; i+= dwc_ep->pkt_per_frm) ++ { ++ for(j = 0; j < dwc_ep->pkt_per_frm; ++j) ++ { ++ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? ++ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; ++ data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0; ++ sts.b_iso_out.rxbytes = data_per_desc; ++ writel((uint32_t)dma_ad, &dma_desc->buf); ++ writel(sts.d32, &dma_desc->status); ++ ++ offset += data_per_desc; ++ dma_desc ++; ++ (uint32_t)dma_ad += data_per_desc; ++ } ++ } ++ for(j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) ++ { ++ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? ++ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; ++ data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0; ++ sts.b_iso_out.rxbytes = data_per_desc; ++ writel((uint32_t)dma_ad, &dma_desc->buf); ++ writel(sts.d32, &dma_desc->status); ++ ++ offset += data_per_desc; ++ dma_desc ++; ++ (uint32_t)dma_ad += data_per_desc; ++ } ++ ++ sts.b_iso_out.ioc = 1; ++ sts.b_iso_out.l = 1; ++ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? ++ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; ++ data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0; ++ sts.b_iso_out.rxbytes = data_per_desc; ++ ++ writel((uint32_t)dma_ad, &dma_desc->buf); ++ writel(sts.d32, &dma_desc->status); ++ ++ dwc_ep->next_frame = 0; ++ ++ /** Write dma_ad into DOEPDMA register */ ++ dwc_write_reg32(&(out_regs->doepdma),(uint32_t)dwc_ep->iso_dma_desc_addr); ++ ++ } ++ /** ISO IN EP */ ++ else { ++ desc_sts_data_t sts = { .d32 =0 }; ++ dwc_otg_dma_desc_t* dma_desc = dwc_ep->iso_desc_addr; ++ dma_addr_t dma_ad; ++ dwc_otg_dev_in_ep_regs_t *in_regs = ++ core_if->dev_if->in_ep_regs[dwc_ep->num]; ++ unsigned int frmnumber; ++ fifosize_data_t txfifosize,rxfifosize; ++ ++ txfifosize.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[dwc_ep->num]->dtxfsts); ++ rxfifosize.d32 = dwc_read_reg32(&core_if->core_global_regs->grxfsiz); ++ ++ ++ addr = &core_if->dev_if->in_ep_regs[dwc_ep->num]->diepctl; ++ ++ dma_ad = dwc_ep->dma_addr0; ++ ++ dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts); ++ ++ sts.b_iso_in.bs = BS_HOST_READY; ++ sts.b_iso_in.txsts = 0; ++ sts.b_iso_in.sp = (dwc_ep->data_per_frame % dwc_ep->maxpacket)? 
1 : 0; ++ sts.b_iso_in.ioc = 0; ++ sts.b_iso_in.pid = dwc_ep->pkt_per_frm; ++ ++ ++ frmnumber = dwc_ep->next_frame; ++ ++ sts.b_iso_in.framenum = frmnumber; ++ sts.b_iso_in.txbytes = dwc_ep->data_per_frame; ++ sts.b_iso_in.l = 0; ++ ++ /** Buffer 0 descriptors setup */ ++ for(i = 0; i < dwc_ep->desc_cnt - 1; i++) ++ { ++ writel((uint32_t)dma_ad, &dma_desc->buf); ++ writel(sts.d32, &dma_desc->status); ++ dma_desc ++; ++ ++ (uint32_t)dma_ad += dwc_ep->data_per_frame; ++ sts.b_iso_in.framenum += dwc_ep->bInterval; ++ } ++ ++ sts.b_iso_in.ioc = 1; ++ writel((uint32_t)dma_ad, &dma_desc->buf); ++ writel(sts.d32, &dma_desc->status); ++ ++dma_desc; ++ ++ /** Buffer 1 descriptors setup */ ++ sts.b_iso_in.ioc = 0; ++ dma_ad = dwc_ep->dma_addr1; ++ ++ for(i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm; i+= dwc_ep->pkt_per_frm) ++ { ++ writel((uint32_t)dma_ad, &dma_desc->buf); ++ writel(sts.d32, &dma_desc->status); ++ dma_desc ++; ++ ++ (uint32_t)dma_ad += dwc_ep->data_per_frame; ++ sts.b_iso_in.framenum += dwc_ep->bInterval; ++ ++ sts.b_iso_in.ioc = 0; ++ } ++ sts.b_iso_in.ioc = 1; ++ sts.b_iso_in.l = 1; ++ ++ writel((uint32_t)dma_ad, &dma_desc->buf); ++ writel(sts.d32, &dma_desc->status); ++ ++ dwc_ep->next_frame = sts.b_iso_in.framenum + dwc_ep->bInterval; ++ ++ /** Write dma_ad into diepdma register */ ++ dwc_write_reg32(&(in_regs->diepdma),(uint32_t)dwc_ep->iso_dma_desc_addr); ++ } ++ /** Enable endpoint, clear nak */ ++ depctl.d32 = 0; ++ depctl.b.epena = 1; ++ depctl.b.usbactep = 1; ++ depctl.b.cnak = 1; ++ ++ dwc_modify_reg32(addr, depctl.d32,depctl.d32); ++ depctl.d32 = dwc_read_reg32(addr); ++} ++ ++/** ++ * This function initializes a descriptor chain for Isochronous transfer ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param ep The EP to start the transfer on. ++ * ++ */ ++ ++void dwc_otg_iso_ep_start_buf_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) ++{ ++ depctl_data_t depctl = { .d32 = 0 }; ++ volatile uint32_t *addr; ++ ++ ++ if(ep->is_in) { ++ addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl; ++ } else { ++ addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl; ++ } ++ ++ ++ if(core_if->dma_enable == 0 || core_if->dma_desc_enable!= 0) { ++ return; ++ } else { ++ deptsiz_data_t deptsiz = { .d32 = 0 }; ++ ++ ep->xfer_len = ep->data_per_frame * ep->buf_proc_intrvl / ep->bInterval; ++ ep->pkt_cnt = (ep->xfer_len - 1 + ep->maxpacket) / ++ ep->maxpacket; ++ ep->xfer_count = 0; ++ ep->xfer_buff = (ep->proc_buf_num) ? ep->xfer_buff1 : ep->xfer_buff0; ++ ep->dma_addr = (ep->proc_buf_num) ? ep->dma_addr1 : ep->dma_addr0; ++ ++ if(ep->is_in) { ++ /* Program the transfer size and packet count ++ * as follows: xfersize = N * maxpacket + ++ * short_packet pktcnt = N + (short_packet ++ * exist ? 
1 : 0) ++ */ ++ deptsiz.b.mc = ep->pkt_per_frm; ++ deptsiz.b.xfersize = ep->xfer_len; ++ deptsiz.b.pktcnt = ++ (ep->xfer_len - 1 + ep->maxpacket) / ++ ep->maxpacket; ++ dwc_write_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dieptsiz, deptsiz.d32); ++ ++ /* Write the DMA register */ ++ dwc_write_reg32 (&(core_if->dev_if->in_ep_regs[ep->num]->diepdma), (uint32_t)ep->dma_addr); ++ ++ } else { ++ deptsiz.b.pktcnt = ++ (ep->xfer_len + (ep->maxpacket - 1)) / ++ ep->maxpacket; ++ deptsiz.b.xfersize = deptsiz.b.pktcnt * ep->maxpacket; ++ ++ dwc_write_reg32(&core_if->dev_if->out_ep_regs[ep->num]->doeptsiz, deptsiz.d32); ++ ++ /* Write the DMA register */ ++ dwc_write_reg32 (&(core_if->dev_if->out_ep_regs[ep->num]->doepdma), (uint32_t)ep->dma_addr); ++ ++ } ++ /** Enable endpoint, clear nak */ ++ depctl.d32 = 0; ++ dwc_modify_reg32(addr, depctl.d32,depctl.d32); ++ ++ depctl.b.epena = 1; ++ depctl.b.cnak = 1; ++ ++ dwc_modify_reg32(addr, depctl.d32,depctl.d32); ++ } ++} ++ ++ ++/** ++ * This function does the setup for a data transfer for an EP and ++ * starts the transfer. For an IN transfer, the packets will be ++ * loaded into the appropriate Tx FIFO in the ISR. For OUT transfers, ++ * the packets are unloaded from the Rx FIFO in the ISR. the ISR. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param ep The EP to start the transfer on. ++ */ ++ ++void dwc_otg_iso_ep_start_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) ++{ ++ if(core_if->dma_enable) { ++ if(core_if->dma_desc_enable) { ++ if(ep->is_in) { ++ ep->desc_cnt = ep->pkt_cnt / ep->pkt_per_frm; ++ } else { ++ ep->desc_cnt = ep->pkt_cnt; ++ } ++ dwc_otg_iso_ep_start_ddma_transfer(core_if, ep); ++ } else { ++ if(core_if->pti_enh_enable) { ++ dwc_otg_iso_ep_start_buf_transfer(core_if, ep); ++ } else { ++ ep->cur_pkt_addr = (ep->proc_buf_num) ? ep->xfer_buff1 : ep->xfer_buff0; ++ ep->cur_pkt_dma_addr = (ep->proc_buf_num) ? ep->dma_addr1 : ep->dma_addr0; ++ dwc_otg_iso_ep_start_frm_transfer(core_if, ep); ++ } ++ } ++ } else { ++ ep->cur_pkt_addr = (ep->proc_buf_num) ? ep->xfer_buff1 : ep->xfer_buff0; ++ ep->cur_pkt_dma_addr = (ep->proc_buf_num) ? ep->dma_addr1 : ep->dma_addr0; ++ dwc_otg_iso_ep_start_frm_transfer(core_if, ep); ++ } ++} ++ ++/** ++ * This function does the setup for a data transfer for an EP and ++ * starts the transfer. For an IN transfer, the packets will be ++ * loaded into the appropriate Tx FIFO in the ISR. For OUT transfers, ++ * the packets are unloaded from the Rx FIFO in the ISR. the ISR. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param ep The EP to start the transfer on. 
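The buffer-DMA path above programs DEPTSIZ following the comment's rule: xfersize = N * maxpacket + short_packet and pktcnt = N + (short_packet ? 1 : 0), i.e. a ceiling division for IN, and a round-up to whole packets for OUT because the core only receives full packets. The small stand-alone helper below just works that arithmetic through with made-up numbers.

#include <stdint.h>
#include <stdio.h>

/* IN direction: send exactly xfer_len bytes; pktcnt is the ceiling of
 * xfer_len / maxpacket (the zero-length special case is ignored here). */
static uint32_t in_pktcnt(uint32_t xfer_len, uint32_t maxpacket)
{
	return (xfer_len - 1 + maxpacket) / maxpacket;
}

/* OUT direction: whole packets only, so xfersize is rounded up to a
 * multiple of maxpacket. */
static void out_sizes(uint32_t xfer_len, uint32_t maxpacket,
		      uint32_t *pktcnt, uint32_t *xfersize)
{
	*pktcnt   = (xfer_len + maxpacket - 1) / maxpacket;
	*xfersize = *pktcnt * maxpacket;
}

int main(void)
{
	uint32_t pktcnt, xfersize;

	printf("IN  1000 B @ 512: pktcnt=%u\n", in_pktcnt(1000, 512));
	out_sizes(1000, 512, &pktcnt, &xfersize);
	printf("OUT 1000 B @ 512: pktcnt=%u xfersize=%u\n", pktcnt, xfersize);
	return 0;
}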
++ */ ++ ++void dwc_otg_iso_ep_stop_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) ++{ ++ depctl_data_t depctl = { .d32 = 0 }; ++ volatile uint32_t *addr; ++ ++ if(ep->is_in == 1) { ++ addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl; ++ } ++ else { ++ addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl; ++ } ++ ++ /* disable the ep */ ++ depctl.d32 = dwc_read_reg32(addr); ++ ++ depctl.b.epdis = 1; ++ depctl.b.snak = 1; ++ ++ dwc_write_reg32(addr, depctl.d32); ++ ++ if(core_if->dma_desc_enable && ++ ep->iso_desc_addr && ep->iso_dma_desc_addr) { ++ dwc_otg_ep_free_desc_chain(ep->iso_desc_addr,ep->iso_dma_desc_addr,ep->desc_cnt * 2); ++ } ++ ++ /* reset varibales */ ++ ep->dma_addr0 = 0; ++ ep->dma_addr1 = 0; ++ ep->xfer_buff0 = 0; ++ ep->xfer_buff1 = 0; ++ ep->data_per_frame = 0; ++ ep->data_pattern_frame = 0; ++ ep->sync_frame = 0; ++ ep->buf_proc_intrvl = 0; ++ ep->bInterval = 0; ++ ep->proc_buf_num = 0; ++ ep->pkt_per_frm = 0; ++ ep->pkt_per_frm = 0; ++ ep->desc_cnt = 0; ++ ep->iso_desc_addr = 0; ++ ep->iso_dma_desc_addr = 0; ++} ++ ++ ++/** ++ * This function is used to submit an ISOC Transfer Request to an EP. ++ * ++ * - Every time a sync period completes the request's completion callback ++ * is called to provide data to the gadget driver. ++ * - Once submitted the request cannot be modified. ++ * - Each request is turned into periodic data packets untill ISO ++ * Transfer is stopped.. ++ */ ++static int dwc_otg_pcd_iso_ep_start(struct usb_ep *usb_ep, struct usb_iso_request *req, ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ int gfp_flags ++#else ++ gfp_t gfp_flags ++#endif ++) ++{ ++ dwc_otg_pcd_ep_t *ep; ++ dwc_otg_pcd_t *pcd; ++ dwc_ep_t *dwc_ep; ++ unsigned long flags = 0; ++ int32_t frm_data; ++ dwc_otg_core_if_t *core_if; ++ dcfg_data_t dcfg; ++ dsts_data_t dsts; ++ ++ ++ if (!req || !req->process_buffer || !req->buf0 || !req->buf1) { ++ DWC_WARN("%s, bad params\n", __func__); ++ return -EINVAL; ++ } ++ ++ ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep); ++ ++ if (!usb_ep || !ep->desc || ep->dwc_ep.num == 0) { ++ DWC_WARN("%s, bad ep\n", __func__); ++ return -EINVAL; ++ } ++ ++ pcd = ep->pcd; ++ core_if = GET_CORE_IF(pcd); ++ ++ dcfg.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dcfg); ++ ++ if (!pcd->driver || pcd->gadget.speed == USB_SPEED_UNKNOWN) { ++ DWC_DEBUGPL(DBG_PCDV, "gadget.speed=%d\n", pcd->gadget.speed); ++ DWC_WARN("%s, bogus device state\n", __func__); ++ return -ESHUTDOWN; ++ } ++ ++ SPIN_LOCK_IRQSAVE(&ep->pcd->lock, flags); ++ ++ dwc_ep = &ep->dwc_ep; ++ ++ if(ep->iso_req) { ++ DWC_WARN("%s, iso request in progress\n", __func__); ++ } ++ req->status = -EINPROGRESS; ++ ++ dwc_ep->dma_addr0 = req->dma0; ++ dwc_ep->dma_addr1 = req->dma1; ++ ++ dwc_ep->xfer_buff0 = req->buf0; ++ dwc_ep->xfer_buff1 = req->buf1; ++ ++ ep->iso_req = req; ++ ++ dwc_ep->data_per_frame = req->data_per_frame; ++ ++ /** @todo - pattern data support is to be implemented in the future */ ++ dwc_ep->data_pattern_frame = req->data_pattern_frame; ++ dwc_ep->sync_frame = req->sync_frame; ++ ++ dwc_ep->buf_proc_intrvl = req->buf_proc_intrvl; ++ ++ dwc_ep->bInterval = 1 << (ep->desc->bInterval - 1); ++ ++ dwc_ep->proc_buf_num = 0; ++ ++ dwc_ep->pkt_per_frm = 0; ++ frm_data = ep->dwc_ep.data_per_frame; ++ while(frm_data > 0) { ++ dwc_ep->pkt_per_frm++; ++ frm_data -= ep->dwc_ep.maxpacket; ++ } ++ ++ dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts); ++ ++ if(req->flags & USB_REQ_ISO_ASAP) { ++ dwc_ep->next_frame = dsts.b.soffn + 1; ++ 
if(dwc_ep->bInterval != 1){ ++ dwc_ep->next_frame = dwc_ep->next_frame + (dwc_ep->bInterval - 1 - dwc_ep->next_frame % dwc_ep->bInterval); ++ } ++ } else { ++ dwc_ep->next_frame = req->start_frame; ++ } ++ ++ ++ if(!core_if->pti_enh_enable) { ++ dwc_ep->pkt_cnt = dwc_ep->buf_proc_intrvl * dwc_ep->pkt_per_frm / dwc_ep->bInterval; ++ } else { ++ dwc_ep->pkt_cnt = ++ (dwc_ep->data_per_frame * (dwc_ep->buf_proc_intrvl / dwc_ep->bInterval) ++ - 1 + dwc_ep->maxpacket) / dwc_ep->maxpacket; ++ } ++ ++ if(core_if->dma_desc_enable) { ++ dwc_ep->desc_cnt = ++ dwc_ep->buf_proc_intrvl * dwc_ep->pkt_per_frm / dwc_ep->bInterval; ++ } ++ ++ dwc_ep->pkt_info = kmalloc(sizeof(iso_pkt_info_t) * dwc_ep->pkt_cnt, GFP_KERNEL); ++ if(!dwc_ep->pkt_info) { ++ return -ENOMEM; ++ } ++ if(core_if->pti_enh_enable) { ++ memset(dwc_ep->pkt_info, 0, sizeof(iso_pkt_info_t) * dwc_ep->pkt_cnt); ++ } ++ ++ dwc_ep->cur_pkt = 0; ++ ++ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags); ++ ++ dwc_otg_iso_ep_start_transfer(core_if, dwc_ep); ++ ++ return 0; ++} ++ ++/** ++ * This function stops ISO EP Periodic Data Transfer. ++ */ ++static int dwc_otg_pcd_iso_ep_stop(struct usb_ep *usb_ep, struct usb_iso_request *req) ++{ ++ dwc_otg_pcd_ep_t *ep; ++ dwc_otg_pcd_t *pcd; ++ dwc_ep_t *dwc_ep; ++ unsigned long flags; ++ ++ ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep); ++ ++ if (!usb_ep || !ep->desc || ep->dwc_ep.num == 0) { ++ DWC_WARN("%s, bad ep\n", __func__); ++ return -EINVAL; ++ } ++ ++ pcd = ep->pcd; ++ ++ if (!pcd->driver || pcd->gadget.speed == USB_SPEED_UNKNOWN) { ++ DWC_DEBUGPL(DBG_PCDV, "gadget.speed=%d\n", pcd->gadget.speed); ++ DWC_WARN("%s, bogus device state\n", __func__); ++ return -ESHUTDOWN; ++ } ++ ++ dwc_ep = &ep->dwc_ep; ++ ++ dwc_otg_iso_ep_stop_transfer(GET_CORE_IF(pcd), dwc_ep); ++ ++ kfree(dwc_ep->pkt_info); ++ ++ SPIN_LOCK_IRQSAVE(&pcd->lock, flags); ++ ++ if(ep->iso_req != req) { ++ return -EINVAL; ++ } ++ ++ req->status = -ECONNRESET; ++ ++ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags); ++ ++ ++ ep->iso_req = 0; ++ ++ return 0; ++} ++ ++/** ++ * This function is used for perodical data exchnage between PCD and gadget drivers. 
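iso_ep_start() above derives pkt_per_frm with a subtract-and-count loop that is simply a ceiling division of data_per_frame by maxpacket, and then sizes pkt_cnt for one buffer-processing interval (the non-PTI branch). The sketch below restates that arithmetic; all of the numbers are made up for illustration.

#include <stdint.h>
#include <stdio.h>

static uint32_t div_round_up(uint32_t n, uint32_t d)
{
	return (n + d - 1) / d;
}

int main(void)
{
	uint32_t maxpacket       = 1024;	/* assumed HS ISO endpoint */
	uint32_t data_per_frame  = 3000;	/* bytes per (micro)frame */
	uint32_t buf_proc_intrvl = 8;		/* frames covered by one buffer */
	uint32_t bInterval       = 1;		/* endpoint interval in frames */

	/* Equivalent of the driver's while (frm_data > 0) loop. */
	uint32_t pkt_per_frm = div_round_up(data_per_frame, maxpacket);

	/* Packets queued per processing interval in the non-PTI case. */
	uint32_t pkt_cnt = buf_proc_intrvl * pkt_per_frm / bInterval;

	printf("pkt_per_frm=%u pkt_cnt=%u\n", pkt_per_frm, pkt_cnt);
	return 0;
}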
++ * for Isochronous EPs ++ * ++ * - Every time a sync period completes this function is called to ++ * perform data exchange between PCD and gadget ++ */ ++void dwc_otg_iso_buffer_done(dwc_otg_pcd_ep_t *ep, dwc_otg_pcd_iso_request_t *req) ++{ ++ int i; ++ struct usb_gadget_iso_packet_descriptor *iso_packet; ++ dwc_ep_t *dwc_ep; ++ ++ dwc_ep = &ep->dwc_ep; ++ ++ if(ep->iso_req->status == -ECONNRESET) { ++ DWC_PRINT("Device has already disconnected\n"); ++ /*Device has been disconnected*/ ++ return; ++ } ++ ++ if(dwc_ep->proc_buf_num != 0) { ++ iso_packet = ep->iso_req->iso_packet_desc0; ++ } ++ ++ else { ++ iso_packet = ep->iso_req->iso_packet_desc1; ++ } ++ ++ /* Fill in ISOC packets descriptors & pass to gadget driver*/ ++ ++ for(i = 0; i < dwc_ep->pkt_cnt; ++i) { ++ iso_packet[i].status = dwc_ep->pkt_info[i].status; ++ iso_packet[i].offset = dwc_ep->pkt_info[i].offset; ++ iso_packet[i].actual_length = dwc_ep->pkt_info[i].length; ++ dwc_ep->pkt_info[i].status = 0; ++ dwc_ep->pkt_info[i].offset = 0; ++ dwc_ep->pkt_info[i].length = 0; ++ } ++ ++ /* Call callback function to process data buffer */ ++ ep->iso_req->status = 0;/* success */ ++ ++ SPIN_UNLOCK(&ep->pcd->lock); ++ ep->iso_req->process_buffer(&ep->ep, ep->iso_req); ++ SPIN_LOCK(&ep->pcd->lock); ++} ++ ++ ++static struct usb_iso_request *dwc_otg_pcd_alloc_iso_request(struct usb_ep *ep,int packets, ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ++ int gfp_flags ++#else ++ gfp_t gfp_flags ++#endif ++) ++{ ++ struct usb_iso_request *pReq = NULL; ++ uint32_t req_size; ++ ++ ++ req_size = sizeof(struct usb_iso_request); ++ req_size += (2 * packets * (sizeof(struct usb_gadget_iso_packet_descriptor))); ++ ++ ++ pReq = kmalloc(req_size, gfp_flags); ++ if (!pReq) { ++ DWC_WARN("%s, can't allocate Iso Request\n", __func__); ++ return 0; ++ } ++ pReq->iso_packet_desc0 = (void*) (pReq + 1); ++ ++ pReq->iso_packet_desc1 = pReq->iso_packet_desc0 + packets; ++ ++ return pReq; ++} ++ ++static void dwc_otg_pcd_free_iso_request(struct usb_ep *ep, struct usb_iso_request *req) ++{ ++ kfree(req); ++} ++ ++static struct usb_isoc_ep_ops dwc_otg_pcd_ep_ops = ++{ ++ .ep_ops = ++ { ++ .enable = dwc_otg_pcd_ep_enable, ++ .disable = dwc_otg_pcd_ep_disable, ++ ++ .alloc_request = dwc_otg_pcd_alloc_request, ++ .free_request = dwc_otg_pcd_free_request, ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) ++ .alloc_buffer = dwc_otg_pcd_alloc_buffer, ++ .free_buffer = dwc_otg_pcd_free_buffer, ++#endif ++ ++ .queue = dwc_otg_pcd_ep_queue, ++ .dequeue = dwc_otg_pcd_ep_dequeue, ++ ++ .set_halt = dwc_otg_pcd_ep_set_halt, ++ .fifo_status = 0, ++ .fifo_flush = 0, ++ }, ++ .iso_ep_start = dwc_otg_pcd_iso_ep_start, ++ .iso_ep_stop = dwc_otg_pcd_iso_ep_stop, ++ .alloc_iso_request = dwc_otg_pcd_alloc_iso_request, ++ .free_iso_request = dwc_otg_pcd_free_iso_request, ++}; ++ ++#else ++ ++ ++static struct usb_ep_ops dwc_otg_pcd_ep_ops = ++{ ++ .enable = dwc_otg_pcd_ep_enable, ++ .disable = dwc_otg_pcd_ep_disable, ++ ++ .alloc_request = dwc_otg_pcd_alloc_request, ++ .free_request = dwc_otg_pcd_free_request, ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) ++ .alloc_buffer = dwc_otg_pcd_alloc_buffer, ++ .free_buffer = dwc_otg_pcd_free_buffer, ++#endif ++ ++ .queue = dwc_otg_pcd_ep_queue, ++ .dequeue = dwc_otg_pcd_ep_dequeue, ++ ++ .set_halt = dwc_otg_pcd_ep_set_halt, ++ .fifo_status = 0, ++ .fifo_flush = 0, ++ ++ ++}; ++ ++#endif /* DWC_EN_ISOC */ ++/* Gadget Operations */ ++/** ++ * The following gadget operations will be implemented in the DWC_otg ++ * PCD. 
Functions in the API that are not described below are not ++ * implemented. ++ * ++ * The Gadget API provides wrapper functions for each of the function ++ * pointers defined in usb_gadget_ops. The Gadget Driver calls the ++ * wrapper function, which then calls the underlying PCD function. The ++ * following sections are named according to the wrapper functions ++ * (except for ioctl, which doesn't have a wrapper function). Within ++ * each section, the corresponding DWC_otg PCD function name is ++ * specified. ++ * ++ */ ++ ++/** ++ *Gets the USB Frame number of the last SOF. ++ */ ++static int dwc_otg_pcd_get_frame(struct usb_gadget *gadget) ++{ ++ dwc_otg_pcd_t *pcd; ++ ++ DWC_DEBUGPL(DBG_PCDV,"%s(%p)\n", __func__, gadget); ++ ++ if (gadget == 0) { ++ return -ENODEV; ++ } ++ else { ++ pcd = container_of(gadget, dwc_otg_pcd_t, gadget); ++ dwc_otg_get_frame_number(GET_CORE_IF(pcd)); ++ } ++ ++ return 0; ++} ++ ++void dwc_otg_pcd_initiate_srp(dwc_otg_pcd_t *pcd) ++{ ++ uint32_t *addr = (uint32_t *)&(GET_CORE_IF(pcd)->core_global_regs->gotgctl); ++ gotgctl_data_t mem; ++ gotgctl_data_t val; ++ ++ val.d32 = dwc_read_reg32(addr); ++ if (val.b.sesreq) { ++ DWC_ERROR("Session Request Already active!\n"); ++ return; ++ } ++ ++ DWC_NOTICE("Session Request Initated\n"); ++ mem.d32 = dwc_read_reg32(addr); ++ mem.b.sesreq = 1; ++ dwc_write_reg32(addr, mem.d32); ++ ++ /* Start the SRP timer */ ++ dwc_otg_pcd_start_srp_timer(pcd); ++ return; ++} ++ ++void dwc_otg_pcd_remote_wakeup(dwc_otg_pcd_t *pcd, int set) ++{ ++ dctl_data_t dctl = {.d32=0}; ++ volatile uint32_t *addr = &(GET_CORE_IF(pcd)->dev_if->dev_global_regs->dctl); ++ ++ if (dwc_otg_is_device_mode(GET_CORE_IF(pcd))) { ++ if (pcd->remote_wakeup_enable) { ++ if (set) { ++ dctl.b.rmtwkupsig = 1; ++ dwc_modify_reg32(addr, 0, dctl.d32); ++ DWC_DEBUGPL(DBG_PCD, "Set Remote Wakeup\n"); ++ mdelay(1); ++ dwc_modify_reg32(addr, dctl.d32, 0); ++ DWC_DEBUGPL(DBG_PCD, "Clear Remote Wakeup\n"); ++ } ++ else { ++ } ++ } ++ else { ++ DWC_DEBUGPL(DBG_PCD, "Remote Wakeup is disabled\n"); ++ } ++ } ++ return; ++} ++ ++/** ++ * Initiates Session Request Protocol (SRP) to wakeup the host if no ++ * session is in progress. If a session is already in progress, but ++ * the device is suspended, remote wakeup signaling is started. ++ * ++ */ ++static int dwc_otg_pcd_wakeup(struct usb_gadget *gadget) ++{ ++ unsigned long flags; ++ dwc_otg_pcd_t *pcd; ++ dsts_data_t dsts; ++ gotgctl_data_t gotgctl; ++ ++ DWC_DEBUGPL(DBG_PCDV,"%s(%p)\n", __func__, gadget); ++ ++ if (gadget == 0) { ++ return -ENODEV; ++ } ++ else { ++ pcd = container_of(gadget, dwc_otg_pcd_t, gadget); ++ } ++ SPIN_LOCK_IRQSAVE(&pcd->lock, flags); ++ ++ /* ++ * This function starts the Protocol if no session is in progress. If ++ * a session is already in progress, but the device is suspended, ++ * remote wakeup signaling is started. 
++ */ ++ ++ /* Check if valid session */ ++ gotgctl.d32 = dwc_read_reg32(&(GET_CORE_IF(pcd)->core_global_regs->gotgctl)); ++ if (gotgctl.b.bsesvld) { ++ /* Check if suspend state */ ++ dsts.d32 = dwc_read_reg32(&(GET_CORE_IF(pcd)->dev_if->dev_global_regs->dsts)); ++ if (dsts.b.suspsts) { ++ dwc_otg_pcd_remote_wakeup(pcd, 1); ++ } ++ } ++ else { ++ dwc_otg_pcd_initiate_srp(pcd); ++ } ++ ++ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags); ++ return 0; ++} ++ ++static const struct usb_gadget_ops dwc_otg_pcd_ops = ++{ ++ .get_frame = dwc_otg_pcd_get_frame, ++ .wakeup = dwc_otg_pcd_wakeup, ++ // current versions must always be self-powered ++}; ++ ++/** ++ * This function updates the otg values in the gadget structure. ++ */ ++void dwc_otg_pcd_update_otg(dwc_otg_pcd_t *pcd, const unsigned reset) ++{ ++ ++ if (!pcd->gadget.is_otg) ++ return; ++ ++ if (reset) { ++ pcd->b_hnp_enable = 0; ++ pcd->a_hnp_support = 0; ++ pcd->a_alt_hnp_support = 0; ++ } ++ ++ pcd->gadget.b_hnp_enable = pcd->b_hnp_enable; ++ pcd->gadget.a_hnp_support = pcd->a_hnp_support; ++ pcd->gadget.a_alt_hnp_support = pcd->a_alt_hnp_support; ++} ++ ++/** ++ * This function is the top level PCD interrupt handler. ++ */ ++static irqreturn_t dwc_otg_pcd_irq(int irq, void *dev ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) ++ , struct pt_regs *r ++#endif ++ ) ++{ ++ dwc_otg_pcd_t *pcd = dev; ++ int32_t retval = IRQ_NONE; ++ ++ retval = dwc_otg_pcd_handle_intr(pcd); ++ return IRQ_RETVAL(retval); ++} ++ ++/** ++ * PCD Callback function for initializing the PCD when switching to ++ * device mode. ++ * ++ * @param p void pointer to the dwc_otg_pcd_t ++ */ ++static int32_t dwc_otg_pcd_start_cb(void *p) ++{ ++ dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *)p; ++ ++ /* ++ * Initialized the Core for Device mode. ++ */ ++ if (dwc_otg_is_device_mode(GET_CORE_IF(pcd))) { ++ dwc_otg_core_dev_init(GET_CORE_IF(pcd)); ++ } ++ return 1; ++} ++ ++/** ++ * PCD Callback function for stopping the PCD when switching to Host ++ * mode. ++ * ++ * @param p void pointer to the dwc_otg_pcd_t ++ */ ++static int32_t dwc_otg_pcd_stop_cb(void *p) ++{ ++ dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *)p; ++ extern void dwc_otg_pcd_stop(dwc_otg_pcd_t *_pcd); ++ ++ dwc_otg_pcd_stop(pcd); ++ return 1; ++} ++ ++ ++/** ++ * PCD Callback function for notifying the PCD when resuming from ++ * suspend. ++ * ++ * @param p void pointer to the dwc_otg_pcd_t ++ */ ++static int32_t dwc_otg_pcd_suspend_cb(void *p) ++{ ++ dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *)p; ++ ++ if (pcd->driver && pcd->driver->resume) { ++ SPIN_UNLOCK(&pcd->lock); ++ pcd->driver->suspend(&pcd->gadget); ++ SPIN_LOCK(&pcd->lock); ++ } ++ ++ return 1; ++} ++ ++ ++/** ++ * PCD Callback function for notifying the PCD when resuming from ++ * suspend. ++ * ++ * @param p void pointer to the dwc_otg_pcd_t ++ */ ++static int32_t dwc_otg_pcd_resume_cb(void *p) ++{ ++ dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *)p; ++ ++ if (pcd->driver && pcd->driver->resume) { ++ SPIN_UNLOCK(&pcd->lock); ++ pcd->driver->resume(&pcd->gadget); ++ SPIN_LOCK(&pcd->lock); ++ } ++ ++ /* Stop the SRP timeout timer. */ ++ if ((GET_CORE_IF(pcd)->core_params->phy_type != DWC_PHY_TYPE_PARAM_FS) || ++ (!GET_CORE_IF(pcd)->core_params->i2c_enable)) { ++ if (GET_CORE_IF(pcd)->srp_timer_started) { ++ GET_CORE_IF(pcd)->srp_timer_started = 0; ++ del_timer(&pcd->srp_timer); ++ } ++ } ++ return 1; ++} ++ ++ ++/** ++ * PCD Callback structure for handling mode switching. 
++ */ ++static dwc_otg_cil_callbacks_t pcd_callbacks = ++{ ++ .start = dwc_otg_pcd_start_cb, ++ .stop = dwc_otg_pcd_stop_cb, ++ .suspend = dwc_otg_pcd_suspend_cb, ++ .resume_wakeup = dwc_otg_pcd_resume_cb, ++ .p = 0, /* Set at registration */ ++}; ++ ++/** ++ * This function is called when the SRP timer expires. The SRP should ++ * complete within 6 seconds. ++ */ ++static void srp_timeout(unsigned long ptr) ++{ ++ gotgctl_data_t gotgctl; ++ dwc_otg_core_if_t *core_if = (dwc_otg_core_if_t *)ptr; ++ volatile uint32_t *addr = &core_if->core_global_regs->gotgctl; ++ ++ gotgctl.d32 = dwc_read_reg32(addr); ++ ++ core_if->srp_timer_started = 0; ++ ++ if ((core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS) && ++ (core_if->core_params->i2c_enable)) { ++ DWC_PRINT("SRP Timeout\n"); ++ ++ if ((core_if->srp_success) && ++ (gotgctl.b.bsesvld)) { ++ if (core_if->pcd_cb && core_if->pcd_cb->resume_wakeup) { ++ core_if->pcd_cb->resume_wakeup(core_if->pcd_cb->p); ++ } ++ ++ /* Clear Session Request */ ++ gotgctl.d32 = 0; ++ gotgctl.b.sesreq = 1; ++ dwc_modify_reg32(&core_if->core_global_regs->gotgctl, ++ gotgctl.d32, 0); ++ ++ core_if->srp_success = 0; ++ } ++ else { ++ DWC_ERROR("Device not connected/responding\n"); ++ gotgctl.b.sesreq = 0; ++ dwc_write_reg32(addr, gotgctl.d32); ++ } ++ } ++ else if (gotgctl.b.sesreq) { ++ DWC_PRINT("SRP Timeout\n"); ++ ++ DWC_ERROR("Device not connected/responding\n"); ++ gotgctl.b.sesreq = 0; ++ dwc_write_reg32(addr, gotgctl.d32); ++ } ++ else { ++ DWC_PRINT(" SRP GOTGCTL=%0x\n", gotgctl.d32); ++ } ++} ++ ++/** ++ * Start the SRP timer to detect when the SRP does not complete within ++ * 6 seconds. ++ * ++ * @param pcd the pcd structure. ++ */ ++void dwc_otg_pcd_start_srp_timer(dwc_otg_pcd_t *pcd) ++{ ++ struct timer_list *srp_timer = &pcd->srp_timer; ++ GET_CORE_IF(pcd)->srp_timer_started = 1; ++ init_timer(srp_timer); ++ srp_timer->function = srp_timeout; ++ srp_timer->data = (unsigned long)GET_CORE_IF(pcd); ++ srp_timer->expires = jiffies + (HZ*6); ++ add_timer(srp_timer); ++} ++ ++/** ++ * Tasklet ++ * ++ */ ++extern void start_next_request(dwc_otg_pcd_ep_t *ep); ++ ++static void start_xfer_tasklet_func (unsigned long data) ++{ ++ dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t*)data; ++ dwc_otg_core_if_t *core_if = pcd->otg_dev->core_if; ++ ++ int i; ++ depctl_data_t diepctl; ++ ++ DWC_DEBUGPL(DBG_PCDV, "Start xfer tasklet\n"); ++ ++ diepctl.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[0]->diepctl); ++ ++ if (pcd->ep0.queue_sof) { ++ pcd->ep0.queue_sof = 0; ++ start_next_request (&pcd->ep0); ++ // break; ++ } ++ ++ for (i=0; idev_if->num_in_eps; i++) ++ { ++ depctl_data_t diepctl; ++ diepctl.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[i]->diepctl); ++ ++ if (pcd->in_ep[i].queue_sof) { ++ pcd->in_ep[i].queue_sof = 0; ++ start_next_request (&pcd->in_ep[i]); ++ // break; ++ } ++ } ++ ++ return; ++} ++ ++ ++ ++ ++ ++ ++ ++static struct tasklet_struct start_xfer_tasklet = { ++ .next = NULL, ++ .state = 0, ++ .count = ATOMIC_INIT(0), ++ .func = start_xfer_tasklet_func, ++ .data = 0, ++}; ++/** ++ * This function initialized the pcd Dp structures to there default ++ * state. ++ * ++ * @param pcd the pcd structure. 
++ */ ++void dwc_otg_pcd_reinit(dwc_otg_pcd_t *pcd) ++{ ++ static const char * names[] = ++ { ++ ++ "ep0", ++ "ep1in", ++ "ep2in", ++ "ep3in", ++ "ep4in", ++ "ep5in", ++ "ep6in", ++ "ep7in", ++ "ep8in", ++ "ep9in", ++ "ep10in", ++ "ep11in", ++ "ep12in", ++ "ep13in", ++ "ep14in", ++ "ep15in", ++ "ep1out", ++ "ep2out", ++ "ep3out", ++ "ep4out", ++ "ep5out", ++ "ep6out", ++ "ep7out", ++ "ep8out", ++ "ep9out", ++ "ep10out", ++ "ep11out", ++ "ep12out", ++ "ep13out", ++ "ep14out", ++ "ep15out" ++ ++ }; ++ ++ int i; ++ int in_ep_cntr, out_ep_cntr; ++ uint32_t hwcfg1; ++ uint32_t num_in_eps = (GET_CORE_IF(pcd))->dev_if->num_in_eps; ++ uint32_t num_out_eps = (GET_CORE_IF(pcd))->dev_if->num_out_eps; ++ dwc_otg_pcd_ep_t *ep; ++ ++ DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, pcd); ++ ++ INIT_LIST_HEAD (&pcd->gadget.ep_list); ++ pcd->gadget.ep0 = &pcd->ep0.ep; ++ pcd->gadget.speed = USB_SPEED_UNKNOWN; ++ ++ INIT_LIST_HEAD (&pcd->gadget.ep0->ep_list); ++ ++ /** ++ * Initialize the EP0 structure. ++ */ ++ ep = &pcd->ep0; ++ ++ /* Init EP structure */ ++ ep->desc = 0; ++ ep->pcd = pcd; ++ ep->stopped = 1; ++ ++ /* Init DWC ep structure */ ++ ep->dwc_ep.num = 0; ++ ep->dwc_ep.active = 0; ++ ep->dwc_ep.tx_fifo_num = 0; ++ /* Control until ep is actvated */ ++ ep->dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL; ++ ep->dwc_ep.maxpacket = MAX_PACKET_SIZE; ++ ep->dwc_ep.dma_addr = 0; ++ ep->dwc_ep.start_xfer_buff = 0; ++ ep->dwc_ep.xfer_buff = 0; ++ ep->dwc_ep.xfer_len = 0; ++ ep->dwc_ep.xfer_count = 0; ++ ep->dwc_ep.sent_zlp = 0; ++ ep->dwc_ep.total_len = 0; ++ ep->queue_sof = 0; ++ ep->dwc_ep.desc_addr = 0; ++ ep->dwc_ep.dma_desc_addr = 0; ++ ++ ++ /* Init the usb_ep structure. */ ++ ep->ep.name = names[0]; ++ ep->ep.ops = (struct usb_ep_ops*)&dwc_otg_pcd_ep_ops; ++ ++ /** ++ * @todo NGS: What should the max packet size be set to ++ * here? Before EP type is set? ++ */ ++ ep->ep.maxpacket = MAX_PACKET_SIZE; ++ ++ list_add_tail (&ep->ep.ep_list, &pcd->gadget.ep_list); ++ ++ INIT_LIST_HEAD (&ep->queue); ++ /** ++ * Initialize the EP structures. ++ */ ++ in_ep_cntr = 0; ++ hwcfg1 = (GET_CORE_IF(pcd))->hwcfg1.d32 >> 3; ++ ++ for (i = 1; in_ep_cntr < num_in_eps; i++) ++ { ++ if((hwcfg1 & 0x1) == 0) { ++ dwc_otg_pcd_ep_t *ep = &pcd->in_ep[in_ep_cntr]; ++ in_ep_cntr ++; ++ ++ /* Init EP structure */ ++ ep->desc = 0; ++ ep->pcd = pcd; ++ ep->stopped = 1; ++ ++ /* Init DWC ep structure */ ++ ep->dwc_ep.is_in = 1; ++ ep->dwc_ep.num = i; ++ ep->dwc_ep.active = 0; ++ ep->dwc_ep.tx_fifo_num = 0; ++ ++ /* Control until ep is actvated */ ++ ep->dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL; ++ ep->dwc_ep.maxpacket = MAX_PACKET_SIZE; ++ ep->dwc_ep.dma_addr = 0; ++ ep->dwc_ep.start_xfer_buff = 0; ++ ep->dwc_ep.xfer_buff = 0; ++ ep->dwc_ep.xfer_len = 0; ++ ep->dwc_ep.xfer_count = 0; ++ ep->dwc_ep.sent_zlp = 0; ++ ep->dwc_ep.total_len = 0; ++ ep->queue_sof = 0; ++ ep->dwc_ep.desc_addr = 0; ++ ep->dwc_ep.dma_desc_addr = 0; ++ ++ /* Init the usb_ep structure. */ ++ ep->ep.name = names[i]; ++ ep->ep.ops = (struct usb_ep_ops*)&dwc_otg_pcd_ep_ops; ++ ++ /** ++ * @todo NGS: What should the max packet size be set to ++ * here? Before EP type is set? 
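dwc_otg_pcd_reinit() above walks HWCFG1 two bits at a time, starting the IN scan at bit 3 and the OUT scan at bit 2 so that EP0's pair is skipped, and only instantiates an endpoint when the examined bit reads 0. The loop below models just that bit-walk on a made-up HWCFG1 value; it assumes the pair-per-endpoint layout implied by those shifts and does not try to interpret the fields any further.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Made-up HWCFG1: EP1 and EP2 usable both ways, EP3's IN bit set. */
	uint32_t hwcfg1   = 0x2u << 6;		/* pair for EP3 = 0b10 */
	uint32_t in_bits  = hwcfg1 >> 3;	/* driver's IN starting point */
	uint32_t out_bits = hwcfg1 >> 2;	/* driver's OUT starting point */
	int i;

	for (i = 1; i <= 3; i++) {
		printf("ep%d: IN %s, OUT %s\n", i,
		       (in_bits  & 0x1) == 0 ? "yes" : "no",
		       (out_bits & 0x1) == 0 ? "yes" : "no");
		in_bits  >>= 2;
		out_bits >>= 2;
	}
	return 0;
}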
++ */ ++ ep->ep.maxpacket = MAX_PACKET_SIZE; ++ ++ list_add_tail (&ep->ep.ep_list, &pcd->gadget.ep_list); ++ ++ INIT_LIST_HEAD (&ep->queue); ++ } ++ hwcfg1 >>= 2; ++ } ++ ++ out_ep_cntr = 0; ++ hwcfg1 = (GET_CORE_IF(pcd))->hwcfg1.d32 >> 2; ++ ++ for (i = 1; out_ep_cntr < num_out_eps; i++) ++ { ++ if((hwcfg1 & 0x1) == 0) { ++ dwc_otg_pcd_ep_t *ep = &pcd->out_ep[out_ep_cntr]; ++ out_ep_cntr++; ++ ++ /* Init EP structure */ ++ ep->desc = 0; ++ ep->pcd = pcd; ++ ep->stopped = 1; ++ ++ /* Init DWC ep structure */ ++ ep->dwc_ep.is_in = 0; ++ ep->dwc_ep.num = i; ++ ep->dwc_ep.active = 0; ++ ep->dwc_ep.tx_fifo_num = 0; ++ /* Control until ep is actvated */ ++ ep->dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL; ++ ep->dwc_ep.maxpacket = MAX_PACKET_SIZE; ++ ep->dwc_ep.dma_addr = 0; ++ ep->dwc_ep.start_xfer_buff = 0; ++ ep->dwc_ep.xfer_buff = 0; ++ ep->dwc_ep.xfer_len = 0; ++ ep->dwc_ep.xfer_count = 0; ++ ep->dwc_ep.sent_zlp = 0; ++ ep->dwc_ep.total_len = 0; ++ ep->queue_sof = 0; ++ ++ /* Init the usb_ep structure. */ ++ ep->ep.name = names[15 + i]; ++ ep->ep.ops = (struct usb_ep_ops*)&dwc_otg_pcd_ep_ops; ++ /** ++ * @todo NGS: What should the max packet size be set to ++ * here? Before EP type is set? ++ */ ++ ep->ep.maxpacket = MAX_PACKET_SIZE; ++ ++ list_add_tail (&ep->ep.ep_list, &pcd->gadget.ep_list); ++ ++ INIT_LIST_HEAD (&ep->queue); ++ } ++ hwcfg1 >>= 2; ++ } ++ ++ /* remove ep0 from the list. There is a ep0 pointer.*/ ++ list_del_init (&pcd->ep0.ep.ep_list); ++ ++ pcd->ep0state = EP0_DISCONNECT; ++ pcd->ep0.ep.maxpacket = MAX_EP0_SIZE; ++ pcd->ep0.dwc_ep.maxpacket = MAX_EP0_SIZE; ++ pcd->ep0.dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL; ++} ++ ++/** ++ * This function releases the Gadget device. ++ * required by device_unregister(). ++ * ++ * @todo Should this do something? Should it free the PCD? ++ */ ++static void dwc_otg_pcd_gadget_release(struct device *dev) ++{ ++ DWC_DEBUGPL(DBG_PCDV,"%s(%p)\n", __func__, dev); ++} ++ ++ ++ ++/** ++ * This function initialized the PCD portion of the driver. ++ * ++ */ ++ ++int dwc_otg_pcd_init(struct device *dev) ++{ ++ static char pcd_name[] = "dwc_otg_pcd"; ++ dwc_otg_pcd_t *pcd; ++ dwc_otg_core_if_t* core_if; ++ dwc_otg_dev_if_t* dev_if; ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(dev); ++ int retval = 0; ++ ++ ++ DWC_DEBUGPL(DBG_PCDV,"%s(%p)\n",__func__, dev); ++ /* ++ * Allocate PCD structure ++ */ ++ pcd = kmalloc(sizeof(dwc_otg_pcd_t), GFP_KERNEL); ++ ++ if (pcd == 0) { ++ return -ENOMEM; ++ } ++ ++ memset(pcd, 0, sizeof(dwc_otg_pcd_t)); ++ spin_lock_init(&pcd->lock); ++ ++ otg_dev->pcd = pcd; ++ s_pcd = pcd; ++ pcd->gadget.name = pcd_name; ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) ++ strcpy(pcd->gadget.dev.bus_id, "gadget"); ++#else ++ dev_set_name(&pcd->gadget.dev, "%s", "gadget"); ++#endif ++ ++ pcd->otg_dev = dev_get_drvdata(dev); ++ ++ pcd->gadget.dev.parent = dev; ++ pcd->gadget.dev.release = dwc_otg_pcd_gadget_release; ++ pcd->gadget.ops = &dwc_otg_pcd_ops; ++ ++ core_if = GET_CORE_IF(pcd); ++ dev_if = core_if->dev_if; ++ ++ if(core_if->hwcfg4.b.ded_fifo_en) { ++ DWC_PRINT("Dedicated Tx FIFOs mode\n"); ++ } ++ else { ++ DWC_PRINT("Shared Tx FIFO mode\n"); ++ } ++ ++ /* If the module is set to FS or if the PHY_TYPE is FS then the gadget ++ * should not report as dual-speed capable. replace the following line ++ * with the block of code below it once the software is debugged for ++ * this. If is_dualspeed = 0 then the gadget driver should not report ++ * a device qualifier descriptor when queried. 
*/ ++ if ((GET_CORE_IF(pcd)->core_params->speed == DWC_SPEED_PARAM_FULL) || ++ ((GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type == 2) && ++ (GET_CORE_IF(pcd)->hwcfg2.b.fs_phy_type == 1) && ++ (GET_CORE_IF(pcd)->core_params->ulpi_fs_ls))) { ++ pcd->gadget.is_dualspeed = 0; ++ } ++ else { ++ pcd->gadget.is_dualspeed = 1; ++ } ++ ++ if ((otg_dev->core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE) || ++ (otg_dev->core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST) || ++ (otg_dev->core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_SRP_CAPABLE_DEVICE) || ++ (otg_dev->core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_SRP_CAPABLE_HOST)) { ++ pcd->gadget.is_otg = 0; ++ } ++ else { ++ pcd->gadget.is_otg = 1; ++ } ++ ++ ++ pcd->driver = 0; ++ /* Register the gadget device */ ++ retval = device_register(&pcd->gadget.dev); ++ if (retval != 0) { ++ kfree (pcd); ++ return retval; ++ } ++ ++ ++ /* ++ * Initialized the Core for Device mode. ++ */ ++ if (dwc_otg_is_device_mode(core_if)) { ++ dwc_otg_core_dev_init(core_if); ++ } ++ ++ /* ++ * Initialize EP structures ++ */ ++ dwc_otg_pcd_reinit(pcd); ++ ++ /* ++ * Register the PCD Callbacks. ++ */ ++ dwc_otg_cil_register_pcd_callbacks(otg_dev->core_if, &pcd_callbacks, ++ pcd); ++ /* ++ * Setup interupt handler ++ */ ++ DWC_DEBUGPL(DBG_ANY, "registering handler for irq%d\n", otg_dev->irq); ++ retval = request_irq(otg_dev->irq, dwc_otg_pcd_irq, ++ IRQF_SHARED, pcd->gadget.name, pcd); ++ if (retval != 0) { ++ DWC_ERROR("request of irq%d failed\n", otg_dev->irq); ++ device_unregister(&pcd->gadget.dev); ++ kfree (pcd); ++ return -EBUSY; ++ } ++ ++ /* ++ * Initialize the DMA buffer for SETUP packets ++ */ ++ if (GET_CORE_IF(pcd)->dma_enable) { ++ pcd->setup_pkt = dma_alloc_coherent (NULL, sizeof (*pcd->setup_pkt) * 5, &pcd->setup_pkt_dma_handle, 0); ++ if (pcd->setup_pkt == 0) { ++ free_irq(otg_dev->irq, pcd); ++ device_unregister(&pcd->gadget.dev); ++ kfree (pcd); ++ return -ENOMEM; ++ } ++ ++ pcd->status_buf = dma_alloc_coherent (NULL, sizeof (uint16_t), &pcd->status_buf_dma_handle, 0); ++ if (pcd->status_buf == 0) { ++ dma_free_coherent(NULL, sizeof(*pcd->setup_pkt), pcd->setup_pkt, pcd->setup_pkt_dma_handle); ++ free_irq(otg_dev->irq, pcd); ++ device_unregister(&pcd->gadget.dev); ++ kfree (pcd); ++ return -ENOMEM; ++ } ++ ++ if (GET_CORE_IF(pcd)->dma_desc_enable) { ++ dev_if->setup_desc_addr[0] = dwc_otg_ep_alloc_desc_chain(&dev_if->dma_setup_desc_addr[0], 1); ++ dev_if->setup_desc_addr[1] = dwc_otg_ep_alloc_desc_chain(&dev_if->dma_setup_desc_addr[1], 1); ++ dev_if->in_desc_addr = dwc_otg_ep_alloc_desc_chain(&dev_if->dma_in_desc_addr, 1); ++ dev_if->out_desc_addr = dwc_otg_ep_alloc_desc_chain(&dev_if->dma_out_desc_addr, 1); ++ ++ if(dev_if->setup_desc_addr[0] == 0 ++ || dev_if->setup_desc_addr[1] == 0 ++ || dev_if->in_desc_addr == 0 ++ || dev_if->out_desc_addr == 0 ) { ++ ++ if(dev_if->out_desc_addr) ++ dwc_otg_ep_free_desc_chain(dev_if->out_desc_addr, dev_if->dma_out_desc_addr, 1); ++ if(dev_if->in_desc_addr) ++ dwc_otg_ep_free_desc_chain(dev_if->in_desc_addr, dev_if->dma_in_desc_addr, 1); ++ if(dev_if->setup_desc_addr[1]) ++ dwc_otg_ep_free_desc_chain(dev_if->setup_desc_addr[1], dev_if->dma_setup_desc_addr[1], 1); ++ if(dev_if->setup_desc_addr[0]) ++ dwc_otg_ep_free_desc_chain(dev_if->setup_desc_addr[0], dev_if->dma_setup_desc_addr[0], 1); ++ ++ ++ dma_free_coherent(NULL, sizeof(*pcd->status_buf), pcd->status_buf, pcd->setup_pkt_dma_handle); ++ dma_free_coherent(NULL, sizeof(*pcd->setup_pkt), pcd->setup_pkt, 
pcd->setup_pkt_dma_handle); ++ ++ free_irq(otg_dev->irq, pcd); ++ device_unregister(&pcd->gadget.dev); ++ kfree (pcd); ++ ++ return -ENOMEM; ++ } ++ } ++ } ++ else { ++ pcd->setup_pkt = kmalloc (sizeof (*pcd->setup_pkt) * 5, GFP_KERNEL); ++ if (pcd->setup_pkt == 0) { ++ free_irq(otg_dev->irq, pcd); ++ device_unregister(&pcd->gadget.dev); ++ kfree (pcd); ++ return -ENOMEM; ++ } ++ ++ pcd->status_buf = kmalloc (sizeof (uint16_t), GFP_KERNEL); ++ if (pcd->status_buf == 0) { ++ kfree(pcd->setup_pkt); ++ free_irq(otg_dev->irq, pcd); ++ device_unregister(&pcd->gadget.dev); ++ kfree (pcd); ++ return -ENOMEM; ++ } ++ } ++ ++ ++ /* Initialize tasklet */ ++ start_xfer_tasklet.data = (unsigned long)pcd; ++ pcd->start_xfer_tasklet = &start_xfer_tasklet; ++ ++ return 0; ++} ++ ++/** ++ * Cleanup the PCD. ++ */ ++void dwc_otg_pcd_remove(struct device *dev) ++{ ++ dwc_otg_device_t *otg_dev = dev_get_drvdata(dev); ++ dwc_otg_pcd_t *pcd = otg_dev->pcd; ++ dwc_otg_dev_if_t* dev_if = GET_CORE_IF(pcd)->dev_if; ++ ++ DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, dev); ++ ++ /* ++ * Free the IRQ ++ */ ++ free_irq(otg_dev->irq, pcd); ++ ++ /* start with the driver above us */ ++ if (pcd->driver) { ++ /* should have been done already by driver model core */ ++ DWC_WARN("driver '%s' is still registered\n", ++ pcd->driver->driver.name); ++ usb_gadget_unregister_driver(pcd->driver); ++ } ++ device_unregister(&pcd->gadget.dev); ++ ++ if (GET_CORE_IF(pcd)->dma_enable) { ++ dma_free_coherent (NULL, sizeof (*pcd->setup_pkt) * 5, pcd->setup_pkt, pcd->setup_pkt_dma_handle); ++ dma_free_coherent (NULL, sizeof (uint16_t), pcd->status_buf, pcd->status_buf_dma_handle); ++ if (GET_CORE_IF(pcd)->dma_desc_enable) { ++ dwc_otg_ep_free_desc_chain(dev_if->setup_desc_addr[0], dev_if->dma_setup_desc_addr[0], 1); ++ dwc_otg_ep_free_desc_chain(dev_if->setup_desc_addr[1], dev_if->dma_setup_desc_addr[1], 1); ++ dwc_otg_ep_free_desc_chain(dev_if->in_desc_addr, dev_if->dma_in_desc_addr, 1); ++ dwc_otg_ep_free_desc_chain(dev_if->out_desc_addr, dev_if->dma_out_desc_addr, 1); ++ } ++ } ++ else { ++ kfree (pcd->setup_pkt); ++ kfree (pcd->status_buf); ++ } ++ ++ kfree(pcd); ++ otg_dev->pcd = 0; ++} ++ ++/** ++ * This function registers a gadget driver with the PCD. ++ * ++ * When a driver is successfully registered, it will receive control ++ * requests including set_configuration(), which enables non-control ++ * requests. then usb traffic follows until a disconnect is reported. ++ * then a host may connect again, or the driver might get unbound. 
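++ *
++ * @note This PCD keeps a single module-level instance (s_pcd), so only
++ * one gadget driver can be bound at a time; a second registration is
++ * rejected with -EBUSY below.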
++ * ++ * @param driver The driver being registered ++ */ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) ++int usb_gadget_probe_driver(struct usb_gadget_driver *driver, int (*bind)(struct usb_gadget *)) ++#else ++int usb_gadget_register_driver(struct usb_gadget_driver *driver) ++#endif ++{ ++ int retval; ++ int (*d_bind)(struct usb_gadget *); ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) ++ d_bind = bind; ++#else ++ d_bind = driver->bind; ++#endif ++ ++ DWC_DEBUGPL(DBG_PCD, "registering gadget driver '%s'\n", driver->driver.name); ++ ++ if (!driver || driver->speed == USB_SPEED_UNKNOWN || ++ !d_bind || ++ !driver->unbind || ++ !driver->disconnect || ++ !driver->setup) { ++ DWC_DEBUGPL(DBG_PCDV,"EINVAL\n"); ++ return -EINVAL; ++ } ++ if (s_pcd == 0) { ++ DWC_DEBUGPL(DBG_PCDV,"ENODEV\n"); ++ return -ENODEV; ++ } ++ if (s_pcd->driver != 0) { ++ DWC_DEBUGPL(DBG_PCDV,"EBUSY (%p)\n", s_pcd->driver); ++ return -EBUSY; ++ } ++ ++ /* hook up the driver */ ++ s_pcd->driver = driver; ++ s_pcd->gadget.dev.driver = &driver->driver; ++ ++ DWC_DEBUGPL(DBG_PCD, "bind to driver %s\n", driver->driver.name); ++ retval = d_bind(&s_pcd->gadget); ++ if (retval) { ++ DWC_ERROR("bind to driver %s --> error %d\n", ++ driver->driver.name, retval); ++ s_pcd->driver = 0; ++ s_pcd->gadget.dev.driver = 0; ++ return retval; ++ } ++ DWC_DEBUGPL(DBG_ANY, "registered gadget driver '%s'\n", ++ driver->driver.name); ++ return 0; ++} ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) ++EXPORT_SYMBOL(usb_gadget_probe_driver); ++#else ++EXPORT_SYMBOL(usb_gadget_register_driver); ++#endif ++ ++/** ++ * This function unregisters a gadget driver ++ * ++ * @param driver The driver being unregistered ++ */ ++int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) ++{ ++ //DWC_DEBUGPL(DBG_PCDV,"%s(%p)\n", __func__, _driver); ++ ++ if (s_pcd == 0) { ++ DWC_DEBUGPL(DBG_ANY, "%s Return(%d): s_pcd==0\n", __func__, ++ -ENODEV); ++ return -ENODEV; ++ } ++ if (driver == 0 || driver != s_pcd->driver) { ++ DWC_DEBUGPL(DBG_ANY, "%s Return(%d): driver?\n", __func__, ++ -EINVAL); ++ return -EINVAL; ++ } ++ ++ driver->unbind(&s_pcd->gadget); ++ s_pcd->driver = 0; ++ ++ DWC_DEBUGPL(DBG_ANY, "unregistered driver '%s'\n", ++ driver->driver.name); ++ return 0; ++} ++EXPORT_SYMBOL(usb_gadget_unregister_driver); ++ ++#endif /* DWC_HOST_ONLY */ +--- /dev/null ++++ b/drivers/usb/dwc_otg/dwc_otg_pcd.h +@@ -0,0 +1,248 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_pcd.h $ ++ * $Revision: 1.2 $ ++ * $Date: 2008-11-21 05:39:15 $ ++ * $Change: 1103515 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. 
If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++#ifndef DWC_HOST_ONLY ++#if !defined(__DWC_PCD_H__) ++#define __DWC_PCD_H__ ++ ++#include ++#include ++#include ++#include ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21) ++# include ++#else ++# include ++#endif ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24) ++#include ++#else ++#include ++#endif ++#include ++#include ++ ++struct dwc_otg_device; ++ ++#include "dwc_otg_cil.h" ++ ++/** ++ * @file ++ * ++ * This file contains the structures, constants, and interfaces for ++ * the Perpherial Contoller Driver (PCD). ++ * ++ * The Peripheral Controller Driver (PCD) for Linux will implement the ++ * Gadget API, so that the existing Gadget drivers can be used. For ++ * the Mass Storage Function driver the File-backed USB Storage Gadget ++ * (FBS) driver will be used. The FBS driver supports the ++ * Control-Bulk (CB), Control-Bulk-Interrupt (CBI), and Bulk-Only ++ * transports. ++ * ++ */ ++ ++/** Invalid DMA Address */ ++#define DMA_ADDR_INVALID (~(dma_addr_t)0) ++/** Maxpacket size for EP0 */ ++#define MAX_EP0_SIZE 64 ++/** Maxpacket size for any EP */ ++#define MAX_PACKET_SIZE 1024 ++ ++/** Max Transfer size for any EP */ ++#define MAX_TRANSFER_SIZE 65535 ++ ++/** Max DMA Descriptor count for any EP */ ++#define MAX_DMA_DESC_CNT 64 ++ ++/** ++ * Get the pointer to the core_if from the pcd pointer. ++ */ ++#define GET_CORE_IF( _pcd ) (_pcd->otg_dev->core_if) ++ ++/** ++ * States of EP0. ++ */ ++typedef enum ep0_state ++{ ++ EP0_DISCONNECT, /* no host */ ++ EP0_IDLE, ++ EP0_IN_DATA_PHASE, ++ EP0_OUT_DATA_PHASE, ++ EP0_IN_STATUS_PHASE, ++ EP0_OUT_STATUS_PHASE, ++ EP0_STALL, ++} ep0state_e; ++ ++/** Fordward declaration.*/ ++struct dwc_otg_pcd; ++ ++/** DWC_otg iso request structure. ++ * ++ */ ++typedef struct usb_iso_request dwc_otg_pcd_iso_request_t; ++ ++/** PCD EP structure. ++ * This structure describes an EP, there is an array of EPs in the PCD ++ * structure. ++ */ ++typedef struct dwc_otg_pcd_ep ++{ ++ /** USB EP data */ ++ struct usb_ep ep; ++ /** USB EP Descriptor */ ++ const struct usb_endpoint_descriptor *desc; ++ ++ /** queue of dwc_otg_pcd_requests. */ ++ struct list_head queue; ++ unsigned stopped : 1; ++ unsigned disabling : 1; ++ unsigned dma : 1; ++ unsigned queue_sof : 1; ++ ++#ifdef DWC_EN_ISOC ++ /** DWC_otg Isochronous Transfer */ ++ struct usb_iso_request* iso_req; ++#endif //DWC_EN_ISOC ++ ++ /** DWC_otg ep data. */ ++ dwc_ep_t dwc_ep; ++ ++ /** Pointer to PCD */ ++ struct dwc_otg_pcd *pcd; ++}dwc_otg_pcd_ep_t; ++ ++ ++ ++/** DWC_otg PCD Structure. 
++ * This structure encapsulates the data for the dwc_otg PCD. ++ */ ++typedef struct dwc_otg_pcd ++{ ++ /** USB gadget */ ++ struct usb_gadget gadget; ++ /** USB gadget driver pointer*/ ++ struct usb_gadget_driver *driver; ++ /** The DWC otg device pointer. */ ++ struct dwc_otg_device *otg_dev; ++ ++ /** State of EP0 */ ++ ep0state_e ep0state; ++ /** EP0 Request is pending */ ++ unsigned ep0_pending : 1; ++ /** Indicates when SET CONFIGURATION Request is in process */ ++ unsigned request_config : 1; ++ /** The state of the Remote Wakeup Enable. */ ++ unsigned remote_wakeup_enable : 1; ++ /** The state of the B-Device HNP Enable. */ ++ unsigned b_hnp_enable : 1; ++ /** The state of A-Device HNP Support. */ ++ unsigned a_hnp_support : 1; ++ /** The state of the A-Device Alt HNP support. */ ++ unsigned a_alt_hnp_support : 1; ++ /** Count of pending Requests */ ++ unsigned request_pending; ++ ++ /** SETUP packet for EP0 ++ * This structure is allocated as a DMA buffer on PCD initialization ++ * with enough space for up to 3 setup packets. ++ */ ++ union ++ { ++ struct usb_ctrlrequest req; ++ uint32_t d32[2]; ++ } *setup_pkt; ++ ++ dma_addr_t setup_pkt_dma_handle; ++ ++ /** 2-byte dma buffer used to return status from GET_STATUS */ ++ uint16_t *status_buf; ++ dma_addr_t status_buf_dma_handle; ++ ++ /** EP0 */ ++ dwc_otg_pcd_ep_t ep0; ++ ++ /** Array of IN EPs. */ ++ dwc_otg_pcd_ep_t in_ep[ MAX_EPS_CHANNELS - 1]; ++ /** Array of OUT EPs. */ ++ dwc_otg_pcd_ep_t out_ep[ MAX_EPS_CHANNELS - 1]; ++ /** number of valid EPs in the above array. */ ++// unsigned num_eps : 4; ++ spinlock_t lock; ++ /** Timer for SRP. If it expires before SRP is successful ++ * clear the SRP. */ ++ struct timer_list srp_timer; ++ ++ /** Tasklet to defer starting of TEST mode transmissions until ++ * Status Phase has been completed. ++ */ ++ struct tasklet_struct test_mode_tasklet; ++ ++ /** Tasklet to delay starting of xfer in DMA mode */ ++ struct tasklet_struct *start_xfer_tasklet; ++ ++ /** The test mode to enter when the tasklet is executed. */ ++ unsigned test_mode; ++ ++} dwc_otg_pcd_t; ++ ++ ++/** DWC_otg request structure. ++ * This structure is a list of requests. ++ */ ++typedef struct ++{ ++ struct usb_request req; /**< USB Request. */ ++ struct list_head queue; /**< queue of these requests. 
*/ ++} dwc_otg_pcd_request_t; ++ ++ ++extern int dwc_otg_pcd_init(struct device *dev); ++ ++//extern void dwc_otg_pcd_remove( struct dwc_otg_device *_otg_dev ); ++extern void dwc_otg_pcd_remove( struct device *dev); ++extern int32_t dwc_otg_pcd_handle_intr( dwc_otg_pcd_t *pcd ); ++extern void dwc_otg_pcd_start_srp_timer(dwc_otg_pcd_t *pcd ); ++ ++extern void dwc_otg_pcd_initiate_srp(dwc_otg_pcd_t *pcd); ++extern void dwc_otg_pcd_remote_wakeup(dwc_otg_pcd_t *pcd, int set); ++ ++extern void dwc_otg_iso_buffer_done(dwc_otg_pcd_ep_t *ep, dwc_otg_pcd_iso_request_t *req); ++extern void dwc_otg_request_done(dwc_otg_pcd_ep_t *_ep, dwc_otg_pcd_request_t *req, ++ int status); ++extern void dwc_otg_request_nuke(dwc_otg_pcd_ep_t *_ep); ++extern void dwc_otg_pcd_update_otg(dwc_otg_pcd_t *_pcd, ++ const unsigned reset); ++ ++#endif ++#endif /* DWC_HOST_ONLY */ +--- /dev/null ++++ b/drivers/usb/dwc_otg/dwc_otg_pcd_intr.c +@@ -0,0 +1,3654 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_pcd_intr.c $ ++ * $Revision: 1.2 $ ++ * $Date: 2008-11-21 05:39:15 $ ++ * $Change: 1115682 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++#ifndef DWC_HOST_ONLY ++#include ++#include ++#include ++ ++#include "dwc_otg_driver.h" ++#include "dwc_otg_pcd.h" ++ ++ ++#define DEBUG_EP0 ++ ++/* request functions defined in "dwc_otg_pcd.c" */ ++ ++/** @file ++ * This file contains the implementation of the PCD Interrupt handlers. ++ * ++ * The PCD handles the device interrupts. Many conditions can cause a ++ * device interrupt. When an interrupt occurs, the device interrupt ++ * service routine determines the cause of the interrupt and ++ * dispatches handling to the appropriate function. 
These interrupt ++ * handling functions are described below. ++ * All interrupt registers are processed from LSB to MSB. ++ */ ++ ++ ++/** ++ * This function prints the ep0 state for debug purposes. ++ */ ++static inline void print_ep0_state(dwc_otg_pcd_t *pcd) ++{ ++#ifdef DEBUG ++ char str[40]; ++ ++ switch (pcd->ep0state) { ++ case EP0_DISCONNECT: ++ strcpy(str, "EP0_DISCONNECT"); ++ break; ++ case EP0_IDLE: ++ strcpy(str, "EP0_IDLE"); ++ break; ++ case EP0_IN_DATA_PHASE: ++ strcpy(str, "EP0_IN_DATA_PHASE"); ++ break; ++ case EP0_OUT_DATA_PHASE: ++ strcpy(str, "EP0_OUT_DATA_PHASE"); ++ break; ++ case EP0_IN_STATUS_PHASE: ++ strcpy(str,"EP0_IN_STATUS_PHASE"); ++ break; ++ case EP0_OUT_STATUS_PHASE: ++ strcpy(str,"EP0_OUT_STATUS_PHASE"); ++ break; ++ case EP0_STALL: ++ strcpy(str,"EP0_STALL"); ++ break; ++ default: ++ strcpy(str,"EP0_INVALID"); ++ } ++ ++ DWC_DEBUGPL(DBG_ANY, "%s(%d)\n", str, pcd->ep0state); ++#endif ++} ++ ++/** ++ * This function returns pointer to in ep struct with number ep_num ++ */ ++static inline dwc_otg_pcd_ep_t* get_in_ep(dwc_otg_pcd_t *pcd, uint32_t ep_num) ++{ ++ int i; ++ int num_in_eps = GET_CORE_IF(pcd)->dev_if->num_in_eps; ++ if(ep_num == 0) { ++ return &pcd->ep0; ++ } ++ else { ++ for(i = 0; i < num_in_eps; ++i) ++ { ++ if(pcd->in_ep[i].dwc_ep.num == ep_num) ++ return &pcd->in_ep[i]; ++ } ++ return 0; ++ } ++} ++/** ++ * This function returns pointer to out ep struct with number ep_num ++ */ ++static inline dwc_otg_pcd_ep_t* get_out_ep(dwc_otg_pcd_t *pcd, uint32_t ep_num) ++{ ++ int i; ++ int num_out_eps = GET_CORE_IF(pcd)->dev_if->num_out_eps; ++ if(ep_num == 0) { ++ return &pcd->ep0; ++ } ++ else { ++ for(i = 0; i < num_out_eps; ++i) ++ { ++ if(pcd->out_ep[i].dwc_ep.num == ep_num) ++ return &pcd->out_ep[i]; ++ } ++ return 0; ++ } ++} ++/** ++ * This functions gets a pointer to an EP from the wIndex address ++ * value of the control request. ++ */ ++static dwc_otg_pcd_ep_t *get_ep_by_addr (dwc_otg_pcd_t *pcd, u16 wIndex) ++{ ++ dwc_otg_pcd_ep_t *ep; ++ ++ if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0) ++ return &pcd->ep0; ++ list_for_each_entry(ep, &pcd->gadget.ep_list, ep.ep_list) ++ { ++ u8 bEndpointAddress; ++ ++ if (!ep->desc) ++ continue; ++ ++ bEndpointAddress = ep->desc->bEndpointAddress; ++ if((wIndex & (USB_DIR_IN | USB_ENDPOINT_NUMBER_MASK)) ++ == (bEndpointAddress & (USB_DIR_IN | USB_ENDPOINT_NUMBER_MASK))) ++ return ep; ++ } ++ return NULL; ++} ++ ++/** ++ * This function checks the EP request queue, if the queue is not ++ * empty the next request is started. 
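++ * The request at the head of the queue is copied into the dwc_ep
++ * (buffer, DMA address, total length), maxxfer is derived from the
++ * core's max_transfer_size parameter, a trailing ZLP is flagged when
++ * req.zero is set and the length is a non-zero multiple of maxpacket,
++ * and dwc_otg_ep_start_transfer() is then called.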
++ */ ++void start_next_request(dwc_otg_pcd_ep_t *ep) ++{ ++ dwc_otg_pcd_request_t *req = 0; ++ uint32_t max_transfer = GET_CORE_IF(ep->pcd)->core_params->max_transfer_size; ++ ++ if (!list_empty(&ep->queue)) { ++ req = list_entry(ep->queue.next, ++ dwc_otg_pcd_request_t, queue); ++ ++ /* Setup and start the Transfer */ ++ ep->dwc_ep.dma_addr = req->req.dma; ++ ep->dwc_ep.start_xfer_buff = req->req.buf; ++ ep->dwc_ep.xfer_buff = req->req.buf; ++ ep->dwc_ep.sent_zlp = 0; ++ ep->dwc_ep.total_len = req->req.length; ++ ep->dwc_ep.xfer_len = 0; ++ ep->dwc_ep.xfer_count = 0; ++ ++ if(max_transfer > MAX_TRANSFER_SIZE) { ++ ep->dwc_ep.maxxfer = max_transfer - (max_transfer % ep->dwc_ep.maxpacket); ++ } else { ++ ep->dwc_ep.maxxfer = max_transfer; ++ } ++ ++ if(req->req.zero) { ++ if((ep->dwc_ep.total_len % ep->dwc_ep.maxpacket == 0) ++ && (ep->dwc_ep.total_len != 0)) { ++ ep->dwc_ep.sent_zlp = 1; ++ } ++ ++ } ++ ++ dwc_otg_ep_start_transfer(GET_CORE_IF(ep->pcd), &ep->dwc_ep); ++ } ++} ++ ++/** ++ * This function handles the SOF Interrupts. At this time the SOF ++ * Interrupt is disabled. ++ */ ++int32_t dwc_otg_pcd_handle_sof_intr(dwc_otg_pcd_t *pcd) ++{ ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); ++ ++ gintsts_data_t gintsts; ++ ++ DWC_DEBUGPL(DBG_PCD, "SOF\n"); ++ ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.sofintr = 1; ++ dwc_write_reg32 (&core_if->core_global_regs->gintsts, gintsts.d32); ++ ++ return 1; ++} ++ ++ ++/** ++ * This function handles the Rx Status Queue Level Interrupt, which ++ * indicates that there is a least one packet in the Rx FIFO. The ++ * packets are moved from the FIFO to memory, where they will be ++ * processed when the Endpoint Interrupt Register indicates Transfer ++ * Complete or SETUP Phase Done. ++ * ++ * Repeat the following until the Rx Status Queue is empty: ++ * -# Read the Receive Status Pop Register (GRXSTSP) to get Packet ++ * info ++ * -# If Receive FIFO is empty then skip to step Clear the interrupt ++ * and exit ++ * -# If SETUP Packet call dwc_otg_read_setup_packet to copy the ++ * SETUP data to the buffer ++ * -# If OUT Data Packet call dwc_otg_read_packet to copy the data ++ * to the destination buffer ++ */ ++int32_t dwc_otg_pcd_handle_rx_status_q_level_intr(dwc_otg_pcd_t *pcd) ++{ ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); ++ dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs; ++ gintmsk_data_t gintmask = {.d32=0}; ++ device_grxsts_data_t status; ++ dwc_otg_pcd_ep_t *ep; ++ gintsts_data_t gintsts; ++#ifdef DEBUG ++ static char *dpid_str[] ={ "D0", "D2", "D1", "MDATA" }; ++#endif ++ ++ //DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, _pcd); ++ /* Disable the Rx Status Queue Level interrupt */ ++ gintmask.b.rxstsqlvl= 1; ++ dwc_modify_reg32(&global_regs->gintmsk, gintmask.d32, 0); ++ ++ /* Get the Status from the top of the FIFO */ ++ status.d32 = dwc_read_reg32(&global_regs->grxstsp); ++ ++ DWC_DEBUGPL(DBG_PCD, "EP:%d BCnt:%d DPID:%s " ++ "pktsts:%x Frame:%d(0x%0x)\n", ++ status.b.epnum, status.b.bcnt, ++ dpid_str[status.b.dpid], ++ status.b.pktsts, status.b.fn, status.b.fn); ++ /* Get pointer to EP structure */ ++ ep = get_out_ep(pcd, status.b.epnum); ++ ++ switch (status.b.pktsts) { ++ case DWC_DSTS_GOUT_NAK: ++ DWC_DEBUGPL(DBG_PCDV, "Global OUT NAK\n"); ++ break; ++ case DWC_STS_DATA_UPDT: ++ DWC_DEBUGPL(DBG_PCDV, "OUT Data Packet\n"); ++ if (status.b.bcnt && ep->dwc_ep.xfer_buff) { ++ /** @todo NGS Check for buffer overflow? 
*/ ++ dwc_otg_read_packet(core_if, ++ ep->dwc_ep.xfer_buff, ++ status.b.bcnt); ++ ep->dwc_ep.xfer_count += status.b.bcnt; ++ ep->dwc_ep.xfer_buff += status.b.bcnt; ++ } ++ break; ++ case DWC_STS_XFER_COMP: ++ DWC_DEBUGPL(DBG_PCDV, "OUT Complete\n"); ++ break; ++ case DWC_DSTS_SETUP_COMP: ++#ifdef DEBUG_EP0 ++ DWC_DEBUGPL(DBG_PCDV, "Setup Complete\n"); ++#endif ++ break; ++case DWC_DSTS_SETUP_UPDT: ++ dwc_otg_read_setup_packet(core_if, pcd->setup_pkt->d32); ++#ifdef DEBUG_EP0 ++ DWC_DEBUGPL(DBG_PCD, ++ "SETUP PKT: %02x.%02x v%04x i%04x l%04x\n", ++ pcd->setup_pkt->req.bRequestType, ++ pcd->setup_pkt->req.bRequest, ++ pcd->setup_pkt->req.wValue, ++ pcd->setup_pkt->req.wIndex, ++ pcd->setup_pkt->req.wLength); ++#endif ++ ep->dwc_ep.xfer_count += status.b.bcnt; ++ break; ++ default: ++ DWC_DEBUGPL(DBG_PCDV, "Invalid Packet Status (0x%0x)\n", ++ status.b.pktsts); ++ break; ++ } ++ ++ /* Enable the Rx Status Queue Level interrupt */ ++ dwc_modify_reg32(&global_regs->gintmsk, 0, gintmask.d32); ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.rxstsqlvl = 1; ++ dwc_write_reg32 (&global_regs->gintsts, gintsts.d32); ++ ++ //DWC_DEBUGPL(DBG_PCDV, "EXIT: %s\n", __func__); ++ return 1; ++} ++/** ++ * This function examines the Device IN Token Learning Queue to ++ * determine the EP number of the last IN token received. This ++ * implementation is for the Mass Storage device where there are only ++ * 2 IN EPs (Control-IN and BULK-IN). ++ * ++ * The EP numbers for the first six IN Tokens are in DTKNQR1 and there ++ * are 8 EP Numbers in each of the other possible DTKNQ Registers. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * ++ */ ++static inline int get_ep_of_last_in_token(dwc_otg_core_if_t *core_if) ++{ ++ dwc_otg_device_global_regs_t *dev_global_regs = ++ core_if->dev_if->dev_global_regs; ++ const uint32_t TOKEN_Q_DEPTH = core_if->hwcfg2.b.dev_token_q_depth; ++ /* Number of Token Queue Registers */ ++ const int DTKNQ_REG_CNT = (TOKEN_Q_DEPTH + 7) / 8; ++ dtknq1_data_t dtknqr1; ++ uint32_t in_tkn_epnums[4]; ++ int ndx = 0; ++ int i = 0; ++ volatile uint32_t *addr = &dev_global_regs->dtknqr1; ++ int epnum = 0; ++ ++ //DWC_DEBUGPL(DBG_PCD,"dev_token_q_depth=%d\n",TOKEN_Q_DEPTH); ++ ++ ++ /* Read the DTKNQ Registers */ ++ for (i = 0; i < DTKNQ_REG_CNT; i++) ++ { ++ in_tkn_epnums[ i ] = dwc_read_reg32(addr); ++ DWC_DEBUGPL(DBG_PCDV, "DTKNQR%d=0x%08x\n", i+1, ++ in_tkn_epnums[i]); ++ if (addr == &dev_global_regs->dvbusdis) { ++ addr = &dev_global_regs->dtknqr3_dthrctl; ++ } ++ else { ++ ++addr; ++ } ++ ++ } ++ ++ /* Copy the DTKNQR1 data to the bit field. 
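++ * DTKNQR1 carries the IN token queue write pointer plus the first six
++ * 4-bit EP numbers; intknwptr is treated as pointing one past the most
++ * recently written entry, hence ndx = intknwptr - 1 below, and ndx == -1
++ * means the pointer has just wrapped so the most recent entry sits in
++ * the last slot of the queue.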
*/ ++ dtknqr1.d32 = in_tkn_epnums[0]; ++ /* Get the EP numbers */ ++ in_tkn_epnums[0] = dtknqr1.b.epnums0_5; ++ ndx = dtknqr1.b.intknwptr - 1; ++ ++ //DWC_DEBUGPL(DBG_PCDV,"ndx=%d\n",ndx); ++ if (ndx == -1) { ++ /** @todo Find a simpler way to calculate the max ++ * queue position.*/ ++ int cnt = TOKEN_Q_DEPTH; ++ if (TOKEN_Q_DEPTH <= 6) { ++ cnt = TOKEN_Q_DEPTH - 1; ++ } ++ else if (TOKEN_Q_DEPTH <= 14) { ++ cnt = TOKEN_Q_DEPTH - 7; ++ } ++ else if (TOKEN_Q_DEPTH <= 22) { ++ cnt = TOKEN_Q_DEPTH - 15; ++ } ++ else { ++ cnt = TOKEN_Q_DEPTH - 23; ++ } ++ epnum = (in_tkn_epnums[ DTKNQ_REG_CNT - 1 ] >> (cnt * 4)) & 0xF; ++ } ++ else { ++ if (ndx <= 5) { ++ epnum = (in_tkn_epnums[0] >> (ndx * 4)) & 0xF; ++ } ++ else if (ndx <= 13) { ++ ndx -= 6; ++ epnum = (in_tkn_epnums[1] >> (ndx * 4)) & 0xF; ++ } ++ else if (ndx <= 21) { ++ ndx -= 14; ++ epnum = (in_tkn_epnums[2] >> (ndx * 4)) & 0xF; ++ } ++ else if (ndx <= 29) { ++ ndx -= 22; ++ epnum = (in_tkn_epnums[3] >> (ndx * 4)) & 0xF; ++ } ++ } ++ //DWC_DEBUGPL(DBG_PCD,"epnum=%d\n",epnum); ++ return epnum; ++} ++ ++/** ++ * This interrupt occurs when the non-periodic Tx FIFO is half-empty. ++ * The active request is checked for the next packet to be loaded into ++ * the non-periodic Tx FIFO. ++ */ ++int32_t dwc_otg_pcd_handle_np_tx_fifo_empty_intr(dwc_otg_pcd_t *pcd) ++{ ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); ++ dwc_otg_core_global_regs_t *global_regs = ++ core_if->core_global_regs; ++ dwc_otg_dev_in_ep_regs_t *ep_regs; ++ gnptxsts_data_t txstatus = {.d32 = 0}; ++ gintsts_data_t gintsts; ++ ++ int epnum = 0; ++ dwc_otg_pcd_ep_t *ep = 0; ++ uint32_t len = 0; ++ int dwords; ++ ++ /* Get the epnum from the IN Token Learning Queue. */ ++ epnum = get_ep_of_last_in_token(core_if); ++ ep = get_in_ep(pcd, epnum); ++ ++ DWC_DEBUGPL(DBG_PCD, "NP TxFifo Empty: %s(%d) \n", ep->ep.name, epnum); ++ ep_regs = core_if->dev_if->in_ep_regs[epnum]; ++ ++ len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count; ++ if (len > ep->dwc_ep.maxpacket) { ++ len = ep->dwc_ep.maxpacket; ++ } ++ dwords = (len + 3)/4; ++ ++ ++ /* While there is space in the queue and space in the FIFO and ++ * More data to tranfer, Write packets to the Tx FIFO */ ++ txstatus.d32 = dwc_read_reg32(&global_regs->gnptxsts); ++ DWC_DEBUGPL(DBG_PCDV, "b4 GNPTXSTS=0x%08x\n",txstatus.d32); ++ ++ while (txstatus.b.nptxqspcavail > 0 && ++ txstatus.b.nptxfspcavail > dwords && ++ ep->dwc_ep.xfer_count < ep->dwc_ep.xfer_len) { ++ /* Write the FIFO */ ++ dwc_otg_ep_write_packet(core_if, &ep->dwc_ep, 0); ++ len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count; ++ ++ if (len > ep->dwc_ep.maxpacket) { ++ len = ep->dwc_ep.maxpacket; ++ } ++ ++ dwords = (len + 3)/4; ++ txstatus.d32 = dwc_read_reg32(&global_regs->gnptxsts); ++ DWC_DEBUGPL(DBG_PCDV,"GNPTXSTS=0x%08x\n",txstatus.d32); ++ } ++ ++ DWC_DEBUGPL(DBG_PCDV, "GNPTXSTS=0x%08x\n", ++ dwc_read_reg32(&global_regs->gnptxsts)); ++ ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.nptxfempty = 1; ++ dwc_write_reg32 (&global_regs->gintsts, gintsts.d32); ++ ++ return 1; ++} ++ ++/** ++ * This function is called when dedicated Tx FIFO Empty interrupt occurs. ++ * The active request is checked for the next packet to be loaded into ++ * apropriate Tx FIFO. 
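++ * Packets of at most maxpacket bytes are written while DTXFSTS reports
++ * enough FIFO space and the transfer still has data outstanding.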
++ */ ++static int32_t write_empty_tx_fifo(dwc_otg_pcd_t *pcd, uint32_t epnum) ++{ ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); ++ dwc_otg_dev_if_t* dev_if = core_if->dev_if; ++ dwc_otg_dev_in_ep_regs_t *ep_regs; ++ dtxfsts_data_t txstatus = {.d32 = 0}; ++ dwc_otg_pcd_ep_t *ep = 0; ++ uint32_t len = 0; ++ int dwords; ++ ++ ep = get_in_ep(pcd, epnum); ++ ++ DWC_DEBUGPL(DBG_PCD, "Dedicated TxFifo Empty: %s(%d) \n", ep->ep.name, epnum); ++ ++ ep_regs = core_if->dev_if->in_ep_regs[epnum]; ++ ++ len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count; ++ ++ if (len > ep->dwc_ep.maxpacket) { ++ len = ep->dwc_ep.maxpacket; ++ } ++ ++ dwords = (len + 3)/4; ++ ++ /* While there is space in the queue and space in the FIFO and ++ * More data to tranfer, Write packets to the Tx FIFO */ ++ txstatus.d32 = dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dtxfsts); ++ DWC_DEBUGPL(DBG_PCDV, "b4 dtxfsts[%d]=0x%08x\n",epnum,txstatus.d32); ++ ++ while (txstatus.b.txfspcavail > dwords && ++ ep->dwc_ep.xfer_count < ep->dwc_ep.xfer_len && ++ ep->dwc_ep.xfer_len != 0) { ++ /* Write the FIFO */ ++ dwc_otg_ep_write_packet(core_if, &ep->dwc_ep, 0); ++ ++ len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count; ++ if (len > ep->dwc_ep.maxpacket) { ++ len = ep->dwc_ep.maxpacket; ++ } ++ ++ dwords = (len + 3)/4; ++ txstatus.d32 = dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dtxfsts); ++ DWC_DEBUGPL(DBG_PCDV,"dtxfsts[%d]=0x%08x\n", epnum, txstatus.d32); ++ } ++ ++ DWC_DEBUGPL(DBG_PCDV, "b4 dtxfsts[%d]=0x%08x\n",epnum,dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dtxfsts)); ++ ++ return 1; ++} ++ ++ ++/** ++ * This function is called when the Device is disconnected. It stops ++ * any active requests and informs the Gadget driver of the ++ * disconnect. ++ */ ++void dwc_otg_pcd_stop(dwc_otg_pcd_t *pcd) ++{ ++ int i, num_in_eps, num_out_eps; ++ dwc_otg_pcd_ep_t *ep; ++ ++ gintmsk_data_t intr_mask = {.d32 = 0}; ++ ++ num_in_eps = GET_CORE_IF(pcd)->dev_if->num_in_eps; ++ num_out_eps = GET_CORE_IF(pcd)->dev_if->num_out_eps; ++ ++ DWC_DEBUGPL(DBG_PCDV, "%s() \n", __func__); ++ /* don't disconnect drivers more than once */ ++ if (pcd->ep0state == EP0_DISCONNECT) { ++ DWC_DEBUGPL(DBG_ANY, "%s() Already Disconnected\n", __func__); ++ return; ++ } ++ pcd->ep0state = EP0_DISCONNECT; ++ ++ /* Reset the OTG state. */ ++ dwc_otg_pcd_update_otg(pcd, 1); ++ ++ /* Disable the NP Tx Fifo Empty Interrupt. */ ++ intr_mask.b.nptxfempty = 1; ++ dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk, ++ intr_mask.d32, 0); ++ ++ /* Flush the FIFOs */ ++ /**@todo NGS Flush Periodic FIFOs */ ++ dwc_otg_flush_tx_fifo(GET_CORE_IF(pcd), 0x10); ++ dwc_otg_flush_rx_fifo(GET_CORE_IF(pcd)); ++ ++ /* prevent new request submissions, kill any outstanding requests */ ++ ep = &pcd->ep0; ++ dwc_otg_request_nuke(ep); ++ /* prevent new request submissions, kill any outstanding requests */ ++ for (i = 0; i < num_in_eps; i++) ++ { ++ dwc_otg_pcd_ep_t *ep = &pcd->in_ep[i]; ++ dwc_otg_request_nuke(ep); ++ } ++ /* prevent new request submissions, kill any outstanding requests */ ++ for (i = 0; i < num_out_eps; i++) ++ { ++ dwc_otg_pcd_ep_t *ep = &pcd->out_ep[i]; ++ dwc_otg_request_nuke(ep); ++ } ++ ++ /* report disconnect; the driver is already quiesced */ ++ if (pcd->driver && pcd->driver->disconnect) { ++ SPIN_UNLOCK(&pcd->lock); ++ pcd->driver->disconnect(&pcd->gadget); ++ SPIN_LOCK(&pcd->lock); ++ } ++} ++ ++/** ++ * This interrupt indicates that ... 
++ */ ++int32_t dwc_otg_pcd_handle_i2c_intr(dwc_otg_pcd_t *pcd) ++{ ++ gintmsk_data_t intr_mask = { .d32 = 0}; ++ gintsts_data_t gintsts; ++ ++ DWC_PRINT("INTERRUPT Handler not implemented for %s\n", "i2cintr"); ++ intr_mask.b.i2cintr = 1; ++ dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk, ++ intr_mask.d32, 0); ++ ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.i2cintr = 1; ++ dwc_write_reg32 (&GET_CORE_IF(pcd)->core_global_regs->gintsts, ++ gintsts.d32); ++ return 1; ++} ++ ++ ++/** ++ * This interrupt indicates that ... ++ */ ++int32_t dwc_otg_pcd_handle_early_suspend_intr(dwc_otg_pcd_t *pcd) ++{ ++ gintsts_data_t gintsts; ++#if defined(VERBOSE) ++ DWC_PRINT("Early Suspend Detected\n"); ++#endif ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.erlysuspend = 1; ++ dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts, ++ gintsts.d32); ++ return 1; ++} ++ ++/** ++ * This function configures EPO to receive SETUP packets. ++ * ++ * @todo NGS: Update the comments from the HW FS. ++ * ++ * -# Program the following fields in the endpoint specific registers ++ * for Control OUT EP 0, in order to receive a setup packet ++ * - DOEPTSIZ0.Packet Count = 3 (To receive up to 3 back to back ++ * setup packets) ++ * - DOEPTSIZE0.Transfer Size = 24 Bytes (To receive up to 3 back ++ * to back setup packets) ++ * - In DMA mode, DOEPDMA0 Register with a memory address to ++ * store any setup packets received ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param pcd Programming view of the PCD. ++ */ ++static inline void ep0_out_start(dwc_otg_core_if_t *core_if, dwc_otg_pcd_t *pcd) ++{ ++ dwc_otg_dev_if_t *dev_if = core_if->dev_if; ++ deptsiz0_data_t doeptsize0 = { .d32 = 0}; ++ dwc_otg_dma_desc_t* dma_desc; ++ depctl_data_t doepctl = { .d32 = 0 }; ++ ++#ifdef VERBOSE ++ DWC_DEBUGPL(DBG_PCDV,"%s() doepctl0=%0x\n", __func__, ++ dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl)); ++#endif ++ ++ doeptsize0.b.supcnt = 3; ++ doeptsize0.b.pktcnt = 1; ++ doeptsize0.b.xfersize = 8*3; ++ ++ ++ if (core_if->dma_enable) { ++ if (!core_if->dma_desc_enable) { ++ /** put here as for Hermes mode deptisz register should not be written */ ++ dwc_write_reg32(&dev_if->out_ep_regs[0]->doeptsiz, ++ doeptsize0.d32); ++ ++ /** @todo dma needs to handle multiple setup packets (up to 3) */ ++ dwc_write_reg32(&dev_if->out_ep_regs[0]->doepdma, ++ pcd->setup_pkt_dma_handle); ++ } else { ++ dev_if->setup_desc_index = (dev_if->setup_desc_index + 1) & 1; ++ dma_desc = dev_if->setup_desc_addr[dev_if->setup_desc_index]; ++ ++ /** DMA Descriptor Setup */ ++ dma_desc->status.b.bs = BS_HOST_BUSY; ++ dma_desc->status.b.l = 1; ++ dma_desc->status.b.ioc = 1; ++ dma_desc->status.b.bytes = pcd->ep0.dwc_ep.maxpacket; ++ dma_desc->buf = pcd->setup_pkt_dma_handle; ++ dma_desc->status.b.bs = BS_HOST_READY; ++ ++ /** DOEPDMA0 Register write */ ++ dwc_write_reg32(&dev_if->out_ep_regs[0]->doepdma, dev_if->dma_setup_desc_addr[dev_if->setup_desc_index]); ++ } ++ ++ } else { ++ /** put here as for Hermes mode deptisz register should not be written */ ++ dwc_write_reg32(&dev_if->out_ep_regs[0]->doeptsiz, ++ doeptsize0.d32); ++ } ++ ++ /** DOEPCTL0 Register write */ ++ doepctl.b.epena = 1; ++ doepctl.b.cnak = 1; ++ dwc_write_reg32(&dev_if->out_ep_regs[0]->doepctl, doepctl.d32); ++ ++#ifdef VERBOSE ++ DWC_DEBUGPL(DBG_PCDV,"doepctl0=%0x\n", ++ dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl)); ++ DWC_DEBUGPL(DBG_PCDV,"diepctl0=%0x\n", ++ dwc_read_reg32(&dev_if->in_ep_regs[0]->diepctl)); ++#endif ++} 
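
For reference, a minimal standalone sketch of the EP0 SETUP-buffer arithmetic that ep0_out_start() above programs (supcnt = 3 back-to-back SETUP packets of 8 bytes each, i.e. the 24-byte transfer size quoted in the comments). The struct here is purely illustrative; it is not the driver's deptsiz0_data_t register bitfield.

#include <stdio.h>

/* Illustrative stand-in for the three DOEPTSIZ0 fields that
 * ep0_out_start() fills in: supcnt, pktcnt and xfersize. */
struct ep0_out_setup {
	unsigned supcnt;   /* SETUP packets the core may accept back to back */
	unsigned pktcnt;   /* OUT packet count programmed for the transfer */
	unsigned xfersize; /* transfer size in bytes */
};

static struct ep0_out_setup ep0_setup_programming(unsigned setup_pkts)
{
	/* A USB SETUP packet is always 8 bytes, so the transfer size is
	 * simply 8 * supcnt; the driver uses supcnt = 3, giving 24 bytes. */
	struct ep0_out_setup v;

	v.supcnt   = setup_pkts;
	v.pktcnt   = 1;
	v.xfersize = 8 * setup_pkts;
	return v;
}

int main(void)
{
	struct ep0_out_setup v = ep0_setup_programming(3);

	printf("supcnt=%u pktcnt=%u xfersize=%u bytes\n",
	       v.supcnt, v.pktcnt, v.xfersize);
	return 0;
}

Accepting up to three SETUPs before software re-arms EP0 is what the "Transfer Size = 24 Bytes" step in the reset-handler comment below refers to; in DMA mode those packets land in the setup_pkt buffer that dwc_otg_pcd_init() allocated earlier with room for five entries.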
++ ++ ++/** ++ * This interrupt occurs when a USB Reset is detected. When the USB ++ * Reset Interrupt occurs the device state is set to DEFAULT and the ++ * EP0 state is set to IDLE. ++ * -# Set the NAK bit for all OUT endpoints (DOEPCTLn.SNAK = 1) ++ * -# Unmask the following interrupt bits ++ * - DAINTMSK.INEP0 = 1 (Control 0 IN endpoint) ++ * - DAINTMSK.OUTEP0 = 1 (Control 0 OUT endpoint) ++ * - DOEPMSK.SETUP = 1 ++ * - DOEPMSK.XferCompl = 1 ++ * - DIEPMSK.XferCompl = 1 ++ * - DIEPMSK.TimeOut = 1 ++ * -# Program the following fields in the endpoint specific registers ++ * for Control OUT EP 0, in order to receive a setup packet ++ * - DOEPTSIZ0.Packet Count = 3 (To receive up to 3 back to back ++ * setup packets) ++ * - DOEPTSIZE0.Transfer Size = 24 Bytes (To receive up to 3 back ++ * to back setup packets) ++ * - In DMA mode, DOEPDMA0 Register with a memory address to ++ * store any setup packets received ++ * At this point, all the required initialization, except for enabling ++ * the control 0 OUT endpoint is done, for receiving SETUP packets. ++ */ ++int32_t dwc_otg_pcd_handle_usb_reset_intr(dwc_otg_pcd_t * pcd) ++{ ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); ++ dwc_otg_dev_if_t *dev_if = core_if->dev_if; ++ depctl_data_t doepctl = { .d32 = 0}; ++ ++ daint_data_t daintmsk = { .d32 = 0}; ++ doepmsk_data_t doepmsk = { .d32 = 0}; ++ diepmsk_data_t diepmsk = { .d32 = 0}; ++ ++ dcfg_data_t dcfg = { .d32=0 }; ++ grstctl_t resetctl = { .d32=0 }; ++ dctl_data_t dctl = {.d32=0}; ++ int i = 0; ++ gintsts_data_t gintsts; ++ ++ DWC_PRINT("USB RESET\n"); ++#ifdef DWC_EN_ISOC ++ for(i = 1;i < 16; ++i) ++ { ++ dwc_otg_pcd_ep_t *ep; ++ dwc_ep_t *dwc_ep; ++ ep = get_in_ep(pcd,i); ++ if(ep != 0){ ++ dwc_ep = &ep->dwc_ep; ++ dwc_ep->next_frame = 0xffffffff; ++ } ++ } ++#endif /* DWC_EN_ISOC */ ++ ++ /* reset the HNP settings */ ++ dwc_otg_pcd_update_otg(pcd, 1); ++ ++ /* Clear the Remote Wakeup Signalling */ ++ dctl.b.rmtwkupsig = 1; ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dctl, ++ dctl.d32, 0); ++ ++ /* Set NAK for all OUT EPs */ ++ doepctl.b.snak = 1; ++ for (i=0; i <= dev_if->num_out_eps; i++) ++ { ++ dwc_write_reg32(&dev_if->out_ep_regs[i]->doepctl, ++ doepctl.d32); ++ } ++ ++ /* Flush the NP Tx FIFO */ ++ dwc_otg_flush_tx_fifo(core_if, 0x10); ++ /* Flush the Learning Queue */ ++ resetctl.b.intknqflsh = 1; ++ dwc_write_reg32(&core_if->core_global_regs->grstctl, resetctl.d32); ++ ++ if(core_if->multiproc_int_enable) { ++ daintmsk.b.inep0 = 1; ++ daintmsk.b.outep0 = 1; ++ dwc_write_reg32(&dev_if->dev_global_regs->deachintmsk, daintmsk.d32); ++ ++ doepmsk.b.setup = 1; ++ doepmsk.b.xfercompl = 1; ++ doepmsk.b.ahberr = 1; ++ doepmsk.b.epdisabled = 1; ++ ++ if(core_if->dma_desc_enable) { ++ doepmsk.b.stsphsercvd = 1; ++ doepmsk.b.bna = 1; ++ } ++/* ++ doepmsk.b.babble = 1; ++ doepmsk.b.nyet = 1; ++ ++ if(core_if->dma_enable) { ++ doepmsk.b.nak = 1; ++ } ++*/ ++ dwc_write_reg32(&dev_if->dev_global_regs->doepeachintmsk[0], doepmsk.d32); ++ ++ diepmsk.b.xfercompl = 1; ++ diepmsk.b.timeout = 1; ++ diepmsk.b.epdisabled = 1; ++ diepmsk.b.ahberr = 1; ++ diepmsk.b.intknepmis = 1; ++ ++ if(core_if->dma_desc_enable) { ++ diepmsk.b.bna = 1; ++ } ++/* ++ if(core_if->dma_enable) { ++ diepmsk.b.nak = 1; ++ } ++*/ ++ dwc_write_reg32(&dev_if->dev_global_regs->diepeachintmsk[0], diepmsk.d32); ++ } else{ ++ daintmsk.b.inep0 = 1; ++ daintmsk.b.outep0 = 1; ++ dwc_write_reg32(&dev_if->dev_global_regs->daintmsk, daintmsk.d32); ++ ++ doepmsk.b.setup = 1; ++ doepmsk.b.xfercompl = 1; ++ doepmsk.b.ahberr = 
1; ++ doepmsk.b.epdisabled = 1; ++ ++ if(core_if->dma_desc_enable) { ++ doepmsk.b.stsphsercvd = 1; ++ doepmsk.b.bna = 1; ++ } ++/* ++ doepmsk.b.babble = 1; ++ doepmsk.b.nyet = 1; ++ doepmsk.b.nak = 1; ++*/ ++ dwc_write_reg32(&dev_if->dev_global_regs->doepmsk, doepmsk.d32); ++ ++ diepmsk.b.xfercompl = 1; ++ diepmsk.b.timeout = 1; ++ diepmsk.b.epdisabled = 1; ++ diepmsk.b.ahberr = 1; ++ diepmsk.b.intknepmis = 1; ++ ++ if(core_if->dma_desc_enable) { ++ diepmsk.b.bna = 1; ++ } ++ ++// diepmsk.b.nak = 1; ++ ++ dwc_write_reg32(&dev_if->dev_global_regs->diepmsk, diepmsk.d32); ++ } ++ ++ /* Reset Device Address */ ++ dcfg.d32 = dwc_read_reg32(&dev_if->dev_global_regs->dcfg); ++ dcfg.b.devaddr = 0; ++ dwc_write_reg32(&dev_if->dev_global_regs->dcfg, dcfg.d32); ++ ++ /* setup EP0 to receive SETUP packets */ ++ ep0_out_start(core_if, pcd); ++ ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.usbreset = 1; ++ dwc_write_reg32 (&core_if->core_global_regs->gintsts, gintsts.d32); ++ ++ return 1; ++} ++ ++/** ++ * Get the device speed from the device status register and convert it ++ * to USB speed constant. ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ */ ++static int get_device_speed(dwc_otg_core_if_t *core_if) ++{ ++ dsts_data_t dsts; ++ enum usb_device_speed speed = USB_SPEED_UNKNOWN; ++ dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts); ++ ++ switch (dsts.b.enumspd) { ++ case DWC_DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ: ++ speed = USB_SPEED_HIGH; ++ break; ++ case DWC_DSTS_ENUMSPD_FS_PHY_30MHZ_OR_60MHZ: ++ case DWC_DSTS_ENUMSPD_FS_PHY_48MHZ: ++ speed = USB_SPEED_FULL; ++ break; ++ ++ case DWC_DSTS_ENUMSPD_LS_PHY_6MHZ: ++ speed = USB_SPEED_LOW; ++ break; ++ } ++ ++ return speed; ++} ++ ++/** ++ * Read the device status register and set the device speed in the ++ * data structure. ++ * Set up EP0 to receive SETUP packets by calling dwc_ep0_activate. ++ */ ++int32_t dwc_otg_pcd_handle_enum_done_intr(dwc_otg_pcd_t *pcd) ++{ ++ dwc_otg_pcd_ep_t *ep0 = &pcd->ep0; ++ gintsts_data_t gintsts; ++ gusbcfg_data_t gusbcfg; ++ dwc_otg_core_global_regs_t *global_regs = ++ GET_CORE_IF(pcd)->core_global_regs; ++ uint8_t utmi16b, utmi8b; ++ DWC_DEBUGPL(DBG_PCD, "SPEED ENUM\n"); ++ ++ if (GET_CORE_IF(pcd)->snpsid >= 0x4F54260A) { ++ utmi16b = 6; ++ utmi8b = 9; ++ } else { ++ utmi16b = 4; ++ utmi8b = 8; ++ } ++ dwc_otg_ep0_activate(GET_CORE_IF(pcd), &ep0->dwc_ep); ++ ++#ifdef DEBUG_EP0 ++ print_ep0_state(pcd); ++#endif ++ ++ if (pcd->ep0state == EP0_DISCONNECT) { ++ pcd->ep0state = EP0_IDLE; ++ } ++ else if (pcd->ep0state == EP0_STALL) { ++ pcd->ep0state = EP0_IDLE; ++ } ++ ++ pcd->ep0state = EP0_IDLE; ++ ++ ep0->stopped = 0; ++ ++ pcd->gadget.speed = get_device_speed(GET_CORE_IF(pcd)); ++ ++ /* Set USB turnaround time based on device speed and PHY interface. 
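++ * GUSBCFG.USBTrdTim is set to 9 for ULPI and for full/low speed, and to
++ * the utmi8b/utmi16b values chosen above (they differ by core SNPSID)
++ * for 8-bit and 16-bit UTMI+ data widths.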
*/ ++ gusbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg); ++ if (pcd->gadget.speed == USB_SPEED_HIGH) { ++ if (GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type == DWC_HWCFG2_HS_PHY_TYPE_ULPI) { ++ /* ULPI interface */ ++ gusbcfg.b.usbtrdtim = 9; ++ } ++ if (GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type == DWC_HWCFG2_HS_PHY_TYPE_UTMI) { ++ /* UTMI+ interface */ ++ if (GET_CORE_IF(pcd)->hwcfg4.b.utmi_phy_data_width == 0) { ++ gusbcfg.b.usbtrdtim = utmi8b; ++ } ++ else if (GET_CORE_IF(pcd)->hwcfg4.b.utmi_phy_data_width == 1) { ++ gusbcfg.b.usbtrdtim = utmi16b; ++ } ++ else if (GET_CORE_IF(pcd)->core_params->phy_utmi_width == 8) { ++ gusbcfg.b.usbtrdtim = utmi8b; ++ } ++ else { ++ gusbcfg.b.usbtrdtim = utmi16b; ++ } ++ } ++ if (GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type == DWC_HWCFG2_HS_PHY_TYPE_UTMI_ULPI) { ++ /* UTMI+ OR ULPI interface */ ++ if (gusbcfg.b.ulpi_utmi_sel == 1) { ++ /* ULPI interface */ ++ gusbcfg.b.usbtrdtim = 9; ++ } ++ else { ++ /* UTMI+ interface */ ++ if (GET_CORE_IF(pcd)->core_params->phy_utmi_width == 16) { ++ gusbcfg.b.usbtrdtim = utmi16b; ++ } ++ else { ++ gusbcfg.b.usbtrdtim = utmi8b; ++ } ++ } ++ } ++ } ++ else { ++ /* Full or low speed */ ++ gusbcfg.b.usbtrdtim = 9; ++ } ++ dwc_write_reg32(&global_regs->gusbcfg, gusbcfg.d32); ++ ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.enumdone = 1; ++ dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts, ++ gintsts.d32); ++ return 1; ++} ++ ++/** ++ * This interrupt indicates that the ISO OUT Packet was dropped due to ++ * Rx FIFO full or Rx Status Queue Full. If this interrupt occurs ++ * read all the data from the Rx FIFO. ++ */ ++int32_t dwc_otg_pcd_handle_isoc_out_packet_dropped_intr(dwc_otg_pcd_t *pcd) ++{ ++ gintmsk_data_t intr_mask = { .d32 = 0}; ++ gintsts_data_t gintsts; ++ ++ DWC_PRINT("INTERRUPT Handler not implemented for %s\n", ++ "ISOC Out Dropped"); ++ ++ intr_mask.b.isooutdrop = 1; ++ dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk, ++ intr_mask.d32, 0); ++ ++ /* Clear interrupt */ ++ ++ gintsts.d32 = 0; ++ gintsts.b.isooutdrop = 1; ++ dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts, ++ gintsts.d32); ++ ++ return 1; ++} ++ ++/** ++ * This interrupt indicates the end of the portion of the micro-frame ++ * for periodic transactions. If there is a periodic transaction for ++ * the next frame, load the packets into the EP periodic Tx FIFO. ++ */ ++int32_t dwc_otg_pcd_handle_end_periodic_frame_intr(dwc_otg_pcd_t *pcd) ++{ ++ gintmsk_data_t intr_mask = { .d32 = 0}; ++ gintsts_data_t gintsts; ++ DWC_PRINT("INTERRUPT Handler not implemented for %s\n", "EOP"); ++ ++ intr_mask.b.eopframe = 1; ++ dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk, ++ intr_mask.d32, 0); ++ ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.eopframe = 1; ++ dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts, gintsts.d32); ++ ++ return 1; ++} ++ ++/** ++ * This interrupt indicates that EP of the packet on the top of the ++ * non-periodic Tx FIFO does not match EP of the IN Token received. ++ * ++ * The "Device IN Token Queue" Registers are read to determine the ++ * order the IN Tokens have been received. The non-periodic Tx FIFO ++ * is flushed, so it can be reloaded in the order seen in the IN Token ++ * Queue. 
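++ * As implemented below, this handler only logs a debug message and
++ * clears the interrupt.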
++ */ ++int32_t dwc_otg_pcd_handle_ep_mismatch_intr(dwc_otg_core_if_t *core_if) ++{ ++ gintsts_data_t gintsts; ++ DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, core_if); ++ ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.epmismatch = 1; ++ dwc_write_reg32 (&core_if->core_global_regs->gintsts, gintsts.d32); ++ ++ return 1; ++} ++ ++/** ++ * This funcion stalls EP0. ++ */ ++static inline void ep0_do_stall(dwc_otg_pcd_t *pcd, const int err_val) ++{ ++ dwc_otg_pcd_ep_t *ep0 = &pcd->ep0; ++ struct usb_ctrlrequest *ctrl = &pcd->setup_pkt->req; ++ DWC_WARN("req %02x.%02x protocol STALL; err %d\n", ++ ctrl->bRequestType, ctrl->bRequest, err_val); ++ ++ ep0->dwc_ep.is_in = 1; ++ dwc_otg_ep_set_stall(pcd->otg_dev->core_if, &ep0->dwc_ep); ++ pcd->ep0.stopped = 1; ++ pcd->ep0state = EP0_IDLE; ++ ep0_out_start(GET_CORE_IF(pcd), pcd); ++} ++ ++/** ++ * This functions delegates the setup command to the gadget driver. ++ */ ++static inline void do_gadget_setup(dwc_otg_pcd_t *pcd, ++ struct usb_ctrlrequest * ctrl) ++{ ++ int ret = 0; ++ if (pcd->driver && pcd->driver->setup) { ++ SPIN_UNLOCK(&pcd->lock); ++ ret = pcd->driver->setup(&pcd->gadget, ctrl); ++ SPIN_LOCK(&pcd->lock); ++ if (ret < 0) { ++ ep0_do_stall(pcd, ret); ++ } ++ ++ /** @todo This is a g_file_storage gadget driver specific ++ * workaround: a DELAYED_STATUS result from the fsg_setup ++ * routine will result in the gadget queueing a EP0 IN status ++ * phase for a two-stage control transfer. Exactly the same as ++ * a SET_CONFIGURATION/SET_INTERFACE except that this is a class ++ * specific request. Need a generic way to know when the gadget ++ * driver will queue the status phase. Can we assume when we ++ * call the gadget driver setup() function that it will always ++ * queue and require the following flag? Need to look into ++ * this. ++ */ ++ ++ if (ret == 256 + 999) { ++ pcd->request_config = 1; ++ } ++ } ++} ++ ++/** ++ * This function starts the Zero-Length Packet for the IN status phase ++ * of a 2 stage control transfer. ++ */ ++static inline void do_setup_in_status_phase(dwc_otg_pcd_t *pcd) ++{ ++ dwc_otg_pcd_ep_t *ep0 = &pcd->ep0; ++ if (pcd->ep0state == EP0_STALL) { ++ return; ++ } ++ ++ pcd->ep0state = EP0_IN_STATUS_PHASE; ++ ++ /* Prepare for more SETUP Packets */ ++ DWC_DEBUGPL(DBG_PCD, "EP0 IN ZLP\n"); ++ ep0->dwc_ep.xfer_len = 0; ++ ep0->dwc_ep.xfer_count = 0; ++ ep0->dwc_ep.is_in = 1; ++ ep0->dwc_ep.dma_addr = pcd->setup_pkt_dma_handle; ++ dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd), &ep0->dwc_ep); ++ ++ /* Prepare for more SETUP Packets */ ++// if(GET_CORE_IF(pcd)->dma_enable == 0) ep0_out_start(GET_CORE_IF(pcd), pcd); ++} ++ ++/** ++ * This function starts the Zero-Length Packet for the OUT status phase ++ * of a 2 stage control transfer. ++ */ ++static inline void do_setup_out_status_phase(dwc_otg_pcd_t *pcd) ++{ ++ dwc_otg_pcd_ep_t *ep0 = &pcd->ep0; ++ if (pcd->ep0state == EP0_STALL) { ++ DWC_DEBUGPL(DBG_PCD, "EP0 STALLED\n"); ++ return; ++ } ++ pcd->ep0state = EP0_OUT_STATUS_PHASE; ++ ++ DWC_DEBUGPL(DBG_PCD, "EP0 OUT ZLP\n"); ++ ep0->dwc_ep.xfer_len = 0; ++ ep0->dwc_ep.xfer_count = 0; ++ ep0->dwc_ep.is_in = 0; ++ ep0->dwc_ep.dma_addr = pcd->setup_pkt_dma_handle; ++ dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd), &ep0->dwc_ep); ++ ++ /* Prepare for more SETUP Packets */ ++ if(GET_CORE_IF(pcd)->dma_enable == 0) { ++ ep0_out_start(GET_CORE_IF(pcd), pcd); ++ } ++} ++ ++/** ++ * Clear the EP halt (STALL) and if pending requests start the ++ * transfer. 
++ */ ++static inline void pcd_clear_halt(dwc_otg_pcd_t *pcd, dwc_otg_pcd_ep_t *ep) ++{ ++ if(ep->dwc_ep.stall_clear_flag == 0) ++ dwc_otg_ep_clear_stall(GET_CORE_IF(pcd), &ep->dwc_ep); ++ ++ /* Reactive the EP */ ++ dwc_otg_ep_activate(GET_CORE_IF(pcd), &ep->dwc_ep); ++ if (ep->stopped) { ++ ep->stopped = 0; ++ /* If there is a request in the EP queue start it */ ++ ++ /** @todo FIXME: this causes an EP mismatch in DMA mode. ++ * epmismatch not yet implemented. */ ++ ++ /* ++ * Above fixme is solved by implmenting a tasklet to call the ++ * start_next_request(), outside of interrupt context at some ++ * time after the current time, after a clear-halt setup packet. ++ * Still need to implement ep mismatch in the future if a gadget ++ * ever uses more than one endpoint at once ++ */ ++ ep->queue_sof = 1; ++ tasklet_schedule (pcd->start_xfer_tasklet); ++ } ++ /* Start Control Status Phase */ ++ do_setup_in_status_phase(pcd); ++} ++ ++/** ++ * This function is called when the SET_FEATURE TEST_MODE Setup packet ++ * is sent from the host. The Device Control register is written with ++ * the Test Mode bits set to the specified Test Mode. This is done as ++ * a tasklet so that the "Status" phase of the control transfer ++ * completes before transmitting the TEST packets. ++ * ++ * @todo This has not been tested since the tasklet struct was put ++ * into the PCD struct! ++ * ++ */ ++static void do_test_mode(unsigned long data) ++{ ++ dctl_data_t dctl; ++ dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *)data; ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); ++ int test_mode = pcd->test_mode; ++ ++ ++// DWC_WARN("%s() has not been tested since being rewritten!\n", __func__); ++ ++ dctl.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dctl); ++ switch (test_mode) { ++ case 1: // TEST_J ++ dctl.b.tstctl = 1; ++ break; ++ ++ case 2: // TEST_K ++ dctl.b.tstctl = 2; ++ break; ++ ++ case 3: // TEST_SE0_NAK ++ dctl.b.tstctl = 3; ++ break; ++ ++ case 4: // TEST_PACKET ++ dctl.b.tstctl = 4; ++ break; ++ ++ case 5: // TEST_FORCE_ENABLE ++ dctl.b.tstctl = 5; ++ break; ++ } ++ dwc_write_reg32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32); ++} ++ ++/** ++ * This function process the GET_STATUS Setup Commands. ++ */ ++static inline void do_get_status(dwc_otg_pcd_t *pcd) ++{ ++ struct usb_ctrlrequest ctrl = pcd->setup_pkt->req; ++ dwc_otg_pcd_ep_t *ep; ++ dwc_otg_pcd_ep_t *ep0 = &pcd->ep0; ++ uint16_t *status = pcd->status_buf; ++ ++#ifdef DEBUG_EP0 ++ DWC_DEBUGPL(DBG_PCD, ++ "GET_STATUS %02x.%02x v%04x i%04x l%04x\n", ++ ctrl.bRequestType, ctrl.bRequest, ++ ctrl.wValue, ctrl.wIndex, ctrl.wLength); ++#endif ++ ++ switch (ctrl.bRequestType & USB_RECIP_MASK) { ++ case USB_RECIP_DEVICE: ++ *status = 0x1; /* Self powered */ ++ *status |= pcd->remote_wakeup_enable << 1; ++ break; ++ ++ case USB_RECIP_INTERFACE: ++ *status = 0; ++ break; ++ ++ case USB_RECIP_ENDPOINT: ++ ep = get_ep_by_addr(pcd, ctrl.wIndex); ++ if (ep == 0 || ctrl.wLength > 2) { ++ ep0_do_stall(pcd, -EOPNOTSUPP); ++ return; ++ } ++ /** @todo check for EP stall */ ++ *status = ep->stopped; ++ break; ++ } ++ pcd->ep0_pending = 1; ++ ep0->dwc_ep.start_xfer_buff = (uint8_t *)status; ++ ep0->dwc_ep.xfer_buff = (uint8_t *)status; ++ ep0->dwc_ep.dma_addr = pcd->status_buf_dma_handle; ++ ep0->dwc_ep.xfer_len = 2; ++ ep0->dwc_ep.xfer_count = 0; ++ ep0->dwc_ep.total_len = ep0->dwc_ep.xfer_len; ++ dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd), &ep0->dwc_ep); ++} ++/** ++ * This function process the SET_FEATURE Setup Commands. 
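++ * Device-recipient requests handle REMOTE_WAKEUP, TEST_MODE (deferred to
++ * the test-mode tasklet) and the OTG HNP feature selectors; endpoint
++ * ENDPOINT_HALT requests stall the addressed EP; interface requests are
++ * delegated to the gadget driver via do_gadget_setup().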
++ */ ++static inline void do_set_feature(dwc_otg_pcd_t *pcd) ++{ ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); ++ dwc_otg_core_global_regs_t *global_regs = ++ core_if->core_global_regs; ++ struct usb_ctrlrequest ctrl = pcd->setup_pkt->req; ++ dwc_otg_pcd_ep_t *ep = 0; ++ int32_t otg_cap_param = core_if->core_params->otg_cap; ++ gotgctl_data_t gotgctl = { .d32 = 0 }; ++ ++ DWC_DEBUGPL(DBG_PCD, "SET_FEATURE:%02x.%02x v%04x i%04x l%04x\n", ++ ctrl.bRequestType, ctrl.bRequest, ++ ctrl.wValue, ctrl.wIndex, ctrl.wLength); ++ DWC_DEBUGPL(DBG_PCD,"otg_cap=%d\n", otg_cap_param); ++ ++ ++ switch (ctrl.bRequestType & USB_RECIP_MASK) { ++ case USB_RECIP_DEVICE: ++ switch (ctrl.wValue) { ++ case USB_DEVICE_REMOTE_WAKEUP: ++ pcd->remote_wakeup_enable = 1; ++ break; ++ ++ case USB_DEVICE_TEST_MODE: ++ /* Setup the Test Mode tasklet to do the Test ++ * Packet generation after the SETUP Status ++ * phase has completed. */ ++ ++ /** @todo This has not been tested since the ++ * tasklet struct was put into the PCD ++ * struct! */ ++ pcd->test_mode_tasklet.next = 0; ++ pcd->test_mode_tasklet.state = 0; ++ atomic_set(&pcd->test_mode_tasklet.count, 0); ++ pcd->test_mode_tasklet.func = do_test_mode; ++ pcd->test_mode_tasklet.data = (unsigned long)pcd; ++ pcd->test_mode = ctrl.wIndex >> 8; ++ tasklet_schedule(&pcd->test_mode_tasklet); ++ break; ++ ++ case USB_DEVICE_B_HNP_ENABLE: ++ DWC_DEBUGPL(DBG_PCDV, "SET_FEATURE: USB_DEVICE_B_HNP_ENABLE\n"); ++ ++ /* dev may initiate HNP */ ++ if (otg_cap_param == DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE) { ++ pcd->b_hnp_enable = 1; ++ dwc_otg_pcd_update_otg(pcd, 0); ++ DWC_DEBUGPL(DBG_PCD, "Request B HNP\n"); ++ /**@todo Is the gotgctl.devhnpen cleared ++ * by a USB Reset? */ ++ gotgctl.b.devhnpen = 1; ++ gotgctl.b.hnpreq = 1; ++ dwc_write_reg32(&global_regs->gotgctl, gotgctl.d32); ++ } ++ else { ++ ep0_do_stall(pcd, -EOPNOTSUPP); ++ } ++ break; ++ ++ case USB_DEVICE_A_HNP_SUPPORT: ++ /* RH port supports HNP */ ++ DWC_DEBUGPL(DBG_PCDV, "SET_FEATURE: USB_DEVICE_A_HNP_SUPPORT\n"); ++ if (otg_cap_param == DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE) { ++ pcd->a_hnp_support = 1; ++ dwc_otg_pcd_update_otg(pcd, 0); ++ } ++ else { ++ ep0_do_stall(pcd, -EOPNOTSUPP); ++ } ++ break; ++ ++ case USB_DEVICE_A_ALT_HNP_SUPPORT: ++ /* other RH port does */ ++ DWC_DEBUGPL(DBG_PCDV, "SET_FEATURE: USB_DEVICE_A_ALT_HNP_SUPPORT\n"); ++ if (otg_cap_param == DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE) { ++ pcd->a_alt_hnp_support = 1; ++ dwc_otg_pcd_update_otg(pcd, 0); ++ } ++ else { ++ ep0_do_stall(pcd, -EOPNOTSUPP); ++ } ++ break; ++ } ++ do_setup_in_status_phase(pcd); ++ break; ++ ++ case USB_RECIP_INTERFACE: ++ do_gadget_setup(pcd, &ctrl); ++ break; ++ ++ case USB_RECIP_ENDPOINT: ++ if (ctrl.wValue == USB_ENDPOINT_HALT) { ++ ep = get_ep_by_addr(pcd, ctrl.wIndex); ++ if (ep == 0) { ++ ep0_do_stall(pcd, -EOPNOTSUPP); ++ return; ++ } ++ ep->stopped = 1; ++ dwc_otg_ep_set_stall(core_if, &ep->dwc_ep); ++ } ++ do_setup_in_status_phase(pcd); ++ break; ++ } ++} ++ ++/** ++ * This function process the CLEAR_FEATURE Setup Commands. 
++ */ ++static inline void do_clear_feature(dwc_otg_pcd_t *pcd) ++{ ++ struct usb_ctrlrequest ctrl = pcd->setup_pkt->req; ++ dwc_otg_pcd_ep_t *ep = 0; ++ ++ DWC_DEBUGPL(DBG_PCD, ++ "CLEAR_FEATURE:%02x.%02x v%04x i%04x l%04x\n", ++ ctrl.bRequestType, ctrl.bRequest, ++ ctrl.wValue, ctrl.wIndex, ctrl.wLength); ++ ++ switch (ctrl.bRequestType & USB_RECIP_MASK) { ++ case USB_RECIP_DEVICE: ++ switch (ctrl.wValue) { ++ case USB_DEVICE_REMOTE_WAKEUP: ++ pcd->remote_wakeup_enable = 0; ++ break; ++ ++ case USB_DEVICE_TEST_MODE: ++ /** @todo Add CLEAR_FEATURE for TEST modes. */ ++ break; ++ } ++ do_setup_in_status_phase(pcd); ++ break; ++ ++ case USB_RECIP_ENDPOINT: ++ ep = get_ep_by_addr(pcd, ctrl.wIndex); ++ if (ep == 0) { ++ ep0_do_stall(pcd, -EOPNOTSUPP); ++ return; ++ } ++ ++ pcd_clear_halt(pcd, ep); ++ ++ break; ++ } ++} ++ ++/** ++ * This function process the SET_ADDRESS Setup Commands. ++ */ ++static inline void do_set_address(dwc_otg_pcd_t *pcd) ++{ ++ dwc_otg_dev_if_t *dev_if = GET_CORE_IF(pcd)->dev_if; ++ struct usb_ctrlrequest ctrl = pcd->setup_pkt->req; ++ ++ if (ctrl.bRequestType == USB_RECIP_DEVICE) { ++ dcfg_data_t dcfg = {.d32=0}; ++ ++#ifdef DEBUG_EP0 ++// DWC_DEBUGPL(DBG_PCDV, "SET_ADDRESS:%d\n", ctrl.wValue); ++#endif ++ dcfg.b.devaddr = ctrl.wValue; ++ dwc_modify_reg32(&dev_if->dev_global_regs->dcfg, 0, dcfg.d32); ++ do_setup_in_status_phase(pcd); ++ } ++} ++ ++/** ++ * This function processes SETUP commands. In Linux, the USB Command ++ * processing is done in two places - the first being the PCD and the ++ * second in the Gadget Driver (for example, the File-Backed Storage ++ * Gadget Driver). ++ * ++ *
++ * Parameter Name - Meaning
++ *
++ * otg_cap - Specifies the OTG capabilities. The driver will automatically
++ *   detect the value for this parameter if none is specified.
++ *   - 0: HNP and SRP capable (default, if available)
++ *   - 1: SRP Only capable
++ *   - 2: No HNP/SRP capable
++ *
++ * dma_enable - Specifies whether to use slave or DMA mode for accessing
++ *   the data FIFOs. The driver will automatically detect the value for
++ *   this parameter if none is specified.
++ *   - 0: Slave
++ *   - 1: DMA (default, if available)
++ *
++ * dma_burst_size - The DMA Burst size (applicable only for External DMA
++ *   Mode).
++ *   - Values: 1, 4, 8, 16, 32, 64, 128, 256 (default 32)
++ *
++ * speed - Specifies the maximum speed of operation in host and device
++ *   mode. The actual speed depends on the speed of the attached device
++ *   and the value of phy_type.
++ *   - 0: High Speed (default)
++ *   - 1: Full Speed
++ *
++ * host_support_fs_ls_low_power - Specifies whether low power mode is
++ *   supported when attached to a Full Speed or Low Speed device in host
++ *   mode.
++ *   - 0: Don't support low power mode (default)
++ *   - 1: Support low power mode
++ *
++ * host_ls_low_power_phy_clk - Specifies the PHY clock rate in low power
++ *   mode when connected to a Low Speed device in host mode. This
++ *   parameter is applicable only if HOST_SUPPORT_FS_LS_LOW_POWER is
++ *   enabled.
++ *   - 0: 48 MHz (default)
++ *   - 1: 6 MHz
++ *
++ * enable_dynamic_fifo - Specifies whether FIFOs may be resized by the
++ *   driver software.
++ *   - 0: Use cC FIFO size parameters
++ *   - 1: Allow dynamic FIFO sizing (default)
++ *
++ * data_fifo_size - Total number of 4-byte words in the data FIFO memory.
++ *   This memory includes the Rx FIFO, non-periodic Tx FIFO, and periodic
++ *   Tx FIFOs.
++ *   - Values: 32 to 32768 (default 8192)
++ *   Note: The total FIFO memory depth in the FPGA configuration is 8192.
++ *
++ * dev_rx_fifo_size - Number of 4-byte words in the Rx FIFO in device mode
++ *   when dynamic FIFO sizing is enabled.
++ *   - Values: 16 to 32768 (default 1064)
++ *
++ * dev_nperio_tx_fifo_size - Number of 4-byte words in the non-periodic Tx
++ *   FIFO in device mode when dynamic FIFO sizing is enabled.
++ *   - Values: 16 to 32768 (default 1024)
++ *
++ * dev_perio_tx_fifo_size_n (n = 1 to 15) - Number of 4-byte words in each
++ *   of the periodic Tx FIFOs in device mode when dynamic FIFO sizing is
++ *   enabled.
++ *   - Values: 4 to 768 (default 256)
++ *
++ * host_rx_fifo_size - Number of 4-byte words in the Rx FIFO in host mode
++ *   when dynamic FIFO sizing is enabled.
++ *   - Values: 16 to 32768 (default 1024)
++ *
++ * host_nperio_tx_fifo_size - Number of 4-byte words in the non-periodic
++ *   Tx FIFO in host mode when dynamic FIFO sizing is enabled in the core.
++ *   - Values: 16 to 32768 (default 1024)
++ *
++ * host_perio_tx_fifo_size - Number of 4-byte words in the host periodic
++ *   Tx FIFO when dynamic FIFO sizing is enabled.
++ *   - Values: 16 to 32768 (default 1024)
++ *
++ * max_transfer_size - The maximum transfer size supported in bytes.
++ *   - Values: 2047 to 65,535 (default 65,535)
++ *
++ * max_packet_count - The maximum number of packets in a transfer.
++ *   - Values: 15 to 511 (default 511)
++ *
++ * host_channels - The number of host channel registers to use.
++ *   - Values: 1 to 16 (default 12)
++ *   Note: The FPGA configuration supports a maximum of 12 host channels.
++ *
++ * dev_endpoints - The number of endpoints in addition to EP0 available
++ *   for device mode operations.
++ *   - Values: 1 to 15 (default 6 IN and OUT)
++ *   Note: The FPGA configuration supports a maximum of 6 IN and OUT
++ *   endpoints in addition to EP0.
++ *
++ * phy_type - Specifies the type of PHY interface to use. By default, the
++ *   driver will automatically detect the phy_type.
++ *   - 0: Full Speed
++ *   - 1: UTMI+ (default, if available)
++ *   - 2: ULPI
++ *
++ * phy_utmi_width - Specifies the UTMI+ Data Width. This parameter is
++ *   applicable for a phy_type of UTMI+. Also, this parameter is
++ *   applicable only if the OTG_HSPHY_WIDTH cC parameter was set to
++ *   "8 and 16 bits", meaning that the core has been configured to work
++ *   at either data path width.
++ *   - Values: 8 or 16 bits (default 16)
++ *
++ * phy_ulpi_ddr - Specifies whether the ULPI operates at double or single
++ *   data rate. This parameter is only applicable if phy_type is ULPI.
++ *   - 0: single data rate ULPI interface with 8 bit wide data bus (default)
++ *   - 1: double data rate ULPI interface with 4 bit wide data bus
++ *
++ * i2c_enable - Specifies whether to use the I2C interface for full speed
++ *   PHY. This parameter is only applicable if PHY_TYPE is FS.
++ *   - 0: Disabled (default)
++ *   - 1: Enabled
++ *
++ * otg_en_multiple_tx_fifo - Specifies whether dedicated Tx FIFOs are
++ *   enabled for non-periodic IN EPs. The driver will automatically detect
++ *   the value for this parameter if none is specified.
++ *   - 0: Disabled
++ *   - 1: Enabled (default, if available)
++ *
++ * dev_tx_fifo_size_n (n = 1 to 15) - Number of 4-byte words in each of
++ *   the Tx FIFOs in device mode when dynamic FIFO sizing is enabled.
++ *   - Values: 4 to 768 (default 256)
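++ *
++ * For example (illustrative only; this assumes the build exposes the
++ * values above as module parameters of a dwc_otg.ko module, which is
++ * outside the scope of this file), Buffer DMA mode at high speed with six
++ * device endpoints could be requested with something like:
++ *
++ *     insmod dwc_otg.ko dma_enable=1 speed=0 dev_endpoints=6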
++ *
++ * Command - Driver - Description
++ *
++ * GET_STATUS - PCD - Command is processed as defined in Chapter 9 of the
++ *   USB 2.0 Specification.
++ *
++ * CLEAR_FEATURE - PCD - Device and Endpoint requests are processed; the
++ *   ENDPOINT_HALT feature is handled, all other (and Interface) requests
++ *   are ignored.
++ *
++ * SET_FEATURE - PCD - The Device and Endpoint requests are processed by
++ *   the PCD. Interface requests are passed to the Gadget Driver.
++ *
++ * SET_ADDRESS - PCD - Program the DCFG reg with the device address
++ *   received.
++ *
++ * GET_DESCRIPTOR - Gadget Driver - Return the requested descriptor.
++ *
++ * SET_DESCRIPTOR - Gadget Driver - Optional; not implemented by any of
++ *   the existing Gadget Drivers.
++ *
++ * SET_CONFIGURATION - Gadget Driver - Disable all EPs and enable EPs for
++ *   the new configuration.
++ *
++ * GET_CONFIGURATION - Gadget Driver - Return the current configuration.
++ *
++ * SET_INTERFACE - Gadget Driver - Disable all EPs and enable EPs for the
++ *   new configuration.
++ *
++ * GET_INTERFACE - Gadget Driver - Return the current interface.
++ *
++ * SYNC_FRAME - PCD - Display a debug message.
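++ *
++ * As a concrete example of a PCD-handled command (see pcd_setup() below
++ * and do_set_address() above): a SET_ADDRESS SETUP packet with
++ * bRequestType == USB_RECIP_DEVICE and wLength == 0 is dispatched to
++ * do_set_address(), which writes the received wValue into DCFG.DevAddr
++ * and then queues the zero-length IN status packet via
++ * do_setup_in_status_phase().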
++ * ++ * When the SETUP Phase Done interrupt occurs, the PCD SETUP commands are ++ * processed by pcd_setup. Calling the Function Driver's setup function from ++ * pcd_setup processes the gadget SETUP commands. ++ */ ++static inline void pcd_setup(dwc_otg_pcd_t *pcd) ++{ ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); ++ dwc_otg_dev_if_t *dev_if = core_if->dev_if; ++ struct usb_ctrlrequest ctrl = pcd->setup_pkt->req; ++ dwc_otg_pcd_ep_t *ep0 = &pcd->ep0; ++ ++ deptsiz0_data_t doeptsize0 = { .d32 = 0}; ++ ++#ifdef DEBUG_EP0 ++ DWC_DEBUGPL(DBG_PCD, "SETUP %02x.%02x v%04x i%04x l%04x\n", ++ ctrl.bRequestType, ctrl.bRequest, ++ ctrl.wValue, ctrl.wIndex, ctrl.wLength); ++#endif ++ ++ doeptsize0.d32 = dwc_read_reg32(&dev_if->out_ep_regs[0]->doeptsiz); ++ ++ /** @todo handle > 1 setup packet , assert error for now */ ++ ++ if (core_if->dma_enable && core_if->dma_desc_enable == 0 && (doeptsize0.b.supcnt < 2)) { ++ DWC_ERROR ("\n\n----------- CANNOT handle > 1 setup packet in DMA mode\n\n"); ++ } ++ ++ /* Clean up the request queue */ ++ dwc_otg_request_nuke(ep0); ++ ep0->stopped = 0; ++ ++ if (ctrl.bRequestType & USB_DIR_IN) { ++ ep0->dwc_ep.is_in = 1; ++ pcd->ep0state = EP0_IN_DATA_PHASE; ++ } ++ else { ++ ep0->dwc_ep.is_in = 0; ++ pcd->ep0state = EP0_OUT_DATA_PHASE; ++ } ++ ++ if(ctrl.wLength == 0) { ++ ep0->dwc_ep.is_in = 1; ++ pcd->ep0state = EP0_IN_STATUS_PHASE; ++ } ++ ++ if ((ctrl.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD) { ++ /* handle non-standard (class/vendor) requests in the gadget driver */ ++ do_gadget_setup(pcd, &ctrl); ++ return; ++ } ++ ++ /** @todo NGS: Handle bad setup packet? */ ++ ++/////////////////////////////////////////// ++//// --- Standard Request handling --- //// ++ ++ switch (ctrl.bRequest) { ++ case USB_REQ_GET_STATUS: ++ do_get_status(pcd); ++ break; ++ ++ case USB_REQ_CLEAR_FEATURE: ++ do_clear_feature(pcd); ++ break; ++ ++ case USB_REQ_SET_FEATURE: ++ do_set_feature(pcd); ++ break; ++ ++ case USB_REQ_SET_ADDRESS: ++ do_set_address(pcd); ++ break; ++ ++ case USB_REQ_SET_INTERFACE: ++ case USB_REQ_SET_CONFIGURATION: ++// _pcd->request_config = 1; /* Configuration changed */ ++ do_gadget_setup(pcd, &ctrl); ++ break; ++ ++ case USB_REQ_SYNCH_FRAME: ++ do_gadget_setup(pcd, &ctrl); ++ break; ++ ++ default: ++ /* Call the Gadget Driver's setup functions */ ++ do_gadget_setup(pcd, &ctrl); ++ break; ++ } ++} ++ ++/** ++ * This function completes the ep0 control transfer. 
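++ * If a deferred EP0 response is still pending (ep0_pending set with an
++ * empty request queue), the matching status phase is started instead.
++ * Otherwise, when the data phase finishes, the opposite-direction status
++ * ZLP is queued, and the request itself is completed once the status
++ * phase has run.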
++ */ ++static int32_t ep0_complete_request(dwc_otg_pcd_ep_t *ep) ++{ ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(ep->pcd); ++ dwc_otg_dev_if_t *dev_if = core_if->dev_if; ++ dwc_otg_dev_in_ep_regs_t *in_ep_regs = ++ dev_if->in_ep_regs[ep->dwc_ep.num]; ++#ifdef DEBUG_EP0 ++ dwc_otg_dev_out_ep_regs_t *out_ep_regs = ++ dev_if->out_ep_regs[ep->dwc_ep.num]; ++#endif ++ deptsiz0_data_t deptsiz; ++ desc_sts_data_t desc_sts; ++ dwc_otg_pcd_request_t *req; ++ int is_last = 0; ++ dwc_otg_pcd_t *pcd = ep->pcd; ++ ++ //DWC_DEBUGPL(DBG_PCDV, "%s() %s\n", __func__, _ep->ep.name); ++ ++ if (pcd->ep0_pending && list_empty(&ep->queue)) { ++ if (ep->dwc_ep.is_in) { ++#ifdef DEBUG_EP0 ++ DWC_DEBUGPL(DBG_PCDV, "Do setup OUT status phase\n"); ++#endif ++ do_setup_out_status_phase(pcd); ++ } ++ else { ++#ifdef DEBUG_EP0 ++ DWC_DEBUGPL(DBG_PCDV, "Do setup IN status phase\n"); ++#endif ++ do_setup_in_status_phase(pcd); ++ } ++ pcd->ep0_pending = 0; ++ return 1; ++ } ++ ++ if (list_empty(&ep->queue)) { ++ return 0; ++ } ++ req = list_entry(ep->queue.next, dwc_otg_pcd_request_t, queue); ++ ++ ++ if (pcd->ep0state == EP0_OUT_STATUS_PHASE || pcd->ep0state == EP0_IN_STATUS_PHASE) { ++ is_last = 1; ++ } ++ else if (ep->dwc_ep.is_in) { ++ deptsiz.d32 = dwc_read_reg32(&in_ep_regs->dieptsiz); ++ if(core_if->dma_desc_enable != 0) ++ desc_sts.d32 = readl(dev_if->in_desc_addr); ++#ifdef DEBUG_EP0 ++ DWC_DEBUGPL(DBG_PCDV, "%s len=%d xfersize=%d pktcnt=%d\n", ++ ep->ep.name, ep->dwc_ep.xfer_len, ++ deptsiz.b.xfersize, deptsiz.b.pktcnt); ++#endif ++ ++ if (((core_if->dma_desc_enable == 0) && (deptsiz.b.xfersize == 0)) || ++ ((core_if->dma_desc_enable != 0) && (desc_sts.b.bytes == 0))) { ++ req->req.actual = ep->dwc_ep.xfer_count; ++ /* Is a Zero Len Packet needed? */ ++ if (req->req.zero) { ++#ifdef DEBUG_EP0 ++ DWC_DEBUGPL(DBG_PCD, "Setup Rx ZLP\n"); ++#endif ++ req->req.zero = 0; ++ } ++ do_setup_out_status_phase(pcd); ++ } ++ } ++ else { ++ /* ep0-OUT */ ++#ifdef DEBUG_EP0 ++ deptsiz.d32 = dwc_read_reg32(&out_ep_regs->doeptsiz); ++ DWC_DEBUGPL(DBG_PCDV, "%s len=%d xsize=%d pktcnt=%d\n", ++ ep->ep.name, ep->dwc_ep.xfer_len, ++ deptsiz.b.xfersize, ++ deptsiz.b.pktcnt); ++#endif ++ req->req.actual = ep->dwc_ep.xfer_count; ++ /* Is a Zero Len Packet needed? */ ++ if (req->req.zero) { ++#ifdef DEBUG_EP0 ++ DWC_DEBUGPL(DBG_PCDV, "Setup Tx ZLP\n"); ++#endif ++ req->req.zero = 0; ++ } ++ if(core_if->dma_desc_enable == 0) ++ do_setup_in_status_phase(pcd); ++ } ++ ++ /* Complete the request */ ++ if (is_last) { ++ dwc_otg_request_done(ep, req, 0); ++ ep->dwc_ep.start_xfer_buff = 0; ++ ep->dwc_ep.xfer_buff = 0; ++ ep->dwc_ep.xfer_len = 0; ++ return 1; ++ } ++ return 0; ++} ++ ++/** ++ * This function completes the request for the EP. If there are ++ * additional requests for the EP in the queue they will be started. 
++ */ ++static void complete_ep(dwc_otg_pcd_ep_t *ep) ++{ ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(ep->pcd); ++ dwc_otg_dev_if_t *dev_if = core_if->dev_if; ++ dwc_otg_dev_in_ep_regs_t *in_ep_regs = ++ dev_if->in_ep_regs[ep->dwc_ep.num]; ++ deptsiz_data_t deptsiz; ++ desc_sts_data_t desc_sts; ++ dwc_otg_pcd_request_t *req = 0; ++ dwc_otg_dma_desc_t* dma_desc; ++ uint32_t byte_count = 0; ++ int is_last = 0; ++ int i; ++ ++ DWC_DEBUGPL(DBG_PCDV,"%s() %s-%s\n", __func__, ep->ep.name, ++ (ep->dwc_ep.is_in?"IN":"OUT")); ++ ++ /* Get any pending requests */ ++ if (!list_empty(&ep->queue)) { ++ req = list_entry(ep->queue.next, dwc_otg_pcd_request_t, ++ queue); ++ if (!req) { ++ printk("complete_ep 0x%p, req = NULL!\n", ep); ++ return; ++ } ++ } ++ else { ++ printk("complete_ep 0x%p, ep->queue empty!\n", ep); ++ return; ++ } ++ DWC_DEBUGPL(DBG_PCD, "Requests %d\n", ep->pcd->request_pending); ++ ++ if (ep->dwc_ep.is_in) { ++ deptsiz.d32 = dwc_read_reg32(&in_ep_regs->dieptsiz); ++ ++ if (core_if->dma_enable) { ++ if(core_if->dma_desc_enable == 0) { ++ if (deptsiz.b.xfersize == 0 && deptsiz.b.pktcnt == 0) { ++ byte_count = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count; ++ ++ ep->dwc_ep.xfer_buff += byte_count; ++ ep->dwc_ep.dma_addr += byte_count; ++ ep->dwc_ep.xfer_count += byte_count; ++ ++ DWC_DEBUGPL(DBG_PCDV, "%s len=%d xfersize=%d pktcnt=%d\n", ++ ep->ep.name, ep->dwc_ep.xfer_len, ++ deptsiz.b.xfersize, deptsiz.b.pktcnt); ++ ++ ++ if(ep->dwc_ep.xfer_len < ep->dwc_ep.total_len) { ++ dwc_otg_ep_start_transfer(core_if, &ep->dwc_ep); ++ } else if(ep->dwc_ep.sent_zlp) { ++ /* ++ * This fragment of code should initiate 0 ++ * length trasfer in case if it is queued ++ * a trasfer with size divisible to EPs max ++ * packet size and with usb_request zero field ++ * is set, which means that after data is transfered, ++ * it is also should be transfered ++ * a 0 length packet at the end. For Slave and ++ * Buffer DMA modes in this case SW has ++ * to initiate 2 transfers one with transfer size, ++ * and the second with 0 size. For Desriptor ++ * DMA mode SW is able to initiate a transfer, ++ * which will handle all the packets including ++ * the last 0 legth. 
++ */ ++ ep->dwc_ep.sent_zlp = 0; ++ dwc_otg_ep_start_zl_transfer(core_if, &ep->dwc_ep); ++ } else { ++ is_last = 1; ++ } ++ } else { ++ DWC_WARN("Incomplete transfer (%s-%s [siz=%d pkt=%d])\n", ++ ep->ep.name, (ep->dwc_ep.is_in?"IN":"OUT"), ++ deptsiz.b.xfersize, deptsiz.b.pktcnt); ++ } ++ } else { ++ dma_desc = ep->dwc_ep.desc_addr; ++ byte_count = 0; ++ ep->dwc_ep.sent_zlp = 0; ++ ++ for(i = 0; i < ep->dwc_ep.desc_cnt; ++i) { ++ desc_sts.d32 = readl(dma_desc); ++ byte_count += desc_sts.b.bytes; ++ dma_desc++; ++ } ++ ++ if(byte_count == 0) { ++ ep->dwc_ep.xfer_count = ep->dwc_ep.total_len; ++ is_last = 1; ++ } else { ++ DWC_WARN("Incomplete transfer\n"); ++ } ++ } ++ } else { ++ if (deptsiz.b.xfersize == 0 && deptsiz.b.pktcnt == 0) { ++ /* Check if the whole transfer was completed, ++ * if no, setup transfer for next portion of data ++ */ ++ DWC_DEBUGPL(DBG_PCDV, "%s len=%d xfersize=%d pktcnt=%d\n", ++ ep->ep.name, ep->dwc_ep.xfer_len, ++ deptsiz.b.xfersize, deptsiz.b.pktcnt); ++ if(ep->dwc_ep.xfer_len < ep->dwc_ep.total_len) { ++ dwc_otg_ep_start_transfer(core_if, &ep->dwc_ep); ++ } else if(ep->dwc_ep.sent_zlp) { ++ /* ++ * This fragment of code should initiate 0 ++ * length trasfer in case if it is queued ++ * a trasfer with size divisible to EPs max ++ * packet size and with usb_request zero field ++ * is set, which means that after data is transfered, ++ * it is also should be transfered ++ * a 0 length packet at the end. For Slave and ++ * Buffer DMA modes in this case SW has ++ * to initiate 2 transfers one with transfer size, ++ * and the second with 0 size. For Desriptor ++ * DMA mode SW is able to initiate a transfer, ++ * which will handle all the packets including ++ * the last 0 legth. ++ */ ++ ep->dwc_ep.sent_zlp = 0; ++ dwc_otg_ep_start_zl_transfer(core_if, &ep->dwc_ep); ++ } else { ++ is_last = 1; ++ } ++ } ++ else { ++ DWC_WARN("Incomplete transfer (%s-%s [siz=%d pkt=%d])\n", ++ ep->ep.name, (ep->dwc_ep.is_in?"IN":"OUT"), ++ deptsiz.b.xfersize, deptsiz.b.pktcnt); ++ } ++ } ++ } else { ++ dwc_otg_dev_out_ep_regs_t *out_ep_regs = ++ dev_if->out_ep_regs[ep->dwc_ep.num]; ++ desc_sts.d32 = 0; ++ if(core_if->dma_enable) { ++ if(core_if->dma_desc_enable) { ++ dma_desc = ep->dwc_ep.desc_addr; ++ byte_count = 0; ++ ep->dwc_ep.sent_zlp = 0; ++ for(i = 0; i < ep->dwc_ep.desc_cnt; ++i) { ++ desc_sts.d32 = readl(dma_desc); ++ byte_count += desc_sts.b.bytes; ++ dma_desc++; ++ } ++ ++ ep->dwc_ep.xfer_count = ep->dwc_ep.total_len ++ - byte_count + ((4 - (ep->dwc_ep.total_len & 0x3)) & 0x3); ++ is_last = 1; ++ } else { ++ deptsiz.d32 = 0; ++ deptsiz.d32 = dwc_read_reg32(&out_ep_regs->doeptsiz); ++ ++ byte_count = (ep->dwc_ep.xfer_len - ++ ep->dwc_ep.xfer_count - deptsiz.b.xfersize); ++ ep->dwc_ep.xfer_buff += byte_count; ++ ep->dwc_ep.dma_addr += byte_count; ++ ep->dwc_ep.xfer_count += byte_count; ++ ++ /* Check if the whole transfer was completed, ++ * if no, setup transfer for next portion of data ++ */ ++ if(ep->dwc_ep.xfer_len < ep->dwc_ep.total_len) { ++ dwc_otg_ep_start_transfer(core_if, &ep->dwc_ep); ++ } ++ else if(ep->dwc_ep.sent_zlp) { ++ /* ++ * This fragment of code should initiate 0 ++ * length trasfer in case if it is queued ++ * a trasfer with size divisible to EPs max ++ * packet size and with usb_request zero field ++ * is set, which means that after data is transfered, ++ * it is also should be transfered ++ * a 0 length packet at the end. 
For Slave and ++ * Buffer DMA modes in this case SW has ++ * to initiate 2 transfers one with transfer size, ++ * and the second with 0 size. For Desriptor ++ * DMA mode SW is able to initiate a transfer, ++ * which will handle all the packets including ++ * the last 0 legth. ++ */ ++ ep->dwc_ep.sent_zlp = 0; ++ dwc_otg_ep_start_zl_transfer(core_if, &ep->dwc_ep); ++ } else { ++ is_last = 1; ++ } ++ } ++ } else { ++ /* Check if the whole transfer was completed, ++ * if no, setup transfer for next portion of data ++ */ ++ if(ep->dwc_ep.xfer_len < ep->dwc_ep.total_len) { ++ dwc_otg_ep_start_transfer(core_if, &ep->dwc_ep); ++ } ++ else if(ep->dwc_ep.sent_zlp) { ++ /* ++ * This fragment of code should initiate 0 ++ * length trasfer in case if it is queued ++ * a trasfer with size divisible to EPs max ++ * packet size and with usb_request zero field ++ * is set, which means that after data is transfered, ++ * it is also should be transfered ++ * a 0 length packet at the end. For Slave and ++ * Buffer DMA modes in this case SW has ++ * to initiate 2 transfers one with transfer size, ++ * and the second with 0 size. For Desriptor ++ * DMA mode SW is able to initiate a transfer, ++ * which will handle all the packets including ++ * the last 0 legth. ++ */ ++ ep->dwc_ep.sent_zlp = 0; ++ dwc_otg_ep_start_zl_transfer(core_if, &ep->dwc_ep); ++ } else { ++ is_last = 1; ++ } ++ } ++ ++#ifdef DEBUG ++ ++ DWC_DEBUGPL(DBG_PCDV, "addr %p, %s len=%d cnt=%d xsize=%d pktcnt=%d\n", ++ &out_ep_regs->doeptsiz, ep->ep.name, ep->dwc_ep.xfer_len, ++ ep->dwc_ep.xfer_count, ++ deptsiz.b.xfersize, ++ deptsiz.b.pktcnt); ++#endif ++ } ++ ++ /* Complete the request */ ++ if (is_last) { ++ req->req.actual = ep->dwc_ep.xfer_count; ++ ++ dwc_otg_request_done(ep, req, 0); ++ ++ ep->dwc_ep.start_xfer_buff = 0; ++ ep->dwc_ep.xfer_buff = 0; ++ ep->dwc_ep.xfer_len = 0; ++ ++ /* If there is a request in the queue start it.*/ ++ start_next_request(ep); ++ } ++} ++ ++ ++#ifdef DWC_EN_ISOC ++ ++/** ++ * This function BNA interrupt for Isochronous EPs ++ * ++ */ ++static void dwc_otg_pcd_handle_iso_bna(dwc_otg_pcd_ep_t *ep) ++{ ++ dwc_ep_t *dwc_ep = &ep->dwc_ep; ++ volatile uint32_t *addr; ++ depctl_data_t depctl = {.d32 = 0}; ++ dwc_otg_pcd_t *pcd = ep->pcd; ++ dwc_otg_dma_desc_t *dma_desc; ++ int i; ++ ++ dma_desc = dwc_ep->iso_desc_addr + dwc_ep->desc_cnt * (dwc_ep->proc_buf_num); ++ ++ if(dwc_ep->is_in) { ++ desc_sts_data_t sts = {.d32 = 0}; ++ for(i = 0;i < dwc_ep->desc_cnt; ++i, ++dma_desc) ++ { ++ sts.d32 = readl(&dma_desc->status); ++ sts.b_iso_in.bs = BS_HOST_READY; ++ writel(sts.d32,&dma_desc->status); ++ } ++ } ++ else { ++ desc_sts_data_t sts = {.d32 = 0}; ++ for(i = 0;i < dwc_ep->desc_cnt; ++i, ++dma_desc) ++ { ++ sts.d32 = readl(&dma_desc->status); ++ sts.b_iso_out.bs = BS_HOST_READY; ++ writel(sts.d32,&dma_desc->status); ++ } ++ } ++ ++ if(dwc_ep->is_in == 0){ ++ addr = &GET_CORE_IF(pcd)->dev_if->out_ep_regs[dwc_ep->num]->doepctl; ++ } ++ else{ ++ addr = &GET_CORE_IF(pcd)->dev_if->in_ep_regs[dwc_ep->num]->diepctl; ++ } ++ depctl.b.epena = 1; ++ dwc_modify_reg32(addr,depctl.d32,depctl.d32); ++} ++ ++/** ++ * This function sets latest iso packet information(non-PTI mode) ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param ep The EP to start the transfer on. 
++ * ++ */ ++void set_current_pkt_info(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) ++{ ++ deptsiz_data_t deptsiz = { .d32 = 0 }; ++ dma_addr_t dma_addr; ++ uint32_t offset; ++ ++ if(ep->proc_buf_num) ++ dma_addr = ep->dma_addr1; ++ else ++ dma_addr = ep->dma_addr0; ++ ++ ++ if(ep->is_in) { ++ deptsiz.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dieptsiz); ++ offset = ep->data_per_frame; ++ } else { ++ deptsiz.d32 = dwc_read_reg32(&core_if->dev_if->out_ep_regs[ep->num]->doeptsiz); ++ offset = ep->data_per_frame + (0x4 & (0x4 - (ep->data_per_frame & 0x3))); ++ } ++ ++ if(!deptsiz.b.xfersize) { ++ ep->pkt_info[ep->cur_pkt].length = ep->data_per_frame; ++ ep->pkt_info[ep->cur_pkt].offset = ep->cur_pkt_dma_addr - dma_addr; ++ ep->pkt_info[ep->cur_pkt].status = 0; ++ } else { ++ ep->pkt_info[ep->cur_pkt].length = ep->data_per_frame; ++ ep->pkt_info[ep->cur_pkt].offset = ep->cur_pkt_dma_addr - dma_addr; ++ ep->pkt_info[ep->cur_pkt].status = -ENODATA; ++ } ++ ep->cur_pkt_addr += offset; ++ ep->cur_pkt_dma_addr += offset; ++ ep->cur_pkt++; ++} ++ ++/** ++ * This function sets latest iso packet information(DDMA mode) ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param dwc_ep The EP to start the transfer on. ++ * ++ */ ++static void set_ddma_iso_pkts_info(dwc_otg_core_if_t *core_if, dwc_ep_t *dwc_ep) ++{ ++ dwc_otg_dma_desc_t* dma_desc; ++ desc_sts_data_t sts = {.d32 = 0}; ++ iso_pkt_info_t *iso_packet; ++ uint32_t data_per_desc; ++ uint32_t offset; ++ int i, j; ++ ++ iso_packet = dwc_ep->pkt_info; ++ ++ /** Reinit closed DMA Descriptors*/ ++ /** ISO OUT EP */ ++ if(dwc_ep->is_in == 0) { ++ dma_desc = dwc_ep->iso_desc_addr + dwc_ep->desc_cnt * dwc_ep->proc_buf_num; ++ offset = 0; ++ ++ for(i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm; i+= dwc_ep->pkt_per_frm) ++ { ++ for(j = 0; j < dwc_ep->pkt_per_frm; ++j) ++ { ++ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? ++ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; ++ data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0; ++ ++ sts.d32 = readl(&dma_desc->status); ++ ++ /* Write status in iso_packet_decsriptor */ ++ iso_packet->status = sts.b_iso_out.rxsts + (sts.b_iso_out.bs^BS_DMA_DONE); ++ if(iso_packet->status) { ++ iso_packet->status = -ENODATA; ++ } ++ ++ /* Received data length */ ++ if(!sts.b_iso_out.rxbytes){ ++ iso_packet->length = data_per_desc - sts.b_iso_out.rxbytes; ++ } else { ++ iso_packet->length = data_per_desc - sts.b_iso_out.rxbytes + ++ (4 - dwc_ep->data_per_frame % 4); ++ } ++ ++ iso_packet->offset = offset; ++ ++ offset += data_per_desc; ++ dma_desc ++; ++ iso_packet ++; ++ } ++ } ++ ++ for(j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) ++ { ++ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? ++ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; ++ data_per_desc += (data_per_desc % 4) ? 
(4 - data_per_desc % 4):0; ++ ++ sts.d32 = readl(&dma_desc->status); ++ ++ /* Write status in iso_packet_decsriptor */ ++ iso_packet->status = sts.b_iso_out.rxsts + (sts.b_iso_out.bs^BS_DMA_DONE); ++ if(iso_packet->status) { ++ iso_packet->status = -ENODATA; ++ } ++ ++ /* Received data length */ ++ iso_packet->length = dwc_ep->data_per_frame - sts.b_iso_out.rxbytes; ++ ++ iso_packet->offset = offset; ++ ++ offset += data_per_desc; ++ iso_packet++; ++ dma_desc++; ++ } ++ ++ sts.d32 = readl(&dma_desc->status); ++ ++ /* Write status in iso_packet_decsriptor */ ++ iso_packet->status = sts.b_iso_out.rxsts + (sts.b_iso_out.bs^BS_DMA_DONE); ++ if(iso_packet->status) { ++ iso_packet->status = -ENODATA; ++ } ++ /* Received data length */ ++ if(!sts.b_iso_out.rxbytes){ ++ iso_packet->length = dwc_ep->data_per_frame - sts.b_iso_out.rxbytes; ++ } else { ++ iso_packet->length = dwc_ep->data_per_frame - sts.b_iso_out.rxbytes + ++ (4 - dwc_ep->data_per_frame % 4); ++ } ++ ++ iso_packet->offset = offset; ++ } ++ else /** ISO IN EP */ ++ { ++ dma_desc = dwc_ep->iso_desc_addr + dwc_ep->desc_cnt * dwc_ep->proc_buf_num; ++ ++ for(i = 0; i < dwc_ep->desc_cnt - 1; i++) ++ { ++ sts.d32 = readl(&dma_desc->status); ++ ++ /* Write status in iso packet descriptor */ ++ iso_packet->status = sts.b_iso_in.txsts + (sts.b_iso_in.bs^BS_DMA_DONE); ++ if(iso_packet->status != 0) { ++ iso_packet->status = -ENODATA; ++ ++ } ++ /* Bytes has been transfered */ ++ iso_packet->length = dwc_ep->data_per_frame - sts.b_iso_in.txbytes; ++ ++ dma_desc ++; ++ iso_packet++; ++ } ++ ++ sts.d32 = readl(&dma_desc->status); ++ while(sts.b_iso_in.bs == BS_DMA_BUSY) { ++ sts.d32 = readl(&dma_desc->status); ++ } ++ ++ /* Write status in iso packet descriptor ??? do be done with ERROR codes*/ ++ iso_packet->status = sts.b_iso_in.txsts + (sts.b_iso_in.bs^BS_DMA_DONE); ++ if(iso_packet->status != 0) { ++ iso_packet->status = -ENODATA; ++ } ++ ++ /* Bytes has been transfered */ ++ iso_packet->length = dwc_ep->data_per_frame - sts.b_iso_in.txbytes; ++ } ++} ++ ++/** ++ * This function reinitialize DMA Descriptors for Isochronous transfer ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param dwc_ep The EP to start the transfer on. ++ * ++ */ ++static void reinit_ddma_iso_xfer(dwc_otg_core_if_t *core_if, dwc_ep_t *dwc_ep) ++{ ++ int i, j; ++ dwc_otg_dma_desc_t* dma_desc; ++ dma_addr_t dma_ad; ++ volatile uint32_t *addr; ++ desc_sts_data_t sts = { .d32 =0 }; ++ uint32_t data_per_desc; ++ ++ if(dwc_ep->is_in == 0) { ++ addr = &core_if->dev_if->out_ep_regs[dwc_ep->num]->doepctl; ++ } ++ else { ++ addr = &core_if->dev_if->in_ep_regs[dwc_ep->num]->diepctl; ++ } ++ ++ ++ if(dwc_ep->proc_buf_num == 0) { ++ /** Buffer 0 descriptors setup */ ++ dma_ad = dwc_ep->dma_addr0; ++ } ++ else { ++ /** Buffer 1 descriptors setup */ ++ dma_ad = dwc_ep->dma_addr1; ++ } ++ ++ ++ /** Reinit closed DMA Descriptors*/ ++ /** ISO OUT EP */ ++ if(dwc_ep->is_in == 0) { ++ dma_desc = dwc_ep->iso_desc_addr + dwc_ep->desc_cnt * dwc_ep->proc_buf_num; ++ ++ sts.b_iso_out.bs = BS_HOST_READY; ++ sts.b_iso_out.rxsts = 0; ++ sts.b_iso_out.l = 0; ++ sts.b_iso_out.sp = 0; ++ sts.b_iso_out.ioc = 0; ++ sts.b_iso_out.pid = 0; ++ sts.b_iso_out.framenum = 0; ++ ++ for(i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm; i+= dwc_ep->pkt_per_frm) ++ { ++ for(j = 0; j < dwc_ep->pkt_per_frm; ++j) ++ { ++ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? 
++ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; ++ data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0; ++ sts.b_iso_out.rxbytes = data_per_desc; ++ writel((uint32_t)dma_ad, &dma_desc->buf); ++ writel(sts.d32, &dma_desc->status); ++ ++ (uint32_t)dma_ad += data_per_desc; ++ dma_desc ++; ++ } ++ } ++ ++ for(j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) ++ { ++ ++ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? ++ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; ++ data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0; ++ sts.b_iso_out.rxbytes = data_per_desc; ++ ++ writel((uint32_t)dma_ad, &dma_desc->buf); ++ writel(sts.d32, &dma_desc->status); ++ ++ dma_desc++; ++ (uint32_t)dma_ad += data_per_desc; ++ } ++ ++ sts.b_iso_out.ioc = 1; ++ sts.b_iso_out.l = dwc_ep->proc_buf_num; ++ ++ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? ++ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; ++ data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0; ++ sts.b_iso_out.rxbytes = data_per_desc; ++ ++ writel((uint32_t)dma_ad, &dma_desc->buf); ++ writel(sts.d32, &dma_desc->status); ++ } ++ else /** ISO IN EP */ ++ { ++ dma_desc = dwc_ep->iso_desc_addr + dwc_ep->desc_cnt * dwc_ep->proc_buf_num; ++ ++ sts.b_iso_in.bs = BS_HOST_READY; ++ sts.b_iso_in.txsts = 0; ++ sts.b_iso_in.sp = 0; ++ sts.b_iso_in.ioc = 0; ++ sts.b_iso_in.pid = dwc_ep->pkt_per_frm; ++ sts.b_iso_in.framenum = dwc_ep->next_frame; ++ sts.b_iso_in.txbytes = dwc_ep->data_per_frame; ++ sts.b_iso_in.l = 0; ++ ++ for(i = 0; i < dwc_ep->desc_cnt - 1; i++) ++ { ++ writel((uint32_t)dma_ad, &dma_desc->buf); ++ writel(sts.d32, &dma_desc->status); ++ ++ sts.b_iso_in.framenum += dwc_ep->bInterval; ++ (uint32_t)dma_ad += dwc_ep->data_per_frame; ++ dma_desc ++; ++ } ++ ++ sts.b_iso_in.ioc = 1; ++ sts.b_iso_in.l = dwc_ep->proc_buf_num; ++ ++ writel((uint32_t)dma_ad, &dma_desc->buf); ++ writel(sts.d32, &dma_desc->status); ++ ++ dwc_ep->next_frame = sts.b_iso_in.framenum + dwc_ep->bInterval * 1; ++ } ++ dwc_ep->proc_buf_num = (dwc_ep->proc_buf_num ^ 1) & 0x1; ++} ++ ++ ++/** ++ * This function is to handle Iso EP transfer complete interrupt ++ * in case Iso out packet was dropped ++ * ++ * @param core_if Programming view of DWC_otg controller. 
++ * @param dwc_ep The EP for wihich transfer complete was asserted ++ * ++ */ ++static uint32_t handle_iso_out_pkt_dropped(dwc_otg_core_if_t *core_if, dwc_ep_t *dwc_ep) ++{ ++ uint32_t dma_addr; ++ uint32_t drp_pkt; ++ uint32_t drp_pkt_cnt; ++ deptsiz_data_t deptsiz = { .d32 = 0 }; ++ depctl_data_t depctl = { .d32 = 0 }; ++ int i; ++ ++ deptsiz.d32 = dwc_read_reg32(&core_if->dev_if->out_ep_regs[dwc_ep->num]->doeptsiz); ++ ++ drp_pkt = dwc_ep->pkt_cnt - deptsiz.b.pktcnt; ++ drp_pkt_cnt = dwc_ep->pkt_per_frm - (drp_pkt % dwc_ep->pkt_per_frm); ++ ++ /* Setting dropped packets status */ ++ for(i = 0; i < drp_pkt_cnt; ++i) { ++ dwc_ep->pkt_info[drp_pkt].status = -ENODATA; ++ drp_pkt ++; ++ deptsiz.b.pktcnt--; ++ } ++ ++ ++ if(deptsiz.b.pktcnt > 0) { ++ deptsiz.b.xfersize = dwc_ep->xfer_len - (dwc_ep->pkt_cnt - deptsiz.b.pktcnt) * dwc_ep->maxpacket; ++ } else { ++ deptsiz.b.xfersize = 0; ++ deptsiz.b.pktcnt = 0; ++ } ++ ++ dwc_write_reg32(&core_if->dev_if->out_ep_regs[dwc_ep->num]->doeptsiz, deptsiz.d32); ++ ++ if(deptsiz.b.pktcnt > 0) { ++ if(dwc_ep->proc_buf_num) { ++ dma_addr = dwc_ep->dma_addr1 + dwc_ep->xfer_len - deptsiz.b.xfersize; ++ } else { ++ dma_addr = dwc_ep->dma_addr0 + dwc_ep->xfer_len - deptsiz.b.xfersize;; ++ } ++ ++ dwc_write_reg32(&core_if->dev_if->out_ep_regs[dwc_ep->num]->doepdma, dma_addr); ++ ++ /** Re-enable endpoint, clear nak */ ++ depctl.d32 = 0; ++ depctl.b.epena = 1; ++ depctl.b.cnak = 1; ++ ++ dwc_modify_reg32(&core_if->dev_if->out_ep_regs[dwc_ep->num]->doepctl, ++ depctl.d32,depctl.d32); ++ return 0; ++ } else { ++ return 1; ++ } ++} ++ ++/** ++ * This function sets iso packets information(PTI mode) ++ * ++ * @param core_if Programming view of DWC_otg controller. ++ * @param ep The EP to start the transfer on. ++ * ++ */ ++static uint32_t set_iso_pkts_info(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) ++{ ++ int i, j; ++ dma_addr_t dma_ad; ++ iso_pkt_info_t *packet_info = ep->pkt_info; ++ uint32_t offset; ++ uint32_t frame_data; ++ deptsiz_data_t deptsiz; ++ ++ if(ep->proc_buf_num == 0) { ++ /** Buffer 0 descriptors setup */ ++ dma_ad = ep->dma_addr0; ++ } ++ else { ++ /** Buffer 1 descriptors setup */ ++ dma_ad = ep->dma_addr1; ++ } ++ ++ ++ if(ep->is_in) { ++ deptsiz.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dieptsiz); ++ } else { ++ deptsiz.d32 = dwc_read_reg32(&core_if->dev_if->out_ep_regs[ep->num]->doeptsiz); ++ } ++ ++ if(!deptsiz.b.xfersize) { ++ offset = 0; ++ for(i = 0; i < ep->pkt_cnt; i += ep->pkt_per_frm) ++ { ++ frame_data = ep->data_per_frame; ++ for(j = 0; j < ep->pkt_per_frm; ++j) { ++ ++ /* Packet status - is not set as initially ++ * it is set to 0 and if packet was sent ++ successfully, status field will remain 0*/ ++ ++ ++ /* Bytes has been transfered */ ++ packet_info->length = (ep->maxpacket < frame_data) ? ++ ep->maxpacket : frame_data; ++ ++ /* Received packet offset */ ++ packet_info->offset = offset; ++ offset += packet_info->length; ++ frame_data -= packet_info->length; ++ ++ packet_info ++; ++ } ++ } ++ return 1; ++ } else { ++ /* This is a workaround for in case of Transfer Complete with ++ * PktDrpSts interrupts merging - in this case Transfer complete ++ * interrupt for Isoc Out Endpoint is asserted without PktDrpSts ++ * set and with DOEPTSIZ register non zero. Investigations showed, ++ * that this happens when Out packet is dropped, but because of ++ * interrupts merging during first interrupt handling PktDrpSts ++ * bit is cleared and for next merged interrupts it is not reset. 
++ * In this case SW hadles the interrupt as if PktDrpSts bit is set. ++ */ ++ if(ep->is_in) { ++ return 1; ++ } else { ++ return handle_iso_out_pkt_dropped(core_if, ep); ++ } ++ } ++} ++ ++/** ++ * This function is to handle Iso EP transfer complete interrupt ++ * ++ * @param ep The EP for which transfer complete was asserted ++ * ++ */ ++static void complete_iso_ep(dwc_otg_pcd_ep_t *ep) ++{ ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(ep->pcd); ++ dwc_ep_t *dwc_ep = &ep->dwc_ep; ++ uint8_t is_last = 0; ++ ++ if(core_if->dma_enable) { ++ if(core_if->dma_desc_enable) { ++ set_ddma_iso_pkts_info(core_if, dwc_ep); ++ reinit_ddma_iso_xfer(core_if, dwc_ep); ++ is_last = 1; ++ } else { ++ if(core_if->pti_enh_enable) { ++ if(set_iso_pkts_info(core_if, dwc_ep)) { ++ dwc_ep->proc_buf_num = (dwc_ep->proc_buf_num ^ 1) & 0x1; ++ dwc_otg_iso_ep_start_buf_transfer(core_if, dwc_ep); ++ is_last = 1; ++ } ++ } else { ++ set_current_pkt_info(core_if, dwc_ep); ++ if(dwc_ep->cur_pkt >= dwc_ep->pkt_cnt) { ++ is_last = 1; ++ dwc_ep->cur_pkt = 0; ++ dwc_ep->proc_buf_num = (dwc_ep->proc_buf_num ^ 1) & 0x1; ++ if(dwc_ep->proc_buf_num) { ++ dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff1; ++ dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr1; ++ } else { ++ dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff0; ++ dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr0; ++ } ++ ++ } ++ dwc_otg_iso_ep_start_frm_transfer(core_if, dwc_ep); ++ } ++ } ++ } else { ++ set_current_pkt_info(core_if, dwc_ep); ++ if(dwc_ep->cur_pkt >= dwc_ep->pkt_cnt) { ++ is_last = 1; ++ dwc_ep->cur_pkt = 0; ++ dwc_ep->proc_buf_num = (dwc_ep->proc_buf_num ^ 1) & 0x1; ++ if(dwc_ep->proc_buf_num) { ++ dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff1; ++ dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr1; ++ } else { ++ dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff0; ++ dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr0; ++ } ++ ++ } ++ dwc_otg_iso_ep_start_frm_transfer(core_if, dwc_ep); ++ } ++ if(is_last) ++ dwc_otg_iso_buffer_done(ep, ep->iso_req); ++} ++ ++#endif //DWC_EN_ISOC ++ ++ ++/** ++ * This function handles EP0 Control transfers. ++ * ++ * The state of the control tranfers are tracked in ++ * ep0state. ++ */ ++static void handle_ep0(dwc_otg_pcd_t *pcd) ++{ ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); ++ dwc_otg_pcd_ep_t *ep0 = &pcd->ep0; ++ desc_sts_data_t desc_sts; ++ deptsiz0_data_t deptsiz; ++ uint32_t byte_count; ++ ++#ifdef DEBUG_EP0 ++ DWC_DEBUGPL(DBG_PCDV, "%s()\n", __func__); ++ print_ep0_state(pcd); ++#endif ++ ++ switch (pcd->ep0state) { ++ case EP0_DISCONNECT: ++ break; ++ ++ case EP0_IDLE: ++ pcd->request_config = 0; ++ ++ pcd_setup(pcd); ++ break; ++ ++ case EP0_IN_DATA_PHASE: ++#ifdef DEBUG_EP0 ++ DWC_DEBUGPL(DBG_PCD, "DATA_IN EP%d-%s: type=%d, mps=%d\n", ++ ep0->dwc_ep.num, (ep0->dwc_ep.is_in ?"IN":"OUT"), ++ ep0->dwc_ep.type, ep0->dwc_ep.maxpacket); ++#endif ++ ++ if (core_if->dma_enable != 0) { ++ /* ++ * For EP0 we can only program 1 packet at a time so we ++ * need to do the make calculations after each complete. ++ * Call write_packet to make the calculations, as in ++ * slave mode, and use those values to determine if we ++ * can complete. 
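++ * For instance (illustrative numbers only): with xfer_len = 18 and a
++ * residual DIEPTSIZ0.xfersize of 0 once the packet has gone out,
++ * byte_count below evaluates to 18, xfer_count reaches total_len, and
++ * (assuming no trailing ZLP is required) the transfer falls through to
++ * ep0_complete_request().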
++ */ ++ if(core_if->dma_desc_enable == 0) { ++ deptsiz.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[0]->dieptsiz); ++ byte_count = ep0->dwc_ep.xfer_len - deptsiz.b.xfersize; ++ } ++ else { ++ desc_sts.d32 = readl(core_if->dev_if->in_desc_addr); ++ byte_count = ep0->dwc_ep.xfer_len - desc_sts.b.bytes; ++ } ++ ep0->dwc_ep.xfer_count += byte_count; ++ ep0->dwc_ep.xfer_buff += byte_count; ++ ep0->dwc_ep.dma_addr += byte_count; ++ } ++ if (ep0->dwc_ep.xfer_count < ep0->dwc_ep.total_len) { ++ dwc_otg_ep0_continue_transfer (GET_CORE_IF(pcd), &ep0->dwc_ep); ++ DWC_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER\n"); ++ } ++ else if(ep0->dwc_ep.sent_zlp) { ++ dwc_otg_ep0_continue_transfer (GET_CORE_IF(pcd), &ep0->dwc_ep); ++ ep0->dwc_ep.sent_zlp = 0; ++ DWC_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER\n"); ++ } ++ else { ++ ep0_complete_request(ep0); ++ DWC_DEBUGPL(DBG_PCD, "COMPLETE TRANSFER\n"); ++ } ++ break; ++ case EP0_OUT_DATA_PHASE: ++#ifdef DEBUG_EP0 ++ DWC_DEBUGPL(DBG_PCD, "DATA_OUT EP%d-%s: type=%d, mps=%d\n", ++ ep0->dwc_ep.num, (ep0->dwc_ep.is_in ?"IN":"OUT"), ++ ep0->dwc_ep.type, ep0->dwc_ep.maxpacket); ++#endif ++ if (core_if->dma_enable != 0) { ++ if(core_if->dma_desc_enable == 0) { ++ deptsiz.d32 = dwc_read_reg32(&core_if->dev_if->out_ep_regs[0]->doeptsiz); ++ byte_count = ep0->dwc_ep.maxpacket - deptsiz.b.xfersize; ++ } ++ else { ++ desc_sts.d32 = readl(core_if->dev_if->out_desc_addr); ++ byte_count = ep0->dwc_ep.maxpacket - desc_sts.b.bytes; ++ } ++ ep0->dwc_ep.xfer_count += byte_count; ++ ep0->dwc_ep.xfer_buff += byte_count; ++ ep0->dwc_ep.dma_addr += byte_count; ++ } ++ if (ep0->dwc_ep.xfer_count < ep0->dwc_ep.total_len) { ++ dwc_otg_ep0_continue_transfer (GET_CORE_IF(pcd), &ep0->dwc_ep); ++ DWC_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER\n"); ++ } ++ else if(ep0->dwc_ep.sent_zlp) { ++ dwc_otg_ep0_continue_transfer (GET_CORE_IF(pcd), &ep0->dwc_ep); ++ ep0->dwc_ep.sent_zlp = 0; ++ DWC_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER\n"); ++ } ++ else { ++ ep0_complete_request(ep0); ++ DWC_DEBUGPL(DBG_PCD, "COMPLETE TRANSFER\n"); ++ } ++ break; ++ ++ ++ case EP0_IN_STATUS_PHASE: ++ case EP0_OUT_STATUS_PHASE: ++ DWC_DEBUGPL(DBG_PCD, "CASE: EP0_STATUS\n"); ++ ep0_complete_request(ep0); ++ pcd->ep0state = EP0_IDLE; ++ ep0->stopped = 1; ++ ep0->dwc_ep.is_in = 0; /* OUT for next SETUP */ ++ ++ /* Prepare for more SETUP Packets */ ++ if(core_if->dma_enable) { ++ ep0_out_start(core_if, pcd); ++ } ++ break; ++ ++ case EP0_STALL: ++ DWC_ERROR("EP0 STALLed, should not get here pcd_setup()\n"); ++ break; ++ } ++#ifdef DEBUG_EP0 ++ print_ep0_state(pcd); ++#endif ++} ++ ++ ++/** ++ * Restart transfer ++ */ ++static void restart_transfer(dwc_otg_pcd_t *pcd, const uint32_t epnum) ++{ ++ dwc_otg_core_if_t *core_if; ++ dwc_otg_dev_if_t *dev_if; ++ deptsiz_data_t dieptsiz = {.d32=0}; ++ dwc_otg_pcd_ep_t *ep; ++ ++ ep = get_in_ep(pcd, epnum); ++ ++#ifdef DWC_EN_ISOC ++ if(ep->dwc_ep.type == DWC_OTG_EP_TYPE_ISOC) { ++ return; ++ } ++#endif /* DWC_EN_ISOC */ ++ ++ core_if = GET_CORE_IF(pcd); ++ dev_if = core_if->dev_if; ++ ++ dieptsiz.d32 = dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dieptsiz); ++ ++ DWC_DEBUGPL(DBG_PCD,"xfer_buff=%p xfer_count=%0x xfer_len=%0x" ++ " stopped=%d\n", ep->dwc_ep.xfer_buff, ++ ep->dwc_ep.xfer_count, ep->dwc_ep.xfer_len , ++ ep->stopped); ++ /* ++ * If xfersize is 0 and pktcnt in not 0, resend the last packet. 
++ */ ++ if (dieptsiz.b.pktcnt && dieptsiz.b.xfersize == 0 && ++ ep->dwc_ep.start_xfer_buff != 0) { ++ if (ep->dwc_ep.total_len <= ep->dwc_ep.maxpacket) { ++ ep->dwc_ep.xfer_count = 0; ++ ep->dwc_ep.xfer_buff = ep->dwc_ep.start_xfer_buff; ++ ep->dwc_ep.xfer_len = ep->dwc_ep.xfer_count; ++ } ++ else { ++ ep->dwc_ep.xfer_count -= ep->dwc_ep.maxpacket; ++ /* convert packet size to dwords. */ ++ ep->dwc_ep.xfer_buff -= ep->dwc_ep.maxpacket; ++ ep->dwc_ep.xfer_len = ep->dwc_ep.xfer_count; ++ } ++ ep->stopped = 0; ++ DWC_DEBUGPL(DBG_PCD,"xfer_buff=%p xfer_count=%0x " ++ "xfer_len=%0x stopped=%d\n", ++ ep->dwc_ep.xfer_buff, ++ ep->dwc_ep.xfer_count, ep->dwc_ep.xfer_len , ++ ep->stopped ++ ); ++ if (epnum == 0) { ++ dwc_otg_ep0_start_transfer(core_if, &ep->dwc_ep); ++ } ++ else { ++ dwc_otg_ep_start_transfer(core_if, &ep->dwc_ep); ++ } ++ } ++} ++ ++ ++/** ++ * handle the IN EP disable interrupt. ++ */ ++static inline void handle_in_ep_disable_intr(dwc_otg_pcd_t *pcd, ++ const uint32_t epnum) ++{ ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); ++ dwc_otg_dev_if_t *dev_if = core_if->dev_if; ++ deptsiz_data_t dieptsiz = {.d32=0}; ++ dctl_data_t dctl = {.d32=0}; ++ dwc_otg_pcd_ep_t *ep; ++ dwc_ep_t *dwc_ep; ++ ++ ep = get_in_ep(pcd, epnum); ++ dwc_ep = &ep->dwc_ep; ++ ++ if(dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) { ++ dwc_otg_flush_tx_fifo(core_if, dwc_ep->tx_fifo_num); ++ return; ++ } ++ ++ DWC_DEBUGPL(DBG_PCD,"diepctl%d=%0x\n", epnum, ++ dwc_read_reg32(&dev_if->in_ep_regs[epnum]->diepctl)); ++ dieptsiz.d32 = dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dieptsiz); ++ ++ DWC_DEBUGPL(DBG_ANY, "pktcnt=%d size=%d\n", ++ dieptsiz.b.pktcnt, ++ dieptsiz.b.xfersize); ++ ++ if (ep->stopped) { ++ /* Flush the Tx FIFO */ ++ dwc_otg_flush_tx_fifo(core_if, dwc_ep->tx_fifo_num); ++ /* Clear the Global IN NP NAK */ ++ dctl.d32 = 0; ++ dctl.b.cgnpinnak = 1; ++ dwc_modify_reg32(&dev_if->dev_global_regs->dctl, ++ dctl.d32, 0); ++ /* Restart the transaction */ ++ if (dieptsiz.b.pktcnt != 0 || ++ dieptsiz.b.xfersize != 0) { ++ restart_transfer(pcd, epnum); ++ } ++ } ++ else { ++ /* Restart the transaction */ ++ if (dieptsiz.b.pktcnt != 0 || ++ dieptsiz.b.xfersize != 0) { ++ restart_transfer(pcd, epnum); ++ } ++ DWC_DEBUGPL(DBG_ANY, "STOPPED!!!\n"); ++ } ++} ++ ++/** ++ * Handler for the IN EP timeout handshake interrupt. ++ */ ++static inline void handle_in_ep_timeout_intr(dwc_otg_pcd_t *pcd, ++ const uint32_t epnum) ++{ ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); ++ dwc_otg_dev_if_t *dev_if = core_if->dev_if; ++ ++#ifdef DEBUG ++ deptsiz_data_t dieptsiz = {.d32=0}; ++ uint32_t num = 0; ++#endif ++ dctl_data_t dctl = {.d32=0}; ++ dwc_otg_pcd_ep_t *ep; ++ ++ gintmsk_data_t intr_mask = {.d32 = 0}; ++ ++ ep = get_in_ep(pcd, epnum); ++ ++ /* Disable the NP Tx Fifo Empty Interrrupt */ ++ if (!core_if->dma_enable) { ++ intr_mask.b.nptxfempty = 1; ++ dwc_modify_reg32(&core_if->core_global_regs->gintmsk, intr_mask.d32, 0); ++ } ++ /** @todo NGS Check EP type. 
++ * Implement for Periodic EPs */ ++ /* ++ * Non-periodic EP ++ */ ++ /* Enable the Global IN NAK Effective Interrupt */ ++ intr_mask.b.ginnakeff = 1; ++ dwc_modify_reg32(&core_if->core_global_regs->gintmsk, ++ 0, intr_mask.d32); ++ ++ /* Set Global IN NAK */ ++ dctl.b.sgnpinnak = 1; ++ dwc_modify_reg32(&dev_if->dev_global_regs->dctl, ++ dctl.d32, dctl.d32); ++ ++ ep->stopped = 1; ++ ++#ifdef DEBUG ++ dieptsiz.d32 = dwc_read_reg32(&dev_if->in_ep_regs[num]->dieptsiz); ++ DWC_DEBUGPL(DBG_ANY, "pktcnt=%d size=%d\n", ++ dieptsiz.b.pktcnt, ++ dieptsiz.b.xfersize); ++#endif ++ ++#ifdef DISABLE_PERIODIC_EP ++ /* ++ * Set the NAK bit for this EP to ++ * start the disable process. ++ */ ++ diepctl.d32 = 0; ++ diepctl.b.snak = 1; ++ dwc_modify_reg32(&dev_if->in_ep_regs[num]->diepctl, diepctl.d32, diepctl.d32); ++ ep->disabling = 1; ++ ep->stopped = 1; ++#endif ++} ++ ++/** ++ * Handler for the IN EP NAK interrupt. ++ */ ++static inline int32_t handle_in_ep_nak_intr(dwc_otg_pcd_t *pcd, ++ const uint32_t epnum) ++{ ++ /** @todo implement ISR */ ++ dwc_otg_core_if_t* core_if; ++ diepmsk_data_t intr_mask = { .d32 = 0}; ++ ++ DWC_PRINT("INTERRUPT Handler not implemented for %s\n", "IN EP NAK"); ++ core_if = GET_CORE_IF(pcd); ++ intr_mask.b.nak = 1; ++ ++ if(core_if->multiproc_int_enable) { ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->diepeachintmsk[epnum], ++ intr_mask.d32, 0); ++ } else { ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->diepmsk, ++ intr_mask.d32, 0); ++ } ++ ++ return 1; ++} ++ ++/** ++ * Handler for the OUT EP Babble interrupt. ++ */ ++static inline int32_t handle_out_ep_babble_intr(dwc_otg_pcd_t *pcd, ++ const uint32_t epnum) ++{ ++ /** @todo implement ISR */ ++ dwc_otg_core_if_t* core_if; ++ doepmsk_data_t intr_mask = { .d32 = 0}; ++ ++ DWC_PRINT("INTERRUPT Handler not implemented for %s\n", "OUT EP Babble"); ++ core_if = GET_CORE_IF(pcd); ++ intr_mask.b.babble = 1; ++ ++ if(core_if->multiproc_int_enable) { ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->doepeachintmsk[epnum], ++ intr_mask.d32, 0); ++ } else { ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->doepmsk, ++ intr_mask.d32, 0); ++ } ++ ++ return 1; ++} ++ ++/** ++ * Handler for the OUT EP NAK interrupt. ++ */ ++static inline int32_t handle_out_ep_nak_intr(dwc_otg_pcd_t *pcd, ++ const uint32_t epnum) ++{ ++ /** @todo implement ISR */ ++ dwc_otg_core_if_t* core_if; ++ doepmsk_data_t intr_mask = { .d32 = 0}; ++ ++ DWC_PRINT("INTERRUPT Handler not implemented for %s\n", "OUT EP NAK"); ++ core_if = GET_CORE_IF(pcd); ++ intr_mask.b.nak = 1; ++ ++ if(core_if->multiproc_int_enable) { ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->doepeachintmsk[epnum], ++ intr_mask.d32, 0); ++ } else { ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->doepmsk, ++ intr_mask.d32, 0); ++ } ++ ++ return 1; ++} ++ ++/** ++ * Handler for the OUT EP NYET interrupt. 
++ */ ++static inline int32_t handle_out_ep_nyet_intr(dwc_otg_pcd_t *pcd, ++ const uint32_t epnum) ++{ ++ /** @todo implement ISR */ ++ dwc_otg_core_if_t* core_if; ++ doepmsk_data_t intr_mask = { .d32 = 0}; ++ ++ DWC_PRINT("INTERRUPT Handler not implemented for %s\n", "OUT EP NYET"); ++ core_if = GET_CORE_IF(pcd); ++ intr_mask.b.nyet = 1; ++ ++ if(core_if->multiproc_int_enable) { ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->doepeachintmsk[epnum], ++ intr_mask.d32, 0); ++ } else { ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->doepmsk, ++ intr_mask.d32, 0); ++ } ++ ++ return 1; ++} ++ ++/** ++ * This interrupt indicates that an IN EP has a pending Interrupt. ++ * The sequence for handling the IN EP interrupt is shown below: ++ * -# Read the Device All Endpoint Interrupt register ++ * -# Repeat the following for each IN EP interrupt bit set (from ++ * LSB to MSB). ++ * -# Read the Device Endpoint Interrupt (DIEPINTn) register ++ * -# If "Transfer Complete" call the request complete function ++ * -# If "Endpoint Disabled" complete the EP disable procedure. ++ * -# If "AHB Error Interrupt" log error ++ * -# If "Time-out Handshake" log error ++ * -# If "IN Token Received when TxFIFO Empty" write packet to Tx ++ * FIFO. ++ * -# If "IN Token EP Mismatch" (disable, this is handled by EP ++ * Mismatch Interrupt) ++ */ ++static int32_t dwc_otg_pcd_handle_in_ep_intr(dwc_otg_pcd_t *pcd) ++{ ++#define CLEAR_IN_EP_INTR(__core_if,__epnum,__intr) \ ++do { \ ++ diepint_data_t diepint = {.d32=0}; \ ++ diepint.b.__intr = 1; \ ++ dwc_write_reg32(&__core_if->dev_if->in_ep_regs[__epnum]->diepint, \ ++ diepint.d32); \ ++} while (0) ++ ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); ++ dwc_otg_dev_if_t *dev_if = core_if->dev_if; ++ diepint_data_t diepint = {.d32=0}; ++ dctl_data_t dctl = {.d32=0}; ++ depctl_data_t depctl = {.d32=0}; ++ uint32_t ep_intr; ++ uint32_t epnum = 0; ++ dwc_otg_pcd_ep_t *ep; ++ dwc_ep_t *dwc_ep; ++ gintmsk_data_t intr_mask = {.d32 = 0}; ++ ++ ++ ++ DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, pcd); ++ ++ /* Read in the device interrupt bits */ ++ ep_intr = dwc_otg_read_dev_all_in_ep_intr(core_if); ++ ++ /* Service the Device IN interrupts for each endpoint */ ++ while(ep_intr) { ++ if (ep_intr&0x1) { ++ uint32_t empty_msk; ++ /* Get EP pointer */ ++ ep = get_in_ep(pcd, epnum); ++ dwc_ep = &ep->dwc_ep; ++ ++ depctl.d32 = dwc_read_reg32(&dev_if->in_ep_regs[epnum]->diepctl); ++ empty_msk = dwc_read_reg32(&dev_if->dev_global_regs->dtknqr4_fifoemptymsk); ++ ++ DWC_DEBUGPL(DBG_PCDV, ++ "IN EP INTERRUPT - %d\nepmty_msk - %8x diepctl - %8x\n", ++ epnum, ++ empty_msk, ++ depctl.d32); ++ ++ DWC_DEBUGPL(DBG_PCD, ++ "EP%d-%s: type=%d, mps=%d\n", ++ dwc_ep->num, (dwc_ep->is_in ?"IN":"OUT"), ++ dwc_ep->type, dwc_ep->maxpacket); ++ ++ diepint.d32 = dwc_otg_read_dev_in_ep_intr(core_if, dwc_ep); ++ ++ DWC_DEBUGPL(DBG_PCDV, "EP %d Interrupt Register - 0x%x\n", epnum, diepint.d32); ++ /* Transfer complete */ ++ if (diepint.b.xfercompl) { ++ /* Disable the NP Tx FIFO Empty ++ * Interrrupt */ ++ if(core_if->en_multiple_tx_fifo == 0) { ++ intr_mask.b.nptxfempty = 1; ++ dwc_modify_reg32(&core_if->core_global_regs->gintmsk, intr_mask.d32, 0); ++ } ++ else { ++ /* Disable the Tx FIFO Empty Interrupt for this EP */ ++ uint32_t fifoemptymsk = 0x1 << dwc_ep->num; ++ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk, ++ fifoemptymsk, 0); ++ } ++ /* Clear the bit in DIEPINTn for this interrupt */ ++ CLEAR_IN_EP_INTR(core_if,epnum,xfercompl); ++ ++ /* Complete the transfer */ 
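++				/* EP0 completions feed the control state machine;
++				 * ISOC endpoints (when DWC_EN_ISOC is set) go through
++				 * complete_iso_ep(); all other endpoints complete the
++				 * current request and start the next queued one. */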
++ if (epnum == 0) { ++ handle_ep0(pcd); ++ } ++#ifdef DWC_EN_ISOC ++ else if(dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) { ++ if(!ep->stopped) ++ complete_iso_ep(ep); ++ } ++#endif //DWC_EN_ISOC ++ else { ++ ++ complete_ep(ep); ++ } ++ } ++ /* Endpoint disable */ ++ if (diepint.b.epdisabled) { ++ DWC_DEBUGPL(DBG_ANY,"EP%d IN disabled\n", epnum); ++ handle_in_ep_disable_intr(pcd, epnum); ++ ++ /* Clear the bit in DIEPINTn for this interrupt */ ++ CLEAR_IN_EP_INTR(core_if,epnum,epdisabled); ++ } ++ /* AHB Error */ ++ if (diepint.b.ahberr) { ++ DWC_DEBUGPL(DBG_ANY,"EP%d IN AHB Error\n", epnum); ++ /* Clear the bit in DIEPINTn for this interrupt */ ++ CLEAR_IN_EP_INTR(core_if,epnum,ahberr); ++ } ++ /* TimeOUT Handshake (non-ISOC IN EPs) */ ++ if (diepint.b.timeout) { ++ DWC_DEBUGPL(DBG_ANY,"EP%d IN Time-out\n", epnum); ++ handle_in_ep_timeout_intr(pcd, epnum); ++ ++ CLEAR_IN_EP_INTR(core_if,epnum,timeout); ++ } ++ /** IN Token received with TxF Empty */ ++ if (diepint.b.intktxfemp) { ++ DWC_DEBUGPL(DBG_ANY,"EP%d IN TKN TxFifo Empty\n", ++ epnum); ++ if (!ep->stopped && epnum != 0) { ++ ++ diepmsk_data_t diepmsk = { .d32 = 0}; ++ diepmsk.b.intktxfemp = 1; ++ ++ if(core_if->multiproc_int_enable) { ++ dwc_modify_reg32(&dev_if->dev_global_regs->diepeachintmsk[epnum], ++ diepmsk.d32, 0); ++ } else { ++ dwc_modify_reg32(&dev_if->dev_global_regs->diepmsk, diepmsk.d32, 0); ++ } ++ start_next_request(ep); ++ } ++ else if(core_if->dma_desc_enable && epnum == 0 && ++ pcd->ep0state == EP0_OUT_STATUS_PHASE) { ++ // EP0 IN set STALL ++ depctl.d32 = dwc_read_reg32(&dev_if->in_ep_regs[epnum]->diepctl); ++ ++ /* set the disable and stall bits */ ++ if (depctl.b.epena) { ++ depctl.b.epdis = 1; ++ } ++ depctl.b.stall = 1; ++ dwc_write_reg32(&dev_if->in_ep_regs[epnum]->diepctl, depctl.d32); ++ } ++ CLEAR_IN_EP_INTR(core_if,epnum,intktxfemp); ++ } ++ /** IN Token Received with EP mismatch */ ++ if (diepint.b.intknepmis) { ++ DWC_DEBUGPL(DBG_ANY,"EP%d IN TKN EP Mismatch\n", epnum); ++ CLEAR_IN_EP_INTR(core_if,epnum,intknepmis); ++ } ++ /** IN Endpoint NAK Effective */ ++ if (diepint.b.inepnakeff) { ++ DWC_DEBUGPL(DBG_ANY,"EP%d IN EP NAK Effective\n", epnum); ++ /* Periodic EP */ ++ if (ep->disabling) { ++ depctl.d32 = 0; ++ depctl.b.snak = 1; ++ depctl.b.epdis = 1; ++ dwc_modify_reg32(&dev_if->in_ep_regs[epnum]->diepctl, depctl.d32, depctl.d32); ++ } ++ CLEAR_IN_EP_INTR(core_if,epnum,inepnakeff); ++ ++ } ++ ++ /** IN EP Tx FIFO Empty Intr */ ++ if (diepint.b.emptyintr) { ++ DWC_DEBUGPL(DBG_ANY,"EP%d Tx FIFO Empty Intr \n", epnum); ++ write_empty_tx_fifo(pcd, epnum); ++ ++ CLEAR_IN_EP_INTR(core_if,epnum,emptyintr); ++ ++ } ++ ++ /** IN EP BNA Intr */ ++ if (diepint.b.bna) { ++ CLEAR_IN_EP_INTR(core_if,epnum,bna); ++ if(core_if->dma_desc_enable) { ++#ifdef DWC_EN_ISOC ++ if(dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) { ++ /* ++ * This checking is performed to prevent first "false" BNA ++ * handling occuring right after reconnect ++ */ ++ if(dwc_ep->next_frame != 0xffffffff) ++ dwc_otg_pcd_handle_iso_bna(ep); ++ } ++ else ++#endif //DWC_EN_ISOC ++ { ++ dctl.d32 = dwc_read_reg32(&dev_if->dev_global_regs->dctl); ++ ++ /* If Global Continue on BNA is disabled - disable EP */ ++ if(!dctl.b.gcontbna) { ++ depctl.d32 = 0; ++ depctl.b.snak = 1; ++ depctl.b.epdis = 1; ++ dwc_modify_reg32(&dev_if->in_ep_regs[epnum]->diepctl, depctl.d32, depctl.d32); ++ } else { ++ start_next_request(ep); ++ } ++ } ++ } ++ } ++ /* NAK Interrutp */ ++ if (diepint.b.nak) { ++ DWC_DEBUGPL(DBG_ANY,"EP%d IN NAK Interrupt\n", epnum); ++ 
handle_in_ep_nak_intr(pcd, epnum); ++ ++ CLEAR_IN_EP_INTR(core_if,epnum,nak); ++ } ++ } ++ epnum++; ++ ep_intr >>=1; ++ } ++ ++ return 1; ++#undef CLEAR_IN_EP_INTR ++} ++ ++/** ++ * This interrupt indicates that an OUT EP has a pending Interrupt. ++ * The sequence for handling the OUT EP interrupt is shown below: ++ * -# Read the Device All Endpoint Interrupt register ++ * -# Repeat the following for each OUT EP interrupt bit set (from ++ * LSB to MSB). ++ * -# Read the Device Endpoint Interrupt (DOEPINTn) register ++ * -# If "Transfer Complete" call the request complete function ++ * -# If "Endpoint Disabled" complete the EP disable procedure. ++ * -# If "AHB Error Interrupt" log error ++ * -# If "Setup Phase Done" process Setup Packet (See Standard USB ++ * Command Processing) ++ */ ++static int32_t dwc_otg_pcd_handle_out_ep_intr(dwc_otg_pcd_t *pcd) ++{ ++#define CLEAR_OUT_EP_INTR(__core_if,__epnum,__intr) \ ++do { \ ++ doepint_data_t doepint = {.d32=0}; \ ++ doepint.b.__intr = 1; \ ++ dwc_write_reg32(&__core_if->dev_if->out_ep_regs[__epnum]->doepint, \ ++ doepint.d32); \ ++} while (0) ++ ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); ++ dwc_otg_dev_if_t *dev_if = core_if->dev_if; ++ uint32_t ep_intr; ++ doepint_data_t doepint = {.d32=0}; ++ dctl_data_t dctl = {.d32=0}; ++ depctl_data_t doepctl = {.d32=0}; ++ uint32_t epnum = 0; ++ dwc_otg_pcd_ep_t *ep; ++ dwc_ep_t *dwc_ep; ++ ++ DWC_DEBUGPL(DBG_PCDV, "%s()\n", __func__); ++ ++ /* Read in the device interrupt bits */ ++ ep_intr = dwc_otg_read_dev_all_out_ep_intr(core_if); ++ ++ while(ep_intr) { ++ if (ep_intr&0x1) { ++ /* Get EP pointer */ ++ ep = get_out_ep(pcd, epnum); ++ dwc_ep = &ep->dwc_ep; ++ ++#ifdef VERBOSE ++ DWC_DEBUGPL(DBG_PCDV, ++ "EP%d-%s: type=%d, mps=%d\n", ++ dwc_ep->num, (dwc_ep->is_in ?"IN":"OUT"), ++ dwc_ep->type, dwc_ep->maxpacket); ++#endif ++ doepint.d32 = dwc_otg_read_dev_out_ep_intr(core_if, dwc_ep); ++ ++ /* Transfer complete */ ++ if (doepint.b.xfercompl) { ++ ++ if (epnum == 0) { ++ /* Clear the bit in DOEPINTn for this interrupt */ ++ CLEAR_OUT_EP_INTR(core_if,epnum,xfercompl); ++ if(core_if->dma_desc_enable == 0 || pcd->ep0state != EP0_IDLE) ++ handle_ep0(pcd); ++#ifdef DWC_EN_ISOC ++ } else if(dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) { ++ if (doepint.b.pktdrpsts == 0) { ++ /* Clear the bit in DOEPINTn for this interrupt */ ++ CLEAR_OUT_EP_INTR(core_if,epnum,xfercompl); ++ complete_iso_ep(ep); ++ } else { ++ ++ doepint_data_t doepint = {.d32=0}; ++ doepint.b.xfercompl = 1; ++ doepint.b.pktdrpsts = 1; ++ dwc_write_reg32(&core_if->dev_if->out_ep_regs[epnum]->doepint, ++ doepint.d32); ++ if(handle_iso_out_pkt_dropped(core_if,dwc_ep)) { ++ complete_iso_ep(ep); ++ } ++ } ++#endif //DWC_EN_ISOC ++ } else { ++ /* Clear the bit in DOEPINTn for this interrupt */ ++ CLEAR_OUT_EP_INTR(core_if,epnum,xfercompl); ++ complete_ep(ep); ++ } ++ ++ } ++ ++ /* Endpoint disable */ ++ if (doepint.b.epdisabled) { ++ ++ /* Clear the bit in DOEPINTn for this interrupt */ ++ CLEAR_OUT_EP_INTR(core_if,epnum,epdisabled); ++ } ++ /* AHB Error */ ++ if (doepint.b.ahberr) { ++ DWC_DEBUGPL(DBG_PCD,"EP%d OUT AHB Error\n", epnum); ++ DWC_DEBUGPL(DBG_PCD,"EP DMA REG %d \n", core_if->dev_if->out_ep_regs[epnum]->doepdma); ++ CLEAR_OUT_EP_INTR(core_if,epnum,ahberr); ++ } ++ /* Setup Phase Done (contorl EPs) */ ++ if (doepint.b.setup) { ++#ifdef DEBUG_EP0 ++ DWC_DEBUGPL(DBG_PCD,"EP%d SETUP Done\n", ++ epnum); ++#endif ++ CLEAR_OUT_EP_INTR(core_if,epnum,setup); ++ ++ handle_ep0(pcd); ++ } ++ ++ /** OUT EP BNA Intr */ ++ if (doepint.b.bna) { ++ 
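++				/* Buffer Not Available (Descriptor DMA only): ISOC EPs get
++				 * their descriptors re-armed via dwc_otg_pcd_handle_iso_bna();
++				 * otherwise, depending on DCTL.GContBNA, the EP is either
++				 * NAKed and disabled or the next queued request is started. */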
CLEAR_OUT_EP_INTR(core_if,epnum,bna); ++ if(core_if->dma_desc_enable) { ++#ifdef DWC_EN_ISOC ++ if(dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) { ++ /* ++ * This checking is performed to prevent first "false" BNA ++ * handling occuring right after reconnect ++ */ ++ if(dwc_ep->next_frame != 0xffffffff) ++ dwc_otg_pcd_handle_iso_bna(ep); ++ } ++ else ++#endif //DWC_EN_ISOC ++ { ++ dctl.d32 = dwc_read_reg32(&dev_if->dev_global_regs->dctl); ++ ++ /* If Global Continue on BNA is disabled - disable EP*/ ++ if(!dctl.b.gcontbna) { ++ doepctl.d32 = 0; ++ doepctl.b.snak = 1; ++ doepctl.b.epdis = 1; ++ dwc_modify_reg32(&dev_if->out_ep_regs[epnum]->doepctl, doepctl.d32, doepctl.d32); ++ } else { ++ start_next_request(ep); ++ } ++ } ++ } ++ } ++ if (doepint.b.stsphsercvd) { ++ CLEAR_OUT_EP_INTR(core_if,epnum,stsphsercvd); ++ if(core_if->dma_desc_enable) { ++ do_setup_in_status_phase(pcd); ++ } ++ } ++ /* Babble Interrutp */ ++ if (doepint.b.babble) { ++ DWC_DEBUGPL(DBG_ANY,"EP%d OUT Babble\n", epnum); ++ handle_out_ep_babble_intr(pcd, epnum); ++ ++ CLEAR_OUT_EP_INTR(core_if,epnum,babble); ++ } ++ /* NAK Interrutp */ ++ if (doepint.b.nak) { ++ DWC_DEBUGPL(DBG_ANY,"EP%d OUT NAK\n", epnum); ++ handle_out_ep_nak_intr(pcd, epnum); ++ ++ CLEAR_OUT_EP_INTR(core_if,epnum,nak); ++ } ++ /* NYET Interrutp */ ++ if (doepint.b.nyet) { ++ DWC_DEBUGPL(DBG_ANY,"EP%d OUT NYET\n", epnum); ++ handle_out_ep_nyet_intr(pcd, epnum); ++ ++ CLEAR_OUT_EP_INTR(core_if,epnum,nyet); ++ } ++ } ++ ++ epnum++; ++ ep_intr >>=1; ++ } ++ ++ return 1; ++ ++#undef CLEAR_OUT_EP_INTR ++} ++ ++ ++/** ++ * Incomplete ISO IN Transfer Interrupt. ++ * This interrupt indicates one of the following conditions occurred ++ * while transmitting an ISOC transaction. ++ * - Corrupted IN Token for ISOC EP. ++ * - Packet not complete in FIFO. 
++ * The follow actions will be taken: ++ * -# Determine the EP ++ * -# Set incomplete flag in dwc_ep structure ++ * -# Disable EP; when "Endpoint Disabled" interrupt is received ++ * Flush FIFO ++ */ ++int32_t dwc_otg_pcd_handle_incomplete_isoc_in_intr(dwc_otg_pcd_t *pcd) ++{ ++ gintsts_data_t gintsts; ++ ++ ++#ifdef DWC_EN_ISOC ++ dwc_otg_dev_if_t *dev_if; ++ deptsiz_data_t deptsiz = { .d32 = 0}; ++ depctl_data_t depctl = { .d32 = 0}; ++ dsts_data_t dsts = { .d32 = 0}; ++ dwc_ep_t *dwc_ep; ++ int i; ++ ++ dev_if = GET_CORE_IF(pcd)->dev_if; ++ ++ for(i = 1; i <= dev_if->num_in_eps; ++i) { ++ dwc_ep = &pcd->in_ep[i].dwc_ep; ++ if(dwc_ep->active && ++ dwc_ep->type == USB_ENDPOINT_XFER_ISOC) ++ { ++ deptsiz.d32 = dwc_read_reg32(&dev_if->in_ep_regs[i]->dieptsiz); ++ depctl.d32 = dwc_read_reg32(&dev_if->in_ep_regs[i]->diepctl); ++ ++ if(depctl.b.epdis && deptsiz.d32) { ++ set_current_pkt_info(GET_CORE_IF(pcd), dwc_ep); ++ if(dwc_ep->cur_pkt >= dwc_ep->pkt_cnt) { ++ dwc_ep->cur_pkt = 0; ++ dwc_ep->proc_buf_num = (dwc_ep->proc_buf_num ^ 1) & 0x1; ++ ++ if(dwc_ep->proc_buf_num) { ++ dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff1; ++ dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr1; ++ } else { ++ dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff0; ++ dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr0; ++ } ++ ++ } ++ ++ dsts.d32 = dwc_read_reg32(&GET_CORE_IF(pcd)->dev_if->dev_global_regs->dsts); ++ dwc_ep->next_frame = dsts.b.soffn; ++ ++ dwc_otg_iso_ep_start_frm_transfer(GET_CORE_IF(pcd), dwc_ep); ++ } ++ } ++ } ++ ++#else ++ gintmsk_data_t intr_mask = { .d32 = 0}; ++ DWC_PRINT("INTERRUPT Handler not implemented for %s\n", ++ "IN ISOC Incomplete"); ++ ++ intr_mask.b.incomplisoin = 1; ++ dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk, ++ intr_mask.d32, 0); ++#endif //DWC_EN_ISOC ++ ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.incomplisoin = 1; ++ dwc_write_reg32 (&GET_CORE_IF(pcd)->core_global_regs->gintsts, ++ gintsts.d32); ++ ++ return 1; ++} ++ ++/** ++ * Incomplete ISO OUT Transfer Interrupt. ++ * ++ * This interrupt indicates that the core has dropped an ISO OUT ++ * packet. The following conditions can be the cause: ++ * - FIFO Full, the entire packet would not fit in the FIFO. ++ * - CRC Error ++ * - Corrupted Token ++ * The follow actions will be taken: ++ * -# Determine the EP ++ * -# Set incomplete flag in dwc_ep structure ++ * -# Read any data from the FIFO ++ * -# Disable EP. when "Endpoint Disabled" interrupt is received ++ * re-enable EP. 
++ */ ++int32_t dwc_otg_pcd_handle_incomplete_isoc_out_intr(dwc_otg_pcd_t *pcd) ++{ ++ /* @todo implement ISR */ ++ gintsts_data_t gintsts; ++ ++#ifdef DWC_EN_ISOC ++ dwc_otg_dev_if_t *dev_if; ++ deptsiz_data_t deptsiz = { .d32 = 0}; ++ depctl_data_t depctl = { .d32 = 0}; ++ dsts_data_t dsts = { .d32 = 0}; ++ dwc_ep_t *dwc_ep; ++ int i; ++ ++ dev_if = GET_CORE_IF(pcd)->dev_if; ++ ++ for(i = 1; i <= dev_if->num_out_eps; ++i) { ++ dwc_ep = &pcd->in_ep[i].dwc_ep; ++ if(pcd->out_ep[i].dwc_ep.active && ++ pcd->out_ep[i].dwc_ep.type == USB_ENDPOINT_XFER_ISOC) ++ { ++ deptsiz.d32 = dwc_read_reg32(&dev_if->out_ep_regs[i]->doeptsiz); ++ depctl.d32 = dwc_read_reg32(&dev_if->out_ep_regs[i]->doepctl); ++ ++ if(depctl.b.epdis && deptsiz.d32) { ++ set_current_pkt_info(GET_CORE_IF(pcd), &pcd->out_ep[i].dwc_ep); ++ if(dwc_ep->cur_pkt >= dwc_ep->pkt_cnt) { ++ dwc_ep->cur_pkt = 0; ++ dwc_ep->proc_buf_num = (dwc_ep->proc_buf_num ^ 1) & 0x1; ++ ++ if(dwc_ep->proc_buf_num) { ++ dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff1; ++ dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr1; ++ } else { ++ dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff0; ++ dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr0; ++ } ++ ++ } ++ ++ dsts.d32 = dwc_read_reg32(&GET_CORE_IF(pcd)->dev_if->dev_global_regs->dsts); ++ dwc_ep->next_frame = dsts.b.soffn; ++ ++ dwc_otg_iso_ep_start_frm_transfer(GET_CORE_IF(pcd), dwc_ep); ++ } ++ } ++ } ++#else ++ /** @todo implement ISR */ ++ gintmsk_data_t intr_mask = { .d32 = 0}; ++ ++ DWC_PRINT("INTERRUPT Handler not implemented for %s\n", ++ "OUT ISOC Incomplete"); ++ ++ intr_mask.b.incomplisoout = 1; ++ dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk, ++ intr_mask.d32, 0); ++ ++#endif // DWC_EN_ISOC ++ ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.incomplisoout = 1; ++ dwc_write_reg32 (&GET_CORE_IF(pcd)->core_global_regs->gintsts, ++ gintsts.d32); ++ ++ return 1; ++} ++ ++/** ++ * This function handles the Global IN NAK Effective interrupt. ++ * ++ */ ++int32_t dwc_otg_pcd_handle_in_nak_effective(dwc_otg_pcd_t *pcd) ++{ ++ dwc_otg_dev_if_t *dev_if = GET_CORE_IF(pcd)->dev_if; ++ depctl_data_t diepctl = { .d32 = 0}; ++ depctl_data_t diepctl_rd = { .d32 = 0}; ++ gintmsk_data_t intr_mask = { .d32 = 0}; ++ gintsts_data_t gintsts; ++ int i; ++ ++ DWC_DEBUGPL(DBG_PCD, "Global IN NAK Effective\n"); ++ ++ /* Disable all active IN EPs */ ++ diepctl.b.epdis = 1; ++ diepctl.b.snak = 1; ++ ++ for (i=0; i <= dev_if->num_in_eps; i++) ++ { ++ diepctl_rd.d32 = dwc_read_reg32(&dev_if->in_ep_regs[i]->diepctl); ++ if (diepctl_rd.b.epena) { ++ dwc_write_reg32(&dev_if->in_ep_regs[i]->diepctl, ++ diepctl.d32); ++ } ++ } ++ /* Disable the Global IN NAK Effective Interrupt */ ++ intr_mask.b.ginnakeff = 1; ++ dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk, ++ intr_mask.d32, 0); ++ ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.ginnakeff = 1; ++ dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts, ++ gintsts.d32); ++ ++ return 1; ++} ++ ++/** ++ * OUT NAK Effective. 
++ * ++ */ ++int32_t dwc_otg_pcd_handle_out_nak_effective(dwc_otg_pcd_t *pcd) ++{ ++ gintmsk_data_t intr_mask = { .d32 = 0}; ++ gintsts_data_t gintsts; ++ ++ DWC_PRINT("INTERRUPT Handler not implemented for %s\n", ++ "Global IN NAK Effective\n"); ++ /* Disable the Global IN NAK Effective Interrupt */ ++ intr_mask.b.goutnakeff = 1; ++ dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk, ++ intr_mask.d32, 0); ++ ++ /* Clear interrupt */ ++ gintsts.d32 = 0; ++ gintsts.b.goutnakeff = 1; ++ dwc_write_reg32 (&GET_CORE_IF(pcd)->core_global_regs->gintsts, ++ gintsts.d32); ++ ++ return 1; ++} ++ ++ ++/** ++ * PCD interrupt handler. ++ * ++ * The PCD handles the device interrupts. Many conditions can cause a ++ * device interrupt. When an interrupt occurs, the device interrupt ++ * service routine determines the cause of the interrupt and ++ * dispatches handling to the appropriate function. These interrupt ++ * handling functions are described below. ++ * ++ * All interrupt registers are processed from LSB to MSB. ++ * ++ */ ++int32_t dwc_otg_pcd_handle_intr(dwc_otg_pcd_t *pcd) ++{ ++ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); ++#ifdef VERBOSE ++ dwc_otg_core_global_regs_t *global_regs = ++ core_if->core_global_regs; ++#endif ++ gintsts_data_t gintr_status; ++ int32_t retval = 0; ++ ++ ++#ifdef VERBOSE ++ DWC_DEBUGPL(DBG_ANY, "%s() gintsts=%08x gintmsk=%08x\n", ++ __func__, ++ dwc_read_reg32(&global_regs->gintsts), ++ dwc_read_reg32(&global_regs->gintmsk)); ++#endif ++ ++ if (dwc_otg_is_device_mode(core_if)) { ++ SPIN_LOCK(&pcd->lock); ++#ifdef VERBOSE ++ DWC_DEBUGPL(DBG_PCDV, "%s() gintsts=%08x gintmsk=%08x\n", ++ __func__, ++ dwc_read_reg32(&global_regs->gintsts), ++ dwc_read_reg32(&global_regs->gintmsk)); ++#endif ++ ++ gintr_status.d32 = dwc_otg_read_core_intr(core_if); ++ ++/* ++ if (!gintr_status.d32) { ++ SPIN_UNLOCK(&pcd->lock); ++ return 0; ++ } ++*/ ++ DWC_DEBUGPL(DBG_PCDV, "%s: gintsts&gintmsk=%08x\n", ++ __func__, gintr_status.d32); ++ ++ if (gintr_status.b.sofintr) { ++ retval |= dwc_otg_pcd_handle_sof_intr(pcd); ++ } ++ if (gintr_status.b.rxstsqlvl) { ++ retval |= dwc_otg_pcd_handle_rx_status_q_level_intr(pcd); ++ } ++ if (gintr_status.b.nptxfempty) { ++ retval |= dwc_otg_pcd_handle_np_tx_fifo_empty_intr(pcd); ++ } ++ if (gintr_status.b.ginnakeff) { ++ retval |= dwc_otg_pcd_handle_in_nak_effective(pcd); ++ } ++ if (gintr_status.b.goutnakeff) { ++ retval |= dwc_otg_pcd_handle_out_nak_effective(pcd); ++ } ++ if (gintr_status.b.i2cintr) { ++ retval |= dwc_otg_pcd_handle_i2c_intr(pcd); ++ } ++ if (gintr_status.b.erlysuspend) { ++ retval |= dwc_otg_pcd_handle_early_suspend_intr(pcd); ++ } ++ if (gintr_status.b.usbreset) { ++ retval |= dwc_otg_pcd_handle_usb_reset_intr(pcd); ++ } ++ if (gintr_status.b.enumdone) { ++ retval |= dwc_otg_pcd_handle_enum_done_intr(pcd); ++ } ++ if (gintr_status.b.isooutdrop) { ++ retval |= dwc_otg_pcd_handle_isoc_out_packet_dropped_intr(pcd); ++ } ++ if (gintr_status.b.eopframe) { ++ retval |= dwc_otg_pcd_handle_end_periodic_frame_intr(pcd); ++ } ++ if (gintr_status.b.epmismatch) { ++ retval |= dwc_otg_pcd_handle_ep_mismatch_intr(core_if); ++ } ++ if (gintr_status.b.inepint) { ++ if(!core_if->multiproc_int_enable) { ++ retval |= dwc_otg_pcd_handle_in_ep_intr(pcd); ++ } ++ } ++ if (gintr_status.b.outepintr) { ++ if(!core_if->multiproc_int_enable) { ++ retval |= dwc_otg_pcd_handle_out_ep_intr(pcd); ++ } ++ } ++ if (gintr_status.b.incomplisoin) { ++ retval |= dwc_otg_pcd_handle_incomplete_isoc_in_intr(pcd); ++ } ++ if (gintr_status.b.incomplisoout) { 
++ retval |= dwc_otg_pcd_handle_incomplete_isoc_out_intr(pcd); ++ } ++ ++ /* In MPI mode De vice Endpoints intterrupts are asserted ++ * without setting outepintr and inepint bits set, so these ++ * Interrupt handlers are called without checking these bit-fields ++ */ ++ if(core_if->multiproc_int_enable) { ++ retval |= dwc_otg_pcd_handle_in_ep_intr(pcd); ++ retval |= dwc_otg_pcd_handle_out_ep_intr(pcd); ++ } ++#ifdef VERBOSE ++ DWC_DEBUGPL(DBG_PCDV, "%s() gintsts=%0x\n", __func__, ++ dwc_read_reg32(&global_regs->gintsts)); ++#endif ++ SPIN_UNLOCK(&pcd->lock); ++ } ++ ++ S3C2410X_CLEAR_EINTPEND(); ++ ++ return retval; ++} ++ ++#endif /* DWC_HOST_ONLY */ +--- /dev/null ++++ b/drivers/usb/dwc_otg/dwc_otg_regs.h +@@ -0,0 +1,2075 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_regs.h $ ++ * $Revision: 1.2 $ ++ * $Date: 2008-11-21 05:39:15 $ ++ * $Change: 1099526 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++ ++#ifndef __DWC_OTG_REGS_H__ ++#define __DWC_OTG_REGS_H__ ++ ++/** ++ * @file ++ * ++ * This file contains the data structures for accessing the DWC_otg core registers. ++ * ++ * The application interfaces with the HS OTG core by reading from and ++ * writing to the Control and Status Register (CSR) space through the ++ * AHB Slave interface. These registers are 32 bits wide, and the ++ * addresses are 32-bit-block aligned. 
++ * CSRs are classified as follows: ++ * - Core Global Registers ++ * - Device Mode Registers ++ * - Device Global Registers ++ * - Device Endpoint Specific Registers ++ * - Host Mode Registers ++ * - Host Global Registers ++ * - Host Port CSRs ++ * - Host Channel Specific Registers ++ * ++ * Only the Core Global registers can be accessed in both Device and ++ * Host modes. When the HS OTG core is operating in one mode, either ++ * Device or Host, the application must not access registers from the ++ * other mode. When the core switches from one mode to another, the ++ * registers in the new mode of operation must be reprogrammed as they ++ * would be after a power-on reset. ++ */ ++ ++/** Maximum number of Periodic FIFOs */ ++#define MAX_PERIO_FIFOS 15 ++/** Maximum number of Transmit FIFOs */ ++#define MAX_TX_FIFOS 15 ++ ++/** Maximum number of Endpoints/HostChannels */ ++#define MAX_EPS_CHANNELS 16 ++ ++/****************************************************************************/ ++/** DWC_otg Core registers . ++ * The dwc_otg_core_global_regs structure defines the size ++ * and relative field offsets for the Core Global registers. ++ */ ++typedef struct dwc_otg_core_global_regs ++{ ++ /** OTG Control and Status Register. Offset: 000h */ ++ volatile uint32_t gotgctl; ++ /** OTG Interrupt Register. Offset: 004h */ ++ volatile uint32_t gotgint; ++ /**Core AHB Configuration Register. Offset: 008h */ ++ volatile uint32_t gahbcfg; ++ ++#define DWC_GLBINTRMASK 0x0001 ++#define DWC_DMAENABLE 0x0020 ++#define DWC_NPTXEMPTYLVL_EMPTY 0x0080 ++#define DWC_NPTXEMPTYLVL_HALFEMPTY 0x0000 ++#define DWC_PTXEMPTYLVL_EMPTY 0x0100 ++#define DWC_PTXEMPTYLVL_HALFEMPTY 0x0000 ++ ++ /**Core USB Configuration Register. Offset: 00Ch */ ++ volatile uint32_t gusbcfg; ++ /**Core Reset Register. Offset: 010h */ ++ volatile uint32_t grstctl; ++ /**Core Interrupt Register. Offset: 014h */ ++ volatile uint32_t gintsts; ++ /**Core Interrupt Mask Register. Offset: 018h */ ++ volatile uint32_t gintmsk; ++ /**Receive Status Queue Read Register (Read Only). Offset: 01Ch */ ++ volatile uint32_t grxstsr; ++ /**Receive Status Queue Read & POP Register (Read Only). Offset: 020h*/ ++ volatile uint32_t grxstsp; ++ /**Receive FIFO Size Register. Offset: 024h */ ++ volatile uint32_t grxfsiz; ++ /**Non Periodic Transmit FIFO Size Register. Offset: 028h */ ++ volatile uint32_t gnptxfsiz; ++ /**Non Periodic Transmit FIFO/Queue Status Register (Read ++ * Only). Offset: 02Ch */ ++ volatile uint32_t gnptxsts; ++ /**I2C Access Register. Offset: 030h */ ++ volatile uint32_t gi2cctl; ++ /**PHY Vendor Control Register. Offset: 034h */ ++ volatile uint32_t gpvndctl; ++ /**General Purpose Input/Output Register. Offset: 038h */ ++ volatile uint32_t ggpio; ++ /**User ID Register. Offset: 03Ch */ ++ volatile uint32_t guid; ++ /**Synopsys ID Register (Read Only). Offset: 040h */ ++ volatile uint32_t gsnpsid; ++ /**User HW Config1 Register (Read Only). Offset: 044h */ ++ volatile uint32_t ghwcfg1; ++ /**User HW Config2 Register (Read Only). Offset: 048h */ ++ volatile uint32_t ghwcfg2; ++#define DWC_SLAVE_ONLY_ARCH 0 ++#define DWC_EXT_DMA_ARCH 1 ++#define DWC_INT_DMA_ARCH 2 ++ ++#define DWC_MODE_HNP_SRP_CAPABLE 0 ++#define DWC_MODE_SRP_ONLY_CAPABLE 1 ++#define DWC_MODE_NO_HNP_SRP_CAPABLE 2 ++#define DWC_MODE_SRP_CAPABLE_DEVICE 3 ++#define DWC_MODE_NO_SRP_CAPABLE_DEVICE 4 ++#define DWC_MODE_SRP_CAPABLE_HOST 5 ++#define DWC_MODE_NO_SRP_CAPABLE_HOST 6 ++ ++ /**User HW Config3 Register (Read Only). 
Offset: 04Ch */ ++ volatile uint32_t ghwcfg3; ++ /**User HW Config4 Register (Read Only). Offset: 050h*/ ++ volatile uint32_t ghwcfg4; ++ /** Reserved Offset: 054h-0FFh */ ++ volatile uint32_t reserved[43]; ++ /** Host Periodic Transmit FIFO Size Register. Offset: 100h */ ++ volatile uint32_t hptxfsiz; ++ /** Device Periodic Transmit FIFO#n Register if dedicated fifos are disabled, ++ otherwise Device Transmit FIFO#n Register. ++ * Offset: 104h + (FIFO_Number-1)*04h, 1 <= FIFO Number <= 15 (1<=n<=15). */ ++ volatile uint32_t dptxfsiz_dieptxf[15]; ++} dwc_otg_core_global_regs_t; ++ ++/** ++ * This union represents the bit fields of the Core OTG Control ++ * and Status Register (GOTGCTL). Set the bits using the bit ++ * fields then write the d32 value to the register. ++ */ ++typedef union gotgctl_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ unsigned sesreqscs : 1; ++ unsigned sesreq : 1; ++ unsigned reserved2_7 : 6; ++ unsigned hstnegscs : 1; ++ unsigned hnpreq : 1; ++ unsigned hstsethnpen : 1; ++ unsigned devhnpen : 1; ++ unsigned reserved12_15 : 4; ++ unsigned conidsts : 1; ++ unsigned reserved17 : 1; ++ unsigned asesvld : 1; ++ unsigned bsesvld : 1; ++ unsigned currmod : 1; ++ unsigned reserved21_31 : 11; ++ } b; ++} gotgctl_data_t; ++ ++/** ++ * This union represents the bit fields of the Core OTG Interrupt Register ++ * (GOTGINT). Set/clear the bits using the bit fields then write the d32 ++ * value to the register. ++ */ ++typedef union gotgint_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ /** Current Mode */ ++ unsigned reserved0_1 : 2; ++ ++ /** Session End Detected */ ++ unsigned sesenddet : 1; ++ ++ unsigned reserved3_7 : 5; ++ ++ /** Session Request Success Status Change */ ++ unsigned sesreqsucstschng : 1; ++ /** Host Negotiation Success Status Change */ ++ unsigned hstnegsucstschng : 1; ++ ++ unsigned reserver10_16 : 7; ++ ++ /** Host Negotiation Detected */ ++ unsigned hstnegdet : 1; ++ /** A-Device Timeout Change */ ++ unsigned adevtoutchng : 1; ++ /** Debounce Done */ ++ unsigned debdone : 1; ++ ++ unsigned reserved31_20 : 12; ++ ++ } b; ++} gotgint_data_t; ++ ++ ++/** ++ * This union represents the bit fields of the Core AHB Configuration ++ * Register (GAHBCFG). Set/clear the bits using the bit fields then ++ * write the d32 value to the register. ++ */ ++typedef union gahbcfg_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ unsigned glblintrmsk : 1; ++#define DWC_GAHBCFG_GLBINT_ENABLE 1 ++ ++ unsigned hburstlen : 4; ++#define DWC_GAHBCFG_INT_DMA_BURST_SINGLE 0 ++#define DWC_GAHBCFG_INT_DMA_BURST_INCR 1 ++#define DWC_GAHBCFG_INT_DMA_BURST_INCR4 3 ++#define DWC_GAHBCFG_INT_DMA_BURST_INCR8 5 ++#define DWC_GAHBCFG_INT_DMA_BURST_INCR16 7 ++ ++ unsigned dmaenable : 1; ++#define DWC_GAHBCFG_DMAENABLE 1 ++ unsigned reserved : 1; ++ unsigned nptxfemplvl_txfemplvl : 1; ++ unsigned ptxfemplvl : 1; ++#define DWC_GAHBCFG_TXFEMPTYLVL_EMPTY 1 ++#define DWC_GAHBCFG_TXFEMPTYLVL_HALFEMPTY 0 ++ unsigned reserved9_31 : 23; ++ } b; ++} gahbcfg_data_t; ++ ++/** ++ * This union represents the bit fields of the Core USB Configuration ++ * Register (GUSBCFG). Set the bits using the bit fields then write ++ * the d32 value to the register. 
++ */ ++typedef union gusbcfg_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ unsigned toutcal : 3; ++ unsigned phyif : 1; ++ unsigned ulpi_utmi_sel : 1; ++ unsigned fsintf : 1; ++ unsigned physel : 1; ++ unsigned ddrsel : 1; ++ unsigned srpcap : 1; ++ unsigned hnpcap : 1; ++ unsigned usbtrdtim : 4; ++ unsigned nptxfrwnden : 1; ++ unsigned phylpwrclksel : 1; ++ unsigned otgutmifssel : 1; ++ unsigned ulpi_fsls : 1; ++ unsigned ulpi_auto_res : 1; ++ unsigned ulpi_clk_sus_m : 1; ++ unsigned ulpi_ext_vbus_drv : 1; ++ unsigned ulpi_int_vbus_indicator : 1; ++ unsigned term_sel_dl_pulse : 1; ++ unsigned reserved23_27 : 5; ++ unsigned tx_end_delay : 1; ++ unsigned reserved29_31 : 3; ++ } b; ++} gusbcfg_data_t; ++ ++/** ++ * This union represents the bit fields of the Core Reset Register ++ * (GRSTCTL). Set/clear the bits using the bit fields then write the ++ * d32 value to the register. ++ */ ++typedef union grstctl_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ /** Core Soft Reset (CSftRst) (Device and Host) ++ * ++ * The application can flush the control logic in the ++ * entire core using this bit. This bit resets the ++ * pipelines in the AHB Clock domain as well as the ++ * PHY Clock domain. ++ * ++ * The state machines are reset to an IDLE state, the ++ * control bits in the CSRs are cleared, all the ++ * transmit FIFOs and the receive FIFO are flushed. ++ * ++ * The status mask bits that control the generation of ++ * the interrupt, are cleared, to clear the ++ * interrupt. The interrupt status bits are not ++ * cleared, so the application can get the status of ++ * any events that occurred in the core after it has ++ * set this bit. ++ * ++ * Any transactions on the AHB are terminated as soon ++ * as possible following the protocol. Any ++ * transactions on the USB are terminated immediately. ++ * ++ * The configuration settings in the CSRs are ++ * unchanged, so the software doesn't have to ++ * reprogram these registers (Device ++ * Configuration/Host Configuration/Core System ++ * Configuration/Core PHY Configuration). ++ * ++ * The application can write to this bit, any time it ++ * wants to reset the core. This is a self clearing ++ * bit and the core clears this bit after all the ++ * necessary logic is reset in the core, which may ++ * take several clocks, depending on the current state ++ * of the core. ++ */ ++ unsigned csftrst : 1; ++ /** Hclk Soft Reset ++ * ++ * The application uses this bit to reset the control logic in ++ * the AHB clock domain. Only AHB clock domain pipelines are ++ * reset. ++ */ ++ unsigned hsftrst : 1; ++ /** Host Frame Counter Reset (Host Only)
++ * ++ * The application can reset the (micro)frame number ++ * counter inside the core, using this bit. When the ++ * (micro)frame counter is reset, the subsequent SOF ++ * sent out by the core, will have a (micro)frame ++ * number of 0. ++ */ ++ unsigned hstfrm : 1; ++ /** In Token Sequence Learning Queue Flush ++ * (INTknQFlsh) (Device Only) ++ */ ++ unsigned intknqflsh : 1; ++ /** RxFIFO Flush (RxFFlsh) (Device and Host) ++ * ++ * The application can flush the entire Receive FIFO ++ * using this bit.
The application must first ++ * ensure that the core is not in the middle of a ++ * transaction.
The application should write into ++ * this bit, only after making sure that neither the ++ * DMA engine is reading from the RxFIFO nor the MAC ++ * is writing the data into the FIFO.
The ++ * application should wait until the bit is cleared ++ * before performing any other operations. This bit ++ * will takes 8 clocks (slowest of PHY or AHB clock) ++ * to clear. ++ */ ++ unsigned rxfflsh : 1; ++ /** TxFIFO Flush (TxFFlsh) (Device and Host). ++ * ++ * This bit is used to selectively flush a single or ++ * all transmit FIFOs. The application must first ++ * ensure that the core is not in the middle of a ++ * transaction.
The application should write into ++ * this bit, only after making sure that neither the ++ * DMA engine is writing into the TxFIFO nor the MAC ++ * is reading the data out of the FIFO.
The ++ * application should wait until the core clears this ++ * bit, before performing any operations. This bit ++ * will takes 8 clocks (slowest of PHY or AHB clock) ++ * to clear. ++ */ ++ unsigned txfflsh : 1; ++ ++ /** TxFIFO Number (TxFNum) (Device and Host). ++ * ++ * This is the FIFO number which needs to be flushed, ++ * using the TxFIFO Flush bit. This field should not ++ * be changed until the TxFIFO Flush bit is cleared by ++ * the core. ++ * - 0x0 : Non Periodic TxFIFO Flush ++ * - 0x1 : Periodic TxFIFO #1 Flush in device mode ++ * or Periodic TxFIFO in host mode ++ * - 0x2 : Periodic TxFIFO #2 Flush in device mode. ++ * - ... ++ * - 0xF : Periodic TxFIFO #15 Flush in device mode ++ * - 0x10: Flush all the Transmit NonPeriodic and ++ * Transmit Periodic FIFOs in the core ++ */ ++ unsigned txfnum : 5; ++ /** Reserved */ ++ unsigned reserved11_29 : 19; ++ /** DMA Request Signal. Indicated DMA request is in ++ * probress. Used for debug purpose. */ ++ unsigned dmareq : 1; ++ /** AHB Master Idle. Indicates the AHB Master State ++ * Machine is in IDLE condition. */ ++ unsigned ahbidle : 1; ++ } b; ++} grstctl_t; ++ ++ ++/** ++ * This union represents the bit fields of the Core Interrupt Mask ++ * Register (GINTMSK). Set/clear the bits using the bit fields then ++ * write the d32 value to the register. ++ */ ++typedef union gintmsk_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ unsigned reserved0 : 1; ++ unsigned modemismatch : 1; ++ unsigned otgintr : 1; ++ unsigned sofintr : 1; ++ unsigned rxstsqlvl : 1; ++ unsigned nptxfempty : 1; ++ unsigned ginnakeff : 1; ++ unsigned goutnakeff : 1; ++ unsigned reserved8 : 1; ++ unsigned i2cintr : 1; ++ unsigned erlysuspend : 1; ++ unsigned usbsuspend : 1; ++ unsigned usbreset : 1; ++ unsigned enumdone : 1; ++ unsigned isooutdrop : 1; ++ unsigned eopframe : 1; ++ unsigned reserved16 : 1; ++ unsigned epmismatch : 1; ++ unsigned inepintr : 1; ++ unsigned outepintr : 1; ++ unsigned incomplisoin : 1; ++ unsigned incomplisoout : 1; ++ unsigned reserved22_23 : 2; ++ unsigned portintr : 1; ++ unsigned hcintr : 1; ++ unsigned ptxfempty : 1; ++ unsigned reserved27 : 1; ++ unsigned conidstschng : 1; ++ unsigned disconnect : 1; ++ unsigned sessreqintr : 1; ++ unsigned wkupintr : 1; ++ } b; ++} gintmsk_data_t; ++/** ++ * This union represents the bit fields of the Core Interrupt Register ++ * (GINTSTS). Set/clear the bits using the bit fields then write the ++ * d32 value to the register. 
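These interrupt mask/status unions follow the access pattern used throughout the PCD interrupt handlers earlier in this patch: build a value through the bit fields, then write the raw d32 member to the register. A minimal sketch of that pattern, assuming a core_if pointer is in scope as in the handlers above (illustrative, not lifted verbatim from the driver):

	gintmsk_data_t intr_mask = { .d32 = 0 };
	gintsts_data_t gintsts = { .d32 = 0 };

	/* Mask (disable) the SOF interrupt by clearing its bit in GINTMSK */
	intr_mask.b.sofintr = 1;
	dwc_modify_reg32(&core_if->core_global_regs->gintmsk, intr_mask.d32, 0);

	/* Acknowledge a pending SOF interrupt; GINTSTS bits are write-1-to-clear */
	gintsts.b.sofintr = 1;
	dwc_write_reg32(&core_if->core_global_regs->gintsts, gintsts.d32);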
++ */ ++typedef union gintsts_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++#define DWC_SOF_INTR_MASK 0x0008 ++ /** register bits */ ++ struct ++ { ++#define DWC_HOST_MODE 1 ++ unsigned curmode : 1; ++ unsigned modemismatch : 1; ++ unsigned otgintr : 1; ++ unsigned sofintr : 1; ++ unsigned rxstsqlvl : 1; ++ unsigned nptxfempty : 1; ++ unsigned ginnakeff : 1; ++ unsigned goutnakeff : 1; ++ unsigned reserved8 : 1; ++ unsigned i2cintr : 1; ++ unsigned erlysuspend : 1; ++ unsigned usbsuspend : 1; ++ unsigned usbreset : 1; ++ unsigned enumdone : 1; ++ unsigned isooutdrop : 1; ++ unsigned eopframe : 1; ++ unsigned intokenrx : 1; ++ unsigned epmismatch : 1; ++ unsigned inepint: 1; ++ unsigned outepintr : 1; ++ unsigned incomplisoin : 1; ++ unsigned incomplisoout : 1; ++ unsigned reserved22_23 : 2; ++ unsigned portintr : 1; ++ unsigned hcintr : 1; ++ unsigned ptxfempty : 1; ++ unsigned reserved27 : 1; ++ unsigned conidstschng : 1; ++ unsigned disconnect : 1; ++ unsigned sessreqintr : 1; ++ unsigned wkupintr : 1; ++ } b; ++} gintsts_data_t; ++ ++ ++/** ++ * This union represents the bit fields in the Device Receive Status Read and ++ * Pop Registers (GRXSTSR, GRXSTSP) Read the register into the d32 ++ * element then read out the bits using the bit elements. ++ */ ++typedef union device_grxsts_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ unsigned epnum : 4; ++ unsigned bcnt : 11; ++ unsigned dpid : 2; ++ ++#define DWC_STS_DATA_UPDT 0x2 // OUT Data Packet ++#define DWC_STS_XFER_COMP 0x3 // OUT Data Transfer Complete ++ ++#define DWC_DSTS_GOUT_NAK 0x1 // Global OUT NAK ++#define DWC_DSTS_SETUP_COMP 0x4 // Setup Phase Complete ++#define DWC_DSTS_SETUP_UPDT 0x6 // SETUP Packet ++ unsigned pktsts : 4; ++ unsigned fn : 4; ++ unsigned reserved : 7; ++ } b; ++} device_grxsts_data_t; ++ ++/** ++ * This union represents the bit fields in the Host Receive Status Read and ++ * Pop Registers (GRXSTSR, GRXSTSP) Read the register into the d32 ++ * element then read out the bits using the bit elements. ++ */ ++typedef union host_grxsts_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ unsigned chnum : 4; ++ unsigned bcnt : 11; ++ unsigned dpid : 2; ++ ++ unsigned pktsts : 4; ++#define DWC_GRXSTS_PKTSTS_IN 0x2 ++#define DWC_GRXSTS_PKTSTS_IN_XFER_COMP 0x3 ++#define DWC_GRXSTS_PKTSTS_DATA_TOGGLE_ERR 0x5 ++#define DWC_GRXSTS_PKTSTS_CH_HALTED 0x7 ++ ++ unsigned reserved : 11; ++ } b; ++} host_grxsts_data_t; ++ ++/** ++ * This union represents the bit fields in the FIFO Size Registers (HPTXFSIZ, ++ * GNPTXFSIZ, DPTXFSIZn, DIEPTXFn). Read the register into the d32 element then ++ * read out the bits using the bit elements. ++ */ ++typedef union fifosize_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ unsigned startaddr : 16; ++ unsigned depth : 16; ++ } b; ++} fifosize_data_t; ++ ++/** ++ * This union represents the bit fields in the Non-Periodic Transmit ++ * FIFO/Queue Status Register (GNPTXSTS). Read the register into the ++ * d32 element then read out the bits using the bit ++ * elements. 
++ */ ++typedef union gnptxsts_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ unsigned nptxfspcavail : 16; ++ unsigned nptxqspcavail : 8; ++ /** Top of the Non-Periodic Transmit Request Queue ++ * - bit 24 - Terminate (Last entry for the selected ++ * channel/EP) ++ * - bits 26:25 - Token Type ++ * - 2'b00 - IN/OUT ++ * - 2'b01 - Zero Length OUT ++ * - 2'b10 - PING/Complete Split ++ * - 2'b11 - Channel Halt ++ * - bits 30:27 - Channel/EP Number ++ */ ++ unsigned nptxqtop_terminate : 1; ++ unsigned nptxqtop_token : 2; ++ unsigned nptxqtop_chnep : 4; ++ unsigned reserved : 1; ++ } b; ++} gnptxsts_data_t; ++ ++/** ++ * This union represents the bit fields in the Transmit ++ * FIFO Status Register (DTXFSTS). Read the register into the ++ * d32 element then read out the bits using the bit ++ * elements. ++ */ ++typedef union dtxfsts_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ unsigned txfspcavail : 16; ++ unsigned reserved : 16; ++ } b; ++} dtxfsts_data_t; ++ ++/** ++ * This union represents the bit fields in the I2C Control Register ++ * (I2CCTL). Read the register into the d32 element then read out the ++ * bits using the bit elements. ++ */ ++typedef union gi2cctl_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ unsigned rwdata : 8; ++ unsigned regaddr : 8; ++ unsigned addr : 7; ++ unsigned i2cen : 1; ++ unsigned ack : 1; ++ unsigned i2csuspctl : 1; ++ unsigned i2cdevaddr : 2; ++ unsigned reserved : 2; ++ unsigned rw : 1; ++ unsigned bsydne : 1; ++ } b; ++} gi2cctl_data_t; ++ ++/** ++ * This union represents the bit fields in the User HW Config1 ++ * Register. Read the register into the d32 element then read ++ * out the bits using the bit elements. ++ */ ++typedef union hwcfg1_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ unsigned ep_dir0 : 2; ++ unsigned ep_dir1 : 2; ++ unsigned ep_dir2 : 2; ++ unsigned ep_dir3 : 2; ++ unsigned ep_dir4 : 2; ++ unsigned ep_dir5 : 2; ++ unsigned ep_dir6 : 2; ++ unsigned ep_dir7 : 2; ++ unsigned ep_dir8 : 2; ++ unsigned ep_dir9 : 2; ++ unsigned ep_dir10 : 2; ++ unsigned ep_dir11 : 2; ++ unsigned ep_dir12 : 2; ++ unsigned ep_dir13 : 2; ++ unsigned ep_dir14 : 2; ++ unsigned ep_dir15 : 2; ++ } b; ++} hwcfg1_data_t; ++ ++/** ++ * This union represents the bit fields in the User HW Config2 ++ * Register. Read the register into the d32 element then read ++ * out the bits using the bit elements. 
++ */ ++typedef union hwcfg2_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ /* GHWCFG2 */ ++ unsigned op_mode : 3; ++#define DWC_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG 0 ++#define DWC_HWCFG2_OP_MODE_SRP_ONLY_CAPABLE_OTG 1 ++#define DWC_HWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE_OTG 2 ++#define DWC_HWCFG2_OP_MODE_SRP_CAPABLE_DEVICE 3 ++#define DWC_HWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE 4 ++#define DWC_HWCFG2_OP_MODE_SRP_CAPABLE_HOST 5 ++#define DWC_HWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST 6 ++ ++ unsigned architecture : 2; ++ unsigned point2point : 1; ++ unsigned hs_phy_type : 2; ++#define DWC_HWCFG2_HS_PHY_TYPE_NOT_SUPPORTED 0 ++#define DWC_HWCFG2_HS_PHY_TYPE_UTMI 1 ++#define DWC_HWCFG2_HS_PHY_TYPE_ULPI 2 ++#define DWC_HWCFG2_HS_PHY_TYPE_UTMI_ULPI 3 ++ ++ unsigned fs_phy_type : 2; ++ unsigned num_dev_ep : 4; ++ unsigned num_host_chan : 4; ++ unsigned perio_ep_supported : 1; ++ unsigned dynamic_fifo : 1; ++ unsigned multi_proc_int : 1; ++ unsigned reserved21 : 1; ++ unsigned nonperio_tx_q_depth : 2; ++ unsigned host_perio_tx_q_depth : 2; ++ unsigned dev_token_q_depth : 5; ++ unsigned reserved31 : 1; ++ } b; ++} hwcfg2_data_t; ++ ++/** ++ * This union represents the bit fields in the User HW Config3 ++ * Register. Read the register into the d32 element then read ++ * out the bits using the bit elements. ++ */ ++typedef union hwcfg3_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ /* GHWCFG3 */ ++ unsigned xfer_size_cntr_width : 4; ++ unsigned packet_size_cntr_width : 3; ++ unsigned otg_func : 1; ++ unsigned i2c : 1; ++ unsigned vendor_ctrl_if : 1; ++ unsigned optional_features : 1; ++ unsigned synch_reset_type : 1; ++ unsigned ahb_phy_clock_synch : 1; ++ unsigned reserved15_13 : 3; ++ unsigned dfifo_depth : 16; ++ } b; ++} hwcfg3_data_t; ++ ++/** ++ * This union represents the bit fields in the User HW Config4 ++ * Register. Read the register into the d32 element then read ++ * out the bits using the bit elements. ++ */ ++typedef union hwcfg4_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ unsigned num_dev_perio_in_ep : 4; ++ unsigned power_optimiz : 1; ++ unsigned min_ahb_freq : 9; ++ unsigned utmi_phy_data_width : 2; ++ unsigned num_dev_mode_ctrl_ep : 4; ++ unsigned iddig_filt_en : 1; ++ unsigned vbus_valid_filt_en : 1; ++ unsigned a_valid_filt_en : 1; ++ unsigned b_valid_filt_en : 1; ++ unsigned session_end_filt_en : 1; ++ unsigned ded_fifo_en : 1; ++ unsigned num_in_eps : 4; ++ unsigned desc_dma : 1; ++ unsigned desc_dma_dyn : 1; ++ } b; ++} hwcfg4_data_t; ++ ++//////////////////////////////////////////// ++// Device Registers ++/** ++ * Device Global Registers. Offsets 800h-BFFh ++ * ++ * The following structures define the size and relative field offsets ++ * for the Device Mode Registers. ++ * ++ * These registers are visible only in Device mode and must not be ++ * accessed in Host mode, as the results are unknown. ++ */ ++typedef struct dwc_otg_dev_global_regs ++{ ++ /** Device Configuration Register. Offset 800h */ ++ volatile uint32_t dcfg; ++ /** Device Control Register. Offset: 804h */ ++ volatile uint32_t dctl; ++ /** Device Status Register (Read Only). Offset: 808h */ ++ volatile uint32_t dsts; ++ /** Reserved. Offset: 80Ch */ ++ uint32_t unused; ++ /** Device IN Endpoint Common Interrupt Mask ++ * Register. Offset: 810h */ ++ volatile uint32_t diepmsk; ++ /** Device OUT Endpoint Common Interrupt Mask ++ * Register. 
Offset: 814h */ ++ volatile uint32_t doepmsk; ++ /** Device All Endpoints Interrupt Register. Offset: 818h */ ++ volatile uint32_t daint; ++ /** Device All Endpoints Interrupt Mask Register. Offset: ++ * 81Ch */ ++ volatile uint32_t daintmsk; ++ /** Device IN Token Queue Read Register-1 (Read Only). ++ * Offset: 820h */ ++ volatile uint32_t dtknqr1; ++ /** Device IN Token Queue Read Register-2 (Read Only). ++ * Offset: 824h */ ++ volatile uint32_t dtknqr2; ++ /** Device VBUS discharge Register. Offset: 828h */ ++ volatile uint32_t dvbusdis; ++ /** Device VBUS Pulse Register. Offset: 82Ch */ ++ volatile uint32_t dvbuspulse; ++ /** Device IN Token Queue Read Register-3 (Read Only). / ++ * Device Thresholding control register (Read/Write) ++ * Offset: 830h */ ++ volatile uint32_t dtknqr3_dthrctl; ++ /** Device IN Token Queue Read Register-4 (Read Only). / ++ * Device IN EPs empty Inr. Mask Register (Read/Write) ++ * Offset: 834h */ ++ volatile uint32_t dtknqr4_fifoemptymsk; ++ /** Device Each Endpoint Interrupt Register (Read Only). / ++ * Offset: 838h */ ++ volatile uint32_t deachint; ++ /** Device Each Endpoint Interrupt mask Register (Read/Write). / ++ * Offset: 83Ch */ ++ volatile uint32_t deachintmsk; ++ /** Device Each In Endpoint Interrupt mask Register (Read/Write). / ++ * Offset: 840h */ ++ volatile uint32_t diepeachintmsk[MAX_EPS_CHANNELS]; ++ /** Device Each Out Endpoint Interrupt mask Register (Read/Write). / ++ * Offset: 880h */ ++ volatile uint32_t doepeachintmsk[MAX_EPS_CHANNELS]; ++} dwc_otg_device_global_regs_t; ++ ++/** ++ * This union represents the bit fields in the Device Configuration ++ * Register. Read the register into the d32 member then ++ * set/clear the bits using the bit elements. Write the ++ * d32 member to the dcfg register. ++ */ ++typedef union dcfg_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ /** Device Speed */ ++ unsigned devspd : 2; ++ /** Non Zero Length Status OUT Handshake */ ++ unsigned nzstsouthshk : 1; ++#define DWC_DCFG_SEND_STALL 1 ++ ++ unsigned reserved3 : 1; ++ /** Device Addresses */ ++ unsigned devaddr : 7; ++ /** Periodic Frame Interval */ ++ unsigned perfrint : 2; ++#define DWC_DCFG_FRAME_INTERVAL_80 0 ++#define DWC_DCFG_FRAME_INTERVAL_85 1 ++#define DWC_DCFG_FRAME_INTERVAL_90 2 ++#define DWC_DCFG_FRAME_INTERVAL_95 3 ++ ++ unsigned reserved13_17 : 5; ++ /** In Endpoint Mis-match count */ ++ unsigned epmscnt : 5; ++ /** Enable Descriptor DMA in Device mode */ ++ unsigned descdma : 1; ++ } b; ++} dcfg_data_t; ++ ++/** ++ * This union represents the bit fields in the Device Control ++ * Register. Read the register into the d32 member then ++ * set/clear the bits using the bit elements. 
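The DCTL union defined next is normally driven with a read-modify-write sequence: read the register into d32, change the bit of interest, and write d32 back. A small illustrative sketch, assuming a dev_if pointer is in scope as in the endpoint handlers above (the soft-disconnect example is a sketch, not code from this patch):

	dctl_data_t dctl;

	/* Read DCTL, signal a soft disconnect to the host, then reconnect */
	dctl.d32 = dwc_read_reg32(&dev_if->dev_global_regs->dctl);
	dctl.b.sftdiscon = 1;
	dwc_write_reg32(&dev_if->dev_global_regs->dctl, dctl.d32);

	dctl.b.sftdiscon = 0;
	dwc_write_reg32(&dev_if->dev_global_regs->dctl, dctl.d32);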
++ */ ++typedef union dctl_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ /** Remote Wakeup */ ++ unsigned rmtwkupsig : 1; ++ /** Soft Disconnect */ ++ unsigned sftdiscon : 1; ++ /** Global Non-Periodic IN NAK Status */ ++ unsigned gnpinnaksts : 1; ++ /** Global OUT NAK Status */ ++ unsigned goutnaksts : 1; ++ /** Test Control */ ++ unsigned tstctl : 3; ++ /** Set Global Non-Periodic IN NAK */ ++ unsigned sgnpinnak : 1; ++ /** Clear Global Non-Periodic IN NAK */ ++ unsigned cgnpinnak : 1; ++ /** Set Global OUT NAK */ ++ unsigned sgoutnak : 1; ++ /** Clear Global OUT NAK */ ++ unsigned cgoutnak : 1; ++ ++ /** Power-On Programming Done */ ++ unsigned pwronprgdone : 1; ++ /** Global Continue on BNA */ ++ unsigned gcontbna : 1; ++ /** Global Multi Count */ ++ unsigned gmc : 2; ++ /** Ignore Frame Number for ISOC EPs */ ++ unsigned ifrmnum : 1; ++ /** NAK on Babble */ ++ unsigned nakonbble : 1; ++ ++ unsigned reserved16_31 : 16; ++ } b; ++} dctl_data_t; ++ ++/** ++ * This union represents the bit fields in the Device Status ++ * Register. Read the register into the d32 member then ++ * set/clear the bits using the bit elements. ++ */ ++typedef union dsts_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ /** Suspend Status */ ++ unsigned suspsts : 1; ++ /** Enumerated Speed */ ++ unsigned enumspd : 2; ++#define DWC_DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ 0 ++#define DWC_DSTS_ENUMSPD_FS_PHY_30MHZ_OR_60MHZ 1 ++#define DWC_DSTS_ENUMSPD_LS_PHY_6MHZ 2 ++#define DWC_DSTS_ENUMSPD_FS_PHY_48MHZ 3 ++ /** Erratic Error */ ++ unsigned errticerr : 1; ++ unsigned reserved4_7: 4; ++ /** Frame or Microframe Number of the received SOF */ ++ unsigned soffn : 14; ++ unsigned reserved22_31 : 10; ++ } b; ++} dsts_data_t; ++ ++ ++/** ++ * This union represents the bit fields in the Device IN EP Interrupt ++ * Register and the Device IN EP Common Mask Register. ++ * ++ * - Read the register into the d32 member then set/clear the ++ * bits using the bit elements. ++ */ ++typedef union diepint_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ /** Transfer complete mask */ ++ unsigned xfercompl : 1; ++ /** Endpoint disable mask */ ++ unsigned epdisabled : 1; ++ /** AHB Error mask */ ++ unsigned ahberr : 1; ++ /** TimeOUT Handshake mask (non-ISOC EPs) */ ++ unsigned timeout : 1; ++ /** IN Token received with TxF Empty mask */ ++ unsigned intktxfemp : 1; ++ /** IN Token Received with EP mismatch mask */ ++ unsigned intknepmis : 1; ++ /** IN Endpoint HAK Effective mask */ ++ unsigned inepnakeff : 1; ++ /** IN Endpoint HAK Effective mask */ ++ unsigned emptyintr : 1; ++ ++ unsigned txfifoundrn : 1; ++ ++ /** BNA Interrupt mask */ ++ unsigned bna : 1; ++ ++ unsigned reserved10_12 : 3; ++ /** BNA Interrupt mask */ ++ unsigned nak : 1; ++ ++ unsigned reserved14_31 : 18; ++ } b; ++} diepint_data_t; ++ ++/** ++ * This union represents the bit fields in the Device IN EP ++ * Common/Dedicated Interrupt Mask Register. ++ */ ++typedef union diepint_data diepmsk_data_t; ++ ++/** ++ * This union represents the bit fields in the Device OUT EP Interrupt ++ * Registerand Device OUT EP Common Interrupt Mask Register. ++ * ++ * - Read the register into the d32 member then set/clear the ++ * bits using the bit elements. 
++ */ ++typedef union doepint_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ /** Transfer complete */ ++ unsigned xfercompl : 1; ++ /** Endpoint disable */ ++ unsigned epdisabled : 1; ++ /** AHB Error */ ++ unsigned ahberr : 1; ++ /** Setup Phase Done (contorl EPs) */ ++ unsigned setup : 1; ++ /** OUT Token Received when Endpoint Disabled */ ++ unsigned outtknepdis : 1; ++ ++ unsigned stsphsercvd : 1; ++ /** Back-to-Back SETUP Packets Received */ ++ unsigned back2backsetup : 1; ++ ++ unsigned reserved7 : 1; ++ /** OUT packet Error */ ++ unsigned outpkterr : 1; ++ /** BNA Interrupt */ ++ unsigned bna : 1; ++ ++ unsigned reserved10 : 1; ++ /** Packet Drop Status */ ++ unsigned pktdrpsts : 1; ++ /** Babble Interrupt */ ++ unsigned babble : 1; ++ /** NAK Interrupt */ ++ unsigned nak : 1; ++ /** NYET Interrupt */ ++ unsigned nyet : 1; ++ ++ unsigned reserved15_31 : 17; ++ } b; ++} doepint_data_t; ++ ++/** ++ * This union represents the bit fields in the Device OUT EP ++ * Common/Dedicated Interrupt Mask Register. ++ */ ++typedef union doepint_data doepmsk_data_t; ++ ++/** ++ * This union represents the bit fields in the Device All EP Interrupt ++ * and Mask Registers. ++ * - Read the register into the d32 member then set/clear the ++ * bits using the bit elements. ++ */ ++typedef union daint_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ /** IN Endpoint bits */ ++ unsigned in : 16; ++ /** OUT Endpoint bits */ ++ unsigned out : 16; ++ } ep; ++ struct ++ { ++ /** IN Endpoint bits */ ++ unsigned inep0 : 1; ++ unsigned inep1 : 1; ++ unsigned inep2 : 1; ++ unsigned inep3 : 1; ++ unsigned inep4 : 1; ++ unsigned inep5 : 1; ++ unsigned inep6 : 1; ++ unsigned inep7 : 1; ++ unsigned inep8 : 1; ++ unsigned inep9 : 1; ++ unsigned inep10 : 1; ++ unsigned inep11 : 1; ++ unsigned inep12 : 1; ++ unsigned inep13 : 1; ++ unsigned inep14 : 1; ++ unsigned inep15 : 1; ++ /** OUT Endpoint bits */ ++ unsigned outep0 : 1; ++ unsigned outep1 : 1; ++ unsigned outep2 : 1; ++ unsigned outep3 : 1; ++ unsigned outep4 : 1; ++ unsigned outep5 : 1; ++ unsigned outep6 : 1; ++ unsigned outep7 : 1; ++ unsigned outep8 : 1; ++ unsigned outep9 : 1; ++ unsigned outep10 : 1; ++ unsigned outep11 : 1; ++ unsigned outep12 : 1; ++ unsigned outep13 : 1; ++ unsigned outep14 : 1; ++ unsigned outep15 : 1; ++ } b; ++} daint_data_t; ++ ++/** ++ * This union represents the bit fields in the Device IN Token Queue ++ * Read Registers. ++ * - Read the register into the d32 member. ++ * - READ-ONLY Register ++ */ ++typedef union dtknq1_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ /** In Token Queue Write Pointer */ ++ unsigned intknwptr : 5; ++ /** Reserved */ ++ unsigned reserved05_06 : 2; ++ /** write pointer has wrapped. */ ++ unsigned wrap_bit : 1; ++ /** EP Numbers of IN Tokens 0 ... 4 */ ++ unsigned epnums0_5 : 24; ++ }b; ++} dtknq1_data_t; ++ ++/** ++ * This union represents Threshold control Register ++ * - Read and write the register into the d32 member. ++ * - READ-WRITABLE Register ++ */ ++typedef union dthrctl_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ /** non ISO Tx Thr. Enable */ ++ unsigned non_iso_thr_en : 1; ++ /** ISO Tx Thr. Enable */ ++ unsigned iso_thr_en : 1; ++ /** Tx Thr. Length */ ++ unsigned tx_thr_len : 9; ++ /** Reserved */ ++ unsigned reserved11_15 : 5; ++ /** Rx Thr. Enable */ ++ unsigned rx_thr_en : 1; ++ /** Rx Thr. 
Length */ ++ unsigned rx_thr_len : 9; ++ /** Reserved */ ++ unsigned reserved26_31 : 6; ++ }b; ++} dthrctl_data_t; ++ ++ ++/** ++ * Device Logical IN Endpoint-Specific Registers. Offsets ++ * 900h-AFCh ++ * ++ * There will be one set of endpoint registers per logical endpoint ++ * implemented. ++ * ++ * These registers are visible only in Device mode and must not be ++ * accessed in Host mode, as the results are unknown. ++ */ ++typedef struct dwc_otg_dev_in_ep_regs ++{ ++ /** Device IN Endpoint Control Register. Offset:900h + ++ * (ep_num * 20h) + 00h */ ++ volatile uint32_t diepctl; ++ /** Reserved. Offset:900h + (ep_num * 20h) + 04h */ ++ uint32_t reserved04; ++ /** Device IN Endpoint Interrupt Register. Offset:900h + ++ * (ep_num * 20h) + 08h */ ++ volatile uint32_t diepint; ++ /** Reserved. Offset:900h + (ep_num * 20h) + 0Ch */ ++ uint32_t reserved0C; ++ /** Device IN Endpoint Transfer Size ++ * Register. Offset:900h + (ep_num * 20h) + 10h */ ++ volatile uint32_t dieptsiz; ++ /** Device IN Endpoint DMA Address Register. Offset:900h + ++ * (ep_num * 20h) + 14h */ ++ volatile uint32_t diepdma; ++ /** Device IN Endpoint Transmit FIFO Status Register. Offset:900h + ++ * (ep_num * 20h) + 18h */ ++ volatile uint32_t dtxfsts; ++ /** Device IN Endpoint DMA Buffer Register. Offset:900h + ++ * (ep_num * 20h) + 1Ch */ ++ volatile uint32_t diepdmab; ++} dwc_otg_dev_in_ep_regs_t; ++ ++/** ++ * Device Logical OUT Endpoint-Specific Registers. Offsets: ++ * B00h-CFCh ++ * ++ * There will be one set of endpoint registers per logical endpoint ++ * implemented. ++ * ++ * These registers are visible only in Device mode and must not be ++ * accessed in Host mode, as the results are unknown. ++ */ ++typedef struct dwc_otg_dev_out_ep_regs ++{ ++ /** Device OUT Endpoint Control Register. Offset:B00h + ++ * (ep_num * 20h) + 00h */ ++ volatile uint32_t doepctl; ++ /** Device OUT Endpoint Frame number Register. Offset: ++ * B00h + (ep_num * 20h) + 04h */ ++ volatile uint32_t doepfn; ++ /** Device OUT Endpoint Interrupt Register. Offset:B00h + ++ * (ep_num * 20h) + 08h */ ++ volatile uint32_t doepint; ++ /** Reserved. Offset:B00h + (ep_num * 20h) + 0Ch */ ++ uint32_t reserved0C; ++ /** Device OUT Endpoint Transfer Size Register. Offset: ++ * B00h + (ep_num * 20h) + 10h */ ++ volatile uint32_t doeptsiz; ++ /** Device OUT Endpoint DMA Address Register. Offset:B00h ++ * + (ep_num * 20h) + 14h */ ++ volatile uint32_t doepdma; ++ /** Reserved. Offset:B00h + * (ep_num * 20h) + 1Ch */ ++ uint32_t unused; ++ /** Device OUT Endpoint DMA Buffer Register. Offset:B00h ++ * + (ep_num * 20h) + 1Ch */ ++ uint32_t doepdmab; ++} dwc_otg_dev_out_ep_regs_t; ++ ++/** ++ * This union represents the bit fields in the Device EP Control ++ * Register. Read the register into the d32 member then ++ * set/clear the bits using the bit elements. 
++ */ ++typedef union depctl_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ /** Maximum Packet Size ++ * IN/OUT EPn ++ * IN/OUT EP0 - 2 bits ++ * 2'b00: 64 Bytes ++ * 2'b01: 32 ++ * 2'b10: 16 ++ * 2'b11: 8 */ ++ unsigned mps : 11; ++#define DWC_DEP0CTL_MPS_64 0 ++#define DWC_DEP0CTL_MPS_32 1 ++#define DWC_DEP0CTL_MPS_16 2 ++#define DWC_DEP0CTL_MPS_8 3 ++ ++ /** Next Endpoint ++ * IN EPn/IN EP0 ++ * OUT EPn/OUT EP0 - reserved */ ++ unsigned nextep : 4; ++ ++ /** USB Active Endpoint */ ++ unsigned usbactep : 1; ++ ++ /** Endpoint DPID (INTR/Bulk IN and OUT endpoints) ++ * This field contains the PID of the packet going to ++ * be received or transmitted on this endpoint. The ++ * application should program the PID of the first ++ * packet going to be received or transmitted on this ++ * endpoint , after the endpoint is ++ * activated. Application use the SetD1PID and ++ * SetD0PID fields of this register to program either ++ * D0 or D1 PID. ++ * ++ * The encoding for this field is ++ * - 0: D0 ++ * - 1: D1 ++ */ ++ unsigned dpid : 1; ++ ++ /** NAK Status */ ++ unsigned naksts : 1; ++ ++ /** Endpoint Type ++ * 2'b00: Control ++ * 2'b01: Isochronous ++ * 2'b10: Bulk ++ * 2'b11: Interrupt */ ++ unsigned eptype : 2; ++ ++ /** Snoop Mode ++ * OUT EPn/OUT EP0 ++ * IN EPn/IN EP0 - reserved */ ++ unsigned snp : 1; ++ ++ /** Stall Handshake */ ++ unsigned stall : 1; ++ ++ /** Tx Fifo Number ++ * IN EPn/IN EP0 ++ * OUT EPn/OUT EP0 - reserved */ ++ unsigned txfnum : 4; ++ ++ /** Clear NAK */ ++ unsigned cnak : 1; ++ /** Set NAK */ ++ unsigned snak : 1; ++ /** Set DATA0 PID (INTR/Bulk IN and OUT endpoints) ++ * Writing to this field sets the Endpoint DPID (DPID) ++ * field in this register to DATA0. Set Even ++ * (micro)frame (SetEvenFr) (ISO IN and OUT Endpoints) ++ * Writing to this field sets the Even/Odd ++ * (micro)frame (EO_FrNum) field to even (micro) ++ * frame. ++ */ ++ unsigned setd0pid : 1; ++ /** Set DATA1 PID (INTR/Bulk IN and OUT endpoints) ++ * Writing to this field sets the Endpoint DPID (DPID) ++ * field in this register to DATA1 Set Odd ++ * (micro)frame (SetOddFr) (ISO IN and OUT Endpoints) ++ * Writing to this field sets the Even/Odd ++ * (micro)frame (EO_FrNum) field to odd (micro) frame. ++ */ ++ unsigned setd1pid : 1; ++ ++ /** Endpoint Disable */ ++ unsigned epdis : 1; ++ /** Endpoint Enable */ ++ unsigned epena : 1; ++ } b; ++} depctl_data_t; ++ ++/** ++ * This union represents the bit fields in the Device EP Transfer ++ * Size Register. Read the register into the d32 member then ++ * set/clear the bits using the bit elements. ++ */ ++typedef union deptsiz_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct { ++ /** Transfer size */ ++ unsigned xfersize : 19; ++ /** Packet Count */ ++ unsigned pktcnt : 10; ++ /** Multi Count - Periodic IN endpoints */ ++ unsigned mc : 2; ++ unsigned reserved : 1; ++ } b; ++} deptsiz_data_t; ++ ++/** ++ * This union represents the bit fields in the Device EP 0 Transfer ++ * Size Register. Read the register into the d32 member then ++ * set/clear the bits using the bit elements. 
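The DOEPTSIZ0 layout defined next is typically programmed before re-arming EP0 OUT for SETUP packets: the SETUP packet count, packet count and transfer size are set through the bit fields and the d32 value is written to the register. A hedged sketch with illustrative values (room for three 8-byte SETUP packets; dev_if is assumed to be in scope):

	deptsiz0_data_t doeptsiz0 = { .d32 = 0 };

	/* Accept up to 3 back-to-back SETUP packets of 8 bytes in one OUT packet */
	doeptsiz0.b.supcnt = 3;
	doeptsiz0.b.pktcnt = 1;
	doeptsiz0.b.xfersize = 8 * 3;
	dwc_write_reg32(&dev_if->out_ep_regs[0]->doeptsiz, doeptsiz0.d32);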
++ */ ++typedef union deptsiz0_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct { ++ /** Transfer size */ ++ unsigned xfersize : 7; ++ /** Reserved */ ++ unsigned reserved7_18 : 12; ++ /** Packet Count */ ++ unsigned pktcnt : 1; ++ /** Reserved */ ++ unsigned reserved20_28 : 9; ++ /**Setup Packet Count (DOEPTSIZ0 Only) */ ++ unsigned supcnt : 2; ++ unsigned reserved31; ++ } b; ++} deptsiz0_data_t; ++ ++ ++///////////////////////////////////////////////// ++// DMA Descriptor Specific Structures ++// ++ ++/** Buffer status definitions */ ++ ++#define BS_HOST_READY 0x0 ++#define BS_DMA_BUSY 0x1 ++#define BS_DMA_DONE 0x2 ++#define BS_HOST_BUSY 0x3 ++ ++/** Receive/Transmit status definitions */ ++ ++#define RTS_SUCCESS 0x0 ++#define RTS_BUFFLUSH 0x1 ++#define RTS_RESERVED 0x2 ++#define RTS_BUFERR 0x3 ++ ++ ++/** ++ * This union represents the bit fields in the DMA Descriptor ++ * status quadlet. Read the quadlet into the d32 member then ++ * set/clear the bits using the bit, b_iso_out and ++ * b_iso_in elements. ++ */ ++typedef union desc_sts_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** quadlet bits */ ++ struct { ++ /** Received number of bytes */ ++ unsigned bytes : 16; ++ ++ unsigned reserved16_22 : 7; ++ /** Multiple Transfer - only for OUT EPs */ ++ unsigned mtrf : 1; ++ /** Setup Packet received - only for OUT EPs */ ++ unsigned sr : 1; ++ /** Interrupt On Complete */ ++ unsigned ioc : 1; ++ /** Short Packet */ ++ unsigned sp : 1; ++ /** Last */ ++ unsigned l : 1; ++ /** Receive Status */ ++ unsigned sts : 2; ++ /** Buffer Status */ ++ unsigned bs : 2; ++ } b; ++ ++#ifdef DWC_EN_ISOC ++ /** iso out quadlet bits */ ++ struct { ++ /** Received number of bytes */ ++ unsigned rxbytes : 11; ++ ++ unsigned reserved11 : 1; ++ /** Frame Number */ ++ unsigned framenum : 11; ++ /** Received ISO Data PID */ ++ unsigned pid : 2; ++ /** Interrupt On Complete */ ++ unsigned ioc : 1; ++ /** Short Packet */ ++ unsigned sp : 1; ++ /** Last */ ++ unsigned l : 1; ++ /** Receive Status */ ++ unsigned rxsts : 2; ++ /** Buffer Status */ ++ unsigned bs : 2; ++ } b_iso_out; ++ ++ /** iso in quadlet bits */ ++ struct { ++ /** Transmited number of bytes */ ++ unsigned txbytes : 12; ++ /** Frame Number */ ++ unsigned framenum : 11; ++ /** Transmited ISO Data PID */ ++ unsigned pid : 2; ++ /** Interrupt On Complete */ ++ unsigned ioc : 1; ++ /** Short Packet */ ++ unsigned sp : 1; ++ /** Last */ ++ unsigned l : 1; ++ /** Transmit Status */ ++ unsigned txsts : 2; ++ /** Buffer Status */ ++ unsigned bs : 2; ++ } b_iso_in; ++#endif //DWC_EN_ISOC ++} desc_sts_data_t; ++ ++/** ++ * DMA Descriptor structure ++ * ++ * DMA Descriptor structure contains two quadlets: ++ * Status quadlet and Data buffer pointer. ++ */ ++typedef struct dwc_otg_dma_desc ++{ ++ /** DMA Descriptor status quadlet */ ++ desc_sts_data_t status; ++ /** DMA Descriptor data buffer pointer */ ++ dma_addr_t buf; ++} dwc_otg_dma_desc_t; ++ ++/** ++ * The dwc_otg_dev_if structure contains information needed to manage ++ * the DWC_otg controller acting in device mode. It represents the ++ * programming view of the device-specific aspects of the controller. ++ */ ++typedef struct dwc_otg_dev_if ++{ ++ /** Pointer to device Global registers. 
++ * Device Global Registers starting at offset 800h ++ */ ++ dwc_otg_device_global_regs_t *dev_global_regs; ++#define DWC_DEV_GLOBAL_REG_OFFSET 0x800 ++ ++ /** ++ * Device Logical IN Endpoint-Specific Registers 900h-AFCh ++ */ ++ dwc_otg_dev_in_ep_regs_t *in_ep_regs[MAX_EPS_CHANNELS]; ++#define DWC_DEV_IN_EP_REG_OFFSET 0x900 ++#define DWC_EP_REG_OFFSET 0x20 ++ ++ /** Device Logical OUT Endpoint-Specific Registers B00h-CFCh */ ++ dwc_otg_dev_out_ep_regs_t *out_ep_regs[MAX_EPS_CHANNELS]; ++#define DWC_DEV_OUT_EP_REG_OFFSET 0xB00 ++ ++ /* Device configuration information*/ ++ uint8_t speed; /**< Device Speed 0: Unknown, 1: LS, 2:FS, 3: HS */ ++ uint8_t num_in_eps; /**< Number # of Tx EP range: 0-15 exept ep0 */ ++ uint8_t num_out_eps; /**< Number # of Rx EP range: 0-15 exept ep 0*/ ++ ++ /** Size of periodic FIFOs (Bytes) */ ++ uint16_t perio_tx_fifo_size[MAX_PERIO_FIFOS]; ++ ++ /** Size of Tx FIFOs (Bytes) */ ++ uint16_t tx_fifo_size[MAX_TX_FIFOS]; ++ ++ /** Thresholding enable flags and length varaiables **/ ++ uint16_t rx_thr_en; ++ uint16_t iso_tx_thr_en; ++ uint16_t non_iso_tx_thr_en; ++ ++ uint16_t rx_thr_length; ++ uint16_t tx_thr_length; ++ ++ /** ++ * Pointers to the DMA Descriptors for EP0 Control ++ * transfers (virtual and physical) ++ */ ++ ++ /** 2 descriptors for SETUP packets */ ++ uint32_t dma_setup_desc_addr[2]; ++ dwc_otg_dma_desc_t* setup_desc_addr[2]; ++ ++ /** Pointer to Descriptor with latest SETUP packet */ ++ dwc_otg_dma_desc_t* psetup; ++ ++ /** Index of current SETUP handler descriptor */ ++ uint32_t setup_desc_index; ++ ++ /** Descriptor for Data In or Status In phases */ ++ uint32_t dma_in_desc_addr; ++ dwc_otg_dma_desc_t* in_desc_addr;; ++ ++ /** Descriptor for Data Out or Status Out phases */ ++ uint32_t dma_out_desc_addr; ++ dwc_otg_dma_desc_t* out_desc_addr; ++ ++} dwc_otg_dev_if_t; ++ ++ ++ ++ ++///////////////////////////////////////////////// ++// Host Mode Register Structures ++// ++/** ++ * The Host Global Registers structure defines the size and relative ++ * field offsets for the Host Mode Global Registers. Host Global ++ * Registers offsets 400h-7FFh. ++*/ ++typedef struct dwc_otg_host_global_regs ++{ ++ /** Host Configuration Register. Offset: 400h */ ++ volatile uint32_t hcfg; ++ /** Host Frame Interval Register. Offset: 404h */ ++ volatile uint32_t hfir; ++ /** Host Frame Number / Frame Remaining Register. Offset: 408h */ ++ volatile uint32_t hfnum; ++ /** Reserved. Offset: 40Ch */ ++ uint32_t reserved40C; ++ /** Host Periodic Transmit FIFO/ Queue Status Register. Offset: 410h */ ++ volatile uint32_t hptxsts; ++ /** Host All Channels Interrupt Register. Offset: 414h */ ++ volatile uint32_t haint; ++ /** Host All Channels Interrupt Mask Register. Offset: 418h */ ++ volatile uint32_t haintmsk; ++} dwc_otg_host_global_regs_t; ++ ++/** ++ * This union represents the bit fields in the Host Configuration Register. ++ * Read the register into the d32 member then set/clear the bits using ++ * the bit elements. Write the d32 member to the hcfg register. ++ */ ++typedef union hcfg_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ ++ /** register bits */ ++ struct ++ { ++ /** FS/LS Phy Clock Select */ ++ unsigned fslspclksel : 2; ++#define DWC_HCFG_30_60_MHZ 0 ++#define DWC_HCFG_48_MHZ 1 ++#define DWC_HCFG_6_MHZ 2 ++ ++ /** FS/LS Only Support */ ++ unsigned fslssupp : 1; ++ } b; ++} hcfg_data_t; ++ ++/** ++ * This union represents the bit fields in the Host Frame Remaing/Number ++ * Register. 
++ */ ++typedef union hfir_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ ++ /** register bits */ ++ struct ++ { ++ unsigned frint : 16; ++ unsigned reserved : 16; ++ } b; ++} hfir_data_t; ++ ++/** ++ * This union represents the bit fields in the Host Frame Remaing/Number ++ * Register. ++ */ ++typedef union hfnum_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ ++ /** register bits */ ++ struct ++ { ++ unsigned frnum : 16; ++#define DWC_HFNUM_MAX_FRNUM 0x3FFF ++ unsigned frrem : 16; ++ } b; ++} hfnum_data_t; ++ ++typedef union hptxsts_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ ++ /** register bits */ ++ struct ++ { ++ unsigned ptxfspcavail : 16; ++ unsigned ptxqspcavail : 8; ++ /** Top of the Periodic Transmit Request Queue ++ * - bit 24 - Terminate (last entry for the selected channel) ++ * - bits 26:25 - Token Type ++ * - 2'b00 - Zero length ++ * - 2'b01 - Ping ++ * - 2'b10 - Disable ++ * - bits 30:27 - Channel Number ++ * - bit 31 - Odd/even microframe ++ */ ++ unsigned ptxqtop_terminate : 1; ++ unsigned ptxqtop_token : 2; ++ unsigned ptxqtop_chnum : 4; ++ unsigned ptxqtop_odd : 1; ++ } b; ++} hptxsts_data_t; ++ ++/** ++ * This union represents the bit fields in the Host Port Control and Status ++ * Register. Read the register into the d32 member then set/clear the ++ * bits using the bit elements. Write the d32 member to the ++ * hprt0 register. ++ */ ++typedef union hprt0_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ unsigned prtconnsts : 1; ++ unsigned prtconndet : 1; ++ unsigned prtena : 1; ++ unsigned prtenchng : 1; ++ unsigned prtovrcurract : 1; ++ unsigned prtovrcurrchng : 1; ++ unsigned prtres : 1; ++ unsigned prtsusp : 1; ++ unsigned prtrst : 1; ++ unsigned reserved9 : 1; ++ unsigned prtlnsts : 2; ++ unsigned prtpwr : 1; ++ unsigned prttstctl : 4; ++ unsigned prtspd : 2; ++#define DWC_HPRT0_PRTSPD_HIGH_SPEED 0 ++#define DWC_HPRT0_PRTSPD_FULL_SPEED 1 ++#define DWC_HPRT0_PRTSPD_LOW_SPEED 2 ++ unsigned reserved19_31 : 13; ++ } b; ++} hprt0_data_t; ++ ++/** ++ * This union represents the bit fields in the Host All Interrupt ++ * Register. ++ */ ++typedef union haint_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ unsigned ch0 : 1; ++ unsigned ch1 : 1; ++ unsigned ch2 : 1; ++ unsigned ch3 : 1; ++ unsigned ch4 : 1; ++ unsigned ch5 : 1; ++ unsigned ch6 : 1; ++ unsigned ch7 : 1; ++ unsigned ch8 : 1; ++ unsigned ch9 : 1; ++ unsigned ch10 : 1; ++ unsigned ch11 : 1; ++ unsigned ch12 : 1; ++ unsigned ch13 : 1; ++ unsigned ch14 : 1; ++ unsigned ch15 : 1; ++ unsigned reserved : 16; ++ } b; ++ ++ struct ++ { ++ unsigned chint : 16; ++ unsigned reserved : 16; ++ } b2; ++} haint_data_t; ++ ++/** ++ * This union represents the bit fields in the Host All Interrupt ++ * Register. ++ */ ++typedef union haintmsk_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ unsigned ch0 : 1; ++ unsigned ch1 : 1; ++ unsigned ch2 : 1; ++ unsigned ch3 : 1; ++ unsigned ch4 : 1; ++ unsigned ch5 : 1; ++ unsigned ch6 : 1; ++ unsigned ch7 : 1; ++ unsigned ch8 : 1; ++ unsigned ch9 : 1; ++ unsigned ch10 : 1; ++ unsigned ch11 : 1; ++ unsigned ch12 : 1; ++ unsigned ch13 : 1; ++ unsigned ch14 : 1; ++ unsigned ch15 : 1; ++ unsigned reserved : 16; ++ } b; ++ ++ struct ++ { ++ unsigned chint : 16; ++ unsigned reserved : 16; ++ } b2; ++} haintmsk_data_t; ++ ++/** ++ * Host Channel Specific Registers. 
500h-5FCh ++ */ ++typedef struct dwc_otg_hc_regs ++{ ++ /** Host Channel 0 Characteristic Register. Offset: 500h + (chan_num * 20h) + 00h */ ++ volatile uint32_t hcchar; ++ /** Host Channel 0 Split Control Register. Offset: 500h + (chan_num * 20h) + 04h */ ++ volatile uint32_t hcsplt; ++ /** Host Channel 0 Interrupt Register. Offset: 500h + (chan_num * 20h) + 08h */ ++ volatile uint32_t hcint; ++ /** Host Channel 0 Interrupt Mask Register. Offset: 500h + (chan_num * 20h) + 0Ch */ ++ volatile uint32_t hcintmsk; ++ /** Host Channel 0 Transfer Size Register. Offset: 500h + (chan_num * 20h) + 10h */ ++ volatile uint32_t hctsiz; ++ /** Host Channel 0 DMA Address Register. Offset: 500h + (chan_num * 20h) + 14h */ ++ volatile uint32_t hcdma; ++ /** Reserved. Offset: 500h + (chan_num * 20h) + 18h - 500h + (chan_num * 20h) + 1Ch */ ++ uint32_t reserved[2]; ++} dwc_otg_hc_regs_t; ++ ++/** ++ * This union represents the bit fields in the Host Channel Characteristics ++ * Register. Read the register into the d32 member then set/clear the ++ * bits using the bit elements. Write the d32 member to the ++ * hcchar register. ++ */ ++typedef union hcchar_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ ++ /** register bits */ ++ struct ++ { ++ /** Maximum packet size in bytes */ ++ unsigned mps : 11; ++ ++ /** Endpoint number */ ++ unsigned epnum : 4; ++ ++ /** 0: OUT, 1: IN */ ++ unsigned epdir : 1; ++ ++ unsigned reserved : 1; ++ ++ /** 0: Full/high speed device, 1: Low speed device */ ++ unsigned lspddev : 1; ++ ++ /** 0: Control, 1: Isoc, 2: Bulk, 3: Intr */ ++ unsigned eptype : 2; ++ ++ /** Packets per frame for periodic transfers. 0 is reserved. */ ++ unsigned multicnt : 2; ++ ++ /** Device address */ ++ unsigned devaddr : 7; ++ ++ /** ++ * Frame to transmit periodic transaction. ++ * 0: even, 1: odd ++ */ ++ unsigned oddfrm : 1; ++ ++ /** Channel disable */ ++ unsigned chdis : 1; ++ ++ /** Channel enable */ ++ unsigned chen : 1; ++ } b; ++} hcchar_data_t; ++ ++typedef union hcsplt_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ ++ /** register bits */ ++ struct ++ { ++ /** Port Address */ ++ unsigned prtaddr : 7; ++ ++ /** Hub Address */ ++ unsigned hubaddr : 7; ++ ++ /** Transaction Position */ ++ unsigned xactpos : 2; ++#define DWC_HCSPLIT_XACTPOS_MID 0 ++#define DWC_HCSPLIT_XACTPOS_END 1 ++#define DWC_HCSPLIT_XACTPOS_BEGIN 2 ++#define DWC_HCSPLIT_XACTPOS_ALL 3 ++ ++ /** Do Complete Split */ ++ unsigned compsplt : 1; ++ ++ /** Reserved */ ++ unsigned reserved : 14; ++ ++ /** Split Enble */ ++ unsigned spltena : 1; ++ } b; ++} hcsplt_data_t; ++ ++ ++/** ++ * This union represents the bit fields in the Host All Interrupt ++ * Register. ++ */ ++typedef union hcint_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ /** register bits */ ++ struct ++ { ++ /** Transfer Complete */ ++ unsigned xfercomp : 1; ++ /** Channel Halted */ ++ unsigned chhltd : 1; ++ /** AHB Error */ ++ unsigned ahberr : 1; ++ /** STALL Response Received */ ++ unsigned stall : 1; ++ /** NAK Response Received */ ++ unsigned nak : 1; ++ /** ACK Response Received */ ++ unsigned ack : 1; ++ /** NYET Response Received */ ++ unsigned nyet : 1; ++ /** Transaction Err */ ++ unsigned xacterr : 1; ++ /** Babble Error */ ++ unsigned bblerr : 1; ++ /** Frame Overrun */ ++ unsigned frmovrun : 1; ++ /** Data Toggle Error */ ++ unsigned datatglerr : 1; ++ /** Reserved */ ++ unsigned reserved : 21; ++ } b; ++} hcint_data_t; ++ ++/** ++ * This union represents the bit fields in the Host Channel Transfer Size ++ * Register. 
Read the register into the d32 member then set/clear the ++ * bits using the bit elements. Write the d32 member to the ++ * hcchar register. ++ */ ++typedef union hctsiz_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ ++ /** register bits */ ++ struct ++ { ++ /** Total transfer size in bytes */ ++ unsigned xfersize : 19; ++ ++ /** Data packets to transfer */ ++ unsigned pktcnt : 10; ++ ++ /** ++ * Packet ID for next data packet ++ * 0: DATA0 ++ * 1: DATA2 ++ * 2: DATA1 ++ * 3: MDATA (non-Control), SETUP (Control) ++ */ ++ unsigned pid : 2; ++#define DWC_HCTSIZ_DATA0 0 ++#define DWC_HCTSIZ_DATA1 2 ++#define DWC_HCTSIZ_DATA2 1 ++#define DWC_HCTSIZ_MDATA 3 ++#define DWC_HCTSIZ_SETUP 3 ++ ++ /** Do PING protocol when 1 */ ++ unsigned dopng : 1; ++ } b; ++} hctsiz_data_t; ++ ++/** ++ * This union represents the bit fields in the Host Channel Interrupt Mask ++ * Register. Read the register into the d32 member then set/clear the ++ * bits using the bit elements. Write the d32 member to the ++ * hcintmsk register. ++ */ ++typedef union hcintmsk_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ ++ /** register bits */ ++ struct ++ { ++ unsigned xfercompl : 1; ++ unsigned chhltd : 1; ++ unsigned ahberr : 1; ++ unsigned stall : 1; ++ unsigned nak : 1; ++ unsigned ack : 1; ++ unsigned nyet : 1; ++ unsigned xacterr : 1; ++ unsigned bblerr : 1; ++ unsigned frmovrun : 1; ++ unsigned datatglerr : 1; ++ unsigned reserved : 21; ++ } b; ++} hcintmsk_data_t; ++ ++/** OTG Host Interface Structure. ++ * ++ * The OTG Host Interface Structure structure contains information ++ * needed to manage the DWC_otg controller acting in host mode. It ++ * represents the programming view of the host-specific aspects of the ++ * controller. ++ */ ++typedef struct dwc_otg_host_if ++{ ++ /** Host Global Registers starting at offset 400h.*/ ++ dwc_otg_host_global_regs_t *host_global_regs; ++#define DWC_OTG_HOST_GLOBAL_REG_OFFSET 0x400 ++ ++ /** Host Port 0 Control and Status Register */ ++ volatile uint32_t *hprt0; ++#define DWC_OTG_HOST_PORT_REGS_OFFSET 0x440 ++ ++ ++ /** Host Channel Specific Registers at offsets 500h-5FCh. */ ++ dwc_otg_hc_regs_t *hc_regs[MAX_EPS_CHANNELS]; ++#define DWC_OTG_HOST_CHAN_REGS_OFFSET 0x500 ++#define DWC_OTG_CHAN_REGS_OFFSET 0x20 ++ ++ ++ /* Host configuration information */ ++ /** Number of Host Channels (range: 1-16) */ ++ uint8_t num_host_channels; ++ /** Periodic EPs supported (0: no, 1: yes) */ ++ uint8_t perio_eps_supported; ++ /** Periodic Tx FIFO Size (Only 1 host periodic Tx FIFO) */ ++ uint16_t perio_tx_fifo_size; ++ ++} dwc_otg_host_if_t; ++ ++ ++/** ++ * This union represents the bit fields in the Power and Clock Gating Control ++ * Register. Read the register into the d32 member then set/clear the ++ * bits using the bit elements. 
++ */ ++typedef union pcgcctl_data ++{ ++ /** raw register data */ ++ uint32_t d32; ++ ++ /** register bits */ ++ struct ++ { ++ /** Stop Pclk */ ++ unsigned stoppclk : 1; ++ /** Gate Hclk */ ++ unsigned gatehclk : 1; ++ /** Power Clamp */ ++ unsigned pwrclmp : 1; ++ /** Reset Power Down Modules */ ++ unsigned rstpdwnmodule : 1; ++ /** PHY Suspended */ ++ unsigned physuspended : 1; ++ ++ unsigned reserved : 27; ++ } b; ++} pcgcctl_data_t; ++ ++ ++#endif +--- /dev/null ++++ b/drivers/usb/dwc_otg/linux/dwc_otg_plat.h +@@ -0,0 +1,260 @@ ++/* ========================================================================== ++ * $File: //dwh/usb_iip/dev/software/otg/linux/platform/dwc_otg_plat.h $ ++ * $Revision: 1.2 $ ++ * $Date: 2008-11-21 05:39:16 $ ++ * $Change: 1064915 $ ++ * ++ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, ++ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless ++ * otherwise expressly agreed to in writing between Synopsys and you. ++ * ++ * The Software IS NOT an item of Licensed Software or Licensed Product under ++ * any End User Software License Agreement or Agreement for Licensed Product ++ * with Synopsys or any supplement thereto. You are permitted to use and ++ * redistribute this Software in source and binary forms, with or without ++ * modification, provided that redistributions of source code must retain this ++ * notice. You may not view, use, disclose, copy or distribute this file or ++ * any information contained herein except pursuant to this license grant from ++ * Synopsys. If you do not agree with this notice, including the disclaimer ++ * below, then you are not authorized to use the Software. ++ * ++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS ++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE ++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ++ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, ++ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR ++ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER ++ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT ++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY ++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH ++ * DAMAGE. ++ * ========================================================================== */ ++ ++#if !defined(__DWC_OTG_PLAT_H__) ++#define __DWC_OTG_PLAT_H__ ++ ++#include ++#include ++#include ++#include ++#include ++ ++/** ++ * @file ++ * ++ * This file contains the Platform Specific constants, interfaces ++ * (functions and macros) for Linux. ++ * ++ */ ++//#if !defined(__LINUX_ARM_ARCH__) ++//#error "The contents of this file is Linux specific!!!" ++//#endif ++ ++/** ++ * Reads the content of a register. ++ * ++ * @param reg address of register to read. ++ * @return contents of the register. ++ * ++ ++ * Usage:
++ * uint32_t dev_ctl = dwc_read_reg32(&dev_regs->dctl); ++ */ ++static __inline__ uint32_t dwc_read_reg32( volatile uint32_t *reg) ++{ ++ return readl(reg); ++}; ++ ++/** ++ * Writes a register with a 32 bit value. ++ * ++ * @param reg address of register to read. ++ * @param value to write to _reg. ++ * ++ * Usage:
++ * dwc_write_reg32(&dev_regs->dctl, 0); ++ */ ++static __inline__ void dwc_write_reg32( volatile uint32_t *reg, const uint32_t value) ++{ ++ writel( value, reg ); ++}; ++ ++/** ++ * This function modifies bit values in a register. Using the ++ * algorithm: (reg_contents & ~clear_mask) | set_mask. ++ * ++ * @param reg address of register to read. ++ * @param clear_mask bit mask to be cleared. ++ * @param set_mask bit mask to be set. ++ * ++ * Usage:
++ * // Clear the SOF Interrupt Mask bit and
++ * // set the OTG Interrupt mask bit, leaving all others as they were.
++ * dwc_modify_reg32(&dev_regs->gintmsk, DWC_SOF_INT, DWC_OTG_INT);
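As an illustration of how these accessors pair with the register unions defined in the regs header earlier in this patch, here is a minimal sketch (not part of the patch; set_fsls_only and host_regs are hypothetical names) of the read-modify-write idiom used throughout the driver:

static void set_fsls_only(dwc_otg_host_global_regs_t *host_regs)
{
	hcfg_data_t hcfg;

	hcfg.d32 = dwc_read_reg32(&host_regs->hcfg);	/* read the whole register */
	hcfg.b.fslssupp = 1;				/* force FS/LS-only operation */
	hcfg.b.fslspclksel = DWC_HCFG_48_MHZ;		/* select the 48 MHz PHY clock */
	dwc_write_reg32(&host_regs->hcfg, hcfg.d32);	/* write the modified value back */
}

The same pattern applies to every *_data_t union in these headers: work on the d32 copy, then commit it with a single register write.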
++ */ ++static __inline__ ++ void dwc_modify_reg32( volatile uint32_t *reg, const uint32_t clear_mask, const uint32_t set_mask) ++{ ++ writel( (readl(reg) & ~clear_mask) | set_mask, reg ); ++}; ++ ++ ++/** ++ * Wrapper for the OS micro-second delay function. ++ * @param[in] usecs Microseconds of delay ++ */ ++static __inline__ void UDELAY( const uint32_t usecs ) ++{ ++ udelay( usecs ); ++} ++ ++/** ++ * Wrapper for the OS milli-second delay function. ++ * @param[in] msecs milliseconds of delay ++ */ ++static __inline__ void MDELAY( const uint32_t msecs ) ++{ ++ mdelay( msecs ); ++} ++ ++/** ++ * Wrapper for the Linux spin_lock. On the ARM (Integrator) ++ * spin_lock() is a nop. ++ * ++ * @param lock Pointer to the spinlock. ++ */ ++static __inline__ void SPIN_LOCK( spinlock_t *lock ) ++{ ++ spin_lock(lock); ++} ++ ++/** ++ * Wrapper for the Linux spin_unlock. On the ARM (Integrator) ++ * spin_lock() is a nop. ++ * ++ * @param lock Pointer to the spinlock. ++ */ ++static __inline__ void SPIN_UNLOCK( spinlock_t *lock ) ++{ ++ spin_unlock(lock); ++} ++ ++/** ++ * Wrapper (macro) for the Linux spin_lock_irqsave. On the ARM ++ * (Integrator) spin_lock() is a nop. ++ * ++ * @param l Pointer to the spinlock. ++ * @param f unsigned long for irq flags storage. ++ */ ++#define SPIN_LOCK_IRQSAVE( l, f ) spin_lock_irqsave(l,f); ++ ++/** ++ * Wrapper (macro) for the Linux spin_unlock_irqrestore. On the ARM ++ * (Integrator) spin_lock() is a nop. ++ * ++ * @param l Pointer to the spinlock. ++ * @param f unsigned long for irq flags storage. ++ */ ++#define SPIN_UNLOCK_IRQRESTORE( l,f ) spin_unlock_irqrestore(l,f); ++ ++/* ++ * Debugging support vanishes in non-debug builds. ++ */ ++ ++ ++/** ++ * The Debug Level bit-mask variable. ++ */ ++extern uint32_t g_dbg_lvl; ++/** ++ * Set the Debug Level variable. ++ */ ++static inline uint32_t SET_DEBUG_LEVEL( const uint32_t new ) ++{ ++ uint32_t old = g_dbg_lvl; ++ g_dbg_lvl = new; ++ return old; ++} ++ ++/** When debug level has the DBG_CIL bit set, display CIL Debug messages. */ ++#define DBG_CIL (0x2) ++/** When debug level has the DBG_CILV bit set, display CIL Verbose debug ++ * messages */ ++#define DBG_CILV (0x20) ++/** When debug level has the DBG_PCD bit set, display PCD (Device) debug ++ * messages */ ++#define DBG_PCD (0x4) ++/** When debug level has the DBG_PCDV set, display PCD (Device) Verbose debug ++ * messages */ ++#define DBG_PCDV (0x40) ++/** When debug level has the DBG_HCD bit set, display Host debug messages */ ++#define DBG_HCD (0x8) ++/** When debug level has the DBG_HCDV bit set, display Verbose Host debug ++ * messages */ ++#define DBG_HCDV (0x80) ++/** When debug level has the DBG_HCD_URB bit set, display enqueued URBs in host ++ * mode. */ ++#define DBG_HCD_URB (0x800) ++ ++/** When debug level has any bit set, display debug messages */ ++#define DBG_ANY (0xFF) ++ ++/** All debug messages off */ ++#define DBG_OFF 0 ++ ++/** Prefix string for DWC_DEBUG print macros. */ ++#define USB_DWC "dwc_otg: " ++ ++/** ++ * Print a debug message when the Global debug level variable contains ++ * the bit defined in lvl. ++ * ++ * @param[in] lvl - Debug level, use one of the DBG_ constants above. ++ * @param[in] x - like printf ++ * ++ * Example:
++ *
++ * DWC_DEBUGPL( DBG_ANY, "%s(%p)\n", __func__, _reg_base_addr);
++ *
++ *
++ * results in:
++ * ++ * usb-DWC_otg: dwc_otg_cil_init(ca867000) ++ * ++ */ ++#ifdef DEBUG ++ ++# define DWC_DEBUGPL(lvl, x...) do{ if ((lvl)&g_dbg_lvl)printk( KERN_DEBUG USB_DWC x ); }while(0) ++# define DWC_DEBUGP(x...) DWC_DEBUGPL(DBG_ANY, x ) ++ ++# define CHK_DEBUG_LEVEL(level) ((level) & g_dbg_lvl) ++ ++#else ++ ++# define DWC_DEBUGPL(lvl, x...) do{}while(0) ++# define DWC_DEBUGP(x...) ++ ++# define CHK_DEBUG_LEVEL(level) (0) ++ ++#endif /*DEBUG*/ ++ ++/** ++ * Print an Error message. ++ */ ++#define DWC_ERROR(x...) printk( KERN_ERR USB_DWC x ) ++/** ++ * Print a Warning message. ++ */ ++#define DWC_WARN(x...) printk( KERN_WARNING USB_DWC x ) ++/** ++ * Print a notice (normal but significant message). ++ */ ++#define DWC_NOTICE(x...) printk( KERN_NOTICE USB_DWC x ) ++/** ++ * Basic message printing. ++ */ ++#define DWC_PRINT(x...) printk( KERN_INFO USB_DWC x ) ++ ++#endif ++ diff --git a/target/linux/ramips/patches-3.10/0122-pinmux.patch b/target/linux/ramips/patches-3.10/0122-pinmux.patch deleted file mode 100644 index 20fc4286f2..0000000000 --- a/target/linux/ramips/patches-3.10/0122-pinmux.patch +++ /dev/null @@ -1,1302 +0,0 @@ -From d59fe652e3674e98caa688b4ddc9308007267adc Mon Sep 17 00:00:00 2001 -From: John Crispin -Date: Mon, 19 Aug 2013 13:49:52 +0200 -Subject: [PATCH] pinctrl: ralink; add pinctrl driver - -Signed-off-by: John Crispin ---- - arch/mips/Kconfig | 2 + - arch/mips/ralink/common.h | 21 +-- - arch/mips/ralink/dts/mt7620a.dtsi | 7 + - drivers/pinctrl/Kconfig | 5 + - drivers/pinctrl/Makefile | 1 + - drivers/pinctrl/pinctrl-rt2880.c | 368 +++++++++++++++++++++++++++++++++++++ - 6 files changed, 385 insertions(+), 19 deletions(-) - create mode 100644 drivers/pinctrl/pinctrl-rt2880.c - ---- a/arch/mips/Kconfig -+++ b/arch/mips/Kconfig -@@ -446,6 +446,8 @@ config RALINK - select HAVE_MACH_CLKDEV - select CLKDEV_LOOKUP - select ARCH_REQUIRE_GPIOLIB -+ select PINCTRL -+ select PINCTRL_RT2880 - - config SGI_IP22 - bool "SGI IP22 (Indy/Indigo2)" ---- a/drivers/pinctrl/Kconfig -+++ b/drivers/pinctrl/Kconfig -@@ -114,6 +114,11 @@ config PINCTRL_LANTIQ - select PINMUX - select PINCONF - -+config PINCTRL_RT2880 -+ bool -+ depends on RALINK -+ select PINMUX -+ - config PINCTRL_FALCON - bool - depends on SOC_FALCON ---- a/drivers/pinctrl/Makefile -+++ b/drivers/pinctrl/Makefile -@@ -45,6 +45,7 @@ obj-$(CONFIG_PINCTRL_EXYNOS5440) += pinc - obj-$(CONFIG_PINCTRL_S3C64XX) += pinctrl-s3c64xx.o - obj-$(CONFIG_PINCTRL_XWAY) += pinctrl-xway.o - obj-$(CONFIG_PINCTRL_LANTIQ) += pinctrl-lantiq.o -+obj-$(CONFIG_PINCTRL_RT2880) += pinctrl-rt2880.o - - obj-$(CONFIG_PLAT_ORION) += mvebu/ - obj-$(CONFIG_ARCH_SHMOBILE) += sh-pfc/ ---- /dev/null -+++ b/drivers/pinctrl/pinctrl-rt2880.c -@@ -0,0 +1,466 @@ -+/* -+ * linux/drivers/pinctrl/pinctrl-rt2880.c -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * publishhed by the Free Software Foundation. 
-+ * -+ * Copyright (C) 2013 John Crispin -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+ -+#include "core.h" -+ -+#define SYSC_REG_GPIO_MODE 0x60 -+ -+struct rt2880_priv { -+ struct device *dev; -+ -+ struct pinctrl_pin_desc *pads; -+ struct pinctrl_desc *desc; -+ -+ struct rt2880_pmx_func **func; -+ int func_count; -+ -+ struct rt2880_pmx_group *groups; -+ const char **group_names; -+ int group_count; -+ -+ uint8_t *gpio; -+ int max_pins; -+}; -+ -+struct rt2880_pmx_group *rt2880_pinmux_data = NULL; -+ -+static int rt2880_get_group_count(struct pinctrl_dev *pctrldev) -+{ -+ struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev); -+ -+ return p->group_count; -+} -+ -+static const char *rt2880_get_group_name(struct pinctrl_dev *pctrldev, -+ unsigned group) -+{ -+ struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev); -+ -+ if (group >= p->group_count) -+ return NULL; -+ -+ return p->group_names[group]; -+} -+ -+static int rt2880_get_group_pins(struct pinctrl_dev *pctrldev, -+ unsigned group, -+ const unsigned **pins, -+ unsigned *num_pins) -+{ -+ struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev); -+ -+ if (group >= p->group_count) -+ return -EINVAL; -+ -+ *pins = p->groups[group].func[0].pins; -+ *num_pins = p->groups[group].func[0].pin_count; -+ -+ return 0; -+} -+ -+static void rt2880_pinctrl_dt_free_map(struct pinctrl_dev *pctrldev, -+ struct pinctrl_map *map, unsigned num_maps) -+{ -+ int i; -+ -+ for (i = 0; i < num_maps; i++) -+ if (map[i].type == PIN_MAP_TYPE_CONFIGS_PIN || -+ map[i].type == PIN_MAP_TYPE_CONFIGS_GROUP) -+ kfree(map[i].data.configs.configs); -+ kfree(map); -+} -+ -+static void rt2880_pinctrl_pin_dbg_show(struct pinctrl_dev *pctrldev, -+ struct seq_file *s, -+ unsigned offset) -+{ -+ seq_printf(s, "ralink pio"); -+} -+ -+static void rt2880_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctrldev, -+ struct device_node *np, -+ struct pinctrl_map **map) -+{ -+ const char *function; -+ int func = of_property_read_string(np, "ralink,function", &function); -+ int grps = of_property_count_strings(np, "ralink,group"); -+ int i; -+ -+ if (func || !grps) -+ return; -+ -+ for (i = 0; i < grps; i++) { -+ const char *group; -+ -+ of_property_read_string_index(np, "ralink,group", i, &group); -+ -+ (*map)->type = PIN_MAP_TYPE_MUX_GROUP; -+ (*map)->name = function; -+ (*map)->data.mux.group = group; -+ (*map)->data.mux.function = function; -+ (*map)++; -+ } -+} -+ -+static int rt2880_pinctrl_dt_node_to_map(struct pinctrl_dev *pctrldev, -+ struct device_node *np_config, -+ struct pinctrl_map **map, -+ unsigned *num_maps) -+{ -+ int max_maps = 0; -+ struct pinctrl_map *tmp; -+ struct device_node *np; -+ -+ for_each_child_of_node(np_config, np) { -+ int ret = of_property_count_strings(np, "ralink,group"); -+ -+ if (ret >= 0) -+ max_maps += ret; -+ } -+ -+ if (!max_maps) -+ return max_maps; -+ -+ *map = kzalloc(max_maps * sizeof(struct pinctrl_map), GFP_KERNEL); -+ if (!*map) -+ return -ENOMEM; -+ -+ tmp = *map; -+ -+ for_each_child_of_node(np_config, np) -+ rt2880_pinctrl_dt_subnode_to_map(pctrldev, np, &tmp); -+ *num_maps = max_maps; -+ -+ return 0; -+} -+ -+static const struct pinctrl_ops rt2880_pctrl_ops = { -+ .get_groups_count = rt2880_get_group_count, -+ .get_group_name = rt2880_get_group_name, -+ .get_group_pins = rt2880_get_group_pins, -+ .pin_dbg_show = rt2880_pinctrl_pin_dbg_show, -+ .dt_node_to_map = rt2880_pinctrl_dt_node_to_map, -+ 
.dt_free_map = rt2880_pinctrl_dt_free_map, -+}; -+ -+static int rt2880_pmx_func_count(struct pinctrl_dev *pctrldev) -+{ -+ struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev); -+ -+ return p->func_count; -+} -+ -+static const char *rt2880_pmx_func_name(struct pinctrl_dev *pctrldev, -+ unsigned func) -+{ -+ struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev); -+ -+ return p->func[func]->name; -+} -+ -+static int rt2880_pmx_group_get_groups(struct pinctrl_dev *pctrldev, -+ unsigned func, -+ const char * const **groups, -+ unsigned * const num_groups) -+{ -+ struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev); -+ -+ if (p->func[func]->group_count == 1) -+ *groups = &p->group_names[p->func[func]->groups[0]]; -+ else -+ *groups = p->group_names; -+ -+ *num_groups = p->func[func]->group_count; -+ -+ return 0; -+} -+ -+static int rt2880_pmx_group_enable(struct pinctrl_dev *pctrldev, -+ unsigned func, -+ unsigned group) -+{ -+ struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev); -+ u32 mode = 0; -+ -+ /* dont allow double use */ -+ if (p->groups[group].enabled) { -+ dev_err(p->dev, "%s is already enabled\n", p->groups[group].name); -+ return -EBUSY; -+ } -+ -+ p->groups[group].enabled = 1; -+ p->func[func]->enabled = 1; -+ -+ mode = rt_sysc_r32(SYSC_REG_GPIO_MODE); -+ mode &= ~(p->groups[group].mask << p->groups[group].shift); -+ -+ /* function 0 is gpio and needs special handling */ -+ if (func == 0) { -+ int i; -+ -+ -+ mode |= p->groups[group].gpio << p->groups[group].shift; -+ /* mark the pins as gpio */ -+ for (i = 0; i < p->groups[group].func[0].pin_count; i++) -+ p->gpio[p->groups[group].func[0].pins[i]] = 1; -+ } else { -+ mode |= p->func[func]->value << p->groups[group].shift; -+ } -+ rt_sysc_w32(mode, SYSC_REG_GPIO_MODE); -+ -+ -+ return 0; -+} -+ -+static int rt2880_pmx_group_gpio_request_enable(struct pinctrl_dev *pctrldev, -+ struct pinctrl_gpio_range *range, -+ unsigned pin) -+{ -+ struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev); -+ -+ if (!p->gpio[pin]) { -+ dev_err(p->dev, "pin %d is not set to gpio mux\n", pin); -+ return -EINVAL; -+ } -+ -+ return 0; -+} -+ -+static const struct pinmux_ops rt2880_pmx_group_ops = { -+ .get_functions_count = rt2880_pmx_func_count, -+ .get_function_name = rt2880_pmx_func_name, -+ .get_function_groups = rt2880_pmx_group_get_groups, -+ .enable = rt2880_pmx_group_enable, -+ .gpio_request_enable = rt2880_pmx_group_gpio_request_enable, -+}; -+ -+static struct pinctrl_desc rt2880_pctrl_desc = { -+ .owner = THIS_MODULE, -+ .name = "rt2880-pinmux", -+ .pctlops = &rt2880_pctrl_ops, -+ .pmxops = &rt2880_pmx_group_ops, -+}; -+ -+static struct rt2880_pmx_func gpio_func = { -+ .name = "gpio", -+}; -+ -+static int rt2880_pinmux_index(struct rt2880_priv *p) -+{ -+ struct rt2880_pmx_func **f; -+ struct rt2880_pmx_group *mux = p->groups; -+ int i, j, c = 0; -+ -+ /* count the mux functions */ -+ while (mux->name) { -+ p->group_count++; -+ mux++; -+ } -+ -+ /* allocate the group names array needed by the gpio function */ -+ p->group_names = devm_kzalloc(p->dev, sizeof(char *) * p->group_count, GFP_KERNEL); -+ if (!p->group_names) -+ return -1; -+ -+ for (i = 0; i < p->group_count; i++) { -+ p->group_names[i] = p->groups[i].name; -+ p->func_count += p->groups[i].func_count; -+ } -+ -+ /* we have a dummy function[0] for gpio */ -+ p->func_count++; -+ -+ /* allocate our function and group mapping index buffers */ -+ f = p->func = devm_kzalloc(p->dev, sizeof(struct rt2880_pmx_func) * p->func_count, GFP_KERNEL); -+ gpio_func.groups = 
devm_kzalloc(p->dev, sizeof(int) * p->group_count, GFP_KERNEL); -+ if (!f || !gpio_func.groups) -+ return -1; -+ -+ /* add a backpointer to the function so it knows its group */ -+ gpio_func.group_count = p->group_count; -+ for (i = 0; i < gpio_func.group_count; i++) -+ gpio_func.groups[i] = i; -+ -+ f[c] = &gpio_func; -+ c++; -+ -+ /* add remaining functions */ -+ for (i = 0; i < p->group_count; i++) { -+ for (j = 0; j < p->groups[i].func_count; j++) { -+ f[c] = &p->groups[i].func[j]; -+ f[c]->groups = devm_kzalloc(p->dev, sizeof(int), GFP_KERNEL); -+ f[c]->groups[0] = i; -+ f[c]->group_count = 1; -+ c++; -+ } -+ } -+ return 0; -+} -+ -+static int rt2880_pinmux_pins(struct rt2880_priv *p) -+{ -+ int i, j; -+ -+ /* loop over the functions and initialize the pins array. also work out the highest pin used */ -+ for (i = 0; i < p->func_count; i++) { -+ int pin; -+ -+ if (!p->func[i]->pin_count) -+ continue; -+ -+ p->func[i]->pins = devm_kzalloc(p->dev, sizeof(int) * p->func[i]->pin_count, GFP_KERNEL); -+ for (j = 0; j < p->func[i]->pin_count; j++) -+ p->func[i]->pins[j] = p->func[i]->pin_first + j; -+ -+ pin = p->func[i]->pin_first + p->func[i]->pin_count; -+ if (pin > p->max_pins) -+ p->max_pins = pin; -+ } -+ -+ /* the buffer that tells us which pins are gpio */ -+ p->gpio = devm_kzalloc(p->dev,sizeof(uint8_t) * p->max_pins, -+ GFP_KERNEL); -+ /* the pads needed to tell pinctrl about our pins */ -+ p->pads = devm_kzalloc(p->dev, -+ sizeof(struct pinctrl_pin_desc) * p->max_pins, -+ GFP_KERNEL); -+ if (!p->pads || !p->gpio ) { -+ dev_err(p->dev, "Failed to allocate gpio data\n"); -+ return -ENOMEM; -+ } -+ -+ memset(p->gpio, 1, sizeof(uint8_t) * p->max_pins); -+ for (i = 0; i < p->func_count; i++) { -+ if (!p->func[i]->pin_count) -+ continue; -+ -+ for (j = 0; j < p->func[i]->pin_count; j++) -+ p->gpio[p->func[i]->pins[j]] = 0; -+ } -+ -+ /* pin 0 is always a gpio */ -+ p->gpio[0] = 1; -+ -+ /* set the pads */ -+ for (i = 0; i < p->max_pins; i++) { -+ /* strlen("ioXY") + 1 = 5 */ -+ char *name = devm_kzalloc(p->dev, 5, GFP_KERNEL); -+ -+ if (!name) { -+ dev_err(p->dev, "Failed to allocate pad name\n"); -+ return -ENOMEM; -+ } -+ snprintf(name, 5, "io%d", i); -+ p->pads[i].number = i; -+ p->pads[i].name = name; -+ } -+ p->desc->pins = p->pads; -+ p->desc->npins = p->max_pins; -+ -+ return 0; -+} -+ -+static int rt2880_pinmux_probe(struct platform_device *pdev) -+{ -+ struct rt2880_priv *p; -+ struct pinctrl_dev *dev; -+ struct device_node *np; -+ -+ if (!rt2880_pinmux_data) -+ return -ENOSYS; -+ -+ /* setup the private data */ -+ p = devm_kzalloc(&pdev->dev, sizeof(struct rt2880_priv), GFP_KERNEL); -+ if (!p) -+ return -ENOMEM; -+ -+ p->dev = &pdev->dev; -+ p->desc = &rt2880_pctrl_desc; -+ p->groups = rt2880_pinmux_data; -+ platform_set_drvdata(pdev, p); -+ -+ /* init the device */ -+ if (rt2880_pinmux_index(p)) { -+ dev_err(&pdev->dev, "failed to load index\n"); -+ return -EINVAL; -+ } -+ if (rt2880_pinmux_pins(p)) { -+ dev_err(&pdev->dev, "failed to load pins\n"); -+ return -EINVAL; -+ } -+ dev = pinctrl_register(p->desc, &pdev->dev, p); -+ if (IS_ERR(dev)) -+ return PTR_ERR(dev); -+ -+ /* finalize by adding gpio ranges for enables gpio controllers */ -+ for_each_compatible_node(np, NULL, "ralink,rt2880-gpio") { -+ const __be32 *ngpio, *gpiobase; -+ struct pinctrl_gpio_range *range; -+ char *name; -+ -+ if (!of_device_is_available(np)) -+ continue; -+ -+ ngpio = of_get_property(np, "ralink,num-gpios", NULL); -+ gpiobase = of_get_property(np, "ralink,gpio-base", NULL); -+ if (!ngpio || 
!gpiobase) { -+ dev_err(&pdev->dev, "failed to load chip info\n"); -+ return -EINVAL; -+ } -+ -+ range = devm_kzalloc(p->dev, sizeof(struct pinctrl_gpio_range) + 4, GFP_KERNEL); -+ range->name = name = (char *) &range[1]; -+ sprintf(name, "pio"); -+ range->npins = __be32_to_cpu(*ngpio); -+ range->base = __be32_to_cpu(*gpiobase); -+ range->pin_base = range->base; -+ pinctrl_add_gpio_range(dev, range); -+ } -+ -+ return 0; -+} -+ -+static const struct of_device_id rt2880_pinmux_match[] = { -+ { .compatible = "ralink,rt2880-pinmux" }, -+ {}, -+}; -+MODULE_DEVICE_TABLE(of, rt2880_pinmux_match); -+ -+static struct platform_driver rt2880_pinmux_driver = { -+ .probe = rt2880_pinmux_probe, -+ .driver = { -+ .name = "rt2880-pinmux", -+ .owner = THIS_MODULE, -+ .of_match_table = rt2880_pinmux_match, -+ }, -+}; -+ -+int __init rt2880_pinmux_init(void) -+{ -+ return platform_driver_register(&rt2880_pinmux_driver); -+} -+ -+core_initcall_sync(rt2880_pinmux_init); ---- /dev/null -+++ b/arch/mips/include/asm/mach-ralink/pinmux.h -@@ -0,0 +1,53 @@ -+/* -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * publishhed by the Free Software Foundation. -+ * -+ * Copyright (C) 2012 John Crispin -+ */ -+ -+#ifndef _RT288X_PINMUX_H__ -+#define _RT288X_PINMUX_H__ -+ -+#define FUNC(name, value, pin_first, pin_count) { name, value, pin_first, pin_count } -+#define GRP(_name, _func, _mask, _shift) \ -+ { .name = _name, .mask = _mask, .shift = _shift, \ -+ .func = _func, .gpio = _mask, \ -+ .func_count = ARRAY_SIZE(_func) } -+ -+#define GRP_G(_name, _func, _mask, _gpio, _shift) \ -+ { .name = _name, .mask = _mask, .shift = _shift, \ -+ .func = _func, .gpio = _gpio, \ -+ .func_count = ARRAY_SIZE(_func) } -+ -+struct rt2880_pmx_group; -+ -+struct rt2880_pmx_func { -+ const char *name; -+ const char value; -+ -+ int pin_first; -+ int pin_count; -+ int *pins; -+ -+ int *groups; -+ int group_count; -+ -+ int enabled; -+}; -+ -+struct rt2880_pmx_group { -+ const char *name; -+ int enabled; -+ -+ const u32 shift; -+ const char mask; -+ const char gpio; -+ -+ struct rt2880_pmx_func *func; -+ int func_count; -+}; -+ -+extern struct rt2880_pmx_group *rt2880_pinmux_data; -+ -+#endif ---- a/arch/mips/ralink/mt7620.c -+++ b/arch/mips/ralink/mt7620.c -@@ -17,6 +17,7 @@ - #include - #include - #include -+#include - - #include "common.h" - -@@ -48,118 +49,58 @@ static int dram_type; - /* the pll dividers */ - static u32 mt7620_clk_divider[] = { 2, 3, 4, 8 }; - --static struct ralink_pinmux_grp mode_mux[] = { -- { -- .name = "i2c", -- .mask = MT7620_GPIO_MODE_I2C, -- .gpio_first = 1, -- .gpio_last = 2, -- }, { -- .name = "spi", -- .mask = MT7620_GPIO_MODE_SPI, -- .gpio_first = 3, -- .gpio_last = 6, -- }, { -- .name = "uartlite", -- .mask = MT7620_GPIO_MODE_UART1, -- .gpio_first = 15, -- .gpio_last = 16, -- }, { -- .name = "wdt", -- .mask = MT7620_GPIO_MODE_WDT, -- .gpio_first = 17, -- .gpio_last = 17, -- }, { -- .name = "mdio", -- .mask = MT7620_GPIO_MODE_MDIO, -- .gpio_first = 22, -- .gpio_last = 23, -- }, { -- .name = "rgmii1", -- .mask = MT7620_GPIO_MODE_RGMII1, -- .gpio_first = 24, -- .gpio_last = 35, -- }, { -- .name = "spi refclk", -- .mask = MT7620_GPIO_MODE_SPI_REF_CLK, -- .gpio_first = 37, -- .gpio_last = 39, -- }, { -- .name = "jtag", -- .mask = MT7620_GPIO_MODE_JTAG, -- .gpio_first = 40, -- .gpio_last = 44, -- }, { -- /* shared lines with jtag */ -- .name = "ephy", -- .mask = MT7620_GPIO_MODE_EPHY, -- .gpio_first = 40, -- 
.gpio_last = 44, -- }, { -- .name = "nand", -- .mask = MT7620_GPIO_MODE_JTAG, -- .gpio_first = 45, -- .gpio_last = 59, -- }, { -- .name = "rgmii2", -- .mask = MT7620_GPIO_MODE_RGMII2, -- .gpio_first = 60, -- .gpio_last = 71, -- }, { -- .name = "wled", -- .mask = MT7620_GPIO_MODE_WLED, -- .gpio_first = 72, -- .gpio_last = 72, -- }, {0} -+static struct rt2880_pmx_func i2c_grp[] = { FUNC("i2c", 0, 1, 2) }; -+static struct rt2880_pmx_func spi_grp[] = { FUNC("spi", 0, 3, 4) }; -+static struct rt2880_pmx_func uartlite_grp[] = { FUNC("uartlite", 0, 15, 2) }; -+static struct rt2880_pmx_func mdio_grp[] = { FUNC("mdio", 0, 22, 2) }; -+static struct rt2880_pmx_func rgmii1_grp[] = { FUNC("rgmii1", 0, 24, 12) }; -+static struct rt2880_pmx_func refclk_grp[] = { FUNC("spi refclk", 0, 37, 3) }; -+static struct rt2880_pmx_func ephy_grp[] = { FUNC("ephy", 0, 40, 5) }; -+static struct rt2880_pmx_func rgmii2_grp[] = { FUNC("rgmii2", 0, 60, 12) }; -+static struct rt2880_pmx_func wled_grp[] = { FUNC("wled", 0, 72, 1) }; -+static struct rt2880_pmx_func pa_grp[] = { FUNC("pa", 0, 18, 4) }; -+static struct rt2880_pmx_func uartf_grp[] = { -+ FUNC("uartf", MT7620_GPIO_MODE_UARTF, 7, 8), -+ FUNC("pcm uartf", MT7620_GPIO_MODE_PCM_UARTF, 7, 8), -+ FUNC("pcm i2s", MT7620_GPIO_MODE_PCM_I2S, 7, 8), -+ FUNC("i2s uartf", MT7620_GPIO_MODE_I2S_UARTF, 7, 8), -+ FUNC("pcm gpio", MT7620_GPIO_MODE_PCM_GPIO, 11, 4), -+ FUNC("gpio uartf", MT7620_GPIO_MODE_GPIO_UARTF, 7, 4), -+ FUNC("gpio i2s", MT7620_GPIO_MODE_GPIO_I2S, 7, 4), - }; -- --static struct ralink_pinmux_grp uart_mux[] = { -- { -- .name = "uartf", -- .mask = MT7620_GPIO_MODE_UARTF, -- .gpio_first = 7, -- .gpio_last = 14, -- }, { -- .name = "pcm uartf", -- .mask = MT7620_GPIO_MODE_PCM_UARTF, -- .gpio_first = 7, -- .gpio_last = 14, -- }, { -- .name = "pcm i2s", -- .mask = MT7620_GPIO_MODE_PCM_I2S, -- .gpio_first = 7, -- .gpio_last = 14, -- }, { -- .name = "i2s uartf", -- .mask = MT7620_GPIO_MODE_I2S_UARTF, -- .gpio_first = 7, -- .gpio_last = 14, -- }, { -- .name = "pcm gpio", -- .mask = MT7620_GPIO_MODE_PCM_GPIO, -- .gpio_first = 11, -- .gpio_last = 14, -- }, { -- .name = "gpio uartf", -- .mask = MT7620_GPIO_MODE_GPIO_UARTF, -- .gpio_first = 7, -- .gpio_last = 10, -- }, { -- .name = "gpio i2s", -- .mask = MT7620_GPIO_MODE_GPIO_I2S, -- .gpio_first = 7, -- .gpio_last = 10, -- }, { -- .name = "gpio", -- .mask = MT7620_GPIO_MODE_GPIO, -- }, {0} -+static struct rt2880_pmx_func wdt_grp[] = { -+ FUNC("wdt rst", 0, 17, 1), -+ FUNC("wdt refclk", 0, 17, 1), -+ }; -+static struct rt2880_pmx_func pcie_rst_grp[] = { -+ FUNC("pcie rst", MT7620_GPIO_MODE_PCIE_RST, 36, 1), -+ FUNC("pcie refclk", MT7620_GPIO_MODE_PCIE_REF, 36, 1) -+}; -+static struct rt2880_pmx_func nd_sd_grp[] = { -+ FUNC("nand", MT7620_GPIO_MODE_NAND, 45, 15), -+ FUNC("sd", MT7620_GPIO_MODE_SD, 45, 15) - }; - --struct ralink_pinmux rt_gpio_pinmux = { -- .mode = mode_mux, -- .uart = uart_mux, -- .uart_shift = MT7620_GPIO_MODE_UART0_SHIFT, -- .uart_mask = MT7620_GPIO_MODE_UART0_MASK, -+static struct rt2880_pmx_group mt7620a_pinmux_data[] = { -+ GRP("i2c", i2c_grp, 1, MT7620_GPIO_MODE_I2C), -+ GRP("uartf", uartf_grp, MT7620_GPIO_MODE_UART0_MASK, -+ MT7620_GPIO_MODE_UART0_SHIFT), -+ GRP("spi", spi_grp, 1, MT7620_GPIO_MODE_SPI), -+ GRP("uartlite", uartlite_grp, 1, MT7620_GPIO_MODE_UART1), -+ GRP_G("wdt", wdt_grp, MT7620_GPIO_MODE_WDT_MASK, -+ MT7620_GPIO_MODE_WDT_GPIO, MT7620_GPIO_MODE_WDT_SHIFT), -+ GRP("mdio", mdio_grp, 1, MT7620_GPIO_MODE_MDIO), -+ GRP("rgmii1", rgmii1_grp, 1, MT7620_GPIO_MODE_RGMII1), -+ GRP("spi refclk", 
refclk_grp, 1, MT7620_GPIO_MODE_SPI_REF_CLK), -+ GRP_G("pcie", pcie_rst_grp, MT7620_GPIO_MODE_PCIE_MASK, -+ MT7620_GPIO_MODE_PCIE_GPIO, MT7620_GPIO_MODE_PCIE_SHIFT), -+ GRP_G("nd_sd", nd_sd_grp, MT7620_GPIO_MODE_ND_SD_MASK, -+ MT7620_GPIO_MODE_ND_SD_GPIO, MT7620_GPIO_MODE_ND_SD_SHIFT), -+ GRP("rgmii2", rgmii2_grp, 1, MT7620_GPIO_MODE_RGMII2), -+ GRP("wled", wled_grp, 1, MT7620_GPIO_MODE_WLED), -+ GRP("ephy", ephy_grp, 1, MT7620_GPIO_MODE_EPHY), -+ GRP("pa", pa_grp, 1, MT7620_GPIO_MODE_PA), -+ { 0 } - }; - - void __init ralink_clk_init(void) -@@ -281,4 +222,6 @@ void prom_soc_init(struct ralink_soc_inf - (pmu0 & PMU_SW_SET) ? ("sw") : ("hw")); - pr_info("Digital PMU set to %s control\n", - (pmu1 & DIG_SW_SEL) ? ("sw") : ("hw")); -+ -+ rt2880_pinmux_data = mt7620a_pinmux_data; - } ---- a/arch/mips/ralink/rt305x.c -+++ b/arch/mips/ralink/rt305x.c -@@ -17,90 +17,71 @@ - #include - #include - #include -+#include - - #include "common.h" - - enum rt305x_soc_type rt305x_soc; - --static struct ralink_pinmux_grp mode_mux[] = { -- { -- .name = "i2c", -- .mask = RT305X_GPIO_MODE_I2C, -- .gpio_first = RT305X_GPIO_I2C_SD, -- .gpio_last = RT305X_GPIO_I2C_SCLK, -- }, { -- .name = "spi", -- .mask = RT305X_GPIO_MODE_SPI, -- .gpio_first = RT305X_GPIO_SPI_EN, -- .gpio_last = RT305X_GPIO_SPI_CLK, -- }, { -- .name = "uartlite", -- .mask = RT305X_GPIO_MODE_UART1, -- .gpio_first = RT305X_GPIO_UART1_TXD, -- .gpio_last = RT305X_GPIO_UART1_RXD, -- }, { -- .name = "jtag", -- .mask = RT305X_GPIO_MODE_JTAG, -- .gpio_first = RT305X_GPIO_JTAG_TDO, -- .gpio_last = RT305X_GPIO_JTAG_TDI, -- }, { -- .name = "mdio", -- .mask = RT305X_GPIO_MODE_MDIO, -- .gpio_first = RT305X_GPIO_MDIO_MDC, -- .gpio_last = RT305X_GPIO_MDIO_MDIO, -- }, { -- .name = "sdram", -- .mask = RT305X_GPIO_MODE_SDRAM, -- .gpio_first = RT305X_GPIO_SDRAM_MD16, -- .gpio_last = RT305X_GPIO_SDRAM_MD31, -- }, { -- .name = "rgmii", -- .mask = RT305X_GPIO_MODE_RGMII, -- .gpio_first = RT305X_GPIO_GE0_TXD0, -- .gpio_last = RT305X_GPIO_GE0_RXCLK, -- }, {0} -+static struct rt2880_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) }; -+static struct rt2880_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) }; -+static struct rt2880_pmx_func uartf_func[] = { -+ FUNC("uartf", RT305X_GPIO_MODE_UARTF, 7, 8), -+ FUNC("pcm uartf", RT305X_GPIO_MODE_PCM_UARTF, 7, 8), -+ FUNC("pcm i2s", RT305X_GPIO_MODE_PCM_I2S, 7, 8), -+ FUNC("i2s uartf", RT305X_GPIO_MODE_I2S_UARTF, 7, 8), -+ FUNC("pcm gpio", RT305X_GPIO_MODE_PCM_GPIO, 11, 4), -+ FUNC("gpio uartf", RT305X_GPIO_MODE_GPIO_UARTF, 7, 4), -+ FUNC("gpio i2s", RT305X_GPIO_MODE_GPIO_I2S, 7, 4), -+}; -+static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) }; -+static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) }; -+static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) }; -+static struct rt2880_pmx_func rt5350_led_func[] = { FUNC("led", 0, 22, 5) }; -+static struct rt2880_pmx_func sdram_func[] = { FUNC("sdram", 0, 24, 16) }; -+static struct rt2880_pmx_func rt3352_rgmii_func[] = { FUNC("rgmii", 0, 24, 12) }; -+static struct rt2880_pmx_func rgmii_func[] = { FUNC("rgmii", 0, 40, 12) }; -+static struct rt2880_pmx_func rt3352_lna_func[] = { FUNC("lna", 0, 36, 2) }; -+static struct rt2880_pmx_func rt3352_pa_func[] = { FUNC("pa", 0, 38, 2) }; -+static struct rt2880_pmx_func rt3352_led_func[] = { FUNC("led", 0, 40, 5) }; -+ -+static struct rt2880_pmx_group rt3050_pinmux_data[] = { -+ GRP("i2c", i2c_func, 1, RT305X_GPIO_MODE_I2C), -+ GRP("spi", spi_func, 1, RT305X_GPIO_MODE_SPI), -+ GRP("uartf", 
uartf_func, RT305X_GPIO_MODE_UART0_MASK, -+ RT305X_GPIO_MODE_UART0_SHIFT), -+ GRP("uartlite", uartlite_func, 1, RT305X_GPIO_MODE_UART1), -+ GRP("jtag", jtag_func, 1, RT305X_GPIO_MODE_JTAG), -+ GRP("mdio", mdio_func, 1, RT305X_GPIO_MODE_MDIO), -+ GRP("rgmii", rgmii_func, 1, RT305X_GPIO_MODE_RGMII), -+ GRP("sdram", sdram_func, 1, RT305X_GPIO_MODE_SDRAM), -+ { 0 } -+}; -+ -+static struct rt2880_pmx_group rt3352_pinmux_data[] = { -+ GRP("i2c", i2c_func, 1, RT305X_GPIO_MODE_I2C), -+ GRP("spi", spi_func, 1, RT305X_GPIO_MODE_SPI), -+ GRP("uartf", uartf_func, RT305X_GPIO_MODE_UART0_MASK, -+ RT305X_GPIO_MODE_UART0_SHIFT), -+ GRP("uartlite", uartlite_func, 1, RT305X_GPIO_MODE_UART1), -+ GRP("jtag", jtag_func, 1, RT305X_GPIO_MODE_JTAG), -+ GRP("mdio", mdio_func, 1, RT305X_GPIO_MODE_MDIO), -+ GRP("rgmii", rt3352_rgmii_func, 1, RT305X_GPIO_MODE_RGMII), -+ GRP("lna", rt3352_lna_func, 1, RT3352_GPIO_MODE_LNA), -+ GRP("pa", rt3352_pa_func, 1, RT3352_GPIO_MODE_PA), -+ GRP("led", rt3352_led_func, 1, RT5350_GPIO_MODE_PHY_LED), -+ { 0 } - }; - --static struct ralink_pinmux_grp uart_mux[] = { -- { -- .name = "uartf", -- .mask = RT305X_GPIO_MODE_UARTF, -- .gpio_first = RT305X_GPIO_7, -- .gpio_last = RT305X_GPIO_14, -- }, { -- .name = "pcm uartf", -- .mask = RT305X_GPIO_MODE_PCM_UARTF, -- .gpio_first = RT305X_GPIO_7, -- .gpio_last = RT305X_GPIO_14, -- }, { -- .name = "pcm i2s", -- .mask = RT305X_GPIO_MODE_PCM_I2S, -- .gpio_first = RT305X_GPIO_7, -- .gpio_last = RT305X_GPIO_14, -- }, { -- .name = "i2s uartf", -- .mask = RT305X_GPIO_MODE_I2S_UARTF, -- .gpio_first = RT305X_GPIO_7, -- .gpio_last = RT305X_GPIO_14, -- }, { -- .name = "pcm gpio", -- .mask = RT305X_GPIO_MODE_PCM_GPIO, -- .gpio_first = RT305X_GPIO_10, -- .gpio_last = RT305X_GPIO_14, -- }, { -- .name = "gpio uartf", -- .mask = RT305X_GPIO_MODE_GPIO_UARTF, -- .gpio_first = RT305X_GPIO_7, -- .gpio_last = RT305X_GPIO_10, -- }, { -- .name = "gpio i2s", -- .mask = RT305X_GPIO_MODE_GPIO_I2S, -- .gpio_first = RT305X_GPIO_7, -- .gpio_last = RT305X_GPIO_10, -- }, { -- .name = "gpio", -- .mask = RT305X_GPIO_MODE_GPIO, -- }, {0} -+static struct rt2880_pmx_group rt5350_pinmux_data[] = { -+ GRP("i2c", i2c_func, 1, RT305X_GPIO_MODE_I2C), -+ GRP("spi", spi_func, 1, RT305X_GPIO_MODE_SPI), -+ GRP("uartf", uartf_func, RT305X_GPIO_MODE_UART0_MASK, -+ RT305X_GPIO_MODE_UART0_SHIFT), -+ GRP("uartlite", uartlite_func, 1, RT305X_GPIO_MODE_UART1), -+ GRP("jtag", jtag_func, 1, RT305X_GPIO_MODE_JTAG), -+ GRP("led", rt5350_led_func, 1, RT5350_GPIO_MODE_PHY_LED), -+ { 0 } - }; - - static void rt305x_wdt_reset(void) -@@ -114,14 +95,6 @@ static void rt305x_wdt_reset(void) - rt_sysc_w32(t, SYSC_REG_SYSTEM_CONFIG); - } - --struct ralink_pinmux rt_gpio_pinmux = { -- .mode = mode_mux, -- .uart = uart_mux, -- .uart_shift = RT305X_GPIO_MODE_UART0_SHIFT, -- .uart_mask = RT305X_GPIO_MODE_UART0_MASK, -- .wdt_reset = rt305x_wdt_reset, --}; -- - static unsigned long rt5350_get_mem_size(void) - { - void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT305X_SYSC_BASE); -@@ -291,11 +264,14 @@ void prom_soc_init(struct ralink_soc_inf - soc_info->mem_base = RT305X_SDRAM_BASE; - if (soc_is_rt5350()) { - soc_info->mem_size = rt5350_get_mem_size(); -+ rt2880_pinmux_data = rt5350_pinmux_data; - } else if (soc_is_rt305x() || soc_is_rt3350()) { - soc_info->mem_size_min = RT305X_MEM_SIZE_MIN; - soc_info->mem_size_max = RT305X_MEM_SIZE_MAX; -+ rt2880_pinmux_data = rt3050_pinmux_data; - } else if (soc_is_rt3352()) { - soc_info->mem_size_min = RT3352_MEM_SIZE_MIN; - soc_info->mem_size_max = RT3352_MEM_SIZE_MAX; -+ 
rt2880_pinmux_data = rt3352_pinmux_data; - } - } ---- a/arch/mips/include/asm/mach-ralink/rt305x.h -+++ b/arch/mips/include/asm/mach-ralink/rt305x.h -@@ -125,24 +125,28 @@ static inline int soc_is_rt5350(void) - #define RT305X_GPIO_GE0_TXD0 40 - #define RT305X_GPIO_GE0_RXCLK 51 - --#define RT305X_GPIO_MODE_I2C BIT(0) --#define RT305X_GPIO_MODE_SPI BIT(1) - #define RT305X_GPIO_MODE_UART0_SHIFT 2 - #define RT305X_GPIO_MODE_UART0_MASK 0x7 - #define RT305X_GPIO_MODE_UART0(x) ((x) << RT305X_GPIO_MODE_UART0_SHIFT) --#define RT305X_GPIO_MODE_UARTF 0x0 --#define RT305X_GPIO_MODE_PCM_UARTF 0x1 --#define RT305X_GPIO_MODE_PCM_I2S 0x2 --#define RT305X_GPIO_MODE_I2S_UARTF 0x3 --#define RT305X_GPIO_MODE_PCM_GPIO 0x4 --#define RT305X_GPIO_MODE_GPIO_UARTF 0x5 --#define RT305X_GPIO_MODE_GPIO_I2S 0x6 --#define RT305X_GPIO_MODE_GPIO 0x7 --#define RT305X_GPIO_MODE_UART1 BIT(5) --#define RT305X_GPIO_MODE_JTAG BIT(6) --#define RT305X_GPIO_MODE_MDIO BIT(7) --#define RT305X_GPIO_MODE_SDRAM BIT(8) --#define RT305X_GPIO_MODE_RGMII BIT(9) -+#define RT305X_GPIO_MODE_UARTF 0 -+#define RT305X_GPIO_MODE_PCM_UARTF 1 -+#define RT305X_GPIO_MODE_PCM_I2S 2 -+#define RT305X_GPIO_MODE_I2S_UARTF 3 -+#define RT305X_GPIO_MODE_PCM_GPIO 4 -+#define RT305X_GPIO_MODE_GPIO_UARTF 5 -+#define RT305X_GPIO_MODE_GPIO_I2S 6 -+#define RT305X_GPIO_MODE_GPIO 7 -+ -+#define RT305X_GPIO_MODE_I2C 0 -+#define RT305X_GPIO_MODE_SPI 1 -+#define RT305X_GPIO_MODE_UART1 5 -+#define RT305X_GPIO_MODE_JTAG 6 -+#define RT305X_GPIO_MODE_MDIO 7 -+#define RT305X_GPIO_MODE_SDRAM 8 -+#define RT305X_GPIO_MODE_RGMII 9 -+#define RT5350_GPIO_MODE_PHY_LED 14 -+#define RT3352_GPIO_MODE_LNA 18 -+#define RT3352_GPIO_MODE_PA 20 - - #define RT3352_SYSC_REG_SYSCFG0 0x010 - #define RT3352_SYSC_REG_SYSCFG1 0x014 ---- a/arch/mips/include/asm/mach-ralink/mt7620.h -+++ b/arch/mips/include/asm/mach-ralink/mt7620.h -@@ -59,7 +59,6 @@ - #define MT7620_DDR2_SIZE_MIN 32 - #define MT7620_DDR2_SIZE_MAX 256 - --#define MT7620_GPIO_MODE_I2C BIT(0) - #define MT7620_GPIO_MODE_UART0_SHIFT 2 - #define MT7620_GPIO_MODE_UART0_MASK 0x7 - #define MT7620_GPIO_MODE_UART0(x) ((x) << MT7620_GPIO_MODE_UART0_SHIFT) -@@ -71,15 +70,35 @@ - #define MT7620_GPIO_MODE_GPIO_UARTF 0x5 - #define MT7620_GPIO_MODE_GPIO_I2S 0x6 - #define MT7620_GPIO_MODE_GPIO 0x7 --#define MT7620_GPIO_MODE_UART1 BIT(5) --#define MT7620_GPIO_MODE_MDIO BIT(8) --#define MT7620_GPIO_MODE_RGMII1 BIT(9) --#define MT7620_GPIO_MODE_RGMII2 BIT(10) --#define MT7620_GPIO_MODE_SPI BIT(11) --#define MT7620_GPIO_MODE_SPI_REF_CLK BIT(12) --#define MT7620_GPIO_MODE_WLED BIT(13) --#define MT7620_GPIO_MODE_JTAG BIT(15) --#define MT7620_GPIO_MODE_EPHY BIT(15) --#define MT7620_GPIO_MODE_WDT BIT(22) -+ -+#define MT7620_GPIO_MODE_NAND 0 -+#define MT7620_GPIO_MODE_SD 1 -+#define MT7620_GPIO_MODE_ND_SD_GPIO 2 -+#define MT7620_GPIO_MODE_ND_SD_MASK 0x3 -+#define MT7620_GPIO_MODE_ND_SD_SHIFT 18 -+ -+#define MT7620_GPIO_MODE_PCIE_RST 0 -+#define MT7620_GPIO_MODE_PCIE_REF 1 -+#define MT7620_GPIO_MODE_PCIE_GPIO 2 -+#define MT7620_GPIO_MODE_PCIE_MASK 0x3 -+#define MT7620_GPIO_MODE_PCIE_SHIFT 16 -+ -+#define MT7620_GPIO_MODE_WDT_RST 0 -+#define MT7620_GPIO_MODE_WDT_REF 1 -+#define MT7620_GPIO_MODE_WDT_GPIO 2 -+#define MT7620_GPIO_MODE_WDT_MASK 0x3 -+#define MT7620_GPIO_MODE_WDT_SHIFT 21 -+ -+#define MT7620_GPIO_MODE_I2C 0 -+#define MT7620_GPIO_MODE_UART1 5 -+#define MT7620_GPIO_MODE_MDIO 8 -+#define MT7620_GPIO_MODE_RGMII1 9 -+#define MT7620_GPIO_MODE_RGMII2 10 -+#define MT7620_GPIO_MODE_SPI 11 -+#define MT7620_GPIO_MODE_SPI_REF_CLK 12 -+#define 
MT7620_GPIO_MODE_WLED 13 -+#define MT7620_GPIO_MODE_JTAG 15 -+#define MT7620_GPIO_MODE_EPHY 15 -+#define MT7620_GPIO_MODE_PA 20 - - #endif ---- a/arch/mips/include/asm/mach-ralink/rt3883.h -+++ b/arch/mips/include/asm/mach-ralink/rt3883.h -@@ -112,8 +112,6 @@ - #define RT3883_CLKCFG1_PCI_CLK_EN BIT(19) - #define RT3883_CLKCFG1_UPHY0_CLK_EN BIT(18) - --#define RT3883_GPIO_MODE_I2C BIT(0) --#define RT3883_GPIO_MODE_SPI BIT(1) - #define RT3883_GPIO_MODE_UART0_SHIFT 2 - #define RT3883_GPIO_MODE_UART0_MASK 0x7 - #define RT3883_GPIO_MODE_UART0(x) ((x) << RT3883_GPIO_MODE_UART0_SHIFT) -@@ -125,11 +123,15 @@ - #define RT3883_GPIO_MODE_GPIO_UARTF 0x5 - #define RT3883_GPIO_MODE_GPIO_I2S 0x6 - #define RT3883_GPIO_MODE_GPIO 0x7 --#define RT3883_GPIO_MODE_UART1 BIT(5) --#define RT3883_GPIO_MODE_JTAG BIT(6) --#define RT3883_GPIO_MODE_MDIO BIT(7) --#define RT3883_GPIO_MODE_GE1 BIT(9) --#define RT3883_GPIO_MODE_GE2 BIT(10) -+ -+#define RT3883_GPIO_MODE_I2C 0 -+#define RT3883_GPIO_MODE_SPI 1 -+#define RT3883_GPIO_MODE_UART1 5 -+#define RT3883_GPIO_MODE_JTAG 6 -+#define RT3883_GPIO_MODE_MDIO 7 -+#define RT3883_GPIO_MODE_GE1 9 -+#define RT3883_GPIO_MODE_GE2 10 -+ - #define RT3883_GPIO_MODE_PCI_SHIFT 11 - #define RT3883_GPIO_MODE_PCI_MASK 0x7 - #define RT3883_GPIO_MODE_PCI (RT3883_GPIO_MODE_PCI_MASK << RT3883_GPIO_MODE_PCI_SHIFT) ---- a/arch/mips/ralink/common.h -+++ b/arch/mips/ralink/common.h -@@ -11,25 +11,6 @@ - - #define RAMIPS_SYS_TYPE_LEN 32 - --struct ralink_pinmux_grp { -- const char *name; -- u32 mask; -- int gpio_first; -- int gpio_last; --}; -- --struct ralink_pinmux { -- struct ralink_pinmux_grp *mode; -- struct ralink_pinmux_grp *uart; -- int uart_shift; -- u32 uart_mask; -- void (*wdt_reset)(void); -- struct ralink_pinmux_grp *pci; -- int pci_shift; -- u32 pci_mask; --}; --extern struct ralink_pinmux rt_gpio_pinmux; -- - struct ralink_soc_info { - unsigned char sys_type[RAMIPS_SYS_TYPE_LEN]; - unsigned char *compatible; ---- a/arch/mips/ralink/rt3883.c -+++ b/arch/mips/ralink/rt3883.c -@@ -17,132 +17,50 @@ - #include - #include - #include -+#include - - #include "common.h" - --static struct ralink_pinmux_grp mode_mux[] = { -- { -- .name = "i2c", -- .mask = RT3883_GPIO_MODE_I2C, -- .gpio_first = RT3883_GPIO_I2C_SD, -- .gpio_last = RT3883_GPIO_I2C_SCLK, -- }, { -- .name = "spi", -- .mask = RT3883_GPIO_MODE_SPI, -- .gpio_first = RT3883_GPIO_SPI_CS0, -- .gpio_last = RT3883_GPIO_SPI_MISO, -- }, { -- .name = "uartlite", -- .mask = RT3883_GPIO_MODE_UART1, -- .gpio_first = RT3883_GPIO_UART1_TXD, -- .gpio_last = RT3883_GPIO_UART1_RXD, -- }, { -- .name = "jtag", -- .mask = RT3883_GPIO_MODE_JTAG, -- .gpio_first = RT3883_GPIO_JTAG_TDO, -- .gpio_last = RT3883_GPIO_JTAG_TCLK, -- }, { -- .name = "mdio", -- .mask = RT3883_GPIO_MODE_MDIO, -- .gpio_first = RT3883_GPIO_MDIO_MDC, -- .gpio_last = RT3883_GPIO_MDIO_MDIO, -- }, { -- .name = "ge1", -- .mask = RT3883_GPIO_MODE_GE1, -- .gpio_first = RT3883_GPIO_GE1_TXD0, -- .gpio_last = RT3883_GPIO_GE1_RXCLK, -- }, { -- .name = "ge2", -- .mask = RT3883_GPIO_MODE_GE2, -- .gpio_first = RT3883_GPIO_GE2_TXD0, -- .gpio_last = RT3883_GPIO_GE2_RXCLK, -- }, { -- .name = "pci", -- .mask = RT3883_GPIO_MODE_PCI, -- .gpio_first = RT3883_GPIO_PCI_AD0, -- .gpio_last = RT3883_GPIO_PCI_AD31, -- }, { -- .name = "lna a", -- .mask = RT3883_GPIO_MODE_LNA_A, -- .gpio_first = RT3883_GPIO_LNA_PE_A0, -- .gpio_last = RT3883_GPIO_LNA_PE_A2, -- }, { -- .name = "lna g", -- .mask = RT3883_GPIO_MODE_LNA_G, -- .gpio_first = RT3883_GPIO_LNA_PE_G0, -- .gpio_last = RT3883_GPIO_LNA_PE_G2, -- }, {0} 
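The verbose per-group lookup tables removed here are replaced by the compact FUNC()/GRP() tables that follow. As a rough sketch (not part of the patch; example_select_gpio_i2s is a hypothetical name), enabling a function through the new pinctrl driver boils down to a masked update of the GPIO mode register, e.g. switching the RT3883 "uartf" group to its "gpio i2s" function:

static void example_select_gpio_i2s(void)
{
	u32 mode = rt_sysc_r32(SYSC_REG_GPIO_MODE);	/* 0x60, defined in pinctrl-rt2880.c */

	mode &= ~(RT3883_GPIO_MODE_UART0_MASK << RT3883_GPIO_MODE_UART0_SHIFT);
	mode |= RT3883_GPIO_MODE_GPIO_I2S << RT3883_GPIO_MODE_UART0_SHIFT;
	rt_sysc_w32(mode, SYSC_REG_GPIO_MODE);
}

This is what rt2880_pmx_group_enable() does generically, taking the mask, shift and function value from the GRP()/FUNC() entries.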
-+static struct rt2880_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) }; -+static struct rt2880_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) }; -+static struct rt2880_pmx_func uartf_func[] = { -+ FUNC("uartf", RT3883_GPIO_MODE_UARTF, 7, 8), -+ FUNC("pcm uartf", RT3883_GPIO_MODE_PCM_UARTF, 7, 8), -+ FUNC("pcm i2s", RT3883_GPIO_MODE_PCM_I2S, 7, 8), -+ FUNC("i2s uartf", RT3883_GPIO_MODE_I2S_UARTF, 7, 8), -+ FUNC("pcm gpio", RT3883_GPIO_MODE_PCM_GPIO, 11, 4), -+ FUNC("gpio uartf", RT3883_GPIO_MODE_GPIO_UARTF, 7, 4), -+ FUNC("gpio i2s", RT3883_GPIO_MODE_GPIO_I2S, 7, 4), - }; -- --static struct ralink_pinmux_grp uart_mux[] = { -- { -- .name = "uartf", -- .mask = RT3883_GPIO_MODE_UARTF, -- .gpio_first = RT3883_GPIO_7, -- .gpio_last = RT3883_GPIO_14, -- }, { -- .name = "pcm uartf", -- .mask = RT3883_GPIO_MODE_PCM_UARTF, -- .gpio_first = RT3883_GPIO_7, -- .gpio_last = RT3883_GPIO_14, -- }, { -- .name = "pcm i2s", -- .mask = RT3883_GPIO_MODE_PCM_I2S, -- .gpio_first = RT3883_GPIO_7, -- .gpio_last = RT3883_GPIO_14, -- }, { -- .name = "i2s uartf", -- .mask = RT3883_GPIO_MODE_I2S_UARTF, -- .gpio_first = RT3883_GPIO_7, -- .gpio_last = RT3883_GPIO_14, -- }, { -- .name = "pcm gpio", -- .mask = RT3883_GPIO_MODE_PCM_GPIO, -- .gpio_first = RT3883_GPIO_11, -- .gpio_last = RT3883_GPIO_14, -- }, { -- .name = "gpio uartf", -- .mask = RT3883_GPIO_MODE_GPIO_UARTF, -- .gpio_first = RT3883_GPIO_7, -- .gpio_last = RT3883_GPIO_10, -- }, { -- .name = "gpio i2s", -- .mask = RT3883_GPIO_MODE_GPIO_I2S, -- .gpio_first = RT3883_GPIO_7, -- .gpio_last = RT3883_GPIO_10, -- }, { -- .name = "gpio", -- .mask = RT3883_GPIO_MODE_GPIO, -- }, {0} -+static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) }; -+static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) }; -+static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) }; -+static struct rt2880_pmx_func lna_a_func[] = { FUNC("lna a", 0, 32, 3) }; -+static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna a", 0, 35, 3) }; -+static struct rt2880_pmx_func pci_func[] = { -+ FUNC("pci-dev", 0, 40, 32), -+ FUNC("pci-host2", 1, 40, 32), -+ FUNC("pci-host1", 2, 40, 32), -+ FUNC("pci-fnc", 3, 40, 32) - }; -+static struct rt2880_pmx_func ge1_func[] = { FUNC("ge1", 0, 72, 12) }; -+static struct rt2880_pmx_func ge2_func[] = { FUNC("ge1", 0, 84, 12) }; - --static struct ralink_pinmux_grp pci_mux[] = { -- { -- .name = "pci-dev", -- .mask = 0, -- .gpio_first = RT3883_GPIO_PCI_AD0, -- .gpio_last = RT3883_GPIO_PCI_AD31, -- }, { -- .name = "pci-host2", -- .mask = 1, -- .gpio_first = RT3883_GPIO_PCI_AD0, -- .gpio_last = RT3883_GPIO_PCI_AD31, -- }, { -- .name = "pci-host1", -- .mask = 2, -- .gpio_first = RT3883_GPIO_PCI_AD0, -- .gpio_last = RT3883_GPIO_PCI_AD31, -- }, { -- .name = "pci-fnc", -- .mask = 3, -- .gpio_first = RT3883_GPIO_PCI_AD0, -- .gpio_last = RT3883_GPIO_PCI_AD31, -- }, { -- .name = "pci-gpio", -- .mask = 7, -- .gpio_first = RT3883_GPIO_PCI_AD0, -- .gpio_last = RT3883_GPIO_PCI_AD31, -- }, {0} -+static struct rt2880_pmx_group rt3883_pinmux_data[] = { -+ GRP("i2c", i2c_func, 1, RT3883_GPIO_MODE_I2C), -+ GRP("spi", spi_func, 1, RT3883_GPIO_MODE_SPI), -+ GRP("uartf", uartf_func, RT3883_GPIO_MODE_UART0_MASK, -+ RT3883_GPIO_MODE_UART0_SHIFT), -+ GRP("uartlite", uartlite_func, 1, RT3883_GPIO_MODE_UART1), -+ GRP("jtag", jtag_func, 1, RT3883_GPIO_MODE_JTAG), -+ GRP("mdio", mdio_func, 1, RT3883_GPIO_MODE_MDIO), -+ GRP("lna a", lna_a_func, 1, RT3883_GPIO_MODE_LNA_A), -+ GRP("lna g", lna_g_func, 1, RT3883_GPIO_MODE_LNA_G), -+ GRP("pci", pci_func, 
RT3883_GPIO_MODE_PCI_MASK, -+ RT3883_GPIO_MODE_PCI_SHIFT), -+ GRP("ge1", ge1_func, 1, RT3883_GPIO_MODE_GE1), -+ GRP("ge2", ge2_func, 1, RT3883_GPIO_MODE_GE2), -+ { 0 } - }; - - static void rt3883_wdt_reset(void) -@@ -155,17 +73,6 @@ static void rt3883_wdt_reset(void) - rt_sysc_w32(t, RT3883_SYSC_REG_SYSCFG1); - } - --struct ralink_pinmux rt_gpio_pinmux = { -- .mode = mode_mux, -- .uart = uart_mux, -- .uart_shift = RT3883_GPIO_MODE_UART0_SHIFT, -- .uart_mask = RT3883_GPIO_MODE_UART0_MASK, -- .wdt_reset = rt3883_wdt_reset, -- .pci = pci_mux, -- .pci_shift = RT3883_GPIO_MODE_PCI_SHIFT, -- .pci_mask = RT3883_GPIO_MODE_PCI_MASK, --}; -- - void __init ralink_clk_init(void) - { - unsigned long cpu_rate, sys_rate; -@@ -243,4 +150,6 @@ void prom_soc_init(struct ralink_soc_inf - soc_info->mem_base = RT3883_SDRAM_BASE; - soc_info->mem_size_min = RT3883_MEM_SIZE_MIN; - soc_info->mem_size_max = RT3883_MEM_SIZE_MAX; -+ -+ rt2880_pinmux_data = rt3883_pinmux_data; - } diff --git a/target/linux/ramips/patches-3.10/0122-serial-ralink-adds-mt7620-serial.patch b/target/linux/ramips/patches-3.10/0122-serial-ralink-adds-mt7620-serial.patch new file mode 100644 index 0000000000..f0b2e66b97 --- /dev/null +++ b/target/linux/ramips/patches-3.10/0122-serial-ralink-adds-mt7620-serial.patch @@ -0,0 +1,23 @@ +From 16f476a7528eefade4bd4ebee12d5aa2052bba8c Mon Sep 17 00:00:00 2001 +From: John Crispin +Date: Fri, 15 Mar 2013 18:16:01 +0100 +Subject: [PATCH 122/133] serial: ralink: adds mt7620 serial + +Add the config symbol for Mediatek7620 SoC to SERIAL_8250_RT288X + +Signed-off-by: John Crispin +--- + drivers/tty/serial/8250/Kconfig | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/tty/serial/8250/Kconfig ++++ b/drivers/tty/serial/8250/Kconfig +@@ -300,7 +300,7 @@ config SERIAL_8250_EM + + config SERIAL_8250_RT288X + bool "Ralink RT288x/RT305x/RT3662/RT3883 serial port support" +- depends on SERIAL_8250 && (SOC_RT288X || SOC_RT305X || SOC_RT3883) ++ depends on SERIAL_8250 && (SOC_RT288X || SOC_RT305X || SOC_RT3883 || SOC_MT7620) + help + If you have a Ralink RT288x/RT305x SoC based board and want to use the + serial port, say Y to this option. 
The driver can handle up to 2 serial diff --git a/target/linux/ramips/patches-3.10/0123-serial-ralink-the-core-has-a-size-of-0x100-and-not-0.patch b/target/linux/ramips/patches-3.10/0123-serial-ralink-the-core-has-a-size-of-0x100-and-not-0.patch new file mode 100644 index 0000000000..b7c9987807 --- /dev/null +++ b/target/linux/ramips/patches-3.10/0123-serial-ralink-the-core-has-a-size-of-0x100-and-not-0.patch @@ -0,0 +1,22 @@ +From 304c4f060cfa6b44370ad3fe6a16963cac35b10a Mon Sep 17 00:00:00 2001 +From: John Crispin +Date: Sun, 16 Mar 2014 04:52:01 +0000 +Subject: [PATCH 123/133] serial: ralink: the core has a size of 0x100 and not + 0x1000 + +Signed-off-by: John Crispin +--- + drivers/tty/serial/8250/8250_core.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/tty/serial/8250/8250_core.c ++++ b/drivers/tty/serial/8250/8250_core.c +@@ -2499,7 +2499,7 @@ serial8250_pm(struct uart_port *port, un + static unsigned int serial8250_port_size(struct uart_8250_port *pt) + { + if (pt->port.iotype == UPIO_AU) +- return 0x1000; ++ return 0x100; + if (is_omap1_8250(pt)) + return 0x16 << pt->port.regshift; + diff --git a/target/linux/ramips/patches-3.10/0124-serial-of-allow-au1x00-and-rt288x-to-load-from-OF.patch b/target/linux/ramips/patches-3.10/0124-serial-of-allow-au1x00-and-rt288x-to-load-from-OF.patch new file mode 100644 index 0000000000..8bd00e93d9 --- /dev/null +++ b/target/linux/ramips/patches-3.10/0124-serial-of-allow-au1x00-and-rt288x-to-load-from-OF.patch @@ -0,0 +1,27 @@ +From 3f70be332048f6a903dc35f73ff5381be3b8f12b Mon Sep 17 00:00:00 2001 +From: John Crispin +Date: Sun, 14 Jul 2013 23:18:57 +0200 +Subject: [PATCH 124/133] serial: of: allow au1x00 and rt288x to load from OF + +In order to make serial_8250 loadable via OF on Au1x00 and Ralink WiSoC we need +to default the iotype to UPIO_AU. 
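To make the combined intent of the two serial patches above concrete, here is a small standalone sketch. It is illustrative only: the enum, the helper names and the printf harness are inventions for this note, not the kernel's code. The idea it models is that a node compatible with "ralink,rt2880-uart" gets the UPIO_AU register layout, and for that layout the 8250 core maps a 0x100-byte window rather than 0x1000.

/* Sketch under the assumptions stated above -- not the kernel's own code. */
#include <stdio.h>
#include <string.h>

enum uart_iotype { UPIO_MEM, UPIO_AU };

/* Patch 0124: pick UPIO_AU for Ralink/Au1x00-style UARTs found via OF. */
static enum uart_iotype pick_iotype(const char *compatible)
{
	return strcmp(compatible, "ralink,rt2880-uart") == 0 ? UPIO_AU : UPIO_MEM;
}

/* Patch 0123: the Ralink core spans 0x100 bytes; generic 8250 ports are
 * sized from their register shift instead. */
static unsigned int port_window_size(enum uart_iotype iotype, unsigned int regshift)
{
	if (iotype == UPIO_AU)
		return 0x100;
	return 8 << regshift;
}

int main(void)
{
	enum uart_iotype t = pick_iotype("ralink,rt2880-uart");

	printf("iotype=%s, window=0x%x\n",
	       t == UPIO_AU ? "UPIO_AU" : "UPIO_MEM",
	       port_window_size(t, 2));
	return 0;
}

In the real driver this size feeds the memory region the 8250 core requests for the port, which is presumably why the over-sized 0x1000 mapping mattered: on these SoCs neighbouring palmbus peripherals sit only 0x100 apart, so a larger request would clash with them.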
+ +Signed-off-by: John Crispin +--- + drivers/tty/serial/of_serial.c | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +--- a/drivers/tty/serial/of_serial.c ++++ b/drivers/tty/serial/of_serial.c +@@ -103,7 +103,10 @@ static int of_platform_serial_setup(stru + port->fifosize = prop; + + port->irq = irq_of_parse_and_map(np, 0); +- port->iotype = UPIO_MEM; ++ if (of_device_is_compatible(np, "ralink,rt2880-uart")) ++ port->iotype = UPIO_AU; ++ else ++ port->iotype = UPIO_MEM; + if (of_property_read_u32(np, "reg-io-width", &prop) == 0) { + switch (prop) { + case 1: diff --git a/target/linux/ramips/patches-3.10/0125-i2c-MIPS-adds-ralink-I2C-driver.patch b/target/linux/ramips/patches-3.10/0125-i2c-MIPS-adds-ralink-I2C-driver.patch new file mode 100644 index 0000000000..bb5f492bde --- /dev/null +++ b/target/linux/ramips/patches-3.10/0125-i2c-MIPS-adds-ralink-I2C-driver.patch @@ -0,0 +1,345 @@ +From 701cd2fb0513d17f248048b3a6f2c7d1ea294681 Mon Sep 17 00:00:00 2001 +From: John Crispin +Date: Mon, 29 Apr 2013 14:40:43 +0200 +Subject: [PATCH 125/133] i2c: MIPS: adds ralink I2C driver + +Signed-off-by: John Crispin +--- + .../devicetree/bindings/i2c/i2c-ralink.txt | 27 ++ + drivers/i2c/busses/Kconfig | 4 + + drivers/i2c/busses/Makefile | 1 + + drivers/i2c/busses/i2c-ralink.c | 274 ++++++++++++++++++++ + 4 files changed, 306 insertions(+) + create mode 100644 Documentation/devicetree/bindings/i2c/i2c-ralink.txt + create mode 100644 drivers/i2c/busses/i2c-ralink.c + +--- /dev/null ++++ b/Documentation/devicetree/bindings/i2c/i2c-ralink.txt +@@ -0,0 +1,27 @@ ++I2C for Ralink platforms ++ ++Required properties : ++- compatible : Must be "link,rt3052-i2c" ++- reg: physical base address of the controller and length of memory mapped ++ region. ++- #address-cells = <1>; ++- #size-cells = <0>; ++ ++Optional properties: ++- Child nodes conforming to i2c bus binding ++ ++Example : ++ ++palmbus@10000000 { ++ i2c@900 { ++ compatible = "link,rt3052-i2c"; ++ reg = <0x900 0x100>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ hwmon@4b { ++ compatible = "national,lm92"; ++ reg = <0x4b>; ++ }; ++ }; ++}; +--- a/drivers/i2c/busses/Kconfig ++++ b/drivers/i2c/busses/Kconfig +@@ -630,6 +630,10 @@ config I2C_PXA_SLAVE + is necessary for systems where the PXA may be a target on the + I2C bus. + ++config I2C_RALINK ++ tristate "Ralink I2C Controller" ++ select OF_I2C ++ + config HAVE_S3C2410_I2C + bool + help +--- a/drivers/i2c/busses/Makefile ++++ b/drivers/i2c/busses/Makefile +@@ -62,6 +62,7 @@ obj-$(CONFIG_I2C_PNX) += i2c-pnx.o + obj-$(CONFIG_I2C_PUV3) += i2c-puv3.o + obj-$(CONFIG_I2C_PXA) += i2c-pxa.o + obj-$(CONFIG_I2C_PXA_PCI) += i2c-pxa-pci.o ++obj-$(CONFIG_I2C_RALINK) += i2c-ralink.o + obj-$(CONFIG_I2C_S3C2410) += i2c-s3c2410.o + obj-$(CONFIG_I2C_S6000) += i2c-s6000.o + obj-$(CONFIG_I2C_SH7760) += i2c-sh7760.o +--- /dev/null ++++ b/drivers/i2c/busses/i2c-ralink.c +@@ -0,0 +1,274 @@ ++/* ++ * drivers/i2c/busses/i2c-ralink.c ++ * ++ * Copyright (C) 2013 Steven Liu ++ * ++ * This software is licensed under the terms of the GNU General Public ++ * License version 2, as published by the Free Software Foundation, and ++ * may be copied, distributed, and modified under those terms. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#define REG_CONFIG_REG 0x00 ++#define REG_CLKDIV_REG 0x04 ++#define REG_DEVADDR_REG 0x08 ++#define REG_ADDR_REG 0x0C ++#define REG_DATAOUT_REG 0x10 ++#define REG_DATAIN_REG 0x14 ++#define REG_STATUS_REG 0x18 ++#define REG_STARTXFR_REG 0x1C ++#define REG_BYTECNT_REG 0x20 ++ ++#define I2C_STARTERR BIT(4) ++#define I2C_ACKERR BIT(3) ++#define I2C_DATARDY BIT(2) ++#define I2C_SDOEMPTY BIT(1) ++#define I2C_BUSY BIT(0) ++ ++#define I2C_DEVADLEN_7 (6 << 2) ++#define I2C_ADDRDIS BIT(1) ++ ++#define I2C_RETRY 0x400 ++ ++#define CLKDIV_VALUE 200 // clock rate is 40M, 40M / (200*2) = 100k (standard i2c bus rate). ++//#define CLKDIV_VALUE 50 // clock rate is 40M, 40M / (50*2) = 400k (fast i2c bus rate). ++ ++#define READ_CMD 0x01 ++#define WRITE_CMD 0x00 ++#define READ_BLOCK 64 ++ ++static void __iomem *membase; ++static struct i2c_adapter *adapter; ++ ++static void rt_i2c_w32(u32 val, unsigned reg) ++{ ++ iowrite32(val, membase + reg); ++} ++ ++static u32 rt_i2c_r32(unsigned reg) ++{ ++ return ioread32(membase + reg); ++} ++ ++static inline int rt_i2c_wait_rx_done(void) ++{ ++ int retries = I2C_RETRY; ++ ++ do { ++ if (!retries--) ++ break; ++ } while(!(rt_i2c_r32(REG_STATUS_REG) & I2C_DATARDY)); ++ ++ return (retries < 0); ++} ++ ++static inline int rt_i2c_wait_idle(void) ++{ ++ int retries = I2C_RETRY; ++ ++ do { ++ if (!retries--) ++ break; ++ } while(rt_i2c_r32(REG_STATUS_REG) & I2C_BUSY); ++ ++ return (retries < 0); ++} ++ ++static inline int rt_i2c_wait_tx_done(void) ++{ ++ int retries = I2C_RETRY; ++ ++ do { ++ if (!retries--) ++ break; ++ } while(!(rt_i2c_r32(REG_STATUS_REG) & I2C_SDOEMPTY)); ++ ++ return (retries < 0); ++} ++ ++static int rt_i2c_handle_msg(struct i2c_adapter *a, struct i2c_msg* msg) ++{ ++ int i = 0, j = 0, pos = 0; ++ int nblock = msg->len / READ_BLOCK; ++ int rem = msg->len % READ_BLOCK; ++ ++ if (msg->flags & I2C_M_TEN) { ++ printk("10 bits addr not supported\n"); ++ return -EINVAL; ++ } ++ ++ if (msg->flags & I2C_M_RD) { ++ for (i = 0; i < nblock; i++) { ++ rt_i2c_wait_idle(); ++ rt_i2c_w32(READ_BLOCK - 1, REG_BYTECNT_REG); ++ rt_i2c_w32(READ_CMD, REG_STARTXFR_REG); ++ for (j = 0; j < READ_BLOCK; j++) { ++ if (rt_i2c_wait_rx_done()) ++ return -1; ++ msg->buf[pos++] = rt_i2c_r32(REG_DATAIN_REG); ++ } ++ } ++ ++ rt_i2c_wait_idle(); ++ rt_i2c_w32(rem - 1, REG_BYTECNT_REG); ++ rt_i2c_w32(READ_CMD, REG_STARTXFR_REG); ++ for (i = 0; i < rem; i++) { ++ if (rt_i2c_wait_rx_done()) ++ return -1; ++ msg->buf[pos++] = rt_i2c_r32(REG_DATAIN_REG); ++ } ++ } else { ++ rt_i2c_wait_idle(); ++ rt_i2c_w32(msg->len - 1, REG_BYTECNT_REG); ++ for (i = 0; i < msg->len; i++) { ++ rt_i2c_w32(msg->buf[i], REG_DATAOUT_REG); ++ rt_i2c_w32(WRITE_CMD, REG_STARTXFR_REG); ++ if (rt_i2c_wait_tx_done()) ++ return -1; ++ } ++ } ++ ++ return 0; ++} ++ ++static int rt_i2c_master_xfer(struct i2c_adapter *a, struct i2c_msg *m, int n) ++{ ++ int i = 0; ++ int ret = 0; ++ ++ if (rt_i2c_wait_idle()) { ++ printk("i2c transfer failed\n"); ++ return 0; ++ } ++ ++ device_reset(a->dev.parent); ++ ++ rt_i2c_w32(m->addr, REG_DEVADDR_REG); ++ rt_i2c_w32(I2C_DEVADLEN_7 | I2C_ADDRDIS, REG_CONFIG_REG); ++ rt_i2c_w32(CLKDIV_VALUE, REG_CLKDIV_REG); ++ ++ for (i = 0; i < n && !ret; i++) ++ ret = rt_i2c_handle_msg(a, &m[i]); ++ ++ if (ret) { ++ printk("i2c transfer failed\n"); ++ return 0; ++ } ++ ++ return n; ++} ++ ++static u32 rt_i2c_func(struct 
i2c_adapter *a) ++{ ++ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; ++} ++ ++static const struct i2c_algorithm rt_i2c_algo = { ++ .master_xfer = rt_i2c_master_xfer, ++ .functionality = rt_i2c_func, ++}; ++ ++static int rt_i2c_probe(struct platform_device *pdev) ++{ ++ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ int ret; ++ ++ if (!res) { ++ dev_err(&pdev->dev, "no memory resource found\n"); ++ return -ENODEV; ++ } ++ ++ adapter = devm_kzalloc(&pdev->dev, sizeof(struct i2c_adapter), GFP_KERNEL); ++ if (!adapter) { ++ dev_err(&pdev->dev, "failed to allocate i2c_adapter\n"); ++ return -ENOMEM; ++ } ++ ++ membase = devm_request_and_ioremap(&pdev->dev, res); ++ if (IS_ERR(membase)) ++ return PTR_ERR(membase); ++ ++ strlcpy(adapter->name, dev_name(&pdev->dev), sizeof(adapter->name)); ++ adapter->owner = THIS_MODULE; ++ adapter->nr = pdev->id; ++ adapter->timeout = HZ; ++ adapter->algo = &rt_i2c_algo; ++ adapter->class = I2C_CLASS_HWMON | I2C_CLASS_SPD; ++ adapter->dev.parent = &pdev->dev; ++ adapter->dev.of_node = pdev->dev.of_node; ++ ++ ret = i2c_add_numbered_adapter(adapter); ++ if (ret) ++ return ret; ++ ++ of_i2c_register_devices(adapter); ++ ++ platform_set_drvdata(pdev, adapter); ++ ++ dev_info(&pdev->dev, "loaded\n"); ++ ++ return 0; ++} ++ ++static int rt_i2c_remove(struct platform_device *pdev) ++{ ++ platform_set_drvdata(pdev, NULL); ++ ++ return 0; ++} ++ ++static const struct of_device_id i2c_rt_dt_ids[] = { ++ { .compatible = "ralink,rt2880-i2c", }, ++ { /* sentinel */ } ++}; ++ ++MODULE_DEVICE_TABLE(of, i2c_rt_dt_ids); ++ ++static struct platform_driver rt_i2c_driver = { ++ .probe = rt_i2c_probe, ++ .remove = rt_i2c_remove, ++ .driver = { ++ .owner = THIS_MODULE, ++ .name = "i2c-ralink", ++ .of_match_table = i2c_rt_dt_ids, ++ }, ++}; ++ ++static int __init i2c_rt_init (void) ++{ ++ return platform_driver_register(&rt_i2c_driver); ++} ++subsys_initcall(i2c_rt_init); ++ ++static void __exit i2c_rt_exit (void) ++{ ++ platform_driver_unregister(&rt_i2c_driver); ++} ++ ++module_exit (i2c_rt_exit); ++ ++MODULE_AUTHOR("Steven Liu "); ++MODULE_DESCRIPTION("Ralink I2c host driver"); ++MODULE_LICENSE("GPL"); ++MODULE_ALIAS("platform:Ralink-I2C"); diff --git a/target/linux/ramips/patches-3.10/0126-spi-introduce-macros-to-set-bits_per_word_mask.patch b/target/linux/ramips/patches-3.10/0126-spi-introduce-macros-to-set-bits_per_word_mask.patch new file mode 100644 index 0000000000..1dcba05078 --- /dev/null +++ b/target/linux/ramips/patches-3.10/0126-spi-introduce-macros-to-set-bits_per_word_mask.patch @@ -0,0 +1,29 @@ +From b07600f50efe84d7e3b431e6d10fe774bb00d573 Mon Sep 17 00:00:00 2001 +From: Stephen Warren +Date: Tue, 21 May 2013 20:36:34 -0600 +Subject: [PATCH 126/133] spi: introduce macros to set bits_per_word_mask + +Introduce two macros to make setting up spi_master.bits_per_word_mask +easier, and avoid mistakes like writing BIT(n) instead of BIT(n - 1). + +SPI_BPW_MASK is for a single supported value of bits_per_word_mask. + +SPI_BPW_RANGE_MASK represents a contiguous set of bit lengths. 
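As a quick numeric illustration of the two macros this patch introduces, the standalone snippet below prints their expansions. The local BIT() definition mirrors the kernel's 1UL << n form and is reproduced here only so the snippet compiles on its own; the two SPI_BPW macros are copied verbatim from the hunk further down.

/* Standalone illustration, assuming BIT(n) == 1UL << n as in the kernel. */
#include <stdio.h>

#define BIT(nr)                      (1UL << (nr))
#define SPI_BPW_MASK(bits)           BIT((bits) - 1)
#define SPI_BPW_RANGE_MASK(min, max) ((BIT(max) - 1) - (BIT(min) - 1))

int main(void)
{
	/* A controller that only supports 8-bit words: bit 7 set (0x80). */
	printf("SPI_BPW_MASK(8)           = 0x%lx\n", SPI_BPW_MASK(8));

	/* The range form expands to one contiguous run of bits between the
	 * two limits; with these arguments that is 0xff00, i.e. bits 8..15. */
	printf("SPI_BPW_RANGE_MASK(8, 16) = 0x%lx\n", SPI_BPW_RANGE_MASK(8, 16));
	return 0;
}

A driver would typically assign one of these to spi_master.bits_per_word_mask in its probe path, for example bits_per_word_mask = SPI_BPW_MASK(8) for an 8-bit-only controller.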
+ +Signed-off-by: Stephen Warren +Signed-off-by: Mark Brown +--- + include/linux/spi/spi.h | 2 ++ + 1 file changed, 2 insertions(+) + +--- a/include/linux/spi/spi.h ++++ b/include/linux/spi/spi.h +@@ -308,6 +308,8 @@ struct spi_master { + + /* bitmask of supported bits_per_word for transfers */ + u32 bits_per_word_mask; ++#define SPI_BPW_MASK(bits) BIT((bits) - 1) ++#define SPI_BPW_RANGE_MASK(min, max) ((BIT(max) - 1) - (BIT(min) - 1)) + + /* other constraints relevant to this driver */ + u16 flags; diff --git a/target/linux/ramips/patches-3.10/0127-mmc-MIPS-ralink-add-sdhci-for-mt7620a-SoC.patch b/target/linux/ramips/patches-3.10/0127-mmc-MIPS-ralink-add-sdhci-for-mt7620a-SoC.patch new file mode 100644 index 0000000000..3985718dc6 --- /dev/null +++ b/target/linux/ramips/patches-3.10/0127-mmc-MIPS-ralink-add-sdhci-for-mt7620a-SoC.patch @@ -0,0 +1,3433 @@ +From 759e011e67792898799fb54340ba5bad944274a1 Mon Sep 17 00:00:00 2001 +From: John Crispin +Date: Thu, 2 May 2013 14:59:01 +0200 +Subject: [PATCH 127/133] mmc: MIPS: ralink: add sdhci for mt7620a SoC + +Signed-off-by: John Crispin +--- + drivers/mmc/host/Kconfig | 11 + + drivers/mmc/host/Makefile | 1 + + drivers/mmc/host/mt6575_sd.h | 1068 ++++++++++++++++++ + drivers/mmc/host/sdhci-mt7620.c | 2314 +++++++++++++++++++++++++++++++++++++++ + 4 files changed, 3394 insertions(+) + create mode 100644 drivers/mmc/host/mt6575_sd.h + create mode 100644 drivers/mmc/host/sdhci-mt7620.c + +--- a/drivers/mmc/host/Kconfig ++++ b/drivers/mmc/host/Kconfig +@@ -260,6 +260,17 @@ config MMC_SDHCI_BCM2835 + + If unsure, say N. + ++config MMC_SDHCI_MT7620 ++ tristate "SDHCI platform support for the MT7620 SD/MMC Controller" ++ depends on SOC_MT7620 ++ depends on MMC_SDHCI_PLTFM ++ select MMC_SDHCI_IO_ACCESSORS ++ help ++ This selects the BCM2835 SD/MMC controller. If you have a BCM2835 ++ platform with SD or MMC devices, say Y or M here. ++ ++ If unsure, say N. ++ + config MMC_OMAP + tristate "TI OMAP Multimedia Card Interface support" + depends on ARCH_OMAP +--- a/drivers/mmc/host/Makefile ++++ b/drivers/mmc/host/Makefile +@@ -62,6 +62,7 @@ obj-$(CONFIG_MMC_SDHCI_TEGRA) += sdhci- + obj-$(CONFIG_MMC_SDHCI_OF_ESDHC) += sdhci-of-esdhc.o + obj-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o + obj-$(CONFIG_MMC_SDHCI_BCM2835) += sdhci-bcm2835.o ++obj-$(CONFIG_MMC_SDHCI_MT7620) += sdhci-mt7620.o + + ifeq ($(CONFIG_CB710_DEBUG),y) + CFLAGS-cb710-mmc += -DDEBUG +--- /dev/null ++++ b/drivers/mmc/host/mt6575_sd.h +@@ -0,0 +1,1068 @@ ++/* Copyright Statement: ++ * ++ * This software/firmware and related documentation ("MediaTek Software") are ++ * protected under relevant copyright laws. The information contained herein ++ * is confidential and proprietary to MediaTek Inc. and/or its licensors. ++ * Without the prior written permission of MediaTek inc. and/or its licensors, ++ * any reproduction, modification, use or disclosure of MediaTek Software, ++ * and information contained herein, in whole or in part, shall be strictly prohibited. ++ */ ++/* MediaTek Inc. (C) 2010. All rights reserved. ++ * ++ * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES ++ * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE") ++ * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON ++ * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT. 
++ * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE ++ * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR ++ * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH ++ * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES ++ * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES ++ * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK ++ * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR ++ * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND ++ * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE, ++ * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE, ++ * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO ++ * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE. ++ * ++ * The following software/firmware and/or related documentation ("MediaTek Software") ++ * have been modified by MediaTek Inc. All revisions are subject to any receiver's ++ * applicable license agreements with MediaTek Inc. ++ */ ++ ++#ifndef MT6575_SD_H ++#define MT6575_SD_H ++ ++#include ++#include ++ ++// #include /* --- by chhung */ ++ ++typedef void (*sdio_irq_handler_t)(void*); /* external irq handler */ ++typedef void (*pm_callback_t)(pm_message_t state, void *data); ++ ++#define MSDC_CD_PIN_EN (1 << 0) /* card detection pin is wired */ ++#define MSDC_WP_PIN_EN (1 << 1) /* write protection pin is wired */ ++#define MSDC_RST_PIN_EN (1 << 2) /* emmc reset pin is wired */ ++#define MSDC_SDIO_IRQ (1 << 3) /* use internal sdio irq (bus) */ ++#define MSDC_EXT_SDIO_IRQ (1 << 4) /* use external sdio irq */ ++#define MSDC_REMOVABLE (1 << 5) /* removable slot */ ++#define MSDC_SYS_SUSPEND (1 << 6) /* suspended by system */ ++#define MSDC_HIGHSPEED (1 << 7) /* high-speed mode support */ ++#define MSDC_UHS1 (1 << 8) /* uhs-1 mode support */ ++#define MSDC_DDR (1 << 9) /* ddr mode support */ ++#define MSDC_SPE (1 << 10) /* special support */ ++#define MSDC_INTERNAL_CLK (1 << 11) /* Force Internal clock */ ++#define MSDC_TABDRV (1 << 12) /* TABLET */ ++ ++ ++#define MSDC_SMPL_RISING (0) ++#define MSDC_SMPL_FALLING (1) ++ ++#define MSDC_CMD_PIN (0) ++#define MSDC_DAT_PIN (1) ++#define MSDC_CD_PIN (2) ++#define MSDC_WP_PIN (3) ++#define MSDC_RST_PIN (4) ++ ++enum { ++ MSDC_CLKSRC_26MHZ = 0, ++ MSDC_CLKSRC_197MHZ = 1, ++ MSDC_CLKSRC_208MHZ = 2 ++}; ++ ++struct msdc_hw { ++ unsigned char clk_src; /* host clock source */ ++ unsigned char cmd_edge; /* command latch edge */ ++ unsigned char data_edge; /* data latch edge */ ++ unsigned char clk_drv; /* clock pad driving */ ++ unsigned char cmd_drv; /* command pad driving */ ++ unsigned char dat_drv; /* data pad driving */ ++ unsigned long flags; /* hardware capability flags */ ++ unsigned long data_pins; /* data pins */ ++ unsigned long data_offset; /* data address offset */ ++ ++ /* config gpio pull mode */ ++ void (*config_gpio_pin)(int type, int pull); ++ ++ /* external power control for card */ ++ void (*ext_power_on)(void); ++ void (*ext_power_off)(void); ++ ++ /* external sdio irq operations */ ++ void (*request_sdio_eirq)(sdio_irq_handler_t sdio_irq_handler, void *data); ++ void (*enable_sdio_eirq)(void); ++ void (*disable_sdio_eirq)(void); ++ ++ /* external cd irq operations */ ++ void (*request_cd_eirq)(sdio_irq_handler_t cd_irq_handler, void *data); ++ void 
(*enable_cd_eirq)(void); ++ void (*disable_cd_eirq)(void); ++ int (*get_cd_status)(void); ++ ++ /* power management callback for external module */ ++ void (*register_pm)(pm_callback_t pm_cb, void *data); ++}; ++ ++extern struct msdc_hw msdc0_hw; ++extern struct msdc_hw msdc1_hw; ++extern struct msdc_hw msdc2_hw; ++extern struct msdc_hw msdc3_hw; ++ ++ ++/*--------------------------------------------------------------------------*/ ++/* Common Macro */ ++/*--------------------------------------------------------------------------*/ ++#define REG_ADDR(x) ((volatile u32*)(base + OFFSET_##x)) ++ ++/*--------------------------------------------------------------------------*/ ++/* Common Definition */ ++/*--------------------------------------------------------------------------*/ ++#define MSDC_FIFO_SZ (128) ++#define MSDC_FIFO_THD (64) // (128) ++#define MSDC_NUM (4) ++ ++#define MSDC_MS (0) ++#define MSDC_SDMMC (1) ++ ++#define MSDC_MODE_UNKNOWN (0) ++#define MSDC_MODE_PIO (1) ++#define MSDC_MODE_DMA_BASIC (2) ++#define MSDC_MODE_DMA_DESC (3) ++#define MSDC_MODE_DMA_ENHANCED (4) ++#define MSDC_MODE_MMC_STREAM (5) ++ ++#define MSDC_BUS_1BITS (0) ++#define MSDC_BUS_4BITS (1) ++#define MSDC_BUS_8BITS (2) ++ ++#define MSDC_BRUST_8B (3) ++#define MSDC_BRUST_16B (4) ++#define MSDC_BRUST_32B (5) ++#define MSDC_BRUST_64B (6) ++ ++#define MSDC_PIN_PULL_NONE (0) ++#define MSDC_PIN_PULL_DOWN (1) ++#define MSDC_PIN_PULL_UP (2) ++#define MSDC_PIN_KEEP (3) ++ ++#define MSDC_MAX_SCLK (48000000) /* +/- by chhung */ ++#define MSDC_MIN_SCLK (260000) ++ ++#define MSDC_AUTOCMD12 (0x0001) ++#define MSDC_AUTOCMD23 (0x0002) ++#define MSDC_AUTOCMD19 (0x0003) ++ ++#define MSDC_EMMC_BOOTMODE0 (0) /* Pull low CMD mode */ ++#define MSDC_EMMC_BOOTMODE1 (1) /* Reset CMD mode */ ++ ++enum { ++ RESP_NONE = 0, ++ RESP_R1, ++ RESP_R2, ++ RESP_R3, ++ RESP_R4, ++ RESP_R5, ++ RESP_R6, ++ RESP_R7, ++ RESP_R1B ++}; ++ ++/*--------------------------------------------------------------------------*/ ++/* Register Offset */ ++/*--------------------------------------------------------------------------*/ ++#define OFFSET_MSDC_CFG (0x0) ++#define OFFSET_MSDC_IOCON (0x04) ++#define OFFSET_MSDC_PS (0x08) ++#define OFFSET_MSDC_INT (0x0c) ++#define OFFSET_MSDC_INTEN (0x10) ++#define OFFSET_MSDC_FIFOCS (0x14) ++#define OFFSET_MSDC_TXDATA (0x18) ++#define OFFSET_MSDC_RXDATA (0x1c) ++#define OFFSET_SDC_CFG (0x30) ++#define OFFSET_SDC_CMD (0x34) ++#define OFFSET_SDC_ARG (0x38) ++#define OFFSET_SDC_STS (0x3c) ++#define OFFSET_SDC_RESP0 (0x40) ++#define OFFSET_SDC_RESP1 (0x44) ++#define OFFSET_SDC_RESP2 (0x48) ++#define OFFSET_SDC_RESP3 (0x4c) ++#define OFFSET_SDC_BLK_NUM (0x50) ++#define OFFSET_SDC_CSTS (0x58) ++#define OFFSET_SDC_CSTS_EN (0x5c) ++#define OFFSET_SDC_DCRC_STS (0x60) ++#define OFFSET_EMMC_CFG0 (0x70) ++#define OFFSET_EMMC_CFG1 (0x74) ++#define OFFSET_EMMC_STS (0x78) ++#define OFFSET_EMMC_IOCON (0x7c) ++#define OFFSET_SDC_ACMD_RESP (0x80) ++#define OFFSET_SDC_ACMD19_TRG (0x84) ++#define OFFSET_SDC_ACMD19_STS (0x88) ++#define OFFSET_MSDC_DMA_SA (0x90) ++#define OFFSET_MSDC_DMA_CA (0x94) ++#define OFFSET_MSDC_DMA_CTRL (0x98) ++#define OFFSET_MSDC_DMA_CFG (0x9c) ++#define OFFSET_MSDC_DBG_SEL (0xa0) ++#define OFFSET_MSDC_DBG_OUT (0xa4) ++#define OFFSET_MSDC_PATCH_BIT (0xb0) ++#define OFFSET_MSDC_PATCH_BIT1 (0xb4) ++#define OFFSET_MSDC_PAD_CTL0 (0xe0) ++#define OFFSET_MSDC_PAD_CTL1 (0xe4) ++#define OFFSET_MSDC_PAD_CTL2 (0xe8) ++#define OFFSET_MSDC_PAD_TUNE (0xec) ++#define OFFSET_MSDC_DAT_RDDLY0 (0xf0) ++#define 
OFFSET_MSDC_DAT_RDDLY1 (0xf4) ++#define OFFSET_MSDC_HW_DBG (0xf8) ++#define OFFSET_MSDC_VERSION (0x100) ++#define OFFSET_MSDC_ECO_VER (0x104) ++ ++/*--------------------------------------------------------------------------*/ ++/* Register Address */ ++/*--------------------------------------------------------------------------*/ ++ ++/* common register */ ++#define MSDC_CFG REG_ADDR(MSDC_CFG) ++#define MSDC_IOCON REG_ADDR(MSDC_IOCON) ++#define MSDC_PS REG_ADDR(MSDC_PS) ++#define MSDC_INT REG_ADDR(MSDC_INT) ++#define MSDC_INTEN REG_ADDR(MSDC_INTEN) ++#define MSDC_FIFOCS REG_ADDR(MSDC_FIFOCS) ++#define MSDC_TXDATA REG_ADDR(MSDC_TXDATA) ++#define MSDC_RXDATA REG_ADDR(MSDC_RXDATA) ++#define MSDC_PATCH_BIT0 REG_ADDR(MSDC_PATCH_BIT) ++ ++/* sdmmc register */ ++#define SDC_CFG REG_ADDR(SDC_CFG) ++#define SDC_CMD REG_ADDR(SDC_CMD) ++#define SDC_ARG REG_ADDR(SDC_ARG) ++#define SDC_STS REG_ADDR(SDC_STS) ++#define SDC_RESP0 REG_ADDR(SDC_RESP0) ++#define SDC_RESP1 REG_ADDR(SDC_RESP1) ++#define SDC_RESP2 REG_ADDR(SDC_RESP2) ++#define SDC_RESP3 REG_ADDR(SDC_RESP3) ++#define SDC_BLK_NUM REG_ADDR(SDC_BLK_NUM) ++#define SDC_CSTS REG_ADDR(SDC_CSTS) ++#define SDC_CSTS_EN REG_ADDR(SDC_CSTS_EN) ++#define SDC_DCRC_STS REG_ADDR(SDC_DCRC_STS) ++ ++/* emmc register*/ ++#define EMMC_CFG0 REG_ADDR(EMMC_CFG0) ++#define EMMC_CFG1 REG_ADDR(EMMC_CFG1) ++#define EMMC_STS REG_ADDR(EMMC_STS) ++#define EMMC_IOCON REG_ADDR(EMMC_IOCON) ++ ++/* auto command register */ ++#define SDC_ACMD_RESP REG_ADDR(SDC_ACMD_RESP) ++#define SDC_ACMD19_TRG REG_ADDR(SDC_ACMD19_TRG) ++#define SDC_ACMD19_STS REG_ADDR(SDC_ACMD19_STS) ++ ++/* dma register */ ++#define MSDC_DMA_SA REG_ADDR(MSDC_DMA_SA) ++#define MSDC_DMA_CA REG_ADDR(MSDC_DMA_CA) ++#define MSDC_DMA_CTRL REG_ADDR(MSDC_DMA_CTRL) ++#define MSDC_DMA_CFG REG_ADDR(MSDC_DMA_CFG) ++ ++/* pad ctrl register */ ++#define MSDC_PAD_CTL0 REG_ADDR(MSDC_PAD_CTL0) ++#define MSDC_PAD_CTL1 REG_ADDR(MSDC_PAD_CTL1) ++#define MSDC_PAD_CTL2 REG_ADDR(MSDC_PAD_CTL2) ++ ++/* data read delay */ ++#define MSDC_DAT_RDDLY0 REG_ADDR(MSDC_DAT_RDDLY0) ++#define MSDC_DAT_RDDLY1 REG_ADDR(MSDC_DAT_RDDLY1) ++ ++/* debug register */ ++#define MSDC_DBG_SEL REG_ADDR(MSDC_DBG_SEL) ++#define MSDC_DBG_OUT REG_ADDR(MSDC_DBG_OUT) ++ ++/* misc register */ ++#define MSDC_PATCH_BIT REG_ADDR(MSDC_PATCH_BIT) ++#define MSDC_PATCH_BIT1 REG_ADDR(MSDC_PATCH_BIT1) ++#define MSDC_PAD_TUNE REG_ADDR(MSDC_PAD_TUNE) ++#define MSDC_HW_DBG REG_ADDR(MSDC_HW_DBG) ++#define MSDC_VERSION REG_ADDR(MSDC_VERSION) ++#define MSDC_ECO_VER REG_ADDR(MSDC_ECO_VER) /* ECO Version */ ++ ++/*--------------------------------------------------------------------------*/ ++/* Register Mask */ ++/*--------------------------------------------------------------------------*/ ++ ++/* MSDC_CFG mask */ ++#define MSDC_CFG_MODE (0x1 << 0) /* RW */ ++#define MSDC_CFG_CKPDN (0x1 << 1) /* RW */ ++#define MSDC_CFG_RST (0x1 << 2) /* RW */ ++#define MSDC_CFG_PIO (0x1 << 3) /* RW */ ++#define MSDC_CFG_CKDRVEN (0x1 << 4) /* RW */ ++#define MSDC_CFG_BV18SDT (0x1 << 5) /* RW */ ++#define MSDC_CFG_BV18PSS (0x1 << 6) /* R */ ++#define MSDC_CFG_CKSTB (0x1 << 7) /* R */ ++#define MSDC_CFG_CKDIV (0xff << 8) /* RW */ ++#define MSDC_CFG_CKMOD (0x3 << 16) /* RW */ ++ ++/* MSDC_IOCON mask */ ++#define MSDC_IOCON_SDR104CKS (0x1 << 0) /* RW */ ++#define MSDC_IOCON_RSPL (0x1 << 1) /* RW */ ++#define MSDC_IOCON_DSPL (0x1 << 2) /* RW */ ++#define MSDC_IOCON_DDLSEL (0x1 << 3) /* RW */ ++#define MSDC_IOCON_DDR50CKD (0x1 << 4) /* RW */ ++#define MSDC_IOCON_DSPLSEL (0x1 << 5) /* RW */ ++#define 
MSDC_IOCON_D0SPL (0x1 << 16) /* RW */ ++#define MSDC_IOCON_D1SPL (0x1 << 17) /* RW */ ++#define MSDC_IOCON_D2SPL (0x1 << 18) /* RW */ ++#define MSDC_IOCON_D3SPL (0x1 << 19) /* RW */ ++#define MSDC_IOCON_D4SPL (0x1 << 20) /* RW */ ++#define MSDC_IOCON_D5SPL (0x1 << 21) /* RW */ ++#define MSDC_IOCON_D6SPL (0x1 << 22) /* RW */ ++#define MSDC_IOCON_D7SPL (0x1 << 23) /* RW */ ++#define MSDC_IOCON_RISCSZ (0x3 << 24) /* RW */ ++ ++/* MSDC_PS mask */ ++#define MSDC_PS_CDEN (0x1 << 0) /* RW */ ++#define MSDC_PS_CDSTS (0x1 << 1) /* R */ ++#define MSDC_PS_CDDEBOUNCE (0xf << 12) /* RW */ ++#define MSDC_PS_DAT (0xff << 16) /* R */ ++#define MSDC_PS_CMD (0x1 << 24) /* R */ ++#define MSDC_PS_WP (0x1UL<< 31) /* R */ ++ ++/* MSDC_INT mask */ ++#define MSDC_INT_MMCIRQ (0x1 << 0) /* W1C */ ++#define MSDC_INT_CDSC (0x1 << 1) /* W1C */ ++#define MSDC_INT_ACMDRDY (0x1 << 3) /* W1C */ ++#define MSDC_INT_ACMDTMO (0x1 << 4) /* W1C */ ++#define MSDC_INT_ACMDCRCERR (0x1 << 5) /* W1C */ ++#define MSDC_INT_DMAQ_EMPTY (0x1 << 6) /* W1C */ ++#define MSDC_INT_SDIOIRQ (0x1 << 7) /* W1C */ ++#define MSDC_INT_CMDRDY (0x1 << 8) /* W1C */ ++#define MSDC_INT_CMDTMO (0x1 << 9) /* W1C */ ++#define MSDC_INT_RSPCRCERR (0x1 << 10) /* W1C */ ++#define MSDC_INT_CSTA (0x1 << 11) /* R */ ++#define MSDC_INT_XFER_COMPL (0x1 << 12) /* W1C */ ++#define MSDC_INT_DXFER_DONE (0x1 << 13) /* W1C */ ++#define MSDC_INT_DATTMO (0x1 << 14) /* W1C */ ++#define MSDC_INT_DATCRCERR (0x1 << 15) /* W1C */ ++#define MSDC_INT_ACMD19_DONE (0x1 << 16) /* W1C */ ++ ++/* MSDC_INTEN mask */ ++#define MSDC_INTEN_MMCIRQ (0x1 << 0) /* RW */ ++#define MSDC_INTEN_CDSC (0x1 << 1) /* RW */ ++#define MSDC_INTEN_ACMDRDY (0x1 << 3) /* RW */ ++#define MSDC_INTEN_ACMDTMO (0x1 << 4) /* RW */ ++#define MSDC_INTEN_ACMDCRCERR (0x1 << 5) /* RW */ ++#define MSDC_INTEN_DMAQ_EMPTY (0x1 << 6) /* RW */ ++#define MSDC_INTEN_SDIOIRQ (0x1 << 7) /* RW */ ++#define MSDC_INTEN_CMDRDY (0x1 << 8) /* RW */ ++#define MSDC_INTEN_CMDTMO (0x1 << 9) /* RW */ ++#define MSDC_INTEN_RSPCRCERR (0x1 << 10) /* RW */ ++#define MSDC_INTEN_CSTA (0x1 << 11) /* RW */ ++#define MSDC_INTEN_XFER_COMPL (0x1 << 12) /* RW */ ++#define MSDC_INTEN_DXFER_DONE (0x1 << 13) /* RW */ ++#define MSDC_INTEN_DATTMO (0x1 << 14) /* RW */ ++#define MSDC_INTEN_DATCRCERR (0x1 << 15) /* RW */ ++#define MSDC_INTEN_ACMD19_DONE (0x1 << 16) /* RW */ ++ ++/* MSDC_FIFOCS mask */ ++#define MSDC_FIFOCS_RXCNT (0xff << 0) /* R */ ++#define MSDC_FIFOCS_TXCNT (0xff << 16) /* R */ ++#define MSDC_FIFOCS_CLR (0x1UL<< 31) /* RW */ ++ ++/* SDC_CFG mask */ ++#define SDC_CFG_SDIOINTWKUP (0x1 << 0) /* RW */ ++#define SDC_CFG_INSWKUP (0x1 << 1) /* RW */ ++#define SDC_CFG_BUSWIDTH (0x3 << 16) /* RW */ ++#define SDC_CFG_SDIO (0x1 << 19) /* RW */ ++#define SDC_CFG_SDIOIDE (0x1 << 20) /* RW */ ++#define SDC_CFG_INTATGAP (0x1 << 21) /* RW */ ++#define SDC_CFG_DTOC (0xffUL << 24) /* RW */ ++ ++/* SDC_CMD mask */ ++#define SDC_CMD_OPC (0x3f << 0) /* RW */ ++#define SDC_CMD_BRK (0x1 << 6) /* RW */ ++#define SDC_CMD_RSPTYP (0x7 << 7) /* RW */ ++#define SDC_CMD_DTYP (0x3 << 11) /* RW */ ++#define SDC_CMD_DTYP (0x3 << 11) /* RW */ ++#define SDC_CMD_RW (0x1 << 13) /* RW */ ++#define SDC_CMD_STOP (0x1 << 14) /* RW */ ++#define SDC_CMD_GOIRQ (0x1 << 15) /* RW */ ++#define SDC_CMD_BLKLEN (0xfff<< 16) /* RW */ ++#define SDC_CMD_AUTOCMD (0x3 << 28) /* RW */ ++#define SDC_CMD_VOLSWTH (0x1 << 30) /* RW */ ++ ++/* SDC_STS mask */ ++#define SDC_STS_SDCBUSY (0x1 << 0) /* RW */ ++#define SDC_STS_CMDBUSY (0x1 << 1) /* RW */ ++#define SDC_STS_SWR_COMPL (0x1 << 31) /* RW */ ++ 
++/* SDC_DCRC_STS mask */ ++#define SDC_DCRC_STS_NEG (0xf << 8) /* RO */ ++#define SDC_DCRC_STS_POS (0xff << 0) /* RO */ ++ ++/* EMMC_CFG0 mask */ ++#define EMMC_CFG0_BOOTSTART (0x1 << 0) /* W */ ++#define EMMC_CFG0_BOOTSTOP (0x1 << 1) /* W */ ++#define EMMC_CFG0_BOOTMODE (0x1 << 2) /* RW */ ++#define EMMC_CFG0_BOOTACKDIS (0x1 << 3) /* RW */ ++#define EMMC_CFG0_BOOTWDLY (0x7 << 12) /* RW */ ++#define EMMC_CFG0_BOOTSUPP (0x1 << 15) /* RW */ ++ ++/* EMMC_CFG1 mask */ ++#define EMMC_CFG1_BOOTDATTMC (0xfffff << 0) /* RW */ ++#define EMMC_CFG1_BOOTACKTMC (0xfffUL << 20) /* RW */ ++ ++/* EMMC_STS mask */ ++#define EMMC_STS_BOOTCRCERR (0x1 << 0) /* W1C */ ++#define EMMC_STS_BOOTACKERR (0x1 << 1) /* W1C */ ++#define EMMC_STS_BOOTDATTMO (0x1 << 2) /* W1C */ ++#define EMMC_STS_BOOTACKTMO (0x1 << 3) /* W1C */ ++#define EMMC_STS_BOOTUPSTATE (0x1 << 4) /* R */ ++#define EMMC_STS_BOOTACKRCV (0x1 << 5) /* W1C */ ++#define EMMC_STS_BOOTDATRCV (0x1 << 6) /* R */ ++ ++/* EMMC_IOCON mask */ ++#define EMMC_IOCON_BOOTRST (0x1 << 0) /* RW */ ++ ++/* SDC_ACMD19_TRG mask */ ++#define SDC_ACMD19_TRG_TUNESEL (0xf << 0) /* RW */ ++ ++/* MSDC_DMA_CTRL mask */ ++#define MSDC_DMA_CTRL_START (0x1 << 0) /* W */ ++#define MSDC_DMA_CTRL_STOP (0x1 << 1) /* W */ ++#define MSDC_DMA_CTRL_RESUME (0x1 << 2) /* W */ ++#define MSDC_DMA_CTRL_MODE (0x1 << 8) /* RW */ ++#define MSDC_DMA_CTRL_LASTBUF (0x1 << 10) /* RW */ ++#define MSDC_DMA_CTRL_BRUSTSZ (0x7 << 12) /* RW */ ++#define MSDC_DMA_CTRL_XFERSZ (0xffffUL << 16)/* RW */ ++ ++/* MSDC_DMA_CFG mask */ ++#define MSDC_DMA_CFG_STS (0x1 << 0) /* R */ ++#define MSDC_DMA_CFG_DECSEN (0x1 << 1) /* RW */ ++#define MSDC_DMA_CFG_BDCSERR (0x1 << 4) /* R */ ++#define MSDC_DMA_CFG_GPDCSERR (0x1 << 5) /* R */ ++ ++/* MSDC_PATCH_BIT mask */ ++#define MSDC_PATCH_BIT_WFLSMODE (0x1 << 0) /* RW */ ++#define MSDC_PATCH_BIT_ODDSUPP (0x1 << 1) /* RW */ ++#define MSDC_PATCH_BIT_CKGEN_CK (0x1 << 6) /* E2: Fixed to 1 */ ++#define MSDC_PATCH_BIT_IODSSEL (0x1 << 16) /* RW */ ++#define MSDC_PATCH_BIT_IOINTSEL (0x1 << 17) /* RW */ ++#define MSDC_PATCH_BIT_BUSYDLY (0xf << 18) /* RW */ ++#define MSDC_PATCH_BIT_WDOD (0xf << 22) /* RW */ ++#define MSDC_PATCH_BIT_IDRTSEL (0x1 << 26) /* RW */ ++#define MSDC_PATCH_BIT_CMDFSEL (0x1 << 27) /* RW */ ++#define MSDC_PATCH_BIT_INTDLSEL (0x1 << 28) /* RW */ ++#define MSDC_PATCH_BIT_SPCPUSH (0x1 << 29) /* RW */ ++#define MSDC_PATCH_BIT_DECRCTMO (0x1 << 30) /* RW */ ++ ++/* MSDC_PATCH_BIT1 mask */ ++#define MSDC_PATCH_BIT1_WRDAT_CRCS (0x7 << 3) ++#define MSDC_PATCH_BIT1_CMD_RSP (0x7 << 0) ++ ++/* MSDC_PAD_CTL0 mask */ ++#define MSDC_PAD_CTL0_CLKDRVN (0x7 << 0) /* RW */ ++#define MSDC_PAD_CTL0_CLKDRVP (0x7 << 4) /* RW */ ++#define MSDC_PAD_CTL0_CLKSR (0x1 << 8) /* RW */ ++#define MSDC_PAD_CTL0_CLKPD (0x1 << 16) /* RW */ ++#define MSDC_PAD_CTL0_CLKPU (0x1 << 17) /* RW */ ++#define MSDC_PAD_CTL0_CLKSMT (0x1 << 18) /* RW */ ++#define MSDC_PAD_CTL0_CLKIES (0x1 << 19) /* RW */ ++#define MSDC_PAD_CTL0_CLKTDSEL (0xf << 20) /* RW */ ++#define MSDC_PAD_CTL0_CLKRDSEL (0xffUL<< 24) /* RW */ ++ ++/* MSDC_PAD_CTL1 mask */ ++#define MSDC_PAD_CTL1_CMDDRVN (0x7 << 0) /* RW */ ++#define MSDC_PAD_CTL1_CMDDRVP (0x7 << 4) /* RW */ ++#define MSDC_PAD_CTL1_CMDSR (0x1 << 8) /* RW */ ++#define MSDC_PAD_CTL1_CMDPD (0x1 << 16) /* RW */ ++#define MSDC_PAD_CTL1_CMDPU (0x1 << 17) /* RW */ ++#define MSDC_PAD_CTL1_CMDSMT (0x1 << 18) /* RW */ ++#define MSDC_PAD_CTL1_CMDIES (0x1 << 19) /* RW */ ++#define MSDC_PAD_CTL1_CMDTDSEL (0xf << 20) /* RW */ ++#define MSDC_PAD_CTL1_CMDRDSEL (0xffUL<< 24) /* RW */ ++ 
++/* MSDC_PAD_CTL2 mask */ ++#define MSDC_PAD_CTL2_DATDRVN (0x7 << 0) /* RW */ ++#define MSDC_PAD_CTL2_DATDRVP (0x7 << 4) /* RW */ ++#define MSDC_PAD_CTL2_DATSR (0x1 << 8) /* RW */ ++#define MSDC_PAD_CTL2_DATPD (0x1 << 16) /* RW */ ++#define MSDC_PAD_CTL2_DATPU (0x1 << 17) /* RW */ ++#define MSDC_PAD_CTL2_DATIES (0x1 << 19) /* RW */ ++#define MSDC_PAD_CTL2_DATSMT (0x1 << 18) /* RW */ ++#define MSDC_PAD_CTL2_DATTDSEL (0xf << 20) /* RW */ ++#define MSDC_PAD_CTL2_DATRDSEL (0xffUL<< 24) /* RW */ ++ ++/* MSDC_PAD_TUNE mask */ ++#define MSDC_PAD_TUNE_DATWRDLY (0x1F << 0) /* RW */ ++#define MSDC_PAD_TUNE_DATRRDLY (0x1F << 8) /* RW */ ++#define MSDC_PAD_TUNE_CMDRDLY (0x1F << 16) /* RW */ ++#define MSDC_PAD_TUNE_CMDRRDLY (0x1FUL << 22) /* RW */ ++#define MSDC_PAD_TUNE_CLKTXDLY (0x1FUL << 27) /* RW */ ++ ++/* MSDC_DAT_RDDLY0/1 mask */ ++#define MSDC_DAT_RDDLY0_D0 (0x1F << 0) /* RW */ ++#define MSDC_DAT_RDDLY0_D1 (0x1F << 8) /* RW */ ++#define MSDC_DAT_RDDLY0_D2 (0x1F << 16) /* RW */ ++#define MSDC_DAT_RDDLY0_D3 (0x1F << 24) /* RW */ ++ ++#define MSDC_DAT_RDDLY1_D4 (0x1F << 0) /* RW */ ++#define MSDC_DAT_RDDLY1_D5 (0x1F << 8) /* RW */ ++#define MSDC_DAT_RDDLY1_D6 (0x1F << 16) /* RW */ ++#define MSDC_DAT_RDDLY1_D7 (0x1F << 24) /* RW */ ++ ++#define MSDC_CKGEN_MSDC_DLY_SEL (0x1F<<10) ++#define MSDC_INT_DAT_LATCH_CK_SEL (0x7<<7) ++#define MSDC_CKGEN_MSDC_CK_SEL (0x1<<6) ++#define CARD_READY_FOR_DATA (1<<8) ++#define CARD_CURRENT_STATE(x) ((x&0x00001E00)>>9) ++ ++/*--------------------------------------------------------------------------*/ ++/* Descriptor Structure */ ++/*--------------------------------------------------------------------------*/ ++typedef struct { ++ u32 hwo:1; /* could be changed by hw */ ++ u32 bdp:1; ++ u32 rsv0:6; ++ u32 chksum:8; ++ u32 intr:1; ++ u32 rsv1:15; ++ void *next; ++ void *ptr; ++ u32 buflen:16; ++ u32 extlen:8; ++ u32 rsv2:8; ++ u32 arg; ++ u32 blknum; ++ u32 cmd; ++} gpd_t; ++ ++typedef struct { ++ u32 eol:1; ++ u32 rsv0:7; ++ u32 chksum:8; ++ u32 rsv1:1; ++ u32 blkpad:1; ++ u32 dwpad:1; ++ u32 rsv2:13; ++ void *next; ++ void *ptr; ++ u32 buflen:16; ++ u32 rsv3:16; ++} bd_t; ++ ++/*--------------------------------------------------------------------------*/ ++/* Register Debugging Structure */ ++/*--------------------------------------------------------------------------*/ ++ ++typedef struct { ++ u32 msdc:1; ++ u32 ckpwn:1; ++ u32 rst:1; ++ u32 pio:1; ++ u32 ckdrven:1; ++ u32 start18v:1; ++ u32 pass18v:1; ++ u32 ckstb:1; ++ u32 ckdiv:8; ++ u32 ckmod:2; ++ u32 pad:14; ++} msdc_cfg_reg; ++typedef struct { ++ u32 sdr104cksel:1; ++ u32 rsmpl:1; ++ u32 dsmpl:1; ++ u32 ddlysel:1; ++ u32 ddr50ckd:1; ++ u32 dsplsel:1; ++ u32 pad1:10; ++ u32 d0spl:1; ++ u32 d1spl:1; ++ u32 d2spl:1; ++ u32 d3spl:1; ++ u32 d4spl:1; ++ u32 d5spl:1; ++ u32 d6spl:1; ++ u32 d7spl:1; ++ u32 riscsz:1; ++ u32 pad2:7; ++} msdc_iocon_reg; ++typedef struct { ++ u32 cden:1; ++ u32 cdsts:1; ++ u32 pad1:10; ++ u32 cddebounce:4; ++ u32 dat:8; ++ u32 cmd:1; ++ u32 pad2:6; ++ u32 wp:1; ++} msdc_ps_reg; ++typedef struct { ++ u32 mmcirq:1; ++ u32 cdsc:1; ++ u32 pad1:1; ++ u32 atocmdrdy:1; ++ u32 atocmdtmo:1; ++ u32 atocmdcrc:1; ++ u32 dmaqempty:1; ++ u32 sdioirq:1; ++ u32 cmdrdy:1; ++ u32 cmdtmo:1; ++ u32 rspcrc:1; ++ u32 csta:1; ++ u32 xfercomp:1; ++ u32 dxferdone:1; ++ u32 dattmo:1; ++ u32 datcrc:1; ++ u32 atocmd19done:1; ++ u32 pad2:15; ++} msdc_int_reg; ++typedef struct { ++ u32 mmcirq:1; ++ u32 cdsc:1; ++ u32 pad1:1; ++ u32 atocmdrdy:1; ++ u32 atocmdtmo:1; ++ u32 atocmdcrc:1; ++ u32 dmaqempty:1; ++ u32 
sdioirq:1; ++ u32 cmdrdy:1; ++ u32 cmdtmo:1; ++ u32 rspcrc:1; ++ u32 csta:1; ++ u32 xfercomp:1; ++ u32 dxferdone:1; ++ u32 dattmo:1; ++ u32 datcrc:1; ++ u32 atocmd19done:1; ++ u32 pad2:15; ++} msdc_inten_reg; ++typedef struct { ++ u32 rxcnt:8; ++ u32 pad1:8; ++ u32 txcnt:8; ++ u32 pad2:7; ++ u32 clr:1; ++} msdc_fifocs_reg; ++typedef struct { ++ u32 val; ++} msdc_txdat_reg; ++typedef struct { ++ u32 val; ++} msdc_rxdat_reg; ++typedef struct { ++ u32 sdiowkup:1; ++ u32 inswkup:1; ++ u32 pad1:14; ++ u32 buswidth:2; ++ u32 pad2:1; ++ u32 sdio:1; ++ u32 sdioide:1; ++ u32 intblkgap:1; ++ u32 pad4:2; ++ u32 dtoc:8; ++} sdc_cfg_reg; ++typedef struct { ++ u32 cmd:6; ++ u32 brk:1; ++ u32 rsptyp:3; ++ u32 pad1:1; ++ u32 dtype:2; ++ u32 rw:1; ++ u32 stop:1; ++ u32 goirq:1; ++ u32 blklen:12; ++ u32 atocmd:2; ++ u32 volswth:1; ++ u32 pad2:1; ++} sdc_cmd_reg; ++typedef struct { ++ u32 arg; ++} sdc_arg_reg; ++typedef struct { ++ u32 sdcbusy:1; ++ u32 cmdbusy:1; ++ u32 pad:29; ++ u32 swrcmpl:1; ++} sdc_sts_reg; ++typedef struct { ++ u32 val; ++} sdc_resp0_reg; ++typedef struct { ++ u32 val; ++} sdc_resp1_reg; ++typedef struct { ++ u32 val; ++} sdc_resp2_reg; ++typedef struct { ++ u32 val; ++} sdc_resp3_reg; ++typedef struct { ++ u32 num; ++} sdc_blknum_reg; ++typedef struct { ++ u32 sts; ++} sdc_csts_reg; ++typedef struct { ++ u32 sts; ++} sdc_cstsen_reg; ++typedef struct { ++ u32 datcrcsts:8; ++ u32 ddrcrcsts:4; ++ u32 pad:20; ++} sdc_datcrcsts_reg; ++typedef struct { ++ u32 bootstart:1; ++ u32 bootstop:1; ++ u32 bootmode:1; ++ u32 pad1:9; ++ u32 bootwaidly:3; ++ u32 bootsupp:1; ++ u32 pad2:16; ++} emmc_cfg0_reg; ++typedef struct { ++ u32 bootcrctmc:16; ++ u32 pad:4; ++ u32 bootacktmc:12; ++} emmc_cfg1_reg; ++typedef struct { ++ u32 bootcrcerr:1; ++ u32 bootackerr:1; ++ u32 bootdattmo:1; ++ u32 bootacktmo:1; ++ u32 bootupstate:1; ++ u32 bootackrcv:1; ++ u32 bootdatrcv:1; ++ u32 pad:25; ++} emmc_sts_reg; ++typedef struct { ++ u32 bootrst:1; ++ u32 pad:31; ++} emmc_iocon_reg; ++typedef struct { ++ u32 val; ++} msdc_acmd_resp_reg; ++typedef struct { ++ u32 tunesel:4; ++ u32 pad:28; ++} msdc_acmd19_trg_reg; ++typedef struct { ++ u32 val; ++} msdc_acmd19_sts_reg; ++typedef struct { ++ u32 addr; ++} msdc_dma_sa_reg; ++typedef struct { ++ u32 addr; ++} msdc_dma_ca_reg; ++typedef struct { ++ u32 start:1; ++ u32 stop:1; ++ u32 resume:1; ++ u32 pad1:5; ++ u32 mode:1; ++ u32 pad2:1; ++ u32 lastbuf:1; ++ u32 pad3:1; ++ u32 brustsz:3; ++ u32 pad4:1; ++ u32 xfersz:16; ++} msdc_dma_ctrl_reg; ++typedef struct { ++ u32 status:1; ++ u32 decsen:1; ++ u32 pad1:2; ++ u32 bdcsen:1; ++ u32 gpdcsen:1; ++ u32 pad2:26; ++} msdc_dma_cfg_reg; ++typedef struct { ++ u32 sel:16; ++ u32 pad2:16; ++} msdc_dbg_sel_reg; ++typedef struct { ++ u32 val; ++} msdc_dbg_out_reg; ++typedef struct { ++ u32 clkdrvn:3; ++ u32 rsv0:1; ++ u32 clkdrvp:3; ++ u32 rsv1:1; ++ u32 clksr:1; ++ u32 rsv2:7; ++ u32 clkpd:1; ++ u32 clkpu:1; ++ u32 clksmt:1; ++ u32 clkies:1; ++ u32 clktdsel:4; ++ u32 clkrdsel:8; ++} msdc_pad_ctl0_reg; ++typedef struct { ++ u32 cmddrvn:3; ++ u32 rsv0:1; ++ u32 cmddrvp:3; ++ u32 rsv1:1; ++ u32 cmdsr:1; ++ u32 rsv2:7; ++ u32 cmdpd:1; ++ u32 cmdpu:1; ++ u32 cmdsmt:1; ++ u32 cmdies:1; ++ u32 cmdtdsel:4; ++ u32 cmdrdsel:8; ++} msdc_pad_ctl1_reg; ++typedef struct { ++ u32 datdrvn:3; ++ u32 rsv0:1; ++ u32 datdrvp:3; ++ u32 rsv1:1; ++ u32 datsr:1; ++ u32 rsv2:7; ++ u32 datpd:1; ++ u32 datpu:1; ++ u32 datsmt:1; ++ u32 daties:1; ++ u32 dattdsel:4; ++ u32 datrdsel:8; ++} msdc_pad_ctl2_reg; ++typedef struct { ++ u32 wrrxdly:3; ++ u32 pad1:5; 
++ u32 rdrxdly:8; ++ u32 pad2:16; ++} msdc_pad_tune_reg; ++typedef struct { ++ u32 dat0:5; ++ u32 rsv0:3; ++ u32 dat1:5; ++ u32 rsv1:3; ++ u32 dat2:5; ++ u32 rsv2:3; ++ u32 dat3:5; ++ u32 rsv3:3; ++} msdc_dat_rddly0; ++typedef struct { ++ u32 dat4:5; ++ u32 rsv4:3; ++ u32 dat5:5; ++ u32 rsv5:3; ++ u32 dat6:5; ++ u32 rsv6:3; ++ u32 dat7:5; ++ u32 rsv7:3; ++} msdc_dat_rddly1; ++typedef struct { ++ u32 dbg0sel:8; ++ u32 dbg1sel:6; ++ u32 pad1:2; ++ u32 dbg2sel:6; ++ u32 pad2:2; ++ u32 dbg3sel:6; ++ u32 pad3:2; ++} msdc_hw_dbg_reg; ++typedef struct { ++ u32 val; ++} msdc_version_reg; ++typedef struct { ++ u32 val; ++} msdc_eco_ver_reg; ++ ++struct msdc_regs { ++ msdc_cfg_reg msdc_cfg; /* base+0x00h */ ++ msdc_iocon_reg msdc_iocon; /* base+0x04h */ ++ msdc_ps_reg msdc_ps; /* base+0x08h */ ++ msdc_int_reg msdc_int; /* base+0x0ch */ ++ msdc_inten_reg msdc_inten; /* base+0x10h */ ++ msdc_fifocs_reg msdc_fifocs; /* base+0x14h */ ++ msdc_txdat_reg msdc_txdat; /* base+0x18h */ ++ msdc_rxdat_reg msdc_rxdat; /* base+0x1ch */ ++ u32 rsv1[4]; ++ sdc_cfg_reg sdc_cfg; /* base+0x30h */ ++ sdc_cmd_reg sdc_cmd; /* base+0x34h */ ++ sdc_arg_reg sdc_arg; /* base+0x38h */ ++ sdc_sts_reg sdc_sts; /* base+0x3ch */ ++ sdc_resp0_reg sdc_resp0; /* base+0x40h */ ++ sdc_resp1_reg sdc_resp1; /* base+0x44h */ ++ sdc_resp2_reg sdc_resp2; /* base+0x48h */ ++ sdc_resp3_reg sdc_resp3; /* base+0x4ch */ ++ sdc_blknum_reg sdc_blknum; /* base+0x50h */ ++ u32 rsv2[1]; ++ sdc_csts_reg sdc_csts; /* base+0x58h */ ++ sdc_cstsen_reg sdc_cstsen; /* base+0x5ch */ ++ sdc_datcrcsts_reg sdc_dcrcsta; /* base+0x60h */ ++ u32 rsv3[3]; ++ emmc_cfg0_reg emmc_cfg0; /* base+0x70h */ ++ emmc_cfg1_reg emmc_cfg1; /* base+0x74h */ ++ emmc_sts_reg emmc_sts; /* base+0x78h */ ++ emmc_iocon_reg emmc_iocon; /* base+0x7ch */ ++ msdc_acmd_resp_reg acmd_resp; /* base+0x80h */ ++ msdc_acmd19_trg_reg acmd19_trg; /* base+0x84h */ ++ msdc_acmd19_sts_reg acmd19_sts; /* base+0x88h */ ++ u32 rsv4[1]; ++ msdc_dma_sa_reg dma_sa; /* base+0x90h */ ++ msdc_dma_ca_reg dma_ca; /* base+0x94h */ ++ msdc_dma_ctrl_reg dma_ctrl; /* base+0x98h */ ++ msdc_dma_cfg_reg dma_cfg; /* base+0x9ch */ ++ msdc_dbg_sel_reg dbg_sel; /* base+0xa0h */ ++ msdc_dbg_out_reg dbg_out; /* base+0xa4h */ ++ u32 rsv5[2]; ++ u32 patch0; /* base+0xb0h */ ++ u32 patch1; /* base+0xb4h */ ++ u32 rsv6[10]; ++ msdc_pad_ctl0_reg pad_ctl0; /* base+0xe0h */ ++ msdc_pad_ctl1_reg pad_ctl1; /* base+0xe4h */ ++ msdc_pad_ctl2_reg pad_ctl2; /* base+0xe8h */ ++ msdc_pad_tune_reg pad_tune; /* base+0xech */ ++ msdc_dat_rddly0 dat_rddly0; /* base+0xf0h */ ++ msdc_dat_rddly1 dat_rddly1; /* base+0xf4h */ ++ msdc_hw_dbg_reg hw_dbg; /* base+0xf8h */ ++ u32 rsv7[1]; ++ msdc_version_reg version; /* base+0x100h */ ++ msdc_eco_ver_reg eco_ver; /* base+0x104h */ ++}; ++ ++struct scatterlist_ex { ++ u32 cmd; ++ u32 arg; ++ u32 sglen; ++ struct scatterlist *sg; ++}; ++ ++#define DMA_FLAG_NONE (0x00000000) ++#define DMA_FLAG_EN_CHKSUM (0x00000001) ++#define DMA_FLAG_PAD_BLOCK (0x00000002) ++#define DMA_FLAG_PAD_DWORD (0x00000004) ++ ++struct msdc_dma { ++ u32 flags; /* flags */ ++ u32 xfersz; /* xfer size in bytes */ ++ u32 sglen; /* size of scatter list */ ++ u32 blklen; /* block size */ ++ struct scatterlist *sg; /* I/O scatter list */ ++ struct scatterlist_ex *esg; /* extended I/O scatter list */ ++ u8 mode; /* dma mode */ ++ u8 burstsz; /* burst size */ ++ u8 intr; /* dma done interrupt */ ++ u8 padding; /* padding */ ++ u32 cmd; /* enhanced mode command */ ++ u32 arg; /* enhanced mode arg */ ++ u32 rsp; /* enhanced mode command 
response */ ++ u32 autorsp; /* auto command response */ ++ ++ gpd_t *gpd; /* pointer to gpd array */ ++ bd_t *bd; /* pointer to bd array */ ++ dma_addr_t gpd_addr; /* the physical address of gpd array */ ++ dma_addr_t bd_addr; /* the physical address of bd array */ ++ u32 used_gpd; /* the number of used gpd elements */ ++ u32 used_bd; /* the number of used bd elements */ ++}; ++ ++struct msdc_host ++{ ++ struct msdc_hw *hw; ++ ++ struct mmc_host *mmc; /* mmc structure */ ++ struct mmc_command *cmd; ++ struct mmc_data *data; ++ struct mmc_request *mrq; ++ int cmd_rsp; ++ int cmd_rsp_done; ++ int cmd_r1b_done; ++ ++ int error; ++ spinlock_t lock; /* mutex */ ++ struct semaphore sem; ++ ++ u32 blksz; /* host block size */ ++ u32 base; /* host base address */ ++ int id; /* host id */ ++ int pwr_ref; /* core power reference count */ ++ ++ u32 xfer_size; /* total transferred size */ ++ ++ struct msdc_dma dma; /* dma channel */ ++ u32 dma_addr; /* dma transfer address */ ++ u32 dma_left_size; /* dma transfer left size */ ++ u32 dma_xfer_size; /* dma transfer size in bytes */ ++ int dma_xfer; /* dma transfer mode */ ++ ++ u32 timeout_ns; /* data timeout ns */ ++ u32 timeout_clks; /* data timeout clks */ ++ ++ atomic_t abort; /* abort transfer */ ++ ++ int irq; /* host interrupt */ ++ ++ struct tasklet_struct card_tasklet; ++ ++ struct completion cmd_done; ++ struct completion xfer_done; ++ struct pm_message pm_state; ++ ++ u32 mclk; /* mmc subsystem clock */ ++ u32 hclk; /* host clock speed */ ++ u32 sclk; /* SD/MS clock speed */ ++ u8 core_clkon; /* Host core clock on ? */ ++ u8 card_clkon; /* Card clock on ? */ ++ u8 core_power; /* core power */ ++ u8 power_mode; /* host power mode */ ++ u8 card_inserted; /* card inserted ? */ ++ u8 suspend; /* host suspended ? */ ++ u8 reserved; ++ u8 app_cmd; /* for app command */ ++ u32 app_cmd_arg; ++ u64 starttime; ++}; ++ ++static inline unsigned int uffs(unsigned int x) ++{ ++ unsigned int r = 1; ++ ++ if (!x) ++ return 0; ++ if (!(x & 0xffff)) { ++ x >>= 16; ++ r += 16; ++ } ++ if (!(x & 0xff)) { ++ x >>= 8; ++ r += 8; ++ } ++ if (!(x & 0xf)) { ++ x >>= 4; ++ r += 4; ++ } ++ if (!(x & 3)) { ++ x >>= 2; ++ r += 2; ++ } ++ if (!(x & 1)) { ++ x >>= 1; ++ r += 1; ++ } ++ return r; ++} ++#define sdr_read8(reg) __raw_readb(reg) ++#define sdr_read16(reg) __raw_readw(reg) ++#define sdr_read32(reg) __raw_readl(reg) ++#define sdr_write8(reg,val) __raw_writeb(val,reg) ++#define sdr_write16(reg,val) __raw_writew(val,reg) ++#define sdr_write32(reg,val) __raw_writel(val,reg) ++ ++#define sdr_set_bits(reg,bs) ((*(volatile u32*)(reg)) |= (u32)(bs)) ++#define sdr_clr_bits(reg,bs) ((*(volatile u32*)(reg)) &= ~((u32)(bs))) ++ ++#define sdr_set_field(reg,field,val) \ ++ do { \ ++ volatile unsigned int tv = sdr_read32(reg); \ ++ tv &= ~(field); \ ++ tv |= ((val) << (uffs((unsigned int)field) - 1)); \ ++ sdr_write32(reg,tv); \ ++ } while(0) ++#define sdr_get_field(reg,field,val) \ ++ do { \ ++ volatile unsigned int tv = sdr_read32(reg); \ ++ val = ((tv & (field)) >> (uffs((unsigned int)field) - 1)); \ ++ } while(0) ++ ++#endif ++ +--- /dev/null ++++ b/drivers/mmc/host/sdhci-mt7620.c +@@ -0,0 +1,2314 @@ ++/* Copyright Statement: ++ * ++ * This software/firmware and related documentation ("MediaTek Software") are ++ * protected under relevant copyright laws. The information contained herein ++ * is confidential and proprietary to MediaTek Inc. and/or its licensors. ++ * Without the prior written permission of MediaTek inc. 
and/or its licensors, ++ * any reproduction, modification, use or disclosure of MediaTek Software, ++ * and information contained herein, in whole or in part, shall be strictly prohibited. ++ * ++ * MediaTek Inc. (C) 2010. All rights reserved. ++ * ++ * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES ++ * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE") ++ * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON ++ * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT. ++ * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE ++ * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR ++ * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH ++ * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES ++ * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES ++ * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK ++ * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR ++ * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND ++ * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE, ++ * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE, ++ * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO ++ * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE. ++ * ++ * The following software/firmware and/or related documentation ("MediaTek Software") ++ * have been modified by MediaTek Inc. All revisions are subject to any receiver's ++ * applicable license agreements with MediaTek Inc. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++ ++#define MSDC_SMPL_FALLING (1) ++#define MSDC_CD_PIN_EN (1 << 0) /* card detection pin is wired */ ++#define MSDC_WP_PIN_EN (1 << 1) /* write protection pin is wired */ ++#define MSDC_REMOVABLE (1 << 5) /* removable slot */ ++#define MSDC_SYS_SUSPEND (1 << 6) /* suspended by system */ ++#define MSDC_HIGHSPEED (1 << 7) ++ ++#define IRQ_SDC 22 ++ ++#include ++ ++#include "mt6575_sd.h" ++ ++#define DRV_NAME "mtk-sd" ++ ++#define HOST_MAX_NUM (1) /* +/- by chhung */ ++ ++#define HOST_MAX_MCLK (48000000) /* +/- by chhung */ ++#define HOST_MIN_MCLK (260000) ++ ++#define HOST_MAX_BLKSZ (2048) ++ ++#define MSDC_OCR_AVAIL (MMC_VDD_28_29 | MMC_VDD_29_30 | MMC_VDD_30_31 | MMC_VDD_31_32 | MMC_VDD_32_33) ++ ++#define GPIO_PULL_DOWN (0) ++#define GPIO_PULL_UP (1) ++ ++#define DEFAULT_DEBOUNCE (8) /* 8 cycles */ ++#define DEFAULT_DTOC (40) /* data timeout counter. 65536x40 sclk. 
*/ ++ ++#define CMD_TIMEOUT (HZ/10) /* 100ms */ ++#define DAT_TIMEOUT (HZ/2 * 5) /* 500ms x5 */ ++ ++#define MAX_DMA_CNT (64 * 1024 - 512) /* a single transaction for WIFI may be 50K*/ ++ ++#define MAX_GPD_NUM (1 + 1) /* one null gpd */ ++#define MAX_BD_NUM (1024) ++#define MAX_BD_PER_GPD (MAX_BD_NUM) ++ ++#define MAX_HW_SGMTS (MAX_BD_NUM) ++#define MAX_PHY_SGMTS (MAX_BD_NUM) ++#define MAX_SGMT_SZ (MAX_DMA_CNT) ++#define MAX_REQ_SZ (MAX_SGMT_SZ * 8) ++ ++#ifdef MT6575_SD_DEBUG ++static struct msdc_regs *msdc_reg[HOST_MAX_NUM]; ++#endif ++ ++//================================= ++#define PERI_MSDC0_PDN (15) ++//#define PERI_MSDC1_PDN (16) ++//#define PERI_MSDC2_PDN (17) ++//#define PERI_MSDC3_PDN (18) ++ ++struct msdc_host *msdc_6575_host[] = {NULL,NULL,NULL,NULL}; ++ ++struct msdc_hw msdc0_hw = { ++ .clk_src = 0, ++ .cmd_edge = MSDC_SMPL_FALLING, ++ .data_edge = MSDC_SMPL_FALLING, ++ .clk_drv = 4, ++ .cmd_drv = 4, ++ .dat_drv = 4, ++ .data_pins = 4, ++ .data_offset = 0, ++ .flags = MSDC_SYS_SUSPEND | MSDC_WP_PIN_EN | MSDC_CD_PIN_EN | MSDC_REMOVABLE | MSDC_HIGHSPEED, ++}; ++ ++static struct resource mtk_sd_resources[] = { ++ [0] = { ++ .start = 0xb0130000, ++ .end = 0xb0133fff, ++ .flags = IORESOURCE_MEM, ++ }, ++ [1] = { ++ .start = IRQ_SDC, /*FIXME*/ ++ .end = IRQ_SDC, /*FIXME*/ ++ .flags = IORESOURCE_IRQ, ++ }, ++}; ++ ++static struct platform_device mtk_sd_device = { ++ .name = "mtk-sd", ++ .id = 0, ++ .num_resources = ARRAY_SIZE(mtk_sd_resources), ++ .resource = mtk_sd_resources, ++}; ++/* end of +++ */ ++ ++static int msdc_rsp[] = { ++ 0, /* RESP_NONE */ ++ 1, /* RESP_R1 */ ++ 2, /* RESP_R2 */ ++ 3, /* RESP_R3 */ ++ 4, /* RESP_R4 */ ++ 1, /* RESP_R5 */ ++ 1, /* RESP_R6 */ ++ 1, /* RESP_R7 */ ++ 7, /* RESP_R1b */ ++}; ++ ++/* For Inhanced DMA */ ++#define msdc_init_gpd_ex(gpd,extlen,cmd,arg,blknum) \ ++ do { \ ++ ((gpd_t*)gpd)->extlen = extlen; \ ++ ((gpd_t*)gpd)->cmd = cmd; \ ++ ((gpd_t*)gpd)->arg = arg; \ ++ ((gpd_t*)gpd)->blknum = blknum; \ ++ }while(0) ++ ++#define msdc_init_bd(bd, blkpad, dwpad, dptr, dlen) \ ++ do { \ ++ BUG_ON(dlen > 0xFFFFUL); \ ++ ((bd_t*)bd)->blkpad = blkpad; \ ++ ((bd_t*)bd)->dwpad = dwpad; \ ++ ((bd_t*)bd)->ptr = (void*)dptr; \ ++ ((bd_t*)bd)->buflen = dlen; \ ++ }while(0) ++ ++#define msdc_txfifocnt() ((sdr_read32(MSDC_FIFOCS) & MSDC_FIFOCS_TXCNT) >> 16) ++#define msdc_rxfifocnt() ((sdr_read32(MSDC_FIFOCS) & MSDC_FIFOCS_RXCNT) >> 0) ++#define msdc_fifo_write32(v) sdr_write32(MSDC_TXDATA, (v)) ++#define msdc_fifo_write8(v) sdr_write8(MSDC_TXDATA, (v)) ++#define msdc_fifo_read32() sdr_read32(MSDC_RXDATA) ++#define msdc_fifo_read8() sdr_read8(MSDC_RXDATA) ++ ++ ++#define msdc_dma_on() sdr_clr_bits(MSDC_CFG, MSDC_CFG_PIO) ++#define msdc_dma_off() sdr_set_bits(MSDC_CFG, MSDC_CFG_PIO) ++ ++#define msdc_retry(expr,retry,cnt) \ ++ do { \ ++ int backup = cnt; \ ++ while (retry) { \ ++ if (!(expr)) break; \ ++ if (cnt-- == 0) { \ ++ retry--; mdelay(1); cnt = backup; \ ++ } \ ++ } \ ++ WARN_ON(retry == 0); \ ++ } while(0) ++ ++#if 0 /* +/- chhung */ ++#define msdc_reset() \ ++ do { \ ++ int retry = 3, cnt = 1000; \ ++ sdr_set_bits(MSDC_CFG, MSDC_CFG_RST); \ ++ dsb(); \ ++ msdc_retry(sdr_read32(MSDC_CFG) & MSDC_CFG_RST, retry, cnt); \ ++ } while(0) ++#else ++#define msdc_reset() \ ++ do { \ ++ int retry = 3, cnt = 1000; \ ++ sdr_set_bits(MSDC_CFG, MSDC_CFG_RST); \ ++ msdc_retry(sdr_read32(MSDC_CFG) & MSDC_CFG_RST, retry, cnt); \ ++ } while(0) ++#endif /* end of +/- */ ++ ++#define msdc_clr_int() \ ++ do { \ ++ volatile u32 val = sdr_read32(MSDC_INT); \ ++ 
sdr_write32(MSDC_INT, val); \ ++ } while(0) ++ ++#define msdc_clr_fifo() \ ++ do { \ ++ int retry = 3, cnt = 1000; \ ++ sdr_set_bits(MSDC_FIFOCS, MSDC_FIFOCS_CLR); \ ++ msdc_retry(sdr_read32(MSDC_FIFOCS) & MSDC_FIFOCS_CLR, retry, cnt); \ ++ } while(0) ++ ++#define msdc_irq_save(val) \ ++ do { \ ++ val = sdr_read32(MSDC_INTEN); \ ++ sdr_clr_bits(MSDC_INTEN, val); \ ++ } while(0) ++ ++#define msdc_irq_restore(val) \ ++ do { \ ++ sdr_set_bits(MSDC_INTEN, val); \ ++ } while(0) ++ ++/* clock source for host: global */ ++static u32 hclks[] = {48000000}; /* +/- by chhung */ ++ ++//============================================ ++// the power for msdc host controller: global ++// always keep the VMC on. ++//============================================ ++#define msdc_vcore_on(host) \ ++ do { \ ++ printk("[+]VMC ref. count<%d>\n", ++host->pwr_ref); \ ++ (void)hwPowerOn(MT65XX_POWER_LDO_VMC, VOL_3300, "SD"); \ ++ } while (0) ++#define msdc_vcore_off(host) \ ++ do { \ ++ printk("[-]VMC ref. count<%d>\n", --host->pwr_ref); \ ++ (void)hwPowerDown(MT65XX_POWER_LDO_VMC, "SD"); \ ++ } while (0) ++ ++//==================================== ++// the vdd output for card: global ++// always keep the VMCH on. ++//==================================== ++#define msdc_vdd_on(host) \ ++ do { \ ++ (void)hwPowerOn(MT65XX_POWER_LDO_VMCH, VOL_3300, "SD"); \ ++ } while (0) ++#define msdc_vdd_off(host) \ ++ do { \ ++ (void)hwPowerDown(MT65XX_POWER_LDO_VMCH, "SD"); \ ++ } while (0) ++ ++#define sdc_is_busy() (sdr_read32(SDC_STS) & SDC_STS_SDCBUSY) ++#define sdc_is_cmd_busy() (sdr_read32(SDC_STS) & SDC_STS_CMDBUSY) ++ ++#define sdc_send_cmd(cmd,arg) \ ++ do { \ ++ sdr_write32(SDC_ARG, (arg)); \ ++ sdr_write32(SDC_CMD, (cmd)); \ ++ } while(0) ++ ++// can modify to read h/w register. ++//#define is_card_present(h) ((sdr_read32(MSDC_PS) & MSDC_PS_CDSTS) ? 
0 : 1); ++#define is_card_present(h) (((struct msdc_host*)(h))->card_inserted) ++ ++/* +++ chhung */ ++#ifndef __ASSEMBLY__ ++#define PHYSADDR(a) (((unsigned long)(a)) & 0x1fffffff) ++#else ++#define PHYSADDR(a) ((a) & 0x1fffffff) ++#endif ++/* end of +++ */ ++static unsigned int msdc_do_command(struct msdc_host *host, ++ struct mmc_command *cmd, ++ int tune, ++ unsigned long timeout); ++ ++static int msdc_tune_cmdrsp(struct msdc_host*host,struct mmc_command *cmd); ++ ++#ifdef MT6575_SD_DEBUG ++static void msdc_dump_card_status(struct msdc_host *host, u32 status) ++{ ++ static char *state[] = { ++ "Idle", /* 0 */ ++ "Ready", /* 1 */ ++ "Ident", /* 2 */ ++ "Stby", /* 3 */ ++ "Tran", /* 4 */ ++ "Data", /* 5 */ ++ "Rcv", /* 6 */ ++ "Prg", /* 7 */ ++ "Dis", /* 8 */ ++ "Reserved", /* 9 */ ++ "Reserved", /* 10 */ ++ "Reserved", /* 11 */ ++ "Reserved", /* 12 */ ++ "Reserved", /* 13 */ ++ "Reserved", /* 14 */ ++ "I/O mode", /* 15 */ ++ }; ++ if (status & R1_OUT_OF_RANGE) ++ printk("[CARD_STATUS] Out of Range\n"); ++ if (status & R1_ADDRESS_ERROR) ++ printk("[CARD_STATUS] Address Error\n"); ++ if (status & R1_BLOCK_LEN_ERROR) ++ printk("[CARD_STATUS] Block Len Error\n"); ++ if (status & R1_ERASE_SEQ_ERROR) ++ printk("[CARD_STATUS] Erase Seq Error\n"); ++ if (status & R1_ERASE_PARAM) ++ printk("[CARD_STATUS] Erase Param\n"); ++ if (status & R1_WP_VIOLATION) ++ printk("[CARD_STATUS] WP Violation\n"); ++ if (status & R1_CARD_IS_LOCKED) ++ printk("[CARD_STATUS] Card is Locked\n"); ++ if (status & R1_LOCK_UNLOCK_FAILED) ++ printk("[CARD_STATUS] Lock/Unlock Failed\n"); ++ if (status & R1_COM_CRC_ERROR) ++ printk("[CARD_STATUS] Command CRC Error\n"); ++ if (status & R1_ILLEGAL_COMMAND) ++ printk("[CARD_STATUS] Illegal Command\n"); ++ if (status & R1_CARD_ECC_FAILED) ++ printk("[CARD_STATUS] Card ECC Failed\n"); ++ if (status & R1_CC_ERROR) ++ printk("[CARD_STATUS] CC Error\n"); ++ if (status & R1_ERROR) ++ printk("[CARD_STATUS] Error\n"); ++ if (status & R1_UNDERRUN) ++ printk("[CARD_STATUS] Underrun\n"); ++ if (status & R1_OVERRUN) ++ printk("[CARD_STATUS] Overrun\n"); ++ if (status & R1_CID_CSD_OVERWRITE) ++ printk("[CARD_STATUS] CID/CSD Overwrite\n"); ++ if (status & R1_WP_ERASE_SKIP) ++ printk("[CARD_STATUS] WP Eraser Skip\n"); ++ if (status & R1_CARD_ECC_DISABLED) ++ printk("[CARD_STATUS] Card ECC Disabled\n"); ++ if (status & R1_ERASE_RESET) ++ printk("[CARD_STATUS] Erase Reset\n"); ++ if (status & R1_READY_FOR_DATA) ++ printk("[CARD_STATUS] Ready for Data\n"); ++ if (status & R1_SWITCH_ERROR) ++ printk("[CARD_STATUS] Switch error\n"); ++ if (status & R1_APP_CMD) ++ printk("[CARD_STATUS] App Command\n"); ++ ++ printk("[CARD_STATUS] '%s' State\n", state[R1_CURRENT_STATE(status)]); ++} ++ ++static void msdc_dump_ocr_reg(struct msdc_host *host, u32 resp) ++{ ++ if (resp & (1 << 7)) ++ printk("[OCR] Low Voltage Range\n"); ++ if (resp & (1 << 15)) ++ printk("[OCR] 2.7-2.8 volt\n"); ++ if (resp & (1 << 16)) ++ printk("[OCR] 2.8-2.9 volt\n"); ++ if (resp & (1 << 17)) ++ printk("[OCR] 2.9-3.0 volt\n"); ++ if (resp & (1 << 18)) ++ printk("[OCR] 3.0-3.1 volt\n"); ++ if (resp & (1 << 19)) ++ printk("[OCR] 3.1-3.2 volt\n"); ++ if (resp & (1 << 20)) ++ printk("[OCR] 3.2-3.3 volt\n"); ++ if (resp & (1 << 21)) ++ printk("[OCR] 3.3-3.4 volt\n"); ++ if (resp & (1 << 22)) ++ printk("[OCR] 3.4-3.5 volt\n"); ++ if (resp & (1 << 23)) ++ printk("[OCR] 3.5-3.6 volt\n"); ++ if (resp & (1 << 24)) ++ printk("[OCR] Switching to 1.8V Accepted (S18A)\n"); ++ if (resp & (1 << 30)) ++ printk("[OCR] Card Capacity Status (CCS)\n"); 
++ if (resp & (1 << 31)) ++ printk("[OCR] Card Power Up Status (Idle)\n"); ++ else ++ printk("[OCR] Card Power Up Status (Busy)\n"); ++} ++ ++static void msdc_dump_rca_resp(struct msdc_host *host, u32 resp) ++{ ++ u32 status = (((resp >> 15) & 0x1) << 23) | ++ (((resp >> 14) & 0x1) << 22) | ++ (((resp >> 13) & 0x1) << 19) | ++ (resp & 0x1fff); ++ ++ printk("[RCA] 0x%.4x\n", resp >> 16); ++ ++ msdc_dump_card_status(host, status); ++} ++ ++static void msdc_dump_io_resp(struct msdc_host *host, u32 resp) ++{ ++ u32 flags = (resp >> 8) & 0xFF; ++ char *state[] = {"DIS", "CMD", "TRN", "RFU"}; ++ ++ if (flags & (1 << 7)) ++ printk("[IO] COM_CRC_ERR\n"); ++ if (flags & (1 << 6)) ++ printk("[IO] Illgal command\n"); ++ if (flags & (1 << 3)) ++ printk("[IO] Error\n"); ++ if (flags & (1 << 2)) ++ printk("[IO] RFU\n"); ++ if (flags & (1 << 1)) ++ printk("[IO] Function number error\n"); ++ if (flags & (1 << 0)) ++ printk("[IO] Out of range\n"); ++ ++ printk("[IO] State: %s, Data:0x%x\n", state[(resp >> 12) & 0x3], resp & 0xFF); ++} ++#endif ++ ++static void msdc_set_timeout(struct msdc_host *host, u32 ns, u32 clks) ++{ ++ u32 base = host->base; ++ u32 timeout, clk_ns; ++ ++ host->timeout_ns = ns; ++ host->timeout_clks = clks; ++ ++ clk_ns = 1000000000UL / host->sclk; ++ timeout = ns / clk_ns + clks; ++ timeout = timeout >> 16; /* in 65536 sclk cycle unit */ ++ timeout = timeout > 1 ? timeout - 1 : 0; ++ timeout = timeout > 255 ? 255 : timeout; ++ ++ sdr_set_field(SDC_CFG, SDC_CFG_DTOC, timeout); ++ ++/* printk("Set read data timeout: %dns %dclks -> %d x 65536 cycles\n", ++ ns, clks, timeout + 1);*/ ++} ++ ++static void msdc_eirq_sdio(void *data) ++{ ++ struct msdc_host *host = (struct msdc_host *)data; ++ ++// printk("SDIO EINT\n"); ++ ++ mmc_signal_sdio_irq(host->mmc); ++} ++ ++static void msdc_eirq_cd(void *data) ++{ ++ struct msdc_host *host = (struct msdc_host *)data; ++ ++// printk("CD EINT\n"); ++ ++ tasklet_hi_schedule(&host->card_tasklet); ++} ++ ++static void msdc_tasklet_card(unsigned long arg) ++{ ++ struct msdc_host *host = (struct msdc_host *)arg; ++ struct msdc_hw *hw = host->hw; ++ u32 base = host->base; ++ u32 inserted; ++ u32 status = 0; ++ ++ spin_lock(&host->lock); ++ ++ if (hw->get_cd_status) { ++ inserted = hw->get_cd_status(); ++ } else { ++ status = sdr_read32(MSDC_PS); ++ inserted = (status & MSDC_PS_CDSTS) ? 0 : 1; ++ } ++ ++ host->card_inserted = inserted; ++ ++ if (!host->suspend) { ++ host->mmc->f_max = HOST_MAX_MCLK; ++ mmc_detect_change(host->mmc, msecs_to_jiffies(20)); ++ } ++ ++// printk("card found<%s>\n", inserted ? 
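/*
 * msdc_set_timeout() above converts a (ns, clks) budget into the
 * SDC_CFG_DTOC field, which counts in units of 65536 sclk cycles and is
 * capped at 255.  Standalone sketch of just that arithmetic, assuming
 * sclk is the card clock in Hz; msdc_calc_dtoc() is an illustrative
 * name, not a driver symbol.
 */
static u32 msdc_calc_dtoc(u32 ns, u32 clks, u32 sclk)
{
	u32 clk_ns = 1000000000UL / sclk;	/* one sclk period, in ns */
	u32 timeout = ns / clk_ns + clks;	/* budget, in sclk cycles */

	timeout >>= 16;				/* 65536-cycle units */
	timeout = timeout > 1 ? timeout - 1 : 0;
	return timeout > 255 ? 255 : timeout;	/* DTOC is an 8-bit field */
}
/* e.g. ns = 100000000, clks = 0, sclk = 48 MHz: clk_ns = 20, 5000000 >> 16 = 76, DTOC = 75 */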
"inserted" : "removed"); ++ ++ spin_unlock(&host->lock); ++} ++ ++static void msdc_set_mclk(struct msdc_host *host, int ddr, unsigned int hz) ++{ ++ u32 base = host->base; ++ u32 hclk = host->hclk; ++ u32 mode, flags, div, sclk; ++ ++ if (!hz) { ++// printk("set mclk to 0!!!\n"); ++ msdc_reset(); ++ return; ++ } ++ ++ msdc_irq_save(flags); ++ ++ if (ddr) { ++ mode = 0x2; ++ if (hz >= (hclk >> 2)) { ++ div = 1; ++ sclk = hclk >> 2; ++ } else { ++ div = (hclk + ((hz << 2) - 1)) / (hz << 2); ++ sclk = (hclk >> 2) / div; ++ } ++ } else if (hz >= hclk) { ++ mode = 0x1; ++ div = 0; ++ sclk = hclk; ++ } else { ++ mode = 0x0; ++ if (hz >= (hclk >> 1)) { ++ div = 0; ++ sclk = hclk >> 1; ++ } else { ++ div = (hclk + ((hz << 2) - 1)) / (hz << 2); ++ sclk = (hclk >> 2) / div; ++ } ++ } ++ ++ sdr_set_field(MSDC_CFG, MSDC_CFG_CKMOD, mode); ++ sdr_set_field(MSDC_CFG, MSDC_CFG_CKDIV, div); ++ ++ while (!(sdr_read32(MSDC_CFG) & MSDC_CFG_CKSTB)); ++ ++ host->sclk = sclk; ++ host->mclk = hz; ++ msdc_set_timeout(host, host->timeout_ns, host->timeout_clks); ++ ++/* printk("!!! Set<%dKHz> Source<%dKHz> -> sclk<%dKHz>\n", ++ hz / 1000, hclk / 1000, sclk / 1000); ++*/ ++ msdc_irq_restore(flags); ++} ++ ++static void msdc_abort_data(struct msdc_host *host) ++{ ++ u32 base = host->base; ++ struct mmc_command *stop = host->mrq->stop; ++ ++// printk("Need to Abort. dma<%d>\n", host->dma_xfer); ++ ++ msdc_reset(); ++ msdc_clr_fifo(); ++ msdc_clr_int(); ++ ++ if (stop) { ++// printk("stop when abort CMD<%d>\n", stop->opcode); ++ msdc_do_command(host, stop, 0, CMD_TIMEOUT); ++ } ++} ++ ++static unsigned int msdc_command_start(struct msdc_host *host, ++ struct mmc_command *cmd, int tune, unsigned long timeout) ++{ ++ u32 wints = MSDC_INT_CMDRDY | MSDC_INT_RSPCRCERR | MSDC_INT_CMDTMO | ++ MSDC_INT_ACMDRDY | MSDC_INT_ACMDCRCERR | MSDC_INT_ACMDTMO | ++ MSDC_INT_ACMD19_DONE; ++ u32 base = host->base; ++ u32 opcode = cmd->opcode; ++ u32 rawcmd; ++ u32 resp; ++ unsigned long tmo; ++ ++ if (opcode == MMC_SEND_OP_COND || opcode == SD_APP_OP_COND) ++ resp = RESP_R3; ++ else if (opcode == MMC_SET_RELATIVE_ADDR || opcode == SD_SEND_RELATIVE_ADDR) ++ resp = (mmc_cmd_type(cmd) == MMC_CMD_BCR) ? RESP_R6 : RESP_R1; ++ else if (opcode == MMC_FAST_IO) ++ resp = RESP_R4; ++ else if (opcode == MMC_GO_IRQ_STATE) ++ resp = RESP_R5; ++ else if (opcode == MMC_SELECT_CARD) ++ resp = (cmd->arg != 0) ? 
RESP_R1B : RESP_NONE; ++ else if (opcode == SD_IO_RW_DIRECT || opcode == SD_IO_RW_EXTENDED) ++ resp = RESP_R1; ++ else if (opcode == SD_SEND_IF_COND && (mmc_cmd_type(cmd) == MMC_CMD_BCR)) ++ resp = RESP_R1; ++ else { ++ switch (mmc_resp_type(cmd)) { ++ case MMC_RSP_R1: ++ resp = RESP_R1; ++ break; ++ case MMC_RSP_R1B: ++ resp = RESP_R1B; ++ break; ++ case MMC_RSP_R2: ++ resp = RESP_R2; ++ break; ++ case MMC_RSP_R3: ++ resp = RESP_R3; ++ break; ++ case MMC_RSP_NONE: ++ default: ++ resp = RESP_NONE; ++ break; ++ } ++ } ++ ++ cmd->error = 0; ++ rawcmd = opcode | msdc_rsp[resp] << 7 | host->blksz << 16; ++ ++ if (opcode == MMC_READ_MULTIPLE_BLOCK) { ++ rawcmd |= (2 << 11); ++ } else if (opcode == MMC_READ_SINGLE_BLOCK) { ++ rawcmd |= (1 << 11); ++ } else if (opcode == MMC_WRITE_MULTIPLE_BLOCK) { ++ rawcmd |= ((2 << 11) | (1 << 13)); ++ } else if (opcode == MMC_WRITE_BLOCK) { ++ rawcmd |= ((1 << 11) | (1 << 13)); ++ } else if (opcode == SD_IO_RW_EXTENDED) { ++ if (cmd->data->flags & MMC_DATA_WRITE) ++ rawcmd |= (1 << 13); ++ if (cmd->data->blocks > 1) ++ rawcmd |= (2 << 11); ++ else ++ rawcmd |= (1 << 11); ++ } else if (opcode == SD_IO_RW_DIRECT && cmd->flags == (unsigned int)-1) { ++ rawcmd |= (1 << 14); ++ } else if ((opcode == SD_APP_SEND_SCR) || ++ (opcode == SD_APP_SEND_NUM_WR_BLKS) || ++ (opcode == SD_SWITCH && (mmc_cmd_type(cmd) == MMC_CMD_ADTC)) || ++ (opcode == SD_APP_SD_STATUS && (mmc_cmd_type(cmd) == MMC_CMD_ADTC)) || ++ (opcode == MMC_SEND_EXT_CSD && (mmc_cmd_type(cmd) == MMC_CMD_ADTC))) { ++ rawcmd |= (1 << 11); ++ } else if (opcode == MMC_STOP_TRANSMISSION) { ++ rawcmd |= (1 << 14); ++ rawcmd &= ~(0x0FFF << 16); ++ } ++ ++// printk("CMD<%d><0x%.8x> Arg<0x%.8x>\n", opcode , rawcmd, cmd->arg); ++ ++ tmo = jiffies + timeout; ++ ++ if (opcode == MMC_SEND_STATUS) { ++ for (;;) { ++ if (!sdc_is_cmd_busy()) ++ break; ++ ++ if (time_after(jiffies, tmo)) { ++ //printk("XXX cmd_busy timeout: before CMD<%d>\n", opcode); ++ cmd->error = (unsigned int)-ETIMEDOUT; ++ msdc_reset(); ++ goto end; ++ } ++ } ++ } else { ++ for (;;) { ++ if (!sdc_is_busy()) ++ break; ++ if (time_after(jiffies, tmo)) { ++ //printk("XXX sdc_busy timeout: before CMD<%d>\n", opcode); ++ cmd->error = (unsigned int)-ETIMEDOUT; ++ msdc_reset(); ++ goto end; ++ } ++ } ++ } ++ ++ //BUG_ON(in_interrupt()); ++ host->cmd = cmd; ++ host->cmd_rsp = resp; ++ init_completion(&host->cmd_done); ++ sdr_set_bits(MSDC_INTEN, wints); ++ sdc_send_cmd(rawcmd, cmd->arg); ++ ++end: ++ return cmd->error; ++} ++ ++static unsigned int msdc_command_resp(struct msdc_host *host, struct mmc_command *cmd, ++ int tune, unsigned long timeout) ++{ ++ u32 base = host->base; ++ //u32 opcode = cmd->opcode; ++ u32 resp; ++ u32 wints = MSDC_INT_CMDRDY | MSDC_INT_RSPCRCERR | MSDC_INT_CMDTMO | ++ MSDC_INT_ACMDRDY | MSDC_INT_ACMDCRCERR | MSDC_INT_ACMDTMO | ++ MSDC_INT_ACMD19_DONE; ++ ++ resp = host->cmd_rsp; ++ ++ BUG_ON(in_interrupt()); ++ spin_unlock(&host->lock); ++ if (!wait_for_completion_timeout(&host->cmd_done, 10*timeout)) { ++ //printk("XXX CMD<%d> wait_for_completion timeout ARG<0x%.8x>\n", opcode, cmd->arg); ++ cmd->error = (unsigned int)-ETIMEDOUT; ++ msdc_reset(); ++ } ++ spin_lock(&host->lock); ++ ++ sdr_clr_bits(MSDC_INTEN, wints); ++ host->cmd = NULL; ++ ++ if (!tune) ++ return cmd->error; ++ ++ /* memory card CRC */ ++ if (host->hw->flags & MSDC_REMOVABLE && cmd->error == (unsigned int)(-EIO) ) { ++ if (sdr_read32(SDC_CMD) & 0x1800) { ++ msdc_abort_data(host); ++ } else { ++ msdc_reset(); ++ msdc_clr_fifo(); ++ msdc_clr_int(); ++ } ++ 
cmd->error = msdc_tune_cmdrsp(host,cmd); ++ } ++ ++ return cmd->error; ++} ++ ++static unsigned int msdc_do_command(struct msdc_host *host, struct mmc_command *cmd, ++ int tune, unsigned long timeout) ++{ ++ if (!msdc_command_start(host, cmd, tune, timeout)) ++ msdc_command_resp(host, cmd, tune, timeout); ++ ++ //printk(" return<%d> resp<0x%.8x>\n", cmd->error, cmd->resp[0]); ++ return cmd->error; ++} ++ ++static int msdc_pio_abort(struct msdc_host *host, struct mmc_data *data, unsigned long tmo) ++{ ++ u32 base = host->base; ++ int ret = 0; ++ ++ if (atomic_read(&host->abort)) ++ ret = 1; ++ ++ if (time_after(jiffies, tmo)) { ++ data->error = (unsigned int)-ETIMEDOUT; ++ //printk("XXX PIO Data Timeout: CMD<%d>\n", host->mrq->cmd->opcode); ++ ret = 1; ++ } ++ ++ if (ret) { ++ msdc_reset(); ++ msdc_clr_fifo(); ++ msdc_clr_int(); ++ //printk("msdc pio find abort\n"); ++ } ++ ++ return ret; ++} ++ ++static int msdc_pio_read(struct msdc_host *host, struct mmc_data *data) ++{ ++ struct scatterlist *sg = data->sg; ++ u32 base = host->base; ++ u32 num = data->sg_len; ++ u32 *ptr; ++ u8 *u8ptr; ++ u32 left; ++ u32 count, size = 0; ++ u32 wints = MSDC_INTEN_DATTMO | MSDC_INTEN_DATCRCERR; ++ unsigned long tmo = jiffies + DAT_TIMEOUT; ++ ++ sdr_set_bits(MSDC_INTEN, wints); ++ while (num) { ++ left = sg_dma_len(sg); ++ ptr = sg_virt(sg); ++ while (left) { ++ if ((left >= MSDC_FIFO_THD) && (msdc_rxfifocnt() >= MSDC_FIFO_THD)) { ++ count = MSDC_FIFO_THD >> 2; ++ do { ++ *ptr++ = msdc_fifo_read32(); ++ } while (--count); ++ left -= MSDC_FIFO_THD; ++ } else if ((left < MSDC_FIFO_THD) && msdc_rxfifocnt() >= left) { ++ while (left > 3) { ++ *ptr++ = msdc_fifo_read32(); ++ left -= 4; ++ } ++ ++ u8ptr = (u8 *)ptr; ++ while(left) { ++ * u8ptr++ = msdc_fifo_read8(); ++ left--; ++ } ++ } ++ ++ if (msdc_pio_abort(host, data, tmo)) ++ goto end; ++ } ++ size += sg_dma_len(sg); ++ sg = sg_next(sg); num--; ++ } ++end: ++ data->bytes_xfered += size; ++ //printk(" PIO Read<%d>bytes\n", size); ++ ++ sdr_clr_bits(MSDC_INTEN, wints); ++ if(data->error) ++ printk("read pio data->error<%d> left<%d> size<%d>\n", data->error, left, size); ++ ++ return data->error; ++} ++ ++static int msdc_pio_write(struct msdc_host* host, struct mmc_data *data) ++{ ++ u32 base = host->base; ++ struct scatterlist *sg = data->sg; ++ u32 num = data->sg_len; ++ u32 *ptr; ++ u8 *u8ptr; ++ u32 left; ++ u32 count, size = 0; ++ u32 wints = MSDC_INTEN_DATTMO | MSDC_INTEN_DATCRCERR; ++ unsigned long tmo = jiffies + DAT_TIMEOUT; ++ ++ sdr_set_bits(MSDC_INTEN, wints); ++ while (num) { ++ left = sg_dma_len(sg); ++ ptr = sg_virt(sg); ++ ++ while (left) { ++ if (left >= MSDC_FIFO_SZ && msdc_txfifocnt() == 0) { ++ count = MSDC_FIFO_SZ >> 2; ++ do { ++ msdc_fifo_write32(*ptr); ptr++; ++ } while (--count); ++ left -= MSDC_FIFO_SZ; ++ } else if (left < MSDC_FIFO_SZ && msdc_txfifocnt() == 0) { ++ while (left > 3) { ++ msdc_fifo_write32(*ptr); ptr++; ++ left -= 4; ++ } ++ ++ u8ptr = (u8*)ptr; ++ while( left) { ++ msdc_fifo_write8(*u8ptr); ++ u8ptr++; ++ left--; ++ } ++ } ++ ++ if (msdc_pio_abort(host, data, tmo)) ++ goto end; ++ } ++ size += sg_dma_len(sg); ++ sg = sg_next(sg); num--; ++ } ++end: ++ data->bytes_xfered += size; ++ //printk(" PIO Write<%d>bytes\n", size); ++ if(data->error) ++ printk("write pio data->error<%d>\n", data->error); ++ ++ sdr_clr_bits(MSDC_INTEN, wints); ++ ++ return data->error; ++} ++ ++static void msdc_dma_start(struct msdc_host *host) ++{ ++ u32 base = host->base; ++ u32 wints = MSDC_INTEN_XFER_COMPL | MSDC_INTEN_DATTMO | 
MSDC_INTEN_DATCRCERR; ++ ++ sdr_set_bits(MSDC_INTEN, wints); ++ sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_START, 1); ++ ++ //printk("DMA start\n"); ++} ++ ++static void msdc_dma_stop(struct msdc_host *host) ++{ ++ u32 base = host->base; ++ u32 wints = MSDC_INTEN_XFER_COMPL | MSDC_INTEN_DATTMO | MSDC_INTEN_DATCRCERR; ++ ++ //printk("DMA status: 0x%.8x\n",sdr_read32(MSDC_DMA_CFG)); ++ ++ sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_STOP, 1); ++ while (sdr_read32(MSDC_DMA_CFG) & MSDC_DMA_CFG_STS); ++ sdr_clr_bits(MSDC_INTEN, wints); /* Not just xfer_comp */ ++ ++ //printk("DMA stop\n"); ++} ++ ++static u8 msdc_dma_calcs(u8 *buf, u32 len) ++{ ++ u32 i, sum = 0; ++ ++ for (i = 0; i < len; i++) ++ sum += buf[i]; ++ ++ return 0xFF - (u8)sum; ++} ++ ++static int msdc_dma_config(struct msdc_host *host, struct msdc_dma *dma) ++{ ++ u32 base = host->base; ++ u32 sglen = dma->sglen; ++ u32 j, num, bdlen; ++ u8 blkpad, dwpad, chksum; ++ struct scatterlist *sg = dma->sg; ++ gpd_t *gpd; ++ bd_t *bd; ++ ++ switch (dma->mode) { ++ case MSDC_MODE_DMA_BASIC: ++ BUG_ON(dma->xfersz > 65535); ++ BUG_ON(dma->sglen != 1); ++ sdr_write32(MSDC_DMA_SA, PHYSADDR(sg_dma_address(sg))); ++ sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_LASTBUF, 1); ++ sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_XFERSZ, sg_dma_len(sg)); ++ sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_BRUSTSZ, dma->burstsz); ++ sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_MODE, 0); ++ break; ++ ++ case MSDC_MODE_DMA_DESC: ++ blkpad = (dma->flags & DMA_FLAG_PAD_BLOCK) ? 1 : 0; ++ dwpad = (dma->flags & DMA_FLAG_PAD_DWORD) ? 1 : 0; ++ chksum = (dma->flags & DMA_FLAG_EN_CHKSUM) ? 1 : 0; ++ ++ num = (sglen + MAX_BD_PER_GPD - 1) / MAX_BD_PER_GPD; ++ BUG_ON(num !=1 ); ++ ++ gpd = dma->gpd; ++ bd = dma->bd; ++ bdlen = sglen; ++ ++ gpd->hwo = 1; /* hw will clear it */ ++ gpd->bdp = 1; ++ gpd->chksum = 0; /* need to clear first. */ ++ gpd->chksum = (chksum ? msdc_dma_calcs((u8 *)gpd, 16) : 0); ++ ++ for (j = 0; j < bdlen; j++) { ++ msdc_init_bd(&bd[j], blkpad, dwpad, sg_dma_address(sg), sg_dma_len(sg)); ++ if( j == bdlen - 1) ++ bd[j].eol = 1; ++ else ++ bd[j].eol = 0; ++ bd[j].chksum = 0; /* checksume need to clear first */ ++ bd[j].chksum = (chksum ? 
msdc_dma_calcs((u8 *)(&bd[j]), 16) : 0); ++ sg++; ++ } ++ ++ dma->used_gpd += 2; ++ dma->used_bd += bdlen; ++ ++ sdr_set_field(MSDC_DMA_CFG, MSDC_DMA_CFG_DECSEN, chksum); ++ sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_BRUSTSZ, dma->burstsz); ++ sdr_set_field(MSDC_DMA_CTRL, MSDC_DMA_CTRL_MODE, 1); ++ sdr_write32(MSDC_DMA_SA, PHYSADDR((u32)dma->gpd_addr)); ++ break; ++ } ++ ++// printk("DMA_CTRL = 0x%x\n", sdr_read32(MSDC_DMA_CTRL)); ++// printk("DMA_CFG = 0x%x\n", sdr_read32(MSDC_DMA_CFG)); ++// printk("DMA_SA = 0x%x\n", sdr_read32(MSDC_DMA_SA)); ++ ++ return 0; ++} ++ ++static void msdc_dma_setup(struct msdc_host *host, struct msdc_dma *dma, ++ struct scatterlist *sg, unsigned int sglen) ++{ ++ BUG_ON(sglen > MAX_BD_NUM); ++ ++ dma->sg = sg; ++ dma->flags = DMA_FLAG_EN_CHKSUM; ++ dma->sglen = sglen; ++ dma->xfersz = host->xfer_size; ++ dma->burstsz = MSDC_BRUST_64B; ++ ++ if (sglen == 1 && sg_dma_len(sg) <= MAX_DMA_CNT) ++ dma->mode = MSDC_MODE_DMA_BASIC; ++ else ++ dma->mode = MSDC_MODE_DMA_DESC; ++ ++// printk("DMA mode<%d> sglen<%d> xfersz<%d>\n", dma->mode, dma->sglen, dma->xfersz); ++ ++ msdc_dma_config(host, dma); ++} ++ ++static void msdc_set_blknum(struct msdc_host *host, u32 blknum) ++{ ++ u32 base = host->base; ++ ++ sdr_write32(SDC_BLK_NUM, blknum); ++} ++ ++static int msdc_do_request(struct mmc_host*mmc, struct mmc_request*mrq) ++{ ++ struct msdc_host *host = mmc_priv(mmc); ++ struct mmc_command *cmd; ++ struct mmc_data *data; ++ u32 base = host->base; ++ unsigned int left=0; ++ int dma = 0, read = 1, dir = DMA_FROM_DEVICE, send_type=0; ++ ++#define SND_DAT 0 ++#define SND_CMD 1 ++ ++ BUG_ON(mmc == NULL); ++ BUG_ON(mrq == NULL); ++ ++ host->error = 0; ++ atomic_set(&host->abort, 0); ++ ++ cmd = mrq->cmd; ++ data = mrq->cmd->data; ++ ++ if (!data) { ++ send_type = SND_CMD; ++ if (msdc_do_command(host, cmd, 1, CMD_TIMEOUT) != 0) ++ goto done; ++ } else { ++ BUG_ON(data->blksz > HOST_MAX_BLKSZ); ++ send_type=SND_DAT; ++ ++ data->error = 0; ++ read = data->flags & MMC_DATA_READ ? 1 : 0; ++ host->data = data; ++ host->xfer_size = data->blocks * data->blksz; ++ host->blksz = data->blksz; ++ ++ host->dma_xfer = dma = ((host->xfer_size >= 512) ? 1 : 0); ++ ++ if (read) ++ if ((host->timeout_ns != data->timeout_ns) || ++ (host->timeout_clks != data->timeout_clks)) ++ msdc_set_timeout(host, data->timeout_ns, data->timeout_clks); ++ ++ msdc_set_blknum(host, data->blocks); ++ ++ if (dma) { ++ msdc_dma_on(); ++ init_completion(&host->xfer_done); ++ ++ if (msdc_command_start(host, cmd, 1, CMD_TIMEOUT) != 0) ++ goto done; ++ ++ dir = read ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE; ++ dma_map_sg(mmc_dev(mmc), data->sg, data->sg_len, dir); ++ msdc_dma_setup(host, &host->dma, data->sg, data->sg_len); ++ ++ if (msdc_command_resp(host, cmd, 1, CMD_TIMEOUT) != 0) ++ goto done; ++ ++ msdc_dma_start(host); ++ ++ spin_unlock(&host->lock); ++ if (!wait_for_completion_timeout(&host->xfer_done, DAT_TIMEOUT)) { ++ /*printk("XXX CMD<%d> wait xfer_done<%d> timeout!!\n", cmd->opcode, data->blocks * data->blksz); ++ printk(" DMA_SA = 0x%x\n", sdr_read32(MSDC_DMA_SA)); ++ printk(" DMA_CA = 0x%x\n", sdr_read32(MSDC_DMA_CA)); ++ printk(" DMA_CTRL = 0x%x\n", sdr_read32(MSDC_DMA_CTRL)); ++ printk(" DMA_CFG = 0x%x\n", sdr_read32(MSDC_DMA_CFG));*/ ++ data->error = (unsigned int)-ETIMEDOUT; ++ ++ msdc_reset(); ++ msdc_clr_fifo(); ++ msdc_clr_int(); ++ } ++ spin_lock(&host->lock); ++ msdc_dma_stop(host); ++ } else { ++ if (msdc_do_command(host, cmd, 1, CMD_TIMEOUT) != 0) ++ goto done; ++ ++ if (read) { ++ if (msdc_pio_read(host, data)) ++ goto done; ++ } else { ++ if (msdc_pio_write(host, data)) ++ goto done; ++ } ++ ++ if (!read) { ++ while (1) { ++ left = msdc_txfifocnt(); ++ if (left == 0) { ++ break; ++ } ++ if (msdc_pio_abort(host, data, jiffies + DAT_TIMEOUT)) { ++ break; ++ /* Fix me: what about if data error, when stop ? how to? */ ++ } ++ } ++ } else { ++ /* Fix me: read case: need to check CRC error */ ++ } ++ ++ /* For write case: SDCBUSY and Xfer_Comp will assert when DAT0 not busy. ++ For read case : SDCBUSY and Xfer_Comp will assert when last byte read out from FIFO. ++ */ ++ ++ /* try not to wait xfer_comp interrupt. ++ the next command will check SDC_BUSY. ++ SDC_BUSY means xfer_comp assert ++ */ ++ ++ } // PIO mode ++ ++ /* Last: stop transfer */ ++ if (data->stop){ ++ if (msdc_do_command(host, data->stop, 0, CMD_TIMEOUT) != 0) { ++ goto done; ++ } ++ } ++ } ++ ++done: ++ if (data != NULL) { ++ host->data = NULL; ++ host->dma_xfer = 0; ++ if (dma != 0) { ++ msdc_dma_off(); ++ host->dma.used_bd = 0; ++ host->dma.used_gpd = 0; ++ dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len, dir); ++ } ++ host->blksz = 0; ++ ++ // printk("CMD<%d> data<%s %s> blksz<%d> block<%d> error<%d>",cmd->opcode, (dma? "dma":"pio\n"), ++ // (read ? 
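/*
 * msdc_dma_calcs() above fills the checksum byte of each 16-byte gpd/bd
 * descriptor with 0xFF minus the byte-sum of the descriptor, so the
 * byte-sum of the finished descriptor (checksum included) is 0xFF modulo
 * 256.  Hypothetical self-check helper illustrating that invariant; it
 * is not part of the driver.
 */
static int msdc_dma_desc_sum_ok(const u8 *desc, u32 len)
{
	u32 i, sum = 0;

	for (i = 0; i < len; i++)
		sum += desc[i];

	return (u8)sum == 0xFF;	/* holds once the chksum byte is written */
}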
"read ":"write") ,data->blksz, data->blocks, data->error); ++ } ++ ++ if (mrq->cmd->error) host->error = 0x001; ++ if (mrq->data && mrq->data->error) host->error |= 0x010; ++ if (mrq->stop && mrq->stop->error) host->error |= 0x100; ++ ++ //if (host->error) printk("host->error<%d>\n", host->error); ++ ++ return host->error; ++} ++ ++static int msdc_app_cmd(struct mmc_host *mmc, struct msdc_host *host) ++{ ++ struct mmc_command cmd; ++ struct mmc_request mrq; ++ u32 err; ++ ++ memset(&cmd, 0, sizeof(struct mmc_command)); ++ cmd.opcode = MMC_APP_CMD; ++#if 0 /* bug: we meet mmc->card is null when ACMD6 */ ++ cmd.arg = mmc->card->rca << 16; ++#else ++ cmd.arg = host->app_cmd_arg; ++#endif ++ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; ++ ++ memset(&mrq, 0, sizeof(struct mmc_request)); ++ mrq.cmd = &cmd; cmd.mrq = &mrq; ++ cmd.data = NULL; ++ ++ err = msdc_do_command(host, &cmd, 0, CMD_TIMEOUT); ++ return err; ++} ++ ++static int msdc_tune_cmdrsp(struct msdc_host*host, struct mmc_command *cmd) ++{ ++ int result = -1; ++ u32 base = host->base; ++ u32 rsmpl, cur_rsmpl, orig_rsmpl; ++ u32 rrdly, cur_rrdly = 0, orig_rrdly; ++ u32 skip = 1; ++ ++ /* ==== don't support 3.0 now ==== ++ 1: R_SMPL[1] ++ 2: PAD_CMD_RESP_RXDLY[26:22] ++ ==========================*/ ++ ++ // save the previous tune result ++ sdr_get_field(MSDC_IOCON, MSDC_IOCON_RSPL, orig_rsmpl); ++ sdr_get_field(MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRRDLY, orig_rrdly); ++ ++ rrdly = 0; ++ do { ++ for (rsmpl = 0; rsmpl < 2; rsmpl++) { ++ /* Lv1: R_SMPL[1] */ ++ cur_rsmpl = (orig_rsmpl + rsmpl) % 2; ++ if (skip == 1) { ++ skip = 0; ++ continue; ++ } ++ sdr_set_field(MSDC_IOCON, MSDC_IOCON_RSPL, cur_rsmpl); ++ ++ if (host->app_cmd) { ++ result = msdc_app_cmd(host->mmc, host); ++ if (result) { ++ //printk("TUNE_CMD app_cmd<%d> failed: RESP_RXDLY<%d>,R_SMPL<%d>\n", ++ // host->mrq->cmd->opcode, cur_rrdly, cur_rsmpl); ++ continue; ++ } ++ } ++ result = msdc_do_command(host, cmd, 0, CMD_TIMEOUT); // not tune. ++ //printk("TUNE_CMD<%d> %s PAD_CMD_RESP_RXDLY[26:22]<%d> R_SMPL[1]<%d>\n", cmd->opcode, ++// (result == 0) ? "PASS" : "FAIL", cur_rrdly, cur_rsmpl); ++ ++ if (result == 0) { ++ return 0; ++ } ++ if (result != (unsigned int)(-EIO)) { ++ // printk("TUNE_CMD<%d> Error<%d> not -EIO\n", cmd->opcode, result); ++ return result; ++ } ++ ++ /* should be EIO */ ++ if (sdr_read32(SDC_CMD) & 0x1800) { /* check if has data phase */ ++ msdc_abort_data(host); ++ } ++ } ++ ++ /* Lv2: PAD_CMD_RESP_RXDLY[26:22] */ ++ cur_rrdly = (orig_rrdly + rrdly + 1) % 32; ++ sdr_set_field(MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRRDLY, cur_rrdly); ++ }while (++rrdly < 32); ++ ++ return result; ++} ++ ++/* Support SD2.0 Only */ ++static int msdc_tune_bread(struct mmc_host *mmc, struct mmc_request *mrq) ++{ ++ struct msdc_host *host = mmc_priv(mmc); ++ u32 base = host->base; ++ u32 ddr=0; ++ u32 dcrc = 0; ++ u32 rxdly, cur_rxdly0, cur_rxdly1; ++ u32 dsmpl, cur_dsmpl, orig_dsmpl; ++ u32 cur_dat0, cur_dat1, cur_dat2, cur_dat3; ++ u32 cur_dat4, cur_dat5, cur_dat6, cur_dat7; ++ u32 orig_dat0, orig_dat1, orig_dat2, orig_dat3; ++ u32 orig_dat4, orig_dat5, orig_dat6, orig_dat7; ++ int result = -1; ++ u32 skip = 1; ++ ++ sdr_get_field(MSDC_IOCON, MSDC_IOCON_DSPL, orig_dsmpl); ++ ++ /* Tune Method 2. 
*/ ++ sdr_set_field(MSDC_IOCON, MSDC_IOCON_DDLSEL, 1); ++ ++ rxdly = 0; ++ do { ++ for (dsmpl = 0; dsmpl < 2; dsmpl++) { ++ cur_dsmpl = (orig_dsmpl + dsmpl) % 2; ++ if (skip == 1) { ++ skip = 0; ++ continue; ++ } ++ sdr_set_field(MSDC_IOCON, MSDC_IOCON_DSPL, cur_dsmpl); ++ ++ if (host->app_cmd) { ++ result = msdc_app_cmd(host->mmc, host); ++ if (result) { ++ //printk("TUNE_BREAD app_cmd<%d> failed\n", host->mrq->cmd->opcode); ++ continue; ++ } ++ } ++ result = msdc_do_request(mmc,mrq); ++ ++ sdr_get_field(SDC_DCRC_STS, SDC_DCRC_STS_POS|SDC_DCRC_STS_NEG, dcrc); /* RO */ ++ if (!ddr) dcrc &= ~SDC_DCRC_STS_NEG; ++ //printk("TUNE_BREAD<%s> dcrc<0x%x> DATRDDLY0/1<0x%x><0x%x> dsmpl<0x%x>\n", ++ // (result == 0 && dcrc == 0) ? "PASS" : "FAIL", dcrc, ++ // sdr_read32(MSDC_DAT_RDDLY0), sdr_read32(MSDC_DAT_RDDLY1), cur_dsmpl); ++ ++ /* Fix me: result is 0, but dcrc is still exist */ ++ if (result == 0 && dcrc == 0) { ++ goto done; ++ } else { ++ /* there is a case: command timeout, and data phase not processed */ ++ if (mrq->data->error != 0 && mrq->data->error != (unsigned int)(-EIO)) { ++ //printk("TUNE_READ: result<0x%x> cmd_error<%d> data_error<%d>\n", ++ // result, mrq->cmd->error, mrq->data->error); ++ goto done; ++ } ++ } ++ } ++ ++ cur_rxdly0 = sdr_read32(MSDC_DAT_RDDLY0); ++ cur_rxdly1 = sdr_read32(MSDC_DAT_RDDLY1); ++ ++ /* E1 ECO. YD: Reverse */ ++ if (sdr_read32(MSDC_ECO_VER) >= 4) { ++ orig_dat0 = (cur_rxdly0 >> 24) & 0x1F; ++ orig_dat1 = (cur_rxdly0 >> 16) & 0x1F; ++ orig_dat2 = (cur_rxdly0 >> 8) & 0x1F; ++ orig_dat3 = (cur_rxdly0 >> 0) & 0x1F; ++ orig_dat4 = (cur_rxdly1 >> 24) & 0x1F; ++ orig_dat5 = (cur_rxdly1 >> 16) & 0x1F; ++ orig_dat6 = (cur_rxdly1 >> 8) & 0x1F; ++ orig_dat7 = (cur_rxdly1 >> 0) & 0x1F; ++ } else { ++ orig_dat0 = (cur_rxdly0 >> 0) & 0x1F; ++ orig_dat1 = (cur_rxdly0 >> 8) & 0x1F; ++ orig_dat2 = (cur_rxdly0 >> 16) & 0x1F; ++ orig_dat3 = (cur_rxdly0 >> 24) & 0x1F; ++ orig_dat4 = (cur_rxdly1 >> 0) & 0x1F; ++ orig_dat5 = (cur_rxdly1 >> 8) & 0x1F; ++ orig_dat6 = (cur_rxdly1 >> 16) & 0x1F; ++ orig_dat7 = (cur_rxdly1 >> 24) & 0x1F; ++ } ++ ++ if (ddr) { ++ cur_dat0 = (dcrc & (1 << 0) || dcrc & (1 << 8)) ? ((orig_dat0 + 1) % 32) : orig_dat0; ++ cur_dat1 = (dcrc & (1 << 1) || dcrc & (1 << 9)) ? ((orig_dat1 + 1) % 32) : orig_dat1; ++ cur_dat2 = (dcrc & (1 << 2) || dcrc & (1 << 10)) ? ((orig_dat2 + 1) % 32) : orig_dat2; ++ cur_dat3 = (dcrc & (1 << 3) || dcrc & (1 << 11)) ? ((orig_dat3 + 1) % 32) : orig_dat3; ++ } else { ++ cur_dat0 = (dcrc & (1 << 0)) ? ((orig_dat0 + 1) % 32) : orig_dat0; ++ cur_dat1 = (dcrc & (1 << 1)) ? ((orig_dat1 + 1) % 32) : orig_dat1; ++ cur_dat2 = (dcrc & (1 << 2)) ? ((orig_dat2 + 1) % 32) : orig_dat2; ++ cur_dat3 = (dcrc & (1 << 3)) ? ((orig_dat3 + 1) % 32) : orig_dat3; ++ } ++ cur_dat4 = (dcrc & (1 << 4)) ? ((orig_dat4 + 1) % 32) : orig_dat4; ++ cur_dat5 = (dcrc & (1 << 5)) ? ((orig_dat5 + 1) % 32) : orig_dat5; ++ cur_dat6 = (dcrc & (1 << 6)) ? ((orig_dat6 + 1) % 32) : orig_dat6; ++ cur_dat7 = (dcrc & (1 << 7)) ? 
((orig_dat7 + 1) % 32) : orig_dat7; ++ ++ cur_rxdly0 = (cur_dat0 << 24) | (cur_dat1 << 16) | (cur_dat2 << 8) | (cur_dat3 << 0); ++ cur_rxdly1 = (cur_dat4 << 24) | (cur_dat5 << 16) | (cur_dat6 << 8) | (cur_dat7 << 0); ++ ++ sdr_write32(MSDC_DAT_RDDLY0, cur_rxdly0); ++ sdr_write32(MSDC_DAT_RDDLY1, cur_rxdly1); ++ ++ } while (++rxdly < 32); ++ ++done: ++ return result; ++} ++ ++static int msdc_tune_bwrite(struct mmc_host *mmc,struct mmc_request *mrq) ++{ ++ struct msdc_host *host = mmc_priv(mmc); ++ u32 base = host->base; ++ ++ u32 wrrdly, cur_wrrdly = 0, orig_wrrdly; ++ u32 dsmpl, cur_dsmpl, orig_dsmpl; ++ u32 rxdly, cur_rxdly0; ++ u32 orig_dat0, orig_dat1, orig_dat2, orig_dat3; ++ u32 cur_dat0, cur_dat1, cur_dat2, cur_dat3; ++ int result = -1; ++ u32 skip = 1; ++ ++ // MSDC_IOCON_DDR50CKD need to check. [Fix me] ++ ++ sdr_get_field(MSDC_PAD_TUNE, MSDC_PAD_TUNE_DATWRDLY, orig_wrrdly); ++ sdr_get_field(MSDC_IOCON, MSDC_IOCON_DSPL, orig_dsmpl ); ++ ++ /* Tune Method 2. just DAT0 */ ++ sdr_set_field(MSDC_IOCON, MSDC_IOCON_DDLSEL, 1); ++ cur_rxdly0 = sdr_read32(MSDC_DAT_RDDLY0); ++ ++ /* E1 ECO. YD: Reverse */ ++ if (sdr_read32(MSDC_ECO_VER) >= 4) { ++ orig_dat0 = (cur_rxdly0 >> 24) & 0x1F; ++ orig_dat1 = (cur_rxdly0 >> 16) & 0x1F; ++ orig_dat2 = (cur_rxdly0 >> 8) & 0x1F; ++ orig_dat3 = (cur_rxdly0 >> 0) & 0x1F; ++ } else { ++ orig_dat0 = (cur_rxdly0 >> 0) & 0x1F; ++ orig_dat1 = (cur_rxdly0 >> 8) & 0x1F; ++ orig_dat2 = (cur_rxdly0 >> 16) & 0x1F; ++ orig_dat3 = (cur_rxdly0 >> 24) & 0x1F; ++ } ++ ++ rxdly = 0; ++ do { ++ wrrdly = 0; ++ do { ++ for (dsmpl = 0; dsmpl < 2; dsmpl++) { ++ cur_dsmpl = (orig_dsmpl + dsmpl) % 2; ++ if (skip == 1) { ++ skip = 0; ++ continue; ++ } ++ sdr_set_field(MSDC_IOCON, MSDC_IOCON_DSPL, cur_dsmpl); ++ ++ if (host->app_cmd) { ++ result = msdc_app_cmd(host->mmc, host); ++ if (result) { ++ //printk("TUNE_BWRITE app_cmd<%d> failed\n", host->mrq->cmd->opcode); ++ continue; ++ } ++ } ++ result = msdc_do_request(mmc,mrq); ++ ++ //printk("TUNE_BWRITE<%s> DSPL<%d> DATWRDLY<%d> MSDC_DAT_RDDLY0<0x%x>\n", ++ // result == 0 ? 
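/*
 * The read-tuning loop above reads SDC_DCRC_STS once per attempt and,
 * for every DAT line whose CRC-error bit is set, advances that line's
 * 5-bit RX delay by one tap (modulo 32) while leaving clean lines alone.
 * Minimal sketch of that per-line rule for the non-DDR case;
 * sketch_next_rxdly() is an illustrative name, not a driver symbol.
 */
static u32 sketch_next_rxdly(u32 cur_dly, int line, u32 dcrc)
{
	if (dcrc & (1 << line))
		return (cur_dly + 1) % 32;	/* try the next delay tap */

	return cur_dly;				/* this line sampled cleanly */
}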
"PASS" : "FAIL", ++ // cur_dsmpl, cur_wrrdly, cur_rxdly0); ++ ++ if (result == 0) { ++ goto done; ++ } ++ else { ++ /* there is a case: command timeout, and data phase not processed */ ++ if (mrq->data->error != (unsigned int)(-EIO)) { ++ //printk("TUNE_READ: result<0x%x> cmd_error<%d> data_error<%d>\n", ++ // && result, mrq->cmd->error, mrq->data->error); ++ goto done; ++ } ++ } ++ } ++ cur_wrrdly = (orig_wrrdly + wrrdly + 1) % 32; ++ sdr_set_field(MSDC_PAD_TUNE, MSDC_PAD_TUNE_DATWRDLY, cur_wrrdly); ++ } while (++wrrdly < 32); ++ ++ cur_dat0 = (orig_dat0 + rxdly) % 32; /* only adjust bit-1 for crc */ ++ cur_dat1 = orig_dat1; ++ cur_dat2 = orig_dat2; ++ cur_dat3 = orig_dat3; ++ ++ cur_rxdly0 = (cur_dat0 << 24) | (cur_dat1 << 16) | (cur_dat2 << 8) | (cur_dat3 << 0); ++ sdr_write32(MSDC_DAT_RDDLY0, cur_rxdly0); ++ } while (++rxdly < 32); ++ ++done: ++ return result; ++} ++ ++static int msdc_get_card_status(struct mmc_host *mmc, struct msdc_host *host, u32 *status) ++{ ++ struct mmc_command cmd; ++ struct mmc_request mrq; ++ u32 err; ++ ++ memset(&cmd, 0, sizeof(struct mmc_command)); ++ cmd.opcode = MMC_SEND_STATUS; ++ if (mmc->card) { ++ cmd.arg = mmc->card->rca << 16; ++ } else { ++ //printk("cmd13 mmc card is null\n"); ++ cmd.arg = host->app_cmd_arg; ++ } ++ cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC; ++ ++ memset(&mrq, 0, sizeof(struct mmc_request)); ++ mrq.cmd = &cmd; cmd.mrq = &mrq; ++ cmd.data = NULL; ++ ++ err = msdc_do_command(host, &cmd, 1, CMD_TIMEOUT); ++ ++ if (status) ++ *status = cmd.resp[0]; ++ ++ return err; ++} ++ ++static int msdc_check_busy(struct mmc_host *mmc, struct msdc_host *host) ++{ ++ u32 err = 0; ++ u32 status = 0; ++ ++ do { ++ err = msdc_get_card_status(mmc, host, &status); ++ if (err) ++ return err; ++ /* need cmd12? */ ++ //printk("cmd<13> resp<0x%x>\n", status); ++ } while (R1_CURRENT_STATE(status) == 7); ++ ++ return err; ++} ++ ++static int msdc_tune_request(struct mmc_host *mmc, struct mmc_request *mrq) ++{ ++ struct msdc_host *host = mmc_priv(mmc); ++ struct mmc_command *cmd; ++ struct mmc_data *data; ++ int ret=0, read; ++ ++ cmd = mrq->cmd; ++ data = mrq->cmd->data; ++ ++ read = data->flags & MMC_DATA_READ ? 1 : 0; ++ ++ if (read) { ++ if (data->error == (unsigned int)(-EIO)) ++ ret = msdc_tune_bread(mmc,mrq); ++ } else { ++ ret = msdc_check_busy(mmc, host); ++ if (ret){ ++ //printk("XXX cmd13 wait program done failed\n"); ++ return ret; ++ } ++ /* CRC and TO */ ++ /* Fix me: don't care card status? 
*/ ++ ret = msdc_tune_bwrite(mmc,mrq); ++ } ++ ++ return ret; ++} ++ ++static void msdc_ops_request(struct mmc_host *mmc,struct mmc_request *mrq) ++{ ++ struct msdc_host *host = mmc_priv(mmc); ++ ++ if (host->mrq) { ++ //printk("XXX host->mrq<0x%.8x>\n", (int)host->mrq); ++ BUG(); ++ } ++ if (!is_card_present(host) || host->power_mode == MMC_POWER_OFF) { ++ //printk("cmd<%d> card<%d> power<%d>\n", mrq->cmd->opcode, is_card_present(host), host->power_mode); ++ mrq->cmd->error = (unsigned int)-ENOMEDIUM; ++ mrq->done(mrq); ++ return; ++ } ++ spin_lock(&host->lock); ++ ++ host->mrq = mrq; ++ ++ if (msdc_do_request(mmc,mrq)) ++ if(host->hw->flags & MSDC_REMOVABLE && mrq->data && mrq->data->error) ++ msdc_tune_request(mmc,mrq); ++ ++ if (mrq->cmd->opcode == MMC_APP_CMD) { ++ host->app_cmd = 1; ++ host->app_cmd_arg = mrq->cmd->arg; /* save the RCA */ ++ } else { ++ host->app_cmd = 0; ++ } ++ ++ host->mrq = NULL; ++ ++ spin_unlock(&host->lock); ++ ++ mmc_request_done(mmc, mrq); ++} ++ ++/* called by ops.set_ios */ ++static void msdc_set_buswidth(struct msdc_host *host, u32 width) ++{ ++ u32 base = host->base; ++ u32 val = sdr_read32(SDC_CFG); ++ ++ val &= ~SDC_CFG_BUSWIDTH; ++ ++ switch (width) { ++ default: ++ case MMC_BUS_WIDTH_1: ++ width = 1; ++ val |= (MSDC_BUS_1BITS << 16); ++ break; ++ case MMC_BUS_WIDTH_4: ++ val |= (MSDC_BUS_4BITS << 16); ++ break; ++ case MMC_BUS_WIDTH_8: ++ val |= (MSDC_BUS_8BITS << 16); ++ break; ++ } ++ ++ sdr_write32(SDC_CFG, val); ++ ++ //printk("Bus Width = %d\n", width); ++} ++ ++/* ops.set_ios */ ++static void msdc_ops_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) ++{ ++ struct msdc_host *host = mmc_priv(mmc); ++ struct msdc_hw *hw=host->hw; ++ u32 base = host->base; ++ u32 ddr = 0; ++ ++#ifdef MT6575_SD_DEBUG ++ static char *vdd[] = { ++ "1.50v", "1.55v", "1.60v", "1.65v", "1.70v", "1.80v", "1.90v", ++ "2.00v", "2.10v", "2.20v", "2.30v", "2.40v", "2.50v", "2.60v", ++ "2.70v", "2.80v", "2.90v", "3.00v", "3.10v", "3.20v", "3.30v", ++ "3.40v", "3.50v", "3.60v" ++ }; ++ static char *power_mode[] = { ++ "OFF", "UP", "ON" ++ }; ++ static char *bus_mode[] = { ++ "UNKNOWN", "OPENDRAIN", "PUSHPULL" ++ }; ++ static char *timing[] = { ++ "LEGACY", "MMC_HS", "SD_HS" ++ }; ++ ++ /*printk("SET_IOS: CLK(%dkHz), BUS(%s), BW(%u), PWR(%s), VDD(%s), TIMING(%s)\n", ++ ios->clock / 1000, bus_mode[ios->bus_mode], ++ (ios->bus_width == MMC_BUS_WIDTH_4) ? 4 : 1, ++ power_mode[ios->power_mode], vdd[ios->vdd], timing[ios->timing]);*/ ++#endif ++ ++ msdc_set_buswidth(host, ios->bus_width); ++ ++ /* Power control ??? 
*/ ++ switch (ios->power_mode) { ++ case MMC_POWER_OFF: ++ case MMC_POWER_UP: ++ // msdc_set_power_mode(host, ios->power_mode); /* --- by chhung */ ++ break; ++ case MMC_POWER_ON: ++ host->power_mode = MMC_POWER_ON; ++ break; ++ default: ++ break; ++ } ++ ++ /* Clock control */ ++ if (host->mclk != ios->clock) { ++ if(ios->clock > 25000000) { ++ //printk("SD data latch edge<%d>\n", hw->data_edge); ++ sdr_set_field(MSDC_IOCON, MSDC_IOCON_RSPL, hw->cmd_edge); ++ sdr_set_field(MSDC_IOCON, MSDC_IOCON_DSPL, hw->data_edge); ++ } else { ++ sdr_write32(MSDC_IOCON, 0x00000000); ++ sdr_write32(MSDC_DAT_RDDLY0, 0x10101010); // for MT7620 E2 and afterward ++ sdr_write32(MSDC_DAT_RDDLY1, 0x00000000); ++ sdr_write32(MSDC_PAD_TUNE, 0x84101010); // for MT7620 E2 and afterward ++ } ++ msdc_set_mclk(host, ddr, ios->clock); ++ } ++} ++ ++/* ops.get_ro */ ++static int msdc_ops_get_ro(struct mmc_host *mmc) ++{ ++ struct msdc_host *host = mmc_priv(mmc); ++ u32 base = host->base; ++ unsigned long flags; ++ int ro = 0; ++ ++ if (host->hw->flags & MSDC_WP_PIN_EN) { /* set for card */ ++ spin_lock_irqsave(&host->lock, flags); ++ ro = (sdr_read32(MSDC_PS) >> 31); ++ spin_unlock_irqrestore(&host->lock, flags); ++ } ++ return ro; ++} ++ ++/* ops.get_cd */ ++static int msdc_ops_get_cd(struct mmc_host *mmc) ++{ ++ struct msdc_host *host = mmc_priv(mmc); ++ u32 base = host->base; ++ unsigned long flags; ++ int present = 1; ++ ++ /* for sdio, MSDC_REMOVABLE not set, always return 1 */ ++ if (!(host->hw->flags & MSDC_REMOVABLE)) { ++ /* For sdio, read H/W always get<1>, but may timeout some times */ ++#if 1 ++ host->card_inserted = 1; ++ return 1; ++#else ++ host->card_inserted = (host->pm_state.event == PM_EVENT_USER_RESUME) ? 1 : 0; ++ printk("sdio ops_get_cd<%d>\n", host->card_inserted); ++ return host->card_inserted; ++#endif ++ } ++ ++ /* MSDC_CD_PIN_EN set for card */ ++ if (host->hw->flags & MSDC_CD_PIN_EN) { ++ spin_lock_irqsave(&host->lock, flags); ++#if 0 ++ present = host->card_inserted; /* why not read from H/W: Fix me*/ ++#else ++ present = (sdr_read32(MSDC_PS) & MSDC_PS_CDSTS) ? 0 : 1; ++ host->card_inserted = present; ++#endif ++ spin_unlock_irqrestore(&host->lock, flags); ++ } else { ++ present = 0; /* TODO? Check DAT3 pins for card detection */ ++ } ++ ++ //printk("ops_get_cd return<%d>\n", present); ++ return present; ++} ++ ++/* ops.enable_sdio_irq */ ++static void msdc_ops_enable_sdio_irq(struct mmc_host *mmc, int enable) ++{ ++ struct msdc_host *host = mmc_priv(mmc); ++ struct msdc_hw *hw = host->hw; ++ u32 base = host->base; ++ u32 tmp; ++ ++ if (hw->flags & MSDC_EXT_SDIO_IRQ) { /* yes for sdio */ ++ if (enable) { ++ hw->enable_sdio_eirq(); /* combo_sdio_enable_eirq */ ++ } else { ++ hw->disable_sdio_eirq(); /* combo_sdio_disable_eirq */ ++ } ++ } else { ++ //printk("XXX \n"); /* so never enter here */ ++ tmp = sdr_read32(SDC_CFG); ++ /* FIXME. 
Need to interrupt gap detection */ ++ if (enable) { ++ tmp |= (SDC_CFG_SDIOIDE | SDC_CFG_SDIOINTWKUP); ++ } else { ++ tmp &= ~(SDC_CFG_SDIOIDE | SDC_CFG_SDIOINTWKUP); ++ } ++ sdr_write32(SDC_CFG, tmp); ++ } ++} ++ ++static struct mmc_host_ops mt_msdc_ops = { ++ .request = msdc_ops_request, ++ .set_ios = msdc_ops_set_ios, ++ .get_ro = msdc_ops_get_ro, ++ .get_cd = msdc_ops_get_cd, ++ .enable_sdio_irq = msdc_ops_enable_sdio_irq, ++}; ++ ++/*--------------------------------------------------------------------------*/ ++/* interrupt handler */ ++/*--------------------------------------------------------------------------*/ ++static irqreturn_t msdc_irq(int irq, void *dev_id) ++{ ++ struct msdc_host *host = (struct msdc_host *)dev_id; ++ struct mmc_data *data = host->data; ++ struct mmc_command *cmd = host->cmd; ++ u32 base = host->base; ++ ++ u32 cmdsts = MSDC_INT_RSPCRCERR | MSDC_INT_CMDTMO | MSDC_INT_CMDRDY | ++ MSDC_INT_ACMDCRCERR | MSDC_INT_ACMDTMO | MSDC_INT_ACMDRDY | ++ MSDC_INT_ACMD19_DONE; ++ u32 datsts = MSDC_INT_DATCRCERR |MSDC_INT_DATTMO; ++ ++ u32 intsts = sdr_read32(MSDC_INT); ++ u32 inten = sdr_read32(MSDC_INTEN); inten &= intsts; ++ ++ sdr_write32(MSDC_INT, intsts); /* clear interrupts */ ++ /* MSG will cause fatal error */ ++ ++ /* card change interrupt */ ++ if (intsts & MSDC_INT_CDSC){ ++ //printk("MSDC_INT_CDSC irq<0x%.8x>\n", intsts); ++ tasklet_hi_schedule(&host->card_tasklet); ++ /* tuning when plug card ? */ ++ } ++ ++ /* sdio interrupt */ ++ if (intsts & MSDC_INT_SDIOIRQ){ ++ //printk("XXX MSDC_INT_SDIOIRQ\n"); /* seems not sdio irq */ ++ //mmc_signal_sdio_irq(host->mmc); ++ } ++ ++ /* transfer complete interrupt */ ++ if (data != NULL) { ++ if (inten & MSDC_INT_XFER_COMPL) { ++ data->bytes_xfered = host->dma.xfersz; ++ complete(&host->xfer_done); ++ } ++ ++ if (intsts & datsts) { ++ /* do basic reset, or stop command will sdc_busy */ ++ msdc_reset(); ++ msdc_clr_fifo(); ++ msdc_clr_int(); ++ atomic_set(&host->abort, 1); /* For PIO mode exit */ ++ ++ if (intsts & MSDC_INT_DATTMO){ ++ //printk("XXX CMD<%d> MSDC_INT_DATTMO\n", host->mrq->cmd->opcode); ++ data->error = (unsigned int)-ETIMEDOUT; ++ } ++ else if (intsts & MSDC_INT_DATCRCERR){ ++ //printk("XXX CMD<%d> MSDC_INT_DATCRCERR, SDC_DCRC_STS<0x%x>\n", host->mrq->cmd->opcode, sdr_read32(SDC_DCRC_STS)); ++ data->error = (unsigned int)-EIO; ++ } ++ ++ //if(sdr_read32(MSDC_INTEN) & MSDC_INT_XFER_COMPL) { ++ if (host->dma_xfer) { ++ complete(&host->xfer_done); /* Read CRC come fast, XFER_COMPL not enabled */ ++ } /* PIO mode can't do complete, because not init */ ++ } ++ } ++ ++ /* command interrupts */ ++ if ((cmd != NULL) && (intsts & cmdsts)) { ++ if ((intsts & MSDC_INT_CMDRDY) || (intsts & MSDC_INT_ACMDRDY) || ++ (intsts & MSDC_INT_ACMD19_DONE)) { ++ u32 *rsp = &cmd->resp[0]; ++ ++ switch (host->cmd_rsp) { ++ case RESP_NONE: ++ break; ++ case RESP_R2: ++ *rsp++ = sdr_read32(SDC_RESP3); *rsp++ = sdr_read32(SDC_RESP2); ++ *rsp++ = sdr_read32(SDC_RESP1); *rsp++ = sdr_read32(SDC_RESP0); ++ break; ++ default: /* Response types 1, 3, 4, 5, 6, 7(1b) */ ++ if ((intsts & MSDC_INT_ACMDRDY) || (intsts & MSDC_INT_ACMD19_DONE)) { ++ *rsp = sdr_read32(SDC_ACMD_RESP); ++ } else { ++ *rsp = sdr_read32(SDC_RESP0); ++ } ++ break; ++ } ++ } else if ((intsts & MSDC_INT_RSPCRCERR) || (intsts & MSDC_INT_ACMDCRCERR)) { ++ if(intsts & MSDC_INT_ACMDCRCERR){ ++ //printk("XXX CMD<%d> MSDC_INT_ACMDCRCERR\n",cmd->opcode); ++ } ++ else { ++ //printk("XXX CMD<%d> MSDC_INT_RSPCRCERR\n",cmd->opcode); ++ } ++ cmd->error = (unsigned int)-EIO; ++ } else 
if ((intsts & MSDC_INT_CMDTMO) || (intsts & MSDC_INT_ACMDTMO)) { ++ if(intsts & MSDC_INT_ACMDTMO){ ++ //printk("XXX CMD<%d> MSDC_INT_ACMDTMO\n",cmd->opcode); ++ } ++ else { ++ //printk("XXX CMD<%d> MSDC_INT_CMDTMO\n",cmd->opcode); ++ } ++ cmd->error = (unsigned int)-ETIMEDOUT; ++ msdc_reset(); ++ msdc_clr_fifo(); ++ msdc_clr_int(); ++ } ++ complete(&host->cmd_done); ++ } ++ ++ /* mmc irq interrupts */ ++ if (intsts & MSDC_INT_MMCIRQ) { ++ //printk(KERN_INFO "msdc[%d] MMCIRQ: SDC_CSTS=0x%.8x\r\n", host->id, sdr_read32(SDC_CSTS)); ++ } ++ ++#ifdef MT6575_SD_DEBUG ++ { ++ msdc_int_reg *int_reg = (msdc_int_reg*)&intsts; ++ /*printk("IRQ_EVT(0x%x): MMCIRQ(%d) CDSC(%d), ACRDY(%d), ACTMO(%d), ACCRE(%d) AC19DN(%d)\n", ++ intsts, ++ int_reg->mmcirq, ++ int_reg->cdsc, ++ int_reg->atocmdrdy, ++ int_reg->atocmdtmo, ++ int_reg->atocmdcrc, ++ int_reg->atocmd19done); ++ printk("IRQ_EVT(0x%x): SDIO(%d) CMDRDY(%d), CMDTMO(%d), RSPCRC(%d), CSTA(%d)\n", ++ intsts, ++ int_reg->sdioirq, ++ int_reg->cmdrdy, ++ int_reg->cmdtmo, ++ int_reg->rspcrc, ++ int_reg->csta); ++ printk("IRQ_EVT(0x%x): XFCMP(%d) DXDONE(%d), DATTMO(%d), DATCRC(%d), DMAEMP(%d)\n", ++ intsts, ++ int_reg->xfercomp, ++ int_reg->dxferdone, ++ int_reg->dattmo, ++ int_reg->datcrc, ++ int_reg->dmaqempty);*/ ++ ++ } ++#endif ++ ++ return IRQ_HANDLED; ++} ++ ++/*--------------------------------------------------------------------------*/ ++/* platform_driver members */ ++/*--------------------------------------------------------------------------*/ ++/* called by msdc_drv_probe/remove */ ++static void msdc_enable_cd_irq(struct msdc_host *host, int enable) ++{ ++ struct msdc_hw *hw = host->hw; ++ u32 base = host->base; ++ ++ /* for sdio, not set */ ++ if ((hw->flags & MSDC_CD_PIN_EN) == 0) { ++ /* Pull down card detection pin since it is not avaiable */ ++ /* ++ if (hw->config_gpio_pin) ++ hw->config_gpio_pin(MSDC_CD_PIN, GPIO_PULL_DOWN); ++ */ ++ sdr_clr_bits(MSDC_PS, MSDC_PS_CDEN); ++ sdr_clr_bits(MSDC_INTEN, MSDC_INTEN_CDSC); ++ sdr_clr_bits(SDC_CFG, SDC_CFG_INSWKUP); ++ return; ++ } ++ ++ //printk("CD IRQ Eanable(%d)\n", enable); ++ ++ if (enable) { ++ if (hw->enable_cd_eirq) { /* not set, never enter */ ++ hw->enable_cd_eirq(); ++ } else { ++ /* card detection circuit relies on the core power so that the core power ++ * shouldn't be turned off. Here adds a reference count to keep ++ * the core power alive. ++ */ ++ //msdc_vcore_on(host); //did in msdc_init_hw() ++ ++ if (hw->config_gpio_pin) /* NULL */ ++ hw->config_gpio_pin(MSDC_CD_PIN, GPIO_PULL_UP); ++ ++ sdr_set_field(MSDC_PS, MSDC_PS_CDDEBOUNCE, DEFAULT_DEBOUNCE); ++ sdr_set_bits(MSDC_PS, MSDC_PS_CDEN); ++ sdr_set_bits(MSDC_INTEN, MSDC_INTEN_CDSC); ++ sdr_set_bits(SDC_CFG, SDC_CFG_INSWKUP); /* not in document! Fix me */ ++ } ++ } else { ++ if (hw->disable_cd_eirq) { ++ hw->disable_cd_eirq(); ++ } else { ++ if (hw->config_gpio_pin) /* NULL */ ++ hw->config_gpio_pin(MSDC_CD_PIN, GPIO_PULL_DOWN); ++ ++ sdr_clr_bits(SDC_CFG, SDC_CFG_INSWKUP); ++ sdr_clr_bits(MSDC_PS, MSDC_PS_CDEN); ++ sdr_clr_bits(MSDC_INTEN, MSDC_INTEN_CDSC); ++ ++ /* Here decreases a reference count to core power since card ++ * detection circuit is shutdown. 
++ */ ++ //msdc_vcore_off(host); ++ } ++ } ++} ++ ++/* called by msdc_drv_probe */ ++static void msdc_init_hw(struct msdc_host *host) ++{ ++ u32 base = host->base; ++ struct msdc_hw *hw = host->hw; ++ ++#ifdef MT6575_SD_DEBUG ++ msdc_reg[host->id] = (struct msdc_regs *)host->base; ++#endif ++ ++ /* Power on */ ++#if 0 /* --- chhung */ ++ msdc_vcore_on(host); ++ msdc_pin_reset(host, MSDC_PIN_PULL_UP); ++ msdc_select_clksrc(host, hw->clk_src); ++ enable_clock(PERI_MSDC0_PDN + host->id, "SD"); ++ msdc_vdd_on(host); ++#endif /* end of --- */ ++ /* Configure to MMC/SD mode */ ++ sdr_set_field(MSDC_CFG, MSDC_CFG_MODE, MSDC_SDMMC); ++ ++ /* Reset */ ++ msdc_reset(); ++ msdc_clr_fifo(); ++ ++ /* Disable card detection */ ++ sdr_clr_bits(MSDC_PS, MSDC_PS_CDEN); ++ ++ /* Disable and clear all interrupts */ ++ sdr_clr_bits(MSDC_INTEN, sdr_read32(MSDC_INTEN)); ++ sdr_write32(MSDC_INT, sdr_read32(MSDC_INT)); ++ ++#if 1 ++ /* reset tuning parameter */ ++ sdr_write32(MSDC_PAD_CTL0, 0x00090000); ++ sdr_write32(MSDC_PAD_CTL1, 0x000A0000); ++ sdr_write32(MSDC_PAD_CTL2, 0x000A0000); ++ // sdr_write32(MSDC_PAD_TUNE, 0x00000000); ++ sdr_write32(MSDC_PAD_TUNE, 0x84101010); // for MT7620 E2 and afterward ++ // sdr_write32(MSDC_DAT_RDDLY0, 0x00000000); ++ sdr_write32(MSDC_DAT_RDDLY0, 0x10101010); // for MT7620 E2 and afterward ++ sdr_write32(MSDC_DAT_RDDLY1, 0x00000000); ++ sdr_write32(MSDC_IOCON, 0x00000000); ++#if 0 // use MT7620 default value: 0x403c004f ++ sdr_write32(MSDC_PATCH_BIT0, 0x003C000F); /* bit0 modified: Rx Data Clock Source: 1 -> 2.0*/ ++#endif ++ ++ if (sdr_read32(MSDC_ECO_VER) >= 4) { ++ if (host->id == 1) { ++ sdr_set_field(MSDC_PATCH_BIT1, MSDC_PATCH_BIT1_WRDAT_CRCS, 1); ++ sdr_set_field(MSDC_PATCH_BIT1, MSDC_PATCH_BIT1_CMD_RSP, 1); ++ ++ /* internal clock: latch read data */ ++ sdr_set_bits(MSDC_PATCH_BIT0, MSDC_PATCH_BIT_CKGEN_CK); ++ } ++ } ++#endif ++ ++ /* for safety, should clear SDC_CFG.SDIO_INT_DET_EN & set SDC_CFG.SDIO in ++ pre-loader,uboot,kernel drivers. and SDC_CFG.SDIO_INT_DET_EN will be only ++ set when kernel driver wants to use SDIO bus interrupt */ ++ /* Configure to enable SDIO mode. 
it is a must, otherwise SDIO CMD5 fails */
++	sdr_set_bits(SDC_CFG, SDC_CFG_SDIO);
++
++	/* disable SDIO device interrupt detection */
++	sdr_clr_bits(SDC_CFG, SDC_CFG_SDIOIDE);
++
++	/* enable SMT for glitch filter */
++	sdr_set_bits(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKSMT);
++	sdr_set_bits(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDSMT);
++	sdr_set_bits(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATSMT);
++
++#if 1
++	/* set clk, cmd, dat pad driving */
++	sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKDRVN, hw->clk_drv);
++	sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKDRVP, hw->clk_drv);
++	sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDDRVN, hw->cmd_drv);
++	sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDDRVP, hw->cmd_drv);
++	sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATDRVN, hw->dat_drv);
++	sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATDRVP, hw->dat_drv);
++#else
++	sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKDRVN, 0);
++	sdr_set_field(MSDC_PAD_CTL0, MSDC_PAD_CTL0_CLKDRVP, 0);
++	sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDDRVN, 0);
++	sdr_set_field(MSDC_PAD_CTL1, MSDC_PAD_CTL1_CMDDRVP, 0);
++	sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATDRVN, 0);
++	sdr_set_field(MSDC_PAD_CTL2, MSDC_PAD_CTL2_DATDRVP, 0);
++#endif
++
++	/* set sampling edge */
++
++	/* write crc timeout detection */
++	sdr_set_field(MSDC_PATCH_BIT0, 1 << 30, 1);
++
++	/* Configure to default data timeout */
++	sdr_set_field(SDC_CFG, SDC_CFG_DTOC, DEFAULT_DTOC);
++
++	msdc_set_buswidth(host, MMC_BUS_WIDTH_1);
++
++	//printk("init hardware done!\n");
++}
++
++/* called by msdc_drv_remove */
++static void msdc_deinit_hw(struct msdc_host *host)
++{
++	u32 base = host->base;
++
++	/* Disable and clear all interrupts */
++	sdr_clr_bits(MSDC_INTEN, sdr_read32(MSDC_INTEN));
++	sdr_write32(MSDC_INT, sdr_read32(MSDC_INT));
++
++	/* Disable card detection */
++	msdc_enable_cd_irq(host, 0);
++	// msdc_set_power_mode(host, MMC_POWER_OFF); /* make sure power down */ /* --- by chhung */
++}
++
++/* init gpd and bd list in msdc_drv_probe */
++static void msdc_init_gpd_bd(struct msdc_host *host, struct msdc_dma *dma)
++{
++	gpd_t *gpd = dma->gpd;
++	bd_t *bd = dma->bd;
++	bd_t *ptr, *prev;
++
++	/* we just support one gpd */
++	int bdlen = MAX_BD_PER_GPD;
++
++	/* init the 2 gpd */
++	memset(gpd, 0, sizeof(gpd_t) * 2);
++	//gpd->next = (void *)virt_to_phys(gpd + 1); /* pointer to a null gpd, bug!
kmalloc <-> virt_to_phys */ ++ //gpd->next = (dma->gpd_addr + 1); /* bug */ ++ gpd->next = (void *)((u32)dma->gpd_addr + sizeof(gpd_t)); ++ ++ //gpd->intr = 0; ++ gpd->bdp = 1; /* hwo, cs, bd pointer */ ++ //gpd->ptr = (void*)virt_to_phys(bd); ++ gpd->ptr = (void *)dma->bd_addr; /* physical address */ ++ ++ memset(bd, 0, sizeof(bd_t) * bdlen); ++ ptr = bd + bdlen - 1; ++ //ptr->eol = 1; /* 0 or 1 [Fix me]*/ ++ //ptr->next = 0; ++ ++ while (ptr != bd) { ++ prev = ptr - 1; ++ prev->next = (void *)(dma->bd_addr + sizeof(bd_t) *(ptr - bd)); ++ ptr = prev; ++ } ++} ++ ++static int msdc_drv_probe(struct platform_device *pdev) ++{ ++ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ __iomem void *base; ++ struct mmc_host *mmc; ++ struct resource *mem; ++ struct msdc_host *host; ++ struct msdc_hw *hw; ++ int ret, irq; ++ pdev->dev.platform_data = &msdc0_hw; ++ ++ /* Allocate MMC host for this device */ ++ mmc = mmc_alloc_host(sizeof(struct msdc_host), &pdev->dev); ++ if (!mmc) return -ENOMEM; ++ ++ hw = (struct msdc_hw*)pdev->dev.platform_data; ++ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ irq = platform_get_irq(pdev, 0); ++ ++ //BUG_ON((!hw) || (!mem) || (irq < 0)); /* --- by chhung */ ++ ++ base = devm_request_and_ioremap(&pdev->dev, res); ++ if (IS_ERR(base)) ++ return PTR_ERR(base); ++ ++/* mem = request_mem_region(mem->start - 0xa0000000, (mem->end - mem->start + 1) - 0xa0000000, dev_name(&pdev->dev)); ++ if (mem == NULL) { ++ mmc_free_host(mmc); ++ return -EBUSY; ++ } ++*/ ++ /* Set host parameters to mmc */ ++ mmc->ops = &mt_msdc_ops; ++ mmc->f_min = HOST_MIN_MCLK; ++ mmc->f_max = HOST_MAX_MCLK; ++ mmc->ocr_avail = MSDC_OCR_AVAIL; ++ ++ /* For sd card: MSDC_SYS_SUSPEND | MSDC_WP_PIN_EN | MSDC_CD_PIN_EN | MSDC_REMOVABLE | MSDC_HIGHSPEED, ++ For sdio : MSDC_EXT_SDIO_IRQ | MSDC_HIGHSPEED */ ++ if (hw->flags & MSDC_HIGHSPEED) { ++ mmc->caps = MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED; ++ } ++ if (hw->data_pins == 4) { /* current data_pins are all 4*/ ++ mmc->caps |= MMC_CAP_4_BIT_DATA; ++ } else if (hw->data_pins == 8) { ++ mmc->caps |= MMC_CAP_8_BIT_DATA; ++ } ++ if ((hw->flags & MSDC_SDIO_IRQ) || (hw->flags & MSDC_EXT_SDIO_IRQ)) ++ mmc->caps |= MMC_CAP_SDIO_IRQ; /* yes for sdio */ ++ ++ /* MMC core transfer sizes tunable parameters */ ++ // mmc->max_hw_segs = MAX_HW_SGMTS; ++// mmc->max_phys_segs = MAX_PHY_SGMTS; ++ mmc->max_seg_size = MAX_SGMT_SZ; ++ mmc->max_blk_size = HOST_MAX_BLKSZ; ++ mmc->max_req_size = MAX_REQ_SZ; ++ mmc->max_blk_count = mmc->max_req_size; ++ ++ host = mmc_priv(mmc); ++ host->hw = hw; ++ host->mmc = mmc; ++ host->id = pdev->id; ++ host->error = 0; ++ host->irq = irq; ++ host->base = (unsigned long) base; ++ host->mclk = 0; /* mclk: the request clock of mmc sub-system */ ++ host->hclk = hclks[hw->clk_src]; /* hclk: clock of clock source to msdc controller */ ++ host->sclk = 0; /* sclk: the really clock after divition */ ++ host->pm_state = PMSG_RESUME; ++ host->suspend = 0; ++ host->core_clkon = 0; ++ host->card_clkon = 0; ++ host->core_power = 0; ++ host->power_mode = MMC_POWER_OFF; ++// host->card_inserted = hw->flags & MSDC_REMOVABLE ? 
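/*
 * msdc_init_gpd_bd() above chains the BD list through bus addresses
 * (dma->bd_addr) rather than kernel virtual pointers, presumably because
 * the MSDC DMA engine walks the list on its own; the commented-out
 * virt_to_phys() lines are marked as buggy in the source.  Equivalent
 * forward loop, written against a reduced descriptor type purely for
 * illustration and not part of the driver.
 */
struct sketch_bd {
	void *next;	/* bus address of the next BD, NULL on the last one */
};

static void sketch_chain_bds(struct sketch_bd *bd, unsigned long bd_bus, int bdlen)
{
	int i;

	for (i = 0; i < bdlen - 1; i++)
		bd[i].next = (void *)(bd_bus + sizeof(struct sketch_bd) * (i + 1));

	bd[bdlen - 1].next = NULL;	/* list terminator, as left by the memset */
}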
0 : 1; ++ host->timeout_ns = 0; ++ host->timeout_clks = DEFAULT_DTOC * 65536; ++ ++ host->mrq = NULL; ++ //init_MUTEX(&host->sem); /* we don't need to support multiple threads access */ ++ ++ host->dma.used_gpd = 0; ++ host->dma.used_bd = 0; ++ ++ /* using dma_alloc_coherent*/ /* todo: using 1, for all 4 slots */ ++ host->dma.gpd = dma_alloc_coherent(NULL, MAX_GPD_NUM * sizeof(gpd_t), &host->dma.gpd_addr, GFP_KERNEL); ++ host->dma.bd = dma_alloc_coherent(NULL, MAX_BD_NUM * sizeof(bd_t), &host->dma.bd_addr, GFP_KERNEL); ++ BUG_ON((!host->dma.gpd) || (!host->dma.bd)); ++ msdc_init_gpd_bd(host, &host->dma); ++ /*for emmc*/ ++ msdc_6575_host[pdev->id] = host; ++ ++ tasklet_init(&host->card_tasklet, msdc_tasklet_card, (ulong)host); ++ spin_lock_init(&host->lock); ++ msdc_init_hw(host); ++ ++ ret = request_irq((unsigned int)irq, msdc_irq, IRQF_TRIGGER_LOW, dev_name(&pdev->dev), host); ++ if (ret) goto release; ++ // mt65xx_irq_unmask(irq); /* --- by chhung */ ++ ++ if (hw->flags & MSDC_CD_PIN_EN) { /* not set for sdio */ ++ if (hw->request_cd_eirq) { /* not set for MT6575 */ ++ hw->request_cd_eirq(msdc_eirq_cd, (void*)host); /* msdc_eirq_cd will not be used! */ ++ } ++ } ++ ++ if (hw->request_sdio_eirq) /* set to combo_sdio_request_eirq() for WIFI */ ++ hw->request_sdio_eirq(msdc_eirq_sdio, (void*)host); /* msdc_eirq_sdio() will be called when EIRQ */ ++ ++ if (hw->register_pm) {/* yes for sdio */ ++ if(hw->flags & MSDC_SYS_SUSPEND) { /* will not set for WIFI */ ++ //printk("MSDC_SYS_SUSPEND and register_pm both set\n"); ++ } ++ //mmc->pm_flags |= MMC_PM_IGNORE_PM_NOTIFY; /* pm not controlled by system but by client. */ /* --- by chhung */ ++ } ++ ++ platform_set_drvdata(pdev, mmc); ++ ++ ret = mmc_add_host(mmc); ++ if (ret) goto free_irq; ++ ++ /* Config card detection pin and enable interrupts */ ++ if (hw->flags & MSDC_CD_PIN_EN) { /* set for card */ ++ msdc_enable_cd_irq(host, 1); ++ } else { ++ msdc_enable_cd_irq(host, 0); ++ } ++ ++ return 0; ++ ++free_irq: ++ free_irq(irq, host); ++release: ++ platform_set_drvdata(pdev, NULL); ++ msdc_deinit_hw(host); ++ ++ tasklet_kill(&host->card_tasklet); ++ ++/* if (mem) ++ release_mem_region(mem->start, mem->end - mem->start + 1); ++*/ ++ mmc_free_host(mmc); ++ ++ return ret; ++} ++ ++/* 4 device share one driver, using "drvdata" to show difference */ ++static int msdc_drv_remove(struct platform_device *pdev) ++{ ++ struct mmc_host *mmc; ++ struct msdc_host *host; ++ struct resource *mem; ++ ++ ++ mmc = platform_get_drvdata(pdev); ++ BUG_ON(!mmc); ++ ++ host = mmc_priv(mmc); ++ BUG_ON(!host); ++ ++ //printk("removed !!!\n"); ++ ++ platform_set_drvdata(pdev, NULL); ++ mmc_remove_host(host->mmc); ++ msdc_deinit_hw(host); ++ ++ tasklet_kill(&host->card_tasklet); ++ free_irq(host->irq, host); ++ ++ dma_free_coherent(NULL, MAX_GPD_NUM * sizeof(gpd_t), host->dma.gpd, host->dma.gpd_addr); ++ dma_free_coherent(NULL, MAX_BD_NUM * sizeof(bd_t), host->dma.bd, host->dma.bd_addr); ++ ++ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ ++ if (mem) ++ release_mem_region(mem->start, mem->end - mem->start + 1); ++ ++ mmc_free_host(host->mmc); ++ ++ return 0; ++} ++ ++static const struct of_device_id mt7620a_sdhci_match[] = { ++ { .compatible = "ralink,mt7620a-sdhci" }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(of, rt288x_wdt_match); ++ ++/* Fix me: Power Flow */ ++static struct platform_driver mt_msdc_driver = { ++ .probe = msdc_drv_probe, ++ .remove = msdc_drv_remove, ++ .driver = { ++ .name = DRV_NAME, ++ .owner = THIS_MODULE, ++ .of_match_table = 
mt7620a_sdhci_match, ++ ++ }, ++}; ++ ++static int __init mt_msdc_init(void) ++{ ++ int ret; ++/* +++ chhung */ ++ unsigned int reg; ++ ++ mtk_sd_device.dev.platform_data = &msdc0_hw; ++ printk("MTK MSDC device init.\n"); ++ reg = sdr_read32((__iomem void *) 0xb0000060) & ~(0x3<<18); ++ reg |= 0x1 << 18; ++ sdr_write32((__iomem void *) 0xb0000060, reg); ++/* end of +++ */ ++ ret = platform_driver_register(&mt_msdc_driver); ++ if (ret) { ++ printk(KERN_ERR DRV_NAME ": Can't register driver"); ++ return ret; ++ } ++ printk(KERN_INFO DRV_NAME ": MediaTek MT6575 MSDC Driver\n"); ++ ++ //msdc_debug_proc_init(); ++ return 0; ++} ++ ++static void __exit mt_msdc_exit(void) ++{ ++ platform_driver_unregister(&mt_msdc_driver); ++} ++ ++module_init(mt_msdc_init); ++module_exit(mt_msdc_exit); ++MODULE_LICENSE("GPL"); ++MODULE_DESCRIPTION("MediaTek MT6575 SD/MMC Card Driver"); ++MODULE_AUTHOR("Infinity Chen "); ++ ++EXPORT_SYMBOL(msdc_6575_host); diff --git a/target/linux/ramips/patches-3.10/0128-mtd-fix-cfi-cmdset-0002-erase-status-check.patch b/target/linux/ramips/patches-3.10/0128-mtd-fix-cfi-cmdset-0002-erase-status-check.patch new file mode 100644 index 0000000000..18ba4cc6b2 --- /dev/null +++ b/target/linux/ramips/patches-3.10/0128-mtd-fix-cfi-cmdset-0002-erase-status-check.patch @@ -0,0 +1,29 @@ +From 543f839e6fbeb325e6fa201e205ab18a46e37424 Mon Sep 17 00:00:00 2001 +From: John Crispin +Date: Mon, 15 Jul 2013 00:38:51 +0200 +Subject: [PATCH 128/133] mtd: fix cfi cmdset 0002 erase status check + +--- + drivers/mtd/chips/cfi_cmdset_0002.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/drivers/mtd/chips/cfi_cmdset_0002.c ++++ b/drivers/mtd/chips/cfi_cmdset_0002.c +@@ -1957,7 +1957,7 @@ static int __xipram do_erase_chip(struct + chip->erase_suspended = 0; + } + +- if (chip_ready(map, adr)) ++ if (chip_good(map, adr, map_word_ff(map))) + break; + + if (time_after(jiffies, timeo)) { +@@ -2046,7 +2046,7 @@ static int __xipram do_erase_oneblock(st + chip->erase_suspended = 0; + } + +- if (chip_ready(map, adr)) { ++ if (chip_good(map, adr, map_word_ff(map))) { + xip_enable(map, chip, adr); + break; + } diff --git a/target/linux/ramips/patches-3.10/0129-mtd-cfi-cmdset-0002-force-word-write.patch b/target/linux/ramips/patches-3.10/0129-mtd-cfi-cmdset-0002-force-word-write.patch new file mode 100644 index 0000000000..28274ee579 --- /dev/null +++ b/target/linux/ramips/patches-3.10/0129-mtd-cfi-cmdset-0002-force-word-write.patch @@ -0,0 +1,70 @@ +From 0ffe6cdf77793536a77b5c85cf41deb27cfc7632 Mon Sep 17 00:00:00 2001 +From: John Crispin +Date: Mon, 15 Jul 2013 00:39:21 +0200 +Subject: [PATCH 129/133] mtd: cfi cmdset 0002 force word write + +--- + drivers/mtd/chips/cfi_cmdset_0002.c | 9 +++++++-- + 1 file changed, 7 insertions(+), 2 deletions(-) + +--- a/drivers/mtd/chips/cfi_cmdset_0002.c ++++ b/drivers/mtd/chips/cfi_cmdset_0002.c +@@ -41,7 +41,7 @@ + #include + + #define AMD_BOOTLOC_BUG +-#define FORCE_WORD_WRITE 0 ++#define FORCE_WORD_WRITE 1 + + #define MAX_WORD_RETRIES 3 + +@@ -52,7 +52,9 @@ + + static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); + static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); ++#if !FORCE_WORD_WRITE + static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); ++#endif + static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *); + static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *); + static void cfi_amdstd_sync 
(struct mtd_info *); +@@ -192,6 +194,7 @@ static void fixup_amd_bootblock(struct m + } + #endif + ++#if !FORCE_WORD_WRITE + static void fixup_use_write_buffers(struct mtd_info *mtd) + { + struct map_info *map = mtd->priv; +@@ -201,6 +204,7 @@ static void fixup_use_write_buffers(stru + mtd->_write = cfi_amdstd_write_buffers; + } + } ++#endif /* !FORCE_WORD_WRITE */ + + /* Atmel chips don't use the same PRI format as AMD chips */ + static void fixup_convert_atmel_pri(struct mtd_info *mtd) +@@ -1461,6 +1465,7 @@ static int cfi_amdstd_write_words(struct + /* + * FIXME: interleaved mode not tested, and probably not supported! + */ ++#if !FORCE_WORD_WRITE + static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, + unsigned long adr, const u_char *buf, + int len) +@@ -1585,7 +1590,6 @@ static int __xipram do_write_buffer(stru + return ret; + } + +- + static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len, + size_t *retlen, const u_char *buf) + { +@@ -1660,6 +1664,7 @@ static int cfi_amdstd_write_buffers(stru + + return 0; + } ++#endif /* !FORCE_WORD_WRITE */ + + /* + * Wait for the flash chip to become ready to write data diff --git a/target/linux/ramips/patches-3.10/0130-mtd-ralink-add-mt7620-nand-driver.patch b/target/linux/ramips/patches-3.10/0130-mtd-ralink-add-mt7620-nand-driver.patch new file mode 100644 index 0000000000..45e4632839 --- /dev/null +++ b/target/linux/ramips/patches-3.10/0130-mtd-ralink-add-mt7620-nand-driver.patch @@ -0,0 +1,2408 @@ +From bea6f4b28443b7603e25b2404ad787a97f80fc59 Mon Sep 17 00:00:00 2001 +From: John Crispin +Date: Sun, 17 Nov 2013 17:41:46 +0100 +Subject: [PATCH 130/133] mtd: ralink: add mt7620 nand driver + +Signed-off-by: John Crispin +--- + drivers/mtd/maps/Kconfig | 4 + + drivers/mtd/maps/Makefile | 2 + + drivers/mtd/maps/ralink_nand.c | 2136 ++++++++++++++++++++++++++++++++++++++++ + drivers/mtd/maps/ralink_nand.h | 232 +++++ + 4 files changed, 2374 insertions(+) + create mode 100644 drivers/mtd/maps/ralink_nand.c + create mode 100644 drivers/mtd/maps/ralink_nand.h + +--- a/drivers/mtd/maps/Kconfig ++++ b/drivers/mtd/maps/Kconfig +@@ -424,4 +424,8 @@ config MTD_LATCH_ADDR + + If compiled as a module, it will be called latch-addr-flash. 
+ ++config MTD_NAND_MT7620 ++ tristate "Support for NAND on Mediatek MT7620" ++ depends on RALINK && SOC_MT7620 ++ + endmenu +--- a/drivers/mtd/maps/Makefile ++++ b/drivers/mtd/maps/Makefile +@@ -46,3 +46,5 @@ obj-$(CONFIG_MTD_VMU) += vmu-flash.o + obj-$(CONFIG_MTD_GPIO_ADDR) += gpio-addr-flash.o + obj-$(CONFIG_MTD_LATCH_ADDR) += latch-addr-flash.o + obj-$(CONFIG_MTD_LANTIQ) += lantiq-flash.o ++obj-$(CONFIG_MTD_NAND_MT7620) += ralink_nand.o ++ +--- /dev/null ++++ b/drivers/mtd/maps/ralink_nand.c +@@ -0,0 +1,2136 @@ ++#define DEBUG ++#include ++#undef DEBUG ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "ralink_nand.h" ++#ifdef RANDOM_GEN_BAD_BLOCK ++#include ++#endif ++ ++#define LARGE_MTD_BOOT_PART_SIZE (CFG_BLOCKSIZE<<2) ++#define LARGE_MTD_CONFIG_PART_SIZE (CFG_BLOCKSIZE<<2) ++#define LARGE_MTD_FACTORY_PART_SIZE (CFG_BLOCKSIZE<<1) ++ ++ ++#define BLOCK_ALIGNED(a) ((a) & (CFG_BLOCKSIZE - 1)) ++ ++#define READ_STATUS_RETRY 1000 ++ ++struct mtd_info *ranfc_mtd = NULL; ++ ++int skipbbt = 0; ++int ranfc_debug = 1; ++static int ranfc_bbt = 1; ++#if defined (WORKAROUND_RX_BUF_OV) ++static int ranfc_verify = 1; ++#endif ++static u32 nand_addrlen; ++ ++#if 0 ++module_param(ranfc_debug, int, 0644); ++module_param(ranfc_bbt, int, 0644); ++module_param(ranfc_verify, int, 0644); ++#endif ++ ++#if 0 ++#define ra_dbg(args...) do { if (ranfc_debug) printk(args); } while(0) ++#else ++#define ra_dbg(args...) ++#endif ++ ++#define CLEAR_INT_STATUS() ra_outl(NFC_INT_ST, ra_inl(NFC_INT_ST)) ++#define NFC_TRANS_DONE() (ra_inl(NFC_INT_ST) & INT_ST_ND_DONE) ++ ++int is_nand_page_2048 = 0; ++const unsigned int nand_size_map[2][3] = {{25, 30, 30}, {20, 27, 30}}; ++ ++static int nfc_wait_ready(int snooze_ms); ++ ++static const char * const mtk_probe_types[] = { "cmdlinepart", "ofpart", NULL }; ++ ++/** ++ * reset nand chip ++ */ ++static int nfc_chip_reset(void) ++{ ++ int status; ++ ++ //ra_dbg("%s:\n", __func__); ++ ++ // reset nand flash ++ ra_outl(NFC_CMD1, 0x0); ++ ra_outl(NFC_CMD2, 0xff); ++ ra_outl(NFC_ADDR, 0x0); ++ ra_outl(NFC_CONF, 0x0411); ++ ++ status = nfc_wait_ready(5); //erase wait 5us ++ if (status & NAND_STATUS_FAIL) { ++ printk("%s: fail \n", __func__); ++ } ++ ++ return (int)(status & NAND_STATUS_FAIL); ++ ++} ++ ++ ++ ++/** ++ * clear NFC and flash chip. ++ */ ++static int nfc_all_reset(void) ++{ ++ int retry; ++ ++ ra_dbg("%s: \n", __func__); ++ ++ // reset controller ++ ra_outl(NFC_CTRL, ra_inl(NFC_CTRL) | 0x02); //clear data buffer ++ ra_outl(NFC_CTRL, ra_inl(NFC_CTRL) & ~0x02); //clear data buffer ++ ++ CLEAR_INT_STATUS(); ++ ++ retry = READ_STATUS_RETRY; ++ while ((ra_inl(NFC_INT_ST) & 0x02) != 0x02 && retry--); ++ if (retry <= 0) { ++ printk("nfc_all_reset: clean buffer fail \n"); ++ return -1; ++ } ++ ++ retry = READ_STATUS_RETRY; ++ while ((ra_inl(NFC_STATUS) & 0x1) != 0x0 && retry--) { //fixme, controller is busy ? ++ udelay(1); ++ } ++ ++ nfc_chip_reset(); ++ ++ return 0; ++} ++ ++/** NOTICE: only called by nfc_wait_ready(). ++ * @return -1, nfc can not get transction done ++ * @return 0, ok. ++ */ ++static int _nfc_read_status(char *status) ++{ ++ unsigned long cmd1, conf; ++ int int_st, nfc_st; ++ int retry; ++ ++ cmd1 = 0x70; ++ conf = 0x000101 | (1 << 20); ++ ++ //fixme, should we check nfc status? ++ CLEAR_INT_STATUS(); ++ ++ ra_outl(NFC_CMD1, cmd1); ++ ra_outl(NFC_CONF, conf); ++ ++ /* FIXME, ++ * 1. 
since we have no wired ready signal, directly ++ * calling this function is not gurantee to read right status under ready state. ++ * 2. the other side, we can not determine how long to become ready, this timeout retry is nonsense. ++ * 3. SUGGESTION: call nfc_read_status() from nfc_wait_ready(), ++ * that is aware about caller (in sementics) and has snooze plused nfc ND_DONE. ++ */ ++ retry = READ_STATUS_RETRY; ++ do { ++ nfc_st = ra_inl(NFC_STATUS); ++ int_st = ra_inl(NFC_INT_ST); ++ ++ ndelay(10); ++ } while (!(int_st & INT_ST_RX_BUF_RDY) && retry--); ++ ++ if (!(int_st & INT_ST_RX_BUF_RDY)) { ++ printk("nfc_read_status: NFC fail, int_st(%x), retry:%x. nfc:%x, reset nfc and flash. \n", ++ int_st, retry, nfc_st); ++ nfc_all_reset(); ++ *status = NAND_STATUS_FAIL; ++ return -1; ++ } ++ ++ *status = (char)(le32_to_cpu(ra_inl(NFC_DATA)) & 0x0ff); ++ return 0; ++} ++ ++/** ++ * @return !0, chip protect. ++ * @return 0, chip not protected. ++ */ ++static int nfc_check_wp(void) ++{ ++ /* Check the WP bit */ ++#if !defined CONFIG_NOT_SUPPORT_WP ++ return !!(ra_inl(NFC_CTRL) & 0x01); ++#else ++ char result = 0; ++ int ret; ++ ++ ret = _nfc_read_status(&result); ++ //FIXME, if ret < 0 ++ ++ return !(result & NAND_STATUS_WP); ++#endif ++} ++ ++#if !defined CONFIG_NOT_SUPPORT_RB ++/* ++ * @return !0, chip ready. ++ * @return 0, chip busy. ++ */ ++static int nfc_device_ready(void) ++{ ++ /* Check the ready */ ++ return !!(ra_inl(NFC_STATUS) & 0x04); ++} ++#endif ++ ++ ++/** ++ * generic function to get data from flash. ++ * @return data length reading from flash. ++ */ ++static int _ra_nand_pull_data(char *buf, int len, int use_gdma) ++{ ++#ifdef RW_DATA_BY_BYTE ++ char *p = buf; ++#else ++ __u32 *p = (__u32 *)buf; ++#endif ++ int retry, int_st; ++ unsigned int ret_data; ++ int ret_size; ++ ++ // receive data by use_gdma ++ if (use_gdma) { ++ //if (_ra_nand_dma_pull((unsigned long)p, len)) { ++ if (1) { ++ printk("%s: fail \n", __func__); ++ len = -1; //return error ++ } ++ ++ return len; ++ } ++ ++ //fixme: retry count size? ++ retry = READ_STATUS_RETRY; ++ // no gdma ++ while (len > 0) { ++ int_st = ra_inl(NFC_INT_ST); ++ if (int_st & INT_ST_RX_BUF_RDY) { ++ ++ ret_data = ra_inl(NFC_DATA); ++ ra_outl(NFC_INT_ST, INT_ST_RX_BUF_RDY); ++#ifdef RW_DATA_BY_BYTE ++ ret_size = sizeof(unsigned int); ++ ret_size = min(ret_size, len); ++ len -= ret_size; ++ while (ret_size-- > 0) { ++ //nfc is little endian ++ *p++ = ret_data & 0x0ff; ++ ret_data >>= 8; ++ } ++#else ++ ret_size = min(len, 4); ++ len -= ret_size; ++ if (ret_size == 4) ++ *p++ = ret_data; ++ else { ++ __u8 *q = (__u8 *)p; ++ while (ret_size-- > 0) { ++ *q++ = ret_data & 0x0ff; ++ ret_data >>= 8; ++ } ++ p = (__u32 *)q; ++ } ++#endif ++ retry = READ_STATUS_RETRY; ++ } ++ else if (int_st & INT_ST_ND_DONE) { ++ break; ++ } ++ else { ++ udelay(1); ++ if (retry-- < 0) ++ break; ++ } ++ } ++ ++#ifdef RW_DATA_BY_BYTE ++ return (int)(p - buf); ++#else ++ return ((int)p - (int)buf); ++#endif ++} ++ ++/** ++ * generic function to put data into flash. ++ * @return data length writing into flash. 
++ */ ++static int _ra_nand_push_data(char *buf, int len, int use_gdma) ++{ ++#ifdef RW_DATA_BY_BYTE ++ char *p = buf; ++#else ++ __u32 *p = (__u32 *)buf; ++#endif ++ int retry, int_st; ++ unsigned int tx_data = 0; ++ int tx_size, iter = 0; ++ ++ // receive data by use_gdma ++ if (use_gdma) { ++ //if (_ra_nand_dma_push((unsigned long)p, len)) ++ if (1) ++ len = 0; ++ printk("%s: fail \n", __func__); ++ return len; ++ } ++ ++ // no gdma ++ retry = READ_STATUS_RETRY; ++ while (len > 0) { ++ int_st = ra_inl(NFC_INT_ST); ++ if (int_st & INT_ST_TX_BUF_RDY) { ++#ifdef RW_DATA_BY_BYTE ++ tx_size = min(len, (int)sizeof(unsigned long)); ++ for (iter = 0; iter < tx_size; iter++) { ++ tx_data |= (*p++ << (8*iter)); ++ } ++#else ++ tx_size = min(len, 4); ++ if (tx_size == 4) ++ tx_data = (*p++); ++ else { ++ __u8 *q = (__u8 *)p; ++ for (iter = 0; iter < tx_size; iter++) ++ tx_data |= (*q++ << (8*iter)); ++ p = (__u32 *)q; ++ } ++#endif ++ ra_outl(NFC_INT_ST, INT_ST_TX_BUF_RDY); ++ ra_outl(NFC_DATA, tx_data); ++ len -= tx_size; ++ retry = READ_STATUS_RETRY; ++ } ++ else if (int_st & INT_ST_ND_DONE) { ++ break; ++ } ++ else { ++ udelay(1); ++ if (retry-- < 0) { ++ ra_dbg("%s p:%p buf:%p \n", __func__, p, buf); ++ break; ++ } ++ } ++ } ++ ++ ++#ifdef RW_DATA_BY_BYTE ++ return (int)(p - buf); ++#else ++ return ((int)p - (int)buf); ++#endif ++ ++} ++ ++static int nfc_select_chip(struct ra_nand_chip *ra, int chipnr) ++{ ++#if (CONFIG_NUMCHIPS == 1) ++ if (!(chipnr < CONFIG_NUMCHIPS)) ++ return -1; ++ return 0; ++#else ++ BUG(); ++#endif ++} ++ ++/** @return -1: chip_select fail ++ * 0 : both CE and WP==0 are OK ++ * 1 : CE OK and WP==1 ++ */ ++static int nfc_enable_chip(struct ra_nand_chip *ra, unsigned int offs, int read_only) ++{ ++ int chipnr = offs >> ra->chip_shift; ++ ++ ra_dbg("%s: offs:%x read_only:%x \n", __func__, offs, read_only); ++ ++ chipnr = nfc_select_chip(ra, chipnr); ++ if (chipnr < 0) { ++ printk("%s: chip select error, offs(%x)\n", __func__, offs); ++ return -1; ++ } ++ ++ if (!read_only) ++ return nfc_check_wp(); ++ ++ return 0; ++} ++ ++/** wait nand chip becomeing ready and return queried status. ++ * @param snooze: sleep time in ms unit before polling device ready. ++ * @return status of nand chip ++ * @return NAN_STATUS_FAIL if something unexpected. ++ */ ++static int nfc_wait_ready(int snooze_ms) ++{ ++ int retry; ++ char status; ++ ++ // wait nfc idle, ++ if (snooze_ms == 0) ++ snooze_ms = 1; ++ else ++ schedule_timeout(snooze_ms * HZ / 1000); ++ ++ snooze_ms = retry = snooze_ms *1000000 / 100 ; // ndelay(100) ++ ++ while (!NFC_TRANS_DONE() && retry--) { ++ if (!cond_resched()) ++ ndelay(100); ++ } ++ ++ if (!NFC_TRANS_DONE()) { ++ printk("nfc_wait_ready: no transaction done \n"); ++ return NAND_STATUS_FAIL; ++ } ++ ++#if !defined (CONFIG_NOT_SUPPORT_RB) ++ //fixme ++ while(!(status = nfc_device_ready()) && retry--) { ++ ndelay(100); ++ } ++ ++ if (status == 0) { ++ printk("nfc_wait_ready: no device ready. \n"); ++ return NAND_STATUS_FAIL; ++ } ++ ++ _nfc_read_status(&status); ++ return status; ++#else ++ ++ while(retry--) { ++ _nfc_read_status(&status); ++ if (status & NAND_STATUS_READY) ++ break; ++ ndelay(100); ++ } ++ if (retry<0) ++ printk("nfc_wait_ready 2: no device ready, status(%x). 
\n", status); ++ ++ return status; ++#endif ++} ++ ++/** ++ * return 0: erase OK ++ * return -EIO: fail ++ */ ++int nfc_erase_block(struct ra_nand_chip *ra, int row_addr) ++{ ++ unsigned long cmd1, cmd2, bus_addr, conf; ++ char status; ++ ++ cmd1 = 0x60; ++ cmd2 = 0xd0; ++ bus_addr = row_addr; ++ conf = 0x00511 | ((CFG_ROW_ADDR_CYCLE)<<16); ++ ++ // set NFC ++ ra_dbg("%s: cmd1: %lx, cmd2:%lx bus_addr: %lx, conf: %lx \n", ++ __func__, cmd1, cmd2, bus_addr, conf); ++ ++ //fixme, should we check nfc status? ++ CLEAR_INT_STATUS(); ++ ++ ra_outl(NFC_CMD1, cmd1); ++ ra_outl(NFC_CMD2, cmd2); ++ ra_outl(NFC_ADDR, bus_addr); ++ ra_outl(NFC_CONF, conf); ++ ++ status = nfc_wait_ready(3); //erase wait 3ms ++ if (status & NAND_STATUS_FAIL) { ++ printk("%s: fail \n", __func__); ++ return -EIO; ++ } ++ ++ return 0; ++ ++} ++ ++static inline int _nfc_read_raw_data(int cmd1, int cmd2, int bus_addr, int bus_addr2, int conf, char *buf, int len, int flags) ++{ ++ int ret; ++ ++ CLEAR_INT_STATUS(); ++ ra_outl(NFC_CMD1, cmd1); ++ ra_outl(NFC_CMD2, cmd2); ++ ra_outl(NFC_ADDR, bus_addr); ++#if defined (CONFIG_RALINK_RT6855) || defined (CONFIG_RALINK_RT6855A) || \ ++ defined (CONFIG_RALINK_MT7620) || defined (CONFIG_RALINK_MT7621) ++ ra_outl(NFC_ADDR2, bus_addr2); ++#endif ++ ra_outl(NFC_CONF, conf); ++ ++ ret = _ra_nand_pull_data(buf, len, 0); ++ if (ret != len) { ++ ra_dbg("%s: ret:%x (%x) \n", __func__, ret, len); ++ return NAND_STATUS_FAIL; ++ } ++ ++ //FIXME, this section is not necessary ++ ret = nfc_wait_ready(0); //wait ready ++ /* to prevent the DATA FIFO 's old data from next operation */ ++ ra_outl(NFC_CTRL, ra_inl(NFC_CTRL) | 0x02); //clear data buffer ++ ra_outl(NFC_CTRL, ra_inl(NFC_CTRL) & ~0x02); //clear data buffer ++ ++ if (ret & NAND_STATUS_FAIL) { ++ printk("%s: fail \n", __func__); ++ return NAND_STATUS_FAIL; ++ } ++ ++ return 0; ++} ++ ++static inline int _nfc_write_raw_data(int cmd1, int cmd3, int bus_addr, int bus_addr2, int conf, char *buf, int len, int flags) ++{ ++ int ret; ++ ++ CLEAR_INT_STATUS(); ++ ra_outl(NFC_CMD1, cmd1); ++ ra_outl(NFC_CMD3, cmd3); ++ ra_outl(NFC_ADDR, bus_addr); ++#if defined (CONFIG_RALINK_RT6855) || defined (CONFIG_RALINK_RT6855A) || \ ++ defined (CONFIG_RALINK_MT7620) || defined (CONFIG_RALINK_MT7621) ++ ra_outl(NFC_ADDR2, bus_addr2); ++#endif ++ ra_outl(NFC_CONF, conf); ++ ++ ret = _ra_nand_push_data(buf, len, 0); ++ if (ret != len) { ++ ra_dbg("%s: ret:%x (%x) \n", __func__, ret, len); ++ return NAND_STATUS_FAIL; ++ } ++ ++ ret = nfc_wait_ready(1); //write wait 1ms ++ /* to prevent the DATA FIFO 's old data from next operation */ ++ ra_outl(NFC_CTRL, ra_inl(NFC_CTRL) | 0x02); //clear data buffer ++ ra_outl(NFC_CTRL, ra_inl(NFC_CTRL) & ~0x02); //clear data buffer ++ ++ if (ret & NAND_STATUS_FAIL) { ++ printk("%s: fail \n", __func__); ++ return NAND_STATUS_FAIL; ++ } ++ ++ return 0; ++} ++ ++/** ++ * @return !0: fail ++ * @return 0: OK ++ */ ++int nfc_read_oob(struct ra_nand_chip *ra, int page, unsigned int offs, char *buf, int len, int flags) ++{ ++ unsigned int cmd1 = 0, cmd2 = 0, conf = 0; ++ unsigned int bus_addr = 0, bus_addr2 = 0; ++ unsigned int ecc_en; ++ int use_gdma; ++ int status; ++ ++ int pages_perblock = 1<<(ra->erase_shift - ra->page_shift); ++ // constrain of nfc read function ++ ++#if defined (WORKAROUND_RX_BUF_OV) ++ BUG_ON (len > 60); //problem of rx-buffer overrun ++#endif ++ BUG_ON (offs >> ra->oob_shift); //page boundry ++ BUG_ON ((unsigned int)(((offs + len) >> ra->oob_shift) + page) > ++ ((page + pages_perblock) & ~(pages_perblock-1))); 
//block boundry ++ ++ use_gdma = flags & FLAG_USE_GDMA; ++ ecc_en = flags & FLAG_ECC_EN; ++ bus_addr = (page << (CFG_COLUMN_ADDR_CYCLE*8)) | (offs & ((1<> (CFG_COLUMN_ADDR_CYCLE*8); ++ cmd1 = 0x0; ++ cmd2 = 0x30; ++ conf = 0x000511| ((CFG_ADDR_CYCLE)<<16) | (len << 20); ++ } ++ else { ++ cmd1 = 0x50; ++ conf = 0x000141| ((CFG_ADDR_CYCLE)<<16) | (len << 20); ++ } ++ if (ecc_en) ++ conf |= (1<<3); ++ if (use_gdma) ++ conf |= (1<<2); ++ ++ ra_dbg("%s: cmd1:%x, bus_addr:%x, conf:%x, len:%x, flag:%x\n", ++ __func__, cmd1, bus_addr, conf, len, flags); ++ ++ status = _nfc_read_raw_data(cmd1, cmd2, bus_addr, bus_addr2, conf, buf, len, flags); ++ if (status & NAND_STATUS_FAIL) { ++ printk("%s: fail\n", __func__); ++ return -EIO; ++ } ++ ++ return 0; ++} ++ ++/** ++ * @return !0: fail ++ * @return 0: OK ++ */ ++int nfc_write_oob(struct ra_nand_chip *ra, int page, unsigned int offs, char *buf, int len, int flags) ++{ ++ unsigned int cmd1 = 0, cmd3=0, conf = 0; ++ unsigned int bus_addr = 0, bus_addr2 = 0; ++ int use_gdma; ++ int status; ++ ++ int pages_perblock = 1<<(ra->erase_shift - ra->page_shift); ++ // constrain of nfc read function ++ ++ BUG_ON (offs >> ra->oob_shift); //page boundry ++ BUG_ON ((unsigned int)(((offs + len) >> ra->oob_shift) + page) > ++ ((page + pages_perblock) & ~(pages_perblock-1))); //block boundry ++ ++ use_gdma = flags & FLAG_USE_GDMA; ++ bus_addr = (page << (CFG_COLUMN_ADDR_CYCLE*8)) | (offs & ((1<> (CFG_COLUMN_ADDR_CYCLE*8); ++ conf = 0x001123 | ((CFG_ADDR_CYCLE)<<16) | ((len) << 20); ++ } ++ else { ++ cmd1 = 0x08050; ++ cmd3 = 0x10; ++ conf = 0x001223 | ((CFG_ADDR_CYCLE)<<16) | ((len) << 20); ++ } ++ if (use_gdma) ++ conf |= (1<<2); ++ ++ // set NFC ++ ra_dbg("%s: cmd1: %x, cmd3: %x bus_addr: %x, conf: %x, len:%x\n", ++ __func__, cmd1, cmd3, bus_addr, conf, len); ++ ++ status = _nfc_write_raw_data(cmd1, cmd3, bus_addr, bus_addr2, conf, buf, len, flags); ++ if (status & NAND_STATUS_FAIL) { ++ printk("%s: fail \n", __func__); ++ return -EIO; ++ } ++ ++ return 0; ++} ++ ++ ++int nfc_read_page(struct ra_nand_chip *ra, char *buf, int page, int flags); ++int nfc_write_page(struct ra_nand_chip *ra, char *buf, int page, int flags); ++ ++ ++#if !defined (WORKAROUND_RX_BUF_OV) ++static int one_bit_correction(char *ecc, char *expected, int *bytes, int *bits); ++int nfc_ecc_verify(struct ra_nand_chip *ra, char *buf, int page, int mode) ++{ ++ int ret, i; ++ char *p, *e; ++ int ecc; ++ ++ //ra_dbg("%s, page:%x mode:%d\n", __func__, page, mode); ++ ++ if (mode == FL_WRITING) { ++ int len = CFG_PAGESIZE + CFG_PAGE_OOBSIZE; ++ int conf = 0x000141| ((CFG_ADDR_CYCLE)<<16) | (len << 20); ++ conf |= (1<<3); //(ecc_en) ++ //conf |= (1<<2); // (use_gdma) ++ ++ p = ra->readback_buffers; ++ ret = nfc_read_page(ra, ra->readback_buffers, page, FLAG_ECC_EN); ++ if (ret == 0) ++ goto ecc_check; ++ ++ //FIXME, double comfirm ++ printk("%s: read back fail, try again \n",__func__); ++ ret = nfc_read_page(ra, ra->readback_buffers, page, FLAG_ECC_EN); ++ if (ret != 0) { ++ printk("\t%s: read back fail agian \n",__func__); ++ goto bad_block; ++ } ++ } ++ else if (mode == FL_READING) { ++ p = buf; ++ } ++ else ++ return -2; ++ ++ecc_check: ++ p += CFG_PAGESIZE; ++ if (!is_nand_page_2048) { ++ ecc = ra_inl(NFC_ECC); ++ if (ecc == 0) //clean page. 
++ return 0; ++ e = (char*)&ecc; ++ for (i=0; ireadback_buffers, page, FLAG_NONE); ++ if (ret != 0) //double comfirm ++ ret = nfc_read_page(ra, ra->readback_buffers, page, FLAG_NONE); ++ ++ if (ret != 0) { ++ printk("%s: mode:%x read back fail \n", __func__, mode); ++ return -1; ++ } ++ return memcmp(buf, ra->readback_buffers, 1<page_shift); ++ } ++ ++ if (mode == FL_READING) { ++#if 0 ++ if (ra->sandbox_page == 0) ++ return 0; ++ ++ ret = nfc_write_page(ra, buf, ra->sandbox_page, FLAG_USE_GDMA | FLAG_ECC_EN); ++ if (ret != 0) { ++ printk("%s, fail write sandbox_page \n", __func__); ++ return -1; ++ } ++#else ++ /** @note: ++ * The following command is actually not 'write' command to drive NFC to write flash. ++ * However, it can make NFC to calculate ECC, that will be used to compare with original ones. ++ * --YT ++ */ ++ unsigned int conf = 0x001223| (CFG_ADDR_CYCLE<<16) | (0x200 << 20) | (1<<3) | (1<<2); ++ _nfc_write_raw_data(0xff, 0xff, ra->sandbox_page<page_shift, conf, buf, 0x200, FLAG_USE_GDMA); ++#endif ++ ++ ecc = ra_inl(NFC_ECC); ++ if (ecc == 0) //clean page. ++ return 0; ++ e = (char*)&ecc; ++ p = buf + (1<page_shift); ++ for (i=0; i 0) { ++ int len; ++#if defined (WORKAROUND_RX_BUF_OV) ++ len = min(60, size); ++#else ++ len = size; ++#endif ++ bus_addr = (page << (CFG_COLUMN_ADDR_CYCLE*8)) | (offs & ((1<> (CFG_COLUMN_ADDR_CYCLE*8); ++ cmd1 = 0x0; ++ cmd2 = 0x30; ++ conf = 0x000511| ((CFG_ADDR_CYCLE)<<16) | (len << 20); ++ } ++ else { ++ if (offs & ~(CFG_PAGESIZE-1)) ++ cmd1 = 0x50; ++ else if (offs & ~((1<buffers_page = -1; //cached ++ } ++ ++ return 0; ++} ++ ++ ++/** ++ * @return -EIO, fail to write ++ * @return 0, OK ++ */ ++int nfc_write_page(struct ra_nand_chip *ra, char *buf, int page, int flags) ++{ ++ unsigned int cmd1 = 0, cmd3, conf = 0; ++ unsigned int bus_addr = 0, bus_addr2 = 0; ++ unsigned int ecc_en; ++ int use_gdma; ++ int size; ++ char status; ++ uint8_t *oob = buf + (1<page_shift); ++ ++ use_gdma = flags & FLAG_USE_GDMA; ++ ecc_en = flags & FLAG_ECC_EN; ++ ++ oob[ra->badblockpos] = 0xff; //tag as good block. ++ ra->buffers_page = -1; //cached ++ ++ page = page & (CFG_CHIPSIZE-1); //chip boundary ++ size = CFG_PAGESIZE + CFG_PAGE_OOBSIZE; //add oobsize ++ bus_addr = (page << (CFG_COLUMN_ADDR_CYCLE*8)); //write_page always write from offset 0. ++ ++ if (is_nand_page_2048) { ++ bus_addr2 = page >> (CFG_COLUMN_ADDR_CYCLE*8); ++ cmd1 = 0x80; ++ cmd3 = 0x10; ++ conf = 0x001123| ((CFG_ADDR_CYCLE)<<16) | (size << 20); ++ } ++ else { ++ cmd1 = 0x8000; ++ cmd3 = 0x10; ++ conf = 0x001223| ((CFG_ADDR_CYCLE)<<16) | (size << 20); ++} ++ if (ecc_en) ++ conf |= (1<<3); //enable ecc ++ if (use_gdma) ++ conf |= (1<<2); ++ ++ // set NFC ++ ra_dbg("nfc_write_page: cmd1: %x, cmd3: %x bus_addr: %x, conf: %x, len:%x\n", ++ cmd1, cmd3, bus_addr, conf, size); ++ ++ status = _nfc_write_raw_data(cmd1, cmd3, bus_addr, bus_addr2, conf, buf, size, flags); ++ if (status & NAND_STATUS_FAIL) { ++ printk("%s: fail \n", __func__); ++ return -EIO; ++ } ++ ++ ++ if (flags & FLAG_VERIFY) { // verify and correct ecc ++ status = nfc_ecc_verify(ra, buf, page, FL_WRITING); ++ ++#ifdef RANDOM_GEN_BAD_BLOCK ++ if (((random32() & 0x1ff) == 0x0) && (page >= 0x100)) // randomly create bad block ++ { ++ printk("hmm... 
create a bad block at page %x\n", (bus_addr >> 16)); ++ status = -1; ++ } ++#endif ++ ++ if (status != 0) { ++ printk("%s: ecc_verify fail: ret:%x \n", __func__, status); ++ oob[ra->badblockpos] = 0x33; ++ page -= page % (CFG_BLOCKSIZE/CFG_PAGESIZE); ++ printk("create a bad block at page %x\n", page); ++ if (!is_nand_page_2048) ++ status = nfc_write_oob(ra, page, ra->badblockpos, oob+ra->badblockpos, 1, flags); ++ else ++ { ++ status = _nfc_write_raw_data(cmd1, cmd3, bus_addr, bus_addr2, conf, buf, size, flags); ++ nfc_write_oob(ra, page, 0, oob, 16, FLAG_NONE); ++ } ++ return -EBADMSG; ++ } ++ } ++ ++ ++ ra->buffers_page = page; //cached ++ return 0; ++} ++ ++ ++ ++/************************************************************* ++ * nand internal process ++ *************************************************************/ ++ ++/** ++ * nand_release_device - [GENERIC] release chip ++ * @mtd: MTD device structure ++ * ++ * Deselect, release chip lock and wake up anyone waiting on the device ++ */ ++static void nand_release_device(struct ra_nand_chip *ra) ++{ ++ /* De-select the NAND device */ ++ nfc_select_chip(ra, -1); ++ ++ /* Release the controller and the chip */ ++ ra->state = FL_READY; ++ ++ mutex_unlock(ra->controller); ++} ++ ++/** ++ * nand_get_device - [GENERIC] Get chip for selected access ++ * @chip: the nand chip descriptor ++ * @mtd: MTD device structure ++ * @new_state: the state which is requested ++ * ++ * Get the device and lock it for exclusive access ++ */ ++static int ++nand_get_device(struct ra_nand_chip *ra, int new_state) ++{ ++ int ret = 0; ++ ++ ret = mutex_lock_interruptible(ra->controller); ++ if (!ret) ++ ra->state = new_state; ++ ++ return ret; ++ ++} ++ ++ ++ ++/************************************************************* ++ * nand internal process ++ *************************************************************/ ++ ++int nand_bbt_get(struct ra_nand_chip *ra, int block) ++{ ++ int byte, bits; ++ bits = block * BBTTAG_BITS; ++ ++ byte = bits / 8; ++ bits = bits % 8; ++ ++ return (ra->bbt[byte] >> bits) & BBTTAG_BITS_MASK; ++} ++ ++int nand_bbt_set(struct ra_nand_chip *ra, int block, int tag) ++{ ++ int byte, bits; ++ bits = block * BBTTAG_BITS; ++ ++ byte = bits / 8; ++ bits = bits % 8; ++ ++ // If previous tag is bad, dont overwrite it ++ if (((ra->bbt[byte] >> bits) & BBTTAG_BITS_MASK) == BBT_TAG_BAD) ++ { ++ return BBT_TAG_BAD; ++ } ++ ++ ra->bbt[byte] = (ra->bbt[byte] & ~(BBTTAG_BITS_MASK << bits)) | ((tag & BBTTAG_BITS_MASK) << bits); ++ ++ return tag; ++} ++ ++/** ++ * nand_block_checkbad - [GENERIC] Check if a block is marked bad ++ * @mtd: MTD device structure ++ * @ofs: offset from device start ++ * ++ * Check, if the block is bad. Either by reading the bad block table or ++ * calling of the scan function. ++ */ ++int nand_block_checkbad(struct ra_nand_chip *ra, loff_t offs) ++{ ++ int page, block; ++ int ret = 4; ++ unsigned int tag; ++ char *str[]= {"UNK", "RES", "BAD", "GOOD"}; ++ ++ if (ranfc_bbt == 0) ++ return 0; ++ ++ { ++ // align with chip ++ ++ offs = offs & ((1<chip_shift) -1); ++ ++ page = offs >> ra->page_shift; ++ block = offs >> ra->erase_shift; ++ } ++ ++ tag = nand_bbt_get(ra, block); ++ ++ if (tag == BBT_TAG_UNKNOWN) { ++ ret = nfc_read_oob(ra, page, ra->badblockpos, (char*)&tag, 1, FLAG_NONE); ++ if (ret == 0) ++ tag = ((le32_to_cpu(tag) & 0x0ff) == 0x0ff) ? 
BBT_TAG_GOOD : BBT_TAG_BAD; ++ else ++ tag = BBT_TAG_BAD; ++ ++ nand_bbt_set(ra, block, tag); ++ } ++ ++ if (tag != BBT_TAG_GOOD) { ++ printk("%s: offs:%x tag: %s \n", __func__, (unsigned int)offs, str[tag]); ++ return 1; ++ } ++ else ++ return 0; ++ ++} ++ ++ ++ ++/** ++ * nand_block_markbad - ++ */ ++int nand_block_markbad(struct ra_nand_chip *ra, loff_t offs) ++{ ++ int page, block; ++ int ret = 4; ++ unsigned int tag; ++ char *ecc; ++ ++ // align with chip ++ ra_dbg("%s offs: %x \n", __func__, (int)offs); ++ ++ offs = offs & ((1<chip_shift) -1); ++ ++ page = offs >> ra->page_shift; ++ block = offs >> ra->erase_shift; ++ ++ tag = nand_bbt_get(ra, block); ++ ++ if (tag == BBT_TAG_BAD) { ++ printk("%s: mark repeatedly \n", __func__); ++ return 0; ++ } ++ ++ // new tag as bad ++ tag =BBT_TAG_BAD; ++ ret = nfc_read_page(ra, ra->buffers, page, FLAG_NONE); ++ if (ret != 0) { ++ printk("%s: fail to read bad block tag \n", __func__); ++ goto tag_bbt; ++ } ++ ++ ecc = &ra->buffers[(1<page_shift)+ra->badblockpos]; ++ if (*ecc == (char)0x0ff) { ++ //tag into flash ++ *ecc = (char)tag; ++ ret = nfc_write_page(ra, ra->buffers, page, FLAG_USE_GDMA); ++ if (ret) ++ printk("%s: fail to write bad block tag \n", __func__); ++ ++ } ++ ++tag_bbt: ++ //update bbt ++ nand_bbt_set(ra, block, tag); ++ ++ return 0; ++} ++ ++ ++#if defined (WORKAROUND_RX_BUF_OV) ++/** ++ * to find a bad block for ecc verify of read_page ++ */ ++unsigned int nand_bbt_find_sandbox(struct ra_nand_chip *ra) ++{ ++ loff_t offs = 0; ++ int chipsize = 1 << ra->chip_shift; ++ int blocksize = 1 << ra->erase_shift; ++ ++ ++ while (offs < chipsize) { ++ if (nand_block_checkbad(ra, offs)) //scan and verify the unknown tag ++ break; ++ offs += blocksize; ++ } ++ ++ if (offs >= chipsize) { ++ offs = chipsize - blocksize; ++ } ++ ++ nand_bbt_set(ra, (unsigned int)offs>>ra->erase_shift, BBT_TAG_RES); // tag bbt only, instead of update badblockpos of flash. 
++ return (offs >> ra->page_shift); ++} ++#endif ++ ++ ++ ++/** ++ * nand_erase_nand - [Internal] erase block(s) ++ * @mtd: MTD device structure ++ * @instr: erase instruction ++ * @allowbbt: allow erasing the bbt area ++ * ++ * Erase one ore more blocks ++ */ ++int _nand_erase_nand(struct ra_nand_chip *ra, struct erase_info *instr) ++{ ++ int page, len, status, ret; ++ unsigned int addr, blocksize = 1<erase_shift; ++ ++ ra_dbg("%s: start:%x, len:%x \n", __func__, ++ (unsigned int)instr->addr, (unsigned int)instr->len); ++ ++//#define BLOCK_ALIGNED(a) ((a) & (blocksize - 1)) // already defined ++ ++ if (BLOCK_ALIGNED(instr->addr) || BLOCK_ALIGNED(instr->len)) { ++ ra_dbg("%s: erase block not aligned, addr:%x len:%x\n", __func__, instr->addr, instr->len); ++ return -EINVAL; ++ } ++ ++ instr->fail_addr = 0xffffffff; ++ ++ len = instr->len; ++ addr = instr->addr; ++ instr->state = MTD_ERASING; ++ ++ while (len) { ++ ++ page = (int)(addr >> ra->page_shift); ++ ++ /* select device and check wp */ ++ if (nfc_enable_chip(ra, addr, 0)) { ++ printk("%s: nand is write protected \n", __func__); ++ instr->state = MTD_ERASE_FAILED; ++ goto erase_exit; ++ } ++ ++ /* if we have a bad block, we do not erase bad blocks */ ++ if (nand_block_checkbad(ra, addr)) { ++ printk(KERN_WARNING "nand_erase: attempt to erase a " ++ "bad block at 0x%08x\n", addr); ++ instr->state = MTD_ERASE_FAILED; ++ goto erase_exit; ++ } ++ ++ /* ++ * Invalidate the page cache, if we erase the block which ++ * contains the current cached page ++ */ ++ if (BLOCK_ALIGNED(addr) == BLOCK_ALIGNED(ra->buffers_page << ra->page_shift)) ++ ra->buffers_page = -1; ++ ++ status = nfc_erase_block(ra, page); ++ /* See if block erase succeeded */ ++ if (status) { ++ printk("%s: failed erase, page 0x%08x\n", __func__, page); ++ instr->state = MTD_ERASE_FAILED; ++ instr->fail_addr = (page << ra->page_shift); ++ goto erase_exit; ++ } ++ ++ ++ /* Increment page address and decrement length */ ++ len -= blocksize; ++ addr += blocksize; ++ ++ } ++ instr->state = MTD_ERASE_DONE; ++ ++erase_exit: ++ ++ ret = ((instr->state == MTD_ERASE_DONE) ? 0 : -EIO); ++ /* Do call back function */ ++ if (!ret) ++ mtd_erase_callback(instr); ++ ++ if (ret) { ++ nand_bbt_set(ra, addr >> ra->erase_shift, BBT_TAG_BAD); ++ } ++ ++ /* Return more or less happy */ ++ return ret; ++} ++ ++static int ++nand_write_oob_buf(struct ra_nand_chip *ra, uint8_t *buf, uint8_t *oob, size_t size, ++ int mode, int ooboffs) ++{ ++ size_t oobsize = 1<oob_shift; ++ struct nand_oobfree *free; ++ uint32_t woffs = ooboffs; ++ int retsize = 0; ++ ++ ra_dbg("%s: size:%x, mode:%x, offs:%x \n", __func__, size, mode, ooboffs); ++ ++ switch(mode) { ++ case MTD_OPS_PLACE_OOB: ++ case MTD_OPS_RAW: ++ if (ooboffs > oobsize) ++ return -1; ++ ++ size = min(size, oobsize - ooboffs); ++ memcpy(buf + ooboffs, oob, size); ++ retsize = size; ++ break; ++ ++ case MTD_OPS_AUTO_OOB: ++ if (ooboffs > ra->oob->oobavail) ++ return -1; ++ ++ while (size) { ++ for(free = ra->oob->oobfree; free->length && size; free++) { ++ int wlen = free->length - woffs; ++ int bytes = 0; ++ ++ /* Write request not from offset 0 ? 
*/ ++ if (wlen <= 0) { ++ woffs = -wlen; ++ continue; ++ } ++ ++ bytes = min_t(size_t, size, wlen); ++ memcpy (buf + free->offset + woffs, oob, bytes); ++ woffs = 0; ++ oob += bytes; ++ size -= bytes; ++ retsize += bytes; ++ } ++ buf += oobsize; ++ } ++ break; ++ ++ default: ++ BUG(); ++ } ++ ++ return retsize; ++} ++ ++static int nand_read_oob_buf(struct ra_nand_chip *ra, uint8_t *oob, size_t size, ++ int mode, int ooboffs) ++{ ++ size_t oobsize = 1<oob_shift; ++ uint8_t *buf = ra->buffers + (1<page_shift); ++ int retsize=0; ++ ++ ra_dbg("%s: size:%x, mode:%x, offs:%x \n", __func__, size, mode, ooboffs); ++ ++ switch(mode) { ++ case MTD_OPS_PLACE_OOB: ++ case MTD_OPS_RAW: ++ if (ooboffs > oobsize) ++ return -1; ++ ++ size = min(size, oobsize - ooboffs); ++ memcpy(oob, buf + ooboffs, size); ++ return size; ++ ++ case MTD_OPS_AUTO_OOB: { ++ struct nand_oobfree *free; ++ uint32_t woffs = ooboffs; ++ ++ if (ooboffs > ra->oob->oobavail) ++ return -1; ++ ++ size = min(size, ra->oob->oobavail - ooboffs); ++ for(free = ra->oob->oobfree; free->length && size; free++) { ++ int wlen = free->length - woffs; ++ int bytes = 0; ++ ++ /* Write request not from offset 0 ? */ ++ if (wlen <= 0) { ++ woffs = -wlen; ++ continue; ++ } ++ ++ bytes = min_t(size_t, size, wlen); ++ memcpy (oob, buf + free->offset + woffs, bytes); ++ woffs = 0; ++ oob += bytes; ++ size -= bytes; ++ retsize += bytes; ++ } ++ return retsize; ++ } ++ default: ++ BUG(); ++ } ++ ++ return -1; ++} ++ ++/** ++ * nand_do_write_ops - [Internal] NAND write with ECC ++ * @mtd: MTD device structure ++ * @to: offset to write to ++ * @ops: oob operations description structure ++ * ++ * NAND write with ECC ++ */ ++static int nand_do_write_ops(struct ra_nand_chip *ra, loff_t to, ++ struct mtd_oob_ops *ops) ++{ ++ int page; ++ uint32_t datalen = ops->len; ++ uint32_t ooblen = ops->ooblen; ++ uint8_t *oob = ops->oobbuf; ++ uint8_t *data = ops->datbuf; ++ int pagesize = (1<page_shift); ++ int pagemask = (pagesize -1); ++ int oobsize = 1<oob_shift; ++ loff_t addr = to; ++ //int i = 0; //for ra_dbg only ++ ++ ra_dbg("%s: to:%x, ops data:%p, oob:%p datalen:%x ooblen:%x, ooboffs:%x oobmode:%x \n", ++ __func__, (unsigned int)to, data, oob, datalen, ooblen, ops->ooboffs, ops->mode); ++ ++ ops->retlen = 0; ++ ops->oobretlen = 0; ++ ++ ++ /* Invalidate the page cache, when we write to the cached page */ ++ ra->buffers_page = -1; ++ ++ ++ if (data ==0) ++ datalen = 0; ++ ++ // oob sequential (burst) write ++ if (datalen == 0 && ooblen) { ++ int len = ((ooblen + ops->ooboffs) + (ra->oob->oobavail - 1)) / ra->oob->oobavail * oobsize; ++ ++ /* select chip, and check if it is write protected */ ++ if (nfc_enable_chip(ra, addr, 0)) ++ return -EIO; ++ ++ //FIXME, need sanity check of block boundary ++ page = (int)((to & ((1<chip_shift)-1)) >> ra->page_shift); //chip boundary ++ memset(ra->buffers, 0x0ff, pagesize); ++ //fixme, should we reserve the original content? ++ if (ops->mode == MTD_OPS_AUTO_OOB) { ++ nfc_read_oob(ra, page, 0, ra->buffers, len, FLAG_NONE); ++ } ++ //prepare buffers ++ if (ooblen != 8) ++ { ++ nand_write_oob_buf(ra, ra->buffers, oob, ooblen, ops->mode, ops->ooboffs); ++ // write out buffer to chip ++ nfc_write_oob(ra, page, 0, ra->buffers, len, FLAG_USE_GDMA); ++ } ++ ++ ops->oobretlen = ooblen; ++ ooblen = 0; ++ } ++ ++ // data sequential (burst) write ++ if (datalen && ooblen == 0) { ++ // ranfc can not support write_data_burst, since hw-ecc and fifo constraints.. 
++ } ++ ++ // page write ++ while(datalen || ooblen) { ++ int len; ++ int ret; ++ int offs; ++ int ecc_en = 0; ++ ++ ra_dbg("%s (%d): addr:%x, ops data:%p, oob:%p datalen:%x ooblen:%x, ooboffs:%x \n", ++ __func__, i++, (unsigned int)addr, data, oob, datalen, ooblen, ops->ooboffs); ++ ++ page = (int)((addr & ((1<chip_shift)-1)) >> ra->page_shift); //chip boundary ++ ++ /* select chip, and check if it is write protected */ ++ if (nfc_enable_chip(ra, addr, 0)) ++ return -EIO; ++ ++ // oob write ++ if (ops->mode == MTD_OPS_AUTO_OOB) { ++ //fixme, this path is not yet varified ++ nfc_read_oob(ra, page, 0, ra->buffers + pagesize, oobsize, FLAG_NONE); ++ } ++ if (oob && ooblen > 0) { ++ len = nand_write_oob_buf(ra, ra->buffers + pagesize, oob, ooblen, ops->mode, ops->ooboffs); ++ if (len < 0) ++ return -EINVAL; ++ ++ oob += len; ++ ops->oobretlen += len; ++ ooblen -= len; ++ } ++ ++ // data write ++ offs = addr & pagemask; ++ len = min_t(size_t, datalen, pagesize - offs); ++ if (data && len > 0) { ++ memcpy(ra->buffers + offs, data, len); // we can not sure ops->buf wether is DMA-able. ++ ++ data += len; ++ datalen -= len; ++ ops->retlen += len; ++ ++ ecc_en = FLAG_ECC_EN; ++ } ++ ret = nfc_write_page(ra, ra->buffers, page, FLAG_USE_GDMA | FLAG_VERIFY | ++ ((ops->mode == MTD_OPS_RAW || ops->mode == MTD_OPS_PLACE_OOB) ? 0 : ecc_en )); ++ if (ret) { ++ nand_bbt_set(ra, addr >> ra->erase_shift, BBT_TAG_BAD); ++ return ret; ++ } ++ ++ nand_bbt_set(ra, addr >> ra->erase_shift, BBT_TAG_GOOD); ++ ++ addr = (page+1) << ra->page_shift; ++ ++ } ++ return 0; ++} ++ ++/** ++ * nand_do_read_ops - [Internal] Read data with ECC ++ * ++ * @mtd: MTD device structure ++ * @from: offset to read from ++ * @ops: oob ops structure ++ * ++ * Internal function. Called with chip held. ++ */ ++static int nand_do_read_ops(struct ra_nand_chip *ra, loff_t from, ++ struct mtd_oob_ops *ops) ++{ ++ int page; ++ uint32_t datalen = ops->len; ++ uint32_t ooblen = ops->ooblen; ++ uint8_t *oob = ops->oobbuf; ++ uint8_t *data = ops->datbuf; ++ int pagesize = (1<page_shift); ++ int pagemask = (pagesize -1); ++ loff_t addr = from; ++ //int i = 0; //for ra_dbg only ++ ++ ra_dbg("%s: addr:%x, ops data:%p, oob:%p datalen:%x ooblen:%x, ooboffs:%x \n", ++ __func__, (unsigned int)addr, data, oob, datalen, ooblen, ops->ooboffs); ++ ++ ops->retlen = 0; ++ ops->oobretlen = 0; ++ if (data == 0) ++ datalen = 0; ++ ++ ++ while(datalen || ooblen) { ++ int len; ++ int ret; ++ int offs; ++ ++ ra_dbg("%s (%d): addr:%x, ops data:%p, oob:%p datalen:%x ooblen:%x, ooboffs:%x \n", ++ __func__, i++, (unsigned int)addr, data, oob, datalen, ooblen, ops->ooboffs); ++ /* select chip */ ++ if (nfc_enable_chip(ra, addr, 1) < 0) ++ return -EIO; ++ ++ page = (int)((addr & ((1<chip_shift)-1)) >> ra->page_shift); ++ ++ ret = nfc_read_page(ra, ra->buffers, page, FLAG_VERIFY | ++ ((ops->mode == MTD_OPS_RAW || ops->mode == MTD_OPS_PLACE_OOB) ? 0: FLAG_ECC_EN )); ++ //FIXME, something strange here, some page needs 2 more tries to guarantee read success. ++ if (ret) { ++ printk("read again:\n"); ++ ret = nfc_read_page(ra, ra->buffers, page, FLAG_VERIFY | ++ ((ops->mode == MTD_OPS_RAW || ops->mode == MTD_OPS_PLACE_OOB) ? 0: FLAG_ECC_EN )); ++ ++ if (ret) { ++ printk("read again fail \n"); ++ nand_bbt_set(ra, addr >> ra->erase_shift, BBT_TAG_BAD); ++ if ((ret != -EUCLEAN) && (ret != -EBADMSG)) { ++ return ret; ++ } ++ else { ++ /* ecc verification fail, but data need to be returned. 
*/ ++ } ++ } ++ else { ++ printk(" read agian susccess \n"); ++ } ++ } ++ ++ // oob read ++ if (oob && ooblen > 0) { ++ len = nand_read_oob_buf(ra, oob, ooblen, ops->mode, ops->ooboffs); ++ if (len < 0) { ++ printk("nand_read_oob_buf: fail return %x \n", len); ++ return -EINVAL; ++ } ++ ++ oob += len; ++ ops->oobretlen += len; ++ ooblen -= len; ++ } ++ ++ // data read ++ offs = addr & pagemask; ++ len = min_t(size_t, datalen, pagesize - offs); ++ if (data && len > 0) { ++ memcpy(data, ra->buffers + offs, len); // we can not sure ops->buf wether is DMA-able. ++ ++ data += len; ++ datalen -= len; ++ ops->retlen += len; ++ if (ret) ++ return ret; ++ } ++ ++ ++ nand_bbt_set(ra, addr >> ra->erase_shift, BBT_TAG_GOOD); ++ // address go further to next page, instead of increasing of length of write. This avoids some special cases wrong. ++ addr = (page+1) << ra->page_shift; ++ } ++ return 0; ++} ++ ++static int ++ramtd_nand_erase(struct mtd_info *mtd, struct erase_info *instr) ++{ ++ struct ra_nand_chip *ra = (struct ra_nand_chip *)mtd->priv; ++ int ret; ++ ++ ra_dbg("%s: start:%x, len:%x \n", __func__, ++ (unsigned int)instr->addr, (unsigned int)instr->len); ++ ++ nand_get_device(ra, FL_ERASING); ++ ret = _nand_erase_nand((struct ra_nand_chip *)mtd->priv, instr); ++ nand_release_device(ra); ++ ++ return ret; ++} ++ ++static int ++ramtd_nand_write(struct mtd_info *mtd, loff_t to, size_t len, ++ size_t *retlen, const uint8_t *buf) ++{ ++ struct ra_nand_chip *ra = mtd->priv; ++ struct mtd_oob_ops ops; ++ int ret; ++ ++ ra_dbg("%s: to 0x%x len=0x%x\n", __func__, to, len); ++ ++ if ((to + len) > mtd->size) ++ return -EINVAL; ++ ++ if (!len) ++ return 0; ++ ++ nand_get_device(ra, FL_WRITING); ++ ++ memset(&ops, 0, sizeof(ops)); ++ ops.len = len; ++ ops.datbuf = (uint8_t *)buf; ++ ops.oobbuf = NULL; ++ ops.mode = MTD_OPS_AUTO_OOB; ++ ++ ret = nand_do_write_ops(ra, to, &ops); ++ ++ *retlen = ops.retlen; ++ ++ nand_release_device(ra); ++ ++ return ret; ++} ++ ++static int ++ramtd_nand_read(struct mtd_info *mtd, loff_t from, size_t len, ++ size_t *retlen, uint8_t *buf) ++{ ++ ++ struct ra_nand_chip *ra = mtd->priv; ++ int ret; ++ struct mtd_oob_ops ops; ++ ++ ra_dbg("%s: mtd:%p from:%x, len:%x, buf:%p \n", __func__, mtd, (unsigned int)from, len, buf); ++ ++ /* Do not allow reads past end of device */ ++ if ((from + len) > mtd->size) ++ return -EINVAL; ++ if (!len) ++ return 0; ++ ++ nand_get_device(ra, FL_READING); ++ ++ memset(&ops, 0, sizeof(ops)); ++ ops.len = len; ++ ops.datbuf = buf; ++ ops.oobbuf = NULL; ++ ops.mode = MTD_OPS_AUTO_OOB; ++ ++ ret = nand_do_read_ops(ra, from, &ops); ++ ++ *retlen = ops.retlen; ++ ++ nand_release_device(ra); ++ ++ return ret; ++ ++} ++ ++static int ++ramtd_nand_readoob(struct mtd_info *mtd, loff_t from, ++ struct mtd_oob_ops *ops) ++{ ++ struct ra_nand_chip *ra = mtd->priv; ++ int ret; ++ ++ ra_dbg("%s: \n", __func__); ++ ++ nand_get_device(ra, FL_READING); ++ ++ ret = nand_do_read_ops(ra, from, ops); ++ ++ nand_release_device(ra); ++ ++ return ret; ++} ++ ++static int ++ramtd_nand_writeoob(struct mtd_info *mtd, loff_t to, ++ struct mtd_oob_ops *ops) ++{ ++ struct ra_nand_chip *ra = mtd->priv; ++ int ret; ++ ++ nand_get_device(ra, FL_READING); ++ ret = nand_do_write_ops(ra, to, ops); ++ nand_release_device(ra); ++ ++ return ret; ++} ++ ++static int ++ramtd_nand_block_isbad(struct mtd_info *mtd, loff_t offs) ++{ ++ if (offs > mtd->size) ++ return -EINVAL; ++ ++ return nand_block_checkbad((struct ra_nand_chip *)mtd->priv, offs); ++} ++ ++static int 
++ramtd_nand_block_markbad(struct mtd_info *mtd, loff_t ofs) ++{ ++ struct ra_nand_chip *ra = mtd->priv; ++ int ret; ++ ++ ra_dbg("%s: \n", __func__); ++ nand_get_device(ra, FL_WRITING); ++ ret = nand_block_markbad(ra, ofs); ++ nand_release_device(ra); ++ ++ return ret; ++} ++ ++// 1-bit error detection ++static int one_bit_correction(char *ecc1, char *ecc2, int *bytes, int *bits) ++{ ++ // check if ecc and expected are all valid ++ char *p, nibble, crumb; ++ int i, xor, iecc1 = 0, iecc2 = 0; ++ ++ printk("correction : %x %x %x\n", ecc1[0], ecc1[1], ecc1[2]); ++ printk("correction : %x %x %x\n", ecc2[0], ecc2[1], ecc2[2]); ++ ++ p = (char *)ecc1; ++ for (i = 0; i < CONFIG_ECC_BYTES; i++) ++ { ++ nibble = *(p+i) & 0xf; ++ if ((nibble != 0x0) && (nibble != 0xf) && (nibble != 0x3) && (nibble != 0xc) && ++ (nibble != 0x5) && (nibble != 0xa) && (nibble != 0x6) && (nibble != 0x9)) ++ return -1; ++ nibble = ((*(p+i)) >> 4) & 0xf; ++ if ((nibble != 0x0) && (nibble != 0xf) && (nibble != 0x3) && (nibble != 0xc) && ++ (nibble != 0x5) && (nibble != 0xa) && (nibble != 0x6) && (nibble != 0x9)) ++ return -1; ++ } ++ ++ p = (char *)ecc2; ++ for (i = 0; i < CONFIG_ECC_BYTES; i++) ++ { ++ nibble = *(p+i) & 0xf; ++ if ((nibble != 0x0) && (nibble != 0xf) && (nibble != 0x3) && (nibble != 0xc) && ++ (nibble != 0x5) && (nibble != 0xa) && (nibble != 0x6) && (nibble != 0x9)) ++ return -1; ++ nibble = ((*(p+i)) >> 4) & 0xf; ++ if ((nibble != 0x0) && (nibble != 0xf) && (nibble != 0x3) && (nibble != 0xc) && ++ (nibble != 0x5) && (nibble != 0xa) && (nibble != 0x6) && (nibble != 0x9)) ++ return -1; ++ } ++ ++ memcpy(&iecc1, ecc1, 3); ++ memcpy(&iecc2, ecc2, 3); ++ ++ xor = iecc1 ^ iecc2; ++ printk("xor = %x (%x %x)\n", xor, iecc1, iecc2); ++ ++ *bytes = 0; ++ for (i = 0; i < 9; i++) ++ { ++ crumb = (xor >> (2*i)) & 0x3; ++ if ((crumb == 0x0) || (crumb == 0x3)) ++ return -1; ++ if (crumb == 0x2) ++ *bytes += (1 << i); ++ } ++ ++ *bits = 0; ++ for (i = 0; i < 3; i++) ++ { ++ crumb = (xor >> (18 + 2*i)) & 0x3; ++ if ((crumb == 0x0) || (crumb == 0x3)) ++ return -1; ++ if (crumb == 0x2) ++ *bits += (1 << i); ++ } ++ ++ return 0; ++} ++ ++ ++ ++/************************************************************ ++ * the init/exit section. ++ */ ++ ++static struct nand_ecclayout ra_oob_layout = { ++ .eccbytes = CONFIG_ECC_BYTES, ++ .eccpos = {5, 6, 7}, ++ .oobfree = { ++ {.offset = 0, .length = 4}, ++ {.offset = 8, .length = 8}, ++ {.offset = 0, .length = 0} ++ }, ++#define RA_CHIP_OOB_AVAIL (4+8) ++ .oobavail = RA_CHIP_OOB_AVAIL, ++ // 5th byte is bad-block flag. ++}; ++ ++static int ++mtk_nand_probe(struct platform_device *pdev) ++{ ++ struct mtd_part_parser_data ppdata; ++ struct ra_nand_chip *ra; ++ int alloc_size, bbt_size, buffers_size, reg, err; ++ unsigned char chip_mode = 12; ++ ++/* if(ra_check_flash_type()!=BOOT_FROM_NAND) { ++ return 0; ++ }*/ ++ ++ //FIXME: config 512 or 2048-byte page according to HWCONF ++#if defined (CONFIG_RALINK_RT6855A) ++ reg = ra_inl(RALINK_SYSCTL_BASE+0x8c); ++ chip_mode = ((reg>>28) & 0x3)|(((reg>>22) & 0x3)<<2); ++ if (chip_mode == 1) { ++ printk("! nand 2048\n"); ++ ra_or(NFC_CONF1, 1); ++ is_nand_page_2048 = 1; ++ nand_addrlen = 5; ++ } ++ else { ++ printk("! 
nand 512\n"); ++ ra_and(NFC_CONF1, ~1); ++ is_nand_page_2048 = 0; ++ nand_addrlen = 4; ++ } ++#elif (defined (CONFIG_RALINK_MT7620) || defined (CONFIG_RALINK_RT6855)) ++ ra_outl(RALINK_SYSCTL_BASE+0x60, ra_inl(RALINK_SYSCTL_BASE+0x60) & ~(0x3<<18)); ++ reg = ra_inl(RALINK_SYSCTL_BASE+0x10); ++ chip_mode = (reg & 0x0F); ++ if((chip_mode==1)||(chip_mode==11)) { ++ ra_or(NFC_CONF1, 1); ++ is_nand_page_2048 = 1; ++ nand_addrlen = ((chip_mode!=11) ? 4 : 5); ++ printk("!!! nand page size = 2048, addr len=%d\n", nand_addrlen); ++ } ++ else { ++ ra_and(NFC_CONF1, ~1); ++ is_nand_page_2048 = 0; ++ nand_addrlen = ((chip_mode!=10) ? 3 : 4); ++ printk("!!! nand page size = 512, addr len=%d\n", nand_addrlen); ++ } ++#else ++ is_nand_page_2048 = 0; ++ nand_addrlen = 3; ++ printk("!!! nand page size = 512, addr len=%d\n", nand_addrlen); ++#endif ++ ++#if defined (CONFIG_RALINK_RT6855A) || defined (CONFIG_RALINK_MT7620) || defined (CONFIG_RALINK_RT6855) ++ //config ECC location ++ ra_and(NFC_CONF1, 0xfff000ff); ++ ra_or(NFC_CONF1, ((CONFIG_ECC_OFFSET + 2) << 16) + ++ ((CONFIG_ECC_OFFSET + 1) << 12) + ++ (CONFIG_ECC_OFFSET << 8)); ++#endif ++ ++#define ALIGNE_16(a) (((unsigned long)(a)+15) & ~15) ++ buffers_size = ALIGNE_16((1<buffers ++ bbt_size = BBTTAG_BITS * (1<<(CONFIG_CHIP_SIZE_BIT - (CONFIG_PAGE_SIZE_BIT + CONFIG_NUMPAGE_PER_BLOCK_BIT))) / 8; //ra->bbt ++ bbt_size = ALIGNE_16(bbt_size); ++ ++ alloc_size = buffers_size + bbt_size; ++ alloc_size += buffers_size; //for ra->readback_buffers ++ alloc_size += sizeof(*ra); ++ alloc_size += sizeof(*ranfc_mtd); ++ ++ //make sure gpio-0 is input ++ ra_outl(RALINK_PIO_BASE+0x24, ra_inl(RALINK_PIO_BASE+0x24) & ~0x01); ++ ++ ra = (struct ra_nand_chip *)kzalloc(alloc_size, GFP_KERNEL | GFP_DMA); ++ if (!ra) { ++ printk("%s: mem alloc fail \n", __func__); ++ return -ENOMEM; ++ } ++ memset(ra, 0, alloc_size); ++ ++ //dynamic ++ ra->buffers = (char *)((char *)ra + sizeof(*ra)); ++ ra->readback_buffers = ra->buffers + buffers_size; ++ ra->bbt = ra->readback_buffers + buffers_size; ++ ranfc_mtd = (struct mtd_info *)(ra->bbt + bbt_size); ++ ++ //static ++ ra->numchips = CONFIG_NUMCHIPS; ++ ra->chip_shift = CONFIG_CHIP_SIZE_BIT; ++ ra->page_shift = CONFIG_PAGE_SIZE_BIT; ++ ra->oob_shift = CONFIG_OOBSIZE_PER_PAGE_BIT; ++ ra->erase_shift = (CONFIG_PAGE_SIZE_BIT + CONFIG_NUMPAGE_PER_BLOCK_BIT); ++ ra->badblockpos = CONFIG_BAD_BLOCK_POS; ++ ra_oob_layout.eccpos[0] = CONFIG_ECC_OFFSET; ++ ra_oob_layout.eccpos[1] = CONFIG_ECC_OFFSET + 1; ++ ra_oob_layout.eccpos[2] = CONFIG_ECC_OFFSET + 2; ++ ra->oob = &ra_oob_layout; ++ ra->buffers_page = -1; ++ ++#if defined (WORKAROUND_RX_BUF_OV) ++ if (ranfc_verify) { ++ ra->sandbox_page = nand_bbt_find_sandbox(ra); ++ } ++#endif ++ ra_outl(NFC_CTRL, ra_inl(NFC_CTRL) | 0x01); //set wp to high ++ nfc_all_reset(); ++ ++ ranfc_mtd->type = MTD_NANDFLASH; ++ ranfc_mtd->flags = MTD_CAP_NANDFLASH; ++ ranfc_mtd->size = CONFIG_NUMCHIPS * CFG_CHIPSIZE; ++ ranfc_mtd->erasesize = CFG_BLOCKSIZE; ++ ranfc_mtd->writesize = CFG_PAGESIZE; ++ ranfc_mtd->oobsize = CFG_PAGE_OOBSIZE; ++ ranfc_mtd->oobavail = RA_CHIP_OOB_AVAIL; ++ ranfc_mtd->name = "ra_nfc"; ++ //ranfc_mtd->index ++ ranfc_mtd->ecclayout = &ra_oob_layout; ++ //ranfc_mtd->numberaseregions ++ //ranfc_mtd->eraseregions ++ //ranfc_mtd->bansize ++ ranfc_mtd->_erase = ramtd_nand_erase; ++ //ranfc_mtd->point ++ //ranfc_mtd->unpoint ++ ranfc_mtd->_read = ramtd_nand_read; ++ ranfc_mtd->_write = ramtd_nand_write; ++ ranfc_mtd->_read_oob = ramtd_nand_readoob; ++ ranfc_mtd->_write_oob = ramtd_nand_writeoob; 
++ //ranfc_mtd->get_fact_prot_info; ranfc_mtd->read_fact_prot_reg; ++ //ranfc_mtd->get_user_prot_info; ranfc_mtd->read_user_prot_reg; ++ //ranfc_mtd->write_user_prot_reg; ranfc_mtd->lock_user_prot_reg; ++ //ranfc_mtd->writev; ranfc_mtd->sync; ranfc_mtd->lock; ranfc_mtd->unlock; ranfc_mtd->suspend; ranfc_mtd->resume; ++ ranfc_mtd->_block_isbad = ramtd_nand_block_isbad; ++ ranfc_mtd->_block_markbad = ramtd_nand_block_markbad; ++ //ranfc_mtd->reboot_notifier ++ //ranfc_mtd->ecc_stats; ++ // subpage_sht; ++ ++ //ranfc_mtd->get_device; ranfc_mtd->put_device ++ ranfc_mtd->priv = ra; ++ ++ ranfc_mtd->owner = THIS_MODULE; ++ ra->controller = &ra->hwcontrol; ++ mutex_init(ra->controller); ++ ++ printk("%s: alloc %x, at %p , btt(%p, %x), ranfc_mtd:%p\n", ++ __func__ , alloc_size, ra, ra->bbt, bbt_size, ranfc_mtd); ++ ++ ppdata.of_node = pdev->dev.of_node; ++ err = mtd_device_parse_register(ranfc_mtd, mtk_probe_types, ++ &ppdata, NULL, 0); ++ ++ return err; ++} ++ ++static int ++mtk_nand_remove(struct platform_device *pdev) ++{ ++ struct ra_nand_chip *ra; ++ ++ if (ranfc_mtd) { ++ ra = (struct ra_nand_chip *)ranfc_mtd->priv; ++ ++ /* Deregister partitions */ ++ //del_mtd_partitions(ranfc_mtd); ++ kfree(ra); ++ } ++ return 0; ++} ++ ++static const struct of_device_id mtk_nand_match[] = { ++ { .compatible = "mtk,mt7620-nand" }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(of, mtk_nand_match); ++ ++static struct platform_driver mtk_nand_driver = { ++ .probe = mtk_nand_probe, ++ .remove = mtk_nand_remove, ++ .driver = { ++ .name = "mt7620_nand", ++ .owner = THIS_MODULE, ++ .of_match_table = mtk_nand_match, ++ }, ++}; ++ ++module_platform_driver(mtk_nand_driver); ++ ++ ++MODULE_LICENSE("GPL"); +--- /dev/null ++++ b/drivers/mtd/maps/ralink_nand.h +@@ -0,0 +1,232 @@ ++#ifndef RT2880_NAND_H ++#define RT2880_NAND_H ++ ++#include ++ ++//#include "gdma.h" ++ ++#define RALINK_SYSCTL_BASE 0xB0000000 ++#define RALINK_PIO_BASE 0xB0000600 ++#define RALINK_NAND_CTRL_BASE 0xB0000810 ++#define CONFIG_RALINK_MT7620 ++ ++#define SKIP_BAD_BLOCK ++//#define RANDOM_GEN_BAD_BLOCK ++ ++#define ra_inl(addr) (*(volatile unsigned int *)(addr)) ++#define ra_outl(addr, value) (*(volatile unsigned int *)(addr) = (value)) ++#define ra_aor(addr, a_mask, o_value) ra_outl(addr, (ra_inl(addr) & (a_mask)) | (o_value)) ++#define ra_and(addr, a_mask) ra_aor(addr, a_mask, 0) ++#define ra_or(addr, o_value) ra_aor(addr, -1, o_value) ++ ++ ++#define CONFIG_NUMCHIPS 1 ++#define CONFIG_NOT_SUPPORT_WP //rt3052 has no WP signal for chip. ++//#define CONFIG_NOT_SUPPORT_RB ++ ++extern int is_nand_page_2048; ++extern const unsigned int nand_size_map[2][3]; ++ ++//chip ++// chip geometry: SAMSUNG small size 32MB. ++#define CONFIG_CHIP_SIZE_BIT (nand_size_map[is_nand_page_2048][nand_addrlen-3]) //! (1<=32)? 
31 : CONFIG_CHIP_SIZE_BIT)) ++//#define CFG_CHIPSIZE (1 << CONFIG_CHIP_SIZE_BIT) ++#define CFG_PAGESIZE (1 << CONFIG_PAGE_SIZE_BIT) ++#define CFG_BLOCKSIZE (CFG_PAGESIZE << CONFIG_NUMPAGE_PER_BLOCK_BIT) ++#define CFG_NUMPAGE (1 << (CONFIG_CHIP_SIZE_BIT - CONFIG_PAGE_SIZE_BIT)) ++#define CFG_NUMBLOCK (CFG_NUMPAGE >> CONFIG_NUMPAGE_PER_BLOCK_BIT) ++#define CFG_BLOCK_OOBSIZE (1 << (CONFIG_OOBSIZE_PER_PAGE_BIT + CONFIG_NUMPAGE_PER_BLOCK_BIT)) ++#define CFG_PAGE_OOBSIZE (1 << CONFIG_OOBSIZE_PER_PAGE_BIT) ++ ++#define NAND_BLOCK_ALIGN(addr) ((addr) & (CFG_BLOCKSIZE-1)) ++#define NAND_PAGE_ALIGN(addr) ((addr) & (CFG_PAGESIZE-1)) ++ ++ ++#define NFC_BASE RALINK_NAND_CTRL_BASE ++#define NFC_CTRL (NFC_BASE + 0x0) ++#define NFC_CONF (NFC_BASE + 0x4) ++#define NFC_CMD1 (NFC_BASE + 0x8) ++#define NFC_CMD2 (NFC_BASE + 0xc) ++#define NFC_CMD3 (NFC_BASE + 0x10) ++#define NFC_ADDR (NFC_BASE + 0x14) ++#define NFC_DATA (NFC_BASE + 0x18) ++#if defined (CONFIG_RALINK_RT6855) || defined (CONFIG_RALINK_RT6855A) || \ ++ defined (CONFIG_RALINK_MT7620) || defined (CONFIG_RALINK_MT7621) ++#define NFC_ECC (NFC_BASE + 0x30) ++#else ++#define NFC_ECC (NFC_BASE + 0x1c) ++#endif ++#define NFC_STATUS (NFC_BASE + 0x20) ++#define NFC_INT_EN (NFC_BASE + 0x24) ++#define NFC_INT_ST (NFC_BASE + 0x28) ++#if defined (CONFIG_RALINK_RT6855) || defined (CONFIG_RALINK_RT6855A) || \ ++ defined (CONFIG_RALINK_MT7620) || defined (CONFIG_RALINK_MT7621) ++#define NFC_CONF1 (NFC_BASE + 0x2c) ++#define NFC_ECC_P1 (NFC_BASE + 0x30) ++#define NFC_ECC_P2 (NFC_BASE + 0x34) ++#define NFC_ECC_P3 (NFC_BASE + 0x38) ++#define NFC_ECC_P4 (NFC_BASE + 0x3c) ++#define NFC_ECC_ERR1 (NFC_BASE + 0x40) ++#define NFC_ECC_ERR2 (NFC_BASE + 0x44) ++#define NFC_ECC_ERR3 (NFC_BASE + 0x48) ++#define NFC_ECC_ERR4 (NFC_BASE + 0x4c) ++#define NFC_ADDR2 (NFC_BASE + 0x50) ++#endif ++ ++enum _int_stat { ++ INT_ST_ND_DONE = 1<<0, ++ INT_ST_TX_BUF_RDY = 1<<1, ++ INT_ST_RX_BUF_RDY = 1<<2, ++ INT_ST_ECC_ERR = 1<<3, ++ INT_ST_TX_TRAS_ERR = 1<<4, ++ INT_ST_RX_TRAS_ERR = 1<<5, ++ INT_ST_TX_KICK_ERR = 1<<6, ++ INT_ST_RX_KICK_ERR = 1<<7 ++}; ++ ++ ++//#define WORKAROUND_RX_BUF_OV 1 ++ ++ ++/************************************************************* ++ * stolen from nand.h ++ *************************************************************/ ++ ++/* ++ * Standard NAND flash commands ++ */ ++#define NAND_CMD_READ0 0 ++#define NAND_CMD_READ1 1 ++#define NAND_CMD_RNDOUT 5 ++#define NAND_CMD_PAGEPROG 0x10 ++#define NAND_CMD_READOOB 0x50 ++#define NAND_CMD_ERASE1 0x60 ++#define NAND_CMD_STATUS 0x70 ++#define NAND_CMD_STATUS_MULTI 0x71 ++#define NAND_CMD_SEQIN 0x80 ++#define NAND_CMD_RNDIN 0x85 ++#define NAND_CMD_READID 0x90 ++#define NAND_CMD_ERASE2 0xd0 ++#define NAND_CMD_RESET 0xff ++ ++/* Extended commands for large page devices */ ++#define NAND_CMD_READSTART 0x30 ++#define NAND_CMD_RNDOUTSTART 0xE0 ++#define NAND_CMD_CACHEDPROG 0x15 ++ ++/* Extended commands for AG-AND device */ ++/* ++ * Note: the command for NAND_CMD_DEPLETE1 is really 0x00 but ++ * there is no way to distinguish that from NAND_CMD_READ0 ++ * until the remaining sequence of commands has been completed ++ * so add a high order bit and mask it off in the command. 
++ */ ++#define NAND_CMD_DEPLETE1 0x100 ++#define NAND_CMD_DEPLETE2 0x38 ++#define NAND_CMD_STATUS_MULTI 0x71 ++#define NAND_CMD_STATUS_ERROR 0x72 ++/* multi-bank error status (banks 0-3) */ ++#define NAND_CMD_STATUS_ERROR0 0x73 ++#define NAND_CMD_STATUS_ERROR1 0x74 ++#define NAND_CMD_STATUS_ERROR2 0x75 ++#define NAND_CMD_STATUS_ERROR3 0x76 ++#define NAND_CMD_STATUS_RESET 0x7f ++#define NAND_CMD_STATUS_CLEAR 0xff ++ ++#define NAND_CMD_NONE -1 ++ ++/* Status bits */ ++#define NAND_STATUS_FAIL 0x01 ++#define NAND_STATUS_FAIL_N1 0x02 ++#define NAND_STATUS_TRUE_READY 0x20 ++#define NAND_STATUS_READY 0x40 ++#define NAND_STATUS_WP 0x80 ++ ++typedef enum { ++ FL_READY, ++ FL_READING, ++ FL_WRITING, ++ FL_ERASING, ++ FL_SYNCING, ++ FL_CACHEDPRG, ++ FL_PM_SUSPENDED, ++} nand_state_t; ++ ++/*************************************************************/ ++ ++ ++ ++typedef enum _ra_flags { ++ FLAG_NONE = 0, ++ FLAG_ECC_EN = (1<<0), ++ FLAG_USE_GDMA = (1<<1), ++ FLAG_VERIFY = (1<<2), ++} RA_FLAGS; ++ ++ ++#define BBTTAG_BITS 2 ++#define BBTTAG_BITS_MASK ((1< +Date: Wed, 27 Nov 2013 20:58:16 +0100 +Subject: [PATCH 131/133] mtd: add chunked read io to m25p80 + +Signed-off-by: John Crispin +--- + drivers/mtd/devices/m25p80.c | 127 ++++++++++++++++++++++++++++++++++++++++++ + 1 file changed, 127 insertions(+) + +--- a/drivers/mtd/devices/m25p80.c ++++ b/drivers/mtd/devices/m25p80.c +@@ -392,6 +392,57 @@ static int m25p80_read(struct mtd_info * + return 0; + } + ++static int m25p80_read_chunked(struct mtd_info *mtd, loff_t from, size_t len, ++ size_t *retlen, u_char *buf) ++{ ++ struct m25p *flash = mtd_to_m25p(mtd); ++ struct spi_transfer t[2]; ++ struct spi_message m; ++ uint8_t opcode; ++ int idx = 0; ++ ++ pr_debug("%s: %s from 0x%08x, len %zd\n", dev_name(&flash->spi->dev), ++ __func__, (u32)from, len); ++ ++ spi_message_init(&m); ++ memset(t, 0, (sizeof t)); ++ ++ t[0].tx_buf = flash->command; ++ t[0].len = m25p_cmdsz(flash); ++ spi_message_add_tail(&t[0], &m); ++ spi_message_add_tail(&t[1], &m); ++ ++ while (idx < len) { ++ int rlen = (len - idx > 4) ? (4) : (len - idx); ++ ++ t[1].rx_buf = &buf[idx]; ++ t[1].len = rlen; ++ ++ mutex_lock(&flash->lock); ++ ++ /* Wait till previous write/erase is done. */ ++ if (wait_till_ready(flash)) { ++ /* REVISIT status return?? */ ++ mutex_unlock(&flash->lock); ++ return 1; ++ } ++ ++ /* Set up the write data buffer. */ ++ opcode = OPCODE_NORM_READ; ++ flash->command[0] = opcode; ++ m25p_addr2cmd(flash, from + idx, flash->command); ++ ++ spi_sync(flash->spi, &m); ++ ++ *retlen = m.actual_length - m25p_cmdsz(flash) - ++ (flash->fast_read ? 1 : 0); ++ ++ mutex_unlock(&flash->lock); ++ idx += rlen; ++ } ++ return 0; ++} ++ + /* + * Write an address range to the flash chip. Data must be written in + * FLASH_PAGESIZE chunks. The address range may be any size provided +@@ -479,6 +530,76 @@ static int m25p80_write(struct mtd_info + return 0; + } + ++static int m25p80_write_chunked(struct mtd_info *mtd, loff_t to, size_t len, ++ size_t *retlen, const u_char *buf) ++{ ++ struct m25p *flash = mtd_to_m25p(mtd); ++ struct spi_transfer t; ++ struct spi_message m; ++ u32 i, page_size; ++ u8 tmp[8]; ++ ++ pr_debug("%s: %s to 0x%08x, len %zd\n", dev_name(&flash->spi->dev), ++ __func__, (u32)to, len); ++ ++ spi_message_init(&m); ++ memset(&t, 0, (sizeof t)); ++ ++ t.tx_buf = tmp; ++ t.len = 8; ++ spi_message_add_tail(&t, &m); ++ ++ mutex_lock(&flash->lock); ++ ++ /* Wait until finished previous write command. 
*/ ++ if (wait_till_ready(flash)) { ++ mutex_unlock(&flash->lock); ++ return 1; ++ } ++ ++ write_enable(flash); ++ ++ /* Set up the opcode in the write buffer. */ ++ flash->command[0] = OPCODE_PP; ++ m25p_addr2cmd(flash, to, flash->command); ++ ++ t.len = 4 + (to & 0x3); ++ if (t.len == 4) ++ t.len = 8; ++ memcpy(tmp, flash->command, 4); ++ memcpy(&tmp[4], buf, t.len - 4); ++ spi_sync(flash->spi, &m); ++ page_size = t.len - 4; ++ ++ *retlen = m.actual_length - m25p_cmdsz(flash); ++ ++ /* write everything in flash->page_size chunks */ ++ for (i = page_size; i < len; i += page_size) { ++ page_size = len - i; ++ if (page_size > 4) ++ page_size = 4; ++ ++ /* write the next page to flash */ ++ m25p_addr2cmd(flash, to + i, flash->command); ++ ++ memcpy(tmp, flash->command, 4); ++ memcpy(&tmp[4], buf + i, page_size); ++ t.len = 4 + page_size; ++ ++ wait_till_ready(flash); ++ ++ write_enable(flash); ++ ++ spi_sync(flash->spi, &m); ++ ++ *retlen += m.actual_length - m25p_cmdsz(flash); ++ } ++ ++ mutex_unlock(&flash->lock); ++ ++ return 0; ++} ++ + static int sst_write(struct mtd_info *mtd, loff_t to, size_t len, + size_t *retlen, const u_char *buf) + { +@@ -1058,6 +1179,12 @@ static int m25p_probe(struct spi_device + flash->fast_read = true; + #endif + ++ if (np && of_property_read_bool(np, "m25p,chunked-io")) { ++ dev_warn(&spi->dev, "using chunked io\n"); ++ flash->mtd._read = m25p80_read_chunked; ++ flash->mtd._write = m25p80_write_chunked; ++ } ++ + #ifdef CONFIG_M25PXX_USE_FAST_READ + flash->fast_read = true; + #endif diff --git a/target/linux/ramips/patches-3.10/0132-GPIO-add-gpio_export_with_name.patch b/target/linux/ramips/patches-3.10/0132-GPIO-add-gpio_export_with_name.patch new file mode 100644 index 0000000000..c6ad7f4788 --- /dev/null +++ b/target/linux/ramips/patches-3.10/0132-GPIO-add-gpio_export_with_name.patch @@ -0,0 +1,325 @@ +From def7e226d3e5c501180bdc2fc644ff924b5a275e Mon Sep 17 00:00:00 2001 +From: John Crispin +Date: Sun, 23 Jun 2013 00:16:22 +0200 +Subject: [PATCH 132/133] GPIO: add gpio_export_with_name + +http://lists.infradead.org/pipermail/linux-arm-kernel/2012-November/133856.html + +Signed-off-by: John Crispin +--- + Documentation/devicetree/bindings/gpio/gpio.txt | 60 ++++++++++++++++++++ + drivers/gpio/gpiolib-of.c | 68 +++++++++++++++++++++++ + drivers/gpio/gpiolib.c | 24 +++++--- + include/asm-generic/gpio.h | 6 +- + include/linux/gpio.h | 26 ++++++++- + 5 files changed, 172 insertions(+), 12 deletions(-) + +--- a/Documentation/devicetree/bindings/gpio/gpio.txt ++++ b/Documentation/devicetree/bindings/gpio/gpio.txt +@@ -112,3 +112,63 @@ where, + + The pinctrl node must have "#gpio-range-cells" property to show number of + arguments to pass with phandle from gpio controllers node. 
++ ++3) gpio-export ++-------------- ++ ++gpio-export will allow you to automatically export gpio ++ ++required properties: ++- compatible: Should be "gpio-export" ++ ++in each child node will reprensent a gpio or if no name is specified ++a list of gpio to export ++ ++required properties: ++- gpios: gpio to export ++ ++optional properties: ++ - gpio-export,name: export name ++ - gpio-export,output: to set the as output with default value ++ if no present gpio as input ++ - pio-export,direction_may_change: boolean to allow the direction to be controllable ++ ++Example: ++ ++ ++gpio_export { ++ compatible = "gpio-export"; ++ #size-cells = <0>; ++ ++ in { ++ gpio-export,name = "in"; ++ gpios = <&pioC 20 0>; ++ }; ++ ++ out { ++ gpio-export,name = "out"; ++ gpio-export,output = <1>; ++ gpio-export,direction_may_change; ++ gpios = <&pioC 21 0>; ++ }; ++ ++ in_out { ++ gpio-export,name = "in_out"; ++ gpio-export,direction_may_change; ++ gpios = <&pioC 21 0>; ++ }; ++ ++ gpios_in { ++ gpios = <&pioB 0 0 ++ &pioB 3 0 ++ &pioC 4 0>; ++ gpio-export,direction_may_change; ++ }; ++ ++ gpios_out { ++ gpios = <&pioB 1 0 ++ &pioB 2 0 ++ &pioC 3 0>; ++ gpio-export,output = <1>; ++ }; ++}; +--- a/drivers/gpio/gpiolib-of.c ++++ b/drivers/gpio/gpiolib-of.c +@@ -21,6 +21,8 @@ + #include + #include + #include ++#include ++#include + + /* Private data structure for of_gpiochip_find_and_xlate */ + struct gg_data { +@@ -242,3 +244,69 @@ void of_gpiochip_remove(struct gpio_chip + if (chip->of_node) + of_node_put(chip->of_node); + } ++ ++static struct of_device_id gpio_export_ids[] = { ++ { .compatible = "gpio-export" }, ++ { /* sentinel */ } ++}; ++ ++static int __init of_gpio_export_probe(struct platform_device *pdev) ++{ ++ struct device_node *np = pdev->dev.of_node; ++ struct device_node *cnp; ++ u32 val; ++ int nb = 0; ++ ++ for_each_child_of_node(np, cnp) { ++ const char *name = NULL; ++ int gpio; ++ bool dmc; ++ int max_gpio = 1; ++ int i; ++ ++ of_property_read_string(cnp, "gpio-export,name", &name); ++ ++ if (!name) ++ max_gpio = of_gpio_count(cnp); ++ ++ for (i = 0; i < max_gpio; i++) { ++ unsigned flags = 0; ++ enum of_gpio_flags of_flags; ++ ++ gpio = of_get_gpio_flags(cnp, i, &of_flags); ++ ++ if (of_flags == OF_GPIO_ACTIVE_LOW) ++ flags |= GPIOF_ACTIVE_LOW; ++ ++ if (!of_property_read_u32(cnp, "gpio-export,output", &val)) ++ flags |= val ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW; ++ else ++ flags |= GPIOF_IN; ++ ++ if (devm_gpio_request_one(&pdev->dev, gpio, flags, name ? 
name : of_node_full_name(np))) ++ continue; ++ ++ dmc = of_property_read_bool(cnp, "gpio-export,direction_may_change"); ++ gpio_export_with_name(gpio, dmc, name); ++ nb++; ++ } ++ } ++ ++ dev_info(&pdev->dev, "%d gpio(s) exported\n", nb); ++ ++ return 0; ++} ++ ++static struct platform_driver gpio_export_driver = { ++ .driver = { ++ .name = "gpio-export", ++ .owner = THIS_MODULE, ++ .of_match_table = of_match_ptr(gpio_export_ids), ++ }, ++}; ++ ++static int __init of_gpio_export_init(void) ++{ ++ return platform_driver_probe(&gpio_export_driver, of_gpio_export_probe); ++} ++device_initcall(of_gpio_export_init); +--- a/drivers/gpio/gpiolib.c ++++ b/drivers/gpio/gpiolib.c +@@ -96,7 +96,7 @@ static int gpiod_get_value(const struct + static void gpiod_set_value(struct gpio_desc *desc, int value); + static int gpiod_cansleep(const struct gpio_desc *desc); + static int gpiod_to_irq(const struct gpio_desc *desc); +-static int gpiod_export(struct gpio_desc *desc, bool direction_may_change); ++static int gpiod_export(struct gpio_desc *desc, bool direction_may_change, const char *name); + static int gpiod_export_link(struct device *dev, const char *name, + struct gpio_desc *desc); + static int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value); +@@ -674,7 +674,7 @@ static ssize_t export_store(struct class + status = -ENODEV; + goto done; + } +- status = gpiod_export(desc, true); ++ status = gpiod_export(desc, true, NULL); + if (status < 0) + gpiod_free(desc); + else +@@ -736,9 +736,10 @@ static struct class gpio_class = { + + + /** +- * gpio_export - export a GPIO through sysfs ++ * gpio_export_with_name - export a GPIO through sysfs + * @gpio: gpio to make available, already requested + * @direction_may_change: true if userspace may change gpio direction ++ * @name: gpio name + * Context: arch_initcall or later + * + * When drivers want to make a GPIO accessible to userspace after they +@@ -750,7 +751,7 @@ static struct class gpio_class = { + * + * Returns zero on success, else an error. 
+ */ +-static int gpiod_export(struct gpio_desc *desc, bool direction_may_change) ++static int gpiod_export(struct gpio_desc *desc, bool direction_may_change, const char *name) + { + unsigned long flags; + int status; +@@ -783,6 +784,8 @@ static int gpiod_export(struct gpio_desc + goto fail_unlock; + } + ++ if (name) ++ ioname = name; + if (!desc->chip->direction_input || !desc->chip->direction_output) + direction_may_change = false; + spin_unlock_irqrestore(&gpio_lock, flags); +@@ -829,11 +832,11 @@ fail_unlock: + return status; + } + +-int gpio_export(unsigned gpio, bool direction_may_change) ++int gpio_export_with_name(unsigned gpio, bool direction_may_change, const char *name) + { +- return gpiod_export(gpio_to_desc(gpio), direction_may_change); ++ return gpiod_export(gpio_to_desc(gpio), direction_may_change, name); + } +-EXPORT_SYMBOL_GPL(gpio_export); ++EXPORT_SYMBOL_GPL(gpio_export_with_name); + + static int match_export(struct device *dev, const void *data) + { +@@ -1092,7 +1095,7 @@ static inline void gpiochip_unexport(str + } + + static inline int gpiod_export(struct gpio_desc *desc, +- bool direction_may_change) ++ bool direction_may_change, const char *name) + { + return -ENOSYS; + } +@@ -1521,6 +1524,9 @@ int gpio_request_one(unsigned gpio, unsi + if (flags & GPIOF_OPEN_SOURCE) + set_bit(FLAG_OPEN_SOURCE, &desc->flags); + ++ if (flags & GPIOF_ACTIVE_LOW) ++ set_bit(FLAG_ACTIVE_LOW, &gpio_desc[gpio].flags); ++ + if (flags & GPIOF_DIR_IN) + err = gpiod_direction_input(desc); + else +@@ -1531,7 +1537,7 @@ int gpio_request_one(unsigned gpio, unsi + goto free_gpio; + + if (flags & GPIOF_EXPORT) { +- err = gpiod_export(desc, flags & GPIOF_EXPORT_CHANGEABLE); ++ err = gpiod_export(desc, flags & GPIOF_EXPORT_CHANGEABLE, NULL); + if (err) + goto free_gpio; + } +--- a/include/asm-generic/gpio.h ++++ b/include/asm-generic/gpio.h +@@ -202,7 +202,8 @@ extern void gpio_free_array(const struct + * A sysfs interface can be exported by individual drivers if they want, + * but more typically is configured entirely from userspace. 
+ */ +-extern int gpio_export(unsigned gpio, bool direction_may_change); ++extern int gpio_export_with_name(unsigned gpio, bool direction_may_change, ++ const char *name); + extern int gpio_export_link(struct device *dev, const char *name, + unsigned gpio); + extern int gpio_sysfs_set_active_low(unsigned gpio, int value); +@@ -284,7 +285,8 @@ struct device; + + /* sysfs support is only available with gpiolib, where it's optional */ + +-static inline int gpio_export(unsigned gpio, bool direction_may_change) ++static inline int gpio_export_with_name(unsigned gpio, ++ bool direction_may_change, const char *name) + { + return -ENOSYS; + } +--- a/include/linux/gpio.h ++++ b/include/linux/gpio.h +@@ -27,6 +27,9 @@ + #define GPIOF_EXPORT_DIR_FIXED (GPIOF_EXPORT) + #define GPIOF_EXPORT_DIR_CHANGEABLE (GPIOF_EXPORT | GPIOF_EXPORT_CHANGEABLE) + ++#define GPIOF_ACTIVE_LOW (1 << 6) ++ ++ + /** + * struct gpio - a structure describing a GPIO with configuration + * @gpio: the GPIO number +@@ -169,7 +172,8 @@ static inline void gpio_set_value_cansle + WARN_ON(1); + } + +-static inline int gpio_export(unsigned gpio, bool direction_may_change) ++static inline int gpio_export_with_name(unsigned gpio, ++ bool direction_may_change, const char *name) + { + /* GPIO can never have been requested or set as {in,out}put */ + WARN_ON(1); +@@ -236,4 +240,24 @@ int devm_gpio_request_one(struct device + unsigned long flags, const char *label); + void devm_gpio_free(struct device *dev, unsigned int gpio); + ++/** ++ * gpio_export - export a GPIO through sysfs ++ * @gpio: gpio to make available, already requested ++ * @direction_may_change: true if userspace may change gpio direction ++ * Context: arch_initcall or later ++ * ++ * When drivers want to make a GPIO accessible to userspace after they ++ * have requested it -- perhaps while debugging, or as part of their ++ * public interface -- they may use this routine. If the GPIO can ++ * change direction (some can't) and the caller allows it, userspace ++ * will see "direction" sysfs attribute which may be used to change ++ * the gpio's direction. A "value" attribute will always be provided. ++ * ++ * Returns zero on success, else an error. 
++ */ ++static inline int gpio_export(unsigned gpio,bool direction_may_change) ++{ ++ return gpio_export_with_name(gpio, direction_may_change, NULL); ++} ++ + #endif /* __LINUX_GPIO_H */ diff --git a/target/linux/ramips/patches-3.10/0133-uvc-add-iPassion-iP2970-support.patch b/target/linux/ramips/patches-3.10/0133-uvc-add-iPassion-iP2970-support.patch new file mode 100644 index 0000000000..099671adf9 --- /dev/null +++ b/target/linux/ramips/patches-3.10/0133-uvc-add-iPassion-iP2970-support.patch @@ -0,0 +1,246 @@ +From 935815cd3b9690b86e70a18fb755f70becb57cc6 Mon Sep 17 00:00:00 2001 +From: John Crispin +Date: Thu, 19 Sep 2013 01:50:59 +0200 +Subject: [PATCH 133/133] uvc: add iPassion iP2970 support + +Signed-off-by: John Crispin +--- + drivers/media/usb/uvc/uvc_driver.c | 14 ++++ + drivers/media/usb/uvc/uvc_status.c | 2 + + drivers/media/usb/uvc/uvc_video.c | 147 ++++++++++++++++++++++++++++++++++++ + drivers/media/usb/uvc/uvcvideo.h | 3 + + 4 files changed, 166 insertions(+) + +--- a/drivers/media/usb/uvc/uvc_driver.c ++++ b/drivers/media/usb/uvc/uvc_driver.c +@@ -2420,6 +2420,20 @@ static struct usb_device_id uvc_ids[] = + .bInterfaceProtocol = 0, + .driver_info = UVC_QUIRK_PROBE_MINMAX + | UVC_QUIRK_IGNORE_SELECTOR_UNIT }, ++ ++/* iPassion iP2970 */ ++ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE ++ | USB_DEVICE_ID_MATCH_INT_INFO, ++ .idVendor = 0x1B3B, ++ .idProduct = 0x2970, ++ .bInterfaceClass = USB_CLASS_VIDEO, ++ .bInterfaceSubClass = 1, ++ .bInterfaceProtocol = 0, ++ .driver_info = UVC_QUIRK_PROBE_MINMAX ++ | UVC_QUIRK_STREAM_NO_FID ++ | UVC_QUIRK_MOTION ++ | UVC_QUIRK_SINGLE_ISO }, ++ + /* Generic USB Video Class */ + { USB_INTERFACE_INFO(USB_CLASS_VIDEO, 1, 0) }, + {} +--- a/drivers/media/usb/uvc/uvc_status.c ++++ b/drivers/media/usb/uvc/uvc_status.c +@@ -139,6 +139,7 @@ static void uvc_status_complete(struct u + switch (dev->status[0] & 0x0f) { + case UVC_STATUS_TYPE_CONTROL: + uvc_event_control(dev, dev->status, len); ++ dev->motion = 1; + break; + + case UVC_STATUS_TYPE_STREAMING: +@@ -182,6 +183,7 @@ int uvc_status_init(struct uvc_device *d + } + + pipe = usb_rcvintpipe(dev->udev, ep->desc.bEndpointAddress); ++ dev->motion = 0; + + /* For high-speed interrupt endpoints, the bInterval value is used as + * an exponent of two. Some developers forgot about it. +--- a/drivers/media/usb/uvc/uvc_video.c ++++ b/drivers/media/usb/uvc/uvc_video.c +@@ -21,6 +21,11 @@ + #include + #include + #include ++#include ++#include ++#include ++#include ++#include + + #include + +@@ -1074,9 +1079,149 @@ static void uvc_video_decode_data(struct + } + } + ++struct bh_priv { ++ unsigned long seen; ++}; ++ ++struct bh_event { ++ const char *name; ++ struct sk_buff *skb; ++ struct work_struct work; ++}; ++ ++#define BH_ERR(fmt, args...) printk(KERN_ERR "%s: " fmt, "webcam", ##args ) ++#define BH_DBG(fmt, args...) do {} while (0) ++#define BH_SKB_SIZE 2048 ++ ++extern u64 uevent_next_seqnum(void); ++static int seen = 0; ++ ++static int bh_event_add_var(struct bh_event *event, int argv, ++ const char *format, ...) 
++{ ++ static char buf[128]; ++ char *s; ++ va_list args; ++ int len; ++ ++ if (argv) ++ return 0; ++ ++ va_start(args, format); ++ len = vsnprintf(buf, sizeof(buf), format, args); ++ va_end(args); ++ ++ if (len >= sizeof(buf)) { ++ BH_ERR("buffer size too small\n"); ++ WARN_ON(1); ++ return -ENOMEM; ++ } ++ ++ s = skb_put(event->skb, len + 1); ++ strcpy(s, buf); ++ ++ BH_DBG("added variable '%s'\n", s); ++ ++ return 0; ++} ++ ++static int motion_hotplug_fill_event(struct bh_event *event) ++{ ++ int s = jiffies; ++ int ret; ++ ++ if (!seen) ++ seen = jiffies; ++ ++ ret = bh_event_add_var(event, 0, "HOME=%s", "/"); ++ if (ret) ++ return ret; ++ ++ ret = bh_event_add_var(event, 0, "PATH=%s", ++ "/sbin:/bin:/usr/sbin:/usr/bin"); ++ if (ret) ++ return ret; ++ ++ ret = bh_event_add_var(event, 0, "SUBSYSTEM=usb"); ++ if (ret) ++ return ret; ++ ++ ret = bh_event_add_var(event, 0, "ACTION=motion"); ++ if (ret) ++ return ret; ++ ++ ret = bh_event_add_var(event, 0, "SEEN=%d", s - seen); ++ if (ret) ++ return ret; ++ seen = s; ++ ++ ret = bh_event_add_var(event, 0, "SEQNUM=%llu", uevent_next_seqnum()); ++ ++ return ret; ++} ++ ++static void motion_hotplug_work(struct work_struct *work) ++{ ++ struct bh_event *event = container_of(work, struct bh_event, work); ++ int ret = 0; ++ ++ event->skb = alloc_skb(BH_SKB_SIZE, GFP_KERNEL); ++ if (!event->skb) ++ goto out_free_event; ++ ++ ret = bh_event_add_var(event, 0, "%s@", "add"); ++ if (ret) ++ goto out_free_skb; ++ ++ ret = motion_hotplug_fill_event(event); ++ if (ret) ++ goto out_free_skb; ++ ++ NETLINK_CB(event->skb).dst_group = 1; ++ broadcast_uevent(event->skb, 0, 1, GFP_KERNEL); ++ ++out_free_skb: ++ if (ret) { ++ BH_ERR("work error %d\n", ret); ++ kfree_skb(event->skb); ++ } ++out_free_event: ++ kfree(event); ++} ++ ++static int motion_hotplug_create_event(void) ++{ ++ struct bh_event *event; ++ ++ event = kzalloc(sizeof(*event), GFP_KERNEL); ++ if (!event) ++ return -ENOMEM; ++ ++ event->name = "motion"; ++ ++ INIT_WORK(&event->work, (void *)(void *)motion_hotplug_work); ++ schedule_work(&event->work); ++ ++ return 0; ++} ++ ++#define MOTION_FLAG_OFFSET 4 + static void uvc_video_decode_end(struct uvc_streaming *stream, + struct uvc_buffer *buf, const __u8 *data, int len) + { ++ if ((stream->dev->quirks & UVC_QUIRK_MOTION) && ++ (data[len - 2] == 0xff) && (data[len - 1] == 0xd9)) { ++ u8 *mem; ++ buf->state = UVC_BUF_STATE_READY; ++ mem = (u8 *) (buf->mem + MOTION_FLAG_OFFSET); ++ if ( stream->dev->motion ) { ++ stream->dev->motion = 0; ++ motion_hotplug_create_event(); ++ } else { ++ *mem &= 0x7f; ++ } ++ } ++ + /* Mark the buffer as done if the EOF marker is set. 
*/ + if (data[1] & UVC_STREAM_EOF && buf->bytesused != 0) { + uvc_trace(UVC_TRACE_FRAME, "Frame complete (EOF found).\n"); +@@ -1477,6 +1622,8 @@ static int uvc_init_video_isoc(struct uv + if (npackets == 0) + return -ENOMEM; + ++ if (stream->dev->quirks & UVC_QUIRK_SINGLE_ISO) ++ npackets = 1; + size = npackets * psize; + + for (i = 0; i < UVC_URBS; ++i) { +--- a/drivers/media/usb/uvc/uvcvideo.h ++++ b/drivers/media/usb/uvc/uvcvideo.h +@@ -137,6 +137,8 @@ + #define UVC_QUIRK_FIX_BANDWIDTH 0x00000080 + #define UVC_QUIRK_PROBE_DEF 0x00000100 + #define UVC_QUIRK_RESTRICT_FRAME_RATE 0x00000200 ++#define UVC_QUIRK_MOTION 0x00000400 ++#define UVC_QUIRK_SINGLE_ISO 0x00000800 + + /* Format flags */ + #define UVC_FMT_FLAG_COMPRESSED 0x00000001 +@@ -538,6 +540,7 @@ struct uvc_device { + __u8 *status; + struct input_dev *input; + char input_phys[64]; ++ int motion; + }; + + enum uvc_handle_state { diff --git a/target/linux/ramips/patches-3.10/0134-mtd-split-remove-padding.patch b/target/linux/ramips/patches-3.10/0134-mtd-split-remove-padding.patch new file mode 100644 index 0000000000..9c5a728995 --- /dev/null +++ b/target/linux/ramips/patches-3.10/0134-mtd-split-remove-padding.patch @@ -0,0 +1,13 @@ +--- a/drivers/mtd/mtdpart.c ++++ b/drivers/mtd/mtdpart.c +@@ -805,10 +805,6 @@ static void split_uimage(struct mtd_info + return; + + len = be32_to_cpu(hdr.size) + 0x40; +- len = mtd_pad_erasesize(master, part->offset, len); +- if (len + master->erasesize > part->mtd.size) +- return; +- + __mtd_add_partition(master, "rootfs", part->offset + len, + part->mtd.size - len, false); + } diff --git a/target/linux/ramips/patches-3.10/0200-MIPS-Fix-TLBR-use-hazards-for-R2-cores-in-the-TLB-re.patch b/target/linux/ramips/patches-3.10/0200-MIPS-Fix-TLBR-use-hazards-for-R2-cores-in-the-TLB-re.patch new file mode 100644 index 0000000000..e358e622b2 --- /dev/null +++ b/target/linux/ramips/patches-3.10/0200-MIPS-Fix-TLBR-use-hazards-for-R2-cores-in-the-TLB-re.patch @@ -0,0 +1,60 @@ +From f281fdccbb3e762d293e6eef7f291a33b84e0f6a Mon Sep 17 00:00:00 2001 +From: Ralf Baechle +Date: Thu, 20 Jun 2013 14:56:17 +0200 +Subject: [PATCH 200/215] MIPS: Fix TLBR-use hazards for R2 cores in the TLB + reload handlers + +MIPS R2 documents state that an execution hazard barrier is needed +after a TLBR before reading EntryLo. + +Original patch by Leonid Yegoshin . + +Signed-off-by: Ralf Baechle +Patchwork: https://patchwork.linux-mips.org/patch/5526/ +(cherry picked from commit 73acc7df534ff458a81435178dab3ea037ed6d78) +--- + arch/mips/mm/tlbex.c | 26 ++++++++++++++++++++++++++ + 1 file changed, 26 insertions(+) + +--- a/arch/mips/mm/tlbex.c ++++ b/arch/mips/mm/tlbex.c +@@ -1935,6 +1935,19 @@ static void __cpuinit build_r4000_tlb_lo + uasm_i_nop(&p); + + uasm_i_tlbr(&p); ++ ++ switch (current_cpu_type()) { ++ default: ++ if (cpu_has_mips_r2) { ++ uasm_i_ehb(&p); ++ ++ case CPU_CAVIUM_OCTEON: ++ case CPU_CAVIUM_OCTEON_PLUS: ++ case CPU_CAVIUM_OCTEON2: ++ break; ++ } ++ } ++ + /* Examine entrylo 0 or 1 based on ptr. */ + if (use_bbit_insns()) { + uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8); +@@ -1989,6 +2002,19 @@ static void __cpuinit build_r4000_tlb_lo + uasm_i_nop(&p); + + uasm_i_tlbr(&p); ++ ++ switch (current_cpu_type()) { ++ default: ++ if (cpu_has_mips_r2) { ++ uasm_i_ehb(&p); ++ ++ case CPU_CAVIUM_OCTEON: ++ case CPU_CAVIUM_OCTEON_PLUS: ++ case CPU_CAVIUM_OCTEON2: ++ break; ++ } ++ } ++ + /* Examine entrylo 0 or 1 based on ptr. 
*/ + if (use_bbit_insns()) { + uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8); diff --git a/target/linux/ramips/patches-3.10/0200-owrt-GPIO-add-gpio_export_with_name.patch b/target/linux/ramips/patches-3.10/0200-owrt-GPIO-add-gpio_export_with_name.patch deleted file mode 100644 index 6ba536a893..0000000000 --- a/target/linux/ramips/patches-3.10/0200-owrt-GPIO-add-gpio_export_with_name.patch +++ /dev/null @@ -1,325 +0,0 @@ -From 8f3ed1fffa35d18c2b20ebb866c71a22cc0589ff Mon Sep 17 00:00:00 2001 -From: John Crispin -Date: Sun, 23 Jun 2013 00:16:22 +0200 -Subject: [PATCH 29/33] owrt: GPIO: add gpio_export_with_name - -http://lists.infradead.org/pipermail/linux-arm-kernel/2012-November/133856.html - -Signed-off-by: John Crispin ---- - Documentation/devicetree/bindings/gpio/gpio.txt | 60 ++++++++++++++++++++ - drivers/gpio/gpiolib-of.c | 68 +++++++++++++++++++++++ - drivers/gpio/gpiolib.c | 24 +++++--- - include/asm-generic/gpio.h | 6 +- - include/linux/gpio.h | 26 ++++++++- - 5 files changed, 172 insertions(+), 12 deletions(-) - ---- a/Documentation/devicetree/bindings/gpio/gpio.txt -+++ b/Documentation/devicetree/bindings/gpio/gpio.txt -@@ -112,3 +112,63 @@ where, - - The pinctrl node must have "#gpio-range-cells" property to show number of - arguments to pass with phandle from gpio controllers node. -+ -+3) gpio-export -+-------------- -+ -+gpio-export will allow you to automatically export gpio -+ -+required properties: -+- compatible: Should be "gpio-export" -+ -+in each child node will reprensent a gpio or if no name is specified -+a list of gpio to export -+ -+required properties: -+- gpios: gpio to export -+ -+optional properties: -+ - gpio-export,name: export name -+ - gpio-export,output: to set the as output with default value -+ if no present gpio as input -+ - pio-export,direction_may_change: boolean to allow the direction to be controllable -+ -+Example: -+ -+ -+gpio_export { -+ compatible = "gpio-export"; -+ #size-cells = <0>; -+ -+ in { -+ gpio-export,name = "in"; -+ gpios = <&pioC 20 0>; -+ }; -+ -+ out { -+ gpio-export,name = "out"; -+ gpio-export,output = <1>; -+ gpio-export,direction_may_change; -+ gpios = <&pioC 21 0>; -+ }; -+ -+ in_out { -+ gpio-export,name = "in_out"; -+ gpio-export,direction_may_change; -+ gpios = <&pioC 21 0>; -+ }; -+ -+ gpios_in { -+ gpios = <&pioB 0 0 -+ &pioB 3 0 -+ &pioC 4 0>; -+ gpio-export,direction_may_change; -+ }; -+ -+ gpios_out { -+ gpios = <&pioB 1 0 -+ &pioB 2 0 -+ &pioC 3 0>; -+ gpio-export,output = <1>; -+ }; -+}; ---- a/drivers/gpio/gpiolib-of.c -+++ b/drivers/gpio/gpiolib-of.c -@@ -21,6 +21,8 @@ - #include - #include - #include -+#include -+#include - - /* Private data structure for of_gpiochip_find_and_xlate */ - struct gg_data { -@@ -242,3 +244,69 @@ void of_gpiochip_remove(struct gpio_chip - if (chip->of_node) - of_node_put(chip->of_node); - } -+ -+static struct of_device_id gpio_export_ids[] = { -+ { .compatible = "gpio-export" }, -+ { /* sentinel */ } -+}; -+ -+static int __init of_gpio_export_probe(struct platform_device *pdev) -+{ -+ struct device_node *np = pdev->dev.of_node; -+ struct device_node *cnp; -+ u32 val; -+ int nb = 0; -+ -+ for_each_child_of_node(np, cnp) { -+ const char *name = NULL; -+ int gpio; -+ bool dmc; -+ int max_gpio = 1; -+ int i; -+ -+ of_property_read_string(cnp, "gpio-export,name", &name); -+ -+ if (!name) -+ max_gpio = of_gpio_count(cnp); -+ -+ for (i = 0; i < max_gpio; i++) { -+ unsigned flags = 0; -+ enum of_gpio_flags of_flags; -+ -+ gpio = of_get_gpio_flags(cnp, i, &of_flags); -+ -+ if (of_flags 
== OF_GPIO_ACTIVE_LOW) -+ flags |= GPIOF_ACTIVE_LOW; -+ -+ if (!of_property_read_u32(cnp, "gpio-export,output", &val)) -+ flags |= val ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW; -+ else -+ flags |= GPIOF_IN; -+ -+ if (devm_gpio_request_one(&pdev->dev, gpio, flags, name ? name : of_node_full_name(np))) -+ continue; -+ -+ dmc = of_property_read_bool(cnp, "gpio-export,direction_may_change"); -+ gpio_export_with_name(gpio, dmc, name); -+ nb++; -+ } -+ } -+ -+ dev_info(&pdev->dev, "%d gpio(s) exported\n", nb); -+ -+ return 0; -+} -+ -+static struct platform_driver gpio_export_driver = { -+ .driver = { -+ .name = "gpio-export", -+ .owner = THIS_MODULE, -+ .of_match_table = of_match_ptr(gpio_export_ids), -+ }, -+}; -+ -+static int __init of_gpio_export_init(void) -+{ -+ return platform_driver_probe(&gpio_export_driver, of_gpio_export_probe); -+} -+device_initcall(of_gpio_export_init); ---- a/drivers/gpio/gpiolib.c -+++ b/drivers/gpio/gpiolib.c -@@ -96,7 +96,7 @@ static int gpiod_get_value(const struct - static void gpiod_set_value(struct gpio_desc *desc, int value); - static int gpiod_cansleep(const struct gpio_desc *desc); - static int gpiod_to_irq(const struct gpio_desc *desc); --static int gpiod_export(struct gpio_desc *desc, bool direction_may_change); -+static int gpiod_export(struct gpio_desc *desc, bool direction_may_change, const char *name); - static int gpiod_export_link(struct device *dev, const char *name, - struct gpio_desc *desc); - static int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value); -@@ -674,7 +674,7 @@ static ssize_t export_store(struct class - status = -ENODEV; - goto done; - } -- status = gpiod_export(desc, true); -+ status = gpiod_export(desc, true, NULL); - if (status < 0) - gpiod_free(desc); - else -@@ -736,9 +736,10 @@ static struct class gpio_class = { - - - /** -- * gpio_export - export a GPIO through sysfs -+ * gpio_export_with_name - export a GPIO through sysfs - * @gpio: gpio to make available, already requested - * @direction_may_change: true if userspace may change gpio direction -+ * @name: gpio name - * Context: arch_initcall or later - * - * When drivers want to make a GPIO accessible to userspace after they -@@ -750,7 +751,7 @@ static struct class gpio_class = { - * - * Returns zero on success, else an error. 
- */ --static int gpiod_export(struct gpio_desc *desc, bool direction_may_change) -+static int gpiod_export(struct gpio_desc *desc, bool direction_may_change, const char *name) - { - unsigned long flags; - int status; -@@ -783,6 +784,8 @@ static int gpiod_export(struct gpio_desc - goto fail_unlock; - } - -+ if (name) -+ ioname = name; - if (!desc->chip->direction_input || !desc->chip->direction_output) - direction_may_change = false; - spin_unlock_irqrestore(&gpio_lock, flags); -@@ -829,11 +832,11 @@ fail_unlock: - return status; - } - --int gpio_export(unsigned gpio, bool direction_may_change) -+int gpio_export_with_name(unsigned gpio, bool direction_may_change, const char *name) - { -- return gpiod_export(gpio_to_desc(gpio), direction_may_change); -+ return gpiod_export(gpio_to_desc(gpio), direction_may_change, name); - } --EXPORT_SYMBOL_GPL(gpio_export); -+EXPORT_SYMBOL_GPL(gpio_export_with_name); - - static int match_export(struct device *dev, const void *data) - { -@@ -1092,7 +1095,7 @@ static inline void gpiochip_unexport(str - } - - static inline int gpiod_export(struct gpio_desc *desc, -- bool direction_may_change) -+ bool direction_may_change, const char *name) - { - return -ENOSYS; - } -@@ -1521,6 +1524,9 @@ int gpio_request_one(unsigned gpio, unsi - if (flags & GPIOF_OPEN_SOURCE) - set_bit(FLAG_OPEN_SOURCE, &desc->flags); - -+ if (flags & GPIOF_ACTIVE_LOW) -+ set_bit(FLAG_ACTIVE_LOW, &gpio_desc[gpio].flags); -+ - if (flags & GPIOF_DIR_IN) - err = gpiod_direction_input(desc); - else -@@ -1531,7 +1537,7 @@ int gpio_request_one(unsigned gpio, unsi - goto free_gpio; - - if (flags & GPIOF_EXPORT) { -- err = gpiod_export(desc, flags & GPIOF_EXPORT_CHANGEABLE); -+ err = gpiod_export(desc, flags & GPIOF_EXPORT_CHANGEABLE, NULL); - if (err) - goto free_gpio; - } ---- a/include/asm-generic/gpio.h -+++ b/include/asm-generic/gpio.h -@@ -202,7 +202,8 @@ extern void gpio_free_array(const struct - * A sysfs interface can be exported by individual drivers if they want, - * but more typically is configured entirely from userspace. 
- */ --extern int gpio_export(unsigned gpio, bool direction_may_change); -+extern int gpio_export_with_name(unsigned gpio, bool direction_may_change, -+ const char *name); - extern int gpio_export_link(struct device *dev, const char *name, - unsigned gpio); - extern int gpio_sysfs_set_active_low(unsigned gpio, int value); -@@ -284,7 +285,8 @@ struct device; - - /* sysfs support is only available with gpiolib, where it's optional */ - --static inline int gpio_export(unsigned gpio, bool direction_may_change) -+static inline int gpio_export_with_name(unsigned gpio, -+ bool direction_may_change, const char *name) - { - return -ENOSYS; - } ---- a/include/linux/gpio.h -+++ b/include/linux/gpio.h -@@ -27,6 +27,9 @@ - #define GPIOF_EXPORT_DIR_FIXED (GPIOF_EXPORT) - #define GPIOF_EXPORT_DIR_CHANGEABLE (GPIOF_EXPORT | GPIOF_EXPORT_CHANGEABLE) - -+#define GPIOF_ACTIVE_LOW (1 << 6) -+ -+ - /** - * struct gpio - a structure describing a GPIO with configuration - * @gpio: the GPIO number -@@ -169,7 +172,8 @@ static inline void gpio_set_value_cansle - WARN_ON(1); - } - --static inline int gpio_export(unsigned gpio, bool direction_may_change) -+static inline int gpio_export_with_name(unsigned gpio, -+ bool direction_may_change, const char *name) - { - /* GPIO can never have been requested or set as {in,out}put */ - WARN_ON(1); -@@ -236,4 +240,24 @@ int devm_gpio_request_one(struct device - unsigned long flags, const char *label); - void devm_gpio_free(struct device *dev, unsigned int gpio); - -+/** -+ * gpio_export - export a GPIO through sysfs -+ * @gpio: gpio to make available, already requested -+ * @direction_may_change: true if userspace may change gpio direction -+ * Context: arch_initcall or later -+ * -+ * When drivers want to make a GPIO accessible to userspace after they -+ * have requested it -- perhaps while debugging, or as part of their -+ * public interface -- they may use this routine. If the GPIO can -+ * change direction (some can't) and the caller allows it, userspace -+ * will see "direction" sysfs attribute which may be used to change -+ * the gpio's direction. A "value" attribute will always be provided. -+ * -+ * Returns zero on success, else an error. -+ */ -+static inline int gpio_export(unsigned gpio,bool direction_may_change) -+{ -+ return gpio_export_with_name(gpio, direction_may_change, NULL); -+} -+ - #endif /* __LINUX_GPIO_H */ diff --git a/target/linux/ramips/patches-3.10/0201-MIPS-GIC-Fix-gic_set_affinity-infinite-loop.patch b/target/linux/ramips/patches-3.10/0201-MIPS-GIC-Fix-gic_set_affinity-infinite-loop.patch new file mode 100644 index 0000000000..f1898ba073 --- /dev/null +++ b/target/linux/ramips/patches-3.10/0201-MIPS-GIC-Fix-gic_set_affinity-infinite-loop.patch @@ -0,0 +1,44 @@ +From cde59bef2f155fc38413e470ff0e4672623cdbec Mon Sep 17 00:00:00 2001 +From: Tony Wu +Date: Fri, 21 Jun 2013 10:13:08 +0000 +Subject: [PATCH 201/215] MIPS: GIC: Fix gic_set_affinity infinite loop + +There is an infinite loop in gic_set_affinity. When irq_set_affinity +gets called on gic controller, it blocks forever. + +Signed-off-by: Tony Wu +Cc: Steven J. 
Hill +Cc: linux-mips@linux-mips.org +Patchwork: https://patchwork.linux-mips.org/patch/5537/ +Signed-off-by: Ralf Baechle +(cherry picked from commit c214c03512b67e56dea3f4471705f8caae49553a) +--- + arch/mips/kernel/irq-gic.c | 15 +++++++-------- + 1 file changed, 7 insertions(+), 8 deletions(-) + +--- a/arch/mips/kernel/irq-gic.c ++++ b/arch/mips/kernel/irq-gic.c +@@ -219,16 +219,15 @@ static int gic_set_affinity(struct irq_d + + /* Assumption : cpumask refers to a single CPU */ + spin_lock_irqsave(&gic_lock, flags); +- for (;;) { +- /* Re-route this IRQ */ +- GIC_SH_MAP_TO_VPE_SMASK(irq, first_cpu(tmp)); + +- /* Update the pcpu_masks */ +- for (i = 0; i < NR_CPUS; i++) +- clear_bit(irq, pcpu_masks[i].pcpu_mask); +- set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask); ++ /* Re-route this IRQ */ ++ GIC_SH_MAP_TO_VPE_SMASK(irq, first_cpu(tmp)); ++ ++ /* Update the pcpu_masks */ ++ for (i = 0; i < NR_CPUS; i++) ++ clear_bit(irq, pcpu_masks[i].pcpu_mask); ++ set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask); + +- } + cpumask_copy(d->affinity, cpumask); + spin_unlock_irqrestore(&gic_lock, flags); + diff --git a/target/linux/ramips/patches-3.10/0201-owrt-MIPS-ralink-add-pseudo-pwm-led-trigger-based-on.patch b/target/linux/ramips/patches-3.10/0201-owrt-MIPS-ralink-add-pseudo-pwm-led-trigger-based-on.patch deleted file mode 100644 index ee4d72fcb5..0000000000 --- a/target/linux/ramips/patches-3.10/0201-owrt-MIPS-ralink-add-pseudo-pwm-led-trigger-based-on.patch +++ /dev/null @@ -1,301 +0,0 @@ -From daf08289dc0ac69af0d8293dacd5ca6291400593 Mon Sep 17 00:00:00 2001 -From: John Crispin -Date: Sun, 24 Mar 2013 17:17:17 +0100 -Subject: [PATCH 30/33] owrt: MIPS: ralink: add pseudo pwm led trigger based - on timer0 - -Signed-off-by: John Crispin ---- - arch/mips/ralink/timer.c | 213 ++++++++++++++++++++++++++++++++++++++++++---- - 1 file changed, 197 insertions(+), 16 deletions(-) - ---- a/arch/mips/ralink/timer.c -+++ b/arch/mips/ralink/timer.c -@@ -12,6 +12,8 @@ - #include - #include - #include -+#include -+#include - - #include - -@@ -23,16 +25,34 @@ - - #define TMR0CTL_ENABLE BIT(7) - #define TMR0CTL_MODE_PERIODIC BIT(4) --#define TMR0CTL_PRESCALER 1 -+#define TMR0CTL_PRESCALER 2 - #define TMR0CTL_PRESCALE_VAL (0xf - TMR0CTL_PRESCALER) - #define TMR0CTL_PRESCALE_DIV (65536 / BIT(TMR0CTL_PRESCALER)) - -+struct rt_timer_gpio { -+ struct list_head list; -+ struct led_classdev *led; -+}; -+ - struct rt_timer { -- struct device *dev; -- void __iomem *membase; -- int irq; -- unsigned long timer_freq; -- unsigned long timer_div; -+ struct device *dev; -+ void __iomem *membase; -+ int irq; -+ -+ unsigned long timer_freq; -+ unsigned long timer_div; -+ -+ struct list_head gpios; -+ struct led_trigger led_trigger; -+ unsigned int duty_cycle; -+ unsigned int duty; -+ -+ unsigned int fade; -+ unsigned int fade_min; -+ unsigned int fade_max; -+ unsigned int fade_speed; -+ unsigned int fade_dir; -+ unsigned int fade_count; - }; - - static inline void rt_timer_w32(struct rt_timer *rt, u8 reg, u32 val) -@@ -48,18 +68,46 @@ static inline u32 rt_timer_r32(struct rt - static irqreturn_t rt_timer_irq(int irq, void *_rt) - { - struct rt_timer *rt = (struct rt_timer *) _rt; -+ struct rt_timer_gpio *gpio; -+ unsigned int val; - -- rt_timer_w32(rt, TIMER_REG_TMR0LOAD, rt->timer_freq / rt->timer_div); -+ if (rt->fade && (rt->fade_count++ > rt->fade_speed)) { -+ rt->fade_count = 0; -+ if (rt->duty_cycle <= rt->fade_min) -+ rt->fade_dir = 1; -+ else if (rt->duty_cycle >= rt->fade_max) -+ rt->fade_dir = 0; -+ -+ if 
(rt->fade_dir) -+ rt->duty_cycle += 1; -+ else -+ rt->duty_cycle -= 1; -+ -+ } -+ -+ val = rt->timer_freq / rt->timer_div; -+ if (rt->duty) -+ val *= rt->duty_cycle; -+ else -+ val *= (100 - rt->duty_cycle); -+ val /= 100; -+ -+ if (!list_empty(&rt->gpios)) -+ list_for_each_entry(gpio, &rt->gpios, list) -+ led_set_brightness(gpio->led, !!rt->duty); -+ -+ rt->duty = !rt->duty; -+ -+ rt_timer_w32(rt, TIMER_REG_TMR0LOAD, val + 1); - rt_timer_w32(rt, TIMER_REG_TMRSTAT, TMRSTAT_TMR0INT); - - return IRQ_HANDLED; - } - -- - static int rt_timer_request(struct rt_timer *rt) - { -- int err = request_irq(rt->irq, rt_timer_irq, IRQF_DISABLED, -- dev_name(rt->dev), rt); -+ int err = devm_request_irq(rt->dev, rt->irq, rt_timer_irq, -+ IRQF_DISABLED, dev_name(rt->dev), rt); - if (err) { - dev_err(rt->dev, "failed to request irq\n"); - } else { -@@ -81,8 +129,6 @@ static int rt_timer_config(struct rt_tim - else - rt->timer_div = divisor; - -- rt_timer_w32(rt, TIMER_REG_TMR0LOAD, rt->timer_freq / rt->timer_div); -- - return 0; - } - -@@ -108,11 +154,128 @@ static void rt_timer_disable(struct rt_t - rt_timer_w32(rt, TIMER_REG_TMR0CTL, t); - } - -+static ssize_t led_fade_show(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ struct led_classdev *led_cdev = dev_get_drvdata(dev); -+ struct rt_timer *rt = container_of(led_cdev->trigger, struct rt_timer, led_trigger); -+ -+ return sprintf(buf, "speed: %d, min: %d, max: %d\n", rt->fade_speed, rt->fade_min, rt->fade_max); -+} -+ -+static ssize_t led_fade_store(struct device *dev, -+ struct device_attribute *attr, const char *buf, size_t size) -+{ -+ struct led_classdev *led_cdev = dev_get_drvdata(dev); -+ struct rt_timer *rt = container_of(led_cdev->trigger, struct rt_timer, led_trigger); -+ unsigned int speed = 0, min = 0, max = 0; -+ ssize_t ret = -EINVAL; -+ -+ ret = sscanf(buf, "%u %u %u", &speed, &min, &max); -+ -+ if (ret == 3) { -+ rt->fade_speed = speed; -+ rt->fade_min = min; -+ rt->fade_max = max; -+ rt->fade = 1; -+ } else { -+ rt->fade = 0; -+ } -+ -+ return size; -+} -+ -+static DEVICE_ATTR(fade, 0644, led_fade_show, led_fade_store); -+ -+static ssize_t led_duty_cycle_show(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ struct led_classdev *led_cdev = dev_get_drvdata(dev); -+ struct rt_timer *rt = container_of(led_cdev->trigger, struct rt_timer, led_trigger); -+ -+ return sprintf(buf, "%u\n", rt->duty_cycle); -+} -+ -+static ssize_t led_duty_cycle_store(struct device *dev, -+ struct device_attribute *attr, const char *buf, size_t size) -+{ -+ struct led_classdev *led_cdev = dev_get_drvdata(dev); -+ struct rt_timer *rt = container_of(led_cdev->trigger, struct rt_timer, led_trigger); -+ unsigned long state; -+ ssize_t ret = -EINVAL; -+ -+ ret = kstrtoul(buf, 10, &state); -+ if (ret) -+ return ret; -+ -+ if (state <= 100) -+ rt->duty_cycle = state; -+ else -+ rt->duty_cycle = 100; -+ -+ rt->fade = 0; -+ -+ return size; -+} -+ -+static DEVICE_ATTR(duty_cycle, 0644, led_duty_cycle_show, led_duty_cycle_store); -+ -+static void rt_timer_trig_activate(struct led_classdev *led_cdev) -+{ -+ struct rt_timer *rt = container_of(led_cdev->trigger, struct rt_timer, led_trigger); -+ struct rt_timer_gpio *gpio_data; -+ int rc; -+ -+ led_cdev->trigger_data = NULL; -+ gpio_data = kzalloc(sizeof(*gpio_data), GFP_KERNEL); -+ if (!gpio_data) -+ return; -+ -+ rc = device_create_file(led_cdev->dev, &dev_attr_duty_cycle); -+ if (rc) -+ goto err_gpio; -+ rc = device_create_file(led_cdev->dev, &dev_attr_fade); -+ if (rc) -+ goto 
err_out_duty_cycle; -+ -+ led_cdev->activated = true; -+ led_cdev->trigger_data = gpio_data; -+ gpio_data->led = led_cdev; -+ list_add(&gpio_data->list, &rt->gpios); -+ led_cdev->trigger_data = gpio_data; -+ rt_timer_enable(rt); -+ return; -+ -+err_out_duty_cycle: -+ device_remove_file(led_cdev->dev, &dev_attr_duty_cycle); -+ -+err_gpio: -+ kfree(gpio_data); -+} -+ -+static void rt_timer_trig_deactivate(struct led_classdev *led_cdev) -+{ -+ struct rt_timer *rt = container_of(led_cdev->trigger, struct rt_timer, led_trigger); -+ struct rt_timer_gpio *gpio_data = (struct rt_timer_gpio*) led_cdev->trigger_data; -+ -+ if (led_cdev->activated) { -+ device_remove_file(led_cdev->dev, &dev_attr_duty_cycle); -+ device_remove_file(led_cdev->dev, &dev_attr_fade); -+ led_cdev->activated = false; -+ } -+ -+ list_del(&gpio_data->list); -+ rt_timer_disable(rt); -+ led_set_brightness(led_cdev, LED_OFF); -+} -+ - static int rt_timer_probe(struct platform_device *pdev) - { - struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -+ const __be32 *divisor; - struct rt_timer *rt; - struct clk *clk; -+ int ret; - - rt = devm_kzalloc(&pdev->dev, sizeof(*rt), GFP_KERNEL); - if (!rt) { -@@ -140,12 +303,29 @@ static int rt_timer_probe(struct platfor - if (!rt->timer_freq) - return -EINVAL; - -+ rt->duty_cycle = 100; - rt->dev = &pdev->dev; - platform_set_drvdata(pdev, rt); - -- rt_timer_request(rt); -- rt_timer_config(rt, 2); -- rt_timer_enable(rt); -+ ret = rt_timer_request(rt); -+ if (ret) -+ return ret; -+ -+ divisor = of_get_property(pdev->dev.of_node, "ralink,divisor", NULL); -+ if (divisor) -+ rt_timer_config(rt, be32_to_cpu(*divisor)); -+ else -+ rt_timer_config(rt, 200); -+ -+ rt->led_trigger.name = "pwmtimer", -+ rt->led_trigger.activate = rt_timer_trig_activate, -+ rt->led_trigger.deactivate = rt_timer_trig_deactivate, -+ -+ ret = led_trigger_register(&rt->led_trigger); -+ if (ret) -+ return ret; -+ -+ INIT_LIST_HEAD(&rt->gpios); - - dev_info(&pdev->dev, "maximum frequncy is %luHz\n", rt->timer_freq); - -@@ -156,6 +336,7 @@ static int rt_timer_remove(struct platfo - { - struct rt_timer *rt = platform_get_drvdata(pdev); - -+ led_trigger_unregister(&rt->led_trigger); - rt_timer_disable(rt); - rt_timer_free(rt); - -@@ -180,6 +361,6 @@ static struct platform_driver rt_timer_d - - module_platform_driver(rt_timer_driver); - --MODULE_DESCRIPTION("Ralink RT2880 timer"); -+MODULE_DESCRIPTION("Ralink RT2880 timer / pseudo pwm"); - MODULE_AUTHOR("John Crispin +Date: Wed, 11 Sep 2013 14:17:47 -0500 +Subject: [PATCH 202/215] MIPS: Fix SMP core calculations when using MT + support. + +The TCBIND register is only available if the core has MT support. It +should not be read otherwise. Secondly, the number of TCs (siblings) +are calculated differently depending on if the kernel is configured +as SMVP or SMTC. + +Signed-off-by: Leonid Yegoshin +Signed-off-by: Steven J. 
Hill +Cc: linux-mips@linux-mips.org +Patchwork: https://patchwork.linux-mips.org/patch/5822/ +Signed-off-by: Ralf Baechle +(cherry picked from commit 670bac3a8c201fc1f5f92ac6b4a8b42dc8172937) +--- + arch/mips/kernel/smp-cmp.c | 13 +++++++++++-- + 1 file changed, 11 insertions(+), 2 deletions(-) + +--- a/arch/mips/kernel/smp-cmp.c ++++ b/arch/mips/kernel/smp-cmp.c +@@ -99,7 +99,9 @@ static void cmp_init_secondary(void) + + c->core = (read_c0_ebase() >> 1) & 0x1ff; + #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC) +- c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE; ++ if (cpu_has_mipsmt) ++ c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & ++ TCBIND_CURVPE; + #endif + #ifdef CONFIG_MIPS_MT_SMTC + c->tc_id = (read_c0_tcbind() & TCBIND_CURTC) >> TCBIND_CURTC_SHIFT; +@@ -177,9 +179,16 @@ void __init cmp_smp_setup(void) + } + + if (cpu_has_mipsmt) { +- unsigned int nvpe, mvpconf0 = read_c0_mvpconf0(); ++ unsigned int nvpe = 1; ++#ifdef CONFIG_MIPS_MT_SMP ++ unsigned int mvpconf0 = read_c0_mvpconf0(); ++ ++ nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; ++#elif defined(CONFIG_MIPS_MT_SMTC) ++ unsigned int mvpconf0 = read_c0_mvpconf0(); + + nvpe = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; ++#endif + smp_num_siblings = nvpe; + } + pr_info("Detected %i available secondary CPU(s)\n", ncpu); diff --git a/target/linux/ramips/patches-3.10/0202-owrt-USB-adds-dwc_otg.patch b/target/linux/ramips/patches-3.10/0202-owrt-USB-adds-dwc_otg.patch deleted file mode 100644 index 5c17a665fa..0000000000 --- a/target/linux/ramips/patches-3.10/0202-owrt-USB-adds-dwc_otg.patch +++ /dev/null @@ -1,24517 +0,0 @@ -From 1a44a003bdaf917193114d0d40534496c39644ba Mon Sep 17 00:00:00 2001 -From: John Crispin -Date: Fri, 15 Mar 2013 20:58:18 +0100 -Subject: [PATCH 202/208] owrt: USB: adds dwc_otg - -Signed-off-by: John Crispin ---- - drivers/usb/Kconfig | 2 + - drivers/usb/Makefile | 1 + - drivers/usb/dwc_otg/Kconfig | 24 + - drivers/usb/dwc_otg/Makefile | 25 + - drivers/usb/dwc_otg/dummy_audio.c | 1575 +++++++++++++ - drivers/usb/dwc_otg/dwc_otg_attr.c | 966 ++++++++ - drivers/usb/dwc_otg/dwc_otg_attr.h | 67 + - drivers/usb/dwc_otg/dwc_otg_cil.c | 3692 ++++++++++++++++++++++++++++++ - drivers/usb/dwc_otg/dwc_otg_cil.h | 1098 +++++++++ - drivers/usb/dwc_otg/dwc_otg_cil_intr.c | 750 ++++++ - drivers/usb/dwc_otg/dwc_otg_driver.c | 1273 ++++++++++ - drivers/usb/dwc_otg/dwc_otg_driver.h | 83 + - drivers/usb/dwc_otg/dwc_otg_hcd.c | 2852 +++++++++++++++++++++++ - drivers/usb/dwc_otg/dwc_otg_hcd.h | 668 ++++++ - drivers/usb/dwc_otg/dwc_otg_hcd_intr.c | 1873 +++++++++++++++ - drivers/usb/dwc_otg/dwc_otg_hcd_queue.c | 684 ++++++ - drivers/usb/dwc_otg/dwc_otg_pcd.c | 2523 ++++++++++++++++++++ - drivers/usb/dwc_otg/dwc_otg_pcd.h | 248 ++ - drivers/usb/dwc_otg/dwc_otg_pcd_intr.c | 3654 +++++++++++++++++++++++++++++ - drivers/usb/dwc_otg/dwc_otg_regs.h | 2075 +++++++++++++++++ - drivers/usb/dwc_otg/linux/dwc_otg_plat.h | 260 +++ - 21 files changed, 24393 insertions(+) - create mode 100644 drivers/usb/dwc_otg/Kconfig - create mode 100644 drivers/usb/dwc_otg/Makefile - create mode 100644 drivers/usb/dwc_otg/dummy_audio.c - create mode 100644 drivers/usb/dwc_otg/dwc_otg_attr.c - create mode 100644 drivers/usb/dwc_otg/dwc_otg_attr.h - create mode 100644 drivers/usb/dwc_otg/dwc_otg_cil.c - create mode 100644 drivers/usb/dwc_otg/dwc_otg_cil.h - create mode 100644 drivers/usb/dwc_otg/dwc_otg_cil_intr.c - create mode 100644 drivers/usb/dwc_otg/dwc_otg_driver.c - create mode 
100644 drivers/usb/dwc_otg/dwc_otg_driver.h - create mode 100644 drivers/usb/dwc_otg/dwc_otg_hcd.c - create mode 100644 drivers/usb/dwc_otg/dwc_otg_hcd.h - create mode 100644 drivers/usb/dwc_otg/dwc_otg_hcd_intr.c - create mode 100644 drivers/usb/dwc_otg/dwc_otg_hcd_queue.c - create mode 100644 drivers/usb/dwc_otg/dwc_otg_pcd.c - create mode 100644 drivers/usb/dwc_otg/dwc_otg_pcd.h - create mode 100644 drivers/usb/dwc_otg/dwc_otg_pcd_intr.c - create mode 100644 drivers/usb/dwc_otg/dwc_otg_regs.h - create mode 100644 drivers/usb/dwc_otg/linux/dwc_otg_plat.h - ---- a/drivers/usb/Kconfig -+++ b/drivers/usb/Kconfig -@@ -126,6 +126,8 @@ if USB - - source "drivers/usb/core/Kconfig" - -+source "drivers/usb/dwc_otg/Kconfig" -+ - source "drivers/usb/mon/Kconfig" - - source "drivers/usb/wusbcore/Kconfig" ---- a/drivers/usb/Makefile -+++ b/drivers/usb/Makefile -@@ -7,6 +7,7 @@ - obj-$(CONFIG_USB) += core/ - - obj-$(CONFIG_USB_DWC3) += dwc3/ -+obj-$(CONFIG_DWC_OTG) += dwc_otg/ - - obj-$(CONFIG_USB_MON) += mon/ - ---- /dev/null -+++ b/drivers/usb/dwc_otg/Kconfig -@@ -0,0 +1,24 @@ -+config DWC_OTG -+ tristate "Ralink RT305X DWC_OTG support" -+ depends on SOC_RT305X -+ ---help--- -+ This driver supports Ralink DWC_OTG -+ -+choice -+ prompt "USB Operation Mode" -+ depends on DWC_OTG -+ default DWC_OTG_HOST_ONLY -+ -+config DWC_OTG_HOST_ONLY -+ bool "HOST ONLY MODE" -+ depends on DWC_OTG -+ -+config DWC_OTG_DEVICE_ONLY -+ bool "DEVICE ONLY MODE" -+ depends on DWC_OTG -+ -+endchoice -+ -+config DWC_OTG_DEBUG -+ bool "Enable debug mode" -+ depends on DWC_OTG ---- /dev/null -+++ b/drivers/usb/dwc_otg/Makefile -@@ -0,0 +1,25 @@ -+# -+# Makefile for DWC_otg Highspeed USB controller driver -+# -+ -+ifeq ($(CONFIG_DWC_OTG_DEBUG),y) -+EXTRA_CFLAGS += -DDEBUG -+endif -+ -+# Use one of the following flags to compile the software in host-only or -+# device-only mode. -+ifeq ($(CONFIG_DWC_OTG_HOST_ONLY),y) -+EXTRA_CFLAGS += -DDWC_HOST_ONLY -+EXTRA_CFLAGS += -DDWC_EN_ISOC -+endif -+ -+ifeq ($(CONFIG_DWC_OTG_DEVICE_ONLY),y) -+EXTRA_CFLAGS += -DDWC_DEVICE_ONLY -+endif -+ -+obj-$(CONFIG_DWC_OTG) := dwc_otg.o -+ -+dwc_otg-objs := dwc_otg_driver.o dwc_otg_attr.o -+dwc_otg-objs += dwc_otg_cil.o dwc_otg_cil_intr.o -+dwc_otg-objs += dwc_otg_pcd.o dwc_otg_pcd_intr.o -+dwc_otg-objs += dwc_otg_hcd.o dwc_otg_hcd_intr.o dwc_otg_hcd_queue.o ---- /dev/null -+++ b/drivers/usb/dwc_otg/dummy_audio.c -@@ -0,0 +1,1575 @@ -+/* -+ * zero.c -- Gadget Zero, for USB development -+ * -+ * Copyright (C) 2003-2004 David Brownell -+ * All rights reserved. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions -+ * are met: -+ * 1. Redistributions of source code must retain the above copyright -+ * notice, this list of conditions, and the following disclaimer, -+ * without modification. -+ * 2. Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * 3. The names of the above-listed copyright holders may not be used -+ * to endorse or promote products derived from this software without -+ * specific prior written permission. -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. 
-+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS -+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, -+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+ -+/* -+ * Gadget Zero only needs two bulk endpoints, and is an example of how you -+ * can write a hardware-agnostic gadget driver running inside a USB device. -+ * -+ * Hardware details are visible (see CONFIG_USB_ZERO_* below) but don't -+ * affect most of the driver. -+ * -+ * Use it with the Linux host/master side "usbtest" driver to get a basic -+ * functional test of your device-side usb stack, or with "usb-skeleton". -+ * -+ * It supports two similar configurations. One sinks whatever the usb host -+ * writes, and in return sources zeroes. The other loops whatever the host -+ * writes back, so the host can read it. Module options include: -+ * -+ * buflen=N default N=4096, buffer size used -+ * qlen=N default N=32, how many buffers in the loopback queue -+ * loopdefault default false, list loopback config first -+ * -+ * Many drivers will only have one configuration, letting them be much -+ * simpler if they also don't support high speed operation (like this -+ * driver does). -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+#include -+#include -+ -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21) -+# include -+#else -+# include -+#endif -+ -+#include -+ -+ -+/*-------------------------------------------------------------------------*/ -+/*-------------------------------------------------------------------------*/ -+ -+ -+static int utf8_to_utf16le(const char *s, u16 *cp, unsigned len) -+{ -+ int count = 0; -+ u8 c; -+ u16 uchar; -+ -+ /* this insists on correct encodings, though not minimal ones. -+ * BUT it currently rejects legit 4-byte UTF-8 code points, -+ * which need surrogate pairs. (Unicode 3.1 can use them.) 
-+ */ -+ while (len != 0 && (c = (u8) *s++) != 0) { -+ if (unlikely(c & 0x80)) { -+ // 2-byte sequence: -+ // 00000yyyyyxxxxxx = 110yyyyy 10xxxxxx -+ if ((c & 0xe0) == 0xc0) { -+ uchar = (c & 0x1f) << 6; -+ -+ c = (u8) *s++; -+ if ((c & 0xc0) != 0xc0) -+ goto fail; -+ c &= 0x3f; -+ uchar |= c; -+ -+ // 3-byte sequence (most CJKV characters): -+ // zzzzyyyyyyxxxxxx = 1110zzzz 10yyyyyy 10xxxxxx -+ } else if ((c & 0xf0) == 0xe0) { -+ uchar = (c & 0x0f) << 12; -+ -+ c = (u8) *s++; -+ if ((c & 0xc0) != 0xc0) -+ goto fail; -+ c &= 0x3f; -+ uchar |= c << 6; -+ -+ c = (u8) *s++; -+ if ((c & 0xc0) != 0xc0) -+ goto fail; -+ c &= 0x3f; -+ uchar |= c; -+ -+ /* no bogus surrogates */ -+ if (0xd800 <= uchar && uchar <= 0xdfff) -+ goto fail; -+ -+ // 4-byte sequence (surrogate pairs, currently rare): -+ // 11101110wwwwzzzzyy + 110111yyyyxxxxxx -+ // = 11110uuu 10uuzzzz 10yyyyyy 10xxxxxx -+ // (uuuuu = wwww + 1) -+ // FIXME accept the surrogate code points (only) -+ -+ } else -+ goto fail; -+ } else -+ uchar = c; -+ put_unaligned (cpu_to_le16 (uchar), cp++); -+ count++; -+ len--; -+ } -+ return count; -+fail: -+ return -1; -+} -+ -+ -+/** -+ * usb_gadget_get_string - fill out a string descriptor -+ * @table: of c strings encoded using UTF-8 -+ * @id: string id, from low byte of wValue in get string descriptor -+ * @buf: at least 256 bytes -+ * -+ * Finds the UTF-8 string matching the ID, and converts it into a -+ * string descriptor in utf16-le. -+ * Returns length of descriptor (always even) or negative errno -+ * -+ * If your driver needs stings in multiple languages, you'll probably -+ * "switch (wIndex) { ... }" in your ep0 string descriptor logic, -+ * using this routine after choosing which set of UTF-8 strings to use. -+ * Note that US-ASCII is a strict subset of UTF-8; any string bytes with -+ * the eighth bit set will be multibyte UTF-8 characters, not ISO-8859/1 -+ * characters (which are also widely used in C strings). -+ */ -+int -+usb_gadget_get_string (struct usb_gadget_strings *table, int id, u8 *buf) -+{ -+ struct usb_string *s; -+ int len; -+ -+ /* descriptor 0 has the language id */ -+ if (id == 0) { -+ buf [0] = 4; -+ buf [1] = USB_DT_STRING; -+ buf [2] = (u8) table->language; -+ buf [3] = (u8) (table->language >> 8); -+ return 4; -+ } -+ for (s = table->strings; s && s->s; s++) -+ if (s->id == id) -+ break; -+ -+ /* unrecognized: stall. */ -+ if (!s || !s->s) -+ return -EINVAL; -+ -+ /* string descriptors have length, tag, then UTF16-LE text */ -+ len = min ((size_t) 126, strlen (s->s)); -+ memset (buf + 2, 0, 2 * len); /* zero all the bytes */ -+ len = utf8_to_utf16le(s->s, (u16 *)&buf[2], len); -+ if (len < 0) -+ return -EINVAL; -+ buf [0] = (len + 1) * 2; -+ buf [1] = USB_DT_STRING; -+ return buf [0]; -+} -+ -+ -+/*-------------------------------------------------------------------------*/ -+/*-------------------------------------------------------------------------*/ -+ -+ -+/** -+ * usb_descriptor_fillbuf - fill buffer with descriptors -+ * @buf: Buffer to be filled -+ * @buflen: Size of buf -+ * @src: Array of descriptor pointers, terminated by null pointer. -+ * -+ * Copies descriptors into the buffer, returning the length or a -+ * negative error code if they can't all be copied. Useful when -+ * assembling descriptors for an associated set of interfaces used -+ * as part of configuring a composite device; or in other cases where -+ * sets of descriptors need to be marshaled. 
-+ */ -+int -+usb_descriptor_fillbuf(void *buf, unsigned buflen, -+ const struct usb_descriptor_header **src) -+{ -+ u8 *dest = buf; -+ -+ if (!src) -+ return -EINVAL; -+ -+ /* fill buffer from src[] until null descriptor ptr */ -+ for (; 0 != *src; src++) { -+ unsigned len = (*src)->bLength; -+ -+ if (len > buflen) -+ return -EINVAL; -+ memcpy(dest, *src, len); -+ buflen -= len; -+ dest += len; -+ } -+ return dest - (u8 *)buf; -+} -+ -+ -+/** -+ * usb_gadget_config_buf - builts a complete configuration descriptor -+ * @config: Header for the descriptor, including characteristics such -+ * as power requirements and number of interfaces. -+ * @desc: Null-terminated vector of pointers to the descriptors (interface, -+ * endpoint, etc) defining all functions in this device configuration. -+ * @buf: Buffer for the resulting configuration descriptor. -+ * @length: Length of buffer. If this is not big enough to hold the -+ * entire configuration descriptor, an error code will be returned. -+ * -+ * This copies descriptors into the response buffer, building a descriptor -+ * for that configuration. It returns the buffer length or a negative -+ * status code. The config.wTotalLength field is set to match the length -+ * of the result, but other descriptor fields (including power usage and -+ * interface count) must be set by the caller. -+ * -+ * Gadget drivers could use this when constructing a config descriptor -+ * in response to USB_REQ_GET_DESCRIPTOR. They will need to patch the -+ * resulting bDescriptorType value if USB_DT_OTHER_SPEED_CONFIG is needed. -+ */ -+int usb_gadget_config_buf( -+ const struct usb_config_descriptor *config, -+ void *buf, -+ unsigned length, -+ const struct usb_descriptor_header **desc -+) -+{ -+ struct usb_config_descriptor *cp = buf; -+ int len; -+ -+ /* config descriptor first */ -+ if (length < USB_DT_CONFIG_SIZE || !desc) -+ return -EINVAL; -+ *cp = *config; -+ -+ /* then interface/endpoint/class/vendor/... */ -+ len = usb_descriptor_fillbuf(USB_DT_CONFIG_SIZE + (u8*)buf, -+ length - USB_DT_CONFIG_SIZE, desc); -+ if (len < 0) -+ return len; -+ len += USB_DT_CONFIG_SIZE; -+ if (len > 0xffff) -+ return -EINVAL; -+ -+ /* patch up the config descriptor */ -+ cp->bLength = USB_DT_CONFIG_SIZE; -+ cp->bDescriptorType = USB_DT_CONFIG; -+ cp->wTotalLength = cpu_to_le16(len); -+ cp->bmAttributes |= USB_CONFIG_ATT_ONE; -+ return len; -+} -+ -+/*-------------------------------------------------------------------------*/ -+/*-------------------------------------------------------------------------*/ -+ -+ -+#define RBUF_LEN (1024*1024) -+static int rbuf_start; -+static int rbuf_len; -+static __u8 rbuf[RBUF_LEN]; -+ -+/*-------------------------------------------------------------------------*/ -+ -+#define DRIVER_VERSION "St Patrick's Day 2004" -+ -+static const char shortname [] = "zero"; -+static const char longname [] = "YAMAHA YST-MS35D USB Speaker "; -+ -+static const char source_sink [] = "source and sink data"; -+static const char loopback [] = "loop input to output"; -+ -+/*-------------------------------------------------------------------------*/ -+ -+/* -+ * driver assumes self-powered hardware, and -+ * has no way for users to trigger remote wakeup. -+ * -+ * this version autoconfigures as much as possible, -+ * which is reasonable for most "bulk-only" drivers. 
-+ */ -+static const char *EP_IN_NAME; /* source */ -+static const char *EP_OUT_NAME; /* sink */ -+ -+/*-------------------------------------------------------------------------*/ -+ -+/* big enough to hold our biggest descriptor */ -+#define USB_BUFSIZ 512 -+ -+struct zero_dev { -+ spinlock_t lock; -+ struct usb_gadget *gadget; -+ struct usb_request *req; /* for control responses */ -+ -+ /* when configured, we have one of two configs: -+ * - source data (in to host) and sink it (out from host) -+ * - or loop it back (out from host back in to host) -+ */ -+ u8 config; -+ struct usb_ep *in_ep, *out_ep; -+ -+ /* autoresume timer */ -+ struct timer_list resume; -+}; -+ -+#define xprintk(d,level,fmt,args...) \ -+ dev_printk(level , &(d)->gadget->dev , fmt , ## args) -+ -+#ifdef DEBUG -+#define DBG(dev,fmt,args...) \ -+ xprintk(dev , KERN_DEBUG , fmt , ## args) -+#else -+#define DBG(dev,fmt,args...) \ -+ do { } while (0) -+#endif /* DEBUG */ -+ -+#ifdef VERBOSE -+#define VDBG DBG -+#else -+#define VDBG(dev,fmt,args...) \ -+ do { } while (0) -+#endif /* VERBOSE */ -+ -+#define ERROR(dev,fmt,args...) \ -+ xprintk(dev , KERN_ERR , fmt , ## args) -+#define WARN(dev,fmt,args...) \ -+ xprintk(dev , KERN_WARNING , fmt , ## args) -+#define INFO(dev,fmt,args...) \ -+ xprintk(dev , KERN_INFO , fmt , ## args) -+ -+/*-------------------------------------------------------------------------*/ -+ -+static unsigned buflen = 4096; -+static unsigned qlen = 32; -+static unsigned pattern = 0; -+ -+module_param (buflen, uint, S_IRUGO|S_IWUSR); -+module_param (qlen, uint, S_IRUGO|S_IWUSR); -+module_param (pattern, uint, S_IRUGO|S_IWUSR); -+ -+/* -+ * if it's nonzero, autoresume says how many seconds to wait -+ * before trying to wake up the host after suspend. -+ */ -+static unsigned autoresume = 0; -+module_param (autoresume, uint, 0); -+ -+/* -+ * Normally the "loopback" configuration is second (index 1) so -+ * it's not the default. Here's where to change that order, to -+ * work better with hosts where config changes are problematic. -+ * Or controllers (like superh) that only support one config. -+ */ -+static int loopdefault = 0; -+ -+module_param (loopdefault, bool, S_IRUGO|S_IWUSR); -+ -+/*-------------------------------------------------------------------------*/ -+ -+/* Thanks to NetChip Technologies for donating this product ID. -+ * -+ * DO NOT REUSE THESE IDs with a protocol-incompatible driver!! Ever!! -+ * Instead: allocate your own, using normal USB-IF procedures. -+ */ -+#ifndef CONFIG_USB_ZERO_HNPTEST -+#define DRIVER_VENDOR_NUM 0x0525 /* NetChip */ -+#define DRIVER_PRODUCT_NUM 0xa4a0 /* Linux-USB "Gadget Zero" */ -+#else -+#define DRIVER_VENDOR_NUM 0x1a0a /* OTG test device IDs */ -+#define DRIVER_PRODUCT_NUM 0xbadd -+#endif -+ -+/*-------------------------------------------------------------------------*/ -+ -+/* -+ * DESCRIPTORS ... most are static, but strings and (full) -+ * configuration descriptors are built on demand. -+ */ -+ -+/* -+#define STRING_MANUFACTURER 25 -+#define STRING_PRODUCT 42 -+#define STRING_SERIAL 101 -+*/ -+#define STRING_MANUFACTURER 1 -+#define STRING_PRODUCT 2 -+#define STRING_SERIAL 3 -+ -+#define STRING_SOURCE_SINK 250 -+#define STRING_LOOPBACK 251 -+ -+/* -+ * This device advertises two configurations; these numbers work -+ * on a pxa250 as well as more flexible hardware. 
-+ */ -+#define CONFIG_SOURCE_SINK 3 -+#define CONFIG_LOOPBACK 2 -+ -+/* -+static struct usb_device_descriptor -+device_desc = { -+ .bLength = sizeof device_desc, -+ .bDescriptorType = USB_DT_DEVICE, -+ -+ .bcdUSB = __constant_cpu_to_le16 (0x0200), -+ .bDeviceClass = USB_CLASS_VENDOR_SPEC, -+ -+ .idVendor = __constant_cpu_to_le16 (DRIVER_VENDOR_NUM), -+ .idProduct = __constant_cpu_to_le16 (DRIVER_PRODUCT_NUM), -+ .iManufacturer = STRING_MANUFACTURER, -+ .iProduct = STRING_PRODUCT, -+ .iSerialNumber = STRING_SERIAL, -+ .bNumConfigurations = 2, -+}; -+*/ -+static struct usb_device_descriptor -+device_desc = { -+ .bLength = sizeof device_desc, -+ .bDescriptorType = USB_DT_DEVICE, -+ .bcdUSB = __constant_cpu_to_le16 (0x0100), -+ .bDeviceClass = USB_CLASS_PER_INTERFACE, -+ .bDeviceSubClass = 0, -+ .bDeviceProtocol = 0, -+ .bMaxPacketSize0 = 64, -+ .bcdDevice = __constant_cpu_to_le16 (0x0100), -+ .idVendor = __constant_cpu_to_le16 (0x0499), -+ .idProduct = __constant_cpu_to_le16 (0x3002), -+ .iManufacturer = STRING_MANUFACTURER, -+ .iProduct = STRING_PRODUCT, -+ .iSerialNumber = STRING_SERIAL, -+ .bNumConfigurations = 1, -+}; -+ -+static struct usb_config_descriptor -+z_config = { -+ .bLength = sizeof z_config, -+ .bDescriptorType = USB_DT_CONFIG, -+ -+ /* compute wTotalLength on the fly */ -+ .bNumInterfaces = 2, -+ .bConfigurationValue = 1, -+ .iConfiguration = 0, -+ .bmAttributes = 0x40, -+ .bMaxPower = 0, /* self-powered */ -+}; -+ -+ -+static struct usb_otg_descriptor -+otg_descriptor = { -+ .bLength = sizeof otg_descriptor, -+ .bDescriptorType = USB_DT_OTG, -+ -+ .bmAttributes = USB_OTG_SRP, -+}; -+ -+/* one interface in each configuration */ -+#ifdef CONFIG_USB_GADGET_DUALSPEED -+ -+/* -+ * usb 2.0 devices need to expose both high speed and full speed -+ * descriptors, unless they only run at full speed. -+ * -+ * that means alternate endpoint descriptors (bigger packets) -+ * and a "device qualifier" ... plus more construction options -+ * for the config descriptor. 
-+ */ -+ -+static struct usb_qualifier_descriptor -+dev_qualifier = { -+ .bLength = sizeof dev_qualifier, -+ .bDescriptorType = USB_DT_DEVICE_QUALIFIER, -+ -+ .bcdUSB = __constant_cpu_to_le16 (0x0200), -+ .bDeviceClass = USB_CLASS_VENDOR_SPEC, -+ -+ .bNumConfigurations = 2, -+}; -+ -+ -+struct usb_cs_as_general_descriptor { -+ __u8 bLength; -+ __u8 bDescriptorType; -+ -+ __u8 bDescriptorSubType; -+ __u8 bTerminalLink; -+ __u8 bDelay; -+ __u16 wFormatTag; -+} __attribute__ ((packed)); -+ -+struct usb_cs_as_format_descriptor { -+ __u8 bLength; -+ __u8 bDescriptorType; -+ -+ __u8 bDescriptorSubType; -+ __u8 bFormatType; -+ __u8 bNrChannels; -+ __u8 bSubframeSize; -+ __u8 bBitResolution; -+ __u8 bSamfreqType; -+ __u8 tLowerSamFreq[3]; -+ __u8 tUpperSamFreq[3]; -+} __attribute__ ((packed)); -+ -+static const struct usb_interface_descriptor -+z_audio_control_if_desc = { -+ .bLength = sizeof z_audio_control_if_desc, -+ .bDescriptorType = USB_DT_INTERFACE, -+ .bInterfaceNumber = 0, -+ .bAlternateSetting = 0, -+ .bNumEndpoints = 0, -+ .bInterfaceClass = USB_CLASS_AUDIO, -+ .bInterfaceSubClass = 0x1, -+ .bInterfaceProtocol = 0, -+ .iInterface = 0, -+}; -+ -+static const struct usb_interface_descriptor -+z_audio_if_desc = { -+ .bLength = sizeof z_audio_if_desc, -+ .bDescriptorType = USB_DT_INTERFACE, -+ .bInterfaceNumber = 1, -+ .bAlternateSetting = 0, -+ .bNumEndpoints = 0, -+ .bInterfaceClass = USB_CLASS_AUDIO, -+ .bInterfaceSubClass = 0x2, -+ .bInterfaceProtocol = 0, -+ .iInterface = 0, -+}; -+ -+static const struct usb_interface_descriptor -+z_audio_if_desc2 = { -+ .bLength = sizeof z_audio_if_desc, -+ .bDescriptorType = USB_DT_INTERFACE, -+ .bInterfaceNumber = 1, -+ .bAlternateSetting = 1, -+ .bNumEndpoints = 1, -+ .bInterfaceClass = USB_CLASS_AUDIO, -+ .bInterfaceSubClass = 0x2, -+ .bInterfaceProtocol = 0, -+ .iInterface = 0, -+}; -+ -+static const struct usb_cs_as_general_descriptor -+z_audio_cs_as_if_desc = { -+ .bLength = 7, -+ .bDescriptorType = 0x24, -+ -+ .bDescriptorSubType = 0x01, -+ .bTerminalLink = 0x01, -+ .bDelay = 0x0, -+ .wFormatTag = __constant_cpu_to_le16 (0x0001) -+}; -+ -+ -+static const struct usb_cs_as_format_descriptor -+z_audio_cs_as_format_desc = { -+ .bLength = 0xe, -+ .bDescriptorType = 0x24, -+ -+ .bDescriptorSubType = 2, -+ .bFormatType = 1, -+ .bNrChannels = 1, -+ .bSubframeSize = 1, -+ .bBitResolution = 8, -+ .bSamfreqType = 0, -+ .tLowerSamFreq = {0x7e, 0x13, 0x00}, -+ .tUpperSamFreq = {0xe2, 0xd6, 0x00}, -+}; -+ -+static const struct usb_endpoint_descriptor -+z_iso_ep = { -+ .bLength = 0x09, -+ .bDescriptorType = 0x05, -+ .bEndpointAddress = 0x04, -+ .bmAttributes = 0x09, -+ .wMaxPacketSize = 0x0038, -+ .bInterval = 0x01, -+ .bRefresh = 0x00, -+ .bSynchAddress = 0x00, -+}; -+ -+static char z_iso_ep2[] = {0x07, 0x25, 0x01, 0x00, 0x02, 0x00, 0x02}; -+ -+// 9 bytes -+static char z_ac_interface_header_desc[] = -+{ 0x09, 0x24, 0x01, 0x00, 0x01, 0x2b, 0x00, 0x01, 0x01 }; -+ -+// 12 bytes -+static char z_0[] = {0x0c, 0x24, 0x02, 0x01, 0x01, 0x01, 0x00, 0x02, -+ 0x03, 0x00, 0x00, 0x00}; -+// 13 bytes -+static char z_1[] = {0x0d, 0x24, 0x06, 0x02, 0x01, 0x02, 0x15, 0x00, -+ 0x02, 0x00, 0x02, 0x00, 0x00}; -+// 9 bytes -+static char z_2[] = {0x09, 0x24, 0x03, 0x03, 0x01, 0x03, 0x00, 0x02, -+ 0x00}; -+ -+static char za_0[] = {0x09, 0x04, 0x01, 0x02, 0x01, 0x01, 0x02, 0x00, -+ 0x00}; -+ -+static char za_1[] = {0x07, 0x24, 0x01, 0x01, 0x00, 0x01, 0x00}; -+ -+static char za_2[] = {0x0e, 0x24, 0x02, 0x01, 0x02, 0x01, 0x08, 0x00, -+ 0x7e, 0x13, 0x00, 0xe2, 0xd6, 0x00}; -+ -+static 
char za_3[] = {0x09, 0x05, 0x04, 0x09, 0x70, 0x00, 0x01, 0x00, -+ 0x00}; -+ -+static char za_4[] = {0x07, 0x25, 0x01, 0x00, 0x02, 0x00, 0x02}; -+ -+static char za_5[] = {0x09, 0x04, 0x01, 0x03, 0x01, 0x01, 0x02, 0x00, -+ 0x00}; -+ -+static char za_6[] = {0x07, 0x24, 0x01, 0x01, 0x00, 0x01, 0x00}; -+ -+static char za_7[] = {0x0e, 0x24, 0x02, 0x01, 0x01, 0x02, 0x10, 0x00, -+ 0x7e, 0x13, 0x00, 0xe2, 0xd6, 0x00}; -+ -+static char za_8[] = {0x09, 0x05, 0x04, 0x09, 0x70, 0x00, 0x01, 0x00, -+ 0x00}; -+ -+static char za_9[] = {0x07, 0x25, 0x01, 0x00, 0x02, 0x00, 0x02}; -+ -+static char za_10[] = {0x09, 0x04, 0x01, 0x04, 0x01, 0x01, 0x02, 0x00, -+ 0x00}; -+ -+static char za_11[] = {0x07, 0x24, 0x01, 0x01, 0x00, 0x01, 0x00}; -+ -+static char za_12[] = {0x0e, 0x24, 0x02, 0x01, 0x02, 0x02, 0x10, 0x00, -+ 0x73, 0x13, 0x00, 0xe2, 0xd6, 0x00}; -+ -+static char za_13[] = {0x09, 0x05, 0x04, 0x09, 0xe0, 0x00, 0x01, 0x00, -+ 0x00}; -+ -+static char za_14[] = {0x07, 0x25, 0x01, 0x00, 0x02, 0x00, 0x02}; -+ -+static char za_15[] = {0x09, 0x04, 0x01, 0x05, 0x01, 0x01, 0x02, 0x00, -+ 0x00}; -+ -+static char za_16[] = {0x07, 0x24, 0x01, 0x01, 0x00, 0x01, 0x00}; -+ -+static char za_17[] = {0x0e, 0x24, 0x02, 0x01, 0x01, 0x03, 0x14, 0x00, -+ 0x7e, 0x13, 0x00, 0xe2, 0xd6, 0x00}; -+ -+static char za_18[] = {0x09, 0x05, 0x04, 0x09, 0xa8, 0x00, 0x01, 0x00, -+ 0x00}; -+ -+static char za_19[] = {0x07, 0x25, 0x01, 0x00, 0x02, 0x00, 0x02}; -+ -+static char za_20[] = {0x09, 0x04, 0x01, 0x06, 0x01, 0x01, 0x02, 0x00, -+ 0x00}; -+ -+static char za_21[] = {0x07, 0x24, 0x01, 0x01, 0x00, 0x01, 0x00}; -+ -+static char za_22[] = {0x0e, 0x24, 0x02, 0x01, 0x02, 0x03, 0x14, 0x00, -+ 0x7e, 0x13, 0x00, 0xe2, 0xd6, 0x00}; -+ -+static char za_23[] = {0x09, 0x05, 0x04, 0x09, 0x50, 0x01, 0x01, 0x00, -+ 0x00}; -+ -+static char za_24[] = {0x07, 0x25, 0x01, 0x00, 0x02, 0x00, 0x02}; -+ -+ -+ -+static const struct usb_descriptor_header *z_function [] = { -+ (struct usb_descriptor_header *) &z_audio_control_if_desc, -+ (struct usb_descriptor_header *) &z_ac_interface_header_desc, -+ (struct usb_descriptor_header *) &z_0, -+ (struct usb_descriptor_header *) &z_1, -+ (struct usb_descriptor_header *) &z_2, -+ (struct usb_descriptor_header *) &z_audio_if_desc, -+ (struct usb_descriptor_header *) &z_audio_if_desc2, -+ (struct usb_descriptor_header *) &z_audio_cs_as_if_desc, -+ (struct usb_descriptor_header *) &z_audio_cs_as_format_desc, -+ (struct usb_descriptor_header *) &z_iso_ep, -+ (struct usb_descriptor_header *) &z_iso_ep2, -+ (struct usb_descriptor_header *) &za_0, -+ (struct usb_descriptor_header *) &za_1, -+ (struct usb_descriptor_header *) &za_2, -+ (struct usb_descriptor_header *) &za_3, -+ (struct usb_descriptor_header *) &za_4, -+ (struct usb_descriptor_header *) &za_5, -+ (struct usb_descriptor_header *) &za_6, -+ (struct usb_descriptor_header *) &za_7, -+ (struct usb_descriptor_header *) &za_8, -+ (struct usb_descriptor_header *) &za_9, -+ (struct usb_descriptor_header *) &za_10, -+ (struct usb_descriptor_header *) &za_11, -+ (struct usb_descriptor_header *) &za_12, -+ (struct usb_descriptor_header *) &za_13, -+ (struct usb_descriptor_header *) &za_14, -+ (struct usb_descriptor_header *) &za_15, -+ (struct usb_descriptor_header *) &za_16, -+ (struct usb_descriptor_header *) &za_17, -+ (struct usb_descriptor_header *) &za_18, -+ (struct usb_descriptor_header *) &za_19, -+ (struct usb_descriptor_header *) &za_20, -+ (struct usb_descriptor_header *) &za_21, -+ (struct usb_descriptor_header *) &za_22, -+ (struct usb_descriptor_header *) 
&za_23, -+ (struct usb_descriptor_header *) &za_24, -+ NULL, -+}; -+ -+/* maxpacket and other transfer characteristics vary by speed. */ -+#define ep_desc(g,hs,fs) (((g)->speed==USB_SPEED_HIGH)?(hs):(fs)) -+ -+#else -+ -+/* if there's no high speed support, maxpacket doesn't change. */ -+#define ep_desc(g,hs,fs) fs -+ -+#endif /* !CONFIG_USB_GADGET_DUALSPEED */ -+ -+static char manufacturer [40]; -+//static char serial [40]; -+static char serial [] = "Ser 00 em"; -+ -+/* static strings, in UTF-8 */ -+static struct usb_string strings [] = { -+ { STRING_MANUFACTURER, manufacturer, }, -+ { STRING_PRODUCT, longname, }, -+ { STRING_SERIAL, serial, }, -+ { STRING_LOOPBACK, loopback, }, -+ { STRING_SOURCE_SINK, source_sink, }, -+ { } /* end of list */ -+}; -+ -+static struct usb_gadget_strings stringtab = { -+ .language = 0x0409, /* en-us */ -+ .strings = strings, -+}; -+ -+/* -+ * config descriptors are also handcrafted. these must agree with code -+ * that sets configurations, and with code managing interfaces and their -+ * altsettings. other complexity may come from: -+ * -+ * - high speed support, including "other speed config" rules -+ * - multiple configurations -+ * - interfaces with alternate settings -+ * - embedded class or vendor-specific descriptors -+ * -+ * this handles high speed, and has a second config that could as easily -+ * have been an alternate interface setting (on most hardware). -+ * -+ * NOTE: to demonstrate (and test) more USB capabilities, this driver -+ * should include an altsetting to test interrupt transfers, including -+ * high bandwidth modes at high speed. (Maybe work like Intel's test -+ * device?) -+ */ -+static int -+config_buf (struct usb_gadget *gadget, u8 *buf, u8 type, unsigned index) -+{ -+ int len; -+ const struct usb_descriptor_header **function; -+ -+ function = z_function; -+ len = usb_gadget_config_buf (&z_config, buf, USB_BUFSIZ, function); -+ if (len < 0) -+ return len; -+ ((struct usb_config_descriptor *) buf)->bDescriptorType = type; -+ return len; -+} -+ -+/*-------------------------------------------------------------------------*/ -+ -+static struct usb_request * -+alloc_ep_req (struct usb_ep *ep, unsigned length) -+{ -+ struct usb_request *req; -+ -+ req = usb_ep_alloc_request (ep, GFP_ATOMIC); -+ if (req) { -+ req->length = length; -+ req->buf = usb_ep_alloc_buffer (ep, length, -+ &req->dma, GFP_ATOMIC); -+ if (!req->buf) { -+ usb_ep_free_request (ep, req); -+ req = NULL; -+ } -+ } -+ return req; -+} -+ -+static void free_ep_req (struct usb_ep *ep, struct usb_request *req) -+{ -+ if (req->buf) -+ usb_ep_free_buffer (ep, req->buf, req->dma, req->length); -+ usb_ep_free_request (ep, req); -+} -+ -+/*-------------------------------------------------------------------------*/ -+ -+/* optionally require specific source/sink data patterns */ -+ -+static int -+check_read_data ( -+ struct zero_dev *dev, -+ struct usb_ep *ep, -+ struct usb_request *req -+) -+{ -+ unsigned i; -+ u8 *buf = req->buf; -+ -+ for (i = 0; i < req->actual; i++, buf++) { -+ switch (pattern) { -+ /* all-zeroes has no synchronization issues */ -+ case 0: -+ if (*buf == 0) -+ continue; -+ break; -+ /* mod63 stays in sync with short-terminated transfers, -+ * or otherwise when host and gadget agree on how large -+ * each usb transfer request should be. resync is done -+ * with set_interface or set_config. 
-+ */ -+ case 1: -+ if (*buf == (u8)(i % 63)) -+ continue; -+ break; -+ } -+ ERROR (dev, "bad OUT byte, buf [%d] = %d\n", i, *buf); -+ usb_ep_set_halt (ep); -+ return -EINVAL; -+ } -+ return 0; -+} -+ -+/*-------------------------------------------------------------------------*/ -+ -+static void zero_reset_config (struct zero_dev *dev) -+{ -+ if (dev->config == 0) -+ return; -+ -+ DBG (dev, "reset config\n"); -+ -+ /* just disable endpoints, forcing completion of pending i/o. -+ * all our completion handlers free their requests in this case. -+ */ -+ if (dev->in_ep) { -+ usb_ep_disable (dev->in_ep); -+ dev->in_ep = NULL; -+ } -+ if (dev->out_ep) { -+ usb_ep_disable (dev->out_ep); -+ dev->out_ep = NULL; -+ } -+ dev->config = 0; -+ del_timer (&dev->resume); -+} -+ -+#define _write(f, buf, sz) (f->f_op->write(f, buf, sz, &f->f_pos)) -+ -+static void -+zero_isoc_complete (struct usb_ep *ep, struct usb_request *req) -+{ -+ struct zero_dev *dev = ep->driver_data; -+ int status = req->status; -+ int i, j; -+ -+ switch (status) { -+ -+ case 0: /* normal completion? */ -+ //printk ("\nzero ---------------> isoc normal completion %d bytes\n", req->actual); -+ for (i=0, j=rbuf_start; i<req->actual; i++) { -+ //printk ("%02x ", ((__u8*)req->buf)[i]); -+ rbuf[j] = ((__u8*)req->buf)[i]; -+ j++; -+ if (j >= RBUF_LEN) j=0; -+ } -+ rbuf_start = j; -+ //printk ("\n\n"); -+ -+ if (rbuf_len < RBUF_LEN) { -+ rbuf_len += req->actual; -+ if (rbuf_len > RBUF_LEN) { -+ rbuf_len = RBUF_LEN; -+ } -+ } -+ -+ break; -+ -+ /* this endpoint is normally active while we're configured */ -+ case -ECONNABORTED: /* hardware forced ep reset */ -+ case -ECONNRESET: /* request dequeued */ -+ case -ESHUTDOWN: /* disconnect from host */ -+ VDBG (dev, "%s gone (%d), %d/%d\n", ep->name, status, -+ req->actual, req->length); -+ if (ep == dev->out_ep) -+ check_read_data (dev, ep, req); -+ free_ep_req (ep, req); -+ return; -+ -+ case -EOVERFLOW: /* buffer overrun on read means that -+ * we didn't provide a big enough -+ * buffer. -+ */ -+ default: -+#if 1 -+ DBG (dev, "%s complete --> %d, %d/%d\n", ep->name, -+ status, req->actual, req->length); -+#endif -+ case -EREMOTEIO: /* short read */ -+ break; -+ } -+ -+ status = usb_ep_queue (ep, req, GFP_ATOMIC); -+ if (status) { -+ ERROR (dev, "kill %s: resubmit %d bytes --> %d\n", -+ ep->name, req->length, status); -+ usb_ep_set_halt (ep); -+ /* FIXME recover later ... somehow */ -+ } -+} -+ -+static struct usb_request * -+zero_start_isoc_ep (struct usb_ep *ep, int gfp_flags) -+{ -+ struct usb_request *req; -+ int status; -+ -+ req = alloc_ep_req (ep, 512); -+ if (!req) -+ return NULL; -+ -+ req->complete = zero_isoc_complete; -+ -+ status = usb_ep_queue (ep, req, gfp_flags); -+ if (status) { -+ struct zero_dev *dev = ep->driver_data; -+ -+ ERROR (dev, "start %s --> %d\n", ep->name, status); -+ free_ep_req (ep, req); -+ req = NULL; -+ } -+ -+ return req; -+} -+ -+/* change our operational config. this code must agree with the code -+ * that returns config descriptors, and altsetting code. -+ * -+ * it's also responsible for power management interactions. some -+ * configurations might not work with our current power sources. -+ * -+ * note that some device controller hardware will constrain what this -+ * code can do, perhaps by disallowing more than one configuration or -+ * by limiting configuration choices (like the pxa2xx). 
-+ */ -+static int -+zero_set_config (struct zero_dev *dev, unsigned number, int gfp_flags) -+{ -+ int result = 0; -+ struct usb_gadget *gadget = dev->gadget; -+ const struct usb_endpoint_descriptor *d; -+ struct usb_ep *ep; -+ -+ if (number == dev->config) -+ return 0; -+ -+ zero_reset_config (dev); -+ -+ gadget_for_each_ep (ep, gadget) { -+ -+ if (strcmp (ep->name, "ep4") == 0) { -+ -+ d = (struct usb_endpoint_descripter *)&za_23; // isoc ep desc for audio i/f alt setting 6 -+ result = usb_ep_enable (ep, d); -+ -+ if (result == 0) { -+ ep->driver_data = dev; -+ dev->in_ep = ep; -+ -+ if (zero_start_isoc_ep (ep, gfp_flags) != 0) { -+ -+ dev->in_ep = ep; -+ continue; -+ } -+ -+ usb_ep_disable (ep); -+ result = -EIO; -+ } -+ } -+ -+ } -+ -+ dev->config = number; -+ return result; -+} -+ -+/*-------------------------------------------------------------------------*/ -+ -+static void zero_setup_complete (struct usb_ep *ep, struct usb_request *req) -+{ -+ if (req->status || req->actual != req->length) -+ DBG ((struct zero_dev *) ep->driver_data, -+ "setup complete --> %d, %d/%d\n", -+ req->status, req->actual, req->length); -+} -+ -+/* -+ * The setup() callback implements all the ep0 functionality that's -+ * not handled lower down, in hardware or the hardware driver (like -+ * device and endpoint feature flags, and their status). It's all -+ * housekeeping for the gadget function we're implementing. Most of -+ * the work is in config-specific setup. -+ */ -+static int -+zero_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) -+{ -+ struct zero_dev *dev = get_gadget_data (gadget); -+ struct usb_request *req = dev->req; -+ int value = -EOPNOTSUPP; -+ -+ /* usually this stores reply data in the pre-allocated ep0 buffer, -+ * but config change events will reconfigure hardware. -+ */ -+ req->zero = 0; -+ switch (ctrl->bRequest) { -+ -+ case USB_REQ_GET_DESCRIPTOR: -+ -+ switch (ctrl->wValue >> 8) { -+ -+ case USB_DT_DEVICE: -+ value = min (ctrl->wLength, (u16) sizeof device_desc); -+ memcpy (req->buf, &device_desc, value); -+ break; -+#ifdef CONFIG_USB_GADGET_DUALSPEED -+ case USB_DT_DEVICE_QUALIFIER: -+ if (!gadget->is_dualspeed) -+ break; -+ value = min (ctrl->wLength, (u16) sizeof dev_qualifier); -+ memcpy (req->buf, &dev_qualifier, value); -+ break; -+ -+ case USB_DT_OTHER_SPEED_CONFIG: -+ if (!gadget->is_dualspeed) -+ break; -+ // FALLTHROUGH -+#endif /* CONFIG_USB_GADGET_DUALSPEED */ -+ case USB_DT_CONFIG: -+ value = config_buf (gadget, req->buf, -+ ctrl->wValue >> 8, -+ ctrl->wValue & 0xff); -+ if (value >= 0) -+ value = min (ctrl->wLength, (u16) value); -+ break; -+ -+ case USB_DT_STRING: -+ /* wIndex == language code. -+ * this driver only handles one language, you can -+ * add string tables for other languages, using -+ * any UTF-8 characters -+ */ -+ value = usb_gadget_get_string (&stringtab, -+ ctrl->wValue & 0xff, req->buf); -+ if (value >= 0) { -+ value = min (ctrl->wLength, (u16) value); -+ } -+ break; -+ } -+ break; -+ -+ /* currently two configs, two speeds */ -+ case USB_REQ_SET_CONFIGURATION: -+ if (ctrl->bRequestType != 0) -+ goto unknown; -+ -+ spin_lock (&dev->lock); -+ value = zero_set_config (dev, ctrl->wValue, GFP_ATOMIC); -+ spin_unlock (&dev->lock); -+ break; -+ case USB_REQ_GET_CONFIGURATION: -+ if (ctrl->bRequestType != USB_DIR_IN) -+ goto unknown; -+ *(u8 *)req->buf = dev->config; -+ value = min (ctrl->wLength, (u16) 1); -+ break; -+ -+ /* until we add altsetting support, or other interfaces, -+ * only 0/0 are possible. 
pxa2xx only supports 0/0 (poorly) -+ * and already killed pending endpoint I/O. -+ */ -+ case USB_REQ_SET_INTERFACE: -+ -+ if (ctrl->bRequestType != USB_RECIP_INTERFACE) -+ goto unknown; -+ spin_lock (&dev->lock); -+ if (dev->config) { -+ u8 config = dev->config; -+ -+ /* resets interface configuration, forgets about -+ * previous transaction state (queued bufs, etc) -+ * and re-inits endpoint state (toggle etc) -+ * no response queued, just zero status == success. -+ * if we had more than one interface we couldn't -+ * use this "reset the config" shortcut. -+ */ -+ zero_reset_config (dev); -+ zero_set_config (dev, config, GFP_ATOMIC); -+ value = 0; -+ } -+ spin_unlock (&dev->lock); -+ break; -+ case USB_REQ_GET_INTERFACE: -+ if ((ctrl->bRequestType == 0x21) && (ctrl->wIndex == 0x02)) { -+ value = ctrl->wLength; -+ break; -+ } -+ else { -+ if (ctrl->bRequestType != (USB_DIR_IN|USB_RECIP_INTERFACE)) -+ goto unknown; -+ if (!dev->config) -+ break; -+ if (ctrl->wIndex != 0) { -+ value = -EDOM; -+ break; -+ } -+ *(u8 *)req->buf = 0; -+ value = min (ctrl->wLength, (u16) 1); -+ } -+ break; -+ -+ /* -+ * These are the same vendor-specific requests supported by -+ * Intel's USB 2.0 compliance test devices. We exceed that -+ * device spec by allowing multiple-packet requests. -+ */ -+ case 0x5b: /* control WRITE test -- fill the buffer */ -+ if (ctrl->bRequestType != (USB_DIR_OUT|USB_TYPE_VENDOR)) -+ goto unknown; -+ if (ctrl->wValue || ctrl->wIndex) -+ break; -+ /* just read that many bytes into the buffer */ -+ if (ctrl->wLength > USB_BUFSIZ) -+ break; -+ value = ctrl->wLength; -+ break; -+ case 0x5c: /* control READ test -- return the buffer */ -+ if (ctrl->bRequestType != (USB_DIR_IN|USB_TYPE_VENDOR)) -+ goto unknown; -+ if (ctrl->wValue || ctrl->wIndex) -+ break; -+ /* expect those bytes are still in the buffer; send back */ -+ if (ctrl->wLength > USB_BUFSIZ -+ || ctrl->wLength != req->length) -+ break; -+ value = ctrl->wLength; -+ break; -+ -+ case 0x01: // SET_CUR -+ case 0x02: -+ case 0x03: -+ case 0x04: -+ case 0x05: -+ value = ctrl->wLength; -+ break; -+ case 0x81: -+ switch (ctrl->wValue) { -+ case 0x0201: -+ case 0x0202: -+ ((u8*)req->buf)[0] = 0x00; -+ ((u8*)req->buf)[1] = 0xe3; -+ break; -+ case 0x0300: -+ case 0x0500: -+ ((u8*)req->buf)[0] = 0x00; -+ break; -+ } -+ //((u8*)req->buf)[0] = 0x81; -+ //((u8*)req->buf)[1] = 0x81; -+ value = ctrl->wLength; -+ break; -+ case 0x82: -+ switch (ctrl->wValue) { -+ case 0x0201: -+ case 0x0202: -+ ((u8*)req->buf)[0] = 0x00; -+ ((u8*)req->buf)[1] = 0xc3; -+ break; -+ case 0x0300: -+ case 0x0500: -+ ((u8*)req->buf)[0] = 0x00; -+ break; -+ } -+ //((u8*)req->buf)[0] = 0x82; -+ //((u8*)req->buf)[1] = 0x82; -+ value = ctrl->wLength; -+ break; -+ case 0x83: -+ switch (ctrl->wValue) { -+ case 0x0201: -+ case 0x0202: -+ ((u8*)req->buf)[0] = 0x00; -+ ((u8*)req->buf)[1] = 0x00; -+ break; -+ case 0x0300: -+ ((u8*)req->buf)[0] = 0x60; -+ break; -+ case 0x0500: -+ ((u8*)req->buf)[0] = 0x18; -+ break; -+ } -+ //((u8*)req->buf)[0] = 0x83; -+ //((u8*)req->buf)[1] = 0x83; -+ value = ctrl->wLength; -+ break; -+ case 0x84: -+ switch (ctrl->wValue) { -+ case 0x0201: -+ case 0x0202: -+ ((u8*)req->buf)[0] = 0x00; -+ ((u8*)req->buf)[1] = 0x01; -+ break; -+ case 0x0300: -+ case 0x0500: -+ ((u8*)req->buf)[0] = 0x08; -+ break; -+ } -+ //((u8*)req->buf)[0] = 0x84; -+ //((u8*)req->buf)[1] = 0x84; -+ value = ctrl->wLength; -+ break; -+ case 0x85: -+ ((u8*)req->buf)[0] = 0x85; -+ ((u8*)req->buf)[1] = 0x85; -+ value = ctrl->wLength; -+ break; -+ -+ -+ default: -+unknown: -+ 
printk("unknown control req%02x.%02x v%04x i%04x l%d\n", -+ ctrl->bRequestType, ctrl->bRequest, -+ ctrl->wValue, ctrl->wIndex, ctrl->wLength); -+ } -+ -+ /* respond with data transfer before status phase? */ -+ if (value >= 0) { -+ req->length = value; -+ req->zero = value < ctrl->wLength -+ && (value % gadget->ep0->maxpacket) == 0; -+ value = usb_ep_queue (gadget->ep0, req, GFP_ATOMIC); -+ if (value < 0) { -+ DBG (dev, "ep_queue < 0 --> %d\n", value); -+ req->status = 0; -+ zero_setup_complete (gadget->ep0, req); -+ } -+ } -+ -+ /* device either stalls (value < 0) or reports success */ -+ return value; -+} -+ -+static void -+zero_disconnect (struct usb_gadget *gadget) -+{ -+ struct zero_dev *dev = get_gadget_data (gadget); -+ unsigned long flags; -+ -+ spin_lock_irqsave (&dev->lock, flags); -+ zero_reset_config (dev); -+ -+ /* a more significant application might have some non-usb -+ * activities to quiesce here, saving resources like power -+ * or pushing the notification up a network stack. -+ */ -+ spin_unlock_irqrestore (&dev->lock, flags); -+ -+ /* next we may get setup() calls to enumerate new connections; -+ * or an unbind() during shutdown (including removing module). -+ */ -+} -+ -+static void -+zero_autoresume (unsigned long _dev) -+{ -+ struct zero_dev *dev = (struct zero_dev *) _dev; -+ int status; -+ -+ /* normally the host would be woken up for something -+ * more significant than just a timer firing... -+ */ -+ if (dev->gadget->speed != USB_SPEED_UNKNOWN) { -+ status = usb_gadget_wakeup (dev->gadget); -+ DBG (dev, "wakeup --> %d\n", status); -+ } -+} -+ -+/*-------------------------------------------------------------------------*/ -+ -+static void -+zero_unbind (struct usb_gadget *gadget) -+{ -+ struct zero_dev *dev = get_gadget_data (gadget); -+ -+ DBG (dev, "unbind\n"); -+ -+ /* we've already been disconnected ... no i/o is active */ -+ if (dev->req) -+ free_ep_req (gadget->ep0, dev->req); -+ del_timer_sync (&dev->resume); -+ kfree (dev); -+ set_gadget_data (gadget, NULL); -+} -+ -+static int -+zero_bind (struct usb_gadget *gadget) -+{ -+ struct zero_dev *dev; -+ //struct usb_ep *ep; -+ -+ printk("binding\n"); -+ /* -+ * DRIVER POLICY CHOICE: you may want to do this differently. -+ * One thing to avoid is reusing a bcdDevice revision code -+ * with different host-visible configurations or behavior -+ * restrictions -- using ep1in/ep2out vs ep1out/ep3in, etc -+ */ -+ //device_desc.bcdDevice = __constant_cpu_to_le16 (0x0201); -+ -+ -+ /* ok, we made sense of the hardware ... */ -+ dev = kmalloc (sizeof *dev, SLAB_KERNEL); -+ if (!dev) -+ return -ENOMEM; -+ memset (dev, 0, sizeof *dev); -+ spin_lock_init (&dev->lock); -+ dev->gadget = gadget; -+ set_gadget_data (gadget, dev); -+ -+ /* preallocate control response and buffer */ -+ dev->req = usb_ep_alloc_request (gadget->ep0, GFP_KERNEL); -+ if (!dev->req) -+ goto enomem; -+ dev->req->buf = usb_ep_alloc_buffer (gadget->ep0, USB_BUFSIZ, -+ &dev->req->dma, GFP_KERNEL); -+ if (!dev->req->buf) -+ goto enomem; -+ -+ dev->req->complete = zero_setup_complete; -+ -+ device_desc.bMaxPacketSize0 = gadget->ep0->maxpacket; -+ -+#ifdef CONFIG_USB_GADGET_DUALSPEED -+ /* assume ep0 uses the same value for both speeds ... 
*/ -+ dev_qualifier.bMaxPacketSize0 = device_desc.bMaxPacketSize0; -+ -+ /* and that all endpoints are dual-speed */ -+ //hs_source_desc.bEndpointAddress = fs_source_desc.bEndpointAddress; -+ //hs_sink_desc.bEndpointAddress = fs_sink_desc.bEndpointAddress; -+#endif -+ -+ usb_gadget_set_selfpowered (gadget); -+ -+ init_timer (&dev->resume); -+ dev->resume.function = zero_autoresume; -+ dev->resume.data = (unsigned long) dev; -+ -+ gadget->ep0->driver_data = dev; -+ -+ INFO (dev, "%s, version: " DRIVER_VERSION "\n", longname); -+ INFO (dev, "using %s, OUT %s IN %s\n", gadget->name, -+ EP_OUT_NAME, EP_IN_NAME); -+ -+ snprintf (manufacturer, sizeof manufacturer, -+ UTS_SYSNAME " " UTS_RELEASE " with %s", -+ gadget->name); -+ -+ return 0; -+ -+enomem: -+ zero_unbind (gadget); -+ return -ENOMEM; -+} -+ -+/*-------------------------------------------------------------------------*/ -+ -+static void -+zero_suspend (struct usb_gadget *gadget) -+{ -+ struct zero_dev *dev = get_gadget_data (gadget); -+ -+ if (gadget->speed == USB_SPEED_UNKNOWN) -+ return; -+ -+ if (autoresume) { -+ mod_timer (&dev->resume, jiffies + (HZ * autoresume)); -+ DBG (dev, "suspend, wakeup in %d seconds\n", autoresume); -+ } else -+ DBG (dev, "suspend\n"); -+} -+ -+static void -+zero_resume (struct usb_gadget *gadget) -+{ -+ struct zero_dev *dev = get_gadget_data (gadget); -+ -+ DBG (dev, "resume\n"); -+ del_timer (&dev->resume); -+} -+ -+ -+/*-------------------------------------------------------------------------*/ -+ -+static struct usb_gadget_driver zero_driver = { -+#ifdef CONFIG_USB_GADGET_DUALSPEED -+ .speed = USB_SPEED_HIGH, -+#else -+ .speed = USB_SPEED_FULL, -+#endif -+ .function = (char *) longname, -+ .bind = zero_bind, -+ .unbind = zero_unbind, -+ -+ .setup = zero_setup, -+ .disconnect = zero_disconnect, -+ -+ .suspend = zero_suspend, -+ .resume = zero_resume, -+ -+ .driver = { -+ .name = (char *) shortname, -+ // .shutdown = ... -+ // .suspend = ... -+ // .resume = ... 
-+ }, -+}; -+ -+MODULE_AUTHOR ("David Brownell"); -+MODULE_LICENSE ("Dual BSD/GPL"); -+ -+static struct proc_dir_entry *pdir, *pfile; -+ -+static int isoc_read_data (char *page, char **start, -+ off_t off, int count, -+ int *eof, void *data) -+{ -+ int i; -+ static int c = 0; -+ static int done = 0; -+ static int s = 0; -+ -+/* -+ printk ("\ncount: %d\n", count); -+ printk ("rbuf_start: %d\n", rbuf_start); -+ printk ("rbuf_len: %d\n", rbuf_len); -+ printk ("off: %d\n", off); -+ printk ("start: %p\n\n", *start); -+*/ -+ if (done) { -+ c = 0; -+ done = 0; -+ *eof = 1; -+ return 0; -+ } -+ -+ if (c == 0) { -+ if (rbuf_len == RBUF_LEN) -+ s = rbuf_start; -+ else s = 0; -+ } -+ -+ for (i=0; i<count && c<rbuf_len; i++, c++) { -+ page[i] = rbuf[(c+s) % RBUF_LEN]; -+ } -+ *start = page; -+ -+ if (c >= rbuf_len) { -+ *eof = 1; -+ done = 1; -+ } -+ -+ -+ return i; -+} -+ -+static int __init init (void) -+{ -+ -+ int retval = 0; -+ -+ pdir = proc_mkdir("isoc_test", NULL); -+ if(pdir == NULL) { -+ retval = -ENOMEM; -+ printk("Error creating dir\n"); -+ goto done; -+ } -+ pdir->owner = THIS_MODULE; -+ -+ pfile = create_proc_read_entry("isoc_data", -+ 0444, pdir, -+ isoc_read_data, -+ NULL); -+ if (pfile == NULL) { -+ retval = -ENOMEM; -+ printk("Error creating file\n"); -+ goto no_file; -+ } -+ pfile->owner = THIS_MODULE; -+ -+ return usb_gadget_register_driver (&zero_driver); -+ -+ no_file: -+ remove_proc_entry("isoc_data", NULL); -+ done: -+ return retval; -+} -+module_init (init); -+ -+static void __exit cleanup (void) -+{ -+ -+ usb_gadget_unregister_driver (&zero_driver); -+ -+ remove_proc_entry("isoc_data", pdir); -+ remove_proc_entry("isoc_test", NULL); -+} -+module_exit (cleanup); ---- /dev/null -+++ b/drivers/usb/dwc_otg/dwc_otg_attr.c -@@ -0,0 +1,966 @@ -+/* ========================================================================== -+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_attr.c $ -+ * $Revision: 1.2 $ -+ * $Date: 2008-11-21 05:39:15 $ -+ * $Change: 1064918 $ -+ * -+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, -+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless -+ * otherwise expressly agreed to in writing between Synopsys and you. -+ * -+ * The Software IS NOT an item of Licensed Software or Licensed Product under -+ * any End User Software License Agreement or Agreement for Licensed Product -+ * with Synopsys or any supplement thereto. You are permitted to use and -+ * redistribute this Software in source and binary forms, with or without -+ * modification, provided that redistributions of source code must retain this -+ * notice. You may not view, use, disclose, copy or distribute this file or -+ * any information contained herein except pursuant to this license grant from -+ * Synopsys. If you do not agree with this notice, including the disclaimer -+ * below, then you are not authorized to use the Software. -+ * -+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE HEREBY DISCLAIMED. 
IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, -+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -+ * DAMAGE. -+ * ========================================================================== */ -+ -+/** @file -+ * -+ * The diagnostic interface will provide access to the controller for -+ * bringing up the hardware and testing. The Linux driver attributes -+ * feature will be used to provide the Linux Diagnostic -+ * Interface. These attributes are accessed through sysfs. -+ */ -+ -+/** @page "Linux Module Attributes" -+ * -+ * The Linux module attributes feature is used to provide the Linux -+ * Diagnostic Interface. These attributes are accessed through sysfs. -+ * The diagnostic interface will provide access to the controller for -+ * bringing up the hardware and testing. -+ -+ -+ The following table shows the attributes. -+ -+
Name Description Access
mode Returns the current mode: 0 for device mode, 1 for host mode Read
hnpcapable Gets or sets the "HNP-capable" bit in the Core USB Configuration Register. -+ Read returns the current value. Read/Write
srpcapable Gets or sets the "SRP-capable" bit in the Core USB Configuration Register. -+ Read returns the current value. Read/Write
hnp Initiates the Host Negotiation Protocol. Read returns the status. Read/Write
srp Initiates the Session Request Protocol. Read returns the status. Read/Write
buspower Gets or sets the Power State of the bus (0 - Off or 1 - On) Read/Write
bussuspend Suspends the USB bus. Read/Write
busconnected Gets the connection status of the bus Read
gotgctl Gets or sets the Core Control Status Register. Read/Write
gusbcfg Gets or sets the Core USB Configuration Register Read/Write
grxfsiz Gets or sets the Receive FIFO Size Register Read/Write
gnptxfsiz Gets or sets the non-periodic Transmit Size Register Read/Write
gpvndctl Gets or sets the PHY Vendor Control Register Read/Write
ggpio Gets the value in the lower 16-bits of the General Purpose IO Register -+ or sets the upper 16 bits. Read/Write
guid Gets or sets the value of the User ID Register Read/Write
gsnpsid Gets the value of the Synopsys ID Register Read
devspeed Gets or sets the device speed setting in the DCFG register Read/Write
enumspeed Gets the device enumeration Speed. Read
hptxfsiz Gets the value of the Host Periodic Transmit FIFO Read
hprt0 Gets or sets the value in the Host Port Control and Status Register Read/Write
regoffset Sets the register offset for the next Register Access Read/Write
regvalue Gets or sets the value of the register at the offset in the regoffset attribute. Read/Write
remote_wakeup On read, shows the status of Remote Wakeup. On write, initiates a remote -+ wakeup of the host. When bit 0 is 1 and Remote Wakeup is enabled, the Remote -+ Wakeup signalling bit in the Device Control Register is set for 1 -+ millisecond. Read/Write
regdump Dumps the contents of core registers. Read
spramdump Dumps the contents of the SPRAM. Read
hcddump Dumps the current HCD state. Read
hcd_frrem Shows the average value of the Frame Remaining -+ field in the Host Frame Number/Frame Remaining register when an SOF interrupt -+ occurs. This can be used to determine the average interrupt latency. Also -+ shows the average Frame Remaining value for start_transfer and the "a" and -+ "b" sample points. The "a" and "b" sample points may be used during debugging -+ to determine how long it takes to execute a section of the HCD code. Read
rd_reg_test Displays the time required to read the GNPTXFSIZ register many times -+ (the output shows the number of times the register is read). -+ Read
wr_reg_test Displays the time required to write the GNPTXFSIZ register many times -+ (the output shows the number of times the register is written). -+ Read
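The regoffset/regvalue rows above describe a two-step peek/poke interface: a hexadecimal offset is first written to regoffset, and the register at that offset is then read or written through regvalue (the corresponding show/store handlers appear later in this file). Below is a minimal user-space sketch of the read path; the /sys/devices/lm0 path is only an assumption carried over from the example usage that follows, and the dwc_otg_peek() helper name is hypothetical.

    #include <stdio.h>

    /* Read a core register through the regoffset/regvalue sysfs pair.
     * The sysfs directory is assumed to be /sys/devices/lm0, matching the
     * example usage below; adjust it to wherever the controller's device
     * node actually lives on a given board.
     */
    static int dwc_otg_peek(unsigned int offset, char *out, size_t outlen)
    {
            FILE *f = fopen("/sys/devices/lm0/regoffset", "w");

            if (!f)
                    return -1;
            fprintf(f, "0x%x\n", offset);   /* parsed with simple_strtoul(..., 16) */
            fclose(f);

            f = fopen("/sys/devices/lm0/regvalue", "r");
            if (!f)
                    return -1;
            if (!fgets(out, outlen, f)) {   /* e.g. "Reg@0x00000c = 0x........" */
                    fclose(f);
                    return -1;
            }
            fclose(f);
            return 0;
    }

    int main(void)
    {
            char line[64];

            /* 0x00c is GUSBCFG in the usual DWC core global register map */
            if (dwc_otg_peek(0x00c, line, sizeof(line)) == 0)
                    printf("%s", line);
            return 0;
    }

Writing a register works the same way, except that the second step is a write of a hexadecimal value to regvalue instead of a read.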
-+ -+ Example usage: -+ To get the current mode: -+ cat /sys/devices/lm0/mode -+ -+ To power down the USB: -+ echo 0 > /sys/devices/lm0/buspower -+ */ -+ -+#include <linux/kernel.h> -+#include <linux/module.h> -+#include <linux/moduleparam.h> -+#include <linux/init.h> -+#include <linux/device.h> -+#include <linux/errno.h> -+#include <linux/types.h> -+#include <linux/stat.h> /* permission constants */ -+#include <linux/version.h> -+ -+#include <asm/io.h> -+ -+#include "linux/dwc_otg_plat.h" -+#include "dwc_otg_attr.h" -+#include "dwc_otg_driver.h" -+#include "dwc_otg_pcd.h" -+#include "dwc_otg_hcd.h" -+ -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) -+/* -+ * MACROs for defining sysfs attribute -+ */ -+#define DWC_OTG_DEVICE_ATTR_BITFIELD_SHOW(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \ -+static ssize_t _otg_attr_name_##_show (struct device *_dev, struct device_attribute *attr, char *buf) \ -+{ \ -+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); \ -+ uint32_t val; \ -+ val = dwc_read_reg32 (_addr_); \ -+ val = (val & (_mask_)) >> _shift_; \ -+ return sprintf (buf, "%s = 0x%x\n", _string_, val); \ -+} -+#define DWC_OTG_DEVICE_ATTR_BITFIELD_STORE(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \ -+static ssize_t _otg_attr_name_##_store (struct device *_dev, struct device_attribute *attr, \ -+ const char *buf, size_t count) \ -+{ \ -+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); \ -+ uint32_t set = simple_strtoul(buf, NULL, 16); \ -+ uint32_t clear = set; \ -+ clear = ((~clear) << _shift_) & _mask_; \ -+ set = (set << _shift_) & _mask_; \ -+ dev_dbg(_dev, "Storing Address=0x%08x Set=0x%08x Clear=0x%08x\n", (uint32_t)_addr_, set, clear); \ -+ dwc_modify_reg32(_addr_, clear, set); \ -+ return count; \ -+} -+ -+/* -+ * MACROs for defining sysfs attribute for 32-bit registers -+ */ -+#define DWC_OTG_DEVICE_ATTR_REG_SHOW(_otg_attr_name_,_addr_,_string_) \ -+static ssize_t _otg_attr_name_##_show (struct device *_dev, struct device_attribute *attr, char *buf) \ -+{ \ -+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); \ -+ uint32_t val; \ -+ val = dwc_read_reg32 (_addr_); \ -+ return sprintf (buf, "%s = 0x%08x\n", _string_, val); \ -+} -+#define DWC_OTG_DEVICE_ATTR_REG_STORE(_otg_attr_name_,_addr_,_string_) \ -+static ssize_t _otg_attr_name_##_store (struct device *_dev, struct device_attribute *attr, \ -+ const char *buf, size_t count) \ -+{ \ -+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); \ -+ uint32_t val = simple_strtoul(buf, NULL, 16); \ -+ dev_dbg(_dev, "Storing Address=0x%08x Val=0x%08x\n", (uint32_t)_addr_, val); \ -+ dwc_write_reg32(_addr_, val); \ -+ return count; \ -+} -+ -+#else -+ -+/* -+ * MACROs for defining sysfs attribute -+ */ -+#define DWC_OTG_DEVICE_ATTR_BITFIELD_SHOW(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \ -+static ssize_t _otg_attr_name_##_show (struct device *_dev, char *buf) \ -+{ \ -+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);\ -+ uint32_t val; \ -+ val = dwc_read_reg32 (_addr_); \ -+ val = (val & (_mask_)) >> _shift_; \ -+ return sprintf (buf, "%s = 0x%x\n", _string_, val); \ -+} -+#define DWC_OTG_DEVICE_ATTR_BITFIELD_STORE(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \ -+static ssize_t _otg_attr_name_##_store (struct device *_dev, const char *buf, size_t count) \ -+{ \ -+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);\ -+ uint32_t set = simple_strtoul(buf, NULL, 16); \ -+ uint32_t clear = set; \ -+ clear = ((~clear) << _shift_) & _mask_; \ -+ set = (set << _shift_) & _mask_; \ -+ dev_dbg(_dev, "Storing Address=0x%08x Set=0x%08x Clear=0x%08x\n", (uint32_t)_addr_, set, clear); \ -+ dwc_modify_reg32(_addr_, clear, set); \ -+ return count; \ -+} -+ -+/* -+ * MACROs for defining sysfs 
attribute for 32-bit registers -+ */ -+#define DWC_OTG_DEVICE_ATTR_REG_SHOW(_otg_attr_name_,_addr_,_string_) \ -+static ssize_t _otg_attr_name_##_show (struct device *_dev, char *buf) \ -+{ \ -+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);\ -+ uint32_t val; \ -+ val = dwc_read_reg32 (_addr_); \ -+ return sprintf (buf, "%s = 0x%08x\n", _string_, val); \ -+} -+#define DWC_OTG_DEVICE_ATTR_REG_STORE(_otg_attr_name_,_addr_,_string_) \ -+static ssize_t _otg_attr_name_##_store (struct device *_dev, const char *buf, size_t count) \ -+{ \ -+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);\ -+ uint32_t val = simple_strtoul(buf, NULL, 16); \ -+ dev_dbg(_dev, "Storing Address=0x%08x Val=0x%08x\n", (uint32_t)_addr_, val); \ -+ dwc_write_reg32(_addr_, val); \ -+ return count; \ -+} -+ -+#endif -+ -+#define DWC_OTG_DEVICE_ATTR_BITFIELD_RW(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \ -+DWC_OTG_DEVICE_ATTR_BITFIELD_SHOW(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \ -+DWC_OTG_DEVICE_ATTR_BITFIELD_STORE(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \ -+DEVICE_ATTR(_otg_attr_name_,0644,_otg_attr_name_##_show,_otg_attr_name_##_store); -+ -+#define DWC_OTG_DEVICE_ATTR_BITFIELD_RO(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \ -+DWC_OTG_DEVICE_ATTR_BITFIELD_SHOW(_otg_attr_name_,_addr_,_mask_,_shift_,_string_) \ -+DEVICE_ATTR(_otg_attr_name_,0444,_otg_attr_name_##_show,NULL); -+ -+#define DWC_OTG_DEVICE_ATTR_REG32_RW(_otg_attr_name_,_addr_,_string_) \ -+DWC_OTG_DEVICE_ATTR_REG_SHOW(_otg_attr_name_,_addr_,_string_) \ -+DWC_OTG_DEVICE_ATTR_REG_STORE(_otg_attr_name_,_addr_,_string_) \ -+DEVICE_ATTR(_otg_attr_name_,0644,_otg_attr_name_##_show,_otg_attr_name_##_store); -+ -+#define DWC_OTG_DEVICE_ATTR_REG32_RO(_otg_attr_name_,_addr_,_string_) \ -+DWC_OTG_DEVICE_ATTR_REG_SHOW(_otg_attr_name_,_addr_,_string_) \ -+DEVICE_ATTR(_otg_attr_name_,0444,_otg_attr_name_##_show,NULL); -+ -+ -+/** @name Functions for Show/Store of Attributes */ -+/**@{*/ -+ -+/** -+ * Show the register offset of the Register Access. -+ */ -+static ssize_t regoffset_show( struct device *_dev, -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) -+ struct device_attribute *attr, -+#endif -+ char *buf) -+{ -+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); -+ return snprintf(buf, sizeof("0xFFFFFFFF\n")+1,"0x%08x\n", otg_dev->reg_offset); -+} -+ -+/** -+ * Set the register offset for the next Register Access Read/Write -+ */ -+static ssize_t regoffset_store( struct device *_dev, -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) -+ struct device_attribute *attr, -+#endif -+ const char *buf, -+ size_t count ) -+{ -+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); -+ -+ uint32_t offset = simple_strtoul(buf, NULL, 16); -+ //dev_dbg(_dev, "Offset=0x%08x\n", offset); -+ if (offset < 0x00040000 ) { -+ otg_dev->reg_offset = offset; -+ } -+ else { -+ dev_err( _dev, "invalid offset\n" ); -+ } -+ -+ return count; -+} -+DEVICE_ATTR(regoffset, S_IRUGO|S_IWUSR, (void *)regoffset_show, regoffset_store); -+ -+ -+/** -+ * Show the value of the register at the offset in the reg_offset -+ * attribute. 
-+ */ -+static ssize_t regvalue_show( struct device *_dev, -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) -+ struct device_attribute *attr, -+#endif -+ char *buf) -+{ -+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); -+ -+ uint32_t val; -+ volatile uint32_t *addr; -+ -+ if (otg_dev->reg_offset != 0xFFFFFFFF && -+ 0 != otg_dev->base) { -+ /* Calculate the address */ -+ addr = (uint32_t*)(otg_dev->reg_offset + -+ (uint8_t*)otg_dev->base); -+ //dev_dbg(_dev, "@0x%08x\n", (unsigned)addr); -+ val = dwc_read_reg32( addr ); -+ return snprintf(buf, sizeof("Reg@0xFFFFFFFF = 0xFFFFFFFF\n")+1, -+ "Reg@0x%06x = 0x%08x\n", -+ otg_dev->reg_offset, val); -+ } -+ else { -+ dev_err(_dev, "Invalid offset (0x%0x)\n", -+ otg_dev->reg_offset); -+ return sprintf(buf, "invalid offset\n" ); -+ } -+} -+ -+/** -+ * Store the value in the register at the offset in the reg_offset -+ * attribute. -+ * -+ */ -+static ssize_t regvalue_store( struct device *_dev, -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) -+ struct device_attribute *attr, -+#endif -+ const char *buf, -+ size_t count ) -+{ -+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); -+ -+ volatile uint32_t * addr; -+ uint32_t val = simple_strtoul(buf, NULL, 16); -+ //dev_dbg(_dev, "Offset=0x%08x Val=0x%08x\n", otg_dev->reg_offset, val); -+ if (otg_dev->reg_offset != 0xFFFFFFFF && 0 != otg_dev->base) { -+ /* Calculate the address */ -+ addr = (uint32_t*)(otg_dev->reg_offset + -+ (uint8_t*)otg_dev->base); -+ //dev_dbg(_dev, "@0x%08x\n", (unsigned)addr); -+ dwc_write_reg32( addr, val ); -+ } -+ else { -+ dev_err(_dev, "Invalid Register Offset (0x%08x)\n", -+ otg_dev->reg_offset); -+ } -+ return count; -+} -+DEVICE_ATTR(regvalue, S_IRUGO|S_IWUSR, regvalue_show, regvalue_store); -+ -+/* -+ * Attributes -+ */ -+DWC_OTG_DEVICE_ATTR_BITFIELD_RO(mode,&(otg_dev->core_if->core_global_regs->gotgctl),(1<<20),20,"Mode"); -+DWC_OTG_DEVICE_ATTR_BITFIELD_RW(hnpcapable,&(otg_dev->core_if->core_global_regs->gusbcfg),(1<<9),9,"Mode"); -+DWC_OTG_DEVICE_ATTR_BITFIELD_RW(srpcapable,&(otg_dev->core_if->core_global_regs->gusbcfg),(1<<8),8,"Mode"); -+ -+//DWC_OTG_DEVICE_ATTR_BITFIELD_RW(buspower,&(otg_dev->core_if->core_global_regs->gotgctl),(1<<8),8,"Mode"); -+//DWC_OTG_DEVICE_ATTR_BITFIELD_RW(bussuspend,&(otg_dev->core_if->core_global_regs->gotgctl),(1<<8),8,"Mode"); -+DWC_OTG_DEVICE_ATTR_BITFIELD_RO(busconnected,otg_dev->core_if->host_if->hprt0,0x01,0,"Bus Connected"); -+ -+DWC_OTG_DEVICE_ATTR_REG32_RW(gotgctl,&(otg_dev->core_if->core_global_regs->gotgctl),"GOTGCTL"); -+DWC_OTG_DEVICE_ATTR_REG32_RW(gusbcfg,&(otg_dev->core_if->core_global_regs->gusbcfg),"GUSBCFG"); -+DWC_OTG_DEVICE_ATTR_REG32_RW(grxfsiz,&(otg_dev->core_if->core_global_regs->grxfsiz),"GRXFSIZ"); -+DWC_OTG_DEVICE_ATTR_REG32_RW(gnptxfsiz,&(otg_dev->core_if->core_global_regs->gnptxfsiz),"GNPTXFSIZ"); -+DWC_OTG_DEVICE_ATTR_REG32_RW(gpvndctl,&(otg_dev->core_if->core_global_regs->gpvndctl),"GPVNDCTL"); -+DWC_OTG_DEVICE_ATTR_REG32_RW(ggpio,&(otg_dev->core_if->core_global_regs->ggpio),"GGPIO"); -+DWC_OTG_DEVICE_ATTR_REG32_RW(guid,&(otg_dev->core_if->core_global_regs->guid),"GUID"); -+DWC_OTG_DEVICE_ATTR_REG32_RO(gsnpsid,&(otg_dev->core_if->core_global_regs->gsnpsid),"GSNPSID"); -+DWC_OTG_DEVICE_ATTR_BITFIELD_RW(devspeed,&(otg_dev->core_if->dev_if->dev_global_regs->dcfg),0x3,0,"Device Speed"); -+DWC_OTG_DEVICE_ATTR_BITFIELD_RO(enumspeed,&(otg_dev->core_if->dev_if->dev_global_regs->dsts),0x6,1,"Device Enumeration Speed"); -+ 
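As a reading aid for the block of attribute definitions above and below, here is roughly what one of the RW bit-field lines expands to, taking hnpcapable as the example and using the >= 2.6.20 variants of the macros defined earlier in this file. The expansion is illustrative only (the dev_dbg trace is omitted, and it relies on the types and register accessors declared in this driver); note that the address argument is pasted textually into each generated function, which is why it can refer to the otg_dev local that the function itself fetches from drvdata, and that the "Mode" label is simply the string the original invocation passes.

    /* Approximate expansion of
     *   DWC_OTG_DEVICE_ATTR_BITFIELD_RW(hnpcapable,
     *       &(otg_dev->core_if->core_global_regs->gusbcfg),(1<<9),9,"Mode");
     */
    static ssize_t hnpcapable_show(struct device *_dev,
                                   struct device_attribute *attr, char *buf)
    {
            dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);
            uint32_t val;

            val = dwc_read_reg32(&(otg_dev->core_if->core_global_regs->gusbcfg));
            val = (val & (1 << 9)) >> 9;            /* isolate the HNP-capable bit */
            return sprintf(buf, "%s = 0x%x\n", "Mode", val);
    }

    static ssize_t hnpcapable_store(struct device *_dev,
                                    struct device_attribute *attr,
                                    const char *buf, size_t count)
    {
            dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev);
            uint32_t set = simple_strtoul(buf, NULL, 16);
            uint32_t clear = ((~set) << 9) & (1 << 9);      /* bit to clear ... */

            set = (set << 9) & (1 << 9);                    /* ... or bit to set */
            dwc_modify_reg32(&(otg_dev->core_if->core_global_regs->gusbcfg),
                             clear, set);
            return count;
    }
    DEVICE_ATTR(hnpcapable, 0644, hnpcapable_show, hnpcapable_store);

The RO variants differ only in dropping the store function and registering the attribute with mode 0444.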
-+DWC_OTG_DEVICE_ATTR_REG32_RO(hptxfsiz,&(otg_dev->core_if->core_global_regs->hptxfsiz),"HPTXFSIZ"); -+DWC_OTG_DEVICE_ATTR_REG32_RW(hprt0,otg_dev->core_if->host_if->hprt0,"HPRT0"); -+ -+ -+/** -+ * @todo Add code to initiate the HNP. -+ */ -+/** -+ * Show the HNP status bit -+ */ -+static ssize_t hnp_show( struct device *_dev, -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) -+ struct device_attribute *attr, -+#endif -+ char *buf) -+{ -+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); -+ -+ gotgctl_data_t val; -+ val.d32 = dwc_read_reg32 (&(otg_dev->core_if->core_global_regs->gotgctl)); -+ return sprintf (buf, "HstNegScs = 0x%x\n", val.b.hstnegscs); -+} -+ -+/** -+ * Set the HNP Request bit -+ */ -+static ssize_t hnp_store( struct device *_dev, -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) -+ struct device_attribute *attr, -+#endif -+ const char *buf, -+ size_t count ) -+{ -+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); -+ -+ uint32_t in = simple_strtoul(buf, NULL, 16); -+ uint32_t *addr = (uint32_t *)&(otg_dev->core_if->core_global_regs->gotgctl); -+ gotgctl_data_t mem; -+ mem.d32 = dwc_read_reg32(addr); -+ mem.b.hnpreq = in; -+ dev_dbg(_dev, "Storing Address=0x%08x Data=0x%08x\n", (uint32_t)addr, mem.d32); -+ dwc_write_reg32(addr, mem.d32); -+ return count; -+} -+DEVICE_ATTR(hnp, 0644, hnp_show, hnp_store); -+ -+/** -+ * @todo Add code to initiate the SRP. -+ */ -+/** -+ * Show the SRP status bit -+ */ -+static ssize_t srp_show( struct device *_dev, -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) -+ struct device_attribute *attr, -+#endif -+ char *buf) -+{ -+#ifndef DWC_HOST_ONLY -+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); -+ -+ gotgctl_data_t val; -+ val.d32 = dwc_read_reg32 (&(otg_dev->core_if->core_global_regs->gotgctl)); -+ return sprintf (buf, "SesReqScs = 0x%x\n", val.b.sesreqscs); -+#else -+ return sprintf(buf, "Host Only Mode!\n"); -+#endif -+} -+ -+ -+ -+/** -+ * Set the SRP Request bit -+ */ -+static ssize_t srp_store( struct device *_dev, -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) -+ struct device_attribute *attr, -+#endif -+ const char *buf, -+ size_t count ) -+{ -+#ifndef DWC_HOST_ONLY -+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); -+ -+ dwc_otg_pcd_initiate_srp(otg_dev->pcd); -+#endif -+ return count; -+} -+DEVICE_ATTR(srp, 0644, srp_show, srp_store); -+ -+/** -+ * @todo Need to do more for power on/off? 
-+ */ -+/** -+ * Show the Bus Power status -+ */ -+static ssize_t buspower_show( struct device *_dev, -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) -+ struct device_attribute *attr, -+#endif -+ char *buf) -+{ -+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); -+ -+ hprt0_data_t val; -+ val.d32 = dwc_read_reg32 (otg_dev->core_if->host_if->hprt0); -+ return sprintf (buf, "Bus Power = 0x%x\n", val.b.prtpwr); -+} -+ -+ -+/** -+ * Set the Bus Power status -+ */ -+static ssize_t buspower_store( struct device *_dev, -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) -+ struct device_attribute *attr, -+#endif -+ const char *buf, -+ size_t count ) -+{ -+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); -+ -+ uint32_t on = simple_strtoul(buf, NULL, 16); -+ uint32_t *addr = (uint32_t *)otg_dev->core_if->host_if->hprt0; -+ hprt0_data_t mem; -+ -+ mem.d32 = dwc_read_reg32(addr); -+ mem.b.prtpwr = on; -+ -+ //dev_dbg(_dev, "Storing Address=0x%08x Data=0x%08x\n", (uint32_t)addr, mem.d32); -+ dwc_write_reg32(addr, mem.d32); -+ -+ return count; -+} -+DEVICE_ATTR(buspower, 0644, buspower_show, buspower_store); -+ -+/** -+ * @todo Need to do more for suspend? -+ */ -+/** -+ * Show the Bus Suspend status -+ */ -+static ssize_t bussuspend_show( struct device *_dev, -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) -+ struct device_attribute *attr, -+#endif -+ char *buf) -+{ -+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); -+ -+ hprt0_data_t val; -+ val.d32 = dwc_read_reg32 (otg_dev->core_if->host_if->hprt0); -+ return sprintf (buf, "Bus Suspend = 0x%x\n", val.b.prtsusp); -+} -+ -+/** -+ * Set the Bus Suspend status -+ */ -+static ssize_t bussuspend_store( struct device *_dev, -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) -+ struct device_attribute *attr, -+#endif -+ const char *buf, -+ size_t count ) -+{ -+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); -+ -+ uint32_t in = simple_strtoul(buf, NULL, 16); -+ uint32_t *addr = (uint32_t *)otg_dev->core_if->host_if->hprt0; -+ hprt0_data_t mem; -+ mem.d32 = dwc_read_reg32(addr); -+ mem.b.prtsusp = in; -+ dev_dbg(_dev, "Storing Address=0x%08x Data=0x%08x\n", (uint32_t)addr, mem.d32); -+ dwc_write_reg32(addr, mem.d32); -+ return count; -+} -+DEVICE_ATTR(bussuspend, 0644, bussuspend_show, bussuspend_store); -+ -+/** -+ * Show the status of Remote Wakeup. -+ */ -+static ssize_t remote_wakeup_show( struct device *_dev, -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) -+ struct device_attribute *attr, -+#endif -+ char *buf) -+{ -+#ifndef DWC_HOST_ONLY -+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); -+ -+ dctl_data_t val; -+ val.d32 = -+ dwc_read_reg32( &otg_dev->core_if->dev_if->dev_global_regs->dctl); -+ return sprintf( buf, "Remote Wakeup = %d Enabled = %d\n", -+ val.b.rmtwkupsig, otg_dev->pcd->remote_wakeup_enable); -+#else -+ return sprintf(buf, "Host Only Mode!\n"); -+#endif -+} -+/** -+ * Initiate a remote wakeup of the host. The Device control register -+ * Remote Wakeup Signal bit is written if the PCD Remote wakeup enable -+ * flag is set. 
-+ * -+ */ -+static ssize_t remote_wakeup_store( struct device *_dev, -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) -+ struct device_attribute *attr, -+#endif -+ const char *buf, -+ size_t count ) -+{ -+#ifndef DWC_HOST_ONLY -+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); -+ -+ uint32_t val = simple_strtoul(buf, NULL, 16); -+ if (val&1) { -+ dwc_otg_pcd_remote_wakeup(otg_dev->pcd, 1); -+ } -+ else { -+ dwc_otg_pcd_remote_wakeup(otg_dev->pcd, 0); -+ } -+#endif -+ return count; -+} -+DEVICE_ATTR(remote_wakeup, S_IRUGO|S_IWUSR, remote_wakeup_show, -+ remote_wakeup_store); -+ -+/** -+ * Dump global registers and either host or device registers (depending on the -+ * current mode of the core). -+ */ -+static ssize_t regdump_show( struct device *_dev, -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) -+ struct device_attribute *attr, -+#endif -+ char *buf) -+{ -+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); -+ -+ dwc_otg_dump_global_registers( otg_dev->core_if); -+ if (dwc_otg_is_host_mode(otg_dev->core_if)) { -+ dwc_otg_dump_host_registers( otg_dev->core_if); -+ } else { -+ dwc_otg_dump_dev_registers( otg_dev->core_if); -+ -+ } -+ return sprintf( buf, "Register Dump\n" ); -+} -+ -+DEVICE_ATTR(regdump, S_IRUGO|S_IWUSR, regdump_show, 0); -+ -+/** -+ * Dump global registers and either host or device registers (depending on the -+ * current mode of the core). -+ */ -+static ssize_t spramdump_show( struct device *_dev, -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) -+ struct device_attribute *attr, -+#endif -+ char *buf) -+{ -+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); -+ -+ dwc_otg_dump_spram( otg_dev->core_if); -+ -+ return sprintf( buf, "SPRAM Dump\n" ); -+} -+ -+DEVICE_ATTR(spramdump, S_IRUGO|S_IWUSR, spramdump_show, 0); -+ -+/** -+ * Dump the current hcd state. -+ */ -+static ssize_t hcddump_show( struct device *_dev, -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) -+ struct device_attribute *attr, -+#endif -+ char *buf) -+{ -+#ifndef DWC_DEVICE_ONLY -+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); -+ -+ dwc_otg_hcd_dump_state(otg_dev->hcd); -+#endif -+ return sprintf( buf, "HCD Dump\n" ); -+} -+ -+DEVICE_ATTR(hcddump, S_IRUGO|S_IWUSR, hcddump_show, 0); -+ -+/** -+ * Dump the average frame remaining at SOF. This can be used to -+ * determine average interrupt latency. Frame remaining is also shown for -+ * start transfer and two additional sample points. -+ */ -+static ssize_t hcd_frrem_show( struct device *_dev, -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) -+ struct device_attribute *attr, -+#endif -+ char *buf) -+{ -+#ifndef DWC_DEVICE_ONLY -+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); -+ -+ dwc_otg_hcd_dump_frrem(otg_dev->hcd); -+#endif -+ return sprintf( buf, "HCD Dump Frame Remaining\n" ); -+} -+ -+DEVICE_ATTR(hcd_frrem, S_IRUGO|S_IWUSR, hcd_frrem_show, 0); -+ -+/** -+ * Displays the time required to read the GNPTXFSIZ register many times (the -+ * output shows the number of times the register is read). 
-+ */ -+#define RW_REG_COUNT 10000000 -+#define MSEC_PER_JIFFIE 1000/HZ -+static ssize_t rd_reg_test_show( struct device *_dev, -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) -+ struct device_attribute *attr, -+#endif -+ char *buf) -+{ -+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); -+ -+ int i; -+ int time; -+ int start_jiffies; -+ -+ printk("HZ %d, MSEC_PER_JIFFIE %d, loops_per_jiffy %lu\n", -+ HZ, MSEC_PER_JIFFIE, loops_per_jiffy); -+ start_jiffies = jiffies; -+ for (i = 0; i < RW_REG_COUNT; i++) { -+ dwc_read_reg32(&otg_dev->core_if->core_global_regs->gnptxfsiz); -+ } -+ time = jiffies - start_jiffies; -+ return sprintf( buf, "Time to read GNPTXFSIZ reg %d times: %d msecs (%d jiffies)\n", -+ RW_REG_COUNT, time * MSEC_PER_JIFFIE, time ); -+} -+ -+DEVICE_ATTR(rd_reg_test, S_IRUGO|S_IWUSR, rd_reg_test_show, 0); -+ -+/** -+ * Displays the time required to write the GNPTXFSIZ register many times (the -+ * output shows the number of times the register is written). -+ */ -+static ssize_t wr_reg_test_show( struct device *_dev, -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) -+ struct device_attribute *attr, -+#endif -+ char *buf) -+{ -+ dwc_otg_device_t *otg_dev = dev_get_drvdata(_dev); -+ -+ uint32_t reg_val; -+ int i; -+ int time; -+ int start_jiffies; -+ -+ printk("HZ %d, MSEC_PER_JIFFIE %d, loops_per_jiffy %lu\n", -+ HZ, MSEC_PER_JIFFIE, loops_per_jiffy); -+ reg_val = dwc_read_reg32(&otg_dev->core_if->core_global_regs->gnptxfsiz); -+ start_jiffies = jiffies; -+ for (i = 0; i < RW_REG_COUNT; i++) { -+ dwc_write_reg32(&otg_dev->core_if->core_global_regs->gnptxfsiz, reg_val); -+ } -+ time = jiffies - start_jiffies; -+ return sprintf( buf, "Time to write GNPTXFSIZ reg %d times: %d msecs (%d jiffies)\n", -+ RW_REG_COUNT, time * MSEC_PER_JIFFIE, time); -+} -+ -+DEVICE_ATTR(wr_reg_test, S_IRUGO|S_IWUSR, wr_reg_test_show, 0); -+/**@}*/ -+ -+/** -+ * Create the device files -+ */ -+void dwc_otg_attr_create (struct device *dev) -+{ -+ int error; -+ -+ error = device_create_file(dev, &dev_attr_regoffset); -+ error = device_create_file(dev, &dev_attr_regvalue); -+ error = device_create_file(dev, &dev_attr_mode); -+ error = device_create_file(dev, &dev_attr_hnpcapable); -+ error = device_create_file(dev, &dev_attr_srpcapable); -+ error = device_create_file(dev, &dev_attr_hnp); -+ error = device_create_file(dev, &dev_attr_srp); -+ error = device_create_file(dev, &dev_attr_buspower); -+ error = device_create_file(dev, &dev_attr_bussuspend); -+ error = device_create_file(dev, &dev_attr_busconnected); -+ error = device_create_file(dev, &dev_attr_gotgctl); -+ error = device_create_file(dev, &dev_attr_gusbcfg); -+ error = device_create_file(dev, &dev_attr_grxfsiz); -+ error = device_create_file(dev, &dev_attr_gnptxfsiz); -+ error = device_create_file(dev, &dev_attr_gpvndctl); -+ error = device_create_file(dev, &dev_attr_ggpio); -+ error = device_create_file(dev, &dev_attr_guid); -+ error = device_create_file(dev, &dev_attr_gsnpsid); -+ error = device_create_file(dev, &dev_attr_devspeed); -+ error = device_create_file(dev, &dev_attr_enumspeed); -+ error = device_create_file(dev, &dev_attr_hptxfsiz); -+ error = device_create_file(dev, &dev_attr_hprt0); -+ error = device_create_file(dev, &dev_attr_remote_wakeup); -+ error = device_create_file(dev, &dev_attr_regdump); -+ error = device_create_file(dev, &dev_attr_spramdump); -+ error = device_create_file(dev, &dev_attr_hcddump); -+ error = device_create_file(dev, &dev_attr_hcd_frrem); -+ error = device_create_file(dev, &dev_attr_rd_reg_test); -+ 
error = device_create_file(dev, &dev_attr_wr_reg_test); -+} -+ -+/** -+ * Remove the device files -+ */ -+void dwc_otg_attr_remove (struct device *dev) -+{ -+ device_remove_file(dev, &dev_attr_regoffset); -+ device_remove_file(dev, &dev_attr_regvalue); -+ device_remove_file(dev, &dev_attr_mode); -+ device_remove_file(dev, &dev_attr_hnpcapable); -+ device_remove_file(dev, &dev_attr_srpcapable); -+ device_remove_file(dev, &dev_attr_hnp); -+ device_remove_file(dev, &dev_attr_srp); -+ device_remove_file(dev, &dev_attr_buspower); -+ device_remove_file(dev, &dev_attr_bussuspend); -+ device_remove_file(dev, &dev_attr_busconnected); -+ device_remove_file(dev, &dev_attr_gotgctl); -+ device_remove_file(dev, &dev_attr_gusbcfg); -+ device_remove_file(dev, &dev_attr_grxfsiz); -+ device_remove_file(dev, &dev_attr_gnptxfsiz); -+ device_remove_file(dev, &dev_attr_gpvndctl); -+ device_remove_file(dev, &dev_attr_ggpio); -+ device_remove_file(dev, &dev_attr_guid); -+ device_remove_file(dev, &dev_attr_gsnpsid); -+ device_remove_file(dev, &dev_attr_devspeed); -+ device_remove_file(dev, &dev_attr_enumspeed); -+ device_remove_file(dev, &dev_attr_hptxfsiz); -+ device_remove_file(dev, &dev_attr_hprt0); -+ device_remove_file(dev, &dev_attr_remote_wakeup); -+ device_remove_file(dev, &dev_attr_regdump); -+ device_remove_file(dev, &dev_attr_spramdump); -+ device_remove_file(dev, &dev_attr_hcddump); -+ device_remove_file(dev, &dev_attr_hcd_frrem); -+ device_remove_file(dev, &dev_attr_rd_reg_test); -+ device_remove_file(dev, &dev_attr_wr_reg_test); -+} ---- /dev/null -+++ b/drivers/usb/dwc_otg/dwc_otg_attr.h -@@ -0,0 +1,67 @@ -+/* ========================================================================== -+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_attr.h $ -+ * $Revision: 1.2 $ -+ * $Date: 2008-11-21 05:39:15 $ -+ * $Change: 477051 $ -+ * -+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, -+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless -+ * otherwise expressly agreed to in writing between Synopsys and you. -+ * -+ * The Software IS NOT an item of Licensed Software or Licensed Product under -+ * any End User Software License Agreement or Agreement for Licensed Product -+ * with Synopsys or any supplement thereto. You are permitted to use and -+ * redistribute this Software in source and binary forms, with or without -+ * modification, provided that redistributions of source code must retain this -+ * notice. You may not view, use, disclose, copy or distribute this file or -+ * any information contained herein except pursuant to this license grant from -+ * Synopsys. If you do not agree with this notice, including the disclaimer -+ * below, then you are not authorized to use the Software. -+ * -+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE HEREBY DISCLAIMED. 
IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, -+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -+ * DAMAGE. -+ * ========================================================================== */ -+ -+#if !defined(__DWC_OTG_ATTR_H__) -+#define __DWC_OTG_ATTR_H__ -+ -+/** @file -+ * This file contains the interface to the Linux device attributes. -+ */ -+extern struct device_attribute dev_attr_regoffset; -+extern struct device_attribute dev_attr_regvalue; -+ -+extern struct device_attribute dev_attr_mode; -+extern struct device_attribute dev_attr_hnpcapable; -+extern struct device_attribute dev_attr_srpcapable; -+extern struct device_attribute dev_attr_hnp; -+extern struct device_attribute dev_attr_srp; -+extern struct device_attribute dev_attr_buspower; -+extern struct device_attribute dev_attr_bussuspend; -+extern struct device_attribute dev_attr_busconnected; -+extern struct device_attribute dev_attr_gotgctl; -+extern struct device_attribute dev_attr_gusbcfg; -+extern struct device_attribute dev_attr_grxfsiz; -+extern struct device_attribute dev_attr_gnptxfsiz; -+extern struct device_attribute dev_attr_gpvndctl; -+extern struct device_attribute dev_attr_ggpio; -+extern struct device_attribute dev_attr_guid; -+extern struct device_attribute dev_attr_gsnpsid; -+extern struct device_attribute dev_attr_devspeed; -+extern struct device_attribute dev_attr_enumspeed; -+extern struct device_attribute dev_attr_hptxfsiz; -+extern struct device_attribute dev_attr_hprt0; -+ -+void dwc_otg_attr_create (struct device *dev); -+void dwc_otg_attr_remove (struct device *dev); -+ -+#endif ---- /dev/null -+++ b/drivers/usb/dwc_otg/dwc_otg_cil.c -@@ -0,0 +1,3692 @@ -+/* ========================================================================== -+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_cil.c $ -+ * $Revision: 1.7 $ -+ * $Date: 2008-12-22 11:43:05 $ -+ * $Change: 1117667 $ -+ * -+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, -+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless -+ * otherwise expressly agreed to in writing between Synopsys and you. -+ * -+ * The Software IS NOT an item of Licensed Software or Licensed Product under -+ * any End User Software License Agreement or Agreement for Licensed Product -+ * with Synopsys or any supplement thereto. You are permitted to use and -+ * redistribute this Software in source and binary forms, with or without -+ * modification, provided that redistributions of source code must retain this -+ * notice. You may not view, use, disclose, copy or distribute this file or -+ * any information contained herein except pursuant to this license grant from -+ * Synopsys. If you do not agree with this notice, including the disclaimer -+ * below, then you are not authorized to use the Software. -+ * -+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE HEREBY DISCLAIMED. 
IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, -+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -+ * DAMAGE. -+ * ========================================================================== */ -+ -+/** @file -+ * -+ * The Core Interface Layer provides basic services for accessing and -+ * managing the DWC_otg hardware. These services are used by both the -+ * Host Controller Driver and the Peripheral Controller Driver. -+ * -+ * The CIL manages the memory map for the core so that the HCD and PCD -+ * don't have to do this separately. It also handles basic tasks like -+ * reading/writing the registers and data FIFOs in the controller. -+ * Some of the data access functions provide encapsulation of several -+ * operations required to perform a task, such as writing multiple -+ * registers to start a transfer. Finally, the CIL performs basic -+ * services that are not specific to either the host or device modes -+ * of operation. These services include management of the OTG Host -+ * Negotiation Protocol (HNP) and Session Request Protocol (SRP). A -+ * Diagnostic API is also provided to allow testing of the controller -+ * hardware. -+ * -+ * The Core Interface Layer has the following requirements: -+ * - Provides basic controller operations. -+ * - Minimal use of OS services. -+ * - The OS services used will be abstracted by using inline functions -+ * or macros. -+ * -+ */ -+#include -+#include -+#ifdef DEBUG -+#include -+#endif -+ -+#include "linux/dwc_otg_plat.h" -+#include "dwc_otg_regs.h" -+#include "dwc_otg_cil.h" -+ -+/* Included only to access hc->qh for non-dword buffer handling -+ * TODO: account it -+ */ -+#include "dwc_otg_hcd.h" -+ -+/** -+ * This function is called to initialize the DWC_otg CSR data -+ * structures. The register addresses in the device and host -+ * structures are initialized from the base address supplied by the -+ * caller. The calling function must make the OS calls to get the -+ * base address of the DWC_otg controller registers. The core_params -+ * argument holds the parameters that specify how the core should be -+ * configured. -+ * -+ * @param[in] reg_base_addr Base address of DWC_otg core registers -+ * @param[in] core_params Pointer to the core configuration parameters -+ * -+ */ -+dwc_otg_core_if_t *dwc_otg_cil_init(const uint32_t *reg_base_addr, -+ dwc_otg_core_params_t *core_params) -+{ -+ dwc_otg_core_if_t *core_if = 0; -+ dwc_otg_dev_if_t *dev_if = 0; -+ dwc_otg_host_if_t *host_if = 0; -+ uint8_t *reg_base = (uint8_t *)reg_base_addr; -+ int i = 0; -+ -+ DWC_DEBUGPL(DBG_CILV, "%s(%p,%p)\n", __func__, reg_base_addr, core_params); -+ -+ core_if = kmalloc(sizeof(dwc_otg_core_if_t), GFP_KERNEL); -+ -+ if (core_if == 0) { -+ DWC_DEBUGPL(DBG_CIL, "Allocation of dwc_otg_core_if_t failed\n"); -+ return 0; -+ } -+ -+ memset(core_if, 0, sizeof(dwc_otg_core_if_t)); -+ -+ core_if->core_params = core_params; -+ core_if->core_global_regs = (dwc_otg_core_global_regs_t *)reg_base; -+ -+ /* -+ * Allocate the Device Mode structures. 
-+ */ -+ dev_if = kmalloc(sizeof(dwc_otg_dev_if_t), GFP_KERNEL); -+ -+ if (dev_if == 0) { -+ DWC_DEBUGPL(DBG_CIL, "Allocation of dwc_otg_dev_if_t failed\n"); -+ kfree(core_if); -+ return 0; -+ } -+ -+ dev_if->dev_global_regs = -+ (dwc_otg_device_global_regs_t *)(reg_base + DWC_DEV_GLOBAL_REG_OFFSET); -+ -+ for (i=0; iin_ep_regs[i] = (dwc_otg_dev_in_ep_regs_t *) -+ (reg_base + DWC_DEV_IN_EP_REG_OFFSET + -+ (i * DWC_EP_REG_OFFSET)); -+ -+ dev_if->out_ep_regs[i] = (dwc_otg_dev_out_ep_regs_t *) -+ (reg_base + DWC_DEV_OUT_EP_REG_OFFSET + -+ (i * DWC_EP_REG_OFFSET)); -+ DWC_DEBUGPL(DBG_CILV, "in_ep_regs[%d]->diepctl=%p\n", -+ i, &dev_if->in_ep_regs[i]->diepctl); -+ DWC_DEBUGPL(DBG_CILV, "out_ep_regs[%d]->doepctl=%p\n", -+ i, &dev_if->out_ep_regs[i]->doepctl); -+ } -+ -+ dev_if->speed = 0; // unknown -+ -+ core_if->dev_if = dev_if; -+ -+ /* -+ * Allocate the Host Mode structures. -+ */ -+ host_if = kmalloc(sizeof(dwc_otg_host_if_t), GFP_KERNEL); -+ -+ if (host_if == 0) { -+ DWC_DEBUGPL(DBG_CIL, "Allocation of dwc_otg_host_if_t failed\n"); -+ kfree(dev_if); -+ kfree(core_if); -+ return 0; -+ } -+ -+ host_if->host_global_regs = (dwc_otg_host_global_regs_t *) -+ (reg_base + DWC_OTG_HOST_GLOBAL_REG_OFFSET); -+ -+ host_if->hprt0 = (uint32_t*)(reg_base + DWC_OTG_HOST_PORT_REGS_OFFSET); -+ -+ for (i=0; ihc_regs[i] = (dwc_otg_hc_regs_t *) -+ (reg_base + DWC_OTG_HOST_CHAN_REGS_OFFSET + -+ (i * DWC_OTG_CHAN_REGS_OFFSET)); -+ DWC_DEBUGPL(DBG_CILV, "hc_reg[%d]->hcchar=%p\n", -+ i, &host_if->hc_regs[i]->hcchar); -+ } -+ -+ host_if->num_host_channels = MAX_EPS_CHANNELS; -+ core_if->host_if = host_if; -+ -+ for (i=0; idata_fifo[i] = -+ (uint32_t *)(reg_base + DWC_OTG_DATA_FIFO_OFFSET + -+ (i * DWC_OTG_DATA_FIFO_SIZE)); -+ DWC_DEBUGPL(DBG_CILV, "data_fifo[%d]=0x%08x\n", -+ i, (unsigned)core_if->data_fifo[i]); -+ } -+ -+ core_if->pcgcctl = (uint32_t*)(reg_base + DWC_OTG_PCGCCTL_OFFSET); -+ -+ /* -+ * Store the contents of the hardware configuration registers here for -+ * easy access later. 
-+ */ -+ core_if->hwcfg1.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg1); -+ core_if->hwcfg2.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg2); -+ core_if->hwcfg3.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg3); -+ core_if->hwcfg4.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg4); -+ -+ DWC_DEBUGPL(DBG_CILV,"hwcfg1=%08x\n",core_if->hwcfg1.d32); -+ DWC_DEBUGPL(DBG_CILV,"hwcfg2=%08x\n",core_if->hwcfg2.d32); -+ DWC_DEBUGPL(DBG_CILV,"hwcfg3=%08x\n",core_if->hwcfg3.d32); -+ DWC_DEBUGPL(DBG_CILV,"hwcfg4=%08x\n",core_if->hwcfg4.d32); -+ -+ core_if->hcfg.d32 = dwc_read_reg32(&core_if->host_if->host_global_regs->hcfg); -+ core_if->dcfg.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dcfg); -+ -+ DWC_DEBUGPL(DBG_CILV,"hcfg=%08x\n",core_if->hcfg.d32); -+ DWC_DEBUGPL(DBG_CILV,"dcfg=%08x\n",core_if->dcfg.d32); -+ -+ DWC_DEBUGPL(DBG_CILV,"op_mode=%0x\n",core_if->hwcfg2.b.op_mode); -+ DWC_DEBUGPL(DBG_CILV,"arch=%0x\n",core_if->hwcfg2.b.architecture); -+ DWC_DEBUGPL(DBG_CILV,"num_dev_ep=%d\n",core_if->hwcfg2.b.num_dev_ep); -+ DWC_DEBUGPL(DBG_CILV,"num_host_chan=%d\n",core_if->hwcfg2.b.num_host_chan); -+ DWC_DEBUGPL(DBG_CILV,"nonperio_tx_q_depth=0x%0x\n",core_if->hwcfg2.b.nonperio_tx_q_depth); -+ DWC_DEBUGPL(DBG_CILV,"host_perio_tx_q_depth=0x%0x\n",core_if->hwcfg2.b.host_perio_tx_q_depth); -+ DWC_DEBUGPL(DBG_CILV,"dev_token_q_depth=0x%0x\n",core_if->hwcfg2.b.dev_token_q_depth); -+ -+ DWC_DEBUGPL(DBG_CILV,"Total FIFO SZ=%d\n", core_if->hwcfg3.b.dfifo_depth); -+ DWC_DEBUGPL(DBG_CILV,"xfer_size_cntr_width=%0x\n", core_if->hwcfg3.b.xfer_size_cntr_width); -+ -+ /* -+ * Set the SRP sucess bit for FS-I2c -+ */ -+ core_if->srp_success = 0; -+ core_if->srp_timer_started = 0; -+ -+ -+ /* -+ * Create new workqueue and init works -+ */ -+ core_if->wq_otg = create_singlethread_workqueue("dwc_otg"); -+ if(core_if->wq_otg == 0) { -+ DWC_DEBUGPL(DBG_CIL, "Creation of wq_otg failed\n"); -+ kfree(host_if); -+ kfree(dev_if); -+ kfree(core_if); -+ return 0 * HZ; -+ } -+ -+ -+ -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) -+ -+ INIT_WORK(&core_if->w_conn_id, w_conn_id_status_change, core_if); -+ INIT_WORK(&core_if->w_wkp, w_wakeup_detected, core_if); -+ -+#else -+ -+ INIT_WORK(&core_if->w_conn_id, w_conn_id_status_change); -+ INIT_DELAYED_WORK(&core_if->w_wkp, w_wakeup_detected); -+ -+#endif -+ return core_if; -+} -+ -+/** -+ * This function frees the structures allocated by dwc_otg_cil_init(). -+ * -+ * @param[in] core_if The core interface pointer returned from -+ * dwc_otg_cil_init(). -+ * -+ */ -+void dwc_otg_cil_remove(dwc_otg_core_if_t *core_if) -+{ -+ /* Disable all interrupts */ -+ dwc_modify_reg32(&core_if->core_global_regs->gahbcfg, 1, 0); -+ dwc_write_reg32(&core_if->core_global_regs->gintmsk, 0); -+ -+ if (core_if->wq_otg) { -+ destroy_workqueue(core_if->wq_otg); -+ } -+ if (core_if->dev_if) { -+ kfree(core_if->dev_if); -+ } -+ if (core_if->host_if) { -+ kfree(core_if->host_if); -+ } -+ kfree(core_if); -+} -+ -+/** -+ * This function enables the controller's Global Interrupt in the AHB Config -+ * register. -+ * -+ * @param[in] core_if Programming view of DWC_otg controller. -+ */ -+void dwc_otg_enable_global_interrupts(dwc_otg_core_if_t *core_if) -+{ -+ gahbcfg_data_t ahbcfg = { .d32 = 0}; -+ ahbcfg.b.glblintrmsk = 1; /* Enable interrupts */ -+ dwc_modify_reg32(&core_if->core_global_regs->gahbcfg, 0, ahbcfg.d32); -+} -+ -+/** -+ * This function disables the controller's Global Interrupt in the AHB Config -+ * register. 
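The functions above complete the CIL's setup and teardown entry points. The fragment below is only a hedged sketch of the call order implied by this file, with placeholder reg_base and core_params supplied by platform glue and no error handling; the exact ordering around HCD/PCD bring-up is the platform driver's business and is not shown here:

/* Assumes the dwc_otg_cil.h / dwc_otg_regs.h definitions added by this patch. */
dwc_otg_core_if_t *core_if = dwc_otg_cil_init(reg_base, &core_params);

if (core_if) {
        dwc_otg_core_init(core_if);                  /* common register setup */
        /* ... host or device specific initialization ... */
        dwc_otg_enable_global_interrupts(core_if);   /* set GAHBCFG.GlblIntrMsk */

        /* on teardown */
        dwc_otg_disable_global_interrupts(core_if);
        dwc_otg_cil_remove(core_if);                 /* frees dev_if, host_if, core_if */
}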
-+ * -+ * @param[in] core_if Programming view of DWC_otg controller. -+ */ -+void dwc_otg_disable_global_interrupts(dwc_otg_core_if_t *core_if) -+{ -+ gahbcfg_data_t ahbcfg = { .d32 = 0}; -+ ahbcfg.b.glblintrmsk = 1; /* Enable interrupts */ -+ dwc_modify_reg32(&core_if->core_global_regs->gahbcfg, ahbcfg.d32, 0); -+} -+ -+/** -+ * This function initializes the commmon interrupts, used in both -+ * device and host modes. -+ * -+ * @param[in] core_if Programming view of the DWC_otg controller -+ * -+ */ -+static void dwc_otg_enable_common_interrupts(dwc_otg_core_if_t *core_if) -+{ -+ dwc_otg_core_global_regs_t *global_regs = -+ core_if->core_global_regs; -+ gintmsk_data_t intr_mask = { .d32 = 0}; -+ -+ /* Clear any pending OTG Interrupts */ -+ dwc_write_reg32(&global_regs->gotgint, 0xFFFFFFFF); -+ -+ /* Clear any pending interrupts */ -+ dwc_write_reg32(&global_regs->gintsts, 0xFFFFFFFF); -+ -+ /* -+ * Enable the interrupts in the GINTMSK. -+ */ -+ intr_mask.b.modemismatch = 1; -+ intr_mask.b.otgintr = 1; -+ -+ if (!core_if->dma_enable) { -+ intr_mask.b.rxstsqlvl = 1; -+ } -+ -+ intr_mask.b.conidstschng = 1; -+ intr_mask.b.wkupintr = 1; -+ intr_mask.b.disconnect = 1; -+ intr_mask.b.usbsuspend = 1; -+ intr_mask.b.sessreqintr = 1; -+ dwc_write_reg32(&global_regs->gintmsk, intr_mask.d32); -+} -+ -+/** -+ * Initializes the FSLSPClkSel field of the HCFG register depending on the PHY -+ * type. -+ */ -+static void init_fslspclksel(dwc_otg_core_if_t *core_if) -+{ -+ uint32_t val; -+ hcfg_data_t hcfg; -+ -+ if (((core_if->hwcfg2.b.hs_phy_type == 2) && -+ (core_if->hwcfg2.b.fs_phy_type == 1) && -+ (core_if->core_params->ulpi_fs_ls)) || -+ (core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS)) { -+ /* Full speed PHY */ -+ val = DWC_HCFG_48_MHZ; -+ } -+ else { -+ /* High speed PHY running at full speed or high speed */ -+ val = DWC_HCFG_30_60_MHZ; -+ } -+ -+ DWC_DEBUGPL(DBG_CIL, "Initializing HCFG.FSLSPClkSel to 0x%1x\n", val); -+ hcfg.d32 = dwc_read_reg32(&core_if->host_if->host_global_regs->hcfg); -+ hcfg.b.fslspclksel = val; -+ dwc_write_reg32(&core_if->host_if->host_global_regs->hcfg, hcfg.d32); -+} -+ -+/** -+ * Initializes the DevSpd field of the DCFG register depending on the PHY type -+ * and the enumeration speed of the device. 
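Most register updates in this layer go through d32/bitfield union types such as gahbcfg_data_t, gintmsk_data_t and hcfg_data_t, combined with a clear-then-set helper. The standalone sketch below shows only the shape of that pattern; the union field layout and the accessor are invented for illustration and are not the driver's definitions:

#include <stdint.h>

/* Whole-register view plus per-field view, as the *_data_t unions do. */
typedef union example_reg {
        uint32_t d32;
        struct {
                uint32_t glblintrmsk:1;
                uint32_t reserved:31;
        } b;
} example_reg_t;

/* Same argument shape as the dwc_modify_reg32(reg, clear_mask, set_mask) calls
 * above: enabling passes (0, mask), disabling passes (mask, 0).
 */
static inline void example_modify_reg32(volatile uint32_t *reg,
                                        uint32_t clear_mask, uint32_t set_mask)
{
        *reg = (*reg & ~clear_mask) | set_mask;
}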
-+ */ -+static void init_devspd(dwc_otg_core_if_t *core_if) -+{ -+ uint32_t val; -+ dcfg_data_t dcfg; -+ -+ if (((core_if->hwcfg2.b.hs_phy_type == 2) && -+ (core_if->hwcfg2.b.fs_phy_type == 1) && -+ (core_if->core_params->ulpi_fs_ls)) || -+ (core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS)) { -+ /* Full speed PHY */ -+ val = 0x3; -+ } -+ else if (core_if->core_params->speed == DWC_SPEED_PARAM_FULL) { -+ /* High speed PHY running at full speed */ -+ val = 0x1; -+ } -+ else { -+ /* High speed PHY running at high speed */ -+ val = 0x0; -+ } -+ -+ DWC_DEBUGPL(DBG_CIL, "Initializing DCFG.DevSpd to 0x%1x\n", val); -+ -+ dcfg.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dcfg); -+ dcfg.b.devspd = val; -+ dwc_write_reg32(&core_if->dev_if->dev_global_regs->dcfg, dcfg.d32); -+} -+ -+/** -+ * This function calculates the number of IN EPS -+ * using GHWCFG1 and GHWCFG2 registers values -+ * -+ * @param core_if Programming view of the DWC_otg controller -+ */ -+static uint32_t calc_num_in_eps(dwc_otg_core_if_t *core_if) -+{ -+ uint32_t num_in_eps = 0; -+ uint32_t num_eps = core_if->hwcfg2.b.num_dev_ep; -+ uint32_t hwcfg1 = core_if->hwcfg1.d32 >> 3; -+ uint32_t num_tx_fifos = core_if->hwcfg4.b.num_in_eps; -+ int i; -+ -+ -+ for(i = 0; i < num_eps; ++i) -+ { -+ if(!(hwcfg1 & 0x1)) -+ num_in_eps++; -+ -+ hwcfg1 >>= 2; -+ } -+ -+ if(core_if->hwcfg4.b.ded_fifo_en) { -+ num_in_eps = (num_in_eps > num_tx_fifos) ? num_tx_fifos : num_in_eps; -+ } -+ -+ return num_in_eps; -+} -+ -+ -+/** -+ * This function calculates the number of OUT EPS -+ * using GHWCFG1 and GHWCFG2 registers values -+ * -+ * @param core_if Programming view of the DWC_otg controller -+ */ -+static uint32_t calc_num_out_eps(dwc_otg_core_if_t *core_if) -+{ -+ uint32_t num_out_eps = 0; -+ uint32_t num_eps = core_if->hwcfg2.b.num_dev_ep; -+ uint32_t hwcfg1 = core_if->hwcfg1.d32 >> 2; -+ int i; -+ -+ for(i = 0; i < num_eps; ++i) -+ { -+ if(!(hwcfg1 & 0x2)) -+ num_out_eps++; -+ -+ hwcfg1 >>= 2; -+ } -+ return num_out_eps; -+} -+/** -+ * This function initializes the DWC_otg controller registers and -+ * prepares the core for device mode or host mode operation. -+ * -+ * @param core_if Programming view of the DWC_otg controller -+ * -+ */ -+void dwc_otg_core_init(dwc_otg_core_if_t *core_if) -+{ -+ int i = 0; -+ dwc_otg_core_global_regs_t *global_regs = -+ core_if->core_global_regs; -+ dwc_otg_dev_if_t *dev_if = core_if->dev_if; -+ gahbcfg_data_t ahbcfg = { .d32 = 0 }; -+ gusbcfg_data_t usbcfg = { .d32 = 0 }; -+ gi2cctl_data_t i2cctl = { .d32 = 0 }; -+ -+ DWC_DEBUGPL(DBG_CILV, "dwc_otg_core_init(%p)\n", core_if); -+ -+ /* Common Initialization */ -+ -+ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg); -+ -+// usbcfg.b.tx_end_delay = 1; -+ /* Program the ULPI External VBUS bit if needed */ -+ usbcfg.b.ulpi_ext_vbus_drv = -+ (core_if->core_params->phy_ulpi_ext_vbus == DWC_PHY_ULPI_EXTERNAL_VBUS) ? 1 : 0; -+ -+ /* Set external TS Dline pulsing */ -+ usbcfg.b.term_sel_dl_pulse = (core_if->core_params->ts_dline == 1) ? 1 : 0; -+ dwc_write_reg32 (&global_regs->gusbcfg, usbcfg.d32); -+ -+ -+ /* Reset the Controller */ -+ dwc_otg_core_reset(core_if); -+ -+ /* Initialize parameters from Hardware configuration registers. 
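calc_num_in_eps() above walks GHWCFG1 two bits per endpoint and counts the endpoints whose direction bit is clear. The sketch below restates that decode in isolation; the shift amounts are copied from the loop above, not taken from the databook:

#include <stdint.h>

static unsigned int example_count_in_eps(uint32_t ghwcfg1, unsigned int num_dev_eps)
{
        unsigned int i, in_eps = 0;
        uint32_t dir_bits = ghwcfg1 >> 3;       /* same initial shift as calc_num_in_eps() */

        for (i = 0; i < num_dev_eps; i++) {
                if (!(dir_bits & 0x1))          /* low bit clear: count as an IN endpoint */
                        in_eps++;
                dir_bits >>= 2;                 /* next endpoint's 2-bit field */
        }
        return in_eps;
}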
*/ -+ dev_if->num_in_eps = calc_num_in_eps(core_if); -+ dev_if->num_out_eps = calc_num_out_eps(core_if); -+ -+ -+ DWC_DEBUGPL(DBG_CIL, "num_dev_perio_in_ep=%d\n", core_if->hwcfg4.b.num_dev_perio_in_ep); -+ -+ for (i=0; i < core_if->hwcfg4.b.num_dev_perio_in_ep; i++) -+ { -+ dev_if->perio_tx_fifo_size[i] = -+ dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i]) >> 16; -+ DWC_DEBUGPL(DBG_CIL, "Periodic Tx FIFO SZ #%d=0x%0x\n", -+ i, dev_if->perio_tx_fifo_size[i]); -+ } -+ -+ for (i=0; i < core_if->hwcfg4.b.num_in_eps; i++) -+ { -+ dev_if->tx_fifo_size[i] = -+ dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i]) >> 16; -+ DWC_DEBUGPL(DBG_CIL, "Tx FIFO SZ #%d=0x%0x\n", -+ i, dev_if->perio_tx_fifo_size[i]); -+ } -+ -+ core_if->total_fifo_size = core_if->hwcfg3.b.dfifo_depth; -+ core_if->rx_fifo_size = -+ dwc_read_reg32(&global_regs->grxfsiz); -+ core_if->nperio_tx_fifo_size = -+ dwc_read_reg32(&global_regs->gnptxfsiz) >> 16; -+ -+ DWC_DEBUGPL(DBG_CIL, "Total FIFO SZ=%d\n", core_if->total_fifo_size); -+ DWC_DEBUGPL(DBG_CIL, "Rx FIFO SZ=%d\n", core_if->rx_fifo_size); -+ DWC_DEBUGPL(DBG_CIL, "NP Tx FIFO SZ=%d\n", core_if->nperio_tx_fifo_size); -+ -+ /* This programming sequence needs to happen in FS mode before any other -+ * programming occurs */ -+ if ((core_if->core_params->speed == DWC_SPEED_PARAM_FULL) && -+ (core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS)) { -+ /* If FS mode with FS PHY */ -+ -+ /* core_init() is now called on every switch so only call the -+ * following for the first time through. */ -+ if (!core_if->phy_init_done) { -+ core_if->phy_init_done = 1; -+ DWC_DEBUGPL(DBG_CIL, "FS_PHY detected\n"); -+ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg); -+ usbcfg.b.physel = 1; -+ dwc_write_reg32 (&global_regs->gusbcfg, usbcfg.d32); -+ -+ /* Reset after a PHY select */ -+ dwc_otg_core_reset(core_if); -+ } -+ -+ /* Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48Mhz in FS. Also -+ * do this on HNP Dev/Host mode switches (done in dev_init and -+ * host_init). */ -+ if (dwc_otg_is_host_mode(core_if)) { -+ init_fslspclksel(core_if); -+ } -+ else { -+ init_devspd(core_if); -+ } -+ -+ if (core_if->core_params->i2c_enable) { -+ DWC_DEBUGPL(DBG_CIL, "FS_PHY Enabling I2c\n"); -+ /* Program GUSBCFG.OtgUtmifsSel to I2C */ -+ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg); -+ usbcfg.b.otgutmifssel = 1; -+ dwc_write_reg32 (&global_regs->gusbcfg, usbcfg.d32); -+ -+ /* Program GI2CCTL.I2CEn */ -+ i2cctl.d32 = dwc_read_reg32(&global_regs->gi2cctl); -+ i2cctl.b.i2cdevaddr = 1; -+ i2cctl.b.i2cen = 0; -+ dwc_write_reg32 (&global_regs->gi2cctl, i2cctl.d32); -+ i2cctl.b.i2cen = 1; -+ dwc_write_reg32 (&global_regs->gi2cctl, i2cctl.d32); -+ } -+ -+ } /* endif speed == DWC_SPEED_PARAM_FULL */ -+ -+ else { -+ /* High speed PHY. */ -+ if (!core_if->phy_init_done) { -+ core_if->phy_init_done = 1; -+ /* HS PHY parameters. These parameters are preserved -+ * during soft reset so only program the first time. Do -+ * a soft reset immediately after setting phyif. 
*/ -+ usbcfg.b.ulpi_utmi_sel = core_if->core_params->phy_type; -+ if (usbcfg.b.ulpi_utmi_sel == 1) { -+ /* ULPI interface */ -+ usbcfg.b.phyif = 0; -+ usbcfg.b.ddrsel = core_if->core_params->phy_ulpi_ddr; -+ } -+ else { -+ /* UTMI+ interface */ -+ if (core_if->core_params->phy_utmi_width == 16) { -+ usbcfg.b.phyif = 1; -+ } -+ else { -+ usbcfg.b.phyif = 0; -+ } -+ } -+ -+ dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32); -+ -+ /* Reset after setting the PHY parameters */ -+ dwc_otg_core_reset(core_if); -+ } -+ } -+ -+ if ((core_if->hwcfg2.b.hs_phy_type == 2) && -+ (core_if->hwcfg2.b.fs_phy_type == 1) && -+ (core_if->core_params->ulpi_fs_ls)) { -+ DWC_DEBUGPL(DBG_CIL, "Setting ULPI FSLS\n"); -+ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg); -+ usbcfg.b.ulpi_fsls = 1; -+ usbcfg.b.ulpi_clk_sus_m = 1; -+ dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32); -+ } -+ else { -+ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg); -+ usbcfg.b.ulpi_fsls = 0; -+ usbcfg.b.ulpi_clk_sus_m = 0; -+ dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32); -+ } -+ -+ /* Program the GAHBCFG Register.*/ -+ switch (core_if->hwcfg2.b.architecture) { -+ -+ case DWC_SLAVE_ONLY_ARCH: -+ DWC_DEBUGPL(DBG_CIL, "Slave Only Mode\n"); -+ ahbcfg.b.nptxfemplvl_txfemplvl = DWC_GAHBCFG_TXFEMPTYLVL_HALFEMPTY; -+ ahbcfg.b.ptxfemplvl = DWC_GAHBCFG_TXFEMPTYLVL_HALFEMPTY; -+ core_if->dma_enable = 0; -+ core_if->dma_desc_enable = 0; -+ break; -+ -+ case DWC_EXT_DMA_ARCH: -+ DWC_DEBUGPL(DBG_CIL, "External DMA Mode\n"); -+ ahbcfg.b.hburstlen = core_if->core_params->dma_burst_size; -+ core_if->dma_enable = (core_if->core_params->dma_enable != 0); -+ core_if->dma_desc_enable = (core_if->core_params->dma_desc_enable != 0); -+ break; -+ -+ case DWC_INT_DMA_ARCH: -+ DWC_DEBUGPL(DBG_CIL, "Internal DMA Mode\n"); -+ ahbcfg.b.hburstlen = DWC_GAHBCFG_INT_DMA_BURST_INCR; -+ core_if->dma_enable = (core_if->core_params->dma_enable != 0); -+ core_if->dma_desc_enable = (core_if->core_params->dma_desc_enable != 0); -+ break; -+ -+ } -+ ahbcfg.b.dmaenable = core_if->dma_enable; -+ dwc_write_reg32(&global_regs->gahbcfg, ahbcfg.d32); -+ -+ core_if->en_multiple_tx_fifo = core_if->hwcfg4.b.ded_fifo_en; -+ -+ core_if->pti_enh_enable = core_if->core_params->pti_enable != 0; -+ core_if->multiproc_int_enable = core_if->core_params->mpi_enable; -+ DWC_PRINT("Periodic Transfer Interrupt Enhancement - %s\n", ((core_if->pti_enh_enable) ? "enabled": "disabled")); -+ DWC_PRINT("Multiprocessor Interrupt Enhancement - %s\n", ((core_if->multiproc_int_enable) ? "enabled": "disabled")); -+ -+ /* -+ * Program the GUSBCFG register. 
-+ */ -+ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg); -+ -+ switch (core_if->hwcfg2.b.op_mode) { -+ case DWC_MODE_HNP_SRP_CAPABLE: -+ usbcfg.b.hnpcap = (core_if->core_params->otg_cap == -+ DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE); -+ usbcfg.b.srpcap = (core_if->core_params->otg_cap != -+ DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE); -+ break; -+ -+ case DWC_MODE_SRP_ONLY_CAPABLE: -+ usbcfg.b.hnpcap = 0; -+ usbcfg.b.srpcap = (core_if->core_params->otg_cap != -+ DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE); -+ break; -+ -+ case DWC_MODE_NO_HNP_SRP_CAPABLE: -+ usbcfg.b.hnpcap = 0; -+ usbcfg.b.srpcap = 0; -+ break; -+ -+ case DWC_MODE_SRP_CAPABLE_DEVICE: -+ usbcfg.b.hnpcap = 0; -+ usbcfg.b.srpcap = (core_if->core_params->otg_cap != -+ DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE); -+ break; -+ -+ case DWC_MODE_NO_SRP_CAPABLE_DEVICE: -+ usbcfg.b.hnpcap = 0; -+ usbcfg.b.srpcap = 0; -+ break; -+ -+ case DWC_MODE_SRP_CAPABLE_HOST: -+ usbcfg.b.hnpcap = 0; -+ usbcfg.b.srpcap = (core_if->core_params->otg_cap != -+ DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE); -+ break; -+ -+ case DWC_MODE_NO_SRP_CAPABLE_HOST: -+ usbcfg.b.hnpcap = 0; -+ usbcfg.b.srpcap = 0; -+ break; -+ } -+ -+ dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32); -+ -+ /* Enable common interrupts */ -+ dwc_otg_enable_common_interrupts(core_if); -+ -+ /* Do device or host intialization based on mode during PCD -+ * and HCD initialization */ -+ if (dwc_otg_is_host_mode(core_if)) { -+ DWC_DEBUGPL(DBG_ANY, "Host Mode\n"); -+ core_if->op_state = A_HOST; -+ } -+ else { -+ DWC_DEBUGPL(DBG_ANY, "Device Mode\n"); -+ core_if->op_state = B_PERIPHERAL; -+#ifdef DWC_DEVICE_ONLY -+ dwc_otg_core_dev_init(core_if); -+#endif -+ } -+} -+ -+ -+/** -+ * This function enables the Device mode interrupts. -+ * -+ * @param core_if Programming view of DWC_otg controller -+ */ -+void dwc_otg_enable_device_interrupts(dwc_otg_core_if_t *core_if) -+{ -+ gintmsk_data_t intr_mask = { .d32 = 0}; -+ dwc_otg_core_global_regs_t *global_regs = -+ core_if->core_global_regs; -+ -+ DWC_DEBUGPL(DBG_CIL, "%s()\n", __func__); -+ -+ /* Disable all interrupts. */ -+ dwc_write_reg32(&global_regs->gintmsk, 0); -+ -+ /* Clear any pending interrupts */ -+ dwc_write_reg32(&global_regs->gintsts, 0xFFFFFFFF); -+ -+ /* Enable the common interrupts */ -+ dwc_otg_enable_common_interrupts(core_if); -+ -+ /* Enable interrupts */ -+ intr_mask.b.usbreset = 1; -+ intr_mask.b.enumdone = 1; -+ -+ if(!core_if->multiproc_int_enable) { -+ intr_mask.b.inepintr = 1; -+ intr_mask.b.outepintr = 1; -+ } -+ -+ intr_mask.b.erlysuspend = 1; -+ -+ if(core_if->en_multiple_tx_fifo == 0) { -+ intr_mask.b.epmismatch = 1; -+ } -+ -+ -+#ifdef DWC_EN_ISOC -+ if(core_if->dma_enable) { -+ if(core_if->dma_desc_enable == 0) { -+ if(core_if->pti_enh_enable) { -+ dctl_data_t dctl = { .d32 = 0 }; -+ dctl.b.ifrmnum = 1; -+ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dctl, 0, dctl.d32); -+ } else { -+ intr_mask.b.incomplisoin = 1; -+ intr_mask.b.incomplisoout = 1; -+ } -+ } -+ } else { -+ intr_mask.b.incomplisoin = 1; -+ intr_mask.b.incomplisoout = 1; -+ } -+#endif // DWC_EN_ISOC -+ -+/** @todo NGS: Should this be a module parameter? 
*/ -+#ifdef USE_PERIODIC_EP -+ intr_mask.b.isooutdrop = 1; -+ intr_mask.b.eopframe = 1; -+ intr_mask.b.incomplisoin = 1; -+ intr_mask.b.incomplisoout = 1; -+#endif -+ -+ dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, intr_mask.d32); -+ -+ DWC_DEBUGPL(DBG_CIL, "%s() gintmsk=%0x\n", __func__, -+ dwc_read_reg32(&global_regs->gintmsk)); -+} -+ -+/** -+ * This function initializes the DWC_otg controller registers for -+ * device mode. -+ * -+ * @param core_if Programming view of DWC_otg controller -+ * -+ */ -+void dwc_otg_core_dev_init(dwc_otg_core_if_t *core_if) -+{ -+ int i; -+ dwc_otg_core_global_regs_t *global_regs = -+ core_if->core_global_regs; -+ dwc_otg_dev_if_t *dev_if = core_if->dev_if; -+ dwc_otg_core_params_t *params = core_if->core_params; -+ dcfg_data_t dcfg = { .d32 = 0}; -+ grstctl_t resetctl = { .d32 = 0 }; -+ uint32_t rx_fifo_size; -+ fifosize_data_t nptxfifosize; -+ fifosize_data_t txfifosize; -+ dthrctl_data_t dthrctl; -+ fifosize_data_t ptxfifosize; -+ -+ /* Restart the Phy Clock */ -+ dwc_write_reg32(core_if->pcgcctl, 0); -+ -+ /* Device configuration register */ -+ init_devspd(core_if); -+ dcfg.d32 = dwc_read_reg32(&dev_if->dev_global_regs->dcfg); -+ dcfg.b.descdma = (core_if->dma_desc_enable) ? 1 : 0; -+ dcfg.b.perfrint = DWC_DCFG_FRAME_INTERVAL_80; -+ -+ dwc_write_reg32(&dev_if->dev_global_regs->dcfg, dcfg.d32); -+ -+ /* Configure data FIFO sizes */ -+ if (core_if->hwcfg2.b.dynamic_fifo && params->enable_dynamic_fifo) { -+ DWC_DEBUGPL(DBG_CIL, "Total FIFO Size=%d\n", core_if->total_fifo_size); -+ DWC_DEBUGPL(DBG_CIL, "Rx FIFO Size=%d\n", params->dev_rx_fifo_size); -+ DWC_DEBUGPL(DBG_CIL, "NP Tx FIFO Size=%d\n", params->dev_nperio_tx_fifo_size); -+ -+ /* Rx FIFO */ -+ DWC_DEBUGPL(DBG_CIL, "initial grxfsiz=%08x\n", -+ dwc_read_reg32(&global_regs->grxfsiz)); -+ -+ rx_fifo_size = params->dev_rx_fifo_size; -+ dwc_write_reg32(&global_regs->grxfsiz, rx_fifo_size); -+ -+ DWC_DEBUGPL(DBG_CIL, "new grxfsiz=%08x\n", -+ dwc_read_reg32(&global_regs->grxfsiz)); -+ -+ /** Set Periodic Tx FIFO Mask all bits 0 */ -+ core_if->p_tx_msk = 0; -+ -+ /** Set Tx FIFO Mask all bits 0 */ -+ core_if->tx_msk = 0; -+ -+ if(core_if->en_multiple_tx_fifo == 0) { -+ /* Non-periodic Tx FIFO */ -+ DWC_DEBUGPL(DBG_CIL, "initial gnptxfsiz=%08x\n", -+ dwc_read_reg32(&global_regs->gnptxfsiz)); -+ -+ nptxfifosize.b.depth = params->dev_nperio_tx_fifo_size; -+ nptxfifosize.b.startaddr = params->dev_rx_fifo_size; -+ -+ dwc_write_reg32(&global_regs->gnptxfsiz, nptxfifosize.d32); -+ -+ DWC_DEBUGPL(DBG_CIL, "new gnptxfsiz=%08x\n", -+ dwc_read_reg32(&global_regs->gnptxfsiz)); -+ -+ /**@todo NGS: Fix Periodic FIFO Sizing! */ -+ /* -+ * Periodic Tx FIFOs These FIFOs are numbered from 1 to 15. -+ * Indexes of the FIFO size module parameters in the -+ * dev_perio_tx_fifo_size array and the FIFO size registers in -+ * the dptxfsiz array run from 0 to 14. 
-+ */ -+ /** @todo Finish debug of this */ -+ ptxfifosize.b.startaddr = nptxfifosize.b.startaddr + nptxfifosize.b.depth; -+ for (i=0; i < core_if->hwcfg4.b.num_dev_perio_in_ep; i++) -+ { -+ ptxfifosize.b.depth = params->dev_perio_tx_fifo_size[i]; -+ DWC_DEBUGPL(DBG_CIL, "initial dptxfsiz_dieptxf[%d]=%08x\n", i, -+ dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i])); -+ dwc_write_reg32(&global_regs->dptxfsiz_dieptxf[i], -+ ptxfifosize.d32); -+ DWC_DEBUGPL(DBG_CIL, "new dptxfsiz_dieptxf[%d]=%08x\n", i, -+ dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i])); -+ ptxfifosize.b.startaddr += ptxfifosize.b.depth; -+ } -+ } -+ else { -+ /* -+ * Tx FIFOs These FIFOs are numbered from 1 to 15. -+ * Indexes of the FIFO size module parameters in the -+ * dev_tx_fifo_size array and the FIFO size registers in -+ * the dptxfsiz_dieptxf array run from 0 to 14. -+ */ -+ -+ -+ /* Non-periodic Tx FIFO */ -+ DWC_DEBUGPL(DBG_CIL, "initial gnptxfsiz=%08x\n", -+ dwc_read_reg32(&global_regs->gnptxfsiz)); -+ -+ nptxfifosize.b.depth = params->dev_nperio_tx_fifo_size; -+ nptxfifosize.b.startaddr = params->dev_rx_fifo_size; -+ -+ dwc_write_reg32(&global_regs->gnptxfsiz, nptxfifosize.d32); -+ -+ DWC_DEBUGPL(DBG_CIL, "new gnptxfsiz=%08x\n", -+ dwc_read_reg32(&global_regs->gnptxfsiz)); -+ -+ txfifosize.b.startaddr = nptxfifosize.b.startaddr + nptxfifosize.b.depth; -+ /* -+ Modify by kaiker ,for RT3052 device mode config -+ -+ In RT3052,Since the _core_if->hwcfg4.b.num_dev_perio_in_ep is -+ configed to 0 so these TX_FIF0 not config.IN EP will can't -+ more than 1 if not modify it. -+ -+ */ -+#if 1 -+ for (i=1 ; i <= dev_if->num_in_eps; i++) -+#else -+ for (i=1; i < _core_if->hwcfg4.b.num_dev_perio_in_ep; i++) -+#endif -+ { -+ -+ txfifosize.b.depth = params->dev_tx_fifo_size[i]; -+ -+ DWC_DEBUGPL(DBG_CIL, "initial dptxfsiz_dieptxf[%d]=%08x\n", i, -+ dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i])); -+ -+ dwc_write_reg32(&global_regs->dptxfsiz_dieptxf[i-1], -+ txfifosize.d32); -+ -+ DWC_DEBUGPL(DBG_CIL, "new dptxfsiz_dieptxf[%d]=%08x\n", i, -+ dwc_read_reg32(&global_regs->dptxfsiz_dieptxf[i-1])); -+ -+ txfifosize.b.startaddr += txfifosize.b.depth; -+ } -+ } -+ } -+ /* Flush the FIFOs */ -+ dwc_otg_flush_tx_fifo(core_if, 0x10); /* all Tx FIFOs */ -+ dwc_otg_flush_rx_fifo(core_if); -+ -+ /* Flush the Learning Queue. */ -+ resetctl.b.intknqflsh = 1; -+ dwc_write_reg32(&core_if->core_global_regs->grstctl, resetctl.d32); -+ -+ /* Clear all pending Device Interrupts */ -+ -+ if(core_if->multiproc_int_enable) { -+ } -+ -+ /** @todo - if the condition needed to be checked -+ * or in any case all pending interrutps should be cleared? 
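The FIFO programming above lays the FIFOs out back to back: the Rx FIFO starts at offset 0, the non-periodic Tx FIFO starts where it ends, and every further Tx FIFO starts where the previous one ends. A runnable sketch of that bookkeeping with made-up word counts, purely for illustration:

#include <stdio.h>

int main(void)
{
        unsigned int rx_depth = 256, nptx_depth = 128;
        unsigned int tx_depth[3] = { 64, 64, 32 };   /* example dedicated Tx FIFO sizes */
        unsigned int start = rx_depth + nptx_depth;  /* first Tx FIFO follows Rx + NP Tx */
        unsigned int i;

        printf("rx    start %4u depth %u\n", 0u, rx_depth);
        printf("nptx  start %4u depth %u\n", rx_depth, nptx_depth);
        for (i = 0; i < 3; i++) {
                printf("tx%u   start %4u depth %u\n", i + 1, start, tx_depth[i]);
                start += tx_depth[i];                /* startaddr += depth, as in the loops above */
        }
        return 0;
}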
-+ */ -+ if(core_if->multiproc_int_enable) { -+ for(i = 0; i < core_if->dev_if->num_in_eps; ++i) { -+ dwc_write_reg32(&dev_if->dev_global_regs->diepeachintmsk[i], 0); -+ } -+ -+ for(i = 0; i < core_if->dev_if->num_out_eps; ++i) { -+ dwc_write_reg32(&dev_if->dev_global_regs->doepeachintmsk[i], 0); -+ } -+ -+ dwc_write_reg32(&dev_if->dev_global_regs->deachint, 0xFFFFFFFF); -+ dwc_write_reg32(&dev_if->dev_global_regs->deachintmsk, 0); -+ } else { -+ dwc_write_reg32(&dev_if->dev_global_regs->diepmsk, 0); -+ dwc_write_reg32(&dev_if->dev_global_regs->doepmsk, 0); -+ dwc_write_reg32(&dev_if->dev_global_regs->daint, 0xFFFFFFFF); -+ dwc_write_reg32(&dev_if->dev_global_regs->daintmsk, 0); -+ } -+ -+ for (i=0; i <= dev_if->num_in_eps; i++) -+ { -+ depctl_data_t depctl; -+ depctl.d32 = dwc_read_reg32(&dev_if->in_ep_regs[i]->diepctl); -+ if (depctl.b.epena) { -+ depctl.d32 = 0; -+ depctl.b.epdis = 1; -+ depctl.b.snak = 1; -+ } -+ else { -+ depctl.d32 = 0; -+ } -+ -+ dwc_write_reg32(&dev_if->in_ep_regs[i]->diepctl, depctl.d32); -+ -+ -+ dwc_write_reg32(&dev_if->in_ep_regs[i]->dieptsiz, 0); -+ dwc_write_reg32(&dev_if->in_ep_regs[i]->diepdma, 0); -+ dwc_write_reg32(&dev_if->in_ep_regs[i]->diepint, 0xFF); -+ } -+ -+ for (i=0; i <= dev_if->num_out_eps; i++) -+ { -+ depctl_data_t depctl; -+ depctl.d32 = dwc_read_reg32(&dev_if->out_ep_regs[i]->doepctl); -+ if (depctl.b.epena) { -+ depctl.d32 = 0; -+ depctl.b.epdis = 1; -+ depctl.b.snak = 1; -+ } -+ else { -+ depctl.d32 = 0; -+ } -+ -+ dwc_write_reg32(&dev_if->out_ep_regs[i]->doepctl, depctl.d32); -+ -+ dwc_write_reg32(&dev_if->out_ep_regs[i]->doeptsiz, 0); -+ dwc_write_reg32(&dev_if->out_ep_regs[i]->doepdma, 0); -+ dwc_write_reg32(&dev_if->out_ep_regs[i]->doepint, 0xFF); -+ } -+ -+ if(core_if->en_multiple_tx_fifo && core_if->dma_enable) { -+ dev_if->non_iso_tx_thr_en = params->thr_ctl & 0x1; -+ dev_if->iso_tx_thr_en = (params->thr_ctl >> 1) & 0x1; -+ dev_if->rx_thr_en = (params->thr_ctl >> 2) & 0x1; -+ -+ dev_if->rx_thr_length = params->rx_thr_length; -+ dev_if->tx_thr_length = params->tx_thr_length; -+ -+ dev_if->setup_desc_index = 0; -+ -+ dthrctl.d32 = 0; -+ dthrctl.b.non_iso_thr_en = dev_if->non_iso_tx_thr_en; -+ dthrctl.b.iso_thr_en = dev_if->iso_tx_thr_en; -+ dthrctl.b.tx_thr_len = dev_if->tx_thr_length; -+ dthrctl.b.rx_thr_en = dev_if->rx_thr_en; -+ dthrctl.b.rx_thr_len = dev_if->rx_thr_length; -+ -+ dwc_write_reg32(&dev_if->dev_global_regs->dtknqr3_dthrctl, dthrctl.d32); -+ -+ DWC_DEBUGPL(DBG_CIL, "Non ISO Tx Thr - %d\nISO Tx Thr - %d\nRx Thr - %d\nTx Thr Len - %d\nRx Thr Len - %d\n", -+ dthrctl.b.non_iso_thr_en, dthrctl.b.iso_thr_en, dthrctl.b.rx_thr_en, dthrctl.b.tx_thr_len, dthrctl.b.rx_thr_len); -+ -+ } -+ -+ dwc_otg_enable_device_interrupts(core_if); -+ -+ { -+ diepmsk_data_t msk = { .d32 = 0 }; -+ msk.b.txfifoundrn = 1; -+ if(core_if->multiproc_int_enable) { -+ dwc_modify_reg32(&dev_if->dev_global_regs->diepeachintmsk[0], msk.d32, msk.d32); -+ } else { -+ dwc_modify_reg32(&dev_if->dev_global_regs->diepmsk, msk.d32, msk.d32); -+ } -+ } -+ -+ -+ if(core_if->multiproc_int_enable) { -+ /* Set NAK on Babble */ -+ dctl_data_t dctl = { .d32 = 0}; -+ dctl.b.nakonbble = 1; -+ dwc_modify_reg32(&dev_if->dev_global_regs->dctl, 0, dctl.d32); -+ } -+} -+ -+/** -+ * This function enables the Host mode interrupts. 
-+ * -+ * @param core_if Programming view of DWC_otg controller -+ */ -+void dwc_otg_enable_host_interrupts(dwc_otg_core_if_t *core_if) -+{ -+ dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs; -+ gintmsk_data_t intr_mask = { .d32 = 0 }; -+ -+ DWC_DEBUGPL(DBG_CIL, "%s()\n", __func__); -+ -+ /* Disable all interrupts. */ -+ dwc_write_reg32(&global_regs->gintmsk, 0); -+ -+ /* Clear any pending interrupts. */ -+ dwc_write_reg32(&global_regs->gintsts, 0xFFFFFFFF); -+ -+ /* Enable the common interrupts */ -+ dwc_otg_enable_common_interrupts(core_if); -+ -+ /* -+ * Enable host mode interrupts without disturbing common -+ * interrupts. -+ */ -+ intr_mask.b.sofintr = 1; -+ intr_mask.b.portintr = 1; -+ intr_mask.b.hcintr = 1; -+ -+ dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, intr_mask.d32); -+} -+ -+/** -+ * This function disables the Host Mode interrupts. -+ * -+ * @param core_if Programming view of DWC_otg controller -+ */ -+void dwc_otg_disable_host_interrupts(dwc_otg_core_if_t *core_if) -+{ -+ dwc_otg_core_global_regs_t *global_regs = -+ core_if->core_global_regs; -+ gintmsk_data_t intr_mask = { .d32 = 0 }; -+ -+ DWC_DEBUGPL(DBG_CILV, "%s()\n", __func__); -+ -+ /* -+ * Disable host mode interrupts without disturbing common -+ * interrupts. -+ */ -+ intr_mask.b.sofintr = 1; -+ intr_mask.b.portintr = 1; -+ intr_mask.b.hcintr = 1; -+ intr_mask.b.ptxfempty = 1; -+ intr_mask.b.nptxfempty = 1; -+ -+ dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, 0); -+} -+ -+/** -+ * This function initializes the DWC_otg controller registers for -+ * host mode. -+ * -+ * This function flushes the Tx and Rx FIFOs and it flushes any entries in the -+ * request queues. Host channels are reset to ensure that they are ready for -+ * performing transfers. 
-+ * -+ * @param core_if Programming view of DWC_otg controller -+ * -+ */ -+void dwc_otg_core_host_init(dwc_otg_core_if_t *core_if) -+{ -+ dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs; -+ dwc_otg_host_if_t *host_if = core_if->host_if; -+ dwc_otg_core_params_t *params = core_if->core_params; -+ hprt0_data_t hprt0 = { .d32 = 0 }; -+ fifosize_data_t nptxfifosize; -+ fifosize_data_t ptxfifosize; -+ int i; -+ hcchar_data_t hcchar; -+ hcfg_data_t hcfg; -+ dwc_otg_hc_regs_t *hc_regs; -+ int num_channels; -+ gotgctl_data_t gotgctl = { .d32 = 0 }; -+ -+ DWC_DEBUGPL(DBG_CILV,"%s(%p)\n", __func__, core_if); -+ -+ /* Restart the Phy Clock */ -+ dwc_write_reg32(core_if->pcgcctl, 0); -+ -+ /* Initialize Host Configuration Register */ -+ init_fslspclksel(core_if); -+ if (core_if->core_params->speed == DWC_SPEED_PARAM_FULL) -+ { -+ hcfg.d32 = dwc_read_reg32(&host_if->host_global_regs->hcfg); -+ hcfg.b.fslssupp = 1; -+ dwc_write_reg32(&host_if->host_global_regs->hcfg, hcfg.d32); -+ } -+ -+ /* Configure data FIFO sizes */ -+ if (core_if->hwcfg2.b.dynamic_fifo && params->enable_dynamic_fifo) { -+ DWC_DEBUGPL(DBG_CIL,"Total FIFO Size=%d\n", core_if->total_fifo_size); -+ DWC_DEBUGPL(DBG_CIL,"Rx FIFO Size=%d\n", params->host_rx_fifo_size); -+ DWC_DEBUGPL(DBG_CIL,"NP Tx FIFO Size=%d\n", params->host_nperio_tx_fifo_size); -+ DWC_DEBUGPL(DBG_CIL,"P Tx FIFO Size=%d\n", params->host_perio_tx_fifo_size); -+ -+ /* Rx FIFO */ -+ DWC_DEBUGPL(DBG_CIL,"initial grxfsiz=%08x\n", dwc_read_reg32(&global_regs->grxfsiz)); -+ dwc_write_reg32(&global_regs->grxfsiz, params->host_rx_fifo_size); -+ DWC_DEBUGPL(DBG_CIL,"new grxfsiz=%08x\n", dwc_read_reg32(&global_regs->grxfsiz)); -+ -+ /* Non-periodic Tx FIFO */ -+ DWC_DEBUGPL(DBG_CIL,"initial gnptxfsiz=%08x\n", dwc_read_reg32(&global_regs->gnptxfsiz)); -+ nptxfifosize.b.depth = params->host_nperio_tx_fifo_size; -+ nptxfifosize.b.startaddr = params->host_rx_fifo_size; -+ dwc_write_reg32(&global_regs->gnptxfsiz, nptxfifosize.d32); -+ DWC_DEBUGPL(DBG_CIL,"new gnptxfsiz=%08x\n", dwc_read_reg32(&global_regs->gnptxfsiz)); -+ -+ /* Periodic Tx FIFO */ -+ DWC_DEBUGPL(DBG_CIL,"initial hptxfsiz=%08x\n", dwc_read_reg32(&global_regs->hptxfsiz)); -+ ptxfifosize.b.depth = params->host_perio_tx_fifo_size; -+ ptxfifosize.b.startaddr = nptxfifosize.b.startaddr + nptxfifosize.b.depth; -+ dwc_write_reg32(&global_regs->hptxfsiz, ptxfifosize.d32); -+ DWC_DEBUGPL(DBG_CIL,"new hptxfsiz=%08x\n", dwc_read_reg32(&global_regs->hptxfsiz)); -+ } -+ -+ /* Clear Host Set HNP Enable in the OTG Control Register */ -+ gotgctl.b.hstsethnpen = 1; -+ dwc_modify_reg32(&global_regs->gotgctl, gotgctl.d32, 0); -+ -+ /* Make sure the FIFOs are flushed. */ -+ dwc_otg_flush_tx_fifo(core_if, 0x10 /* all Tx FIFOs */); -+ dwc_otg_flush_rx_fifo(core_if); -+ -+ /* Flush out any leftover queued requests. */ -+ num_channels = core_if->core_params->host_channels; -+ for (i = 0; i < num_channels; i++) -+ { -+ hc_regs = core_if->host_if->hc_regs[i]; -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ hcchar.b.chen = 0; -+ hcchar.b.chdis = 1; -+ hcchar.b.epdir = 0; -+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); -+ } -+ -+ /* Halt all channels to put them into a known state. 
*/ -+ for (i = 0; i < num_channels; i++) -+ { -+ int count = 0; -+ hc_regs = core_if->host_if->hc_regs[i]; -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ hcchar.b.chen = 1; -+ hcchar.b.chdis = 1; -+ hcchar.b.epdir = 0; -+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); -+ DWC_DEBUGPL(DBG_HCDV, "%s: Halt channel %d\n", __func__, i); -+ do { -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ if (++count > 1000) -+ { -+ DWC_ERROR("%s: Unable to clear halt on channel %d\n", -+ __func__, i); -+ break; -+ } -+ } -+ while (hcchar.b.chen); -+ } -+ -+ /* Turn on the vbus power. */ -+ DWC_PRINT("Init: Port Power? op_state=%d\n", core_if->op_state); -+ if (core_if->op_state == A_HOST) { -+ hprt0.d32 = dwc_otg_read_hprt0(core_if); -+ DWC_PRINT("Init: Power Port (%d)\n", hprt0.b.prtpwr); -+ if (hprt0.b.prtpwr == 0) { -+ hprt0.b.prtpwr = 1; -+ dwc_write_reg32(host_if->hprt0, hprt0.d32); -+ } -+ } -+ -+ dwc_otg_enable_host_interrupts(core_if); -+} -+ -+/** -+ * Prepares a host channel for transferring packets to/from a specific -+ * endpoint. The HCCHARn register is set up with the characteristics specified -+ * in _hc. Host channel interrupts that may need to be serviced while this -+ * transfer is in progress are enabled. -+ * -+ * @param core_if Programming view of DWC_otg controller -+ * @param hc Information needed to initialize the host channel -+ */ -+void dwc_otg_hc_init(dwc_otg_core_if_t *core_if, dwc_hc_t *hc) -+{ -+ uint32_t intr_enable; -+ hcintmsk_data_t hc_intr_mask; -+ gintmsk_data_t gintmsk = { .d32 = 0 }; -+ hcchar_data_t hcchar; -+ hcsplt_data_t hcsplt; -+ -+ uint8_t hc_num = hc->hc_num; -+ dwc_otg_host_if_t *host_if = core_if->host_if; -+ dwc_otg_hc_regs_t *hc_regs = host_if->hc_regs[hc_num]; -+ -+ /* Clear old interrupt conditions for this host channel. */ -+ hc_intr_mask.d32 = 0xFFFFFFFF; -+ hc_intr_mask.b.reserved = 0; -+ dwc_write_reg32(&hc_regs->hcint, hc_intr_mask.d32); -+ -+ /* Enable channel interrupts required for this transfer. 
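The per-channel reset above sets CHEN and CHDIS together and then polls HCCHAR until CHEN drops, giving up after 1000 reads. A standalone sketch of that bounded-poll shape; the accessor and bit position below are placeholders rather than the real register layout:

#include <stdint.h>

#define EXAMPLE_HCCHAR_CHEN (1u << 31)           /* placeholder bit position */

/* Returns 0 once the channel reports halted, -1 if it never does. */
static int example_wait_channel_halted(volatile const uint32_t *hcchar_reg)
{
        int count = 0;

        while (*hcchar_reg & EXAMPLE_HCCHAR_CHEN) {
                if (++count > 1000)
                        return -1;               /* same give-up threshold as above */
        }
        return 0;
}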
*/ -+ hc_intr_mask.d32 = 0; -+ hc_intr_mask.b.chhltd = 1; -+ if (core_if->dma_enable) { -+ hc_intr_mask.b.ahberr = 1; -+ if (hc->error_state && !hc->do_split && -+ hc->ep_type != DWC_OTG_EP_TYPE_ISOC) { -+ hc_intr_mask.b.ack = 1; -+ if (hc->ep_is_in) { -+ hc_intr_mask.b.datatglerr = 1; -+ if (hc->ep_type != DWC_OTG_EP_TYPE_INTR) { -+ hc_intr_mask.b.nak = 1; -+ } -+ } -+ } -+ } -+ else { -+ switch (hc->ep_type) { -+ case DWC_OTG_EP_TYPE_CONTROL: -+ case DWC_OTG_EP_TYPE_BULK: -+ hc_intr_mask.b.xfercompl = 1; -+ hc_intr_mask.b.stall = 1; -+ hc_intr_mask.b.xacterr = 1; -+ hc_intr_mask.b.datatglerr = 1; -+ if (hc->ep_is_in) { -+ hc_intr_mask.b.bblerr = 1; -+ } -+ else { -+ hc_intr_mask.b.nak = 1; -+ hc_intr_mask.b.nyet = 1; -+ if (hc->do_ping) { -+ hc_intr_mask.b.ack = 1; -+ } -+ } -+ -+ if (hc->do_split) { -+ hc_intr_mask.b.nak = 1; -+ if (hc->complete_split) { -+ hc_intr_mask.b.nyet = 1; -+ } -+ else { -+ hc_intr_mask.b.ack = 1; -+ } -+ } -+ -+ if (hc->error_state) { -+ hc_intr_mask.b.ack = 1; -+ } -+ break; -+ case DWC_OTG_EP_TYPE_INTR: -+ hc_intr_mask.b.xfercompl = 1; -+ hc_intr_mask.b.nak = 1; -+ hc_intr_mask.b.stall = 1; -+ hc_intr_mask.b.xacterr = 1; -+ hc_intr_mask.b.datatglerr = 1; -+ hc_intr_mask.b.frmovrun = 1; -+ -+ if (hc->ep_is_in) { -+ hc_intr_mask.b.bblerr = 1; -+ } -+ if (hc->error_state) { -+ hc_intr_mask.b.ack = 1; -+ } -+ if (hc->do_split) { -+ if (hc->complete_split) { -+ hc_intr_mask.b.nyet = 1; -+ } -+ else { -+ hc_intr_mask.b.ack = 1; -+ } -+ } -+ break; -+ case DWC_OTG_EP_TYPE_ISOC: -+ hc_intr_mask.b.xfercompl = 1; -+ hc_intr_mask.b.frmovrun = 1; -+ hc_intr_mask.b.ack = 1; -+ -+ if (hc->ep_is_in) { -+ hc_intr_mask.b.xacterr = 1; -+ hc_intr_mask.b.bblerr = 1; -+ } -+ break; -+ } -+ } -+ dwc_write_reg32(&hc_regs->hcintmsk, hc_intr_mask.d32); -+ -+// if(hc->ep_type == DWC_OTG_EP_TYPE_BULK && !hc->ep_is_in) -+// hc->max_packet = 512; -+ /* Enable the top level host channel interrupt. */ -+ intr_enable = (1 << hc_num); -+ dwc_modify_reg32(&host_if->host_global_regs->haintmsk, 0, intr_enable); -+ -+ /* Make sure host channel interrupts are enabled. */ -+ gintmsk.b.hcintr = 1; -+ dwc_modify_reg32(&core_if->core_global_regs->gintmsk, 0, gintmsk.d32); -+ -+ /* -+ * Program the HCCHARn register with the endpoint characteristics for -+ * the current transfer. -+ */ -+ hcchar.d32 = 0; -+ hcchar.b.devaddr = hc->dev_addr; -+ hcchar.b.epnum = hc->ep_num; -+ hcchar.b.epdir = hc->ep_is_in; -+ hcchar.b.lspddev = (hc->speed == DWC_OTG_EP_SPEED_LOW); -+ hcchar.b.eptype = hc->ep_type; -+ hcchar.b.mps = hc->max_packet; -+ -+ dwc_write_reg32(&host_if->hc_regs[hc_num]->hcchar, hcchar.d32); -+ -+ DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num); -+ DWC_DEBUGPL(DBG_HCDV, " Dev Addr: %d\n", hcchar.b.devaddr); -+ DWC_DEBUGPL(DBG_HCDV, " Ep Num: %d\n", hcchar.b.epnum); -+ DWC_DEBUGPL(DBG_HCDV, " Is In: %d\n", hcchar.b.epdir); -+ DWC_DEBUGPL(DBG_HCDV, " Is Low Speed: %d\n", hcchar.b.lspddev); -+ DWC_DEBUGPL(DBG_HCDV, " Ep Type: %d\n", hcchar.b.eptype); -+ DWC_DEBUGPL(DBG_HCDV, " Max Pkt: %d\n", hcchar.b.mps); -+ DWC_DEBUGPL(DBG_HCDV, " Multi Cnt: %d\n", hcchar.b.multicnt); -+ -+ /* -+ * Program the HCSPLIT register for SPLITs -+ */ -+ hcsplt.d32 = 0; -+ if (hc->do_split) { -+ DWC_DEBUGPL(DBG_HCDV, "Programming HC %d with split --> %s\n", hc->hc_num, -+ hc->complete_split ? 
"CSPLIT" : "SSPLIT"); -+ hcsplt.b.compsplt = hc->complete_split; -+ hcsplt.b.xactpos = hc->xact_pos; -+ hcsplt.b.hubaddr = hc->hub_addr; -+ hcsplt.b.prtaddr = hc->port_addr; -+ DWC_DEBUGPL(DBG_HCDV, " comp split %d\n", hc->complete_split); -+ DWC_DEBUGPL(DBG_HCDV, " xact pos %d\n", hc->xact_pos); -+ DWC_DEBUGPL(DBG_HCDV, " hub addr %d\n", hc->hub_addr); -+ DWC_DEBUGPL(DBG_HCDV, " port addr %d\n", hc->port_addr); -+ DWC_DEBUGPL(DBG_HCDV, " is_in %d\n", hc->ep_is_in); -+ DWC_DEBUGPL(DBG_HCDV, " Max Pkt: %d\n", hcchar.b.mps); -+ DWC_DEBUGPL(DBG_HCDV, " xferlen: %d\n", hc->xfer_len); -+ } -+ dwc_write_reg32(&host_if->hc_regs[hc_num]->hcsplt, hcsplt.d32); -+ -+} -+ -+/** -+ * Attempts to halt a host channel. This function should only be called in -+ * Slave mode or to abort a transfer in either Slave mode or DMA mode. Under -+ * normal circumstances in DMA mode, the controller halts the channel when the -+ * transfer is complete or a condition occurs that requires application -+ * intervention. -+ * -+ * In slave mode, checks for a free request queue entry, then sets the Channel -+ * Enable and Channel Disable bits of the Host Channel Characteristics -+ * register of the specified channel to intiate the halt. If there is no free -+ * request queue entry, sets only the Channel Disable bit of the HCCHARn -+ * register to flush requests for this channel. In the latter case, sets a -+ * flag to indicate that the host channel needs to be halted when a request -+ * queue slot is open. -+ * -+ * In DMA mode, always sets the Channel Enable and Channel Disable bits of the -+ * HCCHARn register. The controller ensures there is space in the request -+ * queue before submitting the halt request. -+ * -+ * Some time may elapse before the core flushes any posted requests for this -+ * host channel and halts. The Channel Halted interrupt handler completes the -+ * deactivation of the host channel. -+ * -+ * @param core_if Controller register interface. -+ * @param hc Host channel to halt. -+ * @param halt_status Reason for halting the channel. -+ */ -+void dwc_otg_hc_halt(dwc_otg_core_if_t *core_if, -+ dwc_hc_t *hc, -+ dwc_otg_halt_status_e halt_status) -+{ -+ gnptxsts_data_t nptxsts; -+ hptxsts_data_t hptxsts; -+ hcchar_data_t hcchar; -+ dwc_otg_hc_regs_t *hc_regs; -+ dwc_otg_core_global_regs_t *global_regs; -+ dwc_otg_host_global_regs_t *host_global_regs; -+ -+ hc_regs = core_if->host_if->hc_regs[hc->hc_num]; -+ global_regs = core_if->core_global_regs; -+ host_global_regs = core_if->host_if->host_global_regs; -+ -+ WARN_ON(halt_status == DWC_OTG_HC_XFER_NO_HALT_STATUS); -+ -+ if (halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE || -+ halt_status == DWC_OTG_HC_XFER_AHB_ERR) { -+ /* -+ * Disable all channel interrupts except Ch Halted. The QTD -+ * and QH state associated with this transfer has been cleared -+ * (in the case of URB_DEQUEUE), so the channel needs to be -+ * shut down carefully to prevent crashes. -+ */ -+ hcintmsk_data_t hcintmsk; -+ hcintmsk.d32 = 0; -+ hcintmsk.b.chhltd = 1; -+ dwc_write_reg32(&hc_regs->hcintmsk, hcintmsk.d32); -+ -+ /* -+ * Make sure no other interrupts besides halt are currently -+ * pending. Handling another interrupt could cause a crash due -+ * to the QTD and QH state. -+ */ -+ dwc_write_reg32(&hc_regs->hcint, ~hcintmsk.d32); -+ -+ /* -+ * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR -+ * even if the channel was already halted for some other -+ * reason. 
-+ */ -+ hc->halt_status = halt_status; -+ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ if (hcchar.b.chen == 0) { -+ /* -+ * The channel is either already halted or it hasn't -+ * started yet. In DMA mode, the transfer may halt if -+ * it finishes normally or a condition occurs that -+ * requires driver intervention. Don't want to halt -+ * the channel again. In either Slave or DMA mode, -+ * it's possible that the transfer has been assigned -+ * to a channel, but not started yet when an URB is -+ * dequeued. Don't want to halt a channel that hasn't -+ * started yet. -+ */ -+ return; -+ } -+ } -+ -+ if (hc->halt_pending) { -+ /* -+ * A halt has already been issued for this channel. This might -+ * happen when a transfer is aborted by a higher level in -+ * the stack. -+ */ -+#ifdef DEBUG -+ DWC_PRINT("*** %s: Channel %d, _hc->halt_pending already set ***\n", -+ __func__, hc->hc_num); -+ -+/* dwc_otg_dump_global_registers(core_if); */ -+/* dwc_otg_dump_host_registers(core_if); */ -+#endif -+ return; -+ } -+ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ hcchar.b.chen = 1; -+ hcchar.b.chdis = 1; -+ -+ if (!core_if->dma_enable) { -+ /* Check for space in the request queue to issue the halt. */ -+ if (hc->ep_type == DWC_OTG_EP_TYPE_CONTROL || -+ hc->ep_type == DWC_OTG_EP_TYPE_BULK) { -+ nptxsts.d32 = dwc_read_reg32(&global_regs->gnptxsts); -+ if (nptxsts.b.nptxqspcavail == 0) { -+ hcchar.b.chen = 0; -+ } -+ } -+ else { -+ hptxsts.d32 = dwc_read_reg32(&host_global_regs->hptxsts); -+ if ((hptxsts.b.ptxqspcavail == 0) || (core_if->queuing_high_bandwidth)) { -+ hcchar.b.chen = 0; -+ } -+ } -+ } -+ -+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); -+ -+ hc->halt_status = halt_status; -+ -+ if (hcchar.b.chen) { -+ hc->halt_pending = 1; -+ hc->halt_on_queue = 0; -+ } -+ else { -+ hc->halt_on_queue = 1; -+ } -+ -+ DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num); -+ DWC_DEBUGPL(DBG_HCDV, " hcchar: 0x%08x\n", hcchar.d32); -+ DWC_DEBUGPL(DBG_HCDV, " halt_pending: %d\n", hc->halt_pending); -+ DWC_DEBUGPL(DBG_HCDV, " halt_on_queue: %d\n", hc->halt_on_queue); -+ DWC_DEBUGPL(DBG_HCDV, " halt_status: %d\n", hc->halt_status); -+ -+ return; -+} -+ -+/** -+ * Clears the transfer state for a host channel. This function is normally -+ * called after a transfer is done and the host channel is being released. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param hc Identifies the host channel to clean up. -+ */ -+void dwc_otg_hc_cleanup(dwc_otg_core_if_t *core_if, dwc_hc_t *hc) -+{ -+ dwc_otg_hc_regs_t *hc_regs; -+ -+ hc->xfer_started = 0; -+ -+ /* -+ * Clear channel interrupt enables and any unhandled channel interrupt -+ * conditions. -+ */ -+ hc_regs = core_if->host_if->hc_regs[hc->hc_num]; -+ dwc_write_reg32(&hc_regs->hcintmsk, 0); -+ dwc_write_reg32(&hc_regs->hcint, 0xFFFFFFFF); -+ -+#ifdef DEBUG -+ del_timer(&core_if->hc_xfer_timer[hc->hc_num]); -+ { -+ hcchar_data_t hcchar; -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ if (hcchar.b.chdis) { -+ DWC_WARN("%s: chdis set, channel %d, hcchar 0x%08x\n", -+ __func__, hc->hc_num, hcchar.d32); -+ } -+ } -+#endif -+} -+ -+/** -+ * Sets the channel property that indicates in which frame a periodic transfer -+ * should occur. This is always set to the _next_ frame. This function has no -+ * effect on non-periodic transfers. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param hc Identifies the host channel to set up and its properties. 
-+ * @param hcchar Current value of the HCCHAR register for the specified host -+ * channel. -+ */ -+static inline void hc_set_even_odd_frame(dwc_otg_core_if_t *core_if, -+ dwc_hc_t *hc, -+ hcchar_data_t *hcchar) -+{ -+ if (hc->ep_type == DWC_OTG_EP_TYPE_INTR || -+ hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { -+ hfnum_data_t hfnum; -+ hfnum.d32 = dwc_read_reg32(&core_if->host_if->host_global_regs->hfnum); -+ -+ /* 1 if _next_ frame is odd, 0 if it's even */ -+ hcchar->b.oddfrm = (hfnum.b.frnum & 0x1) ? 0 : 1; -+#ifdef DEBUG -+ if (hc->ep_type == DWC_OTG_EP_TYPE_INTR && hc->do_split && !hc->complete_split) { -+ switch (hfnum.b.frnum & 0x7) { -+ case 7: -+ core_if->hfnum_7_samples++; -+ core_if->hfnum_7_frrem_accum += hfnum.b.frrem; -+ break; -+ case 0: -+ core_if->hfnum_0_samples++; -+ core_if->hfnum_0_frrem_accum += hfnum.b.frrem; -+ break; -+ default: -+ core_if->hfnum_other_samples++; -+ core_if->hfnum_other_frrem_accum += hfnum.b.frrem; -+ break; -+ } -+ } -+#endif -+ } -+} -+ -+#ifdef DEBUG -+static void hc_xfer_timeout(unsigned long ptr) -+{ -+ hc_xfer_info_t *xfer_info = (hc_xfer_info_t *)ptr; -+ int hc_num = xfer_info->hc->hc_num; -+ DWC_WARN("%s: timeout on channel %d\n", __func__, hc_num); -+ DWC_WARN(" start_hcchar_val 0x%08x\n", xfer_info->core_if->start_hcchar_val[hc_num]); -+} -+#endif -+ -+/* -+ * This function does the setup for a data transfer for a host channel and -+ * starts the transfer. May be called in either Slave mode or DMA mode. In -+ * Slave mode, the caller must ensure that there is sufficient space in the -+ * request queue and Tx Data FIFO. -+ * -+ * For an OUT transfer in Slave mode, it loads a data packet into the -+ * appropriate FIFO. If necessary, additional data packets will be loaded in -+ * the Host ISR. -+ * -+ * For an IN transfer in Slave mode, a data packet is requested. The data -+ * packets are unloaded from the Rx FIFO in the Host ISR. If necessary, -+ * additional data packets are requested in the Host ISR. -+ * -+ * For a PING transfer in Slave mode, the Do Ping bit is set in the egards, -+ * -+ * Steven -+ * -+ * register along with a packet count of 1 and the channel is enabled. This -+ * causes a single PING transaction to occur. Other fields in HCTSIZ are -+ * simply set to 0 since no data transfer occurs in this case. -+ * -+ * For a PING transfer in DMA mode, the HCTSIZ register is initialized with -+ * all the information required to perform the subsequent data transfer. In -+ * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the -+ * controller performs the entire PING protocol, then starts the data -+ * transfer. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param hc Information needed to initialize the host channel. The xfer_len -+ * value may be reduced to accommodate the max widths of the XferSize and -+ * PktCnt fields in the HCTSIZn register. The multi_count value may be changed -+ * to reflect the final xfer_len value. 
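hc_set_even_odd_frame() above schedules periodic transfers for the next (micro)frame, so the odd-frame bit it programs is the inverse of the current frame number's low bit. The one-liner below restates only that rule and is not driver code:

/* 1 means "run in an odd frame": an even current frame is followed by an odd one. */
static inline unsigned int example_oddfrm_for_next_frame(unsigned int current_frnum)
{
        return (current_frnum & 0x1) ? 0u : 1u;
}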
-+ */ -+void dwc_otg_hc_start_transfer(dwc_otg_core_if_t *core_if, dwc_hc_t *hc) -+{ -+ hcchar_data_t hcchar; -+ hctsiz_data_t hctsiz; -+ uint16_t num_packets; -+ uint32_t max_hc_xfer_size = core_if->core_params->max_transfer_size; -+ uint16_t max_hc_pkt_count = core_if->core_params->max_packet_count; -+ dwc_otg_hc_regs_t *hc_regs = core_if->host_if->hc_regs[hc->hc_num]; -+ -+ hctsiz.d32 = 0; -+ -+ if (hc->do_ping) { -+ if (!core_if->dma_enable) { -+ dwc_otg_hc_do_ping(core_if, hc); -+ hc->xfer_started = 1; -+ return; -+ } -+ else { -+ hctsiz.b.dopng = 1; -+ } -+ } -+ -+ if (hc->do_split) { -+ num_packets = 1; -+ -+ if (hc->complete_split && !hc->ep_is_in) { -+ /* For CSPLIT OUT Transfer, set the size to 0 so the -+ * core doesn't expect any data written to the FIFO */ -+ hc->xfer_len = 0; -+ } -+ else if (hc->ep_is_in || (hc->xfer_len > hc->max_packet)) { -+ hc->xfer_len = hc->max_packet; -+ } -+ else if (!hc->ep_is_in && (hc->xfer_len > 188)) { -+ hc->xfer_len = 188; -+ } -+ -+ hctsiz.b.xfersize = hc->xfer_len; -+ } -+ else { -+ /* -+ * Ensure that the transfer length and packet count will fit -+ * in the widths allocated for them in the HCTSIZn register. -+ */ -+ if (hc->ep_type == DWC_OTG_EP_TYPE_INTR || -+ hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { -+ /* -+ * Make sure the transfer size is no larger than one -+ * (micro)frame's worth of data. (A check was done -+ * when the periodic transfer was accepted to ensure -+ * that a (micro)frame's worth of data can be -+ * programmed into a channel.) -+ */ -+ uint32_t max_periodic_len = hc->multi_count * hc->max_packet; -+ if (hc->xfer_len > max_periodic_len) { -+ hc->xfer_len = max_periodic_len; -+ } -+ else { -+ } -+ -+ } -+ else if (hc->xfer_len > max_hc_xfer_size) { -+ /* Make sure that xfer_len is a multiple of max packet size. */ -+ hc->xfer_len = max_hc_xfer_size - hc->max_packet + 1; -+ } -+ -+ if (hc->xfer_len > 0) { -+ num_packets = (hc->xfer_len + hc->max_packet - 1) / hc->max_packet; -+ if (num_packets > max_hc_pkt_count) { -+ num_packets = max_hc_pkt_count; -+ hc->xfer_len = num_packets * hc->max_packet; -+ } -+ } -+ else { -+ /* Need 1 packet for transfer length of 0. */ -+ num_packets = 1; -+ } -+ -+ if (hc->ep_is_in) { -+ /* Always program an integral # of max packets for IN transfers. */ -+ hc->xfer_len = num_packets * hc->max_packet; -+ } -+ -+ if (hc->ep_type == DWC_OTG_EP_TYPE_INTR || -+ hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { -+ /* -+ * Make sure that the multi_count field matches the -+ * actual transfer length. -+ */ -+ hc->multi_count = num_packets; -+ } -+ -+ if (hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { -+ /* Set up the initial PID for the transfer. 
*/ -+ if (hc->speed == DWC_OTG_EP_SPEED_HIGH) { -+ if (hc->ep_is_in) { -+ if (hc->multi_count == 1) { -+ hc->data_pid_start = DWC_OTG_HC_PID_DATA0; -+ } -+ else if (hc->multi_count == 2) { -+ hc->data_pid_start = DWC_OTG_HC_PID_DATA1; -+ } -+ else { -+ hc->data_pid_start = DWC_OTG_HC_PID_DATA2; -+ } -+ } -+ else { -+ if (hc->multi_count == 1) { -+ hc->data_pid_start = DWC_OTG_HC_PID_DATA0; -+ } -+ else { -+ hc->data_pid_start = DWC_OTG_HC_PID_MDATA; -+ } -+ } -+ } -+ else { -+ hc->data_pid_start = DWC_OTG_HC_PID_DATA0; -+ } -+ } -+ -+ hctsiz.b.xfersize = hc->xfer_len; -+ } -+ -+ hc->start_pkt_count = num_packets; -+ hctsiz.b.pktcnt = num_packets; -+ hctsiz.b.pid = hc->data_pid_start; -+ dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32); -+ -+ DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num); -+ DWC_DEBUGPL(DBG_HCDV, " Xfer Size: %d\n", hctsiz.b.xfersize); -+ DWC_DEBUGPL(DBG_HCDV, " Num Pkts: %d\n", hctsiz.b.pktcnt); -+ DWC_DEBUGPL(DBG_HCDV, " Start PID: %d\n", hctsiz.b.pid); -+ -+ if (core_if->dma_enable) { -+#if defined (CONFIG_DWC_OTG_HOST_ONLY) -+ if ((uint32_t)hc->xfer_buff & 0x3) { -+ /* non DWORD-aligned buffer case*/ -+ if(!hc->qh->dw_align_buf) { -+ hc->qh->dw_align_buf = -+ dma_alloc_coherent(NULL, -+ core_if->core_params->max_transfer_size, -+ &hc->qh->dw_align_buf_dma, -+ GFP_ATOMIC | GFP_DMA); -+ if (!hc->qh->dw_align_buf) { -+ -+ DWC_ERROR("%s: Failed to allocate memory to handle " -+ "non-dword aligned buffer case\n", __func__); -+ return; -+ } -+ -+ } -+ if (!hc->ep_is_in) { -+ memcpy(hc->qh->dw_align_buf, phys_to_virt((uint32_t)hc->xfer_buff), hc->xfer_len); -+ } -+ -+ dwc_write_reg32(&hc_regs->hcdma, hc->qh->dw_align_buf_dma); -+ } -+ else -+#endif -+ dwc_write_reg32(&hc_regs->hcdma, (uint32_t)hc->xfer_buff); -+ } -+ -+ /* Start the split */ -+ if (hc->do_split) { -+ hcsplt_data_t hcsplt; -+ hcsplt.d32 = dwc_read_reg32 (&hc_regs->hcsplt); -+ hcsplt.b.spltena = 1; -+ dwc_write_reg32(&hc_regs->hcsplt, hcsplt.d32); -+ } -+ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ hcchar.b.multicnt = hc->multi_count; -+ hc_set_even_odd_frame(core_if, hc, &hcchar); -+#ifdef DEBUG -+ core_if->start_hcchar_val[hc->hc_num] = hcchar.d32; -+ if (hcchar.b.chdis) { -+ DWC_WARN("%s: chdis set, channel %d, hcchar 0x%08x\n", -+ __func__, hc->hc_num, hcchar.d32); -+ } -+#endif -+ -+ /* Set host channel enable after all other setup is complete. */ -+ hcchar.b.chen = 1; -+ hcchar.b.chdis = 0; -+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); -+ -+ hc->xfer_started = 1; -+ hc->requests++; -+ -+ if (!core_if->dma_enable && -+ !hc->ep_is_in && hc->xfer_len > 0) { -+ /* Load OUT packet into the appropriate Tx FIFO. */ -+ dwc_otg_hc_write_packet(core_if, hc); -+ } -+ -+#ifdef DEBUG -+ /* Start a timer for this transfer. */ -+ core_if->hc_xfer_timer[hc->hc_num].function = hc_xfer_timeout; -+ core_if->hc_xfer_info[hc->hc_num].core_if = core_if; -+ core_if->hc_xfer_info[hc->hc_num].hc = hc; -+ core_if->hc_xfer_timer[hc->hc_num].data = (unsigned long)(&core_if->hc_xfer_info[hc->hc_num]); -+ core_if->hc_xfer_timer[hc->hc_num].expires = jiffies + (HZ*10); -+ add_timer(&core_if->hc_xfer_timer[hc->hc_num]); -+#endif -+} -+ -+/** -+ * This function continues a data transfer that was started by previous call -+ * to dwc_otg_hc_start_transfer. The caller must ensure there is -+ * sufficient space in the request queue and Tx Data FIFO. This function -+ * should only be called in Slave mode. In DMA mode, the controller acts -+ * autonomously to complete transfers programmed to a host channel. 
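The isochronous start-PID selection in dwc_otg_hc_start_transfer() above depends only on the transfer direction and multi_count (packets per microframe). A compact stand-alone restatement (the enum and helper are illustrative, not driver symbols):

enum start_pid { PID_DATA0, PID_DATA1, PID_DATA2, PID_MDATA };

static enum start_pid isoc_start_pid(int ep_is_in, int multi_count)
{
	if (ep_is_in)
		return (multi_count == 1) ? PID_DATA0 :
		       (multi_count == 2) ? PID_DATA1 : PID_DATA2;
	return (multi_count == 1) ? PID_DATA0 : PID_MDATA;
}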
-+ * -+ * For an OUT transfer, a new data packet is loaded into the appropriate FIFO -+ * if there is any data remaining to be queued. For an IN transfer, another -+ * data packet is always requested. For the SETUP phase of a control transfer, -+ * this function does nothing. -+ * -+ * @return 1 if a new request is queued, 0 if no more requests are required -+ * for this transfer. -+ */ -+int dwc_otg_hc_continue_transfer(dwc_otg_core_if_t *core_if, dwc_hc_t *hc) -+{ -+ DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num); -+ -+ if (hc->do_split) { -+ /* SPLITs always queue just once per channel */ -+ return 0; -+ } -+ else if (hc->data_pid_start == DWC_OTG_HC_PID_SETUP) { -+ /* SETUPs are queued only once since they can't be NAKed. */ -+ return 0; -+ } -+ else if (hc->ep_is_in) { -+ /* -+ * Always queue another request for other IN transfers. If -+ * back-to-back INs are issued and NAKs are received for both, -+ * the driver may still be processing the first NAK when the -+ * second NAK is received. When the interrupt handler clears -+ * the NAK interrupt for the first NAK, the second NAK will -+ * not be seen. So we can't depend on the NAK interrupt -+ * handler to requeue a NAKed request. Instead, IN requests -+ * are issued each time this function is called. When the -+ * transfer completes, the extra requests for the channel will -+ * be flushed. -+ */ -+ hcchar_data_t hcchar; -+ dwc_otg_hc_regs_t *hc_regs = core_if->host_if->hc_regs[hc->hc_num]; -+ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ hc_set_even_odd_frame(core_if, hc, &hcchar); -+ hcchar.b.chen = 1; -+ hcchar.b.chdis = 0; -+ DWC_DEBUGPL(DBG_HCDV, " IN xfer: hcchar = 0x%08x\n", hcchar.d32); -+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); -+ hc->requests++; -+ return 1; -+ } -+ else { -+ /* OUT transfers. */ -+ if (hc->xfer_count < hc->xfer_len) { -+ if (hc->ep_type == DWC_OTG_EP_TYPE_INTR || -+ hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { -+ hcchar_data_t hcchar; -+ dwc_otg_hc_regs_t *hc_regs; -+ hc_regs = core_if->host_if->hc_regs[hc->hc_num]; -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ hc_set_even_odd_frame(core_if, hc, &hcchar); -+ } -+ -+ /* Load OUT packet into the appropriate Tx FIFO. */ -+ dwc_otg_hc_write_packet(core_if, hc); -+ hc->requests++; -+ return 1; -+ } -+ else { -+ return 0; -+ } -+ } -+} -+ -+/** -+ * Starts a PING transfer. This function should only be called in Slave mode. -+ * The Do Ping bit is set in the HCTSIZ register, then the channel is enabled. -+ */ -+void dwc_otg_hc_do_ping(dwc_otg_core_if_t *core_if, dwc_hc_t *hc) -+{ -+ hcchar_data_t hcchar; -+ hctsiz_data_t hctsiz; -+ dwc_otg_hc_regs_t *hc_regs = core_if->host_if->hc_regs[hc->hc_num]; -+ -+ DWC_DEBUGPL(DBG_HCDV, "%s: Channel %d\n", __func__, hc->hc_num); -+ -+ hctsiz.d32 = 0; -+ hctsiz.b.dopng = 1; -+ hctsiz.b.pktcnt = 1; -+ dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32); -+ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ hcchar.b.chen = 1; -+ hcchar.b.chdis = 0; -+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); -+} -+ -+/* -+ * This function writes a packet into the Tx FIFO associated with the Host -+ * Channel. For a channel associated with a non-periodic EP, the non-periodic -+ * Tx FIFO is written. For a channel associated with a periodic EP, the -+ * periodic Tx FIFO is written. This function should only be called in Slave -+ * mode. -+ * -+ * Upon return the xfer_buff and xfer_count fields in _hc are incremented by -+ * then number of bytes written to the Tx FIFO. 
-+ */ -+void dwc_otg_hc_write_packet(dwc_otg_core_if_t *core_if, dwc_hc_t *hc) -+{ -+ uint32_t i; -+ uint32_t remaining_count; -+ uint32_t byte_count; -+ uint32_t dword_count; -+ -+ uint32_t *data_buff = (uint32_t *)(hc->xfer_buff); -+ uint32_t *data_fifo = core_if->data_fifo[hc->hc_num]; -+ -+ remaining_count = hc->xfer_len - hc->xfer_count; -+ if (remaining_count > hc->max_packet) { -+ byte_count = hc->max_packet; -+ } -+ else { -+ byte_count = remaining_count; -+ } -+ -+ dword_count = (byte_count + 3) / 4; -+ -+ if ((((unsigned long)data_buff) & 0x3) == 0) { -+ /* xfer_buff is DWORD aligned. */ -+ for (i = 0; i < dword_count; i++, data_buff++) -+ { -+ dwc_write_reg32(data_fifo, *data_buff); -+ } -+ } -+ else { -+ /* xfer_buff is not DWORD aligned. */ -+ for (i = 0; i < dword_count; i++, data_buff++) -+ { -+ dwc_write_reg32(data_fifo, get_unaligned(data_buff)); -+ } -+ } -+ -+ hc->xfer_count += byte_count; -+ hc->xfer_buff += byte_count; -+} -+ -+/** -+ * Gets the current USB frame number. This is the frame number from the last -+ * SOF packet. -+ */ -+uint32_t dwc_otg_get_frame_number(dwc_otg_core_if_t *core_if) -+{ -+ dsts_data_t dsts; -+ dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts); -+ -+ /* read current frame/microframe number from DSTS register */ -+ return dsts.b.soffn; -+} -+ -+/** -+ * This function reads a setup packet from the Rx FIFO into the destination -+ * buffer. This function is called from the Rx Status Queue Level (RxStsQLvl) -+ * Interrupt routine when a SETUP packet has been received in Slave mode. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param dest Destination buffer for packet data. -+ */ -+void dwc_otg_read_setup_packet(dwc_otg_core_if_t *core_if, uint32_t *dest) -+{ -+ /* Get the 8 bytes of a setup transaction data */ -+ -+ /* Pop 2 DWORDS off the receive data FIFO into memory */ -+ dest[0] = dwc_read_reg32(core_if->data_fifo[0]); -+ dest[1] = dwc_read_reg32(core_if->data_fifo[0]); -+} -+ -+ -+/** -+ * This function enables EP0 OUT to receive SETUP packets and configures EP0 -+ * IN for transmitting packets. It is normally called when the -+ * "Enumeration Done" interrupt occurs. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param ep The EP0 data. 
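dwc_otg_hc_write_packet() above pushes (byte_count + 3) / 4 32-bit words per packet and falls back to unaligned loads when xfer_buff is not DWORD aligned. A stand-alone sketch of that word-count and unaligned-load handling, with memcpy standing in for the kernel's get_unaligned() (helper names are illustrative):

#include <stdint.h>
#include <string.h>

/* Number of 32-bit FIFO writes needed for a packet of byte_count bytes. */
static uint32_t fifo_words_for_packet(uint32_t byte_count)
{
	return (byte_count + 3u) / 4u;
}

/* Load one 32-bit word from a possibly unaligned source buffer. */
static uint32_t load_word_any_alignment(const uint8_t *p)
{
	uint32_t w;
	memcpy(&w, p, sizeof(w));
	return w;
}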
-+ */ -+void dwc_otg_ep0_activate(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) -+{ -+ dwc_otg_dev_if_t *dev_if = core_if->dev_if; -+ dsts_data_t dsts; -+ depctl_data_t diepctl; -+ depctl_data_t doepctl; -+ dctl_data_t dctl = { .d32 = 0 }; -+ -+ /* Read the Device Status and Endpoint 0 Control registers */ -+ dsts.d32 = dwc_read_reg32(&dev_if->dev_global_regs->dsts); -+ diepctl.d32 = dwc_read_reg32(&dev_if->in_ep_regs[0]->diepctl); -+ doepctl.d32 = dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl); -+ -+ /* Set the MPS of the IN EP based on the enumeration speed */ -+ switch (dsts.b.enumspd) { -+ case DWC_DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ: -+ case DWC_DSTS_ENUMSPD_FS_PHY_30MHZ_OR_60MHZ: -+ case DWC_DSTS_ENUMSPD_FS_PHY_48MHZ: -+ diepctl.b.mps = DWC_DEP0CTL_MPS_64; -+ break; -+ case DWC_DSTS_ENUMSPD_LS_PHY_6MHZ: -+ diepctl.b.mps = DWC_DEP0CTL_MPS_8; -+ break; -+ } -+ -+ dwc_write_reg32(&dev_if->in_ep_regs[0]->diepctl, diepctl.d32); -+ -+ /* Enable OUT EP for receive */ -+ doepctl.b.epena = 1; -+ dwc_write_reg32(&dev_if->out_ep_regs[0]->doepctl, doepctl.d32); -+ -+#ifdef VERBOSE -+ DWC_DEBUGPL(DBG_PCDV,"doepctl0=%0x\n", -+ dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl)); -+ DWC_DEBUGPL(DBG_PCDV,"diepctl0=%0x\n", -+ dwc_read_reg32(&dev_if->in_ep_regs[0]->diepctl)); -+#endif -+ dctl.b.cgnpinnak = 1; -+ -+ dwc_modify_reg32(&dev_if->dev_global_regs->dctl, dctl.d32, dctl.d32); -+ DWC_DEBUGPL(DBG_PCDV,"dctl=%0x\n", -+ dwc_read_reg32(&dev_if->dev_global_regs->dctl)); -+} -+ -+/** -+ * This function activates an EP. The Device EP control register for -+ * the EP is configured as defined in the ep structure. Note: This -+ * function is not used for EP0. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param ep The EP to activate. -+ */ -+void dwc_otg_ep_activate(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) -+{ -+ dwc_otg_dev_if_t *dev_if = core_if->dev_if; -+ depctl_data_t depctl; -+ volatile uint32_t *addr; -+ daint_data_t daintmsk = { .d32 = 0 }; -+ -+ DWC_DEBUGPL(DBG_PCDV, "%s() EP%d-%s\n", __func__, ep->num, -+ (ep->is_in?"IN":"OUT")); -+ -+ /* Read DEPCTLn register */ -+ if (ep->is_in == 1) { -+ addr = &dev_if->in_ep_regs[ep->num]->diepctl; -+ daintmsk.ep.in = 1<<ep->num; -+ } -+ else { -+ addr = &dev_if->out_ep_regs[ep->num]->doepctl; -+ daintmsk.ep.out = 1<<ep->num; -+ } -+ -+ /* If the EP is already active don't change the EP Control -+ * register. */ -+ depctl.d32 = dwc_read_reg32(addr); -+ if (!depctl.b.usbactep) { -+ depctl.b.mps = ep->maxpacket; -+ depctl.b.eptype = ep->type; -+ depctl.b.txfnum = ep->tx_fifo_num; -+ -+ if (ep->type == DWC_OTG_EP_TYPE_ISOC) { -+ depctl.b.setd0pid = 1; // ??? -+ } -+ else { -+ depctl.b.setd0pid = 1; -+ } -+ depctl.b.usbactep = 1; -+ -+ dwc_write_reg32(addr, depctl.d32); -+ DWC_DEBUGPL(DBG_PCDV,"DEPCTL=%08x\n", dwc_read_reg32(addr)); -+ } -+ -+ /* Enable the Interrupt for this EP */ -+ if(core_if->multiproc_int_enable) { -+ if (ep->is_in == 1) { -+ diepmsk_data_t diepmsk = { .d32 = 0}; -+ diepmsk.b.xfercompl = 1; -+ diepmsk.b.timeout = 1; -+ diepmsk.b.epdisabled = 1; -+ diepmsk.b.ahberr = 1; -+ diepmsk.b.intknepmis = 1; -+ diepmsk.b.txfifoundrn = 1; //?????
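dwc_otg_ep0_activate() above derives the EP0 maximum packet size from the enumerated speed: 8 bytes for low speed, 64 bytes for high speed, and 64 is also what this core programs for full speed; the DWC_DEP0CTL_MPS_* values are register encodings of those sizes. A sketch of the mapping with plain byte values (the enum and helper are illustrative, not driver symbols):

enum enum_speed { ENUM_SPD_HS, ENUM_SPD_FS, ENUM_SPD_LS };

static unsigned int ep0_mps_bytes(enum enum_speed spd)
{
	switch (spd) {
	case ENUM_SPD_LS:
		return 8;	/* corresponds to DWC_DEP0CTL_MPS_8 */
	default:
		return 64;	/* corresponds to DWC_DEP0CTL_MPS_64 */
	}
}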
-+ -+ -+ if(core_if->dma_desc_enable) { -+ diepmsk.b.bna = 1; -+ } -+/* -+ if(core_if->dma_enable) { -+ doepmsk.b.nak = 1; -+ } -+*/ -+ dwc_write_reg32(&dev_if->dev_global_regs->diepeachintmsk[ep->num], diepmsk.d32); -+ -+ } else { -+ doepmsk_data_t doepmsk = { .d32 = 0}; -+ doepmsk.b.xfercompl = 1; -+ doepmsk.b.ahberr = 1; -+ doepmsk.b.epdisabled = 1; -+ -+ -+ if(core_if->dma_desc_enable) { -+ doepmsk.b.bna = 1; -+ } -+/* -+ doepmsk.b.babble = 1; -+ doepmsk.b.nyet = 1; -+ doepmsk.b.nak = 1; -+*/ -+ dwc_write_reg32(&dev_if->dev_global_regs->doepeachintmsk[ep->num], doepmsk.d32); -+ } -+ dwc_modify_reg32(&dev_if->dev_global_regs->deachintmsk, -+ 0, daintmsk.d32); -+ } else { -+ dwc_modify_reg32(&dev_if->dev_global_regs->daintmsk, -+ 0, daintmsk.d32); -+ } -+ -+ DWC_DEBUGPL(DBG_PCDV,"DAINTMSK=%0x\n", -+ dwc_read_reg32(&dev_if->dev_global_regs->daintmsk)); -+ -+ ep->stall_clear_flag = 0; -+ return; -+} -+ -+/** -+ * This function deactivates an EP. This is done by clearing the USB Active -+ * EP bit in the Device EP control register. Note: This function is not used -+ * for EP0. EP0 cannot be deactivated. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param ep The EP to deactivate. -+ */ -+void dwc_otg_ep_deactivate(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) -+{ -+ depctl_data_t depctl = { .d32 = 0 }; -+ volatile uint32_t *addr; -+ daint_data_t daintmsk = { .d32 = 0}; -+ -+ /* Read DEPCTLn register */ -+ if (ep->is_in == 1) { -+ addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl; -+ daintmsk.ep.in = 1<<ep->num; -+ } -+ else { -+ addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl; -+ daintmsk.ep.out = 1<<ep->num; -+ } -+ -+ depctl.b.usbactep = 0; -+ -+ if(core_if->dma_desc_enable) -+ depctl.b.epdis = 1; -+ -+ dwc_write_reg32(addr, depctl.d32); -+ -+ /* Disable the Interrupt for this EP */ -+ if(core_if->multiproc_int_enable) { -+ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->deachintmsk, -+ daintmsk.d32, 0); -+ -+ if (ep->is_in == 1) { -+ dwc_write_reg32(&core_if->dev_if->dev_global_regs->diepeachintmsk[ep->num], 0); -+ } else { -+ dwc_write_reg32(&core_if->dev_if->dev_global_regs->doepeachintmsk[ep->num], 0); -+ } -+ } else { -+ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->daintmsk, -+ daintmsk.d32, 0); -+ } -+} -+ -+/** -+ * This function initializes the DMA descriptor chain for a data transfer -+ * on an EP, splitting the total transfer length into maxxfer-sized -+ * descriptors. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param ep The EP whose descriptor chain is set up. -+ */ -+static void init_dma_desc_chain(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) -+{ -+ dwc_otg_dma_desc_t* dma_desc; -+ uint32_t offset; -+ uint32_t xfer_est; -+ int i; -+ -+ ep->desc_cnt = ( ep->total_len / ep->maxxfer) + -+ ((ep->total_len % ep->maxxfer) ?
1 : 0); -+ if(!ep->desc_cnt) -+ ep->desc_cnt = 1; -+ -+ dma_desc = ep->desc_addr; -+ xfer_est = ep->total_len; -+ offset = 0; -+ for( i = 0; i < ep->desc_cnt; ++i) { -+ /** DMA Descriptor Setup */ -+ if(xfer_est > ep->maxxfer) { -+ dma_desc->status.b.bs = BS_HOST_BUSY; -+ dma_desc->status.b.l = 0; -+ dma_desc->status.b.ioc = 0; -+ dma_desc->status.b.sp = 0; -+ dma_desc->status.b.bytes = ep->maxxfer; -+ dma_desc->buf = ep->dma_addr + offset; -+ dma_desc->status.b.bs = BS_HOST_READY; -+ -+ xfer_est -= ep->maxxfer; -+ offset += ep->maxxfer; -+ } else { -+ dma_desc->status.b.bs = BS_HOST_BUSY; -+ dma_desc->status.b.l = 1; -+ dma_desc->status.b.ioc = 1; -+ if(ep->is_in) { -+ dma_desc->status.b.sp = (xfer_est % ep->maxpacket) ? -+ 1 : ((ep->sent_zlp) ? 1 : 0); -+ dma_desc->status.b.bytes = xfer_est; -+ } else { -+ dma_desc->status.b.bytes = xfer_est + ((4 - (xfer_est & 0x3)) & 0x3) ; -+ } -+ -+ dma_desc->buf = ep->dma_addr + offset; -+ dma_desc->status.b.bs = BS_HOST_READY; -+ } -+ dma_desc ++; -+ } -+} -+ -+/** -+ * This function does the setup for a data transfer for an EP and -+ * starts the transfer. For an IN transfer, the packets will be -+ * loaded into the appropriate Tx FIFO in the ISR. For OUT transfers, -+ * the packets are unloaded from the Rx FIFO in the ISR. the ISR. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param ep The EP to start the transfer on. -+ */ -+ -+void dwc_otg_ep_start_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) -+{ -+ depctl_data_t depctl; -+ deptsiz_data_t deptsiz; -+ gintmsk_data_t intr_mask = { .d32 = 0}; -+ -+ DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s()\n", __func__); -+ -+ DWC_DEBUGPL(DBG_PCD, "ep%d-%s xfer_len=%d xfer_cnt=%d " -+ "xfer_buff=%p start_xfer_buff=%p\n", -+ ep->num, (ep->is_in?"IN":"OUT"), ep->xfer_len, -+ ep->xfer_count, ep->xfer_buff, ep->start_xfer_buff); -+ -+ /* IN endpoint */ -+ if (ep->is_in == 1) { -+ dwc_otg_dev_in_ep_regs_t *in_regs = -+ core_if->dev_if->in_ep_regs[ep->num]; -+ -+ gnptxsts_data_t gtxstatus; -+ -+ gtxstatus.d32 = -+ dwc_read_reg32(&core_if->core_global_regs->gnptxsts); -+ -+ if(core_if->en_multiple_tx_fifo == 0 && gtxstatus.b.nptxqspcavail == 0) { -+#ifdef DEBUG -+ DWC_PRINT("TX Queue Full (0x%0x)\n", gtxstatus.d32); -+#endif -+ return; -+ } -+ -+ depctl.d32 = dwc_read_reg32(&(in_regs->diepctl)); -+ deptsiz.d32 = dwc_read_reg32(&(in_regs->dieptsiz)); -+ -+ ep->xfer_len += (ep->maxxfer < (ep->total_len - ep->xfer_len)) ? -+ ep->maxxfer : (ep->total_len - ep->xfer_len); -+ -+ /* Zero Length Packet? */ -+ if ((ep->xfer_len - ep->xfer_count) == 0) { -+ deptsiz.b.xfersize = 0; -+ deptsiz.b.pktcnt = 1; -+ } -+ else { -+ /* Program the transfer size and packet count -+ * as follows: xfersize = N * maxpacket + -+ * short_packet pktcnt = N + (short_packet -+ * exist ? 
1 : 0) -+ */ -+ deptsiz.b.xfersize = ep->xfer_len - ep->xfer_count; -+ deptsiz.b.pktcnt = -+ (ep->xfer_len - ep->xfer_count - 1 + ep->maxpacket) / -+ ep->maxpacket; -+ } -+ -+ -+ /* Write the DMA register */ -+ if (core_if->dma_enable) { -+ if (core_if->dma_desc_enable == 0) { -+ dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32); -+ dwc_write_reg32 (&(in_regs->diepdma), -+ (uint32_t)ep->dma_addr); -+ } -+ else { -+ init_dma_desc_chain(core_if, ep); -+ /** DIEPDMAn Register write */ -+ dwc_write_reg32(&in_regs->diepdma, ep->dma_desc_addr); -+ } -+ } -+ else { -+ dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32); -+ if(ep->type != DWC_OTG_EP_TYPE_ISOC) { -+ /** -+ * Enable the Non-Periodic Tx FIFO empty interrupt, -+ * or the Tx FIFO epmty interrupt in dedicated Tx FIFO mode, -+ * the data will be written into the fifo by the ISR. -+ */ -+ if(core_if->en_multiple_tx_fifo == 0) { -+ intr_mask.b.nptxfempty = 1; -+ dwc_modify_reg32(&core_if->core_global_regs->gintmsk, -+ intr_mask.d32, intr_mask.d32); -+ } -+ else { -+ /* Enable the Tx FIFO Empty Interrupt for this EP */ -+ if(ep->xfer_len > 0) { -+ uint32_t fifoemptymsk = 0; -+ fifoemptymsk = 1 << ep->num; -+ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk, -+ 0, fifoemptymsk); -+ -+ } -+ } -+ } -+ } -+ -+ /* EP enable, IN data in FIFO */ -+ depctl.b.cnak = 1; -+ depctl.b.epena = 1; -+ dwc_write_reg32(&in_regs->diepctl, depctl.d32); -+ -+ depctl.d32 = dwc_read_reg32 (&core_if->dev_if->in_ep_regs[0]->diepctl); -+ depctl.b.nextep = ep->num; -+ dwc_write_reg32 (&core_if->dev_if->in_ep_regs[0]->diepctl, depctl.d32); -+ -+ } -+ else { -+ /* OUT endpoint */ -+ dwc_otg_dev_out_ep_regs_t *out_regs = -+ core_if->dev_if->out_ep_regs[ep->num]; -+ -+ depctl.d32 = dwc_read_reg32(&(out_regs->doepctl)); -+ deptsiz.d32 = dwc_read_reg32(&(out_regs->doeptsiz)); -+ -+ ep->xfer_len += (ep->maxxfer < (ep->total_len - ep->xfer_len)) ? 
-+ ep->maxxfer : (ep->total_len - ep->xfer_len); -+ -+ /* Program the transfer size and packet count as follows: -+ * -+ * pktcnt = N -+ * xfersize = N * maxpacket -+ */ -+ if ((ep->xfer_len - ep->xfer_count) == 0) { -+ /* Zero Length Packet */ -+ deptsiz.b.xfersize = ep->maxpacket; -+ deptsiz.b.pktcnt = 1; -+ } -+ else { -+ deptsiz.b.pktcnt = -+ (ep->xfer_len - ep->xfer_count + (ep->maxpacket - 1)) / -+ ep->maxpacket; -+ ep->xfer_len = deptsiz.b.pktcnt * ep->maxpacket + ep->xfer_count; -+ deptsiz.b.xfersize = ep->xfer_len - ep->xfer_count; -+ } -+ -+ DWC_DEBUGPL(DBG_PCDV, "ep%d xfersize=%d pktcnt=%d\n", -+ ep->num, -+ deptsiz.b.xfersize, deptsiz.b.pktcnt); -+ -+ if (core_if->dma_enable) { -+ if (!core_if->dma_desc_enable) { -+ dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32); -+ -+ dwc_write_reg32 (&(out_regs->doepdma), -+ (uint32_t)ep->dma_addr); -+ } -+ else { -+ init_dma_desc_chain(core_if, ep); -+ -+ /** DOEPDMAn Register write */ -+ dwc_write_reg32(&out_regs->doepdma, ep->dma_desc_addr); -+ } -+ } -+ else { -+ dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32); -+ } -+ -+ /* EP enable */ -+ depctl.b.cnak = 1; -+ depctl.b.epena = 1; -+ -+ dwc_write_reg32(&out_regs->doepctl, depctl.d32); -+ -+ DWC_DEBUGPL(DBG_PCD, "DOEPCTL=%08x DOEPTSIZ=%08x\n", -+ dwc_read_reg32(&out_regs->doepctl), -+ dwc_read_reg32(&out_regs->doeptsiz)); -+ DWC_DEBUGPL(DBG_PCD, "DAINTMSK=%08x GINTMSK=%08x\n", -+ dwc_read_reg32(&core_if->dev_if->dev_global_regs->daintmsk), -+ dwc_read_reg32(&core_if->core_global_regs->gintmsk)); -+ } -+} -+ -+/** -+ * This function setup a zero length transfer in Buffer DMA and -+ * Slave modes for usb requests with zero field set -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param ep The EP to start the transfer on. -+ * -+ */ -+void dwc_otg_ep_start_zl_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) -+{ -+ -+ depctl_data_t depctl; -+ deptsiz_data_t deptsiz; -+ gintmsk_data_t intr_mask = { .d32 = 0}; -+ -+ DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s()\n", __func__); -+ -+ /* IN endpoint */ -+ if (ep->is_in == 1) { -+ dwc_otg_dev_in_ep_regs_t *in_regs = -+ core_if->dev_if->in_ep_regs[ep->num]; -+ -+ depctl.d32 = dwc_read_reg32(&(in_regs->diepctl)); -+ deptsiz.d32 = dwc_read_reg32(&(in_regs->dieptsiz)); -+ -+ deptsiz.b.xfersize = 0; -+ deptsiz.b.pktcnt = 1; -+ -+ -+ /* Write the DMA register */ -+ if (core_if->dma_enable) { -+ if (core_if->dma_desc_enable == 0) { -+ dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32); -+ dwc_write_reg32 (&(in_regs->diepdma), -+ (uint32_t)ep->dma_addr); -+ } -+ } -+ else { -+ dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32); -+ /** -+ * Enable the Non-Periodic Tx FIFO empty interrupt, -+ * or the Tx FIFO epmty interrupt in dedicated Tx FIFO mode, -+ * the data will be written into the fifo by the ISR. 
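Both the IN and OUT legs above program DEPTSIZ from the same rule: pktcnt = ceil(remaining / maxpacket), with one packet forced for a zero-length transfer, and the OUT leg additionally rounding xfersize up to a whole number of packets. A stand-alone sketch of that arithmetic (struct and helper names are illustrative, not driver symbols):

#include <stdint.h>

struct deptsiz_vals { uint32_t xfersize; uint32_t pktcnt; };

static struct deptsiz_vals out_ep_deptsiz(uint32_t remaining, uint32_t maxpacket)
{
	struct deptsiz_vals v;

	if (remaining == 0) {		/* zero-length packet */
		v.xfersize = maxpacket;
		v.pktcnt = 1;
	} else {
		v.pktcnt = (remaining + maxpacket - 1) / maxpacket;
		v.xfersize = v.pktcnt * maxpacket;	/* whole packets for OUT */
	}
	return v;
}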
-+ */ -+ if(core_if->en_multiple_tx_fifo == 0) { -+ intr_mask.b.nptxfempty = 1; -+ dwc_modify_reg32(&core_if->core_global_regs->gintmsk, -+ intr_mask.d32, intr_mask.d32); -+ } -+ else { -+ /* Enable the Tx FIFO Empty Interrupt for this EP */ -+ if(ep->xfer_len > 0) { -+ uint32_t fifoemptymsk = 0; -+ fifoemptymsk = 1 << ep->num; -+ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk, -+ 0, fifoemptymsk); -+ } -+ } -+ } -+ -+ /* EP enable, IN data in FIFO */ -+ depctl.b.cnak = 1; -+ depctl.b.epena = 1; -+ dwc_write_reg32(&in_regs->diepctl, depctl.d32); -+ -+ depctl.d32 = dwc_read_reg32 (&core_if->dev_if->in_ep_regs[0]->diepctl); -+ depctl.b.nextep = ep->num; -+ dwc_write_reg32 (&core_if->dev_if->in_ep_regs[0]->diepctl, depctl.d32); -+ -+ } -+ else { -+ /* OUT endpoint */ -+ dwc_otg_dev_out_ep_regs_t *out_regs = -+ core_if->dev_if->out_ep_regs[ep->num]; -+ -+ depctl.d32 = dwc_read_reg32(&(out_regs->doepctl)); -+ deptsiz.d32 = dwc_read_reg32(&(out_regs->doeptsiz)); -+ -+ /* Zero Length Packet */ -+ deptsiz.b.xfersize = ep->maxpacket; -+ deptsiz.b.pktcnt = 1; -+ -+ if (core_if->dma_enable) { -+ if (!core_if->dma_desc_enable) { -+ dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32); -+ -+ dwc_write_reg32 (&(out_regs->doepdma), -+ (uint32_t)ep->dma_addr); -+ } -+ } -+ else { -+ dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32); -+ } -+ -+ /* EP enable */ -+ depctl.b.cnak = 1; -+ depctl.b.epena = 1; -+ -+ dwc_write_reg32(&out_regs->doepctl, depctl.d32); -+ -+ } -+} -+ -+/** -+ * This function does the setup for a data transfer for EP0 and starts -+ * the transfer. For an IN transfer, the packets will be loaded into -+ * the appropriate Tx FIFO in the ISR. For OUT transfers, the packets are -+ * unloaded from the Rx FIFO in the ISR. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param ep The EP0 data. -+ */ -+void dwc_otg_ep0_start_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) -+{ -+ depctl_data_t depctl; -+ deptsiz0_data_t deptsiz; -+ gintmsk_data_t intr_mask = { .d32 = 0}; -+ dwc_otg_dma_desc_t* dma_desc; -+ -+ DWC_DEBUGPL(DBG_PCD, "ep%d-%s xfer_len=%d xfer_cnt=%d " -+ "xfer_buff=%p start_xfer_buff=%p \n", -+ ep->num, (ep->is_in?"IN":"OUT"), ep->xfer_len, -+ ep->xfer_count, ep->xfer_buff, ep->start_xfer_buff); -+ -+ ep->total_len = ep->xfer_len; -+ -+ /* IN endpoint */ -+ if (ep->is_in == 1) { -+ dwc_otg_dev_in_ep_regs_t *in_regs = -+ core_if->dev_if->in_ep_regs[0]; -+ -+ gnptxsts_data_t gtxstatus; -+ -+ gtxstatus.d32 = -+ dwc_read_reg32(&core_if->core_global_regs->gnptxsts); -+ -+ if(core_if->en_multiple_tx_fifo == 0 && gtxstatus.b.nptxqspcavail == 0) { -+#ifdef DEBUG -+ deptsiz.d32 = dwc_read_reg32(&in_regs->dieptsiz); -+ DWC_DEBUGPL(DBG_PCD,"DIEPCTL0=%0x\n", -+ dwc_read_reg32(&in_regs->diepctl)); -+ DWC_DEBUGPL(DBG_PCD, "DIEPTSIZ0=%0x (sz=%d, pcnt=%d)\n", -+ deptsiz.d32, -+ deptsiz.b.xfersize, deptsiz.b.pktcnt); -+ DWC_PRINT("TX Queue or FIFO Full (0x%0x)\n", -+ gtxstatus.d32); -+#endif -+ return; -+ } -+ -+ -+ depctl.d32 = dwc_read_reg32(&in_regs->diepctl); -+ deptsiz.d32 = dwc_read_reg32(&in_regs->dieptsiz); -+ -+ /* Zero Length Packet? */ -+ if (ep->xfer_len == 0) { -+ deptsiz.b.xfersize = 0; -+ deptsiz.b.pktcnt = 1; -+ } -+ else { -+ /* Program the transfer size and packet count -+ * as follows: xfersize = N * maxpacket + -+ * short_packet pktcnt = N + (short_packet -+ * exist ? 
1 : 0) -+ */ -+ if (ep->xfer_len > ep->maxpacket) { -+ ep->xfer_len = ep->maxpacket; -+ deptsiz.b.xfersize = ep->maxpacket; -+ } -+ else { -+ deptsiz.b.xfersize = ep->xfer_len; -+ } -+ deptsiz.b.pktcnt = 1; -+ -+ } -+ DWC_DEBUGPL(DBG_PCDV, "IN len=%d xfersize=%d pktcnt=%d [%08x]\n", -+ ep->xfer_len, -+ deptsiz.b.xfersize, deptsiz.b.pktcnt, deptsiz.d32); -+ -+ /* Write the DMA register */ -+ if (core_if->dma_enable) { -+ if(core_if->dma_desc_enable == 0) { -+ dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32); -+ -+ dwc_write_reg32 (&(in_regs->diepdma), -+ (uint32_t)ep->dma_addr); -+ } -+ else { -+ dma_desc = core_if->dev_if->in_desc_addr; -+ -+ /** DMA Descriptor Setup */ -+ dma_desc->status.b.bs = BS_HOST_BUSY; -+ dma_desc->status.b.l = 1; -+ dma_desc->status.b.ioc = 1; -+ dma_desc->status.b.sp = (ep->xfer_len == ep->maxpacket) ? 0 : 1; -+ dma_desc->status.b.bytes = ep->xfer_len; -+ dma_desc->buf = ep->dma_addr; -+ dma_desc->status.b.bs = BS_HOST_READY; -+ -+ /** DIEPDMA0 Register write */ -+ dwc_write_reg32(&in_regs->diepdma, core_if->dev_if->dma_in_desc_addr); -+ } -+ } -+ else { -+ dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32); -+ } -+ -+ /* EP enable, IN data in FIFO */ -+ depctl.b.cnak = 1; -+ depctl.b.epena = 1; -+ dwc_write_reg32(&in_regs->diepctl, depctl.d32); -+ -+ /** -+ * Enable the Non-Periodic Tx FIFO empty interrupt, the -+ * data will be written into the fifo by the ISR. -+ */ -+ if (!core_if->dma_enable) { -+ if(core_if->en_multiple_tx_fifo == 0) { -+ intr_mask.b.nptxfempty = 1; -+ dwc_modify_reg32(&core_if->core_global_regs->gintmsk, -+ intr_mask.d32, intr_mask.d32); -+ } -+ else { -+ /* Enable the Tx FIFO Empty Interrupt for this EP */ -+ if(ep->xfer_len > 0) { -+ uint32_t fifoemptymsk = 0; -+ fifoemptymsk |= 1 << ep->num; -+ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk, -+ 0, fifoemptymsk); -+ } -+ } -+ } -+ } -+ else { -+ /* OUT endpoint */ -+ dwc_otg_dev_out_ep_regs_t *out_regs = -+ core_if->dev_if->out_ep_regs[0]; -+ -+ depctl.d32 = dwc_read_reg32(&out_regs->doepctl); -+ deptsiz.d32 = dwc_read_reg32(&out_regs->doeptsiz); -+ -+ /* Program the transfer size and packet count as follows: -+ * xfersize = N * (maxpacket + 4 - (maxpacket % 4)) -+ * pktcnt = N */ -+ /* Zero Length Packet */ -+ deptsiz.b.xfersize = ep->maxpacket; -+ deptsiz.b.pktcnt = 1; -+ -+ DWC_DEBUGPL(DBG_PCDV, "len=%d xfersize=%d pktcnt=%d\n", -+ ep->xfer_len, -+ deptsiz.b.xfersize, deptsiz.b.pktcnt); -+ -+ if (core_if->dma_enable) { -+ if(!core_if->dma_desc_enable) { -+ dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32); -+ -+ dwc_write_reg32 (&(out_regs->doepdma), -+ (uint32_t)ep->dma_addr); -+ } -+ else { -+ dma_desc = core_if->dev_if->out_desc_addr; -+ -+ /** DMA Descriptor Setup */ -+ dma_desc->status.b.bs = BS_HOST_BUSY; -+ dma_desc->status.b.l = 1; -+ dma_desc->status.b.ioc = 1; -+ dma_desc->status.b.bytes = ep->maxpacket; -+ dma_desc->buf = ep->dma_addr; -+ dma_desc->status.b.bs = BS_HOST_READY; -+ -+ /** DOEPDMA0 Register write */ -+ dwc_write_reg32(&out_regs->doepdma, core_if->dev_if->dma_out_desc_addr); -+ } -+ } -+ else { -+ dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32); -+ } -+ -+ /* EP enable */ -+ depctl.b.cnak = 1; -+ depctl.b.epena = 1; -+ dwc_write_reg32 (&(out_regs->doepctl), depctl.d32); -+ } -+} -+ -+/** -+ * This function continues control IN transfers started by -+ * dwc_otg_ep0_start_transfer, when the transfer does not fit in a -+ * single packet. NOTE: The DIEPCTL0/DOEPCTL0 registers only have one -+ * bit for the packet count. 
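Because EP0 can only be programmed with a single packet at a time (see the note above about the one-bit packet count), dwc_otg_ep0_start_transfer() and dwc_otg_ep0_continue_transfer() simply clamp the remaining length to one maxpacket per pass and rely on the ISR to re-arm the endpoint. A stand-alone sketch of that clamping (the helper name is illustrative):

#include <stdint.h>

/* Bytes to program into DIEPTSIZ0 for the current pass of a control IN transfer. */
static uint32_t ep0_in_chunk(uint32_t remaining, uint32_t maxpacket)
{
	return (remaining > maxpacket) ? maxpacket : remaining;
}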
-+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param ep The EP0 data. -+ */ -+void dwc_otg_ep0_continue_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) -+{ -+ depctl_data_t depctl; -+ deptsiz0_data_t deptsiz; -+ gintmsk_data_t intr_mask = { .d32 = 0}; -+ dwc_otg_dma_desc_t* dma_desc; -+ -+ if (ep->is_in == 1) { -+ dwc_otg_dev_in_ep_regs_t *in_regs = -+ core_if->dev_if->in_ep_regs[0]; -+ gnptxsts_data_t tx_status = { .d32 = 0 }; -+ -+ tx_status.d32 = dwc_read_reg32(&core_if->core_global_regs->gnptxsts); -+ /** @todo Should there be check for room in the Tx -+ * Status Queue. If not remove the code above this comment. */ -+ -+ depctl.d32 = dwc_read_reg32(&in_regs->diepctl); -+ deptsiz.d32 = dwc_read_reg32(&in_regs->dieptsiz); -+ -+ /* Program the transfer size and packet count -+ * as follows: xfersize = N * maxpacket + -+ * short_packet pktcnt = N + (short_packet -+ * exist ? 1 : 0) -+ */ -+ -+ -+ if(core_if->dma_desc_enable == 0) { -+ deptsiz.b.xfersize = (ep->total_len - ep->xfer_count) > ep->maxpacket ? ep->maxpacket : -+ (ep->total_len - ep->xfer_count); -+ deptsiz.b.pktcnt = 1; -+ if(core_if->dma_enable == 0) { -+ ep->xfer_len += deptsiz.b.xfersize; -+ } else { -+ ep->xfer_len = deptsiz.b.xfersize; -+ } -+ dwc_write_reg32(&in_regs->dieptsiz, deptsiz.d32); -+ } -+ else { -+ ep->xfer_len = (ep->total_len - ep->xfer_count) > ep->maxpacket ? ep->maxpacket : -+ (ep->total_len - ep->xfer_count); -+ -+ dma_desc = core_if->dev_if->in_desc_addr; -+ -+ /** DMA Descriptor Setup */ -+ dma_desc->status.b.bs = BS_HOST_BUSY; -+ dma_desc->status.b.l = 1; -+ dma_desc->status.b.ioc = 1; -+ dma_desc->status.b.sp = (ep->xfer_len == ep->maxpacket) ? 0 : 1; -+ dma_desc->status.b.bytes = ep->xfer_len; -+ dma_desc->buf = ep->dma_addr; -+ dma_desc->status.b.bs = BS_HOST_READY; -+ -+ /** DIEPDMA0 Register write */ -+ dwc_write_reg32(&in_regs->diepdma, core_if->dev_if->dma_in_desc_addr); -+ } -+ -+ -+ DWC_DEBUGPL(DBG_PCDV, "IN len=%d xfersize=%d pktcnt=%d [%08x]\n", -+ ep->xfer_len, -+ deptsiz.b.xfersize, deptsiz.b.pktcnt, deptsiz.d32); -+ -+ /* Write the DMA register */ -+ if (core_if->hwcfg2.b.architecture == DWC_INT_DMA_ARCH) { -+ if(core_if->dma_desc_enable == 0) -+ dwc_write_reg32 (&(in_regs->diepdma), (uint32_t)ep->dma_addr); -+ } -+ -+ /* EP enable, IN data in FIFO */ -+ depctl.b.cnak = 1; -+ depctl.b.epena = 1; -+ dwc_write_reg32(&in_regs->diepctl, depctl.d32); -+ -+ /** -+ * Enable the Non-Periodic Tx FIFO empty interrupt, the -+ * data will be written into the fifo by the ISR. -+ */ -+ if (!core_if->dma_enable) { -+ if(core_if->en_multiple_tx_fifo == 0) { -+ /* First clear it from GINTSTS */ -+ intr_mask.b.nptxfempty = 1; -+ dwc_modify_reg32(&core_if->core_global_regs->gintmsk, -+ intr_mask.d32, intr_mask.d32); -+ -+ } -+ else { -+ /* Enable the Tx FIFO Empty Interrupt for this EP */ -+ if(ep->xfer_len > 0) { -+ uint32_t fifoemptymsk = 0; -+ fifoemptymsk |= 1 << ep->num; -+ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk, -+ 0, fifoemptymsk); -+ } -+ } -+ } -+ } -+ else { -+ dwc_otg_dev_out_ep_regs_t *out_regs = -+ core_if->dev_if->out_ep_regs[0]; -+ -+ -+ depctl.d32 = dwc_read_reg32(&out_regs->doepctl); -+ deptsiz.d32 = dwc_read_reg32(&out_regs->doeptsiz); -+ -+ /* Program the transfer size and packet count -+ * as follows: xfersize = N * maxpacket + -+ * short_packet pktcnt = N + (short_packet -+ * exist ? 
1 : 0) -+ */ -+ deptsiz.b.xfersize = ep->maxpacket; -+ deptsiz.b.pktcnt = 1; -+ -+ -+ if(core_if->dma_desc_enable == 0) { -+ dwc_write_reg32(&out_regs->doeptsiz, deptsiz.d32); -+ } -+ else { -+ dma_desc = core_if->dev_if->out_desc_addr; -+ -+ /** DMA Descriptor Setup */ -+ dma_desc->status.b.bs = BS_HOST_BUSY; -+ dma_desc->status.b.l = 1; -+ dma_desc->status.b.ioc = 1; -+ dma_desc->status.b.bytes = ep->maxpacket; -+ dma_desc->buf = ep->dma_addr; -+ dma_desc->status.b.bs = BS_HOST_READY; -+ -+ /** DOEPDMA0 Register write */ -+ dwc_write_reg32(&out_regs->doepdma, core_if->dev_if->dma_out_desc_addr); -+ } -+ -+ -+ DWC_DEBUGPL(DBG_PCDV, "IN len=%d xfersize=%d pktcnt=%d [%08x]\n", -+ ep->xfer_len, -+ deptsiz.b.xfersize, deptsiz.b.pktcnt, deptsiz.d32); -+ -+ /* Write the DMA register */ -+ if (core_if->hwcfg2.b.architecture == DWC_INT_DMA_ARCH) { -+ if(core_if->dma_desc_enable == 0) -+ dwc_write_reg32 (&(out_regs->doepdma), (uint32_t)ep->dma_addr); -+ } -+ -+ /* EP enable, IN data in FIFO */ -+ depctl.b.cnak = 1; -+ depctl.b.epena = 1; -+ dwc_write_reg32(&out_regs->doepctl, depctl.d32); -+ -+ } -+} -+ -+#ifdef DEBUG -+void dump_msg(const u8 *buf, unsigned int length) -+{ -+ unsigned int start, num, i; -+ char line[52], *p; -+ -+ if (length >= 512) -+ return; -+ start = 0; -+ while (length > 0) { -+ num = min(length, 16u); -+ p = line; -+ for (i = 0; i < num; ++i) -+ { -+ if (i == 8) -+ *p++ = ' '; -+ sprintf(p, " %02x", buf[i]); -+ p += 3; -+ } -+ *p = 0; -+ DWC_PRINT("%6x: %s\n", start, line); -+ buf += num; -+ start += num; -+ length -= num; -+ } -+} -+#else -+static inline void dump_msg(const u8 *buf, unsigned int length) -+{ -+} -+#endif -+ -+/** -+ * This function writes a packet into the Tx FIFO associated with the -+ * EP. For non-periodic EPs the non-periodic Tx FIFO is written. For -+ * periodic EPs the periodic Tx FIFO associated with the EP is written -+ * with all packets for the next micro-frame. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param ep The EP to write packet for. -+ * @param dma Indicates if DMA is being used. -+ */ -+void dwc_otg_ep_write_packet(dwc_otg_core_if_t *core_if, dwc_ep_t *ep, int dma) -+{ -+ /** -+ * The buffer is padded to DWORD on a per packet basis in -+ * slave/dma mode if the MPS is not DWORD aligned. The last -+ * packet, if short, is also padded to a multiple of DWORD. -+ * -+ * ep->xfer_buff always starts DWORD aligned in memory and is a -+ * multiple of DWORD in length -+ * -+ * ep->xfer_len can be any number of bytes -+ * -+ * ep->xfer_count is a multiple of ep->maxpacket until the last -+ * packet -+ * -+ * FIFO access is DWORD */ -+ -+ uint32_t i; -+ uint32_t byte_count; -+ uint32_t dword_count; -+ uint32_t *fifo; -+ uint32_t *data_buff = (uint32_t *)ep->xfer_buff; -+ -+ DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s(%p,%p)\n", __func__, core_if, ep); -+ if (ep->xfer_count >= ep->xfer_len) { -+ DWC_WARN("%s() No data for EP%d!!!\n", __func__, ep->num); -+ return; -+ } -+ -+ /* Find the byte length of the packet either short packet or MPS */ -+ if ((ep->xfer_len - ep->xfer_count) < ep->maxpacket) { -+ byte_count = ep->xfer_len - ep->xfer_count; -+ } -+ else { -+ byte_count = ep->maxpacket; -+ } -+ -+ /* Find the DWORD length, padded by extra bytes as neccessary if MPS -+ * is not a multiple of DWORD */ -+ dword_count = (byte_count + 3) / 4; -+ -+#ifdef VERBOSE -+ dump_msg(ep->xfer_buff, byte_count); -+#endif -+ -+ /**@todo NGS Where are the Periodic Tx FIFO addresses -+ * intialized? What should this be? 
*/ -+ -+ fifo = core_if->data_fifo[ep->num]; -+ -+ -+ DWC_DEBUGPL((DBG_PCDV|DBG_CILV), "fifo=%p buff=%p *p=%08x bc=%d\n", fifo, data_buff, *data_buff, byte_count); -+ -+ if (!dma) { -+ for (i=0; i<dword_count; i++, data_buff++) { -+ dwc_write_reg32(fifo, *data_buff); -+ } -+ } -+ -+ ep->xfer_count += byte_count; -+ ep->xfer_buff += byte_count; -+ ep->dma_addr += byte_count; -+} -+ -+/** -+ * Set the EP STALL. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param ep The EP to set the stall on. -+ */ -+void dwc_otg_ep_set_stall(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) -+{ -+ depctl_data_t depctl; -+ volatile uint32_t *depctl_addr; -+ -+ DWC_DEBUGPL(DBG_PCD, "%s ep%d-%s\n", __func__, ep->num, -+ (ep->is_in?"IN":"OUT")); -+ -+ DWC_PRINT("%s ep%d-%s\n", __func__, ep->num, -+ (ep->is_in?"in":"out")); -+ -+ if (ep->is_in == 1) { -+ depctl_addr = &(core_if->dev_if->in_ep_regs[ep->num]->diepctl); -+ depctl.d32 = dwc_read_reg32(depctl_addr); -+ -+ /* set the disable and stall bits */ -+ if (depctl.b.epena) { -+ depctl.b.epdis = 1; -+ } -+ depctl.b.stall = 1; -+ dwc_write_reg32(depctl_addr, depctl.d32); -+ } -+ else { -+ depctl_addr = &(core_if->dev_if->out_ep_regs[ep->num]->doepctl); -+ depctl.d32 = dwc_read_reg32(depctl_addr); -+ -+ /* set the stall bit */ -+ depctl.b.stall = 1; -+ dwc_write_reg32(depctl_addr, depctl.d32); -+ } -+ -+ DWC_DEBUGPL(DBG_PCD,"DEPCTL=%0x\n",dwc_read_reg32(depctl_addr)); -+ -+ return; -+} -+ -+/** -+ * Clear the EP STALL. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param ep The EP to clear stall from. -+ */ -+void dwc_otg_ep_clear_stall(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) -+{ -+ depctl_data_t depctl; -+ volatile uint32_t *depctl_addr; -+ -+ DWC_DEBUGPL(DBG_PCD, "%s ep%d-%s\n", __func__, ep->num, -+ (ep->is_in?"IN":"OUT")); -+ -+ if (ep->is_in == 1) { -+ depctl_addr = &(core_if->dev_if->in_ep_regs[ep->num]->diepctl); -+ } -+ else { -+ depctl_addr = &(core_if->dev_if->out_ep_regs[ep->num]->doepctl); -+ } -+ -+ depctl.d32 = dwc_read_reg32(depctl_addr); -+ -+ /* clear the stall bits */ -+ depctl.b.stall = 0; -+ -+ /* -+ * USB Spec 9.4.5: For endpoints using data toggle, regardless -+ * of whether an endpoint has the Halt feature set, a -+ * ClearFeature(ENDPOINT_HALT) request always results in the -+ * data toggle being reinitialized to DATA0. -+ */ -+ if (ep->type == DWC_OTG_EP_TYPE_INTR || -+ ep->type == DWC_OTG_EP_TYPE_BULK) { -+ depctl.b.setd0pid = 1; /* DATA0 */ -+ } -+ -+ dwc_write_reg32(depctl_addr, depctl.d32); -+ DWC_DEBUGPL(DBG_PCD,"DEPCTL=%0x\n",dwc_read_reg32(depctl_addr)); -+ return; -+} -+ -+/** -+ * This function reads a packet from the Rx FIFO into the destination -+ * buffer. To read SETUP data use dwc_otg_read_setup_packet. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param dest Destination buffer for the packet. -+ * @param bytes Number of bytes to copy to the destination. -+ */ -+void dwc_otg_read_packet(dwc_otg_core_if_t *core_if, -+ uint8_t *dest, -+ uint16_t bytes) -+{ -+ int i; -+ int word_count = (bytes + 3) / 4; -+ -+ volatile uint32_t *fifo = core_if->data_fifo[0]; -+ uint32_t *data_buff = (uint32_t *)dest; -+ -+ /** -+ * @todo Account for the case where _dest is not dword aligned. This -+ * requires reading data from the FIFO into a uint32_t temp buffer, -+ * then moving it into the data buffer.
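dwc_otg_ep_clear_stall() above also re-initializes the data toggle, per USB 2.0 section 9.4.5, but only for endpoint types that use one. A stand-alone restatement of that rule (the enum and helper are illustrative, not driver symbols):

enum ep_xfer_type { EP_CONTROL, EP_ISOC, EP_BULK, EP_INTR };

/* Returns nonzero if SETD0PID must be set while clearing the STALL bit. */
static int clear_stall_resets_toggle(enum ep_xfer_type type)
{
	return type == EP_BULK || type == EP_INTR;
}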
-+ */ -+ -+ DWC_DEBUGPL((DBG_PCDV | DBG_CILV), "%s(%p,%p,%d)\n", __func__, -+ core_if, dest, bytes); -+ -+ for (i=0; i<word_count; i++, data_buff++) -+ { -+ *data_buff = dwc_read_reg32(fifo); -+ } -+ -+ return; -+} -+ -+/** -+ * This function reads the device registers and prints them -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ */ -+void dwc_otg_dump_dev_registers(dwc_otg_core_if_t *core_if) -+{ -+ int i; -+ volatile uint32_t *addr; -+ -+ DWC_PRINT("Device Global Registers\n"); -+ addr=&core_if->dev_if->dev_global_regs->dcfg; -+ DWC_PRINT("DCFG @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->dev_if->dev_global_regs->dctl; -+ DWC_PRINT("DCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->dev_if->dev_global_regs->dsts; -+ DWC_PRINT("DSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->dev_if->dev_global_regs->diepmsk; -+ DWC_PRINT("DIEPMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->dev_if->dev_global_regs->doepmsk; -+ DWC_PRINT("DOEPMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->dev_if->dev_global_regs->daint; -+ DWC_PRINT("DAINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->dev_if->dev_global_regs->daintmsk; -+ DWC_PRINT("DAINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->dev_if->dev_global_regs->dtknqr1; -+ DWC_PRINT("DTKNQR1 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ if (core_if->hwcfg2.b.dev_token_q_depth > 6) { -+ addr=&core_if->dev_if->dev_global_regs->dtknqr2; -+ DWC_PRINT("DTKNQR2 @0x%08X : 0x%08X\n", -+ (uint32_t)addr,dwc_read_reg32(addr)); -+ } -+ -+ addr=&core_if->dev_if->dev_global_regs->dvbusdis; -+ DWC_PRINT("DVBUSID @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ -+ addr=&core_if->dev_if->dev_global_regs->dvbuspulse; -+ DWC_PRINT("DVBUSPULSE @0x%08X : 0x%08X\n", -+ (uint32_t)addr,dwc_read_reg32(addr)); -+ -+ if (core_if->hwcfg2.b.dev_token_q_depth > 14) { -+ addr=&core_if->dev_if->dev_global_regs->dtknqr3_dthrctl; -+ DWC_PRINT("DTKNQR3_DTHRCTL @0x%08X : 0x%08X\n", -+ (uint32_t)addr, dwc_read_reg32(addr)); -+ } -+/* -+ if (core_if->hwcfg2.b.dev_token_q_depth > 22) { -+ addr=&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk; -+ DWC_PRINT("DTKNQR4 @0x%08X : 0x%08X\n", -+ (uint32_t)addr, dwc_read_reg32(addr)); -+ } -+*/ -+ addr=&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk; -+ DWC_PRINT("FIFOEMPMSK @0x%08X : 0x%08X\n", (uint32_t)addr, dwc_read_reg32(addr)); -+ -+ addr=&core_if->dev_if->dev_global_regs->deachint; -+ DWC_PRINT("DEACHINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->dev_if->dev_global_regs->deachintmsk; -+ DWC_PRINT("DEACHINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ -+ for (i=0; i<= core_if->dev_if->num_in_eps; i++) { -+ addr=&core_if->dev_if->dev_global_regs->diepeachintmsk[i]; -+ DWC_PRINT("DIEPEACHINTMSK[%d] @0x%08X : 0x%08X\n", i, (uint32_t)addr, dwc_read_reg32(addr)); -+ } -+ -+ -+ for (i=0; i<= core_if->dev_if->num_out_eps; i++) { -+ addr=&core_if->dev_if->dev_global_regs->doepeachintmsk[i]; -+ DWC_PRINT("DOEPEACHINTMSK[%d] @0x%08X : 0x%08X\n", i, (uint32_t)addr, dwc_read_reg32(addr)); -+ } -+ -+ for (i=0; i<= core_if->dev_if->num_in_eps; i++) { -+ DWC_PRINT("Device IN EP %d Registers\n", i); -+ addr=&core_if->dev_if->in_ep_regs[i]->diepctl; -+ DWC_PRINT("DIEPCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->dev_if->in_ep_regs[i]->diepint; -+ DWC_PRINT("DIEPINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->dev_if->in_ep_regs[i]->dieptsiz; -+ DWC_PRINT("DIETSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->dev_if->in_ep_regs[i]->diepdma; -+ DWC_PRINT("DIEPDMA @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+
addr=&core_if->dev_if->in_ep_regs[i]->dtxfsts; -+ DWC_PRINT("DTXFSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->dev_if->in_ep_regs[i]->diepdmab; -+ DWC_PRINT("DIEPDMAB @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ } -+ -+ -+ for (i=0; i<= core_if->dev_if->num_out_eps; i++) { -+ DWC_PRINT("Device OUT EP %d Registers\n", i); -+ addr=&core_if->dev_if->out_ep_regs[i]->doepctl; -+ DWC_PRINT("DOEPCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->dev_if->out_ep_regs[i]->doepfn; -+ DWC_PRINT("DOEPFN @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->dev_if->out_ep_regs[i]->doepint; -+ DWC_PRINT("DOEPINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->dev_if->out_ep_regs[i]->doeptsiz; -+ DWC_PRINT("DOETSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->dev_if->out_ep_regs[i]->doepdma; -+ DWC_PRINT("DOEPDMA @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->dev_if->out_ep_regs[i]->doepdmab; -+ DWC_PRINT("DOEPDMAB @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ -+ } -+ -+ -+ -+ return; -+} -+ -+/** -+ * This functions reads the SPRAM and prints its content -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ */ -+void dwc_otg_dump_spram(dwc_otg_core_if_t *core_if) -+{ -+ volatile uint8_t *addr, *start_addr, *end_addr; -+ -+ DWC_PRINT("SPRAM Data:\n"); -+ start_addr = (void*)core_if->core_global_regs; -+ DWC_PRINT("Base Address: 0x%8X\n", (uint32_t)start_addr); -+ start_addr += 0x00028000; -+ end_addr=(void*)core_if->core_global_regs; -+ end_addr += 0x000280e0; -+ -+ for(addr = start_addr; addr < end_addr; addr+=16) -+ { -+ DWC_PRINT("0x%8X:\t%2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X\n", (uint32_t)addr, -+ addr[0], -+ addr[1], -+ addr[2], -+ addr[3], -+ addr[4], -+ addr[5], -+ addr[6], -+ addr[7], -+ addr[8], -+ addr[9], -+ addr[10], -+ addr[11], -+ addr[12], -+ addr[13], -+ addr[14], -+ addr[15] -+ ); -+ } -+ -+ return; -+} -+/** -+ * This function reads the host registers and prints them -+ * -+ * @param core_if Programming view of DWC_otg controller. 
-+ */ -+void dwc_otg_dump_host_registers(dwc_otg_core_if_t *core_if) -+{ -+ int i; -+ volatile uint32_t *addr; -+ -+ DWC_PRINT("Host Global Registers\n"); -+ addr=&core_if->host_if->host_global_regs->hcfg; -+ DWC_PRINT("HCFG @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->host_if->host_global_regs->hfir; -+ DWC_PRINT("HFIR @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->host_if->host_global_regs->hfnum; -+ DWC_PRINT("HFNUM @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->host_if->host_global_regs->hptxsts; -+ DWC_PRINT("HPTXSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->host_if->host_global_regs->haint; -+ DWC_PRINT("HAINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->host_if->host_global_regs->haintmsk; -+ DWC_PRINT("HAINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=core_if->host_if->hprt0; -+ DWC_PRINT("HPRT0 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ -+ for (i=0; i<core_if->core_params->host_channels; i++) -+ { -+ DWC_PRINT("Host Channel %d Specific Registers\n", i); -+ addr=&core_if->host_if->hc_regs[i]->hcchar; -+ DWC_PRINT("HCCHAR @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->host_if->hc_regs[i]->hcsplt; -+ DWC_PRINT("HCSPLT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->host_if->hc_regs[i]->hcint; -+ DWC_PRINT("HCINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->host_if->hc_regs[i]->hcintmsk; -+ DWC_PRINT("HCINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->host_if->hc_regs[i]->hctsiz; -+ DWC_PRINT("HCTSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->host_if->hc_regs[i]->hcdma; -+ DWC_PRINT("HCDMA @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ } -+ return; -+} -+ -+/** -+ * This function reads the core global registers and prints them -+ * -+ * @param core_if Programming view of DWC_otg controller.
-+ */ -+void dwc_otg_dump_global_registers(dwc_otg_core_if_t *core_if) -+{ -+ int i; -+ volatile uint32_t *addr; -+ -+ DWC_PRINT("Core Global Registers\n"); -+ addr=&core_if->core_global_regs->gotgctl; -+ DWC_PRINT("GOTGCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->core_global_regs->gotgint; -+ DWC_PRINT("GOTGINT @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->core_global_regs->gahbcfg; -+ DWC_PRINT("GAHBCFG @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->core_global_regs->gusbcfg; -+ DWC_PRINT("GUSBCFG @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->core_global_regs->grstctl; -+ DWC_PRINT("GRSTCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->core_global_regs->gintsts; -+ DWC_PRINT("GINTSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->core_global_regs->gintmsk; -+ DWC_PRINT("GINTMSK @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->core_global_regs->grxstsr; -+ DWC_PRINT("GRXSTSR @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ //addr=&core_if->core_global_regs->grxstsp; -+ //DWC_PRINT("GRXSTSP @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->core_global_regs->grxfsiz; -+ DWC_PRINT("GRXFSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->core_global_regs->gnptxfsiz; -+ DWC_PRINT("GNPTXFSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->core_global_regs->gnptxsts; -+ DWC_PRINT("GNPTXSTS @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->core_global_regs->gi2cctl; -+ DWC_PRINT("GI2CCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->core_global_regs->gpvndctl; -+ DWC_PRINT("GPVNDCTL @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->core_global_regs->ggpio; -+ DWC_PRINT("GGPIO @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->core_global_regs->guid; -+ DWC_PRINT("GUID @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->core_global_regs->gsnpsid; -+ DWC_PRINT("GSNPSID @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->core_global_regs->ghwcfg1; -+ DWC_PRINT("GHWCFG1 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->core_global_regs->ghwcfg2; -+ DWC_PRINT("GHWCFG2 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->core_global_regs->ghwcfg3; -+ DWC_PRINT("GHWCFG3 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->core_global_regs->ghwcfg4; -+ DWC_PRINT("GHWCFG4 @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ addr=&core_if->core_global_regs->hptxfsiz; -+ DWC_PRINT("HPTXFSIZ @0x%08X : 0x%08X\n",(uint32_t)addr,dwc_read_reg32(addr)); -+ -+ for (i=0; i<core_if->hwcfg4.b.num_dev_perio_in_ep; i++) -+ { -+ addr=&core_if->core_global_regs->dptxfsiz_dieptxf[i]; -+ DWC_PRINT("DPTXFSIZ[%d] @0x%08X : 0x%08X\n",i,(uint32_t)addr,dwc_read_reg32(addr)); -+ } -+} -+ -+/** -+ * Flush a Tx FIFO. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param num Tx FIFO to flush.
-+ */ -+void dwc_otg_flush_tx_fifo(dwc_otg_core_if_t *core_if, -+ const int num) -+{ -+ dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs; -+ volatile grstctl_t greset = { .d32 = 0}; -+ int count = 0; -+ -+ DWC_DEBUGPL((DBG_CIL|DBG_PCDV), "Flush Tx FIFO %d\n", num); -+ -+ greset.b.txfflsh = 1; -+ greset.b.txfnum = num; -+ dwc_write_reg32(&global_regs->grstctl, greset.d32); -+ -+ do { -+ greset.d32 = dwc_read_reg32(&global_regs->grstctl); -+ if (++count > 10000) { -+ DWC_WARN("%s() HANG! GRSTCTL=%0x GNPTXSTS=0x%08x\n", -+ __func__, greset.d32, -+ dwc_read_reg32(&global_regs->gnptxsts)); -+ break; -+ } -+ } -+ while (greset.b.txfflsh == 1); -+ -+ /* Wait for 3 PHY Clocks*/ -+ UDELAY(1); -+} -+ -+/** -+ * Flush Rx FIFO. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ */ -+void dwc_otg_flush_rx_fifo(dwc_otg_core_if_t *core_if) -+{ -+ dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs; -+ volatile grstctl_t greset = { .d32 = 0}; -+ int count = 0; -+ -+ DWC_DEBUGPL((DBG_CIL|DBG_PCDV), "%s\n", __func__); -+ /* -+ * -+ */ -+ greset.b.rxfflsh = 1; -+ dwc_write_reg32(&global_regs->grstctl, greset.d32); -+ -+ do { -+ greset.d32 = dwc_read_reg32(&global_regs->grstctl); -+ if (++count > 10000) { -+ DWC_WARN("%s() HANG! GRSTCTL=%0x\n", __func__, -+ greset.d32); -+ break; -+ } -+ } -+ while (greset.b.rxfflsh == 1); -+ -+ /* Wait for 3 PHY Clocks*/ -+ UDELAY(1); -+} -+ -+/** -+ * Do core a soft reset of the core. Be careful with this because it -+ * resets all the internal state machines of the core. -+ */ -+void dwc_otg_core_reset(dwc_otg_core_if_t *core_if) -+{ -+ dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs; -+ volatile grstctl_t greset = { .d32 = 0}; -+ int count = 0; -+ -+ DWC_DEBUGPL(DBG_CILV, "%s\n", __func__); -+ /* Wait for AHB master IDLE state. */ -+ do { -+ UDELAY(10); -+ greset.d32 = dwc_read_reg32(&global_regs->grstctl); -+ if (++count > 100000) { -+ DWC_WARN("%s() HANG! AHB Idle GRSTCTL=%0x\n", __func__, -+ greset.d32); -+ return; -+ } -+ } -+ while (greset.b.ahbidle == 0); -+ -+ /* Core Soft Reset */ -+ count = 0; -+ greset.b.csftrst = 1; -+ dwc_write_reg32(&global_regs->grstctl, greset.d32); -+ do { -+ greset.d32 = dwc_read_reg32(&global_regs->grstctl); -+ if (++count > 10000) { -+ DWC_WARN("%s() HANG! Soft Reset GRSTCTL=%0x\n", __func__, -+ greset.d32); -+ break; -+ } -+ } -+ while (greset.b.csftrst == 1); -+ -+ /* Wait for 3 PHY Clocks*/ -+ MDELAY(100); -+} -+ -+ -+ -+/** -+ * Register HCD callbacks. The callbacks are used to start and stop -+ * the HCD for interrupt processing. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param cb the HCD callback structure. -+ * @param p pointer to be passed to callback function (usb_hcd*). -+ */ -+void dwc_otg_cil_register_hcd_callbacks(dwc_otg_core_if_t *core_if, -+ dwc_otg_cil_callbacks_t *cb, -+ void *p) -+{ -+ core_if->hcd_cb = cb; -+ cb->p = p; -+} -+ -+/** -+ * Register PCD callbacks. The callbacks are used to start and stop -+ * the PCD for interrupt processing. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param cb the PCD callback structure. -+ * @param p pointer to be passed to callback function (pcd*). 
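The FIFO flush and soft-reset routines above all follow the same pattern: set a self-clearing GRSTCTL bit, then poll it with a bounded loop so a wedged core cannot hang the caller. A generic stand-alone sketch of that pattern (the callback signature and names are illustrative, not driver symbols):

#include <stdbool.h>

/* Poll until done(ctx) reports true or max_tries expires; returns success. */
static bool poll_until(bool (*done)(void *ctx), void *ctx, unsigned int max_tries)
{
	unsigned int tries;

	for (tries = 0; tries < max_tries; tries++) {
		if (done(ctx))
			return true;
	}
	return false;	/* caller warns and bails out, as dwc_otg_core_reset() does */
}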
-+ */ -+void dwc_otg_cil_register_pcd_callbacks(dwc_otg_core_if_t *core_if, -+ dwc_otg_cil_callbacks_t *cb, -+ void *p) -+{ -+ core_if->pcd_cb = cb; -+ cb->p = p; -+} -+ -+#ifdef DWC_EN_ISOC -+ -+/** -+ * This function writes isoc data per 1 (micro)frame into tx fifo -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param ep The EP to start the transfer on. -+ * -+ */ -+void write_isoc_frame_data(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) -+{ -+ dwc_otg_dev_in_ep_regs_t *ep_regs; -+ dtxfsts_data_t txstatus = {.d32 = 0}; -+ uint32_t len = 0; -+ uint32_t dwords; -+ -+ ep->xfer_len = ep->data_per_frame; -+ ep->xfer_count = 0; -+ -+ ep_regs = core_if->dev_if->in_ep_regs[ep->num]; -+ -+ len = ep->xfer_len - ep->xfer_count; -+ -+ if (len > ep->maxpacket) { -+ len = ep->maxpacket; -+ } -+ -+ dwords = (len + 3)/4; -+ -+ /* While there is space in the queue and space in the FIFO and -+ * More data to tranfer, Write packets to the Tx FIFO */ -+ txstatus.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dtxfsts); -+ DWC_DEBUGPL(DBG_PCDV, "b4 dtxfsts[%d]=0x%08x\n",ep->num,txstatus.d32); -+ -+ while (txstatus.b.txfspcavail > dwords && -+ ep->xfer_count < ep->xfer_len && -+ ep->xfer_len != 0) { -+ /* Write the FIFO */ -+ dwc_otg_ep_write_packet(core_if, ep, 0); -+ -+ len = ep->xfer_len - ep->xfer_count; -+ if (len > ep->maxpacket) { -+ len = ep->maxpacket; -+ } -+ -+ dwords = (len + 3)/4; -+ txstatus.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dtxfsts); -+ DWC_DEBUGPL(DBG_PCDV,"dtxfsts[%d]=0x%08x\n", ep->num, txstatus.d32); -+ } -+} -+ -+ -+/** -+ * This function initializes a descriptor chain for Isochronous transfer -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param ep The EP to start the transfer on. -+ * -+ */ -+void dwc_otg_iso_ep_start_frm_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) -+{ -+ deptsiz_data_t deptsiz = { .d32 = 0 }; -+ depctl_data_t depctl = { .d32 = 0 }; -+ dsts_data_t dsts = { .d32 = 0 }; -+ volatile uint32_t *addr; -+ -+ if(ep->is_in) { -+ addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl; -+ } else { -+ addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl; -+ } -+ -+ ep->xfer_len = ep->data_per_frame; -+ ep->xfer_count = 0; -+ ep->xfer_buff = ep->cur_pkt_addr; -+ ep->dma_addr = ep->cur_pkt_dma_addr; -+ -+ if(ep->is_in) { -+ /* Program the transfer size and packet count -+ * as follows: xfersize = N * maxpacket + -+ * short_packet pktcnt = N + (short_packet -+ * exist ? 
1 : 0) -+ */ -+ deptsiz.b.xfersize = ep->xfer_len; -+ deptsiz.b.pktcnt = -+ (ep->xfer_len - 1 + ep->maxpacket) / -+ ep->maxpacket; -+ deptsiz.b.mc = deptsiz.b.pktcnt; -+ dwc_write_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dieptsiz, deptsiz.d32); -+ -+ /* Write the DMA register */ -+ if (core_if->dma_enable) { -+ dwc_write_reg32 (&(core_if->dev_if->in_ep_regs[ep->num]->diepdma), (uint32_t)ep->dma_addr); -+ } -+ } else { -+ deptsiz.b.pktcnt = -+ (ep->xfer_len + (ep->maxpacket - 1)) / -+ ep->maxpacket; -+ deptsiz.b.xfersize = deptsiz.b.pktcnt * ep->maxpacket; -+ -+ dwc_write_reg32(&core_if->dev_if->out_ep_regs[ep->num]->doeptsiz, deptsiz.d32); -+ -+ if (core_if->dma_enable) { -+ dwc_write_reg32 (&(core_if->dev_if->out_ep_regs[ep->num]->doepdma), -+ (uint32_t)ep->dma_addr); -+ } -+ } -+ -+ -+ /** Enable endpoint, clear nak */ -+ -+ depctl.d32 = 0; -+ if(ep->bInterval == 1) { -+ dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts); -+ ep->next_frame = dsts.b.soffn + ep->bInterval; -+ -+ if(ep->next_frame & 0x1) { -+ depctl.b.setd1pid = 1; -+ } else { -+ depctl.b.setd0pid = 1; -+ } -+ } else { -+ ep->next_frame += ep->bInterval; -+ -+ if(ep->next_frame & 0x1) { -+ depctl.b.setd1pid = 1; -+ } else { -+ depctl.b.setd0pid = 1; -+ } -+ } -+ depctl.b.epena = 1; -+ depctl.b.cnak = 1; -+ -+ dwc_modify_reg32(addr, 0, depctl.d32); -+ depctl.d32 = dwc_read_reg32(addr); -+ -+ if(ep->is_in && core_if->dma_enable == 0) { -+ write_isoc_frame_data(core_if, ep); -+ } -+ -+} -+ -+#endif //DWC_EN_ISOC ---- /dev/null -+++ b/drivers/usb/dwc_otg/dwc_otg_cil.h -@@ -0,0 +1,1098 @@ -+/* ========================================================================== -+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_cil.h $ -+ * $Revision: 1.2 $ -+ * $Date: 2008-11-21 05:39:15 $ -+ * $Change: 1099526 $ -+ * -+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, -+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless -+ * otherwise expressly agreed to in writing between Synopsys and you. -+ * -+ * The Software IS NOT an item of Licensed Software or Licensed Product under -+ * any End User Software License Agreement or Agreement for Licensed Product -+ * with Synopsys or any supplement thereto. You are permitted to use and -+ * redistribute this Software in source and binary forms, with or without -+ * modification, provided that redistributions of source code must retain this -+ * notice. You may not view, use, disclose, copy or distribute this file or -+ * any information contained herein except pursuant to this license grant from -+ * Synopsys. If you do not agree with this notice, including the disclaimer -+ * below, then you are not authorized to use the Software. -+ * -+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, -+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -+ * DAMAGE. 
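The IN/OUT programming in dwc_otg_iso_ep_start_frm_transfer() above derives the packet count from the transfer length by rounding up to whole max-packet units. A standalone worked example of that arithmetic (not driver code, example values only):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t xfer_len = 1500, maxpacket = 512;

	/* IN EP: pktcnt = ceil(xfer_len / maxpacket), xfersize = xfer_len */
	uint32_t in_pktcnt = (xfer_len - 1 + maxpacket) / maxpacket;	/* 3 */

	/* OUT EP: pktcnt rounded up, xfersize padded to whole packets */
	uint32_t out_pktcnt = (xfer_len + (maxpacket - 1)) / maxpacket;	/* 3 */
	uint32_t out_xfersize = out_pktcnt * maxpacket;			/* 1536 */

	printf("IN pktcnt=%u OUT pktcnt=%u OUT xfersize=%u\n",
	       in_pktcnt, out_pktcnt, out_xfersize);
	return 0;
}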
-+ * ========================================================================== */ -+ -+#if !defined(__DWC_CIL_H__) -+#define __DWC_CIL_H__ -+ -+#include -+#include -+#include -+ -+#include "linux/dwc_otg_plat.h" -+#include "dwc_otg_regs.h" -+#ifdef DEBUG -+#include "linux/timer.h" -+#endif -+ -+/** -+ * @file -+ * This file contains the interface to the Core Interface Layer. -+ */ -+ -+ -+/** Macros defined for DWC OTG HW Release verison */ -+#define OTG_CORE_REV_2_00 0x4F542000 -+#define OTG_CORE_REV_2_60a 0x4F54260A -+#define OTG_CORE_REV_2_71a 0x4F54271A -+#define OTG_CORE_REV_2_72a 0x4F54272A -+ -+/** -+*/ -+typedef struct iso_pkt_info -+{ -+ uint32_t offset; -+ uint32_t length; -+ int32_t status; -+} iso_pkt_info_t; -+/** -+ * The dwc_ep structure represents the state of a single -+ * endpoint when acting in device mode. It contains the data items -+ * needed for an endpoint to be activated and transfer packets. -+ */ -+typedef struct dwc_ep -+{ -+ /** EP number used for register address lookup */ -+ uint8_t num; -+ /** EP direction 0 = OUT */ -+ unsigned is_in : 1; -+ /** EP active. */ -+ unsigned active : 1; -+ -+ /** Periodic Tx FIFO # for IN EPs For INTR EP set to 0 to use non-periodic Tx FIFO -+ If dedicated Tx FIFOs are enabled for all IN Eps - Tx FIFO # FOR IN EPs*/ -+ unsigned tx_fifo_num : 4; -+ /** EP type: 0 - Control, 1 - ISOC, 2 - BULK, 3 - INTR */ -+ unsigned type : 2; -+#define DWC_OTG_EP_TYPE_CONTROL 0 -+#define DWC_OTG_EP_TYPE_ISOC 1 -+#define DWC_OTG_EP_TYPE_BULK 2 -+#define DWC_OTG_EP_TYPE_INTR 3 -+ -+ /** DATA start PID for INTR and BULK EP */ -+ unsigned data_pid_start : 1; -+ /** Frame (even/odd) for ISOC EP */ -+ unsigned even_odd_frame : 1; -+ /** Max Packet bytes */ -+ unsigned maxpacket : 11; -+ -+ /** Max Transfer size */ -+ unsigned maxxfer : 16; -+ -+ /** @name Transfer state */ -+ /** @{ */ -+ -+ /** -+ * Pointer to the beginning of the transfer buffer -- do not modify -+ * during transfer. -+ */ -+ -+ uint32_t dma_addr; -+ -+ uint32_t dma_desc_addr; -+ dwc_otg_dma_desc_t* desc_addr; -+ -+ -+ uint8_t *start_xfer_buff; -+ /** pointer to the transfer buffer */ -+ uint8_t *xfer_buff; -+ /** Number of bytes to transfer */ -+ unsigned xfer_len : 19; -+ /** Number of bytes transferred. 
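The dwc_ep structure above stores the endpoint type in a 2-bit field using the DWC_OTG_EP_TYPE_* values. A small debug-only helper (not part of the driver) that maps those values to names could look like this:

static const char *ep_type_str(unsigned type)
{
	switch (type) {
	case DWC_OTG_EP_TYPE_CONTROL:	return "control";
	case DWC_OTG_EP_TYPE_ISOC:	return "isochronous";
	case DWC_OTG_EP_TYPE_BULK:	return "bulk";
	case DWC_OTG_EP_TYPE_INTR:	return "interrupt";
	default:			return "unknown";
	}
}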
*/ -+ unsigned xfer_count : 19; -+ /** Sent ZLP */ -+ unsigned sent_zlp : 1; -+ /** Total len for control transfer */ -+ unsigned total_len : 19; -+ -+ /** stall clear flag */ -+ unsigned stall_clear_flag : 1; -+ -+ /** Allocated DMA Desc count */ -+ uint32_t desc_cnt; -+ -+#ifdef DWC_EN_ISOC -+ /** -+ * Variables specific for ISOC EPs -+ * -+ */ -+ /** DMA addresses of ISOC buffers */ -+ uint32_t dma_addr0; -+ uint32_t dma_addr1; -+ -+ uint32_t iso_dma_desc_addr; -+ dwc_otg_dma_desc_t* iso_desc_addr; -+ -+ /** pointer to the transfer buffers */ -+ uint8_t *xfer_buff0; -+ uint8_t *xfer_buff1; -+ -+ /** number of ISOC Buffer is processing */ -+ uint32_t proc_buf_num; -+ /** Interval of ISOC Buffer processing */ -+ uint32_t buf_proc_intrvl; -+ /** Data size for regular frame */ -+ uint32_t data_per_frame; -+ -+ /* todo - pattern data support is to be implemented in the future */ -+ /** Data size for pattern frame */ -+ uint32_t data_pattern_frame; -+ /** Frame number of pattern data */ -+ uint32_t sync_frame; -+ -+ /** bInterval */ -+ uint32_t bInterval; -+ /** ISO Packet number per frame */ -+ uint32_t pkt_per_frm; -+ /** Next frame num for which will be setup DMA Desc */ -+ uint32_t next_frame; -+ /** Number of packets per buffer processing */ -+ uint32_t pkt_cnt; -+ /** Info for all isoc packets */ -+ iso_pkt_info_t *pkt_info; -+ /** current pkt number */ -+ uint32_t cur_pkt; -+ /** current pkt number */ -+ uint8_t *cur_pkt_addr; -+ /** current pkt number */ -+ uint32_t cur_pkt_dma_addr; -+#endif //DWC_EN_ISOC -+/** @} */ -+} dwc_ep_t; -+ -+/* -+ * Reasons for halting a host channel. -+ */ -+typedef enum dwc_otg_halt_status -+{ -+ DWC_OTG_HC_XFER_NO_HALT_STATUS, -+ DWC_OTG_HC_XFER_COMPLETE, -+ DWC_OTG_HC_XFER_URB_COMPLETE, -+ DWC_OTG_HC_XFER_ACK, -+ DWC_OTG_HC_XFER_NAK, -+ DWC_OTG_HC_XFER_NYET, -+ DWC_OTG_HC_XFER_STALL, -+ DWC_OTG_HC_XFER_XACT_ERR, -+ DWC_OTG_HC_XFER_FRAME_OVERRUN, -+ DWC_OTG_HC_XFER_BABBLE_ERR, -+ DWC_OTG_HC_XFER_DATA_TOGGLE_ERR, -+ DWC_OTG_HC_XFER_AHB_ERR, -+ DWC_OTG_HC_XFER_PERIODIC_INCOMPLETE, -+ DWC_OTG_HC_XFER_URB_DEQUEUE -+} dwc_otg_halt_status_e; -+ -+/** -+ * Host channel descriptor. This structure represents the state of a single -+ * host channel when acting in host mode. It contains the data items needed to -+ * transfer packets to an endpoint via a host channel. -+ */ -+typedef struct dwc_hc -+{ -+ /** Host channel number used for register address lookup */ -+ uint8_t hc_num; -+ -+ /** Device to access */ -+ unsigned dev_addr : 7; -+ -+ /** EP to access */ -+ unsigned ep_num : 4; -+ -+ /** EP direction. 0: OUT, 1: IN */ -+ unsigned ep_is_in : 1; -+ -+ /** -+ * EP speed. -+ * One of the following values: -+ * - DWC_OTG_EP_SPEED_LOW -+ * - DWC_OTG_EP_SPEED_FULL -+ * - DWC_OTG_EP_SPEED_HIGH -+ */ -+ unsigned speed : 2; -+#define DWC_OTG_EP_SPEED_LOW 0 -+#define DWC_OTG_EP_SPEED_FULL 1 -+#define DWC_OTG_EP_SPEED_HIGH 2 -+ -+ /** -+ * Endpoint type. -+ * One of the following values: -+ * - DWC_OTG_EP_TYPE_CONTROL: 0 -+ * - DWC_OTG_EP_TYPE_ISOC: 1 -+ * - DWC_OTG_EP_TYPE_BULK: 2 -+ * - DWC_OTG_EP_TYPE_INTR: 3 -+ */ -+ unsigned ep_type : 2; -+ -+ /** Max packet size in bytes */ -+ unsigned max_packet : 11; -+ -+ /** -+ * PID for initial transaction. -+ * 0: DATA0,
-+ * 1: DATA2,
-+ * 2: DATA1,
-+ * 3: MDATA (non-Control EP), -+ * SETUP (Control EP) -+ */ -+ unsigned data_pid_start : 2; -+#define DWC_OTG_HC_PID_DATA0 0 -+#define DWC_OTG_HC_PID_DATA2 1 -+#define DWC_OTG_HC_PID_DATA1 2 -+#define DWC_OTG_HC_PID_MDATA 3 -+#define DWC_OTG_HC_PID_SETUP 3 -+ -+ /** Number of periodic transactions per (micro)frame */ -+ unsigned multi_count: 2; -+ -+ /** @name Transfer State */ -+ /** @{ */ -+ -+ /** Pointer to the current transfer buffer position. */ -+ uint8_t *xfer_buff; -+ /** Total number of bytes to transfer. */ -+ uint32_t xfer_len; -+ /** Number of bytes transferred so far. */ -+ uint32_t xfer_count; -+ /** Packet count at start of transfer.*/ -+ uint16_t start_pkt_count; -+ -+ /** -+ * Flag to indicate whether the transfer has been started. Set to 1 if -+ * it has been started, 0 otherwise. -+ */ -+ uint8_t xfer_started; -+ -+ /** -+ * Set to 1 to indicate that a PING request should be issued on this -+ * channel. If 0, process normally. -+ */ -+ uint8_t do_ping; -+ -+ /** -+ * Set to 1 to indicate that the error count for this transaction is -+ * non-zero. Set to 0 if the error count is 0. -+ */ -+ uint8_t error_state; -+ -+ /** -+ * Set to 1 to indicate that this channel should be halted the next -+ * time a request is queued for the channel. This is necessary in -+ * slave mode if no request queue space is available when an attempt -+ * is made to halt the channel. -+ */ -+ uint8_t halt_on_queue; -+ -+ /** -+ * Set to 1 if the host channel has been halted, but the core is not -+ * finished flushing queued requests. Otherwise 0. -+ */ -+ uint8_t halt_pending; -+ -+ /** -+ * Reason for halting the host channel. -+ */ -+ dwc_otg_halt_status_e halt_status; -+ -+ /* -+ * Split settings for the host channel -+ */ -+ uint8_t do_split; /**< Enable split for the channel */ -+ uint8_t complete_split; /**< Enable complete split */ -+ uint8_t hub_addr; /**< Address of high speed hub */ -+ -+ uint8_t port_addr; /**< Port of the low/full speed device */ -+ /** Split transaction position -+ * One of the following values: -+ * - DWC_HCSPLIT_XACTPOS_MID -+ * - DWC_HCSPLIT_XACTPOS_BEGIN -+ * - DWC_HCSPLIT_XACTPOS_END -+ * - DWC_HCSPLIT_XACTPOS_ALL */ -+ uint8_t xact_pos; -+ -+ /** Set when the host channel does a short read. */ -+ uint8_t short_read; -+ -+ /** -+ * Number of requests issued for this channel since it was assigned to -+ * the current transfer (not counting PINGs). -+ */ -+ uint8_t requests; -+ -+ /** -+ * Queue Head for the transfer being processed by this channel. -+ */ -+ struct dwc_otg_qh *qh; -+ -+ /** @} */ -+ -+ /** Entry in list of host channels. */ -+ struct list_head hc_list_entry; -+} dwc_hc_t; -+ -+/** -+ * The following parameters may be specified when starting the module. These -+ * parameters define how the DWC_otg controller should be configured. -+ * Parameter values are passed to the CIL initialization function -+ * dwc_otg_cil_init. -+ */ -+typedef struct dwc_otg_core_params -+{ -+ int32_t opt; -+#define dwc_param_opt_default 1 -+ -+ /** -+ * Specifies the OTG capabilities. The driver will automatically -+ * detect the value for this parameter if none is specified. 
-+ * 0 - HNP and SRP capable (default) -+ * 1 - SRP Only capable -+ * 2 - No HNP/SRP capable -+ */ -+ int32_t otg_cap; -+#define DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE 0 -+#define DWC_OTG_CAP_PARAM_SRP_ONLY_CAPABLE 1 -+#define DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE 2 -+#define dwc_param_otg_cap_default DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE -+ -+ /** -+ * Specifies whether to use slave or DMA mode for accessing the data -+ * FIFOs. The driver will automatically detect the value for this -+ * parameter if none is specified. -+ * 0 - Slave -+ * 1 - DMA (default, if available) -+ */ -+ int32_t dma_enable; -+#define dwc_param_dma_enable_default 1 -+ -+ /** -+ * When DMA mode is enabled specifies whether to use address DMA or DMA Descritor mode for accessing the data -+ * FIFOs in device mode. The driver will automatically detect the value for this -+ * parameter if none is specified. -+ * 0 - address DMA -+ * 1 - DMA Descriptor(default, if available) -+ */ -+ int32_t dma_desc_enable; -+#define dwc_param_dma_desc_enable_default 0 -+ /** The DMA Burst size (applicable only for External DMA -+ * Mode). 1, 4, 8 16, 32, 64, 128, 256 (default 32) -+ */ -+ int32_t dma_burst_size; /* Translate this to GAHBCFG values */ -+#define dwc_param_dma_burst_size_default 32 -+ -+ /** -+ * Specifies the maximum speed of operation in host and device mode. -+ * The actual speed depends on the speed of the attached device and -+ * the value of phy_type. The actual speed depends on the speed of the -+ * attached device. -+ * 0 - High Speed (default) -+ * 1 - Full Speed -+ */ -+ int32_t speed; -+#define dwc_param_speed_default 0 -+#define DWC_SPEED_PARAM_HIGH 0 -+#define DWC_SPEED_PARAM_FULL 1 -+ -+ /** Specifies whether low power mode is supported when attached -+ * to a Full Speed or Low Speed device in host mode. -+ * 0 - Don't support low power mode (default) -+ * 1 - Support low power mode -+ */ -+ int32_t host_support_fs_ls_low_power; -+#define dwc_param_host_support_fs_ls_low_power_default 0 -+ -+ /** Specifies the PHY clock rate in low power mode when connected to a -+ * Low Speed device in host mode. This parameter is applicable only if -+ * HOST_SUPPORT_FS_LS_LOW_POWER is enabled. If PHY_TYPE is set to FS -+ * then defaults to 6 MHZ otherwise 48 MHZ. -+ * -+ * 0 - 48 MHz -+ * 1 - 6 MHz -+ */ -+ int32_t host_ls_low_power_phy_clk; -+#define dwc_param_host_ls_low_power_phy_clk_default 0 -+#define DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ 0 -+#define DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ 1 -+ -+ /** -+ * 0 - Use cC FIFO size parameters -+ * 1 - Allow dynamic FIFO sizing (default) -+ */ -+ int32_t enable_dynamic_fifo; -+#define dwc_param_enable_dynamic_fifo_default 1 -+ -+ /** Total number of 4-byte words in the data FIFO memory. This -+ * memory includes the Rx FIFO, non-periodic Tx FIFO, and periodic -+ * Tx FIFOs. -+ * 32 to 32768 (default 8192) -+ * Note: The total FIFO memory depth in the FPGA configuration is 8192. -+ */ -+ int32_t data_fifo_size; -+#define dwc_param_data_fifo_size_default 8192 -+ -+ /** Number of 4-byte words in the Rx FIFO in device mode when dynamic -+ * FIFO sizing is enabled. -+ * 16 to 32768 (default 1064) -+ */ -+ int32_t dev_rx_fifo_size; -+#define dwc_param_dev_rx_fifo_size_default 1064 -+ -+ /** Number of 4-byte words in the non-periodic Tx FIFO in device mode -+ * when dynamic FIFO sizing is enabled. 
-+ * 16 to 32768 (default 1024) -+ */ -+ int32_t dev_nperio_tx_fifo_size; -+#define dwc_param_dev_nperio_tx_fifo_size_default 1024 -+ -+ /** Number of 4-byte words in each of the periodic Tx FIFOs in device -+ * mode when dynamic FIFO sizing is enabled. -+ * 4 to 768 (default 256) -+ */ -+ uint32_t dev_perio_tx_fifo_size[MAX_PERIO_FIFOS]; -+#define dwc_param_dev_perio_tx_fifo_size_default 256 -+ -+ /** Number of 4-byte words in the Rx FIFO in host mode when dynamic -+ * FIFO sizing is enabled. -+ * 16 to 32768 (default 1024) -+ */ -+ int32_t host_rx_fifo_size; -+#define dwc_param_host_rx_fifo_size_default 1024 -+ -+ /** Number of 4-byte words in the non-periodic Tx FIFO in host mode -+ * when Dynamic FIFO sizing is enabled in the core. -+ * 16 to 32768 (default 1024) -+ */ -+ int32_t host_nperio_tx_fifo_size; -+#define dwc_param_host_nperio_tx_fifo_size_default 1024 -+ -+ /** Number of 4-byte words in the host periodic Tx FIFO when dynamic -+ * FIFO sizing is enabled. -+ * 16 to 32768 (default 1024) -+ */ -+ int32_t host_perio_tx_fifo_size; -+#define dwc_param_host_perio_tx_fifo_size_default 1024 -+ -+ /** The maximum transfer size supported in bytes. -+ * 2047 to 65,535 (default 65,535) -+ */ -+ int32_t max_transfer_size; -+#define dwc_param_max_transfer_size_default 65535 -+ -+ /** The maximum number of packets in a transfer. -+ * 15 to 511 (default 511) -+ */ -+ int32_t max_packet_count; -+#define dwc_param_max_packet_count_default 511 -+ -+ /** The number of host channel registers to use. -+ * 1 to 16 (default 12) -+ * Note: The FPGA configuration supports a maximum of 12 host channels. -+ */ -+ int32_t host_channels; -+#define dwc_param_host_channels_default 12 -+ -+ /** The number of endpoints in addition to EP0 available for device -+ * mode operations. -+ * 1 to 15 (default 6 IN and OUT) -+ * Note: The FPGA configuration supports a maximum of 6 IN and OUT -+ * endpoints in addition to EP0. -+ */ -+ int32_t dev_endpoints; -+#define dwc_param_dev_endpoints_default 6 -+ -+ /** -+ * Specifies the type of PHY interface to use. By default, the driver -+ * will automatically detect the phy_type. -+ * -+ * 0 - Full Speed PHY -+ * 1 - UTMI+ (default) -+ * 2 - ULPI -+ */ -+ int32_t phy_type; -+#define DWC_PHY_TYPE_PARAM_FS 0 -+#define DWC_PHY_TYPE_PARAM_UTMI 1 -+#define DWC_PHY_TYPE_PARAM_ULPI 2 -+#define dwc_param_phy_type_default DWC_PHY_TYPE_PARAM_UTMI -+ -+ /** -+ * Specifies the UTMI+ Data Width. This parameter is -+ * applicable for a PHY_TYPE of UTMI+ or ULPI. (For a ULPI -+ * PHY_TYPE, this parameter indicates the data width between -+ * the MAC and the ULPI Wrapper.) Also, this parameter is -+ * applicable only if the OTG_HSPHY_WIDTH cC parameter was set -+ * to "8 and 16 bits", meaning that the core has been -+ * configured to work at either data path width. -+ * -+ * 8 or 16 bits (default 16) -+ */ -+ int32_t phy_utmi_width; -+#define dwc_param_phy_utmi_width_default 16 -+ -+ /** -+ * Specifies whether the ULPI operates at double or single -+ * data rate. This parameter is only applicable if PHY_TYPE is -+ * ULPI. -+ * -+ * 0 - single data rate ULPI interface with 8 bit wide data -+ * bus (default) -+ * 1 - double data rate ULPI interface with 4 bit wide data -+ * bus -+ */ -+ int32_t phy_ulpi_ddr; -+#define dwc_param_phy_ulpi_ddr_default 0 -+ -+ /** -+ * Specifies whether to use the internal or external supply to -+ * drive the vbus with a ULPI phy. 
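Most of the parameters above document an allowed range next to their default, for example the FIFO sizes in 4-byte words and max_transfer_size in bytes. The sketch below shows the kind of range check a caller could apply before handing a dwc_otg_core_params_t to the CIL; it is illustrative and not the driver's own parameter validation.

static int fifo_words_param_ok(int32_t words)
{
	/* FIFO size parameters above are documented as 16..32768 words */
	return words >= 16 && words <= 32768;
}

static int max_transfer_size_ok(int32_t bytes)
{
	/* documented range: 2047..65535 bytes */
	return bytes >= 2047 && bytes <= 65535;
}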
-+ */ -+ int32_t phy_ulpi_ext_vbus; -+#define DWC_PHY_ULPI_INTERNAL_VBUS 0 -+#define DWC_PHY_ULPI_EXTERNAL_VBUS 1 -+#define dwc_param_phy_ulpi_ext_vbus_default DWC_PHY_ULPI_INTERNAL_VBUS -+ -+ /** -+ * Specifies whether to use the I2Cinterface for full speed PHY. This -+ * parameter is only applicable if PHY_TYPE is FS. -+ * 0 - No (default) -+ * 1 - Yes -+ */ -+ int32_t i2c_enable; -+#define dwc_param_i2c_enable_default 0 -+ -+ int32_t ulpi_fs_ls; -+#define dwc_param_ulpi_fs_ls_default 0 -+ -+ int32_t ts_dline; -+#define dwc_param_ts_dline_default 0 -+ -+ /** -+ * Specifies whether dedicated transmit FIFOs are -+ * enabled for non periodic IN endpoints in device mode -+ * 0 - No -+ * 1 - Yes -+ */ -+ int32_t en_multiple_tx_fifo; -+#define dwc_param_en_multiple_tx_fifo_default 1 -+ -+ /** Number of 4-byte words in each of the Tx FIFOs in device -+ * mode when dynamic FIFO sizing is enabled. -+ * 4 to 768 (default 256) -+ */ -+ uint32_t dev_tx_fifo_size[MAX_TX_FIFOS]; -+#define dwc_param_dev_tx_fifo_size_default 256 -+ -+ /** Thresholding enable flag- -+ * bit 0 - enable non-ISO Tx thresholding -+ * bit 1 - enable ISO Tx thresholding -+ * bit 2 - enable Rx thresholding -+ */ -+ uint32_t thr_ctl; -+#define dwc_param_thr_ctl_default 0 -+ -+ /** Thresholding length for Tx -+ * FIFOs in 32 bit DWORDs -+ */ -+ uint32_t tx_thr_length; -+#define dwc_param_tx_thr_length_default 64 -+ -+ /** Thresholding length for Rx -+ * FIFOs in 32 bit DWORDs -+ */ -+ uint32_t rx_thr_length; -+#define dwc_param_rx_thr_length_default 64 -+ -+ /** Per Transfer Interrupt -+ * mode enable flag -+ * 1 - Enabled -+ * 0 - Disabled -+ */ -+ uint32_t pti_enable; -+#define dwc_param_pti_enable_default 0 -+ -+ /** Molti Processor Interrupt -+ * mode enable flag -+ * 1 - Enabled -+ * 0 - Disabled -+ */ -+ uint32_t mpi_enable; -+#define dwc_param_mpi_enable_default 0 -+ -+} dwc_otg_core_params_t; -+ -+#ifdef DEBUG -+struct dwc_otg_core_if; -+typedef struct hc_xfer_info -+{ -+ struct dwc_otg_core_if *core_if; -+ dwc_hc_t *hc; -+} hc_xfer_info_t; -+#endif -+ -+/** -+ * The dwc_otg_core_if structure contains information needed to manage -+ * the DWC_otg controller acting in either host or device mode. It -+ * represents the programming view of the controller as a whole. -+ */ -+typedef struct dwc_otg_core_if -+{ -+ /** Parameters that define how the core should be configured.*/ -+ dwc_otg_core_params_t *core_params; -+ -+ /** Core Global registers starting at offset 000h. */ -+ dwc_otg_core_global_regs_t *core_global_regs; -+ -+ /** Device-specific information */ -+ dwc_otg_dev_if_t *dev_if; -+ /** Host-specific information */ -+ dwc_otg_host_if_t *host_if; -+ -+ /** Value from SNPSID register */ -+ uint32_t snpsid; -+ -+ /* -+ * Set to 1 if the core PHY interface bits in USBCFG have been -+ * initialized. 
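The thr_ctl field above packs the three thresholding enables into one word: bit 0 for non-ISO Tx, bit 1 for ISO Tx, bit 2 for Rx. The macro names in the sketch below are hypothetical; the driver only documents the bit positions.

#define THR_CTL_NON_ISO_TX_EN	(1u << 0)
#define THR_CTL_ISO_TX_EN	(1u << 1)
#define THR_CTL_RX_EN		(1u << 2)

/* e.g. enable both kinds of Tx thresholding, leave Rx thresholding off:
 *   core_params->thr_ctl = THR_CTL_NON_ISO_TX_EN | THR_CTL_ISO_TX_EN;
 */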
-+ */ -+ uint8_t phy_init_done; -+ -+ /* -+ * SRP Success flag, set by srp success interrupt in FS I2C mode -+ */ -+ uint8_t srp_success; -+ uint8_t srp_timer_started; -+ -+ /* Common configuration information */ -+ /** Power and Clock Gating Control Register */ -+ volatile uint32_t *pcgcctl; -+#define DWC_OTG_PCGCCTL_OFFSET 0xE00 -+ -+ /** Push/pop addresses for endpoints or host channels.*/ -+ uint32_t *data_fifo[MAX_EPS_CHANNELS]; -+#define DWC_OTG_DATA_FIFO_OFFSET 0x1000 -+#define DWC_OTG_DATA_FIFO_SIZE 0x1000 -+ -+ /** Total RAM for FIFOs (Bytes) */ -+ uint16_t total_fifo_size; -+ /** Size of Rx FIFO (Bytes) */ -+ uint16_t rx_fifo_size; -+ /** Size of Non-periodic Tx FIFO (Bytes) */ -+ uint16_t nperio_tx_fifo_size; -+ -+ -+ /** 1 if DMA is enabled, 0 otherwise. */ -+ uint8_t dma_enable; -+ -+ /** 1 if Descriptor DMA mode is enabled, 0 otherwise. */ -+ uint8_t dma_desc_enable; -+ -+ /** 1 if PTI Enhancement mode is enabled, 0 otherwise. */ -+ uint8_t pti_enh_enable; -+ -+ /** 1 if MPI Enhancement mode is enabled, 0 otherwise. */ -+ uint8_t multiproc_int_enable; -+ -+ /** 1 if dedicated Tx FIFOs are enabled, 0 otherwise. */ -+ uint8_t en_multiple_tx_fifo; -+ -+ /** Set to 1 if multiple packets of a high-bandwidth transfer is in -+ * process of being queued */ -+ uint8_t queuing_high_bandwidth; -+ -+ /** Hardware Configuration -- stored here for convenience.*/ -+ hwcfg1_data_t hwcfg1; -+ hwcfg2_data_t hwcfg2; -+ hwcfg3_data_t hwcfg3; -+ hwcfg4_data_t hwcfg4; -+ -+ /** Host and Device Configuration -- stored here for convenience.*/ -+ hcfg_data_t hcfg; -+ dcfg_data_t dcfg; -+ -+ /** The operational State, during transations -+ * (a_host>>a_peripherial and b_device=>b_host) this may not -+ * match the core but allows the software to determine -+ * transitions. -+ */ -+ uint8_t op_state; -+ -+ /** -+ * Set to 1 if the HCD needs to be restarted on a session request -+ * interrupt. This is required if no connector ID status change has -+ * occurred since the HCD was last disconnected. -+ */ -+ uint8_t restart_hcd_on_session_req; -+ -+ /** HCD callbacks */ -+ /** A-Device is a_host */ -+#define A_HOST (1) -+ /** A-Device is a_suspend */ -+#define A_SUSPEND (2) -+ /** A-Device is a_peripherial */ -+#define A_PERIPHERAL (3) -+ /** B-Device is operating as a Peripheral. */ -+#define B_PERIPHERAL (4) -+ /** B-Device is operating as a Host. 
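DWC_OTG_DATA_FIFO_OFFSET and DWC_OTG_DATA_FIFO_SIZE above imply a simple layout for the per-endpoint (or per-host-channel) push/pop FIFOs: each one occupies a 0x1000-byte window starting at base + 0x1000. A standalone sketch of that address calculation, illustrative rather than the driver's own init code:

#include <stdint.h>

static inline uint32_t *data_fifo_addr(uint32_t *base, unsigned int n)
{
	return (uint32_t *)((uint8_t *)base
			    + 0x1000	 /* DWC_OTG_DATA_FIFO_OFFSET */
			    + n * 0x1000 /* DWC_OTG_DATA_FIFO_SIZE   */);
}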
*/ -+#define B_HOST (5) -+ -+ /** HCD callbacks */ -+ struct dwc_otg_cil_callbacks *hcd_cb; -+ /** PCD callbacks */ -+ struct dwc_otg_cil_callbacks *pcd_cb; -+ -+ /** Device mode Periodic Tx FIFO Mask */ -+ uint32_t p_tx_msk; -+ /** Device mode Periodic Tx FIFO Mask */ -+ uint32_t tx_msk; -+ -+ /** Workqueue object used for handling several interrupts */ -+ struct workqueue_struct *wq_otg; -+ -+ /** Work object used for handling "Connector ID Status Change" Interrupt */ -+ struct work_struct w_conn_id; -+ -+ /** Work object used for handling "Wakeup Detected" Interrupt */ -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) -+ struct work_struct w_wkp; -+#else -+ struct delayed_work w_wkp; -+#endif -+ -+#ifdef DEBUG -+ uint32_t start_hcchar_val[MAX_EPS_CHANNELS]; -+ -+ hc_xfer_info_t hc_xfer_info[MAX_EPS_CHANNELS]; -+ struct timer_list hc_xfer_timer[MAX_EPS_CHANNELS]; -+ -+ uint32_t hfnum_7_samples; -+ uint64_t hfnum_7_frrem_accum; -+ uint32_t hfnum_0_samples; -+ uint64_t hfnum_0_frrem_accum; -+ uint32_t hfnum_other_samples; -+ uint64_t hfnum_other_frrem_accum; -+#endif -+ -+ -+} dwc_otg_core_if_t; -+ -+/*We must clear S3C24XX_EINTPEND external interrupt register -+ * because after clearing in this register trigerred IRQ from -+ * H/W core in kernel interrupt can be occured again before OTG -+ * handlers clear all IRQ sources of Core registers because of -+ * timing latencies and Low Level IRQ Type. -+ */ -+ -+#ifdef CONFIG_MACH_IPMATE -+#define S3C2410X_CLEAR_EINTPEND() \ -+do { \ -+ if (!dwc_otg_read_core_intr(core_if)) { \ -+ __raw_writel(1UL << 11,S3C24XX_EINTPEND); \ -+ } \ -+} while (0) -+#else -+#define S3C2410X_CLEAR_EINTPEND() do { } while (0) -+#endif -+ -+/* -+ * The following functions are functions for works -+ * using during handling some interrupts -+ */ -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) -+ -+extern void w_conn_id_status_change(void *p); -+extern void w_wakeup_detected(void *p); -+ -+#else -+ -+extern void w_conn_id_status_change(struct work_struct *p); -+extern void w_wakeup_detected(struct work_struct *p); -+ -+#endif -+ -+ -+/* -+ * The following functions support initialization of the CIL driver component -+ * and the DWC_otg controller. -+ */ -+extern dwc_otg_core_if_t *dwc_otg_cil_init(const uint32_t *_reg_base_addr, -+ dwc_otg_core_params_t *_core_params); -+extern void dwc_otg_cil_remove(dwc_otg_core_if_t *_core_if); -+extern void dwc_otg_core_init(dwc_otg_core_if_t *_core_if); -+extern void dwc_otg_core_host_init(dwc_otg_core_if_t *_core_if); -+extern void dwc_otg_core_dev_init(dwc_otg_core_if_t *_core_if); -+extern void dwc_otg_enable_global_interrupts( dwc_otg_core_if_t *_core_if ); -+extern void dwc_otg_disable_global_interrupts( dwc_otg_core_if_t *_core_if ); -+ -+/** @name Device CIL Functions -+ * The following functions support managing the DWC_otg controller in device -+ * mode. 
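The declarations above define the CIL bring-up entry points. A hedged usage sketch follows, mirroring the order used later by w_conn_id_status_change() (initialize the core, then enable global interrupts); the wrapper function and its error handling are placeholders, and the real bring-up lives in dwc_otg_driver.c.

static int example_cil_bringup(const uint32_t *reg_base,
			       dwc_otg_core_params_t *params)
{
	dwc_otg_core_if_t *core_if = dwc_otg_cil_init(reg_base, params);

	if (!core_if)
		return -1;

	dwc_otg_core_init(core_if);		/* common core setup */
	dwc_otg_enable_global_interrupts(core_if);
	return 0;
}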
-+ */ -+/**@{*/ -+extern void dwc_otg_wakeup(dwc_otg_core_if_t *_core_if); -+extern void dwc_otg_read_setup_packet (dwc_otg_core_if_t *_core_if, uint32_t *_dest); -+extern uint32_t dwc_otg_get_frame_number(dwc_otg_core_if_t *_core_if); -+extern void dwc_otg_ep0_activate(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); -+extern void dwc_otg_ep_activate(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); -+extern void dwc_otg_ep_deactivate(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); -+extern void dwc_otg_ep_start_transfer(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); -+extern void dwc_otg_ep_start_zl_transfer(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); -+extern void dwc_otg_ep0_start_transfer(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); -+extern void dwc_otg_ep0_continue_transfer(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); -+extern void dwc_otg_ep_write_packet(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep, int _dma); -+extern void dwc_otg_ep_set_stall(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); -+extern void dwc_otg_ep_clear_stall(dwc_otg_core_if_t *_core_if, dwc_ep_t *_ep); -+extern void dwc_otg_enable_device_interrupts(dwc_otg_core_if_t *_core_if); -+extern void dwc_otg_dump_dev_registers(dwc_otg_core_if_t *_core_if); -+extern void dwc_otg_dump_spram(dwc_otg_core_if_t *_core_if); -+#ifdef DWC_EN_ISOC -+extern void dwc_otg_iso_ep_start_frm_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep); -+extern void dwc_otg_iso_ep_start_buf_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep); -+#endif //DWC_EN_ISOC -+/**@}*/ -+ -+/** @name Host CIL Functions -+ * The following functions support managing the DWC_otg controller in host -+ * mode. -+ */ -+/**@{*/ -+extern void dwc_otg_hc_init(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc); -+extern void dwc_otg_hc_halt(dwc_otg_core_if_t *_core_if, -+ dwc_hc_t *_hc, -+ dwc_otg_halt_status_e _halt_status); -+extern void dwc_otg_hc_cleanup(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc); -+extern void dwc_otg_hc_start_transfer(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc); -+extern int dwc_otg_hc_continue_transfer(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc); -+extern void dwc_otg_hc_do_ping(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc); -+extern void dwc_otg_hc_write_packet(dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc); -+extern void dwc_otg_enable_host_interrupts(dwc_otg_core_if_t *_core_if); -+extern void dwc_otg_disable_host_interrupts(dwc_otg_core_if_t *_core_if); -+ -+/** -+ * This function Reads HPRT0 in preparation to modify. It keeps the -+ * WC bits 0 so that if they are read as 1, they won't clear when you -+ * write it back -+ */ -+static inline uint32_t dwc_otg_read_hprt0(dwc_otg_core_if_t *_core_if) -+{ -+ hprt0_data_t hprt0; -+ hprt0.d32 = dwc_read_reg32(_core_if->host_if->hprt0); -+ hprt0.b.prtena = 0; -+ hprt0.b.prtconndet = 0; -+ hprt0.b.prtenchng = 0; -+ hprt0.b.prtovrcurrchng = 0; -+ return hprt0.d32; -+} -+ -+extern void dwc_otg_dump_host_registers(dwc_otg_core_if_t *_core_if); -+/**@}*/ -+ -+/** @name Common CIL Functions -+ * The following functions support managing the DWC_otg controller in either -+ * device or host mode. 
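dwc_otg_read_hprt0() above zeroes the write-1-to-clear status bits so that a read-modify-write of HPRT0 cannot accidentally acknowledge a pending port event. A short sketch of the intended use, matching the port-power code in the session request handler later in this patch (assumes the driver's types):

static void example_port_power_on(dwc_otg_core_if_t *core_if)
{
	hprt0_data_t hprt0;

	hprt0.d32 = dwc_otg_read_hprt0(core_if);	/* WC bits already 0 */
	hprt0.b.prtpwr = 1;
	dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32);
}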
-+ */ -+/**@{*/ -+ -+extern void dwc_otg_read_packet(dwc_otg_core_if_t *core_if, -+ uint8_t *dest, -+ uint16_t bytes); -+ -+extern void dwc_otg_dump_global_registers(dwc_otg_core_if_t *_core_if); -+ -+extern void dwc_otg_flush_tx_fifo( dwc_otg_core_if_t *_core_if, -+ const int _num ); -+extern void dwc_otg_flush_rx_fifo( dwc_otg_core_if_t *_core_if ); -+extern void dwc_otg_core_reset( dwc_otg_core_if_t *_core_if ); -+ -+extern dwc_otg_dma_desc_t* dwc_otg_ep_alloc_desc_chain(uint32_t * dma_desc_addr, uint32_t count); -+extern void dwc_otg_ep_free_desc_chain(dwc_otg_dma_desc_t* desc_addr, uint32_t dma_desc_addr, uint32_t count); -+ -+/** -+ * This function returns the Core Interrupt register. -+ */ -+static inline uint32_t dwc_otg_read_core_intr(dwc_otg_core_if_t *_core_if) -+{ -+ return (dwc_read_reg32(&_core_if->core_global_regs->gintsts) & -+ dwc_read_reg32(&_core_if->core_global_regs->gintmsk)); -+} -+ -+/** -+ * This function returns the OTG Interrupt register. -+ */ -+static inline uint32_t dwc_otg_read_otg_intr (dwc_otg_core_if_t *_core_if) -+{ -+ return (dwc_read_reg32 (&_core_if->core_global_regs->gotgint)); -+} -+ -+/** -+ * This function reads the Device All Endpoints Interrupt register and -+ * returns the IN endpoint interrupt bits. -+ */ -+static inline uint32_t dwc_otg_read_dev_all_in_ep_intr(dwc_otg_core_if_t *core_if) -+{ -+ uint32_t v; -+ -+ if(core_if->multiproc_int_enable) { -+ v = dwc_read_reg32(&core_if->dev_if->dev_global_regs->deachint) & -+ dwc_read_reg32(&core_if->dev_if->dev_global_regs->deachintmsk); -+ } else { -+ v = dwc_read_reg32(&core_if->dev_if->dev_global_regs->daint) & -+ dwc_read_reg32(&core_if->dev_if->dev_global_regs->daintmsk); -+ } -+ return (v & 0xffff); -+ -+} -+ -+/** -+ * This function reads the Device All Endpoints Interrupt register and -+ * returns the OUT endpoint interrupt bits. 
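dwc_otg_read_dev_all_in_ep_intr() and its OUT counterpart above split one 32-bit register image: the low 16 bits carry the IN-endpoint flags and the high 16 bits the OUT-endpoint flags. A standalone worked example of decoding such a value (example value only, not driver code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t daint = 0x00050003;			/* example value        */
	uint32_t in_eps  = daint & 0xffff;		/* EP0, EP1 IN pending  */
	uint32_t out_eps = (daint & 0xffff0000) >> 16;	/* EP0, EP2 OUT pending */

	for (int ep = 0; ep < 16; ep++) {
		if (in_eps & (1u << ep))
			printf("IN  EP%d interrupt\n", ep);
		if (out_eps & (1u << ep))
			printf("OUT EP%d interrupt\n", ep);
	}
	return 0;
}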
-+ */ -+static inline uint32_t dwc_otg_read_dev_all_out_ep_intr(dwc_otg_core_if_t *core_if) -+{ -+ uint32_t v; -+ -+ if(core_if->multiproc_int_enable) { -+ v = dwc_read_reg32(&core_if->dev_if->dev_global_regs->deachint) & -+ dwc_read_reg32(&core_if->dev_if->dev_global_regs->deachintmsk); -+ } else { -+ v = dwc_read_reg32(&core_if->dev_if->dev_global_regs->daint) & -+ dwc_read_reg32(&core_if->dev_if->dev_global_regs->daintmsk); -+ } -+ -+ return ((v & 0xffff0000) >> 16); -+} -+ -+/** -+ * This function returns the Device IN EP Interrupt register -+ */ -+static inline uint32_t dwc_otg_read_dev_in_ep_intr(dwc_otg_core_if_t *core_if, -+ dwc_ep_t *ep) -+{ -+ dwc_otg_dev_if_t *dev_if = core_if->dev_if; -+ uint32_t v, msk, emp; -+ -+ if(core_if->multiproc_int_enable) { -+ msk = dwc_read_reg32(&dev_if->dev_global_regs->diepeachintmsk[ep->num]); -+ emp = dwc_read_reg32(&dev_if->dev_global_regs->dtknqr4_fifoemptymsk); -+ msk |= ((emp >> ep->num) & 0x1) << 7; -+ v = dwc_read_reg32(&dev_if->in_ep_regs[ep->num]->diepint) & msk; -+ } else { -+ msk = dwc_read_reg32(&dev_if->dev_global_regs->diepmsk); -+ emp = dwc_read_reg32(&dev_if->dev_global_regs->dtknqr4_fifoemptymsk); -+ msk |= ((emp >> ep->num) & 0x1) << 7; -+ v = dwc_read_reg32(&dev_if->in_ep_regs[ep->num]->diepint) & msk; -+ } -+ -+ -+ return v; -+} -+/** -+ * This function returns the Device OUT EP Interrupt register -+ */ -+static inline uint32_t dwc_otg_read_dev_out_ep_intr(dwc_otg_core_if_t *_core_if, -+ dwc_ep_t *_ep) -+{ -+ dwc_otg_dev_if_t *dev_if = _core_if->dev_if; -+ uint32_t v; -+ doepmsk_data_t msk = { .d32 = 0 }; -+ -+ if(_core_if->multiproc_int_enable) { -+ msk.d32 = dwc_read_reg32(&dev_if->dev_global_regs->doepeachintmsk[_ep->num]); -+ if(_core_if->pti_enh_enable) { -+ msk.b.pktdrpsts = 1; -+ } -+ v = dwc_read_reg32( &dev_if->out_ep_regs[_ep->num]->doepint) & msk.d32; -+ } else { -+ msk.d32 = dwc_read_reg32(&dev_if->dev_global_regs->doepmsk); -+ if(_core_if->pti_enh_enable) { -+ msk.b.pktdrpsts = 1; -+ } -+ v = dwc_read_reg32( &dev_if->out_ep_regs[_ep->num]->doepint) & msk.d32; -+ } -+ return v; -+} -+ -+/** -+ * This function returns the Host All Channel Interrupt register -+ */ -+static inline uint32_t dwc_otg_read_host_all_channels_intr (dwc_otg_core_if_t *_core_if) -+{ -+ return (dwc_read_reg32 (&_core_if->host_if->host_global_regs->haint)); -+} -+ -+static inline uint32_t dwc_otg_read_host_channel_intr (dwc_otg_core_if_t *_core_if, dwc_hc_t *_hc) -+{ -+ return (dwc_read_reg32 (&_core_if->host_if->hc_regs[_hc->hc_num]->hcint)); -+} -+ -+ -+/** -+ * This function returns the mode of the operation, host or device. -+ * -+ * @return 0 - Device Mode, 1 - Host Mode -+ */ -+static inline uint32_t dwc_otg_mode(dwc_otg_core_if_t *_core_if) -+{ -+ return (dwc_read_reg32( &_core_if->core_global_regs->gintsts ) & 0x1); -+} -+ -+static inline uint8_t dwc_otg_is_device_mode(dwc_otg_core_if_t *_core_if) -+{ -+ return (dwc_otg_mode(_core_if) != DWC_HOST_MODE); -+} -+static inline uint8_t dwc_otg_is_host_mode(dwc_otg_core_if_t *_core_if) -+{ -+ return (dwc_otg_mode(_core_if) == DWC_HOST_MODE); -+} -+ -+extern int32_t dwc_otg_handle_common_intr( dwc_otg_core_if_t *_core_if ); -+ -+ -+/**@}*/ -+ -+/** -+ * DWC_otg CIL callback structure. This structure allows the HCD and -+ * PCD to register functions used for starting and stopping the PCD -+ * and HCD for role change on for a DRD. 
-+ */ -+typedef struct dwc_otg_cil_callbacks -+{ -+ /** Start function for role change */ -+ int (*start) (void *_p); -+ /** Stop Function for role change */ -+ int (*stop) (void *_p); -+ /** Disconnect Function for role change */ -+ int (*disconnect) (void *_p); -+ /** Resume/Remote wakeup Function */ -+ int (*resume_wakeup) (void *_p); -+ /** Suspend function */ -+ int (*suspend) (void *_p); -+ /** Session Start (SRP) */ -+ int (*session_start) (void *_p); -+ /** Pointer passed to start() and stop() */ -+ void *p; -+} dwc_otg_cil_callbacks_t; -+ -+extern void dwc_otg_cil_register_pcd_callbacks( dwc_otg_core_if_t *_core_if, -+ dwc_otg_cil_callbacks_t *_cb, -+ void *_p); -+extern void dwc_otg_cil_register_hcd_callbacks( dwc_otg_core_if_t *_core_if, -+ dwc_otg_cil_callbacks_t *_cb, -+ void *_p); -+ -+#endif -+ ---- /dev/null -+++ b/drivers/usb/dwc_otg/dwc_otg_cil_intr.c -@@ -0,0 +1,750 @@ -+/* ========================================================================== -+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_cil_intr.c $ -+ * $Revision: 1.2 $ -+ * $Date: 2008-11-21 05:39:15 $ -+ * $Change: 1065567 $ -+ * -+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, -+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless -+ * otherwise expressly agreed to in writing between Synopsys and you. -+ * -+ * The Software IS NOT an item of Licensed Software or Licensed Product under -+ * any End User Software License Agreement or Agreement for Licensed Product -+ * with Synopsys or any supplement thereto. You are permitted to use and -+ * redistribute this Software in source and binary forms, with or without -+ * modification, provided that redistributions of source code must retain this -+ * notice. You may not view, use, disclose, copy or distribute this file or -+ * any information contained herein except pursuant to this license grant from -+ * Synopsys. If you do not agree with this notice, including the disclaimer -+ * below, then you are not authorized to use the Software. -+ * -+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, -+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -+ * DAMAGE. -+ * ========================================================================== */ -+ -+/** @file -+ * -+ * The Core Interface Layer provides basic services for accessing and -+ * managing the DWC_otg hardware. These services are used by both the -+ * Host Controller Driver and the Peripheral Controller Driver. -+ * -+ * This file contains the Common Interrupt handlers. 
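The dwc_otg_cil_callbacks_t structure above is filled in by the HCD or PCD and handed to the CIL together with a private pointer. A hedged sketch of an HCD-side registration; the example_* names are placeholders, not driver functions.

static int example_hcd_start(void *p) { /* start host controller */ return 0; }
static int example_hcd_stop(void *p)  { /* stop host controller  */ return 0; }

static dwc_otg_cil_callbacks_t example_hcd_cb = {
	.start = example_hcd_start,
	.stop  = example_hcd_stop,
};

/* e.g. dwc_otg_cil_register_hcd_callbacks(core_if, &example_hcd_cb, hcd); */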
-+ */ -+#include "linux/dwc_otg_plat.h" -+#include "dwc_otg_regs.h" -+#include "dwc_otg_cil.h" -+ -+#ifdef DEBUG -+inline const char *op_state_str(dwc_otg_core_if_t *core_if) -+{ -+ return (core_if->op_state==A_HOST?"a_host": -+ (core_if->op_state==A_SUSPEND?"a_suspend": -+ (core_if->op_state==A_PERIPHERAL?"a_peripheral": -+ (core_if->op_state==B_PERIPHERAL?"b_peripheral": -+ (core_if->op_state==B_HOST?"b_host": -+ "unknown"))))); -+} -+#endif -+ -+/** This function will log a debug message -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ */ -+int32_t dwc_otg_handle_mode_mismatch_intr (dwc_otg_core_if_t *core_if) -+{ -+ gintsts_data_t gintsts; -+ DWC_WARN("Mode Mismatch Interrupt: currently in %s mode\n", -+ dwc_otg_mode(core_if) ? "Host" : "Device"); -+ -+ /* Clear interrupt */ -+ gintsts.d32 = 0; -+ gintsts.b.modemismatch = 1; -+ dwc_write_reg32 (&core_if->core_global_regs->gintsts, gintsts.d32); -+ return 1; -+} -+ -+/** Start the HCD. Helper function for using the HCD callbacks. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ */ -+static inline void hcd_start(dwc_otg_core_if_t *core_if) -+{ -+ if (core_if->hcd_cb && core_if->hcd_cb->start) { -+ core_if->hcd_cb->start(core_if->hcd_cb->p); -+ } -+} -+/** Stop the HCD. Helper function for using the HCD callbacks. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ */ -+static inline void hcd_stop(dwc_otg_core_if_t *core_if) -+{ -+ if (core_if->hcd_cb && core_if->hcd_cb->stop) { -+ core_if->hcd_cb->stop(core_if->hcd_cb->p); -+ } -+} -+/** Disconnect the HCD. Helper function for using the HCD callbacks. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ */ -+static inline void hcd_disconnect(dwc_otg_core_if_t *core_if) -+{ -+ if (core_if->hcd_cb && core_if->hcd_cb->disconnect) { -+ core_if->hcd_cb->disconnect(core_if->hcd_cb->p); -+ } -+} -+/** Inform the HCD the a New Session has begun. Helper function for -+ * using the HCD callbacks. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ */ -+static inline void hcd_session_start(dwc_otg_core_if_t *core_if) -+{ -+ if (core_if->hcd_cb && core_if->hcd_cb->session_start) { -+ core_if->hcd_cb->session_start(core_if->hcd_cb->p); -+ } -+} -+ -+/** Start the PCD. Helper function for using the PCD callbacks. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ */ -+static inline void pcd_start(dwc_otg_core_if_t *core_if) -+{ -+ if (core_if->pcd_cb && core_if->pcd_cb->start) { -+ core_if->pcd_cb->start(core_if->pcd_cb->p); -+ } -+} -+/** Stop the PCD. Helper function for using the PCD callbacks. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ */ -+static inline void pcd_stop(dwc_otg_core_if_t *core_if) -+{ -+ if (core_if->pcd_cb && core_if->pcd_cb->stop) { -+ core_if->pcd_cb->stop(core_if->pcd_cb->p); -+ } -+} -+/** Suspend the PCD. Helper function for using the PCD callbacks. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ */ -+static inline void pcd_suspend(dwc_otg_core_if_t *core_if) -+{ -+ if (core_if->pcd_cb && core_if->pcd_cb->suspend) { -+ core_if->pcd_cb->suspend(core_if->pcd_cb->p); -+ } -+} -+/** Resume the PCD. Helper function for using the PCD callbacks. -+ * -+ * @param core_if Programming view of DWC_otg controller. 
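dwc_otg_handle_mode_mismatch_intr() above also shows the write-1-to-clear idiom used for GINTSTS throughout this file: build a value with only the handled bit set and write it back, leaving all other pending bits untouched. A small illustrative helper, not part of the driver:

static void example_clear_gintsts_bit(dwc_otg_core_if_t *core_if,
				      uint32_t bit_mask)
{
	/* Only the bits written as 1 are acknowledged; everything else
	 * stays pending, so no interrupt sources are lost. */
	dwc_write_reg32(&core_if->core_global_regs->gintsts, bit_mask);
}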
-+ */ -+static inline void pcd_resume(dwc_otg_core_if_t *core_if) -+{ -+ if (core_if->pcd_cb && core_if->pcd_cb->resume_wakeup) { -+ core_if->pcd_cb->resume_wakeup(core_if->pcd_cb->p); -+ } -+} -+ -+/** -+ * This function handles the OTG Interrupts. It reads the OTG -+ * Interrupt Register (GOTGINT) to determine what interrupt has -+ * occurred. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ */ -+int32_t dwc_otg_handle_otg_intr(dwc_otg_core_if_t *core_if) -+{ -+ dwc_otg_core_global_regs_t *global_regs = -+ core_if->core_global_regs; -+ gotgint_data_t gotgint; -+ gotgctl_data_t gotgctl; -+ gintmsk_data_t gintmsk; -+ -+ gotgint.d32 = dwc_read_reg32(&global_regs->gotgint); -+ gotgctl.d32 = dwc_read_reg32(&global_regs->gotgctl); -+ DWC_DEBUGPL(DBG_CIL, "gotgctl=%08x\n", gotgctl.d32); -+ -+ if (gotgint.b.sesenddet) { -+ DWC_DEBUGPL(DBG_ANY, "OTG Interrupt: " -+ "Session End Detected++ (%s)\n", -+ op_state_str(core_if)); -+ gotgctl.d32 = dwc_read_reg32(&global_regs->gotgctl); -+ -+ if (core_if->op_state == B_HOST) { -+ pcd_start(core_if); -+ core_if->op_state = B_PERIPHERAL; -+ } else { -+ /* If not B_HOST and Device HNP still set. HNP -+ * Did not succeed!*/ -+ if (gotgctl.b.devhnpen) { -+ DWC_DEBUGPL(DBG_ANY, "Session End Detected\n"); -+ DWC_ERROR("Device Not Connected/Responding!\n"); -+ } -+ -+ /* If Session End Detected the B-Cable has -+ * been disconnected. */ -+ /* Reset PCD and Gadget driver to a -+ * clean state. */ -+ pcd_stop(core_if); -+ } -+ gotgctl.d32 = 0; -+ gotgctl.b.devhnpen = 1; -+ dwc_modify_reg32(&global_regs->gotgctl, -+ gotgctl.d32, 0); -+ } -+ if (gotgint.b.sesreqsucstschng) { -+ DWC_DEBUGPL(DBG_ANY, " OTG Interrupt: " -+ "Session Reqeust Success Status Change++\n"); -+ gotgctl.d32 = dwc_read_reg32(&global_regs->gotgctl); -+ if (gotgctl.b.sesreqscs) { -+ if ((core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS) && -+ (core_if->core_params->i2c_enable)) { -+ core_if->srp_success = 1; -+ } -+ else { -+ pcd_resume(core_if); -+ /* Clear Session Request */ -+ gotgctl.d32 = 0; -+ gotgctl.b.sesreq = 1; -+ dwc_modify_reg32(&global_regs->gotgctl, -+ gotgctl.d32, 0); -+ } -+ } -+ } -+ if (gotgint.b.hstnegsucstschng) { -+ /* Print statements during the HNP interrupt handling -+ * can cause it to fail.*/ -+ gotgctl.d32 = dwc_read_reg32(&global_regs->gotgctl); -+ if (gotgctl.b.hstnegscs) { -+ if (dwc_otg_is_host_mode(core_if)) { -+ core_if->op_state = B_HOST; -+ /* -+ * Need to disable SOF interrupt immediately. -+ * When switching from device to host, the PCD -+ * interrupt handler won't handle the -+ * interrupt if host mode is already set. The -+ * HCD interrupt handler won't get called if -+ * the HCD state is HALT. This means that the -+ * interrupt does not get handled and Linux -+ * complains loudly. -+ */ -+ gintmsk.d32 = 0; -+ gintmsk.b.sofintr = 1; -+ dwc_modify_reg32(&global_regs->gintmsk, -+ gintmsk.d32, 0); -+ pcd_stop(core_if); -+ /* -+ * Initialize the Core for Host mode. -+ */ -+ hcd_start(core_if); -+ core_if->op_state = B_HOST; -+ } -+ } else { -+ gotgctl.d32 = 0; -+ gotgctl.b.hnpreq = 1; -+ gotgctl.b.devhnpen = 1; -+ dwc_modify_reg32(&global_regs->gotgctl, -+ gotgctl.d32, 0); -+ DWC_DEBUGPL(DBG_ANY, "HNP Failed\n"); -+ DWC_ERROR("Device Not Connected/Responding\n"); -+ } -+ } -+ if (gotgint.b.hstnegdet) { -+ /* The disconnect interrupt is set at the same time as -+ * Host Negotiation Detected. During the mode -+ * switch all interrupts are cleared so the disconnect -+ * interrupt handler will not get executed. 
-+ */ -+ DWC_DEBUGPL(DBG_ANY, " ++OTG Interrupt: " -+ "Host Negotiation Detected++ (%s)\n", -+ (dwc_otg_is_host_mode(core_if)?"Host":"Device")); -+ if (dwc_otg_is_device_mode(core_if)){ -+ DWC_DEBUGPL(DBG_ANY, "a_suspend->a_peripheral (%d)\n", core_if->op_state); -+ hcd_disconnect(core_if); -+ pcd_start(core_if); -+ core_if->op_state = A_PERIPHERAL; -+ } else { -+ /* -+ * Need to disable SOF interrupt immediately. When -+ * switching from device to host, the PCD interrupt -+ * handler won't handle the interrupt if host mode is -+ * already set. The HCD interrupt handler won't get -+ * called if the HCD state is HALT. This means that -+ * the interrupt does not get handled and Linux -+ * complains loudly. -+ */ -+ gintmsk.d32 = 0; -+ gintmsk.b.sofintr = 1; -+ dwc_modify_reg32(&global_regs->gintmsk, -+ gintmsk.d32, 0); -+ pcd_stop(core_if); -+ hcd_start(core_if); -+ core_if->op_state = A_HOST; -+ } -+ } -+ if (gotgint.b.adevtoutchng) { -+ DWC_DEBUGPL(DBG_ANY, " ++OTG Interrupt: " -+ "A-Device Timeout Change++\n"); -+ } -+ if (gotgint.b.debdone) { -+ DWC_DEBUGPL(DBG_ANY, " ++OTG Interrupt: " -+ "Debounce Done++\n"); -+ } -+ -+ /* Clear GOTGINT */ -+ dwc_write_reg32 (&core_if->core_global_regs->gotgint, gotgint.d32); -+ -+ return 1; -+} -+ -+ -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) -+ -+void w_conn_id_status_change(void *p) -+{ -+ dwc_otg_core_if_t *core_if = p; -+ -+#else -+ -+void w_conn_id_status_change(struct work_struct *p) -+{ -+ dwc_otg_core_if_t *core_if = container_of(p, dwc_otg_core_if_t, w_conn_id); -+ -+#endif -+ -+ -+ uint32_t count = 0; -+ gotgctl_data_t gotgctl = { .d32 = 0 }; -+ -+ gotgctl.d32 = dwc_read_reg32(&core_if->core_global_regs->gotgctl); -+ DWC_DEBUGPL(DBG_CIL, "gotgctl=%0x\n", gotgctl.d32); -+ DWC_DEBUGPL(DBG_CIL, "gotgctl.b.conidsts=%d\n", gotgctl.b.conidsts); -+ -+ /* B-Device connector (Device Mode) */ -+ if (gotgctl.b.conidsts) { -+ /* Wait for switch to device mode. */ -+ while (!dwc_otg_is_device_mode(core_if)){ -+ DWC_PRINT("Waiting for Peripheral Mode, Mode=%s\n", -+ (dwc_otg_is_host_mode(core_if)?"Host":"Peripheral")); -+ MDELAY(100); -+ if (++count > 10000) *(uint32_t*)NULL=0; -+ } -+ core_if->op_state = B_PERIPHERAL; -+ dwc_otg_core_init(core_if); -+ dwc_otg_enable_global_interrupts(core_if); -+ pcd_start(core_if); -+ } else { -+ /* A-Device connector (Host Mode) */ -+ while (!dwc_otg_is_host_mode(core_if)) { -+ DWC_PRINT("Waiting for Host Mode, Mode=%s\n", -+ (dwc_otg_is_host_mode(core_if)?"Host":"Peripheral")); -+ MDELAY(100); -+ if (++count > 10000) *(uint32_t*)NULL=0; -+ } -+ core_if->op_state = A_HOST; -+ /* -+ * Initialize the Core for Host mode. -+ */ -+ dwc_otg_core_init(core_if); -+ dwc_otg_enable_global_interrupts(core_if); -+ hcd_start(core_if); -+ } -+} -+ -+ -+/** -+ * This function handles the Connector ID Status Change Interrupt. It -+ * reads the OTG Interrupt Register (GOTCTL) to determine whether this -+ * is a Device to Host Mode transition or a Host Mode to Device -+ * Transition. -+ * -+ * This only occurs when the cable is connected/removed from the PHY -+ * connector. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ */ -+int32_t dwc_otg_handle_conn_id_status_change_intr(dwc_otg_core_if_t *core_if) -+{ -+ -+ /* -+ * Need to disable SOF interrupt immediately. If switching from device -+ * to host, the PCD interrupt handler won't handle the interrupt if -+ * host mode is already set. The HCD interrupt handler won't get -+ * called if the HCD state is HALT. 
This means that the interrupt does -+ * not get handled and Linux complains loudly. -+ */ -+ gintmsk_data_t gintmsk = { .d32 = 0 }; -+ gintsts_data_t gintsts = { .d32 = 0 }; -+ -+ gintmsk.b.sofintr = 1; -+ dwc_modify_reg32(&core_if->core_global_regs->gintmsk, gintmsk.d32, 0); -+ -+ DWC_DEBUGPL(DBG_CIL, " ++Connector ID Status Change Interrupt++ (%s)\n", -+ (dwc_otg_is_host_mode(core_if)?"Host":"Device")); -+ -+ /* -+ * Need to schedule a work, as there are possible DELAY function calls -+ */ -+ queue_work(core_if->wq_otg, &core_if->w_conn_id); -+ -+ /* Set flag and clear interrupt */ -+ gintsts.b.conidstschng = 1; -+ dwc_write_reg32 (&core_if->core_global_regs->gintsts, gintsts.d32); -+ -+ return 1; -+} -+ -+/** -+ * This interrupt indicates that a device is initiating the Session -+ * Request Protocol to request the host to turn on bus power so a new -+ * session can begin. The handler responds by turning on bus power. If -+ * the DWC_otg controller is in low power mode, the handler brings the -+ * controller out of low power mode before turning on bus power. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ */ -+int32_t dwc_otg_handle_session_req_intr(dwc_otg_core_if_t *core_if) -+{ -+ gintsts_data_t gintsts; -+ -+#ifndef DWC_HOST_ONLY -+ hprt0_data_t hprt0; -+ DWC_DEBUGPL(DBG_ANY, "++Session Request Interrupt++\n"); -+ -+ if (dwc_otg_is_device_mode(core_if)) { -+ DWC_PRINT("SRP: Device mode\n"); -+ } else { -+ DWC_PRINT("SRP: Host mode\n"); -+ -+ /* Turn on the port power bit. */ -+ hprt0.d32 = dwc_otg_read_hprt0(core_if); -+ hprt0.b.prtpwr = 1; -+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); -+ -+ /* Start the Connection timer. So a message can be displayed -+ * if connect does not occur within 10 seconds. */ -+ hcd_session_start(core_if); -+ } -+#endif -+ -+ /* Clear interrupt */ -+ gintsts.d32 = 0; -+ gintsts.b.sessreqintr = 1; -+ dwc_write_reg32 (&core_if->core_global_regs->gintsts, gintsts.d32); -+ -+ return 1; -+} -+ -+ -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) -+void w_wakeup_detected(void *p) -+{ -+ dwc_otg_core_if_t* core_if = p; -+ -+#else -+ -+void w_wakeup_detected(struct work_struct *p) -+{ -+ struct delayed_work *dw = container_of(p, struct delayed_work, work); -+ dwc_otg_core_if_t *core_if = container_of(dw, dwc_otg_core_if_t, w_wkp); -+ -+#endif -+ /* -+ * Clear the Resume after 70ms. (Need 20 ms minimum. Use 70 ms -+ * so that OPT tests pass with all PHYs). -+ */ -+ hprt0_data_t hprt0 = {.d32=0}; -+#if 0 -+ pcgcctl_data_t pcgcctl = {.d32=0}; -+ /* Restart the Phy Clock */ -+ pcgcctl.b.stoppclk = 1; -+ dwc_modify_reg32(core_if->pcgcctl, pcgcctl.d32, 0); -+ UDELAY(10); -+#endif //0 -+ hprt0.d32 = dwc_otg_read_hprt0(core_if); -+ DWC_DEBUGPL(DBG_ANY,"Resume: HPRT0=%0x\n", hprt0.d32); -+// MDELAY(70); -+ hprt0.b.prtres = 0; /* Resume */ -+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); -+ DWC_DEBUGPL(DBG_ANY,"Clear Resume: HPRT0=%0x\n", dwc_read_reg32(core_if->host_if->hprt0)); -+} -+/** -+ * This interrupt indicates that the DWC_otg controller has detected a -+ * resume or remote wakeup sequence. If the DWC_otg controller is in -+ * low power mode, the handler must brings the controller out of low -+ * power mode. The controller automatically begins resume -+ * signaling. The handler schedules a time to stop resume signaling. 
-+ */ -+int32_t dwc_otg_handle_wakeup_detected_intr(dwc_otg_core_if_t *core_if) -+{ -+ gintsts_data_t gintsts; -+ -+ DWC_DEBUGPL(DBG_ANY, "++Resume and Remote Wakeup Detected Interrupt++\n"); -+ -+ if (dwc_otg_is_device_mode(core_if)) { -+ dctl_data_t dctl = {.d32=0}; -+ DWC_DEBUGPL(DBG_PCD, "DSTS=0x%0x\n", -+ dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts)); -+#ifdef PARTIAL_POWER_DOWN -+ if (core_if->hwcfg4.b.power_optimiz) { -+ pcgcctl_data_t power = {.d32=0}; -+ -+ power.d32 = dwc_read_reg32(core_if->pcgcctl); -+ DWC_DEBUGPL(DBG_CIL, "PCGCCTL=%0x\n", power.d32); -+ -+ power.b.stoppclk = 0; -+ dwc_write_reg32(core_if->pcgcctl, power.d32); -+ -+ power.b.pwrclmp = 0; -+ dwc_write_reg32(core_if->pcgcctl, power.d32); -+ -+ power.b.rstpdwnmodule = 0; -+ dwc_write_reg32(core_if->pcgcctl, power.d32); -+ } -+#endif -+ /* Clear the Remote Wakeup Signalling */ -+ dctl.b.rmtwkupsig = 1; -+ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dctl, -+ dctl.d32, 0); -+ -+ if (core_if->pcd_cb && core_if->pcd_cb->resume_wakeup) { -+ core_if->pcd_cb->resume_wakeup(core_if->pcd_cb->p); -+ } -+ -+ } else { -+ pcgcctl_data_t pcgcctl = {.d32=0}; -+ -+ /* Restart the Phy Clock */ -+ pcgcctl.b.stoppclk = 1; -+ dwc_modify_reg32(core_if->pcgcctl, pcgcctl.d32, 0); -+ -+ queue_delayed_work(core_if->wq_otg, &core_if->w_wkp, ((70 * HZ / 1000) + 1)); -+ } -+ -+ /* Clear interrupt */ -+ gintsts.d32 = 0; -+ gintsts.b.wkupintr = 1; -+ dwc_write_reg32 (&core_if->core_global_regs->gintsts, gintsts.d32); -+ -+ return 1; -+} -+ -+/** -+ * This interrupt indicates that a device has been disconnected from -+ * the root port. -+ */ -+int32_t dwc_otg_handle_disconnect_intr(dwc_otg_core_if_t *core_if) -+{ -+ gintsts_data_t gintsts; -+ -+ DWC_DEBUGPL(DBG_ANY, "++Disconnect Detected Interrupt++ (%s) %s\n", -+ (dwc_otg_is_host_mode(core_if)?"Host":"Device"), -+ op_state_str(core_if)); -+ -+/** @todo Consolidate this if statement. */ -+#ifndef DWC_HOST_ONLY -+ if (core_if->op_state == B_HOST) { -+ /* If in device mode Disconnect and stop the HCD, then -+ * start the PCD. */ -+ hcd_disconnect(core_if); -+ pcd_start(core_if); -+ core_if->op_state = B_PERIPHERAL; -+ } else if (dwc_otg_is_device_mode(core_if)) { -+ gotgctl_data_t gotgctl = { .d32 = 0 }; -+ gotgctl.d32 = dwc_read_reg32(&core_if->core_global_regs->gotgctl); -+ if (gotgctl.b.hstsethnpen==1) { -+ /* Do nothing, if HNP in process the OTG -+ * interrupt "Host Negotiation Detected" -+ * interrupt will do the mode switch. -+ */ -+ } else if (gotgctl.b.devhnpen == 0) { -+ /* If in device mode Disconnect and stop the HCD, then -+ * start the PCD. */ -+ hcd_disconnect(core_if); -+ pcd_start(core_if); -+ core_if->op_state = B_PERIPHERAL; -+ } else { -+ DWC_DEBUGPL(DBG_ANY,"!a_peripheral && !devhnpen\n"); -+ } -+ } else { -+ if (core_if->op_state == A_HOST) { -+ /* A-Cable still connected but device disconnected. */ -+ hcd_disconnect(core_if); -+ } -+ } -+#endif -+ -+ gintsts.d32 = 0; -+ gintsts.b.disconnect = 1; -+ dwc_write_reg32 (&core_if->core_global_regs->gintsts, gintsts.d32); -+ return 1; -+} -+/** -+ * This interrupt indicates that SUSPEND state has been detected on -+ * the USB. -+ * -+ * For HNP the USB Suspend interrupt signals the change from -+ * "a_peripheral" to "a_host". -+ * -+ * When power management is enabled the core will be put in low power -+ * mode. 
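The wakeup path above stops resume signalling after roughly 70 ms by queueing delayed work for ((70 * HZ / 1000) + 1) jiffies. A standalone worked example of that conversion (the HZ value is an example only, not driver code):

#include <stdio.h>

int main(void)
{
	unsigned int hz = 100;				/* example CONFIG_HZ */
	unsigned int delay = (70 * hz / 1000) + 1;	/* 7 + 1 = 8 ticks   */

	printf("70 ms at HZ=%u -> %u jiffies\n", hz, delay);
	return 0;
}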
-+ */ -+int32_t dwc_otg_handle_usb_suspend_intr(dwc_otg_core_if_t *core_if) -+{ -+ dsts_data_t dsts; -+ gintsts_data_t gintsts; -+ -+ DWC_DEBUGPL(DBG_ANY,"USB SUSPEND\n"); -+ -+ if (dwc_otg_is_device_mode(core_if)) { -+ /* Check the Device status register to determine if the Suspend -+ * state is active. */ -+ dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts); -+ DWC_DEBUGPL(DBG_PCD, "DSTS=0x%0x\n", dsts.d32); -+ DWC_DEBUGPL(DBG_PCD, "DSTS.Suspend Status=%d " -+ "HWCFG4.power Optimize=%d\n", -+ dsts.b.suspsts, core_if->hwcfg4.b.power_optimiz); -+ -+ -+#ifdef PARTIAL_POWER_DOWN -+/** @todo Add a module parameter for power management. */ -+ -+ if (dsts.b.suspsts && core_if->hwcfg4.b.power_optimiz) { -+ pcgcctl_data_t power = {.d32=0}; -+ DWC_DEBUGPL(DBG_CIL, "suspend\n"); -+ -+ power.b.pwrclmp = 1; -+ dwc_write_reg32(core_if->pcgcctl, power.d32); -+ -+ power.b.rstpdwnmodule = 1; -+ dwc_modify_reg32(core_if->pcgcctl, 0, power.d32); -+ -+ power.b.stoppclk = 1; -+ dwc_modify_reg32(core_if->pcgcctl, 0, power.d32); -+ -+ } else { -+ DWC_DEBUGPL(DBG_ANY,"disconnect?\n"); -+ } -+#endif -+ /* PCD callback for suspend. */ -+ pcd_suspend(core_if); -+ } else { -+ if (core_if->op_state == A_PERIPHERAL) { -+ DWC_DEBUGPL(DBG_ANY,"a_peripheral->a_host\n"); -+ /* Clear the a_peripheral flag, back to a_host. */ -+ pcd_stop(core_if); -+ hcd_start(core_if); -+ core_if->op_state = A_HOST; -+ } -+ } -+ -+ /* Clear interrupt */ -+ gintsts.d32 = 0; -+ gintsts.b.usbsuspend = 1; -+ dwc_write_reg32(&core_if->core_global_regs->gintsts, gintsts.d32); -+ -+ return 1; -+} -+ -+ -+/** -+ * This function returns the Core Interrupt register. -+ */ -+static inline uint32_t dwc_otg_read_common_intr(dwc_otg_core_if_t *core_if) -+{ -+ gintsts_data_t gintsts; -+ gintmsk_data_t gintmsk; -+ gintmsk_data_t gintmsk_common = {.d32=0}; -+ gintmsk_common.b.wkupintr = 1; -+ gintmsk_common.b.sessreqintr = 1; -+ gintmsk_common.b.conidstschng = 1; -+ gintmsk_common.b.otgintr = 1; -+ gintmsk_common.b.modemismatch = 1; -+ gintmsk_common.b.disconnect = 1; -+ gintmsk_common.b.usbsuspend = 1; -+ /** @todo: The port interrupt occurs while in device -+ * mode. Added code to CIL to clear the interrupt for now! -+ */ -+ gintmsk_common.b.portintr = 1; -+ -+ gintsts.d32 = dwc_read_reg32(&core_if->core_global_regs->gintsts); -+ gintmsk.d32 = dwc_read_reg32(&core_if->core_global_regs->gintmsk); -+#ifdef DEBUG -+ /* if any common interrupts set */ -+ if (gintsts.d32 & gintmsk_common.d32) { -+ DWC_DEBUGPL(DBG_ANY, "gintsts=%08x gintmsk=%08x\n", -+ gintsts.d32, gintmsk.d32); -+ } -+#endif -+ -+ return ((gintsts.d32 & gintmsk.d32) & gintmsk_common.d32); -+ -+} -+ -+/** -+ * Common interrupt handler. -+ * -+ * The common interrupts are those that occur in both Host and Device mode. -+ * This handler handles the following interrupts: -+ * - Mode Mismatch Interrupt -+ * - Disconnect Interrupt -+ * - OTG Interrupt -+ * - Connector ID Status Change Interrupt -+ * - Session Request Interrupt. -+ * - Resume / Remote Wakeup Detected Interrupt. 
-+ * -+ */ -+int32_t dwc_otg_handle_common_intr(dwc_otg_core_if_t *core_if) -+{ -+ int retval = 0; -+ gintsts_data_t gintsts; -+ -+ gintsts.d32 = dwc_otg_read_common_intr(core_if); -+ -+ if (gintsts.b.modemismatch) { -+ retval |= dwc_otg_handle_mode_mismatch_intr(core_if); -+ } -+ if (gintsts.b.otgintr) { -+ retval |= dwc_otg_handle_otg_intr(core_if); -+ } -+ if (gintsts.b.conidstschng) { -+ retval |= dwc_otg_handle_conn_id_status_change_intr(core_if); -+ } -+ if (gintsts.b.disconnect) { -+ retval |= dwc_otg_handle_disconnect_intr(core_if); -+ } -+ if (gintsts.b.sessreqintr) { -+ retval |= dwc_otg_handle_session_req_intr(core_if); -+ } -+ if (gintsts.b.wkupintr) { -+ retval |= dwc_otg_handle_wakeup_detected_intr(core_if); -+ } -+ if (gintsts.b.usbsuspend) { -+ retval |= dwc_otg_handle_usb_suspend_intr(core_if); -+ } -+ if (gintsts.b.portintr && dwc_otg_is_device_mode(core_if)) { -+ /* The port interrupt occurs while in device mode with HPRT0 -+ * Port Enable/Disable. -+ */ -+ gintsts.d32 = 0; -+ gintsts.b.portintr = 1; -+ dwc_write_reg32(&core_if->core_global_regs->gintsts, -+ gintsts.d32); -+ retval |= 1; -+ -+ } -+ -+ S3C2410X_CLEAR_EINTPEND(); -+ -+ return retval; -+} ---- /dev/null -+++ b/drivers/usb/dwc_otg/dwc_otg_driver.c -@@ -0,0 +1,1273 @@ -+/* ========================================================================== -+ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_driver.c $ -+ * $Revision: 1.7 $ -+ * $Date: 2008-11-21 05:39:15 $ -+ * $Change: 791271 $ -+ * -+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, -+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless -+ * otherwise expressly agreed to in writing between Synopsys and you. -+ * -+ * The Software IS NOT an item of Licensed Software or Licensed Product under -+ * any End User Software License Agreement or Agreement for Licensed Product -+ * with Synopsys or any supplement thereto. You are permitted to use and -+ * redistribute this Software in source and binary forms, with or without -+ * modification, provided that redistributions of source code must retain this -+ * notice. You may not view, use, disclose, copy or distribute this file or -+ * any information contained herein except pursuant to this license grant from -+ * Synopsys. If you do not agree with this notice, including the disclaimer -+ * below, then you are not authorized to use the Software. -+ * -+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, -+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -+ * DAMAGE. -+ * ========================================================================== */ -+ -+/** @file -+ * The dwc_otg_driver module provides the initialization and cleanup entry -+ * points for the DWC_otg driver. This module will be dynamically installed -+ * after Linux is booted using the insmod command. 
When the module is -+ * installed, the dwc_otg_driver_init function is called. When the module is -+ * removed (using rmmod), the dwc_otg_driver_cleanup function is called. -+ * -+ * This module also defines a data structure for the dwc_otg_driver, which is -+ * used in conjunction with the standard ARM platform_device structure. These -+ * structures allow the OTG driver to comply with the standard Linux driver -+ * model in which devices and drivers are registered with a bus driver. This -+ * has the benefit that Linux can expose attributes of the driver and device -+ * in its special sysfs file system. Users can then read or write files in -+ * this file system to perform diagnostics on the driver components or the -+ * device. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include /* permission constants */ -+#include -+#include -+ -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) -+# include -+#endif -+ -+#include -+ -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) -+# include -+#endif -+ -+#include "linux/dwc_otg_plat.h" -+#include "dwc_otg_attr.h" -+#include "dwc_otg_driver.h" -+#include "dwc_otg_cil.h" -+#include "dwc_otg_pcd.h" -+#include "dwc_otg_hcd.h" -+ -+#define DWC_DRIVER_VERSION "2.72a 24-JUN-2008" -+#define DWC_DRIVER_DESC "HS OTG USB Controller driver" -+ -+static const char dwc_driver_name[] = "dwc_otg"; -+ -+/*-------------------------------------------------------------------------*/ -+/* Encapsulate the module parameter settings */ -+ -+static dwc_otg_core_params_t dwc_otg_module_params = { -+ .opt = -1, -+ .otg_cap = -1, -+ .dma_enable = -1, -+ .dma_desc_enable = -1, -+ .dma_burst_size = -1, -+ .speed = -1, -+ .host_support_fs_ls_low_power = -1, -+ .host_ls_low_power_phy_clk = -1, -+ .enable_dynamic_fifo = -1, -+ .data_fifo_size = -1, -+ .dev_rx_fifo_size = -1, -+ .dev_nperio_tx_fifo_size = -1, -+ .dev_perio_tx_fifo_size = { -+ /* dev_perio_tx_fifo_size_1 */ -+ -1, -+ -1, -+ -1, -+ -1, -+ -1, -+ -1, -+ -1, -+ -1, -+ -1, -+ -1, -+ -1, -+ -1, -+ -1, -+ -1, -+ -1 -+ /* 15 */ -+ }, -+ .host_rx_fifo_size = -1, -+ .host_nperio_tx_fifo_size = -1, -+ .host_perio_tx_fifo_size = -1, -+ .max_transfer_size = -1, -+ .max_packet_count = -1, -+ .host_channels = -1, -+ .dev_endpoints = -1, -+ .phy_type = -1, -+ .phy_utmi_width = -1, -+ .phy_ulpi_ddr = -1, -+ .phy_ulpi_ext_vbus = -1, -+ .i2c_enable = -1, -+ .ulpi_fs_ls = -1, -+ .ts_dline = -1, -+ .en_multiple_tx_fifo = -1, -+ .dev_tx_fifo_size = { -+ /* dev_tx_fifo_size */ -+ -1, -+ -1, -+ -1, -+ -1, -+ -1, -+ -1, -+ -1, -+ -1, -+ -1, -+ -1, -+ -1, -+ -1, -+ -1, -+ -1, -+ -1 -+ /* 15 */ -+ }, -+ .thr_ctl = -1, -+ .tx_thr_length = -1, -+ .rx_thr_length = -1, -+ .pti_enable = -1, -+ .mpi_enable = -1, -+}; -+ -+/** -+ * This function shows the Driver Version. -+ */ -+static ssize_t version_show(struct device_driver *dev, char *buf) -+{ -+ return snprintf(buf, sizeof(DWC_DRIVER_VERSION)+2, "%s\n", -+ DWC_DRIVER_VERSION); -+} -+static DRIVER_ATTR(version, S_IRUGO, version_show, NULL); -+ -+/** -+ * Global Debug Level Mask. -+ */ -+uint32_t g_dbg_lvl = 0; /* OFF */ -+ -+/** -+ * This function shows the driver Debug Level. -+ */ -+static ssize_t dbg_level_show(struct device_driver *drv, char *buf) -+{ -+ return sprintf(buf, "0x%0x\n", g_dbg_lvl); -+} -+ -+/** -+ * This function stores the driver Debug Level. 
-+ */ -+static ssize_t dbg_level_store(struct device_driver *drv, const char *buf, -+ size_t count) -+{ -+ g_dbg_lvl = simple_strtoul(buf, NULL, 16); -+ return count; -+} -+static DRIVER_ATTR(debuglevel, S_IRUGO|S_IWUSR, dbg_level_show, dbg_level_store); -+ -+/** -+ * This function is called during module intialization to verify that -+ * the module parameters are in a valid state. -+ */ -+static int check_parameters(dwc_otg_core_if_t *core_if) -+{ -+ int i; -+ int retval = 0; -+ -+/* Checks if the parameter is outside of its valid range of values */ -+#define DWC_OTG_PARAM_TEST(_param_, _low_, _high_) \ -+ ((dwc_otg_module_params._param_ < (_low_)) || \ -+ (dwc_otg_module_params._param_ > (_high_))) -+ -+/* If the parameter has been set by the user, check that the parameter value is -+ * within the value range of values. If not, report a module error. */ -+#define DWC_OTG_PARAM_ERR(_param_, _low_, _high_, _string_) \ -+ do { \ -+ if (dwc_otg_module_params._param_ != -1) { \ -+ if (DWC_OTG_PARAM_TEST(_param_, (_low_), (_high_))) { \ -+ DWC_ERROR("`%d' invalid for parameter `%s'\n", \ -+ dwc_otg_module_params._param_, _string_); \ -+ dwc_otg_module_params._param_ = dwc_param_##_param_##_default; \ -+ retval++; \ -+ } \ -+ } \ -+ } while (0) -+ -+ DWC_OTG_PARAM_ERR(opt,0,1,"opt"); -+ DWC_OTG_PARAM_ERR(otg_cap,0,2,"otg_cap"); -+ DWC_OTG_PARAM_ERR(dma_enable,0,1,"dma_enable"); -+ DWC_OTG_PARAM_ERR(dma_desc_enable,0,1,"dma_desc_enable"); -+ DWC_OTG_PARAM_ERR(speed,0,1,"speed"); -+ DWC_OTG_PARAM_ERR(host_support_fs_ls_low_power,0,1,"host_support_fs_ls_low_power"); -+ DWC_OTG_PARAM_ERR(host_ls_low_power_phy_clk,0,1,"host_ls_low_power_phy_clk"); -+ DWC_OTG_PARAM_ERR(enable_dynamic_fifo,0,1,"enable_dynamic_fifo"); -+ DWC_OTG_PARAM_ERR(data_fifo_size,32,32768,"data_fifo_size"); -+ DWC_OTG_PARAM_ERR(dev_rx_fifo_size,16,32768,"dev_rx_fifo_size"); -+ DWC_OTG_PARAM_ERR(dev_nperio_tx_fifo_size,16,32768,"dev_nperio_tx_fifo_size"); -+ DWC_OTG_PARAM_ERR(host_rx_fifo_size,16,32768,"host_rx_fifo_size"); -+ DWC_OTG_PARAM_ERR(host_nperio_tx_fifo_size,16,32768,"host_nperio_tx_fifo_size"); -+ DWC_OTG_PARAM_ERR(host_perio_tx_fifo_size,16,32768,"host_perio_tx_fifo_size"); -+ DWC_OTG_PARAM_ERR(max_transfer_size,2047,524288,"max_transfer_size"); -+ DWC_OTG_PARAM_ERR(max_packet_count,15,511,"max_packet_count"); -+ DWC_OTG_PARAM_ERR(host_channels,1,16,"host_channels"); -+ DWC_OTG_PARAM_ERR(dev_endpoints,1,15,"dev_endpoints"); -+ DWC_OTG_PARAM_ERR(phy_type,0,2,"phy_type"); -+ DWC_OTG_PARAM_ERR(phy_ulpi_ddr,0,1,"phy_ulpi_ddr"); -+ DWC_OTG_PARAM_ERR(phy_ulpi_ext_vbus,0,1,"phy_ulpi_ext_vbus"); -+ DWC_OTG_PARAM_ERR(i2c_enable,0,1,"i2c_enable"); -+ DWC_OTG_PARAM_ERR(ulpi_fs_ls,0,1,"ulpi_fs_ls"); -+ DWC_OTG_PARAM_ERR(ts_dline,0,1,"ts_dline"); -+ -+ if (dwc_otg_module_params.dma_burst_size != -1) { -+ if (DWC_OTG_PARAM_TEST(dma_burst_size,1,1) && -+ DWC_OTG_PARAM_TEST(dma_burst_size,4,4) && -+ DWC_OTG_PARAM_TEST(dma_burst_size,8,8) && -+ DWC_OTG_PARAM_TEST(dma_burst_size,16,16) && -+ DWC_OTG_PARAM_TEST(dma_burst_size,32,32) && -+ DWC_OTG_PARAM_TEST(dma_burst_size,64,64) && -+ DWC_OTG_PARAM_TEST(dma_burst_size,128,128) && -+ DWC_OTG_PARAM_TEST(dma_burst_size,256,256)) { -+ DWC_ERROR("`%d' invalid for parameter `dma_burst_size'\n", -+ dwc_otg_module_params.dma_burst_size); -+ dwc_otg_module_params.dma_burst_size = 32; -+ retval++; -+ } -+ -+ { -+ uint8_t brst_sz = 0; -+ while(dwc_otg_module_params.dma_burst_size > 1) { -+ brst_sz ++; -+ dwc_otg_module_params.dma_burst_size >>= 1; -+ } -+ dwc_otg_module_params.dma_burst_size = 
brst_sz; -+ } -+ } -+ -+ if (dwc_otg_module_params.phy_utmi_width != -1) { -+ if (DWC_OTG_PARAM_TEST(phy_utmi_width, 8, 8) && -+ DWC_OTG_PARAM_TEST(phy_utmi_width, 16, 16)) { -+ DWC_ERROR("`%d' invalid for parameter `phy_utmi_width'\n", -+ dwc_otg_module_params.phy_utmi_width); -+ dwc_otg_module_params.phy_utmi_width = 16; -+ retval++; -+ } -+ } -+ -+ for (i = 0; i < 15; i++) { -+ /** @todo should be like above */ -+ //DWC_OTG_PARAM_ERR(dev_perio_tx_fifo_size[i], 4, 768, "dev_perio_tx_fifo_size"); -+ if (dwc_otg_module_params.dev_perio_tx_fifo_size[i] != -1) { -+ if (DWC_OTG_PARAM_TEST(dev_perio_tx_fifo_size[i], 4, 768)) { -+ DWC_ERROR("`%d' invalid for parameter `%s_%d'\n", -+ dwc_otg_module_params.dev_perio_tx_fifo_size[i], "dev_perio_tx_fifo_size", i); -+ dwc_otg_module_params.dev_perio_tx_fifo_size[i] = dwc_param_dev_perio_tx_fifo_size_default; -+ retval++; -+ } -+ } -+ } -+ -+ DWC_OTG_PARAM_ERR(en_multiple_tx_fifo, 0, 1, "en_multiple_tx_fifo"); -+ -+ for (i = 0; i < 15; i++) { -+ /** @todo should be like above */ -+ //DWC_OTG_PARAM_ERR(dev_tx_fifo_size[i], 4, 768, "dev_tx_fifo_size"); -+ if (dwc_otg_module_params.dev_tx_fifo_size[i] != -1) { -+ if (DWC_OTG_PARAM_TEST(dev_tx_fifo_size[i], 4, 768)) { -+ DWC_ERROR("`%d' invalid for parameter `%s_%d'\n", -+ dwc_otg_module_params.dev_tx_fifo_size[i], "dev_tx_fifo_size", i); -+ dwc_otg_module_params.dev_tx_fifo_size[i] = dwc_param_dev_tx_fifo_size_default; -+ retval++; -+ } -+ } -+ } -+ -+ DWC_OTG_PARAM_ERR(thr_ctl, 0, 7, "thr_ctl"); -+ DWC_OTG_PARAM_ERR(tx_thr_length, 8, 128, "tx_thr_length"); -+ DWC_OTG_PARAM_ERR(rx_thr_length, 8, 128, "rx_thr_length"); -+ -+ DWC_OTG_PARAM_ERR(pti_enable,0,1,"pti_enable"); -+ DWC_OTG_PARAM_ERR(mpi_enable,0,1,"mpi_enable"); -+ -+ /* At this point, all module parameters that have been set by the user -+ * are valid, and those that have not are left unset. Now set their -+ * default values and/or check the parameters against the hardware -+ * configurations of the OTG core. */ -+ -+/* This sets the parameter to the default value if it has not been set by the -+ * user */ -+#define DWC_OTG_PARAM_SET_DEFAULT(_param_) \ -+ ({ \ -+ int changed = 1; \ -+ if (dwc_otg_module_params._param_ == -1) { \ -+ changed = 0; \ -+ dwc_otg_module_params._param_ = dwc_param_##_param_##_default; \ -+ } \ -+ changed; \ -+ }) -+ -+/* This checks the macro agains the hardware configuration to see if it is -+ * valid. It is possible that the default value could be invalid. In this -+ * case, it will report a module error if the user touched the parameter. -+ * Otherwise it will adjust the value without any error. */ -+#define DWC_OTG_PARAM_CHECK_VALID(_param_, _str_, _is_valid_, _set_valid_) \ -+ ({ \ -+ int changed = DWC_OTG_PARAM_SET_DEFAULT(_param_); \ -+ int error = 0; \ -+ if (!(_is_valid_)) { \ -+ if (changed) { \ -+ DWC_ERROR("`%d' invalid for parameter `%s'. 
Check HW configuration.\n", dwc_otg_module_params._param_, _str_); \ -+ error = 1; \ -+ } \ -+ dwc_otg_module_params._param_ = (_set_valid_); \ -+ } \ -+ error; \ -+ }) -+ -+ /* OTG Cap */ -+ retval += DWC_OTG_PARAM_CHECK_VALID(otg_cap, "otg_cap", -+ ({ -+ int valid; -+ valid = 1; -+ switch (dwc_otg_module_params.otg_cap) { -+ case DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE: -+ if (core_if->hwcfg2.b.op_mode != DWC_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG) -+ valid = 0; -+ break; -+ case DWC_OTG_CAP_PARAM_SRP_ONLY_CAPABLE: -+ if ((core_if->hwcfg2.b.op_mode != DWC_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG) && -+ (core_if->hwcfg2.b.op_mode != DWC_HWCFG2_OP_MODE_SRP_ONLY_CAPABLE_OTG) && -+ (core_if->hwcfg2.b.op_mode != DWC_HWCFG2_OP_MODE_SRP_CAPABLE_DEVICE) && -+ (core_if->hwcfg2.b.op_mode != DWC_HWCFG2_OP_MODE_SRP_CAPABLE_HOST)) { -+ valid = 0; -+ } -+ break; -+ case DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE: -+ /* always valid */ -+ break; -+ } -+ valid; -+ }), -+ (((core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG) || -+ (core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_SRP_ONLY_CAPABLE_OTG) || -+ (core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_SRP_CAPABLE_DEVICE) || -+ (core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_SRP_CAPABLE_HOST)) ? -+ DWC_OTG_CAP_PARAM_SRP_ONLY_CAPABLE : -+ DWC_OTG_CAP_PARAM_NO_HNP_SRP_CAPABLE)); -+ -+ retval += DWC_OTG_PARAM_CHECK_VALID(dma_enable, "dma_enable", -+ ((dwc_otg_module_params.dma_enable == 1) && (core_if->hwcfg2.b.architecture == 0)) ? 0 : 1, -+ 0); -+ -+ retval += DWC_OTG_PARAM_CHECK_VALID(dma_desc_enable, "dma_desc_enable", -+ ((dwc_otg_module_params.dma_desc_enable == 1) && -+ ((dwc_otg_module_params.dma_enable == 0) || (core_if->hwcfg4.b.desc_dma == 0))) ? 0 : 1, -+ 0); -+ -+ retval += DWC_OTG_PARAM_CHECK_VALID(opt, "opt", 1, 0); -+ -+ DWC_OTG_PARAM_SET_DEFAULT(dma_burst_size); -+ -+ retval += DWC_OTG_PARAM_CHECK_VALID(host_support_fs_ls_low_power, -+ "host_support_fs_ls_low_power", -+ 1, 0); -+ -+ retval += DWC_OTG_PARAM_CHECK_VALID(enable_dynamic_fifo, -+ "enable_dynamic_fifo", -+ ((dwc_otg_module_params.enable_dynamic_fifo == 0) || -+ (core_if->hwcfg2.b.dynamic_fifo == 1)), 0); -+ -+ retval += DWC_OTG_PARAM_CHECK_VALID(data_fifo_size, -+ "data_fifo_size", -+ (dwc_otg_module_params.data_fifo_size <= core_if->hwcfg3.b.dfifo_depth), -+ core_if->hwcfg3.b.dfifo_depth); -+ -+ retval += DWC_OTG_PARAM_CHECK_VALID(dev_rx_fifo_size, -+ "dev_rx_fifo_size", -+ (dwc_otg_module_params.dev_rx_fifo_size <= dwc_read_reg32(&core_if->core_global_regs->grxfsiz)), -+ dwc_read_reg32(&core_if->core_global_regs->grxfsiz)); -+ -+ retval += DWC_OTG_PARAM_CHECK_VALID(dev_nperio_tx_fifo_size, -+ "dev_nperio_tx_fifo_size", -+ (dwc_otg_module_params.dev_nperio_tx_fifo_size <= (dwc_read_reg32(&core_if->core_global_regs->gnptxfsiz) >> 16)), -+ (dwc_read_reg32(&core_if->core_global_regs->gnptxfsiz) >> 16)); -+ -+ retval += DWC_OTG_PARAM_CHECK_VALID(host_rx_fifo_size, -+ "host_rx_fifo_size", -+ (dwc_otg_module_params.host_rx_fifo_size <= dwc_read_reg32(&core_if->core_global_regs->grxfsiz)), -+ dwc_read_reg32(&core_if->core_global_regs->grxfsiz)); -+ -+ retval += DWC_OTG_PARAM_CHECK_VALID(host_nperio_tx_fifo_size, -+ "host_nperio_tx_fifo_size", -+ (dwc_otg_module_params.host_nperio_tx_fifo_size <= (dwc_read_reg32(&core_if->core_global_regs->gnptxfsiz) >> 16)), -+ (dwc_read_reg32(&core_if->core_global_regs->gnptxfsiz) >> 16)); -+ -+ retval += DWC_OTG_PARAM_CHECK_VALID(host_perio_tx_fifo_size, -+ "host_perio_tx_fifo_size", -+ (dwc_otg_module_params.host_perio_tx_fifo_size <= 
((dwc_read_reg32(&core_if->core_global_regs->hptxfsiz) >> 16))), -+ ((dwc_read_reg32(&core_if->core_global_regs->hptxfsiz) >> 16))); -+ -+ retval += DWC_OTG_PARAM_CHECK_VALID(max_transfer_size, -+ "max_transfer_size", -+ (dwc_otg_module_params.max_transfer_size < (1 << (core_if->hwcfg3.b.xfer_size_cntr_width + 11))), -+ ((1 << (core_if->hwcfg3.b.xfer_size_cntr_width + 11)) - 1)); -+ -+ retval += DWC_OTG_PARAM_CHECK_VALID(max_packet_count, -+ "max_packet_count", -+ (dwc_otg_module_params.max_packet_count < (1 << (core_if->hwcfg3.b.packet_size_cntr_width + 4))), -+ ((1 << (core_if->hwcfg3.b.packet_size_cntr_width + 4)) - 1)); -+ -+ retval += DWC_OTG_PARAM_CHECK_VALID(host_channels, -+ "host_channels", -+ (dwc_otg_module_params.host_channels <= (core_if->hwcfg2.b.num_host_chan + 1)), -+ (core_if->hwcfg2.b.num_host_chan + 1)); -+ -+ retval += DWC_OTG_PARAM_CHECK_VALID(dev_endpoints, -+ "dev_endpoints", -+ (dwc_otg_module_params.dev_endpoints <= (core_if->hwcfg2.b.num_dev_ep)), -+ core_if->hwcfg2.b.num_dev_ep); -+ -+/* -+ * Define the following to disable the FS PHY Hardware checking. This is for -+ * internal testing only. -+ * -+ * #define NO_FS_PHY_HW_CHECKS -+ */ -+ -+#ifdef NO_FS_PHY_HW_CHECKS -+ retval += DWC_OTG_PARAM_CHECK_VALID(phy_type, -+ "phy_type", 1, 0); -+#else -+ retval += DWC_OTG_PARAM_CHECK_VALID(phy_type, -+ "phy_type", -+ ({ -+ int valid = 0; -+ if ((dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_UTMI) && -+ ((core_if->hwcfg2.b.hs_phy_type == 1) || -+ (core_if->hwcfg2.b.hs_phy_type == 3))) { -+ valid = 1; -+ } -+ else if ((dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_ULPI) && -+ ((core_if->hwcfg2.b.hs_phy_type == 2) || -+ (core_if->hwcfg2.b.hs_phy_type == 3))) { -+ valid = 1; -+ } -+ else if ((dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_FS) && -+ (core_if->hwcfg2.b.fs_phy_type == 1)) { -+ valid = 1; -+ } -+ valid; -+ }), -+ ({ -+ int set = DWC_PHY_TYPE_PARAM_FS; -+ if (core_if->hwcfg2.b.hs_phy_type) { -+ if ((core_if->hwcfg2.b.hs_phy_type == 3) || -+ (core_if->hwcfg2.b.hs_phy_type == 1)) { -+ set = DWC_PHY_TYPE_PARAM_UTMI; -+ } -+ else { -+ set = DWC_PHY_TYPE_PARAM_ULPI; -+ } -+ } -+ set; -+ })); -+#endif -+ -+ retval += DWC_OTG_PARAM_CHECK_VALID(speed, "speed", -+ (dwc_otg_module_params.speed == 0) && (dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_FS) ? 0 : 1, -+ dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_FS ? 1 : 0); -+ -+ retval += DWC_OTG_PARAM_CHECK_VALID(host_ls_low_power_phy_clk, -+ "host_ls_low_power_phy_clk", -+ ((dwc_otg_module_params.host_ls_low_power_phy_clk == DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ) && (dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_FS) ? 0 : 1), -+ ((dwc_otg_module_params.phy_type == DWC_PHY_TYPE_PARAM_FS) ? DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ : DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ)); -+ -+ DWC_OTG_PARAM_SET_DEFAULT(phy_ulpi_ddr); -+ DWC_OTG_PARAM_SET_DEFAULT(phy_ulpi_ext_vbus); -+ DWC_OTG_PARAM_SET_DEFAULT(phy_utmi_width); -+ DWC_OTG_PARAM_SET_DEFAULT(ulpi_fs_ls); -+ DWC_OTG_PARAM_SET_DEFAULT(ts_dline); -+ -+#ifdef NO_FS_PHY_HW_CHECKS -+ retval += DWC_OTG_PARAM_CHECK_VALID(i2c_enable, "i2c_enable", 1, 0); -+#else -+ retval += DWC_OTG_PARAM_CHECK_VALID(i2c_enable, -+ "i2c_enable", -+ (dwc_otg_module_params.i2c_enable == 1) && (core_if->hwcfg3.b.i2c == 0) ? 
0 : 1, -+ 0); -+#endif -+ -+ for (i = 0; i < 15; i++) { -+ int changed = 1; -+ int error = 0; -+ -+ if (dwc_otg_module_params.dev_perio_tx_fifo_size[i] == -1) { -+ changed = 0; -+ dwc_otg_module_params.dev_perio_tx_fifo_size[i] = dwc_param_dev_perio_tx_fifo_size_default; -+ } -+ if (!(dwc_otg_module_params.dev_perio_tx_fifo_size[i] <= (dwc_read_reg32(&core_if->core_global_regs->dptxfsiz_dieptxf[i])))) { -+ if (changed) { -+ DWC_ERROR("`%d' invalid for parameter `dev_perio_fifo_size_%d'. Check HW configuration.\n", dwc_otg_module_params.dev_perio_tx_fifo_size[i], i); -+ error = 1; -+ } -+ dwc_otg_module_params.dev_perio_tx_fifo_size[i] = dwc_read_reg32(&core_if->core_global_regs->dptxfsiz_dieptxf[i]); -+ } -+ retval += error; -+ } -+ -+ retval += DWC_OTG_PARAM_CHECK_VALID(en_multiple_tx_fifo, "en_multiple_tx_fifo", -+ ((dwc_otg_module_params.en_multiple_tx_fifo == 1) && (core_if->hwcfg4.b.ded_fifo_en == 0)) ? 0 : 1, -+ 0); -+ -+ for (i = 0; i < 15; i++) { -+ int changed = 1; -+ int error = 0; -+ -+ if (dwc_otg_module_params.dev_tx_fifo_size[i] == -1) { -+ changed = 0; -+ dwc_otg_module_params.dev_tx_fifo_size[i] = dwc_param_dev_tx_fifo_size_default; -+ } -+ if (!(dwc_otg_module_params.dev_tx_fifo_size[i] <= (dwc_read_reg32(&core_if->core_global_regs->dptxfsiz_dieptxf[i])))) { -+ if (changed) { -+ DWC_ERROR("%d' invalid for parameter `dev_perio_fifo_size_%d'. Check HW configuration.\n", dwc_otg_module_params.dev_tx_fifo_size[i], i); -+ error = 1; -+ } -+ dwc_otg_module_params.dev_tx_fifo_size[i] = dwc_read_reg32(&core_if->core_global_regs->dptxfsiz_dieptxf[i]); -+ } -+ retval += error; -+ } -+ -+ retval += DWC_OTG_PARAM_CHECK_VALID(thr_ctl, "thr_ctl", -+ ((dwc_otg_module_params.thr_ctl != 0) && ((dwc_otg_module_params.dma_enable == 0) || (core_if->hwcfg4.b.ded_fifo_en == 0))) ? 0 : 1, -+ 0); -+ -+ DWC_OTG_PARAM_SET_DEFAULT(tx_thr_length); -+ DWC_OTG_PARAM_SET_DEFAULT(rx_thr_length); -+ -+ retval += DWC_OTG_PARAM_CHECK_VALID(pti_enable, "pti_enable", -+ ((dwc_otg_module_params.pti_enable == 0) || ((dwc_otg_module_params.pti_enable == 1) && (core_if->snpsid >= 0x4F54272A))) ? 1 : 0, -+ 0); -+ -+ retval += DWC_OTG_PARAM_CHECK_VALID(mpi_enable, "mpi_enable", -+ ((dwc_otg_module_params.mpi_enable == 0) || ((dwc_otg_module_params.mpi_enable == 1) && (core_if->hwcfg2.b.multi_proc_int == 1))) ? 1 : 0, -+ 0); -+ return retval; -+} -+ -+/** -+ * This function is the top level interrupt handler for the Common -+ * (Device and host modes) interrupts. -+ */ -+static irqreturn_t dwc_otg_common_irq(int irq, void *dev -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) -+ , struct pt_regs *r -+#endif -+ ) -+{ -+ dwc_otg_device_t *otg_dev = dev; -+ int32_t retval = IRQ_NONE; -+ -+ retval = dwc_otg_handle_common_intr(otg_dev->core_if); -+ return IRQ_RETVAL(retval); -+} -+ -+/** -+ * This function is called when a platform_device is unregistered with the -+ * dwc_otg_driver. This happens, for example, when the rmmod command is -+ * executed. The device may or may not be electrically present. If it is -+ * present, the driver stops device processing. Any resources used on behalf -+ * of this device are freed. -+ * -+ * @param[in] pdev -+ */ -+static int dwc_otg_driver_remove(struct platform_device *pdev) -+{ -+ dwc_otg_device_t *otg_dev = platform_get_drvdata(pdev); -+ DWC_DEBUGPL(DBG_ANY, "%s(%p)\n", __func__, pdev); -+ -+ if (!otg_dev) { -+ /* Memory allocation for the dwc_otg_device failed. 
*/ -+ DWC_DEBUGPL(DBG_ANY, "%s: otg_dev NULL!\n", __func__); -+ return 0; -+ } -+ -+ /* -+ * Free the IRQ -+ */ -+ if (otg_dev->common_irq_installed) { -+ free_irq(otg_dev->irq, otg_dev); -+ } -+ -+#ifndef DWC_DEVICE_ONLY -+ if (otg_dev->hcd) { -+ dwc_otg_hcd_remove(&pdev->dev); -+ } else { -+ DWC_DEBUGPL(DBG_ANY, "%s: otg_dev->hcd NULL!\n", __func__); -+ return 0; -+ } -+#endif -+ -+#ifndef DWC_HOST_ONLY -+ if (otg_dev->pcd) { -+ dwc_otg_pcd_remove(&pdev->dev); -+ } -+#endif -+ if (otg_dev->core_if) { -+ dwc_otg_cil_remove(otg_dev->core_if); -+ } -+ -+ /* -+ * Remove the device attributes -+ */ -+ dwc_otg_attr_remove(otg_dev->parent); -+ -+ /* Disable USB port */ -+ dwc_write_reg32((uint32_t *)((uint8_t *)otg_dev->base + 0xe00), 0xf); -+ -+ /* -+ * Return the memory. -+ */ -+ if (otg_dev->base) { -+ iounmap(otg_dev->base); -+ } -+ -+ if (otg_dev->phys_addr != 0) { -+ release_mem_region(otg_dev->phys_addr, otg_dev->base_len); -+ } -+ -+ kfree(otg_dev); -+ -+ /* -+ * Clear the drvdata pointer. -+ */ -+ platform_set_drvdata(pdev, NULL); -+ -+ return 0; -+} -+ -+/** -+ * This function is called when an platform_device is bound to a -+ * dwc_otg_driver. It creates the driver components required to -+ * control the device (CIL, HCD, and PCD) and it initializes the -+ * device. The driver components are stored in a dwc_otg_device -+ * structure. A reference to the dwc_otg_device is saved in the -+ * platform_device. This allows the driver to access the dwc_otg_device -+ * structure on subsequent calls to driver methods for this device. -+ * -+ * @param[in] pdev platform_device definition -+ */ -+static int dwc_otg_driver_probe(struct platform_device *pdev) -+{ -+ int retval = 0; -+ uint32_t snpsid; -+ dwc_otg_device_t *otg_dev; -+ struct resource *res; -+ -+ dev_dbg(&pdev->dev, "dwc_otg_driver_probe(%p)\n", pdev); -+ -+ otg_dev= kzalloc(sizeof(dwc_otg_device_t), GFP_KERNEL); -+ if (!otg_dev) { -+ dev_err(&pdev->dev, "kmalloc of dwc_otg_device failed\n"); -+ retval = -ENOMEM; -+ goto fail; -+ } -+ -+ otg_dev->reg_offset = 0xFFFFFFFF; -+ -+ /* -+ * Retrieve the memory and IRQ resources. -+ */ -+ otg_dev->irq = platform_get_irq(pdev, 0); -+ if (otg_dev->irq <= 0) { -+ dev_err(&pdev->dev, "no device irq\n"); -+ retval = -EINVAL; -+ goto fail; -+ } -+ -+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -+ if (res == NULL) { -+ dev_err(&pdev->dev, "no CSR address\n"); -+ retval = -EINVAL; -+ goto fail; -+ } -+ -+ otg_dev->parent = &pdev->dev; -+ otg_dev->phys_addr = res->start; -+ otg_dev->base_len = res->end - res->start + 1; -+ if (request_mem_region(otg_dev->phys_addr, -+ otg_dev->base_len, -+ dwc_driver_name) == NULL) { -+ dev_err(&pdev->dev, "request_mem_region failed\n"); -+ retval = -EBUSY; -+ goto fail; -+ } -+ -+ /* -+ * Map the DWC_otg Core memory into virtual address space. -+ */ -+ otg_dev->base = ioremap(otg_dev->phys_addr, otg_dev->base_len); -+ if (!otg_dev->base) { -+ dev_err(&pdev->dev, "ioremap() failed\n"); -+ retval = -ENOMEM; -+ goto fail; -+ } -+ dev_dbg(&pdev->dev, "mapped base=0x%08x\n", (unsigned) otg_dev->base); -+ -+ /* Enable USB Port */ -+ dwc_write_reg32((uint32_t *)((uint8_t *)otg_dev->base + 0xe00), 0); -+ -+ /* -+ * Attempt to ensure this device is really a DWC_otg Controller. -+ * Read and verify the SNPSID register contents. The value should be -+ * 0x45F42XXX, which corresponds to "OT2", as in "OTG version 2.XX". 
-+ */ -+ snpsid = dwc_read_reg32((uint32_t *)((uint8_t *)otg_dev->base + 0x40)); -+ -+ if ((snpsid & 0xFFFFF000) != OTG_CORE_REV_2_00) { -+ dev_err(&pdev->dev, "Bad value for SNPSID: 0x%08x\n", snpsid); -+ retval = -EINVAL; -+ goto fail; -+ } -+ -+ DWC_PRINT("Core Release: %x.%x%x%x\n", -+ (snpsid >> 12 & 0xF), -+ (snpsid >> 8 & 0xF), -+ (snpsid >> 4 & 0xF), -+ (snpsid & 0xF)); -+ -+ /* -+ * Initialize driver data to point to the global DWC_otg -+ * Device structure. -+ */ -+ platform_set_drvdata(pdev, otg_dev); -+ dev_dbg(&pdev->dev, "dwc_otg_device=0x%p\n", otg_dev); -+ -+ -+ otg_dev->core_if = dwc_otg_cil_init(otg_dev->base, -+ &dwc_otg_module_params); -+ -+ otg_dev->core_if->snpsid = snpsid; -+ -+ if (!otg_dev->core_if) { -+ dev_err(&pdev->dev, "CIL initialization failed!\n"); -+ retval = -ENOMEM; -+ goto fail; -+ } -+ -+ /* -+ * Validate parameter values. -+ */ -+ if (check_parameters(otg_dev->core_if)) { -+ retval = -EINVAL; -+ goto fail; -+ } -+ -+ /* -+ * Create Device Attributes in sysfs -+ */ -+ //dwc_otg_attr_create(&pdev->dev); -+ -+ /* -+ * Disable the global interrupt until all the interrupt -+ * handlers are installed. -+ */ -+ dwc_otg_disable_global_interrupts(otg_dev->core_if); -+ -+ /* -+ * Install the interrupt handler for the common interrupts before -+ * enabling common interrupts in core_init below. -+ */ -+ DWC_DEBUGPL(DBG_CIL, "registering (common) handler for irq%d\n", -+ otg_dev->irq); -+ retval = request_irq(otg_dev->irq, dwc_otg_common_irq, -+ IRQF_SHARED, "dwc_otg", otg_dev); -+ if (retval) { -+ DWC_ERROR("request of irq%d failed\n", otg_dev->irq); -+ retval = -EBUSY; -+ goto fail; -+ } else { -+ otg_dev->common_irq_installed = 1; -+ } -+ -+ /* -+ * Initialize the DWC_otg core. -+ */ -+ dwc_otg_core_init(otg_dev->core_if); -+ -+#ifndef DWC_HOST_ONLY -+ /* -+ * Initialize the PCD -+ */ -+ retval = dwc_otg_pcd_init(&pdev->dev); -+ if (retval != 0) { -+ DWC_ERROR("dwc_otg_pcd_init failed\n"); -+ otg_dev->pcd = NULL; -+ goto fail; -+ } -+#endif -+#ifndef DWC_DEVICE_ONLY -+ /* -+ * Initialize the HCD -+ */ -+ retval = dwc_otg_hcd_init(&pdev->dev); -+ if (retval != 0) { -+ DWC_ERROR("dwc_otg_hcd_init failed\n"); -+ otg_dev->hcd = NULL; -+ goto fail; -+ } -+#endif -+ -+ /* -+ * Enable the global interrupt after all the interrupt -+ * handlers are installed. -+ */ -+ dwc_otg_enable_global_interrupts(otg_dev->core_if); -+ -+ return 0; -+ -+ fail: -+ dwc_otg_driver_remove(pdev); -+ return retval; -+} -+ -+/** -+ * This structure defines the methods to be called by a bus driver -+ * during the lifecycle of a device on that bus. Both drivers and -+ * devices are registered with a bus driver. The bus driver matches -+ * devices to drivers based on information in the device and driver -+ * structures. -+ * -+ * The probe function is called when the bus driver matches a device -+ * to this driver. The remove function is called when a device is -+ * unregistered with the bus driver. -+ */ -+ -+static const struct of_device_id ralink_otg_match[] = { -+ { .compatible = "ralink,rt3050-otg" }, -+ {}, -+}; -+MODULE_DEVICE_TABLE(of, ralink_otg_match); -+ -+static struct platform_driver dwc_otg_driver = { -+ .driver = { -+ .name = (char *)dwc_driver_name, -+ .of_match_table = ralink_otg_match, -+ }, -+ .probe = dwc_otg_driver_probe, -+ .remove = dwc_otg_driver_remove, -+}; -+ -+/** -+ * This function is called when the dwc_otg_driver is installed with the -+ * insmod command. It registers the dwc_otg_driver structure with the -+ * appropriate bus driver. 
This will cause the dwc_otg_driver_probe function -+ * to be called. In addition, the bus driver will automatically expose -+ * attributes defined for the device and driver in the special sysfs file -+ * system. -+ * -+ * @return -+ */ -+static int __init dwc_otg_driver_init(void) -+{ -+ int retval = 0; -+ int error; -+ -+ printk(KERN_INFO "%s: version %s\n", dwc_driver_name, DWC_DRIVER_VERSION); -+ -+ retval = platform_driver_register(&dwc_otg_driver); -+ if (retval) { -+ printk(KERN_ERR "%s retval=%d\n", __func__, retval); -+ return retval; -+ } -+ -+ error = driver_create_file(&dwc_otg_driver.driver, &driver_attr_version); -+ error = driver_create_file(&dwc_otg_driver.driver, &driver_attr_debuglevel); -+ -+ return retval; -+} -+module_init(dwc_otg_driver_init); -+ -+/** -+ * This function is called when the driver is removed from the kernel -+ * with the rmmod command. The driver unregisters itself with its bus -+ * driver. -+ * -+ */ -+static void __exit dwc_otg_driver_cleanup(void) -+{ -+ printk(KERN_DEBUG "dwc_otg_driver_cleanup()\n"); -+ -+ driver_remove_file(&dwc_otg_driver.driver, &driver_attr_debuglevel); -+ driver_remove_file(&dwc_otg_driver.driver, &driver_attr_version); -+ -+ platform_driver_unregister(&dwc_otg_driver); -+ -+ printk(KERN_INFO "%s module removed\n", dwc_driver_name); -+} -+module_exit(dwc_otg_driver_cleanup); -+ -+MODULE_DESCRIPTION(DWC_DRIVER_DESC); -+MODULE_AUTHOR("Synopsys Inc."); -+MODULE_LICENSE("GPL"); -+ -+module_param_named(otg_cap, dwc_otg_module_params.otg_cap, int, 0444); -+MODULE_PARM_DESC(otg_cap, "OTG Capabilities 0=HNP&SRP 1=SRP Only 2=None"); -+module_param_named(opt, dwc_otg_module_params.opt, int, 0444); -+MODULE_PARM_DESC(opt, "OPT Mode"); -+module_param_named(dma_enable, dwc_otg_module_params.dma_enable, int, 0444); -+MODULE_PARM_DESC(dma_enable, "DMA Mode 0=Slave 1=DMA enabled"); -+ -+module_param_named(dma_desc_enable, dwc_otg_module_params.dma_desc_enable, int, 0444); -+MODULE_PARM_DESC(dma_desc_enable, "DMA Desc Mode 0=Address DMA 1=DMA Descriptor enabled"); -+ -+module_param_named(dma_burst_size, dwc_otg_module_params.dma_burst_size, int, 0444); -+MODULE_PARM_DESC(dma_burst_size, "DMA Burst Size 1, 4, 8, 16, 32, 64, 128, 256"); -+module_param_named(speed, dwc_otg_module_params.speed, int, 0444); -+MODULE_PARM_DESC(speed, "Speed 0=High Speed 1=Full Speed"); -+module_param_named(host_support_fs_ls_low_power, dwc_otg_module_params.host_support_fs_ls_low_power, int, 0444); -+MODULE_PARM_DESC(host_support_fs_ls_low_power, "Support Low Power w/FS or LS 0=Support 1=Don't Support"); -+module_param_named(host_ls_low_power_phy_clk, dwc_otg_module_params.host_ls_low_power_phy_clk, int, 0444); -+MODULE_PARM_DESC(host_ls_low_power_phy_clk, "Low Speed Low Power Clock 0=48Mhz 1=6Mhz"); -+module_param_named(enable_dynamic_fifo, dwc_otg_module_params.enable_dynamic_fifo, int, 0444); -+MODULE_PARM_DESC(enable_dynamic_fifo, "0=cC Setting 1=Allow Dynamic Sizing"); -+module_param_named(data_fifo_size, dwc_otg_module_params.data_fifo_size, int, 0444); -+MODULE_PARM_DESC(data_fifo_size, "Total number of words in the data FIFO memory 32-32768"); -+module_param_named(dev_rx_fifo_size, dwc_otg_module_params.dev_rx_fifo_size, int, 0444); -+MODULE_PARM_DESC(dev_rx_fifo_size, "Number of words in the Rx FIFO 16-32768"); -+module_param_named(dev_nperio_tx_fifo_size, dwc_otg_module_params.dev_nperio_tx_fifo_size, int, 0444); -+MODULE_PARM_DESC(dev_nperio_tx_fifo_size, "Number of words in the non-periodic Tx FIFO 16-32768"); 
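For reference, the module parameters in this patch all default to -1 so that check_parameters() can tell a user-supplied value apart from an unset one before clamping it to a hardware-derived default. Below is a minimal, stand-alone sketch of that convention, using a hypothetical demo module and parameter name rather than the driver's own code:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

/* Hypothetical parameter following the same "-1 means unset" convention
 * used by dwc_otg_module_params. */
static int demo_rx_fifo_size = -1;
module_param_named(rx_fifo_size, demo_rx_fifo_size, int, 0444);
MODULE_PARM_DESC(rx_fifo_size, "Number of words in the Rx FIFO 16-32768");

#define DEMO_RX_FIFO_DEFAULT 1024

static int __init demo_param_init(void)
{
	if (demo_rx_fifo_size == -1) {
		/* Untouched by the user: quietly take the default. */
		demo_rx_fifo_size = DEMO_RX_FIFO_DEFAULT;
	} else if (demo_rx_fifo_size < 16 || demo_rx_fifo_size > 32768) {
		/* User-supplied but out of range: warn, then fall back. */
		pr_err("`%d' invalid for parameter `rx_fifo_size'\n",
		       demo_rx_fifo_size);
		demo_rx_fifo_size = DEMO_RX_FIFO_DEFAULT;
	}
	pr_info("demo: rx_fifo_size=%d\n", demo_rx_fifo_size);
	return 0;
}

static void __exit demo_param_exit(void)
{
}

module_init(demo_param_init);
module_exit(demo_param_exit);
MODULE_LICENSE("GPL");

With that convention, loading with an explicit argument (for example "insmod demo.ko rx_fifo_size=64") keeps the user's value when it is in range, while loading with no arguments silently picks the default -- which is the same behaviour the DWC_OTG_PARAM_ERR and DWC_OTG_PARAM_SET_DEFAULT macros implement for every field of dwc_otg_module_params in the patch above.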
-+module_param_named(dev_perio_tx_fifo_size_1, dwc_otg_module_params.dev_perio_tx_fifo_size[0], int, 0444); -+MODULE_PARM_DESC(dev_perio_tx_fifo_size_1, "Number of words in the periodic Tx FIFO 4-768"); -+module_param_named(dev_perio_tx_fifo_size_2, dwc_otg_module_params.dev_perio_tx_fifo_size[1], int, 0444); -+MODULE_PARM_DESC(dev_perio_tx_fifo_size_2, "Number of words in the periodic Tx FIFO 4-768"); -+module_param_named(dev_perio_tx_fifo_size_3, dwc_otg_module_params.dev_perio_tx_fifo_size[2], int, 0444); -+MODULE_PARM_DESC(dev_perio_tx_fifo_size_3, "Number of words in the periodic Tx FIFO 4-768"); -+module_param_named(dev_perio_tx_fifo_size_4, dwc_otg_module_params.dev_perio_tx_fifo_size[3], int, 0444); -+MODULE_PARM_DESC(dev_perio_tx_fifo_size_4, "Number of words in the periodic Tx FIFO 4-768"); -+module_param_named(dev_perio_tx_fifo_size_5, dwc_otg_module_params.dev_perio_tx_fifo_size[4], int, 0444); -+MODULE_PARM_DESC(dev_perio_tx_fifo_size_5, "Number of words in the periodic Tx FIFO 4-768"); -+module_param_named(dev_perio_tx_fifo_size_6, dwc_otg_module_params.dev_perio_tx_fifo_size[5], int, 0444); -+MODULE_PARM_DESC(dev_perio_tx_fifo_size_6, "Number of words in the periodic Tx FIFO 4-768"); -+module_param_named(dev_perio_tx_fifo_size_7, dwc_otg_module_params.dev_perio_tx_fifo_size[6], int, 0444); -+MODULE_PARM_DESC(dev_perio_tx_fifo_size_7, "Number of words in the periodic Tx FIFO 4-768"); -+module_param_named(dev_perio_tx_fifo_size_8, dwc_otg_module_params.dev_perio_tx_fifo_size[7], int, 0444); -+MODULE_PARM_DESC(dev_perio_tx_fifo_size_8, "Number of words in the periodic Tx FIFO 4-768"); -+module_param_named(dev_perio_tx_fifo_size_9, dwc_otg_module_params.dev_perio_tx_fifo_size[8], int, 0444); -+MODULE_PARM_DESC(dev_perio_tx_fifo_size_9, "Number of words in the periodic Tx FIFO 4-768"); -+module_param_named(dev_perio_tx_fifo_size_10, dwc_otg_module_params.dev_perio_tx_fifo_size[9], int, 0444); -+MODULE_PARM_DESC(dev_perio_tx_fifo_size_10, "Number of words in the periodic Tx FIFO 4-768"); -+module_param_named(dev_perio_tx_fifo_size_11, dwc_otg_module_params.dev_perio_tx_fifo_size[10], int, 0444); -+MODULE_PARM_DESC(dev_perio_tx_fifo_size_11, "Number of words in the periodic Tx FIFO 4-768"); -+module_param_named(dev_perio_tx_fifo_size_12, dwc_otg_module_params.dev_perio_tx_fifo_size[11], int, 0444); -+MODULE_PARM_DESC(dev_perio_tx_fifo_size_12, "Number of words in the periodic Tx FIFO 4-768"); -+module_param_named(dev_perio_tx_fifo_size_13, dwc_otg_module_params.dev_perio_tx_fifo_size[12], int, 0444); -+MODULE_PARM_DESC(dev_perio_tx_fifo_size_13, "Number of words in the periodic Tx FIFO 4-768"); -+module_param_named(dev_perio_tx_fifo_size_14, dwc_otg_module_params.dev_perio_tx_fifo_size[13], int, 0444); -+MODULE_PARM_DESC(dev_perio_tx_fifo_size_14, "Number of words in the periodic Tx FIFO 4-768"); -+module_param_named(dev_perio_tx_fifo_size_15, dwc_otg_module_params.dev_perio_tx_fifo_size[14], int, 0444); -+MODULE_PARM_DESC(dev_perio_tx_fifo_size_15, "Number of words in the periodic Tx FIFO 4-768"); -+module_param_named(host_rx_fifo_size, dwc_otg_module_params.host_rx_fifo_size, int, 0444); -+MODULE_PARM_DESC(host_rx_fifo_size, "Number of words in the Rx FIFO 16-32768"); -+module_param_named(host_nperio_tx_fifo_size, dwc_otg_module_params.host_nperio_tx_fifo_size, int, 0444); -+MODULE_PARM_DESC(host_nperio_tx_fifo_size, "Number of words in the non-periodic Tx FIFO 16-32768"); -+module_param_named(host_perio_tx_fifo_size, dwc_otg_module_params.host_perio_tx_fifo_size, int, 0444); 
-+MODULE_PARM_DESC(host_perio_tx_fifo_size, "Number of words in the host periodic Tx FIFO 16-32768"); -+module_param_named(max_transfer_size, dwc_otg_module_params.max_transfer_size, int, 0444); -+/** @todo Set the max to 512K, modify checks */ -+MODULE_PARM_DESC(max_transfer_size, "The maximum transfer size supported in bytes 2047-65535"); -+module_param_named(max_packet_count, dwc_otg_module_params.max_packet_count, int, 0444); -+MODULE_PARM_DESC(max_packet_count, "The maximum number of packets in a transfer 15-511"); -+module_param_named(host_channels, dwc_otg_module_params.host_channels, int, 0444); -+MODULE_PARM_DESC(host_channels, "The number of host channel registers to use 1-16"); -+module_param_named(dev_endpoints, dwc_otg_module_params.dev_endpoints, int, 0444); -+MODULE_PARM_DESC(dev_endpoints, "The number of endpoints in addition to EP0 available for device mode 1-15"); -+module_param_named(phy_type, dwc_otg_module_params.phy_type, int, 0444); -+MODULE_PARM_DESC(phy_type, "0=Reserved 1=UTMI+ 2=ULPI"); -+module_param_named(phy_utmi_width, dwc_otg_module_params.phy_utmi_width, int, 0444); -+MODULE_PARM_DESC(phy_utmi_width, "Specifies the UTMI+ Data Width 8 or 16 bits"); -+module_param_named(phy_ulpi_ddr, dwc_otg_module_params.phy_ulpi_ddr, int, 0444); -+MODULE_PARM_DESC(phy_ulpi_ddr, "ULPI at double or single data rate 0=Single 1=Double"); -+module_param_named(phy_ulpi_ext_vbus, dwc_otg_module_params.phy_ulpi_ext_vbus, int, 0444); -+MODULE_PARM_DESC(phy_ulpi_ext_vbus, "ULPI PHY using internal or external vbus 0=Internal"); -+module_param_named(i2c_enable, dwc_otg_module_params.i2c_enable, int, 0444); -+MODULE_PARM_DESC(i2c_enable, "FS PHY Interface"); -+module_param_named(ulpi_fs_ls, dwc_otg_module_params.ulpi_fs_ls, int, 0444); -+MODULE_PARM_DESC(ulpi_fs_ls, "ULPI PHY FS/LS mode only"); -+module_param_named(ts_dline, dwc_otg_module_params.ts_dline, int, 0444); -+MODULE_PARM_DESC(ts_dline, "Term select Dline pulsing for all PHYs"); -+module_param_named(debug, g_dbg_lvl, int, 0444); -+MODULE_PARM_DESC(debug, ""); -+ -+module_param_named(en_multiple_tx_fifo, dwc_otg_module_params.en_multiple_tx_fifo, int, 0444); -+MODULE_PARM_DESC(en_multiple_tx_fifo, "Dedicated Non Periodic Tx FIFOs 0=disabled 1=enabled"); -+module_param_named(dev_tx_fifo_size_1, dwc_otg_module_params.dev_tx_fifo_size[0], int, 0444); -+MODULE_PARM_DESC(dev_tx_fifo_size_1, "Number of words in the Tx FIFO 4-768"); -+module_param_named(dev_tx_fifo_size_2, dwc_otg_module_params.dev_tx_fifo_size[1], int, 0444); -+MODULE_PARM_DESC(dev_tx_fifo_size_2, "Number of words in the Tx FIFO 4-768"); -+module_param_named(dev_tx_fifo_size_3, dwc_otg_module_params.dev_tx_fifo_size[2], int, 0444); -+MODULE_PARM_DESC(dev_tx_fifo_size_3, "Number of words in the Tx FIFO 4-768"); -+module_param_named(dev_tx_fifo_size_4, dwc_otg_module_params.dev_tx_fifo_size[3], int, 0444); -+MODULE_PARM_DESC(dev_tx_fifo_size_4, "Number of words in the Tx FIFO 4-768"); -+module_param_named(dev_tx_fifo_size_5, dwc_otg_module_params.dev_tx_fifo_size[4], int, 0444); -+MODULE_PARM_DESC(dev_tx_fifo_size_5, "Number of words in the Tx FIFO 4-768"); -+module_param_named(dev_tx_fifo_size_6, dwc_otg_module_params.dev_tx_fifo_size[5], int, 0444); -+MODULE_PARM_DESC(dev_tx_fifo_size_6, "Number of words in the Tx FIFO 4-768"); -+module_param_named(dev_tx_fifo_size_7, dwc_otg_module_params.dev_tx_fifo_size[6], int, 0444); -+MODULE_PARM_DESC(dev_tx_fifo_size_7, "Number of words in the Tx FIFO 4-768"); -+module_param_named(dev_tx_fifo_size_8, 
dwc_otg_module_params.dev_tx_fifo_size[7], int, 0444); -+MODULE_PARM_DESC(dev_tx_fifo_size_8, "Number of words in the Tx FIFO 4-768"); -+module_param_named(dev_tx_fifo_size_9, dwc_otg_module_params.dev_tx_fifo_size[8], int, 0444); -+MODULE_PARM_DESC(dev_tx_fifo_size_9, "Number of words in the Tx FIFO 4-768"); -+module_param_named(dev_tx_fifo_size_10, dwc_otg_module_params.dev_tx_fifo_size[9], int, 0444); -+MODULE_PARM_DESC(dev_tx_fifo_size_10, "Number of words in the Tx FIFO 4-768"); -+module_param_named(dev_tx_fifo_size_11, dwc_otg_module_params.dev_tx_fifo_size[10], int, 0444); -+MODULE_PARM_DESC(dev_tx_fifo_size_11, "Number of words in the Tx FIFO 4-768"); -+module_param_named(dev_tx_fifo_size_12, dwc_otg_module_params.dev_tx_fifo_size[11], int, 0444); -+MODULE_PARM_DESC(dev_tx_fifo_size_12, "Number of words in the Tx FIFO 4-768"); -+module_param_named(dev_tx_fifo_size_13, dwc_otg_module_params.dev_tx_fifo_size[12], int, 0444); -+MODULE_PARM_DESC(dev_tx_fifo_size_13, "Number of words in the Tx FIFO 4-768"); -+module_param_named(dev_tx_fifo_size_14, dwc_otg_module_params.dev_tx_fifo_size[13], int, 0444); -+MODULE_PARM_DESC(dev_tx_fifo_size_14, "Number of words in the Tx FIFO 4-768"); -+module_param_named(dev_tx_fifo_size_15, dwc_otg_module_params.dev_tx_fifo_size[14], int, 0444); -+MODULE_PARM_DESC(dev_tx_fifo_size_15, "Number of words in the Tx FIFO 4-768"); -+ -+module_param_named(thr_ctl, dwc_otg_module_params.thr_ctl, int, 0444); -+MODULE_PARM_DESC(thr_ctl, "Thresholding enable flag bit 0 - non ISO Tx thr., 1 - ISO Tx thr., 2 - Rx thr.- bit 0=disabled 1=enabled"); -+module_param_named(tx_thr_length, dwc_otg_module_params.tx_thr_length, int, 0444); -+MODULE_PARM_DESC(tx_thr_length, "Tx Threshold length in 32 bit DWORDs"); -+module_param_named(rx_thr_length, dwc_otg_module_params.rx_thr_length, int, 0444); -+MODULE_PARM_DESC(rx_thr_length, "Rx Threshold length in 32 bit DWORDs"); -+ -+module_param_named(pti_enable, dwc_otg_module_params.pti_enable, int, 0444); -+MODULE_PARM_DESC(pti_enable, "Per Transfer Interrupt mode 0=disabled 1=enabled"); -+ -+module_param_named(mpi_enable, dwc_otg_module_params.mpi_enable, int, 0444); -+MODULE_PARM_DESC(mpi_enable, "Multiprocessor Interrupt mode 0=disabled 1=enabled"); -+ -+/** @page "Module Parameters" -+ * -+ * The following parameters may be specified when starting the module. -+ * These parameters define how the DWC_otg controller should be -+ * configured. Parameter values are passed to the CIL initialization -+ * function dwc_otg_cil_init -+ * -+ * Example: modprobe dwc_otg speed=1 otg_cap=1 -+ * -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+ -+*/ ---- /dev/null -+++ b/drivers/usb/dwc_otg/dwc_otg_driver.h -@@ -0,0 +1,83 @@ -+/* ========================================================================== -+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_driver.h $ -+ * $Revision: 1.2 $ -+ * $Date: 2008-11-21 05:39:15 $ -+ * $Change: 1064918 $ -+ * -+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, -+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless -+ * otherwise expressly agreed to in writing between Synopsys and you. 
-+ * -+ * The Software IS NOT an item of Licensed Software or Licensed Product under -+ * any End User Software License Agreement or Agreement for Licensed Product -+ * with Synopsys or any supplement thereto. You are permitted to use and -+ * redistribute this Software in source and binary forms, with or without -+ * modification, provided that redistributions of source code must retain this -+ * notice. You may not view, use, disclose, copy or distribute this file or -+ * any information contained herein except pursuant to this license grant from -+ * Synopsys. If you do not agree with this notice, including the disclaimer -+ * below, then you are not authorized to use the Software. -+ * -+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, -+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -+ * DAMAGE. -+ * ========================================================================== */ -+ -+#ifndef __DWC_OTG_DRIVER_H__ -+#define __DWC_OTG_DRIVER_H__ -+ -+/** @file -+ * This file contains the interface to the Linux driver. -+ */ -+#include "dwc_otg_cil.h" -+ -+/* Type declarations */ -+struct dwc_otg_pcd; -+struct dwc_otg_hcd; -+ -+/** -+ * This structure is a wrapper that encapsulates the driver components used to -+ * manage a single DWC_otg controller. -+ */ -+typedef struct dwc_otg_device { -+ /** Base address returned from ioremap() */ -+ void *base; -+ -+ struct device *parent; -+ -+ /** Pointer to the core interface structure. */ -+ dwc_otg_core_if_t *core_if; -+ -+ /** Register offset for Diagnostic API. */ -+ uint32_t reg_offset; -+ -+ /** Pointer to the PCD structure. */ -+ struct dwc_otg_pcd *pcd; -+ -+ /** Pointer to the HCD structure. */ -+ struct dwc_otg_hcd *hcd; -+ -+ /** Flag to indicate whether the common IRQ handler is installed. */ -+ uint8_t common_irq_installed; -+ -+ /* Interrupt request number. */ -+ unsigned int irq; -+ -+ /* Physical address of Control and Status registers, used by -+ * release_mem_region(). -+ */ -+ resource_size_t phys_addr; -+ -+ /* Length of memory region, used by release_mem_region(). */ -+ unsigned long base_len; -+} dwc_otg_device_t; -+ -+#endif ---- /dev/null -+++ b/drivers/usb/dwc_otg/dwc_otg_hcd.c -@@ -0,0 +1,2852 @@ -+/* ========================================================================== -+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_hcd.c $ -+ * $Revision: 1.4 $ -+ * $Date: 2008-11-21 05:39:15 $ -+ * $Change: 1064940 $ -+ * -+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, -+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless -+ * otherwise expressly agreed to in writing between Synopsys and you. -+ * -+ * The Software IS NOT an item of Licensed Software or Licensed Product under -+ * any End User Software License Agreement or Agreement for Licensed Product -+ * with Synopsys or any supplement thereto. 
You are permitted to use and -+ * redistribute this Software in source and binary forms, with or without -+ * modification, provided that redistributions of source code must retain this -+ * notice. You may not view, use, disclose, copy or distribute this file or -+ * any information contained herein except pursuant to this license grant from -+ * Synopsys. If you do not agree with this notice, including the disclaimer -+ * below, then you are not authorized to use the Software. -+ * -+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, -+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -+ * DAMAGE. -+ * ========================================================================== */ -+#ifndef DWC_DEVICE_ONLY -+ -+/** -+ * @file -+ * -+ * This file contains the implementation of the HCD. In Linux, the HCD -+ * implements the hc_driver API. -+ */ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "dwc_otg_driver.h" -+#include "dwc_otg_hcd.h" -+#include "dwc_otg_regs.h" -+ -+static const char dwc_otg_hcd_name[] = "dwc_otg"; -+ -+static const struct hc_driver dwc_otg_hc_driver = { -+ -+ .description = dwc_otg_hcd_name, -+ .product_desc = "DWC OTG Controller", -+ .hcd_priv_size = sizeof(dwc_otg_hcd_t), -+ -+ .irq = dwc_otg_hcd_irq, -+ -+ .flags = HCD_MEMORY | HCD_USB2, -+ -+ //.reset = -+ .start = dwc_otg_hcd_start, -+ //.suspend = -+ //.resume = -+ .stop = dwc_otg_hcd_stop, -+ -+ .urb_enqueue = dwc_otg_hcd_urb_enqueue, -+ .urb_dequeue = dwc_otg_hcd_urb_dequeue, -+ .endpoint_disable = dwc_otg_hcd_endpoint_disable, -+ -+ .get_frame_number = dwc_otg_hcd_get_frame_number, -+ -+ .hub_status_data = dwc_otg_hcd_hub_status_data, -+ .hub_control = dwc_otg_hcd_hub_control, -+ //.hub_suspend = -+ //.hub_resume = -+}; -+ -+/** -+ * Work queue function for starting the HCD when A-Cable is connected. -+ * The dwc_otg_hcd_start() must be called in a process context. -+ */ -+static void hcd_start_func( -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) -+ void *_vp -+#else -+ struct work_struct *_work -+#endif -+ ) -+{ -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) -+ struct usb_hcd *usb_hcd = (struct usb_hcd *)_vp; -+#else -+ struct delayed_work *dw = container_of(_work, struct delayed_work, work); -+ struct dwc_otg_hcd *otg_hcd = container_of(dw, struct dwc_otg_hcd, start_work); -+ struct usb_hcd *usb_hcd = container_of((void *)otg_hcd, struct usb_hcd, hcd_priv); -+#endif -+ DWC_DEBUGPL(DBG_HCDV, "%s() %p\n", __func__, usb_hcd); -+ if (usb_hcd) { -+ dwc_otg_hcd_start(usb_hcd); -+ } -+} -+ -+/** -+ * HCD Callback function for starting the HCD when A-Cable is -+ * connected. 
-+ * -+ * @param p void pointer to the struct usb_hcd -+ */ -+static int32_t dwc_otg_hcd_start_cb(void *p) -+{ -+ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(p); -+ dwc_otg_core_if_t *core_if = dwc_otg_hcd->core_if; -+ hprt0_data_t hprt0; -+ -+ if (core_if->op_state == B_HOST) { -+ /* -+ * Reset the port. During a HNP mode switch the reset -+ * needs to occur within 1ms and have a duration of at -+ * least 50ms. -+ */ -+ hprt0.d32 = dwc_otg_read_hprt0(core_if); -+ hprt0.b.prtrst = 1; -+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); -+ ((struct usb_hcd *)p)->self.is_b_host = 1; -+ } else { -+ ((struct usb_hcd *)p)->self.is_b_host = 0; -+ } -+ -+ /* Need to start the HCD in a non-interrupt context. */ -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) -+ INIT_WORK(&dwc_otg_hcd->start_work, hcd_start_func, p); -+// INIT_DELAYED_WORK(&dwc_otg_hcd->start_work, hcd_start_func, p); -+#else -+// INIT_WORK(&dwc_otg_hcd->start_work, hcd_start_func); -+ INIT_DELAYED_WORK(&dwc_otg_hcd->start_work, hcd_start_func); -+#endif -+// schedule_work(&dwc_otg_hcd->start_work); -+ queue_delayed_work(core_if->wq_otg, &dwc_otg_hcd->start_work, 50 * HZ / 1000); -+ -+ return 1; -+} -+ -+/** -+ * HCD Callback function for stopping the HCD. -+ * -+ * @param p void pointer to the struct usb_hcd -+ */ -+static int32_t dwc_otg_hcd_stop_cb(void *p) -+{ -+ struct usb_hcd *usb_hcd = (struct usb_hcd *)p; -+ DWC_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, p); -+ dwc_otg_hcd_stop(usb_hcd); -+ return 1; -+} -+ -+static void del_xfer_timers(dwc_otg_hcd_t *hcd) -+{ -+#ifdef DEBUG -+ int i; -+ int num_channels = hcd->core_if->core_params->host_channels; -+ for (i = 0; i < num_channels; i++) { -+ del_timer(&hcd->core_if->hc_xfer_timer[i]); -+ } -+#endif -+} -+ -+static void del_timers(dwc_otg_hcd_t *hcd) -+{ -+ del_xfer_timers(hcd); -+ del_timer(&hcd->conn_timer); -+} -+ -+/** -+ * Processes all the URBs in a single list of QHs. Completes them with -+ * -ETIMEDOUT and frees the QTD. -+ */ -+static void kill_urbs_in_qh_list(dwc_otg_hcd_t *hcd, struct list_head *qh_list) -+{ -+ struct list_head *qh_item; -+ dwc_otg_qh_t *qh; -+ struct list_head *qtd_item; -+ dwc_otg_qtd_t *qtd; -+ -+ list_for_each(qh_item, qh_list) { -+ qh = list_entry(qh_item, dwc_otg_qh_t, qh_list_entry); -+ for (qtd_item = qh->qtd_list.next; -+ qtd_item != &qh->qtd_list; -+ qtd_item = qh->qtd_list.next) { -+ qtd = list_entry(qtd_item, dwc_otg_qtd_t, qtd_list_entry); -+ if (qtd->urb != NULL) { -+ dwc_otg_hcd_complete_urb(hcd, qtd->urb, -+ -ETIMEDOUT); -+ } -+ dwc_otg_hcd_qtd_remove_and_free(hcd, qtd); -+ } -+ } -+} -+ -+/** -+ * Responds with an error status of ETIMEDOUT to all URBs in the non-periodic -+ * and periodic schedules. The QTD associated with each URB is removed from -+ * the schedule and freed. This function may be called when a disconnect is -+ * detected or when the HCD is being stopped. -+ */ -+static void kill_all_urbs(dwc_otg_hcd_t *hcd) -+{ -+ kill_urbs_in_qh_list(hcd, &hcd->non_periodic_sched_inactive); -+ kill_urbs_in_qh_list(hcd, &hcd->non_periodic_sched_active); -+ kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_inactive); -+ kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_ready); -+ kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_assigned); -+ kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_queued); -+} -+ -+/** -+ * HCD Callback function for disconnect of the HCD. 
-+ * -+ * @param p void pointer to the struct usb_hcd -+ */ -+static int32_t dwc_otg_hcd_disconnect_cb(void *p) -+{ -+ gintsts_data_t intr; -+ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(p); -+ -+ //DWC_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, p); -+ -+ /* -+ * Set status flags for the hub driver. -+ */ -+ dwc_otg_hcd->flags.b.port_connect_status_change = 1; -+ dwc_otg_hcd->flags.b.port_connect_status = 0; -+ -+ /* -+ * Shutdown any transfers in process by clearing the Tx FIFO Empty -+ * interrupt mask and status bits and disabling subsequent host -+ * channel interrupts. -+ */ -+ intr.d32 = 0; -+ intr.b.nptxfempty = 1; -+ intr.b.ptxfempty = 1; -+ intr.b.hcintr = 1; -+ dwc_modify_reg32(&dwc_otg_hcd->core_if->core_global_regs->gintmsk, intr.d32, 0); -+ dwc_modify_reg32(&dwc_otg_hcd->core_if->core_global_regs->gintsts, intr.d32, 0); -+ -+ del_timers(dwc_otg_hcd); -+ -+ /* -+ * Turn off the vbus power only if the core has transitioned to device -+ * mode. If still in host mode, need to keep power on to detect a -+ * reconnection. -+ */ -+ if (dwc_otg_is_device_mode(dwc_otg_hcd->core_if)) { -+ if (dwc_otg_hcd->core_if->op_state != A_SUSPEND) { -+ hprt0_data_t hprt0 = { .d32=0 }; -+ DWC_PRINT("Disconnect: PortPower off\n"); -+ hprt0.b.prtpwr = 0; -+ dwc_write_reg32(dwc_otg_hcd->core_if->host_if->hprt0, hprt0.d32); -+ } -+ -+ dwc_otg_disable_host_interrupts(dwc_otg_hcd->core_if); -+ } -+ -+ /* Respond with an error status to all URBs in the schedule. */ -+ kill_all_urbs(dwc_otg_hcd); -+ -+ if (dwc_otg_is_host_mode(dwc_otg_hcd->core_if)) { -+ /* Clean up any host channels that were in use. */ -+ int num_channels; -+ int i; -+ dwc_hc_t *channel; -+ dwc_otg_hc_regs_t *hc_regs; -+ hcchar_data_t hcchar; -+ -+ num_channels = dwc_otg_hcd->core_if->core_params->host_channels; -+ -+ if (!dwc_otg_hcd->core_if->dma_enable) { -+ /* Flush out any channel requests in slave mode. */ -+ for (i = 0; i < num_channels; i++) { -+ channel = dwc_otg_hcd->hc_ptr_array[i]; -+ if (list_empty(&channel->hc_list_entry)) { -+ hc_regs = dwc_otg_hcd->core_if->host_if->hc_regs[i]; -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ if (hcchar.b.chen) { -+ hcchar.b.chen = 0; -+ hcchar.b.chdis = 1; -+ hcchar.b.epdir = 0; -+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); -+ } -+ } -+ } -+ } -+ -+ for (i = 0; i < num_channels; i++) { -+ channel = dwc_otg_hcd->hc_ptr_array[i]; -+ if (list_empty(&channel->hc_list_entry)) { -+ hc_regs = dwc_otg_hcd->core_if->host_if->hc_regs[i]; -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ if (hcchar.b.chen) { -+ /* Halt the channel. */ -+ hcchar.b.chdis = 1; -+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); -+ } -+ -+ dwc_otg_hc_cleanup(dwc_otg_hcd->core_if, channel); -+ list_add_tail(&channel->hc_list_entry, -+ &dwc_otg_hcd->free_hc_list); -+ } -+ } -+ } -+ -+ /* A disconnect will end the session so the B-Device is no -+ * longer a B-host. */ -+ ((struct usb_hcd *)p)->self.is_b_host = 0; -+ return 1; -+} -+ -+/** -+ * Connection timeout function. An OTG host is required to display a -+ * message if the device does not connect within 10 seconds. -+ */ -+void dwc_otg_hcd_connect_timeout(unsigned long ptr) -+{ -+ DWC_DEBUGPL(DBG_HCDV, "%s(%x)\n", __func__, (int)ptr); -+ DWC_PRINT("Connect Timeout\n"); -+ DWC_ERROR("Device Not Connected/Responding\n"); -+} -+ -+/** -+ * Start the connection timer. An OTG host is required to display a -+ * message if the device does not connect within 10 seconds. The -+ * timer is deleted if a port connect interrupt occurs before the -+ * timer expires. 
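/* Standalone sketch, not part of the patch above: kill_urbs_in_qh_list() and
 * kill_all_urbs() never hold an iterator across a node they are about to
 * free; they re-read the list head after every removal. The same "consume
 * from the head until empty" pattern is shown here on a plain singly linked
 * list (toy types and names, not the driver's dwc_otg structures). */
#include <stdio.h>
#include <stdlib.h>

struct node { int id; struct node *next; };

static void kill_all(struct node **head)
{
    while (*head) {                 /* re-read the head each iteration ...   */
        struct node *n = *head;
        *head = n->next;            /* ... because the current node is freed */
        printf("completing entry %d with a timeout status (simulated)\n", n->id);
        free(n);
    }
}

int main(void)
{
    struct node *head = NULL;
    for (int i = 3; i > 0; i--) {
        struct node *n = malloc(sizeof(*n));
        if (!n)
            return 1;
        n->id = i;
        n->next = head;
        head = n;
    }
    kill_all(&head);
    return 0;
}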
-+ */ -+static void dwc_otg_hcd_start_connect_timer(dwc_otg_hcd_t *hcd) -+{ -+ init_timer(&hcd->conn_timer); -+ hcd->conn_timer.function = dwc_otg_hcd_connect_timeout; -+ hcd->conn_timer.data = 0; -+ hcd->conn_timer.expires = jiffies + (HZ * 10); -+ add_timer(&hcd->conn_timer); -+} -+ -+/** -+ * HCD Callback function for disconnect of the HCD. -+ * -+ * @param p void pointer to the struct usb_hcd -+ */ -+static int32_t dwc_otg_hcd_session_start_cb(void *p) -+{ -+ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(p); -+ DWC_DEBUGPL(DBG_HCDV, "%s(%p)\n", __func__, p); -+ dwc_otg_hcd_start_connect_timer(dwc_otg_hcd); -+ return 1; -+} -+ -+/** -+ * HCD Callback structure for handling mode switching. -+ */ -+static dwc_otg_cil_callbacks_t hcd_cil_callbacks = { -+ .start = dwc_otg_hcd_start_cb, -+ .stop = dwc_otg_hcd_stop_cb, -+ .disconnect = dwc_otg_hcd_disconnect_cb, -+ .session_start = dwc_otg_hcd_session_start_cb, -+ .p = 0, -+}; -+ -+/** -+ * Reset tasklet function -+ */ -+static void reset_tasklet_func(unsigned long data) -+{ -+ dwc_otg_hcd_t *dwc_otg_hcd = (dwc_otg_hcd_t *)data; -+ dwc_otg_core_if_t *core_if = dwc_otg_hcd->core_if; -+ hprt0_data_t hprt0; -+ -+ DWC_DEBUGPL(DBG_HCDV, "USB RESET tasklet called\n"); -+ -+ hprt0.d32 = dwc_otg_read_hprt0(core_if); -+ hprt0.b.prtrst = 1; -+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); -+ mdelay(60); -+ -+ hprt0.b.prtrst = 0; -+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); -+ dwc_otg_hcd->flags.b.port_reset_change = 1; -+} -+ -+static struct tasklet_struct reset_tasklet = { -+ .next = NULL, -+ .state = 0, -+ .count = ATOMIC_INIT(0), -+ .func = reset_tasklet_func, -+ .data = 0, -+}; -+ -+/** -+ * Initializes the HCD. This function allocates memory for and initializes the -+ * static parts of the usb_hcd and dwc_otg_hcd structures. It also registers the -+ * USB bus with the core and calls the hc_driver->start() function. It returns -+ * a negative error on failure. -+ */ -+int dwc_otg_hcd_init(struct device *dev) -+{ -+ dwc_otg_device_t *otg_dev = dev_get_drvdata(dev); -+ struct usb_hcd *hcd = NULL; -+ dwc_otg_hcd_t *dwc_otg_hcd = NULL; -+ -+ int num_channels; -+ int i; -+ dwc_hc_t *channel; -+ -+ int retval = 0; -+ -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD INIT\n"); -+ -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) -+ /* 2.6.20+ requires dev.dma_mask to be set prior to calling usb_create_hcd() */ -+ -+ /* Set device flags indicating whether the HCD supports DMA. */ -+ if (otg_dev->core_if->dma_enable) { -+ DWC_PRINT("Using DMA mode\n"); -+ dev->dma_mask = (void *)~0; -+ dev->coherent_dma_mask = ~0; -+ -+ if (otg_dev->core_if->dma_desc_enable) { -+ DWC_PRINT("Device using Descriptor DMA mode\n"); -+ } else { -+ DWC_PRINT("Device using Buffer DMA mode\n"); -+ } -+ } else { -+ DWC_PRINT("Using Slave mode\n"); -+ dev->dma_mask = (void *)0; -+ dev->coherent_dma_mask = 0; -+ } -+#endif -+ /* -+ * Allocate memory for the base HCD plus the DWC OTG HCD. -+ * Initialize the base HCD. -+ */ -+ hcd = usb_create_hcd(&dwc_otg_hc_driver, dev, dev_name(dev)); -+ if (!hcd) { -+ retval = -ENOMEM; -+ goto error1; -+ } -+ -+ dev_set_drvdata(dev, otg_dev); -+ hcd->regs = otg_dev->base; -+ hcd->rsrc_start = otg_dev->phys_addr; -+ hcd->rsrc_len = otg_dev->base_len; -+ hcd->self.otg_port = 1; -+ hcd->has_tt = 1; -+ -+ /* Initialize the DWC OTG HCD. 
*/ -+ dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); -+ dwc_otg_hcd->core_if = otg_dev->core_if; -+ otg_dev->hcd = dwc_otg_hcd; -+ -+ /* */ -+ spin_lock_init(&dwc_otg_hcd->lock); -+ -+ /* Register the HCD CIL Callbacks */ -+ dwc_otg_cil_register_hcd_callbacks(otg_dev->core_if, -+ &hcd_cil_callbacks, hcd); -+ -+ /* Initialize the non-periodic schedule. */ -+ INIT_LIST_HEAD(&dwc_otg_hcd->non_periodic_sched_inactive); -+ INIT_LIST_HEAD(&dwc_otg_hcd->non_periodic_sched_active); -+ -+ /* Initialize the periodic schedule. */ -+ INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_inactive); -+ INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_ready); -+ INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_assigned); -+ INIT_LIST_HEAD(&dwc_otg_hcd->periodic_sched_queued); -+ -+ /* -+ * Create a host channel descriptor for each host channel implemented -+ * in the controller. Initialize the channel descriptor array. -+ */ -+ INIT_LIST_HEAD(&dwc_otg_hcd->free_hc_list); -+ num_channels = dwc_otg_hcd->core_if->core_params->host_channels; -+ memset(dwc_otg_hcd->hc_ptr_array, 0, sizeof(dwc_otg_hcd->hc_ptr_array)); -+ for (i = 0; i < num_channels; i++) { -+ channel = kmalloc(sizeof(dwc_hc_t), GFP_KERNEL); -+ if (channel == NULL) { -+ retval = -ENOMEM; -+ DWC_ERROR("%s: host channel allocation failed\n", __func__); -+ goto error2; -+ } -+ memset(channel, 0, sizeof(dwc_hc_t)); -+ channel->hc_num = i; -+ dwc_otg_hcd->hc_ptr_array[i] = channel; -+#ifdef DEBUG -+ init_timer(&dwc_otg_hcd->core_if->hc_xfer_timer[i]); -+#endif -+ DWC_DEBUGPL(DBG_HCDV, "HCD Added channel #%d, hc=%p\n", i, channel); -+ } -+ -+ /* Initialize the Connection timeout timer. */ -+ init_timer(&dwc_otg_hcd->conn_timer); -+ -+ /* Initialize reset tasklet. */ -+ reset_tasklet.data = (unsigned long) dwc_otg_hcd; -+ dwc_otg_hcd->reset_tasklet = &reset_tasklet; -+ -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) -+ /* Set device flags indicating whether the HCD supports DMA. */ -+ if (otg_dev->core_if->dma_enable) { -+ DWC_PRINT("Using DMA mode\n"); -+ dev->dma_mask = (void *)~0; -+ dev->coherent_dma_mask = ~0; -+ -+ if (otg_dev->core_if->dma_desc_enable){ -+ DWC_PRINT("Device using Descriptor DMA mode\n"); -+ } else { -+ DWC_PRINT("Device using Buffer DMA mode\n"); -+ } -+ } else { -+ DWC_PRINT("Using Slave mode\n"); -+ dev->dma_mask = (void *)0; -+ dev->dev.coherent_dma_mask = 0; -+ } -+#endif -+ /* -+ * Finish generic HCD initialization and start the HCD. This function -+ * allocates the DMA buffer pool, registers the USB bus, requests the -+ * IRQ line, and calls dwc_otg_hcd_start method. -+ */ -+ retval = usb_add_hcd(hcd, otg_dev->irq, IRQF_SHARED); -+ if (retval < 0) { -+ goto error2; -+ } -+ -+ /* -+ * Allocate space for storing data on status transactions. Normally no -+ * data is sent, but this space acts as a bit bucket. This must be -+ * done after usb_add_hcd since that function allocates the DMA buffer -+ * pool. 
-+ */ -+ if (otg_dev->core_if->dma_enable) { -+ dwc_otg_hcd->status_buf = -+ dma_alloc_coherent(dev, -+ DWC_OTG_HCD_STATUS_BUF_SIZE, -+ &dwc_otg_hcd->status_buf_dma, -+ GFP_KERNEL | GFP_DMA); -+ } else { -+ dwc_otg_hcd->status_buf = kmalloc(DWC_OTG_HCD_STATUS_BUF_SIZE, -+ GFP_KERNEL); -+ } -+ if (!dwc_otg_hcd->status_buf) { -+ retval = -ENOMEM; -+ DWC_ERROR("%s: status_buf allocation failed\n", __func__); -+ goto error3; -+ } -+ -+ dwc_otg_hcd->otg_dev = otg_dev; -+ -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Initialized HCD, bus=%s, usbbus=%d\n", -+ dev_name(dev), hcd->self.busnum); -+ -+ return 0; -+ -+ /* Error conditions */ -+ error3: -+ usb_remove_hcd(hcd); -+ error2: -+ dwc_otg_hcd_free(hcd); -+ usb_put_hcd(hcd); -+ -+ /* FIXME: 2008/05/03 by Steven -+ * write back to device: -+ * dwc_otg_hcd has already been released by dwc_otg_hcd_free() -+ */ -+ dev_set_drvdata(dev, otg_dev); -+ -+ error1: -+ return retval; -+} -+ -+/** -+ * Removes the HCD. -+ * Frees memory and resources associated with the HCD and deregisters the bus. -+ */ -+void dwc_otg_hcd_remove(struct device *dev) -+{ -+ dwc_otg_device_t *otg_dev = dev_get_drvdata(dev); -+ dwc_otg_hcd_t *dwc_otg_hcd; -+ struct usb_hcd *hcd; -+ -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD REMOVE\n"); -+ -+ if (!otg_dev) { -+ DWC_DEBUGPL(DBG_ANY, "%s: otg_dev NULL!\n", __func__); -+ return; -+ } -+ -+ dwc_otg_hcd = otg_dev->hcd; -+ -+ if (!dwc_otg_hcd) { -+ DWC_DEBUGPL(DBG_ANY, "%s: otg_dev->hcd NULL!\n", __func__); -+ return; -+ } -+ -+ hcd = dwc_otg_hcd_to_hcd(dwc_otg_hcd); -+ -+ if (!hcd) { -+ DWC_DEBUGPL(DBG_ANY, "%s: dwc_otg_hcd_to_hcd(dwc_otg_hcd) NULL!\n", __func__); -+ return; -+ } -+ -+ /* Turn off all interrupts */ -+ dwc_write_reg32(&dwc_otg_hcd->core_if->core_global_regs->gintmsk, 0); -+ dwc_modify_reg32(&dwc_otg_hcd->core_if->core_global_regs->gahbcfg, 1, 0); -+ -+ usb_remove_hcd(hcd); -+ dwc_otg_hcd_free(hcd); -+ usb_put_hcd(hcd); -+} -+ -+/* ========================================================================= -+ * Linux HC Driver Functions -+ * ========================================================================= */ -+ -+/** -+ * Initializes dynamic portions of the DWC_otg HCD state. -+ */ -+static void hcd_reinit(dwc_otg_hcd_t *hcd) -+{ -+ struct list_head *item; -+ int num_channels; -+ int i; -+ dwc_hc_t *channel; -+ -+ hcd->flags.d32 = 0; -+ -+ hcd->non_periodic_qh_ptr = &hcd->non_periodic_sched_active; -+ hcd->non_periodic_channels = 0; -+ hcd->periodic_channels = 0; -+ -+ /* -+ * Put all channels in the free channel list and clean up channel -+ * states. -+ */ -+ item = hcd->free_hc_list.next; -+ while (item != &hcd->free_hc_list) { -+ list_del(item); -+ item = hcd->free_hc_list.next; -+ } -+ num_channels = hcd->core_if->core_params->host_channels; -+ for (i = 0; i < num_channels; i++) { -+ channel = hcd->hc_ptr_array[i]; -+ list_add_tail(&channel->hc_list_entry, &hcd->free_hc_list); -+ dwc_otg_hc_cleanup(hcd->core_if, channel); -+ } -+ -+ /* Initialize the DWC core for host mode operation. */ -+ dwc_otg_core_host_init(hcd->core_if); -+} -+ -+/** Initializes the DWC_otg controller and its root hub and prepares it for host -+ * mode operation. Activates the root port. Returns 0 on success and a negative -+ * error code on failure. 
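/* Standalone sketch, not part of the patch above: dwc_otg_hcd_init() uses the
 * usual kernel "goto ladder" so each failure point unwinds only the steps
 * that already succeeded (error3 undoes usb_add_hcd, error2 the allocations,
 * error1 just returns). A minimal userspace analogue with hypothetical
 * resources standing in for the HCD allocations: */
#include <stdio.h>
#include <stdlib.h>

static int init_three_resources(void)
{
    char *a, *b, *c;
    int retval = 0;

    a = malloc(16);
    if (!a) { retval = -1; goto err_a; }

    b = malloc(16);
    if (!b) { retval = -1; goto err_b; }

    c = malloc(16);
    if (!c) { retval = -1; goto err_c; }

    printf("all resources acquired\n");
    free(c); free(b); free(a);      /* normal teardown */
    return 0;

err_c:
    free(b);                        /* undo step 2, then fall through */
err_b:
    free(a);                        /* undo step 1 */
err_a:
    return retval;                  /* nothing left to undo */
}

int main(void)
{
    return init_three_resources() ? EXIT_FAILURE : EXIT_SUCCESS;
}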
*/ -+int dwc_otg_hcd_start(struct usb_hcd *hcd) -+{ -+ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); -+ dwc_otg_core_if_t *core_if = dwc_otg_hcd->core_if; -+ struct usb_bus *bus; -+ -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) -+ struct usb_device *udev; -+ int retval; -+#endif -+ -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD START\n"); -+ -+ bus = hcd_to_bus(hcd); -+ -+ /* Initialize the bus state. If the core is in Device Mode -+ * HALT the USB bus and return. */ -+ if (dwc_otg_is_device_mode(core_if)) { -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) -+ hcd->state = HC_STATE_HALT; -+#else -+ hcd->state = HC_STATE_RUNNING; -+#endif -+ return 0; -+ } -+ hcd->state = HC_STATE_RUNNING; -+ -+ /* Initialize and connect root hub if one is not already attached */ -+ if (bus->root_hub) { -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Has Root Hub\n"); -+ /* Inform the HUB driver to resume. */ -+ usb_hcd_resume_root_hub(hcd); -+ } -+ else { -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Does Not Have Root Hub\n"); -+ -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) -+ udev = usb_alloc_dev(NULL, bus, 0); -+ udev->speed = USB_SPEED_HIGH; -+ if (!udev) { -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Error udev alloc\n"); -+ return -ENODEV; -+ } -+ if ((retval = usb_hcd_register_root_hub(udev, hcd)) != 0) { -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Error registering %d\n", retval); -+ return -ENODEV; -+ } -+#endif -+ } -+ -+ hcd_reinit(dwc_otg_hcd); -+ -+ return 0; -+} -+ -+static void qh_list_free(dwc_otg_hcd_t *hcd, struct list_head *qh_list) -+{ -+ struct list_head *item; -+ dwc_otg_qh_t *qh; -+ -+ if (!qh_list->next) { -+ /* The list hasn't been initialized yet. */ -+ return; -+ } -+ -+ /* Ensure there are no QTDs or URBs left. */ -+ kill_urbs_in_qh_list(hcd, qh_list); -+ -+ for (item = qh_list->next; item != qh_list; item = qh_list->next) { -+ qh = list_entry(item, dwc_otg_qh_t, qh_list_entry); -+ dwc_otg_hcd_qh_remove_and_free(hcd, qh); -+ } -+} -+ -+/** -+ * Halts the DWC_otg host mode operations in a clean manner. USB transfers are -+ * stopped. -+ */ -+void dwc_otg_hcd_stop(struct usb_hcd *hcd) -+{ -+ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); -+ hprt0_data_t hprt0 = { .d32=0 }; -+ -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD STOP\n"); -+ -+ /* Turn off all host-specific interrupts. */ -+ dwc_otg_disable_host_interrupts(dwc_otg_hcd->core_if); -+ -+ /* -+ * The root hub should be disconnected before this function is called. -+ * The disconnect will clear the QTD lists (via ..._hcd_urb_dequeue) -+ * and the QH lists (via ..._hcd_endpoint_disable). -+ */ -+ -+ /* Turn off the vbus power */ -+ DWC_PRINT("PortPower off\n"); -+ hprt0.b.prtpwr = 0; -+ dwc_write_reg32(dwc_otg_hcd->core_if->host_if->hprt0, hprt0.d32); -+} -+ -+/** Returns the current frame number. */ -+int dwc_otg_hcd_get_frame_number(struct usb_hcd *hcd) -+{ -+ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); -+ hfnum_data_t hfnum; -+ -+ hfnum.d32 = dwc_read_reg32(&dwc_otg_hcd->core_if-> -+ host_if->host_global_regs->hfnum); -+ -+#ifdef DEBUG_SOF -+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD GET FRAME NUMBER %d\n", hfnum.b.frnum); -+#endif -+ return hfnum.b.frnum; -+} -+ -+/** -+ * Frees secondary storage associated with the dwc_otg_hcd structure contained -+ * in the struct usb_hcd field. 
-+ */ -+void dwc_otg_hcd_free(struct usb_hcd *hcd) -+{ -+ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); -+ int i; -+ -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD FREE\n"); -+ -+ del_timers(dwc_otg_hcd); -+ -+ /* Free memory for QH/QTD lists */ -+ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->non_periodic_sched_inactive); -+ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->non_periodic_sched_active); -+ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_inactive); -+ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_ready); -+ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_assigned); -+ qh_list_free(dwc_otg_hcd, &dwc_otg_hcd->periodic_sched_queued); -+ -+ /* Free memory for the host channels. */ -+ for (i = 0; i < MAX_EPS_CHANNELS; i++) { -+ dwc_hc_t *hc = dwc_otg_hcd->hc_ptr_array[i]; -+ if (hc != NULL) { -+ DWC_DEBUGPL(DBG_HCDV, "HCD Free channel #%i, hc=%p\n", i, hc); -+ kfree(hc); -+ } -+ } -+ -+ if (dwc_otg_hcd->core_if->dma_enable) { -+ if (dwc_otg_hcd->status_buf_dma) { -+ dma_free_coherent(hcd->self.controller, -+ DWC_OTG_HCD_STATUS_BUF_SIZE, -+ dwc_otg_hcd->status_buf, -+ dwc_otg_hcd->status_buf_dma); -+ } -+ } else if (dwc_otg_hcd->status_buf != NULL) { -+ kfree(dwc_otg_hcd->status_buf); -+ } -+} -+ -+#ifdef DEBUG -+static void dump_urb_info(struct urb *urb, char* fn_name) -+{ -+ DWC_PRINT("%s, urb %p\n", fn_name, urb); -+ DWC_PRINT(" Device address: %d\n", usb_pipedevice(urb->pipe)); -+ DWC_PRINT(" Endpoint: %d, %s\n", usb_pipeendpoint(urb->pipe), -+ (usb_pipein(urb->pipe) ? "IN" : "OUT")); -+ DWC_PRINT(" Endpoint type: %s\n", -+ ({char *pipetype; -+ switch (usb_pipetype(urb->pipe)) { -+ case PIPE_CONTROL: pipetype = "CONTROL"; break; -+ case PIPE_BULK: pipetype = "BULK"; break; -+ case PIPE_INTERRUPT: pipetype = "INTERRUPT"; break; -+ case PIPE_ISOCHRONOUS: pipetype = "ISOCHRONOUS"; break; -+ default: pipetype = "UNKNOWN"; break; -+ }; pipetype;})); -+ DWC_PRINT(" Speed: %s\n", -+ ({char *speed; -+ switch (urb->dev->speed) { -+ case USB_SPEED_HIGH: speed = "HIGH"; break; -+ case USB_SPEED_FULL: speed = "FULL"; break; -+ case USB_SPEED_LOW: speed = "LOW"; break; -+ default: speed = "UNKNOWN"; break; -+ }; speed;})); -+ DWC_PRINT(" Max packet size: %d\n", -+ usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe))); -+ DWC_PRINT(" Data buffer length: %d\n", urb->transfer_buffer_length); -+ DWC_PRINT(" Transfer buffer: %p, Transfer DMA: %p\n", -+ urb->transfer_buffer, (void *)urb->transfer_dma); -+ DWC_PRINT(" Setup buffer: %p, Setup DMA: %p\n", -+ urb->setup_packet, (void *)urb->setup_dma); -+ DWC_PRINT(" Interval: %d\n", urb->interval); -+ if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { -+ int i; -+ for (i = 0; i < urb->number_of_packets; i++) { -+ DWC_PRINT(" ISO Desc %d:\n", i); -+ DWC_PRINT(" offset: %d, length %d\n", -+ urb->iso_frame_desc[i].offset, -+ urb->iso_frame_desc[i].length); -+ } -+ } -+} -+ -+static void dump_channel_info(dwc_otg_hcd_t *hcd, -+ dwc_otg_qh_t *qh) -+{ -+ if (qh->channel != NULL) { -+ dwc_hc_t *hc = qh->channel; -+ struct list_head *item; -+ dwc_otg_qh_t *qh_item; -+ int num_channels = hcd->core_if->core_params->host_channels; -+ int i; -+ -+ dwc_otg_hc_regs_t *hc_regs; -+ hcchar_data_t hcchar; -+ hcsplt_data_t hcsplt; -+ hctsiz_data_t hctsiz; -+ uint32_t hcdma; -+ -+ hc_regs = hcd->core_if->host_if->hc_regs[hc->hc_num]; -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ hcsplt.d32 = dwc_read_reg32(&hc_regs->hcsplt); -+ hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz); -+ hcdma = dwc_read_reg32(&hc_regs->hcdma); -+ -+ DWC_PRINT(" Assigned to 
channel %p:\n", hc); -+ DWC_PRINT(" hcchar 0x%08x, hcsplt 0x%08x\n", hcchar.d32, hcsplt.d32); -+ DWC_PRINT(" hctsiz 0x%08x, hcdma 0x%08x\n", hctsiz.d32, hcdma); -+ DWC_PRINT(" dev_addr: %d, ep_num: %d, ep_is_in: %d\n", -+ hc->dev_addr, hc->ep_num, hc->ep_is_in); -+ DWC_PRINT(" ep_type: %d\n", hc->ep_type); -+ DWC_PRINT(" max_packet: %d\n", hc->max_packet); -+ DWC_PRINT(" data_pid_start: %d\n", hc->data_pid_start); -+ DWC_PRINT(" xfer_started: %d\n", hc->xfer_started); -+ DWC_PRINT(" halt_status: %d\n", hc->halt_status); -+ DWC_PRINT(" xfer_buff: %p\n", hc->xfer_buff); -+ DWC_PRINT(" xfer_len: %d\n", hc->xfer_len); -+ DWC_PRINT(" qh: %p\n", hc->qh); -+ DWC_PRINT(" NP inactive sched:\n"); -+ list_for_each(item, &hcd->non_periodic_sched_inactive) { -+ qh_item = list_entry(item, dwc_otg_qh_t, qh_list_entry); -+ DWC_PRINT(" %p\n", qh_item); -+ } -+ DWC_PRINT(" NP active sched:\n"); -+ list_for_each(item, &hcd->non_periodic_sched_active) { -+ qh_item = list_entry(item, dwc_otg_qh_t, qh_list_entry); -+ DWC_PRINT(" %p\n", qh_item); -+ } -+ DWC_PRINT(" Channels: \n"); -+ for (i = 0; i < num_channels; i++) { -+ dwc_hc_t *hc = hcd->hc_ptr_array[i]; -+ DWC_PRINT(" %2d: %p\n", i, hc); -+ } -+ } -+} -+#endif -+ -+/** Starts processing a USB transfer request specified by a USB Request Block -+ * (URB). mem_flags indicates the type of memory allocation to use while -+ * processing this URB. */ -+int dwc_otg_hcd_urb_enqueue(struct usb_hcd *hcd, -+ struct urb *urb, -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) -+ int mem_flags -+#else -+ gfp_t mem_flags -+#endif -+ ) -+{ -+ int retval = 0; -+ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); -+ dwc_otg_qtd_t *qtd; -+ -+#ifdef DEBUG -+ if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) { -+ dump_urb_info(urb, "dwc_otg_hcd_urb_enqueue"); -+ } -+#endif -+ if (!dwc_otg_hcd->flags.b.port_connect_status) { -+ /* No longer connected. */ -+ return -ENODEV; -+ } -+ -+ qtd = dwc_otg_hcd_qtd_create(urb); -+ if (qtd == NULL) { -+ DWC_ERROR("DWC OTG HCD URB Enqueue failed creating QTD\n"); -+ return -ENOMEM; -+ } -+ -+ retval = dwc_otg_hcd_qtd_add(qtd, dwc_otg_hcd); -+ if (retval < 0) { -+ DWC_ERROR("DWC OTG HCD URB Enqueue failed adding QTD. " -+ "Error status %d\n", retval); -+ dwc_otg_hcd_qtd_free(qtd); -+ } -+ -+ return retval; -+} -+ -+/** Aborts/cancels a USB transfer request. Always returns 0 to indicate -+ * success. */ -+int dwc_otg_hcd_urb_dequeue(struct usb_hcd *hcd, -+ struct urb *urb, -+ int status) -+{ -+ unsigned long flags; -+ dwc_otg_hcd_t *dwc_otg_hcd; -+ dwc_otg_qtd_t *urb_qtd; -+ dwc_otg_qh_t *qh; -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) -+ struct usb_host_endpoint *ep = dwc_urb_to_endpoint(urb); -+#endif -+ -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD URB Dequeue\n"); -+ -+ dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); -+ -+ SPIN_LOCK_IRQSAVE(&dwc_otg_hcd->lock, flags); -+ -+ urb_qtd = (dwc_otg_qtd_t *)urb->hcpriv; -+ qh = (dwc_otg_qh_t *)ep->hcpriv; -+ -+#ifdef DEBUG -+ if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) { -+ dump_urb_info(urb, "dwc_otg_hcd_urb_dequeue"); -+ if (urb_qtd == qh->qtd_in_process) { -+ dump_channel_info(dwc_otg_hcd, qh); -+ } -+ } -+#endif -+ -+ if (urb_qtd == qh->qtd_in_process) { -+ /* The QTD is in process (it has been assigned to a channel). */ -+ -+ if (dwc_otg_hcd->flags.b.port_connect_status) { -+ /* -+ * If still connected (i.e. in host mode), halt the -+ * channel so it can be used for other transfers. 
If -+ * no longer connected, the host registers can't be -+ * written to halt the channel since the core is in -+ * device mode. -+ */ -+ dwc_otg_hc_halt(dwc_otg_hcd->core_if, qh->channel, -+ DWC_OTG_HC_XFER_URB_DEQUEUE); -+ } -+ } -+ -+ /* -+ * Free the QTD and clean up the associated QH. Leave the QH in the -+ * schedule if it has any remaining QTDs. -+ */ -+ dwc_otg_hcd_qtd_remove_and_free(dwc_otg_hcd, urb_qtd); -+ if (urb_qtd == qh->qtd_in_process) { -+ dwc_otg_hcd_qh_deactivate(dwc_otg_hcd, qh, 0); -+ qh->channel = NULL; -+ qh->qtd_in_process = NULL; -+ } else if (list_empty(&qh->qtd_list)) { -+ dwc_otg_hcd_qh_remove(dwc_otg_hcd, qh); -+ } -+ -+ SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags); -+ -+ urb->hcpriv = NULL; -+ -+ /* Higher layer software sets URB status. */ -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) -+ usb_hcd_giveback_urb(hcd, urb, status); -+#else -+ usb_hcd_giveback_urb(hcd, urb, NULL); -+#endif -+ if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) { -+ DWC_PRINT("Called usb_hcd_giveback_urb()\n"); -+ DWC_PRINT(" urb->status = %d\n", urb->status); -+ } -+ -+ return 0; -+} -+ -+/** Frees resources in the DWC_otg controller related to a given endpoint. Also -+ * clears state in the HCD related to the endpoint. Any URBs for the endpoint -+ * must already be dequeued. */ -+void dwc_otg_hcd_endpoint_disable(struct usb_hcd *hcd, -+ struct usb_host_endpoint *ep) -+{ -+ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); -+ dwc_otg_qh_t *qh; -+ -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) -+ unsigned long flags; -+ int retry = 0; -+#endif -+ -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD EP DISABLE: _bEndpointAddress=0x%02x, " -+ "endpoint=%d\n", ep->desc.bEndpointAddress, -+ dwc_ep_addr_to_endpoint(ep->desc.bEndpointAddress)); -+ -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) -+rescan: -+ SPIN_LOCK_IRQSAVE(&dwc_otg_hcd->lock, flags); -+ qh = (dwc_otg_qh_t *)(ep->hcpriv); -+ if (!qh) -+ goto done; -+ -+ /** Check that the QTD list is really empty */ -+ if (!list_empty(&qh->qtd_list)) { -+ if (retry++ < 250) { -+ SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags); -+ schedule_timeout_uninterruptible(1); -+ goto rescan; -+ } -+ -+ DWC_WARN("DWC OTG HCD EP DISABLE:" -+ " QTD List for this endpoint is not empty\n"); -+ } -+ -+ dwc_otg_hcd_qh_remove_and_free(dwc_otg_hcd, qh); -+ ep->hcpriv = NULL; -+done: -+ SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags); -+ -+#else // LINUX_VERSION_CODE -+ -+ qh = (dwc_otg_qh_t *)(ep->hcpriv); -+ if (qh != NULL) { -+#ifdef DEBUG -+ /** Check that the QTD list is really empty */ -+ if (!list_empty(&qh->qtd_list)) { -+ DWC_WARN("DWC OTG HCD EP DISABLE:" -+ " QTD List for this endpoint is not empty\n"); -+ } -+#endif -+ dwc_otg_hcd_qh_remove_and_free(dwc_otg_hcd, qh); -+ ep->hcpriv = NULL; -+ } -+#endif // LINUX_VERSION_CODE -+} -+ -+/** Handles host mode interrupts for the DWC_otg controller. Returns IRQ_NONE if -+ * there was no interrupt to handle. Returns IRQ_HANDLED if there was a valid -+ * interrupt. -+ * -+ * This function is called by the USB core when an interrupt occurs */ -+irqreturn_t dwc_otg_hcd_irq(struct usb_hcd *hcd -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) -+ , struct pt_regs *regs -+#endif -+ ) -+{ -+ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); -+ return IRQ_RETVAL(dwc_otg_hcd_handle_intr(dwc_otg_hcd)); -+} -+ -+/** Creates Status Change bitmap for the root hub and root port. The bitmap is -+ * returned in buf. Bit 0 is the status change indicator for the root hub. 
Bit 1 -+ * is the status change indicator for the single root port. Returns 1 if either -+ * change indicator is 1, otherwise returns 0. */ -+int dwc_otg_hcd_hub_status_data(struct usb_hcd *hcd, char *buf) -+{ -+ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); -+ -+ buf[0] = 0; -+ buf[0] |= (dwc_otg_hcd->flags.b.port_connect_status_change || -+ dwc_otg_hcd->flags.b.port_reset_change || -+ dwc_otg_hcd->flags.b.port_enable_change || -+ dwc_otg_hcd->flags.b.port_suspend_change || -+ dwc_otg_hcd->flags.b.port_over_current_change) << 1; -+ -+#ifdef DEBUG -+ if (buf[0]) { -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB STATUS DATA:" -+ " Root port status changed\n"); -+ DWC_DEBUGPL(DBG_HCDV, " port_connect_status_change: %d\n", -+ dwc_otg_hcd->flags.b.port_connect_status_change); -+ DWC_DEBUGPL(DBG_HCDV, " port_reset_change: %d\n", -+ dwc_otg_hcd->flags.b.port_reset_change); -+ DWC_DEBUGPL(DBG_HCDV, " port_enable_change: %d\n", -+ dwc_otg_hcd->flags.b.port_enable_change); -+ DWC_DEBUGPL(DBG_HCDV, " port_suspend_change: %d\n", -+ dwc_otg_hcd->flags.b.port_suspend_change); -+ DWC_DEBUGPL(DBG_HCDV, " port_over_current_change: %d\n", -+ dwc_otg_hcd->flags.b.port_over_current_change); -+ } -+#endif -+ return (buf[0] != 0); -+} -+ -+#ifdef DWC_HS_ELECT_TST -+/* -+ * Quick and dirty hack to implement the HS Electrical Test -+ * SINGLE_STEP_GET_DEVICE_DESCRIPTOR feature. -+ * -+ * This code was copied from our userspace app "hset". It sends a -+ * Get Device Descriptor control sequence in two parts, first the -+ * Setup packet by itself, followed some time later by the In and -+ * Ack packets. Rather than trying to figure out how to add this -+ * functionality to the normal driver code, we just hijack the -+ * hardware, using these two function to drive the hardware -+ * directly. 
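/* Standalone sketch, not part of the patch above: dwc_otg_hcd_hub_status_data()
 * reports one root hub with a single port, so only bit 1 of buf[0] (port 1)
 * can ever be set; bit 0 (the hub itself) stays 0. This toy (hypothetical
 * struct and function names) shows how the per-port change flags collapse
 * into that single bit and into the 0/1 return value. */
#include <stdio.h>

struct port_changes {
    unsigned connect : 1;
    unsigned reset   : 1;
    unsigned enable  : 1;
    unsigned suspend : 1;
    unsigned overcur : 1;
};

static int hub_status_data(const struct port_changes *p, unsigned char *buf)
{
    buf[0] = 0;
    buf[0] |= (p->connect || p->reset || p->enable ||
               p->suspend || p->overcur) << 1;    /* bit 1 = root port */
    return buf[0] != 0;                           /* 1 if anything changed */
}

int main(void)
{
    struct port_changes ch = { .reset = 1 };
    unsigned char buf[1];

    printf("changed=%d buf[0]=0x%02x\n", hub_status_data(&ch, buf), buf[0]);
    return 0;
}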
-+ */ -+ -+dwc_otg_core_global_regs_t *global_regs; -+dwc_otg_host_global_regs_t *hc_global_regs; -+dwc_otg_hc_regs_t *hc_regs; -+uint32_t *data_fifo; -+ -+static void do_setup(void) -+{ -+ gintsts_data_t gintsts; -+ hctsiz_data_t hctsiz; -+ hcchar_data_t hcchar; -+ haint_data_t haint; -+ hcint_data_t hcint; -+ -+ /* Enable HAINTs */ -+ dwc_write_reg32(&hc_global_regs->haintmsk, 0x0001); -+ -+ /* Enable HCINTs */ -+ dwc_write_reg32(&hc_regs->hcintmsk, 0x04a3); -+ -+ /* Read GINTSTS */ -+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); -+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); -+ -+ /* Read HAINT */ -+ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); -+ //fprintf(stderr, "HAINT: %08x\n", haint.d32); -+ -+ /* Read HCINT */ -+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); -+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); -+ -+ /* Read HCCHAR */ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); -+ -+ /* Clear HCINT */ -+ dwc_write_reg32(&hc_regs->hcint, hcint.d32); -+ -+ /* Clear HAINT */ -+ dwc_write_reg32(&hc_global_regs->haint, haint.d32); -+ -+ /* Clear GINTSTS */ -+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); -+ -+ /* Read GINTSTS */ -+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); -+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); -+ -+ /* -+ * Send Setup packet (Get Device Descriptor) -+ */ -+ -+ /* Make sure channel is disabled */ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ if (hcchar.b.chen) { -+ //fprintf(stderr, "Channel already enabled 1, HCCHAR = %08x\n", hcchar.d32); -+ hcchar.b.chdis = 1; -+// hcchar.b.chen = 1; -+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); -+ //sleep(1); -+ mdelay(1000); -+ -+ /* Read GINTSTS */ -+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); -+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); -+ -+ /* Read HAINT */ -+ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); -+ //fprintf(stderr, "HAINT: %08x\n", haint.d32); -+ -+ /* Read HCINT */ -+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); -+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); -+ -+ /* Read HCCHAR */ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); -+ -+ /* Clear HCINT */ -+ dwc_write_reg32(&hc_regs->hcint, hcint.d32); -+ -+ /* Clear HAINT */ -+ dwc_write_reg32(&hc_global_regs->haint, haint.d32); -+ -+ /* Clear GINTSTS */ -+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); -+ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ //if (hcchar.b.chen) { -+ // fprintf(stderr, "** Channel _still_ enabled 1, HCCHAR = %08x **\n", hcchar.d32); -+ //} -+ } -+ -+ /* Set HCTSIZ */ -+ hctsiz.d32 = 0; -+ hctsiz.b.xfersize = 8; -+ hctsiz.b.pktcnt = 1; -+ hctsiz.b.pid = DWC_OTG_HC_PID_SETUP; -+ dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32); -+ -+ /* Set HCCHAR */ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ hcchar.b.eptype = DWC_OTG_EP_TYPE_CONTROL; -+ hcchar.b.epdir = 0; -+ hcchar.b.epnum = 0; -+ hcchar.b.mps = 8; -+ hcchar.b.chen = 1; -+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); -+ -+ /* Fill FIFO with Setup data for Get Device Descriptor */ -+ data_fifo = (uint32_t *)((char *)global_regs + 0x1000); -+ dwc_write_reg32(data_fifo++, 0x01000680); -+ dwc_write_reg32(data_fifo++, 0x00080000); -+ -+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); -+ //fprintf(stderr, "Waiting for HCINTR intr 1, GINTSTS = %08x\n", gintsts.d32); -+ -+ /* Wait for host channel interrupt */ -+ do { -+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); 
-+ } while (gintsts.b.hcintr == 0); -+ -+ //fprintf(stderr, "Got HCINTR intr 1, GINTSTS = %08x\n", gintsts.d32); -+ -+ /* Disable HCINTs */ -+ dwc_write_reg32(&hc_regs->hcintmsk, 0x0000); -+ -+ /* Disable HAINTs */ -+ dwc_write_reg32(&hc_global_regs->haintmsk, 0x0000); -+ -+ /* Read HAINT */ -+ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); -+ //fprintf(stderr, "HAINT: %08x\n", haint.d32); -+ -+ /* Read HCINT */ -+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); -+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); -+ -+ /* Read HCCHAR */ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); -+ -+ /* Clear HCINT */ -+ dwc_write_reg32(&hc_regs->hcint, hcint.d32); -+ -+ /* Clear HAINT */ -+ dwc_write_reg32(&hc_global_regs->haint, haint.d32); -+ -+ /* Clear GINTSTS */ -+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); -+ -+ /* Read GINTSTS */ -+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); -+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); -+} -+ -+static void do_in_ack(void) -+{ -+ gintsts_data_t gintsts; -+ hctsiz_data_t hctsiz; -+ hcchar_data_t hcchar; -+ haint_data_t haint; -+ hcint_data_t hcint; -+ host_grxsts_data_t grxsts; -+ -+ /* Enable HAINTs */ -+ dwc_write_reg32(&hc_global_regs->haintmsk, 0x0001); -+ -+ /* Enable HCINTs */ -+ dwc_write_reg32(&hc_regs->hcintmsk, 0x04a3); -+ -+ /* Read GINTSTS */ -+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); -+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); -+ -+ /* Read HAINT */ -+ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); -+ //fprintf(stderr, "HAINT: %08x\n", haint.d32); -+ -+ /* Read HCINT */ -+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); -+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); -+ -+ /* Read HCCHAR */ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); -+ -+ /* Clear HCINT */ -+ dwc_write_reg32(&hc_regs->hcint, hcint.d32); -+ -+ /* Clear HAINT */ -+ dwc_write_reg32(&hc_global_regs->haint, haint.d32); -+ -+ /* Clear GINTSTS */ -+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); -+ -+ /* Read GINTSTS */ -+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); -+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); -+ -+ /* -+ * Receive Control In packet -+ */ -+ -+ /* Make sure channel is disabled */ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ if (hcchar.b.chen) { -+ //fprintf(stderr, "Channel already enabled 2, HCCHAR = %08x\n", hcchar.d32); -+ hcchar.b.chdis = 1; -+ hcchar.b.chen = 1; -+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); -+ //sleep(1); -+ mdelay(1000); -+ -+ /* Read GINTSTS */ -+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); -+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); -+ -+ /* Read HAINT */ -+ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); -+ //fprintf(stderr, "HAINT: %08x\n", haint.d32); -+ -+ /* Read HCINT */ -+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); -+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); -+ -+ /* Read HCCHAR */ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); -+ -+ /* Clear HCINT */ -+ dwc_write_reg32(&hc_regs->hcint, hcint.d32); -+ -+ /* Clear HAINT */ -+ dwc_write_reg32(&hc_global_regs->haint, haint.d32); -+ -+ /* Clear GINTSTS */ -+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); -+ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ //if (hcchar.b.chen) { -+ // fprintf(stderr, "** Channel _still_ enabled 2, HCCHAR = %08x **\n", hcchar.d32); -+ //} -+ } -+ -+ /* Set HCTSIZ */ 
-+ hctsiz.d32 = 0; -+ hctsiz.b.xfersize = 8; -+ hctsiz.b.pktcnt = 1; -+ hctsiz.b.pid = DWC_OTG_HC_PID_DATA1; -+ dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32); -+ -+ /* Set HCCHAR */ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ hcchar.b.eptype = DWC_OTG_EP_TYPE_CONTROL; -+ hcchar.b.epdir = 1; -+ hcchar.b.epnum = 0; -+ hcchar.b.mps = 8; -+ hcchar.b.chen = 1; -+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); -+ -+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); -+ //fprintf(stderr, "Waiting for RXSTSQLVL intr 1, GINTSTS = %08x\n", gintsts.d32); -+ -+ /* Wait for receive status queue interrupt */ -+ do { -+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); -+ } while (gintsts.b.rxstsqlvl == 0); -+ -+ //fprintf(stderr, "Got RXSTSQLVL intr 1, GINTSTS = %08x\n", gintsts.d32); -+ -+ /* Read RXSTS */ -+ grxsts.d32 = dwc_read_reg32(&global_regs->grxstsp); -+ //fprintf(stderr, "GRXSTS: %08x\n", grxsts.d32); -+ -+ /* Clear RXSTSQLVL in GINTSTS */ -+ gintsts.d32 = 0; -+ gintsts.b.rxstsqlvl = 1; -+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); -+ -+ switch (grxsts.b.pktsts) { -+ case DWC_GRXSTS_PKTSTS_IN: -+ /* Read the data into the host buffer */ -+ if (grxsts.b.bcnt > 0) { -+ int i; -+ int word_count = (grxsts.b.bcnt + 3) / 4; -+ -+ data_fifo = (uint32_t *)((char *)global_regs + 0x1000); -+ -+ for (i = 0; i < word_count; i++) { -+ (void)dwc_read_reg32(data_fifo++); -+ } -+ } -+ -+ //fprintf(stderr, "Received %u bytes\n", (unsigned)grxsts.b.bcnt); -+ break; -+ -+ default: -+ //fprintf(stderr, "** Unexpected GRXSTS packet status 1 **\n"); -+ break; -+ } -+ -+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); -+ //fprintf(stderr, "Waiting for RXSTSQLVL intr 2, GINTSTS = %08x\n", gintsts.d32); -+ -+ /* Wait for receive status queue interrupt */ -+ do { -+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); -+ } while (gintsts.b.rxstsqlvl == 0); -+ -+ //fprintf(stderr, "Got RXSTSQLVL intr 2, GINTSTS = %08x\n", gintsts.d32); -+ -+ /* Read RXSTS */ -+ grxsts.d32 = dwc_read_reg32(&global_regs->grxstsp); -+ //fprintf(stderr, "GRXSTS: %08x\n", grxsts.d32); -+ -+ /* Clear RXSTSQLVL in GINTSTS */ -+ gintsts.d32 = 0; -+ gintsts.b.rxstsqlvl = 1; -+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); -+ -+ switch (grxsts.b.pktsts) { -+ case DWC_GRXSTS_PKTSTS_IN_XFER_COMP: -+ break; -+ -+ default: -+ //fprintf(stderr, "** Unexpected GRXSTS packet status 2 **\n"); -+ break; -+ } -+ -+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); -+ //fprintf(stderr, "Waiting for HCINTR intr 2, GINTSTS = %08x\n", gintsts.d32); -+ -+ /* Wait for host channel interrupt */ -+ do { -+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); -+ } while (gintsts.b.hcintr == 0); -+ -+ //fprintf(stderr, "Got HCINTR intr 2, GINTSTS = %08x\n", gintsts.d32); -+ -+ /* Read HAINT */ -+ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); -+ //fprintf(stderr, "HAINT: %08x\n", haint.d32); -+ -+ /* Read HCINT */ -+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); -+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); -+ -+ /* Read HCCHAR */ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); -+ -+ /* Clear HCINT */ -+ dwc_write_reg32(&hc_regs->hcint, hcint.d32); -+ -+ /* Clear HAINT */ -+ dwc_write_reg32(&hc_global_regs->haint, haint.d32); -+ -+ /* Clear GINTSTS */ -+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); -+ -+ /* Read GINTSTS */ -+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); -+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); -+ -+// usleep(100000); 
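/* Standalone sketch, not part of the patch above: do_in_ack() drains the RX
 * FIFO with 32-bit reads, so the received byte count is rounded up to whole
 * words with (bcnt + 3) / 4. A quick check of that ceiling division
 * (hypothetical helper name words_for_bytes): */
#include <stdio.h>

static unsigned words_for_bytes(unsigned bcnt)
{
    return (bcnt + 3) / 4;   /* ceiling division by the 4-byte FIFO width */
}

int main(void)
{
    unsigned tests[] = { 0, 1, 4, 7, 8, 18 };
    for (unsigned i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
        printf("%2u bytes -> %u word reads\n", tests[i], words_for_bytes(tests[i]));
    return 0;
}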
-+// mdelay(100); -+ mdelay(1); -+ -+ /* -+ * Send handshake packet -+ */ -+ -+ /* Read HAINT */ -+ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); -+ //fprintf(stderr, "HAINT: %08x\n", haint.d32); -+ -+ /* Read HCINT */ -+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); -+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); -+ -+ /* Read HCCHAR */ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); -+ -+ /* Clear HCINT */ -+ dwc_write_reg32(&hc_regs->hcint, hcint.d32); -+ -+ /* Clear HAINT */ -+ dwc_write_reg32(&hc_global_regs->haint, haint.d32); -+ -+ /* Clear GINTSTS */ -+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); -+ -+ /* Read GINTSTS */ -+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); -+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); -+ -+ /* Make sure channel is disabled */ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ if (hcchar.b.chen) { -+ //fprintf(stderr, "Channel already enabled 3, HCCHAR = %08x\n", hcchar.d32); -+ hcchar.b.chdis = 1; -+ hcchar.b.chen = 1; -+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); -+ //sleep(1); -+ mdelay(1000); -+ -+ /* Read GINTSTS */ -+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); -+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); -+ -+ /* Read HAINT */ -+ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); -+ //fprintf(stderr, "HAINT: %08x\n", haint.d32); -+ -+ /* Read HCINT */ -+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); -+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); -+ -+ /* Read HCCHAR */ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); -+ -+ /* Clear HCINT */ -+ dwc_write_reg32(&hc_regs->hcint, hcint.d32); -+ -+ /* Clear HAINT */ -+ dwc_write_reg32(&hc_global_regs->haint, haint.d32); -+ -+ /* Clear GINTSTS */ -+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); -+ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ //if (hcchar.b.chen) { -+ // fprintf(stderr, "** Channel _still_ enabled 3, HCCHAR = %08x **\n", hcchar.d32); -+ //} -+ } -+ -+ /* Set HCTSIZ */ -+ hctsiz.d32 = 0; -+ hctsiz.b.xfersize = 0; -+ hctsiz.b.pktcnt = 1; -+ hctsiz.b.pid = DWC_OTG_HC_PID_DATA1; -+ dwc_write_reg32(&hc_regs->hctsiz, hctsiz.d32); -+ -+ /* Set HCCHAR */ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ hcchar.b.eptype = DWC_OTG_EP_TYPE_CONTROL; -+ hcchar.b.epdir = 0; -+ hcchar.b.epnum = 0; -+ hcchar.b.mps = 8; -+ hcchar.b.chen = 1; -+ dwc_write_reg32(&hc_regs->hcchar, hcchar.d32); -+ -+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); -+ //fprintf(stderr, "Waiting for HCINTR intr 3, GINTSTS = %08x\n", gintsts.d32); -+ -+ /* Wait for host channel interrupt */ -+ do { -+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); -+ } while (gintsts.b.hcintr == 0); -+ -+ //fprintf(stderr, "Got HCINTR intr 3, GINTSTS = %08x\n", gintsts.d32); -+ -+ /* Disable HCINTs */ -+ dwc_write_reg32(&hc_regs->hcintmsk, 0x0000); -+ -+ /* Disable HAINTs */ -+ dwc_write_reg32(&hc_global_regs->haintmsk, 0x0000); -+ -+ /* Read HAINT */ -+ haint.d32 = dwc_read_reg32(&hc_global_regs->haint); -+ //fprintf(stderr, "HAINT: %08x\n", haint.d32); -+ -+ /* Read HCINT */ -+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); -+ //fprintf(stderr, "HCINT: %08x\n", hcint.d32); -+ -+ /* Read HCCHAR */ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ //fprintf(stderr, "HCCHAR: %08x\n", hcchar.d32); -+ -+ /* Clear HCINT */ -+ dwc_write_reg32(&hc_regs->hcint, hcint.d32); -+ -+ /* Clear HAINT */ -+ dwc_write_reg32(&hc_global_regs->haint, haint.d32); -+ -+ /* 
Clear GINTSTS */ -+ dwc_write_reg32(&global_regs->gintsts, gintsts.d32); -+ -+ /* Read GINTSTS */ -+ gintsts.d32 = dwc_read_reg32(&global_regs->gintsts); -+ //fprintf(stderr, "GINTSTS: %08x\n", gintsts.d32); -+} -+#endif /* DWC_HS_ELECT_TST */ -+ -+/** Handles hub class-specific requests. */ -+int dwc_otg_hcd_hub_control(struct usb_hcd *hcd, -+ u16 typeReq, -+ u16 wValue, -+ u16 wIndex, -+ char *buf, -+ u16 wLength) -+{ -+ int retval = 0; -+ -+ dwc_otg_hcd_t *dwc_otg_hcd = hcd_to_dwc_otg_hcd(hcd); -+ dwc_otg_core_if_t *core_if = hcd_to_dwc_otg_hcd(hcd)->core_if; -+ struct usb_hub_descriptor *desc; -+ hprt0_data_t hprt0 = {.d32 = 0}; -+ -+ uint32_t port_status; -+ -+ switch (typeReq) { -+ case ClearHubFeature: -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " -+ "ClearHubFeature 0x%x\n", wValue); -+ switch (wValue) { -+ case C_HUB_LOCAL_POWER: -+ case C_HUB_OVER_CURRENT: -+ /* Nothing required here */ -+ break; -+ default: -+ retval = -EINVAL; -+ DWC_ERROR("DWC OTG HCD - " -+ "ClearHubFeature request %xh unknown\n", wValue); -+ } -+ break; -+ case ClearPortFeature: -+ if (!wIndex || wIndex > 1) -+ goto error; -+ -+ switch (wValue) { -+ case USB_PORT_FEAT_ENABLE: -+ DWC_DEBUGPL(DBG_ANY, "DWC OTG HCD HUB CONTROL - " -+ "ClearPortFeature USB_PORT_FEAT_ENABLE\n"); -+ hprt0.d32 = dwc_otg_read_hprt0(core_if); -+ hprt0.b.prtena = 1; -+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); -+ break; -+ case USB_PORT_FEAT_SUSPEND: -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " -+ "ClearPortFeature USB_PORT_FEAT_SUSPEND\n"); -+ hprt0.d32 = dwc_otg_read_hprt0(core_if); -+ hprt0.b.prtres = 1; -+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); -+ /* Clear Resume bit */ -+ mdelay(100); -+ hprt0.b.prtres = 0; -+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); -+ break; -+ case USB_PORT_FEAT_POWER: -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " -+ "ClearPortFeature USB_PORT_FEAT_POWER\n"); -+ hprt0.d32 = dwc_otg_read_hprt0(core_if); -+ hprt0.b.prtpwr = 0; -+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); -+ break; -+ case USB_PORT_FEAT_INDICATOR: -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " -+ "ClearPortFeature USB_PORT_FEAT_INDICATOR\n"); -+ /* Port inidicator not supported */ -+ break; -+ case USB_PORT_FEAT_C_CONNECTION: -+ /* Clears drivers internal connect status change -+ * flag */ -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " -+ "ClearPortFeature USB_PORT_FEAT_C_CONNECTION\n"); -+ dwc_otg_hcd->flags.b.port_connect_status_change = 0; -+ break; -+ case USB_PORT_FEAT_C_RESET: -+ /* Clears the driver's internal Port Reset Change -+ * flag */ -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " -+ "ClearPortFeature USB_PORT_FEAT_C_RESET\n"); -+ dwc_otg_hcd->flags.b.port_reset_change = 0; -+ break; -+ case USB_PORT_FEAT_C_ENABLE: -+ /* Clears the driver's internal Port -+ * Enable/Disable Change flag */ -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " -+ "ClearPortFeature USB_PORT_FEAT_C_ENABLE\n"); -+ dwc_otg_hcd->flags.b.port_enable_change = 0; -+ break; -+ case USB_PORT_FEAT_C_SUSPEND: -+ /* Clears the driver's internal Port Suspend -+ * Change flag, which is set when resume signaling on -+ * the host port is complete */ -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " -+ "ClearPortFeature USB_PORT_FEAT_C_SUSPEND\n"); -+ dwc_otg_hcd->flags.b.port_suspend_change = 0; -+ break; -+ case USB_PORT_FEAT_C_OVER_CURRENT: -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " -+ "ClearPortFeature USB_PORT_FEAT_C_OVER_CURRENT\n"); -+ 
dwc_otg_hcd->flags.b.port_over_current_change = 0; -+ break; -+ default: -+ retval = -EINVAL; -+ DWC_ERROR("DWC OTG HCD - " -+ "ClearPortFeature request %xh " -+ "unknown or unsupported\n", wValue); -+ } -+ break; -+ case GetHubDescriptor: -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " -+ "GetHubDescriptor\n"); -+ desc = (struct usb_hub_descriptor *)buf; -+ desc->bDescLength = 9; -+ desc->bDescriptorType = 0x29; -+ desc->bNbrPorts = 1; -+ desc->wHubCharacteristics = 0x08; -+ desc->bPwrOn2PwrGood = 1; -+ desc->bHubContrCurrent = 0; -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39) -+ desc->u.hs.DeviceRemovable[0] = 0; -+ desc->u.hs.DeviceRemovable[1] = 0xff; -+#endif -+ break; -+ case GetHubStatus: -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " -+ "GetHubStatus\n"); -+ memset(buf, 0, 4); -+ break; -+ case GetPortStatus: -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " -+ "GetPortStatus\n"); -+ -+ if (!wIndex || wIndex > 1) -+ goto error; -+ -+ port_status = 0; -+ -+ if (dwc_otg_hcd->flags.b.port_connect_status_change) -+ port_status |= (1 << USB_PORT_FEAT_C_CONNECTION); -+ -+ if (dwc_otg_hcd->flags.b.port_enable_change) -+ port_status |= (1 << USB_PORT_FEAT_C_ENABLE); -+ -+ if (dwc_otg_hcd->flags.b.port_suspend_change) -+ port_status |= (1 << USB_PORT_FEAT_C_SUSPEND); -+ -+ if (dwc_otg_hcd->flags.b.port_reset_change) -+ port_status |= (1 << USB_PORT_FEAT_C_RESET); -+ -+ if (dwc_otg_hcd->flags.b.port_over_current_change) { -+ DWC_ERROR("Device Not Supported\n"); -+ port_status |= (1 << USB_PORT_FEAT_C_OVER_CURRENT); -+ } -+ -+ if (!dwc_otg_hcd->flags.b.port_connect_status) { -+ /* -+ * The port is disconnected, which means the core is -+ * either in device mode or it soon will be. Just -+ * return 0's for the remainder of the port status -+ * since the port register can't be read if the core -+ * is in device mode. -+ */ -+ *((__le32 *) buf) = cpu_to_le32(port_status); -+ break; -+ } -+ -+ hprt0.d32 = dwc_read_reg32(core_if->host_if->hprt0); -+ DWC_DEBUGPL(DBG_HCDV, " HPRT0: 0x%08x\n", hprt0.d32); -+ -+ if (hprt0.b.prtconnsts) -+ port_status |= (1 << USB_PORT_FEAT_CONNECTION); -+ -+ if (hprt0.b.prtena) -+ port_status |= (1 << USB_PORT_FEAT_ENABLE); -+ -+ if (hprt0.b.prtsusp) -+ port_status |= (1 << USB_PORT_FEAT_SUSPEND); -+ -+ if (hprt0.b.prtovrcurract) -+ port_status |= (1 << USB_PORT_FEAT_OVER_CURRENT); -+ -+ if (hprt0.b.prtrst) -+ port_status |= (1 << USB_PORT_FEAT_RESET); -+ -+ if (hprt0.b.prtpwr) -+ port_status |= (1 << USB_PORT_FEAT_POWER); -+ -+ if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_HIGH_SPEED) -+ port_status |= USB_PORT_STAT_HIGH_SPEED; -+ else if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_LOW_SPEED) -+ port_status |= USB_PORT_STAT_LOW_SPEED; -+ -+ if (hprt0.b.prttstctl) -+ port_status |= (1 << USB_PORT_FEAT_TEST); -+ -+ /* USB_PORT_FEAT_INDICATOR unsupported always 0 */ -+ -+ *((__le32 *) buf) = cpu_to_le32(port_status); -+ -+ break; -+ case SetHubFeature: -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " -+ "SetHubFeature\n"); -+ /* No HUB features supported */ -+ break; -+ case SetPortFeature: -+ if (wValue != USB_PORT_FEAT_TEST && (!wIndex || wIndex > 1)) -+ goto error; -+ -+ if (!dwc_otg_hcd->flags.b.port_connect_status) { -+ /* -+ * The port is disconnected, which means the core is -+ * either in device mode or it soon will be. Just -+ * return without doing anything since the port -+ * register can't be written if the core is in device -+ * mode. 
-+ */ -+ break; -+ } -+ -+ switch (wValue) { -+ case USB_PORT_FEAT_SUSPEND: -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " -+ "SetPortFeature - USB_PORT_FEAT_SUSPEND\n"); -+ if (hcd->self.otg_port == wIndex && -+ hcd->self.b_hnp_enable) { -+ gotgctl_data_t gotgctl = {.d32=0}; -+ gotgctl.b.hstsethnpen = 1; -+ dwc_modify_reg32(&core_if->core_global_regs->gotgctl, -+ 0, gotgctl.d32); -+ core_if->op_state = A_SUSPEND; -+ } -+ hprt0.d32 = dwc_otg_read_hprt0(core_if); -+ hprt0.b.prtsusp = 1; -+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); -+ //DWC_PRINT("SUSPEND: HPRT0=%0x\n", hprt0.d32); -+ /* Suspend the Phy Clock */ -+ { -+ pcgcctl_data_t pcgcctl = {.d32=0}; -+ pcgcctl.b.stoppclk = 1; -+ dwc_write_reg32(core_if->pcgcctl, pcgcctl.d32); -+ } -+ -+ /* For HNP the bus must be suspended for at least 200ms. */ -+ if (hcd->self.b_hnp_enable) { -+ mdelay(200); -+ //DWC_PRINT("SUSPEND: wait complete! (%d)\n", _hcd->state); -+ } -+ break; -+ case USB_PORT_FEAT_POWER: -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " -+ "SetPortFeature - USB_PORT_FEAT_POWER\n"); -+ hprt0.d32 = dwc_otg_read_hprt0(core_if); -+ hprt0.b.prtpwr = 1; -+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); -+ break; -+ case USB_PORT_FEAT_RESET: -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " -+ "SetPortFeature - USB_PORT_FEAT_RESET\n"); -+ hprt0.d32 = dwc_otg_read_hprt0(core_if); -+ /* When B-Host the Port reset bit is set in -+ * the Start HCD Callback function, so that -+ * the reset is started within 1ms of the HNP -+ * success interrupt. */ -+ if (!hcd->self.is_b_host) { -+ hprt0.b.prtrst = 1; -+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); -+ } -+ /* Clear reset bit in 10ms (FS/LS) or 50ms (HS) */ -+ MDELAY(60); -+ hprt0.b.prtrst = 0; -+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); -+ break; -+ -+#ifdef DWC_HS_ELECT_TST -+ case USB_PORT_FEAT_TEST: -+ { -+ uint32_t t; -+ gintmsk_data_t gintmsk; -+ -+ t = (wIndex >> 8); /* MSB wIndex USB */ -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " -+ "SetPortFeature - USB_PORT_FEAT_TEST %d\n", t); -+ warn("USB_PORT_FEAT_TEST %d\n", t); -+ if (t < 6) { -+ hprt0.d32 = dwc_otg_read_hprt0(core_if); -+ hprt0.b.prttstctl = t; -+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); -+ } else { -+ /* Setup global vars with reg addresses (quick and -+ * dirty hack, should be cleaned up) -+ */ -+ global_regs = core_if->core_global_regs; -+ hc_global_regs = core_if->host_if->host_global_regs; -+ hc_regs = (dwc_otg_hc_regs_t *)((char *)global_regs + 0x500); -+ data_fifo = (uint32_t *)((char *)global_regs + 0x1000); -+ -+ if (t == 6) { /* HS_HOST_PORT_SUSPEND_RESUME */ -+ /* Save current interrupt mask */ -+ gintmsk.d32 = dwc_read_reg32(&global_regs->gintmsk); -+ -+ /* Disable all interrupts while we muck with -+ * the hardware directly -+ */ -+ dwc_write_reg32(&global_regs->gintmsk, 0); -+ -+ /* 15 second delay per the test spec */ -+ mdelay(15000); -+ -+ /* Drive suspend on the root port */ -+ hprt0.d32 = dwc_otg_read_hprt0(core_if); -+ hprt0.b.prtsusp = 1; -+ hprt0.b.prtres = 0; -+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); -+ -+ /* 15 second delay per the test spec */ -+ mdelay(15000); -+ -+ /* Drive resume on the root port */ -+ hprt0.d32 = dwc_otg_read_hprt0(core_if); -+ hprt0.b.prtsusp = 0; -+ hprt0.b.prtres = 1; -+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); -+ mdelay(100); -+ -+ /* Clear the resume bit */ -+ hprt0.b.prtres = 0; -+ dwc_write_reg32(core_if->host_if->hprt0, hprt0.d32); -+ -+ /* Restore interrupts */ -+ 
dwc_write_reg32(&global_regs->gintmsk, gintmsk.d32); -+ } else if (t == 7) { /* SINGLE_STEP_GET_DEVICE_DESCRIPTOR setup */ -+ /* Save current interrupt mask */ -+ gintmsk.d32 = dwc_read_reg32(&global_regs->gintmsk); -+ -+ /* Disable all interrupts while we muck with -+ * the hardware directly -+ */ -+ dwc_write_reg32(&global_regs->gintmsk, 0); -+ -+ /* 15 second delay per the test spec */ -+ mdelay(15000); -+ -+ /* Send the Setup packet */ -+ do_setup(); -+ -+ /* 15 second delay so nothing else happens for awhile */ -+ mdelay(15000); -+ -+ /* Restore interrupts */ -+ dwc_write_reg32(&global_regs->gintmsk, gintmsk.d32); -+ } else if (t == 8) { /* SINGLE_STEP_GET_DEVICE_DESCRIPTOR execute */ -+ /* Save current interrupt mask */ -+ gintmsk.d32 = dwc_read_reg32(&global_regs->gintmsk); -+ -+ /* Disable all interrupts while we muck with -+ * the hardware directly -+ */ -+ dwc_write_reg32(&global_regs->gintmsk, 0); -+ -+ /* Send the Setup packet */ -+ do_setup(); -+ -+ /* 15 second delay so nothing else happens for awhile */ -+ mdelay(15000); -+ -+ /* Send the In and Ack packets */ -+ do_in_ack(); -+ -+ /* 15 second delay so nothing else happens for awhile */ -+ mdelay(15000); -+ -+ /* Restore interrupts */ -+ dwc_write_reg32(&global_regs->gintmsk, gintmsk.d32); -+ } -+ } -+ break; -+ } -+#endif /* DWC_HS_ELECT_TST */ -+ -+ case USB_PORT_FEAT_INDICATOR: -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD HUB CONTROL - " -+ "SetPortFeature - USB_PORT_FEAT_INDICATOR\n"); -+ /* Not supported */ -+ break; -+ default: -+ retval = -EINVAL; -+ DWC_ERROR("DWC OTG HCD - " -+ "SetPortFeature request %xh " -+ "unknown or unsupported\n", wValue); -+ break; -+ } -+ break; -+ default: -+ error: -+ retval = -EINVAL; -+ DWC_WARN("DWC OTG HCD - " -+ "Unknown hub control request type or invalid typeReq: %xh wIndex: %xh wValue: %xh\n", -+ typeReq, wIndex, wValue); -+ break; -+ } -+ -+ return retval; -+} -+ -+/** -+ * Assigns transactions from a QTD to a free host channel and initializes the -+ * host channel to perform the transactions. The host channel is removed from -+ * the free list. -+ * -+ * @param hcd The HCD state structure. -+ * @param qh Transactions from the first QTD for this QH are selected and -+ * assigned to a free host channel. -+ */ -+static void assign_and_init_hc(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh) -+{ -+ dwc_hc_t *hc; -+ dwc_otg_qtd_t *qtd; -+ struct urb *urb; -+ -+ DWC_DEBUGPL(DBG_HCDV, "%s(%p,%p)\n", __func__, hcd, qh); -+ -+ hc = list_entry(hcd->free_hc_list.next, dwc_hc_t, hc_list_entry); -+ -+ /* Remove the host channel from the free list. */ -+ list_del_init(&hc->hc_list_entry); -+ -+ qtd = list_entry(qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry); -+ urb = qtd->urb; -+ qh->channel = hc; -+ qh->qtd_in_process = qtd; -+ -+ /* -+ * Use usb_pipedevice to determine device address. This address is -+ * 0 before the SET_ADDRESS command and the correct address afterward. 
-+ */ -+ hc->dev_addr = usb_pipedevice(urb->pipe); -+ hc->ep_num = usb_pipeendpoint(urb->pipe); -+ -+ if (urb->dev->speed == USB_SPEED_LOW) { -+ hc->speed = DWC_OTG_EP_SPEED_LOW; -+ } else if (urb->dev->speed == USB_SPEED_FULL) { -+ hc->speed = DWC_OTG_EP_SPEED_FULL; -+ } else { -+ hc->speed = DWC_OTG_EP_SPEED_HIGH; -+ } -+ -+ hc->max_packet = dwc_max_packet(qh->maxp); -+ -+ hc->xfer_started = 0; -+ hc->halt_status = DWC_OTG_HC_XFER_NO_HALT_STATUS; -+ hc->error_state = (qtd->error_count > 0); -+ hc->halt_on_queue = 0; -+ hc->halt_pending = 0; -+ hc->requests = 0; -+ -+ /* -+ * The following values may be modified in the transfer type section -+ * below. The xfer_len value may be reduced when the transfer is -+ * started to accommodate the max widths of the XferSize and PktCnt -+ * fields in the HCTSIZn register. -+ */ -+ hc->do_ping = qh->ping_state; -+ hc->ep_is_in = (usb_pipein(urb->pipe) != 0); -+ hc->data_pid_start = qh->data_toggle; -+ hc->multi_count = 1; -+ -+ if (hcd->core_if->dma_enable) { -+ hc->xfer_buff = (uint8_t *)urb->transfer_dma + urb->actual_length; -+ } else { -+ hc->xfer_buff = (uint8_t *)urb->transfer_buffer + urb->actual_length; -+ } -+ hc->xfer_len = urb->transfer_buffer_length - urb->actual_length; -+ hc->xfer_count = 0; -+ -+ /* -+ * Set the split attributes -+ */ -+ hc->do_split = 0; -+ if (qh->do_split) { -+ hc->do_split = 1; -+ hc->xact_pos = qtd->isoc_split_pos; -+ hc->complete_split = qtd->complete_split; -+ hc->hub_addr = urb->dev->tt->hub->devnum; -+ hc->port_addr = urb->dev->ttport; -+ } -+ -+ switch (usb_pipetype(urb->pipe)) { -+ case PIPE_CONTROL: -+ hc->ep_type = DWC_OTG_EP_TYPE_CONTROL; -+ switch (qtd->control_phase) { -+ case DWC_OTG_CONTROL_SETUP: -+ DWC_DEBUGPL(DBG_HCDV, " Control setup transaction\n"); -+ hc->do_ping = 0; -+ hc->ep_is_in = 0; -+ hc->data_pid_start = DWC_OTG_HC_PID_SETUP; -+ if (hcd->core_if->dma_enable) { -+ hc->xfer_buff = (uint8_t *)urb->setup_dma; -+ } else { -+ hc->xfer_buff = (uint8_t *)urb->setup_packet; -+ } -+ hc->xfer_len = 8; -+ break; -+ case DWC_OTG_CONTROL_DATA: -+ DWC_DEBUGPL(DBG_HCDV, " Control data transaction\n"); -+ hc->data_pid_start = qtd->data_toggle; -+ break; -+ case DWC_OTG_CONTROL_STATUS: -+ /* -+ * Direction is opposite of data direction or IN if no -+ * data. 
-+ */ -+ DWC_DEBUGPL(DBG_HCDV, " Control status transaction\n"); -+ if (urb->transfer_buffer_length == 0) { -+ hc->ep_is_in = 1; -+ } else { -+ hc->ep_is_in = (usb_pipein(urb->pipe) != USB_DIR_IN); -+ } -+ if (hc->ep_is_in) { -+ hc->do_ping = 0; -+ } -+ hc->data_pid_start = DWC_OTG_HC_PID_DATA1; -+ hc->xfer_len = 0; -+ if (hcd->core_if->dma_enable) { -+ hc->xfer_buff = (uint8_t *)hcd->status_buf_dma; -+ } else { -+ hc->xfer_buff = (uint8_t *)hcd->status_buf; -+ } -+ break; -+ } -+ break; -+ case PIPE_BULK: -+ hc->ep_type = DWC_OTG_EP_TYPE_BULK; -+ break; -+ case PIPE_INTERRUPT: -+ hc->ep_type = DWC_OTG_EP_TYPE_INTR; -+ break; -+ case PIPE_ISOCHRONOUS: -+ { -+ struct usb_iso_packet_descriptor *frame_desc; -+ frame_desc = &urb->iso_frame_desc[qtd->isoc_frame_index]; -+ hc->ep_type = DWC_OTG_EP_TYPE_ISOC; -+ if (hcd->core_if->dma_enable) { -+ hc->xfer_buff = (uint8_t *)urb->transfer_dma; -+ } else { -+ hc->xfer_buff = (uint8_t *)urb->transfer_buffer; -+ } -+ hc->xfer_buff += frame_desc->offset + qtd->isoc_split_offset; -+ hc->xfer_len = frame_desc->length - qtd->isoc_split_offset; -+ -+ if (hc->xact_pos == DWC_HCSPLIT_XACTPOS_ALL) { -+ if (hc->xfer_len <= 188) { -+ hc->xact_pos = DWC_HCSPLIT_XACTPOS_ALL; -+ } -+ else { -+ hc->xact_pos = DWC_HCSPLIT_XACTPOS_BEGIN; -+ } -+ } -+ } -+ break; -+ } -+ -+ if (hc->ep_type == DWC_OTG_EP_TYPE_INTR || -+ hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { -+ /* -+ * This value may be modified when the transfer is started to -+ * reflect the actual transfer length. -+ */ -+ hc->multi_count = dwc_hb_mult(qh->maxp); -+ } -+ -+ dwc_otg_hc_init(hcd->core_if, hc); -+ hc->qh = qh; -+} -+ -+/** -+ * This function selects transactions from the HCD transfer schedule and -+ * assigns them to available host channels. It is called from HCD interrupt -+ * handler functions. -+ * -+ * @param hcd The HCD state structure. -+ * -+ * @return The types of new transactions that were assigned to host channels. -+ */ -+dwc_otg_transaction_type_e dwc_otg_hcd_select_transactions(dwc_otg_hcd_t *hcd) -+{ -+ struct list_head *qh_ptr; -+ dwc_otg_qh_t *qh; -+ int num_channels; -+ dwc_otg_transaction_type_e ret_val = DWC_OTG_TRANSACTION_NONE; -+ -+#ifdef DEBUG_SOF -+ DWC_DEBUGPL(DBG_HCD, " Select Transactions\n"); -+#endif -+ -+ /* Process entries in the periodic ready list. */ -+ qh_ptr = hcd->periodic_sched_ready.next; -+ while (qh_ptr != &hcd->periodic_sched_ready && -+ !list_empty(&hcd->free_hc_list)) { -+ -+ qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list_entry); -+ assign_and_init_hc(hcd, qh); -+ -+ /* -+ * Move the QH from the periodic ready schedule to the -+ * periodic assigned schedule. -+ */ -+ qh_ptr = qh_ptr->next; -+ list_move(&qh->qh_list_entry, &hcd->periodic_sched_assigned); -+ -+ ret_val = DWC_OTG_TRANSACTION_PERIODIC; -+ } -+ -+ /* -+ * Process entries in the inactive portion of the non-periodic -+ * schedule. Some free host channels may not be used if they are -+ * reserved for periodic transfers. -+ */ -+ qh_ptr = hcd->non_periodic_sched_inactive.next; -+ num_channels = hcd->core_if->core_params->host_channels; -+ while (qh_ptr != &hcd->non_periodic_sched_inactive && -+ (hcd->non_periodic_channels < -+ num_channels - hcd->periodic_channels) && -+ !list_empty(&hcd->free_hc_list)) { -+ -+ qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list_entry); -+ assign_and_init_hc(hcd, qh); -+ -+ /* -+ * Move the QH from the non-periodic inactive schedule to the -+ * non-periodic active schedule. 
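The non-periodic loop in dwc_otg_hcd_select_transactions() stops handing out channels once the non-periodic count reaches the total channel count minus the channels already claimed by periodic transfers, so periodic endpoints cannot be starved. A small standalone sketch of that admission check; the parameter names mirror the HCD fields but the function itself is only illustrative.

#include <stdbool.h>

/*
 * May another non-periodic transfer take a host channel?  Channels held
 * by periodic endpoints are treated as reserved, so a non-periodic QH is
 * only admitted while at least one unreserved channel remains.
 */
static bool can_assign_non_periodic(int host_channels,
                                    int periodic_channels,
                                    int non_periodic_channels)
{
    return non_periodic_channels < host_channels - periodic_channels;
}

/* Example: 16 channels with 3 periodic in use allows up to 13 non-periodic. */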
-+ */ -+ qh_ptr = qh_ptr->next; -+ list_move(&qh->qh_list_entry, &hcd->non_periodic_sched_active); -+ -+ if (ret_val == DWC_OTG_TRANSACTION_NONE) { -+ ret_val = DWC_OTG_TRANSACTION_NON_PERIODIC; -+ } else { -+ ret_val = DWC_OTG_TRANSACTION_ALL; -+ } -+ -+ hcd->non_periodic_channels++; -+ } -+ -+ return ret_val; -+} -+ -+/** -+ * Attempts to queue a single transaction request for a host channel -+ * associated with either a periodic or non-periodic transfer. This function -+ * assumes that there is space available in the appropriate request queue. For -+ * an OUT transfer or SETUP transaction in Slave mode, it checks whether space -+ * is available in the appropriate Tx FIFO. -+ * -+ * @param hcd The HCD state structure. -+ * @param hc Host channel descriptor associated with either a periodic or -+ * non-periodic transfer. -+ * @param fifo_dwords_avail Number of DWORDs available in the periodic Tx -+ * FIFO for periodic transfers or the non-periodic Tx FIFO for non-periodic -+ * transfers. -+ * -+ * @return 1 if a request is queued and more requests may be needed to -+ * complete the transfer, 0 if no more requests are required for this -+ * transfer, -1 if there is insufficient space in the Tx FIFO. -+ */ -+static int queue_transaction(dwc_otg_hcd_t *hcd, -+ dwc_hc_t *hc, -+ uint16_t fifo_dwords_avail) -+{ -+ int retval; -+ -+ if (hcd->core_if->dma_enable) { -+ if (!hc->xfer_started) { -+ dwc_otg_hc_start_transfer(hcd->core_if, hc); -+ hc->qh->ping_state = 0; -+ } -+ retval = 0; -+ } else if (hc->halt_pending) { -+ /* Don't queue a request if the channel has been halted. */ -+ retval = 0; -+ } else if (hc->halt_on_queue) { -+ dwc_otg_hc_halt(hcd->core_if, hc, hc->halt_status); -+ retval = 0; -+ } else if (hc->do_ping) { -+ if (!hc->xfer_started) { -+ dwc_otg_hc_start_transfer(hcd->core_if, hc); -+ } -+ retval = 0; -+ } else if (!hc->ep_is_in || -+ hc->data_pid_start == DWC_OTG_HC_PID_SETUP) { -+ if ((fifo_dwords_avail * 4) >= hc->max_packet) { -+ if (!hc->xfer_started) { -+ dwc_otg_hc_start_transfer(hcd->core_if, hc); -+ retval = 1; -+ } else { -+ retval = dwc_otg_hc_continue_transfer(hcd->core_if, hc); -+ } -+ } else { -+ retval = -1; -+ } -+ } else { -+ if (!hc->xfer_started) { -+ dwc_otg_hc_start_transfer(hcd->core_if, hc); -+ retval = 1; -+ } else { -+ retval = dwc_otg_hc_continue_transfer(hcd->core_if, hc); -+ } -+ } -+ -+ return retval; -+} -+ -+/** -+ * Processes active non-periodic channels and queues transactions for these -+ * channels to the DWC_otg controller. After queueing transactions, the NP Tx -+ * FIFO Empty interrupt is enabled if there are more transactions to queue as -+ * NP Tx FIFO or request queue space becomes available. Otherwise, the NP Tx -+ * FIFO Empty interrupt is disabled. -+ */ -+static void process_non_periodic_channels(dwc_otg_hcd_t *hcd) -+{ -+ gnptxsts_data_t tx_status; -+ struct list_head *orig_qh_ptr; -+ dwc_otg_qh_t *qh; -+ int status; -+ int no_queue_space = 0; -+ int no_fifo_space = 0; -+ int more_to_do = 0; -+ -+ dwc_otg_core_global_regs_t *global_regs = hcd->core_if->core_global_regs; -+ -+ DWC_DEBUGPL(DBG_HCDV, "Queue non-periodic transactions\n"); -+#ifdef DEBUG -+ tx_status.d32 = dwc_read_reg32(&global_regs->gnptxsts); -+ DWC_DEBUGPL(DBG_HCDV, " NP Tx Req Queue Space Avail (before queue): %d\n", -+ tx_status.b.nptxqspcavail); -+ DWC_DEBUGPL(DBG_HCDV, " NP Tx FIFO Space Avail (before queue): %d\n", -+ tx_status.b.nptxfspcavail); -+#endif -+ /* -+ * Keep track of the starting point. Skip over the start-of-list -+ * entry. 
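In Slave mode, queue_transaction() only starts an OUT or SETUP transaction when the whole max-packet-size fits in the Tx FIFO; the status registers report free space in 32-bit words while max_packet is in bytes, which is where the "* 4" comes from. A self-contained sketch of that check.

#include <stdbool.h>
#include <stdint.h>

/*
 * Free FIFO space is reported in DWORDs; convert to bytes before
 * comparing against the endpoint's maximum packet size.
 */
static bool fifo_has_room_for_packet(uint16_t fifo_dwords_avail,
                                     uint16_t max_packet_bytes)
{
    return (uint32_t)fifo_dwords_avail * 4 >= max_packet_bytes;
}

/* e.g. 128 DWORDs free = 512 bytes, enough for one 512-byte bulk packet. */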
-+ */ -+ if (hcd->non_periodic_qh_ptr == &hcd->non_periodic_sched_active) { -+ hcd->non_periodic_qh_ptr = hcd->non_periodic_qh_ptr->next; -+ } -+ orig_qh_ptr = hcd->non_periodic_qh_ptr; -+ -+ /* -+ * Process once through the active list or until no more space is -+ * available in the request queue or the Tx FIFO. -+ */ -+ do { -+ tx_status.d32 = dwc_read_reg32(&global_regs->gnptxsts); -+ if (!hcd->core_if->dma_enable && tx_status.b.nptxqspcavail == 0) { -+ no_queue_space = 1; -+ break; -+ } -+ -+ qh = list_entry(hcd->non_periodic_qh_ptr, dwc_otg_qh_t, qh_list_entry); -+ status = queue_transaction(hcd, qh->channel, tx_status.b.nptxfspcavail); -+ -+ if (status > 0) { -+ more_to_do = 1; -+ } else if (status < 0) { -+ no_fifo_space = 1; -+ break; -+ } -+ -+ /* Advance to next QH, skipping start-of-list entry. */ -+ hcd->non_periodic_qh_ptr = hcd->non_periodic_qh_ptr->next; -+ if (hcd->non_periodic_qh_ptr == &hcd->non_periodic_sched_active) { -+ hcd->non_periodic_qh_ptr = hcd->non_periodic_qh_ptr->next; -+ } -+ -+ } while (hcd->non_periodic_qh_ptr != orig_qh_ptr); -+ -+ if (!hcd->core_if->dma_enable) { -+ gintmsk_data_t intr_mask = {.d32 = 0}; -+ intr_mask.b.nptxfempty = 1; -+ -+#ifdef DEBUG -+ tx_status.d32 = dwc_read_reg32(&global_regs->gnptxsts); -+ DWC_DEBUGPL(DBG_HCDV, " NP Tx Req Queue Space Avail (after queue): %d\n", -+ tx_status.b.nptxqspcavail); -+ DWC_DEBUGPL(DBG_HCDV, " NP Tx FIFO Space Avail (after queue): %d\n", -+ tx_status.b.nptxfspcavail); -+#endif -+ if (more_to_do || no_queue_space || no_fifo_space) { -+ /* -+ * May need to queue more transactions as the request -+ * queue or Tx FIFO empties. Enable the non-periodic -+ * Tx FIFO empty interrupt. (Always use the half-empty -+ * level to ensure that new requests are loaded as -+ * soon as possible.) -+ */ -+ dwc_modify_reg32(&global_regs->gintmsk, 0, intr_mask.d32); -+ } else { -+ /* -+ * Disable the Tx FIFO empty interrupt since there are -+ * no more transactions that need to be queued right -+ * now. This function is called from interrupt -+ * handlers to queue more transactions as transfer -+ * states change. -+ */ -+ dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, 0); -+ } -+ } -+} -+ -+/** -+ * Processes periodic channels for the next frame and queues transactions for -+ * these channels to the DWC_otg controller. After queueing transactions, the -+ * Periodic Tx FIFO Empty interrupt is enabled if there are more transactions -+ * to queue as Periodic Tx FIFO or request queue space becomes available. -+ * Otherwise, the Periodic Tx FIFO Empty interrupt is disabled. 
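The do/while above walks the active list round-robin: it remembers where it started, never parks on the list head (which is only a sentinel, not a QH), and stops when it wraps back to the starting entry or runs out of queue/FIFO space. A self-contained sketch of that traversal pattern using a hand-rolled circular list; the kernel's struct list_head behaves the same way, this just keeps the example standalone.

#include <stddef.h>

struct node { struct node *next; };    /* circular list; 'head' is a sentinel */

/*
 * Visit every real entry at most once, starting from *cursor and leaving
 * *cursor where the walk stopped so the next call resumes there
 * (round-robin fairness).  visit() returns nonzero to stop early,
 * e.g. "no more FIFO space".
 */
static void round_robin(struct node *head, struct node **cursor,
                        int (*visit)(struct node *))
{
    struct node *start;

    if (head->next == head)            /* empty list: nothing to queue */
        return;
    if (*cursor == head)               /* never park on the sentinel   */
        *cursor = (*cursor)->next;
    start = *cursor;

    do {
        if (visit(*cursor))
            break;
        *cursor = (*cursor)->next;     /* advance, skipping the sentinel */
        if (*cursor == head)
            *cursor = (*cursor)->next;
    } while (*cursor != start);
}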
-+ */ -+static void process_periodic_channels(dwc_otg_hcd_t *hcd) -+{ -+ hptxsts_data_t tx_status; -+ struct list_head *qh_ptr; -+ dwc_otg_qh_t *qh; -+ int status; -+ int no_queue_space = 0; -+ int no_fifo_space = 0; -+ -+ dwc_otg_host_global_regs_t *host_regs; -+ host_regs = hcd->core_if->host_if->host_global_regs; -+ -+ DWC_DEBUGPL(DBG_HCDV, "Queue periodic transactions\n"); -+#ifdef DEBUG -+ tx_status.d32 = dwc_read_reg32(&host_regs->hptxsts); -+ DWC_DEBUGPL(DBG_HCDV, " P Tx Req Queue Space Avail (before queue): %d\n", -+ tx_status.b.ptxqspcavail); -+ DWC_DEBUGPL(DBG_HCDV, " P Tx FIFO Space Avail (before queue): %d\n", -+ tx_status.b.ptxfspcavail); -+#endif -+ -+ qh_ptr = hcd->periodic_sched_assigned.next; -+ while (qh_ptr != &hcd->periodic_sched_assigned) { -+ tx_status.d32 = dwc_read_reg32(&host_regs->hptxsts); -+ if (tx_status.b.ptxqspcavail == 0) { -+ no_queue_space = 1; -+ break; -+ } -+ -+ qh = list_entry(qh_ptr, dwc_otg_qh_t, qh_list_entry); -+ -+ /* -+ * Set a flag if we're queuing high-bandwidth in slave mode. -+ * The flag prevents any halts to get into the request queue in -+ * the middle of multiple high-bandwidth packets getting queued. -+ */ -+ if (!hcd->core_if->dma_enable && -+ qh->channel->multi_count > 1) -+ { -+ hcd->core_if->queuing_high_bandwidth = 1; -+ } -+ -+ status = queue_transaction(hcd, qh->channel, tx_status.b.ptxfspcavail); -+ if (status < 0) { -+ no_fifo_space = 1; -+ break; -+ } -+ -+ /* -+ * In Slave mode, stay on the current transfer until there is -+ * nothing more to do or the high-bandwidth request count is -+ * reached. In DMA mode, only need to queue one request. The -+ * controller automatically handles multiple packets for -+ * high-bandwidth transfers. -+ */ -+ if (hcd->core_if->dma_enable || status == 0 || -+ qh->channel->requests == qh->channel->multi_count) { -+ qh_ptr = qh_ptr->next; -+ /* -+ * Move the QH from the periodic assigned schedule to -+ * the periodic queued schedule. -+ */ -+ list_move(&qh->qh_list_entry, &hcd->periodic_sched_queued); -+ -+ /* done queuing high bandwidth */ -+ hcd->core_if->queuing_high_bandwidth = 0; -+ } -+ } -+ -+ if (!hcd->core_if->dma_enable) { -+ dwc_otg_core_global_regs_t *global_regs; -+ gintmsk_data_t intr_mask = {.d32 = 0}; -+ -+ global_regs = hcd->core_if->core_global_regs; -+ intr_mask.b.ptxfempty = 1; -+#ifdef DEBUG -+ tx_status.d32 = dwc_read_reg32(&host_regs->hptxsts); -+ DWC_DEBUGPL(DBG_HCDV, " P Tx Req Queue Space Avail (after queue): %d\n", -+ tx_status.b.ptxqspcavail); -+ DWC_DEBUGPL(DBG_HCDV, " P Tx FIFO Space Avail (after queue): %d\n", -+ tx_status.b.ptxfspcavail); -+#endif -+ if (!list_empty(&hcd->periodic_sched_assigned) || -+ no_queue_space || no_fifo_space) { -+ /* -+ * May need to queue more transactions as the request -+ * queue or Tx FIFO empties. Enable the periodic Tx -+ * FIFO empty interrupt. (Always use the half-empty -+ * level to ensure that new requests are loaded as -+ * soon as possible.) -+ */ -+ dwc_modify_reg32(&global_regs->gintmsk, 0, intr_mask.d32); -+ } else { -+ /* -+ * Disable the Tx FIFO empty interrupt since there are -+ * no more transactions that need to be queued right -+ * now. This function is called from interrupt -+ * handlers to queue more transactions as transfer -+ * states change. -+ */ -+ dwc_modify_reg32(&global_regs->gintmsk, intr_mask.d32, 0); -+ } -+ } -+} -+ -+/** -+ * This function processes the currently active host channels and queues -+ * transactions for these channels to the DWC_otg controller. 
It is called -+ * from HCD interrupt handler functions. -+ * -+ * @param hcd The HCD state structure. -+ * @param tr_type The type(s) of transactions to queue (non-periodic, -+ * periodic, or both). -+ */ -+void dwc_otg_hcd_queue_transactions(dwc_otg_hcd_t *hcd, -+ dwc_otg_transaction_type_e tr_type) -+{ -+#ifdef DEBUG_SOF -+ DWC_DEBUGPL(DBG_HCD, "Queue Transactions\n"); -+#endif -+ /* Process host channels associated with periodic transfers. */ -+ if ((tr_type == DWC_OTG_TRANSACTION_PERIODIC || -+ tr_type == DWC_OTG_TRANSACTION_ALL) && -+ !list_empty(&hcd->periodic_sched_assigned)) { -+ -+ process_periodic_channels(hcd); -+ } -+ -+ /* Process host channels associated with non-periodic transfers. */ -+ if (tr_type == DWC_OTG_TRANSACTION_NON_PERIODIC || -+ tr_type == DWC_OTG_TRANSACTION_ALL) { -+ if (!list_empty(&hcd->non_periodic_sched_active)) { -+ process_non_periodic_channels(hcd); -+ } else { -+ /* -+ * Ensure NP Tx FIFO empty interrupt is disabled when -+ * there are no non-periodic transfers to process. -+ */ -+ gintmsk_data_t gintmsk = {.d32 = 0}; -+ gintmsk.b.nptxfempty = 1; -+ dwc_modify_reg32(&hcd->core_if->core_global_regs->gintmsk, -+ gintmsk.d32, 0); -+ } -+ } -+} -+ -+/** -+ * Sets the final status of an URB and returns it to the device driver. Any -+ * required cleanup of the URB is performed. -+ */ -+void dwc_otg_hcd_complete_urb(dwc_otg_hcd_t *hcd, struct urb *urb, int status) -+{ -+#ifdef DEBUG -+ if (CHK_DEBUG_LEVEL(DBG_HCDV | DBG_HCD_URB)) { -+ DWC_PRINT("%s: urb %p, device %d, ep %d %s, status=%d\n", -+ __func__, urb, usb_pipedevice(urb->pipe), -+ usb_pipeendpoint(urb->pipe), -+ usb_pipein(urb->pipe) ? "IN" : "OUT", status); -+ if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { -+ int i; -+ for (i = 0; i < urb->number_of_packets; i++) { -+ DWC_PRINT(" ISO Desc %d status: %d\n", -+ i, urb->iso_frame_desc[i].status); -+ } -+ } -+ } -+#endif -+ -+ urb->status = status; -+ urb->hcpriv = NULL; -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) -+ usb_hcd_giveback_urb(dwc_otg_hcd_to_hcd(hcd), urb, status); -+#else -+ usb_hcd_giveback_urb(dwc_otg_hcd_to_hcd(hcd), urb, NULL); -+#endif -+} -+ -+/* -+ * Returns the Queue Head for an URB. -+ */ -+dwc_otg_qh_t *dwc_urb_to_qh(struct urb *urb) -+{ -+ struct usb_host_endpoint *ep = dwc_urb_to_endpoint(urb); -+ return (dwc_otg_qh_t *)ep->hcpriv; -+} -+ -+#ifdef DEBUG -+void dwc_print_setup_data(uint8_t *setup) -+{ -+ int i; -+ if (CHK_DEBUG_LEVEL(DBG_HCD)){ -+ DWC_PRINT("Setup Data = MSB "); -+ for (i = 7; i >= 0; i--) DWC_PRINT("%02x ", setup[i]); -+ DWC_PRINT("\n"); -+ DWC_PRINT(" bmRequestType Tranfer = %s\n", (setup[0] & 0x80) ? 
"Device-to-Host" : "Host-to-Device"); -+ DWC_PRINT(" bmRequestType Type = "); -+ switch ((setup[0] & 0x60) >> 5) { -+ case 0: DWC_PRINT("Standard\n"); break; -+ case 1: DWC_PRINT("Class\n"); break; -+ case 2: DWC_PRINT("Vendor\n"); break; -+ case 3: DWC_PRINT("Reserved\n"); break; -+ } -+ DWC_PRINT(" bmRequestType Recipient = "); -+ switch (setup[0] & 0x1f) { -+ case 0: DWC_PRINT("Device\n"); break; -+ case 1: DWC_PRINT("Interface\n"); break; -+ case 2: DWC_PRINT("Endpoint\n"); break; -+ case 3: DWC_PRINT("Other\n"); break; -+ default: DWC_PRINT("Reserved\n"); break; -+ } -+ DWC_PRINT(" bRequest = 0x%0x\n", setup[1]); -+ DWC_PRINT(" wValue = 0x%0x\n", *((uint16_t *)&setup[2])); -+ DWC_PRINT(" wIndex = 0x%0x\n", *((uint16_t *)&setup[4])); -+ DWC_PRINT(" wLength = 0x%0x\n\n", *((uint16_t *)&setup[6])); -+ } -+} -+#endif -+ -+void dwc_otg_hcd_dump_frrem(dwc_otg_hcd_t *hcd) { -+#if defined(DEBUG) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) -+ DWC_PRINT("Frame remaining at SOF:\n"); -+ DWC_PRINT(" samples %u, accum %llu, avg %llu\n", -+ hcd->frrem_samples, hcd->frrem_accum, -+ (hcd->frrem_samples > 0) ? -+ hcd->frrem_accum/hcd->frrem_samples : 0); -+ -+ DWC_PRINT("\n"); -+ DWC_PRINT("Frame remaining at start_transfer (uframe 7):\n"); -+ DWC_PRINT(" samples %u, accum %llu, avg %llu\n", -+ hcd->core_if->hfnum_7_samples, hcd->core_if->hfnum_7_frrem_accum, -+ (hcd->core_if->hfnum_7_samples > 0) ? -+ hcd->core_if->hfnum_7_frrem_accum/hcd->core_if->hfnum_7_samples : 0); -+ DWC_PRINT("Frame remaining at start_transfer (uframe 0):\n"); -+ DWC_PRINT(" samples %u, accum %llu, avg %llu\n", -+ hcd->core_if->hfnum_0_samples, hcd->core_if->hfnum_0_frrem_accum, -+ (hcd->core_if->hfnum_0_samples > 0) ? -+ hcd->core_if->hfnum_0_frrem_accum/hcd->core_if->hfnum_0_samples : 0); -+ DWC_PRINT("Frame remaining at start_transfer (uframe 1-6):\n"); -+ DWC_PRINT(" samples %u, accum %llu, avg %llu\n", -+ hcd->core_if->hfnum_other_samples, hcd->core_if->hfnum_other_frrem_accum, -+ (hcd->core_if->hfnum_other_samples > 0) ? -+ hcd->core_if->hfnum_other_frrem_accum/hcd->core_if->hfnum_other_samples : 0); -+ -+ DWC_PRINT("\n"); -+ DWC_PRINT("Frame remaining at sample point A (uframe 7):\n"); -+ DWC_PRINT(" samples %u, accum %llu, avg %llu\n", -+ hcd->hfnum_7_samples_a, hcd->hfnum_7_frrem_accum_a, -+ (hcd->hfnum_7_samples_a > 0) ? -+ hcd->hfnum_7_frrem_accum_a/hcd->hfnum_7_samples_a : 0); -+ DWC_PRINT("Frame remaining at sample point A (uframe 0):\n"); -+ DWC_PRINT(" samples %u, accum %llu, avg %llu\n", -+ hcd->hfnum_0_samples_a, hcd->hfnum_0_frrem_accum_a, -+ (hcd->hfnum_0_samples_a > 0) ? -+ hcd->hfnum_0_frrem_accum_a/hcd->hfnum_0_samples_a : 0); -+ DWC_PRINT("Frame remaining at sample point A (uframe 1-6):\n"); -+ DWC_PRINT(" samples %u, accum %llu, avg %llu\n", -+ hcd->hfnum_other_samples_a, hcd->hfnum_other_frrem_accum_a, -+ (hcd->hfnum_other_samples_a > 0) ? -+ hcd->hfnum_other_frrem_accum_a/hcd->hfnum_other_samples_a : 0); -+ -+ DWC_PRINT("\n"); -+ DWC_PRINT("Frame remaining at sample point B (uframe 7):\n"); -+ DWC_PRINT(" samples %u, accum %llu, avg %llu\n", -+ hcd->hfnum_7_samples_b, hcd->hfnum_7_frrem_accum_b, -+ (hcd->hfnum_7_samples_b > 0) ? -+ hcd->hfnum_7_frrem_accum_b/hcd->hfnum_7_samples_b : 0); -+ DWC_PRINT("Frame remaining at sample point B (uframe 0):\n"); -+ DWC_PRINT(" samples %u, accum %llu, avg %llu\n", -+ hcd->hfnum_0_samples_b, hcd->hfnum_0_frrem_accum_b, -+ (hcd->hfnum_0_samples_b > 0) ? 
-+ hcd->hfnum_0_frrem_accum_b/hcd->hfnum_0_samples_b : 0); -+ DWC_PRINT("Frame remaining at sample point B (uframe 1-6):\n"); -+ DWC_PRINT(" samples %u, accum %llu, avg %llu\n", -+ hcd->hfnum_other_samples_b, hcd->hfnum_other_frrem_accum_b, -+ (hcd->hfnum_other_samples_b > 0) ? -+ hcd->hfnum_other_frrem_accum_b/hcd->hfnum_other_samples_b : 0); -+#endif -+} -+ -+void dwc_otg_hcd_dump_state(dwc_otg_hcd_t *hcd) -+{ -+#ifdef DEBUG -+ int num_channels; -+ int i; -+ gnptxsts_data_t np_tx_status; -+ hptxsts_data_t p_tx_status; -+ -+ num_channels = hcd->core_if->core_params->host_channels; -+ DWC_PRINT("\n"); -+ DWC_PRINT("************************************************************\n"); -+ DWC_PRINT("HCD State:\n"); -+ DWC_PRINT(" Num channels: %d\n", num_channels); -+ for (i = 0; i < num_channels; i++) { -+ dwc_hc_t *hc = hcd->hc_ptr_array[i]; -+ DWC_PRINT(" Channel %d:\n", i); -+ DWC_PRINT(" dev_addr: %d, ep_num: %d, ep_is_in: %d\n", -+ hc->dev_addr, hc->ep_num, hc->ep_is_in); -+ DWC_PRINT(" speed: %d\n", hc->speed); -+ DWC_PRINT(" ep_type: %d\n", hc->ep_type); -+ DWC_PRINT(" max_packet: %d\n", hc->max_packet); -+ DWC_PRINT(" data_pid_start: %d\n", hc->data_pid_start); -+ DWC_PRINT(" multi_count: %d\n", hc->multi_count); -+ DWC_PRINT(" xfer_started: %d\n", hc->xfer_started); -+ DWC_PRINT(" xfer_buff: %p\n", hc->xfer_buff); -+ DWC_PRINT(" xfer_len: %d\n", hc->xfer_len); -+ DWC_PRINT(" xfer_count: %d\n", hc->xfer_count); -+ DWC_PRINT(" halt_on_queue: %d\n", hc->halt_on_queue); -+ DWC_PRINT(" halt_pending: %d\n", hc->halt_pending); -+ DWC_PRINT(" halt_status: %d\n", hc->halt_status); -+ DWC_PRINT(" do_split: %d\n", hc->do_split); -+ DWC_PRINT(" complete_split: %d\n", hc->complete_split); -+ DWC_PRINT(" hub_addr: %d\n", hc->hub_addr); -+ DWC_PRINT(" port_addr: %d\n", hc->port_addr); -+ DWC_PRINT(" xact_pos: %d\n", hc->xact_pos); -+ DWC_PRINT(" requests: %d\n", hc->requests); -+ DWC_PRINT(" qh: %p\n", hc->qh); -+ if (hc->xfer_started) { -+ hfnum_data_t hfnum; -+ hcchar_data_t hcchar; -+ hctsiz_data_t hctsiz; -+ hcint_data_t hcint; -+ hcintmsk_data_t hcintmsk; -+ hfnum.d32 = dwc_read_reg32(&hcd->core_if->host_if->host_global_regs->hfnum); -+ hcchar.d32 = dwc_read_reg32(&hcd->core_if->host_if->hc_regs[i]->hcchar); -+ hctsiz.d32 = dwc_read_reg32(&hcd->core_if->host_if->hc_regs[i]->hctsiz); -+ hcint.d32 = dwc_read_reg32(&hcd->core_if->host_if->hc_regs[i]->hcint); -+ hcintmsk.d32 = dwc_read_reg32(&hcd->core_if->host_if->hc_regs[i]->hcintmsk); -+ DWC_PRINT(" hfnum: 0x%08x\n", hfnum.d32); -+ DWC_PRINT(" hcchar: 0x%08x\n", hcchar.d32); -+ DWC_PRINT(" hctsiz: 0x%08x\n", hctsiz.d32); -+ DWC_PRINT(" hcint: 0x%08x\n", hcint.d32); -+ DWC_PRINT(" hcintmsk: 0x%08x\n", hcintmsk.d32); -+ } -+ if (hc->xfer_started && hc->qh && hc->qh->qtd_in_process) { -+ dwc_otg_qtd_t *qtd; -+ struct urb *urb; -+ qtd = hc->qh->qtd_in_process; -+ urb = qtd->urb; -+ DWC_PRINT(" URB Info:\n"); -+ DWC_PRINT(" qtd: %p, urb: %p\n", qtd, urb); -+ if (urb) { -+ DWC_PRINT(" Dev: %d, EP: %d %s\n", -+ usb_pipedevice(urb->pipe), usb_pipeendpoint(urb->pipe), -+ usb_pipein(urb->pipe) ? 
"IN" : "OUT"); -+ DWC_PRINT(" Max packet size: %d\n", -+ usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe))); -+ DWC_PRINT(" transfer_buffer: %p\n", urb->transfer_buffer); -+ DWC_PRINT(" transfer_dma: %p\n", (void *)urb->transfer_dma); -+ DWC_PRINT(" transfer_buffer_length: %d\n", urb->transfer_buffer_length); -+ DWC_PRINT(" actual_length: %d\n", urb->actual_length); -+ } -+ } -+ } -+ DWC_PRINT(" non_periodic_channels: %d\n", hcd->non_periodic_channels); -+ DWC_PRINT(" periodic_channels: %d\n", hcd->periodic_channels); -+ DWC_PRINT(" periodic_usecs: %d\n", hcd->periodic_usecs); -+ np_tx_status.d32 = dwc_read_reg32(&hcd->core_if->core_global_regs->gnptxsts); -+ DWC_PRINT(" NP Tx Req Queue Space Avail: %d\n", np_tx_status.b.nptxqspcavail); -+ DWC_PRINT(" NP Tx FIFO Space Avail: %d\n", np_tx_status.b.nptxfspcavail); -+ p_tx_status.d32 = dwc_read_reg32(&hcd->core_if->host_if->host_global_regs->hptxsts); -+ DWC_PRINT(" P Tx Req Queue Space Avail: %d\n", p_tx_status.b.ptxqspcavail); -+ DWC_PRINT(" P Tx FIFO Space Avail: %d\n", p_tx_status.b.ptxfspcavail); -+ dwc_otg_hcd_dump_frrem(hcd); -+ dwc_otg_dump_global_registers(hcd->core_if); -+ dwc_otg_dump_host_registers(hcd->core_if); -+ DWC_PRINT("************************************************************\n"); -+ DWC_PRINT("\n"); -+#endif -+} -+#endif /* DWC_DEVICE_ONLY */ ---- /dev/null -+++ b/drivers/usb/dwc_otg/dwc_otg_hcd.h -@@ -0,0 +1,668 @@ -+/* ========================================================================== -+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_hcd.h $ -+ * $Revision: 1.3 $ -+ * $Date: 2008-12-15 06:51:32 $ -+ * $Change: 1064918 $ -+ * -+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, -+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless -+ * otherwise expressly agreed to in writing between Synopsys and you. -+ * -+ * The Software IS NOT an item of Licensed Software or Licensed Product under -+ * any End User Software License Agreement or Agreement for Licensed Product -+ * with Synopsys or any supplement thereto. You are permitted to use and -+ * redistribute this Software in source and binary forms, with or without -+ * modification, provided that redistributions of source code must retain this -+ * notice. You may not view, use, disclose, copy or distribute this file or -+ * any information contained herein except pursuant to this license grant from -+ * Synopsys. If you do not agree with this notice, including the disclaimer -+ * below, then you are not authorized to use the Software. -+ * -+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, -+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -+ * DAMAGE. 
-+ * ========================================================================== */ -+#ifndef DWC_DEVICE_ONLY -+#ifndef __DWC_HCD_H__ -+#define __DWC_HCD_H__ -+ -+#include -+#include -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35) -+#include -+#else -+#include <../drivers/usb/core/hcd.h> -+#endif -+ -+struct dwc_otg_device; -+ -+#include "dwc_otg_cil.h" -+ -+/** -+ * @file -+ * -+ * This file contains the structures, constants, and interfaces for -+ * the Host Contoller Driver (HCD). -+ * -+ * The Host Controller Driver (HCD) is responsible for translating requests -+ * from the USB Driver into the appropriate actions on the DWC_otg controller. -+ * It isolates the USBD from the specifics of the controller by providing an -+ * API to the USBD. -+ */ -+ -+/** -+ * Phases for control transfers. -+ */ -+typedef enum dwc_otg_control_phase { -+ DWC_OTG_CONTROL_SETUP, -+ DWC_OTG_CONTROL_DATA, -+ DWC_OTG_CONTROL_STATUS -+} dwc_otg_control_phase_e; -+ -+/** Transaction types. */ -+typedef enum dwc_otg_transaction_type { -+ DWC_OTG_TRANSACTION_NONE, -+ DWC_OTG_TRANSACTION_PERIODIC, -+ DWC_OTG_TRANSACTION_NON_PERIODIC, -+ DWC_OTG_TRANSACTION_ALL -+} dwc_otg_transaction_type_e; -+ -+/** -+ * A Queue Transfer Descriptor (QTD) holds the state of a bulk, control, -+ * interrupt, or isochronous transfer. A single QTD is created for each URB -+ * (of one of these types) submitted to the HCD. The transfer associated with -+ * a QTD may require one or multiple transactions. -+ * -+ * A QTD is linked to a Queue Head, which is entered in either the -+ * non-periodic or periodic schedule for execution. When a QTD is chosen for -+ * execution, some or all of its transactions may be executed. After -+ * execution, the state of the QTD is updated. The QTD may be retired if all -+ * its transactions are complete or if an error occurred. Otherwise, it -+ * remains in the schedule so more transactions can be executed later. -+ */ -+typedef struct dwc_otg_qtd { -+ /** -+ * Determines the PID of the next data packet for the data phase of -+ * control transfers. Ignored for other transfer types.
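A control QTD walks through the phases of the dwc_otg_control_phase enum above in order, skipping the data phase when the request carries no data. A minimal sketch of that progression using its own enum names, assuming nothing about the driver beyond the three-phase model.

#include <stdbool.h>

typedef enum { PHASE_SETUP, PHASE_DATA, PHASE_STATUS } control_phase_t;

/* Next phase of a control transfer once the current phase completes. */
static control_phase_t next_control_phase(control_phase_t cur, bool has_data)
{
    switch (cur) {
    case PHASE_SETUP:  return has_data ? PHASE_DATA : PHASE_STATUS;
    case PHASE_DATA:   return PHASE_STATUS;
    default:           return PHASE_STATUS;   /* STATUS is the terminal phase */
    }
}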
-+ * One of the following values: -+ * - DWC_OTG_HC_PID_DATA0 -+ * - DWC_OTG_HC_PID_DATA1 -+ */ -+ uint8_t data_toggle; -+ -+ /** Current phase for control transfers (Setup, Data, or Status). */ -+ dwc_otg_control_phase_e control_phase; -+ -+ /** Keep track of the current split type -+ * for FS/LS endpoints on a HS Hub */ -+ uint8_t complete_split; -+ -+ /** How many bytes transferred during SSPLIT OUT */ -+ uint32_t ssplit_out_xfer_count; -+ -+ /** -+ * Holds the number of bus errors that have occurred for a transaction -+ * within this transfer. -+ */ -+ uint8_t error_count; -+ -+ /** -+ * Index of the next frame descriptor for an isochronous transfer. A -+ * frame descriptor describes the buffer position and length of the -+ * data to be transferred in the next scheduled (micro)frame of an -+ * isochronous transfer. It also holds status for that transaction. -+ * The frame index starts at 0. -+ */ -+ int isoc_frame_index; -+ -+ /** Position of the ISOC split on full/low speed */ -+ uint8_t isoc_split_pos; -+ -+ /** Position of the ISOC split in the buffer for the current frame */ -+ uint16_t isoc_split_offset; -+ -+ /** URB for this transfer */ -+ struct urb *urb; -+ -+ /** This list of QTDs */ -+ struct list_head qtd_list_entry; -+ -+} dwc_otg_qtd_t; -+ -+/** -+ * A Queue Head (QH) holds the static characteristics of an endpoint and -+ * maintains a list of transfers (QTDs) for that endpoint. A QH structure may -+ * be entered in either the non-periodic or periodic schedule. -+ */ -+typedef struct dwc_otg_qh { -+ /** -+ * Endpoint type. -+ * One of the following values: -+ * - USB_ENDPOINT_XFER_CONTROL -+ * - USB_ENDPOINT_XFER_ISOC -+ * - USB_ENDPOINT_XFER_BULK -+ * - USB_ENDPOINT_XFER_INT -+ */ -+ uint8_t ep_type; -+ uint8_t ep_is_in; -+ -+ /** wMaxPacketSize Field of Endpoint Descriptor. */ -+ uint16_t maxp; -+ -+ /** -+ * Determines the PID of the next data packet for non-control -+ * transfers. Ignored for control transfers.
-+ * One of the following values: -+ * - DWC_OTG_HC_PID_DATA0 -+ * - DWC_OTG_HC_PID_DATA1 -+ */ -+ uint8_t data_toggle; -+ -+ /** Ping state if 1. */ -+ uint8_t ping_state; -+ -+ /** -+ * List of QTDs for this QH. -+ */ -+ struct list_head qtd_list; -+ -+ /** Host channel currently processing transfers for this QH. */ -+ dwc_hc_t *channel; -+ -+ /** QTD currently assigned to a host channel for this QH. */ -+ dwc_otg_qtd_t *qtd_in_process; -+ -+ /** Full/low speed endpoint on high-speed hub requires split. */ -+ uint8_t do_split; -+ -+ /** @name Periodic schedule information */ -+ /** @{ */ -+ -+ /** Bandwidth in microseconds per (micro)frame. */ -+ uint8_t usecs; -+ -+ /** Interval between transfers in (micro)frames. */ -+ uint16_t interval; -+ -+ /** -+ * (micro)frame to initialize a periodic transfer. The transfer -+ * executes in the following (micro)frame. -+ */ -+ uint16_t sched_frame; -+ -+ /** (micro)frame at which last start split was initialized. */ -+ uint16_t start_split_frame; -+ -+ /** @} */ -+ -+ /** Entry for QH in either the periodic or non-periodic schedule. */ -+ struct list_head qh_list_entry; -+ -+ /* For non-dword aligned buffer support */ -+ uint8_t *dw_align_buf; -+ dma_addr_t dw_align_buf_dma; -+} dwc_otg_qh_t; -+ -+/** -+ * This structure holds the state of the HCD, including the non-periodic and -+ * periodic schedules. -+ */ -+typedef struct dwc_otg_hcd { -+ /** The DWC otg device pointer */ -+ struct dwc_otg_device *otg_dev; -+ -+ /** DWC OTG Core Interface Layer */ -+ dwc_otg_core_if_t *core_if; -+ -+ /** Internal DWC HCD Flags */ -+ volatile union dwc_otg_hcd_internal_flags { -+ uint32_t d32; -+ struct { -+ unsigned port_connect_status_change : 1; -+ unsigned port_connect_status : 1; -+ unsigned port_reset_change : 1; -+ unsigned port_enable_change : 1; -+ unsigned port_suspend_change : 1; -+ unsigned port_over_current_change : 1; -+ unsigned reserved : 27; -+ } b; -+ } flags; -+ -+ /** -+ * Inactive items in the non-periodic schedule. This is a list of -+ * Queue Heads. Transfers associated with these Queue Heads are not -+ * currently assigned to a host channel. -+ */ -+ struct list_head non_periodic_sched_inactive; -+ -+ /** -+ * Active items in the non-periodic schedule. This is a list of -+ * Queue Heads. Transfers associated with these Queue Heads are -+ * currently assigned to a host channel. -+ */ -+ struct list_head non_periodic_sched_active; -+ -+ /** -+ * Pointer to the next Queue Head to process in the active -+ * non-periodic schedule. -+ */ -+ struct list_head *non_periodic_qh_ptr; -+ -+ /** -+ * Inactive items in the periodic schedule. This is a list of QHs for -+ * periodic transfers that are _not_ scheduled for the next frame. -+ * Each QH in the list has an interval counter that determines when it -+ * needs to be scheduled for execution. This scheduling mechanism -+ * allows only a simple calculation for periodic bandwidth used (i.e. -+ * must assume that all periodic transfers may need to execute in the -+ * same frame). However, it greatly simplifies scheduling and should -+ * be sufficient for the vast majority of OTG hosts, which need to -+ * connect to a small number of peripherals at one time. -+ * -+ * Items move from this list to periodic_sched_ready when the QH -+ * interval counter is 0 at SOF. -+ */ -+ struct list_head periodic_sched_inactive; -+ -+ /** -+ * List of periodic QHs that are ready for execution in the next -+ * frame, but have not yet been assigned to host channels. 
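Taken together, the periodic lists below form a small per-QH state machine: a QH waits in the inactive list until its scheduled frame arrives, moves to ready at SOF, to assigned once a host channel is granted, to queued when its transactions are written to the core, and then back to ready (interval 1) or inactive when the channel is released. A compact sketch of those transitions, independent of the actual list_head plumbing; the enum and event names are illustrative.

typedef enum { SCHED_INACTIVE, SCHED_READY, SCHED_ASSIGNED, SCHED_QUEUED } sched_state_t;
typedef enum { EV_SOF_DUE, EV_CHANNEL_GRANTED, EV_TXNS_QUEUED, EV_CHANNEL_RELEASED } sched_event_t;

/*
 * Where a periodic QH moves when the given event occurs.  'interval' is
 * the QH's polling interval in (micro)frames; interval 1 must run again
 * in the very next frame, so it goes straight back to READY.
 */
static sched_state_t periodic_step(sched_state_t s, sched_event_t ev, int interval)
{
    switch (ev) {
    case EV_SOF_DUE:          return (s == SCHED_INACTIVE) ? SCHED_READY    : s;
    case EV_CHANNEL_GRANTED:  return (s == SCHED_READY)    ? SCHED_ASSIGNED : s;
    case EV_TXNS_QUEUED:      return (s == SCHED_ASSIGNED) ? SCHED_QUEUED   : s;
    case EV_CHANNEL_RELEASED: return (s == SCHED_QUEUED)
                                     ? (interval == 1 ? SCHED_READY : SCHED_INACTIVE)
                                     : s;
    }
    return s;
}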
-+ * -+ * Items move from this list to periodic_sched_assigned as host -+ * channels become available during the current frame. -+ */ -+ struct list_head periodic_sched_ready; -+ -+ /** -+ * List of periodic QHs to be executed in the next frame that are -+ * assigned to host channels. -+ * -+ * Items move from this list to periodic_sched_queued as the -+ * transactions for the QH are queued to the DWC_otg controller. -+ */ -+ struct list_head periodic_sched_assigned; -+ -+ /** -+ * List of periodic QHs that have been queued for execution. -+ * -+ * Items move from this list to either periodic_sched_inactive or -+ * periodic_sched_ready when the channel associated with the transfer -+ * is released. If the interval for the QH is 1, the item moves to -+ * periodic_sched_ready because it must be rescheduled for the next -+ * frame. Otherwise, the item moves to periodic_sched_inactive. -+ */ -+ struct list_head periodic_sched_queued; -+ -+ /** -+ * Total bandwidth claimed so far for periodic transfers. This value -+ * is in microseconds per (micro)frame. The assumption is that all -+ * periodic transfers may occur in the same (micro)frame. -+ */ -+ uint16_t periodic_usecs; -+ -+ /** -+ * Frame number read from the core at SOF. The value ranges from 0 to -+ * DWC_HFNUM_MAX_FRNUM. -+ */ -+ uint16_t frame_number; -+ -+ /** -+ * Free host channels in the controller. This is a list of -+ * dwc_hc_t items. -+ */ -+ struct list_head free_hc_list; -+ -+ /** -+ * Number of host channels assigned to periodic transfers. Currently -+ * assuming that there is a dedicated host channel for each periodic -+ * transaction and at least one host channel available for -+ * non-periodic transactions. -+ */ -+ int periodic_channels; -+ -+ /** -+ * Number of host channels assigned to non-periodic transfers. -+ */ -+ int non_periodic_channels; -+ -+ /** -+ * Array of pointers to the host channel descriptors. Allows accessing -+ * a host channel descriptor given the host channel number. This is -+ * useful in interrupt handlers. -+ */ -+ dwc_hc_t *hc_ptr_array[MAX_EPS_CHANNELS]; -+ -+ /** -+ * Buffer to use for any data received during the status phase of a -+ * control transfer. Normally no data is transferred during the status -+ * phase. This buffer is used as a bit bucket. -+ */ -+ uint8_t *status_buf; -+ -+ /** -+ * DMA address for status_buf. -+ */ -+ dma_addr_t status_buf_dma; -+#define DWC_OTG_HCD_STATUS_BUF_SIZE 64 -+ -+ /** -+ * Structure to allow starting the HCD in a non-interrupt context -+ * during an OTG role change. -+ */ -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) -+ struct work_struct start_work; -+#else -+ struct delayed_work start_work; -+#endif -+ -+ /** -+ * Connection timer. An OTG host must display a message if the device -+ * does not connect. Started when the VBus power is turned on via -+ * sysfs attribute "buspower". 
-+ */ -+ struct timer_list conn_timer; -+ -+ /* Tasket to do a reset */ -+ struct tasklet_struct *reset_tasklet; -+ -+ /* */ -+ spinlock_t lock; -+ -+#ifdef DEBUG -+ uint32_t frrem_samples; -+ uint64_t frrem_accum; -+ -+ uint32_t hfnum_7_samples_a; -+ uint64_t hfnum_7_frrem_accum_a; -+ uint32_t hfnum_0_samples_a; -+ uint64_t hfnum_0_frrem_accum_a; -+ uint32_t hfnum_other_samples_a; -+ uint64_t hfnum_other_frrem_accum_a; -+ -+ uint32_t hfnum_7_samples_b; -+ uint64_t hfnum_7_frrem_accum_b; -+ uint32_t hfnum_0_samples_b; -+ uint64_t hfnum_0_frrem_accum_b; -+ uint32_t hfnum_other_samples_b; -+ uint64_t hfnum_other_frrem_accum_b; -+#endif -+} dwc_otg_hcd_t; -+ -+/** Gets the dwc_otg_hcd from a struct usb_hcd */ -+static inline dwc_otg_hcd_t *hcd_to_dwc_otg_hcd(struct usb_hcd *hcd) -+{ -+ return (dwc_otg_hcd_t *)(hcd->hcd_priv); -+} -+ -+/** Gets the struct usb_hcd that contains a dwc_otg_hcd_t. */ -+static inline struct usb_hcd *dwc_otg_hcd_to_hcd(dwc_otg_hcd_t *dwc_otg_hcd) -+{ -+ return container_of((void *)dwc_otg_hcd, struct usb_hcd, hcd_priv); -+} -+ -+/** @name HCD Create/Destroy Functions */ -+/** @{ */ -+extern int dwc_otg_hcd_init(struct device *dev); -+extern void dwc_otg_hcd_remove(struct device *dev); -+/** @} */ -+ -+/** @name Linux HC Driver API Functions */ -+/** @{ */ -+ -+extern int dwc_otg_hcd_start(struct usb_hcd *hcd); -+extern void dwc_otg_hcd_stop(struct usb_hcd *hcd); -+extern int dwc_otg_hcd_get_frame_number(struct usb_hcd *hcd); -+extern void dwc_otg_hcd_free(struct usb_hcd *hcd); -+extern int dwc_otg_hcd_urb_enqueue(struct usb_hcd *hcd, -+ struct urb *urb, -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) -+ int mem_flags -+#else -+ gfp_t mem_flags -+#endif -+ ); -+extern int dwc_otg_hcd_urb_dequeue(struct usb_hcd *hcd, -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) -+#endif -+ struct urb *urb, int status); -+extern void dwc_otg_hcd_endpoint_disable(struct usb_hcd *hcd, -+ struct usb_host_endpoint *ep); -+extern irqreturn_t dwc_otg_hcd_irq(struct usb_hcd *hcd -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) -+ , struct pt_regs *regs -+#endif -+ ); -+extern int dwc_otg_hcd_hub_status_data(struct usb_hcd *hcd, -+ char *buf); -+extern int dwc_otg_hcd_hub_control(struct usb_hcd *hcd, -+ u16 typeReq, -+ u16 wValue, -+ u16 wIndex, -+ char *buf, -+ u16 wLength); -+ -+/** @} */ -+ -+/** @name Transaction Execution Functions */ -+/** @{ */ -+extern dwc_otg_transaction_type_e dwc_otg_hcd_select_transactions(dwc_otg_hcd_t *hcd); -+extern void dwc_otg_hcd_queue_transactions(dwc_otg_hcd_t *hcd, -+ dwc_otg_transaction_type_e tr_type); -+extern void dwc_otg_hcd_complete_urb(dwc_otg_hcd_t *_hcd, struct urb *urb, -+ int status); -+/** @} */ -+ -+/** @name Interrupt Handler Functions */ -+/** @{ */ -+extern int32_t dwc_otg_hcd_handle_intr(dwc_otg_hcd_t *dwc_otg_hcd); -+extern int32_t dwc_otg_hcd_handle_sof_intr(dwc_otg_hcd_t *dwc_otg_hcd); -+extern int32_t dwc_otg_hcd_handle_rx_status_q_level_intr(dwc_otg_hcd_t *dwc_otg_hcd); -+extern int32_t dwc_otg_hcd_handle_np_tx_fifo_empty_intr(dwc_otg_hcd_t *dwc_otg_hcd); -+extern int32_t dwc_otg_hcd_handle_perio_tx_fifo_empty_intr(dwc_otg_hcd_t *dwc_otg_hcd); -+extern int32_t dwc_otg_hcd_handle_incomplete_periodic_intr(dwc_otg_hcd_t *dwc_otg_hcd); -+extern int32_t dwc_otg_hcd_handle_port_intr(dwc_otg_hcd_t *dwc_otg_hcd); -+extern int32_t dwc_otg_hcd_handle_conn_id_status_change_intr(dwc_otg_hcd_t *dwc_otg_hcd); -+extern int32_t dwc_otg_hcd_handle_disconnect_intr(dwc_otg_hcd_t *dwc_otg_hcd); -+extern int32_t 
dwc_otg_hcd_handle_hc_intr(dwc_otg_hcd_t *dwc_otg_hcd); -+extern int32_t dwc_otg_hcd_handle_hc_n_intr(dwc_otg_hcd_t *dwc_otg_hcd, uint32_t num); -+extern int32_t dwc_otg_hcd_handle_session_req_intr(dwc_otg_hcd_t *dwc_otg_hcd); -+extern int32_t dwc_otg_hcd_handle_wakeup_detected_intr(dwc_otg_hcd_t *dwc_otg_hcd); -+/** @} */ -+ -+ -+/** @name Schedule Queue Functions */ -+/** @{ */ -+ -+/* Implemented in dwc_otg_hcd_queue.c */ -+extern dwc_otg_qh_t *dwc_otg_hcd_qh_create(dwc_otg_hcd_t *hcd, struct urb *urb); -+extern void dwc_otg_hcd_qh_init(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh, struct urb *urb); -+extern void dwc_otg_hcd_qh_free(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh); -+extern int dwc_otg_hcd_qh_add(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh); -+extern void dwc_otg_hcd_qh_remove(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh); -+extern void dwc_otg_hcd_qh_deactivate(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh, int sched_csplit); -+ -+/** Remove and free a QH */ -+static inline void dwc_otg_hcd_qh_remove_and_free(dwc_otg_hcd_t *hcd, -+ dwc_otg_qh_t *qh) -+{ -+ dwc_otg_hcd_qh_remove(hcd, qh); -+ dwc_otg_hcd_qh_free(hcd, qh); -+} -+ -+/** Allocates memory for a QH structure. -+ * @return Returns the memory allocate or NULL on error. */ -+static inline dwc_otg_qh_t *dwc_otg_hcd_qh_alloc(void) -+{ -+ return (dwc_otg_qh_t *) kmalloc(sizeof(dwc_otg_qh_t), GFP_KERNEL); -+} -+ -+extern dwc_otg_qtd_t *dwc_otg_hcd_qtd_create(struct urb *urb); -+extern void dwc_otg_hcd_qtd_init(dwc_otg_qtd_t *qtd, struct urb *urb); -+extern int dwc_otg_hcd_qtd_add(dwc_otg_qtd_t *qtd, dwc_otg_hcd_t *dwc_otg_hcd); -+ -+/** Allocates memory for a QTD structure. -+ * @return Returns the memory allocate or NULL on error. */ -+static inline dwc_otg_qtd_t *dwc_otg_hcd_qtd_alloc(void) -+{ -+ return (dwc_otg_qtd_t *) kmalloc(sizeof(dwc_otg_qtd_t), GFP_KERNEL); -+} -+ -+/** Frees the memory for a QTD structure. QTD should already be removed from -+ * list. -+ * @param[in] qtd QTD to free.*/ -+static inline void dwc_otg_hcd_qtd_free(dwc_otg_qtd_t *qtd) -+{ -+ kfree(qtd); -+} -+ -+/** Removes a QTD from list. -+ * @param[in] hcd HCD instance. -+ * @param[in] qtd QTD to remove from list. */ -+static inline void dwc_otg_hcd_qtd_remove(dwc_otg_hcd_t *hcd, dwc_otg_qtd_t *qtd) -+{ -+ unsigned long flags; -+ SPIN_LOCK_IRQSAVE(&hcd->lock, flags); -+ list_del(&qtd->qtd_list_entry); -+ SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags); -+} -+ -+/** Remove and free a QTD */ -+static inline void dwc_otg_hcd_qtd_remove_and_free(dwc_otg_hcd_t *hcd, dwc_otg_qtd_t *qtd) -+{ -+ dwc_otg_hcd_qtd_remove(hcd, qtd); -+ dwc_otg_hcd_qtd_free(qtd); -+} -+ -+/** @} */ -+ -+ -+/** @name Internal Functions */ -+/** @{ */ -+dwc_otg_qh_t *dwc_urb_to_qh(struct urb *urb); -+void dwc_otg_hcd_dump_frrem(dwc_otg_hcd_t *hcd); -+void dwc_otg_hcd_dump_state(dwc_otg_hcd_t *hcd); -+/** @} */ -+ -+/** Gets the usb_host_endpoint associated with an URB. */ -+static inline struct usb_host_endpoint *dwc_urb_to_endpoint(struct urb *urb) -+{ -+ struct usb_device *dev = urb->dev; -+ int ep_num = usb_pipeendpoint(urb->pipe); -+ -+ if (usb_pipein(urb->pipe)) -+ return dev->ep_in[ep_num]; -+ else -+ return dev->ep_out[ep_num]; -+} -+ -+/** -+ * Gets the endpoint number from a _bEndpointAddress argument. The endpoint is -+ * qualified with its direction (possible 32 endpoints per device). 
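The inline helpers above compose the usual teardown path: unlink the QTD from its list while holding the HCD lock, then free it after the lock is dropped (and likewise for a QH). A standalone version of that idiom with a pthread mutex standing in for the HCD spinlock and a bare doubly linked entry standing in for the QTD; it mirrors the shape of dwc_otg_hcd_qtd_remove_and_free() rather than reproducing it.

#include <pthread.h>
#include <stdlib.h>

struct entry { struct entry *prev, *next; };   /* circular doubly linked */

/*
 * Unlink inside the critical section, release the memory outside it, so
 * no allocator work happens while the list lock is held.
 */
static void remove_and_free(pthread_mutex_t *lock, struct entry *e)
{
    pthread_mutex_lock(lock);
    e->prev->next = e->next;       /* unlink from the list */
    e->next->prev = e->prev;
    pthread_mutex_unlock(lock);

    free(e);                       /* free after dropping the lock */
}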
-+ */ -+#define dwc_ep_addr_to_endpoint(_bEndpointAddress_) ((_bEndpointAddress_ & USB_ENDPOINT_NUMBER_MASK) | \ -+ ((_bEndpointAddress_ & USB_DIR_IN) != 0) << 4) -+ -+/** Gets the QH that contains the list_head */ -+#define dwc_list_to_qh(_list_head_ptr_) container_of(_list_head_ptr_, dwc_otg_qh_t, qh_list_entry) -+ -+/** Gets the QTD that contains the list_head */ -+#define dwc_list_to_qtd(_list_head_ptr_) container_of(_list_head_ptr_, dwc_otg_qtd_t, qtd_list_entry) -+ -+/** Check if QH is non-periodic */ -+#define dwc_qh_is_non_per(_qh_ptr_) ((_qh_ptr_->ep_type == USB_ENDPOINT_XFER_BULK) || \ -+ (_qh_ptr_->ep_type == USB_ENDPOINT_XFER_CONTROL)) -+ -+/** High bandwidth multiplier as encoded in highspeed endpoint descriptors */ -+#define dwc_hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03)) -+ -+/** Packet size for any kind of endpoint descriptor */ -+#define dwc_max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff) -+ -+/** -+ * Returns true if _frame1 is less than or equal to _frame2. The comparison is -+ * done modulo DWC_HFNUM_MAX_FRNUM. This accounts for the rollover of the -+ * frame number when the max frame number is reached. -+ */ -+static inline int dwc_frame_num_le(uint16_t frame1, uint16_t frame2) -+{ -+ return ((frame2 - frame1) & DWC_HFNUM_MAX_FRNUM) <= -+ (DWC_HFNUM_MAX_FRNUM >> 1); -+} -+ -+/** -+ * Returns true if _frame1 is greater than _frame2. The comparison is done -+ * modulo DWC_HFNUM_MAX_FRNUM. This accounts for the rollover of the frame -+ * number when the max frame number is reached. -+ */ -+static inline int dwc_frame_num_gt(uint16_t frame1, uint16_t frame2) -+{ -+ return (frame1 != frame2) && -+ (((frame1 - frame2) & DWC_HFNUM_MAX_FRNUM) < -+ (DWC_HFNUM_MAX_FRNUM >> 1)); -+} -+ -+/** -+ * Increments _frame by the amount specified by _inc. The addition is done -+ * modulo DWC_HFNUM_MAX_FRNUM. Returns the incremented value. -+ */ -+static inline uint16_t dwc_frame_num_inc(uint16_t frame, uint16_t inc) -+{ -+ return (frame + inc) & DWC_HFNUM_MAX_FRNUM; -+} -+ -+static inline uint16_t dwc_full_frame_num(uint16_t frame) -+{ -+ return (frame & DWC_HFNUM_MAX_FRNUM) >> 3; -+} -+ -+static inline uint16_t dwc_micro_frame_num(uint16_t frame) -+{ -+ return frame & 0x7; -+} -+ -+#ifdef DEBUG -+/** -+ * Macro to sample the remaining PHY clocks left in the current frame. This -+ * may be used during debugging to determine the average time it takes to -+ * execute sections of code. There are two possible sample points, "a" and -+ * "b", so the _letter argument must be one of these values. -+ * -+ * To dump the average sample times, read the "hcd_frrem" sysfs attribute. For -+ * example, "cat /sys/devices/lm0/hcd_frrem". 
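The frame-number helpers above compare the frame counter modulo DWC_HFNUM_MAX_FRNUM so the result stays correct across the counter rollover. A standalone copy of the same arithmetic with the mask written out (0x3FFF is assumed here for DWC_HFNUM_MAX_FRNUM, i.e. a 14-bit counter), plus a worked rollover example.

#include <assert.h>
#include <stdint.h>

#define MAX_FRNUM 0x3FFF   /* assumed value of DWC_HFNUM_MAX_FRNUM */

/* frame1 <= frame2, evaluated modulo the counter width (handles rollover). */
static int frame_num_le(uint16_t f1, uint16_t f2)
{
    return ((uint16_t)(f2 - f1) & MAX_FRNUM) <= (MAX_FRNUM >> 1);
}

/* frame + inc, wrapped to the counter width. */
static uint16_t frame_num_inc(uint16_t frame, uint16_t inc)
{
    return (frame + inc) & MAX_FRNUM;
}

int main(void)
{
    /* Scheduling 8 (micro)frames after 0x3FFC wraps around to 0x0004 ... */
    uint16_t due = frame_num_inc(0x3FFC, 8);
    assert(due == 0x0004);
    /* ... and the modulo compare still orders the frames correctly. */
    assert(frame_num_le(due, 0x3FFC) == 0);
    assert(frame_num_le(due, 0x0005) == 1);
    return 0;
}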
-+ */ -+#define dwc_sample_frrem(_hcd, _qh, _letter) \ -+{ \ -+ hfnum_data_t hfnum; \ -+ dwc_otg_qtd_t *qtd; \ -+ qtd = list_entry(_qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry); \ -+ if (usb_pipeint(qtd->urb->pipe) && _qh->start_split_frame != 0 && !qtd->complete_split) { \ -+ hfnum.d32 = dwc_read_reg32(&_hcd->core_if->host_if->host_global_regs->hfnum); \ -+ switch (hfnum.b.frnum & 0x7) { \ -+ case 7: \ -+ _hcd->hfnum_7_samples_##_letter++; \ -+ _hcd->hfnum_7_frrem_accum_##_letter += hfnum.b.frrem; \ -+ break; \ -+ case 0: \ -+ _hcd->hfnum_0_samples_##_letter++; \ -+ _hcd->hfnum_0_frrem_accum_##_letter += hfnum.b.frrem; \ -+ break; \ -+ default: \ -+ _hcd->hfnum_other_samples_##_letter++; \ -+ _hcd->hfnum_other_frrem_accum_##_letter += hfnum.b.frrem; \ -+ break; \ -+ } \ -+ } \ -+} -+#else -+#define dwc_sample_frrem(_hcd, _qh, _letter) -+#endif -+#endif -+#endif /* DWC_DEVICE_ONLY */ ---- /dev/null -+++ b/drivers/usb/dwc_otg/dwc_otg_hcd_intr.c -@@ -0,0 +1,1873 @@ -+/* ========================================================================== -+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_hcd_intr.c $ -+ * $Revision: 1.6.2.1 $ -+ * $Date: 2009-04-22 03:48:22 $ -+ * $Change: 1117667 $ -+ * -+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, -+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless -+ * otherwise expressly agreed to in writing between Synopsys and you. -+ * -+ * The Software IS NOT an item of Licensed Software or Licensed Product under -+ * any End User Software License Agreement or Agreement for Licensed Product -+ * with Synopsys or any supplement thereto. You are permitted to use and -+ * redistribute this Software in source and binary forms, with or without -+ * modification, provided that redistributions of source code must retain this -+ * notice. You may not view, use, disclose, copy or distribute this file or -+ * any information contained herein except pursuant to this license grant from -+ * Synopsys. If you do not agree with this notice, including the disclaimer -+ * below, then you are not authorized to use the Software. -+ * -+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, -+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -+ * DAMAGE. -+ * ========================================================================== */ -+#ifndef DWC_DEVICE_ONLY -+ -+#include -+ -+#include "dwc_otg_driver.h" -+#include "dwc_otg_hcd.h" -+#include "dwc_otg_regs.h" -+ -+/** @file -+ * This file contains the implementation of the HCD Interrupt handlers. -+ */ -+ -+/** This function handles interrupts for the HCD. 
*/ -+int32_t dwc_otg_hcd_handle_intr(dwc_otg_hcd_t *dwc_otg_hcd) -+{ -+ int retval = 0; -+ -+ dwc_otg_core_if_t *core_if = dwc_otg_hcd->core_if; -+ gintsts_data_t gintsts; -+#ifdef DEBUG -+ dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs; -+#endif -+ -+ /* Check if HOST Mode */ -+ if (dwc_otg_is_host_mode(core_if)) { -+ gintsts.d32 = dwc_otg_read_core_intr(core_if); -+ if (!gintsts.d32) { -+ return 0; -+ } -+ -+#ifdef DEBUG -+ /* Don't print debug message in the interrupt handler on SOF */ -+# ifndef DEBUG_SOF -+ if (gintsts.d32 != DWC_SOF_INTR_MASK) -+# endif -+ DWC_DEBUGPL(DBG_HCD, "\n"); -+#endif -+ -+#ifdef DEBUG -+# ifndef DEBUG_SOF -+ if (gintsts.d32 != DWC_SOF_INTR_MASK) -+# endif -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Interrupt Detected gintsts&gintmsk=0x%08x\n", gintsts.d32); -+#endif -+ if (gintsts.b.usbreset) { -+ DWC_PRINT("Usb Reset In Host Mode\n"); -+ } -+ -+ -+ if (gintsts.b.sofintr) { -+ retval |= dwc_otg_hcd_handle_sof_intr(dwc_otg_hcd); -+ } -+ if (gintsts.b.rxstsqlvl) { -+ retval |= dwc_otg_hcd_handle_rx_status_q_level_intr(dwc_otg_hcd); -+ } -+ if (gintsts.b.nptxfempty) { -+ retval |= dwc_otg_hcd_handle_np_tx_fifo_empty_intr(dwc_otg_hcd); -+ } -+ if (gintsts.b.i2cintr) { -+ /** @todo Implement i2cintr handler. */ -+ } -+ if (gintsts.b.portintr) { -+ retval |= dwc_otg_hcd_handle_port_intr(dwc_otg_hcd); -+ } -+ if (gintsts.b.hcintr) { -+ retval |= dwc_otg_hcd_handle_hc_intr(dwc_otg_hcd); -+ } -+ if (gintsts.b.ptxfempty) { -+ retval |= dwc_otg_hcd_handle_perio_tx_fifo_empty_intr(dwc_otg_hcd); -+ } -+#ifdef DEBUG -+# ifndef DEBUG_SOF -+ if (gintsts.d32 != DWC_SOF_INTR_MASK) -+# endif -+ { -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD Finished Servicing Interrupts\n"); -+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD gintsts=0x%08x\n", -+ dwc_read_reg32(&global_regs->gintsts)); -+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD gintmsk=0x%08x\n", -+ dwc_read_reg32(&global_regs->gintmsk)); -+ } -+#endif -+ -+#ifdef DEBUG -+# ifndef DEBUG_SOF -+ if (gintsts.d32 != DWC_SOF_INTR_MASK) -+# endif -+ DWC_DEBUGPL(DBG_HCD, "\n"); -+#endif -+ -+ } -+ -+ S3C2410X_CLEAR_EINTPEND(); -+ -+ return retval; -+} -+ -+#ifdef DWC_TRACK_MISSED_SOFS -+#warning Compiling code to track missed SOFs -+#define FRAME_NUM_ARRAY_SIZE 1000 -+/** -+ * This function is for debug only. -+ */ -+static inline void track_missed_sofs(uint16_t curr_frame_number) -+{ -+ static uint16_t frame_num_array[FRAME_NUM_ARRAY_SIZE]; -+ static uint16_t last_frame_num_array[FRAME_NUM_ARRAY_SIZE]; -+ static int frame_num_idx = 0; -+ static uint16_t last_frame_num = DWC_HFNUM_MAX_FRNUM; -+ static int dumped_frame_num_array = 0; -+ -+ if (frame_num_idx < FRAME_NUM_ARRAY_SIZE) { -+ if (((last_frame_num + 1) & DWC_HFNUM_MAX_FRNUM) != curr_frame_number) { -+ frame_num_array[frame_num_idx] = curr_frame_number; -+ last_frame_num_array[frame_num_idx++] = last_frame_num; -+ } -+ } else if (!dumped_frame_num_array) { -+ int i; -+ printk(KERN_EMERG USB_DWC "Frame Last Frame\n"); -+ printk(KERN_EMERG USB_DWC "----- ----------\n"); -+ for (i = 0; i < FRAME_NUM_ARRAY_SIZE; i++) { -+ printk(KERN_EMERG USB_DWC "0x%04x 0x%04x\n", -+ frame_num_array[i], last_frame_num_array[i]); -+ } -+ dumped_frame_num_array = 1; -+ } -+ last_frame_num = curr_frame_number; -+} -+#endif -+ -+/** -+ * Handles the start-of-frame interrupt in host mode. Non-periodic -+ * transactions may be queued to the DWC_otg controller for the current -+ * (micro)frame. Periodic transactions may be queued to the controller for the -+ * next (micro)frame. 
-+ */ -+int32_t dwc_otg_hcd_handle_sof_intr(dwc_otg_hcd_t *hcd) -+{ -+ hfnum_data_t hfnum; -+ struct list_head *qh_entry; -+ dwc_otg_qh_t *qh; -+ dwc_otg_transaction_type_e tr_type; -+ gintsts_data_t gintsts = {.d32 = 0}; -+ -+ hfnum.d32 = dwc_read_reg32(&hcd->core_if->host_if->host_global_regs->hfnum); -+ -+#ifdef DEBUG_SOF -+ DWC_DEBUGPL(DBG_HCD, "--Start of Frame Interrupt--\n"); -+#endif -+ hcd->frame_number = hfnum.b.frnum; -+ -+#ifdef DEBUG -+ hcd->frrem_accum += hfnum.b.frrem; -+ hcd->frrem_samples++; -+#endif -+ -+#ifdef DWC_TRACK_MISSED_SOFS -+ track_missed_sofs(hcd->frame_number); -+#endif -+ -+ /* Determine whether any periodic QHs should be executed. */ -+ qh_entry = hcd->periodic_sched_inactive.next; -+ while (qh_entry != &hcd->periodic_sched_inactive) { -+ qh = list_entry(qh_entry, dwc_otg_qh_t, qh_list_entry); -+ qh_entry = qh_entry->next; -+ if (dwc_frame_num_le(qh->sched_frame, hcd->frame_number)) { -+ /* -+ * Move QH to the ready list to be executed next -+ * (micro)frame. -+ */ -+ list_move(&qh->qh_list_entry, &hcd->periodic_sched_ready); -+ } -+ } -+ -+ tr_type = dwc_otg_hcd_select_transactions(hcd); -+ if (tr_type != DWC_OTG_TRANSACTION_NONE) { -+ dwc_otg_hcd_queue_transactions(hcd, tr_type); -+ } -+ -+ /* Clear interrupt */ -+ gintsts.b.sofintr = 1; -+ dwc_write_reg32(&hcd->core_if->core_global_regs->gintsts, gintsts.d32); -+ -+ return 1; -+} -+ -+/** Handles the Rx Status Queue Level Interrupt, which indicates that there is at -+ * least one packet in the Rx FIFO. The packets are moved from the FIFO to -+ * memory if the DWC_otg controller is operating in Slave mode. */ -+int32_t dwc_otg_hcd_handle_rx_status_q_level_intr(dwc_otg_hcd_t *dwc_otg_hcd) -+{ -+ host_grxsts_data_t grxsts; -+ dwc_hc_t *hc = NULL; -+ -+ DWC_DEBUGPL(DBG_HCD, "--RxStsQ Level Interrupt--\n"); -+ -+ grxsts.d32 = dwc_read_reg32(&dwc_otg_hcd->core_if->core_global_regs->grxstsp); -+ -+ hc = dwc_otg_hcd->hc_ptr_array[grxsts.b.chnum]; -+ -+ /* Packet Status */ -+ DWC_DEBUGPL(DBG_HCDV, " Ch num = %d\n", grxsts.b.chnum); -+ DWC_DEBUGPL(DBG_HCDV, " Count = %d\n", grxsts.b.bcnt); -+ DWC_DEBUGPL(DBG_HCDV, " DPID = %d, hc.dpid = %d\n", grxsts.b.dpid, hc->data_pid_start); -+ DWC_DEBUGPL(DBG_HCDV, " PStatus = %d\n", grxsts.b.pktsts); -+ -+ switch (grxsts.b.pktsts) { -+ case DWC_GRXSTS_PKTSTS_IN: -+ /* Read the data into the host buffer. */ -+ if (grxsts.b.bcnt > 0) { -+ dwc_otg_read_packet(dwc_otg_hcd->core_if, -+ hc->xfer_buff, -+ grxsts.b.bcnt); -+ -+ /* Update the HC fields for the next packet received. */ -+ hc->xfer_count += grxsts.b.bcnt; -+ hc->xfer_buff += grxsts.b.bcnt; -+ } -+ -+ case DWC_GRXSTS_PKTSTS_IN_XFER_COMP: -+ case DWC_GRXSTS_PKTSTS_DATA_TOGGLE_ERR: -+ case DWC_GRXSTS_PKTSTS_CH_HALTED: -+ /* Handled in interrupt, just ignore data */ -+ break; -+ default: -+ DWC_ERROR("RX_STS_Q Interrupt: Unknown status %d\n", grxsts.b.pktsts); -+ break; -+ } -+ -+ return 1; -+} -+ -+/** This interrupt occurs when the non-periodic Tx FIFO is half-empty. More -+ * data packets may be written to the FIFO for OUT transfers. More requests -+ * may be written to the non-periodic request queue for IN transfers. This -+ * interrupt is enabled only in Slave mode. */ -+int32_t dwc_otg_hcd_handle_np_tx_fifo_empty_intr(dwc_otg_hcd_t *dwc_otg_hcd) -+{ -+ DWC_DEBUGPL(DBG_HCD, "--Non-Periodic TxFIFO Empty Interrupt--\n"); -+ dwc_otg_hcd_queue_transactions(dwc_otg_hcd, -+ DWC_OTG_TRANSACTION_NON_PERIODIC); -+ return 1; -+} -+ -+/** This interrupt occurs when the periodic Tx FIFO is half-empty. 
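The SOF handler above acknowledges the interrupt by writing a GINTSTS word with only the sofintr bit set: GINTSTS is write-1-to-clear, so the single write clears that one source and leaves every other pending bit alone, with no read-modify-write needed. A minimal sketch of the W1C acknowledge; the bit position is assumed for illustration only.

#include <stdint.h>

#define SOFINTR_BIT (1u << 3)    /* assumed SOF bit position, illustrative */

/*
 * Write-1-to-clear acknowledge: only the bit written as 1 is cleared,
 * other pending interrupt bits are untouched.  A read-modify-write here
 * could accidentally clear interrupts that arrived in between.
 */
static void ack_sof(volatile uint32_t *gintsts)
{
    *gintsts = SOFINTR_BIT;
}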
More data -+ * packets may be written to the FIFO for OUT transfers. More requests may be -+ * written to the periodic request queue for IN transfers. This interrupt is -+ * enabled only in Slave mode. */ -+int32_t dwc_otg_hcd_handle_perio_tx_fifo_empty_intr(dwc_otg_hcd_t *dwc_otg_hcd) -+{ -+ DWC_DEBUGPL(DBG_HCD, "--Periodic TxFIFO Empty Interrupt--\n"); -+ dwc_otg_hcd_queue_transactions(dwc_otg_hcd, -+ DWC_OTG_TRANSACTION_PERIODIC); -+ return 1; -+} -+ -+/** There are multiple conditions that can cause a port interrupt. This function -+ * determines which interrupt conditions have occurred and handles them -+ * appropriately. */ -+int32_t dwc_otg_hcd_handle_port_intr(dwc_otg_hcd_t *dwc_otg_hcd) -+{ -+ int retval = 0; -+ hprt0_data_t hprt0; -+ hprt0_data_t hprt0_modify; -+ -+ hprt0.d32 = dwc_read_reg32(dwc_otg_hcd->core_if->host_if->hprt0); -+ hprt0_modify.d32 = dwc_read_reg32(dwc_otg_hcd->core_if->host_if->hprt0); -+ -+ /* Clear appropriate bits in HPRT0 to clear the interrupt bit in -+ * GINTSTS */ -+ -+ hprt0_modify.b.prtena = 0; -+ hprt0_modify.b.prtconndet = 0; -+ hprt0_modify.b.prtenchng = 0; -+ hprt0_modify.b.prtovrcurrchng = 0; -+ -+ /* Port Connect Detected -+ * Set flag and clear if detected */ -+ if (hprt0.b.prtconndet) { -+ DWC_DEBUGPL(DBG_HCD, "--Port Interrupt HPRT0=0x%08x " -+ "Port Connect Detected--\n", hprt0.d32); -+ dwc_otg_hcd->flags.b.port_connect_status_change = 1; -+ dwc_otg_hcd->flags.b.port_connect_status = 1; -+ hprt0_modify.b.prtconndet = 1; -+ -+ /* B-Device has connected, Delete the connection timer. */ -+ del_timer( &dwc_otg_hcd->conn_timer ); -+ -+ /* The Hub driver asserts a reset when it sees port connect -+ * status change flag */ -+ retval |= 1; -+ } -+ -+ /* Port Enable Changed -+ * Clear if detected - Set internal flag if disabled */ -+ if (hprt0.b.prtenchng) { -+ DWC_DEBUGPL(DBG_HCD, " --Port Interrupt HPRT0=0x%08x " -+ "Port Enable Changed--\n", hprt0.d32); -+ hprt0_modify.b.prtenchng = 1; -+ if (hprt0.b.prtena == 1) { -+ int do_reset = 0; -+ dwc_otg_core_params_t *params = dwc_otg_hcd->core_if->core_params; -+ dwc_otg_core_global_regs_t *global_regs = dwc_otg_hcd->core_if->core_global_regs; -+ dwc_otg_host_if_t *host_if = dwc_otg_hcd->core_if->host_if; -+ -+ /* Check if we need to adjust the PHY clock speed for -+ * low power and adjust it */ -+ if (params->host_support_fs_ls_low_power) { -+ gusbcfg_data_t usbcfg; -+ -+ usbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg); -+ -+ if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_LOW_SPEED || -+ hprt0.b.prtspd == DWC_HPRT0_PRTSPD_FULL_SPEED) { -+ /* -+ * Low power -+ */ -+ hcfg_data_t hcfg; -+ if (usbcfg.b.phylpwrclksel == 0) { -+ /* Set PHY low power clock select for FS/LS devices */ -+ usbcfg.b.phylpwrclksel = 1; -+ dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32); -+ do_reset = 1; -+ } -+ -+ hcfg.d32 = dwc_read_reg32(&host_if->host_global_regs->hcfg); -+ -+ if (hprt0.b.prtspd == DWC_HPRT0_PRTSPD_LOW_SPEED && -+ params->host_ls_low_power_phy_clk == -+ DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ) { -+ /* 6 MHZ */ -+ DWC_DEBUGPL(DBG_CIL, "FS_PHY programming HCFG to 6 MHz (Low Power)\n"); -+ if (hcfg.b.fslspclksel != DWC_HCFG_6_MHZ) { -+ hcfg.b.fslspclksel = DWC_HCFG_6_MHZ; -+ dwc_write_reg32(&host_if->host_global_regs->hcfg, -+ hcfg.d32); -+ do_reset = 1; -+ } -+ } else { -+ /* 48 MHZ */ -+ DWC_DEBUGPL(DBG_CIL, "FS_PHY programming HCFG to 48 MHz ()\n"); -+ if (hcfg.b.fslspclksel != DWC_HCFG_48_MHZ) { -+ hcfg.b.fslspclksel = DWC_HCFG_48_MHZ; -+ dwc_write_reg32(&host_if->host_global_regs->hcfg, -+ hcfg.d32); -+ 
do_reset = 1; -+ } -+ } -+ } else { -+ /* -+ * Not low power -+ */ -+ if (usbcfg.b.phylpwrclksel == 1) { -+ usbcfg.b.phylpwrclksel = 0; -+ dwc_write_reg32(&global_regs->gusbcfg, usbcfg.d32); -+ do_reset = 1; -+ } -+ } -+ -+ if (do_reset) { -+ tasklet_schedule(dwc_otg_hcd->reset_tasklet); -+ } -+ } -+ -+ if (!do_reset) { -+ /* Port has been enabled set the reset change flag */ -+ dwc_otg_hcd->flags.b.port_reset_change = 1; -+ } -+ } else { -+ dwc_otg_hcd->flags.b.port_enable_change = 1; -+ } -+ retval |= 1; -+ } -+ -+ /** Overcurrent Change Interrupt */ -+ if (hprt0.b.prtovrcurrchng) { -+ DWC_DEBUGPL(DBG_HCD, " --Port Interrupt HPRT0=0x%08x " -+ "Port Overcurrent Changed--\n", hprt0.d32); -+ dwc_otg_hcd->flags.b.port_over_current_change = 1; -+ hprt0_modify.b.prtovrcurrchng = 1; -+ retval |= 1; -+ } -+ -+ /* Clear Port Interrupts */ -+ dwc_write_reg32(dwc_otg_hcd->core_if->host_if->hprt0, hprt0_modify.d32); -+ -+ return retval; -+} -+ -+/** This interrupt indicates that one or more host channels has a pending -+ * interrupt. There are multiple conditions that can cause each host channel -+ * interrupt. This function determines which conditions have occurred for each -+ * host channel interrupt and handles them appropriately. */ -+int32_t dwc_otg_hcd_handle_hc_intr(dwc_otg_hcd_t *dwc_otg_hcd) -+{ -+ int i; -+ int retval = 0; -+ haint_data_t haint; -+ -+ /* Clear appropriate bits in HCINTn to clear the interrupt bit in -+ * GINTSTS */ -+ -+ haint.d32 = dwc_otg_read_host_all_channels_intr(dwc_otg_hcd->core_if); -+ -+ for (i = 0; i < dwc_otg_hcd->core_if->core_params->host_channels; i++) { -+ if (haint.b2.chint & (1 << i)) { -+ retval |= dwc_otg_hcd_handle_hc_n_intr(dwc_otg_hcd, i); -+ } -+ } -+ -+ return retval; -+} -+ -+/* Macro used to clear one channel interrupt */ -+#define clear_hc_int(_hc_regs_, _intr_) \ -+do { \ -+ hcint_data_t hcint_clear = {.d32 = 0}; \ -+ hcint_clear.b._intr_ = 1; \ -+ dwc_write_reg32(&(_hc_regs_)->hcint, hcint_clear.d32); \ -+} while (0) -+ -+/* -+ * Macro used to disable one channel interrupt. Channel interrupts are -+ * disabled when the channel is halted or released by the interrupt handler. -+ * There is no need to handle further interrupts of that type until the -+ * channel is re-assigned. In fact, subsequent handling may cause crashes -+ * because the channel structures are cleaned up when the channel is released. -+ */ -+#define disable_hc_int(_hc_regs_, _intr_) \ -+do { \ -+ hcintmsk_data_t hcintmsk = {.d32 = 0}; \ -+ hcintmsk.b._intr_ = 1; \ -+ dwc_modify_reg32(&(_hc_regs_)->hcintmsk, hcintmsk.d32, 0); \ -+} while (0) -+ -+/** -+ * Gets the actual length of a transfer after the transfer halts. _halt_status -+ * holds the reason for the halt. -+ * -+ * For IN transfers where halt_status is DWC_OTG_HC_XFER_COMPLETE, -+ * *short_read is set to 1 upon return if less than the requested -+ * number of bytes were transferred. Otherwise, *short_read is set to 0 upon -+ * return. short_read may also be NULL on entry, in which case it remains -+ * unchanged. 
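The clear_hc_int and disable_hc_int macros above lean on the driver's union-of-bitfields register views: HCINTn is write-1-to-clear, so acknowledging an interrupt writes only the selected bit, while HCINTMSKn is a plain enable mask that is cleared with a read-modify-write. The sketch below models that difference against fake registers held in ordinary variables; the three-field layout is invented for illustration and is far smaller than the real HCINT.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical channel-interrupt layout, for illustration only. */
typedef union {
    uint32_t d32;
    struct {
        uint32_t xfercompl : 1;
        uint32_t chhltd    : 1;
        uint32_t nak       : 1;
        uint32_t reserved  : 29;
    } b;
} hcint_sketch_t;

static uint32_t fake_hcint    = 0x5;  /* xfercompl and nak latched */
static uint32_t fake_hcintmsk = 0x7;  /* all three sources enabled */

int main(void)
{
    hcint_sketch_t sel = { .d32 = 0 };

    sel.b.nak = 1;

    /* HCINT is write-1-to-clear: writing sel.d32 acknowledges only the
     * selected bit; the register write is modelled here by clearing it. */
    fake_hcint &= ~sel.d32;

    /* HCINTMSK is an ordinary enable mask: the driver clears the bit with a
     * read-modify-write (dwc_modify_reg32 in the patch). */
    fake_hcintmsk &= ~sel.d32;

    printf("hcint=0x%x hcintmsk=0x%x\n",
           (unsigned)fake_hcint, (unsigned)fake_hcintmsk);
    return 0;
}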
-+ */ -+static uint32_t get_actual_xfer_length(dwc_hc_t *hc, -+ dwc_otg_hc_regs_t *hc_regs, -+ dwc_otg_qtd_t *qtd, -+ dwc_otg_halt_status_e halt_status, -+ int *short_read) -+{ -+ hctsiz_data_t hctsiz; -+ uint32_t length; -+ -+ if (short_read != NULL) { -+ *short_read = 0; -+ } -+ hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz); -+ -+ if (halt_status == DWC_OTG_HC_XFER_COMPLETE) { -+ if (hc->ep_is_in) { -+ length = hc->xfer_len - hctsiz.b.xfersize; -+ if (short_read != NULL) { -+ *short_read = (hctsiz.b.xfersize != 0); -+ } -+ } else if (hc->qh->do_split) { -+ length = qtd->ssplit_out_xfer_count; -+ } else { -+ length = hc->xfer_len; -+ } -+ } else { -+ /* -+ * Must use the hctsiz.pktcnt field to determine how much data -+ * has been transferred. This field reflects the number of -+ * packets that have been transferred via the USB. This is -+ * always an integral number of packets if the transfer was -+ * halted before its normal completion. (Can't use the -+ * hctsiz.xfersize field because that reflects the number of -+ * bytes transferred via the AHB, not the USB). -+ */ -+ length = (hc->start_pkt_count - hctsiz.b.pktcnt) * hc->max_packet; -+ } -+ -+ return length; -+} -+ -+/** -+ * Updates the state of the URB after a Transfer Complete interrupt on the -+ * host channel. Updates the actual_length field of the URB based on the -+ * number of bytes transferred via the host channel. Sets the URB status -+ * if the data transfer is finished. -+ * -+ * @return 1 if the data transfer specified by the URB is completely finished, -+ * 0 otherwise. -+ */ -+static int update_urb_state_xfer_comp(dwc_hc_t *hc, -+ dwc_otg_hc_regs_t *hc_regs, -+ struct urb *urb, -+ dwc_otg_qtd_t *qtd) -+{ -+ int xfer_done = 0; -+ int short_read = 0; -+ int overflow_read=0; -+ uint32_t len = 0; -+ int max_packet; -+ -+ len = get_actual_xfer_length(hc, hc_regs, qtd, -+ DWC_OTG_HC_XFER_COMPLETE, -+ &short_read); -+ -+ /* Data overflow case: by Steven */ -+ if (len > urb->transfer_buffer_length) { -+ len = urb->transfer_buffer_length; -+ overflow_read = 1; -+ } -+ -+ /* non DWORD-aligned buffer case handling. */ -+ if (((uint32_t)hc->xfer_buff & 0x3) && len && hc->qh->dw_align_buf && hc->ep_is_in) { -+ memcpy(urb->transfer_buffer + urb->actual_length, hc->qh->dw_align_buf, len); -+ } -+ urb->actual_length +=len; -+ -+ max_packet = usb_maxpacket(urb->dev, urb->pipe, !usb_pipein(urb->pipe)); -+ if((len) && usb_pipebulk(urb->pipe) && -+ (urb->transfer_flags & URB_ZERO_PACKET) && -+ (urb->actual_length == urb->transfer_buffer_length) && -+ (!(urb->transfer_buffer_length % max_packet))) { -+ } else if (short_read || urb->actual_length == urb->transfer_buffer_length) { -+ xfer_done = 1; -+ if (short_read && (urb->transfer_flags & URB_SHORT_NOT_OK)) { -+ urb->status = -EREMOTEIO; -+ } else if (overflow_read) { -+ urb->status = -EOVERFLOW; -+ } else { -+ urb->status = 0; -+ } -+ } -+ -+#ifdef DEBUG -+ { -+ hctsiz_data_t hctsiz; -+ hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz); -+ DWC_DEBUGPL(DBG_HCDV, "DWC_otg: %s: %s, channel %d\n", -+ __func__, (hc->ep_is_in ? 
"IN" : "OUT"), hc->hc_num); -+ DWC_DEBUGPL(DBG_HCDV, " hc->xfer_len %d\n", hc->xfer_len); -+ DWC_DEBUGPL(DBG_HCDV, " hctsiz.xfersize %d\n", hctsiz.b.xfersize); -+ DWC_DEBUGPL(DBG_HCDV, " urb->transfer_buffer_length %d\n", -+ urb->transfer_buffer_length); -+ DWC_DEBUGPL(DBG_HCDV, " urb->actual_length %d\n", urb->actual_length); -+ DWC_DEBUGPL(DBG_HCDV, " short_read %d, xfer_done %d\n", -+ short_read, xfer_done); -+ } -+#endif -+ -+ return xfer_done; -+} -+ -+/* -+ * Save the starting data toggle for the next transfer. The data toggle is -+ * saved in the QH for non-control transfers and it's saved in the QTD for -+ * control transfers. -+ */ -+static void save_data_toggle(dwc_hc_t *hc, -+ dwc_otg_hc_regs_t *hc_regs, -+ dwc_otg_qtd_t *qtd) -+{ -+ hctsiz_data_t hctsiz; -+ hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz); -+ -+ if (hc->ep_type != DWC_OTG_EP_TYPE_CONTROL) { -+ dwc_otg_qh_t *qh = hc->qh; -+ if (hctsiz.b.pid == DWC_HCTSIZ_DATA0) { -+ qh->data_toggle = DWC_OTG_HC_PID_DATA0; -+ } else { -+ qh->data_toggle = DWC_OTG_HC_PID_DATA1; -+ } -+ } else { -+ if (hctsiz.b.pid == DWC_HCTSIZ_DATA0) { -+ qtd->data_toggle = DWC_OTG_HC_PID_DATA0; -+ } else { -+ qtd->data_toggle = DWC_OTG_HC_PID_DATA1; -+ } -+ } -+} -+ -+/** -+ * Frees the first QTD in the QH's list if free_qtd is 1. For non-periodic -+ * QHs, removes the QH from the active non-periodic schedule. If any QTDs are -+ * still linked to the QH, the QH is added to the end of the inactive -+ * non-periodic schedule. For periodic QHs, removes the QH from the periodic -+ * schedule if no more QTDs are linked to the QH. -+ */ -+static void deactivate_qh(dwc_otg_hcd_t *hcd, -+ dwc_otg_qh_t *qh, -+ int free_qtd) -+{ -+ int continue_split = 0; -+ dwc_otg_qtd_t *qtd; -+ -+ DWC_DEBUGPL(DBG_HCDV, " %s(%p,%p,%d)\n", __func__, hcd, qh, free_qtd); -+ -+ qtd = list_entry(qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry); -+ -+ if (qtd->complete_split) { -+ continue_split = 1; -+ } else if (qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_MID || -+ qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_END) { -+ continue_split = 1; -+ } -+ -+ if (free_qtd) { -+ dwc_otg_hcd_qtd_remove_and_free(hcd, qtd); -+ continue_split = 0; -+ } -+ -+ qh->channel = NULL; -+ qh->qtd_in_process = NULL; -+ dwc_otg_hcd_qh_deactivate(hcd, qh, continue_split); -+} -+ -+/** -+ * Updates the state of an Isochronous URB when the transfer is stopped for -+ * any reason. The fields of the current entry in the frame descriptor array -+ * are set based on the transfer state and the input _halt_status. Completes -+ * the Isochronous URB if all the URB frames have been completed. -+ * -+ * @return DWC_OTG_HC_XFER_COMPLETE if there are more frames remaining to be -+ * transferred in the URB. Otherwise return DWC_OTG_HC_XFER_URB_COMPLETE. -+ */ -+static dwc_otg_halt_status_e -+update_isoc_urb_state(dwc_otg_hcd_t *hcd, -+ dwc_hc_t *hc, -+ dwc_otg_hc_regs_t *hc_regs, -+ dwc_otg_qtd_t *qtd, -+ dwc_otg_halt_status_e halt_status) -+{ -+ struct urb *urb = qtd->urb; -+ dwc_otg_halt_status_e ret_val = halt_status; -+ struct usb_iso_packet_descriptor *frame_desc; -+ -+ frame_desc = &urb->iso_frame_desc[qtd->isoc_frame_index]; -+ switch (halt_status) { -+ case DWC_OTG_HC_XFER_COMPLETE: -+ frame_desc->status = 0; -+ frame_desc->actual_length = -+ get_actual_xfer_length(hc, hc_regs, qtd, -+ halt_status, NULL); -+ -+ /* non DWORD-aligned buffer case handling. 
*/ -+ if (frame_desc->actual_length && ((uint32_t)hc->xfer_buff & 0x3) && -+ hc->qh->dw_align_buf && hc->ep_is_in) { -+ memcpy(urb->transfer_buffer + frame_desc->offset + qtd->isoc_split_offset, -+ hc->qh->dw_align_buf, frame_desc->actual_length); -+ -+ } -+ -+ break; -+ case DWC_OTG_HC_XFER_FRAME_OVERRUN: -+ printk("DWC_OTG_HC_XFER_FRAME_OVERRUN: %d\n", halt_status); -+ urb->error_count++; -+ if (hc->ep_is_in) { -+ frame_desc->status = -ENOSR; -+ } else { -+ frame_desc->status = -ECOMM; -+ } -+ frame_desc->actual_length = 0; -+ break; -+ case DWC_OTG_HC_XFER_BABBLE_ERR: -+ printk("DWC_OTG_HC_XFER_BABBLE_ERR: %d\n", halt_status); -+ urb->error_count++; -+ frame_desc->status = -EOVERFLOW; -+ /* Don't need to update actual_length in this case. */ -+ break; -+ case DWC_OTG_HC_XFER_XACT_ERR: -+ printk("DWC_OTG_HC_XFER_XACT_ERR: %d\n", halt_status); -+ urb->error_count++; -+ frame_desc->status = -EPROTO; -+ frame_desc->actual_length = -+ get_actual_xfer_length(hc, hc_regs, qtd, -+ halt_status, NULL); -+ -+ /* non DWORD-aligned buffer case handling. */ -+ if (frame_desc->actual_length && ((uint32_t)hc->xfer_buff & 0x3) && -+ hc->qh->dw_align_buf && hc->ep_is_in) { -+ memcpy(urb->transfer_buffer + frame_desc->offset + qtd->isoc_split_offset, -+ hc->qh->dw_align_buf, frame_desc->actual_length); -+ -+ } -+ break; -+ default: -+ -+ DWC_ERROR("%s: Unhandled _halt_status (%d)\n", __func__, -+ halt_status); -+ BUG(); -+ break; -+ } -+ -+ if (++qtd->isoc_frame_index == urb->number_of_packets) { -+ /* -+ * urb->status is not used for isoc transfers. -+ * The individual frame_desc statuses are used instead. -+ */ -+ dwc_otg_hcd_complete_urb(hcd, urb, 0); -+ ret_val = DWC_OTG_HC_XFER_URB_COMPLETE; -+ } else { -+ ret_val = DWC_OTG_HC_XFER_COMPLETE; -+ } -+ -+ return ret_val; -+} -+ -+/** -+ * Releases a host channel for use by other transfers. Attempts to select and -+ * queue more transactions since at least one host channel is available. -+ * -+ * @param hcd The HCD state structure. -+ * @param hc The host channel to release. -+ * @param qtd The QTD associated with the host channel. This QTD may be freed -+ * if the transfer is complete or an error has occurred. -+ * @param halt_status Reason the channel is being released. This status -+ * determines the actions taken by this function. -+ */ -+static void release_channel(dwc_otg_hcd_t *hcd, -+ dwc_hc_t *hc, -+ dwc_otg_qtd_t *qtd, -+ dwc_otg_halt_status_e halt_status) -+{ -+ dwc_otg_transaction_type_e tr_type; -+ int free_qtd; -+ -+ DWC_DEBUGPL(DBG_HCDV, " %s: channel %d, halt_status %d\n", -+ __func__, hc->hc_num, halt_status); -+ -+ switch (halt_status) { -+ case DWC_OTG_HC_XFER_URB_COMPLETE: -+ free_qtd = 1; -+ break; -+ case DWC_OTG_HC_XFER_AHB_ERR: -+ case DWC_OTG_HC_XFER_STALL: -+ case DWC_OTG_HC_XFER_BABBLE_ERR: -+ free_qtd = 1; -+ break; -+ case DWC_OTG_HC_XFER_XACT_ERR: -+ if (qtd->error_count >= 3) { -+ DWC_DEBUGPL(DBG_HCDV, " Complete URB with transaction error\n"); -+ free_qtd = 1; -+ qtd->urb->status = -EPROTO; -+ dwc_otg_hcd_complete_urb(hcd, qtd->urb, -EPROTO); -+ } else { -+ free_qtd = 0; -+ } -+ break; -+ case DWC_OTG_HC_XFER_URB_DEQUEUE: -+ /* -+ * The QTD has already been removed and the QH has been -+ * deactivated. Don't want to do anything except release the -+ * host channel and try to queue more transfers. 
-+ */ -+ goto cleanup; -+ case DWC_OTG_HC_XFER_NO_HALT_STATUS: -+ DWC_ERROR("%s: No halt_status, channel %d\n", __func__, hc->hc_num); -+ free_qtd = 0; -+ break; -+ default: -+ free_qtd = 0; -+ break; -+ } -+ -+ deactivate_qh(hcd, hc->qh, free_qtd); -+ -+ cleanup: -+ /* -+ * Release the host channel for use by other transfers. The cleanup -+ * function clears the channel interrupt enables and conditions, so -+ * there's no need to clear the Channel Halted interrupt separately. -+ */ -+ dwc_otg_hc_cleanup(hcd->core_if, hc); -+ list_add_tail(&hc->hc_list_entry, &hcd->free_hc_list); -+ -+ switch (hc->ep_type) { -+ case DWC_OTG_EP_TYPE_CONTROL: -+ case DWC_OTG_EP_TYPE_BULK: -+ hcd->non_periodic_channels--; -+ break; -+ -+ default: -+ /* -+ * Don't release reservations for periodic channels here. -+ * That's done when a periodic transfer is descheduled (i.e. -+ * when the QH is removed from the periodic schedule). -+ */ -+ break; -+ } -+ -+ /* Try to queue more transfers now that there's a free channel. */ -+ tr_type = dwc_otg_hcd_select_transactions(hcd); -+ if (tr_type != DWC_OTG_TRANSACTION_NONE) { -+ dwc_otg_hcd_queue_transactions(hcd, tr_type); -+ } -+} -+ -+/** -+ * Halts a host channel. If the channel cannot be halted immediately because -+ * the request queue is full, this function ensures that the FIFO empty -+ * interrupt for the appropriate queue is enabled so that the halt request can -+ * be queued when there is space in the request queue. -+ * -+ * This function may also be called in DMA mode. In that case, the channel is -+ * simply released since the core always halts the channel automatically in -+ * DMA mode. -+ */ -+static void halt_channel(dwc_otg_hcd_t *hcd, -+ dwc_hc_t *hc, -+ dwc_otg_qtd_t *qtd, -+ dwc_otg_halt_status_e halt_status) -+{ -+ if (hcd->core_if->dma_enable) { -+ release_channel(hcd, hc, qtd, halt_status); -+ return; -+ } -+ -+ /* Slave mode processing... */ -+ dwc_otg_hc_halt(hcd->core_if, hc, halt_status); -+ -+ if (hc->halt_on_queue) { -+ gintmsk_data_t gintmsk = {.d32 = 0}; -+ dwc_otg_core_global_regs_t *global_regs; -+ global_regs = hcd->core_if->core_global_regs; -+ -+ if (hc->ep_type == DWC_OTG_EP_TYPE_CONTROL || -+ hc->ep_type == DWC_OTG_EP_TYPE_BULK) { -+ /* -+ * Make sure the Non-periodic Tx FIFO empty interrupt -+ * is enabled so that the non-periodic schedule will -+ * be processed. -+ */ -+ gintmsk.b.nptxfempty = 1; -+ dwc_modify_reg32(&global_regs->gintmsk, 0, gintmsk.d32); -+ } else { -+ /* -+ * Move the QH from the periodic queued schedule to -+ * the periodic assigned schedule. This allows the -+ * halt to be queued when the periodic schedule is -+ * processed. -+ */ -+ list_move(&hc->qh->qh_list_entry, -+ &hcd->periodic_sched_assigned); -+ -+ /* -+ * Make sure the Periodic Tx FIFO Empty interrupt is -+ * enabled so that the periodic schedule will be -+ * processed. -+ */ -+ gintmsk.b.ptxfempty = 1; -+ dwc_modify_reg32(&global_regs->gintmsk, 0, gintmsk.d32); -+ } -+ } -+} -+ -+/** -+ * Performs common cleanup for non-periodic transfers after a Transfer -+ * Complete interrupt. This function should be called after any endpoint type -+ * specific handling is finished to release the host channel. 
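halt_channel above only programs an explicit halt in Slave mode; in DMA mode the core halts the channel on its own, so the channel is simply released. When a Slave-mode halt cannot be queued immediately, the matching Tx-FIFO-empty interrupt is unmasked so the halt gets queued once request-queue space frees up. A condensed sketch of that decision flow, with the hardware actions reduced to prints:

#include <stdio.h>

enum ep_kind { EP_CONTROL, EP_BULK, EP_INTR, EP_ISOC };

/* Decision flow of halt_channel, hardware actions stubbed out. */
static void halt_channel_sketch(int dma_enable, int halt_on_queue,
                                enum ep_kind ep)
{
    if (dma_enable) {
        puts("DMA mode: the core halts the channel itself, just release it");
        return;
    }

    puts("Slave mode: program an explicit channel halt");

    if (!halt_on_queue)
        return;

    /* The halt could not be queued: unmask the FIFO-empty interrupt that
     * will re-run the matching schedule and queue the halt later. */
    if (ep == EP_CONTROL || ep == EP_BULK)
        puts("  -> unmask non-periodic Tx FIFO empty");
    else
        puts("  -> move QH to periodic_sched_assigned, unmask periodic Tx FIFO empty");
}

int main(void)
{
    halt_channel_sketch(1, 0, EP_BULK);  /* DMA mode */
    halt_channel_sketch(0, 1, EP_INTR);  /* Slave mode, request queue full */
    return 0;
}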
-+ */ -+static void complete_non_periodic_xfer(dwc_otg_hcd_t *hcd, -+ dwc_hc_t *hc, -+ dwc_otg_hc_regs_t *hc_regs, -+ dwc_otg_qtd_t *qtd, -+ dwc_otg_halt_status_e halt_status) -+{ -+ hcint_data_t hcint; -+ -+ qtd->error_count = 0; -+ -+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); -+ if (hcint.b.nyet) { -+ /* -+ * Got a NYET on the last transaction of the transfer. This -+ * means that the endpoint should be in the PING state at the -+ * beginning of the next transfer. -+ */ -+ hc->qh->ping_state = 1; -+ clear_hc_int(hc_regs, nyet); -+ } -+ -+ /* -+ * Always halt and release the host channel to make it available for -+ * more transfers. There may still be more phases for a control -+ * transfer or more data packets for a bulk transfer at this point, -+ * but the host channel is still halted. A channel will be reassigned -+ * to the transfer when the non-periodic schedule is processed after -+ * the channel is released. This allows transactions to be queued -+ * properly via dwc_otg_hcd_queue_transactions, which also enables the -+ * Tx FIFO Empty interrupt if necessary. -+ */ -+ if (hc->ep_is_in) { -+ /* -+ * IN transfers in Slave mode require an explicit disable to -+ * halt the channel. (In DMA mode, this call simply releases -+ * the channel.) -+ */ -+ halt_channel(hcd, hc, qtd, halt_status); -+ } else { -+ /* -+ * The channel is automatically disabled by the core for OUT -+ * transfers in Slave mode. -+ */ -+ release_channel(hcd, hc, qtd, halt_status); -+ } -+} -+ -+/** -+ * Performs common cleanup for periodic transfers after a Transfer Complete -+ * interrupt. This function should be called after any endpoint type specific -+ * handling is finished to release the host channel. -+ */ -+static void complete_periodic_xfer(dwc_otg_hcd_t *hcd, -+ dwc_hc_t *hc, -+ dwc_otg_hc_regs_t *hc_regs, -+ dwc_otg_qtd_t *qtd, -+ dwc_otg_halt_status_e halt_status) -+{ -+ hctsiz_data_t hctsiz; -+ qtd->error_count = 0; -+ -+ hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz); -+ if (!hc->ep_is_in || hctsiz.b.pktcnt == 0) { -+ /* Core halts channel in these cases. */ -+ release_channel(hcd, hc, qtd, halt_status); -+ } else { -+ /* Flush any outstanding requests from the Tx queue. */ -+ halt_channel(hcd, hc, qtd, halt_status); -+ } -+} -+ -+/** -+ * Handles a host channel Transfer Complete interrupt. This handler may be -+ * called in either DMA mode or Slave mode. -+ */ -+static int32_t handle_hc_xfercomp_intr(dwc_otg_hcd_t *hcd, -+ dwc_hc_t *hc, -+ dwc_otg_hc_regs_t *hc_regs, -+ dwc_otg_qtd_t *qtd) -+{ -+ int urb_xfer_done; -+ dwc_otg_halt_status_e halt_status = DWC_OTG_HC_XFER_COMPLETE; -+ struct urb *urb = qtd->urb; -+ int pipe_type = usb_pipetype(urb->pipe); -+ -+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " -+ "Transfer Complete--\n", hc->hc_num); -+ -+ /* -+ * Handle xfer complete on CSPLIT. -+ */ -+ if (hc->qh->do_split) { -+ qtd->complete_split = 0; -+ } -+ -+ /* Update the QTD and URB states. 
*/ -+ switch (pipe_type) { -+ case PIPE_CONTROL: -+ switch (qtd->control_phase) { -+ case DWC_OTG_CONTROL_SETUP: -+ if (urb->transfer_buffer_length > 0) { -+ qtd->control_phase = DWC_OTG_CONTROL_DATA; -+ } else { -+ qtd->control_phase = DWC_OTG_CONTROL_STATUS; -+ } -+ DWC_DEBUGPL(DBG_HCDV, " Control setup transaction done\n"); -+ halt_status = DWC_OTG_HC_XFER_COMPLETE; -+ break; -+ case DWC_OTG_CONTROL_DATA: { -+ urb_xfer_done = update_urb_state_xfer_comp(hc, hc_regs, urb, qtd); -+ if (urb_xfer_done) { -+ qtd->control_phase = DWC_OTG_CONTROL_STATUS; -+ DWC_DEBUGPL(DBG_HCDV, " Control data transfer done\n"); -+ } else { -+ save_data_toggle(hc, hc_regs, qtd); -+ } -+ halt_status = DWC_OTG_HC_XFER_COMPLETE; -+ break; -+ } -+ case DWC_OTG_CONTROL_STATUS: -+ DWC_DEBUGPL(DBG_HCDV, " Control transfer complete\n"); -+ if (urb->status == -EINPROGRESS) { -+ urb->status = 0; -+ } -+ dwc_otg_hcd_complete_urb(hcd, urb, urb->status); -+ halt_status = DWC_OTG_HC_XFER_URB_COMPLETE; -+ break; -+ } -+ -+ complete_non_periodic_xfer(hcd, hc, hc_regs, qtd, halt_status); -+ break; -+ case PIPE_BULK: -+ DWC_DEBUGPL(DBG_HCDV, " Bulk transfer complete\n"); -+ urb_xfer_done = update_urb_state_xfer_comp(hc, hc_regs, urb, qtd); -+ if (urb_xfer_done) { -+ dwc_otg_hcd_complete_urb(hcd, urb, urb->status); -+ halt_status = DWC_OTG_HC_XFER_URB_COMPLETE; -+ } else { -+ halt_status = DWC_OTG_HC_XFER_COMPLETE; -+ } -+ -+ save_data_toggle(hc, hc_regs, qtd); -+ complete_non_periodic_xfer(hcd, hc, hc_regs, qtd, halt_status); -+ break; -+ case PIPE_INTERRUPT: -+ DWC_DEBUGPL(DBG_HCDV, " Interrupt transfer complete\n"); -+ update_urb_state_xfer_comp(hc, hc_regs, urb, qtd); -+ -+ /* -+ * Interrupt URB is done on the first transfer complete -+ * interrupt. -+ */ -+ dwc_otg_hcd_complete_urb(hcd, urb, urb->status); -+ save_data_toggle(hc, hc_regs, qtd); -+ complete_periodic_xfer(hcd, hc, hc_regs, qtd, -+ DWC_OTG_HC_XFER_URB_COMPLETE); -+ break; -+ case PIPE_ISOCHRONOUS: -+ DWC_DEBUGPL(DBG_HCDV, " Isochronous transfer complete\n"); -+ if (qtd->isoc_split_pos == DWC_HCSPLIT_XACTPOS_ALL) { -+ halt_status = update_isoc_urb_state(hcd, hc, hc_regs, qtd, -+ DWC_OTG_HC_XFER_COMPLETE); -+ } -+ complete_periodic_xfer(hcd, hc, hc_regs, qtd, halt_status); -+ break; -+ } -+ -+ disable_hc_int(hc_regs, xfercompl); -+ -+ return 1; -+} -+ -+/** -+ * Handles a host channel STALL interrupt. This handler may be called in -+ * either DMA mode or Slave mode. -+ */ -+static int32_t handle_hc_stall_intr(dwc_otg_hcd_t *hcd, -+ dwc_hc_t *hc, -+ dwc_otg_hc_regs_t *hc_regs, -+ dwc_otg_qtd_t *qtd) -+{ -+ struct urb *urb = qtd->urb; -+ int pipe_type = usb_pipetype(urb->pipe); -+ -+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " -+ "STALL Received--\n", hc->hc_num); -+ -+ if (pipe_type == PIPE_CONTROL) { -+ dwc_otg_hcd_complete_urb(hcd, urb, -EPIPE); -+ } -+ -+ if (pipe_type == PIPE_BULK || pipe_type == PIPE_INTERRUPT) { -+ dwc_otg_hcd_complete_urb(hcd, urb, -EPIPE); -+ /* -+ * USB protocol requires resetting the data toggle for bulk -+ * and interrupt endpoints when a CLEAR_FEATURE(ENDPOINT_HALT) -+ * setup command is issued to the endpoint. Anticipate the -+ * CLEAR_FEATURE command since a STALL has occurred and reset -+ * the data toggle now. -+ */ -+ hc->qh->data_toggle = 0; -+ } -+ -+ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_STALL); -+ -+ disable_hc_int(hc_regs, stall); -+ -+ return 1; -+} -+ -+/* -+ * Updates the state of the URB when a transfer has been stopped due to an -+ * abnormal condition before the transfer completes. 
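The Transfer Complete handler above steps control transfers through the usual three stages: SETUP, then DATA only when the URB carries a payload, then STATUS, completing the URB once the STATUS stage finishes. A compact sketch of that state machine; the enum names are illustrative rather than the driver's own.

#include <stdio.h>

enum ctrl_phase { PHASE_SETUP, PHASE_DATA, PHASE_STATUS, PHASE_DONE };

/* Advance one phase after a Transfer Complete on the control endpoint. */
static enum ctrl_phase next_phase(enum ctrl_phase cur, int buf_len, int xfer_done)
{
    switch (cur) {
    case PHASE_SETUP:
        return buf_len > 0 ? PHASE_DATA : PHASE_STATUS;
    case PHASE_DATA:
        /* Stay in DATA until the whole buffer (or a short packet) is seen. */
        return xfer_done ? PHASE_STATUS : PHASE_DATA;
    case PHASE_STATUS:
        return PHASE_DONE;          /* the URB is completed here */
    default:
        return PHASE_DONE;
    }
}

int main(void)
{
    enum ctrl_phase p = PHASE_SETUP;
    p = next_phase(p, 18, 0);       /* request with an 18-byte data stage */
    p = next_phase(p, 18, 1);       /* data stage finished */
    p = next_phase(p, 18, 0);       /* status stage finished -> done */
    printf("final phase = %d\n", p);
    return 0;
}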
Modifies the -+ * actual_length field of the URB to reflect the number of bytes that have -+ * actually been transferred via the host channel. -+ */ -+static void update_urb_state_xfer_intr(dwc_hc_t *hc, -+ dwc_otg_hc_regs_t *hc_regs, -+ struct urb *urb, -+ dwc_otg_qtd_t *qtd, -+ dwc_otg_halt_status_e halt_status) -+{ -+ uint32_t bytes_transferred = get_actual_xfer_length(hc, hc_regs, qtd, -+ halt_status, NULL); -+ urb->actual_length += bytes_transferred; -+ -+#ifdef DEBUG -+ { -+ hctsiz_data_t hctsiz; -+ hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz); -+ DWC_DEBUGPL(DBG_HCDV, "DWC_otg: %s: %s, channel %d\n", -+ __func__, (hc->ep_is_in ? "IN" : "OUT"), hc->hc_num); -+ DWC_DEBUGPL(DBG_HCDV, " hc->start_pkt_count %d\n", hc->start_pkt_count); -+ DWC_DEBUGPL(DBG_HCDV, " hctsiz.pktcnt %d\n", hctsiz.b.pktcnt); -+ DWC_DEBUGPL(DBG_HCDV, " hc->max_packet %d\n", hc->max_packet); -+ DWC_DEBUGPL(DBG_HCDV, " bytes_transferred %d\n", bytes_transferred); -+ DWC_DEBUGPL(DBG_HCDV, " urb->actual_length %d\n", urb->actual_length); -+ DWC_DEBUGPL(DBG_HCDV, " urb->transfer_buffer_length %d\n", -+ urb->transfer_buffer_length); -+ } -+#endif -+} -+ -+/** -+ * Handles a host channel NAK interrupt. This handler may be called in either -+ * DMA mode or Slave mode. -+ */ -+static int32_t handle_hc_nak_intr(dwc_otg_hcd_t *hcd, -+ dwc_hc_t *hc, -+ dwc_otg_hc_regs_t *hc_regs, -+ dwc_otg_qtd_t *qtd) -+{ -+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " -+ "NAK Received--\n", hc->hc_num); -+ -+ /* -+ * Handle NAK for IN/OUT SSPLIT/CSPLIT transfers, bulk, control, and -+ * interrupt. Re-start the SSPLIT transfer. -+ */ -+ if (hc->do_split) { -+ if (hc->complete_split) { -+ qtd->error_count = 0; -+ } -+ qtd->complete_split = 0; -+ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NAK); -+ goto handle_nak_done; -+ } -+ -+ switch (usb_pipetype(qtd->urb->pipe)) { -+ case PIPE_CONTROL: -+ case PIPE_BULK: -+ if (hcd->core_if->dma_enable && hc->ep_is_in) { -+ /* -+ * NAK interrupts are enabled on bulk/control IN -+ * transfers in DMA mode for the sole purpose of -+ * resetting the error count after a transaction error -+ * occurs. The core will continue transferring data. -+ */ -+ qtd->error_count = 0; -+ goto handle_nak_done; -+ } -+ -+ /* -+ * NAK interrupts normally occur during OUT transfers in DMA -+ * or Slave mode. For IN transfers, more requests will be -+ * queued as request queue space is available. -+ */ -+ qtd->error_count = 0; -+ -+ if (!hc->qh->ping_state) { -+ update_urb_state_xfer_intr(hc, hc_regs, qtd->urb, -+ qtd, DWC_OTG_HC_XFER_NAK); -+ save_data_toggle(hc, hc_regs, qtd); -+ if (qtd->urb->dev->speed == USB_SPEED_HIGH) { -+ hc->qh->ping_state = 1; -+ } -+ } -+ -+ /* -+ * Halt the channel so the transfer can be re-started from -+ * the appropriate point or the PING protocol will -+ * start/continue. -+ */ -+ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NAK); -+ break; -+ case PIPE_INTERRUPT: -+ qtd->error_count = 0; -+ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NAK); -+ break; -+ case PIPE_ISOCHRONOUS: -+ /* Should never get called for isochronous transfers. */ -+ BUG(); -+ break; -+ } -+ -+ handle_nak_done: -+ disable_hc_int(hc_regs, nak); -+ -+ return 1; -+} -+ -+/** -+ * Handles a host channel ACK interrupt. This interrupt is enabled when -+ * performing the PING protocol in Slave mode, when errors occur during -+ * either Slave mode or DMA mode, and during Start Split transactions. 
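The ACK handler that this comment introduces advances ISOC OUT start-splits in 188-byte per-microframe chunks and picks the next SSPLIT token (MID or END) from how much of the frame payload remains; the constant 188 comes from the handler itself, while the 500-byte frame below is hypothetical. A worked sketch of that bookkeeping:

#include <stdio.h>

enum xact_pos { POS_ALL, POS_BEGIN, POS_MID, POS_END };

/* After each ACKed 188-byte chunk, pick the next split position, mirroring
 * the isoc_split_offset bookkeeping in the ACK handler. */
static enum xact_pos next_split_pos(int frame_len, int *split_offset)
{
    *split_offset += 188;
    if (frame_len - *split_offset <= 188)
        return POS_END;     /* the remainder fits in one last chunk */
    return POS_MID;         /* more full chunks still to send */
}

int main(void)
{
    int offset = 0;         /* the BEGIN chunk has just been ACKed */
    int frame_len = 500;    /* hypothetical ISO OUT frame payload */
    enum xact_pos pos;

    do {
        pos = next_split_pos(frame_len, &offset);
        printf("offset=%d pos=%s\n", offset, pos == POS_END ? "END" : "MID");
    } while (pos != POS_END);   /* 500 bytes -> BEGIN, MID, END */
    return 0;
}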
-+ */ -+static int32_t handle_hc_ack_intr(dwc_otg_hcd_t *hcd, -+ dwc_hc_t *hc, -+ dwc_otg_hc_regs_t *hc_regs, -+ dwc_otg_qtd_t *qtd) -+{ -+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " -+ "ACK Received--\n", hc->hc_num); -+ -+ if (hc->do_split) { -+ /* -+ * Handle ACK on SSPLIT. -+ * ACK should not occur in CSPLIT. -+ */ -+ if (!hc->ep_is_in && hc->data_pid_start != DWC_OTG_HC_PID_SETUP) { -+ qtd->ssplit_out_xfer_count = hc->xfer_len; -+ } -+ if (!(hc->ep_type == DWC_OTG_EP_TYPE_ISOC && !hc->ep_is_in)) { -+ /* Don't need complete for isochronous out transfers. */ -+ qtd->complete_split = 1; -+ } -+ -+ /* ISOC OUT */ -+ if (hc->ep_type == DWC_OTG_EP_TYPE_ISOC && !hc->ep_is_in) { -+ switch (hc->xact_pos) { -+ case DWC_HCSPLIT_XACTPOS_ALL: -+ break; -+ case DWC_HCSPLIT_XACTPOS_END: -+ qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_ALL; -+ qtd->isoc_split_offset = 0; -+ break; -+ case DWC_HCSPLIT_XACTPOS_BEGIN: -+ case DWC_HCSPLIT_XACTPOS_MID: -+ /* -+ * For BEGIN or MID, calculate the length for -+ * the next microframe to determine the correct -+ * SSPLIT token, either MID or END. -+ */ -+ { -+ struct usb_iso_packet_descriptor *frame_desc; -+ -+ frame_desc = &qtd->urb->iso_frame_desc[qtd->isoc_frame_index]; -+ qtd->isoc_split_offset += 188; -+ -+ if ((frame_desc->length - qtd->isoc_split_offset) <= 188) { -+ qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_END; -+ } else { -+ qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_MID; -+ } -+ -+ } -+ break; -+ } -+ } else { -+ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_ACK); -+ } -+ } else { -+ qtd->error_count = 0; -+ -+ if (hc->qh->ping_state) { -+ hc->qh->ping_state = 0; -+ /* -+ * Halt the channel so the transfer can be re-started -+ * from the appropriate point. This only happens in -+ * Slave mode. In DMA mode, the ping_state is cleared -+ * when the transfer is started because the core -+ * automatically executes the PING, then the transfer. -+ */ -+ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_ACK); -+ } -+ } -+ -+ /* -+ * If the ACK occurred when _not_ in the PING state, let the channel -+ * continue transferring data after clearing the error count. -+ */ -+ -+ disable_hc_int(hc_regs, ack); -+ -+ return 1; -+} -+ -+/** -+ * Handles a host channel NYET interrupt. This interrupt should only occur on -+ * Bulk and Control OUT endpoints and for complete split transactions. If a -+ * NYET occurs at the same time as a Transfer Complete interrupt, it is -+ * handled in the xfercomp interrupt handler, not here. This handler may be -+ * called in either DMA mode or Slave mode. -+ */ -+static int32_t handle_hc_nyet_intr(dwc_otg_hcd_t *hcd, -+ dwc_hc_t *hc, -+ dwc_otg_hc_regs_t *hc_regs, -+ dwc_otg_qtd_t *qtd) -+{ -+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " -+ "NYET Received--\n", hc->hc_num); -+ -+ /* -+ * NYET on CSPLIT -+ * re-do the CSPLIT immediately on non-periodic -+ */ -+ if (hc->do_split && hc->complete_split) { -+ if (hc->ep_type == DWC_OTG_EP_TYPE_INTR || -+ hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { -+ int frnum = dwc_otg_hcd_get_frame_number(dwc_otg_hcd_to_hcd(hcd)); -+ -+ if (dwc_full_frame_num(frnum) != -+ dwc_full_frame_num(hc->qh->sched_frame)) { -+ /* -+ * No longer in the same full speed frame. -+ * Treat this as a transaction error. -+ */ -+#if 0 -+ /** @todo Fix system performance so this can -+ * be treated as an error. Right now complete -+ * splits cannot be scheduled precisely enough -+ * due to other system activity, so this error -+ * occurs regularly in Slave mode. 
-+ */ -+ qtd->error_count++; -+#endif -+ qtd->complete_split = 0; -+ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_XACT_ERR); -+ /** @todo add support for isoc release */ -+ goto handle_nyet_done; -+ } -+ } -+ -+ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NYET); -+ goto handle_nyet_done; -+ } -+ -+ hc->qh->ping_state = 1; -+ qtd->error_count = 0; -+ -+ update_urb_state_xfer_intr(hc, hc_regs, qtd->urb, qtd, -+ DWC_OTG_HC_XFER_NYET); -+ save_data_toggle(hc, hc_regs, qtd); -+ -+ /* -+ * Halt the channel and re-start the transfer so the PING -+ * protocol will start. -+ */ -+ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_NYET); -+ -+handle_nyet_done: -+ disable_hc_int(hc_regs, nyet); -+ return 1; -+} -+ -+/** -+ * Handles a host channel babble interrupt. This handler may be called in -+ * either DMA mode or Slave mode. -+ */ -+static int32_t handle_hc_babble_intr(dwc_otg_hcd_t *hcd, -+ dwc_hc_t *hc, -+ dwc_otg_hc_regs_t *hc_regs, -+ dwc_otg_qtd_t *qtd) -+{ -+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " -+ "Babble Error--\n", hc->hc_num); -+ if (hc->ep_type != DWC_OTG_EP_TYPE_ISOC) { -+ dwc_otg_hcd_complete_urb(hcd, qtd->urb, -EOVERFLOW); -+ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_BABBLE_ERR); -+ } else { -+ dwc_otg_halt_status_e halt_status; -+ halt_status = update_isoc_urb_state(hcd, hc, hc_regs, qtd, -+ DWC_OTG_HC_XFER_BABBLE_ERR); -+ halt_channel(hcd, hc, qtd, halt_status); -+ } -+ disable_hc_int(hc_regs, bblerr); -+ return 1; -+} -+ -+/** -+ * Handles a host channel AHB error interrupt. This handler is only called in -+ * DMA mode. -+ */ -+static int32_t handle_hc_ahberr_intr(dwc_otg_hcd_t *hcd, -+ dwc_hc_t *hc, -+ dwc_otg_hc_regs_t *hc_regs, -+ dwc_otg_qtd_t *qtd) -+{ -+ hcchar_data_t hcchar; -+ hcsplt_data_t hcsplt; -+ hctsiz_data_t hctsiz; -+ uint32_t hcdma; -+ struct urb *urb = qtd->urb; -+ -+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " -+ "AHB Error--\n", hc->hc_num); -+ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ hcsplt.d32 = dwc_read_reg32(&hc_regs->hcsplt); -+ hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz); -+ hcdma = dwc_read_reg32(&hc_regs->hcdma); -+ -+ DWC_ERROR("AHB ERROR, Channel %d\n", hc->hc_num); -+ DWC_ERROR(" hcchar 0x%08x, hcsplt 0x%08x\n", hcchar.d32, hcsplt.d32); -+ DWC_ERROR(" hctsiz 0x%08x, hcdma 0x%08x\n", hctsiz.d32, hcdma); -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD URB Enqueue\n"); -+ DWC_ERROR(" Device address: %d\n", usb_pipedevice(urb->pipe)); -+ DWC_ERROR(" Endpoint: %d, %s\n", usb_pipeendpoint(urb->pipe), -+ (usb_pipein(urb->pipe) ? 
"IN" : "OUT")); -+ DWC_ERROR(" Endpoint type: %s\n", -+ ({char *pipetype; -+ switch (usb_pipetype(urb->pipe)) { -+ case PIPE_CONTROL: pipetype = "CONTROL"; break; -+ case PIPE_BULK: pipetype = "BULK"; break; -+ case PIPE_INTERRUPT: pipetype = "INTERRUPT"; break; -+ case PIPE_ISOCHRONOUS: pipetype = "ISOCHRONOUS"; break; -+ default: pipetype = "UNKNOWN"; break; -+ }; pipetype;})); -+ DWC_ERROR(" Speed: %s\n", -+ ({char *speed; -+ switch (urb->dev->speed) { -+ case USB_SPEED_HIGH: speed = "HIGH"; break; -+ case USB_SPEED_FULL: speed = "FULL"; break; -+ case USB_SPEED_LOW: speed = "LOW"; break; -+ default: speed = "UNKNOWN"; break; -+ }; speed;})); -+ DWC_ERROR(" Max packet size: %d\n", -+ usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe))); -+ DWC_ERROR(" Data buffer length: %d\n", urb->transfer_buffer_length); -+ DWC_ERROR(" Transfer buffer: %p, Transfer DMA: %p\n", -+ urb->transfer_buffer, (void *)urb->transfer_dma); -+ DWC_ERROR(" Setup buffer: %p, Setup DMA: %p\n", -+ urb->setup_packet, (void *)urb->setup_dma); -+ DWC_ERROR(" Interval: %d\n", urb->interval); -+ -+ dwc_otg_hcd_complete_urb(hcd, urb, -EIO); -+ -+ /* -+ * Force a channel halt. Don't call halt_channel because that won't -+ * write to the HCCHARn register in DMA mode to force the halt. -+ */ -+ dwc_otg_hc_halt(hcd->core_if, hc, DWC_OTG_HC_XFER_AHB_ERR); -+ -+ disable_hc_int(hc_regs, ahberr); -+ return 1; -+} -+ -+/** -+ * Handles a host channel transaction error interrupt. This handler may be -+ * called in either DMA mode or Slave mode. -+ */ -+static int32_t handle_hc_xacterr_intr(dwc_otg_hcd_t *hcd, -+ dwc_hc_t *hc, -+ dwc_otg_hc_regs_t *hc_regs, -+ dwc_otg_qtd_t *qtd) -+{ -+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " -+ "Transaction Error--\n", hc->hc_num); -+ -+ switch (usb_pipetype(qtd->urb->pipe)) { -+ case PIPE_CONTROL: -+ case PIPE_BULK: -+ qtd->error_count++; -+ if (!hc->qh->ping_state) { -+ update_urb_state_xfer_intr(hc, hc_regs, qtd->urb, -+ qtd, DWC_OTG_HC_XFER_XACT_ERR); -+ save_data_toggle(hc, hc_regs, qtd); -+ if (!hc->ep_is_in && qtd->urb->dev->speed == USB_SPEED_HIGH) { -+ hc->qh->ping_state = 1; -+ } -+ } -+ -+ /* -+ * Halt the channel so the transfer can be re-started from -+ * the appropriate point or the PING protocol will start. -+ */ -+ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_XACT_ERR); -+ break; -+ case PIPE_INTERRUPT: -+ qtd->error_count++; -+ if (hc->do_split && hc->complete_split) { -+ qtd->complete_split = 0; -+ } -+ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_XACT_ERR); -+ break; -+ case PIPE_ISOCHRONOUS: -+ { -+ dwc_otg_halt_status_e halt_status; -+ halt_status = update_isoc_urb_state(hcd, hc, hc_regs, qtd, -+ DWC_OTG_HC_XFER_XACT_ERR); -+ -+ halt_channel(hcd, hc, qtd, halt_status); -+ } -+ break; -+ } -+ -+ disable_hc_int(hc_regs, xacterr); -+ -+ return 1; -+} -+ -+/** -+ * Handles a host channel frame overrun interrupt. This handler may be called -+ * in either DMA mode or Slave mode. 
-+ */ -+static int32_t handle_hc_frmovrun_intr(dwc_otg_hcd_t *hcd, -+ dwc_hc_t *hc, -+ dwc_otg_hc_regs_t *hc_regs, -+ dwc_otg_qtd_t *qtd) -+{ -+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " -+ "Frame Overrun--\n", hc->hc_num); -+ -+ switch (usb_pipetype(qtd->urb->pipe)) { -+ case PIPE_CONTROL: -+ case PIPE_BULK: -+ break; -+ case PIPE_INTERRUPT: -+ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_FRAME_OVERRUN); -+ break; -+ case PIPE_ISOCHRONOUS: -+ { -+ dwc_otg_halt_status_e halt_status; -+ halt_status = update_isoc_urb_state(hcd, hc, hc_regs, qtd, -+ DWC_OTG_HC_XFER_FRAME_OVERRUN); -+ -+ halt_channel(hcd, hc, qtd, halt_status); -+ } -+ break; -+ } -+ -+ disable_hc_int(hc_regs, frmovrun); -+ -+ return 1; -+} -+ -+/** -+ * Handles a host channel data toggle error interrupt. This handler may be -+ * called in either DMA mode or Slave mode. -+ */ -+static int32_t handle_hc_datatglerr_intr(dwc_otg_hcd_t *hcd, -+ dwc_hc_t *hc, -+ dwc_otg_hc_regs_t *hc_regs, -+ dwc_otg_qtd_t *qtd) -+{ -+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " -+ "Data Toggle Error--\n", hc->hc_num); -+ -+ if (hc->ep_is_in) { -+ qtd->error_count = 0; -+ } else { -+ DWC_ERROR("Data Toggle Error on OUT transfer," -+ "channel %d\n", hc->hc_num); -+ } -+ -+ disable_hc_int(hc_regs, datatglerr); -+ -+ return 1; -+} -+ -+#ifdef DEBUG -+/** -+ * This function is for debug only. It checks that a valid halt status is set -+ * and that HCCHARn.chdis is clear. If there's a problem, corrective action is -+ * taken and a warning is issued. -+ * @return 1 if halt status is ok, 0 otherwise. -+ */ -+static inline int halt_status_ok(dwc_otg_hcd_t *hcd, -+ dwc_hc_t *hc, -+ dwc_otg_hc_regs_t *hc_regs, -+ dwc_otg_qtd_t *qtd) -+{ -+ hcchar_data_t hcchar; -+ hctsiz_data_t hctsiz; -+ hcint_data_t hcint; -+ hcintmsk_data_t hcintmsk; -+ hcsplt_data_t hcsplt; -+ -+ if (hc->halt_status == DWC_OTG_HC_XFER_NO_HALT_STATUS) { -+ /* -+ * This code is here only as a check. This condition should -+ * never happen. Ignore the halt if it does occur. -+ */ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ hctsiz.d32 = dwc_read_reg32(&hc_regs->hctsiz); -+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); -+ hcintmsk.d32 = dwc_read_reg32(&hc_regs->hcintmsk); -+ hcsplt.d32 = dwc_read_reg32(&hc_regs->hcsplt); -+ DWC_WARN("%s: hc->halt_status == DWC_OTG" -+ "channel %d, hcchar 0x%08x, hctsiz 0x%08x, " -+ "hcint 0x%08x, hcintmsk 0x%08x, " -+ "hcsplt 0x%08x, qtd->complete_split %d\n", -+ __func__, hc->hc_num, hcchar.d32, hctsiz.d32, -+ hcint.d32, hcintmsk.d32, -+ hcsplt.d32, qtd->complete_split); -+ -+ DWC_WARN("%s: no halt status, channel %d, ignoring interrupt\n", -+ __func__, hc->hc_num); -+ DWC_WARN("\n"); -+ clear_hc_int(hc_regs, chhltd); -+ return 0; -+ } -+ -+ /* -+ * This code is here only as a check. hcchar.chdis should -+ * never be set when the halt interrupt occurs. Halt the -+ * channel again if it does occur. -+ */ -+ hcchar.d32 = dwc_read_reg32(&hc_regs->hcchar); -+ if (hcchar.b.chdis) { -+ DWC_WARN("%s: hcchar.chdis set unexpectedly, " -+ "hcchar 0x%08x, trying to halt again\n", -+ __func__, hcchar.d32); -+ clear_hc_int(hc_regs, chhltd); -+ hc->halt_pending = 0; -+ halt_channel(hcd, hc, qtd, hc->halt_status); -+ return 0; -+ } -+ -+ return 1; -+} -+#endif -+ -+/** -+ * Handles a host Channel Halted interrupt in DMA mode. This handler -+ * determines the reason the channel halted and proceeds accordingly. 
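The DMA-mode Channel Halted handler documented just above reads HCINT once and decodes the halt reason in a fixed priority order: transfer complete, then STALL, then transaction error, and only then the NYET/babble/frame-overrun/NAK/ACK cases, because several bits can be latched together (for example XactErr plus NAK on a bulk OUT that started with PING). The sketch below mirrors that ordering for a subset of the bits; the bit positions are invented for illustration and do not match the real HCINT layout.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical latched-interrupt bits; several can be set at once. */
#define HCINT_XFERCOMP (1u << 0)
#define HCINT_STALL    (1u << 1)
#define HCINT_XACTERR  (1u << 2)
#define HCINT_NYET     (1u << 3)
#define HCINT_NAK      (1u << 4)
#define HCINT_ACK      (1u << 5)

/* Same relative priority as the DMA-mode halt handler: completion and
 * errors outrank the handshake bits that may accompany them. */
static const char *halt_reason(uint32_t hcint)
{
    if (hcint & HCINT_XFERCOMP) return "transfer complete";
    if (hcint & HCINT_STALL)    return "stall";
    if (hcint & HCINT_XACTERR)  return "transaction error";
    if (hcint & HCINT_NYET)     return "nyet";
    if (hcint & HCINT_NAK)      return "nak";
    if (hcint & HCINT_ACK)      return "ack";
    return "unknown";
}

int main(void)
{
    /* XactErr latched together with NAK on a bulk OUT that started with
     * PING: the error takes precedence, as the handler's comment says. */
    printf("%s\n", halt_reason(HCINT_XACTERR | HCINT_NAK));
    return 0;
}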
-+ */ -+static void handle_hc_chhltd_intr_dma(dwc_otg_hcd_t *hcd, -+ dwc_hc_t *hc, -+ dwc_otg_hc_regs_t *hc_regs, -+ dwc_otg_qtd_t *qtd) -+{ -+ hcint_data_t hcint; -+ hcintmsk_data_t hcintmsk; -+ int out_nak_enh = 0; -+ -+ /* For core with OUT NAK enhancement, the flow for high- -+ * speed CONTROL/BULK OUT is handled a little differently. -+ */ -+ if (hcd->core_if->snpsid >= 0x4F54271A) { -+ if (hc->speed == DWC_OTG_EP_SPEED_HIGH && !hc->ep_is_in && -+ (hc->ep_type == DWC_OTG_EP_TYPE_CONTROL || -+ hc->ep_type == DWC_OTG_EP_TYPE_BULK)) { -+ printk(KERN_DEBUG "OUT NAK enhancement enabled\n"); -+ out_nak_enh = 1; -+ } else { -+ printk(KERN_DEBUG "OUT NAK enhancement disabled, not HS Ctrl/Bulk OUT EP\n"); -+ } -+ } else { -+// printk(KERN_DEBUG "OUT NAK enhancement disabled, no core support\n"); -+ } -+ -+ if (hc->halt_status == DWC_OTG_HC_XFER_URB_DEQUEUE || -+ hc->halt_status == DWC_OTG_HC_XFER_AHB_ERR) { -+ /* -+ * Just release the channel. A dequeue can happen on a -+ * transfer timeout. In the case of an AHB Error, the channel -+ * was forced to halt because there's no way to gracefully -+ * recover. -+ */ -+ release_channel(hcd, hc, qtd, hc->halt_status); -+ return; -+ } -+ -+ /* Read the HCINTn register to determine the cause for the halt. */ -+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); -+ hcintmsk.d32 = dwc_read_reg32(&hc_regs->hcintmsk); -+ -+ if (hcint.b.xfercomp) { -+ /** @todo This is here because of a possible hardware bug. Spec -+ * says that on SPLIT-ISOC OUT transfers in DMA mode that a HALT -+ * interrupt w/ACK bit set should occur, but I only see the -+ * XFERCOMP bit, even with it masked out. This is a workaround -+ * for that behavior. Should fix this when hardware is fixed. -+ */ -+ if (hc->ep_type == DWC_OTG_EP_TYPE_ISOC && !hc->ep_is_in) { -+ handle_hc_ack_intr(hcd, hc, hc_regs, qtd); -+ } -+ handle_hc_xfercomp_intr(hcd, hc, hc_regs, qtd); -+ } else if (hcint.b.stall) { -+ handle_hc_stall_intr(hcd, hc, hc_regs, qtd); -+ } else if (hcint.b.xacterr) { -+ if (out_nak_enh) { -+ if (hcint.b.nyet || hcint.b.nak || hcint.b.ack) { -+ printk(KERN_DEBUG "XactErr with NYET/NAK/ACK\n"); -+ qtd->error_count = 0; -+ } else { -+ printk(KERN_DEBUG "XactErr without NYET/NAK/ACK\n"); -+ } -+ } -+ -+ /* -+ * Must handle xacterr before nak or ack. Could get a xacterr -+ * at the same time as either of these on a BULK/CONTROL OUT -+ * that started with a PING. The xacterr takes precedence. -+ */ -+ handle_hc_xacterr_intr(hcd, hc, hc_regs, qtd); -+ } else if (!out_nak_enh) { -+ if (hcint.b.nyet) { -+ /* -+ * Must handle nyet before nak or ack. Could get a nyet at the -+ * same time as either of those on a BULK/CONTROL OUT that -+ * started with a PING. The nyet takes precedence. -+ */ -+ handle_hc_nyet_intr(hcd, hc, hc_regs, qtd); -+ } else if (hcint.b.bblerr) { -+ handle_hc_babble_intr(hcd, hc, hc_regs, qtd); -+ } else if (hcint.b.frmovrun) { -+ handle_hc_frmovrun_intr(hcd, hc, hc_regs, qtd); -+ } else if (hcint.b.nak && !hcintmsk.b.nak) { -+ /* -+ * If nak is not masked, it's because a non-split IN transfer -+ * is in an error state. In that case, the nak is handled by -+ * the nak interrupt handler, not here. Handle nak here for -+ * BULK/CONTROL OUT transfers, which halt on a NAK to allow -+ * rewinding the buffer pointer. -+ */ -+ handle_hc_nak_intr(hcd, hc, hc_regs, qtd); -+ } else if (hcint.b.ack && !hcintmsk.b.ack) { -+ /* -+ * If ack is not masked, it's because a non-split IN transfer -+ * is in an error state. 
In that case, the ack is handled by -+ * the ack interrupt handler, not here. Handle ack here for -+ * split transfers. Start splits halt on ACK. -+ */ -+ handle_hc_ack_intr(hcd, hc, hc_regs, qtd); -+ } else { -+ if (hc->ep_type == DWC_OTG_EP_TYPE_INTR || -+ hc->ep_type == DWC_OTG_EP_TYPE_ISOC) { -+ /* -+ * A periodic transfer halted with no other channel -+ * interrupts set. Assume it was halted by the core -+ * because it could not be completed in its scheduled -+ * (micro)frame. -+ */ -+#ifdef DEBUG -+ DWC_PRINT("%s: Halt channel %d (assume incomplete periodic transfer)\n", -+ __func__, hc->hc_num); -+#endif -+ halt_channel(hcd, hc, qtd, DWC_OTG_HC_XFER_PERIODIC_INCOMPLETE); -+ } else { -+ DWC_ERROR("%s: Channel %d, DMA Mode -- ChHltd set, but reason " -+ "for halting is unknown, hcint 0x%08x, intsts 0x%08x\n", -+ __func__, hc->hc_num, hcint.d32, -+ dwc_read_reg32(&hcd->core_if->core_global_regs->gintsts)); -+ } -+ } -+ } else { -+ printk(KERN_DEBUG "NYET/NAK/ACK/other in non-error case, 0x%08x\n", hcint.d32); -+ } -+} -+ -+/** -+ * Handles a host channel Channel Halted interrupt. -+ * -+ * In slave mode, this handler is called only when the driver specifically -+ * requests a halt. This occurs during handling other host channel interrupts -+ * (e.g. nak, xacterr, stall, nyet, etc.). -+ * -+ * In DMA mode, this is the interrupt that occurs when the core has finished -+ * processing a transfer on a channel. Other host channel interrupts (except -+ * ahberr) are disabled in DMA mode. -+ */ -+static int32_t handle_hc_chhltd_intr(dwc_otg_hcd_t *hcd, -+ dwc_hc_t *hc, -+ dwc_otg_hc_regs_t *hc_regs, -+ dwc_otg_qtd_t *qtd) -+{ -+ DWC_DEBUGPL(DBG_HCD, "--Host Channel %d Interrupt: " -+ "Channel Halted--\n", hc->hc_num); -+ -+ if (hcd->core_if->dma_enable) { -+ handle_hc_chhltd_intr_dma(hcd, hc, hc_regs, qtd); -+ } else { -+#ifdef DEBUG -+ if (!halt_status_ok(hcd, hc, hc_regs, qtd)) { -+ return 1; -+ } -+#endif -+ release_channel(hcd, hc, qtd, hc->halt_status); -+ } -+ -+ return 1; -+} -+ -+/** Handles interrupt for a specific Host Channel */ -+int32_t dwc_otg_hcd_handle_hc_n_intr(dwc_otg_hcd_t *dwc_otg_hcd, uint32_t num) -+{ -+ int retval = 0; -+ hcint_data_t hcint; -+ hcintmsk_data_t hcintmsk; -+ dwc_hc_t *hc; -+ dwc_otg_hc_regs_t *hc_regs; -+ dwc_otg_qtd_t *qtd; -+ -+ DWC_DEBUGPL(DBG_HCDV, "--Host Channel Interrupt--, Channel %d\n", num); -+ -+ hc = dwc_otg_hcd->hc_ptr_array[num]; -+ hc_regs = dwc_otg_hcd->core_if->host_if->hc_regs[num]; -+ qtd = list_entry(hc->qh->qtd_list.next, dwc_otg_qtd_t, qtd_list_entry); -+ -+ hcint.d32 = dwc_read_reg32(&hc_regs->hcint); -+ hcintmsk.d32 = dwc_read_reg32(&hc_regs->hcintmsk); -+ DWC_DEBUGPL(DBG_HCDV, " hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n", -+ hcint.d32, hcintmsk.d32, (hcint.d32 & hcintmsk.d32)); -+ hcint.d32 = hcint.d32 & hcintmsk.d32; -+ -+ if (!dwc_otg_hcd->core_if->dma_enable) { -+ if (hcint.b.chhltd && hcint.d32 != 0x2) { -+ hcint.b.chhltd = 0; -+ } -+ } -+ -+ if (hcint.b.xfercomp) { -+ retval |= handle_hc_xfercomp_intr(dwc_otg_hcd, hc, hc_regs, qtd); -+ /* -+ * If NYET occurred at same time as Xfer Complete, the NYET is -+ * handled by the Xfer Complete interrupt handler. Don't want -+ * to call the NYET interrupt handler in this case. 
-+ */ -+ hcint.b.nyet = 0; -+ } -+ if (hcint.b.chhltd) { -+ retval |= handle_hc_chhltd_intr(dwc_otg_hcd, hc, hc_regs, qtd); -+ } -+ if (hcint.b.ahberr) { -+ retval |= handle_hc_ahberr_intr(dwc_otg_hcd, hc, hc_regs, qtd); -+ } -+ if (hcint.b.stall) { -+ retval |= handle_hc_stall_intr(dwc_otg_hcd, hc, hc_regs, qtd); -+ } -+ if (hcint.b.nak) { -+ retval |= handle_hc_nak_intr(dwc_otg_hcd, hc, hc_regs, qtd); -+ } -+ if (hcint.b.ack) { -+ retval |= handle_hc_ack_intr(dwc_otg_hcd, hc, hc_regs, qtd); -+ } -+ if (hcint.b.nyet) { -+ retval |= handle_hc_nyet_intr(dwc_otg_hcd, hc, hc_regs, qtd); -+ } -+ if (hcint.b.xacterr) { -+ retval |= handle_hc_xacterr_intr(dwc_otg_hcd, hc, hc_regs, qtd); -+ } -+ if (hcint.b.bblerr) { -+ retval |= handle_hc_babble_intr(dwc_otg_hcd, hc, hc_regs, qtd); -+ } -+ if (hcint.b.frmovrun) { -+ retval |= handle_hc_frmovrun_intr(dwc_otg_hcd, hc, hc_regs, qtd); -+ } -+ if (hcint.b.datatglerr) { -+ retval |= handle_hc_datatglerr_intr(dwc_otg_hcd, hc, hc_regs, qtd); -+ } -+ -+ return retval; -+} -+ -+#endif /* DWC_DEVICE_ONLY */ ---- /dev/null -+++ b/drivers/usb/dwc_otg/dwc_otg_hcd_queue.c -@@ -0,0 +1,684 @@ -+/* ========================================================================== -+ * $File: //dwh/usb_iip/dev/software/otg_ipmate/linux/drivers/dwc_otg_hcd_queue.c $ -+ * $Revision: 1.5 $ -+ * $Date: 2008-12-15 06:51:32 $ -+ * $Change: 537387 $ -+ * -+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, -+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless -+ * otherwise expressly agreed to in writing between Synopsys and you. -+ * -+ * The Software IS NOT an item of Licensed Software or Licensed Product under -+ * any End User Software License Agreement or Agreement for Licensed Product -+ * with Synopsys or any supplement thereto. You are permitted to use and -+ * redistribute this Software in source and binary forms, with or without -+ * modification, provided that redistributions of source code must retain this -+ * notice. You may not view, use, disclose, copy or distribute this file or -+ * any information contained herein except pursuant to this license grant from -+ * Synopsys. If you do not agree with this notice, including the disclaimer -+ * below, then you are not authorized to use the Software. -+ * -+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, -+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -+ * DAMAGE. -+ * ========================================================================== */ -+#ifndef DWC_DEVICE_ONLY -+ -+/** -+ * @file -+ * -+ * This file contains the functions to manage Queue Heads and Queue -+ * Transfer Descriptors. 
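The queue file whose header appears above manages one queue head (QH) per device endpoint and hangs one queue transfer descriptor (QTD) off it for every URB submitted to that endpoint; freeing a QH first releases any QTDs still linked to it. A stripped-down sketch of that ownership model, using a plain singly linked list in place of the kernel's list_head machinery:

#include <stdio.h>
#include <stdlib.h>

/* One QTD per URB submitted to an endpoint. */
struct qtd_sketch {
    int urb_id;
    struct qtd_sketch *next;
};

/* One QH per endpoint; it owns the list of pending QTDs. */
struct qh_sketch {
    int ep_num;
    int ep_is_in;
    struct qtd_sketch *head, *tail;
};

static void qh_add_qtd(struct qh_sketch *qh, int urb_id)
{
    struct qtd_sketch *qtd = calloc(1, sizeof(*qtd));
    if (!qtd)
        return;
    qtd->urb_id = urb_id;
    if (qh->tail)
        qh->tail->next = qtd;
    else
        qh->head = qtd;
    qh->tail = qtd;
}

/* Freeing the QH first releases every QTD still linked to it, as
 * dwc_otg_hcd_qh_free does. */
static void qh_free(struct qh_sketch *qh)
{
    struct qtd_sketch *qtd = qh->head;
    while (qtd) {
        struct qtd_sketch *next = qtd->next;
        free(qtd);
        qtd = next;
    }
    free(qh);
}

int main(void)
{
    struct qh_sketch *qh = calloc(1, sizeof(*qh));
    if (!qh)
        return 1;
    qh->ep_num = 1;
    qh->ep_is_in = 1;
    qh_add_qtd(qh, 100);
    qh_add_qtd(qh, 101);
    printf("EP%d %s: first pending URB %d\n", qh->ep_num,
           qh->ep_is_in ? "IN" : "OUT", qh->head->urb_id);
    qh_free(qh);
    return 0;
}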
-+ */ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "dwc_otg_driver.h" -+#include "dwc_otg_hcd.h" -+#include "dwc_otg_regs.h" -+ -+/** -+ * This function allocates and initializes a QH. -+ * -+ * @param hcd The HCD state structure for the DWC OTG controller. -+ * @param[in] urb Holds the information about the device/endpoint that we need -+ * to initialize the QH. -+ * -+ * @return Returns pointer to the newly allocated QH, or NULL on error. */ -+dwc_otg_qh_t *dwc_otg_hcd_qh_create (dwc_otg_hcd_t *hcd, struct urb *urb) -+{ -+ dwc_otg_qh_t *qh; -+ -+ /* Allocate memory */ -+ /** @todo add memflags argument */ -+ qh = dwc_otg_hcd_qh_alloc (); -+ if (qh == NULL) { -+ return NULL; -+ } -+ -+ dwc_otg_hcd_qh_init (hcd, qh, urb); -+ return qh; -+} -+ -+/** Free each QTD in the QH's QTD-list then free the QH. QH should already be -+ * removed from a list. QTD list should already be empty if called from URB -+ * Dequeue. -+ * -+ * @param[in] hcd HCD instance. -+ * @param[in] qh The QH to free. -+ */ -+void dwc_otg_hcd_qh_free (dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh) -+{ -+ dwc_otg_qtd_t *qtd; -+ struct list_head *pos; -+ unsigned long flags; -+ -+ /* Free each QTD in the QTD list */ -+ SPIN_LOCK_IRQSAVE(&hcd->lock, flags) -+ for (pos = qh->qtd_list.next; -+ pos != &qh->qtd_list; -+ pos = qh->qtd_list.next) -+ { -+ list_del (pos); -+ qtd = dwc_list_to_qtd (pos); -+ dwc_otg_hcd_qtd_free (qtd); -+ } -+ SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags) -+ -+ if (qh->dw_align_buf) { -+ dma_free_coherent((dwc_otg_hcd_to_hcd(hcd))->self.controller, -+ hcd->core_if->core_params->max_transfer_size, -+ qh->dw_align_buf, -+ qh->dw_align_buf_dma); -+ } -+ -+ kfree (qh); -+ return; -+} -+ -+/** Initializes a QH structure. -+ * -+ * @param[in] hcd The HCD state structure for the DWC OTG controller. -+ * @param[in] qh The QH to init. -+ * @param[in] urb Holds the information about the device/endpoint that we need -+ * to initialize the QH. */ -+#define SCHEDULE_SLOP 10 -+void dwc_otg_hcd_qh_init(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh, struct urb *urb) -+{ -+ char *speed, *type; -+ memset (qh, 0, sizeof (dwc_otg_qh_t)); -+ -+ /* Initialize QH */ -+ switch (usb_pipetype(urb->pipe)) { -+ case PIPE_CONTROL: -+ qh->ep_type = USB_ENDPOINT_XFER_CONTROL; -+ break; -+ case PIPE_BULK: -+ qh->ep_type = USB_ENDPOINT_XFER_BULK; -+ break; -+ case PIPE_ISOCHRONOUS: -+ qh->ep_type = USB_ENDPOINT_XFER_ISOC; -+ break; -+ case PIPE_INTERRUPT: -+ qh->ep_type = USB_ENDPOINT_XFER_INT; -+ break; -+ } -+ -+ qh->ep_is_in = usb_pipein(urb->pipe) ? 1 : 0; -+ -+ qh->data_toggle = DWC_OTG_HC_PID_DATA0; -+ qh->maxp = usb_maxpacket(urb->dev, urb->pipe, !(usb_pipein(urb->pipe))); -+ INIT_LIST_HEAD(&qh->qtd_list); -+ INIT_LIST_HEAD(&qh->qh_list_entry); -+ qh->channel = NULL; -+ -+ /* FS/LS Enpoint on HS Hub -+ * NOT virtual root hub */ -+ qh->do_split = 0; -+ if (((urb->dev->speed == USB_SPEED_LOW) || -+ (urb->dev->speed == USB_SPEED_FULL)) && -+ (urb->dev->tt) && (urb->dev->tt->hub) && (urb->dev->tt->hub->devnum != 1)) -+ { -+ DWC_DEBUGPL(DBG_HCD, "QH init: EP %d: TT found at hub addr %d, for port %d\n", -+ usb_pipeendpoint(urb->pipe), urb->dev->tt->hub->devnum, -+ urb->dev->ttport); -+ qh->do_split = 1; -+ } -+ -+ if (qh->ep_type == USB_ENDPOINT_XFER_INT || -+ qh->ep_type == USB_ENDPOINT_XFER_ISOC) { -+ /* Compute scheduling parameters once and save them. */ -+ hprt0_data_t hprt; -+ -+ /** @todo Account for split transfers in the bus time. 
*/ -+ int bytecount = dwc_hb_mult(qh->maxp) * dwc_max_packet(qh->maxp); -+ -+ /* FIXME: work-around patch by Steven */ -+ qh->usecs = NS_TO_US(usb_calc_bus_time(urb->dev->speed, -+ usb_pipein(urb->pipe), -+ (qh->ep_type == USB_ENDPOINT_XFER_ISOC), -+ bytecount)); -+ -+ /* Start in a slightly future (micro)frame. */ -+ qh->sched_frame = dwc_frame_num_inc(hcd->frame_number, -+ SCHEDULE_SLOP); -+ qh->interval = urb->interval; -+#if 0 -+ /* Increase interrupt polling rate for debugging. */ -+ if (qh->ep_type == USB_ENDPOINT_XFER_INT) { -+ qh->interval = 8; -+ } -+#endif -+ hprt.d32 = dwc_read_reg32(hcd->core_if->host_if->hprt0); -+ if ((hprt.b.prtspd == DWC_HPRT0_PRTSPD_HIGH_SPEED) && -+ ((urb->dev->speed == USB_SPEED_LOW) || -+ (urb->dev->speed == USB_SPEED_FULL))) { -+ qh->interval *= 8; -+ qh->sched_frame |= 0x7; -+ qh->start_split_frame = qh->sched_frame; -+ } -+ -+ } -+ -+ DWC_DEBUGPL(DBG_HCD, "DWC OTG HCD QH Initialized\n"); -+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - qh = %p\n", qh); -+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Device Address = %d\n", -+ urb->dev->devnum); -+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Endpoint %d, %s\n", -+ usb_pipeendpoint(urb->pipe), -+ usb_pipein(urb->pipe) == USB_DIR_IN ? "IN" : "OUT"); -+ -+ switch(urb->dev->speed) { -+ case USB_SPEED_LOW: -+ speed = "low"; -+ break; -+ case USB_SPEED_FULL: -+ speed = "full"; -+ break; -+ case USB_SPEED_HIGH: -+ speed = "high"; -+ break; -+ default: -+ speed = "?"; -+ break; -+ } -+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Speed = %s\n", speed); -+ -+ switch (qh->ep_type) { -+ case USB_ENDPOINT_XFER_ISOC: -+ type = "isochronous"; -+ break; -+ case USB_ENDPOINT_XFER_INT: -+ type = "interrupt"; -+ break; -+ case USB_ENDPOINT_XFER_CONTROL: -+ type = "control"; -+ break; -+ case USB_ENDPOINT_XFER_BULK: -+ type = "bulk"; -+ break; -+ default: -+ type = "?"; -+ break; -+ } -+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - Type = %s\n",type); -+ -+#ifdef DEBUG -+ if (qh->ep_type == USB_ENDPOINT_XFER_INT) { -+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - usecs = %d\n", -+ qh->usecs); -+ DWC_DEBUGPL(DBG_HCDV, "DWC OTG HCD QH - interval = %d\n", -+ qh->interval); -+ } -+#endif -+ qh->dw_align_buf = NULL; -+ return; -+} -+ -+/** -+ * Checks that a channel is available for a periodic transfer. -+ * -+ * @return 0 if successful, negative error code otherise. -+ */ -+static int periodic_channel_available(dwc_otg_hcd_t *hcd) -+{ -+ /* -+ * Currently assuming that there is a dedicated host channnel for each -+ * periodic transaction plus at least one host channel for -+ * non-periodic transactions. -+ */ -+ int status; -+ int num_channels; -+ -+ num_channels = hcd->core_if->core_params->host_channels; -+ if ((hcd->periodic_channels + hcd->non_periodic_channels < num_channels) && -+ (hcd->periodic_channels < num_channels - 1)) { -+ status = 0; -+ } -+ else { -+ DWC_NOTICE("%s: Total channels: %d, Periodic: %d, Non-periodic: %d\n", -+ __func__, num_channels, hcd->periodic_channels, -+ hcd->non_periodic_channels); -+ status = -ENOSPC; -+ } -+ -+ return status; -+} -+ -+/** -+ * Checks that there is sufficient bandwidth for the specified QH in the -+ * periodic schedule. For simplicity, this calculation assumes that all the -+ * transfers in the periodic schedule may occur in the same (micro)frame. -+ * -+ * @param hcd The HCD state structure for the DWC OTG controller. -+ * @param qh QH containing periodic bandwidth required. -+ * -+ * @return 0 if successful, negative error code otherwise. 
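For a full- or low-speed periodic endpoint reached through a high-speed hub, dwc_otg_hcd_qh_init above converts the URB interval from frames to microframes (interval *= 8) and parks the first start-split late in the frame (sched_frame |= 0x7), starting SCHEDULE_SLOP (10) frames in the future. A small sketch of that conversion; the 0x3FFF wrap mask is the same assumption as in the earlier frame-comparison sketch.

#include <stdint.h>
#include <stdio.h>

/* Mirror of qh->interval *= 8 and qh->sched_frame |= 0x7 for a FS/LS
 * endpoint behind a high-speed hub; the slop of 10 is SCHEDULE_SLOP. */
static void schedule_split_qh(uint16_t cur_frame, int urb_interval,
                              uint16_t *sched_frame, int *interval)
{
    *sched_frame = (uint16_t)((cur_frame + 10) & 0x3FFF); /* slightly in the future */
    *interval = urb_interval * 8;   /* frames -> microframes */
    *sched_frame |= 0x7;            /* first start-split at microframe 7 */
}

int main(void)
{
    uint16_t sched;
    int interval;

    schedule_split_qh(0x0100, 4, &sched, &interval);
    printf("sched_frame=0x%04x interval=%d microframes\n",
           (unsigned)sched, interval);
    return 0;
}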
-+ */ -+static int check_periodic_bandwidth(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh) -+{ -+ int status; -+ uint16_t max_claimed_usecs; -+ -+ status = 0; -+ -+ if (hcd->core_if->core_params->speed == DWC_SPEED_PARAM_HIGH) { -+ /* -+ * High speed mode. -+ * Max periodic usecs is 80% x 125 usec = 100 usec. -+ */ -+ max_claimed_usecs = 100 - qh->usecs; -+ } else { -+ /* -+ * Full speed mode. -+ * Max periodic usecs is 90% x 1000 usec = 900 usec. -+ */ -+ max_claimed_usecs = 900 - qh->usecs; -+ } -+ -+ if (hcd->periodic_usecs > max_claimed_usecs) { -+ DWC_NOTICE("%s: already claimed usecs %d, required usecs %d\n", -+ __func__, hcd->periodic_usecs, qh->usecs); -+ status = -ENOSPC; -+ } -+ -+ return status; -+} -+ -+/** -+ * Checks that the max transfer size allowed in a host channel is large enough -+ * to handle the maximum data transfer in a single (micro)frame for a periodic -+ * transfer. -+ * -+ * @param hcd The HCD state structure for the DWC OTG controller. -+ * @param qh QH for a periodic endpoint. -+ * -+ * @return 0 if successful, negative error code otherwise. -+ */ -+static int check_max_xfer_size(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh) -+{ -+ int status; -+ uint32_t max_xfer_size; -+ uint32_t max_channel_xfer_size; -+ -+ status = 0; -+ -+ max_xfer_size = dwc_max_packet(qh->maxp) * dwc_hb_mult(qh->maxp); -+ max_channel_xfer_size = hcd->core_if->core_params->max_transfer_size; -+ -+ if (max_xfer_size > max_channel_xfer_size) { -+ DWC_NOTICE("%s: Periodic xfer length %d > " -+ "max xfer length for channel %d\n", -+ __func__, max_xfer_size, max_channel_xfer_size); -+ status = -ENOSPC; -+ } -+ -+ return status; -+} -+ -+/** -+ * Schedules an interrupt or isochronous transfer in the periodic schedule. -+ * -+ * @param hcd The HCD state structure for the DWC OTG controller. -+ * @param qh QH for the periodic transfer. The QH should already contain the -+ * scheduling information. -+ * -+ * @return 0 if successful, negative error code otherwise. -+ */ -+static int schedule_periodic(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh) -+{ -+ int status = 0; -+ -+ status = periodic_channel_available(hcd); -+ if (status) { -+ DWC_NOTICE("%s: No host channel available for periodic " -+ "transfer.\n", __func__); -+ return status; -+ } -+ -+ status = check_periodic_bandwidth(hcd, qh); -+ if (status) { -+ DWC_NOTICE("%s: Insufficient periodic bandwidth for " -+ "periodic transfer.\n", __func__); -+ return status; -+ } -+ -+ status = check_max_xfer_size(hcd, qh); -+ if (status) { -+ DWC_NOTICE("%s: Channel max transfer size too small " -+ "for periodic transfer.\n", __func__); -+ return status; -+ } -+ -+ /* Always start in the inactive schedule. */ -+ list_add_tail(&qh->qh_list_entry, &hcd->periodic_sched_inactive); -+ -+ /* Reserve the periodic channel. */ -+ hcd->periodic_channels++; -+ -+ /* Update claimed usecs per (micro)frame. */ -+ hcd->periodic_usecs += qh->usecs; -+ -+ /* Update average periodic bandwidth claimed and # periodic reqs for usbfs. 
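check_periodic_bandwidth above budgets 100 us of each 125 us high-speed microframe (80%) or 900 us of each 1 ms full-speed frame (90%) for periodic traffic, under the stated simplification that every periodic transfer may land in the same (micro)frame. A worked sketch of that admission check:

#include <stdio.h>

/* Returns 0 if the new QH fits in the periodic budget, -1 otherwise. */
static int check_periodic_budget(int high_speed, int claimed_usecs, int qh_usecs)
{
    /* 80% of a 125 us microframe at HS, 90% of a 1000 us frame at FS. */
    int budget = high_speed ? 100 : 900;

    return (claimed_usecs + qh_usecs <= budget) ? 0 : -1;
}

int main(void)
{
    /* HS bus with 60 us already claimed: a 30 us interrupt endpoint fits... */
    printf("%d\n", check_periodic_budget(1, 60, 30));  /* 0  */
    /* ...but a 50 us one would push the microframe past 100 us. */
    printf("%d\n", check_periodic_budget(1, 60, 50));  /* -1 */
    return 0;
}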
*/ -+ hcd_to_bus(dwc_otg_hcd_to_hcd(hcd))->bandwidth_allocated += qh->usecs / qh->interval; -+ if (qh->ep_type == USB_ENDPOINT_XFER_INT) { -+ hcd_to_bus(dwc_otg_hcd_to_hcd(hcd))->bandwidth_int_reqs++; -+ DWC_DEBUGPL(DBG_HCD, "Scheduled intr: qh %p, usecs %d, period %d\n", -+ qh, qh->usecs, qh->interval); -+ } else { -+ hcd_to_bus(dwc_otg_hcd_to_hcd(hcd))->bandwidth_isoc_reqs++; -+ DWC_DEBUGPL(DBG_HCD, "Scheduled isoc: qh %p, usecs %d, period %d\n", -+ qh, qh->usecs, qh->interval); -+ } -+ -+ return status; -+} -+ -+/** -+ * This function adds a QH to either the non periodic or periodic schedule if -+ * it is not already in the schedule. If the QH is already in the schedule, no -+ * action is taken. -+ * -+ * @return 0 if successful, negative error code otherwise. -+ */ -+int dwc_otg_hcd_qh_add (dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh) -+{ -+ unsigned long flags; -+ int status = 0; -+ -+ SPIN_LOCK_IRQSAVE(&hcd->lock, flags) -+ -+ if (!list_empty(&qh->qh_list_entry)) { -+ /* QH already in a schedule. */ -+ goto done; -+ } -+ -+ /* Add the new QH to the appropriate schedule */ -+ if (dwc_qh_is_non_per(qh)) { -+ /* Always start in the inactive schedule. */ -+ list_add_tail(&qh->qh_list_entry, &hcd->non_periodic_sched_inactive); -+ } else { -+ status = schedule_periodic(hcd, qh); -+ } -+ -+ done: -+ SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags) -+ -+ return status; -+} -+ -+/** -+ * Removes an interrupt or isochronous transfer from the periodic schedule. -+ * -+ * @param hcd The HCD state structure for the DWC OTG controller. -+ * @param qh QH for the periodic transfer. -+ */ -+static void deschedule_periodic(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh) -+{ -+ list_del_init(&qh->qh_list_entry); -+ -+ /* Release the periodic channel reservation. */ -+ hcd->periodic_channels--; -+ -+ /* Update claimed usecs per (micro)frame. */ -+ hcd->periodic_usecs -= qh->usecs; -+ -+ /* Update average periodic bandwidth claimed and # periodic reqs for usbfs. */ -+ hcd_to_bus(dwc_otg_hcd_to_hcd(hcd))->bandwidth_allocated -= qh->usecs / qh->interval; -+ -+ if (qh->ep_type == USB_ENDPOINT_XFER_INT) { -+ hcd_to_bus(dwc_otg_hcd_to_hcd(hcd))->bandwidth_int_reqs--; -+ DWC_DEBUGPL(DBG_HCD, "Descheduled intr: qh %p, usecs %d, period %d\n", -+ qh, qh->usecs, qh->interval); -+ } else { -+ hcd_to_bus(dwc_otg_hcd_to_hcd(hcd))->bandwidth_isoc_reqs--; -+ DWC_DEBUGPL(DBG_HCD, "Descheduled isoc: qh %p, usecs %d, period %d\n", -+ qh, qh->usecs, qh->interval); -+ } -+} -+ -+/** -+ * Removes a QH from either the non-periodic or periodic schedule. Memory is -+ * not freed. -+ * -+ * @param[in] hcd The HCD state structure. -+ * @param[in] qh QH to remove from schedule. */ -+void dwc_otg_hcd_qh_remove (dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh) -+{ -+ unsigned long flags; -+ -+ SPIN_LOCK_IRQSAVE(&hcd->lock, flags); -+ -+ if (list_empty(&qh->qh_list_entry)) { -+ /* QH is not in a schedule. */ -+ goto done; -+ } -+ -+ if (dwc_qh_is_non_per(qh)) { -+ if (hcd->non_periodic_qh_ptr == &qh->qh_list_entry) { -+ hcd->non_periodic_qh_ptr = hcd->non_periodic_qh_ptr->next; -+ } -+ list_del_init(&qh->qh_list_entry); -+ } else { -+ deschedule_periodic(hcd, qh); -+ } -+ -+ done: -+ SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags) -+} -+ -+/** -+ * Deactivates a QH. For non-periodic QHs, removes the QH from the active -+ * non-periodic schedule. The QH is added to the inactive non-periodic -+ * schedule if any QTDs are still attached to the QH. -+ * -+ * For periodic QHs, the QH is removed from the periodic queued schedule. 
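dwc_otg_hcd_qh_add() and dwc_otg_hcd_qh_remove() above decide schedule membership with list_empty(&qh->qh_list_entry), which only works because removal always goes through list_del_init() and leaves the entry pointing at itself. A minimal self-contained model of that idiom; the real driver uses <linux/list.h>, and these names are illustrative.

    #include <stdio.h>

    /* An entry whose next/prev point to itself is "not queued", so membership
     * can be tested without a separate flag. */
    struct node { struct node *next, *prev; };

    static void node_init(struct node *n)         { n->next = n->prev = n; }
    static int  node_queued(const struct node *n) { return n->next != n; }

    static void node_add_tail(struct node *n, struct node *head)
    {
        n->prev = head->prev; n->next = head;
        head->prev->next = n; head->prev = n;
    }

    static void node_del_init(struct node *n)
    {
        n->prev->next = n->next; n->next->prev = n->prev;
        node_init(n);             /* leave it self-linked, i.e. "not queued" */
    }

    int main(void)
    {
        struct node sched, qh;
        node_init(&sched); node_init(&qh);
        node_add_tail(&qh, &sched);
        printf("queued: %d\n", node_queued(&qh));   /* 1 */
        node_del_init(&qh);
        printf("queued: %d\n", node_queued(&qh));   /* 0 */
        return 0;
    }

Because dwc_otg_hcd_qh_remove() always ends in list_del_init(), a later dwc_otg_hcd_qh_add() sees the entry as free and re-links it safely.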
If -+ * there are any QTDs still attached to the QH, the QH is added to either the -+ * periodic inactive schedule or the periodic ready schedule and its next -+ * scheduled frame is calculated. The QH is placed in the ready schedule if -+ * the scheduled frame has been reached already. Otherwise it's placed in the -+ * inactive schedule. If there are no QTDs attached to the QH, the QH is -+ * completely removed from the periodic schedule. -+ */ -+void dwc_otg_hcd_qh_deactivate(dwc_otg_hcd_t *hcd, dwc_otg_qh_t *qh, int sched_next_periodic_split) -+{ -+ unsigned long flags; -+ SPIN_LOCK_IRQSAVE(&hcd->lock, flags); -+ -+ if (dwc_qh_is_non_per(qh)) { -+ dwc_otg_hcd_qh_remove(hcd, qh); -+ if (!list_empty(&qh->qtd_list)) { -+ /* Add back to inactive non-periodic schedule. */ -+ dwc_otg_hcd_qh_add(hcd, qh); -+ } -+ } else { -+ uint16_t frame_number = dwc_otg_hcd_get_frame_number(dwc_otg_hcd_to_hcd(hcd)); -+ -+ if (qh->do_split) { -+ /* Schedule the next continuing periodic split transfer */ -+ if (sched_next_periodic_split) { -+ -+ qh->sched_frame = frame_number; -+ if (dwc_frame_num_le(frame_number, -+ dwc_frame_num_inc(qh->start_split_frame, 1))) { -+ /* -+ * Allow one frame to elapse after start -+ * split microframe before scheduling -+ * complete split, but DONT if we are -+ * doing the next start split in the -+ * same frame for an ISOC out. -+ */ -+ if ((qh->ep_type != USB_ENDPOINT_XFER_ISOC) || (qh->ep_is_in != 0)) { -+ qh->sched_frame = dwc_frame_num_inc(qh->sched_frame, 1); -+ } -+ } -+ } else { -+ qh->sched_frame = dwc_frame_num_inc(qh->start_split_frame, -+ qh->interval); -+ if (dwc_frame_num_le(qh->sched_frame, frame_number)) { -+ qh->sched_frame = frame_number; -+ } -+ qh->sched_frame |= 0x7; -+ qh->start_split_frame = qh->sched_frame; -+ } -+ } else { -+ qh->sched_frame = dwc_frame_num_inc(qh->sched_frame, qh->interval); -+ if (dwc_frame_num_le(qh->sched_frame, frame_number)) { -+ qh->sched_frame = frame_number; -+ } -+ } -+ -+ if (list_empty(&qh->qtd_list)) { -+ dwc_otg_hcd_qh_remove(hcd, qh); -+ } else { -+ /* -+ * Remove from periodic_sched_queued and move to -+ * appropriate queue. -+ */ -+ if (qh->sched_frame == frame_number) { -+ list_move(&qh->qh_list_entry, -+ &hcd->periodic_sched_ready); -+ } else { -+ list_move(&qh->qh_list_entry, -+ &hcd->periodic_sched_inactive); -+ } -+ } -+ } -+ -+ SPIN_UNLOCK_IRQRESTORE(&hcd->lock, flags); -+} -+ -+/** -+ * This function allocates and initializes a QTD. -+ * -+ * @param[in] urb The URB to create a QTD from. Each URB-QTD pair will end up -+ * pointing to each other so each pair should have a unique correlation. -+ * -+ * @return Returns pointer to the newly allocated QTD, or NULL on error. */ -+dwc_otg_qtd_t *dwc_otg_hcd_qtd_create (struct urb *urb) -+{ -+ dwc_otg_qtd_t *qtd; -+ -+ qtd = dwc_otg_hcd_qtd_alloc (); -+ if (qtd == NULL) { -+ return NULL; -+ } -+ -+ dwc_otg_hcd_qtd_init (qtd, urb); -+ return qtd; -+} -+ -+/** -+ * Initializes a QTD structure. -+ * -+ * @param[in] qtd The QTD to initialize. -+ * @param[in] urb The URB to use for initialization. */ -+void dwc_otg_hcd_qtd_init (dwc_otg_qtd_t *qtd, struct urb *urb) -+{ -+ memset (qtd, 0, sizeof (dwc_otg_qtd_t)); -+ qtd->urb = urb; -+ if (usb_pipecontrol(urb->pipe)) { -+ /* -+ * The only time the QTD data toggle is used is on the data -+ * phase of control transfers. This phase always starts with -+ * DATA1. 
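The rescheduling above leans on dwc_frame_num_inc() and dwc_frame_num_le() for wrap-around frame arithmetic, and the sched_frame |= 0x7 lines appear to park split transactions in the last microframe of a full-speed frame. A rough standalone model, assuming a 14-bit (micro)frame counter (mask 0x3FFF) of the kind these helpers are usually built around; check the driver headers for the real mask.

    #include <stdint.h>

    #define FRNUM_MASK 0x3FFFu

    static uint16_t frame_num_inc(uint16_t frame, uint16_t inc)
    {
        return (uint16_t)((frame + inc) & FRNUM_MASK);
    }

    /* "frame a is at or before frame b": anything up to half a wrap ahead of
     * a counts as after it. */
    static int frame_num_le(uint16_t a, uint16_t b)
    {
        return (uint16_t)((b - a) & FRNUM_MASK) <= FRNUM_MASK / 2;
    }

For example, frame_num_inc(0x3FFE, 4) is 0x0002, and frame_num_le(0x3FFE, 0x0002) holds because 0x0002 is only four frames ahead across the wrap.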
-+ */ -+ qtd->data_toggle = DWC_OTG_HC_PID_DATA1; -+ qtd->control_phase = DWC_OTG_CONTROL_SETUP; -+ } -+ -+ /* start split */ -+ qtd->complete_split = 0; -+ qtd->isoc_split_pos = DWC_HCSPLIT_XACTPOS_ALL; -+ qtd->isoc_split_offset = 0; -+ -+ /* Store the qtd ptr in the urb to reference what QTD. */ -+ urb->hcpriv = qtd; -+ return; -+} -+ -+/** -+ * This function adds a QTD to the QTD-list of a QH. It will find the correct -+ * QH to place the QTD into. If it does not find a QH, then it will create a -+ * new QH. If the QH to which the QTD is added is not currently scheduled, it -+ * is placed into the proper schedule based on its EP type. -+ * -+ * @param[in] qtd The QTD to add -+ * @param[in] dwc_otg_hcd The DWC HCD structure -+ * -+ * @return 0 if successful, negative error code otherwise. -+ */ -+int dwc_otg_hcd_qtd_add (dwc_otg_qtd_t *qtd, -+ dwc_otg_hcd_t *dwc_otg_hcd) -+{ -+ struct usb_host_endpoint *ep; -+ dwc_otg_qh_t *qh; -+ unsigned long flags; -+ int retval = 0; -+ -+ struct urb *urb = qtd->urb; -+ -+ SPIN_LOCK_IRQSAVE(&dwc_otg_hcd->lock, flags); -+ -+ /* -+ * Get the QH which holds the QTD-list to insert to. Create QH if it -+ * doesn't exist. -+ */ -+ ep = dwc_urb_to_endpoint(urb); -+ qh = (dwc_otg_qh_t *)ep->hcpriv; -+ if (qh == NULL) { -+ qh = dwc_otg_hcd_qh_create (dwc_otg_hcd, urb); -+ if (qh == NULL) { -+ goto done; -+ } -+ ep->hcpriv = qh; -+ } -+ -+ retval = dwc_otg_hcd_qh_add(dwc_otg_hcd, qh); -+ if (retval == 0) { -+ list_add_tail(&qtd->qtd_list_entry, &qh->qtd_list); -+ } -+ -+ done: -+ SPIN_UNLOCK_IRQRESTORE(&dwc_otg_hcd->lock, flags); -+ -+ return retval; -+} -+ -+#endif /* DWC_DEVICE_ONLY */ ---- /dev/null -+++ b/drivers/usb/dwc_otg/dwc_otg_pcd.c -@@ -0,0 +1,2523 @@ -+/* ========================================================================== -+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_pcd.c $ -+ * $Revision: 1.5 $ -+ * $Date: 2008-11-27 09:21:25 $ -+ * $Change: 1115682 $ -+ * -+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, -+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless -+ * otherwise expressly agreed to in writing between Synopsys and you. -+ * -+ * The Software IS NOT an item of Licensed Software or Licensed Product under -+ * any End User Software License Agreement or Agreement for Licensed Product -+ * with Synopsys or any supplement thereto. You are permitted to use and -+ * redistribute this Software in source and binary forms, with or without -+ * modification, provided that redistributions of source code must retain this -+ * notice. You may not view, use, disclose, copy or distribute this file or -+ * any information contained herein except pursuant to this license grant from -+ * Synopsys. If you do not agree with this notice, including the disclaimer -+ * below, then you are not authorized to use the Software. -+ * -+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE HEREBY DISCLAIMED. 
IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, -+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -+ * DAMAGE. -+ * ========================================================================== */ -+#ifndef DWC_HOST_ONLY -+ -+/** @file -+ * This file implements the Peripheral Controller Driver. -+ * -+ * The Peripheral Controller Driver (PCD) is responsible for -+ * translating requests from the Function Driver into the appropriate -+ * actions on the DWC_otg controller. It isolates the Function Driver -+ * from the specifics of the controller by providing an API to the -+ * Function Driver. -+ * -+ * The Peripheral Controller Driver for Linux will implement the -+ * Gadget API, so that the existing Gadget drivers can be used. -+ * (Gadget Driver is the Linux terminology for a Function Driver.) -+ * -+ * The Linux Gadget API is defined in the header file -+ * . The USB EP operations API is -+ * defined in the structure usb_ep_ops and the USB -+ * Controller API is defined in the structure -+ * usb_gadget_ops. -+ * -+ * An important function of the PCD is managing interrupts generated -+ * by the DWC_otg controller. The implementation of the DWC_otg device -+ * mode interrupt service routines is in dwc_otg_pcd_intr.c. -+ * -+ * @todo Add Device Mode test modes (Test J mode, Test K mode, etc). -+ * @todo Does it work when the request size is greater than DEPTSIZ -+ * transfer size -+ * -+ */ -+ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21) -+# include -+#else -+# include -+#endif -+ -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24) -+#include -+#else -+#include -+#endif -+ -+#include "dwc_otg_driver.h" -+#include "dwc_otg_pcd.h" -+ -+ -+/** -+ * Static PCD pointer for use in usb_gadget_register_driver and -+ * usb_gadget_unregister_driver. Initialized in dwc_otg_pcd_init. -+ */ -+static dwc_otg_pcd_t *s_pcd = 0; -+ -+ -+/* Display the contents of the buffer */ -+extern void dump_msg(const u8 *buf, unsigned int length); -+ -+ -+/** -+ * This function completes a request. It call's the request call back. -+ */ -+void dwc_otg_request_done(dwc_otg_pcd_ep_t *ep, dwc_otg_pcd_request_t *req, -+ int status) -+{ -+ unsigned stopped = ep->stopped; -+ -+ DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, ep); -+ list_del_init(&req->queue); -+ -+ if (req->req.status == -EINPROGRESS) { -+ req->req.status = status; -+ } else { -+ status = req->req.status; -+ } -+ -+ /* don't modify queue heads during completion callback */ -+ ep->stopped = 1; -+ SPIN_UNLOCK(&ep->pcd->lock); -+ req->req.complete(&ep->ep, &req->req); -+ SPIN_LOCK(&ep->pcd->lock); -+ -+ if (ep->pcd->request_pending > 0) { -+ --ep->pcd->request_pending; -+ } -+ -+ ep->stopped = stopped; -+} -+ -+/** -+ * This function terminates all the requsts in the EP request queue. -+ */ -+void dwc_otg_request_nuke(dwc_otg_pcd_ep_t *ep) -+{ -+ dwc_otg_pcd_request_t *req; -+ -+ ep->stopped = 1; -+ -+ /* called with irqs blocked?? 
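dwc_otg_request_done() above drops the PCD lock around req->req.complete(), presumably so the gadget driver can resubmit from its callback, while ep->stopped keeps the queue head stable in the meantime. It also preserves any status set before completion (for example -ECONNRESET from a dequeue). A standalone model of just the status handling, with illustrative struct and function names:

    #include <errno.h>
    #include <stdio.h>

    /* A request still marked -EINPROGRESS takes the status supplied by the
     * caller; one that already carries a status keeps it, and that is the
     * value the completion callback sees. */
    struct model_req { int status; };

    static int finish_request(struct model_req *req, int status)
    {
        if (req->status == -EINPROGRESS)
            req->status = status;
        else
            status = req->status;
        return status;
    }

    int main(void)
    {
        struct model_req ok   = { .status = -EINPROGRESS };
        struct model_req dead = { .status = -ECONNRESET };
        printf("%d %d\n", finish_request(&ok, 0), finish_request(&dead, 0));
        return 0;
    }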
*/ -+ while (!list_empty(&ep->queue)) { -+ req = list_entry(ep->queue.next, dwc_otg_pcd_request_t, -+ queue); -+ dwc_otg_request_done(ep, req, -ESHUTDOWN); -+ } -+} -+ -+/* USB Endpoint Operations */ -+/* -+ * The following sections briefly describe the behavior of the Gadget -+ * API endpoint operations implemented in the DWC_otg driver -+ * software. Detailed descriptions of the generic behavior of each of -+ * these functions can be found in the Linux header file -+ * include/linux/usb_gadget.h. -+ * -+ * The Gadget API provides wrapper functions for each of the function -+ * pointers defined in usb_ep_ops. The Gadget Driver calls the wrapper -+ * function, which then calls the underlying PCD function. The -+ * following sections are named according to the wrapper -+ * functions. Within each section, the corresponding DWC_otg PCD -+ * function name is specified. -+ * -+ */ -+ -+/** -+ * This function assigns periodic Tx FIFO to an periodic EP -+ * in shared Tx FIFO mode -+ */ -+static uint32_t assign_perio_tx_fifo(dwc_otg_core_if_t *core_if) -+{ -+ uint32_t PerTxMsk = 1; -+ int i; -+ for(i = 0; i < core_if->hwcfg4.b.num_dev_perio_in_ep; ++i) -+ { -+ if((PerTxMsk & core_if->p_tx_msk) == 0) { -+ core_if->p_tx_msk |= PerTxMsk; -+ return i + 1; -+ } -+ PerTxMsk <<= 1; -+ } -+ return 0; -+} -+/** -+ * This function releases periodic Tx FIFO -+ * in shared Tx FIFO mode -+ */ -+static void release_perio_tx_fifo(dwc_otg_core_if_t *core_if, uint32_t fifo_num) -+{ -+ core_if->p_tx_msk = (core_if->p_tx_msk & (1 << (fifo_num - 1))) ^ core_if->p_tx_msk; -+} -+/** -+ * This function assigns periodic Tx FIFO to an periodic EP -+ * in shared Tx FIFO mode -+ */ -+static uint32_t assign_tx_fifo(dwc_otg_core_if_t *core_if) -+{ -+ uint32_t TxMsk = 1; -+ int i; -+ -+ for(i = 0; i < core_if->hwcfg4.b.num_in_eps; ++i) -+ { -+ if((TxMsk & core_if->tx_msk) == 0) { -+ core_if->tx_msk |= TxMsk; -+ return i + 1; -+ } -+ TxMsk <<= 1; -+ } -+ return 0; -+} -+/** -+ * This function releases periodic Tx FIFO -+ * in shared Tx FIFO mode -+ */ -+static void release_tx_fifo(dwc_otg_core_if_t *core_if, uint32_t fifo_num) -+{ -+ core_if->tx_msk = (core_if->tx_msk & (1 << (fifo_num - 1))) ^ core_if->tx_msk; -+} -+ -+/** -+ * This function is called by the Gadget Driver for each EP to be -+ * configured for the current configuration (SET_CONFIGURATION). -+ * -+ * This function initializes the dwc_otg_ep_t data structure, and then -+ * calls dwc_otg_ep_activate. -+ */ -+static int dwc_otg_pcd_ep_enable(struct usb_ep *usb_ep, -+ const struct usb_endpoint_descriptor *ep_desc) -+{ -+ dwc_otg_pcd_ep_t *ep = 0; -+ dwc_otg_pcd_t *pcd = 0; -+ unsigned long flags; -+ -+ DWC_DEBUGPL(DBG_PCDV,"%s(%p,%p)\n", __func__, usb_ep, ep_desc); -+ -+ ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep); -+ if (!usb_ep || !ep_desc || ep->desc || -+ ep_desc->bDescriptorType != USB_DT_ENDPOINT) { -+ DWC_WARN("%s, bad ep or descriptor\n", __func__); -+ return -EINVAL; -+ } -+ if (ep == &ep->pcd->ep0) { -+ DWC_WARN("%s, bad ep(0)\n", __func__); -+ return -EINVAL; -+ } -+ -+ /* Check FIFO size? 
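assign_perio_tx_fifo() and assign_tx_fifo() above hand out Tx FIFO numbers from a bitmask: the lowest clear bit wins, the return value is 1-based, and 0 means nothing was free. A compact standalone model with illustrative names:

    #include <stdint.h>

    static uint32_t fifo_msk;

    static uint32_t assign_fifo(int num_fifos)
    {
        for (int i = 0; i < num_fifos; i++) {
            if (!(fifo_msk & (1u << i))) {
                fifo_msk |= 1u << i;
                return i + 1;          /* 1-based FIFO number */
            }
        }
        return 0;                      /* no FIFO available */
    }

    static void release_fifo(uint32_t fifo_num)
    {
        if (fifo_num)
            fifo_msk &= ~(1u << (fifo_num - 1));
    }

release_fifo() clears the bit directly, which has the same effect as the mask-and-XOR expression used by release_tx_fifo() in the patch when the bit is set.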
*/ -+ if (!ep_desc->wMaxPacketSize) { -+ DWC_WARN("%s, bad %s maxpacket\n", __func__, usb_ep->name); -+ return -ERANGE; -+ } -+ -+ pcd = ep->pcd; -+ if (!pcd->driver || pcd->gadget.speed == USB_SPEED_UNKNOWN) { -+ DWC_WARN("%s, bogus device state\n", __func__); -+ return -ESHUTDOWN; -+ } -+ -+ SPIN_LOCK_IRQSAVE(&pcd->lock, flags); -+ -+ ep->desc = ep_desc; -+ ep->ep.maxpacket = le16_to_cpu (ep_desc->wMaxPacketSize); -+ -+ /* -+ * Activate the EP -+ */ -+ ep->stopped = 0; -+ -+ ep->dwc_ep.is_in = (USB_DIR_IN & ep_desc->bEndpointAddress) != 0; -+ ep->dwc_ep.maxpacket = ep->ep.maxpacket; -+ -+ ep->dwc_ep.type = ep_desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; -+ -+ if(ep->dwc_ep.is_in) { -+ if(!pcd->otg_dev->core_if->en_multiple_tx_fifo) { -+ ep->dwc_ep.tx_fifo_num = 0; -+ -+ if (ep->dwc_ep.type == USB_ENDPOINT_XFER_ISOC) { -+ /* -+ * if ISOC EP then assign a Periodic Tx FIFO. -+ */ -+ ep->dwc_ep.tx_fifo_num = assign_perio_tx_fifo(pcd->otg_dev->core_if); -+ } -+ } else { -+ /* -+ * if Dedicated FIFOs mode is on then assign a Tx FIFO. -+ */ -+ ep->dwc_ep.tx_fifo_num = assign_tx_fifo(pcd->otg_dev->core_if); -+ -+ } -+ } -+ /* Set initial data PID. */ -+ if (ep->dwc_ep.type == USB_ENDPOINT_XFER_BULK) { -+ ep->dwc_ep.data_pid_start = 0; -+ } -+ -+ DWC_DEBUGPL(DBG_PCD, "Activate %s-%s: type=%d, mps=%d desc=%p\n", -+ ep->ep.name, (ep->dwc_ep.is_in ?"IN":"OUT"), -+ ep->dwc_ep.type, ep->dwc_ep.maxpacket, ep->desc); -+ -+ if(ep->dwc_ep.type != USB_ENDPOINT_XFER_ISOC) { -+ ep->dwc_ep.desc_addr = dwc_otg_ep_alloc_desc_chain(&ep->dwc_ep.dma_desc_addr, MAX_DMA_DESC_CNT); -+ } -+ -+ dwc_otg_ep_activate(GET_CORE_IF(pcd), &ep->dwc_ep); -+ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags); -+ -+ return 0; -+} -+ -+/** -+ * This function is called when an EP is disabled due to disconnect or -+ * change in configuration. Any pending requests will terminate with a -+ * status of -ESHUTDOWN. -+ * -+ * This function modifies the dwc_otg_ep_t data structure for this EP, -+ * and then calls dwc_otg_ep_deactivate. -+ */ -+static int dwc_otg_pcd_ep_disable(struct usb_ep *usb_ep) -+{ -+ dwc_otg_pcd_ep_t *ep; -+ dwc_otg_pcd_t *pcd = 0; -+ unsigned long flags; -+ -+ DWC_DEBUGPL(DBG_PCDV,"%s(%p)\n", __func__, usb_ep); -+ ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep); -+ if (!usb_ep || !ep->desc) { -+ DWC_DEBUGPL(DBG_PCD, "%s, %s not enabled\n", __func__, -+ usb_ep ? ep->ep.name : NULL); -+ return -EINVAL; -+ } -+ -+ SPIN_LOCK_IRQSAVE(&ep->pcd->lock, flags); -+ -+ dwc_otg_request_nuke(ep); -+ -+ dwc_otg_ep_deactivate(GET_CORE_IF(ep->pcd), &ep->dwc_ep); -+ ep->desc = 0; -+ ep->stopped = 1; -+ -+ if(ep->dwc_ep.is_in) { -+ dwc_otg_flush_tx_fifo(GET_CORE_IF(ep->pcd), ep->dwc_ep.tx_fifo_num); -+ release_perio_tx_fifo(GET_CORE_IF(ep->pcd), ep->dwc_ep.tx_fifo_num); -+ release_tx_fifo(GET_CORE_IF(ep->pcd), ep->dwc_ep.tx_fifo_num); -+ } -+ -+ /* Free DMA Descriptors */ -+ pcd = ep->pcd; -+ -+ SPIN_UNLOCK_IRQRESTORE(&ep->pcd->lock, flags); -+ -+ if(ep->dwc_ep.type != USB_ENDPOINT_XFER_ISOC && ep->dwc_ep.desc_addr) { -+ dwc_otg_ep_free_desc_chain(ep->dwc_ep.desc_addr, ep->dwc_ep.dma_desc_addr, MAX_DMA_DESC_CNT); -+ } -+ -+ DWC_DEBUGPL(DBG_PCD, "%s disabled\n", usb_ep->name); -+ return 0; -+} -+ -+ -+/** -+ * This function allocates a request object to use with the specified -+ * endpoint. -+ * -+ * @param ep The endpoint to be used with with the request -+ * @param gfp_flags the GFP_* flags to use. 
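dwc_otg_pcd_ep_enable() above pulls everything it needs from the endpoint descriptor: direction from bEndpointAddress, transfer type from bmAttributes, and the little-endian wMaxPacketSize. A standalone sketch with illustrative constants and struct names, not the kernel's struct usb_endpoint_descriptor:

    #include <stdint.h>
    #include <stdio.h>

    #define EP_DIR_IN        0x80
    #define EP_XFERTYPE_MASK 0x03   /* 0=control 1=isoc 2=bulk 3=interrupt */

    struct ep_desc {
        uint8_t  bEndpointAddress;
        uint8_t  bmAttributes;
        uint16_t wMaxPacketSize;    /* le16_to_cpu() in the driver */
    };

    static void parse_ep_desc(const struct ep_desc *d)
    {
        int is_in     = (d->bEndpointAddress & EP_DIR_IN) != 0;
        int type      = d->bmAttributes & EP_XFERTYPE_MASK;
        int maxpacket = d->wMaxPacketSize;

        printf("dir=%s type=%d mps=%d\n", is_in ? "IN" : "OUT", type, maxpacket);
    }

For a bulk IN descriptor with bEndpointAddress 0x81, bmAttributes 0x02 and wMaxPacketSize 512 this prints dir=IN type=2 mps=512.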
-+ */ -+static struct usb_request *dwc_otg_pcd_alloc_request(struct usb_ep *ep, -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) -+ int gfp_flags -+#else -+ gfp_t gfp_flags -+#endif -+ ) -+{ -+ dwc_otg_pcd_request_t *req; -+ -+ DWC_DEBUGPL(DBG_PCDV,"%s(%p,%d)\n", __func__, ep, gfp_flags); -+ if (0 == ep) { -+ DWC_WARN("%s() %s\n", __func__, "Invalid EP!\n"); -+ return 0; -+ } -+ req = kmalloc(sizeof(dwc_otg_pcd_request_t), gfp_flags); -+ if (0 == req) { -+ DWC_WARN("%s() %s\n", __func__, -+ "request allocation failed!\n"); -+ return 0; -+ } -+ memset(req, 0, sizeof(dwc_otg_pcd_request_t)); -+ req->req.dma = DMA_ADDR_INVALID; -+ INIT_LIST_HEAD(&req->queue); -+ return &req->req; -+} -+ -+/** -+ * This function frees a request object. -+ * -+ * @param ep The endpoint associated with the request -+ * @param req The request being freed -+ */ -+static void dwc_otg_pcd_free_request(struct usb_ep *ep, -+ struct usb_request *req) -+{ -+ dwc_otg_pcd_request_t *request; -+ DWC_DEBUGPL(DBG_PCDV,"%s(%p,%p)\n", __func__, ep, req); -+ -+ if (0 == ep || 0 == req) { -+ DWC_WARN("%s() %s\n", __func__, -+ "Invalid ep or req argument!\n"); -+ return; -+ } -+ -+ request = container_of(req, dwc_otg_pcd_request_t, req); -+ kfree(request); -+} -+ -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) -+/** -+ * This function allocates an I/O buffer to be used for a transfer -+ * to/from the specified endpoint. -+ * -+ * @param usb_ep The endpoint to be used with with the request -+ * @param bytes The desired number of bytes for the buffer -+ * @param dma Pointer to the buffer's DMA address; must be valid -+ * @param gfp_flags the GFP_* flags to use. -+ * @return address of a new buffer or null is buffer could not be allocated. -+ */ -+static void *dwc_otg_pcd_alloc_buffer(struct usb_ep *usb_ep, unsigned bytes, -+ dma_addr_t *dma, -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) -+ int gfp_flags -+#else -+ gfp_t gfp_flags -+#endif -+ ) -+{ -+ void *buf; -+ dwc_otg_pcd_ep_t *ep; -+ dwc_otg_pcd_t *pcd = 0; -+ -+ ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep); -+ pcd = ep->pcd; -+ -+ DWC_DEBUGPL(DBG_PCDV,"%s(%p,%d,%p,%0x)\n", __func__, usb_ep, bytes, -+ dma, gfp_flags); -+ -+ /* Check dword alignment */ -+ if ((bytes & 0x3UL) != 0) { -+ DWC_WARN("%s() Buffer size is not a multiple of" -+ "DWORD size (%d)",__func__, bytes); -+ } -+ -+ if (GET_CORE_IF(pcd)->dma_enable) { -+ buf = dma_alloc_coherent (NULL, bytes, dma, gfp_flags); -+ } -+ else { -+ buf = kmalloc(bytes, gfp_flags); -+ } -+ -+ /* Check dword alignment */ -+ if (((int)buf & 0x3UL) != 0) { -+ DWC_WARN("%s() Buffer is not DWORD aligned (%p)", -+ __func__, buf); -+ } -+ -+ return buf; -+} -+ -+/** -+ * This function frees an I/O buffer that was allocated by alloc_buffer. -+ * -+ * @param usb_ep the endpoint associated with the buffer -+ * @param buf address of the buffer -+ * @param dma The buffer's DMA address -+ * @param bytes The number of bytes of the buffer -+ */ -+static void dwc_otg_pcd_free_buffer(struct usb_ep *usb_ep, void *buf, -+ dma_addr_t dma, unsigned bytes) -+{ -+ dwc_otg_pcd_ep_t *ep; -+ dwc_otg_pcd_t *pcd = 0; -+ -+ ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep); -+ pcd = ep->pcd; -+ -+ DWC_DEBUGPL(DBG_PCDV,"%s(%p,%p,%0x,%d)\n", __func__, ep, buf, dma, bytes); -+ -+ if (GET_CORE_IF(pcd)->dma_enable) { -+ dma_free_coherent (NULL, bytes, buf, dma); -+ } -+ else { -+ kfree(buf); -+ } -+} -+#endif -+ -+ -+/** -+ * This function is used to submit an I/O Request to an EP. 
-+ * -+ * - When the request completes the request's completion callback -+ * is called to return the request to the driver. -+ * - An EP, except control EPs, may have multiple requests -+ * pending. -+ * - Once submitted the request cannot be examined or modified. -+ * - Each request is turned into one or more packets. -+ * - A BULK EP can queue any amount of data; the transfer is -+ * packetized. -+ * - Zero length Packets are specified with the request 'zero' -+ * flag. -+ */ -+static int dwc_otg_pcd_ep_queue(struct usb_ep *usb_ep, -+ struct usb_request *usb_req, -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) -+ int gfp_flags -+#else -+ gfp_t gfp_flags -+#endif -+ ) -+{ -+ int prevented = 0; -+ dwc_otg_pcd_request_t *req; -+ dwc_otg_pcd_ep_t *ep; -+ dwc_otg_pcd_t *pcd; -+ unsigned long flags = 0; -+ dwc_otg_core_if_t *_core_if; -+ -+ DWC_DEBUGPL(DBG_PCDV,"%s(%p,%p,%d)\n", -+ __func__, usb_ep, usb_req, gfp_flags); -+ -+ req = container_of(usb_req, dwc_otg_pcd_request_t, req); -+ if (!usb_req || !usb_req->complete || !usb_req->buf || -+ !list_empty(&req->queue)) { -+ DWC_WARN("%s, bad params\n", __func__); -+ return -EINVAL; -+ } -+ -+ ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep); -+ if (!usb_ep || (!ep->desc && ep->dwc_ep.num != 0)/* || ep->stopped != 0*/) { -+ DWC_WARN("%s, bad ep\n", __func__); -+ return -EINVAL; -+ } -+ -+ pcd = ep->pcd; -+ if (!pcd->driver || pcd->gadget.speed == USB_SPEED_UNKNOWN) { -+ DWC_DEBUGPL(DBG_PCDV, "gadget.speed=%d\n", pcd->gadget.speed); -+ DWC_WARN("%s, bogus device state\n", __func__); -+ return -ESHUTDOWN; -+ } -+ -+ -+ DWC_DEBUGPL(DBG_PCD, "%s queue req %p, len %d buf %p\n", -+ usb_ep->name, usb_req, usb_req->length, usb_req->buf); -+ -+ if (!GET_CORE_IF(pcd)->core_params->opt) { -+ if (ep->dwc_ep.num != 0) { -+ DWC_ERROR("%s queue req %p, len %d buf %p\n", -+ usb_ep->name, usb_req, usb_req->length, usb_req->buf); -+ } -+ } -+ -+ SPIN_LOCK_IRQSAVE(&ep->pcd->lock, flags); -+ -+ -+ /************************************************** -+ New add by kaiker ,for DMA mode bug -+ ************************************************/ -+ //by kaiker ,for RT3052 USB OTG device mode -+ -+ _core_if = GET_CORE_IF(pcd); -+ -+ if (_core_if->dma_enable) -+ { -+ usb_req->dma = virt_to_phys((void *)usb_req->buf); -+ -+ if(ep->dwc_ep.is_in) -+ { -+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)) || defined(CONFIG_MIPS) -+ if(usb_req->length) -+ dma_cache_wback_inv((unsigned long)usb_req->buf, usb_req->length + 2); -+#endif -+ } -+ } -+ -+ -+ -+#if defined(DEBUG) & defined(VERBOSE) -+ dump_msg(usb_req->buf, usb_req->length); -+#endif -+ -+ usb_req->status = -EINPROGRESS; -+ usb_req->actual = 0; -+ -+ /* -+ * For EP0 IN without premature status, zlp is required? -+ */ -+ if (ep->dwc_ep.num == 0 && ep->dwc_ep.is_in) { -+ DWC_DEBUGPL(DBG_PCDV, "%s-OUT ZLP\n", usb_ep->name); -+ //_req->zero = 1; -+ } -+ -+ /* Start the transfer */ -+ if (list_empty(&ep->queue) && !ep->stopped) { -+ /* EP0 Transfer? 
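Of the queueing rules listed above, the 'zero' flag has the least obvious condition: a trailing zero-length packet is only armed when the transfer is non-empty and ends exactly on a packet boundary, which is how the sent_zlp handling further down behaves. A one-function model; the name needs_zlp is illustrative.

    /* ZLP only when the gadget asked for one and the length is a whole,
     * non-zero multiple of maxpacket. */
    static int needs_zlp(unsigned length, unsigned maxpacket, int zero_flag)
    {
        return zero_flag && length != 0 && (length % maxpacket) == 0;
    }

needs_zlp(1024, 512, 1) is 1; needs_zlp(1000, 512, 1) and needs_zlp(0, 512, 1) are 0.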
*/ -+ if (ep->dwc_ep.num == 0) { -+ switch (pcd->ep0state) { -+ case EP0_IN_DATA_PHASE: -+ DWC_DEBUGPL(DBG_PCD, -+ "%s ep0: EP0_IN_DATA_PHASE\n", -+ __func__); -+ break; -+ -+ case EP0_OUT_DATA_PHASE: -+ DWC_DEBUGPL(DBG_PCD, -+ "%s ep0: EP0_OUT_DATA_PHASE\n", -+ __func__); -+ if (pcd->request_config) { -+ /* Complete STATUS PHASE */ -+ ep->dwc_ep.is_in = 1; -+ pcd->ep0state = EP0_IN_STATUS_PHASE; -+ } -+ break; -+ -+ case EP0_IN_STATUS_PHASE: -+ DWC_DEBUGPL(DBG_PCD, -+ "%s ep0: EP0_IN_STATUS_PHASE\n", -+ __func__); -+ break; -+ -+ default: -+ DWC_DEBUGPL(DBG_ANY, "ep0: odd state %d\n", -+ pcd->ep0state); -+ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags); -+ return -EL2HLT; -+ } -+ ep->dwc_ep.dma_addr = usb_req->dma; -+ ep->dwc_ep.start_xfer_buff = usb_req->buf; -+ ep->dwc_ep.xfer_buff = usb_req->buf; -+ ep->dwc_ep.xfer_len = usb_req->length; -+ ep->dwc_ep.xfer_count = 0; -+ ep->dwc_ep.sent_zlp = 0; -+ ep->dwc_ep.total_len = ep->dwc_ep.xfer_len; -+ -+ if(usb_req->zero) { -+ if((ep->dwc_ep.xfer_len % ep->dwc_ep.maxpacket == 0) -+ && (ep->dwc_ep.xfer_len != 0)) { -+ ep->dwc_ep.sent_zlp = 1; -+ } -+ -+ } -+ -+ dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd), &ep->dwc_ep); -+ } -+ else { -+ -+ uint32_t max_transfer = GET_CORE_IF(ep->pcd)->core_params->max_transfer_size; -+ -+ /* Setup and start the Transfer */ -+ ep->dwc_ep.dma_addr = usb_req->dma; -+ ep->dwc_ep.start_xfer_buff = usb_req->buf; -+ ep->dwc_ep.xfer_buff = usb_req->buf; -+ ep->dwc_ep.sent_zlp = 0; -+ ep->dwc_ep.total_len = usb_req->length; -+ ep->dwc_ep.xfer_len = 0; -+ ep->dwc_ep.xfer_count = 0; -+ -+ if(max_transfer > MAX_TRANSFER_SIZE) { -+ ep->dwc_ep.maxxfer = max_transfer - (max_transfer % ep->dwc_ep.maxpacket); -+ } else { -+ ep->dwc_ep.maxxfer = max_transfer; -+ } -+ -+ if(usb_req->zero) { -+ if((ep->dwc_ep.total_len % ep->dwc_ep.maxpacket == 0) -+ && (ep->dwc_ep.total_len != 0)) { -+ ep->dwc_ep.sent_zlp = 1; -+ } -+ -+ } -+ dwc_otg_ep_start_transfer(GET_CORE_IF(pcd), &ep->dwc_ep); -+ } -+ } -+ -+ if ((req != 0) || prevented) { -+ ++pcd->request_pending; -+ list_add_tail(&req->queue, &ep->queue); -+ if (ep->dwc_ep.is_in && ep->stopped && !(GET_CORE_IF(pcd)->dma_enable)) { -+ /** @todo NGS Create a function for this. */ -+ diepmsk_data_t diepmsk = { .d32 = 0}; -+ diepmsk.b.intktxfemp = 1; -+ if(&GET_CORE_IF(pcd)->multiproc_int_enable) { -+ dwc_modify_reg32(&GET_CORE_IF(pcd)->dev_if->dev_global_regs->diepeachintmsk[ep->dwc_ep.num], -+ 0, diepmsk.d32); -+ } else { -+ dwc_modify_reg32(&GET_CORE_IF(pcd)->dev_if->dev_global_regs->diepmsk, 0, diepmsk.d32); -+ } -+ } -+ } -+ -+ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags); -+ return 0; -+} -+ -+/** -+ * This function cancels an I/O request from an EP. -+ */ -+static int dwc_otg_pcd_ep_dequeue(struct usb_ep *usb_ep, -+ struct usb_request *usb_req) -+{ -+ dwc_otg_pcd_request_t *req; -+ dwc_otg_pcd_ep_t *ep; -+ dwc_otg_pcd_t *pcd; -+ unsigned long flags; -+ -+ DWC_DEBUGPL(DBG_PCDV,"%s(%p,%p)\n", __func__, usb_ep, usb_req); -+ -+ ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep); -+ if (!usb_ep || !usb_req || (!ep->desc && ep->dwc_ep.num != 0)) { -+ DWC_WARN("%s, bad argument\n", __func__); -+ return -EINVAL; -+ } -+ pcd = ep->pcd; -+ if (!pcd->driver || pcd->gadget.speed == USB_SPEED_UNKNOWN) { -+ DWC_WARN("%s, bogus device state\n", __func__); -+ return -ESHUTDOWN; -+ } -+ -+ SPIN_LOCK_IRQSAVE(&pcd->lock, flags); -+ DWC_DEBUGPL(DBG_PCDV, "%s %s %s %p\n", __func__, usb_ep->name, -+ ep->dwc_ep.is_in ? 
"IN" : "OUT", -+ usb_req); -+ -+ /* make sure it's actually queued on this endpoint */ -+ list_for_each_entry(req, &ep->queue, queue) -+ { -+ if (&req->req == usb_req) { -+ break; -+ } -+ } -+ -+ if (&req->req != usb_req) { -+ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags); -+ return -EINVAL; -+ } -+ -+ if (!list_empty(&req->queue)) { -+ dwc_otg_request_done(ep, req, -ECONNRESET); -+ } -+ else { -+ req = 0; -+ } -+ -+ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags); -+ -+ return req ? 0 : -EOPNOTSUPP; -+} -+ -+/** -+ * usb_ep_set_halt stalls an endpoint. -+ * -+ * usb_ep_clear_halt clears an endpoint halt and resets its data -+ * toggle. -+ * -+ * Both of these functions are implemented with the same underlying -+ * function. The behavior depends on the value argument. -+ * -+ * @param[in] usb_ep the Endpoint to halt or clear halt. -+ * @param[in] value -+ * - 0 means clear_halt. -+ * - 1 means set_halt, -+ * - 2 means clear stall lock flag. -+ * - 3 means set stall lock flag. -+ */ -+static int dwc_otg_pcd_ep_set_halt(struct usb_ep *usb_ep, int value) -+{ -+ int retval = 0; -+ unsigned long flags; -+ dwc_otg_pcd_ep_t *ep = 0; -+ -+ -+ DWC_DEBUGPL(DBG_PCD,"HALT %s %d\n", usb_ep->name, value); -+ -+ ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep); -+ -+ if (!usb_ep || (!ep->desc && ep != &ep->pcd->ep0) || -+ ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) { -+ DWC_WARN("%s, bad ep\n", __func__); -+ return -EINVAL; -+ } -+ -+ SPIN_LOCK_IRQSAVE(&ep->pcd->lock, flags); -+ if (!list_empty(&ep->queue)) { -+ DWC_WARN("%s() %s XFer In process\n", __func__, usb_ep->name); -+ retval = -EAGAIN; -+ } -+ else if (value == 0) { -+ dwc_otg_ep_clear_stall(ep->pcd->otg_dev->core_if, -+ &ep->dwc_ep); -+ } -+ else if(value == 1) { -+ if (ep->dwc_ep.is_in == 1 && ep->pcd->otg_dev->core_if->dma_desc_enable) { -+ dtxfsts_data_t txstatus; -+ fifosize_data_t txfifosize; -+ -+ txfifosize.d32 = dwc_read_reg32(&ep->pcd->otg_dev->core_if->core_global_regs->dptxfsiz_dieptxf[ep->dwc_ep.tx_fifo_num]); -+ txstatus.d32 = dwc_read_reg32(&ep->pcd->otg_dev->core_if->dev_if->in_ep_regs[ep->dwc_ep.num]->dtxfsts); -+ -+ if(txstatus.b.txfspcavail < txfifosize.b.depth) { -+ DWC_WARN("%s() %s Data In Tx Fifo\n", __func__, usb_ep->name); -+ retval = -EAGAIN; -+ } -+ else { -+ if (ep->dwc_ep.num == 0) { -+ ep->pcd->ep0state = EP0_STALL; -+ } -+ -+ ep->stopped = 1; -+ dwc_otg_ep_set_stall(ep->pcd->otg_dev->core_if, -+ &ep->dwc_ep); -+ } -+ } -+ else { -+ if (ep->dwc_ep.num == 0) { -+ ep->pcd->ep0state = EP0_STALL; -+ } -+ -+ ep->stopped = 1; -+ dwc_otg_ep_set_stall(ep->pcd->otg_dev->core_if, -+ &ep->dwc_ep); -+ } -+ } -+ else if (value == 2) { -+ ep->dwc_ep.stall_clear_flag = 0; -+ } -+ else if (value == 3) { -+ ep->dwc_ep.stall_clear_flag = 1; -+ } -+ -+ SPIN_UNLOCK_IRQRESTORE(&ep->pcd->lock, flags); -+ return retval; -+} -+ -+/** -+ * This function allocates a DMA Descriptor chain for the Endpoint -+ * buffer to be used for a transfer to/from the specified endpoint. -+ */ -+dwc_otg_dma_desc_t* dwc_otg_ep_alloc_desc_chain(uint32_t * dma_desc_addr, uint32_t count) -+{ -+ -+ return dma_alloc_coherent(NULL, count * sizeof(dwc_otg_dma_desc_t), dma_desc_addr, GFP_KERNEL); -+} -+ -+/** -+ * This function frees a DMA Descriptor chain that was allocated by ep_alloc_desc. 
-+ */ -+void dwc_otg_ep_free_desc_chain(dwc_otg_dma_desc_t* desc_addr, uint32_t dma_desc_addr, uint32_t count) -+{ -+ dma_free_coherent(NULL, count * sizeof(dwc_otg_dma_desc_t), desc_addr, dma_desc_addr); -+} -+ -+#ifdef DWC_EN_ISOC -+ -+/** -+ * This function initializes a descriptor chain for Isochronous transfer -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param dwc_ep The EP to start the transfer on. -+ * -+ */ -+void dwc_otg_iso_ep_start_ddma_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *dwc_ep) -+{ -+ -+ dsts_data_t dsts = { .d32 = 0}; -+ depctl_data_t depctl = { .d32 = 0 }; -+ volatile uint32_t *addr; -+ int i, j; -+ -+ if(dwc_ep->is_in) -+ dwc_ep->desc_cnt = dwc_ep->buf_proc_intrvl / dwc_ep->bInterval; -+ else -+ dwc_ep->desc_cnt = dwc_ep->buf_proc_intrvl * dwc_ep->pkt_per_frm / dwc_ep->bInterval; -+ -+ -+ /** Allocate descriptors for double buffering */ -+ dwc_ep->iso_desc_addr = dwc_otg_ep_alloc_desc_chain(&dwc_ep->iso_dma_desc_addr,dwc_ep->desc_cnt*2); -+ if(dwc_ep->desc_addr) { -+ DWC_WARN("%s, can't allocate DMA descriptor chain\n", __func__); -+ return; -+ } -+ -+ dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts); -+ -+ /** ISO OUT EP */ -+ if(dwc_ep->is_in == 0) { -+ desc_sts_data_t sts = { .d32 =0 }; -+ dwc_otg_dma_desc_t* dma_desc = dwc_ep->iso_desc_addr; -+ dma_addr_t dma_ad; -+ uint32_t data_per_desc; -+ dwc_otg_dev_out_ep_regs_t *out_regs = -+ core_if->dev_if->out_ep_regs[dwc_ep->num]; -+ int offset; -+ -+ addr = &core_if->dev_if->out_ep_regs[dwc_ep->num]->doepctl; -+ dma_ad = (dma_addr_t)dwc_read_reg32(&(out_regs->doepdma)); -+ -+ /** Buffer 0 descriptors setup */ -+ dma_ad = dwc_ep->dma_addr0; -+ -+ sts.b_iso_out.bs = BS_HOST_READY; -+ sts.b_iso_out.rxsts = 0; -+ sts.b_iso_out.l = 0; -+ sts.b_iso_out.sp = 0; -+ sts.b_iso_out.ioc = 0; -+ sts.b_iso_out.pid = 0; -+ sts.b_iso_out.framenum = 0; -+ -+ offset = 0; -+ for(i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm; i+= dwc_ep->pkt_per_frm) -+ { -+ -+ for(j = 0; j < dwc_ep->pkt_per_frm; ++j) -+ { -+ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? -+ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; -+ -+ data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0; -+ sts.b_iso_out.rxbytes = data_per_desc; -+ writel((uint32_t)dma_ad, &dma_desc->buf); -+ writel(sts.d32, &dma_desc->status); -+ -+ offset += data_per_desc; -+ dma_desc ++; -+ (uint32_t)dma_ad += data_per_desc; -+ } -+ } -+ -+ for(j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) -+ { -+ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? -+ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; -+ data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0; -+ sts.b_iso_out.rxbytes = data_per_desc; -+ writel((uint32_t)dma_ad, &dma_desc->buf); -+ writel(sts.d32, &dma_desc->status); -+ -+ offset += data_per_desc; -+ dma_desc ++; -+ (uint32_t)dma_ad += data_per_desc; -+ } -+ -+ sts.b_iso_out.ioc = 1; -+ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? -+ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; -+ data_per_desc += (data_per_desc % 4) ? 
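dwc_otg_ep_alloc_desc_chain() and dwc_otg_ep_free_desc_chain() above wrap dma_alloc_coherent()/dma_free_coherent(): one contiguous block of `count` descriptors, a CPU pointer for the driver, a bus address reported through the out-parameter, and the same element count handed back on free. A rough userspace stand-in with calloc()/free() in place of the DMA API; the descriptor layout and type names are illustrative, not the controller's.

    #include <stdint.h>
    #include <stdlib.h>

    typedef struct { uint32_t buf; uint32_t status; } model_dma_desc_t;

    static model_dma_desc_t *alloc_desc_chain(uint32_t *dma_addr, uint32_t count)
    {
        model_dma_desc_t *desc = calloc(count, sizeof(*desc));

        /* The real code gets the device-visible address from the DMA API. */
        *dma_addr = (uint32_t)(uintptr_t)desc;
        return desc;
    }

    static void free_desc_chain(model_dma_desc_t *desc, uint32_t count)
    {
        (void)count;   /* dma_free_coherent() needs the size back; free() does not */
        free(desc);
    }

In the patch, non-isochronous endpoints allocate MAX_DMA_DESC_CNT descriptors in ep_enable() and free the same count in ep_disable().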
(4 - data_per_desc % 4):0; -+ sts.b_iso_out.rxbytes = data_per_desc; -+ -+ writel((uint32_t)dma_ad, &dma_desc->buf); -+ writel(sts.d32, &dma_desc->status); -+ dma_desc ++; -+ -+ /** Buffer 1 descriptors setup */ -+ sts.b_iso_out.ioc = 0; -+ dma_ad = dwc_ep->dma_addr1; -+ -+ offset = 0; -+ for(i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm; i+= dwc_ep->pkt_per_frm) -+ { -+ for(j = 0; j < dwc_ep->pkt_per_frm; ++j) -+ { -+ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? -+ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; -+ data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0; -+ sts.b_iso_out.rxbytes = data_per_desc; -+ writel((uint32_t)dma_ad, &dma_desc->buf); -+ writel(sts.d32, &dma_desc->status); -+ -+ offset += data_per_desc; -+ dma_desc ++; -+ (uint32_t)dma_ad += data_per_desc; -+ } -+ } -+ for(j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) -+ { -+ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? -+ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; -+ data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0; -+ sts.b_iso_out.rxbytes = data_per_desc; -+ writel((uint32_t)dma_ad, &dma_desc->buf); -+ writel(sts.d32, &dma_desc->status); -+ -+ offset += data_per_desc; -+ dma_desc ++; -+ (uint32_t)dma_ad += data_per_desc; -+ } -+ -+ sts.b_iso_out.ioc = 1; -+ sts.b_iso_out.l = 1; -+ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? -+ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; -+ data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0; -+ sts.b_iso_out.rxbytes = data_per_desc; -+ -+ writel((uint32_t)dma_ad, &dma_desc->buf); -+ writel(sts.d32, &dma_desc->status); -+ -+ dwc_ep->next_frame = 0; -+ -+ /** Write dma_ad into DOEPDMA register */ -+ dwc_write_reg32(&(out_regs->doepdma),(uint32_t)dwc_ep->iso_dma_desc_addr); -+ -+ } -+ /** ISO IN EP */ -+ else { -+ desc_sts_data_t sts = { .d32 =0 }; -+ dwc_otg_dma_desc_t* dma_desc = dwc_ep->iso_desc_addr; -+ dma_addr_t dma_ad; -+ dwc_otg_dev_in_ep_regs_t *in_regs = -+ core_if->dev_if->in_ep_regs[dwc_ep->num]; -+ unsigned int frmnumber; -+ fifosize_data_t txfifosize,rxfifosize; -+ -+ txfifosize.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[dwc_ep->num]->dtxfsts); -+ rxfifosize.d32 = dwc_read_reg32(&core_if->core_global_regs->grxfsiz); -+ -+ -+ addr = &core_if->dev_if->in_ep_regs[dwc_ep->num]->diepctl; -+ -+ dma_ad = dwc_ep->dma_addr0; -+ -+ dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts); -+ -+ sts.b_iso_in.bs = BS_HOST_READY; -+ sts.b_iso_in.txsts = 0; -+ sts.b_iso_in.sp = (dwc_ep->data_per_frame % dwc_ep->maxpacket)? 
1 : 0; -+ sts.b_iso_in.ioc = 0; -+ sts.b_iso_in.pid = dwc_ep->pkt_per_frm; -+ -+ -+ frmnumber = dwc_ep->next_frame; -+ -+ sts.b_iso_in.framenum = frmnumber; -+ sts.b_iso_in.txbytes = dwc_ep->data_per_frame; -+ sts.b_iso_in.l = 0; -+ -+ /** Buffer 0 descriptors setup */ -+ for(i = 0; i < dwc_ep->desc_cnt - 1; i++) -+ { -+ writel((uint32_t)dma_ad, &dma_desc->buf); -+ writel(sts.d32, &dma_desc->status); -+ dma_desc ++; -+ -+ (uint32_t)dma_ad += dwc_ep->data_per_frame; -+ sts.b_iso_in.framenum += dwc_ep->bInterval; -+ } -+ -+ sts.b_iso_in.ioc = 1; -+ writel((uint32_t)dma_ad, &dma_desc->buf); -+ writel(sts.d32, &dma_desc->status); -+ ++dma_desc; -+ -+ /** Buffer 1 descriptors setup */ -+ sts.b_iso_in.ioc = 0; -+ dma_ad = dwc_ep->dma_addr1; -+ -+ for(i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm; i+= dwc_ep->pkt_per_frm) -+ { -+ writel((uint32_t)dma_ad, &dma_desc->buf); -+ writel(sts.d32, &dma_desc->status); -+ dma_desc ++; -+ -+ (uint32_t)dma_ad += dwc_ep->data_per_frame; -+ sts.b_iso_in.framenum += dwc_ep->bInterval; -+ -+ sts.b_iso_in.ioc = 0; -+ } -+ sts.b_iso_in.ioc = 1; -+ sts.b_iso_in.l = 1; -+ -+ writel((uint32_t)dma_ad, &dma_desc->buf); -+ writel(sts.d32, &dma_desc->status); -+ -+ dwc_ep->next_frame = sts.b_iso_in.framenum + dwc_ep->bInterval; -+ -+ /** Write dma_ad into diepdma register */ -+ dwc_write_reg32(&(in_regs->diepdma),(uint32_t)dwc_ep->iso_dma_desc_addr); -+ } -+ /** Enable endpoint, clear nak */ -+ depctl.d32 = 0; -+ depctl.b.epena = 1; -+ depctl.b.usbactep = 1; -+ depctl.b.cnak = 1; -+ -+ dwc_modify_reg32(addr, depctl.d32,depctl.d32); -+ depctl.d32 = dwc_read_reg32(addr); -+} -+ -+/** -+ * This function initializes a descriptor chain for Isochronous transfer -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param ep The EP to start the transfer on. -+ * -+ */ -+ -+void dwc_otg_iso_ep_start_buf_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) -+{ -+ depctl_data_t depctl = { .d32 = 0 }; -+ volatile uint32_t *addr; -+ -+ -+ if(ep->is_in) { -+ addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl; -+ } else { -+ addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl; -+ } -+ -+ -+ if(core_if->dma_enable == 0 || core_if->dma_desc_enable!= 0) { -+ return; -+ } else { -+ deptsiz_data_t deptsiz = { .d32 = 0 }; -+ -+ ep->xfer_len = ep->data_per_frame * ep->buf_proc_intrvl / ep->bInterval; -+ ep->pkt_cnt = (ep->xfer_len - 1 + ep->maxpacket) / -+ ep->maxpacket; -+ ep->xfer_count = 0; -+ ep->xfer_buff = (ep->proc_buf_num) ? ep->xfer_buff1 : ep->xfer_buff0; -+ ep->dma_addr = (ep->proc_buf_num) ? ep->dma_addr1 : ep->dma_addr0; -+ -+ if(ep->is_in) { -+ /* Program the transfer size and packet count -+ * as follows: xfersize = N * maxpacket + -+ * short_packet pktcnt = N + (short_packet -+ * exist ? 
1 : 0) -+ */ -+ deptsiz.b.mc = ep->pkt_per_frm; -+ deptsiz.b.xfersize = ep->xfer_len; -+ deptsiz.b.pktcnt = -+ (ep->xfer_len - 1 + ep->maxpacket) / -+ ep->maxpacket; -+ dwc_write_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dieptsiz, deptsiz.d32); -+ -+ /* Write the DMA register */ -+ dwc_write_reg32 (&(core_if->dev_if->in_ep_regs[ep->num]->diepdma), (uint32_t)ep->dma_addr); -+ -+ } else { -+ deptsiz.b.pktcnt = -+ (ep->xfer_len + (ep->maxpacket - 1)) / -+ ep->maxpacket; -+ deptsiz.b.xfersize = deptsiz.b.pktcnt * ep->maxpacket; -+ -+ dwc_write_reg32(&core_if->dev_if->out_ep_regs[ep->num]->doeptsiz, deptsiz.d32); -+ -+ /* Write the DMA register */ -+ dwc_write_reg32 (&(core_if->dev_if->out_ep_regs[ep->num]->doepdma), (uint32_t)ep->dma_addr); -+ -+ } -+ /** Enable endpoint, clear nak */ -+ depctl.d32 = 0; -+ dwc_modify_reg32(addr, depctl.d32,depctl.d32); -+ -+ depctl.b.epena = 1; -+ depctl.b.cnak = 1; -+ -+ dwc_modify_reg32(addr, depctl.d32,depctl.d32); -+ } -+} -+ -+ -+/** -+ * This function does the setup for a data transfer for an EP and -+ * starts the transfer. For an IN transfer, the packets will be -+ * loaded into the appropriate Tx FIFO in the ISR. For OUT transfers, -+ * the packets are unloaded from the Rx FIFO in the ISR. the ISR. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param ep The EP to start the transfer on. -+ */ -+ -+void dwc_otg_iso_ep_start_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) -+{ -+ if(core_if->dma_enable) { -+ if(core_if->dma_desc_enable) { -+ if(ep->is_in) { -+ ep->desc_cnt = ep->pkt_cnt / ep->pkt_per_frm; -+ } else { -+ ep->desc_cnt = ep->pkt_cnt; -+ } -+ dwc_otg_iso_ep_start_ddma_transfer(core_if, ep); -+ } else { -+ if(core_if->pti_enh_enable) { -+ dwc_otg_iso_ep_start_buf_transfer(core_if, ep); -+ } else { -+ ep->cur_pkt_addr = (ep->proc_buf_num) ? ep->xfer_buff1 : ep->xfer_buff0; -+ ep->cur_pkt_dma_addr = (ep->proc_buf_num) ? ep->dma_addr1 : ep->dma_addr0; -+ dwc_otg_iso_ep_start_frm_transfer(core_if, ep); -+ } -+ } -+ } else { -+ ep->cur_pkt_addr = (ep->proc_buf_num) ? ep->xfer_buff1 : ep->xfer_buff0; -+ ep->cur_pkt_dma_addr = (ep->proc_buf_num) ? ep->dma_addr1 : ep->dma_addr0; -+ dwc_otg_iso_ep_start_frm_transfer(core_if, ep); -+ } -+} -+ -+/** -+ * This function does the setup for a data transfer for an EP and -+ * starts the transfer. For an IN transfer, the packets will be -+ * loaded into the appropriate Tx FIFO in the ISR. For OUT transfers, -+ * the packets are unloaded from the Rx FIFO in the ISR. the ISR. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param ep The EP to start the transfer on. 
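The DEPTSIZ programming above follows the formula in the comment: pktcnt is a ceiling division of the transfer length by maxpacket, and for OUT endpoints xfersize is then rounded up to whole packets. A small worked example; the helper name is illustrative.

    #include <stdio.h>

    static unsigned pktcnt(unsigned xfer_len, unsigned maxpacket)
    {
        /* same value as the (len - 1 + mps) / mps form used for IN above */
        return (xfer_len + maxpacket - 1) / maxpacket;
    }

    int main(void)
    {
        unsigned maxpacket = 512, xfer_len = 1500;
        unsigned pkts      = pktcnt(xfer_len, maxpacket);   /* 3 packets      */
        unsigned out_size  = pkts * maxpacket;              /* 1536 bytes out */

        printf("pktcnt=%u out_xfersize=%u\n", pkts, out_size);
        return 0;
    }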
-+ */ -+ -+void dwc_otg_iso_ep_stop_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) -+{ -+ depctl_data_t depctl = { .d32 = 0 }; -+ volatile uint32_t *addr; -+ -+ if(ep->is_in == 1) { -+ addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl; -+ } -+ else { -+ addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl; -+ } -+ -+ /* disable the ep */ -+ depctl.d32 = dwc_read_reg32(addr); -+ -+ depctl.b.epdis = 1; -+ depctl.b.snak = 1; -+ -+ dwc_write_reg32(addr, depctl.d32); -+ -+ if(core_if->dma_desc_enable && -+ ep->iso_desc_addr && ep->iso_dma_desc_addr) { -+ dwc_otg_ep_free_desc_chain(ep->iso_desc_addr,ep->iso_dma_desc_addr,ep->desc_cnt * 2); -+ } -+ -+ /* reset varibales */ -+ ep->dma_addr0 = 0; -+ ep->dma_addr1 = 0; -+ ep->xfer_buff0 = 0; -+ ep->xfer_buff1 = 0; -+ ep->data_per_frame = 0; -+ ep->data_pattern_frame = 0; -+ ep->sync_frame = 0; -+ ep->buf_proc_intrvl = 0; -+ ep->bInterval = 0; -+ ep->proc_buf_num = 0; -+ ep->pkt_per_frm = 0; -+ ep->pkt_per_frm = 0; -+ ep->desc_cnt = 0; -+ ep->iso_desc_addr = 0; -+ ep->iso_dma_desc_addr = 0; -+} -+ -+ -+/** -+ * This function is used to submit an ISOC Transfer Request to an EP. -+ * -+ * - Every time a sync period completes the request's completion callback -+ * is called to provide data to the gadget driver. -+ * - Once submitted the request cannot be modified. -+ * - Each request is turned into periodic data packets untill ISO -+ * Transfer is stopped.. -+ */ -+static int dwc_otg_pcd_iso_ep_start(struct usb_ep *usb_ep, struct usb_iso_request *req, -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) -+ int gfp_flags -+#else -+ gfp_t gfp_flags -+#endif -+) -+{ -+ dwc_otg_pcd_ep_t *ep; -+ dwc_otg_pcd_t *pcd; -+ dwc_ep_t *dwc_ep; -+ unsigned long flags = 0; -+ int32_t frm_data; -+ dwc_otg_core_if_t *core_if; -+ dcfg_data_t dcfg; -+ dsts_data_t dsts; -+ -+ -+ if (!req || !req->process_buffer || !req->buf0 || !req->buf1) { -+ DWC_WARN("%s, bad params\n", __func__); -+ return -EINVAL; -+ } -+ -+ ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep); -+ -+ if (!usb_ep || !ep->desc || ep->dwc_ep.num == 0) { -+ DWC_WARN("%s, bad ep\n", __func__); -+ return -EINVAL; -+ } -+ -+ pcd = ep->pcd; -+ core_if = GET_CORE_IF(pcd); -+ -+ dcfg.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dcfg); -+ -+ if (!pcd->driver || pcd->gadget.speed == USB_SPEED_UNKNOWN) { -+ DWC_DEBUGPL(DBG_PCDV, "gadget.speed=%d\n", pcd->gadget.speed); -+ DWC_WARN("%s, bogus device state\n", __func__); -+ return -ESHUTDOWN; -+ } -+ -+ SPIN_LOCK_IRQSAVE(&ep->pcd->lock, flags); -+ -+ dwc_ep = &ep->dwc_ep; -+ -+ if(ep->iso_req) { -+ DWC_WARN("%s, iso request in progress\n", __func__); -+ } -+ req->status = -EINPROGRESS; -+ -+ dwc_ep->dma_addr0 = req->dma0; -+ dwc_ep->dma_addr1 = req->dma1; -+ -+ dwc_ep->xfer_buff0 = req->buf0; -+ dwc_ep->xfer_buff1 = req->buf1; -+ -+ ep->iso_req = req; -+ -+ dwc_ep->data_per_frame = req->data_per_frame; -+ -+ /** @todo - pattern data support is to be implemented in the future */ -+ dwc_ep->data_pattern_frame = req->data_pattern_frame; -+ dwc_ep->sync_frame = req->sync_frame; -+ -+ dwc_ep->buf_proc_intrvl = req->buf_proc_intrvl; -+ -+ dwc_ep->bInterval = 1 << (ep->desc->bInterval - 1); -+ -+ dwc_ep->proc_buf_num = 0; -+ -+ dwc_ep->pkt_per_frm = 0; -+ frm_data = ep->dwc_ep.data_per_frame; -+ while(frm_data > 0) { -+ dwc_ep->pkt_per_frm++; -+ frm_data -= ep->dwc_ep.maxpacket; -+ } -+ -+ dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts); -+ -+ if(req->flags & USB_REQ_ISO_ASAP) { -+ dwc_ep->next_frame = dsts.b.soffn + 1; -+ 
if(dwc_ep->bInterval != 1){ -+ dwc_ep->next_frame = dwc_ep->next_frame + (dwc_ep->bInterval - 1 - dwc_ep->next_frame % dwc_ep->bInterval); -+ } -+ } else { -+ dwc_ep->next_frame = req->start_frame; -+ } -+ -+ -+ if(!core_if->pti_enh_enable) { -+ dwc_ep->pkt_cnt = dwc_ep->buf_proc_intrvl * dwc_ep->pkt_per_frm / dwc_ep->bInterval; -+ } else { -+ dwc_ep->pkt_cnt = -+ (dwc_ep->data_per_frame * (dwc_ep->buf_proc_intrvl / dwc_ep->bInterval) -+ - 1 + dwc_ep->maxpacket) / dwc_ep->maxpacket; -+ } -+ -+ if(core_if->dma_desc_enable) { -+ dwc_ep->desc_cnt = -+ dwc_ep->buf_proc_intrvl * dwc_ep->pkt_per_frm / dwc_ep->bInterval; -+ } -+ -+ dwc_ep->pkt_info = kmalloc(sizeof(iso_pkt_info_t) * dwc_ep->pkt_cnt, GFP_KERNEL); -+ if(!dwc_ep->pkt_info) { -+ return -ENOMEM; -+ } -+ if(core_if->pti_enh_enable) { -+ memset(dwc_ep->pkt_info, 0, sizeof(iso_pkt_info_t) * dwc_ep->pkt_cnt); -+ } -+ -+ dwc_ep->cur_pkt = 0; -+ -+ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags); -+ -+ dwc_otg_iso_ep_start_transfer(core_if, dwc_ep); -+ -+ return 0; -+} -+ -+/** -+ * This function stops ISO EP Periodic Data Transfer. -+ */ -+static int dwc_otg_pcd_iso_ep_stop(struct usb_ep *usb_ep, struct usb_iso_request *req) -+{ -+ dwc_otg_pcd_ep_t *ep; -+ dwc_otg_pcd_t *pcd; -+ dwc_ep_t *dwc_ep; -+ unsigned long flags; -+ -+ ep = container_of(usb_ep, dwc_otg_pcd_ep_t, ep); -+ -+ if (!usb_ep || !ep->desc || ep->dwc_ep.num == 0) { -+ DWC_WARN("%s, bad ep\n", __func__); -+ return -EINVAL; -+ } -+ -+ pcd = ep->pcd; -+ -+ if (!pcd->driver || pcd->gadget.speed == USB_SPEED_UNKNOWN) { -+ DWC_DEBUGPL(DBG_PCDV, "gadget.speed=%d\n", pcd->gadget.speed); -+ DWC_WARN("%s, bogus device state\n", __func__); -+ return -ESHUTDOWN; -+ } -+ -+ dwc_ep = &ep->dwc_ep; -+ -+ dwc_otg_iso_ep_stop_transfer(GET_CORE_IF(pcd), dwc_ep); -+ -+ kfree(dwc_ep->pkt_info); -+ -+ SPIN_LOCK_IRQSAVE(&pcd->lock, flags); -+ -+ if(ep->iso_req != req) { -+ return -EINVAL; -+ } -+ -+ req->status = -ECONNRESET; -+ -+ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags); -+ -+ -+ ep->iso_req = 0; -+ -+ return 0; -+} -+ -+/** -+ * This function is used for perodical data exchnage between PCD and gadget drivers. 
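For USB_REQ_ISO_ASAP the code above starts from the frame after the current SOF and, when bInterval > 1, advances to what appears to be the last frame of the current bInterval period. A standalone model of that expression (frame wrap-around is not modelled, matching the original):

    #include <stdio.h>

    static unsigned align_start_frame(unsigned soffn, unsigned interval)
    {
        unsigned next = soffn + 1;

        if (interval != 1)
            next += interval - 1 - (next % interval);
        return next;
    }

    int main(void)
    {
        /* interval = 1 << (bInterval - 1) = 4: SOF frames 7..10 all start at 11 */
        for (unsigned f = 7; f <= 10; f++)
            printf("soffn=%u -> start=%u\n", f, align_start_frame(f, 4));
        return 0;
    }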
-+ * for Isochronous EPs -+ * -+ * - Every time a sync period completes this function is called to -+ * perform data exchange between PCD and gadget -+ */ -+void dwc_otg_iso_buffer_done(dwc_otg_pcd_ep_t *ep, dwc_otg_pcd_iso_request_t *req) -+{ -+ int i; -+ struct usb_gadget_iso_packet_descriptor *iso_packet; -+ dwc_ep_t *dwc_ep; -+ -+ dwc_ep = &ep->dwc_ep; -+ -+ if(ep->iso_req->status == -ECONNRESET) { -+ DWC_PRINT("Device has already disconnected\n"); -+ /*Device has been disconnected*/ -+ return; -+ } -+ -+ if(dwc_ep->proc_buf_num != 0) { -+ iso_packet = ep->iso_req->iso_packet_desc0; -+ } -+ -+ else { -+ iso_packet = ep->iso_req->iso_packet_desc1; -+ } -+ -+ /* Fill in ISOC packets descriptors & pass to gadget driver*/ -+ -+ for(i = 0; i < dwc_ep->pkt_cnt; ++i) { -+ iso_packet[i].status = dwc_ep->pkt_info[i].status; -+ iso_packet[i].offset = dwc_ep->pkt_info[i].offset; -+ iso_packet[i].actual_length = dwc_ep->pkt_info[i].length; -+ dwc_ep->pkt_info[i].status = 0; -+ dwc_ep->pkt_info[i].offset = 0; -+ dwc_ep->pkt_info[i].length = 0; -+ } -+ -+ /* Call callback function to process data buffer */ -+ ep->iso_req->status = 0;/* success */ -+ -+ SPIN_UNLOCK(&ep->pcd->lock); -+ ep->iso_req->process_buffer(&ep->ep, ep->iso_req); -+ SPIN_LOCK(&ep->pcd->lock); -+} -+ -+ -+static struct usb_iso_request *dwc_otg_pcd_alloc_iso_request(struct usb_ep *ep,int packets, -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) -+ int gfp_flags -+#else -+ gfp_t gfp_flags -+#endif -+) -+{ -+ struct usb_iso_request *pReq = NULL; -+ uint32_t req_size; -+ -+ -+ req_size = sizeof(struct usb_iso_request); -+ req_size += (2 * packets * (sizeof(struct usb_gadget_iso_packet_descriptor))); -+ -+ -+ pReq = kmalloc(req_size, gfp_flags); -+ if (!pReq) { -+ DWC_WARN("%s, can't allocate Iso Request\n", __func__); -+ return 0; -+ } -+ pReq->iso_packet_desc0 = (void*) (pReq + 1); -+ -+ pReq->iso_packet_desc1 = pReq->iso_packet_desc0 + packets; -+ -+ return pReq; -+} -+ -+static void dwc_otg_pcd_free_iso_request(struct usb_ep *ep, struct usb_iso_request *req) -+{ -+ kfree(req); -+} -+ -+static struct usb_isoc_ep_ops dwc_otg_pcd_ep_ops = -+{ -+ .ep_ops = -+ { -+ .enable = dwc_otg_pcd_ep_enable, -+ .disable = dwc_otg_pcd_ep_disable, -+ -+ .alloc_request = dwc_otg_pcd_alloc_request, -+ .free_request = dwc_otg_pcd_free_request, -+ -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) -+ .alloc_buffer = dwc_otg_pcd_alloc_buffer, -+ .free_buffer = dwc_otg_pcd_free_buffer, -+#endif -+ -+ .queue = dwc_otg_pcd_ep_queue, -+ .dequeue = dwc_otg_pcd_ep_dequeue, -+ -+ .set_halt = dwc_otg_pcd_ep_set_halt, -+ .fifo_status = 0, -+ .fifo_flush = 0, -+ }, -+ .iso_ep_start = dwc_otg_pcd_iso_ep_start, -+ .iso_ep_stop = dwc_otg_pcd_iso_ep_stop, -+ .alloc_iso_request = dwc_otg_pcd_alloc_iso_request, -+ .free_iso_request = dwc_otg_pcd_free_iso_request, -+}; -+ -+#else -+ -+ -+static struct usb_ep_ops dwc_otg_pcd_ep_ops = -+{ -+ .enable = dwc_otg_pcd_ep_enable, -+ .disable = dwc_otg_pcd_ep_disable, -+ -+ .alloc_request = dwc_otg_pcd_alloc_request, -+ .free_request = dwc_otg_pcd_free_request, -+ -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) -+ .alloc_buffer = dwc_otg_pcd_alloc_buffer, -+ .free_buffer = dwc_otg_pcd_free_buffer, -+#endif -+ -+ .queue = dwc_otg_pcd_ep_queue, -+ .dequeue = dwc_otg_pcd_ep_dequeue, -+ -+ .set_halt = dwc_otg_pcd_ep_set_halt, -+ .fifo_status = 0, -+ .fifo_flush = 0, -+ -+ -+}; -+ -+#endif /* DWC_EN_ISOC */ -+/* Gadget Operations */ -+/** -+ * The following gadget operations will be implemented in the DWC_otg -+ * PCD. 
Functions in the API that are not described below are not -+ * implemented. -+ * -+ * The Gadget API provides wrapper functions for each of the function -+ * pointers defined in usb_gadget_ops. The Gadget Driver calls the -+ * wrapper function, which then calls the underlying PCD function. The -+ * following sections are named according to the wrapper functions -+ * (except for ioctl, which doesn't have a wrapper function). Within -+ * each section, the corresponding DWC_otg PCD function name is -+ * specified. -+ * -+ */ -+ -+/** -+ *Gets the USB Frame number of the last SOF. -+ */ -+static int dwc_otg_pcd_get_frame(struct usb_gadget *gadget) -+{ -+ dwc_otg_pcd_t *pcd; -+ -+ DWC_DEBUGPL(DBG_PCDV,"%s(%p)\n", __func__, gadget); -+ -+ if (gadget == 0) { -+ return -ENODEV; -+ } -+ else { -+ pcd = container_of(gadget, dwc_otg_pcd_t, gadget); -+ dwc_otg_get_frame_number(GET_CORE_IF(pcd)); -+ } -+ -+ return 0; -+} -+ -+void dwc_otg_pcd_initiate_srp(dwc_otg_pcd_t *pcd) -+{ -+ uint32_t *addr = (uint32_t *)&(GET_CORE_IF(pcd)->core_global_regs->gotgctl); -+ gotgctl_data_t mem; -+ gotgctl_data_t val; -+ -+ val.d32 = dwc_read_reg32(addr); -+ if (val.b.sesreq) { -+ DWC_ERROR("Session Request Already active!\n"); -+ return; -+ } -+ -+ DWC_NOTICE("Session Request Initated\n"); -+ mem.d32 = dwc_read_reg32(addr); -+ mem.b.sesreq = 1; -+ dwc_write_reg32(addr, mem.d32); -+ -+ /* Start the SRP timer */ -+ dwc_otg_pcd_start_srp_timer(pcd); -+ return; -+} -+ -+void dwc_otg_pcd_remote_wakeup(dwc_otg_pcd_t *pcd, int set) -+{ -+ dctl_data_t dctl = {.d32=0}; -+ volatile uint32_t *addr = &(GET_CORE_IF(pcd)->dev_if->dev_global_regs->dctl); -+ -+ if (dwc_otg_is_device_mode(GET_CORE_IF(pcd))) { -+ if (pcd->remote_wakeup_enable) { -+ if (set) { -+ dctl.b.rmtwkupsig = 1; -+ dwc_modify_reg32(addr, 0, dctl.d32); -+ DWC_DEBUGPL(DBG_PCD, "Set Remote Wakeup\n"); -+ mdelay(1); -+ dwc_modify_reg32(addr, dctl.d32, 0); -+ DWC_DEBUGPL(DBG_PCD, "Clear Remote Wakeup\n"); -+ } -+ else { -+ } -+ } -+ else { -+ DWC_DEBUGPL(DBG_PCD, "Remote Wakeup is disabled\n"); -+ } -+ } -+ return; -+} -+ -+/** -+ * Initiates Session Request Protocol (SRP) to wakeup the host if no -+ * session is in progress. If a session is already in progress, but -+ * the device is suspended, remote wakeup signaling is started. -+ * -+ */ -+static int dwc_otg_pcd_wakeup(struct usb_gadget *gadget) -+{ -+ unsigned long flags; -+ dwc_otg_pcd_t *pcd; -+ dsts_data_t dsts; -+ gotgctl_data_t gotgctl; -+ -+ DWC_DEBUGPL(DBG_PCDV,"%s(%p)\n", __func__, gadget); -+ -+ if (gadget == 0) { -+ return -ENODEV; -+ } -+ else { -+ pcd = container_of(gadget, dwc_otg_pcd_t, gadget); -+ } -+ SPIN_LOCK_IRQSAVE(&pcd->lock, flags); -+ -+ /* -+ * This function starts the Protocol if no session is in progress. If -+ * a session is already in progress, but the device is suspended, -+ * remote wakeup signaling is started. 
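dwc_otg_pcd_wakeup() boils down to one decision: with a valid B-session (GOTGCTL.bsesvld) and the bus suspended (DSTS.suspsts) it signals remote wakeup, and without a session it falls back to SRP. A minimal model; the enum and function names are illustrative.

    enum wake_action { WAKE_NONE, WAKE_REMOTE, WAKE_SRP };

    static enum wake_action pick_wakeup(int b_session_valid, int suspended)
    {
        if (b_session_valid)
            return suspended ? WAKE_REMOTE : WAKE_NONE;
        return WAKE_SRP;
    }

pick_wakeup(1, 1) yields WAKE_REMOTE and pick_wakeup(0, 0) yields WAKE_SRP; with a valid session but no suspend, nothing is signalled.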
-+ */ -+ -+ /* Check if valid session */ -+ gotgctl.d32 = dwc_read_reg32(&(GET_CORE_IF(pcd)->core_global_regs->gotgctl)); -+ if (gotgctl.b.bsesvld) { -+ /* Check if suspend state */ -+ dsts.d32 = dwc_read_reg32(&(GET_CORE_IF(pcd)->dev_if->dev_global_regs->dsts)); -+ if (dsts.b.suspsts) { -+ dwc_otg_pcd_remote_wakeup(pcd, 1); -+ } -+ } -+ else { -+ dwc_otg_pcd_initiate_srp(pcd); -+ } -+ -+ SPIN_UNLOCK_IRQRESTORE(&pcd->lock, flags); -+ return 0; -+} -+ -+static const struct usb_gadget_ops dwc_otg_pcd_ops = -+{ -+ .get_frame = dwc_otg_pcd_get_frame, -+ .wakeup = dwc_otg_pcd_wakeup, -+ // current versions must always be self-powered -+}; -+ -+/** -+ * This function updates the otg values in the gadget structure. -+ */ -+void dwc_otg_pcd_update_otg(dwc_otg_pcd_t *pcd, const unsigned reset) -+{ -+ -+ if (!pcd->gadget.is_otg) -+ return; -+ -+ if (reset) { -+ pcd->b_hnp_enable = 0; -+ pcd->a_hnp_support = 0; -+ pcd->a_alt_hnp_support = 0; -+ } -+ -+ pcd->gadget.b_hnp_enable = pcd->b_hnp_enable; -+ pcd->gadget.a_hnp_support = pcd->a_hnp_support; -+ pcd->gadget.a_alt_hnp_support = pcd->a_alt_hnp_support; -+} -+ -+/** -+ * This function is the top level PCD interrupt handler. -+ */ -+static irqreturn_t dwc_otg_pcd_irq(int irq, void *dev -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) -+ , struct pt_regs *r -+#endif -+ ) -+{ -+ dwc_otg_pcd_t *pcd = dev; -+ int32_t retval = IRQ_NONE; -+ -+ retval = dwc_otg_pcd_handle_intr(pcd); -+ return IRQ_RETVAL(retval); -+} -+ -+/** -+ * PCD Callback function for initializing the PCD when switching to -+ * device mode. -+ * -+ * @param p void pointer to the dwc_otg_pcd_t -+ */ -+static int32_t dwc_otg_pcd_start_cb(void *p) -+{ -+ dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *)p; -+ -+ /* -+ * Initialized the Core for Device mode. -+ */ -+ if (dwc_otg_is_device_mode(GET_CORE_IF(pcd))) { -+ dwc_otg_core_dev_init(GET_CORE_IF(pcd)); -+ } -+ return 1; -+} -+ -+/** -+ * PCD Callback function for stopping the PCD when switching to Host -+ * mode. -+ * -+ * @param p void pointer to the dwc_otg_pcd_t -+ */ -+static int32_t dwc_otg_pcd_stop_cb(void *p) -+{ -+ dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *)p; -+ extern void dwc_otg_pcd_stop(dwc_otg_pcd_t *_pcd); -+ -+ dwc_otg_pcd_stop(pcd); -+ return 1; -+} -+ -+ -+/** -+ * PCD Callback function for notifying the PCD when resuming from -+ * suspend. -+ * -+ * @param p void pointer to the dwc_otg_pcd_t -+ */ -+static int32_t dwc_otg_pcd_suspend_cb(void *p) -+{ -+ dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *)p; -+ -+ if (pcd->driver && pcd->driver->resume) { -+ SPIN_UNLOCK(&pcd->lock); -+ pcd->driver->suspend(&pcd->gadget); -+ SPIN_LOCK(&pcd->lock); -+ } -+ -+ return 1; -+} -+ -+ -+/** -+ * PCD Callback function for notifying the PCD when resuming from -+ * suspend. -+ * -+ * @param p void pointer to the dwc_otg_pcd_t -+ */ -+static int32_t dwc_otg_pcd_resume_cb(void *p) -+{ -+ dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *)p; -+ -+ if (pcd->driver && pcd->driver->resume) { -+ SPIN_UNLOCK(&pcd->lock); -+ pcd->driver->resume(&pcd->gadget); -+ SPIN_LOCK(&pcd->lock); -+ } -+ -+ /* Stop the SRP timeout timer. */ -+ if ((GET_CORE_IF(pcd)->core_params->phy_type != DWC_PHY_TYPE_PARAM_FS) || -+ (!GET_CORE_IF(pcd)->core_params->i2c_enable)) { -+ if (GET_CORE_IF(pcd)->srp_timer_started) { -+ GET_CORE_IF(pcd)->srp_timer_started = 0; -+ del_timer(&pcd->srp_timer); -+ } -+ } -+ return 1; -+} -+ -+ -+/** -+ * PCD Callback structure for handling mode switching. 
-+ */ -+static dwc_otg_cil_callbacks_t pcd_callbacks = -+{ -+ .start = dwc_otg_pcd_start_cb, -+ .stop = dwc_otg_pcd_stop_cb, -+ .suspend = dwc_otg_pcd_suspend_cb, -+ .resume_wakeup = dwc_otg_pcd_resume_cb, -+ .p = 0, /* Set at registration */ -+}; -+ -+/** -+ * This function is called when the SRP timer expires. The SRP should -+ * complete within 6 seconds. -+ */ -+static void srp_timeout(unsigned long ptr) -+{ -+ gotgctl_data_t gotgctl; -+ dwc_otg_core_if_t *core_if = (dwc_otg_core_if_t *)ptr; -+ volatile uint32_t *addr = &core_if->core_global_regs->gotgctl; -+ -+ gotgctl.d32 = dwc_read_reg32(addr); -+ -+ core_if->srp_timer_started = 0; -+ -+ if ((core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS) && -+ (core_if->core_params->i2c_enable)) { -+ DWC_PRINT("SRP Timeout\n"); -+ -+ if ((core_if->srp_success) && -+ (gotgctl.b.bsesvld)) { -+ if (core_if->pcd_cb && core_if->pcd_cb->resume_wakeup) { -+ core_if->pcd_cb->resume_wakeup(core_if->pcd_cb->p); -+ } -+ -+ /* Clear Session Request */ -+ gotgctl.d32 = 0; -+ gotgctl.b.sesreq = 1; -+ dwc_modify_reg32(&core_if->core_global_regs->gotgctl, -+ gotgctl.d32, 0); -+ -+ core_if->srp_success = 0; -+ } -+ else { -+ DWC_ERROR("Device not connected/responding\n"); -+ gotgctl.b.sesreq = 0; -+ dwc_write_reg32(addr, gotgctl.d32); -+ } -+ } -+ else if (gotgctl.b.sesreq) { -+ DWC_PRINT("SRP Timeout\n"); -+ -+ DWC_ERROR("Device not connected/responding\n"); -+ gotgctl.b.sesreq = 0; -+ dwc_write_reg32(addr, gotgctl.d32); -+ } -+ else { -+ DWC_PRINT(" SRP GOTGCTL=%0x\n", gotgctl.d32); -+ } -+} -+ -+/** -+ * Start the SRP timer to detect when the SRP does not complete within -+ * 6 seconds. -+ * -+ * @param pcd the pcd structure. -+ */ -+void dwc_otg_pcd_start_srp_timer(dwc_otg_pcd_t *pcd) -+{ -+ struct timer_list *srp_timer = &pcd->srp_timer; -+ GET_CORE_IF(pcd)->srp_timer_started = 1; -+ init_timer(srp_timer); -+ srp_timer->function = srp_timeout; -+ srp_timer->data = (unsigned long)GET_CORE_IF(pcd); -+ srp_timer->expires = jiffies + (HZ*6); -+ add_timer(srp_timer); -+} -+ -+/** -+ * Tasklet -+ * -+ */ -+extern void start_next_request(dwc_otg_pcd_ep_t *ep); -+ -+static void start_xfer_tasklet_func (unsigned long data) -+{ -+ dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t*)data; -+ dwc_otg_core_if_t *core_if = pcd->otg_dev->core_if; -+ -+ int i; -+ depctl_data_t diepctl; -+ -+ DWC_DEBUGPL(DBG_PCDV, "Start xfer tasklet\n"); -+ -+ diepctl.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[0]->diepctl); -+ -+ if (pcd->ep0.queue_sof) { -+ pcd->ep0.queue_sof = 0; -+ start_next_request (&pcd->ep0); -+ // break; -+ } -+ -+ for (i=0; idev_if->num_in_eps; i++) -+ { -+ depctl_data_t diepctl; -+ diepctl.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[i]->diepctl); -+ -+ if (pcd->in_ep[i].queue_sof) { -+ pcd->in_ep[i].queue_sof = 0; -+ start_next_request (&pcd->in_ep[i]); -+ // break; -+ } -+ } -+ -+ return; -+} -+ -+ -+ -+ -+ -+ -+ -+static struct tasklet_struct start_xfer_tasklet = { -+ .next = NULL, -+ .state = 0, -+ .count = ATOMIC_INIT(0), -+ .func = start_xfer_tasklet_func, -+ .data = 0, -+}; -+/** -+ * This function initialized the pcd Dp structures to there default -+ * state. -+ * -+ * @param pcd the pcd structure. 
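The 6-second SRP timer registered above can expire in three ways: a successful request on a full-speed PHY with I2C (resume the gadget and clear GOTGCTL.SesReq), a request the host never answered (log the error and clear SesReq), or nothing left pending. A standalone sketch of that branch structure, using hypothetical field names in place of the register and core-parameter bits:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical snapshot of the state srp_timeout() inspects. */
struct srp_state {
    bool fs_phy_with_i2c;   /* full-speed PHY with I2C enabled (core params) */
    bool srp_success;       /* core flagged the SRP as successful */
    bool b_session_valid;   /* GOTGCTL.BSesVld */
    bool session_request;   /* GOTGCTL.SesReq still set */
};

/* Returns a label for the action the 6 s timeout handler would take. */
static const char *srp_timeout_action(const struct srp_state *s)
{
    if (s->fs_phy_with_i2c) {
        if (s->srp_success && s->b_session_valid)
            return "resume gadget, clear SesReq";
        return "error: device not connected/responding, clear SesReq";
    }
    if (s->session_request)
        return "error: device not connected/responding, clear SesReq";
    return "nothing pending";
}

int main(void)
{
    struct srp_state stuck = { .session_request = true };
    printf("%s\n", srp_timeout_action(&stuck));
    return 0;
}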
-+ */ -+void dwc_otg_pcd_reinit(dwc_otg_pcd_t *pcd) -+{ -+ static const char * names[] = -+ { -+ -+ "ep0", -+ "ep1in", -+ "ep2in", -+ "ep3in", -+ "ep4in", -+ "ep5in", -+ "ep6in", -+ "ep7in", -+ "ep8in", -+ "ep9in", -+ "ep10in", -+ "ep11in", -+ "ep12in", -+ "ep13in", -+ "ep14in", -+ "ep15in", -+ "ep1out", -+ "ep2out", -+ "ep3out", -+ "ep4out", -+ "ep5out", -+ "ep6out", -+ "ep7out", -+ "ep8out", -+ "ep9out", -+ "ep10out", -+ "ep11out", -+ "ep12out", -+ "ep13out", -+ "ep14out", -+ "ep15out" -+ -+ }; -+ -+ int i; -+ int in_ep_cntr, out_ep_cntr; -+ uint32_t hwcfg1; -+ uint32_t num_in_eps = (GET_CORE_IF(pcd))->dev_if->num_in_eps; -+ uint32_t num_out_eps = (GET_CORE_IF(pcd))->dev_if->num_out_eps; -+ dwc_otg_pcd_ep_t *ep; -+ -+ DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, pcd); -+ -+ INIT_LIST_HEAD (&pcd->gadget.ep_list); -+ pcd->gadget.ep0 = &pcd->ep0.ep; -+ pcd->gadget.speed = USB_SPEED_UNKNOWN; -+ -+ INIT_LIST_HEAD (&pcd->gadget.ep0->ep_list); -+ -+ /** -+ * Initialize the EP0 structure. -+ */ -+ ep = &pcd->ep0; -+ -+ /* Init EP structure */ -+ ep->desc = 0; -+ ep->pcd = pcd; -+ ep->stopped = 1; -+ -+ /* Init DWC ep structure */ -+ ep->dwc_ep.num = 0; -+ ep->dwc_ep.active = 0; -+ ep->dwc_ep.tx_fifo_num = 0; -+ /* Control until ep is actvated */ -+ ep->dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL; -+ ep->dwc_ep.maxpacket = MAX_PACKET_SIZE; -+ ep->dwc_ep.dma_addr = 0; -+ ep->dwc_ep.start_xfer_buff = 0; -+ ep->dwc_ep.xfer_buff = 0; -+ ep->dwc_ep.xfer_len = 0; -+ ep->dwc_ep.xfer_count = 0; -+ ep->dwc_ep.sent_zlp = 0; -+ ep->dwc_ep.total_len = 0; -+ ep->queue_sof = 0; -+ ep->dwc_ep.desc_addr = 0; -+ ep->dwc_ep.dma_desc_addr = 0; -+ -+ -+ /* Init the usb_ep structure. */ -+ ep->ep.name = names[0]; -+ ep->ep.ops = (struct usb_ep_ops*)&dwc_otg_pcd_ep_ops; -+ -+ /** -+ * @todo NGS: What should the max packet size be set to -+ * here? Before EP type is set? -+ */ -+ ep->ep.maxpacket = MAX_PACKET_SIZE; -+ -+ list_add_tail (&ep->ep.ep_list, &pcd->gadget.ep_list); -+ -+ INIT_LIST_HEAD (&ep->queue); -+ /** -+ * Initialize the EP structures. -+ */ -+ in_ep_cntr = 0; -+ hwcfg1 = (GET_CORE_IF(pcd))->hwcfg1.d32 >> 3; -+ -+ for (i = 1; in_ep_cntr < num_in_eps; i++) -+ { -+ if((hwcfg1 & 0x1) == 0) { -+ dwc_otg_pcd_ep_t *ep = &pcd->in_ep[in_ep_cntr]; -+ in_ep_cntr ++; -+ -+ /* Init EP structure */ -+ ep->desc = 0; -+ ep->pcd = pcd; -+ ep->stopped = 1; -+ -+ /* Init DWC ep structure */ -+ ep->dwc_ep.is_in = 1; -+ ep->dwc_ep.num = i; -+ ep->dwc_ep.active = 0; -+ ep->dwc_ep.tx_fifo_num = 0; -+ -+ /* Control until ep is actvated */ -+ ep->dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL; -+ ep->dwc_ep.maxpacket = MAX_PACKET_SIZE; -+ ep->dwc_ep.dma_addr = 0; -+ ep->dwc_ep.start_xfer_buff = 0; -+ ep->dwc_ep.xfer_buff = 0; -+ ep->dwc_ep.xfer_len = 0; -+ ep->dwc_ep.xfer_count = 0; -+ ep->dwc_ep.sent_zlp = 0; -+ ep->dwc_ep.total_len = 0; -+ ep->queue_sof = 0; -+ ep->dwc_ep.desc_addr = 0; -+ ep->dwc_ep.dma_desc_addr = 0; -+ -+ /* Init the usb_ep structure. */ -+ ep->ep.name = names[i]; -+ ep->ep.ops = (struct usb_ep_ops*)&dwc_otg_pcd_ep_ops; -+ -+ /** -+ * @todo NGS: What should the max packet size be set to -+ * here? Before EP type is set? 
-+ */ -+ ep->ep.maxpacket = MAX_PACKET_SIZE; -+ -+ list_add_tail (&ep->ep.ep_list, &pcd->gadget.ep_list); -+ -+ INIT_LIST_HEAD (&ep->queue); -+ } -+ hwcfg1 >>= 2; -+ } -+ -+ out_ep_cntr = 0; -+ hwcfg1 = (GET_CORE_IF(pcd))->hwcfg1.d32 >> 2; -+ -+ for (i = 1; out_ep_cntr < num_out_eps; i++) -+ { -+ if((hwcfg1 & 0x1) == 0) { -+ dwc_otg_pcd_ep_t *ep = &pcd->out_ep[out_ep_cntr]; -+ out_ep_cntr++; -+ -+ /* Init EP structure */ -+ ep->desc = 0; -+ ep->pcd = pcd; -+ ep->stopped = 1; -+ -+ /* Init DWC ep structure */ -+ ep->dwc_ep.is_in = 0; -+ ep->dwc_ep.num = i; -+ ep->dwc_ep.active = 0; -+ ep->dwc_ep.tx_fifo_num = 0; -+ /* Control until ep is actvated */ -+ ep->dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL; -+ ep->dwc_ep.maxpacket = MAX_PACKET_SIZE; -+ ep->dwc_ep.dma_addr = 0; -+ ep->dwc_ep.start_xfer_buff = 0; -+ ep->dwc_ep.xfer_buff = 0; -+ ep->dwc_ep.xfer_len = 0; -+ ep->dwc_ep.xfer_count = 0; -+ ep->dwc_ep.sent_zlp = 0; -+ ep->dwc_ep.total_len = 0; -+ ep->queue_sof = 0; -+ -+ /* Init the usb_ep structure. */ -+ ep->ep.name = names[15 + i]; -+ ep->ep.ops = (struct usb_ep_ops*)&dwc_otg_pcd_ep_ops; -+ /** -+ * @todo NGS: What should the max packet size be set to -+ * here? Before EP type is set? -+ */ -+ ep->ep.maxpacket = MAX_PACKET_SIZE; -+ -+ list_add_tail (&ep->ep.ep_list, &pcd->gadget.ep_list); -+ -+ INIT_LIST_HEAD (&ep->queue); -+ } -+ hwcfg1 >>= 2; -+ } -+ -+ /* remove ep0 from the list. There is a ep0 pointer.*/ -+ list_del_init (&pcd->ep0.ep.ep_list); -+ -+ pcd->ep0state = EP0_DISCONNECT; -+ pcd->ep0.ep.maxpacket = MAX_EP0_SIZE; -+ pcd->ep0.dwc_ep.maxpacket = MAX_EP0_SIZE; -+ pcd->ep0.dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL; -+} -+ -+/** -+ * This function releases the Gadget device. -+ * required by device_unregister(). -+ * -+ * @todo Should this do something? Should it free the PCD? -+ */ -+static void dwc_otg_pcd_gadget_release(struct device *dev) -+{ -+ DWC_DEBUGPL(DBG_PCDV,"%s(%p)\n", __func__, dev); -+} -+ -+ -+ -+/** -+ * This function initialized the PCD portion of the driver. -+ * -+ */ -+ -+int dwc_otg_pcd_init(struct device *dev) -+{ -+ static char pcd_name[] = "dwc_otg_pcd"; -+ dwc_otg_pcd_t *pcd; -+ dwc_otg_core_if_t* core_if; -+ dwc_otg_dev_if_t* dev_if; -+ dwc_otg_device_t *otg_dev = dev_get_drvdata(dev); -+ int retval = 0; -+ -+ -+ DWC_DEBUGPL(DBG_PCDV,"%s(%p)\n",__func__, dev); -+ /* -+ * Allocate PCD structure -+ */ -+ pcd = kmalloc(sizeof(dwc_otg_pcd_t), GFP_KERNEL); -+ -+ if (pcd == 0) { -+ return -ENOMEM; -+ } -+ -+ memset(pcd, 0, sizeof(dwc_otg_pcd_t)); -+ spin_lock_init(&pcd->lock); -+ -+ otg_dev->pcd = pcd; -+ s_pcd = pcd; -+ pcd->gadget.name = pcd_name; -+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) -+ strcpy(pcd->gadget.dev.bus_id, "gadget"); -+#else -+ dev_set_name(&pcd->gadget.dev, "%s", "gadget"); -+#endif -+ -+ pcd->otg_dev = dev_get_drvdata(dev); -+ -+ pcd->gadget.dev.parent = dev; -+ pcd->gadget.dev.release = dwc_otg_pcd_gadget_release; -+ pcd->gadget.ops = &dwc_otg_pcd_ops; -+ -+ core_if = GET_CORE_IF(pcd); -+ dev_if = core_if->dev_if; -+ -+ if(core_if->hwcfg4.b.ded_fifo_en) { -+ DWC_PRINT("Dedicated Tx FIFOs mode\n"); -+ } -+ else { -+ DWC_PRINT("Shared Tx FIFO mode\n"); -+ } -+ -+ /* If the module is set to FS or if the PHY_TYPE is FS then the gadget -+ * should not report as dual-speed capable. replace the following line -+ * with the block of code below it once the software is debugged for -+ * this. If is_dualspeed = 0 then the gadget driver should not report -+ * a device qualifier descriptor when queried. 
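The two loops above read GHWCFG1 two bits per endpoint to decide which endpoint numbers exist in each direction; judging from the shift amounts, bit 0 of a field clear means OUT-capable and bit 1 clear means IN-capable, so a field value of 0 is a bidirectional endpoint (treat that encoding as an inference from the code, not a datasheet quote). A standalone sketch of the same decode:

#include <stdint.h>
#include <stdio.h>

/* Decode a GHWCFG1-style word: two bits per endpoint, EP0 in bits [1:0].
 * Inferred from the loops above: bit 1 of a field clear => IN-capable,
 * bit 0 clear => OUT-capable, so a field of 0 means bidirectional. */
static void decode_ep_dirs(uint32_t hwcfg1, unsigned num_eps)
{
    for (unsigned ep = 1; ep <= num_eps; ep++) {
        unsigned field = (hwcfg1 >> (2 * ep)) & 0x3;
        int is_in  = ((field >> 1) & 1) == 0;
        int is_out = (field & 1) == 0;

        printf("ep%u: %s%s\n", ep,
               is_in  ? "IN " : "",
               is_out ? "OUT" : "");
    }
}

int main(void)
{
    /* Example only: EP1 bidirectional (00), EP2 IN-only (01), EP3 OUT-only (10). */
    uint32_t hwcfg1 = (0x0u << 2) | (0x1u << 4) | (0x2u << 6);
    decode_ep_dirs(hwcfg1, 3);
    return 0;
}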
*/ -+ if ((GET_CORE_IF(pcd)->core_params->speed == DWC_SPEED_PARAM_FULL) || -+ ((GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type == 2) && -+ (GET_CORE_IF(pcd)->hwcfg2.b.fs_phy_type == 1) && -+ (GET_CORE_IF(pcd)->core_params->ulpi_fs_ls))) { -+ pcd->gadget.is_dualspeed = 0; -+ } -+ else { -+ pcd->gadget.is_dualspeed = 1; -+ } -+ -+ if ((otg_dev->core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE) || -+ (otg_dev->core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST) || -+ (otg_dev->core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_SRP_CAPABLE_DEVICE) || -+ (otg_dev->core_if->hwcfg2.b.op_mode == DWC_HWCFG2_OP_MODE_SRP_CAPABLE_HOST)) { -+ pcd->gadget.is_otg = 0; -+ } -+ else { -+ pcd->gadget.is_otg = 1; -+ } -+ -+ -+ pcd->driver = 0; -+ /* Register the gadget device */ -+ retval = device_register(&pcd->gadget.dev); -+ if (retval != 0) { -+ kfree (pcd); -+ return retval; -+ } -+ -+ -+ /* -+ * Initialized the Core for Device mode. -+ */ -+ if (dwc_otg_is_device_mode(core_if)) { -+ dwc_otg_core_dev_init(core_if); -+ } -+ -+ /* -+ * Initialize EP structures -+ */ -+ dwc_otg_pcd_reinit(pcd); -+ -+ /* -+ * Register the PCD Callbacks. -+ */ -+ dwc_otg_cil_register_pcd_callbacks(otg_dev->core_if, &pcd_callbacks, -+ pcd); -+ /* -+ * Setup interupt handler -+ */ -+ DWC_DEBUGPL(DBG_ANY, "registering handler for irq%d\n", otg_dev->irq); -+ retval = request_irq(otg_dev->irq, dwc_otg_pcd_irq, -+ IRQF_SHARED, pcd->gadget.name, pcd); -+ if (retval != 0) { -+ DWC_ERROR("request of irq%d failed\n", otg_dev->irq); -+ device_unregister(&pcd->gadget.dev); -+ kfree (pcd); -+ return -EBUSY; -+ } -+ -+ /* -+ * Initialize the DMA buffer for SETUP packets -+ */ -+ if (GET_CORE_IF(pcd)->dma_enable) { -+ pcd->setup_pkt = dma_alloc_coherent (NULL, sizeof (*pcd->setup_pkt) * 5, &pcd->setup_pkt_dma_handle, 0); -+ if (pcd->setup_pkt == 0) { -+ free_irq(otg_dev->irq, pcd); -+ device_unregister(&pcd->gadget.dev); -+ kfree (pcd); -+ return -ENOMEM; -+ } -+ -+ pcd->status_buf = dma_alloc_coherent (NULL, sizeof (uint16_t), &pcd->status_buf_dma_handle, 0); -+ if (pcd->status_buf == 0) { -+ dma_free_coherent(NULL, sizeof(*pcd->setup_pkt), pcd->setup_pkt, pcd->setup_pkt_dma_handle); -+ free_irq(otg_dev->irq, pcd); -+ device_unregister(&pcd->gadget.dev); -+ kfree (pcd); -+ return -ENOMEM; -+ } -+ -+ if (GET_CORE_IF(pcd)->dma_desc_enable) { -+ dev_if->setup_desc_addr[0] = dwc_otg_ep_alloc_desc_chain(&dev_if->dma_setup_desc_addr[0], 1); -+ dev_if->setup_desc_addr[1] = dwc_otg_ep_alloc_desc_chain(&dev_if->dma_setup_desc_addr[1], 1); -+ dev_if->in_desc_addr = dwc_otg_ep_alloc_desc_chain(&dev_if->dma_in_desc_addr, 1); -+ dev_if->out_desc_addr = dwc_otg_ep_alloc_desc_chain(&dev_if->dma_out_desc_addr, 1); -+ -+ if(dev_if->setup_desc_addr[0] == 0 -+ || dev_if->setup_desc_addr[1] == 0 -+ || dev_if->in_desc_addr == 0 -+ || dev_if->out_desc_addr == 0 ) { -+ -+ if(dev_if->out_desc_addr) -+ dwc_otg_ep_free_desc_chain(dev_if->out_desc_addr, dev_if->dma_out_desc_addr, 1); -+ if(dev_if->in_desc_addr) -+ dwc_otg_ep_free_desc_chain(dev_if->in_desc_addr, dev_if->dma_in_desc_addr, 1); -+ if(dev_if->setup_desc_addr[1]) -+ dwc_otg_ep_free_desc_chain(dev_if->setup_desc_addr[1], dev_if->dma_setup_desc_addr[1], 1); -+ if(dev_if->setup_desc_addr[0]) -+ dwc_otg_ep_free_desc_chain(dev_if->setup_desc_addr[0], dev_if->dma_setup_desc_addr[0], 1); -+ -+ -+ dma_free_coherent(NULL, sizeof(*pcd->status_buf), pcd->status_buf, pcd->setup_pkt_dma_handle); -+ dma_free_coherent(NULL, sizeof(*pcd->setup_pkt), pcd->setup_pkt, 
pcd->setup_pkt_dma_handle); -+ -+ free_irq(otg_dev->irq, pcd); -+ device_unregister(&pcd->gadget.dev); -+ kfree (pcd); -+ -+ return -ENOMEM; -+ } -+ } -+ } -+ else { -+ pcd->setup_pkt = kmalloc (sizeof (*pcd->setup_pkt) * 5, GFP_KERNEL); -+ if (pcd->setup_pkt == 0) { -+ free_irq(otg_dev->irq, pcd); -+ device_unregister(&pcd->gadget.dev); -+ kfree (pcd); -+ return -ENOMEM; -+ } -+ -+ pcd->status_buf = kmalloc (sizeof (uint16_t), GFP_KERNEL); -+ if (pcd->status_buf == 0) { -+ kfree(pcd->setup_pkt); -+ free_irq(otg_dev->irq, pcd); -+ device_unregister(&pcd->gadget.dev); -+ kfree (pcd); -+ return -ENOMEM; -+ } -+ } -+ -+ -+ /* Initialize tasklet */ -+ start_xfer_tasklet.data = (unsigned long)pcd; -+ pcd->start_xfer_tasklet = &start_xfer_tasklet; -+ -+ return 0; -+} -+ -+/** -+ * Cleanup the PCD. -+ */ -+void dwc_otg_pcd_remove(struct device *dev) -+{ -+ dwc_otg_device_t *otg_dev = dev_get_drvdata(dev); -+ dwc_otg_pcd_t *pcd = otg_dev->pcd; -+ dwc_otg_dev_if_t* dev_if = GET_CORE_IF(pcd)->dev_if; -+ -+ DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, dev); -+ -+ /* -+ * Free the IRQ -+ */ -+ free_irq(otg_dev->irq, pcd); -+ -+ /* start with the driver above us */ -+ if (pcd->driver) { -+ /* should have been done already by driver model core */ -+ DWC_WARN("driver '%s' is still registered\n", -+ pcd->driver->driver.name); -+ usb_gadget_unregister_driver(pcd->driver); -+ } -+ device_unregister(&pcd->gadget.dev); -+ -+ if (GET_CORE_IF(pcd)->dma_enable) { -+ dma_free_coherent (NULL, sizeof (*pcd->setup_pkt) * 5, pcd->setup_pkt, pcd->setup_pkt_dma_handle); -+ dma_free_coherent (NULL, sizeof (uint16_t), pcd->status_buf, pcd->status_buf_dma_handle); -+ if (GET_CORE_IF(pcd)->dma_desc_enable) { -+ dwc_otg_ep_free_desc_chain(dev_if->setup_desc_addr[0], dev_if->dma_setup_desc_addr[0], 1); -+ dwc_otg_ep_free_desc_chain(dev_if->setup_desc_addr[1], dev_if->dma_setup_desc_addr[1], 1); -+ dwc_otg_ep_free_desc_chain(dev_if->in_desc_addr, dev_if->dma_in_desc_addr, 1); -+ dwc_otg_ep_free_desc_chain(dev_if->out_desc_addr, dev_if->dma_out_desc_addr, 1); -+ } -+ } -+ else { -+ kfree (pcd->setup_pkt); -+ kfree (pcd->status_buf); -+ } -+ -+ kfree(pcd); -+ otg_dev->pcd = 0; -+} -+ -+/** -+ * This function registers a gadget driver with the PCD. -+ * -+ * When a driver is successfully registered, it will receive control -+ * requests including set_configuration(), which enables non-control -+ * requests. then usb traffic follows until a disconnect is reported. -+ * then a host may connect again, or the driver might get unbound. 
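A gadget driver is only accepted if it supplies the callbacks the PCD relies on; the registration path that follows rejects a driver without a known speed, bind, unbind, disconnect and setup, refuses to bind while another driver is attached, and drops the hook again if bind() fails. A standalone sketch of that gatekeeping, with a simplified struct standing in for struct usb_gadget_driver:

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in carrying only the fields the registration code checks. */
struct gadget_driver {
    int speed;                    /* 0 plays the role of USB_SPEED_UNKNOWN */
    int (*bind)(void *gadget);
    void (*unbind)(void *gadget);
    void (*disconnect)(void *gadget);
    int (*setup)(void *gadget, const void *ctrl);
};

static struct gadget_driver *registered;   /* plays the role of s_pcd->driver */

/* Mirrors the -EINVAL / -ENODEV / -EBUSY checks performed before binding. */
static int register_gadget_driver(struct gadget_driver *drv, int pcd_ready)
{
    int ret;

    if (!drv || drv->speed == 0 || !drv->bind || !drv->unbind ||
        !drv->disconnect || !drv->setup)
        return -EINVAL;
    if (!pcd_ready)
        return -ENODEV;
    if (registered)
        return -EBUSY;

    registered = drv;              /* hook up the driver */
    ret = drv->bind(NULL);         /* then let it bind to the gadget */
    if (ret)
        registered = NULL;         /* drop the hook again on bind failure */
    return ret;
}

static int  dummy_bind(void *g)                 { (void)g; return 0; }
static void dummy_void(void *g)                 { (void)g; }
static int  dummy_setup(void *g, const void *c) { (void)g; (void)c; return 0; }

int main(void)
{
    struct gadget_driver drv = {
        .speed = 3, .bind = dummy_bind, .unbind = dummy_void,
        .disconnect = dummy_void, .setup = dummy_setup,
    };
    printf("register: %d\n", register_gadget_driver(&drv, 1));  /* 0 */
    printf("again:    %d\n", register_gadget_driver(&drv, 1));  /* -EBUSY */
    return 0;
}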
-+ * -+ * @param driver The driver being registered -+ */ -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) -+int usb_gadget_probe_driver(struct usb_gadget_driver *driver, int (*bind)(struct usb_gadget *)) -+#else -+int usb_gadget_register_driver(struct usb_gadget_driver *driver) -+#endif -+{ -+ int retval; -+ int (*d_bind)(struct usb_gadget *); -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) -+ d_bind = bind; -+#else -+ d_bind = driver->bind; -+#endif -+ -+ DWC_DEBUGPL(DBG_PCD, "registering gadget driver '%s'\n", driver->driver.name); -+ -+ if (!driver || driver->speed == USB_SPEED_UNKNOWN || -+ !d_bind || -+ !driver->unbind || -+ !driver->disconnect || -+ !driver->setup) { -+ DWC_DEBUGPL(DBG_PCDV,"EINVAL\n"); -+ return -EINVAL; -+ } -+ if (s_pcd == 0) { -+ DWC_DEBUGPL(DBG_PCDV,"ENODEV\n"); -+ return -ENODEV; -+ } -+ if (s_pcd->driver != 0) { -+ DWC_DEBUGPL(DBG_PCDV,"EBUSY (%p)\n", s_pcd->driver); -+ return -EBUSY; -+ } -+ -+ /* hook up the driver */ -+ s_pcd->driver = driver; -+ s_pcd->gadget.dev.driver = &driver->driver; -+ -+ DWC_DEBUGPL(DBG_PCD, "bind to driver %s\n", driver->driver.name); -+ retval = d_bind(&s_pcd->gadget); -+ if (retval) { -+ DWC_ERROR("bind to driver %s --> error %d\n", -+ driver->driver.name, retval); -+ s_pcd->driver = 0; -+ s_pcd->gadget.dev.driver = 0; -+ return retval; -+ } -+ DWC_DEBUGPL(DBG_ANY, "registered gadget driver '%s'\n", -+ driver->driver.name); -+ return 0; -+} -+ -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) -+EXPORT_SYMBOL(usb_gadget_probe_driver); -+#else -+EXPORT_SYMBOL(usb_gadget_register_driver); -+#endif -+ -+/** -+ * This function unregisters a gadget driver -+ * -+ * @param driver The driver being unregistered -+ */ -+int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) -+{ -+ //DWC_DEBUGPL(DBG_PCDV,"%s(%p)\n", __func__, _driver); -+ -+ if (s_pcd == 0) { -+ DWC_DEBUGPL(DBG_ANY, "%s Return(%d): s_pcd==0\n", __func__, -+ -ENODEV); -+ return -ENODEV; -+ } -+ if (driver == 0 || driver != s_pcd->driver) { -+ DWC_DEBUGPL(DBG_ANY, "%s Return(%d): driver?\n", __func__, -+ -EINVAL); -+ return -EINVAL; -+ } -+ -+ driver->unbind(&s_pcd->gadget); -+ s_pcd->driver = 0; -+ -+ DWC_DEBUGPL(DBG_ANY, "unregistered driver '%s'\n", -+ driver->driver.name); -+ return 0; -+} -+EXPORT_SYMBOL(usb_gadget_unregister_driver); -+ -+#endif /* DWC_HOST_ONLY */ ---- /dev/null -+++ b/drivers/usb/dwc_otg/dwc_otg_pcd.h -@@ -0,0 +1,248 @@ -+/* ========================================================================== -+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_pcd.h $ -+ * $Revision: 1.2 $ -+ * $Date: 2008-11-21 05:39:15 $ -+ * $Change: 1103515 $ -+ * -+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, -+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless -+ * otherwise expressly agreed to in writing between Synopsys and you. -+ * -+ * The Software IS NOT an item of Licensed Software or Licensed Product under -+ * any End User Software License Agreement or Agreement for Licensed Product -+ * with Synopsys or any supplement thereto. You are permitted to use and -+ * redistribute this Software in source and binary forms, with or without -+ * modification, provided that redistributions of source code must retain this -+ * notice. You may not view, use, disclose, copy or distribute this file or -+ * any information contained herein except pursuant to this license grant from -+ * Synopsys. 
If you do not agree with this notice, including the disclaimer -+ * below, then you are not authorized to use the Software. -+ * -+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, -+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -+ * DAMAGE. -+ * ========================================================================== */ -+#ifndef DWC_HOST_ONLY -+#if !defined(__DWC_PCD_H__) -+#define __DWC_PCD_H__ -+ -+#include -+#include -+#include -+#include -+ -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21) -+# include -+#else -+# include -+#endif -+ -+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24) -+#include -+#else -+#include -+#endif -+#include -+#include -+ -+struct dwc_otg_device; -+ -+#include "dwc_otg_cil.h" -+ -+/** -+ * @file -+ * -+ * This file contains the structures, constants, and interfaces for -+ * the Perpherial Contoller Driver (PCD). -+ * -+ * The Peripheral Controller Driver (PCD) for Linux will implement the -+ * Gadget API, so that the existing Gadget drivers can be used. For -+ * the Mass Storage Function driver the File-backed USB Storage Gadget -+ * (FBS) driver will be used. The FBS driver supports the -+ * Control-Bulk (CB), Control-Bulk-Interrupt (CBI), and Bulk-Only -+ * transports. -+ * -+ */ -+ -+/** Invalid DMA Address */ -+#define DMA_ADDR_INVALID (~(dma_addr_t)0) -+/** Maxpacket size for EP0 */ -+#define MAX_EP0_SIZE 64 -+/** Maxpacket size for any EP */ -+#define MAX_PACKET_SIZE 1024 -+ -+/** Max Transfer size for any EP */ -+#define MAX_TRANSFER_SIZE 65535 -+ -+/** Max DMA Descriptor count for any EP */ -+#define MAX_DMA_DESC_CNT 64 -+ -+/** -+ * Get the pointer to the core_if from the pcd pointer. -+ */ -+#define GET_CORE_IF( _pcd ) (_pcd->otg_dev->core_if) -+ -+/** -+ * States of EP0. -+ */ -+typedef enum ep0_state -+{ -+ EP0_DISCONNECT, /* no host */ -+ EP0_IDLE, -+ EP0_IN_DATA_PHASE, -+ EP0_OUT_DATA_PHASE, -+ EP0_IN_STATUS_PHASE, -+ EP0_OUT_STATUS_PHASE, -+ EP0_STALL, -+} ep0state_e; -+ -+/** Fordward declaration.*/ -+struct dwc_otg_pcd; -+ -+/** DWC_otg iso request structure. -+ * -+ */ -+typedef struct usb_iso_request dwc_otg_pcd_iso_request_t; -+ -+/** PCD EP structure. -+ * This structure describes an EP, there is an array of EPs in the PCD -+ * structure. -+ */ -+typedef struct dwc_otg_pcd_ep -+{ -+ /** USB EP data */ -+ struct usb_ep ep; -+ /** USB EP Descriptor */ -+ const struct usb_endpoint_descriptor *desc; -+ -+ /** queue of dwc_otg_pcd_requests. */ -+ struct list_head queue; -+ unsigned stopped : 1; -+ unsigned disabling : 1; -+ unsigned dma : 1; -+ unsigned queue_sof : 1; -+ -+#ifdef DWC_EN_ISOC -+ /** DWC_otg Isochronous Transfer */ -+ struct usb_iso_request* iso_req; -+#endif //DWC_EN_ISOC -+ -+ /** DWC_otg ep data. */ -+ dwc_ep_t dwc_ep; -+ -+ /** Pointer to PCD */ -+ struct dwc_otg_pcd *pcd; -+}dwc_otg_pcd_ep_t; -+ -+ -+ -+/** DWC_otg PCD Structure. 
-+ * This structure encapsulates the data for the dwc_otg PCD. -+ */ -+typedef struct dwc_otg_pcd -+{ -+ /** USB gadget */ -+ struct usb_gadget gadget; -+ /** USB gadget driver pointer*/ -+ struct usb_gadget_driver *driver; -+ /** The DWC otg device pointer. */ -+ struct dwc_otg_device *otg_dev; -+ -+ /** State of EP0 */ -+ ep0state_e ep0state; -+ /** EP0 Request is pending */ -+ unsigned ep0_pending : 1; -+ /** Indicates when SET CONFIGURATION Request is in process */ -+ unsigned request_config : 1; -+ /** The state of the Remote Wakeup Enable. */ -+ unsigned remote_wakeup_enable : 1; -+ /** The state of the B-Device HNP Enable. */ -+ unsigned b_hnp_enable : 1; -+ /** The state of A-Device HNP Support. */ -+ unsigned a_hnp_support : 1; -+ /** The state of the A-Device Alt HNP support. */ -+ unsigned a_alt_hnp_support : 1; -+ /** Count of pending Requests */ -+ unsigned request_pending; -+ -+ /** SETUP packet for EP0 -+ * This structure is allocated as a DMA buffer on PCD initialization -+ * with enough space for up to 3 setup packets. -+ */ -+ union -+ { -+ struct usb_ctrlrequest req; -+ uint32_t d32[2]; -+ } *setup_pkt; -+ -+ dma_addr_t setup_pkt_dma_handle; -+ -+ /** 2-byte dma buffer used to return status from GET_STATUS */ -+ uint16_t *status_buf; -+ dma_addr_t status_buf_dma_handle; -+ -+ /** EP0 */ -+ dwc_otg_pcd_ep_t ep0; -+ -+ /** Array of IN EPs. */ -+ dwc_otg_pcd_ep_t in_ep[ MAX_EPS_CHANNELS - 1]; -+ /** Array of OUT EPs. */ -+ dwc_otg_pcd_ep_t out_ep[ MAX_EPS_CHANNELS - 1]; -+ /** number of valid EPs in the above array. */ -+// unsigned num_eps : 4; -+ spinlock_t lock; -+ /** Timer for SRP. If it expires before SRP is successful -+ * clear the SRP. */ -+ struct timer_list srp_timer; -+ -+ /** Tasklet to defer starting of TEST mode transmissions until -+ * Status Phase has been completed. -+ */ -+ struct tasklet_struct test_mode_tasklet; -+ -+ /** Tasklet to delay starting of xfer in DMA mode */ -+ struct tasklet_struct *start_xfer_tasklet; -+ -+ /** The test mode to enter when the tasklet is executed. */ -+ unsigned test_mode; -+ -+} dwc_otg_pcd_t; -+ -+ -+/** DWC_otg request structure. -+ * This structure is a list of requests. -+ */ -+typedef struct -+{ -+ struct usb_request req; /**< USB Request. */ -+ struct list_head queue; /**< queue of these requests. 
*/ -+} dwc_otg_pcd_request_t; -+ -+ -+extern int dwc_otg_pcd_init(struct device *dev); -+ -+//extern void dwc_otg_pcd_remove( struct dwc_otg_device *_otg_dev ); -+extern void dwc_otg_pcd_remove( struct device *dev); -+extern int32_t dwc_otg_pcd_handle_intr( dwc_otg_pcd_t *pcd ); -+extern void dwc_otg_pcd_start_srp_timer(dwc_otg_pcd_t *pcd ); -+ -+extern void dwc_otg_pcd_initiate_srp(dwc_otg_pcd_t *pcd); -+extern void dwc_otg_pcd_remote_wakeup(dwc_otg_pcd_t *pcd, int set); -+ -+extern void dwc_otg_iso_buffer_done(dwc_otg_pcd_ep_t *ep, dwc_otg_pcd_iso_request_t *req); -+extern void dwc_otg_request_done(dwc_otg_pcd_ep_t *_ep, dwc_otg_pcd_request_t *req, -+ int status); -+extern void dwc_otg_request_nuke(dwc_otg_pcd_ep_t *_ep); -+extern void dwc_otg_pcd_update_otg(dwc_otg_pcd_t *_pcd, -+ const unsigned reset); -+ -+#endif -+#endif /* DWC_HOST_ONLY */ ---- /dev/null -+++ b/drivers/usb/dwc_otg/dwc_otg_pcd_intr.c -@@ -0,0 +1,3654 @@ -+/* ========================================================================== -+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_pcd_intr.c $ -+ * $Revision: 1.2 $ -+ * $Date: 2008-11-21 05:39:15 $ -+ * $Change: 1115682 $ -+ * -+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, -+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless -+ * otherwise expressly agreed to in writing between Synopsys and you. -+ * -+ * The Software IS NOT an item of Licensed Software or Licensed Product under -+ * any End User Software License Agreement or Agreement for Licensed Product -+ * with Synopsys or any supplement thereto. You are permitted to use and -+ * redistribute this Software in source and binary forms, with or without -+ * modification, provided that redistributions of source code must retain this -+ * notice. You may not view, use, disclose, copy or distribute this file or -+ * any information contained herein except pursuant to this license grant from -+ * Synopsys. If you do not agree with this notice, including the disclaimer -+ * below, then you are not authorized to use the Software. -+ * -+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, -+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -+ * DAMAGE. -+ * ========================================================================== */ -+#ifndef DWC_HOST_ONLY -+#include -+#include -+#include -+ -+#include "dwc_otg_driver.h" -+#include "dwc_otg_pcd.h" -+ -+ -+#define DEBUG_EP0 -+ -+/* request functions defined in "dwc_otg_pcd.c" */ -+ -+/** @file -+ * This file contains the implementation of the PCD Interrupt handlers. -+ * -+ * The PCD handles the device interrupts. Many conditions can cause a -+ * device interrupt. When an interrupt occurs, the device interrupt -+ * service routine determines the cause of the interrupt and -+ * dispatches handling to the appropriate function. 
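Dispatching from a latched interrupt status word is a walk over the set bits, least significant first, calling one handler per cause. A minimal standalone sketch of that pattern (the bit positions below are invented for illustration and are not the real GINTSTS layout):

#include <stdint.h>
#include <stdio.h>

/* Illustrative bit positions only, not the real GINTSTS layout. */
enum { IRQ_SOF = 1u << 3, IRQ_RXLVL = 1u << 4, IRQ_USBRESET = 1u << 12 };

static void handle_sof(void)      { puts("SOF"); }
static void handle_rxlvl(void)    { puts("RxFIFO level"); }
static void handle_usbreset(void) { puts("USB reset"); }

/* Walk the pending bits from LSB to MSB and call the matching handler. */
static void dispatch(uint32_t pending)
{
    while (pending) {
        uint32_t bit = pending & -pending;      /* lowest set bit */
        switch (bit) {
        case IRQ_SOF:      handle_sof();      break;
        case IRQ_RXLVL:    handle_rxlvl();    break;
        case IRQ_USBRESET: handle_usbreset(); break;
        default:           printf("unhandled bit 0x%08x\n", (unsigned)bit); break;
        }
        pending &= ~bit;                        /* acknowledge and move on */
    }
}

int main(void)
{
    dispatch(IRQ_USBRESET | IRQ_SOF);   /* prints SOF first, then USB reset */
    return 0;
}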
These interrupt -+ * handling functions are described below. -+ * All interrupt registers are processed from LSB to MSB. -+ */ -+ -+ -+/** -+ * This function prints the ep0 state for debug purposes. -+ */ -+static inline void print_ep0_state(dwc_otg_pcd_t *pcd) -+{ -+#ifdef DEBUG -+ char str[40]; -+ -+ switch (pcd->ep0state) { -+ case EP0_DISCONNECT: -+ strcpy(str, "EP0_DISCONNECT"); -+ break; -+ case EP0_IDLE: -+ strcpy(str, "EP0_IDLE"); -+ break; -+ case EP0_IN_DATA_PHASE: -+ strcpy(str, "EP0_IN_DATA_PHASE"); -+ break; -+ case EP0_OUT_DATA_PHASE: -+ strcpy(str, "EP0_OUT_DATA_PHASE"); -+ break; -+ case EP0_IN_STATUS_PHASE: -+ strcpy(str,"EP0_IN_STATUS_PHASE"); -+ break; -+ case EP0_OUT_STATUS_PHASE: -+ strcpy(str,"EP0_OUT_STATUS_PHASE"); -+ break; -+ case EP0_STALL: -+ strcpy(str,"EP0_STALL"); -+ break; -+ default: -+ strcpy(str,"EP0_INVALID"); -+ } -+ -+ DWC_DEBUGPL(DBG_ANY, "%s(%d)\n", str, pcd->ep0state); -+#endif -+} -+ -+/** -+ * This function returns pointer to in ep struct with number ep_num -+ */ -+static inline dwc_otg_pcd_ep_t* get_in_ep(dwc_otg_pcd_t *pcd, uint32_t ep_num) -+{ -+ int i; -+ int num_in_eps = GET_CORE_IF(pcd)->dev_if->num_in_eps; -+ if(ep_num == 0) { -+ return &pcd->ep0; -+ } -+ else { -+ for(i = 0; i < num_in_eps; ++i) -+ { -+ if(pcd->in_ep[i].dwc_ep.num == ep_num) -+ return &pcd->in_ep[i]; -+ } -+ return 0; -+ } -+} -+/** -+ * This function returns pointer to out ep struct with number ep_num -+ */ -+static inline dwc_otg_pcd_ep_t* get_out_ep(dwc_otg_pcd_t *pcd, uint32_t ep_num) -+{ -+ int i; -+ int num_out_eps = GET_CORE_IF(pcd)->dev_if->num_out_eps; -+ if(ep_num == 0) { -+ return &pcd->ep0; -+ } -+ else { -+ for(i = 0; i < num_out_eps; ++i) -+ { -+ if(pcd->out_ep[i].dwc_ep.num == ep_num) -+ return &pcd->out_ep[i]; -+ } -+ return 0; -+ } -+} -+/** -+ * This functions gets a pointer to an EP from the wIndex address -+ * value of the control request. -+ */ -+static dwc_otg_pcd_ep_t *get_ep_by_addr (dwc_otg_pcd_t *pcd, u16 wIndex) -+{ -+ dwc_otg_pcd_ep_t *ep; -+ -+ if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0) -+ return &pcd->ep0; -+ list_for_each_entry(ep, &pcd->gadget.ep_list, ep.ep_list) -+ { -+ u8 bEndpointAddress; -+ -+ if (!ep->desc) -+ continue; -+ -+ bEndpointAddress = ep->desc->bEndpointAddress; -+ if((wIndex & (USB_DIR_IN | USB_ENDPOINT_NUMBER_MASK)) -+ == (bEndpointAddress & (USB_DIR_IN | USB_ENDPOINT_NUMBER_MASK))) -+ return ep; -+ } -+ return NULL; -+} -+ -+/** -+ * This function checks the EP request queue, if the queue is not -+ * empty the next request is started. 
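get_ep_by_addr() above matches the wIndex of a control request against each endpoint descriptor by comparing only the direction bit and the endpoint number; USB_DIR_IN is 0x80 and USB_ENDPOINT_NUMBER_MASK is 0x0f in the standard ch9 definitions. A standalone sketch of that comparison:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define USB_DIR_IN               0x80
#define USB_ENDPOINT_NUMBER_MASK 0x0f

/* True when a control request's wIndex addresses this endpoint:
 * only the direction bit and the endpoint number take part. */
static bool ep_matches_windex(uint8_t bEndpointAddress, uint16_t wIndex)
{
    uint16_t mask = USB_DIR_IN | USB_ENDPOINT_NUMBER_MASK;
    return (wIndex & mask) == (bEndpointAddress & mask);
}

int main(void)
{
    printf("%d\n", ep_matches_windex(0x81, 0x0081)); /* EP1 IN  vs wIndex 0x81 -> 1 */
    printf("%d\n", ep_matches_windex(0x81, 0x0001)); /* EP1 IN  vs wIndex 0x01 -> 0 */
    printf("%d\n", ep_matches_windex(0x02, 0x0002)); /* EP2 OUT vs wIndex 0x02 -> 1 */
    return 0;
}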
-+ */ -+void start_next_request(dwc_otg_pcd_ep_t *ep) -+{ -+ dwc_otg_pcd_request_t *req = 0; -+ uint32_t max_transfer = GET_CORE_IF(ep->pcd)->core_params->max_transfer_size; -+ -+ if (!list_empty(&ep->queue)) { -+ req = list_entry(ep->queue.next, -+ dwc_otg_pcd_request_t, queue); -+ -+ /* Setup and start the Transfer */ -+ ep->dwc_ep.dma_addr = req->req.dma; -+ ep->dwc_ep.start_xfer_buff = req->req.buf; -+ ep->dwc_ep.xfer_buff = req->req.buf; -+ ep->dwc_ep.sent_zlp = 0; -+ ep->dwc_ep.total_len = req->req.length; -+ ep->dwc_ep.xfer_len = 0; -+ ep->dwc_ep.xfer_count = 0; -+ -+ if(max_transfer > MAX_TRANSFER_SIZE) { -+ ep->dwc_ep.maxxfer = max_transfer - (max_transfer % ep->dwc_ep.maxpacket); -+ } else { -+ ep->dwc_ep.maxxfer = max_transfer; -+ } -+ -+ if(req->req.zero) { -+ if((ep->dwc_ep.total_len % ep->dwc_ep.maxpacket == 0) -+ && (ep->dwc_ep.total_len != 0)) { -+ ep->dwc_ep.sent_zlp = 1; -+ } -+ -+ } -+ -+ dwc_otg_ep_start_transfer(GET_CORE_IF(ep->pcd), &ep->dwc_ep); -+ } -+} -+ -+/** -+ * This function handles the SOF Interrupts. At this time the SOF -+ * Interrupt is disabled. -+ */ -+int32_t dwc_otg_pcd_handle_sof_intr(dwc_otg_pcd_t *pcd) -+{ -+ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); -+ -+ gintsts_data_t gintsts; -+ -+ DWC_DEBUGPL(DBG_PCD, "SOF\n"); -+ -+ /* Clear interrupt */ -+ gintsts.d32 = 0; -+ gintsts.b.sofintr = 1; -+ dwc_write_reg32 (&core_if->core_global_regs->gintsts, gintsts.d32); -+ -+ return 1; -+} -+ -+ -+/** -+ * This function handles the Rx Status Queue Level Interrupt, which -+ * indicates that there is a least one packet in the Rx FIFO. The -+ * packets are moved from the FIFO to memory, where they will be -+ * processed when the Endpoint Interrupt Register indicates Transfer -+ * Complete or SETUP Phase Done. -+ * -+ * Repeat the following until the Rx Status Queue is empty: -+ * -# Read the Receive Status Pop Register (GRXSTSP) to get Packet -+ * info -+ * -# If Receive FIFO is empty then skip to step Clear the interrupt -+ * and exit -+ * -# If SETUP Packet call dwc_otg_read_setup_packet to copy the -+ * SETUP data to the buffer -+ * -# If OUT Data Packet call dwc_otg_read_packet to copy the data -+ * to the destination buffer -+ */ -+int32_t dwc_otg_pcd_handle_rx_status_q_level_intr(dwc_otg_pcd_t *pcd) -+{ -+ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); -+ dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs; -+ gintmsk_data_t gintmask = {.d32=0}; -+ device_grxsts_data_t status; -+ dwc_otg_pcd_ep_t *ep; -+ gintsts_data_t gintsts; -+#ifdef DEBUG -+ static char *dpid_str[] ={ "D0", "D2", "D1", "MDATA" }; -+#endif -+ -+ //DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, _pcd); -+ /* Disable the Rx Status Queue Level interrupt */ -+ gintmask.b.rxstsqlvl= 1; -+ dwc_modify_reg32(&global_regs->gintmsk, gintmask.d32, 0); -+ -+ /* Get the Status from the top of the FIFO */ -+ status.d32 = dwc_read_reg32(&global_regs->grxstsp); -+ -+ DWC_DEBUGPL(DBG_PCD, "EP:%d BCnt:%d DPID:%s " -+ "pktsts:%x Frame:%d(0x%0x)\n", -+ status.b.epnum, status.b.bcnt, -+ dpid_str[status.b.dpid], -+ status.b.pktsts, status.b.fn, status.b.fn); -+ /* Get pointer to EP structure */ -+ ep = get_out_ep(pcd, status.b.epnum); -+ -+ switch (status.b.pktsts) { -+ case DWC_DSTS_GOUT_NAK: -+ DWC_DEBUGPL(DBG_PCDV, "Global OUT NAK\n"); -+ break; -+ case DWC_STS_DATA_UPDT: -+ DWC_DEBUGPL(DBG_PCDV, "OUT Data Packet\n"); -+ if (status.b.bcnt && ep->dwc_ep.xfer_buff) { -+ /** @todo NGS Check for buffer overflow? 
*/ -+ dwc_otg_read_packet(core_if, -+ ep->dwc_ep.xfer_buff, -+ status.b.bcnt); -+ ep->dwc_ep.xfer_count += status.b.bcnt; -+ ep->dwc_ep.xfer_buff += status.b.bcnt; -+ } -+ break; -+ case DWC_STS_XFER_COMP: -+ DWC_DEBUGPL(DBG_PCDV, "OUT Complete\n"); -+ break; -+ case DWC_DSTS_SETUP_COMP: -+#ifdef DEBUG_EP0 -+ DWC_DEBUGPL(DBG_PCDV, "Setup Complete\n"); -+#endif -+ break; -+case DWC_DSTS_SETUP_UPDT: -+ dwc_otg_read_setup_packet(core_if, pcd->setup_pkt->d32); -+#ifdef DEBUG_EP0 -+ DWC_DEBUGPL(DBG_PCD, -+ "SETUP PKT: %02x.%02x v%04x i%04x l%04x\n", -+ pcd->setup_pkt->req.bRequestType, -+ pcd->setup_pkt->req.bRequest, -+ pcd->setup_pkt->req.wValue, -+ pcd->setup_pkt->req.wIndex, -+ pcd->setup_pkt->req.wLength); -+#endif -+ ep->dwc_ep.xfer_count += status.b.bcnt; -+ break; -+ default: -+ DWC_DEBUGPL(DBG_PCDV, "Invalid Packet Status (0x%0x)\n", -+ status.b.pktsts); -+ break; -+ } -+ -+ /* Enable the Rx Status Queue Level interrupt */ -+ dwc_modify_reg32(&global_regs->gintmsk, 0, gintmask.d32); -+ /* Clear interrupt */ -+ gintsts.d32 = 0; -+ gintsts.b.rxstsqlvl = 1; -+ dwc_write_reg32 (&global_regs->gintsts, gintsts.d32); -+ -+ //DWC_DEBUGPL(DBG_PCDV, "EXIT: %s\n", __func__); -+ return 1; -+} -+/** -+ * This function examines the Device IN Token Learning Queue to -+ * determine the EP number of the last IN token received. This -+ * implementation is for the Mass Storage device where there are only -+ * 2 IN EPs (Control-IN and BULK-IN). -+ * -+ * The EP numbers for the first six IN Tokens are in DTKNQR1 and there -+ * are 8 EP Numbers in each of the other possible DTKNQ Registers. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * -+ */ -+static inline int get_ep_of_last_in_token(dwc_otg_core_if_t *core_if) -+{ -+ dwc_otg_device_global_regs_t *dev_global_regs = -+ core_if->dev_if->dev_global_regs; -+ const uint32_t TOKEN_Q_DEPTH = core_if->hwcfg2.b.dev_token_q_depth; -+ /* Number of Token Queue Registers */ -+ const int DTKNQ_REG_CNT = (TOKEN_Q_DEPTH + 7) / 8; -+ dtknq1_data_t dtknqr1; -+ uint32_t in_tkn_epnums[4]; -+ int ndx = 0; -+ int i = 0; -+ volatile uint32_t *addr = &dev_global_regs->dtknqr1; -+ int epnum = 0; -+ -+ //DWC_DEBUGPL(DBG_PCD,"dev_token_q_depth=%d\n",TOKEN_Q_DEPTH); -+ -+ -+ /* Read the DTKNQ Registers */ -+ for (i = 0; i < DTKNQ_REG_CNT; i++) -+ { -+ in_tkn_epnums[ i ] = dwc_read_reg32(addr); -+ DWC_DEBUGPL(DBG_PCDV, "DTKNQR%d=0x%08x\n", i+1, -+ in_tkn_epnums[i]); -+ if (addr == &dev_global_regs->dvbusdis) { -+ addr = &dev_global_regs->dtknqr3_dthrctl; -+ } -+ else { -+ ++addr; -+ } -+ -+ } -+ -+ /* Copy the DTKNQR1 data to the bit field. 
*/ -+ dtknqr1.d32 = in_tkn_epnums[0]; -+ /* Get the EP numbers */ -+ in_tkn_epnums[0] = dtknqr1.b.epnums0_5; -+ ndx = dtknqr1.b.intknwptr - 1; -+ -+ //DWC_DEBUGPL(DBG_PCDV,"ndx=%d\n",ndx); -+ if (ndx == -1) { -+ /** @todo Find a simpler way to calculate the max -+ * queue position.*/ -+ int cnt = TOKEN_Q_DEPTH; -+ if (TOKEN_Q_DEPTH <= 6) { -+ cnt = TOKEN_Q_DEPTH - 1; -+ } -+ else if (TOKEN_Q_DEPTH <= 14) { -+ cnt = TOKEN_Q_DEPTH - 7; -+ } -+ else if (TOKEN_Q_DEPTH <= 22) { -+ cnt = TOKEN_Q_DEPTH - 15; -+ } -+ else { -+ cnt = TOKEN_Q_DEPTH - 23; -+ } -+ epnum = (in_tkn_epnums[ DTKNQ_REG_CNT - 1 ] >> (cnt * 4)) & 0xF; -+ } -+ else { -+ if (ndx <= 5) { -+ epnum = (in_tkn_epnums[0] >> (ndx * 4)) & 0xF; -+ } -+ else if (ndx <= 13) { -+ ndx -= 6; -+ epnum = (in_tkn_epnums[1] >> (ndx * 4)) & 0xF; -+ } -+ else if (ndx <= 21) { -+ ndx -= 14; -+ epnum = (in_tkn_epnums[2] >> (ndx * 4)) & 0xF; -+ } -+ else if (ndx <= 29) { -+ ndx -= 22; -+ epnum = (in_tkn_epnums[3] >> (ndx * 4)) & 0xF; -+ } -+ } -+ //DWC_DEBUGPL(DBG_PCD,"epnum=%d\n",epnum); -+ return epnum; -+} -+ -+/** -+ * This interrupt occurs when the non-periodic Tx FIFO is half-empty. -+ * The active request is checked for the next packet to be loaded into -+ * the non-periodic Tx FIFO. -+ */ -+int32_t dwc_otg_pcd_handle_np_tx_fifo_empty_intr(dwc_otg_pcd_t *pcd) -+{ -+ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); -+ dwc_otg_core_global_regs_t *global_regs = -+ core_if->core_global_regs; -+ dwc_otg_dev_in_ep_regs_t *ep_regs; -+ gnptxsts_data_t txstatus = {.d32 = 0}; -+ gintsts_data_t gintsts; -+ -+ int epnum = 0; -+ dwc_otg_pcd_ep_t *ep = 0; -+ uint32_t len = 0; -+ int dwords; -+ -+ /* Get the epnum from the IN Token Learning Queue. */ -+ epnum = get_ep_of_last_in_token(core_if); -+ ep = get_in_ep(pcd, epnum); -+ -+ DWC_DEBUGPL(DBG_PCD, "NP TxFifo Empty: %s(%d) \n", ep->ep.name, epnum); -+ ep_regs = core_if->dev_if->in_ep_regs[epnum]; -+ -+ len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count; -+ if (len > ep->dwc_ep.maxpacket) { -+ len = ep->dwc_ep.maxpacket; -+ } -+ dwords = (len + 3)/4; -+ -+ -+ /* While there is space in the queue and space in the FIFO and -+ * More data to tranfer, Write packets to the Tx FIFO */ -+ txstatus.d32 = dwc_read_reg32(&global_regs->gnptxsts); -+ DWC_DEBUGPL(DBG_PCDV, "b4 GNPTXSTS=0x%08x\n",txstatus.d32); -+ -+ while (txstatus.b.nptxqspcavail > 0 && -+ txstatus.b.nptxfspcavail > dwords && -+ ep->dwc_ep.xfer_count < ep->dwc_ep.xfer_len) { -+ /* Write the FIFO */ -+ dwc_otg_ep_write_packet(core_if, &ep->dwc_ep, 0); -+ len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count; -+ -+ if (len > ep->dwc_ep.maxpacket) { -+ len = ep->dwc_ep.maxpacket; -+ } -+ -+ dwords = (len + 3)/4; -+ txstatus.d32 = dwc_read_reg32(&global_regs->gnptxsts); -+ DWC_DEBUGPL(DBG_PCDV,"GNPTXSTS=0x%08x\n",txstatus.d32); -+ } -+ -+ DWC_DEBUGPL(DBG_PCDV, "GNPTXSTS=0x%08x\n", -+ dwc_read_reg32(&global_regs->gnptxsts)); -+ -+ /* Clear interrupt */ -+ gintsts.d32 = 0; -+ gintsts.b.nptxfempty = 1; -+ dwc_write_reg32 (&global_regs->gintsts, gintsts.d32); -+ -+ return 1; -+} -+ -+/** -+ * This function is called when dedicated Tx FIFO Empty interrupt occurs. -+ * The active request is checked for the next packet to be loaded into -+ * apropriate Tx FIFO. 
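get_ep_of_last_in_token() above unpacks 4-bit endpoint numbers from the token queue registers and uses the write pointer to locate the most recent entry; the first register carries six entries, the later ones eight each. A simplified standalone model of the unpacking, assuming the register values have already been copied into an array of 32-bit words:

#include <stdint.h>
#include <stdio.h>

/* Simplified model of the IN Token Learning Queue unpacking above:
 * words[0] carries queue entries 0..5 (one 4-bit EP number per nibble),
 * words[1..3] carry eight entries each.  'last' is the index of the most
 * recently written entry (write pointer minus one). */
static unsigned last_in_token_ep(const uint32_t words[4], unsigned last)
{
    unsigned word, nibble;

    if (last <= 5)       { word = 0; nibble = last;      }
    else if (last <= 13) { word = 1; nibble = last - 6;  }
    else if (last <= 21) { word = 2; nibble = last - 14; }
    else                 { word = 3; nibble = last - 22; }

    return (words[word] >> (nibble * 4)) & 0xF;
}

int main(void)
{
    /* Entry 7 (second nibble of words[1]) holds EP 3 in this example. */
    uint32_t words[4] = { 0, 0x30, 0, 0 };
    printf("last IN token was for EP %u\n", last_in_token_ep(words, 7));
    return 0;
}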
-+ */ -+static int32_t write_empty_tx_fifo(dwc_otg_pcd_t *pcd, uint32_t epnum) -+{ -+ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); -+ dwc_otg_dev_if_t* dev_if = core_if->dev_if; -+ dwc_otg_dev_in_ep_regs_t *ep_regs; -+ dtxfsts_data_t txstatus = {.d32 = 0}; -+ dwc_otg_pcd_ep_t *ep = 0; -+ uint32_t len = 0; -+ int dwords; -+ -+ ep = get_in_ep(pcd, epnum); -+ -+ DWC_DEBUGPL(DBG_PCD, "Dedicated TxFifo Empty: %s(%d) \n", ep->ep.name, epnum); -+ -+ ep_regs = core_if->dev_if->in_ep_regs[epnum]; -+ -+ len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count; -+ -+ if (len > ep->dwc_ep.maxpacket) { -+ len = ep->dwc_ep.maxpacket; -+ } -+ -+ dwords = (len + 3)/4; -+ -+ /* While there is space in the queue and space in the FIFO and -+ * More data to tranfer, Write packets to the Tx FIFO */ -+ txstatus.d32 = dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dtxfsts); -+ DWC_DEBUGPL(DBG_PCDV, "b4 dtxfsts[%d]=0x%08x\n",epnum,txstatus.d32); -+ -+ while (txstatus.b.txfspcavail > dwords && -+ ep->dwc_ep.xfer_count < ep->dwc_ep.xfer_len && -+ ep->dwc_ep.xfer_len != 0) { -+ /* Write the FIFO */ -+ dwc_otg_ep_write_packet(core_if, &ep->dwc_ep, 0); -+ -+ len = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count; -+ if (len > ep->dwc_ep.maxpacket) { -+ len = ep->dwc_ep.maxpacket; -+ } -+ -+ dwords = (len + 3)/4; -+ txstatus.d32 = dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dtxfsts); -+ DWC_DEBUGPL(DBG_PCDV,"dtxfsts[%d]=0x%08x\n", epnum, txstatus.d32); -+ } -+ -+ DWC_DEBUGPL(DBG_PCDV, "b4 dtxfsts[%d]=0x%08x\n",epnum,dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dtxfsts)); -+ -+ return 1; -+} -+ -+ -+/** -+ * This function is called when the Device is disconnected. It stops -+ * any active requests and informs the Gadget driver of the -+ * disconnect. -+ */ -+void dwc_otg_pcd_stop(dwc_otg_pcd_t *pcd) -+{ -+ int i, num_in_eps, num_out_eps; -+ dwc_otg_pcd_ep_t *ep; -+ -+ gintmsk_data_t intr_mask = {.d32 = 0}; -+ -+ num_in_eps = GET_CORE_IF(pcd)->dev_if->num_in_eps; -+ num_out_eps = GET_CORE_IF(pcd)->dev_if->num_out_eps; -+ -+ DWC_DEBUGPL(DBG_PCDV, "%s() \n", __func__); -+ /* don't disconnect drivers more than once */ -+ if (pcd->ep0state == EP0_DISCONNECT) { -+ DWC_DEBUGPL(DBG_ANY, "%s() Already Disconnected\n", __func__); -+ return; -+ } -+ pcd->ep0state = EP0_DISCONNECT; -+ -+ /* Reset the OTG state. */ -+ dwc_otg_pcd_update_otg(pcd, 1); -+ -+ /* Disable the NP Tx Fifo Empty Interrupt. */ -+ intr_mask.b.nptxfempty = 1; -+ dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk, -+ intr_mask.d32, 0); -+ -+ /* Flush the FIFOs */ -+ /**@todo NGS Flush Periodic FIFOs */ -+ dwc_otg_flush_tx_fifo(GET_CORE_IF(pcd), 0x10); -+ dwc_otg_flush_rx_fifo(GET_CORE_IF(pcd)); -+ -+ /* prevent new request submissions, kill any outstanding requests */ -+ ep = &pcd->ep0; -+ dwc_otg_request_nuke(ep); -+ /* prevent new request submissions, kill any outstanding requests */ -+ for (i = 0; i < num_in_eps; i++) -+ { -+ dwc_otg_pcd_ep_t *ep = &pcd->in_ep[i]; -+ dwc_otg_request_nuke(ep); -+ } -+ /* prevent new request submissions, kill any outstanding requests */ -+ for (i = 0; i < num_out_eps; i++) -+ { -+ dwc_otg_pcd_ep_t *ep = &pcd->out_ep[i]; -+ dwc_otg_request_nuke(ep); -+ } -+ -+ /* report disconnect; the driver is already quiesced */ -+ if (pcd->driver && pcd->driver->disconnect) { -+ SPIN_UNLOCK(&pcd->lock); -+ pcd->driver->disconnect(&pcd->gadget); -+ SPIN_LOCK(&pcd->lock); -+ } -+} -+ -+/** -+ * This interrupt indicates that ... 
-+ */ -+int32_t dwc_otg_pcd_handle_i2c_intr(dwc_otg_pcd_t *pcd) -+{ -+ gintmsk_data_t intr_mask = { .d32 = 0}; -+ gintsts_data_t gintsts; -+ -+ DWC_PRINT("INTERRUPT Handler not implemented for %s\n", "i2cintr"); -+ intr_mask.b.i2cintr = 1; -+ dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk, -+ intr_mask.d32, 0); -+ -+ /* Clear interrupt */ -+ gintsts.d32 = 0; -+ gintsts.b.i2cintr = 1; -+ dwc_write_reg32 (&GET_CORE_IF(pcd)->core_global_regs->gintsts, -+ gintsts.d32); -+ return 1; -+} -+ -+ -+/** -+ * This interrupt indicates that ... -+ */ -+int32_t dwc_otg_pcd_handle_early_suspend_intr(dwc_otg_pcd_t *pcd) -+{ -+ gintsts_data_t gintsts; -+#if defined(VERBOSE) -+ DWC_PRINT("Early Suspend Detected\n"); -+#endif -+ /* Clear interrupt */ -+ gintsts.d32 = 0; -+ gintsts.b.erlysuspend = 1; -+ dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts, -+ gintsts.d32); -+ return 1; -+} -+ -+/** -+ * This function configures EPO to receive SETUP packets. -+ * -+ * @todo NGS: Update the comments from the HW FS. -+ * -+ * -# Program the following fields in the endpoint specific registers -+ * for Control OUT EP 0, in order to receive a setup packet -+ * - DOEPTSIZ0.Packet Count = 3 (To receive up to 3 back to back -+ * setup packets) -+ * - DOEPTSIZE0.Transfer Size = 24 Bytes (To receive up to 3 back -+ * to back setup packets) -+ * - In DMA mode, DOEPDMA0 Register with a memory address to -+ * store any setup packets received -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param pcd Programming view of the PCD. -+ */ -+static inline void ep0_out_start(dwc_otg_core_if_t *core_if, dwc_otg_pcd_t *pcd) -+{ -+ dwc_otg_dev_if_t *dev_if = core_if->dev_if; -+ deptsiz0_data_t doeptsize0 = { .d32 = 0}; -+ dwc_otg_dma_desc_t* dma_desc; -+ depctl_data_t doepctl = { .d32 = 0 }; -+ -+#ifdef VERBOSE -+ DWC_DEBUGPL(DBG_PCDV,"%s() doepctl0=%0x\n", __func__, -+ dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl)); -+#endif -+ -+ doeptsize0.b.supcnt = 3; -+ doeptsize0.b.pktcnt = 1; -+ doeptsize0.b.xfersize = 8*3; -+ -+ -+ if (core_if->dma_enable) { -+ if (!core_if->dma_desc_enable) { -+ /** put here as for Hermes mode deptisz register should not be written */ -+ dwc_write_reg32(&dev_if->out_ep_regs[0]->doeptsiz, -+ doeptsize0.d32); -+ -+ /** @todo dma needs to handle multiple setup packets (up to 3) */ -+ dwc_write_reg32(&dev_if->out_ep_regs[0]->doepdma, -+ pcd->setup_pkt_dma_handle); -+ } else { -+ dev_if->setup_desc_index = (dev_if->setup_desc_index + 1) & 1; -+ dma_desc = dev_if->setup_desc_addr[dev_if->setup_desc_index]; -+ -+ /** DMA Descriptor Setup */ -+ dma_desc->status.b.bs = BS_HOST_BUSY; -+ dma_desc->status.b.l = 1; -+ dma_desc->status.b.ioc = 1; -+ dma_desc->status.b.bytes = pcd->ep0.dwc_ep.maxpacket; -+ dma_desc->buf = pcd->setup_pkt_dma_handle; -+ dma_desc->status.b.bs = BS_HOST_READY; -+ -+ /** DOEPDMA0 Register write */ -+ dwc_write_reg32(&dev_if->out_ep_regs[0]->doepdma, dev_if->dma_setup_desc_addr[dev_if->setup_desc_index]); -+ } -+ -+ } else { -+ /** put here as for Hermes mode deptisz register should not be written */ -+ dwc_write_reg32(&dev_if->out_ep_regs[0]->doeptsiz, -+ doeptsize0.d32); -+ } -+ -+ /** DOEPCTL0 Register write */ -+ doepctl.b.epena = 1; -+ doepctl.b.cnak = 1; -+ dwc_write_reg32(&dev_if->out_ep_regs[0]->doepctl, doepctl.d32); -+ -+#ifdef VERBOSE -+ DWC_DEBUGPL(DBG_PCDV,"doepctl0=%0x\n", -+ dwc_read_reg32(&dev_if->out_ep_regs[0]->doepctl)); -+ DWC_DEBUGPL(DBG_PCDV,"diepctl0=%0x\n", -+ dwc_read_reg32(&dev_if->in_ep_regs[0]->diepctl)); -+#endif -+} 
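ep0_out_start() above always arms EP0 OUT for up to three back-to-back SETUP packets (SUPCnt = 3, PktCnt = 1, XferSize = 3 * 8 bytes prepared for DOEPTSIZ0) and finishes with EPEna and CNAK in DOEPCTL0; only the address programming differs between slave mode, buffer DMA and descriptor DMA. A standalone sketch of that three-way split:

#include <stdbool.h>
#include <stdio.h>

/* The three ways ep0_out_start() arms EP0 OUT, depending on the DMA mode. */
static const char *ep0_out_arm_path(bool dma_enable, bool dma_desc_enable)
{
    if (!dma_enable)
        return "slave mode: write DOEPTSIZ0 only";
    if (!dma_desc_enable)
        return "buffer DMA: write DOEPTSIZ0 and DOEPDMA0 (setup packet buffer)";
    return "descriptor DMA: build one DMA descriptor and point DOEPDMA0 at it";
}

int main(void)
{
    /* Every path prepares SUPCnt = 3, PktCnt = 1, XferSize = 3 * 8 bytes and
     * ends with DOEPCTL0.EPEna = 1, DOEPCTL0.CNAK = 1. */
    printf("%s\n", ep0_out_arm_path(true,  false));
    printf("%s\n", ep0_out_arm_path(true,  true));
    printf("%s\n", ep0_out_arm_path(false, false));
    return 0;
}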
-+ -+ -+/** -+ * This interrupt occurs when a USB Reset is detected. When the USB -+ * Reset Interrupt occurs the device state is set to DEFAULT and the -+ * EP0 state is set to IDLE. -+ * -# Set the NAK bit for all OUT endpoints (DOEPCTLn.SNAK = 1) -+ * -# Unmask the following interrupt bits -+ * - DAINTMSK.INEP0 = 1 (Control 0 IN endpoint) -+ * - DAINTMSK.OUTEP0 = 1 (Control 0 OUT endpoint) -+ * - DOEPMSK.SETUP = 1 -+ * - DOEPMSK.XferCompl = 1 -+ * - DIEPMSK.XferCompl = 1 -+ * - DIEPMSK.TimeOut = 1 -+ * -# Program the following fields in the endpoint specific registers -+ * for Control OUT EP 0, in order to receive a setup packet -+ * - DOEPTSIZ0.Packet Count = 3 (To receive up to 3 back to back -+ * setup packets) -+ * - DOEPTSIZE0.Transfer Size = 24 Bytes (To receive up to 3 back -+ * to back setup packets) -+ * - In DMA mode, DOEPDMA0 Register with a memory address to -+ * store any setup packets received -+ * At this point, all the required initialization, except for enabling -+ * the control 0 OUT endpoint is done, for receiving SETUP packets. -+ */ -+int32_t dwc_otg_pcd_handle_usb_reset_intr(dwc_otg_pcd_t * pcd) -+{ -+ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); -+ dwc_otg_dev_if_t *dev_if = core_if->dev_if; -+ depctl_data_t doepctl = { .d32 = 0}; -+ -+ daint_data_t daintmsk = { .d32 = 0}; -+ doepmsk_data_t doepmsk = { .d32 = 0}; -+ diepmsk_data_t diepmsk = { .d32 = 0}; -+ -+ dcfg_data_t dcfg = { .d32=0 }; -+ grstctl_t resetctl = { .d32=0 }; -+ dctl_data_t dctl = {.d32=0}; -+ int i = 0; -+ gintsts_data_t gintsts; -+ -+ DWC_PRINT("USB RESET\n"); -+#ifdef DWC_EN_ISOC -+ for(i = 1;i < 16; ++i) -+ { -+ dwc_otg_pcd_ep_t *ep; -+ dwc_ep_t *dwc_ep; -+ ep = get_in_ep(pcd,i); -+ if(ep != 0){ -+ dwc_ep = &ep->dwc_ep; -+ dwc_ep->next_frame = 0xffffffff; -+ } -+ } -+#endif /* DWC_EN_ISOC */ -+ -+ /* reset the HNP settings */ -+ dwc_otg_pcd_update_otg(pcd, 1); -+ -+ /* Clear the Remote Wakeup Signalling */ -+ dctl.b.rmtwkupsig = 1; -+ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dctl, -+ dctl.d32, 0); -+ -+ /* Set NAK for all OUT EPs */ -+ doepctl.b.snak = 1; -+ for (i=0; i <= dev_if->num_out_eps; i++) -+ { -+ dwc_write_reg32(&dev_if->out_ep_regs[i]->doepctl, -+ doepctl.d32); -+ } -+ -+ /* Flush the NP Tx FIFO */ -+ dwc_otg_flush_tx_fifo(core_if, 0x10); -+ /* Flush the Learning Queue */ -+ resetctl.b.intknqflsh = 1; -+ dwc_write_reg32(&core_if->core_global_regs->grstctl, resetctl.d32); -+ -+ if(core_if->multiproc_int_enable) { -+ daintmsk.b.inep0 = 1; -+ daintmsk.b.outep0 = 1; -+ dwc_write_reg32(&dev_if->dev_global_regs->deachintmsk, daintmsk.d32); -+ -+ doepmsk.b.setup = 1; -+ doepmsk.b.xfercompl = 1; -+ doepmsk.b.ahberr = 1; -+ doepmsk.b.epdisabled = 1; -+ -+ if(core_if->dma_desc_enable) { -+ doepmsk.b.stsphsercvd = 1; -+ doepmsk.b.bna = 1; -+ } -+/* -+ doepmsk.b.babble = 1; -+ doepmsk.b.nyet = 1; -+ -+ if(core_if->dma_enable) { -+ doepmsk.b.nak = 1; -+ } -+*/ -+ dwc_write_reg32(&dev_if->dev_global_regs->doepeachintmsk[0], doepmsk.d32); -+ -+ diepmsk.b.xfercompl = 1; -+ diepmsk.b.timeout = 1; -+ diepmsk.b.epdisabled = 1; -+ diepmsk.b.ahberr = 1; -+ diepmsk.b.intknepmis = 1; -+ -+ if(core_if->dma_desc_enable) { -+ diepmsk.b.bna = 1; -+ } -+/* -+ if(core_if->dma_enable) { -+ diepmsk.b.nak = 1; -+ } -+*/ -+ dwc_write_reg32(&dev_if->dev_global_regs->diepeachintmsk[0], diepmsk.d32); -+ } else{ -+ daintmsk.b.inep0 = 1; -+ daintmsk.b.outep0 = 1; -+ dwc_write_reg32(&dev_if->dev_global_regs->daintmsk, daintmsk.d32); -+ -+ doepmsk.b.setup = 1; -+ doepmsk.b.xfercompl = 1; -+ doepmsk.b.ahberr = 
1; -+ doepmsk.b.epdisabled = 1; -+ -+ if(core_if->dma_desc_enable) { -+ doepmsk.b.stsphsercvd = 1; -+ doepmsk.b.bna = 1; -+ } -+/* -+ doepmsk.b.babble = 1; -+ doepmsk.b.nyet = 1; -+ doepmsk.b.nak = 1; -+*/ -+ dwc_write_reg32(&dev_if->dev_global_regs->doepmsk, doepmsk.d32); -+ -+ diepmsk.b.xfercompl = 1; -+ diepmsk.b.timeout = 1; -+ diepmsk.b.epdisabled = 1; -+ diepmsk.b.ahberr = 1; -+ diepmsk.b.intknepmis = 1; -+ -+ if(core_if->dma_desc_enable) { -+ diepmsk.b.bna = 1; -+ } -+ -+// diepmsk.b.nak = 1; -+ -+ dwc_write_reg32(&dev_if->dev_global_regs->diepmsk, diepmsk.d32); -+ } -+ -+ /* Reset Device Address */ -+ dcfg.d32 = dwc_read_reg32(&dev_if->dev_global_regs->dcfg); -+ dcfg.b.devaddr = 0; -+ dwc_write_reg32(&dev_if->dev_global_regs->dcfg, dcfg.d32); -+ -+ /* setup EP0 to receive SETUP packets */ -+ ep0_out_start(core_if, pcd); -+ -+ /* Clear interrupt */ -+ gintsts.d32 = 0; -+ gintsts.b.usbreset = 1; -+ dwc_write_reg32 (&core_if->core_global_regs->gintsts, gintsts.d32); -+ -+ return 1; -+} -+ -+/** -+ * Get the device speed from the device status register and convert it -+ * to USB speed constant. -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ */ -+static int get_device_speed(dwc_otg_core_if_t *core_if) -+{ -+ dsts_data_t dsts; -+ enum usb_device_speed speed = USB_SPEED_UNKNOWN; -+ dsts.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dsts); -+ -+ switch (dsts.b.enumspd) { -+ case DWC_DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ: -+ speed = USB_SPEED_HIGH; -+ break; -+ case DWC_DSTS_ENUMSPD_FS_PHY_30MHZ_OR_60MHZ: -+ case DWC_DSTS_ENUMSPD_FS_PHY_48MHZ: -+ speed = USB_SPEED_FULL; -+ break; -+ -+ case DWC_DSTS_ENUMSPD_LS_PHY_6MHZ: -+ speed = USB_SPEED_LOW; -+ break; -+ } -+ -+ return speed; -+} -+ -+/** -+ * Read the device status register and set the device speed in the -+ * data structure. -+ * Set up EP0 to receive SETUP packets by calling dwc_ep0_activate. -+ */ -+int32_t dwc_otg_pcd_handle_enum_done_intr(dwc_otg_pcd_t *pcd) -+{ -+ dwc_otg_pcd_ep_t *ep0 = &pcd->ep0; -+ gintsts_data_t gintsts; -+ gusbcfg_data_t gusbcfg; -+ dwc_otg_core_global_regs_t *global_regs = -+ GET_CORE_IF(pcd)->core_global_regs; -+ uint8_t utmi16b, utmi8b; -+ DWC_DEBUGPL(DBG_PCD, "SPEED ENUM\n"); -+ -+ if (GET_CORE_IF(pcd)->snpsid >= 0x4F54260A) { -+ utmi16b = 6; -+ utmi8b = 9; -+ } else { -+ utmi16b = 4; -+ utmi8b = 8; -+ } -+ dwc_otg_ep0_activate(GET_CORE_IF(pcd), &ep0->dwc_ep); -+ -+#ifdef DEBUG_EP0 -+ print_ep0_state(pcd); -+#endif -+ -+ if (pcd->ep0state == EP0_DISCONNECT) { -+ pcd->ep0state = EP0_IDLE; -+ } -+ else if (pcd->ep0state == EP0_STALL) { -+ pcd->ep0state = EP0_IDLE; -+ } -+ -+ pcd->ep0state = EP0_IDLE; -+ -+ ep0->stopped = 0; -+ -+ pcd->gadget.speed = get_device_speed(GET_CORE_IF(pcd)); -+ -+ /* Set USB turnaround time based on device speed and PHY interface. 
*/ -+ gusbcfg.d32 = dwc_read_reg32(&global_regs->gusbcfg); -+ if (pcd->gadget.speed == USB_SPEED_HIGH) { -+ if (GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type == DWC_HWCFG2_HS_PHY_TYPE_ULPI) { -+ /* ULPI interface */ -+ gusbcfg.b.usbtrdtim = 9; -+ } -+ if (GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type == DWC_HWCFG2_HS_PHY_TYPE_UTMI) { -+ /* UTMI+ interface */ -+ if (GET_CORE_IF(pcd)->hwcfg4.b.utmi_phy_data_width == 0) { -+ gusbcfg.b.usbtrdtim = utmi8b; -+ } -+ else if (GET_CORE_IF(pcd)->hwcfg4.b.utmi_phy_data_width == 1) { -+ gusbcfg.b.usbtrdtim = utmi16b; -+ } -+ else if (GET_CORE_IF(pcd)->core_params->phy_utmi_width == 8) { -+ gusbcfg.b.usbtrdtim = utmi8b; -+ } -+ else { -+ gusbcfg.b.usbtrdtim = utmi16b; -+ } -+ } -+ if (GET_CORE_IF(pcd)->hwcfg2.b.hs_phy_type == DWC_HWCFG2_HS_PHY_TYPE_UTMI_ULPI) { -+ /* UTMI+ OR ULPI interface */ -+ if (gusbcfg.b.ulpi_utmi_sel == 1) { -+ /* ULPI interface */ -+ gusbcfg.b.usbtrdtim = 9; -+ } -+ else { -+ /* UTMI+ interface */ -+ if (GET_CORE_IF(pcd)->core_params->phy_utmi_width == 16) { -+ gusbcfg.b.usbtrdtim = utmi16b; -+ } -+ else { -+ gusbcfg.b.usbtrdtim = utmi8b; -+ } -+ } -+ } -+ } -+ else { -+ /* Full or low speed */ -+ gusbcfg.b.usbtrdtim = 9; -+ } -+ dwc_write_reg32(&global_regs->gusbcfg, gusbcfg.d32); -+ -+ /* Clear interrupt */ -+ gintsts.d32 = 0; -+ gintsts.b.enumdone = 1; -+ dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts, -+ gintsts.d32); -+ return 1; -+} -+ -+/** -+ * This interrupt indicates that the ISO OUT Packet was dropped due to -+ * Rx FIFO full or Rx Status Queue Full. If this interrupt occurs -+ * read all the data from the Rx FIFO. -+ */ -+int32_t dwc_otg_pcd_handle_isoc_out_packet_dropped_intr(dwc_otg_pcd_t *pcd) -+{ -+ gintmsk_data_t intr_mask = { .d32 = 0}; -+ gintsts_data_t gintsts; -+ -+ DWC_PRINT("INTERRUPT Handler not implemented for %s\n", -+ "ISOC Out Dropped"); -+ -+ intr_mask.b.isooutdrop = 1; -+ dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk, -+ intr_mask.d32, 0); -+ -+ /* Clear interrupt */ -+ -+ gintsts.d32 = 0; -+ gintsts.b.isooutdrop = 1; -+ dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts, -+ gintsts.d32); -+ -+ return 1; -+} -+ -+/** -+ * This interrupt indicates the end of the portion of the micro-frame -+ * for periodic transactions. If there is a periodic transaction for -+ * the next frame, load the packets into the EP periodic Tx FIFO. -+ */ -+int32_t dwc_otg_pcd_handle_end_periodic_frame_intr(dwc_otg_pcd_t *pcd) -+{ -+ gintmsk_data_t intr_mask = { .d32 = 0}; -+ gintsts_data_t gintsts; -+ DWC_PRINT("INTERRUPT Handler not implemented for %s\n", "EOP"); -+ -+ intr_mask.b.eopframe = 1; -+ dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk, -+ intr_mask.d32, 0); -+ -+ /* Clear interrupt */ -+ gintsts.d32 = 0; -+ gintsts.b.eopframe = 1; -+ dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts, gintsts.d32); -+ -+ return 1; -+} -+ -+/** -+ * This interrupt indicates that EP of the packet on the top of the -+ * non-periodic Tx FIFO does not match EP of the IN Token received. -+ * -+ * The "Device IN Token Queue" Registers are read to determine the -+ * order the IN Tokens have been received. The non-periodic Tx FIFO -+ * is flushed, so it can be reloaded in the order seen in the IN Token -+ * Queue. 
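The GUSBCFG.USBTrdTim value chosen above depends only on the enumerated speed and the PHY data-path width: a 16-bit UTMI+ PHY gets the smaller value, an 8-bit one the larger, and ULPI or non-high-speed operation falls back to 9, with the small/large pair itself selected by the core revision (6/9 on newer SNPSID values, 4/8 on older ones). A standalone sketch of that selection, collapsing the hwcfg probing into explicit arguments:

#include <stdbool.h>
#include <stdio.h>

enum phy_kind { PHY_ULPI, PHY_UTMI };

/* Pick GUSBCFG.USBTrdTim the way the enumeration-done handler does:
 * high speed + UTMI+  -> width-dependent value (newer cores: 6/9, older: 4/8),
 * everything else     -> 9. */
static unsigned pick_usbtrdtim(bool high_speed, enum phy_kind phy,
                               unsigned utmi_width_bits, bool new_core)
{
    unsigned utmi16b = new_core ? 6 : 4;
    unsigned utmi8b  = new_core ? 9 : 8;

    if (!high_speed || phy == PHY_ULPI)
        return 9;
    return (utmi_width_bits == 16) ? utmi16b : utmi8b;
}

int main(void)
{
    printf("HS, UTMI+ 16-bit, new core: %u\n", pick_usbtrdtim(true,  PHY_UTMI, 16, true));
    printf("HS, UTMI+ 8-bit,  old core: %u\n", pick_usbtrdtim(true,  PHY_UTMI,  8, false));
    printf("FS, any PHY:                %u\n", pick_usbtrdtim(false, PHY_ULPI,  0, true));
    return 0;
}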
-+ */ -+int32_t dwc_otg_pcd_handle_ep_mismatch_intr(dwc_otg_core_if_t *core_if) -+{ -+ gintsts_data_t gintsts; -+ DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, core_if); -+ -+ /* Clear interrupt */ -+ gintsts.d32 = 0; -+ gintsts.b.epmismatch = 1; -+ dwc_write_reg32 (&core_if->core_global_regs->gintsts, gintsts.d32); -+ -+ return 1; -+} -+ -+/** -+ * This funcion stalls EP0. -+ */ -+static inline void ep0_do_stall(dwc_otg_pcd_t *pcd, const int err_val) -+{ -+ dwc_otg_pcd_ep_t *ep0 = &pcd->ep0; -+ struct usb_ctrlrequest *ctrl = &pcd->setup_pkt->req; -+ DWC_WARN("req %02x.%02x protocol STALL; err %d\n", -+ ctrl->bRequestType, ctrl->bRequest, err_val); -+ -+ ep0->dwc_ep.is_in = 1; -+ dwc_otg_ep_set_stall(pcd->otg_dev->core_if, &ep0->dwc_ep); -+ pcd->ep0.stopped = 1; -+ pcd->ep0state = EP0_IDLE; -+ ep0_out_start(GET_CORE_IF(pcd), pcd); -+} -+ -+/** -+ * This functions delegates the setup command to the gadget driver. -+ */ -+static inline void do_gadget_setup(dwc_otg_pcd_t *pcd, -+ struct usb_ctrlrequest * ctrl) -+{ -+ int ret = 0; -+ if (pcd->driver && pcd->driver->setup) { -+ SPIN_UNLOCK(&pcd->lock); -+ ret = pcd->driver->setup(&pcd->gadget, ctrl); -+ SPIN_LOCK(&pcd->lock); -+ if (ret < 0) { -+ ep0_do_stall(pcd, ret); -+ } -+ -+ /** @todo This is a g_file_storage gadget driver specific -+ * workaround: a DELAYED_STATUS result from the fsg_setup -+ * routine will result in the gadget queueing a EP0 IN status -+ * phase for a two-stage control transfer. Exactly the same as -+ * a SET_CONFIGURATION/SET_INTERFACE except that this is a class -+ * specific request. Need a generic way to know when the gadget -+ * driver will queue the status phase. Can we assume when we -+ * call the gadget driver setup() function that it will always -+ * queue and require the following flag? Need to look into -+ * this. -+ */ -+ -+ if (ret == 256 + 999) { -+ pcd->request_config = 1; -+ } -+ } -+} -+ -+/** -+ * This function starts the Zero-Length Packet for the IN status phase -+ * of a 2 stage control transfer. -+ */ -+static inline void do_setup_in_status_phase(dwc_otg_pcd_t *pcd) -+{ -+ dwc_otg_pcd_ep_t *ep0 = &pcd->ep0; -+ if (pcd->ep0state == EP0_STALL) { -+ return; -+ } -+ -+ pcd->ep0state = EP0_IN_STATUS_PHASE; -+ -+ /* Prepare for more SETUP Packets */ -+ DWC_DEBUGPL(DBG_PCD, "EP0 IN ZLP\n"); -+ ep0->dwc_ep.xfer_len = 0; -+ ep0->dwc_ep.xfer_count = 0; -+ ep0->dwc_ep.is_in = 1; -+ ep0->dwc_ep.dma_addr = pcd->setup_pkt_dma_handle; -+ dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd), &ep0->dwc_ep); -+ -+ /* Prepare for more SETUP Packets */ -+// if(GET_CORE_IF(pcd)->dma_enable == 0) ep0_out_start(GET_CORE_IF(pcd), pcd); -+} -+ -+/** -+ * This function starts the Zero-Length Packet for the OUT status phase -+ * of a 2 stage control transfer. -+ */ -+static inline void do_setup_out_status_phase(dwc_otg_pcd_t *pcd) -+{ -+ dwc_otg_pcd_ep_t *ep0 = &pcd->ep0; -+ if (pcd->ep0state == EP0_STALL) { -+ DWC_DEBUGPL(DBG_PCD, "EP0 STALLED\n"); -+ return; -+ } -+ pcd->ep0state = EP0_OUT_STATUS_PHASE; -+ -+ DWC_DEBUGPL(DBG_PCD, "EP0 OUT ZLP\n"); -+ ep0->dwc_ep.xfer_len = 0; -+ ep0->dwc_ep.xfer_count = 0; -+ ep0->dwc_ep.is_in = 0; -+ ep0->dwc_ep.dma_addr = pcd->setup_pkt_dma_handle; -+ dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd), &ep0->dwc_ep); -+ -+ /* Prepare for more SETUP Packets */ -+ if(GET_CORE_IF(pcd)->dma_enable == 0) { -+ ep0_out_start(GET_CORE_IF(pcd), pcd); -+ } -+} -+ -+/** -+ * Clear the EP halt (STALL) and if pending requests start the -+ * transfer. 
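Both status-phase helpers above queue a zero-length packet on EP0; which one runs is decided by the direction of the data stage, or forced to IN when wLength is 0. A minimal sketch of that rule, assuming the Linux usb_ctrlrequest layout and the same direct wLength comparison the patch itself uses:

#include <linux/usb/ch9.h>

/* Status stage runs opposite to the data stage: a control read ends with an
 * OUT ZLP, a control write or a no-data request ends with an IN ZLP. */
static int ep0_status_stage_is_in(const struct usb_ctrlrequest *ctrl)
{
        if (ctrl->wLength == 0)
                return 1;                               /* no data stage */

        return !(ctrl->bRequestType & USB_DIR_IN);      /* read -> OUT status */
}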
-+ */ -+static inline void pcd_clear_halt(dwc_otg_pcd_t *pcd, dwc_otg_pcd_ep_t *ep) -+{ -+ if(ep->dwc_ep.stall_clear_flag == 0) -+ dwc_otg_ep_clear_stall(GET_CORE_IF(pcd), &ep->dwc_ep); -+ -+ /* Reactive the EP */ -+ dwc_otg_ep_activate(GET_CORE_IF(pcd), &ep->dwc_ep); -+ if (ep->stopped) { -+ ep->stopped = 0; -+ /* If there is a request in the EP queue start it */ -+ -+ /** @todo FIXME: this causes an EP mismatch in DMA mode. -+ * epmismatch not yet implemented. */ -+ -+ /* -+ * Above fixme is solved by implmenting a tasklet to call the -+ * start_next_request(), outside of interrupt context at some -+ * time after the current time, after a clear-halt setup packet. -+ * Still need to implement ep mismatch in the future if a gadget -+ * ever uses more than one endpoint at once -+ */ -+ ep->queue_sof = 1; -+ tasklet_schedule (pcd->start_xfer_tasklet); -+ } -+ /* Start Control Status Phase */ -+ do_setup_in_status_phase(pcd); -+} -+ -+/** -+ * This function is called when the SET_FEATURE TEST_MODE Setup packet -+ * is sent from the host. The Device Control register is written with -+ * the Test Mode bits set to the specified Test Mode. This is done as -+ * a tasklet so that the "Status" phase of the control transfer -+ * completes before transmitting the TEST packets. -+ * -+ * @todo This has not been tested since the tasklet struct was put -+ * into the PCD struct! -+ * -+ */ -+static void do_test_mode(unsigned long data) -+{ -+ dctl_data_t dctl; -+ dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *)data; -+ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); -+ int test_mode = pcd->test_mode; -+ -+ -+// DWC_WARN("%s() has not been tested since being rewritten!\n", __func__); -+ -+ dctl.d32 = dwc_read_reg32(&core_if->dev_if->dev_global_regs->dctl); -+ switch (test_mode) { -+ case 1: // TEST_J -+ dctl.b.tstctl = 1; -+ break; -+ -+ case 2: // TEST_K -+ dctl.b.tstctl = 2; -+ break; -+ -+ case 3: // TEST_SE0_NAK -+ dctl.b.tstctl = 3; -+ break; -+ -+ case 4: // TEST_PACKET -+ dctl.b.tstctl = 4; -+ break; -+ -+ case 5: // TEST_FORCE_ENABLE -+ dctl.b.tstctl = 5; -+ break; -+ } -+ dwc_write_reg32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32); -+} -+ -+/** -+ * This function process the GET_STATUS Setup Commands. -+ */ -+static inline void do_get_status(dwc_otg_pcd_t *pcd) -+{ -+ struct usb_ctrlrequest ctrl = pcd->setup_pkt->req; -+ dwc_otg_pcd_ep_t *ep; -+ dwc_otg_pcd_ep_t *ep0 = &pcd->ep0; -+ uint16_t *status = pcd->status_buf; -+ -+#ifdef DEBUG_EP0 -+ DWC_DEBUGPL(DBG_PCD, -+ "GET_STATUS %02x.%02x v%04x i%04x l%04x\n", -+ ctrl.bRequestType, ctrl.bRequest, -+ ctrl.wValue, ctrl.wIndex, ctrl.wLength); -+#endif -+ -+ switch (ctrl.bRequestType & USB_RECIP_MASK) { -+ case USB_RECIP_DEVICE: -+ *status = 0x1; /* Self powered */ -+ *status |= pcd->remote_wakeup_enable << 1; -+ break; -+ -+ case USB_RECIP_INTERFACE: -+ *status = 0; -+ break; -+ -+ case USB_RECIP_ENDPOINT: -+ ep = get_ep_by_addr(pcd, ctrl.wIndex); -+ if (ep == 0 || ctrl.wLength > 2) { -+ ep0_do_stall(pcd, -EOPNOTSUPP); -+ return; -+ } -+ /** @todo check for EP stall */ -+ *status = ep->stopped; -+ break; -+ } -+ pcd->ep0_pending = 1; -+ ep0->dwc_ep.start_xfer_buff = (uint8_t *)status; -+ ep0->dwc_ep.xfer_buff = (uint8_t *)status; -+ ep0->dwc_ep.dma_addr = pcd->status_buf_dma_handle; -+ ep0->dwc_ep.xfer_len = 2; -+ ep0->dwc_ep.xfer_count = 0; -+ ep0->dwc_ep.total_len = ep0->dwc_ep.xfer_len; -+ dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd), &ep0->dwc_ep); -+} -+/** -+ * This function process the SET_FEATURE Setup Commands. 
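do_get_status() above answers a device-recipient GET_STATUS with a 16-bit bitmap: bit 0 reports self-powered, bit 1 mirrors remote_wakeup_enable. The same packing, sketched stand-alone with hypothetical flags:

#include <stdint.h>

/* Device GET_STATUS payload: D0 = self-powered, D1 = remote wakeup enabled. */
static uint16_t device_get_status(int self_powered, int remote_wakeup_enabled)
{
        uint16_t status = 0;

        if (self_powered)
                status |= 1u << 0;
        if (remote_wakeup_enabled)
                status |= 1u << 1;

        return status;          /* transmitted LSB first over EP0 */
}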
-+ */ -+static inline void do_set_feature(dwc_otg_pcd_t *pcd) -+{ -+ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); -+ dwc_otg_core_global_regs_t *global_regs = -+ core_if->core_global_regs; -+ struct usb_ctrlrequest ctrl = pcd->setup_pkt->req; -+ dwc_otg_pcd_ep_t *ep = 0; -+ int32_t otg_cap_param = core_if->core_params->otg_cap; -+ gotgctl_data_t gotgctl = { .d32 = 0 }; -+ -+ DWC_DEBUGPL(DBG_PCD, "SET_FEATURE:%02x.%02x v%04x i%04x l%04x\n", -+ ctrl.bRequestType, ctrl.bRequest, -+ ctrl.wValue, ctrl.wIndex, ctrl.wLength); -+ DWC_DEBUGPL(DBG_PCD,"otg_cap=%d\n", otg_cap_param); -+ -+ -+ switch (ctrl.bRequestType & USB_RECIP_MASK) { -+ case USB_RECIP_DEVICE: -+ switch (ctrl.wValue) { -+ case USB_DEVICE_REMOTE_WAKEUP: -+ pcd->remote_wakeup_enable = 1; -+ break; -+ -+ case USB_DEVICE_TEST_MODE: -+ /* Setup the Test Mode tasklet to do the Test -+ * Packet generation after the SETUP Status -+ * phase has completed. */ -+ -+ /** @todo This has not been tested since the -+ * tasklet struct was put into the PCD -+ * struct! */ -+ pcd->test_mode_tasklet.next = 0; -+ pcd->test_mode_tasklet.state = 0; -+ atomic_set(&pcd->test_mode_tasklet.count, 0); -+ pcd->test_mode_tasklet.func = do_test_mode; -+ pcd->test_mode_tasklet.data = (unsigned long)pcd; -+ pcd->test_mode = ctrl.wIndex >> 8; -+ tasklet_schedule(&pcd->test_mode_tasklet); -+ break; -+ -+ case USB_DEVICE_B_HNP_ENABLE: -+ DWC_DEBUGPL(DBG_PCDV, "SET_FEATURE: USB_DEVICE_B_HNP_ENABLE\n"); -+ -+ /* dev may initiate HNP */ -+ if (otg_cap_param == DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE) { -+ pcd->b_hnp_enable = 1; -+ dwc_otg_pcd_update_otg(pcd, 0); -+ DWC_DEBUGPL(DBG_PCD, "Request B HNP\n"); -+ /**@todo Is the gotgctl.devhnpen cleared -+ * by a USB Reset? */ -+ gotgctl.b.devhnpen = 1; -+ gotgctl.b.hnpreq = 1; -+ dwc_write_reg32(&global_regs->gotgctl, gotgctl.d32); -+ } -+ else { -+ ep0_do_stall(pcd, -EOPNOTSUPP); -+ } -+ break; -+ -+ case USB_DEVICE_A_HNP_SUPPORT: -+ /* RH port supports HNP */ -+ DWC_DEBUGPL(DBG_PCDV, "SET_FEATURE: USB_DEVICE_A_HNP_SUPPORT\n"); -+ if (otg_cap_param == DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE) { -+ pcd->a_hnp_support = 1; -+ dwc_otg_pcd_update_otg(pcd, 0); -+ } -+ else { -+ ep0_do_stall(pcd, -EOPNOTSUPP); -+ } -+ break; -+ -+ case USB_DEVICE_A_ALT_HNP_SUPPORT: -+ /* other RH port does */ -+ DWC_DEBUGPL(DBG_PCDV, "SET_FEATURE: USB_DEVICE_A_ALT_HNP_SUPPORT\n"); -+ if (otg_cap_param == DWC_OTG_CAP_PARAM_HNP_SRP_CAPABLE) { -+ pcd->a_alt_hnp_support = 1; -+ dwc_otg_pcd_update_otg(pcd, 0); -+ } -+ else { -+ ep0_do_stall(pcd, -EOPNOTSUPP); -+ } -+ break; -+ } -+ do_setup_in_status_phase(pcd); -+ break; -+ -+ case USB_RECIP_INTERFACE: -+ do_gadget_setup(pcd, &ctrl); -+ break; -+ -+ case USB_RECIP_ENDPOINT: -+ if (ctrl.wValue == USB_ENDPOINT_HALT) { -+ ep = get_ep_by_addr(pcd, ctrl.wIndex); -+ if (ep == 0) { -+ ep0_do_stall(pcd, -EOPNOTSUPP); -+ return; -+ } -+ ep->stopped = 1; -+ dwc_otg_ep_set_stall(core_if, &ep->dwc_ep); -+ } -+ do_setup_in_status_phase(pcd); -+ break; -+ } -+} -+ -+/** -+ * This function process the CLEAR_FEATURE Setup Commands. 
-+ */ -+static inline void do_clear_feature(dwc_otg_pcd_t *pcd) -+{ -+ struct usb_ctrlrequest ctrl = pcd->setup_pkt->req; -+ dwc_otg_pcd_ep_t *ep = 0; -+ -+ DWC_DEBUGPL(DBG_PCD, -+ "CLEAR_FEATURE:%02x.%02x v%04x i%04x l%04x\n", -+ ctrl.bRequestType, ctrl.bRequest, -+ ctrl.wValue, ctrl.wIndex, ctrl.wLength); -+ -+ switch (ctrl.bRequestType & USB_RECIP_MASK) { -+ case USB_RECIP_DEVICE: -+ switch (ctrl.wValue) { -+ case USB_DEVICE_REMOTE_WAKEUP: -+ pcd->remote_wakeup_enable = 0; -+ break; -+ -+ case USB_DEVICE_TEST_MODE: -+ /** @todo Add CLEAR_FEATURE for TEST modes. */ -+ break; -+ } -+ do_setup_in_status_phase(pcd); -+ break; -+ -+ case USB_RECIP_ENDPOINT: -+ ep = get_ep_by_addr(pcd, ctrl.wIndex); -+ if (ep == 0) { -+ ep0_do_stall(pcd, -EOPNOTSUPP); -+ return; -+ } -+ -+ pcd_clear_halt(pcd, ep); -+ -+ break; -+ } -+} -+ -+/** -+ * This function process the SET_ADDRESS Setup Commands. -+ */ -+static inline void do_set_address(dwc_otg_pcd_t *pcd) -+{ -+ dwc_otg_dev_if_t *dev_if = GET_CORE_IF(pcd)->dev_if; -+ struct usb_ctrlrequest ctrl = pcd->setup_pkt->req; -+ -+ if (ctrl.bRequestType == USB_RECIP_DEVICE) { -+ dcfg_data_t dcfg = {.d32=0}; -+ -+#ifdef DEBUG_EP0 -+// DWC_DEBUGPL(DBG_PCDV, "SET_ADDRESS:%d\n", ctrl.wValue); -+#endif -+ dcfg.b.devaddr = ctrl.wValue; -+ dwc_modify_reg32(&dev_if->dev_global_regs->dcfg, 0, dcfg.d32); -+ do_setup_in_status_phase(pcd); -+ } -+} -+ -+/** -+ * This function processes SETUP commands. In Linux, the USB Command -+ * processing is done in two places - the first being the PCD and the -+ * second in the Gadget Driver (for example, the File-Backed Storage -+ * Gadget Driver). -+ * -+ *
-+ * <table>
-+ * <tr><td>Parameter Name</td><td>Meaning</td></tr>
-+ *
-+ * <tr><td>otg_cap</td><td>Specifies the OTG capabilities. The driver will
-+ * automatically detect the value for this parameter if none is specified.
-+ * - 0: HNP and SRP capable (default, if available)
-+ * - 1: SRP Only capable
-+ * - 2: No HNP/SRP capable</td></tr>
-+ *
-+ * <tr><td>dma_enable</td><td>Specifies whether to use slave or DMA mode for
-+ * accessing the data FIFOs. The driver will automatically detect the value
-+ * for this parameter if none is specified.
-+ * - 0: Slave
-+ * - 1: DMA (default, if available)</td></tr>
-+ *
-+ * <tr><td>dma_burst_size</td><td>The DMA Burst size (applicable only for
-+ * External DMA Mode).
-+ * - Values: 1, 4, 8, 16, 32, 64, 128, 256 (default 32)</td></tr>
-+ *
-+ * <tr><td>speed</td><td>Specifies the maximum speed of operation in host and
-+ * device mode. The actual speed depends on the speed of the attached device
-+ * and the value of phy_type.
-+ * - 0: High Speed (default)
-+ * - 1: Full Speed</td></tr>
-+ *
-+ * <tr><td>host_support_fs_ls_low_power</td><td>Specifies whether low power
-+ * mode is supported when attached to a Full Speed or Low Speed device in
-+ * host mode.
-+ * - 0: Don't support low power mode (default)
-+ * - 1: Support low power mode</td></tr>
-+ *
-+ * <tr><td>host_ls_low_power_phy_clk</td><td>Specifies the PHY clock rate in
-+ * low power mode when connected to a Low Speed device in host mode. This
-+ * parameter is applicable only if HOST_SUPPORT_FS_LS_LOW_POWER is enabled.
-+ * - 0: 48 MHz (default)
-+ * - 1: 6 MHz</td></tr>
-+ *
-+ * <tr><td>enable_dynamic_fifo</td><td>Specifies whether FIFOs may be resized
-+ * by the driver software.
-+ * - 0: Use cC FIFO size parameters
-+ * - 1: Allow dynamic FIFO sizing (default)</td></tr>
-+ *
-+ * <tr><td>data_fifo_size</td><td>Total number of 4-byte words in the data
-+ * FIFO memory. This memory includes the Rx FIFO, non-periodic Tx FIFO, and
-+ * periodic Tx FIFOs.
-+ * - Values: 32 to 32768 (default 8192)
-+ *
-+ * Note: The total FIFO memory depth in the FPGA configuration is 8192.</td></tr>
-+ *
-+ * <tr><td>dev_rx_fifo_size</td><td>Number of 4-byte words in the Rx FIFO in
-+ * device mode when dynamic FIFO sizing is enabled.
-+ * - Values: 16 to 32768 (default 1064)</td></tr>
-+ *
-+ * <tr><td>dev_nperio_tx_fifo_size</td><td>Number of 4-byte words in the
-+ * non-periodic Tx FIFO in device mode when dynamic FIFO sizing is enabled.
-+ * - Values: 16 to 32768 (default 1024)</td></tr>
-+ *
-+ * <tr><td>dev_perio_tx_fifo_size_n (n = 1 to 15)</td><td>Number of 4-byte
-+ * words in each of the periodic Tx FIFOs in device mode when dynamic FIFO
-+ * sizing is enabled.
-+ * - Values: 4 to 768 (default 256)</td></tr>
-+ *
-+ * <tr><td>host_rx_fifo_size</td><td>Number of 4-byte words in the Rx FIFO in
-+ * host mode when dynamic FIFO sizing is enabled.
-+ * - Values: 16 to 32768 (default 1024)</td></tr>
-+ *
-+ * <tr><td>host_nperio_tx_fifo_size</td><td>Number of 4-byte words in the
-+ * non-periodic Tx FIFO in host mode when dynamic FIFO sizing is enabled in
-+ * the core.
-+ * - Values: 16 to 32768 (default 1024)</td></tr>
-+ *
-+ * <tr><td>host_perio_tx_fifo_size</td><td>Number of 4-byte words in the host
-+ * periodic Tx FIFO when dynamic FIFO sizing is enabled.
-+ * - Values: 16 to 32768 (default 1024)</td></tr>
-+ *
-+ * <tr><td>max_transfer_size</td><td>The maximum transfer size supported in
-+ * bytes.
-+ * - Values: 2047 to 65,535 (default 65,535)</td></tr>
-+ *
-+ * <tr><td>max_packet_count</td><td>The maximum number of packets in a
-+ * transfer.
-+ * - Values: 15 to 511 (default 511)</td></tr>
-+ *
-+ * <tr><td>host_channels</td><td>The number of host channel registers to use.
-+ * - Values: 1 to 16 (default 12)
-+ *
-+ * Note: The FPGA configuration supports a maximum of 12 host channels.</td></tr>
-+ *
-+ * <tr><td>dev_endpoints</td><td>The number of endpoints in addition to EP0
-+ * available for device mode operations.
-+ * - Values: 1 to 15 (default 6 IN and OUT)
-+ *
-+ * Note: The FPGA configuration supports a maximum of 6 IN and OUT endpoints
-+ * in addition to EP0.</td></tr>
-+ *
-+ * <tr><td>phy_type</td><td>Specifies the type of PHY interface to use. By
-+ * default, the driver will automatically detect the phy_type.
-+ * - 0: Full Speed
-+ * - 1: UTMI+ (default, if available)
-+ * - 2: ULPI</td></tr>
-+ *
-+ * <tr><td>phy_utmi_width</td><td>Specifies the UTMI+ Data Width. This
-+ * parameter is applicable for a phy_type of UTMI+. Also, this parameter is
-+ * applicable only if the OTG_HSPHY_WIDTH cC parameter was set to
-+ * "8 and 16 bits", meaning that the core has been configured to work at
-+ * either data path width.
-+ * - Values: 8 or 16 bits (default 16)</td></tr>
-+ *
-+ * <tr><td>phy_ulpi_ddr</td><td>Specifies whether the ULPI operates at double
-+ * or single data rate. This parameter is only applicable if phy_type is ULPI.
-+ * - 0: single data rate ULPI interface with 8 bit wide data bus (default)
-+ * - 1: double data rate ULPI interface with 4 bit wide data bus</td></tr>
-+ *
-+ * <tr><td>i2c_enable</td><td>Specifies whether to use the I2C interface for
-+ * the full speed PHY. This parameter is only applicable if PHY_TYPE is FS.
-+ * - 0: Disabled (default)
-+ * - 1: Enabled</td></tr>
-+ *
-+ * <tr><td>otg_en_multiple_tx_fifo</td><td>Specifies whether dedicated Tx
-+ * FIFOs are enabled for non-periodic IN EPs. The driver will automatically
-+ * detect the value for this parameter if none is specified.
-+ * - 0: Disabled
-+ * - 1: Enabled (default, if available)</td></tr>
-+ *
-+ * <tr><td>dev_tx_fifo_size_n (n = 1 to 15)</td><td>Number of 4-byte words in
-+ * each of the Tx FIFOs in device mode when dynamic FIFO sizing is enabled.
-+ * - Values: 4 to 768 (default 256)</td></tr>
-+ * </table>
-+ *
-+ * <table>
-+ * <tr><th>Command</th><th>Driver</th><th>Description</th></tr>
-+ *
-+ * <tr><td>GET_STATUS</td><td>PCD</td><td>Command is processed as
-+ * defined in chapter 9 of the USB 2.0 Specification.</td></tr>
-+ *
-+ * <tr><td>CLEAR_FEATURE</td><td>PCD</td><td>The Device and Endpoint
-+ * requests are processed by the PCD; the ENDPOINT_HALT feature is
-+ * processed and Interface requests are ignored.</td></tr>
-+ *
-+ * <tr><td>SET_FEATURE</td><td>PCD</td><td>The Device and Endpoint
-+ * requests are processed by the PCD. Interface requests are passed
-+ * to the Gadget Driver.</td></tr>
-+ *
-+ * <tr><td>SET_ADDRESS</td><td>PCD</td><td>Program the DCFG register
-+ * with the device address received.</td></tr>
-+ *
-+ * <tr><td>GET_DESCRIPTOR</td><td>Gadget Driver</td><td>Return the
-+ * requested descriptor.</td></tr>
-+ *
-+ * <tr><td>SET_DESCRIPTOR</td><td>Gadget Driver</td><td>Optional -
-+ * not implemented by any of the existing Gadget Drivers.</td></tr>
-+ *
-+ * <tr><td>SET_CONFIGURATION</td><td>Gadget Driver</td><td>Disable
-+ * all EPs and enable EPs for the new configuration.</td></tr>
-+ *
-+ * <tr><td>GET_CONFIGURATION</td><td>Gadget Driver</td><td>Return
-+ * the current configuration.</td></tr>
-+ *
-+ * <tr><td>SET_INTERFACE</td><td>Gadget Driver</td><td>Disable all
-+ * EPs and enable EPs for the new configuration.</td></tr>
-+ *
-+ * <tr><td>GET_INTERFACE</td><td>Gadget Driver</td><td>Return the
-+ * current interface.</td></tr>
-+ *
-+ * <tr><td>SYNC_FRAME</td><td>PCD</td><td>Display debug
-+ * message.</td></tr>
-+ * </table>
-+ * -+ * When the SETUP Phase Done interrupt occurs, the PCD SETUP commands are -+ * processed by pcd_setup. Calling the Function Driver's setup function from -+ * pcd_setup processes the gadget SETUP commands. -+ */ -+static inline void pcd_setup(dwc_otg_pcd_t *pcd) -+{ -+ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); -+ dwc_otg_dev_if_t *dev_if = core_if->dev_if; -+ struct usb_ctrlrequest ctrl = pcd->setup_pkt->req; -+ dwc_otg_pcd_ep_t *ep0 = &pcd->ep0; -+ -+ deptsiz0_data_t doeptsize0 = { .d32 = 0}; -+ -+#ifdef DEBUG_EP0 -+ DWC_DEBUGPL(DBG_PCD, "SETUP %02x.%02x v%04x i%04x l%04x\n", -+ ctrl.bRequestType, ctrl.bRequest, -+ ctrl.wValue, ctrl.wIndex, ctrl.wLength); -+#endif -+ -+ doeptsize0.d32 = dwc_read_reg32(&dev_if->out_ep_regs[0]->doeptsiz); -+ -+ /** @todo handle > 1 setup packet , assert error for now */ -+ -+ if (core_if->dma_enable && core_if->dma_desc_enable == 0 && (doeptsize0.b.supcnt < 2)) { -+ DWC_ERROR ("\n\n----------- CANNOT handle > 1 setup packet in DMA mode\n\n"); -+ } -+ -+ /* Clean up the request queue */ -+ dwc_otg_request_nuke(ep0); -+ ep0->stopped = 0; -+ -+ if (ctrl.bRequestType & USB_DIR_IN) { -+ ep0->dwc_ep.is_in = 1; -+ pcd->ep0state = EP0_IN_DATA_PHASE; -+ } -+ else { -+ ep0->dwc_ep.is_in = 0; -+ pcd->ep0state = EP0_OUT_DATA_PHASE; -+ } -+ -+ if(ctrl.wLength == 0) { -+ ep0->dwc_ep.is_in = 1; -+ pcd->ep0state = EP0_IN_STATUS_PHASE; -+ } -+ -+ if ((ctrl.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD) { -+ /* handle non-standard (class/vendor) requests in the gadget driver */ -+ do_gadget_setup(pcd, &ctrl); -+ return; -+ } -+ -+ /** @todo NGS: Handle bad setup packet? */ -+ -+/////////////////////////////////////////// -+//// --- Standard Request handling --- //// -+ -+ switch (ctrl.bRequest) { -+ case USB_REQ_GET_STATUS: -+ do_get_status(pcd); -+ break; -+ -+ case USB_REQ_CLEAR_FEATURE: -+ do_clear_feature(pcd); -+ break; -+ -+ case USB_REQ_SET_FEATURE: -+ do_set_feature(pcd); -+ break; -+ -+ case USB_REQ_SET_ADDRESS: -+ do_set_address(pcd); -+ break; -+ -+ case USB_REQ_SET_INTERFACE: -+ case USB_REQ_SET_CONFIGURATION: -+// _pcd->request_config = 1; /* Configuration changed */ -+ do_gadget_setup(pcd, &ctrl); -+ break; -+ -+ case USB_REQ_SYNCH_FRAME: -+ do_gadget_setup(pcd, &ctrl); -+ break; -+ -+ default: -+ /* Call the Gadget Driver's setup functions */ -+ do_gadget_setup(pcd, &ctrl); -+ break; -+ } -+} -+ -+/** -+ * This function completes the ep0 control transfer. 
-+ */ -+static int32_t ep0_complete_request(dwc_otg_pcd_ep_t *ep) -+{ -+ dwc_otg_core_if_t *core_if = GET_CORE_IF(ep->pcd); -+ dwc_otg_dev_if_t *dev_if = core_if->dev_if; -+ dwc_otg_dev_in_ep_regs_t *in_ep_regs = -+ dev_if->in_ep_regs[ep->dwc_ep.num]; -+#ifdef DEBUG_EP0 -+ dwc_otg_dev_out_ep_regs_t *out_ep_regs = -+ dev_if->out_ep_regs[ep->dwc_ep.num]; -+#endif -+ deptsiz0_data_t deptsiz; -+ desc_sts_data_t desc_sts; -+ dwc_otg_pcd_request_t *req; -+ int is_last = 0; -+ dwc_otg_pcd_t *pcd = ep->pcd; -+ -+ //DWC_DEBUGPL(DBG_PCDV, "%s() %s\n", __func__, _ep->ep.name); -+ -+ if (pcd->ep0_pending && list_empty(&ep->queue)) { -+ if (ep->dwc_ep.is_in) { -+#ifdef DEBUG_EP0 -+ DWC_DEBUGPL(DBG_PCDV, "Do setup OUT status phase\n"); -+#endif -+ do_setup_out_status_phase(pcd); -+ } -+ else { -+#ifdef DEBUG_EP0 -+ DWC_DEBUGPL(DBG_PCDV, "Do setup IN status phase\n"); -+#endif -+ do_setup_in_status_phase(pcd); -+ } -+ pcd->ep0_pending = 0; -+ return 1; -+ } -+ -+ if (list_empty(&ep->queue)) { -+ return 0; -+ } -+ req = list_entry(ep->queue.next, dwc_otg_pcd_request_t, queue); -+ -+ -+ if (pcd->ep0state == EP0_OUT_STATUS_PHASE || pcd->ep0state == EP0_IN_STATUS_PHASE) { -+ is_last = 1; -+ } -+ else if (ep->dwc_ep.is_in) { -+ deptsiz.d32 = dwc_read_reg32(&in_ep_regs->dieptsiz); -+ if(core_if->dma_desc_enable != 0) -+ desc_sts.d32 = readl(dev_if->in_desc_addr); -+#ifdef DEBUG_EP0 -+ DWC_DEBUGPL(DBG_PCDV, "%s len=%d xfersize=%d pktcnt=%d\n", -+ ep->ep.name, ep->dwc_ep.xfer_len, -+ deptsiz.b.xfersize, deptsiz.b.pktcnt); -+#endif -+ -+ if (((core_if->dma_desc_enable == 0) && (deptsiz.b.xfersize == 0)) || -+ ((core_if->dma_desc_enable != 0) && (desc_sts.b.bytes == 0))) { -+ req->req.actual = ep->dwc_ep.xfer_count; -+ /* Is a Zero Len Packet needed? */ -+ if (req->req.zero) { -+#ifdef DEBUG_EP0 -+ DWC_DEBUGPL(DBG_PCD, "Setup Rx ZLP\n"); -+#endif -+ req->req.zero = 0; -+ } -+ do_setup_out_status_phase(pcd); -+ } -+ } -+ else { -+ /* ep0-OUT */ -+#ifdef DEBUG_EP0 -+ deptsiz.d32 = dwc_read_reg32(&out_ep_regs->doeptsiz); -+ DWC_DEBUGPL(DBG_PCDV, "%s len=%d xsize=%d pktcnt=%d\n", -+ ep->ep.name, ep->dwc_ep.xfer_len, -+ deptsiz.b.xfersize, -+ deptsiz.b.pktcnt); -+#endif -+ req->req.actual = ep->dwc_ep.xfer_count; -+ /* Is a Zero Len Packet needed? */ -+ if (req->req.zero) { -+#ifdef DEBUG_EP0 -+ DWC_DEBUGPL(DBG_PCDV, "Setup Tx ZLP\n"); -+#endif -+ req->req.zero = 0; -+ } -+ if(core_if->dma_desc_enable == 0) -+ do_setup_in_status_phase(pcd); -+ } -+ -+ /* Complete the request */ -+ if (is_last) { -+ dwc_otg_request_done(ep, req, 0); -+ ep->dwc_ep.start_xfer_buff = 0; -+ ep->dwc_ep.xfer_buff = 0; -+ ep->dwc_ep.xfer_len = 0; -+ return 1; -+ } -+ return 0; -+} -+ -+/** -+ * This function completes the request for the EP. If there are -+ * additional requests for the EP in the queue they will be started. 
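ep0_complete_request() above infers how many bytes were actually moved from what the core left behind: DIEPTSIZ0.XferSize in buffer-DMA mode, or the bytes field of the completed descriptor in descriptor-DMA mode. A stand-alone sketch of that bookkeeping, with all values passed as hypothetical parameters:

#include <stdint.h>

/* Bytes transferred = bytes programmed - bytes the core reports as left. */
static uint32_t ep0_bytes_done(uint32_t programmed, uint32_t left_buffer_dma,
                               uint32_t left_desc_dma, int dma_desc_enable)
{
        uint32_t left = dma_desc_enable ? left_desc_dma : left_buffer_dma;

        return programmed - left;
}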
-+ */ -+static void complete_ep(dwc_otg_pcd_ep_t *ep) -+{ -+ dwc_otg_core_if_t *core_if = GET_CORE_IF(ep->pcd); -+ dwc_otg_dev_if_t *dev_if = core_if->dev_if; -+ dwc_otg_dev_in_ep_regs_t *in_ep_regs = -+ dev_if->in_ep_regs[ep->dwc_ep.num]; -+ deptsiz_data_t deptsiz; -+ desc_sts_data_t desc_sts; -+ dwc_otg_pcd_request_t *req = 0; -+ dwc_otg_dma_desc_t* dma_desc; -+ uint32_t byte_count = 0; -+ int is_last = 0; -+ int i; -+ -+ DWC_DEBUGPL(DBG_PCDV,"%s() %s-%s\n", __func__, ep->ep.name, -+ (ep->dwc_ep.is_in?"IN":"OUT")); -+ -+ /* Get any pending requests */ -+ if (!list_empty(&ep->queue)) { -+ req = list_entry(ep->queue.next, dwc_otg_pcd_request_t, -+ queue); -+ if (!req) { -+ printk("complete_ep 0x%p, req = NULL!\n", ep); -+ return; -+ } -+ } -+ else { -+ printk("complete_ep 0x%p, ep->queue empty!\n", ep); -+ return; -+ } -+ DWC_DEBUGPL(DBG_PCD, "Requests %d\n", ep->pcd->request_pending); -+ -+ if (ep->dwc_ep.is_in) { -+ deptsiz.d32 = dwc_read_reg32(&in_ep_regs->dieptsiz); -+ -+ if (core_if->dma_enable) { -+ if(core_if->dma_desc_enable == 0) { -+ if (deptsiz.b.xfersize == 0 && deptsiz.b.pktcnt == 0) { -+ byte_count = ep->dwc_ep.xfer_len - ep->dwc_ep.xfer_count; -+ -+ ep->dwc_ep.xfer_buff += byte_count; -+ ep->dwc_ep.dma_addr += byte_count; -+ ep->dwc_ep.xfer_count += byte_count; -+ -+ DWC_DEBUGPL(DBG_PCDV, "%s len=%d xfersize=%d pktcnt=%d\n", -+ ep->ep.name, ep->dwc_ep.xfer_len, -+ deptsiz.b.xfersize, deptsiz.b.pktcnt); -+ -+ -+ if(ep->dwc_ep.xfer_len < ep->dwc_ep.total_len) { -+ dwc_otg_ep_start_transfer(core_if, &ep->dwc_ep); -+ } else if(ep->dwc_ep.sent_zlp) { -+ /* -+ * This fragment of code should initiate 0 -+ * length trasfer in case if it is queued -+ * a trasfer with size divisible to EPs max -+ * packet size and with usb_request zero field -+ * is set, which means that after data is transfered, -+ * it is also should be transfered -+ * a 0 length packet at the end. For Slave and -+ * Buffer DMA modes in this case SW has -+ * to initiate 2 transfers one with transfer size, -+ * and the second with 0 size. For Desriptor -+ * DMA mode SW is able to initiate a transfer, -+ * which will handle all the packets including -+ * the last 0 legth. 
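The sent_zlp branches above (and the matching ones later in complete_ep()) cover requests whose length is an exact multiple of the endpoint's max packet size with usb_request.zero set: in slave and buffer-DMA mode a second, zero-length transfer has to be started so the host sees a terminating short packet. The condition reduces to a small predicate, sketched here with hypothetical arguments:

#include <stdint.h>

/* A trailing ZLP is required when every packet is full-sized and the gadget
 * asked for an explicit short-packet terminator (usb_request.zero). */
static int needs_trailing_zlp(uint32_t len, uint32_t maxpacket, int req_zero)
{
        return req_zero && len != 0 && (len % maxpacket) == 0;
}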
-+ */ -+ ep->dwc_ep.sent_zlp = 0; -+ dwc_otg_ep_start_zl_transfer(core_if, &ep->dwc_ep); -+ } else { -+ is_last = 1; -+ } -+ } else { -+ DWC_WARN("Incomplete transfer (%s-%s [siz=%d pkt=%d])\n", -+ ep->ep.name, (ep->dwc_ep.is_in?"IN":"OUT"), -+ deptsiz.b.xfersize, deptsiz.b.pktcnt); -+ } -+ } else { -+ dma_desc = ep->dwc_ep.desc_addr; -+ byte_count = 0; -+ ep->dwc_ep.sent_zlp = 0; -+ -+ for(i = 0; i < ep->dwc_ep.desc_cnt; ++i) { -+ desc_sts.d32 = readl(dma_desc); -+ byte_count += desc_sts.b.bytes; -+ dma_desc++; -+ } -+ -+ if(byte_count == 0) { -+ ep->dwc_ep.xfer_count = ep->dwc_ep.total_len; -+ is_last = 1; -+ } else { -+ DWC_WARN("Incomplete transfer\n"); -+ } -+ } -+ } else { -+ if (deptsiz.b.xfersize == 0 && deptsiz.b.pktcnt == 0) { -+ /* Check if the whole transfer was completed, -+ * if no, setup transfer for next portion of data -+ */ -+ DWC_DEBUGPL(DBG_PCDV, "%s len=%d xfersize=%d pktcnt=%d\n", -+ ep->ep.name, ep->dwc_ep.xfer_len, -+ deptsiz.b.xfersize, deptsiz.b.pktcnt); -+ if(ep->dwc_ep.xfer_len < ep->dwc_ep.total_len) { -+ dwc_otg_ep_start_transfer(core_if, &ep->dwc_ep); -+ } else if(ep->dwc_ep.sent_zlp) { -+ /* -+ * This fragment of code should initiate 0 -+ * length trasfer in case if it is queued -+ * a trasfer with size divisible to EPs max -+ * packet size and with usb_request zero field -+ * is set, which means that after data is transfered, -+ * it is also should be transfered -+ * a 0 length packet at the end. For Slave and -+ * Buffer DMA modes in this case SW has -+ * to initiate 2 transfers one with transfer size, -+ * and the second with 0 size. For Desriptor -+ * DMA mode SW is able to initiate a transfer, -+ * which will handle all the packets including -+ * the last 0 legth. -+ */ -+ ep->dwc_ep.sent_zlp = 0; -+ dwc_otg_ep_start_zl_transfer(core_if, &ep->dwc_ep); -+ } else { -+ is_last = 1; -+ } -+ } -+ else { -+ DWC_WARN("Incomplete transfer (%s-%s [siz=%d pkt=%d])\n", -+ ep->ep.name, (ep->dwc_ep.is_in?"IN":"OUT"), -+ deptsiz.b.xfersize, deptsiz.b.pktcnt); -+ } -+ } -+ } else { -+ dwc_otg_dev_out_ep_regs_t *out_ep_regs = -+ dev_if->out_ep_regs[ep->dwc_ep.num]; -+ desc_sts.d32 = 0; -+ if(core_if->dma_enable) { -+ if(core_if->dma_desc_enable) { -+ dma_desc = ep->dwc_ep.desc_addr; -+ byte_count = 0; -+ ep->dwc_ep.sent_zlp = 0; -+ for(i = 0; i < ep->dwc_ep.desc_cnt; ++i) { -+ desc_sts.d32 = readl(dma_desc); -+ byte_count += desc_sts.b.bytes; -+ dma_desc++; -+ } -+ -+ ep->dwc_ep.xfer_count = ep->dwc_ep.total_len -+ - byte_count + ((4 - (ep->dwc_ep.total_len & 0x3)) & 0x3); -+ is_last = 1; -+ } else { -+ deptsiz.d32 = 0; -+ deptsiz.d32 = dwc_read_reg32(&out_ep_regs->doeptsiz); -+ -+ byte_count = (ep->dwc_ep.xfer_len - -+ ep->dwc_ep.xfer_count - deptsiz.b.xfersize); -+ ep->dwc_ep.xfer_buff += byte_count; -+ ep->dwc_ep.dma_addr += byte_count; -+ ep->dwc_ep.xfer_count += byte_count; -+ -+ /* Check if the whole transfer was completed, -+ * if no, setup transfer for next portion of data -+ */ -+ if(ep->dwc_ep.xfer_len < ep->dwc_ep.total_len) { -+ dwc_otg_ep_start_transfer(core_if, &ep->dwc_ep); -+ } -+ else if(ep->dwc_ep.sent_zlp) { -+ /* -+ * This fragment of code should initiate 0 -+ * length trasfer in case if it is queued -+ * a trasfer with size divisible to EPs max -+ * packet size and with usb_request zero field -+ * is set, which means that after data is transfered, -+ * it is also should be transfered -+ * a 0 length packet at the end. 
For Slave and -+ * Buffer DMA modes in this case SW has -+ * to initiate 2 transfers one with transfer size, -+ * and the second with 0 size. For Desriptor -+ * DMA mode SW is able to initiate a transfer, -+ * which will handle all the packets including -+ * the last 0 legth. -+ */ -+ ep->dwc_ep.sent_zlp = 0; -+ dwc_otg_ep_start_zl_transfer(core_if, &ep->dwc_ep); -+ } else { -+ is_last = 1; -+ } -+ } -+ } else { -+ /* Check if the whole transfer was completed, -+ * if no, setup transfer for next portion of data -+ */ -+ if(ep->dwc_ep.xfer_len < ep->dwc_ep.total_len) { -+ dwc_otg_ep_start_transfer(core_if, &ep->dwc_ep); -+ } -+ else if(ep->dwc_ep.sent_zlp) { -+ /* -+ * This fragment of code should initiate 0 -+ * length trasfer in case if it is queued -+ * a trasfer with size divisible to EPs max -+ * packet size and with usb_request zero field -+ * is set, which means that after data is transfered, -+ * it is also should be transfered -+ * a 0 length packet at the end. For Slave and -+ * Buffer DMA modes in this case SW has -+ * to initiate 2 transfers one with transfer size, -+ * and the second with 0 size. For Desriptor -+ * DMA mode SW is able to initiate a transfer, -+ * which will handle all the packets including -+ * the last 0 legth. -+ */ -+ ep->dwc_ep.sent_zlp = 0; -+ dwc_otg_ep_start_zl_transfer(core_if, &ep->dwc_ep); -+ } else { -+ is_last = 1; -+ } -+ } -+ -+#ifdef DEBUG -+ -+ DWC_DEBUGPL(DBG_PCDV, "addr %p, %s len=%d cnt=%d xsize=%d pktcnt=%d\n", -+ &out_ep_regs->doeptsiz, ep->ep.name, ep->dwc_ep.xfer_len, -+ ep->dwc_ep.xfer_count, -+ deptsiz.b.xfersize, -+ deptsiz.b.pktcnt); -+#endif -+ } -+ -+ /* Complete the request */ -+ if (is_last) { -+ req->req.actual = ep->dwc_ep.xfer_count; -+ -+ dwc_otg_request_done(ep, req, 0); -+ -+ ep->dwc_ep.start_xfer_buff = 0; -+ ep->dwc_ep.xfer_buff = 0; -+ ep->dwc_ep.xfer_len = 0; -+ -+ /* If there is a request in the queue start it.*/ -+ start_next_request(ep); -+ } -+} -+ -+ -+#ifdef DWC_EN_ISOC -+ -+/** -+ * This function BNA interrupt for Isochronous EPs -+ * -+ */ -+static void dwc_otg_pcd_handle_iso_bna(dwc_otg_pcd_ep_t *ep) -+{ -+ dwc_ep_t *dwc_ep = &ep->dwc_ep; -+ volatile uint32_t *addr; -+ depctl_data_t depctl = {.d32 = 0}; -+ dwc_otg_pcd_t *pcd = ep->pcd; -+ dwc_otg_dma_desc_t *dma_desc; -+ int i; -+ -+ dma_desc = dwc_ep->iso_desc_addr + dwc_ep->desc_cnt * (dwc_ep->proc_buf_num); -+ -+ if(dwc_ep->is_in) { -+ desc_sts_data_t sts = {.d32 = 0}; -+ for(i = 0;i < dwc_ep->desc_cnt; ++i, ++dma_desc) -+ { -+ sts.d32 = readl(&dma_desc->status); -+ sts.b_iso_in.bs = BS_HOST_READY; -+ writel(sts.d32,&dma_desc->status); -+ } -+ } -+ else { -+ desc_sts_data_t sts = {.d32 = 0}; -+ for(i = 0;i < dwc_ep->desc_cnt; ++i, ++dma_desc) -+ { -+ sts.d32 = readl(&dma_desc->status); -+ sts.b_iso_out.bs = BS_HOST_READY; -+ writel(sts.d32,&dma_desc->status); -+ } -+ } -+ -+ if(dwc_ep->is_in == 0){ -+ addr = &GET_CORE_IF(pcd)->dev_if->out_ep_regs[dwc_ep->num]->doepctl; -+ } -+ else{ -+ addr = &GET_CORE_IF(pcd)->dev_if->in_ep_regs[dwc_ep->num]->diepctl; -+ } -+ depctl.b.epena = 1; -+ dwc_modify_reg32(addr,depctl.d32,depctl.d32); -+} -+ -+/** -+ * This function sets latest iso packet information(non-PTI mode) -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param ep The EP to start the transfer on. 
-+ * -+ */ -+void set_current_pkt_info(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) -+{ -+ deptsiz_data_t deptsiz = { .d32 = 0 }; -+ dma_addr_t dma_addr; -+ uint32_t offset; -+ -+ if(ep->proc_buf_num) -+ dma_addr = ep->dma_addr1; -+ else -+ dma_addr = ep->dma_addr0; -+ -+ -+ if(ep->is_in) { -+ deptsiz.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dieptsiz); -+ offset = ep->data_per_frame; -+ } else { -+ deptsiz.d32 = dwc_read_reg32(&core_if->dev_if->out_ep_regs[ep->num]->doeptsiz); -+ offset = ep->data_per_frame + (0x4 & (0x4 - (ep->data_per_frame & 0x3))); -+ } -+ -+ if(!deptsiz.b.xfersize) { -+ ep->pkt_info[ep->cur_pkt].length = ep->data_per_frame; -+ ep->pkt_info[ep->cur_pkt].offset = ep->cur_pkt_dma_addr - dma_addr; -+ ep->pkt_info[ep->cur_pkt].status = 0; -+ } else { -+ ep->pkt_info[ep->cur_pkt].length = ep->data_per_frame; -+ ep->pkt_info[ep->cur_pkt].offset = ep->cur_pkt_dma_addr - dma_addr; -+ ep->pkt_info[ep->cur_pkt].status = -ENODATA; -+ } -+ ep->cur_pkt_addr += offset; -+ ep->cur_pkt_dma_addr += offset; -+ ep->cur_pkt++; -+} -+ -+/** -+ * This function sets latest iso packet information(DDMA mode) -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param dwc_ep The EP to start the transfer on. -+ * -+ */ -+static void set_ddma_iso_pkts_info(dwc_otg_core_if_t *core_if, dwc_ep_t *dwc_ep) -+{ -+ dwc_otg_dma_desc_t* dma_desc; -+ desc_sts_data_t sts = {.d32 = 0}; -+ iso_pkt_info_t *iso_packet; -+ uint32_t data_per_desc; -+ uint32_t offset; -+ int i, j; -+ -+ iso_packet = dwc_ep->pkt_info; -+ -+ /** Reinit closed DMA Descriptors*/ -+ /** ISO OUT EP */ -+ if(dwc_ep->is_in == 0) { -+ dma_desc = dwc_ep->iso_desc_addr + dwc_ep->desc_cnt * dwc_ep->proc_buf_num; -+ offset = 0; -+ -+ for(i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm; i+= dwc_ep->pkt_per_frm) -+ { -+ for(j = 0; j < dwc_ep->pkt_per_frm; ++j) -+ { -+ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? -+ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; -+ data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0; -+ -+ sts.d32 = readl(&dma_desc->status); -+ -+ /* Write status in iso_packet_decsriptor */ -+ iso_packet->status = sts.b_iso_out.rxsts + (sts.b_iso_out.bs^BS_DMA_DONE); -+ if(iso_packet->status) { -+ iso_packet->status = -ENODATA; -+ } -+ -+ /* Received data length */ -+ if(!sts.b_iso_out.rxbytes){ -+ iso_packet->length = data_per_desc - sts.b_iso_out.rxbytes; -+ } else { -+ iso_packet->length = data_per_desc - sts.b_iso_out.rxbytes + -+ (4 - dwc_ep->data_per_frame % 4); -+ } -+ -+ iso_packet->offset = offset; -+ -+ offset += data_per_desc; -+ dma_desc ++; -+ iso_packet ++; -+ } -+ } -+ -+ for(j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) -+ { -+ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? -+ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; -+ data_per_desc += (data_per_desc % 4) ? 
(4 - data_per_desc % 4):0; -+ -+ sts.d32 = readl(&dma_desc->status); -+ -+ /* Write status in iso_packet_decsriptor */ -+ iso_packet->status = sts.b_iso_out.rxsts + (sts.b_iso_out.bs^BS_DMA_DONE); -+ if(iso_packet->status) { -+ iso_packet->status = -ENODATA; -+ } -+ -+ /* Received data length */ -+ iso_packet->length = dwc_ep->data_per_frame - sts.b_iso_out.rxbytes; -+ -+ iso_packet->offset = offset; -+ -+ offset += data_per_desc; -+ iso_packet++; -+ dma_desc++; -+ } -+ -+ sts.d32 = readl(&dma_desc->status); -+ -+ /* Write status in iso_packet_decsriptor */ -+ iso_packet->status = sts.b_iso_out.rxsts + (sts.b_iso_out.bs^BS_DMA_DONE); -+ if(iso_packet->status) { -+ iso_packet->status = -ENODATA; -+ } -+ /* Received data length */ -+ if(!sts.b_iso_out.rxbytes){ -+ iso_packet->length = dwc_ep->data_per_frame - sts.b_iso_out.rxbytes; -+ } else { -+ iso_packet->length = dwc_ep->data_per_frame - sts.b_iso_out.rxbytes + -+ (4 - dwc_ep->data_per_frame % 4); -+ } -+ -+ iso_packet->offset = offset; -+ } -+ else /** ISO IN EP */ -+ { -+ dma_desc = dwc_ep->iso_desc_addr + dwc_ep->desc_cnt * dwc_ep->proc_buf_num; -+ -+ for(i = 0; i < dwc_ep->desc_cnt - 1; i++) -+ { -+ sts.d32 = readl(&dma_desc->status); -+ -+ /* Write status in iso packet descriptor */ -+ iso_packet->status = sts.b_iso_in.txsts + (sts.b_iso_in.bs^BS_DMA_DONE); -+ if(iso_packet->status != 0) { -+ iso_packet->status = -ENODATA; -+ -+ } -+ /* Bytes has been transfered */ -+ iso_packet->length = dwc_ep->data_per_frame - sts.b_iso_in.txbytes; -+ -+ dma_desc ++; -+ iso_packet++; -+ } -+ -+ sts.d32 = readl(&dma_desc->status); -+ while(sts.b_iso_in.bs == BS_DMA_BUSY) { -+ sts.d32 = readl(&dma_desc->status); -+ } -+ -+ /* Write status in iso packet descriptor ??? do be done with ERROR codes*/ -+ iso_packet->status = sts.b_iso_in.txsts + (sts.b_iso_in.bs^BS_DMA_DONE); -+ if(iso_packet->status != 0) { -+ iso_packet->status = -ENODATA; -+ } -+ -+ /* Bytes has been transfered */ -+ iso_packet->length = dwc_ep->data_per_frame - sts.b_iso_in.txbytes; -+ } -+} -+ -+/** -+ * This function reinitialize DMA Descriptors for Isochronous transfer -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param dwc_ep The EP to start the transfer on. -+ * -+ */ -+static void reinit_ddma_iso_xfer(dwc_otg_core_if_t *core_if, dwc_ep_t *dwc_ep) -+{ -+ int i, j; -+ dwc_otg_dma_desc_t* dma_desc; -+ dma_addr_t dma_ad; -+ volatile uint32_t *addr; -+ desc_sts_data_t sts = { .d32 =0 }; -+ uint32_t data_per_desc; -+ -+ if(dwc_ep->is_in == 0) { -+ addr = &core_if->dev_if->out_ep_regs[dwc_ep->num]->doepctl; -+ } -+ else { -+ addr = &core_if->dev_if->in_ep_regs[dwc_ep->num]->diepctl; -+ } -+ -+ -+ if(dwc_ep->proc_buf_num == 0) { -+ /** Buffer 0 descriptors setup */ -+ dma_ad = dwc_ep->dma_addr0; -+ } -+ else { -+ /** Buffer 1 descriptors setup */ -+ dma_ad = dwc_ep->dma_addr1; -+ } -+ -+ -+ /** Reinit closed DMA Descriptors*/ -+ /** ISO OUT EP */ -+ if(dwc_ep->is_in == 0) { -+ dma_desc = dwc_ep->iso_desc_addr + dwc_ep->desc_cnt * dwc_ep->proc_buf_num; -+ -+ sts.b_iso_out.bs = BS_HOST_READY; -+ sts.b_iso_out.rxsts = 0; -+ sts.b_iso_out.l = 0; -+ sts.b_iso_out.sp = 0; -+ sts.b_iso_out.ioc = 0; -+ sts.b_iso_out.pid = 0; -+ sts.b_iso_out.framenum = 0; -+ -+ for(i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm; i+= dwc_ep->pkt_per_frm) -+ { -+ for(j = 0; j < dwc_ep->pkt_per_frm; ++j) -+ { -+ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? 
-+ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; -+ data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0; -+ sts.b_iso_out.rxbytes = data_per_desc; -+ writel((uint32_t)dma_ad, &dma_desc->buf); -+ writel(sts.d32, &dma_desc->status); -+ -+ (uint32_t)dma_ad += data_per_desc; -+ dma_desc ++; -+ } -+ } -+ -+ for(j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) -+ { -+ -+ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? -+ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; -+ data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0; -+ sts.b_iso_out.rxbytes = data_per_desc; -+ -+ writel((uint32_t)dma_ad, &dma_desc->buf); -+ writel(sts.d32, &dma_desc->status); -+ -+ dma_desc++; -+ (uint32_t)dma_ad += data_per_desc; -+ } -+ -+ sts.b_iso_out.ioc = 1; -+ sts.b_iso_out.l = dwc_ep->proc_buf_num; -+ -+ data_per_desc = ((j + 1) * dwc_ep->maxpacket > dwc_ep->data_per_frame) ? -+ dwc_ep->data_per_frame - j * dwc_ep->maxpacket : dwc_ep->maxpacket; -+ data_per_desc += (data_per_desc % 4) ? (4 - data_per_desc % 4):0; -+ sts.b_iso_out.rxbytes = data_per_desc; -+ -+ writel((uint32_t)dma_ad, &dma_desc->buf); -+ writel(sts.d32, &dma_desc->status); -+ } -+ else /** ISO IN EP */ -+ { -+ dma_desc = dwc_ep->iso_desc_addr + dwc_ep->desc_cnt * dwc_ep->proc_buf_num; -+ -+ sts.b_iso_in.bs = BS_HOST_READY; -+ sts.b_iso_in.txsts = 0; -+ sts.b_iso_in.sp = 0; -+ sts.b_iso_in.ioc = 0; -+ sts.b_iso_in.pid = dwc_ep->pkt_per_frm; -+ sts.b_iso_in.framenum = dwc_ep->next_frame; -+ sts.b_iso_in.txbytes = dwc_ep->data_per_frame; -+ sts.b_iso_in.l = 0; -+ -+ for(i = 0; i < dwc_ep->desc_cnt - 1; i++) -+ { -+ writel((uint32_t)dma_ad, &dma_desc->buf); -+ writel(sts.d32, &dma_desc->status); -+ -+ sts.b_iso_in.framenum += dwc_ep->bInterval; -+ (uint32_t)dma_ad += dwc_ep->data_per_frame; -+ dma_desc ++; -+ } -+ -+ sts.b_iso_in.ioc = 1; -+ sts.b_iso_in.l = dwc_ep->proc_buf_num; -+ -+ writel((uint32_t)dma_ad, &dma_desc->buf); -+ writel(sts.d32, &dma_desc->status); -+ -+ dwc_ep->next_frame = sts.b_iso_in.framenum + dwc_ep->bInterval * 1; -+ } -+ dwc_ep->proc_buf_num = (dwc_ep->proc_buf_num ^ 1) & 0x1; -+} -+ -+ -+/** -+ * This function is to handle Iso EP transfer complete interrupt -+ * in case Iso out packet was dropped -+ * -+ * @param core_if Programming view of DWC_otg controller. 
-+ * @param dwc_ep The EP for wihich transfer complete was asserted -+ * -+ */ -+static uint32_t handle_iso_out_pkt_dropped(dwc_otg_core_if_t *core_if, dwc_ep_t *dwc_ep) -+{ -+ uint32_t dma_addr; -+ uint32_t drp_pkt; -+ uint32_t drp_pkt_cnt; -+ deptsiz_data_t deptsiz = { .d32 = 0 }; -+ depctl_data_t depctl = { .d32 = 0 }; -+ int i; -+ -+ deptsiz.d32 = dwc_read_reg32(&core_if->dev_if->out_ep_regs[dwc_ep->num]->doeptsiz); -+ -+ drp_pkt = dwc_ep->pkt_cnt - deptsiz.b.pktcnt; -+ drp_pkt_cnt = dwc_ep->pkt_per_frm - (drp_pkt % dwc_ep->pkt_per_frm); -+ -+ /* Setting dropped packets status */ -+ for(i = 0; i < drp_pkt_cnt; ++i) { -+ dwc_ep->pkt_info[drp_pkt].status = -ENODATA; -+ drp_pkt ++; -+ deptsiz.b.pktcnt--; -+ } -+ -+ -+ if(deptsiz.b.pktcnt > 0) { -+ deptsiz.b.xfersize = dwc_ep->xfer_len - (dwc_ep->pkt_cnt - deptsiz.b.pktcnt) * dwc_ep->maxpacket; -+ } else { -+ deptsiz.b.xfersize = 0; -+ deptsiz.b.pktcnt = 0; -+ } -+ -+ dwc_write_reg32(&core_if->dev_if->out_ep_regs[dwc_ep->num]->doeptsiz, deptsiz.d32); -+ -+ if(deptsiz.b.pktcnt > 0) { -+ if(dwc_ep->proc_buf_num) { -+ dma_addr = dwc_ep->dma_addr1 + dwc_ep->xfer_len - deptsiz.b.xfersize; -+ } else { -+ dma_addr = dwc_ep->dma_addr0 + dwc_ep->xfer_len - deptsiz.b.xfersize;; -+ } -+ -+ dwc_write_reg32(&core_if->dev_if->out_ep_regs[dwc_ep->num]->doepdma, dma_addr); -+ -+ /** Re-enable endpoint, clear nak */ -+ depctl.d32 = 0; -+ depctl.b.epena = 1; -+ depctl.b.cnak = 1; -+ -+ dwc_modify_reg32(&core_if->dev_if->out_ep_regs[dwc_ep->num]->doepctl, -+ depctl.d32,depctl.d32); -+ return 0; -+ } else { -+ return 1; -+ } -+} -+ -+/** -+ * This function sets iso packets information(PTI mode) -+ * -+ * @param core_if Programming view of DWC_otg controller. -+ * @param ep The EP to start the transfer on. -+ * -+ */ -+static uint32_t set_iso_pkts_info(dwc_otg_core_if_t *core_if, dwc_ep_t *ep) -+{ -+ int i, j; -+ dma_addr_t dma_ad; -+ iso_pkt_info_t *packet_info = ep->pkt_info; -+ uint32_t offset; -+ uint32_t frame_data; -+ deptsiz_data_t deptsiz; -+ -+ if(ep->proc_buf_num == 0) { -+ /** Buffer 0 descriptors setup */ -+ dma_ad = ep->dma_addr0; -+ } -+ else { -+ /** Buffer 1 descriptors setup */ -+ dma_ad = ep->dma_addr1; -+ } -+ -+ -+ if(ep->is_in) { -+ deptsiz.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[ep->num]->dieptsiz); -+ } else { -+ deptsiz.d32 = dwc_read_reg32(&core_if->dev_if->out_ep_regs[ep->num]->doeptsiz); -+ } -+ -+ if(!deptsiz.b.xfersize) { -+ offset = 0; -+ for(i = 0; i < ep->pkt_cnt; i += ep->pkt_per_frm) -+ { -+ frame_data = ep->data_per_frame; -+ for(j = 0; j < ep->pkt_per_frm; ++j) { -+ -+ /* Packet status - is not set as initially -+ * it is set to 0 and if packet was sent -+ successfully, status field will remain 0*/ -+ -+ -+ /* Bytes has been transfered */ -+ packet_info->length = (ep->maxpacket < frame_data) ? -+ ep->maxpacket : frame_data; -+ -+ /* Received packet offset */ -+ packet_info->offset = offset; -+ offset += packet_info->length; -+ frame_data -= packet_info->length; -+ -+ packet_info ++; -+ } -+ } -+ return 1; -+ } else { -+ /* This is a workaround for in case of Transfer Complete with -+ * PktDrpSts interrupts merging - in this case Transfer complete -+ * interrupt for Isoc Out Endpoint is asserted without PktDrpSts -+ * set and with DOEPTSIZ register non zero. Investigations showed, -+ * that this happens when Out packet is dropped, but because of -+ * interrupts merging during first interrupt handling PktDrpSts -+ * bit is cleared and for next merged interrupts it is not reset. 
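handle_iso_out_pkt_dropped() above derives the number of lost packets in the current frame from the residual DOEPTSIZ packet count. A worked example with hypothetical numbers (12 packets programmed, 4 packets per frame, 7 still pending):

#include <assert.h>

int main(void)
{
        unsigned pkt_cnt = 12, pkt_per_frm = 4, pktcnt_left = 7;

        unsigned drp_pkt     = pkt_cnt - pktcnt_left;                 /* 5 */
        unsigned drp_pkt_cnt = pkt_per_frm - (drp_pkt % pkt_per_frm); /* 3 */

        assert(drp_pkt == 5 && drp_pkt_cnt == 3);
        return 0;
}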
-+ * In this case SW hadles the interrupt as if PktDrpSts bit is set. -+ */ -+ if(ep->is_in) { -+ return 1; -+ } else { -+ return handle_iso_out_pkt_dropped(core_if, ep); -+ } -+ } -+} -+ -+/** -+ * This function is to handle Iso EP transfer complete interrupt -+ * -+ * @param ep The EP for which transfer complete was asserted -+ * -+ */ -+static void complete_iso_ep(dwc_otg_pcd_ep_t *ep) -+{ -+ dwc_otg_core_if_t *core_if = GET_CORE_IF(ep->pcd); -+ dwc_ep_t *dwc_ep = &ep->dwc_ep; -+ uint8_t is_last = 0; -+ -+ if(core_if->dma_enable) { -+ if(core_if->dma_desc_enable) { -+ set_ddma_iso_pkts_info(core_if, dwc_ep); -+ reinit_ddma_iso_xfer(core_if, dwc_ep); -+ is_last = 1; -+ } else { -+ if(core_if->pti_enh_enable) { -+ if(set_iso_pkts_info(core_if, dwc_ep)) { -+ dwc_ep->proc_buf_num = (dwc_ep->proc_buf_num ^ 1) & 0x1; -+ dwc_otg_iso_ep_start_buf_transfer(core_if, dwc_ep); -+ is_last = 1; -+ } -+ } else { -+ set_current_pkt_info(core_if, dwc_ep); -+ if(dwc_ep->cur_pkt >= dwc_ep->pkt_cnt) { -+ is_last = 1; -+ dwc_ep->cur_pkt = 0; -+ dwc_ep->proc_buf_num = (dwc_ep->proc_buf_num ^ 1) & 0x1; -+ if(dwc_ep->proc_buf_num) { -+ dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff1; -+ dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr1; -+ } else { -+ dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff0; -+ dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr0; -+ } -+ -+ } -+ dwc_otg_iso_ep_start_frm_transfer(core_if, dwc_ep); -+ } -+ } -+ } else { -+ set_current_pkt_info(core_if, dwc_ep); -+ if(dwc_ep->cur_pkt >= dwc_ep->pkt_cnt) { -+ is_last = 1; -+ dwc_ep->cur_pkt = 0; -+ dwc_ep->proc_buf_num = (dwc_ep->proc_buf_num ^ 1) & 0x1; -+ if(dwc_ep->proc_buf_num) { -+ dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff1; -+ dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr1; -+ } else { -+ dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff0; -+ dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr0; -+ } -+ -+ } -+ dwc_otg_iso_ep_start_frm_transfer(core_if, dwc_ep); -+ } -+ if(is_last) -+ dwc_otg_iso_buffer_done(ep, ep->iso_req); -+} -+ -+#endif //DWC_EN_ISOC -+ -+ -+/** -+ * This function handles EP0 Control transfers. -+ * -+ * The state of the control tranfers are tracked in -+ * ep0state. -+ */ -+static void handle_ep0(dwc_otg_pcd_t *pcd) -+{ -+ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); -+ dwc_otg_pcd_ep_t *ep0 = &pcd->ep0; -+ desc_sts_data_t desc_sts; -+ deptsiz0_data_t deptsiz; -+ uint32_t byte_count; -+ -+#ifdef DEBUG_EP0 -+ DWC_DEBUGPL(DBG_PCDV, "%s()\n", __func__); -+ print_ep0_state(pcd); -+#endif -+ -+ switch (pcd->ep0state) { -+ case EP0_DISCONNECT: -+ break; -+ -+ case EP0_IDLE: -+ pcd->request_config = 0; -+ -+ pcd_setup(pcd); -+ break; -+ -+ case EP0_IN_DATA_PHASE: -+#ifdef DEBUG_EP0 -+ DWC_DEBUGPL(DBG_PCD, "DATA_IN EP%d-%s: type=%d, mps=%d\n", -+ ep0->dwc_ep.num, (ep0->dwc_ep.is_in ?"IN":"OUT"), -+ ep0->dwc_ep.type, ep0->dwc_ep.maxpacket); -+#endif -+ -+ if (core_if->dma_enable != 0) { -+ /* -+ * For EP0 we can only program 1 packet at a time so we -+ * need to do the make calculations after each complete. -+ * Call write_packet to make the calculations, as in -+ * slave mode, and use those values to determine if we -+ * can complete. 
-+ */ -+ if(core_if->dma_desc_enable == 0) { -+ deptsiz.d32 = dwc_read_reg32(&core_if->dev_if->in_ep_regs[0]->dieptsiz); -+ byte_count = ep0->dwc_ep.xfer_len - deptsiz.b.xfersize; -+ } -+ else { -+ desc_sts.d32 = readl(core_if->dev_if->in_desc_addr); -+ byte_count = ep0->dwc_ep.xfer_len - desc_sts.b.bytes; -+ } -+ ep0->dwc_ep.xfer_count += byte_count; -+ ep0->dwc_ep.xfer_buff += byte_count; -+ ep0->dwc_ep.dma_addr += byte_count; -+ } -+ if (ep0->dwc_ep.xfer_count < ep0->dwc_ep.total_len) { -+ dwc_otg_ep0_continue_transfer (GET_CORE_IF(pcd), &ep0->dwc_ep); -+ DWC_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER\n"); -+ } -+ else if(ep0->dwc_ep.sent_zlp) { -+ dwc_otg_ep0_continue_transfer (GET_CORE_IF(pcd), &ep0->dwc_ep); -+ ep0->dwc_ep.sent_zlp = 0; -+ DWC_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER\n"); -+ } -+ else { -+ ep0_complete_request(ep0); -+ DWC_DEBUGPL(DBG_PCD, "COMPLETE TRANSFER\n"); -+ } -+ break; -+ case EP0_OUT_DATA_PHASE: -+#ifdef DEBUG_EP0 -+ DWC_DEBUGPL(DBG_PCD, "DATA_OUT EP%d-%s: type=%d, mps=%d\n", -+ ep0->dwc_ep.num, (ep0->dwc_ep.is_in ?"IN":"OUT"), -+ ep0->dwc_ep.type, ep0->dwc_ep.maxpacket); -+#endif -+ if (core_if->dma_enable != 0) { -+ if(core_if->dma_desc_enable == 0) { -+ deptsiz.d32 = dwc_read_reg32(&core_if->dev_if->out_ep_regs[0]->doeptsiz); -+ byte_count = ep0->dwc_ep.maxpacket - deptsiz.b.xfersize; -+ } -+ else { -+ desc_sts.d32 = readl(core_if->dev_if->out_desc_addr); -+ byte_count = ep0->dwc_ep.maxpacket - desc_sts.b.bytes; -+ } -+ ep0->dwc_ep.xfer_count += byte_count; -+ ep0->dwc_ep.xfer_buff += byte_count; -+ ep0->dwc_ep.dma_addr += byte_count; -+ } -+ if (ep0->dwc_ep.xfer_count < ep0->dwc_ep.total_len) { -+ dwc_otg_ep0_continue_transfer (GET_CORE_IF(pcd), &ep0->dwc_ep); -+ DWC_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER\n"); -+ } -+ else if(ep0->dwc_ep.sent_zlp) { -+ dwc_otg_ep0_continue_transfer (GET_CORE_IF(pcd), &ep0->dwc_ep); -+ ep0->dwc_ep.sent_zlp = 0; -+ DWC_DEBUGPL(DBG_PCD, "CONTINUE TRANSFER\n"); -+ } -+ else { -+ ep0_complete_request(ep0); -+ DWC_DEBUGPL(DBG_PCD, "COMPLETE TRANSFER\n"); -+ } -+ break; -+ -+ -+ case EP0_IN_STATUS_PHASE: -+ case EP0_OUT_STATUS_PHASE: -+ DWC_DEBUGPL(DBG_PCD, "CASE: EP0_STATUS\n"); -+ ep0_complete_request(ep0); -+ pcd->ep0state = EP0_IDLE; -+ ep0->stopped = 1; -+ ep0->dwc_ep.is_in = 0; /* OUT for next SETUP */ -+ -+ /* Prepare for more SETUP Packets */ -+ if(core_if->dma_enable) { -+ ep0_out_start(core_if, pcd); -+ } -+ break; -+ -+ case EP0_STALL: -+ DWC_ERROR("EP0 STALLed, should not get here pcd_setup()\n"); -+ break; -+ } -+#ifdef DEBUG_EP0 -+ print_ep0_state(pcd); -+#endif -+} -+ -+ -+/** -+ * Restart transfer -+ */ -+static void restart_transfer(dwc_otg_pcd_t *pcd, const uint32_t epnum) -+{ -+ dwc_otg_core_if_t *core_if; -+ dwc_otg_dev_if_t *dev_if; -+ deptsiz_data_t dieptsiz = {.d32=0}; -+ dwc_otg_pcd_ep_t *ep; -+ -+ ep = get_in_ep(pcd, epnum); -+ -+#ifdef DWC_EN_ISOC -+ if(ep->dwc_ep.type == DWC_OTG_EP_TYPE_ISOC) { -+ return; -+ } -+#endif /* DWC_EN_ISOC */ -+ -+ core_if = GET_CORE_IF(pcd); -+ dev_if = core_if->dev_if; -+ -+ dieptsiz.d32 = dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dieptsiz); -+ -+ DWC_DEBUGPL(DBG_PCD,"xfer_buff=%p xfer_count=%0x xfer_len=%0x" -+ " stopped=%d\n", ep->dwc_ep.xfer_buff, -+ ep->dwc_ep.xfer_count, ep->dwc_ep.xfer_len , -+ ep->stopped); -+ /* -+ * If xfersize is 0 and pktcnt in not 0, resend the last packet. 
-+ */ -+ if (dieptsiz.b.pktcnt && dieptsiz.b.xfersize == 0 && -+ ep->dwc_ep.start_xfer_buff != 0) { -+ if (ep->dwc_ep.total_len <= ep->dwc_ep.maxpacket) { -+ ep->dwc_ep.xfer_count = 0; -+ ep->dwc_ep.xfer_buff = ep->dwc_ep.start_xfer_buff; -+ ep->dwc_ep.xfer_len = ep->dwc_ep.xfer_count; -+ } -+ else { -+ ep->dwc_ep.xfer_count -= ep->dwc_ep.maxpacket; -+ /* convert packet size to dwords. */ -+ ep->dwc_ep.xfer_buff -= ep->dwc_ep.maxpacket; -+ ep->dwc_ep.xfer_len = ep->dwc_ep.xfer_count; -+ } -+ ep->stopped = 0; -+ DWC_DEBUGPL(DBG_PCD,"xfer_buff=%p xfer_count=%0x " -+ "xfer_len=%0x stopped=%d\n", -+ ep->dwc_ep.xfer_buff, -+ ep->dwc_ep.xfer_count, ep->dwc_ep.xfer_len , -+ ep->stopped -+ ); -+ if (epnum == 0) { -+ dwc_otg_ep0_start_transfer(core_if, &ep->dwc_ep); -+ } -+ else { -+ dwc_otg_ep_start_transfer(core_if, &ep->dwc_ep); -+ } -+ } -+} -+ -+ -+/** -+ * handle the IN EP disable interrupt. -+ */ -+static inline void handle_in_ep_disable_intr(dwc_otg_pcd_t *pcd, -+ const uint32_t epnum) -+{ -+ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); -+ dwc_otg_dev_if_t *dev_if = core_if->dev_if; -+ deptsiz_data_t dieptsiz = {.d32=0}; -+ dctl_data_t dctl = {.d32=0}; -+ dwc_otg_pcd_ep_t *ep; -+ dwc_ep_t *dwc_ep; -+ -+ ep = get_in_ep(pcd, epnum); -+ dwc_ep = &ep->dwc_ep; -+ -+ if(dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) { -+ dwc_otg_flush_tx_fifo(core_if, dwc_ep->tx_fifo_num); -+ return; -+ } -+ -+ DWC_DEBUGPL(DBG_PCD,"diepctl%d=%0x\n", epnum, -+ dwc_read_reg32(&dev_if->in_ep_regs[epnum]->diepctl)); -+ dieptsiz.d32 = dwc_read_reg32(&dev_if->in_ep_regs[epnum]->dieptsiz); -+ -+ DWC_DEBUGPL(DBG_ANY, "pktcnt=%d size=%d\n", -+ dieptsiz.b.pktcnt, -+ dieptsiz.b.xfersize); -+ -+ if (ep->stopped) { -+ /* Flush the Tx FIFO */ -+ dwc_otg_flush_tx_fifo(core_if, dwc_ep->tx_fifo_num); -+ /* Clear the Global IN NP NAK */ -+ dctl.d32 = 0; -+ dctl.b.cgnpinnak = 1; -+ dwc_modify_reg32(&dev_if->dev_global_regs->dctl, -+ dctl.d32, 0); -+ /* Restart the transaction */ -+ if (dieptsiz.b.pktcnt != 0 || -+ dieptsiz.b.xfersize != 0) { -+ restart_transfer(pcd, epnum); -+ } -+ } -+ else { -+ /* Restart the transaction */ -+ if (dieptsiz.b.pktcnt != 0 || -+ dieptsiz.b.xfersize != 0) { -+ restart_transfer(pcd, epnum); -+ } -+ DWC_DEBUGPL(DBG_ANY, "STOPPED!!!\n"); -+ } -+} -+ -+/** -+ * Handler for the IN EP timeout handshake interrupt. -+ */ -+static inline void handle_in_ep_timeout_intr(dwc_otg_pcd_t *pcd, -+ const uint32_t epnum) -+{ -+ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); -+ dwc_otg_dev_if_t *dev_if = core_if->dev_if; -+ -+#ifdef DEBUG -+ deptsiz_data_t dieptsiz = {.d32=0}; -+ uint32_t num = 0; -+#endif -+ dctl_data_t dctl = {.d32=0}; -+ dwc_otg_pcd_ep_t *ep; -+ -+ gintmsk_data_t intr_mask = {.d32 = 0}; -+ -+ ep = get_in_ep(pcd, epnum); -+ -+ /* Disable the NP Tx Fifo Empty Interrrupt */ -+ if (!core_if->dma_enable) { -+ intr_mask.b.nptxfempty = 1; -+ dwc_modify_reg32(&core_if->core_global_regs->gintmsk, intr_mask.d32, 0); -+ } -+ /** @todo NGS Check EP type. 
-+ * Implement for Periodic EPs */ -+ /* -+ * Non-periodic EP -+ */ -+ /* Enable the Global IN NAK Effective Interrupt */ -+ intr_mask.b.ginnakeff = 1; -+ dwc_modify_reg32(&core_if->core_global_regs->gintmsk, -+ 0, intr_mask.d32); -+ -+ /* Set Global IN NAK */ -+ dctl.b.sgnpinnak = 1; -+ dwc_modify_reg32(&dev_if->dev_global_regs->dctl, -+ dctl.d32, dctl.d32); -+ -+ ep->stopped = 1; -+ -+#ifdef DEBUG -+ dieptsiz.d32 = dwc_read_reg32(&dev_if->in_ep_regs[num]->dieptsiz); -+ DWC_DEBUGPL(DBG_ANY, "pktcnt=%d size=%d\n", -+ dieptsiz.b.pktcnt, -+ dieptsiz.b.xfersize); -+#endif -+ -+#ifdef DISABLE_PERIODIC_EP -+ /* -+ * Set the NAK bit for this EP to -+ * start the disable process. -+ */ -+ diepctl.d32 = 0; -+ diepctl.b.snak = 1; -+ dwc_modify_reg32(&dev_if->in_ep_regs[num]->diepctl, diepctl.d32, diepctl.d32); -+ ep->disabling = 1; -+ ep->stopped = 1; -+#endif -+} -+ -+/** -+ * Handler for the IN EP NAK interrupt. -+ */ -+static inline int32_t handle_in_ep_nak_intr(dwc_otg_pcd_t *pcd, -+ const uint32_t epnum) -+{ -+ /** @todo implement ISR */ -+ dwc_otg_core_if_t* core_if; -+ diepmsk_data_t intr_mask = { .d32 = 0}; -+ -+ DWC_PRINT("INTERRUPT Handler not implemented for %s\n", "IN EP NAK"); -+ core_if = GET_CORE_IF(pcd); -+ intr_mask.b.nak = 1; -+ -+ if(core_if->multiproc_int_enable) { -+ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->diepeachintmsk[epnum], -+ intr_mask.d32, 0); -+ } else { -+ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->diepmsk, -+ intr_mask.d32, 0); -+ } -+ -+ return 1; -+} -+ -+/** -+ * Handler for the OUT EP Babble interrupt. -+ */ -+static inline int32_t handle_out_ep_babble_intr(dwc_otg_pcd_t *pcd, -+ const uint32_t epnum) -+{ -+ /** @todo implement ISR */ -+ dwc_otg_core_if_t* core_if; -+ doepmsk_data_t intr_mask = { .d32 = 0}; -+ -+ DWC_PRINT("INTERRUPT Handler not implemented for %s\n", "OUT EP Babble"); -+ core_if = GET_CORE_IF(pcd); -+ intr_mask.b.babble = 1; -+ -+ if(core_if->multiproc_int_enable) { -+ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->doepeachintmsk[epnum], -+ intr_mask.d32, 0); -+ } else { -+ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->doepmsk, -+ intr_mask.d32, 0); -+ } -+ -+ return 1; -+} -+ -+/** -+ * Handler for the OUT EP NAK interrupt. -+ */ -+static inline int32_t handle_out_ep_nak_intr(dwc_otg_pcd_t *pcd, -+ const uint32_t epnum) -+{ -+ /** @todo implement ISR */ -+ dwc_otg_core_if_t* core_if; -+ doepmsk_data_t intr_mask = { .d32 = 0}; -+ -+ DWC_PRINT("INTERRUPT Handler not implemented for %s\n", "OUT EP NAK"); -+ core_if = GET_CORE_IF(pcd); -+ intr_mask.b.nak = 1; -+ -+ if(core_if->multiproc_int_enable) { -+ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->doepeachintmsk[epnum], -+ intr_mask.d32, 0); -+ } else { -+ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->doepmsk, -+ intr_mask.d32, 0); -+ } -+ -+ return 1; -+} -+ -+/** -+ * Handler for the OUT EP NYET interrupt. 
-+ */ -+static inline int32_t handle_out_ep_nyet_intr(dwc_otg_pcd_t *pcd, -+ const uint32_t epnum) -+{ -+ /** @todo implement ISR */ -+ dwc_otg_core_if_t* core_if; -+ doepmsk_data_t intr_mask = { .d32 = 0}; -+ -+ DWC_PRINT("INTERRUPT Handler not implemented for %s\n", "OUT EP NYET"); -+ core_if = GET_CORE_IF(pcd); -+ intr_mask.b.nyet = 1; -+ -+ if(core_if->multiproc_int_enable) { -+ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->doepeachintmsk[epnum], -+ intr_mask.d32, 0); -+ } else { -+ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->doepmsk, -+ intr_mask.d32, 0); -+ } -+ -+ return 1; -+} -+ -+/** -+ * This interrupt indicates that an IN EP has a pending Interrupt. -+ * The sequence for handling the IN EP interrupt is shown below: -+ * -# Read the Device All Endpoint Interrupt register -+ * -# Repeat the following for each IN EP interrupt bit set (from -+ * LSB to MSB). -+ * -# Read the Device Endpoint Interrupt (DIEPINTn) register -+ * -# If "Transfer Complete" call the request complete function -+ * -# If "Endpoint Disabled" complete the EP disable procedure. -+ * -# If "AHB Error Interrupt" log error -+ * -# If "Time-out Handshake" log error -+ * -# If "IN Token Received when TxFIFO Empty" write packet to Tx -+ * FIFO. -+ * -# If "IN Token EP Mismatch" (disable, this is handled by EP -+ * Mismatch Interrupt) -+ */ -+static int32_t dwc_otg_pcd_handle_in_ep_intr(dwc_otg_pcd_t *pcd) -+{ -+#define CLEAR_IN_EP_INTR(__core_if,__epnum,__intr) \ -+do { \ -+ diepint_data_t diepint = {.d32=0}; \ -+ diepint.b.__intr = 1; \ -+ dwc_write_reg32(&__core_if->dev_if->in_ep_regs[__epnum]->diepint, \ -+ diepint.d32); \ -+} while (0) -+ -+ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); -+ dwc_otg_dev_if_t *dev_if = core_if->dev_if; -+ diepint_data_t diepint = {.d32=0}; -+ dctl_data_t dctl = {.d32=0}; -+ depctl_data_t depctl = {.d32=0}; -+ uint32_t ep_intr; -+ uint32_t epnum = 0; -+ dwc_otg_pcd_ep_t *ep; -+ dwc_ep_t *dwc_ep; -+ gintmsk_data_t intr_mask = {.d32 = 0}; -+ -+ -+ -+ DWC_DEBUGPL(DBG_PCDV, "%s(%p)\n", __func__, pcd); -+ -+ /* Read in the device interrupt bits */ -+ ep_intr = dwc_otg_read_dev_all_in_ep_intr(core_if); -+ -+ /* Service the Device IN interrupts for each endpoint */ -+ while(ep_intr) { -+ if (ep_intr&0x1) { -+ uint32_t empty_msk; -+ /* Get EP pointer */ -+ ep = get_in_ep(pcd, epnum); -+ dwc_ep = &ep->dwc_ep; -+ -+ depctl.d32 = dwc_read_reg32(&dev_if->in_ep_regs[epnum]->diepctl); -+ empty_msk = dwc_read_reg32(&dev_if->dev_global_regs->dtknqr4_fifoemptymsk); -+ -+ DWC_DEBUGPL(DBG_PCDV, -+ "IN EP INTERRUPT - %d\nepmty_msk - %8x diepctl - %8x\n", -+ epnum, -+ empty_msk, -+ depctl.d32); -+ -+ DWC_DEBUGPL(DBG_PCD, -+ "EP%d-%s: type=%d, mps=%d\n", -+ dwc_ep->num, (dwc_ep->is_in ?"IN":"OUT"), -+ dwc_ep->type, dwc_ep->maxpacket); -+ -+ diepint.d32 = dwc_otg_read_dev_in_ep_intr(core_if, dwc_ep); -+ -+ DWC_DEBUGPL(DBG_PCDV, "EP %d Interrupt Register - 0x%x\n", epnum, diepint.d32); -+ /* Transfer complete */ -+ if (diepint.b.xfercompl) { -+ /* Disable the NP Tx FIFO Empty -+ * Interrrupt */ -+ if(core_if->en_multiple_tx_fifo == 0) { -+ intr_mask.b.nptxfempty = 1; -+ dwc_modify_reg32(&core_if->core_global_regs->gintmsk, intr_mask.d32, 0); -+ } -+ else { -+ /* Disable the Tx FIFO Empty Interrupt for this EP */ -+ uint32_t fifoemptymsk = 0x1 << dwc_ep->num; -+ dwc_modify_reg32(&core_if->dev_if->dev_global_regs->dtknqr4_fifoemptymsk, -+ fifoemptymsk, 0); -+ } -+ /* Clear the bit in DIEPINTn for this interrupt */ -+ CLEAR_IN_EP_INTR(core_if,epnum,xfercompl); -+ -+ /* Complete the transfer */ 
-+ if (epnum == 0) { -+ handle_ep0(pcd); -+ } -+#ifdef DWC_EN_ISOC -+ else if(dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) { -+ if(!ep->stopped) -+ complete_iso_ep(ep); -+ } -+#endif //DWC_EN_ISOC -+ else { -+ -+ complete_ep(ep); -+ } -+ } -+ /* Endpoint disable */ -+ if (diepint.b.epdisabled) { -+ DWC_DEBUGPL(DBG_ANY,"EP%d IN disabled\n", epnum); -+ handle_in_ep_disable_intr(pcd, epnum); -+ -+ /* Clear the bit in DIEPINTn for this interrupt */ -+ CLEAR_IN_EP_INTR(core_if,epnum,epdisabled); -+ } -+ /* AHB Error */ -+ if (diepint.b.ahberr) { -+ DWC_DEBUGPL(DBG_ANY,"EP%d IN AHB Error\n", epnum); -+ /* Clear the bit in DIEPINTn for this interrupt */ -+ CLEAR_IN_EP_INTR(core_if,epnum,ahberr); -+ } -+ /* TimeOUT Handshake (non-ISOC IN EPs) */ -+ if (diepint.b.timeout) { -+ DWC_DEBUGPL(DBG_ANY,"EP%d IN Time-out\n", epnum); -+ handle_in_ep_timeout_intr(pcd, epnum); -+ -+ CLEAR_IN_EP_INTR(core_if,epnum,timeout); -+ } -+ /** IN Token received with TxF Empty */ -+ if (diepint.b.intktxfemp) { -+ DWC_DEBUGPL(DBG_ANY,"EP%d IN TKN TxFifo Empty\n", -+ epnum); -+ if (!ep->stopped && epnum != 0) { -+ -+ diepmsk_data_t diepmsk = { .d32 = 0}; -+ diepmsk.b.intktxfemp = 1; -+ -+ if(core_if->multiproc_int_enable) { -+ dwc_modify_reg32(&dev_if->dev_global_regs->diepeachintmsk[epnum], -+ diepmsk.d32, 0); -+ } else { -+ dwc_modify_reg32(&dev_if->dev_global_regs->diepmsk, diepmsk.d32, 0); -+ } -+ start_next_request(ep); -+ } -+ else if(core_if->dma_desc_enable && epnum == 0 && -+ pcd->ep0state == EP0_OUT_STATUS_PHASE) { -+ // EP0 IN set STALL -+ depctl.d32 = dwc_read_reg32(&dev_if->in_ep_regs[epnum]->diepctl); -+ -+ /* set the disable and stall bits */ -+ if (depctl.b.epena) { -+ depctl.b.epdis = 1; -+ } -+ depctl.b.stall = 1; -+ dwc_write_reg32(&dev_if->in_ep_regs[epnum]->diepctl, depctl.d32); -+ } -+ CLEAR_IN_EP_INTR(core_if,epnum,intktxfemp); -+ } -+ /** IN Token Received with EP mismatch */ -+ if (diepint.b.intknepmis) { -+ DWC_DEBUGPL(DBG_ANY,"EP%d IN TKN EP Mismatch\n", epnum); -+ CLEAR_IN_EP_INTR(core_if,epnum,intknepmis); -+ } -+ /** IN Endpoint NAK Effective */ -+ if (diepint.b.inepnakeff) { -+ DWC_DEBUGPL(DBG_ANY,"EP%d IN EP NAK Effective\n", epnum); -+ /* Periodic EP */ -+ if (ep->disabling) { -+ depctl.d32 = 0; -+ depctl.b.snak = 1; -+ depctl.b.epdis = 1; -+ dwc_modify_reg32(&dev_if->in_ep_regs[epnum]->diepctl, depctl.d32, depctl.d32); -+ } -+ CLEAR_IN_EP_INTR(core_if,epnum,inepnakeff); -+ -+ } -+ -+ /** IN EP Tx FIFO Empty Intr */ -+ if (diepint.b.emptyintr) { -+ DWC_DEBUGPL(DBG_ANY,"EP%d Tx FIFO Empty Intr \n", epnum); -+ write_empty_tx_fifo(pcd, epnum); -+ -+ CLEAR_IN_EP_INTR(core_if,epnum,emptyintr); -+ -+ } -+ -+ /** IN EP BNA Intr */ -+ if (diepint.b.bna) { -+ CLEAR_IN_EP_INTR(core_if,epnum,bna); -+ if(core_if->dma_desc_enable) { -+#ifdef DWC_EN_ISOC -+ if(dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) { -+ /* -+ * This checking is performed to prevent first "false" BNA -+ * handling occuring right after reconnect -+ */ -+ if(dwc_ep->next_frame != 0xffffffff) -+ dwc_otg_pcd_handle_iso_bna(ep); -+ } -+ else -+#endif //DWC_EN_ISOC -+ { -+ dctl.d32 = dwc_read_reg32(&dev_if->dev_global_regs->dctl); -+ -+ /* If Global Continue on BNA is disabled - disable EP */ -+ if(!dctl.b.gcontbna) { -+ depctl.d32 = 0; -+ depctl.b.snak = 1; -+ depctl.b.epdis = 1; -+ dwc_modify_reg32(&dev_if->in_ep_regs[epnum]->diepctl, depctl.d32, depctl.d32); -+ } else { -+ start_next_request(ep); -+ } -+ } -+ } -+ } -+ /* NAK Interrutp */ -+ if (diepint.b.nak) { -+ DWC_DEBUGPL(DBG_ANY,"EP%d IN NAK Interrupt\n", epnum); -+ 
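/* Editor's note: as with the other DIEPINTn sources above, the NAK
 * condition is handed to its dedicated handler first and the status bit
 * is then acknowledged through the same write-1-to-clear macro,
 * CLEAR_IN_EP_INTR(). */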
handle_in_ep_nak_intr(pcd, epnum); -+ -+ CLEAR_IN_EP_INTR(core_if,epnum,nak); -+ } -+ } -+ epnum++; -+ ep_intr >>=1; -+ } -+ -+ return 1; -+#undef CLEAR_IN_EP_INTR -+} -+ -+/** -+ * This interrupt indicates that an OUT EP has a pending Interrupt. -+ * The sequence for handling the OUT EP interrupt is shown below: -+ * -# Read the Device All Endpoint Interrupt register -+ * -# Repeat the following for each OUT EP interrupt bit set (from -+ * LSB to MSB). -+ * -# Read the Device Endpoint Interrupt (DOEPINTn) register -+ * -# If "Transfer Complete" call the request complete function -+ * -# If "Endpoint Disabled" complete the EP disable procedure. -+ * -# If "AHB Error Interrupt" log error -+ * -# If "Setup Phase Done" process Setup Packet (See Standard USB -+ * Command Processing) -+ */ -+static int32_t dwc_otg_pcd_handle_out_ep_intr(dwc_otg_pcd_t *pcd) -+{ -+#define CLEAR_OUT_EP_INTR(__core_if,__epnum,__intr) \ -+do { \ -+ doepint_data_t doepint = {.d32=0}; \ -+ doepint.b.__intr = 1; \ -+ dwc_write_reg32(&__core_if->dev_if->out_ep_regs[__epnum]->doepint, \ -+ doepint.d32); \ -+} while (0) -+ -+ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); -+ dwc_otg_dev_if_t *dev_if = core_if->dev_if; -+ uint32_t ep_intr; -+ doepint_data_t doepint = {.d32=0}; -+ dctl_data_t dctl = {.d32=0}; -+ depctl_data_t doepctl = {.d32=0}; -+ uint32_t epnum = 0; -+ dwc_otg_pcd_ep_t *ep; -+ dwc_ep_t *dwc_ep; -+ -+ DWC_DEBUGPL(DBG_PCDV, "%s()\n", __func__); -+ -+ /* Read in the device interrupt bits */ -+ ep_intr = dwc_otg_read_dev_all_out_ep_intr(core_if); -+ -+ while(ep_intr) { -+ if (ep_intr&0x1) { -+ /* Get EP pointer */ -+ ep = get_out_ep(pcd, epnum); -+ dwc_ep = &ep->dwc_ep; -+ -+#ifdef VERBOSE -+ DWC_DEBUGPL(DBG_PCDV, -+ "EP%d-%s: type=%d, mps=%d\n", -+ dwc_ep->num, (dwc_ep->is_in ?"IN":"OUT"), -+ dwc_ep->type, dwc_ep->maxpacket); -+#endif -+ doepint.d32 = dwc_otg_read_dev_out_ep_intr(core_if, dwc_ep); -+ -+ /* Transfer complete */ -+ if (doepint.b.xfercompl) { -+ -+ if (epnum == 0) { -+ /* Clear the bit in DOEPINTn for this interrupt */ -+ CLEAR_OUT_EP_INTR(core_if,epnum,xfercompl); -+ if(core_if->dma_desc_enable == 0 || pcd->ep0state != EP0_IDLE) -+ handle_ep0(pcd); -+#ifdef DWC_EN_ISOC -+ } else if(dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) { -+ if (doepint.b.pktdrpsts == 0) { -+ /* Clear the bit in DOEPINTn for this interrupt */ -+ CLEAR_OUT_EP_INTR(core_if,epnum,xfercompl); -+ complete_iso_ep(ep); -+ } else { -+ -+ doepint_data_t doepint = {.d32=0}; -+ doepint.b.xfercompl = 1; -+ doepint.b.pktdrpsts = 1; -+ dwc_write_reg32(&core_if->dev_if->out_ep_regs[epnum]->doepint, -+ doepint.d32); -+ if(handle_iso_out_pkt_dropped(core_if,dwc_ep)) { -+ complete_iso_ep(ep); -+ } -+ } -+#endif //DWC_EN_ISOC -+ } else { -+ /* Clear the bit in DOEPINTn for this interrupt */ -+ CLEAR_OUT_EP_INTR(core_if,epnum,xfercompl); -+ complete_ep(ep); -+ } -+ -+ } -+ -+ /* Endpoint disable */ -+ if (doepint.b.epdisabled) { -+ -+ /* Clear the bit in DOEPINTn for this interrupt */ -+ CLEAR_OUT_EP_INTR(core_if,epnum,epdisabled); -+ } -+ /* AHB Error */ -+ if (doepint.b.ahberr) { -+ DWC_DEBUGPL(DBG_PCD,"EP%d OUT AHB Error\n", epnum); -+ DWC_DEBUGPL(DBG_PCD,"EP DMA REG %d \n", core_if->dev_if->out_ep_regs[epnum]->doepdma); -+ CLEAR_OUT_EP_INTR(core_if,epnum,ahberr); -+ } -+ /* Setup Phase Done (contorl EPs) */ -+ if (doepint.b.setup) { -+#ifdef DEBUG_EP0 -+ DWC_DEBUGPL(DBG_PCD,"EP%d SETUP Done\n", -+ epnum); -+#endif -+ CLEAR_OUT_EP_INTR(core_if,epnum,setup); -+ -+ handle_ep0(pcd); -+ } -+ -+ /** OUT EP BNA Intr */ -+ if (doepint.b.bna) { -+ 
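/* Editor's note: BNA ("Buffer Not Available") is specific to
 * descriptor-DMA operation, so the code below acknowledges the bit first
 * and then, only when dma_desc_enable is set, hands ISOC endpoints to
 * dwc_otg_pcd_handle_iso_bna(); for other endpoint types it either
 * disables the EP (DCTL.GContBNA clear) or starts the next queued
 * request (DCTL.GContBNA set). */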
CLEAR_OUT_EP_INTR(core_if,epnum,bna); -+ if(core_if->dma_desc_enable) { -+#ifdef DWC_EN_ISOC -+ if(dwc_ep->type == DWC_OTG_EP_TYPE_ISOC) { -+ /* -+ * This checking is performed to prevent first "false" BNA -+ * handling occuring right after reconnect -+ */ -+ if(dwc_ep->next_frame != 0xffffffff) -+ dwc_otg_pcd_handle_iso_bna(ep); -+ } -+ else -+#endif //DWC_EN_ISOC -+ { -+ dctl.d32 = dwc_read_reg32(&dev_if->dev_global_regs->dctl); -+ -+ /* If Global Continue on BNA is disabled - disable EP*/ -+ if(!dctl.b.gcontbna) { -+ doepctl.d32 = 0; -+ doepctl.b.snak = 1; -+ doepctl.b.epdis = 1; -+ dwc_modify_reg32(&dev_if->out_ep_regs[epnum]->doepctl, doepctl.d32, doepctl.d32); -+ } else { -+ start_next_request(ep); -+ } -+ } -+ } -+ } -+ if (doepint.b.stsphsercvd) { -+ CLEAR_OUT_EP_INTR(core_if,epnum,stsphsercvd); -+ if(core_if->dma_desc_enable) { -+ do_setup_in_status_phase(pcd); -+ } -+ } -+ /* Babble Interrutp */ -+ if (doepint.b.babble) { -+ DWC_DEBUGPL(DBG_ANY,"EP%d OUT Babble\n", epnum); -+ handle_out_ep_babble_intr(pcd, epnum); -+ -+ CLEAR_OUT_EP_INTR(core_if,epnum,babble); -+ } -+ /* NAK Interrutp */ -+ if (doepint.b.nak) { -+ DWC_DEBUGPL(DBG_ANY,"EP%d OUT NAK\n", epnum); -+ handle_out_ep_nak_intr(pcd, epnum); -+ -+ CLEAR_OUT_EP_INTR(core_if,epnum,nak); -+ } -+ /* NYET Interrutp */ -+ if (doepint.b.nyet) { -+ DWC_DEBUGPL(DBG_ANY,"EP%d OUT NYET\n", epnum); -+ handle_out_ep_nyet_intr(pcd, epnum); -+ -+ CLEAR_OUT_EP_INTR(core_if,epnum,nyet); -+ } -+ } -+ -+ epnum++; -+ ep_intr >>=1; -+ } -+ -+ return 1; -+ -+#undef CLEAR_OUT_EP_INTR -+} -+ -+ -+/** -+ * Incomplete ISO IN Transfer Interrupt. -+ * This interrupt indicates one of the following conditions occurred -+ * while transmitting an ISOC transaction. -+ * - Corrupted IN Token for ISOC EP. -+ * - Packet not complete in FIFO. 
-+ * The follow actions will be taken: -+ * -# Determine the EP -+ * -# Set incomplete flag in dwc_ep structure -+ * -# Disable EP; when "Endpoint Disabled" interrupt is received -+ * Flush FIFO -+ */ -+int32_t dwc_otg_pcd_handle_incomplete_isoc_in_intr(dwc_otg_pcd_t *pcd) -+{ -+ gintsts_data_t gintsts; -+ -+ -+#ifdef DWC_EN_ISOC -+ dwc_otg_dev_if_t *dev_if; -+ deptsiz_data_t deptsiz = { .d32 = 0}; -+ depctl_data_t depctl = { .d32 = 0}; -+ dsts_data_t dsts = { .d32 = 0}; -+ dwc_ep_t *dwc_ep; -+ int i; -+ -+ dev_if = GET_CORE_IF(pcd)->dev_if; -+ -+ for(i = 1; i <= dev_if->num_in_eps; ++i) { -+ dwc_ep = &pcd->in_ep[i].dwc_ep; -+ if(dwc_ep->active && -+ dwc_ep->type == USB_ENDPOINT_XFER_ISOC) -+ { -+ deptsiz.d32 = dwc_read_reg32(&dev_if->in_ep_regs[i]->dieptsiz); -+ depctl.d32 = dwc_read_reg32(&dev_if->in_ep_regs[i]->diepctl); -+ -+ if(depctl.b.epdis && deptsiz.d32) { -+ set_current_pkt_info(GET_CORE_IF(pcd), dwc_ep); -+ if(dwc_ep->cur_pkt >= dwc_ep->pkt_cnt) { -+ dwc_ep->cur_pkt = 0; -+ dwc_ep->proc_buf_num = (dwc_ep->proc_buf_num ^ 1) & 0x1; -+ -+ if(dwc_ep->proc_buf_num) { -+ dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff1; -+ dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr1; -+ } else { -+ dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff0; -+ dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr0; -+ } -+ -+ } -+ -+ dsts.d32 = dwc_read_reg32(&GET_CORE_IF(pcd)->dev_if->dev_global_regs->dsts); -+ dwc_ep->next_frame = dsts.b.soffn; -+ -+ dwc_otg_iso_ep_start_frm_transfer(GET_CORE_IF(pcd), dwc_ep); -+ } -+ } -+ } -+ -+#else -+ gintmsk_data_t intr_mask = { .d32 = 0}; -+ DWC_PRINT("INTERRUPT Handler not implemented for %s\n", -+ "IN ISOC Incomplete"); -+ -+ intr_mask.b.incomplisoin = 1; -+ dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk, -+ intr_mask.d32, 0); -+#endif //DWC_EN_ISOC -+ -+ /* Clear interrupt */ -+ gintsts.d32 = 0; -+ gintsts.b.incomplisoin = 1; -+ dwc_write_reg32 (&GET_CORE_IF(pcd)->core_global_regs->gintsts, -+ gintsts.d32); -+ -+ return 1; -+} -+ -+/** -+ * Incomplete ISO OUT Transfer Interrupt. -+ * -+ * This interrupt indicates that the core has dropped an ISO OUT -+ * packet. The following conditions can be the cause: -+ * - FIFO Full, the entire packet would not fit in the FIFO. -+ * - CRC Error -+ * - Corrupted Token -+ * The follow actions will be taken: -+ * -# Determine the EP -+ * -# Set incomplete flag in dwc_ep structure -+ * -# Read any data from the FIFO -+ * -# Disable EP. when "Endpoint Disabled" interrupt is received -+ * re-enable EP. 
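 *
 * (Editor's note) In the DWC_EN_ISOC build below, the handler scans the
 * OUT endpoints and, for every active ISOC EP whose DOEPCTL.EPDis is set
 * while DOEPTSIZ is still non-zero, it reloads the packet bookkeeping,
 * latches DSTS.SOFFN as the next frame and restarts the transfer with
 * dwc_otg_iso_ep_start_frm_transfer(); without DWC_EN_ISOC the interrupt
 * is simply masked.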
-+ */ -+int32_t dwc_otg_pcd_handle_incomplete_isoc_out_intr(dwc_otg_pcd_t *pcd) -+{ -+ /* @todo implement ISR */ -+ gintsts_data_t gintsts; -+ -+#ifdef DWC_EN_ISOC -+ dwc_otg_dev_if_t *dev_if; -+ deptsiz_data_t deptsiz = { .d32 = 0}; -+ depctl_data_t depctl = { .d32 = 0}; -+ dsts_data_t dsts = { .d32 = 0}; -+ dwc_ep_t *dwc_ep; -+ int i; -+ -+ dev_if = GET_CORE_IF(pcd)->dev_if; -+ -+ for(i = 1; i <= dev_if->num_out_eps; ++i) { -+ dwc_ep = &pcd->in_ep[i].dwc_ep; -+ if(pcd->out_ep[i].dwc_ep.active && -+ pcd->out_ep[i].dwc_ep.type == USB_ENDPOINT_XFER_ISOC) -+ { -+ deptsiz.d32 = dwc_read_reg32(&dev_if->out_ep_regs[i]->doeptsiz); -+ depctl.d32 = dwc_read_reg32(&dev_if->out_ep_regs[i]->doepctl); -+ -+ if(depctl.b.epdis && deptsiz.d32) { -+ set_current_pkt_info(GET_CORE_IF(pcd), &pcd->out_ep[i].dwc_ep); -+ if(dwc_ep->cur_pkt >= dwc_ep->pkt_cnt) { -+ dwc_ep->cur_pkt = 0; -+ dwc_ep->proc_buf_num = (dwc_ep->proc_buf_num ^ 1) & 0x1; -+ -+ if(dwc_ep->proc_buf_num) { -+ dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff1; -+ dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr1; -+ } else { -+ dwc_ep->cur_pkt_addr = dwc_ep->xfer_buff0; -+ dwc_ep->cur_pkt_dma_addr = dwc_ep->dma_addr0; -+ } -+ -+ } -+ -+ dsts.d32 = dwc_read_reg32(&GET_CORE_IF(pcd)->dev_if->dev_global_regs->dsts); -+ dwc_ep->next_frame = dsts.b.soffn; -+ -+ dwc_otg_iso_ep_start_frm_transfer(GET_CORE_IF(pcd), dwc_ep); -+ } -+ } -+ } -+#else -+ /** @todo implement ISR */ -+ gintmsk_data_t intr_mask = { .d32 = 0}; -+ -+ DWC_PRINT("INTERRUPT Handler not implemented for %s\n", -+ "OUT ISOC Incomplete"); -+ -+ intr_mask.b.incomplisoout = 1; -+ dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk, -+ intr_mask.d32, 0); -+ -+#endif // DWC_EN_ISOC -+ -+ /* Clear interrupt */ -+ gintsts.d32 = 0; -+ gintsts.b.incomplisoout = 1; -+ dwc_write_reg32 (&GET_CORE_IF(pcd)->core_global_regs->gintsts, -+ gintsts.d32); -+ -+ return 1; -+} -+ -+/** -+ * This function handles the Global IN NAK Effective interrupt. -+ * -+ */ -+int32_t dwc_otg_pcd_handle_in_nak_effective(dwc_otg_pcd_t *pcd) -+{ -+ dwc_otg_dev_if_t *dev_if = GET_CORE_IF(pcd)->dev_if; -+ depctl_data_t diepctl = { .d32 = 0}; -+ depctl_data_t diepctl_rd = { .d32 = 0}; -+ gintmsk_data_t intr_mask = { .d32 = 0}; -+ gintsts_data_t gintsts; -+ int i; -+ -+ DWC_DEBUGPL(DBG_PCD, "Global IN NAK Effective\n"); -+ -+ /* Disable all active IN EPs */ -+ diepctl.b.epdis = 1; -+ diepctl.b.snak = 1; -+ -+ for (i=0; i <= dev_if->num_in_eps; i++) -+ { -+ diepctl_rd.d32 = dwc_read_reg32(&dev_if->in_ep_regs[i]->diepctl); -+ if (diepctl_rd.b.epena) { -+ dwc_write_reg32(&dev_if->in_ep_regs[i]->diepctl, -+ diepctl.d32); -+ } -+ } -+ /* Disable the Global IN NAK Effective Interrupt */ -+ intr_mask.b.ginnakeff = 1; -+ dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk, -+ intr_mask.d32, 0); -+ -+ /* Clear interrupt */ -+ gintsts.d32 = 0; -+ gintsts.b.ginnakeff = 1; -+ dwc_write_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintsts, -+ gintsts.d32); -+ -+ return 1; -+} -+ -+/** -+ * OUT NAK Effective. 
-+ * -+ */ -+int32_t dwc_otg_pcd_handle_out_nak_effective(dwc_otg_pcd_t *pcd) -+{ -+ gintmsk_data_t intr_mask = { .d32 = 0}; -+ gintsts_data_t gintsts; -+ -+ DWC_PRINT("INTERRUPT Handler not implemented for %s\n", -+ "Global IN NAK Effective\n"); -+ /* Disable the Global IN NAK Effective Interrupt */ -+ intr_mask.b.goutnakeff = 1; -+ dwc_modify_reg32(&GET_CORE_IF(pcd)->core_global_regs->gintmsk, -+ intr_mask.d32, 0); -+ -+ /* Clear interrupt */ -+ gintsts.d32 = 0; -+ gintsts.b.goutnakeff = 1; -+ dwc_write_reg32 (&GET_CORE_IF(pcd)->core_global_regs->gintsts, -+ gintsts.d32); -+ -+ return 1; -+} -+ -+ -+/** -+ * PCD interrupt handler. -+ * -+ * The PCD handles the device interrupts. Many conditions can cause a -+ * device interrupt. When an interrupt occurs, the device interrupt -+ * service routine determines the cause of the interrupt and -+ * dispatches handling to the appropriate function. These interrupt -+ * handling functions are described below. -+ * -+ * All interrupt registers are processed from LSB to MSB. -+ * -+ */ -+int32_t dwc_otg_pcd_handle_intr(dwc_otg_pcd_t *pcd) -+{ -+ dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd); -+#ifdef VERBOSE -+ dwc_otg_core_global_regs_t *global_regs = -+ core_if->core_global_regs; -+#endif -+ gintsts_data_t gintr_status; -+ int32_t retval = 0; -+ -+ -+#ifdef VERBOSE -+ DWC_DEBUGPL(DBG_ANY, "%s() gintsts=%08x gintmsk=%08x\n", -+ __func__, -+ dwc_read_reg32(&global_regs->gintsts), -+ dwc_read_reg32(&global_regs->gintmsk)); -+#endif -+ -+ if (dwc_otg_is_device_mode(core_if)) { -+ SPIN_LOCK(&pcd->lock); -+#ifdef VERBOSE -+ DWC_DEBUGPL(DBG_PCDV, "%s() gintsts=%08x gintmsk=%08x\n", -+ __func__, -+ dwc_read_reg32(&global_regs->gintsts), -+ dwc_read_reg32(&global_regs->gintmsk)); -+#endif -+ -+ gintr_status.d32 = dwc_otg_read_core_intr(core_if); -+ -+/* -+ if (!gintr_status.d32) { -+ SPIN_UNLOCK(&pcd->lock); -+ return 0; -+ } -+*/ -+ DWC_DEBUGPL(DBG_PCDV, "%s: gintsts&gintmsk=%08x\n", -+ __func__, gintr_status.d32); -+ -+ if (gintr_status.b.sofintr) { -+ retval |= dwc_otg_pcd_handle_sof_intr(pcd); -+ } -+ if (gintr_status.b.rxstsqlvl) { -+ retval |= dwc_otg_pcd_handle_rx_status_q_level_intr(pcd); -+ } -+ if (gintr_status.b.nptxfempty) { -+ retval |= dwc_otg_pcd_handle_np_tx_fifo_empty_intr(pcd); -+ } -+ if (gintr_status.b.ginnakeff) { -+ retval |= dwc_otg_pcd_handle_in_nak_effective(pcd); -+ } -+ if (gintr_status.b.goutnakeff) { -+ retval |= dwc_otg_pcd_handle_out_nak_effective(pcd); -+ } -+ if (gintr_status.b.i2cintr) { -+ retval |= dwc_otg_pcd_handle_i2c_intr(pcd); -+ } -+ if (gintr_status.b.erlysuspend) { -+ retval |= dwc_otg_pcd_handle_early_suspend_intr(pcd); -+ } -+ if (gintr_status.b.usbreset) { -+ retval |= dwc_otg_pcd_handle_usb_reset_intr(pcd); -+ } -+ if (gintr_status.b.enumdone) { -+ retval |= dwc_otg_pcd_handle_enum_done_intr(pcd); -+ } -+ if (gintr_status.b.isooutdrop) { -+ retval |= dwc_otg_pcd_handle_isoc_out_packet_dropped_intr(pcd); -+ } -+ if (gintr_status.b.eopframe) { -+ retval |= dwc_otg_pcd_handle_end_periodic_frame_intr(pcd); -+ } -+ if (gintr_status.b.epmismatch) { -+ retval |= dwc_otg_pcd_handle_ep_mismatch_intr(core_if); -+ } -+ if (gintr_status.b.inepint) { -+ if(!core_if->multiproc_int_enable) { -+ retval |= dwc_otg_pcd_handle_in_ep_intr(pcd); -+ } -+ } -+ if (gintr_status.b.outepintr) { -+ if(!core_if->multiproc_int_enable) { -+ retval |= dwc_otg_pcd_handle_out_ep_intr(pcd); -+ } -+ } -+ if (gintr_status.b.incomplisoin) { -+ retval |= dwc_otg_pcd_handle_incomplete_isoc_in_intr(pcd); -+ } -+ if (gintr_status.b.incomplisoout) { 
-+ retval |= dwc_otg_pcd_handle_incomplete_isoc_out_intr(pcd); -+ } -+ -+ /* In MPI mode De vice Endpoints intterrupts are asserted -+ * without setting outepintr and inepint bits set, so these -+ * Interrupt handlers are called without checking these bit-fields -+ */ -+ if(core_if->multiproc_int_enable) { -+ retval |= dwc_otg_pcd_handle_in_ep_intr(pcd); -+ retval |= dwc_otg_pcd_handle_out_ep_intr(pcd); -+ } -+#ifdef VERBOSE -+ DWC_DEBUGPL(DBG_PCDV, "%s() gintsts=%0x\n", __func__, -+ dwc_read_reg32(&global_regs->gintsts)); -+#endif -+ SPIN_UNLOCK(&pcd->lock); -+ } -+ -+ S3C2410X_CLEAR_EINTPEND(); -+ -+ return retval; -+} -+ -+#endif /* DWC_HOST_ONLY */ ---- /dev/null -+++ b/drivers/usb/dwc_otg/dwc_otg_regs.h -@@ -0,0 +1,2075 @@ -+/* ========================================================================== -+ * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_regs.h $ -+ * $Revision: 1.2 $ -+ * $Date: 2008-11-21 05:39:15 $ -+ * $Change: 1099526 $ -+ * -+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, -+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless -+ * otherwise expressly agreed to in writing between Synopsys and you. -+ * -+ * The Software IS NOT an item of Licensed Software or Licensed Product under -+ * any End User Software License Agreement or Agreement for Licensed Product -+ * with Synopsys or any supplement thereto. You are permitted to use and -+ * redistribute this Software in source and binary forms, with or without -+ * modification, provided that redistributions of source code must retain this -+ * notice. You may not view, use, disclose, copy or distribute this file or -+ * any information contained herein except pursuant to this license grant from -+ * Synopsys. If you do not agree with this notice, including the disclaimer -+ * below, then you are not authorized to use the Software. -+ * -+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, -+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -+ * DAMAGE. -+ * ========================================================================== */ -+ -+#ifndef __DWC_OTG_REGS_H__ -+#define __DWC_OTG_REGS_H__ -+ -+/** -+ * @file -+ * -+ * This file contains the data structures for accessing the DWC_otg core registers. -+ * -+ * The application interfaces with the HS OTG core by reading from and -+ * writing to the Control and Status Register (CSR) space through the -+ * AHB Slave interface. These registers are 32 bits wide, and the -+ * addresses are 32-bit-block aligned. 
-+ * CSRs are classified as follows: -+ * - Core Global Registers -+ * - Device Mode Registers -+ * - Device Global Registers -+ * - Device Endpoint Specific Registers -+ * - Host Mode Registers -+ * - Host Global Registers -+ * - Host Port CSRs -+ * - Host Channel Specific Registers -+ * -+ * Only the Core Global registers can be accessed in both Device and -+ * Host modes. When the HS OTG core is operating in one mode, either -+ * Device or Host, the application must not access registers from the -+ * other mode. When the core switches from one mode to another, the -+ * registers in the new mode of operation must be reprogrammed as they -+ * would be after a power-on reset. -+ */ -+ -+/** Maximum number of Periodic FIFOs */ -+#define MAX_PERIO_FIFOS 15 -+/** Maximum number of Transmit FIFOs */ -+#define MAX_TX_FIFOS 15 -+ -+/** Maximum number of Endpoints/HostChannels */ -+#define MAX_EPS_CHANNELS 16 -+ -+/****************************************************************************/ -+/** DWC_otg Core registers . -+ * The dwc_otg_core_global_regs structure defines the size -+ * and relative field offsets for the Core Global registers. -+ */ -+typedef struct dwc_otg_core_global_regs -+{ -+ /** OTG Control and Status Register. Offset: 000h */ -+ volatile uint32_t gotgctl; -+ /** OTG Interrupt Register. Offset: 004h */ -+ volatile uint32_t gotgint; -+ /**Core AHB Configuration Register. Offset: 008h */ -+ volatile uint32_t gahbcfg; -+ -+#define DWC_GLBINTRMASK 0x0001 -+#define DWC_DMAENABLE 0x0020 -+#define DWC_NPTXEMPTYLVL_EMPTY 0x0080 -+#define DWC_NPTXEMPTYLVL_HALFEMPTY 0x0000 -+#define DWC_PTXEMPTYLVL_EMPTY 0x0100 -+#define DWC_PTXEMPTYLVL_HALFEMPTY 0x0000 -+ -+ /**Core USB Configuration Register. Offset: 00Ch */ -+ volatile uint32_t gusbcfg; -+ /**Core Reset Register. Offset: 010h */ -+ volatile uint32_t grstctl; -+ /**Core Interrupt Register. Offset: 014h */ -+ volatile uint32_t gintsts; -+ /**Core Interrupt Mask Register. Offset: 018h */ -+ volatile uint32_t gintmsk; -+ /**Receive Status Queue Read Register (Read Only). Offset: 01Ch */ -+ volatile uint32_t grxstsr; -+ /**Receive Status Queue Read & POP Register (Read Only). Offset: 020h*/ -+ volatile uint32_t grxstsp; -+ /**Receive FIFO Size Register. Offset: 024h */ -+ volatile uint32_t grxfsiz; -+ /**Non Periodic Transmit FIFO Size Register. Offset: 028h */ -+ volatile uint32_t gnptxfsiz; -+ /**Non Periodic Transmit FIFO/Queue Status Register (Read -+ * Only). Offset: 02Ch */ -+ volatile uint32_t gnptxsts; -+ /**I2C Access Register. Offset: 030h */ -+ volatile uint32_t gi2cctl; -+ /**PHY Vendor Control Register. Offset: 034h */ -+ volatile uint32_t gpvndctl; -+ /**General Purpose Input/Output Register. Offset: 038h */ -+ volatile uint32_t ggpio; -+ /**User ID Register. Offset: 03Ch */ -+ volatile uint32_t guid; -+ /**Synopsys ID Register (Read Only). Offset: 040h */ -+ volatile uint32_t gsnpsid; -+ /**User HW Config1 Register (Read Only). Offset: 044h */ -+ volatile uint32_t ghwcfg1; -+ /**User HW Config2 Register (Read Only). Offset: 048h */ -+ volatile uint32_t ghwcfg2; -+#define DWC_SLAVE_ONLY_ARCH 0 -+#define DWC_EXT_DMA_ARCH 1 -+#define DWC_INT_DMA_ARCH 2 -+ -+#define DWC_MODE_HNP_SRP_CAPABLE 0 -+#define DWC_MODE_SRP_ONLY_CAPABLE 1 -+#define DWC_MODE_NO_HNP_SRP_CAPABLE 2 -+#define DWC_MODE_SRP_CAPABLE_DEVICE 3 -+#define DWC_MODE_NO_SRP_CAPABLE_DEVICE 4 -+#define DWC_MODE_SRP_CAPABLE_HOST 5 -+#define DWC_MODE_NO_SRP_CAPABLE_HOST 6 -+ -+ /**User HW Config3 Register (Read Only). 
Offset: 04Ch */ -+ volatile uint32_t ghwcfg3; -+ /**User HW Config4 Register (Read Only). Offset: 050h*/ -+ volatile uint32_t ghwcfg4; -+ /** Reserved Offset: 054h-0FFh */ -+ volatile uint32_t reserved[43]; -+ /** Host Periodic Transmit FIFO Size Register. Offset: 100h */ -+ volatile uint32_t hptxfsiz; -+ /** Device Periodic Transmit FIFO#n Register if dedicated fifos are disabled, -+ otherwise Device Transmit FIFO#n Register. -+ * Offset: 104h + (FIFO_Number-1)*04h, 1 <= FIFO Number <= 15 (1<=n<=15). */ -+ volatile uint32_t dptxfsiz_dieptxf[15]; -+} dwc_otg_core_global_regs_t; -+ -+/** -+ * This union represents the bit fields of the Core OTG Control -+ * and Status Register (GOTGCTL). Set the bits using the bit -+ * fields then write the d32 value to the register. -+ */ -+typedef union gotgctl_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ unsigned sesreqscs : 1; -+ unsigned sesreq : 1; -+ unsigned reserved2_7 : 6; -+ unsigned hstnegscs : 1; -+ unsigned hnpreq : 1; -+ unsigned hstsethnpen : 1; -+ unsigned devhnpen : 1; -+ unsigned reserved12_15 : 4; -+ unsigned conidsts : 1; -+ unsigned reserved17 : 1; -+ unsigned asesvld : 1; -+ unsigned bsesvld : 1; -+ unsigned currmod : 1; -+ unsigned reserved21_31 : 11; -+ } b; -+} gotgctl_data_t; -+ -+/** -+ * This union represents the bit fields of the Core OTG Interrupt Register -+ * (GOTGINT). Set/clear the bits using the bit fields then write the d32 -+ * value to the register. -+ */ -+typedef union gotgint_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ /** Current Mode */ -+ unsigned reserved0_1 : 2; -+ -+ /** Session End Detected */ -+ unsigned sesenddet : 1; -+ -+ unsigned reserved3_7 : 5; -+ -+ /** Session Request Success Status Change */ -+ unsigned sesreqsucstschng : 1; -+ /** Host Negotiation Success Status Change */ -+ unsigned hstnegsucstschng : 1; -+ -+ unsigned reserver10_16 : 7; -+ -+ /** Host Negotiation Detected */ -+ unsigned hstnegdet : 1; -+ /** A-Device Timeout Change */ -+ unsigned adevtoutchng : 1; -+ /** Debounce Done */ -+ unsigned debdone : 1; -+ -+ unsigned reserved31_20 : 12; -+ -+ } b; -+} gotgint_data_t; -+ -+ -+/** -+ * This union represents the bit fields of the Core AHB Configuration -+ * Register (GAHBCFG). Set/clear the bits using the bit fields then -+ * write the d32 value to the register. -+ */ -+typedef union gahbcfg_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ unsigned glblintrmsk : 1; -+#define DWC_GAHBCFG_GLBINT_ENABLE 1 -+ -+ unsigned hburstlen : 4; -+#define DWC_GAHBCFG_INT_DMA_BURST_SINGLE 0 -+#define DWC_GAHBCFG_INT_DMA_BURST_INCR 1 -+#define DWC_GAHBCFG_INT_DMA_BURST_INCR4 3 -+#define DWC_GAHBCFG_INT_DMA_BURST_INCR8 5 -+#define DWC_GAHBCFG_INT_DMA_BURST_INCR16 7 -+ -+ unsigned dmaenable : 1; -+#define DWC_GAHBCFG_DMAENABLE 1 -+ unsigned reserved : 1; -+ unsigned nptxfemplvl_txfemplvl : 1; -+ unsigned ptxfemplvl : 1; -+#define DWC_GAHBCFG_TXFEMPTYLVL_EMPTY 1 -+#define DWC_GAHBCFG_TXFEMPTYLVL_HALFEMPTY 0 -+ unsigned reserved9_31 : 23; -+ } b; -+} gahbcfg_data_t; -+ -+/** -+ * This union represents the bit fields of the Core USB Configuration -+ * Register (GUSBCFG). Set the bits using the bit fields then write -+ * the d32 value to the register. 
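 *
 * (Editor's sketch, not part of the original driver) Typical use of these
 * d32/bit-field unions, with the register accessors used elsewhere in
 * this patch; the field values shown are illustrative only:
 *
 *   gusbcfg_data_t usbcfg;
 *   usbcfg.d32 = dwc_read_reg32(&core_if->core_global_regs->gusbcfg);
 *   usbcfg.b.phyif = 1;      // 16-bit UTMI+ data width (example value)
 *   usbcfg.b.usbtrdtim = 5;  // USB turnaround time (example value)
 *   dwc_write_reg32(&core_if->core_global_regs->gusbcfg, usbcfg.d32);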
-+ */ -+typedef union gusbcfg_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ unsigned toutcal : 3; -+ unsigned phyif : 1; -+ unsigned ulpi_utmi_sel : 1; -+ unsigned fsintf : 1; -+ unsigned physel : 1; -+ unsigned ddrsel : 1; -+ unsigned srpcap : 1; -+ unsigned hnpcap : 1; -+ unsigned usbtrdtim : 4; -+ unsigned nptxfrwnden : 1; -+ unsigned phylpwrclksel : 1; -+ unsigned otgutmifssel : 1; -+ unsigned ulpi_fsls : 1; -+ unsigned ulpi_auto_res : 1; -+ unsigned ulpi_clk_sus_m : 1; -+ unsigned ulpi_ext_vbus_drv : 1; -+ unsigned ulpi_int_vbus_indicator : 1; -+ unsigned term_sel_dl_pulse : 1; -+ unsigned reserved23_27 : 5; -+ unsigned tx_end_delay : 1; -+ unsigned reserved29_31 : 3; -+ } b; -+} gusbcfg_data_t; -+ -+/** -+ * This union represents the bit fields of the Core Reset Register -+ * (GRSTCTL). Set/clear the bits using the bit fields then write the -+ * d32 value to the register. -+ */ -+typedef union grstctl_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ /** Core Soft Reset (CSftRst) (Device and Host) -+ * -+ * The application can flush the control logic in the -+ * entire core using this bit. This bit resets the -+ * pipelines in the AHB Clock domain as well as the -+ * PHY Clock domain. -+ * -+ * The state machines are reset to an IDLE state, the -+ * control bits in the CSRs are cleared, all the -+ * transmit FIFOs and the receive FIFO are flushed. -+ * -+ * The status mask bits that control the generation of -+ * the interrupt, are cleared, to clear the -+ * interrupt. The interrupt status bits are not -+ * cleared, so the application can get the status of -+ * any events that occurred in the core after it has -+ * set this bit. -+ * -+ * Any transactions on the AHB are terminated as soon -+ * as possible following the protocol. Any -+ * transactions on the USB are terminated immediately. -+ * -+ * The configuration settings in the CSRs are -+ * unchanged, so the software doesn't have to -+ * reprogram these registers (Device -+ * Configuration/Host Configuration/Core System -+ * Configuration/Core PHY Configuration). -+ * -+ * The application can write to this bit, any time it -+ * wants to reset the core. This is a self clearing -+ * bit and the core clears this bit after all the -+ * necessary logic is reset in the core, which may -+ * take several clocks, depending on the current state -+ * of the core. -+ */ -+ unsigned csftrst : 1; -+ /** Hclk Soft Reset -+ * -+ * The application uses this bit to reset the control logic in -+ * the AHB clock domain. Only AHB clock domain pipelines are -+ * reset. -+ */ -+ unsigned hsftrst : 1; -+ /** Host Frame Counter Reset (Host Only)
-+ * -+ * The application can reset the (micro)frame number -+ * counter inside the core, using this bit. When the -+ * (micro)frame counter is reset, the subsequent SOF -+ * sent out by the core will have a (micro)frame -+ * number of 0. -+ */ -+ unsigned hstfrm : 1; -+ /** In Token Sequence Learning Queue Flush -+ * (INTknQFlsh) (Device Only) -+ */ -+ unsigned intknqflsh : 1; -+ /** RxFIFO Flush (RxFFlsh) (Device and Host) -+ * -+ * The application can flush the entire Receive FIFO -+ * using this bit.
The application must first -+ * ensure that the core is not in the middle of a -+ * transaction.
The application should write into -+ * this bit, only after making sure that neither the -+ * DMA engine is reading from the RxFIFO nor the MAC -+ * is writing the data into the FIFO.
The -+ * application should wait until the bit is cleared -+ * before performing any other operations. This bit -+ * takes 8 clocks (slowest of PHY or AHB clock) -+ * to clear. -+ */ -+ unsigned rxfflsh : 1; -+ /** TxFIFO Flush (TxFFlsh) (Device and Host). -+ * -+ * This bit is used to selectively flush a single or -+ * all transmit FIFOs. The application must first -+ * ensure that the core is not in the middle of a -+ * transaction.
The application should write into -+ * this bit, only after making sure that neither the -+ * DMA engine is writing into the TxFIFO nor the MAC -+ * is reading the data out of the FIFO.
The -+ * application should wait until the core clears this -+ * bit, before performing any operations. This bit -+ * will takes 8 clocks (slowest of PHY or AHB clock) -+ * to clear. -+ */ -+ unsigned txfflsh : 1; -+ -+ /** TxFIFO Number (TxFNum) (Device and Host). -+ * -+ * This is the FIFO number which needs to be flushed, -+ * using the TxFIFO Flush bit. This field should not -+ * be changed until the TxFIFO Flush bit is cleared by -+ * the core. -+ * - 0x0 : Non Periodic TxFIFO Flush -+ * - 0x1 : Periodic TxFIFO #1 Flush in device mode -+ * or Periodic TxFIFO in host mode -+ * - 0x2 : Periodic TxFIFO #2 Flush in device mode. -+ * - ... -+ * - 0xF : Periodic TxFIFO #15 Flush in device mode -+ * - 0x10: Flush all the Transmit NonPeriodic and -+ * Transmit Periodic FIFOs in the core -+ */ -+ unsigned txfnum : 5; -+ /** Reserved */ -+ unsigned reserved11_29 : 19; -+ /** DMA Request Signal. Indicated DMA request is in -+ * probress. Used for debug purpose. */ -+ unsigned dmareq : 1; -+ /** AHB Master Idle. Indicates the AHB Master State -+ * Machine is in IDLE condition. */ -+ unsigned ahbidle : 1; -+ } b; -+} grstctl_t; -+ -+ -+/** -+ * This union represents the bit fields of the Core Interrupt Mask -+ * Register (GINTMSK). Set/clear the bits using the bit fields then -+ * write the d32 value to the register. -+ */ -+typedef union gintmsk_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ unsigned reserved0 : 1; -+ unsigned modemismatch : 1; -+ unsigned otgintr : 1; -+ unsigned sofintr : 1; -+ unsigned rxstsqlvl : 1; -+ unsigned nptxfempty : 1; -+ unsigned ginnakeff : 1; -+ unsigned goutnakeff : 1; -+ unsigned reserved8 : 1; -+ unsigned i2cintr : 1; -+ unsigned erlysuspend : 1; -+ unsigned usbsuspend : 1; -+ unsigned usbreset : 1; -+ unsigned enumdone : 1; -+ unsigned isooutdrop : 1; -+ unsigned eopframe : 1; -+ unsigned reserved16 : 1; -+ unsigned epmismatch : 1; -+ unsigned inepintr : 1; -+ unsigned outepintr : 1; -+ unsigned incomplisoin : 1; -+ unsigned incomplisoout : 1; -+ unsigned reserved22_23 : 2; -+ unsigned portintr : 1; -+ unsigned hcintr : 1; -+ unsigned ptxfempty : 1; -+ unsigned reserved27 : 1; -+ unsigned conidstschng : 1; -+ unsigned disconnect : 1; -+ unsigned sessreqintr : 1; -+ unsigned wkupintr : 1; -+ } b; -+} gintmsk_data_t; -+/** -+ * This union represents the bit fields of the Core Interrupt Register -+ * (GINTSTS). Set/clear the bits using the bit fields then write the -+ * d32 value to the register. 
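 *
 * (Editor's sketch, not part of the original driver) The PCD interrupt
 * handler earlier in this patch treats the pending set as the AND of
 * GINTSTS and GINTMSK, tests the individual b.* bits to pick a handler,
 * and each handler acknowledges its own source by writing a d32 value
 * with only that bit set back to GINTSTS, e.g.:
 *
 *   gintsts_data_t gintsts = { .d32 = 0 };
 *   gintsts.b.incomplisoout = 1;
 *   dwc_write_reg32(&core_if->core_global_regs->gintsts, gintsts.d32);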
-+ */ -+typedef union gintsts_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+#define DWC_SOF_INTR_MASK 0x0008 -+ /** register bits */ -+ struct -+ { -+#define DWC_HOST_MODE 1 -+ unsigned curmode : 1; -+ unsigned modemismatch : 1; -+ unsigned otgintr : 1; -+ unsigned sofintr : 1; -+ unsigned rxstsqlvl : 1; -+ unsigned nptxfempty : 1; -+ unsigned ginnakeff : 1; -+ unsigned goutnakeff : 1; -+ unsigned reserved8 : 1; -+ unsigned i2cintr : 1; -+ unsigned erlysuspend : 1; -+ unsigned usbsuspend : 1; -+ unsigned usbreset : 1; -+ unsigned enumdone : 1; -+ unsigned isooutdrop : 1; -+ unsigned eopframe : 1; -+ unsigned intokenrx : 1; -+ unsigned epmismatch : 1; -+ unsigned inepint: 1; -+ unsigned outepintr : 1; -+ unsigned incomplisoin : 1; -+ unsigned incomplisoout : 1; -+ unsigned reserved22_23 : 2; -+ unsigned portintr : 1; -+ unsigned hcintr : 1; -+ unsigned ptxfempty : 1; -+ unsigned reserved27 : 1; -+ unsigned conidstschng : 1; -+ unsigned disconnect : 1; -+ unsigned sessreqintr : 1; -+ unsigned wkupintr : 1; -+ } b; -+} gintsts_data_t; -+ -+ -+/** -+ * This union represents the bit fields in the Device Receive Status Read and -+ * Pop Registers (GRXSTSR, GRXSTSP) Read the register into the d32 -+ * element then read out the bits using the bit elements. -+ */ -+typedef union device_grxsts_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ unsigned epnum : 4; -+ unsigned bcnt : 11; -+ unsigned dpid : 2; -+ -+#define DWC_STS_DATA_UPDT 0x2 // OUT Data Packet -+#define DWC_STS_XFER_COMP 0x3 // OUT Data Transfer Complete -+ -+#define DWC_DSTS_GOUT_NAK 0x1 // Global OUT NAK -+#define DWC_DSTS_SETUP_COMP 0x4 // Setup Phase Complete -+#define DWC_DSTS_SETUP_UPDT 0x6 // SETUP Packet -+ unsigned pktsts : 4; -+ unsigned fn : 4; -+ unsigned reserved : 7; -+ } b; -+} device_grxsts_data_t; -+ -+/** -+ * This union represents the bit fields in the Host Receive Status Read and -+ * Pop Registers (GRXSTSR, GRXSTSP) Read the register into the d32 -+ * element then read out the bits using the bit elements. -+ */ -+typedef union host_grxsts_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ unsigned chnum : 4; -+ unsigned bcnt : 11; -+ unsigned dpid : 2; -+ -+ unsigned pktsts : 4; -+#define DWC_GRXSTS_PKTSTS_IN 0x2 -+#define DWC_GRXSTS_PKTSTS_IN_XFER_COMP 0x3 -+#define DWC_GRXSTS_PKTSTS_DATA_TOGGLE_ERR 0x5 -+#define DWC_GRXSTS_PKTSTS_CH_HALTED 0x7 -+ -+ unsigned reserved : 11; -+ } b; -+} host_grxsts_data_t; -+ -+/** -+ * This union represents the bit fields in the FIFO Size Registers (HPTXFSIZ, -+ * GNPTXFSIZ, DPTXFSIZn, DIEPTXFn). Read the register into the d32 element then -+ * read out the bits using the bit elements. -+ */ -+typedef union fifosize_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ unsigned startaddr : 16; -+ unsigned depth : 16; -+ } b; -+} fifosize_data_t; -+ -+/** -+ * This union represents the bit fields in the Non-Periodic Transmit -+ * FIFO/Queue Status Register (GNPTXSTS). Read the register into the -+ * d32 element then read out the bits using the bit -+ * elements. 
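 *
 * (Editor's sketch, not part of the original driver; len stands for the
 * packet length in bytes and is assumed here) This register is typically
 * polled before loading a non-periodic IN packet: both the request queue
 * and the FIFO (space reported in 32-bit words) must have room, e.g.:
 *
 *   gnptxsts_data_t txstatus;
 *   txstatus.d32 = dwc_read_reg32(&core_if->core_global_regs->gnptxsts);
 *   if (txstatus.b.nptxqspcavail > 0 &&
 *       txstatus.b.nptxfspcavail >= (len + 3) / 4) {
 *           // enough room to push len bytes into the Tx FIFO
 *   }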
-+ */ -+typedef union gnptxsts_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ unsigned nptxfspcavail : 16; -+ unsigned nptxqspcavail : 8; -+ /** Top of the Non-Periodic Transmit Request Queue -+ * - bit 24 - Terminate (Last entry for the selected -+ * channel/EP) -+ * - bits 26:25 - Token Type -+ * - 2'b00 - IN/OUT -+ * - 2'b01 - Zero Length OUT -+ * - 2'b10 - PING/Complete Split -+ * - 2'b11 - Channel Halt -+ * - bits 30:27 - Channel/EP Number -+ */ -+ unsigned nptxqtop_terminate : 1; -+ unsigned nptxqtop_token : 2; -+ unsigned nptxqtop_chnep : 4; -+ unsigned reserved : 1; -+ } b; -+} gnptxsts_data_t; -+ -+/** -+ * This union represents the bit fields in the Transmit -+ * FIFO Status Register (DTXFSTS). Read the register into the -+ * d32 element then read out the bits using the bit -+ * elements. -+ */ -+typedef union dtxfsts_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ unsigned txfspcavail : 16; -+ unsigned reserved : 16; -+ } b; -+} dtxfsts_data_t; -+ -+/** -+ * This union represents the bit fields in the I2C Control Register -+ * (I2CCTL). Read the register into the d32 element then read out the -+ * bits using the bit elements. -+ */ -+typedef union gi2cctl_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ unsigned rwdata : 8; -+ unsigned regaddr : 8; -+ unsigned addr : 7; -+ unsigned i2cen : 1; -+ unsigned ack : 1; -+ unsigned i2csuspctl : 1; -+ unsigned i2cdevaddr : 2; -+ unsigned reserved : 2; -+ unsigned rw : 1; -+ unsigned bsydne : 1; -+ } b; -+} gi2cctl_data_t; -+ -+/** -+ * This union represents the bit fields in the User HW Config1 -+ * Register. Read the register into the d32 element then read -+ * out the bits using the bit elements. -+ */ -+typedef union hwcfg1_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ unsigned ep_dir0 : 2; -+ unsigned ep_dir1 : 2; -+ unsigned ep_dir2 : 2; -+ unsigned ep_dir3 : 2; -+ unsigned ep_dir4 : 2; -+ unsigned ep_dir5 : 2; -+ unsigned ep_dir6 : 2; -+ unsigned ep_dir7 : 2; -+ unsigned ep_dir8 : 2; -+ unsigned ep_dir9 : 2; -+ unsigned ep_dir10 : 2; -+ unsigned ep_dir11 : 2; -+ unsigned ep_dir12 : 2; -+ unsigned ep_dir13 : 2; -+ unsigned ep_dir14 : 2; -+ unsigned ep_dir15 : 2; -+ } b; -+} hwcfg1_data_t; -+ -+/** -+ * This union represents the bit fields in the User HW Config2 -+ * Register. Read the register into the d32 element then read -+ * out the bits using the bit elements. 
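 *
 * (Editor's sketch, not part of the original driver) GHWCFG2 is read-only
 * synthesis information, usually captured once at driver initialization,
 * e.g.:
 *
 *   hwcfg2_data_t hwcfg2;
 *   hwcfg2.d32 = dwc_read_reg32(&core_if->core_global_regs->ghwcfg2);
 *   // hwcfg2.b.op_mode      - OTG/device/host capability (DWC_HWCFG2_OP_MODE_*)
 *   // hwcfg2.b.architecture - slave-only, external DMA or internal DMA
 *   // hwcfg2.b.num_dev_ep   - device endpoints implemented in hardware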
-+ */ -+typedef union hwcfg2_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ /* GHWCFG2 */ -+ unsigned op_mode : 3; -+#define DWC_HWCFG2_OP_MODE_HNP_SRP_CAPABLE_OTG 0 -+#define DWC_HWCFG2_OP_MODE_SRP_ONLY_CAPABLE_OTG 1 -+#define DWC_HWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE_OTG 2 -+#define DWC_HWCFG2_OP_MODE_SRP_CAPABLE_DEVICE 3 -+#define DWC_HWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE 4 -+#define DWC_HWCFG2_OP_MODE_SRP_CAPABLE_HOST 5 -+#define DWC_HWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST 6 -+ -+ unsigned architecture : 2; -+ unsigned point2point : 1; -+ unsigned hs_phy_type : 2; -+#define DWC_HWCFG2_HS_PHY_TYPE_NOT_SUPPORTED 0 -+#define DWC_HWCFG2_HS_PHY_TYPE_UTMI 1 -+#define DWC_HWCFG2_HS_PHY_TYPE_ULPI 2 -+#define DWC_HWCFG2_HS_PHY_TYPE_UTMI_ULPI 3 -+ -+ unsigned fs_phy_type : 2; -+ unsigned num_dev_ep : 4; -+ unsigned num_host_chan : 4; -+ unsigned perio_ep_supported : 1; -+ unsigned dynamic_fifo : 1; -+ unsigned multi_proc_int : 1; -+ unsigned reserved21 : 1; -+ unsigned nonperio_tx_q_depth : 2; -+ unsigned host_perio_tx_q_depth : 2; -+ unsigned dev_token_q_depth : 5; -+ unsigned reserved31 : 1; -+ } b; -+} hwcfg2_data_t; -+ -+/** -+ * This union represents the bit fields in the User HW Config3 -+ * Register. Read the register into the d32 element then read -+ * out the bits using the bit elements. -+ */ -+typedef union hwcfg3_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ /* GHWCFG3 */ -+ unsigned xfer_size_cntr_width : 4; -+ unsigned packet_size_cntr_width : 3; -+ unsigned otg_func : 1; -+ unsigned i2c : 1; -+ unsigned vendor_ctrl_if : 1; -+ unsigned optional_features : 1; -+ unsigned synch_reset_type : 1; -+ unsigned ahb_phy_clock_synch : 1; -+ unsigned reserved15_13 : 3; -+ unsigned dfifo_depth : 16; -+ } b; -+} hwcfg3_data_t; -+ -+/** -+ * This union represents the bit fields in the User HW Config4 -+ * Register. Read the register into the d32 element then read -+ * out the bits using the bit elements. -+ */ -+typedef union hwcfg4_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ unsigned num_dev_perio_in_ep : 4; -+ unsigned power_optimiz : 1; -+ unsigned min_ahb_freq : 9; -+ unsigned utmi_phy_data_width : 2; -+ unsigned num_dev_mode_ctrl_ep : 4; -+ unsigned iddig_filt_en : 1; -+ unsigned vbus_valid_filt_en : 1; -+ unsigned a_valid_filt_en : 1; -+ unsigned b_valid_filt_en : 1; -+ unsigned session_end_filt_en : 1; -+ unsigned ded_fifo_en : 1; -+ unsigned num_in_eps : 4; -+ unsigned desc_dma : 1; -+ unsigned desc_dma_dyn : 1; -+ } b; -+} hwcfg4_data_t; -+ -+//////////////////////////////////////////// -+// Device Registers -+/** -+ * Device Global Registers. Offsets 800h-BFFh -+ * -+ * The following structures define the size and relative field offsets -+ * for the Device Mode Registers. -+ * -+ * These registers are visible only in Device mode and must not be -+ * accessed in Host mode, as the results are unknown. -+ */ -+typedef struct dwc_otg_dev_global_regs -+{ -+ /** Device Configuration Register. Offset 800h */ -+ volatile uint32_t dcfg; -+ /** Device Control Register. Offset: 804h */ -+ volatile uint32_t dctl; -+ /** Device Status Register (Read Only). Offset: 808h */ -+ volatile uint32_t dsts; -+ /** Reserved. Offset: 80Ch */ -+ uint32_t unused; -+ /** Device IN Endpoint Common Interrupt Mask -+ * Register. Offset: 810h */ -+ volatile uint32_t diepmsk; -+ /** Device OUT Endpoint Common Interrupt Mask -+ * Register. 
Offset: 814h */ -+ volatile uint32_t doepmsk; -+ /** Device All Endpoints Interrupt Register. Offset: 818h */ -+ volatile uint32_t daint; -+ /** Device All Endpoints Interrupt Mask Register. Offset: -+ * 81Ch */ -+ volatile uint32_t daintmsk; -+ /** Device IN Token Queue Read Register-1 (Read Only). -+ * Offset: 820h */ -+ volatile uint32_t dtknqr1; -+ /** Device IN Token Queue Read Register-2 (Read Only). -+ * Offset: 824h */ -+ volatile uint32_t dtknqr2; -+ /** Device VBUS discharge Register. Offset: 828h */ -+ volatile uint32_t dvbusdis; -+ /** Device VBUS Pulse Register. Offset: 82Ch */ -+ volatile uint32_t dvbuspulse; -+ /** Device IN Token Queue Read Register-3 (Read Only). / -+ * Device Thresholding control register (Read/Write) -+ * Offset: 830h */ -+ volatile uint32_t dtknqr3_dthrctl; -+ /** Device IN Token Queue Read Register-4 (Read Only). / -+ * Device IN EPs empty Inr. Mask Register (Read/Write) -+ * Offset: 834h */ -+ volatile uint32_t dtknqr4_fifoemptymsk; -+ /** Device Each Endpoint Interrupt Register (Read Only). / -+ * Offset: 838h */ -+ volatile uint32_t deachint; -+ /** Device Each Endpoint Interrupt mask Register (Read/Write). / -+ * Offset: 83Ch */ -+ volatile uint32_t deachintmsk; -+ /** Device Each In Endpoint Interrupt mask Register (Read/Write). / -+ * Offset: 840h */ -+ volatile uint32_t diepeachintmsk[MAX_EPS_CHANNELS]; -+ /** Device Each Out Endpoint Interrupt mask Register (Read/Write). / -+ * Offset: 880h */ -+ volatile uint32_t doepeachintmsk[MAX_EPS_CHANNELS]; -+} dwc_otg_device_global_regs_t; -+ -+/** -+ * This union represents the bit fields in the Device Configuration -+ * Register. Read the register into the d32 member then -+ * set/clear the bits using the bit elements. Write the -+ * d32 member to the dcfg register. -+ */ -+typedef union dcfg_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ /** Device Speed */ -+ unsigned devspd : 2; -+ /** Non Zero Length Status OUT Handshake */ -+ unsigned nzstsouthshk : 1; -+#define DWC_DCFG_SEND_STALL 1 -+ -+ unsigned reserved3 : 1; -+ /** Device Addresses */ -+ unsigned devaddr : 7; -+ /** Periodic Frame Interval */ -+ unsigned perfrint : 2; -+#define DWC_DCFG_FRAME_INTERVAL_80 0 -+#define DWC_DCFG_FRAME_INTERVAL_85 1 -+#define DWC_DCFG_FRAME_INTERVAL_90 2 -+#define DWC_DCFG_FRAME_INTERVAL_95 3 -+ -+ unsigned reserved13_17 : 5; -+ /** In Endpoint Mis-match count */ -+ unsigned epmscnt : 5; -+ /** Enable Descriptor DMA in Device mode */ -+ unsigned descdma : 1; -+ } b; -+} dcfg_data_t; -+ -+/** -+ * This union represents the bit fields in the Device Control -+ * Register. Read the register into the d32 member then -+ * set/clear the bits using the bit elements. 
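 *
 * (Editor's sketch, not part of the original driver) Example of a soft
 * disconnect/reconnect through this union, using the (address, clear-mask,
 * set-mask) convention that dwc_modify_reg32() follows in the PCD code:
 *
 *   dctl_data_t dctl = { .d32 = 0 };
 *   dctl.b.sftdiscon = 1;
 *   // detach from the bus ...
 *   dwc_modify_reg32(&dev_if->dev_global_regs->dctl, 0, dctl.d32);
 *   // ... and reconnect once the required disconnect time has elapsed
 *   dwc_modify_reg32(&dev_if->dev_global_regs->dctl, dctl.d32, 0);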
-+ */ -+typedef union dctl_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ /** Remote Wakeup */ -+ unsigned rmtwkupsig : 1; -+ /** Soft Disconnect */ -+ unsigned sftdiscon : 1; -+ /** Global Non-Periodic IN NAK Status */ -+ unsigned gnpinnaksts : 1; -+ /** Global OUT NAK Status */ -+ unsigned goutnaksts : 1; -+ /** Test Control */ -+ unsigned tstctl : 3; -+ /** Set Global Non-Periodic IN NAK */ -+ unsigned sgnpinnak : 1; -+ /** Clear Global Non-Periodic IN NAK */ -+ unsigned cgnpinnak : 1; -+ /** Set Global OUT NAK */ -+ unsigned sgoutnak : 1; -+ /** Clear Global OUT NAK */ -+ unsigned cgoutnak : 1; -+ -+ /** Power-On Programming Done */ -+ unsigned pwronprgdone : 1; -+ /** Global Continue on BNA */ -+ unsigned gcontbna : 1; -+ /** Global Multi Count */ -+ unsigned gmc : 2; -+ /** Ignore Frame Number for ISOC EPs */ -+ unsigned ifrmnum : 1; -+ /** NAK on Babble */ -+ unsigned nakonbble : 1; -+ -+ unsigned reserved16_31 : 16; -+ } b; -+} dctl_data_t; -+ -+/** -+ * This union represents the bit fields in the Device Status -+ * Register. Read the register into the d32 member then -+ * set/clear the bits using the bit elements. -+ */ -+typedef union dsts_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ /** Suspend Status */ -+ unsigned suspsts : 1; -+ /** Enumerated Speed */ -+ unsigned enumspd : 2; -+#define DWC_DSTS_ENUMSPD_HS_PHY_30MHZ_OR_60MHZ 0 -+#define DWC_DSTS_ENUMSPD_FS_PHY_30MHZ_OR_60MHZ 1 -+#define DWC_DSTS_ENUMSPD_LS_PHY_6MHZ 2 -+#define DWC_DSTS_ENUMSPD_FS_PHY_48MHZ 3 -+ /** Erratic Error */ -+ unsigned errticerr : 1; -+ unsigned reserved4_7: 4; -+ /** Frame or Microframe Number of the received SOF */ -+ unsigned soffn : 14; -+ unsigned reserved22_31 : 10; -+ } b; -+} dsts_data_t; -+ -+ -+/** -+ * This union represents the bit fields in the Device IN EP Interrupt -+ * Register and the Device IN EP Common Mask Register. -+ * -+ * - Read the register into the d32 member then set/clear the -+ * bits using the bit elements. -+ */ -+typedef union diepint_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ /** Transfer complete mask */ -+ unsigned xfercompl : 1; -+ /** Endpoint disable mask */ -+ unsigned epdisabled : 1; -+ /** AHB Error mask */ -+ unsigned ahberr : 1; -+ /** TimeOUT Handshake mask (non-ISOC EPs) */ -+ unsigned timeout : 1; -+ /** IN Token received with TxF Empty mask */ -+ unsigned intktxfemp : 1; -+ /** IN Token Received with EP mismatch mask */ -+ unsigned intknepmis : 1; -+ /** IN Endpoint HAK Effective mask */ -+ unsigned inepnakeff : 1; -+ /** IN Endpoint HAK Effective mask */ -+ unsigned emptyintr : 1; -+ -+ unsigned txfifoundrn : 1; -+ -+ /** BNA Interrupt mask */ -+ unsigned bna : 1; -+ -+ unsigned reserved10_12 : 3; -+ /** BNA Interrupt mask */ -+ unsigned nak : 1; -+ -+ unsigned reserved14_31 : 18; -+ } b; -+} diepint_data_t; -+ -+/** -+ * This union represents the bit fields in the Device IN EP -+ * Common/Dedicated Interrupt Mask Register. -+ */ -+typedef union diepint_data diepmsk_data_t; -+ -+/** -+ * This union represents the bit fields in the Device OUT EP Interrupt -+ * Registerand Device OUT EP Common Interrupt Mask Register. -+ * -+ * - Read the register into the d32 member then set/clear the -+ * bits using the bit elements. 
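 *
 * (Editor's sketch, not part of the original driver) These status bits are
 * write-1-to-clear; the CLEAR_OUT_EP_INTR() macro in the PCD interrupt
 * code acknowledges a single source exactly like this:
 *
 *   doepint_data_t doepint = { .d32 = 0 };
 *   doepint.b.setup = 1;
 *   dwc_write_reg32(&core_if->dev_if->out_ep_regs[epnum]->doepint,
 *                   doepint.d32);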
-+ */ -+typedef union doepint_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ /** Transfer complete */ -+ unsigned xfercompl : 1; -+ /** Endpoint disable */ -+ unsigned epdisabled : 1; -+ /** AHB Error */ -+ unsigned ahberr : 1; -+ /** Setup Phase Done (contorl EPs) */ -+ unsigned setup : 1; -+ /** OUT Token Received when Endpoint Disabled */ -+ unsigned outtknepdis : 1; -+ -+ unsigned stsphsercvd : 1; -+ /** Back-to-Back SETUP Packets Received */ -+ unsigned back2backsetup : 1; -+ -+ unsigned reserved7 : 1; -+ /** OUT packet Error */ -+ unsigned outpkterr : 1; -+ /** BNA Interrupt */ -+ unsigned bna : 1; -+ -+ unsigned reserved10 : 1; -+ /** Packet Drop Status */ -+ unsigned pktdrpsts : 1; -+ /** Babble Interrupt */ -+ unsigned babble : 1; -+ /** NAK Interrupt */ -+ unsigned nak : 1; -+ /** NYET Interrupt */ -+ unsigned nyet : 1; -+ -+ unsigned reserved15_31 : 17; -+ } b; -+} doepint_data_t; -+ -+/** -+ * This union represents the bit fields in the Device OUT EP -+ * Common/Dedicated Interrupt Mask Register. -+ */ -+typedef union doepint_data doepmsk_data_t; -+ -+/** -+ * This union represents the bit fields in the Device All EP Interrupt -+ * and Mask Registers. -+ * - Read the register into the d32 member then set/clear the -+ * bits using the bit elements. -+ */ -+typedef union daint_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ /** IN Endpoint bits */ -+ unsigned in : 16; -+ /** OUT Endpoint bits */ -+ unsigned out : 16; -+ } ep; -+ struct -+ { -+ /** IN Endpoint bits */ -+ unsigned inep0 : 1; -+ unsigned inep1 : 1; -+ unsigned inep2 : 1; -+ unsigned inep3 : 1; -+ unsigned inep4 : 1; -+ unsigned inep5 : 1; -+ unsigned inep6 : 1; -+ unsigned inep7 : 1; -+ unsigned inep8 : 1; -+ unsigned inep9 : 1; -+ unsigned inep10 : 1; -+ unsigned inep11 : 1; -+ unsigned inep12 : 1; -+ unsigned inep13 : 1; -+ unsigned inep14 : 1; -+ unsigned inep15 : 1; -+ /** OUT Endpoint bits */ -+ unsigned outep0 : 1; -+ unsigned outep1 : 1; -+ unsigned outep2 : 1; -+ unsigned outep3 : 1; -+ unsigned outep4 : 1; -+ unsigned outep5 : 1; -+ unsigned outep6 : 1; -+ unsigned outep7 : 1; -+ unsigned outep8 : 1; -+ unsigned outep9 : 1; -+ unsigned outep10 : 1; -+ unsigned outep11 : 1; -+ unsigned outep12 : 1; -+ unsigned outep13 : 1; -+ unsigned outep14 : 1; -+ unsigned outep15 : 1; -+ } b; -+} daint_data_t; -+ -+/** -+ * This union represents the bit fields in the Device IN Token Queue -+ * Read Registers. -+ * - Read the register into the d32 member. -+ * - READ-ONLY Register -+ */ -+typedef union dtknq1_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ /** In Token Queue Write Pointer */ -+ unsigned intknwptr : 5; -+ /** Reserved */ -+ unsigned reserved05_06 : 2; -+ /** write pointer has wrapped. */ -+ unsigned wrap_bit : 1; -+ /** EP Numbers of IN Tokens 0 ... 4 */ -+ unsigned epnums0_5 : 24; -+ }b; -+} dtknq1_data_t; -+ -+/** -+ * This union represents Threshold control Register -+ * - Read and write the register into the d32 member. -+ * - READ-WRITABLE Register -+ */ -+typedef union dthrctl_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ /** non ISO Tx Thr. Enable */ -+ unsigned non_iso_thr_en : 1; -+ /** ISO Tx Thr. Enable */ -+ unsigned iso_thr_en : 1; -+ /** Tx Thr. Length */ -+ unsigned tx_thr_len : 9; -+ /** Reserved */ -+ unsigned reserved11_15 : 5; -+ /** Rx Thr. Enable */ -+ unsigned rx_thr_en : 1; -+ /** Rx Thr. 
Length */ -+ unsigned rx_thr_len : 9; -+ /** Reserved */ -+ unsigned reserved26_31 : 6; -+ }b; -+} dthrctl_data_t; -+ -+ -+/** -+ * Device Logical IN Endpoint-Specific Registers. Offsets -+ * 900h-AFCh -+ * -+ * There will be one set of endpoint registers per logical endpoint -+ * implemented. -+ * -+ * These registers are visible only in Device mode and must not be -+ * accessed in Host mode, as the results are unknown. -+ */ -+typedef struct dwc_otg_dev_in_ep_regs -+{ -+ /** Device IN Endpoint Control Register. Offset:900h + -+ * (ep_num * 20h) + 00h */ -+ volatile uint32_t diepctl; -+ /** Reserved. Offset:900h + (ep_num * 20h) + 04h */ -+ uint32_t reserved04; -+ /** Device IN Endpoint Interrupt Register. Offset:900h + -+ * (ep_num * 20h) + 08h */ -+ volatile uint32_t diepint; -+ /** Reserved. Offset:900h + (ep_num * 20h) + 0Ch */ -+ uint32_t reserved0C; -+ /** Device IN Endpoint Transfer Size -+ * Register. Offset:900h + (ep_num * 20h) + 10h */ -+ volatile uint32_t dieptsiz; -+ /** Device IN Endpoint DMA Address Register. Offset:900h + -+ * (ep_num * 20h) + 14h */ -+ volatile uint32_t diepdma; -+ /** Device IN Endpoint Transmit FIFO Status Register. Offset:900h + -+ * (ep_num * 20h) + 18h */ -+ volatile uint32_t dtxfsts; -+ /** Device IN Endpoint DMA Buffer Register. Offset:900h + -+ * (ep_num * 20h) + 1Ch */ -+ volatile uint32_t diepdmab; -+} dwc_otg_dev_in_ep_regs_t; -+ -+/** -+ * Device Logical OUT Endpoint-Specific Registers. Offsets: -+ * B00h-CFCh -+ * -+ * There will be one set of endpoint registers per logical endpoint -+ * implemented. -+ * -+ * These registers are visible only in Device mode and must not be -+ * accessed in Host mode, as the results are unknown. -+ */ -+typedef struct dwc_otg_dev_out_ep_regs -+{ -+ /** Device OUT Endpoint Control Register. Offset:B00h + -+ * (ep_num * 20h) + 00h */ -+ volatile uint32_t doepctl; -+ /** Device OUT Endpoint Frame number Register. Offset: -+ * B00h + (ep_num * 20h) + 04h */ -+ volatile uint32_t doepfn; -+ /** Device OUT Endpoint Interrupt Register. Offset:B00h + -+ * (ep_num * 20h) + 08h */ -+ volatile uint32_t doepint; -+ /** Reserved. Offset:B00h + (ep_num * 20h) + 0Ch */ -+ uint32_t reserved0C; -+ /** Device OUT Endpoint Transfer Size Register. Offset: -+ * B00h + (ep_num * 20h) + 10h */ -+ volatile uint32_t doeptsiz; -+ /** Device OUT Endpoint DMA Address Register. Offset:B00h -+ * + (ep_num * 20h) + 14h */ -+ volatile uint32_t doepdma; -+ /** Reserved. Offset:B00h + * (ep_num * 20h) + 1Ch */ -+ uint32_t unused; -+ /** Device OUT Endpoint DMA Buffer Register. Offset:B00h -+ * + (ep_num * 20h) + 1Ch */ -+ uint32_t doepdmab; -+} dwc_otg_dev_out_ep_regs_t; -+ -+/** -+ * This union represents the bit fields in the Device EP Control -+ * Register. Read the register into the d32 member then -+ * set/clear the bits using the bit elements. 
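 *
 * (Editor's sketch, not part of the original driver) The PCD code earlier
 * in this patch stalls EP0 IN through this union in exactly this way --
 * request a disable only if the endpoint is currently enabled, then set
 * the STALL handshake:
 *
 *   depctl_data_t depctl;
 *   depctl.d32 = dwc_read_reg32(&dev_if->in_ep_regs[epnum]->diepctl);
 *   if (depctl.b.epena)
 *           depctl.b.epdis = 1;
 *   depctl.b.stall = 1;
 *   dwc_write_reg32(&dev_if->in_ep_regs[epnum]->diepctl, depctl.d32);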
-+ */ -+typedef union depctl_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ /** Maximum Packet Size -+ * IN/OUT EPn -+ * IN/OUT EP0 - 2 bits -+ * 2'b00: 64 Bytes -+ * 2'b01: 32 -+ * 2'b10: 16 -+ * 2'b11: 8 */ -+ unsigned mps : 11; -+#define DWC_DEP0CTL_MPS_64 0 -+#define DWC_DEP0CTL_MPS_32 1 -+#define DWC_DEP0CTL_MPS_16 2 -+#define DWC_DEP0CTL_MPS_8 3 -+ -+ /** Next Endpoint -+ * IN EPn/IN EP0 -+ * OUT EPn/OUT EP0 - reserved */ -+ unsigned nextep : 4; -+ -+ /** USB Active Endpoint */ -+ unsigned usbactep : 1; -+ -+ /** Endpoint DPID (INTR/Bulk IN and OUT endpoints) -+ * This field contains the PID of the packet going to -+ * be received or transmitted on this endpoint. The -+ * application should program the PID of the first -+ * packet going to be received or transmitted on this -+ * endpoint , after the endpoint is -+ * activated. Application use the SetD1PID and -+ * SetD0PID fields of this register to program either -+ * D0 or D1 PID. -+ * -+ * The encoding for this field is -+ * - 0: D0 -+ * - 1: D1 -+ */ -+ unsigned dpid : 1; -+ -+ /** NAK Status */ -+ unsigned naksts : 1; -+ -+ /** Endpoint Type -+ * 2'b00: Control -+ * 2'b01: Isochronous -+ * 2'b10: Bulk -+ * 2'b11: Interrupt */ -+ unsigned eptype : 2; -+ -+ /** Snoop Mode -+ * OUT EPn/OUT EP0 -+ * IN EPn/IN EP0 - reserved */ -+ unsigned snp : 1; -+ -+ /** Stall Handshake */ -+ unsigned stall : 1; -+ -+ /** Tx Fifo Number -+ * IN EPn/IN EP0 -+ * OUT EPn/OUT EP0 - reserved */ -+ unsigned txfnum : 4; -+ -+ /** Clear NAK */ -+ unsigned cnak : 1; -+ /** Set NAK */ -+ unsigned snak : 1; -+ /** Set DATA0 PID (INTR/Bulk IN and OUT endpoints) -+ * Writing to this field sets the Endpoint DPID (DPID) -+ * field in this register to DATA0. Set Even -+ * (micro)frame (SetEvenFr) (ISO IN and OUT Endpoints) -+ * Writing to this field sets the Even/Odd -+ * (micro)frame (EO_FrNum) field to even (micro) -+ * frame. -+ */ -+ unsigned setd0pid : 1; -+ /** Set DATA1 PID (INTR/Bulk IN and OUT endpoints) -+ * Writing to this field sets the Endpoint DPID (DPID) -+ * field in this register to DATA1 Set Odd -+ * (micro)frame (SetOddFr) (ISO IN and OUT Endpoints) -+ * Writing to this field sets the Even/Odd -+ * (micro)frame (EO_FrNum) field to odd (micro) frame. -+ */ -+ unsigned setd1pid : 1; -+ -+ /** Endpoint Disable */ -+ unsigned epdis : 1; -+ /** Endpoint Enable */ -+ unsigned epena : 1; -+ } b; -+} depctl_data_t; -+ -+/** -+ * This union represents the bit fields in the Device EP Transfer -+ * Size Register. Read the register into the d32 member then -+ * set/clear the bits using the bit elements. -+ */ -+typedef union deptsiz_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct { -+ /** Transfer size */ -+ unsigned xfersize : 19; -+ /** Packet Count */ -+ unsigned pktcnt : 10; -+ /** Multi Count - Periodic IN endpoints */ -+ unsigned mc : 2; -+ unsigned reserved : 1; -+ } b; -+} deptsiz_data_t; -+ -+/** -+ * This union represents the bit fields in the Device EP 0 Transfer -+ * Size Register. Read the register into the d32 member then -+ * set/clear the bits using the bit elements. 
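 *
 * (Editor's sketch, not part of the original driver; the values are an
 * assumption, shown only to illustrate the fields) DOEPTSIZ0 is commonly
 * armed to accept up to three back-to-back SETUP packets on EP0 OUT:
 *
 *   deptsiz0_data_t doeptsiz = { .d32 = 0 };
 *   doeptsiz.b.supcnt = 3;        // SETUP packet count
 *   doeptsiz.b.pktcnt = 1;
 *   doeptsiz.b.xfersize = 8 * 3;  // room for three 8-byte SETUP packets
 *   dwc_write_reg32(&dev_if->out_ep_regs[0]->doeptsiz, doeptsiz.d32);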
-+ */ -+typedef union deptsiz0_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct { -+ /** Transfer size */ -+ unsigned xfersize : 7; -+ /** Reserved */ -+ unsigned reserved7_18 : 12; -+ /** Packet Count */ -+ unsigned pktcnt : 1; -+ /** Reserved */ -+ unsigned reserved20_28 : 9; -+ /**Setup Packet Count (DOEPTSIZ0 Only) */ -+ unsigned supcnt : 2; -+ unsigned reserved31; -+ } b; -+} deptsiz0_data_t; -+ -+ -+///////////////////////////////////////////////// -+// DMA Descriptor Specific Structures -+// -+ -+/** Buffer status definitions */ -+ -+#define BS_HOST_READY 0x0 -+#define BS_DMA_BUSY 0x1 -+#define BS_DMA_DONE 0x2 -+#define BS_HOST_BUSY 0x3 -+ -+/** Receive/Transmit status definitions */ -+ -+#define RTS_SUCCESS 0x0 -+#define RTS_BUFFLUSH 0x1 -+#define RTS_RESERVED 0x2 -+#define RTS_BUFERR 0x3 -+ -+ -+/** -+ * This union represents the bit fields in the DMA Descriptor -+ * status quadlet. Read the quadlet into the d32 member then -+ * set/clear the bits using the bit, b_iso_out and -+ * b_iso_in elements. -+ */ -+typedef union desc_sts_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** quadlet bits */ -+ struct { -+ /** Received number of bytes */ -+ unsigned bytes : 16; -+ -+ unsigned reserved16_22 : 7; -+ /** Multiple Transfer - only for OUT EPs */ -+ unsigned mtrf : 1; -+ /** Setup Packet received - only for OUT EPs */ -+ unsigned sr : 1; -+ /** Interrupt On Complete */ -+ unsigned ioc : 1; -+ /** Short Packet */ -+ unsigned sp : 1; -+ /** Last */ -+ unsigned l : 1; -+ /** Receive Status */ -+ unsigned sts : 2; -+ /** Buffer Status */ -+ unsigned bs : 2; -+ } b; -+ -+#ifdef DWC_EN_ISOC -+ /** iso out quadlet bits */ -+ struct { -+ /** Received number of bytes */ -+ unsigned rxbytes : 11; -+ -+ unsigned reserved11 : 1; -+ /** Frame Number */ -+ unsigned framenum : 11; -+ /** Received ISO Data PID */ -+ unsigned pid : 2; -+ /** Interrupt On Complete */ -+ unsigned ioc : 1; -+ /** Short Packet */ -+ unsigned sp : 1; -+ /** Last */ -+ unsigned l : 1; -+ /** Receive Status */ -+ unsigned rxsts : 2; -+ /** Buffer Status */ -+ unsigned bs : 2; -+ } b_iso_out; -+ -+ /** iso in quadlet bits */ -+ struct { -+ /** Transmited number of bytes */ -+ unsigned txbytes : 12; -+ /** Frame Number */ -+ unsigned framenum : 11; -+ /** Transmited ISO Data PID */ -+ unsigned pid : 2; -+ /** Interrupt On Complete */ -+ unsigned ioc : 1; -+ /** Short Packet */ -+ unsigned sp : 1; -+ /** Last */ -+ unsigned l : 1; -+ /** Transmit Status */ -+ unsigned txsts : 2; -+ /** Buffer Status */ -+ unsigned bs : 2; -+ } b_iso_in; -+#endif //DWC_EN_ISOC -+} desc_sts_data_t; -+ -+/** -+ * DMA Descriptor structure -+ * -+ * DMA Descriptor structure contains two quadlets: -+ * Status quadlet and Data buffer pointer. -+ */ -+typedef struct dwc_otg_dma_desc -+{ -+ /** DMA Descriptor status quadlet */ -+ desc_sts_data_t status; -+ /** DMA Descriptor data buffer pointer */ -+ dma_addr_t buf; -+} dwc_otg_dma_desc_t; -+ -+/** -+ * The dwc_otg_dev_if structure contains information needed to manage -+ * the DWC_otg controller acting in device mode. It represents the -+ * programming view of the device-specific aspects of the controller. -+ */ -+typedef struct dwc_otg_dev_if -+{ -+ /** Pointer to device Global registers. 
-+ * Device Global Registers starting at offset 800h -+ */ -+ dwc_otg_device_global_regs_t *dev_global_regs; -+#define DWC_DEV_GLOBAL_REG_OFFSET 0x800 -+ -+ /** -+ * Device Logical IN Endpoint-Specific Registers 900h-AFCh -+ */ -+ dwc_otg_dev_in_ep_regs_t *in_ep_regs[MAX_EPS_CHANNELS]; -+#define DWC_DEV_IN_EP_REG_OFFSET 0x900 -+#define DWC_EP_REG_OFFSET 0x20 -+ -+ /** Device Logical OUT Endpoint-Specific Registers B00h-CFCh */ -+ dwc_otg_dev_out_ep_regs_t *out_ep_regs[MAX_EPS_CHANNELS]; -+#define DWC_DEV_OUT_EP_REG_OFFSET 0xB00 -+ -+ /* Device configuration information*/ -+ uint8_t speed; /**< Device Speed 0: Unknown, 1: LS, 2:FS, 3: HS */ -+ uint8_t num_in_eps; /**< Number # of Tx EP range: 0-15 exept ep0 */ -+ uint8_t num_out_eps; /**< Number # of Rx EP range: 0-15 exept ep 0*/ -+ -+ /** Size of periodic FIFOs (Bytes) */ -+ uint16_t perio_tx_fifo_size[MAX_PERIO_FIFOS]; -+ -+ /** Size of Tx FIFOs (Bytes) */ -+ uint16_t tx_fifo_size[MAX_TX_FIFOS]; -+ -+ /** Thresholding enable flags and length varaiables **/ -+ uint16_t rx_thr_en; -+ uint16_t iso_tx_thr_en; -+ uint16_t non_iso_tx_thr_en; -+ -+ uint16_t rx_thr_length; -+ uint16_t tx_thr_length; -+ -+ /** -+ * Pointers to the DMA Descriptors for EP0 Control -+ * transfers (virtual and physical) -+ */ -+ -+ /** 2 descriptors for SETUP packets */ -+ uint32_t dma_setup_desc_addr[2]; -+ dwc_otg_dma_desc_t* setup_desc_addr[2]; -+ -+ /** Pointer to Descriptor with latest SETUP packet */ -+ dwc_otg_dma_desc_t* psetup; -+ -+ /** Index of current SETUP handler descriptor */ -+ uint32_t setup_desc_index; -+ -+ /** Descriptor for Data In or Status In phases */ -+ uint32_t dma_in_desc_addr; -+ dwc_otg_dma_desc_t* in_desc_addr;; -+ -+ /** Descriptor for Data Out or Status Out phases */ -+ uint32_t dma_out_desc_addr; -+ dwc_otg_dma_desc_t* out_desc_addr; -+ -+} dwc_otg_dev_if_t; -+ -+ -+ -+ -+///////////////////////////////////////////////// -+// Host Mode Register Structures -+// -+/** -+ * The Host Global Registers structure defines the size and relative -+ * field offsets for the Host Mode Global Registers. Host Global -+ * Registers offsets 400h-7FFh. -+*/ -+typedef struct dwc_otg_host_global_regs -+{ -+ /** Host Configuration Register. Offset: 400h */ -+ volatile uint32_t hcfg; -+ /** Host Frame Interval Register. Offset: 404h */ -+ volatile uint32_t hfir; -+ /** Host Frame Number / Frame Remaining Register. Offset: 408h */ -+ volatile uint32_t hfnum; -+ /** Reserved. Offset: 40Ch */ -+ uint32_t reserved40C; -+ /** Host Periodic Transmit FIFO/ Queue Status Register. Offset: 410h */ -+ volatile uint32_t hptxsts; -+ /** Host All Channels Interrupt Register. Offset: 414h */ -+ volatile uint32_t haint; -+ /** Host All Channels Interrupt Mask Register. Offset: 418h */ -+ volatile uint32_t haintmsk; -+} dwc_otg_host_global_regs_t; -+ -+/** -+ * This union represents the bit fields in the Host Configuration Register. -+ * Read the register into the d32 member then set/clear the bits using -+ * the bit elements. Write the d32 member to the hcfg register. -+ */ -+typedef union hcfg_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ -+ /** register bits */ -+ struct -+ { -+ /** FS/LS Phy Clock Select */ -+ unsigned fslspclksel : 2; -+#define DWC_HCFG_30_60_MHZ 0 -+#define DWC_HCFG_48_MHZ 1 -+#define DWC_HCFG_6_MHZ 2 -+ -+ /** FS/LS Only Support */ -+ unsigned fslssupp : 1; -+ } b; -+} hcfg_data_t; -+ -+/** -+ * This union represents the bit fields in the Host Frame Remaing/Number -+ * Register. 
-+ */ -+typedef union hfir_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ -+ /** register bits */ -+ struct -+ { -+ unsigned frint : 16; -+ unsigned reserved : 16; -+ } b; -+} hfir_data_t; -+ -+/** -+ * This union represents the bit fields in the Host Frame Remaing/Number -+ * Register. -+ */ -+typedef union hfnum_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ -+ /** register bits */ -+ struct -+ { -+ unsigned frnum : 16; -+#define DWC_HFNUM_MAX_FRNUM 0x3FFF -+ unsigned frrem : 16; -+ } b; -+} hfnum_data_t; -+ -+typedef union hptxsts_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ -+ /** register bits */ -+ struct -+ { -+ unsigned ptxfspcavail : 16; -+ unsigned ptxqspcavail : 8; -+ /** Top of the Periodic Transmit Request Queue -+ * - bit 24 - Terminate (last entry for the selected channel) -+ * - bits 26:25 - Token Type -+ * - 2'b00 - Zero length -+ * - 2'b01 - Ping -+ * - 2'b10 - Disable -+ * - bits 30:27 - Channel Number -+ * - bit 31 - Odd/even microframe -+ */ -+ unsigned ptxqtop_terminate : 1; -+ unsigned ptxqtop_token : 2; -+ unsigned ptxqtop_chnum : 4; -+ unsigned ptxqtop_odd : 1; -+ } b; -+} hptxsts_data_t; -+ -+/** -+ * This union represents the bit fields in the Host Port Control and Status -+ * Register. Read the register into the d32 member then set/clear the -+ * bits using the bit elements. Write the d32 member to the -+ * hprt0 register. -+ */ -+typedef union hprt0_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ unsigned prtconnsts : 1; -+ unsigned prtconndet : 1; -+ unsigned prtena : 1; -+ unsigned prtenchng : 1; -+ unsigned prtovrcurract : 1; -+ unsigned prtovrcurrchng : 1; -+ unsigned prtres : 1; -+ unsigned prtsusp : 1; -+ unsigned prtrst : 1; -+ unsigned reserved9 : 1; -+ unsigned prtlnsts : 2; -+ unsigned prtpwr : 1; -+ unsigned prttstctl : 4; -+ unsigned prtspd : 2; -+#define DWC_HPRT0_PRTSPD_HIGH_SPEED 0 -+#define DWC_HPRT0_PRTSPD_FULL_SPEED 1 -+#define DWC_HPRT0_PRTSPD_LOW_SPEED 2 -+ unsigned reserved19_31 : 13; -+ } b; -+} hprt0_data_t; -+ -+/** -+ * This union represents the bit fields in the Host All Interrupt -+ * Register. -+ */ -+typedef union haint_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ unsigned ch0 : 1; -+ unsigned ch1 : 1; -+ unsigned ch2 : 1; -+ unsigned ch3 : 1; -+ unsigned ch4 : 1; -+ unsigned ch5 : 1; -+ unsigned ch6 : 1; -+ unsigned ch7 : 1; -+ unsigned ch8 : 1; -+ unsigned ch9 : 1; -+ unsigned ch10 : 1; -+ unsigned ch11 : 1; -+ unsigned ch12 : 1; -+ unsigned ch13 : 1; -+ unsigned ch14 : 1; -+ unsigned ch15 : 1; -+ unsigned reserved : 16; -+ } b; -+ -+ struct -+ { -+ unsigned chint : 16; -+ unsigned reserved : 16; -+ } b2; -+} haint_data_t; -+ -+/** -+ * This union represents the bit fields in the Host All Interrupt -+ * Register. -+ */ -+typedef union haintmsk_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ unsigned ch0 : 1; -+ unsigned ch1 : 1; -+ unsigned ch2 : 1; -+ unsigned ch3 : 1; -+ unsigned ch4 : 1; -+ unsigned ch5 : 1; -+ unsigned ch6 : 1; -+ unsigned ch7 : 1; -+ unsigned ch8 : 1; -+ unsigned ch9 : 1; -+ unsigned ch10 : 1; -+ unsigned ch11 : 1; -+ unsigned ch12 : 1; -+ unsigned ch13 : 1; -+ unsigned ch14 : 1; -+ unsigned ch15 : 1; -+ unsigned reserved : 16; -+ } b; -+ -+ struct -+ { -+ unsigned chint : 16; -+ unsigned reserved : 16; -+ } b2; -+} haintmsk_data_t; -+ -+/** -+ * Host Channel Specific Registers. 
500h-5FCh -+ */ -+typedef struct dwc_otg_hc_regs -+{ -+ /** Host Channel 0 Characteristic Register. Offset: 500h + (chan_num * 20h) + 00h */ -+ volatile uint32_t hcchar; -+ /** Host Channel 0 Split Control Register. Offset: 500h + (chan_num * 20h) + 04h */ -+ volatile uint32_t hcsplt; -+ /** Host Channel 0 Interrupt Register. Offset: 500h + (chan_num * 20h) + 08h */ -+ volatile uint32_t hcint; -+ /** Host Channel 0 Interrupt Mask Register. Offset: 500h + (chan_num * 20h) + 0Ch */ -+ volatile uint32_t hcintmsk; -+ /** Host Channel 0 Transfer Size Register. Offset: 500h + (chan_num * 20h) + 10h */ -+ volatile uint32_t hctsiz; -+ /** Host Channel 0 DMA Address Register. Offset: 500h + (chan_num * 20h) + 14h */ -+ volatile uint32_t hcdma; -+ /** Reserved. Offset: 500h + (chan_num * 20h) + 18h - 500h + (chan_num * 20h) + 1Ch */ -+ uint32_t reserved[2]; -+} dwc_otg_hc_regs_t; -+ -+/** -+ * This union represents the bit fields in the Host Channel Characteristics -+ * Register. Read the register into the d32 member then set/clear the -+ * bits using the bit elements. Write the d32 member to the -+ * hcchar register. -+ */ -+typedef union hcchar_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ -+ /** register bits */ -+ struct -+ { -+ /** Maximum packet size in bytes */ -+ unsigned mps : 11; -+ -+ /** Endpoint number */ -+ unsigned epnum : 4; -+ -+ /** 0: OUT, 1: IN */ -+ unsigned epdir : 1; -+ -+ unsigned reserved : 1; -+ -+ /** 0: Full/high speed device, 1: Low speed device */ -+ unsigned lspddev : 1; -+ -+ /** 0: Control, 1: Isoc, 2: Bulk, 3: Intr */ -+ unsigned eptype : 2; -+ -+ /** Packets per frame for periodic transfers. 0 is reserved. */ -+ unsigned multicnt : 2; -+ -+ /** Device address */ -+ unsigned devaddr : 7; -+ -+ /** -+ * Frame to transmit periodic transaction. -+ * 0: even, 1: odd -+ */ -+ unsigned oddfrm : 1; -+ -+ /** Channel disable */ -+ unsigned chdis : 1; -+ -+ /** Channel enable */ -+ unsigned chen : 1; -+ } b; -+} hcchar_data_t; -+ -+typedef union hcsplt_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ -+ /** register bits */ -+ struct -+ { -+ /** Port Address */ -+ unsigned prtaddr : 7; -+ -+ /** Hub Address */ -+ unsigned hubaddr : 7; -+ -+ /** Transaction Position */ -+ unsigned xactpos : 2; -+#define DWC_HCSPLIT_XACTPOS_MID 0 -+#define DWC_HCSPLIT_XACTPOS_END 1 -+#define DWC_HCSPLIT_XACTPOS_BEGIN 2 -+#define DWC_HCSPLIT_XACTPOS_ALL 3 -+ -+ /** Do Complete Split */ -+ unsigned compsplt : 1; -+ -+ /** Reserved */ -+ unsigned reserved : 14; -+ -+ /** Split Enble */ -+ unsigned spltena : 1; -+ } b; -+} hcsplt_data_t; -+ -+ -+/** -+ * This union represents the bit fields in the Host All Interrupt -+ * Register. -+ */ -+typedef union hcint_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ /** register bits */ -+ struct -+ { -+ /** Transfer Complete */ -+ unsigned xfercomp : 1; -+ /** Channel Halted */ -+ unsigned chhltd : 1; -+ /** AHB Error */ -+ unsigned ahberr : 1; -+ /** STALL Response Received */ -+ unsigned stall : 1; -+ /** NAK Response Received */ -+ unsigned nak : 1; -+ /** ACK Response Received */ -+ unsigned ack : 1; -+ /** NYET Response Received */ -+ unsigned nyet : 1; -+ /** Transaction Err */ -+ unsigned xacterr : 1; -+ /** Babble Error */ -+ unsigned bblerr : 1; -+ /** Frame Overrun */ -+ unsigned frmovrun : 1; -+ /** Data Toggle Error */ -+ unsigned datatglerr : 1; -+ /** Reserved */ -+ unsigned reserved : 21; -+ } b; -+} hcint_data_t; -+ -+/** -+ * This union represents the bit fields in the Host Channel Transfer Size -+ * Register. 
Read the register into the d32 member then set/clear the -+ * bits using the bit elements. Write the d32 member to the -+ * hcchar register. -+ */ -+typedef union hctsiz_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ -+ /** register bits */ -+ struct -+ { -+ /** Total transfer size in bytes */ -+ unsigned xfersize : 19; -+ -+ /** Data packets to transfer */ -+ unsigned pktcnt : 10; -+ -+ /** -+ * Packet ID for next data packet -+ * 0: DATA0 -+ * 1: DATA2 -+ * 2: DATA1 -+ * 3: MDATA (non-Control), SETUP (Control) -+ */ -+ unsigned pid : 2; -+#define DWC_HCTSIZ_DATA0 0 -+#define DWC_HCTSIZ_DATA1 2 -+#define DWC_HCTSIZ_DATA2 1 -+#define DWC_HCTSIZ_MDATA 3 -+#define DWC_HCTSIZ_SETUP 3 -+ -+ /** Do PING protocol when 1 */ -+ unsigned dopng : 1; -+ } b; -+} hctsiz_data_t; -+ -+/** -+ * This union represents the bit fields in the Host Channel Interrupt Mask -+ * Register. Read the register into the d32 member then set/clear the -+ * bits using the bit elements. Write the d32 member to the -+ * hcintmsk register. -+ */ -+typedef union hcintmsk_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ -+ /** register bits */ -+ struct -+ { -+ unsigned xfercompl : 1; -+ unsigned chhltd : 1; -+ unsigned ahberr : 1; -+ unsigned stall : 1; -+ unsigned nak : 1; -+ unsigned ack : 1; -+ unsigned nyet : 1; -+ unsigned xacterr : 1; -+ unsigned bblerr : 1; -+ unsigned frmovrun : 1; -+ unsigned datatglerr : 1; -+ unsigned reserved : 21; -+ } b; -+} hcintmsk_data_t; -+ -+/** OTG Host Interface Structure. -+ * -+ * The OTG Host Interface Structure structure contains information -+ * needed to manage the DWC_otg controller acting in host mode. It -+ * represents the programming view of the host-specific aspects of the -+ * controller. -+ */ -+typedef struct dwc_otg_host_if -+{ -+ /** Host Global Registers starting at offset 400h.*/ -+ dwc_otg_host_global_regs_t *host_global_regs; -+#define DWC_OTG_HOST_GLOBAL_REG_OFFSET 0x400 -+ -+ /** Host Port 0 Control and Status Register */ -+ volatile uint32_t *hprt0; -+#define DWC_OTG_HOST_PORT_REGS_OFFSET 0x440 -+ -+ -+ /** Host Channel Specific Registers at offsets 500h-5FCh. */ -+ dwc_otg_hc_regs_t *hc_regs[MAX_EPS_CHANNELS]; -+#define DWC_OTG_HOST_CHAN_REGS_OFFSET 0x500 -+#define DWC_OTG_CHAN_REGS_OFFSET 0x20 -+ -+ -+ /* Host configuration information */ -+ /** Number of Host Channels (range: 1-16) */ -+ uint8_t num_host_channels; -+ /** Periodic EPs supported (0: no, 1: yes) */ -+ uint8_t perio_eps_supported; -+ /** Periodic Tx FIFO Size (Only 1 host periodic Tx FIFO) */ -+ uint16_t perio_tx_fifo_size; -+ -+} dwc_otg_host_if_t; -+ -+ -+/** -+ * This union represents the bit fields in the Power and Clock Gating Control -+ * Register. Read the register into the d32 member then set/clear the -+ * bits using the bit elements. 
-+ */ -+typedef union pcgcctl_data -+{ -+ /** raw register data */ -+ uint32_t d32; -+ -+ /** register bits */ -+ struct -+ { -+ /** Stop Pclk */ -+ unsigned stoppclk : 1; -+ /** Gate Hclk */ -+ unsigned gatehclk : 1; -+ /** Power Clamp */ -+ unsigned pwrclmp : 1; -+ /** Reset Power Down Modules */ -+ unsigned rstpdwnmodule : 1; -+ /** PHY Suspended */ -+ unsigned physuspended : 1; -+ -+ unsigned reserved : 27; -+ } b; -+} pcgcctl_data_t; -+ -+ -+#endif ---- /dev/null -+++ b/drivers/usb/dwc_otg/linux/dwc_otg_plat.h -@@ -0,0 +1,260 @@ -+/* ========================================================================== -+ * $File: //dwh/usb_iip/dev/software/otg/linux/platform/dwc_otg_plat.h $ -+ * $Revision: 1.2 $ -+ * $Date: 2008-11-21 05:39:16 $ -+ * $Change: 1064915 $ -+ * -+ * Synopsys HS OTG Linux Software Driver and documentation (hereinafter, -+ * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless -+ * otherwise expressly agreed to in writing between Synopsys and you. -+ * -+ * The Software IS NOT an item of Licensed Software or Licensed Product under -+ * any End User Software License Agreement or Agreement for Licensed Product -+ * with Synopsys or any supplement thereto. You are permitted to use and -+ * redistribute this Software in source and binary forms, with or without -+ * modification, provided that redistributions of source code must retain this -+ * notice. You may not view, use, disclose, copy or distribute this file or -+ * any information contained herein except pursuant to this license grant from -+ * Synopsys. If you do not agree with this notice, including the disclaimer -+ * below, then you are not authorized to use the Software. -+ * -+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT, -+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -+ * DAMAGE. -+ * ========================================================================== */ -+ -+#if !defined(__DWC_OTG_PLAT_H__) -+#define __DWC_OTG_PLAT_H__ -+ -+#include -+#include -+#include -+#include -+#include -+ -+/** -+ * @file -+ * -+ * This file contains the Platform Specific constants, interfaces -+ * (functions and macros) for Linux. -+ * -+ */ -+//#if !defined(__LINUX_ARM_ARCH__) -+//#error "The contents of this file is Linux specific!!!" -+//#endif -+ -+/** -+ * Reads the content of a register. -+ * -+ * @param reg address of register to read. -+ * @return contents of the register. -+ * -+ -+ * Usage:
-+ * uint32_t dev_ctl = dwc_read_reg32(&dev_regs->dctl); -+ */ -+static __inline__ uint32_t dwc_read_reg32( volatile uint32_t *reg) -+{ -+ return readl(reg); -+}; -+ -+/** -+ * Writes a register with a 32 bit value. -+ * -+ * @param reg address of register to read. -+ * @param value to write to _reg. -+ * -+ * Usage:
-+ * dwc_write_reg32(&dev_regs->dctl, 0); -+ */ -+static __inline__ void dwc_write_reg32( volatile uint32_t *reg, const uint32_t value) -+{ -+ writel( value, reg ); -+}; -+ -+/** -+ * This function modifies bit values in a register. Using the -+ * algorithm: (reg_contents & ~clear_mask) | set_mask. -+ * -+ * @param reg address of register to read. -+ * @param clear_mask bit mask to be cleared. -+ * @param set_mask bit mask to be set. -+ * -+ * Usage:
-+ * // Clear the SOF Interrupt Mask bit and
-+ * // set the OTG Interrupt mask bit, leaving all others as they were.
-+ * dwc_modify_reg32(&dev_regs->gintmsk, DWC_SOF_INT, DWC_OTG_INT);
-+ */ -+static __inline__ -+ void dwc_modify_reg32( volatile uint32_t *reg, const uint32_t clear_mask, const uint32_t set_mask) -+{ -+ writel( (readl(reg) & ~clear_mask) | set_mask, reg ); -+}; -+ -+ -+/** -+ * Wrapper for the OS micro-second delay function. -+ * @param[in] usecs Microseconds of delay -+ */ -+static __inline__ void UDELAY( const uint32_t usecs ) -+{ -+ udelay( usecs ); -+} -+ -+/** -+ * Wrapper for the OS milli-second delay function. -+ * @param[in] msecs milliseconds of delay -+ */ -+static __inline__ void MDELAY( const uint32_t msecs ) -+{ -+ mdelay( msecs ); -+} -+ -+/** -+ * Wrapper for the Linux spin_lock. On the ARM (Integrator) -+ * spin_lock() is a nop. -+ * -+ * @param lock Pointer to the spinlock. -+ */ -+static __inline__ void SPIN_LOCK( spinlock_t *lock ) -+{ -+ spin_lock(lock); -+} -+ -+/** -+ * Wrapper for the Linux spin_unlock. On the ARM (Integrator) -+ * spin_lock() is a nop. -+ * -+ * @param lock Pointer to the spinlock. -+ */ -+static __inline__ void SPIN_UNLOCK( spinlock_t *lock ) -+{ -+ spin_unlock(lock); -+} -+ -+/** -+ * Wrapper (macro) for the Linux spin_lock_irqsave. On the ARM -+ * (Integrator) spin_lock() is a nop. -+ * -+ * @param l Pointer to the spinlock. -+ * @param f unsigned long for irq flags storage. -+ */ -+#define SPIN_LOCK_IRQSAVE( l, f ) spin_lock_irqsave(l,f); -+ -+/** -+ * Wrapper (macro) for the Linux spin_unlock_irqrestore. On the ARM -+ * (Integrator) spin_lock() is a nop. -+ * -+ * @param l Pointer to the spinlock. -+ * @param f unsigned long for irq flags storage. -+ */ -+#define SPIN_UNLOCK_IRQRESTORE( l,f ) spin_unlock_irqrestore(l,f); -+ -+/* -+ * Debugging support vanishes in non-debug builds. -+ */ -+ -+ -+/** -+ * The Debug Level bit-mask variable. -+ */ -+extern uint32_t g_dbg_lvl; -+/** -+ * Set the Debug Level variable. -+ */ -+static inline uint32_t SET_DEBUG_LEVEL( const uint32_t new ) -+{ -+ uint32_t old = g_dbg_lvl; -+ g_dbg_lvl = new; -+ return old; -+} -+ -+/** When debug level has the DBG_CIL bit set, display CIL Debug messages. */ -+#define DBG_CIL (0x2) -+/** When debug level has the DBG_CILV bit set, display CIL Verbose debug -+ * messages */ -+#define DBG_CILV (0x20) -+/** When debug level has the DBG_PCD bit set, display PCD (Device) debug -+ * messages */ -+#define DBG_PCD (0x4) -+/** When debug level has the DBG_PCDV set, display PCD (Device) Verbose debug -+ * messages */ -+#define DBG_PCDV (0x40) -+/** When debug level has the DBG_HCD bit set, display Host debug messages */ -+#define DBG_HCD (0x8) -+/** When debug level has the DBG_HCDV bit set, display Verbose Host debug -+ * messages */ -+#define DBG_HCDV (0x80) -+/** When debug level has the DBG_HCD_URB bit set, display enqueued URBs in host -+ * mode. */ -+#define DBG_HCD_URB (0x800) -+ -+/** When debug level has any bit set, display debug messages */ -+#define DBG_ANY (0xFF) -+ -+/** All debug messages off */ -+#define DBG_OFF 0 -+ -+/** Prefix string for DWC_DEBUG print macros. */ -+#define USB_DWC "dwc_otg: " -+ -+/** -+ * Print a debug message when the Global debug level variable contains -+ * the bit defined in lvl. -+ * -+ * @param[in] lvl - Debug level, use one of the DBG_ constants above. -+ * @param[in] x - like printf -+ * -+ * Example:
-+ *
-+ * DWC_DEBUGPL( DBG_ANY, "%s(%p)\n", __func__, _reg_base_addr);
-+ *
-+ *
-+ * results in:
-+ * -+ * usb-DWC_otg: dwc_otg_cil_init(ca867000) -+ * -+ */ -+#ifdef DEBUG -+ -+# define DWC_DEBUGPL(lvl, x...) do{ if ((lvl)&g_dbg_lvl)printk( KERN_DEBUG USB_DWC x ); }while(0) -+# define DWC_DEBUGP(x...) DWC_DEBUGPL(DBG_ANY, x ) -+ -+# define CHK_DEBUG_LEVEL(level) ((level) & g_dbg_lvl) -+ -+#else -+ -+# define DWC_DEBUGPL(lvl, x...) do{}while(0) -+# define DWC_DEBUGP(x...) -+ -+# define CHK_DEBUG_LEVEL(level) (0) -+ -+#endif /*DEBUG*/ -+ -+/** -+ * Print an Error message. -+ */ -+#define DWC_ERROR(x...) printk( KERN_ERR USB_DWC x ) -+/** -+ * Print a Warning message. -+ */ -+#define DWC_WARN(x...) printk( KERN_WARNING USB_DWC x ) -+/** -+ * Print a notice (normal but significant message). -+ */ -+#define DWC_NOTICE(x...) printk( KERN_NOTICE USB_DWC x ) -+/** -+ * Basic message printing. -+ */ -+#define DWC_PRINT(x...) printk( KERN_INFO USB_DWC x ) -+ -+#endif -+ diff --git a/target/linux/ramips/patches-3.10/0203-MIPS-Fix-accessing-to-per-cpu-data-when-flushing-the.patch b/target/linux/ramips/patches-3.10/0203-MIPS-Fix-accessing-to-per-cpu-data-when-flushing-the.patch new file mode 100644 index 0000000000..fc853fa1bd --- /dev/null +++ b/target/linux/ramips/patches-3.10/0203-MIPS-Fix-accessing-to-per-cpu-data-when-flushing-the.patch @@ -0,0 +1,84 @@ +From 871d1be8c3ce46b8ef395b56cd0e37cede10e76a Mon Sep 17 00:00:00 2001 +From: Ralf Baechle +Date: Tue, 17 Sep 2013 12:44:31 +0200 +Subject: [PATCH 203/215] MIPS: Fix accessing to per-cpu data when flushing + the cache + +This fixes the following issue + +BUG: using smp_processor_id() in preemptible [00000000] code: kjournald/1761 +caller is blast_dcache32+0x30/0x254 +Call Trace: +[<8047f02c>] dump_stack+0x8/0x34 +[<802e7e40>] debug_smp_processor_id+0xe0/0xf0 +[<80114d94>] blast_dcache32+0x30/0x254 +[<80118484>] r4k_dma_cache_wback_inv+0x200/0x288 +[<80110ff0>] mips_dma_map_sg+0x108/0x180 +[<80355098>] ide_dma_prepare+0xf0/0x1b8 +[<8034eaa4>] do_rw_taskfile+0x1e8/0x33c +[<8035951c>] ide_do_rw_disk+0x298/0x3e4 +[<8034a3c4>] do_ide_request+0x2e0/0x704 +[<802bb0dc>] __blk_run_queue+0x44/0x64 +[<802be000>] queue_unplugged.isra.36+0x1c/0x54 +[<802beb94>] blk_flush_plug_list+0x18c/0x24c +[<802bec6c>] blk_finish_plug+0x18/0x48 +[<8026554c>] journal_commit_transaction+0x3b8/0x151c +[<80269648>] kjournald+0xec/0x238 +[<8014ac00>] kthread+0xb8/0xc0 +[<8010268c>] ret_from_kernel_thread+0x14/0x1c + +Caches in most systems are identical - but not always, so we can't avoid +the use of smp_call_function() by just looking at the boot CPU's data, +have to fiddle with preemption instead. 
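The fix that follows is simply the pattern of pinning the task to one CPU for the duration of the per-CPU cache operation. As a minimal, illustrative sketch (not taken from this patch; the wrapper name is made up), any path that ends up reading per-CPU cache state via smp_processor_id() — as the blast_dcache*() helpers do through current_cpu_data — has to sit between preempt_disable() and preempt_enable():

    #include <linux/preempt.h>
    #include <asm/r4kcache.h>

    /*
     * Hypothetical wrapper: write back and invalidate a range of the
     * local D-cache without tripping the debug_smp_processor_id()
     * check. preempt_disable() keeps the task on the current CPU, so
     * the per-CPU cache parameters used inside blast_dcache_range()
     * stay valid until preempt_enable().
     */
    static void local_dcache_wback_inv(unsigned long addr, unsigned long size)
    {
    	preempt_disable();
    	blast_dcache_range(addr, addr + size);
    	preempt_enable();
    }

The hunks below apply exactly this bracketing inline in r4k_dma_cache_wback_inv() and r4k_dma_cache_inv().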
+ +Signed-off-by: Ralf Baechle +Cc: Markos Chandras +Cc: linux-mips@linux-mips.org +Patchwork: https://patchwork.linux-mips.org/patch/5835 +(cherry picked from commit ff522058bd717506b2fa066fa564657f2b86477e) +--- + arch/mips/mm/c-r4k.c | 5 +++++ + 1 file changed, 5 insertions(+) + +--- a/arch/mips/mm/c-r4k.c ++++ b/arch/mips/mm/c-r4k.c +@@ -12,6 +12,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -601,6 +602,7 @@ static void r4k_dma_cache_wback_inv(unsi + /* Catch bad driver code */ + BUG_ON(size == 0); + ++ preempt_disable(); + if (cpu_has_inclusive_pcaches) { + if (size >= scache_size) + r4k_blast_scache(); +@@ -621,6 +623,7 @@ static void r4k_dma_cache_wback_inv(unsi + R4600_HIT_CACHEOP_WAR_IMPL; + blast_dcache_range(addr, addr + size); + } ++ preempt_enable(); + + bc_wback_inv(addr, size); + __sync(); +@@ -631,6 +634,7 @@ static void r4k_dma_cache_inv(unsigned l + /* Catch bad driver code */ + BUG_ON(size == 0); + ++ preempt_disable(); + if (cpu_has_inclusive_pcaches) { + if (size >= scache_size) + r4k_blast_scache(); +@@ -655,6 +659,7 @@ static void r4k_dma_cache_inv(unsigned l + R4600_HIT_CACHEOP_WAR_IMPL; + blast_inv_dcache_range(addr, addr + size); + } ++ preempt_enable(); + + bc_inv(addr, size); + __sync(); diff --git a/target/linux/ramips/patches-3.10/0203-owrt-MIPS-add-OWRTDTB-secion.patch b/target/linux/ramips/patches-3.10/0203-owrt-MIPS-add-OWRTDTB-secion.patch deleted file mode 100644 index 71d8fe9982..0000000000 --- a/target/linux/ramips/patches-3.10/0203-owrt-MIPS-add-OWRTDTB-secion.patch +++ /dev/null @@ -1,52 +0,0 @@ -From c174d2250e402399ad7dbdd57d51883d8804bba0 Mon Sep 17 00:00:00 2001 -From: John Crispin -Date: Mon, 15 Jul 2013 00:40:37 +0200 -Subject: [PATCH 31/33] owrt: MIPS: add OWRTDTB secion - -Signed-off-by: John Crispin ---- - arch/mips/kernel/head.S | 3 +++ - arch/mips/ralink/Makefile | 2 +- - arch/mips/ralink/of.c | 4 +++- - 3 files changed, 7 insertions(+), 2 deletions(-) - ---- a/arch/mips/kernel/head.S -+++ b/arch/mips/kernel/head.S -@@ -146,6 +146,9 @@ EXPORT(__image_cmdline) - .fill 0x400 - #endif /* CONFIG_IMAGE_CMDLINE_HACK */ - -+ .ascii "OWRTDTB:" -+ EXPORT(__image_dtb) -+ .fill 0x4000 - __REF - - NESTED(kernel_entry, 16, sp) # kernel entry point ---- a/arch/mips/ralink/Makefile -+++ b/arch/mips/ralink/Makefile -@@ -21,4 +21,4 @@ obj-$(CONFIG_EARLY_PRINTK) += early_prin - - obj-$(CONFIG_DEBUG_FS) += bootrom.o - --obj-y += dts/ -+#obj-y += dts/ ---- a/arch/mips/ralink/of.c -+++ b/arch/mips/ralink/of.c -@@ -77,6 +77,8 @@ void __init device_tree_init(void) - //free_bootmem(base, size); - } - -+extern struct boot_param_header __image_dtb; -+ - void __init plat_mem_setup(void) - { - set_io_port_base(KSEG1); -@@ -85,7 +87,7 @@ void __init plat_mem_setup(void) - * Load the builtin devicetree. 
This causes the chosen node to be - * parsed resulting in our memory appearing - */ -- __dt_setup_arch(&__dtb_start); -+ __dt_setup_arch(&__image_dtb); - - if (soc_info.mem_size) - add_memory_region(soc_info.mem_base, soc_info.mem_size * SZ_1M, diff --git a/target/linux/ramips/patches-3.10/0203-serial-rt5350-fix-enable-uartf-kernel-panic.patch b/target/linux/ramips/patches-3.10/0203-serial-rt5350-fix-enable-uartf-kernel-panic.patch deleted file mode 100644 index a687caabee..0000000000 --- a/target/linux/ramips/patches-3.10/0203-serial-rt5350-fix-enable-uartf-kernel-panic.patch +++ /dev/null @@ -1,11 +0,0 @@ ---- a/drivers/tty/serial/8250/8250_core.c -+++ b/drivers/tty/serial/8250/8250_core.c -@@ -2499,7 +2499,7 @@ serial8250_pm(struct uart_port *port, un - static unsigned int serial8250_port_size(struct uart_8250_port *pt) - { - if (pt->port.iotype == UPIO_AU) -- return 0x1000; -+ return 0x100; - if (is_omap1_8250(pt)) - return 0x16 << pt->port.regshift; - diff --git a/target/linux/ramips/patches-3.10/0204-MIPS-74K-1074K-Correct-erratum-workaround.patch b/target/linux/ramips/patches-3.10/0204-MIPS-74K-1074K-Correct-erratum-workaround.patch new file mode 100644 index 0000000000..0a80ea5f79 --- /dev/null +++ b/target/linux/ramips/patches-3.10/0204-MIPS-74K-1074K-Correct-erratum-workaround.patch @@ -0,0 +1,60 @@ +From 3da3528448850ccde412d52fb939575641ada80d Mon Sep 17 00:00:00 2001 +From: "Maciej W. Rozycki" +Date: Wed, 18 Sep 2013 19:08:15 +0100 +Subject: [PATCH 204/215] MIPS: 74K/1074K: Correct erratum workaround. + +Make sure 74K revision numbers are not applied to the 1074K. Also catch +invalid usage. + +Signed-off-by: Maciej W. Rozycki +Cc: Steven J. Hill +Cc: Leonid Yegoshin +Cc: linux-mips@linux-mips.org +Patchwork: https://patchwork.linux-mips.org/patch/5857/ +Signed-off-by: Ralf Baechle +(cherry picked from commit 9213ad77070ea75fc3a5e43e3d9e9c4146e4930a) +--- + arch/mips/mm/c-r4k.c | 26 ++++++++++++++++++-------- + 1 file changed, 18 insertions(+), 8 deletions(-) + +--- a/arch/mips/mm/c-r4k.c ++++ b/arch/mips/mm/c-r4k.c +@@ -785,20 +785,30 @@ static inline void rm7k_erratum31(void) + + static inline void alias_74k_erratum(struct cpuinfo_mips *c) + { ++ unsigned int imp = c->processor_id & 0xff00; ++ unsigned int rev = c->processor_id & PRID_REV_MASK; ++ + /* + * Early versions of the 74K do not update the cache tags on a + * vtag miss/ptag hit which can occur in the case of KSEG0/KUSEG + * aliases. In this case it is better to treat the cache as always + * having aliases. 
+ */ +- if ((c->processor_id & 0xff) <= PRID_REV_ENCODE_332(2, 4, 0)) +- c->dcache.flags |= MIPS_CACHE_VTAG; +- if ((c->processor_id & 0xff) == PRID_REV_ENCODE_332(2, 4, 0)) +- write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND); +- if (((c->processor_id & 0xff00) == PRID_IMP_1074K) && +- ((c->processor_id & 0xff) <= PRID_REV_ENCODE_332(1, 1, 0))) { +- c->dcache.flags |= MIPS_CACHE_VTAG; +- write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND); ++ switch (imp) { ++ case PRID_IMP_74K: ++ if (rev <= PRID_REV_ENCODE_332(2, 4, 0)) ++ c->dcache.flags |= MIPS_CACHE_VTAG; ++ if (rev == PRID_REV_ENCODE_332(2, 4, 0)) ++ write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND); ++ break; ++ case PRID_IMP_1074K: ++ if (rev <= PRID_REV_ENCODE_332(1, 1, 0)) { ++ c->dcache.flags |= MIPS_CACHE_VTAG; ++ write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND); ++ } ++ break; ++ default: ++ BUG(); + } + } + diff --git a/target/linux/ramips/patches-3.10/0204-owrt-mtd-split-remove-padding.patch b/target/linux/ramips/patches-3.10/0204-owrt-mtd-split-remove-padding.patch deleted file mode 100644 index 9c5a728995..0000000000 --- a/target/linux/ramips/patches-3.10/0204-owrt-mtd-split-remove-padding.patch +++ /dev/null @@ -1,13 +0,0 @@ ---- a/drivers/mtd/mtdpart.c -+++ b/drivers/mtd/mtdpart.c -@@ -805,10 +805,6 @@ static void split_uimage(struct mtd_info - return; - - len = be32_to_cpu(hdr.size) + 0x40; -- len = mtd_pad_erasesize(master, part->offset, len); -- if (len + master->erasesize > part->mtd.size) -- return; -- - __mtd_add_partition(master, "rootfs", part->offset + len, - part->mtd.size - len, false); - } diff --git a/target/linux/ramips/patches-3.10/0205-MIPS-GIC-Send-IPIs-using-the-GIC.patch b/target/linux/ramips/patches-3.10/0205-MIPS-GIC-Send-IPIs-using-the-GIC.patch new file mode 100644 index 0000000000..01ac8757e3 --- /dev/null +++ b/target/linux/ramips/patches-3.10/0205-MIPS-GIC-Send-IPIs-using-the-GIC.patch @@ -0,0 +1,101 @@ +From 5a43b20db2fd18f8ea5f3a919d4bc9d9c2038c6c Mon Sep 17 00:00:00 2001 +From: "Steven J. Hill" +Date: Wed, 25 Sep 2013 14:58:19 -0500 +Subject: [PATCH 205/215] MIPS: GIC: Send IPIs using the GIC. + +If a GIC present, then use it to send IPIs between the cores. + +Signed-off-by: Steven J. 
Hill +--- + arch/mips/kernel/smp-mt.c | 32 ++++++++++++++++++++++++++++++++ + 1 file changed, 32 insertions(+) + +--- a/arch/mips/kernel/smp-mt.c ++++ b/arch/mips/kernel/smp-mt.c +@@ -71,6 +71,7 @@ static unsigned int __init smvp_vpe_init + + /* Record this as available CPU */ + set_cpu_possible(tc, true); ++ set_cpu_present(tc, true); + __cpu_number_map[tc] = ++ncpu; + __cpu_logical_map[ncpu] = tc; + } +@@ -112,12 +113,35 @@ static void __init smvp_tc_init(unsigned + write_tc_c0_tchalt(TCHALT_H); + } + ++static void mp_send_ipi_single(int cpu, unsigned int action) ++{ ++ unsigned long flags; ++ ++ local_irq_save(flags); ++ ++ switch (action) { ++ case SMP_CALL_FUNCTION: ++ gic_send_ipi(plat_ipi_call_int_xlate(cpu)); ++ break; ++ ++ case SMP_RESCHEDULE_YOURSELF: ++ gic_send_ipi(plat_ipi_resched_int_xlate(cpu)); ++ break; ++ } ++ ++ local_irq_restore(flags); ++} ++ + static void vsmp_send_ipi_single(int cpu, unsigned int action) + { + int i; + unsigned long flags; + int vpflags; + ++ if (gic_present) { ++ mp_send_ipi_single(cpu, action); ++ return; ++ } + local_irq_save(flags); + + vpflags = dvpe(); /* can't access the other CPU's registers whilst MVPE enabled */ +@@ -164,6 +188,8 @@ static void __cpuinit vsmp_init_secondar + + static void __cpuinit vsmp_smp_finish(void) + { ++ pr_debug("SMPMT: CPU%d: vsmp_smp_finish\n", smp_processor_id()); ++ + /* CDFIXME: remove this? */ + write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ)); + +@@ -178,6 +204,7 @@ static void __cpuinit vsmp_smp_finish(vo + + static void vsmp_cpus_done(void) + { ++ pr_debug("SMPMT: CPU%d: vsmp_cpus_done\n", smp_processor_id()); + } + + /* +@@ -191,6 +218,8 @@ static void vsmp_cpus_done(void) + static void __cpuinit vsmp_boot_secondary(int cpu, struct task_struct *idle) + { + struct thread_info *gp = task_thread_info(idle); ++ pr_debug("SMPMT: CPU%d: vsmp_boot_secondary cpu %d\n", ++ smp_processor_id(), cpu); + dvpe(); + set_c0_mvpcontrol(MVPCONTROL_VPC); + +@@ -232,6 +261,7 @@ static void __init vsmp_smp_setup(void) + unsigned int mvpconf0, ntc, tc, ncpu = 0; + unsigned int nvpe; + ++ pr_debug("SMPMT: CPU%d: vsmp_smp_setup\n", smp_processor_id()); + #ifdef CONFIG_MIPS_MT_FPAFF + /* If we have an FPU, enroll ourselves in the FPU-full mask */ + if (cpu_has_fpu) +@@ -272,6 +302,8 @@ static void __init vsmp_smp_setup(void) + + static void __init vsmp_prepare_cpus(unsigned int max_cpus) + { ++ pr_debug("SMPMT: CPU%d: vsmp_prepare_cpus %d\n", ++ smp_processor_id(), max_cpus); + mips_mt_set_cpuoptions(); + } + diff --git a/target/linux/ramips/patches-3.10/0205-uvc-add-iPassion-iP2970-support.patch b/target/linux/ramips/patches-3.10/0205-uvc-add-iPassion-iP2970-support.patch deleted file mode 100644 index 5e3484e9e1..0000000000 --- a/target/linux/ramips/patches-3.10/0205-uvc-add-iPassion-iP2970-support.patch +++ /dev/null @@ -1,247 +0,0 @@ -From be8d5b55f93b8ccb3a6b5cfb1e858a59aeca2d6c Mon Sep 17 00:00:00 2001 -From: John Crispin -Date: Thu, 19 Sep 2013 01:50:59 +0200 -Subject: [PATCH] uvc: add iPassion iP2970 support - -Signed-off-by: John Crispin ---- - drivers/media/usb/uvc/uvc_driver.c | 12 +++++++++ - drivers/media/usb/uvc/uvc_status.c | 2 ++ - drivers/media/usb/uvc/uvc_v4l2.c | 1 + - drivers/media/usb/uvc/uvc_video.c | 50 +++++++++++++++++++++++++++++++----- - drivers/media/usb/uvc/uvcvideo.h | 3 +++ - 5 files changed, 61 insertions(+), 7 deletions(-) - ---- a/drivers/media/usb/uvc/uvc_driver.c -+++ b/drivers/media/usb/uvc/uvc_driver.c -@@ -2420,6 +2420,20 @@ static struct usb_device_id uvc_ids[] = - 
.bInterfaceProtocol = 0, - .driver_info = UVC_QUIRK_PROBE_MINMAX - | UVC_QUIRK_IGNORE_SELECTOR_UNIT }, -+ -+/* iPassion iP2970 */ -+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE -+ | USB_DEVICE_ID_MATCH_INT_INFO, -+ .idVendor = 0x1B3B, -+ .idProduct = 0x2970, -+ .bInterfaceClass = USB_CLASS_VIDEO, -+ .bInterfaceSubClass = 1, -+ .bInterfaceProtocol = 0, -+ .driver_info = UVC_QUIRK_PROBE_MINMAX -+ | UVC_QUIRK_STREAM_NO_FID -+ | UVC_QUIRK_MOTION -+ | UVC_QUIRK_SINGLE_ISO }, -+ - /* Generic USB Video Class */ - { USB_INTERFACE_INFO(USB_CLASS_VIDEO, 1, 0) }, - {} ---- a/drivers/media/usb/uvc/uvc_status.c -+++ b/drivers/media/usb/uvc/uvc_status.c -@@ -139,6 +139,7 @@ static void uvc_status_complete(struct u - switch (dev->status[0] & 0x0f) { - case UVC_STATUS_TYPE_CONTROL: - uvc_event_control(dev, dev->status, len); -+ dev->motion = 1; - break; - - case UVC_STATUS_TYPE_STREAMING: -@@ -182,6 +183,7 @@ int uvc_status_init(struct uvc_device *d - } - - pipe = usb_rcvintpipe(dev->udev, ep->desc.bEndpointAddress); -+ dev->motion = 0; - - /* For high-speed interrupt endpoints, the bInterval value is used as - * an exponent of two. Some developers forgot about it. ---- a/drivers/media/usb/uvc/uvc_video.c -+++ b/drivers/media/usb/uvc/uvc_video.c -@@ -21,6 +21,11 @@ - #include - #include - #include -+#include -+#include -+#include -+#include -+#include - - #include - -@@ -1074,9 +1079,149 @@ static void uvc_video_decode_data(struct - } - } - -+struct bh_priv { -+ unsigned long seen; -+}; -+ -+struct bh_event { -+ const char *name; -+ struct sk_buff *skb; -+ struct work_struct work; -+}; -+ -+#define BH_ERR(fmt, args...) printk(KERN_ERR "%s: " fmt, "webcam", ##args ) -+#define BH_DBG(fmt, args...) do {} while (0) -+#define BH_SKB_SIZE 2048 -+ -+extern u64 uevent_next_seqnum(void); -+static int seen = 0; -+ -+static int bh_event_add_var(struct bh_event *event, int argv, -+ const char *format, ...) 
-+{ -+ static char buf[128]; -+ char *s; -+ va_list args; -+ int len; -+ -+ if (argv) -+ return 0; -+ -+ va_start(args, format); -+ len = vsnprintf(buf, sizeof(buf), format, args); -+ va_end(args); -+ -+ if (len >= sizeof(buf)) { -+ BH_ERR("buffer size too small\n"); -+ WARN_ON(1); -+ return -ENOMEM; -+ } -+ -+ s = skb_put(event->skb, len + 1); -+ strcpy(s, buf); -+ -+ BH_DBG("added variable '%s'\n", s); -+ -+ return 0; -+} -+ -+static int motion_hotplug_fill_event(struct bh_event *event) -+{ -+ int s = jiffies; -+ int ret; -+ -+ if (!seen) -+ seen = jiffies; -+ -+ ret = bh_event_add_var(event, 0, "HOME=%s", "/"); -+ if (ret) -+ return ret; -+ -+ ret = bh_event_add_var(event, 0, "PATH=%s", -+ "/sbin:/bin:/usr/sbin:/usr/bin"); -+ if (ret) -+ return ret; -+ -+ ret = bh_event_add_var(event, 0, "SUBSYSTEM=usb"); -+ if (ret) -+ return ret; -+ -+ ret = bh_event_add_var(event, 0, "ACTION=motion"); -+ if (ret) -+ return ret; -+ -+ ret = bh_event_add_var(event, 0, "SEEN=%d", s - seen); -+ if (ret) -+ return ret; -+ seen = s; -+ -+ ret = bh_event_add_var(event, 0, "SEQNUM=%llu", uevent_next_seqnum()); -+ -+ return ret; -+} -+ -+static void motion_hotplug_work(struct work_struct *work) -+{ -+ struct bh_event *event = container_of(work, struct bh_event, work); -+ int ret = 0; -+ -+ event->skb = alloc_skb(BH_SKB_SIZE, GFP_KERNEL); -+ if (!event->skb) -+ goto out_free_event; -+ -+ ret = bh_event_add_var(event, 0, "%s@", "add"); -+ if (ret) -+ goto out_free_skb; -+ -+ ret = motion_hotplug_fill_event(event); -+ if (ret) -+ goto out_free_skb; -+ -+ NETLINK_CB(event->skb).dst_group = 1; -+ broadcast_uevent(event->skb, 0, 1, GFP_KERNEL); -+ -+out_free_skb: -+ if (ret) { -+ BH_ERR("work error %d\n", ret); -+ kfree_skb(event->skb); -+ } -+out_free_event: -+ kfree(event); -+} -+ -+static int motion_hotplug_create_event(void) -+{ -+ struct bh_event *event; -+ -+ event = kzalloc(sizeof(*event), GFP_KERNEL); -+ if (!event) -+ return -ENOMEM; -+ -+ event->name = "motion"; -+ -+ INIT_WORK(&event->work, (void *)(void *)motion_hotplug_work); -+ schedule_work(&event->work); -+ -+ return 0; -+} -+ -+#define MOTION_FLAG_OFFSET 4 - static void uvc_video_decode_end(struct uvc_streaming *stream, - struct uvc_buffer *buf, const __u8 *data, int len) - { -+ if ((stream->dev->quirks & UVC_QUIRK_MOTION) && -+ (data[len - 2] == 0xff) && (data[len - 1] == 0xd9)) { -+ u8 *mem; -+ buf->state = UVC_BUF_STATE_READY; -+ mem = (u8 *) (buf->mem + MOTION_FLAG_OFFSET); -+ if ( stream->dev->motion ) { -+ stream->dev->motion = 0; -+ motion_hotplug_create_event(); -+ } else { -+ *mem &= 0x7f; -+ } -+ } -+ - /* Mark the buffer as done if the EOF marker is set. 
*/ - if (data[1] & UVC_STREAM_EOF && buf->bytesused != 0) { - uvc_trace(UVC_TRACE_FRAME, "Frame complete (EOF found).\n"); -@@ -1477,6 +1622,8 @@ static int uvc_init_video_isoc(struct uv - if (npackets == 0) - return -ENOMEM; - -+ if (stream->dev->quirks & UVC_QUIRK_SINGLE_ISO) -+ npackets = 1; - size = npackets * psize; - - for (i = 0; i < UVC_URBS; ++i) { ---- a/drivers/media/usb/uvc/uvcvideo.h -+++ b/drivers/media/usb/uvc/uvcvideo.h -@@ -137,6 +137,8 @@ - #define UVC_QUIRK_FIX_BANDWIDTH 0x00000080 - #define UVC_QUIRK_PROBE_DEF 0x00000100 - #define UVC_QUIRK_RESTRICT_FRAME_RATE 0x00000200 -+#define UVC_QUIRK_MOTION 0x00000400 -+#define UVC_QUIRK_SINGLE_ISO 0x00000800 - - /* Format flags */ - #define UVC_FMT_FLAG_COMPRESSED 0x00000001 -@@ -538,6 +540,7 @@ struct uvc_device { - __u8 *status; - struct input_dev *input; - char input_phys[64]; -+ int motion; - }; - - enum uvc_handle_state { diff --git a/target/linux/ramips/patches-3.10/0206-MIPS-ralink-add-MT7621-support.patch b/target/linux/ramips/patches-3.10/0206-MIPS-ralink-add-MT7621-support.patch new file mode 100644 index 0000000000..74fdf1d787 --- /dev/null +++ b/target/linux/ramips/patches-3.10/0206-MIPS-ralink-add-MT7621-support.patch @@ -0,0 +1,723 @@ +From 259ce690b20562aa5dfef711e72ed02a4f514ce4 Mon Sep 17 00:00:00 2001 +From: John Crispin +Date: Sun, 16 Mar 2014 05:19:37 +0000 +Subject: [PATCH 206/215] MIPS: ralink: add MT7621 support + +Signed-off-by: John Crispin +--- + arch/mips/include/asm/gic.h | 2 + + arch/mips/include/asm/mach-ralink/mt7621.h | 39 ++++ + arch/mips/kernel/vmlinux.lds.S | 1 + + arch/mips/ralink/Kconfig | 18 ++ + arch/mips/ralink/Makefile | 7 +- + arch/mips/ralink/Platform | 7 + + arch/mips/ralink/irq-gic.c | 271 ++++++++++++++++++++++++++++ + arch/mips/ralink/malta-amon.c | 81 +++++++++ + arch/mips/ralink/mt7621.c | 183 +++++++++++++++++++ + 9 files changed, 608 insertions(+), 1 deletion(-) + create mode 100644 arch/mips/include/asm/mach-ralink/mt7621.h + create mode 100644 arch/mips/ralink/irq-gic.c + create mode 100644 arch/mips/ralink/malta-amon.c + create mode 100644 arch/mips/ralink/mt7621.c + +--- a/arch/mips/include/asm/gic.h ++++ b/arch/mips/include/asm/gic.h +@@ -19,7 +19,11 @@ + #define GIC_TRIG_EDGE 1 + #define GIC_TRIG_LEVEL 0 + ++#define GIC_NUM_INTRS 64 ++ ++#ifndef GIC_NUM_INTRS + #define GIC_NUM_INTRS (24 + NR_CPUS * 2) ++#endif + + #define MSK(n) ((1 << (n)) - 1) + #define REG32(addr) (*(volatile unsigned int *) (addr)) +--- /dev/null ++++ b/arch/mips/include/asm/mach-ralink/mt7621.h +@@ -0,0 +1,39 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published ++ * by the Free Software Foundation. 
++ * ++ * Parts of this file are based on Ralink's 2.6.21 BSP ++ * ++ * Copyright (C) 2008-2011 Gabor Juhos ++ * Copyright (C) 2008 Imre Kaloz ++ * Copyright (C) 2013 John Crispin ++ */ ++ ++#ifndef _MT7621_REGS_H_ ++#define _MT7621_REGS_H_ ++ ++#define MT7621_SYSC_BASE 0x1E000000 ++ ++#define SYSC_REG_CHIP_NAME0 0x00 ++#define SYSC_REG_CHIP_NAME1 0x04 ++#define SYSC_REG_CHIP_REV 0x0c ++#define SYSC_REG_SYSTEM_CONFIG0 0x10 ++#define SYSC_REG_SYSTEM_CONFIG1 0x14 ++ ++#define CHIP_REV_PKG_MASK 0x1 ++#define CHIP_REV_PKG_SHIFT 16 ++#define CHIP_REV_VER_MASK 0xf ++#define CHIP_REV_VER_SHIFT 8 ++#define CHIP_REV_ECO_MASK 0xf ++ ++#define MT7621_DRAM_BASE 0x0 ++#define MT7621_DDR2_SIZE_MIN 32 ++#define MT7621_DDR2_SIZE_MAX 256 ++ ++#define MT7621_CHIP_NAME0 0x3637544D ++#define MT7621_CHIP_NAME1 0x20203132 ++ ++#define MIPS_GIC_IRQ_BASE (MIPS_CPU_IRQ_BASE + 8) ++ ++#endif +--- a/arch/mips/kernel/vmlinux.lds.S ++++ b/arch/mips/kernel/vmlinux.lds.S +@@ -51,6 +51,7 @@ SECTIONS + /* read-only */ + _text = .; /* Text and read-only data */ + .text : { ++ /*. = . + 0x8000; */ + TEXT_TEXT + SCHED_TEXT + LOCK_TEXT +--- a/arch/mips/ralink/Kconfig ++++ b/arch/mips/ralink/Kconfig +@@ -7,6 +7,11 @@ config CLKEVT_RT3352 + select CLKSRC_OF + select CLKSRC_MMIO + ++config IRQ_INTC ++ bool ++ default y ++ depends on !SOC_MT7621 ++ + choice + prompt "Ralink SoC selection" + default SOC_RT305X +@@ -35,6 +40,15 @@ choice + select USB_ARCH_HAS_EHCI + select HW_HAS_PCI + ++ config SOC_MT7621 ++ bool "MT7621" ++ select MIPS_CPU_SCACHE ++ select SYS_SUPPORTS_MULTITHREADING ++ select SYS_SUPPORTS_SMP ++ select SYS_SUPPORTS_MIPS_CMP ++ select IRQ_GIC ++ select HW_HAS_PCI ++ + endchoice + + choice +@@ -62,6 +76,10 @@ choice + bool "MT7620A eval kit" + depends on SOC_MT7620 + ++ config DTB_MT7621_EVAL ++ bool "MT7621 eval kit" ++ depends on SOC_MT7621 ++ + endchoice + + endif +--- a/arch/mips/ralink/Makefile ++++ b/arch/mips/ralink/Makefile +@@ -6,16 +6,21 @@ + # Copyright (C) 2009-2011 Gabor Juhos + # Copyright (C) 2013 John Crispin + +-obj-y := prom.o of.o reset.o clk.o irq.o timer.o ++obj-y := prom.o of.o reset.o clk.o timer.o + + obj-$(CONFIG_CLKEVT_RT3352) += cevt-rt3352.o + + obj-$(CONFIG_RALINK_ILL_ACC) += ill_acc.o + ++obj-$(CONFIG_IRQ_INTC) += irq.o ++obj-$(CONFIG_IRQ_GIC) += irq-gic.o ++obj-$(CONFIG_MIPS_MT_SMP) += malta-amon.o ++ + obj-$(CONFIG_SOC_RT288X) += rt288x.o + obj-$(CONFIG_SOC_RT305X) += rt305x.o + obj-$(CONFIG_SOC_RT3883) += rt3883.o + obj-$(CONFIG_SOC_MT7620) += mt7620.o ++obj-$(CONFIG_SOC_MT7621) += mt7621.o + + obj-$(CONFIG_EARLY_PRINTK) += early_printk.o + +--- a/arch/mips/ralink/Platform ++++ b/arch/mips/ralink/Platform +@@ -26,3 +26,10 @@ cflags-$(CONFIG_SOC_RT3883) += -I$(srctr + # Ralink MT7620 + # + load-$(CONFIG_SOC_MT7620) += 0xffffffff80000000 ++cflags-$(CONFIG_SOC_MT7620) += -I$(srctree)/arch/mips/include/asm/mach-ralink/mt7620 ++ ++# ++# Ralink MT7621 ++# ++load-$(CONFIG_SOC_MT7621) += 0xffffffff80001000 ++cflags-$(CONFIG_SOC_MT7620) += -I$(srctree)/arch/mips/include/asm/mach-ralink/mt7621 +--- /dev/null ++++ b/arch/mips/ralink/irq-gic.c +@@ -0,0 +1,271 @@ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++#include ++#include ++ ++#include ++#include ++ ++#include ++ ++unsigned long _gcmp_base; ++static int gic_resched_int_base = 56; ++static int gic_call_int_base = 60; ++static struct irq_chip *irq_gic; ++static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS]; ++ ++#if 
defined(CONFIG_MIPS_MT_SMP) ++static int gic_resched_int_base; ++static int gic_call_int_base; ++ ++#define GIC_RESCHED_INT(cpu) (gic_resched_int_base+(cpu)) ++#define GIC_CALL_INT(cpu) (gic_call_int_base+(cpu)) ++ ++static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id) ++{ ++ scheduler_ipi(); ++ ++ return IRQ_HANDLED; ++} ++ ++static irqreturn_t ++ipi_call_interrupt(int irq, void *dev_id) ++{ ++ smp_call_function_interrupt(); ++ ++ return IRQ_HANDLED; ++} ++ ++static struct irqaction irq_resched = { ++ .handler = ipi_resched_interrupt, ++ .flags = IRQF_DISABLED|IRQF_PERCPU, ++ .name = "ipi resched" ++}; ++ ++static struct irqaction irq_call = { ++ .handler = ipi_call_interrupt, ++ .flags = IRQF_DISABLED|IRQF_PERCPU, ++ .name = "ipi call" ++}; ++ ++#endif ++ ++static void __init ++gic_fill_map(void) ++{ ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(gic_intr_map); i++) { ++ gic_intr_map[i].cpunum = 0; ++ gic_intr_map[i].pin = GIC_CPU_INT0; ++ gic_intr_map[i].polarity = GIC_POL_POS; ++ gic_intr_map[i].trigtype = GIC_TRIG_LEVEL; ++ gic_intr_map[i].flags = GIC_FLAG_IPI; ++ } ++ ++#if defined(CONFIG_MIPS_MT_SMP) ++ { ++ int cpu; ++ ++ gic_call_int_base = ARRAY_SIZE(gic_intr_map) - nr_cpu_ids; ++ gic_resched_int_base = gic_call_int_base - nr_cpu_ids; ++ ++ i = gic_resched_int_base; ++ ++ for (cpu = 0; cpu < nr_cpu_ids; cpu++) { ++ gic_intr_map[i + cpu].cpunum = cpu; ++ gic_intr_map[i + cpu].pin = GIC_CPU_INT1; ++ gic_intr_map[i + cpu].trigtype = GIC_TRIG_EDGE; ++ ++ gic_intr_map[i + cpu + nr_cpu_ids].cpunum = cpu; ++ gic_intr_map[i + cpu + nr_cpu_ids].pin = GIC_CPU_INT2; ++ gic_intr_map[i + cpu + nr_cpu_ids].trigtype = GIC_TRIG_EDGE; ++ } ++ } ++#endif ++} ++ ++void ++gic_irq_ack(struct irq_data *d) ++{ ++ int irq = (d->irq - gic_irq_base); ++ ++ GIC_CLR_INTR_MASK(irq); ++ ++ if (gic_irq_flags[irq] & GIC_TRIG_EDGE) ++ GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), irq); ++} ++ ++void ++gic_finish_irq(struct irq_data *d) ++{ ++ GIC_SET_INTR_MASK(d->irq - gic_irq_base); ++} ++ ++void __init ++gic_platform_init(int irqs, struct irq_chip *irq_controller) ++{ ++ irq_gic = irq_controller; ++} ++ ++static void ++gic_irqdispatch(void) ++{ ++ unsigned int irq = gic_get_int(); ++ ++ if (likely(irq < GIC_NUM_INTRS)) ++ do_IRQ(MIPS_GIC_IRQ_BASE + irq); ++ else { ++ pr_err("Spurious GIC Interrupt!\n"); ++ spurious_interrupt(); ++ } ++ ++} ++ ++static void ++vi_timer_irqdispatch(void) ++{ ++ do_IRQ(cp0_compare_irq); ++} ++ ++#if defined(CONFIG_MIPS_MT_SMP) ++unsigned int ++plat_ipi_call_int_xlate(unsigned int cpu) ++{ ++ return GIC_CALL_INT(cpu); ++} ++ ++unsigned int ++plat_ipi_resched_int_xlate(unsigned int cpu) ++{ ++ return GIC_RESCHED_INT(cpu); ++} ++#endif ++ ++asmlinkage void ++plat_irq_dispatch(void) ++{ ++ unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM; ++ ++ if (unlikely(!pending)) { ++ pr_err("Spurious CP0 Interrupt!\n"); ++ spurious_interrupt(); ++ } else { ++ if (pending & CAUSEF_IP7) ++ do_IRQ(cp0_compare_irq); ++ ++ if (pending & (CAUSEF_IP4 | CAUSEF_IP3 | CAUSEF_IP2)) ++ gic_irqdispatch(); ++ } ++} ++ ++unsigned int __cpuinit ++get_c0_compare_int(void) ++{ ++ return CP0_LEGACY_COMPARE_IRQ; ++} ++ ++static int ++gic_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) ++{ ++ irq_set_chip_and_handler(irq, irq_gic, ++#if defined(CONFIG_MIPS_MT_SMP) ++ (hw >= gic_resched_int_base) ? 
++ handle_percpu_irq : ++#endif ++ handle_level_irq); ++ ++ return 0; ++} ++ ++static const struct irq_domain_ops irq_domain_ops = { ++ .xlate = irq_domain_xlate_onecell, ++ .map = gic_map, ++}; ++ ++static int __init ++of_gic_init(struct device_node *node, ++ struct device_node *parent) ++{ ++ struct irq_domain *domain; ++ struct resource gcmp = { 0 }, gic = { 0 }; ++ unsigned int gic_rev; ++ int i; ++ ++ if (of_address_to_resource(node, 0, &gic)) ++ panic("Failed to get gic memory range"); ++ if (request_mem_region(gic.start, resource_size(&gic), ++ gic.name) < 0) ++ panic("Failed to request gic memory"); ++ if (of_address_to_resource(node, 2, &gcmp)) ++ panic("Failed to get gic memory range"); ++ if (request_mem_region(gcmp.start, resource_size(&gcmp), ++ gcmp.name) < 0) ++ panic("Failed to request gcmp memory"); ++ ++ _gcmp_base = (unsigned long) ioremap_nocache(gcmp.start, resource_size(&gcmp)); ++ if (!_gcmp_base) ++ panic("Failed to remap gcmp memory\n"); ++ ++ if ((GCMPGCB(GCMPB) & GCMP_GCB_GCMPB_GCMPBASE_MSK) != gcmp.start) ++ panic("Failed to find gcmp core\n"); ++ ++ /* tell the gcmp where to find the gic */ ++ GCMPGCB(GICBA) = gic.start | GCMP_GCB_GICBA_EN_MSK; ++ gic_present = 1; ++ if (cpu_has_vint) { ++ set_vi_handler(2, gic_irqdispatch); ++ set_vi_handler(3, gic_irqdispatch); ++ set_vi_handler(4, gic_irqdispatch); ++ set_vi_handler(7, vi_timer_irqdispatch); ++ } ++ ++ gic_fill_map(); ++ ++ gic_init(gic.start, resource_size(&gic), gic_intr_map, ++ ARRAY_SIZE(gic_intr_map), MIPS_GIC_IRQ_BASE); ++ ++ GICREAD(GIC_REG(SHARED, GIC_SH_REVISIONID), gic_rev); ++ pr_info("gic: revision %d.%d\n", (gic_rev >> 8) & 0xff, gic_rev & 0xff); ++ ++ domain = irq_domain_add_legacy(node, GIC_NUM_INTRS, MIPS_GIC_IRQ_BASE, ++ 0, &irq_domain_ops, NULL); ++ if (!domain) ++ panic("Failed to add irqdomain"); ++ ++#if defined(CONFIG_MIPS_MT_SMP) ++ for (i = 0; i < nr_cpu_ids; i++) { ++ setup_irq(MIPS_GIC_IRQ_BASE + GIC_RESCHED_INT(i), &irq_resched); ++ setup_irq(MIPS_GIC_IRQ_BASE + GIC_CALL_INT(i), &irq_call); ++ } ++#endif ++ ++ change_c0_status(ST0_IM, STATUSF_IP7 | STATUSF_IP4 | STATUSF_IP3 | ++ STATUSF_IP2); ++ return 0; ++} ++ ++static struct of_device_id __initdata of_irq_ids[] = { ++ { .compatible = "mti,cpu-interrupt-controller", .data = mips_cpu_intc_init }, ++ { .compatible = "ralink,mt7621-gic", .data = of_gic_init }, ++ {}, ++}; ++ ++void __init ++arch_init_irq(void) ++{ ++ of_irq_init(of_irq_ids); ++} +--- /dev/null ++++ b/arch/mips/ralink/malta-amon.c +@@ -0,0 +1,81 @@ ++/* ++ * Copyright (C) 2007 MIPS Technologies, Inc. ++ * All rights reserved. ++ ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 
++ * ++ * Arbitrary Monitor interface ++ */ ++ ++#include ++#include ++#include ++ ++#include ++#include ++#include ++ ++int amon_cpu_avail(int cpu) ++{ ++ struct cpulaunch *launch = (struct cpulaunch *)CKSEG0ADDR(CPULAUNCH); ++ ++ if (cpu < 0 || cpu >= NCPULAUNCH) { ++ pr_debug("avail: cpu%d is out of range\n", cpu); ++ return 0; ++ } ++ ++ launch += cpu; ++ if (!(launch->flags & LAUNCH_FREADY)) { ++ pr_debug("avail: cpu%d is not ready\n", cpu); ++ return 0; ++ } ++ if (launch->flags & (LAUNCH_FGO|LAUNCH_FGONE)) { ++ pr_debug("avail: too late.. cpu%d is already gone\n", cpu); ++ return 0; ++ } ++ ++ return 1; ++} ++ ++void amon_cpu_start(int cpu, ++ unsigned long pc, unsigned long sp, ++ unsigned long gp, unsigned long a0) ++{ ++ volatile struct cpulaunch *launch = ++ (struct cpulaunch *)CKSEG0ADDR(CPULAUNCH); ++ ++ if (!amon_cpu_avail(cpu)) ++ return; ++ if (cpu == smp_processor_id()) { ++ pr_debug("launch: I am cpu%d!\n", cpu); ++ return; ++ } ++ launch += cpu; ++ ++ pr_debug("launch: starting cpu%d\n", cpu); ++ ++ launch->pc = pc; ++ launch->gp = gp; ++ launch->sp = sp; ++ launch->a0 = a0; ++ ++ smp_wmb(); /* Target must see parameters before go */ ++ launch->flags |= LAUNCH_FGO; ++ smp_wmb(); /* Target must see go before we poll */ ++ ++ while ((launch->flags & LAUNCH_FGONE) == 0) ++ ; ++ smp_rmb(); /* Target will be updating flags soon */ ++ pr_debug("launch: cpu%d gone!\n", cpu); ++} +--- /dev/null ++++ b/arch/mips/ralink/mt7621.c +@@ -0,0 +1,183 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published ++ * by the Free Software Foundation. ++ * ++ * Parts of this file are based on Ralink's 2.6.21 BSP ++ * ++ * Copyright (C) 2008-2011 Gabor Juhos ++ * Copyright (C) 2008 Imre Kaloz ++ * Copyright (C) 2013 John Crispin ++ */ ++ ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++ ++#include ++ ++#include "common.h" ++ ++#define SYSC_REG_SYSCFG 0x10 ++#define SYSC_REG_CPLL_CLKCFG0 0x2c ++#define SYSC_REG_CUR_CLK_STS 0x44 ++#define CPU_CLK_SEL (BIT(30) | BIT(31)) ++ ++#define MT7621_GPIO_MODE_UART1 1 ++#define MT7621_GPIO_MODE_I2C 2 ++#define MT7621_GPIO_MODE_UART2 3 ++#define MT7621_GPIO_MODE_UART3 5 ++#define MT7621_GPIO_MODE_JTAG 7 ++#define MT7621_GPIO_MODE_WDT_MASK 0x3 ++#define MT7621_GPIO_MODE_WDT_SHIFT 8 ++#define MT7621_GPIO_MODE_WDT_GPIO 1 ++#define MT7621_GPIO_MODE_PCIE_RST 0 ++#define MT7621_GPIO_MODE_PCIE_REF 2 ++#define MT7621_GPIO_MODE_PCIE_MASK 0x3 ++#define MT7621_GPIO_MODE_PCIE_SHIFT 10 ++#define MT7621_GPIO_MODE_PCIE_GPIO 1 ++#define MT7621_GPIO_MODE_MDIO 12 ++#define MT7621_GPIO_MODE_RGMII1 14 ++#define MT7621_GPIO_MODE_RGMII2 15 ++#define MT7621_GPIO_MODE_SPI_MASK 0x3 ++#define MT7621_GPIO_MODE_SPI_SHIFT 16 ++#define MT7621_GPIO_MODE_SPI_GPIO 1 ++#define MT7621_GPIO_MODE_SDHCI_MASK 0x3 ++#define MT7621_GPIO_MODE_SDHCI_SHIFT 18 ++#define MT7621_GPIO_MODE_SDHCI_GPIO 1 ++ ++static struct rt2880_pmx_func uart1_grp[] = { FUNC("uart1", 0, 1, 2) }; ++static struct rt2880_pmx_func i2c_grp[] = { FUNC("i2c", 0, 3, 2) }; ++static struct rt2880_pmx_func uart3_grp[] = { FUNC("uart3", 0, 5, 4) }; ++static struct rt2880_pmx_func uart2_grp[] = { FUNC("uart2", 0, 9, 4) }; ++static struct rt2880_pmx_func jtag_grp[] = { FUNC("jtag", 0, 13, 5) }; ++static struct rt2880_pmx_func wdt_grp[] = { ++ FUNC("wdt rst", 0, 18, 1), ++ FUNC("wdt refclk", 2, 18, 1), ++}; ++static struct rt2880_pmx_func pcie_rst_grp[] = { ++ FUNC("pcie rst", 
MT7621_GPIO_MODE_PCIE_RST, 19, 1), ++ FUNC("pcie refclk", MT7621_GPIO_MODE_PCIE_REF, 19, 1) ++}; ++static struct rt2880_pmx_func mdio_grp[] = { FUNC("mdio", 0, 20, 2) }; ++static struct rt2880_pmx_func rgmii2_grp[] = { FUNC("rgmii2", 0, 22, 12) }; ++static struct rt2880_pmx_func spi_grp[] = { ++ FUNC("spi", 0, 34, 7), ++ FUNC("nand", 2, 34, 8), ++}; ++static struct rt2880_pmx_func sdhci_grp[] = { ++ FUNC("sdhci", 0, 41, 8), ++ FUNC("nand", 2, 41, 8), ++}; ++static struct rt2880_pmx_func rgmii1_grp[] = { FUNC("rgmii1", 0, 49, 12) }; ++ ++static struct rt2880_pmx_group mt7621_pinmux_data[] = { ++ GRP("uart1", uart1_grp, 1, MT7621_GPIO_MODE_UART1), ++ GRP("i2c", i2c_grp, 1, MT7621_GPIO_MODE_I2C), ++ GRP("uart3", uart2_grp, 1, MT7621_GPIO_MODE_UART2), ++ GRP("uart2", uart3_grp, 1, MT7621_GPIO_MODE_UART3), ++ GRP("jtag", jtag_grp, 1, MT7621_GPIO_MODE_JTAG), ++ GRP_G("wdt", wdt_grp, MT7621_GPIO_MODE_WDT_MASK, ++ MT7621_GPIO_MODE_WDT_GPIO, MT7621_GPIO_MODE_WDT_SHIFT), ++ GRP_G("pcie", pcie_rst_grp, MT7621_GPIO_MODE_PCIE_MASK, ++ MT7621_GPIO_MODE_PCIE_GPIO, MT7621_GPIO_MODE_PCIE_SHIFT), ++ GRP("mdio", mdio_grp, 1, MT7621_GPIO_MODE_MDIO), ++ GRP("rgmii2", rgmii2_grp, 1, MT7621_GPIO_MODE_RGMII2), ++ GRP_G("spi", spi_grp, MT7621_GPIO_MODE_SPI_MASK, ++ MT7621_GPIO_MODE_SPI_GPIO, MT7621_GPIO_MODE_SPI_SHIFT), ++ GRP_G("sdhci", sdhci_grp, MT7621_GPIO_MODE_SDHCI_MASK, ++ MT7621_GPIO_MODE_SDHCI_GPIO, MT7621_GPIO_MODE_SDHCI_SHIFT), ++ GRP("rgmii1", rgmii1_grp, 1, MT7621_GPIO_MODE_RGMII1), ++ { 0 } ++}; ++ ++void __init ralink_clk_init(void) ++{ ++ int cpu_fdiv = 0; ++ int cpu_ffrac = 0; ++ int fbdiv = 0; ++ u32 clk_sts, syscfg; ++ u8 clk_sel = 0, xtal_mode; ++ u32 cpu_clk; ++ ++ if ((rt_sysc_r32(SYSC_REG_CPLL_CLKCFG0) & CPU_CLK_SEL) != 0) ++ clk_sel = 1; ++ ++ switch (clk_sel) { ++ case 0: ++ clk_sts = rt_sysc_r32(SYSC_REG_CUR_CLK_STS); ++ cpu_fdiv = ((clk_sts >> 8) & 0x1F); ++ cpu_ffrac = (clk_sts & 0x1F); ++ cpu_clk = (500 * cpu_ffrac / cpu_fdiv) * 1000 * 1000; ++ break; ++ ++ case 1: ++ fbdiv = ((rt_sysc_r32(0x648) >> 4) & 0x7F) + 1; ++ syscfg = rt_sysc_r32(SYSC_REG_SYSCFG); ++ xtal_mode = (syscfg >> 6) & 0x7; ++ if(xtal_mode >= 6) { //25Mhz Xtal ++ cpu_clk = 25 * fbdiv * 1000 * 1000; ++ } else if(xtal_mode >=3) { //40Mhz Xtal ++ cpu_clk = 40 * fbdiv * 1000 * 1000; ++ } else { // 20Mhz Xtal ++ cpu_clk = 20 * fbdiv * 1000 * 1000; ++ } ++ break; ++ } ++ cpu_clk = 880000000; ++ ralink_clk_add("cpu", cpu_clk); ++ ralink_clk_add("1e000b00.spi", 50000000); ++ ralink_clk_add("1e000c00.uartlite", 50000000); ++ ralink_clk_add("1e000d00.uart", 50000000); ++} ++ ++void __init ralink_of_remap(void) ++{ ++ rt_sysc_membase = plat_of_remap_node("mtk,mt7621-sysc"); ++ rt_memc_membase = plat_of_remap_node("mtk,mt7621-memc"); ++ ++ if (!rt_sysc_membase || !rt_memc_membase) ++ panic("Failed to remap core resources"); ++} ++ ++void prom_soc_init(struct ralink_soc_info *soc_info) ++{ ++ void __iomem *sysc = (void __iomem *) KSEG1ADDR(MT7621_SYSC_BASE); ++ unsigned char *name = NULL; ++ u32 n0; ++ u32 n1; ++ u32 rev; ++ ++ n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0); ++ n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1); ++ ++ if (n0 == MT7621_CHIP_NAME0 && n1 == MT7621_CHIP_NAME1) { ++ name = "MT7621"; ++ soc_info->compatible = "mtk,mt7621-soc"; ++ } else { ++ panic("mt7621: unknown SoC, n0:%08x n1:%08x\n", n0, n1); ++ } ++ ++ rev = __raw_readl(sysc + SYSC_REG_CHIP_REV); ++ ++ snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN, ++ "Mediatek %s ver:%u eco:%u", ++ name, ++ (rev >> CHIP_REV_VER_SHIFT) & CHIP_REV_VER_MASK, ++ (rev & 
CHIP_REV_ECO_MASK)); ++ ++ soc_info->mem_size_min = MT7621_DDR2_SIZE_MIN; ++ soc_info->mem_size_max = MT7621_DDR2_SIZE_MAX; ++ soc_info->mem_base = MT7621_DRAM_BASE; ++ ++ rt2880_pinmux_data = mt7621_pinmux_data; ++ ++ if (register_cmp_smp_ops()) ++ panic("failed to register_vsmp_smp_ops()"); ++} +--- /dev/null ++++ b/arch/mips/include/asm/mach-ralink/irq.h +@@ -0,0 +1,9 @@ ++#ifndef __ASM_MACH_RALINK_IRQ_H ++#define __ASM_MACH_RALINK_IRQ_H ++ ++#define GIC_NUM_INTRS 64 ++#define NR_IRQS 256 ++ ++#include_next ++ ++#endif diff --git a/target/linux/ramips/patches-3.10/0206-MTD-add-chunked-read-io-to-m25p80.patch b/target/linux/ramips/patches-3.10/0206-MTD-add-chunked-read-io-to-m25p80.patch deleted file mode 100644 index b4d64ea50c..0000000000 --- a/target/linux/ramips/patches-3.10/0206-MTD-add-chunked-read-io-to-m25p80.patch +++ /dev/null @@ -1,160 +0,0 @@ -From 926ae0ca5017a421709ab0478582683c29988b05 Mon Sep 17 00:00:00 2001 -From: John Crispin -Date: Wed, 27 Nov 2013 20:58:16 +0100 -Subject: [PATCH 10/20] MTD: add chunked read io to m25p80 - -Signed-off-by: John Crispin ---- - drivers/mtd/devices/m25p80.c | 127 ++++++++++++++++++++++++++++++++++++++++++ - 1 file changed, 127 insertions(+) - ---- a/drivers/mtd/devices/m25p80.c -+++ b/drivers/mtd/devices/m25p80.c -@@ -392,6 +392,57 @@ static int m25p80_read(struct mtd_info * - return 0; - } - -+static int m25p80_read_chunked(struct mtd_info *mtd, loff_t from, size_t len, -+ size_t *retlen, u_char *buf) -+{ -+ struct m25p *flash = mtd_to_m25p(mtd); -+ struct spi_transfer t[2]; -+ struct spi_message m; -+ uint8_t opcode; -+ int idx = 0; -+ -+ pr_debug("%s: %s from 0x%08x, len %zd\n", dev_name(&flash->spi->dev), -+ __func__, (u32)from, len); -+ -+ spi_message_init(&m); -+ memset(t, 0, (sizeof t)); -+ -+ t[0].tx_buf = flash->command; -+ t[0].len = m25p_cmdsz(flash); -+ spi_message_add_tail(&t[0], &m); -+ spi_message_add_tail(&t[1], &m); -+ -+ while (idx < len) { -+ int rlen = (len - idx > 4) ? (4) : (len - idx); -+ -+ t[1].rx_buf = &buf[idx]; -+ t[1].len = rlen; -+ -+ mutex_lock(&flash->lock); -+ -+ /* Wait till previous write/erase is done. */ -+ if (wait_till_ready(flash)) { -+ /* REVISIT status return?? */ -+ mutex_unlock(&flash->lock); -+ return 1; -+ } -+ -+ /* Set up the write data buffer. */ -+ opcode = OPCODE_NORM_READ; -+ flash->command[0] = opcode; -+ m25p_addr2cmd(flash, from + idx, flash->command); -+ -+ spi_sync(flash->spi, &m); -+ -+ *retlen = m.actual_length - m25p_cmdsz(flash) - -+ (flash->fast_read ? 1 : 0); -+ -+ mutex_unlock(&flash->lock); -+ idx += rlen; -+ } -+ return 0; -+} -+ - /* - * Write an address range to the flash chip. Data must be written in - * FLASH_PAGESIZE chunks. The address range may be any size provided -@@ -479,6 +530,76 @@ static int m25p80_write(struct mtd_info - return 0; - } - -+static int m25p80_write_chunked(struct mtd_info *mtd, loff_t to, size_t len, -+ size_t *retlen, const u_char *buf) -+{ -+ struct m25p *flash = mtd_to_m25p(mtd); -+ struct spi_transfer t; -+ struct spi_message m; -+ u32 i, page_size; -+ u8 tmp[8]; -+ -+ pr_debug("%s: %s to 0x%08x, len %zd\n", dev_name(&flash->spi->dev), -+ __func__, (u32)to, len); -+ -+ spi_message_init(&m); -+ memset(&t, 0, (sizeof t)); -+ -+ t.tx_buf = tmp; -+ t.len = 8; -+ spi_message_add_tail(&t, &m); -+ -+ mutex_lock(&flash->lock); -+ -+ /* Wait until finished previous write command. */ -+ if (wait_till_ready(flash)) { -+ mutex_unlock(&flash->lock); -+ return 1; -+ } -+ -+ write_enable(flash); -+ -+ /* Set up the opcode in the write buffer. 
*/ -+ flash->command[0] = OPCODE_PP; -+ m25p_addr2cmd(flash, to, flash->command); -+ -+ t.len = 4 + (to & 0x3); -+ if (t.len == 4) -+ t.len = 8; -+ memcpy(tmp, flash->command, 4); -+ memcpy(&tmp[4], buf, t.len - 4); -+ spi_sync(flash->spi, &m); -+ page_size = t.len - 4; -+ -+ *retlen = m.actual_length - m25p_cmdsz(flash); -+ -+ /* write everything in flash->page_size chunks */ -+ for (i = page_size; i < len; i += page_size) { -+ page_size = len - i; -+ if (page_size > 4) -+ page_size = 4; -+ -+ /* write the next page to flash */ -+ m25p_addr2cmd(flash, to + i, flash->command); -+ -+ memcpy(tmp, flash->command, 4); -+ memcpy(&tmp[4], buf + i, page_size); -+ t.len = 4 + page_size; -+ -+ wait_till_ready(flash); -+ -+ write_enable(flash); -+ -+ spi_sync(flash->spi, &m); -+ -+ *retlen += m.actual_length - m25p_cmdsz(flash); -+ } -+ -+ mutex_unlock(&flash->lock); -+ -+ return 0; -+} -+ - static int sst_write(struct mtd_info *mtd, loff_t to, size_t len, - size_t *retlen, const u_char *buf) - { -@@ -1058,6 +1179,12 @@ static int m25p_probe(struct spi_device - flash->fast_read = true; - #endif - -+ if (np && of_property_read_bool(np, "m25p,chunked-io")) { -+ dev_warn(&spi->dev, "using chunked io\n"); -+ flash->mtd._read = m25p80_read_chunked; -+ flash->mtd._write = m25p80_write_chunked; -+ } -+ - #ifdef CONFIG_M25PXX_USE_FAST_READ - flash->fast_read = true; - #endif diff --git a/target/linux/ramips/patches-3.10/0207-MIPS-ralink-add-MT7621-defconfig.patch b/target/linux/ramips/patches-3.10/0207-MIPS-ralink-add-MT7621-defconfig.patch new file mode 100644 index 0000000000..1dc97266b9 --- /dev/null +++ b/target/linux/ramips/patches-3.10/0207-MIPS-ralink-add-MT7621-defconfig.patch @@ -0,0 +1,211 @@ +From 29b1c70ab171609fee58ef6642086d571c0ba0c2 Mon Sep 17 00:00:00 2001 +From: John Crispin +Date: Mon, 27 Jan 2014 13:12:41 +0000 +Subject: [PATCH 207/215] MIPS: ralink: add MT7621 defconfig + +Signed-off-by: John Crispin +--- + arch/mips/configs/mt7621_defconfig | 197 ++++++++++++++++++++++++++++++++++++ + 1 file changed, 197 insertions(+) + create mode 100644 arch/mips/configs/mt7621_defconfig + +--- /dev/null ++++ b/arch/mips/configs/mt7621_defconfig +@@ -0,0 +1,197 @@ ++# CONFIG_LOCALVERSION_AUTO is not set ++CONFIG_SYSVIPC=y ++CONFIG_HIGH_RES_TIMERS=y ++CONFIG_RCU_FANOUT=32 ++CONFIG_UIDGID_STRICT_TYPE_CHECKS=y ++CONFIG_BLK_DEV_INITRD=y ++CONFIG_INITRAMFS_SOURCE="/openwrt/trunk/build_dir/target-mipsel_24kec+dsp_uClibc-0.9.33.2/root-ramips /openwrt/trunk/target/linux/generic/image/initramfs-base-files.txt" ++CONFIG_INITRAMFS_ROOT_UID=1000 ++CONFIG_INITRAMFS_ROOT_GID=1000 ++# CONFIG_RD_GZIP is not set ++CONFIG_CC_OPTIMIZE_FOR_SIZE=y ++# CONFIG_AIO is not set ++CONFIG_EMBEDDED=y ++# CONFIG_VM_EVENT_COUNTERS is not set ++# CONFIG_SLUB_DEBUG is not set ++# CONFIG_COMPAT_BRK is not set ++CONFIG_MODULES=y ++CONFIG_MODULE_UNLOAD=y ++# CONFIG_BLK_DEV_BSG is not set ++CONFIG_PARTITION_ADVANCED=y ++# CONFIG_IOSCHED_CFQ is not set ++CONFIG_SMP=y ++CONFIG_NR_CPUS=4 ++CONFIG_SCHED_SMT=y ++# CONFIG_COMPACTION is not set ++# CONFIG_CROSS_MEMORY_ATTACH is not set ++# CONFIG_SECCOMP is not set ++CONFIG_HZ_100=y ++CONFIG_CMDLINE_BOOL=y ++CONFIG_CMDLINE="rootfstype=squashfs,jffs2" ++# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set ++CONFIG_NET=y ++CONFIG_PACKET=y ++CONFIG_UNIX=y ++CONFIG_INET=y ++CONFIG_IP_MULTICAST=y ++CONFIG_IP_ADVANCED_ROUTER=y ++CONFIG_IP_MULTIPLE_TABLES=y ++CONFIG_IP_ROUTE_MULTIPATH=y ++CONFIG_IP_ROUTE_VERBOSE=y ++CONFIG_IP_MROUTE=y ++CONFIG_IP_MROUTE_MULTIPLE_TABLES=y ++CONFIG_ARPD=y 
++CONFIG_SYN_COOKIES=y ++# CONFIG_INET_XFRM_MODE_TRANSPORT is not set ++# CONFIG_INET_XFRM_MODE_TUNNEL is not set ++# CONFIG_INET_XFRM_MODE_BEET is not set ++# CONFIG_INET_LRO is not set ++# CONFIG_INET_DIAG is not set ++CONFIG_TCP_CONG_ADVANCED=y ++# CONFIG_TCP_CONG_BIC is not set ++# CONFIG_TCP_CONG_WESTWOOD is not set ++# CONFIG_TCP_CONG_HTCP is not set ++CONFIG_IPV6_PRIVACY=y ++# CONFIG_INET6_XFRM_MODE_TRANSPORT is not set ++# CONFIG_INET6_XFRM_MODE_TUNNEL is not set ++# CONFIG_INET6_XFRM_MODE_BEET is not set ++# CONFIG_IPV6_SIT is not set ++CONFIG_IPV6_MULTIPLE_TABLES=y ++CONFIG_IPV6_SUBTREES=y ++CONFIG_IPV6_MROUTE=y ++CONFIG_NETFILTER=y ++# CONFIG_BRIDGE_NETFILTER is not set ++CONFIG_NF_CONNTRACK=m ++CONFIG_NF_CONNTRACK_FTP=m ++CONFIG_NF_CONNTRACK_IRC=m ++CONFIG_NETFILTER_XT_MARK=m ++CONFIG_NETFILTER_XT_TARGET_LOG=m ++CONFIG_NETFILTER_XT_TARGET_NOTRACK=m ++CONFIG_NETFILTER_XT_TARGET_TCPMSS=m ++CONFIG_NETFILTER_XT_MATCH_COMMENT=m ++CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m ++CONFIG_NETFILTER_XT_MATCH_LIMIT=m ++CONFIG_NETFILTER_XT_MATCH_MAC=m ++CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m ++CONFIG_NETFILTER_XT_MATCH_STATE=m ++CONFIG_NETFILTER_XT_MATCH_TIME=m ++CONFIG_NF_CONNTRACK_IPV4=m ++# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set ++CONFIG_IP_NF_IPTABLES=m ++CONFIG_IP_NF_FILTER=m ++CONFIG_IP_NF_TARGET_REJECT=m ++CONFIG_NF_NAT_IPV4=m ++CONFIG_IP_NF_TARGET_MASQUERADE=m ++CONFIG_IP_NF_TARGET_REDIRECT=m ++CONFIG_IP_NF_MANGLE=m ++CONFIG_IP_NF_RAW=m ++CONFIG_NF_CONNTRACK_IPV6=m ++CONFIG_IP6_NF_IPTABLES=m ++CONFIG_IP6_NF_MATCH_AH=m ++CONFIG_IP6_NF_MATCH_EUI64=m ++CONFIG_IP6_NF_MATCH_FRAG=m ++CONFIG_IP6_NF_MATCH_OPTS=m ++CONFIG_IP6_NF_MATCH_IPV6HEADER=m ++CONFIG_IP6_NF_MATCH_MH=m ++CONFIG_IP6_NF_MATCH_RT=m ++CONFIG_IP6_NF_FILTER=m ++CONFIG_IP6_NF_TARGET_REJECT=m ++CONFIG_IP6_NF_MANGLE=m ++CONFIG_IP6_NF_RAW=m ++CONFIG_BRIDGE=m ++# CONFIG_BRIDGE_IGMP_SNOOPING is not set ++CONFIG_VLAN_8021Q=y ++CONFIG_NET_SCHED=y ++CONFIG_NET_SCH_FQ_CODEL=y ++CONFIG_HAMRADIO=y ++CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" ++# CONFIG_FIRMWARE_IN_KERNEL is not set ++CONFIG_MTD=y ++CONFIG_MTD_CMDLINE_PARTS=y ++CONFIG_MTD_BLOCK=y ++CONFIG_MTD_CFI=y ++CONFIG_MTD_CFI_AMDSTD=y ++CONFIG_MTD_COMPLEX_MAPPINGS=y ++CONFIG_MTD_PHYSMAP=y ++CONFIG_MTD_M25P80=y ++CONFIG_EEPROM_93CX6=m ++CONFIG_SCSI=y ++CONFIG_BLK_DEV_SD=y ++CONFIG_NETDEVICES=y ++# CONFIG_NET_PACKET_ENGINE is not set ++# CONFIG_NET_VENDOR_WIZNET is not set ++CONFIG_PHYLIB=y ++CONFIG_SWCONFIG=y ++CONFIG_PPP=m ++CONFIG_PPP_FILTER=y ++CONFIG_PPP_MULTILINK=y ++CONFIG_PPPOE=m ++CONFIG_PPP_ASYNC=m ++CONFIG_ISDN=y ++# CONFIG_INPUT is not set ++# CONFIG_SERIO is not set ++# CONFIG_VT is not set ++# CONFIG_LEGACY_PTYS is not set ++# CONFIG_DEVKMEM is not set ++CONFIG_SERIAL_8250=y ++# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set ++CONFIG_SERIAL_8250_CONSOLE=y ++# CONFIG_SERIAL_8250_PCI is not set ++CONFIG_SERIAL_8250_RUNTIME_UARTS=2 ++CONFIG_SPI=y ++CONFIG_GPIOLIB=y ++CONFIG_GPIO_SYSFS=y ++# CONFIG_HWMON is not set ++CONFIG_WATCHDOG=y ++CONFIG_WATCHDOG_CORE=y ++# CONFIG_VGA_ARB is not set ++CONFIG_USB=y ++CONFIG_USB_XHCI_HCD=y ++CONFIG_USB_XHCI_PLATFORM=y ++CONFIG_USB_MT7621_XHCI_PLATFORM=y ++CONFIG_USB_STORAGE=y ++CONFIG_USB_PHY=y ++CONFIG_NEW_LEDS=y ++CONFIG_LEDS_CLASS=y ++CONFIG_LEDS_GPIO=m ++CONFIG_LEDS_TRIGGERS=y ++CONFIG_LEDS_TRIGGER_TIMER=y ++CONFIG_LEDS_TRIGGER_DEFAULT_ON=y ++CONFIG_STAGING=y ++CONFIG_USB_DWC2=m ++# CONFIG_IOMMU_SUPPORT is not set ++CONFIG_RESET_CONTROLLER=y ++# CONFIG_FIRMWARE_MEMMAP is not set ++# CONFIG_DNOTIFY is not set ++# CONFIG_PROC_PAGE_MONITOR 
is not set ++CONFIG_TMPFS=y ++CONFIG_TMPFS_XATTR=y ++CONFIG_JFFS2_FS=y ++CONFIG_JFFS2_SUMMARY=y ++CONFIG_JFFS2_FS_XATTR=y ++# CONFIG_JFFS2_FS_POSIX_ACL is not set ++# CONFIG_JFFS2_FS_SECURITY is not set ++CONFIG_JFFS2_COMPRESSION_OPTIONS=y ++# CONFIG_JFFS2_ZLIB is not set ++CONFIG_SQUASHFS=y ++# CONFIG_SQUASHFS_ZLIB is not set ++CONFIG_SQUASHFS_XZ=y ++CONFIG_PRINTK_TIME=y ++# CONFIG_ENABLE_MUST_CHECK is not set ++CONFIG_FRAME_WARN=1024 ++CONFIG_MAGIC_SYSRQ=y ++CONFIG_STRIP_ASM_SYMS=y ++# CONFIG_UNUSED_SYMBOLS is not set ++CONFIG_DEBUG_FS=y ++# CONFIG_SCHED_DEBUG is not set ++CONFIG_DEBUG_INFO=y ++CONFIG_DEBUG_INFO_REDUCED=y ++CONFIG_RCU_CPU_STALL_TIMEOUT=60 ++# CONFIG_FTRACE is not set ++CONFIG_CRYPTO_ARC4=m ++# CONFIG_CRYPTO_ANSI_CPRNG is not set ++# CONFIG_VIRTUALIZATION is not set ++CONFIG_CRC_ITU_T=m ++CONFIG_CRC32_SARWATE=y ++# CONFIG_XZ_DEC_X86 is not set ++CONFIG_AVERAGE=y diff --git a/target/linux/ramips/patches-3.10/0208-MIPS-ralink-add-MT7621-dts-file.patch b/target/linux/ramips/patches-3.10/0208-MIPS-ralink-add-MT7621-dts-file.patch new file mode 100644 index 0000000000..880252856b --- /dev/null +++ b/target/linux/ramips/patches-3.10/0208-MIPS-ralink-add-MT7621-dts-file.patch @@ -0,0 +1,300 @@ +From dd4f939bb7c30f9256a35d31de673241ead350ab Mon Sep 17 00:00:00 2001 +From: John Crispin +Date: Fri, 24 Jan 2014 17:01:22 +0100 +Subject: [PATCH 208/215] MIPS: ralink: add MT7621 dts file + +Signed-off-by: John Crispin +--- + arch/mips/ralink/dts/Makefile | 1 + + arch/mips/ralink/dts/mt7621.dtsi | 257 ++++++++++++++++++++++++++++++++++ + arch/mips/ralink/dts/mt7621_eval.dts | 16 +++ + 3 files changed, 274 insertions(+) + create mode 100644 arch/mips/ralink/dts/mt7621.dtsi + create mode 100644 arch/mips/ralink/dts/mt7621_eval.dts + +--- a/arch/mips/ralink/dts/Makefile ++++ b/arch/mips/ralink/dts/Makefile +@@ -2,3 +2,4 @@ obj-$(CONFIG_DTB_RT2880_EVAL) := rt2880_ + obj-$(CONFIG_DTB_RT305X_EVAL) := rt3052_eval.dtb.o + obj-$(CONFIG_DTB_RT3883_EVAL) := rt3883_eval.dtb.o + obj-$(CONFIG_DTB_MT7620A_EVAL) := mt7620a_eval.dtb.o ++obj-$(CONFIG_DTB_MT7621_EVAL) := mt7621_eval.dtb.o +--- /dev/null ++++ b/arch/mips/ralink/dts/mt7621.dtsi +@@ -0,0 +1,257 @@ ++/ { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ compatible = "ralink,mtk7620a-soc"; ++ ++ cpus { ++ cpu@0 { ++ compatible = "mips,mips24KEc"; ++ }; ++ }; ++ ++ cpuintc: cpuintc@0 { ++ #address-cells = <0>; ++ #interrupt-cells = <1>; ++ interrupt-controller; ++ compatible = "mti,cpu-interrupt-controller"; ++ }; ++ ++ palmbus@1E000000 { ++ compatible = "palmbus"; ++ reg = <0x1E000000 0x100000>; ++ ranges = <0x0 0x1E000000 0x0FFFFF>; ++ ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ++ sysc@0 { ++ compatible = "mtk,mt7621-sysc"; ++ reg = <0x0 0x100>; ++ }; ++ ++ wdt@100 { ++ compatible = "mtk,mt7621-wdt"; ++ reg = <0x100 0x100>; ++ }; ++ ++ gpio@600 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ compatible = "mtk,mt7621-gpio"; ++ reg = <0x600 0x100>; ++ ++ gpio0: bank@0 { ++ reg = <0>; ++ compatible = "mtk,mt7621-gpio-bank"; ++ gpio-controller; ++ #gpio-cells = <2>; ++ }; ++ ++ gpio1: bank@1 { ++ reg = <1>; ++ compatible = "mtk,mt7621-gpio-bank"; ++ gpio-controller; ++ #gpio-cells = <2>; ++ }; ++ ++ gpio2: bank@2 { ++ reg = <2>; ++ compatible = "mtk,mt7621-gpio-bank"; ++ gpio-controller; ++ #gpio-cells = <2>; ++ }; ++ }; ++ ++ memc@5000 { ++ compatible = "mtk,mt7621-memc"; ++ reg = <0x300 0x100>; ++ }; ++ ++ uartlite@c00 { ++ compatible = "ns16550a"; ++ reg = <0xc00 0x100>; ++ ++ interrupt-parent = <&gic>; ++ interrupts = <26>; ++ 
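The single interrupt cell on this uartlite node is a GIC hardware line number; combined with the legacy domain registered by the interrupt-controller patch earlier in this series, it resolves to a fixed offset from MIPS_GIC_IRQ_BASE. A rough sketch of that translation, assuming the MIPS_GIC_IRQ_BASE value used by that patch; the helper name is illustrative only:

/* Illustrative only: the effect of irq_domain_add_legacy() with a one-cell xlate. */
static inline unsigned int gic_cell_to_linux_irq(unsigned int cell)
{
	/* e.g. the uartlite's <26> ends up as MIPS_GIC_IRQ_BASE + 26 */
	return MIPS_GIC_IRQ_BASE + cell;
}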
++ reg-shift = <2>; ++ reg-io-width = <4>; ++ no-loopback-test; ++ }; ++ ++ uart@d00 { ++ compatible = "ns16550a"; ++ reg = <0xd00 0x100>; ++ ++ interrupt-parent = <&gic>; ++ interrupts = <27>; ++ ++ fifo-size = <16>; ++ reg-shift = <2>; ++ reg-io-width = <4>; ++ no-loopback-test; ++ }; ++ ++ spi@b00 { ++ status = "okay"; ++ ++ compatible = "ralink,mt7621-spi"; ++ reg = <0xb00 0x100>; ++ ++ resets = <&rstctrl 18>; ++ reset-names = "spi"; ++ ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ++/* pinctrl-names = "default"; ++ pinctrl-0 = <&spi_pins>;*/ ++ ++ m25p80@0 { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ compatible = "en25q64"; ++ reg = <0 0>; ++ linux,modalias = "m25p80", "en25q64"; ++ spi-max-frequency = <10000000>; ++ ++ m25p,chunked-io; ++ ++ partition@0 { ++ label = "u-boot"; ++ reg = <0x0 0x30000>; ++ read-only; ++ }; ++ ++ partition@30000 { ++ label = "u-boot-env"; ++ reg = <0x30000 0x10000>; ++ read-only; ++ }; ++ ++ factory: partition@40000 { ++ label = "factory"; ++ reg = <0x40000 0x10000>; ++ read-only; ++ }; ++ ++ partition@50000 { ++ label = "firmware"; ++ reg = <0x50000 0x7a0000>; ++ }; ++ ++ partition@7f0000 { ++ label = "test"; ++ reg = <0x7f0000 0x10000>; ++ }; ++ }; ++ }; ++ }; ++ ++ rstctrl: rstctrl { ++ compatible = "ralink,rt2880-reset"; ++ #reset-cells = <1>; ++ }; ++ ++ sdhci@1E130000 { ++ compatible = "ralink,mt7620a-sdhci"; ++ reg = <0x1E130000 4000>; ++ ++ interrupt-parent = <&gic>; ++ interrupts = <20>; ++ }; ++ ++ xhci@1E1C0000 { ++ compatible = "xhci-platform"; ++ reg = <0x1E1C0000 4000>; ++ ++ interrupt-parent = <&gic>; ++ interrupts = <22>; ++ }; ++ ++ gic: gic@1fbc0000 { ++ #address-cells = <0>; ++ #interrupt-cells = <1>; ++ interrupt-controller; ++ compatible = "ralink,mt7621-gic"; ++ reg = < 0x1fbc0000 0x80 /* gic */ ++ 0x1fbf0000 0x8000 /* cpc */ ++ 0x1fbf8000 0x8000 /* gpmc */ ++ >; ++ }; ++ ++ nand@1e003000 { ++ compatible = "mtk,mt7621-nand"; ++ bank-width = <2>; ++ reg = <0x1e003000 0x800 ++ 0x1e003800 0x800>; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ++ partition@0 { ++ label = "uboot"; ++ reg = <0x00000 0x80000>; /* 64 KB */ ++ }; ++ partition@80000 { ++ label = "uboot_env"; ++ reg = <0x80000 0x80000>; /* 64 KB */ ++ }; ++ partition@100000 { ++ label = "factory"; ++ reg = <0x100000 0x40000>; ++ }; ++ partition@140000 { ++ label = "rootfs"; ++ reg = <0x140000 0xec0000>; ++ }; ++ }; ++ ++ ethernet@1e100000 { ++ compatible = "ralink,mt7621-eth"; ++ reg = <0x1e100000 10000>; ++ ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ ralink,port-map = "llllw"; ++ ++ interrupt-parent = <&gic>; ++ interrupts = <3>; ++ ++/* resets = <&rstctrl 21 &rstctrl 23>; ++ reset-names = "fe", "esw"; ++ ++ port@4 { ++ compatible = "ralink,mt7620a-gsw-port", "ralink,eth-port"; ++ reg = <4>; ++ ++ status = "disabled"; ++ }; ++ ++ port@5 { ++ compatible = "ralink,mt7620a-gsw-port", "ralink,eth-port"; ++ reg = <5>; ++ ++ status = "disabled"; ++ }; ++*/ ++ mdio-bus { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ phy1f: ethernet-phy@1f { ++ reg = <0x1f>; ++ phy-mode = "rgmii"; ++ ++/* interrupt-parent = <&gic>; ++ interrupts = <23>; ++*/ }; ++ }; ++ }; ++ ++ gsw@1e110000 { ++ compatible = "ralink,mt7620a-gsw"; ++ reg = <0x1e110000 8000>; ++ }; ++}; +--- /dev/null ++++ b/arch/mips/ralink/dts/mt7621_eval.dts +@@ -0,0 +1,16 @@ ++/dts-v1/; ++ ++/include/ "mt7621.dtsi" ++ ++/ { ++ compatible = "ralink,mt7621-eval-board", "ralink,mt7621-soc"; ++ model = "Ralink MT7621 evaluation board"; ++ ++ memory@0 { ++ reg = <0x0 0x2000000>; ++ }; ++ ++ chosen { ++ 
bootargs = "console=ttyS0,57600"; ++ }; ++}; diff --git a/target/linux/ramips/patches-3.10/0209-MIPS-ralink-add-MT7621-early_printk-support.patch b/target/linux/ramips/patches-3.10/0209-MIPS-ralink-add-MT7621-early_printk-support.patch new file mode 100644 index 0000000000..d518ee6800 --- /dev/null +++ b/target/linux/ramips/patches-3.10/0209-MIPS-ralink-add-MT7621-early_printk-support.patch @@ -0,0 +1,37 @@ +From a9d4390c6d27e737887388ccbb48f3767f9f89ef Mon Sep 17 00:00:00 2001 +From: John Crispin +Date: Fri, 24 Jan 2014 17:01:17 +0100 +Subject: [PATCH 209/215] MIPS: ralink: add MT7621 early_printk support + +Signed-off-by: John Crispin +--- + arch/mips/ralink/early_printk.c | 10 +++++++--- + 1 file changed, 7 insertions(+), 3 deletions(-) + +--- a/arch/mips/ralink/early_printk.c ++++ b/arch/mips/ralink/early_printk.c +@@ -13,6 +13,8 @@ + + #ifdef CONFIG_SOC_RT288X + #define EARLY_UART_BASE 0x300c00 ++#elif defined(CONFIG_SOC_MT7621) ++#define EARLY_UART_BASE 0x1E000c00 + #else + #define EARLY_UART_BASE 0x10000c00 + #endif +@@ -40,9 +42,15 @@ static inline u32 uart_r32(unsigned reg) + + void prom_putchar(unsigned char ch) + { ++#ifdef CONFIG_SOC_MT7621 ++ uart_w32(ch, UART_TX); ++ while ((uart_r32(0x14) & UART_LSR_THRE) == 0) ++ ; ++#else + while ((uart_r32(UART_REG_LSR) & UART_LSR_THRE) == 0) + ; + uart_w32(ch, UART_REG_TX); + while ((uart_r32(UART_REG_LSR) & UART_LSR_THRE) == 0) + ; ++#endif + } diff --git a/target/linux/ramips/patches-3.10/0210-MIPS-ralink-add-MT7621-pcie-driver.patch b/target/linux/ramips/patches-3.10/0210-MIPS-ralink-add-MT7621-pcie-driver.patch new file mode 100644 index 0000000000..8dae5b3e59 --- /dev/null +++ b/target/linux/ramips/patches-3.10/0210-MIPS-ralink-add-MT7621-pcie-driver.patch @@ -0,0 +1,822 @@ +From 6541090161342ef11cf319a7471aeb6769e20c2c Mon Sep 17 00:00:00 2001 +From: John Crispin +Date: Sun, 16 Mar 2014 05:22:39 +0000 +Subject: [PATCH 210/215] MIPS: ralink: add MT7621 pcie driver + +Signed-off-by: John Crispin +--- + arch/mips/pci/Makefile | 1 + + arch/mips/pci/pci-mt7621.c | 797 ++++++++++++++++++++++++++++++++++++++++++++ + 2 files changed, 798 insertions(+) + create mode 100644 arch/mips/pci/pci-mt7621.c + +--- a/arch/mips/pci/Makefile ++++ b/arch/mips/pci/Makefile +@@ -42,6 +42,7 @@ obj-$(CONFIG_SNI_RM) += fixup-sni.o ops + obj-$(CONFIG_LANTIQ) += fixup-lantiq.o + obj-$(CONFIG_PCI_LANTIQ) += pci-lantiq.o ops-lantiq.o + obj-$(CONFIG_SOC_MT7620) += pci-mt7620a.o ++obj-$(CONFIG_SOC_MT7621) += pci-mt7621.o + obj-$(CONFIG_SOC_RT2880) += pci-rt2880.o + obj-$(CONFIG_SOC_RT3883) += pci-rt3883.o + obj-$(CONFIG_TANBAC_TB0219) += fixup-tb0219.o +--- /dev/null ++++ b/arch/mips/pci/pci-mt7621.c +@@ -0,0 +1,797 @@ ++/************************************************************************** ++ * ++ * BRIEF MODULE DESCRIPTION ++ * PCI init for Ralink RT2880 solution ++ * ++ * Copyright 2007 Ralink Inc. (bruce_chang@ralinktech.com.tw) ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. ++ * ++ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED ++ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN ++ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, ++ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT ++ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF ++ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ++ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF ++ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 675 Mass Ave, Cambridge, MA 02139, USA. ++ * ++ * ++ ************************************************************************** ++ * May 2007 Bruce Chang ++ * Initial Release ++ * ++ * May 2009 Bruce Chang ++ * support RT2880/RT3883 PCIe ++ * ++ * May 2011 Bruce Chang ++ * support RT6855/MT7620 PCIe ++ * ++ ************************************************************************** ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++//#include ++#include ++#include ++#include ++//#include ++ ++#include ++ ++extern void pcie_phy_init(void); ++extern void chk_phy_pll(void); ++ ++/* ++ * These functions and structures provide the BIOS scan and mapping of the PCI ++ * devices. ++ */ ++ ++#define CONFIG_PCIE_PORT0 ++#define CONFIG_PCIE_PORT1 ++#define CONFIG_PCIE_PORT2 ++#define RALINK_PCIE0_CLK_EN (1<<24) ++#define RALINK_PCIE1_CLK_EN (1<<25) ++#define RALINK_PCIE2_CLK_EN (1<<26) ++ ++#define RALINK_PCI_CONFIG_ADDR 0x20 ++#define RALINK_PCI_CONFIG_DATA_VIRTUAL_REG 0x24 ++#define SURFBOARDINT_PCIE0 12 /* PCIE0 */ ++#define RALINK_INT_PCIE0 SURFBOARDINT_PCIE0 ++#define RALINK_INT_PCIE1 SURFBOARDINT_PCIE1 ++#define RALINK_INT_PCIE2 SURFBOARDINT_PCIE2 ++#define SURFBOARDINT_PCIE1 32 /* PCIE1 */ ++#define SURFBOARDINT_PCIE2 33 /* PCIE2 */ ++#define RALINK_PCI_MEMBASE *(volatile u32 *)(RALINK_PCI_BASE + 0x0028) ++#define RALINK_PCI_IOBASE *(volatile u32 *)(RALINK_PCI_BASE + 0x002C) ++#define RALINK_PCIE0_RST (1<<24) ++#define RALINK_PCIE1_RST (1<<25) ++#define RALINK_PCIE2_RST (1<<26) ++#define RALINK_SYSCTL_BASE 0xBE000000 ++ ++#define RALINK_PCI_PCICFG_ADDR *(volatile u32 *)(RALINK_PCI_BASE + 0x0000) ++#define RALINK_PCI_PCIMSK_ADDR *(volatile u32 *)(RALINK_PCI_BASE + 0x000C) ++#define RALINK_PCI_BASE 0xBE140000 ++ ++#define RALINK_PCIEPHY_P0P1_CTL_OFFSET (RALINK_PCI_BASE + 0x9000) ++#define RT6855_PCIE0_OFFSET 0x2000 ++#define RT6855_PCIE1_OFFSET 0x3000 ++#define RT6855_PCIE2_OFFSET 0x4000 ++ ++#define RALINK_PCI0_BAR0SETUP_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0010) ++#define RALINK_PCI0_IMBASEBAR0_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0018) ++#define RALINK_PCI0_ID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0030) ++#define RALINK_PCI0_CLASS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0034) ++#define RALINK_PCI0_SUBID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0038) ++#define RALINK_PCI0_STATUS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0050) ++#define RALINK_PCI0_DERR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0060) ++#define RALINK_PCI0_ECRC *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0064) ++ ++#define RALINK_PCI1_BAR0SETUP_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0010) ++#define RALINK_PCI1_IMBASEBAR0_ADDR 
*(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0018) ++#define RALINK_PCI1_ID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0030) ++#define RALINK_PCI1_CLASS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0034) ++#define RALINK_PCI1_SUBID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0038) ++#define RALINK_PCI1_STATUS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0050) ++#define RALINK_PCI1_DERR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0060) ++#define RALINK_PCI1_ECRC *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0064) ++ ++#define RALINK_PCI2_BAR0SETUP_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0010) ++#define RALINK_PCI2_IMBASEBAR0_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0018) ++#define RALINK_PCI2_ID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0030) ++#define RALINK_PCI2_CLASS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0034) ++#define RALINK_PCI2_SUBID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0038) ++#define RALINK_PCI2_STATUS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0050) ++#define RALINK_PCI2_DERR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0060) ++#define RALINK_PCI2_ECRC *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0064) ++ ++#define RALINK_PCIEPHY_P0P1_CTL_OFFSET (RALINK_PCI_BASE + 0x9000) ++#define RALINK_PCIEPHY_P2_CTL_OFFSET (RALINK_PCI_BASE + 0xA000) ++ ++ ++#define MV_WRITE(ofs, data) \ ++ *(volatile u32 *)(RALINK_PCI_BASE+(ofs)) = cpu_to_le32(data) ++#define MV_READ(ofs, data) \ ++ *(data) = le32_to_cpu(*(volatile u32 *)(RALINK_PCI_BASE+(ofs))) ++#define MV_READ_DATA(ofs) \ ++ le32_to_cpu(*(volatile u32 *)(RALINK_PCI_BASE+(ofs))) ++ ++#define MV_WRITE_16(ofs, data) \ ++ *(volatile u16 *)(RALINK_PCI_BASE+(ofs)) = cpu_to_le16(data) ++#define MV_READ_16(ofs, data) \ ++ *(data) = le16_to_cpu(*(volatile u16 *)(RALINK_PCI_BASE+(ofs))) ++ ++#define MV_WRITE_8(ofs, data) \ ++ *(volatile u8 *)(RALINK_PCI_BASE+(ofs)) = data ++#define MV_READ_8(ofs, data) \ ++ *(data) = *(volatile u8 *)(RALINK_PCI_BASE+(ofs)) ++ ++ ++ ++#define RALINK_PCI_MM_MAP_BASE 0x60000000 ++#define RALINK_PCI_IO_MAP_BASE 0x1e160000 ++ ++#define RALINK_SYSTEM_CONTROL_BASE 0xbe000000 ++#define GPIO_PERST ++#define ASSERT_SYSRST_PCIE(val) do { \ ++ if (*(unsigned int *)(0xbe00000c) == 0x00030101) \ ++ RALINK_RSTCTRL |= val; \ ++ else \ ++ RALINK_RSTCTRL &= ~val; \ ++ } while(0) ++#define DEASSERT_SYSRST_PCIE(val) do { \ ++ if (*(unsigned int *)(0xbe00000c) == 0x00030101) \ ++ RALINK_RSTCTRL &= ~val; \ ++ else \ ++ RALINK_RSTCTRL |= val; \ ++ } while(0) ++#define RALINK_SYSCFG1 *(unsigned int *)(RALINK_SYSTEM_CONTROL_BASE + 0x14) ++#define RALINK_CLKCFG1 *(unsigned int *)(RALINK_SYSTEM_CONTROL_BASE + 0x30) ++#define RALINK_RSTCTRL *(unsigned int *)(RALINK_SYSTEM_CONTROL_BASE + 0x34) ++#define RALINK_GPIOMODE *(unsigned int *)(RALINK_SYSTEM_CONTROL_BASE + 0x60) ++#define RALINK_PCIE_CLK_GEN *(unsigned int *)(RALINK_SYSTEM_CONTROL_BASE + 0x7c) ++#define RALINK_PCIE_CLK_GEN1 *(unsigned int *)(RALINK_SYSTEM_CONTROL_BASE + 0x80) ++#define PPLL_CFG1 *(unsigned int *)(RALINK_SYSTEM_CONTROL_BASE + 0x9c) ++#define PPLL_DRV *(unsigned int *)(RALINK_SYSTEM_CONTROL_BASE + 0xa0) ++//RALINK_SYSCFG1 bit ++#define RALINK_PCI_HOST_MODE_EN (1<<7) ++#define RALINK_PCIE_RC_MODE_EN (1<<8) ++//RALINK_RSTCTRL bit ++#define RALINK_PCIE_RST (1<<23) ++#define RALINK_PCI_RST (1<<24) 
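The accessors above poke registers through hard-coded KSEG1 virtual addresses (0xbe000000 is the uncached window onto the 0x1e000000 system-control block), so no ioremap is involved. A sketch of the more conventional accessor style for comparison, assuming the same 0x1e000000 physical base; the names below are illustrative and not part of the driver:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/io.h>

#define MT7621_SYSCTL_PHYS	0x1e000000
#define SYSCTL_RSTCTRL		0x34

static void __iomem *sysctl_base;

static int __init mt7621_sysctl_map(void)
{
	/* map the register block once instead of dereferencing KSEG1 directly */
	sysctl_base = ioremap(MT7621_SYSCTL_PHYS, 0x100);
	return sysctl_base ? 0 : -ENOMEM;
}

static u32 rstctrl_read(void)
{
	return readl(sysctl_base + SYSCTL_RSTCTRL);
}

static void rstctrl_write(u32 val)
{
	writel(val, sysctl_base + SYSCTL_RSTCTRL);
}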
++//RALINK_CLKCFG1 bit ++#define RALINK_PCI_CLK_EN (1<<19) ++#define RALINK_PCIE_CLK_EN (1<<21) ++//RALINK_GPIOMODE bit ++#define PCI_SLOTx2 (1<<11) ++#define PCI_SLOTx1 (2<<11) ++//MTK PCIE PLL bit ++#define PDRV_SW_SET (1<<31) ++#define LC_CKDRVPD_ (1<<19) ++ ++#define MEMORY_BASE 0x0 ++int pcie_link_status = 0; ++ ++void __inline__ read_config(unsigned long bus, unsigned long dev, unsigned long func, unsigned long reg, unsigned long *val); ++void __inline__ write_config(unsigned long bus, unsigned long dev, unsigned long func, unsigned long reg, unsigned long val); ++ ++#define PCI_ACCESS_READ_1 0 ++#define PCI_ACCESS_READ_2 1 ++#define PCI_ACCESS_READ_4 2 ++#define PCI_ACCESS_WRITE_1 3 ++#define PCI_ACCESS_WRITE_2 4 ++#define PCI_ACCESS_WRITE_4 5 ++ ++static int config_access(unsigned char access_type, struct pci_bus *bus, ++ unsigned int devfn, unsigned int where, u32 * data) ++{ ++ unsigned int slot = PCI_SLOT(devfn); ++ u8 func = PCI_FUNC(devfn); ++ uint32_t address_reg, data_reg; ++ unsigned int address; ++ ++ address_reg = RALINK_PCI_CONFIG_ADDR; ++ data_reg = RALINK_PCI_CONFIG_DATA_VIRTUAL_REG; ++ ++ address = (((where&0xF00)>>8)<<24) |(bus->number << 16) | (slot << 11) | (func << 8) | (where & 0xfc) | 0x80000000; ++ MV_WRITE(address_reg, address); ++ ++ switch(access_type) { ++ case PCI_ACCESS_WRITE_1: ++ MV_WRITE_8(data_reg+(where&0x3), *data); ++ break; ++ case PCI_ACCESS_WRITE_2: ++ MV_WRITE_16(data_reg+(where&0x3), *data); ++ break; ++ case PCI_ACCESS_WRITE_4: ++ MV_WRITE(data_reg, *data); ++ break; ++ case PCI_ACCESS_READ_1: ++ MV_READ_8( data_reg+(where&0x3), data); ++ break; ++ case PCI_ACCESS_READ_2: ++ MV_READ_16(data_reg+(where&0x3), data); ++ break; ++ case PCI_ACCESS_READ_4: ++ MV_READ(data_reg, data); ++ break; ++ default: ++ printk("no specify access type\n"); ++ break; ++ } ++ return 0; ++} ++ ++static int ++read_config_byte(struct pci_bus *bus, unsigned int devfn, int where, u8 * val) ++{ ++ return config_access(PCI_ACCESS_READ_1, bus, devfn, (unsigned int)where, (u32 *)val); ++} ++ ++static int ++read_config_word(struct pci_bus *bus, unsigned int devfn, int where, u16 * val) ++{ ++ return config_access(PCI_ACCESS_READ_2, bus, devfn, (unsigned int)where, (u32 *)val); ++} ++ ++static int ++read_config_dword(struct pci_bus *bus, unsigned int devfn, int where, u32 * val) ++{ ++ return config_access(PCI_ACCESS_READ_4, bus, devfn, (unsigned int)where, (u32 *)val); ++} ++ ++static int ++write_config_byte(struct pci_bus *bus, unsigned int devfn, int where, u8 val) ++{ ++ if (config_access(PCI_ACCESS_WRITE_1, bus, devfn, (unsigned int)where, (u32 *)&val)) ++ return -1; ++ ++ return PCIBIOS_SUCCESSFUL; ++} ++ ++static int ++write_config_word(struct pci_bus *bus, unsigned int devfn, int where, u16 val) ++{ ++ if (config_access(PCI_ACCESS_WRITE_2, bus, devfn, where, (u32 *)&val)) ++ return -1; ++ ++ return PCIBIOS_SUCCESSFUL; ++} ++ ++static int ++write_config_dword(struct pci_bus *bus, unsigned int devfn, int where, u32 val) ++{ ++ if (config_access(PCI_ACCESS_WRITE_4, bus, devfn, where, &val)) ++ return -1; ++ ++ return PCIBIOS_SUCCESSFUL; ++} ++ ++ ++static int ++pci_config_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 * val) ++{ ++ switch (size) { ++ case 1: ++ return read_config_byte(bus, devfn, where, (u8 *) val); ++ case 2: ++ return read_config_word(bus, devfn, where, (u16 *) val); ++ default: ++ return read_config_dword(bus, devfn, where, val); ++ } ++} ++ ++static int ++pci_config_write(struct pci_bus *bus, unsigned int devfn, int where, 
int size, u32 val) ++{ ++ switch (size) { ++ case 1: ++ return write_config_byte(bus, devfn, where, (u8) val); ++ case 2: ++ return write_config_word(bus, devfn, where, (u16) val); ++ default: ++ return write_config_dword(bus, devfn, where, val); ++ } ++} ++ ++struct pci_ops rt2880_pci_ops= { ++ .read = pci_config_read, ++ .write = pci_config_write, ++}; ++ ++static struct resource rt2880_res_pci_mem1 = { ++ .name = "PCI MEM1", ++ .start = RALINK_PCI_MM_MAP_BASE, ++ .end = (u32)((RALINK_PCI_MM_MAP_BASE + (unsigned char *)0x0fffffff)), ++ .flags = IORESOURCE_MEM, ++}; ++static struct resource rt2880_res_pci_io1 = { ++ .name = "PCI I/O1", ++ .start = RALINK_PCI_IO_MAP_BASE, ++ .end = (u32)((RALINK_PCI_IO_MAP_BASE + (unsigned char *)0x0ffff)), ++ .flags = IORESOURCE_IO, ++}; ++ ++struct pci_controller rt2880_controller = { ++ .pci_ops = &rt2880_pci_ops, ++ .mem_resource = &rt2880_res_pci_mem1, ++ .io_resource = &rt2880_res_pci_io1, ++ .mem_offset = 0x00000000UL, ++ .io_offset = 0x00000000UL, ++ .io_map_base = 0xa0000000, ++}; ++ ++void __inline__ ++read_config(unsigned long bus, unsigned long dev, unsigned long func, unsigned long reg, unsigned long *val) ++{ ++ unsigned int address_reg, data_reg, address; ++ ++ address_reg = RALINK_PCI_CONFIG_ADDR; ++ data_reg = RALINK_PCI_CONFIG_DATA_VIRTUAL_REG; ++ address = (((reg & 0xF00)>>8)<<24) | (bus << 16) | (dev << 11) | (func << 8) | (reg & 0xfc) | 0x80000000 ; ++ MV_WRITE(address_reg, address); ++ MV_READ(data_reg, val); ++ return; ++} ++ ++void __inline__ ++write_config(unsigned long bus, unsigned long dev, unsigned long func, unsigned long reg, unsigned long val) ++{ ++ unsigned int address_reg, data_reg, address; ++ ++ address_reg = RALINK_PCI_CONFIG_ADDR; ++ data_reg = RALINK_PCI_CONFIG_DATA_VIRTUAL_REG; ++ address = (((reg & 0xF00)>>8)<<24) | (bus << 16) | (dev << 11) | (func << 8) | (reg & 0xfc) | 0x80000000 ; ++ MV_WRITE(address_reg, address); ++ MV_WRITE(data_reg, val); ++ return; ++} ++ ++ ++int __init ++pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) ++{ ++ u16 cmd; ++ u32 val; ++ int irq = 0; ++ ++ if ((dev->bus->number == 0) && (slot == 0)) { ++ write_config(0, 0, 0, PCI_BASE_ADDRESS_0, MEMORY_BASE); ++ read_config(0, 0, 0, PCI_BASE_ADDRESS_0, (unsigned long *)&val); ++ printk("BAR0 at slot 0 = %x\n", val); ++ printk("bus=0x%x, slot = 0x%x\n",dev->bus->number, slot); ++ } else if((dev->bus->number == 0) && (slot == 0x1)) { ++ write_config(0, 1, 0, PCI_BASE_ADDRESS_0, MEMORY_BASE); ++ read_config(0, 1, 0, PCI_BASE_ADDRESS_0, (unsigned long *)&val); ++ printk("BAR0 at slot 1 = %x\n", val); ++ printk("bus=0x%x, slot = 0x%x\n",dev->bus->number, slot); ++ } else if((dev->bus->number == 0) && (slot == 0x2)) { ++ write_config(0, 2, 0, PCI_BASE_ADDRESS_0, MEMORY_BASE); ++ read_config(0, 2, 0, PCI_BASE_ADDRESS_0, (unsigned long *)&val); ++ printk("BAR0 at slot 2 = %x\n", val); ++ printk("bus=0x%x, slot = 0x%x\n",dev->bus->number, slot); ++ } else if ((dev->bus->number == 1) && (slot == 0x0)) { ++ switch (pcie_link_status) { ++ case 2: ++ case 6: ++ irq = RALINK_INT_PCIE1; ++ break; ++ case 4: ++ irq = RALINK_INT_PCIE2; ++ break; ++ default: ++ irq = RALINK_INT_PCIE0; ++ } ++ printk("bus=0x%x, slot = 0x%x, irq=0x%x\n",dev->bus->number, slot, dev->irq); ++ } else if ((dev->bus->number == 2) && (slot == 0x0)) { ++ switch (pcie_link_status) { ++ case 5: ++ case 6: ++ irq = RALINK_INT_PCIE2; ++ break; ++ default: ++ irq = RALINK_INT_PCIE1; ++ } ++ printk("bus=0x%x, slot = 0x%x, irq=0x%x\n",dev->bus->number, slot, dev->irq); ++ } else if 
((dev->bus->number == 2) && (slot == 0x1)) { ++ switch (pcie_link_status) { ++ case 5: ++ case 6: ++ irq = RALINK_INT_PCIE2; ++ break; ++ default: ++ irq = RALINK_INT_PCIE1; ++ } ++ printk("bus=0x%x, slot = 0x%x, irq=0x%x\n",dev->bus->number, slot, dev->irq); ++ } else if ((dev->bus->number ==3) && (slot == 0x0)) { ++ irq = RALINK_INT_PCIE2; ++ printk("bus=0x%x, slot = 0x%x, irq=0x%x\n",dev->bus->number, slot, dev->irq); ++ } else if ((dev->bus->number ==3) && (slot == 0x1)) { ++ irq = RALINK_INT_PCIE2; ++ printk("bus=0x%x, slot = 0x%x, irq=0x%x\n",dev->bus->number, slot, dev->irq); ++ } else if ((dev->bus->number ==3) && (slot == 0x2)) { ++ irq = RALINK_INT_PCIE2; ++ printk("bus=0x%x, slot = 0x%x, irq=0x%x\n",dev->bus->number, slot, dev->irq); ++ } else { ++ printk("bus=0x%x, slot = 0x%x\n",dev->bus->number, slot); ++ return 0; ++ } ++ ++ pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, 0x14); //configure cache line size 0x14 ++ pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0xFF); //configure latency timer 0x10 ++ pci_read_config_word(dev, PCI_COMMAND, &cmd); ++ cmd = cmd | PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY; ++ pci_write_config_word(dev, PCI_COMMAND, cmd); ++ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq); ++ return irq; ++} ++ ++void ++set_pcie_phy(u32 *addr, int start_b, int bits, int val) ++{ ++// printk("0x%p:", addr); ++// printk(" %x", *addr); ++ *(unsigned int *)(addr) &= ~(((1< %x\n", *addr); ++} ++ ++void ++bypass_pipe_rst(void) ++{ ++#if defined (CONFIG_PCIE_PORT0) ++ /* PCIe Port 0 */ ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x02c), 12, 1, 0x01); // rg_pe1_pipe_rst_b ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x02c), 4, 1, 0x01); // rg_pe1_pipe_cmd_frc[4] ++#endif ++#if defined (CONFIG_PCIE_PORT1) ++ /* PCIe Port 1 */ ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x12c), 12, 1, 0x01); // rg_pe1_pipe_rst_b ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x12c), 4, 1, 0x01); // rg_pe1_pipe_cmd_frc[4] ++#endif ++#if defined (CONFIG_PCIE_PORT2) ++ /* PCIe Port 2 */ ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x02c), 12, 1, 0x01); // rg_pe1_pipe_rst_b ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x02c), 4, 1, 0x01); // rg_pe1_pipe_cmd_frc[4] ++#endif ++} ++ ++void ++set_phy_for_ssc(void) ++{ ++ unsigned long reg = (*(volatile u32 *)(RALINK_SYSCTL_BASE + 0x10)); ++ ++ reg = (reg >> 6) & 0x7; ++#if defined (CONFIG_PCIE_PORT0) || defined (CONFIG_PCIE_PORT1) ++ /* Set PCIe Port0 & Port1 PHY to disable SSC */ ++ /* Debug Xtal Type */ ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x400), 8, 1, 0x01); // rg_pe1_frc_h_xtal_type ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x400), 9, 2, 0x00); // rg_pe1_h_xtal_type ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x000), 4, 1, 0x01); // rg_pe1_frc_phy_en //Force Port 0 enable control ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x100), 4, 1, 0x01); // rg_pe1_frc_phy_en //Force Port 1 enable control ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x000), 5, 1, 0x00); // rg_pe1_phy_en //Port 0 disable ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x100), 5, 1, 0x00); // rg_pe1_phy_en //Port 1 disable ++ if(reg <= 5 && reg >= 3) { // 40MHz Xtal ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 6, 2, 0x01); // RG_PE1_H_PLL_PREDIV //Pre-divider ratio (for host mode) ++ printk("***** Xtal 40MHz *****\n"); ++ } else { // 25MHz | 20MHz Xtal ++ set_pcie_phy((u32 
*)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 6, 2, 0x00); // RG_PE1_H_PLL_PREDIV //Pre-divider ratio (for host mode) ++ if (reg >= 6) { ++ printk("***** Xtal 25MHz *****\n"); ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x4bc), 4, 2, 0x01); // RG_PE1_H_PLL_FBKSEL //Feedback clock select ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x49c), 0,31, 0x18000000); // RG_PE1_H_LCDDS_PCW_NCPO //DDS NCPO PCW (for host mode) ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x4a4), 0,16, 0x18d); // RG_PE1_H_LCDDS_SSC_PRD //DDS SSC dither period control ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x4a8), 0,12, 0x4a); // RG_PE1_H_LCDDS_SSC_DELTA //DDS SSC dither amplitude control ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x4a8), 16,12, 0x4a); // RG_PE1_H_LCDDS_SSC_DELTA1 //DDS SSC dither amplitude control for initial ++ } else { ++ printk("***** Xtal 20MHz *****\n"); ++ } ++ } ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x4a0), 5, 1, 0x01); // RG_PE1_LCDDS_CLK_PH_INV //DDS clock inversion ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 22, 2, 0x02); // RG_PE1_H_PLL_BC ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 18, 4, 0x06); // RG_PE1_H_PLL_BP ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 12, 4, 0x02); // RG_PE1_H_PLL_IR ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 8, 4, 0x01); // RG_PE1_H_PLL_IC ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x4ac), 16, 3, 0x00); // RG_PE1_H_PLL_BR ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 1, 3, 0x02); // RG_PE1_PLL_DIVEN ++ if(reg <= 5 && reg >= 3) { // 40MHz Xtal ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x414), 6, 2, 0x01); // rg_pe1_mstckdiv //value of da_pe1_mstckdiv when force mode enable ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x414), 5, 1, 0x01); // rg_pe1_frc_mstckdiv //force mode enable of da_pe1_mstckdiv ++ } ++ /* Enable PHY and disable force mode */ ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x000), 5, 1, 0x01); // rg_pe1_phy_en //Port 0 enable ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x100), 5, 1, 0x01); // rg_pe1_phy_en //Port 1 enable ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x000), 4, 1, 0x00); // rg_pe1_frc_phy_en //Force Port 0 disable control ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x100), 4, 1, 0x00); // rg_pe1_frc_phy_en //Force Port 1 disable control ++#endif ++#if defined (CONFIG_PCIE_PORT2) ++ /* Set PCIe Port2 PHY to disable SSC */ ++ /* Debug Xtal Type */ ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x400), 8, 1, 0x01); // rg_pe1_frc_h_xtal_type ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x400), 9, 2, 0x00); // rg_pe1_h_xtal_type ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x000), 4, 1, 0x01); // rg_pe1_frc_phy_en //Force Port 0 enable control ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x000), 5, 1, 0x00); // rg_pe1_phy_en //Port 0 disable ++ if(reg <= 5 && reg >= 3) { // 40MHz Xtal ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 6, 2, 0x01); // RG_PE1_H_PLL_PREDIV //Pre-divider ratio (for host mode) ++ } else { // 25MHz | 20MHz Xtal ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 6, 2, 0x00); // RG_PE1_H_PLL_PREDIV //Pre-divider ratio (for host mode) ++ if (reg >= 6) { // 25MHz Xtal ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x4bc), 4, 2, 0x01); // RG_PE1_H_PLL_FBKSEL //Feedback clock 
select ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x49c), 0,31, 0x18000000); // RG_PE1_H_LCDDS_PCW_NCPO //DDS NCPO PCW (for host mode) ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x4a4), 0,16, 0x18d); // RG_PE1_H_LCDDS_SSC_PRD //DDS SSC dither period control ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x4a8), 0,12, 0x4a); // RG_PE1_H_LCDDS_SSC_DELTA //DDS SSC dither amplitude control ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x4a8), 16,12, 0x4a); // RG_PE1_H_LCDDS_SSC_DELTA1 //DDS SSC dither amplitude control for initial ++ } ++ } ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x4a0), 5, 1, 0x01); // RG_PE1_LCDDS_CLK_PH_INV //DDS clock inversion ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 22, 2, 0x02); // RG_PE1_H_PLL_BC ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 18, 4, 0x06); // RG_PE1_H_PLL_BP ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 12, 4, 0x02); // RG_PE1_H_PLL_IR ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 8, 4, 0x01); // RG_PE1_H_PLL_IC ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x4ac), 16, 3, 0x00); // RG_PE1_H_PLL_BR ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 1, 3, 0x02); // RG_PE1_PLL_DIVEN ++ if(reg <= 5 && reg >= 3) { // 40MHz Xtal ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x414), 6, 2, 0x01); // rg_pe1_mstckdiv //value of da_pe1_mstckdiv when force mode enable ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x414), 5, 1, 0x01); // rg_pe1_frc_mstckdiv //force mode enable of da_pe1_mstckdiv ++ } ++ /* Enable PHY and disable force mode */ ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x000), 5, 1, 0x01); // rg_pe1_phy_en //Port 0 enable ++ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x000), 4, 1, 0x00); // rg_pe1_frc_phy_en //Force Port 0 disable control ++#endif ++} ++ ++int init_rt2880pci(void) ++{ ++ unsigned long val = 0; ++ iomem_resource.start = 0; ++ iomem_resource.end= ~0; ++ ioport_resource.start= 0; ++ ioport_resource.end = ~0; ++ ++#if defined (CONFIG_PCIE_PORT0) ++ val = RALINK_PCIE0_RST; ++#endif ++#if defined (CONFIG_PCIE_PORT1) ++ val |= RALINK_PCIE1_RST; ++#endif ++#if defined (CONFIG_PCIE_PORT2) ++ val |= RALINK_PCIE2_RST; ++#endif ++ DEASSERT_SYSRST_PCIE(val); ++ printk("release PCIe RST: RALINK_RSTCTRL = %x\n", RALINK_RSTCTRL); ++ ++ bypass_pipe_rst(); ++ set_phy_for_ssc(); ++ ASSERT_SYSRST_PCIE(RALINK_PCIE0_RST | RALINK_PCIE1_RST | RALINK_PCIE2_RST); ++ printk("pull PCIe RST: RALINK_RSTCTRL = %x\n", RALINK_RSTCTRL); ++#if defined GPIO_PERST /* add GPIO control instead of PERST_N */ /*chhung*/ ++ *(unsigned int *)(0xbe000060) &= ~(0x3<<10 | 0x3<<3); ++ *(unsigned int *)(0xbe000060) |= 0x1<<10 | 0x1<<3; ++ mdelay(100); ++ *(unsigned int *)(0xbe000600) |= 0x1<<19 | 0x1<<8 | 0x1<<7; // use GPIO19/GPIO8/GPIO7 (PERST_N/UART_RXD3/UART_TXD3) ++ mdelay(100); ++ *(unsigned int *)(0xbe000620) &= ~(0x1<<19 | 0x1<<8 | 0x1<<7); // clear DATA ++ ++ mdelay(100); ++#else ++ *(unsigned int *)(0xbe000060) &= ~0x00000c00; ++#endif ++#if defined (CONFIG_PCIE_PORT0) ++ val = RALINK_PCIE0_RST; ++#endif ++#if defined (CONFIG_PCIE_PORT1) ++ val |= RALINK_PCIE1_RST; ++#endif ++#if defined (CONFIG_PCIE_PORT2) ++ val |= RALINK_PCIE2_RST; ++#endif ++ DEASSERT_SYSRST_PCIE(val); ++ printk("release PCIe RST: RALINK_RSTCTRL = %x\n", RALINK_RSTCTRL); ++#if defined (CONFIG_PCIE_PORT0) ++ read_config(0, 0, 0, 0x70c, &val); ++ val &= ~(0xff)<<8; ++ val |= 0x50<<8; ++ write_config(0, 0, 0, 0x70c, 
val); ++#endif ++#if defined (CONFIG_PCIE_PORT1) ++ read_config(0, 1, 0, 0x70c, &val); ++ val &= ~(0xff)<<8; ++ val |= 0x50<<8; ++ write_config(0, 1, 0, 0x70c, val); ++#endif ++#if defined (CONFIG_PCIE_PORT2) ++ read_config(0, 2, 0, 0x70c, &val); ++ val &= ~(0xff)<<8; ++ val |= 0x50<<8; ++ write_config(0, 2, 0, 0x70c, val); ++#endif ++ ++#if defined (CONFIG_PCIE_PORT0) ++ read_config(0, 0, 0, 0x70c, &val); ++ printk("Port 0 N_FTS = %x\n", (unsigned int)val); ++#endif ++#if defined (CONFIG_PCIE_PORT1) ++ read_config(0, 1, 0, 0x70c, &val); ++ printk("Port 1 N_FTS = %x\n", (unsigned int)val); ++#endif ++#if defined (CONFIG_PCIE_PORT2) ++ read_config(0, 2, 0, 0x70c, &val); ++ printk("Port 2 N_FTS = %x\n", (unsigned int)val); ++#endif ++ ++ RALINK_RSTCTRL = (RALINK_RSTCTRL | RALINK_PCIE_RST); ++ RALINK_SYSCFG1 &= ~(0x30); ++ RALINK_SYSCFG1 |= (2<<4); ++ RALINK_PCIE_CLK_GEN &= 0x7fffffff; ++ RALINK_PCIE_CLK_GEN1 &= 0x80ffffff; ++ RALINK_PCIE_CLK_GEN1 |= 0xa << 24; ++ RALINK_PCIE_CLK_GEN |= 0x80000000; ++ mdelay(50); ++ RALINK_RSTCTRL = (RALINK_RSTCTRL & ~RALINK_PCIE_RST); ++ ++ ++#if defined GPIO_PERST /* add GPIO control instead of PERST_N */ /*chhung*/ ++ *(unsigned int *)(0xbe000620) |= 0x1<<19 | 0x1<<8 | 0x1<<7; // set DATA ++ mdelay(100); ++#else ++ RALINK_PCI_PCICFG_ADDR &= ~(1<<1); //de-assert PERST ++#endif ++ mdelay(500); ++ ++ ++ mdelay(500); ++#if defined (CONFIG_PCIE_PORT0) ++ if(( RALINK_PCI0_STATUS & 0x1) == 0) ++ { ++ printk("PCIE0 no card, disable it(RST&CLK)\n"); ++ ASSERT_SYSRST_PCIE(RALINK_PCIE0_RST); ++ RALINK_CLKCFG1 = (RALINK_CLKCFG1 & ~RALINK_PCIE0_CLK_EN); ++ pcie_link_status &= ~(1<<0); ++ } else { ++ pcie_link_status |= 1<<0; ++ RALINK_PCI_PCIMSK_ADDR |= (1<<20); // enable pcie1 interrupt ++ } ++#endif ++#if defined (CONFIG_PCIE_PORT1) ++ if(( RALINK_PCI1_STATUS & 0x1) == 0) ++ { ++ printk("PCIE1 no card, disable it(RST&CLK)\n"); ++ ASSERT_SYSRST_PCIE(RALINK_PCIE1_RST); ++ RALINK_CLKCFG1 = (RALINK_CLKCFG1 & ~RALINK_PCIE1_CLK_EN); ++ pcie_link_status &= ~(1<<1); ++ } else { ++ pcie_link_status |= 1<<1; ++ RALINK_PCI_PCIMSK_ADDR |= (1<<21); // enable pcie1 interrupt ++ } ++#endif ++#if defined (CONFIG_PCIE_PORT2) ++ if (( RALINK_PCI2_STATUS & 0x1) == 0) { ++ printk("PCIE2 no card, disable it(RST&CLK)\n"); ++ ASSERT_SYSRST_PCIE(RALINK_PCIE2_RST); ++ RALINK_CLKCFG1 = (RALINK_CLKCFG1 & ~RALINK_PCIE2_CLK_EN); ++ pcie_link_status &= ~(1<<2); ++ } else { ++ pcie_link_status |= 1<<2; ++ RALINK_PCI_PCIMSK_ADDR |= (1<<22); // enable pcie2 interrupt ++ } ++#endif ++ if (pcie_link_status == 0) ++ return 0; ++ ++/* ++pcie(2/1/0) link status pcie2_num pcie1_num pcie0_num ++3'b000 x x x ++3'b001 x x 0 ++3'b010 x 0 x ++3'b011 x 1 0 ++3'b100 0 x x ++3'b101 1 x 0 ++3'b110 1 0 x ++3'b111 2 1 0 ++*/ ++ switch(pcie_link_status) { ++ case 2: ++ RALINK_PCI_PCICFG_ADDR &= ~0x00ff0000; ++ RALINK_PCI_PCICFG_ADDR |= 0x1 << 16; //port0 ++ RALINK_PCI_PCICFG_ADDR |= 0x0 << 20; //port1 ++ break; ++ case 4: ++ RALINK_PCI_PCICFG_ADDR &= ~0x0fff0000; ++ RALINK_PCI_PCICFG_ADDR |= 0x1 << 16; //port0 ++ RALINK_PCI_PCICFG_ADDR |= 0x2 << 20; //port1 ++ RALINK_PCI_PCICFG_ADDR |= 0x0 << 24; //port2 ++ break; ++ case 5: ++ RALINK_PCI_PCICFG_ADDR &= ~0x0fff0000; ++ RALINK_PCI_PCICFG_ADDR |= 0x0 << 16; //port0 ++ RALINK_PCI_PCICFG_ADDR |= 0x2 << 20; //port1 ++ RALINK_PCI_PCICFG_ADDR |= 0x1 << 24; //port2 ++ break; ++ case 6: ++ RALINK_PCI_PCICFG_ADDR &= ~0x0fff0000; ++ RALINK_PCI_PCICFG_ADDR |= 0x2 << 16; //port0 ++ RALINK_PCI_PCICFG_ADDR |= 0x0 << 20; //port1 ++ RALINK_PCI_PCICFG_ADDR |= 0x1 << 24; //port2 ++ 
break; ++ } ++ printk(" -> %x\n", RALINK_PCI_PCICFG_ADDR); ++ //printk(" RALINK_PCI_ARBCTL = %x\n", RALINK_PCI_ARBCTL); ++ ++/* ++ ioport_resource.start = rt2880_res_pci_io1.start; ++ ioport_resource.end = rt2880_res_pci_io1.end; ++*/ ++ ++ RALINK_PCI_MEMBASE = 0xffffffff; //RALINK_PCI_MM_MAP_BASE; ++ RALINK_PCI_IOBASE = RALINK_PCI_IO_MAP_BASE; ++ ++#if defined (CONFIG_PCIE_PORT0) ++ //PCIe0 ++ if((pcie_link_status & 0x1) != 0) { ++ RALINK_PCI0_BAR0SETUP_ADDR = 0x7FFF0001; //open 7FFF:2G; ENABLE ++ RALINK_PCI0_IMBASEBAR0_ADDR = MEMORY_BASE; ++ RALINK_PCI0_CLASS = 0x06040001; ++ printk("PCIE0 enabled\n"); ++ } ++#endif ++#if defined (CONFIG_PCIE_PORT1) ++ //PCIe1 ++ if ((pcie_link_status & 0x2) != 0) { ++ RALINK_PCI1_BAR0SETUP_ADDR = 0x7FFF0001; //open 7FFF:2G; ENABLE ++ RALINK_PCI1_IMBASEBAR0_ADDR = MEMORY_BASE; ++ RALINK_PCI1_CLASS = 0x06040001; ++ printk("PCIE1 enabled\n"); ++ } ++#endif ++#if defined (CONFIG_PCIE_PORT2) ++ //PCIe2 ++ if ((pcie_link_status & 0x4) != 0) { ++ RALINK_PCI2_BAR0SETUP_ADDR = 0x7FFF0001; //open 7FFF:2G; ENABLE ++ RALINK_PCI2_IMBASEBAR0_ADDR = MEMORY_BASE; ++ RALINK_PCI2_CLASS = 0x06040001; ++ printk("PCIE2 enabled\n"); ++ } ++#endif ++ ++ ++ switch(pcie_link_status) { ++ case 7: ++ read_config(0, 2, 0, 0x4, &val); ++ write_config(0, 2, 0, 0x4, val|0x4); ++ // write_config(0, 1, 0, 0x4, val|0x7); ++ case 3: ++ case 5: ++ case 6: ++ read_config(0, 1, 0, 0x4, &val); ++ write_config(0, 1, 0, 0x4, val|0x4); ++ // write_config(0, 1, 0, 0x4, val|0x7); ++ default: ++ read_config(0, 0, 0, 0x4, &val); ++ write_config(0, 0, 0, 0x4, val|0x4); //bus master enable ++ // write_config(0, 0, 0, 0x4, val|0x7); //bus master enable ++ } ++ register_pci_controller(&rt2880_controller); ++ return 0; ++ ++} ++arch_initcall(init_rt2880pci); ++ ++int pcibios_plat_dev_init(struct pci_dev *dev) ++{ ++ return 0; ++} diff --git a/target/linux/ramips/patches-3.10/0211-watchdog-add-MT7621-support.patch b/target/linux/ramips/patches-3.10/0211-watchdog-add-MT7621-support.patch new file mode 100644 index 0000000000..dde4a35aef --- /dev/null +++ b/target/linux/ramips/patches-3.10/0211-watchdog-add-MT7621-support.patch @@ -0,0 +1,233 @@ +From 158f2deb6349046ee4406578a5d3146ce9870cb3 Mon Sep 17 00:00:00 2001 +From: John Crispin +Date: Sun, 16 Mar 2014 05:24:42 +0000 +Subject: [PATCH 211/215] watchdog: add MT7621 support + +Signed-off-by: John Crispin +--- + drivers/watchdog/Kconfig | 7 ++ + drivers/watchdog/Makefile | 1 + + drivers/watchdog/mt7621_wdt.c | 185 +++++++++++++++++++++++++++++++++++++++++ + 3 files changed, 193 insertions(+) + create mode 100644 drivers/watchdog/mt7621_wdt.c + +Index: linux-3.10.32/drivers/watchdog/Kconfig +=================================================================== +--- linux-3.10.32.orig/drivers/watchdog/Kconfig 2014-03-18 11:00:30.629639835 +0000 ++++ linux-3.10.32/drivers/watchdog/Kconfig 2014-03-18 11:02:35.141634769 +0000 +@@ -1120,6 +1120,13 @@ + help + Hardware driver for the Ralink SoC Watchdog Timer. + ++config MT7621_WDT ++ tristate "Mediatek SoC watchdog" ++ select WATCHDOG_CORE ++ depends on SOC_MT7621 ++ help ++ Hardware driver for the Ralink SoC Watchdog Timer. 
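Because the driver added below registers through the watchdog core (WATCHDOG_CORE is selected), it is driven like any other /dev/watchdog device. A minimal userspace keepalive sketch using the generic watchdog ioctls; the 20-second timeout is only an example value, clamped by the driver's own maximum:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int timeout = 20;	/* seconds, example value */
	int fd = open("/dev/watchdog", O_WRONLY);

	if (fd < 0)
		return 1;

	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	/* driver may adjust the value */
	for (;;) {
		ioctl(fd, WDIOC_KEEPALIVE, 0);	/* ping before the timer expires */
		sleep(timeout / 2);
	}
	/* not reached; closing without the magic character keeps the timer running */
}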
++ + # PARISC Architecture + + # POWERPC Architecture +Index: linux-3.10.32/drivers/watchdog/Makefile +=================================================================== +--- linux-3.10.32.orig/drivers/watchdog/Makefile 2014-03-18 11:00:30.629639835 +0000 ++++ linux-3.10.32/drivers/watchdog/Makefile 2014-03-18 11:00:31.317639807 +0000 +@@ -136,6 +136,7 @@ + octeon-wdt-y := octeon-wdt-main.o octeon-wdt-nmi.o + obj-$(CONFIG_LANTIQ_WDT) += lantiq_wdt.o + obj-$(CONFIG_RALINK_WDT) += rt2880_wdt.o ++obj-$(CONFIG_MT7621_WDT) += mt7621_wdt.o + + # PARISC Architecture + +Index: linux-3.10.32/drivers/watchdog/mt7621_wdt.c +=================================================================== +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ linux-3.10.32/drivers/watchdog/mt7621_wdt.c 2014-03-18 11:00:31.317639807 +0000 +@@ -0,0 +1,185 @@ ++/* ++ * Ralink RT288x/RT3xxx/MT76xx built-in hardware watchdog timer ++ * ++ * Copyright (C) 2011 Gabor Juhos ++ * Copyright (C) 2013 John Crispin ++ * ++ * This driver was based on: drivers/watchdog/softdog.c ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published ++ * by the Free Software Foundation. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#define SYSC_RSTSTAT 0x38 ++#define WDT_RST_CAUSE BIT(1) ++ ++#define RALINK_WDT_TIMEOUT 30 ++ ++#define TIMER_REG_TMRSTAT 0x00 ++#define TIMER_REG_TMR1LOAD 0x24 ++#define TIMER_REG_TMR1CTL 0x20 ++ ++#define TMR1CTL_ENABLE BIT(7) ++#define TMR1CTL_RESTART BIT(9) ++ ++static void __iomem *mt762x_wdt_base; ++ ++static bool nowayout = WATCHDOG_NOWAYOUT; ++module_param(nowayout, bool, 0); ++MODULE_PARM_DESC(nowayout, ++ "Watchdog cannot be stopped once started (default=" ++ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); ++ ++static inline void rt_wdt_w32(unsigned reg, u32 val) ++{ ++ iowrite32(val, mt762x_wdt_base + reg); ++} ++ ++static inline u32 rt_wdt_r32(unsigned reg) ++{ ++ return ioread32(mt762x_wdt_base + reg); ++} ++ ++static int mt762x_wdt_ping(struct watchdog_device *w) ++{ ++ rt_wdt_w32(TIMER_REG_TMRSTAT, TMR1CTL_RESTART); ++ ++ return 0; ++} ++ ++static int mt762x_wdt_set_timeout(struct watchdog_device *w, unsigned int t) ++{ ++ w->timeout = t; ++ rt_wdt_w32(TIMER_REG_TMR1LOAD, t * 1000); ++ mt762x_wdt_ping(w); ++ ++ return 0; ++} ++ ++static int mt762x_wdt_start(struct watchdog_device *w) ++{ ++ u32 t; ++ ++ rt_wdt_w32(TIMER_REG_TMR1CTL, 1000 << 16); ++ mt762x_wdt_set_timeout(w, w->timeout); ++ ++ t = rt_wdt_r32(TIMER_REG_TMR1CTL); ++ t |= TMR1CTL_ENABLE; ++ rt_wdt_w32(TIMER_REG_TMR1CTL, t); ++ ++ return 0; ++} ++ ++static int mt762x_wdt_stop(struct watchdog_device *w) ++{ ++ u32 t; ++ ++ mt762x_wdt_ping(w); ++ ++ t = rt_wdt_r32(TIMER_REG_TMR1CTL); ++ t &= ~TMR1CTL_ENABLE; ++ rt_wdt_w32(TIMER_REG_TMR1CTL, t); ++ ++ return 0; ++} ++ ++static int mt762x_wdt_bootcause(void) ++{ ++ if (rt_sysc_r32(SYSC_RSTSTAT) & WDT_RST_CAUSE) ++ return WDIOF_CARDRESET; ++ ++ return 0; ++} ++ ++static struct watchdog_info mt762x_wdt_info = { ++ .identity = "Mediatek Watchdog", ++ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, ++}; ++ ++static struct watchdog_ops mt762x_wdt_ops = { ++ .owner = THIS_MODULE, ++ .start = mt762x_wdt_start, ++ .stop = mt762x_wdt_stop, ++ .ping = mt762x_wdt_ping, ++ .set_timeout = mt762x_wdt_set_timeout, ++}; ++ ++static struct watchdog_device mt762x_wdt_dev = { ++ .info = &mt762x_wdt_info, ++ .ops = 
&mt762x_wdt_ops, ++ .min_timeout = 1, ++}; ++ ++static int mt762x_wdt_probe(struct platform_device *pdev) ++{ ++ struct resource *res; ++ int ret; ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ mt762x_wdt_base = devm_request_and_ioremap(&pdev->dev, res); ++ if (IS_ERR(mt762x_wdt_base)) ++ return PTR_ERR(mt762x_wdt_base); ++ ++ device_reset(&pdev->dev); ++ ++ mt762x_wdt_dev.dev = &pdev->dev; ++ mt762x_wdt_dev.bootstatus = mt762x_wdt_bootcause(); ++ mt762x_wdt_dev.max_timeout = (0xfffful / 1000); ++ mt762x_wdt_dev.timeout = mt762x_wdt_dev.max_timeout; ++ ++ watchdog_set_nowayout(&mt762x_wdt_dev, nowayout); ++ ++ ret = watchdog_register_device(&mt762x_wdt_dev); ++ if (!ret) ++ dev_info(&pdev->dev, "Initialized\n"); ++ ++ return 0; ++} ++ ++static int mt762x_wdt_remove(struct platform_device *pdev) ++{ ++ watchdog_unregister_device(&mt762x_wdt_dev); ++ ++ return 0; ++} ++ ++static void mt762x_wdt_shutdown(struct platform_device *pdev) ++{ ++ mt762x_wdt_stop(&mt762x_wdt_dev); ++} ++ ++static const struct of_device_id mt762x_wdt_match[] = { ++ { .compatible = "mtk,mt7621-wdt" }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(of, mt762x_wdt_match); ++ ++static struct platform_driver mt762x_wdt_driver = { ++ .probe = mt762x_wdt_probe, ++ .remove = mt762x_wdt_remove, ++ .shutdown = mt762x_wdt_shutdown, ++ .driver = { ++ .name = KBUILD_MODNAME, ++ .owner = THIS_MODULE, ++ .of_match_table = mt762x_wdt_match, ++ }, ++}; ++ ++module_platform_driver(mt762x_wdt_driver); ++ ++MODULE_DESCRIPTION("MediaTek MT762x hardware watchdog driver"); ++MODULE_AUTHOR("John Crispin +Date: Sun, 16 Mar 2014 05:26:34 +0000 +Subject: [PATCH 212/215] GPIO: ralink: add mt7621 gpio controller + +Signed-off-by: John Crispin +--- + arch/mips/Kconfig | 5 +- + drivers/gpio/Kconfig | 6 ++ + drivers/gpio/Makefile | 1 + + drivers/gpio/gpio-mt7621.c | 183 ++++++++++++++++++++++++++++++++++++++++++++ + 4 files changed, 194 insertions(+), 1 deletion(-) + create mode 100644 drivers/gpio/gpio-mt7621.c + +Index: linux-3.10.32/arch/mips/Kconfig +=================================================================== +--- linux-3.10.32.orig/arch/mips/Kconfig 2014-03-18 11:00:30.945639822 +0000 ++++ linux-3.10.32/arch/mips/Kconfig 2014-03-18 11:00:31.325639806 +0000 +@@ -448,7 +448,10 @@ + select ARCH_REQUIRE_GPIOLIB + select PINCTRL + select PINCTRL_RT2880 +- ++ select ARCH_HAS_RESET_CONTROLLER ++ select RESET_CONTROLLER ++ select ARCH_REQUIRE_GPIOLIB ++ + config SGI_IP22 + bool "SGI IP22 (Indy/Indigo2)" + select FW_ARC +Index: linux-3.10.32/drivers/gpio/Kconfig +=================================================================== +--- linux-3.10.32.orig/drivers/gpio/Kconfig 2014-03-18 11:00:30.653639834 +0000 ++++ linux-3.10.32/drivers/gpio/Kconfig 2014-03-18 11:02:01.901636126 +0000 +@@ -710,6 +710,12 @@ + Enable support for GPIO on intel MSIC controllers found in + intel MID devices + ++config GPIO_MT7621 ++ bool "Mediatek GPIO Support" ++ depends on SOC_MT7621 ++ help ++ Say yes here to support the Mediatek SoC GPIO device ++ + comment "USB GPIO expanders:" + + config GPIO_VIPERBOARD +Index: linux-3.10.32/drivers/gpio/Makefile +=================================================================== +--- linux-3.10.32.orig/drivers/gpio/Makefile 2014-03-18 11:00:30.653639834 +0000 ++++ linux-3.10.32/drivers/gpio/Makefile 2014-03-18 11:00:31.325639806 +0000 +@@ -88,3 +88,4 @@ + obj-$(CONFIG_GPIO_WM8350) += gpio-wm8350.o + obj-$(CONFIG_GPIO_WM8994) += gpio-wm8994.o + obj-$(CONFIG_GPIO_XILINX) += gpio-xilinx.o ++obj-$(CONFIG_GPIO_MT7621) += 
gpio-mt7621.o +Index: linux-3.10.32/drivers/gpio/gpio-mt7621.c +=================================================================== +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ linux-3.10.32/drivers/gpio/gpio-mt7621.c 2014-03-18 11:00:31.325639806 +0000 +@@ -0,0 +1,183 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License version 2 as published ++ * by the Free Software Foundation. ++ * ++ * Copyright (C) 2009-2011 Gabor Juhos ++ * Copyright (C) 2013 John Crispin ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define MTK_BANK_WIDTH 32 ++ ++enum mediatek_gpio_reg { ++ GPIO_REG_CTRL = 0, ++ GPIO_REG_POL, ++ GPIO_REG_DATA, ++ GPIO_REG_DSET, ++ GPIO_REG_DCLR, ++}; ++ ++static void __iomem *mtk_gc_membase; ++ ++struct mtk_gc { ++ struct gpio_chip chip; ++ spinlock_t lock; ++ int bank; ++}; ++ ++int ++gpio_to_irq(unsigned gpio) ++{ ++ return -1; ++} ++ ++static inline struct mtk_gc ++*to_mediatek_gpio(struct gpio_chip *chip) ++{ ++ struct mtk_gc *mgc; ++ ++ mgc = container_of(chip, struct mtk_gc, chip); ++ ++ return mgc; ++} ++ ++static inline void ++mtk_gpio_w32(struct mtk_gc *rg, u8 reg, u32 val) ++{ ++ iowrite32(val, mtk_gc_membase + (reg * 0x10) + (rg->bank * 0x4)); ++} ++ ++static inline u32 ++mtk_gpio_r32(struct mtk_gc *rg, u8 reg) ++{ ++ return ioread32(mtk_gc_membase + (reg * 0x10) + (rg->bank * 0x4)); ++} ++ ++static void ++mediatek_gpio_set(struct gpio_chip *chip, unsigned offset, int value) ++{ ++ struct mtk_gc *rg = to_mediatek_gpio(chip); ++ ++ mtk_gpio_w32(rg, (value) ? GPIO_REG_DSET : GPIO_REG_DCLR, BIT(offset)); ++} ++ ++static int ++mediatek_gpio_get(struct gpio_chip *chip, unsigned offset) ++{ ++ struct mtk_gc *rg = to_mediatek_gpio(chip); ++ ++ return !!(mtk_gpio_r32(rg, GPIO_REG_DATA) & BIT(offset)); ++} ++ ++static int ++mediatek_gpio_direction_input(struct gpio_chip *chip, unsigned offset) ++{ ++ struct mtk_gc *rg = to_mediatek_gpio(chip); ++ unsigned long flags; ++ u32 t; ++ ++ spin_lock_irqsave(&rg->lock, flags); ++ t = mtk_gpio_r32(rg, GPIO_REG_CTRL); ++ t &= ~BIT(offset); ++ mtk_gpio_w32(rg, GPIO_REG_CTRL, t); ++ spin_unlock_irqrestore(&rg->lock, flags); ++ ++ return 0; ++} ++ ++static int ++mediatek_gpio_direction_output(struct gpio_chip *chip, ++ unsigned offset, int value) ++{ ++ struct mtk_gc *rg = to_mediatek_gpio(chip); ++ unsigned long flags; ++ u32 t; ++ ++ spin_lock_irqsave(&rg->lock, flags); ++ t = mtk_gpio_r32(rg, GPIO_REG_CTRL); ++ t |= BIT(offset); ++ mtk_gpio_w32(rg, GPIO_REG_CTRL, t); ++ mediatek_gpio_set(chip, offset, value); ++ spin_unlock_irqrestore(&rg->lock, flags); ++ ++ return 0; ++} ++ ++static int ++mediatek_gpio_bank_probe(struct platform_device *pdev, struct device_node *bank) ++{ ++ const __be32 *id = of_get_property(bank, "reg", NULL); ++ struct mtk_gc *rg = devm_kzalloc(&pdev->dev, ++ sizeof(struct mtk_gc), GFP_KERNEL); ++ if (!rg || !id) ++ return -ENOMEM; ++ ++ spin_lock_init(&rg->lock); ++ ++ rg->chip.dev = &pdev->dev; ++ rg->chip.label = dev_name(&pdev->dev); ++ rg->chip.of_node = bank; ++ rg->chip.base = MTK_BANK_WIDTH * be32_to_cpu(*id); ++ rg->chip.ngpio = MTK_BANK_WIDTH; ++ rg->chip.direction_input = mediatek_gpio_direction_input; ++ rg->chip.direction_output = mediatek_gpio_direction_output; ++ rg->chip.get = mediatek_gpio_get; ++ rg->chip.set = mediatek_gpio_set; ++ ++ /* set polarity to low for all gpios */ ++ mtk_gpio_w32(rg, GPIO_REG_POL, 0); ++ ++ dev_info(&pdev->dev, 
"registering %d gpios\n", rg->chip.ngpio); ++ ++ return gpiochip_add(&rg->chip); ++} ++ ++static int ++mediatek_gpio_probe(struct platform_device *pdev) ++{ ++ struct device_node *bank, *np = pdev->dev.of_node; ++ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ ++ mtk_gc_membase = devm_request_and_ioremap(&pdev->dev, res); ++ if (IS_ERR(mtk_gc_membase)) ++ return PTR_ERR(mtk_gc_membase); ++ ++ for_each_child_of_node(np, bank) ++ if (of_device_is_compatible(bank, "mtk,mt7621-gpio-bank")) ++ mediatek_gpio_bank_probe(pdev, bank); ++ ++ return 0; ++} ++ ++static const struct of_device_id mediatek_gpio_match[] = { ++ { .compatible = "mtk,mt7621-gpio" }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(of, mediatek_gpio_match); ++ ++static struct platform_driver mediatek_gpio_driver = { ++ .probe = mediatek_gpio_probe, ++ .driver = { ++ .name = "mt7621_gpio", ++ .owner = THIS_MODULE, ++ .of_match_table = mediatek_gpio_match, ++ }, ++}; ++ ++static int __init ++mediatek_gpio_init(void) ++{ ++ return platform_driver_register(&mediatek_gpio_driver); ++} ++ ++subsys_initcall(mediatek_gpio_init); diff --git a/target/linux/ramips/patches-3.10/0213-MTD-add-mt7621-nand-support.patch b/target/linux/ramips/patches-3.10/0213-MTD-add-mt7621-nand-support.patch new file mode 100644 index 0000000000..6e8aec9fc6 --- /dev/null +++ b/target/linux/ramips/patches-3.10/0213-MTD-add-mt7621-nand-support.patch @@ -0,0 +1,4441 @@ +From 3598d232eb3456fa7aca78e6eeea64210b49c1fc Mon Sep 17 00:00:00 2001 +From: John Crispin +Date: Fri, 24 Jan 2014 17:01:21 +0100 +Subject: [PATCH 213/215] MTD: add mt7621 nand support + +Signed-off-by: John Crispin +--- + drivers/mtd/nand/Kconfig | 6 + + drivers/mtd/nand/Makefile | 1 + + drivers/mtd/nand/bmt.c | 750 ++++++++++++ + drivers/mtd/nand/bmt.h | 80 ++ + drivers/mtd/nand/dev-nand.c | 63 + + drivers/mtd/nand/mt6575_typedefs.h | 340 ++++++ + drivers/mtd/nand/mtk_nand.c | 2304 +++++++++++++++++++++++++++++++++++ + drivers/mtd/nand/mtk_nand.h | 452 +++++++ + drivers/mtd/nand/nand_base.c | 6 +- + drivers/mtd/nand/nand_bbt.c | 41 + + drivers/mtd/nand/nand_def.h | 123 ++ + drivers/mtd/nand/nand_device_list.h | 55 + + drivers/mtd/nand/partition.h | 115 ++ + 13 files changed, 4333 insertions(+), 3 deletions(-) + create mode 100644 drivers/mtd/nand/bmt.c + create mode 100644 drivers/mtd/nand/bmt.h + create mode 100644 drivers/mtd/nand/dev-nand.c + create mode 100644 drivers/mtd/nand/mt6575_typedefs.h + create mode 100644 drivers/mtd/nand/mtk_nand.c + create mode 100644 drivers/mtd/nand/mtk_nand.h + create mode 100644 drivers/mtd/nand/nand_def.h + create mode 100644 drivers/mtd/nand/nand_device_list.h + create mode 100644 drivers/mtd/nand/partition.h + +--- a/drivers/mtd/nand/Kconfig ++++ b/drivers/mtd/nand/Kconfig +@@ -544,4 +544,10 @@ config MTD_NAND_XWAY + Enables support for NAND Flash chips on Lantiq XWAY SoCs. NAND is attached + to the External Bus Unit (EBU). 
+
++config MTK_MTD_NAND
++	tristate "Support for MTK SoC NAND controller"
++	depends on SOC_MT7621
++	select MTD_NAND_IDS
++	select MTD_NAND_ECC
++
+ endif # MTD_NAND
+--- a/drivers/mtd/nand/Makefile
++++ b/drivers/mtd/nand/Makefile
+@@ -50,5 +50,6 @@ obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740
+ obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand/
+ obj-$(CONFIG_MTD_NAND_XWAY) += xway_nand.o
+ obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash/
++obj-$(CONFIG_MTK_MTD_NAND) += mtk_nand.o bmt.o
+
+ nand-objs := nand_base.o nand_bbt.o
+--- /dev/null
++++ b/drivers/mtd/nand/bmt.c
+@@ -0,0 +1,750 @@
++#include "bmt.h"
++
++typedef struct
++{
++    char signature[3];
++    u8 version;
++    u8 bad_count;               // bad block count in pool
++    u8 mapped_count;            // mapped block count in pool
++    u8 checksum;
++    u8 reseverd[13];
++} phys_bmt_header;
++
++typedef struct
++{
++    phys_bmt_header header;
++    bmt_entry table[MAX_BMT_SIZE];
++} phys_bmt_struct;
++
++typedef struct
++{
++    char signature[3];
++} bmt_oob_data;
++
++static char MAIN_SIGNATURE[] = "BMT";
++static char OOB_SIGNATURE[] = "bmt";
++#define SIGNATURE_SIZE (3)
++
++#define MAX_DAT_SIZE 0x1000
++#define MAX_OOB_SIZE 0x80
++
++static struct mtd_info *mtd_bmt;
++static struct nand_chip *nand_chip_bmt;
++#define BLOCK_SIZE_BMT (1 << nand_chip_bmt->phys_erase_shift)
++#define PAGE_SIZE_BMT (1 << nand_chip_bmt->page_shift)
++
++#define OFFSET(block) ((block) * BLOCK_SIZE_BMT)
++#define PAGE_ADDR(block) ((block) * BLOCK_SIZE_BMT / PAGE_SIZE_BMT)
++
++/*********************************************************************
++* Flash is split into 2 parts: the system part is for normal system *
++* usage, its size is system_block_count; the rest is a replace pool *
++* +-------------------------------------------------+               *
++* | system_block_count | bmt_block_count |                          *
++* +-------------------------------------------------+               *
++*********************************************************************/
++static u32 total_block_count;   // block number in flash
++static u32 system_block_count;
++static int bmt_block_count;     // bmt table size
++// static int bmt_count;        // block used in bmt
++static int page_per_block;      // pages per block
++
++static u32 bmt_block_index;     // bmt block index
++static bmt_struct bmt;          // dynamically created global bmt table
++
++static u8 dat_buf[MAX_DAT_SIZE];
++static u8 oob_buf[MAX_OOB_SIZE];
++static bool pool_erased;
++
++/***************************************************************
++*
++* Interface adaptor for preloader/uboot/kernel
++* These interfaces operate on physical address, read/write
++* physical data.
++* ++***************************************************************/ ++int nand_read_page_bmt(u32 page, u8 * dat, u8 * oob) ++{ ++ return mtk_nand_exec_read_page(mtd_bmt, page, PAGE_SIZE_BMT, dat, oob); ++} ++ ++bool nand_block_bad_bmt(u32 offset) ++{ ++ return mtk_nand_block_bad_hw(mtd_bmt, offset); ++} ++ ++bool nand_erase_bmt(u32 offset) ++{ ++ int status; ++ if (offset < 0x20000) ++ { ++ MSG(INIT, "erase offset: 0x%x\n", offset); ++ } ++ ++ status = mtk_nand_erase_hw(mtd_bmt, offset / PAGE_SIZE_BMT); // as nand_chip structure doesn't have a erase function defined ++ if (status & NAND_STATUS_FAIL) ++ return false; ++ else ++ return true; ++} ++ ++int mark_block_bad_bmt(u32 offset) ++{ ++ return mtk_nand_block_markbad_hw(mtd_bmt, offset); //mark_block_bad_hw(offset); ++} ++ ++bool nand_write_page_bmt(u32 page, u8 * dat, u8 * oob) ++{ ++ if (mtk_nand_exec_write_page(mtd_bmt, page, PAGE_SIZE_BMT, dat, oob)) ++ return false; ++ else ++ return true; ++} ++ ++/*************************************************************** ++* * ++* static internal function * ++* * ++***************************************************************/ ++static void dump_bmt_info(bmt_struct * bmt) ++{ ++ int i; ++ ++ MSG(INIT, "BMT v%d. total %d mapping:\n", bmt->version, bmt->mapped_count); ++ for (i = 0; i < bmt->mapped_count; i++) ++ { ++ MSG(INIT, "\t0x%x -> 0x%x\n", bmt->table[i].bad_index, bmt->table[i].mapped_index); ++ } ++} ++ ++static bool match_bmt_signature(u8 * dat, u8 * oob) ++{ ++ ++ if (memcmp(dat + MAIN_SIGNATURE_OFFSET, MAIN_SIGNATURE, SIGNATURE_SIZE)) ++ { ++ return false; ++ } ++ ++ if (memcmp(oob + OOB_SIGNATURE_OFFSET, OOB_SIGNATURE, SIGNATURE_SIZE)) ++ { ++ MSG(INIT, "main signature match, oob signature doesn't match, but ignore\n"); ++ } ++ return true; ++} ++ ++static u8 cal_bmt_checksum(phys_bmt_struct * phys_table, int bmt_size) ++{ ++ int i; ++ u8 checksum = 0; ++ u8 *dat = (u8 *) phys_table; ++ ++ checksum += phys_table->header.version; ++ checksum += phys_table->header.mapped_count; ++ ++ dat += sizeof(phys_bmt_header); ++ for (i = 0; i < bmt_size * sizeof(bmt_entry); i++) ++ { ++ checksum += dat[i]; ++ } ++ ++ return checksum; ++} ++ ++ ++static int is_block_mapped(int index) ++{ ++ int i; ++ for (i = 0; i < bmt.mapped_count; i++) ++ { ++ if (index == bmt.table[i].mapped_index) ++ return i; ++ } ++ return -1; ++} ++ ++static bool is_page_used(u8 * dat, u8 * oob) ++{ ++ return ((oob[OOB_INDEX_OFFSET] != 0xFF) || (oob[OOB_INDEX_OFFSET + 1] != 0xFF)); ++} ++ ++static bool valid_bmt_data(phys_bmt_struct * phys_table) ++{ ++ int i; ++ u8 checksum = cal_bmt_checksum(phys_table, bmt_block_count); ++ ++ // checksum correct? ++ if (phys_table->header.checksum != checksum) ++ { ++ MSG(INIT, "BMT Data checksum error: %x %x\n", phys_table->header.checksum, checksum); ++ return false; ++ } ++ ++ MSG(INIT, "BMT Checksum is: 0x%x\n", phys_table->header.checksum); ++ ++ // block index correct? ++ for (i = 0; i < phys_table->header.mapped_count; i++) ++ { ++ if (phys_table->table[i].bad_index >= total_block_count || phys_table->table[i].mapped_index >= total_block_count || phys_table->table[i].mapped_index < system_block_count) ++ { ++ MSG(INIT, "index error: bad_index: %d, mapped_index: %d\n", phys_table->table[i].bad_index, phys_table->table[i].mapped_index); ++ return false; ++ } ++ } ++ ++ // pass check, valid bmt. 
++ MSG(INIT, "Valid BMT, version v%d\n", phys_table->header.version); ++ return true; ++} ++ ++static void fill_nand_bmt_buffer(bmt_struct * bmt, u8 * dat, u8 * oob) ++{ ++ phys_bmt_struct phys_bmt; ++ ++ dump_bmt_info(bmt); ++ ++ // fill phys_bmt_struct structure with bmt_struct ++ memset(&phys_bmt, 0xFF, sizeof(phys_bmt)); ++ ++ memcpy(phys_bmt.header.signature, MAIN_SIGNATURE, SIGNATURE_SIZE); ++ phys_bmt.header.version = BMT_VERSION; ++ // phys_bmt.header.bad_count = bmt->bad_count; ++ phys_bmt.header.mapped_count = bmt->mapped_count; ++ memcpy(phys_bmt.table, bmt->table, sizeof(bmt_entry) * bmt_block_count); ++ ++ phys_bmt.header.checksum = cal_bmt_checksum(&phys_bmt, bmt_block_count); ++ ++ memcpy(dat + MAIN_SIGNATURE_OFFSET, &phys_bmt, sizeof(phys_bmt)); ++ memcpy(oob + OOB_SIGNATURE_OFFSET, OOB_SIGNATURE, SIGNATURE_SIZE); ++} ++ ++// return valid index if found BMT, else return 0 ++static int load_bmt_data(int start, int pool_size) ++{ ++ int bmt_index = start + pool_size - 1; // find from the end ++ phys_bmt_struct phys_table; ++ int i; ++ ++ MSG(INIT, "[%s]: begin to search BMT from block 0x%x\n", __FUNCTION__, bmt_index); ++ ++ for (bmt_index = start + pool_size - 1; bmt_index >= start; bmt_index--) ++ { ++ if (nand_block_bad_bmt(OFFSET(bmt_index))) ++ { ++ MSG(INIT, "Skip bad block: %d\n", bmt_index); ++ continue; ++ } ++ ++ if (!nand_read_page_bmt(PAGE_ADDR(bmt_index), dat_buf, oob_buf)) ++ { ++ MSG(INIT, "Error found when read block %d\n", bmt_index); ++ continue; ++ } ++ ++ if (!match_bmt_signature(dat_buf, oob_buf)) ++ { ++ continue; ++ } ++ ++ MSG(INIT, "Match bmt signature @ block: 0x%x\n", bmt_index); ++ ++ memcpy(&phys_table, dat_buf + MAIN_SIGNATURE_OFFSET, sizeof(phys_table)); ++ ++ if (!valid_bmt_data(&phys_table)) ++ { ++ MSG(INIT, "BMT data is not correct %d\n", bmt_index); ++ continue; ++ } else ++ { ++ bmt.mapped_count = phys_table.header.mapped_count; ++ bmt.version = phys_table.header.version; ++ // bmt.bad_count = phys_table.header.bad_count; ++ memcpy(bmt.table, phys_table.table, bmt.mapped_count * sizeof(bmt_entry)); ++ ++ MSG(INIT, "bmt found at block: %d, mapped block: %d\n", bmt_index, bmt.mapped_count); ++ ++ for (i = 0; i < bmt.mapped_count; i++) ++ { ++ if (!nand_block_bad_bmt(OFFSET(bmt.table[i].bad_index))) ++ { ++ MSG(INIT, "block 0x%x is not mark bad, should be power lost last time\n", bmt.table[i].bad_index); ++ mark_block_bad_bmt(OFFSET(bmt.table[i].bad_index)); ++ } ++ } ++ ++ return bmt_index; ++ } ++ } ++ ++ MSG(INIT, "bmt block not found!\n"); ++ return 0; ++} ++ ++/************************************************************************* ++* Find an available block and erase. * ++* start_from_end: if true, find available block from end of flash. 
* ++* else, find from the beginning of the pool * ++* need_erase: if true, all unmapped blocks in the pool will be erased * ++*************************************************************************/ ++static int find_available_block(bool start_from_end) ++{ ++ int i; // , j; ++ int block = system_block_count; ++ int direction; ++ // int avail_index = 0; ++ MSG(INIT, "Try to find_available_block, pool_erase: %d\n", pool_erased); ++ ++ // erase all un-mapped blocks in pool when finding avaliable block ++ if (!pool_erased) ++ { ++ MSG(INIT, "Erase all un-mapped blocks in pool\n"); ++ for (i = 0; i < bmt_block_count; i++) ++ { ++ if (block == bmt_block_index) ++ { ++ MSG(INIT, "Skip bmt block 0x%x\n", block); ++ continue; ++ } ++ ++ if (nand_block_bad_bmt(OFFSET(block + i))) ++ { ++ MSG(INIT, "Skip bad block 0x%x\n", block + i); ++ continue; ++ } ++//if(block==4095) ++//{ ++// continue; ++//} ++ ++ if (is_block_mapped(block + i) >= 0) ++ { ++ MSG(INIT, "Skip mapped block 0x%x\n", block + i); ++ continue; ++ } ++ ++ if (!nand_erase_bmt(OFFSET(block + i))) ++ { ++ MSG(INIT, "Erase block 0x%x failed\n", block + i); ++ mark_block_bad_bmt(OFFSET(block + i)); ++ } ++ } ++ ++ pool_erased = 1; ++ } ++ ++ if (start_from_end) ++ { ++ block = total_block_count - 1; ++ direction = -1; ++ } else ++ { ++ block = system_block_count; ++ direction = 1; ++ } ++ ++ for (i = 0; i < bmt_block_count; i++, block += direction) ++ { ++ if (block == bmt_block_index) ++ { ++ MSG(INIT, "Skip bmt block 0x%x\n", block); ++ continue; ++ } ++ ++ if (nand_block_bad_bmt(OFFSET(block))) ++ { ++ MSG(INIT, "Skip bad block 0x%x\n", block); ++ continue; ++ } ++ ++ if (is_block_mapped(block) >= 0) ++ { ++ MSG(INIT, "Skip mapped block 0x%x\n", block); ++ continue; ++ } ++ ++ MSG(INIT, "Find block 0x%x available\n", block); ++ return block; ++ } ++ ++ return 0; ++} ++ ++static unsigned short get_bad_index_from_oob(u8 * oob_buf) ++{ ++ unsigned short index; ++ memcpy(&index, oob_buf + OOB_INDEX_OFFSET, OOB_INDEX_SIZE); ++ ++ return index; ++} ++ ++void set_bad_index_to_oob(u8 * oob, u16 index) ++{ ++ memcpy(oob + OOB_INDEX_OFFSET, &index, sizeof(index)); ++} ++ ++static int migrate_from_bad(int offset, u8 * write_dat, u8 * write_oob) ++{ ++ int page; ++ int error_block = offset / BLOCK_SIZE_BMT; ++ int error_page = (offset / PAGE_SIZE_BMT) % page_per_block; ++ int to_index; ++ ++ memcpy(oob_buf, write_oob, MAX_OOB_SIZE); ++ ++ to_index = find_available_block(false); ++ ++ if (!to_index) ++ { ++ MSG(INIT, "Cannot find an available block for BMT\n"); ++ return 0; ++ } ++ ++ { // migrate error page first ++ MSG(INIT, "Write error page: 0x%x\n", error_page); ++ if (!write_dat) ++ { ++ nand_read_page_bmt(PAGE_ADDR(error_block) + error_page, dat_buf, NULL); ++ write_dat = dat_buf; ++ } ++ // memcpy(oob_buf, write_oob, MAX_OOB_SIZE); ++ ++ if (error_block < system_block_count) ++ set_bad_index_to_oob(oob_buf, error_block); // if error_block is already a mapped block, original mapping index is in OOB. 
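++        /* Write the recovered page into the replacement block first; if this
++         * write also fails, the replacement block is marked bad and the whole
++         * migration is retried with another block from the pool. */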
++ ++ if (!nand_write_page_bmt(PAGE_ADDR(to_index) + error_page, write_dat, oob_buf)) ++ { ++ MSG(INIT, "Write to page 0x%x fail\n", PAGE_ADDR(to_index) + error_page); ++ mark_block_bad_bmt(to_index); ++ return migrate_from_bad(offset, write_dat, write_oob); ++ } ++ } ++ ++ for (page = 0; page < page_per_block; page++) ++ { ++ if (page != error_page) ++ { ++ nand_read_page_bmt(PAGE_ADDR(error_block) + page, dat_buf, oob_buf); ++ if (is_page_used(dat_buf, oob_buf)) ++ { ++ if (error_block < system_block_count) ++ { ++ set_bad_index_to_oob(oob_buf, error_block); ++ } ++ MSG(INIT, "\tmigrate page 0x%x to page 0x%x\n", PAGE_ADDR(error_block) + page, PAGE_ADDR(to_index) + page); ++ if (!nand_write_page_bmt(PAGE_ADDR(to_index) + page, dat_buf, oob_buf)) ++ { ++ MSG(INIT, "Write to page 0x%x fail\n", PAGE_ADDR(to_index) + page); ++ mark_block_bad_bmt(to_index); ++ return migrate_from_bad(offset, write_dat, write_oob); ++ } ++ } ++ } ++ } ++ ++ MSG(INIT, "Migrate from 0x%x to 0x%x done!\n", error_block, to_index); ++ ++ return to_index; ++} ++ ++static bool write_bmt_to_flash(u8 * dat, u8 * oob) ++{ ++ bool need_erase = true; ++ MSG(INIT, "Try to write BMT\n"); ++ ++ if (bmt_block_index == 0) ++ { ++ // if we don't have index, we don't need to erase found block as it has been erased in find_available_block() ++ need_erase = false; ++ if (!(bmt_block_index = find_available_block(true))) ++ { ++ MSG(INIT, "Cannot find an available block for BMT\n"); ++ return false; ++ } ++ } ++ ++ MSG(INIT, "Find BMT block: 0x%x\n", bmt_block_index); ++ ++ // write bmt to flash ++ if (need_erase) ++ { ++ if (!nand_erase_bmt(OFFSET(bmt_block_index))) ++ { ++ MSG(INIT, "BMT block erase fail, mark bad: 0x%x\n", bmt_block_index); ++ mark_block_bad_bmt(OFFSET(bmt_block_index)); ++ // bmt.bad_count++; ++ ++ bmt_block_index = 0; ++ return write_bmt_to_flash(dat, oob); // recursive call ++ } ++ } ++ ++ if (!nand_write_page_bmt(PAGE_ADDR(bmt_block_index), dat, oob)) ++ { ++ MSG(INIT, "Write BMT data fail, need to write again\n"); ++ mark_block_bad_bmt(OFFSET(bmt_block_index)); ++ // bmt.bad_count++; ++ ++ bmt_block_index = 0; ++ return write_bmt_to_flash(dat, oob); // recursive call ++ } ++ ++ MSG(INIT, "Write BMT data to block 0x%x success\n", bmt_block_index); ++ return true; ++} ++ ++/******************************************************************* ++* Reconstruct bmt, called when found bmt info doesn't match bad ++* block info in flash. 
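++* The replace pool is scanned block by block and the mapping table is
++* rebuilt from the bad-block index stored in each mapped block's OOB
++* area; the rebuilt table is then written back to flash.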
++* ++* Return NULL for failure ++*******************************************************************/ ++bmt_struct *reconstruct_bmt(bmt_struct * bmt) ++{ ++ int i; ++ int index = system_block_count; ++ unsigned short bad_index; ++ int mapped; ++ ++ // init everything in BMT struct ++ bmt->version = BMT_VERSION; ++ bmt->bad_count = 0; ++ bmt->mapped_count = 0; ++ ++ memset(bmt->table, 0, bmt_block_count * sizeof(bmt_entry)); ++ ++ for (i = 0; i < bmt_block_count; i++, index++) ++ { ++ if (nand_block_bad_bmt(OFFSET(index))) ++ { ++ MSG(INIT, "Skip bad block: 0x%x\n", index); ++ // bmt->bad_count++; ++ continue; ++ } ++ ++ MSG(INIT, "read page: 0x%x\n", PAGE_ADDR(index)); ++ nand_read_page_bmt(PAGE_ADDR(index), dat_buf, oob_buf); ++ /* if (mtk_nand_read_page_hw(PAGE_ADDR(index), dat_buf)) ++ { ++ MSG(INIT, "Error when read block %d\n", bmt_block_index); ++ continue; ++ } */ ++ ++ if ((bad_index = get_bad_index_from_oob(oob_buf)) >= system_block_count) ++ { ++ MSG(INIT, "get bad index: 0x%x\n", bad_index); ++ if (bad_index != 0xFFFF) ++ MSG(INIT, "Invalid bad index found in block 0x%x, bad index 0x%x\n", index, bad_index); ++ continue; ++ } ++ ++ MSG(INIT, "Block 0x%x is mapped to bad block: 0x%x\n", index, bad_index); ++ ++ if (!nand_block_bad_bmt(OFFSET(bad_index))) ++ { ++ MSG(INIT, "\tbut block 0x%x is not marked as bad, invalid mapping\n", bad_index); ++ continue; // no need to erase here, it will be erased later when trying to write BMT ++ } ++ ++ if ((mapped = is_block_mapped(bad_index)) >= 0) ++ { ++ MSG(INIT, "bad block 0x%x is mapped to 0x%x, should be caused by power lost, replace with one\n", bmt->table[mapped].bad_index, bmt->table[mapped].mapped_index); ++ bmt->table[mapped].mapped_index = index; // use new one instead. ++ } else ++ { ++ // add mapping to BMT ++ bmt->table[bmt->mapped_count].bad_index = bad_index; ++ bmt->table[bmt->mapped_count].mapped_index = index; ++ bmt->mapped_count++; ++ } ++ ++ MSG(INIT, "Add mapping: 0x%x -> 0x%x to BMT\n", bad_index, index); ++ ++ } ++ ++ MSG(INIT, "Scan replace pool done, mapped block: %d\n", bmt->mapped_count); ++ // dump_bmt_info(bmt); ++ ++ // fill NAND BMT buffer ++ memset(oob_buf, 0xFF, sizeof(oob_buf)); ++ fill_nand_bmt_buffer(bmt, dat_buf, oob_buf); ++ ++ // write BMT back ++ if (!write_bmt_to_flash(dat_buf, oob_buf)) ++ { ++ MSG(INIT, "TRAGEDY: cannot find a place to write BMT!!!!\n"); ++ } ++ ++ return bmt; ++} ++ ++/******************************************************************* ++* [BMT Interface] ++* ++* Description: ++* Init bmt from nand. 
Reconstruct if not found or data error ++* ++* Parameter: ++* size: size of bmt and replace pool ++* ++* Return: ++* NULL for failure, and a bmt struct for success ++*******************************************************************/ ++bmt_struct *init_bmt(struct nand_chip * chip, int size) ++{ ++ struct mtk_nand_host *host; ++ ++ if (size > 0 && size < MAX_BMT_SIZE) ++ { ++ MSG(INIT, "Init bmt table, size: %d\n", size); ++ bmt_block_count = size; ++ } else ++ { ++ MSG(INIT, "Invalid bmt table size: %d\n", size); ++ return NULL; ++ } ++ nand_chip_bmt = chip; ++ system_block_count = chip->chipsize >> chip->phys_erase_shift; ++ total_block_count = bmt_block_count + system_block_count; ++ page_per_block = BLOCK_SIZE_BMT / PAGE_SIZE_BMT; ++ host = (struct mtk_nand_host *)chip->priv; ++ mtd_bmt = &host->mtd; ++ ++ MSG(INIT, "mtd_bmt: %p, nand_chip_bmt: %p\n", mtd_bmt, nand_chip_bmt); ++ MSG(INIT, "bmt count: %d, system count: %d\n", bmt_block_count, system_block_count); ++ ++ // set this flag, and unmapped block in pool will be erased. ++ pool_erased = 0; ++ memset(bmt.table, 0, size * sizeof(bmt_entry)); ++ if ((bmt_block_index = load_bmt_data(system_block_count, size))) ++ { ++ MSG(INIT, "Load bmt data success @ block 0x%x\n", bmt_block_index); ++ dump_bmt_info(&bmt); ++ return &bmt; ++ } else ++ { ++ MSG(INIT, "Load bmt data fail, need re-construct!\n"); ++#ifndef __UBOOT_NAND__ // BMT is not re-constructed in UBOOT. ++ if (reconstruct_bmt(&bmt)) ++ return &bmt; ++ else ++#endif ++ return NULL; ++ } ++} ++ ++/******************************************************************* ++* [BMT Interface] ++* ++* Description: ++* Update BMT. ++* ++* Parameter: ++* offset: update block/page offset. ++* reason: update reason, see update_reason_t for reason. ++* dat/oob: data and oob buffer for write fail. ++* ++* Return: ++* Return true for success, and false for failure. 
++*******************************************************************/ ++bool update_bmt(u32 offset, update_reason_t reason, u8 * dat, u8 * oob) ++{ ++ int map_index; ++ int orig_bad_block = -1; ++ // int bmt_update_index; ++ int i; ++ int bad_index = offset / BLOCK_SIZE_BMT; ++ ++#ifndef MTK_NAND_BMT ++ return false; ++#endif ++ if (reason == UPDATE_WRITE_FAIL) ++ { ++ MSG(INIT, "Write fail, need to migrate\n"); ++ if (!(map_index = migrate_from_bad(offset, dat, oob))) ++ { ++ MSG(INIT, "migrate fail\n"); ++ return false; ++ } ++ } else ++ { ++ if (!(map_index = find_available_block(false))) ++ { ++ MSG(INIT, "Cannot find block in pool\n"); ++ return false; ++ } ++ } ++ ++ // now let's update BMT ++ if (bad_index >= system_block_count) // mapped block become bad, find original bad block ++ { ++ for (i = 0; i < bmt_block_count; i++) ++ { ++ if (bmt.table[i].mapped_index == bad_index) ++ { ++ orig_bad_block = bmt.table[i].bad_index; ++ break; ++ } ++ } ++ // bmt.bad_count++; ++ MSG(INIT, "Mapped block becomes bad, orig bad block is 0x%x\n", orig_bad_block); ++ ++ bmt.table[i].mapped_index = map_index; ++ } else ++ { ++ bmt.table[bmt.mapped_count].mapped_index = map_index; ++ bmt.table[bmt.mapped_count].bad_index = bad_index; ++ bmt.mapped_count++; ++ } ++ ++ memset(oob_buf, 0xFF, sizeof(oob_buf)); ++ fill_nand_bmt_buffer(&bmt, dat_buf, oob_buf); ++ if (!write_bmt_to_flash(dat_buf, oob_buf)) ++ return false; ++ ++ mark_block_bad_bmt(offset); ++ ++ return true; ++} ++ ++/******************************************************************* ++* [BMT Interface] ++* ++* Description: ++* Given an block index, return mapped index if it's mapped, else ++* return given index. ++* ++* Parameter: ++* index: given an block index. This value cannot exceed ++* system_block_count. 
++*
++* Return:
++*    The mapped index if the given block is mapped, otherwise the
++*    given index itself.
++*******************************************************************/
++u16 get_mapping_block_index(int index)
++{
++    int i;
++#ifndef MTK_NAND_BMT
++    return index;
++#endif
++    if (index > system_block_count)
++    {
++        return index;
++    }
++
++    for (i = 0; i < bmt.mapped_count; i++)
++    {
++        if (bmt.table[i].bad_index == index)
++        {
++            return bmt.table[i].mapped_index;
++        }
++    }
++
++    return index;
++}
++#ifdef __KERNEL_NAND__
++EXPORT_SYMBOL_GPL(init_bmt);
++EXPORT_SYMBOL_GPL(update_bmt);
++EXPORT_SYMBOL_GPL(get_mapping_block_index);
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("MediaTek");
++MODULE_DESCRIPTION("Bad Block mapping management for MediaTek NAND Flash Driver");
++#endif
+--- /dev/null
++++ b/drivers/mtd/nand/bmt.h
+@@ -0,0 +1,80 @@
++#ifndef __BMT_H__
++#define __BMT_H__
++
++#include "nand_def.h"
++
++#if defined(__PRELOADER_NAND__)
++
++#include "nand.h"
++
++#elif defined(__UBOOT_NAND__)
++
++#include
++#include "mtk_nand.h"
++
++#elif defined(__KERNEL_NAND__)
++
++#include
++#include
++#include
++#include "mtk_nand.h"
++
++#endif
++
++
++#define MAX_BMT_SIZE (0x80)
++#define BMT_VERSION (1)         // initial version
++
++#define MAIN_SIGNATURE_OFFSET (0)
++#define OOB_SIGNATURE_OFFSET (1)
++#define OOB_INDEX_OFFSET (29)
++#define OOB_INDEX_SIZE (2)
++#define FAKE_INDEX (0xAAAA)
++
++typedef struct _bmt_entry_
++{
++    u16 bad_index;              // bad block index
++    u16 mapped_index;           // mapping block index in the replace pool
++} bmt_entry;
++
++typedef enum
++{
++    UPDATE_ERASE_FAIL,
++    UPDATE_WRITE_FAIL,
++    UPDATE_UNMAPPED_BLOCK,
++    UPDATE_REASON_COUNT,
++} update_reason_t;
++
++typedef struct
++{
++    bmt_entry table[MAX_BMT_SIZE];
++    u8 version;
++    u8 mapped_count;            // mapped block count in pool
++    u8 bad_count;               // bad block count in pool.
Not used in V1 ++} bmt_struct; ++ ++/*************************************************************** ++* * ++* Interface BMT need to use * ++* * ++***************************************************************/ ++extern bool mtk_nand_exec_read_page(struct mtd_info *mtd, u32 row, u32 page_size, u8 * dat, u8 * oob); ++extern int mtk_nand_block_bad_hw(struct mtd_info *mtd, loff_t ofs); ++extern int mtk_nand_erase_hw(struct mtd_info *mtd, int page); ++extern int mtk_nand_block_markbad_hw(struct mtd_info *mtd, loff_t ofs); ++extern int mtk_nand_exec_write_page(struct mtd_info *mtd, u32 row, u32 page_size, u8 * dat, u8 * oob); ++ ++ ++/*************************************************************** ++* * ++* Different function interface for preloader/uboot/kernel * ++* * ++***************************************************************/ ++void set_bad_index_to_oob(u8 * oob, u16 index); ++ ++ ++bmt_struct *init_bmt(struct nand_chip *nand, int size); ++bool update_bmt(u32 offset, update_reason_t reason, u8 * dat, u8 * oob); ++unsigned short get_mapping_block_index(int index); ++ ++#endif // #ifndef __BMT_H__ +--- /dev/null ++++ b/drivers/mtd/nand/dev-nand.c +@@ -0,0 +1,63 @@ ++#include ++#include ++#include ++ ++#include "mt6575_typedefs.h" ++ ++#define RALINK_NAND_CTRL_BASE 0xBE003000 ++#define NFI_base RALINK_NAND_CTRL_BASE ++#define RALINK_NANDECC_CTRL_BASE 0xBE003800 ++#define NFIECC_base RALINK_NANDECC_CTRL_BASE ++#define MT7621_NFI_IRQ_ID SURFBOARDINT_NAND ++#define MT7621_NFIECC_IRQ_ID SURFBOARDINT_NAND_ECC ++ ++#define SURFBOARDINT_NAND 22 ++#define SURFBOARDINT_NAND_ECC 23 ++ ++static struct resource MT7621_resource_nand[] = { ++ { ++ .start = NFI_base, ++ .end = NFI_base + 0x1A0, ++ .flags = IORESOURCE_MEM, ++ }, ++ { ++ .start = NFIECC_base, ++ .end = NFIECC_base + 0x150, ++ .flags = IORESOURCE_MEM, ++ }, ++ { ++ .start = MT7621_NFI_IRQ_ID, ++ .flags = IORESOURCE_IRQ, ++ }, ++ { ++ .start = MT7621_NFIECC_IRQ_ID, ++ .flags = IORESOURCE_IRQ, ++ }, ++}; ++ ++static struct platform_device MT7621_nand_dev = { ++ .name = "MT7621-NAND", ++ .id = 0, ++ .num_resources = ARRAY_SIZE(MT7621_resource_nand), ++ .resource = MT7621_resource_nand, ++ .dev = { ++ .platform_data = &mt7621_nand_hw, ++ }, ++}; ++ ++ ++int __init mtk_nand_register(void) ++{ ++ ++ int retval = 0; ++ ++ retval = platform_device_register(&MT7621_nand_dev); ++ if (retval != 0) { ++ printk(KERN_ERR "register nand device fail\n"); ++ return retval; ++ } ++ ++ ++ return retval; ++} ++arch_initcall(mtk_nand_register); +--- /dev/null ++++ b/drivers/mtd/nand/mt6575_typedefs.h +@@ -0,0 +1,340 @@ ++/* Copyright Statement: ++ * ++ * This software/firmware and related documentation ("MediaTek Software") are ++ * protected under relevant copyright laws. The information contained herein ++ * is confidential and proprietary to MediaTek Inc. and/or its licensors. ++ * Without the prior written permission of MediaTek inc. and/or its licensors, ++ * any reproduction, modification, use or disclosure of MediaTek Software, ++ * and information contained herein, in whole or in part, shall be strictly prohibited. ++ */ ++/* MediaTek Inc. (C) 2010. All rights reserved. ++ * ++ * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES ++ * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE") ++ * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON ++ * AN "AS-IS" BASIS ONLY. 
MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT. ++ * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE ++ * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR ++ * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH ++ * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES ++ * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES ++ * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK ++ * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR ++ * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND ++ * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE, ++ * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE, ++ * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO ++ * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE. ++ * ++ * The following software/firmware and/or related documentation ("MediaTek Software") ++ * have been modified by MediaTek Inc. All revisions are subject to any receiver's ++ * applicable license agreements with MediaTek Inc. ++ */ ++ ++/***************************************************************************** ++* Copyright Statement: ++* -------------------- ++* This software is protected by Copyright and the information contained ++* herein is confidential. The software may not be copied and the information ++* contained herein may not be used or disclosed except with the written ++* permission of MediaTek Inc. (C) 2008 ++* ++* BY OPENING THIS FILE, BUYER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES ++* THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE") ++* RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO BUYER ON ++* AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES, ++* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF ++* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT. ++* NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE ++* SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR ++* SUPPLIED WITH THE MEDIATEK SOFTWARE, AND BUYER AGREES TO LOOK ONLY TO SUCH ++* THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. MEDIATEK SHALL ALSO ++* NOT BE RESPONSIBLE FOR ANY MEDIATEK SOFTWARE RELEASES MADE TO BUYER'S ++* SPECIFICATION OR TO CONFORM TO A PARTICULAR STANDARD OR OPEN FORUM. ++* ++* BUYER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND CUMULATIVE ++* LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE, ++* AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE, ++* OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY BUYER TO ++* MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE. ++* ++* THE TRANSACTION CONTEMPLATED HEREUNDER SHALL BE CONSTRUED IN ACCORDANCE ++* WITH THE LAWS OF THE STATE OF CALIFORNIA, USA, EXCLUDING ITS CONFLICT OF ++* LAWS PRINCIPLES. ANY DISPUTES, CONTROVERSIES OR CLAIMS ARISING THEREOF AND ++* RELATED THERETO SHALL BE SETTLED BY ARBITRATION IN SAN FRANCISCO, CA, UNDER ++* THE RULES OF THE INTERNATIONAL CHAMBER OF COMMERCE (ICC). 
++* ++*****************************************************************************/ ++ ++#ifndef _MT6575_TYPEDEFS_H ++#define _MT6575_TYPEDEFS_H ++ ++#if defined (__KERNEL_NAND__) ++#include ++#else ++#define true 1 ++#define false 0 ++#define bool u8 ++#endif ++ ++// --------------------------------------------------------------------------- ++// Basic Type Definitions ++// --------------------------------------------------------------------------- ++ ++typedef volatile unsigned char *P_kal_uint8; ++typedef volatile unsigned short *P_kal_uint16; ++typedef volatile unsigned int *P_kal_uint32; ++ ++typedef long LONG; ++typedef unsigned char UBYTE; ++typedef short SHORT; ++ ++typedef signed char kal_int8; ++typedef signed short kal_int16; ++typedef signed int kal_int32; ++typedef long long kal_int64; ++typedef unsigned char kal_uint8; ++typedef unsigned short kal_uint16; ++typedef unsigned int kal_uint32; ++typedef unsigned long long kal_uint64; ++typedef char kal_char; ++ ++typedef unsigned int *UINT32P; ++typedef volatile unsigned short *UINT16P; ++typedef volatile unsigned char *UINT8P; ++typedef unsigned char *U8P; ++ ++typedef volatile unsigned char *P_U8; ++typedef volatile signed char *P_S8; ++typedef volatile unsigned short *P_U16; ++typedef volatile signed short *P_S16; ++typedef volatile unsigned int *P_U32; ++typedef volatile signed int *P_S32; ++typedef unsigned long long *P_U64; ++typedef signed long long *P_S64; ++ ++typedef unsigned char U8; ++typedef signed char S8; ++typedef unsigned short U16; ++typedef signed short S16; ++typedef unsigned int U32; ++typedef signed int S32; ++typedef unsigned long long U64; ++typedef signed long long S64; ++//typedef unsigned char bool; ++ ++typedef unsigned char UINT8; ++typedef unsigned short UINT16; ++typedef unsigned int UINT32; ++typedef unsigned short USHORT; ++typedef signed char INT8; ++typedef signed short INT16; ++typedef signed int INT32; ++typedef unsigned int DWORD; ++typedef void VOID; ++typedef unsigned char BYTE; ++typedef float FLOAT; ++ ++typedef char *LPCSTR; ++typedef short *LPWSTR; ++ ++ ++// --------------------------------------------------------------------------- ++// Constants ++// --------------------------------------------------------------------------- ++ ++#define IMPORT EXTERN ++#ifndef __cplusplus ++ #define EXTERN extern ++#else ++ #define EXTERN extern "C" ++#endif ++#define LOCAL static ++#define GLOBAL ++#define EXPORT GLOBAL ++ ++#define EQ == ++#define NEQ != ++#define AND && ++#define OR || ++#define XOR(A,B) ((!(A) AND (B)) OR ((A) AND !(B))) ++ ++#ifndef FALSE ++ #define FALSE (0) ++#endif ++ ++#ifndef TRUE ++ #define TRUE (1) ++#endif ++ ++#ifndef NULL ++ #define NULL (0) ++#endif ++ ++//enum boolean {false, true}; ++enum {RX, TX, NONE}; ++ ++#ifndef BOOL ++typedef unsigned char BOOL; ++#endif ++ ++typedef enum { ++ KAL_FALSE = 0, ++ KAL_TRUE = 1, ++} kal_bool; ++ ++ ++// --------------------------------------------------------------------------- ++// Type Casting ++// --------------------------------------------------------------------------- ++ ++#define AS_INT32(x) (*(INT32 *)((void*)x)) ++#define AS_INT16(x) (*(INT16 *)((void*)x)) ++#define AS_INT8(x) (*(INT8 *)((void*)x)) ++ ++#define AS_UINT32(x) (*(UINT32 *)((void*)x)) ++#define AS_UINT16(x) (*(UINT16 *)((void*)x)) ++#define AS_UINT8(x) (*(UINT8 *)((void*)x)) ++ ++ ++// --------------------------------------------------------------------------- ++// Register Manipulations ++// 
--------------------------------------------------------------------------- ++ ++#define READ_REGISTER_UINT32(reg) \ ++ (*(volatile UINT32 * const)(reg)) ++ ++#define WRITE_REGISTER_UINT32(reg, val) \ ++ (*(volatile UINT32 * const)(reg)) = (val) ++ ++#define READ_REGISTER_UINT16(reg) \ ++ (*(volatile UINT16 * const)(reg)) ++ ++#define WRITE_REGISTER_UINT16(reg, val) \ ++ (*(volatile UINT16 * const)(reg)) = (val) ++ ++#define READ_REGISTER_UINT8(reg) \ ++ (*(volatile UINT8 * const)(reg)) ++ ++#define WRITE_REGISTER_UINT8(reg, val) \ ++ (*(volatile UINT8 * const)(reg)) = (val) ++ ++#define INREG8(x) READ_REGISTER_UINT8((UINT8*)((void*)(x))) ++#define OUTREG8(x, y) WRITE_REGISTER_UINT8((UINT8*)((void*)(x)), (UINT8)(y)) ++#define SETREG8(x, y) OUTREG8(x, INREG8(x)|(y)) ++#define CLRREG8(x, y) OUTREG8(x, INREG8(x)&~(y)) ++#define MASKREG8(x, y, z) OUTREG8(x, (INREG8(x)&~(y))|(z)) ++ ++#define INREG16(x) READ_REGISTER_UINT16((UINT16*)((void*)(x))) ++#define OUTREG16(x, y) WRITE_REGISTER_UINT16((UINT16*)((void*)(x)),(UINT16)(y)) ++#define SETREG16(x, y) OUTREG16(x, INREG16(x)|(y)) ++#define CLRREG16(x, y) OUTREG16(x, INREG16(x)&~(y)) ++#define MASKREG16(x, y, z) OUTREG16(x, (INREG16(x)&~(y))|(z)) ++ ++#define INREG32(x) READ_REGISTER_UINT32((UINT32*)((void*)(x))) ++#define OUTREG32(x, y) WRITE_REGISTER_UINT32((UINT32*)((void*)(x)), (UINT32)(y)) ++#define SETREG32(x, y) OUTREG32(x, INREG32(x)|(y)) ++#define CLRREG32(x, y) OUTREG32(x, INREG32(x)&~(y)) ++#define MASKREG32(x, y, z) OUTREG32(x, (INREG32(x)&~(y))|(z)) ++ ++ ++#define DRV_Reg8(addr) INREG8(addr) ++#define DRV_WriteReg8(addr, data) OUTREG8(addr, data) ++#define DRV_SetReg8(addr, data) SETREG8(addr, data) ++#define DRV_ClrReg8(addr, data) CLRREG8(addr, data) ++ ++#define DRV_Reg16(addr) INREG16(addr) ++#define DRV_WriteReg16(addr, data) OUTREG16(addr, data) ++#define DRV_SetReg16(addr, data) SETREG16(addr, data) ++#define DRV_ClrReg16(addr, data) CLRREG16(addr, data) ++ ++#define DRV_Reg32(addr) INREG32(addr) ++#define DRV_WriteReg32(addr, data) OUTREG32(addr, data) ++#define DRV_SetReg32(addr, data) SETREG32(addr, data) ++#define DRV_ClrReg32(addr, data) CLRREG32(addr, data) ++ ++// !!! DEPRECATED, WILL BE REMOVED LATER !!! ++#define DRV_Reg(addr) DRV_Reg16(addr) ++#define DRV_WriteReg(addr, data) DRV_WriteReg16(addr, data) ++#define DRV_SetReg(addr, data) DRV_SetReg16(addr, data) ++#define DRV_ClrReg(addr, data) DRV_ClrReg16(addr, data) ++ ++ ++// --------------------------------------------------------------------------- ++// Compiler Time Deduction Macros ++// --------------------------------------------------------------------------- ++ ++#define _MASK_OFFSET_1(x, n) ((x) & 0x1) ? 
(n) : ++#define _MASK_OFFSET_2(x, n) _MASK_OFFSET_1((x), (n)) _MASK_OFFSET_1((x) >> 1, (n) + 1) ++#define _MASK_OFFSET_4(x, n) _MASK_OFFSET_2((x), (n)) _MASK_OFFSET_2((x) >> 2, (n) + 2) ++#define _MASK_OFFSET_8(x, n) _MASK_OFFSET_4((x), (n)) _MASK_OFFSET_4((x) >> 4, (n) + 4) ++#define _MASK_OFFSET_16(x, n) _MASK_OFFSET_8((x), (n)) _MASK_OFFSET_8((x) >> 8, (n) + 8) ++#define _MASK_OFFSET_32(x, n) _MASK_OFFSET_16((x), (n)) _MASK_OFFSET_16((x) >> 16, (n) + 16) ++ ++#define MASK_OFFSET_ERROR (0xFFFFFFFF) ++ ++#define MASK_OFFSET(x) (_MASK_OFFSET_32(x, 0) MASK_OFFSET_ERROR) ++ ++ ++// --------------------------------------------------------------------------- ++// Assertions ++// --------------------------------------------------------------------------- ++ ++#ifndef ASSERT ++ #define ASSERT(expr) BUG_ON(!(expr)) ++#endif ++ ++#ifndef NOT_IMPLEMENTED ++ #define NOT_IMPLEMENTED() BUG_ON(1) ++#endif ++ ++#define STATIC_ASSERT(pred) STATIC_ASSERT_X(pred, __LINE__) ++#define STATIC_ASSERT_X(pred, line) STATIC_ASSERT_XX(pred, line) ++#define STATIC_ASSERT_XX(pred, line) \ ++ extern char assertion_failed_at_##line[(pred) ? 1 : -1] ++ ++// --------------------------------------------------------------------------- ++// Resolve Compiler Warnings ++// --------------------------------------------------------------------------- ++ ++#define NOT_REFERENCED(x) { (x) = (x); } ++ ++ ++// --------------------------------------------------------------------------- ++// Utilities ++// --------------------------------------------------------------------------- ++ ++#define MAXIMUM(A,B) (((A)>(B))?(A):(B)) ++#define MINIMUM(A,B) (((A)<(B))?(A):(B)) ++ ++#define ARY_SIZE(x) (sizeof((x)) / sizeof((x[0]))) ++#define DVT_DELAYMACRO(u4Num) \ ++{ \ ++ UINT32 u4Count = 0 ; \ ++ for (u4Count = 0; u4Count < u4Num; u4Count++ ); \ ++} \ ++ ++#define A68351B 0 ++#define B68351B 1 ++#define B68351D 2 ++#define B68351E 3 ++#define UNKNOWN_IC_VERSION 0xFF ++ ++/* NAND driver */ ++struct mtk_nand_host_hw { ++ unsigned int nfi_bus_width; /* NFI_BUS_WIDTH */ ++ unsigned int nfi_access_timing; /* NFI_ACCESS_TIMING */ ++ unsigned int nfi_cs_num; /* NFI_CS_NUM */ ++ unsigned int nand_sec_size; /* NAND_SECTOR_SIZE */ ++ unsigned int nand_sec_shift; /* NAND_SECTOR_SHIFT */ ++ unsigned int nand_ecc_size; ++ unsigned int nand_ecc_bytes; ++ unsigned int nand_ecc_mode; ++}; ++extern struct mtk_nand_host_hw mt7621_nand_hw; ++extern unsigned int CFG_BLOCKSIZE; ++ ++#endif // _MT6575_TYPEDEFS_H ++ +--- /dev/null ++++ b/drivers/mtd/nand/mtk_nand.c +@@ -0,0 +1,2304 @@ ++/****************************************************************************** ++* mtk_nand.c - MTK NAND Flash Device Driver ++ * ++* Copyright 2009-2012 MediaTek Co.,Ltd. 
++ * ++* DESCRIPTION: ++* This file provid the other drivers nand relative functions ++ * ++* modification history ++* ---------------------------------------- ++* v3.0, 11 Feb 2010, mtk ++* ---------------------------------------- ++******************************************************************************/ ++#include "nand_def.h" ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "mtk_nand.h" ++#include "nand_device_list.h" ++ ++#include "bmt.h" ++#include "partition.h" ++ ++unsigned int CFG_BLOCKSIZE; ++ ++static int shift_on_bbt = 0; ++extern void nand_bbt_set(struct mtd_info *mtd, int page, int flag); ++extern int nand_bbt_get(struct mtd_info *mtd, int page); ++int mtk_nand_read_oob_hw(struct mtd_info *mtd, struct nand_chip *chip, int page); ++ ++static const char * const probe_types[] = { "cmdlinepart", "ofpart", NULL }; ++ ++#define NAND_CMD_STATUS_MULTI 0x71 ++ ++void show_stack(struct task_struct *tsk, unsigned long *sp); ++extern void mt_irq_set_sens(unsigned int irq, unsigned int sens); ++extern void mt_irq_set_polarity(unsigned int irq,unsigned int polarity); ++ ++struct mtk_nand_host mtk_nand_host; /* include mtd_info and nand_chip structs */ ++struct mtk_nand_host_hw mt7621_nand_hw = { ++ .nfi_bus_width = 8, ++ .nfi_access_timing = NFI_DEFAULT_ACCESS_TIMING, ++ .nfi_cs_num = NFI_CS_NUM, ++ .nand_sec_size = 512, ++ .nand_sec_shift = 9, ++ .nand_ecc_size = 2048, ++ .nand_ecc_bytes = 32, ++ .nand_ecc_mode = NAND_ECC_HW, ++}; ++ ++ ++/******************************************************************************* ++ * Gloable Varible Definition ++ *******************************************************************************/ ++ ++#define NFI_ISSUE_COMMAND(cmd, col_addr, row_addr, col_num, row_num) \ ++ do { \ ++ DRV_WriteReg(NFI_CMD_REG16,cmd);\ ++ while (DRV_Reg32(NFI_STA_REG32) & STA_CMD_STATE);\ ++ DRV_WriteReg32(NFI_COLADDR_REG32, col_addr);\ ++ DRV_WriteReg32(NFI_ROWADDR_REG32, row_addr);\ ++ DRV_WriteReg(NFI_ADDRNOB_REG16, col_num | (row_num<mm) { ++ printk(KERN_ERR "[nand_virt_to_phys_add] ERROR current->mm is NULL! tgid=0x%x, name=%s \n", current->tgid, current->comm); ++ return 0; ++ } ++ ++ pgd = pgd_offset(current->mm, va); /* what is tsk->mm */ ++ if (pgd_none(*pgd) || pgd_bad(*pgd)) { ++ printk(KERN_ERR "[nand_virt_to_phys_add] ERROR, va=0x%x, pgd invalid! \n", va); ++ return 0; ++ } ++ ++ pmd = pmd_offset((pud_t *)pgd, va); ++ if (pmd_none(*pmd) || pmd_bad(*pmd)) { ++ printk(KERN_ERR "[nand_virt_to_phys_add] ERROR, va=0x%x, pmd invalid! \n", va); ++ return 0; ++ } ++ ++ pte = pte_offset_map(pmd, va); ++ if (pte_present(*pte)) { ++ pa = (pte_val(*pte) & (PAGE_MASK)) | pageOffset; ++ return pa; ++ } ++ ++ printk(KERN_ERR "[nand_virt_to_phys_add] ERROR va=0x%x, pte invalid! 
\n", va); ++ return 0; ++} ++EXPORT_SYMBOL(nand_virt_to_phys_add); ++ ++bool ++get_device_info(u16 id, u32 ext_id, flashdev_info * pdevinfo) ++{ ++ u32 index; ++ for (index = 0; gen_FlashTable[index].id != 0; index++) { ++ if (id == gen_FlashTable[index].id && ext_id == gen_FlashTable[index].ext_id) { ++ pdevinfo->id = gen_FlashTable[index].id; ++ pdevinfo->ext_id = gen_FlashTable[index].ext_id; ++ pdevinfo->blocksize = gen_FlashTable[index].blocksize; ++ pdevinfo->addr_cycle = gen_FlashTable[index].addr_cycle; ++ pdevinfo->iowidth = gen_FlashTable[index].iowidth; ++ pdevinfo->timmingsetting = gen_FlashTable[index].timmingsetting; ++ pdevinfo->advancedmode = gen_FlashTable[index].advancedmode; ++ pdevinfo->pagesize = gen_FlashTable[index].pagesize; ++ pdevinfo->sparesize = gen_FlashTable[index].sparesize; ++ pdevinfo->totalsize = gen_FlashTable[index].totalsize; ++ memcpy(pdevinfo->devciename, gen_FlashTable[index].devciename, sizeof(pdevinfo->devciename)); ++ printk(KERN_INFO "Device found in MTK table, ID: %x, EXT_ID: %x\n", id, ext_id); ++ ++ goto find; ++ } ++ } ++ ++find: ++ if (0 == pdevinfo->id) { ++ printk(KERN_INFO "Device not found, ID: %x\n", id); ++ return false; ++ } else { ++ return true; ++ } ++} ++ ++static void ++ECC_Config(struct mtk_nand_host_hw *hw,u32 ecc_bit) ++{ ++ u32 u4ENCODESize; ++ u32 u4DECODESize; ++ u32 ecc_bit_cfg = ECC_CNFG_ECC4; ++ ++ switch(ecc_bit){ ++ case 4: ++ ecc_bit_cfg = ECC_CNFG_ECC4; ++ break; ++ case 8: ++ ecc_bit_cfg = ECC_CNFG_ECC8; ++ break; ++ case 10: ++ ecc_bit_cfg = ECC_CNFG_ECC10; ++ break; ++ case 12: ++ ecc_bit_cfg = ECC_CNFG_ECC12; ++ break; ++ default: ++ break; ++ } ++ DRV_WriteReg16(ECC_DECCON_REG16, DEC_DE); ++ do { ++ } while (!DRV_Reg16(ECC_DECIDLE_REG16)); ++ ++ DRV_WriteReg16(ECC_ENCCON_REG16, ENC_DE); ++ do { ++ } while (!DRV_Reg32(ECC_ENCIDLE_REG32)); ++ ++ /* setup FDM register base */ ++ DRV_WriteReg32(ECC_FDMADDR_REG32, NFI_FDM0L_REG32); ++ ++ /* Sector + FDM */ ++ u4ENCODESize = (hw->nand_sec_size + 8) << 3; ++ /* Sector + FDM + YAFFS2 meta data bits */ ++ u4DECODESize = ((hw->nand_sec_size + 8) << 3) + ecc_bit * 13; ++ ++ /* configure ECC decoder && encoder */ ++ DRV_WriteReg32(ECC_DECCNFG_REG32, ecc_bit_cfg | DEC_CNFG_NFI | DEC_CNFG_EMPTY_EN | (u4DECODESize << DEC_CNFG_CODE_SHIFT)); ++ ++ DRV_WriteReg32(ECC_ENCCNFG_REG32, ecc_bit_cfg | ENC_CNFG_NFI | (u4ENCODESize << ENC_CNFG_MSG_SHIFT)); ++ NFI_SET_REG32(ECC_DECCNFG_REG32, DEC_CNFG_EL); ++} ++ ++static void ++ECC_Decode_Start(void) ++{ ++ while (!(DRV_Reg16(ECC_DECIDLE_REG16) & DEC_IDLE)) ++ ; ++ DRV_WriteReg16(ECC_DECCON_REG16, DEC_EN); ++} ++ ++static void ++ECC_Decode_End(void) ++{ ++ while (!(DRV_Reg16(ECC_DECIDLE_REG16) & DEC_IDLE)) ++ ; ++ DRV_WriteReg16(ECC_DECCON_REG16, DEC_DE); ++} ++ ++static void ++ECC_Encode_Start(void) ++{ ++ while (!(DRV_Reg32(ECC_ENCIDLE_REG32) & ENC_IDLE)) ++ ; ++ mb(); ++ DRV_WriteReg16(ECC_ENCCON_REG16, ENC_EN); ++} ++ ++static void ++ECC_Encode_End(void) ++{ ++ /* wait for device returning idle */ ++ while (!(DRV_Reg32(ECC_ENCIDLE_REG32) & ENC_IDLE)) ; ++ mb(); ++ DRV_WriteReg16(ECC_ENCCON_REG16, ENC_DE); ++} ++ ++static bool ++mtk_nand_check_bch_error(struct mtd_info *mtd, u8 * pDataBuf, u32 u4SecIndex, u32 u4PageAddr) ++{ ++ bool bRet = true; ++ u16 u2SectorDoneMask = 1 << u4SecIndex; ++ u32 u4ErrorNumDebug, i, u4ErrNum; ++ u32 timeout = 0xFFFF; ++ // int el; ++ u32 au4ErrBitLoc[6]; ++ u32 u4ErrByteLoc, u4BitOffset; ++ u32 u4ErrBitLoc1th, u4ErrBitLoc2nd; ++ ++ //4 // Wait for Decode Done ++ while (0 == (u2SectorDoneMask & 
DRV_Reg16(ECC_DECDONE_REG16))) { ++ timeout--; ++ if (0 == timeout) ++ return false; ++ } ++ /* We will manually correct the error bits in the last sector, not all the sectors of the page! */ ++ memset(au4ErrBitLoc, 0x0, sizeof(au4ErrBitLoc)); ++ u4ErrorNumDebug = DRV_Reg32(ECC_DECENUM_REG32); ++ u4ErrNum = DRV_Reg32(ECC_DECENUM_REG32) >> (u4SecIndex << 2); ++ u4ErrNum &= 0xF; ++ ++ if (u4ErrNum) { ++ if (0xF == u4ErrNum) { ++ mtd->ecc_stats.failed++; ++ bRet = false; ++ //printk(KERN_ERR"UnCorrectable at PageAddr=%d\n", u4PageAddr); ++ } else { ++ for (i = 0; i < ((u4ErrNum + 1) >> 1); ++i) { ++ au4ErrBitLoc[i] = DRV_Reg32(ECC_DECEL0_REG32 + i); ++ u4ErrBitLoc1th = au4ErrBitLoc[i] & 0x1FFF; ++ if (u4ErrBitLoc1th < 0x1000) { ++ u4ErrByteLoc = u4ErrBitLoc1th / 8; ++ u4BitOffset = u4ErrBitLoc1th % 8; ++ pDataBuf[u4ErrByteLoc] = pDataBuf[u4ErrByteLoc] ^ (1 << u4BitOffset); ++ mtd->ecc_stats.corrected++; ++ } else { ++ mtd->ecc_stats.failed++; ++ } ++ u4ErrBitLoc2nd = (au4ErrBitLoc[i] >> 16) & 0x1FFF; ++ if (0 != u4ErrBitLoc2nd) { ++ if (u4ErrBitLoc2nd < 0x1000) { ++ u4ErrByteLoc = u4ErrBitLoc2nd / 8; ++ u4BitOffset = u4ErrBitLoc2nd % 8; ++ pDataBuf[u4ErrByteLoc] = pDataBuf[u4ErrByteLoc] ^ (1 << u4BitOffset); ++ mtd->ecc_stats.corrected++; ++ } else { ++ mtd->ecc_stats.failed++; ++ //printk(KERN_ERR"UnCorrectable High ErrLoc=%d\n", au4ErrBitLoc[i]); ++ } ++ } ++ } ++ } ++ if (0 == (DRV_Reg16(ECC_DECFER_REG16) & (1 << u4SecIndex))) ++ bRet = false; ++ } ++ return bRet; ++} ++ ++static bool ++mtk_nand_RFIFOValidSize(u16 u2Size) ++{ ++ u32 timeout = 0xFFFF; ++ while (FIFO_RD_REMAIN(DRV_Reg16(NFI_FIFOSTA_REG16)) < u2Size) { ++ timeout--; ++ if (0 == timeout) ++ return false; ++ } ++ return true; ++} ++ ++static bool ++mtk_nand_WFIFOValidSize(u16 u2Size) ++{ ++ u32 timeout = 0xFFFF; ++ ++ while (FIFO_WR_REMAIN(DRV_Reg16(NFI_FIFOSTA_REG16)) > u2Size) { ++ timeout--; ++ if (0 == timeout) ++ return false; ++ } ++ return true; ++} ++ ++static bool ++mtk_nand_status_ready(u32 u4Status) ++{ ++ u32 timeout = 0xFFFF; ++ ++ while ((DRV_Reg32(NFI_STA_REG32) & u4Status) != 0) { ++ timeout--; ++ if (0 == timeout) ++ return false; ++ } ++ return true; ++} ++ ++static bool ++mtk_nand_reset(void) ++{ ++ int timeout = 0xFFFF; ++ if (DRV_Reg16(NFI_MASTERSTA_REG16)) { ++ mb(); ++ DRV_WriteReg16(NFI_CON_REG16, CON_FIFO_FLUSH | CON_NFI_RST); ++ while (DRV_Reg16(NFI_MASTERSTA_REG16)) { ++ timeout--; ++ if (!timeout) ++ MSG(INIT, "Wait for NFI_MASTERSTA timeout\n"); ++ } ++ } ++ /* issue reset operation */ ++ mb(); ++ DRV_WriteReg16(NFI_CON_REG16, CON_FIFO_FLUSH | CON_NFI_RST); ++ ++ return mtk_nand_status_ready(STA_NFI_FSM_MASK | STA_NAND_BUSY) && mtk_nand_RFIFOValidSize(0) && mtk_nand_WFIFOValidSize(0); ++} ++ ++static void ++mtk_nand_set_mode(u16 u2OpMode) ++{ ++ u16 u2Mode = DRV_Reg16(NFI_CNFG_REG16); ++ u2Mode &= ~CNFG_OP_MODE_MASK; ++ u2Mode |= u2OpMode; ++ DRV_WriteReg16(NFI_CNFG_REG16, u2Mode); ++} ++ ++static void ++mtk_nand_set_autoformat(bool bEnable) ++{ ++ if (bEnable) ++ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_AUTO_FMT_EN); ++ else ++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AUTO_FMT_EN); ++} ++ ++static void ++mtk_nand_configure_fdm(u16 u2FDMSize) ++{ ++ NFI_CLN_REG16(NFI_PAGEFMT_REG16, PAGEFMT_FDM_MASK | PAGEFMT_FDM_ECC_MASK); ++ NFI_SET_REG16(NFI_PAGEFMT_REG16, u2FDMSize << PAGEFMT_FDM_SHIFT); ++ NFI_SET_REG16(NFI_PAGEFMT_REG16, u2FDMSize << PAGEFMT_FDM_ECC_SHIFT); ++} ++ ++static void ++mtk_nand_configure_lock(void) ++{ ++ u32 u4WriteColNOB = 2; ++ u32 u4WriteRowNOB = 3; ++ u32 u4EraseColNOB = 0; ++ u32 
u4EraseRowNOB = 3; ++ DRV_WriteReg16(NFI_LOCKANOB_REG16, ++ (u4WriteColNOB << PROG_CADD_NOB_SHIFT) | (u4WriteRowNOB << PROG_RADD_NOB_SHIFT) | (u4EraseColNOB << ERASE_CADD_NOB_SHIFT) | (u4EraseRowNOB << ERASE_RADD_NOB_SHIFT)); ++ ++ if (CHIPVER_ECO_1 == g_u4ChipVer) { ++ int i; ++ for (i = 0; i < 16; ++i) { ++ DRV_WriteReg32(NFI_LOCK00ADD_REG32 + (i << 1), 0xFFFFFFFF); ++ DRV_WriteReg32(NFI_LOCK00FMT_REG32 + (i << 1), 0xFFFFFFFF); ++ } ++ //DRV_WriteReg16(NFI_LOCKANOB_REG16, 0x0); ++ DRV_WriteReg32(NFI_LOCKCON_REG32, 0xFFFFFFFF); ++ DRV_WriteReg16(NFI_LOCK_REG16, NFI_LOCK_ON); ++ } ++} ++ ++static bool ++mtk_nand_pio_ready(void) ++{ ++ int count = 0; ++ while (!(DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1)) { ++ count++; ++ if (count > 0xffff) { ++ printk("PIO_DIRDY timeout\n"); ++ return false; ++ } ++ } ++ ++ return true; ++} ++ ++static bool ++mtk_nand_set_command(u16 command) ++{ ++ mb(); ++ DRV_WriteReg16(NFI_CMD_REG16, command); ++ return mtk_nand_status_ready(STA_CMD_STATE); ++} ++ ++static bool ++mtk_nand_set_address(u32 u4ColAddr, u32 u4RowAddr, u16 u2ColNOB, u16 u2RowNOB) ++{ ++ mb(); ++ DRV_WriteReg32(NFI_COLADDR_REG32, u4ColAddr); ++ DRV_WriteReg32(NFI_ROWADDR_REG32, u4RowAddr); ++ DRV_WriteReg16(NFI_ADDRNOB_REG16, u2ColNOB | (u2RowNOB << ADDR_ROW_NOB_SHIFT)); ++ return mtk_nand_status_ready(STA_ADDR_STATE); ++} ++ ++static bool ++mtk_nand_check_RW_count(u16 u2WriteSize) ++{ ++ u32 timeout = 0xFFFF; ++ u16 u2SecNum = u2WriteSize >> 9; ++ ++ while (ADDRCNTR_CNTR(DRV_Reg16(NFI_ADDRCNTR_REG16)) < u2SecNum) { ++ timeout--; ++ if (0 == timeout) { ++ printk(KERN_INFO "[%s] timeout\n", __FUNCTION__); ++ return false; ++ } ++ } ++ return true; ++} ++ ++static bool ++mtk_nand_ready_for_read(struct nand_chip *nand, u32 u4RowAddr, u32 u4ColAddr, bool full, u8 * buf) ++{ ++ /* Reset NFI HW internal state machine and flush NFI in/out FIFO */ ++ bool bRet = false; ++ u16 sec_num = 1 << (nand->page_shift - 9); ++ u32 col_addr = u4ColAddr; ++ u32 colnob = 2, rownob = devinfo.addr_cycle - 2; ++ if (nand->options & NAND_BUSWIDTH_16) ++ col_addr /= 2; ++ ++ if (!mtk_nand_reset()) ++ goto cleanup; ++ if (g_bHwEcc) { ++ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN); ++ } else { ++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN); ++ } ++ ++ mtk_nand_set_mode(CNFG_OP_READ); ++ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_READ_EN); ++ DRV_WriteReg16(NFI_CON_REG16, sec_num << CON_NFI_SEC_SHIFT); ++ ++ if (full) { ++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB); ++ ++ if (g_bHwEcc) ++ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN); ++ else ++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN); ++ } else { ++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN); ++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB); ++ } ++ ++ mtk_nand_set_autoformat(full); ++ if (full) ++ if (g_bHwEcc) ++ ECC_Decode_Start(); ++ if (!mtk_nand_set_command(NAND_CMD_READ0)) ++ goto cleanup; ++ if (!mtk_nand_set_address(col_addr, u4RowAddr, colnob, rownob)) ++ goto cleanup; ++ if (!mtk_nand_set_command(NAND_CMD_READSTART)) ++ goto cleanup; ++ if (!mtk_nand_status_ready(STA_NAND_BUSY)) ++ goto cleanup; ++ ++ bRet = true; ++ ++cleanup: ++ return bRet; ++} ++ ++static bool ++mtk_nand_ready_for_write(struct nand_chip *nand, u32 u4RowAddr, u32 col_addr, bool full, u8 * buf) ++{ ++ bool bRet = false; ++ u32 sec_num = 1 << (nand->page_shift - 9); ++ u32 colnob = 2, rownob = devinfo.addr_cycle - 2; ++ if (nand->options & NAND_BUSWIDTH_16) ++ col_addr /= 2; ++ ++ /* Reset NFI HW internal state machine and flush NFI in/out FIFO */ ++ if (!mtk_nand_reset()) ++ return false; ++ ++ 
mtk_nand_set_mode(CNFG_OP_PRGM); ++ ++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_READ_EN); ++ ++ DRV_WriteReg16(NFI_CON_REG16, sec_num << CON_NFI_SEC_SHIFT); ++ ++ if (full) { ++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB); ++ if (g_bHwEcc) ++ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN); ++ else ++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN); ++ } else { ++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN); ++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB); ++ } ++ ++ mtk_nand_set_autoformat(full); ++ ++ if (full) ++ if (g_bHwEcc) ++ ECC_Encode_Start(); ++ ++ if (!mtk_nand_set_command(NAND_CMD_SEQIN)) ++ goto cleanup; ++ //1 FIXED ME: For Any Kind of AddrCycle ++ if (!mtk_nand_set_address(col_addr, u4RowAddr, colnob, rownob)) ++ goto cleanup; ++ ++ if (!mtk_nand_status_ready(STA_NAND_BUSY)) ++ goto cleanup; ++ ++ bRet = true; ++ ++cleanup: ++ return bRet; ++} ++ ++static bool ++mtk_nand_check_dececc_done(u32 u4SecNum) ++{ ++ u32 timeout, dec_mask; ++ ++ timeout = 0xffff; ++ dec_mask = (1 << u4SecNum) - 1; ++ while ((dec_mask != DRV_Reg(ECC_DECDONE_REG16)) && timeout > 0) ++ timeout--; ++ if (timeout == 0) { ++ MSG(VERIFY, "ECC_DECDONE: timeout\n"); ++ return false; ++ } ++ return true; ++} ++ ++static bool ++mtk_nand_mcu_read_data(u8 * buf, u32 length) ++{ ++ int timeout = 0xffff; ++ u32 i; ++ u32 *buf32 = (u32 *) buf; ++ if ((u32) buf % 4 || length % 4) ++ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW); ++ else ++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW); ++ ++ //DRV_WriteReg32(NFI_STRADDR_REG32, 0); ++ mb(); ++ NFI_SET_REG16(NFI_CON_REG16, CON_NFI_BRD); ++ ++ if ((u32) buf % 4 || length % 4) { ++ for (i = 0; (i < (length)) && (timeout > 0);) { ++ if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1) { ++ *buf++ = (u8) DRV_Reg32(NFI_DATAR_REG32); ++ i++; ++ } else { ++ timeout--; ++ } ++ if (0 == timeout) { ++ printk(KERN_ERR "[%s] timeout\n", __FUNCTION__); ++ dump_nfi(); ++ return false; ++ } ++ } ++ } else { ++ for (i = 0; (i < (length >> 2)) && (timeout > 0);) { ++ if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1) { ++ *buf32++ = DRV_Reg32(NFI_DATAR_REG32); ++ i++; ++ } else { ++ timeout--; ++ } ++ if (0 == timeout) { ++ printk(KERN_ERR "[%s] timeout\n", __FUNCTION__); ++ dump_nfi(); ++ return false; ++ } ++ } ++ } ++ return true; ++} ++ ++static bool ++mtk_nand_read_page_data(struct mtd_info *mtd, u8 * pDataBuf, u32 u4Size) ++{ ++ return mtk_nand_mcu_read_data(pDataBuf, u4Size); ++} ++ ++static bool ++mtk_nand_mcu_write_data(struct mtd_info *mtd, const u8 * buf, u32 length) ++{ ++ u32 timeout = 0xFFFF; ++ u32 i; ++ u32 *pBuf32; ++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW); ++ mb(); ++ NFI_SET_REG16(NFI_CON_REG16, CON_NFI_BWR); ++ pBuf32 = (u32 *) buf; ++ ++ if ((u32) buf % 4 || length % 4) ++ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW); ++ else ++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW); ++ ++ if ((u32) buf % 4 || length % 4) { ++ for (i = 0; (i < (length)) && (timeout > 0);) { ++ if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1) { ++ DRV_WriteReg32(NFI_DATAW_REG32, *buf++); ++ i++; ++ } else { ++ timeout--; ++ } ++ if (0 == timeout) { ++ printk(KERN_ERR "[%s] timeout\n", __FUNCTION__); ++ dump_nfi(); ++ return false; ++ } ++ } ++ } else { ++ for (i = 0; (i < (length >> 2)) && (timeout > 0);) { ++ if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1) { ++ DRV_WriteReg32(NFI_DATAW_REG32, *pBuf32++); ++ i++; ++ } else { ++ timeout--; ++ } ++ if (0 == timeout) { ++ printk(KERN_ERR "[%s] timeout\n", __FUNCTION__); ++ dump_nfi(); ++ return false; ++ } ++ } ++ } ++ ++ return true; ++} ++ ++static bool ++mtk_nand_write_page_data(struct mtd_info 
*mtd, u8 * buf, u32 size) ++{ ++ return mtk_nand_mcu_write_data(mtd, buf, size); ++} ++ ++static void ++mtk_nand_read_fdm_data(u8 * pDataBuf, u32 u4SecNum) ++{ ++ u32 i; ++ u32 *pBuf32 = (u32 *) pDataBuf; ++ ++ if (pBuf32) { ++ for (i = 0; i < u4SecNum; ++i) { ++ *pBuf32++ = DRV_Reg32(NFI_FDM0L_REG32 + (i << 1)); ++ *pBuf32++ = DRV_Reg32(NFI_FDM0M_REG32 + (i << 1)); ++ } ++ } ++} ++ ++static u8 fdm_buf[64]; ++static void ++mtk_nand_write_fdm_data(struct nand_chip *chip, u8 * pDataBuf, u32 u4SecNum) ++{ ++ u32 i, j; ++ u8 checksum = 0; ++ bool empty = true; ++ struct nand_oobfree *free_entry; ++ u32 *pBuf32; ++ ++ memcpy(fdm_buf, pDataBuf, u4SecNum * 8); ++ ++ free_entry = chip->ecc.layout->oobfree; ++ for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free_entry[i].length; i++) { ++ for (j = 0; j < free_entry[i].length; j++) { ++ if (pDataBuf[free_entry[i].offset + j] != 0xFF) ++ empty = false; ++ checksum ^= pDataBuf[free_entry[i].offset + j]; ++ } ++ } ++ ++ if (!empty) { ++ fdm_buf[free_entry[i - 1].offset + free_entry[i - 1].length] = checksum; ++ } ++ ++ pBuf32 = (u32 *) fdm_buf; ++ for (i = 0; i < u4SecNum; ++i) { ++ DRV_WriteReg32(NFI_FDM0L_REG32 + (i << 1), *pBuf32++); ++ DRV_WriteReg32(NFI_FDM0M_REG32 + (i << 1), *pBuf32++); ++ } ++} ++ ++static void ++mtk_nand_stop_read(void) ++{ ++ NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_BRD); ++ mtk_nand_reset(); ++ if (g_bHwEcc) ++ ECC_Decode_End(); ++ DRV_WriteReg16(NFI_INTR_EN_REG16, 0); ++} ++ ++static void ++mtk_nand_stop_write(void) ++{ ++ NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_BWR); ++ if (g_bHwEcc) ++ ECC_Encode_End(); ++ DRV_WriteReg16(NFI_INTR_EN_REG16, 0); ++} ++ ++bool ++mtk_nand_exec_read_page(struct mtd_info *mtd, u32 u4RowAddr, u32 u4PageSize, u8 * pPageBuf, u8 * pFDMBuf) ++{ ++ u8 *buf; ++ bool bRet = true; ++ struct nand_chip *nand = mtd->priv; ++ u32 u4SecNum = u4PageSize >> 9; ++ ++ if (((u32) pPageBuf % 16) && local_buffer_16_align) ++ buf = local_buffer_16_align; ++ else ++ buf = pPageBuf; ++ if (mtk_nand_ready_for_read(nand, u4RowAddr, 0, true, buf)) { ++ int j; ++ for (j = 0 ; j < u4SecNum; j++) { ++ if (!mtk_nand_read_page_data(mtd, buf+j*512, 512)) ++ bRet = false; ++ if(g_bHwEcc && !mtk_nand_check_dececc_done(j+1)) ++ bRet = false; ++ if(g_bHwEcc && !mtk_nand_check_bch_error(mtd, buf+j*512, j, u4RowAddr)) ++ bRet = false; ++ } ++ if (!mtk_nand_status_ready(STA_NAND_BUSY)) ++ bRet = false; ++ ++ mtk_nand_read_fdm_data(pFDMBuf, u4SecNum); ++ mtk_nand_stop_read(); ++ } ++ ++ if (buf == local_buffer_16_align) ++ memcpy(pPageBuf, buf, u4PageSize); ++ ++ return bRet; ++} ++ ++int ++mtk_nand_exec_write_page(struct mtd_info *mtd, u32 u4RowAddr, u32 u4PageSize, u8 * pPageBuf, u8 * pFDMBuf) ++{ ++ struct nand_chip *chip = mtd->priv; ++ u32 u4SecNum = u4PageSize >> 9; ++ u8 *buf; ++ u8 status; ++ ++ MSG(WRITE, "mtk_nand_exec_write_page, page: 0x%x\n", u4RowAddr); ++ ++ if (((u32) pPageBuf % 16) && local_buffer_16_align) { ++ printk(KERN_INFO "Data buffer not 16 bytes aligned: %p\n", pPageBuf); ++ memcpy(local_buffer_16_align, pPageBuf, mtd->writesize); ++ buf = local_buffer_16_align; ++ } else ++ buf = pPageBuf; ++ ++ if (mtk_nand_ready_for_write(chip, u4RowAddr, 0, true, buf)) { ++ mtk_nand_write_fdm_data(chip, pFDMBuf, u4SecNum); ++ (void)mtk_nand_write_page_data(mtd, buf, u4PageSize); ++ (void)mtk_nand_check_RW_count(u4PageSize); ++ mtk_nand_stop_write(); ++ (void)mtk_nand_set_command(NAND_CMD_PAGEPROG); ++ while (DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY) ; ++ } ++ ++ status = chip->waitfunc(mtd, chip); ++ if (status & NAND_STATUS_FAIL) ++ 
return -EIO; ++ return 0; ++} ++ ++static int ++get_start_end_block(struct mtd_info *mtd, int block, int *start_blk, int *end_blk) ++{ ++ struct nand_chip *chip = mtd->priv; ++ int i; ++ ++ *start_blk = 0; ++ for (i = 0; i <= part_num; i++) ++ { ++ if (i == part_num) ++ { ++ // try the last reset partition ++ *end_blk = (chip->chipsize >> chip->phys_erase_shift) - 1; ++ if (*start_blk <= *end_blk) ++ { ++ if ((block >= *start_blk) && (block <= *end_blk)) ++ break; ++ } ++ } ++ // skip All partition entry ++ else if (g_pasStatic_Partition[i].size == MTDPART_SIZ_FULL) ++ { ++ continue; ++ } ++ *end_blk = *start_blk + (g_pasStatic_Partition[i].size >> chip->phys_erase_shift) - 1; ++ if ((block >= *start_blk) && (block <= *end_blk)) ++ break; ++ *start_blk = *end_blk + 1; ++ } ++ if (*start_blk > *end_blk) ++ { ++ return -1; ++ } ++ return 0; ++} ++ ++static int ++block_remap(struct mtd_info *mtd, int block) ++{ ++ struct nand_chip *chip = mtd->priv; ++ int start_blk, end_blk; ++ int j, block_offset; ++ int bad_block = 0; ++ ++ if (chip->bbt == NULL) { ++ printk("ERROR!! no bbt table for block_remap\n"); ++ return -1; ++ } ++ ++ if (get_start_end_block(mtd, block, &start_blk, &end_blk) < 0) { ++ printk("ERROR!! can not find start_blk and end_blk\n"); ++ return -1; ++ } ++ ++ block_offset = block - start_blk; ++ for (j = start_blk; j <= end_blk;j++) { ++ if (((chip->bbt[j >> 2] >> ((j<<1) & 0x6)) & 0x3) == 0x0) { ++ if (!block_offset) ++ break; ++ block_offset--; ++ } else { ++ bad_block++; ++ } ++ } ++ if (j <= end_blk) { ++ return j; ++ } else { ++ // remap to the bad block ++ for (j = end_blk; bad_block > 0; j--) ++ { ++ if (((chip->bbt[j >> 2] >> ((j<<1) & 0x6)) & 0x3) != 0x0) ++ { ++ bad_block--; ++ if (bad_block <= block_offset) ++ return j; ++ } ++ } ++ } ++ ++ printk("Error!! 
block_remap error\n"); ++ return -1; ++} ++ ++int ++check_block_remap(struct mtd_info *mtd, int block) ++{ ++ if (shift_on_bbt) ++ return block_remap(mtd, block); ++ else ++ return block; ++} ++EXPORT_SYMBOL(check_block_remap); ++ ++ ++static int ++write_next_on_fail(struct mtd_info *mtd, char *write_buf, int page, int * to_blk) ++{ ++ struct nand_chip *chip = mtd->priv; ++ int i, j, to_page = 0, first_page; ++ char *buf, *oob; ++ int start_blk = 0, end_blk; ++ int mapped_block; ++ int page_per_block_bit = chip->phys_erase_shift - chip->page_shift; ++ int block = page >> page_per_block_bit; ++ ++ // find next available block in the same MTD partition ++ mapped_block = block_remap(mtd, block); ++ if (mapped_block == -1) ++ return NAND_STATUS_FAIL; ++ ++ get_start_end_block(mtd, block, &start_blk, &end_blk); ++ ++ buf = kzalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL | GFP_DMA); ++ if (buf == NULL) ++ return -1; ++ ++ oob = buf + mtd->writesize; ++ for ((*to_blk) = block + 1; (*to_blk) <= end_blk ; (*to_blk)++) { ++ if (nand_bbt_get(mtd, (*to_blk) << page_per_block_bit) == 0) { ++ int status; ++ status = mtk_nand_erase_hw(mtd, (*to_blk) << page_per_block_bit); ++ if (status & NAND_STATUS_FAIL) { ++ mtk_nand_block_markbad_hw(mtd, (*to_blk) << chip->phys_erase_shift); ++ nand_bbt_set(mtd, (*to_blk) << page_per_block_bit, 0x3); ++ } else { ++ /* good block */ ++ to_page = (*to_blk) << page_per_block_bit; ++ break; ++ } ++ } ++ } ++ ++ if (!to_page) { ++ kfree(buf); ++ return -1; ++ } ++ ++ first_page = (page >> page_per_block_bit) << page_per_block_bit; ++ for (i = 0; i < (1 << page_per_block_bit); i++) { ++ if ((first_page + i) != page) { ++ mtk_nand_read_oob_hw(mtd, chip, (first_page+i)); ++ for (j = 0; j < mtd->oobsize; j++) ++ if (chip->oob_poi[j] != (unsigned char)0xff) ++ break; ++ if (j < mtd->oobsize) { ++ mtk_nand_exec_read_page(mtd, (first_page+i), mtd->writesize, buf, oob); ++ memset(oob, 0xff, mtd->oobsize); ++ if (mtk_nand_exec_write_page(mtd, to_page + i, mtd->writesize, (u8 *)buf, oob) != 0) { ++ int ret, new_blk = 0; ++ nand_bbt_set(mtd, to_page, 0x3); ++ ret = write_next_on_fail(mtd, buf, to_page + i, &new_blk); ++ if (ret) { ++ kfree(buf); ++ mtk_nand_block_markbad_hw(mtd, to_page << chip->page_shift); ++ return ret; ++ } ++ mtk_nand_block_markbad_hw(mtd, to_page << chip->page_shift); ++ *to_blk = new_blk; ++ to_page = ((*to_blk) << page_per_block_bit); ++ } ++ } ++ } else { ++ memset(chip->oob_poi, 0xff, mtd->oobsize); ++ if (mtk_nand_exec_write_page(mtd, to_page + i, mtd->writesize, (u8 *)write_buf, chip->oob_poi) != 0) { ++ int ret, new_blk = 0; ++ nand_bbt_set(mtd, to_page, 0x3); ++ ret = write_next_on_fail(mtd, write_buf, to_page + i, &new_blk); ++ if (ret) { ++ kfree(buf); ++ mtk_nand_block_markbad_hw(mtd, to_page << chip->page_shift); ++ return ret; ++ } ++ mtk_nand_block_markbad_hw(mtd, to_page << chip->page_shift); ++ *to_blk = new_blk; ++ to_page = ((*to_blk) << page_per_block_bit); ++ } ++ } ++ } ++ ++ kfree(buf); ++ ++ return 0; ++} ++ ++static int ++mtk_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, uint32_t offset, ++ int data_len, const u8 * buf, int oob_required, int page, int cached, int raw) ++{ ++ int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift); ++ int block = page / page_per_block; ++ u16 page_in_block = page % page_per_block; ++ int mapped_block = block; ++ ++#if defined(MTK_NAND_BMT) ++ mapped_block = get_mapping_block_index(block); ++ // write bad index into oob ++ if (mapped_block != block) ++ 
set_bad_index_to_oob(chip->oob_poi, block); ++ else ++ set_bad_index_to_oob(chip->oob_poi, FAKE_INDEX); ++#else ++ if (shift_on_bbt) { ++ mapped_block = block_remap(mtd, block); ++ if (mapped_block == -1) ++ return NAND_STATUS_FAIL; ++ if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0) ++ return NAND_STATUS_FAIL; ++ } ++#endif ++ do { ++ if (mtk_nand_exec_write_page(mtd, page_in_block + mapped_block * page_per_block, mtd->writesize, (u8 *)buf, chip->oob_poi)) { ++ MSG(INIT, "write fail at block: 0x%x, page: 0x%x\n", mapped_block, page_in_block); ++#if defined(MTK_NAND_BMT) ++ if (update_bmt((page_in_block + mapped_block * page_per_block) << chip->page_shift, UPDATE_WRITE_FAIL, (u8 *) buf, chip->oob_poi)) { ++ MSG(INIT, "Update BMT success\n"); ++ return 0; ++ } else { ++ MSG(INIT, "Update BMT fail\n"); ++ return -EIO; ++ } ++#else ++ { ++ int new_blk; ++ nand_bbt_set(mtd, page_in_block + mapped_block * page_per_block, 0x3); ++ if (write_next_on_fail(mtd, (char *)buf, page_in_block + mapped_block * page_per_block, &new_blk) != 0) ++ { ++ mtk_nand_block_markbad_hw(mtd, (page_in_block + mapped_block * page_per_block) << chip->page_shift); ++ return NAND_STATUS_FAIL; ++ } ++ mtk_nand_block_markbad_hw(mtd, (page_in_block + mapped_block * page_per_block) << chip->page_shift); ++ break; ++ } ++#endif ++ } else ++ break; ++ } while(1); ++ ++ return 0; ++} ++ ++static void ++mtk_nand_command_bp(struct mtd_info *mtd, unsigned int command, int column, int page_addr) ++{ ++ struct nand_chip *nand = mtd->priv; ++ ++ switch (command) { ++ case NAND_CMD_SEQIN: ++ memset(g_kCMD.au1OOB, 0xFF, sizeof(g_kCMD.au1OOB)); ++ g_kCMD.pDataBuf = NULL; ++ g_kCMD.u4RowAddr = page_addr; ++ g_kCMD.u4ColAddr = column; ++ break; ++ ++ case NAND_CMD_PAGEPROG: ++ if (g_kCMD.pDataBuf || (0xFF != g_kCMD.au1OOB[nand_badblock_offset])) { ++ u8 *pDataBuf = g_kCMD.pDataBuf ? 
g_kCMD.pDataBuf : nand->buffers->databuf; ++ mtk_nand_exec_write_page(mtd, g_kCMD.u4RowAddr, mtd->writesize, pDataBuf, g_kCMD.au1OOB); ++ g_kCMD.u4RowAddr = (u32) - 1; ++ g_kCMD.u4OOBRowAddr = (u32) - 1; ++ } ++ break; ++ ++ case NAND_CMD_READOOB: ++ g_kCMD.u4RowAddr = page_addr; ++ g_kCMD.u4ColAddr = column + mtd->writesize; ++ break; ++ ++ case NAND_CMD_READ0: ++ g_kCMD.u4RowAddr = page_addr; ++ g_kCMD.u4ColAddr = column; ++ break; ++ ++ case NAND_CMD_ERASE1: ++ nand->state=FL_ERASING; ++ (void)mtk_nand_reset(); ++ mtk_nand_set_mode(CNFG_OP_ERASE); ++ (void)mtk_nand_set_command(NAND_CMD_ERASE1); ++ (void)mtk_nand_set_address(0, page_addr, 0, devinfo.addr_cycle - 2); ++ break; ++ ++ case NAND_CMD_ERASE2: ++ (void)mtk_nand_set_command(NAND_CMD_ERASE2); ++ while (DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY) ++ ; ++ break; ++ ++ case NAND_CMD_STATUS: ++ (void)mtk_nand_reset(); ++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW); ++ mtk_nand_set_mode(CNFG_OP_SRD); ++ mtk_nand_set_mode(CNFG_READ_EN); ++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB); ++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN); ++ (void)mtk_nand_set_command(NAND_CMD_STATUS); ++ NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_NOB_MASK); ++ mb(); ++ DRV_WriteReg16(NFI_CON_REG16, CON_NFI_SRD | (1 << CON_NFI_NOB_SHIFT)); ++ g_bcmdstatus = true; ++ break; ++ ++ case NAND_CMD_RESET: ++ (void)mtk_nand_reset(); ++ DRV_WriteReg16(NFI_INTR_EN_REG16, INTR_RST_DONE_EN); ++ (void)mtk_nand_set_command(NAND_CMD_RESET); ++ DRV_WriteReg16(NFI_BASE+0x44, 0xF1); ++ while(!(DRV_Reg16(NFI_INTR_REG16)&INTR_RST_DONE_EN)) ++ ; ++ break; ++ ++ case NAND_CMD_READID: ++ mtk_nand_reset(); ++ /* Disable HW ECC */ ++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN); ++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB); ++ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_READ_EN | CNFG_BYTE_RW); ++ (void)mtk_nand_reset(); ++ mb(); ++ mtk_nand_set_mode(CNFG_OP_SRD); ++ (void)mtk_nand_set_command(NAND_CMD_READID); ++ (void)mtk_nand_set_address(0, 0, 1, 0); ++ DRV_WriteReg16(NFI_CON_REG16, CON_NFI_SRD); ++ while (DRV_Reg32(NFI_STA_REG32) & STA_DATAR_STATE) ++ ; ++ break; ++ ++ default: ++ BUG(); ++ break; ++ } ++} ++ ++static void ++mtk_nand_select_chip(struct mtd_info *mtd, int chip) ++{ ++ if ((chip == -1) && (false == g_bInitDone)) { ++ struct nand_chip *nand = mtd->priv; ++ struct mtk_nand_host *host = nand->priv; ++ struct mtk_nand_host_hw *hw = host->hw; ++ u32 spare_per_sector = mtd->oobsize / (mtd->writesize / 512); ++ u32 ecc_bit = 4; ++ u32 spare_bit = PAGEFMT_SPARE_16; ++ ++ if (spare_per_sector >= 28) { ++ spare_bit = PAGEFMT_SPARE_28; ++ ecc_bit = 12; ++ spare_per_sector = 28; ++ } else if (spare_per_sector >= 27) { ++ spare_bit = PAGEFMT_SPARE_27; ++ ecc_bit = 8; ++ spare_per_sector = 27; ++ } else if (spare_per_sector >= 26) { ++ spare_bit = PAGEFMT_SPARE_26; ++ ecc_bit = 8; ++ spare_per_sector = 26; ++ } else if (spare_per_sector >= 16) { ++ spare_bit = PAGEFMT_SPARE_16; ++ ecc_bit = 4; ++ spare_per_sector = 16; ++ } else { ++ MSG(INIT, "[NAND]: NFI not support oobsize: %x\n", spare_per_sector); ++ ASSERT(0); ++ } ++ mtd->oobsize = spare_per_sector*(mtd->writesize/512); ++ MSG(INIT, "[NAND]select ecc bit:%d, sparesize :%d spare_per_sector=%d\n",ecc_bit,mtd->oobsize,spare_per_sector); ++ /* Setup PageFormat */ ++ if (4096 == mtd->writesize) { ++ NFI_SET_REG16(NFI_PAGEFMT_REG16, (spare_bit << PAGEFMT_SPARE_SHIFT) | PAGEFMT_4K); ++ nand->cmdfunc = mtk_nand_command_bp; ++ } else if (2048 == mtd->writesize) { ++ NFI_SET_REG16(NFI_PAGEFMT_REG16, (spare_bit << PAGEFMT_SPARE_SHIFT) | PAGEFMT_2K); ++ 
nand->cmdfunc = mtk_nand_command_bp; ++ } ++ ECC_Config(hw,ecc_bit); ++ g_bInitDone = true; ++ } ++ switch (chip) { ++ case -1: ++ break; ++ case 0: ++ case 1: ++ /* Jun Shen, 2011.04.13 */ ++ /* Note: MT6577 EVB NAND is mounted on CS0, but FPGA is CS1 */ ++ DRV_WriteReg16(NFI_CSEL_REG16, chip); ++ /* Jun Shen, 2011.04.13 */ ++ break; ++ } ++} ++ ++static uint8_t ++mtk_nand_read_byte(struct mtd_info *mtd) ++{ ++ uint8_t retval = 0; ++ ++ if (!mtk_nand_pio_ready()) { ++ printk("pio ready timeout\n"); ++ retval = false; ++ } ++ ++ if (g_bcmdstatus) { ++ retval = DRV_Reg8(NFI_DATAR_REG32); ++ NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_NOB_MASK); ++ mtk_nand_reset(); ++ if (g_bHwEcc) { ++ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN); ++ } else { ++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN); ++ } ++ g_bcmdstatus = false; ++ } else ++ retval = DRV_Reg8(NFI_DATAR_REG32); ++ ++ return retval; ++} ++ ++static void ++mtk_nand_read_buf(struct mtd_info *mtd, uint8_t * buf, int len) ++{ ++ struct nand_chip *nand = (struct nand_chip *)mtd->priv; ++ struct NAND_CMD *pkCMD = &g_kCMD; ++ u32 u4ColAddr = pkCMD->u4ColAddr; ++ u32 u4PageSize = mtd->writesize; ++ ++ if (u4ColAddr < u4PageSize) { ++ if ((u4ColAddr == 0) && (len >= u4PageSize)) { ++ mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, buf, pkCMD->au1OOB); ++ if (len > u4PageSize) { ++ u32 u4Size = min(len - u4PageSize, sizeof(pkCMD->au1OOB)); ++ memcpy(buf + u4PageSize, pkCMD->au1OOB, u4Size); ++ } ++ } else { ++ mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, nand->buffers->databuf, pkCMD->au1OOB); ++ memcpy(buf, nand->buffers->databuf + u4ColAddr, len); ++ } ++ pkCMD->u4OOBRowAddr = pkCMD->u4RowAddr; ++ } else { ++ u32 u4Offset = u4ColAddr - u4PageSize; ++ u32 u4Size = min(len - u4Offset, sizeof(pkCMD->au1OOB)); ++ if (pkCMD->u4OOBRowAddr != pkCMD->u4RowAddr) { ++ mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, nand->buffers->databuf, pkCMD->au1OOB); ++ pkCMD->u4OOBRowAddr = pkCMD->u4RowAddr; ++ } ++ memcpy(buf, pkCMD->au1OOB + u4Offset, u4Size); ++ } ++ pkCMD->u4ColAddr += len; ++} ++ ++static void ++mtk_nand_write_buf(struct mtd_info *mtd, const uint8_t * buf, int len) ++{ ++ struct NAND_CMD *pkCMD = &g_kCMD; ++ u32 u4ColAddr = pkCMD->u4ColAddr; ++ u32 u4PageSize = mtd->writesize; ++ int i4Size, i; ++ ++ if (u4ColAddr >= u4PageSize) { ++ u32 u4Offset = u4ColAddr - u4PageSize; ++ u8 *pOOB = pkCMD->au1OOB + u4Offset; ++ i4Size = min(len, (int)(sizeof(pkCMD->au1OOB) - u4Offset)); ++ for (i = 0; i < i4Size; i++) { ++ pOOB[i] &= buf[i]; ++ } ++ } else { ++ pkCMD->pDataBuf = (u8 *) buf; ++ } ++ ++ pkCMD->u4ColAddr += len; ++} ++ ++static int ++mtk_nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, const uint8_t * buf, int oob_required) ++{ ++ mtk_nand_write_buf(mtd, buf, mtd->writesize); ++ mtk_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize); ++ return 0; ++} ++ ++static int ++mtk_nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, uint8_t * buf, int oob_required, int page) ++{ ++ struct NAND_CMD *pkCMD = &g_kCMD; ++ u32 u4ColAddr = pkCMD->u4ColAddr; ++ u32 u4PageSize = mtd->writesize; ++ ++ if (u4ColAddr == 0) { ++ mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, buf, chip->oob_poi); ++ pkCMD->u4ColAddr += u4PageSize + mtd->oobsize; ++ } ++ ++ return 0; ++} ++ ++static int ++mtk_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip, u8 * buf, int page) ++{ ++ int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift); ++ int block = page / page_per_block; ++ 
u16 page_in_block = page % page_per_block; ++ int mapped_block = block; ++ ++#if defined (MTK_NAND_BMT) ++ mapped_block = get_mapping_block_index(block); ++ if (mtk_nand_exec_read_page(mtd, page_in_block + mapped_block * page_per_block, ++ mtd->writesize, buf, chip->oob_poi)) ++ return 0; ++#else ++ if (shift_on_bbt) { ++ mapped_block = block_remap(mtd, block); ++ if (mapped_block == -1) ++ return NAND_STATUS_FAIL; ++ if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0) ++ return NAND_STATUS_FAIL; ++ } ++ ++ if (mtk_nand_exec_read_page(mtd, page_in_block + mapped_block * page_per_block, mtd->writesize, buf, chip->oob_poi)) ++ return 0; ++ else ++ return -EIO; ++#endif ++} ++ ++int ++mtk_nand_erase_hw(struct mtd_info *mtd, int page) ++{ ++ struct nand_chip *chip = (struct nand_chip *)mtd->priv; ++ ++ chip->erase_cmd(mtd, page); ++ ++ return chip->waitfunc(mtd, chip); ++} ++ ++static int ++mtk_nand_erase(struct mtd_info *mtd, int page) ++{ ++ // get mapping ++ struct nand_chip *chip = mtd->priv; ++ int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift); ++ int page_in_block = page % page_per_block; ++ int block = page / page_per_block; ++ int mapped_block = block; ++ ++#if defined(MTK_NAND_BMT) ++ mapped_block = get_mapping_block_index(block); ++#else ++ if (shift_on_bbt) { ++ mapped_block = block_remap(mtd, block); ++ if (mapped_block == -1) ++ return NAND_STATUS_FAIL; ++ if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0) ++ return NAND_STATUS_FAIL; ++ } ++#endif ++ ++ do { ++ int status = mtk_nand_erase_hw(mtd, page_in_block + page_per_block * mapped_block); ++ ++ if (status & NAND_STATUS_FAIL) { ++#if defined (MTK_NAND_BMT) ++ if (update_bmt( (page_in_block + mapped_block * page_per_block) << chip->page_shift, ++ UPDATE_ERASE_FAIL, NULL, NULL)) ++ { ++ MSG(INIT, "Erase fail at block: 0x%x, update BMT success\n", mapped_block); ++ return 0; ++ } else { ++ MSG(INIT, "Erase fail at block: 0x%x, update BMT fail\n", mapped_block); ++ return NAND_STATUS_FAIL; ++ } ++#else ++ mtk_nand_block_markbad_hw(mtd, (page_in_block + mapped_block * page_per_block) << chip->page_shift); ++ nand_bbt_set(mtd, page_in_block + mapped_block * page_per_block, 0x3); ++ if (shift_on_bbt) { ++ mapped_block = block_remap(mtd, block); ++ if (mapped_block == -1) ++ return NAND_STATUS_FAIL; ++ if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0) ++ return NAND_STATUS_FAIL; ++ } else ++ return NAND_STATUS_FAIL; ++#endif ++ } else ++ break; ++ } while(1); ++ ++ return 0; ++} ++ ++static int ++mtk_nand_read_oob_raw(struct mtd_info *mtd, uint8_t * buf, int page_addr, int len) ++{ ++ struct nand_chip *chip = (struct nand_chip *)mtd->priv; ++ u32 col_addr = 0; ++ u32 sector = 0; ++ int res = 0; ++ u32 colnob = 2, rawnob = devinfo.addr_cycle - 2; ++ int randomread = 0; ++ int read_len = 0; ++ int sec_num = 1<<(chip->page_shift-9); ++ int spare_per_sector = mtd->oobsize/sec_num; ++ ++ if (len > NAND_MAX_OOBSIZE || len % OOB_AVAI_PER_SECTOR || !buf) { ++ printk(KERN_WARNING "[%s] invalid parameter, len: %d, buf: %p\n", __FUNCTION__, len, buf); ++ return -EINVAL; ++ } ++ if (len > spare_per_sector) ++ randomread = 1; ++ if (!randomread || !(devinfo.advancedmode & RAMDOM_READ)) { ++ while (len > 0) { ++ read_len = min(len, spare_per_sector); ++ col_addr = NAND_SECTOR_SIZE + sector * (NAND_SECTOR_SIZE + spare_per_sector); // TODO: Fix this hard-code 16 ++ if (!mtk_nand_ready_for_read(chip, page_addr, 
col_addr, false, NULL)) { ++ printk(KERN_WARNING "mtk_nand_ready_for_read return failed\n"); ++ res = -EIO; ++ goto error; ++ } ++ if (!mtk_nand_mcu_read_data(buf + spare_per_sector * sector, read_len)) { ++ printk(KERN_WARNING "mtk_nand_mcu_read_data return failed\n"); ++ res = -EIO; ++ goto error; ++ } ++ mtk_nand_check_RW_count(read_len); ++ mtk_nand_stop_read(); ++ sector++; ++ len -= read_len; ++ } ++ } else { ++ col_addr = NAND_SECTOR_SIZE; ++ if (chip->options & NAND_BUSWIDTH_16) ++ col_addr /= 2; ++ if (!mtk_nand_reset()) ++ goto error; ++ mtk_nand_set_mode(0x6000); ++ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_READ_EN); ++ DRV_WriteReg16(NFI_CON_REG16, 4 << CON_NFI_SEC_SHIFT); ++ ++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB); ++ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN); ++ ++ mtk_nand_set_autoformat(false); ++ ++ if (!mtk_nand_set_command(NAND_CMD_READ0)) ++ goto error; ++ //1 FIXED ME: For Any Kind of AddrCycle ++ if (!mtk_nand_set_address(col_addr, page_addr, colnob, rawnob)) ++ goto error; ++ if (!mtk_nand_set_command(NAND_CMD_READSTART)) ++ goto error; ++ if (!mtk_nand_status_ready(STA_NAND_BUSY)) ++ goto error; ++ read_len = min(len, spare_per_sector); ++ if (!mtk_nand_mcu_read_data(buf + spare_per_sector * sector, read_len)) { ++ printk(KERN_WARNING "mtk_nand_mcu_read_data return failed first 16\n"); ++ res = -EIO; ++ goto error; ++ } ++ sector++; ++ len -= read_len; ++ mtk_nand_stop_read(); ++ while (len > 0) { ++ read_len = min(len, spare_per_sector); ++ if (!mtk_nand_set_command(0x05)) ++ goto error; ++ col_addr = NAND_SECTOR_SIZE + sector * (NAND_SECTOR_SIZE + spare_per_sector); ++ if (chip->options & NAND_BUSWIDTH_16) ++ col_addr /= 2; ++ DRV_WriteReg32(NFI_COLADDR_REG32, col_addr); ++ DRV_WriteReg16(NFI_ADDRNOB_REG16, 2); ++ DRV_WriteReg16(NFI_CON_REG16, 4 << CON_NFI_SEC_SHIFT); ++ if (!mtk_nand_status_ready(STA_ADDR_STATE)) ++ goto error; ++ if (!mtk_nand_set_command(0xE0)) ++ goto error; ++ if (!mtk_nand_status_ready(STA_NAND_BUSY)) ++ goto error; ++ if (!mtk_nand_mcu_read_data(buf + spare_per_sector * sector, read_len)) { ++ printk(KERN_WARNING "mtk_nand_mcu_read_data return failed first 16\n"); ++ res = -EIO; ++ goto error; ++ } ++ mtk_nand_stop_read(); ++ sector++; ++ len -= read_len; ++ } ++ } ++error: ++ NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_BRD); ++ return res; ++} ++ ++static int ++mtk_nand_write_oob_raw(struct mtd_info *mtd, const uint8_t * buf, int page_addr, int len) ++{ ++ struct nand_chip *chip = mtd->priv; ++ u32 col_addr = 0; ++ u32 sector = 0; ++ int write_len = 0; ++ int status; ++ int sec_num = 1<<(chip->page_shift-9); ++ int spare_per_sector = mtd->oobsize/sec_num; ++ ++ if (len > NAND_MAX_OOBSIZE || len % OOB_AVAI_PER_SECTOR || !buf) { ++ printk(KERN_WARNING "[%s] invalid parameter, len: %d, buf: %p\n", __FUNCTION__, len, buf); ++ return -EINVAL; ++ } ++ ++ while (len > 0) { ++ write_len = min(len, spare_per_sector); ++ col_addr = sector * (NAND_SECTOR_SIZE + spare_per_sector) + NAND_SECTOR_SIZE; ++ if (!mtk_nand_ready_for_write(chip, page_addr, col_addr, false, NULL)) ++ return -EIO; ++ if (!mtk_nand_mcu_write_data(mtd, buf + sector * spare_per_sector, write_len)) ++ return -EIO; ++ (void)mtk_nand_check_RW_count(write_len); ++ NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_BWR); ++ (void)mtk_nand_set_command(NAND_CMD_PAGEPROG); ++ while (DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY) ++ ; ++ status = chip->waitfunc(mtd, chip); ++ if (status & NAND_STATUS_FAIL) { ++ printk(KERN_INFO "status: %d\n", status); ++ return -EIO; ++ } ++ len -= write_len; ++ sector++; ++ } ++ 
++ return 0; ++} ++ ++static int ++mtk_nand_write_oob_hw(struct mtd_info *mtd, struct nand_chip *chip, int page) ++{ ++ int i, iter; ++ int sec_num = 1<<(chip->page_shift-9); ++ int spare_per_sector = mtd->oobsize/sec_num; ++ ++ memcpy(local_oob_buf, chip->oob_poi, mtd->oobsize); ++ ++ // copy ecc data ++ for (i = 0; i < chip->ecc.layout->eccbytes; i++) { ++ iter = (i / (spare_per_sector-OOB_AVAI_PER_SECTOR)) * spare_per_sector + OOB_AVAI_PER_SECTOR + i % (spare_per_sector-OOB_AVAI_PER_SECTOR); ++ local_oob_buf[iter] = chip->oob_poi[chip->ecc.layout->eccpos[i]]; ++ } ++ ++ // copy FDM data ++ for (i = 0; i < sec_num; i++) ++ memcpy(&local_oob_buf[i * spare_per_sector], &chip->oob_poi[i * OOB_AVAI_PER_SECTOR], OOB_AVAI_PER_SECTOR); ++ ++ return mtk_nand_write_oob_raw(mtd, local_oob_buf, page, mtd->oobsize); ++} ++ ++static int mtk_nand_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page) ++{ ++ int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift); ++ int block = page / page_per_block; ++ u16 page_in_block = page % page_per_block; ++ int mapped_block = block; ++ ++#if defined(MTK_NAND_BMT) ++ mapped_block = get_mapping_block_index(block); ++ // write bad index into oob ++ if (mapped_block != block) ++ set_bad_index_to_oob(chip->oob_poi, block); ++ else ++ set_bad_index_to_oob(chip->oob_poi, FAKE_INDEX); ++#else ++ if (shift_on_bbt) ++ { ++ mapped_block = block_remap(mtd, block); ++ if (mapped_block == -1) ++ return NAND_STATUS_FAIL; ++ if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0) ++ return NAND_STATUS_FAIL; ++ } ++#endif ++ do { ++ if (mtk_nand_write_oob_hw(mtd, chip, page_in_block + mapped_block * page_per_block /* page */)) { ++ MSG(INIT, "write oob fail at block: 0x%x, page: 0x%x\n", mapped_block, page_in_block); ++#if defined(MTK_NAND_BMT) ++ if (update_bmt((page_in_block + mapped_block * page_per_block) << chip->page_shift, ++ UPDATE_WRITE_FAIL, NULL, chip->oob_poi)) ++ { ++ MSG(INIT, "Update BMT success\n"); ++ return 0; ++ } else { ++ MSG(INIT, "Update BMT fail\n"); ++ return -EIO; ++ } ++#else ++ mtk_nand_block_markbad_hw(mtd, (page_in_block + mapped_block * page_per_block) << chip->page_shift); ++ nand_bbt_set(mtd, page_in_block + mapped_block * page_per_block, 0x3); ++ if (shift_on_bbt) { ++ mapped_block = block_remap(mtd, mapped_block); ++ if (mapped_block == -1) ++ return NAND_STATUS_FAIL; ++ if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0) ++ return NAND_STATUS_FAIL; ++ } else { ++ return NAND_STATUS_FAIL; ++ } ++#endif ++ } else ++ break; ++ } while (1); ++ ++ return 0; ++} ++ ++int ++mtk_nand_block_markbad_hw(struct mtd_info *mtd, loff_t offset) ++{ ++ struct nand_chip *chip = mtd->priv; ++ int block = (int)offset >> chip->phys_erase_shift; ++ int page = block * (1 << (chip->phys_erase_shift - chip->page_shift)); ++ u8 buf[8]; ++ ++ memset(buf, 0xFF, 8); ++ buf[0] = 0; ++ return mtk_nand_write_oob_raw(mtd, buf, page, 8); ++} ++ ++static int ++mtk_nand_block_markbad(struct mtd_info *mtd, loff_t offset) ++{ ++ struct nand_chip *chip = mtd->priv; ++ int block = (int)offset >> chip->phys_erase_shift; ++ int ret; ++ int mapped_block = block; ++ ++ nand_get_device(chip, mtd, FL_WRITING); ++ ++#if defined(MTK_NAND_BMT) ++ mapped_block = get_mapping_block_index(block); ++ ret = mtk_nand_block_markbad_hw(mtd, mapped_block << chip->phys_erase_shift); ++#else ++ if (shift_on_bbt) { ++ mapped_block = block_remap(mtd, block); ++ if (mapped_block == -1) { ++ printk("NAND 
mark bad failed\n"); ++ nand_release_device(mtd); ++ return NAND_STATUS_FAIL; ++ } ++ } ++ ret = mtk_nand_block_markbad_hw(mtd, mapped_block << chip->phys_erase_shift); ++#endif ++ nand_release_device(mtd); ++ ++ return ret; ++} ++ ++int ++mtk_nand_read_oob_hw(struct mtd_info *mtd, struct nand_chip *chip, int page) ++{ ++ int i; ++ u8 iter = 0; ++ ++ int sec_num = 1<<(chip->page_shift-9); ++ int spare_per_sector = mtd->oobsize/sec_num; ++ ++ if (mtk_nand_read_oob_raw(mtd, chip->oob_poi, page, mtd->oobsize)) { ++ printk(KERN_ERR "[%s]mtk_nand_read_oob_raw return failed\n", __FUNCTION__); ++ return -EIO; ++ } ++ ++ // adjust to ecc physical layout to memory layout ++ /*********************************************************/ ++ /* FDM0 | ECC0 | FDM1 | ECC1 | FDM2 | ECC2 | FDM3 | ECC3 */ ++ /* 8B | 8B | 8B | 8B | 8B | 8B | 8B | 8B */ ++ /*********************************************************/ ++ ++ memcpy(local_oob_buf, chip->oob_poi, mtd->oobsize); ++ // copy ecc data ++ for (i = 0; i < chip->ecc.layout->eccbytes; i++) { ++ iter = (i / (spare_per_sector-OOB_AVAI_PER_SECTOR)) * spare_per_sector + OOB_AVAI_PER_SECTOR + i % (spare_per_sector-OOB_AVAI_PER_SECTOR); ++ chip->oob_poi[chip->ecc.layout->eccpos[i]] = local_oob_buf[iter]; ++ } ++ ++ // copy FDM data ++ for (i = 0; i < sec_num; i++) { ++ memcpy(&chip->oob_poi[i * OOB_AVAI_PER_SECTOR], &local_oob_buf[i * spare_per_sector], OOB_AVAI_PER_SECTOR); ++ } ++ ++ return 0; ++} ++ ++static int ++mtk_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip, int page) ++{ ++ int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift); ++ int block = page / page_per_block; ++ u16 page_in_block = page % page_per_block; ++ int mapped_block = block; ++ ++#if defined (MTK_NAND_BMT) ++ mapped_block = get_mapping_block_index(block); ++ mtk_nand_read_oob_hw(mtd, chip, page_in_block + mapped_block * page_per_block); ++#else ++ if (shift_on_bbt) { ++ mapped_block = block_remap(mtd, block); ++ if (mapped_block == -1) ++ return NAND_STATUS_FAIL; ++ // allow to read oob even if the block is bad ++ } ++ if (mtk_nand_read_oob_hw(mtd, chip, page_in_block + mapped_block * page_per_block)!=0) ++ return -1; ++#endif ++ return 0; ++} ++ ++int ++mtk_nand_block_bad_hw(struct mtd_info *mtd, loff_t ofs) ++{ ++ struct nand_chip *chip = (struct nand_chip *)mtd->priv; ++ int page_addr = (int)(ofs >> chip->page_shift); ++ unsigned int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift); ++ unsigned char oob_buf[8]; ++ ++ page_addr &= ~(page_per_block - 1); ++ if (mtk_nand_read_oob_raw(mtd, oob_buf, page_addr, sizeof(oob_buf))) { ++ printk(KERN_WARNING "mtk_nand_read_oob_raw return error\n"); ++ return 1; ++ } ++ ++ if (oob_buf[0] != 0xff) { ++ printk(KERN_WARNING "Bad block detected at 0x%x, oob_buf[0] is 0x%x\n", page_addr, oob_buf[0]); ++ // dump_nfi(); ++ return 1; ++ } ++ ++ return 0; ++} ++ ++static int ++mtk_nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip) ++{ ++ int chipnr = 0; ++ struct nand_chip *chip = (struct nand_chip *)mtd->priv; ++ int block = (int)ofs >> chip->phys_erase_shift; ++ int mapped_block = block; ++ int ret; ++ ++ if (getchip) { ++ chipnr = (int)(ofs >> chip->chip_shift); ++ nand_get_device(chip, mtd, FL_READING); ++ /* Select the NAND device */ ++ chip->select_chip(mtd, chipnr); ++ } ++ ++#if defined(MTK_NAND_BMT) ++ mapped_block = get_mapping_block_index(block); ++#else ++ if (shift_on_bbt) { ++ mapped_block = block_remap(mtd, block); ++ if (mapped_block == -1) { ++ if (getchip) ++ nand_release_device(mtd); 
++ return NAND_STATUS_FAIL; ++ } ++ } ++#endif ++ ++ ret = mtk_nand_block_bad_hw(mtd, mapped_block << chip->phys_erase_shift); ++#if defined (MTK_NAND_BMT) ++ if (ret) { ++ MSG(INIT, "Unmapped bad block: 0x%x\n", mapped_block); ++ if (update_bmt(mapped_block << chip->phys_erase_shift, UPDATE_UNMAPPED_BLOCK, NULL, NULL)) { ++ MSG(INIT, "Update BMT success\n"); ++ ret = 0; ++ } else { ++ MSG(INIT, "Update BMT fail\n"); ++ ret = 1; ++ } ++ } ++#endif ++ ++ if (getchip) ++ nand_release_device(mtd); ++ ++ return ret; ++} ++ ++#ifdef CONFIG_MTD_NAND_VERIFY_WRITE ++char gacBuf[4096 + 288]; ++ ++static int ++mtk_nand_verify_buf(struct mtd_info *mtd, const uint8_t * buf, int len) ++{ ++ struct nand_chip *chip = (struct nand_chip *)mtd->priv; ++ struct NAND_CMD *pkCMD = &g_kCMD; ++ u32 u4PageSize = mtd->writesize; ++ u32 *pSrc, *pDst; ++ int i; ++ ++ mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, gacBuf, gacBuf + u4PageSize); ++ ++ pSrc = (u32 *) buf; ++ pDst = (u32 *) gacBuf; ++ len = len / sizeof(u32); ++ for (i = 0; i < len; ++i) { ++ if (*pSrc != *pDst) { ++ MSG(VERIFY, "mtk_nand_verify_buf page fail at page %d\n", pkCMD->u4RowAddr); ++ return -1; ++ } ++ pSrc++; ++ pDst++; ++ } ++ ++ pSrc = (u32 *) chip->oob_poi; ++ pDst = (u32 *) (gacBuf + u4PageSize); ++ ++ if ((pSrc[0] != pDst[0]) || (pSrc[1] != pDst[1]) || (pSrc[2] != pDst[2]) || (pSrc[3] != pDst[3]) || (pSrc[4] != pDst[4]) || (pSrc[5] != pDst[5])) { ++ // TODO: Ask Designer Why? ++ //(pSrc[6] != pDst[6]) || (pSrc[7] != pDst[7])) ++ MSG(VERIFY, "mtk_nand_verify_buf oob fail at page %d\n", pkCMD->u4RowAddr); ++ MSG(VERIFY, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", pSrc[0], pSrc[1], pSrc[2], pSrc[3], pSrc[4], pSrc[5], pSrc[6], pSrc[7]); ++ MSG(VERIFY, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", pDst[0], pDst[1], pDst[2], pDst[3], pDst[4], pDst[5], pDst[6], pDst[7]); ++ return -1; ++ } ++ return 0; ++} ++#endif ++ ++static void ++mtk_nand_init_hw(struct mtk_nand_host *host) { ++ struct mtk_nand_host_hw *hw = host->hw; ++ u32 data; ++ ++ data = DRV_Reg32(RALINK_SYSCTL_BASE+0x60); ++ data &= ~((0x3<<18)|(0x3<<16)); ++ data |= ((0x2<<18) |(0x2<<16)); ++ DRV_WriteReg32(RALINK_SYSCTL_BASE+0x60, data); ++ ++ MSG(INIT, "Enable NFI Clock\n"); ++ nand_enable_clock(); ++ ++ g_bInitDone = false; ++ g_kCMD.u4OOBRowAddr = (u32) - 1; ++ ++ /* Set default NFI access timing control */ ++ DRV_WriteReg32(NFI_ACCCON_REG32, hw->nfi_access_timing); ++ DRV_WriteReg16(NFI_CNFG_REG16, 0); ++ DRV_WriteReg16(NFI_PAGEFMT_REG16, 0); ++ ++ /* Reset the state machine and data FIFO, because flushing FIFO */ ++ (void)mtk_nand_reset(); ++ ++ /* Set the ECC engine */ ++ if (hw->nand_ecc_mode == NAND_ECC_HW) { ++ MSG(INIT, "%s : Use HW ECC\n", MODULE_NAME); ++ if (g_bHwEcc) ++ NFI_SET_REG32(NFI_CNFG_REG16, CNFG_HW_ECC_EN); ++ ECC_Config(host->hw,4); ++ mtk_nand_configure_fdm(8); ++ mtk_nand_configure_lock(); ++ } ++ ++ NFI_SET_REG16(NFI_IOCON_REG16, 0x47); ++} ++ ++static int mtk_nand_dev_ready(struct mtd_info *mtd) ++{ ++ return !(DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY); ++} ++ ++#define FACT_BBT_BLOCK_NUM 32 // use the latest 32 BLOCK for factory bbt table ++#define FACT_BBT_OOB_SIGNATURE 1 ++#define FACT_BBT_SIGNATURE_LEN 7 ++const u8 oob_signature[] = "mtknand"; ++static u8 *fact_bbt = 0; ++static u32 bbt_size = 0; ++ ++static int ++read_fact_bbt(struct mtd_info *mtd, unsigned int page) ++{ ++ struct nand_chip *chip = mtd->priv; ++ ++ // read oob ++ if (mtk_nand_read_oob_hw(mtd, chip, page)==0) ++ { ++ if (chip->oob_poi[nand_badblock_offset] != 0xFF) ++ { 
++ printk("Bad Block on Page %x\n", page); ++ return -1; ++ } ++ if (memcmp(&chip->oob_poi[FACT_BBT_OOB_SIGNATURE], oob_signature, FACT_BBT_SIGNATURE_LEN) != 0) ++ { ++ printk("compare signature failed %x\n", page); ++ return -1; ++ } ++ if (mtk_nand_exec_read_page(mtd, page, mtd->writesize, chip->buffers->databuf, chip->oob_poi)) ++ { ++ printk("Signature matched and data read!\n"); ++ memcpy(fact_bbt, chip->buffers->databuf, (bbt_size <= mtd->writesize)? bbt_size:mtd->writesize); ++ return 0; ++ } ++ ++ } ++ printk("failed at page %x\n", page); ++ return -1; ++} ++ ++static int ++load_fact_bbt(struct mtd_info *mtd) ++{ ++ struct nand_chip *chip = mtd->priv; ++ int i; ++ u32 total_block; ++ ++ total_block = 1 << (chip->chip_shift - chip->phys_erase_shift); ++ bbt_size = total_block >> 2; ++ ++ if ((!fact_bbt) && (bbt_size)) ++ fact_bbt = (u8 *)kmalloc(bbt_size, GFP_KERNEL); ++ if (!fact_bbt) ++ return -1; ++ ++ for (i = total_block - 1; i >= (total_block - FACT_BBT_BLOCK_NUM); i--) ++ { ++ if (read_fact_bbt(mtd, i << (chip->phys_erase_shift - chip->page_shift)) == 0) ++ { ++ printk("load_fact_bbt success %d\n", i); ++ return 0; ++ } ++ ++ } ++ printk("load_fact_bbt failed\n"); ++ return -1; ++} ++ ++static int ++mtk_nand_probe(struct platform_device *pdev) ++{ ++ struct mtd_part_parser_data ppdata; ++ struct mtk_nand_host_hw *hw; ++ struct mtd_info *mtd; ++ struct nand_chip *nand_chip; ++ u8 ext_id1, ext_id2, ext_id3; ++ int err = 0; ++ int id; ++ u32 ext_id; ++ int i; ++ u32 data; ++ ++ data = DRV_Reg32(RALINK_SYSCTL_BASE+0x60); ++ data &= ~((0x3<<18)|(0x3<<16)); ++ data |= ((0x2<<18) |(0x2<<16)); ++ DRV_WriteReg32(RALINK_SYSCTL_BASE+0x60, data); ++ ++ hw = &mt7621_nand_hw, ++ BUG_ON(!hw); ++ /* Allocate memory for the device structure (and zero it) */ ++ host = kzalloc(sizeof(struct mtk_nand_host), GFP_KERNEL); ++ if (!host) { ++ MSG(INIT, "mtk_nand: failed to allocate device structure.\n"); ++ return -ENOMEM; ++ } ++ ++ /* Allocate memory for 16 byte aligned buffer */ ++ local_buffer_16_align = local_buffer + 16 - ((u32) local_buffer % 16); ++ printk(KERN_INFO "Allocate 16 byte aligned buffer: %p\n", local_buffer_16_align); ++ host->hw = hw; ++ ++ /* init mtd data structure */ ++ nand_chip = &host->nand_chip; ++ nand_chip->priv = host; /* link the private data structures */ ++ ++ mtd = &host->mtd; ++ mtd->priv = nand_chip; ++ mtd->owner = THIS_MODULE; ++ mtd->name = "MT7621-NAND"; ++ ++ hw->nand_ecc_mode = NAND_ECC_HW; ++ ++ /* Set address of NAND IO lines */ ++ nand_chip->IO_ADDR_R = (void __iomem *)NFI_DATAR_REG32; ++ nand_chip->IO_ADDR_W = (void __iomem *)NFI_DATAW_REG32; ++ nand_chip->chip_delay = 20; /* 20us command delay time */ ++ nand_chip->ecc.mode = hw->nand_ecc_mode; /* enable ECC */ ++ nand_chip->ecc.strength = 1; ++ nand_chip->read_byte = mtk_nand_read_byte; ++ nand_chip->read_buf = mtk_nand_read_buf; ++ nand_chip->write_buf = mtk_nand_write_buf; ++#ifdef CONFIG_MTD_NAND_VERIFY_WRITE ++ nand_chip->verify_buf = mtk_nand_verify_buf; ++#endif ++ nand_chip->select_chip = mtk_nand_select_chip; ++ nand_chip->dev_ready = mtk_nand_dev_ready; ++ nand_chip->cmdfunc = mtk_nand_command_bp; ++ nand_chip->ecc.read_page = mtk_nand_read_page_hwecc; ++ nand_chip->ecc.write_page = mtk_nand_write_page_hwecc; ++ ++ nand_chip->ecc.layout = &nand_oob_64; ++ nand_chip->ecc.size = hw->nand_ecc_size; //2048 ++ nand_chip->ecc.bytes = hw->nand_ecc_bytes; //32 ++ ++ // For BMT, we need to revise driver architecture ++ nand_chip->write_page = mtk_nand_write_page; ++ nand_chip->ecc.write_oob = 
mtk_nand_write_oob; ++ nand_chip->block_markbad = mtk_nand_block_markbad; // need to add nand_get_device()/nand_release_device(). ++ // nand_chip->erase = mtk_nand_erase; ++ // nand_chip->read_page = mtk_nand_read_page; ++ nand_chip->ecc.read_oob = mtk_nand_read_oob; ++ nand_chip->block_bad = mtk_nand_block_bad; ++ ++ //Qwert:Add for Uboot ++ mtk_nand_init_hw(host); ++ /* Select the device */ ++ nand_chip->select_chip(mtd, NFI_DEFAULT_CS); ++ ++ /* ++ * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx) ++ * after power-up ++ */ ++ nand_chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1); ++ ++ memset(&devinfo, 0 , sizeof(flashdev_info)); ++ ++ /* Send the command for reading device ID */ ++ ++ nand_chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1); ++ ++ /* Read manufacturer and device IDs */ ++ manu_id = nand_chip->read_byte(mtd); ++ dev_id = nand_chip->read_byte(mtd); ++ id = dev_id | (manu_id << 8); ++ ext_id1 = nand_chip->read_byte(mtd); ++ ext_id2 = nand_chip->read_byte(mtd); ++ ext_id3 = nand_chip->read_byte(mtd); ++ ext_id = ext_id1 << 16 | ext_id2 << 8 | ext_id3; ++ if (!get_device_info(id, ext_id, &devinfo)) { ++ u32 chip_mode = RALINK_REG(RALINK_SYSCTL_BASE+0x010)&0x0F; ++ MSG(INIT, "Not Support this Device! \r\n"); ++ memset(&devinfo, 0 , sizeof(flashdev_info)); ++ MSG(INIT, "chip_mode=%08X\n",chip_mode); ++ ++ /* apply bootstrap first */ ++ devinfo.addr_cycle = 5; ++ devinfo.iowidth = 8; ++ ++ switch (chip_mode) { ++ case 10: ++ devinfo.pagesize = 2048; ++ devinfo.sparesize = 128; ++ devinfo.totalsize = 128; ++ devinfo.blocksize = 128; ++ break; ++ case 11: ++ devinfo.pagesize = 4096; ++ devinfo.sparesize = 128; ++ devinfo.totalsize = 1024; ++ devinfo.blocksize = 256; ++ break; ++ case 12: ++ devinfo.pagesize = 4096; ++ devinfo.sparesize = 224; ++ devinfo.totalsize = 2048; ++ devinfo.blocksize = 512; ++ break; ++ default: ++ case 1: ++ devinfo.pagesize = 2048; ++ devinfo.sparesize = 64; ++ devinfo.totalsize = 128; ++ devinfo.blocksize = 128; ++ break; ++ } ++ ++ devinfo.timmingsetting = NFI_DEFAULT_ACCESS_TIMING; ++ devinfo.devciename[0] = 'U'; ++ devinfo.advancedmode = 0; ++ } ++ mtd->writesize = devinfo.pagesize; ++ mtd->erasesize = (devinfo.blocksize<<10); ++ mtd->oobsize = devinfo.sparesize; ++ ++ nand_chip->chipsize = (devinfo.totalsize<<20); ++ nand_chip->page_shift = ffs(mtd->writesize) - 1; ++ nand_chip->pagemask = (nand_chip->chipsize >> nand_chip->page_shift) - 1; ++ nand_chip->phys_erase_shift = ffs(mtd->erasesize) - 1; ++ nand_chip->chip_shift = ffs(nand_chip->chipsize) - 1;//0x1C;//ffs(nand_chip->chipsize) - 1; ++ nand_chip->oob_poi = nand_chip->buffers->databuf + mtd->writesize; ++ nand_chip->badblockpos = 0; ++ ++ if (devinfo.pagesize == 4096) ++ nand_chip->ecc.layout = &nand_oob_128; ++ else if (devinfo.pagesize == 2048) ++ nand_chip->ecc.layout = &nand_oob_64; ++ else if (devinfo.pagesize == 512) ++ nand_chip->ecc.layout = &nand_oob_16; ++ ++ nand_chip->ecc.layout->eccbytes = devinfo.sparesize-OOB_AVAI_PER_SECTOR*(devinfo.pagesize/NAND_SECTOR_SIZE); ++ for (i = 0; i < nand_chip->ecc.layout->eccbytes; i++) ++ nand_chip->ecc.layout->eccpos[i]=OOB_AVAI_PER_SECTOR*(devinfo.pagesize/NAND_SECTOR_SIZE)+i; ++ ++ MSG(INIT, "Support this Device in MTK table! 
%x \r\n", id); ++ hw->nfi_bus_width = devinfo.iowidth; ++ DRV_WriteReg32(NFI_ACCCON_REG32, devinfo.timmingsetting); ++ ++ /* 16-bit bus width */ ++ if (hw->nfi_bus_width == 16) { ++ MSG(INIT, "%s : Set the 16-bit I/O settings!\n", MODULE_NAME); ++ nand_chip->options |= NAND_BUSWIDTH_16; ++ } ++ mtd->oobsize = devinfo.sparesize; ++ hw->nfi_cs_num = 1; ++ ++ /* Scan to find existance of the device */ ++ if (nand_scan(mtd, hw->nfi_cs_num)) { ++ MSG(INIT, "%s : nand_scan fail.\n", MODULE_NAME); ++ err = -ENXIO; ++ goto out; ++ } ++ ++ g_page_size = mtd->writesize; ++ platform_set_drvdata(pdev, host); ++ if (hw->nfi_bus_width == 16) { ++ NFI_SET_REG16(NFI_PAGEFMT_REG16, PAGEFMT_DBYTE_EN); ++ } ++ ++ nand_chip->select_chip(mtd, 0); ++#if defined(MTK_NAND_BMT) ++ nand_chip->chipsize -= (BMT_POOL_SIZE) << nand_chip->phys_erase_shift; ++#endif ++ mtd->size = nand_chip->chipsize; ++ ++ CFG_BLOCKSIZE = mtd->erasesize; ++ ++#if defined(MTK_NAND_BMT) ++ if (!g_bmt) { ++ if (!(g_bmt = init_bmt(nand_chip, BMT_POOL_SIZE))) { ++ MSG(INIT, "Error: init bmt failed\n"); ++ return 0; ++ } ++ } ++#endif ++ ++ ppdata.of_node = pdev->dev.of_node; ++ err = mtd_device_parse_register(mtd, probe_types, &ppdata, ++ NULL, 0); ++ if (!err) { ++ MSG(INIT, "[mtk_nand] probe successfully!\n"); ++ nand_disable_clock(); ++ shift_on_bbt = 1; ++ if (load_fact_bbt(mtd) == 0) { ++ int i; ++ for (i = 0; i < 0x100; i++) ++ nand_chip->bbt[i] |= fact_bbt[i]; ++ } ++ ++ return err; ++ } ++ ++out: ++ MSG(INIT, "[NFI] mtk_nand_probe fail, err = %d!\n", err); ++ nand_release(mtd); ++ platform_set_drvdata(pdev, NULL); ++ kfree(host); ++ nand_disable_clock(); ++ return err; ++} ++ ++static int ++mtk_nand_remove(struct platform_device *pdev) ++{ ++ struct mtk_nand_host *host = platform_get_drvdata(pdev); ++ struct mtd_info *mtd = &host->mtd; ++ ++ nand_release(mtd); ++ kfree(host); ++ nand_disable_clock(); ++ ++ return 0; ++} ++ ++static const struct of_device_id mt7621_nand_match[] = { ++ { .compatible = "mtk,mt7621-nand" }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(of, mt7621_nand_match); ++ ++static struct platform_driver mtk_nand_driver = { ++ .probe = mtk_nand_probe, ++ .remove = mtk_nand_remove, ++ .driver = { ++ .name = "MT7621-NAND", ++ .owner = THIS_MODULE, ++ .of_match_table = mt7621_nand_match, ++ }, ++}; ++ ++static int __init ++mtk_nand_init(void) ++{ ++ printk("MediaTek Nand driver init, version %s\n", VERSION); ++ ++ return platform_driver_register(&mtk_nand_driver); ++} ++ ++static void __exit ++mtk_nand_exit(void) ++{ ++ platform_driver_unregister(&mtk_nand_driver); ++} ++ ++module_init(mtk_nand_init); ++module_exit(mtk_nand_exit); ++MODULE_LICENSE("GPL"); +--- /dev/null ++++ b/drivers/mtd/nand/mtk_nand.h +@@ -0,0 +1,452 @@ ++#ifndef __MTK_NAND_H ++#define __MTK_NAND_H ++ ++#define RALINK_NAND_CTRL_BASE 0xBE003000 ++#define RALINK_SYSCTL_BASE 0xBE000000 ++#define RALINK_NANDECC_CTRL_BASE 0xBE003800 ++/******************************************************************************* ++ * NFI Register Definition ++ *******************************************************************************/ ++ ++#define NFI_CNFG_REG16 ((volatile P_U16)(NFI_BASE+0x0000)) ++#define NFI_PAGEFMT_REG16 ((volatile P_U16)(NFI_BASE+0x0004)) ++#define NFI_CON_REG16 ((volatile P_U16)(NFI_BASE+0x0008)) ++#define NFI_ACCCON_REG32 ((volatile P_U32)(NFI_BASE+0x000C)) ++#define NFI_INTR_EN_REG16 ((volatile P_U16)(NFI_BASE+0x0010)) ++#define NFI_INTR_REG16 ((volatile P_U16)(NFI_BASE+0x0014)) ++ ++#define NFI_CMD_REG16 ((volatile P_U16)(NFI_BASE+0x0020)) ++ 
++#define NFI_ADDRNOB_REG16 ((volatile P_U16)(NFI_BASE+0x0030)) ++#define NFI_COLADDR_REG32 ((volatile P_U32)(NFI_BASE+0x0034)) ++#define NFI_ROWADDR_REG32 ((volatile P_U32)(NFI_BASE+0x0038)) ++ ++#define NFI_STRDATA_REG16 ((volatile P_U16)(NFI_BASE+0x0040)) ++ ++#define NFI_DATAW_REG32 ((volatile P_U32)(NFI_BASE+0x0050)) ++#define NFI_DATAR_REG32 ((volatile P_U32)(NFI_BASE+0x0054)) ++#define NFI_PIO_DIRDY_REG16 ((volatile P_U16)(NFI_BASE+0x0058)) ++ ++#define NFI_STA_REG32 ((volatile P_U32)(NFI_BASE+0x0060)) ++#define NFI_FIFOSTA_REG16 ((volatile P_U16)(NFI_BASE+0x0064)) ++#define NFI_LOCKSTA_REG16 ((volatile P_U16)(NFI_BASE+0x0068)) ++ ++#define NFI_ADDRCNTR_REG16 ((volatile P_U16)(NFI_BASE+0x0070)) ++ ++#define NFI_STRADDR_REG32 ((volatile P_U32)(NFI_BASE+0x0080)) ++#define NFI_BYTELEN_REG16 ((volatile P_U16)(NFI_BASE+0x0084)) ++ ++#define NFI_CSEL_REG16 ((volatile P_U16)(NFI_BASE+0x0090)) ++#define NFI_IOCON_REG16 ((volatile P_U16)(NFI_BASE+0x0094)) ++ ++#define NFI_FDM0L_REG32 ((volatile P_U32)(NFI_BASE+0x00A0)) ++#define NFI_FDM0M_REG32 ((volatile P_U32)(NFI_BASE+0x00A4)) ++ ++#define NFI_LOCK_REG16 ((volatile P_U16)(NFI_BASE+0x0100)) ++#define NFI_LOCKCON_REG32 ((volatile P_U32)(NFI_BASE+0x0104)) ++#define NFI_LOCKANOB_REG16 ((volatile P_U16)(NFI_BASE+0x0108)) ++#define NFI_LOCK00ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0110)) ++#define NFI_LOCK00FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0114)) ++#define NFI_LOCK01ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0118)) ++#define NFI_LOCK01FMT_REG32 ((volatile P_U32)(NFI_BASE+0x011C)) ++#define NFI_LOCK02ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0120)) ++#define NFI_LOCK02FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0124)) ++#define NFI_LOCK03ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0128)) ++#define NFI_LOCK03FMT_REG32 ((volatile P_U32)(NFI_BASE+0x012C)) ++#define NFI_LOCK04ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0130)) ++#define NFI_LOCK04FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0134)) ++#define NFI_LOCK05ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0138)) ++#define NFI_LOCK05FMT_REG32 ((volatile P_U32)(NFI_BASE+0x013C)) ++#define NFI_LOCK06ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0140)) ++#define NFI_LOCK06FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0144)) ++#define NFI_LOCK07ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0148)) ++#define NFI_LOCK07FMT_REG32 ((volatile P_U32)(NFI_BASE+0x014C)) ++#define NFI_LOCK08ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0150)) ++#define NFI_LOCK08FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0154)) ++#define NFI_LOCK09ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0158)) ++#define NFI_LOCK09FMT_REG32 ((volatile P_U32)(NFI_BASE+0x015C)) ++#define NFI_LOCK10ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0160)) ++#define NFI_LOCK10FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0164)) ++#define NFI_LOCK11ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0168)) ++#define NFI_LOCK11FMT_REG32 ((volatile P_U32)(NFI_BASE+0x016C)) ++#define NFI_LOCK12ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0170)) ++#define NFI_LOCK12FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0174)) ++#define NFI_LOCK13ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0178)) ++#define NFI_LOCK13FMT_REG32 ((volatile P_U32)(NFI_BASE+0x017C)) ++#define NFI_LOCK14ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0180)) ++#define NFI_LOCK14FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0184)) ++#define NFI_LOCK15ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0188)) ++#define NFI_LOCK15FMT_REG32 ((volatile P_U32)(NFI_BASE+0x018C)) ++ ++#define NFI_FIFODATA0_REG32 ((volatile P_U32)(NFI_BASE+0x0190)) ++#define NFI_FIFODATA1_REG32 ((volatile P_U32)(NFI_BASE+0x0194)) ++#define 
NFI_FIFODATA2_REG32 ((volatile P_U32)(NFI_BASE+0x0198)) ++#define NFI_FIFODATA3_REG32 ((volatile P_U32)(NFI_BASE+0x019C)) ++#define NFI_MASTERSTA_REG16 ((volatile P_U16)(NFI_BASE+0x0210)) ++ ++ ++/******************************************************************************* ++ * NFI Register Field Definition ++ *******************************************************************************/ ++ ++/* NFI_CNFG */ ++#define CNFG_AHB (0x0001) ++#define CNFG_READ_EN (0x0002) ++#define CNFG_DMA_BURST_EN (0x0004) ++#define CNFG_BYTE_RW (0x0040) ++#define CNFG_HW_ECC_EN (0x0100) ++#define CNFG_AUTO_FMT_EN (0x0200) ++#define CNFG_OP_IDLE (0x0000) ++#define CNFG_OP_READ (0x1000) ++#define CNFG_OP_SRD (0x2000) ++#define CNFG_OP_PRGM (0x3000) ++#define CNFG_OP_ERASE (0x4000) ++#define CNFG_OP_RESET (0x5000) ++#define CNFG_OP_CUST (0x6000) ++#define CNFG_OP_MODE_MASK (0x7000) ++#define CNFG_OP_MODE_SHIFT (12) ++ ++/* NFI_PAGEFMT */ ++#define PAGEFMT_512 (0x0000) ++#define PAGEFMT_2K (0x0001) ++#define PAGEFMT_4K (0x0002) ++ ++#define PAGEFMT_PAGE_MASK (0x0003) ++ ++#define PAGEFMT_DBYTE_EN (0x0008) ++ ++#define PAGEFMT_SPARE_16 (0x0000) ++#define PAGEFMT_SPARE_26 (0x0001) ++#define PAGEFMT_SPARE_27 (0x0002) ++#define PAGEFMT_SPARE_28 (0x0003) ++#define PAGEFMT_SPARE_MASK (0x0030) ++#define PAGEFMT_SPARE_SHIFT (4) ++ ++#define PAGEFMT_FDM_MASK (0x0F00) ++#define PAGEFMT_FDM_SHIFT (8) ++ ++#define PAGEFMT_FDM_ECC_MASK (0xF000) ++#define PAGEFMT_FDM_ECC_SHIFT (12) ++ ++/* NFI_CON */ ++#define CON_FIFO_FLUSH (0x0001) ++#define CON_NFI_RST (0x0002) ++#define CON_NFI_SRD (0x0010) ++ ++#define CON_NFI_NOB_MASK (0x0060) ++#define CON_NFI_NOB_SHIFT (5) ++ ++#define CON_NFI_BRD (0x0100) ++#define CON_NFI_BWR (0x0200) ++ ++#define CON_NFI_SEC_MASK (0xF000) ++#define CON_NFI_SEC_SHIFT (12) ++ ++/* NFI_ACCCON */ ++#define ACCCON_SETTING () ++ ++/* NFI_INTR_EN */ ++#define INTR_RD_DONE_EN (0x0001) ++#define INTR_WR_DONE_EN (0x0002) ++#define INTR_RST_DONE_EN (0x0004) ++#define INTR_ERASE_DONE_EN (0x0008) ++#define INTR_BSY_RTN_EN (0x0010) ++#define INTR_ACC_LOCK_EN (0x0020) ++#define INTR_AHB_DONE_EN (0x0040) ++#define INTR_ALL_INTR_DE (0x0000) ++#define INTR_ALL_INTR_EN (0x007F) ++ ++/* NFI_INTR */ ++#define INTR_RD_DONE (0x0001) ++#define INTR_WR_DONE (0x0002) ++#define INTR_RST_DONE (0x0004) ++#define INTR_ERASE_DONE (0x0008) ++#define INTR_BSY_RTN (0x0010) ++#define INTR_ACC_LOCK (0x0020) ++#define INTR_AHB_DONE (0x0040) ++ ++/* NFI_ADDRNOB */ ++#define ADDR_COL_NOB_MASK (0x0003) ++#define ADDR_COL_NOB_SHIFT (0) ++#define ADDR_ROW_NOB_MASK (0x0030) ++#define ADDR_ROW_NOB_SHIFT (4) ++ ++/* NFI_STA */ ++#define STA_READ_EMPTY (0x00001000) ++#define STA_ACC_LOCK (0x00000010) ++#define STA_CMD_STATE (0x00000001) ++#define STA_ADDR_STATE (0x00000002) ++#define STA_DATAR_STATE (0x00000004) ++#define STA_DATAW_STATE (0x00000008) ++ ++#define STA_NAND_FSM_MASK (0x1F000000) ++#define STA_NAND_BUSY (0x00000100) ++#define STA_NAND_BUSY_RETURN (0x00000200) ++#define STA_NFI_FSM_MASK (0x000F0000) ++#define STA_NFI_OP_MASK (0x0000000F) ++ ++/* NFI_FIFOSTA */ ++#define FIFO_RD_EMPTY (0x0040) ++#define FIFO_RD_FULL (0x0080) ++#define FIFO_WR_FULL (0x8000) ++#define FIFO_WR_EMPTY (0x4000) ++#define FIFO_RD_REMAIN(x) (0x1F&(x)) ++#define FIFO_WR_REMAIN(x) ((0x1F00&(x))>>8) ++ ++/* NFI_ADDRCNTR */ ++#define ADDRCNTR_CNTR(x) ((0xF000&(x))>>12) ++#define ADDRCNTR_OFFSET(x) (0x03FF&(x)) ++ ++/* NFI_LOCK */ ++#define NFI_LOCK_ON (0x0001) ++ ++/* NFI_LOCKANOB */ ++#define PROG_RADD_NOB_MASK (0x7000) ++#define PROG_RADD_NOB_SHIFT (12) 
++#define PROG_CADD_NOB_MASK (0x0300) ++#define PROG_CADD_NOB_SHIFT (8) ++#define ERASE_RADD_NOB_MASK (0x0070) ++#define ERASE_RADD_NOB_SHIFT (4) ++#define ERASE_CADD_NOB_MASK (0x0007) ++#define ERASE_CADD_NOB_SHIFT (0) ++ ++/******************************************************************************* ++ * ECC Register Definition ++ *******************************************************************************/ ++ ++#define ECC_ENCCON_REG16 ((volatile P_U16)(NFIECC_BASE+0x0000)) ++#define ECC_ENCCNFG_REG32 ((volatile P_U32)(NFIECC_BASE+0x0004)) ++#define ECC_ENCDIADDR_REG32 ((volatile P_U32)(NFIECC_BASE+0x0008)) ++#define ECC_ENCIDLE_REG32 ((volatile P_U32)(NFIECC_BASE+0x000C)) ++#define ECC_ENCPAR0_REG32 ((volatile P_U32)(NFIECC_BASE+0x0010)) ++#define ECC_ENCPAR1_REG32 ((volatile P_U32)(NFIECC_BASE+0x0014)) ++#define ECC_ENCPAR2_REG32 ((volatile P_U32)(NFIECC_BASE+0x0018)) ++#define ECC_ENCPAR3_REG32 ((volatile P_U32)(NFIECC_BASE+0x001C)) ++#define ECC_ENCPAR4_REG32 ((volatile P_U32)(NFIECC_BASE+0x0020)) ++#define ECC_ENCSTA_REG32 ((volatile P_U32)(NFIECC_BASE+0x0024)) ++#define ECC_ENCIRQEN_REG16 ((volatile P_U16)(NFIECC_BASE+0x0028)) ++#define ECC_ENCIRQSTA_REG16 ((volatile P_U16)(NFIECC_BASE+0x002C)) ++ ++#define ECC_DECCON_REG16 ((volatile P_U16)(NFIECC_BASE+0x0100)) ++#define ECC_DECCNFG_REG32 ((volatile P_U32)(NFIECC_BASE+0x0104)) ++#define ECC_DECDIADDR_REG32 ((volatile P_U32)(NFIECC_BASE+0x0108)) ++#define ECC_DECIDLE_REG16 ((volatile P_U16)(NFIECC_BASE+0x010C)) ++#define ECC_DECFER_REG16 ((volatile P_U16)(NFIECC_BASE+0x0110)) ++#define ECC_DECENUM_REG32 ((volatile P_U32)(NFIECC_BASE+0x0114)) ++#define ECC_DECDONE_REG16 ((volatile P_U16)(NFIECC_BASE+0x0118)) ++#define ECC_DECEL0_REG32 ((volatile P_U32)(NFIECC_BASE+0x011C)) ++#define ECC_DECEL1_REG32 ((volatile P_U32)(NFIECC_BASE+0x0120)) ++#define ECC_DECEL2_REG32 ((volatile P_U32)(NFIECC_BASE+0x0124)) ++#define ECC_DECEL3_REG32 ((volatile P_U32)(NFIECC_BASE+0x0128)) ++#define ECC_DECEL4_REG32 ((volatile P_U32)(NFIECC_BASE+0x012C)) ++#define ECC_DECEL5_REG32 ((volatile P_U32)(NFIECC_BASE+0x0130)) ++#define ECC_DECIRQEN_REG16 ((volatile P_U16)(NFIECC_BASE+0x0134)) ++#define ECC_DECIRQSTA_REG16 ((volatile P_U16)(NFIECC_BASE+0x0138)) ++#define ECC_FDMADDR_REG32 ((volatile P_U32)(NFIECC_BASE+0x013C)) ++#define ECC_DECFSM_REG32 ((volatile P_U32)(NFIECC_BASE+0x0140)) ++#define ECC_SYNSTA_REG32 ((volatile P_U32)(NFIECC_BASE+0x0144)) ++#define ECC_DECNFIDI_REG32 ((volatile P_U32)(NFIECC_BASE+0x0148)) ++#define ECC_SYN0_REG32 ((volatile P_U32)(NFIECC_BASE+0x014C)) ++ ++/******************************************************************************* ++ * ECC register definition ++ *******************************************************************************/ ++/* ECC_ENCON */ ++#define ENC_EN (0x0001) ++#define ENC_DE (0x0000) ++ ++/* ECC_ENCCNFG */ ++#define ECC_CNFG_ECC4 (0x0000) ++#define ECC_CNFG_ECC6 (0x0001) ++#define ECC_CNFG_ECC8 (0x0002) ++#define ECC_CNFG_ECC10 (0x0003) ++#define ECC_CNFG_ECC12 (0x0004) ++#define ECC_CNFG_ECC_MASK (0x00000007) ++ ++#define ENC_CNFG_NFI (0x0010) ++#define ENC_CNFG_MODE_MASK (0x0010) ++ ++#define ENC_CNFG_META6 (0x10300000) ++#define ENC_CNFG_META8 (0x10400000) ++ ++#define ENC_CNFG_MSG_MASK (0x1FFF0000) ++#define ENC_CNFG_MSG_SHIFT (0x10) ++ ++/* ECC_ENCIDLE */ ++#define ENC_IDLE (0x0001) ++ ++/* ECC_ENCSTA */ ++#define STA_FSM (0x001F) ++#define STA_COUNT_PS (0xFF10) ++#define STA_COUNT_MS (0x3FFF0000) ++ ++/* ECC_ENCIRQEN */ ++#define ENC_IRQEN (0x0001) ++ ++/* ECC_ENCIRQSTA */ ++#define 
ENC_IRQSTA (0x0001) ++ ++/* ECC_DECCON */ ++#define DEC_EN (0x0001) ++#define DEC_DE (0x0000) ++ ++/* ECC_ENCCNFG */ ++#define DEC_CNFG_ECC4 (0x0000) ++//#define DEC_CNFG_ECC6 (0x0001) ++//#define DEC_CNFG_ECC12 (0x0002) ++#define DEC_CNFG_NFI (0x0010) ++//#define DEC_CNFG_META6 (0x10300000) ++//#define DEC_CNFG_META8 (0x10400000) ++ ++#define DEC_CNFG_FER (0x01000) ++#define DEC_CNFG_EL (0x02000) ++#define DEC_CNFG_CORRECT (0x03000) ++#define DEC_CNFG_TYPE_MASK (0x03000) ++ ++#define DEC_CNFG_EMPTY_EN (0x80000000) ++ ++#define DEC_CNFG_CODE_MASK (0x1FFF0000) ++#define DEC_CNFG_CODE_SHIFT (0x10) ++ ++/* ECC_DECIDLE */ ++#define DEC_IDLE (0x0001) ++ ++/* ECC_DECFER */ ++#define DEC_FER0 (0x0001) ++#define DEC_FER1 (0x0002) ++#define DEC_FER2 (0x0004) ++#define DEC_FER3 (0x0008) ++#define DEC_FER4 (0x0010) ++#define DEC_FER5 (0x0020) ++#define DEC_FER6 (0x0040) ++#define DEC_FER7 (0x0080) ++ ++/* ECC_DECENUM */ ++#define ERR_NUM0 (0x0000000F) ++#define ERR_NUM1 (0x000000F0) ++#define ERR_NUM2 (0x00000F00) ++#define ERR_NUM3 (0x0000F000) ++#define ERR_NUM4 (0x000F0000) ++#define ERR_NUM5 (0x00F00000) ++#define ERR_NUM6 (0x0F000000) ++#define ERR_NUM7 (0xF0000000) ++ ++/* ECC_DECDONE */ ++#define DEC_DONE0 (0x0001) ++#define DEC_DONE1 (0x0002) ++#define DEC_DONE2 (0x0004) ++#define DEC_DONE3 (0x0008) ++#define DEC_DONE4 (0x0010) ++#define DEC_DONE5 (0x0020) ++#define DEC_DONE6 (0x0040) ++#define DEC_DONE7 (0x0080) ++ ++/* ECC_DECIRQEN */ ++#define DEC_IRQEN (0x0001) ++ ++/* ECC_DECIRQSTA */ ++#define DEC_IRQSTA (0x0001) ++ ++#define CHIPVER_ECO_1 (0x8a00) ++#define CHIPVER_ECO_2 (0x8a01) ++ ++//#define NAND_PFM ++ ++/******************************************************************************* ++ * Data Structure Definition ++ *******************************************************************************/ ++struct mtk_nand_host ++{ ++ struct nand_chip nand_chip; ++ struct mtd_info mtd; ++ struct mtk_nand_host_hw *hw; ++}; ++ ++struct NAND_CMD ++{ ++ u32 u4ColAddr; ++ u32 u4RowAddr; ++ u32 u4OOBRowAddr; ++ u8 au1OOB[288]; ++ u8* pDataBuf; ++#ifdef NAND_PFM ++ u32 pureReadOOB; ++ u32 pureReadOOBNum; ++#endif ++}; ++ ++/* ++ * ECC layout control structure. Exported to userspace for ++ * diagnosis and to allow creation of raw images ++struct nand_ecclayout { ++ uint32_t eccbytes; ++ uint32_t eccpos[64]; ++ uint32_t oobavail; ++ struct nand_oobfree oobfree[MTD_MAX_OOBFREE_ENTRIES]; ++}; ++*/ ++#define __DEBUG_NAND 1 /* Debug information on/off */ ++ ++/* Debug message event */ ++#define DBG_EVT_NONE 0x00000000 /* No event */ ++#define DBG_EVT_INIT 0x00000001 /* Initial related event */ ++#define DBG_EVT_VERIFY 0x00000002 /* Verify buffer related event */ ++#define DBG_EVT_PERFORMANCE 0x00000004 /* Performance related event */ ++#define DBG_EVT_READ 0x00000008 /* Read related event */ ++#define DBG_EVT_WRITE 0x00000010 /* Write related event */ ++#define DBG_EVT_ERASE 0x00000020 /* Erase related event */ ++#define DBG_EVT_BADBLOCK 0x00000040 /* Badblock related event */ ++#define DBG_EVT_POWERCTL 0x00000080 /* Suspend/Resume related event */ ++ ++#define DBG_EVT_ALL 0xffffffff ++ ++#define DBG_EVT_MASK (DBG_EVT_INIT) ++ ++#if __DEBUG_NAND ++#define MSG(evt, fmt, args...) \ ++do { \ ++ if ((DBG_EVT_##evt) & DBG_EVT_MASK) { \ ++ printk(fmt, ##args); \ ++ } \ ++} while(0) ++ ++#define MSG_FUNC_ENTRY(f) MSG(FUC, ": %s\n", __FUNCTION__) ++#else ++#define MSG(evt, fmt, args...) 
do{}while(0) ++#define MSG_FUNC_ENTRY(f) do{}while(0) ++#endif ++ ++#define RAMDOM_READ 1<<0 ++#define CACHE_READ 1<<1 ++ ++typedef struct ++{ ++ u16 id; //deviceid+menuid ++ u32 ext_id; ++ u8 addr_cycle; ++ u8 iowidth; ++ u16 totalsize; ++ u16 blocksize; ++ u16 pagesize; ++ u16 sparesize; ++ u32 timmingsetting; ++ char devciename[14]; ++ u32 advancedmode; // ++}flashdev_info,*pflashdev_info; ++ ++/* NAND driver */ ++#if 0 ++struct mtk_nand_host_hw { ++ unsigned int nfi_bus_width; /* NFI_BUS_WIDTH */ ++ unsigned int nfi_access_timing; /* NFI_ACCESS_TIMING */ ++ unsigned int nfi_cs_num; /* NFI_CS_NUM */ ++ unsigned int nand_sec_size; /* NAND_SECTOR_SIZE */ ++ unsigned int nand_sec_shift; /* NAND_SECTOR_SHIFT */ ++ unsigned int nand_ecc_size; ++ unsigned int nand_ecc_bytes; ++ unsigned int nand_ecc_mode; ++}; ++extern struct mtk_nand_host_hw mt7621_nand_hw; ++extern u32 CFG_BLOCKSIZE; ++#endif ++#endif +--- a/drivers/mtd/nand/nand_base.c ++++ b/drivers/mtd/nand/nand_base.c +@@ -90,7 +90,7 @@ static struct nand_ecclayout nand_oob_12 + .length = 78} } + }; + +-static int nand_get_device(struct mtd_info *mtd, int new_state); ++int nand_get_device(struct mtd_info *mtd, int new_state); + + static int nand_do_write_oob(struct mtd_info *mtd, loff_t to, + struct mtd_oob_ops *ops); +@@ -128,7 +128,7 @@ static int check_offs_len(struct mtd_inf + * + * Release chip lock and wake up anyone waiting on the device. + */ +-static void nand_release_device(struct mtd_info *mtd) ++void nand_release_device(struct mtd_info *mtd) + { + struct nand_chip *chip = mtd->priv; + +@@ -739,7 +739,7 @@ static void panic_nand_get_device(struct + * + * Get the device and lock it for exclusive access + */ +-static int ++int + nand_get_device(struct mtd_info *mtd, int new_state) + { + struct nand_chip *chip = mtd->priv; +--- a/drivers/mtd/nand/nand_bbt.c ++++ b/drivers/mtd/nand/nand_bbt.c +@@ -1378,6 +1378,47 @@ int nand_isbad_bbt(struct mtd_info *mtd, + return 1; + } + ++/** ++ * nand_markbad_bbt - [NAND Interface] Mark a block bad in the BBT ++ * @mtd: MTD device structure ++ * @offs: offset of the bad block ++ */ ++int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs) ++{ ++ struct nand_chip *this = mtd->priv; ++ int block, ret = 0; ++ ++ block = (int)(offs >> this->bbt_erase_shift); ++ ++ /* Mark bad block in memory */ ++ bbt_mark_entry(this, block, BBT_BLOCK_WORN); ++ ++ /* Update flash-based bad block table */ ++ if (this->bbt_options & NAND_BBT_USE_FLASH) ++ ret = nand_update_bbt(mtd, offs); ++ ++ return ret; ++} ++ ++void nand_bbt_set(struct mtd_info *mtd, int page, int flag) ++{ ++ struct nand_chip *this = mtd->priv; ++ int block; ++ ++ block = (int)(page >> (this->bbt_erase_shift - this->page_shift - 1)); ++ this->bbt[block >> 3] &= ~(0x03 << (block & 0x6)); ++ this->bbt[block >> 3] |= (flag & 0x3) << (block & 0x6); ++} ++ ++int nand_bbt_get(struct mtd_info *mtd, int page) ++{ ++ struct nand_chip *this = mtd->priv; ++ int block; ++ ++ block = (int)(page >> (this->bbt_erase_shift - this->page_shift - 1)); ++ return (this->bbt[block >> 3] >> (block & 0x06)) & 0x03; ++} ++ + EXPORT_SYMBOL(nand_scan_bbt); + EXPORT_SYMBOL(nand_default_bbt); + EXPORT_SYMBOL_GPL(nand_update_bbt); +--- /dev/null ++++ b/drivers/mtd/nand/nand_def.h +@@ -0,0 +1,123 @@ ++#ifndef __NAND_DEF_H__ ++#define __NAND_DEF_H__ ++ ++#define VERSION "v2.1 Fix AHB virt2phys error" ++#define MODULE_NAME "# MTK NAND #" ++#define PROCNAME "driver/nand" ++ ++#undef TESTTIME ++//#define __UBOOT_NAND__ 1 ++#define __KERNEL_NAND__ 1 ++//#define 
__PRELOADER_NAND__ 1 ++//#define PMT 1 ++//#define _MTK_NAND_DUMMY_DRIVER ++//#define CONFIG_BADBLOCK_CHECK 1 ++//#ifdef CONFIG_BADBLOCK_CHECK ++//#define MTK_NAND_BMT 1 ++//#endif ++#define ECC_ENABLE 1 ++#define MANUAL_CORRECT 1 ++//#define __INTERNAL_USE_AHB_MODE__ (0) ++#define SKIP_BAD_BLOCK ++#define FACT_BBT ++ ++#ifndef NAND_OTP_SUPPORT ++#define NAND_OTP_SUPPORT 0 ++#endif ++ ++/******************************************************************************* ++ * Macro definition ++ *******************************************************************************/ ++//#define NFI_SET_REG32(reg, value) (DRV_WriteReg32(reg, DRV_Reg32(reg) | (value))) ++//#define NFI_SET_REG16(reg, value) (DRV_WriteReg16(reg, DRV_Reg16(reg) | (value))) ++//#define NFI_CLN_REG32(reg, value) (DRV_WriteReg32(reg, DRV_Reg32(reg) & (~(value)))) ++//#define NFI_CLN_REG16(reg, value) (DRV_WriteReg16(reg, DRV_Reg16(reg) & (~(value)))) ++ ++#if defined (__KERNEL_NAND__) ++#define NFI_SET_REG32(reg, value) \ ++do { \ ++ g_value = (DRV_Reg32(reg) | (value));\ ++ DRV_WriteReg32(reg, g_value); \ ++} while(0) ++ ++#define NFI_SET_REG16(reg, value) \ ++do { \ ++ g_value = (DRV_Reg16(reg) | (value));\ ++ DRV_WriteReg16(reg, g_value); \ ++} while(0) ++ ++#define NFI_CLN_REG32(reg, value) \ ++do { \ ++ g_value = (DRV_Reg32(reg) & (~(value)));\ ++ DRV_WriteReg32(reg, g_value); \ ++} while(0) ++ ++#define NFI_CLN_REG16(reg, value) \ ++do { \ ++ g_value = (DRV_Reg16(reg) & (~(value)));\ ++ DRV_WriteReg16(reg, g_value); \ ++} while(0) ++#endif ++ ++#define NFI_WAIT_STATE_DONE(state) do{;}while (__raw_readl(NFI_STA_REG32) & state) ++#define NFI_WAIT_TO_READY() do{;}while (!(__raw_readl(NFI_STA_REG32) & STA_BUSY2READY)) ++ ++ ++#define NAND_SECTOR_SIZE (512) ++#define OOB_PER_SECTOR (16) ++#define OOB_AVAI_PER_SECTOR (8) ++ ++#ifndef PART_SIZE_BMTPOOL ++#define BMT_POOL_SIZE (80) ++#else ++#define BMT_POOL_SIZE (PART_SIZE_BMTPOOL) ++#endif ++ ++#define PMT_POOL_SIZE (2) ++ ++#define TIMEOUT_1 0x1fff ++#define TIMEOUT_2 0x8ff ++#define TIMEOUT_3 0xffff ++#define TIMEOUT_4 0xffff//5000 //PIO ++ ++ ++/* temporarity definiation */ ++#if !defined (__KERNEL_NAND__) ++#define KERN_INFO ++#define KERN_WARNING ++#define KERN_ERR ++#define PAGE_SIZE (4096) ++#endif ++#define AddStorageTrace //AddStorageTrace ++#define STORAGE_LOGGER_MSG_NAND 0 ++#define NFI_BASE RALINK_NAND_CTRL_BASE ++#define NFIECC_BASE RALINK_NANDECC_CTRL_BASE ++ ++#ifdef __INTERNAL_USE_AHB_MODE__ ++#define MT65xx_POLARITY_LOW 0 ++#define MT65XX_PDN_PERI_NFI 0 ++#define MT65xx_EDGE_SENSITIVE 0 ++#define MT6575_NFI_IRQ_ID (58) ++#endif ++ ++#if defined (__KERNEL_NAND__) ++#define RALINK_REG(x) (*((volatile u32 *)(x))) ++#define __virt_to_phys(x) virt_to_phys((volatile void*)x) ++#else ++#define CONFIG_MTD_NAND_VERIFY_WRITE (1) ++#define printk printf ++#define ra_dbg printf ++#define BUG() //BUG() ++#define BUG_ON(x) //BUG_ON() ++#define NUM_PARTITIONS 1 ++#endif ++ ++#define NFI_DEFAULT_ACCESS_TIMING (0x30C77fff) //(0x44333) ++ ++//uboot only support 1 cs ++#define NFI_CS_NUM (1) ++#define NFI_DEFAULT_CS (0) ++ ++#include "mt6575_typedefs.h" ++ ++#endif /* __NAND_DEF_H__ */ +--- /dev/null ++++ b/drivers/mtd/nand/nand_device_list.h +@@ -0,0 +1,55 @@ ++/* Copyright Statement: ++ * ++ * This software/firmware and related documentation ("MediaTek Software") are ++ * protected under relevant copyright laws. The information contained herein ++ * is confidential and proprietary to MediaTek Inc. and/or its licensors. ++ * Without the prior written permission of MediaTek inc. 
and/or its licensors, ++ * any reproduction, modification, use or disclosure of MediaTek Software, ++ * and information contained herein, in whole or in part, shall be strictly prohibited. ++ */ ++/* MediaTek Inc. (C) 2010. All rights reserved. ++ * ++ * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES ++ * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE") ++ * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON ++ * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT. ++ * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE ++ * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR ++ * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH ++ * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES ++ * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES ++ * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK ++ * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR ++ * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND ++ * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE, ++ * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE, ++ * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO ++ * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE. ++ * ++ * The following software/firmware and/or related documentation ("MediaTek Software") ++ * have been modified by MediaTek Inc. All revisions are subject to any receiver's ++ * applicable license agreements with MediaTek Inc. ++ */ ++ ++#ifndef __NAND_DEVICE_LIST_H__ ++#define __NAND_DEVICE_LIST_H__ ++ ++static const flashdev_info gen_FlashTable[]={ ++ {0x20BC, 0x105554, 5, 16, 512, 128, 2048, 64, 0x1123, "EHD013151MA_5", 0}, ++ {0xECBC, 0x005554, 5, 16, 512, 128, 2048, 64, 0x1123, "K524G2GACB_A0", 0}, ++ {0x2CBC, 0x905556, 5, 16, 512, 128, 2048, 64, 0x21044333, "MT29C4G96MAZA", 0}, ++ {0xADBC, 0x905554, 5, 16, 512, 128, 2048, 64, 0x10801011, "H9DA4GH4JJAMC", 0}, ++ {0x01F1, 0x801D01, 4, 8, 128, 128, 2048, 64, 0x30C77fff, "S34ML01G100TF", 0}, ++ {0x92F1, 0x8095FF, 4, 8, 128, 128, 2048, 64, 0x30C77fff, "F59L1G81A", 0}, ++ {0xECD3, 0x519558, 5, 8, 1024, 128, 2048, 64, 0x44333, "K9K8G8000", 0}, ++ {0xC2F1, 0x801DC2, 4, 8, 128, 128, 2048, 64, 0x30C77fff, "MX30LF1G08AA", 0}, ++ {0x98D3, 0x902676, 5, 8, 1024, 256, 4096, 224, 0x00C25332, "TC58NVG3S0F", 0}, ++ {0x01DA, 0x909546, 5, 8, 256, 128, 2048, 128, 0x30C77fff, "S34ML02G200TF", 0}, ++ {0x01DC, 0x909556, 5, 8, 512, 128, 2048, 128, 0x30C77fff, "S34ML04G200TF", 0}, ++ {0x0000, 0x000000, 0, 0, 0, 0, 0, 0, 0, "xxxxxxxxxx", 0}, ++}; ++ ++ ++#endif +--- /dev/null ++++ b/drivers/mtd/nand/partition.h +@@ -0,0 +1,115 @@ ++/* Copyright Statement: ++ * ++ * This software/firmware and related documentation ("MediaTek Software") are ++ * protected under relevant copyright laws. The information contained herein ++ * is confidential and proprietary to MediaTek Inc. and/or its licensors. ++ * Without the prior written permission of MediaTek inc. 
and/or its licensors, ++ * any reproduction, modification, use or disclosure of MediaTek Software, ++ * and information contained herein, in whole or in part, shall be strictly prohibited. ++ */ ++/* MediaTek Inc. (C) 2010. All rights reserved. ++ * ++ * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES ++ * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE") ++ * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON ++ * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT. ++ * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE ++ * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR ++ * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH ++ * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES ++ * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES ++ * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK ++ * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR ++ * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND ++ * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE, ++ * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE, ++ * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO ++ * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE. ++ * ++ * The following software/firmware and/or related documentation ("MediaTek Software") ++ * have been modified by MediaTek Inc. All revisions are subject to any receiver's ++ * applicable license agreements with MediaTek Inc. 
++ */ ++ ++#include ++#include ++#include ++ ++#define RECONFIG_PARTITION_SIZE 1 ++ ++#define MTD_BOOT_PART_SIZE 0x80000 ++#define MTD_CONFIG_PART_SIZE 0x20000 ++#define MTD_FACTORY_PART_SIZE 0x20000 ++ ++extern unsigned int CFG_BLOCKSIZE; ++#define LARGE_MTD_BOOT_PART_SIZE (CFG_BLOCKSIZE<<2) ++#define LARGE_MTD_CONFIG_PART_SIZE (CFG_BLOCKSIZE<<2) ++#define LARGE_MTD_FACTORY_PART_SIZE (CFG_BLOCKSIZE<<1) ++ ++/*=======================================================================*/ ++/* NAND PARTITION Mapping */ ++/*=======================================================================*/ ++//#ifdef CONFIG_MTD_PARTITIONS ++static struct mtd_partition g_pasStatic_Partition[] = { ++ { ++ name: "ALL", ++ size: MTDPART_SIZ_FULL, ++ offset: 0, ++ }, ++ /* Put your own partition definitions here */ ++ { ++ name: "Bootloader", ++ size: MTD_BOOT_PART_SIZE, ++ offset: 0, ++ }, { ++ name: "Config", ++ size: MTD_CONFIG_PART_SIZE, ++ offset: MTDPART_OFS_APPEND ++ }, { ++ name: "Factory", ++ size: MTD_FACTORY_PART_SIZE, ++ offset: MTDPART_OFS_APPEND ++#ifdef CONFIG_RT2880_ROOTFS_IN_FLASH ++ }, { ++ name: "Kernel", ++ size: MTD_KERN_PART_SIZE, ++ offset: MTDPART_OFS_APPEND, ++ }, { ++ name: "RootFS", ++ size: MTD_ROOTFS_PART_SIZE, ++ offset: MTDPART_OFS_APPEND, ++#ifdef CONFIG_ROOTFS_IN_FLASH_NO_PADDING ++ }, { ++ name: "Kernel_RootFS", ++ size: MTD_KERN_PART_SIZE + MTD_ROOTFS_PART_SIZE, ++ offset: MTD_BOOT_PART_SIZE + MTD_CONFIG_PART_SIZE + MTD_FACTORY_PART_SIZE, ++#endif ++#else //CONFIG_RT2880_ROOTFS_IN_RAM ++ }, { ++ name: "Kernel", ++ size: 0x10000, ++ offset: MTDPART_OFS_APPEND, ++#endif ++#ifdef CONFIG_DUAL_IMAGE ++ }, { ++ name: "Kernel2", ++ size: MTD_KERN2_PART_SIZE, ++ offset: MTD_KERN2_PART_OFFSET, ++#ifdef CONFIG_RT2880_ROOTFS_IN_FLASH ++ }, { ++ name: "RootFS2", ++ size: MTD_ROOTFS2_PART_SIZE, ++ offset: MTD_ROOTFS2_PART_OFFSET, ++#endif ++#endif ++ } ++ ++}; ++ ++#define NUM_PARTITIONS ARRAY_SIZE(g_pasStatic_Partition) ++extern int part_num; // = NUM_PARTITIONS; ++//#endif ++#undef RECONFIG_PARTITION_SIZE ++ diff --git a/target/linux/ramips/patches-3.10/0214-usb-add-mt7621-xhci-support.patch b/target/linux/ramips/patches-3.10/0214-usb-add-mt7621-xhci-support.patch new file mode 100644 index 0000000000..bcac2b43e6 --- /dev/null +++ b/target/linux/ramips/patches-3.10/0214-usb-add-mt7621-xhci-support.patch @@ -0,0 +1,5768 @@ +From b823088d8782e02cc39c7eb4d834396b83dabe49 Mon Sep 17 00:00:00 2001 +From: John Crispin +Date: Mon, 27 Jan 2014 13:11:01 +0000 +Subject: [PATCH 214/215] usb: add mt7621 xhci support + +Signed-off-by: John Crispin +--- + drivers/usb/core/hub.c | 2 +- + drivers/usb/core/port.c | 3 +- + drivers/usb/host/Kconfig | 6 +- + drivers/usb/host/Makefile | 10 +- + drivers/usb/host/mtk-phy-7621.c | 445 +++++ + drivers/usb/host/mtk-phy-7621.h | 2871 +++++++++++++++++++++++++++++++++ + drivers/usb/host/mtk-phy-ahb.c | 58 + + drivers/usb/host/mtk-phy.c | 102 ++ + drivers/usb/host/mtk-phy.h | 179 ++ + drivers/usb/host/pci-quirks.h | 2 +- + drivers/usb/host/xhci-dbg.c | 3 + + drivers/usb/host/xhci-mem.c | 11 + + drivers/usb/host/xhci-mtk-power.c | 115 ++ + drivers/usb/host/xhci-mtk-power.h | 13 + + drivers/usb/host/xhci-mtk-scheduler.c | 608 +++++++ + drivers/usb/host/xhci-mtk-scheduler.h | 77 + + drivers/usb/host/xhci-mtk.c | 265 +++ + drivers/usb/host/xhci-mtk.h | 120 ++ + drivers/usb/host/xhci-plat.c | 19 + + drivers/usb/host/xhci-ring.c | 109 +- + drivers/usb/host/xhci.c | 201 ++- + drivers/usb/host/xhci.h | 23 +- + 22 files changed, 5229 insertions(+), 13 deletions(-) + create 
mode 100644 drivers/usb/host/mtk-phy-7621.c + create mode 100644 drivers/usb/host/mtk-phy-7621.h + create mode 100644 drivers/usb/host/mtk-phy-ahb.c + create mode 100644 drivers/usb/host/mtk-phy.c + create mode 100644 drivers/usb/host/mtk-phy.h + create mode 100644 drivers/usb/host/xhci-mtk-power.c + create mode 100644 drivers/usb/host/xhci-mtk-power.h + create mode 100644 drivers/usb/host/xhci-mtk-scheduler.c + create mode 100644 drivers/usb/host/xhci-mtk-scheduler.h + create mode 100644 drivers/usb/host/xhci-mtk.c + create mode 100644 drivers/usb/host/xhci-mtk.h + +--- a/drivers/usb/core/hub.c ++++ b/drivers/usb/core/hub.c +@@ -1254,7 +1254,7 @@ static void hub_quiesce(struct usb_hub * + if (type != HUB_SUSPEND) { + /* Disconnect all the children */ + for (i = 0; i < hdev->maxchild; ++i) { +- if (hub->ports[i]->child) ++ if (hub->ports[i] && hub->ports[i]->child) + usb_disconnect(&hub->ports[i]->child); + } + } +--- a/drivers/usb/core/port.c ++++ b/drivers/usb/core/port.c +@@ -193,6 +193,7 @@ exit: + void usb_hub_remove_port_device(struct usb_hub *hub, + int port1) + { +- device_unregister(&hub->ports[port1 - 1]->dev); ++ if (hub->ports[port1 - 1]) ++ device_unregister(&hub->ports[port1 - 1]->dev); + } + +--- a/drivers/usb/host/Kconfig ++++ b/drivers/usb/host/Kconfig +@@ -28,7 +28,11 @@ config USB_XHCI_HCD + if USB_XHCI_HCD + + config USB_XHCI_PLATFORM +- tristate ++ bool "xHCI platform" ++ ++config USB_MT7621_XHCI_PLATFORM ++ bool "MTK MT7621 xHCI" ++ depends on USB_XHCI_PLATFORM + + config USB_XHCI_HCD_DEBUGGING + bool "Debugging for the xHCI host controller" +--- a/drivers/usb/host/Makefile ++++ b/drivers/usb/host/Makefile +@@ -13,15 +13,23 @@ fhci-$(CONFIG_FHCI_DEBUG) += fhci-dbg.o + + xhci-hcd-y := xhci.o xhci-mem.o + xhci-hcd-y += xhci-ring.o xhci-hub.o xhci-dbg.o ++ifndef CONFIG_USB_MT7621_XHCI_PLATFORM + xhci-hcd-$(CONFIG_PCI) += xhci-pci.o ++endif ++ ++ifdef CONFIG_USB_MT7621_XHCI_PLATFORM ++xhci-hcd-y += mtk-phy.o xhci-mtk-scheduler.o xhci-mtk-power.o xhci-mtk.o mtk-phy-7621.o mtk-phy-ahb.o ++endif + + ifneq ($(CONFIG_USB_XHCI_PLATFORM), ) +- xhci-hcd-y += xhci-plat.o ++xhci-hcd-y += xhci-plat.o + endif + + obj-$(CONFIG_USB_WHCI_HCD) += whci/ + ++ifndef CONFIG_USB_MT7621_XHCI_PLATFORM + obj-$(CONFIG_PCI) += pci-quirks.o ++endif + + obj-$(CONFIG_USB_EHCI_HCD) += ehci-hcd.o + obj-$(CONFIG_USB_EHCI_PCI) += ehci-pci.o +--- /dev/null ++++ b/drivers/usb/host/mtk-phy-7621.c +@@ -0,0 +1,445 @@ ++#include "mtk-phy.h" ++ ++#ifdef CONFIG_PROJECT_7621 ++#include "mtk-phy-7621.h" ++ ++//not used on SoC ++PHY_INT32 phy_init(struct u3phy_info *info){ ++ return PHY_TRUE; ++} ++ ++//not used on SoC ++PHY_INT32 phy_change_pipe_phase(struct u3phy_info *info, PHY_INT32 phy_drv, PHY_INT32 pipe_phase){ ++ return PHY_TRUE; ++} ++ ++//-------------------------------------------------------- ++// Function : fgEyeScanHelper_CheckPtInRegion() ++// Description : Check if the test point is in a rectangle region. ++// If it is in the rectangle, also check if this point ++// is on the multiple of deltaX and deltaY. ++// Parameter : strucScanRegion * prEye - the region ++// BYTE bX ++// BYTE bY ++// Return : BYTE - TRUE : This point needs to be tested ++// FALSE: This point will be omitted ++// Note : First check within the rectangle. ++// Secondly, use modulous to check if the point will be tested. 
++//-------------------------------------------------------- ++static PHY_INT8 fgEyeScanHelper_CheckPtInRegion(struct strucScanRegion * prEye, PHY_INT8 bX, PHY_INT8 bY) ++{ ++ PHY_INT8 fgValid = true; ++ ++ ++ /// Be careful, the axis origin is on the TOP-LEFT corner. ++ /// Therefore the top-left point has the minimum X and Y ++ /// Botton-right point is the maximum X and Y ++ if ( (prEye->bX_tl <= bX) && (bX <= prEye->bX_br) ++ && (prEye->bY_tl <= bY) && (bY <= prEye->bX_br)) ++ { ++ // With the region, now check whether or not the input test point is ++ // on the multiples of X and Y ++ // Do not have to worry about negative value, because we have already ++ // check the input bX, and bY is within the region. ++ if ( ((bX - prEye->bX_tl) % (prEye->bDeltaX)) ++ || ((bY - prEye->bY_tl) % (prEye->bDeltaY)) ) ++ { ++ // if the division will have remainder, that means ++ // the input test point is on the multiples of X and Y ++ fgValid = false; ++ } ++ else ++ { ++ } ++ } ++ else ++ { ++ ++ fgValid = false; ++ } ++ return fgValid; ++} ++ ++//-------------------------------------------------------- ++// Function : EyeScanHelper_RunTest() ++// Description : Enable the test, and wait til it is completed ++// Parameter : None ++// Return : None ++// Note : None ++//-------------------------------------------------------- ++static void EyeScanHelper_RunTest(struct u3phy_info *info) ++{ ++ DRV_UDELAY(100); ++ // Disable the test ++ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0) ++ , RG_SSUSB_EQ_EYE_CNT_EN_OFST, RG_SSUSB_EQ_EYE_CNT_EN, 0); //RG_SSUSB_RX_EYE_CNT_EN = 0 ++ DRV_UDELAY(100); ++ // Run the test ++ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0) ++ , RG_SSUSB_EQ_EYE_CNT_EN_OFST, RG_SSUSB_EQ_EYE_CNT_EN, 1); //RG_SSUSB_RX_EYE_CNT_EN = 1 ++ DRV_UDELAY(100); ++ // Wait til it's done ++ //RGS_SSUSB_RX_EYE_CNT_RDY ++ while(!U3PhyReadField32(((PHY_UINT32)&info->u3phyd_regs->phya_rx_mon5) ++ , RGS_SSUSB_EQ_EYE_CNT_RDY_OFST, RGS_SSUSB_EQ_EYE_CNT_RDY)); ++} ++ ++//-------------------------------------------------------- ++// Function : fgEyeScanHelper_CalNextPoint() ++// Description : Calcualte the test point for the measurement ++// Parameter : None ++// Return : BOOL - TRUE : the next point is within the ++// boundaryof HW limit ++// FALSE: the next point is out of the HW limit ++// Note : The next point is obtained by calculating ++// from the bottom left of the region rectangle ++// and then scanning up until it reaches the upper ++// limit. At this time, the x will increment, and ++// start scanning downwards until the y hits the ++// zero. ++//-------------------------------------------------------- ++static PHY_INT8 fgEyeScanHelper_CalNextPoint(void) ++{ ++ if ( ((_bYcurr == MAX_Y) && (_eScanDir == SCAN_DN)) ++ || ((_bYcurr == MIN_Y) && (_eScanDir == SCAN_UP)) ++ ) ++ { ++ /// Reaches the limit of Y axis ++ /// Increment X ++ _bXcurr++; ++ _fgXChged = true; ++ _eScanDir = (_eScanDir == SCAN_UP) ? SCAN_DN : SCAN_UP; ++ ++ if (_bXcurr > MAX_X) ++ { ++ return false; ++ } ++ } ++ else ++ { ++ _bYcurr = (_eScanDir == SCAN_DN) ? 
_bYcurr + 1 : _bYcurr - 1; ++ _fgXChged = false; ++ } ++ return PHY_TRUE; ++} ++ ++PHY_INT32 eyescan_init(struct u3phy_info *info){ ++ //initial PHY setting ++ U3PhyWriteField32(((PHY_UINT32)&info->u3phya_regs->rega) ++ , RG_SSUSB_CDR_EPEN_OFST, RG_SSUSB_CDR_EPEN, 1); ++ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->phyd_mix3) ++ , RG_SSUSB_FORCE_CDR_PI_PWD_OFST, RG_SSUSB_FORCE_CDR_PI_PWD, 1); ++ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_bank2_regs->b2_phyd_misc0) ++ , RG_SSUSB_RX_PI_CAL_EN_SEL_OFST, RG_SSUSB_RX_PI_CAL_EN_SEL, 1); //RG_SSUSB_RX_PI_CAL_MANUAL_SEL = 1 ++ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_bank2_regs->b2_phyd_misc0) ++ , RG_SSUSB_RX_PI_CAL_EN_OFST, RG_SSUSB_RX_PI_CAL_EN, 1); //RG_SSUSB_RX_PI_CAL_MANUAL_EN = 1 ++ return PHY_TRUE; ++} ++ ++PHY_INT32 phy_eyescan(struct u3phy_info *info, PHY_INT32 x_t1, PHY_INT32 y_t1, PHY_INT32 x_br, PHY_INT32 y_br, PHY_INT32 delta_x, PHY_INT32 delta_y ++ , PHY_INT32 eye_cnt, PHY_INT32 num_cnt, PHY_INT32 PI_cal_en, PHY_INT32 num_ignore_cnt){ ++ PHY_INT32 cOfst = 0; ++ PHY_UINT8 bIdxX = 0; ++ PHY_UINT8 bIdxY = 0; ++ //PHY_INT8 bCnt = 0; ++ PHY_UINT8 bIdxCycCnt = 0; ++ PHY_INT8 fgValid; ++ PHY_INT8 cX; ++ PHY_INT8 cY; ++ PHY_UINT8 bExtendCnt; ++ PHY_INT8 isContinue; ++ //PHY_INT8 isBreak; ++ PHY_UINT32 wErr0 = 0, wErr1 = 0; ++ //PHY_UINT32 temp; ++ ++ PHY_UINT32 pwErrCnt0[CYCLE_COUNT_MAX][ERRCNT_MAX][ERRCNT_MAX]; ++ PHY_UINT32 pwErrCnt1[CYCLE_COUNT_MAX][ERRCNT_MAX][ERRCNT_MAX]; ++ ++ _rEye1.bX_tl = x_t1; ++ _rEye1.bY_tl = y_t1; ++ _rEye1.bX_br = x_br; ++ _rEye1.bY_br = y_br; ++ _rEye1.bDeltaX = delta_x; ++ _rEye1.bDeltaY = delta_y; ++ ++ _rEye2.bX_tl = x_t1; ++ _rEye2.bY_tl = y_t1; ++ _rEye2.bX_br = x_br; ++ _rEye2.bY_br = y_br; ++ _rEye2.bDeltaX = delta_x; ++ _rEye2.bDeltaY = delta_y; ++ ++ _rTestCycle.wEyeCnt = eye_cnt; ++ _rTestCycle.bNumOfEyeCnt = num_cnt; ++ _rTestCycle.bNumOfIgnoreCnt = num_ignore_cnt; ++ _rTestCycle.bPICalEn = PI_cal_en; ++ ++ _bXcurr = 0; ++ _bYcurr = 0; ++ _eScanDir = SCAN_DN; ++ _fgXChged = false; ++ ++ printk("x_t1: %x, y_t1: %x, x_br: %x, y_br: %x, delta_x: %x, delta_y: %x, \ ++ eye_cnt: %x, num_cnt: %x, PI_cal_en: %x, num_ignore_cnt: %x\n", \ ++ x_t1, y_t1, x_br, y_br, delta_x, delta_y, eye_cnt, num_cnt, PI_cal_en, num_ignore_cnt); ++ ++ //force SIGDET to OFF ++ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_bank2_regs->b2_phyd_misc0) ++ , RG_SSUSB_RX_SIGDET_EN_SEL_OFST, RG_SSUSB_RX_SIGDET_EN_SEL, 1); //RG_SSUSB_RX_SIGDET_SEL = 1 ++ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_bank2_regs->b2_phyd_misc0) ++ , RG_SSUSB_RX_SIGDET_EN_OFST, RG_SSUSB_RX_SIGDET_EN, 0); //RG_SSUSB_RX_SIGDET_EN = 0 ++ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye1) ++ , RG_SSUSB_EQ_SIGDET_OFST, RG_SSUSB_EQ_SIGDET, 0); //RG_SSUSB_RX_SIGDET = 0 ++ ++ // RX_TRI_DET_EN to Disable ++ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq3) ++ , RG_SSUSB_EQ_TRI_DET_EN_OFST, RG_SSUSB_EQ_TRI_DET_EN, 0); //RG_SSUSB_RX_TRI_DET_EN = 0 ++ ++ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0) ++ , RG_SSUSB_EQ_EYE_MON_EN_OFST, RG_SSUSB_EQ_EYE_MON_EN, 1); //RG_SSUSB_EYE_MON_EN = 1 ++ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0) ++ , RG_SSUSB_EQ_EYE_XOFFSET_OFST, RG_SSUSB_EQ_EYE_XOFFSET, 0); //RG_SSUSB_RX_EYE_XOFFSET = 0 ++ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0) ++ , RG_SSUSB_EQ_EYE0_Y_OFST, RG_SSUSB_EQ_EYE0_Y, 0); //RG_SSUSB_RX_EYE0_Y = 0 ++ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0) ++ , RG_SSUSB_EQ_EYE1_Y_OFST, RG_SSUSB_EQ_EYE1_Y, 0); //RG_SSUSB_RX_EYE1_Y = 0 ++ ++ ++ 
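++	// The branch below performs the optional PI calibration: with manual
++	// control selected (RG_SSUSB_RX_PI_CAL_EN_SEL = 1) it pulses
++	// RG_SSUSB_RX_PI_CAL_EN 0 -> 1 -> 0 and latches the resulting
++	// RGS_SSUSB_EQ_PILPO code into _bPIResult (PI presumably being the
++	// RX phase interpolator).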
if (PI_cal_en){ ++ // PI Calibration ++ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_bank2_regs->b2_phyd_misc0) ++ , RG_SSUSB_RX_PI_CAL_EN_SEL_OFST, RG_SSUSB_RX_PI_CAL_EN_SEL, 1); //RG_SSUSB_RX_PI_CAL_MANUAL_SEL = 1 ++ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_bank2_regs->b2_phyd_misc0) ++ , RG_SSUSB_RX_PI_CAL_EN_OFST, RG_SSUSB_RX_PI_CAL_EN, 0); //RG_SSUSB_RX_PI_CAL_MANUAL_EN = 0 ++ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_bank2_regs->b2_phyd_misc0) ++ , RG_SSUSB_RX_PI_CAL_EN_OFST, RG_SSUSB_RX_PI_CAL_EN, 1); //RG_SSUSB_RX_PI_CAL_MANUAL_EN = 1 ++ ++ DRV_UDELAY(20); ++ ++ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_bank2_regs->b2_phyd_misc0) ++ , RG_SSUSB_RX_PI_CAL_EN_OFST, RG_SSUSB_RX_PI_CAL_EN, 0); //RG_SSUSB_RX_PI_CAL_MANUAL_EN = 0 ++ _bPIResult = U3PhyReadField32(((PHY_UINT32)&info->u3phyd_regs->phya_rx_mon5) ++ , RGS_SSUSB_EQ_PILPO_OFST, RGS_SSUSB_EQ_PILPO); //read RGS_SSUSB_RX_PILPO ++ ++ printk(KERN_ERR "PI result: %d\n", _bPIResult); ++ } ++ // Read Initial DAC ++ // Set CYCLE ++ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye3) ++ ,RG_SSUSB_EQ_EYE_CNT_OFST, RG_SSUSB_EQ_EYE_CNT, eye_cnt); //RG_SSUSB_RX_EYE_CNT ++ ++ // Eye Monitor Feature ++ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye1) ++ , RG_SSUSB_EQ_EYE_MASK_OFST, RG_SSUSB_EQ_EYE_MASK, 0x3ff); //RG_SSUSB_RX_EYE_MASK = 0x3ff ++ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0) ++ , RG_SSUSB_EQ_EYE_MON_EN_OFST, RG_SSUSB_EQ_EYE_MON_EN, 1); //RG_SSUSB_EYE_MON_EN = 1 ++ ++ // Move X,Y to the top-left corner ++ for (cOfst = 0; cOfst >= -64; cOfst--) ++ { ++ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0) ++ ,RG_SSUSB_EQ_EYE_XOFFSET_OFST, RG_SSUSB_EQ_EYE_XOFFSET, cOfst); //RG_SSUSB_RX_EYE_XOFFSET ++ } ++ for (cOfst = 0; cOfst < 64; cOfst++) ++ { ++ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0) ++ , RG_SSUSB_EQ_EYE0_Y_OFST, RG_SSUSB_EQ_EYE0_Y, cOfst); //RG_SSUSB_RX_EYE0_Y ++ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0) ++ , RG_SSUSB_EQ_EYE1_Y_OFST, RG_SSUSB_EQ_EYE1_Y, cOfst); //RG_SSUSB_RX_EYE1_Y ++ } ++ //ClearErrorResult ++ for(bIdxCycCnt = 0; bIdxCycCnt < CYCLE_COUNT_MAX; bIdxCycCnt++){ ++ for(bIdxX = 0; bIdxX < ERRCNT_MAX; bIdxX++) ++ { ++ for(bIdxY = 0; bIdxY < ERRCNT_MAX; bIdxY++){ ++ pwErrCnt0[bIdxCycCnt][bIdxX][bIdxY] = 0; ++ pwErrCnt1[bIdxCycCnt][bIdxX][bIdxY] = 0; ++ } ++ } ++ } ++ isContinue = true; ++ while(isContinue){ ++ //printk(KERN_ERR "_bXcurr: %d, _bYcurr: %d\n", _bXcurr, _bYcurr); ++ // The point is within the boundary, then let's check if it is within ++ // the testing region. ++ // The point is only test-able if one of the eye region ++ // includes this point. ++ fgValid = fgEyeScanHelper_CheckPtInRegion(&_rEye1, _bXcurr, _bYcurr) ++ || fgEyeScanHelper_CheckPtInRegion(&_rEye2, _bXcurr, _bYcurr); ++ // Translate bX and bY to 2's complement from where the origin was on the ++ // top left corner. ++ // 0x40 and 0x3F needs a bit of thinking!!!! >"< ++ cX = (_bXcurr ^ 0x40); ++ cY = (_bYcurr ^ 0x3F); ++ ++ // Set X if necessary ++ if (_fgXChged == true) ++ { ++ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0) ++ , RG_SSUSB_EQ_EYE_XOFFSET_OFST, RG_SSUSB_EQ_EYE_XOFFSET, cX); //RG_SSUSB_RX_EYE_XOFFSET ++ } ++ // Set Y ++ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0) ++ , RG_SSUSB_EQ_EYE0_Y_OFST, RG_SSUSB_EQ_EYE0_Y, cY); //RG_SSUSB_RX_EYE0_Y ++ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0) ++ , RG_SSUSB_EQ_EYE1_Y_OFST, RG_SSUSB_EQ_EYE1_Y, cY); //RG_SSUSB_RX_EYE1_Y ++ ++ /// Test this point! 
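++		// For a test-able point: run the eye counter num_ignore_cnt
++		// times first and discard those runs, then run it num_cnt more
++		// times and record the two error counters (ERRCNT_0/ERRCNT_1)
++		// for this (x, y) position in pwErrCnt0[] / pwErrCnt1[].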
++ if (fgValid){ ++ for (bExtendCnt = 0; bExtendCnt < num_ignore_cnt; bExtendCnt++) ++ { ++ //run test ++ EyeScanHelper_RunTest(info); ++ } ++ for (bExtendCnt = 0; bExtendCnt < num_cnt; bExtendCnt++) ++ { ++ EyeScanHelper_RunTest(info); ++ wErr0 = U3PhyReadField32(((PHY_UINT32)&info->u3phyd_regs->phya_rx_mon3) ++ , RGS_SSUSB_EQ_EYE_MONITOR_ERRCNT_0_OFST, RGS_SSUSB_EQ_EYE_MONITOR_ERRCNT_0); ++ wErr1 = U3PhyReadField32(((PHY_UINT32)&info->u3phyd_regs->phya_rx_mon4) ++ , RGS_SSUSB_EQ_EYE_MONITOR_ERRCNT_1_OFST, RGS_SSUSB_EQ_EYE_MONITOR_ERRCNT_1); ++ ++ pwErrCnt0[bExtendCnt][_bXcurr][_bYcurr] = wErr0; ++ pwErrCnt1[bExtendCnt][_bXcurr][_bYcurr] = wErr1; ++ ++ //EyeScanHelper_GetResult(&_rRes.pwErrCnt0[bCnt], &_rRes.pwErrCnt1[bCnt]); ++// printk(KERN_ERR "cnt[%d] cur_x,y [0x%x][0x%x], cX,cY [0x%x][0x%x], ErrCnt[%d][%d]\n" ++// , bExtendCnt, _bXcurr, _bYcurr, cX, cY, pwErrCnt0[bExtendCnt][_bXcurr][_bYcurr], pwErrCnt1[bExtendCnt][_bXcurr][_bYcurr]); ++ } ++ //printk(KERN_ERR "cur_x,y [0x%x][0x%x], cX,cY [0x%x][0x%x], ErrCnt[%d][%d]\n", _bXcurr, _bYcurr, cX, cY, pwErrCnt0[0][_bXcurr][_bYcurr], pwErrCnt1[0][_bXcurr][_bYcurr]); ++ } ++ else{ ++ ++ } ++ if (fgEyeScanHelper_CalNextPoint() == false){ ++#if 0 ++ printk(KERN_ERR "Xcurr [0x%x] Ycurr [0x%x]\n", _bXcurr, _bYcurr); ++ printk(KERN_ERR "XcurrREG [0x%x] YcurrREG [0x%x]\n", cX, cY); ++#endif ++ printk(KERN_ERR "end of eye scan\n"); ++ isContinue = false; ++ } ++ } ++ printk(KERN_ERR "CurX [0x%x] CurY [0x%x]\n" ++ , U3PhyReadField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0), RG_SSUSB_EQ_EYE_XOFFSET_OFST, RG_SSUSB_EQ_EYE_XOFFSET) ++ , U3PhyReadField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0), RG_SSUSB_EQ_EYE0_Y_OFST, RG_SSUSB_EQ_EYE0_Y)); ++ ++ // Move X,Y to the top-left corner ++ for (cOfst = 63; cOfst >= 0; cOfst--) ++ { ++ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0) ++ , RG_SSUSB_EQ_EYE_XOFFSET_OFST, RG_SSUSB_EQ_EYE_XOFFSET, cOfst); //RG_SSUSB_RX_EYE_XOFFSET ++ } ++ for (cOfst = 63; cOfst >= 0; cOfst--) ++ { ++ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0) ++ , RG_SSUSB_EQ_EYE0_Y_OFST, RG_SSUSB_EQ_EYE0_Y, cOfst); ++ U3PhyWriteField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0) ++ , RG_SSUSB_EQ_EYE1_Y_OFST, RG_SSUSB_EQ_EYE1_Y, cOfst); ++ ++ } ++ printk(KERN_ERR "CurX [0x%x] CurY [0x%x]\n" ++ , U3PhyReadField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0), RG_SSUSB_EQ_EYE_XOFFSET_OFST, RG_SSUSB_EQ_EYE_XOFFSET) ++ , U3PhyReadField32(((PHY_UINT32)&info->u3phyd_regs->eq_eye0), RG_SSUSB_EQ_EYE0_Y_OFST, RG_SSUSB_EQ_EYE0_Y)); ++ ++ printk(KERN_ERR "PI result: %d\n", _bPIResult); ++ printk(KERN_ERR "pwErrCnt0 addr: 0x%x\n", (PHY_UINT32)pwErrCnt0); ++ printk(KERN_ERR "pwErrCnt1 addr: 0x%x\n", (PHY_UINT32)pwErrCnt1); ++ ++ return PHY_TRUE; ++} ++ ++//not used on SoC ++PHY_INT32 u2_save_cur_en(struct u3phy_info *info){ ++ return PHY_TRUE; ++} ++ ++//not used on SoC ++PHY_INT32 u2_save_cur_re(struct u3phy_info *info){ ++ return PHY_TRUE; ++} ++ ++PHY_INT32 u2_slew_rate_calibration(struct u3phy_info *info){ ++ PHY_INT32 i=0; ++ //PHY_INT32 j=0; ++ //PHY_INT8 u1SrCalVal = 0; ++ //PHY_INT8 u1Reg_addr_HSTX_SRCAL_EN; ++ PHY_INT32 fgRet = 0; ++ PHY_INT32 u4FmOut = 0; ++ PHY_INT32 u4Tmp = 0; ++ //PHY_INT32 temp; ++ ++ // => RG_USB20_HSTX_SRCAL_EN = 1 ++ // enable HS TX SR calibration ++ U3PhyWriteField32(((PHY_UINT32)&info->u2phy_regs->u2phyacr0) ++ , RG_USB20_HSTX_SRCAL_EN_OFST, RG_USB20_HSTX_SRCAL_EN, 0x1); ++ DRV_MSLEEP(1); ++ ++ // => RG_FRCK_EN = 1 ++ // Enable free run clock ++ 
U3PhyWriteField32(((PHY_UINT32)&info->sifslv_fm_regs->fmmonr1) ++ , RG_FRCK_EN_OFST, RG_FRCK_EN, 1); ++ ++ // MT6290 HS signal quality patch ++ // => RG_CYCLECNT = 400 ++ // Setting cyclecnt =400 ++ U3PhyWriteField32(((PHY_UINT32)&info->sifslv_fm_regs->fmcr0) ++ , RG_CYCLECNT_OFST, RG_CYCLECNT, 0x400); ++ ++ // => RG_FREQDET_EN = 1 ++ // Enable frequency meter ++ U3PhyWriteField32(((PHY_UINT32)&info->sifslv_fm_regs->fmcr0) ++ , RG_FREQDET_EN_OFST, RG_FREQDET_EN, 0x1); ++ ++ // wait for FM detection done, set 10ms timeout ++ for(i=0; i<10; i++){ ++ // => u4FmOut = USB_FM_OUT ++ // read FM_OUT ++ u4FmOut = U3PhyReadReg32(((PHY_UINT32)&info->sifslv_fm_regs->fmmonr0)); ++ printk("FM_OUT value: u4FmOut = %d(0x%08X)\n", u4FmOut, u4FmOut); ++ ++ // check if FM detection done ++ if (u4FmOut != 0) ++ { ++ fgRet = 0; ++ printk("FM detection done! loop = %d\n", i); ++ ++ break; ++ } ++ ++ fgRet = 1; ++ DRV_MSLEEP(1); ++ } ++ // => RG_FREQDET_EN = 0 ++ // disable frequency meter ++ U3PhyWriteField32(((PHY_UINT32)&info->sifslv_fm_regs->fmcr0) ++ , RG_FREQDET_EN_OFST, RG_FREQDET_EN, 0); ++ ++ // => RG_FRCK_EN = 0 ++ // disable free run clock ++ U3PhyWriteField32(((PHY_UINT32)&info->sifslv_fm_regs->fmmonr1) ++ , RG_FRCK_EN_OFST, RG_FRCK_EN, 0); ++ ++ // => RG_USB20_HSTX_SRCAL_EN = 0 ++ // disable HS TX SR calibration ++ U3PhyWriteField32(((PHY_UINT32)&info->u2phy_regs->u2phyacr0) ++ , RG_USB20_HSTX_SRCAL_EN_OFST, RG_USB20_HSTX_SRCAL_EN, 0); ++ DRV_MSLEEP(1); ++ ++ if(u4FmOut == 0){ ++ U3PhyWriteField32(((PHY_UINT32)&info->u2phy_regs->u2phyacr0) ++ , RG_USB20_HSTX_SRCTRL_OFST, RG_USB20_HSTX_SRCTRL, 0x4); ++ ++ fgRet = 1; ++ } ++ else{ ++ // set reg = (1024/FM_OUT) * 25 * 0.028 (round to the nearest digits) ++ u4Tmp = (((1024 * 25 * U2_SR_COEF_7621) / u4FmOut) + 500) / 1000; ++ printk("SR calibration value u1SrCalVal = %d\n", (PHY_UINT8)u4Tmp); ++ U3PhyWriteField32(((PHY_UINT32)&info->u2phy_regs->u2phyacr0) ++ , RG_USB20_HSTX_SRCTRL_OFST, RG_USB20_HSTX_SRCTRL, u4Tmp); ++ } ++ return fgRet; ++} ++ ++#endif +--- /dev/null ++++ b/drivers/usb/host/mtk-phy-7621.h +@@ -0,0 +1,2871 @@ ++#ifdef CONFIG_PROJECT_7621 ++#ifndef __MTK_PHY_7621_H ++#define __MTK_PHY_7621_H ++ ++#define U2_SR_COEF_7621 28 ++ ++/////////////////////////////////////////////////////////////////////////////// ++ ++struct u2phy_reg { ++ //0x0 ++ PHY_LE32 u2phyac0; ++ PHY_LE32 u2phyac1; ++ PHY_LE32 u2phyac2; ++ PHY_LE32 reserve0; ++ //0x10 ++ PHY_LE32 u2phyacr0; ++ PHY_LE32 u2phyacr1; ++ PHY_LE32 u2phyacr2; ++ PHY_LE32 u2phyacr3; ++ //0x20 ++ PHY_LE32 u2phyacr4; ++ PHY_LE32 u2phyamon0; ++ PHY_LE32 reserve1[2]; ++ //0x30~0x50 ++ PHY_LE32 reserve2[12]; ++ //0x60 ++ PHY_LE32 u2phydcr0; ++ PHY_LE32 u2phydcr1; ++ PHY_LE32 u2phydtm0; ++ PHY_LE32 u2phydtm1; ++ //0x70 ++ PHY_LE32 u2phydmon0; ++ PHY_LE32 u2phydmon1; ++ PHY_LE32 u2phydmon2; ++ PHY_LE32 u2phydmon3; ++ //0x80 ++ PHY_LE32 u2phybc12c; ++ PHY_LE32 u2phybc12c1; ++ PHY_LE32 reserve3[2]; ++ //0x90~0xe0 ++ PHY_LE32 reserve4[24]; ++ //0xf0 ++ PHY_LE32 reserve6[3]; ++ PHY_LE32 regfcom; ++}; ++ ++//U3D_U2PHYAC0 ++#define RG_USB20_USBPLL_DIVEN (0x7<<28) //30:28 ++#define RG_USB20_USBPLL_CKCTRL (0x3<<26) //27:26 ++#define RG_USB20_USBPLL_PREDIV (0x3<<24) //25:24 ++#define RG_USB20_USBPLL_FORCE_ON (0x1<<23) //23:23 ++#define RG_USB20_USBPLL_FBDIV (0x7f<<16) //22:16 ++#define RG_USB20_REF_EN (0x1<<15) //15:15 ++#define RG_USB20_INTR_EN (0x1<<14) //14:14 ++#define RG_USB20_BG_TRIM (0xf<<8) //11:8 ++#define RG_USB20_BG_RBSEL (0x3<<6) //7:6 ++#define RG_USB20_BG_RASEL (0x3<<4) //5:4 ++#define 
RG_USB20_BGR_DIV (0x3<<2) //3:2 ++#define RG_SIFSLV_CHP_EN (0x1<<1) //1:1 ++#define RG_SIFSLV_BGR_EN (0x1<<0) //0:0 ++ ++//U3D_U2PHYAC1 ++#define RG_USB20_VRT_VREF_SEL (0x7<<28) //30:28 ++#define RG_USB20_TERM_VREF_SEL (0x7<<24) //26:24 ++#define RG_USB20_MPX_SEL (0xff<<16) //23:16 ++#define RG_USB20_MPX_OUT_SEL (0x3<<12) //13:12 ++#define RG_USB20_TX_PH_ROT_SEL (0x7<<8) //10:8 ++#define RG_USB20_USBPLL_ACCEN (0x1<<3) //3:3 ++#define RG_USB20_USBPLL_LF (0x1<<2) //2:2 ++#define RG_USB20_USBPLL_BR (0x1<<1) //1:1 ++#define RG_USB20_USBPLL_BP (0x1<<0) //0:0 ++ ++//U3D_U2PHYAC2 ++#define RG_SIFSLV_MAC_BANDGAP_EN (0x1<<17) //17:17 ++#define RG_SIFSLV_MAC_CHOPPER_EN (0x1<<16) //16:16 ++#define RG_USB20_CLKREF_REV (0xff<<0) //7:0 ++ ++//U3D_U2PHYACR0 ++#define RG_USB20_ICUSB_EN (0x1<<24) //24:24 ++#define RG_USB20_HSTX_SRCAL_EN (0x1<<23) //23:23 ++#define RG_USB20_HSTX_SRCTRL (0x7<<16) //18:16 ++#define RG_USB20_LS_CR (0x7<<12) //14:12 ++#define RG_USB20_FS_CR (0x7<<8) //10:8 ++#define RG_USB20_LS_SR (0x7<<4) //6:4 ++#define RG_USB20_FS_SR (0x7<<0) //2:0 ++ ++//U3D_U2PHYACR1 ++#define RG_USB20_INIT_SQ_EN_DG (0x3<<28) //29:28 ++#define RG_USB20_SQD (0x3<<24) //25:24 ++#define RG_USB20_HSTX_TMODE_SEL (0x3<<20) //21:20 ++#define RG_USB20_HSTX_TMODE_EN (0x1<<19) //19:19 ++#define RG_USB20_PHYD_MONEN (0x1<<18) //18:18 ++#define RG_USB20_INLPBK_EN (0x1<<17) //17:17 ++#define RG_USB20_CHIRP_EN (0x1<<16) //16:16 ++#define RG_USB20_DM_ABIST_SOURCE_EN (0x1<<15) //15:15 ++#define RG_USB20_DM_ABIST_SELE (0xf<<8) //11:8 ++#define RG_USB20_DP_ABIST_SOURCE_EN (0x1<<7) //7:7 ++#define RG_USB20_DP_ABIST_SELE (0xf<<0) //3:0 ++ ++//U3D_U2PHYACR2 ++#define RG_USB20_OTG_ABIST_SELE (0x7<<29) //31:29 ++#define RG_USB20_OTG_ABIST_EN (0x1<<28) //28:28 ++#define RG_USB20_OTG_VBUSCMP_EN (0x1<<27) //27:27 ++#define RG_USB20_OTG_VBUSTH (0x7<<24) //26:24 ++#define RG_USB20_DISC_FIT_EN (0x1<<22) //22:22 ++#define RG_USB20_DISCD (0x3<<20) //21:20 ++#define RG_USB20_DISCTH (0xf<<16) //19:16 ++#define RG_USB20_SQCAL_EN (0x1<<15) //15:15 ++#define RG_USB20_SQCAL (0xf<<8) //11:8 ++#define RG_USB20_SQTH (0xf<<0) //3:0 ++ ++//U3D_U2PHYACR3 ++#define RG_USB20_HSTX_DBIST (0xf<<28) //31:28 ++#define RG_USB20_HSTX_BIST_EN (0x1<<26) //26:26 ++#define RG_USB20_HSTX_I_EN_MODE (0x3<<24) //25:24 ++#define RG_USB20_HSRX_TMODE_EN (0x1<<23) //23:23 ++#define RG_USB20_HSRX_BIAS_EN_SEL (0x3<<20) //21:20 ++#define RG_USB20_USB11_TMODE_EN (0x1<<19) //19:19 ++#define RG_USB20_TMODE_FS_LS_TX_EN (0x1<<18) //18:18 ++#define RG_USB20_TMODE_FS_LS_RCV_EN (0x1<<17) //17:17 ++#define RG_USB20_TMODE_FS_LS_MODE (0x1<<16) //16:16 ++#define RG_USB20_HS_TERM_EN_MODE (0x3<<13) //14:13 ++#define RG_USB20_PUPD_BIST_EN (0x1<<12) //12:12 ++#define RG_USB20_EN_PU_DM (0x1<<11) //11:11 ++#define RG_USB20_EN_PD_DM (0x1<<10) //10:10 ++#define RG_USB20_EN_PU_DP (0x1<<9) //9:9 ++#define RG_USB20_EN_PD_DP (0x1<<8) //8:8 ++#define RG_USB20_PHY_REV (0xff<<0) //7:0 ++ ++//U3D_U2PHYACR4 ++#define RG_USB20_DP_100K_MODE (0x1<<18) //18:18 ++#define RG_USB20_DM_100K_EN (0x1<<17) //17:17 ++#define USB20_DP_100K_EN (0x1<<16) //16:16 ++#define USB20_GPIO_DM_I (0x1<<15) //15:15 ++#define USB20_GPIO_DP_I (0x1<<14) //14:14 ++#define USB20_GPIO_DM_OE (0x1<<13) //13:13 ++#define USB20_GPIO_DP_OE (0x1<<12) //12:12 ++#define RG_USB20_GPIO_CTL (0x1<<9) //9:9 ++#define USB20_GPIO_MODE (0x1<<8) //8:8 ++#define RG_USB20_TX_BIAS_EN (0x1<<5) //5:5 ++#define RG_USB20_TX_VCMPDN_EN (0x1<<4) //4:4 ++#define RG_USB20_HS_SQ_EN_MODE (0x3<<2) //3:2 ++#define RG_USB20_HS_RCV_EN_MODE (0x3<<0) //1:0 ++ 
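++/*
++ * Naming convention used throughout this header: each register field has a
++ * bit-mask macro (the value already shifted into position, e.g.
++ * RG_USB20_HSTX_SRCAL_EN) plus a matching *_OFST macro giving the shift
++ * amount.  mtk-phy-7621.c always passes the pair to the accessors, e.g.
++ *
++ *	U3PhyWriteField32((PHY_UINT32)&info->u2phy_regs->u2phyacr0,
++ *			  RG_USB20_HSTX_SRCAL_EN_OFST, RG_USB20_HSTX_SRCAL_EN, 1);
++ *
++ * which presumably does a read-modify-write of the masked field, i.e.
++ * reg = (reg & ~mask) | ((val << ofst) & mask); the actual accessor
++ * implementation lives in mtk-phy.c / mtk-phy-ahb.c and is not part of
++ * this hunk.
++ */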
++//U3D_U2PHYAMON0 ++#define RGO_USB20_GPIO_DM_O (0x1<<1) //1:1 ++#define RGO_USB20_GPIO_DP_O (0x1<<0) //0:0 ++ ++//U3D_U2PHYDCR0 ++#define RG_USB20_CDR_TST (0x3<<30) //31:30 ++#define RG_USB20_GATED_ENB (0x1<<29) //29:29 ++#define RG_USB20_TESTMODE (0x3<<26) //27:26 ++#define RG_USB20_PLL_STABLE (0x1<<25) //25:25 ++#define RG_USB20_PLL_FORCE_ON (0x1<<24) //24:24 ++#define RG_USB20_PHYD_RESERVE (0xffff<<8) //23:8 ++#define RG_USB20_EBTHRLD (0x1<<7) //7:7 ++#define RG_USB20_EARLY_HSTX_I (0x1<<6) //6:6 ++#define RG_USB20_TX_TST (0x1<<5) //5:5 ++#define RG_USB20_NEGEDGE_ENB (0x1<<4) //4:4 ++#define RG_USB20_CDR_FILT (0xf<<0) //3:0 ++ ++//U3D_U2PHYDCR1 ++#define RG_USB20_PROBE_SEL (0xff<<24) //31:24 ++#define RG_USB20_DRVVBUS (0x1<<23) //23:23 ++#define RG_DEBUG_EN (0x1<<22) //22:22 ++#define RG_USB20_OTG_PROBE (0x3<<20) //21:20 ++#define RG_USB20_SW_PLLMODE (0x3<<18) //19:18 ++#define RG_USB20_BERTH (0x3<<16) //17:16 ++#define RG_USB20_LBMODE (0x3<<13) //14:13 ++#define RG_USB20_FORCE_TAP (0x1<<12) //12:12 ++#define RG_USB20_TAPSEL (0xfff<<0) //11:0 ++ ++//U3D_U2PHYDTM0 ++#define RG_UART_MODE (0x3<<30) //31:30 ++#define FORCE_UART_I (0x1<<29) //29:29 ++#define FORCE_UART_BIAS_EN (0x1<<28) //28:28 ++#define FORCE_UART_TX_OE (0x1<<27) //27:27 ++#define FORCE_UART_EN (0x1<<26) //26:26 ++#define FORCE_USB_CLKEN (0x1<<25) //25:25 ++#define FORCE_DRVVBUS (0x1<<24) //24:24 ++#define FORCE_DATAIN (0x1<<23) //23:23 ++#define FORCE_TXVALID (0x1<<22) //22:22 ++#define FORCE_DM_PULLDOWN (0x1<<21) //21:21 ++#define FORCE_DP_PULLDOWN (0x1<<20) //20:20 ++#define FORCE_XCVRSEL (0x1<<19) //19:19 ++#define FORCE_SUSPENDM (0x1<<18) //18:18 ++#define FORCE_TERMSEL (0x1<<17) //17:17 ++#define FORCE_OPMODE (0x1<<16) //16:16 ++#define UTMI_MUXSEL (0x1<<15) //15:15 ++#define RG_RESET (0x1<<14) //14:14 ++#define RG_DATAIN (0xf<<10) //13:10 ++#define RG_TXVALIDH (0x1<<9) //9:9 ++#define RG_TXVALID (0x1<<8) //8:8 ++#define RG_DMPULLDOWN (0x1<<7) //7:7 ++#define RG_DPPULLDOWN (0x1<<6) //6:6 ++#define RG_XCVRSEL (0x3<<4) //5:4 ++#define RG_SUSPENDM (0x1<<3) //3:3 ++#define RG_TERMSEL (0x1<<2) //2:2 ++#define RG_OPMODE (0x3<<0) //1:0 ++ ++//U3D_U2PHYDTM1 ++#define RG_USB20_PRBS7_EN (0x1<<31) //31:31 ++#define RG_USB20_PRBS7_BITCNT (0x3f<<24) //29:24 ++#define RG_USB20_CLK48M_EN (0x1<<23) //23:23 ++#define RG_USB20_CLK60M_EN (0x1<<22) //22:22 ++#define RG_UART_I (0x1<<19) //19:19 ++#define RG_UART_BIAS_EN (0x1<<18) //18:18 ++#define RG_UART_TX_OE (0x1<<17) //17:17 ++#define RG_UART_EN (0x1<<16) //16:16 ++#define FORCE_VBUSVALID (0x1<<13) //13:13 ++#define FORCE_SESSEND (0x1<<12) //12:12 ++#define FORCE_BVALID (0x1<<11) //11:11 ++#define FORCE_AVALID (0x1<<10) //10:10 ++#define FORCE_IDDIG (0x1<<9) //9:9 ++#define FORCE_IDPULLUP (0x1<<8) //8:8 ++#define RG_VBUSVALID (0x1<<5) //5:5 ++#define RG_SESSEND (0x1<<4) //4:4 ++#define RG_BVALID (0x1<<3) //3:3 ++#define RG_AVALID (0x1<<2) //2:2 ++#define RG_IDDIG (0x1<<1) //1:1 ++#define RG_IDPULLUP (0x1<<0) //0:0 ++ ++//U3D_U2PHYDMON0 ++#define RG_USB20_PRBS7_BERTH (0xff<<0) //7:0 ++ ++//U3D_U2PHYDMON1 ++#define USB20_UART_O (0x1<<31) //31:31 ++#define RGO_USB20_LB_PASS (0x1<<30) //30:30 ++#define RGO_USB20_LB_DONE (0x1<<29) //29:29 ++#define AD_USB20_BVALID (0x1<<28) //28:28 ++#define USB20_IDDIG (0x1<<27) //27:27 ++#define AD_USB20_VBUSVALID (0x1<<26) //26:26 ++#define AD_USB20_SESSEND (0x1<<25) //25:25 ++#define AD_USB20_AVALID (0x1<<24) //24:24 ++#define USB20_LINE_STATE (0x3<<22) //23:22 ++#define USB20_HST_DISCON (0x1<<21) //21:21 ++#define USB20_TX_READY (0x1<<20) //20:20 
++#define USB20_RX_ERROR (0x1<<19) //19:19 ++#define USB20_RX_ACTIVE (0x1<<18) //18:18 ++#define USB20_RX_VALIDH (0x1<<17) //17:17 ++#define USB20_RX_VALID (0x1<<16) //16:16 ++#define USB20_DATA_OUT (0xffff<<0) //15:0 ++ ++//U3D_U2PHYDMON2 ++#define RGO_TXVALID_CNT (0xff<<24) //31:24 ++#define RGO_RXACTIVE_CNT (0xff<<16) //23:16 ++#define RGO_USB20_LB_BERCNT (0xff<<8) //15:8 ++#define USB20_PROBE_OUT (0xff<<0) //7:0 ++ ++//U3D_U2PHYDMON3 ++#define RGO_USB20_PRBS7_ERRCNT (0xffff<<16) //31:16 ++#define RGO_USB20_PRBS7_DONE (0x1<<3) //3:3 ++#define RGO_USB20_PRBS7_LOCK (0x1<<2) //2:2 ++#define RGO_USB20_PRBS7_PASS (0x1<<1) //1:1 ++#define RGO_USB20_PRBS7_PASSTH (0x1<<0) //0:0 ++ ++//U3D_U2PHYBC12C ++#define RG_SIFSLV_CHGDT_DEGLCH_CNT (0xf<<28) //31:28 ++#define RG_SIFSLV_CHGDT_CTRL_CNT (0xf<<24) //27:24 ++#define RG_SIFSLV_CHGDT_FORCE_MODE (0x1<<16) //16:16 ++#define RG_CHGDT_ISRC_LEV (0x3<<14) //15:14 ++#define RG_CHGDT_VDATSRC (0x1<<13) //13:13 ++#define RG_CHGDT_BGVREF_SEL (0x7<<10) //12:10 ++#define RG_CHGDT_RDVREF_SEL (0x3<<8) //9:8 ++#define RG_CHGDT_ISRC_DP (0x1<<7) //7:7 ++#define RG_SIFSLV_CHGDT_OPOUT_DM (0x1<<6) //6:6 ++#define RG_CHGDT_VDAT_DM (0x1<<5) //5:5 ++#define RG_CHGDT_OPOUT_DP (0x1<<4) //4:4 ++#define RG_SIFSLV_CHGDT_VDAT_DP (0x1<<3) //3:3 ++#define RG_SIFSLV_CHGDT_COMP_EN (0x1<<2) //2:2 ++#define RG_SIFSLV_CHGDT_OPDRV_EN (0x1<<1) //1:1 ++#define RG_CHGDT_EN (0x1<<0) //0:0 ++ ++//U3D_U2PHYBC12C1 ++#define RG_CHGDT_REV (0xff<<0) //7:0 ++ ++//U3D_REGFCOM ++#define RG_PAGE (0xff<<24) //31:24 ++#define I2C_MODE (0x1<<16) //16:16 ++ ++ ++/* OFFSET */ ++ ++//U3D_U2PHYAC0 ++#define RG_USB20_USBPLL_DIVEN_OFST (28) ++#define RG_USB20_USBPLL_CKCTRL_OFST (26) ++#define RG_USB20_USBPLL_PREDIV_OFST (24) ++#define RG_USB20_USBPLL_FORCE_ON_OFST (23) ++#define RG_USB20_USBPLL_FBDIV_OFST (16) ++#define RG_USB20_REF_EN_OFST (15) ++#define RG_USB20_INTR_EN_OFST (14) ++#define RG_USB20_BG_TRIM_OFST (8) ++#define RG_USB20_BG_RBSEL_OFST (6) ++#define RG_USB20_BG_RASEL_OFST (4) ++#define RG_USB20_BGR_DIV_OFST (2) ++#define RG_SIFSLV_CHP_EN_OFST (1) ++#define RG_SIFSLV_BGR_EN_OFST (0) ++ ++//U3D_U2PHYAC1 ++#define RG_USB20_VRT_VREF_SEL_OFST (28) ++#define RG_USB20_TERM_VREF_SEL_OFST (24) ++#define RG_USB20_MPX_SEL_OFST (16) ++#define RG_USB20_MPX_OUT_SEL_OFST (12) ++#define RG_USB20_TX_PH_ROT_SEL_OFST (8) ++#define RG_USB20_USBPLL_ACCEN_OFST (3) ++#define RG_USB20_USBPLL_LF_OFST (2) ++#define RG_USB20_USBPLL_BR_OFST (1) ++#define RG_USB20_USBPLL_BP_OFST (0) ++ ++//U3D_U2PHYAC2 ++#define RG_SIFSLV_MAC_BANDGAP_EN_OFST (17) ++#define RG_SIFSLV_MAC_CHOPPER_EN_OFST (16) ++#define RG_USB20_CLKREF_REV_OFST (0) ++ ++//U3D_U2PHYACR0 ++#define RG_USB20_ICUSB_EN_OFST (24) ++#define RG_USB20_HSTX_SRCAL_EN_OFST (23) ++#define RG_USB20_HSTX_SRCTRL_OFST (16) ++#define RG_USB20_LS_CR_OFST (12) ++#define RG_USB20_FS_CR_OFST (8) ++#define RG_USB20_LS_SR_OFST (4) ++#define RG_USB20_FS_SR_OFST (0) ++ ++//U3D_U2PHYACR1 ++#define RG_USB20_INIT_SQ_EN_DG_OFST (28) ++#define RG_USB20_SQD_OFST (24) ++#define RG_USB20_HSTX_TMODE_SEL_OFST (20) ++#define RG_USB20_HSTX_TMODE_EN_OFST (19) ++#define RG_USB20_PHYD_MONEN_OFST (18) ++#define RG_USB20_INLPBK_EN_OFST (17) ++#define RG_USB20_CHIRP_EN_OFST (16) ++#define RG_USB20_DM_ABIST_SOURCE_EN_OFST (15) ++#define RG_USB20_DM_ABIST_SELE_OFST (8) ++#define RG_USB20_DP_ABIST_SOURCE_EN_OFST (7) ++#define RG_USB20_DP_ABIST_SELE_OFST (0) ++ ++//U3D_U2PHYACR2 ++#define RG_USB20_OTG_ABIST_SELE_OFST (29) ++#define RG_USB20_OTG_ABIST_EN_OFST (28) ++#define RG_USB20_OTG_VBUSCMP_EN_OFST (27) 
++#define RG_USB20_OTG_VBUSTH_OFST (24) ++#define RG_USB20_DISC_FIT_EN_OFST (22) ++#define RG_USB20_DISCD_OFST (20) ++#define RG_USB20_DISCTH_OFST (16) ++#define RG_USB20_SQCAL_EN_OFST (15) ++#define RG_USB20_SQCAL_OFST (8) ++#define RG_USB20_SQTH_OFST (0) ++ ++//U3D_U2PHYACR3 ++#define RG_USB20_HSTX_DBIST_OFST (28) ++#define RG_USB20_HSTX_BIST_EN_OFST (26) ++#define RG_USB20_HSTX_I_EN_MODE_OFST (24) ++#define RG_USB20_HSRX_TMODE_EN_OFST (23) ++#define RG_USB20_HSRX_BIAS_EN_SEL_OFST (20) ++#define RG_USB20_USB11_TMODE_EN_OFST (19) ++#define RG_USB20_TMODE_FS_LS_TX_EN_OFST (18) ++#define RG_USB20_TMODE_FS_LS_RCV_EN_OFST (17) ++#define RG_USB20_TMODE_FS_LS_MODE_OFST (16) ++#define RG_USB20_HS_TERM_EN_MODE_OFST (13) ++#define RG_USB20_PUPD_BIST_EN_OFST (12) ++#define RG_USB20_EN_PU_DM_OFST (11) ++#define RG_USB20_EN_PD_DM_OFST (10) ++#define RG_USB20_EN_PU_DP_OFST (9) ++#define RG_USB20_EN_PD_DP_OFST (8) ++#define RG_USB20_PHY_REV_OFST (0) ++ ++//U3D_U2PHYACR4 ++#define RG_USB20_DP_100K_MODE_OFST (18) ++#define RG_USB20_DM_100K_EN_OFST (17) ++#define USB20_DP_100K_EN_OFST (16) ++#define USB20_GPIO_DM_I_OFST (15) ++#define USB20_GPIO_DP_I_OFST (14) ++#define USB20_GPIO_DM_OE_OFST (13) ++#define USB20_GPIO_DP_OE_OFST (12) ++#define RG_USB20_GPIO_CTL_OFST (9) ++#define USB20_GPIO_MODE_OFST (8) ++#define RG_USB20_TX_BIAS_EN_OFST (5) ++#define RG_USB20_TX_VCMPDN_EN_OFST (4) ++#define RG_USB20_HS_SQ_EN_MODE_OFST (2) ++#define RG_USB20_HS_RCV_EN_MODE_OFST (0) ++ ++//U3D_U2PHYAMON0 ++#define RGO_USB20_GPIO_DM_O_OFST (1) ++#define RGO_USB20_GPIO_DP_O_OFST (0) ++ ++//U3D_U2PHYDCR0 ++#define RG_USB20_CDR_TST_OFST (30) ++#define RG_USB20_GATED_ENB_OFST (29) ++#define RG_USB20_TESTMODE_OFST (26) ++#define RG_USB20_PLL_STABLE_OFST (25) ++#define RG_USB20_PLL_FORCE_ON_OFST (24) ++#define RG_USB20_PHYD_RESERVE_OFST (8) ++#define RG_USB20_EBTHRLD_OFST (7) ++#define RG_USB20_EARLY_HSTX_I_OFST (6) ++#define RG_USB20_TX_TST_OFST (5) ++#define RG_USB20_NEGEDGE_ENB_OFST (4) ++#define RG_USB20_CDR_FILT_OFST (0) ++ ++//U3D_U2PHYDCR1 ++#define RG_USB20_PROBE_SEL_OFST (24) ++#define RG_USB20_DRVVBUS_OFST (23) ++#define RG_DEBUG_EN_OFST (22) ++#define RG_USB20_OTG_PROBE_OFST (20) ++#define RG_USB20_SW_PLLMODE_OFST (18) ++#define RG_USB20_BERTH_OFST (16) ++#define RG_USB20_LBMODE_OFST (13) ++#define RG_USB20_FORCE_TAP_OFST (12) ++#define RG_USB20_TAPSEL_OFST (0) ++ ++//U3D_U2PHYDTM0 ++#define RG_UART_MODE_OFST (30) ++#define FORCE_UART_I_OFST (29) ++#define FORCE_UART_BIAS_EN_OFST (28) ++#define FORCE_UART_TX_OE_OFST (27) ++#define FORCE_UART_EN_OFST (26) ++#define FORCE_USB_CLKEN_OFST (25) ++#define FORCE_DRVVBUS_OFST (24) ++#define FORCE_DATAIN_OFST (23) ++#define FORCE_TXVALID_OFST (22) ++#define FORCE_DM_PULLDOWN_OFST (21) ++#define FORCE_DP_PULLDOWN_OFST (20) ++#define FORCE_XCVRSEL_OFST (19) ++#define FORCE_SUSPENDM_OFST (18) ++#define FORCE_TERMSEL_OFST (17) ++#define FORCE_OPMODE_OFST (16) ++#define UTMI_MUXSEL_OFST (15) ++#define RG_RESET_OFST (14) ++#define RG_DATAIN_OFST (10) ++#define RG_TXVALIDH_OFST (9) ++#define RG_TXVALID_OFST (8) ++#define RG_DMPULLDOWN_OFST (7) ++#define RG_DPPULLDOWN_OFST (6) ++#define RG_XCVRSEL_OFST (4) ++#define RG_SUSPENDM_OFST (3) ++#define RG_TERMSEL_OFST (2) ++#define RG_OPMODE_OFST (0) ++ ++//U3D_U2PHYDTM1 ++#define RG_USB20_PRBS7_EN_OFST (31) ++#define RG_USB20_PRBS7_BITCNT_OFST (24) ++#define RG_USB20_CLK48M_EN_OFST (23) ++#define RG_USB20_CLK60M_EN_OFST (22) ++#define RG_UART_I_OFST (19) ++#define RG_UART_BIAS_EN_OFST (18) ++#define RG_UART_TX_OE_OFST (17) ++#define 
RG_UART_EN_OFST (16) ++#define FORCE_VBUSVALID_OFST (13) ++#define FORCE_SESSEND_OFST (12) ++#define FORCE_BVALID_OFST (11) ++#define FORCE_AVALID_OFST (10) ++#define FORCE_IDDIG_OFST (9) ++#define FORCE_IDPULLUP_OFST (8) ++#define RG_VBUSVALID_OFST (5) ++#define RG_SESSEND_OFST (4) ++#define RG_BVALID_OFST (3) ++#define RG_AVALID_OFST (2) ++#define RG_IDDIG_OFST (1) ++#define RG_IDPULLUP_OFST (0) ++ ++//U3D_U2PHYDMON0 ++#define RG_USB20_PRBS7_BERTH_OFST (0) ++ ++//U3D_U2PHYDMON1 ++#define USB20_UART_O_OFST (31) ++#define RGO_USB20_LB_PASS_OFST (30) ++#define RGO_USB20_LB_DONE_OFST (29) ++#define AD_USB20_BVALID_OFST (28) ++#define USB20_IDDIG_OFST (27) ++#define AD_USB20_VBUSVALID_OFST (26) ++#define AD_USB20_SESSEND_OFST (25) ++#define AD_USB20_AVALID_OFST (24) ++#define USB20_LINE_STATE_OFST (22) ++#define USB20_HST_DISCON_OFST (21) ++#define USB20_TX_READY_OFST (20) ++#define USB20_RX_ERROR_OFST (19) ++#define USB20_RX_ACTIVE_OFST (18) ++#define USB20_RX_VALIDH_OFST (17) ++#define USB20_RX_VALID_OFST (16) ++#define USB20_DATA_OUT_OFST (0) ++ ++//U3D_U2PHYDMON2 ++#define RGO_TXVALID_CNT_OFST (24) ++#define RGO_RXACTIVE_CNT_OFST (16) ++#define RGO_USB20_LB_BERCNT_OFST (8) ++#define USB20_PROBE_OUT_OFST (0) ++ ++//U3D_U2PHYDMON3 ++#define RGO_USB20_PRBS7_ERRCNT_OFST (16) ++#define RGO_USB20_PRBS7_DONE_OFST (3) ++#define RGO_USB20_PRBS7_LOCK_OFST (2) ++#define RGO_USB20_PRBS7_PASS_OFST (1) ++#define RGO_USB20_PRBS7_PASSTH_OFST (0) ++ ++//U3D_U2PHYBC12C ++#define RG_SIFSLV_CHGDT_DEGLCH_CNT_OFST (28) ++#define RG_SIFSLV_CHGDT_CTRL_CNT_OFST (24) ++#define RG_SIFSLV_CHGDT_FORCE_MODE_OFST (16) ++#define RG_CHGDT_ISRC_LEV_OFST (14) ++#define RG_CHGDT_VDATSRC_OFST (13) ++#define RG_CHGDT_BGVREF_SEL_OFST (10) ++#define RG_CHGDT_RDVREF_SEL_OFST (8) ++#define RG_CHGDT_ISRC_DP_OFST (7) ++#define RG_SIFSLV_CHGDT_OPOUT_DM_OFST (6) ++#define RG_CHGDT_VDAT_DM_OFST (5) ++#define RG_CHGDT_OPOUT_DP_OFST (4) ++#define RG_SIFSLV_CHGDT_VDAT_DP_OFST (3) ++#define RG_SIFSLV_CHGDT_COMP_EN_OFST (2) ++#define RG_SIFSLV_CHGDT_OPDRV_EN_OFST (1) ++#define RG_CHGDT_EN_OFST (0) ++ ++//U3D_U2PHYBC12C1 ++#define RG_CHGDT_REV_OFST (0) ++ ++//U3D_REGFCOM ++#define RG_PAGE_OFST (24) ++#define I2C_MODE_OFST (16) ++ ++ ++/////////////////////////////////////////////////////////////////////////////// ++ ++struct u3phya_reg { ++ //0x0 ++ PHY_LE32 reg0; ++ PHY_LE32 reg1; ++ PHY_LE32 reg2; ++ PHY_LE32 reg3; ++ //0x10 ++ PHY_LE32 reg4; ++ PHY_LE32 reg5; ++ PHY_LE32 reg6; ++ PHY_LE32 reg7; ++ //0x20 ++ PHY_LE32 reg8; ++ PHY_LE32 reg9; ++ PHY_LE32 rega; ++ PHY_LE32 regb; ++ //0x30 ++ PHY_LE32 regc; ++ PHY_LE32 regd; ++ PHY_LE32 rege; ++}; ++ ++//U3D_reg0 ++#define RG_SSUSB_BGR_EN (0x1<<31) //31:31 ++#define RG_SSUSB_CHPEN (0x1<<30) //30:30 ++#define RG_SSUSB_BG_DIV (0x3<<28) //29:28 ++#define RG_SSUSB_INTR_EN (0x1<<26) //26:26 ++#define RG_SSUSB_MPX_OUT_SEL (0x3<<24) //25:24 ++#define RG_SSUSB_MPX_SEL (0xff<<16) //23:16 ++#define RG_SSUSB_REF_EN (0x1<<15) //15:15 ++#define RG_SSUSB_VRT_VREF_SEL (0xf<<11) //14:11 ++#define RG_SSUSB_BG_RASEL (0x3<<9) //10:9 ++#define RG_SSUSB_BG_RBSEL (0x3<<7) //8:7 ++#define RG_SSUSB_BG_MONEN (0x1<<6) //6:6 ++#define RG_PCIE_CLKDRV_OFFSET (0x3<<0) //1:0 ++ ++//U3D_reg1 ++#define RG_PCIE_CLKDRV_SLEW (0x3<<30) //31:30 ++#define RG_PCIE_CLKDRV_AMP (0x7<<27) //29:27 ++#define RG_SSUSB_XTAL_TST_A2DCK_EN (0x1<<26) //26:26 ++#define RG_SSUSB_XTAL_MON_EN (0x1<<25) //25:25 ++#define RG_SSUSB_XTAL_HYS (0x1<<24) //24:24 ++#define RG_SSUSB_XTAL_TOP_RESERVE (0xffff<<8) //23:8 ++#define RG_SSUSB_SYSPLL_RESERVE 
(0xf<<4) //7:4 ++#define RG_SSUSB_SYSPLL_FBSEL (0x3<<2) //3:2 ++#define RG_SSUSB_SYSPLL_PREDIV (0x3<<0) //1:0 ++ ++//U3D_reg2 ++#define RG_SSUSB_SYSPLL_LF (0x1<<31) //31:31 ++#define RG_SSUSB_SYSPLL_FBDIV (0x7f<<24) //30:24 ++#define RG_SSUSB_SYSPLL_POSDIV (0x3<<22) //23:22 ++#define RG_SSUSB_SYSPLL_VCO_DIV_SEL (0x1<<21) //21:21 ++#define RG_SSUSB_SYSPLL_BLP (0x1<<20) //20:20 ++#define RG_SSUSB_SYSPLL_BP (0x1<<19) //19:19 ++#define RG_SSUSB_SYSPLL_BR (0x1<<18) //18:18 ++#define RG_SSUSB_SYSPLL_BC (0x1<<17) //17:17 ++#define RG_SSUSB_SYSPLL_DIVEN (0x7<<14) //16:14 ++#define RG_SSUSB_SYSPLL_FPEN (0x1<<13) //13:13 ++#define RG_SSUSB_SYSPLL_MONCK_EN (0x1<<12) //12:12 ++#define RG_SSUSB_SYSPLL_MONVC_EN (0x1<<11) //11:11 ++#define RG_SSUSB_SYSPLL_MONREF_EN (0x1<<10) //10:10 ++#define RG_SSUSB_SYSPLL_VOD_EN (0x1<<9) //9:9 ++#define RG_SSUSB_SYSPLL_CK_SEL (0x1<<8) //8:8 ++ ++//U3D_reg3 ++#define RG_SSUSB_SYSPLL_TOP_RESERVE (0xffff<<16) //31:16 ++ ++//U3D_reg4 ++#define RG_SSUSB_SYSPLL_PCW_NCPO (0x7fffffff<<1) //31:1 ++ ++//U3D_reg5 ++#define RG_SSUSB_SYSPLL_DDS_PI_C (0x7<<29) //31:29 ++#define RG_SSUSB_SYSPLL_DDS_HF_EN (0x1<<28) //28:28 ++#define RG_SSUSB_SYSPLL_DDS_PREDIV2 (0x1<<27) //27:27 ++#define RG_SSUSB_SYSPLL_DDS_POSTDIV2 (0x1<<26) //26:26 ++#define RG_SSUSB_SYSPLL_DDS_PI_PL_EN (0x1<<25) //25:25 ++#define RG_SSUSB_SYSPLL_DDS_PI_RST_SEL (0x1<<24) //24:24 ++#define RG_SSUSB_SYSPLL_DDS_MONEN (0x1<<23) //23:23 ++#define RG_SSUSB_SYSPLL_DDS_LPF_EN (0x1<<22) //22:22 ++#define RG_SSUSB_SYSPLL_CLK_PH_INV (0x1<<21) //21:21 ++#define RG_SSUSB_SYSPLL_DDS_SEL_EXT (0x1<<20) //20:20 ++#define RG_SSUSB_SYSPLL_DDS_DMY (0xffff<<0) //15:0 ++ ++//U3D_reg6 ++#define RG_SSUSB_TX250MCK_INVB (0x1<<31) //31:31 ++#define RG_SSUSB_IDRV_ITAILOP_EN (0x1<<30) //30:30 ++#define RG_SSUSB_IDRV_CALIB (0x3f<<24) //29:24 ++#define RG_SSUSB_TX_R50_FON (0x1<<23) //23:23 ++#define RG_SSUSB_TX_SR (0x7<<20) //22:20 ++#define RG_SSUSB_TX_EIDLE_CM (0xf<<16) //19:16 ++#define RG_SSUSB_RXDET_RSEL (0x3<<14) //15:14 ++#define RG_SSUSB_RXDET_VTHSEL (0x3<<12) //13:12 ++#define RG_SSUSB_CKMON_EN (0x1<<11) //11:11 ++#define RG_SSUSB_CKMON_SEL (0x7<<8) //10:8 ++#define RG_SSUSB_TX_VLMON_EN (0x1<<7) //7:7 ++#define RG_SSUSB_TX_VLMON_SEL (0x1<<6) //6:6 ++#define RG_SSUSB_RXLBTX_EN (0x1<<5) //5:5 ++#define RG_SSUSB_TXLBRX_EN (0x1<<4) //4:4 ++ ++//U3D_reg7 ++#define RG_SSUSB_RESERVE (0xfffff<<12) //31:12 ++#define RG_SSUSB_PLL_CKCTRL (0x3<<10) //11:10 ++#define RG_SSUSB_PLL_POSDIV (0x3<<8) //9:8 ++#define RG_SSUSB_PLL_AUTOK_LOAD (0x1<<7) //7:7 ++#define RG_SSUSB_PLL_LOAD_RSTB (0x1<<6) //6:6 ++#define RG_SSUSB_PLL_EP_EN (0x1<<5) //5:5 ++#define RG_SSUSB_PLL_VOD_EN (0x1<<4) //4:4 ++#define RG_SSUSB_PLL_V11_EN (0x1<<3) //3:3 ++#define RG_SSUSB_PLL_MONREF_EN (0x1<<2) //2:2 ++#define RG_SSUSB_PLL_MONCK_EN (0x1<<1) //1:1 ++#define RG_SSUSB_PLL_MONVC_EN (0x1<<0) //0:0 ++ ++//U3D_reg8 ++#define RG_SSUSB_PLL_RESERVE (0xffff<<0) //15:0 ++ ++//U3D_reg9 ++#define RG_SSUSB_PLL_DDS_DMY (0xffff<<16) //31:16 ++#define RG_SSUSB_PLL_SSC_PRD (0xffff<<0) //15:0 ++ ++//U3D_regA ++#define RG_SSUSB_PLL_SSC_PHASE_INI (0x1<<31) //31:31 ++#define RG_SSUSB_PLL_SSC_TRI_EN (0x1<<30) //30:30 ++#define RG_SSUSB_PLL_CLK_PH_INV (0x1<<29) //29:29 ++#define RG_SSUSB_PLL_DDS_LPF_EN (0x1<<28) //28:28 ++#define RG_SSUSB_PLL_DDS_VADJ (0x7<<21) //23:21 ++#define RG_SSUSB_PLL_DDS_MONEN (0x1<<20) //20:20 ++#define RG_SSUSB_PLL_DDS_PS_VADJ (0x7<<17) //19:17 ++#define RG_SSUSB_PLL_DDS_SEL_EXT (0x1<<16) //16:16 ++#define RG_SSUSB_CDR_PD_DIV_BYPASS (0x1<<15) //15:15 ++#define 
RG_SSUSB_CDR_PD_DIV_SEL (0x1<<14) //14:14 ++#define RG_SSUSB_CDR_CPBIAS_SEL (0x1<<13) //13:13 ++#define RG_SSUSB_CDR_OSCDET_EN (0x1<<12) //12:12 ++#define RG_SSUSB_CDR_MONMUX (0x1<<11) //11:11 ++#define RG_SSUSB_CDR_CKCTRL (0x3<<9) //10:9 ++#define RG_SSUSB_CDR_ACCEN (0x1<<8) //8:8 ++#define RG_SSUSB_CDR_BYPASS (0x3<<6) //7:6 ++#define RG_SSUSB_CDR_PI_SLEW (0x3<<4) //5:4 ++#define RG_SSUSB_CDR_EPEN (0x1<<3) //3:3 ++#define RG_SSUSB_CDR_AUTOK_LOAD (0x1<<2) //2:2 ++#define RG_SSUSB_CDR_LOAD_RSTB (0x1<<1) //1:1 ++#define RG_SSUSB_CDR_MONEN (0x1<<0) //0:0 ++ ++//U3D_regB ++#define RG_SSUSB_CDR_MONEN_DIG (0x1<<31) //31:31 ++#define RG_SSUSB_CDR_REGOD (0x3<<29) //30:29 ++#define RG_SSUSB_RX_DAC_EN (0x1<<26) //26:26 ++#define RG_SSUSB_RX_DAC_PWD (0x1<<25) //25:25 ++#define RG_SSUSB_EQ_CURSEL (0x1<<24) //24:24 ++#define RG_SSUSB_RX_DAC_MUX (0x1f<<19) //23:19 ++#define RG_SSUSB_RX_R2T_EN (0x1<<18) //18:18 ++#define RG_SSUSB_RX_T2R_EN (0x1<<17) //17:17 ++#define RG_SSUSB_RX_50_LOWER (0x7<<14) //16:14 ++#define RG_SSUSB_RX_50_TAR (0x3<<12) //13:12 ++#define RG_SSUSB_RX_SW_CTRL (0xf<<7) //10:7 ++#define RG_PCIE_SIGDET_VTH (0x3<<5) //6:5 ++#define RG_PCIE_SIGDET_LPF (0x3<<3) //4:3 ++#define RG_SSUSB_LFPS_MON_EN (0x1<<2) //2:2 ++ ++//U3D_regC ++#define RG_SSUSB_RXAFE_DCMON_SEL (0xf<<28) //31:28 ++#define RG_SSUSB_CDR_RESERVE (0xff<<16) //23:16 ++#define RG_SSUSB_RXAFE_RESERVE (0xff<<8) //15:8 ++#define RG_PCIE_RX_RESERVE (0xff<<0) //7:0 ++ ++//U3D_redD ++#define RGS_SSUSB_CDR_NO_OSC (0x1<<8) //8:8 ++#define RGS_SSUSB_RX_DEBUG_RESERVE (0xff<<0) //7:0 ++ ++//U3D_regE ++#define RG_SSUSB_INT_BIAS_SEL (0x1<<4) //4:4 ++#define RG_SSUSB_EXT_BIAS_SEL (0x1<<3) //3:3 ++#define RG_SSUSB_RX_P1_ENTRY_PASS (0x1<<2) //2:2 ++#define RG_SSUSB_RX_PD_RST (0x1<<1) //1:1 ++#define RG_SSUSB_RX_PD_RST_PASS (0x1<<0) //0:0 ++ ++ ++/* OFFSET */ ++ ++//U3D_reg0 ++#define RG_SSUSB_BGR_EN_OFST (31) ++#define RG_SSUSB_CHPEN_OFST (30) ++#define RG_SSUSB_BG_DIV_OFST (28) ++#define RG_SSUSB_INTR_EN_OFST (26) ++#define RG_SSUSB_MPX_OUT_SEL_OFST (24) ++#define RG_SSUSB_MPX_SEL_OFST (16) ++#define RG_SSUSB_REF_EN_OFST (15) ++#define RG_SSUSB_VRT_VREF_SEL_OFST (11) ++#define RG_SSUSB_BG_RASEL_OFST (9) ++#define RG_SSUSB_BG_RBSEL_OFST (7) ++#define RG_SSUSB_BG_MONEN_OFST (6) ++#define RG_PCIE_CLKDRV_OFFSET_OFST (0) ++ ++//U3D_reg1 ++#define RG_PCIE_CLKDRV_SLEW_OFST (30) ++#define RG_PCIE_CLKDRV_AMP_OFST (27) ++#define RG_SSUSB_XTAL_TST_A2DCK_EN_OFST (26) ++#define RG_SSUSB_XTAL_MON_EN_OFST (25) ++#define RG_SSUSB_XTAL_HYS_OFST (24) ++#define RG_SSUSB_XTAL_TOP_RESERVE_OFST (8) ++#define RG_SSUSB_SYSPLL_RESERVE_OFST (4) ++#define RG_SSUSB_SYSPLL_FBSEL_OFST (2) ++#define RG_SSUSB_SYSPLL_PREDIV_OFST (0) ++ ++//U3D_reg2 ++#define RG_SSUSB_SYSPLL_LF_OFST (31) ++#define RG_SSUSB_SYSPLL_FBDIV_OFST (24) ++#define RG_SSUSB_SYSPLL_POSDIV_OFST (22) ++#define RG_SSUSB_SYSPLL_VCO_DIV_SEL_OFST (21) ++#define RG_SSUSB_SYSPLL_BLP_OFST (20) ++#define RG_SSUSB_SYSPLL_BP_OFST (19) ++#define RG_SSUSB_SYSPLL_BR_OFST (18) ++#define RG_SSUSB_SYSPLL_BC_OFST (17) ++#define RG_SSUSB_SYSPLL_DIVEN_OFST (14) ++#define RG_SSUSB_SYSPLL_FPEN_OFST (13) ++#define RG_SSUSB_SYSPLL_MONCK_EN_OFST (12) ++#define RG_SSUSB_SYSPLL_MONVC_EN_OFST (11) ++#define RG_SSUSB_SYSPLL_MONREF_EN_OFST (10) ++#define RG_SSUSB_SYSPLL_VOD_EN_OFST (9) ++#define RG_SSUSB_SYSPLL_CK_SEL_OFST (8) ++ ++//U3D_reg3 ++#define RG_SSUSB_SYSPLL_TOP_RESERVE_OFST (16) ++ ++//U3D_reg4 ++#define RG_SSUSB_SYSPLL_PCW_NCPO_OFST (1) ++ ++//U3D_reg5 ++#define RG_SSUSB_SYSPLL_DDS_PI_C_OFST (29) ++#define 
RG_SSUSB_SYSPLL_DDS_HF_EN_OFST (28) ++#define RG_SSUSB_SYSPLL_DDS_PREDIV2_OFST (27) ++#define RG_SSUSB_SYSPLL_DDS_POSTDIV2_OFST (26) ++#define RG_SSUSB_SYSPLL_DDS_PI_PL_EN_OFST (25) ++#define RG_SSUSB_SYSPLL_DDS_PI_RST_SEL_OFST (24) ++#define RG_SSUSB_SYSPLL_DDS_MONEN_OFST (23) ++#define RG_SSUSB_SYSPLL_DDS_LPF_EN_OFST (22) ++#define RG_SSUSB_SYSPLL_CLK_PH_INV_OFST (21) ++#define RG_SSUSB_SYSPLL_DDS_SEL_EXT_OFST (20) ++#define RG_SSUSB_SYSPLL_DDS_DMY_OFST (0) ++ ++//U3D_reg6 ++#define RG_SSUSB_TX250MCK_INVB_OFST (31) ++#define RG_SSUSB_IDRV_ITAILOP_EN_OFST (30) ++#define RG_SSUSB_IDRV_CALIB_OFST (24) ++#define RG_SSUSB_TX_R50_FON_OFST (23) ++#define RG_SSUSB_TX_SR_OFST (20) ++#define RG_SSUSB_TX_EIDLE_CM_OFST (16) ++#define RG_SSUSB_RXDET_RSEL_OFST (14) ++#define RG_SSUSB_RXDET_VTHSEL_OFST (12) ++#define RG_SSUSB_CKMON_EN_OFST (11) ++#define RG_SSUSB_CKMON_SEL_OFST (8) ++#define RG_SSUSB_TX_VLMON_EN_OFST (7) ++#define RG_SSUSB_TX_VLMON_SEL_OFST (6) ++#define RG_SSUSB_RXLBTX_EN_OFST (5) ++#define RG_SSUSB_TXLBRX_EN_OFST (4) ++ ++//U3D_reg7 ++#define RG_SSUSB_RESERVE_OFST (12) ++#define RG_SSUSB_PLL_CKCTRL_OFST (10) ++#define RG_SSUSB_PLL_POSDIV_OFST (8) ++#define RG_SSUSB_PLL_AUTOK_LOAD_OFST (7) ++#define RG_SSUSB_PLL_LOAD_RSTB_OFST (6) ++#define RG_SSUSB_PLL_EP_EN_OFST (5) ++#define RG_SSUSB_PLL_VOD_EN_OFST (4) ++#define RG_SSUSB_PLL_V11_EN_OFST (3) ++#define RG_SSUSB_PLL_MONREF_EN_OFST (2) ++#define RG_SSUSB_PLL_MONCK_EN_OFST (1) ++#define RG_SSUSB_PLL_MONVC_EN_OFST (0) ++ ++//U3D_reg8 ++#define RG_SSUSB_PLL_RESERVE_OFST (0) ++ ++//U3D_reg9 ++#define RG_SSUSB_PLL_DDS_DMY_OFST (16) ++#define RG_SSUSB_PLL_SSC_PRD_OFST (0) ++ ++//U3D_regA ++#define RG_SSUSB_PLL_SSC_PHASE_INI_OFST (31) ++#define RG_SSUSB_PLL_SSC_TRI_EN_OFST (30) ++#define RG_SSUSB_PLL_CLK_PH_INV_OFST (29) ++#define RG_SSUSB_PLL_DDS_LPF_EN_OFST (28) ++#define RG_SSUSB_PLL_DDS_VADJ_OFST (21) ++#define RG_SSUSB_PLL_DDS_MONEN_OFST (20) ++#define RG_SSUSB_PLL_DDS_PS_VADJ_OFST (17) ++#define RG_SSUSB_PLL_DDS_SEL_EXT_OFST (16) ++#define RG_SSUSB_CDR_PD_DIV_BYPASS_OFST (15) ++#define RG_SSUSB_CDR_PD_DIV_SEL_OFST (14) ++#define RG_SSUSB_CDR_CPBIAS_SEL_OFST (13) ++#define RG_SSUSB_CDR_OSCDET_EN_OFST (12) ++#define RG_SSUSB_CDR_MONMUX_OFST (11) ++#define RG_SSUSB_CDR_CKCTRL_OFST (9) ++#define RG_SSUSB_CDR_ACCEN_OFST (8) ++#define RG_SSUSB_CDR_BYPASS_OFST (6) ++#define RG_SSUSB_CDR_PI_SLEW_OFST (4) ++#define RG_SSUSB_CDR_EPEN_OFST (3) ++#define RG_SSUSB_CDR_AUTOK_LOAD_OFST (2) ++#define RG_SSUSB_CDR_LOAD_RSTB_OFST (1) ++#define RG_SSUSB_CDR_MONEN_OFST (0) ++ ++//U3D_regB ++#define RG_SSUSB_CDR_MONEN_DIG_OFST (31) ++#define RG_SSUSB_CDR_REGOD_OFST (29) ++#define RG_SSUSB_RX_DAC_EN_OFST (26) ++#define RG_SSUSB_RX_DAC_PWD_OFST (25) ++#define RG_SSUSB_EQ_CURSEL_OFST (24) ++#define RG_SSUSB_RX_DAC_MUX_OFST (19) ++#define RG_SSUSB_RX_R2T_EN_OFST (18) ++#define RG_SSUSB_RX_T2R_EN_OFST (17) ++#define RG_SSUSB_RX_50_LOWER_OFST (14) ++#define RG_SSUSB_RX_50_TAR_OFST (12) ++#define RG_SSUSB_RX_SW_CTRL_OFST (7) ++#define RG_PCIE_SIGDET_VTH_OFST (5) ++#define RG_PCIE_SIGDET_LPF_OFST (3) ++#define RG_SSUSB_LFPS_MON_EN_OFST (2) ++ ++//U3D_regC ++#define RG_SSUSB_RXAFE_DCMON_SEL_OFST (28) ++#define RG_SSUSB_CDR_RESERVE_OFST (16) ++#define RG_SSUSB_RXAFE_RESERVE_OFST (8) ++#define RG_PCIE_RX_RESERVE_OFST (0) ++ ++//U3D_redD ++#define RGS_SSUSB_CDR_NO_OSC_OFST (8) ++#define RGS_SSUSB_RX_DEBUG_RESERVE_OFST (0) ++ ++//U3D_regE ++#define RG_SSUSB_INT_BIAS_SEL_OFST (4) ++#define RG_SSUSB_EXT_BIAS_SEL_OFST (3) ++#define RG_SSUSB_RX_P1_ENTRY_PASS_OFST (2) 
++#define RG_SSUSB_RX_PD_RST_OFST (1) ++#define RG_SSUSB_RX_PD_RST_PASS_OFST (0) ++ ++/////////////////////////////////////////////////////////////////////////////// ++ ++struct u3phya_da_reg { ++ //0x0 ++ PHY_LE32 reg0; ++ PHY_LE32 reg1; ++ PHY_LE32 reg4; ++ PHY_LE32 reg5; ++ //0x10 ++ PHY_LE32 reg6; ++ PHY_LE32 reg7; ++ PHY_LE32 reg8; ++ PHY_LE32 reg9; ++ //0x20 ++ PHY_LE32 reg10; ++ PHY_LE32 reg12; ++ PHY_LE32 reg13; ++ PHY_LE32 reg14; ++ //0x30 ++ PHY_LE32 reg15; ++ PHY_LE32 reg16; ++ PHY_LE32 reg19; ++ PHY_LE32 reg20; ++ //0x40 ++ PHY_LE32 reg21; ++ PHY_LE32 reg23; ++ PHY_LE32 reg25; ++ PHY_LE32 reg26; ++ //0x50 ++ PHY_LE32 reg28; ++ PHY_LE32 reg29; ++ PHY_LE32 reg30; ++ PHY_LE32 reg31; ++ //0x60 ++ PHY_LE32 reg32; ++ PHY_LE32 reg33; ++}; ++ ++//U3D_reg0 ++#define RG_PCIE_SPEED_PE2D (0x1<<24) //24:24 ++#define RG_PCIE_SPEED_PE2H (0x1<<23) //23:23 ++#define RG_PCIE_SPEED_PE1D (0x1<<22) //22:22 ++#define RG_PCIE_SPEED_PE1H (0x1<<21) //21:21 ++#define RG_PCIE_SPEED_U3 (0x1<<20) //20:20 ++#define RG_SSUSB_XTAL_EXT_EN_PE2D (0x3<<18) //19:18 ++#define RG_SSUSB_XTAL_EXT_EN_PE2H (0x3<<16) //17:16 ++#define RG_SSUSB_XTAL_EXT_EN_PE1D (0x3<<14) //15:14 ++#define RG_SSUSB_XTAL_EXT_EN_PE1H (0x3<<12) //13:12 ++#define RG_SSUSB_XTAL_EXT_EN_U3 (0x3<<10) //11:10 ++#define RG_SSUSB_CDR_REFCK_SEL_PE2D (0x3<<8) //9:8 ++#define RG_SSUSB_CDR_REFCK_SEL_PE2H (0x3<<6) //7:6 ++#define RG_SSUSB_CDR_REFCK_SEL_PE1D (0x3<<4) //5:4 ++#define RG_SSUSB_CDR_REFCK_SEL_PE1H (0x3<<2) //3:2 ++#define RG_SSUSB_CDR_REFCK_SEL_U3 (0x3<<0) //1:0 ++ ++//U3D_reg1 ++#define RG_USB20_REFCK_SEL_PE2D (0x1<<30) //30:30 ++#define RG_USB20_REFCK_SEL_PE2H (0x1<<29) //29:29 ++#define RG_USB20_REFCK_SEL_PE1D (0x1<<28) //28:28 ++#define RG_USB20_REFCK_SEL_PE1H (0x1<<27) //27:27 ++#define RG_USB20_REFCK_SEL_U3 (0x1<<26) //26:26 ++#define RG_PCIE_REFCK_DIV4_PE2D (0x1<<25) //25:25 ++#define RG_PCIE_REFCK_DIV4_PE2H (0x1<<24) //24:24 ++#define RG_PCIE_REFCK_DIV4_PE1D (0x1<<18) //18:18 ++#define RG_PCIE_REFCK_DIV4_PE1H (0x1<<17) //17:17 ++#define RG_PCIE_REFCK_DIV4_U3 (0x1<<16) //16:16 ++#define RG_PCIE_MODE_PE2D (0x1<<8) //8:8 ++#define RG_PCIE_MODE_PE2H (0x1<<3) //3:3 ++#define RG_PCIE_MODE_PE1D (0x1<<2) //2:2 ++#define RG_PCIE_MODE_PE1H (0x1<<1) //1:1 ++#define RG_PCIE_MODE_U3 (0x1<<0) //0:0 ++ ++//U3D_reg4 ++#define RG_SSUSB_PLL_DIVEN_PE2D (0x7<<22) //24:22 ++#define RG_SSUSB_PLL_DIVEN_PE2H (0x7<<19) //21:19 ++#define RG_SSUSB_PLL_DIVEN_PE1D (0x7<<16) //18:16 ++#define RG_SSUSB_PLL_DIVEN_PE1H (0x7<<13) //15:13 ++#define RG_SSUSB_PLL_DIVEN_U3 (0x7<<10) //12:10 ++#define RG_SSUSB_PLL_BC_PE2D (0x3<<8) //9:8 ++#define RG_SSUSB_PLL_BC_PE2H (0x3<<6) //7:6 ++#define RG_SSUSB_PLL_BC_PE1D (0x3<<4) //5:4 ++#define RG_SSUSB_PLL_BC_PE1H (0x3<<2) //3:2 ++#define RG_SSUSB_PLL_BC_U3 (0x3<<0) //1:0 ++ ++//U3D_reg5 ++#define RG_SSUSB_PLL_BR_PE2D (0x7<<27) //29:27 ++#define RG_SSUSB_PLL_BR_PE2H (0x7<<24) //26:24 ++#define RG_SSUSB_PLL_BR_PE1D (0x7<<21) //23:21 ++#define RG_SSUSB_PLL_BR_PE1H (0x7<<18) //20:18 ++#define RG_SSUSB_PLL_BR_U3 (0x7<<15) //17:15 ++#define RG_SSUSB_PLL_IC_PE2D (0x7<<12) //14:12 ++#define RG_SSUSB_PLL_IC_PE2H (0x7<<9) //11:9 ++#define RG_SSUSB_PLL_IC_PE1D (0x7<<6) //8:6 ++#define RG_SSUSB_PLL_IC_PE1H (0x7<<3) //5:3 ++#define RG_SSUSB_PLL_IC_U3 (0x7<<0) //2:0 ++ ++//U3D_reg6 ++#define RG_SSUSB_PLL_IR_PE2D (0xf<<24) //27:24 ++#define RG_SSUSB_PLL_IR_PE2H (0xf<<16) //19:16 ++#define RG_SSUSB_PLL_IR_PE1D (0xf<<8) //11:8 ++#define RG_SSUSB_PLL_IR_PE1H (0xf<<4) //7:4 ++#define RG_SSUSB_PLL_IR_U3 (0xf<<0) //3:0 ++ ++//U3D_reg7 ++#define 
RG_SSUSB_PLL_BP_PE2D (0xf<<24) //27:24 ++#define RG_SSUSB_PLL_BP_PE2H (0xf<<16) //19:16 ++#define RG_SSUSB_PLL_BP_PE1D (0xf<<8) //11:8 ++#define RG_SSUSB_PLL_BP_PE1H (0xf<<4) //7:4 ++#define RG_SSUSB_PLL_BP_U3 (0xf<<0) //3:0 ++ ++//U3D_reg8 ++#define RG_SSUSB_PLL_FBKSEL_PE2D (0x3<<24) //25:24 ++#define RG_SSUSB_PLL_FBKSEL_PE2H (0x3<<16) //17:16 ++#define RG_SSUSB_PLL_FBKSEL_PE1D (0x3<<8) //9:8 ++#define RG_SSUSB_PLL_FBKSEL_PE1H (0x3<<2) //3:2 ++#define RG_SSUSB_PLL_FBKSEL_U3 (0x3<<0) //1:0 ++ ++//U3D_reg9 ++#define RG_SSUSB_PLL_FBKDIV_PE2H (0x7f<<24) //30:24 ++#define RG_SSUSB_PLL_FBKDIV_PE1D (0x7f<<16) //22:16 ++#define RG_SSUSB_PLL_FBKDIV_PE1H (0x7f<<8) //14:8 ++#define RG_SSUSB_PLL_FBKDIV_U3 (0x7f<<0) //6:0 ++ ++//U3D_reg10 ++#define RG_SSUSB_PLL_PREDIV_PE2D (0x3<<26) //27:26 ++#define RG_SSUSB_PLL_PREDIV_PE2H (0x3<<24) //25:24 ++#define RG_SSUSB_PLL_PREDIV_PE1D (0x3<<18) //19:18 ++#define RG_SSUSB_PLL_PREDIV_PE1H (0x3<<16) //17:16 ++#define RG_SSUSB_PLL_PREDIV_U3 (0x3<<8) //9:8 ++#define RG_SSUSB_PLL_FBKDIV_PE2D (0x7f<<0) //6:0 ++ ++//U3D_reg12 ++#define RG_SSUSB_PLL_PCW_NCPO_U3 (0x7fffffff<<0) //30:0 ++ ++//U3D_reg13 ++#define RG_SSUSB_PLL_PCW_NCPO_PE1H (0x7fffffff<<0) //30:0 ++ ++//U3D_reg14 ++#define RG_SSUSB_PLL_PCW_NCPO_PE1D (0x7fffffff<<0) //30:0 ++ ++//U3D_reg15 ++#define RG_SSUSB_PLL_PCW_NCPO_PE2H (0x7fffffff<<0) //30:0 ++ ++//U3D_reg16 ++#define RG_SSUSB_PLL_PCW_NCPO_PE2D (0x7fffffff<<0) //30:0 ++ ++//U3D_reg19 ++#define RG_SSUSB_PLL_SSC_DELTA1_PE1H (0xffff<<16) //31:16 ++#define RG_SSUSB_PLL_SSC_DELTA1_U3 (0xffff<<0) //15:0 ++ ++//U3D_reg20 ++#define RG_SSUSB_PLL_SSC_DELTA1_PE2H (0xffff<<16) //31:16 ++#define RG_SSUSB_PLL_SSC_DELTA1_PE1D (0xffff<<0) //15:0 ++ ++//U3D_reg21 ++#define RG_SSUSB_PLL_SSC_DELTA_U3 (0xffff<<16) //31:16 ++#define RG_SSUSB_PLL_SSC_DELTA1_PE2D (0xffff<<0) //15:0 ++ ++//U3D_reg23 ++#define RG_SSUSB_PLL_SSC_DELTA_PE1D (0xffff<<16) //31:16 ++#define RG_SSUSB_PLL_SSC_DELTA_PE1H (0xffff<<0) //15:0 ++ ++//U3D_reg25 ++#define RG_SSUSB_PLL_SSC_DELTA_PE2D (0xffff<<16) //31:16 ++#define RG_SSUSB_PLL_SSC_DELTA_PE2H (0xffff<<0) //15:0 ++ ++//U3D_reg26 ++#define RG_SSUSB_PLL_REFCKDIV_PE2D (0x1<<25) //25:25 ++#define RG_SSUSB_PLL_REFCKDIV_PE2H (0x1<<24) //24:24 ++#define RG_SSUSB_PLL_REFCKDIV_PE1D (0x1<<16) //16:16 ++#define RG_SSUSB_PLL_REFCKDIV_PE1H (0x1<<8) //8:8 ++#define RG_SSUSB_PLL_REFCKDIV_U3 (0x1<<0) //0:0 ++ ++//U3D_reg28 ++#define RG_SSUSB_CDR_BPA_PE2D (0x3<<24) //25:24 ++#define RG_SSUSB_CDR_BPA_PE2H (0x3<<16) //17:16 ++#define RG_SSUSB_CDR_BPA_PE1D (0x3<<10) //11:10 ++#define RG_SSUSB_CDR_BPA_PE1H (0x3<<8) //9:8 ++#define RG_SSUSB_CDR_BPA_U3 (0x3<<0) //1:0 ++ ++//U3D_reg29 ++#define RG_SSUSB_CDR_BPB_PE2D (0x7<<24) //26:24 ++#define RG_SSUSB_CDR_BPB_PE2H (0x7<<16) //18:16 ++#define RG_SSUSB_CDR_BPB_PE1D (0x7<<6) //8:6 ++#define RG_SSUSB_CDR_BPB_PE1H (0x7<<3) //5:3 ++#define RG_SSUSB_CDR_BPB_U3 (0x7<<0) //2:0 ++ ++//U3D_reg30 ++#define RG_SSUSB_CDR_BR_PE2D (0x7<<24) //26:24 ++#define RG_SSUSB_CDR_BR_PE2H (0x7<<16) //18:16 ++#define RG_SSUSB_CDR_BR_PE1D (0x7<<6) //8:6 ++#define RG_SSUSB_CDR_BR_PE1H (0x7<<3) //5:3 ++#define RG_SSUSB_CDR_BR_U3 (0x7<<0) //2:0 ++ ++//U3D_reg31 ++#define RG_SSUSB_CDR_FBDIV_PE2H (0x7f<<24) //30:24 ++#define RG_SSUSB_CDR_FBDIV_PE1D (0x7f<<16) //22:16 ++#define RG_SSUSB_CDR_FBDIV_PE1H (0x7f<<8) //14:8 ++#define RG_SSUSB_CDR_FBDIV_U3 (0x7f<<0) //6:0 ++ ++//U3D_reg32 ++#define RG_SSUSB_EQ_RSTEP1_PE2D (0x3<<30) //31:30 ++#define RG_SSUSB_EQ_RSTEP1_PE2H (0x3<<28) //29:28 ++#define RG_SSUSB_EQ_RSTEP1_PE1D (0x3<<26) //27:26 ++#define 
RG_SSUSB_EQ_RSTEP1_PE1H (0x3<<24) //25:24 ++#define RG_SSUSB_EQ_RSTEP1_U3 (0x3<<22) //23:22 ++#define RG_SSUSB_LFPS_DEGLITCH_PE2D (0x3<<20) //21:20 ++#define RG_SSUSB_LFPS_DEGLITCH_PE2H (0x3<<18) //19:18 ++#define RG_SSUSB_LFPS_DEGLITCH_PE1D (0x3<<16) //17:16 ++#define RG_SSUSB_LFPS_DEGLITCH_PE1H (0x3<<14) //15:14 ++#define RG_SSUSB_LFPS_DEGLITCH_U3 (0x3<<12) //13:12 ++#define RG_SSUSB_CDR_KVSEL_PE2D (0x1<<11) //11:11 ++#define RG_SSUSB_CDR_KVSEL_PE2H (0x1<<10) //10:10 ++#define RG_SSUSB_CDR_KVSEL_PE1D (0x1<<9) //9:9 ++#define RG_SSUSB_CDR_KVSEL_PE1H (0x1<<8) //8:8 ++#define RG_SSUSB_CDR_KVSEL_U3 (0x1<<7) //7:7 ++#define RG_SSUSB_CDR_FBDIV_PE2D (0x7f<<0) //6:0 ++ ++//U3D_reg33 ++#define RG_SSUSB_RX_CMPWD_PE2D (0x1<<26) //26:26 ++#define RG_SSUSB_RX_CMPWD_PE2H (0x1<<25) //25:25 ++#define RG_SSUSB_RX_CMPWD_PE1D (0x1<<24) //24:24 ++#define RG_SSUSB_RX_CMPWD_PE1H (0x1<<23) //23:23 ++#define RG_SSUSB_RX_CMPWD_U3 (0x1<<16) //16:16 ++#define RG_SSUSB_EQ_RSTEP2_PE2D (0x3<<8) //9:8 ++#define RG_SSUSB_EQ_RSTEP2_PE2H (0x3<<6) //7:6 ++#define RG_SSUSB_EQ_RSTEP2_PE1D (0x3<<4) //5:4 ++#define RG_SSUSB_EQ_RSTEP2_PE1H (0x3<<2) //3:2 ++#define RG_SSUSB_EQ_RSTEP2_U3 (0x3<<0) //1:0 ++ ++ ++/* OFFSET */ ++ ++//U3D_reg0 ++#define RG_PCIE_SPEED_PE2D_OFST (24) ++#define RG_PCIE_SPEED_PE2H_OFST (23) ++#define RG_PCIE_SPEED_PE1D_OFST (22) ++#define RG_PCIE_SPEED_PE1H_OFST (21) ++#define RG_PCIE_SPEED_U3_OFST (20) ++#define RG_SSUSB_XTAL_EXT_EN_PE2D_OFST (18) ++#define RG_SSUSB_XTAL_EXT_EN_PE2H_OFST (16) ++#define RG_SSUSB_XTAL_EXT_EN_PE1D_OFST (14) ++#define RG_SSUSB_XTAL_EXT_EN_PE1H_OFST (12) ++#define RG_SSUSB_XTAL_EXT_EN_U3_OFST (10) ++#define RG_SSUSB_CDR_REFCK_SEL_PE2D_OFST (8) ++#define RG_SSUSB_CDR_REFCK_SEL_PE2H_OFST (6) ++#define RG_SSUSB_CDR_REFCK_SEL_PE1D_OFST (4) ++#define RG_SSUSB_CDR_REFCK_SEL_PE1H_OFST (2) ++#define RG_SSUSB_CDR_REFCK_SEL_U3_OFST (0) ++ ++//U3D_reg1 ++#define RG_USB20_REFCK_SEL_PE2D_OFST (30) ++#define RG_USB20_REFCK_SEL_PE2H_OFST (29) ++#define RG_USB20_REFCK_SEL_PE1D_OFST (28) ++#define RG_USB20_REFCK_SEL_PE1H_OFST (27) ++#define RG_USB20_REFCK_SEL_U3_OFST (26) ++#define RG_PCIE_REFCK_DIV4_PE2D_OFST (25) ++#define RG_PCIE_REFCK_DIV4_PE2H_OFST (24) ++#define RG_PCIE_REFCK_DIV4_PE1D_OFST (18) ++#define RG_PCIE_REFCK_DIV4_PE1H_OFST (17) ++#define RG_PCIE_REFCK_DIV4_U3_OFST (16) ++#define RG_PCIE_MODE_PE2D_OFST (8) ++#define RG_PCIE_MODE_PE2H_OFST (3) ++#define RG_PCIE_MODE_PE1D_OFST (2) ++#define RG_PCIE_MODE_PE1H_OFST (1) ++#define RG_PCIE_MODE_U3_OFST (0) ++ ++//U3D_reg4 ++#define RG_SSUSB_PLL_DIVEN_PE2D_OFST (22) ++#define RG_SSUSB_PLL_DIVEN_PE2H_OFST (19) ++#define RG_SSUSB_PLL_DIVEN_PE1D_OFST (16) ++#define RG_SSUSB_PLL_DIVEN_PE1H_OFST (13) ++#define RG_SSUSB_PLL_DIVEN_U3_OFST (10) ++#define RG_SSUSB_PLL_BC_PE2D_OFST (8) ++#define RG_SSUSB_PLL_BC_PE2H_OFST (6) ++#define RG_SSUSB_PLL_BC_PE1D_OFST (4) ++#define RG_SSUSB_PLL_BC_PE1H_OFST (2) ++#define RG_SSUSB_PLL_BC_U3_OFST (0) ++ ++//U3D_reg5 ++#define RG_SSUSB_PLL_BR_PE2D_OFST (27) ++#define RG_SSUSB_PLL_BR_PE2H_OFST (24) ++#define RG_SSUSB_PLL_BR_PE1D_OFST (21) ++#define RG_SSUSB_PLL_BR_PE1H_OFST (18) ++#define RG_SSUSB_PLL_BR_U3_OFST (15) ++#define RG_SSUSB_PLL_IC_PE2D_OFST (12) ++#define RG_SSUSB_PLL_IC_PE2H_OFST (9) ++#define RG_SSUSB_PLL_IC_PE1D_OFST (6) ++#define RG_SSUSB_PLL_IC_PE1H_OFST (3) ++#define RG_SSUSB_PLL_IC_U3_OFST (0) ++ ++//U3D_reg6 ++#define RG_SSUSB_PLL_IR_PE2D_OFST (24) ++#define RG_SSUSB_PLL_IR_PE2H_OFST (16) ++#define RG_SSUSB_PLL_IR_PE1D_OFST (8) ++#define RG_SSUSB_PLL_IR_PE1H_OFST (4) ++#define 
RG_SSUSB_PLL_IR_U3_OFST (0) ++ ++//U3D_reg7 ++#define RG_SSUSB_PLL_BP_PE2D_OFST (24) ++#define RG_SSUSB_PLL_BP_PE2H_OFST (16) ++#define RG_SSUSB_PLL_BP_PE1D_OFST (8) ++#define RG_SSUSB_PLL_BP_PE1H_OFST (4) ++#define RG_SSUSB_PLL_BP_U3_OFST (0) ++ ++//U3D_reg8 ++#define RG_SSUSB_PLL_FBKSEL_PE2D_OFST (24) ++#define RG_SSUSB_PLL_FBKSEL_PE2H_OFST (16) ++#define RG_SSUSB_PLL_FBKSEL_PE1D_OFST (8) ++#define RG_SSUSB_PLL_FBKSEL_PE1H_OFST (2) ++#define RG_SSUSB_PLL_FBKSEL_U3_OFST (0) ++ ++//U3D_reg9 ++#define RG_SSUSB_PLL_FBKDIV_PE2H_OFST (24) ++#define RG_SSUSB_PLL_FBKDIV_PE1D_OFST (16) ++#define RG_SSUSB_PLL_FBKDIV_PE1H_OFST (8) ++#define RG_SSUSB_PLL_FBKDIV_U3_OFST (0) ++ ++//U3D_reg10 ++#define RG_SSUSB_PLL_PREDIV_PE2D_OFST (26) ++#define RG_SSUSB_PLL_PREDIV_PE2H_OFST (24) ++#define RG_SSUSB_PLL_PREDIV_PE1D_OFST (18) ++#define RG_SSUSB_PLL_PREDIV_PE1H_OFST (16) ++#define RG_SSUSB_PLL_PREDIV_U3_OFST (8) ++#define RG_SSUSB_PLL_FBKDIV_PE2D_OFST (0) ++ ++//U3D_reg12 ++#define RG_SSUSB_PLL_PCW_NCPO_U3_OFST (0) ++ ++//U3D_reg13 ++#define RG_SSUSB_PLL_PCW_NCPO_PE1H_OFST (0) ++ ++//U3D_reg14 ++#define RG_SSUSB_PLL_PCW_NCPO_PE1D_OFST (0) ++ ++//U3D_reg15 ++#define RG_SSUSB_PLL_PCW_NCPO_PE2H_OFST (0) ++ ++//U3D_reg16 ++#define RG_SSUSB_PLL_PCW_NCPO_PE2D_OFST (0) ++ ++//U3D_reg19 ++#define RG_SSUSB_PLL_SSC_DELTA1_PE1H_OFST (16) ++#define RG_SSUSB_PLL_SSC_DELTA1_U3_OFST (0) ++ ++//U3D_reg20 ++#define RG_SSUSB_PLL_SSC_DELTA1_PE2H_OFST (16) ++#define RG_SSUSB_PLL_SSC_DELTA1_PE1D_OFST (0) ++ ++//U3D_reg21 ++#define RG_SSUSB_PLL_SSC_DELTA_U3_OFST (16) ++#define RG_SSUSB_PLL_SSC_DELTA1_PE2D_OFST (0) ++ ++//U3D_reg23 ++#define RG_SSUSB_PLL_SSC_DELTA_PE1D_OFST (16) ++#define RG_SSUSB_PLL_SSC_DELTA_PE1H_OFST (0) ++ ++//U3D_reg25 ++#define RG_SSUSB_PLL_SSC_DELTA_PE2D_OFST (16) ++#define RG_SSUSB_PLL_SSC_DELTA_PE2H_OFST (0) ++ ++//U3D_reg26 ++#define RG_SSUSB_PLL_REFCKDIV_PE2D_OFST (25) ++#define RG_SSUSB_PLL_REFCKDIV_PE2H_OFST (24) ++#define RG_SSUSB_PLL_REFCKDIV_PE1D_OFST (16) ++#define RG_SSUSB_PLL_REFCKDIV_PE1H_OFST (8) ++#define RG_SSUSB_PLL_REFCKDIV_U3_OFST (0) ++ ++//U3D_reg28 ++#define RG_SSUSB_CDR_BPA_PE2D_OFST (24) ++#define RG_SSUSB_CDR_BPA_PE2H_OFST (16) ++#define RG_SSUSB_CDR_BPA_PE1D_OFST (10) ++#define RG_SSUSB_CDR_BPA_PE1H_OFST (8) ++#define RG_SSUSB_CDR_BPA_U3_OFST (0) ++ ++//U3D_reg29 ++#define RG_SSUSB_CDR_BPB_PE2D_OFST (24) ++#define RG_SSUSB_CDR_BPB_PE2H_OFST (16) ++#define RG_SSUSB_CDR_BPB_PE1D_OFST (6) ++#define RG_SSUSB_CDR_BPB_PE1H_OFST (3) ++#define RG_SSUSB_CDR_BPB_U3_OFST (0) ++ ++//U3D_reg30 ++#define RG_SSUSB_CDR_BR_PE2D_OFST (24) ++#define RG_SSUSB_CDR_BR_PE2H_OFST (16) ++#define RG_SSUSB_CDR_BR_PE1D_OFST (6) ++#define RG_SSUSB_CDR_BR_PE1H_OFST (3) ++#define RG_SSUSB_CDR_BR_U3_OFST (0) ++ ++//U3D_reg31 ++#define RG_SSUSB_CDR_FBDIV_PE2H_OFST (24) ++#define RG_SSUSB_CDR_FBDIV_PE1D_OFST (16) ++#define RG_SSUSB_CDR_FBDIV_PE1H_OFST (8) ++#define RG_SSUSB_CDR_FBDIV_U3_OFST (0) ++ ++//U3D_reg32 ++#define RG_SSUSB_EQ_RSTEP1_PE2D_OFST (30) ++#define RG_SSUSB_EQ_RSTEP1_PE2H_OFST (28) ++#define RG_SSUSB_EQ_RSTEP1_PE1D_OFST (26) ++#define RG_SSUSB_EQ_RSTEP1_PE1H_OFST (24) ++#define RG_SSUSB_EQ_RSTEP1_U3_OFST (22) ++#define RG_SSUSB_LFPS_DEGLITCH_PE2D_OFST (20) ++#define RG_SSUSB_LFPS_DEGLITCH_PE2H_OFST (18) ++#define RG_SSUSB_LFPS_DEGLITCH_PE1D_OFST (16) ++#define RG_SSUSB_LFPS_DEGLITCH_PE1H_OFST (14) ++#define RG_SSUSB_LFPS_DEGLITCH_U3_OFST (12) ++#define RG_SSUSB_CDR_KVSEL_PE2D_OFST (11) ++#define RG_SSUSB_CDR_KVSEL_PE2H_OFST (10) ++#define RG_SSUSB_CDR_KVSEL_PE1D_OFST (9) ++#define 
RG_SSUSB_CDR_KVSEL_PE1H_OFST (8) ++#define RG_SSUSB_CDR_KVSEL_U3_OFST (7) ++#define RG_SSUSB_CDR_FBDIV_PE2D_OFST (0) ++ ++//U3D_reg33 ++#define RG_SSUSB_RX_CMPWD_PE2D_OFST (26) ++#define RG_SSUSB_RX_CMPWD_PE2H_OFST (25) ++#define RG_SSUSB_RX_CMPWD_PE1D_OFST (24) ++#define RG_SSUSB_RX_CMPWD_PE1H_OFST (23) ++#define RG_SSUSB_RX_CMPWD_U3_OFST (16) ++#define RG_SSUSB_EQ_RSTEP2_PE2D_OFST (8) ++#define RG_SSUSB_EQ_RSTEP2_PE2H_OFST (6) ++#define RG_SSUSB_EQ_RSTEP2_PE1D_OFST (4) ++#define RG_SSUSB_EQ_RSTEP2_PE1H_OFST (2) ++#define RG_SSUSB_EQ_RSTEP2_U3_OFST (0) ++ ++ ++/////////////////////////////////////////////////////////////////////////////// ++ ++struct u3phyd_reg { ++ //0x0 ++ PHY_LE32 phyd_mix0; ++ PHY_LE32 phyd_mix1; ++ PHY_LE32 phyd_lfps0; ++ PHY_LE32 phyd_lfps1; ++ //0x10 ++ PHY_LE32 phyd_impcal0; ++ PHY_LE32 phyd_impcal1; ++ PHY_LE32 phyd_txpll0; ++ PHY_LE32 phyd_txpll1; ++ //0x20 ++ PHY_LE32 phyd_txpll2; ++ PHY_LE32 phyd_fl0; ++ PHY_LE32 phyd_mix2; ++ PHY_LE32 phyd_rx0; ++ //0x30 ++ PHY_LE32 phyd_t2rlb; ++ PHY_LE32 phyd_cppat; ++ PHY_LE32 phyd_mix3; ++ PHY_LE32 phyd_ebufctl; ++ //0x40 ++ PHY_LE32 phyd_pipe0; ++ PHY_LE32 phyd_pipe1; ++ PHY_LE32 phyd_mix4; ++ PHY_LE32 phyd_ckgen0; ++ //0x50 ++ PHY_LE32 phyd_mix5; ++ PHY_LE32 phyd_reserved; ++ PHY_LE32 phyd_cdr0; ++ PHY_LE32 phyd_cdr1; ++ //0x60 ++ PHY_LE32 phyd_pll_0; ++ PHY_LE32 phyd_pll_1; ++ PHY_LE32 phyd_bcn_det_1; ++ PHY_LE32 phyd_bcn_det_2; ++ //0x70 ++ PHY_LE32 eq0; ++ PHY_LE32 eq1; ++ PHY_LE32 eq2; ++ PHY_LE32 eq3; ++ //0x80 ++ PHY_LE32 eq_eye0; ++ PHY_LE32 eq_eye1; ++ PHY_LE32 eq_eye2; ++ PHY_LE32 eq_dfe0; ++ //0x90 ++ PHY_LE32 eq_dfe1; ++ PHY_LE32 eq_dfe2; ++ PHY_LE32 eq_dfe3; ++ PHY_LE32 reserve0; ++ //0xa0 ++ PHY_LE32 phyd_mon0; ++ PHY_LE32 phyd_mon1; ++ PHY_LE32 phyd_mon2; ++ PHY_LE32 phyd_mon3; ++ //0xb0 ++ PHY_LE32 phyd_mon4; ++ PHY_LE32 phyd_mon5; ++ PHY_LE32 phyd_mon6; ++ PHY_LE32 phyd_mon7; ++ //0xc0 ++ PHY_LE32 phya_rx_mon0; ++ PHY_LE32 phya_rx_mon1; ++ PHY_LE32 phya_rx_mon2; ++ PHY_LE32 phya_rx_mon3; ++ //0xd0 ++ PHY_LE32 phya_rx_mon4; ++ PHY_LE32 phya_rx_mon5; ++ PHY_LE32 phyd_cppat2; ++ PHY_LE32 eq_eye3; ++ //0xe0 ++ PHY_LE32 kband_out; ++ PHY_LE32 kband_out1; ++}; ++ ++//U3D_PHYD_MIX0 ++#define RG_SSUSB_P_P3_TX_NG (0x1<<31) //31:31 ++#define RG_SSUSB_TSEQ_EN (0x1<<30) //30:30 ++#define RG_SSUSB_TSEQ_POLEN (0x1<<29) //29:29 ++#define RG_SSUSB_TSEQ_POL (0x1<<28) //28:28 ++#define RG_SSUSB_P_P3_PCLK_NG (0x1<<27) //27:27 ++#define RG_SSUSB_TSEQ_TH (0x7<<24) //26:24 ++#define RG_SSUSB_PRBS_BERTH (0xff<<16) //23:16 ++#define RG_SSUSB_DISABLE_PHY_U2_ON (0x1<<15) //15:15 ++#define RG_SSUSB_DISABLE_PHY_U2_OFF (0x1<<14) //14:14 ++#define RG_SSUSB_PRBS_EN (0x1<<13) //13:13 ++#define RG_SSUSB_BPSLOCK (0x1<<12) //12:12 ++#define RG_SSUSB_RTCOMCNT (0xf<<8) //11:8 ++#define RG_SSUSB_COMCNT (0xf<<4) //7:4 ++#define RG_SSUSB_PRBSEL_CALIB (0xf<<0) //3:0 ++ ++//U3D_PHYD_MIX1 ++#define RG_SSUSB_SLEEP_EN (0x1<<31) //31:31 ++#define RG_SSUSB_PRBSEL_PCS (0x7<<28) //30:28 ++#define RG_SSUSB_TXLFPS_PRD (0xf<<24) //27:24 ++#define RG_SSUSB_P_RX_P0S_CK (0x1<<23) //23:23 ++#define RG_SSUSB_P_TX_P0S_CK (0x1<<22) //22:22 ++#define RG_SSUSB_PDNCTL (0x3f<<16) //21:16 ++#define RG_SSUSB_TX_DRV_EN (0x1<<15) //15:15 ++#define RG_SSUSB_TX_DRV_SEL (0x1<<14) //14:14 ++#define RG_SSUSB_TX_DRV_DLY (0x3f<<8) //13:8 ++#define RG_SSUSB_BERT_EN (0x1<<7) //7:7 ++#define RG_SSUSB_SCP_TH (0x7<<4) //6:4 ++#define RG_SSUSB_SCP_EN (0x1<<3) //3:3 ++#define RG_SSUSB_RXANSIDEC_TEST (0x7<<0) //2:0 ++ ++//U3D_PHYD_LFPS0 ++#define RG_SSUSB_LFPS_PWD (0x1<<30) //30:30 
++#define RG_SSUSB_FORCE_LFPS_PWD (0x1<<29) //29:29 ++#define RG_SSUSB_RXLFPS_OVF (0x1f<<24) //28:24 ++#define RG_SSUSB_P3_ENTRY_SEL (0x1<<23) //23:23 ++#define RG_SSUSB_P3_ENTRY (0x1<<22) //22:22 ++#define RG_SSUSB_RXLFPS_CDRSEL (0x3<<20) //21:20 ++#define RG_SSUSB_RXLFPS_CDRTH (0xf<<16) //19:16 ++#define RG_SSUSB_LOCK5G_BLOCK (0x1<<15) //15:15 ++#define RG_SSUSB_TFIFO_EXT_D_SEL (0x1<<14) //14:14 ++#define RG_SSUSB_TFIFO_NO_EXTEND (0x1<<13) //13:13 ++#define RG_SSUSB_RXLFPS_LOB (0x1f<<8) //12:8 ++#define RG_SSUSB_TXLFPS_EN (0x1<<7) //7:7 ++#define RG_SSUSB_TXLFPS_SEL (0x1<<6) //6:6 ++#define RG_SSUSB_RXLFPS_CDRLOCK (0x1<<5) //5:5 ++#define RG_SSUSB_RXLFPS_UPB (0x1f<<0) //4:0 ++ ++//U3D_PHYD_LFPS1 ++#define RG_SSUSB_RX_IMP_BIAS (0xf<<28) //31:28 ++#define RG_SSUSB_TX_IMP_BIAS (0xf<<24) //27:24 ++#define RG_SSUSB_FWAKE_TH (0x3f<<16) //21:16 ++#define RG_SSUSB_RXLFPS_UDF (0x1f<<8) //12:8 ++#define RG_SSUSB_RXLFPS_P0IDLETH (0xff<<0) //7:0 ++ ++//U3D_PHYD_IMPCAL0 ++#define RG_SSUSB_FORCE_TX_IMPSEL (0x1<<31) //31:31 ++#define RG_SSUSB_TX_IMPCAL_EN (0x1<<30) //30:30 ++#define RG_SSUSB_FORCE_TX_IMPCAL_EN (0x1<<29) //29:29 ++#define RG_SSUSB_TX_IMPSEL (0x1f<<24) //28:24 ++#define RG_SSUSB_TX_IMPCAL_CALCYC (0x3f<<16) //21:16 ++#define RG_SSUSB_TX_IMPCAL_STBCYC (0x1f<<10) //14:10 ++#define RG_SSUSB_TX_IMPCAL_CYCCNT (0x3ff<<0) //9:0 ++ ++//U3D_PHYD_IMPCAL1 ++#define RG_SSUSB_FORCE_RX_IMPSEL (0x1<<31) //31:31 ++#define RG_SSUSB_RX_IMPCAL_EN (0x1<<30) //30:30 ++#define RG_SSUSB_FORCE_RX_IMPCAL_EN (0x1<<29) //29:29 ++#define RG_SSUSB_RX_IMPSEL (0x1f<<24) //28:24 ++#define RG_SSUSB_RX_IMPCAL_CALCYC (0x3f<<16) //21:16 ++#define RG_SSUSB_RX_IMPCAL_STBCYC (0x1f<<10) //14:10 ++#define RG_SSUSB_RX_IMPCAL_CYCCNT (0x3ff<<0) //9:0 ++ ++//U3D_PHYD_TXPLL0 ++#define RG_SSUSB_TXPLL_DDSEN_CYC (0x1f<<27) //31:27 ++#define RG_SSUSB_TXPLL_ON (0x1<<26) //26:26 ++#define RG_SSUSB_FORCE_TXPLLON (0x1<<25) //25:25 ++#define RG_SSUSB_TXPLL_STBCYC (0x1ff<<16) //24:16 ++#define RG_SSUSB_TXPLL_NCPOCHG_CYC (0xf<<12) //15:12 ++#define RG_SSUSB_TXPLL_NCPOEN_CYC (0x3<<10) //11:10 ++#define RG_SSUSB_TXPLL_DDSRSTB_CYC (0x7<<0) //2:0 ++ ++//U3D_PHYD_TXPLL1 ++#define RG_SSUSB_PLL_NCPO_EN (0x1<<31) //31:31 ++#define RG_SSUSB_PLL_FIFO_START_MAN (0x1<<30) //30:30 ++#define RG_SSUSB_PLL_NCPO_CHG (0x1<<28) //28:28 ++#define RG_SSUSB_PLL_DDS_RSTB (0x1<<27) //27:27 ++#define RG_SSUSB_PLL_DDS_PWDB (0x1<<26) //26:26 ++#define RG_SSUSB_PLL_DDSEN (0x1<<25) //25:25 ++#define RG_SSUSB_PLL_AUTOK_VCO (0x1<<24) //24:24 ++#define RG_SSUSB_PLL_PWD (0x1<<23) //23:23 ++#define RG_SSUSB_RX_AFE_PWD (0x1<<22) //22:22 ++#define RG_SSUSB_PLL_TCADJ (0x3f<<16) //21:16 ++#define RG_SSUSB_FORCE_CDR_TCADJ (0x1<<15) //15:15 ++#define RG_SSUSB_FORCE_CDR_AUTOK_VCO (0x1<<14) //14:14 ++#define RG_SSUSB_FORCE_CDR_PWD (0x1<<13) //13:13 ++#define RG_SSUSB_FORCE_PLL_NCPO_EN (0x1<<12) //12:12 ++#define RG_SSUSB_FORCE_PLL_FIFO_START_MAN (0x1<<11) //11:11 ++#define RG_SSUSB_FORCE_PLL_NCPO_CHG (0x1<<9) //9:9 ++#define RG_SSUSB_FORCE_PLL_DDS_RSTB (0x1<<8) //8:8 ++#define RG_SSUSB_FORCE_PLL_DDS_PWDB (0x1<<7) //7:7 ++#define RG_SSUSB_FORCE_PLL_DDSEN (0x1<<6) //6:6 ++#define RG_SSUSB_FORCE_PLL_TCADJ (0x1<<5) //5:5 ++#define RG_SSUSB_FORCE_PLL_AUTOK_VCO (0x1<<4) //4:4 ++#define RG_SSUSB_FORCE_PLL_PWD (0x1<<3) //3:3 ++#define RG_SSUSB_FLT_1_DISPERR_B (0x1<<2) //2:2 ++ ++//U3D_PHYD_TXPLL2 ++#define RG_SSUSB_TX_LFPS_EN (0x1<<31) //31:31 ++#define RG_SSUSB_FORCE_TX_LFPS_EN (0x1<<30) //30:30 ++#define RG_SSUSB_TX_LFPS (0x1<<29) //29:29 ++#define RG_SSUSB_FORCE_TX_LFPS (0x1<<28) //28:28 
++#define RG_SSUSB_RXPLL_STB (0x1<<27) //27:27 ++#define RG_SSUSB_TXPLL_STB (0x1<<26) //26:26 ++#define RG_SSUSB_FORCE_RXPLL_STB (0x1<<25) //25:25 ++#define RG_SSUSB_FORCE_TXPLL_STB (0x1<<24) //24:24 ++#define RG_SSUSB_RXPLL_REFCKSEL (0x1<<16) //16:16 ++#define RG_SSUSB_RXPLL_STBMODE (0x1<<11) //11:11 ++#define RG_SSUSB_RXPLL_ON (0x1<<10) //10:10 ++#define RG_SSUSB_FORCE_RXPLLON (0x1<<9) //9:9 ++#define RG_SSUSB_FORCE_RX_AFE_PWD (0x1<<8) //8:8 ++#define RG_SSUSB_CDR_AUTOK_VCO (0x1<<7) //7:7 ++#define RG_SSUSB_CDR_PWD (0x1<<6) //6:6 ++#define RG_SSUSB_CDR_TCADJ (0x3f<<0) //5:0 ++ ++//U3D_PHYD_FL0 ++#define RG_SSUSB_RX_FL_TARGET (0xffff<<16) //31:16 ++#define RG_SSUSB_RX_FL_CYCLECNT (0xffff<<0) //15:0 ++ ++//U3D_PHYD_MIX2 ++#define RG_SSUSB_RX_EQ_RST (0x1<<31) //31:31 ++#define RG_SSUSB_RX_EQ_RST_SEL (0x1<<30) //30:30 ++#define RG_SSUSB_RXVAL_RST (0x1<<29) //29:29 ++#define RG_SSUSB_RXVAL_CNT (0x1f<<24) //28:24 ++#define RG_SSUSB_CDROS_EN (0x1<<18) //18:18 ++#define RG_SSUSB_CDR_LCKOP (0x3<<16) //17:16 ++#define RG_SSUSB_RX_FL_LOCKTH (0xf<<8) //11:8 ++#define RG_SSUSB_RX_FL_OFFSET (0xff<<0) //7:0 ++ ++//U3D_PHYD_RX0 ++#define RG_SSUSB_T2RLB_BERTH (0xff<<24) //31:24 ++#define RG_SSUSB_T2RLB_PAT (0xff<<16) //23:16 ++#define RG_SSUSB_T2RLB_EN (0x1<<15) //15:15 ++#define RG_SSUSB_T2RLB_BPSCRAMB (0x1<<14) //14:14 ++#define RG_SSUSB_T2RLB_SERIAL (0x1<<13) //13:13 ++#define RG_SSUSB_T2RLB_MODE (0x3<<11) //12:11 ++#define RG_SSUSB_RX_SAOSC_EN (0x1<<10) //10:10 ++#define RG_SSUSB_RX_SAOSC_EN_SEL (0x1<<9) //9:9 ++#define RG_SSUSB_RX_DFE_OPTION (0x1<<8) //8:8 ++#define RG_SSUSB_RX_DFE_EN (0x1<<7) //7:7 ++#define RG_SSUSB_RX_DFE_EN_SEL (0x1<<6) //6:6 ++#define RG_SSUSB_RX_EQ_EN (0x1<<5) //5:5 ++#define RG_SSUSB_RX_EQ_EN_SEL (0x1<<4) //4:4 ++#define RG_SSUSB_RX_SAOSC_RST (0x1<<3) //3:3 ++#define RG_SSUSB_RX_SAOSC_RST_SEL (0x1<<2) //2:2 ++#define RG_SSUSB_RX_DFE_RST (0x1<<1) //1:1 ++#define RG_SSUSB_RX_DFE_RST_SEL (0x1<<0) //0:0 ++ ++//U3D_PHYD_T2RLB ++#define RG_SSUSB_EQTRAIN_CH_MODE (0x1<<28) //28:28 ++#define RG_SSUSB_PRB_OUT_CPPAT (0x1<<27) //27:27 ++#define RG_SSUSB_BPANSIENC (0x1<<26) //26:26 ++#define RG_SSUSB_VALID_EN (0x1<<25) //25:25 ++#define RG_SSUSB_EBUF_SRST (0x1<<24) //24:24 ++#define RG_SSUSB_K_EMP (0xf<<20) //23:20 ++#define RG_SSUSB_K_FUL (0xf<<16) //19:16 ++#define RG_SSUSB_T2RLB_BDATRST (0xf<<12) //15:12 ++#define RG_SSUSB_P_T2RLB_SKP_EN (0x1<<10) //10:10 ++#define RG_SSUSB_T2RLB_PATMODE (0x3<<8) //9:8 ++#define RG_SSUSB_T2RLB_TSEQCNT (0xff<<0) //7:0 ++ ++//U3D_PHYD_CPPAT ++#define RG_SSUSB_CPPAT_PROGRAM_EN (0x1<<24) //24:24 ++#define RG_SSUSB_CPPAT_TOZ (0x3<<21) //22:21 ++#define RG_SSUSB_CPPAT_PRBS_EN (0x1<<20) //20:20 ++#define RG_SSUSB_CPPAT_OUT_TMP2 (0xf<<16) //19:16 ++#define RG_SSUSB_CPPAT_OUT_TMP1 (0xff<<8) //15:8 ++#define RG_SSUSB_CPPAT_OUT_TMP0 (0xff<<0) //7:0 ++ ++//U3D_PHYD_MIX3 ++#define RG_SSUSB_CDR_TCADJ_MINUS (0x1<<31) //31:31 ++#define RG_SSUSB_P_CDROS_EN (0x1<<30) //30:30 ++#define RG_SSUSB_P_P2_TX_DRV_DIS (0x1<<28) //28:28 ++#define RG_SSUSB_CDR_TCADJ_OFFSET (0x7<<24) //26:24 ++#define RG_SSUSB_PLL_TCADJ_MINUS (0x1<<23) //23:23 ++#define RG_SSUSB_FORCE_PLL_BIAS_LPF_EN (0x1<<20) //20:20 ++#define RG_SSUSB_PLL_BIAS_LPF_EN (0x1<<19) //19:19 ++#define RG_SSUSB_PLL_TCADJ_OFFSET (0x7<<16) //18:16 ++#define RG_SSUSB_FORCE_PLL_SSCEN (0x1<<15) //15:15 ++#define RG_SSUSB_PLL_SSCEN (0x1<<14) //14:14 ++#define RG_SSUSB_FORCE_CDR_PI_PWD (0x1<<13) //13:13 ++#define RG_SSUSB_CDR_PI_PWD (0x1<<12) //12:12 ++#define RG_SSUSB_CDR_PI_MODE (0x1<<11) //11:11 ++#define 
RG_SSUSB_TXPLL_SSCEN_CYC (0x3ff<<0) //9:0 ++ ++//U3D_PHYD_EBUFCTL ++#define RG_SSUSB_EBUFCTL (0xffffffff<<0) //31:0 ++ ++//U3D_PHYD_PIPE0 ++#define RG_SSUSB_RXTERMINATION (0x1<<30) //30:30 ++#define RG_SSUSB_RXEQTRAINING (0x1<<29) //29:29 ++#define RG_SSUSB_RXPOLARITY (0x1<<28) //28:28 ++#define RG_SSUSB_TXDEEMPH (0x3<<26) //27:26 ++#define RG_SSUSB_POWERDOWN (0x3<<24) //25:24 ++#define RG_SSUSB_TXONESZEROS (0x1<<23) //23:23 ++#define RG_SSUSB_TXELECIDLE (0x1<<22) //22:22 ++#define RG_SSUSB_TXDETECTRX (0x1<<21) //21:21 ++#define RG_SSUSB_PIPE_SEL (0x1<<20) //20:20 ++#define RG_SSUSB_TXDATAK (0xf<<16) //19:16 ++#define RG_SSUSB_CDR_STABLE_SEL (0x1<<15) //15:15 ++#define RG_SSUSB_CDR_STABLE (0x1<<14) //14:14 ++#define RG_SSUSB_CDR_RSTB_SEL (0x1<<13) //13:13 ++#define RG_SSUSB_CDR_RSTB (0x1<<12) //12:12 ++#define RG_SSUSB_P_ERROR_SEL (0x3<<4) //5:4 ++#define RG_SSUSB_TXMARGIN (0x7<<1) //3:1 ++#define RG_SSUSB_TXCOMPLIANCE (0x1<<0) //0:0 ++ ++//U3D_PHYD_PIPE1 ++#define RG_SSUSB_TXDATA (0xffffffff<<0) //31:0 ++ ++//U3D_PHYD_MIX4 ++#define RG_SSUSB_CDROS_CNT (0x3f<<24) //29:24 ++#define RG_SSUSB_T2RLB_BER_EN (0x1<<16) //16:16 ++#define RG_SSUSB_T2RLB_BER_RATE (0xffff<<0) //15:0 ++ ++//U3D_PHYD_CKGEN0 ++#define RG_SSUSB_RFIFO_IMPLAT (0x1<<27) //27:27 ++#define RG_SSUSB_TFIFO_PSEL (0x7<<24) //26:24 ++#define RG_SSUSB_CKGEN_PSEL (0x3<<8) //9:8 ++#define RG_SSUSB_RXCK_INV (0x1<<0) //0:0 ++ ++//U3D_PHYD_MIX5 ++#define RG_SSUSB_PRB_SEL (0xffff<<16) //31:16 ++#define RG_SSUSB_RXPLL_STBCYC (0x7ff<<0) //10:0 ++ ++//U3D_PHYD_RESERVED ++#define RG_SSUSB_PHYD_RESERVE (0xffffffff<<0) //31:0 ++//#define RG_SSUSB_RX_SIGDET_SEL (0x1<<11) ++//#define RG_SSUSB_RX_SIGDET_EN (0x1<<12) ++//#define RG_SSUSB_RX_PI_CAL_MANUAL_SEL (0x1<<9) ++//#define RG_SSUSB_RX_PI_CAL_MANUAL_EN (0x1<<10) ++ ++//U3D_PHYD_CDR0 ++#define RG_SSUSB_CDR_BIC_LTR (0xf<<28) //31:28 ++#define RG_SSUSB_CDR_BIC_LTD0 (0xf<<24) //27:24 ++#define RG_SSUSB_CDR_BC_LTD1 (0x1f<<16) //20:16 ++#define RG_SSUSB_CDR_BC_LTR (0x1f<<8) //12:8 ++#define RG_SSUSB_CDR_BC_LTD0 (0x1f<<0) //4:0 ++ ++//U3D_PHYD_CDR1 ++#define RG_SSUSB_CDR_BIR_LTD1 (0x1f<<24) //28:24 ++#define RG_SSUSB_CDR_BIR_LTR (0x1f<<16) //20:16 ++#define RG_SSUSB_CDR_BIR_LTD0 (0x1f<<8) //12:8 ++#define RG_SSUSB_CDR_BW_SEL (0x3<<6) //7:6 ++#define RG_SSUSB_CDR_BIC_LTD1 (0xf<<0) //3:0 ++ ++//U3D_PHYD_PLL_0 ++#define RG_SSUSB_FORCE_CDR_BAND_5G (0x1<<28) //28:28 ++#define RG_SSUSB_FORCE_CDR_BAND_2P5G (0x1<<27) //27:27 ++#define RG_SSUSB_FORCE_PLL_BAND_5G (0x1<<26) //26:26 ++#define RG_SSUSB_FORCE_PLL_BAND_2P5G (0x1<<25) //25:25 ++#define RG_SSUSB_P_EQ_T_SEL (0x3ff<<15) //24:15 ++#define RG_SSUSB_PLL_ISO_EN_CYC (0x3ff<<5) //14:5 ++#define RG_SSUSB_PLLBAND_RECAL (0x1<<4) //4:4 ++#define RG_SSUSB_PLL_DDS_ISO_EN (0x1<<3) //3:3 ++#define RG_SSUSB_FORCE_PLL_DDS_ISO_EN (0x1<<2) //2:2 ++#define RG_SSUSB_PLL_DDS_PWR_ON (0x1<<1) //1:1 ++#define RG_SSUSB_FORCE_PLL_DDS_PWR_ON (0x1<<0) //0:0 ++ ++//U3D_PHYD_PLL_1 ++#define RG_SSUSB_CDR_BAND_5G (0xff<<24) //31:24 ++#define RG_SSUSB_CDR_BAND_2P5G (0xff<<16) //23:16 ++#define RG_SSUSB_PLL_BAND_5G (0xff<<8) //15:8 ++#define RG_SSUSB_PLL_BAND_2P5G (0xff<<0) //7:0 ++ ++//U3D_PHYD_BCN_DET_1 ++#define RG_SSUSB_P_BCN_OBS_PRD (0xffff<<16) //31:16 ++#define RG_SSUSB_U_BCN_OBS_PRD (0xffff<<0) //15:0 ++ ++//U3D_PHYD_BCN_DET_2 ++#define RG_SSUSB_P_BCN_OBS_SEL (0xfff<<16) //27:16 ++#define RG_SSUSB_BCN_DET_DIS (0x1<<12) //12:12 ++#define RG_SSUSB_U_BCN_OBS_SEL (0xfff<<0) //11:0 ++ ++//U3D_EQ0 ++#define RG_SSUSB_EQ_DLHL_LFI (0x7f<<24) //30:24 ++#define RG_SSUSB_EQ_DHHL_LFI 
(0x7f<<16) //22:16 ++#define RG_SSUSB_EQ_DD0HOS_LFI (0x7f<<8) //14:8 ++#define RG_SSUSB_EQ_DD0LOS_LFI (0x7f<<0) //6:0 ++ ++//U3D_EQ1 ++#define RG_SSUSB_EQ_DD1HOS_LFI (0x7f<<24) //30:24 ++#define RG_SSUSB_EQ_DD1LOS_LFI (0x7f<<16) //22:16 ++#define RG_SSUSB_EQ_DE0OS_LFI (0x7f<<8) //14:8 ++#define RG_SSUSB_EQ_DE1OS_LFI (0x7f<<0) //6:0 ++ ++//U3D_EQ2 ++#define RG_SSUSB_EQ_DLHLOS_LFI (0x7f<<24) //30:24 ++#define RG_SSUSB_EQ_DHHLOS_LFI (0x7f<<16) //22:16 ++#define RG_SSUSB_EQ_STOPTIME (0x1<<14) //14:14 ++#define RG_SSUSB_EQ_DHHL_LF_SEL (0x7<<11) //13:11 ++#define RG_SSUSB_EQ_DSAOS_LF_SEL (0x7<<8) //10:8 ++#define RG_SSUSB_EQ_STARTTIME (0x3<<6) //7:6 ++#define RG_SSUSB_EQ_DLEQ_LF_SEL (0x7<<3) //5:3 ++#define RG_SSUSB_EQ_DLHL_LF_SEL (0x7<<0) //2:0 ++ ++//U3D_EQ3 ++#define RG_SSUSB_EQ_DLEQ_LFI_GEN2 (0xf<<28) //31:28 ++#define RG_SSUSB_EQ_DLEQ_LFI_GEN1 (0xf<<24) //27:24 ++#define RG_SSUSB_EQ_DEYE0OS_LFI (0x7f<<16) //22:16 ++#define RG_SSUSB_EQ_DEYE1OS_LFI (0x7f<<8) //14:8 ++#define RG_SSUSB_EQ_TRI_DET_EN (0x1<<7) //7:7 ++#define RG_SSUSB_EQ_TRI_DET_TH (0x7f<<0) //6:0 ++ ++//U3D_EQ_EYE0 ++#define RG_SSUSB_EQ_EYE_XOFFSET (0x7f<<25) //31:25 ++#define RG_SSUSB_EQ_EYE_MON_EN (0x1<<24) //24:24 ++#define RG_SSUSB_EQ_EYE0_Y (0x7f<<16) //22:16 ++#define RG_SSUSB_EQ_EYE1_Y (0x7f<<8) //14:8 ++#define RG_SSUSB_EQ_PILPO_ROUT (0x1<<7) //7:7 ++#define RG_SSUSB_EQ_PI_KPGAIN (0x7<<4) //6:4 ++#define RG_SSUSB_EQ_EYE_CNT_EN (0x1<<3) //3:3 ++ ++//U3D_EQ_EYE1 ++#define RG_SSUSB_EQ_SIGDET (0x7f<<24) //30:24 ++#define RG_SSUSB_EQ_EYE_MASK (0x3ff<<7) //16:7 ++ ++//U3D_EQ_EYE2 ++#define RG_SSUSB_EQ_RX500M_CK_SEL (0x1<<31) //31:31 ++#define RG_SSUSB_EQ_SD_CNT1 (0x3f<<24) //29:24 ++#define RG_SSUSB_EQ_ISIFLAG_SEL (0x3<<22) //23:22 ++#define RG_SSUSB_EQ_SD_CNT0 (0x3f<<16) //21:16 ++ ++//U3D_EQ_DFE0 ++#define RG_SSUSB_EQ_LEQMAX (0xf<<28) //31:28 ++#define RG_SSUSB_EQ_DFEX_EN (0x1<<27) //27:27 ++#define RG_SSUSB_EQ_DFEX_LF_SEL (0x7<<24) //26:24 ++#define RG_SSUSB_EQ_CHK_EYE_H (0x1<<23) //23:23 ++#define RG_SSUSB_EQ_PIEYE_INI (0x7f<<16) //22:16 ++#define RG_SSUSB_EQ_PI90_INI (0x7f<<8) //14:8 ++#define RG_SSUSB_EQ_PI0_INI (0x7f<<0) //6:0 ++ ++//U3D_EQ_DFE1 ++#define RG_SSUSB_EQ_REV (0xffff<<16) //31:16 ++#define RG_SSUSB_EQ_DFEYEN_DUR (0x7<<12) //14:12 ++#define RG_SSUSB_EQ_DFEXEN_DUR (0x7<<8) //10:8 ++#define RG_SSUSB_EQ_DFEX_RST (0x1<<7) //7:7 ++#define RG_SSUSB_EQ_GATED_RXD_B (0x1<<6) //6:6 ++#define RG_SSUSB_EQ_PI90CK_SEL (0x3<<4) //5:4 ++#define RG_SSUSB_EQ_DFEX_DIS (0x1<<2) //2:2 ++#define RG_SSUSB_EQ_DFEYEN_STOP_DIS (0x1<<1) //1:1 ++#define RG_SSUSB_EQ_DFEXEN_SEL (0x1<<0) //0:0 ++ ++//U3D_EQ_DFE2 ++#define RG_SSUSB_EQ_MON_SEL (0x1f<<24) //28:24 ++#define RG_SSUSB_EQ_LEQOSC_DLYCNT (0x7<<16) //18:16 ++#define RG_SSUSB_EQ_DLEQOS_LFI (0x1f<<8) //12:8 ++#define RG_SSUSB_EQ_LEQ_STOP_TO (0x3<<0) //1:0 ++ ++//U3D_EQ_DFE3 ++#define RG_SSUSB_EQ_RESERVED (0xffffffff<<0) //31:0 ++ ++//U3D_PHYD_MON0 ++#define RGS_SSUSB_BERT_BERC (0xffff<<16) //31:16 ++#define RGS_SSUSB_LFPS (0xf<<12) //15:12 ++#define RGS_SSUSB_TRAINDEC (0x7<<8) //10:8 ++#define RGS_SSUSB_SCP_PAT (0xff<<0) //7:0 ++ ++//U3D_PHYD_MON1 ++#define RGS_SSUSB_RX_FL_OUT (0xffff<<0) //15:0 ++ ++//U3D_PHYD_MON2 ++#define RGS_SSUSB_T2RLB_ERRCNT (0xffff<<16) //31:16 ++#define RGS_SSUSB_RETRACK (0xf<<12) //15:12 ++#define RGS_SSUSB_RXPLL_LOCK (0x1<<10) //10:10 ++#define RGS_SSUSB_CDR_VCOCAL_CPLT_D (0x1<<9) //9:9 ++#define RGS_SSUSB_PLL_VCOCAL_CPLT_D (0x1<<8) //8:8 ++#define RGS_SSUSB_PDNCTL (0xff<<0) //7:0 ++ ++//U3D_PHYD_MON3 ++#define RGS_SSUSB_TSEQ_ERRCNT (0xffff<<16) //31:16 
++#define RGS_SSUSB_PRBS_ERRCNT (0xffff<<0) //15:0 ++ ++//U3D_PHYD_MON4 ++#define RGS_SSUSB_RX_LSLOCK_CNT (0xf<<24) //27:24 ++#define RGS_SSUSB_SCP_DETCNT (0xff<<16) //23:16 ++#define RGS_SSUSB_TSEQ_DETCNT (0xffff<<0) //15:0 ++ ++//U3D_PHYD_MON5 ++#define RGS_SSUSB_EBUFMSG (0xffff<<16) //31:16 ++#define RGS_SSUSB_BERT_LOCK (0x1<<15) //15:15 ++#define RGS_SSUSB_SCP_DET (0x1<<14) //14:14 ++#define RGS_SSUSB_TSEQ_DET (0x1<<13) //13:13 ++#define RGS_SSUSB_EBUF_UDF (0x1<<12) //12:12 ++#define RGS_SSUSB_EBUF_OVF (0x1<<11) //11:11 ++#define RGS_SSUSB_PRBS_PASSTH (0x1<<10) //10:10 ++#define RGS_SSUSB_PRBS_PASS (0x1<<9) //9:9 ++#define RGS_SSUSB_PRBS_LOCK (0x1<<8) //8:8 ++#define RGS_SSUSB_T2RLB_ERR (0x1<<6) //6:6 ++#define RGS_SSUSB_T2RLB_PASSTH (0x1<<5) //5:5 ++#define RGS_SSUSB_T2RLB_PASS (0x1<<4) //4:4 ++#define RGS_SSUSB_T2RLB_LOCK (0x1<<3) //3:3 ++#define RGS_SSUSB_RX_IMPCAL_DONE (0x1<<2) //2:2 ++#define RGS_SSUSB_TX_IMPCAL_DONE (0x1<<1) //1:1 ++#define RGS_SSUSB_RXDETECTED (0x1<<0) //0:0 ++ ++//U3D_PHYD_MON6 ++#define RGS_SSUSB_SIGCAL_DONE (0x1<<30) //30:30 ++#define RGS_SSUSB_SIGCAL_CAL_OUT (0x1<<29) //29:29 ++#define RGS_SSUSB_SIGCAL_OFFSET (0x1f<<24) //28:24 ++#define RGS_SSUSB_RX_IMP_SEL (0x1f<<16) //20:16 ++#define RGS_SSUSB_TX_IMP_SEL (0x1f<<8) //12:8 ++#define RGS_SSUSB_TFIFO_MSG (0xf<<4) //7:4 ++#define RGS_SSUSB_RFIFO_MSG (0xf<<0) //3:0 ++ ++//U3D_PHYD_MON7 ++#define RGS_SSUSB_FT_OUT (0xff<<8) //15:8 ++#define RGS_SSUSB_PRB_OUT (0xff<<0) //7:0 ++ ++//U3D_PHYA_RX_MON0 ++#define RGS_SSUSB_EQ_DCLEQ (0xf<<24) //27:24 ++#define RGS_SSUSB_EQ_DCD0H (0x7f<<16) //22:16 ++#define RGS_SSUSB_EQ_DCD0L (0x7f<<8) //14:8 ++#define RGS_SSUSB_EQ_DCD1H (0x7f<<0) //6:0 ++ ++//U3D_PHYA_RX_MON1 ++#define RGS_SSUSB_EQ_DCD1L (0x7f<<24) //30:24 ++#define RGS_SSUSB_EQ_DCE0 (0x7f<<16) //22:16 ++#define RGS_SSUSB_EQ_DCE1 (0x7f<<8) //14:8 ++#define RGS_SSUSB_EQ_DCHHL (0x7f<<0) //6:0 ++ ++//U3D_PHYA_RX_MON2 ++#define RGS_SSUSB_EQ_LEQ_STOP (0x1<<31) //31:31 ++#define RGS_SSUSB_EQ_DCLHL (0x7f<<24) //30:24 ++#define RGS_SSUSB_EQ_STATUS (0xff<<16) //23:16 ++#define RGS_SSUSB_EQ_DCEYE0 (0x7f<<8) //14:8 ++#define RGS_SSUSB_EQ_DCEYE1 (0x7f<<0) //6:0 ++ ++//U3D_PHYA_RX_MON3 ++#define RGS_SSUSB_EQ_EYE_MONITOR_ERRCNT_0 (0xfffff<<0) //19:0 ++ ++//U3D_PHYA_RX_MON4 ++#define RGS_SSUSB_EQ_EYE_MONITOR_ERRCNT_1 (0xfffff<<0) //19:0 ++ ++//U3D_PHYA_RX_MON5 ++#define RGS_SSUSB_EQ_DCLEQOS (0x1f<<8) //12:8 ++#define RGS_SSUSB_EQ_EYE_CNT_RDY (0x1<<7) //7:7 ++#define RGS_SSUSB_EQ_PILPO (0x7f<<0) //6:0 ++ ++//U3D_PHYD_CPPAT2 ++#define RG_SSUSB_CPPAT_OUT_H_TMP2 (0xf<<16) //19:16 ++#define RG_SSUSB_CPPAT_OUT_H_TMP1 (0xff<<8) //15:8 ++#define RG_SSUSB_CPPAT_OUT_H_TMP0 (0xff<<0) //7:0 ++ ++//U3D_EQ_EYE3 ++#define RG_SSUSB_EQ_LEQ_SHIFT (0x7<<24) //26:24 ++#define RG_SSUSB_EQ_EYE_CNT (0xfffff<<0) //19:0 ++ ++//U3D_KBAND_OUT ++#define RGS_SSUSB_CDR_BAND_5G (0xff<<24) //31:24 ++#define RGS_SSUSB_CDR_BAND_2P5G (0xff<<16) //23:16 ++#define RGS_SSUSB_PLL_BAND_5G (0xff<<8) //15:8 ++#define RGS_SSUSB_PLL_BAND_2P5G (0xff<<0) //7:0 ++ ++//U3D_KBAND_OUT1 ++#define RGS_SSUSB_CDR_VCOCAL_FAIL (0x1<<24) //24:24 ++#define RGS_SSUSB_CDR_VCOCAL_STATE (0xff<<16) //23:16 ++#define RGS_SSUSB_PLL_VCOCAL_FAIL (0x1<<8) //8:8 ++#define RGS_SSUSB_PLL_VCOCAL_STATE (0xff<<0) //7:0 ++ ++ ++/* OFFSET */ ++ ++//U3D_PHYD_MIX0 ++#define RG_SSUSB_P_P3_TX_NG_OFST (31) ++#define RG_SSUSB_TSEQ_EN_OFST (30) ++#define RG_SSUSB_TSEQ_POLEN_OFST (29) ++#define RG_SSUSB_TSEQ_POL_OFST (28) ++#define RG_SSUSB_P_P3_PCLK_NG_OFST (27) ++#define RG_SSUSB_TSEQ_TH_OFST (24) ++#define 
RG_SSUSB_PRBS_BERTH_OFST (16) ++#define RG_SSUSB_DISABLE_PHY_U2_ON_OFST (15) ++#define RG_SSUSB_DISABLE_PHY_U2_OFF_OFST (14) ++#define RG_SSUSB_PRBS_EN_OFST (13) ++#define RG_SSUSB_BPSLOCK_OFST (12) ++#define RG_SSUSB_RTCOMCNT_OFST (8) ++#define RG_SSUSB_COMCNT_OFST (4) ++#define RG_SSUSB_PRBSEL_CALIB_OFST (0) ++ ++//U3D_PHYD_MIX1 ++#define RG_SSUSB_SLEEP_EN_OFST (31) ++#define RG_SSUSB_PRBSEL_PCS_OFST (28) ++#define RG_SSUSB_TXLFPS_PRD_OFST (24) ++#define RG_SSUSB_P_RX_P0S_CK_OFST (23) ++#define RG_SSUSB_P_TX_P0S_CK_OFST (22) ++#define RG_SSUSB_PDNCTL_OFST (16) ++#define RG_SSUSB_TX_DRV_EN_OFST (15) ++#define RG_SSUSB_TX_DRV_SEL_OFST (14) ++#define RG_SSUSB_TX_DRV_DLY_OFST (8) ++#define RG_SSUSB_BERT_EN_OFST (7) ++#define RG_SSUSB_SCP_TH_OFST (4) ++#define RG_SSUSB_SCP_EN_OFST (3) ++#define RG_SSUSB_RXANSIDEC_TEST_OFST (0) ++ ++//U3D_PHYD_LFPS0 ++#define RG_SSUSB_LFPS_PWD_OFST (30) ++#define RG_SSUSB_FORCE_LFPS_PWD_OFST (29) ++#define RG_SSUSB_RXLFPS_OVF_OFST (24) ++#define RG_SSUSB_P3_ENTRY_SEL_OFST (23) ++#define RG_SSUSB_P3_ENTRY_OFST (22) ++#define RG_SSUSB_RXLFPS_CDRSEL_OFST (20) ++#define RG_SSUSB_RXLFPS_CDRTH_OFST (16) ++#define RG_SSUSB_LOCK5G_BLOCK_OFST (15) ++#define RG_SSUSB_TFIFO_EXT_D_SEL_OFST (14) ++#define RG_SSUSB_TFIFO_NO_EXTEND_OFST (13) ++#define RG_SSUSB_RXLFPS_LOB_OFST (8) ++#define RG_SSUSB_TXLFPS_EN_OFST (7) ++#define RG_SSUSB_TXLFPS_SEL_OFST (6) ++#define RG_SSUSB_RXLFPS_CDRLOCK_OFST (5) ++#define RG_SSUSB_RXLFPS_UPB_OFST (0) ++ ++//U3D_PHYD_LFPS1 ++#define RG_SSUSB_RX_IMP_BIAS_OFST (28) ++#define RG_SSUSB_TX_IMP_BIAS_OFST (24) ++#define RG_SSUSB_FWAKE_TH_OFST (16) ++#define RG_SSUSB_RXLFPS_UDF_OFST (8) ++#define RG_SSUSB_RXLFPS_P0IDLETH_OFST (0) ++ ++//U3D_PHYD_IMPCAL0 ++#define RG_SSUSB_FORCE_TX_IMPSEL_OFST (31) ++#define RG_SSUSB_TX_IMPCAL_EN_OFST (30) ++#define RG_SSUSB_FORCE_TX_IMPCAL_EN_OFST (29) ++#define RG_SSUSB_TX_IMPSEL_OFST (24) ++#define RG_SSUSB_TX_IMPCAL_CALCYC_OFST (16) ++#define RG_SSUSB_TX_IMPCAL_STBCYC_OFST (10) ++#define RG_SSUSB_TX_IMPCAL_CYCCNT_OFST (0) ++ ++//U3D_PHYD_IMPCAL1 ++#define RG_SSUSB_FORCE_RX_IMPSEL_OFST (31) ++#define RG_SSUSB_RX_IMPCAL_EN_OFST (30) ++#define RG_SSUSB_FORCE_RX_IMPCAL_EN_OFST (29) ++#define RG_SSUSB_RX_IMPSEL_OFST (24) ++#define RG_SSUSB_RX_IMPCAL_CALCYC_OFST (16) ++#define RG_SSUSB_RX_IMPCAL_STBCYC_OFST (10) ++#define RG_SSUSB_RX_IMPCAL_CYCCNT_OFST (0) ++ ++//U3D_PHYD_TXPLL0 ++#define RG_SSUSB_TXPLL_DDSEN_CYC_OFST (27) ++#define RG_SSUSB_TXPLL_ON_OFST (26) ++#define RG_SSUSB_FORCE_TXPLLON_OFST (25) ++#define RG_SSUSB_TXPLL_STBCYC_OFST (16) ++#define RG_SSUSB_TXPLL_NCPOCHG_CYC_OFST (12) ++#define RG_SSUSB_TXPLL_NCPOEN_CYC_OFST (10) ++#define RG_SSUSB_TXPLL_DDSRSTB_CYC_OFST (0) ++ ++//U3D_PHYD_TXPLL1 ++#define RG_SSUSB_PLL_NCPO_EN_OFST (31) ++#define RG_SSUSB_PLL_FIFO_START_MAN_OFST (30) ++#define RG_SSUSB_PLL_NCPO_CHG_OFST (28) ++#define RG_SSUSB_PLL_DDS_RSTB_OFST (27) ++#define RG_SSUSB_PLL_DDS_PWDB_OFST (26) ++#define RG_SSUSB_PLL_DDSEN_OFST (25) ++#define RG_SSUSB_PLL_AUTOK_VCO_OFST (24) ++#define RG_SSUSB_PLL_PWD_OFST (23) ++#define RG_SSUSB_RX_AFE_PWD_OFST (22) ++#define RG_SSUSB_PLL_TCADJ_OFST (16) ++#define RG_SSUSB_FORCE_CDR_TCADJ_OFST (15) ++#define RG_SSUSB_FORCE_CDR_AUTOK_VCO_OFST (14) ++#define RG_SSUSB_FORCE_CDR_PWD_OFST (13) ++#define RG_SSUSB_FORCE_PLL_NCPO_EN_OFST (12) ++#define RG_SSUSB_FORCE_PLL_FIFO_START_MAN_OFST (11) ++#define RG_SSUSB_FORCE_PLL_NCPO_CHG_OFST (9) ++#define RG_SSUSB_FORCE_PLL_DDS_RSTB_OFST (8) ++#define RG_SSUSB_FORCE_PLL_DDS_PWDB_OFST (7) ++#define RG_SSUSB_FORCE_PLL_DDSEN_OFST 
(6) ++#define RG_SSUSB_FORCE_PLL_TCADJ_OFST (5) ++#define RG_SSUSB_FORCE_PLL_AUTOK_VCO_OFST (4) ++#define RG_SSUSB_FORCE_PLL_PWD_OFST (3) ++#define RG_SSUSB_FLT_1_DISPERR_B_OFST (2) ++ ++//U3D_PHYD_TXPLL2 ++#define RG_SSUSB_TX_LFPS_EN_OFST (31) ++#define RG_SSUSB_FORCE_TX_LFPS_EN_OFST (30) ++#define RG_SSUSB_TX_LFPS_OFST (29) ++#define RG_SSUSB_FORCE_TX_LFPS_OFST (28) ++#define RG_SSUSB_RXPLL_STB_OFST (27) ++#define RG_SSUSB_TXPLL_STB_OFST (26) ++#define RG_SSUSB_FORCE_RXPLL_STB_OFST (25) ++#define RG_SSUSB_FORCE_TXPLL_STB_OFST (24) ++#define RG_SSUSB_RXPLL_REFCKSEL_OFST (16) ++#define RG_SSUSB_RXPLL_STBMODE_OFST (11) ++#define RG_SSUSB_RXPLL_ON_OFST (10) ++#define RG_SSUSB_FORCE_RXPLLON_OFST (9) ++#define RG_SSUSB_FORCE_RX_AFE_PWD_OFST (8) ++#define RG_SSUSB_CDR_AUTOK_VCO_OFST (7) ++#define RG_SSUSB_CDR_PWD_OFST (6) ++#define RG_SSUSB_CDR_TCADJ_OFST (0) ++ ++//U3D_PHYD_FL0 ++#define RG_SSUSB_RX_FL_TARGET_OFST (16) ++#define RG_SSUSB_RX_FL_CYCLECNT_OFST (0) ++ ++//U3D_PHYD_MIX2 ++#define RG_SSUSB_RX_EQ_RST_OFST (31) ++#define RG_SSUSB_RX_EQ_RST_SEL_OFST (30) ++#define RG_SSUSB_RXVAL_RST_OFST (29) ++#define RG_SSUSB_RXVAL_CNT_OFST (24) ++#define RG_SSUSB_CDROS_EN_OFST (18) ++#define RG_SSUSB_CDR_LCKOP_OFST (16) ++#define RG_SSUSB_RX_FL_LOCKTH_OFST (8) ++#define RG_SSUSB_RX_FL_OFFSET_OFST (0) ++ ++//U3D_PHYD_RX0 ++#define RG_SSUSB_T2RLB_BERTH_OFST (24) ++#define RG_SSUSB_T2RLB_PAT_OFST (16) ++#define RG_SSUSB_T2RLB_EN_OFST (15) ++#define RG_SSUSB_T2RLB_BPSCRAMB_OFST (14) ++#define RG_SSUSB_T2RLB_SERIAL_OFST (13) ++#define RG_SSUSB_T2RLB_MODE_OFST (11) ++#define RG_SSUSB_RX_SAOSC_EN_OFST (10) ++#define RG_SSUSB_RX_SAOSC_EN_SEL_OFST (9) ++#define RG_SSUSB_RX_DFE_OPTION_OFST (8) ++#define RG_SSUSB_RX_DFE_EN_OFST (7) ++#define RG_SSUSB_RX_DFE_EN_SEL_OFST (6) ++#define RG_SSUSB_RX_EQ_EN_OFST (5) ++#define RG_SSUSB_RX_EQ_EN_SEL_OFST (4) ++#define RG_SSUSB_RX_SAOSC_RST_OFST (3) ++#define RG_SSUSB_RX_SAOSC_RST_SEL_OFST (2) ++#define RG_SSUSB_RX_DFE_RST_OFST (1) ++#define RG_SSUSB_RX_DFE_RST_SEL_OFST (0) ++ ++//U3D_PHYD_T2RLB ++#define RG_SSUSB_EQTRAIN_CH_MODE_OFST (28) ++#define RG_SSUSB_PRB_OUT_CPPAT_OFST (27) ++#define RG_SSUSB_BPANSIENC_OFST (26) ++#define RG_SSUSB_VALID_EN_OFST (25) ++#define RG_SSUSB_EBUF_SRST_OFST (24) ++#define RG_SSUSB_K_EMP_OFST (20) ++#define RG_SSUSB_K_FUL_OFST (16) ++#define RG_SSUSB_T2RLB_BDATRST_OFST (12) ++#define RG_SSUSB_P_T2RLB_SKP_EN_OFST (10) ++#define RG_SSUSB_T2RLB_PATMODE_OFST (8) ++#define RG_SSUSB_T2RLB_TSEQCNT_OFST (0) ++ ++//U3D_PHYD_CPPAT ++#define RG_SSUSB_CPPAT_PROGRAM_EN_OFST (24) ++#define RG_SSUSB_CPPAT_TOZ_OFST (21) ++#define RG_SSUSB_CPPAT_PRBS_EN_OFST (20) ++#define RG_SSUSB_CPPAT_OUT_TMP2_OFST (16) ++#define RG_SSUSB_CPPAT_OUT_TMP1_OFST (8) ++#define RG_SSUSB_CPPAT_OUT_TMP0_OFST (0) ++ ++//U3D_PHYD_MIX3 ++#define RG_SSUSB_CDR_TCADJ_MINUS_OFST (31) ++#define RG_SSUSB_P_CDROS_EN_OFST (30) ++#define RG_SSUSB_P_P2_TX_DRV_DIS_OFST (28) ++#define RG_SSUSB_CDR_TCADJ_OFFSET_OFST (24) ++#define RG_SSUSB_PLL_TCADJ_MINUS_OFST (23) ++#define RG_SSUSB_FORCE_PLL_BIAS_LPF_EN_OFST (20) ++#define RG_SSUSB_PLL_BIAS_LPF_EN_OFST (19) ++#define RG_SSUSB_PLL_TCADJ_OFFSET_OFST (16) ++#define RG_SSUSB_FORCE_PLL_SSCEN_OFST (15) ++#define RG_SSUSB_PLL_SSCEN_OFST (14) ++#define RG_SSUSB_FORCE_CDR_PI_PWD_OFST (13) ++#define RG_SSUSB_CDR_PI_PWD_OFST (12) ++#define RG_SSUSB_CDR_PI_MODE_OFST (11) ++#define RG_SSUSB_TXPLL_SSCEN_CYC_OFST (0) ++ ++//U3D_PHYD_EBUFCTL ++#define RG_SSUSB_EBUFCTL_OFST (0) ++ ++//U3D_PHYD_PIPE0 ++#define RG_SSUSB_RXTERMINATION_OFST (30) ++#define 
RG_SSUSB_RXEQTRAINING_OFST (29) ++#define RG_SSUSB_RXPOLARITY_OFST (28) ++#define RG_SSUSB_TXDEEMPH_OFST (26) ++#define RG_SSUSB_POWERDOWN_OFST (24) ++#define RG_SSUSB_TXONESZEROS_OFST (23) ++#define RG_SSUSB_TXELECIDLE_OFST (22) ++#define RG_SSUSB_TXDETECTRX_OFST (21) ++#define RG_SSUSB_PIPE_SEL_OFST (20) ++#define RG_SSUSB_TXDATAK_OFST (16) ++#define RG_SSUSB_CDR_STABLE_SEL_OFST (15) ++#define RG_SSUSB_CDR_STABLE_OFST (14) ++#define RG_SSUSB_CDR_RSTB_SEL_OFST (13) ++#define RG_SSUSB_CDR_RSTB_OFST (12) ++#define RG_SSUSB_P_ERROR_SEL_OFST (4) ++#define RG_SSUSB_TXMARGIN_OFST (1) ++#define RG_SSUSB_TXCOMPLIANCE_OFST (0) ++ ++//U3D_PHYD_PIPE1 ++#define RG_SSUSB_TXDATA_OFST (0) ++ ++//U3D_PHYD_MIX4 ++#define RG_SSUSB_CDROS_CNT_OFST (24) ++#define RG_SSUSB_T2RLB_BER_EN_OFST (16) ++#define RG_SSUSB_T2RLB_BER_RATE_OFST (0) ++ ++//U3D_PHYD_CKGEN0 ++#define RG_SSUSB_RFIFO_IMPLAT_OFST (27) ++#define RG_SSUSB_TFIFO_PSEL_OFST (24) ++#define RG_SSUSB_CKGEN_PSEL_OFST (8) ++#define RG_SSUSB_RXCK_INV_OFST (0) ++ ++//U3D_PHYD_MIX5 ++#define RG_SSUSB_PRB_SEL_OFST (16) ++#define RG_SSUSB_RXPLL_STBCYC_OFST (0) ++ ++//U3D_PHYD_RESERVED ++#define RG_SSUSB_PHYD_RESERVE_OFST (0) ++//#define RG_SSUSB_RX_SIGDET_SEL_OFST (11) ++//#define RG_SSUSB_RX_SIGDET_EN_OFST (12) ++//#define RG_SSUSB_RX_PI_CAL_MANUAL_SEL_OFST (9) ++//#define RG_SSUSB_RX_PI_CAL_MANUAL_EN_OFST (10) ++ ++//U3D_PHYD_CDR0 ++#define RG_SSUSB_CDR_BIC_LTR_OFST (28) ++#define RG_SSUSB_CDR_BIC_LTD0_OFST (24) ++#define RG_SSUSB_CDR_BC_LTD1_OFST (16) ++#define RG_SSUSB_CDR_BC_LTR_OFST (8) ++#define RG_SSUSB_CDR_BC_LTD0_OFST (0) ++ ++//U3D_PHYD_CDR1 ++#define RG_SSUSB_CDR_BIR_LTD1_OFST (24) ++#define RG_SSUSB_CDR_BIR_LTR_OFST (16) ++#define RG_SSUSB_CDR_BIR_LTD0_OFST (8) ++#define RG_SSUSB_CDR_BW_SEL_OFST (6) ++#define RG_SSUSB_CDR_BIC_LTD1_OFST (0) ++ ++//U3D_PHYD_PLL_0 ++#define RG_SSUSB_FORCE_CDR_BAND_5G_OFST (28) ++#define RG_SSUSB_FORCE_CDR_BAND_2P5G_OFST (27) ++#define RG_SSUSB_FORCE_PLL_BAND_5G_OFST (26) ++#define RG_SSUSB_FORCE_PLL_BAND_2P5G_OFST (25) ++#define RG_SSUSB_P_EQ_T_SEL_OFST (15) ++#define RG_SSUSB_PLL_ISO_EN_CYC_OFST (5) ++#define RG_SSUSB_PLLBAND_RECAL_OFST (4) ++#define RG_SSUSB_PLL_DDS_ISO_EN_OFST (3) ++#define RG_SSUSB_FORCE_PLL_DDS_ISO_EN_OFST (2) ++#define RG_SSUSB_PLL_DDS_PWR_ON_OFST (1) ++#define RG_SSUSB_FORCE_PLL_DDS_PWR_ON_OFST (0) ++ ++//U3D_PHYD_PLL_1 ++#define RG_SSUSB_CDR_BAND_5G_OFST (24) ++#define RG_SSUSB_CDR_BAND_2P5G_OFST (16) ++#define RG_SSUSB_PLL_BAND_5G_OFST (8) ++#define RG_SSUSB_PLL_BAND_2P5G_OFST (0) ++ ++//U3D_PHYD_BCN_DET_1 ++#define RG_SSUSB_P_BCN_OBS_PRD_OFST (16) ++#define RG_SSUSB_U_BCN_OBS_PRD_OFST (0) ++ ++//U3D_PHYD_BCN_DET_2 ++#define RG_SSUSB_P_BCN_OBS_SEL_OFST (16) ++#define RG_SSUSB_BCN_DET_DIS_OFST (12) ++#define RG_SSUSB_U_BCN_OBS_SEL_OFST (0) ++ ++//U3D_EQ0 ++#define RG_SSUSB_EQ_DLHL_LFI_OFST (24) ++#define RG_SSUSB_EQ_DHHL_LFI_OFST (16) ++#define RG_SSUSB_EQ_DD0HOS_LFI_OFST (8) ++#define RG_SSUSB_EQ_DD0LOS_LFI_OFST (0) ++ ++//U3D_EQ1 ++#define RG_SSUSB_EQ_DD1HOS_LFI_OFST (24) ++#define RG_SSUSB_EQ_DD1LOS_LFI_OFST (16) ++#define RG_SSUSB_EQ_DE0OS_LFI_OFST (8) ++#define RG_SSUSB_EQ_DE1OS_LFI_OFST (0) ++ ++//U3D_EQ2 ++#define RG_SSUSB_EQ_DLHLOS_LFI_OFST (24) ++#define RG_SSUSB_EQ_DHHLOS_LFI_OFST (16) ++#define RG_SSUSB_EQ_STOPTIME_OFST (14) ++#define RG_SSUSB_EQ_DHHL_LF_SEL_OFST (11) ++#define RG_SSUSB_EQ_DSAOS_LF_SEL_OFST (8) ++#define RG_SSUSB_EQ_STARTTIME_OFST (6) ++#define RG_SSUSB_EQ_DLEQ_LF_SEL_OFST (3) ++#define RG_SSUSB_EQ_DLHL_LF_SEL_OFST (0) ++ ++//U3D_EQ3 ++#define 
RG_SSUSB_EQ_DLEQ_LFI_GEN2_OFST (28) ++#define RG_SSUSB_EQ_DLEQ_LFI_GEN1_OFST (24) ++#define RG_SSUSB_EQ_DEYE0OS_LFI_OFST (16) ++#define RG_SSUSB_EQ_DEYE1OS_LFI_OFST (8) ++#define RG_SSUSB_EQ_TRI_DET_EN_OFST (7) ++#define RG_SSUSB_EQ_TRI_DET_TH_OFST (0) ++ ++//U3D_EQ_EYE0 ++#define RG_SSUSB_EQ_EYE_XOFFSET_OFST (25) ++#define RG_SSUSB_EQ_EYE_MON_EN_OFST (24) ++#define RG_SSUSB_EQ_EYE0_Y_OFST (16) ++#define RG_SSUSB_EQ_EYE1_Y_OFST (8) ++#define RG_SSUSB_EQ_PILPO_ROUT_OFST (7) ++#define RG_SSUSB_EQ_PI_KPGAIN_OFST (4) ++#define RG_SSUSB_EQ_EYE_CNT_EN_OFST (3) ++ ++//U3D_EQ_EYE1 ++#define RG_SSUSB_EQ_SIGDET_OFST (24) ++#define RG_SSUSB_EQ_EYE_MASK_OFST (7) ++ ++//U3D_EQ_EYE2 ++#define RG_SSUSB_EQ_RX500M_CK_SEL_OFST (31) ++#define RG_SSUSB_EQ_SD_CNT1_OFST (24) ++#define RG_SSUSB_EQ_ISIFLAG_SEL_OFST (22) ++#define RG_SSUSB_EQ_SD_CNT0_OFST (16) ++ ++//U3D_EQ_DFE0 ++#define RG_SSUSB_EQ_LEQMAX_OFST (28) ++#define RG_SSUSB_EQ_DFEX_EN_OFST (27) ++#define RG_SSUSB_EQ_DFEX_LF_SEL_OFST (24) ++#define RG_SSUSB_EQ_CHK_EYE_H_OFST (23) ++#define RG_SSUSB_EQ_PIEYE_INI_OFST (16) ++#define RG_SSUSB_EQ_PI90_INI_OFST (8) ++#define RG_SSUSB_EQ_PI0_INI_OFST (0) ++ ++//U3D_EQ_DFE1 ++#define RG_SSUSB_EQ_REV_OFST (16) ++#define RG_SSUSB_EQ_DFEYEN_DUR_OFST (12) ++#define RG_SSUSB_EQ_DFEXEN_DUR_OFST (8) ++#define RG_SSUSB_EQ_DFEX_RST_OFST (7) ++#define RG_SSUSB_EQ_GATED_RXD_B_OFST (6) ++#define RG_SSUSB_EQ_PI90CK_SEL_OFST (4) ++#define RG_SSUSB_EQ_DFEX_DIS_OFST (2) ++#define RG_SSUSB_EQ_DFEYEN_STOP_DIS_OFST (1) ++#define RG_SSUSB_EQ_DFEXEN_SEL_OFST (0) ++ ++//U3D_EQ_DFE2 ++#define RG_SSUSB_EQ_MON_SEL_OFST (24) ++#define RG_SSUSB_EQ_LEQOSC_DLYCNT_OFST (16) ++#define RG_SSUSB_EQ_DLEQOS_LFI_OFST (8) ++#define RG_SSUSB_EQ_LEQ_STOP_TO_OFST (0) ++ ++//U3D_EQ_DFE3 ++#define RG_SSUSB_EQ_RESERVED_OFST (0) ++ ++//U3D_PHYD_MON0 ++#define RGS_SSUSB_BERT_BERC_OFST (16) ++#define RGS_SSUSB_LFPS_OFST (12) ++#define RGS_SSUSB_TRAINDEC_OFST (8) ++#define RGS_SSUSB_SCP_PAT_OFST (0) ++ ++//U3D_PHYD_MON1 ++#define RGS_SSUSB_RX_FL_OUT_OFST (0) ++ ++//U3D_PHYD_MON2 ++#define RGS_SSUSB_T2RLB_ERRCNT_OFST (16) ++#define RGS_SSUSB_RETRACK_OFST (12) ++#define RGS_SSUSB_RXPLL_LOCK_OFST (10) ++#define RGS_SSUSB_CDR_VCOCAL_CPLT_D_OFST (9) ++#define RGS_SSUSB_PLL_VCOCAL_CPLT_D_OFST (8) ++#define RGS_SSUSB_PDNCTL_OFST (0) ++ ++//U3D_PHYD_MON3 ++#define RGS_SSUSB_TSEQ_ERRCNT_OFST (16) ++#define RGS_SSUSB_PRBS_ERRCNT_OFST (0) ++ ++//U3D_PHYD_MON4 ++#define RGS_SSUSB_RX_LSLOCK_CNT_OFST (24) ++#define RGS_SSUSB_SCP_DETCNT_OFST (16) ++#define RGS_SSUSB_TSEQ_DETCNT_OFST (0) ++ ++//U3D_PHYD_MON5 ++#define RGS_SSUSB_EBUFMSG_OFST (16) ++#define RGS_SSUSB_BERT_LOCK_OFST (15) ++#define RGS_SSUSB_SCP_DET_OFST (14) ++#define RGS_SSUSB_TSEQ_DET_OFST (13) ++#define RGS_SSUSB_EBUF_UDF_OFST (12) ++#define RGS_SSUSB_EBUF_OVF_OFST (11) ++#define RGS_SSUSB_PRBS_PASSTH_OFST (10) ++#define RGS_SSUSB_PRBS_PASS_OFST (9) ++#define RGS_SSUSB_PRBS_LOCK_OFST (8) ++#define RGS_SSUSB_T2RLB_ERR_OFST (6) ++#define RGS_SSUSB_T2RLB_PASSTH_OFST (5) ++#define RGS_SSUSB_T2RLB_PASS_OFST (4) ++#define RGS_SSUSB_T2RLB_LOCK_OFST (3) ++#define RGS_SSUSB_RX_IMPCAL_DONE_OFST (2) ++#define RGS_SSUSB_TX_IMPCAL_DONE_OFST (1) ++#define RGS_SSUSB_RXDETECTED_OFST (0) ++ ++//U3D_PHYD_MON6 ++#define RGS_SSUSB_SIGCAL_DONE_OFST (30) ++#define RGS_SSUSB_SIGCAL_CAL_OUT_OFST (29) ++#define RGS_SSUSB_SIGCAL_OFFSET_OFST (24) ++#define RGS_SSUSB_RX_IMP_SEL_OFST (16) ++#define RGS_SSUSB_TX_IMP_SEL_OFST (8) ++#define RGS_SSUSB_TFIFO_MSG_OFST (4) ++#define RGS_SSUSB_RFIFO_MSG_OFST (0) ++ ++//U3D_PHYD_MON7 
++#define RGS_SSUSB_FT_OUT_OFST (8) ++#define RGS_SSUSB_PRB_OUT_OFST (0) ++ ++//U3D_PHYA_RX_MON0 ++#define RGS_SSUSB_EQ_DCLEQ_OFST (24) ++#define RGS_SSUSB_EQ_DCD0H_OFST (16) ++#define RGS_SSUSB_EQ_DCD0L_OFST (8) ++#define RGS_SSUSB_EQ_DCD1H_OFST (0) ++ ++//U3D_PHYA_RX_MON1 ++#define RGS_SSUSB_EQ_DCD1L_OFST (24) ++#define RGS_SSUSB_EQ_DCE0_OFST (16) ++#define RGS_SSUSB_EQ_DCE1_OFST (8) ++#define RGS_SSUSB_EQ_DCHHL_OFST (0) ++ ++//U3D_PHYA_RX_MON2 ++#define RGS_SSUSB_EQ_LEQ_STOP_OFST (31) ++#define RGS_SSUSB_EQ_DCLHL_OFST (24) ++#define RGS_SSUSB_EQ_STATUS_OFST (16) ++#define RGS_SSUSB_EQ_DCEYE0_OFST (8) ++#define RGS_SSUSB_EQ_DCEYE1_OFST (0) ++ ++//U3D_PHYA_RX_MON3 ++#define RGS_SSUSB_EQ_EYE_MONITOR_ERRCNT_0_OFST (0) ++ ++//U3D_PHYA_RX_MON4 ++#define RGS_SSUSB_EQ_EYE_MONITOR_ERRCNT_1_OFST (0) ++ ++//U3D_PHYA_RX_MON5 ++#define RGS_SSUSB_EQ_DCLEQOS_OFST (8) ++#define RGS_SSUSB_EQ_EYE_CNT_RDY_OFST (7) ++#define RGS_SSUSB_EQ_PILPO_OFST (0) ++ ++//U3D_PHYD_CPPAT2 ++#define RG_SSUSB_CPPAT_OUT_H_TMP2_OFST (16) ++#define RG_SSUSB_CPPAT_OUT_H_TMP1_OFST (8) ++#define RG_SSUSB_CPPAT_OUT_H_TMP0_OFST (0) ++ ++//U3D_EQ_EYE3 ++#define RG_SSUSB_EQ_LEQ_SHIFT_OFST (24) ++#define RG_SSUSB_EQ_EYE_CNT_OFST (0) ++ ++//U3D_KBAND_OUT ++#define RGS_SSUSB_CDR_BAND_5G_OFST (24) ++#define RGS_SSUSB_CDR_BAND_2P5G_OFST (16) ++#define RGS_SSUSB_PLL_BAND_5G_OFST (8) ++#define RGS_SSUSB_PLL_BAND_2P5G_OFST (0) ++ ++//U3D_KBAND_OUT1 ++#define RGS_SSUSB_CDR_VCOCAL_FAIL_OFST (24) ++#define RGS_SSUSB_CDR_VCOCAL_STATE_OFST (16) ++#define RGS_SSUSB_PLL_VCOCAL_FAIL_OFST (8) ++#define RGS_SSUSB_PLL_VCOCAL_STATE_OFST (0) ++ ++ ++/////////////////////////////////////////////////////////////////////////////// ++ ++struct u3phyd_bank2_reg { ++ //0x0 ++ PHY_LE32 b2_phyd_top1; ++ PHY_LE32 b2_phyd_top2; ++ PHY_LE32 b2_phyd_top3; ++ PHY_LE32 b2_phyd_top4; ++ //0x10 ++ PHY_LE32 b2_phyd_top5; ++ PHY_LE32 b2_phyd_top6; ++ PHY_LE32 b2_phyd_top7; ++ PHY_LE32 b2_phyd_p_sigdet1; ++ //0x20 ++ PHY_LE32 b2_phyd_p_sigdet2; ++ PHY_LE32 b2_phyd_p_sigdet_cal1; ++ PHY_LE32 b2_phyd_rxdet1; ++ PHY_LE32 b2_phyd_rxdet2; ++ //0x30 ++ PHY_LE32 b2_phyd_misc0; ++ PHY_LE32 b2_phyd_misc2; ++ PHY_LE32 b2_phyd_misc3; ++ PHY_LE32 reserve0; ++ //0x40 ++ PHY_LE32 b2_rosc_0; ++ PHY_LE32 b2_rosc_1; ++ PHY_LE32 b2_rosc_2; ++ PHY_LE32 b2_rosc_3; ++ //0x50 ++ PHY_LE32 b2_rosc_4; ++ PHY_LE32 b2_rosc_5; ++ PHY_LE32 b2_rosc_6; ++ PHY_LE32 b2_rosc_7; ++ //0x60 ++ PHY_LE32 b2_rosc_8; ++ PHY_LE32 b2_rosc_9; ++ PHY_LE32 b2_rosc_a; ++ PHY_LE32 reserve1; ++ //0x70~0xd0 ++ PHY_LE32 reserve2[28]; ++ //0xe0 ++ PHY_LE32 phyd_version; ++ PHY_LE32 phyd_model; ++}; ++ ++//U3D_B2_PHYD_TOP1 ++#define RG_SSUSB_PCIE2_K_EMP (0xf<<28) //31:28 ++#define RG_SSUSB_PCIE2_K_FUL (0xf<<24) //27:24 ++#define RG_SSUSB_TX_EIDLE_LP_EN (0x1<<17) //17:17 ++#define RG_SSUSB_FORCE_TX_EIDLE_LP_EN (0x1<<16) //16:16 ++#define RG_SSUSB_SIGDET_EN (0x1<<15) //15:15 ++#define RG_SSUSB_FORCE_SIGDET_EN (0x1<<14) //14:14 ++#define RG_SSUSB_CLKRX_EN (0x1<<13) //13:13 ++#define RG_SSUSB_FORCE_CLKRX_EN (0x1<<12) //12:12 ++#define RG_SSUSB_CLKTX_EN (0x1<<11) //11:11 ++#define RG_SSUSB_FORCE_CLKTX_EN (0x1<<10) //10:10 ++#define RG_SSUSB_CLK_REQ_N_I (0x1<<9) //9:9 ++#define RG_SSUSB_FORCE_CLK_REQ_N_I (0x1<<8) //8:8 ++#define RG_SSUSB_RATE (0x1<<6) //6:6 ++#define RG_SSUSB_FORCE_RATE (0x1<<5) //5:5 ++#define RG_SSUSB_PCIE_MODE_SEL (0x1<<4) //4:4 ++#define RG_SSUSB_FORCE_PCIE_MODE_SEL (0x1<<3) //3:3 ++#define RG_SSUSB_PHY_MODE (0x3<<1) //2:1 ++#define RG_SSUSB_FORCE_PHY_MODE (0x1<<0) //0:0 ++ ++//U3D_B2_PHYD_TOP2 ++#define 
RG_SSUSB_FORCE_IDRV_6DB (0x1<<30) //30:30 ++#define RG_SSUSB_IDRV_6DB (0x3f<<24) //29:24 ++#define RG_SSUSB_FORCE_IDEM_3P5DB (0x1<<22) //22:22 ++#define RG_SSUSB_IDEM_3P5DB (0x3f<<16) //21:16 ++#define RG_SSUSB_FORCE_IDRV_3P5DB (0x1<<14) //14:14 ++#define RG_SSUSB_IDRV_3P5DB (0x3f<<8) //13:8 ++#define RG_SSUSB_FORCE_IDRV_0DB (0x1<<6) //6:6 ++#define RG_SSUSB_IDRV_0DB (0x3f<<0) //5:0 ++ ++//U3D_B2_PHYD_TOP3 ++#define RG_SSUSB_TX_BIASI (0x7<<25) //27:25 ++#define RG_SSUSB_FORCE_TX_BIASI_EN (0x1<<24) //24:24 ++#define RG_SSUSB_TX_BIASI_EN (0x1<<16) //16:16 ++#define RG_SSUSB_FORCE_TX_BIASI (0x1<<13) //13:13 ++#define RG_SSUSB_FORCE_IDEM_6DB (0x1<<8) //8:8 ++#define RG_SSUSB_IDEM_6DB (0x3f<<0) //5:0 ++ ++//U3D_B2_PHYD_TOP4 ++#define RG_SSUSB_G1_CDR_BIC_LTR (0xf<<28) //31:28 ++#define RG_SSUSB_G1_CDR_BIC_LTD0 (0xf<<24) //27:24 ++#define RG_SSUSB_G1_CDR_BC_LTD1 (0x1f<<16) //20:16 ++#define RG_SSUSB_G1_CDR_BC_LTR (0x1f<<8) //12:8 ++#define RG_SSUSB_G1_CDR_BC_LTD0 (0x1f<<0) //4:0 ++ ++//U3D_B2_PHYD_TOP5 ++#define RG_SSUSB_G1_CDR_BIR_LTD1 (0x1f<<24) //28:24 ++#define RG_SSUSB_G1_CDR_BIR_LTR (0x1f<<16) //20:16 ++#define RG_SSUSB_G1_CDR_BIR_LTD0 (0x1f<<8) //12:8 ++#define RG_SSUSB_G1_CDR_BIC_LTD1 (0xf<<0) //3:0 ++ ++//U3D_B2_PHYD_TOP6 ++#define RG_SSUSB_G2_CDR_BIC_LTR (0xf<<28) //31:28 ++#define RG_SSUSB_G2_CDR_BIC_LTD0 (0xf<<24) //27:24 ++#define RG_SSUSB_G2_CDR_BC_LTD1 (0x1f<<16) //20:16 ++#define RG_SSUSB_G2_CDR_BC_LTR (0x1f<<8) //12:8 ++#define RG_SSUSB_G2_CDR_BC_LTD0 (0x1f<<0) //4:0 ++ ++//U3D_B2_PHYD_TOP7 ++#define RG_SSUSB_G2_CDR_BIR_LTD1 (0x1f<<24) //28:24 ++#define RG_SSUSB_G2_CDR_BIR_LTR (0x1f<<16) //20:16 ++#define RG_SSUSB_G2_CDR_BIR_LTD0 (0x1f<<8) //12:8 ++#define RG_SSUSB_G2_CDR_BIC_LTD1 (0xf<<0) //3:0 ++ ++//U3D_B2_PHYD_P_SIGDET1 ++#define RG_SSUSB_P_SIGDET_FLT_DIS (0x1<<31) //31:31 ++#define RG_SSUSB_P_SIGDET_FLT_G2_DEAST_SEL (0x7f<<24) //30:24 ++#define RG_SSUSB_P_SIGDET_FLT_G1_DEAST_SEL (0x7f<<16) //22:16 ++#define RG_SSUSB_P_SIGDET_FLT_P2_AST_SEL (0x7f<<8) //14:8 ++#define RG_SSUSB_P_SIGDET_FLT_PX_AST_SEL (0x7f<<0) //6:0 ++ ++//U3D_B2_PHYD_P_SIGDET2 ++#define RG_SSUSB_P_SIGDET_RX_VAL_S (0x1<<29) //29:29 ++#define RG_SSUSB_P_SIGDET_L0S_DEAS_SEL (0x1<<28) //28:28 ++#define RG_SSUSB_P_SIGDET_L0_EXIT_S (0x1<<27) //27:27 ++#define RG_SSUSB_P_SIGDET_L0S_EXIT_T_S (0x3<<25) //26:25 ++#define RG_SSUSB_P_SIGDET_L0S_EXIT_S (0x1<<24) //24:24 ++#define RG_SSUSB_P_SIGDET_L0S_ENTRY_S (0x1<<16) //16:16 ++#define RG_SSUSB_P_SIGDET_PRB_SEL (0x1<<10) //10:10 ++#define RG_SSUSB_P_SIGDET_BK_SIG_T (0x3<<8) //9:8 ++#define RG_SSUSB_P_SIGDET_P2_RXLFPS (0x1<<6) //6:6 ++#define RG_SSUSB_P_SIGDET_NON_BK_AD (0x1<<5) //5:5 ++#define RG_SSUSB_P_SIGDET_BK_B_RXEQ (0x1<<4) //4:4 ++#define RG_SSUSB_P_SIGDET_G2_KO_SEL (0x3<<2) //3:2 ++#define RG_SSUSB_P_SIGDET_G1_KO_SEL (0x3<<0) //1:0 ++ ++//U3D_B2_PHYD_P_SIGDET_CAL1 ++#define RG_SSUSB_P_SIGDET_CAL_OFFSET (0x1f<<24) //28:24 ++#define RG_SSUSB_P_FORCE_SIGDET_CAL_OFFSET (0x1<<16) //16:16 ++#define RG_SSUSB_P_SIGDET_CAL_EN (0x1<<8) //8:8 ++#define RG_SSUSB_P_FORCE_SIGDET_CAL_EN (0x1<<3) //3:3 ++#define RG_SSUSB_P_SIGDET_FLT_EN (0x1<<2) //2:2 ++#define RG_SSUSB_P_SIGDET_SAMPLE_PRD (0x1<<1) //1:1 ++#define RG_SSUSB_P_SIGDET_REK (0x1<<0) //0:0 ++ ++//U3D_B2_PHYD_RXDET1 ++#define RG_SSUSB_RXDET_PRB_SEL (0x1<<31) //31:31 ++#define RG_SSUSB_FORCE_CMDET (0x1<<30) //30:30 ++#define RG_SSUSB_RXDET_EN (0x1<<29) //29:29 ++#define RG_SSUSB_FORCE_RXDET_EN (0x1<<28) //28:28 ++#define RG_SSUSB_RXDET_K_TWICE (0x1<<27) //27:27 ++#define RG_SSUSB_RXDET_STB3_SET (0x1ff<<18) //26:18 
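For reference: each register field in these tables is described twice, once as a mask (the field value already shifted into place) and once as an _OFST bit offset, and the two are meant to be passed together to the generic helpers U3PhyReadField32()/U3PhyWriteField32() that mtk-phy.c adds later in this patch. A minimal sketch of that read-modify-write pattern, not part of the patch, assuming the CONFIG_PROJECT_PHY register layout from mtk-phy.h and a u3phy_info pointer obtained from u3phy_init():

#include "mtk-phy.h"
#include "mtk-phy-7621.h"

/* Sketch only: drive the RXDET enable field of b2_phyd_rxdet1 through
 * its mask/_OFST pair; the helpers come from mtk-phy.c in this patch. */
static void example_force_rxdet(struct u3phy_info *info)
{
	PHY_INT32 addr = (PHY_INT32)(unsigned long)&info->u3phyd_bank2_regs->b2_phyd_rxdet1;

	/* take manual control of the enable bit, then assert it */
	U3PhyWriteField32(addr, RG_SSUSB_FORCE_RXDET_EN_OFST, RG_SSUSB_FORCE_RXDET_EN, 1);
	U3PhyWriteField32(addr, RG_SSUSB_RXDET_EN_OFST, RG_SSUSB_RXDET_EN, 1);

	/* the same mask/offset pair is reused for read-back */
	if (U3PhyReadField32(addr, RG_SSUSB_RXDET_EN_OFST, RG_SSUSB_RXDET_EN))
		printk(KERN_DEBUG "RXDET enabled\n");
}
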
++#define RG_SSUSB_RXDET_STB2_SET (0x1ff<<9) //17:9 ++#define RG_SSUSB_RXDET_STB1_SET (0x1ff<<0) //8:0 ++ ++//U3D_B2_PHYD_RXDET2 ++#define RG_SSUSB_PHYD_TRAINDEC_FORCE_CGEN (0x1<<31) //31:31 ++#define RG_SSUSB_PHYD_BERTLB_FORCE_CGEN (0x1<<30) //30:30 ++#define RG_SSUSB_PHYD_T2RLB_FORCE_CGEN (0x1<<29) //29:29 ++#define RG_SSUSB_PDN_T_SEL (0x3<<18) //19:18 ++#define RG_SSUSB_RXDET_STB3_SET_P3 (0x1ff<<9) //17:9 ++#define RG_SSUSB_RXDET_STB2_SET_P3 (0x1ff<<0) //8:0 ++ ++//U3D_B2_PHYD_MISC0 ++#define RG_SSUSB_FORCE_PLL_DDS_HF_EN (0x1<<22) //22:22 ++#define RG_SSUSB_PLL_DDS_HF_EN_MAN (0x1<<21) //21:21 ++#define RG_SSUSB_RXLFPS_ENTXDRV (0x1<<20) //20:20 ++#define RG_SSUSB_RX_FL_UNLOCKTH (0xf<<16) //19:16 ++#define RG_SSUSB_LFPS_PSEL (0x1<<15) //15:15 ++#define RG_SSUSB_RX_SIGDET_EN (0x1<<14) //14:14 ++#define RG_SSUSB_RX_SIGDET_EN_SEL (0x1<<13) //13:13 ++#define RG_SSUSB_RX_PI_CAL_EN (0x1<<12) //12:12 ++#define RG_SSUSB_RX_PI_CAL_EN_SEL (0x1<<11) //11:11 ++#define RG_SSUSB_P3_CLS_CK_SEL (0x1<<10) //10:10 ++#define RG_SSUSB_T2RLB_PSEL (0x3<<8) //9:8 ++#define RG_SSUSB_PPCTL_PSEL (0x7<<5) //7:5 ++#define RG_SSUSB_PHYD_TX_DATA_INV (0x1<<4) //4:4 ++#define RG_SSUSB_BERTLB_PSEL (0x3<<2) //3:2 ++#define RG_SSUSB_RETRACK_DIS (0x1<<1) //1:1 ++#define RG_SSUSB_PPERRCNT_CLR (0x1<<0) //0:0 ++ ++//U3D_B2_PHYD_MISC2 ++#define RG_SSUSB_FRC_PLL_DDS_PREDIV2 (0x1<<31) //31:31 ++#define RG_SSUSB_FRC_PLL_DDS_IADJ (0xf<<27) //30:27 ++#define RG_SSUSB_P_SIGDET_125FILTER (0x1<<26) //26:26 ++#define RG_SSUSB_P_SIGDET_RST_FILTER (0x1<<25) //25:25 ++#define RG_SSUSB_P_SIGDET_EID_USE_RAW (0x1<<24) //24:24 ++#define RG_SSUSB_P_SIGDET_LTD_USE_RAW (0x1<<23) //23:23 ++#define RG_SSUSB_EIDLE_BF_RXDET (0x1<<22) //22:22 ++#define RG_SSUSB_EIDLE_LP_STBCYC (0x1ff<<13) //21:13 ++#define RG_SSUSB_TX_EIDLE_LP_POSTDLY (0x3f<<7) //12:7 ++#define RG_SSUSB_TX_EIDLE_LP_PREDLY (0x3f<<1) //6:1 ++#define RG_SSUSB_TX_EIDLE_LP_EN_ADV (0x1<<0) //0:0 ++ ++//U3D_B2_PHYD_MISC3 ++#define RGS_SSUSB_DDS_CALIB_C_STATE (0x7<<16) //18:16 ++#define RGS_SSUSB_PPERRCNT (0xffff<<0) //15:0 ++ ++//U3D_B2_ROSC_0 ++#define RG_SSUSB_RING_OSC_CNTEND (0x1ff<<23) //31:23 ++#define RG_SSUSB_XTAL_OSC_CNTEND (0x7f<<16) //22:16 ++#define RG_SSUSB_RING_OSC_EN (0x1<<3) //3:3 ++#define RG_SSUSB_RING_OSC_FORCE_EN (0x1<<2) //2:2 ++#define RG_SSUSB_FRC_RING_BYPASS_DET (0x1<<1) //1:1 ++#define RG_SSUSB_RING_BYPASS_DET (0x1<<0) //0:0 ++ ++//U3D_B2_ROSC_1 ++#define RG_SSUSB_RING_OSC_FRC_P3 (0x1<<20) //20:20 ++#define RG_SSUSB_RING_OSC_P3 (0x1<<19) //19:19 ++#define RG_SSUSB_RING_OSC_FRC_RECAL (0x3<<17) //18:17 ++#define RG_SSUSB_RING_OSC_RECAL (0x1<<16) //16:16 ++#define RG_SSUSB_RING_OSC_SEL (0xff<<8) //15:8 ++#define RG_SSUSB_RING_OSC_FRC_SEL (0x1<<0) //0:0 ++ ++//U3D_B2_ROSC_2 ++#define RG_SSUSB_RING_DET_STRCYC2 (0xffff<<16) //31:16 ++#define RG_SSUSB_RING_DET_STRCYC1 (0xffff<<0) //15:0 ++ ++//U3D_B2_ROSC_3 ++#define RG_SSUSB_RING_DET_DETWIN1 (0xffff<<16) //31:16 ++#define RG_SSUSB_RING_DET_STRCYC3 (0xffff<<0) //15:0 ++ ++//U3D_B2_ROSC_4 ++#define RG_SSUSB_RING_DET_DETWIN3 (0xffff<<16) //31:16 ++#define RG_SSUSB_RING_DET_DETWIN2 (0xffff<<0) //15:0 ++ ++//U3D_B2_ROSC_5 ++#define RG_SSUSB_RING_DET_LBOND1 (0xffff<<16) //31:16 ++#define RG_SSUSB_RING_DET_UBOND1 (0xffff<<0) //15:0 ++ ++//U3D_B2_ROSC_6 ++#define RG_SSUSB_RING_DET_LBOND2 (0xffff<<16) //31:16 ++#define RG_SSUSB_RING_DET_UBOND2 (0xffff<<0) //15:0 ++ ++//U3D_B2_ROSC_7 ++#define RG_SSUSB_RING_DET_LBOND3 (0xffff<<16) //31:16 ++#define RG_SSUSB_RING_DET_UBOND3 (0xffff<<0) //15:0 ++ ++//U3D_B2_ROSC_8 ++#define 
RG_SSUSB_RING_RESERVE (0xffff<<16) //31:16 ++#define RG_SSUSB_ROSC_PROB_SEL (0xf<<2) //5:2 ++#define RG_SSUSB_RING_FREQMETER_EN (0x1<<1) //1:1 ++#define RG_SSUSB_RING_DET_BPS_UBOND (0x1<<0) //0:0 ++ ++//U3D_B2_ROSC_9 ++#define RGS_FM_RING_CNT (0xffff<<16) //31:16 ++#define RGS_SSUSB_RING_OSC_STATE (0x3<<10) //11:10 ++#define RGS_SSUSB_RING_OSC_STABLE (0x1<<9) //9:9 ++#define RGS_SSUSB_RING_OSC_CAL_FAIL (0x1<<8) //8:8 ++#define RGS_SSUSB_RING_OSC_CAL (0xff<<0) //7:0 ++ ++//U3D_B2_ROSC_A ++#define RGS_SSUSB_ROSC_PROB_OUT (0xff<<0) //7:0 ++ ++//U3D_PHYD_VERSION ++#define RGS_SSUSB_PHYD_VERSION (0xffffffff<<0) //31:0 ++ ++//U3D_PHYD_MODEL ++#define RGS_SSUSB_PHYD_MODEL (0xffffffff<<0) //31:0 ++ ++ ++/* OFFSET */ ++ ++//U3D_B2_PHYD_TOP1 ++#define RG_SSUSB_PCIE2_K_EMP_OFST (28) ++#define RG_SSUSB_PCIE2_K_FUL_OFST (24) ++#define RG_SSUSB_TX_EIDLE_LP_EN_OFST (17) ++#define RG_SSUSB_FORCE_TX_EIDLE_LP_EN_OFST (16) ++#define RG_SSUSB_SIGDET_EN_OFST (15) ++#define RG_SSUSB_FORCE_SIGDET_EN_OFST (14) ++#define RG_SSUSB_CLKRX_EN_OFST (13) ++#define RG_SSUSB_FORCE_CLKRX_EN_OFST (12) ++#define RG_SSUSB_CLKTX_EN_OFST (11) ++#define RG_SSUSB_FORCE_CLKTX_EN_OFST (10) ++#define RG_SSUSB_CLK_REQ_N_I_OFST (9) ++#define RG_SSUSB_FORCE_CLK_REQ_N_I_OFST (8) ++#define RG_SSUSB_RATE_OFST (6) ++#define RG_SSUSB_FORCE_RATE_OFST (5) ++#define RG_SSUSB_PCIE_MODE_SEL_OFST (4) ++#define RG_SSUSB_FORCE_PCIE_MODE_SEL_OFST (3) ++#define RG_SSUSB_PHY_MODE_OFST (1) ++#define RG_SSUSB_FORCE_PHY_MODE_OFST (0) ++ ++//U3D_B2_PHYD_TOP2 ++#define RG_SSUSB_FORCE_IDRV_6DB_OFST (30) ++#define RG_SSUSB_IDRV_6DB_OFST (24) ++#define RG_SSUSB_FORCE_IDEM_3P5DB_OFST (22) ++#define RG_SSUSB_IDEM_3P5DB_OFST (16) ++#define RG_SSUSB_FORCE_IDRV_3P5DB_OFST (14) ++#define RG_SSUSB_IDRV_3P5DB_OFST (8) ++#define RG_SSUSB_FORCE_IDRV_0DB_OFST (6) ++#define RG_SSUSB_IDRV_0DB_OFST (0) ++ ++//U3D_B2_PHYD_TOP3 ++#define RG_SSUSB_TX_BIASI_OFST (25) ++#define RG_SSUSB_FORCE_TX_BIASI_EN_OFST (24) ++#define RG_SSUSB_TX_BIASI_EN_OFST (16) ++#define RG_SSUSB_FORCE_TX_BIASI_OFST (13) ++#define RG_SSUSB_FORCE_IDEM_6DB_OFST (8) ++#define RG_SSUSB_IDEM_6DB_OFST (0) ++ ++//U3D_B2_PHYD_TOP4 ++#define RG_SSUSB_G1_CDR_BIC_LTR_OFST (28) ++#define RG_SSUSB_G1_CDR_BIC_LTD0_OFST (24) ++#define RG_SSUSB_G1_CDR_BC_LTD1_OFST (16) ++#define RG_SSUSB_G1_CDR_BC_LTR_OFST (8) ++#define RG_SSUSB_G1_CDR_BC_LTD0_OFST (0) ++ ++//U3D_B2_PHYD_TOP5 ++#define RG_SSUSB_G1_CDR_BIR_LTD1_OFST (24) ++#define RG_SSUSB_G1_CDR_BIR_LTR_OFST (16) ++#define RG_SSUSB_G1_CDR_BIR_LTD0_OFST (8) ++#define RG_SSUSB_G1_CDR_BIC_LTD1_OFST (0) ++ ++//U3D_B2_PHYD_TOP6 ++#define RG_SSUSB_G2_CDR_BIC_LTR_OFST (28) ++#define RG_SSUSB_G2_CDR_BIC_LTD0_OFST (24) ++#define RG_SSUSB_G2_CDR_BC_LTD1_OFST (16) ++#define RG_SSUSB_G2_CDR_BC_LTR_OFST (8) ++#define RG_SSUSB_G2_CDR_BC_LTD0_OFST (0) ++ ++//U3D_B2_PHYD_TOP7 ++#define RG_SSUSB_G2_CDR_BIR_LTD1_OFST (24) ++#define RG_SSUSB_G2_CDR_BIR_LTR_OFST (16) ++#define RG_SSUSB_G2_CDR_BIR_LTD0_OFST (8) ++#define RG_SSUSB_G2_CDR_BIC_LTD1_OFST (0) ++ ++//U3D_B2_PHYD_P_SIGDET1 ++#define RG_SSUSB_P_SIGDET_FLT_DIS_OFST (31) ++#define RG_SSUSB_P_SIGDET_FLT_G2_DEAST_SEL_OFST (24) ++#define RG_SSUSB_P_SIGDET_FLT_G1_DEAST_SEL_OFST (16) ++#define RG_SSUSB_P_SIGDET_FLT_P2_AST_SEL_OFST (8) ++#define RG_SSUSB_P_SIGDET_FLT_PX_AST_SEL_OFST (0) ++ ++//U3D_B2_PHYD_P_SIGDET2 ++#define RG_SSUSB_P_SIGDET_RX_VAL_S_OFST (29) ++#define RG_SSUSB_P_SIGDET_L0S_DEAS_SEL_OFST (28) ++#define RG_SSUSB_P_SIGDET_L0_EXIT_S_OFST (27) ++#define RG_SSUSB_P_SIGDET_L0S_EXIT_T_S_OFST (25) ++#define 
RG_SSUSB_P_SIGDET_L0S_EXIT_S_OFST (24) ++#define RG_SSUSB_P_SIGDET_L0S_ENTRY_S_OFST (16) ++#define RG_SSUSB_P_SIGDET_PRB_SEL_OFST (10) ++#define RG_SSUSB_P_SIGDET_BK_SIG_T_OFST (8) ++#define RG_SSUSB_P_SIGDET_P2_RXLFPS_OFST (6) ++#define RG_SSUSB_P_SIGDET_NON_BK_AD_OFST (5) ++#define RG_SSUSB_P_SIGDET_BK_B_RXEQ_OFST (4) ++#define RG_SSUSB_P_SIGDET_G2_KO_SEL_OFST (2) ++#define RG_SSUSB_P_SIGDET_G1_KO_SEL_OFST (0) ++ ++//U3D_B2_PHYD_P_SIGDET_CAL1 ++#define RG_SSUSB_P_SIGDET_CAL_OFFSET_OFST (24) ++#define RG_SSUSB_P_FORCE_SIGDET_CAL_OFFSET_OFST (16) ++#define RG_SSUSB_P_SIGDET_CAL_EN_OFST (8) ++#define RG_SSUSB_P_FORCE_SIGDET_CAL_EN_OFST (3) ++#define RG_SSUSB_P_SIGDET_FLT_EN_OFST (2) ++#define RG_SSUSB_P_SIGDET_SAMPLE_PRD_OFST (1) ++#define RG_SSUSB_P_SIGDET_REK_OFST (0) ++ ++//U3D_B2_PHYD_RXDET1 ++#define RG_SSUSB_RXDET_PRB_SEL_OFST (31) ++#define RG_SSUSB_FORCE_CMDET_OFST (30) ++#define RG_SSUSB_RXDET_EN_OFST (29) ++#define RG_SSUSB_FORCE_RXDET_EN_OFST (28) ++#define RG_SSUSB_RXDET_K_TWICE_OFST (27) ++#define RG_SSUSB_RXDET_STB3_SET_OFST (18) ++#define RG_SSUSB_RXDET_STB2_SET_OFST (9) ++#define RG_SSUSB_RXDET_STB1_SET_OFST (0) ++ ++//U3D_B2_PHYD_RXDET2 ++#define RG_SSUSB_PHYD_TRAINDEC_FORCE_CGEN_OFST (31) ++#define RG_SSUSB_PHYD_BERTLB_FORCE_CGEN_OFST (30) ++#define RG_SSUSB_PHYD_T2RLB_FORCE_CGEN_OFST (29) ++#define RG_SSUSB_PDN_T_SEL_OFST (18) ++#define RG_SSUSB_RXDET_STB3_SET_P3_OFST (9) ++#define RG_SSUSB_RXDET_STB2_SET_P3_OFST (0) ++ ++//U3D_B2_PHYD_MISC0 ++#define RG_SSUSB_FORCE_PLL_DDS_HF_EN_OFST (22) ++#define RG_SSUSB_PLL_DDS_HF_EN_MAN_OFST (21) ++#define RG_SSUSB_RXLFPS_ENTXDRV_OFST (20) ++#define RG_SSUSB_RX_FL_UNLOCKTH_OFST (16) ++#define RG_SSUSB_LFPS_PSEL_OFST (15) ++#define RG_SSUSB_RX_SIGDET_EN_OFST (14) ++#define RG_SSUSB_RX_SIGDET_EN_SEL_OFST (13) ++#define RG_SSUSB_RX_PI_CAL_EN_OFST (12) ++#define RG_SSUSB_RX_PI_CAL_EN_SEL_OFST (11) ++#define RG_SSUSB_P3_CLS_CK_SEL_OFST (10) ++#define RG_SSUSB_T2RLB_PSEL_OFST (8) ++#define RG_SSUSB_PPCTL_PSEL_OFST (5) ++#define RG_SSUSB_PHYD_TX_DATA_INV_OFST (4) ++#define RG_SSUSB_BERTLB_PSEL_OFST (2) ++#define RG_SSUSB_RETRACK_DIS_OFST (1) ++#define RG_SSUSB_PPERRCNT_CLR_OFST (0) ++ ++//U3D_B2_PHYD_MISC2 ++#define RG_SSUSB_FRC_PLL_DDS_PREDIV2_OFST (31) ++#define RG_SSUSB_FRC_PLL_DDS_IADJ_OFST (27) ++#define RG_SSUSB_P_SIGDET_125FILTER_OFST (26) ++#define RG_SSUSB_P_SIGDET_RST_FILTER_OFST (25) ++#define RG_SSUSB_P_SIGDET_EID_USE_RAW_OFST (24) ++#define RG_SSUSB_P_SIGDET_LTD_USE_RAW_OFST (23) ++#define RG_SSUSB_EIDLE_BF_RXDET_OFST (22) ++#define RG_SSUSB_EIDLE_LP_STBCYC_OFST (13) ++#define RG_SSUSB_TX_EIDLE_LP_POSTDLY_OFST (7) ++#define RG_SSUSB_TX_EIDLE_LP_PREDLY_OFST (1) ++#define RG_SSUSB_TX_EIDLE_LP_EN_ADV_OFST (0) ++ ++//U3D_B2_PHYD_MISC3 ++#define RGS_SSUSB_DDS_CALIB_C_STATE_OFST (16) ++#define RGS_SSUSB_PPERRCNT_OFST (0) ++ ++//U3D_B2_ROSC_0 ++#define RG_SSUSB_RING_OSC_CNTEND_OFST (23) ++#define RG_SSUSB_XTAL_OSC_CNTEND_OFST (16) ++#define RG_SSUSB_RING_OSC_EN_OFST (3) ++#define RG_SSUSB_RING_OSC_FORCE_EN_OFST (2) ++#define RG_SSUSB_FRC_RING_BYPASS_DET_OFST (1) ++#define RG_SSUSB_RING_BYPASS_DET_OFST (0) ++ ++//U3D_B2_ROSC_1 ++#define RG_SSUSB_RING_OSC_FRC_P3_OFST (20) ++#define RG_SSUSB_RING_OSC_P3_OFST (19) ++#define RG_SSUSB_RING_OSC_FRC_RECAL_OFST (17) ++#define RG_SSUSB_RING_OSC_RECAL_OFST (16) ++#define RG_SSUSB_RING_OSC_SEL_OFST (8) ++#define RG_SSUSB_RING_OSC_FRC_SEL_OFST (0) ++ ++//U3D_B2_ROSC_2 ++#define RG_SSUSB_RING_DET_STRCYC2_OFST (16) ++#define RG_SSUSB_RING_DET_STRCYC1_OFST (0) ++ ++//U3D_B2_ROSC_3 ++#define 
RG_SSUSB_RING_DET_DETWIN1_OFST (16) ++#define RG_SSUSB_RING_DET_STRCYC3_OFST (0) ++ ++//U3D_B2_ROSC_4 ++#define RG_SSUSB_RING_DET_DETWIN3_OFST (16) ++#define RG_SSUSB_RING_DET_DETWIN2_OFST (0) ++ ++//U3D_B2_ROSC_5 ++#define RG_SSUSB_RING_DET_LBOND1_OFST (16) ++#define RG_SSUSB_RING_DET_UBOND1_OFST (0) ++ ++//U3D_B2_ROSC_6 ++#define RG_SSUSB_RING_DET_LBOND2_OFST (16) ++#define RG_SSUSB_RING_DET_UBOND2_OFST (0) ++ ++//U3D_B2_ROSC_7 ++#define RG_SSUSB_RING_DET_LBOND3_OFST (16) ++#define RG_SSUSB_RING_DET_UBOND3_OFST (0) ++ ++//U3D_B2_ROSC_8 ++#define RG_SSUSB_RING_RESERVE_OFST (16) ++#define RG_SSUSB_ROSC_PROB_SEL_OFST (2) ++#define RG_SSUSB_RING_FREQMETER_EN_OFST (1) ++#define RG_SSUSB_RING_DET_BPS_UBOND_OFST (0) ++ ++//U3D_B2_ROSC_9 ++#define RGS_FM_RING_CNT_OFST (16) ++#define RGS_SSUSB_RING_OSC_STATE_OFST (10) ++#define RGS_SSUSB_RING_OSC_STABLE_OFST (9) ++#define RGS_SSUSB_RING_OSC_CAL_FAIL_OFST (8) ++#define RGS_SSUSB_RING_OSC_CAL_OFST (0) ++ ++//U3D_B2_ROSC_A ++#define RGS_SSUSB_ROSC_PROB_OUT_OFST (0) ++ ++//U3D_PHYD_VERSION ++#define RGS_SSUSB_PHYD_VERSION_OFST (0) ++ ++//U3D_PHYD_MODEL ++#define RGS_SSUSB_PHYD_MODEL_OFST (0) ++ ++ ++/////////////////////////////////////////////////////////////////////////////// ++ ++struct sifslv_chip_reg { ++ PHY_LE32 xtalbias; ++ PHY_LE32 syspll1; ++ PHY_LE32 gpio_ctla; ++ PHY_LE32 gpio_ctlb; ++ PHY_LE32 gpio_ctlc; ++}; ++ ++//U3D_GPIO_CTLA ++#define RG_C60802_GPIO_CTLA (0xffffffff<<0) //31:0 ++ ++//U3D_GPIO_CTLB ++#define RG_C60802_GPIO_CTLB (0xffffffff<<0) //31:0 ++ ++//U3D_GPIO_CTLC ++#define RG_C60802_GPIO_CTLC (0xffffffff<<0) //31:0 ++ ++/* OFFSET */ ++ ++//U3D_GPIO_CTLA ++#define RG_C60802_GPIO_CTLA_OFST (0) ++ ++//U3D_GPIO_CTLB ++#define RG_C60802_GPIO_CTLB_OFST (0) ++ ++//U3D_GPIO_CTLC ++#define RG_C60802_GPIO_CTLC_OFST (0) ++ ++/////////////////////////////////////////////////////////////////////////////// ++ ++struct sifslv_fm_feg { ++ //0x0 ++ PHY_LE32 fmcr0; ++ PHY_LE32 fmcr1; ++ PHY_LE32 fmcr2; ++ PHY_LE32 fmmonr0; ++ //0x10 ++ PHY_LE32 fmmonr1; ++}; ++ ++//U3D_FMCR0 ++#define RG_LOCKTH (0xf<<28) //31:28 ++#define RG_MONCLK_SEL (0x3<<26) //27:26 ++#define RG_FM_MODE (0x1<<25) //25:25 ++#define RG_FREQDET_EN (0x1<<24) //24:24 ++#define RG_CYCLECNT (0xffffff<<0) //23:0 ++ ++//U3D_FMCR1 ++#define RG_TARGET (0xffffffff<<0) //31:0 ++ ++//U3D_FMCR2 ++#define RG_OFFSET (0xffffffff<<0) //31:0 ++ ++//U3D_FMMONR0 ++#define USB_FM_OUT (0xffffffff<<0) //31:0 ++ ++//U3D_FMMONR1 ++#define RG_MONCLK_SEL_3 (0x1<<9) //9:9 ++#define RG_FRCK_EN (0x1<<8) //8:8 ++#define USBPLL_LOCK (0x1<<1) //1:1 ++#define USB_FM_VLD (0x1<<0) //0:0 ++ ++ ++/* OFFSET */ ++ ++//U3D_FMCR0 ++#define RG_LOCKTH_OFST (28) ++#define RG_MONCLK_SEL_OFST (26) ++#define RG_FM_MODE_OFST (25) ++#define RG_FREQDET_EN_OFST (24) ++#define RG_CYCLECNT_OFST (0) ++ ++//U3D_FMCR1 ++#define RG_TARGET_OFST (0) ++ ++//U3D_FMCR2 ++#define RG_OFFSET_OFST (0) ++ ++//U3D_FMMONR0 ++#define USB_FM_OUT_OFST (0) ++ ++//U3D_FMMONR1 ++#define RG_MONCLK_SEL_3_OFST (9) ++#define RG_FRCK_EN_OFST (8) ++#define USBPLL_LOCK_OFST (1) ++#define USB_FM_VLD_OFST (0) ++ ++ ++/////////////////////////////////////////////////////////////////////////////// ++ ++PHY_INT32 phy_init(struct u3phy_info *info); ++PHY_INT32 phy_change_pipe_phase(struct u3phy_info *info, PHY_INT32 phy_drv, PHY_INT32 pipe_phase); ++PHY_INT32 eyescan_init(struct u3phy_info *info); ++PHY_INT32 phy_eyescan(struct u3phy_info *info, PHY_INT32 x_t1, PHY_INT32 y_t1, PHY_INT32 x_br, PHY_INT32 y_br, PHY_INT32 delta_x, PHY_INT32 delta_y ++ , PHY_INT32 
eye_cnt, PHY_INT32 num_cnt, PHY_INT32 PI_cal_en, PHY_INT32 num_ignore_cnt); ++PHY_INT32 u2_save_cur_en(struct u3phy_info *info); ++PHY_INT32 u2_save_cur_re(struct u3phy_info *info); ++PHY_INT32 u2_slew_rate_calibration(struct u3phy_info *info); ++ ++#endif ++#endif +--- /dev/null ++++ b/drivers/usb/host/mtk-phy-ahb.c +@@ -0,0 +1,58 @@ ++#include "mtk-phy.h" ++#ifdef CONFIG_U3D_HAL_SUPPORT ++#include "mu3d_hal_osal.h" ++#endif ++ ++#ifdef CONFIG_U3_PHY_AHB_SUPPORT ++#include ++#include ++#include ++ ++#ifndef CONFIG_U3D_HAL_SUPPORT ++#define os_writel(addr,data) {\ ++ (*((volatile PHY_UINT32*)(addr)) = data);\ ++ } ++#define os_readl(addr) *((volatile PHY_UINT32*)(addr)) ++#define os_writelmsk(addr, data, msk) \ ++ { os_writel(addr, ((os_readl(addr) & ~(msk)) | ((data) & (msk)))); \ ++ } ++#define os_setmsk(addr, msk) \ ++ { os_writel(addr, os_readl(addr) | msk); \ ++ } ++#define os_clrmsk(addr, msk) \ ++ { os_writel(addr, os_readl(addr) &~ msk); \ ++ } ++/*msk the data first, then umsk with the umsk.*/ ++#define os_writelmskumsk(addr, data, msk, umsk) \ ++{\ ++ os_writel(addr, ((os_readl(addr) & ~(msk)) | ((data) & (msk))) & (umsk));\ ++} ++ ++#endif ++ ++PHY_INT32 U3PhyWriteReg32(PHY_UINT32 addr, PHY_UINT32 data) ++{ ++ os_writel(addr, data); ++ ++ return 0; ++} ++ ++PHY_INT32 U3PhyReadReg32(PHY_UINT32 addr) ++{ ++ return os_readl(addr); ++} ++ ++PHY_INT32 U3PhyWriteReg8(PHY_UINT32 addr, PHY_UINT8 data) ++{ ++ os_writelmsk(addr&0xfffffffc, data<<((addr%4)*8), 0xff<<((addr%4)*8)); ++ ++ return 0; ++} ++ ++PHY_INT8 U3PhyReadReg8(PHY_UINT32 addr) ++{ ++ return ((os_readl(addr)>>((addr%4)*8))&0xff); ++} ++ ++#endif ++ +--- /dev/null ++++ b/drivers/usb/host/mtk-phy.c +@@ -0,0 +1,102 @@ ++#include ++#include ++#include ++#define U3_PHY_LIB ++#include "mtk-phy.h" ++#ifdef CONFIG_PROJECT_7621 ++#include "mtk-phy-7621.h" ++#endif ++#ifdef CONFIG_PROJECT_PHY ++static struct u3phy_operator project_operators = { ++ .init = phy_init, ++ .change_pipe_phase = phy_change_pipe_phase, ++ .eyescan_init = eyescan_init, ++ .eyescan = phy_eyescan, ++ .u2_slew_rate_calibration = u2_slew_rate_calibration, ++}; ++#endif ++ ++ ++PHY_INT32 u3phy_init(){ ++#ifndef CONFIG_PROJECT_PHY ++ PHY_INT32 u3phy_version; ++#endif ++ ++ if(u3phy != NULL){ ++ return PHY_TRUE; ++ } ++ ++ u3phy = kmalloc(sizeof(struct u3phy_info), GFP_NOIO); ++#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) ++ u3phy_p1 = kmalloc(sizeof(struct u3phy_info), GFP_NOIO); ++#endif ++#ifdef CONFIG_U3_PHY_GPIO_SUPPORT ++ u3phy->phyd_version_addr = 0x2000e4; ++#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) ++ u3phy_p1->phyd_version_addr = 0x2000e4; ++#endif ++#else ++ u3phy->phyd_version_addr = U3_PHYD_B2_BASE + 0xe4; ++#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) ++ u3phy_p1->phyd_version_addr = U3_PHYD_B2_BASE_P1 + 0xe4; ++#endif ++#endif ++ ++#ifdef CONFIG_PROJECT_PHY ++ ++ u3phy->u2phy_regs = (struct u2phy_reg *)U2_PHY_BASE; ++ u3phy->u3phyd_regs = (struct u3phyd_reg *)U3_PHYD_BASE; ++ u3phy->u3phyd_bank2_regs = (struct u3phyd_bank2_reg *)U3_PHYD_B2_BASE; ++ u3phy->u3phya_regs = (struct u3phya_reg *)U3_PHYA_BASE; ++ u3phy->u3phya_da_regs = (struct u3phya_da_reg *)U3_PHYA_DA_BASE; ++ u3phy->sifslv_chip_regs = (struct sifslv_chip_reg *)SIFSLV_CHIP_BASE; ++ u3phy->sifslv_fm_regs = (struct sifslv_fm_feg *)SIFSLV_FM_FEG_BASE; ++ u3phy_ops = &project_operators; ++ ++#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) ++ u3phy_p1->u2phy_regs = (struct u2phy_reg *)U2_PHY_BASE_P1; ++ u3phy_p1->u3phyd_regs = (struct u3phyd_reg *)U3_PHYD_BASE_P1; ++ 
u3phy_p1->u3phyd_bank2_regs = (struct u3phyd_bank2_reg *)U3_PHYD_B2_BASE_P1; ++ u3phy_p1->u3phya_regs = (struct u3phya_reg *)U3_PHYA_BASE_P1; ++ u3phy_p1->u3phya_da_regs = (struct u3phya_da_reg *)U3_PHYA_DA_BASE_P1; ++ u3phy_p1->sifslv_chip_regs = (struct sifslv_chip_reg *)SIFSLV_CHIP_BASE; ++ u3phy_p1->sifslv_fm_regs = (struct sifslv_fm_feg *)SIFSLV_FM_FEG_BASE; ++#endif ++#endif ++ ++ return PHY_TRUE; ++} ++ ++PHY_INT32 U3PhyWriteField8(PHY_INT32 addr, PHY_INT32 offset, PHY_INT32 mask, PHY_INT32 value){ ++ PHY_INT8 cur_value; ++ PHY_INT8 new_value; ++ ++ cur_value = U3PhyReadReg8(addr); ++ new_value = (cur_value & (~mask)) | (value << offset); ++ //udelay(i2cdelayus); ++ U3PhyWriteReg8(addr, new_value); ++ return PHY_TRUE; ++} ++ ++PHY_INT32 U3PhyWriteField32(PHY_INT32 addr, PHY_INT32 offset, PHY_INT32 mask, PHY_INT32 value){ ++ PHY_INT32 cur_value; ++ PHY_INT32 new_value; ++ ++ cur_value = U3PhyReadReg32(addr); ++ new_value = (cur_value & (~mask)) | ((value << offset) & mask); ++ U3PhyWriteReg32(addr, new_value); ++ //DRV_MDELAY(100); ++ ++ return PHY_TRUE; ++} ++ ++PHY_INT32 U3PhyReadField8(PHY_INT32 addr,PHY_INT32 offset,PHY_INT32 mask){ ++ ++ return ((U3PhyReadReg8(addr) & mask) >> offset); ++} ++ ++PHY_INT32 U3PhyReadField32(PHY_INT32 addr, PHY_INT32 offset, PHY_INT32 mask){ ++ ++ return ((U3PhyReadReg32(addr) & mask) >> offset); ++} ++ +--- /dev/null ++++ b/drivers/usb/host/mtk-phy.h +@@ -0,0 +1,179 @@ ++#ifndef __MTK_PHY_NEW_H ++#define __MTK_PHY_NEW_H ++ ++//#define CONFIG_U3D_HAL_SUPPORT ++ ++/* include system library */ ++#include ++#include ++#include ++#include ++ ++/* Choose PHY R/W implementation */ ++//#define CONFIG_U3_PHY_GPIO_SUPPORT //SW I2C implemented by GPIO ++#define CONFIG_U3_PHY_AHB_SUPPORT //AHB, only on SoC ++ ++/* Choose PHY version */ ++//Select your project by defining one of the followings ++#define CONFIG_PROJECT_7621 //7621 ++#define CONFIG_PROJECT_PHY ++ ++/* BASE ADDRESS DEFINE, should define this on ASIC */ ++#define PHY_BASE 0xBE1D0000 ++#define SIFSLV_FM_FEG_BASE (PHY_BASE+0x100) ++#define SIFSLV_CHIP_BASE (PHY_BASE+0x700) ++#define U2_PHY_BASE (PHY_BASE+0x800) ++#define U3_PHYD_BASE (PHY_BASE+0x900) ++#define U3_PHYD_B2_BASE (PHY_BASE+0xa00) ++#define U3_PHYA_BASE (PHY_BASE+0xb00) ++#define U3_PHYA_DA_BASE (PHY_BASE+0xc00) ++ ++#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) ++#define SIFSLV_FM_FEG_BASE_P1 (PHY_BASE+0x100) ++#define SIFSLV_CHIP_BASE_P1 (PHY_BASE+0x700) ++#define U2_PHY_BASE_P1 (PHY_BASE+0x1000) ++#define U3_PHYD_BASE_P1 (PHY_BASE+0x1100) ++#define U3_PHYD_B2_BASE_P1 (PHY_BASE+0x1200) ++#define U3_PHYA_BASE_P1 (PHY_BASE+0x1300) ++#define U3_PHYA_DA_BASE_P1 (PHY_BASE+0x1400) ++#endif ++ ++/* ++ ++0x00000100 MODULE ssusb_sifslv_fmreg ssusb_sifslv_fmreg ++0x00000700 MODULE ssusb_sifslv_ippc ssusb_sifslv_ippc ++0x00000800 MODULE ssusb_sifslv_u2phy_com ssusb_sifslv_u2_phy_com_T28 ++0x00000900 MODULE ssusb_sifslv_u3phyd ssusb_sifslv_u3phyd_T28 ++0x00000a00 MODULE ssusb_sifslv_u3phyd_bank2 ssusb_sifslv_u3phyd_bank2_T28 ++0x00000b00 MODULE ssusb_sifslv_u3phya ssusb_sifslv_u3phya_T28 ++0x00000c00 MODULE ssusb_sifslv_u3phya_da ssusb_sifslv_u3phya_da_T28 ++*/ ++ ++ ++/* TYPE DEFINE */ ++typedef unsigned int PHY_UINT32; ++typedef int PHY_INT32; ++typedef unsigned short PHY_UINT16; ++typedef short PHY_INT16; ++typedef unsigned char PHY_UINT8; ++typedef char PHY_INT8; ++ ++typedef PHY_UINT32 __bitwise PHY_LE32; ++ ++/* CONSTANT DEFINE */ ++#define PHY_FALSE 0 ++#define PHY_TRUE 1 ++ ++/* MACRO DEFINE */ ++#define DRV_WriteReg32(addr,data) 
((*(volatile PHY_UINT32 *)(addr)) = (unsigned long)(data)) ++#define DRV_Reg32(addr) (*(volatile PHY_UINT32 *)(addr)) ++ ++#define DRV_MDELAY mdelay ++#define DRV_MSLEEP msleep ++#define DRV_UDELAY udelay ++#define DRV_USLEEP usleep ++ ++/* PHY FUNCTION DEFINE, implemented in platform files, ex. ahb, gpio */ ++PHY_INT32 U3PhyWriteReg32(PHY_UINT32 addr, PHY_UINT32 data); ++PHY_INT32 U3PhyReadReg32(PHY_UINT32 addr); ++PHY_INT32 U3PhyWriteReg8(PHY_UINT32 addr, PHY_UINT8 data); ++PHY_INT8 U3PhyReadReg8(PHY_UINT32 addr); ++ ++/* PHY GENERAL USAGE FUNC, implemented in mtk-phy.c */ ++PHY_INT32 U3PhyWriteField8(PHY_INT32 addr, PHY_INT32 offset, PHY_INT32 mask, PHY_INT32 value); ++PHY_INT32 U3PhyWriteField32(PHY_INT32 addr, PHY_INT32 offset, PHY_INT32 mask, PHY_INT32 value); ++PHY_INT32 U3PhyReadField8(PHY_INT32 addr, PHY_INT32 offset, PHY_INT32 mask); ++PHY_INT32 U3PhyReadField32(PHY_INT32 addr, PHY_INT32 offset, PHY_INT32 mask); ++ ++struct u3phy_info { ++ PHY_INT32 phy_version; ++ PHY_INT32 phyd_version_addr; ++ ++#ifdef CONFIG_PROJECT_PHY ++ struct u2phy_reg *u2phy_regs; ++ struct u3phya_reg *u3phya_regs; ++ struct u3phya_da_reg *u3phya_da_regs; ++ struct u3phyd_reg *u3phyd_regs; ++ struct u3phyd_bank2_reg *u3phyd_bank2_regs; ++ struct sifslv_chip_reg *sifslv_chip_regs; ++ struct sifslv_fm_feg *sifslv_fm_regs; ++#endif ++}; ++ ++struct u3phy_operator { ++ PHY_INT32 (*init) (struct u3phy_info *info); ++ PHY_INT32 (*change_pipe_phase) (struct u3phy_info *info, PHY_INT32 phy_drv, PHY_INT32 pipe_phase); ++ PHY_INT32 (*eyescan_init) (struct u3phy_info *info); ++ PHY_INT32 (*eyescan) (struct u3phy_info *info, PHY_INT32 x_t1, PHY_INT32 y_t1, PHY_INT32 x_br, PHY_INT32 y_br, PHY_INT32 delta_x, PHY_INT32 delta_y, PHY_INT32 eye_cnt, PHY_INT32 num_cnt, PHY_INT32 PI_cal_en, PHY_INT32 num_ignore_cnt); ++ PHY_INT32 (*u2_save_current_entry) (struct u3phy_info *info); ++ PHY_INT32 (*u2_save_current_recovery) (struct u3phy_info *info); ++ PHY_INT32 (*u2_slew_rate_calibration) (struct u3phy_info *info); ++}; ++ ++#ifdef U3_PHY_LIB ++#define AUTOEXT ++#else ++#define AUTOEXT extern ++#endif ++ ++AUTOEXT struct u3phy_info *u3phy; ++AUTOEXT struct u3phy_info *u3phy_p1; ++AUTOEXT struct u3phy_operator *u3phy_ops; ++ ++/*********eye scan required*********/ ++ ++#define LO_BYTE(x) ((PHY_UINT8)((x) & 0xFF)) ++#define HI_BYTE(x) ((PHY_UINT8)(((x) & 0xFF00) >> 8)) ++ ++typedef enum ++{ ++ SCAN_UP, ++ SCAN_DN ++} enumScanDir; ++ ++struct strucScanRegion ++{ ++ PHY_INT8 bX_tl; ++ PHY_INT8 bY_tl; ++ PHY_INT8 bX_br; ++ PHY_INT8 bY_br; ++ PHY_INT8 bDeltaX; ++ PHY_INT8 bDeltaY; ++}; ++ ++struct strucTestCycle ++{ ++ PHY_UINT16 wEyeCnt; ++ PHY_INT8 bNumOfEyeCnt; ++ PHY_INT8 bPICalEn; ++ PHY_INT8 bNumOfIgnoreCnt; ++}; ++ ++#define ERRCNT_MAX 128 ++#define CYCLE_COUNT_MAX 15 ++ ++/// the map resolution is 128 x 128 pts ++#define MAX_X 127 ++#define MAX_Y 127 ++#define MIN_X 0 ++#define MIN_Y 0 ++ ++PHY_INT32 u3phy_init(void); ++ ++AUTOEXT struct strucScanRegion _rEye1; ++AUTOEXT struct strucScanRegion _rEye2; ++AUTOEXT struct strucTestCycle _rTestCycle; ++AUTOEXT PHY_UINT8 _bXcurr; ++AUTOEXT PHY_UINT8 _bYcurr; ++AUTOEXT enumScanDir _eScanDir; ++AUTOEXT PHY_INT8 _fgXChged; ++AUTOEXT PHY_INT8 _bPIResult; ++/* use local variable instead to save memory use */ ++#if 0 ++AUTOEXT PHY_UINT32 pwErrCnt0[CYCLE_COUNT_MAX][ERRCNT_MAX][ERRCNT_MAX]; ++AUTOEXT PHY_UINT32 pwErrCnt1[CYCLE_COUNT_MAX][ERRCNT_MAX][ERRCNT_MAX]; ++#endif ++ ++/***********************************/ ++#endif ++ +--- a/drivers/usb/host/pci-quirks.h ++++ 
b/drivers/usb/host/pci-quirks.h +@@ -1,7 +1,7 @@ + #ifndef __LINUX_USB_PCI_QUIRKS_H + #define __LINUX_USB_PCI_QUIRKS_H + +-#ifdef CONFIG_PCI ++#if defined (CONFIG_PCI) && !defined (CONFIG_USB_MT7621_XHCI_PLATFORM) + void uhci_reset_hc(struct pci_dev *pdev, unsigned long base); + int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base); + #endif /* CONFIG_PCI */ +--- a/drivers/usb/host/xhci-dbg.c ++++ b/drivers/usb/host/xhci-dbg.c +@@ -21,6 +21,9 @@ + */ + + #include "xhci.h" ++#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) ++#include "xhci-mtk.h" ++#endif + + #define XHCI_INIT_VALUE 0x0 + +--- a/drivers/usb/host/xhci-mem.c ++++ b/drivers/usb/host/xhci-mem.c +@@ -65,6 +65,9 @@ static struct xhci_segment *xhci_segment + + static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg) + { ++ if (!seg) ++ return; ++ + if (seg->trbs) { + dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma); + seg->trbs = NULL; +@@ -1446,9 +1449,17 @@ int xhci_endpoint_init(struct xhci_hcd * + max_burst = (usb_endpoint_maxp(&ep->desc) + & 0x1800) >> 11; + } ++#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) ++ if ((max_packet % 4 == 2) && (max_packet % 16 != 14) && (max_burst == 0) && usb_endpoint_dir_in(&ep->desc)) ++ max_packet += 2; ++#endif + break; + case USB_SPEED_FULL: + case USB_SPEED_LOW: ++#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) ++ if ((max_packet % 4 == 2) && (max_packet % 16 != 14) && (max_burst == 0) && usb_endpoint_dir_in(&ep->desc)) ++ max_packet += 2; ++#endif + break; + default: + BUG(); +--- /dev/null ++++ b/drivers/usb/host/xhci-mtk-power.c +@@ -0,0 +1,115 @@ ++#include "xhci-mtk.h" ++#include "xhci-mtk-power.h" ++#include "xhci.h" ++#include /* printk() */ ++#include ++#include ++ ++static int g_num_u3_port; ++static int g_num_u2_port; ++ ++ ++void enableXhciAllPortPower(struct xhci_hcd *xhci){ ++ int i; ++ u32 port_id, temp; ++ u32 __iomem *addr; ++ ++ g_num_u3_port = SSUSB_U3_PORT_NUM(readl(SSUSB_IP_CAP)); ++ g_num_u2_port = SSUSB_U2_PORT_NUM(readl(SSUSB_IP_CAP)); ++ ++ for(i=1; i<=g_num_u3_port; i++){ ++ port_id=i; ++ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*(port_id-1 & 0xff); ++ temp = xhci_readl(xhci, addr); ++ temp = xhci_port_state_to_neutral(temp); ++ temp |= PORT_POWER; ++ xhci_writel(xhci, temp, addr); ++ } ++ for(i=1; i<=g_num_u2_port; i++){ ++ port_id=i+g_num_u3_port; ++ addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS*(port_id-1 & 0xff); ++ temp = xhci_readl(xhci, addr); ++ temp = xhci_port_state_to_neutral(temp); ++ temp |= PORT_POWER; ++ xhci_writel(xhci, temp, addr); ++ } ++} ++ ++void enableAllClockPower(){ ++ ++ int i; ++ u32 temp; ++ ++ g_num_u3_port = SSUSB_U3_PORT_NUM(readl(SSUSB_IP_CAP)); ++ g_num_u2_port = SSUSB_U2_PORT_NUM(readl(SSUSB_IP_CAP)); ++ ++ //2. Enable xHC ++ writel(readl(SSUSB_IP_PW_CTRL) | (SSUSB_IP_SW_RST), SSUSB_IP_PW_CTRL); ++ writel(readl(SSUSB_IP_PW_CTRL) & (~SSUSB_IP_SW_RST), SSUSB_IP_PW_CTRL); ++ writel(readl(SSUSB_IP_PW_CTRL_1) & (~SSUSB_IP_PDN), SSUSB_IP_PW_CTRL_1); ++ ++ //1. 
Enable target ports ++ for(i=0; i ++#include "xhci.h" ++#include "xhci-mtk.h" ++ ++void enableXhciAllPortPower(struct xhci_hcd *xhci); ++void enableAllClockPower(void); ++void disablePortClockPower(void); ++void enablePortClockPower(int port_index, int port_rev); ++ ++#endif +--- /dev/null ++++ b/drivers/usb/host/xhci-mtk-scheduler.c +@@ -0,0 +1,608 @@ ++#include "xhci-mtk-scheduler.h" ++#include /* printk() */ ++ ++static struct sch_ep **ss_out_eps[MAX_EP_NUM]; ++static struct sch_ep **ss_in_eps[MAX_EP_NUM]; ++static struct sch_ep **hs_eps[MAX_EP_NUM]; //including tt isoc ++static struct sch_ep **tt_intr_eps[MAX_EP_NUM]; ++ ++ ++int mtk_xhci_scheduler_init(void){ ++ int i; ++ ++ for(i=0; idev_speed = dev_speed; ++ tmp_ep->isTT = isTT; ++ tmp_ep->is_in = is_in; ++ tmp_ep->ep_type = ep_type; ++ tmp_ep->maxp = maxp; ++ tmp_ep->interval = interval; ++ tmp_ep->burst = burst; ++ tmp_ep->mult = mult; ++ tmp_ep->offset = offset; ++ tmp_ep->repeat = repeat; ++ tmp_ep->pkts = pkts; ++ tmp_ep->cs_count = cs_count; ++ tmp_ep->burst_mode = burst_mode; ++ tmp_ep->bw_cost = bw_cost; ++ tmp_ep->ep = ep; ++ ep_array[i] = tmp_ep; ++ return SCH_SUCCESS; ++ } ++ } ++ return SCH_FAIL; ++} ++ ++int count_ss_bw(int is_in, int ep_type, int maxp, int interval, int burst, int mult, int offset, int repeat ++ , int td_size){ ++ int i, j, k; ++ int bw_required[3]; ++ int final_bw_required; ++ int bw_required_per_repeat; ++ int tmp_bw_required; ++ struct sch_ep *cur_sch_ep; ++ struct sch_ep **ep_array; ++ int cur_offset; ++ int cur_ep_offset; ++ int tmp_offset; ++ int tmp_interval; ++ int ep_offset; ++ int ep_interval; ++ int ep_repeat; ++ int ep_mult; ++ ++ if(is_in){ ++ ep_array = (struct sch_ep **)ss_in_eps; ++ } ++ else{ ++ ep_array = (struct sch_ep **)ss_out_eps; ++ } ++ ++ bw_required[0] = 0; ++ bw_required[1] = 0; ++ bw_required[2] = 0; ++ ++ if(repeat == 0){ ++ final_bw_required = 0; ++ for(i=0; iinterval; ++ ep_offset = cur_sch_ep->offset; ++ if(cur_sch_ep->repeat == 0){ ++ if(ep_interval >= interval){ ++ tmp_offset = ep_offset + ep_interval - offset; ++ tmp_interval = interval; ++ } ++ else{ ++ tmp_offset = offset + interval - ep_offset; ++ tmp_interval = ep_interval; ++ } ++ if(tmp_offset % tmp_interval == 0){ ++ final_bw_required += cur_sch_ep->bw_cost; ++ } ++ } ++ else{ ++ ep_repeat = cur_sch_ep->repeat; ++ ep_mult = cur_sch_ep->mult; ++ for(k=0; k<=ep_mult; k++){ ++ cur_ep_offset = ep_offset+(k*ep_mult); ++ if(ep_interval >= interval){ ++ tmp_offset = cur_ep_offset + ep_interval - offset; ++ tmp_interval = interval; ++ } ++ else{ ++ tmp_offset = offset + interval - cur_ep_offset; ++ tmp_interval = ep_interval; ++ } ++ if(tmp_offset % tmp_interval == 0){ ++ final_bw_required += cur_sch_ep->bw_cost; ++ break; ++ } ++ } ++ } ++ } ++ final_bw_required += td_size; ++ } ++ else{ ++ bw_required_per_repeat = maxp * (burst+1); ++ for(j=0; j<=mult; j++){ ++ tmp_bw_required = 0; ++ cur_offset = offset+(j*repeat); ++ for(i=0; iinterval; ++ ep_offset = cur_sch_ep->offset; ++ if(cur_sch_ep->repeat == 0){ ++ if(ep_interval >= interval){ ++ tmp_offset = ep_offset + ep_interval - cur_offset; ++ tmp_interval = interval; ++ } ++ else{ ++ tmp_offset = cur_offset + interval - ep_offset; ++ tmp_interval = ep_interval; ++ } ++ if(tmp_offset % tmp_interval == 0){ ++ tmp_bw_required += cur_sch_ep->bw_cost; ++ } ++ } ++ else{ ++ ep_repeat = cur_sch_ep->repeat; ++ ep_mult = cur_sch_ep->mult; ++ for(k=0; k<=ep_mult; k++){ ++ cur_ep_offset = ep_offset+(k*ep_repeat); ++ if(ep_interval >= interval){ ++ tmp_offset = cur_ep_offset + 
ep_interval - cur_offset; ++ tmp_interval = interval; ++ } ++ else{ ++ tmp_offset = cur_offset + interval - cur_ep_offset; ++ tmp_interval = ep_interval; ++ } ++ if(tmp_offset % tmp_interval == 0){ ++ tmp_bw_required += cur_sch_ep->bw_cost; ++ break; ++ } ++ } ++ } ++ } ++ bw_required[j] = tmp_bw_required; ++ } ++ final_bw_required = SS_BW_BOUND; ++ for(j=0; j<=mult; j++){ ++ if(bw_required[j] < final_bw_required){ ++ final_bw_required = bw_required[j]; ++ } ++ } ++ final_bw_required += bw_required_per_repeat; ++ } ++ return final_bw_required; ++} ++ ++int count_hs_bw(int ep_type, int maxp, int interval, int offset, int td_size){ ++ int i; ++ int bw_required; ++ struct sch_ep *cur_sch_ep; ++ int tmp_offset; ++ int tmp_interval; ++ int ep_offset; ++ int ep_interval; ++ int cur_tt_isoc_interval; //for isoc tt check ++ ++ bw_required = 0; ++ for(i=0; ioffset; ++ ep_interval = cur_sch_ep->interval; ++ ++ if(cur_sch_ep->isTT && cur_sch_ep->ep_type == USB_EP_ISOC){ ++ cur_tt_isoc_interval = ep_interval<<3; ++ if(ep_interval >= interval){ ++ tmp_offset = ep_offset + cur_tt_isoc_interval - offset; ++ tmp_interval = interval; ++ } ++ else{ ++ tmp_offset = offset + interval - ep_offset; ++ tmp_interval = cur_tt_isoc_interval; ++ } ++ if(cur_sch_ep->is_in){ ++ if((tmp_offset%tmp_interval >=2) && (tmp_offset%tmp_interval <= cur_sch_ep->cs_count)){ ++ bw_required += 188; ++ } ++ } ++ else{ ++ if(tmp_offset%tmp_interval <= cur_sch_ep->cs_count){ ++ bw_required += 188; ++ } ++ } ++ } ++ else{ ++ if(ep_interval >= interval){ ++ tmp_offset = ep_offset + ep_interval - offset; ++ tmp_interval = interval; ++ } ++ else{ ++ tmp_offset = offset + interval - ep_offset; ++ tmp_interval = ep_interval; ++ } ++ if(tmp_offset%tmp_interval == 0){ ++ bw_required += cur_sch_ep->bw_cost; ++ } ++ } ++ } ++ bw_required += td_size; ++ return bw_required; ++} ++ ++int count_tt_isoc_bw(int is_in, int maxp, int interval, int offset, int td_size){ ++ char is_cs; ++ int mframe_idx, frame_idx, s_frame, s_mframe, cur_mframe; ++ int bw_required, max_bw; ++ int ss_cs_count; ++ int cs_mframe; ++ int max_frame; ++ int i,j; ++ struct sch_ep *cur_sch_ep; ++ int ep_offset; ++ int ep_interval; ++ int ep_cs_count; ++ int tt_isoc_interval; //for isoc tt check ++ int cur_tt_isoc_interval; //for isoc tt check ++ int tmp_offset; ++ int tmp_interval; ++ ++ is_cs = 0; ++ ++ tt_isoc_interval = interval<<3; //frame to mframe ++ if(is_in){ ++ is_cs = 1; ++ } ++ s_frame = offset/8; ++ s_mframe = offset%8; ++ ss_cs_count = (maxp + (188 - 1))/188; ++ if(is_cs){ ++ cs_mframe = offset%8 + 2 + ss_cs_count; ++ if (cs_mframe <= 6) ++ ss_cs_count += 2; ++ else if (cs_mframe == 7) ++ ss_cs_count++; ++ else if (cs_mframe > 8) ++ return -1; ++ } ++ max_bw = 0; ++ if(is_in){ ++ i=2; ++ } ++ for(cur_mframe = offset+i; ioffset; ++ ep_interval = cur_sch_ep->interval; ++ if(cur_sch_ep->isTT && cur_sch_ep->ep_type == USB_EP_ISOC){ ++ //isoc tt ++ //check if mframe offset overlap ++ //if overlap, add 188 to the bw ++ cur_tt_isoc_interval = ep_interval<<3; ++ if(cur_tt_isoc_interval >= tt_isoc_interval){ ++ tmp_offset = (ep_offset+cur_tt_isoc_interval) - cur_mframe; ++ tmp_interval = tt_isoc_interval; ++ } ++ else{ ++ tmp_offset = (cur_mframe+tt_isoc_interval) - ep_offset; ++ tmp_interval = cur_tt_isoc_interval; ++ } ++ if(cur_sch_ep->is_in){ ++ if((tmp_offset%tmp_interval >=2) && (tmp_offset%tmp_interval <= cur_sch_ep->cs_count)){ ++ bw_required += 188; ++ } ++ } ++ else{ ++ if(tmp_offset%tmp_interval <= cur_sch_ep->cs_count){ ++ bw_required += 188; ++ } ++ } ++ ++ } 
++ else if(cur_sch_ep->ep_type == USB_EP_INT || cur_sch_ep->ep_type == USB_EP_ISOC){ ++ //check if mframe ++ if(ep_interval >= tt_isoc_interval){ ++ tmp_offset = (ep_offset+ep_interval) - cur_mframe; ++ tmp_interval = tt_isoc_interval; ++ } ++ else{ ++ tmp_offset = (cur_mframe+tt_isoc_interval) - ep_offset; ++ tmp_interval = ep_interval; ++ } ++ if(tmp_offset%tmp_interval == 0){ ++ bw_required += cur_sch_ep->bw_cost; ++ } ++ } ++ } ++ bw_required += 188; ++ if(bw_required > max_bw){ ++ max_bw = bw_required; ++ } ++ } ++ return max_bw; ++} ++ ++int count_tt_intr_bw(int interval, int frame_offset){ ++ //check all eps in tt_intr_eps ++ int ret; ++ int i,j; ++ int ep_offset; ++ int ep_interval; ++ int tmp_offset; ++ int tmp_interval; ++ ret = SCH_SUCCESS; ++ struct sch_ep *cur_sch_ep; ++ ++ for(i=0; ioffset; ++ ep_interval = cur_sch_ep->interval; ++ if(ep_interval >= interval){ ++ tmp_offset = ep_offset + ep_interval - frame_offset; ++ tmp_interval = interval; ++ } ++ else{ ++ tmp_offset = frame_offset + interval - ep_offset; ++ tmp_interval = ep_interval; ++ } ++ ++ if(tmp_offset%tmp_interval==0){ ++ return SCH_FAIL; ++ } ++ } ++ return SCH_SUCCESS; ++} ++ ++struct sch_ep * mtk_xhci_scheduler_remove_ep(int dev_speed, int is_in, int isTT, int ep_type, mtk_u32 *ep){ ++ int i; ++ struct sch_ep **ep_array; ++ struct sch_ep *cur_ep; ++ ++ if (is_in && dev_speed == USB_SPEED_SUPER) { ++ ep_array = (struct sch_ep **)ss_in_eps; ++ } ++ else if (dev_speed == USB_SPEED_SUPER) { ++ ep_array = (struct sch_ep **)ss_out_eps; ++ } ++ else if (dev_speed == USB_SPEED_HIGH || (isTT && ep_type == USB_EP_ISOC)) { ++ ep_array = (struct sch_ep **)hs_eps; ++ } ++ else { ++ ep_array = (struct sch_ep **)tt_intr_eps; ++ } ++ for (i = 0; i < MAX_EP_NUM; i++) { ++ cur_ep = (struct sch_ep *)ep_array[i]; ++ if(cur_ep != NULL && cur_ep->ep == ep){ ++ ep_array[i] = NULL; ++ return cur_ep; ++ } ++ } ++ return NULL; ++} ++ ++int mtk_xhci_scheduler_add_ep(int dev_speed, int is_in, int isTT, int ep_type, int maxp, int interval, int burst ++ , int mult, mtk_u32 *ep, mtk_u32 *ep_ctx, struct sch_ep *sch_ep){ ++ mtk_u32 bPkts = 0; ++ mtk_u32 bCsCount = 0; ++ mtk_u32 bBm = 1; ++ mtk_u32 bOffset = 0; ++ mtk_u32 bRepeat = 0; ++ int ret; ++ struct mtk_xhci_ep_ctx *temp_ep_ctx; ++ int td_size; ++ int mframe_idx, frame_idx; ++ int bw_cost; ++ int cur_bw, best_bw, best_bw_idx,repeat, max_repeat, best_bw_repeat; ++ int cur_offset, cs_mframe; ++ int break_out; ++ int frame_interval; ++ ++ printk(KERN_ERR "add_ep parameters, dev_speed %d, is_in %d, isTT %d, ep_type %d, maxp %d, interval %d, burst %d, mult %d, ep 0x%x, ep_ctx 0x%x, sch_ep 0x%x\n", dev_speed, is_in, isTT, ep_type, maxp ++ , interval, burst, mult, ep, ep_ctx, sch_ep); ++ if(isTT && ep_type == USB_EP_INT && ((dev_speed == USB_SPEED_LOW) || (dev_speed == USB_SPEED_FULL))){ ++ frame_interval = interval >> 3; ++ for(frame_idx=0; frame_idx>3; ++ for(frame_idx=0; frame_idx 0 && cur_bw < best_bw){ ++ best_bw_idx = cur_offset; ++ best_bw = cur_bw; ++ if(cur_bw == td_size || cur_bw < (HS_BW_BOUND>>1)){ ++ break_out = 1; ++ break; ++ } ++ } ++ } ++ } ++ if(best_bw_idx == -1){ ++ return SCH_FAIL; ++ } ++ else{ ++ bOffset = best_bw_idx; ++ bPkts = 1; ++ bCsCount = (maxp + (188 - 1)) / 188; ++ if(is_in){ ++ cs_mframe = bOffset%8 + 2 + bCsCount; ++ if (cs_mframe <= 6) ++ bCsCount += 2; ++ else if (cs_mframe == 7) ++ bCsCount++; ++ } ++ bw_cost = 188; ++ bRepeat = 0; ++ if(add_sch_ep( dev_speed, is_in, isTT, ep_type, maxp, interval, burst, mult ++ , bOffset, bRepeat, bPkts, bCsCount, bBm, 
bw_cost, ep, sch_ep) == SCH_FAIL){ ++ return SCH_FAIL; ++ } ++ ret = SCH_SUCCESS; ++ } ++ } ++ else if((dev_speed == USB_SPEED_FULL || dev_speed == USB_SPEED_LOW) && ep_type == USB_EP_INT){ ++ bPkts = 1; ++ ret = SCH_SUCCESS; ++ } ++ else if(dev_speed == USB_SPEED_FULL && ep_type == USB_EP_ISOC){ ++ bPkts = 1; ++ ret = SCH_SUCCESS; ++ } ++ else if(dev_speed == USB_SPEED_HIGH && (ep_type == USB_EP_INT || ep_type == USB_EP_ISOC)){ ++ best_bw = HS_BW_BOUND; ++ best_bw_idx = -1; ++ cur_bw = 0; ++ td_size = maxp*(burst+1); ++ for(cur_offset = 0; cur_offset 0 && cur_bw < best_bw){ ++ best_bw_idx = cur_offset; ++ best_bw = cur_bw; ++ if(cur_bw == td_size || cur_bw < (HS_BW_BOUND>>1)){ ++ break; ++ } ++ } ++ } ++ if(best_bw_idx == -1){ ++ return SCH_FAIL; ++ } ++ else{ ++ bOffset = best_bw_idx; ++ bPkts = burst + 1; ++ bCsCount = 0; ++ bw_cost = td_size; ++ bRepeat = 0; ++ if(add_sch_ep(dev_speed, is_in, isTT, ep_type, maxp, interval, burst, mult ++ , bOffset, bRepeat, bPkts, bCsCount, bBm, bw_cost, ep, sch_ep) == SCH_FAIL){ ++ return SCH_FAIL; ++ } ++ ret = SCH_SUCCESS; ++ } ++ } ++ else if(dev_speed == USB_SPEED_SUPER && (ep_type == USB_EP_INT || ep_type == USB_EP_ISOC)){ ++ best_bw = SS_BW_BOUND; ++ best_bw_idx = -1; ++ cur_bw = 0; ++ td_size = maxp * (mult+1) * (burst+1); ++ if(mult == 0){ ++ max_repeat = 0; ++ } ++ else{ ++ max_repeat = (interval-1)/(mult+1); ++ } ++ break_out = 0; ++ for(frame_idx = 0; (frame_idx < interval) && !break_out; frame_idx++){ ++ for(repeat = max_repeat; repeat >= 0; repeat--){ ++ cur_bw = count_ss_bw(is_in, ep_type, maxp, interval, burst, mult, frame_idx ++ , repeat, td_size); ++ printk(KERN_ERR "count_ss_bw, frame_idx %d, repeat %d, td_size %d, result bw %d\n" ++ , frame_idx, repeat, td_size, cur_bw); ++ if(cur_bw > 0 && cur_bw < best_bw){ ++ best_bw_idx = frame_idx; ++ best_bw_repeat = repeat; ++ best_bw = cur_bw; ++ if(cur_bw <= td_size || cur_bw < (HS_BW_BOUND>>1)){ ++ break_out = 1; ++ break; ++ } ++ } ++ } ++ } ++ printk(KERN_ERR "final best idx %d, best repeat %d\n", best_bw_idx, best_bw_repeat); ++ if(best_bw_idx == -1){ ++ return SCH_FAIL; ++ } ++ else{ ++ bOffset = best_bw_idx; ++ bCsCount = 0; ++ bRepeat = best_bw_repeat; ++ if(bRepeat == 0){ ++ bw_cost = (burst+1)*(mult+1)*maxp; ++ bPkts = (burst+1)*(mult+1); ++ } ++ else{ ++ bw_cost = (burst+1)*maxp; ++ bPkts = (burst+1); ++ } ++ if(add_sch_ep(dev_speed, is_in, isTT, ep_type, maxp, interval, burst, mult ++ , bOffset, bRepeat, bPkts, bCsCount, bBm, bw_cost, ep, sch_ep) == SCH_FAIL){ ++ return SCH_FAIL; ++ } ++ ret = SCH_SUCCESS; ++ } ++ } ++ else{ ++ bPkts = 1; ++ ret = SCH_SUCCESS; ++ } ++ if(ret == SCH_SUCCESS){ ++ temp_ep_ctx = (struct mtk_xhci_ep_ctx *)ep_ctx; ++ temp_ep_ctx->reserved[0] |= (BPKTS(bPkts) | BCSCOUNT(bCsCount) | BBM(bBm)); ++ temp_ep_ctx->reserved[1] |= (BOFFSET(bOffset) | BREPEAT(bRepeat)); ++ ++ printk(KERN_DEBUG "[DBG] BPKTS: %x, BCSCOUNT: %x, BBM: %x\n", bPkts, bCsCount, bBm); ++ printk(KERN_DEBUG "[DBG] BOFFSET: %x, BREPEAT: %x\n", bOffset, bRepeat); ++ return SCH_SUCCESS; ++ } ++ else{ ++ return SCH_FAIL; ++ } ++} +--- /dev/null ++++ b/drivers/usb/host/xhci-mtk-scheduler.h +@@ -0,0 +1,77 @@ ++#ifndef _XHCI_MTK_SCHEDULER_H ++#define _XHCI_MTK_SCHEDULER_H ++ ++#define MTK_SCH_NEW 1 ++ ++#define SCH_SUCCESS 1 ++#define SCH_FAIL 0 ++ ++#define MAX_EP_NUM 64 ++#define SS_BW_BOUND 51000 ++#define HS_BW_BOUND 6144 ++ ++#define USB_EP_CONTROL 0 ++#define USB_EP_ISOC 1 ++#define USB_EP_BULK 2 ++#define USB_EP_INT 3 ++ ++#define USB_SPEED_LOW 1 ++#define USB_SPEED_FULL 2 ++#define 
USB_SPEED_HIGH 3 ++#define USB_SPEED_SUPER 5 ++ ++/* mtk scheduler bitmasks */ ++#define BPKTS(p) ((p) & 0x3f) ++#define BCSCOUNT(p) (((p) & 0x7) << 8) ++#define BBM(p) ((p) << 11) ++#define BOFFSET(p) ((p) & 0x3fff) ++#define BREPEAT(p) (((p) & 0x7fff) << 16) ++ ++ ++#if 1 ++typedef unsigned int mtk_u32; ++typedef unsigned long long mtk_u64; ++#endif ++ ++#define NULL ((void *)0) ++ ++struct mtk_xhci_ep_ctx { ++ mtk_u32 ep_info; ++ mtk_u32 ep_info2; ++ mtk_u64 deq; ++ mtk_u32 tx_info; ++ /* offset 0x14 - 0x1f reserved for HC internal use */ ++ mtk_u32 reserved[3]; ++}; ++ ++ ++struct sch_ep ++{ ++ //device info ++ int dev_speed; ++ int isTT; ++ //ep info ++ int is_in; ++ int ep_type; ++ int maxp; ++ int interval; ++ int burst; ++ int mult; ++ //scheduling info ++ int offset; ++ int repeat; ++ int pkts; ++ int cs_count; ++ int burst_mode; ++ //other ++ int bw_cost; //bandwidth cost in each repeat; including overhead ++ mtk_u32 *ep; //address of usb_endpoint pointer ++}; ++ ++int mtk_xhci_scheduler_init(void); ++int mtk_xhci_scheduler_add_ep(int dev_speed, int is_in, int isTT, int ep_type, int maxp, int interval, int burst ++ , int mult, mtk_u32 *ep, mtk_u32 *ep_ctx, struct sch_ep *sch_ep); ++struct sch_ep * mtk_xhci_scheduler_remove_ep(int dev_speed, int is_in, int isTT, int ep_type, mtk_u32 *ep); ++ ++ ++#endif +--- /dev/null ++++ b/drivers/usb/host/xhci-mtk.c +@@ -0,0 +1,265 @@ ++#include "xhci-mtk.h" ++#include "xhci-mtk-power.h" ++#include "xhci.h" ++#include "mtk-phy.h" ++#ifdef CONFIG_C60802_SUPPORT ++#include "mtk-phy-c60802.h" ++#endif ++#include "xhci-mtk-scheduler.h" ++#include /* printk() */ ++#include ++#include ++#include ++#include ++#include ++ ++void setInitialReg(void ) ++{ ++ __u32 __iomem *addr; ++ u32 temp; ++ ++ /* set SSUSB DMA burst size to 128B */ ++ addr = SSUSB_U3_XHCI_BASE + SSUSB_HDMA_CFG; ++ temp = SSUSB_HDMA_CFG_MT7621_VALUE; ++ writel(temp, addr); ++ ++ /* extend U3 LTSSM Polling.LFPS timeout value */ ++ addr = SSUSB_U3_XHCI_BASE + U3_LTSSM_TIMING_PARAMETER3; ++ temp = U3_LTSSM_TIMING_PARAMETER3_VALUE; ++ writel(temp, addr); ++ ++ /* EOF */ ++ addr = SSUSB_U3_XHCI_BASE + SYNC_HS_EOF; ++ temp = SYNC_HS_EOF_VALUE; ++ writel(temp, addr); ++ ++#if defined (CONFIG_PERIODIC_ENP) ++ /* HSCH_CFG1: SCH2_FIFO_DEPTH */ ++ addr = SSUSB_U3_XHCI_BASE + HSCH_CFG1; ++ temp = readl(addr); ++ temp &= ~(0x3 << SCH2_FIFO_DEPTH_OFFSET); ++ writel(temp, addr); ++#endif ++ ++ /* Doorbell handling */ ++ addr = SIFSLV_IPPC + SSUSB_IP_SPAR0; ++ temp = 0x1; ++ writel(temp, addr); ++ ++ /* Set SW PLL Stable mode to 1 for U2 LPM device remote wakeup */ ++ /* Port 0 */ ++ addr = U2_PHY_BASE + U2_PHYD_CR1; ++ temp = readl(addr); ++ temp &= ~(0x3 << 18); ++ temp |= (1 << 18); ++ writel(temp, addr); ++ ++ /* Port 1 */ ++ addr = U2_PHY_BASE_P1 + U2_PHYD_CR1; ++ temp = readl(addr); ++ temp &= ~(0x3 << 18); ++ temp |= (1 << 18); ++ writel(temp, addr); ++} ++ ++ ++void setLatchSel(void){ ++ __u32 __iomem *latch_sel_addr; ++ u32 latch_sel_value; ++ latch_sel_addr = U3_PIPE_LATCH_SEL_ADD; ++ latch_sel_value = ((U3_PIPE_LATCH_TX)<<2) | (U3_PIPE_LATCH_RX); ++ writel(latch_sel_value, latch_sel_addr); ++} ++ ++void reinitIP(void){ ++ __u32 __iomem *ip_reset_addr; ++ u32 ip_reset_value; ++ ++ enableAllClockPower(); ++ mtk_xhci_scheduler_init(); ++} ++ ++void dbg_prb_out(void){ ++ mtk_probe_init(0x0f0f0f0f); ++ mtk_probe_out(0xffffffff); ++ mtk_probe_out(0x01010101); ++ mtk_probe_out(0x02020202); ++ mtk_probe_out(0x04040404); ++ mtk_probe_out(0x08080808); ++ mtk_probe_out(0x10101010); ++ 
mtk_probe_out(0x20202020); ++ mtk_probe_out(0x40404040); ++ mtk_probe_out(0x80808080); ++ mtk_probe_out(0x55555555); ++ mtk_probe_out(0xaaaaaaaa); ++} ++ ++ ++ ++/////////////////////////////////////////////////////////////////////////////// ++ ++#define RET_SUCCESS 0 ++#define RET_FAIL 1 ++ ++static int dbg_u3w(int argc, char**argv) ++{ ++ int u4TimingValue; ++ char u1TimingValue; ++ int u4TimingAddress; ++ ++ if (argc<3) ++ { ++ printk(KERN_ERR "Arg: address value\n"); ++ return RET_FAIL; ++ } ++ u3phy_init(); ++ ++ u4TimingAddress = (int)simple_strtol(argv[1], &argv[1], 16); ++ u4TimingValue = (int)simple_strtol(argv[2], &argv[2], 16); ++ u1TimingValue = u4TimingValue & 0xff; ++ /* access MMIO directly */ ++ writel(u1TimingValue, u4TimingAddress); ++ printk(KERN_ERR "Write done\n"); ++ return RET_SUCCESS; ++ ++} ++ ++static int dbg_u3r(int argc, char**argv) ++{ ++ char u1ReadTimingValue; ++ int u4TimingAddress; ++ if (argc<2) ++ { ++ printk(KERN_ERR "Arg: address\n"); ++ return 0; ++ } ++ u3phy_init(); ++ mdelay(500); ++ u4TimingAddress = (int)simple_strtol(argv[1], &argv[1], 16); ++ /* access MMIO directly */ ++ u1ReadTimingValue = readl(u4TimingAddress); ++ printk(KERN_ERR "Value = 0x%x\n", u1ReadTimingValue); ++ return 0; ++} ++ ++static int dbg_u3init(int argc, char**argv) ++{ ++ int ret; ++ ret = u3phy_init(); ++ printk(KERN_ERR "phy registers and operations initial done\n"); ++ if(u3phy_ops->u2_slew_rate_calibration){ ++ u3phy_ops->u2_slew_rate_calibration(u3phy); ++ } ++ else{ ++ printk(KERN_ERR "WARN: PHY doesn't implement u2 slew rate calibration function\n"); ++ } ++ if(u3phy_ops->init(u3phy) == PHY_TRUE) ++ return RET_SUCCESS; ++ return RET_FAIL; ++} ++ ++void dbg_setU1U2(int argc, char**argv){ ++ struct xhci_hcd *xhci; ++ int u1_value; ++ int u2_value; ++ u32 port_id, temp; ++ u32 __iomem *addr; ++ ++ if (argc<3) ++ { ++ printk(KERN_ERR "Arg: u1value u2value\n"); ++ return RET_FAIL; ++ } ++ ++ u1_value = (int)simple_strtol(argv[1], &argv[1], 10); ++ u2_value = (int)simple_strtol(argv[2], &argv[2], 10); ++ addr = (SSUSB_U3_XHCI_BASE + 0x424); ++ temp = readl(addr); ++ temp = temp & (~(0x0000ffff)); ++ temp = temp | u1_value | (u2_value<<8); ++ writel(temp, addr); ++} ++/////////////////////////////////////////////////////////////////////////////// ++ ++int call_function(char *buf) ++{ ++ int i; ++ int argc; ++ char *argv[80]; ++ ++ argc = 0; ++ do ++ { ++ argv[argc] = strsep(&buf, " "); ++ printk(KERN_DEBUG "[%d] %s\r\n", argc, argv[argc]); ++ argc++; ++ } while (buf); ++ if (!strcmp("dbg.r", argv[0])) ++ dbg_prb_out(); ++ else if (!strcmp("dbg.u3w", argv[0])) ++ dbg_u3w(argc, argv); ++ else if (!strcmp("dbg.u3r", argv[0])) ++ dbg_u3r(argc, argv); ++ else if (!strcmp("dbg.u3i", argv[0])) ++ dbg_u3init(argc, argv); ++ else if (!strcmp("pw.u1u2", argv[0])) ++ dbg_setU1U2(argc, argv); ++ return 0; ++} ++ ++long xhci_mtk_test_unlock_ioctl(struct file *file, unsigned int cmd, unsigned long arg) ++{ ++ char w_buf[200]; ++ char r_buf[200] = "this is a test"; ++ int len = 200; ++ ++ switch (cmd) { ++ case IOCTL_READ: ++ copy_to_user((char *) arg, r_buf, len); ++ printk(KERN_DEBUG "IOCTL_READ: %s\r\n", r_buf); ++ break; ++ case IOCTL_WRITE: ++ copy_from_user(w_buf, (char *) arg, len); ++ printk(KERN_DEBUG "IOCTL_WRITE: %s\r\n", w_buf); ++ ++ //invoke function ++ return call_function(w_buf); ++ break; ++ default: ++ return -ENOTTY; ++ } ++ ++ return len; ++} ++ ++int xhci_mtk_test_open(struct inode *inode, struct file *file) ++{ ++ ++ printk(KERN_DEBUG "xhci_mtk_test open: 
successful\n"); ++ return 0; ++} ++ ++int xhci_mtk_test_release(struct inode *inode, struct file *file) ++{ ++ ++ printk(KERN_DEBUG "xhci_mtk_test release: successful\n"); ++ return 0; ++} ++ ++ssize_t xhci_mtk_test_read(struct file *file, char *buf, size_t count, loff_t *ptr) ++{ ++ ++ printk(KERN_DEBUG "xhci_mtk_test read: returning zero bytes\n"); ++ return 0; ++} ++ ++ssize_t xhci_mtk_test_write(struct file *file, const char *buf, size_t count, loff_t * ppos) ++{ ++ ++ printk(KERN_DEBUG "xhci_mtk_test write: accepting zero bytes\n"); ++ return 0; ++} ++ ++ ++ ++ +--- /dev/null ++++ b/drivers/usb/host/xhci-mtk.h +@@ -0,0 +1,120 @@ ++#ifndef _XHCI_MTK_H ++#define _XHCI_MTK_H ++ ++#include ++#include "xhci.h" ++ ++#define SSUSB_U3_XHCI_BASE 0xBE1C0000 ++#define SSUSB_U3_MAC_BASE 0xBE1C2400 ++#define SSUSB_U3_SYS_BASE 0xBE1C2600 ++#define SSUSB_U2_SYS_BASE 0xBE1C3400 ++#define SSUB_SIF_SLV_TOP 0xBE1D0000 ++#define SIFSLV_IPPC (SSUB_SIF_SLV_TOP + 0x700) ++ ++#define U3_PIPE_LATCH_SEL_ADD SSUSB_U3_MAC_BASE + 0x130 ++#define U3_PIPE_LATCH_TX 0 ++#define U3_PIPE_LATCH_RX 0 ++ ++#define U3_UX_EXIT_LFPS_TIMING_PAR 0xa0 ++#define U3_REF_CK_PAR 0xb0 ++#define U3_RX_UX_EXIT_LFPS_REF_OFFSET 8 ++#define U3_RX_UX_EXIT_LFPS_REF 3 ++#define U3_REF_CK_VAL 10 ++ ++#define U3_TIMING_PULSE_CTRL 0xb4 ++#define CNT_1US_VALUE 63 //62.5MHz:63, 70MHz:70, 80MHz:80, 100MHz:100, 125MHz:125 ++ ++#define USB20_TIMING_PARAMETER 0x40 ++#define TIME_VALUE_1US 63 //62.5MHz:63, 80MHz:80, 100MHz:100, 125MHz:125 ++ ++#define LINK_PM_TIMER 0x8 ++#define PM_LC_TIMEOUT_VALUE 3 ++ ++#define XHCI_IMOD 0x624 ++#define XHCI_IMOD_MT7621_VALUE 0x10 ++ ++#define SSUSB_HDMA_CFG 0x950 ++#define SSUSB_HDMA_CFG_MT7621_VALUE 0x10E0E0C ++ ++#define U3_LTSSM_TIMING_PARAMETER3 0x2514 ++#define U3_LTSSM_TIMING_PARAMETER3_VALUE 0x3E8012C ++ ++#define U2_PHYD_CR1 0x64 ++ ++#define SSUSB_IP_SPAR0 0xC8 ++ ++#define SYNC_HS_EOF 0x938 ++#define SYNC_HS_EOF_VALUE 0x201F3 ++ ++#define HSCH_CFG1 0x960 ++#define SCH2_FIFO_DEPTH_OFFSET 16 ++ ++ ++#define SSUSB_IP_PW_CTRL (SIFSLV_IPPC+0x0) ++#define SSUSB_IP_SW_RST (1<<0) ++#define SSUSB_IP_PW_CTRL_1 (SIFSLV_IPPC+0x4) ++#define SSUSB_IP_PDN (1<<0) ++#define SSUSB_U3_CTRL(p) (SIFSLV_IPPC+0x30+(p*0x08)) ++#define SSUSB_U3_PORT_DIS (1<<0) ++#define SSUSB_U3_PORT_PDN (1<<1) ++#define SSUSB_U3_PORT_HOST_SEL (1<<2) ++#define SSUSB_U3_PORT_CKBG_EN (1<<3) ++#define SSUSB_U3_PORT_MAC_RST (1<<4) ++#define SSUSB_U3_PORT_PHYD_RST (1<<5) ++#define SSUSB_U2_CTRL(p) (SIFSLV_IPPC+(0x50)+(p*0x08)) ++#define SSUSB_U2_PORT_DIS (1<<0) ++#define SSUSB_U2_PORT_PDN (1<<1) ++#define SSUSB_U2_PORT_HOST_SEL (1<<2) ++#define SSUSB_U2_PORT_CKBG_EN (1<<3) ++#define SSUSB_U2_PORT_MAC_RST (1<<4) ++#define SSUSB_U2_PORT_PHYD_RST (1<<5) ++#define SSUSB_IP_CAP (SIFSLV_IPPC+0x024) ++ ++#define SSUSB_U3_PORT_NUM(p) (p & 0xff) ++#define SSUSB_U2_PORT_NUM(p) ((p>>8) & 0xff) ++ ++ ++#define XHCI_MTK_TEST_MAJOR 234 ++#define DEVICE_NAME "xhci_mtk_test" ++ ++#define CLI_MAGIC 'CLI' ++#define IOCTL_READ _IOR(CLI_MAGIC, 0, int) ++#define IOCTL_WRITE _IOW(CLI_MAGIC, 1, int) ++ ++void reinitIP(void); ++void setInitialReg(void); ++void dbg_prb_out(void); ++int call_function(char *buf); ++ ++long xhci_mtk_test_unlock_ioctl(struct file *file, unsigned int cmd, unsigned long arg); ++int xhci_mtk_test_open(struct inode *inode, struct file *file); ++int xhci_mtk_test_release(struct inode *inode, struct file *file); ++ssize_t xhci_mtk_test_read(struct file *file, char *buf, size_t count, loff_t *ptr); ++ssize_t xhci_mtk_test_write(struct file *file, const 
char *buf, size_t count, loff_t * ppos); ++ ++/* ++ mediatek probe out ++*/ ++/************************************************************************************/ ++ ++#define SW_PRB_OUT_ADDR (SIFSLV_IPPC+0xc0) ++#define PRB_MODULE_SEL_ADDR (SIFSLV_IPPC+0xbc) ++ ++static inline void mtk_probe_init(const u32 byte){ ++ __u32 __iomem *ptr = (__u32 __iomem *) PRB_MODULE_SEL_ADDR; ++ writel(byte, ptr); ++} ++ ++static inline void mtk_probe_out(const u32 value){ ++ __u32 __iomem *ptr = (__u32 __iomem *) SW_PRB_OUT_ADDR; ++ writel(value, ptr); ++} ++ ++static inline u32 mtk_probe_value(void){ ++ __u32 __iomem *ptr = (__u32 __iomem *) SW_PRB_OUT_ADDR; ++ ++ return readl(ptr); ++} ++ ++ ++#endif +--- a/drivers/usb/host/xhci-plat.c ++++ b/drivers/usb/host/xhci-plat.c +@@ -25,6 +25,13 @@ static void xhci_plat_quirks(struct devi + * dev struct in order to setup MSI + */ + xhci->quirks |= XHCI_PLAT; ++#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) ++ /* MTK host controller gives a spurious successful event after a ++ * short transfer. Ignore it. ++ */ ++ xhci->quirks |= XHCI_SPURIOUS_SUCCESS; ++ xhci->quirks |= XHCI_LPM_SUPPORT; ++#endif + } + + /* called during probe() after chip reset completes */ +@@ -96,20 +103,32 @@ static int xhci_plat_probe(struct platfo + + driver = &xhci_plat_xhci_driver; + ++#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) ++ irq = XHC_IRQ; ++#else + irq = platform_get_irq(pdev, 0); ++#endif ++ + if (irq < 0) + return -ENODEV; + ++#if !defined (CONFIG_USB_MT7621_XHCI_PLATFORM) + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; ++#endif + + hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev)); + if (!hcd) + return -ENOMEM; + ++#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) ++ hcd->rsrc_start = (uint32_t)XHC_IO_START; ++ hcd->rsrc_len = XHC_IO_LENGTH; ++#else + hcd->rsrc_start = res->start; + hcd->rsrc_len = resource_size(res); ++#endif + + if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, + driver->description)) { +--- a/drivers/usb/host/xhci-ring.c ++++ b/drivers/usb/host/xhci-ring.c +@@ -236,7 +236,6 @@ static void inc_enq(struct xhci_hcd *xhc + */ + if (!chain && !more_trbs_coming) + break; +- + /* If we're not dealing with 0.95 hardware or + * isoc rings on AMD 0.96 host, + * carry over the chain bit of the previous TRB +@@ -273,16 +272,20 @@ static void inc_enq(struct xhci_hcd *xhc + static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring, + unsigned int num_trbs) + { ++#if !defined (CONFIG_USB_MT7621_XHCI_PLATFORM) + int num_trbs_in_deq_seg; ++#endif + + if (ring->num_trbs_free < num_trbs) + return 0; + ++#if !defined (CONFIG_USB_MT7621_XHCI_PLATFORM) + if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) { + num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs; + if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg) + return 0; + } ++#endif + + return 1; + } +@@ -2910,6 +2913,7 @@ static int prepare_ring(struct xhci_hcd + next = ring->enqueue; + + while (last_trb(xhci, ring, ring->enq_seg, next)) { ++#if !defined (CONFIG_USB_MT7621_XHCI_PLATFORM) + /* If we're not dealing with 0.95 hardware or isoc rings + * on AMD 0.96 host, clear the chain bit. 
+ */ +@@ -2919,7 +2923,9 @@ static int prepare_ring(struct xhci_hcd + next->link.control &= cpu_to_le32(~TRB_CHAIN); + else + next->link.control |= cpu_to_le32(TRB_CHAIN); +- ++#else ++ next->link.control &= cpu_to_le32(~TRB_CHAIN); ++#endif + wmb(); + next->link.control ^= cpu_to_le32(TRB_CYCLE); + +@@ -3049,6 +3055,9 @@ static void giveback_first_trb(struct xh + start_trb->field[3] |= cpu_to_le32(start_cycle); + else + start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE); ++#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) ++ wmb(); ++#endif + xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id); + } + +@@ -3108,6 +3117,29 @@ static u32 xhci_td_remainder(unsigned in + return (remainder >> 10) << 17; + } + ++#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) ++static u32 mtk_xhci_td_remainder(unsigned int td_transfer_size, unsigned int td_running_total, unsigned int maxp, unsigned trb_buffer_length) ++{ ++ u32 max = 31; ++ int remainder, td_packet_count, packet_transferred; ++ ++ //0 for the last TRB ++ //FIXME: need to workaround if there is ZLP in this TD ++ if (td_running_total + trb_buffer_length == td_transfer_size) ++ return 0; ++ ++ //FIXME: need to take care of high-bandwidth (MAX_ESIT) ++ packet_transferred = (td_running_total /*+ trb_buffer_length*/) / maxp; ++ td_packet_count = DIV_ROUND_UP(td_transfer_size, maxp); ++ remainder = td_packet_count - packet_transferred; ++ ++ if (remainder > max) ++ return max << 17; ++ else ++ return remainder << 17; ++} ++#endif ++ + /* + * For xHCI 1.0 host controllers, TD size is the number of max packet sized + * packets remaining in the TD (*not* including this TRB). +@@ -3245,6 +3277,7 @@ static int queue_bulk_sg_tx(struct xhci_ + } + + /* Set the TRB length, TD size, and interrupter fields. */ ++#if !defined (CONFIG_USB_MT7621_XHCI_PLATFORM) + if (xhci->hci_version < 0x100) { + remainder = xhci_td_remainder( + urb->transfer_buffer_length - +@@ -3254,6 +3287,13 @@ static int queue_bulk_sg_tx(struct xhci_ + trb_buff_len, total_packet_count, urb, + num_trbs - 1); + } ++#else ++ if (num_trbs > 1) ++ remainder = mtk_xhci_td_remainder(urb->transfer_buffer_length, ++ running_total, urb->ep->desc.wMaxPacketSize, trb_buff_len); ++#endif ++ ++ + length_field = TRB_LEN(trb_buff_len) | + remainder | + TRB_INTR_TARGET(0); +@@ -3316,6 +3356,9 @@ int xhci_queue_bulk_tx(struct xhci_hcd * + int running_total, trb_buff_len, ret; + unsigned int total_packet_count; + u64 addr; ++#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) ++ int max_packet; ++#endif + + if (urb->num_sgs) + return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index); +@@ -3341,6 +3384,25 @@ int xhci_queue_bulk_tx(struct xhci_hcd * + running_total += TRB_MAX_BUFF_SIZE; + } + /* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */ ++#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) ++ switch(urb->dev->speed){ ++ case USB_SPEED_SUPER: ++ max_packet = urb->ep->desc.wMaxPacketSize; ++ break; ++ case USB_SPEED_HIGH: ++ case USB_SPEED_FULL: ++ case USB_SPEED_LOW: ++ case USB_SPEED_WIRELESS: ++ case USB_SPEED_UNKNOWN: ++ default: ++ max_packet = urb->ep->desc.wMaxPacketSize & 0x7ff; ++ break; ++ } ++ if((urb->transfer_flags & URB_ZERO_PACKET) ++ && ((urb->transfer_buffer_length % max_packet) == 0)){ ++ num_trbs++; ++ } ++#endif + + ret = prepare_transfer(xhci, xhci->devs[slot_id], + ep_index, urb->stream_id, +@@ -3400,6 +3462,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd * + field |= TRB_ISP; + + /* Set the TRB length, TD size, and interrupter fields. 
*/ ++#if !defined (CONFIG_USB_MT7621_XHCI_PLATFORM) + if (xhci->hci_version < 0x100) { + remainder = xhci_td_remainder( + urb->transfer_buffer_length - +@@ -3409,6 +3472,10 @@ int xhci_queue_bulk_tx(struct xhci_hcd * + trb_buff_len, total_packet_count, urb, + num_trbs - 1); + } ++#else ++ remainder = mtk_xhci_td_remainder(urb->transfer_buffer_length, running_total, max_packet, trb_buff_len); ++#endif ++ + length_field = TRB_LEN(trb_buff_len) | + remainder | + TRB_INTR_TARGET(0); +@@ -3498,7 +3565,11 @@ int xhci_queue_ctrl_tx(struct xhci_hcd * + field |= 0x1; + + /* xHCI 1.0 6.4.1.2.1: Transfer Type field */ ++#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) ++ if (1) { ++#else + if (xhci->hci_version == 0x100) { ++#endif + if (urb->transfer_buffer_length > 0) { + if (setup->bRequestType & USB_DIR_IN) + field |= TRB_TX_TYPE(TRB_DATA_IN); +@@ -3522,7 +3593,12 @@ int xhci_queue_ctrl_tx(struct xhci_hcd * + field = TRB_TYPE(TRB_DATA); + + length_field = TRB_LEN(urb->transfer_buffer_length) | ++#if !defined (CONFIG_USB_MT7621_XHCI_PLATFORM) + xhci_td_remainder(urb->transfer_buffer_length) | ++#else ++ //CC: MTK style, no scatter-gather for control transfer ++ 0 | ++#endif + TRB_INTR_TARGET(0); + if (urb->transfer_buffer_length > 0) { + if (setup->bRequestType & USB_DIR_IN) +@@ -3533,7 +3609,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd * + length_field, + field | ep_ring->cycle_state); + } +- ++ + /* Save the DMA address of the last TRB in the TD */ + td->last_trb = ep_ring->enqueue; + +@@ -3645,6 +3721,9 @@ static int xhci_queue_isoc_tx(struct xhc + u64 start_addr, addr; + int i, j; + bool more_trbs_coming; ++#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) ++ int max_packet; ++#endif + + ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; + +@@ -3658,6 +3737,21 @@ static int xhci_queue_isoc_tx(struct xhc + start_trb = &ep_ring->enqueue->generic; + start_cycle = ep_ring->cycle_state; + ++#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) ++ switch(urb->dev->speed){ ++ case USB_SPEED_SUPER: ++ max_packet = urb->ep->desc.wMaxPacketSize; ++ break; ++ case USB_SPEED_HIGH: ++ case USB_SPEED_FULL: ++ case USB_SPEED_LOW: ++ case USB_SPEED_WIRELESS: ++ case USB_SPEED_UNKNOWN: ++ max_packet = urb->ep->desc.wMaxPacketSize & 0x7ff; ++ break; ++ } ++#endif ++ + urb_priv = urb->hcpriv; + /* Queue the first TRB, even if it's zero-length */ + for (i = 0; i < num_tds; i++) { +@@ -3729,9 +3823,13 @@ static int xhci_queue_isoc_tx(struct xhc + } else { + td->last_trb = ep_ring->enqueue; + field |= TRB_IOC; ++#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) ++ if (!(xhci->quirks & XHCI_AVOID_BEI)) { ++#else + if (xhci->hci_version == 0x100 && + !(xhci->quirks & + XHCI_AVOID_BEI)) { ++#endif + /* Set BEI bit except for the last td */ + if (i < num_tds - 1) + field |= TRB_BEI; +@@ -3746,6 +3844,7 @@ static int xhci_queue_isoc_tx(struct xhc + trb_buff_len = td_remain_len; + + /* Set the TRB length, TD size, & interrupter fields. 
*/ ++#if !defined (CONFIG_USB_MT7621_XHCI_PLATFORM) + if (xhci->hci_version < 0x100) { + remainder = xhci_td_remainder( + td_len - running_total); +@@ -3755,6 +3854,10 @@ static int xhci_queue_isoc_tx(struct xhc + total_packet_count, urb, + (trbs_per_td - j - 1)); + } ++#else ++ remainder = mtk_xhci_td_remainder(urb->transfer_buffer_length, running_total, max_packet, trb_buff_len); ++#endif ++ + length_field = TRB_LEN(trb_buff_len) | + remainder | + TRB_INTR_TARGET(0); +--- a/drivers/usb/host/xhci.c ++++ b/drivers/usb/host/xhci.c +@@ -30,6 +30,16 @@ + + #include "xhci.h" + ++#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) ++#include ++#include ++#include ++#include "mtk-phy.h" ++#include "xhci-mtk-scheduler.h" ++#include "xhci-mtk-power.h" ++#include "xhci-mtk.h" ++#endif ++ + #define DRIVER_AUTHOR "Sarah Sharp" + #define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver" + +@@ -38,6 +48,18 @@ static int link_quirk; + module_param(link_quirk, int, S_IRUGO | S_IWUSR); + MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB"); + ++#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) ++long xhci_mtk_test_unlock_ioctl(struct file *file, unsigned int cmd, unsigned long arg); ++static struct file_operations xhci_mtk_test_fops = { ++ .owner = THIS_MODULE, ++ .read = xhci_mtk_test_read, ++ .write = xhci_mtk_test_write, ++ .unlocked_ioctl = xhci_mtk_test_unlock_ioctl, ++ .open = xhci_mtk_test_open, ++ .release = xhci_mtk_test_release, ++}; ++#endif ++ + /* TODO: copied from ehci-hcd.c - can this be refactored? */ + /* + * xhci_handshake - spin reading hc until handshake completes or fails +@@ -189,7 +211,7 @@ int xhci_reset(struct xhci_hcd *xhci) + return ret; + } + +-#ifdef CONFIG_PCI ++#if defined (CONFIG_PCI) && !defined (CONFIG_USB_MT7621_XHCI_PLATFORM) + static int xhci_free_msi(struct xhci_hcd *xhci) + { + int i; +@@ -389,6 +411,7 @@ static int xhci_try_enable_msi(struct us + return ret; + } + hcd->irq = pdev->irq; ++ + return 0; + } + +@@ -430,6 +453,11 @@ static void compliance_mode_recovery(uns + xhci_dbg(xhci, "Attempting compliance mode recovery\n"); + hcd = xhci->shared_hcd; + ++#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) ++ temp |= (1 << 31); ++ xhci_writel(xhci, temp, xhci->usb3_ports[i]); ++#endif ++ + if (hcd->state == HC_STATE_SUSPENDED) + usb_hcd_resume_root_hub(hcd); + +@@ -478,6 +506,9 @@ bool xhci_compliance_mode_recovery_timer + { + const char *dmi_product_name, *dmi_sys_vendor; + ++#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) ++ return true; ++#endif + dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME); + dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR); + if (!dmi_product_name || !dmi_sys_vendor) +@@ -521,6 +552,10 @@ int xhci_init(struct usb_hcd *hcd) + } else { + xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n"); + } ++ ++#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) ++ mtk_xhci_scheduler_init(); ++#endif + retval = xhci_mem_init(xhci, GFP_KERNEL); + xhci_dbg(xhci, "Finished xhci_init\n"); + +@@ -664,7 +699,11 @@ int xhci_run(struct usb_hcd *hcd) + xhci_dbg(xhci, "// Set the interrupt modulation register\n"); + temp = xhci_readl(xhci, &xhci->ir_set->irq_control); + temp &= ~ER_IRQ_INTERVAL_MASK; ++#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) ++ temp |= (u32) 16; ++#else + temp |= (u32) 160; ++#endif + xhci_writel(xhci, temp, &xhci->ir_set->irq_control); + + /* Set the HCD state before we enable the irqs */ +@@ -685,6 +724,9 @@ int xhci_run(struct usb_hcd *hcd) + xhci_queue_vendor_command(xhci, 0, 0, 0, + TRB_TYPE(TRB_NEC_GET_FW)); + ++#if 
defined (CONFIG_USB_MT7621_XHCI_PLATFORM) ++ enableXhciAllPortPower(xhci); ++#endif + xhci_dbg(xhci, "Finished xhci_run for USB2 roothub\n"); + return 0; + } +@@ -1002,7 +1044,6 @@ int xhci_resume(struct xhci_hcd *xhci, b + + /* If restore operation fails, re-initialize the HC during resume */ + if ((temp & STS_SRE) || hibernated) { +- + if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && + !(xhci_all_ports_seen_u0(xhci))) { + del_timer_sync(&xhci->comp_mode_recovery_timer); +@@ -1586,6 +1627,13 @@ int xhci_drop_endpoint(struct usb_hcd *h + u32 drop_flag; + u32 new_add_flags, new_drop_flags, new_slot_info; + int ret; ++#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) ++#if MTK_SCH_NEW ++ struct sch_ep *sch_ep = NULL; ++ int isTT; ++ int ep_type; ++#endif ++#endif + + ret = xhci_check_args(hcd, udev, ep, 1, true, __func__); + if (ret <= 0) +@@ -1637,6 +1685,40 @@ int xhci_drop_endpoint(struct usb_hcd *h + + xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep); + ++#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) ++#if MTK_SCH_NEW ++ slot_ctx = xhci_get_slot_ctx(xhci, xhci->devs[udev->slot_id]->out_ctx); ++ if ((slot_ctx->tt_info & 0xff) > 0) { ++ isTT = 1; ++ } ++ else { ++ isTT = 0; ++ } ++ if (usb_endpoint_xfer_int(&ep->desc)) { ++ ep_type = USB_EP_INT; ++ } ++ else if (usb_endpoint_xfer_isoc(&ep->desc)) { ++ ep_type = USB_EP_ISOC; ++ } ++ else if (usb_endpoint_xfer_bulk(&ep->desc)) { ++ ep_type = USB_EP_BULK; ++ } ++ else ++ ep_type = USB_EP_CONTROL; ++ ++ sch_ep = mtk_xhci_scheduler_remove_ep(udev->speed, usb_endpoint_dir_in(&ep->desc) ++ , isTT, ep_type, (mtk_u32 *)ep); ++ if (sch_ep != NULL) { ++ kfree(sch_ep); ++ } ++ else { ++ xhci_dbg(xhci, "[MTK]Doesn't find ep_sch instance when removing endpoint\n"); ++ } ++#else ++ mtk_xhci_scheduler_remove_ep(xhci, udev, ep); ++#endif ++#endif ++ + xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n", + (unsigned int) ep->desc.bEndpointAddress, + udev->slot_id, +@@ -1672,6 +1754,18 @@ int xhci_add_endpoint(struct usb_hcd *hc + u32 new_add_flags, new_drop_flags, new_slot_info; + struct xhci_virt_device *virt_dev; + int ret = 0; ++#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) ++ struct xhci_ep_ctx *in_ep_ctx; ++#if MTK_SCH_NEW ++ struct sch_ep *sch_ep; ++ int isTT; ++ int ep_type; ++ int maxp = 0; ++ int burst = 0; ++ int mult = 0; ++ int interval; ++#endif ++#endif + + ret = xhci_check_args(hcd, udev, ep, 1, true, __func__); + if (ret <= 0) { +@@ -1734,6 +1828,56 @@ int xhci_add_endpoint(struct usb_hcd *hc + return -ENOMEM; + } + ++#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) ++ in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index); ++#if MTK_SCH_NEW ++ slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); ++ if ((slot_ctx->tt_info & 0xff) > 0) { ++ isTT = 1; ++ } ++ else { ++ isTT = 0; ++ } ++ if (usb_endpoint_xfer_int(&ep->desc)) { ++ ep_type = USB_EP_INT; ++ } ++ else if (usb_endpoint_xfer_isoc(&ep->desc)) { ++ ep_type = USB_EP_ISOC; ++ } ++ else if (usb_endpoint_xfer_bulk(&ep->desc)) { ++ ep_type = USB_EP_BULK; ++ } ++ else ++ ep_type = USB_EP_CONTROL; ++ ++ if (udev->speed == USB_SPEED_FULL || udev->speed == USB_SPEED_HIGH ++ || udev->speed == USB_SPEED_LOW) { ++ maxp = ep->desc.wMaxPacketSize & 0x7FF; ++ burst = ep->desc.wMaxPacketSize >> 11; ++ mult = 0; ++ } ++ else if (udev->speed == USB_SPEED_SUPER) { ++ maxp = ep->desc.wMaxPacketSize & 0x7FF; ++ burst = ep->ss_ep_comp.bMaxBurst; ++ mult = ep->ss_ep_comp.bmAttributes & 0x3; ++ } ++ interval = (1 << ((in_ep_ctx->ep_info >> 16) & 0xff)); ++ 
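/*
 * Illustrative sketch, not part of the vendor patch: the maxp/burst/mult
 * decoding performed just above, collected into one standalone helper so the
 * descriptor field layout is visible in a single place. The struct and helper
 * names are made up for illustration; the masks mirror the vendor code
 * (wMaxPacketSize bits 10:0 = packet size, bits 12:11 = extra high-speed
 * transactions per microframe, SuperSpeed burst/mult come from the endpoint
 * companion descriptor).
 */
struct mtk_ep_budget {
	int maxp;	/* bytes per packet */
	int burst;	/* extra packets per service opportunity */
	int mult;	/* SuperSpeed isoc mult, 0 otherwise */
};

static struct mtk_ep_budget mtk_decode_ep_budget(enum usb_device_speed speed,
						 const struct usb_host_endpoint *ep)
{
	struct mtk_ep_budget b = { 0, 0, 0 };
	u16 mps = le16_to_cpu(ep->desc.wMaxPacketSize);

	if (speed == USB_SPEED_SUPER) {
		b.maxp  = mps & 0x7ff;
		b.burst = ep->ss_ep_comp.bMaxBurst;
		b.mult  = ep->ss_ep_comp.bmAttributes & 0x3;
	} else {
		b.maxp  = mps & 0x7ff;
		b.burst = mps >> 11;
	}
	return b;
}
/*
 * The interval computed above decodes the xHCI endpoint-context Interval
 * field (ep_info bits 23:16), a power-of-two count of 125 us microframes:
 * a field value of 3 gives 1 << 3 = 8 microframes, i.e. 1 ms.
 */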
sch_ep = kmalloc(sizeof(struct sch_ep), GFP_KERNEL); ++ if (mtk_xhci_scheduler_add_ep(udev->speed, usb_endpoint_dir_in(&ep->desc), ++ isTT, ep_type, maxp, interval, burst, mult, (mtk_u32 *)ep ++ , (mtk_u32 *)in_ep_ctx, sch_ep) != SCH_SUCCESS) { ++ xhci_err(xhci, "[MTK] not enough bandwidth\n"); ++ ++ return -ENOSPC; ++ } ++#else ++ if (mtk_xhci_scheduler_add_ep(xhci, udev, ep, in_ep_ctx) != SCH_SUCCESS) { ++ xhci_err(xhci, "[MTK] not enough bandwidth\n"); ++ ++ return -ENOSPC; ++ } ++#endif ++#endif + ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs); + new_add_flags = le32_to_cpu(ctrl_ctx->add_flags); + +@@ -2697,7 +2841,7 @@ int xhci_check_bandwidth(struct usb_hcd + if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) && + ctrl_ctx->drop_flags == 0) + return 0; +- ++ + xhci_dbg(xhci, "New Input Control Context:\n"); + slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); + xhci_dbg_ctx(xhci, virt_dev->in_ctx, +@@ -4233,10 +4377,14 @@ static u16 xhci_call_host_update_timeout + u16 *timeout) + { + if (state == USB3_LPM_U1) { ++#if !defined (CONFIG_USB_MT7621_XHCI_PLATFORM) + if (xhci->quirks & XHCI_INTEL_HOST) ++#endif + return xhci_calculate_intel_u1_timeout(udev, desc); + } else { ++#if !defined (CONFIG_USB_MT7621_XHCI_PLATFORM) + if (xhci->quirks & XHCI_INTEL_HOST) ++#endif + return xhci_calculate_intel_u2_timeout(udev, desc); + } + +@@ -4662,7 +4810,9 @@ int xhci_gen_setup(struct usb_hcd *hcd, + /* Accept arbitrarily long scatter-gather lists */ + hcd->self.sg_tablesize = ~0; + /* XHCI controllers don't stop the ep queue on short packets :| */ ++#if !defined (CONFIG_USB_MT7621_XHCI_PLATFORM) + hcd->self.no_stop_on_short = 1; ++#endif + + if (usb_hcd_is_primary_hcd(hcd)) { + xhci = kzalloc(sizeof(struct xhci_hcd), GFP_KERNEL); +@@ -4731,6 +4881,10 @@ int xhci_gen_setup(struct usb_hcd *hcd, + goto error; + xhci_dbg(xhci, "Reset complete\n"); + ++#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) ++ setInitialReg(); ++#endif ++ + temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params); + if (HCC_64BIT_ADDR(temp)) { + xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n"); +@@ -4755,8 +4909,21 @@ MODULE_DESCRIPTION(DRIVER_DESC); + MODULE_AUTHOR(DRIVER_AUTHOR); + MODULE_LICENSE("GPL"); + ++#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) ++static struct platform_device xhci_platform_dev = { ++ .name = "xhci-hcd", ++ .id = -1, ++ .dev = { ++ .coherent_dma_mask = 0xffffffff, ++ }, ++}; ++#endif ++ + static int __init xhci_hcd_init(void) + { ++#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) ++ struct platform_device *pPlatformDev; ++#endif + int retval; + + retval = xhci_register_pci(); +@@ -4769,6 +4936,33 @@ static int __init xhci_hcd_init(void) + printk(KERN_DEBUG "Problem registering platform driver."); + goto unreg_pci; + } ++ ++#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) ++ retval = register_chrdev(XHCI_MTK_TEST_MAJOR, DEVICE_NAME, &xhci_mtk_test_fops); ++ ++ u3phy_init(); ++ if (u3phy_ops->u2_slew_rate_calibration) { ++ u3phy_ops->u2_slew_rate_calibration(u3phy); ++ u3phy_ops->u2_slew_rate_calibration(u3phy_p1); ++ } ++ else{ ++ printk(KERN_ERR "WARN: PHY doesn't implement u2 slew rate calibration function\n"); ++ } ++ u3phy_ops->init(u3phy); ++ reinitIP(); ++ ++ pPlatformDev = &xhci_platform_dev; ++ memset(pPlatformDev, 0, sizeof(struct platform_device)); ++ pPlatformDev->name = "xhci-hcd"; ++ pPlatformDev->id = -1; ++ pPlatformDev->dev.coherent_dma_mask = 0xffffffff; ++ pPlatformDev->dev.dma_mask = &pPlatformDev->dev.coherent_dma_mask; ++ ++ retval = platform_device_register(&xhci_platform_dev); ++ if 
(retval < 0) ++ xhci_unregister_plat(); ++#endif ++ + /* + * Check the compiler generated sizes of structures that must be laid + * out in specific ways for hardware access. +@@ -4786,6 +4980,7 @@ static int __init xhci_hcd_init(void) + BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8); + /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */ + BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8); ++ + return 0; + unreg_pci: + xhci_unregister_pci(); +--- a/drivers/usb/host/xhci.h ++++ b/drivers/usb/host/xhci.h +@@ -29,9 +29,24 @@ + #include + + /* Code sharing between pci-quirks and xhci hcd */ +-#include "xhci-ext-caps.h" ++#include "xhci-ext-caps.h" + #include "pci-quirks.h" + ++#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) ++#define XHC_IRQ (22 + 8) ++#define XHC_IO_START 0x1E1C0000 ++#define XHC_IO_LENGTH 0x10000 ++/* mtk scheduler bitmasks */ ++#define BPKTS(p) ((p) & 0x3f) ++#define BCSCOUNT(p) (((p) & 0x7) << 8) ++#define BBM(p) ((p) << 11) ++#define BOFFSET(p) ((p) & 0x3fff) ++#define BREPEAT(p) (((p) & 0x7fff) << 16) ++#endif ++ ++ ++ ++ + /* xHCI PCI Configuration Registers */ + #define XHCI_SBRN_OFFSET (0x60) + +@@ -1536,8 +1551,12 @@ struct xhci_hcd { + /* Compliance Mode Recovery Data */ + struct timer_list comp_mode_recovery_timer; + u32 port_status_u0; ++#ifdef CONFIG_USB_MT7621_XHCI_PLATFORM ++#define COMP_MODE_RCVRY_MSECS 5000 ++#else + /* Compliance Mode Timer Triggered every 2 seconds */ + #define COMP_MODE_RCVRY_MSECS 2000 ++#endif + }; + + /* convert between an HCD pointer and the corresponding EHCI_HCD */ +@@ -1703,7 +1722,7 @@ void xhci_urb_free_priv(struct xhci_hcd + void xhci_free_command(struct xhci_hcd *xhci, + struct xhci_command *command); + +-#ifdef CONFIG_PCI ++#if defined (CONFIG_PCI) && !defined (CONFIG_USB_MT7621_XHCI_PLATFORM) + /* xHCI PCI glue */ + int xhci_register_pci(void); + void xhci_unregister_pci(void); diff --git a/target/linux/ramips/patches-3.10/0215-SPI-ralink-add-mt7621-support.patch b/target/linux/ramips/patches-3.10/0215-SPI-ralink-add-mt7621-support.patch new file mode 100644 index 0000000000..ec20066bf3 --- /dev/null +++ b/target/linux/ramips/patches-3.10/0215-SPI-ralink-add-mt7621-support.patch @@ -0,0 +1,346 @@ +From 1a961f146e65e2716dbe9065baa4c0931fcb6b3e Mon Sep 17 00:00:00 2001 +From: John Crispin +Date: Sun, 16 Mar 2014 05:34:11 +0000 +Subject: [PATCH 215/215] SPI: ralink: add mt7621 support + +Signed-off-by: John Crispin +--- + drivers/spi/spi-rt2880.c | 218 +++++++++++++++++++++++++++++++++++++++++++--- + 1 file changed, 205 insertions(+), 13 deletions(-) + +--- a/drivers/spi/spi-rt2880.c ++++ b/drivers/spi/spi-rt2880.c +@@ -21,8 +21,13 @@ + #include + #include + #include ++#include + #include + ++#include ++ ++#define SPI_BPW_MASK(bits) BIT((bits) - 1) ++ + #define DRIVER_NAME "spi-rt2880" + /* only one slave is supported*/ + #define RALINK_NUM_CHIPSELECTS 1 +@@ -63,6 +68,25 @@ + /* SPIFIFOSTAT register bit field */ + #define SPIFIFOSTAT_TXFULL BIT(17) + ++#define MT7621_SPI_TRANS 0x00 ++#define SPITRANS_BUSY BIT(16) ++#define MT7621_SPI_OPCODE 0x04 ++#define MT7621_SPI_DATA0 0x08 ++#define SPI_CTL_TX_RX_CNT_MASK 0xff ++#define SPI_CTL_START BIT(8) ++#define MT7621_SPI_POLAR 0x38 ++#define MT7621_SPI_MASTER 0x28 ++#define MT7621_SPI_SPACE 0x3c ++ ++struct rt2880_spi; ++ ++struct rt2880_spi_ops { ++ void (*init_hw)(struct rt2880_spi *rs); ++ void (*set_cs)(struct rt2880_spi *rs, int enable); ++ int (*baudrate_set)(struct spi_device *spi, unsigned int speed); ++ unsigned int (*write_read)(struct spi_device 
*spi, struct list_head *list, struct spi_transfer *xfer); ++}; ++ + struct rt2880_spi { + struct spi_master *master; + void __iomem *base; +@@ -70,6 +94,8 @@ struct rt2880_spi { + unsigned int speed; + struct clk *clk; + spinlock_t lock; ++ ++ struct rt2880_spi_ops *ops; + }; + + static inline struct rt2880_spi *spidev_to_rt2880_spi(struct spi_device *spi) +@@ -149,6 +175,17 @@ static int rt2880_spi_baudrate_set(struc + return 0; + } + ++static int mt7621_spi_baudrate_set(struct spi_device *spi, unsigned int speed) ++{ ++/* u32 master = rt2880_spi_read(rs, MT7621_SPI_MASTER); ++ ++ // set default clock to hclk/5 ++ master &= ~(0xfff << 16); ++ master |= 0x3 << 16; ++*/ ++ return 0; ++} ++ + /* + * called only when no transfer is active on the bus + */ +@@ -164,7 +201,7 @@ rt2880_spi_setup_transfer(struct spi_dev + + if (rs->speed != speed) { + dev_dbg(&spi->dev, "speed_hz:%u\n", speed); +- rc = rt2880_spi_baudrate_set(spi, speed); ++ rc = rs->ops->baudrate_set(spi, speed); + if (rc) + return rc; + } +@@ -180,6 +217,17 @@ static void rt2880_spi_set_cs(struct rt2 + rt2880_spi_setbits(rs, RAMIPS_SPI_CTL, SPICTL_SPIENA); + } + ++static void mt7621_spi_set_cs(struct rt2880_spi *rs, int enable) ++{ ++ u32 polar = rt2880_spi_read(rs, MT7621_SPI_POLAR); ++ ++ if (enable) ++ polar |= 1; ++ else ++ polar &= ~1; ++ rt2880_spi_write(rs, MT7621_SPI_POLAR, polar); ++} ++ + static inline int rt2880_spi_wait_till_ready(struct rt2880_spi *rs) + { + int i; +@@ -198,8 +246,26 @@ static inline int rt2880_spi_wait_till_r + return -ETIMEDOUT; + } + ++static inline int mt7621_spi_wait_till_ready(struct rt2880_spi *rs) ++{ ++ int i; ++ ++ for (i = 0; i < RALINK_SPI_WAIT_MAX_LOOP; i++) { ++ u32 status; ++ ++ status = rt2880_spi_read(rs, MT7621_SPI_TRANS); ++ if ((status & SPITRANS_BUSY) == 0) { ++ return 0; ++ } ++ cpu_relax(); ++ udelay(1); ++ } ++ ++ return -ETIMEDOUT; ++} ++ + static unsigned int +-rt2880_spi_write_read(struct spi_device *spi, struct spi_transfer *xfer) ++rt2880_spi_write_read(struct spi_device *spi, struct list_head *list, struct spi_transfer *xfer) + { + struct rt2880_spi *rs = spidev_to_rt2880_spi(spi); + unsigned count = 0; +@@ -239,6 +305,100 @@ out: + return count; + } + ++static unsigned int ++mt7621_spi_write_read(struct spi_device *spi, struct list_head *list, struct spi_transfer *xfer) ++{ ++ struct rt2880_spi *rs = spidev_to_rt2880_spi(spi); ++ struct spi_transfer *next = NULL; ++ const u8 *tx = xfer->tx_buf; ++ u8 *rx = NULL; ++ u32 trans; ++ int len = xfer->len; ++ ++ if (!tx) ++ return 0; ++ ++ if (!list_is_last(&xfer->transfer_list, list)) { ++ next = list_entry(xfer->transfer_list.next, struct spi_transfer, transfer_list); ++ rx = next->rx_buf; ++ } ++ ++ trans = rt2880_spi_read(rs, MT7621_SPI_TRANS); ++ trans &= ~SPI_CTL_TX_RX_CNT_MASK; ++ ++ if (tx) { ++ u32 data0 = 0, opcode = 0; ++ ++ switch (xfer->len) { ++ case 8: ++ data0 |= tx[7] << 24; ++ case 7: ++ data0 |= tx[6] << 16; ++ case 6: ++ data0 |= tx[5] << 8; ++ case 5: ++ data0 |= tx[4]; ++ case 4: ++ opcode |= tx[3] << 8; ++ case 3: ++ opcode |= tx[2] << 16; ++ case 2: ++ opcode |= tx[1] << 24; ++ case 1: ++ opcode |= tx[0]; ++ break; ++ ++ default: ++ dev_err(&spi->dev, "trying to write too many bytes: %d\n", next->len); ++ return -EINVAL; ++ } ++ ++ rt2880_spi_write(rs, MT7621_SPI_DATA0, data0); ++ rt2880_spi_write(rs, MT7621_SPI_OPCODE, opcode); ++ trans |= xfer->len; ++ } ++ ++ if (rx) ++ trans |= (next->len << 4); ++ rt2880_spi_write(rs, MT7621_SPI_TRANS, trans); ++ trans |= SPI_CTL_START; ++ rt2880_spi_write(rs, 
MT7621_SPI_TRANS, trans); ++ ++ mt7621_spi_wait_till_ready(rs); ++ ++ if (rx) { ++ u32 data0 = rt2880_spi_read(rs, MT7621_SPI_DATA0); ++ u32 opcode = rt2880_spi_read(rs, MT7621_SPI_OPCODE); ++ ++ switch (next->len) { ++ case 8: ++ rx[7] = (opcode >> 24) & 0xff; ++ case 7: ++ rx[6] = (opcode >> 16) & 0xff; ++ case 6: ++ rx[5] = (opcode >> 8) & 0xff; ++ case 5: ++ rx[4] = opcode & 0xff; ++ case 4: ++ rx[3] = (data0 >> 24) & 0xff; ++ case 3: ++ rx[2] = (data0 >> 16) & 0xff; ++ case 2: ++ rx[1] = (data0 >> 8) & 0xff; ++ case 1: ++ rx[0] = data0 & 0xff; ++ break; ++ ++ default: ++ dev_err(&spi->dev, "trying to read too many bytes: %d\n", next->len); ++ return -EINVAL; ++ } ++ len += next->len; ++ } ++ ++ return len; ++} ++ + static int rt2880_spi_transfer_one_message(struct spi_master *master, + struct spi_message *m) + { +@@ -280,25 +440,25 @@ static int rt2880_spi_transfer_one_messa + } + + if (!cs_active) { +- rt2880_spi_set_cs(rs, 1); ++ rs->ops->set_cs(rs, 1); + cs_active = 1; + } + + if (t->len) +- m->actual_length += rt2880_spi_write_read(spi, t); ++ m->actual_length += rs->ops->write_read(spi, &m->transfers, t); + + if (t->delay_usecs) + udelay(t->delay_usecs); + + if (t->cs_change) { +- rt2880_spi_set_cs(rs, 0); ++ rs->ops->set_cs(rs, 0); + cs_active = 0; + } + } + + msg_done: + if (cs_active) +- rt2880_spi_set_cs(rs, 0); ++ rs->ops->set_cs(rs, 0); + + m->status = status; + spi_finalize_current_message(master); +@@ -334,8 +494,41 @@ static void rt2880_spi_reset(struct rt28 + rt2880_spi_write(rs, RAMIPS_SPI_CTL, SPICTL_HIZSDO | SPICTL_SPIENA); + } + ++static void mt7621_spi_reset(struct rt2880_spi *rs) ++{ ++ u32 master = rt2880_spi_read(rs, MT7621_SPI_MASTER); ++ ++ master &= ~(0xfff << 16); ++ master |= 3 << 16; ++ ++ master |= 7 << 29; ++ rt2880_spi_write(rs, MT7621_SPI_MASTER, master); ++} ++ ++static struct rt2880_spi_ops spi_ops[] = { ++ { ++ .init_hw = rt2880_spi_reset, ++ .set_cs = rt2880_spi_set_cs, ++ .baudrate_set = rt2880_spi_baudrate_set, ++ .write_read = rt2880_spi_write_read, ++ }, { ++ .init_hw = mt7621_spi_reset, ++ .set_cs = mt7621_spi_set_cs, ++ .baudrate_set = mt7621_spi_baudrate_set, ++ .write_read = mt7621_spi_write_read, ++ }, ++}; ++ ++static const struct of_device_id rt2880_spi_match[] = { ++ { .compatible = "ralink,rt2880-spi", .data = &spi_ops[0]}, ++ { .compatible = "ralink,mt7621-spi", .data = &spi_ops[1] }, ++ {}, ++}; ++MODULE_DEVICE_TABLE(of, rt2880_spi_match); ++ + static int rt2880_spi_probe(struct platform_device *pdev) + { ++ const struct of_device_id *match; + struct spi_master *master; + struct rt2880_spi *rs; + unsigned long flags; +@@ -344,6 +537,10 @@ static int rt2880_spi_probe(struct platf + int status = 0; + struct clk *clk; + ++ match = of_match_device(rt2880_spi_match, &pdev->dev); ++ if (!match) ++ return -EINVAL; ++ + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + base = devm_ioremap_resource(&pdev->dev, r); + if (IS_ERR(base)) +@@ -382,12 +579,13 @@ static int rt2880_spi_probe(struct platf + rs->clk = clk; + rs->master = master; + rs->sys_freq = clk_get_rate(rs->clk); ++ rs->ops = (struct rt2880_spi_ops *) match->data; + dev_dbg(&pdev->dev, "sys_freq: %u\n", rs->sys_freq); + spin_lock_irqsave(&rs->lock, flags); + + device_reset(&pdev->dev); + +- rt2880_spi_reset(rs); ++ rs->ops->init_hw(rs); + + return spi_register_master(master); + } +@@ -408,12 +606,6 @@ static int rt2880_spi_remove(struct plat + + MODULE_ALIAS("platform:" DRIVER_NAME); + +-static const struct of_device_id rt2880_spi_match[] = { +- { .compatible = 
"ralink,rt2880-spi" }, +- {}, +-}; +-MODULE_DEVICE_TABLE(of, rt2880_spi_match); +- + static struct platform_driver rt2880_spi_driver = { + .driver = { + .name = DRIVER_NAME, diff --git a/target/linux/ramips/patches-3.10/0216-NET-ralink-add-mt7621-SDK-ethernet-driver.patch b/target/linux/ramips/patches-3.10/0216-NET-ralink-add-mt7621-SDK-ethernet-driver.patch new file mode 100644 index 0000000000..a3a5ec282d --- /dev/null +++ b/target/linux/ramips/patches-3.10/0216-NET-ralink-add-mt7621-SDK-ethernet-driver.patch @@ -0,0 +1,6045 @@ +From 6e10c9b7ab93cb105dc2779769c48949ebc60ee7 Mon Sep 17 00:00:00 2001 +From: John Crispin +Date: Sun, 16 Mar 2014 08:51:14 +0000 +Subject: [PATCH 1/3] foo + +--- + drivers/net/ethernet/raeth/Kconfig | 343 +++++++ + drivers/net/ethernet/raeth/Makefile | 7 + + drivers/net/ethernet/raeth/ethtool_readme.txt | 44 + + drivers/net/ethernet/raeth/mii_mgr.c | 166 ++++ + drivers/net/ethernet/raeth/ra2882ethreg.h | 1268 +++++++++++++++++++++++++ + drivers/net/ethernet/raeth/ra_ioctl.h | 92 ++ + drivers/net/ethernet/raeth/ra_mac.c | 93 ++ + drivers/net/ethernet/raeth/ra_mac.h | 35 + + drivers/net/ethernet/raeth/raether.c | 663 +++++++++++++ + drivers/net/ethernet/raeth/raether.h | 92 ++ + drivers/net/ethernet/raeth/raether_pdma.c | 202 ++++ + drivers/net/ethernet/raeth/raether_qdma.c | 805 ++++++++++++++++ + drivers/net/ethernet/ralink/gsw_mt7620a.c | 15 +- + drivers/net/ethernet/ralink/mt7530.c | 2 +- + drivers/net/ethernet/ralink/mt7621.c | 253 +++++ + drivers/net/ethernet/ralink/mt762x.c | 295 ++++++ + drivers/net/ethernet/ralink/mt762x.h | 38 + + drivers/net/ethernet/ralink/ralink_soc_eth.c | 25 +- + drivers/net/ethernet/ralink/ralink_soc_eth.h | 9 +- + drivers/net/ethernet/ralink/soc_mt7621.c | 5 +- + 20 files changed, 4429 insertions(+), 23 deletions(-) + create mode 100644 drivers/net/ethernet/raeth/Kconfig + create mode 100644 drivers/net/ethernet/raeth/Makefile + create mode 100644 drivers/net/ethernet/raeth/ethtool_readme.txt + create mode 100644 drivers/net/ethernet/raeth/mii_mgr.c + create mode 100644 drivers/net/ethernet/raeth/ra2882ethreg.h + create mode 100644 drivers/net/ethernet/raeth/ra_ioctl.h + create mode 100644 drivers/net/ethernet/raeth/ra_mac.c + create mode 100644 drivers/net/ethernet/raeth/ra_mac.h + create mode 100644 drivers/net/ethernet/raeth/raether.c + create mode 100644 drivers/net/ethernet/raeth/raether.h + create mode 100755 drivers/net/ethernet/raeth/raether_pdma.c + create mode 100644 drivers/net/ethernet/raeth/raether_qdma.c + create mode 100644 drivers/net/ethernet/ralink/mt7621.c + create mode 100644 drivers/net/ethernet/ralink/mt762x.c + create mode 100644 drivers/net/ethernet/ralink/mt762x.h + +--- /dev/null ++++ b/drivers/net/ethernet/raeth/Kconfig +@@ -0,0 +1,344 @@ ++ ++config RA_NAT_NONE ++ bool ++ default y ++ depends on RALINK ++ ++config MT7621_ASIC ++ bool ++ default y ++ depends on SOC_MT7621 ++ ++config RALINK_MT7621 ++ bool ++ default y ++ depends on SOC_MT7621 ++ ++config RAETH ++ tristate "Ralink GMAC" ++ depends on SOC_MT7621 ++ ---help--- ++ This driver supports Ralink gigabit ethernet family of ++ adapters. 
++ ++config PDMA_NEW ++ bool ++ default y if (RALINK_MT7620 || RALINK_MT7621) ++ depends on RAETH ++ ++config RAETH_SCATTER_GATHER_RX_DMA ++ bool ++ default y if (RALINK_MT7620 || RALINK_MT7621) ++ depends on RAETH ++ ++ ++choice ++ prompt "Network BottomHalves" ++ depends on RAETH ++ default RA_NETWORK_WORKQUEUE_BH ++ ++ config RA_NETWORK_TASKLET_BH ++ bool "Tasklet" ++ ++ config RA_NETWORK_WORKQUEUE_BH ++ bool "Work Queue" ++ ++ config RAETH_NAPI ++ bool "NAPI" ++ ++endchoice ++ ++#config TASKLET_WORKQUEUE_SW ++# bool "Tasklet and Workqueue switch" ++# depends on RA_NETWORK_TASKLET_BH ++ ++config RAETH_SKB_RECYCLE_2K ++ bool "SKB Recycling" ++ depends on RAETH ++ ++config RAETH_SPECIAL_TAG ++ bool "Ralink Special Tag (0x810x)" ++ depends on RAETH && RT_3052_ESW ++ ++#config RAETH_JUMBOFRAME ++# bool "Jumbo Frame up to 4K bytes" ++# depends on RAETH && !(RALINK_RT3052 || RALINK_RT3352 || RALINK_RT5350 || RALINK_MT7628) ++ ++config RAETH_CHECKSUM_OFFLOAD ++ bool "TCP/UDP/IP checksum offload" ++ default y ++ depends on RAETH && !RALINK_RT2880 ++ ++#config RAETH_SW_FC ++# bool "When TX ring is full, inform kernel stop transmit and stop RX handler" ++# default n ++# depends on RAETH ++ ++config 32B_DESC ++ bool "32bytes TX/RX description" ++ default n ++ depends on RAETH && (RALINK_MT7620 || RALINK_MT7621) ++ ---help--- ++ At this moment, you cannot enable 32B description with Multiple RX ring at the same time. ++ ++config RAETH_LRO ++ bool "LRO (Large Receive Offload )" ++ select INET_LRO ++ depends on RAETH && (RALINK_RT6855A || RALINK_MT7620 || RALINK_MT7621) ++ ++config RAETH_HW_VLAN_TX ++ bool "Transmit VLAN HW (DoubleVLAN is not supported)" ++ depends on RAETH && !(RALINK_RT5350 || RALINK_MT7628) ++ ---help--- ++ Please disable HW_VLAN_TX if you need double vlan ++ ++config RAETH_HW_VLAN_RX ++ bool "Receive VLAN HW (DoubleVLAN is not supported)" ++ depends on RAETH && RALINK_MT7621 ++ ---help--- ++ Please disable HW_VLAN_RX if you need double vlan ++ ++config RAETH_TSO ++ bool "TSOV4 (Tcp Segmentaton Offload)" ++ depends on (RAETH_HW_VLAN_TX && (RALINK_RT6855 || RALINK_RT6855A || RALINK_MT7620)) || RALINK_MT7621 ++ ++config RAETH_TSOV6 ++ bool "TSOV6 (Tcp Segmentaton Offload)" ++ depends on RAETH_TSO ++ ++config RAETH_RW_PDMAPTR_FROM_VAR ++ bool ++ default y if RALINK_RT6855A || RALINK_MT7620 ++ depends on RAETH ++ ++#config RAETH_QOS ++# bool "QoS Feature" ++# depends on RAETH && !RALINK_RT2880 && !RALINK_MT7620 && !RALINK_MT7621 && !RAETH_TSO ++ ++choice ++ prompt "QoS Type" ++ depends on RAETH_QOS ++ default DSCP_QOS_DSCP ++ ++config RAETH_QOS_DSCP_BASED ++ bool "DSCP-based" ++ depends on RAETH_QOS ++ ++config RAETH_QOS_VPRI_BASED ++ bool "VPRI-based" ++ depends on RAETH_QOS ++ ++endchoice ++ ++config RAETH_QDMA ++ bool "Choose QDMA instead PDMA" ++ default n ++ depends on RAETH && RALINK_MT7621 ++ ++choice ++ prompt "GMAC is connected to" ++ depends on RAETH ++ default GE1_RGMII_FORCE_1000 ++ ++config GE1_MII_FORCE_100 ++ bool "MII_FORCE_100 (10/100M Switch)" ++ depends on (RALINK_RT2880 || RALINK_RT3883 || RALINK_MT7621) ++ ++config GE1_MII_AN ++ bool "MII_AN (100Phy)" ++ depends on (RALINK_RT2880 || RALINK_RT3883 || RALINK_MT7621) ++ ++config GE1_RVMII_FORCE_100 ++ bool "RvMII_FORCE_100 (CPU)" ++ depends on (RALINK_RT2880 || RALINK_RT3883 || RALINK_MT7621) ++ ++config GE1_RGMII_FORCE_1000 ++ bool "RGMII_FORCE_1000 (GigaSW, CPU)" ++ depends on (RALINK_RT2880 || RALINK_RT3883) ++ select RALINK_SPI ++ ++config GE1_RGMII_FORCE_1000 ++ bool "RGMII_FORCE_1000 (GigaSW, CPU)" ++ depends 
on (RALINK_MT7621) ++ select RT_3052_ESW ++ ++config GE1_TRGMII_FORCE_1200 ++ bool "TRGMII_FORCE_1200 (GigaSW, CPU)" ++ depends on (RALINK_MT7621) ++ select RT_3052_ESW ++ ++config GE1_RGMII_AN ++ bool "RGMII_AN (GigaPhy)" ++ depends on (RALINK_RT2880 || RALINK_RT3883 || RALINK_MT7621) ++ ++config GE1_RGMII_NONE ++ bool "NONE (NO CONNECT)" ++ depends on (RALINK_MT7621) ++ ++endchoice ++ ++config RT_3052_ESW ++ bool "Ralink Embedded Switch" ++ default y ++ depends on (RALINK_RT3052 || RALINK_RT3352 || RALINK_RT5350 || RALINK_RT6855 || RALINK_RT6855A || RALINK_MT7620 || RALINK_MT7621 || RALINK_MT7628) ++ ++config LAN_WAN_SUPPORT ++ bool "LAN/WAN Partition" ++ depends on RAETH_ROUTER || RT_3052_ESW ++ ++choice ++ prompt "Switch Board Layout Type" ++ depends on LAN_WAN_SUPPORT || P5_RGMII_TO_MAC_MODE || GE1_RGMII_FORCE_1000 || GE1_TRGMII_FORCE_1200 || GE2_RGMII_FORCE_1000 ++ default WAN_AT_P0 ++ ++ config WAN_AT_P4 ++ bool "LLLL/W" ++ ++ config WAN_AT_P0 ++ bool "W/LLLL" ++endchoice ++ ++config RALINK_VISTA_BASIC ++ bool 'Vista Basic Logo for IC+ 175C' ++ depends on LAN_WAN_SUPPORT && (RALINK_RT2880 || RALINK_RT3883) ++ ++config ESW_DOUBLE_VLAN_TAG ++ bool ++ default y if RT_3052_ESW ++ ++config RAETH_HAS_PORT4 ++ bool "Port 4 Support" ++ depends on RAETH && RALINK_MT7620 ++choice ++ prompt "Target Mode" ++ depends on RAETH_HAS_PORT4 ++ default P4_RGMII_TO_MAC_MODE ++ ++ config P4_MAC_TO_PHY_MODE ++ bool "Giga_Phy (RGMII)" ++ config GE_RGMII_MT7530_P0_AN ++ bool "GE_RGMII_MT7530_P0_AN (MT7530 Internal GigaPhy)" ++ config GE_RGMII_MT7530_P4_AN ++ bool "GE_RGMII_MT7530_P4_AN (MT7530 Internal GigaPhy)" ++ config P4_RGMII_TO_MAC_MODE ++ bool "Giga_SW/iNIC (RGMII)" ++ config P4_MII_TO_MAC_MODE ++ bool "External_CPU (MII_RvMII)" ++ config P4_RMII_TO_MAC_MODE ++ bool "External_CPU (RvMII_MII)" ++endchoice ++ ++config MAC_TO_GIGAPHY_MODE_ADDR2 ++ hex "Port4 Phy Address" ++ default 0x4 ++ depends on P4_MAC_TO_PHY_MODE ++ ++config RAETH_HAS_PORT5 ++ bool "Port 5 Support" ++ depends on RAETH && (RALINK_RT3052 || RALINK_RT3352 || RALINK_RT6855 || RALINK_RT6855A || RALINK_MT7620) ++choice ++ prompt "Target Mode" ++ depends on RAETH_HAS_PORT5 ++ default P5_RGMII_TO_MAC_MODE ++ ++ config P5_MAC_TO_PHY_MODE ++ bool "Giga_Phy (RGMII)" ++ config P5_RGMII_TO_MAC_MODE ++ bool "Giga_SW/iNIC (RGMII)" ++ config P5_RGMII_TO_MT7530_MODE ++ bool "MT7530 Giga_SW (RGMII)" ++ depends on RALINK_MT7620 ++ config P5_MII_TO_MAC_MODE ++ bool "External_CPU (MII_RvMII)" ++ config P5_RMII_TO_MAC_MODE ++ bool "External_CPU (RvMII_MII)" ++endchoice ++ ++config MAC_TO_GIGAPHY_MODE_ADDR ++ hex "GE1 Phy Address" ++ default 0x1F ++ depends on GE1_MII_AN || GE1_RGMII_AN ++ ++config MAC_TO_GIGAPHY_MODE_ADDR ++ hex "Port5 Phy Address" ++ default 0x5 ++ depends on P5_MAC_TO_PHY_MODE ++ ++config RAETH_GMAC2 ++ bool "GMAC2 Support" ++ depends on RAETH && (RALINK_RT3883 || RALINK_MT7621) ++ ++choice ++ prompt "GMAC2 is connected to" ++ depends on RAETH_GMAC2 ++ default GE2_RGMII_AN ++ ++config GE2_MII_FORCE_100 ++ bool "MII_FORCE_100 (10/100M Switch)" ++ depends on RAETH_GMAC2 ++ ++config GE2_MII_AN ++ bool "MII_AN (100Phy)" ++ depends on RAETH_GMAC2 ++ ++config GE2_RVMII_FORCE_100 ++ bool "RvMII_FORCE_100 (CPU)" ++ depends on RAETH_GMAC2 ++ ++config GE2_RGMII_FORCE_1000 ++ bool "RGMII_FORCE_1000 (GigaSW, CPU)" ++ depends on RAETH_GMAC2 ++ select RALINK_SPI ++ ++config GE2_RGMII_AN ++ bool "RGMII_AN (GigaPhy)" ++ depends on RAETH_GMAC2 ++ ++config GE2_INTERNAL_GPHY ++ bool "Internal GigaPHY" ++ depends on RAETH_GMAC2 ++ select LAN_WAN_SUPPORT 
++ ++endchoice ++ ++config GE_RGMII_INTERNAL_P0_AN ++ bool ++ depends on GE2_INTERNAL_GPHY ++ default y if WAN_AT_P0 ++ ++config GE_RGMII_INTERNAL_P4_AN ++ bool ++ depends on GE2_INTERNAL_GPHY ++ default y if WAN_AT_P4 ++ ++config MAC_TO_GIGAPHY_MODE_ADDR2 ++ hex ++ default 0 if GE_RGMII_INTERNAL_P0_AN ++ default 4 if GE_RGMII_INTERNAL_P4_AN ++ depends on GE_RGMII_INTERNAL_P0_AN || GE_RGMII_INTERNAL_P4_AN ++ ++config MAC_TO_GIGAPHY_MODE_ADDR2 ++ hex "GE2 Phy Address" ++ default 0x1E ++ depends on GE2_MII_AN || GE2_RGMII_AN ++ ++#force 100M ++config RAETH_ROUTER ++bool ++default y if GE1_MII_FORCE_100 || GE2_MII_FORCE_100 || GE1_RVMII_FORCE_100 || GE2_RVMII_FORCE_100 ++ ++#force 1000M ++config MAC_TO_MAC_MODE ++bool ++default y if GE1_RGMII_FORCE_1000 || GE2_RGMII_FORCE_1000 ++depends on (RALINK_RT2880 || RALINK_RT3883) ++ ++#AN ++config GIGAPHY ++bool ++default y if GE1_RGMII_AN || GE2_RGMII_AN ++ ++#AN ++config 100PHY ++bool ++default y if GE1_MII_AN || GE2_MII_AN +--- /dev/null ++++ b/drivers/net/ethernet/raeth/Makefile +@@ -0,0 +1,7 @@ ++obj-$(CONFIG_RAETH) += raeth.o ++raeth-objs := ra_mac.o mii_mgr.o ++raeth-objs += raether_pdma.o ++EXTRA_CFLAGS += -DWORKQUEUE_BH ++#EXTRA_CFLAGS += -DCONFIG_RAETH_MULTIPLE_RX_RING ++ ++raeth-objs += raether.o +--- /dev/null ++++ b/drivers/net/ethernet/raeth/ethtool_readme.txt +@@ -0,0 +1,44 @@ ++ ++Ethtool readme for selecting different PHY address. ++ ++Before doing any ethtool command you should make sure the current PHY ++address is expected. The default PHY address is 1(port 1). ++ ++You can change current PHY address to X(0~4) by doing follow command: ++# echo X > /proc/rt2880/gmac ++ ++Ethtool command also would show the current PHY address as following. ++ ++# ethtool eth2 ++Settings for eth2: ++ Supported ports: [ TP MII ] ++ Supported link modes: 10baseT/Half 10baseT/Full ++ 100baseT/Half 100baseT/Full ++ Supports auto-negotiation: Yes ++ Advertised link modes: 10baseT/Half 10baseT/Full ++ 100baseT/Half 100baseT/Full ++ Advertised auto-negotiation: No ++ Speed: 10Mb/s ++ Duplex: Full ++ Port: MII ++ PHYAD: 1 ++ Transceiver: internal ++ Auto-negotiation: off ++ Current message level: 0x00000000 (0) ++ Link detected: no ++ ++ ++The "PHYAD" field shows the current PHY address. ++ ++ ++ ++Usage example ++1) show port1 info ++# echo 1 > /proc/rt2880/gmac # change phy address to 1 ++# ethtool eth2 ++ ++2) show port0 info ++# echo 0 > /proc/rt2880/gmac # change phy address to 0 ++# ethtool eth2 ++ ++ +--- /dev/null ++++ b/drivers/net/ethernet/raeth/mii_mgr.c +@@ -0,0 +1,166 @@ ++#include ++#include ++#include ++ ++#include ++#include ++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) ++#include ++#endif ++ ++#include "ra2882ethreg.h" ++#include "raether.h" ++ ++ ++#define PHY_CONTROL_0 0x0004 ++#define MDIO_PHY_CONTROL_0 (RALINK_ETH_SW_BASE + PHY_CONTROL_0) ++#define enable_mdio(x) ++ ++ ++u32 __mii_mgr_read(u32 phy_addr, u32 phy_register, u32 *read_data) ++{ ++ u32 volatile status = 0; ++ u32 rc = 0; ++ unsigned long volatile t_start = jiffies; ++ u32 volatile data = 0; ++ ++ /* We enable mdio gpio purpose register, and disable it when exit. 
*/ ++ enable_mdio(1); ++ ++ // make sure previous read operation is complete ++ while (1) { ++ // 0 : Read/write operation complete ++ if(!( sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31))) ++ { ++ break; ++ } ++ else if (time_after(jiffies, t_start + 5*HZ)) { ++ enable_mdio(0); ++ printk("\n MDIO Read operation is ongoing !!\n"); ++ return rc; ++ } ++ } ++ ++ data = (0x01 << 16) | (0x02 << 18) | (phy_addr << 20) | (phy_register << 25); ++ sysRegWrite(MDIO_PHY_CONTROL_0, data); ++ data |= (1<<31); ++ sysRegWrite(MDIO_PHY_CONTROL_0, data); ++ //printk("\n Set Command [0x%08X] to PHY !!\n",MDIO_PHY_CONTROL_0); ++ ++ ++ // make sure read operation is complete ++ t_start = jiffies; ++ while (1) { ++ if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31))) { ++ status = sysRegRead(MDIO_PHY_CONTROL_0); ++ *read_data = (u32)(status & 0x0000FFFF); ++ ++ enable_mdio(0); ++ return 1; ++ } ++ else if (time_after(jiffies, t_start+5*HZ)) { ++ enable_mdio(0); ++ printk("\n MDIO Read operation is ongoing and Time Out!!\n"); ++ return 0; ++ } ++ } ++} ++ ++u32 __mii_mgr_write(u32 phy_addr, u32 phy_register, u32 write_data) ++{ ++ unsigned long volatile t_start=jiffies; ++ u32 volatile data; ++ ++ enable_mdio(1); ++ ++ // make sure previous write operation is complete ++ while(1) { ++ if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31))) ++ { ++ break; ++ } ++ else if (time_after(jiffies, t_start + 5 * HZ)) { ++ enable_mdio(0); ++ printk("\n MDIO Write operation ongoing\n"); ++ return 0; ++ } ++ } ++ /*add 1 us delay to make sequencial write more robus*/ ++ udelay(1); ++ ++ data = (0x01 << 16)| (1<<18) | (phy_addr << 20) | (phy_register << 25) | write_data; ++ sysRegWrite(MDIO_PHY_CONTROL_0, data); ++ data |= (1<<31); ++ sysRegWrite(MDIO_PHY_CONTROL_0, data); //start operation ++ //printk("\n Set Command [0x%08X] to PHY !!\n",MDIO_PHY_CONTROL_0); ++ ++ t_start = jiffies; ++ ++ // make sure write operation is complete ++ while (1) { ++ if (!(sysRegRead(MDIO_PHY_CONTROL_0) & (0x1 << 31))) //0 : Read/write operation complete ++ { ++ enable_mdio(0); ++ return 1; ++ } ++ else if (time_after(jiffies, t_start + 5 * HZ)) { ++ enable_mdio(0); ++ printk("\n MDIO Write operation Time Out\n"); ++ return 0; ++ } ++ } ++} ++ ++u32 mii_mgr_read(u32 phy_addr, u32 phy_register, u32 *read_data) ++{ ++ u32 low_word; ++ u32 high_word; ++ if(phy_addr==31) ++ { ++ //phase1: write page address phase ++ if(__mii_mgr_write(phy_addr, 0x1f, ((phy_register >> 6) & 0x3FF))) { ++ //phase2: write address & read low word phase ++ if(__mii_mgr_read(phy_addr, (phy_register >> 2) & 0xF, &low_word)) { ++ //phase3: write address & read high word phase ++ if(__mii_mgr_read(phy_addr, (0x1 << 4), &high_word)) { ++ *read_data = (high_word << 16) | (low_word & 0xFFFF); ++ return 1; ++ } ++ } ++ } ++ } else ++ { ++ if(__mii_mgr_read(phy_addr, phy_register, read_data)) { ++ return 1; ++ } ++ } ++ ++ return 0; ++} ++ ++u32 mii_mgr_write(u32 phy_addr, u32 phy_register, u32 write_data) ++{ ++ if(phy_addr == 31) ++ { ++ //phase1: write page address phase ++ if(__mii_mgr_write(phy_addr, 0x1f, (phy_register >> 6) & 0x3FF)) { ++ //phase2: write address & read low word phase ++ if(__mii_mgr_write(phy_addr, ((phy_register >> 2) & 0xF), write_data & 0xFFFF)) { ++ //phase3: write address & read high word phase ++ if(__mii_mgr_write(phy_addr, (0x1 << 4), write_data >> 16)) { ++ return 1; ++ } ++ } ++ } ++ } else ++ { ++ if(__mii_mgr_write(phy_addr, phy_register, write_data)) { ++ return 1; ++ } ++ } ++ ++ return 0; ++} ++ ++EXPORT_SYMBOL(mii_mgr_write); 
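/*
 * Illustrative sketch, not part of the vendor patch: a read-modify-write
 * helper built on the mii_mgr_read()/mii_mgr_write() exports above. Using
 * phy_addr 31 selects the three-phase, 32-bit indirect access path that the
 * code above implements for switch registers; the helper name and the error
 * convention (-EIO on a failed MDIO transaction) are made up for
 * illustration.
 */
static int mt7530_reg_rmw(u32 reg, u32 mask, u32 val)
{
	u32 tmp;

	if (!mii_mgr_read(31, reg, &tmp))	/* returns 1 on success, 0 on timeout */
		return -EIO;

	tmp &= ~mask;
	tmp |= val & mask;

	if (!mii_mgr_write(31, reg, tmp))
		return -EIO;

	return 0;
}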
++EXPORT_SYMBOL(mii_mgr_read); +--- /dev/null ++++ b/drivers/net/ethernet/raeth/ra2882ethreg.h +@@ -0,0 +1,1268 @@ ++#ifndef RA2882ETHREG_H ++#define RA2882ETHREG_H ++ ++#include // for struct mii_if_info in ra2882ethreg.h ++#include /* check linux version for 2.4 and 2.6 compatibility */ ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ++#include ++#endif ++#include "raether.h" ++ ++#ifdef WORKQUEUE_BH ++#include ++#endif // WORKQUEUE_BH // ++#ifdef CONFIG_RAETH_LRO ++#include ++#endif ++ ++#define MAX_PACKET_SIZE 1514 ++#define MIN_PACKET_SIZE 60 ++ ++#define phys_to_bus(a) (a & 0x1FFFFFFF) ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) ++#define BIT(x) ((1 << x)) ++#endif ++#define ETHER_ADDR_LEN 6 ++ ++/* Phy Vender ID list */ ++ ++#define EV_ICPLUS_PHY_ID0 0x0243 ++#define EV_ICPLUS_PHY_ID1 0x0D90 ++#define EV_MARVELL_PHY_ID0 0x0141 ++#define EV_MARVELL_PHY_ID1 0x0CC2 ++#define EV_VTSS_PHY_ID0 0x0007 ++#define EV_VTSS_PHY_ID1 0x0421 ++ ++/* ++ FE_INT_STATUS ++*/ ++#if defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \ ++ defined (CONFIG_RALINK_MT7620) || defined (CONFIG_RALINK_MT7621) || defined (CONFIG_RALINK_MT7628) ++ ++#define RX_COHERENT BIT(31) ++#define RX_DLY_INT BIT(30) ++#define TX_COHERENT BIT(29) ++#define TX_DLY_INT BIT(28) ++ ++#define RX_DONE_INT1 BIT(17) ++#define RX_DONE_INT0 BIT(16) ++ ++#define TX_DONE_INT3 BIT(3) ++#define TX_DONE_INT2 BIT(2) ++#define TX_DONE_INT1 BIT(1) ++#define TX_DONE_INT0 BIT(0) ++ ++#if defined (CONFIG_RALINK_MT7621) ++#define RLS_COHERENT BIT(29) ++#define RLS_DLY_INT BIT(28) ++#define RLS_DONE_INT BIT(0) ++#endif ++ ++#else ++//#define CNT_PPE_AF BIT(31) ++//#define CNT_GDM_AF BIT(29) ++#define PSE_P2_FC BIT(26) ++#define GDM_CRC_DROP BIT(25) ++#define PSE_BUF_DROP BIT(24) ++#define GDM_OTHER_DROP BIT(23) ++#define PSE_P1_FC BIT(22) ++#define PSE_P0_FC BIT(21) ++#define PSE_FQ_EMPTY BIT(20) ++#define GE1_STA_CHG BIT(18) ++#define TX_COHERENT BIT(17) ++#define RX_COHERENT BIT(16) ++ ++#define TX_DONE_INT3 BIT(11) ++#define TX_DONE_INT2 BIT(10) ++#define TX_DONE_INT1 BIT(9) ++#define TX_DONE_INT0 BIT(8) ++#define RX_DONE_INT1 RX_DONE_INT0 ++#define RX_DONE_INT0 BIT(2) ++#define TX_DLY_INT BIT(1) ++#define RX_DLY_INT BIT(0) ++#endif ++ ++#define FE_INT_ALL (TX_DONE_INT3 | TX_DONE_INT2 | \ ++ TX_DONE_INT1 | TX_DONE_INT0 | \ ++ RX_DONE_INT0 ) ++ ++#if defined (CONFIG_RALINK_MT7621) ++#define QFE_INT_ALL (RLS_DONE_INT | RX_DONE_INT0 | RX_DONE_INT1) ++#define QFE_INT_DLY_INIT (RLS_DLY_INT | RX_DLY_INT) ++ ++#define NUM_QDMA_PAGE 256 ++#define QDMA_PAGE_SIZE 2048 ++#endif ++/* ++ * SW_INT_STATUS ++ */ ++#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628) ++#define PORT0_QUEUE_FULL BIT(14) //port0 queue full ++#define PORT1_QUEUE_FULL BIT(15) //port1 queue full ++#define PORT2_QUEUE_FULL BIT(16) //port2 queue full ++#define PORT3_QUEUE_FULL BIT(17) //port3 queue full ++#define PORT4_QUEUE_FULL BIT(18) //port4 queue full ++#define PORT5_QUEUE_FULL BIT(19) //port5 queue full ++#define PORT6_QUEUE_FULL BIT(20) //port6 queue full ++#define SHARED_QUEUE_FULL BIT(23) //shared queue full ++#define QUEUE_EXHAUSTED BIT(24) //global queue is used up and all packets are dropped ++#define BC_STROM BIT(25) //the device is undergoing broadcast storm ++#define PORT_ST_CHG BIT(26) //Port status change ++#define UNSECURED_ALERT BIT(27) //Intruder alert ++#define ABNORMAL_ALERT BIT(28) //Abnormal ++ ++#define ESW_ISR 
(RALINK_ETH_SW_BASE + 0x00) ++#define ESW_IMR (RALINK_ETH_SW_BASE + 0x04) ++#define ESW_INT_ALL (PORT_ST_CHG) ++ ++#elif defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \ ++ defined (CONFIG_RALINK_MT7620) ++#define MIB_INT BIT(25) ++#define ACL_INT BIT(24) ++#define P5_LINK_CH BIT(5) ++#define P4_LINK_CH BIT(4) ++#define P3_LINK_CH BIT(3) ++#define P2_LINK_CH BIT(2) ++#define P1_LINK_CH BIT(1) ++#define P0_LINK_CH BIT(0) ++ ++#define RX_GOCT_CNT BIT(4) ++#define RX_GOOD_CNT BIT(6) ++#define TX_GOCT_CNT BIT(17) ++#define TX_GOOD_CNT BIT(19) ++ ++#define MSK_RX_GOCT_CNT BIT(4) ++#define MSK_RX_GOOD_CNT BIT(6) ++#define MSK_TX_GOCT_CNT BIT(17) ++#define MSK_TX_GOOD_CNT BIT(19) ++#define MSK_CNT_INT_ALL (MSK_RX_GOCT_CNT | MSK_RX_GOOD_CNT | MSK_TX_GOCT_CNT | MSK_TX_GOOD_CNT) ++//#define MSK_CNT_INT_ALL (MSK_RX_GOOD_CNT | MSK_TX_GOOD_CNT) ++ ++ ++#define ESW_IMR (RALINK_ETH_SW_BASE + 0x7000 + 0x8) ++#define ESW_ISR (RALINK_ETH_SW_BASE + 0x7000 + 0xC) ++#define ESW_INT_ALL (P0_LINK_CH | P1_LINK_CH | P2_LINK_CH | P3_LINK_CH | P4_LINK_CH | P5_LINK_CH | ACL_INT | MIB_INT) ++#define ESW_AISR (RALINK_ETH_SW_BASE + 0x8) ++#define ESW_P0_IntSn (RALINK_ETH_SW_BASE + 0x4004) ++#define ESW_P1_IntSn (RALINK_ETH_SW_BASE + 0x4104) ++#define ESW_P2_IntSn (RALINK_ETH_SW_BASE + 0x4204) ++#define ESW_P3_IntSn (RALINK_ETH_SW_BASE + 0x4304) ++#define ESW_P4_IntSn (RALINK_ETH_SW_BASE + 0x4404) ++#define ESW_P5_IntSn (RALINK_ETH_SW_BASE + 0x4504) ++#define ESW_P6_IntSn (RALINK_ETH_SW_BASE + 0x4604) ++#define ESW_P0_IntMn (RALINK_ETH_SW_BASE + 0x4008) ++#define ESW_P1_IntMn (RALINK_ETH_SW_BASE + 0x4108) ++#define ESW_P2_IntMn (RALINK_ETH_SW_BASE + 0x4208) ++#define ESW_P3_IntMn (RALINK_ETH_SW_BASE + 0x4308) ++#define ESW_P4_IntMn (RALINK_ETH_SW_BASE + 0x4408) ++#define ESW_P5_IntMn (RALINK_ETH_SW_BASE + 0x4508) ++#define ESW_P6_IntMn (RALINK_ETH_SW_BASE + 0x4608) ++ ++#if defined (CONFIG_RALINK_MT7620) ++#define ESW_P7_IntSn (RALINK_ETH_SW_BASE + 0x4704) ++#define ESW_P7_IntMn (RALINK_ETH_SW_BASE + 0x4708) ++#endif ++ ++ ++#define ESW_PHY_POLLING (RALINK_ETH_SW_BASE + 0x7000) ++ ++#elif defined (CONFIG_RALINK_MT7621) ++ ++#define ESW_PHY_POLLING (RALINK_ETH_SW_BASE + 0x0000) ++ ++#define P5_LINK_CH BIT(5) ++#define P4_LINK_CH BIT(4) ++#define P3_LINK_CH BIT(3) ++#define P2_LINK_CH BIT(2) ++#define P1_LINK_CH BIT(1) ++#define P0_LINK_CH BIT(0) ++ ++ ++#endif // CONFIG_RALINK_RT3052 || CONFIG_RALINK_RT3352 || CONFIG_RALINK_RT5350 || defined (CONFIG_RALINK_MT7628)// ++ ++#define RX_BUF_ALLOC_SIZE 2000 ++#define FASTPATH_HEADROOM 64 ++ ++#define ETHER_BUFFER_ALIGN 32 ///// Align on a cache line ++ ++#define ETHER_ALIGNED_RX_SKB_ADDR(addr) \ ++ ((((unsigned long)(addr) + ETHER_BUFFER_ALIGN - 1) & \ ++ ~(ETHER_BUFFER_ALIGN - 1)) - (unsigned long)(addr)) ++ ++#ifdef CONFIG_PSEUDO_SUPPORT ++typedef struct _PSEUDO_ADAPTER { ++ struct net_device *RaethDev; ++ struct net_device *PseudoDev; ++ struct net_device_stats stat; ++#if defined (CONFIG_ETHTOOL) /*&& defined (CONFIG_RAETH_ROUTER)*/ ++ struct mii_if_info mii_info; ++#endif ++ ++} PSEUDO_ADAPTER, PPSEUDO_ADAPTER; ++ ++#define MAX_PSEUDO_ENTRY 1 ++#endif ++ ++ ++ ++/* Register Categories Definition */ ++#define RAFRAMEENGINE_OFFSET 0x0000 ++#define RAGDMA_OFFSET 0x0020 ++#define RAPSE_OFFSET 0x0040 ++#define RAGDMA2_OFFSET 0x0060 ++#define RACDMA_OFFSET 0x0080 ++#if defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \ ++ defined (CONFIG_RALINK_MT7620) || defined (CONFIG_RALINK_MT7621) || defined 
(CONFIG_RALINK_MT7628) ++ ++#define RAPDMA_OFFSET 0x0800 ++#define SDM_OFFSET 0x0C00 ++#else ++#define RAPDMA_OFFSET 0x0100 ++#endif ++#define RAPPE_OFFSET 0x0200 ++#define RACMTABLE_OFFSET 0x0400 ++#define RAPOLICYTABLE_OFFSET 0x1000 ++ ++ ++/* Register Map Detail */ ++/* RT3883 */ ++#define SYSCFG1 (RALINK_SYSCTL_BASE + 0x14) ++ ++#if defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628) ++ ++/* 1. PDMA */ ++#define TX_BASE_PTR0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x000) ++#define TX_MAX_CNT0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x004) ++#define TX_CTX_IDX0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x008) ++#define TX_DTX_IDX0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x00C) ++ ++#define TX_BASE_PTR1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x010) ++#define TX_MAX_CNT1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x014) ++#define TX_CTX_IDX1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x018) ++#define TX_DTX_IDX1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x01C) ++ ++#define TX_BASE_PTR2 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x020) ++#define TX_MAX_CNT2 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x024) ++#define TX_CTX_IDX2 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x028) ++#define TX_DTX_IDX2 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x02C) ++ ++#define TX_BASE_PTR3 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x030) ++#define TX_MAX_CNT3 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x034) ++#define TX_CTX_IDX3 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x038) ++#define TX_DTX_IDX3 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x03C) ++ ++#define RX_BASE_PTR0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x100) ++#define RX_MAX_CNT0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x104) ++#define RX_CALC_IDX0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x108) ++#define RX_DRX_IDX0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x10C) ++ ++#define RX_BASE_PTR1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x110) ++#define RX_MAX_CNT1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x114) ++#define RX_CALC_IDX1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x118) ++#define RX_DRX_IDX1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x11C) ++ ++#define PDMA_INFO (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x200) ++#define PDMA_GLO_CFG (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x204) ++#define PDMA_RST_IDX (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x208) ++#define PDMA_RST_CFG (PDMA_RST_IDX) ++#define DLY_INT_CFG (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x20C) ++#define FREEQ_THRES (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x210) ++#define INT_STATUS (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x220) ++#define FE_INT_STATUS (INT_STATUS) ++#define INT_MASK (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x228) ++#define FE_INT_ENABLE (INT_MASK) ++#define PDMA_WRR (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x280) ++#define PDMA_SCH_CFG (PDMA_WRR) ++ ++#define SDM_CON (RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x00) //Switch DMA configuration ++#define SDM_RRING (RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x04) //Switch DMA Rx Ring ++#define SDM_TRING (RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x08) //Switch DMA Tx Ring ++#define SDM_MAC_ADRL (RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x0C) //Switch MAC address LSB ++#define SDM_MAC_ADRH (RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x10) //Switch MAC Address MSB ++#define SDM_TPCNT (RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x100) //Switch DMA Tx packet count ++#define SDM_TBCNT (RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x104) //Switch DMA Tx byte count ++#define SDM_RPCNT (RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x108) //Switch DMA rx packet count ++#define SDM_RBCNT 
(RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x10C) //Switch DMA rx byte count ++#define SDM_CS_ERR (RALINK_FRAME_ENGINE_BASE+SDM_OFFSET+0x110) //Switch DMA rx checksum error count ++ ++#elif defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \ ++ defined (CONFIG_RALINK_MT7620) || defined (CONFIG_RALINK_MT7621) ++ ++/* Old FE with New PDMA */ ++#define PDMA_RELATED 0x0800 ++/* 1. PDMA */ ++#define TX_BASE_PTR0 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x000) ++#define TX_MAX_CNT0 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x004) ++#define TX_CTX_IDX0 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x008) ++#define TX_DTX_IDX0 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x00C) ++ ++#define TX_BASE_PTR1 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x010) ++#define TX_MAX_CNT1 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x014) ++#define TX_CTX_IDX1 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x018) ++#define TX_DTX_IDX1 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x01C) ++ ++#define TX_BASE_PTR2 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x020) ++#define TX_MAX_CNT2 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x024) ++#define TX_CTX_IDX2 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x028) ++#define TX_DTX_IDX2 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x02C) ++ ++#define TX_BASE_PTR3 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x030) ++#define TX_MAX_CNT3 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x034) ++#define TX_CTX_IDX3 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x038) ++#define TX_DTX_IDX3 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x03C) ++ ++#define RX_BASE_PTR0 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x100) ++#define RX_MAX_CNT0 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x104) ++#define RX_CALC_IDX0 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x108) ++#define RX_DRX_IDX0 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x10C) ++ ++#define RX_BASE_PTR1 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x110) ++#define RX_MAX_CNT1 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x114) ++#define RX_CALC_IDX1 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x118) ++#define RX_DRX_IDX1 (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x11C) ++ ++#define PDMA_INFO (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x200) ++#define PDMA_GLO_CFG (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x204) ++#define PDMA_RST_IDX (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x208) ++#define PDMA_RST_CFG (PDMA_RST_IDX) ++#define DLY_INT_CFG (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x20C) ++#define FREEQ_THRES (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x210) ++#define INT_STATUS (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x220) ++#define FE_INT_STATUS (INT_STATUS) ++#define INT_MASK (RALINK_FRAME_ENGINE_BASE + PDMA_RELATED+0x228) ++#define FE_INT_ENABLE (INT_MASK) ++#define SCH_Q01_CFG (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x280) ++#define SCH_Q23_CFG (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x284) ++ ++#define FE_GLO_CFG RALINK_FRAME_ENGINE_BASE + 0x00 ++#define FE_RST_GL RALINK_FRAME_ENGINE_BASE + 0x04 ++#define FE_INT_STATUS2 RALINK_FRAME_ENGINE_BASE + 0x08 ++#define FE_INT_ENABLE2 RALINK_FRAME_ENGINE_BASE + 0x0c ++//#define FC_DROP_STA RALINK_FRAME_ENGINE_BASE + 0x18 ++#define FOE_TS_T RALINK_FRAME_ENGINE_BASE + 0x10 ++ ++#if defined (CONFIG_RALINK_MT7620) ++#define GDMA1_RELATED 0x0600 ++#define GDMA1_FWD_CFG (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x00) ++#define GDMA1_SHPR_CFG (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x04) ++#define GDMA1_MAC_ADRL (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x08) ++#define GDMA1_MAC_ADRH (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x0C) ++#elif defined 
(CONFIG_RALINK_MT7621) ++#define GDMA1_RELATED 0x0500 ++#define GDMA1_FWD_CFG (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x00) ++#define GDMA1_SHPR_CFG (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x04) ++#define GDMA1_MAC_ADRL (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x08) ++#define GDMA1_MAC_ADRH (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x0C) ++ ++#define GDMA2_RELATED 0x1500 ++#define GDMA2_FWD_CFG (RALINK_FRAME_ENGINE_BASE + GDMA2_RELATED + 0x00) ++#define GDMA2_SHPR_CFG (RALINK_FRAME_ENGINE_BASE + GDMA2_RELATED + 0x04) ++#define GDMA2_MAC_ADRL (RALINK_FRAME_ENGINE_BASE + GDMA2_RELATED + 0x08) ++#define GDMA2_MAC_ADRH (RALINK_FRAME_ENGINE_BASE + GDMA2_RELATED + 0x0C) ++#else ++#define GDMA1_RELATED 0x0020 ++#define GDMA1_FWD_CFG (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x00) ++#define GDMA1_SCH_CFG (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x04) ++#define GDMA1_SHPR_CFG (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x08) ++#define GDMA1_MAC_ADRL (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x0C) ++#define GDMA1_MAC_ADRH (RALINK_FRAME_ENGINE_BASE + GDMA1_RELATED + 0x10) ++ ++#define GDMA2_RELATED 0x0060 ++#define GDMA2_FWD_CFG (RALINK_FRAME_ENGINE_BASE + GDMA2_RELATED + 0x00) ++#define GDMA2_SCH_CFG (RALINK_FRAME_ENGINE_BASE + GDMA2_RELATED + 0x04) ++#define GDMA2_SHPR_CFG (RALINK_FRAME_ENGINE_BASE + GDMA2_RELATED + 0x08) ++#define GDMA2_MAC_ADRL (RALINK_FRAME_ENGINE_BASE + GDMA2_RELATED + 0x0C) ++#define GDMA2_MAC_ADRH (RALINK_FRAME_ENGINE_BASE + GDMA2_RELATED + 0x10) ++#endif ++ ++#if defined (CONFIG_RALINK_MT7620) ++#define PSE_RELATED 0x0500 ++#define PSE_FQFC_CFG (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x00) ++#define PSE_IQ_CFG (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x04) ++#define PSE_QUE_STA (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x08) ++#else ++#define PSE_RELATED 0x0040 ++#define PSE_FQ_CFG (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x00) ++#define CDMA_FC_CFG (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x04) ++#define GDMA1_FC_CFG (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x08) ++#define GDMA2_FC_CFG (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x0C) ++#define CDMA_OQ_STA (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x10) ++#define GDMA1_OQ_STA (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x14) ++#define GDMA2_OQ_STA (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x18) ++#define PSE_IQ_STA (RALINK_FRAME_ENGINE_BASE + PSE_RELATED + 0x1C) ++#endif ++ ++ ++#if defined (CONFIG_RALINK_MT7620) ++#define CDMA_RELATED 0x0400 ++#define CDMA_CSG_CFG (RALINK_FRAME_ENGINE_BASE + CDMA_RELATED + 0x00) ++#define SMACCR0 (RALINK_ETH_SW_BASE + 0x3FE4) ++#define SMACCR1 (RALINK_ETH_SW_BASE + 0x3FE8) ++#define CKGCR (RALINK_ETH_SW_BASE + 0x3FF0) ++#elif defined (CONFIG_RALINK_MT7621) ++#define CDMA_RELATED 0x0400 ++#define CDMA_CSG_CFG (RALINK_FRAME_ENGINE_BASE + CDMA_RELATED + 0x00) //fake definition ++#define CDMP_IG_CTRL (RALINK_FRAME_ENGINE_BASE + CDMA_RELATED + 0x00) ++#define CDMP_EG_CTRL (RALINK_FRAME_ENGINE_BASE + CDMA_RELATED + 0x04) ++#else ++#define CDMA_RELATED 0x0080 ++#define CDMA_CSG_CFG (RALINK_FRAME_ENGINE_BASE + CDMA_RELATED + 0x00) ++#define CDMA_SCH_CFG (RALINK_FRAME_ENGINE_BASE + CDMA_RELATED + 0x04) ++#define SMACCR0 (RALINK_ETH_SW_BASE + 0x30E4) ++#define SMACCR1 (RALINK_ETH_SW_BASE + 0x30E8) ++#define CKGCR (RALINK_ETH_SW_BASE + 0x30F0) ++#endif ++ ++#define PDMA_FC_CFG (RALINK_FRAME_ENGINE_BASE+0x100) ++ ++ ++#if defined (CONFIG_RALINK_MT7621) ++/*kurtis: add QDMA define*/ ++ ++#define CLK_CFG_0 (RALINK_SYSCTL_BASE + 0x2C) ++#define PAD_RGMII2_MDIO_CFG 
(RALINK_SYSCTL_BASE + 0x58) ++ ++#define QDMA_RELATED 0x1800 ++#define QTX_CFG_0 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x000) ++#define QTX_SCH_0 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x004) ++#define QTX_HEAD_0 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x008) ++#define QTX_TAIL_0 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x00C) ++#define QTX_CFG_1 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x010) ++#define QTX_SCH_1 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x014) ++#define QTX_HEAD_1 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x018) ++#define QTX_TAIL_1 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x01C) ++#define QTX_CFG_2 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x020) ++#define QTX_SCH_2 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x024) ++#define QTX_HEAD_2 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x028) ++#define QTX_TAIL_2 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x02C) ++#define QTX_CFG_3 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x030) ++#define QTX_SCH_3 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x034) ++#define QTX_HEAD_3 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x038) ++#define QTX_TAIL_3 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x03C) ++#define QTX_CFG_4 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x040) ++#define QTX_SCH_4 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x044) ++#define QTX_HEAD_4 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x048) ++#define QTX_TAIL_4 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x04C) ++#define QTX_CFG_5 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x050) ++#define QTX_SCH_5 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x054) ++#define QTX_HEAD_5 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x058) ++#define QTX_TAIL_5 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x05C) ++#define QTX_CFG_6 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x060) ++#define QTX_SCH_6 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x064) ++#define QTX_HEAD_6 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x068) ++#define QTX_TAIL_6 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x06C) ++#define QTX_CFG_7 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x070) ++#define QTX_SCH_7 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x074) ++#define QTX_HEAD_7 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x078) ++#define QTX_TAIL_7 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x07C) ++#define QTX_CFG_8 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x080) ++#define QTX_SCH_8 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x084) ++#define QTX_HEAD_8 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x088) ++#define QTX_TAIL_8 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x08C) ++#define QTX_CFG_9 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x090) ++#define QTX_SCH_9 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x094) ++#define QTX_HEAD_9 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x098) ++#define QTX_TAIL_9 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x09C) ++#define QTX_CFG_10 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0A0) ++#define QTX_SCH_10 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0A4) ++#define QTX_HEAD_10 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0A8) ++#define QTX_TAIL_10 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0AC) ++#define QTX_CFG_11 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0B0) ++#define QTX_SCH_11 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0B4) ++#define QTX_HEAD_11 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0B8) ++#define QTX_TAIL_11 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0BC) ++#define QTX_CFG_12 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0C0) ++#define QTX_SCH_12 
(RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0C4) ++#define QTX_HEAD_12 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0C8) ++#define QTX_TAIL_12 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0CC) ++#define QTX_CFG_13 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0D0) ++#define QTX_SCH_13 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0D4) ++#define QTX_HEAD_13 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0D8) ++#define QTX_TAIL_13 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0DC) ++#define QTX_CFG_14 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0E0) ++#define QTX_SCH_14 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0E4) ++#define QTX_HEAD_14 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0E8) ++#define QTX_TAIL_14 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0EC) ++#define QTX_CFG_15 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0F0) ++#define QTX_SCH_15 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0F4) ++#define QTX_HEAD_15 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0F8) ++#define QTX_TAIL_15 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x0FC) ++#define QRX_BASE_PTR_0 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x100) ++#define QRX_MAX_CNT_0 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x104) ++#define QRX_CRX_IDX_0 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x108) ++#define QRX_DRX_IDX_0 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x10C) ++#define QRX_BASE_PTR_1 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x110) ++#define QRX_MAX_CNT_1 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x114) ++#define QRX_CRX_IDX_1 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x118) ++#define QRX_DRX_IDX_1 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x11C) ++#define QDMA_INFO (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x200) ++#define QDMA_GLO_CFG (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x204) ++#define QDMA_RST_IDX (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x208) ++#define QDMA_RST_CFG (QDMA_RST_IDX) ++#define QDMA_DELAY_INT (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x20C) ++#define QDMA_FC_THRES (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x210) ++#define QDMA_TX_SCH (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x214) ++#define QDMA_INT_STS (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x218) ++#define QFE_INT_STATUS (QDMA_INT_STS) ++#define QDMA_INT_MASK (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x21C) ++#define QFE_INT_ENABLE (QDMA_INT_MASK) ++#define QDMA_TRTCM (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x220) ++#define QDMA_DATA0 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x224) ++#define QDMA_DATA1 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x228) ++#define QDMA_RED_THRES (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x22C) ++#define QDMA_TEST (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x230) ++#define QDMA_DMA (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x234) ++#define QDMA_BMU (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x238) ++#define QDMA_HRED1 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x240) ++#define QDMA_HRED2 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x244) ++#define QDMA_SRED1 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x248) ++#define QDMA_SRED2 (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x24C) ++#define QTX_CTX_PTR (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x300) ++#define QTX_DTX_PTR (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x304) ++#define QTX_FWD_CNT (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x308) ++#define QTX_CRX_PTR (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x310) ++#define QTX_DRX_PTR (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x314) ++#define QTX_RLS_CNT (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 
0x318) ++#define QDMA_FQ_HEAD (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x320) ++#define QDMA_FQ_TAIL (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x324) ++#define QDMA_FQ_CNT (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x328) ++#define QDMA_FQ_BLEN (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x32C) ++#define QTX_Q0MIN_BK (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x350) ++#define QTX_Q1MIN_BK (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x354) ++#define QTX_Q2MIN_BK (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x358) ++#define QTX_Q3MIN_BK (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x35C) ++#define QTX_Q0MAX_BK (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x360) ++#define QTX_Q1MAX_BK (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x364) ++#define QTX_Q2MAX_BK (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x368) ++#define QTX_Q3MAX_BK (RALINK_FRAME_ENGINE_BASE + QDMA_RELATED + 0x36C) ++ ++ ++#endif/*MT7621 QDMA*/ ++ ++#else ++ ++/* 1. Frame Engine Global Registers */ ++#define MDIO_ACCESS (RALINK_FRAME_ENGINE_BASE+RAFRAMEENGINE_OFFSET+0x00) ++#define MDIO_CFG (RALINK_FRAME_ENGINE_BASE+RAFRAMEENGINE_OFFSET+0x04) ++#define FE_GLO_CFG (RALINK_FRAME_ENGINE_BASE+RAFRAMEENGINE_OFFSET+0x08) ++#define FE_RST_GL (RALINK_FRAME_ENGINE_BASE+RAFRAMEENGINE_OFFSET+0x0C) ++#define FE_INT_STATUS (RALINK_FRAME_ENGINE_BASE+RAFRAMEENGINE_OFFSET+0x10) ++#define FE_INT_ENABLE (RALINK_FRAME_ENGINE_BASE+RAFRAMEENGINE_OFFSET+0x14) ++#define MDIO_CFG2 (RALINK_FRAME_ENGINE_BASE+RAFRAMEENGINE_OFFSET+0x18) //Original:FC_DROP_STA ++#define FOC_TS_T (RALINK_FRAME_ENGINE_BASE+RAFRAMEENGINE_OFFSET+0x1C) ++ ++ ++/* 2. GDMA Registers */ ++#define GDMA1_FWD_CFG (RALINK_FRAME_ENGINE_BASE+RAGDMA_OFFSET+0x00) ++#define GDMA1_SCH_CFG (RALINK_FRAME_ENGINE_BASE+RAGDMA_OFFSET+0x04) ++#define GDMA1_SHPR_CFG (RALINK_FRAME_ENGINE_BASE+RAGDMA_OFFSET+0x08) ++#define GDMA1_MAC_ADRL (RALINK_FRAME_ENGINE_BASE+RAGDMA_OFFSET+0x0C) ++#define GDMA1_MAC_ADRH (RALINK_FRAME_ENGINE_BASE+RAGDMA_OFFSET+0x10) ++ ++#define GDMA2_FWD_CFG (RALINK_FRAME_ENGINE_BASE+RAGDMA2_OFFSET+0x00) ++#define GDMA2_SCH_CFG (RALINK_FRAME_ENGINE_BASE+RAGDMA2_OFFSET+0x04) ++#define GDMA2_SHPR_CFG (RALINK_FRAME_ENGINE_BASE+RAGDMA2_OFFSET+0x08) ++#define GDMA2_MAC_ADRL (RALINK_FRAME_ENGINE_BASE+RAGDMA2_OFFSET+0x0C) ++#define GDMA2_MAC_ADRH (RALINK_FRAME_ENGINE_BASE+RAGDMA2_OFFSET+0x10) ++ ++/* 3. PSE */ ++#define PSE_FQ_CFG (RALINK_FRAME_ENGINE_BASE+RAPSE_OFFSET+0x00) ++#define CDMA_FC_CFG (RALINK_FRAME_ENGINE_BASE+RAPSE_OFFSET+0x04) ++#define GDMA1_FC_CFG (RALINK_FRAME_ENGINE_BASE+RAPSE_OFFSET+0x08) ++#define GDMA2_FC_CFG (RALINK_FRAME_ENGINE_BASE+RAPSE_OFFSET+0x0C) ++#define PDMA_FC_CFG (RALINK_FRAME_ENGINE_BASE+0x1f0) ++ ++/* 4. CDMA */ ++#define CDMA_CSG_CFG (RALINK_FRAME_ENGINE_BASE+RACDMA_OFFSET+0x00) ++#define CDMA_SCH_CFG (RALINK_FRAME_ENGINE_BASE+RACDMA_OFFSET+0x04) ++/* skip ppoe sid and vlan id definition */ ++ ++ ++/* 5. 
PDMA */ ++#define PDMA_GLO_CFG (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x00) ++#define PDMA_RST_CFG (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x04) ++#define PDMA_SCH_CFG (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x08) ++ ++#define DLY_INT_CFG (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x0C) ++ ++#define TX_BASE_PTR0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x10) ++#define TX_MAX_CNT0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x14) ++#define TX_CTX_IDX0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x18) ++#define TX_DTX_IDX0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x1C) ++ ++#define TX_BASE_PTR1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x20) ++#define TX_MAX_CNT1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x24) ++#define TX_CTX_IDX1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x28) ++#define TX_DTX_IDX1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x2C) ++ ++#define TX_BASE_PTR2 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x40) ++#define TX_MAX_CNT2 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x44) ++#define TX_CTX_IDX2 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x48) ++#define TX_DTX_IDX2 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x4C) ++ ++#define TX_BASE_PTR3 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x50) ++#define TX_MAX_CNT3 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x54) ++#define TX_CTX_IDX3 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x58) ++#define TX_DTX_IDX3 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x5C) ++ ++#define RX_BASE_PTR0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x30) ++#define RX_MAX_CNT0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x34) ++#define RX_CALC_IDX0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x38) ++#define RX_DRX_IDX0 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x3C) ++ ++#define RX_BASE_PTR1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x40) ++#define RX_MAX_CNT1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x44) ++#define RX_CALC_IDX1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x48) ++#define RX_DRX_IDX1 (RALINK_FRAME_ENGINE_BASE+RAPDMA_OFFSET+0x4C) ++ ++#endif ++ ++#define DELAY_INT_INIT 0x84048404 ++#define FE_INT_DLY_INIT (TX_DLY_INT | RX_DLY_INT) ++ ++ ++#if !defined (CONFIG_RALINK_RT5350) && !defined (CONFIG_RALINK_MT7628) ++ ++/* 6. Counter and Meter Table */ ++#define PPE_AC_BCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x000) /* PPE Accounting Group 0 Byte Cnt */ ++#define PPE_AC_PCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x004) /* PPE Accounting Group 0 Packet Cnt */ ++/* 0 ~ 63 */ ++ ++#define PPE_MTR_CNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x200) /* 0 ~ 63 */ ++/* skip... 
*/ ++#define PPE_MTR_CNT63 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x2FC) ++ ++#define GDMA_TX_GBCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x300) /* Transmit good byte cnt for GEport */ ++#define GDMA_TX_GPCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x304) /* Transmit good pkt cnt for GEport */ ++#define GDMA_TX_SKIPCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x308) /* Transmit skip cnt for GEport */ ++#define GDMA_TX_COLCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x30C) /* Transmit collision cnt for GEport */ ++ ++/* update these address mapping to fit data sheet v0.26, by bobtseng, 2007.6.14 */ ++#define GDMA_RX_GBCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x320) ++#define GDMA_RX_GPCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x324) ++#define GDMA_RX_OERCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x328) ++#define GDMA_RX_FERCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x32C) ++#define GDMA_RX_SERCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x330) ++#define GDMA_RX_LERCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x334) ++#define GDMA_RX_CERCNT0 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x338) ++#define GDMA_RX_FCCNT1 (RALINK_FRAME_ENGINE_BASE+RACMTABLE_OFFSET+0x33C) ++ ++#endif ++ ++ ++/* Per Port Packet Counts in RT3052, added by bobtseng 2009.4.17. */ ++#define PORT0_PKCOUNT (0xb01100e8) ++#define PORT1_PKCOUNT (0xb01100ec) ++#define PORT2_PKCOUNT (0xb01100f0) ++#define PORT3_PKCOUNT (0xb01100f4) ++#define PORT4_PKCOUNT (0xb01100f8) ++#define PORT5_PKCOUNT (0xb01100fc) ++ ++ ++// PHYS_TO_K1 ++#define PHYS_TO_K1(physaddr) KSEG1ADDR(physaddr) ++ ++ ++#define sysRegRead(phys) \ ++ (*(volatile unsigned int *)PHYS_TO_K1(phys)) ++ ++#define sysRegWrite(phys, val) \ ++ ((*(volatile unsigned int *)PHYS_TO_K1(phys)) = (val)) ++ ++#define u_long unsigned long ++#define u32 unsigned int ++#define u16 unsigned short ++ ++ ++/* ====================================== */ ++#define GDM1_DISPAD BIT(18) ++#define GDM1_DISCRC BIT(17) ++ ++//GDMA1 uni-cast frames destination port ++#define GDM1_ICS_EN (0x1 << 22) ++#define GDM1_TCS_EN (0x1 << 21) ++#define GDM1_UCS_EN (0x1 << 20) ++#define GDM1_JMB_EN (0x1 << 19) ++#define GDM1_STRPCRC (0x1 << 16) ++#define GDM1_UFRC_P_CPU (0 << 12) ++#if defined (CONFIG_RALINK_MT7621) ++#define GDM1_UFRC_P_PPE (4 << 12) ++#else ++#define GDM1_UFRC_P_PPE (6 << 12) ++#endif ++ ++//GDMA1 broad-cast MAC address frames ++#define GDM1_BFRC_P_CPU (0 << 8) ++#if defined (CONFIG_RALINK_MT7621) ++#define GDM1_BFRC_P_PPE (4 << 8) ++#else ++#define GDM1_BFRC_P_PPE (6 << 8) ++#endif ++ ++//GDMA1 multi-cast MAC address frames ++#define GDM1_MFRC_P_CPU (0 << 4) ++#if defined (CONFIG_RALINK_MT7621) ++#define GDM1_MFRC_P_PPE (4 << 4) ++#else ++#define GDM1_MFRC_P_PPE (6 << 4) ++#endif ++ ++//GDMA1 other MAC address frames destination port ++#define GDM1_OFRC_P_CPU (0 << 0) ++#if defined (CONFIG_RALINK_MT7621) ++#define GDM1_OFRC_P_PPE (4 << 0) ++#else ++#define GDM1_OFRC_P_PPE (6 << 0) ++#endif ++ ++#if defined (CONFIG_RALINK_RT6856) || defined (CONFIG_RALINK_MT7620) || defined (CONFIG_RALINK_MT7621) ++/* checksum generator registers are removed */ ++#define ICS_GEN_EN (0 << 2) ++#define UCS_GEN_EN (0 << 1) ++#define TCS_GEN_EN (0 << 0) ++#else ++#define ICS_GEN_EN (1 << 2) ++#define UCS_GEN_EN (1 << 1) ++#define TCS_GEN_EN (1 << 0) ++#endif ++ ++// MDIO_CFG bit ++#define MDIO_CFG_GP1_FC_TX (1 << 11) ++#define MDIO_CFG_GP1_FC_RX (1 << 10) ++ ++/* ====================================== */ ++/* ====================================== */ ++#define 
GP1_LNK_DWN BIT(9) ++#define GP1_AN_FAIL BIT(8) ++/* ====================================== */ ++/* ====================================== */ ++#define PSE_RESET BIT(0) ++/* ====================================== */ ++#define PST_DRX_IDX1 BIT(17) ++#define PST_DRX_IDX0 BIT(16) ++#define PST_DTX_IDX3 BIT(3) ++#define PST_DTX_IDX2 BIT(2) ++#define PST_DTX_IDX1 BIT(1) ++#define PST_DTX_IDX0 BIT(0) ++ ++#define RX_2B_OFFSET BIT(31) ++#define DESC_32B_EN BIT(8) ++#define TX_WB_DDONE BIT(6) ++#define RX_DMA_BUSY BIT(3) ++#define TX_DMA_BUSY BIT(1) ++#define RX_DMA_EN BIT(2) ++#define TX_DMA_EN BIT(0) ++ ++#define PDMA_BT_SIZE_4DWORDS (0<<4) ++#define PDMA_BT_SIZE_8DWORDS (1<<4) ++#define PDMA_BT_SIZE_16DWORDS (2<<4) ++#define PDMA_BT_SIZE_32DWORDS (3<<4) ++ ++/* Register bits. ++ */ ++ ++#define MACCFG_RXEN (1<<2) ++#define MACCFG_TXEN (1<<3) ++#define MACCFG_PROMISC (1<<18) ++#define MACCFG_RXMCAST (1<<19) ++#define MACCFG_FDUPLEX (1<<20) ++#define MACCFG_PORTSEL (1<<27) ++#define MACCFG_HBEATDIS (1<<28) ++ ++ ++#define DMACTL_SR (1<<1) /* Start/Stop Receive */ ++#define DMACTL_ST (1<<13) /* Start/Stop Transmission Command */ ++ ++#define DMACFG_SWR (1<<0) /* Software Reset */ ++#define DMACFG_BURST32 (32<<8) ++ ++#define DMASTAT_TS 0x00700000 /* Transmit Process State */ ++#define DMASTAT_RS 0x000e0000 /* Receive Process State */ ++ ++#define MACCFG_INIT 0 //(MACCFG_FDUPLEX) // | MACCFG_PORTSEL) ++ ++ ++ ++/* Descriptor bits. ++ */ ++#define R_OWN 0x80000000 /* Own Bit */ ++#define RD_RER 0x02000000 /* Receive End Of Ring */ ++#define RD_LS 0x00000100 /* Last Descriptor */ ++#define RD_ES 0x00008000 /* Error Summary */ ++#define RD_CHAIN 0x01000000 /* Chained */ ++ ++/* Word 0 */ ++#define T_OWN 0x80000000 /* Own Bit */ ++#define TD_ES 0x00008000 /* Error Summary */ ++ ++/* Word 1 */ ++#define TD_LS 0x40000000 /* Last Segment */ ++#define TD_FS 0x20000000 /* First Segment */ ++#define TD_TER 0x08000000 /* Transmit End Of Ring */ ++#define TD_CHAIN 0x01000000 /* Chained */ ++ ++ ++#define TD_SET 0x08000000 /* Setup Packet */ ++ ++ ++#define POLL_DEMAND 1 ++ ++#define RSTCTL (0x34) ++#define RSTCTL_RSTENET1 (1<<19) ++#define RSTCTL_RSTENET2 (1<<20) ++ ++#define INIT_VALUE_OF_RT2883_PSE_FQ_CFG 0xff908000 ++#define INIT_VALUE_OF_PSE_FQFC_CFG 0x80504000 ++#define INIT_VALUE_OF_FORCE_100_FD 0x1001BC01 ++#define INIT_VALUE_OF_FORCE_1000_FD 0x1F01DC01 ++ ++// Define Whole FE Reset Register ++#define RSTCTRL (RALINK_SYSCTL_BASE + 0x34) ++ ++/*========================================= ++ PDMA RX Descriptor Format define ++=========================================*/ ++ ++//------------------------------------------------- ++typedef struct _PDMA_RXD_INFO1_ PDMA_RXD_INFO1_T; ++ ++struct _PDMA_RXD_INFO1_ ++{ ++ unsigned int PDP0; ++}; ++//------------------------------------------------- ++typedef struct _PDMA_RXD_INFO2_ PDMA_RXD_INFO2_T; ++ ++struct _PDMA_RXD_INFO2_ ++{ ++ unsigned int PLEN1 : 14; ++ unsigned int LS1 : 1; ++ unsigned int TAG : 1; ++ unsigned int PLEN0 : 14; ++ unsigned int LS0 : 1; ++ unsigned int DDONE_bit : 1; ++}; ++//------------------------------------------------- ++typedef struct _PDMA_RXD_INFO3_ PDMA_RXD_INFO3_T; ++ ++struct _PDMA_RXD_INFO3_ ++{ ++ unsigned int VID:16; ++ unsigned int TPID:16; ++}; ++//------------------------------------------------- ++typedef struct _PDMA_RXD_INFO4_ PDMA_RXD_INFO4_T; ++ ++struct _PDMA_RXD_INFO4_ ++{ ++#if defined (CONFIG_RALINK_MT7620) ++ unsigned int FOE_Entry : 14; ++ unsigned int CRSN : 5; ++ unsigned int SPORT : 3; ++ unsigned int L4F : 1; ++ 
unsigned int L4VLD : 1; ++ unsigned int TACK : 1; ++ unsigned int IP4F : 1; ++ unsigned int IP4 : 1; ++ unsigned int IP6 : 1; ++ unsigned int UN_USE1 : 4; ++#elif defined (CONFIG_RALINK_MT7621) ++ unsigned int FOE_Entry : 14; ++ unsigned int CRSN : 5; ++ unsigned int SP : 4; ++ unsigned int L4F : 1; ++ unsigned int L4VLD : 1; ++ unsigned int TACK : 1; ++ unsigned int IP4F : 1; ++ unsigned int IP4 : 1; ++ unsigned int IP6 : 1; ++ unsigned int UN_USE1 : 3; ++#else ++ unsigned int FOE_Entry : 14; ++ unsigned int FVLD : 1; ++ unsigned int UN_USE1 : 1; ++ unsigned int AI : 8; ++ unsigned int SP : 3; ++ unsigned int AIS : 1; ++ unsigned int L4F : 1; ++ unsigned int IPF : 1; ++ unsigned int L4FVLD_bit : 1; ++ unsigned int IPFVLD_bit : 1; ++#endif ++}; ++ ++ ++struct PDMA_rxdesc { ++ PDMA_RXD_INFO1_T rxd_info1; ++ PDMA_RXD_INFO2_T rxd_info2; ++ PDMA_RXD_INFO3_T rxd_info3; ++ PDMA_RXD_INFO4_T rxd_info4; ++#ifdef CONFIG_32B_DESC ++ unsigned int rxd_info5; ++ unsigned int rxd_info6; ++ unsigned int rxd_info7; ++ unsigned int rxd_info8; ++#endif ++}; ++ ++/*========================================= ++ PDMA TX Descriptor Format define ++=========================================*/ ++//------------------------------------------------- ++typedef struct _PDMA_TXD_INFO1_ PDMA_TXD_INFO1_T; ++ ++struct _PDMA_TXD_INFO1_ ++{ ++ unsigned int SDP0; ++}; ++//------------------------------------------------- ++typedef struct _PDMA_TXD_INFO2_ PDMA_TXD_INFO2_T; ++ ++struct _PDMA_TXD_INFO2_ ++{ ++ unsigned int SDL1 : 14; ++ unsigned int LS1_bit : 1; ++ unsigned int BURST_bit : 1; ++ unsigned int SDL0 : 14; ++ unsigned int LS0_bit : 1; ++ unsigned int DDONE_bit : 1; ++}; ++//------------------------------------------------- ++typedef struct _PDMA_TXD_INFO3_ PDMA_TXD_INFO3_T; ++ ++struct _PDMA_TXD_INFO3_ ++{ ++ unsigned int SDP1; ++}; ++//------------------------------------------------- ++typedef struct _PDMA_TXD_INFO4_ PDMA_TXD_INFO4_T; ++ ++struct _PDMA_TXD_INFO4_ ++{ ++#if defined (CONFIG_RALINK_MT7620) ++ unsigned int VPRI_VIDX : 8; ++ unsigned int SIDX : 4; ++ unsigned int INSP : 1; ++ unsigned int RESV : 2; ++ unsigned int UDF : 5; ++ unsigned int FP_BMAP : 8; ++ unsigned int TSO : 1; ++ unsigned int TUI_CO : 3; ++#elif defined (CONFIG_RALINK_MT7621) ++ unsigned int VLAN_TAG :17; // INSV(1)+VPRI(3)+CFI(1)+VID(12) ++ unsigned int RESV : 2; ++ unsigned int UDF : 6; ++ unsigned int FPORT : 3; ++ unsigned int TSO : 1; ++ unsigned int TUI_CO : 3; ++#else ++ unsigned int VPRI_VIDX : 8; ++ unsigned int SIDX : 4; ++ unsigned int INSP : 1; ++ unsigned int RESV : 1; ++ unsigned int UN_USE3 : 2; ++ unsigned int QN : 3; ++ unsigned int UN_USE2 : 1; ++ unsigned int UDF : 4; ++ unsigned int PN : 3; ++ unsigned int UN_USE1 : 1; ++ unsigned int TSO : 1; ++ unsigned int TUI_CO : 3; ++#endif ++}; ++ ++ ++struct PDMA_txdesc { ++ PDMA_TXD_INFO1_T txd_info1; ++ PDMA_TXD_INFO2_T txd_info2; ++ PDMA_TXD_INFO3_T txd_info3; ++ PDMA_TXD_INFO4_T txd_info4; ++#ifdef CONFIG_32B_DESC ++ unsigned int txd_info5; ++ unsigned int txd_info6; ++ unsigned int txd_info7; ++ unsigned int txd_info8; ++#endif ++}; ++ ++ ++#if defined (CONFIG_RALINK_MT7621) ++/*========================================= ++ QDMA TX Descriptor Format define ++=========================================*/ ++//------------------------------------------------- ++typedef struct _QDMA_TXD_INFO1_ QDMA_TXD_INFO1_T; ++ ++struct _QDMA_TXD_INFO1_ ++{ ++ unsigned int SDP; ++}; ++//------------------------------------------------- ++typedef struct _QDMA_TXD_INFO2_ QDMA_TXD_INFO2_T; ++ 
++struct _QDMA_TXD_INFO2_ ++{ ++ unsigned int NDP; ++}; ++//------------------------------------------------- ++typedef struct _QDMA_TXD_INFO3_ QDMA_TXD_INFO3_T; ++ ++struct _QDMA_TXD_INFO3_ ++{ ++ unsigned int QID : 4; ++ unsigned int RESV : 10; ++ unsigned int SWC_bit : 1; ++ unsigned int BURST_bit : 1; ++ unsigned int SDL : 14; ++ unsigned int LS_bit : 1; ++ unsigned int OWN_bit : 1; ++}; ++//------------------------------------------------- ++typedef struct _QDMA_TXD_INFO4_ QDMA_TXD_INFO4_T; ++ ++struct _QDMA_TXD_INFO4_ ++{ ++ unsigned int VLAN_TAG :17; // INSV(1)+VPRI(3)+CFI(1)+VID(12) ++ unsigned int RESV : 2; ++ unsigned int UDF : 6; ++ unsigned int FPORT : 3; ++ unsigned int TSO : 1; ++ unsigned int TUI_CO : 3; ++}; ++ ++ ++struct QDMA_txdesc { ++ QDMA_TXD_INFO1_T txd_info1; ++ QDMA_TXD_INFO2_T txd_info2; ++ QDMA_TXD_INFO3_T txd_info3; ++ QDMA_TXD_INFO4_T txd_info4; ++#ifdef CONFIG_32B_DESC ++ unsigned int txd_info5; ++ unsigned int txd_info6; ++ unsigned int txd_info7; ++ unsigned int txd_info8; ++#endif ++}; ++#endif ++ ++#define phys_to_bus(a) (a & 0x1FFFFFFF) ++ ++#define PHY_Enable_Auto_Nego 0x1000 ++#define PHY_Restart_Auto_Nego 0x0200 ++ ++/* PHY_STAT_REG = 1; */ ++#define PHY_Auto_Neco_Comp 0x0020 ++#define PHY_Link_Status 0x0004 ++ ++/* PHY_AUTO_NEGO_REG = 4; */ ++#define PHY_Cap_10_Half 0x0020 ++#define PHY_Cap_10_Full 0x0040 ++#define PHY_Cap_100_Half 0x0080 ++#define PHY_Cap_100_Full 0x0100 ++ ++/* proc definition */ ++ ++#if !defined (CONFIG_RALINK_RT6855) && !defined(CONFIG_RALINK_RT6855A) && \ ++ !defined (CONFIG_RALINK_MT7620) && !defined (CONFIG_RALINK_MT7621) ++#define CDMA_OQ_STA (RALINK_FRAME_ENGINE_BASE+RAPSE_OFFSET+0x4c) ++#define GDMA1_OQ_STA (RALINK_FRAME_ENGINE_BASE+RAPSE_OFFSET+0x50) ++#define PPE_OQ_STA (RALINK_FRAME_ENGINE_BASE+RAPSE_OFFSET+0x54) ++#define PSE_IQ_STA (RALINK_FRAME_ENGINE_BASE+RAPSE_OFFSET+0x58) ++#endif ++ ++#define PROCREG_CONTROL_FILE "/var/run/procreg_control" ++#if defined (CONFIG_RALINK_RT2880) ++#define PROCREG_DIR "rt2880" ++#elif defined (CONFIG_RALINK_RT3052) ++#define PROCREG_DIR "rt3052" ++#elif defined (CONFIG_RALINK_RT3352) ++#define PROCREG_DIR "rt3352" ++#elif defined (CONFIG_RALINK_RT5350) ++#define PROCREG_DIR "rt5350" ++#elif defined (CONFIG_RALINK_RT2883) ++#define PROCREG_DIR "rt2883" ++#elif defined (CONFIG_RALINK_RT3883) ++#define PROCREG_DIR "rt3883" ++#elif defined (CONFIG_RALINK_RT6855) ++#define PROCREG_DIR "rt6855" ++#elif defined (CONFIG_RALINK_MT7620) ++#define PROCREG_DIR "mt7620" ++#elif defined (CONFIG_RALINK_MT7621) ++#define PROCREG_DIR "mt7621" ++#elif defined (CONFIG_RALINK_MT7628) ++#define PROCREG_DIR "mt7628" ++#elif defined (CONFIG_RALINK_RT6855A) ++#define PROCREG_DIR "rt6855a" ++#else ++#define PROCREG_DIR "rt2880" ++#endif ++#define PROCREG_SKBFREE "skb_free" ++#define PROCREG_TXRING "tx_ring" ++#define PROCREG_RXRING "rx_ring" ++#define PROCREG_NUM_OF_TXD "num_of_txd" ++#define PROCREG_TSO_LEN "tso_len" ++#define PROCREG_LRO_STATS "lro_stats" ++#define PROCREG_GMAC "gmac" ++#define PROCREG_GMAC2 "gmac2" ++#define PROCREG_CP0 "cp0" ++#define PROCREG_RAQOS "qos" ++#define PROCREG_READ_VAL "regread_value" ++#define PROCREG_WRITE_VAL "regwrite_value" ++#define PROCREG_ADDR "reg_addr" ++#define PROCREG_CTL "procreg_control" ++#define PROCREG_RXDONE_INTR "rxdone_intr_count" ++#define PROCREG_ESW_INTR "esw_intr_count" ++#define PROCREG_ESW_CNT "esw_cnt" ++#define PROCREG_SNMP "snmp" ++#if defined (TASKLET_WORKQUEUE_SW) ++#define PROCREG_SCHE "schedule" ++#endif ++#define PROCREG_QDMA "qdma" ++ 
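The PROCREG_* strings above name the per-SoC debug entries that the driver's proc interface (debug_proc_init() in ra_mac.h further below) is expected to create under /proc/<PROCREG_DIR>/. A minimal userspace sketch, assuming an MT7621 build (PROCREG_DIR "mt7621") and that the tx_ring entry is registered with exactly the name PROCREG_TXRING gives it; the path is illustrative and not confirmed by this patch:

    /* Hypothetical helper: dump the RAETH TX descriptor ring via procfs.
     * Path assumes PROCREG_DIR "mt7621" and PROCREG_TXRING "tx_ring" above;
     * adjust the directory for other SoCs (e.g. /proc/mt7620/tx_ring). */
    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/proc/mt7621/tx_ring", "r");
        char line[256];

        if (!f) {
            perror("fopen /proc/mt7621/tx_ring");
            return 1;
        }
        while (fgets(line, sizeof(line), f))
            fputs(line, stdout); /* one line per descriptor, as printed by the driver */
        fclose(f);
        return 0;
    }

The other entries (rx_ring, esw_cnt, procreg_control, ...) would be read or written the same way; the output format is whatever the corresponding dump routine in the driver prints.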
++struct rt2880_reg_op_data { ++ char name[64]; ++ unsigned int reg_addr; ++ unsigned int op; ++ unsigned int reg_value; ++}; ++ ++#ifdef CONFIG_RAETH_LRO ++struct lro_counters { ++ u32 lro_aggregated; ++ u32 lro_flushed; ++ u32 lro_no_desc; ++}; ++ ++struct lro_para_struct { ++ unsigned int lan_ip1; ++}; ++ ++#endif // CONFIG_RAETH_LRO // ++ ++ ++ ++ ++typedef struct end_device ++{ ++ ++ unsigned int tx_cpu_owner_idx0; ++ unsigned int rx_cpu_owner_idx0; ++ unsigned int fe_int_status; ++ unsigned int tx_full; ++ ++#if !defined (CONFIG_RAETH_QDMA) ++ unsigned int phy_tx_ring0; ++#else ++ /* QDMA Tx PTR */ ++ struct sk_buff *free_skb[NUM_TX_DESC]; ++ unsigned int tx_dma_ptr; ++ unsigned int tx_cpu_ptr; ++ unsigned int free_txd_num; ++ unsigned int free_txd_head; ++ unsigned int free_txd_tail; ++ struct QDMA_txdesc *txd_pool; ++ dma_addr_t phy_txd_pool; ++// unsigned int phy_txd_pool; ++ unsigned int txd_pool_info[NUM_TX_DESC]; ++#endif ++ ++ unsigned int phy_rx_ring0, phy_rx_ring1; ++ ++#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || \ ++ defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_RT6855) || \ ++ defined(CONFIG_RALINK_RT6855A) || defined (CONFIG_RALINK_MT7620) || \ ++ defined(CONFIG_RALINK_MT7621) || defined (CONFIG_RALINK_MT7628) ++ //send signal to user application to notify link status changed ++ struct work_struct kill_sig_wq; ++#endif ++ ++ struct work_struct reset_task; ++#ifdef WORKQUEUE_BH ++ struct work_struct rx_wq; ++#else ++#if defined (TASKLET_WORKQUEUE_SW) ++ struct work_struct rx_wq; ++#endif ++#endif // WORKQUEUE_BH // ++ ++#if defined(CONFIG_RAETH_QOS) ++ struct sk_buff * skb_free[NUM_TX_RINGS][NUM_TX_DESC]; ++ unsigned int free_idx[NUM_TX_RINGS]; ++#else ++ struct sk_buff* skb_free[NUM_TX_DESC]; ++ unsigned int free_idx; ++#endif ++ ++ struct net_device_stats stat; /* The new statistics table. 
*/ ++ spinlock_t page_lock; /* Page register locks */ ++ struct PDMA_txdesc *tx_ring0; ++#if defined(CONFIG_RAETH_QOS) ++ struct PDMA_txdesc *tx_ring1; ++ struct PDMA_txdesc *tx_ring2; ++ struct PDMA_txdesc *tx_ring3; ++#endif ++ struct PDMA_rxdesc *rx_ring0; ++ struct sk_buff *netrx0_skbuf[NUM_RX_DESC]; ++#if defined (CONFIG_RAETH_MULTIPLE_RX_RING) ++ struct PDMA_rxdesc *rx_ring1; ++ struct sk_buff *netrx1_skbuf[NUM_RX_DESC]; ++#endif ++#ifdef CONFIG_RAETH_NAPI ++ atomic_t irq_sem; ++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35) ++ struct napi_struct napi; ++#endif ++#endif ++#ifdef CONFIG_PSEUDO_SUPPORT ++ struct net_device *PseudoDev; ++ unsigned int isPseudo; ++#endif ++#if defined (CONFIG_ETHTOOL) /*&& defined (CONFIG_RAETH_ROUTER)*/ ++ struct mii_if_info mii_info; ++#endif ++#ifdef CONFIG_RAETH_LRO ++ struct lro_counters lro_counters; ++ struct net_lro_mgr lro_mgr; ++ struct net_lro_desc lro_arr[8]; ++#endif ++#ifdef CONFIG_RAETH_HW_VLAN_RX ++ struct vlan_group *vlgrp; ++#endif ++} END_DEVICE, *pEND_DEVICE; ++ ++ ++#define RAETH_VERSION "v3.0" ++ ++#endif ++ ++#ifdef CONFIG_RAETH_QDMA ++#define DMA_GLO_CFG QDMA_GLO_CFG ++#define GDMA1_FWD_PORT 0x5555 ++#define GDMA2_FWD_PORT 0x5555 ++#define RAETH_RX_CALC_IDX0 QRX_CRX_IDX_0 ++#define RAETH_RX_CALC_IDX1 QRX_CRX_IDX_1 ++#define RAETH_FE_INT_STATUS QFE_INT_STATUS ++#define RAETH_FE_INT_ALL QFE_INT_ALL ++#define RAETH_FE_INT_ENABLE QFE_INT_ENABLE ++#define RAETH_FE_INT_DLY_INIT QFE_INT_DLY_INIT ++#define RAETH_FE_INT_SETTING RX_DONE_INT0 | RX_DONE_INT1 | RLS_DONE_INT ++#define RAETH_TX_DLY_INT RLS_DLY_INT ++#define RAETH_TX_DONE_INT0 RLS_DONE_INT ++#define RAETH_DLY_INT_CFG QDMA_DELAY_INT ++#else ++#define DMA_GLO_CFG PDMA_GLO_CFG ++#define GDMA1_FWD_PORT 0x0000 ++#define GDMA2_FWD_PORT 0x0000 ++#define RAETH_RX_CALC_IDX0 RX_CALC_IDX0 ++#define RAETH_RX_CALC_IDX1 RX_CALC_IDX1 ++#define RAETH_FE_INT_STATUS FE_INT_STATUS ++#define RAETH_FE_INT_ALL FE_INT_ALL ++#define RAETH_FE_INT_ENABLE FE_INT_ENABLE ++#define RAETH_FE_INT_DLY_INIT FE_INT_DLY_INIT ++#define RAETH_FE_INT_SETTING RX_DONE_INT0 | RX_DONE_INT1 | TX_DONE_INT0 | TX_DONE_INT1 | TX_DONE_INT2 | TX_DONE_INT3 ++#define RAETH_TX_DLY_INT TX_DLY_INT ++#define RAETH_TX_DONE_INT0 TX_DONE_INT0 ++#define RAETH_DLY_INT_CFG DLY_INT_CFG ++#endif +--- /dev/null ++++ b/drivers/net/ethernet/raeth/ra_ioctl.h +@@ -0,0 +1,92 @@ ++#ifndef _RAETH_IOCTL_H ++#define _RAETH_IOCTL_H ++ ++/* ioctl commands */ ++#define RAETH_ESW_REG_READ 0x89F1 ++#define RAETH_ESW_REG_WRITE 0x89F2 ++#define RAETH_MII_READ 0x89F3 ++#define RAETH_MII_WRITE 0x89F4 ++#define RAETH_ESW_INGRESS_RATE 0x89F5 ++#define RAETH_ESW_EGRESS_RATE 0x89F6 ++#define RAETH_ESW_PHY_DUMP 0x89F7 ++#define RAETH_QDMA_REG_READ 0x89F8 ++#define RAETH_QDMA_REG_WRITE 0x89F9 ++#define RAETH_QDMA_QUEUE_MAPPING 0x89FA ++#define RAETH_QDMA_READ_CPU_CLK 0x89FB ++ ++#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \ ++ defined (CONFIG_RALINK_MT7620) || defined(CONFIG_RALINK_MT7621) ++ ++#define REG_ESW_WT_MAC_MFC 0x10 ++#define REG_ESW_WT_MAC_ATA1 0x74 ++#define REG_ESW_WT_MAC_ATA2 0x78 ++#define REG_ESW_WT_MAC_ATWD 0x7C ++#define REG_ESW_WT_MAC_ATC 0x80 ++ ++#define REG_ESW_TABLE_TSRA1 0x84 ++#define REG_ESW_TABLE_TSRA2 0x88 ++#define REG_ESW_TABLE_ATRD 0x8C ++ ++ ++#define REG_ESW_VLAN_VTCR 0x90 ++#define REG_ESW_VLAN_VAWD1 0x94 ++#define REG_ESW_VLAN_VAWD2 0x98 ++ ++ ++#define REG_ESW_VLAN_ID_BASE 0x100 ++ ++//#define REG_ESW_VLAN_ID_BASE 0x50 ++#define REG_ESW_VLAN_MEMB_BASE 0x70 ++#define REG_ESW_TABLE_SEARCH 0x24 ++#define 
REG_ESW_TABLE_STATUS0 0x28 ++#define REG_ESW_TABLE_STATUS1 0x2C ++#define REG_ESW_TABLE_STATUS2 0x30 ++#define REG_ESW_WT_MAC_AD0 0x34 ++#define REG_ESW_WT_MAC_AD1 0x38 ++#define REG_ESW_WT_MAC_AD2 0x3C ++ ++#else ++/* rt3052 embedded ethernet switch registers */ ++#define REG_ESW_VLAN_ID_BASE 0x50 ++#define REG_ESW_VLAN_MEMB_BASE 0x70 ++#define REG_ESW_TABLE_SEARCH 0x24 ++#define REG_ESW_TABLE_STATUS0 0x28 ++#define REG_ESW_TABLE_STATUS1 0x2C ++#define REG_ESW_TABLE_STATUS2 0x30 ++#define REG_ESW_WT_MAC_AD0 0x34 ++#define REG_ESW_WT_MAC_AD1 0x38 ++#define REG_ESW_WT_MAC_AD2 0x3C ++#endif ++ ++ ++#if defined(CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_MT7628) ++#define REG_ESW_MAX 0x16C ++#elif defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \ ++ defined (CONFIG_RALINK_MT7620) ++#define REG_ESW_MAX 0x7FFFF ++#else //RT305x, RT3350 ++#define REG_ESW_MAX 0xFC ++#endif ++#define REG_HQOS_MAX 0x3FFF ++ ++ ++typedef struct rt3052_esw_reg { ++ unsigned int off; ++ unsigned int val; ++} esw_reg; ++ ++typedef struct ralink_mii_ioctl_data { ++ __u32 phy_id; ++ __u32 reg_num; ++ __u32 val_in; ++ __u32 val_out; ++} ra_mii_ioctl_data; ++ ++typedef struct rt335x_esw_reg { ++ unsigned int on_off; ++ unsigned int port; ++ unsigned int bw;/*Mbps*/ ++} esw_rate; ++ ++ ++#endif +--- /dev/null ++++ b/drivers/net/ethernet/raeth/ra_mac.c +@@ -0,0 +1,98 @@ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++ ++#include /* for cp0 reg access, added by bobtseng */ ++ ++#include ++#include ++//#include ++ ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++ ++#if defined(CONFIG_USER_SNMPD) ++#include ++#endif ++ ++ ++ ++#include "ra2882ethreg.h" ++#include "raether.h" ++#include "ra_mac.h" ++ ++extern struct net_device *dev_raether; ++ ++ ++void ra2880stop(END_DEVICE *ei_local) ++{ ++ unsigned int regValue; ++ printk("ra2880stop()..."); ++ ++ regValue = sysRegRead(PDMA_GLO_CFG); ++ regValue &= ~(TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN); ++ sysRegWrite(PDMA_GLO_CFG, regValue); ++ printk("-> %s 0x%08x 0x%08x\n", "PDMA_GLO_CFG", PDMA_GLO_CFG, regValue); ++ printk("Done\n"); ++} ++ ++void ei_irq_clear(void) ++{ ++ sysRegWrite(FE_INT_STATUS, 0xFFFFFFFF); ++ printk("-> %s 0x%08x 0x%08x\n", "FE_INT_STATUS", FE_INT_STATUS, 0xFFFFFFFF); ++} ++ ++void rt2880_gmac_hard_reset(void) ++{ ++ sysRegWrite(RSTCTRL, RALINK_FE_RST); ++ printk("-> %s 0x%08x 0x%08x\n", "RSTCTRL", RSTCTRL, RALINK_FE_RST); ++ sysRegWrite(RSTCTRL, 0); ++ printk("-> %s 0x%08x 0x%08x\n", "RSTCTRL", RSTCTRL, 0); ++} ++ ++void ra2880EnableInterrupt() ++{ ++ unsigned int regValue = sysRegRead(FE_INT_ENABLE); ++ sysRegWrite(FE_INT_ENABLE, regValue); ++ printk("-> %s 0x%08x 0x%08x\n", "FE_INT_ENABLE", FE_INT_ENABLE, regValue); ++} ++ ++void ra2880MacAddressSet(unsigned char p[6]) ++{ ++ unsigned long regValue; ++ ++ regValue = (p[0] << 8) | (p[1]); ++ sysRegWrite(GDMA1_MAC_ADRH, regValue); ++ printk("-> %s 0x%08x 0x%08x\n", "GDMA1_MAC_ADRH", GDMA1_MAC_ADRH, regValue); ++ ++ regValue = (p[2] << 24) | (p[3] <<16) | (p[4] << 8) | p[5]; ++ printk("-> %s 0x%08x 0x%08x\n", "GDMA1_MAC_ADRL", GDMA1_MAC_ADRL, regValue); ++ sysRegWrite(GDMA1_MAC_ADRL, regValue); ++ ++ return; ++} ++ ++ +--- /dev/null ++++ b/drivers/net/ethernet/raeth/ra_mac.h +@@ -0,0 +1,35 @@ ++#ifndef RA_MAC_H ++#define RA_MAC_H ++ ++void ra2880stop(END_DEVICE 
*ei_local); ++void ra2880MacAddressSet(unsigned char p[6]); ++void ra2880Mac2AddressSet(unsigned char p[6]); ++void ethtool_init(struct net_device *dev); ++ ++void ra2880EnableInterrupt(void); ++ ++void dump_qos(void); ++void dump_reg(void); ++void dump_cp0(void); ++ ++int debug_proc_init(void); ++void debug_proc_exit(void); ++ ++#if defined (CONFIG_RALINK_RT6855) || defined(CONFIG_RALINK_RT6855A) || \ ++ defined (CONFIG_RALINK_MT7620) || defined(CONFIG_RALINK_MT7621) ++void enable_auto_negotiate(int unused); ++#else ++void enable_auto_negotiate(int ge); ++#endif ++ ++void rt2880_gmac_hard_reset(void); ++ ++int TsoLenUpdate(int tso_len); ++int NumOfTxdUpdate(int num_of_txd); ++ ++#ifdef CONFIG_RAETH_LRO ++int LroStatsUpdate(struct net_lro_mgr *lro_mgr, bool all_flushed); ++#endif ++int getnext(const char *src, int separator, char *dest); ++int str_to_ip(unsigned int *ip, const char *str); ++#endif +--- /dev/null ++++ b/drivers/net/ethernet/raeth/raether.c +@@ -0,0 +1,693 @@ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include "ra2882ethreg.h" ++#include "raether.h" ++#include "ra_mac.h" ++#include "ra_ioctl.h" ++ ++static int rt2880_eth_recv(struct net_device* dev); ++int reg_dbg = 0; ++ ++void setup_internal_gsw(void); ++ ++#define MAX_RX_LENGTH 1536 ++ ++struct net_device *dev_raether; ++ ++static int rx_dma_owner_idx; ++static int rx_dma_owner_idx0; ++static int pending_recv; ++static struct PDMA_rxdesc *rx_ring; ++static unsigned long tx_ring_full=0; ++ ++#define KSEG1 0xa0000000 ++#define PHYS_TO_VIRT(x) ((void *)((x) | KSEG1)) ++#define VIRT_TO_PHYS(x) ((unsigned long)(x) & ~KSEG1) ++ ++extern int fe_dma_init(struct net_device *dev); ++extern int ei_start_xmit(struct sk_buff* skb, struct net_device *dev, int gmac_no); ++extern void ei_xmit_housekeeping(unsigned long unused); ++extern inline int rt2880_eth_send(struct net_device* dev, struct sk_buff *skb, int gmac_no); ++ ++static int ei_set_mac_addr(struct net_device *dev, void *p) ++{ ++ struct sockaddr *addr = p; ++ ++ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); ++ ++ if(netif_running(dev)) ++ return -EBUSY; ++ ++ ra2880MacAddressSet(addr->sa_data); ++ return 0; ++} ++ ++ ++void set_fe_dma_glo_cfg(void) ++{ ++ int dma_glo_cfg=0; ++ ++ dma_glo_cfg = (TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN | PDMA_BT_SIZE_16DWORDS); ++ ++ dma_glo_cfg |= (RX_2B_OFFSET); ++ ++ sysRegWrite(DMA_GLO_CFG, dma_glo_cfg); ++ if (reg_dbg) printk("-> %s 0x%08x 0x%08x\n", "DMA_GLO_CFG", DMA_GLO_CFG, dma_glo_cfg); ++} ++ ++int forward_config(struct net_device *dev) ++{ ++ unsigned int regVal, regCsg; ++ ++ regVal = sysRegRead(GDMA1_FWD_CFG); ++ regCsg = sysRegRead(CDMA_CSG_CFG); ++ ++ //set unicast/multicast/broadcast frame to cpu ++ regVal &= ~0xFFFF; ++ regVal |= GDMA1_FWD_PORT; ++ regCsg &= ~0x7; ++ ++ //disable ipv4 header checksum check ++ regVal &= ~GDM1_ICS_EN; ++ regCsg &= ~ICS_GEN_EN; ++ ++ //disable tcp checksum check ++ regVal &= ~GDM1_TCS_EN; ++ regCsg &= ~TCS_GEN_EN; ++ ++ //disable udp checksum check ++ regVal &= ~GDM1_UCS_EN; ++ regCsg &= ~UCS_GEN_EN; ++ ++ ++ dev->features &= ~NETIF_F_IP_CSUM; /* disable checksum TCP/UDP over IPv4 */ ++ ++ ++ sysRegWrite(GDMA1_FWD_CFG, regVal); ++ if (reg_dbg) printk("-> %s 0x%08x 0x%08x\n", "GDMA1_FWD_CFG", GDMA1_FWD_CFG, regVal); ++ sysRegWrite(CDMA_CSG_CFG, regCsg); ++ if (reg_dbg) printk("-> %s 0x%08x 0x%08x\n", "CDMA_CSG_CFG", CDMA_CSG_CFG, regCsg); ++ ++ regVal = 0x1; ++ 
sysRegWrite(FE_RST_GL, regVal); ++ if (reg_dbg) printk("-> %s 0x%08x 0x%08x\n", "FE_RST_GL", FE_RST_GL, regVal); ++ sysRegWrite(FE_RST_GL, 0); // update for RSTCTL issue ++ if (reg_dbg) printk("-> %s 0x%08x 0x%08x\n", "FE_RST_GL", FE_RST_GL, 1); ++ ++ regCsg = sysRegRead(CDMA_CSG_CFG); ++ printk("CDMA_CSG_CFG = %0X\n",regCsg); ++ regVal = sysRegRead(GDMA1_FWD_CFG); ++ printk("GDMA1_FWD_CFG = %0X\n",regVal); ++ ++ return 1; ++} ++ ++ ++static int rt2880_eth_recv(struct net_device* dev) ++{ ++ struct sk_buff *skb, *rx_skb; ++ unsigned int length = 0; ++ unsigned long RxProcessed; ++ ++ ++ int bReschedule = 0; ++ END_DEVICE* ei_local = netdev_priv(dev); ++ ++ ++ ++ RxProcessed = 0; ++ ++ rx_dma_owner_idx0 = (sysRegRead(RAETH_RX_CALC_IDX0) + 1) % NUM_RX_DESC; ++ ++ for ( ; ; ) { ++ ++ if (RxProcessed++ > NUM_RX_MAX_PROCESS) ++ { ++ // need to reschedule rx handle ++ bReschedule = 1; ++ break; ++ } ++ ++ ++ ++ if (ei_local->rx_ring0[rx_dma_owner_idx0].rxd_info2.DDONE_bit == 1) { ++ rx_ring = ei_local->rx_ring0; ++ rx_dma_owner_idx = rx_dma_owner_idx0; ++ } else { ++ break; ++ } ++ ++ /* skb processing */ ++ length = rx_ring[rx_dma_owner_idx].rxd_info2.PLEN0; ++ rx_skb = ei_local->netrx0_skbuf[rx_dma_owner_idx]; ++ rx_skb->data = ei_local->netrx0_skbuf[rx_dma_owner_idx]->data; ++ rx_skb->len = length; ++ ++ rx_skb->data += NET_IP_ALIGN; ++ ++ rx_skb->tail = rx_skb->data + length; ++ ++ rx_skb->dev = dev; ++ rx_skb->protocol = eth_type_trans(rx_skb,dev); ++ ++ rx_skb->ip_summed = CHECKSUM_NONE; ++ ++ ++ /* We have to check the free memory size is big enough ++ * before pass the packet to cpu*/ ++ skb = __dev_alloc_skb(MAX_RX_LENGTH + NET_IP_ALIGN, GFP_ATOMIC); ++ ++ if (unlikely(skb == NULL)) ++ { ++ printk(KERN_ERR "skb not available...\n"); ++ ei_local->stat.rx_dropped++; ++ bReschedule = 1; ++ break; ++ } ++ ++ { ++ netif_rx(rx_skb); ++ } ++ ++ { ++ ei_local->stat.rx_packets++; ++ ei_local->stat.rx_bytes += length; ++ } ++ ++ ++ rx_ring[rx_dma_owner_idx].rxd_info2.PLEN0 = MAX_RX_LENGTH; ++ rx_ring[rx_dma_owner_idx].rxd_info2.LS0 = 0; ++ rx_ring[rx_dma_owner_idx].rxd_info2.DDONE_bit = 0; ++ rx_ring[rx_dma_owner_idx].rxd_info1.PDP0 = dma_map_single(NULL, skb->data, MAX_RX_LENGTH, PCI_DMA_FROMDEVICE); ++ ++ /* Move point to next RXD which wants to alloc*/ ++ sysRegWrite(RAETH_RX_CALC_IDX0, rx_dma_owner_idx); ++ if (reg_dbg) printk("-> %s 0x%08x 0x%08x\n", "RAETH_RX_CALC_IDX0", RAETH_RX_CALC_IDX0, rx_dma_owner_idx); ++ ei_local->netrx0_skbuf[rx_dma_owner_idx] = skb; ++ ++ /* Update to Next packet point that was received. 
++ */ ++ rx_dma_owner_idx0 = (sysRegRead(RAETH_RX_CALC_IDX0) + 1) % NUM_RX_DESC; ++ } /* for */ ++ ++ return bReschedule; ++} ++ ++void ei_receive_workq(struct work_struct *work) ++{ ++ struct net_device *dev = dev_raether; ++ END_DEVICE *ei_local = netdev_priv(dev); ++ unsigned long reg_int_mask=0; ++ int bReschedule=0; ++ ++ ++ if(tx_ring_full==0){ ++ bReschedule = rt2880_eth_recv(dev); ++ if(bReschedule) ++ { ++ schedule_work(&ei_local->rx_wq); ++ }else{ ++ reg_int_mask=sysRegRead(RAETH_FE_INT_ENABLE); ++ sysRegWrite(RAETH_FE_INT_ENABLE, reg_int_mask| RX_DLY_INT); ++ if (reg_dbg) printk("-> %s 0x%08x 0x%08lx\n", "RAETH_FE_INT_ENABLE", RAETH_FE_INT_ENABLE, reg_int_mask| RX_DLY_INT); ++ } ++ }else{ ++ schedule_work(&ei_local->rx_wq); ++ } ++} ++ ++ ++static irqreturn_t ei_interrupt(int irq, void *dev_id) ++{ ++ unsigned long reg_int_val; ++ unsigned long reg_int_mask=0; ++ unsigned int recv = 0; ++ unsigned int transmit __maybe_unused = 0; ++ unsigned long flags; ++ ++ struct net_device *dev = (struct net_device *) dev_id; ++ END_DEVICE *ei_local = netdev_priv(dev); ++ ++ if (dev == NULL) ++ { ++ printk (KERN_ERR "net_interrupt(): irq %x for unknown device.\n", IRQ_ENET0); ++ return IRQ_NONE; ++ } ++ ++ ++ spin_lock_irqsave(&(ei_local->page_lock), flags); ++ reg_int_val = sysRegRead(RAETH_FE_INT_STATUS); ++ ++ if((reg_int_val & RX_DLY_INT)) ++ recv = 1; ++ ++ if (reg_int_val & RAETH_TX_DLY_INT) ++ transmit = 1; ++ ++ sysRegWrite(RAETH_FE_INT_STATUS, RAETH_FE_INT_DLY_INIT); ++ if (reg_dbg) printk("-> %s 0x%08x 0x%08lx\n", "RAETH_FE_INT_STATUS", RAETH_FE_INT_STATUS, RAETH_FE_INT_DLY_INIT); ++ ++ ei_xmit_housekeeping(0); ++ ++ if (((recv == 1) || (pending_recv ==1)) && (tx_ring_full==0)) ++ { ++ reg_int_mask = sysRegRead(RAETH_FE_INT_ENABLE); ++ sysRegWrite(RAETH_FE_INT_ENABLE, reg_int_mask & ~(RX_DLY_INT)); ++ if (reg_dbg) printk("-> %s 0x%08x 0x%08lx\n", "RAETH_FE_INT_ENABLE", RAETH_FE_INT_ENABLE, reg_int_mask & ~(RX_DLY_INT)); ++ pending_recv=0; ++ schedule_work(&ei_local->rx_wq); ++ } ++ else if (recv == 1 && tx_ring_full==1) ++ { ++ pending_recv=1; ++ } ++ spin_unlock_irqrestore(&(ei_local->page_lock), flags); ++ ++ return IRQ_HANDLED; ++} ++ ++static void esw_link_status_changed(int port_no, void *dev_id) ++{ ++ unsigned int reg_val; ++ mii_mgr_read(31, (0x3008 + (port_no*0x100)), ®_val); ++ if(reg_val & 0x1) { ++ printk("ESW: Link Status Changed - Port%d Link UP\n", port_no); ++ } else { ++ printk("ESW: Link Status Changed - Port%d Link Down\n", port_no); ++ } ++} ++ ++ ++static irqreturn_t esw_interrupt(int irq, void *dev_id) ++{ ++ unsigned long flags; ++ unsigned int reg_int_val; ++ struct net_device *dev = (struct net_device *) dev_id; ++ END_DEVICE *ei_local = netdev_priv(dev); ++ ++ spin_lock_irqsave(&(ei_local->page_lock), flags); ++ mii_mgr_read(31, 0x700c, ®_int_val); ++ ++ if (reg_int_val & P4_LINK_CH) { ++ esw_link_status_changed(4, dev_id); ++ } ++ ++ if (reg_int_val & P3_LINK_CH) { ++ esw_link_status_changed(3, dev_id); ++ } ++ if (reg_int_val & P2_LINK_CH) { ++ esw_link_status_changed(2, dev_id); ++ } ++ if (reg_int_val & P1_LINK_CH) { ++ esw_link_status_changed(1, dev_id); ++ } ++ if (reg_int_val & P0_LINK_CH) { ++ esw_link_status_changed(0, dev_id); ++ } ++ ++ mii_mgr_write(31, 0x700c, 0x1f); //ack switch link change ++ spin_unlock_irqrestore(&(ei_local->page_lock), flags); ++ return IRQ_HANDLED; ++} ++ ++ ++ ++static int ei_start_xmit_fake(struct sk_buff* skb, struct net_device *dev) ++{ ++ return ei_start_xmit(skb, dev, 1); ++} ++ ++static int ei_change_mtu(struct 
net_device *dev, int new_mtu) ++{ ++ unsigned long flags; ++ END_DEVICE *ei_local = netdev_priv(dev); // get priv ei_local pointer from net_dev structure ++ ++ if ( ei_local == NULL ) { ++ printk(KERN_EMERG "%s: ei_change_mtu passed a non-existent private pointer from net_dev!\n", dev->name); ++ return -ENXIO; ++ } ++ ++ spin_lock_irqsave(&ei_local->page_lock, flags); ++ ++ if ( (new_mtu > 4096) || (new_mtu < 64)) { ++ spin_unlock_irqrestore(&ei_local->page_lock, flags); ++ return -EINVAL; ++ } ++ ++ if ( new_mtu > 1500 ) { ++ spin_unlock_irqrestore(&ei_local->page_lock, flags); ++ return -EINVAL; ++ } ++ ++ dev->mtu = new_mtu; ++ ++ spin_unlock_irqrestore(&ei_local->page_lock, flags); ++ return 0; ++} ++ ++ ++static const struct net_device_ops ei_netdev_ops = { ++ .ndo_init = rather_probe, ++ .ndo_open = ei_open, ++ .ndo_stop = ei_close, ++ .ndo_start_xmit = ei_start_xmit_fake, ++ .ndo_set_mac_address = eth_mac_addr, ++ .ndo_change_mtu = ei_change_mtu, ++ .ndo_validate_addr = eth_validate_addr, ++}; ++ ++void ra2880_setup_dev_fptable(struct net_device *dev) ++{ ++ RAETH_PRINT(__FUNCTION__ "is called!\n"); ++ ++ dev->netdev_ops = &ei_netdev_ops; ++#define TX_TIMEOUT (5*HZ) ++ dev->watchdog_timeo = TX_TIMEOUT; ++ ++} ++ ++void fe_reset(void) ++{ ++ u32 val; ++ val = sysRegRead(RSTCTRL); ++ ++ val = val | RALINK_FE_RST; ++ sysRegWrite(RSTCTRL, val); ++ if (reg_dbg) printk("-> %s 0x%08x 0x%08x\n", "RSTCTRL", RSTCTRL, val); ++ val = val & ~(RALINK_FE_RST); ++ sysRegWrite(RSTCTRL, val); ++ if (reg_dbg) printk("-> %s 0x%08x 0x%08x\n", "RSTCTRL", RSTCTRL, val); ++} ++ ++void ei_reset_task(struct work_struct *work) ++{ ++ struct net_device *dev = dev_raether; ++ ++ ei_close(dev); ++ ei_open(dev); ++ ++ return; ++} ++ ++void ei_tx_timeout(struct net_device *dev) ++{ ++ END_DEVICE *ei_local = netdev_priv(dev); ++ ++ schedule_work(&ei_local->reset_task); ++} ++ ++int __init rather_probe(struct net_device *dev) ++{ ++ END_DEVICE *ei_local = netdev_priv(dev); ++ struct sockaddr addr; ++ unsigned char mac_addr01234[5] = {0x00, 0x0C, 0x43, 0x28, 0x80}; ++ ++ fe_reset(); ++ net_srandom(jiffies); ++ memcpy(addr.sa_data, mac_addr01234, 5); ++ addr.sa_data[5] = net_random()&0xFF; ++ ei_set_mac_addr(dev, &addr); ++ spin_lock_init(&ei_local->page_lock); ++ ether_setup(dev); ++ ++ return 0; ++} ++ ++ ++int ei_open(struct net_device *dev) ++{ ++ int i, err; ++ unsigned long flags; ++ END_DEVICE *ei_local; ++ ++ ++ if (!try_module_get(THIS_MODULE)) ++ { ++ printk("%s: Cannot reserve module\n", __FUNCTION__); ++ return -1; ++ } ++ printk("Raeth %s (",RAETH_VERSION); ++ printk("Workqueue"); ++ ++ printk(")\n"); ++ ei_local = netdev_priv(dev); // get device pointer from System ++ // unsigned int flags; ++ ++ if (ei_local == NULL) ++ { ++ printk(KERN_EMERG "%s: ei_open passed a non-existent device!\n", dev->name); ++ return -ENXIO; ++ } ++ ++ /* receiving packet buffer allocation - NUM_RX_DESC x MAX_RX_LENGTH */ ++ for ( i = 0; i < NUM_RX_DESC; i++) ++ { ++ ei_local->netrx0_skbuf[i] = dev_alloc_skb(MAX_RX_LENGTH + NET_IP_ALIGN); ++ if (ei_local->netrx0_skbuf[i] == NULL ) { ++ printk("rx skbuff buffer allocation failed!"); ++ } else { ++ } ++ } ++ ++ spin_lock_irqsave(&(ei_local->page_lock), flags); ++ fe_dma_init(dev); ++ fe_sw_init(); //initialize fe and switch register ++ err = request_irq( dev->irq, ei_interrupt, 0, dev->name, dev); // try to fix irq in open ++ if (err) ++ return err; ++ ++ if ( dev->dev_addr != NULL) { ++ ra2880MacAddressSet((void *)(dev->dev_addr)); ++ } else { ++ printk("dev->dev_addr is empty 
!\n"); ++ } ++ mii_mgr_write(31, 0x7008, 0x1f); //enable switch link change intr ++ err = request_irq(31, esw_interrupt, IRQF_DISABLED, "Ralink_ESW", dev); ++ if (err) ++ return err; ++ ++ sysRegWrite(RAETH_DLY_INT_CFG, DELAY_INT_INIT); ++ if (reg_dbg) printk("-> %s 0x%08x 0x%08x\n", "RAETH_DLY_INT_CFG", RAETH_DLY_INT_CFG, DELAY_INT_INIT); ++ sysRegWrite(RAETH_FE_INT_ENABLE, RAETH_FE_INT_DLY_INIT); ++ if (reg_dbg) printk("-> %s 0x%08x 0x%08lx\n", "RAETH_FE_INT_ENABLE", RAETH_FE_INT_ENABLE, RAETH_FE_INT_DLY_INIT); ++ ++ INIT_WORK(&ei_local->reset_task, ei_reset_task); ++ ++ INIT_WORK(&ei_local->rx_wq, ei_receive_workq); ++ ++ netif_start_queue(dev); ++ ++ ++ spin_unlock_irqrestore(&(ei_local->page_lock), flags); ++ ++ ++ forward_config(dev); ++ return 0; ++} ++ ++int ei_close(struct net_device *dev) ++{ ++ int i; ++ END_DEVICE *ei_local = netdev_priv(dev); // device pointer ++ unsigned long flags; ++ spin_lock_irqsave(&(ei_local->page_lock), flags); ++ ++ cancel_work_sync(&ei_local->reset_task); ++ netif_stop_queue(dev); ++ ra2880stop(ei_local); ++ msleep(10); ++ ++ cancel_work_sync(&ei_local->rx_wq); ++ free_irq(dev->irq, dev); ++ free_irq(31, dev); ++ for ( i = 0; i < NUM_RX_DESC; i++) ++ { ++ if (ei_local->netrx0_skbuf[i] != NULL) { ++ dev_kfree_skb(ei_local->netrx0_skbuf[i]); ++ ei_local->netrx0_skbuf[i] = NULL; ++ } ++ } ++ if (ei_local->tx_ring0 != NULL) { ++ pci_free_consistent(NULL, NUM_TX_DESC*sizeof(struct PDMA_txdesc), ei_local->tx_ring0, ei_local->phy_tx_ring0); ++ } ++ pci_free_consistent(NULL, NUM_RX_DESC*sizeof(struct PDMA_rxdesc), ei_local->rx_ring0, ei_local->phy_rx_ring0); ++ ++ printk("Free TX/RX Ring Memory!\n"); ++ ++// fe_reset(); ++ spin_unlock_irqrestore(&(ei_local->page_lock), flags); ++ ++ module_put(THIS_MODULE); ++ return 0; ++} ++ ++ ++void setup_internal_gsw(void) ++{ ++ u32 i; ++ u32 regValue; ++ ++ /* reduce RGMII2 PAD driving strength */ ++ *(volatile u_long *)(PAD_RGMII2_MDIO_CFG) &= ~(0x3 << 4); ++ ++ //RGMII1=Normal mode ++ *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) &= ~(0x1 << 14); ++ ++ //GMAC1= RGMII mode ++ *(volatile u_long *)(SYSCFG1) &= ~(0x3 << 12); ++ ++ //enable MDIO to control MT7530 ++ regValue = le32_to_cpu(*(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60)); ++ regValue &= ~(0x3 << 12); ++ *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x60) = regValue; ++ ++ for(i=0;i<=4;i++) ++ { ++ //turn off PHY ++ mii_mgr_read(i, 0x0 ,®Value); ++ regValue |= (0x1<<11); ++ mii_mgr_write(i, 0x0, regValue); ++ } ++ mii_mgr_write(31, 0x7000, 0x3); //reset switch ++ udelay(10); ++ ++ if(sysRegRead(0xbe00000c)==0x00030101) { ++ sysRegWrite(RALINK_ETH_SW_BASE+0x100, 0x2005e30b);//(GE1, Force 1000M/FD, FC ON) ++ if (reg_dbg) printk("-> %s 0x%08x 0x%08x\n", "RALINK_ETH_SW_BASE+0x100", RALINK_ETH_SW_BASE+0x100, 0x2005e30b); ++ mii_mgr_write(31, 0x3600, 0x5e30b); ++ } else { ++ sysRegWrite(RALINK_ETH_SW_BASE+0x100, 0x2005e33b);//(GE1, Force 1000M/FD, FC ON) ++ if (reg_dbg) printk("-> %s 0x%08x 0x%08x\n", "RALINK_ETH_SW_BASE+0x100", RALINK_ETH_SW_BASE+0x100, 0x2005e33b); ++ mii_mgr_write(31, 0x3600, 0x5e33b); ++ } ++ ++ sysRegWrite(RALINK_ETH_SW_BASE+0x200, 0x00008000);//(GE2, Link down) ++ if (reg_dbg) printk("-> %s 0x%08x 0x%08x\n", "RALINK_ETH_SW_BASE+0x200", RALINK_ETH_SW_BASE+0x200, 0x00008000); ++ ++ //regValue = 0x117ccf; //Enable Port 6, P5 as GMAC5, P5 disable*/ ++ mii_mgr_read(31, 0x7804 ,®Value); ++ regValue &= ~(1<<8); //Enable Port 6 ++ regValue |= (1<<6); //Disable Port 5 ++ regValue |= (1<<13); //Port 5 as GMAC, no Internal PHY ++ ++ regValue |= 
(1<<16);//change HW-TRAP ++ printk("change HW-TRAP to 0x%x!!!!!!!!!!!!",regValue); ++ mii_mgr_write(31, 0x7804 ,regValue); ++ regValue = *(volatile u_long *)(RALINK_SYSCTL_BASE + 0x10); ++ regValue = (regValue >> 6) & 0x7; ++ if(regValue >= 6) { //25Mhz Xtal ++ /* do nothing */ ++ } else if(regValue >=3) { //40Mhz ++ ++ mii_mgr_write(0, 13, 0x1f); // disable MT7530 core clock ++ mii_mgr_write(0, 14, 0x410); ++ mii_mgr_write(0, 13, 0x401f); ++ mii_mgr_write(0, 14, 0x0); ++ ++ mii_mgr_write(0, 13, 0x1f); // disable MT7530 PLL ++ mii_mgr_write(0, 14, 0x40d); ++ mii_mgr_write(0, 13, 0x401f); ++ mii_mgr_write(0, 14, 0x2020); ++ ++ mii_mgr_write(0, 13, 0x1f); // for MT7530 core clock = 500Mhz ++ mii_mgr_write(0, 14, 0x40e); ++ mii_mgr_write(0, 13, 0x401f); ++ mii_mgr_write(0, 14, 0x119); ++ ++ mii_mgr_write(0, 13, 0x1f); // enable MT7530 PLL ++ mii_mgr_write(0, 14, 0x40d); ++ mii_mgr_write(0, 13, 0x401f); ++ mii_mgr_write(0, 14, 0x2820); ++ ++ udelay(20); //suggest by CD ++ ++ mii_mgr_write(0, 13, 0x1f); // enable MT7530 core clock ++ mii_mgr_write(0, 14, 0x410); ++ mii_mgr_write(0, 13, 0x401f); ++ }else { //20Mhz Xtal ++ ++ /* TODO */ ++ ++ } ++ mii_mgr_write(0, 14, 0x1); /*RGMII*/ ++ ++#if 1 ++ mii_mgr_write(31, 0x7b00, 0x102); //delay setting for 10/1000M ++ mii_mgr_write(31, 0x7b04, 0x14); //delay setting for 10/1000M ++#else ++ mii_mgr_write(31, 0x7b00, 8); // delay setting for 100M ++ mii_mgr_write(31, 0x7b04, 0x14); // for 100M ++#endif ++ /*Tx Driving*/ ++ mii_mgr_write(31, 0x7a54, 0x44); //lower driving ++ mii_mgr_write(31, 0x7a5c, 0x44); //lower driving ++ mii_mgr_write(31, 0x7a64, 0x44); //lower driving ++ mii_mgr_write(31, 0x7a6c, 0x44); //lower driving ++ mii_mgr_write(31, 0x7a74, 0x44); //lower driving ++ mii_mgr_write(31, 0x7a7c, 0x44); //lower driving ++ ++ for(i=0;i<=4;i++) ++ { ++ //turn on PHY ++ mii_mgr_read(i, 0x0 ,®Value); ++ regValue &= ~(0x1<<11); ++ mii_mgr_write(i, 0x0, regValue); ++ } ++ ++ mii_mgr_read(31, 0x7808 ,®Value); ++ regValue |= (3<<16); //Enable INTR ++ mii_mgr_write(31, 0x7808 ,regValue); ++} ++ ++int __init ra2882eth_init(void) ++{ ++ int ret; ++ struct net_device *dev = alloc_etherdev(sizeof(END_DEVICE)); ++ if (!dev) ++ return -ENOMEM; ++ ++ strcpy(dev->name, DEV_NAME); ++ dev->irq = IRQ_ENET0; ++ dev->addr_len = 6; ++ dev->base_addr = RALINK_FRAME_ENGINE_BASE; ++ ++ rather_probe(dev); ++ ra2880_setup_dev_fptable(dev); ++ ++ if ( register_netdev(dev) != 0) { ++ printk(KERN_WARNING " " __FILE__ ": No ethernet port found.\n"); ++ return -ENXIO; ++ } ++ ret = 0; ++ ++ dev_raether = dev; ++ return ret; ++} ++ ++void fe_sw_init(void) ++{ ++ setup_internal_gsw(); ++} ++ ++ ++void ra2882eth_cleanup_module(void) ++{ ++} ++EXPORT_SYMBOL(set_fe_dma_glo_cfg); ++module_init(ra2882eth_init); ++module_exit(ra2882eth_cleanup_module); ++MODULE_LICENSE("GPL"); +--- /dev/null ++++ b/drivers/net/ethernet/raeth/raether.h +@@ -0,0 +1,92 @@ ++#ifndef RA2882ETHEND_H ++#define RA2882ETHEND_H ++ ++#ifdef DSP_VIA_NONCACHEABLE ++#define ESRAM_BASE 0xa0800000 /* 0x0080-0000 ~ 0x00807FFF */ ++#else ++#define ESRAM_BASE 0x80800000 /* 0x0080-0000 ~ 0x00807FFF */ ++#endif ++ ++#define RX_RING_BASE ((int)(ESRAM_BASE + 0x7000)) ++#define TX_RING_BASE ((int)(ESRAM_BASE + 0x7800)) ++ ++#if defined(CONFIG_RALINK_RT2880) ++#define NUM_TX_RINGS 1 ++#else ++#define NUM_TX_RINGS 4 ++#endif ++#ifdef MEMORY_OPTIMIZATION ++#ifdef CONFIG_RAETH_ROUTER ++#define NUM_RX_DESC 128 ++#define NUM_TX_DESC 128 ++#elif CONFIG_RT_3052_ESW ++#define NUM_RX_DESC 64 ++#define NUM_TX_DESC 64 ++#else ++#define 
NUM_RX_DESC 128 ++#define NUM_TX_DESC 128 ++#endif ++//#define NUM_RX_MAX_PROCESS 32 ++#define NUM_RX_MAX_PROCESS 64 ++#else ++#if defined (CONFIG_RAETH_ROUTER) ++#define NUM_RX_DESC 256 ++#define NUM_TX_DESC 256 ++#elif defined (CONFIG_RT_3052_ESW) ++#define NUM_RX_DESC 256 ++#define NUM_TX_DESC 256 ++#else ++#define NUM_RX_DESC 256 ++#define NUM_TX_DESC 256 ++#endif ++#if defined(CONFIG_RALINK_RT3883) || defined(CONFIG_RALINK_MT7620) ++#define NUM_RX_MAX_PROCESS 2 ++#else ++#define NUM_RX_MAX_PROCESS 16 ++#endif ++#endif ++ ++#define DEV_NAME "eth0" ++#define DEV2_NAME "eth3" ++ ++#if defined (CONFIG_RALINK_RT6855A) || defined (CONFIG_RALINK_MT7621) ++#define GMAC0_OFFSET 0xE000 ++#define GMAC2_OFFSET 0xE006 ++#else ++#define GMAC0_OFFSET 0x28 ++#define GMAC2_OFFSET 0x22 ++#endif ++ ++#if defined(CONFIG_RALINK_RT6855A) ++#define IRQ_ENET0 22 ++#else ++#define IRQ_ENET0 11 /* hardware interrupt #3, defined in RT2880 Soc Design Spec Rev 0.03, pp43 */ ++#endif ++ ++#define FE_INT_STATUS_REG (*(volatile unsigned long *)(FE_INT_STATUS)) ++#define FE_INT_STATUS_CLEAN(reg) (*(volatile unsigned long *)(FE_INT_STATUS)) = reg ++ ++//#define RAETH_DEBUG ++#ifdef RAETH_DEBUG ++#define RAETH_PRINT(fmt, args...) printk(KERN_INFO fmt, ## args) ++#else ++#define RAETH_PRINT(fmt, args...) { } ++#endif ++ ++struct net_device_stats *ra_get_stats(struct net_device *dev); ++ ++void ei_tx_timeout(struct net_device *dev); ++int rather_probe(struct net_device *dev); ++int ei_open(struct net_device *dev); ++int ei_close(struct net_device *dev); ++ ++int ra2882eth_init(void); ++void ra2882eth_cleanup_module(void); ++ ++void ei_xmit_housekeeping(unsigned long data); ++ ++u32 mii_mgr_read(u32 phy_addr, u32 phy_register, u32 *read_data); ++u32 mii_mgr_write(u32 phy_addr, u32 phy_register, u32 write_data); ++void fe_sw_init(void); ++ ++#endif +--- /dev/null ++++ b/drivers/net/ethernet/raeth/raether_pdma.c +@@ -0,0 +1,212 @@ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "ra2882ethreg.h" ++#include "raether.h" ++#include "ra_mac.h" ++ ++#define MAX_RX_LENGTH 1536 ++ ++extern int reg_dbg; ++extern struct net_device *dev_raether; ++static unsigned long tx_ring_full=0; ++ ++#define KSEG1 0xa0000000 ++#define PHYS_TO_VIRT(x) ((void *)((x) | KSEG1)) ++#define VIRT_TO_PHYS(x) ((unsigned long)(x) & ~KSEG1) ++ ++extern void set_fe_dma_glo_cfg(void); ++ ++int fe_dma_init(struct net_device *dev) ++{ ++ ++ int i; ++ unsigned int regVal; ++ END_DEVICE* ei_local = netdev_priv(dev); ++ ++ while(1) ++ { ++ regVal = sysRegRead(PDMA_GLO_CFG); ++ if((regVal & RX_DMA_BUSY)) ++ { ++ printk("\n RX_DMA_BUSY !!! "); ++ continue; ++ } ++ if((regVal & TX_DMA_BUSY)) ++ { ++ printk("\n TX_DMA_BUSY !!! 
"); ++ continue; ++ } ++ break; ++ } ++ ++ for (i=0;iskb_free[i]=0; ++ } ++ ei_local->free_idx =0; ++ ei_local->tx_ring0 = pci_alloc_consistent(NULL, NUM_TX_DESC * sizeof(struct PDMA_txdesc), &ei_local->phy_tx_ring0); ++ printk("\nphy_tx_ring = 0x%08x, tx_ring = 0x%p\n", ei_local->phy_tx_ring0, ei_local->tx_ring0); ++ ++ for (i=0; i < NUM_TX_DESC; i++) { ++ memset(&ei_local->tx_ring0[i],0,sizeof(struct PDMA_txdesc)); ++ ei_local->tx_ring0[i].txd_info2.LS0_bit = 1; ++ ei_local->tx_ring0[i].txd_info2.DDONE_bit = 1; ++ ++ } ++ ++ /* Initial RX Ring 0*/ ++ ei_local->rx_ring0 = pci_alloc_consistent(NULL, NUM_RX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_rx_ring0); ++ for (i = 0; i < NUM_RX_DESC; i++) { ++ memset(&ei_local->rx_ring0[i],0,sizeof(struct PDMA_rxdesc)); ++ ei_local->rx_ring0[i].rxd_info2.DDONE_bit = 0; ++ ei_local->rx_ring0[i].rxd_info2.LS0 = 0; ++ ei_local->rx_ring0[i].rxd_info2.PLEN0 = MAX_RX_LENGTH; ++ ei_local->rx_ring0[i].rxd_info1.PDP0 = dma_map_single(NULL, ei_local->netrx0_skbuf[i]->data, MAX_RX_LENGTH, PCI_DMA_FROMDEVICE); ++ } ++ printk("\nphy_rx_ring0 = 0x%08x, rx_ring0 = 0x%p\n",ei_local->phy_rx_ring0,ei_local->rx_ring0); ++ ++ ++ regVal = sysRegRead(PDMA_GLO_CFG); ++ regVal &= 0x000000FF; ++ sysRegWrite(PDMA_GLO_CFG, regVal); ++ if (reg_dbg) printk("-> %s 0x%08x 0x%08x\n", "PDMA_GLO_CFG", PDMA_GLO_CFG, regVal); ++ ++ regVal=sysRegRead(PDMA_GLO_CFG); ++ ++ /* Tell the adapter where the TX/RX rings are located. */ ++ sysRegWrite(TX_BASE_PTR0, phys_to_bus((u32) ei_local->phy_tx_ring0)); ++ if (reg_dbg) printk("-> %s 0x%08x 0x%08x\n", "TX_BASE_PTR0", TX_BASE_PTR0, phys_to_bus((u32) ei_local->phy_tx_ring0)); ++ sysRegWrite(TX_MAX_CNT0, cpu_to_le32((u32) NUM_TX_DESC)); ++ if (reg_dbg) printk("-> %s 0x%08x 0x%08x\n", "TX_MAX_CNT0", TX_MAX_CNT0, cpu_to_le32((u32) NUM_TX_DESC)); ++ sysRegWrite(TX_CTX_IDX0, 0); ++ if (reg_dbg) printk("-> %s 0x%08x 0x%08x\n", "TX_CTX_IDX0", TX_CTX_IDX0, 0); ++ sysRegWrite(PDMA_RST_CFG, PST_DTX_IDX0); ++ if (reg_dbg) printk("-> %s 0x%08x 0x%08lx\n", "PDMA_RST_CFG", PDMA_RST_CFG, PST_DTX_IDX0); ++ ++ sysRegWrite(RX_BASE_PTR0, phys_to_bus((u32) ei_local->phy_rx_ring0)); ++ if (reg_dbg) printk("-> %s 0x%08x 0x%08x\n", "RX_BASE_PTR0", RX_BASE_PTR0, phys_to_bus((u32) ei_local->phy_rx_ring0)); ++ sysRegWrite(RX_MAX_CNT0, cpu_to_le32((u32) NUM_RX_DESC)); ++ if (reg_dbg) printk("-> %s 0x%08x 0x%08x\n", "RX_MAX_CNT0", RX_MAX_CNT0, cpu_to_le32((u32) NUM_RX_DESC)); ++ sysRegWrite(RX_CALC_IDX0, cpu_to_le32((u32) (NUM_RX_DESC - 1))); ++ if (reg_dbg) printk("-> %s 0x%08x 0x%08x\n", "RX_CALC_IDX0", RX_CALC_IDX0, cpu_to_le32((u32) (NUM_RX_DESC - 1))); ++ sysRegWrite(PDMA_RST_CFG, PST_DRX_IDX0); ++ if (reg_dbg) printk("-> %s 0x%08x 0x%08lx\n", "PDMA_RST_CFG", PDMA_RST_CFG, PST_DRX_IDX0); ++ ++ set_fe_dma_glo_cfg(); ++ ++ return 1; ++} ++ ++inline int rt2880_eth_send(struct net_device* dev, struct sk_buff *skb, int gmac_no) ++{ ++ unsigned int length=skb->len; ++ END_DEVICE* ei_local = netdev_priv(dev); ++ unsigned long tx_cpu_owner_idx0 = sysRegRead(TX_CTX_IDX0); ++ ++ while(ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.DDONE_bit == 0) ++ { ++ ei_local->stat.tx_errors++; ++ } ++ ++ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info1.SDP0 = virt_to_phys(skb->data); ++ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.SDL0 = length; ++ if (gmac_no == 1) { ++ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FPORT = 1; ++ }else { ++ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info4.FPORT = 2; ++ } ++ ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.DDONE_bit = 0; ++ 
tx_cpu_owner_idx0 = (tx_cpu_owner_idx0+1) % NUM_TX_DESC; ++ while(ei_local->tx_ring0[tx_cpu_owner_idx0].txd_info2.DDONE_bit == 0) ++ { ++ ei_local->stat.tx_errors++; ++ } ++ sysRegWrite(TX_CTX_IDX0, cpu_to_le32((u32)tx_cpu_owner_idx0)); ++ ++ { ++ ei_local->stat.tx_packets++; ++ ei_local->stat.tx_bytes += length; ++ } ++ ++ return length; ++} ++ ++int ei_start_xmit(struct sk_buff* skb, struct net_device *dev, int gmac_no) ++{ ++ END_DEVICE *ei_local = netdev_priv(dev); ++ unsigned long flags; ++ unsigned long tx_cpu_owner_idx; ++ unsigned int tx_cpu_owner_idx_next; ++ unsigned int num_of_txd; ++ unsigned int tx_cpu_owner_idx_next2; ++ ++ dev->trans_start = jiffies; /* save the timestamp */ ++ spin_lock_irqsave(&ei_local->page_lock, flags); ++ dma_cache_sync(NULL, skb->data, skb->len, DMA_TO_DEVICE); ++ ++ tx_cpu_owner_idx = sysRegRead(TX_CTX_IDX0); ++ num_of_txd = 1; ++ tx_cpu_owner_idx_next = (tx_cpu_owner_idx + num_of_txd) % NUM_TX_DESC; ++ ++ if(((ei_local->skb_free[tx_cpu_owner_idx]) ==0) && (ei_local->skb_free[tx_cpu_owner_idx_next]==0)){ ++ rt2880_eth_send(dev, skb, gmac_no); ++ ++ tx_cpu_owner_idx_next2 = (tx_cpu_owner_idx_next + 1) % NUM_TX_DESC; ++ ++ if(ei_local->skb_free[tx_cpu_owner_idx_next2]!=0){ ++ } ++ }else { ++ ei_local->stat.tx_dropped++; ++ kfree_skb(skb); ++ spin_unlock_irqrestore(&ei_local->page_lock, flags); ++ return 0; ++ } ++ ++ ei_local->skb_free[tx_cpu_owner_idx] = skb; ++ spin_unlock_irqrestore(&ei_local->page_lock, flags); ++ return 0; ++} ++ ++void ei_xmit_housekeeping(unsigned long unused) ++{ ++ struct net_device *dev = dev_raether; ++ END_DEVICE *ei_local = netdev_priv(dev); ++ struct PDMA_txdesc *tx_desc; ++ unsigned long skb_free_idx; ++ unsigned long tx_dtx_idx __maybe_unused; ++ unsigned long reg_int_mask=0; ++ ++ tx_dtx_idx = sysRegRead(TX_DTX_IDX0); ++ tx_desc = ei_local->tx_ring0; ++ skb_free_idx = ei_local->free_idx; ++ if ((ei_local->skb_free[skb_free_idx]) != 0 && tx_desc[skb_free_idx].txd_info2.DDONE_bit==1) { ++ while(tx_desc[skb_free_idx].txd_info2.DDONE_bit==1 && (ei_local->skb_free[skb_free_idx])!=0 ){ ++ dev_kfree_skb_any(ei_local->skb_free[skb_free_idx]); ++ ei_local->skb_free[skb_free_idx]=0; ++ skb_free_idx = (skb_free_idx +1) % NUM_TX_DESC; ++ } ++ ++ netif_wake_queue(dev); ++ tx_ring_full=0; ++ ei_local->free_idx = skb_free_idx; ++ } ++ ++ reg_int_mask=sysRegRead(FE_INT_ENABLE); ++ sysRegWrite(FE_INT_ENABLE, reg_int_mask| TX_DLY_INT); ++} ++ ++EXPORT_SYMBOL(ei_start_xmit); ++EXPORT_SYMBOL(ei_xmit_housekeeping); ++EXPORT_SYMBOL(fe_dma_init); ++EXPORT_SYMBOL(rt2880_eth_send); +--- /dev/null ++++ b/drivers/net/ethernet/raeth/raether_qdma.c +@@ -0,0 +1,805 @@ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#if defined (CONFIG_RAETH_TSO) ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#endif ++#include ++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35) ++#include ++#endif ++ ++#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) ++#include ++#else ++#include ++#endif ++ ++#include "ra2882ethreg.h" ++#include "raether.h" ++#include "ra_mac.h" ++#include "ra_ioctl.h" ++#include "ra_rfrw.h" ++#ifdef CONFIG_RAETH_NETLINK ++#include "ra_netlink.h" ++#endif ++#if defined (CONFIG_RAETH_QOS) ++#include "ra_qos.h" ++#endif ++ ++#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE) ++#include "../../../net/nat/hw_nat/ra_nat.h" ++#endif ++ ++#if defined (TASKLET_WORKQUEUE_SW) ++int init_schedule; ++int 
working_schedule; ++#endif ++ ++ ++#if !defined(CONFIG_RA_NAT_NONE) ++/* bruce+ ++ */ ++extern int (*ra_sw_nat_hook_rx)(struct sk_buff *skb); ++extern int (*ra_sw_nat_hook_tx)(struct sk_buff *skb, int gmac_no); ++#endif ++ ++#if defined(CONFIG_RA_CLASSIFIER)||defined(CONFIG_RA_CLASSIFIER_MODULE) ++/* Qwert+ ++ */ ++#include ++extern int (*ra_classifier_hook_tx)(struct sk_buff *skb, unsigned long cur_cycle); ++extern int (*ra_classifier_hook_rx)(struct sk_buff *skb, unsigned long cur_cycle); ++#endif /* CONFIG_RA_CLASSIFIER */ ++ ++#if defined (CONFIG_RALINK_RT3052_MP2) ++int32_t mcast_rx(struct sk_buff * skb); ++int32_t mcast_tx(struct sk_buff * skb); ++#endif ++ ++#ifdef RA_MTD_RW_BY_NUM ++int ra_mtd_read(int num, loff_t from, size_t len, u_char *buf); ++#else ++int ra_mtd_read_nm(char *name, loff_t from, size_t len, u_char *buf); ++#endif ++ ++/* gmac driver feature set config */ ++#if defined (CONFIG_RAETH_NAPI) || defined (CONFIG_RAETH_QOS) ++#undef DELAY_INT ++#else ++#define DELAY_INT 1 ++#endif ++ ++//#define CONFIG_UNH_TEST ++/* end of config */ ++ ++#if defined (CONFIG_RAETH_JUMBOFRAME) ++#define MAX_RX_LENGTH 4096 ++#else ++#define MAX_RX_LENGTH 1536 ++#endif ++ ++extern struct net_device *dev_raether; ++ ++#if defined (CONFIG_RAETH_MULTIPLE_RX_RING) ++static int rx_dma_owner_idx1; ++#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR ++static int rx_calc_idx1; ++#endif ++#endif ++#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR ++static int rx_calc_idx0; ++static unsigned long tx_cpu_owner_idx0=0; ++#endif ++static unsigned long tx_ring_full=0; ++ ++#if defined (CONFIG_ETHTOOL) && defined (CONFIG_RAETH_ROUTER) ++#include "ra_ethtool.h" ++extern struct ethtool_ops ra_ethtool_ops; ++#ifdef CONFIG_PSEUDO_SUPPORT ++extern struct ethtool_ops ra_virt_ethtool_ops; ++#endif // CONFIG_PSEUDO_SUPPORT // ++#endif // (CONFIG_ETHTOOL // ++ ++#ifdef CONFIG_RALINK_VISTA_BASIC ++int is_switch_175c = 1; ++#endif ++ ++//skb->mark to queue mapping table ++extern unsigned int M2Q_table[64]; ++ ++ ++#define KSEG1 0xa0000000 ++#define PHYS_TO_VIRT(x) ((void *)((x) | KSEG1)) ++#define VIRT_TO_PHYS(x) ((unsigned long)(x) & ~KSEG1) ++ ++extern void set_fe_dma_glo_cfg(void); ++ ++ ++/** ++ * ++ * @brief: get the TXD index from its address ++ * ++ * @param: cpu_ptr ++ * ++ * @return: TXD index ++*/ ++ ++static unsigned int GET_TXD_OFFSET(struct QDMA_txdesc **cpu_ptr) ++{ ++ struct net_device *dev = dev_raether; ++ END_DEVICE *ei_local = netdev_priv(dev); ++ int ctx_offset; ++ ctx_offset = (((((u32)*cpu_ptr) <<8)>>8) - ((((u32)ei_local->txd_pool)<<8)>>8))/ sizeof(struct QDMA_txdesc); ++ ctx_offset = (*cpu_ptr - ei_local->txd_pool); ++ ++ return ctx_offset; ++} ++ ++ ++ ++/** ++ * @brief get free TXD from TXD queue ++ * ++ * @param free_txd ++ * ++ * @return ++ */ ++static int get_free_txd(struct QDMA_txdesc **free_txd) ++{ ++ struct net_device *dev = dev_raether; ++ END_DEVICE *ei_local = netdev_priv(dev); ++ unsigned int tmp_idx; ++ ++ if(ei_local->free_txd_num > 0){ ++ tmp_idx = ei_local->free_txd_head; ++ ei_local->free_txd_head = ei_local->txd_pool_info[tmp_idx]; ++ ei_local->free_txd_num -= 1; ++ *free_txd = &ei_local->txd_pool[tmp_idx]; ++ return tmp_idx; ++ }else ++ return NUM_TX_DESC; ++} ++ ++ ++/** ++ * @brief add free TXD into TXD queue ++ * ++ * @param free_txd ++ * ++ * @return ++ */ ++int put_free_txd(int free_txd_idx) ++{ ++ struct net_device *dev = dev_raether; ++ END_DEVICE *ei_local = netdev_priv(dev); ++ ei_local->txd_pool_info[ei_local->free_txd_tail] = free_txd_idx; ++ ei_local->free_txd_tail = 
free_txd_idx; ++ ei_local->txd_pool_info[free_txd_idx] = NUM_TX_DESC; ++ ei_local->free_txd_num += 1; ++ return 1; ++} ++ ++/*define qdma initial alloc*/ ++/** ++ * @brief ++ * ++ * @param net_dev ++ * ++ * @return 0: fail ++ * 1: success ++ */ ++bool qdma_tx_desc_alloc(void) ++{ ++ struct net_device *dev = dev_raether; ++ END_DEVICE *ei_local = netdev_priv(dev); ++ struct QDMA_txdesc *free_txd = NULL; ++ unsigned int txd_idx; ++ int i = 0; ++ ++ ++ ei_local->txd_pool = pci_alloc_consistent(NULL, sizeof(struct QDMA_txdesc) * NUM_TX_DESC, &ei_local->phy_txd_pool); ++ printk("txd_pool=%p phy_txd_pool=%08X\n", ei_local->txd_pool , ei_local->phy_txd_pool); ++ ++ if (ei_local->txd_pool == NULL) { ++ printk("adapter->txd_pool allocation failed!\n"); ++ return 0; ++ } ++ printk("ei_local->skb_free start address is 0x%p.\n", ei_local->skb_free); ++ //set all txd_pool_info to 0. ++ for ( i = 0; i < NUM_TX_DESC; i++) ++ { ++ ei_local->skb_free[i]= 0; ++ ei_local->txd_pool_info[i] = i + 1; ++ ei_local->txd_pool[i].txd_info3.LS_bit = 1; ++ ei_local->txd_pool[i].txd_info3.OWN_bit = 1; ++ } ++ ++ ei_local->free_txd_head = 0; ++ ei_local->free_txd_tail = NUM_TX_DESC - 1; ++ ei_local->free_txd_num = NUM_TX_DESC; ++ ++ ++ //get free txd from txd pool ++ txd_idx = get_free_txd(&free_txd); ++ if( txd_idx == NUM_TX_DESC) { ++ printk("get_free_txd fail\n"); ++ return 0; ++ } ++ ++ //add null TXD for transmit ++ ei_local->tx_dma_ptr = VIRT_TO_PHYS(free_txd); ++ ei_local->tx_cpu_ptr = VIRT_TO_PHYS(free_txd); ++ sysRegWrite(QTX_CTX_PTR, ei_local->tx_cpu_ptr); ++ sysRegWrite(QTX_DTX_PTR, ei_local->tx_dma_ptr); ++ ++ //get free txd from txd pool ++ ++ txd_idx = get_free_txd(&free_txd); ++ if( txd_idx == NUM_TX_DESC) { ++ printk("get_free_txd fail\n"); ++ return 0; ++ } ++ // add null TXD for release ++ sysRegWrite(QTX_CRX_PTR, VIRT_TO_PHYS(free_txd)); ++ sysRegWrite(QTX_DRX_PTR, VIRT_TO_PHYS(free_txd)); ++ ++ printk("free_txd: %p, ei_local->cpu_ptr: %08X\n", free_txd, ei_local->tx_cpu_ptr); ++ ++ printk(" POOL HEAD_PTR | DMA_PTR | CPU_PTR \n"); ++ printk("----------------+---------+--------\n"); ++#if 1 ++ printk(" 0x%p 0x%08X 0x%08X\n",ei_local->txd_pool, ++ ei_local->tx_dma_ptr, ei_local->tx_cpu_ptr); ++#endif ++ return 1; ++} ++ ++bool fq_qdma_init(void) ++{ ++ struct QDMA_txdesc *free_head = NULL; ++ unsigned int free_head_phy; ++ unsigned int free_tail_phy; ++ unsigned int *free_page_head = NULL; ++ unsigned int free_page_head_phy; ++ int i; ++ ++ free_head = pci_alloc_consistent(NULL, NUM_QDMA_PAGE * sizeof(struct QDMA_txdesc), &free_head_phy); ++ if (unlikely(free_head == NULL)){ ++ printk(KERN_ERR "QDMA FQ decriptor not available...\n"); ++ return 0; ++ } ++ memset(free_head, 0x0, sizeof(struct QDMA_txdesc) * NUM_QDMA_PAGE); ++ ++ free_page_head = pci_alloc_consistent(NULL, NUM_QDMA_PAGE * QDMA_PAGE_SIZE, &free_page_head_phy); ++ if (unlikely(free_page_head == NULL)){ ++ printk(KERN_ERR "QDMA FQ pager not available...\n"); ++ return 0; ++ } ++ for (i=0; i < NUM_QDMA_PAGE; i++) { ++ free_head[i].txd_info1.SDP = (free_page_head_phy + (i * QDMA_PAGE_SIZE)); ++ if(i < (NUM_QDMA_PAGE-1)){ ++ free_head[i].txd_info2.NDP = (free_head_phy + ((i+1) * sizeof(struct QDMA_txdesc))); ++ ++ ++#if 0 ++ printk("free_head_phy[%d] is 0x%x!!!\n",i, VIRT_TO_PHYS(&free_head[i]) ); ++ printk("free_head[%d] is 0x%x!!!\n",i, &free_head[i] ); ++ printk("free_head[%d].txd_info1.SDP is 0x%x!!!\n",i, free_head[i].txd_info1.SDP ); ++ printk("free_head[%d].txd_info2.NDP is 0x%x!!!\n",i, free_head[i].txd_info2.NDP ); ++#endif ++ } ++ 
free_head[i].txd_info3.SDL = QDMA_PAGE_SIZE; ++ ++ } ++ free_tail_phy = (free_head_phy + (u32)((NUM_QDMA_PAGE-1) * sizeof(struct QDMA_txdesc))); ++ ++ printk("free_head_phy is 0x%x!!!\n", free_head_phy); ++ printk("free_tail_phy is 0x%x!!!\n", free_tail_phy); ++ sysRegWrite(QDMA_FQ_HEAD, (u32)free_head_phy); ++ sysRegWrite(QDMA_FQ_TAIL, (u32)free_tail_phy); ++ sysRegWrite(QDMA_FQ_CNT, ((NUM_TX_DESC << 16) | NUM_QDMA_PAGE)); ++ sysRegWrite(QDMA_FQ_BLEN, QDMA_PAGE_SIZE << 16); ++ return 1; ++} ++ ++int fe_dma_init(struct net_device *dev) ++{ ++ ++ int i; ++ unsigned int regVal; ++ END_DEVICE* ei_local = netdev_priv(dev); ++ ++ fq_qdma_init(); ++ ++ while(1) ++ { ++ regVal = sysRegRead(QDMA_GLO_CFG); ++ if((regVal & RX_DMA_BUSY)) ++ { ++ printk("\n RX_DMA_BUSY !!! "); ++ continue; ++ } ++ if((regVal & TX_DMA_BUSY)) ++ { ++ printk("\n TX_DMA_BUSY !!! "); ++ continue; ++ } ++ break; ++ } ++ /*tx desc alloc, add a NULL TXD to HW*/ ++ ++ qdma_tx_desc_alloc(); ++ ++ ++ /* Initial RX Ring 0*/ ++#ifdef CONFIG_32B_DESC ++ ei_local->rx_ring0 = kmalloc(NUM_RX_DESC * sizeof(struct PDMA_rxdesc), GFP_KERNEL); ++ ei_local->phy_rx_ring0 = virt_to_phys(ei_local->rx_ring0); ++#else ++ ei_local->rx_ring0 = pci_alloc_consistent(NULL, NUM_RX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_rx_ring0); ++#endif ++ for (i = 0; i < NUM_RX_DESC; i++) { ++ memset(&ei_local->rx_ring0[i],0,sizeof(struct PDMA_rxdesc)); ++ ei_local->rx_ring0[i].rxd_info2.DDONE_bit = 0; ++#if defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA) ++ ei_local->rx_ring0[i].rxd_info2.LS0 = 0; ++ ei_local->rx_ring0[i].rxd_info2.PLEN0 = MAX_RX_LENGTH; ++#else ++ ei_local->rx_ring0[i].rxd_info2.LS0 = 1; ++#endif ++ ei_local->rx_ring0[i].rxd_info1.PDP0 = dma_map_single(NULL, ei_local->netrx0_skbuf[i]->data, MAX_RX_LENGTH, PCI_DMA_FROMDEVICE); ++ } ++ printk("\nphy_rx_ring0 = 0x%08x, rx_ring0 = 0x%p\n",ei_local->phy_rx_ring0,ei_local->rx_ring0); ++ ++#if defined (CONFIG_RAETH_MULTIPLE_RX_RING) ++ /* Initial RX Ring 1*/ ++#ifdef CONFIG_32B_DESC ++ ei_local->rx_ring1 = kmalloc(NUM_RX_DESC * sizeof(struct PDMA_rxdesc), GFP_KERNEL); ++ ei_local->phy_rx_ring1 = virt_to_phys(ei_local->rx_ring1); ++#else ++ ei_local->rx_ring1 = pci_alloc_consistent(NULL, NUM_RX_DESC * sizeof(struct PDMA_rxdesc), &ei_local->phy_rx_ring1); ++#endif ++ for (i = 0; i < NUM_RX_DESC; i++) { ++ memset(&ei_local->rx_ring1[i],0,sizeof(struct PDMA_rxdesc)); ++ ei_local->rx_ring1[i].rxd_info2.DDONE_bit = 0; ++#if defined (CONFIG_RAETH_SCATTER_GATHER_RX_DMA) ++ ei_local->rx_ring0[i].rxd_info2.LS0 = 0; ++ ei_local->rx_ring0[i].rxd_info2.PLEN0 = MAX_RX_LENGTH; ++#else ++ ei_local->rx_ring1[i].rxd_info2.LS0 = 1; ++#endif ++ ei_local->rx_ring1[i].rxd_info1.PDP0 = dma_map_single(NULL, ei_local->netrx1_skbuf[i]->data, MAX_RX_LENGTH, PCI_DMA_FROMDEVICE); ++ } ++ printk("\nphy_rx_ring1 = 0x%08x, rx_ring1 = 0x%p\n",ei_local->phy_rx_ring1,ei_local->rx_ring1); ++#endif ++ ++ regVal = sysRegRead(QDMA_GLO_CFG); ++ regVal &= 0x000000FF; ++ sysRegWrite(QDMA_GLO_CFG, regVal); ++ regVal=sysRegRead(QDMA_GLO_CFG); ++ ++ /* Tell the adapter where the TX/RX rings are located. 
*/ ++ ++ sysRegWrite(QRX_BASE_PTR_0, phys_to_bus((u32) ei_local->phy_rx_ring0)); ++ sysRegWrite(QRX_MAX_CNT_0, cpu_to_le32((u32) NUM_RX_DESC)); ++ sysRegWrite(QRX_CRX_IDX_0, cpu_to_le32((u32) (NUM_RX_DESC - 1))); ++#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR ++ rx_calc_idx0 = rx_dma_owner_idx0 = sysRegRead(QRX_CRX_IDX_0); ++#endif ++ sysRegWrite(QDMA_RST_CFG, PST_DRX_IDX0); ++#if defined (CONFIG_RAETH_MULTIPLE_RX_RING) ++ sysRegWrite(QRX_BASE_PTR_1, phys_to_bus((u32) ei_local->phy_rx_ring1)); ++ sysRegWrite(QRX_MAX_CNT_1, cpu_to_le32((u32) NUM_RX_DESC)); ++ sysRegWrite(QRX_CRX_IDX_1, cpu_to_le32((u32) (NUM_RX_DESC - 1))); ++#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR ++ rx_calc_idx1 = rx_dma_owner_idx1 = sysRegRead(QRX_CRX_IDX_1); ++#endif ++ sysRegWrite(QDMA_RST_CFG, PST_DRX_IDX1); ++#endif ++ ++ set_fe_dma_glo_cfg(); ++ ++ return 1; ++} ++ ++inline int rt2880_eth_send(struct net_device* dev, struct sk_buff *skb, int gmac_no) ++{ ++ unsigned int length=skb->len; ++ END_DEVICE* ei_local = netdev_priv(dev); ++ ++ struct QDMA_txdesc *cpu_ptr; ++ ++ struct QDMA_txdesc *dma_ptr __maybe_unused; ++ struct QDMA_txdesc *free_txd; ++ int ctx_offset; ++#if defined (CONFIG_RAETH_TSO) ++ struct iphdr *iph = NULL; ++ struct QDMA_txdesc *init_cpu_ptr; ++ struct tcphdr *th = NULL; ++ struct skb_frag_struct *frag; ++ unsigned int nr_frags = skb_shinfo(skb)->nr_frags; ++ int i=0; ++ int init_txd_idx; ++#endif // CONFIG_RAETH_TSO // ++ ++#if defined (CONFIG_RAETH_TSOV6) ++ struct ipv6hdr *ip6h = NULL; ++#endif ++ ++#ifdef CONFIG_PSEUDO_SUPPORT ++ PSEUDO_ADAPTER *pAd; ++#endif ++ cpu_ptr = PHYS_TO_VIRT(ei_local->tx_cpu_ptr); ++ dma_ptr = PHYS_TO_VIRT(ei_local->tx_dma_ptr); ++ ctx_offset = GET_TXD_OFFSET(&cpu_ptr); ++ ei_local->skb_free[ctx_offset] = skb; ++#if defined (CONFIG_RAETH_TSO) ++ init_cpu_ptr = cpu_ptr; ++ init_txd_idx = ctx_offset; ++#endif ++ ++#if !defined (CONFIG_RAETH_TSO) ++ ++ //2. prepare data ++ cpu_ptr->txd_info1.SDP = VIRT_TO_PHYS(skb->data); ++ cpu_ptr->txd_info3.SDL = skb->len; ++ ++ if (gmac_no == 1) { ++ cpu_ptr->txd_info4.FPORT = 1; ++ }else { ++ cpu_ptr->txd_info4.FPORT = 2; ++ } ++ ++ ++ cpu_ptr->txd_info3.QID = M2Q_table[skb->mark]; ++#if 0 ++ iph = (struct iphdr *)skb_network_header(skb); ++ if (iph->tos == 0xe0) ++ cpu_ptr->txd_info3.QID = 3; ++ else if (iph->tos == 0xa0) ++ cpu_ptr->txd_info3.QID = 2; ++ else if (iph->tos == 0x20) ++ cpu_ptr->txd_info3.QID = 1; ++ else ++ cpu_ptr->txd_info3.QID = 0; ++#endif ++ ++#if defined (CONFIG_RAETH_CHECKSUM_OFFLOAD) && ! defined(CONFIG_RALINK_RT5350) && !defined (CONFIG_RALINK_MT7628) ++ if (skb->ip_summed == CHECKSUM_PARTIAL){ ++ cpu_ptr->txd_info4.TUI_CO = 7; ++ }else { ++ cpu_ptr->txd_info4.TUI_CO = 0; ++ } ++#endif ++ ++#ifdef CONFIG_RAETH_HW_VLAN_TX ++ if(vlan_tx_tag_present(skb)) { ++ cpu_ptr->txd_info4.VLAN_TAG = 0x10000 | vlan_tx_tag_get(skb); ++ }else { ++ cpu_ptr->txd_info4.VLAN_TAG = 0; ++ } ++#endif ++ ++#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE) ++ if(FOE_MAGIC_TAG(skb) == FOE_MAGIC_PPE) { ++ if(ra_sw_nat_hook_rx!= NULL){ ++ cpu_ptr->txd_info4.FPORT = 4; /* PPE */ ++ FOE_MAGIC_TAG(skb) = 0; ++ } ++ } ++#endif ++#if 0 ++ cpu_ptr->txd_info4.FPORT = 4; /* PPE */ ++ cpu_ptr->txd_info4.UDF = 0x2F; ++#endif ++ ++ dma_cache_sync(NULL, skb->data, skb->len, DMA_TO_DEVICE); ++ cpu_ptr->txd_info3.SWC_bit = 1; ++ ++ //3. get NULL TXD and decrease free_tx_num by 1. ++ ctx_offset = get_free_txd(&free_txd); ++ if(ctx_offset == NUM_TX_DESC) { ++ printk("get_free_txd fail\n"); // this should not happen. 
free_txd_num is 2 at least. ++ return 0; ++ } ++ ++ //4. hook new TXD in the end of queue ++ cpu_ptr->txd_info2.NDP = VIRT_TO_PHYS(free_txd); ++ ++ ++ //5. move CPU_PTR to new TXD ++ ei_local->tx_cpu_ptr = VIRT_TO_PHYS(free_txd); ++ cpu_ptr->txd_info3.OWN_bit = 0; ++ sysRegWrite(QTX_CTX_PTR, ei_local->tx_cpu_ptr); ++ ++#if 0 ++ printk("----------------------------------------------\n"); ++ printk("txd_info1:%08X \n",*(int *)&cpu_ptr->txd_info1); ++ printk("txd_info2:%08X \n",*(int *)&cpu_ptr->txd_info2); ++ printk("txd_info3:%08X \n",*(int *)&cpu_ptr->txd_info3); ++ printk("txd_info4:%08X \n",*(int *)&cpu_ptr->txd_info4); ++#endif ++ ++#else //#if !defined (CONFIG_RAETH_TSO) ++ cpu_ptr->txd_info1.SDP = VIRT_TO_PHYS(skb->data); ++ cpu_ptr->txd_info3.SDL = (length - skb->data_len); ++ cpu_ptr->txd_info3.LS_bit = nr_frags ? 0:1; ++ if (gmac_no == 1) { ++ cpu_ptr->txd_info4.FPORT = 1; ++ }else { ++ cpu_ptr->txd_info4.FPORT = 2; ++ } ++ ++ cpu_ptr->txd_info4.TSO = 0; ++ cpu_ptr->txd_info3.QID = M2Q_table[skb->mark]; ++#if defined (CONFIG_RAETH_CHECKSUM_OFFLOAD) && ! defined(CONFIG_RALINK_RT5350) && !defined (CONFIG_RALINK_MT7628) ++ if (skb->ip_summed == CHECKSUM_PARTIAL){ ++ cpu_ptr->txd_info4.TUI_CO = 7; ++ }else { ++ cpu_ptr->txd_info4.TUI_CO = 0; ++ } ++#endif ++ ++#ifdef CONFIG_RAETH_HW_VLAN_TX ++ if(vlan_tx_tag_present(skb)) { ++ cpu_ptr->txd_info4.VLAN_TAG = 0x10000 | vlan_tx_tag_get(skb); ++ }else { ++ cpu_ptr->txd_info4.VLAN_TAG = 0; ++ } ++#endif ++ ++#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE) ++ if(FOE_MAGIC_TAG(skb) == FOE_MAGIC_PPE) { ++ if(ra_sw_nat_hook_rx!= NULL){ ++ cpu_ptr->txd_info4.FPORT = 4; /* PPE */ ++ FOE_MAGIC_TAG(skb) = 0; ++ } ++ } ++#endif ++ ++ cpu_ptr->txd_info3.SWC_bit = 1; ++ ++ ctx_offset = get_free_txd(&free_txd); ++ if(ctx_offset == NUM_TX_DESC) { ++ printk("get_free_txd fail\n"); ++ return 0; ++ } ++ cpu_ptr->txd_info2.NDP = VIRT_TO_PHYS(free_txd); ++ ei_local->tx_cpu_ptr = VIRT_TO_PHYS(free_txd); ++ ++ if(nr_frags > 0) { ++ for(i=0;ifrags[i]; ++ cpu_ptr = free_txd; ++ cpu_ptr->txd_info3.QID = M2Q_table[skb->mark]; ++ cpu_ptr->txd_info1.SDP = pci_map_page(NULL, frag->page, frag->page_offset, frag->size, PCI_DMA_TODEVICE); ++ cpu_ptr->txd_info3.SDL = frag->size; ++ cpu_ptr->txd_info3.LS_bit = (i==nr_frags-1)?1:0; ++ cpu_ptr->txd_info3.OWN_bit = 0; ++ cpu_ptr->txd_info3.SWC_bit = 1; ++ ei_local->skb_free[ctx_offset] = (i==nr_frags-1)?skb:(struct sk_buff *)0xFFFFFFFF; //MAGIC ID ++ ++ ctx_offset = get_free_txd(&free_txd); ++ cpu_ptr->txd_info2.NDP = VIRT_TO_PHYS(free_txd); ++ ei_local->tx_cpu_ptr = VIRT_TO_PHYS(free_txd); ++ } ++ ei_local->skb_free[init_txd_idx]= (struct sk_buff *)0xFFFFFFFF; //MAGIC ID ++ } ++ ++ if(skb_shinfo(skb)->gso_segs > 1) { ++ ++// TsoLenUpdate(skb->len); ++ ++ /* TCP over IPv4 */ ++ iph = (struct iphdr *)skb_network_header(skb); ++#if defined (CONFIG_RAETH_TSOV6) ++ /* TCP over IPv6 */ ++ ip6h = (struct ipv6hdr *)skb_network_header(skb); ++#endif ++ if((iph->version == 4) && (iph->protocol == IPPROTO_TCP)) { ++ th = (struct tcphdr *)skb_transport_header(skb); ++ ++ init_cpu_ptr->txd_info4.TSO = 1; ++ ++ th->check = htons(skb_shinfo(skb)->gso_size); ++ dma_cache_sync(NULL, th, sizeof(struct tcphdr), DMA_TO_DEVICE); ++ } ++ ++#if defined (CONFIG_RAETH_TSOV6) ++ /* TCP over IPv6 */ ++ //ip6h = (struct ipv6hdr *)skb_network_header(skb); ++ else if ((ip6h->version == 6) && (ip6h->nexthdr == NEXTHDR_TCP)) { ++ th = (struct tcphdr *)skb_transport_header(skb); ++#ifdef CONFIG_RAETH_RW_PDMAPTR_FROM_VAR ++ 
init_cpu_ptr->txd_info4.TSO = 1; ++#else ++ init_cpu_ptr->txd_info4.TSO = 1; ++#endif ++ th->check = htons(skb_shinfo(skb)->gso_size); ++ dma_cache_sync(NULL, th, sizeof(struct tcphdr), DMA_TO_DEVICE); ++ } ++#endif ++ } ++ ++ ++// dma_cache_sync(NULL, skb->data, skb->len, DMA_TO_DEVICE); ++ ++ init_cpu_ptr->txd_info3.OWN_bit = 0; ++#endif // CONFIG_RAETH_TSO // ++ ++ sysRegWrite(QTX_CTX_PTR, ei_local->tx_cpu_ptr); ++ ++#ifdef CONFIG_PSEUDO_SUPPORT ++ if (gmac_no == 2) { ++ if (ei_local->PseudoDev != NULL) { ++ pAd = netdev_priv(ei_local->PseudoDev); ++ pAd->stat.tx_packets++; ++ pAd->stat.tx_bytes += length; ++ } ++ } else ++ ++#endif ++ { ++ ei_local->stat.tx_packets++; ++ ei_local->stat.tx_bytes += skb->len; ++ } ++ return length; ++} ++ ++int ei_start_xmit(struct sk_buff* skb, struct net_device *dev, int gmac_no) ++{ ++ END_DEVICE *ei_local = netdev_priv(dev); ++ unsigned long flags; ++ unsigned int num_of_txd; ++#if defined (CONFIG_RAETH_TSO) ++ unsigned int nr_frags = skb_shinfo(skb)->nr_frags; ++#endif ++#ifdef CONFIG_PSEUDO_SUPPORT ++ PSEUDO_ADAPTER *pAd; ++#endif ++ ++#if !defined(CONFIG_RA_NAT_NONE) ++ if(ra_sw_nat_hook_tx!= NULL) ++ { ++ spin_lock_irqsave(&ei_local->page_lock, flags); ++ if(ra_sw_nat_hook_tx(skb, gmac_no)==1){ ++ spin_unlock_irqrestore(&ei_local->page_lock, flags); ++ }else{ ++ kfree_skb(skb); ++ spin_unlock_irqrestore(&ei_local->page_lock, flags); ++ return 0; ++ } ++ } ++#endif ++ ++ ++ ++ dev->trans_start = jiffies; /* save the timestamp */ ++ spin_lock_irqsave(&ei_local->page_lock, flags); ++ dma_cache_sync(NULL, skb->data, skb->len, DMA_TO_DEVICE); ++ ++ ++//check free_txd_num before calling rt288_eth_send() ++ ++#if defined (CONFIG_RAETH_TSO) ++ num_of_txd = (nr_frags==0) ? 1 : (nr_frags + 1); ++#else ++ num_of_txd = 1; ++#endif ++ ++#if defined(CONFIG_RALINK_MT7621) ++ if(sysRegRead(0xbe00000c)==0x00030101) { ++ ei_xmit_housekeeping(0); ++ } ++#endif ++ ++ ++ if ((ei_local->free_txd_num > num_of_txd + 1) && (ei_local->free_txd_num != NUM_TX_DESC)) ++ { ++ rt2880_eth_send(dev, skb, gmac_no); // need to modify rt2880_eth_send() for QDMA ++ if (ei_local->free_txd_num < 3) ++ { ++#if defined (CONFIG_RAETH_STOP_RX_WHEN_TX_FULL) ++ netif_stop_queue(dev); ++#ifdef CONFIG_PSEUDO_SUPPORT ++ netif_stop_queue(ei_local->PseudoDev); ++#endif ++ tx_ring_full = 1; ++#endif ++ } ++ } else { ++#ifdef CONFIG_PSEUDO_SUPPORT ++ if (gmac_no == 2) ++ { ++ if (ei_local->PseudoDev != NULL) ++ { ++ pAd = netdev_priv(ei_local->PseudoDev); ++ pAd->stat.tx_dropped++; ++ } ++ } else ++#endif ++ ei_local->stat.tx_dropped++; ++ kfree_skb(skb); ++ spin_unlock_irqrestore(&ei_local->page_lock, flags); ++ return 0; ++ } ++ spin_unlock_irqrestore(&ei_local->page_lock, flags); ++ return 0; ++} ++ ++void ei_xmit_housekeeping(unsigned long unused) ++{ ++ struct net_device *dev = dev_raether; ++ END_DEVICE *ei_local = netdev_priv(dev); ++#ifndef CONFIG_RAETH_NAPI ++ unsigned long reg_int_mask=0; ++#endif ++ struct QDMA_txdesc *dma_ptr = NULL; ++ struct QDMA_txdesc *cpu_ptr = NULL; ++ struct QDMA_txdesc *tmp_ptr = NULL; ++ unsigned int htx_offset = 0; ++ ++ dma_ptr = PHYS_TO_VIRT(sysRegRead(QTX_DRX_PTR)); ++ cpu_ptr = PHYS_TO_VIRT(sysRegRead(QTX_CRX_PTR)); ++ if(cpu_ptr != dma_ptr && (cpu_ptr->txd_info3.OWN_bit == 1)) { ++ while(cpu_ptr != dma_ptr && (cpu_ptr->txd_info3.OWN_bit == 1)) { ++ ++ //1. keep cpu next TXD ++ tmp_ptr = PHYS_TO_VIRT(cpu_ptr->txd_info2.NDP); ++ htx_offset = GET_TXD_OFFSET(&tmp_ptr); ++ //2. 
free skb meomry ++#if defined (CONFIG_RAETH_TSO) ++ if(ei_local->skb_free[htx_offset]!=(struct sk_buff *)0xFFFFFFFF) { ++ dev_kfree_skb_any(ei_local->skb_free[htx_offset]); ++ } ++#else ++ dev_kfree_skb_any(ei_local->skb_free[htx_offset]); ++#endif ++ ++ //3. release TXD ++ htx_offset = GET_TXD_OFFSET(&cpu_ptr); ++ put_free_txd(htx_offset); ++ ++ netif_wake_queue(dev); ++#ifdef CONFIG_PSEUDO_SUPPORT ++ netif_wake_queue(ei_local->PseudoDev); ++#endif ++ tx_ring_full=0; ++ ++ //4. update cpu_ptr to next ptr ++ cpu_ptr = tmp_ptr; ++ } ++ } ++ sysRegWrite(QTX_CRX_PTR, VIRT_TO_PHYS(cpu_ptr)); ++#ifndef CONFIG_RAETH_NAPI ++ reg_int_mask=sysRegRead(QFE_INT_ENABLE); ++#if defined (DELAY_INT) ++ sysRegWrite(FE_INT_ENABLE, reg_int_mask| RLS_DLY_INT); ++#else ++ ++ sysRegWrite(FE_INT_ENABLE, reg_int_mask | RLS_DONE_INT); ++#endif ++#endif //CONFIG_RAETH_NAPI// ++} ++ ++EXPORT_SYMBOL(ei_start_xmit); ++EXPORT_SYMBOL(ei_xmit_housekeeping); ++EXPORT_SYMBOL(fe_dma_init); ++EXPORT_SYMBOL(rt2880_eth_send); +--- a/drivers/net/ethernet/Kconfig ++++ b/drivers/net/ethernet/Kconfig +@@ -136,6 +136,7 @@ source "drivers/net/ethernet/packetengin + source "drivers/net/ethernet/pasemi/Kconfig" + source "drivers/net/ethernet/qlogic/Kconfig" + source "drivers/net/ethernet/ralink/Kconfig" ++source "drivers/net/ethernet/raeth/Kconfig" + source "drivers/net/ethernet/realtek/Kconfig" + source "drivers/net/ethernet/renesas/Kconfig" + source "drivers/net/ethernet/rdc/Kconfig" +--- a/drivers/net/ethernet/Makefile ++++ b/drivers/net/ethernet/Makefile +@@ -54,6 +54,7 @@ obj-$(CONFIG_NET_PACKET_ENGINE) += packe + obj-$(CONFIG_NET_VENDOR_PASEMI) += pasemi/ + obj-$(CONFIG_NET_VENDOR_QLOGIC) += qlogic/ + obj-$(CONFIG_NET_RALINK) += ralink/ ++obj-$(CONFIG_RAETH) += raeth/ + obj-$(CONFIG_NET_VENDOR_REALTEK) += realtek/ + obj-$(CONFIG_SH_ETH) += renesas/ + obj-$(CONFIG_NET_VENDOR_RDC) += rdc/ +--- /dev/null ++++ b/arch/mips/include/asm/rt2880/board-custom.h +@@ -0,0 +1,153 @@ ++/* Copyright Statement: ++ * ++ * This software/firmware and related documentation ("MediaTek Software") are ++ * protected under relevant copyright laws. The information contained herein ++ * is confidential and proprietary to MediaTek Inc. and/or its licensors. ++ * Without the prior written permission of MediaTek inc. and/or its licensors, ++ * any reproduction, modification, use or disclosure of MediaTek Software, ++ * and information contained herein, in whole or in part, shall be strictly prohibited. ++ */ ++/* MediaTek Inc. (C) 2010. All rights reserved. ++ * ++ * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES ++ * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE") ++ * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON ++ * AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES, ++ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT. ++ * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE ++ * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR ++ * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH ++ * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES ++ * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES ++ * CONTAINED IN MEDIATEK SOFTWARE. 
MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK ++ * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR ++ * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND ++ * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE, ++ * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE, ++ * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO ++ * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE. ++ * ++ * The following software/firmware and/or related documentation ("MediaTek Software") ++ * have been modified by MediaTek Inc. All revisions are subject to any receiver's ++ * applicable license agreements with MediaTek Inc. ++ */ ++ ++#ifndef __ARCH_ARM_MACH_MT6575_CUSTOM_BOARD_H ++#define __ARCH_ARM_MACH_MT6575_CUSTOM_BOARD_H ++ ++#include ++ ++/*=======================================================================*/ ++/* MT6575 SD */ ++/*=======================================================================*/ ++#ifdef MTK_EMMC_SUPPORT ++#define CFG_DEV_MSDC0 ++#endif ++#define CFG_DEV_MSDC1 ++#define CFG_DEV_MSDC2 ++#define CFG_DEV_MSDC3 ++#if defined(CONFIG_MTK_COMBO) || defined(CONFIG_MTK_COMBO_MODULE) ++/* ++SDIO slot index number used by connectivity combo chip: ++0: invalid (used by memory card) ++1: MSDC1 ++2: MSDC2 ++*/ ++#define CONFIG_MTK_WCN_CMB_SDIO_SLOT (2) /* MSDC2 */ ++#else ++#undef CONFIG_MTK_WCN_CMB_SDIO_SLOT ++#endif ++ ++#if 0 /* FIXME. */ ++/*=======================================================================*/ ++/* MT6575 UART */ ++/*=======================================================================*/ ++#define CFG_DEV_UART1 ++#define CFG_DEV_UART2 ++#define CFG_DEV_UART3 ++#define CFG_DEV_UART4 ++ ++#define CFG_UART_PORTS (4) ++ ++/*=======================================================================*/ ++/* MT6575 I2C */ ++/*=======================================================================*/ ++#define CFG_DEV_I2C ++//#define CFG_I2C_HIGH_SPEED_MODE ++//#define CFG_I2C_DMA_MODE ++ ++/*=======================================================================*/ ++/* MT6575 ADB */ ++/*=======================================================================*/ ++#define ADB_SERIAL "E1K" ++ ++#endif ++ ++/*=======================================================================*/ ++/* MT6575 NAND FLASH */ ++/*=======================================================================*/ ++#if 0 ++#define RAMDOM_READ 1<<0 ++#define CACHE_READ 1<<1 ++/******************************************************************************* ++ * NFI & ECC Configuration ++ *******************************************************************************/ ++typedef struct ++{ ++ u16 id; //deviceid+menuid ++ u8 addr_cycle; ++ u8 iowidth; ++ u16 totalsize; ++ u16 blocksize; ++ u16 pagesize; ++ u32 timmingsetting; ++ char devciename[14]; ++ u32 advancedmode; // ++}flashdev_info,*pflashdev_info; ++ ++static const flashdev_info g_FlashTable[]={ ++ //micro ++ {0xAA2C, 5, 8, 256, 128, 2048, 0x01113, "MT29F2G08ABD", 0}, ++ {0xB12C, 4, 16, 128, 128, 2048, 0x01113, "MT29F1G16ABC", 0}, ++ {0xBA2C, 5, 16, 256, 128, 2048, 0x01113, "MT29F2G16ABD", 0}, ++ {0xAC2C, 5, 8, 512, 128, 2048, 0x01113, "MT29F4G08ABC", 0}, ++ {0xBC2C, 5, 16, 512, 128, 2048, 0x44333, "MT29F4G16ABD", 0}, ++ //samsung ++ {0xBAEC, 5, 16, 256, 128, 2048, 0x01123, "K522H1GACE", 0}, ++ {0xBCEC, 5, 16, 512, 128, 2048, 0x01123, "K524G2GACB", 0}, ++ {0xDAEC, 5, 8, 256, 128, 2048, 0x33222, 
"K9F2G08U0A", RAMDOM_READ}, ++ {0xF1EC, 4, 8, 128, 128, 2048, 0x01123, "K9F1G08U0A", RAMDOM_READ}, ++ {0xAAEC, 5, 8, 256, 128, 2048, 0x01123, "K9F2G08R0A", 0}, ++ //hynix ++ {0xD3AD, 5, 8, 1024, 256, 2048, 0x44333, "HY27UT088G2A", 0}, ++ {0xA1AD, 4, 8, 128, 128, 2048, 0x01123, "H8BCSOPJOMCP", 0}, ++ {0xBCAD, 5, 16, 512, 128, 2048, 0x01123, "H8BCSOUNOMCR", 0}, ++ {0xBAAD, 5, 16, 256, 128, 2048, 0x01123, "H8BCSOSNOMCR", 0}, ++ //toshiba ++ {0x9598, 5, 16, 816, 128, 2048, 0x00113, "TY9C000000CMG", 0}, ++ {0x9498, 5, 16, 375, 128, 2048, 0x00113, "TY9C000000CMG", 0}, ++ {0xC198, 4, 16, 128, 128, 2048, 0x44333, "TC58NWGOS8C", 0}, ++ {0xBA98, 5, 16, 256, 128, 2048, 0x02113, "TC58NYG1S8C", 0}, ++ //st-micro ++ {0xBA20, 5, 16, 256, 128, 2048, 0x01123, "ND02CGR4B2DI6", 0}, ++ ++ // elpida ++ {0xBC20, 5, 16, 512, 128, 2048, 0x01123, "04GR4B2DDI6", 0}, ++ {0x0000, 0, 0, 0, 0, 0, 0, "xxxxxxxxxxxxx", 0} ++}; ++#endif ++ ++ ++#define NFI_DEFAULT_ACCESS_TIMING (0x44333) ++ ++//uboot only support 1 cs ++#define NFI_CS_NUM (2) ++#define NFI_DEFAULT_CS (0) ++ ++#define USE_AHB_MODE (1) ++ ++#define PLATFORM_EVB (1) ++ ++#endif /* __ARCH_ARM_MACH_MT6575_CUSTOM_BOARD_H */ ++ +--- /dev/null ++++ b/arch/mips/include/asm/rt2880/eureka_ep430.h +@@ -0,0 +1,204 @@ ++/************************************************************************** ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. ++ * ++ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED ++ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN ++ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, ++ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT ++ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF ++ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ++ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF ++ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 675 Mass Ave, Cambridge, MA 02139, USA. ++ * ++ * ++ ************************************************************************** ++ */ ++ ++#ifndef _EUREKA_EP430_H ++#define _EUREKA_EP430_H ++ ++ ++#include /* for KSEG1ADDR() */ ++#include /* for cpu_to_le32() */ ++#include ++ ++ ++/* ++ * Because of an error/peculiarity in the Galileo chip, we need to swap the ++ * bytes when running bigendian. 
++ */ ++ ++#define MV_WRITE(ofs, data) \ ++ *(volatile u32 *)(RALINK_PCI_BASE+(ofs)) = cpu_to_le32(data) ++#define MV_READ(ofs, data) \ ++ *(data) = le32_to_cpu(*(volatile u32 *)(RALINK_PCI_BASE+(ofs))) ++#define MV_READ_DATA(ofs) \ ++ le32_to_cpu(*(volatile u32 *)(RALINK_PCI_BASE+(ofs))) ++ ++#define MV_WRITE_16(ofs, data) \ ++ *(volatile u16 *)(RALINK_PCI_BASE+(ofs)) = cpu_to_le16(data) ++#define MV_READ_16(ofs, data) \ ++ *(data) = le16_to_cpu(*(volatile u16 *)(RALINK_PCI_BASE+(ofs))) ++ ++#define MV_WRITE_8(ofs, data) \ ++ *(volatile u8 *)(RALINK_PCI_BASE+(ofs)) = data ++#define MV_READ_8(ofs, data) \ ++ *(data) = *(volatile u8 *)(RALINK_PCI_BASE+(ofs)) ++ ++#define MV_SET_REG_BITS(ofs,bits) \ ++ (*((volatile u32 *)(RALINK_PCI_BASE+(ofs)))) |= ((u32)cpu_to_le32(bits)) ++#define MV_RESET_REG_BITS(ofs,bits) \ ++ (*((volatile u32 *)(RALINK_PCI_BASE+(ofs)))) &= ~((u32)cpu_to_le32(bits)) ++ ++#define RALINK_PCI_CONFIG_ADDR 0x20 ++#define RALINK_PCI_CONFIG_DATA_VIRTUAL_REG 0x24 ++ ++#if defined(CONFIG_RALINK_RT2880) || defined(CONFIG_RALINK_RT2883) ++#define RALINK_PCI_PCICFG_ADDR *(volatile u32 *)(RALINK_PCI_BASE + 0x0000) ++#define RALINK_PCI_PCIRAW_ADDR *(volatile u32 *)(RALINK_PCI_BASE + 0x0004) ++#define RALINK_PCI_PCIINT_ADDR *(volatile u32 *)(RALINK_PCI_BASE + 0x0008) ++#define RALINK_PCI_PCIMSK_ADDR *(volatile u32 *)(RALINK_PCI_BASE + 0x000C) ++#define RALINK_PCI_BAR0SETUP_ADDR *(volatile u32 *)(RALINK_PCI_BASE + 0x0010) ++#define RALINK_PCI_IMBASEBAR0_ADDR *(volatile u32 *)(RALINK_PCI_BASE + 0x0018) ++#define RALINK_PCI_IMBASEBAR1_ADDR *(volatile u32 *)(RALINK_PCI_BASE + 0x001C) ++#define RALINK_PCI_MEMBASE *(volatile u32 *)(RALINK_PCI_BASE + 0x0028) ++#define RALINK_PCI_IOBASE *(volatile u32 *)(RALINK_PCI_BASE + 0x002C) ++#define RALINK_PCI_ID *(volatile u32 *)(RALINK_PCI_BASE + 0x0030) ++#define RALINK_PCI_CLASS *(volatile u32 *)(RALINK_PCI_BASE + 0x0034) ++#define RALINK_PCI_SUBID *(volatile u32 *)(RALINK_PCI_BASE + 0x0038) ++#define RALINK_PCI_ARBCTL *(volatile u32 *)(RALINK_PCI_BASE + 0x0080) ++#define RALINK_PCI_STATUS *(volatile u32 *)(RALINK_PCI_BASE + 0x0050) ++ ++#elif defined(CONFIG_RALINK_RT3883) ++ ++#define RALINK_PCI_PCICFG_ADDR *(volatile u32 *)(RALINK_PCI_BASE + 0x0000) ++#define RALINK_PCI_PCIRAW_ADDR *(volatile u32 *)(RALINK_PCI_BASE + 0x0004) ++#define RALINK_PCI_PCIINT_ADDR *(volatile u32 *)(RALINK_PCI_BASE + 0x0008) ++#define RALINK_PCI_PCIMSK_ADDR *(volatile u32 *)(RALINK_PCI_BASE + 0x000C) ++#define RALINK_PCI_IMBASEBAR1_ADDR *(volatile u32 *)(RALINK_PCI_BASE + 0x001C) ++#define RALINK_PCI_MEMBASE *(volatile u32 *)(RALINK_PCI_BASE + 0x0028) ++#define RALINK_PCI_IOBASE *(volatile u32 *)(RALINK_PCI_BASE + 0x002C) ++#define RALINK_PCI_ARBCTL *(volatile u32 *)(RALINK_PCI_BASE + 0x0080) ++ ++/* ++PCI0 --> PCI ++PCI1 --> PCIe ++*/ ++#define RT3883_PCI_OFFSET 0x1000 ++#define RT3883_PCIE_OFFSET 0x2000 ++ ++#define RALINK_PCI0_BAR0SETUP_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT3883_PCI_OFFSET + 0x0010) ++#define RALINK_PCI0_IMBASEBAR0_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT3883_PCI_OFFSET + 0x0018) ++#define RALINK_PCI0_ID *(volatile u32 *)(RALINK_PCI_BASE + RT3883_PCI_OFFSET + 0x0030) ++#define RALINK_PCI0_CLASS *(volatile u32 *)(RALINK_PCI_BASE + RT3883_PCI_OFFSET + 0x0034) ++#define RALINK_PCI0_SUBID *(volatile u32 *)(RALINK_PCI_BASE + RT3883_PCI_OFFSET + 0x0038) ++ ++#define RALINK_PCI1_BAR0SETUP_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT3883_PCIE_OFFSET + 0x0010) ++#define RALINK_PCI1_IMBASEBAR0_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT3883_PCIE_OFFSET 
+ 0x0018) ++#define RALINK_PCI1_ID *(volatile u32 *)(RALINK_PCI_BASE + RT3883_PCIE_OFFSET + 0x0030) ++#define RALINK_PCI1_CLASS *(volatile u32 *)(RALINK_PCI_BASE + RT3883_PCIE_OFFSET + 0x0034) ++#define RALINK_PCI1_SUBID *(volatile u32 *)(RALINK_PCI_BASE + RT3883_PCIE_OFFSET + 0x0038) ++#define RALINK_PCI1_STATUS *(volatile u32 *)(RALINK_PCI_BASE + RT3883_PCIE_OFFSET + 0x0050) ++ ++#elif defined(CONFIG_RALINK_RT6855) || defined (CONFIG_RALINK_MT7620) || defined(CONFIG_RALINK_MT7628) ++ ++#define RALINK_PCI_PCICFG_ADDR *(volatile u32 *)(RALINK_PCI_BASE + 0x0000) ++#define RALINK_PCI_PCIRAW_ADDR *(volatile u32 *)(RALINK_PCI_BASE + 0x0004) ++#define RALINK_PCI_PCIINT_ADDR *(volatile u32 *)(RALINK_PCI_BASE + 0x0008) ++#define RALINK_PCI_PCIMSK_ADDR *(volatile u32 *)(RALINK_PCI_BASE + 0x000C) ++#define RALINK_PCI_IMBASEBAR1_ADDR *(volatile u32 *)(RALINK_PCI_BASE + 0x001C) ++#define RALINK_PCI_MEMBASE *(volatile u32 *)(RALINK_PCI_BASE + 0x0028) ++#define RALINK_PCI_IOBASE *(volatile u32 *)(RALINK_PCI_BASE + 0x002C) ++#define RALINK_PCI_ARBCTL *(volatile u32 *)(RALINK_PCI_BASE + 0x0080) ++ ++/* ++PCI0 --> PCIe 0 ++PCI1 --> PCIe 1 ++*/ ++#define RT6855_PCIE0_OFFSET 0x2000 ++#define RT6855_PCIE1_OFFSET 0x3000 ++ ++#define RALINK_PCI0_BAR0SETUP_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0010) ++#define RALINK_PCI0_IMBASEBAR0_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0018) ++#define RALINK_PCI0_ID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0030) ++#define RALINK_PCI0_CLASS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0034) ++#define RALINK_PCI0_SUBID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0038) ++#define RALINK_PCI0_STATUS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0050) ++#define RALINK_PCI0_DERR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0060) ++#define RALINK_PCI0_ECRC *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0064) ++ ++#define RALINK_PCI1_BAR0SETUP_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0010) ++#define RALINK_PCI1_IMBASEBAR0_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0018) ++#define RALINK_PCI1_ID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0030) ++#define RALINK_PCI1_CLASS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0034) ++#define RALINK_PCI1_SUBID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0038) ++#define RALINK_PCI1_STATUS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0050) ++#define RALINK_PCI1_DERR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0060) ++#define RALINK_PCI1_ECRC *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0064) ++ ++#elif defined (CONFIG_RALINK_MT7621) ++ ++#define RALINK_PCI_PCICFG_ADDR *(volatile u32 *)(RALINK_PCI_BASE + 0x0000) ++#define RALINK_PCI_PCIRAW_ADDR *(volatile u32 *)(RALINK_PCI_BASE + 0x0004) ++#define RALINK_PCI_PCIINT_ADDR *(volatile u32 *)(RALINK_PCI_BASE + 0x0008) ++#define RALINK_PCI_PCIMSK_ADDR *(volatile u32 *)(RALINK_PCI_BASE + 0x000C) ++#define RALINK_PCI_IMBASEBAR1_ADDR *(volatile u32 *)(RALINK_PCI_BASE + 0x001C) ++#define RALINK_PCI_MEMBASE *(volatile u32 *)(RALINK_PCI_BASE + 0x0028) ++#define RALINK_PCI_IOBASE *(volatile u32 *)(RALINK_PCI_BASE + 0x002C) ++#define RALINK_PCI_ARBCTL *(volatile u32 *)(RALINK_PCI_BASE + 0x0080) ++ ++/* ++PCI0 --> PCIe 0 ++PCI1 --> PCIe 1 ++PCI2 --> PCIe 2 ++*/ ++#define RT6855_PCIE0_OFFSET 0x2000 ++#define RT6855_PCIE1_OFFSET 0x3000 
++#define RT6855_PCIE2_OFFSET 0x4000 ++ ++#define RALINK_PCI0_BAR0SETUP_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0010) ++#define RALINK_PCI0_IMBASEBAR0_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0018) ++#define RALINK_PCI0_ID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0030) ++#define RALINK_PCI0_CLASS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0034) ++#define RALINK_PCI0_SUBID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0038) ++#define RALINK_PCI0_STATUS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0050) ++#define RALINK_PCI0_DERR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0060) ++#define RALINK_PCI0_ECRC *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0064) ++ ++#define RALINK_PCI1_BAR0SETUP_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0010) ++#define RALINK_PCI1_IMBASEBAR0_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0018) ++#define RALINK_PCI1_ID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0030) ++#define RALINK_PCI1_CLASS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0034) ++#define RALINK_PCI1_SUBID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0038) ++#define RALINK_PCI1_STATUS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0050) ++#define RALINK_PCI1_DERR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0060) ++#define RALINK_PCI1_ECRC *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0064) ++ ++#define RALINK_PCI2_BAR0SETUP_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0010) ++#define RALINK_PCI2_IMBASEBAR0_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0018) ++#define RALINK_PCI2_ID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0030) ++#define RALINK_PCI2_CLASS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0034) ++#define RALINK_PCI2_SUBID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0038) ++#define RALINK_PCI2_STATUS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0050) ++#define RALINK_PCI2_DERR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0060) ++#define RALINK_PCI2_ECRC *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0064) ++ ++#define RALINK_PCIEPHY_P0P1_CTL_OFFSET (RALINK_PCI_BASE + 0x9000) ++#define RALINK_PCIEPHY_P2_CTL_OFFSET (RALINK_PCI_BASE + 0xA000) ++ ++#elif defined(CONFIG_RALINK_RT3052) || defined(CONFIG_RALINK_RT3352) || defined(CONFIG_RALINK_RT5350) ++#else ++#error "undefined in PCI" ++#endif ++ ++#endif +--- /dev/null ++++ b/arch/mips/include/asm/rt2880/generic.h +@@ -0,0 +1,42 @@ ++/* ++ * Copyright (C) 2001 Palmchip Corporation. All rights reserved. ++ * ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. ++ * ++ * Defines of the Palmchip boards specific address-MAP, registers, etc. 
++ */ ++#ifndef __ASM_SURFBOARD_GENERIC_H ++#define __ASM_SURFBOARD_GENERIC_H ++ ++#include ++#include ++#include ++ ++/* ++ * Reset register. ++ */ ++#define SOFTRES_REG (KSEG1ADDR(RALINK_SYSCTL_BASE+0x34)) ++#define GORESET (0x1) ++ ++/* ++ * Power-off register ++ */ ++#define POWER_DIR_REG (KSEG1ADDR(RALINK_PIO_BASE+0x24)) ++#define POWER_DIR_OUTPUT (0x80) /* GPIO 7 */ ++#define POWER_POL_REG (KSEG1ADDR(RALINK_PIO_BASE+0x28)) ++#define POWEROFF_REG (KSEG1ADDR(RALINK_PIO_BASE+0x20)) ++#define POWEROFF (0x0) /* drive low */ ++ ++ ++#endif /* __ASM_SURFBOARD_GENERIC_H */ +--- /dev/null ++++ b/arch/mips/include/asm/rt2880/lm.h +@@ -0,0 +1,32 @@ ++#include ++ ++struct lm_device { ++ struct device dev; ++ struct resource resource; ++ unsigned int irq; ++ unsigned int id; ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++ void *lm_drvdata; ++#endif ++}; ++ ++struct lm_driver { ++ struct device_driver drv; ++ int (*probe)(struct lm_device *); ++ void (*remove)(struct lm_device *); ++ int (*suspend)(struct lm_device *, u32); ++ int (*resume)(struct lm_device *); ++}; ++ ++int lm_driver_register(struct lm_driver *drv); ++void lm_driver_unregister(struct lm_driver *drv); ++ ++int lm_device_register(struct lm_device *dev); ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) ++# define lm_get_drvdata(lm) ((lm)->lm_drvdata) ++# define lm_set_drvdata(lm,d) do { (lm)->lm_drvdata = (d); } while (0) ++#else ++# define lm_get_drvdata(lm) dev_get_drvdata(&(lm)->dev) ++# define lm_set_drvdata(lm,d) dev_set_drvdata(&(lm)->dev, d) ++#endif +--- /dev/null ++++ b/arch/mips/include/asm/rt2880/prom.h +@@ -0,0 +1,50 @@ ++/* ++ * Carsten Langgaard, carstenl@mips.com ++ * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved. ++ * ++ * ######################################################################## ++ * ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. ++ * ++ * ######################################################################## ++ * ++ * MIPS boards bootprom interface for the Linux kernel. ++ * ++ */ ++ ++#ifndef _MIPS_PROM_H ++#define _MIPS_PROM_H ++ ++extern char *prom_getcmdline(void); ++extern char *prom_getenv(char *name); ++extern void setup_prom_printf(int tty_no); ++extern void prom_setup_printf(int tty_no); ++extern void prom_printf(char *fmt, ...); ++extern void prom_init_cmdline(void); ++extern void prom_meminit(void); ++extern void prom_fixup_mem_map(unsigned long start_mem, unsigned long end_mem); ++extern void prom_free_prom_memory (void); ++extern void mips_display_message(const char *str); ++extern void mips_display_word(unsigned int num); ++extern int get_ethernet_addr(char *ethernet_addr); ++ ++/* Memory descriptor management. */ ++#define PROM_MAX_PMEMBLOCKS 32 ++struct prom_pmemblock { ++ unsigned long base; /* Within KSEG0. */ ++ unsigned int size; /* In bytes. 
*/ ++ unsigned int type; /* free or prom memory */ ++}; ++ ++#endif /* !(_MIPS_PROM_H) */ +--- /dev/null ++++ b/arch/mips/include/asm/rt2880/rt_mmap.h +@@ -0,0 +1,796 @@ ++/************************************************************************** ++ * ++ * BRIEF MODULE DESCRIPTION ++ * register definition for Ralink RT-series SoC ++ * ++ * Copyright 2007 Ralink Inc. ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. ++ * ++ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED ++ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN ++ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, ++ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT ++ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF ++ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ++ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF ++ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 675 Mass Ave, Cambridge, MA 02139, USA. ++ * ++ * ++ ************************************************************************** ++ */ ++ ++#ifndef __RALINK_MMAP__ ++#define __RALINK_MMAP__ ++ ++#if defined (CONFIG_RALINK_RT2880_SHUTTLE) ++ ++#define RALINK_SYSCTL_BASE 0xA0300000 ++#define RALINK_TIMER_BASE 0xA0300100 ++#define RALINK_INTCL_BASE 0xA0300200 ++#define RALINK_MEMCTRL_BASE 0xA0300300 ++#define RALINK_UART_BASE 0xA0300500 ++#define RALINK_PIO_BASE 0xA0300600 ++#define RALINK_I2C_BASE 0xA0300900 ++#define RALINK_SPI_BASE 0xA0300B00 ++#define RALINK_UART_LITE_BASE 0xA0300C00 ++#define RALINK_FRAME_ENGINE_BASE 0xA0310000 ++#define RALINK_EMBEDD_ROM_BASE 0xA0400000 ++#define RALINK_PCI_BASE 0xA0500000 ++#define RALINK_11N_MAC_BASE 0xA0600000 ++ ++//Interrupt Controller ++#define RALINK_INTCTL_TIMER0 (1<<0) ++#define RALINK_INTCTL_WDTIMER (1<<1) ++#define RALINK_INTCTL_UART (1<<2) ++#define RALINK_INTCTL_PIO (1<<3) ++#define RALINK_INTCTL_PCM (1<<4) ++#define RALINK_INTCTL_UARTLITE (1<<8) ++#define RALINK_INTCTL_ILL_ACCESS (1<<23) ++ ++//Reset Control Register ++#define RALINK_TIMER_RST (1<<1) ++#define RALINK_INTC_RST (1<<2) ++#define RALINK_MC_RST (1<<3) ++#define RALINK_CPU_RST (1<<4) ++#define RALINK_UART_RST (1<<5) ++#define RALINK_PIO_RST (1<<6) ++#define RALINK_I2C_RST (1<<9) ++#define RALINK_SPI_RST (1<<11) ++#define RALINK_UART2_RST (1<<12) ++#define RALINK_PCI_RST (1<<16) ++#define RALINK_2860_RST (1<<17) ++#define RALINK_FE_RST (1<<18) ++#define RALINK_PCM_RST (1<<19) ++ ++ ++#elif defined (CONFIG_RALINK_RT2880_MP) ++ ++#define RALINK_SYSCTL_BASE 0xA0300000 ++#define RALINK_TIMER_BASE 0xA0300100 ++#define RALINK_INTCL_BASE 0xA0300200 ++#define RALINK_MEMCTRL_BASE 0xA0300300 ++#define RALINK_UART_BASE 0xA0300500 ++#define RALINK_PIO_BASE 0xA0300600 ++#define RALINK_I2C_BASE 0xA0300900 ++#define RALINK_SPI_BASE 0xA0300B00 ++#define RALINK_UART_LITE_BASE 0x00300C00 ++#define RALINK_FRAME_ENGINE_BASE 0xA0400000 ++#define RALINK_EMBEDD_ROM_BASE 0xA0410000 
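generic.h above exposes the system reset register as a plain KSEG1 address (SOFTRES_REG) together with the GORESET value, so a machine-restart helper reduces to a single uncached store. A minimal sketch, not part of the patch, assuming that header is included; the function name is illustrative:

static void example_machine_restart(void)
{
	/* write GORESET into the sysctl soft-reset register */
	*(volatile unsigned int *)SOFTRES_REG = GORESET;
}
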
++#define RALINK_PCI_BASE 0xA0440000 ++#define RALINK_11N_MAC_BASE 0xA0480000 ++ ++//Interrupt Controller ++#define RALINK_INTCTL_TIMER0 (1<<0) ++#define RALINK_INTCTL_WDTIMER (1<<1) ++#define RALINK_INTCTL_UART (1<<2) ++#define RALINK_INTCTL_PIO (1<<3) ++#define RALINK_INTCTL_PCM (1<<4) ++#define RALINK_INTCTL_UARTLITE (1<<8) ++#define RALINK_INTCTL_ILL_ACCESS (1<<23) ++ ++//Reset Control Register ++#define RALINK_TIMER_RST (1<<1) ++#define RALINK_INTC_RST (1<<2) ++#define RALINK_MC_RST (1<<3) ++#define RALINK_CPU_RST (1<<4) ++#define RALINK_UART_RST (1<<5) ++#define RALINK_PIO_RST (1<<6) ++#define RALINK_I2C_RST (1<<9) ++#define RALINK_SPI_RST (1<<11) ++#define RALINK_UART2_RST (1<<12) ++#define RALINK_PCI_RST (1<<16) ++#define RALINK_2860_RST (1<<17) ++#define RALINK_FE_RST (1<<18) ++#define RALINK_PCM_RST (1<<19) ++ ++#elif defined (CONFIG_RALINK_RT3052) ++ ++#define RALINK_SYSCTL_BASE 0xB0000000 ++#define RALINK_TIMER_BASE 0xB0000100 ++#define RALINK_INTCL_BASE 0xB0000200 ++#define RALINK_MEMCTRL_BASE 0xB0000300 ++#define RALINK_PCM_BASE 0xB0000400 ++#define RALINK_UART_BASE 0x10000500 ++#define RALINK_PIO_BASE 0xB0000600 ++#define RALINK_GDMA_BASE 0xB0000700 ++#define RALINK_NAND_CTRL_BASE 0xB0000800 ++#define RALINK_I2C_BASE 0xB0000900 ++#define RALINK_I2S_BASE 0xB0000A00 ++#define RALINK_SPI_BASE 0xB0000B00 ++#define RALINK_UART_LITE_BASE 0x10000C00 ++#define RALINK_FRAME_ENGINE_BASE 0xB0100000 ++#define RALINK_ETH_SW_BASE 0xB0110000 ++#define RALINK_11N_MAC_BASE 0xB0180000 ++#define RALINK_USB_OTG_BASE 0x101C0000 ++ ++//Interrupt Controller ++#define RALINK_INTCTL_SYSCTL (1<<0) ++#define RALINK_INTCTL_TIMER0 (1<<1) ++#define RALINK_INTCTL_WDTIMER (1<<2) ++#define RALINK_INTCTL_ILL_ACCESS (1<<3) ++#define RALINK_INTCTL_PCM (1<<4) ++#define RALINK_INTCTL_UART (1<<5) ++#define RALINK_INTCTL_PIO (1<<6) ++#define RALINK_INTCTL_DMA (1<<7) ++#define RALINK_INTCTL_NAND (1<<8) ++#define RALINK_INTCTL_PC (1<<9) ++#define RALINK_INTCTL_I2S (1<<10) ++#define RALINK_INTCTL_UARTLITE (1<<12) ++#define RALINK_INTCTL_ESW (1<<17) ++#define RALINK_INTCTL_OTG (1<<18) ++#define RALINK_INTCTL_OTG_IRQN 18 ++#define RALINK_INTCTL_GLOBAL (1<<31) ++ ++//Reset Control Register ++#define RALINK_SYS_RST (1<<0) ++#define RALINK_CPU_RST (1<<1) ++#define RALINK_TIMER_RST (1<<8) ++#define RALINK_INTC_RST (1<<9) ++#define RALINK_MC_RST (1<<10) ++#define RALINK_PCM_RST (1<<11) ++#define RALINK_UART_RST (1<<12) ++#define RALINK_PIO_RST (1<<13) ++#define RALINK_DMA_RST (1<<14) ++#define RALINK_I2C_RST (1<<16) ++#define RALINK_I2S_RST (1<<17) ++#define RALINK_SPI_RST (1<<18) ++#define RALINK_UARTL_RST (1<<19) ++#define RALINK_RT2872_RST (1<<20) ++#define RALINK_FE_RST (1<<21) ++#define RALINK_OTG_RST (1<<22) ++#define RALINK_SW_RST (1<<23) ++#define RALINK_EPHY_RST (1<<24) ++ ++#elif defined (CONFIG_RALINK_RT3352) ++ ++#define RALINK_SYSCTL_BASE 0xB0000000 ++#define RALINK_TIMER_BASE 0xB0000100 ++#define RALINK_INTCL_BASE 0xB0000200 ++#define RALINK_MEMCTRL_BASE 0xB0000300 ++#define RALINK_UART_BASE 0x10000500 ++#define RALINK_PIO_BASE 0xB0000600 ++#define RALINK_I2C_BASE 0xB0000900 ++#define RALINK_I2S_BASE 0xB0000A00 ++#define RALINK_SPI_BASE 0xB0000B00 ++#define RALINK_NAND_CTRL_BASE 0xB0000800 ++#define RALINK_UART_LITE_BASE 0x10000C00 ++#define RALINK_PCM_BASE 0xB0002000 ++#define RALINK_GDMA_BASE 0xB0002800 ++#define RALINK_FRAME_ENGINE_BASE 0xB0100000 ++#define RALINK_ETH_SW_BASE 0xB0110000 ++#define RALINK_USB_DEV_BASE 0x10120000 ++#define RALINK_11N_MAC_BASE 0xB0180000 ++#define RALINK_USB_HOST_BASE 
0x101C0000 ++ ++#define RALINK_MCNT_CFG 0xB0000D00 ++#define RALINK_COMPARE 0xB0000D04 ++#define RALINK_COUNT 0xB0000D08 ++ ++//Interrupt Controller ++#define RALINK_INTCTL_SYSCTL (1<<0) ++#define RALINK_INTCTL_TIMER0 (1<<1) ++#define RALINK_INTCTL_WDTIMER (1<<2) ++#define RALINK_INTCTL_ILL_ACCESS (1<<3) ++#define RALINK_INTCTL_PCM (1<<4) ++#define RALINK_INTCTL_UART (1<<5) ++#define RALINK_INTCTL_PIO (1<<6) ++#define RALINK_INTCTL_DMA (1<<7) ++#define RALINK_INTCTL_PC (1<<9) ++#define RALINK_INTCTL_I2S (1<<10) ++#define RALINK_INTCTL_UARTLITE (1<<12) ++#define RALINK_INTCTL_ESW (1<<17) ++#define RALINK_INTCTL_OTG (1<<18) ++#define RALINK_INTCTL_GLOBAL (1<<31) ++ ++//Reset Control Register ++#define RALINK_SYS_RST (1<<0) ++#define RALINK_TIMER_RST (1<<8) ++#define RALINK_INTC_RST (1<<9) ++#define RALINK_MC_RST (1<<10) ++#define RALINK_PCM_RST (1<<11) ++#define RALINK_UART_RST (1<<12) ++#define RALINK_PIO_RST (1<<13) ++#define RALINK_DMA_RST (1<<14) ++#define RALINK_I2C_RST (1<<16) ++#define RALINK_I2S_RST (1<<17) ++#define RALINK_SPI_RST (1<<18) ++#define RALINK_UARTL_RST (1<<19) ++#define RALINK_WLAN_RST (1<<20) ++#define RALINK_FE_RST (1<<21) ++#define RALINK_UHST_RST (1<<22) ++#define RALINK_ESW_RST (1<<23) ++#define RALINK_EPHY_RST (1<<24) ++#define RALINK_UDEV_RST (1<<25) ++ ++ ++//Clock Conf Register ++#define RALINK_UPHY1_CLK_EN (1<<20) ++#define RALINK_UPHY0_CLK_EN (1<<18) ++#define RALINK_GE1_CLK_EN (1<<16) ++ ++ ++#elif defined (CONFIG_RALINK_RT5350) ++ ++#define RALINK_SYSCTL_BASE 0xB0000000 ++#define RALINK_TIMER_BASE 0xB0000100 ++#define RALINK_INTCL_BASE 0xB0000200 ++#define RALINK_MEMCTRL_BASE 0xB0000300 ++#define RALINK_UART_BASE 0x10000500 ++#define RALINK_PIO_BASE 0xB0000600 ++#define RALINK_I2C_BASE 0xB0000900 ++#define RALINK_I2S_BASE 0xB0000A00 ++#define RALINK_SPI_BASE 0xB0000B00 ++#define RALINK_UART_LITE_BASE 0x10000C00 ++#define RALINK_PCM_BASE 0xB0002000 ++#define RALINK_GDMA_BASE 0xB0002800 ++#define RALINK_FRAME_ENGINE_BASE 0xB0100000 ++#define RALINK_ETH_SW_BASE 0xB0110000 ++#define RALINK_USB_DEV_BASE 0x10120000 ++#define RALINK_11N_MAC_BASE 0xB0180000 ++#define RALINK_USB_HOST_BASE 0x101C0000 ++ ++#define RALINK_MCNT_CFG 0xB0000D00 ++#define RALINK_COMPARE 0xB0000D04 ++#define RALINK_COUNT 0xB0000D08 ++ ++//Interrupt Controller ++#define RALINK_INTCTL_SYSCTL (1<<0) ++#define RALINK_INTCTL_TIMER0 (1<<1) ++#define RALINK_INTCTL_WDTIMER (1<<2) ++#define RALINK_INTCTL_ILL_ACCESS (1<<3) ++#define RALINK_INTCTL_PCM (1<<4) ++#define RALINK_INTCTL_UART (1<<5) ++#define RALINK_INTCTL_PIO (1<<6) ++#define RALINK_INTCTL_DMA (1<<7) ++#define RALINK_INTCTL_PC (1<<9) ++#define RALINK_INTCTL_I2S (1<<10) ++#define RALINK_INTCTL_UARTLITE (1<<12) ++#define RALINK_INTCTL_ESW (1<<17) ++#define RALINK_INTCTL_USB_HOST (1<<18) ++#define RALINK_INTCTL_USB_DEV (1<<19) ++#define RALINK_INTCTL_GLOBAL (1<<31) ++ ++//Reset Control Register ++#define RALINK_SYS_RST (1<<0) ++#define RALINK_TIMER_RST (1<<8) ++#define RALINK_INTC_RST (1<<9) ++#define RALINK_MC_RST (1<<10) ++#define RALINK_PCM_RST (1<<11) ++#define RALINK_UART_RST (1<<12) ++#define RALINK_PIO_RST (1<<13) ++#define RALINK_DMA_RST (1<<14) ++#define RALINK_I2C_RST (1<<16) ++#define RALINK_I2S_RST (1<<17) ++#define RALINK_SPI_RST (1<<18) ++#define RALINK_UARTL_RST (1<<19) ++#define RALINK_WLAN_RST (1<<20) ++#define RALINK_FE_RST (1<<21) ++#define RALINK_UHST_RST (1<<22) ++#define RALINK_ESW_RST (1<<23) ++#define RALINK_EPHY_RST (1<<24) ++#define RALINK_UDEV_RST (1<<25) ++#define RALINK_MIPSC_RST (1<<28) ++ ++//Clock Conf Register 
++#define RALINK_UPHY0_CLK_EN (1<<18) ++#define RALINK_GE1_CLK_EN (1<<16) ++ ++#elif defined (CONFIG_RALINK_RT2883) ++ ++#define RALINK_SYSCTL_BASE 0xB0000000 ++#define RALINK_TIMER_BASE 0xB0000100 ++#define RALINK_INTCL_BASE 0xB0000200 ++#define RALINK_MEMCTRL_BASE 0xB0000300 ++#define RALINK_PCM_BASE 0xB0000400 ++#define RALINK_UART_BASE 0x10000500 ++#define RALINK_PIO_BASE 0xB0000600 ++#define RALINK_GDMA_BASE 0xB0000700 ++#define RALINK_NAND_CTRL_BASE 0xB0000800 ++#define RALINK_I2C_BASE 0xB0000900 ++#define RALINK_I2S_BASE 0xB0000A00 ++#define RALINK_SPI_BASE 0xB0000B00 ++#define RALINK_UART_LITE_BASE 0x10000C00 ++#define RALINK_FRAME_ENGINE_BASE 0xB0100000 ++#define RALINK_PCI_BASE 0xB0140000 ++#define RALINK_11N_MAC_BASE 0xB0180000 ++#define RALINK_USB_OTG_BASE 0x101C0000 ++ ++//Interrupt Controller ++#define RALINK_INTCTL_SYSCTL (1<<0) ++#define RALINK_INTCTL_TIMER0 (1<<1) ++#define RALINK_INTCTL_WDTIMER (1<<2) ++#define RALINK_INTCTL_ILL_ACCESS (1<<3) ++#define RALINK_INTCTL_PCM (1<<4) ++#define RALINK_INTCTL_UART (1<<5) ++#define RALINK_INTCTL_PIO (1<<6) ++#define RALINK_INTCTL_DMA (1<<7) ++#define RALINK_INTCTL_NAND (1<<8) ++#define RALINK_INTCTL_PC (1<<9) ++#define RALINK_INTCTL_I2S (1<<10) ++#define RALINK_INTCTL_UARTLITE (1<<12) ++#define RALINK_INTCTL_OTG (1<<18) ++#define RALINK_INTCTL_OTG_IRQN 18 ++#define RALINK_INTCTL_GLOBAL (1<<31) ++ ++//Reset Control Register ++#define RALINK_SYS_RST (1<<0) ++#define RALINK_CPU_RST (1<<1) ++#define RALINK_TIMER_RST (1<<8) ++#define RALINK_INTC_RST (1<<9) ++#define RALINK_MC_RST (1<<10) ++#define RALINK_PCM_RST (1<<11) ++#define RALINK_UART_RST (1<<12) ++#define RALINK_PIO_RST (1<<13) ++#define RALINK_DMA_RST (1<<14) ++#define RALINK_I2C_RST (1<<16) ++#define RALINK_I2S_RST (1<<17) ++#define RALINK_SPI_RST (1<<18) ++#define RALINK_UARTL_RST (1<<19) ++#define RALINK_WLAN_RST (1<<20) ++#define RALINK_FE_RST (1<<21) ++#define RALINK_OTG_RST (1<<22) ++#define RALINK_PCIE_RST (1<<23) ++ ++#elif defined (CONFIG_RALINK_RT3883) ++ ++#define RALINK_SYSCTL_BASE 0xB0000000 ++#define RALINK_TIMER_BASE 0xB0000100 ++#define RALINK_INTCL_BASE 0xB0000200 ++#define RALINK_MEMCTRL_BASE 0xB0000300 ++#define RALINK_UART_BASE 0x10000500 ++#define RALINK_PIO_BASE 0xB0000600 ++#define RALINK_NOR_CTRL_BASE 0xB0000700 ++#define RALINK_NAND_CTRL_BASE 0xB0000810 ++#define RALINK_I2C_BASE 0xB0000900 ++#define RALINK_I2S_BASE 0xB0000A00 ++#define RALINK_SPI_BASE 0xB0000B00 ++#define RALINK_UART_LITE_BASE 0x10000C00 ++#define RALINK_PCM_BASE 0xB0002000 ++#define RALINK_GDMA_BASE 0xB0002800 ++#define RALINK_CODEC1_BASE 0xB0003000 ++#define RALINK_CODEC2_BASE 0xB0003800 ++#define RALINK_FRAME_ENGINE_BASE 0xB0100000 ++#define RALINK_USB_DEV_BASE 0x10120000 ++#define RALINK_PCI_BASE 0xB0140000 ++#define RALINK_11N_MAC_BASE 0xB0180000 ++#define RALINK_USB_HOST_BASE 0x101C0000 ++#define RALINK_PCIE_BASE 0xB0200000 ++ ++//Interrupt Controller ++#define RALINK_INTCTL_SYSCTL (1<<0) ++#define RALINK_INTCTL_TIMER0 (1<<1) ++#define RALINK_INTCTL_WDTIMER (1<<2) ++#define RALINK_INTCTL_ILL_ACCESS (1<<3) ++#define RALINK_INTCTL_PCM (1<<4) ++#define RALINK_INTCTL_UART (1<<5) ++#define RALINK_INTCTL_PIO (1<<6) ++#define RALINK_INTCTL_DMA (1<<7) ++#define RALINK_INTCTL_NAND (1<<8) ++#define RALINK_INTCTL_PC (1<<9) ++#define RALINK_INTCTL_I2S (1<<10) ++#define RALINK_INTCTL_UARTLITE (1<<12) ++#define RALINK_INTCTL_UHST (1<<18) ++#define RALINK_INTCTL_UDEV (1<<19) ++ ++//Reset Control Register ++#define RALINK_SYS_RST (1<<0) ++#define RALINK_TIMER_RST (1<<8) ++#define RALINK_INTC_RST 
(1<<9) ++#define RALINK_MC_RST (1<<10) ++#define RALINK_PCM_RST (1<<11) ++#define RALINK_UART_RST (1<<12) ++#define RALINK_PIO_RST (1<<13) ++#define RALINK_DMA_RST (1<<14) ++#define RALINK_NAND_RST (1<<15) ++#define RALINK_I2C_RST (1<<16) ++#define RALINK_I2S_RST (1<<17) ++#define RALINK_SPI_RST (1<<18) ++#define RALINK_UARTL_RST (1<<19) ++#define RALINK_WLAN_RST (1<<20) ++#define RALINK_FE_RST (1<<21) ++#define RALINK_UHST_RST (1<<22) ++#define RALINK_PCIE_RST (1<<23) ++#define RALINK_PCI_RST (1<<24) ++#define RALINK_UDEV_RST (1<<25) ++#define RALINK_FLASH_RST (1<<26) ++ ++//Clock Conf Register ++#define RALINK_UPHY1_CLK_EN (1<<20) ++#define RALINK_UPHY0_CLK_EN (1<<18) ++#define RALINK_GE1_CLK_EN (1<<16) ++ ++#elif defined (CONFIG_RALINK_RT6855) ++ ++#define RALINK_SYSCTL_BASE 0xB0000000 ++#define RALINK_TIMER_BASE 0xB0000100 ++#define RALINK_INTCL_BASE 0xB0000200 ++#define RALINK_MEMCTRL_BASE 0xB0000300 ++#define RALINK_UART_BASE 0x10000500 ++#define RALINK_PIO_BASE 0xB0000600 ++#define RALINK_I2C_BASE 0xB0000900 ++#define RALINK_I2S_BASE 0xB0000A00 ++#define RALINK_SPI_BASE 0xB0000B00 ++#define RALINK_NAND_CTRL_BASE 0xB0000800 ++#define RALINK_UART_LITE_BASE 0x10000C00 ++#define RALINK_PCM_BASE 0xB0002000 ++#define RALINK_GDMA_BASE 0xB0002800 ++#define RALINK_FRAME_ENGINE_BASE 0xB0100000 ++#define RALINK_ETH_SW_BASE 0xB0110000 ++#define RALINK_PCI_BASE 0xB0140000 ++#define RALINK_USB_DEV_BASE 0x10120000 ++#define RALINK_11N_MAC_BASE 0xB0180000 ++#define RALINK_USB_HOST_BASE 0x101C0000 ++ ++#define RALINK_MCNT_CFG 0xB0000D00 ++#define RALINK_COMPARE 0xB0000D04 ++#define RALINK_COUNT 0xB0000D08 ++ ++//Interrupt Controller ++#define RALINK_INTCTL_SYSCTL (1<<0) ++#define RALINK_INTCTL_TIMER0 (1<<1) ++#define RALINK_INTCTL_WDTIMER (1<<2) ++#define RALINK_INTCTL_ILL_ACCESS (1<<3) ++#define RALINK_INTCTL_PCM (1<<4) ++#define RALINK_INTCTL_UART (1<<5) ++#define RALINK_INTCTL_PIO (1<<6) ++#define RALINK_INTCTL_DMA (1<<7) ++#define RALINK_INTCTL_PC (1<<9) ++#define RALINK_INTCTL_I2S (1<<10) ++#define RALINK_INTCTL_UARTLITE (1<<12) ++#define RALINK_INTCTL_ESW (1<<17) ++#define RALINK_INTCTL_OTG (1<<18) ++#define RALINK_INTCTL_GLOBAL (1<<31) ++ ++//Reset Control Register ++#define RALINK_SYS_RST (1<<0) ++#define RALINK_TIMER_RST (1<<8) ++#define RALINK_INTC_RST (1<<9) ++#define RALINK_MC_RST (1<<10) ++#define RALINK_PCM_RST (1<<11) ++#define RALINK_UART_RST (1<<12) ++#define RALINK_PIO_RST (1<<13) ++#define RALINK_DMA_RST (1<<14) ++#define RALINK_I2C_RST (1<<16) ++#define RALINK_I2S_RST (1<<17) ++#define RALINK_SPI_RST (1<<18) ++#define RALINK_UARTL_RST (1<<19) ++#define RALINK_FE_RST (1<<21) ++#define RALINK_UHST_RST (1<<22) ++#define RALINK_ESW_RST (1<<23) ++#define RALINK_EPHY_RST (1<<24) ++#define RALINK_UDEV_RST (1<<25) ++#define RALINK_PCIE0_RST (1<<26) ++#define RALINK_PCIE1_RST (1<<27) ++ ++//Clock Conf Register ++#define RALINK_UPHY0_CLK_EN (1<<25) ++#define RALINK_PCIE0_CLK_EN (1<<26) ++#define RALINK_PCIE1_CLK_EN (1<<27) ++ ++ ++#elif defined (CONFIG_RALINK_MT7620) ++ ++#define RALINK_SYSCTL_BASE 0xB0000000 ++#define RALINK_TIMER_BASE 0xB0000100 ++#define RALINK_INTCL_BASE 0xB0000200 ++#define RALINK_MEMCTRL_BASE 0xB0000300 ++#define RALINK_RBUS_MATRIXCTL_BASE 0xB0000400 ++#define RALINK_UART_BASE 0x10000500 ++#define RALINK_PIO_BASE 0xB0000600 ++#define RALINK_NAND_CTRL_BASE 0xB0000810 ++#define RALINK_I2C_BASE 0xB0000900 ++#define RALINK_I2S_BASE 0xB0000A00 ++#define RALINK_SPI_BASE 0xB0000B00 ++#define RALINK_UART_LITE_BASE 0x10000C00 ++#define RALINK_MIPS_CNT_BASE 0x10000D00 
++#define RALINK_PCM_BASE 0xB0002000 ++#define RALINK_GDMA_BASE 0xB0002800 ++#define RALINK_CRYPTO_ENGINE_BASE 0xB0004000 ++#define RALINK_FRAME_ENGINE_BASE 0xB0100000 ++#define RALINK_PPE_BASE 0xB0100C00 ++#define RALINK_ETH_SW_BASE 0xB0110000 ++#define RALINK_USB_DEV_BASE 0x10120000 ++#define RALINK_MSDC_BASE 0xB0130000 ++#define RALINK_PCI_BASE 0xB0140000 ++#define RALINK_11N_MAC_BASE 0xB0180000 ++#define RALINK_USB_HOST_BASE 0x101C0000 ++ ++#define RALINK_MCNT_CFG 0xB0000D00 ++#define RALINK_COMPARE 0xB0000D04 ++#define RALINK_COUNT 0xB0000D08 ++ ++//Interrupt Controller ++#define RALINK_INTCTL_SYSCTL (1<<0) ++#define RALINK_INTCTL_TIMER0 (1<<1) ++#define RALINK_INTCTL_WDTIMER (1<<2) ++#define RALINK_INTCTL_ILL_ACCESS (1<<3) ++#define RALINK_INTCTL_PCM (1<<4) ++#define RALINK_INTCTL_UART (1<<5) ++#define RALINK_INTCTL_PIO (1<<6) ++#define RALINK_INTCTL_DMA (1<<7) ++#define RALINK_INTCTL_PC (1<<9) ++#define RALINK_INTCTL_I2S (1<<10) ++#define RALINK_INTCTL_SPI (1<<11) ++#define RALINK_INTCTL_UARTLITE (1<<12) ++#define RALINK_INTCTL_CRYPTO (1<<13) ++#define RALINK_INTCTL_ESW (1<<17) ++#define RALINK_INTCTL_UHST (1<<18) ++#define RALINK_INTCTL_UDEV (1<<19) ++#define RALINK_INTCTL_GLOBAL (1<<31) ++ ++//Reset Control Register ++#define RALINK_SYS_RST (1<<0) ++#define RALINK_TIMER_RST (1<<8) ++#define RALINK_INTC_RST (1<<9) ++#define RALINK_MC_RST (1<<10) ++#define RALINK_PCM_RST (1<<11) ++#define RALINK_UART_RST (1<<12) ++#define RALINK_PIO_RST (1<<13) ++#define RALINK_DMA_RST (1<<14) ++#define RALINK_I2C_RST (1<<16) ++#define RALINK_I2S_RST (1<<17) ++#define RALINK_SPI_RST (1<<18) ++#define RALINK_UARTL_RST (1<<19) ++#define RALINK_FE_RST (1<<21) ++#define RALINK_UHST_RST (1<<22) ++#define RALINK_ESW_RST (1<<23) ++#define RALINK_EPHY_RST (1<<24) ++#define RALINK_UDEV_RST (1<<25) ++#define RALINK_PCIE0_RST (1<<26) ++#define RALINK_PCIE1_RST (1<<27) ++#define RALINK_MIPS_CNT_RST (1<<28) ++#define RALINK_CRYPTO_RST (1<<29) ++ ++//Clock Conf Register ++#define RALINK_UPHY0_CLK_EN (1<<25) ++#define RALINK_UPHY1_CLK_EN (1<<22) ++#define RALINK_PCIE0_CLK_EN (1<<26) ++#define RALINK_PCIE1_CLK_EN (1<<27) ++ ++//CPU PLL CFG Register ++#define CPLL_SW_CONFIG (0x1UL << 31) ++#define CPLL_MULT_RATIO_SHIFT 16 ++#define CPLL_MULT_RATIO (0x7UL << CPLL_MULT_RATIO_SHIFT) ++#define CPLL_DIV_RATIO_SHIFT 10 ++#define CPLL_DIV_RATIO (0x3UL << CPLL_DIV_RATIO_SHIFT) ++#define BASE_CLOCK 40 /* Mhz */ ++ ++#elif defined (CONFIG_RALINK_MT7621) ++ ++#define RALINK_SYSCTL_BASE 0xBE000000 ++#define RALINK_TIMER_BASE 0xBE000100 ++#define RALINK_INTCL_BASE 0xBE000200 ++#define RALINK_RBUS_MATRIXCTL_BASE 0xBE000400 ++#define RALINK_MIPS_CNT_BASE 0x1E000500 ++#define RALINK_PIO_BASE 0xBE000600 ++#define RALINK_SPDIF_BASE 0xBE000700 ++#define RALINK_I2C_BASE 0xBE000900 ++#define RALINK_I2S_BASE 0xBE000A00 ++#define RALINK_SPI_BASE 0xBE000B00 ++#define RALINK_UART_LITE1_BASE 0x1E000C00 ++#define RALINK_UART_LITE_BASE RALINK_UART_LITE1_BASE ++#define RALINK_UART_LITE2_BASE 0x1E000D00 ++#define RALINK_UART_BASE RALINK_UART_LITE2_BASE ++#define RALINK_UART_LITE3_BASE 0x1E000E00 ++#define RALINK_ANA_CTRL_BASE 0xBE000F00 ++#define RALINK_PCM_BASE 0xBE002000 ++#define RALINK_GDMA_BASE 0xBE002800 ++#define RALINK_NAND_CTRL_BASE 0xBE003000 ++#define RALINK_NANDECC_CTRL_BASE 0xBE003800 ++#define RALINK_CRYPTO_ENGINE_BASE 0xBE004000 ++#define RALINK_MEMCTRL_BASE 0xBE005000 ++#define RALINK_EXT_MC_ARB_BASE 0xBE006000 ++#define RALINK_HS_DMA_BASE 0xBE007000 ++#define RALINK_FRAME_ENGINE_BASE 0xBE100000 ++#define RALINK_PPE_BASE 
0xBE100C00 ++#define RALINK_ETH_SW_BASE 0xBE110000 ++#define RALINK_ROM_BASE 0xBE118000 ++#define RALINK_MSDC_BASE 0xBE130000 ++#define RALINK_PCI_BASE 0xBE140000 ++#define RALINK_USB_HOST_BASE 0x1E1C0000 ++#define RALINK_11N_MAC_BASE 0xBE180000 //Unused ++ ++#define RALINK_MCNT_CFG 0xBE000500 ++#define RALINK_COMPARE 0xBE000504 ++#define RALINK_COUNT 0xBE000508 ++ ++//Interrupt Controller ++#define RALINK_INTCTL_FE (1<<3) ++#define RALINK_INTCTL_PCIE0 (1<<4) ++#define RALINK_INTCTL_SYSCTL (1<<6) ++#define RALINK_INTCTL_I2C (1<<8) ++#define RALINK_INTCTL_DRAMC (1<<9) ++#define RALINK_INTCTL_PCM (1<<10) ++#define RALINK_INTCTL_HSDMA (1<<11) ++#define RALINK_INTCTL_PIO (1<<12) ++#define RALINK_INTCTL_DMA (1<<13) ++#define RALINK_INTCTL_NFI (1<<14) ++#define RALINK_INTCTL_NFIECC (1<<15) ++#define RALINK_INTCTL_I2S (1<<16) ++#define RALINK_INTCTL_SPI (1<<17) ++#define RALINK_INTCTL_SPDIF (1<<18) ++#define RALINK_INTCTL_CRYPTO (1<<19) ++#define RALINK_INTCTL_SDXC (1<<20) ++#define RALINK_INTCTL_PCTRL (1<<21) ++#define RALINK_INTCTL_USB (1<<22) ++#define RALINK_INTCTL_SWITCH (1<<23) ++#define RALINK_INTCTL_PCIE1 (1<<24) ++#define RALINK_INTCTL_PCIE2 (1<<25) ++#define RALINK_INTCTL_UART1 (1<<26) ++#define RALINK_INTCTL_UART2 (1<<27) ++#define RALINK_INTCTL_UART3 (1<<28) ++#define RALINK_INTCTL_WDTIMER (1<<29) ++#define RALINK_INTCTL_TIMER0 (1<<30) ++#define RALINK_INTCTL_TIMER1 (1<<31) ++ ++ ++//Reset Control Register ++#define RALINK_SYS_RST (1<<0) ++#define RALINK_MCM_RST (1<<1) ++#define RALINK_HSDMA_RST (1<<2) ++#define RALINK_FE_RST (1<<6) ++#define RALINK_SPDIF_RST (1<<7) ++#define RALINK_TIMER_RST (1<<8) ++#define RALINK_INTC_RST (1<<9) ++#define RALINK_MC_RST (1<<10) ++#define RALINK_PCM_RST (1<<11) ++#define RALINK_PIO_RST (1<<13) ++#define RALINK_DMA_RST (1<<14) ++#define RALINK_NAND_RST (1<<15) ++#define RALINK_I2C_RST (1<<16) ++#define RALINK_I2S_RST (1<<17) ++#define RALINK_SPI_RST (1<<18) ++#define RALINK_UART1_RST (1<<19) ++#define RALINK_UART2_RST (1<<20) ++#define RALINK_UART3_RST (1<<21) ++#define RALINK_ETH_RST (1<<23) ++#define RALINK_PCIE0_RST (1<<24) ++#define RALINK_PCIE1_RST (1<<25) ++#define RALINK_PCIE2_RST (1<<26) ++#define RALINK_AUX_STCK_RST (1<<28) ++#define RALINK_CRYPTO_RST (1<<29) ++#define RALINK_SDXC_RST (1<<30) ++#define RALINK_PPE_RST (1<<31) ++ ++//Clock Conf Register ++#define RALINK_PCIE0_CLK_EN (1<<24) ++#define RALINK_PCIE1_CLK_EN (1<<25) ++#define RALINK_PCIE2_CLK_EN (1<<26) ++//#define RALINK_UPHY0_CLK_EN (1<<27) ++//#define RALINK_UPHY1_CLK_EN (1<<28) ++ ++//CPU PLL CFG Register ++#define CPLL_SW_CONFIG (0x1UL << 31) ++#define CPLL_MULT_RATIO_SHIFT 16 ++#define CPLL_MULT_RATIO (0x7UL << CPLL_MULT_RATIO_SHIFT) ++#define CPLL_DIV_RATIO_SHIFT 10 ++#define CPLL_DIV_RATIO (0x3UL << CPLL_DIV_RATIO_SHIFT) ++#define BASE_CLOCK 40 /* Mhz */ ++ ++#define RALINK_TESTSTAT 0xBE000018 ++#define RALINK_TESTSTAT2 0xBE00001C ++ ++#elif defined (CONFIG_RALINK_MT7628) ++ ++#define RALINK_SYSCTL_BASE 0xB0000000 ++#define RALINK_TIMER_BASE 0xB0000100 ++#define RALINK_INTCL_BASE 0xB0000200 ++#define RALINK_MEMCTRL_BASE 0xB0000300 ++#define RALINK_RBUS_MATRIXCTL_BASE 0xB0000400 ++#define RALINK_MIPS_CNT_BASE 0x10000500 ++#define RALINK_PIO_BASE 0xB0000600 ++#define RALINK_SPI_SLAVE_BASE 0xB0000700 ++#define RALINK_I2C_BASE 0xB0000900 ++#define RALINK_I2S_BASE 0xB0000A00 ++#define RALINK_SPI_BASE 0xB0000B00 ++#define RALINK_UART_LITE1_BASE 0x10000C00 ++#define RALINK_UART_LITE_BASE RALINK_UART_LITE1_BASE ++#define RALINK_UART_LITE2_BASE 0x10000D00 ++#define RALINK_UART_BASE 
RALINK_UART_LITE2_BASE ++#define RALINK_UART_LITE3_BASE 0x10000E00 ++#define RALINK_PCM_BASE 0xB0002000 ++#define RALINK_GDMA_BASE 0xB0002800 ++#define RALINK_AES_ENGINE_BASE 0xB0004000 ++#define RALINK_CRYPTO_ENGINE_BASE RALINK_AES_ENGINE_BASE ++#define RALINK_FRAME_ENGINE_BASE 0xB0100000 ++#define RALINK_PPE_BASE 0xB0100C00 ++#define RALINK_ETH_SW_BASE 0xB0110000 ++#define RALINK_USB_DEV_BASE 0xB0120000 ++#define RALINK_MSDC_BASE 0xB0130000 ++#define RALINK_PCI_BASE 0xB0140000 ++#define RALINK_11N_MAC_BASE 0xB0180000 ++#define RALINK_USB_HOST_BASE 0x101C0000 ++ ++#define RALINK_MCNT_CFG 0xB0000500 ++#define RALINK_COMPARE 0xB0000504 ++#define RALINK_COUNT 0xB0000508 ++ ++ ++//Interrupt Controller ++#define RALINK_INTCTL_SYSCTL (1<<0) ++#define RALINK_INTCTL_TIMER0 (1<<1) ++#define RALINK_INTCTL_WDTIMER (1<<2) ++#define RALINK_INTCTL_ILL_ACCESS (1<<3) ++#define RALINK_INTCTL_PCM (1<<4) ++#define RALINK_INTCTL_UART (1<<5) ++#define RALINK_INTCTL_PIO (1<<6) ++#define RALINK_INTCTL_DMA (1<<7) ++#define RALINK_INTCTL_PC (1<<9) ++#define RALINK_INTCTL_I2S (1<<10) ++#define RALINK_INTCTL_SPI (1<<11) ++#define RALINK_INTCTL_UARTLITE (1<<12) ++#define RALINK_INTCTL_CRYPTO (1<<13) ++#define RALINK_INTCTL_ESW (1<<17) ++#define RALINK_INTCTL_UHST (1<<18) ++#define RALINK_INTCTL_UDEV (1<<19) ++#define RALINK_INTCTL_GLOBAL (1<<31) ++ ++//Reset Control Register ++#define RALINK_SYS_RST (1<<0) ++#define RALINK_TIMER_RST (1<<8) ++#define RALINK_INTC_RST (1<<9) ++#define RALINK_MC_RST (1<<10) ++#define RALINK_PCM_RST (1<<11) ++#define RALINK_UART_RST (1<<12) ++#define RALINK_PIO_RST (1<<13) ++#define RALINK_DMA_RST (1<<14) ++#define RALINK_I2C_RST (1<<16) ++#define RALINK_I2S_RST (1<<17) ++#define RALINK_SPI_RST (1<<18) ++#define RALINK_UARTL_RST (1<<19) ++#define RALINK_FE_RST (1<<21) ++#define RALINK_UHST_RST (1<<22) ++#define RALINK_ESW_RST (1<<23) ++#define RALINK_EPHY_RST (1<<24) ++#define RALINK_UDEV_RST (1<<25) ++#define RALINK_PCIE0_RST (1<<26) ++#define RALINK_PCIE1_RST (1<<27) ++#define RALINK_MIPS_CNT_RST (1<<28) ++#define RALINK_CRYPTO_RST (1<<29) ++ ++//Clock Conf Register ++#define RALINK_UPHY0_CLK_EN (1<<25) ++#define RALINK_UPHY1_CLK_EN (1<<22) ++#define RALINK_PCIE0_CLK_EN (1<<26) ++#define RALINK_PCIE1_CLK_EN (1<<27) ++ ++//CPU PLL CFG Register ++#define CPLL_SW_CONFIG (0x1UL << 31) ++#define CPLL_MULT_RATIO_SHIFT 16 ++#define CPLL_MULT_RATIO (0x7UL << CPLL_MULT_RATIO_SHIFT) ++#define CPLL_DIV_RATIO_SHIFT 10 ++#define CPLL_DIV_RATIO (0x3UL << CPLL_DIV_RATIO_SHIFT) ++#define BASE_CLOCK 40 /* Mhz */ ++ ++#endif ++#endif +--- /dev/null ++++ b/arch/mips/include/asm/rt2880/serial_rt2880.h +@@ -0,0 +1,443 @@ ++/************************************************************************** ++ * ++ * BRIEF MODULE DESCRIPTION ++ * serial port definition for Ralink RT2880 solution ++ * ++ * Copyright 2007 Ralink Inc. (bruce_chang@ralinktech.com.tw) ++ * ++ * This program is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 2 of the License, or (at your ++ * option) any later version. ++ * ++ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED ++ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN ++ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, ++ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT ++ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF ++ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ++ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF ++ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 675 Mass Ave, Cambridge, MA 02139, USA. ++ * ++ * ++ ************************************************************************** ++ * May 2007 Bruce Chang ++ * ++ * Initial Release ++ * ++ * ++ * ++ ************************************************************************** ++ */ ++ ++#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_RALINK_MT7628) ++#define RT2880_UART_RBR_OFFSET 0x00 ++#define RT2880_UART_TBR_OFFSET 0x00 ++#define RT2880_UART_IER_OFFSET 0x04 ++#define RT2880_UART_IIR_OFFSET 0x08 ++#define RT2880_UART_FCR_OFFSET 0x08 ++#define RT2880_UART_LCR_OFFSET 0x0C ++#define RT2880_UART_MCR_OFFSET 0x10 ++#define RT2880_UART_LSR_OFFSET 0x14 ++#define RT2880_UART_DLL_OFFSET 0x00 ++#define RT2880_UART_DLM_OFFSET 0x04 ++#else ++#define RT2880_UART_RBR_OFFSET 0x00 ++#define RT2880_UART_TBR_OFFSET 0x04 ++#define RT2880_UART_IER_OFFSET 0x08 ++#define RT2880_UART_IIR_OFFSET 0x0C ++#define RT2880_UART_FCR_OFFSET 0x10 ++#define RT2880_UART_LCR_OFFSET 0x14 ++#define RT2880_UART_MCR_OFFSET 0x18 ++#define RT2880_UART_LSR_OFFSET 0x1C ++#define RT2880_UART_DLL_OFFSET 0x2C ++#define RT2880_UART_DLM_OFFSET 0x30 ++#endif ++ ++#define RBR(x) *(volatile u32 *)((x)+RT2880_UART_RBR_OFFSET) ++#define TBR(x) *(volatile u32 *)((x)+RT2880_UART_TBR_OFFSET) ++#define IER(x) *(volatile u32 *)((x)+RT2880_UART_IER_OFFSET) ++#define IIR(x) *(volatile u32 *)((x)+RT2880_UART_IIR_OFFSET) ++#define FCR(x) *(volatile u32 *)((x)+RT2880_UART_FCR_OFFSET) ++#define LCR(x) *(volatile u32 *)((x)+RT2880_UART_LCR_OFFSET) ++#define MCR(x) *(volatile u32 *)((x)+RT2880_UART_MCR_OFFSET) ++#define LSR(x) *(volatile u32 *)((x)+RT2880_UART_LSR_OFFSET) ++#define DLL(x) *(volatile u32 *)((x)+RT2880_UART_DLL_OFFSET) ++#define DLM(x) *(volatile u32 *)((x)+RT2880_UART_DLM_OFFSET) ++ ++ ++#if defined (CONFIG_RALINK_RT2880) || \ ++ defined (CONFIG_RALINK_RT2883) || \ ++ defined (CONFIG_RALINK_RT3883) || \ ++ defined (CONFIG_RALINK_RT3352) || \ ++ defined (CONFIG_RALINK_RT5350) || \ ++ defined (CONFIG_RALINK_RT6855) || \ ++ defined (CONFIG_RALINK_MT7620) || \ ++ defined (CONFIG_RALINK_RT3052) ++ ++#define UART_RX 0 /* In: Receive buffer (DLAB=0) */ ++ ++#define UART_TX 4 /* Out: Transmit buffer (DLAB=0) */ ++#define UART_TRG 4 /* (LCR=BF) FCTR bit 7 selects Rx or Tx ++ * In: Fifo count ++ * Out: Fifo custom trigger levels ++ * XR16C85x only ++ */ ++ ++#define UART_IER 8 /* Out: Interrupt Enable Register */ ++#define UART_FCTR 8 /* (LCR=BF) Feature Control Register ++ * XR16C85x only ++ */ ++ ++#define UART_IIR 12 /* In: Interrupt ID Register */ ++#define UART_EFR 12 /* I/O: Extended Features Register */ ++ /* (DLAB=1, 16C660 only) */ ++ ++#define UART_FCR 16 /* Out: FIFO Control Register */ ++#define UART_LCR 20 /* Out: Line Control Register */ ++#define UART_MCR 24 /* Out: Modem Control Register */ ++#define UART_LSR 28 /* In: Line Status Register */ ++#define 
UART_MSR 32 /* In: Modem Status Register */ ++#define UART_SCR 36 /* I/O: Scratch Register */ ++#define UART_DLL 44 /* Out: Divisor Latch Low (DLAB=1) */ ++/* Since surfboard uart cannot be accessed by byte, using UART_DLM will cause ++ * unpredictable values to be written to the Divisor Latch ++ */ ++#define UART_DLM 48 /* Out: Divisor Latch High (DLAB=1) */ ++ ++#else ++ ++#define UART_RX 0 /* In: Receive buffer */ ++#define UART_TX 0 /* Out: Transmit buffer */ ++#define UART_DLL 0 /* Out: Divisor Latch Low */ ++#define UART_TRG 0 /* FCTR bit 7 selects Rx or Tx ++ * In: Fifo count ++ * Out: Fifo custom trigger levels */ ++ ++#define UART_DLM 4 /* Out: Divisor Latch High */ ++#define UART_IER 4 /* Out: Interrupt Enable Register */ ++#define UART_FCTR 4 /* Feature Control Register */ ++ ++#define UART_IIR 8 /* In: Interrupt ID Register */ ++#define UART_FCR 8 /* Out: FIFO Control Register */ ++#define UART_EFR 8 /* I/O: Extended Features Register */ ++ ++#define UART_LCR 12 /* Out: Line Control Register */ ++#define UART_MCR 16 /* Out: Modem Control Register */ ++#define UART_LSR 20 /* In: Line Status Register */ ++#define UART_MSR 24 /* In: Modem Status Register */ ++#define UART_SCR 28 /* I/O: Scratch Register */ ++#define UART_EMSR 28 /* Extended Mode Select Register */ ++ ++#endif ++/* ++ * DLAB=0 ++ */ ++//#define UART_IER 1 /* Out: Interrupt Enable Register */ ++#define UART_IER_MSI 0x08 /* Enable Modem status interrupt */ ++#define UART_IER_RLSI 0x04 /* Enable receiver line status interrupt */ ++#define UART_IER_THRI 0x02 /* Enable Transmitter holding register int. */ ++#define UART_IER_RDI 0x01 /* Enable receiver data interrupt */ ++/* ++ * Sleep mode for ST16650 and TI16750. For the ST16650, EFR[4]=1 ++ */ ++#define UART_IERX_SLEEP 0x10 /* Enable sleep mode */ ++ ++//#define UART_IIR 2 /* In: Interrupt ID Register */ ++#define UART_IIR_NO_INT 0x01 /* No interrupts pending */ ++#define UART_IIR_ID 0x06 /* Mask for the interrupt ID */ ++#define UART_IIR_MSI 0x00 /* Modem status interrupt */ ++#define UART_IIR_THRI 0x02 /* Transmitter holding register empty */ ++#define UART_IIR_RDI 0x04 /* Receiver data interrupt */ ++#define UART_IIR_RLSI 0x06 /* Receiver line status interrupt */ ++ ++//#define UART_FCR 2 /* Out: FIFO Control Register */ ++#define UART_FCR_ENABLE_FIFO 0x01 /* Enable the FIFO */ ++#define UART_FCR_CLEAR_RCVR 0x02 /* Clear the RCVR FIFO */ ++#define UART_FCR_CLEAR_XMIT 0x04 /* Clear the XMIT FIFO */ ++#define UART_FCR_DMA_SELECT 0x08 /* For DMA applications */ ++/* ++ * Note: The FIFO trigger levels are chip specific: ++ * RX:76 = 00 01 10 11 TX:54 = 00 01 10 11 ++ * PC16550D: 1 4 8 14 xx xx xx xx ++ * TI16C550A: 1 4 8 14 xx xx xx xx ++ * TI16C550C: 1 4 8 14 xx xx xx xx ++ * ST16C550: 1 4 8 14 xx xx xx xx ++ * ST16C650: 8 16 24 28 16 8 24 30 PORT_16650V2 ++ * NS16C552: 1 4 8 14 xx xx xx xx ++ * ST16C654: 8 16 56 60 8 16 32 56 PORT_16654 ++ * TI16C750: 1 16 32 56 xx xx xx xx PORT_16750 ++ * TI16C752: 8 16 56 60 8 16 32 56 ++ */ ++#define UART_FCR_R_TRIG_00 0x00 ++#define UART_FCR_R_TRIG_01 0x40 ++#define UART_FCR_R_TRIG_10 0x80 ++#define UART_FCR_R_TRIG_11 0xc0 ++#define UART_FCR_T_TRIG_00 0x00 ++#define UART_FCR_T_TRIG_01 0x10 ++#define UART_FCR_T_TRIG_10 0x20 ++#define UART_FCR_T_TRIG_11 0x30 ++ ++#define UART_FCR_TRIGGER_MASK 0xC0 /* Mask for the FIFO trigger range */ ++#define UART_FCR_TRIGGER_1 0x00 /* Mask for trigger set at 1 */ ++#define UART_FCR_TRIGGER_4 0x40 /* Mask for trigger set at 4 */ ++#define UART_FCR_TRIGGER_8 0x80 /* Mask for trigger set at 8 */ 
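The LSR()/TBR() accessors defined earlier in this header take the UART block base address and expand to volatile 32-bit accesses, which matches the word-only access note above. A minimal polled-transmit sketch, not part of the patch; it assumes uart_base is a KSEG1-mapped UART block base (for example RALINK_UART_LITE_BASE on the KSEG1-addressed SoCs) and uses UART_LSR_THRE defined a few lines further down:

static void example_uart_putc(unsigned long uart_base, char c)
{
	/* busy-wait until the transmit holding register is empty */
	while (!(LSR(uart_base) & UART_LSR_THRE))
		;
	TBR(uart_base) = (unsigned int)c;
}
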
++#define UART_FCR_TRIGGER_14 0xC0 /* Mask for trigger set at 14 */ ++/* 16650 definitions */ ++#define UART_FCR6_R_TRIGGER_8 0x00 /* Mask for receive trigger set at 1 */ ++#define UART_FCR6_R_TRIGGER_16 0x40 /* Mask for receive trigger set at 4 */ ++#define UART_FCR6_R_TRIGGER_24 0x80 /* Mask for receive trigger set at 8 */ ++#define UART_FCR6_R_TRIGGER_28 0xC0 /* Mask for receive trigger set at 14 */ ++#define UART_FCR6_T_TRIGGER_16 0x00 /* Mask for transmit trigger set at 16 */ ++#define UART_FCR6_T_TRIGGER_8 0x10 /* Mask for transmit trigger set at 8 */ ++#define UART_FCR6_T_TRIGGER_24 0x20 /* Mask for transmit trigger set at 24 */ ++#define UART_FCR6_T_TRIGGER_30 0x30 /* Mask for transmit trigger set at 30 */ ++#define UART_FCR7_64BYTE 0x20 /* Go into 64 byte mode (TI16C750) */ ++ ++//#define UART_LCR 3 /* Out: Line Control Register */ ++/* ++ * Note: if the word length is 5 bits (UART_LCR_WLEN5), then setting ++ * UART_LCR_STOP will select 1.5 stop bits, not 2 stop bits. ++ */ ++#define UART_LCR_DLAB 0x80 /* Divisor latch access bit */ ++#define UART_LCR_SBC 0x40 /* Set break control */ ++#define UART_LCR_SPAR 0x20 /* Stick parity (?) */ ++#define UART_LCR_EPAR 0x10 /* Even parity select */ ++#define UART_LCR_PARITY 0x08 /* Parity Enable */ ++#define UART_LCR_STOP 0x04 /* Stop bits: 0=1 bit, 1=2 bits */ ++#define UART_LCR_WLEN5 0x00 /* Wordlength: 5 bits */ ++#define UART_LCR_WLEN6 0x01 /* Wordlength: 6 bits */ ++#define UART_LCR_WLEN7 0x02 /* Wordlength: 7 bits */ ++#define UART_LCR_WLEN8 0x03 /* Wordlength: 8 bits */ ++ ++//#define UART_MCR 4 /* Out: Modem Control Register */ ++#define UART_MCR_CLKSEL 0x80 /* Divide clock by 4 (TI16C752, EFR[4]=1) */ ++#define UART_MCR_TCRTLR 0x40 /* Access TCR/TLR (TI16C752, EFR[4]=1) */ ++#define UART_MCR_XONANY 0x20 /* Enable Xon Any (TI16C752, EFR[4]=1) */ ++#define UART_MCR_AFE 0x20 /* Enable auto-RTS/CTS (TI16C550C/TI16C750) */ ++#define UART_MCR_LOOP 0x10 /* Enable loopback test mode */ ++#define UART_MCR_OUT2 0x08 /* Out2 complement */ ++#define UART_MCR_OUT1 0x04 /* Out1 complement */ ++#define UART_MCR_RTS 0x02 /* RTS complement */ ++#define UART_MCR_DTR 0x01 /* DTR complement */ ++ ++//#define UART_LSR 5 /* In: Line Status Register */ ++#define UART_LSR_TEMT 0x40 /* Transmitter empty */ ++#define UART_LSR_THRE 0x20 /* Transmit-hold-register empty */ ++#define UART_LSR_BI 0x10 /* Break interrupt indicator */ ++#define UART_LSR_FE 0x08 /* Frame error indicator */ ++#define UART_LSR_PE 0x04 /* Parity error indicator */ ++#define UART_LSR_OE 0x02 /* Overrun error indicator */ ++#define UART_LSR_DR 0x01 /* Receiver data ready */ ++ ++//#define UART_MSR 6 /* In: Modem Status Register */ ++#define UART_MSR_DCD 0x80 /* Data Carrier Detect */ ++#define UART_MSR_RI 0x40 /* Ring Indicator */ ++#define UART_MSR_DSR 0x20 /* Data Set Ready */ ++#define UART_MSR_CTS 0x10 /* Clear to Send */ ++#define UART_MSR_DDCD 0x08 /* Delta DCD */ ++#define UART_MSR_TERI 0x04 /* Trailing edge ring indicator */ ++#define UART_MSR_DDSR 0x02 /* Delta DSR */ ++#define UART_MSR_DCTS 0x01 /* Delta CTS */ ++#define UART_MSR_ANY_DELTA 0x0F /* Any of the delta bits! 
*/ ++ ++//#define UART_SCR 7 /* I/O: Scratch Register */ ++ ++/* ++ * DLAB=1 ++ */ ++//#define UART_DLL 0 /* Out: Divisor Latch Low */ ++//#define UART_DLM 1 /* Out: Divisor Latch High */ ++ ++/* ++ * LCR=0xBF (or DLAB=1 for 16C660) ++ */ ++//#define UART_EFR 2 /* I/O: Extended Features Register */ ++#define UART_EFR_CTS 0x80 /* CTS flow control */ ++#define UART_EFR_RTS 0x40 /* RTS flow control */ ++#define UART_EFR_SCD 0x20 /* Special character detect */ ++#define UART_EFR_ECB 0x10 /* Enhanced control bit */ ++/* ++ * the low four bits control software flow control ++ */ ++ ++/* ++ * LCR=0xBF, TI16C752, ST16650, ST16650A, ST16654 ++ */ ++#define UART_XON1 4 /* I/O: Xon character 1 */ ++#define UART_XON2 5 /* I/O: Xon character 2 */ ++#define UART_XOFF1 6 /* I/O: Xoff character 1 */ ++#define UART_XOFF2 7 /* I/O: Xoff character 2 */ ++ ++/* ++ * EFR[4]=1 MCR[6]=1, TI16C752 ++ */ ++#define UART_TI752_TCR 6 /* I/O: transmission control register */ ++#define UART_TI752_TLR 7 /* I/O: trigger level register */ ++ ++/* ++ * LCR=0xBF, XR16C85x ++ */ ++//#define UART_TRG 0 /* FCTR bit 7 selects Rx or Tx ++// * In: Fifo count ++// * Out: Fifo custom trigger levels */ ++/* ++ * These are the definitions for the Programmable Trigger Register ++ */ ++#define UART_TRG_1 0x01 ++#define UART_TRG_4 0x04 ++#define UART_TRG_8 0x08 ++#define UART_TRG_16 0x10 ++#define UART_TRG_32 0x20 ++#define UART_TRG_64 0x40 ++#define UART_TRG_96 0x60 ++#define UART_TRG_120 0x78 ++#define UART_TRG_128 0x80 ++ ++//#define UART_FCTR 1 /* Feature Control Register */ ++#define UART_FCTR_RTS_NODELAY 0x00 /* RTS flow control delay */ ++#define UART_FCTR_RTS_4DELAY 0x01 ++#define UART_FCTR_RTS_6DELAY 0x02 ++#define UART_FCTR_RTS_8DELAY 0x03 ++#define UART_FCTR_IRDA 0x04 /* IrDa data encode select */ ++#define UART_FCTR_TX_INT 0x08 /* Tx interrupt type select */ ++#define UART_FCTR_TRGA 0x00 /* Tx/Rx 550 trigger table select */ ++#define UART_FCTR_TRGB 0x10 /* Tx/Rx 650 trigger table select */ ++#define UART_FCTR_TRGC 0x20 /* Tx/Rx 654 trigger table select */ ++#define UART_FCTR_TRGD 0x30 /* Tx/Rx 850 programmable trigger select */ ++#define UART_FCTR_SCR_SWAP 0x40 /* Scratch pad register swap */ ++#define UART_FCTR_RX 0x00 /* Programmable trigger mode select */ ++#define UART_FCTR_TX 0x80 /* Programmable trigger mode select */ ++ ++/* ++ * LCR=0xBF, FCTR[6]=1 ++ */ ++//#define UART_EMSR 7 /* Extended Mode Select Register */ ++#define UART_EMSR_FIFO_COUNT 0x01 /* Rx/Tx select */ ++#define UART_EMSR_ALT_COUNT 0x02 /* Alternating count select */ ++ ++/* ++ * The Intel XScale on-chip UARTs define these bits ++ */ ++#define UART_IER_DMAE 0x80 /* DMA Requests Enable */ ++#define UART_IER_UUE 0x40 /* UART Unit Enable */ ++#define UART_IER_NRZE 0x20 /* NRZ coding Enable */ ++#define UART_IER_RTOIE 0x10 /* Receiver Time Out Interrupt Enable */ ++ ++#define UART_IIR_TOD 0x08 /* Character Timeout Indication Detected */ ++ ++#define UART_FCR_PXAR1 0x00 /* receive FIFO treshold = 1 */ ++#define UART_FCR_PXAR8 0x40 /* receive FIFO treshold = 8 */ ++#define UART_FCR_PXAR16 0x80 /* receive FIFO treshold = 16 */ ++#define UART_FCR_PXAR32 0xc0 /* receive FIFO treshold = 32 */ ++ ++ ++ ++ ++/* ++ * These register definitions are for the 16C950 ++ */ ++#define UART_ASR 0x01 /* Additional Status Register */ ++#define UART_RFL 0x03 /* Receiver FIFO level */ ++#define UART_TFL 0x04 /* Transmitter FIFO level */ ++#define UART_ICR 0x05 /* Index Control Register */ ++ ++/* The 16950 ICR registers */ ++#define UART_ACR 0x00 /* Additional Control Register 
*/ ++#define UART_CPR 0x01 /* Clock Prescalar Register */ ++#define UART_TCR 0x02 /* Times Clock Register */ ++#define UART_CKS 0x03 /* Clock Select Register */ ++#define UART_TTL 0x04 /* Transmitter Interrupt Trigger Level */ ++#define UART_RTL 0x05 /* Receiver Interrupt Trigger Level */ ++#define UART_FCL 0x06 /* Flow Control Level Lower */ ++#define UART_FCH 0x07 /* Flow Control Level Higher */ ++#define UART_ID1 0x08 /* ID #1 */ ++#define UART_ID2 0x09 /* ID #2 */ ++#define UART_ID3 0x0A /* ID #3 */ ++#define UART_REV 0x0B /* Revision */ ++#define UART_CSR 0x0C /* Channel Software Reset */ ++#define UART_NMR 0x0D /* Nine-bit Mode Register */ ++#define UART_CTR 0xFF ++ ++/* ++ * The 16C950 Additional Control Reigster ++ */ ++#define UART_ACR_RXDIS 0x01 /* Receiver disable */ ++#define UART_ACR_TXDIS 0x02 /* Receiver disable */ ++#define UART_ACR_DSRFC 0x04 /* DSR Flow Control */ ++#define UART_ACR_TLENB 0x20 /* 950 trigger levels enable */ ++#define UART_ACR_ICRRD 0x40 /* ICR Read enable */ ++#define UART_ACR_ASREN 0x80 /* Additional status enable */ ++ ++ ++ ++/* ++ * These definitions are for the RSA-DV II/S card, from ++ * ++ * Kiyokazu SUTO ++ */ ++ ++#define UART_RSA_BASE (-8) ++ ++#define UART_RSA_MSR ((UART_RSA_BASE) + 0) /* I/O: Mode Select Register */ ++ ++#define UART_RSA_MSR_SWAP (1 << 0) /* Swap low/high 8 bytes in I/O port addr */ ++#define UART_RSA_MSR_FIFO (1 << 2) /* Enable the external FIFO */ ++#define UART_RSA_MSR_FLOW (1 << 3) /* Enable the auto RTS/CTS flow control */ ++#define UART_RSA_MSR_ITYP (1 << 4) /* Level (1) / Edge triger (0) */ ++ ++#define UART_RSA_IER ((UART_RSA_BASE) + 1) /* I/O: Interrupt Enable Register */ ++ ++#define UART_RSA_IER_Rx_FIFO_H (1 << 0) /* Enable Rx FIFO half full int. */ ++#define UART_RSA_IER_Tx_FIFO_H (1 << 1) /* Enable Tx FIFO half full int. */ ++#define UART_RSA_IER_Tx_FIFO_E (1 << 2) /* Enable Tx FIFO empty int. */ ++#define UART_RSA_IER_Rx_TOUT (1 << 3) /* Enable char receive timeout int */ ++#define UART_RSA_IER_TIMER (1 << 4) /* Enable timer interrupt */ ++ ++#define UART_RSA_SRR ((UART_RSA_BASE) + 2) /* IN: Status Read Register */ ++ ++#define UART_RSA_SRR_Tx_FIFO_NEMP (1 << 0) /* Tx FIFO is not empty (1) */ ++#define UART_RSA_SRR_Tx_FIFO_NHFL (1 << 1) /* Tx FIFO is not half full (1) */ ++#define UART_RSA_SRR_Tx_FIFO_NFUL (1 << 2) /* Tx FIFO is not full (1) */ ++#define UART_RSA_SRR_Rx_FIFO_NEMP (1 << 3) /* Rx FIFO is not empty (1) */ ++#define UART_RSA_SRR_Rx_FIFO_NHFL (1 << 4) /* Rx FIFO is not half full (1) */ ++#define UART_RSA_SRR_Rx_FIFO_NFUL (1 << 5) /* Rx FIFO is not full (1) */ ++#define UART_RSA_SRR_Rx_TOUT (1 << 6) /* Character reception timeout occurred (1) */ ++#define UART_RSA_SRR_TIMER (1 << 7) /* Timer interrupt occurred */ ++ ++#define UART_RSA_FRR ((UART_RSA_BASE) + 2) /* OUT: FIFO Reset Register */ ++ ++#define UART_RSA_TIVSR ((UART_RSA_BASE) + 3) /* I/O: Timer Interval Value Set Register */ ++ ++#define UART_RSA_TCR ((UART_RSA_BASE) + 4) /* OUT: Timer Control Register */ ++ ++#define UART_RSA_TCR_SWITCH (1 << 0) /* Timer on */ ++ ++/* ++ * The RSA DSV/II board has two fixed clock frequencies. One is the ++ * standard rate, and the other is 8 times faster. ++ */ ++#define SERIAL_RSA_BAUD_BASE (921600) ++#define SERIAL_RSA_BAUD_BASE_LO (SERIAL_RSA_BAUD_BASE / 8) ++ ++/* ++ * Extra serial register definitions for the internal UARTs ++ * in TI OMAP processors. 
++ */ ++#define UART_OMAP_MDR1 0x08 /* Mode definition register */ ++#define UART_OMAP_MDR2 0x09 /* Mode definition register 2 */ ++#define UART_OMAP_SCR 0x10 /* Supplementary control register */ ++#define UART_OMAP_SSR 0x11 /* Supplementary status register */ ++#define UART_OMAP_EBLR 0x12 /* BOF length register */ ++#define UART_OMAP_OSC_12M_SEL 0x13 /* OMAP1510 12MHz osc select */ ++#define UART_OMAP_MVER 0x14 /* Module version register */ ++#define UART_OMAP_SYSC 0x15 /* System configuration register */ ++#define UART_OMAP_SYSS 0x16 /* System status register */ ++ ++ +--- /dev/null ++++ b/arch/mips/include/asm/rt2880/sizes.h +@@ -0,0 +1,52 @@ ++/* ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ++ */ ++/* DO NOT EDIT!! - this file automatically generated ++ * from .s file by awk -f s2h.awk ++ */ ++/* Size definitions ++ * Copyright (C) ARM Limited 1998. All rights reserved. ++ */ ++ ++#ifndef __sizes_h ++#define __sizes_h 1 ++ ++/* handy sizes */ ++#define SZ_1K 0x00000400 ++#define SZ_4K 0x00001000 ++#define SZ_8K 0x00002000 ++#define SZ_16K 0x00004000 ++#define SZ_64K 0x00010000 ++#define SZ_128K 0x00020000 ++#define SZ_256K 0x00040000 ++#define SZ_512K 0x00080000 ++ ++#define SZ_1M 0x00100000 ++#define SZ_2M 0x00200000 ++#define SZ_4M 0x00400000 ++#define SZ_8M 0x00800000 ++#define SZ_16M 0x01000000 ++#define SZ_32M 0x02000000 ++#define SZ_64M 0x04000000 ++#define SZ_128M 0x08000000 ++#define SZ_256M 0x10000000 ++#define SZ_512M 0x20000000 ++ ++#define SZ_1G 0x40000000 ++#define SZ_2G 0x80000000 ++ ++#endif ++ ++/* END */ +--- /dev/null ++++ b/arch/mips/include/asm/rt2880/surfboard.h +@@ -0,0 +1,70 @@ ++/* ++ * Copyright (C) 2001 Palmchip Corporation. All rights reserved. ++ * ++ * ######################################################################## ++ * ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. ++ * ++ * ######################################################################## ++ * ++ */ ++#ifndef _SURFBOARD_H ++#define _SURFBOARD_H ++ ++#include ++ ++ ++ ++/* ++ * Surfboard system clock. ++ * This is the default value and maybe overidden by System Clock passed on the ++ * command line (sysclk=). 
++ */ ++#define SURFBOARD_SYSTEM_CLOCK (125000000) ++ ++/* ++ * Surfboard UART base baud rate = System Clock / 16. ++ * Ex. (14.7456 MHZ / 16) = 921600 ++ * (32.0000 MHZ / 16) = 2000000 ++ */ ++#define SURFBOARD_BAUD_DIV (16) ++#define SURFBOARD_BASE_BAUD (SURFBOARD_SYSTEM_CLOCK / SURFBOARD_BAUD_DIV) ++ ++/* ++ * Maximum number of IDE Controllers ++ * Surfboard only has one ide (ide0), so only 2 drives are ++ * possible. (no need to check for more hwifs.) ++ */ ++//#define MAX_IDE_HWIFS (1) /* Surfboard/Wakeboard */ ++#define MAX_IDE_HWIFS (2) /* Graphite board */ ++ ++#define GCMP_BASE_ADDR 0x1fbf8000 ++#define GCMP_ADDRSPACE_SZ (256 * 1024) ++ ++/* ++ * * GIC Specific definitions ++ * */ ++#define GIC_BASE_ADDR 0x1fbc0000 ++#define GIC_ADDRSPACE_SZ (128 * 1024) ++#define MIPS_GIC_IRQ_BASE (MIPS_CPU_IRQ_BASE) ++ ++/* GIC's Nomenclature for Core Interrupt Pins */ ++#define GIC_CPU_INT0 0 /* Core Interrupt 2 */ ++#define GIC_CPU_INT1 1 /* . */ ++#define GIC_CPU_INT2 2 /* . */ ++#define GIC_CPU_INT3 3 /* . */ ++#define GIC_CPU_INT4 4 /* . */ ++#define GIC_CPU_INT5 5 /* Core Interrupt 5 */ ++ ++#endif /* !(_SURFBOARD_H) */ +--- /dev/null ++++ b/arch/mips/include/asm/rt2880/surfboardint.h +@@ -0,0 +1,190 @@ ++/* ++ * Copyright (C) 2001 Palmchip Corporation. All rights reserved. ++ * ++ * ######################################################################## ++ * ++ * This program is free software; you can distribute it and/or modify it ++ * under the terms of the GNU General Public License (Version 2) as ++ * published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope it will be useful, but WITHOUT ++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++ * for more details. ++ * ++ * You should have received a copy of the GNU General Public License along ++ * with this program; if not, write to the Free Software Foundation, Inc., ++ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. ++ * ++ * ######################################################################## ++ * ++ * Defines for the Surfboard interrupt controller. ++ * ++ */ ++#ifndef _SURFBOARDINT_H ++#define _SURFBOARDINT_H ++ ++/* Number of IRQ supported on hw interrupt 0. 
*/ ++#if defined (CONFIG_RALINK_RT2880) ++#define RALINK_CPU_TIMER_IRQ 6 /* mips timer */ ++#define SURFBOARDINT_GPIO 7 /* GPIO */ ++#define SURFBOARDINT_UART1 8 /* UART Lite */ ++#define SURFBOARDINT_UART 9 /* UART */ ++#define SURFBOARDINT_TIMER0 10 /* timer0 */ ++#elif defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3352) || defined (CONFIG_RALINK_RT2883) || defined (CONFIG_RALINK_RT5350) || defined (CONFIG_RALINK_RT6855) || defined (CONFIG_RALINK_MT7620) ++#define RALINK_CPU_TIMER_IRQ 5 /* mips timer */ ++#define SURFBOARDINT_GPIO 6 /* GPIO */ ++#define SURFBOARDINT_DMA 7 /* DMA */ ++#define SURFBOARDINT_NAND 8 /* NAND */ ++#define SURFBOARDINT_PC 9 /* Performance counter */ ++#define SURFBOARDINT_I2S 10 /* I2S */ ++#define SURFBOARDINT_SDXC 14 /* SDXC */ ++#define SURFBOARDINT_ESW 17 /* ESW */ ++#define SURFBOARDINT_UART1 12 /* UART Lite */ ++#define SURFBOARDINT_CRYPTO 13 /* CryptoEngine */ ++#define SURFBOARDINT_SYSCTL 32 /* SYSCTL */ ++#define SURFBOARDINT_TIMER0 33 /* timer0 */ ++#define SURFBOARDINT_WDG 34 /* watch dog */ ++#define SURFBOARDINT_ILL_ACC 35 /* illegal access */ ++#define SURFBOARDINT_PCM 36 /* PCM */ ++#define SURFBOARDINT_UART 37 /* UART */ ++#define RALINK_INT_PCIE0 13 /* PCIE0 */ ++#define RALINK_INT_PCIE1 14 /* PCIE1 */ ++ ++ ++#elif defined (CONFIG_RALINK_MT7628) ++#define SURFBOARDINT_SYSCTL 0 /* SYSCTL */ ++#define SURFBOARDINT_PCM 4 /* PCM */ ++#define SURFBOARDINT_GPIO 6 /* GPIO */ ++#define SURFBOARDINT_DMA 7 /* DMA */ ++#define SURFBOARDINT_PC 9 /* Performance counter */ ++#define SURFBOARDINT_I2S 10 /* I2S */ ++#define SURFBOARDINT_SPI 11 /* SPI */ ++#define SURFBOARDINT_AES 13 /* AES */ ++#define SURFBOARDINT_CRYPTO 13 /* CryptoEngine */ ++#define SURFBOARDINT_SDXC 14 /* SDXC */ ++#define SURFBOARDINT_ESW 17 /* ESW */ ++#define SURFBOARDINT_USB 18 /* USB */ ++#define SURFBOARDINT_UART_LITE1 20 /* UART Lite */ ++#define SURFBOARDINT_UART_LITE2 21 /* UART Lite */ ++#define SURFBOARDINT_UART_LITE3 22 /* UART Lite */ ++#define SURFBOARDINT_UART1 SURFBOARDINT_UART_LITE1 ++#define SURFBOARDINT_UART SURFBOARDINT_UART_LITE2 ++#define SURFBOARDINT_WDG 23 /* WDG timer */ ++#define SURFBOARDINT_TIMER0 24 /* Timer0 */ ++#define SURFBOARDINT_TIMER1 25 /* Timer1 */ ++#define SURFBOARDINT_ILL_ACC 35 /* illegal access */ ++#define RALINK_INT_PCIE0 2 /* PCIE0 */ ++ ++ ++#elif defined (CONFIG_RALINK_MT7621) ++ ++#define SURFBOARDINT_FE 3 /* FE */ ++#define SURFBOARDINT_PCIE0 4 /* PCIE0 */ ++#define SURFBOARDINT_SYSCTL 6 /* SYSCTL */ ++#define SURFBOARDINT_I2C 8 /* I2C */ ++#define SURFBOARDINT_DRAMC 9 /* DRAMC */ ++#define SURFBOARDINT_PCM 10 /* PCM */ ++#define SURFBOARDINT_HSGDMA 11 /* HSGDMA */ ++#define SURFBOARDINT_GPIO 12 /* GPIO */ ++#define SURFBOARDINT_DMA 13 /* GDMA */ ++#define SURFBOARDINT_NAND 14 /* NAND */ ++#define SURFBOARDINT_NAND_ECC 15 /* NFI ECC */ ++#define SURFBOARDINT_I2S 16 /* I2S */ ++#define SURFBOARDINT_SPI 17 /* SPI */ ++#define SURFBOARDINT_SPDIF 18 /* SPDIF */ ++#define SURFBOARDINT_CRYPTO 19 /* CryptoEngine */ ++#define SURFBOARDINT_SDXC 20 /* SDXC */ ++#define SURFBOARDINT_PCTRL 21 /* Performance counter */ ++#define SURFBOARDINT_USB 22 /* USB */ ++#define SURFBOARDINT_ESW 31 /* Switch */ ++#define SURFBOARDINT_PCIE1 24 /* PCIE1 */ ++#define SURFBOARDINT_PCIE2 25 /* PCIE2 */ ++#define SURFBOARDINT_UART_LITE1 26 /* UART Lite */ ++#define SURFBOARDINT_UART_LITE2 27 /* UART Lite */ ++#define SURFBOARDINT_UART_LITE3 28 /* UART Lite */ ++#define SURFBOARDINT_UART SURFBOARDINT_UART_LITE2 //ttyS0 ++#define SURFBOARDINT_UART1 
SURFBOARDINT_UART_LITE1 //ttyS1 ++ ++#define SURFBOARDINT_WDG 29 /* WDG timer */ ++#define SURFBOARDINT_TIMER0 30 /* Timer0 */ ++#define SURFBOARDINT_TIMER1 31 /* Timer1 */ ++ ++#define RALINK_INT_PCIE0 SURFBOARDINT_PCIE0 ++#define RALINK_INT_PCIE1 SURFBOARDINT_PCIE1 ++#define RALINK_INT_PCIE2 SURFBOARDINT_PCIE2 ++ ++#elif defined (CONFIG_RALINK_RT3883) ++#define RALINK_CPU_TIMER_IRQ 5 /* mips timer */ ++#define SURFBOARDINT_GPIO 6 /* GPIO */ ++#define SURFBOARDINT_DMA 7 /* DMA */ ++#define SURFBOARDINT_NAND 8 /* NAND */ ++#define SURFBOARDINT_PC 9 /* Performance counter */ ++#define SURFBOARDINT_I2S 10 /* I2S */ ++#define SURFBOARDINT_UART1 12 /* UART Lite */ ++#define SURFBOARDINT_PCI 18 /* PCI */ ++#define SURFBOARDINT_UDEV 19 /* USB Device */ ++#define SURFBOARDINT_UHST 20 /* USB Host */ ++#define SURFBOARDINT_SYSCTL 32 /* SYSCTL */ ++#define SURFBOARDINT_TIMER0 33 /* timer0 */ ++#define SURFBOARDINT_ILL_ACC 35 /* illegal access */ ++#define SURFBOARDINT_PCM 36 /* PCM */ ++#define SURFBOARDINT_UART 37 /* UART */ ++#endif ++ ++#define SURFBOARDINT_END 64 ++#define RT2880_INTERINT_START 40 ++ ++/* Global interrupt bit definitions */ ++#define C_SURFBOARD_GLOBAL_INT 31 ++#define M_SURFBOARD_GLOBAL_INT (1 << C_SURFBOARD_GLOBAL_INT) ++ ++/* added ??? */ ++#define RALINK_SDRAM_ILL_ACC_ADDR *(volatile u32 *)(RALINK_SYSCTL_BASE + 0x310) ++#define RALINK_SDRAM_ILL_ACC_TYPE *(volatile u32 *)(RALINK_SYSCTL_BASE + 0x314) ++/* end of added, bobtseng */ ++ ++/* ++ * Surfboard registers are memory mapped on 32-bit aligned boundaries and ++ * only word access are allowed. ++ */ ++#if defined (CONFIG_RALINK_MT7621) || defined (CONFIG_RALINK_MT7628) ++#define RALINK_IRQ0STAT (RALINK_INTCL_BASE + 0x9C) //IRQ_STAT ++#define RALINK_IRQ1STAT (RALINK_INTCL_BASE + 0xA0) //FIQ_STAT ++#define RALINK_INTTYPE (RALINK_INTCL_BASE + 0x6C) //FIQ_SEL ++#define RALINK_INTRAW (RALINK_INTCL_BASE + 0xA4) //INT_PURE ++#define RALINK_INTENA (RALINK_INTCL_BASE + 0x80) //IRQ_MASK_SET ++#define RALINK_INTDIS (RALINK_INTCL_BASE + 0x78) //IRQ_MASK_CLR ++#else ++#define RALINK_IRQ0STAT (RALINK_INTCL_BASE + 0x0) ++#define RALINK_IRQ1STAT (RALINK_INTCL_BASE + 0x4) ++#define RALINK_INTTYPE (RALINK_INTCL_BASE + 0x20) ++#define RALINK_INTRAW (RALINK_INTCL_BASE + 0x30) ++#define RALINK_INTENA (RALINK_INTCL_BASE + 0x34) ++#define RALINK_INTDIS (RALINK_INTCL_BASE + 0x38) ++#endif ++ ++/* bobtseng added ++, 2006.3.6. */ ++#define read_32bit_cp0_register(source) \ ++({ int __res; \ ++ __asm__ __volatile__( \ ++ ".set\tpush\n\t" \ ++ ".set\treorder\n\t" \ ++ "mfc0\t%0,"STR(source)"\n\t" \ ++ ".set\tpop" \ ++ : "=r" (__res)); \ ++ __res;}) ++ ++#define write_32bit_cp0_register(register,value) \ ++ __asm__ __volatile__( \ ++ "mtc0\t%0,"STR(register)"\n\t" \ ++ "nop" \ ++ : : "r" (value)); ++ ++/* bobtseng added --, 2006.3.6. */ ++ ++void surfboardint_init(void); ++u32 get_surfboard_sysclk(void); ++ ++ ++#endif /* !(_SURFBOARDINT_H) */ +--- /dev/null ++++ b/arch/mips/include/asm/rt2880/war.h +@@ -0,0 +1,25 @@ ++/* ++ * This file is subject to the terms and conditions of the GNU General Public ++ * License. See the file "COPYING" in the main directory of this archive ++ * for more details. 
++ * ++ * Copyright (C) 2002, 2004, 2007 by Ralf Baechle ++ */ ++#ifndef __ASM_MIPS_MACH_MIPS_WAR_H ++#define __ASM_MIPS_MACH_MIPS_WAR_H ++ ++#define R4600_V1_INDEX_ICACHEOP_WAR 0 ++#define R4600_V1_HIT_CACHEOP_WAR 0 ++#define R4600_V2_HIT_CACHEOP_WAR 0 ++#define R5432_CP0_INTERRUPT_WAR 0 ++#define BCM1250_M3_WAR 0 ++#define SIBYTE_1956_WAR 0 ++#define MIPS4K_ICACHE_REFILL_WAR 1 ++#define MIPS_CACHE_SYNC_WAR 1 ++#define TX49XX_ICACHE_INDEX_INV_WAR 0 ++#define RM9000_CDEX_SMP_WAR 0 ++#define ICACHE_REFILLS_WORKAROUND_WAR 1 ++#define R10000_LLSC_WAR 0 ++#define MIPS34K_MISSED_ITLB_WAR 0 ++ ++#endif /* __ASM_MIPS_MACH_MIPS_WAR_H */ diff --git a/target/linux/ramips/patches-3.10/0217-pinmux-rt2880.patch b/target/linux/ramips/patches-3.10/0217-pinmux-rt2880.patch new file mode 100644 index 0000000000..05d95484da --- /dev/null +++ b/target/linux/ramips/patches-3.10/0217-pinmux-rt2880.patch @@ -0,0 +1,88 @@ +Index: linux-3.10.32/arch/mips/ralink/rt288x.c +=================================================================== +--- linux-3.10.32.orig/arch/mips/ralink/rt288x.c 2014-02-22 20:41:54.000000000 +0000 ++++ linux-3.10.32/arch/mips/ralink/rt288x.c 2014-03-18 11:18:06.689596876 +0000 +@@ -17,46 +17,27 @@ + #include + #include + #include ++#include + + #include "common.h" + +-static struct ralink_pinmux_grp mode_mux[] = { +- { +- .name = "i2c", +- .mask = RT2880_GPIO_MODE_I2C, +- .gpio_first = 1, +- .gpio_last = 2, +- }, { +- .name = "spi", +- .mask = RT2880_GPIO_MODE_SPI, +- .gpio_first = 3, +- .gpio_last = 6, +- }, { +- .name = "uartlite", +- .mask = RT2880_GPIO_MODE_UART0, +- .gpio_first = 7, +- .gpio_last = 14, +- }, { +- .name = "jtag", +- .mask = RT2880_GPIO_MODE_JTAG, +- .gpio_first = 17, +- .gpio_last = 21, +- }, { +- .name = "mdio", +- .mask = RT2880_GPIO_MODE_MDIO, +- .gpio_first = 22, +- .gpio_last = 23, +- }, { +- .name = "sdram", +- .mask = RT2880_GPIO_MODE_SDRAM, +- .gpio_first = 24, +- .gpio_last = 39, +- }, { +- .name = "pci", +- .mask = RT2880_GPIO_MODE_PCI, +- .gpio_first = 40, +- .gpio_last = 71, +- }, {0} ++static struct rt2880_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) }; ++static struct rt2880_pmx_func spi_func[] = { FUNC("spi", 0, 3, 6) }; ++static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 7, 14) }; ++static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 21) }; ++static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 23) }; ++static struct rt2880_pmx_func sdram_func[] = { FUNC("sdram", 0, 24, 39) }; ++static struct rt2880_pmx_func pci_func[] = { FUNC("pci", 0, 40, 71) }; ++ ++static struct rt2880_pmx_group rt2880_pinmux_data_act[] = { ++ GRP("i2c", i2c_func, 1, RT2880_GPIO_MODE_I2C), ++ GRP("spi", spi_func, 1, RT2880_GPIO_MODE_SPI), ++ GRP("uartlite", uartlite_func, 1, RT2880_GPIO_MODE_UART0), ++ GRP("jtag", jtag_func, 1, RT2880_GPIO_MODE_JTAG), ++ GRP("mdio", mdio_func, 1, RT2880_GPIO_MODE_MDIO), ++ GRP("sdram", sdram_func, 1, RT2880_GPIO_MODE_SDRAM), ++ GRP("pci", pci_func, 1, RT2880_GPIO_MODE_PCI), ++ { 0 } + }; + + static void rt288x_wdt_reset(void) +@@ -69,11 +50,6 @@ + rt_sysc_w32(t, SYSC_REG_CLKCFG); + } + +-struct ralink_pinmux rt_gpio_pinmux = { +- .mode = mode_mux, +- .wdt_reset = rt288x_wdt_reset, +-}; +- + void __init ralink_clk_init(void) + { + unsigned long cpu_rate; +@@ -140,4 +116,6 @@ + soc_info->mem_base = RT2880_SDRAM_BASE; + soc_info->mem_size_min = RT2880_MEM_SIZE_MIN; + soc_info->mem_size_max = RT2880_MEM_SIZE_MAX; ++ ++ rt2880_pinmux_data = rt2880_pinmux_data_act; + } diff --git 
a/target/linux/ramips/patches-3.10/0250-nand-7620.patch b/target/linux/ramips/patches-3.10/0250-nand-7620.patch deleted file mode 100644 index 248e82c29e..0000000000 --- a/target/linux/ramips/patches-3.10/0250-nand-7620.patch +++ /dev/null @@ -1,2409 +0,0 @@ -From a5fc495c8dc199ffa997d43331693a5b7ee07270 Mon Sep 17 00:00:00 2001 -From: John Crispin -Date: Sun, 17 Nov 2013 17:41:46 +0100 -Subject: [PATCH] ralink: add mt7620 nand driver - -Signed-off-by: John Crispin ---- - drivers/mtd/maps/Kconfig | 4 + - drivers/mtd/maps/Makefile | 2 + - drivers/mtd/maps/ralink_nand.c | 2136 ++++++++++++++++++++++++++++++++++++++++ - drivers/mtd/maps/ralink_nand.h | 232 +++++ - drivers/mtd/nand/Makefile | 2 +- - 5 files changed, 2375 insertions(+), 1 deletion(-) - create mode 100644 drivers/mtd/maps/ralink_nand.c - create mode 100644 drivers/mtd/maps/ralink_nand.h - ---- a/drivers/mtd/maps/Kconfig -+++ b/drivers/mtd/maps/Kconfig -@@ -424,4 +424,8 @@ config MTD_LATCH_ADDR - - If compiled as a module, it will be called latch-addr-flash. - -+config MTD_NAND_MT7620 -+ tristate "Support for NAND on Mediatek MT7620" -+ depends on RALINK && SOC_MT7620 -+ - endmenu ---- a/drivers/mtd/maps/Makefile -+++ b/drivers/mtd/maps/Makefile -@@ -46,3 +46,5 @@ obj-$(CONFIG_MTD_VMU) += vmu-flash.o - obj-$(CONFIG_MTD_GPIO_ADDR) += gpio-addr-flash.o - obj-$(CONFIG_MTD_LATCH_ADDR) += latch-addr-flash.o - obj-$(CONFIG_MTD_LANTIQ) += lantiq-flash.o -+obj-$(CONFIG_MTD_NAND_MT7620) += ralink_nand.o -+ ---- /dev/null -+++ b/drivers/mtd/maps/ralink_nand.c -@@ -0,0 +1,2136 @@ -+#define DEBUG -+#include -+#undef DEBUG -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "ralink_nand.h" -+#ifdef RANDOM_GEN_BAD_BLOCK -+#include -+#endif -+ -+#define LARGE_MTD_BOOT_PART_SIZE (CFG_BLOCKSIZE<<2) -+#define LARGE_MTD_CONFIG_PART_SIZE (CFG_BLOCKSIZE<<2) -+#define LARGE_MTD_FACTORY_PART_SIZE (CFG_BLOCKSIZE<<1) -+ -+ -+#define BLOCK_ALIGNED(a) ((a) & (CFG_BLOCKSIZE - 1)) -+ -+#define READ_STATUS_RETRY 1000 -+ -+struct mtd_info *ranfc_mtd = NULL; -+ -+int skipbbt = 0; -+int ranfc_debug = 1; -+static int ranfc_bbt = 1; -+#if defined (WORKAROUND_RX_BUF_OV) -+static int ranfc_verify = 1; -+#endif -+static u32 nand_addrlen; -+ -+#if 0 -+module_param(ranfc_debug, int, 0644); -+module_param(ranfc_bbt, int, 0644); -+module_param(ranfc_verify, int, 0644); -+#endif -+ -+#if 0 -+#define ra_dbg(args...) do { if (ranfc_debug) printk(args); } while(0) -+#else -+#define ra_dbg(args...) -+#endif -+ -+#define CLEAR_INT_STATUS() ra_outl(NFC_INT_ST, ra_inl(NFC_INT_ST)) -+#define NFC_TRANS_DONE() (ra_inl(NFC_INT_ST) & INT_ST_ND_DONE) -+ -+int is_nand_page_2048 = 0; -+const unsigned int nand_size_map[2][3] = {{25, 30, 30}, {20, 27, 30}}; -+ -+static int nfc_wait_ready(int snooze_ms); -+ -+static const char * const mtk_probe_types[] = { "cmdlinepart", "ofpart", NULL }; -+ -+/** -+ * reset nand chip -+ */ -+static int nfc_chip_reset(void) -+{ -+ int status; -+ -+ //ra_dbg("%s:\n", __func__); -+ -+ // reset nand flash -+ ra_outl(NFC_CMD1, 0x0); -+ ra_outl(NFC_CMD2, 0xff); -+ ra_outl(NFC_ADDR, 0x0); -+ ra_outl(NFC_CONF, 0x0411); -+ -+ status = nfc_wait_ready(5); //erase wait 5us -+ if (status & NAND_STATUS_FAIL) { -+ printk("%s: fail \n", __func__); -+ } -+ -+ return (int)(status & NAND_STATUS_FAIL); -+ -+} -+ -+ -+ -+/** -+ * clear NFC and flash chip. 
-+ */ -+static int nfc_all_reset(void) -+{ -+ int retry; -+ -+ ra_dbg("%s: \n", __func__); -+ -+ // reset controller -+ ra_outl(NFC_CTRL, ra_inl(NFC_CTRL) | 0x02); //clear data buffer -+ ra_outl(NFC_CTRL, ra_inl(NFC_CTRL) & ~0x02); //clear data buffer -+ -+ CLEAR_INT_STATUS(); -+ -+ retry = READ_STATUS_RETRY; -+ while ((ra_inl(NFC_INT_ST) & 0x02) != 0x02 && retry--); -+ if (retry <= 0) { -+ printk("nfc_all_reset: clean buffer fail \n"); -+ return -1; -+ } -+ -+ retry = READ_STATUS_RETRY; -+ while ((ra_inl(NFC_STATUS) & 0x1) != 0x0 && retry--) { //fixme, controller is busy ? -+ udelay(1); -+ } -+ -+ nfc_chip_reset(); -+ -+ return 0; -+} -+ -+/** NOTICE: only called by nfc_wait_ready(). -+ * @return -1, nfc can not get transction done -+ * @return 0, ok. -+ */ -+static int _nfc_read_status(char *status) -+{ -+ unsigned long cmd1, conf; -+ int int_st, nfc_st; -+ int retry; -+ -+ cmd1 = 0x70; -+ conf = 0x000101 | (1 << 20); -+ -+ //fixme, should we check nfc status? -+ CLEAR_INT_STATUS(); -+ -+ ra_outl(NFC_CMD1, cmd1); -+ ra_outl(NFC_CONF, conf); -+ -+ /* FIXME, -+ * 1. since we have no wired ready signal, directly -+ * calling this function is not gurantee to read right status under ready state. -+ * 2. the other side, we can not determine how long to become ready, this timeout retry is nonsense. -+ * 3. SUGGESTION: call nfc_read_status() from nfc_wait_ready(), -+ * that is aware about caller (in sementics) and has snooze plused nfc ND_DONE. -+ */ -+ retry = READ_STATUS_RETRY; -+ do { -+ nfc_st = ra_inl(NFC_STATUS); -+ int_st = ra_inl(NFC_INT_ST); -+ -+ ndelay(10); -+ } while (!(int_st & INT_ST_RX_BUF_RDY) && retry--); -+ -+ if (!(int_st & INT_ST_RX_BUF_RDY)) { -+ printk("nfc_read_status: NFC fail, int_st(%x), retry:%x. nfc:%x, reset nfc and flash. \n", -+ int_st, retry, nfc_st); -+ nfc_all_reset(); -+ *status = NAND_STATUS_FAIL; -+ return -1; -+ } -+ -+ *status = (char)(le32_to_cpu(ra_inl(NFC_DATA)) & 0x0ff); -+ return 0; -+} -+ -+/** -+ * @return !0, chip protect. -+ * @return 0, chip not protected. -+ */ -+static int nfc_check_wp(void) -+{ -+ /* Check the WP bit */ -+#if !defined CONFIG_NOT_SUPPORT_WP -+ return !!(ra_inl(NFC_CTRL) & 0x01); -+#else -+ char result = 0; -+ int ret; -+ -+ ret = _nfc_read_status(&result); -+ //FIXME, if ret < 0 -+ -+ return !(result & NAND_STATUS_WP); -+#endif -+} -+ -+#if !defined CONFIG_NOT_SUPPORT_RB -+/* -+ * @return !0, chip ready. -+ * @return 0, chip busy. -+ */ -+static int nfc_device_ready(void) -+{ -+ /* Check the ready */ -+ return !!(ra_inl(NFC_STATUS) & 0x04); -+} -+#endif -+ -+ -+/** -+ * generic function to get data from flash. -+ * @return data length reading from flash. -+ */ -+static int _ra_nand_pull_data(char *buf, int len, int use_gdma) -+{ -+#ifdef RW_DATA_BY_BYTE -+ char *p = buf; -+#else -+ __u32 *p = (__u32 *)buf; -+#endif -+ int retry, int_st; -+ unsigned int ret_data; -+ int ret_size; -+ -+ // receive data by use_gdma -+ if (use_gdma) { -+ //if (_ra_nand_dma_pull((unsigned long)p, len)) { -+ if (1) { -+ printk("%s: fail \n", __func__); -+ len = -1; //return error -+ } -+ -+ return len; -+ } -+ -+ //fixme: retry count size? 
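The pull loop that follows drains each 32-bit NFC_DATA word into the caller's byte buffer least-significant byte first, exactly as the RW_DATA_BY_BYTE branch does. A minimal standalone sketch of that unpacking step (plain C; the helper name and the sample word are illustrative, not taken from the driver):

#include <stdio.h>

/* Copy up to four bytes of a 32-bit FIFO word into dst, low byte first. */
static int unpack_fifo_word(unsigned int word, unsigned char *dst, int len)
{
	int n = (len < 4) ? len : 4;	/* at most 4 bytes per FIFO word */
	int i;

	for (i = 0; i < n; i++) {
		dst[i] = word & 0xff;	/* NFC data is little-endian */
		word >>= 8;
	}
	return n;
}

int main(void)
{
	unsigned char buf[4];
	int n = unpack_fifo_word(0xaabbccdd, buf, (int)sizeof(buf));

	/* prints: 4 bytes: dd cc bb aa */
	printf("%d bytes: %02x %02x %02x %02x\n", n, buf[0], buf[1], buf[2], buf[3]);
	return 0;
}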
-+ retry = READ_STATUS_RETRY; -+ // no gdma -+ while (len > 0) { -+ int_st = ra_inl(NFC_INT_ST); -+ if (int_st & INT_ST_RX_BUF_RDY) { -+ -+ ret_data = ra_inl(NFC_DATA); -+ ra_outl(NFC_INT_ST, INT_ST_RX_BUF_RDY); -+#ifdef RW_DATA_BY_BYTE -+ ret_size = sizeof(unsigned int); -+ ret_size = min(ret_size, len); -+ len -= ret_size; -+ while (ret_size-- > 0) { -+ //nfc is little endian -+ *p++ = ret_data & 0x0ff; -+ ret_data >>= 8; -+ } -+#else -+ ret_size = min(len, 4); -+ len -= ret_size; -+ if (ret_size == 4) -+ *p++ = ret_data; -+ else { -+ __u8 *q = (__u8 *)p; -+ while (ret_size-- > 0) { -+ *q++ = ret_data & 0x0ff; -+ ret_data >>= 8; -+ } -+ p = (__u32 *)q; -+ } -+#endif -+ retry = READ_STATUS_RETRY; -+ } -+ else if (int_st & INT_ST_ND_DONE) { -+ break; -+ } -+ else { -+ udelay(1); -+ if (retry-- < 0) -+ break; -+ } -+ } -+ -+#ifdef RW_DATA_BY_BYTE -+ return (int)(p - buf); -+#else -+ return ((int)p - (int)buf); -+#endif -+} -+ -+/** -+ * generic function to put data into flash. -+ * @return data length writing into flash. -+ */ -+static int _ra_nand_push_data(char *buf, int len, int use_gdma) -+{ -+#ifdef RW_DATA_BY_BYTE -+ char *p = buf; -+#else -+ __u32 *p = (__u32 *)buf; -+#endif -+ int retry, int_st; -+ unsigned int tx_data = 0; -+ int tx_size, iter = 0; -+ -+ // receive data by use_gdma -+ if (use_gdma) { -+ //if (_ra_nand_dma_push((unsigned long)p, len)) -+ if (1) -+ len = 0; -+ printk("%s: fail \n", __func__); -+ return len; -+ } -+ -+ // no gdma -+ retry = READ_STATUS_RETRY; -+ while (len > 0) { -+ int_st = ra_inl(NFC_INT_ST); -+ if (int_st & INT_ST_TX_BUF_RDY) { -+#ifdef RW_DATA_BY_BYTE -+ tx_size = min(len, (int)sizeof(unsigned long)); -+ for (iter = 0; iter < tx_size; iter++) { -+ tx_data |= (*p++ << (8*iter)); -+ } -+#else -+ tx_size = min(len, 4); -+ if (tx_size == 4) -+ tx_data = (*p++); -+ else { -+ __u8 *q = (__u8 *)p; -+ for (iter = 0; iter < tx_size; iter++) -+ tx_data |= (*q++ << (8*iter)); -+ p = (__u32 *)q; -+ } -+#endif -+ ra_outl(NFC_INT_ST, INT_ST_TX_BUF_RDY); -+ ra_outl(NFC_DATA, tx_data); -+ len -= tx_size; -+ retry = READ_STATUS_RETRY; -+ } -+ else if (int_st & INT_ST_ND_DONE) { -+ break; -+ } -+ else { -+ udelay(1); -+ if (retry-- < 0) { -+ ra_dbg("%s p:%p buf:%p \n", __func__, p, buf); -+ break; -+ } -+ } -+ } -+ -+ -+#ifdef RW_DATA_BY_BYTE -+ return (int)(p - buf); -+#else -+ return ((int)p - (int)buf); -+#endif -+ -+} -+ -+static int nfc_select_chip(struct ra_nand_chip *ra, int chipnr) -+{ -+#if (CONFIG_NUMCHIPS == 1) -+ if (!(chipnr < CONFIG_NUMCHIPS)) -+ return -1; -+ return 0; -+#else -+ BUG(); -+#endif -+} -+ -+/** @return -1: chip_select fail -+ * 0 : both CE and WP==0 are OK -+ * 1 : CE OK and WP==1 -+ */ -+static int nfc_enable_chip(struct ra_nand_chip *ra, unsigned int offs, int read_only) -+{ -+ int chipnr = offs >> ra->chip_shift; -+ -+ ra_dbg("%s: offs:%x read_only:%x \n", __func__, offs, read_only); -+ -+ chipnr = nfc_select_chip(ra, chipnr); -+ if (chipnr < 0) { -+ printk("%s: chip select error, offs(%x)\n", __func__, offs); -+ return -1; -+ } -+ -+ if (!read_only) -+ return nfc_check_wp(); -+ -+ return 0; -+} -+ -+/** wait nand chip becomeing ready and return queried status. -+ * @param snooze: sleep time in ms unit before polling device ready. -+ * @return status of nand chip -+ * @return NAN_STATUS_FAIL if something unexpected. 
-+ */ -+static int nfc_wait_ready(int snooze_ms) -+{ -+ int retry; -+ char status; -+ -+ // wait nfc idle, -+ if (snooze_ms == 0) -+ snooze_ms = 1; -+ else -+ schedule_timeout(snooze_ms * HZ / 1000); -+ -+ snooze_ms = retry = snooze_ms *1000000 / 100 ; // ndelay(100) -+ -+ while (!NFC_TRANS_DONE() && retry--) { -+ if (!cond_resched()) -+ ndelay(100); -+ } -+ -+ if (!NFC_TRANS_DONE()) { -+ printk("nfc_wait_ready: no transaction done \n"); -+ return NAND_STATUS_FAIL; -+ } -+ -+#if !defined (CONFIG_NOT_SUPPORT_RB) -+ //fixme -+ while(!(status = nfc_device_ready()) && retry--) { -+ ndelay(100); -+ } -+ -+ if (status == 0) { -+ printk("nfc_wait_ready: no device ready. \n"); -+ return NAND_STATUS_FAIL; -+ } -+ -+ _nfc_read_status(&status); -+ return status; -+#else -+ -+ while(retry--) { -+ _nfc_read_status(&status); -+ if (status & NAND_STATUS_READY) -+ break; -+ ndelay(100); -+ } -+ if (retry<0) -+ printk("nfc_wait_ready 2: no device ready, status(%x). \n", status); -+ -+ return status; -+#endif -+} -+ -+/** -+ * return 0: erase OK -+ * return -EIO: fail -+ */ -+int nfc_erase_block(struct ra_nand_chip *ra, int row_addr) -+{ -+ unsigned long cmd1, cmd2, bus_addr, conf; -+ char status; -+ -+ cmd1 = 0x60; -+ cmd2 = 0xd0; -+ bus_addr = row_addr; -+ conf = 0x00511 | ((CFG_ROW_ADDR_CYCLE)<<16); -+ -+ // set NFC -+ ra_dbg("%s: cmd1: %lx, cmd2:%lx bus_addr: %lx, conf: %lx \n", -+ __func__, cmd1, cmd2, bus_addr, conf); -+ -+ //fixme, should we check nfc status? -+ CLEAR_INT_STATUS(); -+ -+ ra_outl(NFC_CMD1, cmd1); -+ ra_outl(NFC_CMD2, cmd2); -+ ra_outl(NFC_ADDR, bus_addr); -+ ra_outl(NFC_CONF, conf); -+ -+ status = nfc_wait_ready(3); //erase wait 3ms -+ if (status & NAND_STATUS_FAIL) { -+ printk("%s: fail \n", __func__); -+ return -EIO; -+ } -+ -+ return 0; -+ -+} -+ -+static inline int _nfc_read_raw_data(int cmd1, int cmd2, int bus_addr, int bus_addr2, int conf, char *buf, int len, int flags) -+{ -+ int ret; -+ -+ CLEAR_INT_STATUS(); -+ ra_outl(NFC_CMD1, cmd1); -+ ra_outl(NFC_CMD2, cmd2); -+ ra_outl(NFC_ADDR, bus_addr); -+#if defined (CONFIG_RALINK_RT6855) || defined (CONFIG_RALINK_RT6855A) || \ -+ defined (CONFIG_RALINK_MT7620) || defined (CONFIG_RALINK_MT7621) -+ ra_outl(NFC_ADDR2, bus_addr2); -+#endif -+ ra_outl(NFC_CONF, conf); -+ -+ ret = _ra_nand_pull_data(buf, len, 0); -+ if (ret != len) { -+ ra_dbg("%s: ret:%x (%x) \n", __func__, ret, len); -+ return NAND_STATUS_FAIL; -+ } -+ -+ //FIXME, this section is not necessary -+ ret = nfc_wait_ready(0); //wait ready -+ /* to prevent the DATA FIFO 's old data from next operation */ -+ ra_outl(NFC_CTRL, ra_inl(NFC_CTRL) | 0x02); //clear data buffer -+ ra_outl(NFC_CTRL, ra_inl(NFC_CTRL) & ~0x02); //clear data buffer -+ -+ if (ret & NAND_STATUS_FAIL) { -+ printk("%s: fail \n", __func__); -+ return NAND_STATUS_FAIL; -+ } -+ -+ return 0; -+} -+ -+static inline int _nfc_write_raw_data(int cmd1, int cmd3, int bus_addr, int bus_addr2, int conf, char *buf, int len, int flags) -+{ -+ int ret; -+ -+ CLEAR_INT_STATUS(); -+ ra_outl(NFC_CMD1, cmd1); -+ ra_outl(NFC_CMD3, cmd3); -+ ra_outl(NFC_ADDR, bus_addr); -+#if defined (CONFIG_RALINK_RT6855) || defined (CONFIG_RALINK_RT6855A) || \ -+ defined (CONFIG_RALINK_MT7620) || defined (CONFIG_RALINK_MT7621) -+ ra_outl(NFC_ADDR2, bus_addr2); -+#endif -+ ra_outl(NFC_CONF, conf); -+ -+ ret = _ra_nand_push_data(buf, len, 0); -+ if (ret != len) { -+ ra_dbg("%s: ret:%x (%x) \n", __func__, ret, len); -+ return NAND_STATUS_FAIL; -+ } -+ -+ ret = nfc_wait_ready(1); //write wait 1ms -+ /* to prevent the DATA FIFO 's old data from 
next operation */ -+ ra_outl(NFC_CTRL, ra_inl(NFC_CTRL) | 0x02); //clear data buffer -+ ra_outl(NFC_CTRL, ra_inl(NFC_CTRL) & ~0x02); //clear data buffer -+ -+ if (ret & NAND_STATUS_FAIL) { -+ printk("%s: fail \n", __func__); -+ return NAND_STATUS_FAIL; -+ } -+ -+ return 0; -+} -+ -+/** -+ * @return !0: fail -+ * @return 0: OK -+ */ -+int nfc_read_oob(struct ra_nand_chip *ra, int page, unsigned int offs, char *buf, int len, int flags) -+{ -+ unsigned int cmd1 = 0, cmd2 = 0, conf = 0; -+ unsigned int bus_addr = 0, bus_addr2 = 0; -+ unsigned int ecc_en; -+ int use_gdma; -+ int status; -+ -+ int pages_perblock = 1<<(ra->erase_shift - ra->page_shift); -+ // constrain of nfc read function -+ -+#if defined (WORKAROUND_RX_BUF_OV) -+ BUG_ON (len > 60); //problem of rx-buffer overrun -+#endif -+ BUG_ON (offs >> ra->oob_shift); //page boundry -+ BUG_ON ((unsigned int)(((offs + len) >> ra->oob_shift) + page) > -+ ((page + pages_perblock) & ~(pages_perblock-1))); //block boundry -+ -+ use_gdma = flags & FLAG_USE_GDMA; -+ ecc_en = flags & FLAG_ECC_EN; -+ bus_addr = (page << (CFG_COLUMN_ADDR_CYCLE*8)) | (offs & ((1<> (CFG_COLUMN_ADDR_CYCLE*8); -+ cmd1 = 0x0; -+ cmd2 = 0x30; -+ conf = 0x000511| ((CFG_ADDR_CYCLE)<<16) | (len << 20); -+ } -+ else { -+ cmd1 = 0x50; -+ conf = 0x000141| ((CFG_ADDR_CYCLE)<<16) | (len << 20); -+ } -+ if (ecc_en) -+ conf |= (1<<3); -+ if (use_gdma) -+ conf |= (1<<2); -+ -+ ra_dbg("%s: cmd1:%x, bus_addr:%x, conf:%x, len:%x, flag:%x\n", -+ __func__, cmd1, bus_addr, conf, len, flags); -+ -+ status = _nfc_read_raw_data(cmd1, cmd2, bus_addr, bus_addr2, conf, buf, len, flags); -+ if (status & NAND_STATUS_FAIL) { -+ printk("%s: fail\n", __func__); -+ return -EIO; -+ } -+ -+ return 0; -+} -+ -+/** -+ * @return !0: fail -+ * @return 0: OK -+ */ -+int nfc_write_oob(struct ra_nand_chip *ra, int page, unsigned int offs, char *buf, int len, int flags) -+{ -+ unsigned int cmd1 = 0, cmd3=0, conf = 0; -+ unsigned int bus_addr = 0, bus_addr2 = 0; -+ int use_gdma; -+ int status; -+ -+ int pages_perblock = 1<<(ra->erase_shift - ra->page_shift); -+ // constrain of nfc read function -+ -+ BUG_ON (offs >> ra->oob_shift); //page boundry -+ BUG_ON ((unsigned int)(((offs + len) >> ra->oob_shift) + page) > -+ ((page + pages_perblock) & ~(pages_perblock-1))); //block boundry -+ -+ use_gdma = flags & FLAG_USE_GDMA; -+ bus_addr = (page << (CFG_COLUMN_ADDR_CYCLE*8)) | (offs & ((1<> (CFG_COLUMN_ADDR_CYCLE*8); -+ conf = 0x001123 | ((CFG_ADDR_CYCLE)<<16) | ((len) << 20); -+ } -+ else { -+ cmd1 = 0x08050; -+ cmd3 = 0x10; -+ conf = 0x001223 | ((CFG_ADDR_CYCLE)<<16) | ((len) << 20); -+ } -+ if (use_gdma) -+ conf |= (1<<2); -+ -+ // set NFC -+ ra_dbg("%s: cmd1: %x, cmd3: %x bus_addr: %x, conf: %x, len:%x\n", -+ __func__, cmd1, cmd3, bus_addr, conf, len); -+ -+ status = _nfc_write_raw_data(cmd1, cmd3, bus_addr, bus_addr2, conf, buf, len, flags); -+ if (status & NAND_STATUS_FAIL) { -+ printk("%s: fail \n", __func__); -+ return -EIO; -+ } -+ -+ return 0; -+} -+ -+ -+int nfc_read_page(struct ra_nand_chip *ra, char *buf, int page, int flags); -+int nfc_write_page(struct ra_nand_chip *ra, char *buf, int page, int flags); -+ -+ -+#if !defined (WORKAROUND_RX_BUF_OV) -+static int one_bit_correction(char *ecc, char *expected, int *bytes, int *bits); -+int nfc_ecc_verify(struct ra_nand_chip *ra, char *buf, int page, int mode) -+{ -+ int ret, i; -+ char *p, *e; -+ int ecc; -+ -+ //ra_dbg("%s, page:%x mode:%d\n", __func__, page, mode); -+ -+ if (mode == FL_WRITING) { -+ int len = CFG_PAGESIZE + CFG_PAGE_OOBSIZE; -+ int conf = 
0x000141| ((CFG_ADDR_CYCLE)<<16) | (len << 20); -+ conf |= (1<<3); //(ecc_en) -+ //conf |= (1<<2); // (use_gdma) -+ -+ p = ra->readback_buffers; -+ ret = nfc_read_page(ra, ra->readback_buffers, page, FLAG_ECC_EN); -+ if (ret == 0) -+ goto ecc_check; -+ -+ //FIXME, double comfirm -+ printk("%s: read back fail, try again \n",__func__); -+ ret = nfc_read_page(ra, ra->readback_buffers, page, FLAG_ECC_EN); -+ if (ret != 0) { -+ printk("\t%s: read back fail agian \n",__func__); -+ goto bad_block; -+ } -+ } -+ else if (mode == FL_READING) { -+ p = buf; -+ } -+ else -+ return -2; -+ -+ecc_check: -+ p += CFG_PAGESIZE; -+ if (!is_nand_page_2048) { -+ ecc = ra_inl(NFC_ECC); -+ if (ecc == 0) //clean page. -+ return 0; -+ e = (char*)&ecc; -+ for (i=0; ireadback_buffers, page, FLAG_NONE); -+ if (ret != 0) //double comfirm -+ ret = nfc_read_page(ra, ra->readback_buffers, page, FLAG_NONE); -+ -+ if (ret != 0) { -+ printk("%s: mode:%x read back fail \n", __func__, mode); -+ return -1; -+ } -+ return memcmp(buf, ra->readback_buffers, 1<page_shift); -+ } -+ -+ if (mode == FL_READING) { -+#if 0 -+ if (ra->sandbox_page == 0) -+ return 0; -+ -+ ret = nfc_write_page(ra, buf, ra->sandbox_page, FLAG_USE_GDMA | FLAG_ECC_EN); -+ if (ret != 0) { -+ printk("%s, fail write sandbox_page \n", __func__); -+ return -1; -+ } -+#else -+ /** @note: -+ * The following command is actually not 'write' command to drive NFC to write flash. -+ * However, it can make NFC to calculate ECC, that will be used to compare with original ones. -+ * --YT -+ */ -+ unsigned int conf = 0x001223| (CFG_ADDR_CYCLE<<16) | (0x200 << 20) | (1<<3) | (1<<2); -+ _nfc_write_raw_data(0xff, 0xff, ra->sandbox_page<page_shift, conf, buf, 0x200, FLAG_USE_GDMA); -+#endif -+ -+ ecc = ra_inl(NFC_ECC); -+ if (ecc == 0) //clean page. -+ return 0; -+ e = (char*)&ecc; -+ p = buf + (1<page_shift); -+ for (i=0; i 0) { -+ int len; -+#if defined (WORKAROUND_RX_BUF_OV) -+ len = min(60, size); -+#else -+ len = size; -+#endif -+ bus_addr = (page << (CFG_COLUMN_ADDR_CYCLE*8)) | (offs & ((1<> (CFG_COLUMN_ADDR_CYCLE*8); -+ cmd1 = 0x0; -+ cmd2 = 0x30; -+ conf = 0x000511| ((CFG_ADDR_CYCLE)<<16) | (len << 20); -+ } -+ else { -+ if (offs & ~(CFG_PAGESIZE-1)) -+ cmd1 = 0x50; -+ else if (offs & ~((1<buffers_page = -1; //cached -+ } -+ -+ return 0; -+} -+ -+ -+/** -+ * @return -EIO, fail to write -+ * @return 0, OK -+ */ -+int nfc_write_page(struct ra_nand_chip *ra, char *buf, int page, int flags) -+{ -+ unsigned int cmd1 = 0, cmd3, conf = 0; -+ unsigned int bus_addr = 0, bus_addr2 = 0; -+ unsigned int ecc_en; -+ int use_gdma; -+ int size; -+ char status; -+ uint8_t *oob = buf + (1<page_shift); -+ -+ use_gdma = flags & FLAG_USE_GDMA; -+ ecc_en = flags & FLAG_ECC_EN; -+ -+ oob[ra->badblockpos] = 0xff; //tag as good block. -+ ra->buffers_page = -1; //cached -+ -+ page = page & (CFG_CHIPSIZE-1); //chip boundary -+ size = CFG_PAGESIZE + CFG_PAGE_OOBSIZE; //add oobsize -+ bus_addr = (page << (CFG_COLUMN_ADDR_CYCLE*8)); //write_page always write from offset 0. 
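Throughout these helpers the NFC address is packed with the byte offset inside the page in the low CFG_COLUMN_ADDR_CYCLE address bytes and the page (row) number above them; large-page parts additionally spill the upper row bits into NFC_ADDR2. A minimal standalone sketch of that packing, assuming one column-address cycle as on 512-byte-page chips (the helper name and sample values are illustrative):

#include <stdio.h>

/* Pack a row (page) number and a column (in-page offset) into one bus address. */
static unsigned int pack_bus_addr(unsigned int page, unsigned int offs,
				  unsigned int column_cycles)
{
	unsigned int column_bits = column_cycles * 8;

	return (page << column_bits) | (offs & ((1u << column_bits) - 1));
}

int main(void)
{
	/* page 0x123, offset 0x10, one column cycle -> prints 0x12310 */
	printf("0x%x\n", pack_bus_addr(0x123, 0x10, 1));
	return 0;
}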
-+ -+ if (is_nand_page_2048) { -+ bus_addr2 = page >> (CFG_COLUMN_ADDR_CYCLE*8); -+ cmd1 = 0x80; -+ cmd3 = 0x10; -+ conf = 0x001123| ((CFG_ADDR_CYCLE)<<16) | (size << 20); -+ } -+ else { -+ cmd1 = 0x8000; -+ cmd3 = 0x10; -+ conf = 0x001223| ((CFG_ADDR_CYCLE)<<16) | (size << 20); -+} -+ if (ecc_en) -+ conf |= (1<<3); //enable ecc -+ if (use_gdma) -+ conf |= (1<<2); -+ -+ // set NFC -+ ra_dbg("nfc_write_page: cmd1: %x, cmd3: %x bus_addr: %x, conf: %x, len:%x\n", -+ cmd1, cmd3, bus_addr, conf, size); -+ -+ status = _nfc_write_raw_data(cmd1, cmd3, bus_addr, bus_addr2, conf, buf, size, flags); -+ if (status & NAND_STATUS_FAIL) { -+ printk("%s: fail \n", __func__); -+ return -EIO; -+ } -+ -+ -+ if (flags & FLAG_VERIFY) { // verify and correct ecc -+ status = nfc_ecc_verify(ra, buf, page, FL_WRITING); -+ -+#ifdef RANDOM_GEN_BAD_BLOCK -+ if (((random32() & 0x1ff) == 0x0) && (page >= 0x100)) // randomly create bad block -+ { -+ printk("hmm... create a bad block at page %x\n", (bus_addr >> 16)); -+ status = -1; -+ } -+#endif -+ -+ if (status != 0) { -+ printk("%s: ecc_verify fail: ret:%x \n", __func__, status); -+ oob[ra->badblockpos] = 0x33; -+ page -= page % (CFG_BLOCKSIZE/CFG_PAGESIZE); -+ printk("create a bad block at page %x\n", page); -+ if (!is_nand_page_2048) -+ status = nfc_write_oob(ra, page, ra->badblockpos, oob+ra->badblockpos, 1, flags); -+ else -+ { -+ status = _nfc_write_raw_data(cmd1, cmd3, bus_addr, bus_addr2, conf, buf, size, flags); -+ nfc_write_oob(ra, page, 0, oob, 16, FLAG_NONE); -+ } -+ return -EBADMSG; -+ } -+ } -+ -+ -+ ra->buffers_page = page; //cached -+ return 0; -+} -+ -+ -+ -+/************************************************************* -+ * nand internal process -+ *************************************************************/ -+ -+/** -+ * nand_release_device - [GENERIC] release chip -+ * @mtd: MTD device structure -+ * -+ * Deselect, release chip lock and wake up anyone waiting on the device -+ */ -+static void nand_release_device(struct ra_nand_chip *ra) -+{ -+ /* De-select the NAND device */ -+ nfc_select_chip(ra, -1); -+ -+ /* Release the controller and the chip */ -+ ra->state = FL_READY; -+ -+ mutex_unlock(ra->controller); -+} -+ -+/** -+ * nand_get_device - [GENERIC] Get chip for selected access -+ * @chip: the nand chip descriptor -+ * @mtd: MTD device structure -+ * @new_state: the state which is requested -+ * -+ * Get the device and lock it for exclusive access -+ */ -+static int -+nand_get_device(struct ra_nand_chip *ra, int new_state) -+{ -+ int ret = 0; -+ -+ ret = mutex_lock_interruptible(ra->controller); -+ if (!ret) -+ ra->state = new_state; -+ -+ return ret; -+ -+} -+ -+ -+ -+/************************************************************* -+ * nand internal process -+ *************************************************************/ -+ -+int nand_bbt_get(struct ra_nand_chip *ra, int block) -+{ -+ int byte, bits; -+ bits = block * BBTTAG_BITS; -+ -+ byte = bits / 8; -+ bits = bits % 8; -+ -+ return (ra->bbt[byte] >> bits) & BBTTAG_BITS_MASK; -+} -+ -+int nand_bbt_set(struct ra_nand_chip *ra, int block, int tag) -+{ -+ int byte, bits; -+ bits = block * BBTTAG_BITS; -+ -+ byte = bits / 8; -+ bits = bits % 8; -+ -+ // If previous tag is bad, dont overwrite it -+ if (((ra->bbt[byte] >> bits) & BBTTAG_BITS_MASK) == BBT_TAG_BAD) -+ { -+ return BBT_TAG_BAD; -+ } -+ -+ ra->bbt[byte] = (ra->bbt[byte] & ~(BBTTAG_BITS_MASK << bits)) | ((tag & BBTTAG_BITS_MASK) << bits); -+ -+ return tag; -+} -+ -+/** -+ * nand_block_checkbad - [GENERIC] Check if a block is marked 
bad -+ * @mtd: MTD device structure -+ * @ofs: offset from device start -+ * -+ * Check, if the block is bad. Either by reading the bad block table or -+ * calling of the scan function. -+ */ -+int nand_block_checkbad(struct ra_nand_chip *ra, loff_t offs) -+{ -+ int page, block; -+ int ret = 4; -+ unsigned int tag; -+ char *str[]= {"UNK", "RES", "BAD", "GOOD"}; -+ -+ if (ranfc_bbt == 0) -+ return 0; -+ -+ { -+ // align with chip -+ -+ offs = offs & ((1<chip_shift) -1); -+ -+ page = offs >> ra->page_shift; -+ block = offs >> ra->erase_shift; -+ } -+ -+ tag = nand_bbt_get(ra, block); -+ -+ if (tag == BBT_TAG_UNKNOWN) { -+ ret = nfc_read_oob(ra, page, ra->badblockpos, (char*)&tag, 1, FLAG_NONE); -+ if (ret == 0) -+ tag = ((le32_to_cpu(tag) & 0x0ff) == 0x0ff) ? BBT_TAG_GOOD : BBT_TAG_BAD; -+ else -+ tag = BBT_TAG_BAD; -+ -+ nand_bbt_set(ra, block, tag); -+ } -+ -+ if (tag != BBT_TAG_GOOD) { -+ printk("%s: offs:%x tag: %s \n", __func__, (unsigned int)offs, str[tag]); -+ return 1; -+ } -+ else -+ return 0; -+ -+} -+ -+ -+ -+/** -+ * nand_block_markbad - -+ */ -+int nand_block_markbad(struct ra_nand_chip *ra, loff_t offs) -+{ -+ int page, block; -+ int ret = 4; -+ unsigned int tag; -+ char *ecc; -+ -+ // align with chip -+ ra_dbg("%s offs: %x \n", __func__, (int)offs); -+ -+ offs = offs & ((1<chip_shift) -1); -+ -+ page = offs >> ra->page_shift; -+ block = offs >> ra->erase_shift; -+ -+ tag = nand_bbt_get(ra, block); -+ -+ if (tag == BBT_TAG_BAD) { -+ printk("%s: mark repeatedly \n", __func__); -+ return 0; -+ } -+ -+ // new tag as bad -+ tag =BBT_TAG_BAD; -+ ret = nfc_read_page(ra, ra->buffers, page, FLAG_NONE); -+ if (ret != 0) { -+ printk("%s: fail to read bad block tag \n", __func__); -+ goto tag_bbt; -+ } -+ -+ ecc = &ra->buffers[(1<page_shift)+ra->badblockpos]; -+ if (*ecc == (char)0x0ff) { -+ //tag into flash -+ *ecc = (char)tag; -+ ret = nfc_write_page(ra, ra->buffers, page, FLAG_USE_GDMA); -+ if (ret) -+ printk("%s: fail to write bad block tag \n", __func__); -+ -+ } -+ -+tag_bbt: -+ //update bbt -+ nand_bbt_set(ra, block, tag); -+ -+ return 0; -+} -+ -+ -+#if defined (WORKAROUND_RX_BUF_OV) -+/** -+ * to find a bad block for ecc verify of read_page -+ */ -+unsigned int nand_bbt_find_sandbox(struct ra_nand_chip *ra) -+{ -+ loff_t offs = 0; -+ int chipsize = 1 << ra->chip_shift; -+ int blocksize = 1 << ra->erase_shift; -+ -+ -+ while (offs < chipsize) { -+ if (nand_block_checkbad(ra, offs)) //scan and verify the unknown tag -+ break; -+ offs += blocksize; -+ } -+ -+ if (offs >= chipsize) { -+ offs = chipsize - blocksize; -+ } -+ -+ nand_bbt_set(ra, (unsigned int)offs>>ra->erase_shift, BBT_TAG_RES); // tag bbt only, instead of update badblockpos of flash. 
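The bad-block table consulted by nand_bbt_get()/nand_bbt_set() above keeps one 2-bit tag per block packed into a byte array; the encoding implied by the str[] lookup in nand_block_checkbad() is 0 = UNK, 1 = RES, 2 = BAD, 3 = GOOD. A minimal standalone sketch of that packing (names and the sample block number are illustrative):

#include <stdio.h>

#define TAG_BITS 2
#define TAG_MASK ((1 << TAG_BITS) - 1)

/* Read the 2-bit tag of a block out of the packed table. */
static int bbt_get(const unsigned char *bbt, int block)
{
	int bits = block * TAG_BITS;

	return (bbt[bits / 8] >> (bits % 8)) & TAG_MASK;
}

/* Overwrite the 2-bit tag of a block (the driver additionally refuses to
 * clear a BAD tag; that check is omitted here). */
static void bbt_set(unsigned char *bbt, int block, int tag)
{
	int bits = block * TAG_BITS;

	bbt[bits / 8] &= ~(TAG_MASK << (bits % 8));
	bbt[bits / 8] |= (tag & TAG_MASK) << (bits % 8);
}

int main(void)
{
	unsigned char bbt[2] = { 0 };	/* tags for 8 blocks */

	bbt_set(bbt, 5, 2);		/* tag block 5 as BAD */
	printf("block 5 tag = %d\n", bbt_get(bbt, 5));	/* prints 2 */
	return 0;
}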
-+ return (offs >> ra->page_shift); -+} -+#endif -+ -+ -+ -+/** -+ * nand_erase_nand - [Internal] erase block(s) -+ * @mtd: MTD device structure -+ * @instr: erase instruction -+ * @allowbbt: allow erasing the bbt area -+ * -+ * Erase one ore more blocks -+ */ -+int _nand_erase_nand(struct ra_nand_chip *ra, struct erase_info *instr) -+{ -+ int page, len, status, ret; -+ unsigned int addr, blocksize = 1<erase_shift; -+ -+ ra_dbg("%s: start:%x, len:%x \n", __func__, -+ (unsigned int)instr->addr, (unsigned int)instr->len); -+ -+//#define BLOCK_ALIGNED(a) ((a) & (blocksize - 1)) // already defined -+ -+ if (BLOCK_ALIGNED(instr->addr) || BLOCK_ALIGNED(instr->len)) { -+ ra_dbg("%s: erase block not aligned, addr:%x len:%x\n", __func__, instr->addr, instr->len); -+ return -EINVAL; -+ } -+ -+ instr->fail_addr = 0xffffffff; -+ -+ len = instr->len; -+ addr = instr->addr; -+ instr->state = MTD_ERASING; -+ -+ while (len) { -+ -+ page = (int)(addr >> ra->page_shift); -+ -+ /* select device and check wp */ -+ if (nfc_enable_chip(ra, addr, 0)) { -+ printk("%s: nand is write protected \n", __func__); -+ instr->state = MTD_ERASE_FAILED; -+ goto erase_exit; -+ } -+ -+ /* if we have a bad block, we do not erase bad blocks */ -+ if (nand_block_checkbad(ra, addr)) { -+ printk(KERN_WARNING "nand_erase: attempt to erase a " -+ "bad block at 0x%08x\n", addr); -+ instr->state = MTD_ERASE_FAILED; -+ goto erase_exit; -+ } -+ -+ /* -+ * Invalidate the page cache, if we erase the block which -+ * contains the current cached page -+ */ -+ if (BLOCK_ALIGNED(addr) == BLOCK_ALIGNED(ra->buffers_page << ra->page_shift)) -+ ra->buffers_page = -1; -+ -+ status = nfc_erase_block(ra, page); -+ /* See if block erase succeeded */ -+ if (status) { -+ printk("%s: failed erase, page 0x%08x\n", __func__, page); -+ instr->state = MTD_ERASE_FAILED; -+ instr->fail_addr = (page << ra->page_shift); -+ goto erase_exit; -+ } -+ -+ -+ /* Increment page address and decrement length */ -+ len -= blocksize; -+ addr += blocksize; -+ -+ } -+ instr->state = MTD_ERASE_DONE; -+ -+erase_exit: -+ -+ ret = ((instr->state == MTD_ERASE_DONE) ? 0 : -EIO); -+ /* Do call back function */ -+ if (!ret) -+ mtd_erase_callback(instr); -+ -+ if (ret) { -+ nand_bbt_set(ra, addr >> ra->erase_shift, BBT_TAG_BAD); -+ } -+ -+ /* Return more or less happy */ -+ return ret; -+} -+ -+static int -+nand_write_oob_buf(struct ra_nand_chip *ra, uint8_t *buf, uint8_t *oob, size_t size, -+ int mode, int ooboffs) -+{ -+ size_t oobsize = 1<oob_shift; -+ struct nand_oobfree *free; -+ uint32_t woffs = ooboffs; -+ int retsize = 0; -+ -+ ra_dbg("%s: size:%x, mode:%x, offs:%x \n", __func__, size, mode, ooboffs); -+ -+ switch(mode) { -+ case MTD_OPS_PLACE_OOB: -+ case MTD_OPS_RAW: -+ if (ooboffs > oobsize) -+ return -1; -+ -+ size = min(size, oobsize - ooboffs); -+ memcpy(buf + ooboffs, oob, size); -+ retsize = size; -+ break; -+ -+ case MTD_OPS_AUTO_OOB: -+ if (ooboffs > ra->oob->oobavail) -+ return -1; -+ -+ while (size) { -+ for(free = ra->oob->oobfree; free->length && size; free++) { -+ int wlen = free->length - woffs; -+ int bytes = 0; -+ -+ /* Write request not from offset 0 ? 
*/ -+ if (wlen <= 0) { -+ woffs = -wlen; -+ continue; -+ } -+ -+ bytes = min_t(size_t, size, wlen); -+ memcpy (buf + free->offset + woffs, oob, bytes); -+ woffs = 0; -+ oob += bytes; -+ size -= bytes; -+ retsize += bytes; -+ } -+ buf += oobsize; -+ } -+ break; -+ -+ default: -+ BUG(); -+ } -+ -+ return retsize; -+} -+ -+static int nand_read_oob_buf(struct ra_nand_chip *ra, uint8_t *oob, size_t size, -+ int mode, int ooboffs) -+{ -+ size_t oobsize = 1<oob_shift; -+ uint8_t *buf = ra->buffers + (1<page_shift); -+ int retsize=0; -+ -+ ra_dbg("%s: size:%x, mode:%x, offs:%x \n", __func__, size, mode, ooboffs); -+ -+ switch(mode) { -+ case MTD_OPS_PLACE_OOB: -+ case MTD_OPS_RAW: -+ if (ooboffs > oobsize) -+ return -1; -+ -+ size = min(size, oobsize - ooboffs); -+ memcpy(oob, buf + ooboffs, size); -+ return size; -+ -+ case MTD_OPS_AUTO_OOB: { -+ struct nand_oobfree *free; -+ uint32_t woffs = ooboffs; -+ -+ if (ooboffs > ra->oob->oobavail) -+ return -1; -+ -+ size = min(size, ra->oob->oobavail - ooboffs); -+ for(free = ra->oob->oobfree; free->length && size; free++) { -+ int wlen = free->length - woffs; -+ int bytes = 0; -+ -+ /* Write request not from offset 0 ? */ -+ if (wlen <= 0) { -+ woffs = -wlen; -+ continue; -+ } -+ -+ bytes = min_t(size_t, size, wlen); -+ memcpy (oob, buf + free->offset + woffs, bytes); -+ woffs = 0; -+ oob += bytes; -+ size -= bytes; -+ retsize += bytes; -+ } -+ return retsize; -+ } -+ default: -+ BUG(); -+ } -+ -+ return -1; -+} -+ -+/** -+ * nand_do_write_ops - [Internal] NAND write with ECC -+ * @mtd: MTD device structure -+ * @to: offset to write to -+ * @ops: oob operations description structure -+ * -+ * NAND write with ECC -+ */ -+static int nand_do_write_ops(struct ra_nand_chip *ra, loff_t to, -+ struct mtd_oob_ops *ops) -+{ -+ int page; -+ uint32_t datalen = ops->len; -+ uint32_t ooblen = ops->ooblen; -+ uint8_t *oob = ops->oobbuf; -+ uint8_t *data = ops->datbuf; -+ int pagesize = (1<page_shift); -+ int pagemask = (pagesize -1); -+ int oobsize = 1<oob_shift; -+ loff_t addr = to; -+ //int i = 0; //for ra_dbg only -+ -+ ra_dbg("%s: to:%x, ops data:%p, oob:%p datalen:%x ooblen:%x, ooboffs:%x oobmode:%x \n", -+ __func__, (unsigned int)to, data, oob, datalen, ooblen, ops->ooboffs, ops->mode); -+ -+ ops->retlen = 0; -+ ops->oobretlen = 0; -+ -+ -+ /* Invalidate the page cache, when we write to the cached page */ -+ ra->buffers_page = -1; -+ -+ -+ if (data ==0) -+ datalen = 0; -+ -+ // oob sequential (burst) write -+ if (datalen == 0 && ooblen) { -+ int len = ((ooblen + ops->ooboffs) + (ra->oob->oobavail - 1)) / ra->oob->oobavail * oobsize; -+ -+ /* select chip, and check if it is write protected */ -+ if (nfc_enable_chip(ra, addr, 0)) -+ return -EIO; -+ -+ //FIXME, need sanity check of block boundary -+ page = (int)((to & ((1<chip_shift)-1)) >> ra->page_shift); //chip boundary -+ memset(ra->buffers, 0x0ff, pagesize); -+ //fixme, should we reserve the original content? -+ if (ops->mode == MTD_OPS_AUTO_OOB) { -+ nfc_read_oob(ra, page, 0, ra->buffers, len, FLAG_NONE); -+ } -+ //prepare buffers -+ if (ooblen != 8) -+ { -+ nand_write_oob_buf(ra, ra->buffers, oob, ooblen, ops->mode, ops->ooboffs); -+ // write out buffer to chip -+ nfc_write_oob(ra, page, 0, ra->buffers, len, FLAG_USE_GDMA); -+ } -+ -+ ops->oobretlen = ooblen; -+ ooblen = 0; -+ } -+ -+ // data sequential (burst) write -+ if (datalen && ooblen == 0) { -+ // ranfc can not support write_data_burst, since hw-ecc and fifo constraints.. 
-+ } -+ -+ // page write -+ while(datalen || ooblen) { -+ int len; -+ int ret; -+ int offs; -+ int ecc_en = 0; -+ -+ ra_dbg("%s (%d): addr:%x, ops data:%p, oob:%p datalen:%x ooblen:%x, ooboffs:%x \n", -+ __func__, i++, (unsigned int)addr, data, oob, datalen, ooblen, ops->ooboffs); -+ -+ page = (int)((addr & ((1<chip_shift)-1)) >> ra->page_shift); //chip boundary -+ -+ /* select chip, and check if it is write protected */ -+ if (nfc_enable_chip(ra, addr, 0)) -+ return -EIO; -+ -+ // oob write -+ if (ops->mode == MTD_OPS_AUTO_OOB) { -+ //fixme, this path is not yet varified -+ nfc_read_oob(ra, page, 0, ra->buffers + pagesize, oobsize, FLAG_NONE); -+ } -+ if (oob && ooblen > 0) { -+ len = nand_write_oob_buf(ra, ra->buffers + pagesize, oob, ooblen, ops->mode, ops->ooboffs); -+ if (len < 0) -+ return -EINVAL; -+ -+ oob += len; -+ ops->oobretlen += len; -+ ooblen -= len; -+ } -+ -+ // data write -+ offs = addr & pagemask; -+ len = min_t(size_t, datalen, pagesize - offs); -+ if (data && len > 0) { -+ memcpy(ra->buffers + offs, data, len); // we can not sure ops->buf wether is DMA-able. -+ -+ data += len; -+ datalen -= len; -+ ops->retlen += len; -+ -+ ecc_en = FLAG_ECC_EN; -+ } -+ ret = nfc_write_page(ra, ra->buffers, page, FLAG_USE_GDMA | FLAG_VERIFY | -+ ((ops->mode == MTD_OPS_RAW || ops->mode == MTD_OPS_PLACE_OOB) ? 0 : ecc_en )); -+ if (ret) { -+ nand_bbt_set(ra, addr >> ra->erase_shift, BBT_TAG_BAD); -+ return ret; -+ } -+ -+ nand_bbt_set(ra, addr >> ra->erase_shift, BBT_TAG_GOOD); -+ -+ addr = (page+1) << ra->page_shift; -+ -+ } -+ return 0; -+} -+ -+/** -+ * nand_do_read_ops - [Internal] Read data with ECC -+ * -+ * @mtd: MTD device structure -+ * @from: offset to read from -+ * @ops: oob ops structure -+ * -+ * Internal function. Called with chip held. -+ */ -+static int nand_do_read_ops(struct ra_nand_chip *ra, loff_t from, -+ struct mtd_oob_ops *ops) -+{ -+ int page; -+ uint32_t datalen = ops->len; -+ uint32_t ooblen = ops->ooblen; -+ uint8_t *oob = ops->oobbuf; -+ uint8_t *data = ops->datbuf; -+ int pagesize = (1<page_shift); -+ int pagemask = (pagesize -1); -+ loff_t addr = from; -+ //int i = 0; //for ra_dbg only -+ -+ ra_dbg("%s: addr:%x, ops data:%p, oob:%p datalen:%x ooblen:%x, ooboffs:%x \n", -+ __func__, (unsigned int)addr, data, oob, datalen, ooblen, ops->ooboffs); -+ -+ ops->retlen = 0; -+ ops->oobretlen = 0; -+ if (data == 0) -+ datalen = 0; -+ -+ -+ while(datalen || ooblen) { -+ int len; -+ int ret; -+ int offs; -+ -+ ra_dbg("%s (%d): addr:%x, ops data:%p, oob:%p datalen:%x ooblen:%x, ooboffs:%x \n", -+ __func__, i++, (unsigned int)addr, data, oob, datalen, ooblen, ops->ooboffs); -+ /* select chip */ -+ if (nfc_enable_chip(ra, addr, 1) < 0) -+ return -EIO; -+ -+ page = (int)((addr & ((1<chip_shift)-1)) >> ra->page_shift); -+ -+ ret = nfc_read_page(ra, ra->buffers, page, FLAG_VERIFY | -+ ((ops->mode == MTD_OPS_RAW || ops->mode == MTD_OPS_PLACE_OOB) ? 0: FLAG_ECC_EN )); -+ //FIXME, something strange here, some page needs 2 more tries to guarantee read success. -+ if (ret) { -+ printk("read again:\n"); -+ ret = nfc_read_page(ra, ra->buffers, page, FLAG_VERIFY | -+ ((ops->mode == MTD_OPS_RAW || ops->mode == MTD_OPS_PLACE_OOB) ? 0: FLAG_ECC_EN )); -+ -+ if (ret) { -+ printk("read again fail \n"); -+ nand_bbt_set(ra, addr >> ra->erase_shift, BBT_TAG_BAD); -+ if ((ret != -EUCLEAN) && (ret != -EBADMSG)) { -+ return ret; -+ } -+ else { -+ /* ecc verification fail, but data need to be returned. 
*/ -+ } -+ } -+ else { -+ printk(" read agian susccess \n"); -+ } -+ } -+ -+ // oob read -+ if (oob && ooblen > 0) { -+ len = nand_read_oob_buf(ra, oob, ooblen, ops->mode, ops->ooboffs); -+ if (len < 0) { -+ printk("nand_read_oob_buf: fail return %x \n", len); -+ return -EINVAL; -+ } -+ -+ oob += len; -+ ops->oobretlen += len; -+ ooblen -= len; -+ } -+ -+ // data read -+ offs = addr & pagemask; -+ len = min_t(size_t, datalen, pagesize - offs); -+ if (data && len > 0) { -+ memcpy(data, ra->buffers + offs, len); // we can not sure ops->buf wether is DMA-able. -+ -+ data += len; -+ datalen -= len; -+ ops->retlen += len; -+ if (ret) -+ return ret; -+ } -+ -+ -+ nand_bbt_set(ra, addr >> ra->erase_shift, BBT_TAG_GOOD); -+ // address go further to next page, instead of increasing of length of write. This avoids some special cases wrong. -+ addr = (page+1) << ra->page_shift; -+ } -+ return 0; -+} -+ -+static int -+ramtd_nand_erase(struct mtd_info *mtd, struct erase_info *instr) -+{ -+ struct ra_nand_chip *ra = (struct ra_nand_chip *)mtd->priv; -+ int ret; -+ -+ ra_dbg("%s: start:%x, len:%x \n", __func__, -+ (unsigned int)instr->addr, (unsigned int)instr->len); -+ -+ nand_get_device(ra, FL_ERASING); -+ ret = _nand_erase_nand((struct ra_nand_chip *)mtd->priv, instr); -+ nand_release_device(ra); -+ -+ return ret; -+} -+ -+static int -+ramtd_nand_write(struct mtd_info *mtd, loff_t to, size_t len, -+ size_t *retlen, const uint8_t *buf) -+{ -+ struct ra_nand_chip *ra = mtd->priv; -+ struct mtd_oob_ops ops; -+ int ret; -+ -+ ra_dbg("%s: to 0x%x len=0x%x\n", __func__, to, len); -+ -+ if ((to + len) > mtd->size) -+ return -EINVAL; -+ -+ if (!len) -+ return 0; -+ -+ nand_get_device(ra, FL_WRITING); -+ -+ memset(&ops, 0, sizeof(ops)); -+ ops.len = len; -+ ops.datbuf = (uint8_t *)buf; -+ ops.oobbuf = NULL; -+ ops.mode = MTD_OPS_AUTO_OOB; -+ -+ ret = nand_do_write_ops(ra, to, &ops); -+ -+ *retlen = ops.retlen; -+ -+ nand_release_device(ra); -+ -+ return ret; -+} -+ -+static int -+ramtd_nand_read(struct mtd_info *mtd, loff_t from, size_t len, -+ size_t *retlen, uint8_t *buf) -+{ -+ -+ struct ra_nand_chip *ra = mtd->priv; -+ int ret; -+ struct mtd_oob_ops ops; -+ -+ ra_dbg("%s: mtd:%p from:%x, len:%x, buf:%p \n", __func__, mtd, (unsigned int)from, len, buf); -+ -+ /* Do not allow reads past end of device */ -+ if ((from + len) > mtd->size) -+ return -EINVAL; -+ if (!len) -+ return 0; -+ -+ nand_get_device(ra, FL_READING); -+ -+ memset(&ops, 0, sizeof(ops)); -+ ops.len = len; -+ ops.datbuf = buf; -+ ops.oobbuf = NULL; -+ ops.mode = MTD_OPS_AUTO_OOB; -+ -+ ret = nand_do_read_ops(ra, from, &ops); -+ -+ *retlen = ops.retlen; -+ -+ nand_release_device(ra); -+ -+ return ret; -+ -+} -+ -+static int -+ramtd_nand_readoob(struct mtd_info *mtd, loff_t from, -+ struct mtd_oob_ops *ops) -+{ -+ struct ra_nand_chip *ra = mtd->priv; -+ int ret; -+ -+ ra_dbg("%s: \n", __func__); -+ -+ nand_get_device(ra, FL_READING); -+ -+ ret = nand_do_read_ops(ra, from, ops); -+ -+ nand_release_device(ra); -+ -+ return ret; -+} -+ -+static int -+ramtd_nand_writeoob(struct mtd_info *mtd, loff_t to, -+ struct mtd_oob_ops *ops) -+{ -+ struct ra_nand_chip *ra = mtd->priv; -+ int ret; -+ -+ nand_get_device(ra, FL_READING); -+ ret = nand_do_write_ops(ra, to, ops); -+ nand_release_device(ra); -+ -+ return ret; -+} -+ -+static int -+ramtd_nand_block_isbad(struct mtd_info *mtd, loff_t offs) -+{ -+ if (offs > mtd->size) -+ return -EINVAL; -+ -+ return nand_block_checkbad((struct ra_nand_chip *)mtd->priv, offs); -+} -+ -+static int 
-+ramtd_nand_block_markbad(struct mtd_info *mtd, loff_t ofs) -+{ -+ struct ra_nand_chip *ra = mtd->priv; -+ int ret; -+ -+ ra_dbg("%s: \n", __func__); -+ nand_get_device(ra, FL_WRITING); -+ ret = nand_block_markbad(ra, ofs); -+ nand_release_device(ra); -+ -+ return ret; -+} -+ -+// 1-bit error detection -+static int one_bit_correction(char *ecc1, char *ecc2, int *bytes, int *bits) -+{ -+ // check if ecc and expected are all valid -+ char *p, nibble, crumb; -+ int i, xor, iecc1 = 0, iecc2 = 0; -+ -+ printk("correction : %x %x %x\n", ecc1[0], ecc1[1], ecc1[2]); -+ printk("correction : %x %x %x\n", ecc2[0], ecc2[1], ecc2[2]); -+ -+ p = (char *)ecc1; -+ for (i = 0; i < CONFIG_ECC_BYTES; i++) -+ { -+ nibble = *(p+i) & 0xf; -+ if ((nibble != 0x0) && (nibble != 0xf) && (nibble != 0x3) && (nibble != 0xc) && -+ (nibble != 0x5) && (nibble != 0xa) && (nibble != 0x6) && (nibble != 0x9)) -+ return -1; -+ nibble = ((*(p+i)) >> 4) & 0xf; -+ if ((nibble != 0x0) && (nibble != 0xf) && (nibble != 0x3) && (nibble != 0xc) && -+ (nibble != 0x5) && (nibble != 0xa) && (nibble != 0x6) && (nibble != 0x9)) -+ return -1; -+ } -+ -+ p = (char *)ecc2; -+ for (i = 0; i < CONFIG_ECC_BYTES; i++) -+ { -+ nibble = *(p+i) & 0xf; -+ if ((nibble != 0x0) && (nibble != 0xf) && (nibble != 0x3) && (nibble != 0xc) && -+ (nibble != 0x5) && (nibble != 0xa) && (nibble != 0x6) && (nibble != 0x9)) -+ return -1; -+ nibble = ((*(p+i)) >> 4) & 0xf; -+ if ((nibble != 0x0) && (nibble != 0xf) && (nibble != 0x3) && (nibble != 0xc) && -+ (nibble != 0x5) && (nibble != 0xa) && (nibble != 0x6) && (nibble != 0x9)) -+ return -1; -+ } -+ -+ memcpy(&iecc1, ecc1, 3); -+ memcpy(&iecc2, ecc2, 3); -+ -+ xor = iecc1 ^ iecc2; -+ printk("xor = %x (%x %x)\n", xor, iecc1, iecc2); -+ -+ *bytes = 0; -+ for (i = 0; i < 9; i++) -+ { -+ crumb = (xor >> (2*i)) & 0x3; -+ if ((crumb == 0x0) || (crumb == 0x3)) -+ return -1; -+ if (crumb == 0x2) -+ *bytes += (1 << i); -+ } -+ -+ *bits = 0; -+ for (i = 0; i < 3; i++) -+ { -+ crumb = (xor >> (18 + 2*i)) & 0x3; -+ if ((crumb == 0x0) || (crumb == 0x3)) -+ return -1; -+ if (crumb == 0x2) -+ *bits += (1 << i); -+ } -+ -+ return 0; -+} -+ -+ -+ -+/************************************************************ -+ * the init/exit section. -+ */ -+ -+static struct nand_ecclayout ra_oob_layout = { -+ .eccbytes = CONFIG_ECC_BYTES, -+ .eccpos = {5, 6, 7}, -+ .oobfree = { -+ {.offset = 0, .length = 4}, -+ {.offset = 8, .length = 8}, -+ {.offset = 0, .length = 0} -+ }, -+#define RA_CHIP_OOB_AVAIL (4+8) -+ .oobavail = RA_CHIP_OOB_AVAIL, -+ // 5th byte is bad-block flag. -+}; -+ -+static int -+mtk_nand_probe(struct platform_device *pdev) -+{ -+ struct mtd_part_parser_data ppdata; -+ struct ra_nand_chip *ra; -+ int alloc_size, bbt_size, buffers_size, reg, err; -+ unsigned char chip_mode = 12; -+ -+/* if(ra_check_flash_type()!=BOOT_FROM_NAND) { -+ return 0; -+ }*/ -+ -+ //FIXME: config 512 or 2048-byte page according to HWCONF -+#if defined (CONFIG_RALINK_RT6855A) -+ reg = ra_inl(RALINK_SYSCTL_BASE+0x8c); -+ chip_mode = ((reg>>28) & 0x3)|(((reg>>22) & 0x3)<<2); -+ if (chip_mode == 1) { -+ printk("! nand 2048\n"); -+ ra_or(NFC_CONF1, 1); -+ is_nand_page_2048 = 1; -+ nand_addrlen = 5; -+ } -+ else { -+ printk("! 
nand 512\n"); -+ ra_and(NFC_CONF1, ~1); -+ is_nand_page_2048 = 0; -+ nand_addrlen = 4; -+ } -+#elif (defined (CONFIG_RALINK_MT7620) || defined (CONFIG_RALINK_RT6855)) -+ ra_outl(RALINK_SYSCTL_BASE+0x60, ra_inl(RALINK_SYSCTL_BASE+0x60) & ~(0x3<<18)); -+ reg = ra_inl(RALINK_SYSCTL_BASE+0x10); -+ chip_mode = (reg & 0x0F); -+ if((chip_mode==1)||(chip_mode==11)) { -+ ra_or(NFC_CONF1, 1); -+ is_nand_page_2048 = 1; -+ nand_addrlen = ((chip_mode!=11) ? 4 : 5); -+ printk("!!! nand page size = 2048, addr len=%d\n", nand_addrlen); -+ } -+ else { -+ ra_and(NFC_CONF1, ~1); -+ is_nand_page_2048 = 0; -+ nand_addrlen = ((chip_mode!=10) ? 3 : 4); -+ printk("!!! nand page size = 512, addr len=%d\n", nand_addrlen); -+ } -+#else -+ is_nand_page_2048 = 0; -+ nand_addrlen = 3; -+ printk("!!! nand page size = 512, addr len=%d\n", nand_addrlen); -+#endif -+ -+#if defined (CONFIG_RALINK_RT6855A) || defined (CONFIG_RALINK_MT7620) || defined (CONFIG_RALINK_RT6855) -+ //config ECC location -+ ra_and(NFC_CONF1, 0xfff000ff); -+ ra_or(NFC_CONF1, ((CONFIG_ECC_OFFSET + 2) << 16) + -+ ((CONFIG_ECC_OFFSET + 1) << 12) + -+ (CONFIG_ECC_OFFSET << 8)); -+#endif -+ -+#define ALIGNE_16(a) (((unsigned long)(a)+15) & ~15) -+ buffers_size = ALIGNE_16((1<buffers -+ bbt_size = BBTTAG_BITS * (1<<(CONFIG_CHIP_SIZE_BIT - (CONFIG_PAGE_SIZE_BIT + CONFIG_NUMPAGE_PER_BLOCK_BIT))) / 8; //ra->bbt -+ bbt_size = ALIGNE_16(bbt_size); -+ -+ alloc_size = buffers_size + bbt_size; -+ alloc_size += buffers_size; //for ra->readback_buffers -+ alloc_size += sizeof(*ra); -+ alloc_size += sizeof(*ranfc_mtd); -+ -+ //make sure gpio-0 is input -+ ra_outl(RALINK_PIO_BASE+0x24, ra_inl(RALINK_PIO_BASE+0x24) & ~0x01); -+ -+ ra = (struct ra_nand_chip *)kzalloc(alloc_size, GFP_KERNEL | GFP_DMA); -+ if (!ra) { -+ printk("%s: mem alloc fail \n", __func__); -+ return -ENOMEM; -+ } -+ memset(ra, 0, alloc_size); -+ -+ //dynamic -+ ra->buffers = (char *)((char *)ra + sizeof(*ra)); -+ ra->readback_buffers = ra->buffers + buffers_size; -+ ra->bbt = ra->readback_buffers + buffers_size; -+ ranfc_mtd = (struct mtd_info *)(ra->bbt + bbt_size); -+ -+ //static -+ ra->numchips = CONFIG_NUMCHIPS; -+ ra->chip_shift = CONFIG_CHIP_SIZE_BIT; -+ ra->page_shift = CONFIG_PAGE_SIZE_BIT; -+ ra->oob_shift = CONFIG_OOBSIZE_PER_PAGE_BIT; -+ ra->erase_shift = (CONFIG_PAGE_SIZE_BIT + CONFIG_NUMPAGE_PER_BLOCK_BIT); -+ ra->badblockpos = CONFIG_BAD_BLOCK_POS; -+ ra_oob_layout.eccpos[0] = CONFIG_ECC_OFFSET; -+ ra_oob_layout.eccpos[1] = CONFIG_ECC_OFFSET + 1; -+ ra_oob_layout.eccpos[2] = CONFIG_ECC_OFFSET + 2; -+ ra->oob = &ra_oob_layout; -+ ra->buffers_page = -1; -+ -+#if defined (WORKAROUND_RX_BUF_OV) -+ if (ranfc_verify) { -+ ra->sandbox_page = nand_bbt_find_sandbox(ra); -+ } -+#endif -+ ra_outl(NFC_CTRL, ra_inl(NFC_CTRL) | 0x01); //set wp to high -+ nfc_all_reset(); -+ -+ ranfc_mtd->type = MTD_NANDFLASH; -+ ranfc_mtd->flags = MTD_CAP_NANDFLASH; -+ ranfc_mtd->size = CONFIG_NUMCHIPS * CFG_CHIPSIZE; -+ ranfc_mtd->erasesize = CFG_BLOCKSIZE; -+ ranfc_mtd->writesize = CFG_PAGESIZE; -+ ranfc_mtd->oobsize = CFG_PAGE_OOBSIZE; -+ ranfc_mtd->oobavail = RA_CHIP_OOB_AVAIL; -+ ranfc_mtd->name = "ra_nfc"; -+ //ranfc_mtd->index -+ ranfc_mtd->ecclayout = &ra_oob_layout; -+ //ranfc_mtd->numberaseregions -+ //ranfc_mtd->eraseregions -+ //ranfc_mtd->bansize -+ ranfc_mtd->_erase = ramtd_nand_erase; -+ //ranfc_mtd->point -+ //ranfc_mtd->unpoint -+ ranfc_mtd->_read = ramtd_nand_read; -+ ranfc_mtd->_write = ramtd_nand_write; -+ ranfc_mtd->_read_oob = ramtd_nand_readoob; -+ ranfc_mtd->_write_oob = ramtd_nand_writeoob; 
-+ //ranfc_mtd->get_fact_prot_info; ranfc_mtd->read_fact_prot_reg; -+ //ranfc_mtd->get_user_prot_info; ranfc_mtd->read_user_prot_reg; -+ //ranfc_mtd->write_user_prot_reg; ranfc_mtd->lock_user_prot_reg; -+ //ranfc_mtd->writev; ranfc_mtd->sync; ranfc_mtd->lock; ranfc_mtd->unlock; ranfc_mtd->suspend; ranfc_mtd->resume; -+ ranfc_mtd->_block_isbad = ramtd_nand_block_isbad; -+ ranfc_mtd->_block_markbad = ramtd_nand_block_markbad; -+ //ranfc_mtd->reboot_notifier -+ //ranfc_mtd->ecc_stats; -+ // subpage_sht; -+ -+ //ranfc_mtd->get_device; ranfc_mtd->put_device -+ ranfc_mtd->priv = ra; -+ -+ ranfc_mtd->owner = THIS_MODULE; -+ ra->controller = &ra->hwcontrol; -+ mutex_init(ra->controller); -+ -+ printk("%s: alloc %x, at %p , btt(%p, %x), ranfc_mtd:%p\n", -+ __func__ , alloc_size, ra, ra->bbt, bbt_size, ranfc_mtd); -+ -+ ppdata.of_node = pdev->dev.of_node; -+ err = mtd_device_parse_register(ranfc_mtd, mtk_probe_types, -+ &ppdata, NULL, 0); -+ -+ return err; -+} -+ -+static int -+mtk_nand_remove(struct platform_device *pdev) -+{ -+ struct ra_nand_chip *ra; -+ -+ if (ranfc_mtd) { -+ ra = (struct ra_nand_chip *)ranfc_mtd->priv; -+ -+ /* Deregister partitions */ -+ //del_mtd_partitions(ranfc_mtd); -+ kfree(ra); -+ } -+ return 0; -+} -+ -+static const struct of_device_id mtk_nand_match[] = { -+ { .compatible = "mtk,mt7620-nand" }, -+ {}, -+}; -+MODULE_DEVICE_TABLE(of, mtk_nand_match); -+ -+static struct platform_driver mtk_nand_driver = { -+ .probe = mtk_nand_probe, -+ .remove = mtk_nand_remove, -+ .driver = { -+ .name = "mt7620_nand", -+ .owner = THIS_MODULE, -+ .of_match_table = mtk_nand_match, -+ }, -+}; -+ -+module_platform_driver(mtk_nand_driver); -+ -+ -+MODULE_LICENSE("GPL"); ---- /dev/null -+++ b/drivers/mtd/maps/ralink_nand.h -@@ -0,0 +1,232 @@ -+#ifndef RT2880_NAND_H -+#define RT2880_NAND_H -+ -+#include -+ -+//#include "gdma.h" -+ -+#define RALINK_SYSCTL_BASE 0xB0000000 -+#define RALINK_PIO_BASE 0xB0000600 -+#define RALINK_NAND_CTRL_BASE 0xB0000810 -+#define CONFIG_RALINK_MT7620 -+ -+#define SKIP_BAD_BLOCK -+//#define RANDOM_GEN_BAD_BLOCK -+ -+#define ra_inl(addr) (*(volatile unsigned int *)(addr)) -+#define ra_outl(addr, value) (*(volatile unsigned int *)(addr) = (value)) -+#define ra_aor(addr, a_mask, o_value) ra_outl(addr, (ra_inl(addr) & (a_mask)) | (o_value)) -+#define ra_and(addr, a_mask) ra_aor(addr, a_mask, 0) -+#define ra_or(addr, o_value) ra_aor(addr, -1, o_value) -+ -+ -+#define CONFIG_NUMCHIPS 1 -+#define CONFIG_NOT_SUPPORT_WP //rt3052 has no WP signal for chip. -+//#define CONFIG_NOT_SUPPORT_RB -+ -+extern int is_nand_page_2048; -+extern const unsigned int nand_size_map[2][3]; -+ -+//chip -+// chip geometry: SAMSUNG small size 32MB. -+#define CONFIG_CHIP_SIZE_BIT (nand_size_map[is_nand_page_2048][nand_addrlen-3]) //! (1<=32)? 
31 : CONFIG_CHIP_SIZE_BIT)) -+//#define CFG_CHIPSIZE (1 << CONFIG_CHIP_SIZE_BIT) -+#define CFG_PAGESIZE (1 << CONFIG_PAGE_SIZE_BIT) -+#define CFG_BLOCKSIZE (CFG_PAGESIZE << CONFIG_NUMPAGE_PER_BLOCK_BIT) -+#define CFG_NUMPAGE (1 << (CONFIG_CHIP_SIZE_BIT - CONFIG_PAGE_SIZE_BIT)) -+#define CFG_NUMBLOCK (CFG_NUMPAGE >> CONFIG_NUMPAGE_PER_BLOCK_BIT) -+#define CFG_BLOCK_OOBSIZE (1 << (CONFIG_OOBSIZE_PER_PAGE_BIT + CONFIG_NUMPAGE_PER_BLOCK_BIT)) -+#define CFG_PAGE_OOBSIZE (1 << CONFIG_OOBSIZE_PER_PAGE_BIT) -+ -+#define NAND_BLOCK_ALIGN(addr) ((addr) & (CFG_BLOCKSIZE-1)) -+#define NAND_PAGE_ALIGN(addr) ((addr) & (CFG_PAGESIZE-1)) -+ -+ -+#define NFC_BASE RALINK_NAND_CTRL_BASE -+#define NFC_CTRL (NFC_BASE + 0x0) -+#define NFC_CONF (NFC_BASE + 0x4) -+#define NFC_CMD1 (NFC_BASE + 0x8) -+#define NFC_CMD2 (NFC_BASE + 0xc) -+#define NFC_CMD3 (NFC_BASE + 0x10) -+#define NFC_ADDR (NFC_BASE + 0x14) -+#define NFC_DATA (NFC_BASE + 0x18) -+#if defined (CONFIG_RALINK_RT6855) || defined (CONFIG_RALINK_RT6855A) || \ -+ defined (CONFIG_RALINK_MT7620) || defined (CONFIG_RALINK_MT7621) -+#define NFC_ECC (NFC_BASE + 0x30) -+#else -+#define NFC_ECC (NFC_BASE + 0x1c) -+#endif -+#define NFC_STATUS (NFC_BASE + 0x20) -+#define NFC_INT_EN (NFC_BASE + 0x24) -+#define NFC_INT_ST (NFC_BASE + 0x28) -+#if defined (CONFIG_RALINK_RT6855) || defined (CONFIG_RALINK_RT6855A) || \ -+ defined (CONFIG_RALINK_MT7620) || defined (CONFIG_RALINK_MT7621) -+#define NFC_CONF1 (NFC_BASE + 0x2c) -+#define NFC_ECC_P1 (NFC_BASE + 0x30) -+#define NFC_ECC_P2 (NFC_BASE + 0x34) -+#define NFC_ECC_P3 (NFC_BASE + 0x38) -+#define NFC_ECC_P4 (NFC_BASE + 0x3c) -+#define NFC_ECC_ERR1 (NFC_BASE + 0x40) -+#define NFC_ECC_ERR2 (NFC_BASE + 0x44) -+#define NFC_ECC_ERR3 (NFC_BASE + 0x48) -+#define NFC_ECC_ERR4 (NFC_BASE + 0x4c) -+#define NFC_ADDR2 (NFC_BASE + 0x50) -+#endif -+ -+enum _int_stat { -+ INT_ST_ND_DONE = 1<<0, -+ INT_ST_TX_BUF_RDY = 1<<1, -+ INT_ST_RX_BUF_RDY = 1<<2, -+ INT_ST_ECC_ERR = 1<<3, -+ INT_ST_TX_TRAS_ERR = 1<<4, -+ INT_ST_RX_TRAS_ERR = 1<<5, -+ INT_ST_TX_KICK_ERR = 1<<6, -+ INT_ST_RX_KICK_ERR = 1<<7 -+}; -+ -+ -+//#define WORKAROUND_RX_BUF_OV 1 -+ -+ -+/************************************************************* -+ * stolen from nand.h -+ *************************************************************/ -+ -+/* -+ * Standard NAND flash commands -+ */ -+#define NAND_CMD_READ0 0 -+#define NAND_CMD_READ1 1 -+#define NAND_CMD_RNDOUT 5 -+#define NAND_CMD_PAGEPROG 0x10 -+#define NAND_CMD_READOOB 0x50 -+#define NAND_CMD_ERASE1 0x60 -+#define NAND_CMD_STATUS 0x70 -+#define NAND_CMD_STATUS_MULTI 0x71 -+#define NAND_CMD_SEQIN 0x80 -+#define NAND_CMD_RNDIN 0x85 -+#define NAND_CMD_READID 0x90 -+#define NAND_CMD_ERASE2 0xd0 -+#define NAND_CMD_RESET 0xff -+ -+/* Extended commands for large page devices */ -+#define NAND_CMD_READSTART 0x30 -+#define NAND_CMD_RNDOUTSTART 0xE0 -+#define NAND_CMD_CACHEDPROG 0x15 -+ -+/* Extended commands for AG-AND device */ -+/* -+ * Note: the command for NAND_CMD_DEPLETE1 is really 0x00 but -+ * there is no way to distinguish that from NAND_CMD_READ0 -+ * until the remaining sequence of commands has been completed -+ * so add a high order bit and mask it off in the command. 
-+ */ -+#define NAND_CMD_DEPLETE1 0x100 -+#define NAND_CMD_DEPLETE2 0x38 -+#define NAND_CMD_STATUS_MULTI 0x71 -+#define NAND_CMD_STATUS_ERROR 0x72 -+/* multi-bank error status (banks 0-3) */ -+#define NAND_CMD_STATUS_ERROR0 0x73 -+#define NAND_CMD_STATUS_ERROR1 0x74 -+#define NAND_CMD_STATUS_ERROR2 0x75 -+#define NAND_CMD_STATUS_ERROR3 0x76 -+#define NAND_CMD_STATUS_RESET 0x7f -+#define NAND_CMD_STATUS_CLEAR 0xff -+ -+#define NAND_CMD_NONE -1 -+ -+/* Status bits */ -+#define NAND_STATUS_FAIL 0x01 -+#define NAND_STATUS_FAIL_N1 0x02 -+#define NAND_STATUS_TRUE_READY 0x20 -+#define NAND_STATUS_READY 0x40 -+#define NAND_STATUS_WP 0x80 -+ -+typedef enum { -+ FL_READY, -+ FL_READING, -+ FL_WRITING, -+ FL_ERASING, -+ FL_SYNCING, -+ FL_CACHEDPRG, -+ FL_PM_SUSPENDED, -+} nand_state_t; -+ -+/*************************************************************/ -+ -+ -+ -+typedef enum _ra_flags { -+ FLAG_NONE = 0, -+ FLAG_ECC_EN = (1<<0), -+ FLAG_USE_GDMA = (1<<1), -+ FLAG_VERIFY = (1<<2), -+} RA_FLAGS; -+ -+ -+#define BBTTAG_BITS 2 -+#define BBTTAG_BITS_MASK ((1< -Date: Tue, 3 Dec 2013 17:05:05 +0100 -Subject: [PATCH] DMA: add rt2880 dma engine - -Signed-off-by: John Crispin ---- - drivers/dma/Kconfig | 6 + - drivers/dma/Makefile | 1 + - drivers/dma/ralink-gdma.c | 596 +++++++++++++++++++++++++++++++++++++++++++++ - 3 files changed, 603 insertions(+) - create mode 100644 drivers/dma/ralink-gdma.c - ---- a/drivers/dma/Kconfig -+++ b/drivers/dma/Kconfig -@@ -312,6 +312,12 @@ config MMP_PDMA - help - Support the MMP PDMA engine for PXA and MMP platfrom. - -+config DMA_RALINK -+ tristate "RALINK DMA support" -+ depends on RALINK && SOC_MT7620 -+ select DMA_ENGINE -+ select DMA_VIRTUAL_CHANNELS -+ - config DMA_ENGINE - bool - ---- /dev/null -+++ b/drivers/dma/ralink-gdma.c -@@ -0,0 +1,577 @@ -+/* -+ * Copyright (C) 2013, Lars-Peter Clausen -+ * GDMA4740 DMAC support -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License as published by the -+ * Free Software Foundation; either version 2 of the License, or (at your -+ * option) any later version. -+ * -+ * You should have received a copy of the GNU General Public License along -+ * with this program; if not, write to the Free Software Foundation, Inc., -+ * 675 Mass Ave, Cambridge, MA 02139, USA. 
-+ * -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "virt-dma.h" -+ -+#define GDMA_NR_CHANS 16 -+ -+#define GDMA_REG_SRC_ADDR(x) (0x00 + (x) * 0x10) -+#define GDMA_REG_DST_ADDR(x) (0x04 + (x) * 0x10) -+ -+#define GDMA_REG_CTRL0(x) (0x08 + (x) * 0x10) -+#define GDMA_REG_CTRL0_TX_MASK 0xffff -+#define GDMA_REG_CTRL0_TX_SHIFT 16 -+#define GDMA_REG_CTRL0_CURR_MASK 0xff -+#define GDMA_REG_CTRL0_CURR_SHIFT 8 -+#define GDMA_REG_CTRL0_SRC_ADDR_FIXED BIT(7) -+#define GDMA_REG_CTRL0_DST_ADDR_FIXED BIT(6) -+#define GDMA_REG_CTRL0_BURST_MASK 0x7 -+#define GDMA_REG_CTRL0_BURST_SHIFT 3 -+#define GDMA_REG_CTRL0_DONE_INT BIT(2) -+#define GDMA_REG_CTRL0_ENABLE BIT(1) -+#define GDMA_REG_CTRL0_HW_MODE 0 -+ -+#define GDMA_REG_CTRL1(x) (0x0c + (x) * 0x10) -+#define GDMA_REG_CTRL1_SEG_MASK 0xf -+#define GDMA_REG_CTRL1_SEG_SHIFT 22 -+#define GDMA_REG_CTRL1_REQ_MASK 0x3f -+#define GDMA_REG_CTRL1_SRC_REQ_SHIFT 16 -+#define GDMA_REG_CTRL1_DST_REQ_SHIFT 8 -+#define GDMA_REG_CTRL1_CONTINOUS BIT(14) -+#define GDMA_REG_CTRL1_NEXT_MASK 0x1f -+#define GDMA_REG_CTRL1_NEXT_SHIFT 3 -+#define GDMA_REG_CTRL1_COHERENT BIT(2) -+#define GDMA_REG_CTRL1_FAIL BIT(1) -+#define GDMA_REG_CTRL1_MASK BIT(0) -+ -+#define GDMA_REG_UNMASK_INT 0x200 -+#define GDMA_REG_DONE_INT 0x204 -+ -+#define GDMA_REG_GCT 0x220 -+#define GDMA_REG_GCT_CHAN_MASK 0x3 -+#define GDMA_REG_GCT_CHAN_SHIFT 3 -+#define GDMA_REG_GCT_VER_MASK 0x3 -+#define GDMA_REG_GCT_VER_SHIFT 1 -+#define GDMA_REG_GCT_ARBIT_RR BIT(0) -+ -+enum gdma_dma_transfer_size { -+ GDMA_TRANSFER_SIZE_4BYTE = 0, -+ GDMA_TRANSFER_SIZE_8BYTE = 1, -+ GDMA_TRANSFER_SIZE_16BYTE = 2, -+ GDMA_TRANSFER_SIZE_32BYTE = 3, -+}; -+ -+struct gdma_dma_sg { -+ dma_addr_t addr; -+ unsigned int len; -+}; -+ -+struct gdma_dma_desc { -+ struct virt_dma_desc vdesc; -+ -+ enum dma_transfer_direction direction; -+ bool cyclic; -+ -+ unsigned int num_sgs; -+ struct gdma_dma_sg sg[]; -+}; -+ -+struct gdma_dmaengine_chan { -+ struct virt_dma_chan vchan; -+ unsigned int id; -+ -+ dma_addr_t fifo_addr; -+ unsigned int transfer_shift; -+ -+ struct gdma_dma_desc *desc; -+ unsigned int next_sg; -+}; -+ -+struct gdma_dma_dev { -+ struct dma_device ddev; -+ void __iomem *base; -+ struct clk *clk; -+ -+ struct gdma_dmaengine_chan chan[GDMA_NR_CHANS]; -+}; -+ -+static struct gdma_dma_dev *gdma_dma_chan_get_dev( -+ struct gdma_dmaengine_chan *chan) -+{ -+ return container_of(chan->vchan.chan.device, struct gdma_dma_dev, -+ ddev); -+} -+ -+static struct gdma_dmaengine_chan *to_gdma_dma_chan(struct dma_chan *c) -+{ -+ return container_of(c, struct gdma_dmaengine_chan, vchan.chan); -+} -+ -+static struct gdma_dma_desc *to_gdma_dma_desc(struct virt_dma_desc *vdesc) -+{ -+ return container_of(vdesc, struct gdma_dma_desc, vdesc); -+} -+ -+static inline uint32_t gdma_dma_read(struct gdma_dma_dev *dma_dev, -+ unsigned int reg) -+{ -+ return readl(dma_dev->base + reg); -+} -+ -+static inline void gdma_dma_write(struct gdma_dma_dev *dma_dev, -+ unsigned reg, uint32_t val) -+{ -+ //printk("gdma --> %p = 0x%08X\n", dma_dev->base + reg, val); -+ writel(val, dma_dev->base + reg); -+} -+ -+static inline void gdma_dma_write_mask(struct gdma_dma_dev *dma_dev, -+ unsigned int reg, uint32_t val, uint32_t mask) -+{ -+ uint32_t tmp; -+ -+ tmp = gdma_dma_read(dma_dev, reg); -+ tmp &= ~mask; -+ tmp |= val; -+ gdma_dma_write(dma_dev, reg, tmp); -+} -+ -+static struct gdma_dma_desc *gdma_dma_alloc_desc(unsigned int num_sgs) -+{ -+ return 
kzalloc(sizeof(struct gdma_dma_desc) + -+ sizeof(struct gdma_dma_sg) * num_sgs, GFP_ATOMIC); -+} -+ -+static enum gdma_dma_transfer_size gdma_dma_maxburst(u32 maxburst) -+{ -+ if (maxburst <= 7) -+ return GDMA_TRANSFER_SIZE_4BYTE; -+ else if (maxburst <= 15) -+ return GDMA_TRANSFER_SIZE_8BYTE; -+ else if (maxburst <= 31) -+ return GDMA_TRANSFER_SIZE_16BYTE; -+ -+ return GDMA_TRANSFER_SIZE_32BYTE; -+} -+ -+static int gdma_dma_slave_config(struct dma_chan *c, -+ const struct dma_slave_config *config) -+{ -+ struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c); -+ struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan); -+ enum gdma_dma_transfer_size transfer_size; -+ uint32_t flags; -+ uint32_t ctrl0, ctrl1; -+ -+ switch (config->direction) { -+ case DMA_MEM_TO_DEV: -+ ctrl1 = 32 << GDMA_REG_CTRL1_SRC_REQ_SHIFT; -+ ctrl1 |= config->slave_id << GDMA_REG_CTRL1_DST_REQ_SHIFT; -+ flags = GDMA_REG_CTRL0_DST_ADDR_FIXED; -+ transfer_size = gdma_dma_maxburst(config->dst_maxburst); -+ chan->fifo_addr = config->dst_addr; -+ break; -+ -+ case DMA_DEV_TO_MEM: -+ ctrl1 = config->slave_id << GDMA_REG_CTRL1_SRC_REQ_SHIFT; -+ ctrl1 |= 32 << GDMA_REG_CTRL1_DST_REQ_SHIFT; -+ flags = GDMA_REG_CTRL0_SRC_ADDR_FIXED; -+ transfer_size = gdma_dma_maxburst(config->src_maxburst); -+ chan->fifo_addr = config->src_addr; -+ break; -+ -+ default: -+ return -EINVAL; -+ } -+ -+ chan->transfer_shift = 1 + transfer_size; -+ -+ ctrl0 = flags | GDMA_REG_CTRL0_HW_MODE; -+ ctrl0 |= GDMA_REG_CTRL0_DONE_INT; -+ -+ ctrl1 &= ~(GDMA_REG_CTRL1_NEXT_MASK << GDMA_REG_CTRL1_NEXT_SHIFT); -+ ctrl1 |= chan->id << GDMA_REG_CTRL1_NEXT_SHIFT; -+ ctrl1 |= GDMA_REG_CTRL1_FAIL; -+ ctrl1 &= ~GDMA_REG_CTRL1_CONTINOUS; -+ gdma_dma_write(dma_dev, GDMA_REG_CTRL0(chan->id), ctrl0); -+ gdma_dma_write(dma_dev, GDMA_REG_CTRL1(chan->id), ctrl1); -+ -+ return 0; -+} -+ -+static int gdma_dma_terminate_all(struct dma_chan *c) -+{ -+ struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c); -+ struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan); -+ unsigned long flags; -+ LIST_HEAD(head); -+ -+ spin_lock_irqsave(&chan->vchan.lock, flags); -+ gdma_dma_write_mask(dma_dev, GDMA_REG_CTRL0(chan->id), 0, -+ GDMA_REG_CTRL0_ENABLE); -+ chan->desc = NULL; -+ vchan_get_all_descriptors(&chan->vchan, &head); -+ spin_unlock_irqrestore(&chan->vchan.lock, flags); -+ -+ vchan_dma_desc_free_list(&chan->vchan, &head); -+ -+ return 0; -+} -+ -+static int gdma_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, -+ unsigned long arg) -+{ -+ struct dma_slave_config *config = (struct dma_slave_config *)arg; -+ -+ switch (cmd) { -+ case DMA_SLAVE_CONFIG: -+ return gdma_dma_slave_config(chan, config); -+ case DMA_TERMINATE_ALL: -+ return gdma_dma_terminate_all(chan); -+ default: -+ return -ENOSYS; -+ } -+} -+ -+static int gdma_dma_start_transfer(struct gdma_dmaengine_chan *chan) -+{ -+ struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan); -+ dma_addr_t src_addr, dst_addr; -+ struct virt_dma_desc *vdesc; -+ struct gdma_dma_sg *sg; -+ -+ gdma_dma_write_mask(dma_dev, GDMA_REG_CTRL0(chan->id), 0, -+ GDMA_REG_CTRL0_ENABLE); -+ -+ if (!chan->desc) { -+ vdesc = vchan_next_desc(&chan->vchan); -+ if (!vdesc) -+ return 0; -+ chan->desc = to_gdma_dma_desc(vdesc); -+ chan->next_sg = 0; -+ } -+ -+ if (chan->next_sg == chan->desc->num_sgs) -+ chan->next_sg = 0; -+ -+ sg = &chan->desc->sg[chan->next_sg]; -+ -+ if (chan->desc->direction == DMA_MEM_TO_DEV) { -+ src_addr = sg->addr; -+ dst_addr = chan->fifo_addr; -+ } else { -+ src_addr = chan->fifo_addr; -+ dst_addr = sg->addr; 
-+ } -+ gdma_dma_write(dma_dev, GDMA_REG_SRC_ADDR(chan->id), src_addr); -+ gdma_dma_write(dma_dev, GDMA_REG_DST_ADDR(chan->id), dst_addr); -+ gdma_dma_write_mask(dma_dev, GDMA_REG_CTRL0(chan->id), -+ (sg->len << GDMA_REG_CTRL0_TX_SHIFT) | GDMA_REG_CTRL0_ENABLE, -+ GDMA_REG_CTRL0_TX_MASK << GDMA_REG_CTRL0_TX_SHIFT); -+ chan->next_sg++; -+ gdma_dma_write_mask(dma_dev, GDMA_REG_CTRL1(chan->id), 0, GDMA_REG_CTRL1_MASK); -+ -+ return 0; -+} -+ -+static void gdma_dma_chan_irq(struct gdma_dmaengine_chan *chan) -+{ -+ spin_lock(&chan->vchan.lock); -+ if (chan->desc) { -+ if (chan->desc && chan->desc->cyclic) { -+ vchan_cyclic_callback(&chan->desc->vdesc); -+ } else { -+ if (chan->next_sg == chan->desc->num_sgs) { -+ chan->desc = NULL; -+ vchan_cookie_complete(&chan->desc->vdesc); -+ } -+ } -+ } -+ gdma_dma_start_transfer(chan); -+ spin_unlock(&chan->vchan.lock); -+} -+ -+static irqreturn_t gdma_dma_irq(int irq, void *devid) -+{ -+ struct gdma_dma_dev *dma_dev = devid; -+ uint32_t unmask, done; -+ unsigned int i; -+ -+ unmask = gdma_dma_read(dma_dev, GDMA_REG_UNMASK_INT); -+ gdma_dma_write(dma_dev, GDMA_REG_UNMASK_INT, unmask); -+ done = gdma_dma_read(dma_dev, GDMA_REG_DONE_INT); -+ -+ for (i = 0; i < GDMA_NR_CHANS; ++i) -+ if (done & BIT(i)) -+ gdma_dma_chan_irq(&dma_dev->chan[i]); -+ gdma_dma_write(dma_dev, GDMA_REG_DONE_INT, done); -+ -+ return IRQ_HANDLED; -+} -+ -+static void gdma_dma_issue_pending(struct dma_chan *c) -+{ -+ struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c); -+ unsigned long flags; -+ -+ spin_lock_irqsave(&chan->vchan.lock, flags); -+ if (vchan_issue_pending(&chan->vchan) && !chan->desc) -+ gdma_dma_start_transfer(chan); -+ spin_unlock_irqrestore(&chan->vchan.lock, flags); -+} -+ -+static struct dma_async_tx_descriptor *gdma_dma_prep_slave_sg( -+ struct dma_chan *c, struct scatterlist *sgl, -+ unsigned int sg_len, enum dma_transfer_direction direction, -+ unsigned long flags, void *context) -+{ -+ struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c); -+ struct gdma_dma_desc *desc; -+ struct scatterlist *sg; -+ unsigned int i; -+ -+ desc = gdma_dma_alloc_desc(sg_len); -+ if (!desc) -+ return NULL; -+ -+ for_each_sg(sgl, sg, sg_len, i) { -+ desc->sg[i].addr = sg_dma_address(sg); -+ desc->sg[i].len = sg_dma_len(sg); -+ } -+ -+ desc->num_sgs = sg_len; -+ desc->direction = direction; -+ desc->cyclic = false; -+ -+ return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); -+} -+ -+static struct dma_async_tx_descriptor *gdma_dma_prep_dma_cyclic( -+ struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len, -+ size_t period_len, enum dma_transfer_direction direction, -+ unsigned long flags, void *context) -+{ -+ struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c); -+ struct gdma_dma_desc *desc; -+ unsigned int num_periods, i; -+ -+ if (buf_len % period_len) -+ return NULL; -+ -+ num_periods = buf_len / period_len; -+ -+ desc = gdma_dma_alloc_desc(num_periods); -+ if (!desc) -+ return NULL; -+ -+ for (i = 0; i < num_periods; i++) { -+ desc->sg[i].addr = buf_addr; -+ desc->sg[i].len = period_len; -+ buf_addr += period_len; -+ } -+ -+ desc->num_sgs = num_periods; -+ desc->direction = direction; -+ desc->cyclic = true; -+ -+ return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); -+} -+ -+static size_t gdma_dma_desc_residue(struct gdma_dmaengine_chan *chan, -+ struct gdma_dma_desc *desc, unsigned int next_sg) -+{ -+ struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan); -+ unsigned int residue, count; -+ unsigned int i; -+ -+ residue = 0; -+ -+ for (i = next_sg; i < 
desc->num_sgs; i++) -+ residue += desc->sg[i].len; -+ -+ if (next_sg != 0) { -+ count = gdma_dma_read(dma_dev, GDMA_REG_CTRL0(chan->id)); -+ count >>= GDMA_REG_CTRL0_CURR_SHIFT; -+ count &= GDMA_REG_CTRL0_CURR_MASK; -+ residue += count << chan->transfer_shift; -+ } -+ -+ return residue; -+} -+ -+static enum dma_status gdma_dma_tx_status(struct dma_chan *c, -+ dma_cookie_t cookie, struct dma_tx_state *state) -+{ -+ struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c); -+ struct virt_dma_desc *vdesc; -+ enum dma_status status; -+ unsigned long flags; -+ -+ status = dma_cookie_status(c, cookie, state); -+ if (status == DMA_SUCCESS || !state) -+ return status; -+ -+ spin_lock_irqsave(&chan->vchan.lock, flags); -+ vdesc = vchan_find_desc(&chan->vchan, cookie); -+ if (cookie == chan->desc->vdesc.tx.cookie) { -+ state->residue = gdma_dma_desc_residue(chan, chan->desc, -+ chan->next_sg); -+ } else if (vdesc) { -+ state->residue = gdma_dma_desc_residue(chan, -+ to_gdma_dma_desc(vdesc), 0); -+ } else { -+ state->residue = 0; -+ } -+ spin_unlock_irqrestore(&chan->vchan.lock, flags); -+ -+ return status; -+} -+ -+static int gdma_dma_alloc_chan_resources(struct dma_chan *c) -+{ -+ return 0; -+} -+ -+static void gdma_dma_free_chan_resources(struct dma_chan *c) -+{ -+ vchan_free_chan_resources(to_virt_chan(c)); -+} -+ -+static void gdma_dma_desc_free(struct virt_dma_desc *vdesc) -+{ -+ kfree(container_of(vdesc, struct gdma_dma_desc, vdesc)); -+} -+ -+static struct dma_chan * -+of_dma_xlate_by_chan_id(struct of_phandle_args *dma_spec, -+ struct of_dma *ofdma) -+{ -+ struct gdma_dma_dev *dma_dev = ofdma->of_dma_data; -+ unsigned int request = dma_spec->args[0]; -+ -+ if (request >= GDMA_NR_CHANS) -+ return NULL; -+ -+ return dma_get_slave_channel(&(dma_dev->chan[request].vchan.chan)); -+} -+ -+static int gdma_dma_probe(struct platform_device *pdev) -+{ -+ struct gdma_dmaengine_chan *chan; -+ struct gdma_dma_dev *dma_dev; -+ struct dma_device *dd; -+ unsigned int i; -+ struct resource *res; -+ uint32_t gct; -+ int ret; -+ int irq; -+ -+ -+ dma_dev = devm_kzalloc(&pdev->dev, sizeof(*dma_dev), GFP_KERNEL); -+ if (!dma_dev) -+ return -EINVAL; -+ -+ dd = &dma_dev->ddev; -+ -+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -+ dma_dev->base = devm_ioremap_resource(&pdev->dev, res); -+ if (IS_ERR(dma_dev->base)) -+ return PTR_ERR(dma_dev->base); -+ -+ dma_cap_set(DMA_SLAVE, dd->cap_mask); -+ dma_cap_set(DMA_CYCLIC, dd->cap_mask); -+ dd->device_alloc_chan_resources = gdma_dma_alloc_chan_resources; -+ dd->device_free_chan_resources = gdma_dma_free_chan_resources; -+ dd->device_tx_status = gdma_dma_tx_status; -+ dd->device_issue_pending = gdma_dma_issue_pending; -+ dd->device_prep_slave_sg = gdma_dma_prep_slave_sg; -+ dd->device_prep_dma_cyclic = gdma_dma_prep_dma_cyclic; -+ dd->device_control = gdma_dma_control; -+ dd->dev = &pdev->dev; -+ dd->chancnt = GDMA_NR_CHANS; -+ INIT_LIST_HEAD(&dd->channels); -+ -+ for (i = 0; i < dd->chancnt; i++) { -+ chan = &dma_dev->chan[i]; -+ chan->id = i; -+ chan->vchan.desc_free = gdma_dma_desc_free; -+ vchan_init(&chan->vchan, dd); -+ } -+ -+ ret = dma_async_device_register(dd); -+ if (ret) -+ return ret; -+ -+ ret = of_dma_controller_register(pdev->dev.of_node, -+ of_dma_xlate_by_chan_id, dma_dev); -+ if (ret) -+ goto err_unregister; -+ -+ irq = platform_get_irq(pdev, 0); -+ ret = request_irq(irq, gdma_dma_irq, 0, dev_name(&pdev->dev), dma_dev); -+ if (ret) -+ goto err_unregister; -+ -+ gdma_dma_write(dma_dev, GDMA_REG_UNMASK_INT, 0); -+ gdma_dma_write(dma_dev, 
GDMA_REG_DONE_INT, BIT(dd->chancnt) - 1); -+ -+ gct = gdma_dma_read(dma_dev, GDMA_REG_GCT); -+ dev_info(&pdev->dev, "revision: %d, channels: %d\n", -+ (gct >> GDMA_REG_GCT_VER_SHIFT) & GDMA_REG_GCT_VER_MASK, -+ 8 << ((gct >> GDMA_REG_GCT_CHAN_SHIFT) & GDMA_REG_GCT_CHAN_MASK)); -+ platform_set_drvdata(pdev, dma_dev); -+ -+ gdma_dma_write(dma_dev, GDMA_REG_GCT, GDMA_REG_GCT_ARBIT_RR); -+ -+ return 0; -+ -+err_unregister: -+ dma_async_device_unregister(dd); -+ return ret; -+} -+ -+static int gdma_dma_remove(struct platform_device *pdev) -+{ -+ struct gdma_dma_dev *dma_dev = platform_get_drvdata(pdev); -+ int irq = platform_get_irq(pdev, 0); -+ -+ free_irq(irq, dma_dev); -+ of_dma_controller_free(pdev->dev.of_node); -+ dma_async_device_unregister(&dma_dev->ddev); -+ -+ return 0; -+} -+ -+static const struct of_device_id gdma_of_match_table[] = { -+ { .compatible = "ralink,rt2880-gdma" }, -+ { }, -+}; -+ -+static struct platform_driver gdma_dma_driver = { -+ .probe = gdma_dma_probe, -+ .remove = gdma_dma_remove, -+ .driver = { -+ .name = "gdma-rt2880", -+ .owner = THIS_MODULE, -+ .of_match_table = gdma_of_match_table, -+ }, -+}; -+module_platform_driver(gdma_dma_driver); -+ -+MODULE_AUTHOR("Lars-Peter Clausen "); -+MODULE_DESCRIPTION("GDMA4740 DMA driver"); -+MODULE_LICENSE("GPLv2"); ---- a/drivers/dma/dmaengine.c -+++ b/drivers/dma/dmaengine.c -@@ -504,6 +504,32 @@ static struct dma_chan *private_candidat - } - - /** -+ * dma_request_slave_channel - try to get specific channel exclusively -+ * @chan: target channel -+ */ -+struct dma_chan *dma_get_slave_channel(struct dma_chan *chan) -+{ -+ int err = -EBUSY; -+ -+ /* lock against __dma_request_channel */ -+ mutex_lock(&dma_list_mutex); -+ -+ if (chan->client_count == 0) { -+ err = dma_chan_get(chan); -+ if (err) -+ pr_debug("%s: failed to get %s: (%d)\n", -+ __func__, dma_chan_name(chan), err); -+ } else -+ chan = NULL; -+ -+ mutex_unlock(&dma_list_mutex); -+ -+ return chan; -+} -+EXPORT_SYMBOL_GPL(dma_get_slave_channel); -+ -+ -+/** - * dma_request_channel - try to allocate an exclusive channel - * @mask: capabilities that the channel must satisfy - * @fn: optional callback to disposition available channels ---- a/include/linux/dmaengine.h -+++ b/include/linux/dmaengine.h -@@ -999,6 +999,7 @@ static inline void dma_release_channel(s - int dma_async_device_register(struct dma_device *device); - void dma_async_device_unregister(struct dma_device *device); - void dma_run_dependencies(struct dma_async_tx_descriptor *tx); -+struct dma_chan *dma_get_slave_channel(struct dma_chan *chan); - struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type); - struct dma_chan *net_dma_find_channel(void); - #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y) ---- a/drivers/dma/Makefile -+++ b/drivers/dma/Makefile -@@ -38,3 +38,4 @@ obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o - obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o - obj-$(CONFIG_DMA_OMAP) += omap-dma.o - obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o -+obj-$(CONFIG_DMA_RALINK) += ralink-gdma.o diff --git a/target/linux/ramips/patches-3.10/0300-MIPS-OWRTDTB.patch b/target/linux/ramips/patches-3.10/0300-MIPS-OWRTDTB.patch new file mode 100644 index 0000000000..1a00dc2dd8 --- /dev/null +++ b/target/linux/ramips/patches-3.10/0300-MIPS-OWRTDTB.patch @@ -0,0 +1,52 @@ +From c174d2250e402399ad7dbdd57d51883d8804bba0 Mon Sep 17 00:00:00 2001 +From: John Crispin +Date: Mon, 15 Jul 2013 00:40:37 +0200 +Subject: [PATCH 31/33] owrt: MIPS: add OWRTDTB secion + +Signed-off-by: John Crispin +--- + 
arch/mips/kernel/head.S | 3 +++ + arch/mips/ralink/Makefile | 2 +- + arch/mips/ralink/of.c | 4 +++- + 3 files changed, 7 insertions(+), 2 deletions(-) + +--- a/arch/mips/kernel/head.S ++++ b/arch/mips/kernel/head.S +@@ -146,6 +146,9 @@ EXPORT(__image_cmdline) + .fill 0x400 + #endif /* CONFIG_IMAGE_CMDLINE_HACK */ + ++ .ascii "OWRTDTB:" ++ EXPORT(__image_dtb) ++ .fill 0x4000 + __REF + + NESTED(kernel_entry, 16, sp) # kernel entry point +--- a/arch/mips/ralink/Makefile ++++ b/arch/mips/ralink/Makefile +@@ -26,4 +26,4 @@ obj-$(CONFIG_EARLY_PRINTK) += early_prin + + obj-$(CONFIG_DEBUG_FS) += bootrom.o + +-obj-y += dts/ ++#obj-y += dts/ +--- a/arch/mips/ralink/of.c ++++ b/arch/mips/ralink/of.c +@@ -90,6 +90,8 @@ static int __init early_init_dt_find_mem + return 0; + } + ++extern struct boot_param_header __image_dtb; ++ + void __init plat_mem_setup(void) + { + set_io_port_base(KSEG1); +@@ -98,7 +100,7 @@ void __init plat_mem_setup(void) + * Load the builtin devicetree. This causes the chosen node to be + * parsed resulting in our memory appearing + */ +- __dt_setup_arch(&__dtb_start); ++ __dt_setup_arch(&__image_dtb); + + of_scan_flat_dt(early_init_dt_find_memory, NULL); + if (memory_dtb) diff --git a/target/linux/ramips/patches-3.10/0301-asoc-add-mt7620-support.patch b/target/linux/ramips/patches-3.10/0301-asoc-add-mt7620-support.patch deleted file mode 100644 index c85f4177a0..0000000000 --- a/target/linux/ramips/patches-3.10/0301-asoc-add-mt7620-support.patch +++ /dev/null @@ -1,711 +0,0 @@ -From c72bc41d018519de5d63ec7790965fbf4605276a Mon Sep 17 00:00:00 2001 -From: John Crispin -Date: Tue, 3 Dec 2013 20:18:13 +0100 -Subject: [PATCH] asoc: add mt7620 support - -Signed-off-by: John Crispin ---- - sound/soc/Kconfig | 1 + - sound/soc/Makefile | 1 + - sound/soc/ralink/Kconfig | 24 +++ - sound/soc/ralink/Makefile | 13 ++ - sound/soc/ralink/mt7620-i2s.c | 429 ++++++++++++++++++++++++++++++++++++++ - sound/soc/ralink/mt7620-pcm.c | 77 +++++++ - sound/soc/ralink/mt7620-wm8960.c | 124 +++++++++++ - 7 files changed, 669 insertions(+) - create mode 100644 sound/soc/ralink/Kconfig - create mode 100644 sound/soc/ralink/Makefile - create mode 100644 sound/soc/ralink/mt7620-i2s.c - create mode 100644 sound/soc/ralink/mt7620-pcm.c - create mode 100644 sound/soc/ralink/mt7620-wm8960.c - ---- a/sound/soc/Kconfig -+++ b/sound/soc/Kconfig -@@ -48,6 +48,7 @@ source "sound/soc/kirkwood/Kconfig" - source "sound/soc/mid-x86/Kconfig" - source "sound/soc/mxs/Kconfig" - source "sound/soc/pxa/Kconfig" -+source "sound/soc/ralink/Kconfig" - source "sound/soc/samsung/Kconfig" - source "sound/soc/s6000/Kconfig" - source "sound/soc/sh/Kconfig" ---- a/sound/soc/Makefile -+++ b/sound/soc/Makefile -@@ -26,6 +26,7 @@ obj-$(CONFIG_SND_SOC) += nuc900/ - obj-$(CONFIG_SND_SOC) += omap/ - obj-$(CONFIG_SND_SOC) += kirkwood/ - obj-$(CONFIG_SND_SOC) += pxa/ -+obj-$(CONFIG_SND_SOC) += ralink/ - obj-$(CONFIG_SND_SOC) += samsung/ - obj-$(CONFIG_SND_SOC) += s6000/ - obj-$(CONFIG_SND_SOC) += sh/ ---- /dev/null -+++ b/sound/soc/ralink/Kconfig -@@ -0,0 +1,15 @@ -+config SND_MT7620_SOC_I2S -+ depends on SOC_MT7620 && SND_SOC -+ select SND_SOC_GENERIC_DMAENGINE_PCM -+ tristate "SoC Audio (I2S protocol) for Ralink MT7620 SoC" -+ help -+ Say Y if you want to use I2S protocol and I2S codec on Ingenic MT7620 -+ based boards. 
-+ -+config SND_MT7620_SOC_WM8960 -+ tristate "SoC Audio support for Ralink WM8960" -+ select SND_MT7620_SOC_I2S -+ select SND_SOC_WM8960 -+ help -+ Say Y if you want to add support for ASoC audio on the Qi LB60 board -+ a.k.a Qi Ben NanoNote. ---- /dev/null -+++ b/sound/soc/ralink/Makefile -@@ -0,0 +1,11 @@ -+# -+# Jz4740 Platform Support -+# -+snd-soc-mt7620-i2s-objs := mt7620-i2s.o -+ -+obj-$(CONFIG_SND_MT7620_SOC_I2S) += snd-soc-mt7620-i2s.o -+ -+# Jz4740 Machine Support -+snd-soc-mt7620-wm8960-objs := mt7620-wm8960.o -+ -+obj-$(CONFIG_SND_MT7620_SOC_WM8960) += snd-soc-mt7620-wm8960.o ---- /dev/null -+++ b/sound/soc/ralink/mt7620-i2s.c -@@ -0,0 +1,466 @@ -+/* -+ * Copyright (C) 2010, Lars-Peter Clausen -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License as published by the -+ * Free Software Foundation; either version 2 of the License, or (at your -+ * option) any later version. -+ * -+ * You should have received a copy of the GNU General Public License along -+ * with this program; if not, write to the Free Software Foundation, Inc., -+ * 675 Mass Ave, Cambridge, MA 02139, USA. -+ * -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+ -+#include -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+ -+#define I2S_REG_CFG0 0x00 -+#define I2S_REG_CFG0_EN BIT(31) -+#define I2S_REG_CFG0_DMA_EN BIT(30) -+#define I2S_REG_CFG0_BYTE_SWAP BIT(28) -+#define I2S_REG_CFG0_TX_EN BIT(24) -+#define I2S_REG_CFG0_RX_EN BIT(20) -+#define I2S_REG_CFG0_SLAVE BIT(16) -+#define I2S_REG_CFG0_RX_THRES 12 -+#define I2S_REG_CFG0_TX_THRES 4 -+#define I2S_REG_CFG0_DFT_THRES (4 << I2S_REG_CFG0_RX_THRES) | \ -+ (4 << I2S_REG_CFG0_TX_THRES) -+ -+#define I2S_REG_INT_STATUS 0x04 -+#define I2S_REG_INT_EN 0x08 -+#define I2S_REG_FF_STATUS 0x0c -+#define I2S_REG_WREG 0x10 -+#define I2S_REG_RREG 0x14 -+#define I2S_REG_CFG1 0x18 -+ -+#define I2S_REG_DIVCMP 0x20 -+#define I2S_REG_DIVINT 0x24 -+#define I2S_REG_CLK_EN BIT(31) -+ -+struct mt7620_i2s { -+ struct resource *mem; -+ void __iomem *base; -+ dma_addr_t phys_base; -+ -+ struct snd_dmaengine_dai_dma_data playback_dma_data; -+ struct snd_dmaengine_dai_dma_data capture_dma_data; -+}; -+ -+static inline uint32_t mt7620_i2s_read(const struct mt7620_i2s *i2s, -+ unsigned int reg) -+{ -+ return readl(i2s->base + reg); -+} -+ -+static inline void mt7620_i2s_write(const struct mt7620_i2s *i2s, -+ unsigned int reg, uint32_t value) -+{ -+ //printk("i2s --> %p = 0x%08X\n", i2s->base + reg, value); -+ writel(value, i2s->base + reg); -+} -+ -+static int mt7620_i2s_startup(struct snd_pcm_substream *substream, -+ struct snd_soc_dai *dai) -+{ -+ struct mt7620_i2s *i2s = snd_soc_dai_get_drvdata(dai); -+ uint32_t cfg; -+ -+ if (dai->active) -+ return 0; -+ -+ cfg = mt7620_i2s_read(i2s, I2S_REG_CFG0); -+ cfg |= I2S_REG_CFG0_EN; -+ mt7620_i2s_write(i2s, I2S_REG_CFG0, cfg); -+ -+ return 0; -+} -+ -+static void mt7620_i2s_shutdown(struct snd_pcm_substream *substream, -+ struct snd_soc_dai *dai) -+{ -+ struct mt7620_i2s *i2s = snd_soc_dai_get_drvdata(dai); -+ uint32_t cfg; -+ -+ if (dai->active) -+ return; -+ -+ cfg = mt7620_i2s_read(i2s, I2S_REG_CFG0); -+ cfg &= ~I2S_REG_CFG0_EN; -+ mt7620_i2s_write(i2s, I2S_REG_CFG0, cfg); -+} -+ -+static int mt7620_i2s_trigger(struct snd_pcm_substream *substream, int cmd, -+ struct snd_soc_dai *dai) -+{ -+ struct mt7620_i2s *i2s = snd_soc_dai_get_drvdata(dai); -+ -+ uint32_t cfg; -+ uint32_t mask; -+ -+ 
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) -+ mask = I2S_REG_CFG0_TX_EN; -+ else -+ mask = I2S_REG_CFG0_RX_EN; -+ -+ cfg = mt7620_i2s_read(i2s, I2S_REG_CFG0); -+ -+ switch (cmd) { -+ case SNDRV_PCM_TRIGGER_START: -+ case SNDRV_PCM_TRIGGER_RESUME: -+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: -+ cfg |= mask; -+ break; -+ case SNDRV_PCM_TRIGGER_STOP: -+ case SNDRV_PCM_TRIGGER_SUSPEND: -+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH: -+ cfg &= ~mask; -+ break; -+ default: -+ return -EINVAL; -+ } -+ -+ if (cfg & (I2S_REG_CFG0_TX_EN | I2S_REG_CFG0_RX_EN)) -+ cfg |= I2S_REG_CFG0_DMA_EN; -+ else -+ cfg &= ~I2S_REG_CFG0_DMA_EN; -+ -+ mt7620_i2s_write(i2s, I2S_REG_CFG0, cfg); -+ -+ return 0; -+} -+ -+static int mt7620_i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) -+{ -+ struct mt7620_i2s *i2s = snd_soc_dai_get_drvdata(dai); -+ uint32_t cfg; -+ -+ cfg = mt7620_i2s_read(i2s, I2S_REG_CFG0); -+ -+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { -+ case SND_SOC_DAIFMT_CBS_CFS: -+ cfg |= I2S_REG_CFG0_SLAVE; -+ break; -+ case SND_SOC_DAIFMT_CBM_CFM: -+ cfg &= ~I2S_REG_CFG0_SLAVE; -+ break; -+ case SND_SOC_DAIFMT_CBM_CFS: -+ default: -+ return -EINVAL; -+ } -+ -+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { -+ case SND_SOC_DAIFMT_I2S: -+ case SND_SOC_DAIFMT_MSB: -+ cfg &= ~I2S_REG_CFG0_BYTE_SWAP; -+ break; -+ case SND_SOC_DAIFMT_LSB: -+ cfg |= I2S_REG_CFG0_BYTE_SWAP; -+ break; -+ default: -+ return -EINVAL; -+ } -+ -+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) { -+ case SND_SOC_DAIFMT_NB_NF: -+ break; -+ default: -+ return -EINVAL; -+ } -+ -+ mt7620_i2s_write(i2s, I2S_REG_CFG0, cfg); -+ -+ return 0; -+} -+ -+static int mt7620_i2s_hw_params(struct snd_pcm_substream *substream, -+ struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) -+{ -+ -+ return 0; -+} -+ -+unsigned long i2sMaster_inclk_int[11] = { -+ 78, 56, 52, 39, 28, 26, 19, 14, 13, 9, 6}; -+unsigned long i2sMaster_inclk_comp[11] = { -+ 64, 352, 42, 32, 176, 21, 272, 88, 10, 455, 261}; -+ -+ -+static int mt7620_i2s_set_sysclk(struct snd_soc_dai *dai, int clk_id, -+ unsigned int freq, int dir) -+{ -+ struct mt7620_i2s *i2s = snd_soc_dai_get_drvdata(dai); -+ -+ printk("Internal REFCLK with fractional division\n"); -+ -+ mt7620_i2s_write(i2s, I2S_REG_DIVINT, i2sMaster_inclk_int[7]); -+ mt7620_i2s_write(i2s, I2S_REG_DIVCMP, -+ i2sMaster_inclk_comp[7] | I2S_REG_CLK_EN); -+ -+/* struct mt7620_i2s *i2s = snd_soc_dai_get_drvdata(dai); -+ struct clk *parent; -+ int ret = 0; -+ -+ switch (clk_id) { -+ case JZ4740_I2S_CLKSRC_EXT: -+ parent = clk_get(NULL, "ext"); -+ clk_set_parent(i2s->clk_i2s, parent); -+ break; -+ case JZ4740_I2S_CLKSRC_PLL: -+ parent = clk_get(NULL, "pll half"); -+ clk_set_parent(i2s->clk_i2s, parent); -+ ret = clk_set_rate(i2s->clk_i2s, freq); -+ break; -+ default: -+ return -EINVAL; -+ } -+ clk_put(parent); -+ -+ return ret;*/ -+ return 0; -+} -+ -+static int mt7620_i2s_suspend(struct snd_soc_dai *dai) -+{ -+ struct mt7620_i2s *i2s = snd_soc_dai_get_drvdata(dai); -+ uint32_t cfg; -+ -+ if (dai->active) { -+ cfg = mt7620_i2s_read(i2s, I2S_REG_CFG0); -+ cfg &= ~I2S_REG_CFG0_TX_EN; -+ mt7620_i2s_write(i2s, I2S_REG_CFG0, cfg); -+ } -+ -+ return 0; -+} -+ -+static int mt7620_i2s_resume(struct snd_soc_dai *dai) -+{ -+ struct mt7620_i2s *i2s = snd_soc_dai_get_drvdata(dai); -+ uint32_t cfg; -+ -+ if (dai->active) { -+ cfg = mt7620_i2s_read(i2s, I2S_REG_CFG0); -+ cfg |= I2S_REG_CFG0_TX_EN; -+ mt7620_i2s_write(i2s, I2S_REG_CFG0, cfg); -+ } -+ -+ return 0; -+} -+ -+static void mt7620_i2c_init_pcm_config(struct mt7620_i2s *i2s) -+{ -+ struct 
snd_dmaengine_dai_dma_data *dma_data; -+ -+ /* Playback */ -+ dma_data = &i2s->playback_dma_data; -+ dma_data->maxburst = 16; -+ dma_data->slave_id = 2; //JZ4740_DMA_TYPE_AIC_TRANSMIT; -+ dma_data->addr = i2s->phys_base + I2S_REG_WREG; -+ -+ /* Capture */ -+ dma_data = &i2s->capture_dma_data; -+ dma_data->maxburst = 16; -+ dma_data->slave_id = 3; //JZ4740_DMA_TYPE_AIC_RECEIVE; -+ dma_data->addr = i2s->phys_base + I2S_REG_RREG; -+} -+ -+static int mt7620_i2s_dai_probe(struct snd_soc_dai *dai) -+{ -+ struct mt7620_i2s *i2s = snd_soc_dai_get_drvdata(dai); -+ uint32_t data; -+ -+ mt7620_i2c_init_pcm_config(i2s); -+ dai->playback_dma_data = &i2s->playback_dma_data; -+ dai->capture_dma_data = &i2s->capture_dma_data; -+ -+ /* set share pins to i2s/gpio mode and i2c mode */ -+ data = rt_sysc_r32(0x60); -+ data &= 0xFFFFFFE2; -+ data |= 0x00000018; -+ rt_sysc_w32(data, 0x60); -+ -+ printk("Internal REFCLK with fractional division\n"); -+ -+ mt7620_i2s_write(i2s, I2S_REG_CFG0, I2S_REG_CFG0_DFT_THRES); -+ mt7620_i2s_write(i2s, I2S_REG_CFG1, 0); -+ mt7620_i2s_write(i2s, I2S_REG_INT_EN, 0); -+ -+ mt7620_i2s_write(i2s, I2S_REG_DIVINT, i2sMaster_inclk_int[7]); -+ mt7620_i2s_write(i2s, I2S_REG_DIVCMP, -+ i2sMaster_inclk_comp[7] | I2S_REG_CLK_EN); -+ -+ return 0; -+} -+ -+static int mt7620_i2s_dai_remove(struct snd_soc_dai *dai) -+{ -+ return 0; -+} -+ -+static const struct snd_soc_dai_ops mt7620_i2s_dai_ops = { -+ .startup = mt7620_i2s_startup, -+ .shutdown = mt7620_i2s_shutdown, -+ .trigger = mt7620_i2s_trigger, -+ .hw_params = mt7620_i2s_hw_params, -+ .set_fmt = mt7620_i2s_set_fmt, -+ .set_sysclk = mt7620_i2s_set_sysclk, -+}; -+ -+#define JZ4740_I2S_FMTS (SNDRV_PCM_FMTBIT_S8 | \ -+ SNDRV_PCM_FMTBIT_S16_LE) -+ -+static struct snd_soc_dai_driver mt7620_i2s_dai = { -+ .probe = mt7620_i2s_dai_probe, -+ .remove = mt7620_i2s_dai_remove, -+ .playback = { -+ .channels_min = 1, -+ .channels_max = 2, -+ .rates = SNDRV_PCM_RATE_8000_48000, -+ .formats = JZ4740_I2S_FMTS, -+ }, -+ .capture = { -+ .channels_min = 2, -+ .channels_max = 2, -+ .rates = SNDRV_PCM_RATE_8000_48000, -+ .formats = JZ4740_I2S_FMTS, -+ }, -+ .symmetric_rates = 1, -+ .ops = &mt7620_i2s_dai_ops, -+ .suspend = mt7620_i2s_suspend, -+ .resume = mt7620_i2s_resume, -+}; -+ -+static const struct snd_pcm_hardware mt7620_pcm_hardware = { -+ .info = SNDRV_PCM_INFO_MMAP | -+ SNDRV_PCM_INFO_MMAP_VALID | -+ SNDRV_PCM_INFO_INTERLEAVED | -+ SNDRV_PCM_INFO_BLOCK_TRANSFER, -+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8, -+ .period_bytes_min = PAGE_SIZE, -+ .period_bytes_max = 64 * 1024, -+ .periods_min = 2, -+ .periods_max = 128, -+ .buffer_bytes_max = 128 * 1024, -+ .fifo_size = 32, -+}; -+ -+static const struct snd_dmaengine_pcm_config mt7620_dmaengine_pcm_config = { -+ .prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config, -+ .pcm_hardware = &mt7620_pcm_hardware, -+ .prealloc_buffer_size = 256 * PAGE_SIZE, -+}; -+ -+static const struct snd_soc_component_driver mt7620_i2s_component = { -+ .name = "mt7620-i2s", -+}; -+ -+static int mt7620_i2s_dev_probe(struct platform_device *pdev) -+{ -+ struct mt7620_i2s *i2s; -+ int ret; -+ -+ snd_dmaengine_pcm_register(&pdev->dev, -+ &mt7620_dmaengine_pcm_config, -+ SND_DMAENGINE_PCM_FLAG_COMPAT); -+ -+ i2s = kzalloc(sizeof(*i2s), GFP_KERNEL); -+ if (!i2s) -+ return -ENOMEM; -+ -+ i2s->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); -+ if (!i2s->mem) { -+ ret = -ENOENT; -+ goto err_free; -+ } -+ -+ i2s->mem = request_mem_region(i2s->mem->start, resource_size(i2s->mem), -+ pdev->name); -+ if 
(!i2s->mem) { -+ ret = -EBUSY; -+ goto err_free; -+ } -+ -+ i2s->base = ioremap_nocache(i2s->mem->start, resource_size(i2s->mem)); -+ if (!i2s->base) { -+ ret = -EBUSY; -+ goto err_release_mem_region; -+ } -+ -+ i2s->phys_base = i2s->mem->start; -+ -+ platform_set_drvdata(pdev, i2s); -+ ret = snd_soc_register_component(&pdev->dev, &mt7620_i2s_component, -+ &mt7620_i2s_dai, 1); -+ -+ if (!ret) { -+ dev_err(&pdev->dev, "loaded\n"); -+ return ret; -+ } -+ -+ dev_err(&pdev->dev, "Failed to register DAI\n"); -+ iounmap(i2s->base); -+ -+err_release_mem_region: -+ release_mem_region(i2s->mem->start, resource_size(i2s->mem)); -+err_free: -+ kfree(i2s); -+ -+ return ret; -+} -+ -+static int mt7620_i2s_dev_remove(struct platform_device *pdev) -+{ -+ struct mt7620_i2s *i2s = platform_get_drvdata(pdev); -+ -+ snd_soc_unregister_component(&pdev->dev); -+ -+ iounmap(i2s->base); -+ release_mem_region(i2s->mem->start, resource_size(i2s->mem)); -+ -+ kfree(i2s); -+ -+ snd_dmaengine_pcm_unregister(&pdev->dev); -+ -+ return 0; -+} -+ -+static const struct of_device_id mt7620_i2s_match[] = { -+ { .compatible = "ralink,mt7620a-i2s" }, -+ {}, -+}; -+MODULE_DEVICE_TABLE(of, mt7620_i2s_match); -+ -+static struct platform_driver mt7620_i2s_driver = { -+ .probe = mt7620_i2s_dev_probe, -+ .remove = mt7620_i2s_dev_remove, -+ .driver = { -+ .name = "mt7620-i2s", -+ .owner = THIS_MODULE, -+ .of_match_table = mt7620_i2s_match, -+ }, -+}; -+ -+module_platform_driver(mt7620_i2s_driver); -+ -+MODULE_AUTHOR("Lars-Peter Clausen, "); -+MODULE_DESCRIPTION("Ingenic JZ4740 SoC I2S driver"); -+MODULE_LICENSE("GPL"); -+MODULE_ALIAS("platform:mt7620-i2s"); ---- /dev/null -+++ b/sound/soc/ralink/mt7620-wm8960.c -@@ -0,0 +1,125 @@ -+/* -+ * Copyright (C) 2009, Lars-Peter Clausen -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * You should have received a copy of the GNU General Public License along -+ * with this program; if not, write to the Free Software Foundation, Inc., -+ * 675 Mass Ave, Cambridge, MA 02139, USA. 
-+ * -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+ -+static const struct snd_soc_dapm_widget mt7620_wm8960_widgets[] = { -+ SND_SOC_DAPM_SPK("Speaker", NULL), -+}; -+ -+static const struct snd_soc_dapm_route mt7620_wm8960_routes[] = { -+ {"Speaker", NULL, "HP_L"}, -+ {"Speaker", NULL, "HP_R"}, -+}; -+ -+#define MT7620_DAIFMT (SND_SOC_DAIFMT_I2S | \ -+ SND_SOC_DAIFMT_NB_NF | \ -+ SND_SOC_DAIFMT_CBM_CFM) -+ -+static int mt7620_wm8960_codec_init(struct snd_soc_pcm_runtime *rtd) -+{ -+ struct snd_soc_codec *codec = rtd->codec; -+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai; -+ struct snd_soc_dapm_context *dapm = &codec->dapm; -+ int ret; -+ -+ snd_soc_dapm_enable_pin(dapm, "HP_L"); -+ snd_soc_dapm_enable_pin(dapm, "HP_R"); -+ -+ ret = snd_soc_dai_set_fmt(cpu_dai, MT7620_DAIFMT); -+ if (ret < 0) { -+ dev_err(codec->dev, "Failed to set cpu dai format: %d\n", ret); -+ return ret; -+ } -+ -+ return 0; -+} -+ -+static struct snd_soc_dai_link mt7620_wm8960_dai = { -+ .name = "mt7620", -+ .stream_name = "mt7620", -+ .init = mt7620_wm8960_codec_init, -+ .codec_dai_name = "wm8960-hifi", -+}; -+ -+static struct snd_soc_card mt7620_wm8960 = { -+ .name = "mt7620-wm8960", -+ .owner = THIS_MODULE, -+ .dai_link = &mt7620_wm8960_dai, -+ .num_links = 1, -+ -+ .dapm_widgets = mt7620_wm8960_widgets, -+ .num_dapm_widgets = ARRAY_SIZE(mt7620_wm8960_widgets), -+ .dapm_routes = mt7620_wm8960_routes, -+ .num_dapm_routes = ARRAY_SIZE(mt7620_wm8960_routes), -+}; -+ -+static int mt7620_wm8960_probe(struct platform_device *pdev) -+{ -+ struct device_node *np = pdev->dev.of_node; -+ struct snd_soc_card *card = &mt7620_wm8960; -+ int ret; -+ -+ card->dev = &pdev->dev; -+ -+ mt7620_wm8960_dai.cpu_of_node = of_parse_phandle(np, "cpu-dai", 0); -+ mt7620_wm8960_dai.codec_of_node = of_parse_phandle(np, "codec-dai", 0); -+ mt7620_wm8960_dai.platform_of_node = mt7620_wm8960_dai.cpu_of_node; -+ -+ ret = snd_soc_register_card(card); -+ if (ret) { -+ dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n", -+ ret); -+ } -+ return ret; -+} -+ -+static int mt7620_wm8960_remove(struct platform_device *pdev) -+{ -+ struct snd_soc_card *card = platform_get_drvdata(pdev); -+ -+ snd_soc_unregister_card(card); -+ return 0; -+} -+ -+static const struct of_device_id mt7620_audio_match[] = { -+ { .compatible = "ralink,wm8960-audio" }, -+ {}, -+}; -+MODULE_DEVICE_TABLE(of, mt7620_audio_match); -+ -+static struct platform_driver mt7620_wm8960_driver = { -+ .driver = { -+ .name = "wm8960-audio", -+ .owner = THIS_MODULE, -+ .of_match_table = mt7620_audio_match, -+ }, -+ .probe = mt7620_wm8960_probe, -+ .remove = mt7620_wm8960_remove, -+}; -+ -+module_platform_driver(mt7620_wm8960_driver); -+ -+MODULE_AUTHOR("Lars-Peter Clausen "); -+MODULE_DESCRIPTION("ALSA SoC QI LB60 Audio support"); -+MODULE_LICENSE("GPL v2"); -+MODULE_ALIAS("platform:qi-lb60-audio"); ---- a/arch/mips/ralink/of.c -+++ b/arch/mips/ralink/of.c -@@ -15,6 +15,7 @@ - #include - #include - #include -+#include - #include - #include - -@@ -25,6 +26,7 @@ - #include "common.h" - - __iomem void *rt_sysc_membase; -+EXPORT_SYMBOL(rt_sysc_membase); - __iomem void *rt_memc_membase; - - extern struct boot_param_header __dtb_start; ---- a/sound/soc/soc-io.c -+++ b/sound/soc/soc-io.c -@@ -19,7 +19,6 @@ - - #include - --#ifdef CONFIG_REGMAP - static int hw_write(struct snd_soc_codec *codec, unsigned int reg, - unsigned int value) - { -@@ -161,12 +160,3 @@ int snd_soc_codec_set_cache_io(struct sn - return 
PTR_RET(codec->control_data); - } - EXPORT_SYMBOL_GPL(snd_soc_codec_set_cache_io); --#else --int snd_soc_codec_set_cache_io(struct snd_soc_codec *codec, -- int addr_bits, int data_bits, -- enum snd_soc_control_type control) --{ -- return -ENOTSUPP; --} --EXPORT_SYMBOL_GPL(snd_soc_codec_set_cache_io); --#endif diff --git a/target/linux/ramips/patches-3.10/0500-spi-mt7621.patch b/target/linux/ramips/patches-3.10/0500-spi-mt7621.patch deleted file mode 100644 index b54c6e96dd..0000000000 --- a/target/linux/ramips/patches-3.10/0500-spi-mt7621.patch +++ /dev/null @@ -1,334 +0,0 @@ ---- a/drivers/spi/spi-rt2880.c -+++ b/drivers/spi/spi-rt2880.c -@@ -21,8 +21,11 @@ - #include - #include - #include -+#include - #include - -+#include -+ - #define DRIVER_NAME "spi-rt2880" - /* only one slave is supported*/ - #define RALINK_NUM_CHIPSELECTS 1 -@@ -63,6 +66,25 @@ - /* SPIFIFOSTAT register bit field */ - #define SPIFIFOSTAT_TXFULL BIT(17) - -+#define MT7621_SPI_TRANS 0x00 -+#define SPITRANS_BUSY BIT(16) -+#define MT7621_SPI_OPCODE 0x04 -+#define MT7621_SPI_DATA0 0x08 -+#define SPI_CTL_TX_RX_CNT_MASK 0xff -+#define SPI_CTL_START BIT(8) -+#define MT7621_SPI_POLAR 0x38 -+#define MT7621_SPI_MASTER 0x28 -+#define MT7621_SPI_SPACE 0x3c -+ -+struct rt2880_spi; -+ -+struct rt2880_spi_ops { -+ void (*init_hw)(struct rt2880_spi *rs); -+ void (*set_cs)(struct rt2880_spi *rs, int enable); -+ int (*baudrate_set)(struct spi_device *spi, unsigned int speed); -+ unsigned int (*write_read)(struct spi_device *spi, struct list_head *list, struct spi_transfer *xfer); -+}; -+ - struct rt2880_spi { - struct spi_master *master; - void __iomem *base; -@@ -70,6 +92,8 @@ struct rt2880_spi { - unsigned int speed; - struct clk *clk; - spinlock_t lock; -+ -+ struct rt2880_spi_ops *ops; - }; - - static inline struct rt2880_spi *spidev_to_rt2880_spi(struct spi_device *spi) -@@ -149,6 +173,17 @@ static int rt2880_spi_baudrate_set(struc - return 0; - } - -+static int mt7621_spi_baudrate_set(struct spi_device *spi, unsigned int speed) -+{ -+/* u32 master = rt2880_spi_read(rs, MT7621_SPI_MASTER); -+ -+ // set default clock to hclk/5 -+ master &= ~(0xfff << 16); -+ master |= 0x3 << 16; -+*/ -+ return 0; -+} -+ - /* - * called only when no transfer is active on the bus - */ -@@ -164,7 +199,7 @@ rt2880_spi_setup_transfer(struct spi_dev - - if (rs->speed != speed) { - dev_dbg(&spi->dev, "speed_hz:%u\n", speed); -- rc = rt2880_spi_baudrate_set(spi, speed); -+ rc = rs->ops->baudrate_set(spi, speed); - if (rc) - return rc; - } -@@ -180,6 +215,17 @@ static void rt2880_spi_set_cs(struct rt2 - rt2880_spi_setbits(rs, RAMIPS_SPI_CTL, SPICTL_SPIENA); - } - -+static void mt7621_spi_set_cs(struct rt2880_spi *rs, int enable) -+{ -+ u32 polar = rt2880_spi_read(rs, MT7621_SPI_POLAR); -+ -+ if (enable) -+ polar |= 1; -+ else -+ polar &= ~1; -+ rt2880_spi_write(rs, MT7621_SPI_POLAR, polar); -+} -+ - static inline int rt2880_spi_wait_till_ready(struct rt2880_spi *rs) - { - int i; -@@ -198,8 +244,26 @@ static inline int rt2880_spi_wait_till_r - return -ETIMEDOUT; - } - -+static inline int mt7621_spi_wait_till_ready(struct rt2880_spi *rs) -+{ -+ int i; -+ -+ for (i = 0; i < RALINK_SPI_WAIT_MAX_LOOP; i++) { -+ u32 status; -+ -+ status = rt2880_spi_read(rs, MT7621_SPI_TRANS); -+ if ((status & SPITRANS_BUSY) == 0) { -+ return 0; -+ } -+ cpu_relax(); -+ udelay(1); -+ } -+ -+ return -ETIMEDOUT; -+} -+ - static unsigned int --rt2880_spi_write_read(struct spi_device *spi, struct spi_transfer *xfer) -+rt2880_spi_write_read(struct spi_device *spi, struct list_head 
*list, struct spi_transfer *xfer) - { - struct rt2880_spi *rs = spidev_to_rt2880_spi(spi); - unsigned count = 0; -@@ -239,6 +303,100 @@ out: - return count; - } - -+static unsigned int -+mt7621_spi_write_read(struct spi_device *spi, struct list_head *list, struct spi_transfer *xfer) -+{ -+ struct rt2880_spi *rs = spidev_to_rt2880_spi(spi); -+ struct spi_transfer *next = NULL; -+ const u8 *tx = xfer->tx_buf; -+ u8 *rx = NULL; -+ u32 trans; -+ int len = xfer->len; -+ -+ if (!tx) -+ return 0; -+ -+ if (!list_is_last(&xfer->transfer_list, list)) { -+ next = list_entry(xfer->transfer_list.next, struct spi_transfer, transfer_list); -+ rx = next->rx_buf; -+ } -+ -+ trans = rt2880_spi_read(rs, MT7621_SPI_TRANS); -+ trans &= ~SPI_CTL_TX_RX_CNT_MASK; -+ -+ if (tx) { -+ u32 data0 = 0, opcode = 0; -+ -+ switch (xfer->len) { -+ case 8: -+ data0 |= tx[7] << 24; -+ case 7: -+ data0 |= tx[6] << 16; -+ case 6: -+ data0 |= tx[5] << 8; -+ case 5: -+ data0 |= tx[4]; -+ case 4: -+ opcode |= tx[3] << 8; -+ case 3: -+ opcode |= tx[2] << 16; -+ case 2: -+ opcode |= tx[1] << 24; -+ case 1: -+ opcode |= tx[0]; -+ break; -+ -+ default: -+ dev_err(&spi->dev, "trying to write too many bytes: %d\n", next->len); -+ return -EINVAL; -+ } -+ -+ rt2880_spi_write(rs, MT7621_SPI_DATA0, data0); -+ rt2880_spi_write(rs, MT7621_SPI_OPCODE, opcode); -+ trans |= xfer->len; -+ } -+ -+ if (rx) -+ trans |= (next->len << 4); -+ rt2880_spi_write(rs, MT7621_SPI_TRANS, trans); -+ trans |= SPI_CTL_START; -+ rt2880_spi_write(rs, MT7621_SPI_TRANS, trans); -+ -+ mt7621_spi_wait_till_ready(rs); -+ -+ if (rx) { -+ u32 data0 = rt2880_spi_read(rs, MT7621_SPI_DATA0); -+ u32 opcode = rt2880_spi_read(rs, MT7621_SPI_OPCODE); -+ -+ switch (next->len) { -+ case 8: -+ rx[7] = (opcode >> 24) & 0xff; -+ case 7: -+ rx[6] = (opcode >> 16) & 0xff; -+ case 6: -+ rx[5] = (opcode >> 8) & 0xff; -+ case 5: -+ rx[4] = opcode & 0xff; -+ case 4: -+ rx[3] = (data0 >> 24) & 0xff; -+ case 3: -+ rx[2] = (data0 >> 16) & 0xff; -+ case 2: -+ rx[1] = (data0 >> 8) & 0xff; -+ case 1: -+ rx[0] = data0 & 0xff; -+ break; -+ -+ default: -+ dev_err(&spi->dev, "trying to read too many bytes: %d\n", next->len); -+ return -EINVAL; -+ } -+ len += next->len; -+ } -+ -+ return len; -+} -+ - static int rt2880_spi_transfer_one_message(struct spi_master *master, - struct spi_message *m) - { -@@ -280,25 +438,25 @@ static int rt2880_spi_transfer_one_messa - } - - if (!cs_active) { -- rt2880_spi_set_cs(rs, 1); -+ rs->ops->set_cs(rs, 1); - cs_active = 1; - } - - if (t->len) -- m->actual_length += rt2880_spi_write_read(spi, t); -+ m->actual_length += rs->ops->write_read(spi, &m->transfers, t); - - if (t->delay_usecs) - udelay(t->delay_usecs); - - if (t->cs_change) { -- rt2880_spi_set_cs(rs, 0); -+ rs->ops->set_cs(rs, 0); - cs_active = 0; - } - } - - msg_done: - if (cs_active) -- rt2880_spi_set_cs(rs, 0); -+ rs->ops->set_cs(rs, 0); - - m->status = status; - spi_finalize_current_message(master); -@@ -334,8 +492,41 @@ static void rt2880_spi_reset(struct rt28 - rt2880_spi_write(rs, RAMIPS_SPI_CTL, SPICTL_HIZSDO | SPICTL_SPIENA); - } - -+static void mt7621_spi_reset(struct rt2880_spi *rs) -+{ -+ u32 master = rt2880_spi_read(rs, MT7621_SPI_MASTER); -+ -+ master &= ~(0xfff << 16); -+ master |= 3 << 16; -+ -+ master |= 7 << 29; -+ rt2880_spi_write(rs, MT7621_SPI_MASTER, master); -+} -+ -+static struct rt2880_spi_ops spi_ops[] = { -+ { -+ .init_hw = rt2880_spi_reset, -+ .set_cs = rt2880_spi_set_cs, -+ .baudrate_set = rt2880_spi_baudrate_set, -+ .write_read = rt2880_spi_write_read, -+ }, { -+ .init_hw = 
mt7621_spi_reset, -+ .set_cs = mt7621_spi_set_cs, -+ .baudrate_set = mt7621_spi_baudrate_set, -+ .write_read = mt7621_spi_write_read, -+ }, -+}; -+ -+static const struct of_device_id rt2880_spi_match[] = { -+ { .compatible = "ralink,rt2880-spi", .data = &spi_ops[0]}, -+ { .compatible = "ralink,mt7621-spi", .data = &spi_ops[1] }, -+ {}, -+}; -+MODULE_DEVICE_TABLE(of, rt2880_spi_match); -+ - static int rt2880_spi_probe(struct platform_device *pdev) - { -+ const struct of_device_id *match; - struct spi_master *master; - struct rt2880_spi *rs; - unsigned long flags; -@@ -344,6 +535,10 @@ static int rt2880_spi_probe(struct platf - int status = 0; - struct clk *clk; - -+ match = of_match_device(rt2880_spi_match, &pdev->dev); -+ if (!match) -+ return -EINVAL; -+ - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - base = devm_ioremap_resource(&pdev->dev, r); - if (IS_ERR(base)) -@@ -382,12 +577,13 @@ static int rt2880_spi_probe(struct platf - rs->clk = clk; - rs->master = master; - rs->sys_freq = clk_get_rate(rs->clk); -+ rs->ops = (struct rt2880_spi_ops *) match->data; - dev_dbg(&pdev->dev, "sys_freq: %u\n", rs->sys_freq); - spin_lock_irqsave(&rs->lock, flags); - - device_reset(&pdev->dev); - -- rt2880_spi_reset(rs); -+ rs->ops->init_hw(rs); - - return spi_register_master(master); - } -@@ -408,12 +604,6 @@ static int rt2880_spi_remove(struct plat - - MODULE_ALIAS("platform:" DRIVER_NAME); - --static const struct of_device_id rt2880_spi_match[] = { -- { .compatible = "ralink,rt2880-spi" }, -- {}, --}; --MODULE_DEVICE_TABLE(of, rt2880_spi_match); -- - static struct platform_driver rt2880_spi_driver = { - .driver = { - .name = DRIVER_NAME, diff --git a/target/linux/ramips/patches-3.10/0501-MIPS-increase-GIC_INTR_MAX.patch b/target/linux/ramips/patches-3.10/0501-MIPS-increase-GIC_INTR_MAX.patch deleted file mode 100644 index 35ac5ed572..0000000000 --- a/target/linux/ramips/patches-3.10/0501-MIPS-increase-GIC_INTR_MAX.patch +++ /dev/null @@ -1,21 +0,0 @@ -From e5327a1c6969316370af5cae7cfe6b8163178575 Mon Sep 17 00:00:00 2001 -From: John Crispin -Date: Mon, 2 Dec 2013 16:07:23 +0100 -Subject: [PATCH 500/507] MIPS: increase GIC_INTR_MAX - -Signed-off-by: John Crispin ---- - arch/mips/include/asm/gic.h | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/arch/mips/include/asm/gic.h -+++ b/arch/mips/include/asm/gic.h -@@ -19,7 +19,7 @@ - #define GIC_TRIG_EDGE 1 - #define GIC_TRIG_LEVEL 0 - --#define GIC_NUM_INTRS (24 + NR_CPUS * 2) -+#define GIC_NUM_INTRS (56 + NR_CPUS * 2) - - #define MSK(n) ((1 << (n)) - 1) - #define REG32(addr) (*(volatile unsigned int *) (addr)) diff --git a/target/linux/ramips/patches-3.10/0502-MIPS-ralink-add-MT7621-support.patch b/target/linux/ramips/patches-3.10/0502-MIPS-ralink-add-MT7621-support.patch deleted file mode 100644 index 83a48f37c1..0000000000 --- a/target/linux/ramips/patches-3.10/0502-MIPS-ralink-add-MT7621-support.patch +++ /dev/null @@ -1,669 +0,0 @@ -From 99342a0481d49b6e1ade90fdb02f597cb75f103f Mon Sep 17 00:00:00 2001 -From: John Crispin -Date: Mon, 2 Dec 2013 16:11:09 +0100 -Subject: [PATCH 502/507] MIPS: ralink: add MT7621 support - -Signed-off-by: John Crispin ---- - arch/mips/include/asm/mach-ralink/mt7621.h | 39 +++++ - arch/mips/ralink/Kconfig | 18 ++ - arch/mips/ralink/Makefile | 7 +- - arch/mips/ralink/Platform | 5 + - arch/mips/ralink/irq-gic.c | 255 ++++++++++++++++++++++++++++ - arch/mips/ralink/malta-amon.c | 81 +++++++++ - arch/mips/ralink/mt7621.c | 186 ++++++++++++++++++++ - 7 files changed, 590 insertions(+), 1 deletion(-) - 
create mode 100644 arch/mips/include/asm/mach-ralink/mt7621.h - create mode 100644 arch/mips/ralink/irq-gic.c - create mode 100644 arch/mips/ralink/malta-amon.c - create mode 100644 arch/mips/ralink/mt7621.c - ---- /dev/null -+++ b/arch/mips/include/asm/mach-ralink/mt7621.h -@@ -0,0 +1,39 @@ -+/* -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation. -+ * -+ * Parts of this file are based on Ralink's 2.6.21 BSP -+ * -+ * Copyright (C) 2008-2011 Gabor Juhos -+ * Copyright (C) 2008 Imre Kaloz -+ * Copyright (C) 2013 John Crispin -+ */ -+ -+#ifndef _MT7621_REGS_H_ -+#define _MT7621_REGS_H_ -+ -+#define MT7621_SYSC_BASE 0x1E000000 -+ -+#define SYSC_REG_CHIP_NAME0 0x00 -+#define SYSC_REG_CHIP_NAME1 0x04 -+#define SYSC_REG_CHIP_REV 0x0c -+#define SYSC_REG_SYSTEM_CONFIG0 0x10 -+#define SYSC_REG_SYSTEM_CONFIG1 0x14 -+ -+#define CHIP_REV_PKG_MASK 0x1 -+#define CHIP_REV_PKG_SHIFT 16 -+#define CHIP_REV_VER_MASK 0xf -+#define CHIP_REV_VER_SHIFT 8 -+#define CHIP_REV_ECO_MASK 0xf -+ -+#define MT7621_DRAM_BASE 0x0 -+#define MT7621_DDR2_SIZE_MIN 32 -+#define MT7621_DDR2_SIZE_MAX 256 -+ -+#define MT7621_CHIP_NAME0 0x3637544D -+#define MT7621_CHIP_NAME1 0x20203132 -+ -+#define MIPS_GIC_IRQ_BASE (MIPS_CPU_IRQ_BASE + 8) -+ -+#endif ---- a/arch/mips/ralink/Kconfig -+++ b/arch/mips/ralink/Kconfig -@@ -1,5 +1,10 @@ - if RALINK - -+config IRQ_INTC -+ bool -+ default y -+ depends on !SOC_MT7621 -+ - config CLKEVT_RT3352 - bool "Systick Clockevent source" - depends on SOC_RT305X || SOC_MT7620 -@@ -35,6 +40,15 @@ choice - select USB_ARCH_HAS_EHCI - select HW_HAS_PCI - -+ config SOC_MT7621 -+ bool "MT7621" -+ select MIPS_CPU_SCACHE -+ select SYS_SUPPORTS_MULTITHREADING -+ select SYS_SUPPORTS_SMP -+ select SYS_SUPPORTS_MIPS_CMP -+ select IRQ_GIC -+ select HW_HAS_PCI -+ - endchoice - - choice -@@ -62,6 +76,10 @@ choice - bool "MT7620A eval kit" - depends on SOC_MT7620 - -+ config DTB_MT7621_EVAL -+ bool "MT7621 eval kit" -+ depends on SOC_MT7621 -+ - endchoice - - endif ---- a/arch/mips/ralink/Makefile -+++ b/arch/mips/ralink/Makefile -@@ -6,7 +6,11 @@ - # Copyright (C) 2009-2011 Gabor Juhos - # Copyright (C) 2013 John Crispin - --obj-y := prom.o of.o reset.o clk.o irq.o timer.o -+obj-y := prom.o of.o reset.o clk.o timer.o -+ -+obj-$(CONFIG_IRQ_INTC) += irq.o -+obj-$(CONFIG_IRQ_GIC) += irq-gic.o -+obj-$(CONFIG_MIPS_MT_SMP) += malta-amon.o - - obj-$(CONFIG_CLKEVT_RT3352) += cevt-rt3352.o - -@@ -16,6 +20,7 @@ obj-$(CONFIG_SOC_RT288X) += rt288x.o - obj-$(CONFIG_SOC_RT305X) += rt305x.o - obj-$(CONFIG_SOC_RT3883) += rt3883.o - obj-$(CONFIG_SOC_MT7620) += mt7620.o -+obj-$(CONFIG_SOC_MT7621) += mt7621.o - - obj-$(CONFIG_EARLY_PRINTK) += early_printk.o - ---- a/arch/mips/ralink/Platform -+++ b/arch/mips/ralink/Platform -@@ -26,3 +26,10 @@ cflags-$(CONFIG_SOC_RT3883) += -I$(srctr - # Ralink MT7620 - # - load-$(CONFIG_SOC_MT7620) += 0xffffffff80000000 -+cflags-$(CONFIG_SOC_MT7620) += -I$(srctree)/arch/mips/include/asm/mach-ralink/mt7620 -+ -+# -+# Ralink MT7621 -+# -+load-$(CONFIG_SOC_MT7621) += 0xffffffff80001000 -+cflags-$(CONFIG_SOC_MT7620) += -I$(srctree)/arch/mips/include/asm/mach-ralink/mt7621 ---- /dev/null -+++ b/arch/mips/ralink/irq-gic.c -@@ -0,0 +1,255 @@ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+ -+#include -+#include -+ -+#include -+#include -+ -+#include -+ -+static unsigned 
long _gcmp_base; -+static int gic_resched_int_base = 56; -+static int gic_call_int_base = 60; -+static struct irq_chip *irq_gic; -+ -+#define GIC_RESCHED_INT(cpu) (gic_resched_int_base+(cpu)) -+#define GIC_CALL_INT(cpu) (gic_call_int_base+(cpu)) -+ -+static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = { -+ { 0, GIC_CPU_INT0, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT }, //0 -+ { 0, GIC_CPU_INT0, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT }, -+ { GIC_UNUSED }, -+ { 0, GIC_CPU_INT3, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT }, //FE -+ { 0, GIC_CPU_INT4, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT }, //PCIE0 -+ { GIC_UNUSED}, -+ { 0, GIC_CPU_INT0, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT }, -+ { 0, GIC_CPU_INT0, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT }, -+ { 0, GIC_CPU_INT0, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT }, -+ { 0, GIC_CPU_INT0, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT }, //10 -+ { 0, GIC_CPU_INT0, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT }, -+ { 0, GIC_CPU_INT0, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT }, -+ { 0, GIC_CPU_INT0, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT }, -+ { GIC_UNUSED }, -+ { GIC_UNUSED }, -+ { 0, GIC_CPU_INT0, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT }, -+ { 0, GIC_CPU_INT0, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT }, -+ { 0, GIC_CPU_INT0, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT }, -+ { 0, GIC_CPU_INT0, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT }, -+ { 0, GIC_CPU_INT0, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT }, //20 -+ { 0, GIC_CPU_INT0, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT }, -+ { 0, GIC_CPU_INT0, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT }, -+ { GIC_UNUSED }, -+ { 0, GIC_CPU_INT0, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT }, -+ { 0, GIC_CPU_INT0, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT }, //25 -+ { 0, GIC_CPU_INT0, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT }, -+ { 0, GIC_CPU_INT0, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT }, -+ { 0, GIC_CPU_INT0, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT }, -+ { 0, GIC_CPU_INT0, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT }, -+ { 0, GIC_CPU_INT0, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },//30 -+ { 0, GIC_CPU_INT0, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT }, -+}; -+ -+static struct gic_intr_map ipi_intr_map[8] = { -+ { 0, GIC_CPU_INT1, GIC_POL_POS, GIC_TRIG_EDGE, GIC_FLAG_IPI }, -+ { 1, GIC_CPU_INT1, GIC_POL_POS, GIC_TRIG_EDGE, GIC_FLAG_IPI }, -+ { 2, GIC_CPU_INT1, GIC_POL_POS, GIC_TRIG_EDGE, GIC_FLAG_IPI }, -+ { 3, GIC_CPU_INT1, GIC_POL_POS, GIC_TRIG_EDGE, GIC_FLAG_IPI }, -+ { 0, GIC_CPU_INT2, GIC_POL_POS, GIC_TRIG_EDGE, GIC_FLAG_IPI }, -+ { 1, GIC_CPU_INT2, GIC_POL_POS, GIC_TRIG_EDGE, GIC_FLAG_IPI }, -+ { 2, GIC_CPU_INT2, GIC_POL_POS, GIC_TRIG_EDGE, GIC_FLAG_IPI }, -+ { 3, GIC_CPU_INT2, GIC_POL_POS, GIC_TRIG_EDGE, GIC_FLAG_IPI }, -+}; -+ -+static irqreturn_t -+ipi_resched_interrupt(int irq, void *dev_id) -+{ -+ scheduler_ipi(); -+ -+ return IRQ_HANDLED; -+} -+ -+static irqreturn_t -+ipi_call_interrupt(int irq, void *dev_id) -+{ -+ smp_call_function_interrupt(); -+ -+ return IRQ_HANDLED; -+} -+ -+static struct irqaction irq_resched = { -+ .handler = ipi_resched_interrupt, -+ .flags = IRQF_DISABLED|IRQF_PERCPU, -+ .name = "ipi resched" -+}; -+ -+static struct irqaction irq_call = { -+ .handler = ipi_call_interrupt, -+ .flags = IRQF_DISABLED|IRQF_PERCPU, -+ .name = "ipi call" -+}; -+ -+void 
-+gic_irq_ack(struct irq_data *d) -+{ -+ int irq = (d->irq - gic_irq_base); -+ -+ GIC_CLR_INTR_MASK(irq); -+ -+ if (gic_irq_flags[irq] & GIC_TRIG_EDGE) -+ GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), irq); -+} -+ -+void -+gic_finish_irq(struct irq_data *d) -+{ -+ GIC_SET_INTR_MASK(d->irq - gic_irq_base); -+} -+ -+void __init -+gic_platform_init(int irqs, struct irq_chip *irq_controller) -+{ -+ irq_gic = irq_controller; -+} -+ -+static void -+vi_gic_irqdispatch(void) -+{ -+ int irq = gic_get_int(); -+ -+ if (irq >= 0) -+ do_IRQ(MIPS_GIC_IRQ_BASE + irq); -+} -+ -+static void -+vi_timer_irqdispatch(void) -+{ -+ do_IRQ(cp0_compare_irq); -+} -+ -+unsigned int -+plat_ipi_call_int_xlate(unsigned int cpu) -+{ -+ return GIC_CALL_INT(cpu); -+} -+ -+unsigned int -+plat_ipi_resched_int_xlate(unsigned int cpu) -+{ -+ return GIC_RESCHED_INT(cpu); -+} -+ -+asmlinkage void -+plat_irq_dispatch(void) -+{ -+ unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM; -+ -+ if (pending & CAUSEF_IP7) -+ do_IRQ(cp0_compare_irq); -+ else if (pending & (CAUSEF_IP4 | CAUSEF_IP3)) -+ vi_gic_irqdispatch(); -+ else -+ spurious_interrupt(); -+} -+ -+unsigned int __cpuinit -+get_c0_compare_int(void) -+{ -+ return CP0_LEGACY_COMPARE_IRQ; -+} -+ -+static int -+gic_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) -+{ -+ irq_set_chip_and_handler(irq, irq_gic, handle_percpu_irq); -+ -+ return 0; -+} -+ -+static const struct irq_domain_ops irq_domain_ops = { -+ .xlate = irq_domain_xlate_onecell, -+ .map = gic_map, -+}; -+ -+static int __init -+of_gic_init(struct device_node *node, -+ struct device_node *parent) -+{ -+ struct irq_domain *domain; -+ struct resource gcmp = { 0 }, gic = { 0 }; -+ unsigned int gic_rev; -+ int i; -+ -+ if (of_address_to_resource(node, 0, &gic)) -+ panic("Failed to get gic memory range"); -+ if (request_mem_region(gic.start, resource_size(&gic), -+ gic.name) < 0) -+ panic("Failed to request gic memory"); -+ if (of_address_to_resource(node, 2, &gcmp)) -+ panic("Failed to get gic memory range"); -+ if (request_mem_region(gcmp.start, resource_size(&gcmp), -+ gcmp.name) < 0) -+ panic("Failed to request gcmp memory"); -+ -+ _gcmp_base = (unsigned long) ioremap_nocache(gcmp.start, resource_size(&gcmp)); -+ if (!_gcmp_base) -+ panic("Failed to remap gcmp memory\n"); -+ -+ if ((GCMPGCB(GCMPB) & GCMP_GCB_GCMPB_GCMPBASE_MSK) != gcmp.start) -+ panic("Failed to find gcmp core\n"); -+ -+ /* tell the gcmp where to find the gic */ -+ GCMPGCB(GICBA) = gic.start | GCMP_GCB_GICBA_EN_MSK; -+ gic_present = 1; -+ if (cpu_has_vint) { -+ set_vi_handler(3, vi_gic_irqdispatch); -+ set_vi_handler(4, vi_gic_irqdispatch); -+ set_vi_handler(7, vi_timer_irqdispatch); -+ } -+ -+ memcpy(&gic_intr_map[gic_resched_int_base], ipi_intr_map, sizeof(ipi_intr_map)); -+ gic_init(gic.start, resource_size(&gic), gic_intr_map, -+ ARRAY_SIZE(gic_intr_map), MIPS_GIC_IRQ_BASE); -+ -+ GICREAD(GIC_REG(SHARED, GIC_SH_REVISIONID), gic_rev); -+ pr_info("gic: revision %d.%d\n", (gic_rev >> 8) & 0xff, gic_rev & 0xff); -+ -+ domain = irq_domain_add_legacy(node, GIC_NUM_INTRS, MIPS_GIC_IRQ_BASE, -+ 0, &irq_domain_ops, NULL); -+ if (!domain) -+ panic("Failed to add irqdomain"); -+ -+ for (i = 0; i < NR_CPUS; i++) { -+ setup_irq(MIPS_GIC_IRQ_BASE + GIC_RESCHED_INT(i), &irq_resched); -+ setup_irq(MIPS_GIC_IRQ_BASE + GIC_CALL_INT(i), &irq_call); -+ } -+ -+ change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | STATUSF_IP6 | -+ STATUSF_IP7); -+ return 0; -+} -+ -+static struct of_device_id __initdata of_irq_ids[] = { -+ { .compatible = 
"mti,cpu-interrupt-controller", .data = mips_cpu_intc_init }, -+ { .compatible = "ralink,mt7621-gic", .data = of_gic_init }, -+ {}, -+}; -+ -+void __init -+arch_init_irq(void) -+{ -+ of_irq_init(of_irq_ids); -+} ---- /dev/null -+++ b/arch/mips/ralink/malta-amon.c -@@ -0,0 +1,81 @@ -+/* -+ * Copyright (C) 2007 MIPS Technologies, Inc. -+ * All rights reserved. -+ -+ * This program is free software; you can distribute it and/or modify it -+ * under the terms of the GNU General Public License (Version 2) as -+ * published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful, but WITHOUT -+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -+ * for more details. -+ * -+ * You should have received a copy of the GNU General Public License along -+ * with this program; if not, write to the Free Software Foundation, Inc., -+ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. -+ * -+ * Arbitrary Monitor interface -+ */ -+ -+#include -+#include -+#include -+ -+#include -+#include -+#include -+ -+int amon_cpu_avail(int cpu) -+{ -+ struct cpulaunch *launch = (struct cpulaunch *)CKSEG0ADDR(CPULAUNCH); -+ -+ if (cpu < 0 || cpu >= NCPULAUNCH) { -+ pr_debug("avail: cpu%d is out of range\n", cpu); -+ return 0; -+ } -+ -+ launch += cpu; -+ if (!(launch->flags & LAUNCH_FREADY)) { -+ pr_debug("avail: cpu%d is not ready\n", cpu); -+ return 0; -+ } -+ if (launch->flags & (LAUNCH_FGO|LAUNCH_FGONE)) { -+ pr_debug("avail: too late.. cpu%d is already gone\n", cpu); -+ return 0; -+ } -+ -+ return 1; -+} -+ -+void amon_cpu_start(int cpu, -+ unsigned long pc, unsigned long sp, -+ unsigned long gp, unsigned long a0) -+{ -+ volatile struct cpulaunch *launch = -+ (struct cpulaunch *)CKSEG0ADDR(CPULAUNCH); -+ -+ if (!amon_cpu_avail(cpu)) -+ return; -+ if (cpu == smp_processor_id()) { -+ pr_debug("launch: I am cpu%d!\n", cpu); -+ return; -+ } -+ launch += cpu; -+ -+ pr_debug("launch: starting cpu%d\n", cpu); -+ -+ launch->pc = pc; -+ launch->gp = gp; -+ launch->sp = sp; -+ launch->a0 = a0; -+ -+ smp_wmb(); /* Target must see parameters before go */ -+ launch->flags |= LAUNCH_FGO; -+ smp_wmb(); /* Target must see go before we poll */ -+ -+ while ((launch->flags & LAUNCH_FGONE) == 0) -+ ; -+ smp_rmb(); /* Target will be updating flags soon */ -+ pr_debug("launch: cpu%d gone!\n", cpu); -+} ---- /dev/null -+++ b/arch/mips/ralink/mt7621.c -@@ -0,0 +1,186 @@ -+/* -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation. 
-+ * -+ * Parts of this file are based on Ralink's 2.6.21 BSP -+ * -+ * Copyright (C) 2008-2011 Gabor Juhos -+ * Copyright (C) 2008 Imre Kaloz -+ * Copyright (C) 2013 John Crispin -+ */ -+ -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+ -+#include -+ -+#include "common.h" -+ -+#define SYSC_REG_SYSCFG 0x10 -+#define SYSC_REG_CPLL_CLKCFG0 0x2c -+#define SYSC_REG_CUR_CLK_STS 0x44 -+#define CPU_CLK_SEL (BIT(30) | BIT(31)) -+ -+#define MT7621_GPIO_MODE_UART1 1 -+#define MT7621_GPIO_MODE_I2C 2 -+#define MT7621_GPIO_MODE_UART2 3 -+#define MT7621_GPIO_MODE_UART3 5 -+#define MT7621_GPIO_MODE_JTAG 7 -+#define MT7621_GPIO_MODE_WDT_MASK 0x3 -+#define MT7621_GPIO_MODE_WDT_SHIFT 8 -+#define MT7621_GPIO_MODE_WDT_GPIO 1 -+#define MT7621_GPIO_MODE_PCIE_RST 0 -+#define MT7621_GPIO_MODE_PCIE_REF 2 -+#define MT7621_GPIO_MODE_PCIE_MASK 0x3 -+#define MT7621_GPIO_MODE_PCIE_SHIFT 10 -+#define MT7621_GPIO_MODE_PCIE_GPIO 1 -+#define MT7621_GPIO_MODE_MDIO 12 -+#define MT7621_GPIO_MODE_RGMII1 14 -+#define MT7621_GPIO_MODE_RGMII2 15 -+#define MT7621_GPIO_MODE_SPI_MASK 0x3 -+#define MT7621_GPIO_MODE_SPI_SHIFT 16 -+#define MT7621_GPIO_MODE_SPI_GPIO 1 -+#define MT7621_GPIO_MODE_SDHCI_MASK 0x3 -+#define MT7621_GPIO_MODE_SDHCI_SHIFT 18 -+#define MT7621_GPIO_MODE_SDHCI_GPIO 1 -+ -+static struct rt2880_pmx_func uart1_grp[] = { FUNC("uart1", 0, 1, 2) }; -+static struct rt2880_pmx_func i2c_grp[] = { FUNC("i2c", 0, 3, 2) }; -+static struct rt2880_pmx_func uart3_grp[] = { FUNC("uart3", 0, 5, 4) }; -+static struct rt2880_pmx_func uart2_grp[] = { FUNC("uart2", 0, 9, 4) }; -+static struct rt2880_pmx_func jtag_grp[] = { FUNC("jtag", 0, 13, 5) }; -+static struct rt2880_pmx_func wdt_grp[] = { -+ FUNC("wdt rst", 0, 18, 1), -+ FUNC("wdt refclk", 2, 18, 1), -+}; -+static struct rt2880_pmx_func pcie_rst_grp[] = { -+ FUNC("pcie rst", MT7621_GPIO_MODE_PCIE_RST, 19, 1), -+ FUNC("pcie refclk", MT7621_GPIO_MODE_PCIE_REF, 19, 1) -+}; -+static struct rt2880_pmx_func mdio_grp[] = { FUNC("mdio", 0, 20, 2) }; -+static struct rt2880_pmx_func rgmii2_grp[] = { FUNC("rgmii2", 0, 22, 12) }; -+static struct rt2880_pmx_func spi_grp[] = { -+ FUNC("spi", 0, 34, 7), -+ FUNC("nand", 2, 34, 8), -+}; -+static struct rt2880_pmx_func sdhci_grp[] = { -+ FUNC("sdhci", 0, 41, 8), -+ FUNC("nand", 2, 41, 8), -+}; -+static struct rt2880_pmx_func rgmii1_grp[] = { FUNC("rgmii1", 0, 49, 12) }; -+ -+static struct rt2880_pmx_group mt7621_pinmux_data[] = { -+ GRP("uart1", uart1_grp, 1, MT7621_GPIO_MODE_UART1), -+ GRP("i2c", i2c_grp, 1, MT7621_GPIO_MODE_I2C), -+ GRP("uart3", uart2_grp, 1, MT7621_GPIO_MODE_UART2), -+ GRP("uart2", uart3_grp, 1, MT7621_GPIO_MODE_UART3), -+ GRP("jtag", jtag_grp, 1, MT7621_GPIO_MODE_JTAG), -+ GRP_G("wdt", wdt_grp, MT7621_GPIO_MODE_WDT_MASK, -+ MT7621_GPIO_MODE_WDT_GPIO, MT7621_GPIO_MODE_WDT_SHIFT), -+ GRP_G("pcie", pcie_rst_grp, MT7621_GPIO_MODE_PCIE_MASK, -+ MT7621_GPIO_MODE_PCIE_GPIO, MT7621_GPIO_MODE_PCIE_SHIFT), -+ GRP("mdio", mdio_grp, 1, MT7621_GPIO_MODE_MDIO), -+ GRP("rgmii2", rgmii2_grp, 1, MT7621_GPIO_MODE_RGMII2), -+ GRP_G("spi", spi_grp, MT7621_GPIO_MODE_SPI_MASK, -+ MT7621_GPIO_MODE_SPI_GPIO, MT7621_GPIO_MODE_SPI_SHIFT), -+ GRP_G("sdhci", sdhci_grp, MT7621_GPIO_MODE_SDHCI_MASK, -+ MT7621_GPIO_MODE_SDHCI_GPIO, MT7621_GPIO_MODE_SDHCI_SHIFT), -+ GRP("rgmii1", rgmii1_grp, 1, MT7621_GPIO_MODE_RGMII1), -+ { 0 } -+}; -+ -+void __init ralink_clk_init(void) -+{ -+ int cpu_fdiv = 0; -+ int cpu_ffrac = 0; -+ int fbdiv = 0; -+ u32 clk_sts, syscfg; -+ u8 clk_sel = 0, xtal_mode; -+ u32 cpu_clk; -+ -+ if 
((rt_sysc_r32(SYSC_REG_CPLL_CLKCFG0) & CPU_CLK_SEL) != 0) -+ clk_sel = 1; -+ -+ switch (clk_sel) { -+ case 0: -+ clk_sts = rt_sysc_r32(SYSC_REG_CUR_CLK_STS); -+ cpu_fdiv = ((clk_sts >> 8) & 0x1F); -+ cpu_ffrac = (clk_sts & 0x1F); -+ cpu_clk = (500 * cpu_ffrac / cpu_fdiv) * 1000 * 1000; -+ printk("%s:%s[%d]\n", __FILE__, __func__, __LINE__); -+ break; -+ -+ case 1: -+ fbdiv = ((rt_sysc_r32(0x648) >> 4) & 0x7F) + 1; -+ syscfg = rt_sysc_r32(SYSC_REG_SYSCFG); -+ xtal_mode = (syscfg >> 6) & 0x7; -+ printk("%s:%s[%d]\n", __FILE__, __func__, __LINE__); -+ if(xtal_mode >= 6) { //25Mhz Xtal -+ printk("%s:%s[%d]\n", __FILE__, __func__, __LINE__); -+ cpu_clk = 25 * fbdiv * 1000 * 1000; -+ } else if(xtal_mode >=3) { //40Mhz Xtal -+ printk("%s:%s[%d]\n", __FILE__, __func__, __LINE__); -+ cpu_clk = 40 * fbdiv * 1000 * 1000; -+ } else { // 20Mhz Xtal -+ printk("%s:%s[%d]\n", __FILE__, __func__, __LINE__); -+ cpu_clk = 20 * fbdiv * 1000 * 1000; -+ } -+ break; -+ } -+ cpu_clk = 880000000; -+ ralink_clk_add("cpu", cpu_clk); -+ ralink_clk_add("1e000b00.spi", 50000000); -+ ralink_clk_add("1e000c00.uartlite", 50000000); -+} -+ -+void __init ralink_of_remap(void) -+{ -+ rt_sysc_membase = plat_of_remap_node("mtk,mt7621-sysc"); -+ rt_memc_membase = plat_of_remap_node("mtk,mt7621-memc"); -+ -+ if (!rt_sysc_membase || !rt_memc_membase) -+ panic("Failed to remap core resources"); -+} -+ -+void prom_soc_init(struct ralink_soc_info *soc_info) -+{ -+ void __iomem *sysc = (void __iomem *) KSEG1ADDR(MT7621_SYSC_BASE); -+ unsigned char *name = NULL; -+ u32 n0; -+ u32 n1; -+ u32 rev; -+ -+ n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0); -+ n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1); -+ -+ if (n0 == MT7621_CHIP_NAME0 && n1 == MT7621_CHIP_NAME1) { -+ name = "MT7621"; -+ soc_info->compatible = "mtk,mt7621-soc"; -+ } else { -+ panic("mt7621: unknown SoC, n0:%08x n1:%08x\n", n0, n1); -+ } -+ -+ rev = __raw_readl(sysc + SYSC_REG_CHIP_REV); -+ -+ snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN, -+ "Mediatek %s ver:%u eco:%u", -+ name, -+ (rev >> CHIP_REV_VER_SHIFT) & CHIP_REV_VER_MASK, -+ (rev & CHIP_REV_ECO_MASK)); -+ -+ soc_info->mem_size_min = MT7621_DDR2_SIZE_MIN; -+ soc_info->mem_size_max = MT7621_DDR2_SIZE_MAX; -+ soc_info->mem_base = MT7621_DRAM_BASE; -+ -+ rt2880_pinmux_data = mt7621_pinmux_data; -+ -+ if (register_cmp_smp_ops()) -+ panic("failed to register_vsmp_smp_ops()"); -+} diff --git a/target/linux/ramips/patches-3.10/0503-MIPS-ralink-add-MT7621-early_printk-support.patch b/target/linux/ramips/patches-3.10/0503-MIPS-ralink-add-MT7621-early_printk-support.patch deleted file mode 100644 index 0da9480804..0000000000 --- a/target/linux/ramips/patches-3.10/0503-MIPS-ralink-add-MT7621-early_printk-support.patch +++ /dev/null @@ -1,37 +0,0 @@ -From 643e61b22155cd95ae6e18e57da50acd120da091 Mon Sep 17 00:00:00 2001 -From: John Crispin -Date: Mon, 2 Dec 2013 16:11:33 +0100 -Subject: [PATCH 503/507] MIPS: ralink: add MT7621 early_printk support - -Signed-off-by: John Crispin ---- - arch/mips/ralink/early_printk.c | 10 +++++++--- - 1 file changed, 7 insertions(+), 3 deletions(-) - ---- a/arch/mips/ralink/early_printk.c -+++ b/arch/mips/ralink/early_printk.c -@@ -13,6 +13,8 @@ - - #ifdef CONFIG_SOC_RT288X - #define EARLY_UART_BASE 0x300c00 -+#elif defined(CONFIG_SOC_MT7621) -+#define EARLY_UART_BASE 0x1E000c00 - #else - #define EARLY_UART_BASE 0x10000c00 - #endif -@@ -40,9 +42,13 @@ static inline u32 uart_r32(unsigned reg) - - void prom_putchar(unsigned char ch) - { -- while ((uart_r32(UART_REG_LSR) & UART_LSR_THRE) == 0) 
-+#ifdef CONFIG_SOC_MT7621 -+ uart_w32(ch, UART_TX); -+ while ((uart_r32(0x14) & UART_LSR_THRE) == 0) - ; -- uart_w32(ch, UART_REG_TX); -+#else - while ((uart_r32(UART_REG_LSR) & UART_LSR_THRE) == 0) - ; -+ uart_w32(ch, UART_REG_TX); -+#endif - } diff --git a/target/linux/ramips/patches-3.10/0504-MIPS-ralink-add-pcie-driver.patch b/target/linux/ramips/patches-3.10/0504-MIPS-ralink-add-pcie-driver.patch deleted file mode 100644 index 3ca3c9dbaf..0000000000 --- a/target/linux/ramips/patches-3.10/0504-MIPS-ralink-add-pcie-driver.patch +++ /dev/null @@ -1,822 +0,0 @@ -From 50216a5b7b3cc269043e7123db4bea262e35364e Mon Sep 17 00:00:00 2001 -From: John Crispin -Date: Mon, 2 Dec 2013 16:13:40 +0100 -Subject: [PATCH 504/507] MIPS: ralink: add pcie driver - -Signed-off-by: John Crispin ---- - arch/mips/pci/Makefile | 1 + - arch/mips/pci/pci-mt7621.c | 797 ++++++++++++++++++++++++++++++++++++++++++++ - 2 files changed, 798 insertions(+) - create mode 100644 arch/mips/pci/pci-mt7621.c - ---- a/arch/mips/pci/Makefile -+++ b/arch/mips/pci/Makefile -@@ -44,6 +44,7 @@ obj-$(CONFIG_PCI_LANTIQ) += pci-lantiq.o - obj-$(CONFIG_SOC_MT7620) += pci-mt7620a.o - obj-$(CONFIG_SOC_RT2880) += pci-rt2880.o - obj-$(CONFIG_SOC_RT3883) += pci-rt3883.o -+obj-$(CONFIG_SOC_MT7621) += pci-mt7621.o - obj-$(CONFIG_TANBAC_TB0219) += fixup-tb0219.o - obj-$(CONFIG_TANBAC_TB0226) += fixup-tb0226.o - obj-$(CONFIG_TANBAC_TB0287) += fixup-tb0287.o ---- /dev/null -+++ b/arch/mips/pci/pci-mt7621.c -@@ -0,0 +1,797 @@ -+/************************************************************************** -+ * -+ * BRIEF MODULE DESCRIPTION -+ * PCI init for Ralink RT2880 solution -+ * -+ * Copyright 2007 Ralink Inc. (bruce_chang@ralinktech.com.tw) -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License as published by the -+ * Free Software Foundation; either version 2 of the License, or (at your -+ * option) any later version. -+ * -+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED -+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN -+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, -+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT -+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF -+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ * -+ * You should have received a copy of the GNU General Public License along -+ * with this program; if not, write to the Free Software Foundation, Inc., -+ * 675 Mass Ave, Cambridge, MA 02139, USA. 
-+ * -+ * -+ ************************************************************************** -+ * May 2007 Bruce Chang -+ * Initial Release -+ * -+ * May 2009 Bruce Chang -+ * support RT2880/RT3883 PCIe -+ * -+ * May 2011 Bruce Chang -+ * support RT6855/MT7620 PCIe -+ * -+ ************************************************************************** -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+//#include -+#include -+#include -+#include -+//#include -+ -+#include -+ -+extern void pcie_phy_init(void); -+extern void chk_phy_pll(void); -+ -+/* -+ * These functions and structures provide the BIOS scan and mapping of the PCI -+ * devices. -+ */ -+ -+#define CONFIG_PCIE_PORT0 -+#define CONFIG_PCIE_PORT1 -+#define CONFIG_PCIE_PORT2 -+#define RALINK_PCIE0_CLK_EN (1<<24) -+#define RALINK_PCIE1_CLK_EN (1<<25) -+#define RALINK_PCIE2_CLK_EN (1<<26) -+ -+#define RALINK_PCI_CONFIG_ADDR 0x20 -+#define RALINK_PCI_CONFIG_DATA_VIRTUAL_REG 0x24 -+#define SURFBOARDINT_PCIE0 12 /* PCIE0 */ -+#define RALINK_INT_PCIE0 SURFBOARDINT_PCIE0 -+#define RALINK_INT_PCIE1 SURFBOARDINT_PCIE1 -+#define RALINK_INT_PCIE2 SURFBOARDINT_PCIE2 -+#define SURFBOARDINT_PCIE1 32 /* PCIE1 */ -+#define SURFBOARDINT_PCIE2 33 /* PCIE2 */ -+#define RALINK_PCI_MEMBASE *(volatile u32 *)(RALINK_PCI_BASE + 0x0028) -+#define RALINK_PCI_IOBASE *(volatile u32 *)(RALINK_PCI_BASE + 0x002C) -+#define RALINK_PCIE0_RST (1<<24) -+#define RALINK_PCIE1_RST (1<<25) -+#define RALINK_PCIE2_RST (1<<26) -+#define RALINK_SYSCTL_BASE 0xBE000000 -+ -+#define RALINK_PCI_PCICFG_ADDR *(volatile u32 *)(RALINK_PCI_BASE + 0x0000) -+#define RALINK_PCI_PCIMSK_ADDR *(volatile u32 *)(RALINK_PCI_BASE + 0x000C) -+#define RALINK_PCI_BASE 0xBE140000 -+ -+#define RALINK_PCIEPHY_P0P1_CTL_OFFSET (RALINK_PCI_BASE + 0x9000) -+#define RT6855_PCIE0_OFFSET 0x2000 -+#define RT6855_PCIE1_OFFSET 0x3000 -+#define RT6855_PCIE2_OFFSET 0x4000 -+ -+#define RALINK_PCI0_BAR0SETUP_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0010) -+#define RALINK_PCI0_IMBASEBAR0_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0018) -+#define RALINK_PCI0_ID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0030) -+#define RALINK_PCI0_CLASS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0034) -+#define RALINK_PCI0_SUBID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0038) -+#define RALINK_PCI0_STATUS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0050) -+#define RALINK_PCI0_DERR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0060) -+#define RALINK_PCI0_ECRC *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE0_OFFSET + 0x0064) -+ -+#define RALINK_PCI1_BAR0SETUP_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0010) -+#define RALINK_PCI1_IMBASEBAR0_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0018) -+#define RALINK_PCI1_ID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0030) -+#define RALINK_PCI1_CLASS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0034) -+#define RALINK_PCI1_SUBID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0038) -+#define RALINK_PCI1_STATUS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0050) -+#define RALINK_PCI1_DERR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0060) -+#define RALINK_PCI1_ECRC *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE1_OFFSET + 0x0064) -+ -+#define RALINK_PCI2_BAR0SETUP_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 
0x0010) -+#define RALINK_PCI2_IMBASEBAR0_ADDR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0018) -+#define RALINK_PCI2_ID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0030) -+#define RALINK_PCI2_CLASS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0034) -+#define RALINK_PCI2_SUBID *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0038) -+#define RALINK_PCI2_STATUS *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0050) -+#define RALINK_PCI2_DERR *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0060) -+#define RALINK_PCI2_ECRC *(volatile u32 *)(RALINK_PCI_BASE + RT6855_PCIE2_OFFSET + 0x0064) -+ -+#define RALINK_PCIEPHY_P0P1_CTL_OFFSET (RALINK_PCI_BASE + 0x9000) -+#define RALINK_PCIEPHY_P2_CTL_OFFSET (RALINK_PCI_BASE + 0xA000) -+ -+ -+#define MV_WRITE(ofs, data) \ -+ *(volatile u32 *)(RALINK_PCI_BASE+(ofs)) = cpu_to_le32(data) -+#define MV_READ(ofs, data) \ -+ *(data) = le32_to_cpu(*(volatile u32 *)(RALINK_PCI_BASE+(ofs))) -+#define MV_READ_DATA(ofs) \ -+ le32_to_cpu(*(volatile u32 *)(RALINK_PCI_BASE+(ofs))) -+ -+#define MV_WRITE_16(ofs, data) \ -+ *(volatile u16 *)(RALINK_PCI_BASE+(ofs)) = cpu_to_le16(data) -+#define MV_READ_16(ofs, data) \ -+ *(data) = le16_to_cpu(*(volatile u16 *)(RALINK_PCI_BASE+(ofs))) -+ -+#define MV_WRITE_8(ofs, data) \ -+ *(volatile u8 *)(RALINK_PCI_BASE+(ofs)) = data -+#define MV_READ_8(ofs, data) \ -+ *(data) = *(volatile u8 *)(RALINK_PCI_BASE+(ofs)) -+ -+ -+ -+#define RALINK_PCI_MM_MAP_BASE 0x60000000 -+#define RALINK_PCI_IO_MAP_BASE 0x1e160000 -+ -+#define RALINK_SYSTEM_CONTROL_BASE 0xbe000000 -+#define GPIO_PERST -+#define ASSERT_SYSRST_PCIE(val) do { \ -+ if (*(unsigned int *)(0xbe00000c) == 0x00030101) \ -+ RALINK_RSTCTRL |= val; \ -+ else \ -+ RALINK_RSTCTRL &= ~val; \ -+ } while(0) -+#define DEASSERT_SYSRST_PCIE(val) do { \ -+ if (*(unsigned int *)(0xbe00000c) == 0x00030101) \ -+ RALINK_RSTCTRL &= ~val; \ -+ else \ -+ RALINK_RSTCTRL |= val; \ -+ } while(0) -+#define RALINK_SYSCFG1 *(unsigned int *)(RALINK_SYSTEM_CONTROL_BASE + 0x14) -+#define RALINK_CLKCFG1 *(unsigned int *)(RALINK_SYSTEM_CONTROL_BASE + 0x30) -+#define RALINK_RSTCTRL *(unsigned int *)(RALINK_SYSTEM_CONTROL_BASE + 0x34) -+#define RALINK_GPIOMODE *(unsigned int *)(RALINK_SYSTEM_CONTROL_BASE + 0x60) -+#define RALINK_PCIE_CLK_GEN *(unsigned int *)(RALINK_SYSTEM_CONTROL_BASE + 0x7c) -+#define RALINK_PCIE_CLK_GEN1 *(unsigned int *)(RALINK_SYSTEM_CONTROL_BASE + 0x80) -+#define PPLL_CFG1 *(unsigned int *)(RALINK_SYSTEM_CONTROL_BASE + 0x9c) -+#define PPLL_DRV *(unsigned int *)(RALINK_SYSTEM_CONTROL_BASE + 0xa0) -+//RALINK_SYSCFG1 bit -+#define RALINK_PCI_HOST_MODE_EN (1<<7) -+#define RALINK_PCIE_RC_MODE_EN (1<<8) -+//RALINK_RSTCTRL bit -+#define RALINK_PCIE_RST (1<<23) -+#define RALINK_PCI_RST (1<<24) -+//RALINK_CLKCFG1 bit -+#define RALINK_PCI_CLK_EN (1<<19) -+#define RALINK_PCIE_CLK_EN (1<<21) -+//RALINK_GPIOMODE bit -+#define PCI_SLOTx2 (1<<11) -+#define PCI_SLOTx1 (2<<11) -+//MTK PCIE PLL bit -+#define PDRV_SW_SET (1<<31) -+#define LC_CKDRVPD_ (1<<19) -+ -+#define MEMORY_BASE 0x0 -+int pcie_link_status = 0; -+ -+void __inline__ read_config(unsigned long bus, unsigned long dev, unsigned long func, unsigned long reg, unsigned long *val); -+void __inline__ write_config(unsigned long bus, unsigned long dev, unsigned long func, unsigned long reg, unsigned long val); -+ -+#define PCI_ACCESS_READ_1 0 -+#define PCI_ACCESS_READ_2 1 -+#define PCI_ACCESS_READ_4 2 -+#define PCI_ACCESS_WRITE_1 3 -+#define PCI_ACCESS_WRITE_2 4 -+#define 
PCI_ACCESS_WRITE_4 5 -+ -+static int config_access(unsigned char access_type, struct pci_bus *bus, -+ unsigned int devfn, unsigned int where, u32 * data) -+{ -+ unsigned int slot = PCI_SLOT(devfn); -+ u8 func = PCI_FUNC(devfn); -+ uint32_t address_reg, data_reg; -+ unsigned int address; -+ -+ address_reg = RALINK_PCI_CONFIG_ADDR; -+ data_reg = RALINK_PCI_CONFIG_DATA_VIRTUAL_REG; -+ -+ address = (((where&0xF00)>>8)<<24) |(bus->number << 16) | (slot << 11) | (func << 8) | (where & 0xfc) | 0x80000000; -+ MV_WRITE(address_reg, address); -+ -+ switch(access_type) { -+ case PCI_ACCESS_WRITE_1: -+ MV_WRITE_8(data_reg+(where&0x3), *data); -+ break; -+ case PCI_ACCESS_WRITE_2: -+ MV_WRITE_16(data_reg+(where&0x3), *data); -+ break; -+ case PCI_ACCESS_WRITE_4: -+ MV_WRITE(data_reg, *data); -+ break; -+ case PCI_ACCESS_READ_1: -+ MV_READ_8( data_reg+(where&0x3), data); -+ break; -+ case PCI_ACCESS_READ_2: -+ MV_READ_16(data_reg+(where&0x3), data); -+ break; -+ case PCI_ACCESS_READ_4: -+ MV_READ(data_reg, data); -+ break; -+ default: -+ printk("no specify access type\n"); -+ break; -+ } -+ return 0; -+} -+ -+static int -+read_config_byte(struct pci_bus *bus, unsigned int devfn, int where, u8 * val) -+{ -+ return config_access(PCI_ACCESS_READ_1, bus, devfn, (unsigned int)where, (u32 *)val); -+} -+ -+static int -+read_config_word(struct pci_bus *bus, unsigned int devfn, int where, u16 * val) -+{ -+ return config_access(PCI_ACCESS_READ_2, bus, devfn, (unsigned int)where, (u32 *)val); -+} -+ -+static int -+read_config_dword(struct pci_bus *bus, unsigned int devfn, int where, u32 * val) -+{ -+ return config_access(PCI_ACCESS_READ_4, bus, devfn, (unsigned int)where, (u32 *)val); -+} -+ -+static int -+write_config_byte(struct pci_bus *bus, unsigned int devfn, int where, u8 val) -+{ -+ if (config_access(PCI_ACCESS_WRITE_1, bus, devfn, (unsigned int)where, (u32 *)&val)) -+ return -1; -+ -+ return PCIBIOS_SUCCESSFUL; -+} -+ -+static int -+write_config_word(struct pci_bus *bus, unsigned int devfn, int where, u16 val) -+{ -+ if (config_access(PCI_ACCESS_WRITE_2, bus, devfn, where, (u32 *)&val)) -+ return -1; -+ -+ return PCIBIOS_SUCCESSFUL; -+} -+ -+static int -+write_config_dword(struct pci_bus *bus, unsigned int devfn, int where, u32 val) -+{ -+ if (config_access(PCI_ACCESS_WRITE_4, bus, devfn, where, &val)) -+ return -1; -+ -+ return PCIBIOS_SUCCESSFUL; -+} -+ -+ -+static int -+pci_config_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 * val) -+{ -+ switch (size) { -+ case 1: -+ return read_config_byte(bus, devfn, where, (u8 *) val); -+ case 2: -+ return read_config_word(bus, devfn, where, (u16 *) val); -+ default: -+ return read_config_dword(bus, devfn, where, val); -+ } -+} -+ -+static int -+pci_config_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) -+{ -+ switch (size) { -+ case 1: -+ return write_config_byte(bus, devfn, where, (u8) val); -+ case 2: -+ return write_config_word(bus, devfn, where, (u16) val); -+ default: -+ return write_config_dword(bus, devfn, where, val); -+ } -+} -+ -+struct pci_ops rt2880_pci_ops= { -+ .read = pci_config_read, -+ .write = pci_config_write, -+}; -+ -+static struct resource rt2880_res_pci_mem1 = { -+ .name = "PCI MEM1", -+ .start = RALINK_PCI_MM_MAP_BASE, -+ .end = (u32)((RALINK_PCI_MM_MAP_BASE + (unsigned char *)0x0fffffff)), -+ .flags = IORESOURCE_MEM, -+}; -+static struct resource rt2880_res_pci_io1 = { -+ .name = "PCI I/O1", -+ .start = RALINK_PCI_IO_MAP_BASE, -+ .end = (u32)((RALINK_PCI_IO_MAP_BASE + (unsigned char 
*)0x0ffff)), -+ .flags = IORESOURCE_IO, -+}; -+ -+struct pci_controller rt2880_controller = { -+ .pci_ops = &rt2880_pci_ops, -+ .mem_resource = &rt2880_res_pci_mem1, -+ .io_resource = &rt2880_res_pci_io1, -+ .mem_offset = 0x00000000UL, -+ .io_offset = 0x00000000UL, -+ .io_map_base = 0xa0000000, -+}; -+ -+void __inline__ -+read_config(unsigned long bus, unsigned long dev, unsigned long func, unsigned long reg, unsigned long *val) -+{ -+ unsigned int address_reg, data_reg, address; -+ -+ address_reg = RALINK_PCI_CONFIG_ADDR; -+ data_reg = RALINK_PCI_CONFIG_DATA_VIRTUAL_REG; -+ address = (((reg & 0xF00)>>8)<<24) | (bus << 16) | (dev << 11) | (func << 8) | (reg & 0xfc) | 0x80000000 ; -+ MV_WRITE(address_reg, address); -+ MV_READ(data_reg, val); -+ return; -+} -+ -+void __inline__ -+write_config(unsigned long bus, unsigned long dev, unsigned long func, unsigned long reg, unsigned long val) -+{ -+ unsigned int address_reg, data_reg, address; -+ -+ address_reg = RALINK_PCI_CONFIG_ADDR; -+ data_reg = RALINK_PCI_CONFIG_DATA_VIRTUAL_REG; -+ address = (((reg & 0xF00)>>8)<<24) | (bus << 16) | (dev << 11) | (func << 8) | (reg & 0xfc) | 0x80000000 ; -+ MV_WRITE(address_reg, address); -+ MV_WRITE(data_reg, val); -+ return; -+} -+ -+ -+int __init -+pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) -+{ -+ u16 cmd; -+ u32 val; -+ int irq = 0; -+ -+ if ((dev->bus->number == 0) && (slot == 0)) { -+ write_config(0, 0, 0, PCI_BASE_ADDRESS_0, MEMORY_BASE); -+ read_config(0, 0, 0, PCI_BASE_ADDRESS_0, (unsigned long *)&val); -+ printk("BAR0 at slot 0 = %x\n", val); -+ printk("bus=0x%x, slot = 0x%x\n",dev->bus->number, slot); -+ } else if((dev->bus->number == 0) && (slot == 0x1)) { -+ write_config(0, 1, 0, PCI_BASE_ADDRESS_0, MEMORY_BASE); -+ read_config(0, 1, 0, PCI_BASE_ADDRESS_0, (unsigned long *)&val); -+ printk("BAR0 at slot 1 = %x\n", val); -+ printk("bus=0x%x, slot = 0x%x\n",dev->bus->number, slot); -+ } else if((dev->bus->number == 0) && (slot == 0x2)) { -+ write_config(0, 2, 0, PCI_BASE_ADDRESS_0, MEMORY_BASE); -+ read_config(0, 2, 0, PCI_BASE_ADDRESS_0, (unsigned long *)&val); -+ printk("BAR0 at slot 2 = %x\n", val); -+ printk("bus=0x%x, slot = 0x%x\n",dev->bus->number, slot); -+ } else if ((dev->bus->number == 1) && (slot == 0x0)) { -+ switch (pcie_link_status) { -+ case 2: -+ case 6: -+ irq = RALINK_INT_PCIE1; -+ break; -+ case 4: -+ irq = RALINK_INT_PCIE2; -+ break; -+ default: -+ irq = RALINK_INT_PCIE0; -+ } -+ printk("bus=0x%x, slot = 0x%x, irq=0x%x\n",dev->bus->number, slot, dev->irq); -+ } else if ((dev->bus->number == 2) && (slot == 0x0)) { -+ switch (pcie_link_status) { -+ case 5: -+ case 6: -+ irq = RALINK_INT_PCIE2; -+ break; -+ default: -+ irq = RALINK_INT_PCIE1; -+ } -+ printk("bus=0x%x, slot = 0x%x, irq=0x%x\n",dev->bus->number, slot, dev->irq); -+ } else if ((dev->bus->number == 2) && (slot == 0x1)) { -+ switch (pcie_link_status) { -+ case 5: -+ case 6: -+ irq = RALINK_INT_PCIE2; -+ break; -+ default: -+ irq = RALINK_INT_PCIE1; -+ } -+ printk("bus=0x%x, slot = 0x%x, irq=0x%x\n",dev->bus->number, slot, dev->irq); -+ } else if ((dev->bus->number ==3) && (slot == 0x0)) { -+ irq = RALINK_INT_PCIE2; -+ printk("bus=0x%x, slot = 0x%x, irq=0x%x\n",dev->bus->number, slot, dev->irq); -+ } else if ((dev->bus->number ==3) && (slot == 0x1)) { -+ irq = RALINK_INT_PCIE2; -+ printk("bus=0x%x, slot = 0x%x, irq=0x%x\n",dev->bus->number, slot, dev->irq); -+ } else if ((dev->bus->number ==3) && (slot == 0x2)) { -+ irq = RALINK_INT_PCIE2; -+ printk("bus=0x%x, slot = 0x%x, irq=0x%x\n",dev->bus->number, 
slot, dev->irq); -+ } else { -+ printk("bus=0x%x, slot = 0x%x\n",dev->bus->number, slot); -+ return 0; -+ } -+ -+ pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, 0x14); //configure cache line size 0x14 -+ pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0xFF); //configure latency timer 0x10 -+ pci_read_config_word(dev, PCI_COMMAND, &cmd); -+ cmd = cmd | PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY; -+ pci_write_config_word(dev, PCI_COMMAND, cmd); -+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq); -+ return irq; -+} -+ -+void -+set_pcie_phy(u32 *addr, int start_b, int bits, int val) -+{ -+// printk("0x%p:", addr); -+// printk(" %x", *addr); -+ *(unsigned int *)(addr) &= ~(((1< %x\n", *addr); -+} -+ -+void -+bypass_pipe_rst(void) -+{ -+#if defined (CONFIG_PCIE_PORT0) -+ /* PCIe Port 0 */ -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x02c), 12, 1, 0x01); // rg_pe1_pipe_rst_b -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x02c), 4, 1, 0x01); // rg_pe1_pipe_cmd_frc[4] -+#endif -+#if defined (CONFIG_PCIE_PORT1) -+ /* PCIe Port 1 */ -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x12c), 12, 1, 0x01); // rg_pe1_pipe_rst_b -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x12c), 4, 1, 0x01); // rg_pe1_pipe_cmd_frc[4] -+#endif -+#if defined (CONFIG_PCIE_PORT2) -+ /* PCIe Port 2 */ -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x02c), 12, 1, 0x01); // rg_pe1_pipe_rst_b -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x02c), 4, 1, 0x01); // rg_pe1_pipe_cmd_frc[4] -+#endif -+} -+ -+void -+set_phy_for_ssc(void) -+{ -+ unsigned long reg = (*(volatile u32 *)(RALINK_SYSCTL_BASE + 0x10)); -+ -+ reg = (reg >> 6) & 0x7; -+#if defined (CONFIG_PCIE_PORT0) || defined (CONFIG_PCIE_PORT1) -+ /* Set PCIe Port0 & Port1 PHY to disable SSC */ -+ /* Debug Xtal Type */ -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x400), 8, 1, 0x01); // rg_pe1_frc_h_xtal_type -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x400), 9, 2, 0x00); // rg_pe1_h_xtal_type -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x000), 4, 1, 0x01); // rg_pe1_frc_phy_en //Force Port 0 enable control -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x100), 4, 1, 0x01); // rg_pe1_frc_phy_en //Force Port 1 enable control -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x000), 5, 1, 0x00); // rg_pe1_phy_en //Port 0 disable -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x100), 5, 1, 0x00); // rg_pe1_phy_en //Port 1 disable -+ if(reg <= 5 && reg >= 3) { // 40MHz Xtal -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 6, 2, 0x01); // RG_PE1_H_PLL_PREDIV //Pre-divider ratio (for host mode) -+ printk("***** Xtal 40MHz *****\n"); -+ } else { // 25MHz | 20MHz Xtal -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 6, 2, 0x00); // RG_PE1_H_PLL_PREDIV //Pre-divider ratio (for host mode) -+ if (reg >= 6) { -+ printk("***** Xtal 25MHz *****\n"); -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x4bc), 4, 2, 0x01); // RG_PE1_H_PLL_FBKSEL //Feedback clock select -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x49c), 0,31, 0x18000000); // RG_PE1_H_LCDDS_PCW_NCPO //DDS NCPO PCW (for host mode) -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x4a4), 0,16, 0x18d); // RG_PE1_H_LCDDS_SSC_PRD //DDS SSC dither period control -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x4a8), 0,12, 0x4a); // RG_PE1_H_LCDDS_SSC_DELTA //DDS SSC dither amplitude control -+ set_pcie_phy((u32 
*)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x4a8), 16,12, 0x4a); // RG_PE1_H_LCDDS_SSC_DELTA1 //DDS SSC dither amplitude control for initial -+ } else { -+ printk("***** Xtal 20MHz *****\n"); -+ } -+ } -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x4a0), 5, 1, 0x01); // RG_PE1_LCDDS_CLK_PH_INV //DDS clock inversion -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 22, 2, 0x02); // RG_PE1_H_PLL_BC -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 18, 4, 0x06); // RG_PE1_H_PLL_BP -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 12, 4, 0x02); // RG_PE1_H_PLL_IR -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 8, 4, 0x01); // RG_PE1_H_PLL_IC -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x4ac), 16, 3, 0x00); // RG_PE1_H_PLL_BR -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x490), 1, 3, 0x02); // RG_PE1_PLL_DIVEN -+ if(reg <= 5 && reg >= 3) { // 40MHz Xtal -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x414), 6, 2, 0x01); // rg_pe1_mstckdiv //value of da_pe1_mstckdiv when force mode enable -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x414), 5, 1, 0x01); // rg_pe1_frc_mstckdiv //force mode enable of da_pe1_mstckdiv -+ } -+ /* Enable PHY and disable force mode */ -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x000), 5, 1, 0x01); // rg_pe1_phy_en //Port 0 enable -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x100), 5, 1, 0x01); // rg_pe1_phy_en //Port 1 enable -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x000), 4, 1, 0x00); // rg_pe1_frc_phy_en //Force Port 0 disable control -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P0P1_CTL_OFFSET + 0x100), 4, 1, 0x00); // rg_pe1_frc_phy_en //Force Port 1 disable control -+#endif -+#if defined (CONFIG_PCIE_PORT2) -+ /* Set PCIe Port2 PHY to disable SSC */ -+ /* Debug Xtal Type */ -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x400), 8, 1, 0x01); // rg_pe1_frc_h_xtal_type -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x400), 9, 2, 0x00); // rg_pe1_h_xtal_type -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x000), 4, 1, 0x01); // rg_pe1_frc_phy_en //Force Port 0 enable control -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x000), 5, 1, 0x00); // rg_pe1_phy_en //Port 0 disable -+ if(reg <= 5 && reg >= 3) { // 40MHz Xtal -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 6, 2, 0x01); // RG_PE1_H_PLL_PREDIV //Pre-divider ratio (for host mode) -+ } else { // 25MHz | 20MHz Xtal -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 6, 2, 0x00); // RG_PE1_H_PLL_PREDIV //Pre-divider ratio (for host mode) -+ if (reg >= 6) { // 25MHz Xtal -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x4bc), 4, 2, 0x01); // RG_PE1_H_PLL_FBKSEL //Feedback clock select -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x49c), 0,31, 0x18000000); // RG_PE1_H_LCDDS_PCW_NCPO //DDS NCPO PCW (for host mode) -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x4a4), 0,16, 0x18d); // RG_PE1_H_LCDDS_SSC_PRD //DDS SSC dither period control -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x4a8), 0,12, 0x4a); // RG_PE1_H_LCDDS_SSC_DELTA //DDS SSC dither amplitude control -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x4a8), 16,12, 0x4a); // RG_PE1_H_LCDDS_SSC_DELTA1 //DDS SSC dither amplitude control for initial -+ } -+ } -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x4a0), 5, 1, 0x01); // RG_PE1_LCDDS_CLK_PH_INV //DDS clock inversion -+ set_pcie_phy((u32 
*)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 22, 2, 0x02); // RG_PE1_H_PLL_BC -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 18, 4, 0x06); // RG_PE1_H_PLL_BP -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 12, 4, 0x02); // RG_PE1_H_PLL_IR -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 8, 4, 0x01); // RG_PE1_H_PLL_IC -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x4ac), 16, 3, 0x00); // RG_PE1_H_PLL_BR -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x490), 1, 3, 0x02); // RG_PE1_PLL_DIVEN -+ if(reg <= 5 && reg >= 3) { // 40MHz Xtal -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x414), 6, 2, 0x01); // rg_pe1_mstckdiv //value of da_pe1_mstckdiv when force mode enable -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x414), 5, 1, 0x01); // rg_pe1_frc_mstckdiv //force mode enable of da_pe1_mstckdiv -+ } -+ /* Enable PHY and disable force mode */ -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x000), 5, 1, 0x01); // rg_pe1_phy_en //Port 0 enable -+ set_pcie_phy((u32 *)(RALINK_PCIEPHY_P2_CTL_OFFSET + 0x000), 4, 1, 0x00); // rg_pe1_frc_phy_en //Force Port 0 disable control -+#endif -+} -+ -+int init_rt2880pci(void) -+{ -+ unsigned long val = 0; -+ iomem_resource.start = 0; -+ iomem_resource.end= ~0; -+ ioport_resource.start= 0; -+ ioport_resource.end = ~0; -+ -+#if defined (CONFIG_PCIE_PORT0) -+ val = RALINK_PCIE0_RST; -+#endif -+#if defined (CONFIG_PCIE_PORT1) -+ val |= RALINK_PCIE1_RST; -+#endif -+#if defined (CONFIG_PCIE_PORT2) -+ val |= RALINK_PCIE2_RST; -+#endif -+ DEASSERT_SYSRST_PCIE(val); -+ printk("release PCIe RST: RALINK_RSTCTRL = %x\n", RALINK_RSTCTRL); -+ -+ bypass_pipe_rst(); -+ set_phy_for_ssc(); -+ ASSERT_SYSRST_PCIE(RALINK_PCIE0_RST | RALINK_PCIE1_RST | RALINK_PCIE2_RST); -+ printk("pull PCIe RST: RALINK_RSTCTRL = %x\n", RALINK_RSTCTRL); -+#if defined GPIO_PERST /* add GPIO control instead of PERST_N */ /*chhung*/ -+ *(unsigned int *)(0xbe000060) &= ~(0x3<<10 | 0x3<<3); -+ *(unsigned int *)(0xbe000060) |= 0x1<<10 | 0x1<<3; -+ mdelay(100); -+ *(unsigned int *)(0xbe000600) |= 0x1<<19 | 0x1<<8 | 0x1<<7; // use GPIO19/GPIO8/GPIO7 (PERST_N/UART_RXD3/UART_TXD3) -+ mdelay(100); -+ *(unsigned int *)(0xbe000620) &= ~(0x1<<19 | 0x1<<8 | 0x1<<7); // clear DATA -+ -+ mdelay(100); -+#else -+ *(unsigned int *)(0xbe000060) &= ~0x00000c00; -+#endif -+#if defined (CONFIG_PCIE_PORT0) -+ val = RALINK_PCIE0_RST; -+#endif -+#if defined (CONFIG_PCIE_PORT1) -+ val |= RALINK_PCIE1_RST; -+#endif -+#if defined (CONFIG_PCIE_PORT2) -+ val |= RALINK_PCIE2_RST; -+#endif -+ DEASSERT_SYSRST_PCIE(val); -+ printk("release PCIe RST: RALINK_RSTCTRL = %x\n", RALINK_RSTCTRL); -+#if defined (CONFIG_PCIE_PORT0) -+ read_config(0, 0, 0, 0x70c, &val); -+ val &= ~(0xff)<<8; -+ val |= 0x50<<8; -+ write_config(0, 0, 0, 0x70c, val); -+#endif -+#if defined (CONFIG_PCIE_PORT1) -+ read_config(0, 1, 0, 0x70c, &val); -+ val &= ~(0xff)<<8; -+ val |= 0x50<<8; -+ write_config(0, 1, 0, 0x70c, val); -+#endif -+#if defined (CONFIG_PCIE_PORT2) -+ read_config(0, 2, 0, 0x70c, &val); -+ val &= ~(0xff)<<8; -+ val |= 0x50<<8; -+ write_config(0, 2, 0, 0x70c, val); -+#endif -+ -+#if defined (CONFIG_PCIE_PORT0) -+ read_config(0, 0, 0, 0x70c, &val); -+ printk("Port 0 N_FTS = %x\n", (unsigned int)val); -+#endif -+#if defined (CONFIG_PCIE_PORT1) -+ read_config(0, 1, 0, 0x70c, &val); -+ printk("Port 1 N_FTS = %x\n", (unsigned int)val); -+#endif -+#if defined (CONFIG_PCIE_PORT2) -+ read_config(0, 2, 0, 0x70c, &val); -+ printk("Port 2 N_FTS = %x\n", (unsigned int)val); 
-+#endif -+ -+ RALINK_RSTCTRL = (RALINK_RSTCTRL | RALINK_PCIE_RST); -+ RALINK_SYSCFG1 &= ~(0x30); -+ RALINK_SYSCFG1 |= (2<<4); -+ RALINK_PCIE_CLK_GEN &= 0x7fffffff; -+ RALINK_PCIE_CLK_GEN1 &= 0x80ffffff; -+ RALINK_PCIE_CLK_GEN1 |= 0xa << 24; -+ RALINK_PCIE_CLK_GEN |= 0x80000000; -+ mdelay(50); -+ RALINK_RSTCTRL = (RALINK_RSTCTRL & ~RALINK_PCIE_RST); -+ -+ -+#if defined GPIO_PERST /* add GPIO control instead of PERST_N */ /*chhung*/ -+ *(unsigned int *)(0xbe000620) |= 0x1<<19 | 0x1<<8 | 0x1<<7; // set DATA -+ mdelay(100); -+#else -+ RALINK_PCI_PCICFG_ADDR &= ~(1<<1); //de-assert PERST -+#endif -+ mdelay(500); -+ -+ -+ mdelay(500); -+#if defined (CONFIG_PCIE_PORT0) -+ if(( RALINK_PCI0_STATUS & 0x1) == 0) -+ { -+ printk("PCIE0 no card, disable it(RST&CLK)\n"); -+ ASSERT_SYSRST_PCIE(RALINK_PCIE0_RST); -+ RALINK_CLKCFG1 = (RALINK_CLKCFG1 & ~RALINK_PCIE0_CLK_EN); -+ pcie_link_status &= ~(1<<0); -+ } else { -+ pcie_link_status |= 1<<0; -+ RALINK_PCI_PCIMSK_ADDR |= (1<<20); // enable pcie1 interrupt -+ } -+#endif -+#if defined (CONFIG_PCIE_PORT1) -+ if(( RALINK_PCI1_STATUS & 0x1) == 0) -+ { -+ printk("PCIE1 no card, disable it(RST&CLK)\n"); -+ ASSERT_SYSRST_PCIE(RALINK_PCIE1_RST); -+ RALINK_CLKCFG1 = (RALINK_CLKCFG1 & ~RALINK_PCIE1_CLK_EN); -+ pcie_link_status &= ~(1<<1); -+ } else { -+ pcie_link_status |= 1<<1; -+ RALINK_PCI_PCIMSK_ADDR |= (1<<21); // enable pcie1 interrupt -+ } -+#endif -+#if defined (CONFIG_PCIE_PORT2) -+ if (( RALINK_PCI2_STATUS & 0x1) == 0) { -+ printk("PCIE2 no card, disable it(RST&CLK)\n"); -+ ASSERT_SYSRST_PCIE(RALINK_PCIE2_RST); -+ RALINK_CLKCFG1 = (RALINK_CLKCFG1 & ~RALINK_PCIE2_CLK_EN); -+ pcie_link_status &= ~(1<<2); -+ } else { -+ pcie_link_status |= 1<<2; -+ RALINK_PCI_PCIMSK_ADDR |= (1<<22); // enable pcie2 interrupt -+ } -+#endif -+ if (pcie_link_status == 0) -+ return 0; -+ -+/* -+pcie(2/1/0) link status pcie2_num pcie1_num pcie0_num -+3'b000 x x x -+3'b001 x x 0 -+3'b010 x 0 x -+3'b011 x 1 0 -+3'b100 0 x x -+3'b101 1 x 0 -+3'b110 1 0 x -+3'b111 2 1 0 -+*/ -+ switch(pcie_link_status) { -+ case 2: -+ RALINK_PCI_PCICFG_ADDR &= ~0x00ff0000; -+ RALINK_PCI_PCICFG_ADDR |= 0x1 << 16; //port0 -+ RALINK_PCI_PCICFG_ADDR |= 0x0 << 20; //port1 -+ break; -+ case 4: -+ RALINK_PCI_PCICFG_ADDR &= ~0x0fff0000; -+ RALINK_PCI_PCICFG_ADDR |= 0x1 << 16; //port0 -+ RALINK_PCI_PCICFG_ADDR |= 0x2 << 20; //port1 -+ RALINK_PCI_PCICFG_ADDR |= 0x0 << 24; //port2 -+ break; -+ case 5: -+ RALINK_PCI_PCICFG_ADDR &= ~0x0fff0000; -+ RALINK_PCI_PCICFG_ADDR |= 0x0 << 16; //port0 -+ RALINK_PCI_PCICFG_ADDR |= 0x2 << 20; //port1 -+ RALINK_PCI_PCICFG_ADDR |= 0x1 << 24; //port2 -+ break; -+ case 6: -+ RALINK_PCI_PCICFG_ADDR &= ~0x0fff0000; -+ RALINK_PCI_PCICFG_ADDR |= 0x2 << 16; //port0 -+ RALINK_PCI_PCICFG_ADDR |= 0x0 << 20; //port1 -+ RALINK_PCI_PCICFG_ADDR |= 0x1 << 24; //port2 -+ break; -+ } -+ printk(" -> %x\n", RALINK_PCI_PCICFG_ADDR); -+ //printk(" RALINK_PCI_ARBCTL = %x\n", RALINK_PCI_ARBCTL); -+ -+/* -+ ioport_resource.start = rt2880_res_pci_io1.start; -+ ioport_resource.end = rt2880_res_pci_io1.end; -+*/ -+ -+ RALINK_PCI_MEMBASE = 0xffffffff; //RALINK_PCI_MM_MAP_BASE; -+ RALINK_PCI_IOBASE = RALINK_PCI_IO_MAP_BASE; -+ -+#if defined (CONFIG_PCIE_PORT0) -+ //PCIe0 -+ if((pcie_link_status & 0x1) != 0) { -+ RALINK_PCI0_BAR0SETUP_ADDR = 0x7FFF0001; //open 7FFF:2G; ENABLE -+ RALINK_PCI0_IMBASEBAR0_ADDR = MEMORY_BASE; -+ RALINK_PCI0_CLASS = 0x06040001; -+ printk("PCIE0 enabled\n"); -+ } -+#endif -+#if defined (CONFIG_PCIE_PORT1) -+ //PCIe1 -+ if ((pcie_link_status & 0x2) != 0) { -+ 
RALINK_PCI1_BAR0SETUP_ADDR = 0x7FFF0001; //open 7FFF:2G; ENABLE -+ RALINK_PCI1_IMBASEBAR0_ADDR = MEMORY_BASE; -+ RALINK_PCI1_CLASS = 0x06040001; -+ printk("PCIE1 enabled\n"); -+ } -+#endif -+#if defined (CONFIG_PCIE_PORT2) -+ //PCIe2 -+ if ((pcie_link_status & 0x4) != 0) { -+ RALINK_PCI2_BAR0SETUP_ADDR = 0x7FFF0001; //open 7FFF:2G; ENABLE -+ RALINK_PCI2_IMBASEBAR0_ADDR = MEMORY_BASE; -+ RALINK_PCI2_CLASS = 0x06040001; -+ printk("PCIE2 enabled\n"); -+ } -+#endif -+ -+ -+ switch(pcie_link_status) { -+ case 7: -+ read_config(0, 2, 0, 0x4, &val); -+ write_config(0, 2, 0, 0x4, val|0x4); -+ // write_config(0, 1, 0, 0x4, val|0x7); -+ case 3: -+ case 5: -+ case 6: -+ read_config(0, 1, 0, 0x4, &val); -+ write_config(0, 1, 0, 0x4, val|0x4); -+ // write_config(0, 1, 0, 0x4, val|0x7); -+ default: -+ read_config(0, 0, 0, 0x4, &val); -+ write_config(0, 0, 0, 0x4, val|0x4); //bus master enable -+ // write_config(0, 0, 0, 0x4, val|0x7); //bus master enable -+ } -+ register_pci_controller(&rt2880_controller); -+ return 0; -+ -+} -+arch_initcall(init_rt2880pci); -+ -+int pcibios_plat_dev_init(struct pci_dev *dev) -+{ -+ return 0; -+} diff --git a/target/linux/ramips/patches-3.10/0505-watchdog-add-MT7621-support.patch b/target/linux/ramips/patches-3.10/0505-watchdog-add-MT7621-support.patch deleted file mode 100644 index b1d2da7766..0000000000 --- a/target/linux/ramips/patches-3.10/0505-watchdog-add-MT7621-support.patch +++ /dev/null @@ -1,229 +0,0 @@ -From eb50d97682d78af68388d24956a74de4ab751cf7 Mon Sep 17 00:00:00 2001 -From: John Crispin -Date: Mon, 2 Dec 2013 16:18:36 +0100 -Subject: [PATCH 505/507] watchdog: add MT7621 support - -Signed-off-by: John Crispin ---- - drivers/watchdog/Kconfig | 7 ++ - drivers/watchdog/Makefile | 1 + - drivers/watchdog/mt7621_wdt.c | 185 +++++++++++++++++++++++++++++++++++++++++ - 3 files changed, 193 insertions(+) - create mode 100644 drivers/watchdog/mt7621_wdt.c - ---- a/drivers/watchdog/Kconfig -+++ b/drivers/watchdog/Kconfig -@@ -1116,7 +1116,14 @@ config LANTIQ_WDT - config RALINK_WDT - tristate "Ralink SoC watchdog" - select WATCHDOG_CORE -- depends on RALINK -+ depends on RALINK && !SOC_MT7621 -+ help -+ Hardware driver for the Ralink SoC Watchdog Timer. -+ -+config MT7621_WDT -+ tristate "Mediatek SoC watchdog" -+ select WATCHDOG_CORE -+ depends on RALINK && SOC_MT7621 - help - Hardware driver for the Ralink SoC Watchdog Timer. - ---- a/drivers/watchdog/Makefile -+++ b/drivers/watchdog/Makefile -@@ -136,6 +136,7 @@ obj-$(CONFIG_OCTEON_WDT) += octeon-wdt.o - octeon-wdt-y := octeon-wdt-main.o octeon-wdt-nmi.o - obj-$(CONFIG_LANTIQ_WDT) += lantiq_wdt.o - obj-$(CONFIG_RALINK_WDT) += rt2880_wdt.o -+obj-$(CONFIG_MT7621_WDT) += mt7621_wdt.o - - # PARISC Architecture - ---- /dev/null -+++ b/drivers/watchdog/mt7621_wdt.c -@@ -0,0 +1,185 @@ -+/* -+ * Ralink RT288x/RT3xxx/MT76xx built-in hardware watchdog timer -+ * -+ * Copyright (C) 2011 Gabor Juhos -+ * Copyright (C) 2013 John Crispin -+ * -+ * This driver was based on: drivers/watchdog/softdog.c -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+ -+#define SYSC_RSTSTAT 0x38 -+#define WDT_RST_CAUSE BIT(1) -+ -+#define RALINK_WDT_TIMEOUT 30 -+ -+#define TIMER_REG_TMRSTAT 0x00 -+#define TIMER_REG_TMR1LOAD 0x24 -+#define TIMER_REG_TMR1CTL 0x20 -+ -+#define TMR1CTL_ENABLE BIT(7) -+#define TMR1CTL_RESTART BIT(9) -+ -+static void __iomem *mt762x_wdt_base; -+ -+static bool nowayout = WATCHDOG_NOWAYOUT; -+module_param(nowayout, bool, 0); -+MODULE_PARM_DESC(nowayout, -+ "Watchdog cannot be stopped once started (default=" -+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); -+ -+static inline void rt_wdt_w32(unsigned reg, u32 val) -+{ -+ iowrite32(val, mt762x_wdt_base + reg); -+} -+ -+static inline u32 rt_wdt_r32(unsigned reg) -+{ -+ return ioread32(mt762x_wdt_base + reg); -+} -+ -+static int mt762x_wdt_ping(struct watchdog_device *w) -+{ -+ rt_wdt_w32(TIMER_REG_TMRSTAT, TMR1CTL_RESTART); -+ -+ return 0; -+} -+ -+static int mt762x_wdt_set_timeout(struct watchdog_device *w, unsigned int t) -+{ -+ w->timeout = t; -+ rt_wdt_w32(TIMER_REG_TMR1LOAD, t * 1000); -+ mt762x_wdt_ping(w); -+ -+ return 0; -+} -+ -+static int mt762x_wdt_start(struct watchdog_device *w) -+{ -+ u32 t; -+ -+ rt_wdt_w32(TIMER_REG_TMR1CTL, 1000 << 16); -+ mt762x_wdt_set_timeout(w, w->timeout); -+ -+ t = rt_wdt_r32(TIMER_REG_TMR1CTL); -+ t |= TMR1CTL_ENABLE; -+ rt_wdt_w32(TIMER_REG_TMR1CTL, t); -+ -+ return 0; -+} -+ -+static int mt762x_wdt_stop(struct watchdog_device *w) -+{ -+ u32 t; -+ -+ mt762x_wdt_ping(w); -+ -+ t = rt_wdt_r32(TIMER_REG_TMR1CTL); -+ t &= ~TMR1CTL_ENABLE; -+ rt_wdt_w32(TIMER_REG_TMR1CTL, t); -+ -+ return 0; -+} -+ -+static int mt762x_wdt_bootcause(void) -+{ -+ if (rt_sysc_r32(SYSC_RSTSTAT) & WDT_RST_CAUSE) -+ return WDIOF_CARDRESET; -+ -+ return 0; -+} -+ -+static struct watchdog_info mt762x_wdt_info = { -+ .identity = "Mediatek Watchdog", -+ .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, -+}; -+ -+static struct watchdog_ops mt762x_wdt_ops = { -+ .owner = THIS_MODULE, -+ .start = mt762x_wdt_start, -+ .stop = mt762x_wdt_stop, -+ .ping = mt762x_wdt_ping, -+ .set_timeout = mt762x_wdt_set_timeout, -+}; -+ -+static struct watchdog_device mt762x_wdt_dev = { -+ .info = &mt762x_wdt_info, -+ .ops = &mt762x_wdt_ops, -+ .min_timeout = 1, -+}; -+ -+static int mt762x_wdt_probe(struct platform_device *pdev) -+{ -+ struct resource *res; -+ int ret; -+ -+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -+ mt762x_wdt_base = devm_request_and_ioremap(&pdev->dev, res); -+ if (IS_ERR(mt762x_wdt_base)) -+ return PTR_ERR(mt762x_wdt_base); -+ -+ device_reset(&pdev->dev); -+ -+ mt762x_wdt_dev.dev = &pdev->dev; -+ mt762x_wdt_dev.bootstatus = mt762x_wdt_bootcause(); -+ mt762x_wdt_dev.max_timeout = (0xfffful / 1000); -+ mt762x_wdt_dev.timeout = mt762x_wdt_dev.max_timeout; -+ -+ watchdog_set_nowayout(&mt762x_wdt_dev, nowayout); -+ -+ ret = watchdog_register_device(&mt762x_wdt_dev); -+ if (!ret) -+ dev_info(&pdev->dev, "Initialized\n"); -+ -+ return 0; -+} -+ -+static int mt762x_wdt_remove(struct platform_device *pdev) -+{ -+ watchdog_unregister_device(&mt762x_wdt_dev); -+ -+ return 0; -+} -+ -+static void mt762x_wdt_shutdown(struct platform_device *pdev) -+{ -+ mt762x_wdt_stop(&mt762x_wdt_dev); -+} -+ -+static const struct of_device_id mt762x_wdt_match[] = { -+ { .compatible = "mtk,mt7621-wdt" }, -+ {}, -+}; -+MODULE_DEVICE_TABLE(of, mt762x_wdt_match); -+ -+static struct platform_driver mt762x_wdt_driver = { -+ .probe = mt762x_wdt_probe, -+ .remove = 
mt762x_wdt_remove, -+ .shutdown = mt762x_wdt_shutdown, -+ .driver = { -+ .name = KBUILD_MODNAME, -+ .owner = THIS_MODULE, -+ .of_match_table = mt762x_wdt_match, -+ }, -+}; -+ -+module_platform_driver(mt762x_wdt_driver); -+ -+MODULE_DESCRIPTION("MediaTek MT762x hardware watchdog driver"); -+MODULE_AUTHOR("John Crispin -Date: Mon, 2 Dec 2013 16:14:28 +0100 -Subject: [PATCH 506/507] GPIO: ralink: add mt7621 gpio controller - -Signed-off-by: John Crispin ---- - arch/mips/Kconfig | 1 + - drivers/gpio/Kconfig | 6 ++ - drivers/gpio/Makefile | 1 + - drivers/gpio/gpio-mt7621.c | 183 ++++++++++++++++++++++++++++++++++++++++++++ - 4 files changed, 191 insertions(+) - create mode 100644 drivers/gpio/gpio-mt7621.c - ---- a/drivers/gpio/Kconfig -+++ b/drivers/gpio/Kconfig -@@ -710,6 +710,12 @@ config GPIO_MSIC - Enable support for GPIO on intel MSIC controllers found in - intel MID devices - -+config GPIO_MT7621 -+ bool "Mediatek GPIO Support" -+ depends on RALINK && SOC_MT7621 -+ help -+ Say yes here to support the Mediatek SoC GPIO device -+ - comment "USB GPIO expanders:" - - config GPIO_VIPERBOARD ---- a/drivers/gpio/Makefile -+++ b/drivers/gpio/Makefile -@@ -88,3 +88,4 @@ obj-$(CONFIG_GPIO_WM831X) += gpio-wm831x - obj-$(CONFIG_GPIO_WM8350) += gpio-wm8350.o - obj-$(CONFIG_GPIO_WM8994) += gpio-wm8994.o - obj-$(CONFIG_GPIO_XILINX) += gpio-xilinx.o -+obj-$(CONFIG_GPIO_MT7621) += gpio-mt7621.o ---- /dev/null -+++ b/drivers/gpio/gpio-mt7621.c -@@ -0,0 +1,183 @@ -+/* -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation. -+ * -+ * Copyright (C) 2009-2011 Gabor Juhos -+ * Copyright (C) 2013 John Crispin -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define MTK_BANK_WIDTH 32 -+ -+enum mediatek_gpio_reg { -+ GPIO_REG_CTRL = 0, -+ GPIO_REG_POL, -+ GPIO_REG_DATA, -+ GPIO_REG_DSET, -+ GPIO_REG_DCLR, -+}; -+ -+static void __iomem *mtk_gc_membase; -+ -+struct mtk_gc { -+ struct gpio_chip chip; -+ spinlock_t lock; -+ int bank; -+}; -+ -+int -+gpio_to_irq(unsigned gpio) -+{ -+ return -1; -+} -+ -+static inline struct mtk_gc -+*to_mediatek_gpio(struct gpio_chip *chip) -+{ -+ struct mtk_gc *mgc; -+ -+ mgc = container_of(chip, struct mtk_gc, chip); -+ -+ return mgc; -+} -+ -+static inline void -+mtk_gpio_w32(struct mtk_gc *rg, u8 reg, u32 val) -+{ -+ iowrite32(val, mtk_gc_membase + (reg * 0x10) + (rg->bank * 0x4)); -+} -+ -+static inline u32 -+mtk_gpio_r32(struct mtk_gc *rg, u8 reg) -+{ -+ return ioread32(mtk_gc_membase + (reg * 0x10) + (rg->bank * 0x4)); -+} -+ -+static void -+mediatek_gpio_set(struct gpio_chip *chip, unsigned offset, int value) -+{ -+ struct mtk_gc *rg = to_mediatek_gpio(chip); -+ -+ mtk_gpio_w32(rg, (value) ? 
GPIO_REG_DSET : GPIO_REG_DCLR, BIT(offset)); -+} -+ -+static int -+mediatek_gpio_get(struct gpio_chip *chip, unsigned offset) -+{ -+ struct mtk_gc *rg = to_mediatek_gpio(chip); -+ -+ return !!(mtk_gpio_r32(rg, GPIO_REG_DATA) & BIT(offset)); -+} -+ -+static int -+mediatek_gpio_direction_input(struct gpio_chip *chip, unsigned offset) -+{ -+ struct mtk_gc *rg = to_mediatek_gpio(chip); -+ unsigned long flags; -+ u32 t; -+ -+ spin_lock_irqsave(&rg->lock, flags); -+ t = mtk_gpio_r32(rg, GPIO_REG_CTRL); -+ t &= ~BIT(offset); -+ mtk_gpio_w32(rg, GPIO_REG_CTRL, t); -+ spin_unlock_irqrestore(&rg->lock, flags); -+ -+ return 0; -+} -+ -+static int -+mediatek_gpio_direction_output(struct gpio_chip *chip, -+ unsigned offset, int value) -+{ -+ struct mtk_gc *rg = to_mediatek_gpio(chip); -+ unsigned long flags; -+ u32 t; -+ -+ spin_lock_irqsave(&rg->lock, flags); -+ t = mtk_gpio_r32(rg, GPIO_REG_CTRL); -+ t |= BIT(offset); -+ mtk_gpio_w32(rg, GPIO_REG_CTRL, t); -+ mediatek_gpio_set(chip, offset, value); -+ spin_unlock_irqrestore(&rg->lock, flags); -+ -+ return 0; -+} -+ -+static int -+mediatek_gpio_bank_probe(struct platform_device *pdev, struct device_node *bank) -+{ -+ const __be32 *id = of_get_property(bank, "reg", NULL); -+ struct mtk_gc *rg = devm_kzalloc(&pdev->dev, -+ sizeof(struct mtk_gc), GFP_KERNEL); -+ if (!rg || !id) -+ return -ENOMEM; -+ -+ spin_lock_init(&rg->lock); -+ -+ rg->chip.dev = &pdev->dev; -+ rg->chip.label = dev_name(&pdev->dev); -+ rg->chip.of_node = bank; -+ rg->chip.base = MTK_BANK_WIDTH * be32_to_cpu(*id); -+ rg->chip.ngpio = MTK_BANK_WIDTH; -+ rg->chip.direction_input = mediatek_gpio_direction_input; -+ rg->chip.direction_output = mediatek_gpio_direction_output; -+ rg->chip.get = mediatek_gpio_get; -+ rg->chip.set = mediatek_gpio_set; -+ -+ /* set polarity to low for all gpios */ -+ mtk_gpio_w32(rg, GPIO_REG_POL, 0); -+ -+ dev_info(&pdev->dev, "registering %d gpios\n", rg->chip.ngpio); -+ -+ return gpiochip_add(&rg->chip); -+} -+ -+static int -+mediatek_gpio_probe(struct platform_device *pdev) -+{ -+ struct device_node *bank, *np = pdev->dev.of_node; -+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -+ -+ mtk_gc_membase = devm_request_and_ioremap(&pdev->dev, res); -+ if (IS_ERR(mtk_gc_membase)) -+ return PTR_ERR(mtk_gc_membase); -+ -+ for_each_child_of_node(np, bank) -+ if (of_device_is_compatible(bank, "mtk,mt7621-gpio-bank")) -+ mediatek_gpio_bank_probe(pdev, bank); -+ -+ return 0; -+} -+ -+static const struct of_device_id mediatek_gpio_match[] = { -+ { .compatible = "mtk,mt7621-gpio" }, -+ {}, -+}; -+MODULE_DEVICE_TABLE(of, mediatek_gpio_match); -+ -+static struct platform_driver mediatek_gpio_driver = { -+ .probe = mediatek_gpio_probe, -+ .driver = { -+ .name = "mt7621_gpio", -+ .owner = THIS_MODULE, -+ .of_match_table = mediatek_gpio_match, -+ }, -+}; -+ -+static int __init -+mediatek_gpio_init(void) -+{ -+ return platform_driver_register(&mediatek_gpio_driver); -+} -+ -+subsys_initcall(mediatek_gpio_init); diff --git a/target/linux/ramips/patches-3.10/0507-MTD-add-mt7621-nand-support.patch b/target/linux/ramips/patches-3.10/0507-MTD-add-mt7621-nand-support.patch deleted file mode 100644 index bac031aba3..0000000000 --- a/target/linux/ramips/patches-3.10/0507-MTD-add-mt7621-nand-support.patch +++ /dev/null @@ -1,4419 +0,0 @@ -From 203189714320fe43b4c0cf953efec9e28963c03b Mon Sep 17 00:00:00 2001 -From: John Crispin -Date: Mon, 2 Dec 2013 16:23:57 +0100 -Subject: [PATCH 507/507] MTD: add mt7621 nand support - -Signed-off-by: John Crispin ---- - 
drivers/mtd/nand/Kconfig | 6 + - drivers/mtd/nand/Makefile | 1 + - drivers/mtd/nand/bmt.c | 750 ++++++++++++ - drivers/mtd/nand/bmt.h | 80 ++ - drivers/mtd/nand/dev-nand.c | 63 + - drivers/mtd/nand/mt6575_typedefs.h | 340 ++++++ - drivers/mtd/nand/mtk_nand.c | 2304 +++++++++++++++++++++++++++++++++++ - drivers/mtd/nand/mtk_nand.h | 452 +++++++ - drivers/mtd/nand/nand_base.c | 6 +- - drivers/mtd/nand/nand_bbt.c | 19 + - drivers/mtd/nand/nand_def.h | 123 ++ - drivers/mtd/nand/nand_device_list.h | 55 + - drivers/mtd/nand/partition.h | 115 ++ - 13 files changed, 4311 insertions(+), 3 deletions(-) - create mode 100644 drivers/mtd/nand/bmt.c - create mode 100644 drivers/mtd/nand/bmt.h - create mode 100644 drivers/mtd/nand/dev-nand.c - create mode 100644 drivers/mtd/nand/mt6575_typedefs.h - create mode 100644 drivers/mtd/nand/mtk_nand.c - create mode 100644 drivers/mtd/nand/mtk_nand.h - create mode 100644 drivers/mtd/nand/nand_def.h - create mode 100644 drivers/mtd/nand/nand_device_list.h - create mode 100644 drivers/mtd/nand/partition.h - ---- a/drivers/mtd/nand/Kconfig -+++ b/drivers/mtd/nand/Kconfig -@@ -544,4 +544,10 @@ config MTD_NAND_XWAY - Enables support for NAND Flash chips on Lantiq XWAY SoCs. NAND is attached - to the External Bus Unit (EBU). - -+config MTK_MTD_NAND -+ tristate "Support for MTK SoC NAND controller" -+ depends on SOC_MT7621 -+ select MTD_NAND_IDS -+ select MTD_NAND_ECC -+ - endif # MTD_NAND ---- a/drivers/mtd/nand/Makefile -+++ b/drivers/mtd/nand/Makefile -@@ -50,5 +50,6 @@ obj-$(CONFIG_MTD_NAND_JZ4740) += jz4740 - obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand/ - obj-$(CONFIG_MTD_NAND_XWAY) += xway_nand.o - obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash/ -+obj-$(CONFIG_MTK_MTD_NAND) += mtk_nand.o bmt.o - - nand-objs := nand_base.o nand_bbt.o ---- /dev/null -+++ b/drivers/mtd/nand/bmt.c -@@ -0,0 +1,750 @@ -+#include "bmt.h" -+ -+typedef struct -+{ -+ char signature[3]; -+ u8 version; -+ u8 bad_count; // bad block count in pool -+ u8 mapped_count; // mapped block count in pool -+ u8 checksum; -+ u8 reseverd[13]; -+} phys_bmt_header; -+ -+typedef struct -+{ -+ phys_bmt_header header; -+ bmt_entry table[MAX_BMT_SIZE]; -+} phys_bmt_struct; -+ -+typedef struct -+{ -+ char signature[3]; -+} bmt_oob_data; -+ -+static char MAIN_SIGNATURE[] = "BMT"; -+static char OOB_SIGNATURE[] = "bmt"; -+#define SIGNATURE_SIZE (3) -+ -+#define MAX_DAT_SIZE 0x1000 -+#define MAX_OOB_SIZE 0x80 -+ -+static struct mtd_info *mtd_bmt; -+static struct nand_chip *nand_chip_bmt; -+#define BLOCK_SIZE_BMT (1 << nand_chip_bmt->phys_erase_shift) -+#define PAGE_SIZE_BMT (1 << nand_chip_bmt->page_shift) -+ -+#define OFFSET(block) ((block) * BLOCK_SIZE_BMT) -+#define PAGE_ADDR(block) ((block) * BLOCK_SIZE_BMT / PAGE_SIZE_BMT) -+ -+/********************************************************************* -+* Flash is splited into 2 parts, system part is for normal system * -+* system usage, size is system_block_count, another is replace pool * -+* +-------------------------------------------------+ * -+* | system_block_count | bmt_block_count | * -+* +-------------------------------------------------+ * -+*********************************************************************/ -+static u32 total_block_count; // block number in flash -+static u32 system_block_count; -+static int bmt_block_count; // bmt table size -+// static int bmt_count; // block used in bmt -+static int page_per_block; // page per count -+ -+static u32 bmt_block_index; // bmt block index -+static bmt_struct bmt; // dynamic created global bmt 
table -+ -+static u8 dat_buf[MAX_DAT_SIZE]; -+static u8 oob_buf[MAX_OOB_SIZE]; -+static bool pool_erased; -+ -+/*************************************************************** -+* -+* Interface adaptor for preloader/uboot/kernel -+* These interfaces operate on physical address, read/write -+* physical data. -+* -+***************************************************************/ -+int nand_read_page_bmt(u32 page, u8 * dat, u8 * oob) -+{ -+ return mtk_nand_exec_read_page(mtd_bmt, page, PAGE_SIZE_BMT, dat, oob); -+} -+ -+bool nand_block_bad_bmt(u32 offset) -+{ -+ return mtk_nand_block_bad_hw(mtd_bmt, offset); -+} -+ -+bool nand_erase_bmt(u32 offset) -+{ -+ int status; -+ if (offset < 0x20000) -+ { -+ MSG(INIT, "erase offset: 0x%x\n", offset); -+ } -+ -+ status = mtk_nand_erase_hw(mtd_bmt, offset / PAGE_SIZE_BMT); // as nand_chip structure doesn't have a erase function defined -+ if (status & NAND_STATUS_FAIL) -+ return false; -+ else -+ return true; -+} -+ -+int mark_block_bad_bmt(u32 offset) -+{ -+ return mtk_nand_block_markbad_hw(mtd_bmt, offset); //mark_block_bad_hw(offset); -+} -+ -+bool nand_write_page_bmt(u32 page, u8 * dat, u8 * oob) -+{ -+ if (mtk_nand_exec_write_page(mtd_bmt, page, PAGE_SIZE_BMT, dat, oob)) -+ return false; -+ else -+ return true; -+} -+ -+/*************************************************************** -+* * -+* static internal function * -+* * -+***************************************************************/ -+static void dump_bmt_info(bmt_struct * bmt) -+{ -+ int i; -+ -+ MSG(INIT, "BMT v%d. total %d mapping:\n", bmt->version, bmt->mapped_count); -+ for (i = 0; i < bmt->mapped_count; i++) -+ { -+ MSG(INIT, "\t0x%x -> 0x%x\n", bmt->table[i].bad_index, bmt->table[i].mapped_index); -+ } -+} -+ -+static bool match_bmt_signature(u8 * dat, u8 * oob) -+{ -+ -+ if (memcmp(dat + MAIN_SIGNATURE_OFFSET, MAIN_SIGNATURE, SIGNATURE_SIZE)) -+ { -+ return false; -+ } -+ -+ if (memcmp(oob + OOB_SIGNATURE_OFFSET, OOB_SIGNATURE, SIGNATURE_SIZE)) -+ { -+ MSG(INIT, "main signature match, oob signature doesn't match, but ignore\n"); -+ } -+ return true; -+} -+ -+static u8 cal_bmt_checksum(phys_bmt_struct * phys_table, int bmt_size) -+{ -+ int i; -+ u8 checksum = 0; -+ u8 *dat = (u8 *) phys_table; -+ -+ checksum += phys_table->header.version; -+ checksum += phys_table->header.mapped_count; -+ -+ dat += sizeof(phys_bmt_header); -+ for (i = 0; i < bmt_size * sizeof(bmt_entry); i++) -+ { -+ checksum += dat[i]; -+ } -+ -+ return checksum; -+} -+ -+ -+static int is_block_mapped(int index) -+{ -+ int i; -+ for (i = 0; i < bmt.mapped_count; i++) -+ { -+ if (index == bmt.table[i].mapped_index) -+ return i; -+ } -+ return -1; -+} -+ -+static bool is_page_used(u8 * dat, u8 * oob) -+{ -+ return ((oob[OOB_INDEX_OFFSET] != 0xFF) || (oob[OOB_INDEX_OFFSET + 1] != 0xFF)); -+} -+ -+static bool valid_bmt_data(phys_bmt_struct * phys_table) -+{ -+ int i; -+ u8 checksum = cal_bmt_checksum(phys_table, bmt_block_count); -+ -+ // checksum correct? -+ if (phys_table->header.checksum != checksum) -+ { -+ MSG(INIT, "BMT Data checksum error: %x %x\n", phys_table->header.checksum, checksum); -+ return false; -+ } -+ -+ MSG(INIT, "BMT Checksum is: 0x%x\n", phys_table->header.checksum); -+ -+ // block index correct? 
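/*
 * A standalone sketch of the additive checksum that cal_bmt_checksum()
 * above computes and valid_bmt_data() verifies: version + mapped_count
 * plus every byte of the mapping table, truncated to 8 bits.  The types
 * and the function name are simplified stand-ins for illustration only.
 */
#include <stdint.h>
#include <stddef.h>

struct bmt_entry_sketch { uint16_t bad_index; uint16_t mapped_index; };

static uint8_t bmt_checksum_sketch(uint8_t version, uint8_t mapped_count,
				   const struct bmt_entry_sketch *table,
				   size_t entries)
{
	const uint8_t *dat = (const uint8_t *)table;
	uint8_t checksum = version + mapped_count;
	size_t i;

	for (i = 0; i < entries * sizeof(*table); i++)
		checksum += dat[i];	/* wraps naturally at 8 bits */

	return checksum;
}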
-+ for (i = 0; i < phys_table->header.mapped_count; i++) -+ { -+ if (phys_table->table[i].bad_index >= total_block_count || phys_table->table[i].mapped_index >= total_block_count || phys_table->table[i].mapped_index < system_block_count) -+ { -+ MSG(INIT, "index error: bad_index: %d, mapped_index: %d\n", phys_table->table[i].bad_index, phys_table->table[i].mapped_index); -+ return false; -+ } -+ } -+ -+ // pass check, valid bmt. -+ MSG(INIT, "Valid BMT, version v%d\n", phys_table->header.version); -+ return true; -+} -+ -+static void fill_nand_bmt_buffer(bmt_struct * bmt, u8 * dat, u8 * oob) -+{ -+ phys_bmt_struct phys_bmt; -+ -+ dump_bmt_info(bmt); -+ -+ // fill phys_bmt_struct structure with bmt_struct -+ memset(&phys_bmt, 0xFF, sizeof(phys_bmt)); -+ -+ memcpy(phys_bmt.header.signature, MAIN_SIGNATURE, SIGNATURE_SIZE); -+ phys_bmt.header.version = BMT_VERSION; -+ // phys_bmt.header.bad_count = bmt->bad_count; -+ phys_bmt.header.mapped_count = bmt->mapped_count; -+ memcpy(phys_bmt.table, bmt->table, sizeof(bmt_entry) * bmt_block_count); -+ -+ phys_bmt.header.checksum = cal_bmt_checksum(&phys_bmt, bmt_block_count); -+ -+ memcpy(dat + MAIN_SIGNATURE_OFFSET, &phys_bmt, sizeof(phys_bmt)); -+ memcpy(oob + OOB_SIGNATURE_OFFSET, OOB_SIGNATURE, SIGNATURE_SIZE); -+} -+ -+// return valid index if found BMT, else return 0 -+static int load_bmt_data(int start, int pool_size) -+{ -+ int bmt_index = start + pool_size - 1; // find from the end -+ phys_bmt_struct phys_table; -+ int i; -+ -+ MSG(INIT, "[%s]: begin to search BMT from block 0x%x\n", __FUNCTION__, bmt_index); -+ -+ for (bmt_index = start + pool_size - 1; bmt_index >= start; bmt_index--) -+ { -+ if (nand_block_bad_bmt(OFFSET(bmt_index))) -+ { -+ MSG(INIT, "Skip bad block: %d\n", bmt_index); -+ continue; -+ } -+ -+ if (!nand_read_page_bmt(PAGE_ADDR(bmt_index), dat_buf, oob_buf)) -+ { -+ MSG(INIT, "Error found when read block %d\n", bmt_index); -+ continue; -+ } -+ -+ if (!match_bmt_signature(dat_buf, oob_buf)) -+ { -+ continue; -+ } -+ -+ MSG(INIT, "Match bmt signature @ block: 0x%x\n", bmt_index); -+ -+ memcpy(&phys_table, dat_buf + MAIN_SIGNATURE_OFFSET, sizeof(phys_table)); -+ -+ if (!valid_bmt_data(&phys_table)) -+ { -+ MSG(INIT, "BMT data is not correct %d\n", bmt_index); -+ continue; -+ } else -+ { -+ bmt.mapped_count = phys_table.header.mapped_count; -+ bmt.version = phys_table.header.version; -+ // bmt.bad_count = phys_table.header.bad_count; -+ memcpy(bmt.table, phys_table.table, bmt.mapped_count * sizeof(bmt_entry)); -+ -+ MSG(INIT, "bmt found at block: %d, mapped block: %d\n", bmt_index, bmt.mapped_count); -+ -+ for (i = 0; i < bmt.mapped_count; i++) -+ { -+ if (!nand_block_bad_bmt(OFFSET(bmt.table[i].bad_index))) -+ { -+ MSG(INIT, "block 0x%x is not mark bad, should be power lost last time\n", bmt.table[i].bad_index); -+ mark_block_bad_bmt(OFFSET(bmt.table[i].bad_index)); -+ } -+ } -+ -+ return bmt_index; -+ } -+ } -+ -+ MSG(INIT, "bmt block not found!\n"); -+ return 0; -+} -+ -+/************************************************************************* -+* Find an available block and erase. * -+* start_from_end: if true, find available block from end of flash. 
* -+* else, find from the beginning of the pool * -+* need_erase: if true, all unmapped blocks in the pool will be erased * -+*************************************************************************/ -+static int find_available_block(bool start_from_end) -+{ -+ int i; // , j; -+ int block = system_block_count; -+ int direction; -+ // int avail_index = 0; -+ MSG(INIT, "Try to find_available_block, pool_erase: %d\n", pool_erased); -+ -+ // erase all un-mapped blocks in pool when finding avaliable block -+ if (!pool_erased) -+ { -+ MSG(INIT, "Erase all un-mapped blocks in pool\n"); -+ for (i = 0; i < bmt_block_count; i++) -+ { -+ if (block == bmt_block_index) -+ { -+ MSG(INIT, "Skip bmt block 0x%x\n", block); -+ continue; -+ } -+ -+ if (nand_block_bad_bmt(OFFSET(block + i))) -+ { -+ MSG(INIT, "Skip bad block 0x%x\n", block + i); -+ continue; -+ } -+//if(block==4095) -+//{ -+// continue; -+//} -+ -+ if (is_block_mapped(block + i) >= 0) -+ { -+ MSG(INIT, "Skip mapped block 0x%x\n", block + i); -+ continue; -+ } -+ -+ if (!nand_erase_bmt(OFFSET(block + i))) -+ { -+ MSG(INIT, "Erase block 0x%x failed\n", block + i); -+ mark_block_bad_bmt(OFFSET(block + i)); -+ } -+ } -+ -+ pool_erased = 1; -+ } -+ -+ if (start_from_end) -+ { -+ block = total_block_count - 1; -+ direction = -1; -+ } else -+ { -+ block = system_block_count; -+ direction = 1; -+ } -+ -+ for (i = 0; i < bmt_block_count; i++, block += direction) -+ { -+ if (block == bmt_block_index) -+ { -+ MSG(INIT, "Skip bmt block 0x%x\n", block); -+ continue; -+ } -+ -+ if (nand_block_bad_bmt(OFFSET(block))) -+ { -+ MSG(INIT, "Skip bad block 0x%x\n", block); -+ continue; -+ } -+ -+ if (is_block_mapped(block) >= 0) -+ { -+ MSG(INIT, "Skip mapped block 0x%x\n", block); -+ continue; -+ } -+ -+ MSG(INIT, "Find block 0x%x available\n", block); -+ return block; -+ } -+ -+ return 0; -+} -+ -+static unsigned short get_bad_index_from_oob(u8 * oob_buf) -+{ -+ unsigned short index; -+ memcpy(&index, oob_buf + OOB_INDEX_OFFSET, OOB_INDEX_SIZE); -+ -+ return index; -+} -+ -+void set_bad_index_to_oob(u8 * oob, u16 index) -+{ -+ memcpy(oob + OOB_INDEX_OFFSET, &index, sizeof(index)); -+} -+ -+static int migrate_from_bad(int offset, u8 * write_dat, u8 * write_oob) -+{ -+ int page; -+ int error_block = offset / BLOCK_SIZE_BMT; -+ int error_page = (offset / PAGE_SIZE_BMT) % page_per_block; -+ int to_index; -+ -+ memcpy(oob_buf, write_oob, MAX_OOB_SIZE); -+ -+ to_index = find_available_block(false); -+ -+ if (!to_index) -+ { -+ MSG(INIT, "Cannot find an available block for BMT\n"); -+ return 0; -+ } -+ -+ { // migrate error page first -+ MSG(INIT, "Write error page: 0x%x\n", error_page); -+ if (!write_dat) -+ { -+ nand_read_page_bmt(PAGE_ADDR(error_block) + error_page, dat_buf, NULL); -+ write_dat = dat_buf; -+ } -+ // memcpy(oob_buf, write_oob, MAX_OOB_SIZE); -+ -+ if (error_block < system_block_count) -+ set_bad_index_to_oob(oob_buf, error_block); // if error_block is already a mapped block, original mapping index is in OOB. 
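/*
 * Sketch of how the original block index travels in the spare area, as
 * used by set_bad_index_to_oob()/get_bad_index_from_oob() above: two raw
 * bytes at OOB_INDEX_OFFSET (29 in bmt.h), with 0xFFFF from an erased
 * page meaning "no mapping recorded".  The helper names are illustrative.
 */
#include <stdint.h>
#include <string.h>

#define SKETCH_INDEX_OFFSET	29	/* mirrors OOB_INDEX_OFFSET */

static void sketch_set_bad_index(uint8_t *oob, uint16_t index)
{
	memcpy(oob + SKETCH_INDEX_OFFSET, &index, sizeof(index));
}

static uint16_t sketch_get_bad_index(const uint8_t *oob)
{
	uint16_t index;

	memcpy(&index, oob + SKETCH_INDEX_OFFSET, sizeof(index));
	return index;	/* 0xFFFF in a freshly erased page */
}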
-+ -+ if (!nand_write_page_bmt(PAGE_ADDR(to_index) + error_page, write_dat, oob_buf)) -+ { -+ MSG(INIT, "Write to page 0x%x fail\n", PAGE_ADDR(to_index) + error_page); -+ mark_block_bad_bmt(to_index); -+ return migrate_from_bad(offset, write_dat, write_oob); -+ } -+ } -+ -+ for (page = 0; page < page_per_block; page++) -+ { -+ if (page != error_page) -+ { -+ nand_read_page_bmt(PAGE_ADDR(error_block) + page, dat_buf, oob_buf); -+ if (is_page_used(dat_buf, oob_buf)) -+ { -+ if (error_block < system_block_count) -+ { -+ set_bad_index_to_oob(oob_buf, error_block); -+ } -+ MSG(INIT, "\tmigrate page 0x%x to page 0x%x\n", PAGE_ADDR(error_block) + page, PAGE_ADDR(to_index) + page); -+ if (!nand_write_page_bmt(PAGE_ADDR(to_index) + page, dat_buf, oob_buf)) -+ { -+ MSG(INIT, "Write to page 0x%x fail\n", PAGE_ADDR(to_index) + page); -+ mark_block_bad_bmt(to_index); -+ return migrate_from_bad(offset, write_dat, write_oob); -+ } -+ } -+ } -+ } -+ -+ MSG(INIT, "Migrate from 0x%x to 0x%x done!\n", error_block, to_index); -+ -+ return to_index; -+} -+ -+static bool write_bmt_to_flash(u8 * dat, u8 * oob) -+{ -+ bool need_erase = true; -+ MSG(INIT, "Try to write BMT\n"); -+ -+ if (bmt_block_index == 0) -+ { -+ // if we don't have index, we don't need to erase found block as it has been erased in find_available_block() -+ need_erase = false; -+ if (!(bmt_block_index = find_available_block(true))) -+ { -+ MSG(INIT, "Cannot find an available block for BMT\n"); -+ return false; -+ } -+ } -+ -+ MSG(INIT, "Find BMT block: 0x%x\n", bmt_block_index); -+ -+ // write bmt to flash -+ if (need_erase) -+ { -+ if (!nand_erase_bmt(OFFSET(bmt_block_index))) -+ { -+ MSG(INIT, "BMT block erase fail, mark bad: 0x%x\n", bmt_block_index); -+ mark_block_bad_bmt(OFFSET(bmt_block_index)); -+ // bmt.bad_count++; -+ -+ bmt_block_index = 0; -+ return write_bmt_to_flash(dat, oob); // recursive call -+ } -+ } -+ -+ if (!nand_write_page_bmt(PAGE_ADDR(bmt_block_index), dat, oob)) -+ { -+ MSG(INIT, "Write BMT data fail, need to write again\n"); -+ mark_block_bad_bmt(OFFSET(bmt_block_index)); -+ // bmt.bad_count++; -+ -+ bmt_block_index = 0; -+ return write_bmt_to_flash(dat, oob); // recursive call -+ } -+ -+ MSG(INIT, "Write BMT data to block 0x%x success\n", bmt_block_index); -+ return true; -+} -+ -+/******************************************************************* -+* Reconstruct bmt, called when found bmt info doesn't match bad -+* block info in flash. 
-+* -+* Return NULL for failure -+*******************************************************************/ -+bmt_struct *reconstruct_bmt(bmt_struct * bmt) -+{ -+ int i; -+ int index = system_block_count; -+ unsigned short bad_index; -+ int mapped; -+ -+ // init everything in BMT struct -+ bmt->version = BMT_VERSION; -+ bmt->bad_count = 0; -+ bmt->mapped_count = 0; -+ -+ memset(bmt->table, 0, bmt_block_count * sizeof(bmt_entry)); -+ -+ for (i = 0; i < bmt_block_count; i++, index++) -+ { -+ if (nand_block_bad_bmt(OFFSET(index))) -+ { -+ MSG(INIT, "Skip bad block: 0x%x\n", index); -+ // bmt->bad_count++; -+ continue; -+ } -+ -+ MSG(INIT, "read page: 0x%x\n", PAGE_ADDR(index)); -+ nand_read_page_bmt(PAGE_ADDR(index), dat_buf, oob_buf); -+ /* if (mtk_nand_read_page_hw(PAGE_ADDR(index), dat_buf)) -+ { -+ MSG(INIT, "Error when read block %d\n", bmt_block_index); -+ continue; -+ } */ -+ -+ if ((bad_index = get_bad_index_from_oob(oob_buf)) >= system_block_count) -+ { -+ MSG(INIT, "get bad index: 0x%x\n", bad_index); -+ if (bad_index != 0xFFFF) -+ MSG(INIT, "Invalid bad index found in block 0x%x, bad index 0x%x\n", index, bad_index); -+ continue; -+ } -+ -+ MSG(INIT, "Block 0x%x is mapped to bad block: 0x%x\n", index, bad_index); -+ -+ if (!nand_block_bad_bmt(OFFSET(bad_index))) -+ { -+ MSG(INIT, "\tbut block 0x%x is not marked as bad, invalid mapping\n", bad_index); -+ continue; // no need to erase here, it will be erased later when trying to write BMT -+ } -+ -+ if ((mapped = is_block_mapped(bad_index)) >= 0) -+ { -+ MSG(INIT, "bad block 0x%x is mapped to 0x%x, should be caused by power lost, replace with one\n", bmt->table[mapped].bad_index, bmt->table[mapped].mapped_index); -+ bmt->table[mapped].mapped_index = index; // use new one instead. -+ } else -+ { -+ // add mapping to BMT -+ bmt->table[bmt->mapped_count].bad_index = bad_index; -+ bmt->table[bmt->mapped_count].mapped_index = index; -+ bmt->mapped_count++; -+ } -+ -+ MSG(INIT, "Add mapping: 0x%x -> 0x%x to BMT\n", bad_index, index); -+ -+ } -+ -+ MSG(INIT, "Scan replace pool done, mapped block: %d\n", bmt->mapped_count); -+ // dump_bmt_info(bmt); -+ -+ // fill NAND BMT buffer -+ memset(oob_buf, 0xFF, sizeof(oob_buf)); -+ fill_nand_bmt_buffer(bmt, dat_buf, oob_buf); -+ -+ // write BMT back -+ if (!write_bmt_to_flash(dat_buf, oob_buf)) -+ { -+ MSG(INIT, "TRAGEDY: cannot find a place to write BMT!!!!\n"); -+ } -+ -+ return bmt; -+} -+ -+/******************************************************************* -+* [BMT Interface] -+* -+* Description: -+* Init bmt from nand. 
Reconstruct if not found or data error -+* -+* Parameter: -+* size: size of bmt and replace pool -+* -+* Return: -+* NULL for failure, and a bmt struct for success -+*******************************************************************/ -+bmt_struct *init_bmt(struct nand_chip * chip, int size) -+{ -+ struct mtk_nand_host *host; -+ -+ if (size > 0 && size < MAX_BMT_SIZE) -+ { -+ MSG(INIT, "Init bmt table, size: %d\n", size); -+ bmt_block_count = size; -+ } else -+ { -+ MSG(INIT, "Invalid bmt table size: %d\n", size); -+ return NULL; -+ } -+ nand_chip_bmt = chip; -+ system_block_count = chip->chipsize >> chip->phys_erase_shift; -+ total_block_count = bmt_block_count + system_block_count; -+ page_per_block = BLOCK_SIZE_BMT / PAGE_SIZE_BMT; -+ host = (struct mtk_nand_host *)chip->priv; -+ mtd_bmt = &host->mtd; -+ -+ MSG(INIT, "mtd_bmt: %p, nand_chip_bmt: %p\n", mtd_bmt, nand_chip_bmt); -+ MSG(INIT, "bmt count: %d, system count: %d\n", bmt_block_count, system_block_count); -+ -+ // set this flag, and unmapped block in pool will be erased. -+ pool_erased = 0; -+ memset(bmt.table, 0, size * sizeof(bmt_entry)); -+ if ((bmt_block_index = load_bmt_data(system_block_count, size))) -+ { -+ MSG(INIT, "Load bmt data success @ block 0x%x\n", bmt_block_index); -+ dump_bmt_info(&bmt); -+ return &bmt; -+ } else -+ { -+ MSG(INIT, "Load bmt data fail, need re-construct!\n"); -+#ifndef __UBOOT_NAND__ // BMT is not re-constructed in UBOOT. -+ if (reconstruct_bmt(&bmt)) -+ return &bmt; -+ else -+#endif -+ return NULL; -+ } -+} -+ -+/******************************************************************* -+* [BMT Interface] -+* -+* Description: -+* Update BMT. -+* -+* Parameter: -+* offset: update block/page offset. -+* reason: update reason, see update_reason_t for reason. -+* dat/oob: data and oob buffer for write fail. -+* -+* Return: -+* Return true for success, and false for failure. 
-+*******************************************************************/ -+bool update_bmt(u32 offset, update_reason_t reason, u8 * dat, u8 * oob) -+{ -+ int map_index; -+ int orig_bad_block = -1; -+ // int bmt_update_index; -+ int i; -+ int bad_index = offset / BLOCK_SIZE_BMT; -+ -+#ifndef MTK_NAND_BMT -+ return false; -+#endif -+ if (reason == UPDATE_WRITE_FAIL) -+ { -+ MSG(INIT, "Write fail, need to migrate\n"); -+ if (!(map_index = migrate_from_bad(offset, dat, oob))) -+ { -+ MSG(INIT, "migrate fail\n"); -+ return false; -+ } -+ } else -+ { -+ if (!(map_index = find_available_block(false))) -+ { -+ MSG(INIT, "Cannot find block in pool\n"); -+ return false; -+ } -+ } -+ -+ // now let's update BMT -+ if (bad_index >= system_block_count) // mapped block become bad, find original bad block -+ { -+ for (i = 0; i < bmt_block_count; i++) -+ { -+ if (bmt.table[i].mapped_index == bad_index) -+ { -+ orig_bad_block = bmt.table[i].bad_index; -+ break; -+ } -+ } -+ // bmt.bad_count++; -+ MSG(INIT, "Mapped block becomes bad, orig bad block is 0x%x\n", orig_bad_block); -+ -+ bmt.table[i].mapped_index = map_index; -+ } else -+ { -+ bmt.table[bmt.mapped_count].mapped_index = map_index; -+ bmt.table[bmt.mapped_count].bad_index = bad_index; -+ bmt.mapped_count++; -+ } -+ -+ memset(oob_buf, 0xFF, sizeof(oob_buf)); -+ fill_nand_bmt_buffer(&bmt, dat_buf, oob_buf); -+ if (!write_bmt_to_flash(dat_buf, oob_buf)) -+ return false; -+ -+ mark_block_bad_bmt(offset); -+ -+ return true; -+} -+ -+/******************************************************************* -+* [BMT Interface] -+* -+* Description: -+* Given an block index, return mapped index if it's mapped, else -+* return given index. -+* -+* Parameter: -+* index: given an block index. This value cannot exceed -+* system_block_count. 
-+* -+* Return NULL for failure -+*******************************************************************/ -+u16 get_mapping_block_index(int index) -+{ -+ int i; -+#ifndef MTK_NAND_BMT -+ return index; -+#endif -+ if (index > system_block_count) -+ { -+ return index; -+ } -+ -+ for (i = 0; i < bmt.mapped_count; i++) -+ { -+ if (bmt.table[i].bad_index == index) -+ { -+ return bmt.table[i].mapped_index; -+ } -+ } -+ -+ return index; -+} -+#ifdef __KERNEL_NAND__ -+EXPORT_SYMBOL_GPL(init_bmt); -+EXPORT_SYMBOL_GPL(update_bmt); -+EXPORT_SYMBOL_GPL(get_mapping_block_index); -+ -+MODULE_LICENSE("GPL"); -+MODULE_AUTHOR("MediaTek"); -+MODULE_DESCRIPTION("Bad Block mapping management for MediaTek NAND Flash Driver"); -+#endif ---- /dev/null -+++ b/drivers/mtd/nand/bmt.h -@@ -0,0 +1,80 @@ -+#ifndef __BMT_H__ -+#define __BMT_H__ -+ -+#include "nand_def.h" -+ -+#if defined(__PRELOADER_NAND__) -+ -+#include "nand.h" -+ -+#elif defined(__UBOOT_NAND__) -+ -+#include -+#include "mtk_nand.h" -+ -+#elif defined(__KERNEL_NAND__) -+ -+#include -+#include -+#include -+#include "mtk_nand.h" -+ -+#endif -+ -+ -+#define MAX_BMT_SIZE (0x80) -+#define BMT_VERSION (1) // initial version -+ -+#define MAIN_SIGNATURE_OFFSET (0) -+#define OOB_SIGNATURE_OFFSET (1) -+#define OOB_INDEX_OFFSET (29) -+#define OOB_INDEX_SIZE (2) -+#define FAKE_INDEX (0xAAAA) -+ -+typedef struct _bmt_entry_ -+{ -+ u16 bad_index; // bad block index -+ u16 mapped_index; // mapping block index in the replace pool -+} bmt_entry; -+ -+typedef enum -+{ -+ UPDATE_ERASE_FAIL, -+ UPDATE_WRITE_FAIL, -+ UPDATE_UNMAPPED_BLOCK, -+ UPDATE_REASON_COUNT, -+} update_reason_t; -+ -+typedef struct -+{ -+ bmt_entry table[MAX_BMT_SIZE]; -+ u8 version; -+ u8 mapped_count; // mapped block count in pool -+ u8 bad_count; // bad block count in pool. 
Not used in V1 -+} bmt_struct; -+ -+/*************************************************************** -+* * -+* Interface BMT need to use * -+* * -+***************************************************************/ -+extern bool mtk_nand_exec_read_page(struct mtd_info *mtd, u32 row, u32 page_size, u8 * dat, u8 * oob); -+extern int mtk_nand_block_bad_hw(struct mtd_info *mtd, loff_t ofs); -+extern int mtk_nand_erase_hw(struct mtd_info *mtd, int page); -+extern int mtk_nand_block_markbad_hw(struct mtd_info *mtd, loff_t ofs); -+extern int mtk_nand_exec_write_page(struct mtd_info *mtd, u32 row, u32 page_size, u8 * dat, u8 * oob); -+ -+ -+/*************************************************************** -+* * -+* Different function interface for preloader/uboot/kernel * -+* * -+***************************************************************/ -+void set_bad_index_to_oob(u8 * oob, u16 index); -+ -+ -+bmt_struct *init_bmt(struct nand_chip *nand, int size); -+bool update_bmt(u32 offset, update_reason_t reason, u8 * dat, u8 * oob); -+unsigned short get_mapping_block_index(int index); -+ -+#endif // #ifndef __BMT_H__ ---- /dev/null -+++ b/drivers/mtd/nand/dev-nand.c -@@ -0,0 +1,63 @@ -+#include -+#include -+#include -+ -+#include "mt6575_typedefs.h" -+ -+#define RALINK_NAND_CTRL_BASE 0xBE003000 -+#define NFI_base RALINK_NAND_CTRL_BASE -+#define RALINK_NANDECC_CTRL_BASE 0xBE003800 -+#define NFIECC_base RALINK_NANDECC_CTRL_BASE -+#define MT7621_NFI_IRQ_ID SURFBOARDINT_NAND -+#define MT7621_NFIECC_IRQ_ID SURFBOARDINT_NAND_ECC -+ -+#define SURFBOARDINT_NAND 22 -+#define SURFBOARDINT_NAND_ECC 23 -+ -+static struct resource MT7621_resource_nand[] = { -+ { -+ .start = NFI_base, -+ .end = NFI_base + 0x1A0, -+ .flags = IORESOURCE_MEM, -+ }, -+ { -+ .start = NFIECC_base, -+ .end = NFIECC_base + 0x150, -+ .flags = IORESOURCE_MEM, -+ }, -+ { -+ .start = MT7621_NFI_IRQ_ID, -+ .flags = IORESOURCE_IRQ, -+ }, -+ { -+ .start = MT7621_NFIECC_IRQ_ID, -+ .flags = IORESOURCE_IRQ, -+ }, -+}; -+ -+static struct platform_device MT7621_nand_dev = { -+ .name = "MT7621-NAND", -+ .id = 0, -+ .num_resources = ARRAY_SIZE(MT7621_resource_nand), -+ .resource = MT7621_resource_nand, -+ .dev = { -+ .platform_data = &mt7621_nand_hw, -+ }, -+}; -+ -+ -+int __init mtk_nand_register(void) -+{ -+ -+ int retval = 0; -+ -+ retval = platform_device_register(&MT7621_nand_dev); -+ if (retval != 0) { -+ printk(KERN_ERR "register nand device fail\n"); -+ return retval; -+ } -+ -+ -+ return retval; -+} -+arch_initcall(mtk_nand_register); ---- /dev/null -+++ b/drivers/mtd/nand/mt6575_typedefs.h -@@ -0,0 +1,340 @@ -+/* Copyright Statement: -+ * -+ * This software/firmware and related documentation ("MediaTek Software") are -+ * protected under relevant copyright laws. The information contained herein -+ * is confidential and proprietary to MediaTek Inc. and/or its licensors. -+ * Without the prior written permission of MediaTek inc. and/or its licensors, -+ * any reproduction, modification, use or disclosure of MediaTek Software, -+ * and information contained herein, in whole or in part, shall be strictly prohibited. -+ */ -+/* MediaTek Inc. (C) 2010. All rights reserved. -+ * -+ * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES -+ * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE") -+ * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON -+ * AN "AS-IS" BASIS ONLY. 
MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES, -+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF -+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT. -+ * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE -+ * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR -+ * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH -+ * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES -+ * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES -+ * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK -+ * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR -+ * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND -+ * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE, -+ * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE, -+ * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO -+ * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE. -+ * -+ * The following software/firmware and/or related documentation ("MediaTek Software") -+ * have been modified by MediaTek Inc. All revisions are subject to any receiver's -+ * applicable license agreements with MediaTek Inc. -+ */ -+ -+/***************************************************************************** -+* Copyright Statement: -+* -------------------- -+* This software is protected by Copyright and the information contained -+* herein is confidential. The software may not be copied and the information -+* contained herein may not be used or disclosed except with the written -+* permission of MediaTek Inc. (C) 2008 -+* -+* BY OPENING THIS FILE, BUYER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES -+* THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE") -+* RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO BUYER ON -+* AN "AS-IS" BASIS ONLY. MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES, -+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF -+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT. -+* NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE -+* SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR -+* SUPPLIED WITH THE MEDIATEK SOFTWARE, AND BUYER AGREES TO LOOK ONLY TO SUCH -+* THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. MEDIATEK SHALL ALSO -+* NOT BE RESPONSIBLE FOR ANY MEDIATEK SOFTWARE RELEASES MADE TO BUYER'S -+* SPECIFICATION OR TO CONFORM TO A PARTICULAR STANDARD OR OPEN FORUM. -+* -+* BUYER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND CUMULATIVE -+* LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE, -+* AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE, -+* OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY BUYER TO -+* MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE. -+* -+* THE TRANSACTION CONTEMPLATED HEREUNDER SHALL BE CONSTRUED IN ACCORDANCE -+* WITH THE LAWS OF THE STATE OF CALIFORNIA, USA, EXCLUDING ITS CONFLICT OF -+* LAWS PRINCIPLES. ANY DISPUTES, CONTROVERSIES OR CLAIMS ARISING THEREOF AND -+* RELATED THERETO SHALL BE SETTLED BY ARBITRATION IN SAN FRANCISCO, CA, UNDER -+* THE RULES OF THE INTERNATIONAL CHAMBER OF COMMERCE (ICC). 
-+* -+*****************************************************************************/ -+ -+#ifndef _MT6575_TYPEDEFS_H -+#define _MT6575_TYPEDEFS_H -+ -+#if defined (__KERNEL_NAND__) -+#include -+#else -+#define true 1 -+#define false 0 -+#define bool u8 -+#endif -+ -+// --------------------------------------------------------------------------- -+// Basic Type Definitions -+// --------------------------------------------------------------------------- -+ -+typedef volatile unsigned char *P_kal_uint8; -+typedef volatile unsigned short *P_kal_uint16; -+typedef volatile unsigned int *P_kal_uint32; -+ -+typedef long LONG; -+typedef unsigned char UBYTE; -+typedef short SHORT; -+ -+typedef signed char kal_int8; -+typedef signed short kal_int16; -+typedef signed int kal_int32; -+typedef long long kal_int64; -+typedef unsigned char kal_uint8; -+typedef unsigned short kal_uint16; -+typedef unsigned int kal_uint32; -+typedef unsigned long long kal_uint64; -+typedef char kal_char; -+ -+typedef unsigned int *UINT32P; -+typedef volatile unsigned short *UINT16P; -+typedef volatile unsigned char *UINT8P; -+typedef unsigned char *U8P; -+ -+typedef volatile unsigned char *P_U8; -+typedef volatile signed char *P_S8; -+typedef volatile unsigned short *P_U16; -+typedef volatile signed short *P_S16; -+typedef volatile unsigned int *P_U32; -+typedef volatile signed int *P_S32; -+typedef unsigned long long *P_U64; -+typedef signed long long *P_S64; -+ -+typedef unsigned char U8; -+typedef signed char S8; -+typedef unsigned short U16; -+typedef signed short S16; -+typedef unsigned int U32; -+typedef signed int S32; -+typedef unsigned long long U64; -+typedef signed long long S64; -+//typedef unsigned char bool; -+ -+typedef unsigned char UINT8; -+typedef unsigned short UINT16; -+typedef unsigned int UINT32; -+typedef unsigned short USHORT; -+typedef signed char INT8; -+typedef signed short INT16; -+typedef signed int INT32; -+typedef unsigned int DWORD; -+typedef void VOID; -+typedef unsigned char BYTE; -+typedef float FLOAT; -+ -+typedef char *LPCSTR; -+typedef short *LPWSTR; -+ -+ -+// --------------------------------------------------------------------------- -+// Constants -+// --------------------------------------------------------------------------- -+ -+#define IMPORT EXTERN -+#ifndef __cplusplus -+ #define EXTERN extern -+#else -+ #define EXTERN extern "C" -+#endif -+#define LOCAL static -+#define GLOBAL -+#define EXPORT GLOBAL -+ -+#define EQ == -+#define NEQ != -+#define AND && -+#define OR || -+#define XOR(A,B) ((!(A) AND (B)) OR ((A) AND !(B))) -+ -+#ifndef FALSE -+ #define FALSE (0) -+#endif -+ -+#ifndef TRUE -+ #define TRUE (1) -+#endif -+ -+#ifndef NULL -+ #define NULL (0) -+#endif -+ -+//enum boolean {false, true}; -+enum {RX, TX, NONE}; -+ -+#ifndef BOOL -+typedef unsigned char BOOL; -+#endif -+ -+typedef enum { -+ KAL_FALSE = 0, -+ KAL_TRUE = 1, -+} kal_bool; -+ -+ -+// --------------------------------------------------------------------------- -+// Type Casting -+// --------------------------------------------------------------------------- -+ -+#define AS_INT32(x) (*(INT32 *)((void*)x)) -+#define AS_INT16(x) (*(INT16 *)((void*)x)) -+#define AS_INT8(x) (*(INT8 *)((void*)x)) -+ -+#define AS_UINT32(x) (*(UINT32 *)((void*)x)) -+#define AS_UINT16(x) (*(UINT16 *)((void*)x)) -+#define AS_UINT8(x) (*(UINT8 *)((void*)x)) -+ -+ -+// --------------------------------------------------------------------------- -+// Register Manipulations -+// 
--------------------------------------------------------------------------- -+ -+#define READ_REGISTER_UINT32(reg) \ -+ (*(volatile UINT32 * const)(reg)) -+ -+#define WRITE_REGISTER_UINT32(reg, val) \ -+ (*(volatile UINT32 * const)(reg)) = (val) -+ -+#define READ_REGISTER_UINT16(reg) \ -+ (*(volatile UINT16 * const)(reg)) -+ -+#define WRITE_REGISTER_UINT16(reg, val) \ -+ (*(volatile UINT16 * const)(reg)) = (val) -+ -+#define READ_REGISTER_UINT8(reg) \ -+ (*(volatile UINT8 * const)(reg)) -+ -+#define WRITE_REGISTER_UINT8(reg, val) \ -+ (*(volatile UINT8 * const)(reg)) = (val) -+ -+#define INREG8(x) READ_REGISTER_UINT8((UINT8*)((void*)(x))) -+#define OUTREG8(x, y) WRITE_REGISTER_UINT8((UINT8*)((void*)(x)), (UINT8)(y)) -+#define SETREG8(x, y) OUTREG8(x, INREG8(x)|(y)) -+#define CLRREG8(x, y) OUTREG8(x, INREG8(x)&~(y)) -+#define MASKREG8(x, y, z) OUTREG8(x, (INREG8(x)&~(y))|(z)) -+ -+#define INREG16(x) READ_REGISTER_UINT16((UINT16*)((void*)(x))) -+#define OUTREG16(x, y) WRITE_REGISTER_UINT16((UINT16*)((void*)(x)),(UINT16)(y)) -+#define SETREG16(x, y) OUTREG16(x, INREG16(x)|(y)) -+#define CLRREG16(x, y) OUTREG16(x, INREG16(x)&~(y)) -+#define MASKREG16(x, y, z) OUTREG16(x, (INREG16(x)&~(y))|(z)) -+ -+#define INREG32(x) READ_REGISTER_UINT32((UINT32*)((void*)(x))) -+#define OUTREG32(x, y) WRITE_REGISTER_UINT32((UINT32*)((void*)(x)), (UINT32)(y)) -+#define SETREG32(x, y) OUTREG32(x, INREG32(x)|(y)) -+#define CLRREG32(x, y) OUTREG32(x, INREG32(x)&~(y)) -+#define MASKREG32(x, y, z) OUTREG32(x, (INREG32(x)&~(y))|(z)) -+ -+ -+#define DRV_Reg8(addr) INREG8(addr) -+#define DRV_WriteReg8(addr, data) OUTREG8(addr, data) -+#define DRV_SetReg8(addr, data) SETREG8(addr, data) -+#define DRV_ClrReg8(addr, data) CLRREG8(addr, data) -+ -+#define DRV_Reg16(addr) INREG16(addr) -+#define DRV_WriteReg16(addr, data) OUTREG16(addr, data) -+#define DRV_SetReg16(addr, data) SETREG16(addr, data) -+#define DRV_ClrReg16(addr, data) CLRREG16(addr, data) -+ -+#define DRV_Reg32(addr) INREG32(addr) -+#define DRV_WriteReg32(addr, data) OUTREG32(addr, data) -+#define DRV_SetReg32(addr, data) SETREG32(addr, data) -+#define DRV_ClrReg32(addr, data) CLRREG32(addr, data) -+ -+// !!! DEPRECATED, WILL BE REMOVED LATER !!! -+#define DRV_Reg(addr) DRV_Reg16(addr) -+#define DRV_WriteReg(addr, data) DRV_WriteReg16(addr, data) -+#define DRV_SetReg(addr, data) DRV_SetReg16(addr, data) -+#define DRV_ClrReg(addr, data) DRV_ClrReg16(addr, data) -+ -+ -+// --------------------------------------------------------------------------- -+// Compiler Time Deduction Macros -+// --------------------------------------------------------------------------- -+ -+#define _MASK_OFFSET_1(x, n) ((x) & 0x1) ? 
(n) : -+#define _MASK_OFFSET_2(x, n) _MASK_OFFSET_1((x), (n)) _MASK_OFFSET_1((x) >> 1, (n) + 1) -+#define _MASK_OFFSET_4(x, n) _MASK_OFFSET_2((x), (n)) _MASK_OFFSET_2((x) >> 2, (n) + 2) -+#define _MASK_OFFSET_8(x, n) _MASK_OFFSET_4((x), (n)) _MASK_OFFSET_4((x) >> 4, (n) + 4) -+#define _MASK_OFFSET_16(x, n) _MASK_OFFSET_8((x), (n)) _MASK_OFFSET_8((x) >> 8, (n) + 8) -+#define _MASK_OFFSET_32(x, n) _MASK_OFFSET_16((x), (n)) _MASK_OFFSET_16((x) >> 16, (n) + 16) -+ -+#define MASK_OFFSET_ERROR (0xFFFFFFFF) -+ -+#define MASK_OFFSET(x) (_MASK_OFFSET_32(x, 0) MASK_OFFSET_ERROR) -+ -+ -+// --------------------------------------------------------------------------- -+// Assertions -+// --------------------------------------------------------------------------- -+ -+#ifndef ASSERT -+ #define ASSERT(expr) BUG_ON(!(expr)) -+#endif -+ -+#ifndef NOT_IMPLEMENTED -+ #define NOT_IMPLEMENTED() BUG_ON(1) -+#endif -+ -+#define STATIC_ASSERT(pred) STATIC_ASSERT_X(pred, __LINE__) -+#define STATIC_ASSERT_X(pred, line) STATIC_ASSERT_XX(pred, line) -+#define STATIC_ASSERT_XX(pred, line) \ -+ extern char assertion_failed_at_##line[(pred) ? 1 : -1] -+ -+// --------------------------------------------------------------------------- -+// Resolve Compiler Warnings -+// --------------------------------------------------------------------------- -+ -+#define NOT_REFERENCED(x) { (x) = (x); } -+ -+ -+// --------------------------------------------------------------------------- -+// Utilities -+// --------------------------------------------------------------------------- -+ -+#define MAXIMUM(A,B) (((A)>(B))?(A):(B)) -+#define MINIMUM(A,B) (((A)<(B))?(A):(B)) -+ -+#define ARY_SIZE(x) (sizeof((x)) / sizeof((x[0]))) -+#define DVT_DELAYMACRO(u4Num) \ -+{ \ -+ UINT32 u4Count = 0 ; \ -+ for (u4Count = 0; u4Count < u4Num; u4Count++ ); \ -+} \ -+ -+#define A68351B 0 -+#define B68351B 1 -+#define B68351D 2 -+#define B68351E 3 -+#define UNKNOWN_IC_VERSION 0xFF -+ -+/* NAND driver */ -+struct mtk_nand_host_hw { -+ unsigned int nfi_bus_width; /* NFI_BUS_WIDTH */ -+ unsigned int nfi_access_timing; /* NFI_ACCESS_TIMING */ -+ unsigned int nfi_cs_num; /* NFI_CS_NUM */ -+ unsigned int nand_sec_size; /* NAND_SECTOR_SIZE */ -+ unsigned int nand_sec_shift; /* NAND_SECTOR_SHIFT */ -+ unsigned int nand_ecc_size; -+ unsigned int nand_ecc_bytes; -+ unsigned int nand_ecc_mode; -+}; -+extern struct mtk_nand_host_hw mt7621_nand_hw; -+extern unsigned int CFG_BLOCKSIZE; -+ -+#endif // _MT6575_TYPEDEFS_H -+ ---- /dev/null -+++ b/drivers/mtd/nand/mtk_nand.c -@@ -0,0 +1,2304 @@ -+/****************************************************************************** -+* mtk_nand.c - MTK NAND Flash Device Driver -+ * -+* Copyright 2009-2012 MediaTek Co.,Ltd. 
-+ * -+* DESCRIPTION: -+* This file provid the other drivers nand relative functions -+ * -+* modification history -+* ---------------------------------------- -+* v3.0, 11 Feb 2010, mtk -+* ---------------------------------------- -+******************************************************************************/ -+#include "nand_def.h" -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "mtk_nand.h" -+#include "nand_device_list.h" -+ -+#include "bmt.h" -+#include "partition.h" -+ -+unsigned int CFG_BLOCKSIZE; -+ -+static int shift_on_bbt = 0; -+extern void nand_bbt_set(struct mtd_info *mtd, int page, int flag); -+extern int nand_bbt_get(struct mtd_info *mtd, int page); -+int mtk_nand_read_oob_hw(struct mtd_info *mtd, struct nand_chip *chip, int page); -+ -+static const char * const probe_types[] = { "cmdlinepart", "ofpart", NULL }; -+ -+#define NAND_CMD_STATUS_MULTI 0x71 -+ -+void show_stack(struct task_struct *tsk, unsigned long *sp); -+extern void mt_irq_set_sens(unsigned int irq, unsigned int sens); -+extern void mt_irq_set_polarity(unsigned int irq,unsigned int polarity); -+ -+struct mtk_nand_host mtk_nand_host; /* include mtd_info and nand_chip structs */ -+struct mtk_nand_host_hw mt7621_nand_hw = { -+ .nfi_bus_width = 8, -+ .nfi_access_timing = NFI_DEFAULT_ACCESS_TIMING, -+ .nfi_cs_num = NFI_CS_NUM, -+ .nand_sec_size = 512, -+ .nand_sec_shift = 9, -+ .nand_ecc_size = 2048, -+ .nand_ecc_bytes = 32, -+ .nand_ecc_mode = NAND_ECC_HW, -+}; -+ -+ -+/******************************************************************************* -+ * Gloable Varible Definition -+ *******************************************************************************/ -+ -+#define NFI_ISSUE_COMMAND(cmd, col_addr, row_addr, col_num, row_num) \ -+ do { \ -+ DRV_WriteReg(NFI_CMD_REG16,cmd);\ -+ while (DRV_Reg32(NFI_STA_REG32) & STA_CMD_STATE);\ -+ DRV_WriteReg32(NFI_COLADDR_REG32, col_addr);\ -+ DRV_WriteReg32(NFI_ROWADDR_REG32, row_addr);\ -+ DRV_WriteReg(NFI_ADDRNOB_REG16, col_num | (row_num<mm) { -+ printk(KERN_ERR "[nand_virt_to_phys_add] ERROR current->mm is NULL! tgid=0x%x, name=%s \n", current->tgid, current->comm); -+ return 0; -+ } -+ -+ pgd = pgd_offset(current->mm, va); /* what is tsk->mm */ -+ if (pgd_none(*pgd) || pgd_bad(*pgd)) { -+ printk(KERN_ERR "[nand_virt_to_phys_add] ERROR, va=0x%x, pgd invalid! \n", va); -+ return 0; -+ } -+ -+ pmd = pmd_offset((pud_t *)pgd, va); -+ if (pmd_none(*pmd) || pmd_bad(*pmd)) { -+ printk(KERN_ERR "[nand_virt_to_phys_add] ERROR, va=0x%x, pmd invalid! \n", va); -+ return 0; -+ } -+ -+ pte = pte_offset_map(pmd, va); -+ if (pte_present(*pte)) { -+ pa = (pte_val(*pte) & (PAGE_MASK)) | pageOffset; -+ return pa; -+ } -+ -+ printk(KERN_ERR "[nand_virt_to_phys_add] ERROR va=0x%x, pte invalid! 
\n", va); -+ return 0; -+} -+EXPORT_SYMBOL(nand_virt_to_phys_add); -+ -+bool -+get_device_info(u16 id, u32 ext_id, flashdev_info * pdevinfo) -+{ -+ u32 index; -+ for (index = 0; gen_FlashTable[index].id != 0; index++) { -+ if (id == gen_FlashTable[index].id && ext_id == gen_FlashTable[index].ext_id) { -+ pdevinfo->id = gen_FlashTable[index].id; -+ pdevinfo->ext_id = gen_FlashTable[index].ext_id; -+ pdevinfo->blocksize = gen_FlashTable[index].blocksize; -+ pdevinfo->addr_cycle = gen_FlashTable[index].addr_cycle; -+ pdevinfo->iowidth = gen_FlashTable[index].iowidth; -+ pdevinfo->timmingsetting = gen_FlashTable[index].timmingsetting; -+ pdevinfo->advancedmode = gen_FlashTable[index].advancedmode; -+ pdevinfo->pagesize = gen_FlashTable[index].pagesize; -+ pdevinfo->sparesize = gen_FlashTable[index].sparesize; -+ pdevinfo->totalsize = gen_FlashTable[index].totalsize; -+ memcpy(pdevinfo->devciename, gen_FlashTable[index].devciename, sizeof(pdevinfo->devciename)); -+ printk(KERN_INFO "Device found in MTK table, ID: %x, EXT_ID: %x\n", id, ext_id); -+ -+ goto find; -+ } -+ } -+ -+find: -+ if (0 == pdevinfo->id) { -+ printk(KERN_INFO "Device not found, ID: %x\n", id); -+ return false; -+ } else { -+ return true; -+ } -+} -+ -+static void -+ECC_Config(struct mtk_nand_host_hw *hw,u32 ecc_bit) -+{ -+ u32 u4ENCODESize; -+ u32 u4DECODESize; -+ u32 ecc_bit_cfg = ECC_CNFG_ECC4; -+ -+ switch(ecc_bit){ -+ case 4: -+ ecc_bit_cfg = ECC_CNFG_ECC4; -+ break; -+ case 8: -+ ecc_bit_cfg = ECC_CNFG_ECC8; -+ break; -+ case 10: -+ ecc_bit_cfg = ECC_CNFG_ECC10; -+ break; -+ case 12: -+ ecc_bit_cfg = ECC_CNFG_ECC12; -+ break; -+ default: -+ break; -+ } -+ DRV_WriteReg16(ECC_DECCON_REG16, DEC_DE); -+ do { -+ } while (!DRV_Reg16(ECC_DECIDLE_REG16)); -+ -+ DRV_WriteReg16(ECC_ENCCON_REG16, ENC_DE); -+ do { -+ } while (!DRV_Reg32(ECC_ENCIDLE_REG32)); -+ -+ /* setup FDM register base */ -+ DRV_WriteReg32(ECC_FDMADDR_REG32, NFI_FDM0L_REG32); -+ -+ /* Sector + FDM */ -+ u4ENCODESize = (hw->nand_sec_size + 8) << 3; -+ /* Sector + FDM + YAFFS2 meta data bits */ -+ u4DECODESize = ((hw->nand_sec_size + 8) << 3) + ecc_bit * 13; -+ -+ /* configure ECC decoder && encoder */ -+ DRV_WriteReg32(ECC_DECCNFG_REG32, ecc_bit_cfg | DEC_CNFG_NFI | DEC_CNFG_EMPTY_EN | (u4DECODESize << DEC_CNFG_CODE_SHIFT)); -+ -+ DRV_WriteReg32(ECC_ENCCNFG_REG32, ecc_bit_cfg | ENC_CNFG_NFI | (u4ENCODESize << ENC_CNFG_MSG_SHIFT)); -+ NFI_SET_REG32(ECC_DECCNFG_REG32, DEC_CNFG_EL); -+} -+ -+static void -+ECC_Decode_Start(void) -+{ -+ while (!(DRV_Reg16(ECC_DECIDLE_REG16) & DEC_IDLE)) -+ ; -+ DRV_WriteReg16(ECC_DECCON_REG16, DEC_EN); -+} -+ -+static void -+ECC_Decode_End(void) -+{ -+ while (!(DRV_Reg16(ECC_DECIDLE_REG16) & DEC_IDLE)) -+ ; -+ DRV_WriteReg16(ECC_DECCON_REG16, DEC_DE); -+} -+ -+static void -+ECC_Encode_Start(void) -+{ -+ while (!(DRV_Reg32(ECC_ENCIDLE_REG32) & ENC_IDLE)) -+ ; -+ mb(); -+ DRV_WriteReg16(ECC_ENCCON_REG16, ENC_EN); -+} -+ -+static void -+ECC_Encode_End(void) -+{ -+ /* wait for device returning idle */ -+ while (!(DRV_Reg32(ECC_ENCIDLE_REG32) & ENC_IDLE)) ; -+ mb(); -+ DRV_WriteReg16(ECC_ENCCON_REG16, ENC_DE); -+} -+ -+static bool -+mtk_nand_check_bch_error(struct mtd_info *mtd, u8 * pDataBuf, u32 u4SecIndex, u32 u4PageAddr) -+{ -+ bool bRet = true; -+ u16 u2SectorDoneMask = 1 << u4SecIndex; -+ u32 u4ErrorNumDebug, i, u4ErrNum; -+ u32 timeout = 0xFFFF; -+ // int el; -+ u32 au4ErrBitLoc[6]; -+ u32 u4ErrByteLoc, u4BitOffset; -+ u32 u4ErrBitLoc1th, u4ErrBitLoc2nd; -+ -+ //4 // Wait for Decode Done -+ while (0 == (u2SectorDoneMask & 
DRV_Reg16(ECC_DECDONE_REG16))) { -+ timeout--; -+ if (0 == timeout) -+ return false; -+ } -+ /* We will manually correct the error bits in the last sector, not all the sectors of the page! */ -+ memset(au4ErrBitLoc, 0x0, sizeof(au4ErrBitLoc)); -+ u4ErrorNumDebug = DRV_Reg32(ECC_DECENUM_REG32); -+ u4ErrNum = DRV_Reg32(ECC_DECENUM_REG32) >> (u4SecIndex << 2); -+ u4ErrNum &= 0xF; -+ -+ if (u4ErrNum) { -+ if (0xF == u4ErrNum) { -+ mtd->ecc_stats.failed++; -+ bRet = false; -+ //printk(KERN_ERR"UnCorrectable at PageAddr=%d\n", u4PageAddr); -+ } else { -+ for (i = 0; i < ((u4ErrNum + 1) >> 1); ++i) { -+ au4ErrBitLoc[i] = DRV_Reg32(ECC_DECEL0_REG32 + i); -+ u4ErrBitLoc1th = au4ErrBitLoc[i] & 0x1FFF; -+ if (u4ErrBitLoc1th < 0x1000) { -+ u4ErrByteLoc = u4ErrBitLoc1th / 8; -+ u4BitOffset = u4ErrBitLoc1th % 8; -+ pDataBuf[u4ErrByteLoc] = pDataBuf[u4ErrByteLoc] ^ (1 << u4BitOffset); -+ mtd->ecc_stats.corrected++; -+ } else { -+ mtd->ecc_stats.failed++; -+ } -+ u4ErrBitLoc2nd = (au4ErrBitLoc[i] >> 16) & 0x1FFF; -+ if (0 != u4ErrBitLoc2nd) { -+ if (u4ErrBitLoc2nd < 0x1000) { -+ u4ErrByteLoc = u4ErrBitLoc2nd / 8; -+ u4BitOffset = u4ErrBitLoc2nd % 8; -+ pDataBuf[u4ErrByteLoc] = pDataBuf[u4ErrByteLoc] ^ (1 << u4BitOffset); -+ mtd->ecc_stats.corrected++; -+ } else { -+ mtd->ecc_stats.failed++; -+ //printk(KERN_ERR"UnCorrectable High ErrLoc=%d\n", au4ErrBitLoc[i]); -+ } -+ } -+ } -+ } -+ if (0 == (DRV_Reg16(ECC_DECFER_REG16) & (1 << u4SecIndex))) -+ bRet = false; -+ } -+ return bRet; -+} -+ -+static bool -+mtk_nand_RFIFOValidSize(u16 u2Size) -+{ -+ u32 timeout = 0xFFFF; -+ while (FIFO_RD_REMAIN(DRV_Reg16(NFI_FIFOSTA_REG16)) < u2Size) { -+ timeout--; -+ if (0 == timeout) -+ return false; -+ } -+ return true; -+} -+ -+static bool -+mtk_nand_WFIFOValidSize(u16 u2Size) -+{ -+ u32 timeout = 0xFFFF; -+ -+ while (FIFO_WR_REMAIN(DRV_Reg16(NFI_FIFOSTA_REG16)) > u2Size) { -+ timeout--; -+ if (0 == timeout) -+ return false; -+ } -+ return true; -+} -+ -+static bool -+mtk_nand_status_ready(u32 u4Status) -+{ -+ u32 timeout = 0xFFFF; -+ -+ while ((DRV_Reg32(NFI_STA_REG32) & u4Status) != 0) { -+ timeout--; -+ if (0 == timeout) -+ return false; -+ } -+ return true; -+} -+ -+static bool -+mtk_nand_reset(void) -+{ -+ int timeout = 0xFFFF; -+ if (DRV_Reg16(NFI_MASTERSTA_REG16)) { -+ mb(); -+ DRV_WriteReg16(NFI_CON_REG16, CON_FIFO_FLUSH | CON_NFI_RST); -+ while (DRV_Reg16(NFI_MASTERSTA_REG16)) { -+ timeout--; -+ if (!timeout) -+ MSG(INIT, "Wait for NFI_MASTERSTA timeout\n"); -+ } -+ } -+ /* issue reset operation */ -+ mb(); -+ DRV_WriteReg16(NFI_CON_REG16, CON_FIFO_FLUSH | CON_NFI_RST); -+ -+ return mtk_nand_status_ready(STA_NFI_FSM_MASK | STA_NAND_BUSY) && mtk_nand_RFIFOValidSize(0) && mtk_nand_WFIFOValidSize(0); -+} -+ -+static void -+mtk_nand_set_mode(u16 u2OpMode) -+{ -+ u16 u2Mode = DRV_Reg16(NFI_CNFG_REG16); -+ u2Mode &= ~CNFG_OP_MODE_MASK; -+ u2Mode |= u2OpMode; -+ DRV_WriteReg16(NFI_CNFG_REG16, u2Mode); -+} -+ -+static void -+mtk_nand_set_autoformat(bool bEnable) -+{ -+ if (bEnable) -+ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_AUTO_FMT_EN); -+ else -+ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AUTO_FMT_EN); -+} -+ -+static void -+mtk_nand_configure_fdm(u16 u2FDMSize) -+{ -+ NFI_CLN_REG16(NFI_PAGEFMT_REG16, PAGEFMT_FDM_MASK | PAGEFMT_FDM_ECC_MASK); -+ NFI_SET_REG16(NFI_PAGEFMT_REG16, u2FDMSize << PAGEFMT_FDM_SHIFT); -+ NFI_SET_REG16(NFI_PAGEFMT_REG16, u2FDMSize << PAGEFMT_FDM_ECC_SHIFT); -+} -+ -+static void -+mtk_nand_configure_lock(void) -+{ -+ u32 u4WriteColNOB = 2; -+ u32 u4WriteRowNOB = 3; -+ u32 u4EraseColNOB = 0; -+ u32 
u4EraseRowNOB = 3; -+ DRV_WriteReg16(NFI_LOCKANOB_REG16, -+ (u4WriteColNOB << PROG_CADD_NOB_SHIFT) | (u4WriteRowNOB << PROG_RADD_NOB_SHIFT) | (u4EraseColNOB << ERASE_CADD_NOB_SHIFT) | (u4EraseRowNOB << ERASE_RADD_NOB_SHIFT)); -+ -+ if (CHIPVER_ECO_1 == g_u4ChipVer) { -+ int i; -+ for (i = 0; i < 16; ++i) { -+ DRV_WriteReg32(NFI_LOCK00ADD_REG32 + (i << 1), 0xFFFFFFFF); -+ DRV_WriteReg32(NFI_LOCK00FMT_REG32 + (i << 1), 0xFFFFFFFF); -+ } -+ //DRV_WriteReg16(NFI_LOCKANOB_REG16, 0x0); -+ DRV_WriteReg32(NFI_LOCKCON_REG32, 0xFFFFFFFF); -+ DRV_WriteReg16(NFI_LOCK_REG16, NFI_LOCK_ON); -+ } -+} -+ -+static bool -+mtk_nand_pio_ready(void) -+{ -+ int count = 0; -+ while (!(DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1)) { -+ count++; -+ if (count > 0xffff) { -+ printk("PIO_DIRDY timeout\n"); -+ return false; -+ } -+ } -+ -+ return true; -+} -+ -+static bool -+mtk_nand_set_command(u16 command) -+{ -+ mb(); -+ DRV_WriteReg16(NFI_CMD_REG16, command); -+ return mtk_nand_status_ready(STA_CMD_STATE); -+} -+ -+static bool -+mtk_nand_set_address(u32 u4ColAddr, u32 u4RowAddr, u16 u2ColNOB, u16 u2RowNOB) -+{ -+ mb(); -+ DRV_WriteReg32(NFI_COLADDR_REG32, u4ColAddr); -+ DRV_WriteReg32(NFI_ROWADDR_REG32, u4RowAddr); -+ DRV_WriteReg16(NFI_ADDRNOB_REG16, u2ColNOB | (u2RowNOB << ADDR_ROW_NOB_SHIFT)); -+ return mtk_nand_status_ready(STA_ADDR_STATE); -+} -+ -+static bool -+mtk_nand_check_RW_count(u16 u2WriteSize) -+{ -+ u32 timeout = 0xFFFF; -+ u16 u2SecNum = u2WriteSize >> 9; -+ -+ while (ADDRCNTR_CNTR(DRV_Reg16(NFI_ADDRCNTR_REG16)) < u2SecNum) { -+ timeout--; -+ if (0 == timeout) { -+ printk(KERN_INFO "[%s] timeout\n", __FUNCTION__); -+ return false; -+ } -+ } -+ return true; -+} -+ -+static bool -+mtk_nand_ready_for_read(struct nand_chip *nand, u32 u4RowAddr, u32 u4ColAddr, bool full, u8 * buf) -+{ -+ /* Reset NFI HW internal state machine and flush NFI in/out FIFO */ -+ bool bRet = false; -+ u16 sec_num = 1 << (nand->page_shift - 9); -+ u32 col_addr = u4ColAddr; -+ u32 colnob = 2, rownob = devinfo.addr_cycle - 2; -+ if (nand->options & NAND_BUSWIDTH_16) -+ col_addr /= 2; -+ -+ if (!mtk_nand_reset()) -+ goto cleanup; -+ if (g_bHwEcc) { -+ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN); -+ } else { -+ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN); -+ } -+ -+ mtk_nand_set_mode(CNFG_OP_READ); -+ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_READ_EN); -+ DRV_WriteReg16(NFI_CON_REG16, sec_num << CON_NFI_SEC_SHIFT); -+ -+ if (full) { -+ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB); -+ -+ if (g_bHwEcc) -+ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN); -+ else -+ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN); -+ } else { -+ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN); -+ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB); -+ } -+ -+ mtk_nand_set_autoformat(full); -+ if (full) -+ if (g_bHwEcc) -+ ECC_Decode_Start(); -+ if (!mtk_nand_set_command(NAND_CMD_READ0)) -+ goto cleanup; -+ if (!mtk_nand_set_address(col_addr, u4RowAddr, colnob, rownob)) -+ goto cleanup; -+ if (!mtk_nand_set_command(NAND_CMD_READSTART)) -+ goto cleanup; -+ if (!mtk_nand_status_ready(STA_NAND_BUSY)) -+ goto cleanup; -+ -+ bRet = true; -+ -+cleanup: -+ return bRet; -+} -+ -+static bool -+mtk_nand_ready_for_write(struct nand_chip *nand, u32 u4RowAddr, u32 col_addr, bool full, u8 * buf) -+{ -+ bool bRet = false; -+ u32 sec_num = 1 << (nand->page_shift - 9); -+ u32 colnob = 2, rownob = devinfo.addr_cycle - 2; -+ if (nand->options & NAND_BUSWIDTH_16) -+ col_addr /= 2; -+ -+ /* Reset NFI HW internal state machine and flush NFI in/out FIFO */ -+ if (!mtk_nand_reset()) -+ return false; -+ -+ 
mtk_nand_set_mode(CNFG_OP_PRGM); -+ -+ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_READ_EN); -+ -+ DRV_WriteReg16(NFI_CON_REG16, sec_num << CON_NFI_SEC_SHIFT); -+ -+ if (full) { -+ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB); -+ if (g_bHwEcc) -+ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN); -+ else -+ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN); -+ } else { -+ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN); -+ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB); -+ } -+ -+ mtk_nand_set_autoformat(full); -+ -+ if (full) -+ if (g_bHwEcc) -+ ECC_Encode_Start(); -+ -+ if (!mtk_nand_set_command(NAND_CMD_SEQIN)) -+ goto cleanup; -+ //1 FIXED ME: For Any Kind of AddrCycle -+ if (!mtk_nand_set_address(col_addr, u4RowAddr, colnob, rownob)) -+ goto cleanup; -+ -+ if (!mtk_nand_status_ready(STA_NAND_BUSY)) -+ goto cleanup; -+ -+ bRet = true; -+ -+cleanup: -+ return bRet; -+} -+ -+static bool -+mtk_nand_check_dececc_done(u32 u4SecNum) -+{ -+ u32 timeout, dec_mask; -+ -+ timeout = 0xffff; -+ dec_mask = (1 << u4SecNum) - 1; -+ while ((dec_mask != DRV_Reg(ECC_DECDONE_REG16)) && timeout > 0) -+ timeout--; -+ if (timeout == 0) { -+ MSG(VERIFY, "ECC_DECDONE: timeout\n"); -+ return false; -+ } -+ return true; -+} -+ -+static bool -+mtk_nand_mcu_read_data(u8 * buf, u32 length) -+{ -+ int timeout = 0xffff; -+ u32 i; -+ u32 *buf32 = (u32 *) buf; -+ if ((u32) buf % 4 || length % 4) -+ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW); -+ else -+ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW); -+ -+ //DRV_WriteReg32(NFI_STRADDR_REG32, 0); -+ mb(); -+ NFI_SET_REG16(NFI_CON_REG16, CON_NFI_BRD); -+ -+ if ((u32) buf % 4 || length % 4) { -+ for (i = 0; (i < (length)) && (timeout > 0);) { -+ if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1) { -+ *buf++ = (u8) DRV_Reg32(NFI_DATAR_REG32); -+ i++; -+ } else { -+ timeout--; -+ } -+ if (0 == timeout) { -+ printk(KERN_ERR "[%s] timeout\n", __FUNCTION__); -+ dump_nfi(); -+ return false; -+ } -+ } -+ } else { -+ for (i = 0; (i < (length >> 2)) && (timeout > 0);) { -+ if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1) { -+ *buf32++ = DRV_Reg32(NFI_DATAR_REG32); -+ i++; -+ } else { -+ timeout--; -+ } -+ if (0 == timeout) { -+ printk(KERN_ERR "[%s] timeout\n", __FUNCTION__); -+ dump_nfi(); -+ return false; -+ } -+ } -+ } -+ return true; -+} -+ -+static bool -+mtk_nand_read_page_data(struct mtd_info *mtd, u8 * pDataBuf, u32 u4Size) -+{ -+ return mtk_nand_mcu_read_data(pDataBuf, u4Size); -+} -+ -+static bool -+mtk_nand_mcu_write_data(struct mtd_info *mtd, const u8 * buf, u32 length) -+{ -+ u32 timeout = 0xFFFF; -+ u32 i; -+ u32 *pBuf32; -+ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW); -+ mb(); -+ NFI_SET_REG16(NFI_CON_REG16, CON_NFI_BWR); -+ pBuf32 = (u32 *) buf; -+ -+ if ((u32) buf % 4 || length % 4) -+ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW); -+ else -+ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW); -+ -+ if ((u32) buf % 4 || length % 4) { -+ for (i = 0; (i < (length)) && (timeout > 0);) { -+ if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1) { -+ DRV_WriteReg32(NFI_DATAW_REG32, *buf++); -+ i++; -+ } else { -+ timeout--; -+ } -+ if (0 == timeout) { -+ printk(KERN_ERR "[%s] timeout\n", __FUNCTION__); -+ dump_nfi(); -+ return false; -+ } -+ } -+ } else { -+ for (i = 0; (i < (length >> 2)) && (timeout > 0);) { -+ if (DRV_Reg16(NFI_PIO_DIRDY_REG16) & 1) { -+ DRV_WriteReg32(NFI_DATAW_REG32, *pBuf32++); -+ i++; -+ } else { -+ timeout--; -+ } -+ if (0 == timeout) { -+ printk(KERN_ERR "[%s] timeout\n", __FUNCTION__); -+ dump_nfi(); -+ return false; -+ } -+ } -+ } -+ -+ return true; -+} -+ -+static bool -+mtk_nand_write_page_data(struct mtd_info 
*mtd, u8 * buf, u32 size) -+{ -+ return mtk_nand_mcu_write_data(mtd, buf, size); -+} -+ -+static void -+mtk_nand_read_fdm_data(u8 * pDataBuf, u32 u4SecNum) -+{ -+ u32 i; -+ u32 *pBuf32 = (u32 *) pDataBuf; -+ -+ if (pBuf32) { -+ for (i = 0; i < u4SecNum; ++i) { -+ *pBuf32++ = DRV_Reg32(NFI_FDM0L_REG32 + (i << 1)); -+ *pBuf32++ = DRV_Reg32(NFI_FDM0M_REG32 + (i << 1)); -+ } -+ } -+} -+ -+static u8 fdm_buf[64]; -+static void -+mtk_nand_write_fdm_data(struct nand_chip *chip, u8 * pDataBuf, u32 u4SecNum) -+{ -+ u32 i, j; -+ u8 checksum = 0; -+ bool empty = true; -+ struct nand_oobfree *free_entry; -+ u32 *pBuf32; -+ -+ memcpy(fdm_buf, pDataBuf, u4SecNum * 8); -+ -+ free_entry = chip->ecc.layout->oobfree; -+ for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES && free_entry[i].length; i++) { -+ for (j = 0; j < free_entry[i].length; j++) { -+ if (pDataBuf[free_entry[i].offset + j] != 0xFF) -+ empty = false; -+ checksum ^= pDataBuf[free_entry[i].offset + j]; -+ } -+ } -+ -+ if (!empty) { -+ fdm_buf[free_entry[i - 1].offset + free_entry[i - 1].length] = checksum; -+ } -+ -+ pBuf32 = (u32 *) fdm_buf; -+ for (i = 0; i < u4SecNum; ++i) { -+ DRV_WriteReg32(NFI_FDM0L_REG32 + (i << 1), *pBuf32++); -+ DRV_WriteReg32(NFI_FDM0M_REG32 + (i << 1), *pBuf32++); -+ } -+} -+ -+static void -+mtk_nand_stop_read(void) -+{ -+ NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_BRD); -+ mtk_nand_reset(); -+ if (g_bHwEcc) -+ ECC_Decode_End(); -+ DRV_WriteReg16(NFI_INTR_EN_REG16, 0); -+} -+ -+static void -+mtk_nand_stop_write(void) -+{ -+ NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_BWR); -+ if (g_bHwEcc) -+ ECC_Encode_End(); -+ DRV_WriteReg16(NFI_INTR_EN_REG16, 0); -+} -+ -+bool -+mtk_nand_exec_read_page(struct mtd_info *mtd, u32 u4RowAddr, u32 u4PageSize, u8 * pPageBuf, u8 * pFDMBuf) -+{ -+ u8 *buf; -+ bool bRet = true; -+ struct nand_chip *nand = mtd->priv; -+ u32 u4SecNum = u4PageSize >> 9; -+ -+ if (((u32) pPageBuf % 16) && local_buffer_16_align) -+ buf = local_buffer_16_align; -+ else -+ buf = pPageBuf; -+ if (mtk_nand_ready_for_read(nand, u4RowAddr, 0, true, buf)) { -+ int j; -+ for (j = 0 ; j < u4SecNum; j++) { -+ if (!mtk_nand_read_page_data(mtd, buf+j*512, 512)) -+ bRet = false; -+ if(g_bHwEcc && !mtk_nand_check_dececc_done(j+1)) -+ bRet = false; -+ if(g_bHwEcc && !mtk_nand_check_bch_error(mtd, buf+j*512, j, u4RowAddr)) -+ bRet = false; -+ } -+ if (!mtk_nand_status_ready(STA_NAND_BUSY)) -+ bRet = false; -+ -+ mtk_nand_read_fdm_data(pFDMBuf, u4SecNum); -+ mtk_nand_stop_read(); -+ } -+ -+ if (buf == local_buffer_16_align) -+ memcpy(pPageBuf, buf, u4PageSize); -+ -+ return bRet; -+} -+ -+int -+mtk_nand_exec_write_page(struct mtd_info *mtd, u32 u4RowAddr, u32 u4PageSize, u8 * pPageBuf, u8 * pFDMBuf) -+{ -+ struct nand_chip *chip = mtd->priv; -+ u32 u4SecNum = u4PageSize >> 9; -+ u8 *buf; -+ u8 status; -+ -+ MSG(WRITE, "mtk_nand_exec_write_page, page: 0x%x\n", u4RowAddr); -+ -+ if (((u32) pPageBuf % 16) && local_buffer_16_align) { -+ printk(KERN_INFO "Data buffer not 16 bytes aligned: %p\n", pPageBuf); -+ memcpy(local_buffer_16_align, pPageBuf, mtd->writesize); -+ buf = local_buffer_16_align; -+ } else -+ buf = pPageBuf; -+ -+ if (mtk_nand_ready_for_write(chip, u4RowAddr, 0, true, buf)) { -+ mtk_nand_write_fdm_data(chip, pFDMBuf, u4SecNum); -+ (void)mtk_nand_write_page_data(mtd, buf, u4PageSize); -+ (void)mtk_nand_check_RW_count(u4PageSize); -+ mtk_nand_stop_write(); -+ (void)mtk_nand_set_command(NAND_CMD_PAGEPROG); -+ while (DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY) ; -+ } -+ -+ status = chip->waitfunc(mtd, chip); -+ if (status & NAND_STATUS_FAIL) -+ 
return -EIO; -+ return 0; -+} -+ -+static int -+get_start_end_block(struct mtd_info *mtd, int block, int *start_blk, int *end_blk) -+{ -+ struct nand_chip *chip = mtd->priv; -+ int i; -+ -+ *start_blk = 0; -+ for (i = 0; i <= part_num; i++) -+ { -+ if (i == part_num) -+ { -+ // try the last reset partition -+ *end_blk = (chip->chipsize >> chip->phys_erase_shift) - 1; -+ if (*start_blk <= *end_blk) -+ { -+ if ((block >= *start_blk) && (block <= *end_blk)) -+ break; -+ } -+ } -+ // skip All partition entry -+ else if (g_pasStatic_Partition[i].size == MTDPART_SIZ_FULL) -+ { -+ continue; -+ } -+ *end_blk = *start_blk + (g_pasStatic_Partition[i].size >> chip->phys_erase_shift) - 1; -+ if ((block >= *start_blk) && (block <= *end_blk)) -+ break; -+ *start_blk = *end_blk + 1; -+ } -+ if (*start_blk > *end_blk) -+ { -+ return -1; -+ } -+ return 0; -+} -+ -+static int -+block_remap(struct mtd_info *mtd, int block) -+{ -+ struct nand_chip *chip = mtd->priv; -+ int start_blk, end_blk; -+ int j, block_offset; -+ int bad_block = 0; -+ -+ if (chip->bbt == NULL) { -+ printk("ERROR!! no bbt table for block_remap\n"); -+ return -1; -+ } -+ -+ if (get_start_end_block(mtd, block, &start_blk, &end_blk) < 0) { -+ printk("ERROR!! can not find start_blk and end_blk\n"); -+ return -1; -+ } -+ -+ block_offset = block - start_blk; -+ for (j = start_blk; j <= end_blk;j++) { -+ if (((chip->bbt[j >> 2] >> ((j<<1) & 0x6)) & 0x3) == 0x0) { -+ if (!block_offset) -+ break; -+ block_offset--; -+ } else { -+ bad_block++; -+ } -+ } -+ if (j <= end_blk) { -+ return j; -+ } else { -+ // remap to the bad block -+ for (j = end_blk; bad_block > 0; j--) -+ { -+ if (((chip->bbt[j >> 2] >> ((j<<1) & 0x6)) & 0x3) != 0x0) -+ { -+ bad_block--; -+ if (bad_block <= block_offset) -+ return j; -+ } -+ } -+ } -+ -+ printk("Error!! 
block_remap error\n"); -+ return -1; -+} -+ -+int -+check_block_remap(struct mtd_info *mtd, int block) -+{ -+ if (shift_on_bbt) -+ return block_remap(mtd, block); -+ else -+ return block; -+} -+EXPORT_SYMBOL(check_block_remap); -+ -+ -+static int -+write_next_on_fail(struct mtd_info *mtd, char *write_buf, int page, int * to_blk) -+{ -+ struct nand_chip *chip = mtd->priv; -+ int i, j, to_page = 0, first_page; -+ char *buf, *oob; -+ int start_blk = 0, end_blk; -+ int mapped_block; -+ int page_per_block_bit = chip->phys_erase_shift - chip->page_shift; -+ int block = page >> page_per_block_bit; -+ -+ // find next available block in the same MTD partition -+ mapped_block = block_remap(mtd, block); -+ if (mapped_block == -1) -+ return NAND_STATUS_FAIL; -+ -+ get_start_end_block(mtd, block, &start_blk, &end_blk); -+ -+ buf = kzalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL | GFP_DMA); -+ if (buf == NULL) -+ return -1; -+ -+ oob = buf + mtd->writesize; -+ for ((*to_blk) = block + 1; (*to_blk) <= end_blk ; (*to_blk)++) { -+ if (nand_bbt_get(mtd, (*to_blk) << page_per_block_bit) == 0) { -+ int status; -+ status = mtk_nand_erase_hw(mtd, (*to_blk) << page_per_block_bit); -+ if (status & NAND_STATUS_FAIL) { -+ mtk_nand_block_markbad_hw(mtd, (*to_blk) << chip->phys_erase_shift); -+ nand_bbt_set(mtd, (*to_blk) << page_per_block_bit, 0x3); -+ } else { -+ /* good block */ -+ to_page = (*to_blk) << page_per_block_bit; -+ break; -+ } -+ } -+ } -+ -+ if (!to_page) { -+ kfree(buf); -+ return -1; -+ } -+ -+ first_page = (page >> page_per_block_bit) << page_per_block_bit; -+ for (i = 0; i < (1 << page_per_block_bit); i++) { -+ if ((first_page + i) != page) { -+ mtk_nand_read_oob_hw(mtd, chip, (first_page+i)); -+ for (j = 0; j < mtd->oobsize; j++) -+ if (chip->oob_poi[j] != (unsigned char)0xff) -+ break; -+ if (j < mtd->oobsize) { -+ mtk_nand_exec_read_page(mtd, (first_page+i), mtd->writesize, buf, oob); -+ memset(oob, 0xff, mtd->oobsize); -+ if (mtk_nand_exec_write_page(mtd, to_page + i, mtd->writesize, (u8 *)buf, oob) != 0) { -+ int ret, new_blk = 0; -+ nand_bbt_set(mtd, to_page, 0x3); -+ ret = write_next_on_fail(mtd, buf, to_page + i, &new_blk); -+ if (ret) { -+ kfree(buf); -+ mtk_nand_block_markbad_hw(mtd, to_page << chip->page_shift); -+ return ret; -+ } -+ mtk_nand_block_markbad_hw(mtd, to_page << chip->page_shift); -+ *to_blk = new_blk; -+ to_page = ((*to_blk) << page_per_block_bit); -+ } -+ } -+ } else { -+ memset(chip->oob_poi, 0xff, mtd->oobsize); -+ if (mtk_nand_exec_write_page(mtd, to_page + i, mtd->writesize, (u8 *)write_buf, chip->oob_poi) != 0) { -+ int ret, new_blk = 0; -+ nand_bbt_set(mtd, to_page, 0x3); -+ ret = write_next_on_fail(mtd, write_buf, to_page + i, &new_blk); -+ if (ret) { -+ kfree(buf); -+ mtk_nand_block_markbad_hw(mtd, to_page << chip->page_shift); -+ return ret; -+ } -+ mtk_nand_block_markbad_hw(mtd, to_page << chip->page_shift); -+ *to_blk = new_blk; -+ to_page = ((*to_blk) << page_per_block_bit); -+ } -+ } -+ } -+ -+ kfree(buf); -+ -+ return 0; -+} -+ -+static int -+mtk_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, uint32_t offset, -+ int data_len, const u8 * buf, int oob_required, int page, int cached, int raw) -+{ -+ int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift); -+ int block = page / page_per_block; -+ u16 page_in_block = page % page_per_block; -+ int mapped_block = block; -+ -+#if defined(MTK_NAND_BMT) -+ mapped_block = get_mapping_block_index(block); -+ // write bad index into oob -+ if (mapped_block != block) -+ 
set_bad_index_to_oob(chip->oob_poi, block); -+ else -+ set_bad_index_to_oob(chip->oob_poi, FAKE_INDEX); -+#else -+ if (shift_on_bbt) { -+ mapped_block = block_remap(mtd, block); -+ if (mapped_block == -1) -+ return NAND_STATUS_FAIL; -+ if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0) -+ return NAND_STATUS_FAIL; -+ } -+#endif -+ do { -+ if (mtk_nand_exec_write_page(mtd, page_in_block + mapped_block * page_per_block, mtd->writesize, (u8 *)buf, chip->oob_poi)) { -+ MSG(INIT, "write fail at block: 0x%x, page: 0x%x\n", mapped_block, page_in_block); -+#if defined(MTK_NAND_BMT) -+ if (update_bmt((page_in_block + mapped_block * page_per_block) << chip->page_shift, UPDATE_WRITE_FAIL, (u8 *) buf, chip->oob_poi)) { -+ MSG(INIT, "Update BMT success\n"); -+ return 0; -+ } else { -+ MSG(INIT, "Update BMT fail\n"); -+ return -EIO; -+ } -+#else -+ { -+ int new_blk; -+ nand_bbt_set(mtd, page_in_block + mapped_block * page_per_block, 0x3); -+ if (write_next_on_fail(mtd, (char *)buf, page_in_block + mapped_block * page_per_block, &new_blk) != 0) -+ { -+ mtk_nand_block_markbad_hw(mtd, (page_in_block + mapped_block * page_per_block) << chip->page_shift); -+ return NAND_STATUS_FAIL; -+ } -+ mtk_nand_block_markbad_hw(mtd, (page_in_block + mapped_block * page_per_block) << chip->page_shift); -+ break; -+ } -+#endif -+ } else -+ break; -+ } while(1); -+ -+ return 0; -+} -+ -+static void -+mtk_nand_command_bp(struct mtd_info *mtd, unsigned int command, int column, int page_addr) -+{ -+ struct nand_chip *nand = mtd->priv; -+ -+ switch (command) { -+ case NAND_CMD_SEQIN: -+ memset(g_kCMD.au1OOB, 0xFF, sizeof(g_kCMD.au1OOB)); -+ g_kCMD.pDataBuf = NULL; -+ g_kCMD.u4RowAddr = page_addr; -+ g_kCMD.u4ColAddr = column; -+ break; -+ -+ case NAND_CMD_PAGEPROG: -+ if (g_kCMD.pDataBuf || (0xFF != g_kCMD.au1OOB[nand_badblock_offset])) { -+ u8 *pDataBuf = g_kCMD.pDataBuf ? 
g_kCMD.pDataBuf : nand->buffers->databuf; -+ mtk_nand_exec_write_page(mtd, g_kCMD.u4RowAddr, mtd->writesize, pDataBuf, g_kCMD.au1OOB); -+ g_kCMD.u4RowAddr = (u32) - 1; -+ g_kCMD.u4OOBRowAddr = (u32) - 1; -+ } -+ break; -+ -+ case NAND_CMD_READOOB: -+ g_kCMD.u4RowAddr = page_addr; -+ g_kCMD.u4ColAddr = column + mtd->writesize; -+ break; -+ -+ case NAND_CMD_READ0: -+ g_kCMD.u4RowAddr = page_addr; -+ g_kCMD.u4ColAddr = column; -+ break; -+ -+ case NAND_CMD_ERASE1: -+ nand->state=FL_ERASING; -+ (void)mtk_nand_reset(); -+ mtk_nand_set_mode(CNFG_OP_ERASE); -+ (void)mtk_nand_set_command(NAND_CMD_ERASE1); -+ (void)mtk_nand_set_address(0, page_addr, 0, devinfo.addr_cycle - 2); -+ break; -+ -+ case NAND_CMD_ERASE2: -+ (void)mtk_nand_set_command(NAND_CMD_ERASE2); -+ while (DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY) -+ ; -+ break; -+ -+ case NAND_CMD_STATUS: -+ (void)mtk_nand_reset(); -+ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_BYTE_RW); -+ mtk_nand_set_mode(CNFG_OP_SRD); -+ mtk_nand_set_mode(CNFG_READ_EN); -+ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB); -+ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN); -+ (void)mtk_nand_set_command(NAND_CMD_STATUS); -+ NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_NOB_MASK); -+ mb(); -+ DRV_WriteReg16(NFI_CON_REG16, CON_NFI_SRD | (1 << CON_NFI_NOB_SHIFT)); -+ g_bcmdstatus = true; -+ break; -+ -+ case NAND_CMD_RESET: -+ (void)mtk_nand_reset(); -+ DRV_WriteReg16(NFI_INTR_EN_REG16, INTR_RST_DONE_EN); -+ (void)mtk_nand_set_command(NAND_CMD_RESET); -+ DRV_WriteReg16(NFI_BASE+0x44, 0xF1); -+ while(!(DRV_Reg16(NFI_INTR_REG16)&INTR_RST_DONE_EN)) -+ ; -+ break; -+ -+ case NAND_CMD_READID: -+ mtk_nand_reset(); -+ /* Disable HW ECC */ -+ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN); -+ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB); -+ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_READ_EN | CNFG_BYTE_RW); -+ (void)mtk_nand_reset(); -+ mb(); -+ mtk_nand_set_mode(CNFG_OP_SRD); -+ (void)mtk_nand_set_command(NAND_CMD_READID); -+ (void)mtk_nand_set_address(0, 0, 1, 0); -+ DRV_WriteReg16(NFI_CON_REG16, CON_NFI_SRD); -+ while (DRV_Reg32(NFI_STA_REG32) & STA_DATAR_STATE) -+ ; -+ break; -+ -+ default: -+ BUG(); -+ break; -+ } -+} -+ -+static void -+mtk_nand_select_chip(struct mtd_info *mtd, int chip) -+{ -+ if ((chip == -1) && (false == g_bInitDone)) { -+ struct nand_chip *nand = mtd->priv; -+ struct mtk_nand_host *host = nand->priv; -+ struct mtk_nand_host_hw *hw = host->hw; -+ u32 spare_per_sector = mtd->oobsize / (mtd->writesize / 512); -+ u32 ecc_bit = 4; -+ u32 spare_bit = PAGEFMT_SPARE_16; -+ -+ if (spare_per_sector >= 28) { -+ spare_bit = PAGEFMT_SPARE_28; -+ ecc_bit = 12; -+ spare_per_sector = 28; -+ } else if (spare_per_sector >= 27) { -+ spare_bit = PAGEFMT_SPARE_27; -+ ecc_bit = 8; -+ spare_per_sector = 27; -+ } else if (spare_per_sector >= 26) { -+ spare_bit = PAGEFMT_SPARE_26; -+ ecc_bit = 8; -+ spare_per_sector = 26; -+ } else if (spare_per_sector >= 16) { -+ spare_bit = PAGEFMT_SPARE_16; -+ ecc_bit = 4; -+ spare_per_sector = 16; -+ } else { -+ MSG(INIT, "[NAND]: NFI not support oobsize: %x\n", spare_per_sector); -+ ASSERT(0); -+ } -+ mtd->oobsize = spare_per_sector*(mtd->writesize/512); -+ MSG(INIT, "[NAND]select ecc bit:%d, sparesize :%d spare_per_sector=%d\n",ecc_bit,mtd->oobsize,spare_per_sector); -+ /* Setup PageFormat */ -+ if (4096 == mtd->writesize) { -+ NFI_SET_REG16(NFI_PAGEFMT_REG16, (spare_bit << PAGEFMT_SPARE_SHIFT) | PAGEFMT_4K); -+ nand->cmdfunc = mtk_nand_command_bp; -+ } else if (2048 == mtd->writesize) { -+ NFI_SET_REG16(NFI_PAGEFMT_REG16, (spare_bit << PAGEFMT_SPARE_SHIFT) | PAGEFMT_2K); -+ 
nand->cmdfunc = mtk_nand_command_bp; -+ } -+ ECC_Config(hw,ecc_bit); -+ g_bInitDone = true; -+ } -+ switch (chip) { -+ case -1: -+ break; -+ case 0: -+ case 1: -+ /* Jun Shen, 2011.04.13 */ -+ /* Note: MT6577 EVB NAND is mounted on CS0, but FPGA is CS1 */ -+ DRV_WriteReg16(NFI_CSEL_REG16, chip); -+ /* Jun Shen, 2011.04.13 */ -+ break; -+ } -+} -+ -+static uint8_t -+mtk_nand_read_byte(struct mtd_info *mtd) -+{ -+ uint8_t retval = 0; -+ -+ if (!mtk_nand_pio_ready()) { -+ printk("pio ready timeout\n"); -+ retval = false; -+ } -+ -+ if (g_bcmdstatus) { -+ retval = DRV_Reg8(NFI_DATAR_REG32); -+ NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_NOB_MASK); -+ mtk_nand_reset(); -+ if (g_bHwEcc) { -+ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN); -+ } else { -+ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN); -+ } -+ g_bcmdstatus = false; -+ } else -+ retval = DRV_Reg8(NFI_DATAR_REG32); -+ -+ return retval; -+} -+ -+static void -+mtk_nand_read_buf(struct mtd_info *mtd, uint8_t * buf, int len) -+{ -+ struct nand_chip *nand = (struct nand_chip *)mtd->priv; -+ struct NAND_CMD *pkCMD = &g_kCMD; -+ u32 u4ColAddr = pkCMD->u4ColAddr; -+ u32 u4PageSize = mtd->writesize; -+ -+ if (u4ColAddr < u4PageSize) { -+ if ((u4ColAddr == 0) && (len >= u4PageSize)) { -+ mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, buf, pkCMD->au1OOB); -+ if (len > u4PageSize) { -+ u32 u4Size = min(len - u4PageSize, sizeof(pkCMD->au1OOB)); -+ memcpy(buf + u4PageSize, pkCMD->au1OOB, u4Size); -+ } -+ } else { -+ mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, nand->buffers->databuf, pkCMD->au1OOB); -+ memcpy(buf, nand->buffers->databuf + u4ColAddr, len); -+ } -+ pkCMD->u4OOBRowAddr = pkCMD->u4RowAddr; -+ } else { -+ u32 u4Offset = u4ColAddr - u4PageSize; -+ u32 u4Size = min(len - u4Offset, sizeof(pkCMD->au1OOB)); -+ if (pkCMD->u4OOBRowAddr != pkCMD->u4RowAddr) { -+ mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, nand->buffers->databuf, pkCMD->au1OOB); -+ pkCMD->u4OOBRowAddr = pkCMD->u4RowAddr; -+ } -+ memcpy(buf, pkCMD->au1OOB + u4Offset, u4Size); -+ } -+ pkCMD->u4ColAddr += len; -+} -+ -+static void -+mtk_nand_write_buf(struct mtd_info *mtd, const uint8_t * buf, int len) -+{ -+ struct NAND_CMD *pkCMD = &g_kCMD; -+ u32 u4ColAddr = pkCMD->u4ColAddr; -+ u32 u4PageSize = mtd->writesize; -+ int i4Size, i; -+ -+ if (u4ColAddr >= u4PageSize) { -+ u32 u4Offset = u4ColAddr - u4PageSize; -+ u8 *pOOB = pkCMD->au1OOB + u4Offset; -+ i4Size = min(len, (int)(sizeof(pkCMD->au1OOB) - u4Offset)); -+ for (i = 0; i < i4Size; i++) { -+ pOOB[i] &= buf[i]; -+ } -+ } else { -+ pkCMD->pDataBuf = (u8 *) buf; -+ } -+ -+ pkCMD->u4ColAddr += len; -+} -+ -+static int -+mtk_nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, const uint8_t * buf, int oob_required) -+{ -+ mtk_nand_write_buf(mtd, buf, mtd->writesize); -+ mtk_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize); -+ return 0; -+} -+ -+static int -+mtk_nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, uint8_t * buf, int oob_required, int page) -+{ -+ struct NAND_CMD *pkCMD = &g_kCMD; -+ u32 u4ColAddr = pkCMD->u4ColAddr; -+ u32 u4PageSize = mtd->writesize; -+ -+ if (u4ColAddr == 0) { -+ mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, buf, chip->oob_poi); -+ pkCMD->u4ColAddr += u4PageSize + mtd->oobsize; -+ } -+ -+ return 0; -+} -+ -+static int -+mtk_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip, u8 * buf, int page) -+{ -+ int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift); -+ int block = page / page_per_block; -+ 
u16 page_in_block = page % page_per_block; -+ int mapped_block = block; -+ -+#if defined (MTK_NAND_BMT) -+ mapped_block = get_mapping_block_index(block); -+ if (mtk_nand_exec_read_page(mtd, page_in_block + mapped_block * page_per_block, -+ mtd->writesize, buf, chip->oob_poi)) -+ return 0; -+#else -+ if (shift_on_bbt) { -+ mapped_block = block_remap(mtd, block); -+ if (mapped_block == -1) -+ return NAND_STATUS_FAIL; -+ if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0) -+ return NAND_STATUS_FAIL; -+ } -+ -+ if (mtk_nand_exec_read_page(mtd, page_in_block + mapped_block * page_per_block, mtd->writesize, buf, chip->oob_poi)) -+ return 0; -+ else -+ return -EIO; -+#endif -+} -+ -+int -+mtk_nand_erase_hw(struct mtd_info *mtd, int page) -+{ -+ struct nand_chip *chip = (struct nand_chip *)mtd->priv; -+ -+ chip->erase_cmd(mtd, page); -+ -+ return chip->waitfunc(mtd, chip); -+} -+ -+static int -+mtk_nand_erase(struct mtd_info *mtd, int page) -+{ -+ // get mapping -+ struct nand_chip *chip = mtd->priv; -+ int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift); -+ int page_in_block = page % page_per_block; -+ int block = page / page_per_block; -+ int mapped_block = block; -+ -+#if defined(MTK_NAND_BMT) -+ mapped_block = get_mapping_block_index(block); -+#else -+ if (shift_on_bbt) { -+ mapped_block = block_remap(mtd, block); -+ if (mapped_block == -1) -+ return NAND_STATUS_FAIL; -+ if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0) -+ return NAND_STATUS_FAIL; -+ } -+#endif -+ -+ do { -+ int status = mtk_nand_erase_hw(mtd, page_in_block + page_per_block * mapped_block); -+ -+ if (status & NAND_STATUS_FAIL) { -+#if defined (MTK_NAND_BMT) -+ if (update_bmt( (page_in_block + mapped_block * page_per_block) << chip->page_shift, -+ UPDATE_ERASE_FAIL, NULL, NULL)) -+ { -+ MSG(INIT, "Erase fail at block: 0x%x, update BMT success\n", mapped_block); -+ return 0; -+ } else { -+ MSG(INIT, "Erase fail at block: 0x%x, update BMT fail\n", mapped_block); -+ return NAND_STATUS_FAIL; -+ } -+#else -+ mtk_nand_block_markbad_hw(mtd, (page_in_block + mapped_block * page_per_block) << chip->page_shift); -+ nand_bbt_set(mtd, page_in_block + mapped_block * page_per_block, 0x3); -+ if (shift_on_bbt) { -+ mapped_block = block_remap(mtd, block); -+ if (mapped_block == -1) -+ return NAND_STATUS_FAIL; -+ if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0) -+ return NAND_STATUS_FAIL; -+ } else -+ return NAND_STATUS_FAIL; -+#endif -+ } else -+ break; -+ } while(1); -+ -+ return 0; -+} -+ -+static int -+mtk_nand_read_oob_raw(struct mtd_info *mtd, uint8_t * buf, int page_addr, int len) -+{ -+ struct nand_chip *chip = (struct nand_chip *)mtd->priv; -+ u32 col_addr = 0; -+ u32 sector = 0; -+ int res = 0; -+ u32 colnob = 2, rawnob = devinfo.addr_cycle - 2; -+ int randomread = 0; -+ int read_len = 0; -+ int sec_num = 1<<(chip->page_shift-9); -+ int spare_per_sector = mtd->oobsize/sec_num; -+ -+ if (len > NAND_MAX_OOBSIZE || len % OOB_AVAI_PER_SECTOR || !buf) { -+ printk(KERN_WARNING "[%s] invalid parameter, len: %d, buf: %p\n", __FUNCTION__, len, buf); -+ return -EINVAL; -+ } -+ if (len > spare_per_sector) -+ randomread = 1; -+ if (!randomread || !(devinfo.advancedmode & RAMDOM_READ)) { -+ while (len > 0) { -+ read_len = min(len, spare_per_sector); -+ col_addr = NAND_SECTOR_SIZE + sector * (NAND_SECTOR_SIZE + spare_per_sector); // TODO: Fix this hard-code 16 -+ if (!mtk_nand_ready_for_read(chip, page_addr, 
col_addr, false, NULL)) { -+ printk(KERN_WARNING "mtk_nand_ready_for_read return failed\n"); -+ res = -EIO; -+ goto error; -+ } -+ if (!mtk_nand_mcu_read_data(buf + spare_per_sector * sector, read_len)) { -+ printk(KERN_WARNING "mtk_nand_mcu_read_data return failed\n"); -+ res = -EIO; -+ goto error; -+ } -+ mtk_nand_check_RW_count(read_len); -+ mtk_nand_stop_read(); -+ sector++; -+ len -= read_len; -+ } -+ } else { -+ col_addr = NAND_SECTOR_SIZE; -+ if (chip->options & NAND_BUSWIDTH_16) -+ col_addr /= 2; -+ if (!mtk_nand_reset()) -+ goto error; -+ mtk_nand_set_mode(0x6000); -+ NFI_SET_REG16(NFI_CNFG_REG16, CNFG_READ_EN); -+ DRV_WriteReg16(NFI_CON_REG16, 4 << CON_NFI_SEC_SHIFT); -+ -+ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_AHB); -+ NFI_CLN_REG16(NFI_CNFG_REG16, CNFG_HW_ECC_EN); -+ -+ mtk_nand_set_autoformat(false); -+ -+ if (!mtk_nand_set_command(NAND_CMD_READ0)) -+ goto error; -+ //1 FIXED ME: For Any Kind of AddrCycle -+ if (!mtk_nand_set_address(col_addr, page_addr, colnob, rawnob)) -+ goto error; -+ if (!mtk_nand_set_command(NAND_CMD_READSTART)) -+ goto error; -+ if (!mtk_nand_status_ready(STA_NAND_BUSY)) -+ goto error; -+ read_len = min(len, spare_per_sector); -+ if (!mtk_nand_mcu_read_data(buf + spare_per_sector * sector, read_len)) { -+ printk(KERN_WARNING "mtk_nand_mcu_read_data return failed first 16\n"); -+ res = -EIO; -+ goto error; -+ } -+ sector++; -+ len -= read_len; -+ mtk_nand_stop_read(); -+ while (len > 0) { -+ read_len = min(len, spare_per_sector); -+ if (!mtk_nand_set_command(0x05)) -+ goto error; -+ col_addr = NAND_SECTOR_SIZE + sector * (NAND_SECTOR_SIZE + spare_per_sector); -+ if (chip->options & NAND_BUSWIDTH_16) -+ col_addr /= 2; -+ DRV_WriteReg32(NFI_COLADDR_REG32, col_addr); -+ DRV_WriteReg16(NFI_ADDRNOB_REG16, 2); -+ DRV_WriteReg16(NFI_CON_REG16, 4 << CON_NFI_SEC_SHIFT); -+ if (!mtk_nand_status_ready(STA_ADDR_STATE)) -+ goto error; -+ if (!mtk_nand_set_command(0xE0)) -+ goto error; -+ if (!mtk_nand_status_ready(STA_NAND_BUSY)) -+ goto error; -+ if (!mtk_nand_mcu_read_data(buf + spare_per_sector * sector, read_len)) { -+ printk(KERN_WARNING "mtk_nand_mcu_read_data return failed first 16\n"); -+ res = -EIO; -+ goto error; -+ } -+ mtk_nand_stop_read(); -+ sector++; -+ len -= read_len; -+ } -+ } -+error: -+ NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_BRD); -+ return res; -+} -+ -+static int -+mtk_nand_write_oob_raw(struct mtd_info *mtd, const uint8_t * buf, int page_addr, int len) -+{ -+ struct nand_chip *chip = mtd->priv; -+ u32 col_addr = 0; -+ u32 sector = 0; -+ int write_len = 0; -+ int status; -+ int sec_num = 1<<(chip->page_shift-9); -+ int spare_per_sector = mtd->oobsize/sec_num; -+ -+ if (len > NAND_MAX_OOBSIZE || len % OOB_AVAI_PER_SECTOR || !buf) { -+ printk(KERN_WARNING "[%s] invalid parameter, len: %d, buf: %p\n", __FUNCTION__, len, buf); -+ return -EINVAL; -+ } -+ -+ while (len > 0) { -+ write_len = min(len, spare_per_sector); -+ col_addr = sector * (NAND_SECTOR_SIZE + spare_per_sector) + NAND_SECTOR_SIZE; -+ if (!mtk_nand_ready_for_write(chip, page_addr, col_addr, false, NULL)) -+ return -EIO; -+ if (!mtk_nand_mcu_write_data(mtd, buf + sector * spare_per_sector, write_len)) -+ return -EIO; -+ (void)mtk_nand_check_RW_count(write_len); -+ NFI_CLN_REG16(NFI_CON_REG16, CON_NFI_BWR); -+ (void)mtk_nand_set_command(NAND_CMD_PAGEPROG); -+ while (DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY) -+ ; -+ status = chip->waitfunc(mtd, chip); -+ if (status & NAND_STATUS_FAIL) { -+ printk(KERN_INFO "status: %d\n", status); -+ return -EIO; -+ } -+ len -= write_len; -+ sector++; -+ } -+ 
-+ return 0; -+} -+ -+static int -+mtk_nand_write_oob_hw(struct mtd_info *mtd, struct nand_chip *chip, int page) -+{ -+ int i, iter; -+ int sec_num = 1<<(chip->page_shift-9); -+ int spare_per_sector = mtd->oobsize/sec_num; -+ -+ memcpy(local_oob_buf, chip->oob_poi, mtd->oobsize); -+ -+ // copy ecc data -+ for (i = 0; i < chip->ecc.layout->eccbytes; i++) { -+ iter = (i / (spare_per_sector-OOB_AVAI_PER_SECTOR)) * spare_per_sector + OOB_AVAI_PER_SECTOR + i % (spare_per_sector-OOB_AVAI_PER_SECTOR); -+ local_oob_buf[iter] = chip->oob_poi[chip->ecc.layout->eccpos[i]]; -+ } -+ -+ // copy FDM data -+ for (i = 0; i < sec_num; i++) -+ memcpy(&local_oob_buf[i * spare_per_sector], &chip->oob_poi[i * OOB_AVAI_PER_SECTOR], OOB_AVAI_PER_SECTOR); -+ -+ return mtk_nand_write_oob_raw(mtd, local_oob_buf, page, mtd->oobsize); -+} -+ -+static int mtk_nand_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page) -+{ -+ int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift); -+ int block = page / page_per_block; -+ u16 page_in_block = page % page_per_block; -+ int mapped_block = block; -+ -+#if defined(MTK_NAND_BMT) -+ mapped_block = get_mapping_block_index(block); -+ // write bad index into oob -+ if (mapped_block != block) -+ set_bad_index_to_oob(chip->oob_poi, block); -+ else -+ set_bad_index_to_oob(chip->oob_poi, FAKE_INDEX); -+#else -+ if (shift_on_bbt) -+ { -+ mapped_block = block_remap(mtd, block); -+ if (mapped_block == -1) -+ return NAND_STATUS_FAIL; -+ if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0) -+ return NAND_STATUS_FAIL; -+ } -+#endif -+ do { -+ if (mtk_nand_write_oob_hw(mtd, chip, page_in_block + mapped_block * page_per_block /* page */)) { -+ MSG(INIT, "write oob fail at block: 0x%x, page: 0x%x\n", mapped_block, page_in_block); -+#if defined(MTK_NAND_BMT) -+ if (update_bmt((page_in_block + mapped_block * page_per_block) << chip->page_shift, -+ UPDATE_WRITE_FAIL, NULL, chip->oob_poi)) -+ { -+ MSG(INIT, "Update BMT success\n"); -+ return 0; -+ } else { -+ MSG(INIT, "Update BMT fail\n"); -+ return -EIO; -+ } -+#else -+ mtk_nand_block_markbad_hw(mtd, (page_in_block + mapped_block * page_per_block) << chip->page_shift); -+ nand_bbt_set(mtd, page_in_block + mapped_block * page_per_block, 0x3); -+ if (shift_on_bbt) { -+ mapped_block = block_remap(mtd, mapped_block); -+ if (mapped_block == -1) -+ return NAND_STATUS_FAIL; -+ if (nand_bbt_get(mtd, mapped_block << (chip->phys_erase_shift - chip->page_shift)) != 0x0) -+ return NAND_STATUS_FAIL; -+ } else { -+ return NAND_STATUS_FAIL; -+ } -+#endif -+ } else -+ break; -+ } while (1); -+ -+ return 0; -+} -+ -+int -+mtk_nand_block_markbad_hw(struct mtd_info *mtd, loff_t offset) -+{ -+ struct nand_chip *chip = mtd->priv; -+ int block = (int)offset >> chip->phys_erase_shift; -+ int page = block * (1 << (chip->phys_erase_shift - chip->page_shift)); -+ u8 buf[8]; -+ -+ memset(buf, 0xFF, 8); -+ buf[0] = 0; -+ return mtk_nand_write_oob_raw(mtd, buf, page, 8); -+} -+ -+static int -+mtk_nand_block_markbad(struct mtd_info *mtd, loff_t offset) -+{ -+ struct nand_chip *chip = mtd->priv; -+ int block = (int)offset >> chip->phys_erase_shift; -+ int ret; -+ int mapped_block = block; -+ -+ nand_get_device(chip, mtd, FL_WRITING); -+ -+#if defined(MTK_NAND_BMT) -+ mapped_block = get_mapping_block_index(block); -+ ret = mtk_nand_block_markbad_hw(mtd, mapped_block << chip->phys_erase_shift); -+#else -+ if (shift_on_bbt) { -+ mapped_block = block_remap(mtd, block); -+ if (mapped_block == -1) { -+ printk("NAND 
mark bad failed\n"); -+ nand_release_device(mtd); -+ return NAND_STATUS_FAIL; -+ } -+ } -+ ret = mtk_nand_block_markbad_hw(mtd, mapped_block << chip->phys_erase_shift); -+#endif -+ nand_release_device(mtd); -+ -+ return ret; -+} -+ -+int -+mtk_nand_read_oob_hw(struct mtd_info *mtd, struct nand_chip *chip, int page) -+{ -+ int i; -+ u8 iter = 0; -+ -+ int sec_num = 1<<(chip->page_shift-9); -+ int spare_per_sector = mtd->oobsize/sec_num; -+ -+ if (mtk_nand_read_oob_raw(mtd, chip->oob_poi, page, mtd->oobsize)) { -+ printk(KERN_ERR "[%s]mtk_nand_read_oob_raw return failed\n", __FUNCTION__); -+ return -EIO; -+ } -+ -+ // adjust to ecc physical layout to memory layout -+ /*********************************************************/ -+ /* FDM0 | ECC0 | FDM1 | ECC1 | FDM2 | ECC2 | FDM3 | ECC3 */ -+ /* 8B | 8B | 8B | 8B | 8B | 8B | 8B | 8B */ -+ /*********************************************************/ -+ -+ memcpy(local_oob_buf, chip->oob_poi, mtd->oobsize); -+ // copy ecc data -+ for (i = 0; i < chip->ecc.layout->eccbytes; i++) { -+ iter = (i / (spare_per_sector-OOB_AVAI_PER_SECTOR)) * spare_per_sector + OOB_AVAI_PER_SECTOR + i % (spare_per_sector-OOB_AVAI_PER_SECTOR); -+ chip->oob_poi[chip->ecc.layout->eccpos[i]] = local_oob_buf[iter]; -+ } -+ -+ // copy FDM data -+ for (i = 0; i < sec_num; i++) { -+ memcpy(&chip->oob_poi[i * OOB_AVAI_PER_SECTOR], &local_oob_buf[i * spare_per_sector], OOB_AVAI_PER_SECTOR); -+ } -+ -+ return 0; -+} -+ -+static int -+mtk_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip, int page) -+{ -+ int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift); -+ int block = page / page_per_block; -+ u16 page_in_block = page % page_per_block; -+ int mapped_block = block; -+ -+#if defined (MTK_NAND_BMT) -+ mapped_block = get_mapping_block_index(block); -+ mtk_nand_read_oob_hw(mtd, chip, page_in_block + mapped_block * page_per_block); -+#else -+ if (shift_on_bbt) { -+ mapped_block = block_remap(mtd, block); -+ if (mapped_block == -1) -+ return NAND_STATUS_FAIL; -+ // allow to read oob even if the block is bad -+ } -+ if (mtk_nand_read_oob_hw(mtd, chip, page_in_block + mapped_block * page_per_block)!=0) -+ return -1; -+#endif -+ return 0; -+} -+ -+int -+mtk_nand_block_bad_hw(struct mtd_info *mtd, loff_t ofs) -+{ -+ struct nand_chip *chip = (struct nand_chip *)mtd->priv; -+ int page_addr = (int)(ofs >> chip->page_shift); -+ unsigned int page_per_block = 1 << (chip->phys_erase_shift - chip->page_shift); -+ unsigned char oob_buf[8]; -+ -+ page_addr &= ~(page_per_block - 1); -+ if (mtk_nand_read_oob_raw(mtd, oob_buf, page_addr, sizeof(oob_buf))) { -+ printk(KERN_WARNING "mtk_nand_read_oob_raw return error\n"); -+ return 1; -+ } -+ -+ if (oob_buf[0] != 0xff) { -+ printk(KERN_WARNING "Bad block detected at 0x%x, oob_buf[0] is 0x%x\n", page_addr, oob_buf[0]); -+ // dump_nfi(); -+ return 1; -+ } -+ -+ return 0; -+} -+ -+static int -+mtk_nand_block_bad(struct mtd_info *mtd, loff_t ofs, int getchip) -+{ -+ int chipnr = 0; -+ struct nand_chip *chip = (struct nand_chip *)mtd->priv; -+ int block = (int)ofs >> chip->phys_erase_shift; -+ int mapped_block = block; -+ int ret; -+ -+ if (getchip) { -+ chipnr = (int)(ofs >> chip->chip_shift); -+ nand_get_device(chip, mtd, FL_READING); -+ /* Select the NAND device */ -+ chip->select_chip(mtd, chipnr); -+ } -+ -+#if defined(MTK_NAND_BMT) -+ mapped_block = get_mapping_block_index(block); -+#else -+ if (shift_on_bbt) { -+ mapped_block = block_remap(mtd, block); -+ if (mapped_block == -1) { -+ if (getchip) -+ nand_release_device(mtd); 
-+ return NAND_STATUS_FAIL; -+ } -+ } -+#endif -+ -+ ret = mtk_nand_block_bad_hw(mtd, mapped_block << chip->phys_erase_shift); -+#if defined (MTK_NAND_BMT) -+ if (ret) { -+ MSG(INIT, "Unmapped bad block: 0x%x\n", mapped_block); -+ if (update_bmt(mapped_block << chip->phys_erase_shift, UPDATE_UNMAPPED_BLOCK, NULL, NULL)) { -+ MSG(INIT, "Update BMT success\n"); -+ ret = 0; -+ } else { -+ MSG(INIT, "Update BMT fail\n"); -+ ret = 1; -+ } -+ } -+#endif -+ -+ if (getchip) -+ nand_release_device(mtd); -+ -+ return ret; -+} -+ -+#ifdef CONFIG_MTD_NAND_VERIFY_WRITE -+char gacBuf[4096 + 288]; -+ -+static int -+mtk_nand_verify_buf(struct mtd_info *mtd, const uint8_t * buf, int len) -+{ -+ struct nand_chip *chip = (struct nand_chip *)mtd->priv; -+ struct NAND_CMD *pkCMD = &g_kCMD; -+ u32 u4PageSize = mtd->writesize; -+ u32 *pSrc, *pDst; -+ int i; -+ -+ mtk_nand_exec_read_page(mtd, pkCMD->u4RowAddr, u4PageSize, gacBuf, gacBuf + u4PageSize); -+ -+ pSrc = (u32 *) buf; -+ pDst = (u32 *) gacBuf; -+ len = len / sizeof(u32); -+ for (i = 0; i < len; ++i) { -+ if (*pSrc != *pDst) { -+ MSG(VERIFY, "mtk_nand_verify_buf page fail at page %d\n", pkCMD->u4RowAddr); -+ return -1; -+ } -+ pSrc++; -+ pDst++; -+ } -+ -+ pSrc = (u32 *) chip->oob_poi; -+ pDst = (u32 *) (gacBuf + u4PageSize); -+ -+ if ((pSrc[0] != pDst[0]) || (pSrc[1] != pDst[1]) || (pSrc[2] != pDst[2]) || (pSrc[3] != pDst[3]) || (pSrc[4] != pDst[4]) || (pSrc[5] != pDst[5])) { -+ // TODO: Ask Designer Why? -+ //(pSrc[6] != pDst[6]) || (pSrc[7] != pDst[7])) -+ MSG(VERIFY, "mtk_nand_verify_buf oob fail at page %d\n", pkCMD->u4RowAddr); -+ MSG(VERIFY, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", pSrc[0], pSrc[1], pSrc[2], pSrc[3], pSrc[4], pSrc[5], pSrc[6], pSrc[7]); -+ MSG(VERIFY, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", pDst[0], pDst[1], pDst[2], pDst[3], pDst[4], pDst[5], pDst[6], pDst[7]); -+ return -1; -+ } -+ return 0; -+} -+#endif -+ -+static void -+mtk_nand_init_hw(struct mtk_nand_host *host) { -+ struct mtk_nand_host_hw *hw = host->hw; -+ u32 data; -+ -+ data = DRV_Reg32(RALINK_SYSCTL_BASE+0x60); -+ data &= ~((0x3<<18)|(0x3<<16)); -+ data |= ((0x2<<18) |(0x2<<16)); -+ DRV_WriteReg32(RALINK_SYSCTL_BASE+0x60, data); -+ -+ MSG(INIT, "Enable NFI Clock\n"); -+ nand_enable_clock(); -+ -+ g_bInitDone = false; -+ g_kCMD.u4OOBRowAddr = (u32) - 1; -+ -+ /* Set default NFI access timing control */ -+ DRV_WriteReg32(NFI_ACCCON_REG32, hw->nfi_access_timing); -+ DRV_WriteReg16(NFI_CNFG_REG16, 0); -+ DRV_WriteReg16(NFI_PAGEFMT_REG16, 0); -+ -+ /* Reset the state machine and data FIFO, because flushing FIFO */ -+ (void)mtk_nand_reset(); -+ -+ /* Set the ECC engine */ -+ if (hw->nand_ecc_mode == NAND_ECC_HW) { -+ MSG(INIT, "%s : Use HW ECC\n", MODULE_NAME); -+ if (g_bHwEcc) -+ NFI_SET_REG32(NFI_CNFG_REG16, CNFG_HW_ECC_EN); -+ ECC_Config(host->hw,4); -+ mtk_nand_configure_fdm(8); -+ mtk_nand_configure_lock(); -+ } -+ -+ NFI_SET_REG16(NFI_IOCON_REG16, 0x47); -+} -+ -+static int mtk_nand_dev_ready(struct mtd_info *mtd) -+{ -+ return !(DRV_Reg32(NFI_STA_REG32) & STA_NAND_BUSY); -+} -+ -+#define FACT_BBT_BLOCK_NUM 32 // use the latest 32 BLOCK for factory bbt table -+#define FACT_BBT_OOB_SIGNATURE 1 -+#define FACT_BBT_SIGNATURE_LEN 7 -+const u8 oob_signature[] = "mtknand"; -+static u8 *fact_bbt = 0; -+static u32 bbt_size = 0; -+ -+static int -+read_fact_bbt(struct mtd_info *mtd, unsigned int page) -+{ -+ struct nand_chip *chip = mtd->priv; -+ -+ // read oob -+ if (mtk_nand_read_oob_hw(mtd, chip, page)==0) -+ { -+ if (chip->oob_poi[nand_badblock_offset] != 0xFF) -+ { 
-+ printk("Bad Block on Page %x\n", page); -+ return -1; -+ } -+ if (memcmp(&chip->oob_poi[FACT_BBT_OOB_SIGNATURE], oob_signature, FACT_BBT_SIGNATURE_LEN) != 0) -+ { -+ printk("compare signature failed %x\n", page); -+ return -1; -+ } -+ if (mtk_nand_exec_read_page(mtd, page, mtd->writesize, chip->buffers->databuf, chip->oob_poi)) -+ { -+ printk("Signature matched and data read!\n"); -+ memcpy(fact_bbt, chip->buffers->databuf, (bbt_size <= mtd->writesize)? bbt_size:mtd->writesize); -+ return 0; -+ } -+ -+ } -+ printk("failed at page %x\n", page); -+ return -1; -+} -+ -+static int -+load_fact_bbt(struct mtd_info *mtd) -+{ -+ struct nand_chip *chip = mtd->priv; -+ int i; -+ u32 total_block; -+ -+ total_block = 1 << (chip->chip_shift - chip->phys_erase_shift); -+ bbt_size = total_block >> 2; -+ -+ if ((!fact_bbt) && (bbt_size)) -+ fact_bbt = (u8 *)kmalloc(bbt_size, GFP_KERNEL); -+ if (!fact_bbt) -+ return -1; -+ -+ for (i = total_block - 1; i >= (total_block - FACT_BBT_BLOCK_NUM); i--) -+ { -+ if (read_fact_bbt(mtd, i << (chip->phys_erase_shift - chip->page_shift)) == 0) -+ { -+ printk("load_fact_bbt success %d\n", i); -+ return 0; -+ } -+ -+ } -+ printk("load_fact_bbt failed\n"); -+ return -1; -+} -+ -+static int -+mtk_nand_probe(struct platform_device *pdev) -+{ -+ struct mtd_part_parser_data ppdata; -+ struct mtk_nand_host_hw *hw; -+ struct mtd_info *mtd; -+ struct nand_chip *nand_chip; -+ u8 ext_id1, ext_id2, ext_id3; -+ int err = 0; -+ int id; -+ u32 ext_id; -+ int i; -+ u32 data; -+ -+ data = DRV_Reg32(RALINK_SYSCTL_BASE+0x60); -+ data &= ~((0x3<<18)|(0x3<<16)); -+ data |= ((0x2<<18) |(0x2<<16)); -+ DRV_WriteReg32(RALINK_SYSCTL_BASE+0x60, data); -+ -+ hw = &mt7621_nand_hw, -+ BUG_ON(!hw); -+ /* Allocate memory for the device structure (and zero it) */ -+ host = kzalloc(sizeof(struct mtk_nand_host), GFP_KERNEL); -+ if (!host) { -+ MSG(INIT, "mtk_nand: failed to allocate device structure.\n"); -+ return -ENOMEM; -+ } -+ -+ /* Allocate memory for 16 byte aligned buffer */ -+ local_buffer_16_align = local_buffer + 16 - ((u32) local_buffer % 16); -+ printk(KERN_INFO "Allocate 16 byte aligned buffer: %p\n", local_buffer_16_align); -+ host->hw = hw; -+ -+ /* init mtd data structure */ -+ nand_chip = &host->nand_chip; -+ nand_chip->priv = host; /* link the private data structures */ -+ -+ mtd = &host->mtd; -+ mtd->priv = nand_chip; -+ mtd->owner = THIS_MODULE; -+ mtd->name = "MT7621-NAND"; -+ -+ hw->nand_ecc_mode = NAND_ECC_HW; -+ -+ /* Set address of NAND IO lines */ -+ nand_chip->IO_ADDR_R = (void __iomem *)NFI_DATAR_REG32; -+ nand_chip->IO_ADDR_W = (void __iomem *)NFI_DATAW_REG32; -+ nand_chip->chip_delay = 20; /* 20us command delay time */ -+ nand_chip->ecc.mode = hw->nand_ecc_mode; /* enable ECC */ -+ nand_chip->ecc.strength = 1; -+ nand_chip->read_byte = mtk_nand_read_byte; -+ nand_chip->read_buf = mtk_nand_read_buf; -+ nand_chip->write_buf = mtk_nand_write_buf; -+#ifdef CONFIG_MTD_NAND_VERIFY_WRITE -+ nand_chip->verify_buf = mtk_nand_verify_buf; -+#endif -+ nand_chip->select_chip = mtk_nand_select_chip; -+ nand_chip->dev_ready = mtk_nand_dev_ready; -+ nand_chip->cmdfunc = mtk_nand_command_bp; -+ nand_chip->ecc.read_page = mtk_nand_read_page_hwecc; -+ nand_chip->ecc.write_page = mtk_nand_write_page_hwecc; -+ -+ nand_chip->ecc.layout = &nand_oob_64; -+ nand_chip->ecc.size = hw->nand_ecc_size; //2048 -+ nand_chip->ecc.bytes = hw->nand_ecc_bytes; //32 -+ -+ // For BMT, we need to revise driver architecture -+ nand_chip->write_page = mtk_nand_write_page; -+ nand_chip->ecc.write_oob = 
mtk_nand_write_oob; -+ nand_chip->block_markbad = mtk_nand_block_markbad; // need to add nand_get_device()/nand_release_device(). -+ // nand_chip->erase = mtk_nand_erase; -+ // nand_chip->read_page = mtk_nand_read_page; -+ nand_chip->ecc.read_oob = mtk_nand_read_oob; -+ nand_chip->block_bad = mtk_nand_block_bad; -+ -+ //Qwert:Add for Uboot -+ mtk_nand_init_hw(host); -+ /* Select the device */ -+ nand_chip->select_chip(mtd, NFI_DEFAULT_CS); -+ -+ /* -+ * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx) -+ * after power-up -+ */ -+ nand_chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1); -+ -+ memset(&devinfo, 0 , sizeof(flashdev_info)); -+ -+ /* Send the command for reading device ID */ -+ -+ nand_chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1); -+ -+ /* Read manufacturer and device IDs */ -+ manu_id = nand_chip->read_byte(mtd); -+ dev_id = nand_chip->read_byte(mtd); -+ id = dev_id | (manu_id << 8); -+ ext_id1 = nand_chip->read_byte(mtd); -+ ext_id2 = nand_chip->read_byte(mtd); -+ ext_id3 = nand_chip->read_byte(mtd); -+ ext_id = ext_id1 << 16 | ext_id2 << 8 | ext_id3; -+ if (!get_device_info(id, ext_id, &devinfo)) { -+ u32 chip_mode = RALINK_REG(RALINK_SYSCTL_BASE+0x010)&0x0F; -+ MSG(INIT, "Not Support this Device! \r\n"); -+ memset(&devinfo, 0 , sizeof(flashdev_info)); -+ MSG(INIT, "chip_mode=%08X\n",chip_mode); -+ -+ /* apply bootstrap first */ -+ devinfo.addr_cycle = 5; -+ devinfo.iowidth = 8; -+ -+ switch (chip_mode) { -+ case 10: -+ devinfo.pagesize = 2048; -+ devinfo.sparesize = 128; -+ devinfo.totalsize = 128; -+ devinfo.blocksize = 128; -+ break; -+ case 11: -+ devinfo.pagesize = 4096; -+ devinfo.sparesize = 128; -+ devinfo.totalsize = 1024; -+ devinfo.blocksize = 256; -+ break; -+ case 12: -+ devinfo.pagesize = 4096; -+ devinfo.sparesize = 224; -+ devinfo.totalsize = 2048; -+ devinfo.blocksize = 512; -+ break; -+ default: -+ case 1: -+ devinfo.pagesize = 2048; -+ devinfo.sparesize = 64; -+ devinfo.totalsize = 128; -+ devinfo.blocksize = 128; -+ break; -+ } -+ -+ devinfo.timmingsetting = NFI_DEFAULT_ACCESS_TIMING; -+ devinfo.devciename[0] = 'U'; -+ devinfo.advancedmode = 0; -+ } -+ mtd->writesize = devinfo.pagesize; -+ mtd->erasesize = (devinfo.blocksize<<10); -+ mtd->oobsize = devinfo.sparesize; -+ -+ nand_chip->chipsize = (devinfo.totalsize<<20); -+ nand_chip->page_shift = ffs(mtd->writesize) - 1; -+ nand_chip->pagemask = (nand_chip->chipsize >> nand_chip->page_shift) - 1; -+ nand_chip->phys_erase_shift = ffs(mtd->erasesize) - 1; -+ nand_chip->chip_shift = ffs(nand_chip->chipsize) - 1;//0x1C;//ffs(nand_chip->chipsize) - 1; -+ nand_chip->oob_poi = nand_chip->buffers->databuf + mtd->writesize; -+ nand_chip->badblockpos = 0; -+ -+ if (devinfo.pagesize == 4096) -+ nand_chip->ecc.layout = &nand_oob_128; -+ else if (devinfo.pagesize == 2048) -+ nand_chip->ecc.layout = &nand_oob_64; -+ else if (devinfo.pagesize == 512) -+ nand_chip->ecc.layout = &nand_oob_16; -+ -+ nand_chip->ecc.layout->eccbytes = devinfo.sparesize-OOB_AVAI_PER_SECTOR*(devinfo.pagesize/NAND_SECTOR_SIZE); -+ for (i = 0; i < nand_chip->ecc.layout->eccbytes; i++) -+ nand_chip->ecc.layout->eccpos[i]=OOB_AVAI_PER_SECTOR*(devinfo.pagesize/NAND_SECTOR_SIZE)+i; -+ -+ MSG(INIT, "Support this Device in MTK table! 
%x \r\n", id); -+ hw->nfi_bus_width = devinfo.iowidth; -+ DRV_WriteReg32(NFI_ACCCON_REG32, devinfo.timmingsetting); -+ -+ /* 16-bit bus width */ -+ if (hw->nfi_bus_width == 16) { -+ MSG(INIT, "%s : Set the 16-bit I/O settings!\n", MODULE_NAME); -+ nand_chip->options |= NAND_BUSWIDTH_16; -+ } -+ mtd->oobsize = devinfo.sparesize; -+ hw->nfi_cs_num = 1; -+ -+ /* Scan to find existance of the device */ -+ if (nand_scan(mtd, hw->nfi_cs_num)) { -+ MSG(INIT, "%s : nand_scan fail.\n", MODULE_NAME); -+ err = -ENXIO; -+ goto out; -+ } -+ -+ g_page_size = mtd->writesize; -+ platform_set_drvdata(pdev, host); -+ if (hw->nfi_bus_width == 16) { -+ NFI_SET_REG16(NFI_PAGEFMT_REG16, PAGEFMT_DBYTE_EN); -+ } -+ -+ nand_chip->select_chip(mtd, 0); -+#if defined(MTK_NAND_BMT) -+ nand_chip->chipsize -= (BMT_POOL_SIZE) << nand_chip->phys_erase_shift; -+#endif -+ mtd->size = nand_chip->chipsize; -+ -+ CFG_BLOCKSIZE = mtd->erasesize; -+ -+#if defined(MTK_NAND_BMT) -+ if (!g_bmt) { -+ if (!(g_bmt = init_bmt(nand_chip, BMT_POOL_SIZE))) { -+ MSG(INIT, "Error: init bmt failed\n"); -+ return 0; -+ } -+ } -+#endif -+ -+ ppdata.of_node = pdev->dev.of_node; -+ err = mtd_device_parse_register(mtd, probe_types, &ppdata, -+ NULL, 0); -+ if (!err) { -+ MSG(INIT, "[mtk_nand] probe successfully!\n"); -+ nand_disable_clock(); -+ shift_on_bbt = 1; -+ if (load_fact_bbt(mtd) == 0) { -+ int i; -+ for (i = 0; i < 0x100; i++) -+ nand_chip->bbt[i] |= fact_bbt[i]; -+ } -+ -+ return err; -+ } -+ -+out: -+ MSG(INIT, "[NFI] mtk_nand_probe fail, err = %d!\n", err); -+ nand_release(mtd); -+ platform_set_drvdata(pdev, NULL); -+ kfree(host); -+ nand_disable_clock(); -+ return err; -+} -+ -+static int -+mtk_nand_remove(struct platform_device *pdev) -+{ -+ struct mtk_nand_host *host = platform_get_drvdata(pdev); -+ struct mtd_info *mtd = &host->mtd; -+ -+ nand_release(mtd); -+ kfree(host); -+ nand_disable_clock(); -+ -+ return 0; -+} -+ -+static const struct of_device_id mt7621_nand_match[] = { -+ { .compatible = "mtk,mt7621-nand" }, -+ {}, -+}; -+MODULE_DEVICE_TABLE(of, mt7621_nand_match); -+ -+static struct platform_driver mtk_nand_driver = { -+ .probe = mtk_nand_probe, -+ .remove = mtk_nand_remove, -+ .driver = { -+ .name = "MT7621-NAND", -+ .owner = THIS_MODULE, -+ .of_match_table = mt7621_nand_match, -+ }, -+}; -+ -+static int __init -+mtk_nand_init(void) -+{ -+ printk("MediaTek Nand driver init, version %s\n", VERSION); -+ -+ return platform_driver_register(&mtk_nand_driver); -+} -+ -+static void __exit -+mtk_nand_exit(void) -+{ -+ platform_driver_unregister(&mtk_nand_driver); -+} -+ -+module_init(mtk_nand_init); -+module_exit(mtk_nand_exit); -+MODULE_LICENSE("GPL"); ---- /dev/null -+++ b/drivers/mtd/nand/mtk_nand.h -@@ -0,0 +1,452 @@ -+#ifndef __MTK_NAND_H -+#define __MTK_NAND_H -+ -+#define RALINK_NAND_CTRL_BASE 0xBE003000 -+#define RALINK_SYSCTL_BASE 0xBE000000 -+#define RALINK_NANDECC_CTRL_BASE 0xBE003800 -+/******************************************************************************* -+ * NFI Register Definition -+ *******************************************************************************/ -+ -+#define NFI_CNFG_REG16 ((volatile P_U16)(NFI_BASE+0x0000)) -+#define NFI_PAGEFMT_REG16 ((volatile P_U16)(NFI_BASE+0x0004)) -+#define NFI_CON_REG16 ((volatile P_U16)(NFI_BASE+0x0008)) -+#define NFI_ACCCON_REG32 ((volatile P_U32)(NFI_BASE+0x000C)) -+#define NFI_INTR_EN_REG16 ((volatile P_U16)(NFI_BASE+0x0010)) -+#define NFI_INTR_REG16 ((volatile P_U16)(NFI_BASE+0x0014)) -+ -+#define NFI_CMD_REG16 ((volatile P_U16)(NFI_BASE+0x0020)) -+ 
-+#define NFI_ADDRNOB_REG16 ((volatile P_U16)(NFI_BASE+0x0030)) -+#define NFI_COLADDR_REG32 ((volatile P_U32)(NFI_BASE+0x0034)) -+#define NFI_ROWADDR_REG32 ((volatile P_U32)(NFI_BASE+0x0038)) -+ -+#define NFI_STRDATA_REG16 ((volatile P_U16)(NFI_BASE+0x0040)) -+ -+#define NFI_DATAW_REG32 ((volatile P_U32)(NFI_BASE+0x0050)) -+#define NFI_DATAR_REG32 ((volatile P_U32)(NFI_BASE+0x0054)) -+#define NFI_PIO_DIRDY_REG16 ((volatile P_U16)(NFI_BASE+0x0058)) -+ -+#define NFI_STA_REG32 ((volatile P_U32)(NFI_BASE+0x0060)) -+#define NFI_FIFOSTA_REG16 ((volatile P_U16)(NFI_BASE+0x0064)) -+#define NFI_LOCKSTA_REG16 ((volatile P_U16)(NFI_BASE+0x0068)) -+ -+#define NFI_ADDRCNTR_REG16 ((volatile P_U16)(NFI_BASE+0x0070)) -+ -+#define NFI_STRADDR_REG32 ((volatile P_U32)(NFI_BASE+0x0080)) -+#define NFI_BYTELEN_REG16 ((volatile P_U16)(NFI_BASE+0x0084)) -+ -+#define NFI_CSEL_REG16 ((volatile P_U16)(NFI_BASE+0x0090)) -+#define NFI_IOCON_REG16 ((volatile P_U16)(NFI_BASE+0x0094)) -+ -+#define NFI_FDM0L_REG32 ((volatile P_U32)(NFI_BASE+0x00A0)) -+#define NFI_FDM0M_REG32 ((volatile P_U32)(NFI_BASE+0x00A4)) -+ -+#define NFI_LOCK_REG16 ((volatile P_U16)(NFI_BASE+0x0100)) -+#define NFI_LOCKCON_REG32 ((volatile P_U32)(NFI_BASE+0x0104)) -+#define NFI_LOCKANOB_REG16 ((volatile P_U16)(NFI_BASE+0x0108)) -+#define NFI_LOCK00ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0110)) -+#define NFI_LOCK00FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0114)) -+#define NFI_LOCK01ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0118)) -+#define NFI_LOCK01FMT_REG32 ((volatile P_U32)(NFI_BASE+0x011C)) -+#define NFI_LOCK02ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0120)) -+#define NFI_LOCK02FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0124)) -+#define NFI_LOCK03ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0128)) -+#define NFI_LOCK03FMT_REG32 ((volatile P_U32)(NFI_BASE+0x012C)) -+#define NFI_LOCK04ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0130)) -+#define NFI_LOCK04FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0134)) -+#define NFI_LOCK05ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0138)) -+#define NFI_LOCK05FMT_REG32 ((volatile P_U32)(NFI_BASE+0x013C)) -+#define NFI_LOCK06ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0140)) -+#define NFI_LOCK06FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0144)) -+#define NFI_LOCK07ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0148)) -+#define NFI_LOCK07FMT_REG32 ((volatile P_U32)(NFI_BASE+0x014C)) -+#define NFI_LOCK08ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0150)) -+#define NFI_LOCK08FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0154)) -+#define NFI_LOCK09ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0158)) -+#define NFI_LOCK09FMT_REG32 ((volatile P_U32)(NFI_BASE+0x015C)) -+#define NFI_LOCK10ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0160)) -+#define NFI_LOCK10FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0164)) -+#define NFI_LOCK11ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0168)) -+#define NFI_LOCK11FMT_REG32 ((volatile P_U32)(NFI_BASE+0x016C)) -+#define NFI_LOCK12ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0170)) -+#define NFI_LOCK12FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0174)) -+#define NFI_LOCK13ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0178)) -+#define NFI_LOCK13FMT_REG32 ((volatile P_U32)(NFI_BASE+0x017C)) -+#define NFI_LOCK14ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0180)) -+#define NFI_LOCK14FMT_REG32 ((volatile P_U32)(NFI_BASE+0x0184)) -+#define NFI_LOCK15ADD_REG32 ((volatile P_U32)(NFI_BASE+0x0188)) -+#define NFI_LOCK15FMT_REG32 ((volatile P_U32)(NFI_BASE+0x018C)) -+ -+#define NFI_FIFODATA0_REG32 ((volatile P_U32)(NFI_BASE+0x0190)) -+#define NFI_FIFODATA1_REG32 ((volatile P_U32)(NFI_BASE+0x0194)) -+#define 
NFI_FIFODATA2_REG32 ((volatile P_U32)(NFI_BASE+0x0198)) -+#define NFI_FIFODATA3_REG32 ((volatile P_U32)(NFI_BASE+0x019C)) -+#define NFI_MASTERSTA_REG16 ((volatile P_U16)(NFI_BASE+0x0210)) -+ -+ -+/******************************************************************************* -+ * NFI Register Field Definition -+ *******************************************************************************/ -+ -+/* NFI_CNFG */ -+#define CNFG_AHB (0x0001) -+#define CNFG_READ_EN (0x0002) -+#define CNFG_DMA_BURST_EN (0x0004) -+#define CNFG_BYTE_RW (0x0040) -+#define CNFG_HW_ECC_EN (0x0100) -+#define CNFG_AUTO_FMT_EN (0x0200) -+#define CNFG_OP_IDLE (0x0000) -+#define CNFG_OP_READ (0x1000) -+#define CNFG_OP_SRD (0x2000) -+#define CNFG_OP_PRGM (0x3000) -+#define CNFG_OP_ERASE (0x4000) -+#define CNFG_OP_RESET (0x5000) -+#define CNFG_OP_CUST (0x6000) -+#define CNFG_OP_MODE_MASK (0x7000) -+#define CNFG_OP_MODE_SHIFT (12) -+ -+/* NFI_PAGEFMT */ -+#define PAGEFMT_512 (0x0000) -+#define PAGEFMT_2K (0x0001) -+#define PAGEFMT_4K (0x0002) -+ -+#define PAGEFMT_PAGE_MASK (0x0003) -+ -+#define PAGEFMT_DBYTE_EN (0x0008) -+ -+#define PAGEFMT_SPARE_16 (0x0000) -+#define PAGEFMT_SPARE_26 (0x0001) -+#define PAGEFMT_SPARE_27 (0x0002) -+#define PAGEFMT_SPARE_28 (0x0003) -+#define PAGEFMT_SPARE_MASK (0x0030) -+#define PAGEFMT_SPARE_SHIFT (4) -+ -+#define PAGEFMT_FDM_MASK (0x0F00) -+#define PAGEFMT_FDM_SHIFT (8) -+ -+#define PAGEFMT_FDM_ECC_MASK (0xF000) -+#define PAGEFMT_FDM_ECC_SHIFT (12) -+ -+/* NFI_CON */ -+#define CON_FIFO_FLUSH (0x0001) -+#define CON_NFI_RST (0x0002) -+#define CON_NFI_SRD (0x0010) -+ -+#define CON_NFI_NOB_MASK (0x0060) -+#define CON_NFI_NOB_SHIFT (5) -+ -+#define CON_NFI_BRD (0x0100) -+#define CON_NFI_BWR (0x0200) -+ -+#define CON_NFI_SEC_MASK (0xF000) -+#define CON_NFI_SEC_SHIFT (12) -+ -+/* NFI_ACCCON */ -+#define ACCCON_SETTING () -+ -+/* NFI_INTR_EN */ -+#define INTR_RD_DONE_EN (0x0001) -+#define INTR_WR_DONE_EN (0x0002) -+#define INTR_RST_DONE_EN (0x0004) -+#define INTR_ERASE_DONE_EN (0x0008) -+#define INTR_BSY_RTN_EN (0x0010) -+#define INTR_ACC_LOCK_EN (0x0020) -+#define INTR_AHB_DONE_EN (0x0040) -+#define INTR_ALL_INTR_DE (0x0000) -+#define INTR_ALL_INTR_EN (0x007F) -+ -+/* NFI_INTR */ -+#define INTR_RD_DONE (0x0001) -+#define INTR_WR_DONE (0x0002) -+#define INTR_RST_DONE (0x0004) -+#define INTR_ERASE_DONE (0x0008) -+#define INTR_BSY_RTN (0x0010) -+#define INTR_ACC_LOCK (0x0020) -+#define INTR_AHB_DONE (0x0040) -+ -+/* NFI_ADDRNOB */ -+#define ADDR_COL_NOB_MASK (0x0003) -+#define ADDR_COL_NOB_SHIFT (0) -+#define ADDR_ROW_NOB_MASK (0x0030) -+#define ADDR_ROW_NOB_SHIFT (4) -+ -+/* NFI_STA */ -+#define STA_READ_EMPTY (0x00001000) -+#define STA_ACC_LOCK (0x00000010) -+#define STA_CMD_STATE (0x00000001) -+#define STA_ADDR_STATE (0x00000002) -+#define STA_DATAR_STATE (0x00000004) -+#define STA_DATAW_STATE (0x00000008) -+ -+#define STA_NAND_FSM_MASK (0x1F000000) -+#define STA_NAND_BUSY (0x00000100) -+#define STA_NAND_BUSY_RETURN (0x00000200) -+#define STA_NFI_FSM_MASK (0x000F0000) -+#define STA_NFI_OP_MASK (0x0000000F) -+ -+/* NFI_FIFOSTA */ -+#define FIFO_RD_EMPTY (0x0040) -+#define FIFO_RD_FULL (0x0080) -+#define FIFO_WR_FULL (0x8000) -+#define FIFO_WR_EMPTY (0x4000) -+#define FIFO_RD_REMAIN(x) (0x1F&(x)) -+#define FIFO_WR_REMAIN(x) ((0x1F00&(x))>>8) -+ -+/* NFI_ADDRCNTR */ -+#define ADDRCNTR_CNTR(x) ((0xF000&(x))>>12) -+#define ADDRCNTR_OFFSET(x) (0x03FF&(x)) -+ -+/* NFI_LOCK */ -+#define NFI_LOCK_ON (0x0001) -+ -+/* NFI_LOCKANOB */ -+#define PROG_RADD_NOB_MASK (0x7000) -+#define PROG_RADD_NOB_SHIFT (12) 
-+#define PROG_CADD_NOB_MASK (0x0300) -+#define PROG_CADD_NOB_SHIFT (8) -+#define ERASE_RADD_NOB_MASK (0x0070) -+#define ERASE_RADD_NOB_SHIFT (4) -+#define ERASE_CADD_NOB_MASK (0x0007) -+#define ERASE_CADD_NOB_SHIFT (0) -+ -+/******************************************************************************* -+ * ECC Register Definition -+ *******************************************************************************/ -+ -+#define ECC_ENCCON_REG16 ((volatile P_U16)(NFIECC_BASE+0x0000)) -+#define ECC_ENCCNFG_REG32 ((volatile P_U32)(NFIECC_BASE+0x0004)) -+#define ECC_ENCDIADDR_REG32 ((volatile P_U32)(NFIECC_BASE+0x0008)) -+#define ECC_ENCIDLE_REG32 ((volatile P_U32)(NFIECC_BASE+0x000C)) -+#define ECC_ENCPAR0_REG32 ((volatile P_U32)(NFIECC_BASE+0x0010)) -+#define ECC_ENCPAR1_REG32 ((volatile P_U32)(NFIECC_BASE+0x0014)) -+#define ECC_ENCPAR2_REG32 ((volatile P_U32)(NFIECC_BASE+0x0018)) -+#define ECC_ENCPAR3_REG32 ((volatile P_U32)(NFIECC_BASE+0x001C)) -+#define ECC_ENCPAR4_REG32 ((volatile P_U32)(NFIECC_BASE+0x0020)) -+#define ECC_ENCSTA_REG32 ((volatile P_U32)(NFIECC_BASE+0x0024)) -+#define ECC_ENCIRQEN_REG16 ((volatile P_U16)(NFIECC_BASE+0x0028)) -+#define ECC_ENCIRQSTA_REG16 ((volatile P_U16)(NFIECC_BASE+0x002C)) -+ -+#define ECC_DECCON_REG16 ((volatile P_U16)(NFIECC_BASE+0x0100)) -+#define ECC_DECCNFG_REG32 ((volatile P_U32)(NFIECC_BASE+0x0104)) -+#define ECC_DECDIADDR_REG32 ((volatile P_U32)(NFIECC_BASE+0x0108)) -+#define ECC_DECIDLE_REG16 ((volatile P_U16)(NFIECC_BASE+0x010C)) -+#define ECC_DECFER_REG16 ((volatile P_U16)(NFIECC_BASE+0x0110)) -+#define ECC_DECENUM_REG32 ((volatile P_U32)(NFIECC_BASE+0x0114)) -+#define ECC_DECDONE_REG16 ((volatile P_U16)(NFIECC_BASE+0x0118)) -+#define ECC_DECEL0_REG32 ((volatile P_U32)(NFIECC_BASE+0x011C)) -+#define ECC_DECEL1_REG32 ((volatile P_U32)(NFIECC_BASE+0x0120)) -+#define ECC_DECEL2_REG32 ((volatile P_U32)(NFIECC_BASE+0x0124)) -+#define ECC_DECEL3_REG32 ((volatile P_U32)(NFIECC_BASE+0x0128)) -+#define ECC_DECEL4_REG32 ((volatile P_U32)(NFIECC_BASE+0x012C)) -+#define ECC_DECEL5_REG32 ((volatile P_U32)(NFIECC_BASE+0x0130)) -+#define ECC_DECIRQEN_REG16 ((volatile P_U16)(NFIECC_BASE+0x0134)) -+#define ECC_DECIRQSTA_REG16 ((volatile P_U16)(NFIECC_BASE+0x0138)) -+#define ECC_FDMADDR_REG32 ((volatile P_U32)(NFIECC_BASE+0x013C)) -+#define ECC_DECFSM_REG32 ((volatile P_U32)(NFIECC_BASE+0x0140)) -+#define ECC_SYNSTA_REG32 ((volatile P_U32)(NFIECC_BASE+0x0144)) -+#define ECC_DECNFIDI_REG32 ((volatile P_U32)(NFIECC_BASE+0x0148)) -+#define ECC_SYN0_REG32 ((volatile P_U32)(NFIECC_BASE+0x014C)) -+ -+/******************************************************************************* -+ * ECC register definition -+ *******************************************************************************/ -+/* ECC_ENCON */ -+#define ENC_EN (0x0001) -+#define ENC_DE (0x0000) -+ -+/* ECC_ENCCNFG */ -+#define ECC_CNFG_ECC4 (0x0000) -+#define ECC_CNFG_ECC6 (0x0001) -+#define ECC_CNFG_ECC8 (0x0002) -+#define ECC_CNFG_ECC10 (0x0003) -+#define ECC_CNFG_ECC12 (0x0004) -+#define ECC_CNFG_ECC_MASK (0x00000007) -+ -+#define ENC_CNFG_NFI (0x0010) -+#define ENC_CNFG_MODE_MASK (0x0010) -+ -+#define ENC_CNFG_META6 (0x10300000) -+#define ENC_CNFG_META8 (0x10400000) -+ -+#define ENC_CNFG_MSG_MASK (0x1FFF0000) -+#define ENC_CNFG_MSG_SHIFT (0x10) -+ -+/* ECC_ENCIDLE */ -+#define ENC_IDLE (0x0001) -+ -+/* ECC_ENCSTA */ -+#define STA_FSM (0x001F) -+#define STA_COUNT_PS (0xFF10) -+#define STA_COUNT_MS (0x3FFF0000) -+ -+/* ECC_ENCIRQEN */ -+#define ENC_IRQEN (0x0001) -+ -+/* ECC_ENCIRQSTA */ -+#define 
ENC_IRQSTA (0x0001) -+ -+/* ECC_DECCON */ -+#define DEC_EN (0x0001) -+#define DEC_DE (0x0000) -+ -+/* ECC_ENCCNFG */ -+#define DEC_CNFG_ECC4 (0x0000) -+//#define DEC_CNFG_ECC6 (0x0001) -+//#define DEC_CNFG_ECC12 (0x0002) -+#define DEC_CNFG_NFI (0x0010) -+//#define DEC_CNFG_META6 (0x10300000) -+//#define DEC_CNFG_META8 (0x10400000) -+ -+#define DEC_CNFG_FER (0x01000) -+#define DEC_CNFG_EL (0x02000) -+#define DEC_CNFG_CORRECT (0x03000) -+#define DEC_CNFG_TYPE_MASK (0x03000) -+ -+#define DEC_CNFG_EMPTY_EN (0x80000000) -+ -+#define DEC_CNFG_CODE_MASK (0x1FFF0000) -+#define DEC_CNFG_CODE_SHIFT (0x10) -+ -+/* ECC_DECIDLE */ -+#define DEC_IDLE (0x0001) -+ -+/* ECC_DECFER */ -+#define DEC_FER0 (0x0001) -+#define DEC_FER1 (0x0002) -+#define DEC_FER2 (0x0004) -+#define DEC_FER3 (0x0008) -+#define DEC_FER4 (0x0010) -+#define DEC_FER5 (0x0020) -+#define DEC_FER6 (0x0040) -+#define DEC_FER7 (0x0080) -+ -+/* ECC_DECENUM */ -+#define ERR_NUM0 (0x0000000F) -+#define ERR_NUM1 (0x000000F0) -+#define ERR_NUM2 (0x00000F00) -+#define ERR_NUM3 (0x0000F000) -+#define ERR_NUM4 (0x000F0000) -+#define ERR_NUM5 (0x00F00000) -+#define ERR_NUM6 (0x0F000000) -+#define ERR_NUM7 (0xF0000000) -+ -+/* ECC_DECDONE */ -+#define DEC_DONE0 (0x0001) -+#define DEC_DONE1 (0x0002) -+#define DEC_DONE2 (0x0004) -+#define DEC_DONE3 (0x0008) -+#define DEC_DONE4 (0x0010) -+#define DEC_DONE5 (0x0020) -+#define DEC_DONE6 (0x0040) -+#define DEC_DONE7 (0x0080) -+ -+/* ECC_DECIRQEN */ -+#define DEC_IRQEN (0x0001) -+ -+/* ECC_DECIRQSTA */ -+#define DEC_IRQSTA (0x0001) -+ -+#define CHIPVER_ECO_1 (0x8a00) -+#define CHIPVER_ECO_2 (0x8a01) -+ -+//#define NAND_PFM -+ -+/******************************************************************************* -+ * Data Structure Definition -+ *******************************************************************************/ -+struct mtk_nand_host -+{ -+ struct nand_chip nand_chip; -+ struct mtd_info mtd; -+ struct mtk_nand_host_hw *hw; -+}; -+ -+struct NAND_CMD -+{ -+ u32 u4ColAddr; -+ u32 u4RowAddr; -+ u32 u4OOBRowAddr; -+ u8 au1OOB[288]; -+ u8* pDataBuf; -+#ifdef NAND_PFM -+ u32 pureReadOOB; -+ u32 pureReadOOBNum; -+#endif -+}; -+ -+/* -+ * ECC layout control structure. Exported to userspace for -+ * diagnosis and to allow creation of raw images -+struct nand_ecclayout { -+ uint32_t eccbytes; -+ uint32_t eccpos[64]; -+ uint32_t oobavail; -+ struct nand_oobfree oobfree[MTD_MAX_OOBFREE_ENTRIES]; -+}; -+*/ -+#define __DEBUG_NAND 1 /* Debug information on/off */ -+ -+/* Debug message event */ -+#define DBG_EVT_NONE 0x00000000 /* No event */ -+#define DBG_EVT_INIT 0x00000001 /* Initial related event */ -+#define DBG_EVT_VERIFY 0x00000002 /* Verify buffer related event */ -+#define DBG_EVT_PERFORMANCE 0x00000004 /* Performance related event */ -+#define DBG_EVT_READ 0x00000008 /* Read related event */ -+#define DBG_EVT_WRITE 0x00000010 /* Write related event */ -+#define DBG_EVT_ERASE 0x00000020 /* Erase related event */ -+#define DBG_EVT_BADBLOCK 0x00000040 /* Badblock related event */ -+#define DBG_EVT_POWERCTL 0x00000080 /* Suspend/Resume related event */ -+ -+#define DBG_EVT_ALL 0xffffffff -+ -+#define DBG_EVT_MASK (DBG_EVT_INIT) -+ -+#if __DEBUG_NAND -+#define MSG(evt, fmt, args...) \ -+do { \ -+ if ((DBG_EVT_##evt) & DBG_EVT_MASK) { \ -+ printk(fmt, ##args); \ -+ } \ -+} while(0) -+ -+#define MSG_FUNC_ENTRY(f) MSG(FUC, ": %s\n", __FUNCTION__) -+#else -+#define MSG(evt, fmt, args...) 
do{}while(0) -+#define MSG_FUNC_ENTRY(f) do{}while(0) -+#endif -+ -+#define RAMDOM_READ 1<<0 -+#define CACHE_READ 1<<1 -+ -+typedef struct -+{ -+ u16 id; //deviceid+menuid -+ u32 ext_id; -+ u8 addr_cycle; -+ u8 iowidth; -+ u16 totalsize; -+ u16 blocksize; -+ u16 pagesize; -+ u16 sparesize; -+ u32 timmingsetting; -+ char devciename[14]; -+ u32 advancedmode; // -+}flashdev_info,*pflashdev_info; -+ -+/* NAND driver */ -+#if 0 -+struct mtk_nand_host_hw { -+ unsigned int nfi_bus_width; /* NFI_BUS_WIDTH */ -+ unsigned int nfi_access_timing; /* NFI_ACCESS_TIMING */ -+ unsigned int nfi_cs_num; /* NFI_CS_NUM */ -+ unsigned int nand_sec_size; /* NAND_SECTOR_SIZE */ -+ unsigned int nand_sec_shift; /* NAND_SECTOR_SHIFT */ -+ unsigned int nand_ecc_size; -+ unsigned int nand_ecc_bytes; -+ unsigned int nand_ecc_mode; -+}; -+extern struct mtk_nand_host_hw mt7621_nand_hw; -+extern u32 CFG_BLOCKSIZE; -+#endif -+#endif ---- a/drivers/mtd/nand/nand_base.c -+++ b/drivers/mtd/nand/nand_base.c -@@ -90,7 +90,7 @@ static struct nand_ecclayout nand_oob_12 - .length = 78} } - }; - --static int nand_get_device(struct mtd_info *mtd, int new_state); -+int nand_get_device(struct mtd_info *mtd, int new_state); - - static int nand_do_write_oob(struct mtd_info *mtd, loff_t to, - struct mtd_oob_ops *ops); -@@ -128,7 +128,7 @@ static int check_offs_len(struct mtd_inf - * - * Release chip lock and wake up anyone waiting on the device. - */ --static void nand_release_device(struct mtd_info *mtd) -+void nand_release_device(struct mtd_info *mtd) - { - struct nand_chip *chip = mtd->priv; - -@@ -739,7 +739,7 @@ static void panic_nand_get_device(struct - * - * Get the device and lock it for exclusive access - */ --static int -+int - nand_get_device(struct mtd_info *mtd, int new_state) - { - struct nand_chip *chip = mtd->priv; ---- a/drivers/mtd/nand/nand_bbt.c -+++ b/drivers/mtd/nand/nand_bbt.c -@@ -1378,6 +1378,25 @@ int nand_isbad_bbt(struct mtd_info *mtd, - return 1; - } - -+void nand_bbt_set(struct mtd_info *mtd, int page, int flag) -+{ -+ struct nand_chip *this = mtd->priv; -+ int block; -+ -+ block = (int)(page >> (this->bbt_erase_shift - this->page_shift - 1)); -+ this->bbt[block >> 3] &= ~(0x03 << (block & 0x6)); -+ this->bbt[block >> 3] |= (flag & 0x3) << (block & 0x6); -+} -+ -+int nand_bbt_get(struct mtd_info *mtd, int page) -+{ -+ struct nand_chip *this = mtd->priv; -+ int block; -+ -+ block = (int)(page >> (this->bbt_erase_shift - this->page_shift - 1)); -+ return (this->bbt[block >> 3] >> (block & 0x06)) & 0x03; -+} -+ - EXPORT_SYMBOL(nand_scan_bbt); - EXPORT_SYMBOL(nand_default_bbt); - EXPORT_SYMBOL_GPL(nand_update_bbt); ---- /dev/null -+++ b/drivers/mtd/nand/nand_def.h -@@ -0,0 +1,123 @@ -+#ifndef __NAND_DEF_H__ -+#define __NAND_DEF_H__ -+ -+#define VERSION "v2.1 Fix AHB virt2phys error" -+#define MODULE_NAME "# MTK NAND #" -+#define PROCNAME "driver/nand" -+ -+#undef TESTTIME -+//#define __UBOOT_NAND__ 1 -+#define __KERNEL_NAND__ 1 -+//#define __PRELOADER_NAND__ 1 -+//#define PMT 1 -+//#define _MTK_NAND_DUMMY_DRIVER -+//#define CONFIG_BADBLOCK_CHECK 1 -+//#ifdef CONFIG_BADBLOCK_CHECK -+//#define MTK_NAND_BMT 1 -+//#endif -+#define ECC_ENABLE 1 -+#define MANUAL_CORRECT 1 -+//#define __INTERNAL_USE_AHB_MODE__ (0) -+#define SKIP_BAD_BLOCK -+#define FACT_BBT -+ -+#ifndef NAND_OTP_SUPPORT -+#define NAND_OTP_SUPPORT 0 -+#endif -+ -+/******************************************************************************* -+ * Macro definition -+ *******************************************************************************/ 
-+//#define NFI_SET_REG32(reg, value) (DRV_WriteReg32(reg, DRV_Reg32(reg) | (value))) -+//#define NFI_SET_REG16(reg, value) (DRV_WriteReg16(reg, DRV_Reg16(reg) | (value))) -+//#define NFI_CLN_REG32(reg, value) (DRV_WriteReg32(reg, DRV_Reg32(reg) & (~(value)))) -+//#define NFI_CLN_REG16(reg, value) (DRV_WriteReg16(reg, DRV_Reg16(reg) & (~(value)))) -+ -+#if defined (__KERNEL_NAND__) -+#define NFI_SET_REG32(reg, value) \ -+do { \ -+ g_value = (DRV_Reg32(reg) | (value));\ -+ DRV_WriteReg32(reg, g_value); \ -+} while(0) -+ -+#define NFI_SET_REG16(reg, value) \ -+do { \ -+ g_value = (DRV_Reg16(reg) | (value));\ -+ DRV_WriteReg16(reg, g_value); \ -+} while(0) -+ -+#define NFI_CLN_REG32(reg, value) \ -+do { \ -+ g_value = (DRV_Reg32(reg) & (~(value)));\ -+ DRV_WriteReg32(reg, g_value); \ -+} while(0) -+ -+#define NFI_CLN_REG16(reg, value) \ -+do { \ -+ g_value = (DRV_Reg16(reg) & (~(value)));\ -+ DRV_WriteReg16(reg, g_value); \ -+} while(0) -+#endif -+ -+#define NFI_WAIT_STATE_DONE(state) do{;}while (__raw_readl(NFI_STA_REG32) & state) -+#define NFI_WAIT_TO_READY() do{;}while (!(__raw_readl(NFI_STA_REG32) & STA_BUSY2READY)) -+ -+ -+#define NAND_SECTOR_SIZE (512) -+#define OOB_PER_SECTOR (16) -+#define OOB_AVAI_PER_SECTOR (8) -+ -+#ifndef PART_SIZE_BMTPOOL -+#define BMT_POOL_SIZE (80) -+#else -+#define BMT_POOL_SIZE (PART_SIZE_BMTPOOL) -+#endif -+ -+#define PMT_POOL_SIZE (2) -+ -+#define TIMEOUT_1 0x1fff -+#define TIMEOUT_2 0x8ff -+#define TIMEOUT_3 0xffff -+#define TIMEOUT_4 0xffff//5000 //PIO -+ -+ -+/* temporarity definiation */ -+#if !defined (__KERNEL_NAND__) -+#define KERN_INFO -+#define KERN_WARNING -+#define KERN_ERR -+#define PAGE_SIZE (4096) -+#endif -+#define AddStorageTrace //AddStorageTrace -+#define STORAGE_LOGGER_MSG_NAND 0 -+#define NFI_BASE RALINK_NAND_CTRL_BASE -+#define NFIECC_BASE RALINK_NANDECC_CTRL_BASE -+ -+#ifdef __INTERNAL_USE_AHB_MODE__ -+#define MT65xx_POLARITY_LOW 0 -+#define MT65XX_PDN_PERI_NFI 0 -+#define MT65xx_EDGE_SENSITIVE 0 -+#define MT6575_NFI_IRQ_ID (58) -+#endif -+ -+#if defined (__KERNEL_NAND__) -+#define RALINK_REG(x) (*((volatile u32 *)(x))) -+#define __virt_to_phys(x) virt_to_phys((volatile void*)x) -+#else -+#define CONFIG_MTD_NAND_VERIFY_WRITE (1) -+#define printk printf -+#define ra_dbg printf -+#define BUG() //BUG() -+#define BUG_ON(x) //BUG_ON() -+#define NUM_PARTITIONS 1 -+#endif -+ -+#define NFI_DEFAULT_ACCESS_TIMING (0x30C77fff) //(0x44333) -+ -+//uboot only support 1 cs -+#define NFI_CS_NUM (1) -+#define NFI_DEFAULT_CS (0) -+ -+#include "mt6575_typedefs.h" -+ -+#endif /* __NAND_DEF_H__ */ ---- /dev/null -+++ b/drivers/mtd/nand/nand_device_list.h -@@ -0,0 +1,55 @@ -+/* Copyright Statement: -+ * -+ * This software/firmware and related documentation ("MediaTek Software") are -+ * protected under relevant copyright laws. The information contained herein -+ * is confidential and proprietary to MediaTek Inc. and/or its licensors. -+ * Without the prior written permission of MediaTek inc. and/or its licensors, -+ * any reproduction, modification, use or disclosure of MediaTek Software, -+ * and information contained herein, in whole or in part, shall be strictly prohibited. -+ */ -+/* MediaTek Inc. (C) 2010. All rights reserved. -+ * -+ * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES -+ * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE") -+ * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON -+ * AN "AS-IS" BASIS ONLY. 
MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES, -+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF -+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT. -+ * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE -+ * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR -+ * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH -+ * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES -+ * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES -+ * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK -+ * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR -+ * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND -+ * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE, -+ * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE, -+ * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO -+ * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE. -+ * -+ * The following software/firmware and/or related documentation ("MediaTek Software") -+ * have been modified by MediaTek Inc. All revisions are subject to any receiver's -+ * applicable license agreements with MediaTek Inc. -+ */ -+ -+#ifndef __NAND_DEVICE_LIST_H__ -+#define __NAND_DEVICE_LIST_H__ -+ -+static const flashdev_info gen_FlashTable[]={ -+ {0x20BC, 0x105554, 5, 16, 512, 128, 2048, 64, 0x1123, "EHD013151MA_5", 0}, -+ {0xECBC, 0x005554, 5, 16, 512, 128, 2048, 64, 0x1123, "K524G2GACB_A0", 0}, -+ {0x2CBC, 0x905556, 5, 16, 512, 128, 2048, 64, 0x21044333, "MT29C4G96MAZA", 0}, -+ {0xADBC, 0x905554, 5, 16, 512, 128, 2048, 64, 0x10801011, "H9DA4GH4JJAMC", 0}, -+ {0x01F1, 0x801D01, 4, 8, 128, 128, 2048, 64, 0x30C77fff, "S34ML01G100TF", 0}, -+ {0x92F1, 0x8095FF, 4, 8, 128, 128, 2048, 64, 0x30C77fff, "F59L1G81A", 0}, -+ {0xECD3, 0x519558, 5, 8, 1024, 128, 2048, 64, 0x44333, "K9K8G8000", 0}, -+ {0xC2F1, 0x801DC2, 4, 8, 128, 128, 2048, 64, 0x30C77fff, "MX30LF1G08AA", 0}, -+ {0x98D3, 0x902676, 5, 8, 1024, 256, 4096, 224, 0x00C25332, "TC58NVG3S0F", 0}, -+ {0x01DA, 0x909546, 5, 8, 256, 128, 2048, 128, 0x30C77fff, "S34ML02G200TF", 0}, -+ {0x01DC, 0x909556, 5, 8, 512, 128, 2048, 128, 0x30C77fff, "S34ML04G200TF", 0}, -+ {0x0000, 0x000000, 0, 0, 0, 0, 0, 0, 0, "xxxxxxxxxx", 0}, -+}; -+ -+ -+#endif ---- /dev/null -+++ b/drivers/mtd/nand/partition.h -@@ -0,0 +1,115 @@ -+/* Copyright Statement: -+ * -+ * This software/firmware and related documentation ("MediaTek Software") are -+ * protected under relevant copyright laws. The information contained herein -+ * is confidential and proprietary to MediaTek Inc. and/or its licensors. -+ * Without the prior written permission of MediaTek inc. and/or its licensors, -+ * any reproduction, modification, use or disclosure of MediaTek Software, -+ * and information contained herein, in whole or in part, shall be strictly prohibited. -+ */ -+/* MediaTek Inc. (C) 2010. All rights reserved. -+ * -+ * BY OPENING THIS FILE, RECEIVER HEREBY UNEQUIVOCALLY ACKNOWLEDGES AND AGREES -+ * THAT THE SOFTWARE/FIRMWARE AND ITS DOCUMENTATIONS ("MEDIATEK SOFTWARE") -+ * RECEIVED FROM MEDIATEK AND/OR ITS REPRESENTATIVES ARE PROVIDED TO RECEIVER ON -+ * AN "AS-IS" BASIS ONLY. 
MEDIATEK EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES, -+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF -+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NONINFRINGEMENT. -+ * NEITHER DOES MEDIATEK PROVIDE ANY WARRANTY WHATSOEVER WITH RESPECT TO THE -+ * SOFTWARE OF ANY THIRD PARTY WHICH MAY BE USED BY, INCORPORATED IN, OR -+ * SUPPLIED WITH THE MEDIATEK SOFTWARE, AND RECEIVER AGREES TO LOOK ONLY TO SUCH -+ * THIRD PARTY FOR ANY WARRANTY CLAIM RELATING THERETO. RECEIVER EXPRESSLY ACKNOWLEDGES -+ * THAT IT IS RECEIVER'S SOLE RESPONSIBILITY TO OBTAIN FROM ANY THIRD PARTY ALL PROPER LICENSES -+ * CONTAINED IN MEDIATEK SOFTWARE. MEDIATEK SHALL ALSO NOT BE RESPONSIBLE FOR ANY MEDIATEK -+ * SOFTWARE RELEASES MADE TO RECEIVER'S SPECIFICATION OR TO CONFORM TO A PARTICULAR -+ * STANDARD OR OPEN FORUM. RECEIVER'S SOLE AND EXCLUSIVE REMEDY AND MEDIATEK'S ENTIRE AND -+ * CUMULATIVE LIABILITY WITH RESPECT TO THE MEDIATEK SOFTWARE RELEASED HEREUNDER WILL BE, -+ * AT MEDIATEK'S OPTION, TO REVISE OR REPLACE THE MEDIATEK SOFTWARE AT ISSUE, -+ * OR REFUND ANY SOFTWARE LICENSE FEES OR SERVICE CHARGE PAID BY RECEIVER TO -+ * MEDIATEK FOR SUCH MEDIATEK SOFTWARE AT ISSUE. -+ * -+ * The following software/firmware and/or related documentation ("MediaTek Software") -+ * have been modified by MediaTek Inc. All revisions are subject to any receiver's -+ * applicable license agreements with MediaTek Inc. -+ */ -+ -+#include -+#include -+#include -+ -+#define RECONFIG_PARTITION_SIZE 1 -+ -+#define MTD_BOOT_PART_SIZE 0x80000 -+#define MTD_CONFIG_PART_SIZE 0x20000 -+#define MTD_FACTORY_PART_SIZE 0x20000 -+ -+extern unsigned int CFG_BLOCKSIZE; -+#define LARGE_MTD_BOOT_PART_SIZE (CFG_BLOCKSIZE<<2) -+#define LARGE_MTD_CONFIG_PART_SIZE (CFG_BLOCKSIZE<<2) -+#define LARGE_MTD_FACTORY_PART_SIZE (CFG_BLOCKSIZE<<1) -+ -+/*=======================================================================*/ -+/* NAND PARTITION Mapping */ -+/*=======================================================================*/ -+//#ifdef CONFIG_MTD_PARTITIONS -+static struct mtd_partition g_pasStatic_Partition[] = { -+ { -+ name: "ALL", -+ size: MTDPART_SIZ_FULL, -+ offset: 0, -+ }, -+ /* Put your own partition definitions here */ -+ { -+ name: "Bootloader", -+ size: MTD_BOOT_PART_SIZE, -+ offset: 0, -+ }, { -+ name: "Config", -+ size: MTD_CONFIG_PART_SIZE, -+ offset: MTDPART_OFS_APPEND -+ }, { -+ name: "Factory", -+ size: MTD_FACTORY_PART_SIZE, -+ offset: MTDPART_OFS_APPEND -+#ifdef CONFIG_RT2880_ROOTFS_IN_FLASH -+ }, { -+ name: "Kernel", -+ size: MTD_KERN_PART_SIZE, -+ offset: MTDPART_OFS_APPEND, -+ }, { -+ name: "RootFS", -+ size: MTD_ROOTFS_PART_SIZE, -+ offset: MTDPART_OFS_APPEND, -+#ifdef CONFIG_ROOTFS_IN_FLASH_NO_PADDING -+ }, { -+ name: "Kernel_RootFS", -+ size: MTD_KERN_PART_SIZE + MTD_ROOTFS_PART_SIZE, -+ offset: MTD_BOOT_PART_SIZE + MTD_CONFIG_PART_SIZE + MTD_FACTORY_PART_SIZE, -+#endif -+#else //CONFIG_RT2880_ROOTFS_IN_RAM -+ }, { -+ name: "Kernel", -+ size: 0x10000, -+ offset: MTDPART_OFS_APPEND, -+#endif -+#ifdef CONFIG_DUAL_IMAGE -+ }, { -+ name: "Kernel2", -+ size: MTD_KERN2_PART_SIZE, -+ offset: MTD_KERN2_PART_OFFSET, -+#ifdef CONFIG_RT2880_ROOTFS_IN_FLASH -+ }, { -+ name: "RootFS2", -+ size: MTD_ROOTFS2_PART_SIZE, -+ offset: MTD_ROOTFS2_PART_OFFSET, -+#endif -+#endif -+ } -+ -+}; -+ -+#define NUM_PARTITIONS ARRAY_SIZE(g_pasStatic_Partition) -+extern int part_num; // = NUM_PARTITIONS; -+//#endif -+#undef RECONFIG_PARTITION_SIZE -+ diff --git 
a/target/linux/ramips/patches-3.10/0508-MIPS-GIC-Fix-gic_set_affinity-infinite-loop.patch b/target/linux/ramips/patches-3.10/0508-MIPS-GIC-Fix-gic_set_affinity-infinite-loop.patch deleted file mode 100644 index c089e5aa7f..0000000000 --- a/target/linux/ramips/patches-3.10/0508-MIPS-GIC-Fix-gic_set_affinity-infinite-loop.patch +++ /dev/null @@ -1,44 +0,0 @@ -From 553ddf4f3f20c28ab03f87ac8c3cde5edf714675 Mon Sep 17 00:00:00 2001 -From: Tony Wu -Date: Fri, 21 Jun 2013 10:13:08 +0000 -Subject: [PATCH 022/105] MIPS: GIC: Fix gic_set_affinity infinite loop - -There is an infinite loop in gic_set_affinity. When irq_set_affinity -gets called on gic controller, it blocks forever. - -Signed-off-by: Tony Wu -Cc: Steven J. Hill -Cc: linux-mips@linux-mips.org -Patchwork: https://patchwork.linux-mips.org/patch/5537/ -Signed-off-by: Ralf Baechle -(cherry picked from commit c214c03512b67e56dea3f4471705f8caae49553a) ---- - arch/mips/kernel/irq-gic.c | 15 +++++++-------- - 1 file changed, 7 insertions(+), 8 deletions(-) - ---- a/arch/mips/kernel/irq-gic.c -+++ b/arch/mips/kernel/irq-gic.c -@@ -219,16 +219,15 @@ static int gic_set_affinity(struct irq_d - - /* Assumption : cpumask refers to a single CPU */ - spin_lock_irqsave(&gic_lock, flags); -- for (;;) { -- /* Re-route this IRQ */ -- GIC_SH_MAP_TO_VPE_SMASK(irq, first_cpu(tmp)); - -- /* Update the pcpu_masks */ -- for (i = 0; i < NR_CPUS; i++) -- clear_bit(irq, pcpu_masks[i].pcpu_mask); -- set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask); -+ /* Re-route this IRQ */ -+ GIC_SH_MAP_TO_VPE_SMASK(irq, first_cpu(tmp)); -+ -+ /* Update the pcpu_masks */ -+ for (i = 0; i < NR_CPUS; i++) -+ clear_bit(irq, pcpu_masks[i].pcpu_mask); -+ set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask); - -- } - cpumask_copy(d->affinity, cpumask); - spin_unlock_irqrestore(&gic_lock, flags); - diff --git a/target/linux/ramips/patches-3.10/0509-MIPS-Kconfig-CMP-support-needs-to-select-SMP-as-well.patch b/target/linux/ramips/patches-3.10/0509-MIPS-Kconfig-CMP-support-needs-to-select-SMP-as-well.patch deleted file mode 100644 index 1e6afb21de..0000000000 --- a/target/linux/ramips/patches-3.10/0509-MIPS-Kconfig-CMP-support-needs-to-select-SMP-as-well.patch +++ /dev/null @@ -1,52 +0,0 @@ -From 184edf882ebb7885b49fa231a503205da94e78f0 Mon Sep 17 00:00:00 2001 -From: Markos Chandras -Date: Wed, 2 Oct 2013 12:40:26 -0500 -Subject: [PATCH 065/105] MIPS: Kconfig: CMP support needs to select SMP as - well - -The CMP code is only designed to work with SMP configurations. -Fixes multiple build problems on certain randconfigs: - -In file included from arch/mips/kernel/smp-cmp.c:34:0: -arch/mips/include/asm/smp.h:28:0: -error: "raw_smp_processor_id" redefined [-Werror] - -In file included from include/linux/sched.h:30:0, -from arch/mips/kernel/smp-cmp.c:22: -include/linux/smp.h:135:0: note: this is the location of the -previous definition - -In file included from arch/mips/kernel/smp-cmp.c:34:0: -arch/mips/include/asm/smp.h:57:20: -error: redefinition of 'smp_send_reschedule' - -In file included from include/linux/sched.h:30:0, -from arch/mips/kernel/smp-cmp.c:22: -include/linux/smp.h:179:20: note: previous -definition of 'smp_send_reschedule' was here - -In file included from arch/mips/kernel/smp-cmp.c:34:0: -arch/mips/include/asm/smp.h: In function 'smp_send_reschedule': -arch/mips/include/asm/smp.h:61:8: -error: dereferencing pointer to incomplete type -[...] 
- -Signed-off-by: Markos Chandras -Cc: linux-mips@linux-mips.org -Cc: Markos Chandras -Patchwork: https://patchwork.linux-mips.org/patch/5812/ -Signed-off-by: Ralf Baechle ---- - arch/mips/Kconfig | 1 + - 1 file changed, 1 insertion(+) - ---- a/arch/mips/Kconfig -+++ b/arch/mips/Kconfig -@@ -2038,6 +2038,7 @@ config MIPS_VPE_APSP_API - config MIPS_CMP - bool "MIPS CMP framework support" - depends on SYS_SUPPORTS_MIPS_CMP -+ select SMP - select SYNC_R4K - select SYS_SUPPORTS_SMP - select SYS_SUPPORTS_SCHED_SMT if SMP diff --git a/target/linux/ramips/patches-3.10/0510-MIPS-Fix-SMP-core-calculations-when-using-MT-support.patch b/target/linux/ramips/patches-3.10/0510-MIPS-Fix-SMP-core-calculations-when-using-MT-support.patch deleted file mode 100644 index e297876ab0..0000000000 --- a/target/linux/ramips/patches-3.10/0510-MIPS-Fix-SMP-core-calculations-when-using-MT-support.patch +++ /dev/null @@ -1,52 +0,0 @@ -From c4d621e75e865fa5374946515ad0c5e060b9c446 Mon Sep 17 00:00:00 2001 -From: Leonid Yegoshin -Date: Wed, 11 Sep 2013 14:17:47 -0500 -Subject: [PATCH 056/105] MIPS: Fix SMP core calculations when using MT - support. - -The TCBIND register is only available if the core has MT support. It -should not be read otherwise. Secondly, the number of TCs (siblings) -are calculated differently depending on if the kernel is configured -as SMVP or SMTC. - -Signed-off-by: Leonid Yegoshin -Signed-off-by: Steven J. Hill -Cc: linux-mips@linux-mips.org -Patchwork: https://patchwork.linux-mips.org/patch/5822/ -Signed-off-by: Ralf Baechle -(cherry picked from commit 670bac3a8c201fc1f5f92ac6b4a8b42dc8172937) ---- - arch/mips/kernel/smp-cmp.c | 13 +++++++++++-- - 1 file changed, 11 insertions(+), 2 deletions(-) - ---- a/arch/mips/kernel/smp-cmp.c -+++ b/arch/mips/kernel/smp-cmp.c -@@ -99,7 +99,9 @@ static void cmp_init_secondary(void) - - c->core = (read_c0_ebase() >> 1) & 0x1ff; - #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC) -- c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE; -+ if (cpu_has_mipsmt) -+ c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & -+ TCBIND_CURVPE; - #endif - #ifdef CONFIG_MIPS_MT_SMTC - c->tc_id = (read_c0_tcbind() & TCBIND_CURTC) >> TCBIND_CURTC_SHIFT; -@@ -177,9 +179,16 @@ void __init cmp_smp_setup(void) - } - - if (cpu_has_mipsmt) { -- unsigned int nvpe, mvpconf0 = read_c0_mvpconf0(); -+ unsigned int nvpe = 1; -+#ifdef CONFIG_MIPS_MT_SMP -+ unsigned int mvpconf0 = read_c0_mvpconf0(); -+ -+ nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; -+#elif defined(CONFIG_MIPS_MT_SMTC) -+ unsigned int mvpconf0 = read_c0_mvpconf0(); - - nvpe = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; -+#endif - smp_num_siblings = nvpe; - } - pr_info("Detected %i available secondary CPU(s)\n", ncpu); diff --git a/target/linux/ramips/patches-3.10/0511-MIPS-GIC-Send-IPIs-using-the-GIC.patch b/target/linux/ramips/patches-3.10/0511-MIPS-GIC-Send-IPIs-using-the-GIC.patch deleted file mode 100644 index 366fdb2aff..0000000000 --- a/target/linux/ramips/patches-3.10/0511-MIPS-GIC-Send-IPIs-using-the-GIC.patch +++ /dev/null @@ -1,101 +0,0 @@ -From 43334f8438704001deb258b6e7223699bd336c77 Mon Sep 17 00:00:00 2001 -From: "Steven J. Hill" -Date: Wed, 25 Sep 2013 14:58:19 -0500 -Subject: [PATCH 093/105] MIPS: GIC: Send IPIs using the GIC. - -If a GIC present, then use it to send IPIs between the cores. - -Signed-off-by: Steven J. 
Hill ---- - arch/mips/kernel/smp-mt.c | 32 ++++++++++++++++++++++++++++++++ - 1 file changed, 32 insertions(+) - ---- a/arch/mips/kernel/smp-mt.c -+++ b/arch/mips/kernel/smp-mt.c -@@ -71,6 +71,7 @@ static unsigned int __init smvp_vpe_init - - /* Record this as available CPU */ - set_cpu_possible(tc, true); -+ set_cpu_present(tc, true); - __cpu_number_map[tc] = ++ncpu; - __cpu_logical_map[ncpu] = tc; - } -@@ -112,12 +113,35 @@ static void __init smvp_tc_init(unsigned - write_tc_c0_tchalt(TCHALT_H); - } - -+static void mp_send_ipi_single(int cpu, unsigned int action) -+{ -+ unsigned long flags; -+ -+ local_irq_save(flags); -+ -+ switch (action) { -+ case SMP_CALL_FUNCTION: -+ gic_send_ipi(plat_ipi_call_int_xlate(cpu)); -+ break; -+ -+ case SMP_RESCHEDULE_YOURSELF: -+ gic_send_ipi(plat_ipi_resched_int_xlate(cpu)); -+ break; -+ } -+ -+ local_irq_restore(flags); -+} -+ - static void vsmp_send_ipi_single(int cpu, unsigned int action) - { - int i; - unsigned long flags; - int vpflags; - -+ if (gic_present) { -+ mp_send_ipi_single(cpu, action); -+ return; -+ } - local_irq_save(flags); - - vpflags = dvpe(); /* can't access the other CPU's registers whilst MVPE enabled */ -@@ -164,6 +188,8 @@ static void __cpuinit vsmp_init_secondar - - static void __cpuinit vsmp_smp_finish(void) - { -+ pr_debug("SMPMT: CPU%d: vsmp_smp_finish\n", smp_processor_id()); -+ - /* CDFIXME: remove this? */ - write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ)); - -@@ -178,6 +204,7 @@ static void __cpuinit vsmp_smp_finish(vo - - static void vsmp_cpus_done(void) - { -+ pr_debug("SMPMT: CPU%d: vsmp_cpus_done\n", smp_processor_id()); - } - - /* -@@ -191,6 +218,8 @@ static void vsmp_cpus_done(void) - static void __cpuinit vsmp_boot_secondary(int cpu, struct task_struct *idle) - { - struct thread_info *gp = task_thread_info(idle); -+ pr_debug("SMPMT: CPU%d: vsmp_boot_secondary cpu %d\n", -+ smp_processor_id(), cpu); - dvpe(); - set_c0_mvpcontrol(MVPCONTROL_VPC); - -@@ -232,6 +261,7 @@ static void __init vsmp_smp_setup(void) - unsigned int mvpconf0, ntc, tc, ncpu = 0; - unsigned int nvpe; - -+ pr_debug("SMPMT: CPU%d: vsmp_smp_setup\n", smp_processor_id()); - #ifdef CONFIG_MIPS_MT_FPAFF - /* If we have an FPU, enroll ourselves in the FPU-full mask */ - if (cpu_has_fpu) -@@ -272,6 +302,8 @@ static void __init vsmp_smp_setup(void) - - static void __init vsmp_prepare_cpus(unsigned int max_cpus) - { -+ pr_debug("SMPMT: CPU%d: vsmp_prepare_cpus %d\n", -+ smp_processor_id(), max_cpus); - mips_mt_set_cpuoptions(); - } - diff --git a/target/linux/ramips/patches-3.10/0512-USB-add-xhci-support-for-mt7621.patch b/target/linux/ramips/patches-3.10/0512-USB-add-xhci-support-for-mt7621.patch deleted file mode 100644 index 7cc81a2a6d..0000000000 --- a/target/linux/ramips/patches-3.10/0512-USB-add-xhci-support-for-mt7621.patch +++ /dev/null @@ -1,840 +0,0 @@ ---- a/drivers/usb/core/hub.c -+++ b/drivers/usb/core/hub.c -@@ -1254,7 +1254,7 @@ static void hub_quiesce(struct usb_hub * - if (type != HUB_SUSPEND) { - /* Disconnect all the children */ - for (i = 0; i < hdev->maxchild; ++i) { -- if (hub->ports[i]->child) -+ if (hub->ports[i] && hub->ports[i]->child) - usb_disconnect(&hub->ports[i]->child); - } - } ---- a/drivers/usb/core/port.c -+++ b/drivers/usb/core/port.c -@@ -193,6 +193,7 @@ exit: - void usb_hub_remove_port_device(struct usb_hub *hub, - int port1) - { -- device_unregister(&hub->ports[port1 - 1]->dev); -+ if (hub->ports[port1 - 1]) -+ device_unregister(&hub->ports[port1 - 1]->dev); - } - ---- a/drivers/usb/host/Kconfig -+++ 
b/drivers/usb/host/Kconfig -@@ -28,7 +28,11 @@ config USB_XHCI_HCD - if USB_XHCI_HCD - - config USB_XHCI_PLATFORM -- tristate -+ bool "xHCI platform" -+ -+config USB_MT7621_XHCI_PLATFORM -+ bool "MTK MT7621 xHCI" -+ depends on USB_XHCI_PLATFORM - - config USB_XHCI_HCD_DEBUGGING - bool "Debugging for the xHCI host controller" ---- a/drivers/usb/host/Makefile -+++ b/drivers/usb/host/Makefile -@@ -13,15 +13,23 @@ fhci-$(CONFIG_FHCI_DEBUG) += fhci-dbg.o - - xhci-hcd-y := xhci.o xhci-mem.o - xhci-hcd-y += xhci-ring.o xhci-hub.o xhci-dbg.o -+ifndef CONFIG_USB_MT7621_XHCI_PLATFORM - xhci-hcd-$(CONFIG_PCI) += xhci-pci.o -+endif -+ -+ifdef CONFIG_USB_MT7621_XHCI_PLATFORM -+xhci-hcd-y += mtk-phy.o xhci-mtk-scheduler.o xhci-mtk-power.o xhci-mtk.o mtk-phy-7621.o mtk-phy-ahb.o -+endif - - ifneq ($(CONFIG_USB_XHCI_PLATFORM), ) -- xhci-hcd-y += xhci-plat.o -+xhci-hcd-y += xhci-plat.o - endif - - obj-$(CONFIG_USB_WHCI_HCD) += whci/ - -+ifndef CONFIG_USB_MT7621_XHCI_PLATFORM - obj-$(CONFIG_PCI) += pci-quirks.o -+endif - - obj-$(CONFIG_USB_EHCI_HCD) += ehci-hcd.o - obj-$(CONFIG_USB_EHCI_PCI) += ehci-pci.o ---- a/drivers/usb/host/pci-quirks.h -+++ b/drivers/usb/host/pci-quirks.h -@@ -1,7 +1,7 @@ - #ifndef __LINUX_USB_PCI_QUIRKS_H - #define __LINUX_USB_PCI_QUIRKS_H - --#ifdef CONFIG_PCI -+#if defined (CONFIG_PCI) && !defined (CONFIG_USB_MT7621_XHCI_PLATFORM) - void uhci_reset_hc(struct pci_dev *pdev, unsigned long base); - int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base); - #endif /* CONFIG_PCI */ ---- a/drivers/usb/host/xhci.c -+++ b/drivers/usb/host/xhci.c -@@ -30,6 +30,16 @@ - - #include "xhci.h" - -+#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) -+#include -+#include -+#include -+#include "mtk-phy.h" -+#include "xhci-mtk-scheduler.h" -+#include "xhci-mtk-power.h" -+#include "xhci-mtk.h" -+#endif -+ - #define DRIVER_AUTHOR "Sarah Sharp" - #define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver" - -@@ -38,6 +48,18 @@ static int link_quirk; - module_param(link_quirk, int, S_IRUGO | S_IWUSR); - MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB"); - -+#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) -+long xhci_mtk_test_unlock_ioctl(struct file *file, unsigned int cmd, unsigned long arg); -+static struct file_operations xhci_mtk_test_fops = { -+ .owner = THIS_MODULE, -+ .read = xhci_mtk_test_read, -+ .write = xhci_mtk_test_write, -+ .unlocked_ioctl = xhci_mtk_test_unlock_ioctl, -+ .open = xhci_mtk_test_open, -+ .release = xhci_mtk_test_release, -+}; -+#endif -+ - /* TODO: copied from ehci-hcd.c - can this be refactored? 
*/ - /* - * xhci_handshake - spin reading hc until handshake completes or fails -@@ -189,7 +211,7 @@ int xhci_reset(struct xhci_hcd *xhci) - return ret; - } - --#ifdef CONFIG_PCI -+#if defined (CONFIG_PCI) && !defined (CONFIG_USB_MT7621_XHCI_PLATFORM) - static int xhci_free_msi(struct xhci_hcd *xhci) - { - int i; -@@ -389,6 +411,7 @@ static int xhci_try_enable_msi(struct us - return ret; - } - hcd->irq = pdev->irq; -+ - return 0; - } - -@@ -430,6 +453,11 @@ static void compliance_mode_recovery(uns - xhci_dbg(xhci, "Attempting compliance mode recovery\n"); - hcd = xhci->shared_hcd; - -+#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) -+ temp |= (1 << 31); -+ xhci_writel(xhci, temp, xhci->usb3_ports[i]); -+#endif -+ - if (hcd->state == HC_STATE_SUSPENDED) - usb_hcd_resume_root_hub(hcd); - -@@ -478,6 +506,9 @@ bool xhci_compliance_mode_recovery_timer - { - const char *dmi_product_name, *dmi_sys_vendor; - -+#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) -+ return true; -+#endif - dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME); - dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR); - if (!dmi_product_name || !dmi_sys_vendor) -@@ -521,6 +552,10 @@ int xhci_init(struct usb_hcd *hcd) - } else { - xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n"); - } -+ -+#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) -+ mtk_xhci_scheduler_init(); -+#endif - retval = xhci_mem_init(xhci, GFP_KERNEL); - xhci_dbg(xhci, "Finished xhci_init\n"); - -@@ -664,7 +699,11 @@ int xhci_run(struct usb_hcd *hcd) - xhci_dbg(xhci, "// Set the interrupt modulation register\n"); - temp = xhci_readl(xhci, &xhci->ir_set->irq_control); - temp &= ~ER_IRQ_INTERVAL_MASK; -+#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) -+ temp |= (u32) 16; -+#else - temp |= (u32) 160; -+#endif - xhci_writel(xhci, temp, &xhci->ir_set->irq_control); - - /* Set the HCD state before we enable the irqs */ -@@ -685,6 +724,9 @@ int xhci_run(struct usb_hcd *hcd) - xhci_queue_vendor_command(xhci, 0, 0, 0, - TRB_TYPE(TRB_NEC_GET_FW)); - -+#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) -+ enableXhciAllPortPower(xhci); -+#endif - xhci_dbg(xhci, "Finished xhci_run for USB2 roothub\n"); - return 0; - } -@@ -1002,7 +1044,6 @@ int xhci_resume(struct xhci_hcd *xhci, b - - /* If restore operation fails, re-initialize the HC during resume */ - if ((temp & STS_SRE) || hibernated) { -- - if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && - !(xhci_all_ports_seen_u0(xhci))) { - del_timer_sync(&xhci->comp_mode_recovery_timer); -@@ -1586,6 +1627,13 @@ int xhci_drop_endpoint(struct usb_hcd *h - u32 drop_flag; - u32 new_add_flags, new_drop_flags, new_slot_info; - int ret; -+#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) -+#if MTK_SCH_NEW -+ struct sch_ep *sch_ep = NULL; -+ int isTT; -+ int ep_type; -+#endif -+#endif - - ret = xhci_check_args(hcd, udev, ep, 1, true, __func__); - if (ret <= 0) -@@ -1637,6 +1685,40 @@ int xhci_drop_endpoint(struct usb_hcd *h - - xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep); - -+#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) -+#if MTK_SCH_NEW -+ slot_ctx = xhci_get_slot_ctx(xhci, xhci->devs[udev->slot_id]->out_ctx); -+ if ((slot_ctx->tt_info & 0xff) > 0) { -+ isTT = 1; -+ } -+ else { -+ isTT = 0; -+ } -+ if (usb_endpoint_xfer_int(&ep->desc)) { -+ ep_type = USB_EP_INT; -+ } -+ else if (usb_endpoint_xfer_isoc(&ep->desc)) { -+ ep_type = USB_EP_ISOC; -+ } -+ else if (usb_endpoint_xfer_bulk(&ep->desc)) { -+ ep_type = USB_EP_BULK; -+ } -+ else -+ ep_type = USB_EP_CONTROL; -+ -+ sch_ep = mtk_xhci_scheduler_remove_ep(udev->speed, 
usb_endpoint_dir_in(&ep->desc) -+ , isTT, ep_type, (mtk_u32 *)ep); -+ if (sch_ep != NULL) { -+ kfree(sch_ep); -+ } -+ else { -+ xhci_dbg(xhci, "[MTK]Doesn't find ep_sch instance when removing endpoint\n"); -+ } -+#else -+ mtk_xhci_scheduler_remove_ep(xhci, udev, ep); -+#endif -+#endif -+ - xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n", - (unsigned int) ep->desc.bEndpointAddress, - udev->slot_id, -@@ -1672,6 +1754,18 @@ int xhci_add_endpoint(struct usb_hcd *hc - u32 new_add_flags, new_drop_flags, new_slot_info; - struct xhci_virt_device *virt_dev; - int ret = 0; -+#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) -+ struct xhci_ep_ctx *in_ep_ctx; -+#if MTK_SCH_NEW -+ struct sch_ep *sch_ep; -+ int isTT; -+ int ep_type; -+ int maxp = 0; -+ int burst = 0; -+ int mult = 0; -+ int interval; -+#endif -+#endif - - ret = xhci_check_args(hcd, udev, ep, 1, true, __func__); - if (ret <= 0) { -@@ -1734,6 +1828,56 @@ int xhci_add_endpoint(struct usb_hcd *hc - return -ENOMEM; - } - -+#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) -+ in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index); -+#if MTK_SCH_NEW -+ slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); -+ if ((slot_ctx->tt_info & 0xff) > 0) { -+ isTT = 1; -+ } -+ else { -+ isTT = 0; -+ } -+ if (usb_endpoint_xfer_int(&ep->desc)) { -+ ep_type = USB_EP_INT; -+ } -+ else if (usb_endpoint_xfer_isoc(&ep->desc)) { -+ ep_type = USB_EP_ISOC; -+ } -+ else if (usb_endpoint_xfer_bulk(&ep->desc)) { -+ ep_type = USB_EP_BULK; -+ } -+ else -+ ep_type = USB_EP_CONTROL; -+ -+ if (udev->speed == USB_SPEED_FULL || udev->speed == USB_SPEED_HIGH -+ || udev->speed == USB_SPEED_LOW) { -+ maxp = ep->desc.wMaxPacketSize & 0x7FF; -+ burst = ep->desc.wMaxPacketSize >> 11; -+ mult = 0; -+ } -+ else if (udev->speed == USB_SPEED_SUPER) { -+ maxp = ep->desc.wMaxPacketSize & 0x7FF; -+ burst = ep->ss_ep_comp.bMaxBurst; -+ mult = ep->ss_ep_comp.bmAttributes & 0x3; -+ } -+ interval = (1 << ((in_ep_ctx->ep_info >> 16) & 0xff)); -+ sch_ep = kmalloc(sizeof(struct sch_ep), GFP_KERNEL); -+ if (mtk_xhci_scheduler_add_ep(udev->speed, usb_endpoint_dir_in(&ep->desc), -+ isTT, ep_type, maxp, interval, burst, mult, (mtk_u32 *)ep -+ , (mtk_u32 *)in_ep_ctx, sch_ep) != SCH_SUCCESS) { -+ xhci_err(xhci, "[MTK] not enough bandwidth\n"); -+ -+ return -ENOSPC; -+ } -+#else -+ if (mtk_xhci_scheduler_add_ep(xhci, udev, ep, in_ep_ctx) != SCH_SUCCESS) { -+ xhci_err(xhci, "[MTK] not enough bandwidth\n"); -+ -+ return -ENOSPC; -+ } -+#endif -+#endif - ctrl_ctx->add_flags |= cpu_to_le32(added_ctxs); - new_add_flags = le32_to_cpu(ctrl_ctx->add_flags); - -@@ -2697,7 +2841,7 @@ int xhci_check_bandwidth(struct usb_hcd - if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) && - ctrl_ctx->drop_flags == 0) - return 0; -- -+ - xhci_dbg(xhci, "New Input Control Context:\n"); - slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); - xhci_dbg_ctx(xhci, virt_dev->in_ctx, -@@ -4233,10 +4377,14 @@ static u16 xhci_call_host_update_timeout - u16 *timeout) - { - if (state == USB3_LPM_U1) { -+#if !defined (CONFIG_USB_MT7621_XHCI_PLATFORM) - if (xhci->quirks & XHCI_INTEL_HOST) -+#endif - return xhci_calculate_intel_u1_timeout(udev, desc); - } else { -+#if !defined (CONFIG_USB_MT7621_XHCI_PLATFORM) - if (xhci->quirks & XHCI_INTEL_HOST) -+#endif - return xhci_calculate_intel_u2_timeout(udev, desc); - } - -@@ -4662,7 +4810,9 @@ int xhci_gen_setup(struct usb_hcd *hcd, - /* Accept arbitrarily long scatter-gather lists */ - hcd->self.sg_tablesize = ~0; - /* XHCI controllers don't 
stop the ep queue on short packets :| */ -+#if !defined (CONFIG_USB_MT7621_XHCI_PLATFORM) - hcd->self.no_stop_on_short = 1; -+#endif - - if (usb_hcd_is_primary_hcd(hcd)) { - xhci = kzalloc(sizeof(struct xhci_hcd), GFP_KERNEL); -@@ -4731,6 +4881,10 @@ int xhci_gen_setup(struct usb_hcd *hcd, - goto error; - xhci_dbg(xhci, "Reset complete\n"); - -+#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) -+ setInitialReg(); -+#endif -+ - temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params); - if (HCC_64BIT_ADDR(temp)) { - xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n"); -@@ -4755,8 +4909,21 @@ MODULE_DESCRIPTION(DRIVER_DESC); - MODULE_AUTHOR(DRIVER_AUTHOR); - MODULE_LICENSE("GPL"); - -+#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) -+static struct platform_device xhci_platform_dev = { -+ .name = "xhci-hcd", -+ .id = -1, -+ .dev = { -+ .coherent_dma_mask = 0xffffffff, -+ }, -+}; -+#endif -+ - static int __init xhci_hcd_init(void) - { -+#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) -+ struct platform_device *pPlatformDev; -+#endif - int retval; - - retval = xhci_register_pci(); -@@ -4769,6 +4936,33 @@ static int __init xhci_hcd_init(void) - printk(KERN_DEBUG "Problem registering platform driver."); - goto unreg_pci; - } -+ -+#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) -+ retval = register_chrdev(XHCI_MTK_TEST_MAJOR, DEVICE_NAME, &xhci_mtk_test_fops); -+ -+ u3phy_init(); -+ if (u3phy_ops->u2_slew_rate_calibration) { -+ u3phy_ops->u2_slew_rate_calibration(u3phy); -+ u3phy_ops->u2_slew_rate_calibration(u3phy_p1); -+ } -+ else{ -+ printk(KERN_ERR "WARN: PHY doesn't implement u2 slew rate calibration function\n"); -+ } -+ u3phy_ops->init(u3phy); -+ reinitIP(); -+ -+ pPlatformDev = &xhci_platform_dev; -+ memset(pPlatformDev, 0, sizeof(struct platform_device)); -+ pPlatformDev->name = "xhci-hcd"; -+ pPlatformDev->id = -1; -+ pPlatformDev->dev.coherent_dma_mask = 0xffffffff; -+ pPlatformDev->dev.dma_mask = &pPlatformDev->dev.coherent_dma_mask; -+ -+ retval = platform_device_register(&xhci_platform_dev); -+ if (retval < 0) -+ xhci_unregister_plat(); -+#endif -+ - /* - * Check the compiler generated sizes of structures that must be laid - * out in specific ways for hardware access. 
-@@ -4786,6 +4980,7 @@ static int __init xhci_hcd_init(void) - BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8); - /* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */ - BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8); -+ - return 0; - unreg_pci: - xhci_unregister_pci(); ---- a/drivers/usb/host/xhci-dbg.c -+++ b/drivers/usb/host/xhci-dbg.c -@@ -21,6 +21,9 @@ - */ - - #include "xhci.h" -+#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) -+#include "xhci-mtk.h" -+#endif - - #define XHCI_INIT_VALUE 0x0 - ---- a/drivers/usb/host/xhci.h -+++ b/drivers/usb/host/xhci.h -@@ -29,9 +29,24 @@ - #include - - /* Code sharing between pci-quirks and xhci hcd */ --#include "xhci-ext-caps.h" -+#include "xhci-ext-caps.h" - #include "pci-quirks.h" - -+#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) -+#define XHC_IRQ (22 + 8) -+#define XHC_IO_START 0x1E1C0000 -+#define XHC_IO_LENGTH 0x10000 -+/* mtk scheduler bitmasks */ -+#define BPKTS(p) ((p) & 0x3f) -+#define BCSCOUNT(p) (((p) & 0x7) << 8) -+#define BBM(p) ((p) << 11) -+#define BOFFSET(p) ((p) & 0x3fff) -+#define BREPEAT(p) (((p) & 0x7fff) << 16) -+#endif -+ -+ -+ -+ - /* xHCI PCI Configuration Registers */ - #define XHCI_SBRN_OFFSET (0x60) - -@@ -1536,8 +1551,12 @@ struct xhci_hcd { - /* Compliance Mode Recovery Data */ - struct timer_list comp_mode_recovery_timer; - u32 port_status_u0; -+#ifdef CONFIG_USB_MT7621_XHCI_PLATFORM -+#define COMP_MODE_RCVRY_MSECS 5000 -+#else - /* Compliance Mode Timer Triggered every 2 seconds */ - #define COMP_MODE_RCVRY_MSECS 2000 -+#endif - }; - - /* convert between an HCD pointer and the corresponding EHCI_HCD */ -@@ -1703,7 +1722,7 @@ void xhci_urb_free_priv(struct xhci_hcd - void xhci_free_command(struct xhci_hcd *xhci, - struct xhci_command *command); - --#ifdef CONFIG_PCI -+#if defined (CONFIG_PCI) && !defined (CONFIG_USB_MT7621_XHCI_PLATFORM) - /* xHCI PCI glue */ - int xhci_register_pci(void); - void xhci_unregister_pci(void); ---- a/drivers/usb/host/xhci-mem.c -+++ b/drivers/usb/host/xhci-mem.c -@@ -65,6 +65,9 @@ static struct xhci_segment *xhci_segment - - static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg) - { -+ if (!seg) -+ return; -+ - if (seg->trbs) { - dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma); - seg->trbs = NULL; -@@ -1446,9 +1449,17 @@ int xhci_endpoint_init(struct xhci_hcd * - max_burst = (usb_endpoint_maxp(&ep->desc) - & 0x1800) >> 11; - } -+#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) -+ if ((max_packet % 4 == 2) && (max_packet % 16 != 14) && (max_burst == 0) && usb_endpoint_dir_in(&ep->desc)) -+ max_packet += 2; -+#endif - break; - case USB_SPEED_FULL: - case USB_SPEED_LOW: -+#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) -+ if ((max_packet % 4 == 2) && (max_packet % 16 != 14) && (max_burst == 0) && usb_endpoint_dir_in(&ep->desc)) -+ max_packet += 2; -+#endif - break; - default: - BUG(); ---- a/drivers/usb/host/xhci-plat.c -+++ b/drivers/usb/host/xhci-plat.c -@@ -25,6 +25,13 @@ static void xhci_plat_quirks(struct devi - * dev struct in order to setup MSI - */ - xhci->quirks |= XHCI_PLAT; -+#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) -+ /* MTK host controller gives a spurious successful event after a -+ * short transfer. Ignore it. 
-+ */ -+ xhci->quirks |= XHCI_SPURIOUS_SUCCESS; -+ xhci->quirks |= XHCI_LPM_SUPPORT; -+#endif - } - - /* called during probe() after chip reset completes */ -@@ -96,20 +103,32 @@ static int xhci_plat_probe(struct platfo - - driver = &xhci_plat_xhci_driver; - -+#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) -+ irq = XHC_IRQ; -+#else - irq = platform_get_irq(pdev, 0); -+#endif -+ - if (irq < 0) - return -ENODEV; - -+#if !defined (CONFIG_USB_MT7621_XHCI_PLATFORM) - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) - return -ENODEV; -+#endif - - hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev)); - if (!hcd) - return -ENOMEM; - -+#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) -+ hcd->rsrc_start = (uint32_t)XHC_IO_START; -+ hcd->rsrc_len = XHC_IO_LENGTH; -+#else - hcd->rsrc_start = res->start; - hcd->rsrc_len = resource_size(res); -+#endif - - if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, - driver->description)) { ---- a/drivers/usb/host/xhci-ring.c -+++ b/drivers/usb/host/xhci-ring.c -@@ -236,7 +236,6 @@ static void inc_enq(struct xhci_hcd *xhc - */ - if (!chain && !more_trbs_coming) - break; -- - /* If we're not dealing with 0.95 hardware or - * isoc rings on AMD 0.96 host, - * carry over the chain bit of the previous TRB -@@ -273,16 +272,20 @@ static void inc_enq(struct xhci_hcd *xhc - static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring, - unsigned int num_trbs) - { -+#if !defined (CONFIG_USB_MT7621_XHCI_PLATFORM) - int num_trbs_in_deq_seg; -+#endif - - if (ring->num_trbs_free < num_trbs) - return 0; - -+#if !defined (CONFIG_USB_MT7621_XHCI_PLATFORM) - if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) { - num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs; - if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg) - return 0; - } -+#endif - - return 1; - } -@@ -2910,6 +2913,7 @@ static int prepare_ring(struct xhci_hcd - next = ring->enqueue; - - while (last_trb(xhci, ring, ring->enq_seg, next)) { -+#if !defined (CONFIG_USB_MT7621_XHCI_PLATFORM) - /* If we're not dealing with 0.95 hardware or isoc rings - * on AMD 0.96 host, clear the chain bit. 
- */ -@@ -2919,7 +2923,9 @@ static int prepare_ring(struct xhci_hcd - next->link.control &= cpu_to_le32(~TRB_CHAIN); - else - next->link.control |= cpu_to_le32(TRB_CHAIN); -- -+#else -+ next->link.control &= cpu_to_le32(~TRB_CHAIN); -+#endif - wmb(); - next->link.control ^= cpu_to_le32(TRB_CYCLE); - -@@ -3049,6 +3055,9 @@ static void giveback_first_trb(struct xh - start_trb->field[3] |= cpu_to_le32(start_cycle); - else - start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE); -+#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) -+ wmb(); -+#endif - xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id); - } - -@@ -3108,6 +3117,29 @@ static u32 xhci_td_remainder(unsigned in - return (remainder >> 10) << 17; - } - -+#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) -+static u32 mtk_xhci_td_remainder(unsigned int td_transfer_size, unsigned int td_running_total, unsigned int maxp, unsigned trb_buffer_length) -+{ -+ u32 max = 31; -+ int remainder, td_packet_count, packet_transferred; -+ -+ //0 for the last TRB -+ //FIXME: need to workaround if there is ZLP in this TD -+ if (td_running_total + trb_buffer_length == td_transfer_size) -+ return 0; -+ -+ //FIXME: need to take care of high-bandwidth (MAX_ESIT) -+ packet_transferred = (td_running_total /*+ trb_buffer_length*/) / maxp; -+ td_packet_count = DIV_ROUND_UP(td_transfer_size, maxp); -+ remainder = td_packet_count - packet_transferred; -+ -+ if (remainder > max) -+ return max << 17; -+ else -+ return remainder << 17; -+} -+#endif -+ - /* - * For xHCI 1.0 host controllers, TD size is the number of max packet sized - * packets remaining in the TD (*not* including this TRB). -@@ -3245,6 +3277,7 @@ static int queue_bulk_sg_tx(struct xhci_ - } - - /* Set the TRB length, TD size, and interrupter fields. */ -+#if !defined (CONFIG_USB_MT7621_XHCI_PLATFORM) - if (xhci->hci_version < 0x100) { - remainder = xhci_td_remainder( - urb->transfer_buffer_length - -@@ -3254,6 +3287,13 @@ static int queue_bulk_sg_tx(struct xhci_ - trb_buff_len, total_packet_count, urb, - num_trbs - 1); - } -+#else -+ if (num_trbs > 1) -+ remainder = mtk_xhci_td_remainder(urb->transfer_buffer_length, -+ running_total, urb->ep->desc.wMaxPacketSize, trb_buff_len); -+#endif -+ -+ - length_field = TRB_LEN(trb_buff_len) | - remainder | - TRB_INTR_TARGET(0); -@@ -3316,6 +3356,9 @@ int xhci_queue_bulk_tx(struct xhci_hcd * - int running_total, trb_buff_len, ret; - unsigned int total_packet_count; - u64 addr; -+#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) -+ int max_packet; -+#endif - - if (urb->num_sgs) - return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index); -@@ -3341,6 +3384,25 @@ int xhci_queue_bulk_tx(struct xhci_hcd * - running_total += TRB_MAX_BUFF_SIZE; - } - /* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */ -+#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) -+ switch(urb->dev->speed){ -+ case USB_SPEED_SUPER: -+ max_packet = urb->ep->desc.wMaxPacketSize; -+ break; -+ case USB_SPEED_HIGH: -+ case USB_SPEED_FULL: -+ case USB_SPEED_LOW: -+ case USB_SPEED_WIRELESS: -+ case USB_SPEED_UNKNOWN: -+ default: -+ max_packet = urb->ep->desc.wMaxPacketSize & 0x7ff; -+ break; -+ } -+ if((urb->transfer_flags & URB_ZERO_PACKET) -+ && ((urb->transfer_buffer_length % max_packet) == 0)){ -+ num_trbs++; -+ } -+#endif - - ret = prepare_transfer(xhci, xhci->devs[slot_id], - ep_index, urb->stream_id, -@@ -3400,6 +3462,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd * - field |= TRB_ISP; - - /* Set the TRB length, TD size, and interrupter fields. 
*/ -+#if !defined (CONFIG_USB_MT7621_XHCI_PLATFORM) - if (xhci->hci_version < 0x100) { - remainder = xhci_td_remainder( - urb->transfer_buffer_length - -@@ -3409,6 +3472,10 @@ int xhci_queue_bulk_tx(struct xhci_hcd * - trb_buff_len, total_packet_count, urb, - num_trbs - 1); - } -+#else -+ remainder = mtk_xhci_td_remainder(urb->transfer_buffer_length, running_total, max_packet, trb_buff_len); -+#endif -+ - length_field = TRB_LEN(trb_buff_len) | - remainder | - TRB_INTR_TARGET(0); -@@ -3498,7 +3565,11 @@ int xhci_queue_ctrl_tx(struct xhci_hcd * - field |= 0x1; - - /* xHCI 1.0 6.4.1.2.1: Transfer Type field */ -+#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) -+ if (1) { -+#else - if (xhci->hci_version == 0x100) { -+#endif - if (urb->transfer_buffer_length > 0) { - if (setup->bRequestType & USB_DIR_IN) - field |= TRB_TX_TYPE(TRB_DATA_IN); -@@ -3522,7 +3593,12 @@ int xhci_queue_ctrl_tx(struct xhci_hcd * - field = TRB_TYPE(TRB_DATA); - - length_field = TRB_LEN(urb->transfer_buffer_length) | -+#if !defined (CONFIG_USB_MT7621_XHCI_PLATFORM) - xhci_td_remainder(urb->transfer_buffer_length) | -+#else -+ //CC: MTK style, no scatter-gather for control transfer -+ 0 | -+#endif - TRB_INTR_TARGET(0); - if (urb->transfer_buffer_length > 0) { - if (setup->bRequestType & USB_DIR_IN) -@@ -3533,7 +3609,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd * - length_field, - field | ep_ring->cycle_state); - } -- -+ - /* Save the DMA address of the last TRB in the TD */ - td->last_trb = ep_ring->enqueue; - -@@ -3645,6 +3721,9 @@ static int xhci_queue_isoc_tx(struct xhc - u64 start_addr, addr; - int i, j; - bool more_trbs_coming; -+#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) -+ int max_packet; -+#endif - - ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; - -@@ -3658,6 +3737,21 @@ static int xhci_queue_isoc_tx(struct xhc - start_trb = &ep_ring->enqueue->generic; - start_cycle = ep_ring->cycle_state; - -+#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) -+ switch(urb->dev->speed){ -+ case USB_SPEED_SUPER: -+ max_packet = urb->ep->desc.wMaxPacketSize; -+ break; -+ case USB_SPEED_HIGH: -+ case USB_SPEED_FULL: -+ case USB_SPEED_LOW: -+ case USB_SPEED_WIRELESS: -+ case USB_SPEED_UNKNOWN: -+ max_packet = urb->ep->desc.wMaxPacketSize & 0x7ff; -+ break; -+ } -+#endif -+ - urb_priv = urb->hcpriv; - /* Queue the first TRB, even if it's zero-length */ - for (i = 0; i < num_tds; i++) { -@@ -3729,9 +3823,13 @@ static int xhci_queue_isoc_tx(struct xhc - } else { - td->last_trb = ep_ring->enqueue; - field |= TRB_IOC; -+#if defined (CONFIG_USB_MT7621_XHCI_PLATFORM) -+ if (!(xhci->quirks & XHCI_AVOID_BEI)) { -+#else - if (xhci->hci_version == 0x100 && - !(xhci->quirks & - XHCI_AVOID_BEI)) { -+#endif - /* Set BEI bit except for the last td */ - if (i < num_tds - 1) - field |= TRB_BEI; -@@ -3746,6 +3844,7 @@ static int xhci_queue_isoc_tx(struct xhc - trb_buff_len = td_remain_len; - - /* Set the TRB length, TD size, & interrupter fields. 
*/ -+#if !defined (CONFIG_USB_MT7621_XHCI_PLATFORM) - if (xhci->hci_version < 0x100) { - remainder = xhci_td_remainder( - td_len - running_total); -@@ -3755,6 +3854,10 @@ static int xhci_queue_isoc_tx(struct xhc - total_packet_count, urb, - (trbs_per_td - j - 1)); - } -+#else -+ remainder = mtk_xhci_td_remainder(urb->transfer_buffer_length, running_total, max_packet, trb_buff_len); -+#endif -+ - length_field = TRB_LEN(trb_buff_len) | - remainder | - TRB_INTR_TARGET(0); diff --git a/target/linux/ramips/patches-3.10/800-eco.patch b/target/linux/ramips/patches-3.10/800-eco.patch deleted file mode 100644 index 08b2bb0ad9..0000000000 --- a/target/linux/ramips/patches-3.10/800-eco.patch +++ /dev/null @@ -1,12 +0,0 @@ ---- a/arch/mips/include/asm/mach-ralink/mt7620.h -+++ b/arch/mips/include/asm/mach-ralink/mt7620.h -@@ -101,4 +101,9 @@ - #define MT7620_GPIO_MODE_EPHY 15 - #define MT7620_GPIO_MODE_PA 20 - -+static inline int mt7620_get_eco(void) -+{ -+ return rt_sysc_r32(SYSC_REG_CHIP_REV) & CHIP_REV_ECO_MASK; -+} -+ - #endif diff --git a/target/linux/ramips/patches-3.10/999-clk.patch b/target/linux/ramips/patches-3.10/999-clk.patch deleted file mode 100644 index 22ac33aa54..0000000000 --- a/target/linux/ramips/patches-3.10/999-clk.patch +++ /dev/null @@ -1,17 +0,0 @@ -Index: linux-3.10.32/arch/mips/ralink/clk.c -=================================================================== ---- linux-3.10.32.orig/arch/mips/ralink/clk.c 2014-03-12 03:04:05.468396764 +0000 -+++ linux-3.10.32/arch/mips/ralink/clk.c 2014-03-12 03:29:00.220416177 +0000 -@@ -56,6 +56,12 @@ - } - EXPORT_SYMBOL_GPL(clk_get_rate); - -+int clk_set_rate(struct clk *clk, unsigned long rate) -+{ -+ return -1; -+} -+EXPORT_SYMBOL_GPL(clk_set_rate); -+ - void __init plat_time_init(void) - { - struct clk *clk; diff --git a/target/linux/ramips/patches-3.10/999-memory-detect.patch b/target/linux/ramips/patches-3.10/999-memory-detect.patch deleted file mode 100644 index 165a1f0ded..0000000000 --- a/target/linux/ramips/patches-3.10/999-memory-detect.patch +++ /dev/null @@ -1,32 +0,0 @@ ---- a/arch/mips/ralink/of.c -+++ b/arch/mips/ralink/of.c -@@ -80,6 +80,16 @@ void __init device_tree_init(void) - } - - extern struct boot_param_header __image_dtb; -+static int memory_dtb; -+ -+static int __init early_init_dt_find_memory(unsigned long node, const char *uname, -+ int depth, void *data) -+{ -+ if (depth == 1 && !strcmp(uname, "memory@0")) -+ memory_dtb = 1; -+ -+ return 0; -+} - - void __init plat_mem_setup(void) - { -@@ -90,8 +100,10 @@ void __init plat_mem_setup(void) - * parsed resulting in our memory appearing - */ - __dt_setup_arch(&__image_dtb); -- -- if (soc_info.mem_size) -+ of_scan_flat_dt(early_init_dt_find_memory, NULL); -+ if (memory_dtb) -+ of_scan_flat_dt(early_init_dt_scan_memory, NULL); -+ else if (soc_info.mem_size) - add_memory_region(soc_info.mem_base, soc_info.mem_size * SZ_1M, - BOOT_MEM_RAM); - else diff --git a/target/linux/ramips/patches-3.10/999-mt7620n.patch b/target/linux/ramips/patches-3.10/999-mt7620n.patch deleted file mode 100644 index da5c6f9c7a..0000000000 --- a/target/linux/ramips/patches-3.10/999-mt7620n.patch +++ /dev/null @@ -1,53 +0,0 @@ ---- a/arch/mips/include/asm/mach-ralink/mt7620.h -+++ b/arch/mips/include/asm/mach-ralink/mt7620.h -@@ -24,11 +24,8 @@ - #define SYSC_REG_CPLL_CONFIG0 0x54 - #define SYSC_REG_CPLL_CONFIG1 0x58 - --#define MT7620N_CHIP_NAME0 0x33365452 --#define MT7620N_CHIP_NAME1 0x20203235 -- --#define MT7620A_CHIP_NAME0 0x3637544d --#define MT7620A_CHIP_NAME1 0x20203032 -+#define 
MT7620_CHIP_NAME0 0x3637544d -+#define MT7620_CHIP_NAME1 0x20203032 - - #define CHIP_REV_PKG_MASK 0x1 - #define CHIP_REV_PKG_SHIFT 16 ---- a/arch/mips/ralink/mt7620.c -+++ b/arch/mips/ralink/mt7620.c -@@ -167,22 +167,27 @@ void prom_soc_init(struct ralink_soc_inf - u32 cfg0; - u32 pmu0; - u32 pmu1; -+ u32 bga; - - n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0); - n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1); -+ rev = __raw_readl(sysc + SYSC_REG_CHIP_REV); -+ bga = (rev >> CHIP_REV_PKG_SHIFT) & CHIP_REV_PKG_MASK; - -- if (n0 == MT7620N_CHIP_NAME0 && n1 == MT7620N_CHIP_NAME1) { -- name = "MT7620N"; -- soc_info->compatible = "ralink,mt7620n-soc"; -- } else if (n0 == MT7620A_CHIP_NAME0 && n1 == MT7620A_CHIP_NAME1) { -+ if (n0 != MT7620_CHIP_NAME0 || n1 != MT7620_CHIP_NAME1) -+ panic("mt7620: unknown SoC, n0:%08x n1:%08x\n", n0, n1); -+ -+ if (bga) { - name = "MT7620A"; - soc_info->compatible = "ralink,mt7620a-soc"; - } else { -- panic("mt7620: unknown SoC, n0:%08x n1:%08x\n", n0, n1); -+ name = "MT7620N"; -+ soc_info->compatible = "ralink,mt7620n-soc"; -+#ifdef CONFIG_PCI -+ panic("mt7620n is only supported for non pci kernels"); -+#endif - } - -- rev = __raw_readl(sysc + SYSC_REG_CHIP_REV); -- - snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN, - "Ralink %s ver:%u eco:%u", - name, diff --git a/target/linux/ramips/patches-3.10/999-pinctrl_fixes.patch b/target/linux/ramips/patches-3.10/999-pinctrl_fixes.patch deleted file mode 100644 index d7ad676f09..0000000000 --- a/target/linux/ramips/patches-3.10/999-pinctrl_fixes.patch +++ /dev/null @@ -1,33 +0,0 @@ ---- a/drivers/pinctrl/pinctrl-rt2880.c -+++ b/drivers/pinctrl/pinctrl-rt2880.c -@@ -204,6 +204,7 @@ static int rt2880_pmx_group_enable(struc - { - struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev); - u32 mode = 0; -+ int i; - - /* dont allow double use */ - if (p->groups[group].enabled) { -@@ -217,16 +218,16 @@ static int rt2880_pmx_group_enable(struc - mode = rt_sysc_r32(SYSC_REG_GPIO_MODE); - mode &= ~(p->groups[group].mask << p->groups[group].shift); - -+ /* mark the pins as gpio */ -+ for (i = 0; i < p->groups[group].func[0].pin_count; i++) -+ p->gpio[p->groups[group].func[0].pins[i]] = 1; -+ - /* function 0 is gpio and needs special handling */ - if (func == 0) { -- int i; -- -- - mode |= p->groups[group].gpio << p->groups[group].shift; -- /* mark the pins as gpio */ -- for (i = 0; i < p->groups[group].func[0].pin_count; i++) -- p->gpio[p->groups[group].func[0].pins[i]] = 1; - } else { -+ for (i = 0; i < p->func[func]->pin_count; i++) -+ p->gpio[p->func[func]->pins[i]] = 0; - mode |= p->func[func]->value << p->groups[group].shift; - } - rt_sysc_w32(mode, SYSC_REG_GPIO_MODE); diff --git a/target/linux/ramips/patches-3.10/999-raeth_fixes.patch b/target/linux/ramips/patches-3.10/999-raeth_fixes.patch deleted file mode 100644 index 5e1bb4d2ec..0000000000 --- a/target/linux/ramips/patches-3.10/999-raeth_fixes.patch +++ /dev/null @@ -1,21 +0,0 @@ ---- a/drivers/net/ethernet/ralink/ralink_soc_eth.c -+++ b/drivers/net/ethernet/ralink/ralink_soc_eth.c -@@ -335,7 +335,7 @@ static int fe_start_xmit(struct sk_buff - if (priv->soc->tso) - fe_start_tso(skb, dev, nr_frags, tx); - -- if (skb_shinfo(skb)->gso_segs > 1) { -+ if (priv->soc->tso && (skb_shinfo(skb)->gso_segs > 1)) { - struct iphdr *iph = NULL; - struct tcphdr *th = NULL; - struct ipv6hdr *ip6h = NULL; -@@ -741,8 +741,7 @@ static int fe_probe(struct platform_devi - dev_info(&pdev->dev, "Enabling TSO\n"); - netdev->features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_IPV6_CSUM; 
- } -- -- netdev->hw_features = netdev->vlan_features = netdev->features; -+ netdev->hw_features = netdev->features; - - netdev->irq = platform_get_irq(pdev, 0); - if (netdev->irq < 0) { diff --git a/target/linux/ramips/rt288x/config-3.10 b/target/linux/ramips/rt288x/config-3.10 index a3a12a9e7f..f1ae652ec6 100644 --- a/target/linux/ramips/rt288x/config-3.10 +++ b/target/linux/ramips/rt288x/config-3.10 @@ -1,6 +1,7 @@ CONFIG_ARCH_BINFMT_ELF_RANDOMIZE_PIE=y CONFIG_ARCH_DISCARD_MEMBLOCK=y CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE=y +CONFIG_ARCH_HAS_RESET_CONTROLLER=y CONFIG_ARCH_HAVE_CUSTOM_GPIO_H=y CONFIG_ARCH_HIBERNATION_POSSIBLE=y CONFIG_ARCH_REQUIRE_GPIOLIB=y @@ -82,6 +83,7 @@ CONFIG_IRQCHIP=y CONFIG_IRQ_CPU=y CONFIG_IRQ_DOMAIN=y CONFIG_IRQ_FORCED_THREADING=y +CONFIG_IRQ_INTC=y CONFIG_IRQ_WORK=y CONFIG_M25PXX_USE_FAST_READ=y CONFIG_MDIO_BOARDINFO=y @@ -127,6 +129,7 @@ CONFIG_PINMUX=y # CONFIG_PREEMPT_RCU is not set CONFIG_RALINK=y CONFIG_RALINK_WDT=y +CONFIG_RA_NAT_NONE=y # CONFIG_RCU_STALL_COMMON is not set CONFIG_RESET_CONTROLLER=y # CONFIG_SCSI_DMA is not set @@ -136,6 +139,7 @@ CONFIG_SERIAL_OF_PLATFORM=y # CONFIG_SLAB is not set CONFIG_SLUB=y # CONFIG_SOC_MT7620 is not set +# CONFIG_SOC_MT7621 is not set CONFIG_SOC_RT288X=y # CONFIG_SOC_RT305X is not set # CONFIG_SOC_RT3883 is not set
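Editor's sketch (not part of the commit): the nand_bbt_set()/nand_bbt_get() helpers added in the nand_bbt.c hunk earlier in this diff keep two status bits per eraseblock, i.e. four blocks per byte of the in-memory bad-block table. The kernel helpers work on a doubled block index derived from the page number (hence the `>> 3` and `& 0x6` there); the standalone sketch below uses a plain block number, so the shift amounts differ, but the packing is the same. Buffer size and block numbers are made up for illustration and are not values from the driver.

    /* Minimal user-space illustration of the 2-bits-per-block BBT packing. */
    #include <stdio.h>
    #include <stdint.h>

    #define BBT_BLOCKS 32                   /* hypothetical number of eraseblocks */
    static uint8_t bbt[BBT_BLOCKS / 4];     /* 2 bits per block -> 4 blocks per byte */

    static void bbt_set(int block, int flag)
    {
        /* clear the 2-bit slot for this block, then store the new flag */
        bbt[block >> 2] &= ~(0x03 << ((block & 0x3) << 1));
        bbt[block >> 2] |= (flag & 0x03) << ((block & 0x3) << 1);
    }

    static int bbt_get(int block)
    {
        return (bbt[block >> 2] >> ((block & 0x3) << 1)) & 0x03;
    }

    int main(void)
    {
        bbt_set(5, 0x3);                    /* e.g. mark block 5 as factory bad */
        printf("block 5 status: %d\n", bbt_get(5));
        printf("block 6 status: %d\n", bbt_get(6));
        return 0;
    }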