#define ETH_SWITCH_HEADER_LEN 2
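+/* Forward declarations: the MAC reset path flushes the TX ring and the
+ * link-adjust path re-runs the QCA955x SGMII fixup, both defined below. */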
+static int ag71xx_tx_packets(struct ag71xx *ag, bool flush);
+static void ag71xx_qca955x_sgmii_init(void);
+
static inline unsigned int ag71xx_max_frame_len(unsigned int mtu)
{
return ETH_SWITCH_HEADER_LEN + ETH_HLEN + VLAN_HLEN + mtu + ETH_FCS_LEN;
}
-static void ag71xx_ring_free(struct ag71xx_ring *ring)
-{
- kfree(ring->buf);
-
- if (ring->descs_cpu)
- dma_free_coherent(NULL, ring->size * ring->desc_size,
- ring->descs_cpu, ring->descs_dma);
-}
-
-static int ag71xx_ring_alloc(struct ag71xx_ring *ring)
-{
- int err;
- int i;
-
- ring->desc_size = sizeof(struct ag71xx_desc);
- if (ring->desc_size % cache_line_size()) {
- DBG("ag71xx: ring %p, desc size %u rounded to %u\n",
- ring, ring->desc_size,
- roundup(ring->desc_size, cache_line_size()));
- ring->desc_size = roundup(ring->desc_size, cache_line_size());
- }
-
- ring->descs_cpu = dma_alloc_coherent(NULL, ring->size * ring->desc_size,
- &ring->descs_dma, GFP_ATOMIC);
- if (!ring->descs_cpu) {
- err = -ENOMEM;
- goto err;
- }
-
- ring->buf = kzalloc(ring->size * sizeof(*ring->buf), GFP_KERNEL);
- if (!ring->buf) {
- err = -ENOMEM;
- goto err;
- }
-
- for (i = 0; i < ring->size; i++) {
- int idx = i * ring->desc_size;
- ring->buf[i].desc = (struct ag71xx_desc *)&ring->descs_cpu[idx];
- DBG("ag71xx: ring %p, desc %d at %p\n",
- ring, i, ring->buf[i].desc);
- }
-
- return 0;
-
-err:
- return err;
-}
-
static void ag71xx_ring_tx_clean(struct ag71xx *ag)
{
struct ag71xx_ring *ring = &ag->tx_ring;
struct net_device *dev = ag->dev;
+ int ring_mask = BIT(ring->order) - 1;
u32 bytes_compl = 0, pkts_compl = 0;
while (ring->curr != ring->dirty) {
- u32 i = ring->dirty % ring->size;
+ struct ag71xx_desc *desc;
+ u32 i = ring->dirty & ring_mask;
- if (!ag71xx_desc_empty(ring->buf[i].desc)) {
- ring->buf[i].desc->ctrl = 0;
+ desc = ag71xx_ring_desc(ring, i);
+ if (!ag71xx_desc_empty(desc)) {
+ desc->ctrl = 0;
dev->stats.tx_errors++;
}
static void ag71xx_ring_tx_init(struct ag71xx *ag)
{
struct ag71xx_ring *ring = &ag->tx_ring;
+ int ring_size = BIT(ring->order);
+ int ring_mask = ring_size - 1;
int i;
- for (i = 0; i < ring->size; i++) {
- ring->buf[i].desc->next = (u32) (ring->descs_dma +
- ring->desc_size * ((i + 1) % ring->size));
+ for (i = 0; i < ring_size; i++) {
+ struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
+
+ desc->next = (u32) (ring->descs_dma +
+ AG71XX_DESC_SIZE * ((i + 1) & ring_mask));
- ring->buf[i].desc->ctrl = DESC_EMPTY;
+ desc->ctrl = DESC_EMPTY;
ring->buf[i].skb = NULL;
}
static void ag71xx_ring_rx_clean(struct ag71xx *ag)
{
struct ag71xx_ring *ring = &ag->rx_ring;
+ int ring_size = BIT(ring->order);
int i;
if (!ring->buf)
return;
- for (i = 0; i < ring->size; i++)
+ for (i = 0; i < ring_size; i++)
if (ring->buf[i].rx_buf) {
dma_unmap_single(&ag->dev->dev, ring->buf[i].dma_addr,
ag->rx_buf_size, DMA_FROM_DEVICE);
- kfree(ring->buf[i].rx_buf);
+ skb_free_frag(ring->buf[i].rx_buf);
}
}
return offset + NET_IP_ALIGN;
}
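+/* Size of the fragment backing one RX buffer: the DMA-visible packet area
+ * plus room for the skb_shared_info that build_skb() appends. */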
+static int ag71xx_buffer_size(struct ag71xx *ag)
+{
+ return ag->rx_buf_size +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+}
+
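+/* The buffer allocator is passed in by the caller: ring init uses
+ * netdev_alloc_frag(), the NAPI refill path uses napi_alloc_frag(). */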
static bool ag71xx_fill_rx_buf(struct ag71xx *ag, struct ag71xx_buf *buf,
- int offset)
+ int offset,
+ void *(*alloc)(unsigned int size))
{
+ struct ag71xx_ring *ring = &ag->rx_ring;
+ struct ag71xx_desc *desc = ag71xx_ring_desc(ring, buf - &ring->buf[0]);
void *data;
- data = kmalloc(ag->rx_buf_size +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
- GFP_ATOMIC);
+ data = alloc(ag71xx_buffer_size(ag));
if (!data)
return false;
buf->rx_buf = data;
buf->dma_addr = dma_map_single(&ag->dev->dev, data, ag->rx_buf_size,
DMA_FROM_DEVICE);
- buf->desc->data = (u32) buf->dma_addr + offset;
+ desc->data = (u32) buf->dma_addr + offset;
return true;
}
static int ag71xx_ring_rx_init(struct ag71xx *ag)
{
struct ag71xx_ring *ring = &ag->rx_ring;
+ int ring_size = BIT(ring->order);
+ int ring_mask = BIT(ring->order) - 1;
unsigned int i;
int ret;
int offset = ag71xx_buffer_offset(ag);
ret = 0;
- for (i = 0; i < ring->size; i++) {
- ring->buf[i].desc->next = (u32) (ring->descs_dma +
- ring->desc_size * ((i + 1) % ring->size));
+ for (i = 0; i < ring_size; i++) {
+ struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
+
+ desc->next = (u32) (ring->descs_dma +
+ AG71XX_DESC_SIZE * ((i + 1) & ring_mask));
DBG("ag71xx: RX desc at %p, next is %08x\n",
- ring->buf[i].desc,
- ring->buf[i].desc->next);
+ desc, desc->next);
}
- for (i = 0; i < ring->size; i++) {
- if (!ag71xx_fill_rx_buf(ag, &ring->buf[i], offset)) {
+ for (i = 0; i < ring_size; i++) {
+ struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
+
+ if (!ag71xx_fill_rx_buf(ag, &ring->buf[i], offset,
+ netdev_alloc_frag)) {
ret = -ENOMEM;
break;
}
- ring->buf[i].desc->ctrl = DESC_EMPTY;
+ desc->ctrl = DESC_EMPTY;
}
/* flush descriptors */
static int ag71xx_ring_rx_refill(struct ag71xx *ag)
{
struct ag71xx_ring *ring = &ag->rx_ring;
+ int ring_mask = BIT(ring->order) - 1;
unsigned int count;
int offset = ag71xx_buffer_offset(ag);
count = 0;
for (; ring->curr - ring->dirty > 0; ring->dirty++) {
+ struct ag71xx_desc *desc;
unsigned int i;
- i = ring->dirty % ring->size;
+ i = ring->dirty & ring_mask;
+ desc = ag71xx_ring_desc(ring, i);
if (!ring->buf[i].rx_buf &&
- !ag71xx_fill_rx_buf(ag, &ring->buf[i], offset))
+ !ag71xx_fill_rx_buf(ag, &ring->buf[i], offset,
+ napi_alloc_frag))
break;
- ring->buf[i].desc->ctrl = DESC_EMPTY;
+ desc->ctrl = DESC_EMPTY;
count++;
}
static int ag71xx_rings_init(struct ag71xx *ag)
{
- int ret;
+ struct ag71xx_ring *tx = &ag->tx_ring;
+ struct ag71xx_ring *rx = &ag->rx_ring;
+ int ring_size = BIT(tx->order) + BIT(rx->order);
+ int tx_size = BIT(tx->order);
+
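+	/* TX and RX share a single buf[] array and one coherent descriptor
+	 * block; the RX ring aliases the tail of the TX allocation and is
+	 * freed together with it in ag71xx_rings_free(). */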
+ tx->buf = kzalloc(ring_size * sizeof(*tx->buf), GFP_KERNEL);
+ if (!tx->buf)
+ return -ENOMEM;
+
+ tx->descs_cpu = dma_alloc_coherent(NULL, ring_size * AG71XX_DESC_SIZE,
+ &tx->descs_dma, GFP_ATOMIC);
+ if (!tx->descs_cpu) {
+ kfree(tx->buf);
+ tx->buf = NULL;
+ return -ENOMEM;
+ }
- ret = ag71xx_ring_alloc(&ag->tx_ring);
- if (ret)
- return ret;
+	rx->buf = &tx->buf[tx_size];
+ rx->descs_cpu = ((void *)tx->descs_cpu) + tx_size * AG71XX_DESC_SIZE;
+ rx->descs_dma = tx->descs_dma + tx_size * AG71XX_DESC_SIZE;
ag71xx_ring_tx_init(ag);
+ return ag71xx_ring_rx_init(ag);
+}
- ret = ag71xx_ring_alloc(&ag->rx_ring);
- if (ret)
- return ret;
+static void ag71xx_rings_free(struct ag71xx *ag)
+{
+ struct ag71xx_ring *tx = &ag->tx_ring;
+ struct ag71xx_ring *rx = &ag->rx_ring;
+ int ring_size = BIT(tx->order) + BIT(rx->order);
- ret = ag71xx_ring_rx_init(ag);
- return ret;
+ if (tx->descs_cpu)
+ dma_free_coherent(NULL, ring_size * AG71XX_DESC_SIZE,
+ tx->descs_cpu, tx->descs_dma);
+
+ kfree(tx->buf);
+
+ tx->descs_cpu = NULL;
+ rx->descs_cpu = NULL;
+ tx->buf = NULL;
+ rx->buf = NULL;
}
static void ag71xx_rings_cleanup(struct ag71xx *ag)
{
ag71xx_ring_rx_clean(ag);
- ag71xx_ring_free(&ag->rx_ring);
-
ag71xx_ring_tx_clean(ag);
+ ag71xx_rings_free(ag);
+
netdev_reset_queue(ag->dev);
- ag71xx_ring_free(&ag->tx_ring);
}
static unsigned char *ag71xx_speed_str(struct ag71xx *ag)
static void ag71xx_hw_setup(struct ag71xx *ag)
{
struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
+ u32 init = MAC_CFG1_INIT;
/* setup MAC configuration registers */
- ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_INIT);
+ if (pdata->use_flow_control)
+ init |= MAC_CFG1_TFC | MAC_CFG1_RFC;
+ ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, init);
ag71xx_sb(ag, AG71XX_REG_MAC_CFG2,
MAC_CFG2_PAD_CRC_EN | MAC_CFG2_LEN_CHECK);
/* setup FIFO configuration registers */
ag71xx_wr(ag, AG71XX_REG_FIFO_CFG0, FIFO_CFG0_INIT);
if (pdata->is_ar724x) {
- ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, pdata->fifo_cfg1);
- ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, pdata->fifo_cfg2);
+ ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, 0x0010ffff);
+ ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, 0x015500aa);
} else {
ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, 0x0fff0000);
ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, 0x00001fff);
reset_mask &= ~(AR71XX_RESET_GE0_PHY | AR71XX_RESET_GE1_PHY);
ath79_device_reset_set(reset_phy);
- mdelay(50);
+ msleep(50);
ath79_device_reset_clear(reset_phy);
- mdelay(200);
+ msleep(200);
}
ag71xx_sb(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_SR);
udelay(20);
ath79_device_reset_set(reset_mask);
- mdelay(100);
+ msleep(100);
ath79_device_reset_clear(reset_mask);
- mdelay(200);
+ msleep(200);
ag71xx_hw_setup(ag);
struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
struct net_device *dev = ag->dev;
u32 reset_mask = pdata->reset_bit;
- u32 rx_ds, tx_ds;
+ u32 rx_ds;
u32 mii_reg;
reset_mask &= AR71XX_RESET_GE0_MAC | AR71XX_RESET_GE1_MAC;
+ ag71xx_hw_stop(ag);
+ wmb();
+
mii_reg = ag71xx_rr(ag, AG71XX_REG_MII_CFG);
rx_ds = ag71xx_rr(ag, AG71XX_REG_RX_DESC);
- tx_ds = ag71xx_rr(ag, AG71XX_REG_TX_DESC);
+
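+	/* Force-reclaim all in-flight TX buffers so the byte-queue limit
+	 * accounting matches the freshly reset ring. */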
+ ag71xx_tx_packets(ag, true);
ath79_device_reset_set(reset_mask);
udelay(10);
ag71xx_dma_reset(ag);
ag71xx_hw_setup(ag);
+ ag->tx_ring.curr = 0;
+ ag->tx_ring.dirty = 0;
+ netdev_reset_queue(ag->dev);
/* setup max frame length */
ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
ag71xx_max_frame_len(ag->dev->mtu));
ag71xx_wr(ag, AG71XX_REG_RX_DESC, rx_ds);
- ag71xx_wr(ag, AG71XX_REG_TX_DESC, tx_ds);
+ ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
ag71xx_wr(ag, AG71XX_REG_MII_CFG, mii_reg);
ag71xx_hw_set_macaddr(ag, dev->dev_addr);
/* enable interrupts */
ag71xx_wr(ag, AG71XX_REG_INT_ENABLE, AG71XX_INT_INIT);
+
+ netif_wake_queue(ag->dev);
}
-void ag71xx_link_adjust(struct ag71xx *ag)
+static void
+__ag71xx_link_adjust(struct ag71xx *ag, bool update)
{
struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
u32 cfg2;
u32 fifo5;
u32 fifo3;
- if (!ag->link) {
+ if (!ag->link && update) {
ag71xx_hw_stop(ag);
netif_carrier_off(ag->dev);
if (netif_msg_link(ag))
if (pdata->is_ar91xx)
fifo3 = 0x00780fff;
else if (pdata->is_ar724x)
- fifo3 = pdata->fifo_cfg3;
+ fifo3 = 0x01f00140;
else
fifo3 = 0x008001ff;
ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, fifo3);
- if (pdata->set_speed)
+ if (update && pdata->set_speed)
pdata->set_speed(ag->speed);
+ if (update && pdata->enable_sgmii_fixup)
+ ag71xx_qca955x_sgmii_init();
+
ag71xx_wr(ag, AG71XX_REG_MAC_CFG2, cfg2);
ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, fifo5);
ag71xx_wr(ag, AG71XX_REG_MAC_IFCTL, ifctl);
+
+ if (pdata->disable_inline_checksum_engine) {
+ /*
+ * The rx ring buffer can stall on small packets on QCA953x and
+ * QCA956x. Disabling the inline checksum engine fixes the stall.
+ * The wr, rr functions cannot be used since this hidden register
+ * is outside of the normal ag71xx register block.
+ */
+ void __iomem *dam = ioremap_nocache(0xb90001bc, 0x4);
+ if (dam) {
+ __raw_writel(__raw_readl(dam) & ~BIT(27), dam);
+ (void)__raw_readl(dam);
+ iounmap(dam);
+ }
+ }
+
ag71xx_hw_start(ag);
netif_carrier_on(ag->dev);
- if (netif_msg_link(ag))
+ if (update && netif_msg_link(ag))
pr_info("%s: link up (%sMbps/%s duplex)\n",
ag->dev->name,
ag71xx_speed_str(ag),
(DUPLEX_FULL == ag->duplex) ? "Full" : "Half");
- DBG("%s: fifo_cfg0=%#x, fifo_cfg1=%#x, fifo_cfg2=%#x\n",
- ag->dev->name,
- ag71xx_rr(ag, AG71XX_REG_FIFO_CFG0),
- ag71xx_rr(ag, AG71XX_REG_FIFO_CFG1),
- ag71xx_rr(ag, AG71XX_REG_FIFO_CFG2));
+ ag71xx_dump_regs(ag);
+}
- DBG("%s: fifo_cfg3=%#x, fifo_cfg4=%#x, fifo_cfg5=%#x\n",
- ag->dev->name,
- ag71xx_rr(ag, AG71XX_REG_FIFO_CFG3),
- ag71xx_rr(ag, AG71XX_REG_FIFO_CFG4),
- ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5));
+void ag71xx_link_adjust(struct ag71xx *ag)
+{
+ __ag71xx_link_adjust(ag, true);
+}
- DBG("%s: mac_cfg2=%#x, mac_ifctl=%#x\n",
- ag->dev->name,
- ag71xx_rr(ag, AG71XX_REG_MAC_CFG2),
- ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL));
+static int ag71xx_hw_enable(struct ag71xx *ag)
+{
+ int ret;
+
+ ret = ag71xx_rings_init(ag);
+ if (ret)
+ return ret;
+
+ napi_enable(&ag->napi);
+ ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
+ ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->rx_ring.descs_dma);
+ netif_start_queue(ag->dev);
+
+ return 0;
+}
+
+static void ag71xx_hw_disable(struct ag71xx *ag)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ag->lock, flags);
+
+ netif_stop_queue(ag->dev);
+
+ ag71xx_hw_stop(ag);
+ ag71xx_dma_reset(ag);
+
+ napi_disable(&ag->napi);
+ del_timer_sync(&ag->oom_timer);
+
+ spin_unlock_irqrestore(&ag->lock, flags);
+
+ ag71xx_rings_cleanup(ag);
}
static int ag71xx_open(struct net_device *dev)
unsigned int max_frame_len;
int ret;
+ netif_carrier_off(dev);
max_frame_len = ag71xx_max_frame_len(dev->mtu);
- ag->rx_buf_size = max_frame_len + NET_SKB_PAD + NET_IP_ALIGN;
+ ag->rx_buf_size = SKB_DATA_ALIGN(max_frame_len + NET_SKB_PAD + NET_IP_ALIGN);
/* setup max frame length */
ag71xx_wr(ag, AG71XX_REG_MAC_MFL, max_frame_len);
+ ag71xx_hw_set_macaddr(ag, dev->dev_addr);
- ret = ag71xx_rings_init(ag);
+ ret = ag71xx_hw_enable(ag);
if (ret)
goto err;
- napi_enable(&ag->napi);
-
- netif_carrier_off(dev);
ag71xx_phy_start(ag);
- ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
- ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->rx_ring.descs_dma);
-
- ag71xx_hw_set_macaddr(ag, dev->dev_addr);
-
- netif_start_queue(dev);
-
return 0;
err:
static int ag71xx_stop(struct net_device *dev)
{
struct ag71xx *ag = netdev_priv(dev);
- unsigned long flags;
netif_carrier_off(dev);
ag71xx_phy_stop(ag);
-
- spin_lock_irqsave(&ag->lock, flags);
-
- netif_stop_queue(dev);
-
- ag71xx_hw_stop(ag);
- ag71xx_dma_reset(ag);
-
- napi_disable(&ag->napi);
- del_timer_sync(&ag->oom_timer);
-
- spin_unlock_irqrestore(&ag->lock, flags);
-
- ag71xx_rings_cleanup(ag);
+ ag71xx_hw_disable(ag);
return 0;
}
{
int i;
struct ag71xx_desc *desc;
+ int ring_mask = BIT(ring->order) - 1;
int ndesc = 0;
int split = ring->desc_split;
while (len > 0) {
unsigned int cur_len = len;
- i = (ring->curr + ndesc) % ring->size;
- desc = ring->buf[i].desc;
+ i = (ring->curr + ndesc) & ring_mask;
+ desc = ag71xx_ring_desc(ring, i);
if (!ag71xx_desc_empty(desc))
return -1;
{
struct ag71xx *ag = netdev_priv(dev);
struct ag71xx_ring *ring = &ag->tx_ring;
+ int ring_mask = BIT(ring->order) - 1;
+ int ring_size = BIT(ring->order);
struct ag71xx_desc *desc;
dma_addr_t dma_addr;
int i, n, ring_min;
dma_addr = dma_map_single(&dev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
- i = ring->curr % ring->size;
- desc = ring->buf[i].desc;
+ i = ring->curr & ring_mask;
+ desc = ag71xx_ring_desc(ring, i);
/* setup descriptor fields */
n = ag71xx_fill_dma_desc(ring, (u32) dma_addr, skb->len & ag->desc_pktlen_mask);
if (n < 0)
goto err_drop_unmap;
- i = (ring->curr + n - 1) % ring->size;
+ i = (ring->curr + n - 1) & ring_mask;
ring->buf[i].len = skb->len;
ring->buf[i].skb = skb;
- ring->buf[i].timestamp = jiffies;
netdev_sent_queue(dev, skb->len);
+ skb_tx_timestamp(skb);
+
desc->ctrl &= ~DESC_EMPTY;
ring->curr += n;
if (ring->desc_split)
ring_min *= AG71XX_TX_RING_DS_PER_PKT;
- if (ring->curr - ring->dirty >= ring->size - ring_min) {
+ if (ring->curr - ring->dirty >= ring_size - ring_min) {
DBG("%s: tx queue full\n", dev->name);
netif_stop_queue(dev);
}
if (netif_msg_tx_err(ag))
pr_info("%s: tx timeout\n", ag->dev->name);
- schedule_work(&ag->restart_work);
+ schedule_delayed_work(&ag->restart_work, 1);
}
-static void ag71xx_restart_work_func(struct work_struct *work)
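+/* Read-modify-write helpers for the QCA955x GMAC glue registers; the
+ * read-back after each write flushes the posted write. */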
+static void ag71xx_bit_set(void __iomem *reg, u32 bit)
{
- struct ag71xx *ag = container_of(work, struct ag71xx, restart_work);
+ u32 val = __raw_readl(reg) | bit;
+ __raw_writel(val, reg);
+ __raw_readl(reg);
+}
- if (ag71xx_get_pdata(ag)->is_ar724x) {
- ag->link = 0;
- ag71xx_link_adjust(ag);
- return;
- }
+static void ag71xx_bit_clear(void __iomem *reg, u32 bit)
+{
+ u32 val = __raw_readl(reg) & ~bit;
+ __raw_writel(val, reg);
+ __raw_readl(reg);
+}
+
+static void ag71xx_qca955x_sgmii_init(void)
+{
+ void __iomem *gmac_base;
+ u32 mr_an_status, sgmii_status;
+ u8 tries = 0;
+
+ gmac_base = ioremap_nocache(QCA955X_GMAC_BASE, QCA955X_GMAC_SIZE);
+
+	if (!gmac_base)
+		return;
+
+ mr_an_status = __raw_readl(gmac_base + QCA955X_GMAC_REG_MR_AN_STATUS);
+ if (!(mr_an_status & QCA955X_MR_AN_STATUS_AN_ABILITY))
+ goto sgmii_out;
+
+	__raw_writel(QCA955X_SGMII_RESET_RX_CLK_N_RESET,
+ gmac_base + QCA955X_GMAC_REG_SGMII_RESET);
+ __raw_readl(gmac_base + QCA955X_GMAC_REG_SGMII_RESET);
+ udelay(10);
+
+ /* Init sequence */
+ ag71xx_bit_set(gmac_base + QCA955X_GMAC_REG_SGMII_RESET,
+ QCA955X_SGMII_RESET_HW_RX_125M_N);
+ udelay(10);
+
+ ag71xx_bit_set(gmac_base + QCA955X_GMAC_REG_SGMII_RESET,
+ QCA955X_SGMII_RESET_RX_125M_N);
+ udelay(10);
- ag71xx_stop(ag->dev);
- ag71xx_open(ag->dev);
+ ag71xx_bit_set(gmac_base + QCA955X_GMAC_REG_SGMII_RESET,
+ QCA955X_SGMII_RESET_TX_125M_N);
+ udelay(10);
+
+ ag71xx_bit_set(gmac_base + QCA955X_GMAC_REG_SGMII_RESET,
+ QCA955X_SGMII_RESET_RX_CLK_N);
+ udelay(10);
+
+ ag71xx_bit_set(gmac_base + QCA955X_GMAC_REG_SGMII_RESET,
+ QCA955X_SGMII_RESET_TX_CLK_N);
+ udelay(10);
+
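+	/* Pulse the PHY reset and restart autonegotiation until the SGMII
+	 * debug state machine reports link, bounded by a retry limit. */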
+ do {
+ ag71xx_bit_set(gmac_base + QCA955X_GMAC_REG_MR_AN_CONTROL,
+ QCA955X_MR_AN_CONTROL_PHY_RESET |
+ QCA955X_MR_AN_CONTROL_AN_ENABLE);
+ udelay(100);
+ ag71xx_bit_clear(gmac_base + QCA955X_GMAC_REG_MR_AN_CONTROL,
+ QCA955X_MR_AN_CONTROL_PHY_RESET);
+ mdelay(10);
+ sgmii_status = __raw_readl(gmac_base + QCA955X_GMAC_REG_SGMII_DEBUG) & 0xF;
+
+ if (tries++ >= QCA955X_SGMII_LINK_WAR_MAX_TRY) {
+ pr_warn("ag71xx: max retries for SGMII fixup exceeded!\n");
+ break;
+ }
+ } while (!(sgmii_status == 0xf || sgmii_status == 0x10));
+
+sgmii_out:
+ iounmap(gmac_base);
}
-static bool ag71xx_check_dma_stuck(struct ag71xx *ag, unsigned long timestamp)
+static void ag71xx_restart_work_func(struct work_struct *work)
{
+ struct ag71xx *ag = container_of(work, struct ag71xx, restart_work.work);
+
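+	/* Hold the RTNL lock: this reinitializes the same state as
+	 * ndo_open()/ndo_stop(). */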
+ rtnl_lock();
+ ag71xx_hw_disable(ag);
+ ag71xx_hw_enable(ag);
+ if (ag->link)
+ __ag71xx_link_adjust(ag, false);
+ rtnl_unlock();
+}
+
+static bool ag71xx_check_dma_stuck(struct ag71xx *ag)
+{
+ unsigned long timestamp;
u32 rx_sm, tx_sm, rx_fd;
+ timestamp = netdev_get_tx_queue(ag->dev, 0)->trans_start;
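+	/* Per-buffer timestamps were dropped; use the TX queue's last
+	 * transmit time as the staleness reference instead. */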
if (likely(time_before(jiffies, timestamp + HZ/10)))
return false;
return false;
}
-static int ag71xx_tx_packets(struct ag71xx *ag)
+static int ag71xx_tx_packets(struct ag71xx *ag, bool flush)
{
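+	/* With flush set, reclaim every descriptor even if the hardware has
+	 * not marked it empty; ag71xx_hw_reset() uses this to drain the
+	 * ring before resetting the MAC. */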
struct ag71xx_ring *ring = &ag->tx_ring;
struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
+ bool dma_stuck = false;
+ int ring_mask = BIT(ring->order) - 1;
+ int ring_size = BIT(ring->order);
int sent = 0;
int bytes_compl = 0;
+ int n = 0;
DBG("%s: processing TX ring\n", ag->dev->name);
- while (ring->dirty != ring->curr) {
- unsigned int i = ring->dirty % ring->size;
- struct ag71xx_desc *desc = ring->buf[i].desc;
+ while (ring->dirty + n != ring->curr) {
+ unsigned int i = (ring->dirty + n) & ring_mask;
+ struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
struct sk_buff *skb = ring->buf[i].skb;
- if (!ag71xx_desc_empty(desc)) {
- if (pdata->is_ar7240 &&
- ag71xx_check_dma_stuck(ag, ring->buf[i].timestamp))
- schedule_work(&ag->restart_work);
+ if (!flush && !ag71xx_desc_empty(desc)) {
+ if (pdata->is_ar724x &&
+ ag71xx_check_dma_stuck(ag)) {
+ schedule_delayed_work(&ag->restart_work, HZ / 2);
+ dma_stuck = true;
+ }
break;
}
- ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
+ if (flush)
+ desc->ctrl |= DESC_EMPTY;
- if (skb) {
- dev_kfree_skb_any(skb);
- ring->buf[i].skb = NULL;
+ n++;
+ if (!skb)
+ continue;
- bytes_compl += ring->buf[i].len;
- sent++;
- }
+ dev_kfree_skb_any(skb);
+ ring->buf[i].skb = NULL;
- ring->dirty++;
+ bytes_compl += ring->buf[i].len;
+
+ sent++;
+ ring->dirty += n;
+
+ while (n > 0) {
+ ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
+ n--;
+ }
}
DBG("%s: %d packets sent out\n", ag->dev->name, sent);
- ag->dev->stats.tx_bytes += bytes_compl;
- ag->dev->stats.tx_packets += sent;
-
if (!sent)
return 0;
+ ag->dev->stats.tx_bytes += bytes_compl;
+ ag->dev->stats.tx_packets += sent;
+
netdev_completed_queue(ag->dev, sent, bytes_compl);
- if ((ring->curr - ring->dirty) < (ring->size * 3) / 4)
+ if ((ring->curr - ring->dirty) < (ring_size * 3) / 4)
netif_wake_queue(ag->dev);
+ if (!dma_stuck)
+ cancel_delayed_work(&ag->restart_work);
+
return sent;
}
struct ag71xx_ring *ring = &ag->rx_ring;
int offset = ag71xx_buffer_offset(ag);
unsigned int pktlen_mask = ag->desc_pktlen_mask;
+ int ring_mask = BIT(ring->order) - 1;
+ int ring_size = BIT(ring->order);
+ struct sk_buff_head queue;
+ struct sk_buff *skb;
int done = 0;
DBG("%s: rx packets, limit=%d, curr=%u, dirty=%u\n",
dev->name, limit, ring->curr, ring->dirty);
+ skb_queue_head_init(&queue);
+
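+	/* Received skbs are batched on a local queue and handed to the
+	 * stack only after the RX ring has been refilled. */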
while (done < limit) {
- unsigned int i = ring->curr % ring->size;
- struct ag71xx_desc *desc = ring->buf[i].desc;
- struct sk_buff *skb;
+ unsigned int i = ring->curr & ring_mask;
+ struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
int pktlen;
int err = 0;
if (ag71xx_desc_empty(desc))
break;
- if ((ring->dirty + ring->size) == ring->curr) {
+ if ((ring->dirty + ring_size) == ring->curr) {
ag71xx_assert(0);
break;
}
dev->stats.rx_packets++;
dev->stats.rx_bytes += pktlen;
- skb = build_skb(ring->buf[i].rx_buf, 0);
+ skb = build_skb(ring->buf[i].rx_buf, ag71xx_buffer_size(ag));
if (!skb) {
- kfree(ring->buf[i].rx_buf);
+ skb_free_frag(ring->buf[i].rx_buf);
goto next;
}
} else {
skb->dev = dev;
skb->ip_summed = CHECKSUM_NONE;
- skb->protocol = eth_type_trans(skb, dev);
- netif_receive_skb(skb);
+ __skb_queue_tail(&queue, skb);
}
next:
ag71xx_ring_rx_refill(ag);
+ while ((skb = __skb_dequeue(&queue)) != NULL) {
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_receive_skb(skb);
+ }
+
DBG("%s: rx finish, curr=%u, dirty=%u, done=%d\n",
dev->name, ring->curr, ring->dirty, done);
struct ag71xx *ag = container_of(napi, struct ag71xx, napi);
struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
struct net_device *dev = ag->dev;
- struct ag71xx_ring *rx_ring;
+ struct ag71xx_ring *rx_ring = &ag->rx_ring;
+ int rx_ring_size = BIT(rx_ring->order);
unsigned long flags;
u32 status;
int tx_done;
int rx_done;
pdata->ddr_flush();
- tx_done = ag71xx_tx_packets(ag);
+ tx_done = ag71xx_tx_packets(ag, false);
DBG("%s: processing RX ring\n", dev->name);
rx_done = ag71xx_rx_packets(ag, limit);
ag71xx_debugfs_update_napi_stats(ag, rx_done, tx_done);
- rx_ring = &ag->rx_ring;
- if (rx_ring->buf[rx_ring->dirty % rx_ring->size].rx_buf == NULL)
+ if (rx_ring->buf[rx_ring->dirty % rx_ring_size].rx_buf == NULL)
goto oom;
status = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
more:
DBG("%s: stay in polling mode, rx=%d, tx=%d, limit=%d\n",
dev->name, rx_done, tx_done, limit);
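+	/* Return the full budget so NAPI keeps polling; rx_done may be
+	 * smaller even though more work is pending. */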
- return rx_done;
+ return limit;
oom:
if (netif_msg_rx_err(ag))
struct resource *res;
struct ag71xx *ag;
struct ag71xx_platform_data *pdata;
- int err;
+ int tx_size, err;
pdata = pdev->dev.platform_data;
if (!pdata) {
dev->irq = platform_get_irq(pdev, 0);
err = request_irq(dev->irq, ag71xx_interrupt,
- IRQF_DISABLED,
+			  0,
dev->name, dev);
if (err) {
dev_err(&pdev->dev, "unable to request IRQ %d\n", dev->irq);
dev->netdev_ops = &ag71xx_netdev_ops;
dev->ethtool_ops = &ag71xx_ethtool_ops;
- INIT_WORK(&ag->restart_work, ag71xx_restart_work_func);
+ INIT_DELAYED_WORK(&ag->restart_work, ag71xx_restart_work_func);
init_timer(&ag->oom_timer);
ag->oom_timer.data = (unsigned long) dev;
ag->oom_timer.function = ag71xx_oom_timer_handler;
- ag->tx_ring.size = AG71XX_TX_RING_SIZE_DEFAULT;
- ag->rx_ring.size = AG71XX_RX_RING_SIZE_DEFAULT;
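+	/* Ring sizes are stored as log2 orders so ring indices wrap with a
+	 * mask instead of a modulo. */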
+ tx_size = AG71XX_TX_RING_SIZE_DEFAULT;
+ ag->rx_ring.order = ag71xx_ring_size_order(AG71XX_RX_RING_SIZE_DEFAULT);
ag->max_frame_len = pdata->max_frame_len;
ag->desc_pktlen_mask = pdata->desc_pktlen_mask;
-#ifdef notyet
if (!pdata->is_ar724x && !pdata->is_ar91xx) {
ag->tx_ring.desc_split = AG71XX_TX_RING_SPLIT;
- ag->tx_ring.size *= AG71XX_TX_RING_DS_PER_PKT;
+ tx_size *= AG71XX_TX_RING_DS_PER_PKT;
}
-#endif
+ ag->tx_ring.order = ag71xx_ring_size_order(tx_size);
ag->stop_desc = dma_alloc_coherent(NULL,
sizeof(struct ag71xx_desc), &ag->stop_desc_dma, GFP_KERNEL);