int i;
for (i = 0; i < NUM_RX_DESC; i++)
- if (re->rx_skb[i])
+ if (re->rx_skb[i]) {
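+ /* release the streaming DMA mapping before freeing the skb */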
+ dma_unmap_single(NULL, re->rx_dma[i], MAX_RX_LENGTH,
+ DMA_FROM_DEVICE);
dev_kfree_skb_any(re->rx_skb[i]);
+ }
if (re->rx)
dma_free_coherent(NULL,
NUM_RX_DESC * sizeof(struct ramips_rx_dma),
- re->rx, re->phy_rx);
+ re->rx, re->rx_desc_dma);
if (re->tx)
dma_free_coherent(NULL,
NUM_TX_DESC * sizeof(struct ramips_tx_dma),
- re->tx, re->phy_tx);
+ re->tx, re->tx_desc_dma);
}
static int
/* setup tx ring */
re->tx = dma_alloc_coherent(NULL,
NUM_TX_DESC * sizeof(struct ramips_tx_dma),
- &re->phy_tx, GFP_ATOMIC);
+ &re->tx_desc_dma, GFP_ATOMIC);
if (!re->tx)
goto err_cleanup;
/* setup rx ring */
re->rx = dma_alloc_coherent(NULL,
NUM_RX_DESC * sizeof(struct ramips_rx_dma),
- &re->phy_rx, GFP_ATOMIC);
+ &re->rx_desc_dma, GFP_ATOMIC);
if (!re->rx)
goto err_cleanup;
memset(re->rx, 0, sizeof(struct ramips_rx_dma) * NUM_RX_DESC);
for (i = 0; i < NUM_RX_DESC; i++) {
+ dma_addr_t dma_addr;
struct sk_buff *new_skb = dev_alloc_skb(MAX_RX_LENGTH +
NET_IP_ALIGN);
if (!new_skb)
goto err_cleanup;
skb_reserve(new_skb, NET_IP_ALIGN);
- re->rx[i].rxd1 = dma_map_single(NULL,
- new_skb->data,
- MAX_RX_LENGTH,
- DMA_FROM_DEVICE);
+
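+ /* map the fresh buffer and keep the handle so it can be unmapped later */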
+ dma_addr = dma_map_single(NULL, new_skb->data,
+ MAX_RX_LENGTH, DMA_FROM_DEVICE);
+ re->rx_dma[i] = dma_addr;
+ re->rx[i].rxd1 = (unsigned int) re->rx_dma[i];
re->rx[i].rxd2 |= RX_DMA_LSO;
re->rx_skb[i] = new_skb;
}
static void
ramips_setup_dma(struct raeth_priv *re)
{
- ramips_fe_wr(phys_to_bus(re->phy_tx), RAMIPS_TX_BASE_PTR0);
+ ramips_fe_wr(re->tx_desc_dma, RAMIPS_TX_BASE_PTR0);
ramips_fe_wr(NUM_TX_DESC, RAMIPS_TX_MAX_CNT0);
ramips_fe_wr(0, RAMIPS_TX_CTX_IDX0);
ramips_fe_wr(RAMIPS_PST_DTX_IDX0, RAMIPS_PDMA_RST_CFG);
- ramips_fe_wr(phys_to_bus(re->phy_rx), RAMIPS_RX_BASE_PTR0);
+ ramips_fe_wr(re->rx_desc_dma, RAMIPS_RX_BASE_PTR0);
ramips_fe_wr(NUM_RX_DESC, RAMIPS_RX_MAX_CNT0);
ramips_fe_wr((NUM_RX_DESC - 1), RAMIPS_RX_CALC_IDX0);
ramips_fe_wr(RAMIPS_PST_DRX_IDX0, RAMIPS_PDMA_RST_CFG);
struct raeth_priv *priv = netdev_priv(dev);
unsigned long tx;
unsigned int tx_next;
- unsigned int mapped_addr;
- unsigned long flags;
+ dma_addr_t mapped_addr;
if (priv->plat->min_pkt_len) {
if (skb->len < priv->plat->min_pkt_len) {
}
dev->trans_start = jiffies;
- mapped_addr = (unsigned int) dma_map_single(NULL, skb->data, skb->len,
- DMA_TO_DEVICE);
- dma_sync_single_for_device(NULL, mapped_addr, skb->len, DMA_TO_DEVICE);
- spin_lock_irqsave(&priv->page_lock, flags);
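+ /* map the payload for the device; dma_map_single() also does the cache sync */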
+ mapped_addr = dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE);
+
+ spin_lock(&priv->page_lock);
tx = ramips_fe_rr(RAMIPS_TX_CTX_IDX0);
tx_next = (tx + 1) % NUM_TX_DESC;
!(priv->tx[tx_next].txd2 & TX_DMA_DONE))
goto out;
- priv->tx[tx].txd1 = mapped_addr;
+ priv->tx[tx].txd1 = (unsigned int) mapped_addr;
priv->tx[tx].txd2 &= ~(TX_DMA_PLEN0_MASK | TX_DMA_DONE);
priv->tx[tx].txd2 |= TX_DMA_PLEN0(skb->len);
dev->stats.tx_packets++;
priv->tx_skb[tx] = skb;
wmb();
ramips_fe_wr(tx_next, RAMIPS_TX_CTX_IDX0);
- spin_unlock_irqrestore(&priv->page_lock, flags);
+ spin_unlock(&priv->page_lock);
return NETDEV_TX_OK;
out:
- spin_unlock_irqrestore(&priv->page_lock, flags);
+ spin_unlock(&priv->page_lock);
dev->stats.tx_dropped++;
kfree_skb(skb);
return NETDEV_TX_OK;
while (max_rx) {
struct sk_buff *rx_skb, *new_skb;
+ int pktlen;
rx = (ramips_fe_rr(RAMIPS_RX_CALC_IDX0) + 1) % NUM_RX_DESC;
if (!(priv->rx[rx].rxd2 & RX_DMA_DONE))
break;
max_rx--;
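+ /* grab the skb and its length before the descriptor is recycled */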
+ rx_skb = priv->rx_skb[rx];
+ pktlen = RX_DMA_PLEN0(priv->rx[rx].rxd2);
+
new_skb = netdev_alloc_skb(dev, MAX_RX_LENGTH + NET_IP_ALIGN);
/* Reuse the buffer on allocation failures */
if (new_skb) {
- rx_skb = priv->rx_skb[rx];
- skb_put(rx_skb, RX_DMA_PLEN0(priv->rx[rx].rxd2));
+ dma_addr_t dma_addr;
+
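+ /* unmap the filled buffer before handing it to the stack */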
+ dma_unmap_single(NULL, priv->rx_dma[rx], MAX_RX_LENGTH,
+ DMA_FROM_DEVICE);
+
+ skb_put(rx_skb, pktlen);
rx_skb->dev = dev;
rx_skb->protocol = eth_type_trans(rx_skb, dev);
rx_skb->ip_summed = CHECKSUM_NONE;
dev->stats.rx_packets++;
- dev->stats.rx_bytes += rx_skb->len;
+ dev->stats.rx_bytes += pktlen;
netif_rx(rx_skb);
priv->rx_skb[rx] = new_skb;
skb_reserve(new_skb, NET_IP_ALIGN);
- priv->rx[rx].rxd1 = dma_map_single(NULL,
- new_skb->data,
- MAX_RX_LENGTH,
- DMA_FROM_DEVICE);
+
+ dma_addr = dma_map_single(NULL,
+ new_skb->data,
+ MAX_RX_LENGTH,
+ DMA_FROM_DEVICE);
+ priv->rx_dma[rx] = dma_addr;
+ priv->rx[rx].rxd1 = (unsigned int) dma_addr;
+ } else {
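+ /* allocation failed: keep the old buffer in the ring and count the drop */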
+ dev->stats.rx_dropped++;
}
priv->rx[rx].rxd2 &= ~RX_DMA_DONE;
struct net_device *dev = (struct net_device*)ptr;
struct raeth_priv *priv = netdev_priv(dev);
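+ /* the xmit path uses the same ring, so take the same lock */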
+ spin_lock(&priv->page_lock);
while ((priv->tx[priv->skb_free_idx].txd2 & TX_DMA_DONE) &&
(priv->tx_skb[priv->skb_free_idx])) {
dev_kfree_skb_irq(priv->tx_skb[priv->skb_free_idx]);
priv->tx_skb[priv->skb_free_idx] = NULL;
priv->skb_free_idx++;
if (priv->skb_free_idx >= NUM_TX_DESC)
priv->skb_free_idx = 0;
}
+ spin_unlock(&priv->page_lock);
ramips_fe_int_enable(RAMIPS_TX_DLY_INT);
}
tasklet_schedule(&priv->rx_tasklet);
}
- if (fe_int & RAMIPS_TX_DLY_INT)
- ramips_eth_tx_housekeeping((unsigned long)dev);
+ if (fe_int & RAMIPS_TX_DLY_INT) {
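+ /* mask the TX interrupt and defer the ring cleanup to the tasklet */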
+ ramips_fe_int_disable(RAMIPS_TX_DLY_INT);
+ tasklet_schedule(&priv->tx_housekeeping_tasklet);
+ }
return IRQ_HANDLED;
}