struct bcm6368_enetsw *priv = netdev_priv(dev);
struct device *kdev = &priv->pdev->dev;
struct list_head rx_list;
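+ /* hoisted to function scope: reused by the eth_type_trans() pass
+  * over rx_list after the loop
+  */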
+ struct sk_buff *skb;
int processed = 0;
INIT_LIST_HEAD(&rx_list);
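+ /* completed skbs are collected on rx_list and handed to the stack
+  * in a single netif_receive_skb_list() call after the loop
+  */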
do {
struct bcm6368_enetsw_desc *desc;
unsigned int frag_size;
- struct sk_buff *skb;
unsigned char *buf;
int desc_idx;
u32 len_stat;
skb_reserve(skb, NET_SKB_PAD);
skb_put(skb, len);
- skb->protocol = eth_type_trans(skb, dev);
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
list_add_tail(&skb->list, &rx_list);
} while (processed < budget);
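+ /* second pass over the batch: eth_type_trans() reads the Ethernet
+  * header and sets skb->protocol before the list goes to the stack
+  */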
+ list_for_each_entry(skb, &rx_list, list)
+ skb->protocol = eth_type_trans(skb, dev);
netif_receive_skb_list(&rx_list);
priv->rx_desc_count -= processed;
- /*
-  * try to or force reclaim of transmitted buffers
-  */
+ /*
+  * try to, or force, reclaim of transmitted buffers; budget is the NAPI
+  * budget, or 0 when called outside NAPI context
+  */
-static int bcm6368_enetsw_tx_reclaim(struct net_device *dev, int force)
+static int bcm6368_enetsw_tx_reclaim(struct net_device *dev, int force,
+ int budget)
{
struct bcm6368_enetsw *priv = netdev_priv(dev);
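+ /* total length of the reclaimed skbs, reported to BQL below */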
+ unsigned int bytes = 0;
int released = 0;
while (priv->tx_desc_count < priv->tx_ring_size) {
if (desc->len_stat & DMADESC_UNDER_MASK)
dev->stats.tx_errors++;
- napi_consume_skb(skb, !force);
+ bytes += skb->len;
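+ /* a budget of 0 signals non-NAPI context and frees the skb
+  * immediately; in NAPI context skbs may take the bulk-free path
+  */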
+ napi_consume_skb(skb, budget);
released++;
}
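+ /* report reclaimed packets and bytes so BQL can adjust its limit */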
+ netdev_completed_queue(dev, released, bytes);
+
if (netif_queue_stopped(dev) && released)
netif_wake_queue(dev);
DMAC_IR_REG, priv->tx_chan);
- /* reclaim sent skb */
+ /* reclaim sent skbs; pass the NAPI budget through for bulk freeing */
- bcm6368_enetsw_tx_reclaim(dev, 0);
+ bcm6368_enetsw_tx_reclaim(dev, 0, budget);
spin_lock(&priv->rx_lock);
rx_work_done = bcm6368_enetsw_receive_queue(dev, budget);
desc->len_stat = len_stat;
wmb();
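+ /* account the queued bytes with BQL before kicking the DMA engine */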
+ netdev_sent_queue(dev, skb->len);
+
/* kick tx dma */
dmac_writel(priv, priv->dma_chan_en_mask, DMAC_CHANCFG_REG,
priv->tx_chan);
bcm6368_enetsw_disable_dma(priv, priv->rx_chan);
/* force reclaim of all tx buffers */
- bcm6368_enetsw_tx_reclaim(dev, 1);
+ bcm6368_enetsw_tx_reclaim(dev, 1, 0);
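+ /* budget 0: outside NAPI context, so skbs are freed immediately */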
/* free the rx buffer ring */
for (i = 0; i < priv->rx_ring_size; i++) {
free_irq(priv->irq_tx, dev);
free_irq(priv->irq_rx, dev);
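+ /* clear BQL state to match the now-empty tx ring */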
+ netdev_reset_queue(dev);
+
return 0;
}