#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
+#include <linux/version.h>
-/* MTU */
-#define ENETSW_TAG_SIZE 6
-#define ENETSW_MTU_OVERHEAD (VLAN_ETH_HLEN + VLAN_HLEN + \
- ENETSW_TAG_SIZE)
+/* TODO: Bigger frames may work, but we do not trust that they are safe on
+ * all platforms, so more research is needed; a max frame size of 2048 has
+ * been tested. For now we use the safe frame size of 1542, which is 1532
+ * plus DSA and VLAN overhead.
+ */
+#define ENETSW_MAX_FRAME 1542
+#define ENETSW_DSA_TAG_SIZE 6
+/* The MTU in Linux does not include the Ethernet or VLAN headers, but it
+ * DOES include the DSA overhead (the DSA framework will increase the MTU
+ * to fit any DSA tag).
+ */
+#define ENETSW_MAX_MTU (ENETSW_MAX_FRAME - VLAN_ETH_HLEN - \
+ VLAN_HLEN)
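+/* i.e. ENETSW_MAX_MTU = 1542 - 18 (VLAN_ETH_HLEN) - 4 (VLAN_HLEN) = 1520 */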
#define ENETSW_FRAG_SIZE(x) (SKB_DATA_ALIGN(NET_SKB_PAD + x + \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info))))
/* number of dma desc in tx ring */
int tx_ring_size;
- /* maximum dma burst size */
- int dma_maxburst;
-
/* cpu view of rx dma ring */
struct bcm6368_enetsw_desc *tx_desc_cpu;
/* platform device reference */
struct platform_device *pdev;
-
- /* dma channel enable mask */
- u32 dma_chan_en_mask;
-
- /* dma channel interrupt mask */
- u32 dma_chan_int_mask;
-
- /* dma channel width */
- unsigned int dma_chan_width;
};
static inline void dma_writel(struct bcm6368_enetsw *priv, u32 val, u32 off)
static inline u32 dma_readl(struct bcm6368_enetsw *priv, u32 off, int chan)
{
- return __raw_readl(priv->dma_chan + off + chan * priv->dma_chan_width);
+ return __raw_readl(priv->dma_chan + off + chan * DMA_CHAN_WIDTH);
}
-static inline void dmac_writel(struct bcm6368_enetsw *priv, u32 val,
- u32 off, int chan)
+static inline void dmac_writel(struct bcm6368_enetsw *priv, u32 val, u32 off,
+ int chan)
{
- __raw_writel(val, priv->dma_chan + off + chan * priv->dma_chan_width);
+ __raw_writel(val, priv->dma_chan + off + chan * DMA_CHAN_WIDTH);
}
static inline void dmas_writel(struct bcm6368_enetsw *priv, u32 val,
u32 off, int chan)
{
- __raw_writel(val, priv->dma_sram + off + chan * priv->dma_chan_width);
+ __raw_writel(val, priv->dma_sram + off + chan * DMA_CHAN_WIDTH);
}
/*
* refill rx queue
*/
-static int bcm6368_enetsw_refill_rx(struct net_device *dev, bool napi_mode)
+static int bcm6368_enetsw_refill_rx(struct net_device *ndev, bool napi_mode)
{
- struct bcm6368_enetsw *priv = netdev_priv(dev);
+ struct bcm6368_enetsw *priv = netdev_priv(ndev);
+ struct platform_device *pdev = priv->pdev;
+ struct device *dev = &pdev->dev;
while (priv->rx_desc_count < priv->rx_ring_size) {
struct bcm6368_enetsw_desc *desc;
if (!priv->rx_buf[desc_idx]) {
unsigned char *buf;
+ dma_addr_t p;
if (likely(napi_mode))
buf = napi_alloc_frag(priv->rx_frag_size);
if (unlikely(!buf))
break;
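+ /* map the fresh fragment; on failure free it and let the refill timer retry */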
+ p = dma_map_single(dev, buf + NET_SKB_PAD,
+ priv->rx_buf_size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, p))) {
+ skb_free_frag(buf);
+ break;
+ }
+
priv->rx_buf[desc_idx] = buf;
- desc->address = dma_map_single(&priv->pdev->dev,
- buf + NET_SKB_PAD,
- priv->rx_buf_size,
- DMA_FROM_DEVICE);
+ desc->address = p;
}
len_stat = priv->rx_buf_size << DMADESC_LENGTH_SHIFT;
/* If rx ring is still empty, set a timer to try allocating
* again at a later time. */
- if (priv->rx_desc_count == 0 && netif_running(dev)) {
- dev_warn(&priv->pdev->dev, "unable to refill rx ring\n");
+ if (priv->rx_desc_count == 0 && netif_running(ndev)) {
+ dev_warn(dev, "unable to refill rx ring\n");
priv->rx_timeout.expires = jiffies + HZ;
add_timer(&priv->rx_timeout);
}
static void bcm6368_enetsw_refill_rx_timer(struct timer_list *t)
{
struct bcm6368_enetsw *priv = from_timer(priv, t, rx_timeout);
- struct net_device *dev = priv->net_dev;
+ struct net_device *ndev = priv->net_dev;
spin_lock(&priv->rx_lock);
- bcm6368_enetsw_refill_rx(dev, false);
+ bcm6368_enetsw_refill_rx(ndev, false);
spin_unlock(&priv->rx_lock);
}
/*
* extract packet from rx queue
*/
-static int bcm6368_enetsw_receive_queue(struct net_device *dev, int budget)
+static int bcm6368_enetsw_receive_queue(struct net_device *ndev, int budget)
{
- struct bcm6368_enetsw *priv = netdev_priv(dev);
- struct device *kdev = &priv->pdev->dev;
+ struct bcm6368_enetsw *priv = netdev_priv(ndev);
+ struct platform_device *pdev = priv->pdev;
+ struct device *dev = &pdev->dev;
struct list_head rx_list;
struct sk_buff *skb;
int processed = 0;
/* if the packet does not have start of packet _and_
* end of packet flag set, then just recycle it */
if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) {
- dev->stats.rx_dropped++;
+ ndev->stats.rx_dropped++;
continue;
}
if (unlikely(!nbuf)) {
/* forget packet, just rearm desc */
- dev->stats.rx_dropped++;
+ ndev->stats.rx_dropped++;
continue;
}
- dma_sync_single_for_cpu(kdev, desc->address,
+ dma_sync_single_for_cpu(dev, desc->address,
len, DMA_FROM_DEVICE);
memcpy(nbuf + NET_SKB_PAD, buf + NET_SKB_PAD, len);
- dma_sync_single_for_device(kdev, desc->address,
+ dma_sync_single_for_device(dev, desc->address,
len, DMA_FROM_DEVICE);
buf = nbuf;
frag_size = nfrag_size;
} else {
- dma_unmap_single(kdev, desc->address,
+ dma_unmap_single(dev, desc->address,
priv->rx_buf_size, DMA_FROM_DEVICE);
priv->rx_buf[desc_idx] = NULL;
frag_size = priv->rx_frag_size;
skb = napi_build_skb(buf, frag_size);
if (unlikely(!skb)) {
skb_free_frag(buf);
- dev->stats.rx_dropped++;
+ ndev->stats.rx_dropped++;
continue;
}
skb_reserve(skb, NET_SKB_PAD);
skb_put(skb, len);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += len;
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += len;
list_add_tail(&skb->list, &rx_list);
} while (processed < budget);
list_for_each_entry(skb, &rx_list, list)
- skb->protocol = eth_type_trans(skb, dev);
+ skb->protocol = eth_type_trans(skb, ndev);
netif_receive_skb_list(&rx_list);
priv->rx_desc_count -= processed;
if (processed || !priv->rx_desc_count) {
- bcm6368_enetsw_refill_rx(dev, true);
+ bcm6368_enetsw_refill_rx(ndev, true);
/* kick rx dma */
- dmac_writel(priv, priv->dma_chan_en_mask,
+ dmac_writel(priv, DMAC_CHANCFG_EN_MASK,
DMAC_CHANCFG_REG, priv->rx_chan);
}
/*
* try to or force reclaim of transmitted buffers
*/
-static int bcm6368_enetsw_tx_reclaim(struct net_device *dev, int force)
+static int bcm6368_enetsw_tx_reclaim(struct net_device *ndev, int force,
+ int budget)
{
- struct bcm6368_enetsw *priv = netdev_priv(dev);
+ struct bcm6368_enetsw *priv = netdev_priv(ndev);
+ struct platform_device *pdev = priv->pdev;
+ struct device *dev = &pdev->dev;
unsigned int bytes = 0;
int released = 0;
skb = priv->tx_skb[priv->tx_dirty_desc];
priv->tx_skb[priv->tx_dirty_desc] = NULL;
- dma_unmap_single(&priv->pdev->dev, desc->address, skb->len,
+ dma_unmap_single(dev, desc->address, skb->len,
DMA_TO_DEVICE);
priv->tx_dirty_desc++;
spin_unlock(&priv->tx_lock);
if (desc->len_stat & DMADESC_UNDER_MASK)
- dev->stats.tx_errors++;
+ ndev->stats.tx_errors++;
bytes += skb->len;
- napi_consume_skb(skb, !force);
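+ /* a budget of 0 tells napi_consume_skb() we are outside NAPI context (forced reclaim) */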
+ napi_consume_skb(skb, budget);
released++;
}
- netdev_completed_queue(dev, released, bytes);
+ netdev_completed_queue(ndev, released, bytes);
- if (netif_queue_stopped(dev) && released)
- netif_wake_queue(dev);
+ if (netif_queue_stopped(ndev) && released)
+ netif_wake_queue(ndev);
return released;
}
static int bcm6368_enetsw_poll(struct napi_struct *napi, int budget)
{
struct bcm6368_enetsw *priv = container_of(napi, struct bcm6368_enetsw, napi);
- struct net_device *dev = priv->net_dev;
+ struct net_device *ndev = priv->net_dev;
int rx_work_done;
/* ack interrupts */
- dmac_writel(priv, priv->dma_chan_int_mask,
+ dmac_writel(priv, DMAC_IR_PKTDONE_MASK,
DMAC_IR_REG, priv->rx_chan);
- dmac_writel(priv, priv->dma_chan_int_mask,
+ dmac_writel(priv, DMAC_IR_PKTDONE_MASK,
DMAC_IR_REG, priv->tx_chan);
/* reclaim sent skb */
- bcm6368_enetsw_tx_reclaim(dev, 0);
+ bcm6368_enetsw_tx_reclaim(ndev, 0, budget);
spin_lock(&priv->rx_lock);
- rx_work_done = bcm6368_enetsw_receive_queue(dev, budget);
+ rx_work_done = bcm6368_enetsw_receive_queue(ndev, budget);
spin_unlock(&priv->rx_lock);
if (rx_work_done >= budget) {
napi_complete_done(napi, rx_work_done);
/* restore rx/tx interrupt */
- dmac_writel(priv, priv->dma_chan_int_mask,
- DMAC_IRMASK_REG, priv->rx_chan);
- dmac_writel(priv, priv->dma_chan_int_mask,
- DMAC_IRMASK_REG, priv->tx_chan);
+ dmac_writel(priv, DMAC_IR_PKTDONE_MASK,
+ DMAC_IRMASK_REG, priv->rx_chan);
+ dmac_writel(priv, DMAC_IR_PKTDONE_MASK,
+ DMAC_IRMASK_REG, priv->tx_chan);
return rx_work_done;
}
*/
static irqreturn_t bcm6368_enetsw_isr_dma(int irq, void *dev_id)
{
- struct net_device *dev = dev_id;
- struct bcm6368_enetsw *priv = netdev_priv(dev);
+ struct net_device *ndev = dev_id;
+ struct bcm6368_enetsw *priv = netdev_priv(ndev);
/* mask rx/tx interrupts */
dmac_writel(priv, 0, DMAC_IRMASK_REG, priv->rx_chan);
* tx request callback
*/
static netdev_tx_t
-bcm6368_enetsw_start_xmit(struct sk_buff *skb, struct net_device *dev)
+bcm6368_enetsw_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
- struct bcm6368_enetsw *priv = netdev_priv(dev);
+ struct bcm6368_enetsw *priv = netdev_priv(ndev);
+ struct platform_device *pdev = priv->pdev;
+ struct device *dev = &pdev->dev;
struct bcm6368_enetsw_desc *desc;
u32 len_stat;
netdev_tx_t ret;
+ dma_addr_t p;
/* lock against tx reclaim */
spin_lock(&priv->tx_lock);
/* make sure the tx hw queue is not full, should not happen
* since we stop queue before it's the case */
if (unlikely(!priv->tx_desc_count)) {
- netif_stop_queue(dev);
- dev_err(&priv->pdev->dev, "xmit called with no tx desc "
- "available?\n");
+ netif_stop_queue(ndev);
+ dev_err(dev, "xmit called with no tx desc available?\n");
ret = NETDEV_TX_BUSY;
goto out_unlock;
}
data = skb_put_zero(skb, needed);
}
+ /* map the skb for DMA; if the mapping fails, drop the packet */
+ p = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, p))) {
+ dev_kfree_skb_any(skb);
+ ndev->stats.tx_dropped++;
+ ret = NETDEV_TX_OK;
+ goto out_unlock;
+ }
+
/* point to the next available desc */
desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
priv->tx_skb[priv->tx_curr_desc] = skb;
-
- /* fill descriptor */
- desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
- DMA_TO_DEVICE);
+ desc->address = p;
len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
len_stat |= DMADESC_ESOP_MASK | DMADESC_APPEND_CRC |
desc->len_stat = len_stat;
wmb();
- netdev_sent_queue(dev, skb->len);
+ netdev_sent_queue(ndev, skb->len);
/* kick tx dma */
- dmac_writel(priv, priv->dma_chan_en_mask, DMAC_CHANCFG_REG,
+ dmac_writel(priv, DMAC_CHANCFG_EN_MASK, DMAC_CHANCFG_REG,
priv->tx_chan);
/* stop queue if no more desc available */
if (!priv->tx_desc_count)
- netif_stop_queue(dev);
+ netif_stop_queue(ndev);
- dev->stats.tx_bytes += skb->len;
- dev->stats.tx_packets++;
+ ndev->stats.tx_bytes += skb->len;
+ ndev->stats.tx_packets++;
ret = NETDEV_TX_OK;
out_unlock:
} while (limit--);
}
-static int bcm6368_enetsw_open(struct net_device *dev)
+static int bcm6368_enetsw_open(struct net_device *ndev)
{
- struct bcm6368_enetsw *priv = netdev_priv(dev);
- struct device *kdev = &priv->pdev->dev;
+ struct bcm6368_enetsw *priv = netdev_priv(ndev);
+ struct platform_device *pdev = priv->pdev;
+ struct device *dev = &pdev->dev;
int i, ret;
unsigned int size;
void *p;
dmac_writel(priv, 0, DMAC_IRMASK_REG, priv->tx_chan);
ret = request_irq(priv->irq_rx, bcm6368_enetsw_isr_dma,
- 0, dev->name, dev);
+ 0, ndev->name, ndev);
if (ret)
goto out_freeirq;
if (priv->irq_tx != -1) {
ret = request_irq(priv->irq_tx, bcm6368_enetsw_isr_dma,
- 0, dev->name, dev);
+ 0, ndev->name, ndev);
if (ret)
goto out_freeirq_rx;
}
/* allocate rx dma ring */
size = priv->rx_ring_size * sizeof(struct bcm6368_enetsw_desc);
- p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
+ p = dma_alloc_coherent(dev, size, &priv->rx_desc_dma, GFP_KERNEL);
if (!p) {
- dev_err(kdev, "cannot allocate rx ring %u\n", size);
+ dev_err(dev, "cannot allocate rx ring %u\n", size);
ret = -ENOMEM;
goto out_freeirq_tx;
}
/* allocate tx dma ring */
size = priv->tx_ring_size * sizeof(struct bcm6368_enetsw_desc);
- p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
+ p = dma_alloc_coherent(dev, size, &priv->tx_desc_dma, GFP_KERNEL);
if (!p) {
- dev_err(kdev, "cannot allocate tx ring\n");
+ dev_err(dev, "cannot allocate tx ring\n");
ret = -ENOMEM;
goto out_free_rx_ring;
}
priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size,
GFP_KERNEL);
if (!priv->tx_skb) {
- dev_err(kdev, "cannot allocate tx skb queue\n");
+ dev_err(dev, "cannot allocate tx skb queue\n");
ret = -ENOMEM;
goto out_free_tx_ring;
}
priv->rx_buf = kzalloc(sizeof(unsigned char *) * priv->rx_ring_size,
GFP_KERNEL);
if (!priv->rx_buf) {
- dev_err(kdev, "cannot allocate rx buffer queue\n");
+ dev_err(dev, "cannot allocate rx buffer queue\n");
ret = -ENOMEM;
goto out_free_tx_skb;
}
dma_writel(priv, DMA_BUFALLOC_FORCE_MASK | 0,
DMA_BUFALLOC_REG(priv->rx_chan));
- if (bcm6368_enetsw_refill_rx(dev, false)) {
- dev_err(kdev, "cannot allocate rx buffer queue\n");
+ if (bcm6368_enetsw_refill_rx(ndev, false)) {
+ dev_err(dev, "cannot allocate rx buffer queue\n");
ret = -ENOMEM;
goto out;
}
dmas_writel(priv, 0, DMAS_SRAM4_REG, priv->tx_chan);
/* set dma maximum burst len */
- dmac_writel(priv, priv->dma_maxburst,
+ dmac_writel(priv, ENETSW_DMA_MAXBURST,
DMAC_MAXBURST_REG, priv->rx_chan);
- dmac_writel(priv, priv->dma_maxburst,
+ dmac_writel(priv, ENETSW_DMA_MAXBURST,
DMAC_MAXBURST_REG, priv->tx_chan);
/* set flow control low/high threshold to 1/3 / 2/3 */
dmac_writel(priv, DMAC_IR_PKTDONE_MASK,
DMAC_IRMASK_REG, priv->tx_chan);
- netif_carrier_on(dev);
- netif_start_queue(dev);
+ netif_carrier_on(ndev);
+ netif_start_queue(ndev);
return 0;
continue;
desc = &priv->rx_desc_cpu[i];
- dma_unmap_single(kdev, desc->address, priv->rx_buf_size,
+ dma_unmap_single(dev, desc->address, priv->rx_buf_size,
DMA_FROM_DEVICE);
skb_free_frag(priv->rx_buf[i]);
}
kfree(priv->tx_skb);
out_free_tx_ring:
- dma_free_coherent(kdev, priv->tx_desc_alloc_size,
+ dma_free_coherent(dev, priv->tx_desc_alloc_size,
priv->tx_desc_cpu, priv->tx_desc_dma);
out_free_rx_ring:
- dma_free_coherent(kdev, priv->rx_desc_alloc_size,
+ dma_free_coherent(dev, priv->rx_desc_alloc_size,
priv->rx_desc_cpu, priv->rx_desc_dma);
out_freeirq_tx:
if (priv->irq_tx != -1)
- free_irq(priv->irq_tx, dev);
+ free_irq(priv->irq_tx, ndev);
out_freeirq_rx:
- free_irq(priv->irq_rx, dev);
+ free_irq(priv->irq_rx, ndev);
out_freeirq:
return ret;
}
-static int bcm6368_enetsw_stop(struct net_device *dev)
+static int bcm6368_enetsw_stop(struct net_device *ndev)
{
- struct bcm6368_enetsw *priv = netdev_priv(dev);
- struct device *kdev = &priv->pdev->dev;
+ struct bcm6368_enetsw *priv = netdev_priv(ndev);
+ struct platform_device *pdev = priv->pdev;
+ struct device *dev = &pdev->dev;
int i;
- netif_stop_queue(dev);
+ netif_stop_queue(ndev);
napi_disable(&priv->napi);
del_timer_sync(&priv->rx_timeout);
bcm6368_enetsw_disable_dma(priv, priv->rx_chan);
/* force reclaim of all tx buffers */
- bcm6368_enetsw_tx_reclaim(dev, 1);
+ bcm6368_enetsw_tx_reclaim(ndev, 1, 0);
/* free the rx buffer ring */
for (i = 0; i < priv->rx_ring_size; i++) {
continue;
desc = &priv->rx_desc_cpu[i];
- dma_unmap_single_attrs(kdev, desc->address, priv->rx_buf_size,
+ dma_unmap_single_attrs(dev, desc->address, priv->rx_buf_size,
DMA_FROM_DEVICE,
DMA_ATTR_SKIP_CPU_SYNC);
skb_free_frag(priv->rx_buf[i]);
/* free remaining allocated memory */
kfree(priv->rx_buf);
kfree(priv->tx_skb);
- dma_free_coherent(kdev, priv->rx_desc_alloc_size,
+ dma_free_coherent(dev, priv->rx_desc_alloc_size,
priv->rx_desc_cpu, priv->rx_desc_dma);
- dma_free_coherent(kdev, priv->tx_desc_alloc_size,
+ dma_free_coherent(dev, priv->tx_desc_alloc_size,
priv->tx_desc_cpu, priv->tx_desc_dma);
if (priv->irq_tx != -1)
- free_irq(priv->irq_tx, dev);
- free_irq(priv->irq_rx, dev);
+ free_irq(priv->irq_tx, ndev);
+ free_irq(priv->irq_rx, ndev);
- netdev_reset_queue(dev);
+ netdev_reset_queue(ndev);
return 0;
}
static int bcm6368_enetsw_probe(struct platform_device *pdev)
{
- struct bcm6368_enetsw *priv;
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
+ struct bcm6368_enetsw *priv;
struct net_device *ndev;
struct resource *res;
+ unsigned char dev_addr[ETH_ALEN];
unsigned i;
+ int num_resets;
int ret;
- ndev = alloc_etherdev(sizeof(*priv));
+ ndev = devm_alloc_etherdev(dev, sizeof(*priv));
if (!ndev)
return -ENOMEM;
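+ /* set drvdata early so callbacks and remove() can reach the netdev */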
+ platform_set_drvdata(pdev, ndev);
+ SET_NETDEV_DEV(ndev, dev);
+
priv = netdev_priv(ndev);
+ priv->pdev = pdev;
+ priv->net_dev = ndev;
priv->num_pms = of_count_phandle_with_args(node, "power-domains",
"#power-domain-cells");
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dma");
priv->dma_base = devm_ioremap_resource(dev, res);
if (IS_ERR(priv->dma_base))
return PTR_ERR(priv->dma_base);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"dma-channels");
priv->dma_chan = devm_ioremap_resource(dev, res);
if (IS_ERR(priv->dma_chan))
return PTR_ERR(priv->dma_chan);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dma-sram");
priv->dma_sram = devm_ioremap_resource(dev, res);
if (IS_ERR(priv->dma_sram))
return PTR_ERR(priv->dma_sram);
priv->irq_rx = platform_get_irq_byname(pdev, "rx");
priv->rx_ring_size = ENETSW_DEF_RX_DESC;
priv->tx_ring_size = ENETSW_DEF_TX_DESC;
-
- priv->dma_maxburst = ENETSW_DMA_MAXBURST;
-
priv->copybreak = ENETSW_DEF_CPY_BREAK;
- priv->dma_chan_en_mask = DMAC_CHANCFG_EN_MASK;
- priv->dma_chan_int_mask = DMAC_IR_PKTDONE_MASK;
- priv->dma_chan_width = DMA_CHAN_WIDTH;
-
- of_get_mac_address(node, ndev->dev_addr);
- if (is_valid_ether_addr(ndev->dev_addr)) {
- dev_info(dev, "mtd mac %pM\n", ndev->dev_addr);
+ if (!of_get_mac_address(node, dev_addr)) {
+ dev_addr_set(ndev, dev_addr);
+ dev_info(dev, "mtd mac %pM\n", dev_addr);
} else {
- random_ether_addr(ndev->dev_addr);
- dev_info(dev, "random mac %pM\n", ndev->dev_addr);
+ eth_hw_addr_random(ndev);
+ dev_info(dev, "random mac %pM\n", ndev->dev_addr);
}
- priv->rx_buf_size = ALIGN(ndev->mtu + ENETSW_MTU_OVERHEAD,
- priv->dma_maxburst * 4);
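+ /* keep the rx buffer size a multiple of the maximum DMA burst size */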
+ priv->rx_buf_size = ALIGN(ENETSW_MAX_FRAME,
+ ENETSW_DMA_MAXBURST * 4);
priv->rx_frag_size = ENETSW_FRAG_SIZE(priv->rx_buf_size);
if (priv->num_clocks) {
priv->clock = devm_kcalloc(dev, priv->num_clocks,
sizeof(struct clk *), GFP_KERNEL);
if (!priv->clock)
return -ENOMEM;
}
for (i = 0; i < priv->num_clocks; i++) {
priv->clock[i] = of_clk_get(node, i);
if (IS_ERR(priv->clock[i])) {
dev_err(dev, "error getting clock %d\n", i);
- return -EINVAL;
+ return PTR_ERR(priv->clock[i]);
}
ret = clk_prepare_enable(priv->clock[i]);
}
}
- priv->num_resets = of_count_phandle_with_args(node, "resets",
- "#reset-cells");
+ num_resets = of_count_phandle_with_args(node, "resets",
+ "#reset-cells");
+ priv->num_resets = num_resets > 0 ? num_resets : 0;
if (priv->num_resets) {
priv->reset = devm_kcalloc(dev, priv->num_resets,
sizeof(struct reset_control *),
GFP_KERNEL);
if (!priv->reset)
return -ENOMEM;
}
for (i = 0; i < priv->num_resets; i++) {
priv->reset[i] = devm_reset_control_get_by_index(dev, i);
if (IS_ERR(priv->reset[i])) {
dev_err(dev, "error getting reset %d\n", i);
- return -EINVAL;
+ return PTR_ERR(priv->reset[i]);
}
ret = reset_control_reset(priv->reset[i]);
/* register netdevice */
ndev->netdev_ops = &bcm6368_enetsw_ops;
ndev->min_mtu = ETH_ZLEN;
- ndev->mtu = ETH_DATA_LEN + ENETSW_TAG_SIZE;
- ndev->max_mtu = ETH_DATA_LEN + ENETSW_TAG_SIZE;
+ ndev->mtu = ETH_DATA_LEN;
+ ndev->max_mtu = ENETSW_MAX_MTU;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 1, 0)
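+ /* netif_napi_add() dropped its weight argument in Linux 6.1 */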
+ netif_napi_add(ndev, &priv->napi, bcm6368_enetsw_poll);
+#else
netif_napi_add(ndev, &priv->napi, bcm6368_enetsw_poll, 16);
- SET_NETDEV_DEV(ndev, dev);
+#endif
- ret = register_netdev(ndev);
- if (ret)
+ ret = devm_register_netdev(dev, ndev);
+ if (ret) {
+ netif_napi_del(&priv->napi);
goto out_disable_clk;
+ }
netif_carrier_off(ndev);
- platform_set_drvdata(pdev, ndev);
- priv->pdev = pdev;
- priv->net_dev = ndev;
+
+ dev_info(dev, "%s at 0x%px, IRQ %d\n", ndev->name, priv->dma_base, priv->irq_rx);
return 0;
struct bcm6368_enetsw *priv = netdev_priv(ndev);
unsigned int i;
- unregister_netdev(ndev);
-
pm_runtime_put_sync(dev);
for (i = 0; priv->pm && i < priv->num_pms; i++) {
dev_pm_domain_detach(priv->pm[i], true);
for (i = 0; i < priv->num_clocks; i++)
clk_disable_unprepare(priv->clock[i]);
- free_netdev(ndev);
-
return 0;
}
.remove = bcm6368_enetsw_remove,
};
module_platform_driver(bcm6368_enetsw_driver);
+
+MODULE_AUTHOR("Álvaro Fernández Rojas <noltari@gmail.com>");
+MODULE_DESCRIPTION("BCM6368 Ethernet Switch Controller Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:bcm6368-enetsw");