mvebu: backport upstream ethernet driver improvements and enable buffer manager support
[openwrt/staging/lynxis/omap.git] / target/linux/mvebu/patches-4.4/039-net-mvneta-Use-on_each_cpu-when-possible.patch
diff --git a/target/linux/mvebu/patches-4.4/039-net-mvneta-Use-on_each_cpu-when-possible.patch b/target/linux/mvebu/patches-4.4/039-net-mvneta-Use-on_each_cpu-when-possible.patch
new file mode 100644
index 0000000..76257a2
--- /dev/null
+++ b/target/linux/mvebu/patches-4.4/039-net-mvneta-Use-on_each_cpu-when-possible.patch
@@ -0,0 +1,68 @@
+From: Gregory CLEMENT <gregory.clement@free-electrons.com>
+Date: Thu, 4 Feb 2016 22:09:25 +0100
+Subject: [PATCH] net: mvneta: Use on_each_cpu when possible
+
+Instead of using a for_each_* loop in which we just call the
+smp_call_function_single function, it is simpler to directly use the
+on_each_cpu helper. Moreover, on_each_cpu issues the calls to all
+CPUs at once instead of one CPU at a time.
+
+Suggested-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -2553,7 +2553,7 @@ static void mvneta_percpu_mask_interrupt
+ 
+ static void mvneta_start_dev(struct mvneta_port *pp)
+ {
+-      unsigned int cpu;
++      int cpu;
+ 
+       mvneta_max_rx_size_set(pp, pp->pkt_size);
+       mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
+@@ -2569,9 +2569,8 @@ static void mvneta_start_dev(struct mvne
+       }
+ 
+       /* Unmask interrupts. It has to be done from each CPU */
+-      for_each_online_cpu(cpu)
+-              smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt,
+-                                       pp, true);
++      on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
++
+       mvreg_write(pp, MVNETA_INTR_MISC_MASK,
+                   MVNETA_CAUSE_PHY_STATUS_CHANGE |
+                   MVNETA_CAUSE_LINK_CHANGE |
+@@ -2991,7 +2990,7 @@ static int mvneta_percpu_notifier(struct
+ static int mvneta_open(struct net_device *dev)
+ {
+       struct mvneta_port *pp = netdev_priv(dev);
+-      int ret, cpu;
++      int ret;
+ 
+       pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
+       pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
+@@ -3024,9 +3023,7 @@ static int mvneta_open(struct net_device
+       /* Enable per-CPU interrupt on all the CPU to handle our RX
+        * queue interrupts
+        */
+-      for_each_online_cpu(cpu)
+-              smp_call_function_single(cpu, mvneta_percpu_enable,
+-                                       pp, true);
++      on_each_cpu(mvneta_percpu_enable, pp, true);
+ 
+       /* Register a CPU notifier to handle the case where our CPU
+        * might be taken offline.
+@@ -3313,9 +3310,7 @@ static int  mvneta_config_rss(struct mvn
+ 
+       netif_tx_stop_all_queues(pp->dev);
+ 
+-      for_each_online_cpu(cpu)
+-              smp_call_function_single(cpu, mvneta_percpu_mask_interrupt,
+-                                       pp, true);
++      on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
+ 
+       /* We have to synchronise on the napi of each CPU */
+       for_each_online_cpu(cpu) {
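
For reference, the two invocation styles the patch swaps behave as follows: the removed loop sends one synchronous IPI per online CPU, serialized because wait=true blocks until each callback returns, while on_each_cpu() IPIs all remote CPUs at once and additionally runs the callback on the calling CPU with interrupts disabled. Below is a minimal, self-contained module sketch contrasting the two patterns; the demo_* names are hypothetical and not part of mvneta, while on_each_cpu(), smp_call_function_single() and for_each_online_cpu() are the actual kernel APIs from <linux/smp.h> and <linux/cpumask.h>.

/* Minimal sketch (not mvneta code): contrast the serialized per-CPU
 * calls with a single on_each_cpu() broadcast. The demo_* names are
 * hypothetical; the smp helpers are the real kernel APIs.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/cpumask.h>

/* Callback with the smp_call_func_t signature both helpers expect. */
static void demo_percpu_hello(void *info)
{
        pr_info("demo: callback on CPU %d\n", smp_processor_id());
}

static int __init demo_init(void)
{
        int cpu;

        /* Old pattern: one synchronous IPI per online CPU; the loop is
         * serialized because wait=true blocks until each call returns.
         */
        for_each_online_cpu(cpu)
                smp_call_function_single(cpu, demo_percpu_hello, NULL, true);

        /* New pattern: remote CPUs are IPIed all at once, and the
         * callback also runs on the local CPU with interrupts disabled.
         */
        on_each_cpu(demo_percpu_hello, NULL, true);

        return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("on_each_cpu vs smp_call_function_single sketch");

With wait=true in either form, completion on every CPU is guaranteed before the call site continues, which is why the substitution is safe in mvneta_start_dev(), mvneta_open() and mvneta_config_rss().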