batman-adv: Merge bugfixes from 2019.5
diff --git a/batman-adv/patches/0073-batman-adv-Avoid-OGM-workqueue-synchronous-cancel-de.patch b/batman-adv/patches/0073-batman-adv-Avoid-OGM-workqueue-synchronous-cancel-de.patch
new file mode 100644
index 0000000..d726d30
--- /dev/null
+++ b/batman-adv/patches/0073-batman-adv-Avoid-OGM-workqueue-synchronous-cancel-de.patch
@@ -0,0 +1,261 @@
+From: Sven Eckelmann <sven@narfation.org>
+Date: Sun, 13 Oct 2019 21:03:07 +0200
+Subject: batman-adv: Avoid OGM workqueue synchronous cancel deadlock
+
+batadv_forw_packet_list_free can be called when an interface is being
+disabled. Under this circumstance, the rtnl_lock is held while it calls
+cancel_delayed_work_sync.
+
+cancel_delayed_work_sync blocks the calling context until the work item
+has finished executing. It can therefore happen that
+cancel_delayed_work_sync is called while
+batadv_iv_send_outstanding_bat_ogm_packet has already taken the rtnl_lock
+or is about to take it. In this case,
+batadv_iv_send_outstanding_bat_ogm_packet waits for the rtnl_lock while
+the caller of cancel_delayed_work_sync (which already holds the
+rtnl_lock) waits for the work item to finish: a deadlock.
+
+This can only be avoided by not taking conflicting blocking locks while
+cancel_delayed_work_sync is called. Using a dedicated per-interface
+mutex instead of the rtnl_lock also has the benefit that the OGM
+scheduling functionality avoids unnecessary delays introduced by a
+global lock.
+
+Fixes: 9b8ceef26c69 ("batman-adv: Avoid free/alloc race when handling OGM buffer")
+Signed-off-by: Sven Eckelmann <sven@narfation.org>
+
+Origin: upstream, https://git.open-mesh.org/batman-adv.git/commit/d3be478f1aa27b47f61c4a62e18eb063d47c9168
+
+diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
+index ccb60591a01886ceef22408e9387a8a3fda05a36..80fc960e656eb2f11f58fc8211235e033331bfd5 100644
+--- a/net/batman-adv/bat_iv_ogm.c
++++ b/net/batman-adv/bat_iv_ogm.c
+@@ -34,6 +34,7 @@
+ #include <linux/kref.h>
+ #include <linux/list.h>
+ #include <linux/lockdep.h>
++#include <linux/mutex.h>
+ #include <linux/netdevice.h>
+ #include <linux/netlink.h>
+ #include <linux/pkt_sched.h>
+@@ -41,7 +42,6 @@
+ #include <linux/random.h>
+ #include <linux/rculist.h>
+ #include <linux/rcupdate.h>
+-#include <linux/rtnetlink.h>
+ #include <linux/seq_file.h>
+ #include <linux/skbuff.h>
+ #include <linux/slab.h>
+@@ -371,7 +371,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
+       unsigned char *ogm_buff;
+       u32 random_seqno;
+-      ASSERT_RTNL();
++      mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
+       /* randomize initial seqno to avoid collision */
+       get_random_bytes(&random_seqno, sizeof(random_seqno));
+@@ -379,8 +379,10 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
+       hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
+       ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
+-      if (!ogm_buff)
++      if (!ogm_buff) {
++              mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
+               return -ENOMEM;
++      }
+       hard_iface->bat_iv.ogm_buff = ogm_buff;
+@@ -392,41 +394,59 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
+       batadv_ogm_packet->reserved = 0;
+       batadv_ogm_packet->tq = BATADV_TQ_MAX_VALUE;
++      mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
++
+       return 0;
+ }
+ static void batadv_iv_ogm_iface_disable(struct batadv_hard_iface *hard_iface)
+ {
+-      ASSERT_RTNL();
++      mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
+       kfree(hard_iface->bat_iv.ogm_buff);
+       hard_iface->bat_iv.ogm_buff = NULL;
++
++      mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
+ }
+ static void batadv_iv_ogm_iface_update_mac(struct batadv_hard_iface *hard_iface)
+ {
+       struct batadv_ogm_packet *batadv_ogm_packet;
+-      unsigned char *ogm_buff = hard_iface->bat_iv.ogm_buff;
++      void *ogm_buff;
+-      ASSERT_RTNL();
++      mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
+-      batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff;
++      ogm_buff = hard_iface->bat_iv.ogm_buff;
++      if (!ogm_buff)
++              goto unlock;
++
++      batadv_ogm_packet = ogm_buff;
+       ether_addr_copy(batadv_ogm_packet->orig,
+                       hard_iface->net_dev->dev_addr);
+       ether_addr_copy(batadv_ogm_packet->prev_sender,
+                       hard_iface->net_dev->dev_addr);
++
++unlock:
++      mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
+ }
+ static void
+ batadv_iv_ogm_primary_iface_set(struct batadv_hard_iface *hard_iface)
+ {
+       struct batadv_ogm_packet *batadv_ogm_packet;
+-      unsigned char *ogm_buff = hard_iface->bat_iv.ogm_buff;
++      void *ogm_buff;
+-      ASSERT_RTNL();
++      mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
+-      batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff;
++      ogm_buff = hard_iface->bat_iv.ogm_buff;
++      if (!ogm_buff)
++              goto unlock;
++
++      batadv_ogm_packet = ogm_buff;
+       batadv_ogm_packet->ttl = BATADV_TTL;
++
++unlock:
++      mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
+ }
+ /* when do we schedule our own ogm to be sent */
+@@ -925,7 +945,11 @@ batadv_iv_ogm_slide_own_bcast_window(struct batadv_hard_iface *hard_iface)
+       }
+ }
+-static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
++/**
++ * batadv_iv_ogm_schedule_buff() - schedule submission of hardif ogm buffer
++ * @hard_iface: interface whose ogm buffer should be transmitted
++ */
++static void batadv_iv_ogm_schedule_buff(struct batadv_hard_iface *hard_iface)
+ {
+       struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+       unsigned char **ogm_buff = &hard_iface->bat_iv.ogm_buff;
+@@ -936,11 +960,7 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
+       u16 tvlv_len = 0;
+       unsigned long send_time;
+-      ASSERT_RTNL();
+-
+-      if ((hard_iface->if_status == BATADV_IF_NOT_IN_USE) ||
+-          (hard_iface->if_status == BATADV_IF_TO_BE_REMOVED))
+-              return;
++      lockdep_assert_held(&hard_iface->bat_iv.ogm_buff_mutex);
+       /* the interface gets activated here to avoid race conditions between
+        * the moment of activating the interface in
+@@ -1008,6 +1028,17 @@ out:
+               batadv_hardif_put(primary_if);
+ }
++static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
++{
++      if (hard_iface->if_status == BATADV_IF_NOT_IN_USE ||
++          hard_iface->if_status == BATADV_IF_TO_BE_REMOVED)
++              return;
++
++      mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
++      batadv_iv_ogm_schedule_buff(hard_iface);
++      mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
++}
++
+ /**
+  * batadv_iv_ogm_orig_update - use OGM to update corresponding data in an
+  *  originator
+@@ -1792,12 +1823,16 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
+       batadv_orig_node_put(orig_node);
+ }
+-static void
+-batadv_iv_send_outstanding_forw_packet(struct batadv_forw_packet *forw_packet)
++static void batadv_iv_send_outstanding_bat_ogm_packet(struct work_struct *work)
+ {
++      struct delayed_work *delayed_work;
++      struct batadv_forw_packet *forw_packet;
+       struct batadv_priv *bat_priv;
+       bool dropped = false;
++      delayed_work = to_delayed_work(work);
++      forw_packet = container_of(delayed_work, struct batadv_forw_packet,
++                                 delayed_work);
+       bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
+       if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING) {
+@@ -1826,20 +1861,6 @@ out:
+               batadv_forw_packet_free(forw_packet, dropped);
+ }
+-static void batadv_iv_send_outstanding_bat_ogm_packet(struct work_struct *work)
+-{
+-      struct delayed_work *delayed_work;
+-      struct batadv_forw_packet *forw_packet;
+-
+-      delayed_work = to_delayed_work(work);
+-      forw_packet = container_of(delayed_work, struct batadv_forw_packet,
+-                                 delayed_work);
+-
+-      rtnl_lock();
+-      batadv_iv_send_outstanding_forw_packet(forw_packet);
+-      rtnl_unlock();
+-}
+-
+ static int batadv_iv_ogm_receive(struct sk_buff *skb,
+                                struct batadv_hard_iface *if_incoming)
+ {
+diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
+index 6d96ecd14fb0881e3384850bc34063b999fe5c93..0060d3cf2cfcad24fb26c190e588689e768584d5 100644
+--- a/net/batman-adv/hard-interface.c
++++ b/net/batman-adv/hard-interface.c
+@@ -28,6 +28,7 @@
+ #include <linux/kernel.h>
+ #include <linux/kref.h>
+ #include <linux/list.h>
++#include <linux/mutex.h>
+ #include <linux/netdevice.h>
+ #include <linux/printk.h>
+ #include <linux/rculist.h>
+@@ -905,6 +906,7 @@ batadv_hardif_add_interface(struct net_device *net_dev)
+       INIT_LIST_HEAD(&hard_iface->list);
+       INIT_HLIST_HEAD(&hard_iface->neigh_list);
++      mutex_init(&hard_iface->bat_iv.ogm_buff_mutex);
+       spin_lock_init(&hard_iface->neigh_list_lock);
+       kref_init(&hard_iface->refcount);
+diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
+index 3d9704ce31b4a162c01a74021ef18d53d992d506..6854cb2b107024ad3588ac7eedd71339e529e4f8 100644
+--- a/net/batman-adv/types.h
++++ b/net/batman-adv/types.h
+@@ -79,14 +79,16 @@ enum batadv_dhcp_recipient {
+ /**
+  * struct batadv_hard_iface_bat_iv - per hard-interface B.A.T.M.A.N. IV data
+- * @ogm_buff: buffer holding the OGM packet. rtnl protected
+- * @ogm_buff_len: length of the OGM packet buffer. rtnl protected
++ * @ogm_buff: buffer holding the OGM packet
++ * @ogm_buff_len: length of the OGM packet buffer
+  * @ogm_seqno: OGM sequence number - used to identify each OGM
++ * @ogm_buff_mutex: lock protecting ogm_buff and ogm_buff_len
+  */
+ struct batadv_hard_iface_bat_iv {
+       unsigned char *ogm_buff;
+       int ogm_buff_len;
+       atomic_t ogm_seqno;
++      struct mutex ogm_buff_mutex;
+ };
+ /**
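
To make the failure mode described in the commit message easier to follow, here is a minimal, self-contained sketch of the old locking scheme. It is not batman-adv code; the names foo_dev, foo_work_fn and foo_teardown are invented for illustration, but the kernel APIs involved (rtnl_lock, cancel_delayed_work_sync, to_delayed_work) are the ones that actually deadlocked.

/* Illustrative only: reproduces the lock ordering the patch removes. */
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>

struct foo_dev {
        struct delayed_work work;
        unsigned char *ogm_buff;        /* protected by rtnl in the old scheme */
};

static void foo_work_fn(struct work_struct *work)
{
        struct foo_dev *dev = container_of(to_delayed_work(work),
                                           struct foo_dev, work);

        rtnl_lock();                    /* (2) worker blocks on rtnl ...     */
        /* ... rebuild and send dev->ogm_buff ... */
        rtnl_unlock();
}

static void foo_teardown(struct foo_dev *dev)
{
        ASSERT_RTNL();                  /* (1) caller already holds rtnl     */

        /* (3) ... while we wait for the worker to finish: deadlock */
        cancel_delayed_work_sync(&dev->work);
}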
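
And, keeping the same hypothetical names, a sketch of the scheme the patch switches to: the OGM buffer gets its own mutex, so no conflicting lock is held around cancel_delayed_work_sync and the worker no longer depends on the rtnl_lock.

/* Illustrative only: per-object mutex instead of the global rtnl_lock. */
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct foo_dev {
        struct delayed_work work;
        struct mutex ogm_buff_mutex;    /* protects ogm_buff and ogm_buff_len */
        unsigned char *ogm_buff;
        int ogm_buff_len;
};

static void foo_work_fn(struct work_struct *work)
{
        struct foo_dev *dev = container_of(to_delayed_work(work),
                                           struct foo_dev, work);

        mutex_lock(&dev->ogm_buff_mutex);
        if (dev->ogm_buff) {
                /* ... rebuild and send dev->ogm_buff ... */
        }
        mutex_unlock(&dev->ogm_buff_mutex);
}

static void foo_teardown(struct foo_dev *dev)
{
        /* The caller may still hold rtnl, but the worker never takes it,
         * so waiting for it synchronously is now safe.
         */
        cancel_delayed_work_sync(&dev->work);

        mutex_lock(&dev->ogm_buff_mutex);
        kfree(dev->ogm_buff);
        dev->ogm_buff = NULL;
        mutex_unlock(&dev->ogm_buff_mutex);
}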