+From d1f13e24ec3ebdadc2bc08c9d4708197279096fa Mon Sep 17 00:00:00 2001
+From: Antonio Quartulli <ordex@autistici.org>
+Date: Wed, 20 Jun 2012 14:12:56 +0200
+Subject: [PATCH] batman-adv: fix race condition in TT full-table replacement
+
+bug introduced with cea194d90b11aff7fc289149e4c7f305fad3535a
+
+In the current TT code, when a TT_Response containing a full table is received
+from an originator, first the node purges all the clients for that originator in
+the global translation-table and then merges the newly received table.
+During the purging phase each client deletion is done by means of a call_rcu()
+invocation and at the end of this phase the global entry counter for that
+originator is set to 0. However the invoked rcu function decreases the global
+entry counter for that originator by one too and since the rcu invocation is
+likely to be postponed, the node will end up first setting the counter to 0
+and then decreasing it one by one for each deleted client.
+
+This bug leads to having a wrong global entry counter for the related node, say
+X. Then, when the node with the broken counter answers a TT_REQUEST on
+behalf of node X, it will create a faulty TT_RESPONSE that will generate an
+unrecoverable situation on the node that asked for the full table recovery.
+
+The non-recoverability is given by the fact that the node with the broken
+counter will keep answering on behalf of X because its knowledge about X's state
+(ttvn + tt_crc) is correct.
+
+To solve this problem the counter is not explicitly set to 0 anymore and the
+counter decrement is performed right before the invocation of call_rcu().
+
+Signed-off-by: Antonio Quartulli <ordex@autistici.org>
+---
+ translation-table.c | 4 ++--
+ 1 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/translation-table.c b/translation-table.c
+index 660c40f..2ab83d7 100644
+--- a/translation-table.c
++++ b/translation-table.c
+@@ -141,13 +141,14 @@ static void tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
+ struct tt_orig_list_entry *orig_entry;
+
+ orig_entry = container_of(rcu, struct tt_orig_list_entry, rcu);
+- atomic_dec(&orig_entry->orig_node->tt_size);
+ orig_node_free_ref(orig_entry->orig_node);
+ kfree(orig_entry);
+ }
+
+ static void tt_orig_list_entry_free_ref(struct tt_orig_list_entry *orig_entry)
+ {
++ /* to avoid race conditions, immediately decrease the tt counter */
++ atomic_dec(&orig_entry->orig_node->tt_size);
+ call_rcu(&orig_entry->rcu, tt_orig_list_entry_free_rcu);
+ }
+
+@@ -910,7 +911,6 @@ void tt_global_del_orig(struct bat_priv *bat_priv,
+ }
+ spin_unlock_bh(list_lock);
+ }
+- atomic_set(&orig_node->tt_size, 0);
+ orig_node->tt_initialised = false;
+ }
+
+--
+1.7.9.1
+