Merge pull request #9660 from liudf0716/master-kcptun
authorRosen Penev <rosenp@gmail.com>
Thu, 8 Aug 2019 16:38:13 +0000 (09:38 -0700)
committerGitHub <noreply@github.com>
Thu, 8 Aug 2019 16:38:13 +0000 (09:38 -0700)
kcptun: add new package

65 files changed:
lang/python/python-pyasn1-modules/Makefile
lang/python/python-pyasn1/Makefile
libs/libredblack/Makefile
libs/libsigar/Makefile
libs/libsigar/patches/020-sysmacros.patch [new file with mode: 0644]
multimedia/graphicsmagick/Makefile
multimedia/imagemagick/Makefile
net/banip/Makefile
net/banip/files/README.md
net/banip/files/banip.conf
net/banip/files/banip.init
net/banip/files/banip.sh
net/haproxy/Makefile
net/haproxy/patches/000-BUG-MEDIUM-protocols-add-a-global-lock-for-the-init-deinit-stuff.patch [new file with mode: 0644]
net/haproxy/patches/000-OPENWRT-add-uclibc-support.patch [deleted file]
net/haproxy/patches/001-BUG-MINOR-proxy-always-lock-stop_proxy.patch [new file with mode: 0644]
net/haproxy/patches/001-OPENWRT-openssl-deprecated.patch [deleted file]
net/haproxy/patches/002-BUILD-threads-add-the-definition-of-PROTO_LOCK.patch [new file with mode: 0644]
net/haproxy/patches/003-BUG-MEDIUM-lb-chash-Fix-the-realloc-when-the-number-of-nodes-is-increased.patch [new file with mode: 0644]
net/haproxy/patches/004-BUG-MEDIUM-streams-Dont-switch-the-SI-to-SI_ST_DIS-if-we-have-data-to-send.patch [new file with mode: 0644]
net/haproxy/patches/005-BUG-MINOR-log-make-sure-writev-is-not-interrupted-on-a-file-output.patch [new file with mode: 0644]
net/haproxy/patches/006-DOC-improve-the-wording-in-CONTRIBUTING-about-how-to-document-a-bug-fix.patch [new file with mode: 0644]
net/haproxy/patches/007-BUG-MINOR-hlua-htx-Reset-channels-analyzers-when-txn-done-is-called.patch [new file with mode: 0644]
net/haproxy/patches/008-BUG-MEDIUM-hlua-Check-the-calling-direction-in-lua-functions-of-the-HTTP-class.patch [new file with mode: 0644]
net/haproxy/patches/009-MINOR-hlua-Dont-set-request-analyzers-on-response-channel-for-lua-actions.patch [new file with mode: 0644]
net/haproxy/patches/010-MINOR-hlua-Add-a-flag-on-the-lua-txn-to-know-in-which-context-it-can-be-used.patch [new file with mode: 0644]
net/haproxy/patches/011-BUG-MINOR-hlua-Only-execute-functions-of-HTTP-class-if-the-txn-is-HTTP-ready.patch [new file with mode: 0644]
net/haproxy/patches/012-BUG-MINOR-htx-Fix-free-space-addresses-calculation-during-a-block-expansion.patch [new file with mode: 0644]
net/haproxy/patches/013-BUG-MAJOR-queue-threads-avoid-an-AB-BA-locking-issue-in-process_srv_queue.patch [new file with mode: 0644]
net/haproxy/patches/014-BUG-MINOR-debug-fix-a-small-race-in-the-thread-dumping-code.patch [new file with mode: 0644]
net/haproxy/patches/015-MINOR-wdt-also-consider-that-waiting-in-the-thread-dumper-is-normal.patch [new file with mode: 0644]
net/haproxy/patches/016-BUG-MEDIUM-lb-chash-Ensure-the-tree-integrity-when-server-weight-is-increased.patch [new file with mode: 0644]
net/haproxy/patches/017-BUG-MAJOR-http-sample-use-a-static-buffer-for-raw---htx-conversion.patch [new file with mode: 0644]
net/haproxy/patches/018-BUG-MINOR-stream-int-also-update-analysers-timeouts-on-activity.patch [new file with mode: 0644]
net/haproxy/patches/019-BUG-MEDIUM-mux-h2-unbreak-receipt-of-large-DATA-frames.patch [new file with mode: 0644]
net/haproxy/patches/020-BUG-MEDIUM-mux-h2-split-the-streams-and-connections-window-sizes.patch [new file with mode: 0644]
net/haproxy/patches/021-OPENWRT-add-uclibc-support.patch [new file with mode: 0644]
net/haproxy/patches/022-OPENWRT-openssl-deprecated.patch [new file with mode: 0644]
net/keepalived/Makefile
net/keepalived/files/keepalived.init
net/nginx/Makefile
net/nginx/files-luci-support/luci_nginx.conf
net/nginx/files-luci-support/luci_nginx_ssl.conf
net/nsd/Makefile
net/nsd/patches/010-musl.patch [new file with mode: 0644]
net/nsd/patches/020-openssl.patch [new file with mode: 0644]
net/phantap/Makefile
net/stubby/files/README.md
net/subversion/Makefile
net/travelmate/Makefile
net/travelmate/files/README.md
net/travelmate/files/travelmate.sh
net/travelmate/files/wifionice.login [new file with mode: 0755]
net/zerotier/Makefile
net/zerotier/patches/0005-link-natpmp.patch [new file with mode: 0644]
utils/btrfs-progs/Makefile
utils/btrfs-progs/files/btrfs-scan.init
utils/gddrescue/Makefile
utils/gddrescue/patches/010-fix-uclibcxx.patch [new file with mode: 0644]
utils/mt-st/Makefile
utils/mt-st/patches/010-sysmacros.patch [new file with mode: 0644]
utils/pservice/Makefile [new file with mode: 0644]
utils/pservice/README.md [new file with mode: 0644]
utils/pservice/files/pservice.config [new file with mode: 0644]
utils/pservice/files/pservice.init [new file with mode: 0755]

index 8456a55aa27caf70ad0eb473887fa2e693ccdf95..10b1160ef2ed6b3f87ebfd24021d3a85a3d37b6c 100644 (file)
@@ -8,12 +8,12 @@
 include $(TOPDIR)/rules.mk
 
 PKG_NAME:=python-pyasn1-modules
-PKG_VERSION:=0.2.5
+PKG_VERSION:=0.2.6
 PKG_RELEASE:=1
 
 PKG_SOURCE:=pyasn1-modules-$(PKG_VERSION).tar.gz
 PKG_SOURCE_URL:=https://files.pythonhosted.org/packages/source/p/pyasn1-modules
-PKG_HASH:=ef721f68f7951fab9b0404d42590f479e30d9005daccb1699b0a51bb4177db96
+PKG_HASH:=43c17a83c155229839cc5c6b868e8d0c6041dba149789b6d6e28801c64821722
 
 PKG_LICENSE:=BSD-2-Clause
 PKG_LICENSE_FILES:=LICENSE.txt
index 4a7c5451c176709ae0a1a866580e9e700bc8bfdc..9c4235d2daef3dc0004b544dfce9a26ff9adc2ea 100644 (file)
@@ -8,12 +8,12 @@
 include $(TOPDIR)/rules.mk
 
 PKG_NAME:=python-pyasn1
-PKG_VERSION:=0.4.5
+PKG_VERSION:=0.4.6
 PKG_RELEASE:=1
 
 PKG_SOURCE:=pyasn1-$(PKG_VERSION).tar.gz
 PKG_SOURCE_URL:=https://files.pythonhosted.org/packages/source/p/pyasn1
-PKG_HASH:=da2420fe13a9452d8ae97a0e478adde1dee153b11ba832a95b223a2ba01c10f7
+PKG_HASH:=b773d5c9196ffbc3a1e13bdf909d446cad80a039aa3340bcad72f395b76ebc86
 
 PKG_LICENSE:=BSD-2-Clause
 PKG_LICENSE_FILES:=LICENSE.txt
index 75a616a0ab8e680bc99864188df3fbe73a5d88eb..dcc527eb1c637b35b8bde2a1152df8766c19302e 100644 (file)
@@ -8,23 +8,17 @@
 include $(TOPDIR)/rules.mk
 
 PKG_NAME:=libredblack
-PKG_VERSION:=0.2.3
-PKG_RELEASE=$(PKG_SOURCE_VERSION)
+PKG_VERSION:=1.3
+PKG_RELEASE:=1
 
-PKG_LICENSE:=GPL-2.0+
-PKG_MAINTAINER:=Mislav Novakovic <mislav.novakovic@sartura.hr>
-
-PKG_SOURCE_PROTO:=git
-PKG_SOURCE_VERSION:=a399310d99b61eec4d3c0677573ab5dddcf9395d
-PKG_MIRROR_HASH:=71b05e70988b97865f734c698dd5564e349680556ccb8634a5bddf344012f22a
-PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)-$(PKG_SOURCE_VERSION).tar.xz
-PKG_SOURCE_URL:=https://github.com/sysrepo/libredblack.git
-PKG_SOURCE_SUBDIR:=$(PKG_NAME)-$(PKG_VERSION)-$(PKG_SOURCE_VERSION)
+PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
+PKG_SOURCE_URL:=@SF/libredblack
+PKG_HASH:=a0ecc59b0aae2df01558a6950532c711a782a099277b439a51d270003092f44f
 
-PKG_BUILD_ROOT:=$(BUILD_DIR)/$(PKG_SOURCE_SUBDIR)
-PKG_BUILD_DIR:=$(PKG_BUILD_ROOT)
+PKG_MAINTAINER:=Mislav Novakovic <mislav.novakovic@sartura.hr>
+PKG_LICENSE:=GPL-2.0-or-later
+PKG_LICENSE_FILES:=COPYING
 
-PKG_FIXUP:=autoreconf
 PKG_BUILD_PARALLEL:=1
 PKG_INSTALL:=1
 
@@ -35,13 +29,15 @@ define Package/libredblack
   SECTION:=libs
   CATEGORY:=Libraries
   TITLE:=RedBlack tree library
-  URL:=$(PKG_SOURCE_URL)
+  URL:=http://libredblack.sourceforge.net/
 endef
 
 define Package/libredblack/description
  RedBlack Balanced Tree Searching and Sorting Library.
 endef
 
+CONFIGURE_ARGS += --without-rbgen
+
 define Build/InstallDev
        $(INSTALL_DIR) $(1)/usr/lib
        $(CP) $(PKG_INSTALL_DIR)/usr/lib/libredblack.{so*,a,la} $(1)/usr/lib/
index 3c3d0be285f8c56007a54785c0100548caa38874..39f22138e8cedece76a6c732dc69ff4eb0fd6790 100644 (file)
@@ -3,7 +3,7 @@ include $(TOPDIR)/rules.mk
 PKG_NAME:=libsigar
 PKG_SOURCE_DATE:=2017-02-21
 PKG_SOURCE_VERSION:=a6c61edf8c64e013411e8c9d753165cd03102c6e
-PKG_RELEASE:=2
+PKG_RELEASE:=3
 
 PKG_SOURCE_PROTO:=git
 PKG_SOURCE_URL:=https://github.com/boundary/sigar
diff --git a/libs/libsigar/patches/020-sysmacros.patch b/libs/libsigar/patches/020-sysmacros.patch
new file mode 100644 (file)
index 0000000..334a908
--- /dev/null
@@ -0,0 +1,10 @@
+--- a/src/os/linux/linux_sigar.c
++++ b/src/os/linux/linux_sigar.c
+@@ -23,6 +23,7 @@
+ #include <linux/param.h>
+ #include <sys/param.h>
+ #include <sys/stat.h>
++#include <sys/sysmacros.h>
+ #include <sys/times.h>
+ #include <sys/utsname.h>
+ #include <mntent.h>
index 85ce602218d63064aadb280d1f454aff63706193..69b3e16c518301a847e21adb5fca90f94d13e4d6 100644 (file)
@@ -5,13 +5,13 @@
 include $(TOPDIR)/rules.mk
 
 PKG_NAME:=graphicsmagick
-PKG_VERSION:=1.3.32
+PKG_VERSION:=1.3.33
 PKG_RELEASE:=1
 
 PKG_BUILD_DIR:=$(BUILD_DIR)/GraphicsMagick-$(PKG_VERSION)
 PKG_SOURCE:=GraphicsMagick-$(PKG_VERSION).tar.bz2
 PKG_SOURCE_URL:=@SF/graphicsmagick
-PKG_HASH:=d1f70bc6d41de922199ce6b0a04af7b3492b2fc4a2be6ee24e0af4e15250db0a
+PKG_HASH:=d18aaca2d79a10270d49ad1aaa01dce24752f7548880138d59874a78ac62e11f
 
 PKG_LICENSE:=MIT
 PKG_LICENSE_FILES:=Copyright.txt
index fd2fc24bc579b301b9836eaf722a8954fd2c39d2..d61c9160ba5a6ad980f22d9035bcf4a8542b6a5a 100644 (file)
@@ -7,13 +7,13 @@ include $(TOPDIR)/rules.mk
 
 PKG_NAME:=imagemagick
 PKG_VERSION:=7.0.8
-PKG_REVISION:=49
+PKG_REVISION:=59
 PKG_RELEASE:=1
 PKG_MAINTAINER:=Val Kulkov <val.kulkov@gmail.com>
 
 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)-$(PKG_REVISION).tar.gz
 PKG_SOURCE_URL:=http://github.com/ImageMagick/ImageMagick/archive/$(PKG_VERSION)-$(PKG_REVISION)
-PKG_HASH:=53f7963bbe81520e799e9e178a13757890ed43bc9faf2e86fae1cf58aea28575
+PKG_HASH:=238ee17196fcb80bb58485910aaefc12d48f99e4043c2a28f06ff9588161c4e3
 PKG_BUILD_DIR:=$(BUILD_DIR)/ImageMagick-$(PKG_VERSION)-$(PKG_REVISION)
 
 PKG_LICENSE:=Apache-2.0
index fbc23ef29e0274189cd77426c53c4cf9a3dea5f4..1ad420738f066810d966b5679055c0da0784e092 100644 (file)
@@ -6,7 +6,7 @@
 include $(TOPDIR)/rules.mk
 
 PKG_NAME:=banip
-PKG_VERSION:=0.1.4
+PKG_VERSION:=0.1.5
 PKG_RELEASE:=1
 PKG_LICENSE:=GPL-3.0+
 PKG_MAINTAINER:=Dirk Brenken <dev@brenken.org>
index 1df1f7cdce65b41a4a23d6c8e0843702945cf16a..dfa6c7d7e7c2c16316ada9da48d4340263d71173 100644 (file)
@@ -14,8 +14,8 @@ IP address blocking is commonly used to protect against brute force attacks, pre
 * supports blocking by ASN numbers
 * supports blocking by iso country codes
 * supports local white & blacklist (IPv4, IPv6 & CIDR notation), located by default in /etc/banip/banip.whitelist and /etc/banip/banip.blacklist
-* auto-add unsuccessful ssh login attempts to local blacklist
-* auto-add the uplink subnet to local whitelist
+* auto-add unsuccessful ssh login attempts to local blacklist (see 'ban_autoblacklist' option)
+* auto-add the uplink subnet to local whitelist (see 'ban_autowhitelist' option)
 * per source configuration of SRC (incoming) and DST (outgoing)
 * integrated IPSet-Lookup
 * integrated RIPE-Lookup
@@ -54,7 +54,7 @@ IP address blocking is commonly used to protect against brute force attacks, pre
     * ban\_iface => space separated list of WAN network interface(s)/device(s) used by banIP (default: automatically set by banIP ('ban_automatic'))
 
 * the following options apply to the 'extra' config section:
-    * ban\_debug => enable/disable banIP debug output (default: '0', disabled)
+    * ban\_debug => enable/disable banIP debug output (bool/default: '0', disabled)
     * ban\_nice => set the nice level of the banIP process and all sub-processes (int/default: '0', standard priority)
     * ban\_triggerdelay => additional trigger delay in seconds before banIP processing begins (int/default: '2')
     * ban\_backup => create compressed blocklist backups, they will be used in case of download errors or during startup in 'backup mode' (bool/default: '0', disabled)
@@ -62,6 +62,8 @@ IP address blocking is commonly used to protect against brute force attacks, pre
     * ban\_backupboot => do not automatically update blocklists during startup, use their backups instead (bool/default: '0', disabled)
     * ban\_maxqueue => size of the download queue to handle downloads & IPSet processing in parallel (int/default: '8')
     * ban\_fetchparm => special config options for the download utility (default: not set)
+    * ban\_autoblacklist => store auto-addons temporary in ipset and permanently in local blacklist as well (bool/default: '1', enabled)
+    * ban\_autowhitelist => store auto-addons temporary in ipset and permanently in local whitelist as well (bool/default: '1', enabled)
 
 ## Examples
 **receive banIP runtime information:**
index 4843fc2b082fadd8279884fee43d816d91c900a6..08053a2fe6887f0092cabc49ebf144bf4d1b77a1 100644 (file)
@@ -1,5 +1,3 @@
-# banIP configuration, for further information
-# see 'https://github.com/openwrt/packages/blob/master/net/banip/files/README.md'
 
 config banip 'global'
        option ban_enabled '0'
@@ -92,14 +90,6 @@ config source 'yoyo'
        option ban_src_ruletype 'src'
        option ban_src_on '0'
 
-config source 'zeus'
-       option ban_src 'https://zeustracker.abuse.ch/blocklist.php?download=ipblocklist'
-       option ban_src_desc 'Zeus Tracker by abuse.ch (IPv4)'
-       option ban_src_rset '/^(([0-9]{1,3}\.){3}[0-9]{1,3})([[:space:]]|$)/{print \"add zeus \"\$1}'
-       option ban_src_settype 'ip'
-       option ban_src_ruletype 'src'
-       option ban_src_on '0'
-
 config source 'sslbl'
        option ban_src 'https://sslbl.abuse.ch/blacklist/sslipblacklist.csv'
        option ban_src_desc 'SSL Blacklist by abuse.ch (IPv4)'
index 9356c4df02abdc384fb1f1b612873346aad4ade6..235bbfd9c0a0b58974b9aa24972c48fb619b83c7 100755 (executable)
@@ -50,7 +50,6 @@ reload_service()
 stop_service()
 {
        rc_procd "${ban_script}" stop
-       rc_procd start_service
 }
 
 status()
index 8d1ae1e2a8385c2013f76d4c66745a084681ea1b..c546e872445e7324fb68f26ff9ef3874672e9f29 100755 (executable)
@@ -10,7 +10,7 @@
 #
 LC_ALL=C
 PATH="/usr/sbin:/usr/bin:/sbin:/bin"
-ban_ver="0.1.4"
+ban_ver="0.1.5"
 ban_sysver="unknown"
 ban_enabled=0
 ban_automatic="1"
@@ -21,6 +21,8 @@ ban_backup=0
 ban_backupboot=0
 ban_backupdir="/mnt"
 ban_maxqueue=4
+ban_autoblacklist=1
+ban_autowhitelist=1
 ban_fetchutil="uclient-fetch"
 ban_ip="$(command -v ip)"
 ban_ipt="$(command -v iptables)"
@@ -623,7 +625,11 @@ f_main()
                                                if [ -z "$(grep -F "${ip}" "${src_url}")" ]
                                                then
                                                        printf '%s\n' "${ip}" >> "${tmp_load}"
-                                                       printf '%s\n' "${ip}" >> "${src_url}"
+                                                       if { [ "${src_name//_*/}" = "blacklist" ] && [ "${ban_autoblacklist}" -eq 1 ]; } || \
+                                                               { [ "${src_name//_*/}" = "whitelist" ] && [ "${ban_autowhitelist}" -eq 1 ]; }
+                                                       then
+                                                               printf '%s\n' "${ip}" >> "${src_url}"
+                                                       fi
                                                fi
                                        done
                                elif [ -n "${src_cat}" ]
index 0c6cb0739e855a8effa0e91c9854e3c717487597..d31349c6db3114e08afaec4bf4790dc721df9b25 100644 (file)
@@ -11,7 +11,7 @@ include $(TOPDIR)/rules.mk
 
 PKG_NAME:=haproxy
 PKG_VERSION:=2.0.3
-PKG_RELEASE:=1
+PKG_RELEASE:=2
 
 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
 PKG_SOURCE_URL:=https://www.haproxy.org/download/2.0/src
diff --git a/net/haproxy/patches/000-BUG-MEDIUM-protocols-add-a-global-lock-for-the-init-deinit-stuff.patch b/net/haproxy/patches/000-BUG-MEDIUM-protocols-add-a-global-lock-for-the-init-deinit-stuff.patch
new file mode 100644 (file)
index 0000000..de10b7d
--- /dev/null
@@ -0,0 +1,302 @@
+commit 937604b4cfccddd607b8d4883815c4e3f9ab70d0
+Author: Willy Tarreau <w@1wt.eu>
+Date:   Wed Jul 24 16:45:02 2019 +0200
+
+    BUG/MEDIUM: protocols: add a global lock for the init/deinit stuff
+    
+    Dragan Dosen found that the listeners lock is not sufficient to protect
+    the listeners list when proxies are stopping because the listeners are
+    also unlinked from the protocol list, and under certain situations like
+    bombing with soft-stop signals or shutting down many frontends in parallel
+    from multiple CLI connections, it could be possible to provoke multiple
+    instances of delete_listener() to be called in parallel for different
+    listeners, thus corrupting the protocol lists.
+    
+    Such operations are pretty rare, they are performed once per proxy upon
+    startup and once per proxy on shut down. Thus there is no point trying
+    to optimize anything and we can use a global lock to protect the protocol
+    lists during these manipulations.
+    
+    This fix (or a variant) will have to be backported as far as 1.8.
+    
+    (cherry picked from commit daacf3664506d56a1f3b050ccba504886a18b12a)
+    Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
+
+diff --git a/include/proto/protocol.h b/include/proto/protocol.h
+index 7bbebb8e..f25f77f0 100644
+--- a/include/proto/protocol.h
++++ b/include/proto/protocol.h
+@@ -23,9 +23,11 @@
+ #define _PROTO_PROTOCOL_H
+ #include <sys/socket.h>
++#include <common/hathreads.h>
+ #include <types/protocol.h>
+ extern struct protocol *__protocol_by_family[AF_CUST_MAX];
++__decl_hathreads(extern HA_SPINLOCK_T proto_lock);
+ /* Registers the protocol <proto> */
+ void protocol_register(struct protocol *proto);
+diff --git a/include/types/protocol.h b/include/types/protocol.h
+index 1d3404b9..f38baeb9 100644
+--- a/include/types/protocol.h
++++ b/include/types/protocol.h
+@@ -80,9 +80,9 @@ struct protocol {
+       int (*pause)(struct listener *l);               /* temporarily pause this listener for a soft restart */
+       void (*add)(struct listener *l, int port);      /* add a listener for this protocol and port */
+-      struct list listeners;                          /* list of listeners using this protocol */
+-      int nb_listeners;                               /* number of listeners */
+-      struct list list;                               /* list of registered protocols */
++      struct list listeners;                          /* list of listeners using this protocol (under proto_lock) */
++      int nb_listeners;                               /* number of listeners (under proto_lock) */
++      struct list list;                               /* list of registered protocols (under proto_lock) */
+ };
+ #define CONNECT_HAS_DATA                        0x00000001 /* There's data available to be sent */
+diff --git a/src/listener.c b/src/listener.c
+index 40a774ed..b5fe2ac2 100644
+--- a/src/listener.c
++++ b/src/listener.c
+@@ -433,6 +433,9 @@ static void limit_listener(struct listener *l, struct list *list)
+  * used as a protocol's generic enable_all() primitive, for use after the
+  * fork(). It puts the listeners into LI_READY or LI_FULL states depending on
+  * their number of connections. It always returns ERR_NONE.
++ *
++ * Must be called with proto_lock held.
++ *
+  */
+ int enable_all_listeners(struct protocol *proto)
+ {
+@@ -447,6 +450,9 @@ int enable_all_listeners(struct protocol *proto)
+  * the polling lists when they are in the LI_READY or LI_FULL states. It is
+  * intended to be used as a protocol's generic disable_all() primitive. It puts
+  * the listeners into LI_LISTEN, and always returns ERR_NONE.
++ *
++ * Must be called with proto_lock held.
++ *
+  */
+ int disable_all_listeners(struct protocol *proto)
+ {
+@@ -516,6 +522,9 @@ void unbind_listener_no_close(struct listener *listener)
+ /* This function closes all listening sockets bound to the protocol <proto>,
+  * and the listeners end in LI_ASSIGNED state if they were higher. It does not
+  * detach them from the protocol. It always returns ERR_NONE.
++ *
++ * Must be called with proto_lock held.
++ *
+  */
+ int unbind_all_listeners(struct protocol *proto)
+ {
+@@ -580,14 +589,19 @@ int create_listeners(struct bind_conf *bc, const struct sockaddr_storage *ss,
+  * number of listeners is updated, as well as the global number of listeners
+  * and jobs. Note that the listener must have previously been unbound. This
+  * is the generic function to use to remove a listener.
++ *
++ * Will grab the proto_lock.
++ *
+  */
+ void delete_listener(struct listener *listener)
+ {
+       HA_SPIN_LOCK(LISTENER_LOCK, &listener->lock);
+       if (listener->state == LI_ASSIGNED) {
+               listener->state = LI_INIT;
++              HA_SPIN_LOCK(PROTO_LOCK, &proto_lock);
+               LIST_DEL(&listener->proto_list);
+               listener->proto->nb_listeners--;
++              HA_SPIN_UNLOCK(PROTO_LOCK, &proto_lock);
+               _HA_ATOMIC_SUB(&jobs, 1);
+               _HA_ATOMIC_SUB(&listeners, 1);
+       }
+diff --git a/src/proto_sockpair.c b/src/proto_sockpair.c
+index a4faa370..e7dd670d 100644
+--- a/src/proto_sockpair.c
++++ b/src/proto_sockpair.c
+@@ -80,6 +80,9 @@ INITCALL1(STG_REGISTER, protocol_register, &proto_sockpair);
+ /* Add <listener> to the list of sockpair listeners (port is ignored). The
+  * listener's state is automatically updated from LI_INIT to LI_ASSIGNED.
+  * The number of listeners for the protocol is updated.
++ *
++ * Must be called with proto_lock held.
++ *
+  */
+ static void sockpair_add_listener(struct listener *listener, int port)
+ {
+@@ -97,6 +100,8 @@ static void sockpair_add_listener(struct listener *listener, int port)
+  * loose them across the fork(). A call to uxst_enable_listeners() is needed
+  * to complete initialization.
+  *
++ * Must be called with proto_lock held.
++ *
+  * The return value is composed from ERR_NONE, ERR_RETRYABLE and ERR_FATAL.
+  */
+ static int sockpair_bind_listeners(struct protocol *proto, char *errmsg, int errlen)
+diff --git a/src/proto_tcp.c b/src/proto_tcp.c
+index 64ffb83c..bcbe27a7 100644
+--- a/src/proto_tcp.c
++++ b/src/proto_tcp.c
+@@ -1103,6 +1103,9 @@ int tcp_bind_listener(struct listener *listener, char *errmsg, int errlen)
+  * The sockets will be registered but not added to any fd_set, in order not to
+  * loose them across the fork(). A call to enable_all_listeners() is needed
+  * to complete initialization. The return value is composed from ERR_*.
++ *
++ * Must be called with proto_lock held.
++ *
+  */
+ static int tcp_bind_listeners(struct protocol *proto, char *errmsg, int errlen)
+ {
+@@ -1121,6 +1124,9 @@ static int tcp_bind_listeners(struct protocol *proto, char *errmsg, int errlen)
+ /* Add <listener> to the list of tcpv4 listeners, on port <port>. The
+  * listener's state is automatically updated from LI_INIT to LI_ASSIGNED.
+  * The number of listeners for the protocol is updated.
++ *
++ * Must be called with proto_lock held.
++ *
+  */
+ static void tcpv4_add_listener(struct listener *listener, int port)
+ {
+@@ -1136,6 +1142,9 @@ static void tcpv4_add_listener(struct listener *listener, int port)
+ /* Add <listener> to the list of tcpv6 listeners, on port <port>. The
+  * listener's state is automatically updated from LI_INIT to LI_ASSIGNED.
+  * The number of listeners for the protocol is updated.
++ *
++ * Must be called with proto_lock held.
++ *
+  */
+ static void tcpv6_add_listener(struct listener *listener, int port)
+ {
+diff --git a/src/proto_uxst.c b/src/proto_uxst.c
+index 66093af6..7263240f 100644
+--- a/src/proto_uxst.c
++++ b/src/proto_uxst.c
+@@ -379,6 +379,9 @@ static int uxst_unbind_listener(struct listener *listener)
+ /* Add <listener> to the list of unix stream listeners (port is ignored). The
+  * listener's state is automatically updated from LI_INIT to LI_ASSIGNED.
+  * The number of listeners for the protocol is updated.
++ *
++ * Must be called with proto_lock held.
++ *
+  */
+ static void uxst_add_listener(struct listener *listener, int port)
+ {
+@@ -594,6 +597,8 @@ static int uxst_connect_server(struct connection *conn, int flags)
+  * loose them across the fork(). A call to uxst_enable_listeners() is needed
+  * to complete initialization.
+  *
++ * Must be called with proto_lock held.
++ *
+  * The return value is composed from ERR_NONE, ERR_RETRYABLE and ERR_FATAL.
+  */
+ static int uxst_bind_listeners(struct protocol *proto, char *errmsg, int errlen)
+@@ -613,6 +618,9 @@ static int uxst_bind_listeners(struct protocol *proto, char *errmsg, int errlen)
+ /* This function stops all listening UNIX sockets bound to the protocol
+  * <proto>. It does not detaches them from the protocol.
+  * It always returns ERR_NONE.
++ *
++ * Must be called with proto_lock held.
++ *
+  */
+ static int uxst_unbind_listeners(struct protocol *proto)
+ {
+diff --git a/src/protocol.c b/src/protocol.c
+index 96e01c82..ac45cf2e 100644
+--- a/src/protocol.c
++++ b/src/protocol.c
+@@ -18,18 +18,26 @@
+ #include <common/mini-clist.h>
+ #include <common/standard.h>
+-#include <types/protocol.h>
++#include <proto/protocol.h>
+ /* List head of all registered protocols */
+ static struct list protocols = LIST_HEAD_INIT(protocols);
+ struct protocol *__protocol_by_family[AF_CUST_MAX] = { };
++/* This is the global spinlock we may need to register/unregister listeners or
++ * protocols. Its main purpose is in fact to serialize the rare stop/deinit()
++ * phases.
++ */
++__decl_spinlock(proto_lock);
++
+ /* Registers the protocol <proto> */
+ void protocol_register(struct protocol *proto)
+ {
++      HA_SPIN_LOCK(PROTO_LOCK, &proto_lock);
+       LIST_ADDQ(&protocols, &proto->list);
+       if (proto->sock_domain >= 0 && proto->sock_domain < AF_CUST_MAX)
+               __protocol_by_family[proto->sock_domain] = proto;
++      HA_SPIN_UNLOCK(PROTO_LOCK, &proto_lock);
+ }
+ /* Unregisters the protocol <proto>. Note that all listeners must have
+@@ -37,8 +45,10 @@ void protocol_register(struct protocol *proto)
+  */
+ void protocol_unregister(struct protocol *proto)
+ {
++      HA_SPIN_LOCK(PROTO_LOCK, &proto_lock);
+       LIST_DEL(&proto->list);
+       LIST_INIT(&proto->list);
++      HA_SPIN_UNLOCK(PROTO_LOCK, &proto_lock);
+ }
+ /* binds all listeners of all registered protocols. Returns a composition
+@@ -50,6 +60,7 @@ int protocol_bind_all(char *errmsg, int errlen)
+       int err;
+       err = 0;
++      HA_SPIN_LOCK(PROTO_LOCK, &proto_lock);
+       list_for_each_entry(proto, &protocols, list) {
+               if (proto->bind_all) {
+                       err |= proto->bind_all(proto, errmsg, errlen);
+@@ -57,6 +68,7 @@ int protocol_bind_all(char *errmsg, int errlen)
+                               break;
+               }
+       }
++      HA_SPIN_UNLOCK(PROTO_LOCK, &proto_lock);
+       return err;
+ }
+@@ -71,11 +83,13 @@ int protocol_unbind_all(void)
+       int err;
+       err = 0;
++      HA_SPIN_LOCK(PROTO_LOCK, &proto_lock);
+       list_for_each_entry(proto, &protocols, list) {
+               if (proto->unbind_all) {
+                       err |= proto->unbind_all(proto);
+               }
+       }
++      HA_SPIN_UNLOCK(PROTO_LOCK, &proto_lock);
+       return err;
+ }
+@@ -89,11 +103,13 @@ int protocol_enable_all(void)
+       int err;
+       err = 0;
++      HA_SPIN_LOCK(PROTO_LOCK, &proto_lock);
+       list_for_each_entry(proto, &protocols, list) {
+               if (proto->enable_all) {
+                       err |= proto->enable_all(proto);
+               }
+       }
++      HA_SPIN_UNLOCK(PROTO_LOCK, &proto_lock);
+       return err;
+ }
+@@ -107,11 +123,13 @@ int protocol_disable_all(void)
+       int err;
+       err = 0;
++      HA_SPIN_LOCK(PROTO_LOCK, &proto_lock);
+       list_for_each_entry(proto, &protocols, list) {
+               if (proto->disable_all) {
+                       err |= proto->disable_all(proto);
+               }
+       }
++      HA_SPIN_UNLOCK(PROTO_LOCK, &proto_lock);
+       return err;
+ }
diff --git a/net/haproxy/patches/000-OPENWRT-add-uclibc-support.patch b/net/haproxy/patches/000-OPENWRT-add-uclibc-support.patch
deleted file mode 100644 (file)
index 2757025..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
---- a/Makefile
-+++ b/Makefile
-@@ -327,6 +327,15 @@ ifeq ($(TARGET),linux-glibc)
-     USE_GETADDRINFO)
- endif
-+# For linux >= 2.6.28 and uclibc
-+ifeq ($(TARGET),linux-uclibc)
-+  set_target_defaults = $(call default_opts, \
-+    USE_POLL USE_TPROXY USE_DL USE_RT USE_NETFILTER                           \
-+    USE_CPU_AFFINITY USE_THREAD USE_EPOLL USE_FUTEX USE_LINUX_TPROXY          \
-+    USE_ACCEPT4 USE_LINUX_SPLICE USE_PRCTL USE_THREAD_DUMP USE_NS USE_TFO     \
-+    USE_GETADDRINFO)
-+endif
-+
- # Solaris 8 and above
- ifeq ($(TARGET),solaris)
-   # We also enable getaddrinfo() which works since solaris 8.
diff --git a/net/haproxy/patches/001-BUG-MINOR-proxy-always-lock-stop_proxy.patch b/net/haproxy/patches/001-BUG-MINOR-proxy-always-lock-stop_proxy.patch
new file mode 100644 (file)
index 0000000..4f7a370
--- /dev/null
@@ -0,0 +1,64 @@
+commit 6d79cedaaa4a16b2f42d2bf2bc25772a51354e91
+Author: Willy Tarreau <w@1wt.eu>
+Date:   Wed Jul 24 17:42:44 2019 +0200
+
+    BUG/MINOR: proxy: always lock stop_proxy()
+    
+    There is one unprotected call to stop_proxy() from the manage_proxy()
+    task, so there is a single caller by definition, but there is also
+    another such call from the CLI's "shutdown frontend" parser. This
+    one does it under the proxy's lock but the first one doesn't use it.
+    Thus it is theorically possible to corrupt the list of listeners in a
+    proxy by issuing "shutdown frontend" and SIGUSR1 exactly at the same
+    time. While it sounds particularly contrived or stupid, it could
+    possibly happen with automated tools that would send actions via
+    various channels. This could cause the process to loop forever or
+    to crash and thus stop faster than expected.
+    
+    This might be backported as far as 1.8.
+    
+    (cherry picked from commit 3de3cd4d9761324b31d23eb2c4a9434ed33801b8)
+    Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
+
+diff --git a/src/proxy.c b/src/proxy.c
+index f669ebf1..ae761ead 100644
+--- a/src/proxy.c
++++ b/src/proxy.c
+@@ -1258,13 +1258,16 @@ void zombify_proxy(struct proxy *p)
+  * to be called when going down in order to release the ports so that another
+  * process may bind to them. It must also be called on disabled proxies at the
+  * end of start-up. If all listeners are closed, the proxy is set to the
+- * PR_STSTOPPED state.
++ * PR_STSTOPPED state. The function takes the proxy's lock so it's safe to
++ * call from multiple places.
+  */
+ void stop_proxy(struct proxy *p)
+ {
+       struct listener *l;
+       int nostop = 0;
++      HA_SPIN_LOCK(PROXY_LOCK, &p->lock);
++
+       list_for_each_entry(l, &p->conf.listeners, by_fe) {
+               if (l->options & LI_O_NOSTOP) {
+                       HA_ATOMIC_ADD(&unstoppable_jobs, 1);
+@@ -1278,6 +1281,8 @@ void stop_proxy(struct proxy *p)
+       }
+       if (!nostop)
+               p->state = PR_STSTOPPED;
++
++      HA_SPIN_UNLOCK(PROXY_LOCK, &p->lock);
+ }
+ /* This function resumes listening on the specified proxy. It scans all of its
+@@ -2110,10 +2115,7 @@ static int cli_parse_shutdown_frontend(char **args, char *payload, struct appctx
+       send_log(px, LOG_WARNING, "Proxy %s stopped (FE: %lld conns, BE: %lld conns).\n",
+                px->id, px->fe_counters.cum_conn, px->be_counters.cum_conn);
+-      HA_SPIN_LOCK(PROXY_LOCK, &px->lock);
+       stop_proxy(px);
+-      HA_SPIN_UNLOCK(PROXY_LOCK, &px->lock);
+-
+       return 1;
+ }
diff --git a/net/haproxy/patches/001-OPENWRT-openssl-deprecated.patch b/net/haproxy/patches/001-OPENWRT-openssl-deprecated.patch
deleted file mode 100644 (file)
index 541077e..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
---- a/include/common/openssl-compat.h
-+++ b/include/common/openssl-compat.h
-@@ -217,7 +217,8 @@ static inline int EVP_PKEY_base_id(EVP_PKEY *pkey)
- #define TLSEXT_signature_ecdsa      3
- #endif
--#if (OPENSSL_VERSION_NUMBER < 0x10100000L) || (LIBRESSL_VERSION_NUMBER < 0x20700000L)
-+#if (OPENSSL_VERSION_NUMBER < 0x10100000L) || \
-+      (defined(LIBRESSL_VERSION_NUMBER) && (LIBRESSL_VERSION_NUMBER < 0x20700000L))
- #define X509_getm_notBefore     X509_get_notBefore
- #define X509_getm_notAfter      X509_get_notAfter
- #endif
diff --git a/net/haproxy/patches/002-BUILD-threads-add-the-definition-of-PROTO_LOCK.patch b/net/haproxy/patches/002-BUILD-threads-add-the-definition-of-PROTO_LOCK.patch
new file mode 100644 (file)
index 0000000..2c73ec8
--- /dev/null
@@ -0,0 +1,33 @@
+commit a4ca26661f95a60974fb13a78b1a0c89f9c09ea9
+Author: Willy Tarreau <w@1wt.eu>
+Date:   Thu Jul 25 07:53:56 2019 +0200
+
+    BUILD: threads: add the definition of PROTO_LOCK
+    
+    This one was added by commit daacf3664 ("BUG/MEDIUM: protocols: add a
+    global lock for the init/deinit stuff") but I forgot to add it to the
+    include file, breaking DEBUG_THREAD.
+    
+    (cherry picked from commit d6e0c03384cab2c72fb6ab841420045108ea4e6f)
+    Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
+
+diff --git a/include/common/hathreads.h b/include/common/hathreads.h
+index a7c8dc93..b05215bd 100644
+--- a/include/common/hathreads.h
++++ b/include/common/hathreads.h
+@@ -562,6 +562,7 @@ enum lock_label {
+       AUTH_LOCK,
+       LOGSRV_LOCK,
+       DICT_LOCK,
++      PROTO_LOCK,
+       OTHER_LOCK,
+       LOCK_LABELS
+ };
+@@ -679,6 +680,7 @@ static inline const char *lock_label(enum lock_label label)
+       case AUTH_LOCK:            return "AUTH";
+       case LOGSRV_LOCK:          return "LOGSRV";
+       case DICT_LOCK:            return "DICT";
++      case PROTO_LOCK:           return "PROTO";
+       case OTHER_LOCK:           return "OTHER";
+       case LOCK_LABELS:          break; /* keep compiler happy */
+       };
diff --git a/net/haproxy/patches/003-BUG-MEDIUM-lb-chash-Fix-the-realloc-when-the-number-of-nodes-is-increased.patch b/net/haproxy/patches/003-BUG-MEDIUM-lb-chash-Fix-the-realloc-when-the-number-of-nodes-is-increased.patch
new file mode 100644 (file)
index 0000000..28b9fe0
--- /dev/null
@@ -0,0 +1,32 @@
+commit 974c6916ba2f7efc83193bb8c04e95294ca21112
+Author: Christopher Faulet <cfaulet@haproxy.com>
+Date:   Fri Jul 26 13:52:13 2019 +0200
+
+    BUG/MEDIUM: lb-chash: Fix the realloc() when the number of nodes is increased
+    
+    When the number of nodes is increased because the server weight is changed, the
+    nodes array must be realloc. But its new size is not correctly set. Only the
+    total number of nodes is used to set the new size. But it must also depends on
+    the size of a node. It must be the total number of nodes times the size of a
+    node.
+    
+    This issue was reported on Github (#189).
+    
+    This patch must be backported to all versions since the 1.6.
+    
+    (cherry picked from commit 366ad86af72c455cc958943913cb2de20eefee71)
+    Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
+
+diff --git a/src/lb_chash.c b/src/lb_chash.c
+index a35351e9..0bf4e81a 100644
+--- a/src/lb_chash.c
++++ b/src/lb_chash.c
+@@ -84,7 +84,7 @@ static inline void chash_queue_dequeue_srv(struct server *s)
+        * increased the weight beyond the original weight
+        */
+       if (s->lb_nodes_tot < s->next_eweight) {
+-              struct tree_occ *new_nodes = realloc(s->lb_nodes, s->next_eweight);
++              struct tree_occ *new_nodes = realloc(s->lb_nodes, s->next_eweight * sizeof(*new_nodes));
+               if (new_nodes) {
+                       unsigned int j;
diff --git a/net/haproxy/patches/004-BUG-MEDIUM-streams-Dont-switch-the-SI-to-SI_ST_DIS-if-we-have-data-to-send.patch b/net/haproxy/patches/004-BUG-MEDIUM-streams-Dont-switch-the-SI-to-SI_ST_DIS-if-we-have-data-to-send.patch
new file mode 100644 (file)
index 0000000..69e1d62
--- /dev/null
@@ -0,0 +1,32 @@
+commit 21a796cb83c29ee276feb04649a1b18214bbdee0
+Author: Olivier Houchard <ohouchard@haproxy.com>
+Date:   Fri Jul 26 14:54:34 2019 +0200
+
+    BUG/MEDIUM: streams: Don't switch the SI to SI_ST_DIS if we have data to send.
+    
+    In sess_established(), don't immediately switch the backend stream_interface
+    to SI_ST_DIS if we only got a SHUTR. We may still have something to send,
+    ie if the request is a POST, and we should be switched to SI_ST_DIS later
+    when the shutw will happen.
+    
+    This should be backported to 2.0 and 1.9.
+    
+    (cherry picked from commit 7859526fd6ce7ea33e20b7e532b21aa2465cb11d)
+    Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
+
+diff --git a/src/stream.c b/src/stream.c
+index a5c5f45c..64875c80 100644
+--- a/src/stream.c
++++ b/src/stream.c
+@@ -954,8 +954,9 @@ static void sess_establish(struct stream *s)
+               si_chk_rcv(si);
+       }
+       req->wex = TICK_ETERNITY;
+-      /* If we managed to get the whole response, switch to SI_ST_DIS now. */
+-      if (rep->flags & CF_SHUTR)
++      /* If we managed to get the whole response, and we don't have anything
++       * left to send, or can't, switch to SI_ST_DIS now. */
++      if (rep->flags & (CF_SHUTR | CF_SHUTW))
+               si->state = SI_ST_DIS;
+ }
diff --git a/net/haproxy/patches/005-BUG-MINOR-log-make-sure-writev-is-not-interrupted-on-a-file-output.patch b/net/haproxy/patches/005-BUG-MINOR-log-make-sure-writev-is-not-interrupted-on-a-file-output.patch
new file mode 100644 (file)
index 0000000..5c4bbcd
--- /dev/null
@@ -0,0 +1,42 @@
+commit 487b38e86c08431bc5f48aac72c8d753ee23cb03
+Author: Willy Tarreau <w@1wt.eu>
+Date:   Fri Jul 26 15:10:39 2019 +0200
+
+    BUG/MINOR: log: make sure writev() is not interrupted on a file output
+    
+    Since 1.9 we support sending logs to various non-blocking outputs like
+    stdout/stderr or files, by using writev() which guarantees that it only
+    returns after having written everything or nothing. However the syscall
+    may be interrupted while doing so, and this is visible when writing to
+    a tty during debug sessions, as some logs occasionally appear interleaved
+    if an xterm or SSH connection is not very fast. Performance here is not a
+    critical concern, log correctness is. Let's simply take the logger's lock
+    around the writev() call to prevent multiple senders from stepping onto
+    each other's toes.
+    
+    This may be backported to 2.0 and 1.9.
+    
+    (cherry picked from commit 9fbcb7e2e9c32659ab11927394fec2e160be2d0b)
+    Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
+
+diff --git a/src/log.c b/src/log.c
+index ef999d13..99f185e4 100644
+--- a/src/log.c
++++ b/src/log.c
+@@ -1672,8 +1672,15 @@ send:
+       iovec[7].iov_len  = 1;
+       if (logsrv->addr.ss_family == AF_UNSPEC) {
+-              /* the target is a direct file descriptor */
++              /* the target is a direct file descriptor. While writev() guarantees
++               * to write everything, it doesn't guarantee that it will not be
++               * interrupted while doing so. This occasionally results in interleaved
++               * messages when the output is a tty, hence the lock. There's no real
++               * performance concern here for such type of output.
++               */
++              HA_SPIN_LOCK(LOGSRV_LOCK, &logsrv->lock);
+               sent = writev(*plogfd, iovec, 8);
++              HA_SPIN_UNLOCK(LOGSRV_LOCK, &logsrv->lock);
+       }
+       else {
+               msghdr.msg_name = (struct sockaddr *)&logsrv->addr;
diff --git a/net/haproxy/patches/006-DOC-improve-the-wording-in-CONTRIBUTING-about-how-to-document-a-bug-fix.patch b/net/haproxy/patches/006-DOC-improve-the-wording-in-CONTRIBUTING-about-how-to-document-a-bug-fix.patch
new file mode 100644 (file)
index 0000000..e7c8c08
--- /dev/null
@@ -0,0 +1,101 @@
+commit 8de6badd32fb584d60733a6236113edba00f8701
+Author: Willy Tarreau <w@1wt.eu>
+Date:   Fri Jul 26 15:21:54 2019 +0200
+
+    DOC: improve the wording in CONTRIBUTING about how to document a bug fix
+    
+    Insufficiently described bug fixes are still too frequent. It's a real
+    pain to create each new maintenance release, as 3/4 of the time is spent
+    trying to guess what problem a patch fixes, which is already important
+    in order to decide whether to pick the fix or not, but is even more
+    capital in order to write understandable release notes.
+    
+    Christopher rightfully demands that a patch tagged "BUG" MUST ABSOLUTELY
+    describe the problem and why this problem is a bug. Describing the fix
+    is one thing but if the bug is unknown, why would there be a fix ? How
+    can a stable maintainer be convinced to take a fix if its author didn't
+    care about checking whether it was a real bug ? This patch tries to
+    explain a bit better what really needs to appear in the commit message
+    and how to describe a bug.
+    
+    To be backported to all relevant stable versions.
+    
+    (cherry picked from commit 41f638c1eb8167bb473a6c8811d7fd70d7c06e07)
+    Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
+
+diff --git a/CONTRIBUTING b/CONTRIBUTING
+index 0fcd921e..201e122d 100644
+--- a/CONTRIBUTING
++++ b/CONTRIBUTING
+@@ -454,7 +454,18 @@ do not think about them anymore after a few patches.
+ 11) Real commit messages please!
+-   Please properly format your commit messages. To get an idea, just run
++   The commit message is how you're trying to convince a maintainer to adopt
++   your work and maintain it as long as possible. A dirty commit message almost
++   always comes with dirty code. Too short a commit message indicates that too
++   short an analysis was done and that side effects are extremely likely to be
++   encountered. It's the maintainer's job to decide to accept this work in its
++   current form or not, with the known constraints. Some patches which rework
++   architectural parts or fix sensitive bugs come with 20-30 lines of design
++   explanations, limitations, hypothesis or even doubts, and despite this it
++   happens when reading them 6 months later while trying to identify a bug that
++   developers still miss some information about corner cases.
++
++   So please properly format your commit messages. To get an idea, just run
+    "git log" on the file you've just modified. Patches always have the format
+    of an e-mail made of a subject, a description and the actual patch. If you
+    are sending a patch as an e-mail formatted this way, it can quickly be
+@@ -506,9 +517,17 @@ do not think about them anymore after a few patches.
+    But in any case, it is important that there is a clean description of what
+    the patch does, the motivation for what it does, why it's the best way to do
+-   it, its impacts, and what it does not yet cover. Also, in HAProxy, like many
+-   projects which take a great care of maintaining stable branches, patches are
+-   reviewed later so that some of them can be backported to stable releases.
++   it, its impacts, and what it does not yet cover. And this is particularly
++   important for bugs. A patch tagged "BUG" must absolutely explain what the
++   problem is, why it is considered as a bug. Anybody, even non-developers,
++   should be able to tell whether or not a patch is likely to address an issue
++   they are facing. Indicating what the code will do after the fix doesn't help
++   if it does not say what problem is encountered without the patch. Note that
++   in some cases the bug is purely theorical and observed by reading the code.
++   In this case it's perfectly fine to provide an estimate about possible
++   effects. Also, in HAProxy, like many projects which take a great care of
++   maintaining stable branches, patches are reviewed later so that some of them
++   can be backported to stable releases.
+    While reviewing hundreds of patches can seem cumbersome, with a proper
+    formatting of the subject line it actually becomes very easy. For example,
+@@ -630,13 +649,23 @@ patch types include :
+   - BUG      fix for a bug. The severity of the bug should also be indicated
+              when known. Similarly, if a backport is needed to older versions,
+-             it should be indicated on the last line of the commit message. If
+-             the bug has been identified as a regression brought by a specific
+-             patch or version, this indication will be appreciated too. New
+-             maintenance releases are generally emitted when a few of these
+-             patches are merged. If the bug is a vulnerability for which a CVE
+-             identifier was assigned before you publish the fix, you can mention
+-             it in the commit message, it will help distro maintainers.
++             it should be indicated on the last line of the commit message. The
++             commit message MUST ABSOLUTELY describe the problem and its impact
++             to non-developers. Any user must be able to guess if this patch is
++             likely to fix a problem they are facing. Even if the bug was
++             discovered by accident while reading the code or running an
++             automated tool, it is mandatory to try to estimate what potential
++             issue it might cause and under what circumstances. There may even
++             be security implications sometimes so a minimum analysis is really
++             required. Also please think about stable maintainers who have to
++             build the release notes, they need to have enough input about the
++             bug's impact to explain it. If the bug has been identified as a
++             regression brought by a specific patch or version, this indication
++             will be appreciated too. New maintenance releases are generally
++             emitted when a few of these patches are merged. If the bug is a
++             vulnerability for which a CVE identifier was assigned before you
++             publish the fix, you can mention it in the commit message, it will
++             help distro maintainers.
+   - CLEANUP  code cleanup, silence of warnings, etc... theoretically no impact.
+              These patches will rarely be seen in stable branches, though they
diff --git a/net/haproxy/patches/007-BUG-MINOR-hlua-htx-Reset-channels-analyzers-when-txn-done-is-called.patch b/net/haproxy/patches/007-BUG-MINOR-hlua-htx-Reset-channels-analyzers-when-txn-done-is-called.patch
new file mode 100644 (file)
index 0000000..e4f0457
--- /dev/null
@@ -0,0 +1,49 @@
+commit 72c692701ab4197f1f8ec7594b7e8ef5082b9d9e
+Author: Christopher Faulet <cfaulet@haproxy.com>
+Date:   Fri Jul 26 16:40:24 2019 +0200
+
+    BUG/MINOR: hlua/htx: Reset channels analyzers when txn:done() is called
+    
+    For HTX streams, when txn:done() is called, the work is delegated to the
+    function http_reply_and_close(). But it is not enough. The channel's analyzers
+    must also be reset. Otherwise, some analyzers may still be called while
+    processing should be aborted.
+    
+    For instance, if the function is called from an http-request rules on the
+    frontend, request analyzers on the backend side are still called. So we may try
+    to add an header to the request, while this one was already reset.
+    
+    This patch must be backported to 2.0 and 1.9.
+    
+    (cherry picked from commit fe6a71b8e08234dbe03fbd2fa3017590681479df)
+    Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
+
+diff --git a/src/hlua.c b/src/hlua.c
+index 23d2aa04..f9d1d699 100644
+--- a/src/hlua.c
++++ b/src/hlua.c
+@@ -5996,8 +5996,12 @@ __LJMP static int hlua_txn_done(lua_State *L)
+       ic = &htxn->s->req;
+       oc = &htxn->s->res;
+-      if (IS_HTX_STRM(htxn->s))
+-              htx_reply_and_close(htxn->s, 0, NULL);
++      if (IS_HTX_STRM(htxn->s)) {
++              htxn->s->txn->status = 0;
++              http_reply_and_close(htxn->s, 0, NULL);
++              ic->analysers &= AN_REQ_FLT_END;
++              oc->analysers &= AN_RES_FLT_END;
++      }
+       else {
+               if (htxn->s->txn) {
+                       /* HTTP mode, let's stay in sync with the stream */
+@@ -6031,6 +6035,9 @@ __LJMP static int hlua_txn_done(lua_State *L)
+               ic->analysers = 0;
+       }
++      if (!(htxn->s->flags & SF_ERR_MASK))      // this is not really an error but it is
++              htxn->s->flags |= SF_ERR_LOCAL;   // to mark that it comes from the proxy
++
+       hlua->flags |= HLUA_STOP;
+       WILL_LJMP(hlua_done(L));
+       return 0;
diff --git a/net/haproxy/patches/008-BUG-MEDIUM-hlua-Check-the-calling-direction-in-lua-functions-of-the-HTTP-class.patch b/net/haproxy/patches/008-BUG-MEDIUM-hlua-Check-the-calling-direction-in-lua-functions-of-the-HTTP-class.patch
new file mode 100644 (file)
index 0000000..b0a5f9f
--- /dev/null
@@ -0,0 +1,201 @@
+commit dc2ee27c7a1908ca3157a10ad131f13644bcaea3
+Author: Christopher Faulet <cfaulet@haproxy.com>
+Date:   Fri Jul 26 16:17:01 2019 +0200
+
+    BUG/MEDIUM: hlua: Check the calling direction in lua functions of the HTTP class
+    
+    It is invalid to manipulate responses from http-request rules or to manipulate
+    requests from http-response rules. When http-request rules are evaluated, the
+    connection to server is not yet established, so there is no response at all. And
+    when http-response rules are evaluated, the request has already been sent to the
+    server.
+    
+    Now, the calling direction is checked. So functions "txn.http:req_*" can now
+    only be called from http-request rules and the functions "txn.http:res_*" can
+    only be called from http-response rules.
+    
+    This issue was reported on Github (#190).
+    
+    This patch must be backported to all versions since the 1.6.
+    
+    (cherry picked from commit 84a6d5bc217a418db8efc4e76a0a32860db2c608)
+    Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
+
+diff --git a/src/hlua.c b/src/hlua.c
+index f9d1d699..21351cd6 100644
+--- a/src/hlua.c
++++ b/src/hlua.c
+@@ -5346,6 +5346,9 @@ __LJMP static int hlua_http_req_get_headers(lua_State *L)
+       MAY_LJMP(check_args(L, 1, "req_get_headers"));
+       htxn = MAY_LJMP(hlua_checkhttp(L, 1));
++      if (htxn->dir != SMP_OPT_DIR_REQ)
++              WILL_LJMP(lua_error(L));
++
+       return hlua_http_get_headers(L, htxn, &htxn->s->txn->req);
+ }
+@@ -5356,6 +5359,9 @@ __LJMP static int hlua_http_res_get_headers(lua_State *L)
+       MAY_LJMP(check_args(L, 1, "res_get_headers"));
+       htxn = MAY_LJMP(hlua_checkhttp(L, 1));
++      if (htxn->dir != SMP_OPT_DIR_RES)
++              WILL_LJMP(lua_error(L));
++
+       return hlua_http_get_headers(L, htxn, &htxn->s->txn->rsp);
+ }
+@@ -5393,6 +5399,9 @@ __LJMP static int hlua_http_req_rep_hdr(lua_State *L)
+       MAY_LJMP(check_args(L, 4, "req_rep_hdr"));
+       htxn = MAY_LJMP(hlua_checkhttp(L, 1));
++      if (htxn->dir != SMP_OPT_DIR_REQ)
++              WILL_LJMP(lua_error(L));
++
+       return MAY_LJMP(hlua_http_rep_hdr(L, htxn, &htxn->s->txn->req, ACT_HTTP_REPLACE_HDR));
+ }
+@@ -5403,6 +5412,9 @@ __LJMP static int hlua_http_res_rep_hdr(lua_State *L)
+       MAY_LJMP(check_args(L, 4, "res_rep_hdr"));
+       htxn = MAY_LJMP(hlua_checkhttp(L, 1));
++      if (htxn->dir != SMP_OPT_DIR_RES)
++              WILL_LJMP(lua_error(L));
++
+       return MAY_LJMP(hlua_http_rep_hdr(L, htxn, &htxn->s->txn->rsp, ACT_HTTP_REPLACE_HDR));
+ }
+@@ -5413,6 +5425,9 @@ __LJMP static int hlua_http_req_rep_val(lua_State *L)
+       MAY_LJMP(check_args(L, 4, "req_rep_hdr"));
+       htxn = MAY_LJMP(hlua_checkhttp(L, 1));
++      if (htxn->dir != SMP_OPT_DIR_REQ)
++              WILL_LJMP(lua_error(L));
++
+       return MAY_LJMP(hlua_http_rep_hdr(L, htxn, &htxn->s->txn->req, ACT_HTTP_REPLACE_VAL));
+ }
+@@ -5423,6 +5438,9 @@ __LJMP static int hlua_http_res_rep_val(lua_State *L)
+       MAY_LJMP(check_args(L, 4, "res_rep_val"));
+       htxn = MAY_LJMP(hlua_checkhttp(L, 1));
++      if (htxn->dir != SMP_OPT_DIR_RES)
++              WILL_LJMP(lua_error(L));
++
+       return MAY_LJMP(hlua_http_rep_hdr(L, htxn, &htxn->s->txn->rsp, ACT_HTTP_REPLACE_VAL));
+ }
+@@ -5462,6 +5480,9 @@ __LJMP static int hlua_http_req_del_hdr(lua_State *L)
+       MAY_LJMP(check_args(L, 2, "req_del_hdr"));
+       htxn = MAY_LJMP(hlua_checkhttp(L, 1));
++      if (htxn->dir != SMP_OPT_DIR_REQ)
++              WILL_LJMP(lua_error(L));
++
+       return hlua_http_del_hdr(L, htxn, &htxn->s->txn->req);
+ }
+@@ -5469,9 +5490,12 @@ __LJMP static int hlua_http_res_del_hdr(lua_State *L)
+ {
+       struct hlua_txn *htxn;
+-      MAY_LJMP(check_args(L, 2, "req_del_hdr"));
++      MAY_LJMP(check_args(L, 2, "res_del_hdr"));
+       htxn = MAY_LJMP(hlua_checkhttp(L, 1));
++      if (htxn->dir != SMP_OPT_DIR_RES)
++              WILL_LJMP(lua_error(L));
++
+       return hlua_http_del_hdr(L, htxn, &htxn->s->txn->rsp);
+ }
+@@ -5523,6 +5547,9 @@ __LJMP static int hlua_http_req_add_hdr(lua_State *L)
+       MAY_LJMP(check_args(L, 3, "req_add_hdr"));
+       htxn = MAY_LJMP(hlua_checkhttp(L, 1));
++      if (htxn->dir != SMP_OPT_DIR_REQ)
++              WILL_LJMP(lua_error(L));
++
+       return hlua_http_add_hdr(L, htxn, &htxn->s->txn->req);
+ }
+@@ -5533,6 +5560,9 @@ __LJMP static int hlua_http_res_add_hdr(lua_State *L)
+       MAY_LJMP(check_args(L, 3, "res_add_hdr"));
+       htxn = MAY_LJMP(hlua_checkhttp(L, 1));
++      if (htxn->dir != SMP_OPT_DIR_RES)
++              WILL_LJMP(lua_error(L));
++
+       return hlua_http_add_hdr(L, htxn, &htxn->s->txn->rsp);
+ }
+@@ -5543,6 +5573,9 @@ static int hlua_http_req_set_hdr(lua_State *L)
+       MAY_LJMP(check_args(L, 3, "req_set_hdr"));
+       htxn = MAY_LJMP(hlua_checkhttp(L, 1));
++      if (htxn->dir != SMP_OPT_DIR_REQ)
++              WILL_LJMP(lua_error(L));
++
+       hlua_http_del_hdr(L, htxn, &htxn->s->txn->req);
+       return hlua_http_add_hdr(L, htxn, &htxn->s->txn->req);
+ }
+@@ -5554,6 +5587,9 @@ static int hlua_http_res_set_hdr(lua_State *L)
+       MAY_LJMP(check_args(L, 3, "res_set_hdr"));
+       htxn = MAY_LJMP(hlua_checkhttp(L, 1));
++      if (htxn->dir != SMP_OPT_DIR_RES)
++              WILL_LJMP(lua_error(L));
++
+       hlua_http_del_hdr(L, htxn, &htxn->s->txn->rsp);
+       return hlua_http_add_hdr(L, htxn, &htxn->s->txn->rsp);
+ }
+@@ -5565,6 +5601,9 @@ static int hlua_http_req_set_meth(lua_State *L)
+       size_t name_len;
+       const char *name = MAY_LJMP(luaL_checklstring(L, 2, &name_len));
++      if (htxn->dir != SMP_OPT_DIR_REQ)
++              WILL_LJMP(lua_error(L));
++
+       lua_pushboolean(L, http_replace_req_line(0, name, name_len, htxn->p, htxn->s) != -1);
+       return 1;
+ }
+@@ -5576,6 +5615,9 @@ static int hlua_http_req_set_path(lua_State *L)
+       size_t name_len;
+       const char *name = MAY_LJMP(luaL_checklstring(L, 2, &name_len));
++      if (htxn->dir != SMP_OPT_DIR_REQ)
++              WILL_LJMP(lua_error(L));
++
+       lua_pushboolean(L, http_replace_req_line(1, name, name_len, htxn->p, htxn->s) != -1);
+       return 1;
+ }
+@@ -5587,6 +5629,9 @@ static int hlua_http_req_set_query(lua_State *L)
+       size_t name_len;
+       const char *name = MAY_LJMP(luaL_checklstring(L, 2, &name_len));
++      if (htxn->dir != SMP_OPT_DIR_REQ)
++              WILL_LJMP(lua_error(L));
++
+       /* Check length. */
+       if (name_len > trash.size - 1) {
+               lua_pushboolean(L, 0);
+@@ -5611,6 +5656,9 @@ static int hlua_http_req_set_uri(lua_State *L)
+       size_t name_len;
+       const char *name = MAY_LJMP(luaL_checklstring(L, 2, &name_len));
++      if (htxn->dir != SMP_OPT_DIR_REQ)
++              WILL_LJMP(lua_error(L));
++
+       lua_pushboolean(L, http_replace_req_line(3, name, name_len, htxn->p, htxn->s) != -1);
+       return 1;
+ }
+@@ -5622,6 +5670,9 @@ static int hlua_http_res_set_status(lua_State *L)
+       unsigned int code = MAY_LJMP(luaL_checkinteger(L, 2));
+       const char *reason = MAY_LJMP(luaL_optlstring(L, 3, NULL, NULL));
++      if (htxn->dir != SMP_OPT_DIR_RES)
++              WILL_LJMP(lua_error(L));
++
+       http_set_status(code, reason, htxn->s);
+       return 0;
+ }
diff --git a/net/haproxy/patches/009-MINOR-hlua-Dont-set-request-analyzers-on-response-channel-for-lua-actions.patch b/net/haproxy/patches/009-MINOR-hlua-Dont-set-request-analyzers-on-response-channel-for-lua-actions.patch
new file mode 100644 (file)
index 0000000..8d799d3
--- /dev/null
@@ -0,0 +1,34 @@
+commit b22f6501bc9838061472128360e0e55d08cb0bd9
+Author: Christopher Faulet <cfaulet@haproxy.com>
+Date:   Fri Jul 26 14:54:52 2019 +0200
+
+    MINOR: hlua: Don't set request analyzers on response channel for lua actions
+    
+    Setting some requests analyzers on the response channel was an old trick to be
+    sure to re-evaluate the request's analyers after the response's ones have been
+    called. It is no more necessary. In fact, this trick was removed in the version
+    1.8 and backported up to the version 1.6.
+    
+    This patch must be backported to all versions since 1.6 to ease the backports of
+    fixes on the lua code.
+    
+    (cherry picked from commit 51fa358432247fe5d7259d9d8a0e08d49d429c73)
+    Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
+
+diff --git a/src/hlua.c b/src/hlua.c
+index 21351cd6..36454cdc 100644
+--- a/src/hlua.c
++++ b/src/hlua.c
+@@ -6873,11 +6873,8 @@ static enum act_return hlua_action(struct act_rule *rule, struct proxy *px,
+                * is detected on a response channel. This is useful
+                * only for actions targeted on the requests.
+                */
+-              if (HLUA_IS_WAKERESWR(s->hlua)) {
++              if (HLUA_IS_WAKERESWR(s->hlua))
+                       s->res.flags |= CF_WAKE_WRITE;
+-                      if ((analyzer & (AN_REQ_INSPECT_FE|AN_REQ_HTTP_PROCESS_FE)))
+-                              s->res.analysers |= analyzer;
+-              }
+               if (HLUA_IS_WAKEREQWR(s->hlua))
+                       s->req.flags |= CF_WAKE_WRITE;
+               /* We can quit the function without consistency check
diff --git a/net/haproxy/patches/010-MINOR-hlua-Add-a-flag-on-the-lua-txn-to-know-in-which-context-it-can-be-used.patch b/net/haproxy/patches/010-MINOR-hlua-Add-a-flag-on-the-lua-txn-to-know-in-which-context-it-can-be-used.patch
new file mode 100644 (file)
index 0000000..89160e4
--- /dev/null
@@ -0,0 +1,110 @@
+commit ff96b8bd3f85155f65b2b9c9f046fe3e40f630a4
+Author: Christopher Faulet <cfaulet@haproxy.com>
+Date:   Fri Jul 26 15:09:53 2019 +0200
+
+    MINOR: hlua: Add a flag on the lua txn to know in which context it can be used
+    
+    When a lua action or a lua sample fetch is called, a lua transaction is
+    created. It is an entry in the stack containing the class TXN. Thanks to it, we
+    can know the direction (request or response) of the call. But, for some
+    functions, it is also necessary to know if the buffer is "HTTP ready" for the
+    given direction. "HTTP ready" means there is a valid HTTP message in the
+    channel's buffer. So, when a lua action or a lua sample fetch is called, the
+    flag HLUA_TXN_HTTP_RDY is set if it is appropriate.
+    
+    (cherry picked from commit bfab2dddad3ded87617d1e2db54761943d1eb32d)
+    Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
+
+diff --git a/include/types/hlua.h b/include/types/hlua.h
+index 70c76852..2f4e38be 100644
+--- a/include/types/hlua.h
++++ b/include/types/hlua.h
+@@ -43,7 +43,8 @@ struct stream;
+ #define HLUA_F_AS_STRING    0x01
+ #define HLUA_F_MAY_USE_HTTP 0x02
+-#define HLUA_TXN_NOTERM 0x00000001
++#define HLUA_TXN_NOTERM   0x00000001
++#define HLUA_TXN_HTTP_RDY 0x00000002 /* Set if the txn is HTTP ready for the defined direction */
+ #define HLUA_CONCAT_BLOCSZ 2048
+diff --git a/src/hlua.c b/src/hlua.c
+index 36454cdc..d37e3c61 100644
+--- a/src/hlua.c
++++ b/src/hlua.c
+@@ -6494,6 +6494,7 @@ static int hlua_sample_fetch_wrapper(const struct arg *arg_p, struct sample *smp
+       struct stream *stream = smp->strm;
+       const char *error;
+       const struct buffer msg = { };
++      unsigned int hflags = HLUA_TXN_NOTERM;
+       if (!stream)
+               return 0;
+@@ -6517,6 +6518,13 @@ static int hlua_sample_fetch_wrapper(const struct arg *arg_p, struct sample *smp
+       consistency_set(stream, smp->opt, &stream->hlua->cons);
++      if (stream->be->mode == PR_MODE_HTTP) {
++              if ((smp->opt & SMP_OPT_DIR) == SMP_OPT_DIR_REQ)
++                      hflags |= ((stream->txn->req.msg_state < HTTP_MSG_BODY) ? 0 : HLUA_TXN_HTTP_RDY);
++              else
++                      hflags |= ((stream->txn->rsp.msg_state < HTTP_MSG_BODY) ? 0 : HLUA_TXN_HTTP_RDY);
++      }
++
+       /* If it is the first run, initialize the data for the call. */
+       if (!HLUA_IS_RUNNING(stream->hlua)) {
+@@ -6541,8 +6549,7 @@ static int hlua_sample_fetch_wrapper(const struct arg *arg_p, struct sample *smp
+               lua_rawgeti(stream->hlua->T, LUA_REGISTRYINDEX, fcn->function_ref);
+               /* push arguments in the stack. */
+-              if (!hlua_txn_new(stream->hlua->T, stream, smp->px, smp->opt & SMP_OPT_DIR,
+-                                HLUA_TXN_NOTERM)) {
++              if (!hlua_txn_new(stream->hlua->T, stream, smp->px, smp->opt & SMP_OPT_DIR, hflags)) {
+                       SEND_ERR(smp->px, "Lua sample-fetch '%s': full stack.\n", fcn->name);
+                       RESET_SAFE_LJMP(stream->hlua->T);
+                       return 0;
+@@ -6759,16 +6766,16 @@ static enum act_return hlua_action(struct act_rule *rule, struct proxy *px,
+                                    struct session *sess, struct stream *s, int flags)
+ {
+       char **arg;
+-      unsigned int analyzer;
++      unsigned int hflags = 0;
+       int dir;
+       const char *error;
+       const struct buffer msg = { };
+       switch (rule->from) {
+-      case ACT_F_TCP_REQ_CNT: analyzer = AN_REQ_INSPECT_FE     ; dir = SMP_OPT_DIR_REQ; break;
+-      case ACT_F_TCP_RES_CNT: analyzer = AN_RES_INSPECT        ; dir = SMP_OPT_DIR_RES; break;
+-      case ACT_F_HTTP_REQ:    analyzer = AN_REQ_HTTP_PROCESS_FE; dir = SMP_OPT_DIR_REQ; break;
+-      case ACT_F_HTTP_RES:    analyzer = AN_RES_HTTP_PROCESS_BE; dir = SMP_OPT_DIR_RES; break;
++      case ACT_F_TCP_REQ_CNT:                            ; dir = SMP_OPT_DIR_REQ; break;
++      case ACT_F_TCP_RES_CNT:                            ; dir = SMP_OPT_DIR_RES; break;
++      case ACT_F_HTTP_REQ:    hflags = HLUA_TXN_HTTP_RDY ; dir = SMP_OPT_DIR_REQ; break;
++      case ACT_F_HTTP_RES:    hflags = HLUA_TXN_HTTP_RDY ; dir = SMP_OPT_DIR_RES; break;
+       default:
+               SEND_ERR(px, "Lua: internal error while execute action.\n");
+               return ACT_RET_CONT;
+@@ -6821,7 +6828,7 @@ static enum act_return hlua_action(struct act_rule *rule, struct proxy *px,
+               lua_rawgeti(s->hlua->T, LUA_REGISTRYINDEX, rule->arg.hlua_rule->fcn.function_ref);
+               /* Create and and push object stream in the stack. */
+-              if (!hlua_txn_new(s->hlua->T, s, px, dir, 0)) {
++              if (!hlua_txn_new(s->hlua->T, s, px, dir, hflags)) {
+                       SEND_ERR(px, "Lua function '%s': full stack.\n",
+                                rule->arg.hlua_rule->fcn.name);
+                       RESET_SAFE_LJMP(s->hlua->T);
+@@ -6864,9 +6871,9 @@ static enum act_return hlua_action(struct act_rule *rule, struct proxy *px,
+       case HLUA_E_AGAIN:
+               /* Set timeout in the required channel. */
+               if (s->hlua->wake_time != TICK_ETERNITY) {
+-                      if (analyzer & (AN_REQ_INSPECT_FE|AN_REQ_HTTP_PROCESS_FE))
++                      if (dir & SMP_OPT_DIR_REQ)
+                               s->req.analyse_exp = s->hlua->wake_time;
+-                      else if (analyzer & (AN_RES_INSPECT|AN_RES_HTTP_PROCESS_BE))
++                      else
+                               s->res.analyse_exp = s->hlua->wake_time;
+               }
+               /* Some actions can be wake up when a "write" event
diff --git a/net/haproxy/patches/011-BUG-MINOR-hlua-Only-execute-functions-of-HTTP-class-if-the-txn-is-HTTP-ready.patch b/net/haproxy/patches/011-BUG-MINOR-hlua-Only-execute-functions-of-HTTP-class-if-the-txn-is-HTTP-ready.patch
new file mode 100644 (file)
index 0000000..0ebb58e
--- /dev/null
@@ -0,0 +1,180 @@
+commit 2351ca211d655c1be9ef6d62880899102134266d
+Author: Christopher Faulet <cfaulet@haproxy.com>
+Date:   Fri Jul 26 16:31:34 2019 +0200
+
+    BUG/MINOR: hlua: Only execute functions of HTTP class if the txn is HTTP ready
+    
+    The flag HLUA_TXN_HTTP_RDY was added in the previous commit to know when a
+    function is called for a channel with a valid HTTP message or not. Of course it
+    also depends on the calling direction. In this commit, we allow the execution of
+    functions of the HTTP class only if this flag is set.
+    
+    Nobody seems to use them from an unsupported context (for instance, trying to
+    set an HTTP header from a tcp-request rule). But it remains a bug leading to
+    undefined behaviors or crashes.
+    
+    This patch may be backported to all versions since the 1.6. It depends on the
+    commits "MINOR: hlua: Add a flag on the lua txn to know in which context it can
+    be used" and "MINOR: hlua: Don't set request analyzers on response channel for
+    lua actions".
+    
+    (cherry picked from commit 301eff8e215d5dc7130e1ebacd7cf8da09a4f643)
+    Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
+
+diff --git a/src/hlua.c b/src/hlua.c
+index d37e3c61..4d92fa44 100644
+--- a/src/hlua.c
++++ b/src/hlua.c
+@@ -5346,7 +5346,7 @@ __LJMP static int hlua_http_req_get_headers(lua_State *L)
+       MAY_LJMP(check_args(L, 1, "req_get_headers"));
+       htxn = MAY_LJMP(hlua_checkhttp(L, 1));
+-      if (htxn->dir != SMP_OPT_DIR_REQ)
++      if (htxn->dir != SMP_OPT_DIR_REQ || !(htxn->flags & HLUA_TXN_HTTP_RDY))
+               WILL_LJMP(lua_error(L));
+       return hlua_http_get_headers(L, htxn, &htxn->s->txn->req);
+@@ -5359,7 +5359,7 @@ __LJMP static int hlua_http_res_get_headers(lua_State *L)
+       MAY_LJMP(check_args(L, 1, "res_get_headers"));
+       htxn = MAY_LJMP(hlua_checkhttp(L, 1));
+-      if (htxn->dir != SMP_OPT_DIR_RES)
++      if (htxn->dir != SMP_OPT_DIR_RES || !(htxn->flags & HLUA_TXN_HTTP_RDY))
+               WILL_LJMP(lua_error(L));
+       return hlua_http_get_headers(L, htxn, &htxn->s->txn->rsp);
+@@ -5399,7 +5399,7 @@ __LJMP static int hlua_http_req_rep_hdr(lua_State *L)
+       MAY_LJMP(check_args(L, 4, "req_rep_hdr"));
+       htxn = MAY_LJMP(hlua_checkhttp(L, 1));
+-      if (htxn->dir != SMP_OPT_DIR_REQ)
++      if (htxn->dir != SMP_OPT_DIR_REQ || !(htxn->flags & HLUA_TXN_HTTP_RDY))
+               WILL_LJMP(lua_error(L));
+       return MAY_LJMP(hlua_http_rep_hdr(L, htxn, &htxn->s->txn->req, ACT_HTTP_REPLACE_HDR));
+@@ -5412,7 +5412,7 @@ __LJMP static int hlua_http_res_rep_hdr(lua_State *L)
+       MAY_LJMP(check_args(L, 4, "res_rep_hdr"));
+       htxn = MAY_LJMP(hlua_checkhttp(L, 1));
+-      if (htxn->dir != SMP_OPT_DIR_RES)
++      if (htxn->dir != SMP_OPT_DIR_RES || !(htxn->flags & HLUA_TXN_HTTP_RDY))
+               WILL_LJMP(lua_error(L));
+       return MAY_LJMP(hlua_http_rep_hdr(L, htxn, &htxn->s->txn->rsp, ACT_HTTP_REPLACE_HDR));
+@@ -5425,7 +5425,7 @@ __LJMP static int hlua_http_req_rep_val(lua_State *L)
+       MAY_LJMP(check_args(L, 4, "req_rep_hdr"));
+       htxn = MAY_LJMP(hlua_checkhttp(L, 1));
+-      if (htxn->dir != SMP_OPT_DIR_REQ)
++      if (htxn->dir != SMP_OPT_DIR_REQ || !(htxn->flags & HLUA_TXN_HTTP_RDY))
+               WILL_LJMP(lua_error(L));
+       return MAY_LJMP(hlua_http_rep_hdr(L, htxn, &htxn->s->txn->req, ACT_HTTP_REPLACE_VAL));
+@@ -5438,7 +5438,7 @@ __LJMP static int hlua_http_res_rep_val(lua_State *L)
+       MAY_LJMP(check_args(L, 4, "res_rep_val"));
+       htxn = MAY_LJMP(hlua_checkhttp(L, 1));
+-      if (htxn->dir != SMP_OPT_DIR_RES)
++      if (htxn->dir != SMP_OPT_DIR_RES || !(htxn->flags & HLUA_TXN_HTTP_RDY))
+               WILL_LJMP(lua_error(L));
+       return MAY_LJMP(hlua_http_rep_hdr(L, htxn, &htxn->s->txn->rsp, ACT_HTTP_REPLACE_VAL));
+@@ -5480,7 +5480,7 @@ __LJMP static int hlua_http_req_del_hdr(lua_State *L)
+       MAY_LJMP(check_args(L, 2, "req_del_hdr"));
+       htxn = MAY_LJMP(hlua_checkhttp(L, 1));
+-      if (htxn->dir != SMP_OPT_DIR_REQ)
++      if (htxn->dir != SMP_OPT_DIR_REQ || !(htxn->flags & HLUA_TXN_HTTP_RDY))
+               WILL_LJMP(lua_error(L));
+       return hlua_http_del_hdr(L, htxn, &htxn->s->txn->req);
+@@ -5493,7 +5493,7 @@ __LJMP static int hlua_http_res_del_hdr(lua_State *L)
+       MAY_LJMP(check_args(L, 2, "res_del_hdr"));
+       htxn = MAY_LJMP(hlua_checkhttp(L, 1));
+-      if (htxn->dir != SMP_OPT_DIR_RES)
++      if (htxn->dir != SMP_OPT_DIR_RES || !(htxn->flags & HLUA_TXN_HTTP_RDY))
+               WILL_LJMP(lua_error(L));
+       return hlua_http_del_hdr(L, htxn, &htxn->s->txn->rsp);
+@@ -5547,7 +5547,7 @@ __LJMP static int hlua_http_req_add_hdr(lua_State *L)
+       MAY_LJMP(check_args(L, 3, "req_add_hdr"));
+       htxn = MAY_LJMP(hlua_checkhttp(L, 1));
+-      if (htxn->dir != SMP_OPT_DIR_REQ)
++      if (htxn->dir != SMP_OPT_DIR_REQ || !(htxn->flags & HLUA_TXN_HTTP_RDY))
+               WILL_LJMP(lua_error(L));
+       return hlua_http_add_hdr(L, htxn, &htxn->s->txn->req);
+@@ -5560,7 +5560,7 @@ __LJMP static int hlua_http_res_add_hdr(lua_State *L)
+       MAY_LJMP(check_args(L, 3, "res_add_hdr"));
+       htxn = MAY_LJMP(hlua_checkhttp(L, 1));
+-      if (htxn->dir != SMP_OPT_DIR_RES)
++      if (htxn->dir != SMP_OPT_DIR_RES || !(htxn->flags & HLUA_TXN_HTTP_RDY))
+               WILL_LJMP(lua_error(L));
+       return hlua_http_add_hdr(L, htxn, &htxn->s->txn->rsp);
+@@ -5573,7 +5573,7 @@ static int hlua_http_req_set_hdr(lua_State *L)
+       MAY_LJMP(check_args(L, 3, "req_set_hdr"));
+       htxn = MAY_LJMP(hlua_checkhttp(L, 1));
+-      if (htxn->dir != SMP_OPT_DIR_REQ)
++      if (htxn->dir != SMP_OPT_DIR_REQ || !(htxn->flags & HLUA_TXN_HTTP_RDY))
+               WILL_LJMP(lua_error(L));
+       hlua_http_del_hdr(L, htxn, &htxn->s->txn->req);
+@@ -5587,7 +5587,7 @@ static int hlua_http_res_set_hdr(lua_State *L)
+       MAY_LJMP(check_args(L, 3, "res_set_hdr"));
+       htxn = MAY_LJMP(hlua_checkhttp(L, 1));
+-      if (htxn->dir != SMP_OPT_DIR_RES)
++      if (htxn->dir != SMP_OPT_DIR_RES || !(htxn->flags & HLUA_TXN_HTTP_RDY))
+               WILL_LJMP(lua_error(L));
+       hlua_http_del_hdr(L, htxn, &htxn->s->txn->rsp);
+@@ -5601,7 +5601,7 @@ static int hlua_http_req_set_meth(lua_State *L)
+       size_t name_len;
+       const char *name = MAY_LJMP(luaL_checklstring(L, 2, &name_len));
+-      if (htxn->dir != SMP_OPT_DIR_REQ)
++      if (htxn->dir != SMP_OPT_DIR_REQ || !(htxn->flags & HLUA_TXN_HTTP_RDY))
+               WILL_LJMP(lua_error(L));
+       lua_pushboolean(L, http_replace_req_line(0, name, name_len, htxn->p, htxn->s) != -1);
+@@ -5615,7 +5615,7 @@ static int hlua_http_req_set_path(lua_State *L)
+       size_t name_len;
+       const char *name = MAY_LJMP(luaL_checklstring(L, 2, &name_len));
+-      if (htxn->dir != SMP_OPT_DIR_REQ)
++      if (htxn->dir != SMP_OPT_DIR_REQ || !(htxn->flags & HLUA_TXN_HTTP_RDY))
+               WILL_LJMP(lua_error(L));
+       lua_pushboolean(L, http_replace_req_line(1, name, name_len, htxn->p, htxn->s) != -1);
+@@ -5629,7 +5629,7 @@ static int hlua_http_req_set_query(lua_State *L)
+       size_t name_len;
+       const char *name = MAY_LJMP(luaL_checklstring(L, 2, &name_len));
+-      if (htxn->dir != SMP_OPT_DIR_REQ)
++      if (htxn->dir != SMP_OPT_DIR_REQ || !(htxn->flags & HLUA_TXN_HTTP_RDY))
+               WILL_LJMP(lua_error(L));
+       /* Check length. */
+@@ -5656,7 +5656,7 @@ static int hlua_http_req_set_uri(lua_State *L)
+       size_t name_len;
+       const char *name = MAY_LJMP(luaL_checklstring(L, 2, &name_len));
+-      if (htxn->dir != SMP_OPT_DIR_REQ)
++      if (htxn->dir != SMP_OPT_DIR_REQ || !(htxn->flags & HLUA_TXN_HTTP_RDY))
+               WILL_LJMP(lua_error(L));
+       lua_pushboolean(L, http_replace_req_line(3, name, name_len, htxn->p, htxn->s) != -1);
+@@ -5670,7 +5670,7 @@ static int hlua_http_res_set_status(lua_State *L)
+       unsigned int code = MAY_LJMP(luaL_checkinteger(L, 2));
+       const char *reason = MAY_LJMP(luaL_optlstring(L, 3, NULL, NULL));
+-      if (htxn->dir != SMP_OPT_DIR_RES)
++      if (htxn->dir != SMP_OPT_DIR_RES || !(htxn->flags & HLUA_TXN_HTTP_RDY))
+               WILL_LJMP(lua_error(L));
+       http_set_status(code, reason, htxn->s);
diff --git a/net/haproxy/patches/012-BUG-MINOR-htx-Fix-free-space-addresses-calculation-during-a-block-expansion.patch b/net/haproxy/patches/012-BUG-MINOR-htx-Fix-free-space-addresses-calculation-during-a-block-expansion.patch
new file mode 100644 (file)
index 0000000..3b43d72
--- /dev/null
@@ -0,0 +1,37 @@
+commit 3cd7a1ea5110fc6a92627aaad06553a49723ac92
+Author: Christopher Faulet <cfaulet@haproxy.com>
+Date:   Mon Jul 29 10:50:28 2019 +0200
+
+    BUG/MINOR: htx: Fix free space addresses calculation during a block expansion
+    
+    When the payload of a block is shrinked or enlarged, addresses of the free
+    spaces must be updated. There are many possible cases. One of them is
+    buggy. When there is only one block in the HTX message and its payload is just
+    before the tail room and it needs to be moved in the head room to be enlarged,
+    addresses are not correctly updated. This bug may be hit by the compression
+    filter.
+    
+    This patch must be backported to 2.0.
+    
+    (cherry picked from commit 61ed7797f6440ee1102576365553650b1982a233)
+    Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
+
+diff --git a/src/htx.c b/src/htx.c
+index c29a66d7..cd21050c 100644
+--- a/src/htx.c
++++ b/src/htx.c
+@@ -252,11 +252,13 @@ static int htx_prepare_blk_expansion(struct htx *htx, struct htx_blk *blk, int32
+                       ret = 1;
+               }
+               else if ((sz + delta) < headroom) {
++                      uint32_t oldaddr = blk->addr;
++
+                       /* Move the block's payload into the headroom */
+                       blk->addr = htx->head_addr;
+                       htx->tail_addr -= sz;
+                       htx->head_addr += sz + delta;
+-                      if (blk->addr == htx->end_addr) {
++                      if (oldaddr == htx->end_addr) {
+                               if (htx->end_addr == htx->tail_addr) {
+                                       htx->tail_addr = htx->head_addr;
+                                       htx->head_addr = htx->end_addr = 0;
diff --git a/net/haproxy/patches/013-BUG-MAJOR-queue-threads-avoid-an-AB-BA-locking-issue-in-process_srv_queue.patch b/net/haproxy/patches/013-BUG-MAJOR-queue-threads-avoid-an-AB-BA-locking-issue-in-process_srv_queue.patch
new file mode 100644 (file)
index 0000000..78e040c
--- /dev/null
@@ -0,0 +1,225 @@
+commit 0ff395c154ad827c0c30eefc9371ba7f7c171027
+Author: Willy Tarreau <w@1wt.eu>
+Date:   Tue Jul 30 11:59:34 2019 +0200
+
+    BUG/MAJOR: queue/threads: avoid an AB/BA locking issue in process_srv_queue()
+    
+    A problem involving server slowstart was reported by @max2k1 in issue #197.
+    The problem is that pendconn_grab_from_px() takes the proxy lock while
+    already under the server's lock while process_srv_queue() first takes the
+    proxy's lock then the server's lock.
+    
+    While the latter seems more natural, it is fundamentally incompatible with
+    many other operations performed on servers, namely state change propagation,
+    where the proxy is only known after the server and cannot be locked around
+    the servers. However reversing the lock in process_srv_queue() is trivial
+    and only the few functions related to dynamic cookies need to be adjusted
+    for this so that the proxy's lock is taken for each server operation. This
+    is possible because the proxy's server list is built once at boot time and
+    remains stable. So this is what this patch does.
+    
+    The comments in the proxy and server structs were updated to mention this
+    rule that the server's lock may not be taken under the proxy's lock but
+    may enclose it.
+    
+    Another approach could consist in using a second lock for the proxy's queue
+    which would be different from the regular proxy's lock, but given that the
+    operations above are rare and operate on small servers list, there is no
+    reason for overdesigning a solution.
+    
+    This fix was successfully tested with 10000 servers in a backend where
+    adjusting the dyncookies in loops over the CLI didn't have a measurable
+    impact on the traffic.
+    
+    The only workaround without the fix is to disable any occurrence of
+    "slowstart" on server lines, or to disable threads using "nbthread 1".
+    
+    This must be backported as far as 1.8.
+    
+    (cherry picked from commit 5e83d996cf965ee5ac625f702a446f4d8c80a220)
+    Signed-off-by: Willy Tarreau <w@1wt.eu>
+
+diff --git a/include/types/proxy.h b/include/types/proxy.h
+index ca24dbfe..2518f88d 100644
+--- a/include/types/proxy.h
++++ b/include/types/proxy.h
+@@ -487,7 +487,7 @@ struct proxy {
+                                                * name is used
+                                                */
+       struct list filter_configs;             /* list of the filters that are declared on this proxy */
+-      __decl_hathreads(HA_SPINLOCK_T lock);
++      __decl_hathreads(HA_SPINLOCK_T lock);   /* may be taken under the server's lock */
+ };
+ struct switching_rule {
+diff --git a/include/types/server.h b/include/types/server.h
+index 4a077268..e0534162 100644
+--- a/include/types/server.h
++++ b/include/types/server.h
+@@ -319,7 +319,7 @@ struct server {
+       } ssl_ctx;
+ #endif
+       struct dns_srvrq *srvrq;                /* Pointer representing the DNS SRV requeest, if any */
+-      __decl_hathreads(HA_SPINLOCK_T lock);
++      __decl_hathreads(HA_SPINLOCK_T lock);   /* may enclose the proxy's lock, must not be taken under */
+       struct {
+               const char *file;               /* file where the section appears */
+               struct eb32_node id;            /* place in the tree of used IDs */
+diff --git a/src/proxy.c b/src/proxy.c
+index ae761ead..a537e0b1 100644
+--- a/src/proxy.c
++++ b/src/proxy.c
+@@ -1940,9 +1940,12 @@ static int cli_parse_enable_dyncookie_backend(char **args, char *payload, struct
+       if (!px)
+               return 1;
++      /* Note: this lock is to make sure this doesn't change while another
++       * thread is in srv_set_dyncookie().
++       */
+       HA_SPIN_LOCK(PROXY_LOCK, &px->lock);
+-
+       px->ck_opts |= PR_CK_DYNAMIC;
++      HA_SPIN_UNLOCK(PROXY_LOCK, &px->lock);
+       for (s = px->srv; s != NULL; s = s->next) {
+               HA_SPIN_LOCK(SERVER_LOCK, &s->lock);
+@@ -1950,8 +1953,6 @@ static int cli_parse_enable_dyncookie_backend(char **args, char *payload, struct
+               HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
+       }
+-      HA_SPIN_UNLOCK(PROXY_LOCK, &px->lock);
+-
+       return 1;
+ }
+@@ -1971,9 +1972,12 @@ static int cli_parse_disable_dyncookie_backend(char **args, char *payload, struc
+       if (!px)
+               return 1;
++      /* Note: this lock is to make sure this doesn't change while another
++       * thread is in srv_set_dyncookie().
++       */
+       HA_SPIN_LOCK(PROXY_LOCK, &px->lock);
+-
+       px->ck_opts &= ~PR_CK_DYNAMIC;
++      HA_SPIN_UNLOCK(PROXY_LOCK, &px->lock);
+       for (s = px->srv; s != NULL; s = s->next) {
+               HA_SPIN_LOCK(SERVER_LOCK, &s->lock);
+@@ -1984,8 +1988,6 @@ static int cli_parse_disable_dyncookie_backend(char **args, char *payload, struc
+               HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
+       }
+-      HA_SPIN_UNLOCK(PROXY_LOCK, &px->lock);
+-
+       return 1;
+ }
+@@ -2021,10 +2023,13 @@ static int cli_parse_set_dyncookie_key_backend(char **args, char *payload, struc
+               return 1;
+       }
++      /* Note: this lock is to make sure this doesn't change while another
++       * thread is in srv_set_dyncookie().
++       */
+       HA_SPIN_LOCK(PROXY_LOCK, &px->lock);
+-
+       free(px->dyncookie_key);
+       px->dyncookie_key = newkey;
++      HA_SPIN_UNLOCK(PROXY_LOCK, &px->lock);
+       for (s = px->srv; s != NULL; s = s->next) {
+               HA_SPIN_LOCK(SERVER_LOCK, &s->lock);
+@@ -2032,8 +2037,6 @@ static int cli_parse_set_dyncookie_key_backend(char **args, char *payload, struc
+               HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
+       }
+-      HA_SPIN_UNLOCK(PROXY_LOCK, &px->lock);
+-
+       return 1;
+ }
+diff --git a/src/queue.c b/src/queue.c
+index f4a94530..6aa54170 100644
+--- a/src/queue.c
++++ b/src/queue.c
+@@ -312,16 +312,16 @@ void process_srv_queue(struct server *s)
+       struct proxy  *p = s->proxy;
+       int maxconn;
+-      HA_SPIN_LOCK(PROXY_LOCK,  &p->lock);
+       HA_SPIN_LOCK(SERVER_LOCK, &s->lock);
++      HA_SPIN_LOCK(PROXY_LOCK,  &p->lock);
+       maxconn = srv_dynamic_maxconn(s);
+       while (s->served < maxconn) {
+               int ret = pendconn_process_next_strm(s, p);
+               if (!ret)
+                       break;
+       }
+-      HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
+       HA_SPIN_UNLOCK(PROXY_LOCK,  &p->lock);
++      HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock);
+ }
+ /* Adds the stream <strm> to the pending connection queue of server <strm>->srv
+@@ -424,7 +424,8 @@ int pendconn_redistribute(struct server *s)
+ /* Check for pending connections at the backend, and assign some of them to
+  * the server coming up. The server's weight is checked before being assigned
+  * connections it may not be able to handle. The total number of transferred
+- * connections is returned.
++ * connections is returned. It must be called with the server lock held, and
++ * will take the proxy's lock.
+  */
+ int pendconn_grab_from_px(struct server *s)
+ {
+diff --git a/src/server.c b/src/server.c
+index a96f1ef6..236d6bae 100644
+--- a/src/server.c
++++ b/src/server.c
+@@ -125,7 +125,7 @@ static inline void srv_check_for_dup_dyncookie(struct server *s)
+ }
+ /*
+- * Must be called with the server lock held.
++ * Must be called with the server lock held, and will grab the proxy lock.
+  */
+ void srv_set_dyncookie(struct server *s)
+ {
+@@ -137,15 +137,17 @@ void srv_set_dyncookie(struct server *s)
+       int addr_len;
+       int port;
++      HA_SPIN_LOCK(PROXY_LOCK, &p->lock);
++
+       if ((s->flags & SRV_F_COOKIESET) ||
+           !(s->proxy->ck_opts & PR_CK_DYNAMIC) ||
+           s->proxy->dyncookie_key == NULL)
+-              return;
++              goto out;
+       key_len = strlen(p->dyncookie_key);
+       if (s->addr.ss_family != AF_INET &&
+           s->addr.ss_family != AF_INET6)
+-              return;
++              goto out;
+       /*
+        * Buffer to calculate the cookie value.
+        * The buffer contains the secret key + the server IP address
+@@ -174,7 +176,7 @@ void srv_set_dyncookie(struct server *s)
+       hash_value = XXH64(tmpbuf, buffer_len, 0);
+       memprintf(&s->cookie, "%016llx", hash_value);
+       if (!s->cookie)
+-              return;
++              goto out;
+       s->cklen = 16;
+       /* Don't bother checking if the dyncookie is duplicated if
+@@ -183,6 +185,8 @@ void srv_set_dyncookie(struct server *s)
+        */
+       if (!(s->next_admin & SRV_ADMF_FMAINT))
+               srv_check_for_dup_dyncookie(s);
++ out:
++      HA_SPIN_UNLOCK(PROXY_LOCK, &p->lock);
+ }
+ /*
diff --git a/net/haproxy/patches/014-BUG-MINOR-debug-fix-a-small-race-in-the-thread-dumping-code.patch b/net/haproxy/patches/014-BUG-MINOR-debug-fix-a-small-race-in-the-thread-dumping-code.patch
new file mode 100644 (file)
index 0000000..72dca16
--- /dev/null
@@ -0,0 +1,71 @@
+commit da767eaaf6128eccd349a54ec6eac2a68dcacacb
+Author: Willy Tarreau <w@1wt.eu>
+Date:   Wed Jul 31 19:15:45 2019 +0200
+
+    BUG/MINOR: debug: fix a small race in the thread dumping code
+    
+    If a thread dump is requested from a signal handler, it may interrupt
+    a thread already waiting for a dump to complete, and may see the
+    threads_to_dump variable go to zero while others are waiting, steal
+    the lock and prevent other threads from ever completing. This tends
+    to happen when dumping many threads upon a watchdog timeout, to threads
+    waiting for their turn.
+    
+    Instead now we proceed in two steps :
+      1) the last dumped thread sets all bits again
+      2) all threads only wait for their own bit to appear, then clear it
+         and quit
+    
+    This way there's no risk that a bit performs a double flip in the same
+    loop and threads cannot get stuck here anymore.
+    
+    This should be backported to 2.0 as it clarifies stack traces.
+    
+    (cherry picked from commit c07736209db764fb2aef6f18ed3687a504c35771)
+    Signed-off-by: Willy Tarreau <w@1wt.eu>
+
+diff --git a/src/debug.c b/src/debug.c
+index 059bc6b9..07624ca5 100644
+--- a/src/debug.c
++++ b/src/debug.c
+@@ -440,8 +440,8 @@ void debug_handler(int sig, siginfo_t *si, void *arg)
+        *   1- wait for our turn, i.e. when all lower bits are gone.
+        *   2- perform the action if our bit is set
+        *   3- remove our bit to let the next one go, unless we're
+-       *      the last one and have to put them all but ours
+-       *   4- wait for zero and clear our bit if it's set
++       *      the last one and have to put them all as a signal
++       *   4- wait out bit to re-appear, then clear it and quit.
+        */
+       /* wait for all previous threads to finish first */
+@@ -454,7 +454,7 @@ void debug_handler(int sig, siginfo_t *si, void *arg)
+                       ha_thread_dump(thread_dump_buffer, tid, thread_dump_tid);
+               if ((threads_to_dump & all_threads_mask) == tid_bit) {
+                       /* last one */
+-                      HA_ATOMIC_STORE(&threads_to_dump, all_threads_mask & ~tid_bit);
++                      HA_ATOMIC_STORE(&threads_to_dump, all_threads_mask);
+                       thread_dump_buffer = NULL;
+               }
+               else
+@@ -462,14 +462,13 @@ void debug_handler(int sig, siginfo_t *si, void *arg)
+       }
+       /* now wait for all others to finish dumping. The last one will set all
+-       * bits again to broadcast the leaving condition.
++       * bits again to broadcast the leaving condition so we'll see ourselves
++       * present again. This way the threads_to_dump variable never passes to
++       * zero until all visitors have stopped waiting.
+        */
+-      while (threads_to_dump & all_threads_mask) {
+-              if (threads_to_dump & tid_bit)
+-                      HA_ATOMIC_AND(&threads_to_dump, ~tid_bit);
+-              else
+-                      ha_thread_relax();
+-      }
++      while (!(threads_to_dump & tid_bit))
++              ha_thread_relax();
++      HA_ATOMIC_AND(&threads_to_dump, ~tid_bit);
+       /* mark the current thread as stuck to detect it upon next invocation
+        * if it didn't move.
diff --git a/net/haproxy/patches/015-MINOR-wdt-also-consider-that-waiting-in-the-thread-dumper-is-normal.patch b/net/haproxy/patches/015-MINOR-wdt-also-consider-that-waiting-in-the-thread-dumper-is-normal.patch
new file mode 100644 (file)
index 0000000..07a1eec
--- /dev/null
@@ -0,0 +1,70 @@
+commit 445b2b7c52a13678241a190c4ff52e77a09ef0a6
+Author: Willy Tarreau <w@1wt.eu>
+Date:   Wed Jul 31 19:20:39 2019 +0200
+
+    MINOR: wdt: also consider that waiting in the thread dumper is normal
+    
+    It happens that upon looping threads the watchdog fires, starts a dump,
+    and other threads expire their budget while waiting for the other threads
+    to get dumped and trigger a watchdog event again, adding some confusion
+    to the traces. With this patch the situation becomes clearer as we export
+    the list of threads being dumped so that the watchdog can check it before
+    deciding to trigger. This way such threads in queue for being dumped are
+    not attempted to be reported in turn.
+    
+    This should be backported to 2.0 as it helps understand stack traces.
+    
+    (cherry picked from commit a37cb1880c81b1f038e575d88ba7210aea0b7b8f)
+    Signed-off-by: Willy Tarreau <w@1wt.eu>
+
+diff --git a/include/common/debug.h b/include/common/debug.h
+index 333203dd..f43258e9 100644
+--- a/include/common/debug.h
++++ b/include/common/debug.h
+@@ -70,6 +70,7 @@
+ struct task;
+ struct buffer;
++extern volatile unsigned long threads_to_dump;
+ void ha_task_dump(struct buffer *buf, const struct task *task, const char *pfx);
+ void ha_thread_dump(struct buffer *buf, int thr, int calling_tid);
+ void ha_thread_dump_all_to_trash();
+diff --git a/src/debug.c b/src/debug.c
+index 07624ca5..3077e97c 100644
+--- a/src/debug.c
++++ b/src/debug.c
+@@ -29,6 +29,11 @@
+ #include <proto/stream_interface.h>
+ #include <proto/task.h>
++/* mask of threads still having to dump, used to respect ordering. Only used
++ * when USE_THREAD_DUMP is set.
++ */
++volatile unsigned long threads_to_dump = 0;
++
+ /* Dumps to the buffer some known information for the desired thread, and
+  * optionally extra info for the current thread. The dump will be appended to
+  * the buffer, so the caller is responsible for preliminary initializing it.
+@@ -405,9 +410,6 @@ void ha_thread_dump_all_to_trash()
+  */
+ #define DEBUGSIG SIGURG
+-/* mask of threads still having to dump, used to respect ordering */
+-static volatile unsigned long threads_to_dump;
+-
+ /* ID of the thread requesting the dump */
+ static unsigned int thread_dump_tid;
+diff --git a/src/wdt.c b/src/wdt.c
+index 19d36c34..aa89fd44 100644
+--- a/src/wdt.c
++++ b/src/wdt.c
+@@ -75,7 +75,7 @@ void wdt_handler(int sig, siginfo_t *si, void *arg)
+               if (n - p < 1000000000UL)
+                       goto update_and_leave;
+-              if ((threads_harmless_mask|sleeping_thread_mask) & (1UL << thr)) {
++              if ((threads_harmless_mask|sleeping_thread_mask|threads_to_dump) & (1UL << thr)) {
+                       /* This thread is currently doing exactly nothing
+                        * waiting in the poll loop (unlikely but possible),
+                        * waiting for all other threads to join the rendez-vous
diff --git a/net/haproxy/patches/016-BUG-MEDIUM-lb-chash-Ensure-the-tree-integrity-when-server-weight-is-increased.patch b/net/haproxy/patches/016-BUG-MEDIUM-lb-chash-Ensure-the-tree-integrity-when-server-weight-is-increased.patch
new file mode 100644 (file)
index 0000000..0026b85
--- /dev/null
@@ -0,0 +1,56 @@
+commit 0fc2d46fabb2b9317daf7030162e828c7e1684d5
+Author: Christopher Faulet <cfaulet@haproxy.com>
+Date:   Thu Aug 1 10:09:29 2019 +0200
+
+    BUG/MEDIUM: lb-chash: Ensure the tree integrity when server weight is increased
+    
+    When the server weight is increased in consistent hash, extra nodes have to be
+    allocated. So a realloc() is performed on the nodes array of the server. the
+    previous commit 962ea7732 ("BUG/MEDIUM: lb-chash: Remove all server's entries
+    before realloc() to re-insert them after") have fixed the size used during the
+    realloc() to avoid segfaults. But another bug remains. After the realloc(), the
+    memory area allocated for the nodes array may change, invalidating all node
+    addresses in the chash tree.
+    
+    So, to fix the bug, we must remove all server's entries from the chash tree
+    before the realloc to insert all of them after, old nodes and new ones. The
+    insert will be automatically handled by the loop at the end of the function
+    chash_queue_dequeue_srv().
+    
+    Note that if the call to realloc() failed, no new entries will be created for
+    the server, so the effective server weight will be unchanged.
+    
+    This issue was reported on Github (#189).
+    
+    This patch must be backported to all versions since the 1.6.
+    
+    (cherry picked from commit 0a52c17f819a5b0a17718b605bdd990b9e2b58e6)
+    Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
+
+diff --git a/src/lb_chash.c b/src/lb_chash.c
+index 0bf4e81a..23448df8 100644
+--- a/src/lb_chash.c
++++ b/src/lb_chash.c
+@@ -84,8 +84,13 @@ static inline void chash_queue_dequeue_srv(struct server *s)
+        * increased the weight beyond the original weight
+        */
+       if (s->lb_nodes_tot < s->next_eweight) {
+-              struct tree_occ *new_nodes = realloc(s->lb_nodes, s->next_eweight * sizeof(*new_nodes));
++              struct tree_occ *new_nodes;
++              /* First we need to remove all server's entries from its tree
++               * because the realloc will change all nodes pointers */
++              chash_dequeue_srv(s);
++
++              new_nodes = realloc(s->lb_nodes, s->next_eweight * sizeof(*new_nodes));
+               if (new_nodes) {
+                       unsigned int j;
+@@ -494,7 +499,6 @@ void chash_init_server_tree(struct proxy *p)
+               srv->lb_nodes_tot = srv->uweight * BE_WEIGHT_SCALE;
+               srv->lb_nodes_now = 0;
+               srv->lb_nodes = calloc(srv->lb_nodes_tot, sizeof(struct tree_occ));
+-
+               for (node = 0; node < srv->lb_nodes_tot; node++) {
+                       srv->lb_nodes[node].server = srv;
+                       srv->lb_nodes[node].node.key = full_hash(srv->puid * SRV_EWGHT_RANGE + node);
diff --git a/net/haproxy/patches/017-BUG-MAJOR-http-sample-use-a-static-buffer-for-raw---htx-conversion.patch b/net/haproxy/patches/017-BUG-MAJOR-http-sample-use-a-static-buffer-for-raw---htx-conversion.patch
new file mode 100644 (file)
index 0000000..994bc37
--- /dev/null
@@ -0,0 +1,71 @@
+commit c0968f59b723dfa9effa63ac28b59642b11c6b8b
+Author: Richard Russo <russor@whatsapp.com>
+Date:   Wed Jul 31 11:45:56 2019 -0700
+
+    BUG/MAJOR: http/sample: use a static buffer for raw -> htx conversion
+    
+    Multiple calls to smp_fetch_fhdr use the header context to keep track of
+    header parsing position; however, when using header sampling on a raw
+    connection, the raw buffer is converted into an HTX structure each time, and
+    this was done in the trash areas; so the block reference would be invalid on
+    subsequent calls.
+    
+    This patch must be backported to 2.0 and 1.9.
+    
+    (cherry picked from commit 458eafb36df88932a02d1ce7ca31832abf11b8b3)
+    Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
+
+diff --git a/src/http_fetch.c b/src/http_fetch.c
+index 67ea2094..e372a122 100644
+--- a/src/http_fetch.c
++++ b/src/http_fetch.c
+@@ -46,10 +46,40 @@
+ /* this struct is used between calls to smp_fetch_hdr() or smp_fetch_cookie() */
+ static THREAD_LOCAL struct hdr_ctx static_hdr_ctx;
+ static THREAD_LOCAL struct http_hdr_ctx static_http_hdr_ctx;
++/* this is used to convert raw connection buffers to htx */
++static THREAD_LOCAL struct buffer static_raw_htx_chunk;
++static THREAD_LOCAL char *static_raw_htx_buf;
+ #define SMP_REQ_CHN(smp) (smp->strm ? &smp->strm->req : NULL)
+ #define SMP_RES_CHN(smp) (smp->strm ? &smp->strm->res : NULL)
++/* This function returns the static htx chunk, where raw connections get
++ * converted to HTX as needed for samplxsing.
++ */
++struct buffer *get_raw_htx_chunk(void)
++{
++      chunk_reset(&static_raw_htx_chunk);
++      return &static_raw_htx_chunk;
++}
++
++static int alloc_raw_htx_chunk_per_thread()
++{
++      static_raw_htx_buf = malloc(global.tune.bufsize);
++      if (!static_raw_htx_buf)
++              return 0;
++      chunk_init(&static_raw_htx_chunk, static_raw_htx_buf, global.tune.bufsize);
++      return 1;
++}
++
++static void free_raw_htx_chunk_per_thread()
++{
++      free(static_raw_htx_buf);
++      static_raw_htx_buf = NULL;
++}
++
++REGISTER_PER_THREAD_ALLOC(alloc_raw_htx_chunk_per_thread);
++REGISTER_PER_THREAD_FREE(free_raw_htx_chunk_per_thread);
++
+ /*
+  * Returns the data from Authorization header. Function may be called more
+  * than once so data is stored in txn->auth_data. When no header is found
+@@ -265,7 +295,7 @@ struct htx *smp_prefetch_htx(struct sample *smp, struct channel *chn, int vol)
+               else if (h1m.flags & H1_MF_CLEN)
+                       flags |= HTX_SL_F_CLEN;
+-              htx = htx_from_buf(get_trash_chunk());
++              htx = htx_from_buf(get_raw_htx_chunk());
+               sl = htx_add_stline(htx, HTX_BLK_REQ_SL, flags, h1sl.rq.m, h1sl.rq.u, h1sl.rq.v);
+               if (!sl || !htx_add_all_headers(htx, hdrs))
+                       return NULL;
diff --git a/net/haproxy/patches/018-BUG-MINOR-stream-int-also-update-analysers-timeouts-on-activity.patch b/net/haproxy/patches/018-BUG-MINOR-stream-int-also-update-analysers-timeouts-on-activity.patch
new file mode 100644 (file)
index 0000000..118283b
--- /dev/null
@@ -0,0 +1,46 @@
+commit 7343c710152c586a232a194ef37a56af636d6a56
+Author: Willy Tarreau <w@1wt.eu>
+Date:   Thu Aug 1 18:51:38 2019 +0200
+
+    BUG/MINOR: stream-int: also update analysers timeouts on activity
+    
+    Between 1.6 and 1.7, some parts of the stream forwarding process were
+    moved into lower layers and the stream-interface had to keep the
+    stream's task up to date regarding the timeouts. The analyser timeouts
+    were not updated there as it was believed this was not needed during
+    forwarding, but actually there is a case for this which is "option
+    contstats" which periodically triggers the analyser timeout, and this
+    change broke the option in case of sustained traffic (if there is some
+    I/O activity during the same millisecond as the timeout expires, then
+    the update will be missed).
+    
+    This patch simply brings back the analyser expiration updates from
+    process_stream() to stream_int_notify().
+    
+    It may be backported as far as 1.7, taking care to adjust the fields
+    names if needed.
+    
+    (cherry picked from commit 45bcb37f0f8fa1e16dd9358a59dc280a38834dcd)
+    Signed-off-by: Willy Tarreau <w@1wt.eu>
+
+diff --git a/src/stream_interface.c b/src/stream_interface.c
+index 9b9a8e9f..7d89cc90 100644
+--- a/src/stream_interface.c
++++ b/src/stream_interface.c
+@@ -558,6 +558,16 @@ static void stream_int_notify(struct stream_interface *si)
+               task->expire = tick_first((tick_is_expired(task->expire, now_ms) ? 0 : task->expire),
+                                         tick_first(tick_first(ic->rex, ic->wex),
+                                                    tick_first(oc->rex, oc->wex)));
++
++              task->expire = tick_first(task->expire, ic->analyse_exp);
++              task->expire = tick_first(task->expire, oc->analyse_exp);
++
++              if (si->exp)
++                      task->expire = tick_first(task->expire, si->exp);
++
++              if (sio->exp)
++                      task->expire = tick_first(task->expire, sio->exp);
++
+               task_queue(task);
+       }
+       if (ic->flags & CF_READ_ACTIVITY)
diff --git a/net/haproxy/patches/019-BUG-MEDIUM-mux-h2-unbreak-receipt-of-large-DATA-frames.patch b/net/haproxy/patches/019-BUG-MEDIUM-mux-h2-unbreak-receipt-of-large-DATA-frames.patch
new file mode 100644 (file)
index 0000000..579d358
--- /dev/null
@@ -0,0 +1,37 @@
+commit a8fcdacb8cc0dddec72b1ddc4d9afc92d3684acd
+Author: Willy Tarreau <w@1wt.eu>
+Date:   Fri Aug 2 07:48:47 2019 +0200
+
+    BUG/MEDIUM: mux-h2: unbreak receipt of large DATA frames
+    
+    Recent optimization in commit 4d7a88482 ("MEDIUM: mux-h2: don't try to
+    read more than needed") broke the receipt of large DATA frames because
+    it would unconditionally subscribe if there was some room left, thus
+    preventing any new rx from being done since subscription may only be
+    done once the end was reached, as indicated by ret == 0.
+    
+    However, fixing this uncovered that in HTX mode previous versions might
+    occasionally be affected as well, when an available frame is the same
+    size as the maximum data that may fit into an HTX buffer, we may end
+    up reading that whole frame and still subscribe since it's still allowed
+    to receive, thus causing issues to read the next frame.
+    
+    This patch will only work for 2.1-dev but a minor adaptation will be
+    needed for earlier versions (down to 1.9, where subscribe() was added).
+    
+    (cherry picked from commit 9bc1c95855b9c6300de5ecf3720cbe4b2558c5a1)
+    Signed-off-by: Willy Tarreau <w@1wt.eu>
+
+diff --git a/src/mux_h2.c b/src/mux_h2.c
+index 5bb85181..d605fe94 100644
+--- a/src/mux_h2.c
++++ b/src/mux_h2.c
+@@ -2766,7 +2766,7 @@ static int h2_recv(struct h2c *h2c)
+                       ret = 0;
+       } while (ret > 0);
+-      if (h2_recv_allowed(h2c) && (b_data(buf) < buf->size))
++      if (max && !ret && h2_recv_allowed(h2c))
+               conn->xprt->subscribe(conn, conn->xprt_ctx, SUB_RETRY_RECV, &h2c->wait_event);
+       if (!b_data(buf)) {
diff --git a/net/haproxy/patches/020-BUG-MEDIUM-mux-h2-split-the-streams-and-connections-window-sizes.patch b/net/haproxy/patches/020-BUG-MEDIUM-mux-h2-split-the-streams-and-connections-window-sizes.patch
new file mode 100644 (file)
index 0000000..acde533
--- /dev/null
@@ -0,0 +1,227 @@
+commit 5a9c875f0f1ee83bd5889dd1ad53e9da43e6c34e
+Author: Willy Tarreau <w@1wt.eu>
+Date:   Fri Aug 2 07:52:08 2019 +0200
+
+    BUG/MEDIUM: mux-h2: split the stream's and connection's window sizes
+    
+    The SETTINGS frame parser updates all streams' window for each
+    INITIAL_WINDOW_SIZE setting received on the connection (like h2spec
+    does in test 6.5.3), which can start to be expensive if repeated when
+    there are many streams (up to 100 by default). A quick test shows that
+    it's possible to parse only 35000 settings per second on a 3 GHz core
+    for 100 streams, which is rather small.
+    
+    Given that window sizes are relative and may be negative, there's no
+    point in pre-initializing them for each stream and update them from
+    the settings. Instead, let's make them relative to the connection's
+    initial window size so that any change immediately affects all streams.
+    The only thing that remains needed is to wake up the streams that were
+    unblocked by the update, which is now done once at the end of
+    h2_process_demux() instead of once per setting. This now results in
+    5.7 million settings being processed per second, which is way better.
+    
+    In order to keep the change small, the h2s' mws field was renamed to
+    "sws" for "stream window size", and an h2s_mws() function was added
+    to add it to the connection's initial window setting and determine the
+    window size to use when muxing. The h2c_update_all_ws() function was
+    renamed to h2c_unblock_sfctl() since it's now only used to unblock
+    previously blocked streams.
+    
+    This needs to be backported to all versions till 1.8.
+    
+    (cherry picked from commit 1d4a0f88100daeb17dd0c9470c659b1ec288bc07)
+    [wt: context adjustment, port to legacy parts]
+    Signed-off-by: Willy Tarreau <w@1wt.eu>
+
+diff --git a/src/mux_h2.c b/src/mux_h2.c
+index d605fe94..f90e9435 100644
+--- a/src/mux_h2.c
++++ b/src/mux_h2.c
+@@ -208,7 +208,7 @@ struct h2s {
+       struct eb32_node by_id; /* place in h2c's streams_by_id */
+       int32_t id; /* stream ID */
+       uint32_t flags;      /* H2_SF_* */
+-      int mws;             /* mux window size for this stream */
++      int sws;             /* stream window size, to be added to the mux's initial window size */
+       enum h2_err errcode; /* H2 err code (H2_ERR_*) */
+       enum h2_ss st;
+       uint16_t status;     /* HTTP response status */
+@@ -707,6 +707,14 @@ static inline __maybe_unused int h2s_id(const struct h2s *h2s)
+       return h2s ? h2s->id : 0;
+ }
++/* returns the sum of the stream's own window size and the mux's initial
++ * window, which together form the stream's effective window size.
++ */
++static inline int h2s_mws(const struct h2s *h2s)
++{
++      return h2s->sws + h2s->h2c->miw;
++}
++
+ /* returns true of the mux is currently busy as seen from stream <h2s> */
+ static inline __maybe_unused int h2c_mux_busy(const struct h2c *h2c, const struct h2s *h2s)
+ {
+@@ -945,7 +953,7 @@ static struct h2s *h2s_new(struct h2c *h2c, int id)
+       LIST_INIT(&h2s->sending_list);
+       h2s->h2c       = h2c;
+       h2s->cs        = NULL;
+-      h2s->mws       = h2c->miw;
++      h2s->sws       = 0;
+       h2s->flags     = H2_SF_NONE;
+       h2s->errcode   = H2_ERR_NO_ERROR;
+       h2s->st        = H2_SS_IDLE;
+@@ -1543,30 +1551,23 @@ static void h2_wake_some_streams(struct h2c *h2c, int last)
+       }
+ }
+-/* Increase all streams' outgoing window size by the difference passed in
+- * argument. This is needed upon receipt of the settings frame if the initial
+- * window size is different. The difference may be negative and the resulting
+- * window size as well, for the time it takes to receive some window updates.
++/* Wake up all blocked streams whose window size has become positive after the
++ * mux's initial window was adjusted. This should be done after having processed
++ * SETTINGS frames which have updated the mux's initial window size.
+  */
+-static void h2c_update_all_ws(struct h2c *h2c, int diff)
++static void h2c_unblock_sfctl(struct h2c *h2c)
+ {
+       struct h2s *h2s;
+       struct eb32_node *node;
+-      if (!diff)
+-              return;
+-
+       node = eb32_first(&h2c->streams_by_id);
+       while (node) {
+               h2s = container_of(node, struct h2s, by_id);
+-              h2s->mws += diff;
+-
+-              if (h2s->mws > 0 && (h2s->flags & H2_SF_BLK_SFCTL)) {
++              if (h2s->flags & H2_SF_BLK_SFCTL && h2s_mws(h2s) > 0) {
+                       h2s->flags &= ~H2_SF_BLK_SFCTL;
+                       if (h2s->send_wait && !LIST_ADDED(&h2s->list))
+                               LIST_ADDQ(&h2c->send_list, &h2s->list);
+               }
+-
+               node = eb32_next(node);
+       }
+ }
+@@ -1607,7 +1608,6 @@ static int h2c_handle_settings(struct h2c *h2c)
+                               error = H2_ERR_FLOW_CONTROL_ERROR;
+                               goto fail;
+                       }
+-                      h2c_update_all_ws(h2c, arg - h2c->miw);
+                       h2c->miw = arg;
+                       break;
+               case H2_SETTINGS_MAX_FRAME_SIZE:
+@@ -1869,13 +1869,13 @@ static int h2c_handle_window_update(struct h2c *h2c, struct h2s *h2s)
+                       goto strm_err;
+               }
+-              if (h2s->mws >= 0 && h2s->mws + inc < 0) {
++              if (h2s_mws(h2s) >= 0 && h2s_mws(h2s) + inc < 0) {
+                       error = H2_ERR_FLOW_CONTROL_ERROR;
+                       goto strm_err;
+               }
+-              h2s->mws += inc;
+-              if (h2s->mws > 0 && (h2s->flags & H2_SF_BLK_SFCTL)) {
++              h2s->sws += inc;
++              if (h2s_mws(h2s) > 0 && (h2s->flags & H2_SF_BLK_SFCTL)) {
+                       h2s->flags &= ~H2_SF_BLK_SFCTL;
+                       if (h2s->send_wait && !LIST_ADDED(&h2s->list))
+                               LIST_ADDQ(&h2c->send_list, &h2s->list);
+@@ -2237,6 +2237,7 @@ static void h2_process_demux(struct h2c *h2c)
+       struct h2s *h2s = NULL, *tmp_h2s;
+       struct h2_fh hdr;
+       unsigned int padlen = 0;
++      int32_t old_iw = h2c->miw;
+       if (h2c->st0 >= H2_CS_ERROR)
+               return;
+@@ -2625,6 +2626,9 @@ static void h2_process_demux(struct h2c *h2c)
+               h2s_notify_recv(h2s);
+       }
++      if (old_iw != h2c->miw)
++              h2c_unblock_sfctl(h2c);
++
+       h2c_restart_reading(h2c, 0);
+ }
+@@ -4259,8 +4263,8 @@ static size_t h2s_frt_make_resp_data(struct h2s *h2s, const struct buffer *buf,
+       if (size > max)
+               size = max;
+-      if (size > h2s->mws)
+-              size = h2s->mws;
++      if (size > h2s_mws(h2s))
++              size = h2s_mws(h2s);
+       if (size <= 0) {
+               h2s->flags |= H2_SF_BLK_SFCTL;
+@@ -4362,7 +4366,7 @@ static size_t h2s_frt_make_resp_data(struct h2s *h2s, const struct buffer *buf,
+               ofs += size;
+               total += size;
+               h1m->curr_len -= size;
+-              h2s->mws -= size;
++              h2s->sws -= size;
+               h2c->mws -= size;
+               if (size && !h1m->curr_len && (h1m->flags & H1_MF_CHNK)) {
+@@ -4390,7 +4394,7 @@ static size_t h2s_frt_make_resp_data(struct h2s *h2s, const struct buffer *buf,
+       }
+  end:
+-      trace("[%d] sent simple H2 DATA response (sid=%d) = %d bytes out (%u in, st=%s, ep=%u, es=%s, h2cws=%d h2sws=%d) data=%u", h2c->st0, h2s->id, size+9, (unsigned int)total, h1m_state_str(h1m->state), h1m->err_pos, h1m_state_str(h1m->err_state), h2c->mws, h2s->mws, (unsigned int)b_data(buf));
++      trace("[%d] sent simple H2 DATA response (sid=%d) = %d bytes out (%u in, st=%s, ep=%u, es=%s, h2cws=%d h2sws=%d) data=%u", h2c->st0, h2s->id, size+9, (unsigned int)total, h1m_state_str(h1m->state), h1m->err_pos, h1m_state_str(h1m->err_state), h2c->mws, h2s_mws(h2s), (unsigned int)b_data(buf));
+       return total;
+ }
+@@ -4937,7 +4941,7 @@ static size_t h2s_htx_frt_make_resp_data(struct h2s *h2s, struct buffer *buf, si
+        */
+       if (unlikely(fsize == count &&
+                    htx->used == 1 && type == HTX_BLK_DATA &&
+-                   fsize <= h2s->mws && fsize <= h2c->mws && fsize <= h2c->mfs)) {
++                   fsize <= h2s_mws(h2s) && fsize <= h2c->mws && fsize <= h2c->mfs)) {
+               void *old_area = mbuf->area;
+               if (b_data(mbuf)) {
+@@ -4972,7 +4976,7 @@ static size_t h2s_htx_frt_make_resp_data(struct h2s *h2s, struct buffer *buf, si
+               h2_set_frame_size(outbuf.area, fsize);
+               /* update windows */
+-              h2s->mws -= fsize;
++              h2s->sws -= fsize;
+               h2c->mws -= fsize;
+               /* and exchange with our old area */
+@@ -5024,7 +5028,7 @@ static size_t h2s_htx_frt_make_resp_data(struct h2s *h2s, struct buffer *buf, si
+       if (!fsize)
+               goto send_empty;
+-      if (h2s->mws <= 0) {
++      if (h2s_mws(h2s) <= 0) {
+               h2s->flags |= H2_SF_BLK_SFCTL;
+               if (LIST_ADDED(&h2s->list))
+                       LIST_DEL_INIT(&h2s->list);
+@@ -5034,8 +5038,8 @@ static size_t h2s_htx_frt_make_resp_data(struct h2s *h2s, struct buffer *buf, si
+       if (fsize > count)
+               fsize = count;
+-      if (fsize > h2s->mws)
+-              fsize = h2s->mws; // >0
++      if (fsize > h2s_mws(h2s))
++              fsize = h2s_mws(h2s); // >0
+       if (h2c->mfs && fsize > h2c->mfs)
+               fsize = h2c->mfs; // >0
+@@ -5071,7 +5075,7 @@ static size_t h2s_htx_frt_make_resp_data(struct h2s *h2s, struct buffer *buf, si
+       /* now let's copy this this into the output buffer */
+       memcpy(outbuf.area + 9, htx_get_blk_ptr(htx, blk), fsize);
+-      h2s->mws -= fsize;
++      h2s->sws -= fsize;
+       h2c->mws -= fsize;
+       count    -= fsize;
diff --git a/net/haproxy/patches/021-OPENWRT-add-uclibc-support.patch b/net/haproxy/patches/021-OPENWRT-add-uclibc-support.patch
new file mode 100644 (file)
index 0000000..2757025
--- /dev/null
@@ -0,0 +1,18 @@
+--- a/Makefile
++++ b/Makefile
+@@ -327,6 +327,15 @@ ifeq ($(TARGET),linux-glibc)
+     USE_GETADDRINFO)
+ endif
++# For linux >= 2.6.28 and uclibc
++ifeq ($(TARGET),linux-uclibc)
++  set_target_defaults = $(call default_opts, \
++    USE_POLL USE_TPROXY USE_DL USE_RT USE_NETFILTER                           \
++    USE_CPU_AFFINITY USE_THREAD USE_EPOLL USE_FUTEX USE_LINUX_TPROXY          \
++    USE_ACCEPT4 USE_LINUX_SPLICE USE_PRCTL USE_THREAD_DUMP USE_NS USE_TFO     \
++    USE_GETADDRINFO)
++endif
++
+ # Solaris 8 and above
+ ifeq ($(TARGET),solaris)
+   # We also enable getaddrinfo() which works since solaris 8.
diff --git a/net/haproxy/patches/022-OPENWRT-openssl-deprecated.patch b/net/haproxy/patches/022-OPENWRT-openssl-deprecated.patch
new file mode 100644 (file)
index 0000000..541077e
--- /dev/null
@@ -0,0 +1,12 @@
+--- a/include/common/openssl-compat.h
++++ b/include/common/openssl-compat.h
+@@ -217,7 +217,8 @@ static inline int EVP_PKEY_base_id(EVP_PKEY *pkey)
+ #define TLSEXT_signature_ecdsa      3
+ #endif
+-#if (OPENSSL_VERSION_NUMBER < 0x10100000L) || (LIBRESSL_VERSION_NUMBER < 0x20700000L)
++#if (OPENSSL_VERSION_NUMBER < 0x10100000L) || \
++      (defined(LIBRESSL_VERSION_NUMBER) && (LIBRESSL_VERSION_NUMBER < 0x20700000L))
+ #define X509_getm_notBefore     X509_get_notBefore
+ #define X509_getm_notAfter      X509_get_notAfter
+ #endif
index 24da2bc246f27e1c717e323637d72b751db8b9c9..9310b30787127bd778f3bcf892e635fe3b220074 100644 (file)
@@ -8,12 +8,12 @@
 include $(TOPDIR)/rules.mk
 
 PKG_NAME:=keepalived
-PKG_VERSION:=2.0.16
+PKG_VERSION:=2.0.18
 PKG_RELEASE:=1
 
 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
 PKG_SOURCE_URL:=https://www.keepalived.org/software
-PKG_HASH:=f0c7dc86147a286913c1c2c918f557735016285d25779d4d2fce5732fcb888df
+PKG_HASH:=1423a2b1b8e541211029b9e1e1452e683bbe5f4b0b287eddd609aaf5ff024fd0
 
 PKG_CPE_ID:=cpe:/a:keepalived:keepalived
 PKG_LICENSE:=GPL-2.0+
index 4e63711f480ee9458308b478f72c04441d206a5d..cf425e87227ab86717f8144fe775a2bf38044da5 100644 (file)
@@ -8,104 +8,106 @@ USE_PROCD=1
 
 KEEPALIVED_CONF=/tmp/keepalived.conf
 
-INDENT_1=\\t
-INDENT_2=$INDENT_1$INDENT_1
-INDENT_3=$INDENT_1$INDENT_1$INDENT_1
-INDENT_4=$INDENT_1$INDENT_1$INDENT_1$INDENT_1
+INDENT_1="\t"
+INDENT_2="${INDENT_1}${INDENT_1}"
+INDENT_3="${INDENT_1}${INDENT_1}${INDENT_1}"
+INDENT_4="${INDENT_1}${INDENT_1}${INDENT_1}${INDENT_1}"
 
 config_section_open() {
-       local tag=$1
-       local name=$2
+       local tag="$1"
+       local name="$2"
 
-       printf "$tag" >> $KEEPALIVED_CONF
-       [ -n "$name" ] && printf " $name" >> $KEEPALIVED_CONF
-       printf " {\n" >> $KEEPALIVED_CONF
+       printf '%s' "$tag" >> "$KEEPALIVED_CONF"
+       [ -n "$name" ] && printf ' %s' "$name" >> "$KEEPALIVED_CONF"
+       printf ' {\n' >> "$KEEPALIVED_CONF"
 }
 
 config_section_close() {
-       printf "}\n\n" >> $KEEPALIVED_CONF
+       printf '}\n\n' >> "$KEEPALIVED_CONF"
 }
 
 config_foreach_wrapper() {
-       local section=$1
-       local function=$1
+       local section="$1"
+       local function="$1"
 
        # Convention is that 'function' and 'section' are the same
-       config_foreach $function $section
+       config_foreach "$function" "$section"
 }
 
 print_elems_indent() {
-       local config=$1
+       local config="$1"
        shift
-       local indent=$1
+       local indent="$1"
        shift
+
        [ -z "$indent" ] && indent="$INDENT_1"
-       for opt in $*; do
-               local $opt
+       for opt in "$@"; do
+               local "$opt"
+               local optval
                local no_val=0
-               if [ ${opt:0:7} == "no_val_" ]; then
-                       opt=${opt:7}
+               if [ "${opt:0:7}" = "no_val_" ]; then
+                       opt="${opt:7}"
                        no_val=1
                fi
-               config_get $opt $config $opt
-               eval optval=\$$opt
+               config_get "$opt" "$config" "$opt"
+               eval optval=\$"$opt"
                [ -z "$optval" ] && continue
-               printf "$indent$opt" >> $KEEPALIVED_CONF
-               [ "$no_val" == "0" ] && {
-                       local words=$(echo "$optval" | wc -w)
-                       if [ $words -gt 1 ]; then
-                               printf " \"$optval\"" >> $KEEPALIVED_CONF
+               printf '%b%s' "$indent" "$opt" >> "$KEEPALIVED_CONF"
+               [ "$no_val" = "0" ] && {
+                       local words=0
+                       words="$(echo "$optval" | wc -w)"
+                       if [ "$words" -gt 1 ]; then
+                               printf ' "%s"' "$optval" >> "$KEEPALIVED_CONF"
                        else
-                               printf " $optval" >> $KEEPALIVED_CONF
+                               printf ' %s' "$optval" >> "$KEEPALIVED_CONF"
                        fi
                }
-               printf "\n" >> $KEEPALIVED_CONF
+               printf '\n' >> "$KEEPALIVED_CONF"
        done
        unset optval
 }
 
 print_list_indent() {
-       local lst=$1
-       local indent=$2
+       local lst="$1"
+       local indent="$2"
        local lst_elems
-       [ -z "$indent" ] && indent=$INDENT_1
+       [ -z "$indent" ] && indent="$INDENT_1"
 
-       eval lst_elems=\$$lst
+       eval lst_elems=\$"$lst"
        [ -z "$lst_elems" ] && return 0
 
-       printf "$indent$lst {\n" >> $KEEPALIVED_CONF
+       printf '%b%s {\n' "$indent" "$lst" >> "$KEEPALIVED_CONF"
        for e in $lst_elems; do
-               [ -n "$eval_item_func" ]
-               printf "$indent$INDENT_1$e\n" >> $KEEPALIVED_CONF
+               printf '%b%s\n' "${indent}${INDENT_1}" "$e">> "$KEEPALIVED_CONF"
        done
-       printf "$indent}\n" >> $KEEPALIVED_CONF
+       printf '%b}\n' "$indent" >> "$KEEPALIVED_CONF"
 }
 
 print_notify() {
-       local type=$1
+       local type="$1"
        shift
-       local name=$1
+       local name="$1"
        shift
-       for notify in $*; do
-               printf "$INDENT_1$notify" >> $KEEPALIVED_CONF
-               notify=$(echo $notify | tr 'a-z' 'A-Z')
-               printf " \"/bin/busybox env -i ACTION=$notify TYPE=$type NAME=$name /sbin/hotplug-call keepalived\"\n" >> $KEEPALIVED_CONF
+       for notify in "$@"; do
+               printf '%b%s' "${INDENT_1}" "$notify">> "$KEEPALIVED_CONF"
+               notify="$(echo "$notify" | tr 'a-z' 'A-Z')"
+               printf ' "/bin/busybox env -i ACTION=%s TYPE=%s NAME=%s /sbin/hotplug-call keepalived"\n' "$notify" "$type" "$name" >> "$KEEPALIVED_CONF"
        done
 }
 
 global_defs() {
        local linkbeat_use_polling notification_email
 
-       config_get alt_config_file $1 alt_config_file
+       config_get alt_config_file "$1" alt_config_file
        [ -z "$alt_config_file" ] || return 0
 
-       config_get_bool linkbeat_use_polling $1 linkbeat_use_polling 0
-       [ $linkbeat_use_polling -gt 0 ] && printf "linkbeat_use_polling\n\n" >> $KEEPALIVED_CONF
+       config_get_bool linkbeat_use_polling "$1" linkbeat_use_polling 0
+       [ "$linkbeat_use_polling" -gt 0 ] && printf 'linkbeat_use_polling\n\n' >> "$KEEPALIVED_CONF"
 
-       config_get notification_email $1 notification_email
+       config_get notification_email "$1" notification_email
        print_list_indent notification_email
 
-       print_elems_indent $1 $INDENT_1 \
+       print_elems_indent "$1" "$INDENT_1" \
                notification_email_from \
                smtp_server \
                smtp_connect_timeout \
@@ -116,129 +118,129 @@ global_defs() {
 }
 
 print_ipaddress_indent() {
-       local section=$1
-       local curr_ipaddr=$2
-       local indent=$3
+       local section="$1"
+       local curr_ipaddr="$2"
+       local indent="$3"
 
        local address device scope name
-       config_get name    $section name
+       config_get name "$section" name
        [ "$name" != "$curr_ipaddr" ] && return 0
 
-       config_get address $section address
-       config_get device  $section device
-       config_get scope   $section scope
+       config_get address "$section" address
+       config_get device "$section" device
+       config_get scope "$section" scope
 
        # Default indent
-       [ -z "$indent" ] && indent=$INDENT_1
+       [ -z "$indent" ] && indent="$INDENT_1"
 
        # If no address exit
        [ -z "$address" ] && return 0
 
        if [ -z "$device" ]; then
-               printf "$indent$address" >> $KEEPALIVED_CONF
+               printf '%b%s' "$indent" "$address" >> "$KEEPALIVED_CONF"
        else
                # Add IP address/netmask and device
-               printf "$indent$address dev $device" >> $KEEPALIVED_CONF
+               printf '%b%s dev %s' "$indent" "$address" "$device">> "$KEEPALIVED_CONF"
                # Add scope
-               [ -n "$scope" ] && printf " scope $scope" >> $KEEPALIVED_CONF
+               [ -n "$scope" ] && printf ' scope %s' "$scope" >> "$KEEPALIVED_CONF"
        fi
 
-       printf "\n" >> $KEEPALIVED_CONF
+       printf '\n' >> "$KEEPALIVED_CONF"
 }
 
 static_ipaddress() {
        local address
        config_get address "$1" address
        for a in $address; do
-               config_foreach print_ipaddress_indent ipaddress $a
+               config_foreach print_ipaddress_indent ipaddress "$a"
        done
 }
 
 print_route_indent() {
-       local section=$1
-       local curr_route=$2
-       local indent=$3
+       local section="$1"
+       local curr_route="$2"
+       local indent="$3"
 
        local name blackhole address src_addr gateway device scope table
 
-       config_get name           $section name
+       config_get name "$section" name
        [ "$name" != "$curr_route" ] && return 0
 
-       config_get_bool blackhole $section blackhole 0
-       config_get address        $section address
-       config_get src_addr       $section src_addr
-       config_get gateway        $section gateway
-       config_get device         $section device
-       config_get table          $section table
+       config_get_bool blackhole "$section" blackhole 0
+       config_get address "$section" address
+       config_get src_addr "$section" src_addr
+       config_get gateway "$section" gateway
+       config_get device "$section" device
+       config_get table "$section" table
 
        # If no address exit
        [ -z "$address" ] && return 0
 
        # Default indent
-       [ -z "$indent" ] && indent=$INDENT_1
+       [ -z "$indent" ] && indent="$INDENT_1"
 
-       [ $blackhole -gt 0 ] && {
-               printf "${indent}blackhole $address\n" >> $KEEPALIVED_CONF
+       [ "$blackhole" -gt 0 ] && {
+               printf '%bblackhole %s\n' "$indent" "$address" >> "$KEEPALIVED_CONF"
                return 0
        }
        # Add src addr or address
        if [ -n "$src_addr" ]; then
-               printf "${indent}src $src_addr $address" >> $KEEPALIVED_CONF
+               printf '%bsrc %s %s' "$indent" "$src_addr" "$address" >> "$KEEPALIVED_CONF"
        else
                [ -z "$device" ] && return 0
-               printf "$indent$address" >> $KEEPALIVED_CONF
+               printf '%b%s' "$indent" "$address" >> "$KEEPALIVED_CONF"
        fi
        # Add route/gateway
-       [ -n "$gateway" ] && printf " via $gateway" >> $KEEPALIVED_CONF
+       [ -n "$gateway" ] && printf ' via %s' "$gateway" >> "$KEEPALIVED_CONF"
        # Add device
-       printf " dev $device" >> $KEEPALIVED_CONF
+       printf ' dev %s' "$device" >> "$KEEPALIVED_CONF"
        # Add scope
-       [ -n "$scope" ] && printf " scope $scope" >> $KEEPALIVED_CONF
+       [ -n "$scope" ] && printf ' scope %s' "$scope" >> "$KEEPALIVED_CONF"
        # Add table
-       [ -n "$table" ] && printf " table $table" >> $KEEPALIVED_CONF
-       printf "\n" >> $KEEPALIVED_CONF
+       [ -n "$table" ] && printf ' table %s' "$table" >> "$KEEPALIVED_CONF"
+       printf '\n' >> "$KEEPALIVED_CONF"
 
 }
 
 print_track_elem_indent() {
-       local section=$1
-       local curr_track_elem=$2
-       local indent=$3
+       local section="$1"
+       local curr_track_elem="$2"
+       local indent="$3"
 
-       local script name value
-       config_get name    $section name
+       local name value
+       config_get name "$section" name
        [ "$name" != "$curr_track_elem" ] && return 0
 
-       config_get value  $section value
-       config_get weight $section weight
+       config_get value "$section" value
+       config_get weight "$section" weight
 
        [ -z "$value" ] && return 0
 
-       printf "$indent$value" >> $KEEPALIVED_CONF
-       [ -n "$weight" ] && printf " weight $weight" >> $KEEPALIVED_CONF
-       printf "\n" >> $KEEPALIVED_CONF
+       printf '%b%s' "$indent" "$value" >> "$KEEPALIVED_CONF"
+       [ -n "$weight" ] && printf ' weight %s' "$weight" >> "$KEEPALIVED_CONF"
+       printf '\n' >> "$KEEPALIVED_CONF"
 }
 
 static_routes() {
        local route
        config_get route "$1" route
        for r in $route; do
-               config_foreach print_route_indent route $r
+               config_foreach print_route_indent route "$r"
        done
 }
 
 # Count 'vrrp_instance' with the given name ; called by vrrp_instance_check()
 vrrp_instance_name_count() {
        local name
-       config_get name $1 name
-       [ "$name" == "$2" ] && count=$((count + 1))
+       config_get name "$1" name
+       [ "$name" = "$2" ] && count="$((count + 1))"
 }
 
 # Check if there's a 'vrrp_instance' section with the given name
 vrrp_instance_check() {
-       local count=0
-       local name=$1
-       config_foreach vrrp_instance_name_count vrrp_instance $name
+       local count="0"
+       local name="$1"
+       config_foreach vrrp_instance_name_count vrrp_instance "$name"
        [ $count -gt 0 ] && return 0 || return 1
 }
 
@@ -247,17 +249,17 @@ vrrp_sync_group() {
        local valid_group
 
        # No name for group, exit
-       config_get name $1 name
+       config_get name "$1" name
        [ -z "$name" ] && return 0
 
        # No members for group, exit
-       config_get group $1 group
+       config_get group "$1" group
        [ -z "$group" ] && return 0
 
        # Check if we have 'vrrp_instance's defined for 
        # each member and remove names with not vrrp_instance defined
        for m in $group; do
-               vrrp_instance_check $m && valid_group="$valid_group $m"
+               vrrp_instance_check "$m" && valid_group="$valid_group $m"
        done
        [ -z "$valid_group" ] && return 0
 
@@ -266,7 +268,7 @@ vrrp_sync_group() {
        group="$valid_group"
        print_list_indent group
 
-       print_elems_indent $1 $INDENT_1 no_val_smtp_alert no_val_global_tracking
+       print_elems_indent "$1" "$INDENT_1" no_val_smtp_alert no_val_global_tracking
 
        print_notify "GROUP" "$name" notify_backup notify_master \
                notify_fault notify
@@ -277,21 +279,21 @@ vrrp_sync_group() {
 vrrp_instance() {
        local name auth_type auth_pass
 
-       config_get name $1 name
+       config_get name "$1" name
        [ -z "$name" ] && return 0
 
        config_section_open "vrrp_instance" "$name"
 
-       config_get auth_type $1 auth_type
-       config_get auth_pass $1 auth_pass
-       [ -n "$auth_type" -a -n "$auth_pass" ] && {
-               printf "${INDENT_1}authentication {\n" >> $KEEPALIVED_CONF
-               printf "${INDENT_2}auth_type $auth_type\n" >> $KEEPALIVED_CONF
-               printf "${INDENT_2}auth_pass $auth_pass\n" >> $KEEPALIVED_CONF
-               printf "$INDENT_1}\n" >> $KEEPALIVED_CONF
+       config_get auth_type "$1" auth_type
+       config_get auth_pass "$1" auth_pass
+       [ -n "$auth_type" ] && [ -n "$auth_pass" ] && {
+               printf '%bauthentication {\n' "${INDENT_1}" >> "$KEEPALIVED_CONF"
+               printf '%bauth_type %s\n' "${INDENT_2}" "$auth_type" >> "$KEEPALIVED_CONF"
+               printf '%bauth_pass %s\n' "${INDENT_2}" "$auth_pass" >> "$KEEPALIVED_CONF"
+               printf '%b}\n' "${INDENT_1}" >> "$KEEPALIVED_CONF"
        }
 
-       print_elems_indent $1 $INDENT_1 state interface \
+       print_elems_indent "$1" "$INDENT_1" state interface \
                mcast_src_ip unicast_src_ip virtual_router_id version priority \
                advert_int preempt_delay debug \
                lvs_sync_daemon_interface garp_master_delay garp_master_refresh \
@@ -305,56 +307,56 @@ vrrp_instance() {
 
        # Handle virtual_ipaddress & virtual_ipaddress_excluded lists
        for opt in virtual_ipaddress virtual_ipaddress_excluded; do
-               config_get $opt $1 $opt
+               config_get "$opt" "$1" "$opt"
                eval optval=\$$opt
                [ -z "$optval" ] && continue
-               printf "$INDENT_1$opt {\n" >> $KEEPALIVED_CONF
+               printf '%b%s {\n' "${INDENT_1}" "$opt" >> "$KEEPALIVED_CONF"
                for a in $optval; do
-                       config_foreach print_ipaddress_indent ipaddress $a $INDENT_2
+                       config_foreach print_ipaddress_indent ipaddress "$a" "$INDENT_2"
                done
-               printf "$INDENT_1}\n" >> $KEEPALIVED_CONF
+               printf '%b}\n' "${INDENT_1}" >> "$KEEPALIVED_CONF"
        done
 
        # Handle virtual_routes
        for opt in virtual_routes; do
-               config_get $opt $1 $opt
+               config_get "$opt" "$1" "$opt"
                eval optval=\$$opt
                [ -z "$optval" ] && continue
-               printf "$INDENT_1$opt {\n" >> $KEEPALIVED_CONF
+               printf '%b%s {\n' "${INDENT_1}" "$opt" >> "$KEEPALIVED_CONF"
                for r in $optval; do
-                       config_foreach print_route_indent route $r $INDENT_2
+                       config_foreach print_route_indent route "$r" "$INDENT_2"
                done
-               printf "$INDENT_1}\n" >> $KEEPALIVED_CONF
+               printf '%b}\n' "${INDENT_1}" >> "$KEEPALIVED_CONF"
        done
 
        # Handle track_script lists
        for opt in track_script; do
-               config_get $opt $1 $opt
+               config_get "$opt" "$1" "$opt"
                eval optval=\$$opt
                [ -z "$optval" ] && continue
-               printf "$INDENT_1$opt {\n" >> $KEEPALIVED_CONF
+               printf '%b%s {\n' "${INDENT_1}" "$opt" >> "$KEEPALIVED_CONF"
                for t in $optval; do
-                       printf "$INDENT_2$optval\n" >> $KEEPALIVED_CONF
+                       printf '%b%s\n' "${INDENT_2}" "$optval" >> "$KEEPALIVED_CONF"
                done
-               printf "$INDENT_1}\n" >> $KEEPALIVED_CONF
+               printf '%b}\n' "${INDENT_1}" >> "$KEEPALIVED_CONF"
        done
 
        # Handle track_interface lists
        for opt in track_interface; do
-               config_get $opt $1 $opt
+               config_get "$opt" "$1" "$opt"
                eval optval=\$$opt
                [ -z "$optval" ] && continue
-               printf "$INDENT_1$opt {\n" >> $KEEPALIVED_CONF
+               printf '%b%s {\n' "${INDENT_1}" "$opt" >> "$KEEPALIVED_CONF"
                for t in $optval; do
-                       config_foreach print_track_elem_indent track_interface $t $INDENT_2
+                       config_foreach print_track_elem_indent track_interface "$t" "$INDENT_2"
                done
-               printf "$INDENT_1}\n" >> $KEEPALIVED_CONF
+               printf '%b}\n' "${INDENT_1}" >> "$KEEPALIVED_CONF"
        done
 
        # Handle simple lists of strings (with no spaces in between)
        for opt in unicast_peer; do
-               config_get $opt $1 $opt
-               print_list_indent $opt
+               config_get "$opt" "$1" "$opt"
+               print_list_indent "$opt"
        done
        unset optval
 
@@ -364,12 +366,12 @@ vrrp_instance() {
 vrrp_script() {
        local name
 
-       config_get name $1 name
+       config_get name "$1" name
        [ -z "$name" ] && return 0
 
        config_section_open "vrrp_script" "$name"
 
-       print_elems_indent $1 $INDENT_1 script interval weight fall rise
+       print_elems_indent "$1" "$INDENT_1" script interval weight fall rise
 
        config_section_close
 }
@@ -379,17 +381,17 @@ url() {
 
        local name path digest
 
-       config_get name $1 name
+       config_get name "$1" name
        [ "$url" = "$name" ] || return 0
 
-       config_get path $1 path
-       config_get digest $1 digest
+       config_get path "$1" path
+       config_get digest "$1" digest
 
-       [ -n "$digest" -a -n "$path" ] && {
-               printf "${INDENT_3}url {\n" >> $KEEPALIVED_CONF
-               printf "${INDENT_4}path "$path"\n" >> $KEEPALIVED_CONF
-               printf "${INDENT_4}digest $digest\n" >> $KEEPALIVED_CONF
-               printf "${INDENT_3}}\n" >> $KEEPALIVED_CONF
+       [ -n "$digest" ] && [ -n "$path" ] && {
+               printf '%burl {\n' "${INDENT_3}" >> "$KEEPALIVED_CONF"
+               printf '%bpath %s\n' "${INDENT_4}" "$path" >> "$KEEPALIVED_CONF"
+               printf '%bdigest %s\n' "${INDENT_4}" "$digest" >> "$KEEPALIVED_CONF"
+               printf '%b}\n' "${INDENT_3}" >> "$KEEPALIVED_CONF"
        }
 }
 
@@ -402,44 +404,44 @@ real_server() {
 
        local enabled name weight ipaddr port check
 
-       config_get_bool enabled $1 enabled 1
+       config_get_bool enabled "$1" enabled 1
        [ "$enabled" -eq 1 ] || return 0
 
-       config_get name $1 name
+       config_get name "$1" name
        [ "$server" = "$name" ] || return 0
 
-       config_get weight $1 weight
+       config_get weight "$1" weight
        [ -n "$weight" ] || return 0
 
-       config_get ipaddr $1 ipaddr
-       config_get port $1 port
-       config_get check $1 check
+       config_get ipaddr "$1" ipaddr
+       config_get port "$1" port
+       config_get check "$1" check
 
-       [ -n "$ipaddr" -a -n "$port" ] && {
-               printf "${INDENT_1}real_server $ipaddr $port {\n" >> $KEEPALIVED_CONF
-               printf "${INDENT_2}weight $weight\n" >> $KEEPALIVED_CONF
+       [ -n "$ipaddr" ] && [ -n "$port" ] && {
+               printf '%breal_server %s %d {\n' "${INDENT_1}" "$ipaddr" "$port" >> "$KEEPALIVED_CONF"
+               printf '%bweight %d\n' "${INDENT_2}" "$weight" >> "$KEEPALIVED_CONF"
                case "$check" in
                        TCP_CHECK)
-                               printf "${INDENT_2}${check} {\n" >> $KEEPALIVED_CONF
-                               print_elems_indent $1 $INDENT_3 connect_timeout \
+                               printf '%b%s {\n' "${INDENT_2}" "$check" >> "$KEEPALIVED_CONF"
+                               print_elems_indent "$1" "$INDENT_3" connect_timeout \
                                        connect_port
-                               printf "${INDENT_2}}\n" >> $KEEPALIVED_CONF
+                               printf '%b}\n' "${INDENT_2}" >> "$KEEPALIVED_CONF"
                                ;;
                        MISC_CHECK)
-                               printf "${INDENT_2}${check} {\n" >> $KEEPALIVED_CONF
-                               print_elems_indent $1 $INDENT_3 misc_path
-                               printf "${INDENT_2}}\n" >> $KEEPALIVED_CONF
+                               printf '%b%s {\n' "${INDENT_2}" "$check" >> "$KEEPALIVED_CONF"
+                               print_elems_indent "$1" "$INDENT_3" misc_path
+                               printf '%b}\n' "${INDENT_2}" >> "$KEEPALIVED_CONF"
                                ;;
                        HTTP_GET | SSL_GET)
-                               printf "${INDENT_2}${check} {\n" >> $KEEPALIVED_CONF
-                               print_elems_indent $1 $INDENT_3 connect_timeout \
+                               printf '%b%s {\n' "${INDENT_2}" "$check" >> "$KEEPALIVED_CONF"
+                               print_elems_indent "$1" "$INDENT_3" connect_timeout \
                                        connect_port nb_get_retry delay_before_retry
                                # Handle url list
-                               config_list_foreach $1 url url_list
-                               printf "${INDENT_2}}\n" >> $KEEPALIVED_CONF
+                               config_list_foreach "$1" url url_list
+                               printf '%b}\n' "${INDENT_2}" >> "$KEEPALIVED_CONF"
                                ;;
                esac
-               printf "${INDENT_1}}\n" >> $KEEPALIVED_CONF
+               printf '%b}\n' "${INDENT_1}" >> "$KEEPALIVED_CONF"
        }
 }
 
@@ -450,33 +452,33 @@ real_server_list() {
 virtual_server() {
        local enabled ipaddr port lb_algo sorry_server_ip sorry_server_port
 
-       config_get_bool enabled $1 enabled 1
+       config_get_bool enabled "$1" enabled 1
        [ "$enabled" -eq 1 ] || return 0
 
-       config_get ipaddr $1 ipaddr
+       config_get ipaddr "$1" ipaddr
        [ -z "$ipaddr" ] && return 0
-       config_get port $1 port
+       config_get port "$1" port
        [ -z "$port" ] && return 0
 
        config_section_open "virtual_server" "$ipaddr $port"
 
-       print_elems_indent $1 $INDENT_1 fwmark delay_loop \
+       print_elems_indent "$1" "$INDENT_1" fwmark delay_loop \
                lb_kind persistence_timeout persistence_granularity \
                virtualhost protocol
 
-       config_get lb_algo $1 lb_algo
+       config_get lb_algo "$1" lb_algo
        [ -z "$lb_algo" ] && lb_algo="rr"
-       modprobe ip_vs_${lb_algo} 2>&1 1>/dev/null
-       printf "${INDENT_1}lb_algo ${lb_algo}\n" >> $KEEPALIVED_CONF
+       modprobe ip_vs_${lb_algo} 1>/dev/null 2>&1
+       printf '%blb_algo %s\n' "${INDENT_1}" "${lb_algo}" >> "$KEEPALIVED_CONF"
 
-       config_get sorry_server_ip $1 sorry_server_ip
-       config_get sorry_server_port $1 sorry_server_port
-       [ -n "$sorry_server_ip" -a -n "$sorry_server_port" ] && {
-               printf "${INDENT_1}sorry_server $sorry_server_ip $sorry_server_port\n" >> $KEEPALIVED_CONF
+       config_get sorry_server_ip "$1" sorry_server_ip
+       config_get sorry_server_port "$1" sorry_server_port
+       [ -n "$sorry_server_ip" ] && [ -n "$sorry_server_port" ] && {
+               printf '%bsorry_server %s %s\n' "${INDENT_1}" "$sorry_server_ip" "$sorry_server_port" >> "$KEEPALIVED_CONF"
        }
 
        # Handle real_server list
-       config_list_foreach $1 real_server real_server_list
+       config_list_foreach "$1" real_server real_server_list
 
        config_section_close
 }
@@ -484,11 +486,11 @@ virtual_server() {
 process_config() {
        local alt_config_file
 
-       rm -f $KEEPALIVED_CONF
+       rm -f "$KEEPALIVED_CONF"
 
        # First line
-       printf "! Configuration file for keepalived (autogenerated via init script)\n" > $KEEPALIVED_CONF
-       printf "! Written %s\n\n" "$(date +'%c')" >> $KEEPALIVED_CONF
+       printf '! Configuration file for keepalived (autogenerated via init script)\n' > "$KEEPALIVED_CONF"
+       printf '! Written %s\n\n' "$(date +'%c')" >> "$KEEPALIVED_CONF"
 
        [ -f /etc/config/keepalived ] || return 0
        config_load 'keepalived'
@@ -499,9 +501,9 @@ process_config() {
 
        # If "alt_config_file" specified, use that instead
        [ -n "$alt_config_file" ] && [ -f "$alt_config_file" ] && {
-               rm -f $KEEPALIVED_CONF
+               rm -f "$KEEPALIVED_CONF"
                # Symlink "alt_config_file" since it's a bit easier and safer
-               ln -s $alt_config_file $KEEPALIVED_CONF
+               ln -s "$alt_config_file" "$KEEPALIVED_CONF"
                return 0
        }
 
index 85f97bc9ded3839745952978157db445d825f6e5..50635960d77ef985ab553c0ee15a439042a3ae20 100644 (file)
@@ -429,18 +429,18 @@ endif
 
 ifeq ($(CONFIG_NGINX_HTTP_BROTLI),y)
   define Download/nginx-brotli
-    VERSION:=e26248ee361c04e25f581b92b85d95681bdffb39
+    VERSION:=dc37f658ccb5a51d090dc09d1a2aca2f24309869
     SUBDIR:=nginx-brotli
-    FILE:=ngx-brotli-module-$$(VERSION).tar.gz
+    FILE:=ngx-brotli-module-$$(VERSION).tar.xz
     URL:=https://github.com/eustas/ngx_brotli.git
-    MIRROR_HASH:=76b891ba49f82f0cfbc9cba875646e26ee986b522373e0aa2698a9923a4adcdb
+    MIRROR_HASH:=6bc0c40ff24f6e0ac616dfddc803bdc7fcf54764ba9dc4f9cecb3a68beedcdaf
     PROTO:=git
   endef
   $(eval $(call Download,nginx-brotli))
 
   define Prepare/nginx-brotli
        $(eval $(Download/nginx-brotli))
-       gzip -dc $(DL_DIR)/$(FILE) | tar -C $(PKG_BUILD_DIR) $(TAR_OPTIONS)
+       xzcat $(DL_DIR)/$(FILE) | tar -C $(PKG_BUILD_DIR) $(TAR_OPTIONS)
   endef
 endif
 
index 31af664a2190590e21c381e396e0780d45a979cc..5b0f3da0ae8c1f537eca1287465dea66c8170fef 100644 (file)
@@ -20,7 +20,7 @@ http {
 
     sendfile on;
     keepalive_timeout 0;
-    
+
     client_body_buffer_size 10K;
     client_header_buffer_size 1k;
     client_max_body_size 1G;
@@ -31,14 +31,15 @@ http {
     gzip_vary on;
     gzip_comp_level 1;
     gzip_proxied any;
-    
+    gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript application/javascript image/svg+xml;
+
     root /www;
 
     server {
         listen 80 default_server;
         listen [::]:80 default_server;
         server_name  localhost;
-        
+
         location ~* .(jpg|jpeg|png|gif|ico|css|js)$ {
             expires 365d;
         }
index 318453b54d7dbd752b7e53d195cbc58e782dfc13..db33e554cef4f2067de348cabe9fe0124a04f1c9 100644 (file)
@@ -20,7 +20,7 @@ http {
 
     sendfile on;
     keepalive_timeout 0;
-    
+
     client_body_buffer_size 10K;
     client_header_buffer_size 1k;
     client_max_body_size 1G;
@@ -31,9 +31,10 @@ http {
     gzip_vary on;
     gzip_comp_level 1;
     gzip_proxied any;
-    
+    gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript application/javascript image/svg+xml;
+
     root /www;
-    
+
     server {
         listen 80 default_server;
         listen [::]:80 default_server;
@@ -45,7 +46,7 @@ http {
         listen 443 ssl default_server;
         listen [::]:443 ssl default_server;
         server_name  localhost;
-        
+
         ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
         ssl_prefer_server_ciphers on;
         ssl_ciphers "EECDH+ECDSA+AESGCM:EECDH+aRSA+AESGCM:EECDH+ECDSA+SHA384:EECDH+ECDSA+SHA256:EECDH+aRSA+SHA384:EECDH+aRSA+SHA256:EECDH:DHE+AESGCM:DHE:!RSA!aNULL:!eNULL:!LOW:!RC4:!3DES:!MD5:!EXP:!PSK:!SRP:!DSS:!CAMELLIA:!SEED";
@@ -53,7 +54,7 @@ http {
 
         ssl_certificate /etc/nginx/nginx.cer;
         ssl_certificate_key /etc/nginx/nginx.key;
-        
+
         location ~* .(jpg|jpeg|png|gif|ico|css|js)$ {
             expires 365d;
         }
index 334ba8726c12b6d8a226908031945bdc68bf75e5..b6fb9f64481d09ebf79d96f4c466c3ccdf4ec475 100644 (file)
@@ -8,27 +8,26 @@
 include $(TOPDIR)/rules.mk
 
 PKG_NAME:=nsd
-PKG_VERSION:=4.1.13
+PKG_VERSION:=4.2.1
 PKG_RELEASE:=1
 
 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
-PKG_SOURCE_URL:=http://www.nlnetlabs.nl/downloads/nsd
-PKG_HASH:=c45cd4ba2101a027e133b2be44db9378e27602e05f09a5ef25019e1ae45291af
-PKG_FIXUP:=autoreconf
-PKG_INSTALL:=1
-
-PKG_LICENSE:=BSD-3c
-PKG_LICENSE_FILES:=LICENSE
+PKG_SOURCE_URL:=https://www.nlnetlabs.nl/downloads/nsd
+PKG_HASH:=d17c0ea3968cb0eb2be79f2f83eb299b7bfcc554b784007616eed6ece828871f
+PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(BUILD_VARIANT)/$(PKG_NAME)-$(PKG_VERSION)
 
 PKG_MAINTAINER:=Vasilis Tsiligiannis <acinonyx@openwrt.gr>
+PKG_LICENSE:=BSD-3-Clause
+PKG_LICENSE_FILES:=LICENSE
 
-PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(BUILD_VARIANT)/$(PKG_NAME)-$(PKG_VERSION)
+PKG_INSTALL:=1
+PKG_BUILD_PARALLEL:=1
 
 include $(INCLUDE_DIR)/package.mk
 
 define Package/nsd/default
   TITLE:=NSD Name Server Daemon
-  URL:=http://www.nlnetlabs.nl/projects/nsd/
+  URL:=https://www.nlnetlabs.nl/projects/nsd/
   SECTION:=net
   CATEGORY:=Network
   SUBMENU:=IP Addresses and Names
@@ -85,22 +84,22 @@ define Package/nsd-control-setup/description
 endef
 
 CONFIGURE_ARGS+= \
+       --enable-packed \
+       --enable-recvmmsg \
+       --enable-tcp-fastopen \
        --disable-checking \
-       --with-libevent=no \
        --with-user="network" \
+       --without-libevent \
+       $(if $(CONFIG_IPV6),--with,--without)-ipv6
 
 CONFIGURE_VARS+= \
        ac_cv_c_va_list_def=no \
-       ac_cv_c_strptime_needs_defs=no \
+       ac_cv_c_strptime_needs_defs=no
 
 ifeq ($(BUILD_VARIANT),ssl)
-       CONFIGURE_ARGS += \
-               --with-ssl="$(STAGING_DIR)/usr"
-endif
-
-ifeq ($(BUILD_VARIANT),nossl)
-       CONFIGURE_ARGS += \
-               --without-ssl
+       CONFIGURE_ARGS += --with-ssl="$(STAGING_DIR)/usr"
+else
+       CONFIGURE_ARGS += --without-ssl
 endif
 
 define Package/nsd/conffiles
diff --git a/net/nsd/patches/010-musl.patch b/net/nsd/patches/010-musl.patch
new file mode 100644 (file)
index 0000000..3ba61e4
--- /dev/null
@@ -0,0 +1,10 @@
+--- a/nsd-control.c
++++ b/nsd-control.c
+@@ -42,6 +42,7 @@
+  */
+ #include "config.h"
++#include <stdio.h>
+ #ifdef HAVE_SSL
+ #include <sys/types.h>
diff --git a/net/nsd/patches/020-openssl.patch b/net/nsd/patches/020-openssl.patch
new file mode 100644 (file)
index 0000000..31381ff
--- /dev/null
@@ -0,0 +1,13 @@
+--- a/tsig.c
++++ b/tsig.c
+@@ -19,6 +19,10 @@
+ #include "query.h"
+ #include "rbtree.h"
++#ifndef HAVE_SSL
++#define CRYPTO_memcmp memcmp
++#endif
++
+ static region_type *tsig_region;
+ struct tsig_key_table
index 9e4bf5446ef19af3ea28ad994068a6551859546c..33ba174a5b85e9b4cbd07705120266be5dd24147 100644 (file)
@@ -11,9 +11,9 @@ PKG_RELEASE:=1
 
 PKG_SOURCE_PROTO:=git
 PKG_SOURCE_URL:=https://github.com/nccgroup/phantap
-PKG_SOURCE_DATE:=2019.07.30
-PKG_SOURCE_VERSION:=dccee924f005213c8b1c84e05900ed0bbfac5736
-PKG_MIRROR_HASH:=456adfcc0147f3b47d2fad26938e2896096919f053f871c1319218e7d37c6d27
+PKG_SOURCE_DATE:=2019.08.04
+PKG_SOURCE_VERSION:=f104742cf489b2b916a2cf9e2ee980259b89efe7
+PKG_MIRROR_HASH:=ebe090dbfeb0ef928b28a15c17290abbcdee043e77f38bd38acaabe38f2b685e
 
 PKG_MAINTAINER:=Diana Dragusin <diana.dragusin@nccgroup.com>, \
     Etienne Champetier <champetier.etienne@gmail.com>
index bc5344cd807dc05b0417b1f76d50e784fc33171b..12fcef3974084f35f8806f887df98c5a4b6bad55 100644 (file)
@@ -86,7 +86,7 @@ to use resolvers found in `/etc/resolv.conf` by setting the dnsmasq option
 command line:
 
     uci add_list dhcp.@dnsmasq[-1].server='127.0.0.1#5453'
-    uci dhcp.@dnsmasq[-1].noresolv=1
+    uci set dhcp.@dnsmasq[-1].noresolv=1
     uci commit && reload_config
 
 The same outcome can be achieved in the LUCI web interface as follows:
index 9cafc73dd14d6091fdb7ded066f910b7d1e8afdb..efa08cf9f4d22dfca546240aadd0d9cc45dd5ad6 100644 (file)
@@ -7,10 +7,10 @@ include $(TOPDIR)/rules.mk
 
 PKG_NAME:=subversion
 PKG_RELEASE:=1
-PKG_VERSION:=1.12.0
+PKG_VERSION:=1.12.2
 PKG_SOURCE_URL:=@APACHE/subversion
 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.bz2
-PKG_HASH:=7fae7c73d8a007c107c0ae5eb372bc0bb013dbfe966fcd5c59cd5a195a5e2edf
+PKG_HASH:=3bd0b5c8e4c5175263dc9a92fd9aef94ce917e80af034f26fe5c45fde7e0f771
 PKG_LICENSE:=Apache-2.0
 PKG_LICENSE_FILES:=LICENSE
 PKG_MAINTAINER:=Val Kulkov <val.kulkov@gmail.com>
index 1cf91999607d3932e3f48afb062da3ad7fd94211..17eb903b8fe8fa522d4fe4d69526a823aef93152 100644 (file)
@@ -6,7 +6,7 @@
 include $(TOPDIR)/rules.mk
 
 PKG_NAME:=travelmate
-PKG_VERSION:=1.4.9
+PKG_VERSION:=1.4.11
 PKG_RELEASE:=1
 PKG_LICENSE:=GPL-3.0+
 PKG_MAINTAINER:=Dirk Brenken <dev@brenken.org>
@@ -17,7 +17,7 @@ define Package/travelmate
        SECTION:=net
        CATEGORY:=Network
        TITLE:=A wlan connection manager for travel router
-       DEPENDS:=+iwinfo +jshn +jsonfilter +uclient-fetch
+       DEPENDS:=+iwinfo +jshn +jsonfilter +uclient-fetch +dnsmasq
        PKGARCH:=all
 endef
 
@@ -42,13 +42,16 @@ endef
 
 define Package/travelmate/install
        $(INSTALL_DIR) $(1)/usr/bin
-       $(INSTALL_BIN) ./files/travelmate.sh $(1)/usr/bin/
+       $(INSTALL_BIN) ./files/travelmate.sh $(1)/usr/bin
 
        $(INSTALL_DIR) $(1)/etc/init.d
        $(INSTALL_BIN) ./files/travelmate.init $(1)/etc/init.d/travelmate
 
        $(INSTALL_DIR) $(1)/etc/config
        $(INSTALL_CONF) ./files/travelmate.conf $(1)/etc/config/travelmate
+
+       $(INSTALL_DIR) $(1)/etc/travelmate
+       $(INSTALL_BIN) ./files/*.login $(1)/etc/travelmate
 endef
 
 $(eval $(call BuildPackage,travelmate))
index e261e76614d7fe0fe05252654150035e7ff09881..f84fffa73e8169bc11c56146a74ecd84f4eb7d29 100644 (file)
@@ -13,6 +13,7 @@ To avoid these kind of deadlocks, travelmate will set all station interfaces to
 * support all kinds of uplinks, incl. hidden and enterprise uplinks
 * continuously checks the existing uplink connection (quality), e.g. for conditional uplink (dis-) connections
 * captive portal detection with internet online check and a 'heartbeat' function to keep the uplink connection up & running
+* captive portal auto-login hook (configured via uci/LuCI), you could reference an external script for captive portal auto-logins (see example below)
 * proactively scan and switch to a higher prioritized uplink, despite of an already existing connection
 * support devices with multiple radios in any order
 * procd init and hotplug support
@@ -21,10 +22,11 @@ To avoid these kind of deadlocks, travelmate will set all station interfaces to
 * optional: the LuCI frontend shows the WiFi QR codes from all configured Access Points. It allows you to connect your Android or iOS devices to your router’s WiFi using the QR code
 
 ## Prerequisites
-* [OpenWrt](https://openwrt.org), tested with the stable release series (18.06.x) and with the latest OpenWrt snapshot
-* iwinfo for wlan scanning, uclient-fetch for captive portal detection
+* [OpenWrt](https://openwrt.org), tested with the stable release series (19.07.x) and with the latest OpenWrt snapshot
+* iwinfo for wlan scanning, uclient-fetch for captive portal detection, dnsmasq as dns backend
 * optional: qrencode 4.x for QR code support
 * optional: wpad (the full version, not wpad-mini) to use Enterprise WiFi
+* optional: curl to use external scripts for captive portal auto-logins
 
 ## Installation & Usage
 * download the package [here](https://downloads.openwrt.org/snapshots/packages/x86_64/packages)
@@ -50,24 +52,38 @@ To avoid these kind of deadlocks, travelmate will set all station interfaces to
     * trm\_maxretry => how many times should travelmate try to connect to an uplink (int/default: '3', valid range: 1-10)
     * trm\_timeout => overall retry timeout in seconds (int/default: '60', valid range: 30-300)
     * trm\_radio => limit travelmate to a single radio (e.g. 'radio1') or change the overall scanning priority (e.g. 'radio1 radio2 radio0') (default: not set, use all radios 0-n)
-    * trm\_iface => main uplink / procd trigger network interface (default: trm_wwan)
+    * trm\_iface => uplink / procd trigger network interface (default: trm_wwan)
     * trm\_triggerdelay => additional trigger delay in seconds before travelmate processing begins (int/default: '2')
 
+## Captive Portal auto-logins
+For automated captive portal logins you could reference external shell scripts. All login scripts should be executable and located in '/etc/travelmate' with the extension '.login'. The provided 'wifionice.login' script example requires curl and automates the login to german ICE hotspots, it also explains the principle approach to extract runtime data like security tokens for a succesful login. Hopefully more scripts for different captive portals will be provided by the community ...
+
+A typical/succesful captive portal login looks like this:
+<pre><code>
+[...]
+Mon Aug  5 10:15:48 2019 user.info travelmate-1.4.10[1481]: travelmate instance started ::: action: start, pid: 1481
+Mon Aug  5 10:16:17 2019 user.info travelmate-1.4.10[1481]: captive portal login '/etc/travelmate/wifionice.login' for 'www.wifionice.de' has been executed with rc '0'
+Mon Aug  5 10:16:23 2019 user.info travelmate-1.4.10[1481]: connected to uplink 'radio1/WIFIonICE/-' (1/5, GL.iNet GL-AR750S, OpenWrt SNAPSHOT r10644-cb49e46a8a)
+[...]
+</code></pre>
+
 ## Runtime information
 
 **receive travelmate runtime information:**
 <pre><code>
 ~# /etc/init.d/travelmate status
 ::: travelmate runtime information
-  + travelmate_status  : connected (net ok/78)
-  + travelmate_version : 1.2.3
-  + station_id         : radio1/blackhole/01:02:03:04:05:06
+  + travelmate_status  : connected (net ok/100)
+  + travelmate_version : 1.4.10
+  + station_id         : radio1/blackhole/-
   + station_interface  : trm_wwan
   + faulty_stations    : 
-  + last_rundate       : 07.09.2018 17:22:37
-  + system             : TP-LINK RE450, OpenWrt SNAPSHOT r8018-42f158314e
+  + last_rundate       : 2019.08.03-20:37:19
+  + system             : GL.iNet GL-AR750S, OpenWrt SNAPSHOT r10644-cb49e46a8a
 </code></pre>
 
+To debug travelmate runtime problems, please always enable the 'trm\_debug' flag, restart travelmate and scan the system log (_logread -e "travelmate"_)
+
 ## Manual Setup
 **1. configure the travelmate wwan interface in /etc/config/network:**
 <pre><code>
index acb83a0ad981ee2bd033ff016903dffff42f7cd7..1e00179a21fe258ded029346678999cb1e07506b 100755 (executable)
@@ -10,7 +10,7 @@
 #
 LC_ALL=C
 PATH="/usr/sbin:/usr/bin:/sbin:/bin"
-trm_ver="1.4.9"
+trm_ver="1.4.11"
 trm_sysver="unknown"
 trm_enabled=0
 trm_debug=0
@@ -65,10 +65,9 @@ f_envload()
                trm_sysver="${sys_model}, ${sys_desc}"
        fi
 
-       # get eap capabilities and rebind protection setting
+       # get eap capabilities
        #
        trm_eap="$("${trm_wpa}" -veap >/dev/null 2>&1; printf "%u" ${?})"
-       trm_rebind="$(uci_get dhcp "@dnsmasq[0]" rebind_protection)"
 
        # load config and check 'enabled' option
        #
@@ -169,24 +168,24 @@ f_prep()
                        fi
                fi
        fi
-       f_log "debug" "f_prep ::: config: ${config}, mode: ${mode}, network: ${network}, radio: ${radio}, trm_radio: ${trm_radio:-"-"}, trm_active_sta: ${trm_active_sta:-"-"}, proactive: ${proactive}, trm_eap: ${trm_eap:-"-"}, trm_rebind: ${trm_rebind:-"-"}, disabled: ${disabled}"
+       f_log "debug" "f_prep ::: config: ${config}, mode: ${mode}, network: ${network}, radio: ${radio}, trm_radio: ${trm_radio:-"-"}, trm_active_sta: ${trm_active_sta:-"-"}, proactive: ${proactive}, trm_eap: ${trm_eap:-"-"}, disabled: ${disabled}"
 }
 
 # check interface status
 #
 f_check()
 {
-       local IFS ifname radio dev_status last_status config sta_essid sta_bssid result cp_domain wait mode="${1}" status="${2:-"false"}"
+       local IFS ifname radio dev_status config sta_essid sta_bssid result uci_essid uci_bssid login_command bg_pid wait_time mode="${1}" status="${2:-"false"}" cp_domain="${3:-"false"}"
 
        if [ "${mode}" != "initial" ] && [ "${status}" = "false" ]
        then
                ubus call network reload
-               wait=$((trm_maxwait/6))
-               sleep ${wait}
+               wait_time=$((trm_maxwait/6))
+               sleep ${wait_time}
        fi
 
-       wait=1
-       while [ "${wait}" -le "${trm_maxwait}" ]
+       wait_time=1
+       while [ "${wait_time}" -le "${trm_maxwait}" ]
        do
                dev_status="$(ubus -S call network.wireless status 2>/dev/null)"
                if [ -n "${dev_status}" ]
@@ -206,7 +205,7 @@ f_check()
                                                trm_devlist="$(f_trim "${trm_devlist} ${radio}")"
                                        fi
                                done
-                               if [ "${trm_devlist}" = "${trm_radiolist}" ] || [ "${wait}" -eq "${trm_maxwait}" ]
+                               if [ "${trm_devlist}" = "${trm_radiolist}" ] || [ "${wait_time}" -eq "${trm_maxwait}" ]
                                then
                                        ifname="${trm_devlist}"
                                        break
@@ -224,21 +223,33 @@ f_check()
                                        if [ "${mode}" = "initial" ] && [ "${trm_captive}" -eq 1 ]
                                        then
                                                result="$(${trm_fetch} --timeout=$((trm_maxwait/6)) "${trm_captiveurl}" -O /dev/null 2>&1 | \
-                                                       awk '/^Failed to redirect|^Redirected/{printf "%s" "net cp \047"$NF"\047";exit}/^Download completed/{printf "%s" "net ok";exit}/^Failed|^Connection error/{printf "%s" "net nok";exit}')"
+                                                       awk '/^Failed to redirect|^Redirected/{printf "%s" "net cp \047"$NF"\047";exit}/^Download completed/{printf "%s" "net ok";exit}/^Failed|Connection error/{printf "%s" "net nok";exit}')"
+                                               if [ "${cp_domain}" = "true" ]
+                                               then
+                                                       cp_domain="$(printf "%s" "${result}" | awk -F "[\\'| ]" '/^net cp/{printf "%s" $4}')"
+                                                       uci_essid="$(printf "%s" "${dev_status}" | jsonfilter -l1 -e '@.*.interfaces[@.config.mode="sta"].config.ssid')"
+                                                       uci_essid="$(printf "%s" "${uci_essid//[^[:alnum:]_]/_}" | awk '{print tolower($1)}')"
+                                                       uci_bssid="$(printf "%s" "${dev_status}" | jsonfilter -l1 -e '@.*.interfaces[@.config.mode="sta"].config.bssid')"
+                                                       uci_bssid="${uci_bssid//[^[:alnum:]_]/_}"
+                                               fi
                                        fi
-                                       if [ "${trm_ifquality}" -ge "${trm_minquality}" ] && [ "${result%/*}" != "net nok" ]
+                                       if [ "${trm_ifquality}" -ge "${trm_minquality}" ] && [ "${result}" != "net nok" ]
                                        then
                                                trm_ifstatus="$(ubus -S call network.interface dump 2>/dev/null | jsonfilter -l1 -e "@.interface[@.device=\"${ifname}\"].up")"
                                                if [ "${trm_ifstatus}" = "true" ]
                                                then
-                                                       if [ "${mode}" = "sta" ] && [ "${trm_captive}" -eq 1 ] && [ "${trm_rebind:-0}" -eq 1 ] && [ -x "/etc/init.d/dnsmasq" ]
+                                                       if [ "${mode}" = "sta" ] && [ "${trm_captive}" -eq 1 ]
                                                        then
                                                                while true
                                                                do
                                                                        result="$(${trm_fetch} --timeout=$((trm_maxwait/6)) "${trm_captiveurl}" -O /dev/null 2>&1 | \
-                                                                               awk '/^Failed to redirect|^Redirected/{printf "%s" "net cp \047"$NF"\047";exit}/^Download completed/{printf "%s" "net ok";exit}/^Failed|^Connection error/{printf "%s" "net nok";exit}')"
+                                                                               awk '/^Failed to redirect|^Redirected/{printf "%s" "net cp \047"$NF"\047";exit}/^Download completed/{printf "%s" "net ok";exit}/^Failed|Connection error/{printf "%s" "net nok";exit}')"
                                                                        cp_domain="$(printf "%s" "${result}" | awk -F "[\\'| ]" '/^net cp/{printf "%s" $4}')"
-                                                                       if [ "${trm_netcheck}" -eq 1 ] && [ "${result%/*}" = "net nok" ]
+                                                                       uci_essid="$(printf "%s" "${dev_status}" | jsonfilter -l1 -e '@.*.interfaces[@.config.mode="sta"].config.ssid')"
+                                                                       uci_essid="$(printf "%s" "${uci_essid//[^[:alnum:]_]/_}" | awk '{print tolower($1)}')"
+                                                                       uci_bssid="$(printf "%s" "${dev_status}" | jsonfilter -l1 -e '@.*.interfaces[@.config.mode="sta"].config.bssid')"
+                                                                       uci_bssid="${uci_bssid//[^[:alnum:]_]/_}"
+                                                                       if [ "${trm_netcheck}" -eq 1 ] && [ "${result}" = "net nok" ]
                                                                        then
                                                                                trm_ifstatus="${status}"
                                                                                f_jsnup
@@ -249,13 +260,40 @@ f_check()
                                                                                break
                                                                        fi
                                                                        uci -q add_list dhcp.@dnsmasq[0].rebind_domain="${cp_domain}"
-                                                                       f_log "info" "captive portal domain '${cp_domain}' added to rebind whitelist"
+                                                                       f_log "info" "captive portal domain '${cp_domain}' added to dhcp rebind whitelist"
+                                                                       if [ -z "$(uci_get travelmate "${uci_essid}${uci_bssid}")" ]
+                                                                       then
+                                                                               uci_add travelmate "login" "${uci_essid}${uci_bssid}"
+                                                                               uci_set travelmate "${uci_essid}${uci_bssid}" "command" "none"
+                                                                               f_log "info" "captive portal login section '${uci_essid}${uci_bssid}' added to travelmate config section"
+                                                                       fi
                                                                done
                                                                if [ -n "$(uci -q changes dhcp)" ]
                                                                then
                                                                        uci_commit dhcp
                                                                        /etc/init.d/dnsmasq reload
                                                                fi
+                                                               if [ -n "$(uci -q changes travelmate)" ]
+                                                               then
+                                                                       uci_commit travelmate
+                                                               fi
+                                                       fi
+                                                       if [ -n "${cp_domain}" ] && [ "${cp_domain}" != "false" ] && [ -n "${uci_essid}" ] && [ "${trm_captive}" -eq 1 ]
+                                                       then
+                                                               trm_connection="${result:-"-"}/${trm_ifquality}"
+                                                               f_jsnup
+                                                               login_command="$(uci_get travelmate "${uci_essid}${uci_bssid}" command)"
+                                                               if [ -x "${login_command}" ]
+                                                               then
+                                                                       "${login_command}" >/dev/null 2>&1
+                                                                       rc=${?}
+                                                                       f_log "info" "captive portal login '${login_command:0:40}' for '${cp_domain}' has been executed with rc '${rc}'"
+                                                                       if [ "${rc}" -eq 0 ]
+                                                                       then
+                                                                               result="$(${trm_fetch} --timeout=$((trm_maxwait/6)) "${trm_captiveurl}" -O /dev/null 2>&1 | \
+                                                                                       awk '/^Failed to redirect|^Redirected/{printf "%s" "net cp \047"$NF"\047";exit}/^Download completed/{printf "%s" "net ok";exit}/^Failed|Connection error/{printf "%s" "net nok";exit}')"
+                                                                       fi
+                                                               fi
                                                        fi
                                                        trm_connection="${result:-"-"}/${trm_ifquality}"
                                                        f_jsnup
@@ -267,13 +305,15 @@ f_check()
                                                sta_bssid="$(printf "%s" "${dev_status}" | jsonfilter -l1 -e '@.*.interfaces[@.config.mode="sta"].*.bssid')"
                                                if [ "${trm_ifquality}" -lt "${trm_minquality}" ]
                                                then
+                                                       unset trm_connection
+                                                       trm_ifstatus="${status}"
                                                        f_log "info" "uplink '${sta_essid:-"-"}/${sta_bssid:-"-"}' is out of range (${trm_ifquality}/${trm_minquality})"
-                                               elif [ "${trm_netcheck}" -eq 1 ] && [ "${result%/*}" = "net nok" ]
+                                               elif [ "${trm_netcheck}" -eq 1 ] && [ "${result}" = "net nok" ]
                                                then
+                                                       unset trm_connection
+                                                       trm_ifstatus="${status}"
                                                        f_log "info" "uplink '${sta_essid:-"-"}/${sta_bssid:-"-"}' has no internet (${result})"
                                                fi
-                                               unset trm_connection
-                                               trm_ifstatus="${status}"
                                                f_jsnup
                                                break
                                        elif [ "${mode}" = "initial" ]
@@ -294,10 +334,10 @@ f_check()
                                fi
                        fi
                fi
-               wait=$((wait+1))
+               wait_time=$((wait_time+1))
                sleep 1
        done
-       f_log "debug" "f_check::: mode: ${mode}, name: ${ifname:-"-"}, status: ${trm_ifstatus}, connection: ${trm_connection:-"-"}, wait: ${wait}, max_wait: ${trm_maxwait}, min_quality: ${trm_minquality}, captive: ${trm_captive}, netcheck: ${trm_netcheck}"
+       f_log "debug" "f_check::: mode: ${mode}, name: ${ifname:-"-"}, status: ${trm_ifstatus}, connection: ${trm_connection:-"-"}, wait: ${wait_time}, max_wait: ${trm_maxwait}, min_quality: ${trm_minquality}, captive: ${trm_captive}, netcheck: ${trm_netcheck}"
 }
 
 # update runtime information
@@ -399,7 +439,7 @@ f_main()
        local IFS cnt dev config spec scan_list scan_essid scan_bssid scan_quality faulty_list
        local station_id sta sta_essid sta_bssid sta_radio sta_iface active_essid active_bssid active_radio
 
-       f_check "initial"
+       f_check "initial" "false" "true"
        f_log "debug" "f_main ::: status: ${trm_ifstatus}, proactive: ${trm_proactive}"
        if [ "${trm_ifstatus}" != "true" ] || [ "${trm_proactive}" -eq 1 ]
        then
diff --git a/net/travelmate/files/wifionice.login b/net/travelmate/files/wifionice.login
new file mode 100755 (executable)
index 0000000..5687921
--- /dev/null
@@ -0,0 +1,37 @@
+#!/bin/sh
+# captive portal auto-login script for german ICE hotspots
+# written by Dirk Brenken (dev@brenken.org)
+
+# This is free software, licensed under the GNU General Public License v3.
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+domain="www.wifionice.de"
+cmd="$(command -v curl)"
+
+# curl check
+#
+if [ ! -x "${cmd}" ]
+then
+       exit 1
+fi
+
+# initial get request to receive & extract a valid security token
+#
+"${cmd}" "http://${domain}/en/" -s -o /dev/null -c "/tmp/${domain}.cookie"
+if [ -f "/tmp/${domain}.cookie" ]
+then
+       sec_token="$(awk '/csrf/{print $7}' "/tmp/${domain}.cookie")"
+       rm -f "/tmp/${domain}.cookie"
+else
+       exit 2
+fi
+
+# final post request/login with valid session cookie/security token
+#
+if [ -n "${sec_token}" ]
+then
+       "${cmd}" "http://${domain}/en/" -H "Cookie: csrf=${sec_token}" --data "login=true&CSRFToken=${sec_token}&connect="
+else
+       exit 3
+fi
index d1f5533f252d6a4667aa7f823422ec1ac72f209d..13937b4da2267a01044f8398c0877e69d7a1d63f 100644 (file)
@@ -7,7 +7,7 @@ include $(TOPDIR)/rules.mk
 
 PKG_NAME:=zerotier
 PKG_VERSION:=1.4.0.1
-PKG_RELEASE:=1
+PKG_RELEASE:=2
 
 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
 PKG_SOURCE_URL:=https://codeload.github.com/zerotier/ZeroTierOne/tar.gz/$(PKG_VERSION)?
@@ -56,6 +56,10 @@ endef
 TARGET_CFLAGS += -ffunction-sections -fdata-sections
 TARGET_LDFLAGS += -Wl,--gc-sections
 
+ifdef CONFIG_USE_UCLIBC
+       TARGET_CFLAGS += -D'valloc(a)=aligned_alloc(getpagesize(),a)'
+endif
+
 define Package/zerotier/conffiles
 /etc/config/zerotier
 endef
diff --git a/net/zerotier/patches/0005-link-natpmp.patch b/net/zerotier/patches/0005-link-natpmp.patch
new file mode 100644 (file)
index 0000000..d1c8209
--- /dev/null
@@ -0,0 +1,11 @@
+--- a/make-linux.mk
++++ b/make-linux.mk
+@@ -38,7 +38,7 @@ else
+       override DEFS+=-DMINIUPNP_STATICLIB -DMINIUPNPC_SET_SOCKET_TIMEOUT -DMINIUPNPC_GET_SRC_ADDR -D_BSD_SOURCE -D_DEFAULT_SOURCE -D_XOPEN_SOURCE=600 -DOS_STRING=\"Linux\" -DMINIUPNPC_VERSION_STRING=\"2.0\" -DUPNP_VERSION_STRING=\"UPnP/1.1\" -DENABLE_STRNATPMPERR
+       ONE_OBJS+=ext/miniupnpc/connecthostport.o ext/miniupnpc/igd_desc_parse.o ext/miniupnpc/minisoap.o ext/miniupnpc/minissdpc.o ext/miniupnpc/miniupnpc.o ext/miniupnpc/miniwget.o ext/miniupnpc/minixml.o ext/miniupnpc/portlistingparse.o ext/miniupnpc/receivedata.o ext/miniupnpc/upnpcommands.o ext/miniupnpc/upnpdev.o ext/miniupnpc/upnperrors.o ext/miniupnpc/upnpreplyparse.o
+ endif
+-ifeq ($(wildcard /usr/include/natpmp.h),)
++ifeq ($(wildcard $(STAGING_DIR)/usr/include/natpmp.h),)
+       ONE_OBJS+=ext/libnatpmp/natpmp.o ext/libnatpmp/getgateway.o
+ else
+       LDLIBS+=-lnatpmp
index 0e72a87ef89932045a6cd32b8839da8c8c82f9d0..3ad77c9812b24f6ff5f5ad574cd1d34259e5ad94 100644 (file)
@@ -6,16 +6,16 @@
 include $(TOPDIR)/rules.mk
 
 PKG_NAME:=btrfs-progs
-PKG_VERSION:=5.1.1
+PKG_VERSION:=5.2.1
 PKG_RELEASE:=1
 
 PKG_SOURCE:=$(PKG_NAME)-v$(PKG_VERSION).tar.xz
 PKG_SOURCE_URL:=@KERNEL/linux/kernel/people/kdave/btrfs-progs
-PKG_HASH:=9cb91b7de9e10aa6bbf2b003f60bb3f5e5b1984a8008fad7c4b2d3978f5ebe1b
+PKG_HASH:=36ac4a0198ffff79d5800c537ea4b19769a8fd3ad870f75413d25b20e2d83233
 PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-v$(PKG_VERSION)
 
 PKG_MAINTAINER:=Karel Kočí <karel.koci@nic.cz>
-PKG_LICENSE:=GPL-2.0
+PKG_LICENSE:=GPL-2.0-only
 PKG_LICENSE_FILES:=COPYING
 
 PKG_INSTALL:=1
@@ -44,8 +44,8 @@ define Package/btrfs-progs/config
        source "$(SOURCE)/Config.in"
 endef
 
-progs = btrfs btrfs-find-root btrfs-image btrfs-map-logical \
-       btrfs-select-super btrfstune mkfs.btrfs
+boxprogs = btrfsck mkfs.btrfs btrfs-image btrfstune btrfs-find-root
+progs = btrfs-map-logical btrfs-select-super
 
 TARGET_CFLAGS += -ffunction-sections -fdata-sections
 TARGET_LDFLAGS += -Wl,--gc-sections -Wl,--as-needed
@@ -61,6 +61,10 @@ ifneq ($(CONFIG_BTRFS_PROGS_ZSTD),y)
 CONFIGURE_ARGS += --disable-zstd
 endif
 
+MAKE_INSTALL_FLAGS += BUILD_PROGRAMS=0
+
+Build/Compile=$(call Build/Compile/Default,btrfs.box $(progs))
+
 define Build/InstallDev
        $(INSTALL_DIR) $(1)/usr/include $(1)/usr/lib
        $(CP) $(PKG_INSTALL_DIR)/usr/include/* $(1)/usr/include/
@@ -72,10 +76,11 @@ define Package/btrfs-progs/install
        $(CP) $(PKG_INSTALL_DIR)/usr/lib/libbtrfs.so* $(1)/usr/lib
        $(CP) $(PKG_INSTALL_DIR)/usr/lib/libbtrfsutil.so* $(1)/usr/lib
        $(INSTALL_DIR) $(1)/usr/bin
-       $(INSTALL_BIN) $(addprefix $(PKG_INSTALL_DIR)/usr/bin/, $(progs)) $(1)/usr/bin/
-       $(LN) btrfs $(1)/usr/bin/btrfsck
-       $(INSTALL_DIR) $(1)/etc/init.d
-       $(INSTALL_BIN) ./files/btrfs-scan.init $(1)/etc/init.d/btrfs-scan
+       $(INSTALL_BIN) $(PKG_BUILD_DIR)/btrfs.box $(1)/usr/bin/btrfs
+       $(foreach prog,$(boxprogs),$(LN) btrfs $(1)/usr/bin/$(prog);)
+       $(foreach prog,$(progs),$(INSTALL_BIN) $(PKG_BUILD_DIR)/$(prog) $(1)/usr/bin/;)
+       $(INSTALL_DIR) $(1)/lib/preinit
+       $(INSTALL_BIN) ./files/btrfs-scan.init $(1)/lib/preinit/85_btrfs_scan
 endef
 
 $(eval $(call BuildPackage,btrfs-progs))
index 762e0b84079f1a6e6e9ad0568020f22b756c0959..608d3d6c59b0c861debbb049e515e84f69ba6692 100644 (file)
@@ -1,9 +1,7 @@
-#!/bin/sh /etc/rc.common
-# Copyright (C) 2014 OpenWrt.org
+#!/bin/sh
 
-START=19
-
-start() {
-       grep -q btrfs /proc/filesystems && /usr/bin/btrfs device scan
+preinit_btrfs_scan() {
+       grep -vq btrfs /proc/filesystems || btrfs device scan
 }
 
+boot_hook_add preinit_main preinit_btrfs_scan
index 527fca23b293031d71cbb47375580accbf16cefe..192c26500fe4262d316a2f0845d1a73064ec2495 100644 (file)
@@ -6,7 +6,7 @@ include $(TOPDIR)/rules.mk
 
 PKG_NAME:=gddrescue
 PKG_VERSION:=1.23
-PKG_RELEASE:=1
+PKG_RELEASE:=2
 
 PKG_SOURCE:=$(PKG_NAME)_$(PKG_VERSION).orig.tar.xz
 PKG_SOURCE_URL:=http://http.debian.net/debian/pool/main/g/$(PKG_NAME)
@@ -27,7 +27,7 @@ define Package/gddrescue
   CATEGORY:=Utilities
   TITLE:=Data recovery tool
   URL:=https://www.gnu.org/software/ddrescue/
-  DEPENDS:=$(CXX_DEPENDS) @!USE_UCLIBC
+  DEPENDS:=$(CXX_DEPENDS)
 endef
 
 define Package/gddrescue/description
diff --git a/utils/gddrescue/patches/010-fix-uclibcxx.patch b/utils/gddrescue/patches/010-fix-uclibcxx.patch
new file mode 100644 (file)
index 0000000..66e8e76
--- /dev/null
@@ -0,0 +1,102 @@
+--- a/fillbook.cc
++++ b/fillbook.cc
+@@ -31,6 +31,9 @@
+ #include "block.h"
+ #include "mapbook.h"
++#ifdef __UCLIBCXX_MAJOR__
++#undef fputc
++#endif
+ // Return values: 1 write error, 0 OK.
+ //
+--- a/genbook.cc
++++ b/genbook.cc
+@@ -31,6 +31,9 @@
+ #include "block.h"
+ #include "mapbook.h"
++#ifdef __UCLIBCXX_MAJOR__
++#undef fputc
++#endif
+ const char * format_time( const long t, const bool low_prec )
+   {
+--- a/loggers.cc
++++ b/loggers.cc
+@@ -25,6 +25,9 @@
+ #include "block.h"
+ #include "loggers.h"
++#ifdef __UCLIBCXX_MAJOR__
++#undef fputc
++#endif
+ namespace {
+--- a/main.cc
++++ b/main.cc
+@@ -46,6 +46,11 @@
+ #include "non_posix.h"
+ #include "rescuebook.h"
++#ifdef __UCLIBCXX_MAJOR__
++#undef fgetc
++#undef fputc
++#endif
++
+ #ifndef O_BINARY
+ #define O_BINARY 0
+ #endif
+--- a/main_common.cc
++++ b/main_common.cc
+@@ -15,6 +15,10 @@
+     along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
++#ifdef __UCLIBCXX_MAJOR__
++#undef fputc
++#endif
++
+ int verbosity = 0;
+ namespace {
+--- a/mapbook.cc
++++ b/mapbook.cc
+@@ -32,6 +32,10 @@
+ #include "block.h"
+ #include "mapbook.h"
++#ifdef __UCLIBCXX_MAJOR__
++#undef fgetc
++#undef fputc
++#endif
+ namespace {
+--- a/mapfile.cc
++++ b/mapfile.cc
+@@ -29,6 +29,11 @@
+ #include "block.h"
++#ifdef __UCLIBCXX_MAJOR__
++#undef fgetc
++#undef ferror
++#undef feof
++#endif
+ namespace {
+--- a/rescuebook.cc
++++ b/rescuebook.cc
+@@ -36,6 +36,9 @@
+ #include "mapbook.h"
+ #include "rescuebook.h"
++#ifdef __UCLIBCXX_MAJOR__
++#undef fputc
++#endif
+ namespace {
index aab5e751e90b7c64d6187690d07505c1d0f5ac31..71bfdcf995947e6fe51a9b0d947e9d0121d0fafb 100644 (file)
@@ -9,14 +9,13 @@ include $(TOPDIR)/rules.mk
 
 PKG_NAME:=mt-st
 PKG_VERSION:=1.1
-PKG_RELEASE:=1
+PKG_RELEASE:=2
 
-PKG_SOURCE_SUBDIR:=$(PKG_NAME)
-PKG_SOURCE_VERSION:=$(PKG_VERSION)
-PKG_SOURCE:=$(PKG_NAME)-$(PKG_SOURCE_VERSION).tar.gz
+PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
 PKG_SOURCE_URL:=ftp://ftp.ibiblio.org/pub/Linux/system/backup/
 PKG_HASH:=945cb4f3d9957dabe768f5941a9148b746396836c797b25f020c84319ba8170d
 
+PKG_MAINTAINER:=Giuseppe Magnotta <giuseppe.magnotta@gmail.com>
 PKG_LICENSE:=GPL-2.0
 
 include $(INCLUDE_DIR)/package.mk
@@ -26,7 +25,6 @@ define Package/mt-st
   CATEGORY:=Utilities
   TITLE:=Magnetic tape control tools for Linux SCSI tapes
   URL:=http://ftp.ibiblio.org/pub/Linux/system/backup/
-  MAINTAINER:=Giuseppe Magnotta <giuseppe.magnotta@gmail.com>
 endef
 
 define Package/mt-st/description
diff --git a/utils/mt-st/patches/010-sysmacros.patch b/utils/mt-st/patches/010-sysmacros.patch
new file mode 100644 (file)
index 0000000..8e4b373
--- /dev/null
@@ -0,0 +1,10 @@
+--- a/mt.c
++++ b/mt.c
+@@ -21,6 +21,7 @@
+ #include <sys/types.h>
+ #include <sys/ioctl.h>
+ #include <sys/stat.h>
++#include <sys/sysmacros.h>
+ #include <sys/utsname.h>
+ #include "mtio.h"
diff --git a/utils/pservice/Makefile b/utils/pservice/Makefile
new file mode 100644 (file)
index 0000000..3d1923b
--- /dev/null
@@ -0,0 +1,28 @@
+# Copyright (C) 2017 Yousong Zhou
+
+include $(TOPDIR)/rules.mk
+
+PKG_NAME:=pservice
+PKG_VERSION:=2017-08-29
+PKG_RELEASE=1
+
+PKG_MAINTAINER:=Yousong Zhou <yszhou4tech@gmail.com>
+
+include $(INCLUDE_DIR)/package.mk
+
+define Package/pservice
+  SECTION:=utils
+  CATEGORY:=Utilities
+  TITLE:=Wrap commands as procd services
+endef
+
+define Build/Compile
+endef
+
+define Package/pservice/install
+       $(INSTALL_DIR) $(1)/usr/bin $(1)/etc/init.d $(1)/etc/config
+       $(INSTALL_BIN) ./files/pservice.init $(1)/etc/init.d/pservice
+       $(INSTALL_DATA) ./files/pservice.config $(1)/etc/config/pservice
+endef
+
+$(eval $(call BuildPackage,pservice))
diff --git a/utils/pservice/README.md b/utils/pservice/README.md
new file mode 100644 (file)
index 0000000..85fbd6c
--- /dev/null
@@ -0,0 +1,44 @@
+# uci
+
+`disabled`, bool, default `0`
+
+`name`, string, name of the service instance
+
+`command`, file, the service instance executable
+
+`args`, list of args
+
+`stderr`, bool, default `0`, log stderr output of the service instance
+
+`stdout`, bool, default `0`, log stdout output of the service instance
+
+`env`, list of environment variable settings of the form `var=val`
+
+`file`, list of file names.  Service instances will be restarted if the
+content of these files has changed on a service reload event.
+
+`respawn_threshold`, uinteger, default `3600`, time in seconds the instances
+have to be in running state to be considered a valid run
+
+`respawn_timeout`, uinteger, default `5`, time in seconds the instance should
+be delayed to start again after the last crash
+
+`respawn_maxfail`, uinteger, default `5`, maximum times the instances can
+crash/fail in a row and procd will not try to bring it up again after this
+limit has been reached
+
+# notes and faq
+
+Initial environment variables presented to service instances may be different
+from what was observed on the interactive terminal.  E.g. `HOME=/` may affect
+reading `~/.ssh/known_hosts` of dropbear ssh instance.
+
+       PATH=/usr/sbin:/usr/bin:/sbin:/bin PWD=/ HOME=/
+
+If `list args xxx` seems to be too long causing pain, consider using `/bin/sh`
+as the `command`.  It is also worth noting that uci supports multi-line option
+value.
+
+Child processes will keep running when their parent process was killed.  This
+is especially the case and should be taken into account with option `command`
+being `/bin/sh` and it is recommended to use `exec` as the last shell command.
diff --git a/utils/pservice/files/pservice.config b/utils/pservice/files/pservice.config
new file mode 100644 (file)
index 0000000..0f505b3
--- /dev/null
@@ -0,0 +1,24 @@
+config pservice
+       option disabled 1
+       option name 'demo0'
+       option command /bin/sh
+       option respawn_maxfail 0
+       list args -c
+       list args 'env | logger -t $name; exec sleep $time'
+       list env 'v0=0'
+       list env 'v1=val with space'
+       list env 'name=demo0'
+       list env 'time=1799'
+       list file /tmp/sleep.conf
+
+config pservice
+       option disabled 1
+       option name 8021x
+       option command /usr/sbin/wpa_supplicant
+       option stdout 1
+       list args -i
+       list args eth0.1
+       list args -D
+       list args wired
+       list args -c
+       list args /etc/wpa_supplicant-eth0.1.conf
diff --git a/utils/pservice/files/pservice.init b/utils/pservice/files/pservice.init
new file mode 100755 (executable)
index 0000000..0a275f8
--- /dev/null
@@ -0,0 +1,85 @@
+#!/bin/sh /etc/rc.common
+# Copyright (C) 2017 Yousong Zhou
+
+START=99
+
+USE_PROCD=1
+
+pservice_list_cb() {
+       local val="$1"; shift
+       local param="$1"; shift
+
+       procd_append_param "$param" "$val"
+}
+
+pservice() {
+       local cfg="$1"
+
+       eval "$(validate_pservice_section "$cfg" pservice_validate_mklocal)"
+       validate_pservice_section "$cfg" || return 1
+       [ "$disabled" = 0 ] || return 0
+       [ -x "$command" ] || return 1
+
+       procd_open_instance "$name"
+       procd_set_param command "$command"
+       procd_set_param stderr "$stderr"
+       procd_set_param stdout "$stdout"
+       procd_set_param respawn "$respawn_threshold" "$respawn_timeout" "$respawn_maxfail"
+       [ -z "$args" ] || config_list_foreach "$cfg" args pservice_list_cb command
+       if [ -n "$env" ]; then
+               procd_set_param env
+               config_list_foreach "$cfg" env pservice_list_cb env
+       fi
+       if [ -n "$file" ]; then
+               procd_set_param file
+               config_list_foreach "$cfg" file pservice_list_cb file
+       fi
+       procd_close_instance
+}
+
+start_service() {
+       config_load 'pservice'
+       config_foreach pservice pservice
+}
+
+stop_service() {
+       true
+}
+
+service_triggers() {
+       procd_open_validate
+       validate_pservice_section
+       procd_close_validate
+}
+
+pservice_validate_mklocal() {
+       local tuple opts
+
+       shift 2
+       for tuple in "$@"; do
+               opts="${tuple%%:*} $opts"
+       done
+       [ -z "$opts" ] || echo "local $opts"
+}
+
+pservice_validate() {
+       uci_validate_section pservice "$@"
+}
+
+validate_pservice_section() {
+       local cfg="$1"; shift
+       local func="$1"; shift
+
+       "${func:-pservice_validate}" pservice "$cfg" \
+               "disabled:bool:0" \
+               "name:string" \
+               "env:regex('^[a-zA-Z_][a-zA-Z0-9_]*=.*$')" \
+               "command:file" \
+               "args:list(string)" \
+               "stderr:bool:0" \
+               "stdout:bool:0" \
+               "respawn_threshold:uinteger:3600" \
+               "respawn_timeout:uinteger:5" \
+               "respawn_maxfail:uinteger:5" \
+               "file:string"
+}