include $(TOPDIR)/rules.mk
PKG_NAME:=netdata
-PKG_VERSION:=1.16.1
-PKG_RELEASE:=2
+PKG_VERSION:=1.17.1
+PKG_RELEASE:=1
PKG_MAINTAINER:=Josef Schlehofer <pepe.schlehofer@gmail.com>, Daniel Engberg <daniel.engberg.lists@pyret.net>
PKG_LICENSE:=GPL-3.0-or-later
PKG_SOURCE:=$(PKG_NAME)-v$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://codeload.github.com/netdata/netdata/tar.gz/v$(PKG_VERSION)?
-PKG_HASH:=94492108a6e24e8b39c011ae35ff6f50a848d816af396fdf2b44655cecd78672
+PKG_HASH:=032f9001e2b7f774989a200519feeafa3a4d9b7c9e8fb0cfa43e68a8da6ad1bf
PKG_INSTALL:=1
PKG_FIXUP:=autoreconf
+++ /dev/null
---- a/collectors/python.d.plugin/python.d.plugin.in
-+++ b/collectors/python.d.plugin/python.d.plugin.in
-@@ -1,10 +1,4 @@
--#!/usr/bin/env bash
--'''':;
--if [[ "$OSTYPE" == "darwin"* ]]; then
-- export OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES
--fi
--exec "$(command -v python || command -v python3 || command -v python2 ||
--echo "ERROR python IS NOT AVAILABLE IN THIS SYSTEM")" "$0" "$@" # '''
-+#!/usr/bin/python3
-
- # -*- coding: utf-8 -*-
- # Description:
+++ /dev/null
-From 893bfc98d3f33d02ce6d6a3a48fb02c964156fb5 Mon Sep 17 00:00:00 2001
-From: Markos Fountoulakis <markos.fountoulakis.senior@gmail.com>
-Date: Wed, 14 Aug 2019 11:55:50 +0300
-Subject: [PATCH] Stop configure.ac from linking against dbengine and https
- libraries when dbengine or https are disabled
-
----
- configure.ac | 28 +++++++++++++---------------
- 1 file changed, 13 insertions(+), 15 deletions(-)
-
-diff --git a/configure.ac b/configure.ac
-index 56e484cc2c..8dbdcaa17f 100644
---- a/configure.ac
-+++ b/configure.ac
-@@ -280,9 +280,6 @@ AC_CHECK_LIB(
- [UV_LIBS="-luv"]
- )
-
--OPTIONAL_UV_CFLAGS="${UV_CFLAGS}"
--OPTIONAL_UV_LIBS="${UV_LIBS}"
--
-
- # -----------------------------------------------------------------------------
- # lz4 Extremely Fast Compression algorithm
-@@ -293,9 +290,6 @@ AC_CHECK_LIB(
- [LZ4_LIBS="-llz4"]
- )
-
--OPTIONAL_LZ4_CFLAGS="${LZ4_CFLAGS}"
--OPTIONAL_LZ4_LIBS="${LZ4_LIBS}"
--
-
- # -----------------------------------------------------------------------------
- # Judy General purpose dynamic array
-@@ -306,9 +300,6 @@ AC_CHECK_LIB(
- [JUDY_LIBS="-lJudy"]
- )
-
--OPTIONAL_JUDY_CFLAGS="${JUDY_CFLAGS}"
--OPTIONAL_JUDY_LIBS="${JUDY_LIBS}"
--
-
- # -----------------------------------------------------------------------------
- # zlib
-@@ -356,9 +347,6 @@ AC_CHECK_LIB(
- [SSL_LIBS="-lcrypto -lssl"]
- )
-
--OPTIONAL_SSL_CFLAGS="${SSL_CFLAGS}"
--OPTIONAL_SSL_LIBS="${SSL_LIBS}"
--
- # -----------------------------------------------------------------------------
- # JSON-C library
-
-@@ -391,6 +379,14 @@ AC_MSG_CHECKING([if netdata dbengine should be used])
- if test "${enable_dbengine}" != "no" -a "${UV_LIBS}" -a "${LZ4_LIBS}" -a "${JUDY_LIBS}" -a "${SSL_LIBS}"; then
- enable_dbengine="yes"
- AC_DEFINE([ENABLE_DBENGINE], [1], [netdata dbengine usability])
-+ OPTIONAL_UV_CFLAGS="${UV_CFLAGS}"
-+ OPTIONAL_UV_LIBS="${UV_LIBS}"
-+ OPTIONAL_LZ4_CFLAGS="${LZ4_CFLAGS}"
-+ OPTIONAL_LZ4_LIBS="${LZ4_LIBS}"
-+ OPTIONAL_JUDY_CFLAGS="${JUDY_CFLAGS}"
-+ OPTIONAL_JUDY_LIBS="${JUDY_LIBS}"
-+ OPTIONAL_SSL_CFLAGS="${SSL_CFLAGS}"
-+ OPTIONAL_SSL_LIBS="${SSL_LIBS}"
- else
- enable_dbengine="no"
- fi
-@@ -399,10 +395,12 @@ AM_CONDITIONAL([ENABLE_DBENGINE], [test "${enable_dbengine}" = "yes"])
-
- AC_MSG_CHECKING([if netdata https should be used])
- if test "${enable_https}" != "no" -a "${SSL_LIBS}"; then
-- enable_https="yes"
-- AC_DEFINE([ENABLE_HTTPS], [1], [netdata HTTPS usability])
-+ enable_https="yes"
-+ AC_DEFINE([ENABLE_HTTPS], [1], [netdata HTTPS usability])
-+ OPTIONAL_SSL_CFLAGS="${SSL_CFLAGS}"
-+ OPTIONAL_SSL_LIBS="${SSL_LIBS}"
- else
-- enable_https="no"
-+ enable_https="no"
- fi
- AC_MSG_RESULT([${enable_https}])
- AM_CONDITIONAL([ENABLE_HTTPS], [test "${enable_https}" = "yes"])
CATEGORY:=Languages
TITLE:=A compact Java Virtual Machine
URL:=http://jamvm.sourceforge.net/
- DEPENDS:=+zlib +libpthread +librt +CONFIG_powerpc64:libffi @!arc
+ DEPENDS:=+zlib +libpthread +librt +CONFIG_powerpc64:libffi @!arc @!aarch64
endef
define Package/jamvm/description
PKG_NAME:=perl-www-curl
PKG_VERSION:=4.17
-PKG_RELEASE:=5
+PKG_RELEASE:=6
PKG_SOURCE_URL:=http://www.cpan.org/authors/id/S/SZ/SZBALINT/
PKG_SOURCE:=WWW-Curl-$(PKG_VERSION).tar.gz
--- /dev/null
+--- a/Curl.xs
++++ b/Curl.xs
+@@ -70,7 +70,7 @@
+
+
+ typedef struct {
+-#ifdef __CURL_MULTI_H
++#ifdef CURLINC_MULTI_H
+ struct CURLM *curlm;
+ #else
+ struct void *curlm;
+@@ -234,7 +234,7 @@
+ {
+ perl_curl_multi *self;
+ Newz(1, self, 1, perl_curl_multi);
+-#ifdef __CURL_MULTI_H
++#ifdef CURLINC_MULTI_H
+ self->curlm=curl_multi_init();
+ #else
+ croak("curl version too old to support curl_multi_init()");
+@@ -245,7 +245,7 @@
+ /* delete the multi */
+ static void perl_curl_multi_delete(perl_curl_multi *self)
+ {
+-#ifdef __CURL_MULTI_H
++#ifdef CURLINC_MULTI_H
+ if (self->curlm)
+ curl_multi_cleanup(self->curlm);
+ Safefree(self);
+@@ -1065,7 +1065,7 @@
+ WWW::Curl::Multi curlm
+ WWW::Curl::Easy curl
+ CODE:
+-#ifdef __CURL_MULTI_H
++#ifdef CURLINC_MULTI_H
+ curl_multi_add_handle(curlm->curlm, curl->curl);
+ #endif
+
+@@ -1074,7 +1074,7 @@
+ WWW::Curl::Multi curlm
+ WWW::Curl::Easy curl
+ CODE:
+-#ifdef __CURL_MULTI_H
++#ifdef CURLINC_MULTI_H
+ curl_multi_remove_handle(curlm->curlm, curl->curl);
+ #endif
+
+@@ -1149,7 +1149,7 @@
+ PREINIT:
+ int remaining;
+ CODE:
+-#ifdef __CURL_MULTI_H
++#ifdef CURLINC_MULTI_H
+ while(CURLM_CALL_MULTI_PERFORM ==
+ curl_multi_perform(self->curlm, &remaining));
+ RETVAL = remaining;
PKG_LICENSE:=GPL-1.0-or-later Artistic-1.0-Perl
PKG_MAINTAINER:=Marcel Denia <naoir@gmx.net>
+PKG_CPE_ID:=cpe:/a:search.cpan:libwww-perl
PKG_BUILD_DIR:=$(BUILD_DIR)/perl/libwww-perl-$(PKG_VERSION)
PKG_MAINTAINER:=Daniel Golle <daniel@makrotopia.org>
PKG_LICENSE:=BSD-3-Clause
PKG_LICENSE_FILES:=LICENSE
+PKG_CPE_ID:=cpe:/a:palletsprojects:flask
include $(INCLUDE_DIR)/package.mk
include ../python3-package.mk
PKG_MAINTAINER:=Daniel Golle <daniel@makrotopia.org>
PKG_LICENSE:=BSD-3-Clause
PKG_LICENSE_FILES:=LICENSE
+PKG_CPE_ID:=cpe:/a:pocoo:jinja2
include $(INCLUDE_DIR)/package.mk
include ../python3-package.mk
PKG_MAINTAINER:=Alexandru Ardelean <ardeleanalex@gmail.com>
PKG_LICENSE:=HPND
PKG_LICENSE_FILES:=LICENSE
+PKG_CPE_ID:=cpe:/a:python:pillow
include $(INCLUDE_DIR)/package.mk
include ../python-package.mk
include $(TOPDIR)/rules.mk
PKG_NAME:=python-cryptodome
-PKG_VERSION:=3.8.2
+PKG_VERSION:=3.9.0
PKG_RELEASE:=1
PKG_SOURCE:=pycryptodome-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://files.pythonhosted.org/packages/source/p/pycryptodome
-PKG_HASH:=5bc40f8aa7ba8ca7f833ad2477b9d84e1bfd2630b22a46d9bbd221982f8c3ac0
+PKG_HASH:=dbeb08ad850056747aa7d5f33273b7ce0b9a77910604a1be7b7a6f2ef076213f
PKG_BUILD_DIR:=$(BUILD_DIR)/$(BUILD_VARIANT)-cryptodome-$(PKG_VERSION)
PKG_LICENSE:=BSD-2-Clause
PKG_LICENSE_FILES:=LICENSE.rst
PKG_MAINTAINER:=Richard Yu <yurichard3839@gmail.com>
+PKG_CPE_ID:=cpe:/a:pycryptodome:pycryptodome
include $(INCLUDE_DIR)/package.mk
include ../python-package.mk
include $(TOPDIR)/rules.mk
PKG_NAME:=python-cryptodomex
-PKG_VERSION:=3.8.2
+PKG_VERSION:=3.9.0
PKG_RELEASE:=1
PKG_SOURCE:=pycryptodomex-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://files.pythonhosted.org/packages/source/p/pycryptodomex
-PKG_HASH:=e50b15af6bbdc6b5f8bd70d818cb846b15303ffa6c371b799db561a403a21607
+PKG_HASH:=8b604f4fa1de456d6d19771b01c2823675a75a2c60e51a6b738f71fdfe865370
PKG_BUILD_DIR:=$(BUILD_DIR)/$(BUILD_VARIANT)-cryptodomex-$(PKG_VERSION)
PKG_VERSION:=4.3.2
PKG_RELEASE:=1
PKG_MAINTAINER:=Karel Kočí <cynerd@email.cz>
+PKG_CPE_ID:=cpe:/a:python:decorator
PKG_SOURCE_URL:=https://codeload.github.com/micheles/decorator/tar.gz/$(PKG_VERSION)?
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_LICENSE:=ISC
PKG_LICENSE_FILES:=LICENSE
PKG_MAINTAINER:=Josef Schlehofer <pepe.schlehofer@gmail.com>
+PKG_CPE_ID:=cpe:/a:debian:python-dns
PKG_BUILD_DIR:=$(BUILD_DIR)/$(BUILD_VARIANT)-dnspython-$(PKG_VERSION)
PKG_LICENSE:=BSD-3-Clause
PKG_LICENSE_FILES:=enum/LICENSE
PKG_MAINTAINER:=Jeffery To <jeffery.to@gmail.com>
+PKG_CPE_ID:=cpe:/a:python:enum34
include $(INCLUDE_DIR)/package.mk
include ../python-package.mk
PKG_LICENSE:=GPL-3.0-or-later
PKG_LICENSE_FILES:=LICENSE
PKG_MAINTAINER:=Daniel Golle <daniel@makrotopia.org>
+PKG_CPE_ID:=cpe:/a:python-gnupg_project:python-gnupg
PKG_BUILD_DIR:=$(BUILD_DIR)/$(BUILD_VARIANT)-$(PKG_NAME)-$(PKG_VERSION)
PKG_UNPACK=$(HOST_TAR) -C $(PKG_BUILD_DIR) --strip-components=1 -xzf $(DL_DIR)/$(PKG_SOURCE)
PKG_LICENSE:=BSD
PKG_LICENSE_FILES:=LICENSES.txt
PKG_MAINTAINER:=Alexandru Ardelean <ardeleanalex@gmail.com>
+PKG_CPE_ID:=cpe:/a:lxml:lxml
include $(INCLUDE_DIR)/package.mk
include ../python-package.mk
PKG_LICENSE:=Apache-2.0
PKG_LICENSE_FILES:=LICENSE
PKG_MAINTAINER:=Jeffery To <jeffery.to@gmail.com>, Alexandru Ardelean <ardeleanalex@gmail.com>
+PKG_CPE_ID:=cpe:/a:pyopenssl_project:pyopenssl
PKG_BUILD_DIR:=$(BUILD_DIR)/$(BUILD_VARIANT)-pyopenssl-$(PKG_VERSION)
PKG_BUILD_DIR:=$(BUILD_DIR)/$(BUILD_VARIANT)-requests-$(PKG_VERSION)
-PKG_CPE_ID:=cpe:/a:python-requests:requests
-
include $(INCLUDE_DIR)/package.mk
include ../python-package.mk
include ../python3-package.mk
include $(TOPDIR)/rules.mk
PKG_NAME:=python-sentry-sdk
-PKG_VERSION:=0.11.2
+PKG_VERSION:=0.12.2
PKG_RELEASE:=1
PKG_SOURCE:=sentry-sdk-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://files.pythonhosted.org/packages/source/s/sentry-sdk/
-PKG_HASH:=b4edcb1296fee107439345d0f8b23432b8732b7e28407f928367d0a4a36301a9
+PKG_HASH:=2529ab6f93914d01bcd80b1b16c15a025902350ab19af2033aa5ff797c1600ad
PKG_BUILD_DIR:=$(BUILD_DIR)/sentry-sdk-$(PKG_VERSION)
PKG_MAINTAINER:=Josef Schlehofer <josef.schlehofer@nic.cz>
PKG_VERSION:=3.16.0
PKG_RELEASE:=1
PKG_LICENSE:=MIT
+PKG_CPE_ID:=cpe:/a:simplejson_project:simplejson
PKG_SOURCE:=simplejson-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://files.pythonhosted.org/packages/source/s/simplejson
PKG_MAINTAINER:=Josef Schlehofer <josef.schlehofer@nic.cz>
PKG_LICENSE:=MIT
PKG_LICENSE_FILES:=LICENSE
+PKG_CPE_ID:=cpe:/a:sqlalchemy:sqlalchemy
include $(INCLUDE_DIR)/package.mk
include ../python3-package.mk
PKG_LICENSE:=MIT
PKG_LICENSE_FILES:=LICENSE
PKG_MAINTAINER:=Jeffery To <jeffery.to@gmail.com>
+PKG_CPE_ID:=cpe:/a:twistedmatrix:twisted
include $(INCLUDE_DIR)/package.mk
include ../python-package.mk
include $(TOPDIR)/rules.mk
PKG_NAME:=python-urllib3
-PKG_VERSION:=1.25.3
+PKG_VERSION:=1.25.6
PKG_RELEASE:=1
PKG_MAINTAINER:=Josef Schlehofer <pepe.schlehofer@gmail.com>
PKG_SOURCE:=urllib3-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://files.pythonhosted.org/packages/source/u/urllib3
-PKG_HASH:=dbe59173209418ae49d485b87d1681aefa36252ee85884c31346debd19463232
+PKG_HASH:=9a107b99a5393caf59c7aa3c1249c16e6879447533d0887f4336dde834c7be86
PKG_BUILD_DIR:=$(BUILD_DIR)/$(BUILD_VARIANT)-urllib3-$(PKG_VERSION)
PKG_NAME:=avro-c
PKG_VERSION:=1.8.2
-PKG_RELEASE:=1
+PKG_RELEASE:=2
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=@APACHE/avro/avro-$(PKG_VERSION)/c
PKG_HASH:=4639982b2b8fbd91fc7128fef672207129c959bb7900dd64b077ce4206edf10e
PKG_MAINTAINER:=John Clark <inindev@gmail.com>
-
-PKG_INSTALL:=1
-PKG_BUILD_PARALLEL:=1
-
PKG_LICENSE:=Apache-2.0
PKG_LICENSE_FILES:=LICENSE
+CMAKE_INSTALL:=1
+PKG_BUILD_PARALLEL:=1
+
include $(INCLUDE_DIR)/package.mk
include $(INCLUDE_DIR)/cmake.mk
This package contains the Apache Avro C library.
endef
-CMAKE_OPTIONS += \
- -DCMAKE_BUILD_TYPE:STRING=MINSIZEREL
-
-define Build/InstallDev
- $(INSTALL_DIR) $(1)/usr/{lib,include}
- $(CP) $(PKG_INSTALL_DIR)/usr/include/* $(1)/usr/include/
- $(CP) $(PKG_INSTALL_DIR)/usr/lib/libavro.{a,so*} $(1)/usr/lib/
-endef
-
define Package/avro-c/install
$(INSTALL_DIR) $(1)/usr/lib
$(CP) $(PKG_INSTALL_DIR)/usr/lib/libavro.so* $(1)/usr/lib/
--- /dev/null
+--- a/src/CMakeLists.txt
++++ b/src/CMakeLists.txt
+@@ -121,9 +121,8 @@ endif(WIN32)
+
+ # Install pkg-config file
+
+-set(prefix ${CMAKE_INSTALL_PREFIX})
+ set(VERSION ${AVRO_VERSION})
+-configure_file(avro-c.pc.in avro-c.pc)
++configure_file(avro-c.pc.in avro-c.pc @ONLY)
+ install(FILES ${CMAKE_CURRENT_BINARY_DIR}/avro-c.pc
+ DESTINATION lib/pkgconfig)
+
+--- a/src/avro-c.pc.in
++++ b/src/avro-c.pc.in
+@@ -1,7 +1,12 @@
++prefix=@CMAKE_INSTALL_PREFIX@
++exec_prefix=@CMAKE_INSTALL_PREFIX@
++libdir=${exec_prefix}/lib
++includedir=${prefix}/include
++
+ Name: avro-c
+ Description: C library for parsing Avro data
+ Version: @VERSION@
+ URL: http://avro.apache.org/
+-Libs: -L@prefix@/lib -lavro
+-Cflags: -I@prefix@/include
++Libs: -L${libdir} -lavro
++Cflags: -I${includedir}
+ Requires: @CODEC_PKG@
PKG_NAME:=boost
PKG_VERSION:=1.71.0
PKG_SOURCE_VERSION:=1_71_0
-PKG_RELEASE:=1
+PKG_RELEASE:=3
PKG_SOURCE:=$(PKG_NAME)_$(PKG_SOURCE_VERSION).tar.bz2
PKG_SOURCE_URL:=@SF/$(PKG_NAME)/$(PKG_NAME)/$(PKG_VERSION) https://dl.bintray.com/boostorg/release/$(PKG_VERSION)/source/
# Invisible config dependency
config boost-context-exclude
bool
- default y if (mips64 || arc || arc700)
+ default y if (TARGET_arc700 || TARGET_archs38 || TARGET_octeon || TARGET_octeontx)
default n
config boost-coroutine-exclude
config boost-fiber-exclude
bool
- default y if (TARGET_brcm47xx_generic || TARGET_brcm47xx_legacy || mips32 || mips64 || boost-coroutine-exclude)
+ default y if (TARGET_ar7 || TARGET_brcm47xx_generic || TARGET_brcm47xx_legacy || TARGET_lantiq_ase || TARGET_rb532 || mips32 || mips64 || boost-coroutine-exclude)
default n
menu "Select Boost Options"
( cd $(HOST_BUILD_DIR) ; \
./bootstrap.sh --prefix=$(STAGING_DIR_HOSTPKG) \
- --with-libraries=atomic,chrono,date_time,filesystem,headers,thread,system ;\
+ --with-libraries=atomic,context,date_time,filesystem,headers,program_options,regex,system,thread ;\
./b2 --ignore-site-config install )
endef
PKG_HASH:=a9240b653d02e8cfc52db48e8c4224426e528e1faa09b65e8ca08a197fad210b
PKG_MAINTAINER:=
PKG_LICENSE:=ISC
+PKG_CPE_ID:=cpe:/a:libconfuse_project:libconfuse
include $(INCLUDE_DIR)/package.mk
include $(TOPDIR)/rules.mk
PKG_NAME:=fbthrift
-PKG_VERSION:=2019.06.10.00
-PKG_RELEASE:=2
+PKG_SOURCE_DATE:=2019-09-22
+PKG_SOURCE_VERSION:=2f9839604e2569120cc4876c667388da6d7342f2
+PKG_RELEASE:=1
-PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
-PKG_SOURCE_URL:=https://codeload.github.com/facebook/fbthrift/tar.gz/v$(PKG_VERSION)?
-PKG_HASH:=0897c66564a2742b24dd6ae77812b42987c3493944d1d5d40d4005d71deb7cb5
+PKG_SOURCE:=$(PKG_NAME)-$(PKG_SOURCE_DATE).tar.gz
+PKG_SOURCE_URL:=https://codeload.github.com/facebook/fbthrift/tar.gz/$(PKG_SOURCE_VERSION)?
+PKG_HASH:=339a73610ad19070d33151127966aaf56e5df3640c238292fc74658c3075da3a
+PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(PKG_SOURCE_VERSION)
+HOST_BUILD_DIR:=$(BUILD_DIR_HOST)/$(PKG_NAME)-$(PKG_SOURCE_VERSION)
PKG_MAINTAINER:=Amol Bhave <ambhave@fb.com>
PKG_LICENSE:=Apache-2.0
PKG_LICENSE_FILES:=LICENSE
HOST_BUILD_DEPENDS:=libmstch/host
-PKG_BUILD_DEPENDS:=fbthrift/host
+PKG_BUILD_DEPENDS:=fbthrift/host libwangle librsocket-cpp
HOST_BUILD_PARALLEL:=1
PKG_BUILD_PARALLEL:=1
include $(INCLUDE_DIR)/cmake.mk
define Package/fbthrift
- SECTION:=libs
- CATEGORY:=Libraries
- TITLE:=Facebook's branch of Apache Thrift, including a new C++ server.
- DEPENDS:=+libwangle +libfmt +librsocket-cpp
+ SECTION:=libs
+ CATEGORY:=Libraries
+ TITLE:=Facebook's branch of Apache Thrift, including a new C++ server.
+ URL:=https://github.com/facebook/fbthrift
+ DEPENDS:=+libfmt +libfolly
endef
define Package/fbthrift/description
- Facebook's branch of Apache Thrift, including a new C++ server.
+ Facebook's branch of Apache Thrift, including a new C++ server.
endef
CMAKE_HOST_OPTIONS += \
- -DBoost_NO_BOOST_CMAKE=ON \
- -DCMAKE_SKIP_RPATH=FALSE \
+ -DBUILD_SHARED_LIBS=ON \
+ -DCMAKE_SKIP_RPATH=OFF \
-DCMAKE_INSTALL_RPATH="${STAGING_DIR_HOSTPKG}/lib" \
-Dcompiler_only=ON
CMAKE_OPTIONS += \
- -DBoost_NO_BOOST_CMAKE=ON \
- -DBUILD_SHARED_LIBS=ON \
-DCMAKE_POSITION_INDEPENDENT_CODE=ON \
+ -DCXX_STD=c++14 \
-Dlib_only=ON \
-DTHRIFT1="$(STAGING_DIR_HOSTPKG)/bin/thrift1" \
- -DTHRIFT_COMPILER_INCLUDE="$(STAGING_DIR_HOSTPKG)/include/"
+ -DTHRIFT_COMPILER_INCLUDE="$(STAGING_DIR_HOSTPKG)/include/" \
+ -Dpython-six_DIR=OFF
-define Package/fbthrift/install
- $(INSTALL_DIR) $(1)/usr/lib
- $(CP) $(PKG_INSTALL_DIR)/usr/lib/lib*.so* $(1)/usr/lib/
-endef
+# This should not be necessary as it is supposed to be done above.
+# Someone broke something.
+HOST_LDFLAGS += -Wl,-rpath="$(STAGING_DIR_HOSTPKG)/lib"
+
+# GCC9 bug
+TARGET_LDFLAGS += -fno-lto
+
+TARGET_CFLAGS += -ffunction-sections -fdata-sections
+TARGET_LDFLAGS += -Wl,--gc-sections,--as-needed
$(eval $(call BuildPackage,fbthrift))
$(eval $(call HostBuild))
--- /dev/null
+--- a/CMakeLists.txt
++++ b/CMakeLists.txt
+@@ -44,7 +44,11 @@ endif(MSVC)
+ find_package(
+ Boost 1.54.0 REQUIRED #1.54.0 or greater
+ COMPONENTS
++ context
++ date_time
+ filesystem
++ program_options
++ regex
+ system
+ thread
+ )
PKG_NAME:=fbzmq
PKG_VERSION:=2019.06.10.00
-PKG_RELEASE:=3
+PKG_RELEASE:=4
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://codeload.github.com/facebook/fbzmq/tar.gz/v$(PKG_VERSION)?
PKG_LICENSE_FILES:=LICENSE
CMAKE_SOURCE_SUBDIR:=fbzmq
+PKG_BUILD_DEPENDS:=fbthrift
PKG_BUILD_PARALLEL:=1
CMAKE_INSTALL:=1
include $(INCLUDE_DIR)/cmake.mk
define Package/fbzmq
- SECTION:=libs
- CATEGORY:=Libraries
- TITLE:=Facebook ZeroMQ wrappers.
- DEPENDS:=+fbthrift +libzmq +libsigar
+ SECTION:=libs
+ CATEGORY:=Libraries
+ TITLE:=Facebook ZeroMQ wrappers.
+ URL:=https://github.com/facebook/fbzmq
+ DEPENDS:=+libfolly +libzmq +libsigar
endef
define Package/fbzmq/description
- Facebook ZeroMQ wrappers.
+ Facebook ZeroMQ wrappers.
endef
CMAKE_OPTIONS += \
-DCMAKE_POSITION_INDEPENDENT_CODE=ON \
-DBUILD_TESTS=OFF \
- -DBUILD_SHARED_LIBS=ON \
-DTHRIFT1="$(STAGING_DIR_HOSTPKG)/bin/thrift1" \
-DTHRIFT_COMPILER_INCLUDE="$(STAGING_DIR_HOSTPKG)/include/"
TARGET_CXXFLAGS += -faligned-new
-
-define Package/fbzmq/install
- $(INSTALL_DIR) $(1)/usr/lib
- $(CP) $(PKG_INSTALL_DIR)/usr/lib/libfbzmq.so* $(1)/usr/lib/
-endef
+TARGET_CXXFLAGS += -ffunction-sections -fdata-sections -flto
+TARGET_LDFLAGS += -Wl,--gc-sections,--as-needed
$(eval $(call BuildPackage,fbzmq))
PKG_NAME:=jsoncpp
PKG_VERSION:=1.8.4
-PKG_RELEASE:=1
+PKG_RELEASE:=2
+
+PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
+PKG_SOURCE_URL:=https://codeload.github.com/open-source-parsers/jsoncpp/tar.gz/$(PKG_VERSION)?
+PKG_HASH:=c49deac9e0933bcb7044f08516861a2d560988540b23de2ac1ad443b219afdb6
+
PKG_MAINTAINER:=
PKG_LICENSE:=MIT
PKG_LICENSE_FILES:=LICENSE
-PKG_SOURCE_URL:=https://codeload.github.com/open-source-parsers/jsoncpp/tar.gz/$(PKG_VERSION)?
-PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
-PKG_HASH:=c49deac9e0933bcb7044f08516861a2d560988540b23de2ac1ad443b219afdb6
+PKG_BUILD_PARALLEL:=1
+CMAKE_INSTALL:=1
include $(INCLUDE_DIR)/package.mk
include $(INCLUDE_DIR)/cmake.mk
$(CP) $(PKG_INSTALL_DIR)/usr/lib/libjsoncpp.so* $(1)/usr/lib/
endef
-define Build/InstallDev
- $(INSTALL_DIR) $(1)/usr/include
- $(CP) $(PKG_INSTALL_DIR)/usr/include/json $(1)/usr/include
- $(INSTALL_DIR) $(1)/usr/lib/
- $(CP) $(PKG_INSTALL_DIR)/usr/lib/libjsoncpp.so* $(1)/usr/lib/
- $(INSTALL_DIR) $(1)/usr/lib/pkgconfig
- $(CP) $(PKG_INSTALL_DIR)/usr/lib/pkgconfig/jsoncpp.pc $(1)/usr/lib/pkgconfig/
-endef
-
$(eval $(call BuildPackage,jsoncpp))
--- /dev/null
+From 63dc2f77e33e9ff559e051cd2964960b9d3e761e Mon Sep 17 00:00:00 2001
+From: Rosen Penev <rosenp@gmail.com>
+Date: Mon, 16 Sep 2019 18:38:24 -0700
+Subject: [PATCH] pkgconfig: Fix for cross compilation
+
+exec_ and prefix must be overridden in such a case.
+
+Makes the .pc file more consistent with other projects.
+---
+ pkg-config/jsoncpp.pc.in | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/pkg-config/jsoncpp.pc.in b/pkg-config/jsoncpp.pc.in
+index dea51f51..d4fa9ef2 100644
+--- a/pkg-config/jsoncpp.pc.in
++++ b/pkg-config/jsoncpp.pc.in
+@@ -1,5 +1,7 @@
+-libdir=@CMAKE_INSTALL_FULL_LIBDIR@
+-includedir=@CMAKE_INSTALL_FULL_INCLUDEDIR@
++prefix=@CMAKE_INSTALL_PREFIX@
++exec_prefix=@CMAKE_INSTALL_PREFIX@
++libdir=${exec_prefix}/@CMAKE_INSTALL_LIBDIR@
++includedir=${prefix}/@CMAKE_INSTALL_INCLUDEDIR@
+
+ Name: jsoncpp
+ Description: A C++ library for interacting with JSON
PKG_HASH:=ea2449ad3f201ec590d811db9da6d02ffc5e87a677d06b92ab15363d8cb59782
PKG_MAINTAINER:=Ted Hess <thess@kitschensync.net>
+PKG_CPE_ID:=cpe:/a:audiofile:audiofile
PKG_FIXUP:=autoreconf
PKG_INSTALL=1
include $(TOPDIR)/rules.mk
PKG_NAME:=libfizz
-PKG_VERSION:=2019.09.09.00
+PKG_VERSION:=2019.09.22.00
PKG_RELEASE:=1
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
-PKG_SOURCE_URL:=https://codeload.github.com/facebookincubator/fizz/tar.gz/v$(PKG_VERSION)?
-PKG_HASH:=c8e0317fb16b283784ef8607440d0077b4425d1a28d74ea6b083a385bcfb14f6
-PKG_BUILD_DIR:=$(BUILD_DIR)/fizz-$(PKG_VERSION)
+PKG_SOURCE_VERSION:=0ccff0813dea8d3f730d9c3a1b80e6936522d3f3
+PKG_SOURCE_URL:=https://codeload.github.com/facebookincubator/fizz/tar.gz/$(PKG_SOURCE_VERSION)?
+PKG_HASH:=32a8de32e7a83e53ad44931ac637f6e17ce7f81bd65bc835f14d59442041e7f6
+PKG_BUILD_DIR:=$(BUILD_DIR)/fizz-$(PKG_SOURCE_VERSION)
PKG_MAINTAINER:=Amol Bhave <ambhave@fb.com>
PKG_LICENSE:=BSD-3-Clause
PKG_LICENSE_FILES:=LICENSE
+PKG_CPE_ID:=cpe:/a:facebook:fizz
PKG_BUILD_PARALLEL:=1
CMAKE_SOURCE_SUBDIR:=fizz
endef
CMAKE_OPTIONS += \
- -DBoost_NO_BOOST_CMAKE=ON \
-DBUILD_EXAMPLES=OFF
TARGET_CXXFLAGS += -ffunction-sections -fdata-sections -flto
include $(TOPDIR)/rules.mk
PKG_NAME:=libfolly
-PKG_VERSION:=2019.09.09.00
-PKG_RELEASE:=2
+PKG_VERSION:=2019.09.22.00
+PKG_RELEASE:=1
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
-PKG_SOURCE_URL:=https://codeload.github.com/facebook/folly/tar.gz/v$(PKG_VERSION)?
-PKG_HASH:=1aac1bb5c2e22fea68780734069ee73188523c5ede8cd210e3fa81a1ae56bfe0
-PKG_BUILD_DIR:=$(BUILD_DIR)/folly-$(PKG_VERSION)
+PKG_SOURCE_VERSION:=af2373b9858c61685eb6eb2f1db99d9a31edd58e
+PKG_SOURCE_URL:=https://codeload.github.com/facebook/folly/tar.gz/$(PKG_SOURCE_VERSION)?
+PKG_HASH:=fbf25a96e5487fcd959ff50ff1c5413c8e18b8aaa9af3d2406156d750167affd
+PKG_BUILD_DIR:=$(BUILD_DIR)/folly-$(PKG_SOURCE_VERSION)
PKG_MAINTAINER:=Amol Bhave <ambhave@fb.com>
PKG_LICENSE:=Apache-2.0
-DFOLLY_HAVE_WCHAR_SUPPORT_EXITCODE__TRYRUN_OUTPUT=OFF \
-DHAVE_VSNPRINTF_ERRORS_EXITCODE=OFF \
-DHAVE_VSNPRINTF_ERRORS_EXITCODE__TRYRUN_OUTPUT=OFF \
- -DBUILD_SHARED_LIBS=ON \
- -DBoost_NO_BOOST_CMAKE=ON
+ -DBUILD_SHARED_LIBS=ON
TARGET_CXXFLAGS += -ffunction-sections -fdata-sections -flto
TARGET_LDFLAGS += -Wl,--gc-sections,--as-needed
The library is linked with your program in userspace, no kernel driver required.
endef
+CMAKE_OPTIONS += -DBoost_NO_BOOST_CMAKE=ON
+
define Build/InstallDev
$(call Build/InstallDev/cmake,$(1))
$(SED) 's,/usr/bin,/usr,g' $(1)/usr/lib/pkgconfig/libftdi.pc
--- /dev/null
+--- a/CMakeLists.txt
++++ b/CMakeLists.txt
+@@ -89,8 +89,6 @@
+ GV_LT(VERSION GDLIB_LIB_VERSION)
+ MESSAGE(STATUS "gd shared lib version ${GDLIB_LIB_SOVERSION} (${GDLIB_LIB_VERSION})")
+
+- SET(CMAKE_REQUIRED_INCLUDES "/usr/include" "/usr/local/include")
+-
+ include(CheckIncludeFiles)
+ include(CheckIncludeFile)
+
PKG_RELEASE:=1
PKG_LICENSE:=GPL-2.0-or-later LGPL-3.0-or-later
PKG_LICENSE_FILES:=COPYING COPYINGv2 COPYING.LESSERv3
+PKG_CPE_ID:=cpe:/a:libidn2_project:libidn2
PKG_SOURCE_URL:=@GNU/libidn
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
include $(TOPDIR)/rules.mk
PKG_NAME:=librsocket-cpp
-PKG_SOURCE_DATE:=2019-09-12
-PKG_SOURCE_VERSION:=9226988e3da89eff8cd55952ccae61fcac0881fa
+PKG_SOURCE_DATE:=2019-09-22
+PKG_SOURCE_VERSION:=b268907312af589828634c143b9f35b58cd6cfea
PKG_RELEASE:=1
PKG_SOURCE:=$(PKG_NAME)-$(PKG_SOURCE_DATE).tar.gz
PKG_SOURCE_URL:=https://codeload.github.com/rsocket/rsocket-cpp/tar.gz/$(PKG_SOURCE_VERSION)?
-PKG_HASH:=3273012c1ffa149fddfac2b547315834e9a02ff0d8d1357196fec95391150ed6
+PKG_HASH:=7ab4655d3d8148ce689f74a116ae7393d9e802f91d0dfd697ffd8daeff800e61
PKG_BUILD_DIR:=$(BUILD_DIR)/rsocket-cpp-$(PKG_SOURCE_VERSION)
PKG_MAINTAINER:=Amol Bhave <ambhave@fb.com>
SECTION:=libs
CATEGORY:=Libraries
TITLE:=C++ implementation of RSocket
+ URL:=https://rsocket.io/
DEPENDS:=+libyarpl
endef
SECTION:=libs
CATEGORY:=Libraries
TITLE:=yarpl: Yet Another Reactive Programming Library
+ URL:=https://rsocket.io/
DEPENDS:=+libfolly
endef
CMAKE_OPTIONS += \
-DBUILD_BENCHMARKS=OFF \
-DBUILD_EXAMPLES=OFF \
- -DBUILD_TESTS=OFF
+ -DBUILD_TESTS=OFF \
+ -DCMAKE_POSITION_INDEPENDENT_CODE=ON
$(eval $(call BuildPackage,librsocket-cpp))
$(eval $(call BuildPackage,libyarpl))
--- /dev/null
+--- a/CMakeLists.txt
++++ b/CMakeLists.txt
+@@ -194,6 +194,9 @@ find_package(Gflags REQUIRED)
+ # find glog::glog to satisfy the folly dep.
+ find_package(Glog REQUIRED)
+
++find_package(Boost REQUIRED COMPONENTS system thread filesystem regex context
++ date_time program_options)
++
+ include_directories(SYSTEM ${OPENSSL_INCLUDE_DIR})
+
+ include_directories(SYSTEM ${GFLAGS_INCLUDE_DIR})
PKG_SOURCE_URL:=https://github.com/seccomp/libseccomp/releases/download/v$(PKG_VERSION)/
PKG_HASH:=1ca3735249af66a1b2f762fe6e710fcc294ad7185f1cc961e5bd83f9988006e8
PKG_MAINTAINER:=Nikos Mavrogiannopoulos <nmav@gnutls.org>
+PKG_CPE_ID:=cpe:/a:libseccomp_project:libseccomp
PKG_BUILD_PARALLEL:=1
PKG_INSTALL:=1
PKG_NAME:=libsoxr
PKG_VERSION:=0.1.3
-PKG_RELEASE:=1
+PKG_RELEASE:=2
PKG_SOURCE_URL:=@SF/project/soxr/
PKG_SOURCE:=soxr-$(PKG_VERSION)-Source.tar.xz
PKG_MAINTAINER:=Ted Hess <thess@kitschensync.net>, \
Mike Brady <mikebrady@eircom.net>
-
PKG_LICENSE:=LGPL-2.1
PKG_LICENSE_FILES:=LICENCE
PKG_CPE_ID:=cpe:/a:sox:sox
+CMAKE_INSTALL:=1
PKG_BUILD_PARALLEL:=1
-PKG_INSTALL:=1
include $(INCLUDE_DIR)/package.mk
include $(INCLUDE_DIR)/cmake.mk
SECTION:=libs
CATEGORY:=Libraries
TITLE:=The SoX Resampler library
- URL:=http://sourceforge.net/projects/soxr/
+ URL:=https://sourceforge.net/projects/soxr/
DEPENDS:= +libpthread
endef
CMAKE_OPTIONS+= -DHAVE_WORDS_BIGENDIAN_EXITCODE=$(if $(CONFIG_BIG_ENDIAN),0,1)
CMAKE_OPTIONS+= -DWITH_OPENMP=0
-define Build/InstallDev
- $(INSTALL_DIR) $(1)/usr/include
- $(CP) $(PKG_INSTALL_DIR)/usr/include/soxr.h $(1)/usr/include/
- $(INSTALL_DIR) $(1)/usr/lib/pkgconfig
- $(CP) $(PKG_INSTALL_DIR)/usr/lib/libsoxr.so* $(1)/usr/lib/
- $(CP) $(PKG_INSTALL_DIR)/usr/lib/pkgconfig/soxr.pc $(1)/usr/lib/pkgconfig/
-endef
-
define Package/libsoxr/install
$(INSTALL_DIR) $(1)/usr/lib
$(CP) $(PKG_INSTALL_DIR)/usr/lib/libsoxr.so* $(1)/usr/lib/
--- /dev/null
+--- a/src/CMakeLists.txt
++++ b/src/CMakeLists.txt
+@@ -88,7 +88,7 @@ if (BUILD_FRAMEWORK)
+ set_target_properties (${PROJECT_NAME} PROPERTIES FRAMEWORK TRUE)
+ elseif (NOT WIN32)
+ set (TARGET_PCS ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}.pc)
+- configure_file (${CMAKE_CURRENT_SOURCE_DIR}/${PROJECT_NAME}.pc.in ${TARGET_PCS})
++ configure_file (${CMAKE_CURRENT_SOURCE_DIR}/${PROJECT_NAME}.pc.in ${TARGET_PCS} @ONLY)
+ install (FILES ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}.pc DESTINATION ${LIB_INSTALL_DIR}/pkgconfig)
+ endif ()
+
+@@ -112,7 +112,7 @@ if (WITH_LSR_BINDINGS)
+ set_target_properties (${LSR} PROPERTIES FRAMEWORK TRUE)
+ elseif (NOT WIN32)
+ set (TARGET_PCS "${TARGET_PCS} ${CMAKE_CURRENT_BINARY_DIR}/${LSR}.pc")
+- configure_file (${CMAKE_CURRENT_SOURCE_DIR}/${LSR}.pc.in ${CMAKE_CURRENT_BINARY_DIR}/${LSR}.pc)
++ configure_file (${CMAKE_CURRENT_SOURCE_DIR}/${LSR}.pc.in ${CMAKE_CURRENT_BINARY_DIR}/${LSR}.pc @ONLY)
+ install (FILES ${CMAKE_CURRENT_BINARY_DIR}/${LSR}.pc DESTINATION ${LIB_INSTALL_DIR}/pkgconfig)
+ endif ()
+ endif ()
+--- a/src/soxr-lsr.pc.in
++++ b/src/soxr-lsr.pc.in
+@@ -1,5 +1,10 @@
+-Name: ${LSR}
+-Description: ${DESCRIPTION_SUMMARY} (with libsamplerate-like bindings)
+-Version: ${PROJECT_VERSION}
+-Libs: -L${LIB_INSTALL_DIR} -l${LSR}
+-Cflags: -I${INCLUDE_INSTALL_DIR}
++prefix=@CMAKE_INSTALL_PREFIX@
++exec_prefix=@CMAKE_INSTALL_PREFIX@
++libdir=${exec_prefix}/lib
++includedir=${prefix}/include
++
++Name: @LSR@
++Description: @DESCRIPTION_SUMMARY@ (with libsamplerate-like bindings)
++Version: @PROJECT_VERSION@
++Libs: -L${libdir} -l@LSR@
++Cflags: -I${includedir}
+--- a/src/soxr.pc.in
++++ b/src/soxr.pc.in
+@@ -1,5 +1,10 @@
+-Name: ${PROJECT_NAME}
+-Description: ${DESCRIPTION_SUMMARY}
+-Version: ${PROJECT_VERSION}
+-Libs: -L${LIB_INSTALL_DIR} -l${PROJECT_NAME}
+-Cflags: -I${INCLUDE_INSTALL_DIR}
++prefix=@CMAKE_INSTALL_PREFIX@
++exec_prefix=@CMAKE_INSTALL_PREFIX@
++libdir=${exec_prefix}/lib
++includedir=${prefix}/include
++
++Name: @PROJECT_NAME@
++Description: @DESCRIPTION_SUMMARY@
++Version: @PROJECT_VERSION@
++Libs: -L${libdir} -l@PROJECT_NAME@
++Cflags: -I${includedir}
include $(TOPDIR)/rules.mk
PKG_NAME:=libudev-fbsd
-
-PKG_RELEASE:=1
-PKG_VERSION:=20171216
+PKG_SOURCE_DATE:=2017-12-16
PKG_SOURCE_VERSION:=fa190fdf0b22a41b5f42e3a722f754c08ad7b337
+PKG_RELEASE:=1
-PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)-$(PKG_SOURCE_VERSION).tar.xz
-PKG_MIRROR_HASH:=d4638099fd288a293a165304541eb9c01e828bb358a0091caa02c1327c20964b
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://github.com/jiixyj/libudev-fbsd.git
+PKG_MIRROR_HASH:=dac2d960191fe970c974f022d008ef3b712ad331e2426a51debd5aa2e208f02b
+PKG_MAINTAINER:=Daniel Golle <daniel@makrotopia.org>
PKG_LICENSE:=MIT
PKG_LICENSE_FILES:=COPYING
-PKG_MAINTAINER:=Daniel Golle <daniel@makrotopia.org>
-PKG_INSTALL:=1
+CMAKE_INSTALL:=1
+PKG_BUILD_PARALLEL:=1
include $(INCLUDE_DIR)/package.mk
include $(INCLUDE_DIR)/cmake.mk
CONFLICTS:=libudev eudev udev
endef
-define Build/InstallDev
- $(INSTALL_DIR) $(1)/usr/include
- $(CP) $(PKG_INSTALL_DIR)/usr/include/* $(1)/usr/include/
- $(INSTALL_DIR) $(1)/usr/lib
- $(CP) $(PKG_INSTALL_DIR)/usr/lib/*.so* $(1)/usr/lib/
- $(INSTALL_DIR) $(1)/usr/lib/pkgconfig
- $(CP) $(PKG_INSTALL_DIR)/usr/lib/pkgconfig/* $(1)/usr/lib/pkgconfig/
-endef
-
define Package/libudev-fbsd/install
$(INSTALL_DIR) $(1)/usr/lib
$(CP) $(PKG_INSTALL_DIR)/usr/lib/*.so* $(1)/usr/lib/
CATEGORY:=Libraries
TITLE:=User-space Read-Copy-Update library
URL:=https://lttng.org/
- DEPENDS:=+libpthread @!arc
+ DEPENDS:=+libpthread @!TARGET_arc770 @!TARGET_archs38
endef
define Package/liburcu/description
include $(TOPDIR)/rules.mk
PKG_NAME:=libwangle
-PKG_VERSION:=2019.09.09.00
+PKG_VERSION:=2019.09.22.00
PKG_RELEASE:=1
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
-PKG_SOURCE_URL:=https://codeload.github.com/facebook/wangle/tar.gz/v$(PKG_VERSION)?
-PKG_HASH:=b790ba6b975b90d7935e4ae12f84c77d6be9f9a94308a3e0b974d83cf0e01d8b
-PKG_BUILD_DIR:=$(BUILD_DIR)/wangle-$(PKG_VERSION)
+PKG_SOURCE_VERSION:=fb79e25af6d4e184587b62475fb3444643ae8867
+PKG_SOURCE_URL:=https://codeload.github.com/facebook/wangle/tar.gz/$(PKG_SOURCE_VERSION)?
+PKG_HASH:=e973cad492d025c93af49c245b1f77b484df09a4d70ade069648d633d5a85e0e
+PKG_BUILD_DIR:=$(BUILD_DIR)/wangle-$(PKG_SOURCE_VERSION)
PKG_MAINTAINER:=Amol Bhave <ambhave@fb.com>
PKG_LICENSE:=Apache-2.0
URL:=https://github.com/facebook/wangle
endef
-CMAKE_OPTIONS += \
- -DBoost_NO_BOOST_CMAKE=ON
-
TARGET_CXXFLAGS += -ffunction-sections -fdata-sections -flto
TARGET_LDFLAGS += -Wl,--gc-sections,--as-needed
PKG_MAINTAINER:=
PKG_LICENSE:=LGPL-2.1 GPL-2.0
PKG_LICENSE_FILES:=COPYING
+PKG_CPE_ID:=cpe:/a:lttng:ust
PKG_FIXUP:=autoreconf
PKG_USE_MIPS16:=0
include $(TOPDIR)/rules.mk
PKG_NAME:=measurement-kit
-PKG_VERSION:=0.10.5
-PKG_RELEASE:=2
+PKG_VERSION:=0.10.6
+PKG_RELEASE:=1
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://codeload.github.com/measurement-kit/measurement-kit/tar.gz/v$(PKG_VERSION)?
-PKG_HASH:=8b83f04f4d3c653f93bcee5a6cc5e32e6595a3feb99526017d78099fd90d4a75
+PKG_HASH:=5ec94e522c3bc43cbf749659c18d4b13bcfbb2874db4d6b4e21b160d76dd5bd0
PKG_MAINTAINER:=Jan Pavlinec <jan.pavlinec@nic.cz>
PKG_LICENSE:=BSD-2-Clause
PKG_LICENSE:=BSL-1.0
PKG_LICENSE_FILES:=LICENSE
+PKG_CPE_ID:=cpe:/a:pocoproject:poco
PKG_BUILD_PARALLEL:=1
PKG_INSTALL:=1
PKG_NAME:=libprotobuf-c
PKG_VERSION:=1.3.2
-PKG_RELEASE:=1
+PKG_RELEASE:=2
PKG_SOURCE:=protobuf-c-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://github.com/protobuf-c/protobuf-c/releases/download/v$(PKG_VERSION)
PKG_LICENSE:=BSD-2-Clause
PKG_LICENSE_FILES:=LICENSE
-PKG_BUILD_DEPENDS:=protobuf-c/host
HOST_BUILD_DEPENDS:=protobuf/host
+PKG_BUILD_DEPENDS:=protobuf
-PKG_INSTALL:=1
+CMAKE_INSTALL:=1
PKG_BUILD_PARALLEL:=1
+CMAKE_SOURCE_SUBDIR:=build-cmake
include $(INCLUDE_DIR)/package.mk
include $(INCLUDE_DIR)/host-build.mk
+include $(INCLUDE_DIR)/cmake.mk
define Package/libprotobuf-c
TITLE:=Protocol Buffers library
internal RPC protocols and file formats.
endef
-CONFIGURE_ARGS += \
- --enable-shared \
- --enable-static \
- --disable-protoc
+CMAKE_HOST_OPTIONS += \
+ -DBUILD_SHARED_LIBS=ON \
+ -DCMAKE_SKIP_RPATH=OFF \
+ -DCMAKE_INSTALL_RPATH="${STAGING_DIR_HOSTPKG}/lib"
-define Build/InstallDev
- $(INSTALL_DIR) $(1)/usr/include/
- $(CP) $(PKG_INSTALL_DIR)/usr/include/* $(1)/usr/include/
- $(INSTALL_DIR) $(1)/usr/lib
- $(CP) $(PKG_INSTALL_DIR)/usr/lib/libprotobuf-c.{a,la,so*} $(1)/usr/lib/
- $(INSTALL_DIR) $(1)/usr/lib/pkgconfig
- $(CP) $(PKG_INSTALL_DIR)/usr/lib/pkgconfig/* $(1)/usr/lib/pkgconfig/
-endef
+CMAKE_OPTIONS += \
+ -DBUILD_SHARED_LIBS=ON
define Package/libprotobuf-c/install
$(INSTALL_DIR) $(1)/usr/lib
- $(CP) $(PKG_INSTALL_DIR)/usr/lib/libprotobuf-c.so.* $(1)/usr/lib/
+ $(CP) $(PKG_INSTALL_DIR)/usr/lib/libprotobuf-c.so* $(1)/usr/lib/
endef
$(eval $(call BuildPackage,libprotobuf-c))
PKG_NAME:=protobuf
PKG_VERSION:=3.8.0
-PKG_RELEASE:=2
+PKG_RELEASE:=4
PKG_SOURCE:=$(PKG_NAME)-cpp-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://github.com/google/protobuf/releases/download/v$(PKG_VERSION)
PKG_MAINTAINER:=Ken Keys <kkeys@caida.org>
PKG_LICENSE:=BSD-3-Clause
PKG_LICENSE_FILES:=LICENSE
+PKG_CPE_ID:=cpe:/a:google:protobuf
HOST_BUILD_PARALLEL:=1
PKG_BUILD_PARALLEL:=1
CMAKE_HOST_OPTIONS += \
-Dprotobuf_BUILD_PROTOC_BINARIES=ON \
- -Dprotobuf_BUILD_TESTS=OFF
+ -Dprotobuf_BUILD_TESTS=OFF \
+ -DBUILD_SHARED_LIBS=ON \
+ -DCMAKE_SKIP_RPATH=OFF \
+ -DCMAKE_INSTALL_RPATH="${STAGING_DIR_HOSTPKG}/lib"
CMAKE_OPTIONS += \
-Dprotobuf_BUILD_PROTOC_BINARIES=ON \
--- /dev/null
+--- a/cmake/install.cmake
++++ b/cmake/install.cmake
+@@ -16,8 +16,8 @@ foreach(_library ${_protobuf_libraries})
+ $<BUILD_INTERFACE:${protobuf_source_dir}/src>
+ $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>)
+ if (UNIX AND NOT APPLE)
+- set_property(TARGET ${_library}
+- PROPERTY INSTALL_RPATH "$ORIGIN")
++# set_property(TARGET ${_library}
++# PROPERTY INSTALL_RPATH "$ORIGIN")
+ elseif (APPLE)
+ set_property(TARGET ${_library}
+ PROPERTY INSTALL_RPATH "@loader_path")
+@@ -32,8 +32,8 @@ if (protobuf_BUILD_PROTOC_BINARIES)
+ install(TARGETS protoc EXPORT protobuf-targets
+ RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT protoc)
+ if (UNIX AND NOT APPLE)
+- set_property(TARGET protoc
+- PROPERTY INSTALL_RPATH "$ORIGIN/../lib")
++# set_property(TARGET protoc
++# PROPERTY INSTALL_RPATH "$ORIGIN/../lib")
+ elseif (APPLE)
+ set_property(TARGET protoc
+ PROPERTY INSTALL_RPATH "@loader_path/../lib")
PKG_MAINTAINER:=Jan Pavlinec <jan.pavlinec@nic.cz>
PKG_LICENSE:=BSD-3-Clause
PKG_LICENSE_FILES:=COPYING
+PKG_CPE_ID:=cpe:/a:pivotal_software:redis
+
PKG_INSTALL:=1
include $(INCLUDE_DIR)/package.mk
PKG_MAINTAINER:=
PKG_LICENSE:=MIT
PKG_LICENSE_FILES:=COPYING
+PKG_CPE_ID:=cpe:/a:pyyaml_project:pyyaml
PKG_INSTALL:=1
PKG_BUILD_PARALLEL:=1
include $(TOPDIR)/rules.mk
PKG_NAME:=dovecot-pigeonhole
-PKG_VERSION_PLUGIN:=0.5.5
+PKG_VERSION_PLUGIN:=0.5.7.2
PKG_VERSION_DOVECOT:=$(shell make --no-print-directory -C ../dovecot/ val.PKG_VERSION V=s)
PKG_VERSION:=$(PKG_VERSION_DOVECOT)-$(PKG_VERSION_PLUGIN)
PKG_RELEASE:=1
PKG_SOURCE:=dovecot-$(DOVECOT_VERSION)-pigeonhole-$(PKG_VERSION_PLUGIN).tar.gz
PKG_SOURCE_URL:=https://pigeonhole.dovecot.org/releases/$(DOVECOT_VERSION)
-PKG_HASH:=cbaa106e1c2b23824420efdd6a9f8572c64c8dccf75a3101a899b6ddb25149a5
+PKG_HASH:=d59d0c5c5225a126e5b98bf95d75e8dd368bdeeb3da2e9766dbe4fddaa9411b0
PKG_LICENSE:=LGPL-2.1
PKG_LICENSE_FILES:=COPYING COPYING.LGPL
PKG_CPE_ID:=cpe:/a:dovecot:pigeonhole
PKG_SOURCE_URL:=@GNOME/grilo-plugins/0.3/
PKG_HASH:=fc2f3bbc319136e53e1efb6659fa65b6af45db114b6621008f9abba64fad6820
-PKG_BUILD_DEPENDS:=glib2
PKG_BUILD_PARALLEL:=1
PKG_INSTALL:=1
PKG_FIXUP:=autoreconf
include $(INCLUDE_DIR)/package.mk
include $(INCLUDE_DIR)/cmake.mk
-PKG_BUILD_DEPENDS:=MJPG_STREAMER_V4L2:libv4l zmq protobuf-c
+PKG_BUILD_DEPENDS:=MJPG_STREAMER_V4L2:libv4l zmq protobuf-c/host
define Package/mjpg-streamer
SECTION:=multimedia
-# dns based ad/abuse domain blocking
+# DNS based ad/abuse domain blocking
## Description
-A lot of people already use adblocker plugins within their desktop browsers, but what if you are using your (smart) phone, tablet, watch or any other (wlan) gadget!? Getting rid of annoying ads, trackers and other abuse sites (like facebook) is simple: block them with your router. When the dns server on your router receives dns requests, you will sort out queries that ask for the resource records of ad servers and return a simple 'NXDOMAIN'. This is nothing but **N**on-e**X**istent Internet or Intranet domain name, if domain name is unable to resolved using the dns server, a condition called the 'NXDOMAIN' occurred.
+A lot of people already use adblocker plugins within their desktop browsers, but what if you are using your (smart) phone, tablet, watch or any other (wlan) gadget!? Getting rid of annoying ads, trackers and other abuse sites (like facebook) is simple: block them with your router. When the DNS server on your router receives DNS requests, you will sort out queries that ask for the resource records of ad servers and return a simple 'NXDOMAIN'. This is nothing but **N**on-e**X**istent Internet or Intranet domain name; if a domain name cannot be resolved using the DNS server, a condition called 'NXDOMAIN' occurs.
## Main Features
* support of the following domain blocklist sources (free for private usage, for commercial use please check their individual licenses):
* [bitcoin](https://github.com/hoshsadiq/adblock-nocoin-list)
* => infrequent updates, approx. 15 entries
* [blacklist]()
- * => static local blacklist, located by default in '/etc/adblock/adblock.blacklist'
+ * => static local blacklist, located by default in `/etc/adblock/adblock.blacklist`
* [disconnect](https://disconnect.me)
* => numerous updates on the same day, approx. 6.500 entries (enabled by default)
* [dshield](http://dshield.org)
* => weekly updates, approx. 2.500 entries (enabled by default)
* zero-conf like automatic installation & setup, usually no manual changes needed
* simple but yet powerful adblock engine: adblock does not use error prone external iptables rulesets, http pixel server instances and things like that
-* support four different dns backends: dnsmasq, unbound, named (bind) and kresd
-* support two different dns blocking variants: 'nxdomain' (default, supported by all backends), 'null' (supported only by dnsmasq)
-* support six different download utilities: uclient-fetch, wget, curl, aria2c, wget-nossl, busybox-wget
+* support four different DNS backends: `dnsmasq`, `unbound`, `named` (bind) and `kresd`
+* support two different DNS blocking variants: `nxdomain` (default, supported by all backends), `null` (supported only by `dnsmasq`)
+* support six different download utilities: `uclient-fetch`, `wget`, `curl`, `aria2c`, `wget-nossl`, `busybox-wget`
* fast downloads & list processing as they are handled in parallel running background jobs (see 'Download Queue')
-* provide 'http only' mode without installed ssl library for all non-SSL blocklist sources
+* provide `http only` mode without installed ssl library for all non-SSL blocklist sources
* support a wide range of router modes, even AP modes are supported
* full IPv4 and IPv6 support
-* provide top level domain compression ('tld compression'), this feature removes thousands of needless host entries from the blocklist and lowers the memory footprint for the dns backend
+* provide top level domain compression (`tld compression`), this feature removes thousands of needless host entries from the blocklist and lowers the memory footprint for the DNS backend
* provide a 'DNS File Reset', where the final DNS blockfile will be purged after DNS backend loading to save storage space
* blocklist source parsing by fast & flexible regex rulesets
-* overall duplicate removal in central blocklist 'adb_list.overall'
-* additional blacklist for manual overrides, located by default in /etc/adblock/adblock.blacklist or in LuCI
-* additional whitelist for manual overrides, located by default in /etc/adblock/adblock.whitelist or in LuCI
-* quality checks during blocklist update to ensure a reliable dns backend service
+* overall duplicate removal in central blocklist `adb_list.overall`
+* additional blacklist for manual overrides, located by default in `/etc/adblock/adblock.blacklist` or in LuCI
+* additional whitelist for manual overrides, located by default in `/etc/adblock/adblock.whitelist` or in LuCI
+* quality checks during blocklist update to ensure a reliable DNS backend service
* minimal status & error logging to syslog, enable debug logging to receive more output
-* procd based init system support (start/stop/restart/reload/suspend/resume/query/status)
+* procd based init system support (`start/stop/restart/reload/suspend/resume/query/status`)
* procd network interface trigger support or classic time based startup
-* keep the dns cache intact after adblock processing (currently supported by unbound, named and kresd)
+* keep the DNS cache intact after adblock processing (currently supported by unbound, named and kresd)
* suspend & resume adblock actions temporarily without blocklist reloading
-* provide comprehensive runtime information via LuCI or via 'status' init command
-* provide a detailed DNS Query Report with dns related information about client requests, top (blocked) domains and more
+* provide comprehensive runtime information via LuCI or via `status` init command
+* provide a detailed DNS Query Report with DNS related information about client requests, top (blocked) domains and more
* provide a query function to quickly identify blocked (sub-)domains, e.g. for whitelisting. This function is also able to search in adblock backups and black-/whitelist, to get back the set of blocking lists sources for a certain domain
-* option to force dns requests to the local resolver
+* option to force DNS requests to the local resolver
* automatic blocklist backup & restore, these backups will be used in case of download errors and during startup
* send notification E-Mails in case of a processing error or if the overall domain count is ≤ 0
* add new adblock sources on your own, see example below
## Prerequisites
* [OpenWrt](https://openwrt.org), tested with the stable release series (19.07) and with the latest snapshot
-* a usual setup with an enabled dns backend at minimum - dump AP modes without a working dns backend are _not_ supported
+* a usual setup with an enabled DNS backend at minimum - dumb AP modes without a working DNS backend are _not_ supported
* a download utility:
- * to support all blocklist sources a full version (with ssl support) of 'wget', 'uclient-fetch' with one of the 'libustream-*' ssl libraries, 'aria2c' or 'curl' is required
- * for limited devices with real memory constraints, adblock provides also a 'http only' option and supports wget-nossl and uclient-fetch (without libustream-ssl) as well
+ * to support all blocklist sources a full version (with ssl support) of `wget`, `uclient-fetch` with one of the `libustream-*` ssl libraries, `aria2c` or `curl` is required
+ * for limited devices with real memory constraints, adblock provides also a `http only` option and supports `wget-nossl` and `uclient-fetch` (without `libustream-ssl`) as well
* for more configuration options see examples below
-* E-Mail notification (optional): for E-Mail notification support you need the additional 'msmtp' package
-* DNS Query Report (optional): for this detailed report you need the additional package 'tcpdump' or 'tcpdump-mini'
+* E-Mail notification (optional): for E-Mail notification support you need the additional `msmtp` package
+* DNS Query Report (optional): for this detailed report you need the additional package `tcpdump` or `tcpdump-mini`
## Installation & Usage
-* install 'adblock' (_opkg install adblock_)
-* at minimum configure the appropriate dns backend ('dnsmasq' by default), the download utility and enable the adblock service in _/etc/config/adblock_
-* control the adblock service manually with _/etc/init.d/adblock_ start/stop/restart/reload/suspend/resume/status or use the LuCI frontend
+* install `adblock` (`opkg install adblock`)
+* at minimum configure the appropriate DNS backend (`dnsmasq` by default), the download utility and enable the adblock service in `/etc/config/adblock`
+* control the adblock service manually with `/etc/init.d/adblock` `start/stop/restart/reload/suspend/resume/status` or use the LuCI frontend
## LuCI adblock companion package
* it's strongly recommended to use the LuCI frontend to easily configure all powerful aspects of adblock
-* install 'luci-app-adblock' (_opkg install luci-app-adblock_)
-* the application is located in LuCI under the 'Services' menu
+* install `luci-app-adblock` (`opkg install luci-app-adblock`)
+* the application is located in LuCI under the `Services` menu
## Tweaks
-* **runtime information:** the adblock status is available via _/etc/init.d/adblock status_ (see example below)
-* **debug logging:** for script debugging please set the config option 'adb\_debug' to '1' and check the runtime output with _logread -e "adblock"_
+* **runtime information:** the adblock status is available via `/etc/init.d/adblock status` (see example below)
+* **debug logging:** for script debugging please set the config option `adb\_debug` to `1` and check the runtime output with `logread -e "adblock"`
* **storage expansion:** to process and store all blocklist sources at once it might be helpful to enlarge your temp directory with a swap partition => see [OpenWrt Wiki](https://openwrt.org/docs/guide-user/storage/fstab) for further details
-* **coreutils sort:** To speedup adblock processing in particular with many enabled blocklist sources it's recommended to install the additional package 'coreutils-sort'
-* **add white- / blacklist entries:** add domain black- or whitelist entries to always-deny or -allow certain (sub) domains, by default both lists are empty and located in _/etc/adblock_. Please add one domain per line - ip addresses, wildcards & regex are _not_ allowed (see example below). You need to refresh your blocklists after changes to these static lists.
-* **download queue size:** for further download & list processing performance improvements you can raise the 'adb\_maxqueue' value, e.g. '8' or '16' should be safe
+* **coreutils sort:** To speedup adblock processing in particular with many enabled blocklist sources it's recommended to install the additional package `coreutils-sort`
+* **add white- / blacklist entries:** add domain black- or whitelist entries to always-deny or -allow certain (sub) domains, by default both lists are empty and located in `/etc/adblock`. Please add one domain per line - ip addresses, wildcards & regex are _not_ allowed (see example below). You need to refresh your blocklists after changes to these static lists.
+* **download queue size:** for further download & list processing performance improvements you can raise the `adb\_maxqueue` value, e.g. `8` or `16` should be safe
* **scheduled list updates:** for a scheduled call of the adblock service add an appropriate crontab entry (see example below)
-* **change startup behaviour:** by default the startup will be triggered by the 'wan' procd interface trigger. Choose 'none' to disable automatic startups, 'timed' to use a classic timeout (default 30 sec.) or select another trigger interface
-* **suspend & resume adblocking:** to quickly switch the adblock service 'on' or 'off', simply use _/etc/init.d/adblock [suspend|resume]_
+* **change startup behaviour:** by default the startup will be triggered by the `wan` procd interface trigger. Choose `none` to disable automatic startups, `timed` to use a classic timeout (default 30 sec.) or select another trigger interface
+* **suspend & resume adblocking:** to quickly switch the adblock service `on` or `off`, simply use `/etc/init.d/adblock [suspend|resume]`
* **domain query:** to query the active blocklist for a certain domain, please use the LuCI frontend or run _/etc/init.d/adblock query `<DOMAIN>`_ (see example below)
* **add new list sources:** you could add new blocklist sources on your own via uci config, all you need is a source url and an awk one-liner (see example below)
## Further adblock config options
* usually the pre-configured adblock setup works quite well and no manual overrides are needed
-* the following options apply to the 'global' config section:
- * adb\_enabled => main switch to enable/disable adblock service (default: '0', disabled)
- * adb\_dns => select the dns backend for your environment: 'dnsmasq', 'unbound', 'named' or 'kresd' (default: 'dnsmasq')
- * adb\_dnsvariant => select the blocking variant: 'nxdomain' (default, supported by all backends), 'null (IPv4)' and 'null (IPv4/IPv6)' both options are only supported by dnsmasq
- * adb\_fetchutil => name of the used download utility: 'uclient-fetch', 'wget', 'curl', 'aria2c', 'wget-nossl'. 'busybox' (default: 'uclient-fetch')
- * adb\_fetchparm => special config options for the download utility (default: not set)
- * adb\_trigger => set the startup trigger to a certain interface, to 'timed' or to 'none' (default: 'wan')
-* the following options apply to the 'extra' config section:
- * adb\_debug => enable/disable adblock debug output (default: '0', disabled)
- * adb\_nice => set the nice level of the adblock process and all sub-processes (int/default: '0', standard priority)
- * adb\_forcedns => force dns requests to local resolver (bool/default: '0', disabled)
- * adb\_maxqueue => size of the download queue to handle downloads & list processing in parallel (int/default: '8')
- * adb\_dnsfilereset => the final DNS blockfile will be purged after DNS backend loading to save storage space (bool/default: 'false', disabled)
- * adb\_report => enable the background tcpdump gathering process to provide a detailed DNS Query Report (bool/default: '0', disabled)
- * adb\_repdir => target directory for dns related report files generated by tcpdump (default: '/tmp')
- * adb\_backupdir => target directory for adblock backups (default: '/tmp')
- * adb\_mail => send notification E-Mails in case of a processing errors or if the overall domain count is ≤ 0 (bool/default: '0', disabled)
- * adb\_mreceiver => receiver address for adblock notification E-Mails (default: not set)
-* the following options could be added via "Additional Field" in LuCI and apply to the 'extra' config section as well:
- * adb\_dnsdir => target directory for the generated blocklist 'adb_list.overall' (default: not set, use dns backend default)
- * adb\_blacklist => full path to the static blacklist file (default: '/etc/adblock/adblock.blacklist')
- * adb\_whitelist => full path to the static whitelist file (default: '/etc/adblock/adblock.whitelist')
- * adb\_triggerdelay => additional trigger delay in seconds before adblock processing begins (int/default: '2')
- * adb\_maxtld => disable the tld compression, if the number of blocked domains is greater than this value (int/default: '100000')
- * adb\_portlist => space separated list of fw ports which should be redirected locally (default: '53 853 5353')
- * adb\_dnsinotify => disable adblock triggered restarts and the 'DNS File Reset' for dns backends with autoload features (bool/default: 'false', disabled)
- * adb\_dnsflush => flush DNS cache after adblock processing, i.e. enable the old restart behavior (bool/default: '0', disabled)
- * adb\_repiface => reporting interface used by tcpdump, set to 'any' for multiple interfaces (default: 'br-lan')
- * adb\_replisten => space separated list of reporting port(s) used by tcpdump (default: '53')
- * adb\_repchunkcnt => report chunk count used by tcpdump (default: '5')
- * adb\_repchunksize => report chunk size used by tcpdump in MB (int/default: '1')
- * adb\_msender => sender address for adblock notification E-Mails (default: 'no-reply@adblock')
- * adb\_mtopic => topic for adblock notification E-Mails (default: 'adblock notification')
- * adb\_mprofile => mail profile used in 'msmtp' for adblock notification E-Mails (default: 'adb_notify')
- * adb\_mcnt => raise the minimum domain count E-Mmail notification trigger (int/default: '0')
+* the following options apply to the `global` config section:
+ * `adb\_enabled` => main switch to enable/disable adblock service (default: `0`, disabled)
+ * `adb\_dns` => select the DNS backend for your environment: `dnsmasq`, `unbound`, `named` or `kresd` (default: `dnsmasq`)
+ * `adb\_dnsvariant` => select the blocking variant: `nxdomain` (default, supported by all backends), `null (IPv4)` and `null (IPv4/IPv6)` both options are only supported by `dnsmasq`
+ * `adb\_fetchutil` => name of the used download utility: `uclient-fetch`, `wget`, `curl`, `aria2c`, `wget-nossl` or `busybox` (default: `uclient-fetch`)
+ * `adb\_fetchparm` => special config options for the download utility (default: not set)
+ * `adb\_trigger` => set the startup trigger to a certain interface, to `timed` or to `none` (default: `wan`)
+* the following options apply to the `extra` config section:
+ * `adb\_debug` => enable/disable adblock debug output (default: `0`, disabled)
+ * `adb\_nice` => set the nice level of the adblock process and all sub-processes (int/default: `0`, standard priority)
+ * `adb\_forcedns` => force DNS requests to local resolver (bool/default: `0`, disabled)
+ * `adb\_maxqueue` => size of the download queue to handle downloads & list processing in parallel (int/default: `8`)
+ * `adb\_dnsfilereset` => the final DNS blockfile will be purged after DNS backend loading to save storage space (bool/default: `false`, disabled)
+ * `adb\_report` => enable the background tcpdump gathering process to provide a detailed DNS Query Report (bool/default: `0`, disabled)
+ * `adb\_repdir` => target directory for DNS related report files generated by tcpdump (default: `/tmp`)
+ * `adb\_backupdir` => target directory for adblock backups (default: `/tmp`)
+ * `adb\_mail` => send notification E-Mails in case of a processing error or if the overall domain count is ≤ 0 (bool/default: `0`, disabled)
+ * `adb\_mreceiver` => receiver address for adblock notification E-Mails (default: not set)
+* the following options could be added via "Additional Field" in LuCI and apply to the `extra` config section as well:
+ * `adb\_dnsdir` => target directory for the generated blocklist `adb_list.overall` (default: not set, use DNS backend default)
+ * `adb\_blacklist` => full path to the static blacklist file (default: `/etc/adblock/adblock.blacklist`)
+ * `adb\_whitelist` => full path to the static whitelist file (default: `/etc/adblock/adblock.whitelist`)
+ * `adb\_triggerdelay` => additional trigger delay in seconds before adblock processing begins (int/default: `2`)
+ * `adb\_maxtld` => disable the tld compression, if the number of blocked domains is greater than this value (int/default: `100000`)
+ * `adb\_portlist` => space separated list of fw ports which should be redirected locally (default: `53 853 5353`)
+ * `adb\_dnsinotify` => disable adblock triggered restarts and the 'DNS File Reset' for DNS backends with autoload features (bool/default: `false`, disabled)
+ * `adb\_dnsflush` => flush DNS cache after adblock processing, i.e. enable the old restart behavior (bool/default: `0`, disabled)
+ * `adb\_repiface` => reporting interface used by tcpdump, set to `any` for multiple interfaces (default: `br-lan`)
+ * `adb\_replisten` => space separated list of reporting port(s) used by tcpdump (default: `53`)
+ * `adb\_repchunkcnt` => report chunk count used by tcpdump (default: `5`)
+ * `adb\_repchunksize` => report chunk size used by tcpdump in MB (int/default: `1`)
+ * `adb\_msender` => sender address for adblock notification E-Mails (default: `no-reply@adblock`)
+ * `adb\_mtopic` => topic for adblock notification E-Mails (default: `adblock notification`)
+ * `adb\_mprofile` => mail profile used in `msmtp` for adblock notification E-Mails (default: `adb_notify`)
+ * `adb\_mcnt` => raise the minimum domain count E-Mail notification trigger (int/default: `0`)
## Examples
-**change default dns backend to 'unbound':**
+**change default DNS backend to `unbound`:**
-Adblock deposits the final blocklist 'adb_list.overall' in '/var/lib/unbound' where unbound can find them in its jail, no further configuration needed.
-To preserve the DNS cache after adblock processing you need to install 'unbound-control'.
-
-**change default dns backend to 'named' (bind):**
+Adblock deposits the final blocklist `adb_list.overall` in `/var/lib/unbound` where unbound can find them in its jail, no further configuration needed.
+To preserve the DNS cache after adblock processing you need to install `unbound-control`.
-Adblock deposits the final blocklist 'adb_list.overall' in '/var/lib/bind'.
-To preserve the DNS cache after adblock processing you need to install & configure 'bind-rdnc'.
-To use the blocklist please modify '/etc/bind/named.conf':
-<pre><code>
-in the 'options' namespace add:
- response-policy { zone "rpz"; };
+**change default DNS backend to `named` (bind):**
-and at the end of the file add:
+Adblock deposits the final blocklist `adb_list.overall` in `/var/lib/bind`.
+To preserve the DNS cache after adblock processing you need to install & configure `bind-rndc`.
+To use the blocklist please modify `/etc/bind/named.conf`:
+* in the `options` namespace add:
+```
+ response-policy { zone "rpz"; };
+```
+* and at the end of the file add:
+```
zone "rpz" {
type master;
file "/var/lib/bind/adb_list.overall";
allow-query { none; };
allow-transfer { none; };
};
-</code></pre>
-
-**change default dns backend to 'kresd':**
+```
+
+**change default DNS backend to `kresd`:**
The knot-resolver (kresd) is only available on Turris Omnia devices.
-Adblock deposits the final blocklist 'adb_list.overall' in '/etc/kresd', no further configuration needed.
+Adblock deposits the final blocklist `adb_list.overall` in `/etc/kresd`, no further configuration needed.
**enable E-Mail notification via msmtp:**
-To use the email notification you have to install & configure the package 'msmtp'.
-Modify the file '/etc/msmtprc':
-<pre><code>
+To use the email notification you have to install & configure the package `msmtp`.
+Modify the file `/etc/msmtprc`:
+```
[...]
defaults
auth on
from dev.adblock@gmail.com
user dev.adblock
password xxx
-</code></pre>
+```
Finally enable E-Mail support and add a valid E-Mail address in LuCI.
**receive adblock runtime information:**
-<pre><code>
+```
/etc/init.d/adblock status
::: adblock runtime information
+ adblock_status : enabled
+ backup_dir : /mnt/data/adblock
+ last_rundate : 15.08.2019 08:43:16
+ system_release : GL.iNet GL-AR750S, OpenWrt SNAPSHOT r10720-ccb4b96b8a
-</code></pre>
+```
**receive adblock DNS Query Report information:**
-<pre><code>
+```
/etc/init.d/adblock report
:::
::: Adblock DNS-Query Report
+ 2 ::: settings-win.data.microsoft.com
+ 2 ::: nexusrules.officeapps.live.com
[...]
-</code></pre>
+```
-**cronjob for a regular block list update (/etc/crontabs/root):**
+**cronjob for a regular block list update (`/etc/crontabs/root`):**
-<pre><code>
+```
0 06 * * * /etc/init.d/adblock reload
-</code></pre>
+```
-**blacklist entry (/etc/adblock/adblock.blacklist):**
+**blacklist entry (`/etc/adblock/adblock.blacklist`):**
-<pre><code>
+```
ads.example.com
This entry blocks the following (sub)domains:
This entry does not block:
http://ads.example.com.ua/foo.gif
http://example.com/
-</code></pre>
+```
-**whitelist entry (/etc/adblock/adblock.whitelist):**
+**whitelist entry (`/etc/adblock/adblock.whitelist`):**
-<pre><code>
+```
here.com
This entry removes the following (sub)domains from the blocklist:
This entry does not remove:
where.com
www.adwhere.com
-</code></pre>
+```
**query the active blocklist, the backups and black-/whitelist for a certain (sub-)domain, e.g. for whitelisting:**
The query function checks against the submitted (sub-)domain and recurses automatically to the upper top level domain. For every (sub-)domain it returns the first ten relevant results.
-<pre><code>
+```
/etc/init.d/adblock query google.com
:::
::: results for domain 'google.com' in active blocklist
+ adb_list.yoyo.gz analytics.google.com
+ adb_list.yoyo.gz pagead.l.google.com
+ adb_list.yoyo.gz [...]
-</code></pre>
-
+```
+
**add a new blocklist source:**
1. the easy way ...
example: https://easylist-downloads.adblockplus.org/rolist+easylist.txt
Adblock already supports an easylist source, called 'reg_ru'. To add the additional local easylist as a new source, copy the existing config source section and change only
the source name, the url and the description - that's all!
-<pre><code>
+```
config source 'reg_ro'
option enabled '0'
option adb_src 'https://easylist-downloads.adblockplus.org/rolist+easylist.txt'
option adb_src_rset 'BEGIN{FS=\"[|^]\"}/^\|\|([^([:space:]|#|\*|\/).]+\.)+[[:alpha:]]+\^("\\\$third-party")?$/{print tolower(\$3)}'
option adb_src_desc 'focus on romanian ads plus generic easylist additions, weekly updates, approx. 9.400 entries'
-</code></pre>
+```
2. a bit harder ...
-To add a really new source with different domain/host format you have to write a suitable awk one-liner on your own, so basic awk skills are needed. As a starting point check the already existing awk rulesets 'adb_src_rset' in the config file, probably you need only small changes for your individual list. Download the desired list and test your new awk string locally. The output result should be a sequential list with one domain/host per line - nothing more. If your awk one-liner works quite well, add a new source section to the adblock config file and test the new source.
+To add a really new source with different domain/host format you have to write a suitable awk one-liner on your own, so basic awk skills are needed. As a starting point check the already existing awk rulesets `adb_src_rset` in the config file, probably you need only small changes for your individual list. Download the desired list and test your new awk string locally. The output result should be a sequential list with one domain/host per line - nothing more. If your awk one-liner works quite well, add a new source section to the adblock config file and test the new source.
## Support
Please join the adblock discussion in this [forum thread](https://forum.openwrt.org/t/adblock-support-thread/507) or contact me by mail <dev@brenken.org>
PKG_NAME:=apcupsd
PKG_VERSION:=3.14.14
-PKG_RELEASE:=2
+PKG_RELEASE:=3
PKG_MAINTAINER:=Othmar Truniger <github@truniger.ch>
PKG_LICENSE:=GPL-2.0
URL:=http://www.apcupsd.org/
endef
-CONFIGURE_VARS += SHUTDOWN=/sbin/halt
+CONFIGURE_VARS += SHUTDOWN=/sbin/halt SCRIPTSHELL=/bin/sh WALL=/bin/true APCUPSD_MAIL=/bin/true
define Build/Configure
$(CP) $(SCRIPT_DIR)/config.* $(PKG_BUILD_DIR)/autoconf/
include $(TOPDIR)/rules.mk
PKG_NAME:=bind
-PKG_VERSION:=9.14.4
+PKG_VERSION:=9.14.6
PKG_RELEASE:=1
USERID:=bind=57:bind=57
PKG_MAINTAINER:=Noah Meyerhans <frodo@morgul.net>
-PKG_LICENSE := MPL-2.0
+PKG_LICENSE:=MPL-2.0
+PKG_LICENSE_FILES:=LICENSE
PKG_CPE_ID:=cpe:/a:isc:bind
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:= \
https://www.mirrorservice.org/sites/ftp.isc.org/isc/bind9/$(PKG_VERSION) \
https://ftp.isc.org/isc/bind9/$(PKG_VERSION)
-PKG_HASH:=312efb82a6889074f31ef2849af498b3ec97ca69acd5c4e5e4b4045a8fe6b83f
+PKG_HASH:=8967a040ed900e1800293b9874357fc2f267f33c723aa617268e163bd921edfe
PKG_FIXUP:=autoreconf
PKG_REMOVE_FILES:=aclocal.m4 libtool.m4
PKG_NAME:=coova-chilli
PKG_VERSION:=1.4
-PKG_RELEASE:=10
+PKG_RELEASE:=11
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://codeload.github.com/coova/coova-chilli/tar.gz/$(PKG_VERSION)?
#option fg
# Include this flag to include debug information.
- #option debug 9
+ #option debug 1
# Re-read configuration file at this interval. Will also cause new domain
# name lookups to be performed. Value is given in seconds.
}
config_cb() {
- local chilli_inst="$2"
- if [ "$chilli_inst" != "" ]; then
- chilli_conf="/var/run/chilli_${chilli_inst}.conf"
- if [ -e "$chilli_conf" ]; then
- rm -f "$chilli_conf"
- fi
- eval "start_chilli_$chilli_inst=1"
- fi
+ chilli_conf="/var/run/chilli_${2}.conf"
+ [ -e "$chilli_conf" ] && rm -f "$chilli_conf"
}
option_cb() {
case "$1" in
+ # ignored/internal settings
+ disabled)
+ ;;
# UCI settings
network)
. /lib/functions/network.sh
network_get_device ifname "$2"
echo "dhcpif=\"$ifname\"" >> "$chilli_conf"
;;
- disabled)
- [ "$(config_get_bool "$1")" = "1" ] && eval "start_chilli_$chilli_inst=0"
- ;;
# boolean settings
- debug|dhcpbroadcast|nodynip|vlanlocation|locationstopstart|locationcopycalled|locationimmediateupdate|locationopt82|coanoipcheck|noradallow|proxymacaccept|proxyonacct|dhcpmacset|dhcpradius|noc2c|eapolenable|uamanydns|uamanyip|uamnatanyip|nouamsuccess|nowispr1|nowispr2|domaindnslocal|radsec|macauth|macreauth|macauthdeny|macallowlocal|strictmacauth|strictdhcp|ieee8021q|only8021q|radiusoriginalurl|swapoctets|statusfilesave|wpaguests|openidauth|papalwaysok|mschapv2|chillixml|acctupdate|dnsparanoia|seskeepalive|usetap|noarpentries|framedservice|scalewin|redir|injectwispr|redirurl|routeonetone|nousergardendata|uamgardendata|uamotherdata|withunixipc|uamallowpost|redirssl|uamuissl|layer3|patricia|redirdnsreq|dhcpnotidle|ipv6|ipv6only)
+ acctupdate|chillixml|coanoipcheck|debug|dhcpbroadcast|dhcpmacset|dhcpnotidle|\
+ dhcpradius|dnsparanoia|domaindnslocal|eapolenable|fg|forgiving|framedservice|\
+ ieee8021q|injectwispr|ipv6|ipv6only|layer3|locationcopycalled|\
+ locationimmediateupdate|locationopt82|locationstopstart|macallowlocal|\
+ macauth|macauthdeny|macreauth|mmapring|mschapv2|noarpentries|noc2c|nochallenge|\
+ nodynip|noradallow|nosystemdns|nouamsuccess|nousergardendata|nowispr1|nowispr2|\
+ only8021q|openidauth|papalwaysok|patricia|postauthproxyssl|proxymacaccept|\
+ proxyonacct|radiusoriginalurl|radsec|redir|redirdnsreq|redirssl|redirurl|reload|\
+ routeonetone|scalewin|seskeepalive|statusfilesave|strictdhcp|strictmacauth|\
+ swapoctets|uamallowpost|uamanydns|uamanyip|uamauthedallowed|uamgardendata|\
+ uamnatanyip|uamotherdata|uamuissl|usetap|vlanlocation|wpaguests)
[ "$2" = "true" -o "$2" = "1" ] && echo "$1" >> "$chilli_conf"
;;
*)
start_chilli() {
local cfg="$1"
- local start_chilli=$(eval "echo \$start_chilli_$cfg")
- [ "$start_chilli" = "0" ] && return
local base="/var/run/chilli_${cfg}"
+	config_get_bool disabled "$1" 'disabled' 1
+	[ "$disabled" = 1 ] && return
+
procd_open_instance "$cfg"
procd_set_param command /usr/sbin/chilli
- procd_set_param file "${base}.conf"
- procd_append_param command --fg --conf "${base}.conf" --pidfile "${base}.pid" --cmdsocket "${base}.sock" --unixipc "${base}.ipc"
+	procd_set_param file "${base}.conf"
+ procd_append_param command \
+ --fg \
+ --conf "${base}.conf" \
+ --pidfile "${base}.pid" \
+ --cmdsocket "${base}.sock" \
+ --unixipc "${base}.ipc"
procd_set_param respawn
procd_set_param stdout 1
procd_set_param stderr 1
include $(TOPDIR)/rules.mk
PKG_NAME:=frp
-PKG_VERSION:=0.28.0
+PKG_VERSION:=0.29.0
PKG_RELEASE:=1
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://codeload.github.com/fatedier/frp/tar.gz/v${PKG_VERSION}?
-PKG_HASH:=61afbd0e84fc1ab92eacce5a642e2590d1b8c1a972a78f6499165c1778aa62cf
+PKG_HASH:=5d7980b81cfd055e3e5bb7a120098f94342656f647cb906ea075912f63568816
PKG_MAINTAINER:=Richard Yu <yurichard3839@gmail.com>
PKG_LICENSE:=Apache-2.0
--- /dev/null
+#
+# Copyright (C) 2019 Lucian Cristian <lucian.cristian@gmail.com>
+#
+# This is free software, licensed under the GNU General Public License v2.
+# See /LICENSE for more information.
+#
+
+include $(TOPDIR)/rules.mk
+PKG_NAME:=frr
+PKG_VERSION:=7.1
+PKG_RELEASE:=1
+
+PKG_SOURCE_URL:=https://github.com/FRRouting/frr/releases/download/$(PKG_NAME)-$(PKG_VERSION)/
+PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.xz
+PKG_HASH:=2a4210565e7b41d366f7b9f5f745917d67a0b159f3b6bd49d75f9e730557db2f
+PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(PKG_VERSION)
+PKG_MAINTAINER:=Lucian Cristian <lucian.cristian@gmail.com>
+
+PKG_CONFIG_DEPENDS:= \
+ CONFIG_IPV6 \
+ CONFIG_PACKAGE_frr-babeld \
+ CONFIG_PACKAGE_frr-bfdd \
+ CONFIG_PACKAGE_frr-bgpd \
+ CONFIG_PACKAGE_frr-eigrpd \
+ CONFIG_PACKAGE_frr-fabricd \
+ CONFIG_PACKAGE_frr-isisd \
+ CONFIG_PACKAGE_frr-ldpd \
+ CONFIG_PACKAGE_frr-libfrr \
+ CONFIG_PACKAGE_frr-nhrpd \
+ CONFIG_PACKAGE_frr-ospfd \
+ CONFIG_PACKAGE_frr-ospf6d \
+ CONFIG_PACKAGE_frr-pbrd \
+ CONFIG_PACKAGE_frr-pimd \
+ CONFIG_PACKAGE_frr-ripd \
+ CONFIG_PACKAGE_frr-ripngd \
+ CONFIG_PACKAGE_frr-staticd \
+ CONFIG_PACKAGE_frr-vtysh \
+ CONFIG_PACKAGE_frr-watchfrr \
+ CONFIG_PACKAGE_frr-zebra
+
+PKG_BUILD_PARALLEL:=1
+PKG_BUILD_DEPENDS:=python3/host
+PKG_FIXUP:=autoreconf
+PKG_LICENSE:=GPL-2.0
+
+include $(INCLUDE_DIR)/package.mk
+include $(INCLUDE_DIR)/host-build.mk
+
+define Package/frr/Default
+ SECTION:=net
+ CATEGORY:=Network
+ SUBMENU:=Routing and Redirection
+ DEPENDS:=frr
+ TITLE:=The FRRouting (FRR) Software Routing Suite
+ URL:=https://www.frrouting.org/
+endef
+
+define Package/frr
+ $(call Package/frr/Default)
+ DEPENDS:=+librt
+ MENU:=1
+endef
+
+define Package/frr/description
+ FRRouting is free software that implements and manages various IPv4 and IPv6
+ routing protocols.
+
+ Currently FRRouting supports BGP4, BGP4+, OSPFv2, OSPFv3, RIPv1, RIPv2, RIPng,
+ IS-IS, PIM-SM/MSDP, LDP and Babel as well as very early support for EIGRP and
+ NHRP.
+endef
+
+define Package/frr-babeld
+ $(call Package/frr/Default)
+ DEPENDS+=+frr-libfrr
+ TITLE:=BABEL routing engine
+endef
+
+define Package/frr-bfdd
+ $(call Package/frr/Default)
+ DEPENDS+=+frr-libfrr
+ TITLE:=BFD routing engine
+ CONFLICTS:=bfdd
+endef
+
+define Package/frr-bgpd
+ $(call Package/frr/Default)
+ DEPENDS+=+frr-libfrr
+ TITLE:=BGPv4, BGPv4+, BGPv4- routing engine
+ CONFLICTS:=quagga-bgpd
+endef
+
+define Package/frr-eigrpd
+ $(call Package/frr/Default)
+ DEPENDS+=+frr-libfrr
+ TITLE:=EIGRPD routing engine
+endef
+
+define Package/frr-fabricd
+ $(call Package/frr/Default)
+ DEPENDS+=+frr-libfrr
+ TITLE:=OpenFabric routing engine
+endef
+
+define Package/frr-isisd
+ $(call Package/frr/Default)
+ DEPENDS+=+frr-libfrr
+ TITLE:=IS-IS routing engine
+ CONFLICTS:=quagga-isisd
+endef
+
+define Package/frr-ldpd
+ $(call Package/frr/Default)
+ DEPENDS+=+frr-libfrr
+ TITLE:=LDP routing engine
+endef
+
+define Package/frr-libfrr
+ $(call Package/frr/Default)
+ TITLE:=zebra library
+ DEPENDS+=+libjson-c +libyang
+ CONFLICTS:=quagga-libzebra
+endef
+
+define Package/frr-nhrpd
+ $(call Package/frr/Default)
+ DEPENDS+=+frr-libfrr +libcares
+ TITLE:=NHRP routing engine
+endef
+
+define Package/frr-ospfd
+ $(call Package/frr/Default)
+ DEPENDS+=+frr-libfrr
+ TITLE:=OSPFv2 routing engine
+ CONFLICTS:=quagga-ospfd
+endef
+
+define Package/frr-ospf6d
+ $(call Package/frr/Default)
+ DEPENDS+=+frr-libfrr @IPV6
+ TITLE:=OSPFv3 routing engine
+ CONFLICTS:=quagga-ospf6d
+endef
+
+define Package/frr-pbrd
+ $(call Package/frr/Default)
+ DEPENDS+=+frr-libfrr
+ TITLE:=PBRD routing daemon
+endef
+
+define Package/frr-pimd
+ $(call Package/frr/Default)
+ DEPENDS+=+frr-libfrr
+ TITLE:=PIM routing engine
+endef
+
+define Package/frr-ripd
+ $(call Package/frr/Default)
+ DEPENDS+=+frr-libfrr
+ TITLE:=RIP routing engine
+ CONFLICTS:=quagga-ripd
+endef
+
+define Package/frr-ripngd
+ $(call Package/frr/Default)
+ DEPENDS+=+frr-libfrr @IPV6
+ TITLE:=RIPNG routing engine
+ CONFLICTS:=quagga-ripngd
+endef
+
+define Package/frr-staticd
+ $(call Package/frr/Default)
+ DEPENDS+=+frr-libfrr
+ TITLE:=STATICD routing engine
+endef
+
+define Package/frr-vtysh
+ $(call Package/frr/Default)
+ DEPENDS+=+frr-libfrr +libreadline +libncurses
+ TITLE:=integrated shell for frr routing software
+ CONFLICTS:=quagga-vtysh
+endef
+
+define Package/frr-watchfrr
+ $(call Package/frr/Default)
+ TITLE:=frr watchdog
+ DEPENDS+=+frr-libfrr
+ DEFAULT:=y if PACKAGE_frr
+endef
+
+define Package/frr-zebra
+ $(call Package/frr/Default)
+ TITLE:=Zebra daemon
+ DEPENDS+=+frr-libfrr
+ DEFAULT:=y if PACKAGE_frr
+ CONFLICTS:=quagga-zebra
+endef
+
+define Package/frr-libfrr/conffiles
+/etc/frr/
+endef
+
+define Build/Prepare
+ $(Build/Prepare/Default)
+ mkdir -p $(PKG_BUILD_DIR)/build/lib
+ $(CP) $(PKG_BUILD_DIR)/lib/command.h $(PKG_BUILD_DIR)/build/lib/
+endef
+
+define Build/Configure
+	( cd $(PKG_BUILD_DIR)/build/ ; \
+		../configure \
+ --host="$(GNU_TARGET_NAME)" \
+ --build="$(GNU_HOST_NAME)" \
+ CFLAGS="$(TARGET_CFLAGS) $(EXTRA_CFLAGS)" \
+ CXXFLAGS="$(TARGET_CFLAGS) $(EXTRA_CFLAGS)" \
+ LDFLAGS="$(TARGET_LDFLAGS) $(EXTRA_LDFLAGS)" \
+ HOST_CFLAGS="$(HOST_CFLAGS)" \
+ HOST_LDFLAGS="$(HOST_LDFLAGS)" \
+		BUILD_CPPFLAGS="$(TARGET_CPPFLAGS)" \
+ BUILD_CFLAGS="$(TARGET_CFLAGS)" \
+ BUILD_LDFLAGS="$(TARGET_LDFLAGS)" \
+ --prefix=/usr \
+ --enable-shared \
+ --disable-static \
+ --enable-user=network \
+ --enable-group=network \
+ --enable-multipath=16 \
+ --disable-capabilities \
+ --disable-ospfclient \
+ --disable-doc \
+ --with-vtysh-pager=cat \
+ --localstatedir=/var/run/frr \
+ --sysconfdir=/etc/frr/ \
+ $(call autoconf_bool,CONFIG_PACKAGE_frr-babeld,babeld) \
+ $(call autoconf_bool,CONFIG_PACKAGE_frr-bfdd,bfdd) \
+ $(call autoconf_bool,CONFIG_PACKAGE_frr-bgpd,bgpd) \
+ $(call autoconf_bool,CONFIG_PACKAGE_frr-eigrpd,eigrpd) \
+ $(call autoconf_bool,CONFIG_PACKAGE_frr-fabricd,fabricd) \
+ $(call autoconf_bool,CONFIG_PACKAGE_frr-isisd,isisd) \
+ $(call autoconf_bool,CONFIG_PACKAGE_frr-ldpd,ldpd) \
+ $(call autoconf_bool,CONFIG_PACKAGE_frr-nhrpd,nhrpd) \
+ $(call autoconf_bool,CONFIG_PACKAGE_frr-ospfd,ospfd) \
+ $(call autoconf_bool,CONFIG_PACKAGE_frr-ospf6d,ospf6d) \
+ $(call autoconf_bool,CONFIG_PACKAGE_frr-pbrd,pbrd) \
+ $(call autoconf_bool,CONFIG_PACKAGE_frr-pimd,pimd) \
+ $(call autoconf_bool,CONFIG_PACKAGE_frr-staticd,staticd) \
+ $(call autoconf_bool,CONFIG_PACKAGE_frr-ripd,ripd) \
+ $(call autoconf_bool,CONFIG_PACKAGE_frr-ripngd,ripngd) \
+ $(call autoconf_bool,CONFIG_PACKAGE_frr-vtysh,vtysh) \
+		$(call autoconf_bool,CONFIG_PACKAGE_frr-zebra,zebra) \
+)
+endef
+
+define Build/Compile
+	+$(MAKE) $(PKG_JOBS) -C $(PKG_BUILD_DIR)/build
+endef
+
+define Package/frr/install
+ $(INSTALL_DIR) $(1)/usr/sbin
+ $(INSTALL_BIN) ./files/frrcommon.sh $(1)/usr/sbin/
+ $(INSTALL_DIR) $(1)/etc/init.d
+ $(INSTALL_BIN) ./files/frr $(1)/etc/init.d/
+endef
+
+define Package/frr-watchfrr/install
+ $(INSTALL_DIR) $(1)/usr/sbin
+ $(INSTALL_BIN) ./files/watchfrr.sh $(1)/usr/sbin/
+ $(INSTALL_BIN) $(PKG_BUILD_DIR)/build/watchfrr/.libs/watchfrr $(1)/usr/sbin/
+endef
+
+define Package/frr-zebra/install
+ $(INSTALL_DIR) $(1)/usr/sbin
+ $(INSTALL_BIN) $(PKG_BUILD_DIR)/build/zebra/.libs/zebra $(1)/usr/sbin/
+endef
+
+define Package/frr-babeld/install
+ $(INSTALL_DIR) $(1)/usr/sbin
+ $(INSTALL_BIN) $(PKG_BUILD_DIR)/build/babeld/.libs/babeld $(1)/usr/sbin/
+endef
+
+define Package/frr-bfdd/install
+ $(INSTALL_DIR) $(1)/usr/sbin
+ $(INSTALL_BIN) $(PKG_BUILD_DIR)/build/bfdd/.libs/bfdd $(1)/usr/sbin/
+endef
+
+define Package/frr-bgpd/install
+ $(INSTALL_DIR) $(1)/usr/sbin
+ $(INSTALL_BIN) $(PKG_BUILD_DIR)/build/bgpd/.libs/bgpd $(1)/usr/sbin/
+endef
+
+define Package/frr-eigrpd/install
+ $(INSTALL_DIR) $(1)/usr/sbin
+ $(INSTALL_BIN) $(PKG_BUILD_DIR)/build/eigrpd/.libs/eigrpd $(1)/usr/sbin/
+endef
+
+define Package/frr-fabricd/install
+ $(INSTALL_DIR) $(1)/usr/sbin
+ $(INSTALL_BIN) $(PKG_BUILD_DIR)/build/isisd/.libs/fabricd $(1)/usr/sbin/
+endef
+
+define Package/frr-isisd/install
+ $(INSTALL_DIR) $(1)/usr/sbin
+ $(INSTALL_BIN) $(PKG_BUILD_DIR)/build/isisd/.libs/isisd $(1)/usr/sbin/
+endef
+
+define Package/frr-ldpd/install
+ $(INSTALL_DIR) $(1)/usr/sbin
+ $(INSTALL_BIN) $(PKG_BUILD_DIR)/build/ldpd/.libs/ldpd $(1)/usr/sbin/
+endef
+
+define Package/frr-nhrpd/install
+ $(INSTALL_DIR) $(1)/usr/sbin
+ $(INSTALL_BIN) $(PKG_BUILD_DIR)/build/nhrpd/.libs/nhrpd $(1)/usr/sbin/
+endef
+
+define Package/frr-ospfd/install
+ $(INSTALL_DIR) $(1)/usr/sbin
+ $(INSTALL_BIN) $(PKG_BUILD_DIR)/build/ospfd/.libs/ospfd $(1)/usr/sbin/
+endef
+
+define Package/frr-ospf6d/install
+ $(INSTALL_DIR) $(1)/usr/sbin
+ $(INSTALL_BIN) $(PKG_BUILD_DIR)/build/ospf6d/.libs/ospf6d $(1)/usr/sbin/
+endef
+
+define Package/frr-pbrd/install
+ $(INSTALL_DIR) $(1)/usr/sbin
+ $(INSTALL_BIN) $(PKG_BUILD_DIR)/build/pbrd/.libs/pbrd $(1)/usr/sbin/
+endef
+
+define Package/frr-pimd/install
+ $(INSTALL_DIR) $(1)/usr/sbin
+ $(INSTALL_BIN) $(PKG_BUILD_DIR)/build/pimd/.libs/pimd $(1)/usr/sbin/
+endef
+
+define Package/frr-ripd/install
+ $(INSTALL_DIR) $(1)/usr/sbin
+ $(INSTALL_BIN) $(PKG_BUILD_DIR)/build/ripd/.libs/ripd $(1)/usr/sbin/
+endef
+
+define Package/frr-ripngd/install
+ $(INSTALL_DIR) $(1)/usr/sbin
+ $(INSTALL_BIN) $(PKG_BUILD_DIR)/build/ripngd/.libs/ripngd $(1)/usr/sbin/
+endef
+
+define Package/frr-staticd/install
+ $(INSTALL_DIR) $(1)/usr/sbin
+ $(INSTALL_BIN) $(PKG_BUILD_DIR)/build/staticd/.libs/staticd $(1)/usr/sbin/
+endef
+
+define Package/frr-vrrpd/install
+ $(INSTALL_DIR) $(1)/usr/sbin
+ $(INSTALL_BIN) $(PKG_BUILD_DIR)/build/vrrpd/.libs/vrrpd $(1)/usr/sbin/
+endef
+
+define Package/frr-vtysh/install
+ $(INSTALL_DIR) $(1)/usr/bin
+ $(INSTALL_BIN) $(PKG_BUILD_DIR)/build/vtysh/.libs/vtysh $(1)/usr/bin/
+endef
+
+define Package/frr-libfrr/install
+ $(INSTALL_DIR) $(1)/usr/lib
+ $(CP) $(PKG_BUILD_DIR)/build/lib/.libs/libfrr.so* $(1)/usr/lib/
+ $(INSTALL_DIR) $(1)/etc/frr
+ chmod 0750 $(1)/etc/frr
+	$(INSTALL_CONF) ./files/frr.conf ./files/daemons $(1)/etc/frr/
+endef
+
+$(eval $(call HostBuild))
+$(eval $(call BuildPackage,frr))
+$(eval $(call BuildPackage,frr-babeld))
+$(eval $(call BuildPackage,frr-bfdd))
+$(eval $(call BuildPackage,frr-bgpd))
+$(eval $(call BuildPackage,frr-eigrpd))
+$(eval $(call BuildPackage,frr-fabricd))
+$(eval $(call BuildPackage,frr-isisd))
+$(eval $(call BuildPackage,frr-ldpd))
+$(eval $(call BuildPackage,frr-libfrr))
+$(eval $(call BuildPackage,frr-nhrpd))
+$(eval $(call BuildPackage,frr-ospfd))
+$(eval $(call BuildPackage,frr-ospf6d))
+$(eval $(call BuildPackage,frr-pbrd))
+$(eval $(call BuildPackage,frr-pimd))
+$(eval $(call BuildPackage,frr-ripd))
+$(eval $(call BuildPackage,frr-ripngd))
+$(eval $(call BuildPackage,frr-staticd))
+$(eval $(call BuildPackage,frr-vtysh))
+$(eval $(call BuildPackage,frr-watchfrr))
+$(eval $(call BuildPackage,frr-zebra))
--- /dev/null
+# The watchfrr and zebra daemons are always started.
+#
+bgpd=no
+ospfd=no
+#ospfd_instances=1,20
+ospf6d=no
+ripd=no
+ripngd=no
+isisd=no
+pimd=no
+ldpd=no
+nhrpd=no
+eigrpd=no
+babeld=no
+sharpd=no
+pbrd=no
+bfdd=no
+fabricd=no
+vrrpd=no
+
+#
+# If this option is set the /etc/init.d/frr script automatically loads
+# the config via "vtysh -b" when the servers are started.
+# Check /etc/pam.d/frr if you intend to use "vtysh"!
+#
+vtysh_enable=yes
+zebra_options=" -A 127.0.0.1 -s 90000000"
+bgpd_options=" -A 127.0.0.1"
+ospfd_options=" -A 127.0.0.1"
+ospf6d_options=" -A ::1"
+ripd_options=" -A 127.0.0.1"
+ripngd_options=" -A ::1"
+isisd_options=" -A 127.0.0.1"
+pimd_options=" -A 127.0.0.1"
+ldpd_options=" -A 127.0.0.1"
+nhrpd_options=" -A 127.0.0.1"
+eigrpd_options=" -A 127.0.0.1"
+babeld_options=" -A 127.0.0.1"
+sharpd_options=" -A 127.0.0.1"
+pbrd_options=" -A 127.0.0.1"
+staticd_options="-A 127.0.0.1"
+bfdd_options=" -A 127.0.0.1"
+fabricd_options="-A 127.0.0.1"
+vrrpd_options=" -A 127.0.0.1"
+
+# The list of daemons to watch is automatically generated by the init script.
+#watchfrr_options=""
+
+# for debugging purposes, you can specify a "wrap" command to start instead
+# of starting the daemon directly, e.g. to use valgrind on ospfd:
+# ospfd_wrap="/usr/bin/valgrind"
+# or you can use "all_wrap" for all daemons, e.g. to use perf record:
+# all_wrap="/usr/bin/perf record --call-graph -"
+# the normal daemon command is added to this at the end.
--- /dev/null
+#!/bin/sh /etc/rc.common
+
+START=95
+
+log_success_msg() {
+ echo "$@"
+}
+log_warning_msg() {
+ echo "$@" >&2
+}
+log_failure_msg() {
+ echo "$@" >&2
+}
+
+self="`dirname $0`"
+if [ -r "$self/frrcommon.sh" ]; then
+ . "$self/frrcommon.sh"
+else
+ . "/usr/sbin/frrcommon.sh"
+fi
+
+
+start() {
+ daemon_list daemons
+ watchfrr_options="$watchfrr_options $daemons"
+ daemon_start watchfrr
+}
+stop() {
+ daemon_stop watchfrr
+ all_stop --reallyall
+ exit ${still_running:-0}
+}
+
+restart() {
+ daemon_stop watchfrr
+ all_stop --reallyall
+
+ daemon_list daemons
+ watchfrr_options="$watchfrr_options $daemons"
+ daemon_start watchfrr
+}
+
+status() {
+ fail=0
+ print_status watchfrr || fail=1
+ all_status || fail=1
+ exit $fail
+}
+
+reload() {
+ if [ ! -x "$RELOAD_SCRIPT" ]; then
+ log_failure_msg "The frr-pythontools package is required for reload functionality."
+ exit 1
+ fi
+
+ # restart watchfrr to pick up added daemons.
+ # NB: This will NOT cause the other daemons to be restarted.
+ daemon_list daemons
+ watchfrr_options="$watchfrr_options $daemons"
+ daemon_stop watchfrr && \
+ daemon_start watchfrr
+
+ NEW_CONFIG_FILE="${2:-$C_PATH/frr.conf}"
+	[ ! -r "$NEW_CONFIG_FILE" ] && log_failure_msg "Unable to read new configuration file $NEW_CONFIG_FILE" && exit 1
+ "$RELOAD_SCRIPT" --reload "$NEW_CONFIG_FILE"
+ exit $?
+}
+
--- /dev/null
+password zebra
+!
+!router eigrp 1
+! network 10.0.0.0/8
+! network 192.168.1.0/24
+!
+!router ospf
+!ospf router-id 172.16.0.2
+!network 192.168.1.0/24 area 0
+!neighbor 172.16.0.1
+!
+!router rip
+! network 10.0.0.0/8
+! network 192.168.1.0/24
+!
+log syslog
+!
+access-list vty permit 127.0.0.0/8
+access-list vty deny any
+!
+line vty
+ access-class vty
--- /dev/null
+#!/bin/sh
+#
+#
+# This is a "library" of sorts for use by the other FRR shell scripts. It
+# has most of the daemon start/stop logic, but expects the following shell
+# functions/commands to be provided by the "calling" script:
+#
+# log_success_msg
+# log_warning_msg
+# log_failure_msg
+#
+# (coincidentally, these are LSB standard functions.)
+#
+# Sourcing this file in a shell script will load FRR config variables but
+# not perform any action. Note there is an "exit 1" if the main config
+# file does not exist.
+#
+# This script should be installed in /usr/sbin/frrcommon.sh
+
+PATH=/bin:/usr/bin:/sbin:/usr/sbin
+D_PATH="/usr/sbin" # /usr/lib/frr
+C_PATH="/etc/frr" # /etc/frr
+V_PATH="/var/run/frr" # /var/run/frr
+VTYSH="/usr/bin/vtysh" # /usr/bin/vtysh
+FRR_USER="network" # frr
+FRR_GROUP="network" # frr
+FRR_VTY_GROUP="" # frrvty
+
+# ORDER MATTERS FOR $DAEMONS!
+# - keep zebra first
+# - watchfrr does NOT belong in this list
+
+DAEMONS="zebra bgpd ripd ripngd ospfd ospf6d isisd babeld pimd ldpd nhrpd eigrpd sharpd pbrd staticd bfdd fabricd vrrpd"
+RELOAD_SCRIPT="$D_PATH/frr-reload.py"
+
+#
+# general helpers
+#
+
+debug() {
+ [ -n "$watchfrr_debug" ] || return 0
+
+ printf '%s %s(%s):' "`date +%Y-%m-%dT%H:%M:%S.%N`" "$0" $$ >&2
+ # this is to show how arguments are split regarding whitespace & co.
+ # (e.g. for use with `debug "message" "$@"`)
+ while [ $# -gt 0 ]; do
+ printf ' "%s"' "$1" >&2
+ shift
+ done
+ printf '\n' >&2
+}
+
+chownfrr() {
+ [ -n "$FRR_USER" ] && chown "$FRR_USER" "$1"
+ [ -n "$FRR_GROUP" ] && chgrp "$FRR_GROUP" "$1"
+}
+
+vtysh_b () {
+ [ "$1" = "watchfrr" ] && return 0
+ [ -r "$C_PATH/frr.conf" ] || return 0
+ if [ -n "$1" ]; then
+ "$VTYSH" -b -n -d "$1"
+ else
+ "$VTYSH" -b -n
+ fi
+}
+
+daemon_inst() {
+ # note this sets global variables ($dmninst, $daemon, $inst)
+ dmninst="$1"
+ daemon="${dmninst%-*}"
+ inst=""
+ [ "$daemon" != "$dmninst" ] && inst="${dmninst#*-}"
+}
+
+daemon_list() {
+ # note $1 and $2 specify names for global variables to be set
+ local enabled disabled evar dvar
+ enabled=""
+ disabled=""
+ evar="$1"
+ dvar="$2"
+
+ for daemon in $DAEMONS; do
+ eval cfg=\$$daemon
+ eval inst=\$${daemon}_instances
+ [ "$daemon" = zebra -o "$daemon" = staticd ] && cfg=yes
+ if [ -n "$cfg" -a "$cfg" != "no" -a "$cfg" != "0" ]; then
+ if ! daemon_prep "$daemon" "$inst"; then
+ continue
+ fi
+ debug "$daemon enabled"
+ enabled="$enabled $daemon"
+ if [ -n "$inst" ]; then
+ debug "$daemon multi-instance $inst"
+ oldifs="${IFS}"
+ IFS="${IFS},"
+ for i in $inst; do
+ enabled="$enabled $daemon-$i"
+ done
+ IFS="${oldifs}"
+ fi
+ else
+ debug "$daemon disabled"
+ disabled="$disabled $daemon"
+ fi
+ done
+
+ enabled="${enabled# }"
+ disabled="${disabled# }"
+ [ -z "$evar" ] && echo "$enabled"
+ [ -n "$evar" ] && eval $evar="\"$enabled\""
+ [ -n "$dvar" ] && eval $dvar="\"$disabled\""
+}
+
+#
+# individual daemon management
+#
+
+daemon_prep() {
+ local daemon inst cfg
+ daemon="$1"
+ inst="$2"
+ [ "$daemon" = "watchfrr" ] && return 0
+ [ -x "$D_PATH/$daemon" ] || {
+ log_failure_msg "cannot start $daemon${inst:+ (instance $inst)}: daemon binary not installed"
+ return 1
+ }
+ [ -r "$C_PATH/frr.conf" ] && return 0
+
+ cfg="$C_PATH/$daemon${inst:+-$inst}.conf"
+ if [ ! -r "$cfg" ]; then
+ touch "$cfg"
+ chownfrr "$cfg"
+ fi
+ return 0
+}
+
+daemon_start() {
+ local dmninst daemon inst args instopt wrap bin
+ daemon_inst "$1"
+
+ ulimit -n $MAX_FDS > /dev/null 2> /dev/null
+ daemon_prep "$daemon" "$inst" || return 1
+ if test ! -d "$V_PATH"; then
+ mkdir -p "$V_PATH"
+		chownfrr "$V_PATH"
+ fi
+
+ eval wrap="\$${daemon}_wrap"
+ bin="$D_PATH/$daemon"
+ instopt="${inst:+-n $inst}"
+ eval args="\$${daemon}_options"
+
+ if eval "$all_wrap $wrap $bin -d $instopt $args"; then
+ log_success_msg "Started $dmninst"
+ vtysh_b "$daemon"
+ else
+ log_failure_msg "Failed to start $dmninst!"
+ fi
+}
+
+daemon_stop() {
+ local dmninst daemon inst pidfile vtyfile pid cnt fail
+ daemon_inst "$1"
+
+ pidfile="$V_PATH/$daemon${inst:+-$inst}.pid"
+ vtyfile="$V_PATH/$daemon${inst:+-$inst}.vty"
+
+ [ -r "$pidfile" ] || fail="pid file not found"
+ [ -z "$fail" ] && pid="`cat \"$pidfile\"`"
+ [ -z "$fail" -a -z "$pid" ] && fail="pid file is empty"
+ [ -n "$fail" ] || kill -0 "$pid" 2>/dev/null || fail="pid $pid not running"
+
+ if [ -n "$fail" ]; then
+ log_failure_msg "Cannot stop $dmninst: $fail"
+ return 1
+ fi
+
+ debug "kill -2 $pid"
+ kill -2 "$pid"
+ cnt=1200
+ while kill -0 "$pid" 2>/dev/null; do
+ sleep 1
+ [ $(( cnt -= 1 )) -gt 0 ] || break
+ done
+ if kill -0 "$pid" 2>/dev/null; then
+ log_failure_msg "Failed to stop $dmninst, pid $pid still running"
+ still_running=1
+ return 1
+ else
+ log_success_msg "Stopped $dmninst"
+ rm -f "$pidfile"
+ return 0
+ fi
+}
+
+daemon_status() {
+ local dmninst daemon inst pidfile pid fail
+ daemon_inst "$1"
+
+ pidfile="$V_PATH/$daemon${inst:+-$inst}.pid"
+
+ [ -r "$pidfile" ] || return 3
+ pid="`cat \"$pidfile\"`"
+ [ -z "$pid" ] && return 1
+ kill -0 "$pid" 2>/dev/null || return 1
+ return 0
+}
+
+print_status() {
+ daemon_status "$1"
+ rv=$?
+ if [ "$rv" -eq 0 ]; then
+ log_success_msg "Status of $1: running"
+ else
+ log_failure_msg "Status of $1: FAILED"
+ fi
+ return $rv
+}
+
+#
+# all-daemon commands
+#
+
+all_start() {
+ daemon_list daemons
+ for dmninst in $daemons; do
+ daemon_start "$dmninst"
+ done
+}
+
+all_stop() {
+ local pids reversed
+
+ daemon_list daemons disabled
+ [ "$1" = "--reallyall" ] && daemons="$daemons $disabled"
+
+ reversed=""
+ for dmninst in $daemons; do
+ reversed="$dmninst $reversed"
+ done
+
+ for dmninst in $reversed; do
+ daemon_stop "$dmninst" &
+ pids="$pids $!"
+ done
+ for pid in $pids; do
+ wait $pid
+ done
+}
+
+all_status() {
+ local fail
+
+ daemon_list daemons
+ fail=0
+ for dmninst in $daemons; do
+ print_status "$dmninst" || fail=1
+ done
+ return $fail
+}
+
+#
+# config sourcing
+#
+
+load_old_config() {
+ oldcfg="$1"
+ [ -r "$oldcfg" ] || return 0
+ [ -s "$oldcfg" ] || return 0
+ grep -v '^[[:blank:]]*\(#\|$\)' "$oldcfg" > /dev/null || return 0
+
+ log_warning_msg "Reading deprecated $oldcfg. Please move its settings to $C_PATH/daemons and remove it."
+
+ # save off settings from daemons for the OR below
+ for dmn in $DAEMONS; do eval "_new_$dmn=\${$dmn:-no}"; done
+
+ . "$oldcfg"
+
+ # OR together the daemon enabling options between config files
+ for dmn in $DAEMONS; do eval "test \$_new_$dmn != no && $dmn=\$_new_$dmn; unset _new_$dmn"; done
+}
+
+[ -r "$C_PATH/daemons" ] || {
+ log_failure_msg "cannot run $@: $C_PATH/daemons does not exist"
+ exit 1
+}
+. "$C_PATH/daemons"
+
+load_old_config "$C_PATH/daemons.conf"
+load_old_config "/etc/default/frr"
+load_old_config "/etc/sysconfig/frr"
+
+if { declare -p watchfrr_options 2>/dev/null || true; } | grep -q '^declare \-a'; then
+ log_warning_msg "watchfrr_options contains a bash array value." \
+ "The configured value is intentionally ignored since it is likely wrong." \
+ "Please remove or fix the setting."
+ unset watchfrr_options
+fi
+
+#
+# other defaults and dispatch
+#
+
+frrcommon_main() {
+ local cmd
+
+ debug "frrcommon_main" "$@"
+
+ cmd="$1"
+ shift
+
+ if [ "$1" = "all" -o -z "$1" ]; then
+ case "$cmd" in
+ start) all_start;;
+ stop) all_stop;;
+ restart)
+ all_stop
+ all_start
+ ;;
+ *) $cmd "$@";;
+ esac
+ else
+ case "$cmd" in
+ start) daemon_start "$@";;
+ stop) daemon_stop "$@";;
+ restart)
+ daemon_stop "$@"
+ daemon_start "$@"
+ ;;
+ *) $cmd "$@";;
+ esac
+ fi
+}
--- /dev/null
+#!/bin/sh
+#
+# This is NOT the init script! This is the watchfrr start/stop/restart
+# command handler, passed to watchfrr with the -s/-r/-k commands. It is used
+# internally by watchfrr to start the protocol daemons with the appropriate
+# options.
+#
+# This script should be installed in /usr/sbin/watchfrr.sh
+
+log_success_msg() {
+ :
+}
+
+log_warning_msg() {
+ echo "$@" >&2
+ [ -x /usr/bin/logger ] && echo "$@" \
+ | /usr/bin/logger -t watchfrr.sh -p daemon.warn
+}
+
+log_failure_msg() {
+ echo "$@" >&2
+ [ -x /usr/bin/logger ] && echo "$@" \
+ | /usr/bin/logger -t watchfrr.sh -p daemon.err
+}
+
+self="`dirname $0`"
+if [ -r "$self/frrcommon.sh" ]; then
+ . "$self/frrcommon.sh"
+else
+ . "/usr/sbin/frrcommon.sh"
+fi
+
+chownfrr $V_PATH
+
+frrcommon_main "$@"
--- /dev/null
+--- a/lib/prefix.h 2017-12-03 22:02:41.642317377 +0200
++++ b/lib/prefix.h 2017-12-03 22:03:30.011320262 +0200
+@@ -24,12 +24,12 @@
+
+ #ifdef SUNOS_5
+ #include <sys/ethernet.h>
+-#else
++/*#else
+ #ifdef GNU_LINUX
+ #include <net/ethernet.h>
+ #else
+ #include <netinet/if_ether.h>
+-#endif
++#endif*/
+ #endif
+ #include "sockunion.h"
+ #include "ipaddr.h"
include $(TOPDIR)/rules.mk
PKG_NAME:=haproxy
-PKG_VERSION:=2.0.5
+PKG_VERSION:=2.0.6
PKG_RELEASE:=1
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://www.haproxy.org/download/2.0/src
-PKG_HASH:=3f2e0d40af66dd6df1dc2f6055d3de106ba62836d77b4c2e497a82a4bdbc5422
+PKG_HASH:=01e1da09452010111107ca1b3a8b7f1927731ba0fe4380bacae1c626fdc521e4
PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(BUILD_VARIANT)/$(PKG_NAME)-$(PKG_VERSION)
PKG_MAINTAINER:=Thomas Heil <heil@terminal-consulting.de>, \
#!/bin/sh
CLONEURL=https://git.haproxy.org/git/haproxy-2.0.git
-BASE_TAG=v2.0.5
+BASE_TAG=v2.0.6
TMP_REPODIR=tmprepo
PATCHESDIR=patches
--- /dev/null
+commit be17bb8fc37db4e12ea24c9480ff6442017ff656
+Author: Christopher Faulet <cfaulet@haproxy.com>
+Date: Fri Sep 13 15:15:56 2019 +0200
+
+ BUG/MEDIUM: stick-table: Properly handle "show table" with a data type argument
+
+ Since the commit 1b8e68e8 ("MEDIUM: stick-table: Stop handling stick-tables as
+ proxies."), the target field into the table context of the CLI applet was not
+ anymore a pointer to a proxy. It was replaced by a pointer to a stktable. But,
+ some parts of the code was not updated accordingly. the function
+ table_prepare_data_request() still tries to cast it to a pointer to a proxy. The
+ result is totally undefined. With a bit of luck, when the "show table" command
+ is used with a data type, we failed to find a table and the error "Data type not
+ stored in this table" is returned. But crashes may also be experienced.
+
+ This patch fixes the issue #262. It must be backported to 2.0.
+
+ (cherry picked from commit 4e9a83349a9b523830f79f9632ef777ab0bfcc9d)
+ Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
+
+diff --git a/src/stick_table.c b/src/stick_table.c
+index 4b04f18c..8528baa0 100644
+--- a/src/stick_table.c
++++ b/src/stick_table.c
+@@ -3601,8 +3601,7 @@ static int table_prepare_data_request(struct appctx *appctx, char **args)
+ return 1;
+ }
+
+- if (!((struct proxy *)appctx->ctx.table.target)->table ||
+- !((struct proxy *)appctx->ctx.table.target)->table->data_ofs[appctx->ctx.table.data_type]) {
++ if (!((struct stktable *)appctx->ctx.table.target)->data_ofs[appctx->ctx.table.data_type]) {
+ appctx->ctx.cli.severity = LOG_ERR;
+ appctx->ctx.cli.msg = "Data type not stored in this table\n";
+ appctx->st0 = CLI_ST_PRINT;
+++ /dev/null
-commit 3a761682a65e7e7f7baf172f58b15e567a685387
-Author: Willy Tarreau <w@1wt.eu>
-Date: Wed Aug 21 14:12:19 2019 +0200
-
- MINOR: debug: indicate the applet name when the task is task_run_applet()
-
- This allows to figure what applet is currently being executed (and likely
- hung).
-
- (cherry picked from commit a512b02f67a30ab5519d04f8c8b1263415321c85)
- [wt: backported to improve troubleshooting when the watchdog fires]
- Signed-off-by: Willy Tarreau <w@1wt.eu>
-
-diff --git a/src/debug.c b/src/debug.c
-index 3077e97c..36cc9e71 100644
---- a/src/debug.c
-+++ b/src/debug.c
-@@ -90,6 +90,7 @@ void ha_thread_dump(struct buffer *buf, int thr, int calling_tid)
- void ha_task_dump(struct buffer *buf, const struct task *task, const char *pfx)
- {
- const struct stream *s = NULL;
-+ const struct appctx __maybe_unused *appctx = NULL;
-
- if (!task) {
- chunk_appendf(buf, "0\n");
-@@ -110,7 +111,7 @@ void ha_task_dump(struct buffer *buf, const struct task *task, const char *pfx)
- task->call_date ? " ns ago" : "");
-
- chunk_appendf(buf, "%s"
-- " fct=%p (%s) ctx=%p\n",
-+ " fct=%p (%s) ctx=%p",
- pfx,
- task->process,
- task->process == process_stream ? "process_stream" :
-@@ -119,6 +120,11 @@ void ha_task_dump(struct buffer *buf, const struct task *task, const char *pfx)
- "?",
- task->context);
-
-+ if (task->process == task_run_applet && (appctx = task->context))
-+ chunk_appendf(buf, "(%s)\n", appctx->applet->name);
-+ else
-+ chunk_appendf(buf, "\n");
-+
- if (task->process == process_stream && task->context)
- s = (struct stream *)task->context;
- else if (task->process == task_run_applet && task->context)
--- /dev/null
+commit 0e01256a314a6f432ab9826dc9b862e8159dbc48
+Author: Christopher Faulet <cfaulet@haproxy.com>
+Date: Wed Sep 18 11:07:20 2019 +0200
+
+ BUG/MINOR: mux-h2: Be sure to have a connection to unsubcribe
+
+ When the mux is released, It must own the connection to unsubcribe.
+ This patch must be backported to 2.0.
+
+ (cherry picked from commit 21d849f52fc64c51e5abf5a8bd69f2aeff8b3125)
+ Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
+
+diff --git a/src/mux_h2.c b/src/mux_h2.c
+index 984a81bd..e6bfd03d 100644
+--- a/src/mux_h2.c
++++ b/src/mux_h2.c
+@@ -677,9 +677,9 @@ static void h2_release(struct h2c *h2c)
+ }
+ if (h2c->wait_event.tasklet)
+ tasklet_free(h2c->wait_event.tasklet);
+- if (h2c->wait_event.events != 0)
++ if (conn && h2c->wait_event.events != 0)
+ conn->xprt->unsubscribe(conn, conn->xprt_ctx, h2c->wait_event.events,
+- &h2c->wait_event);
++ &h2c->wait_event);
+
+ pool_free(pool_head_h2c, h2c);
+ }
+++ /dev/null
-commit fe575b5ca645d6751fba56efa907952eda200b09
-Author: Willy Tarreau <w@1wt.eu>
-Date: Wed Aug 21 13:17:37 2019 +0200
-
- MINOR: tools: add append_prefixed_str()
-
- This is somewhat related to indent_msg() except that this one places a
- known prefix at the beginning of each line, allows to replace the EOL
- character, and not to insert a prefix on the first line if not desired.
- It works with a normal output buffer/chunk so it doesn't need to allocate
- anything nor to modify the input string. It is suitable for use in multi-
- line backtraces.
-
- (cherry picked from commit a2c9911ace8537e0a350daf8d981170a001b6c7a)
- [wt: backported to improve troubleshooting when the watchdog fires]
- Signed-off-by: Willy Tarreau <w@1wt.eu>
-
-diff --git a/include/common/standard.h b/include/common/standard.h
-index 0f4b1870..cdefc9f5 100644
---- a/include/common/standard.h
-+++ b/include/common/standard.h
-@@ -1238,6 +1238,7 @@ char *memprintf(char **out, const char *format, ...)
- * free(err);
- */
- char *indent_msg(char **out, int level);
-+int append_prefixed_str(struct buffer *out, const char *in, const char *pfx, char eol, int first);
-
- /* removes environment variable <name> from the environment as found in
- * environ. This is only provided as an alternative for systems without
-diff --git a/src/standard.c b/src/standard.c
-index 2f205f74..717c14a9 100644
---- a/src/standard.c
-+++ b/src/standard.c
-@@ -3709,6 +3709,41 @@ char *indent_msg(char **out, int level)
- return ret;
- }
-
-+/* makes a copy of message <in> into <out>, with each line prefixed with <pfx>
-+ * and end of lines replaced with <eol> if not 0. The first line to indent has
-+ * to be indicated in <first> (starts at zero), so that it is possible to skip
-+ * indenting the first line if it has to be appended after an existing message.
-+ * Empty strings are never indented, and NULL strings are considered empty both
-+ * for <in> and <pfx>. It returns non-zero if an EOL was appended as the last
-+ * character, non-zero otherwise.
-+ */
-+int append_prefixed_str(struct buffer *out, const char *in, const char *pfx, char eol, int first)
-+{
-+ int bol, lf;
-+ int pfxlen = pfx ? strlen(pfx) : 0;
-+
-+ if (!in)
-+ return 0;
-+
-+ bol = 1;
-+ lf = 0;
-+ while (*in) {
-+ if (bol && pfxlen) {
-+ if (first > 0)
-+ first--;
-+ else
-+ b_putblk(out, pfx, pfxlen);
-+ bol = 0;
-+ }
-+
-+ lf = (*in == '\n');
-+ bol |= lf;
-+ b_putchr(out, (lf && eol) ? eol : *in);
-+ in++;
-+ }
-+ return lf;
-+}
-+
- /* removes environment variable <name> from the environment as found in
- * environ. This is only provided as an alternative for systems without
- * unsetenv() (old Solaris and AIX versions). THIS IS NOT THREAD SAFE.
--- /dev/null
+commit 96b88f2e605e76f2a472cf9fa83398ff242d47bb
+Author: Christopher Faulet <cfaulet@haproxy.com>
+Date: Mon Sep 23 15:28:20 2019 +0200
+
+ BUG/MAJOR: mux-h2: Handle HEADERS frames received after a RST_STREAM frame
+
+ As stated in the RFC7540#5.1, an endpoint that receives any frame other than
+ PRIORITY after receiving a RST_STREAM MUST treat that as a stream error of type
+ STREAM_CLOSED. However, frames carrying compression state must still be
+ processed before being dropped to keep the HPACK decoder synchronized. This had
+ to be the purpose of the commit 8d9ac3ed8b ("BUG/MEDIUM: mux-h2: do not abort
+ HEADERS frame before decoding them"). But, the test on the frame type was
+ inverted.
+
+ This bug is major because desynchronizing the HPACK decoder leads to mixup
+ indexed headers in messages. From the time an HEADERS frame is received and
+ ignored for a closed stream, wrong headers may be sent to the following streams.
+
+ This patch may fix several bugs reported on github (#116, #290, #292). It must
+ be backported to 2.0 and 1.9.
+
+ (cherry picked from commit 6884aa3eb00d1a5eb6f9c81a3a00288c13652938)
+ Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
+
+diff --git a/src/mux_h2.c b/src/mux_h2.c
+index e6bfd03d..eb773a26 100644
+--- a/src/mux_h2.c
++++ b/src/mux_h2.c
+@@ -2106,6 +2106,9 @@ static struct h2s *h2c_frt_handle_headers(struct h2c *h2c, struct h2s *h2s)
+ */
+ static struct h2s *h2c_bck_handle_headers(struct h2c *h2c, struct h2s *h2s)
+ {
++ struct buffer rxbuf = BUF_NULL;
++ unsigned long long body_len = 0;
++ uint32_t flags = 0;
+ int error;
+
+ if (!b_size(&h2c->dbuf))
+@@ -2114,7 +2117,18 @@ static struct h2s *h2c_bck_handle_headers(struct h2c *h2c, struct h2s *h2s)
+ if (b_data(&h2c->dbuf) < h2c->dfl && !b_full(&h2c->dbuf))
+ return NULL; // incomplete frame
+
+- error = h2c_decode_headers(h2c, &h2s->rxbuf, &h2s->flags, &h2s->body_len);
++ if (h2s->st != H2_SS_CLOSED) {
++ error = h2c_decode_headers(h2c, &h2s->rxbuf, &h2s->flags, &h2s->body_len);
++ }
++ else {
++ /* the connection was already killed by an RST, let's consume
++ * the data and send another RST.
++ */
++ error = h2c_decode_headers(h2c, &rxbuf, &flags, &body_len);
++ h2s_error(h2s, H2_ERR_STREAM_CLOSED);
++ h2c->st0 = H2_CS_FRAME_E;
++ goto send_rst;
++ }
+
+ /* unrecoverable error ? */
+ if (h2c->st0 >= H2_CS_ERROR)
+@@ -2150,6 +2164,15 @@ static struct h2s *h2c_bck_handle_headers(struct h2c *h2c, struct h2s *h2s)
+ }
+
+ return h2s;
++
++ send_rst:
++ /* make the demux send an RST for the current stream. We may only
++ * do this if we're certain that the HEADERS frame was properly
++ * decompressed so that the HPACK decoder is still kept up to date.
++ */
++ h2_release_buf(h2c, &rxbuf);
++ h2c->st0 = H2_CS_FRAME_E;
++ return h2s;
+ }
+
+ /* processes a DATA frame. Returns > 0 on success or zero on missing data.
+@@ -2459,7 +2482,7 @@ static void h2_process_demux(struct h2c *h2c)
+ goto strm_err;
+ }
+
+- if (h2s->flags & H2_SF_RST_RCVD && h2_ft_bit(h2c->dft) & H2_FT_HDR_MASK) {
++ if (h2s->flags & H2_SF_RST_RCVD && !(h2_ft_bit(h2c->dft) & H2_FT_HDR_MASK)) {
+ /* RFC7540#5.1:closed: an endpoint that
+ * receives any frame other than PRIORITY after
+ * receiving a RST_STREAM MUST treat that as a
+++ /dev/null
-commit 83a5ff403a2cd625832f01032c0feb8bf9c2a89e
-Author: Willy Tarreau <w@1wt.eu>
-Date: Wed Aug 21 14:14:50 2019 +0200
-
- MINOR: lua: export applet and task handlers
-
- The current functions are seen outside from the debugging code and are
- convenient to export so that we can improve the thread dump output :
-
- void hlua_applet_tcp_fct(struct appctx *ctx);
- void hlua_applet_http_fct(struct appctx *ctx);
- struct task *hlua_process_task(struct task *task, void *context, unsigned short state);
-
- Of course they are only available when USE_LUA is defined.
-
- (cherry picked from commit 60409db0b1743d670e54244425f6e08c389b7dde)
- [wt: backported to improve troubleshooting when the watchdog fires;
- while in 2.0 we also have hlua_applet_htx_fct(), it's not
- visible outside hlua_applet_http_fct() so we don't care]
- Signed-off-by: Willy Tarreau <w@1wt.eu>
-
-diff --git a/include/proto/hlua.h b/include/proto/hlua.h
-index 7ad5a99e..32468b77 100644
---- a/include/proto/hlua.h
-+++ b/include/proto/hlua.h
-@@ -27,6 +27,9 @@
- void hlua_ctx_destroy(struct hlua *lua);
- void hlua_init();
- int hlua_post_init();
-+void hlua_applet_tcp_fct(struct appctx *ctx);
-+void hlua_applet_http_fct(struct appctx *ctx);
-+struct task *hlua_process_task(struct task *task, void *context, unsigned short state);
-
- #else /* USE_LUA */
-
-diff --git a/src/hlua.c b/src/hlua.c
-index d2708f87..813aa724 100644
---- a/src/hlua.c
-+++ b/src/hlua.c
-@@ -6237,7 +6237,7 @@ __LJMP static int hlua_set_nice(lua_State *L)
- * Task wrapper are longjmp safe because the only one Lua code
- * executed is the safe hlua_ctx_resume();
- */
--static struct task *hlua_process_task(struct task *task, void *context, unsigned short state)
-+struct task *hlua_process_task(struct task *task, void *context, unsigned short state)
- {
- struct hlua *hlua = context;
- enum hlua_exec status;
-@@ -7045,7 +7045,7 @@ static int hlua_applet_tcp_init(struct appctx *ctx, struct proxy *px, struct str
- return 1;
- }
-
--static void hlua_applet_tcp_fct(struct appctx *ctx)
-+void hlua_applet_tcp_fct(struct appctx *ctx)
- {
- struct stream_interface *si = ctx->owner;
- struct stream *strm = si_strm(si);
-@@ -7417,7 +7417,7 @@ static void hlua_applet_htx_fct(struct appctx *ctx)
- goto done;
- }
-
--static void hlua_applet_http_fct(struct appctx *ctx)
-+void hlua_applet_http_fct(struct appctx *ctx)
- {
- struct stream_interface *si = ctx->owner;
- struct stream *strm = si_strm(si);
--- /dev/null
+commit b143711afe833f9824a7372b88ef9435ff240e9a
+Author: Willy Tarreau <w@1wt.eu>
+Date: Tue Sep 3 18:55:02 2019 +0200
+
+ BUG/MEDIUM: check/threads: make external checks run exclusively on thread 1
+
+ See GH issues #141 for all the context. In short, registered signal
+ handlers are not inherited by other threads during startup, which is
+ normally not a problem, except that we need that the same thread as
+ the one doing the fork() cleans up the old process using waitpid()
+ once its death is reported via SIGCHLD, as happens in external checks.
+
+ The only simple solution to this at the moment is to make sure that
+ external checks are exclusively run on the first thread, the one
+ which registered the signal handlers on startup. It will be far more
+ than enough anyway given that external checks must not require to be
+ load balanced on multiple threads! A more complex solution could be
+ designed over the long term to let each thread deal with all signals
+ but it sounds overkill.
+
+ This must be backported as far as 1.8.
+
+ (cherry picked from commit 6dd4ac890b5810b0f0fe81725fda05ad3d052849)
+ Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
+
+diff --git a/src/checks.c b/src/checks.c
+index 7b55abda..b088da2e 100644
+--- a/src/checks.c
++++ b/src/checks.c
+@@ -2175,7 +2175,7 @@ static struct task *process_chk_proc(struct task *t, void *context, unsigned sho
+ /* a success was detected */
+ check_notify_success(check);
+ }
+- task_set_affinity(t, MAX_THREADS_MASK);
++ task_set_affinity(t, 1);
+ check->state &= ~CHK_ST_INPROGRESS;
+
+ pid_list_del(check->curpid);
+@@ -2423,8 +2423,13 @@ static int start_check_task(struct check *check, int mininter,
+ int nbcheck, int srvpos)
+ {
+ struct task *t;
++ unsigned long thread_mask = MAX_THREADS_MASK;
++
++ if (check->type == PR_O2_EXT_CHK)
++ thread_mask = 1;
++
+ /* task for the check */
+- if ((t = task_new(MAX_THREADS_MASK)) == NULL) {
++ if ((t = task_new(thread_mask)) == NULL) {
+ ha_alert("Starting [%s:%s] check: out of memory.\n",
+ check->server->proxy->id, check->server->id);
+ return 0;
+++ /dev/null
-commit 4856b36cba80a259a78645753520323caca78d0f
-Author: Willy Tarreau <w@1wt.eu>
-Date: Wed Aug 21 14:16:02 2019 +0200
-
- MEDIUM: debug: make the thread dump code show Lua backtraces
-
- When we dump a thread's state (show thread, panic) we don't know if
- anything is happening in Lua, which can be problematic especially when
- calling external functions. With this patch, the thread dump code can
- now detect if we're running in a global Lua task (hlua_process_task),
- or in a TCP or HTTP Lua service (task_run_applet and applet.fct ==
- hlua_applet_tcp_fct or http_applet_http_fct), or a fetch/converter
- from an analyser (s->hlua != NULL). In such situations, it's able to
- append a formatted Lua backtrace of the Lua execution path with
- function names, file names and line numbers.
-
- Note that a shorter alternative could be to call "luaL_where(hlua->T,0)"
- which only prints the current location, but it's not necessarily sufficient
- for complex code.
-
- (cherry picked from commit 78a7cb648ca33823c06430cedc6859ea7e7cd5df)
- [wt: backported to improve troubleshooting when the watchdog fires]
- Signed-off-by: Willy Tarreau <w@1wt.eu>
-
-diff --git a/src/debug.c b/src/debug.c
-index 36cc9e71..79bea884 100644
---- a/src/debug.c
-+++ b/src/debug.c
-@@ -26,6 +26,7 @@
-
- #include <proto/cli.h>
- #include <proto/fd.h>
-+#include <proto/hlua.h>
- #include <proto/stream_interface.h>
- #include <proto/task.h>
-
-@@ -91,6 +92,7 @@ void ha_task_dump(struct buffer *buf, const struct task *task, const char *pfx)
- {
- const struct stream *s = NULL;
- const struct appctx __maybe_unused *appctx = NULL;
-+ struct hlua __maybe_unused *hlua = NULL;
-
- if (!task) {
- chunk_appendf(buf, "0\n");
-@@ -117,6 +119,9 @@ void ha_task_dump(struct buffer *buf, const struct task *task, const char *pfx)
- task->process == process_stream ? "process_stream" :
- task->process == task_run_applet ? "task_run_applet" :
- task->process == si_cs_io_cb ? "si_cs_io_cb" :
-+#ifdef USE_LUA
-+ task->process == hlua_process_task ? "hlua_process_task" :
-+#endif
- "?",
- task->context);
-
-@@ -134,6 +139,30 @@ void ha_task_dump(struct buffer *buf, const struct task *task, const char *pfx)
-
- if (s)
- stream_dump(buf, s, pfx, '\n');
-+
-+#ifdef USE_LUA
-+ hlua = NULL;
-+ if (s && (hlua = s->hlua)) {
-+ chunk_appendf(buf, "%sCurrent executing Lua from a stream analyser -- ", pfx);
-+ }
-+ else if (task->process == hlua_process_task && (hlua = task->context)) {
-+ chunk_appendf(buf, "%sCurrent executing a Lua task -- ", pfx);
-+ }
-+ else if (task->process == task_run_applet && (appctx = task->context) &&
-+ (appctx->applet->fct == hlua_applet_tcp_fct && (hlua = appctx->ctx.hlua_apptcp.hlua))) {
-+ chunk_appendf(buf, "%sCurrent executing a Lua TCP service -- ", pfx);
-+ }
-+ else if (task->process == task_run_applet && (appctx = task->context) &&
-+ (appctx->applet->fct == hlua_applet_http_fct && (hlua = appctx->ctx.hlua_apphttp.hlua))) {
-+ chunk_appendf(buf, "%sCurrent executing a Lua HTTP service -- ", pfx);
-+ }
-+
-+ if (hlua) {
-+ luaL_traceback(hlua->T, hlua->T, NULL, 0);
-+ if (!append_prefixed_str(buf, lua_tostring(hlua->T, -1), pfx, '\n', 1))
-+ b_putchr(buf, '\n');
-+ }
-+#endif
- }
-
- /* This function dumps all profiling settings. It returns 0 if the output
+++ /dev/null
-commit 9a408abbb8559df5718bc696bd9c3934c6500d63
-Author: Willy Tarreau <w@1wt.eu>
-Date: Fri Aug 23 08:11:36 2019 +0200
-
- BUG/MEDIUM: mux-h1: do not truncate trailing 0CRLF on buffer boundary
-
- The H1 message parser calls the various message block parsers with an
- offset indicating where in the buffer to start from, and only consumes
- the data at the end of the parsing. The headers and trailers parsers
- have a condition detecting if a headers or trailers block is too large
- to fit into the buffer. This is detected by an incomplete block while
- the buffer is full. Unfortunately it doesn't take into account the fact
- that the block may be parsed after other blocks that are still present
- in the buffer, resulting in aborting some transfers early as reported
- in issue #231. This typically happens if a trailers block is incomplete
- at the end of a buffer full of data, which typically happens with data
- sizes multiple of the buffer size minus less than the trailers block
- size. It also happens with the CRLF that follows the 0-sized chunk of
- any transfer-encoded contents is itself on such a boundary since this
- CRLF is technically part of the trailers block. This can be reproduced
- by asking a server to retrieve exactly 31532 or 31533 bytes of static
- data using chunked encoding with curl, which reports:
-
- transfer closed with outstanding read data remaining
-
- This issue was revealed in 2.0 and does not affect 1.9 because in 1.9
- the trailers block was processed at once as part of the data block
- processing, and would simply give up and wait for the rest of the data
- to arrive.
-
- It's interesting to note that the headers block parsing is also affected
- by this issue but in practice it has a much more limited impact since a
- headers block is normally only parsed at the beginning of a buffer. The
- only case where it seems to matter is when dealing with a response buffer
- full of 100-continue header blocks followed by a regular header block,
- which will then be rejected for the same reason.
-
- This fix must be backported to 2.0 and partially to 1.9 (the headers
- block part).
-
- (cherry picked from commit 347f464d4e5a8a2bf3acd2411a6c8228e605e7f6)
- Signed-off-by: Willy Tarreau <w@1wt.eu>
-
-diff --git a/src/mux_h1.c b/src/mux_h1.c
-index fa694c41..01f225a2 100644
---- a/src/mux_h1.c
-+++ b/src/mux_h1.c
-@@ -995,10 +995,11 @@ static size_t h1_process_headers(struct h1s *h1s, struct h1m *h1m, struct htx *h
- ret = h1_headers_to_hdr_list(b_peek(buf, *ofs), b_tail(buf),
- hdrs, sizeof(hdrs)/sizeof(hdrs[0]), h1m, &h1sl);
- if (ret <= 0) {
-- /* Incomplete or invalid message. If the buffer is full, it's an
-- * error because headers are too large to be handled by the
-- * parser. */
-- if (ret < 0 || (!ret && !buf_room_for_htx_data(buf)))
-+ /* Incomplete or invalid message. If the input buffer only
-+ * contains headers and is full, which is detected by it being
-+ * full and the offset to be zero, it's an error because
-+ * headers are too large to be handled by the parser. */
-+ if (ret < 0 || (!ret && !*ofs && !buf_room_for_htx_data(buf)))
- goto error;
- goto end;
- }
-@@ -1339,10 +1340,11 @@ static size_t h1_process_trailers(struct h1s *h1s, struct h1m *h1m, struct htx *
- ret = h1_headers_to_hdr_list(b_peek(buf, *ofs), b_tail(buf),
- hdrs, sizeof(hdrs)/sizeof(hdrs[0]), &tlr_h1m, NULL);
- if (ret <= 0) {
-- /* Incomplete or invalid trailers. If the buffer is full, it's
-- * an error because traliers are too large to be handled by the
-- * parser. */
-- if (ret < 0 || (!ret && !buf_room_for_htx_data(buf)))
-+ /* Incomplete or invalid trailers. If the input buffer only
-+ * contains trailers and is full, which is detected by it being
-+ * full and the offset to be zero, it's an error because
-+ * trailers are too large to be handled by the parser. */
-+ if (ret < 0 || (!ret && !*ofs && !buf_room_for_htx_data(buf)))
- goto error;
- goto end;
- }
--- /dev/null
+commit cfdef9f428869f1570d51a5bd8975d8a42f31eab
+Author: Christopher Faulet <cfaulet@haproxy.com>
+Date: Mon Sep 23 15:57:29 2019 +0200
+
+ BUG/MINOR: stream-int: Process connection/CS errors first in si_cs_send()
+
+ Errors on the connections or the conn-stream must always be processed in
+ si_cs_send(), even if the stream-interface is already subscribed on
+ sending. This patch does not fix any concrete bug per-se. But it is required by
+ the following one to handle those errors during synchronous sends.
+
+ This patch must be backported with the following one to 2.0 and probably to 1.9
+ too, but with caution because the code is really different.
+
+ (cherry picked from commit 328ed220a8c5211aa8b6f37b982f319cf6b3f3d1)
+ Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
+
+diff --git a/src/stream_interface.c b/src/stream_interface.c
+index 7d89cc90..4130444e 100644
+--- a/src/stream_interface.c
++++ b/src/stream_interface.c
+@@ -652,10 +652,6 @@ int si_cs_send(struct conn_stream *cs)
+ int ret;
+ int did_send = 0;
+
+- /* We're already waiting to be able to send, give up */
+- if (si->wait_event.events & SUB_RETRY_SEND)
+- return 0;
+-
+ if (conn->flags & CO_FL_ERROR || cs->flags & (CS_FL_ERROR|CS_FL_ERR_PENDING)) {
+ /* We're probably there because the tasklet was woken up,
+ * but process_stream() ran before, detected there were an
+@@ -669,6 +665,10 @@ int si_cs_send(struct conn_stream *cs)
+ return 1;
+ }
+
++ /* We're already waiting to be able to send, give up */
++ if (si->wait_event.events & SUB_RETRY_SEND)
++ return 0;
++
+ /* we might have been called just after an asynchronous shutw */
+ if (conn->flags & CO_FL_SOCK_WR_SH || oc->flags & CF_SHUTW)
+ return 1;
+++ /dev/null
-commit 620381599324e15403002270637a3b677c3fe7e5
-Author: Willy Tarreau <w@1wt.eu>
-Date: Fri Aug 23 09:29:29 2019 +0200
-
- BUG/MEDIUM: mux-h1: do not report errors on transfers ending on buffer full
-
- If a receipt ends with the HTX buffer full and everything is completed except
- appending the HTX EOM block, we end up detecting an error because the H1
- parser did not switch to H1_MSG_DONE yet while all conditions for an end of
- stream and end of buffer are met. This can be detected by retrieving 31532
- or 31533 chunk-encoded bytes over H1 and seeing haproxy log "SD--" at the
- end of a successful transfer.
-
- Ideally the EOM part should be totally independent on the H1 message state
- since the block was really parsed and finished. So we should switch to a
- last state requiring to send only EOM. However this needs a few risky
- changes. This patch aims for simplicity and backport safety, thus it only
- adds a flag to the H1 stream indicating that an EOM is still needed, and
- excludes this condition from the ones used to detect end of processing. A
- cleaner approach needs to be studied, either by adding a state before DONE
- or by setting DONE once the various blocks are parsed and before trying to
- send EOM.
-
- This fix must be backported to 2.0. The issue does not seem to affect 1.9
- though it is not yet known why, probably that it is related to the different
- encoding of trailers which always leaves a bit of room to let EOM be stored.
-
- (cherry picked from commit 0bb5a5c4b5ad375b1254c2e8bec2dd5ea85d6ebb)
- Signed-off-by: Willy Tarreau <w@1wt.eu>
-
-diff --git a/src/mux_h1.c b/src/mux_h1.c
-index 01f225a2..b9a37ce5 100644
---- a/src/mux_h1.c
-+++ b/src/mux_h1.c
-@@ -67,7 +67,8 @@
- #define H1S_F_BUF_FLUSH 0x00000100 /* Flush input buffer and don't read more data */
- #define H1S_F_SPLICED_DATA 0x00000200 /* Set when the kernel splicing is in used */
- #define H1S_F_HAVE_I_TLR 0x00000800 /* Set during input process to know the trailers were processed */
--/* 0x00001000 .. 0x00002000 unused */
-+#define H1S_F_APPEND_EOM 0x00001000 /* Send EOM to the HTX buffer */
-+/* 0x00002000 .. 0x00002000 unused */
- #define H1S_F_HAVE_O_CONN 0x00004000 /* Set during output process to know connection mode was processed */
-
- /* H1 connection descriptor */
-@@ -954,9 +955,12 @@ static size_t h1_eval_htx_res_size(struct h1m *h1m, union h1_sl *h1sl, struct ht
- */
- static size_t h1_process_eom(struct h1s *h1s, struct h1m *h1m, struct htx *htx, size_t max)
- {
-- if (max < sizeof(struct htx_blk) + 1 || !htx_add_endof(htx, HTX_BLK_EOM))
-+ if (max < sizeof(struct htx_blk) + 1 || !htx_add_endof(htx, HTX_BLK_EOM)) {
-+ h1s->flags |= H1S_F_APPEND_EOM;
- return 0;
-+ }
-
-+ h1s->flags &= ~H1S_F_APPEND_EOM;
- h1m->state = H1_MSG_DONE;
- h1s->cs->flags |= CS_FL_EOI;
- return (sizeof(struct htx_blk) + 1);
-@@ -1472,7 +1476,8 @@ static size_t h1_process_input(struct h1c *h1c, struct buffer *buf, size_t count
- else if (h1s_data_pending(h1s) && !htx_is_empty(htx))
- h1s->cs->flags |= CS_FL_RCV_MORE | CS_FL_WANT_ROOM;
-
-- if ((h1s->flags & H1S_F_REOS) && (!h1s_data_pending(h1s) || htx_is_empty(htx))) {
-+ if (((h1s->flags & (H1S_F_REOS|H1S_F_APPEND_EOM)) == H1S_F_REOS) &&
-+ (!h1s_data_pending(h1s) || htx_is_empty(htx))) {
- h1s->cs->flags |= CS_FL_EOS;
- if (h1m->state > H1_MSG_LAST_LF && h1m->state < H1_MSG_DONE)
- h1s->cs->flags |= CS_FL_ERROR;
--- /dev/null
+commit a80c11777b09ea494b5da76a5bcb096851fb6097
+Author: Christopher Faulet <cfaulet@haproxy.com>
+Date: Mon Sep 23 16:11:57 2019 +0200
+
+ BUG/MEDIUM: stream-int: Process connection/CS errors during synchronous sends
+
+ If an error occurred on the connection or the conn-stream, no syncrhonous send
+ is performed. If the error was not already processed and there is no more I/O,
+ it will never be processed and the stream will never be notified of this
+ error. This may block the stream until a timeout is reached or infinitly if
+ there is no timeout.
+
+ Concretly, this bug can be triggered time to time with h2spec, running the test
+ "http2/5.1.1/2".
+
+ This patch depends on the commit 328ed220a "BUG/MINOR: stream-int: Process
+ connection/CS errors first in si_cs_send()". Both must be backported to 2.0 and
+ probably to 1.9. In 1.9, the code is totally different, so this patch would have
+ to be adapted.
+
+ (cherry picked from commit e55a5a41713b629d349ba020183744a38129b892)
+ Signed-off-by: Christopher Faulet <cfaulet@haproxy.com>
+
+diff --git a/src/stream_interface.c b/src/stream_interface.c
+index 4130444e..ef0fea7f 100644
+--- a/src/stream_interface.c
++++ b/src/stream_interface.c
+@@ -922,12 +922,6 @@ void si_sync_send(struct stream_interface *si)
+ if (!cs)
+ return;
+
+- if (cs->flags & (CS_FL_ERROR|CS_FL_ERR_PENDING))
+- return;
+-
+- if (cs->conn->flags & CO_FL_ERROR)
+- return;
+-
+ si_cs_send(cs);
+ }
+
--- /dev/null
+commit 489bbd321e46c110ab9d92fb91725870d7c40491
+Author: Willy Tarreau <w@1wt.eu>
+Date: Tue Sep 24 10:43:03 2019 +0200
+
+ BUG/MEDIUM: checks: make sure the connection is ready before trying to recv
+
+ As identified in issue #278, the backport of commit c594039225 ("BUG/MINOR:
+ checks: do not uselessly poll for reads before the connection is up")
+ introduced a regression in 2.0 when default checks are enabled (not
+ "option tcp-check"), but it did not affect 2.1.
+
+ What happens is that in 2.0 and earlier we have the fd cache which makes
+ a speculative call to the I/O functions after an attempt to connect, and
+ the __event_srv_chk_r() function was absolutely not designed to be called
+ while a connection attempt is still pending. Thus what happens is that the
+ test for success/failure expects the verdict to be final before waking up
+ the check task, and since the connection is not yet validated, it fails.
+ It will usually work over the loopback depending on scheduling, which is
+ why it doesn't fail in reg tests.
+
+ In 2.1 after the failed connect(), we subscribe to polling and usually come
+ back with a validated connection, so the function is not expected to be
+ called before it completes, except if it happens as a side effect of some
+ spurious wake calls, which should not have any effect on such a check.
+
+ The other check types are not impacted by this issue because they all
+ check for a minimum data length in the buffer, and wait for more data
+ until they are satisfied.
+
+ This patch fixes the issue by explicitly checking that the connection
+ is established before trying to read or to give a verdict. This way the
+ function becomes safe to call regardless of the connection status (even
+ if it's still totally ugly).
+
+ This fix must be backported to 2.0.
+
+ (cherry picked from commit 0f0393fc0d2badc5ea329844691f06ba28827f78)
+ Signed-off-by: Willy Tarreau <w@1wt.eu>
+
+diff --git a/src/checks.c b/src/checks.c
+index b088da2e..06f47ad9 100644
+--- a/src/checks.c
++++ b/src/checks.c
+@@ -875,6 +875,9 @@ static void __event_srv_chk_r(struct conn_stream *cs)
+ }
+ }
+
++ /* the rest of the code below expects the connection to be ready! */
++ if (!(conn->flags & CO_FL_CONNECTED) && !done)
++ goto wait_more_data;
+
+ /* Intermediate or complete response received.
+ * Terminate string in b_head(&check->bi) buffer.
+++ /dev/null
-commit 7c80af0fb53f2a1d93a597f7d97cc67996e36be2
-Author: n9@users.noreply.github.com <n9@users.noreply.github.com>
-Date: Fri Aug 23 11:21:05 2019 +0200
-
- DOC: fixed typo in management.txt
-
- replaced fot -> for
- added two periods
-
- (cherry picked from commit 25a1c8e4539c12c19a3fe04aabe563cdac5e36db)
- Signed-off-by: Willy Tarreau <w@1wt.eu>
-
-diff --git a/doc/management.txt b/doc/management.txt
-index 616a040b..ad6011e5 100644
---- a/doc/management.txt
-+++ b/doc/management.txt
-@@ -1549,8 +1549,8 @@ enable agent <backend>/<server>
- level "admin".
-
- enable dynamic-cookie backend <backend>
-- Enable the generation of dynamic cookies fot the backend <backend>
-- A secret key must also be provided
-+ Enable the generation of dynamic cookies for the backend <backend>.
-+ A secret key must also be provided.
-
- enable frontend <frontend>
- Resume a frontend which was temporarily stopped. It is possible that some of
+++ /dev/null
-commit f259fcc00a04e633a7a64f894a719f78f3644867
-Author: Willy Tarreau <w@1wt.eu>
-Date: Mon Aug 26 10:37:39 2019 +0200
-
- BUG/MINOR: mworker: disable SIGPROF on re-exec
-
- If haproxy is built with profiling enabled with -pg, it is possible to
- see the master quit during a reload while it's re-executing itself with
- error code 155 (signal 27) saying "Profile timer expired)". This happens
- if the SIGPROF signal is delivered during the execve() call while the
- handler was already unregistered. The issue itself is not directly inside
- haproxy but it's easy to address. This patch disables this signal before
- calling execvp() during a master reload. A simple test for this consists
- in running this little script with haproxy started in master-worker mode :
-
- $ while usleep 50000; do killall -USR2 haproxy; done
-
- This fix should be backported to all versions using the master-worker
- model.
-
- (cherry picked from commit e0d86e2c1caaaa2141118e3309d479de5f67e855)
- Signed-off-by: Willy Tarreau <w@1wt.eu>
-
-diff --git a/src/haproxy.c b/src/haproxy.c
-index f6f00fc1..c93b0d13 100644
---- a/src/haproxy.c
-+++ b/src/haproxy.c
-@@ -695,6 +695,7 @@ void mworker_reload()
- }
-
- ha_warning("Reexecuting Master process\n");
-+ signal(SIGPROF, SIG_IGN);
- execvp(next_argv[0], next_argv);
-
- ha_warning("Failed to reexecute the master process [%d]: %s\n", pid, strerror(errno));
--- /dev/null
+--- a/Makefile
++++ b/Makefile
+@@ -327,6 +327,15 @@ ifeq ($(TARGET),linux-glibc)
+ USE_GETADDRINFO)
+ endif
+
++# For linux >= 2.6.28 and uclibc
++ifeq ($(TARGET),linux-uclibc)
++ set_target_defaults = $(call default_opts, \
++ USE_POLL USE_TPROXY USE_DL USE_RT USE_NETFILTER \
++ USE_CPU_AFFINITY USE_THREAD USE_EPOLL USE_FUTEX USE_LINUX_TPROXY \
++ USE_ACCEPT4 USE_LINUX_SPLICE USE_PRCTL USE_THREAD_DUMP USE_NS USE_TFO \
++ USE_GETADDRINFO)
++endif
++
+ # Solaris 8 and above
+ ifeq ($(TARGET),solaris)
+ # We also enable getaddrinfo() which works since solaris 8.
+++ /dev/null
-commit b10c8d7641cc8ceae6fba4506b7f987d66109bd9
-Author: Willy Tarreau <w@1wt.eu>
-Date: Mon Aug 26 10:55:52 2019 +0200
-
- BUG/MEDIUM: listener/threads: fix an AB/BA locking issue in delete_listener()
-
- The delete_listener() function takes the listener's lock before taking
- the proto_lock, which is contrary to what other functions do, possibly
- causing an AB/BA deadlock. In practice the two only places where both
- are taken are during protocol_enable_all() and delete_listener(), the
- former being used during startup and the latter during stop. In practice
- during reload floods, it is technically possible for a thread to be
- initializing the listeners while another one is stopping. While this
- is too hard to trigger on 2.0 and above due to the synchronization of
- all threads during startup, it's reasonably easy to do in 1.9 by having
- hundreds of listeners, starting 64 threads and flooding them with reloads
- like this :
-
- $ while usleep 50000; do killall -USR2 haproxy; done
-
- Usually in less than a minute, all threads will be deadlocked. The fix
- consists in always taking the proto_lock before the listener lock. It
- seems to be the only place where these two locks were reversed. This
- fix needs to be backported to 2.0, 1.9, and 1.8.
-
- (cherry picked from commit 6ee9f8df3bfbb811526cff3313da5758b1277bc6)
- Signed-off-by: Willy Tarreau <w@1wt.eu>
-
-diff --git a/src/listener.c b/src/listener.c
-index b5fe2ac2..54c09960 100644
---- a/src/listener.c
-+++ b/src/listener.c
-@@ -595,17 +595,17 @@ int create_listeners(struct bind_conf *bc, const struct sockaddr_storage *ss,
- */
- void delete_listener(struct listener *listener)
- {
-+ HA_SPIN_LOCK(PROTO_LOCK, &proto_lock);
- HA_SPIN_LOCK(LISTENER_LOCK, &listener->lock);
- if (listener->state == LI_ASSIGNED) {
- listener->state = LI_INIT;
-- HA_SPIN_LOCK(PROTO_LOCK, &proto_lock);
- LIST_DEL(&listener->proto_list);
- listener->proto->nb_listeners--;
-- HA_SPIN_UNLOCK(PROTO_LOCK, &proto_lock);
- _HA_ATOMIC_SUB(&jobs, 1);
- _HA_ATOMIC_SUB(&listeners, 1);
- }
- HA_SPIN_UNLOCK(LISTENER_LOCK, &listener->lock);
-+ HA_SPIN_UNLOCK(PROTO_LOCK, &proto_lock);
- }
-
- /* Returns a suitable value for a listener's backlog. It uses the listener's,
--- /dev/null
+--- a/include/common/openssl-compat.h
++++ b/include/common/openssl-compat.h
+@@ -217,7 +217,8 @@ static inline int EVP_PKEY_base_id(EVP_PKEY *pkey)
+ #define TLSEXT_signature_ecdsa 3
+ #endif
+
+-#if (OPENSSL_VERSION_NUMBER < 0x10100000L) || (LIBRESSL_VERSION_NUMBER < 0x20700000L)
++#if (OPENSSL_VERSION_NUMBER < 0x10100000L) || \
++ (defined(LIBRESSL_VERSION_NUMBER) && (LIBRESSL_VERSION_NUMBER < 0x20700000L))
+ #define X509_getm_notBefore X509_get_notBefore
+ #define X509_getm_notAfter X509_get_notAfter
+ #endif
+++ /dev/null
-commit 4db294bc0b7988607f2dfdb9d57974b2ba47cbc3
-Author: Jerome Magnin <jmagnin@haproxy.com>
-Date: Mon Aug 26 11:44:21 2019 +0200
-
- BUG/MEDIUM: url32 does not take the path part into account in the returned hash.
-
- The url32 sample fetch does not take the path part of the URL into
- account. This is because in smp_fetch_url32() we erroneously modify
- path.len and path.ptr before testing their value and building the
- path based part of the hash.
-
- This fixes issue #235
-
- This must be backported as far as 1.9, when HTX was introduced.
-
- (cherry picked from commit 2dd26ca9ff8e642611b8b012d6aee45ea45196bc)
- [wt: adjusted context, we still have legacy in 2.0]
- Signed-off-by: Willy Tarreau <w@1wt.eu>
-
-diff --git a/src/http_fetch.c b/src/http_fetch.c
-index e372a122..6448bde9 100644
---- a/src/http_fetch.c
-+++ b/src/http_fetch.c
-@@ -2735,10 +2735,6 @@ static int smp_fetch_url32(const struct arg *args, struct sample *smp, const cha
- /* now retrieve the path */
- sl = http_get_stline(htx);
- path = http_get_path(htx_sl_req_uri(sl));
-- while (path.len > 0 && *(path.ptr) != '?') {
-- path.ptr++;
-- path.len--;
-- }
- if (path.len && *(path.ptr) == '/') {
- while (path.len--)
- hash = *(path.ptr++) + (hash << 6) + (hash << 16) - hash;
+++ /dev/null
---- a/Makefile
-+++ b/Makefile
-@@ -327,6 +327,15 @@ ifeq ($(TARGET),linux-glibc)
- USE_GETADDRINFO)
- endif
-
-+# For linux >= 2.6.28 and uclibc
-+ifeq ($(TARGET),linux-uclibc)
-+ set_target_defaults = $(call default_opts, \
-+ USE_POLL USE_TPROXY USE_DL USE_RT USE_NETFILTER \
-+ USE_CPU_AFFINITY USE_THREAD USE_EPOLL USE_FUTEX USE_LINUX_TPROXY \
-+ USE_ACCEPT4 USE_LINUX_SPLICE USE_PRCTL USE_THREAD_DUMP USE_NS USE_TFO \
-+ USE_GETADDRINFO)
-+endif
-+
- # Solaris 8 and above
- ifeq ($(TARGET),solaris)
- # We also enable getaddrinfo() which works since solaris 8.
+++ /dev/null
---- a/include/common/openssl-compat.h
-+++ b/include/common/openssl-compat.h
-@@ -217,7 +217,8 @@ static inline int EVP_PKEY_base_id(EVP_PKEY *pkey)
- #define TLSEXT_signature_ecdsa 3
- #endif
-
--#if (OPENSSL_VERSION_NUMBER < 0x10100000L) || (LIBRESSL_VERSION_NUMBER < 0x20700000L)
-+#if (OPENSSL_VERSION_NUMBER < 0x10100000L) || \
-+ (defined(LIBRESSL_VERSION_NUMBER) && (LIBRESSL_VERSION_NUMBER < 0x20700000L))
- #define X509_getm_notBefore X509_get_notBefore
- #define X509_getm_notAfter X509_get_notAfter
- #endif
include $(TOPDIR)/rules.mk
PKG_NAME:=hcxdumptool
-PKG_VERSION:=5.2.0
+PKG_VERSION:=5.2.2
PKG_RELEASE:=1
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://codeload.github.com/zerbea/hcxdumptool/tar.gz/$(PKG_VERSION)?
-PKG_HASH:=9da9c8c20b93f6a0a262436a862e376bd3cfd05fb879efcf480ad962a14496c7
+PKG_HASH:=b091171fe5e6f926ace3997219dfc5a84ce6d1f2080d3320d456f88282019057
PKG_MAINTAINER:=Andreas Nilsen <adde88@gmail.com>
PKG_LICENSE:=MIT
include $(TOPDIR)/rules.mk
PKG_NAME:=hcxtools
-PKG_VERSION:=5.2.0
+PKG_VERSION:=5.2.2
PKG_RELEASE:=1
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://codeload.github.com/zerbea/hcxtools/tar.gz/$(PKG_VERSION)?
-PKG_HASH:=1e8120c5451a38645ade0be4255d3c7f4a837b7611b44d4a5a066e563ad8a112
+PKG_HASH:=a2dd9559e1cc541f07f7a4c2451c295896355a94cfc970dc5cdceb40e605ee7e
PKG_MAINTAINER:=Andreas Nilsen <adde88@gmail.com>
PKG_LICENSE:=MIT
include $(TOPDIR)/rules.mk
PKG_NAME:=i2pd
-PKG_VERSION:=2.26.0
+PKG_VERSION:=2.28.0
PKG_RELEASE:=1
PKG_BUILD_PARALLEL:=1
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://codeload.github.com/PurpleI2P/i2pd/tar.gz/$(PKG_VERSION)?
-PKG_HASH:=2ae18978c8796bb6b45bc8cfe4e1f25377e0cfc9fcf9f46054b09dc3384eef63
+PKG_HASH:=181327edc7cf5b3c25ee51ecc518a4da90fa9e421eca2107996791038a514a21
PKG_MAINTAINER:=David Yang <mmyangfl@gmail.com>
PKG_LICENSE:=BSD-3-Clause
/etc/config/i2pd
/etc/i2pd/i2pd.conf
/etc/i2pd/tunnels.conf
+ /etc/i2pd/tunnels.d/*
endef
TARGET_LDFLAGS+=-latomic
s/datadir = \/var\/lib/datadir = \/etc/ \
' $(1)/etc/i2pd/i2pd.conf
$(INSTALL_CONF) $(PKG_BUILD_DIR)/contrib/tunnels.conf $(1)/etc/i2pd
+ $(INSTALL_DIR) $(1)/etc/i2pd/tunnels.d
+ $(INSTALL_CONF) $(PKG_BUILD_DIR)/contrib/tunnels.d/*.conf $(1)/etc/i2pd/tunnels.d
+ # subscriptions.txt deprecated, see #8088
$(INSTALL_DIR) $(1)/etc/init.d
$(INSTALL_BIN) ./files/i2pd.init $(1)/etc/init.d/i2pd
endef
config i2pd
- # Set where i2pd should store its data (netDB, certificates, addresses, etc)
- # By default we store it in RAM so no data is written to ROM.
- # IMPORTANT!
- # Data is consistently rewritten. DO NOT POINT IT TO INNER ROM. Flash will
- # die.
+ # Set where i2pd should store its data (netDB, certificates, addresses,
+ # etc). By default we store it in RAM so no data is written to ROM.
+ # If you store data permanently, you may want to enable
+ # 'profiles = true' and 'addressbook = true' in i2pd.conf
+ #### IMPORTANT! ####
+ # Data is consistently rewritten. DO NOT POINT IT TO INNER ROM. Flash
+ # will die.
option data_dir '/var/lib/i2pd'
- # If you don't store i2pd data permanently, you can still choose to store only
- # addressbook. If not, i2pd will be forced to do HTTP reseeding reseeding on
- # every start. Storing addressbook may be useful if HTTP reseeding is not
- # possible or blocked (by censorship).
- # Even addressbook doesn't take up too much space, extroot is still strongly
- # recommended to avoid flash wear-out.
+ # If you don't store i2pd data permanently, you can still choose to
+ # store only addressbooks. If not, i2pd will be forced to do HTTP
+ # reseeding on every start. Storing addressbook may be useful
+ # if HTTP reseeding is not possible or blocked (by censorship).
+	# Even though the addressbook doesn't take up too much space, extroot
+	# is still strongly recommended to avoid flash wear-out.
+ # Enable 'addressbook = true' in i2pd.conf to make it take effect.
#option addressbook_dir '/etc/i2pd/addressbook'
USER="i2pd"
GROUP="i2pd"
PIDFILE=/var/run/i2pd.pid
-#DATADIR=/var/lib/i2pd
+DATADIR=/var/lib/i2pd
CONFFILE=/etc/i2pd/i2pd.conf
. /lib/functions.sh
local data_dir
local addressbook_dir
- config_get data_dir "$cfg" data_dir
+ config_get data_dir "$cfg" data_dir "$DATADIR"
config_get addressbook_dir "$cfg" addressbook_dir
## Setting up data dir
if [ ! -d "$data_dir" ]; then
mkdir -p "$data_dir"
ln -s /usr/share/i2pd/certificates "$data_dir/certificates"
- ln -s /etc/i2pd/tunnels.conf "$data_dir/tunnels.conf"
if [ -n "$addressbook_dir" ]; then
if [ ! -d "$addressbook_dir" ]; then
mkdir -p "$addressbook_dir"
procd_set_param command "$PROG" --service --conf="$CONFFILE" --pidfile "$PIDFILE"
## Don't know about i2pd user's HOME
procd_set_param env "HOME=$DATADIR"
+ ## For debugging, provide about 512 MB on external storage for coredump and adjust /proc/sys/kernel/core_pattern
+ # echo "/tmp/coredumps/core.%e.%p.%s.%t" > /proc/sys/kernel/core_pattern
+ # procd_set_param limits core="unlimited unlimited"
procd_set_param limits nofile=4096
procd_set_param stdout 1
procd_set_param stderr 1
--- /dev/null
+diff -u -r a/contrib/i2pd.conf b/contrib/i2pd.conf
+--- a/contrib/i2pd.conf 2019-09-04 11:00:27.660713504 +0800
++++ b/contrib/i2pd.conf 2019-09-04 11:07:53.669745273 +0800
+@@ -8,12 +8,12 @@
+
+ ## Tunnels config file
+ ## Default: ~/.i2pd/tunnels.conf or /var/lib/i2pd/tunnels.conf
+-# tunconf = /var/lib/i2pd/tunnels.conf
++tunconf = /etc/i2pd/tunnels.conf
+
+ ## Tunnels config files path
+ ## Use that path to store separated tunnels in different config files.
+ ## Default: ~/.i2pd/tunnels.d or /var/lib/i2pd/tunnels.d
+-# tunnelsdir = /var/lib/i2pd/tunnels.conf.d
++tunnelsdir = /etc/i2pd/tunnels.conf.d
+
+ ## Where to write pidfile (don't write by default)
+ # pidfile = /var/run/i2pd.pid
+@@ -25,12 +25,12 @@
+ ## * stdout - print log entries to stdout
+ ## * file - log entries to a file
+ ## * syslog - use syslog, see man 3 syslog
+-# log = file
++log = syslog
+ ## Path to logfile (default - autodetect)
+ # logfile = /var/log/i2pd.log
+ ## Log messages above this level (debug, *info, warn, error, none)
+ ## If you set it to none, logging will be disabled
+-# loglevel = info
++loglevel = error
+ ## Write full CLF-formatted date and time to log (default: write only time)
+ # logclftime = true
+
+@@ -61,7 +61,7 @@
+ # ifname6 =
+
+ ## Enable NTCP transport (default = true)
+-# ntcp = true
++ntcp = false
+ ## If you run i2pd behind a proxy server, you can only use NTCP transport with ntcpproxy option
+ ## Should be http://address:port or socks://address:port
+ # ntcpproxy = http://127.0.0.1:8118
+@@ -81,7 +81,7 @@
+
+ ## Router will not accept transit tunnels, disabling transit traffic completely
+ ## (default = false)
+-# notransit = true
++notransit = true
+
+ ## Router will be floodfill
+ # floodfill = true
+@@ -91,8 +91,10 @@
+ ## Uncomment and set to 'false' to disable Web Console
+ # enabled = true
+ ## Address and port service will listen on
+-address = 127.0.0.1
++address = 192.168.1.1
++# address = ::
+ port = 7070
++# strictheaders = false
+ ## Path to web console, default "/"
+ # webroot = /
+ ## Uncomment following lines to enable Web Console authentication
+@@ -104,7 +106,7 @@
+ ## Uncomment and set to 'false' to disable HTTP Proxy
+ # enabled = true
+ ## Address and port service will listen on
+-address = 127.0.0.1
++address = 192.168.1.1
+ port = 4444
+ ## Optional keys file for proxy local destination
+ # keys = http-proxy-keys.dat
+@@ -118,7 +120,7 @@
+ ## Uncomment and set to 'false' to disable SOCKS Proxy
+ # enabled = true
+ ## Address and port service will listen on
+-address = 127.0.0.1
++address = 192.168.1.1
+ port = 4447
+ ## Optional keys file for proxy local destination
+ # keys = socks-proxy-keys.dat
+@@ -228,4 +230,5 @@
+
+ [persist]
+ ## Save peer profiles on disk (default: true)
+-# profiles = true
++profiles = false
++addressbook = false
PKG_NAME:=isc-dhcp
UPSTREAM_NAME:=dhcp
PKG_VERSION:=4.4.1
-PKG_RELEASE:=4
+PKG_RELEASE:=5
PKG_LICENSE:=BSD-3-Clause
PKG_LICENSE_FILES:=LICENSE
endef
define Package/isc-dhcp-relay-ipv4/description
-$(call Package/isc-dhcp-relay-ipv6/description)
+$(call Package/isc-dhcp-relay/description)
This package is compiled with IPv4 support only.
endef
include $(TOPDIR)/rules.mk
PKG_NAME:=knot
-PKG_VERSION:=2.8.3
+PKG_VERSION:=2.8.4
PKG_RELEASE:=1
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.xz
PKG_SOURCE_URL:=https://secure.nic.cz/files/knot-dns/
-PKG_HASH:=8a62d81e5cf3df938f469b60ed4e46d9161007c2b89fbf7ae07525fa68368bad
+PKG_HASH:=541e7e43503765c91405c5797b3838103bb656154712e69b3f959c6ab0e700a9
PKG_MAINTAINER:=Daniel Salzman <daniel.salzman@nic.cz>
PKG_LICENSE:=GPL-3.0 LGPL-2.0 0BSD BSD-3-Clause OLDAP-2.8
+PKG_CPE_ID:=cpe:/a:knot-dns:knot_dns
PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(PKG_VERSION)
PKG_FIXUP:=autoreconf
include $(TOPDIR)/rules.mk
PKG_NAME:=mosquitto
-PKG_VERSION:=1.6.4
+PKG_VERSION:=1.6.7
PKG_RELEASE:=1
PKG_LICENSE:=BSD-3-Clause
PKG_LICENSE_FILES:=LICENSE.txt
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://mosquitto.org/files/source/
-PKG_HASH:=a3d5822c249f6a6e13311b1b09eff6807ea01608a5a77934e1769842e9d146ef
+PKG_HASH:=bcd31a8fbbd053fee328986fadd8666d3058357ded56b9782f7d4f19931d178e
PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(BUILD_VARIANT)/$(PKG_NAME)-$(PKG_VERSION)
include $(INCLUDE_DIR)/package.mk
PKG_HASH:=b55af0bbdf5acc02d1eb6ab18da2acd77a400bafd074489003f3df09676332bb
PKG_MAINTAINER:=Adam Gensler <openwrt@a.gnslr.us>
PKG_LICENSE:=GPL-2.0
+PKG_CPE_ID:=cpe:/a:netcat:netcat
PKG_INSTALL:=1
include $(INCLUDE_DIR)/package.mk
PKG_NAME:=ola
PKG_VERSION:=0.10.7
-PKG_RELEASE:=4
+PKG_RELEASE:=5
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://codeload.github.com/OpenLightingProject/ola/tar.gz/$(PKG_VERSION)?
PKG_HASH:=95b5e99775f700a9cdf6e2219eccfc8c037fa8841f87cb00344359c4b8c7bf26
+PKG_MAINTAINER:=Christian Beier <dontmind@freeshell.org>
PKG_LICENSE:=LGPL-2.1-or-later
+PKG_LICENSE_FILES:=COPYING
-PKG_FIXUP:=libtool
-
+PKG_FIXUP:=autoreconf
PKG_INSTALL:=1
PKG_BUILD_PARALLEL:=1
PKG_USE_MIPS16:=0
CATEGORY:=Network
TITLE:=Open Lighting Architecture Daemon
URL:=https://www.openlighting.org/
- MAINTAINER:=Christian Beier <dontmind@freeshell.org>
DEPENDS:=+protobuf +libftdi1 +libusb-1.0 +libuuid +libstdcpp +libpthread +librt +zlib +libncurses +liblo +libmicrohttpd
endef
--disable-fatal-warnings \
--disable-unittests \
--without-dns-sd \
- --with-ola-protoc-plugin=$(STAGING_DIR_HOSTPKG)/bin/ola_protoc_plugin
+ --with-ola-protoc-plugin="$(STAGING_DIR_HOSTPKG)/bin/ola_protoc_plugin"
HOST_CONFIGURE_ARGS += \
--disable-all-plugins \
PKG_NAME:=openconnect
PKG_VERSION:=8.04
-PKG_RELEASE:=1
+PKG_RELEASE:=2
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=ftp://ftp.infradead.org/pub/openconnect/
# Juniper vpn support
#option juniper '1'
+ # Authentication form responses
+ #list form_entry FORM:OPT=VAL
+
The additional files are also used:
/etc/openconnect/user-cert-vpn-MYVPN.pem: The user certificate
/etc/openconnect/user-key-vpn-MYVPN.pem: The user private key
proto_config_add_string "token_script"
proto_config_add_string "os"
proto_config_add_string "csd_wrapper"
+ proto_config_add_array 'form_entry:regex("[^:]+:[^=]+=.*")'
no_device=1
available=1
}
+proto_openconnect_add_form_entry() {
+ [ -n "$1" ] && append cmdline "--form-entry $1"
+}
+
proto_openconnect_setup() {
local config="$1"
- json_get_vars server port interface username serverhash authgroup password password2 token_mode token_secret token_script os csd_wrapper mtu juniper
+ json_get_vars server port interface username serverhash authgroup password password2 token_mode token_secret token_script os csd_wrapper mtu juniper form_entry
grep -q tun /proc/modules || insmod tun
ifname="vpn-$config"
[ -n "$os" ] && append cmdline "--os=$os"
[ -n "$csd_wrapper" ] && [ -x "$csd_wrapper" ] && append cmdline "--csd-wrapper=$csd_wrapper"
+ json_for_each_item proto_openconnect_add_form_entry form_entry
+
proto_export INTERFACE="$config"
logger -t openconnect "executing 'openconnect $cmdline'"
--- /dev/null
+#
+# Copyright (C) 2019 Daniel Bermond
+#
+# This is free software, licensed under the GNU General Public License v2.
+# See /LICENSE for more information.
+#
+
+include $(TOPDIR)/rules.mk
+
+PKG_NAME:=proxychains-ng
+PKG_VERSION:=4.14
+PKG_RELEASE:=1
+
+PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
+PKG_SOURCE_URL:=https://codeload.github.com/rofl0r/proxychains-ng/tar.gz/v$(PKG_VERSION)?
+PKG_HASH:=ab31626af7177cc2669433bb244b99a8f98c08031498233bb3df3bcc9711a9cc
+
+PKG_MAINTAINER:=Daniel Bermond <dbermond@archlinux.org>
+PKG_LICENSE:=GPL-2.0-or-later
+PKG_LICENSE_FILES:=COPYING
+
+PKG_BUILD_PARALLEL:=1
+PKG_INSTALL:=1
+
+include $(INCLUDE_DIR)/package.mk
+
+define Package/proxychains-ng
+ SECTION:=net
+ CATEGORY:=Network
+ TITLE:=Redirect TCP traffic to a HTTP or SOCKS proxy
+ URL:=https://github.com/rofl0r/proxychains/
+endef
+
+define Package/proxychains-ng/description
+ A hook preloader that allows redirecting the TCP traffic of
+ existing dynamically linked programs through one or more
+ SOCKS or HTTP proxies.
+endef
+
+define Build/Compile
+ $(call Build/Compile/Default,all)
+endef
+
+define Build/Install
+ $(call Build/Install/Default,install install-config)
+endef
+
+define Package/proxychains-ng/conffiles
+/etc/proxychains.conf
+endef
+
+define Package/proxychains-ng/install
+ # binary executable
+ $(INSTALL_DIR) $(1)/usr/bin
+ $(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/bin/proxychains4 $(1)/usr/bin/
+ $(LN) proxychains4 $(1)/usr/bin/proxychains
+
+ # library
+ $(INSTALL_DIR) $(1)/usr/lib
+ $(INSTALL_DATA) $(PKG_INSTALL_DIR)/usr/lib/libproxychains4.so $(1)/usr/lib/
+
+ # config
+ $(INSTALL_DIR) $(1)/etc
+ $(INSTALL_DATA) $(PKG_INSTALL_DIR)/etc/proxychains.conf $(1)/etc/
+endef
+
+$(eval $(call BuildPackage,proxychains-ng))
--- a/core/Makefile
+++ b/core/Makefile
-@@ -49,7 +49,7 @@ irssi/network-openssl.o: CFLAGS:=$(IRSSI_CFLAGS)
+@@ -49,7 +49,7 @@ irssi/network-openssl.o: CFLAGS:=$(IRSSI
quasselc-connector.o: CFLAGS:=$(CFLAGS)
$(TARGET): $(OBJECTS)
+ LDFLAGS += $(shell pkg-config --libs quasselc)
endif
- CFLAGS+=-std=gnu11 -Wall -Wextra -Werror -g $(IRSSI_CFLAGS) $(QUASSELC_FLAGS) -Wmissing-prototypes -Wmissing-declarations
+ CFLAGS+=-std=gnu11 -Wall -Wextra -g $(IRSSI_CFLAGS) $(QUASSELC_FLAGS) -Wmissing-prototypes -Wmissing-declarations
core/quassel-net.c | 64 ++++++++++++++++++++++++++++++++++++++--------
2 files changed, 53 insertions(+), 10 deletions(-)
-diff --git a/core/Makefile b/core/Makefile
-index c1c65fc..987bd7b 100644
--- a/core/Makefile
+++ b/core/Makefile
-@@ -16,7 +16,6 @@ SSL_CFLAGS=$(shell pkg-config --cflags openssl)
+@@ -16,7 +16,6 @@ SSL_CFLAGS=$(shell pkg-config --cflags o
SSL_LDLAGS=$(shell pkg-config --libs openssl)
OBJECTS:=quasselc-connector.o quassel-core.o
OBJECTS+=quassel-net.o quassel-msgs.o quassel-cmds.o
OBJECTS+=quassel-fe-window.o quassel-fe-level.o quassel-cfg.o
LDFLAGS ?=
-diff --git a/core/quassel-net.c b/core/quassel-net.c
-index 8a6eb55..5db7fe0 100644
--- a/core/quassel-net.c
+++ b/core/quassel-net.c
-@@ -132,10 +132,10 @@ static SERVER_REC* quassel_server_init_connect(SERVER_CONNECT_REC* conn) {
+@@ -132,10 +132,10 @@ static SERVER_REC* quassel_server_init_c
ret->got = 0;
server_connect_ref(SERVER_CONNECT(conn));
ret->channels_join = quassel_irssi_channels_join;
ret->send_message = quassel_irssi_send_message;
-@@ -161,12 +161,59 @@ void quassel_net_init(CHAT_PROTOCOL_REC* rec) {
+@@ -161,12 +161,59 @@ void quassel_net_init(CHAT_PROTOCOL_REC*
signal_add_first("server connected", (SIGNAL_FUNC) sig_connected);
}
}
void quassel_irssi_init_nack(void *arg) {
---
-2.17.1
-
include $(TOPDIR)/rules.mk
PKG_NAME:=simple-adblock
-PKG_VERSION:=1.8.0
-PKG_RELEASE:=4
+PKG_VERSION:=1.8.1
+PKG_RELEASE:=7
PKG_MAINTAINER:=Stan Grishin <stangri@melmac.net>
PKG_LICENSE:=GPL-3.0-or-later
## Features
- Super-fast due to the nature of supported block lists and parallel downloading/processing of the blacklists.
-- Supports both hosts files and domains lists for blocking (to keep it lean and fast).
+- Supports hosts files and domains lists for blocking.
- Everything is configurable from Web UI.
- Allows you to easily add your own domains to whitelist or blacklist.
-- Allows you to easily add URLs to your own blocked hosts or domains lists to block/whitelist (just put whitelisted domains one per line).
-- Requires no configuration for the download utility wherever you want to use wget/libopenssl or uclient-fetch/libustream-mbedtls.
-- Installs dependencies automatically.
-- Doesn't stay in memory -- creates the list of blocked domains and then uses DNSMASQ/Unbound and firewall rules to serve NXDOMAIN or 127.0.0.1 (depending on settings) reply for blocked domains.
+- Allows you to easily add URLs to your own blocked hosts or domains lists to block/whitelist (just put whitelisted domains one per line in the file you're linking).
+- Supports multiple modes of AdBlocking implementations with DNSMASQ and Unbound.
+- Doesn't stay in memory -- creates the list of blocked domains and then uses DNSMASQ/Unbound and firewall rules to serve NXDOMAIN or 127.0.0.1 reply or to reject access (depending on settings) for blocked domains.
- As some of the default lists are using https, reliably works with either wget/libopenssl, uclient-fetch/libustream-mbedtls or curl.
- Very lightweight and easily hackable, the whole script is just one ```/etc/init.d/simple-adblock``` file.
- Retains the downloaded/sorted AdBlocking list on service stop and reuses it on service start (use ```dl``` command if you want to force re-download of the list).
+- Has an option to store a compressed copy of the AdBlocking list in persistent memory which survives reboots.
- Blocks ads served over https (unlike PixelServ-derived solutions).
+- Blocks ads inside browsers with [DNS-over-HTTPS proxy](https://en.wikipedia.org/wiki/DNS_over_HTTPS) built-in, like [Mozilla Firefox](https://support.mozilla.org/en-US/kb/firefox-dns-over-https#w_about-dns-over-https) or [Google Chrome/Chromium](https://blog.chromium.org/2019/09/experimenting-with-same-provider-dns.html) -- with the ```dnsmasq.ipset``` option.
- Proudly made in Canada, using locally-sourced electrons.
If you want a more robust AdBlocking, supporting free memory detection and complex block lists, supporting IDN, check out [net/adblock](https://github.com/openwrt/packages/tree/master/net/adblock/files).
-## Screenshot (luci-app-simple-adblock)
+## Screenshots (luci-app-simple-adblock)
-![screenshot](https://raw.githubusercontent.com/stangri/openwrt_packages/master/screenshots/simple-adblock/screenshot07.png "screenshot")
+Service Status
+
+![screenshot](https://raw.githubusercontent.com/stangri/openwrt_packages/master/screenshots/simple-adblock/screenshot08-status.png "Service Status")
+
+Configuration - Basic Configuration
+
+![screenshot](https://raw.githubusercontent.com/stangri/openwrt_packages/master/screenshots/simple-adblock/screenshot08-config-basic.png "Configuration - Basic Configuration")
+
+Configuration - Advanced Configuration
+
+![screenshot](https://raw.githubusercontent.com/stangri/openwrt_packages/master/screenshots/simple-adblock/screenshot08-config-advanced.png "Configuration - Advanced Configuration")
+
+Whitelist and Blocklist Management
+
+![screenshot](https://raw.githubusercontent.com/stangri/openwrt_packages/master/screenshots/simple-adblock/screenshot08-lists.png "Whitelist and Blocklist Management")
## Requirements
To satisfy the requirements, connect to your router via ssh and run the following commands:
+### How to use DNSMASQ ipset
+
+The ```dnsmasq.ipset``` option requires you to install ```ipset``` and ```dnsmasq-full``` instead of the default ```dnsmasq``` package. To do that, connect to your router via ssh and run the following command:
+
+```sh
+opkg update; opkg remove dnsmasq; opkg install dnsmasq-full ipset;
+```
+
### OpenWrt 15.05.1 Requirements
```sh
| --- | --- |
|```dnsmasq.addnhosts```|Creates the DNSMASQ additional hosts file ```/var/run/simple-adblock.addnhosts``` and modifies DNSMASQ settings, so that DNSMASQ resolves all blocked domains to "local machine": 127.0.0.1. This option doesn't allow block-list optimization (by removing secondary level domains if the top-level domain is also in the block-list), so it results in a much larger block-list file, but, unlike other DNSMASQ-based options, it has almost no effect on the DNS look up speed. This option also allows quick reloads of DNSMASQ on block-list updates.|
|```dnsmasq.conf```|Creates the DNSMASQ config file ```/var/dnsmasq.d/simple-adblock``` so that DNSMASQ replies with NXDOMAIN: "domain not found". This option allows the block-list optimization (by removing secondary level domains if the top-level domain is also in the block-list), resulting in the smaller block-list file. This option will slow down DNS look up speed somewhat.|
+|```dnsmasq.ipset```|Creates the DNSMASQ ipset file ```/var/dnsmasq.d/simple-adblock.ipset``` and the firewall rule to reject the matching requests. This is the only option for AdBlocking if you're using a browser with [DNS-over-HTTPS proxy](https://en.wikipedia.org/wiki/DNS_over_HTTPS) built-in, like [Mozilla Firefox](https://support.mozilla.org/en-US/kb/firefox-dns-over-https#w_about-dns-over-https) or [Google Chrome/Chromium](https://blog.chromium.org/2019/09/experimenting-with-same-provider-dns.html). This option allows the block-list optimization (by removing secondary level domains if the top-level domain is also in the block-list), resulting in the smaller block-list file. This option requires you install ```dnsmasq-full``` and ```ipset``` [as described here](#how-to-use-dnsmasq-ipset).<br/>PLEASE NOTE, that unlike other options which are truly domain name based blocking, this is essentially an IP address based blocking, ie: if you try to block ```google-analytics.com``` with this option, it may also block/break things like YouTube, Hangouts and other Google services if they share IP address(es) with ```google-analytics.com```.|
|```dnsmasq.servers```|Creates the DNSMASQ servers file ```/var/run/simple-adblock.servers``` and modifies DNSMASQ settings so that DNSMASQ replies with NXDOMAIN: "domain not found". This option allows the block-list optimization (by removing secondary level domains if the top-level domain is also in the block-list), resulting in the smaller block-list file. This option will slow down DNS look up speed somewhat. This is a default setting as it results in the smaller block-file and allows quick reloads of DNSMASQ.|
|```unbound.adb_list```|Creates the Unbound config file ```/var/lib/unbound/adb_list.simple-adblock``` so that Unbound replies with NXDOMAIN: "domain not found". This option allows the block-list optimization (by removing secondary level domains if the top-level domain is also in the block-list), resulting in the smaller block-list file.|
## Thanks
-I'd like to thank everyone who helped create, test and troubleshoot this service. Special thanks to [@hnyman](https://github.com/hnyman) for general package/luci guidance, [@dibdot](https://github.com/dibdot) for general guidance and block-list optimization code, [@ckuethe](https://github.com/ckuethe) for the curl support, non-ASCII filtering and compressed cache code, [@EricLuehrsen](https://github.com/EricLuehrsen) for the Unbound support information and [@phasecat](https://forum.openwrt.org/u/phasecat/summary) for submitting bugs and testing.
+I'd like to thank everyone who helped create, test and troubleshoot this service. Special thanks to [@hnyman](https://github.com/hnyman) for general package/luci guidance, [@dibdot](https://github.com/dibdot) for general guidance and block-list optimization code, [@ckuethe](https://github.com/ckuethe) for the curl support, non-ASCII filtering and compressed cache code, [@EricLuehrsen](https://github.com/EricLuehrsen) for the Unbound support information, [@mushoz](https://github.com/mushoz) for performance testing and [@phasecat](https://forum.openwrt.org/u/phasecat/summary) for submitting various bugs and testing.
config simple-adblock 'config'
option enabled '0'
option dns 'dnsmasq.servers'
+ option dns_instance '0'
option verbosity '2'
option force_dns '1'
option led 'none'
option curl_retry '3'
option parallel_downloads '1'
option debug '0'
- option allow_non_ascii '0'
option compressed_cache '0'
list whitelist_domain 'raw.githubusercontent.com'
# list blacklist_hosts_url 'http://support.it-mate.co.uk/downloads/hosts.txt'
export USE_PROCD=1
export LC_ALL=C
-export EXTRA_COMMANDS="check dl killcache status"
-export EXTRA_HELP=" check Checks if specified domain is found in current blacklist
+export EXTRA_COMMANDS='check dl killcache status'
+export EXTRA_HELP=' check Checks if specified domain is found in current blacklist
dl Force-redownloads all the list
- status Shows the service last-run status"
+ status Shows the service last-run status'
-readonly packageName="simple-adblock"
+readonly packageName='simple-adblock'
readonly serviceName="$packageName $PKG_VERSION"
readonly addnhostsFile="/var/run/${packageName}.addnhosts"
readonly addnhostsCache="/var/run/${packageName}.addnhosts.cache"
readonly dnsmasqCache="/var/run/${packageName}.dnsmasq.cache"
readonly dnsmasqGzip="/etc/${packageName}.dnsmasq.gz"
readonly dnsmasqOutputFilter='s|^|local=/|;s|$|/|'
+readonly ipsetFile="/var/dnsmasq.d/${packageName}.ipset"
+readonly ipsetCache="/var/run/${packageName}.ipset.cache"
+readonly ipsetGzip="/etc/${packageName}.ipset.gz"
+readonly ipsetOutputFilter='s|^|ipset=/|;s|$|/adb|'
readonly serversFile="/var/run/${packageName}.servers"
readonly serversCache="/var/run/${packageName}.servers.cache"
readonly serversGzip="/etc/${packageName}.servers.gz"
readonly statusProcessing='Processing'
readonly statusStopped='Stopped'
readonly sharedMemoryError="/dev/shm/$packageName-error"
+readonly sharedMemoryOutput="/dev/shm/$packageName-output"
create_lock() { [ -e "$PIDFile" ] && return 1; touch "$PIDFile"; }
remove_lock() { [ -e "$PIDFile" ] && rm -f "$PIDFile"; }
trap remove_lock EXIT
output_ok() { output 1 "$_OK_"; output 2 "$__OK__\\n"; }
output_okn() { output 1 "$_OK_\\n"; output 2 "$__OK__\\n"; }
-output_fail() { output 1 "$_FAIL_\\n"; output 2 "$__FAIL__\\n"; }
-output_failn() { output 1 "$_FAIL_"; output 2 "$__FAIL__\\n"; }
+output_fail() { output 1 "$_FAIL_"; output 2 "$__FAIL__\\n"; }
+output_failn() { output 1 "$_FAIL_\\n"; output 2 "$__FAIL__\\n"; }
str_replace() { echo "$1" | sed -e "s/$2/$3/g"; }
-str_contains() { [ "$1" != "$(str_replace "$1" "$2" "")" ]; }
-readonly sharedMemoryOutput="/dev/shm/$packageName-output"
+str_contains() { test "$1" != "$(str_replace "$1" "$2" '')"; }
+compare_versions() { test "$(printf '%s\n' "$@" | sort -V | head -n 1)" != "$1"; }
+is_chaos_calmer() { ubus -S call system board | grep -q 'Chaos Calmer'; }
+is_ipset_procd() { compare_versions "$(sed -ne 's/^Version: //p' /usr/lib/opkg/info/firewall.control)" "2019-09-18"; }
+led_on(){ if [ -n "${1}" ] && [ -e "${1}/trigger" ]; then echo 'default-on' > "${1}/trigger" 2>&1; fi; }
+led_off(){ if [ -n "${1}" ] && [ -e "${1}/trigger" ]; then echo 'none' > "${1}/trigger" 2>&1; fi; }
+dnsmasq_kill() { killall -q -HUP dnsmasq; }
+dnsmasq_restart() { /etc/init.d/dnsmasq restart >/dev/null 2>&1; }
+unbound_restart() { /etc/init.d/unbound restart >/dev/null 2>&1; }
+
output() {
# Can take a single parameter (text) to be output at any verbosity
	# Or target verbosity level and text to be output at a specific verbosity
printf "%b" "$msg" >> "$sharedMemoryOutput"
fi
}
-led_on(){ if [ -n "${1}" ] && [ -e "${1}/trigger" ]; then echo "default-on" > "${1}/trigger" 2>&1; fi; }
-led_off(){ if [ -n "${1}" ] && [ -e "${1}/trigger" ]; then echo "none" > "${1}/trigger" 2>&1; fi; }
-
-export serviceEnabled
-export forceDNS
-export parallelDL
-export debug
-export allowNonAscii
-export compressedCache
-export targetDNS
-export bootDelay
-export dlTimeout
-export curlRetry
-export verbosity
-export led
-export whitelist_domains
-export blacklist_domains
-export whitelist_domains_urls
-export blacklist_domains_urls
-export blacklist_hosts_urls
+
+export serviceEnabled forceDNS parallelDL debug allowIDN compressedCache
+export targetDNS bootDelay dlTimeout curlRetry verbosity led dnsInstance
+export whitelist_domains blacklist_domains
+export whitelist_domains_urls blacklist_domains_urls blacklist_hosts_urls
export wan_if wan_gw wanphysdev dl_command serviceStatus dl_flag
export outputFilter outputFilterIPv6 outputFile outputGzip outputCache ipv6Enabled
load_package_config() {
config_load "$packageName"
- config_get_bool serviceEnabled "config" "enabled" 1
- config_get_bool forceDNS "config" "force_dns" 1
- config_get_bool parallelDL "config" "parallel_downloads" 1
- config_get_bool debug "config" "debug" 0
- config_get_bool allowNonAscii "config" "allow_non_ascii" 0
- config_get_bool compressedCache "config" "compressed_cache" 0
- config_get_bool ipv6Enabled "config" "ipv6_enabled" 0
- config_get bootDelay "config" "boot_delay" "120"
- config_get dlTimeout "config" "download_timeout" "20"
- config_get curlRetry "config" "curl_retry" "3"
- config_get verbosity "config" "verbosity" "2"
- config_get led "config" "led"
- config_get targetDNS "config" "dns" "dnsmasq.servers"
- config_get whitelist_domains "config" "whitelist_domain"
- config_get blacklist_domains "config" "blacklist_domain"
- config_get whitelist_domains_urls "config" "whitelist_domains_url"
- config_get blacklist_domains_urls "config" "blacklist_domains_url"
- config_get blacklist_hosts_urls "config" "blacklist_hosts_url"
-
- if [ "$targetDNS" != "dnsmasq.addnhosts" ] && [ "$targetDNS" != "dnsmasq.conf" ] && \
- [ "$targetDNS" != "dnsmasq.servers" ] && [ "$targetDNS" != "unbound.adb_list" ]; then
- targetDNS="dnsmasq.servers"
+ config_get_bool serviceEnabled 'config' 'enabled' 1
+ config_get_bool forceDNS 'config' 'force_dns' 1
+ config_get_bool parallelDL 'config' 'parallel_downloads' 1
+ config_get_bool debug 'config' 'debug' 0
+ config_get_bool compressedCache 'config' 'compressed_cache' 0
+ config_get_bool ipv6Enabled 'config' 'ipv6_enabled' 0
+ config_get bootDelay 'config' 'boot_delay' '120'
+ config_get dlTimeout 'config' 'download_timeout' '20'
+ config_get curlRetry 'config' 'curl_retry' '3'
+ config_get verbosity 'config' 'verbosity' '2'
+ config_get led 'config' 'led'
+ config_get targetDNS 'config' 'dns' 'dnsmasq.servers'
+ config_get dnsInstance 'config' 'dns_instance' '0'
+ config_get whitelist_domains 'config' 'whitelist_domain'
+ config_get blacklist_domains 'config' 'blacklist_domain'
+ config_get whitelist_domains_urls 'config' 'whitelist_domains_url'
+ config_get blacklist_domains_urls 'config' 'blacklist_domains_url'
+ config_get blacklist_hosts_urls 'config' 'blacklist_hosts_url'
+
+ if [ "$targetDNS" != 'dnsmasq.addnhosts' ] && [ "$targetDNS" != 'dnsmasq.conf' ] && \
+ [ "$targetDNS" != 'dnsmasq.servers' ] && [ "$targetDNS" != 'unbound.adb_list' ] && \
+ [ "$targetDNS" != 'dnsmasq.ipset' ] ; then
+ targetDNS='dnsmasq.servers'
fi
case "$targetDNS" in
outputGzip="$addnhostsGzip"
[ "$ipv6Enabled" -gt 0 ] && outputFilterIPv6="$addnhostsOutputFilterIPv6"
rm -f "$dnsmasqFile" "$dnsmasqCache" "$dnsmasqGzip"
+ rm -f "$ipsetFile" "$ipsetCache" "$ipsetGzip"
rm -f "$serversFile" "$serversCache" "$serversGzip"
rm -f "$unboundFile" "$unboundCache" "$unboundGzip"
;;
outputCache="$dnsmasqCache"
outputGzip="$dnsmasqGzip"
rm -f "$addnhostsFile" "$addnhostsCache" "$addnhostsGzip"
+ rm -f "$ipsetFile" "$ipsetCache" "$ipsetGzip"
+ rm -f "$serversFile" "$serversCache" "$serversGzip"
+ rm -f "$unboundFile" "$unboundCache" "$unboundGzip"
+ ;;
+ dnsmasq.ipset)
+ outputFilter="$ipsetOutputFilter"
+ outputFile="$ipsetFile"
+ outputCache="$ipsetCache"
+ outputGzip="$ipsetGzip"
+ rm -f "$dnsmasqFile" "$dnsmasqCache" "$dnsmasqGzip"
+ rm -f "$addnhostsFile" "$addnhostsCache" "$addnhostsGzip"
rm -f "$serversFile" "$serversCache" "$serversGzip"
rm -f "$unboundFile" "$unboundCache" "$unboundGzip"
;;
outputGzip="$serversGzip"
rm -f "$dnsmasqFile" "$dnsmasqCache" "$dnsmasqGzip"
rm -f "$addnhostsFile" "$addnhostsCache" "$addnhostsGzip"
+ rm -f "$ipsetFile" "$ipsetCache" "$ipsetGzip"
rm -f "$unboundFile" "$unboundCache" "$unboundGzip"
;;
unbound.adb_list)
outputFile="$unboundFile"
outputCache="$unboundCache"
outputGzip="$unboundGzip"
- rm -f "$dnsmasqFile" "$dnsmasqCache" "$dnsmasqGzip"
rm -f "$addnhostsFile" "$addnhostsCache" "$addnhostsGzip"
+ rm -f "$dnsmasqFile" "$dnsmasqCache" "$dnsmasqGzip"
+ rm -f "$ipsetFile" "$ipsetCache" "$ipsetGzip"
rm -f "$serversFile" "$serversCache" "$serversGzip"
;;
esac
return 1
fi
+ case $targetDNS in
+ dnsmasq.addnhosts | dnsmasq.conf | dnsmasq.ipset | dnsmasq.servers)
+ if dnsmasq -v 2>/dev/null | grep -q 'no-IDN' || ! dnsmasq -v 2>/dev/null | grep -q -w 'IDN'; then
+ allowIDN=0
+ else
+ allowIDN=1
+ fi
+ ;;
+ unbound.adb_list)
+ allowIDN=1;;
+ esac
+
+ case $targetDNS in
+ dnsmasq.ipset)
+ if dnsmasq -v 2>/dev/null | grep -q 'no-ipset' || ! dnsmasq -v 2>/dev/null | grep -q -w 'ipset'; then
+ output "$_ERROR_: DNSMASQ ipset support is enabled in $packageName, but DNSMASQ is either not installed or installed DNSMASQ does not support ipsets!\\n"
+ targetDNS='dnsmasq.servers'
+ fi
+ if ! ipset help hash:net >/dev/null 2>&1; then
+ output "$_ERROR_: DNSMASQ ipset support is enabled in $packageName, but ipset is either not installed or installed ipset does not support 'hash:net' type!\\n"
+ targetDNS='dnsmasq.servers'
+ fi
+ ;;
+ esac
+
[ ! -d "${outputFile%/*}" ] && mkdir -p "${outputFile%/*}"
[ ! -d "${outputCache%/*}" ] && mkdir -p "${outputFile%/*}"
[ ! -d "${outputGzip%/*}" ] && mkdir -p "${outputFile%/*}"
output "$_ERROR_: $serviceName failed to discover WAN gateway.\\n"; return 1;
}
-dnsmasq_kill() { killall -q -HUP dnsmasq; }
-dnsmasq_restart() { /etc/init.d/dnsmasq restart >/dev/null 2>&1; }
-unbound_restart() { /etc/init.d/unbound restart >/dev/null 2>&1; }
reload_resolver() {
local param output_text
case $1 in
fi
case "$targetDNS" in
dnsmasq.addnhosts)
- uci -q del_list dhcp.@dnsmasq[0].addnhosts="$addnhostsFile"
- uci add_list dhcp.@dnsmasq[0].addnhosts="$addnhostsFile"
- if [ -n "$(uci changes dhcp)" ]; then
- uci commit dhcp
- param=dnsmasq_restart
- output_text="Restarting DNSMASQ"
- else
- param=dnsmasq_kill
- output_text="Reloading DNSMASQ"
+ if [ "$(uci -q get dhcp.@dnsmasq["$dnsInstance"].serversfile)" = "$serversFile" ]; then
+ uci -q del dhcp.@dnsmasq["$dnsInstance"].serversfile
fi
+ if ! uci -q get dhcp.@dnsmasq["$dnsInstance"].addnhosts | grep -q "$addnhostsFile"; then
+ uci add_list dhcp.@dnsmasq["$dnsInstance"].addnhosts="$addnhostsFile"
+ fi
+ param=dnsmasq_kill
+ output_text='Reloading DNSMASQ'
;;
dnsmasq.conf)
+ uci -q del_list dhcp.@dnsmasq["$dnsInstance"].addnhosts="$addnhostsFile"
+ if [ "$(uci -q get dhcp.@dnsmasq["$dnsInstance"].serversfile)" = "$serversFile" ]; then
+ uci -q del dhcp.@dnsmasq["$dnsInstance"].serversfile
+ fi
+ param=dnsmasq_restart
+ output_text='Restarting DNSMASQ'
+ ;;
+ dnsmasq.ipset)
+ uci -q del_list dhcp.@dnsmasq["$dnsInstance"].addnhosts="$addnhostsFile"
+ if [ "$(uci -q get dhcp.@dnsmasq["$dnsInstance"].serversfile)" = "$serversFile" ]; then
+ uci -q del dhcp.@dnsmasq["$dnsInstance"].serversfile
+ fi
param=dnsmasq_restart
- output_text="Restarting DNSMASQ"
+ output_text='Restarting DNSMASQ'
;;
dnsmasq.servers)
- if [ "$(uci -q get dhcp.@dnsmasq[0].serversfile)" != "$serversFile" ]; then
- uci set dhcp.@dnsmasq[0].serversfile="$serversFile"
- uci commit dhcp
- param=dnsmasq_restart
- output_text="Restarting DNSMASQ"
- else
- param=dnsmasq_kill
- output_text="Reloading DNSMASQ"
+ uci -q del_list dhcp.@dnsmasq["$dnsInstance"].addnhosts="$addnhostsFile"
+ if [ "$(uci -q get dhcp.@dnsmasq["$dnsInstance"].serversfile)" != "$serversFile" ]; then
+ uci set dhcp.@dnsmasq["$dnsInstance"].serversfile="$serversFile"
fi
+ param=dnsmasq_kill
+ output_text='Reloading DNSMASQ'
;;
unbound.adb_list)
+ uci -q del_list dhcp.@dnsmasq["$dnsInstance"].addnhosts="$addnhostsFile"
+ if [ "$(uci -q get dhcp.@dnsmasq["$dnsInstance"].serversfile)" = "$serversFile" ]; then
+ uci -q del dhcp.@dnsmasq["$dnsInstance"].serversfile
+ fi
param=unbound_restart
- output_text="Restarting Unbound"
+ output_text='Restarting Unbound'
;;
esac
+ if [ -n "$(uci changes dhcp)" ]; then
+ uci commit dhcp
+ if [ "$param" = 'unbound_restart' ]; then
+ param='dnsmasq_restart; unbound_restart;'
+ output_text='Restarting Unbound/DNSMASQ'
+ else
+ param=dnsmasq_restart
+ output_text='Restarting DNSMASQ'
+ fi
+ fi
output 1 "$output_text "
output 2 "$output_text "
tmpfs set message "$output_text"
cacheOps 'create'
case "$targetDNS" in
dnsmasq.addnhosts | dnsmasq.servers)
- if [ -n "$(uci changes dhcp)" ]; then
- uci -q commit dhcp
- param=dnsmasq_restart
- else
- param=dnsmasq_kill
- fi
+ param=dnsmasq_kill
;;
- dnsmasq.conf)
+ dnsmasq.conf | dnsmasq.ipset)
param=dnsmasq_restart
;;
unbound.adb_list)
param=unbound_restart
;;
esac
+ if [ -n "$(uci changes dhcp)" ]; then
+ uci -q commit dhcp
+ if [ "$param" = 'unbound_restart' ]; then
+ param='dnsmasq_restart; unbound_restart;'
+ else
+ param=dnsmasq_restart
+ fi
+ fi
eval "$param"
return $?
;;
quiet)
case "$targetDNS" in
- dnsmasq.addnhosts | dnsmasq.servers | dnsmasq.conf)
+ dnsmasq.addnhosts | dnsmasq.conf | dnsmasq.ipset | dnsmasq.servers)
param=dnsmasq_restart
;;
unbound.adb_list)
stats)
echo "$stats"; return;;
triggers)
- curReload="$allowNonAscii $parallelDL $debug $dlTimeout $whitelist_domains $blacklist_domains $whitelist_domains_urls $blacklist_domains_urls $blacklist_hosts_urls $targetDNS"
+ curReload="$parallelDL $debug $dlTimeout $whitelist_domains $blacklist_domains $whitelist_domains_urls $blacklist_domains_urls $blacklist_hosts_urls $targetDNS"
curRestart="$compressedCache $forceDNS $led"
if [ "$curReload" != "$readReload" ]; then
- ret="download"
+ ret='download'
elif [ "$curRestart" != "$readRestart" ]; then
- ret="restart"
+ ret='restart'
fi
echo "$ret"
return;;
;;
del)
case "$instance" in
+ all)
+ unset status;
+ unset message;
+ unset error;
+ unset stats;
+ ;;
status)
unset status;;
message)
stats)
stats="$value";;
triggers)
- readReload="$allowNonAscii $parallelDL $debug $dlTimeout $whitelist_domains $blacklist_domains $whitelist_domains_urls $blacklist_domains_urls $blacklist_hosts_urls $targetDNS"
+ readReload="$parallelDL $debug $dlTimeout $whitelist_domains $blacklist_domains $whitelist_domains_urls $blacklist_domains_urls $blacklist_hosts_urls $targetDNS"
readRestart="$compressedCache $forceDNS $led"
;;
esac
;;
esac
json_init
- json_add_object "data"
+ json_add_object 'data'
json_add_string version "$PKG_VERSION"
json_add_string status "$status"
json_add_string message "$message"
esac
}
-is_chaos_calmer() { ubus -S call system board | grep -q "Chaos Calmer"; }
-
-remove_fw3_redirect() {
- local name
- config_get name "$1" "name"
-# shellcheck disable=SC2154
- if [ -n "$name" ] && str_contains "$name" "simple-adblock"; then
- uci -q del "firewall.$1"
- fi
-}
-
-fw3_setup() {
- config_load "firewall"
- config_foreach remove_fw3_redirect "redirect"
- if [ "$1" = "start" ]; then
- uci -q add firewall redirect >/dev/null 2>&1
- uci -q set firewall.@redirect[-1].name="simple_adblock_dns_hijack"
- uci -q set firewall.@redirect[-1].target="DNAT"
- uci -q set firewall.@redirect[-1].src="lan"
- uci -q set firewall.@redirect[-1].proto="tcpudp"
- uci -q set firewall.@redirect[-1].src_dport="53"
- uci -q set firewall.@redirect[-1].dest_port="53"
- uci -q set firewall.@redirect[-1].dest_ip="$2"
- uci -q set firewall.@redirect[-1].reflection="0"
- fi
+fw3Ops() {
+ local action="$1" param="$2" _restart
+ case "$action" in
+ reload) /etc/init.d/firewall reload >/dev/null 2>&1;;
+ restart) /etc/init.d/firewall restart >/dev/null 2>&1;;
+ remove)
+ case "$param" in
+ dns_redirect) uci -q del firewall.simple_adblock_dns_redirect;;
+ ipset) uci -q del firewall.simple_adblock_ipset
+ uci -q del firewall.simple_adblock_ipset_rule;;
+ *)
+ uci -q del firewall.simple_adblock_dns_redirect
+ uci -q del firewall.simple_adblock_ipset
+ uci -q del firewall.simple_adblock_ipset_rule
+ ;;
+ esac
+ ;;
+ insert)
+ case "$param" in
+ dns_redirect)
+ if ! uci -q get firewall.simple_adblock_dns_redirect >/dev/null; then
+ uci -q set firewall.simple_adblock_dns_redirect=redirect
+ uci -q set firewall.simple_adblock_dns_redirect.name=simple_adblock_dns_hijack
+ uci -q set firewall.simple_adblock_dns_redirect.target=DNAT
+ uci -q set firewall.simple_adblock_dns_redirect.src=lan
+ uci -q set firewall.simple_adblock_dns_redirect.proto=tcpudp
+ uci -q set firewall.simple_adblock_dns_redirect.src_dport=53
+ uci -q set firewall.simple_adblock_dns_redirect.dest_port=53
+ fi
+ ;;
+ ipset)
+ if ! uci -q get firewall.simple_adblock_ipset >/dev/null; then
+ uci -q set firewall.simple_adblock_ipset=ipset
+ uci -q set firewall.simple_adblock_ipset.name=adb
+ uci -q set firewall.simple_adblock_ipset.match=dest_net
+ uci -q set firewall.simple_adblock_ipset.storage=hash
+ uci -q set firewall.simple_adblock_ipset.enabled=1
+ _restart=1
+ fi
+ if ! uci -q get firewall.simple_adblock_ipset_rule >/dev/null; then
+ uci -q set firewall.simple_adblock_ipset_rule=rule
+ uci -q set firewall.simple_adblock_ipset_rule.name=simple_adblock_ipset_rule
+ uci -q set firewall.simple_adblock_ipset_rule.ipset=adb
+ uci -q set firewall.simple_adblock_ipset_rule.src=lan
+ uci -q set firewall.simple_adblock_ipset_rule.dest='*'
+ uci -q set firewall.simple_adblock_ipset_rule.proto=tcpudp
+ uci -q set firewall.simple_adblock_ipset_rule.target=REJECT
+ uci -q set firewall.simple_adblock_ipset_rule.enabled=1
+ fi
+ ;;
+ *)
+ if ! uci -q get firewall.simple_adblock_dns_redirect >/dev/null; then
+ uci -q set firewall.simple_adblock_dns_redirect=redirect
+ uci -q set firewall.simple_adblock_dns_redirect.name=simple_adblock_dns_hijack
+ uci -q set firewall.simple_adblock_dns_redirect.target=DNAT
+ uci -q set firewall.simple_adblock_dns_redirect.src=lan
+ uci -q set firewall.simple_adblock_dns_redirect.proto=tcpudp
+ uci -q set firewall.simple_adblock_dns_redirect.src_dport=53
+ uci -q set firewall.simple_adblock_dns_redirect.dest_port=53
+ fi
+ if ! uci -q get firewall.simple_adblock_ipset >/dev/null; then
+ uci -q set firewall.simple_adblock_ipset=ipset
+ uci -q set firewall.simple_adblock_ipset.name=adb
+ uci -q set firewall.simple_adblock_ipset.match=dest_net
+ uci -q set firewall.simple_adblock_ipset.storage=hash
+ uci -q set firewall.simple_adblock_ipset.enabled=1
+ _restart=1
+ fi
+ if ! uci -q get firewall.simple_adblock_ipset_rule >/dev/null; then
+ uci -q set firewall.simple_adblock_ipset_rule=rule
+ uci -q set firewall.simple_adblock_ipset_rule.name=simple_adblock_ipset_rule
+ uci -q set firewall.simple_adblock_ipset_rule.ipset=adb
+ uci -q set firewall.simple_adblock_ipset_rule.src=lan
+ uci -q set firewall.simple_adblock_ipset_rule.dest='*'
+ uci -q set firewall.simple_adblock_ipset_rule.proto=tcpudp
+ uci -q set firewall.simple_adblock_ipset_rule.target=REJECT
+ uci -q set firewall.simple_adblock_ipset_rule.enabled=1
+ fi
+ ;;
+ esac
+ esac
if [ -n "$(uci changes firewall)" ]; then
uci -q commit firewall
- /etc/init.d/firewall restart >/dev/null 2>&1
+ if [ -z "$_restart" ]; then
+ fw3Ops 'reload'
+ else
+ fw3Ops 'restart'
+ fi
fi
}
local label type D_TMP R_TMP
if [ -z "$1" ] || [ -z "$2" ] || [ -z "$3" ]; then return 1; fi
label="${1##*//}"; label="${label%%/*}";
- if [ "$2" = "hosts" ]; then
+ if [ "$2" = 'hosts' ]; then
label="Hosts: $label"; filter="$hostsFilter";
else
label="Domains: $label"; filter="$domainsFilter";
fi
- if [ "$3" = "blocked" ]; then
- type="Blocked"; D_TMP="$B_TMP";
+ if [ "$3" = 'blocked' ]; then
+ type='Blocked'; D_TMP="$B_TMP";
else
- type="Allowed"; D_TMP="$A_TMP";
+ type='Allowed'; D_TMP="$A_TMP";
fi
while [ -z "$R_TMP" ] || [ -e "$R_TMP" ]; do
R_TMP="$(mktemp -u -q -t ${packageName}_tmp.XXXXXXXX)"
if ! $dl_command "$1" $dl_flag "$R_TMP" 2>/dev/null || [ ! -s "$R_TMP" ]; then
output 1 "$_FAIL_"
output 2 "[DL] $type $label $__FAIL__\\n"
- printf "%b" "Error: downloading '${1}'.\\n" >> "$sharedMemoryError"
+ echo "Error: downloading '${1}'." >> "$sharedMemoryError"
else
sed -i "$filter" "$R_TMP"
if [ ! -s "$R_TMP" ]; then
output 1 "$_FAIL_"
output 2 "[DL] $type $label $__FAIL__\\n"
- printf "%b" "Error: parsing '${1}'.\\n" >> "$sharedMemoryError"
+ echo "Error: parsing '${1}'." >> "$sharedMemoryError"
else
cat "${R_TMP}" >> "$D_TMP"
output 1 "$_OK_"
tmpfs set message "${statusDownloading}..."
rm -f "$A_TMP" "$B_TMP" "$outputFile" "$outputCache" "$outputGzip"
if [ "$(awk '/^MemFree/ {print int($2/1000)}' "/proc/meminfo")" -lt 32 ]; then
- output 3 "Low free memory, restarting resolver... "
+ output 3 'Low free memory, restarting resolver... '
if reload_resolver 'quiet'; then
output_okn
else
fi
fi
touch $A_TMP; touch $B_TMP;
- output 1 "Downloading lists "
+ output 1 'Downloading lists '
rm -f "$sharedMemoryError"
if [ -n "$blacklist_hosts_urls" ]; then
for hf in ${blacklist_hosts_urls}; do
if [ "$parallelDL" -gt 0 ]; then
- process_url "$hf" "hosts" "blocked" &
+ process_url "$hf" 'hosts' 'blocked' &
else
- process_url "$hf" "hosts" "blocked"
+ process_url "$hf" 'hosts' 'blocked'
fi
done
fi
if [ -n "$blacklist_domains_urls" ]; then
for hf in ${blacklist_domains_urls}; do
if [ "$parallelDL" -gt 0 ]; then
- process_url "$hf" "domains" "blocked" &
+ process_url "$hf" 'domains' 'blocked' &
else
- process_url "$hf" "domains" "blocked"
+ process_url "$hf" 'domains' 'blocked'
fi
done
fi
if [ -n "$whitelist_domains_urls" ]; then
for hf in ${whitelist_domains_urls}; do
if [ "$parallelDL" -gt 0 ]; then
- process_url "$hf" "domains" "allowed" &
+ process_url "$hf" 'domains' 'allowed' &
else
- process_url "$hf" "domains" "allowed"
+ process_url "$hf" 'domains' 'allowed'
fi
done
fi
wait
[ -s "$sharedMemoryError" ] && tmpfs add error "$(cat "$sharedMemoryError")"
rm -f "$sharedMemoryError"
- output 1 "\\n"
+ output 1 '\n'
[ -n "$blacklist_domains" ] && for hf in ${blacklist_domains}; do echo "$hf" | sed "$domainsFilter" >> $B_TMP; done
whitelist_domains="${whitelist_domains}
$(cat $A_TMP)"
- [ -n "$whitelist_domains" ] && for hf in ${whitelist_domains}; do hf=$(echo "$hf" | sed 's/\./\\./g'); w_filter="$w_filter/^${hf}$/d;/\\.${hf}$/d;"; done
+ [ -n "$whitelist_domains" ] && for hf in ${whitelist_domains}; do hf="$(echo "$hf" | sed 's/\./\\./g')"; w_filter="$w_filter/^${hf}$/d;/\\.${hf}$/d;"; done
[ ! -s "$B_TMP" ] && return 1
- output 1 "Processing downloads "
- output 2 "Sorting combined list "
+ output 1 'Processing downloads '
+ output 2 'Sorting combined list '
tmpfs set message "$statusProcessing: sorting combined list"
- if [ "$allowNonAscii" -gt 0 ]; then
- if sort "$B_TMP" | uniq > "$A_TMP"; then
+ if [ "$allowIDN" -gt 0 ]; then
+ if sort -u "$B_TMP" > "$A_TMP"; then
output_ok
else
output_failn
- tmpfs add error "Error: Sorting error."
+ tmpfs add error 'Error: Sorting error.'
fi
else
- if sort "$B_TMP" | uniq | grep -E -v '[^a-zA-Z0-9=/.-]' > "$A_TMP"; then
+ if sort -u "$B_TMP" | grep -E -v '[^a-zA-Z0-9=/.-]' > "$A_TMP"; then
output_ok
else
output_failn
- tmpfs add error "Error: Sorting error."
+ tmpfs add error 'Error: Sorting error.'
fi
fi
- if [ "$targetDNS" = "dnsmasq.conf" ] || \
- [ "$targetDNS" = "dnsmasq.servers" ] || \
- [ "$targetDNS" = "unbound.adb_list" ]; then
+ if [ "$targetDNS" = 'dnsmasq.conf' ] || \
+ [ "$targetDNS" = 'dnsmasq.ipset' ] || \
+ [ "$targetDNS" = 'dnsmasq.servers' ] || \
+ [ "$targetDNS" = 'unbound.adb_list' ]; then
# TLD optimization written by Dirk Brenken (dev@brenken.org)
- output 2 "Optimizing combined list "
+ output 2 'Optimizing combined list '
tmpfs set message "$statusProcessing: optimizing combined list"
+# sed -E 'G;:t;s/(.*)(\.)(.*)(\n)(.*)/\1\4\5\2\3/;tt;s/(.*)\n(\.)(.*)/\3\2\1/' is actually slower than awk
if awk -F "." '{for(f=NF;f>1;f--)printf "%s.",$f;print $1}' "$A_TMP" > "$B_TMP"; then
if sort "$B_TMP" > "$A_TMP"; then
if awk '{if(NR=1){tld=$NF};while(getline){if($NF!~tld"\\."){print tld;tld=$NF}}print tld}' "$A_TMP" > "$B_TMP"; then
if awk -F "." '{for(f=NF;f>1;f--)printf "%s.",$f;print $1}' "$B_TMP" > "$A_TMP"; then
- if sort "$A_TMP" | uniq > "$B_TMP"; then
+ if sort -u "$A_TMP" > "$B_TMP"; then
output_ok
else
output_failn
- tmpfs add error "Error: Data file optimization."
+ tmpfs add error 'Error: Data file optimization.'
mv "$A_TMP" "$B_TMP"
fi
else
output_failn
- tmpfs add error "Error: Data file optimization."
+ tmpfs add error 'Error: Data file optimization.'
fi
else
output_failn
- tmpfs add error "Error: Data file optimization."
+ tmpfs add error 'Error: Data file optimization.'
mv "$A_TMP" "$B_TMP"
fi
else
output_failn
- tmpfs add error "Error: Data file optimization."
+ tmpfs add error 'Error: Data file optimization.'
fi
else
output_failn
- tmpfs add error "Error: Data file optimization."
+ tmpfs add error 'Error: Data file optimization.'
mv "$A_TMP" "$B_TMP"
fi
else
mv "$A_TMP" "$B_TMP"
fi
- output 2 "Whitelisting domains "
+ output 2 'Whitelisting domains '
tmpfs set message "$statusProcessing: whitelisting domains"
if sed -i "$w_filter" "$B_TMP"; then
output_ok
else
output_failn
- tmpfs add error "Error: Whitelist processing."
+ tmpfs add error 'Error: Whitelist processing.'
fi
- output 2 "Formatting merged file "
+ output 2 'Formatting merged file '
tmpfs set message "$statusProcessing: formatting merged file"
if [ -z "$outputFilterIPv6" ]; then
if sed "$outputFilter" "$B_TMP" > "$A_TMP"; then
output_ok
else
output_failn
- tmpfs add error "Error: Data file formatting."
+ tmpfs add error 'Error: Data file formatting.'
fi
else
- if sed "$outputFilter" "$B_TMP" > "$A_TMP" && \
- sed "$outputFilterIPv6" "$B_TMP" >> "$A_TMP"; then
- output_ok
- else
- output_failn
- tmpfs add error "Error: Data file formatting."
- fi
+ case "$targetDNS" in
+ dnsmasq.addnhosts)
+ if sed "$outputFilter" "$B_TMP" > "$A_TMP" && \
+ sed "$outputFilterIPv6" "$B_TMP" >> "$A_TMP"; then
+ output_ok
+ else
+ output_failn
+ tmpfs add error 'Error: Data file formatting.'
+ fi
+ ;;
+ esac
fi
case "$targetDNS" in
dnsmasq.addnhosts)
- output 2 "Creating DNSMASQ addnhosts file "
+ output 2 'Creating DNSMASQ addnhosts file '
tmpfs set message "$statusProcessing: creating DNSMASQ addnhosts file"
;;
dnsmasq.conf)
- output 2 "Creating DNSMASQ config file "
+ output 2 'Creating DNSMASQ config file '
tmpfs set message "$statusProcessing: creating DNSMASQ config file"
;;
+ dnsmasq.ipset)
+ output 2 'Creating DNSMASQ ipset file '
+ tmpfs set message "$statusProcessing: creating DNSMASQ ipset file"
+ ;;
dnsmasq.servers)
- output 2 "Creating DNSMASQ servers file "
+ output 2 'Creating DNSMASQ servers file '
tmpfs set message "$statusProcessing: creating DNSMASQ servers file"
;;
unbound.adb_list)
- output 2 "Creating Unbound adb_list file "
+ output 2 'Creating Unbound adb_list file '
tmpfs set message "$statusProcessing: creating Unbound adb_list file"
;;
esac
tmpfs add error "Error: moving data file '${A_TMP}' to '${outputFile}'."
fi
if [ "$compressedCache" -gt 0 ]; then
- output 2 "Creating compressed cache "
+ output 2 'Creating compressed cache '
tmpfs set message "$statusProcessing: creating compressed cache"
if cacheOps 'createGzip'; then
output_ok
else
output_failn
- tmpfs add error "Error: creating compressed cache."
+ tmpfs add error 'Error: creating compressed cache.'
fi
else
rm -f "$outputGzip"
fi
- output 2 "Removing temporary files "
+ output 2 'Removing temporary files '
tmpfs set message "$statusProcessing: removing temporary files"
rm -f "/tmp/${packageName}_tmp.*" "$A_TMP" "$B_TMP" "$outputCache" || j=1
if [ $j -eq 0 ]; then
output_ok
else
output_failn
- tmpfs add error "Error: removing temporary files."
+ tmpfs add error 'Error: removing temporary files.'
fi
- output 1 "\\n"
+ output 1 '\n'
}
boot() {
start_service() {
is_enabled 'on_start' || return 1
- local ip action status error message stats
+ local action status error message stats
if create_lock; then
- procd_open_instance "main"
- procd_set_param command /bin/true
- procd_set_param stdout 1
- procd_set_param stderr 1
- network_get_ipaddr ip "lan"
-# shellcheck disable=SC2154
- if [ "$forceDNS" -ne 0 ] && [ -n "$ip" ]; then
- if is_chaos_calmer; then
- fw3_setup "start" "$ip"
+ if is_chaos_calmer || ! is_ipset_procd; then
+ if [ "$forceDNS" -ne 0 ]; then
+ fw3Ops 'insert' 'dns_redirect'
+ else
+ fw3Ops 'remove' 'dns_redirect'
+ fi
+ if [ "$targetDNS" = 'dnsmasq.ipset' ]; then
+ fw3Ops 'insert' 'ipset'
else
- procd_open_data
- json_add_array firewall
- json_add_object ""
+ fw3Ops 'remove' 'ipset'
+ fi
+ procd_open_instance 'main'
+ procd_set_param command /bin/true
+ procd_set_param stdout 1
+ procd_set_param stderr 1
+ procd_close_instance
+ else
+ procd_open_instance 'main'
+ procd_set_param command /bin/true
+ procd_set_param stdout 1
+ procd_set_param stderr 1
+ procd_open_data
+ json_add_array firewall
+ if [ "$forceDNS" -ne 0 ]; then
+ json_add_object ''
json_add_string type redirect
- json_add_string target "DNAT"
- json_add_string src "lan"
- json_add_string dest "lan"
- json_add_string proto "tcpudp"
- json_add_string src_dport "53"
- json_add_string dest_port "53"
- json_add_string dest_ip "$ip"
- json_add_string name "simple-adblock-dns-hijack"
- json_add_string reflection "0"
+ json_add_string name simple_adblock_dns_redirect
+ json_add_string target DNAT
+ json_add_string src lan
+ json_add_string proto tcpudp
+ json_add_string src_dport 53
+ json_add_string dest_port 53
+ json_add_string reflection 0
json_close_object
- json_close_array
- procd_close_data
fi
+ if [ "$targetDNS" = 'dnsmasq.ipset' ]; then
+ json_add_object ''
+ json_add_string type ipset
+ json_add_string name adb
+ json_add_string match dest_net
+ json_add_string storage hash
+ json_add_string enabled 1
+ json_close_object
+ json_add_object ''
+ json_add_string type rule
+ json_add_string name simple_adblock_ipset_rule
+ json_add_string ipset adb
+ json_add_string src lan
+ json_add_string dest '*'
+ json_add_string proto tcpudp
+ json_add_string target REJECT
+ json_add_string enabled 1
+ json_close_object
+ fi
+ json_close_array
+ procd_close_data
+ procd_close_instance
fi
- procd_close_instance
status="$(tmpfs get status)"
error="$(tmpfs get error)"
action="$(tmpfs get triggers)"
case "$1" in
- download) action="download";;
+ download) action='download';;
restart|*)
- if [ "$1" != "restart" ] && [ -s "$outputFile" ] && [ -n "$status" ]; then
+ if [ -s "$outputFile" ] && [ -n "$status" ]; then
status
exit 0
elif [ ! -s "$outputFile" ] && ! cacheOps 'test' && ! cacheOps 'testGzip'; then
- action="download"
+ action='download'
elif cacheOps 'test' || cacheOps 'testGzip'; then
- action="start"
+ action='start'
fi
if [ -n "$error" ]; then
- action="download"
+ action='download'
fi
action="${action:-$1}"
;;
esac
- tmpfs del status
- tmpfs del error
- tmpfs del message
- tmpfs del stats
+ tmpfs del all
tmpfs set triggers
case $action in
reload_resolver 'on_start'
;;
restart|start)
- if [ "$action" = "restart" ]; then
+ if [ "$action" = 'restart' ]; then
output 0 "Restarting $serviceName... "
output 3 "Restarting $serviceName...\\n"
tmpfs set status "$statusRestarting"
tmpfs set status "$statusStarting"
fi
if cacheOps 'testGzip' && ! cacheOps 'test' && [ ! -s "$outputFile" ]; then
- output 3 "Found compressed cache file, unpacking it "
- tmpfs set message "found compressed cache file, unpacking it."
+ output 3 'Found compressed cache file, unpacking it '
+ tmpfs set message 'found compressed cache file, unpacking it.'
if cacheOps 'unpackGzip'; then
output_okn
else
output_fail
output "$_ERROR_: $serviceName failed to unpack compressed cache!\\n"
- tmpfs add error "Error: Failed to unpack compressed cache."
+ tmpfs add error 'Error: Failed to unpack compressed cache.'
return 1
fi
fi
if cacheOps 'test' && [ ! -s "$outputFile" ]; then
- output 3 "Found cache file, reusing it "
- tmpfs set message "found cache file, reusing it."
+ output 3 'Found cache file, reusing it '
+ tmpfs set message 'found cache file, reusing it.'
if cacheOps 'restore'; then
output_okn
else
if [ -s "$outputFile" ] && [ "$(tmpfs get status)" != "$statusFail" ]; then
output 0 "$__OK__\\n";
c="$(wc -l < "$outputFile")"
- output 3 "$serviceName is blocking $c domains "; output_okn
+ output 3 "$serviceName is blocking $c domains (with ${targetDNS}) "; output_okn
tmpfs del message
tmpfs set status "$statusSuccess: $c domains blocked (with ${targetDNS})."
error="$(tmpfs get error)"
else
output 0 "$__FAIL__\\n";
tmpfs set status "$statusFail"
- tmpfs add error "Error: Failed to create blocklist."
+ tmpfs add error 'Error: Failed to create blocklist.'
fi
remove_lock
else
fi
}
-service_started() { procd_set_config_changed firewall; }
-service_stopped() { procd_set_config_changed firewall; }
-restart_service() { rc_procd start_service "restart"; }
+service_started() { is_ipset_procd && procd_set_config_changed firewall; }
+service_stopped() { is_ipset_procd && procd_set_config_changed firewall; }
+restart_service() { rc_procd start_service 'restart'; }
reload_service() { restart_service; }
restart() { restart_service; }
reload() { restart_service; }
-dl() { rc_procd start_service "download"; }
+dl() { rc_procd start_service 'download'; }
killcache() {
rm -f "$addnhostsCache" "$addnhostsGzip"
rm -f "$dnsmasqCache" "$dnsmasqGzip"
+ rm -f "$ipsetCache" "$ipsetGzip"
rm -f "$serversCache" "$serversGzip"
rm -f "$unboundCache" "$unboundGzip"
return 0
stop_service() {
load_package_config
- if is_chaos_calmer; then
- fw3_setup 'stop'
- fi
+ fw3Ops 'remove' 'all'
if [ -s "$outputFile" ]; then
output "Stopping $serviceName... "
tmpfs del triggers
check() {
load_package_config
- local string="$1"
+ local string="$1"
+ local c="$(grep -c "$string" "$outputFile")"
if [ ! -f "$outputFile" ]; then
echo "No blacklist ('$outputFile') found."
elif [ -z "$string" ]; then
- echo "Usage: /etc/init.d/${packageName} check domain"
- elif grep -m1 -q "$string" "$outputFile"; then
- echo "Found $(grep -c "$string" "$outputFile") matches for '$string' in '$outputFile':"
+ echo "Usage: /etc/init.d/${packageName} check string"
+ elif [ "$c" -gt 0 ]; then
+ if [ "$c" -gt 1 ]; then
+ echo "Found $c matches for '$string' in '$outputFile':"
+ else
+ echo "Found 1 match for '$string' in '$outputFile':"
+ fi
case "$targetDNS" in
dnsmasq.addnhosts)
grep "$string" "$outputFile" | sed 's|^127.0.0.1 ||;s|^:: ||;';;
dnsmasq.conf)
grep "$string" "$outputFile" | sed 's|local=/||;s|/$||;';;
+ dnsmasq.ipset)
+ grep "$string" "$outputFile" | sed 's|ipset=/||;s|/adb$||;';;
dnsmasq.servers)
grep "$string" "$outputFile" | sed 's|server=/||;s|/$||;';;
unbound.adb_list)
include $(TOPDIR)/rules.mk
-PKG_LICENSE:=ASL-2.0
-PKG_MAINTAINER:=Mislav Novakovic <mislav.novakovic@sartura.hr>
-
PKG_NAME:=sysrepo
PKG_VERSION:=0.7.7
-PKG_RELEASE:=4
+PKG_RELEASE:=5
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://codeload.github.com/sysrepo/sysrepo/tar.gz/v$(PKG_VERSION)?
PKG_HASH:=2bf8cb4d810d19231eba1e549f58f3a0ce068d170a5e7b212b927396098560e4
+PKG_MAINTAINER:=Mislav Novakovic <mislav.novakovic@sartura.hr>
+PKG_LICENSE:=Apache-2.0
+PKG_LICENSE_FILES:=LICENSE
+
CMAKE_INSTALL:=1
PKG_BUILD_PARALLEL:=1
define Package/libsysrepo
SECTION:=utils
CATEGORY:=Utilities
- URL:=$(PKG_SOURCE_URL)
TITLE:=YANG-based data store library
+ URL:=https://www.sysrepo.org/
DEPENDS:=+libyang +libprotobuf-c +libev +libredblack +librt +libpthread +SYSREPO_BINDINGS:libstdcpp +SYSREPO_PYTHON:python-base +SYSREPO_LUA:lua
MENU:=1
endef
define Package/sysrepo
SECTION:=utils
CATEGORY:=Utilities
- URL:=$(PKG_SOURCE_URL)
TITLE:=YANG-based data store daemon and plugin
+ URL:=https://www.sysrepo.org/
DEPENDS:=+libsysrepo
endef
define Package/sysrepoctl
SECTION:=utils
CATEGORY:=Utilities
- URL:=$(PKG_SOURCE_URL)
TITLE:=sysrepo cli tool
+ URL:=https://www.sysrepo.org/
DEPENDS:=+libsysrepo
endef
define Package/sysrepocfg
SECTION:=utils
CATEGORY:=Utilities
- URL:=$(PKG_SOURCE_URL)
TITLE:=sysrepo configuration tool
+ URL:=https://www.sysrepo.org/
DEPENDS:=+libsysrepo
endef
-Index: sysrepo-0.7.5/src/common/sysrepo.pb-c.c
-===================================================================
--- /dev/null
-+++ sysrepo-0.7.5/src/common/sysrepo.pb-c.c
++++ b/src/common/sysrepo.pb-c.c
@@ -0,0 +1,10537 @@
+/* Generated by the protocol buffer compiler. DO NOT EDIT! */
+/* Generated from: sysrepo.proto */
+ sr__operation__value_ranges,
+ NULL,NULL,NULL,NULL /* reserved[1234] */
+};
-Index: sysrepo-0.7.5/src/common/sysrepo.pb-c.h
-===================================================================
--- /dev/null
-+++ sysrepo-0.7.5/src/common/sysrepo.pb-c.h
++++ b/src/common/sysrepo.pb-c.h
@@ -0,0 +1,4238 @@
+/* Generated by the protocol buffer compiler. DO NOT EDIT! */
+/* Generated from: sysrepo.proto */
-Index: sysrepo-0.7.5/CMakeLists.txt
-===================================================================
---- sysrepo-0.7.5.orig/CMakeLists.txt
-+++ sysrepo-0.7.5/CMakeLists.txt
-@@ -303,7 +303,7 @@ install (FILES ${INTERNAL_YANGS} DESTINA
+--- a/CMakeLists.txt
++++ b/CMakeLists.txt
+@@ -310,7 +310,7 @@ install (FILES ${INTERNAL_YANGS} DESTINA
# install NACM YANG module
if(ENABLE_NACM)
endif(ENABLE_NACM)
find_package(PkgConfig QUIET)
-@@ -327,9 +327,9 @@ if(WITH_SYSTEMD)
+@@ -334,9 +334,9 @@ if(WITH_SYSTEMD)
FILES_MATCHING PATTERN "*.service")
endif()
+#INSTALL_YANG("nc-notifications" "" "666")
+#INSTALL_YANG("notifications" "" "666")
- if(GEN_LANGUAGE_BINDINGS)
- add_subdirectory(swig)
+ # uninstall
+ add_custom_target(uninstall "${CMAKE_COMMAND}" -P "${CMAKE_MODULE_PATH}/uninstall.cmake")
-Index: sysrepo-0.7.5/src/clientlib/client_library.c
-===================================================================
---- sysrepo-0.7.5.orig/src/clientlib/client_library.c
-+++ sysrepo-0.7.5/src/clientlib/client_library.c
+--- a/src/clientlib/client_library.c
++++ b/src/clientlib/client_library.c
@@ -396,13 +396,13 @@ sr_connect(const char *app_name, const s
if (opts & SR_CONN_DAEMON_REQUIRED) {
if ((opts & SR_CONN_DAEMON_START) && (0 == getuid())) {
-Index: sysrepo-0.7.0-7aa2f18d234267403147df92c0005c871f0aa840/src/common/sr_utils.c
-===================================================================
---- sysrepo-0.7.0-7aa2f18d234267403147df92c0005c871f0aa840.orig/src/common/sr_utils.c
-+++ sysrepo-0.7.0-7aa2f18d234267403147df92c0005c871f0aa840/src/common/sr_utils.c
-@@ -460,14 +460,24 @@ sr_fd_set_nonblock(int fd)
+--- a/src/common/sr_utils.c
++++ b/src/common/sr_utils.c
+@@ -477,14 +477,24 @@ sr_fd_set_nonblock(int fd)
#if defined(SO_PEERCRED)
-Index: sysrepo-0.7.7/src/common/sysrepo.pb-c.c
-===================================================================
---- sysrepo-0.7.7.orig/src/common/sysrepo.pb-c.c
-+++ sysrepo-0.7.7/src/common/sysrepo.pb-c.c
+--- a/src/common/sysrepo.pb-c.c
++++ b/src/common/sysrepo.pb-c.c
@@ -8013,7 +8013,7 @@ const ProtobufCMessageDescriptor sr__eve
(ProtobufCMessageInit) sr__event_notif_replay_resp__init,
NULL,NULL,NULL /* reserved[123] */
sr__data_provide_req__field_descriptors,
sr__data_provide_req__field_indices_by_name,
3, sr__data_provide_req__number_ranges,
-Index: sysrepo-0.7.7/src/common/sysrepo.pb-c.h
-===================================================================
---- sysrepo-0.7.7.orig/src/common/sysrepo.pb-c.h
-+++ sysrepo-0.7.7/src/common/sysrepo.pb-c.h
+--- a/src/common/sysrepo.pb-c.h
++++ b/src/common/sysrepo.pb-c.h
@@ -1630,10 +1630,11 @@ struct _Sr__DataProvideReq
char *subscriber_address;
uint32_t subscription_id;
PKG_LICENSE:=GPL-3.0
PKG_LICENSE_FILES:=docs/LICENSE
+PKG_CPE_ID:=cpe:/a:appneta:tcpreplay
PKG_FIXUP:=libtool
PKG_INSTALL:=1
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://github.com/ronggang/transmission-web-control
-PKG_SOURCE_DATE:=2019-04-16
-PKG_SOURCE_VERSION:=a747a87babb246b0d3e6dd108ad7b4c19462cfe6
-PKG_MIRROR_HASH:=90a2e33bd799a13504881e26980823fa181b47918900fc4ef03ae3daf0625f14
+PKG_SOURCE_DATE:=2019-07-24
+PKG_SOURCE_VERSION:=c27e3ba9f08148f79fb4121d8a1f5906640ff8d6
+PKG_MIRROR_HASH:=6de823b9b8229b176fbb5c9f33f326c0fb23553ae00a36f60d019fdcdf90a712
PKG_LICENSE:=MIT
PKG_LICENSE_FILES:=LICENSE
PKG_NAME:=transmission
PKG_VERSION:=2.94
-PKG_RELEASE:=5
+PKG_RELEASE:=7
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.xz
PKG_SOURCE_URL:=@GITHUB/transmission/transmission-releases/master
PKG_HASH:=35442cc849f91f8df982c3d0d479d650c6ca19310a994eccdaa79a4af3916b7d
-PKG_MAINTAINER:=Rosen Penev <rosenp@gmail.com>
-PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(BUILD_VARIANT)/$(PKG_NAME)-$(PKG_VERSION)
+PKG_MAINTAINER:=Rosen Penev <rosenp@gmail.com>
PKG_LICENSE:=GPL-2.0-or-later
PKG_LICENSE_FILES:=COPYING
PKG_CPE_ID:=cpe:/a:transmissionbt:transmission
-PKG_FIXUP:=autoreconf
PKG_INSTALL:=1
PKG_BUILD_PARALLEL:=1
SECTION:=net
CATEGORY:=Network
TITLE:=BitTorrent client
- URL:=http://www.transmissionbt.com
+ URL:=https://www.transmissionbt.com
DEPENDS:=+libcurl +libevent2 +libminiupnpc +libnatpmp +libpthread +librt +zlib
endef
endef
Package/transmission-daemon-mbedtls/conffiles = $(Package/transmission-daemon-openssl/conffiles)
-TARGET_CFLAGS += -flto
+TARGET_CFLAGS += -ffunction-sections -fdata-sections -flto
TARGET_LDFLAGS += -Wl,--gc-sections -Wl,--as-needed
CONFIGURE_ARGS += \
IFS="$LIST_SEP"
for v in $v; do
[ -n "$v" ] && (
- echo "\"$p\": $v," | sed -e 's|_|-|g' >> $config_file
- )
+ echo "\"$p\": $v," | sed -e 's|_|-|g'
+ ) >> "$config_file"
done
unset IFS
done
IFS="$LIST_SEP"
for v in $v; do
[ -n "$v" ] && (
- echo -n "\"$p" | sed -e 's|/|\\/|g;s|_|-|g' >> $config_file; \
- echo "\": \"$v\"," >> $config_file
- )
+ printf "\"%s" "$p" | sed -e 's|/|\\/|g;s|_|-|g'; \
+ echo "\": \"$v\","
+ ) >> "$config_file"
done
unset IFS
done
local download_dir config_dir
local mem_percentage
local nice
- local cmdline
local web_home
section_enabled "$section" || return 1
fi
config_file="$config_dir/settings.json"
- [ -d $config_dir ] || {
- mkdir -p $config_dir
- chmod 0755 $config_dir
- touch $config_file
- [ -z "$user" ] || chown -R "$user:$group" $config_dir
+ [ -d "$config_dir" ] || {
+ mkdir -p "$config_dir"
+ chmod 0755 "$config_dir"
+ touch "$config_file"
+ [ -z "$user" ] || chown -R "$user:$group" "$config_dir"
}
[ "$config_overwrite" = 0 ] || {
- echo "{" > $config_file
+ echo "{" > "$config_file"
append_params "$cfg" \
alt_speed_down alt_speed_enabled alt_speed_time_begin alt_speed_time_day \
peer_congestion_algorithm peer_socket_tos rpc_bind_address rpc_password rpc_url \
rpc_username rpc_host_whitelist rpc_whitelist script_torrent_done_filename watch_dir
- echo "\"invalid-key\": false" >> $config_file
- echo "}" >> $config_file
-
+ {
+ echo "\"invalid-key\": false"
+ echo "}"
+ } >> "$config_file"
}
- cmdline="transmission-daemon -g $config_dir -f"
procd_open_instance
- procd_set_param command $cmdline
- procd_set_param user $user
- procd_set_param group $group
- procd_set_param nice $nice
- procd_set_param respawn retry=60
-
- if test -z $USE; then
+ procd_set_param command "/usr/bin/transmission-daemon"
+ procd_append_param command -f --log-error -g "$config_dir"
+ procd_set_param user "$user"
+ procd_set_param group "$group"
+ procd_set_param nice "$nice"
+ procd_set_param stderr 1
+ procd_set_param respawn
+
+ if [ -z "$USE" ]; then
procd_set_param limits core="0 0"
else
procd_set_param limits core="0 0" as="$USE $USE"
fi
procd_add_jail transmission log
- procd_add_jail_mount $config_file
- procd_add_jail_mount_rw $download_dir
+ procd_add_jail_mount "$config_file"
+ procd_add_jail_mount_rw "$download_dir"
procd_close_instance
}
+# Transmission requests large buffers by default
net.core.rmem_max = 4194304
net.core.wmem_max = 1048576
+
+# Some firewalls block SYN packets that are too small
+net.ipv4.tcp_adv_win_scale = 4
+++ /dev/null
-From a2991e22434352a3114bf437674070a135b723fd Mon Sep 17 00:00:00 2001
-From: Rosen Penev <rosenp@gmail.com>
-Date: Sun, 27 May 2018 13:38:27 -0700
-Subject: [PATCH] portcheck: Switch to HTTPS
-
-Useful for testing TLS verification.
----
- libtransmission/rpcimpl.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/libtransmission/rpcimpl.c b/libtransmission/rpcimpl.c
-index 46fd3192c..dcd96df59 100644
---- a/libtransmission/rpcimpl.c
-+++ b/libtransmission/rpcimpl.c
-@@ -1455,7 +1455,7 @@ portTest (tr_session * session,
- struct tr_rpc_idle_data * idle_data)
- {
- const int port = tr_sessionGetPeerPort (session);
-- char * url = tr_strdup_printf ("http://portcheck.transmissionbt.com/%d", port);
-+ char * url = tr_strdup_printf ("https://portcheck.transmissionbt.com/%d", port);
- tr_webRun (session, url, portTested, idle_data);
- tr_free (url);
- return NULL;
---
-2.17.0
-
+++ /dev/null
-From 81d584b9027b2b2ddd3209c1582c9ec73c26cc3e Mon Sep 17 00:00:00 2001
-From: Simon Wells <simonrwells@gmail.com>
-Date: Sun, 27 May 2018 13:44:41 -0700
-Subject: [PATCH] Change TR_CURL_SSL_VERIFY to TR_CURL_SSL_NO_VERIFY
-
-use secure by default and change the env var to match curl -k behaviour
-
-Closes: #179
----
- libtransmission/web.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/libtransmission/web.c b/libtransmission/web.c
-index 997a151b5..ce41e342a 100644
---- a/libtransmission/web.c
-+++ b/libtransmission/web.c
-@@ -392,7 +392,7 @@ tr_webThreadFunc (void * vsession)
- web->taskLock = tr_lockNew ();
- web->tasks = NULL;
- web->curl_verbose = tr_env_key_exists ("TR_CURL_VERBOSE");
-- web->curl_ssl_verify = tr_env_key_exists ("TR_CURL_SSL_VERIFY");
-+ web->curl_ssl_verify = !tr_env_key_exists ("TR_CURL_SSL_NO_VERIFY");
- web->curl_ca_bundle = tr_env_get_string ("CURL_CA_BUNDLE", NULL);
- if (web->curl_ssl_verify)
- {
---
-2.17.0
-
include $(TOPDIR)/rules.mk
PKG_NAME:=travelmate
-PKG_VERSION:=1.4.12
+PKG_VERSION:=1.4.13
PKG_RELEASE:=2
PKG_LICENSE:=GPL-3.0-or-later
PKG_MAINTAINER:=Dirk Brenken <dev@brenken.org>
--- /dev/null
+#!/bin/sh
+# Example captive-portal auto-login hook — presumably for travelmate's
+# 'command' uci option (invoked near this hunk as "${login_command}") —
+# TODO confirm the intended install path/consumer.
+# Usage: <script> <username> <password>
+# Exit codes: 0 = login succeeded, 1 = curl not available, 2 = login failed.
+
+# Resolve curl; empty result makes the -x test below fail.
+cmd="$(command -v curl)"
+# Placeholder portal endpoint — users are expected to substitute the real
+# portal URL. NOTE(review): plain HTTP sends credentials unencrypted.
+url="http://example.com/"
+# Substring expected in the portal's response body on a successful login.
+success_string="Thank you!"
+
+if [ ! -x "${cmd}" ]
+then
+	exit 1
+fi
+
+
+# POST username/password ($1/$2) form-encoded; -s keeps curl quiet so the
+# only output is the response body captured here.
+# NOTE(review): $2 is interpolated raw — special characters (&, =, %) are
+# not URL-encoded and would corrupt the form body; consider curl's
+# --data-urlencode. $url is intentionally unquoted here? verify.
+response="$("${cmd}" $url -d "password=$2&pwd=$2&username=$1" \
+	--header "Content-Type:application/x-www-form-urlencoded" -s)"
+
+# Success is detected purely by grepping the marker string in the reply.
+if echo "${response}" | grep -q "${success_string}";
+then
+	exit 0
+else
+	exit 2
+fi
#
LC_ALL=C
PATH="/usr/sbin:/usr/bin:/sbin:/bin"
-trm_ver="1.4.12"
+trm_ver="1.4.13"
trm_enabled=0
trm_debug=0
trm_iface="trm_wwan"
#
f_check()
{
- local IFS ifname radio dev_status config sta_essid sta_bssid result uci_essid uci_bssid login_command wait_time mode="${1}" status="${2:-"false"}" cp_domain="${3:-"false"}"
+ local IFS ifname radio dev_status config sta_essid sta_bssid result uci_essid uci_bssid login_command login_command_args wait_time mode="${1}" status="${2:-"false"}" cp_domain="${3:-"false"}"
if [ "${mode}" != "initial" ] && [ "${status}" = "false" ]
then
login_command="$(uci_get "travelmate" "${uci_essid}${uci_bssid}" "command")"
if [ -x "${login_command}" ]
then
- "${login_command}" >/dev/null 2>&1
+ login_command_args="$(uci_get "travelmate" "${uci_essid}${uci_bssid}" "command_args")"
+ "${login_command}" ${login_command_args} >/dev/null 2>&1
rc=${?}
- f_log "info" "captive portal login '${login_command:0:40}' for '${cp_domain}' has been executed with rc '${rc}'"
+ f_log "info" "captive portal login '${login_command:0:40} ${login_command_args}' for '${cp_domain}' has been executed with rc '${rc}'"
if [ "${rc}" -eq 0 ]
then
result="$(f_net)"
include $(INCLUDE_DIR)/kernel.mk
PKG_NAME:=xtables-addons
-PKG_VERSION:=3.3
+PKG_VERSION:=3.5
PKG_RELEASE:=1
-PKG_HASH:=efa62c7df6cd3b82d7195105bf6fe177b605f91f3522e4114d2f4e0ad54320d6
+PKG_HASH:=189ff57a0b8960969bd99141a6c79c345efa67e4461f450e2f162c9bd3d17da6
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.xz
PKG_SOURCE_URL:=@SF/xtables-addons
$(eval $(call BuildTemplate,logmark,LOGMARK,xt_LOGMARK,xt_LOGMARK,+kmod-ipt-compat-xtables))
$(eval $(call BuildTemplate,lscan,lscan,xt_lscan,xt_lscan,))
$(eval $(call BuildTemplate,lua,Lua PacketScript,xt_LUA,LUA/xt_LUA,+kmod-ipt-conntrack-extra))
+$(eval $(call BuildTemplate,proto,PROTO,xt_PROTO,xt_PROTO,))
$(eval $(call BuildTemplate,psd,psd,xt_psd,xt_psd,))
$(eval $(call BuildTemplate,quota2,quota2,xt_quota2,xt_quota2,))
$(eval $(call BuildTemplate,sysrq,SYSRQ,xt_SYSRQ,xt_SYSRQ,+kmod-ipt-compat-xtables +kmod-crypto-hash))
index 7631f6b..0d3aa72 100644
--- a/configure.ac
+++ b/configure.ac
-@@ -59,7 +59,7 @@ if test -n "$kbuilddir"; then
- echo "$kmajor.$kminor.$kmicro.$kstable in $kbuilddir";
- if test "$kmajor" -gt 5 -o "$kmajor" -eq 5 -a "$kminor" -gt 0; then
+@@ -61,7 +61,7 @@ if test -n "$kbuilddir"; then
echo "WARNING: That kernel version is not officially supported yet. Continue at own luck.";
+ elif test "$kmajor" -eq 5 -a "$kminor" -ge 0; then
+ :
- elif test "$kmajor" -eq 4 -a "$kminor" -ge 18; then
+ elif test "$kmajor" -eq 4 -a "$kminor" -ge 14; then
:
index 0d3aa72..1cea354 100644
--- a/configure.ac
+++ b/configure.ac
-@@ -59,7 +59,7 @@ if test -n "$kbuilddir"; then
- echo "$kmajor.$kminor.$kmicro.$kstable in $kbuilddir";
- if test "$kmajor" -gt 5 -o "$kmajor" -eq 5 -a "$kminor" -gt 0; then
+@@ -61,7 +61,7 @@ if test -n "$kbuilddir"; then
echo "WARNING: That kernel version is not officially supported yet. Continue at own luck.";
+ elif test "$kmajor" -eq 5 -a "$kminor" -ge 0; then
+ :
- elif test "$kmajor" -eq 4 -a "$kminor" -ge 14; then
+ elif test "$kmajor" -eq 4 -a "$kminor" -ge 9; then
:
index 618de5e..221f342 100644
--- a/extensions/xt_DELUDE.c
+++ b/extensions/xt_DELUDE.c
-@@ -146,7 +146,13 @@ delude_tg(struct sk_buff *skb, const struct xt_action_param *par)
+@@ -151,7 +151,13 @@ delude_tg(struct sk_buff *skb, const struct xt_action_param *par)
* a problem, as that is supported since Linux 2.6.35. But since we do not
* actually want to have a connection open, we are still going to drop it.
*/
index cb98e9e..b78683c 100644
--- a/extensions/xt_TARPIT.c
+++ b/extensions/xt_TARPIT.c
-@@ -431,7 +431,12 @@ tarpit_tg4(struct sk_buff *skb, const struct xt_action_param *par)
+@@ -436,7 +436,12 @@ tarpit_tg4(struct sk_buff *skb, const struct xt_action_param *par)
/* We are not interested in fragments */
if (iph->frag_off & htons(IP_OFFSET))
return NF_DROP;
return NF_DROP;
}
-@@ -472,7 +477,12 @@ tarpit_tg6(struct sk_buff *skb, const struct xt_action_param *par)
+@@ -477,7 +482,12 @@ tarpit_tg6(struct sk_buff *skb, const struct xt_action_param *par)
pr_debug("addr is not unicast.\n");
return NF_DROP;
}
+module_exit(fini);
--- a/extensions/Kbuild
+++ b/extensions/Kbuild
-@@ -26,6 +26,7 @@ obj-${build_lscan} += xt_lscan.o
+@@ -27,6 +27,7 @@ obj-${build_lscan} += xt_lscan.o
obj-${build_pknock} += pknock/
obj-${build_psd} += xt_psd.o
obj-${build_quota2} += xt_quota2.o
-include ${M}/Kbuild.*
--- a/mconfig
+++ b/mconfig
-@@ -22,3 +22,4 @@ build_lscan=m
+@@ -23,3 +23,4 @@ build_lscan=m
build_pknock=m
build_psd=m
build_quota2=m
+
--- a/extensions/Kbuild
+++ b/extensions/Kbuild
-@@ -27,6 +27,7 @@ obj-${build_pknock} += pknock/
+@@ -28,6 +28,7 @@ obj-${build_pknock} += pknock/
obj-${build_psd} += xt_psd.o
obj-${build_quota2} += xt_quota2.o
obj-${build_rtsp} += rtsp/
-include ${M}/Kbuild.*
--- a/extensions/Mbuild
+++ b/extensions/Mbuild
-@@ -22,3 +22,4 @@ obj-${build_pknock} += pknock/
+@@ -23,3 +23,4 @@ obj-${build_pknock} += pknock/
obj-${build_psd} += libxt_psd.so
obj-${build_quota2} += libxt_quota2.so
obj-${build_gradm} += libxt_gradm.so
+obj-${build_LUA} += LUA/
--- a/mconfig
+++ b/mconfig
-@@ -23,3 +23,4 @@ build_pknock=m
+@@ -24,3 +24,4 @@ build_pknock=m
build_psd=m
build_quota2=m
build_rtsp=m
include $(TOPDIR)/rules.mk
PKG_NAME:=yggdrasil
-PKG_VERSION:=0.3.8
-PKG_RELEASE:=2
+PKG_VERSION:=0.3.9
+PKG_RELEASE:=1
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://codeload.github.com/yggdrasil-network/yggdrasil-go/tar.gz/v$(PKG_VERSION)?
-PKG_HASH:=56eebbb63cf2d14897141ce037fb9aec407430718908cfeeb34fff355f08f9af
+PKG_HASH:=34780dbdbcb486320097274ef9d3c606165c44132f250e481671f99686c77b73
PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-go-$(PKG_VERSION)
PKG_MAINTAINER:=William Fleurant <meshnet@protonmail.com>
+++ /dev/null
-commit 174ebceaacae482df95e7bb96f0848bb1588a4ff
-Author: Neil Alexander <neilalexander@users.noreply.github.com>
-Date: Sun Sep 1 21:32:40 2019 +0100
-
- Fix hjson-go import in go.mod/go.sum
-
-diff --git a/go.mod b/go.mod
-index d2ca6b7..6703d5b 100644
---- a/go.mod
-+++ b/go.mod
-@@ -4,7 +4,7 @@ require (
- github.com/Arceliar/phony v0.0.0-20190831214819-9b642ea019ad
- github.com/gologme/log v0.0.0-20181207131047-4e5d8ccb38e8
- github.com/hashicorp/go-syslog v1.0.0
-- github.com/hjson/hjson-go v0.0.0-20181010104306-a25ecf6bd222
-+ github.com/hjson/hjson-go v3.0.1-0.20190209023717-9147687966d9+incompatible
- github.com/kardianos/minwinsvc v0.0.0-20151122163309-cad6b2b879b0
- github.com/mitchellh/mapstructure v1.1.2
- github.com/songgao/packets v0.0.0-20160404182456-549a10cd4091
-diff --git a/go.sum b/go.sum
-index f0fbaca..60e6090 100644
---- a/go.sum
-+++ b/go.sum
-@@ -6,6 +6,8 @@ github.com/hashicorp/go-syslog v1.0.0 h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwM
- github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
- github.com/hjson/hjson-go v0.0.0-20181010104306-a25ecf6bd222 h1:xmvkbxXDeN1ffWq8kvrhyqVYAO2aXuRBsbpxVTR+JyU=
- github.com/hjson/hjson-go v0.0.0-20181010104306-a25ecf6bd222/go.mod h1:qsetwF8NlsTsOTwZTApNlTCerV+b2GjYRRcIk4JMFio=
-+github.com/hjson/hjson-go v3.0.1-0.20190209023717-9147687966d9+incompatible h1:bLQ2Ve+eW65id3b8xEMQiAwJT4qGZeywAEMLvXjznvw=
-+github.com/hjson/hjson-go v3.0.1-0.20190209023717-9147687966d9+incompatible/go.mod h1:qsetwF8NlsTsOTwZTApNlTCerV+b2GjYRRcIk4JMFio=
- github.com/kardianos/minwinsvc v0.0.0-20151122163309-cad6b2b879b0 h1:YnZmFjg0Nvk8851WTVWlqMC1ecJH07Ctz+Ezxx4u54g=
- github.com/kardianos/minwinsvc v0.0.0-20151122163309-cad6b2b879b0/go.mod h1:rUi0/YffDo1oXBOGn1KRq7Fr07LX48XEBecQnmwjsAo=
- github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
-
PKG_NAME:=domoticz
PKG_VERSION_MAJOR:=4
-PKG_VERSION_PATCH:=9700
+PKG_VERSION_PATCH:=10717
PKG_COMMIT:=
PKG_VERSION:=$(PKG_VERSION_MAJOR).$(PKG_VERSION_PATCH)
-PKG_RELEASE:=4
+PKG_RELEASE:=1
ifeq ($(PKG_COMMIT),)
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://github.com/domoticz/domoticz/archive/$(PKG_COMMIT)/$(PKG_SOURCE)
PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(PKG_COMMIT)
endif
-PKG_HASH:=c31f185a1ffac01b86a77bf33e059a4403d814e826c9d6639c63c2e9afa55a46
+PKG_HASH:=c053a2161942529f56b748945ec297dcd67f449e68029fc886893a528891ad86
PKG_LICENSE:=GPL-3.0
PKG_LICENSE_FILES:=License.txt
+++ /dev/null
-From 9d226dab339f5767ec01ea1bcc9043ceee185cca Mon Sep 17 00:00:00 2001
-From: Rob Peters <info@domoticz.com>
-Date: Mon, 25 Mar 2019 08:50:23 +0100
-Subject: [PATCH] Fix for boot 1.70 get_io_service, fixed #3117
-
----
- hardware/Pinger.cpp | 10 ++++++++--
- hardware/TCPProxy/tcpproxy_server.cpp | 8 +++++++-
- 2 files changed, 15 insertions(+), 3 deletions(-)
-
-diff --git a/hardware/Pinger.cpp b/hardware/Pinger.cpp
-index 01a955ba4..184378738 100644
---- a/hardware/Pinger.cpp
-+++ b/hardware/Pinger.cpp
-@@ -18,6 +18,12 @@
-
- #include <iostream>
-
-+#if BOOST_VERSION >= 107000
-+#define GET_IO_SERVICE(s) ((boost::asio::io_context&)(s).get_executor().context())
-+#else
-+#define GET_IO_SERVICE(s) ((s).get_io_service())
-+#endif
-+
- class pinger
- : private domoticz::noncopyable
- {
-@@ -76,7 +82,7 @@ class pinger
- num_tries_++;
- if (num_tries_ > 4)
- {
-- resolver_.get_io_service().stop();
-+ GET_IO_SERVICE(resolver_).stop();
- }
- else
- {
-@@ -118,7 +124,7 @@ class pinger
- if (num_replies_++ == 0)
- timer_.cancel();
- m_PingState = true;
-- resolver_.get_io_service().stop();
-+ GET_IO_SERVICE(resolver_).stop();
- }
- else
- {
-diff --git a/hardware/TCPProxy/tcpproxy_server.cpp b/hardware/TCPProxy/tcpproxy_server.cpp
-index fddac08de..60445d9c2 100644
---- a/hardware/TCPProxy/tcpproxy_server.cpp
-+++ b/hardware/TCPProxy/tcpproxy_server.cpp
-@@ -15,6 +15,12 @@
- #include "stdafx.h"
- #include "tcpproxy_server.h"
-
-+#if BOOST_VERSION >= 107000
-+#define GET_IO_SERVICE(s) ((boost::asio::io_context&)(s).get_executor().context())
-+#else
-+#define GET_IO_SERVICE(s) ((s).get_io_service())
-+#endif
-+
- namespace tcp_proxy
- {
- bridge::bridge(boost::asio::io_service& ios)
-@@ -38,7 +44,7 @@ namespace tcp_proxy
- boost::asio::ip::tcp::endpoint end;
-
-
-- boost::asio::io_service &ios=downstream_socket_.get_io_service();
-+ boost::asio::io_service &ios= GET_IO_SERVICE(downstream_socket_);
- boost::asio::ip::tcp::resolver resolver(ios);
- boost::asio::ip::tcp::resolver::query query(upstream_host, upstream_port, boost::asio::ip::resolver_query_base::numeric_service);
- boost::asio::ip::tcp::resolver::iterator i = resolver.resolve(query);
main/WebServerHelper.cpp | 2 ++
1 file changed, 2 insertions(+)
-diff --git a/main/WebServerHelper.cpp b/main/WebServerHelper.cpp
-index 98ebcf7b4..0a028595c 100644
--- a/main/WebServerHelper.cpp
+++ b/main/WebServerHelper.cpp
@@ -37,7 +37,9 @@ namespace http {
include $(TOPDIR)/rules.mk
PKG_NAME:=dump1090
-PKG_VERSION:=3.7.1
+PKG_VERSION:=3.7.2
PKG_RELEASE:=1
-PKG_SOURCE_PROTO:=git
-PKG_SOURCE_URL:=https://github.com/flightaware/dump1090
-PKG_SOURCE_VERSION:=v$(PKG_VERSION)
-PKG_MIRROR_HASH:=d7ed250d624eae2eec6c0a2dd410986f42230bf929dab67893ea3bf1cab8a203
+PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
+PKG_SOURCE_URL:=https://codeload.github.com/flightaware/dump1090/tar.gz/v${PKG_VERSION}?
+PKG_HASH:=a4f8edd051e0a663a92b848bde4ab7c47cb8bce812bb368cba42bbb4b5c83f71
PKG_MAINTAINER:=Álvaro Fernández Rojas <noltari@gmail.com>
PKG_LICENSE:=GPL-2.0-or-later
PKG_NAME:=prometheus-node-exporter-lua
PKG_VERSION:=2019.08.14
-PKG_RELEASE:=1
+PKG_RELEASE:=2
PKG_MAINTAINER:=Etienne CHAMPETIER <champetier.etienne@gmail.com>
PKG_LICENSE:=Apache-2.0
dsl_max_datarate({direction="up"}, dsl_stat.max_data_rate_up)
-- dsl errors
- dsl_error_seconds_total({err="forward error correction",loc="near"}, dsl_stat.errors_fec_near)
- dsl_error_seconds_total({err="forward error correction",loc="far"}, dsl_stat.errors_fec_far)
+ dsl_error_seconds_total({err="forward error correction",loc="near"}, dsl_stat.errors_fecs_near)
+ dsl_error_seconds_total({err="forward error correction",loc="far"}, dsl_stat.errors_fecs_far)
dsl_error_seconds_total({err="errored",loc="near"}, dsl_stat.errors_es_near)
dsl_error_seconds_total({err="errored",loc="far"}, dsl_stat.errors_es_near)
dsl_error_seconds_total({err="severely errored",loc="near"}, dsl_stat.errors_ses_near)
PKG_NAME:=qemu
PKG_VERSION:=4.1.0
-PKG_RELEASE:=1
+PKG_RELEASE:=2
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.xz
PKG_HASH:=656e60218689bdeec69903087fd7582d5d3e72238d02f4481d8dc6d79fd909c6
PKG_SOURCE_URL:=http://download.qemu.org/
--- /dev/null
+From c59279437eda91841b9d26079c70b8a540d41204 Mon Sep 17 00:00:00 2001
+From: Samuel Thibault <samuel.thibault@ens-lyon.org>
+Date: Mon, 26 Aug 2019 00:55:03 +0200
+Subject: [PATCH] ip_reass: Fix use after free
+
+Using ip_deq after m_free might read pointers from an allocation reuse.
+
+This would be difficult to exploit, but that is still related with
+CVE-2019-14378 which generates fragmented IP packets that would trigger this
+issue and at least produce a DoS.
+
+Signed-off-by: Samuel Thibault <samuel.thibault@ens-lyon.org>
+---
+Taken from https://gitlab.freedesktop.org/slirp/libslirp/commit/c5927943
+
+ slirp/src/ip_input.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/slirp/src/ip_input.c b/slirp/src/ip_input.c
+index 7364ce0..aa514ae 100644
+--- a/slirp/src/ip_input.c
++++ b/slirp/src/ip_input.c
+@@ -292,6 +292,7 @@ static struct ip *ip_reass(Slirp *slirp, struct ip *ip, struct ipq *fp)
+ */
+ while (q != (struct ipasfrag *)&fp->frag_link &&
+ ip->ip_off + ip->ip_len > q->ipf_off) {
++ struct ipasfrag *prev;
+ i = (ip->ip_off + ip->ip_len) - q->ipf_off;
+ if (i < q->ipf_len) {
+ q->ipf_len -= i;
+@@ -299,9 +300,10 @@ static struct ip *ip_reass(Slirp *slirp, struct ip *ip, struct ipq *fp)
+ m_adj(dtom(slirp, q), i);
+ break;
+ }
++ prev = q;
+ q = q->ipf_next;
+- m_free(dtom(slirp, q->ipf_prev));
+- ip_deq(q->ipf_prev);
++ ip_deq(prev);
++ m_free(dtom(slirp, prev));
+ }
+
+ insert:
+--
+2.22.0
+