-# openwisp.org
+# SPDX-License-Identifier: GPL-3.0-only
#
-# This is free software, licensed under the GNU General Public License v3.
-# See /LICENSE for more information.
+# openwisp.org
+
include $(TOPDIR)/rules.mk
PKG_NAME:=openwisp-config
-PKG_RELEASE:=1
+PKG_RELEASE:=2
PKG_MAINTAINER:=Federico Capoano <f.capoano@openwisp.io>
-PKG_LICENSE:=GPL3.0-or-later
+PKG_LICENSE:=GPL-3.0-or-later
PKG_SOURCE_URL:=https://github.com/openwisp/openwisp-config.git
PKG_MIRROR_HASH:=7daa10a9d170e665f33a5555a246b4da2223c2d8d0e8a047edb01701c8886986
-# openwisp.org
+# SPDX-License-Identifier: GPL-3.0-only
#
-# This is free software, licensed under the GNU General Public License v3.
-# See /LICENSE for more information.
+# openwisp.org
+
include $(TOPDIR)/rules.mk
PKG_NAME:=openwisp-monitoring
-PKG_RELEASE:=1
+PKG_RELEASE:=2
PKG_MAINTAINER:=Federico Capoano <support@openwisp.io>
-PKG_LICENSE:=GPL3.0-or-later
+PKG_LICENSE:=GPL-3.0-or-later
PKG_LICENSE_FILES:=LICENSE
PKG_SOURCE_URL:=https://github.com/openwisp/openwrt-openwisp-monitoring.git
include $(TOPDIR)/rules.mk
PKG_NAME:=zabbix
-PKG_VERSION:=5.0.18
+PKG_VERSION:=6.2.3
PKG_RELEASE:=2
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
-PKG_SOURCE_URL:=https://cdn.zabbix.com/zabbix/sources/stable/5.0/
-PKG_HASH:=7d15c4d683801edc2bdcda3fd94afdf6a7142a1a92aa71f4a9220af8e39d9e0e
+PKG_SOURCE_URL:=https://cdn.zabbix.com/zabbix/sources/stable/6.2/
+PKG_HASH:=2be7e57fb33a55fee71480598e317ffa6a8ee5a39639a7e1b42b2ea6872107b5
PKG_MAINTAINER:=Etienne CHAMPETIER <champetier.etienne@gmail.com>
PKG_LICENSE:=GPL-2.0
##### Active checks related
-@@ -151,8 +149,6 @@ Server=127.0.0.1
+@@ -164,8 +162,6 @@ Server=127.0.0.1
# Default:
# ServerActive=
-ServerActive=127.0.0.1
-
### Option: Hostname
- # Unique, case sensitive hostname.
- # Required for active checks and must match hostname as configured on the server.
-@@ -162,8 +158,6 @@ ServerActive=127.0.0.1
+ # List of comma delimited unique, case sensitive hostnames.
+ # Required for active checks and must match hostnames as configured on the server.
+@@ -175,8 +171,6 @@ ServerActive=127.0.0.1
# Default:
# Hostname=
### Option: HostnameItem
# Item used for generating Hostname if it is undefined. Ignored if Hostname is defined.
# Does not support UserParameters or aliases.
-@@ -303,8 +297,8 @@ Hostname=Zabbix server
+@@ -326,8 +320,8 @@ Hostname=Zabbix server
# Include=
# Include=/usr/local/etc/zabbix_agentd.userparams.conf
--- a/src/libs/zbxcommon/str.c
+++ b/src/libs/zbxcommon/str.c
-@@ -54,7 +54,7 @@ static const char help_message_footer[]
- void version(void)
+@@ -49,7 +49,7 @@ static const char help_message_footer[]
+ void zbx_version(void)
{
printf("%s (Zabbix) %s\n", title_message, ZABBIX_VERSION);
- printf("Revision %s %s, compilation time: %s %s\n\n", ZABBIX_REVISION, ZABBIX_REVDATE, __DATE__, __TIME__);
+ printf("Revision %s %s\n\n", ZABBIX_REVISION, ZABBIX_REVDATE);
puts(copyright_message);
- #if defined(HAVE_GNUTLS) || defined(HAVE_OPENSSL)
- printf("\n");
+ }
+
PKG_SOURCE_URL:=https://github.com/wkz/mdio-tools
PKG_SOURCE_PROTO:=git
-PKG_SOURCE_VERSION:=1.1.1
-PKG_MIRROR_HASH:=050d7386ed086b2ef3f028b3409ebabfdc51bd5a4c5a1d64afa29a0ceebf4771
+PKG_SOURCE_VERSION:=1.2.0
+PKG_MIRROR_HASH:=5dd21c47d12b5d81dd0783d874480172d00027c4a8902839fa9fc16718092c79
PKG_LICENSE:=GPL-2.0-only
PKG_LICENSE_FILES:=COPYING
PKG_NAME:=lua-eco
PKG_VERSION:=1.0.0
-PKG_RELEASE:=2
+PKG_RELEASE:=4
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL=https://github.com/zhaojh329/lua-eco/releases/download/v$(PKG_VERSION)
config LUA_ECO_DEFAULT_WOLFSSL
bool
default y if PACKAGE_libopenssl != y && \
- PACKAGE_libwolfssl >= PACKAGE_libopenssl && \
- PACKAGE_libwolfssl >= PACKAGE_libmbedtls
+ (PACKAGE_libwolfssl >= PACKAGE_libopenssl || \
+ PACKAGE_libwolfsslcpu-crypto >= PACKAGE_libopenssl) && \
+ (PACKAGE_libwolfssl >= PACKAGE_libmbedtls || \
+ PACKAGE_libwolfsslcpu-crypto >= PACKAGE_libmbedtls)
config LUA_ECO_DEFAULT_OPENSSL
bool
depends on PACKAGE_libopenssl
config LUA_ECO_WOLFSSL
bool "wolfSSL"
- depends on PACKAGE_libwolfssl
+ depends on PACKAGE_libwolfssl || PACKAGE_libwolfsslcpu-crypto
config LUA_ECO_MBEDTLS
bool "mbedTLS"
depends on PACKAGE_libmbedtls
include $(TOPDIR)/rules.mk
PKG_NAME:=lua-penlight
-PKG_VERSION:=1.11.0
+PKG_VERSION:=1.13.1
PKG_RELEASE:=1
PKG_BUILD_DIR:=$(BUILD_DIR)/Penlight-$(PKG_VERSION)
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://codeload.github.com/lunarmodules/Penlight/tar.gz/$(PKG_VERSION)?
-PKG_HASH:=710782d5b64d9e2c658c1d3018e6b203ac8e7af608ed22382dca573fbdb4f1b3
+PKG_HASH:=530380e1a377df519c2e589f47823f79701f8e0e67f6bbd994e18d09a470b680
PKG_LICENSE:=MIT
PKG_LICENSE_FILES:=LICENSE.md
include $(TOPDIR)/rules.mk
PKG_NAME:=node
-PKG_VERSION:=v16.17.0
+PKG_VERSION:=v16.17.1
PKG_RELEASE:=1
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.xz
PKG_SOURCE_URL:=https://nodejs.org/dist/$(PKG_VERSION)
-PKG_HASH:=1d28c285685e446985921bc963565ca9c0c5f4fda9755e489c06808ea9795645
+PKG_HASH:=6721feb4152d56d2c6b358ce397abd5a7f1daf09ee2e25c5021b9b4d3f86a330
PKG_MAINTAINER:=Hirokazu MORIKAWA <morikw2@gmail.com>, Adrian Panella <ianchi74@outlook.com>
PKG_LICENSE:=MIT
PECL_NAME:=pecl_http
PECL_LONGNAME:=Extended HTTP Support
-PKG_VERSION:=4.2.2
+PKG_VERSION:=4.2.3
PKG_RELEASE:=1
-PKG_HASH:=c338afd29259f0093f07f4e99b80705a2d5bb046c7ab32e5938eef29bbb63a6e
+PKG_HASH:=fa2ab558fc8f0928a10f35c0f566f7c4a1d32e727bd3a96579e4c28482ee9d6a
PKG_NAME:=php8-pecl-http
PKG_SOURCE:=$(PECL_NAME)-$(PKG_VERSION).tgz
+++ /dev/null
-#
-# This is free software, licensed under the GNU General Public License v2.
-# See /LICENSE for more information.
-#
-
-include $(TOPDIR)/rules.mk
-
-PECL_NAME:=sodium
-PECL_LONGNAME:=Bindings for libsodium
-
-PKG_VERSION:=2.0.23
-PKG_RELEASE:=$(AUTORELEASE)
-PKG_HASH:=f8c04533de8e4b48077f65de61b187e195c0919c41d4543e2263604fa0c50379
-
-PKG_NAME:=php8-pecl-sodium
-PKG_SOURCE:=lib$(PECL_NAME)-$(PKG_VERSION).tgz
-PKG_SOURCE_URL:=http://pecl.php.net/get/
-
-PKG_MAINTAINER:=W. Michael Petullo <mike@flyn.org>
-
-PKG_LICENSE:=ISC
-PKG_LICENSE_FILES:=LICENSE
-
-PKG_BUILD_DEPENDS:=php8
-PKG_BUILD_DIR:=$(BUILD_DIR)/pecl-php8/lib$(PECL_NAME)-$(PKG_VERSION)
-PKG_BUILD_PARALLEL:=1
-
-include $(INCLUDE_DIR)/package.mk
-include $(INCLUDE_DIR)/nls.mk
-include ../php8/pecl.mk
-
-$(eval $(call PHP8PECLPackage,sodium,$(PECL_LONGNAME),+libsodium,30))
-$(eval $(call BuildPackage,$(PKG_NAME)))
--- /dev/null
+#
+# This is free software, licensed under the GNU General Public License v2.
+# See /LICENSE for more information.
+#
+
+include $(TOPDIR)/rules.mk
+
+PECL_NAME:=xdebug
+PECL_LONGNAME:=Xdebug extension
+
+PKG_VERSION:=3.1.5
+PKG_RELEASE:=1
+PKG_HASH:=55f6ef381245da079b2fc5ce1cfbcb7961197d0c0e04f9d977613cf9aa969a79
+
+PKG_NAME:=php8-pecl-xdebug
+PKG_SOURCE:=$(PECL_NAME)-$(PKG_VERSION).tgz
+PKG_SOURCE_URL:=http://pecl.php.net/get/
+
+PKG_MAINTAINER:=Michael Heimpold <mhei@heimpold.de>
+
+PKG_LICENSE:=Xdebug
+PKG_LICENSE_FILES:=LICENSE
+
+PKG_BUILD_DIR:=$(BUILD_DIR)/pecl-php8/$(PECL_NAME)-$(PKG_VERSION)
+PKG_BUILD_PARALLEL:=1
+
+include $(INCLUDE_DIR)/package.mk
+include $(INCLUDE_DIR)/nls.mk
+include ../php8/pecl.mk
+
+CONFIGURE_ARGS+= \
+ --disable-xdebug-dev \
+ --without-xdebug-compression
+
+$(eval $(call PHP8PECLPackage,$(PECL_NAME),$(PECL_LONGNAME),,20,zend))
+$(eval $(call BuildPackage,$(PKG_NAME)))
include $(TOPDIR)/rules.mk
PKG_NAME:=php
-PKG_VERSION:=8.1.10
+PKG_VERSION:=8.1.11
PKG_RELEASE:=1
PKG_MAINTAINER:=Michael Heimpold <mhei@heimpold.de>
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.xz
PKG_SOURCE_URL:=http://www.php.net/distributions/
-PKG_HASH:=90e7120c77ee83630e6ac928d23bc6396603d62d83a3cf5df8a450d2e3070162
+PKG_HASH:=3005198d7303f87ab31bc30695de76e8ad62783f806b6ab9744da59fe41cc5bd
PKG_BUILD_PARALLEL:=1
PKG_USE_MIPS16:=0
mbstring mysqli mysqlnd \
opcache openssl \
pcntl pdo pdo-mysql pdo-pgsql pdo-sqlite pgsql phar \
- session shmop simplexml snmp soap sockets sqlite3 sysvmsg sysvsem sysvshm \
+ session shmop simplexml snmp soap sockets sodium sqlite3 sysvmsg sysvsem sysvshm \
tokenizer \
xml xmlreader xmlwriter \
zip
CONFIGURE_ARGS+= --disable-sockets
endif
+ifneq ($(SDK)$(CONFIG_PACKAGE_php8-mod-sodium),)
+ CONFIGURE_ARGS+= --with-sodium=shared,"$(STAGING_DIR)/usr"
+else
+ CONFIGURE_ARGS+= --without-sodium
+endif
+
ifneq ($(SDK)$(CONFIG_PACKAGE_php8-mod-sqlite3),)
CONFIGURE_ARGS+= --with-sqlite3=shared
else
$(eval $(call BuildModule,snmp,SNMP,+PACKAGE_php8-mod-snmp:libnetsnmp +PACKAGE_php8-mod-snmp:libopenssl))
$(eval $(call BuildModule,soap,SOAP,+@PHP8_LIBXML +PACKAGE_php8-mod-soap:libxml2))
$(eval $(call BuildModule,sockets,Sockets))
+$(eval $(call BuildModule,sodium,Sodium,+PACKAGE_php8-mod-sodium:libsodium,30))
$(eval $(call BuildModule,sqlite3,SQLite3,+PACKAGE_php8-mod-sqlite3:libsqlite3))
$(eval $(call BuildModule,sysvmsg,System V messages))
$(eval $(call BuildModule,sysvsem,System V shared memory))
include $(TOPDIR)/rules.mk
PKG_NAME:=django
-PKG_VERSION:=4.1
+PKG_VERSION:=4.1.1
PKG_RELEASE:=$(AUTORELEASE)
PYPI_NAME:=Django
-PKG_HASH:=032f8a6fc7cf05ccd1214e4a2e21dfcd6a23b9d575c6573cacc8c67828dbe642
+PKG_HASH:=a153ffd5143bf26a877bfae2f4ec736ebd8924a46600ca089ad96b54a1d4e28e
PKG_MAINTAINER:=Alexandru Ardelean <ardeleanalex@gmail.com>, Peter Stadler <peter.stadler@student.uibk.ac.at>
PKG_LICENSE:=BSD-3-Clause
include $(TOPDIR)/rules.mk
PKG_NAME:=python-flask-socketio
-PKG_VERSION:=5.0.1
+PKG_VERSION:=5.3.1
PKG_RELEASE:=1
PYPI_NAME:=Flask-SocketIO
-PKG_HASH:=5c4319f5214ada20807857dc8fdf3dc7d2afe8d6dd38f5c516c72e2be47d2227
+PKG_HASH:=fd0ed0fc1341671d92d5f5b2f5503916deb7aa7e2940e6636cfa2c087c828bf9
PKG_MAINTAINER:=Jan Pavlinec <jan.pavlinec1@gmail.com>
PKG_LICENSE:=MIT
include $(TOPDIR)/rules.mk
PKG_NAME:=python-jsonschema
-PKG_VERSION:=4.9.0
+PKG_VERSION:=4.16.0
PKG_RELEASE:=1
PYPI_NAME:=jsonschema
-PKG_HASH:=df10e65c8f3687a48e93d0d348ce0ce5f897b5a28e9bbcbbe8f7c7eaf019e850
+PKG_HASH:=165059f076eff6971bae5b742fc029a7b4ef3f9bcf04c14e4776a7605de14b23
PKG_MAINTAINER:=Javier Marcet <javier@marcet.info>
PKG_LICENSE:=MIT
@@ -0,0 +1,2 @@
+from setuptools import setup
+setup(use_scm_version=True)
+--- /dev/null
++++ b/setup.cfg
+@@ -0,0 +1,96 @@
++[metadata]
++name = jsonschema
++url = https://github.com/python-jsonschema/jsonschema
++project_urls =
++ Funding = https://github.com/sponsors/Julian
++ Tidelift = https://tidelift.com/subscription/pkg/pypi-jsonschema?utm_source=pypi-jsonschema&utm_medium=referral&utm_campaign=pypi-link
++ Documentation = https://python-jsonschema.readthedocs.io/
++ Changelog = https://github.com/python-jsonschema/jsonschema/blob/main/CHANGELOG.rst
++ Source = https://github.com/python-jsonschema/jsonschema
++ Issues = https://github.com/python-jsonschema/jsonschema/issues/
++description = An implementation of JSON Schema validation for Python
++long_description = file: README.rst
++long_description_content_type = text/x-rst
++author = Julian Berman
++author_email = Julian+jsonschema@GrayVines.com
++license = MIT
++classifiers =
++ Development Status :: 5 - Production/Stable
++ Intended Audience :: Developers
++ License :: OSI Approved :: MIT License
++ Operating System :: OS Independent
++ Programming Language :: Python
++ Programming Language :: Python :: 3.7
++ Programming Language :: Python :: 3.8
++ Programming Language :: Python :: 3.9
++ Programming Language :: Python :: 3.10
++ Programming Language :: Python :: 3.11
++ Programming Language :: Python :: Implementation :: CPython
++ Programming Language :: Python :: Implementation :: PyPy
++
++[options]
++packages = find:
++python_requires = >=3.7
++install_requires =
++ attrs>=17.4.0
++ importlib_metadata;python_version<'3.8'
++ importlib_resources>=1.4.0;python_version<'3.9'
++ pyrsistent>=0.14.0,!=0.17.0,!=0.17.1,!=0.17.2
++ typing_extensions;python_version<'3.8'
++
++[options.extras_require]
++format =
++ fqdn
++ idna
++ isoduration
++ jsonpointer>1.13
++ rfc3339-validator
++ rfc3987
++ uri_template
++ webcolors>=1.11
++format_nongpl =
++ fqdn
++ idna
++ isoduration
++ jsonpointer>1.13
++ rfc3339-validator
++ rfc3986-validator>0.1.0
++ uri_template
++ webcolors>=1.11
++
++[options.entry_points]
++console_scripts =
++ jsonschema = jsonschema.cli:main
++
++[options.package_data]
++jsonschema = schemas/*.json, schemas/*/*.json
++
++[flake8]
++ban-relative-imports = true
++inline-quotes = "
++exclude =
++ jsonschema/__init__.py
++ jsonschema/_reflect.py
++ignore =
++ B008, # Barring function calls in default args. Ha, no.
++ B306, # See https://github.com/PyCQA/flake8-bugbear/issues/131
++ W503, # (flake8 default) old PEP8 boolean operator line breaks
++
++[mypy]
++ignore_missing_imports = true
++
++[pydocstyle]
++match = (?!(test_|_|compat|cli)).*\.py # see PyCQA/pydocstyle#323
++add-select =
++ D410, # Trailing whitespace plz
++add-ignore =
++ D107, # Hah, no
++ D200, # 1-line docstrings don't need to be on one line
++ D202, # One line is fine.
++ D412, # Trailing whitespace plz
++ D413, # No trailing whitespace plz
++
++[egg_info]
++tag_build =
++tag_date = 0
++
include $(TOPDIR)/rules.mk
PKG_NAME:=python-stem
-PKG_VERSION:=1.8.0
-PKG_RELEASE:=3
+PKG_VERSION:=1.8.1
+PKG_RELEASE:=$(AUTORELEASE)
PYPI_NAME:=stem
-PKG_HASH:=a0b48ea6224e95f22aa34c0bc3415f0eb4667ddeae3dfb5e32a6920c185568c2
+PKG_HASH:=81d43a7c668ba9d7bc1103b2e7a911e9d148294b373d27a59ae8da79ef7a3e2f
PKG_MAINTAINER:=Javier Marcet <javier@marcet.info>
PKG_LICENSE:=LGPL-3.0-only
+++ /dev/null
-From 36bcb170ba9097885902513640075eac2e6ce384 Mon Sep 17 00:00:00 2001
-From: Calin Culianu <calin.culianu@gmail.com>
-Date: Mon, 8 Nov 2021 18:15:59 -0600
-Subject: [PATCH] Fixup for Python 3.10
-
-Closes issue #109. Long story short: a few names from collection are
-now moved to collection.abc exclusively starting in Python 3.10. The
-only name this app uses from there that was moved is
-`collections.Iterable`. Python versions starting from 3.3 support both
-`collections.Iterable` and `collections.abc.Iterable` as the way to refer to
-this class, which Python 3.10 being the first one to drop
-`collections.Iterable`. So.. we just work around this API quirk
-and always refer ot it as `collections.abc.Iterable`.
----
- stem/control.py | 3 ++-
- 1 file changed, 2 insertions(+), 1 deletion(-)
-
---- a/stem/control.py
-+++ b/stem/control.py
-@@ -249,6 +249,7 @@ If you're fine with allowing your script
-
- import calendar
- import collections
-+import collections.abc
- import functools
- import inspect
- import io
-@@ -2532,7 +2533,7 @@ class Controller(BaseController):
- for param, value in params:
- if isinstance(value, str):
- query_comp.append('%s="%s"' % (param, value.strip()))
-- elif isinstance(value, collections.Iterable):
-+ elif isinstance(value, collections.abc.Iterable):
- query_comp.extend(['%s="%s"' % (param, val.strip()) for val in value])
- elif not value:
- query_comp.append(param)
HOST_CONFIGURE_ARGS+= \
--enable-optimizations \
--with-ensurepip=upgrade \
- --with-system-expat=$(STAGING_DIR_HOSTPKG) \
+ --with-system-expat=$(STAGING_DIR_HOST) \
--with-ssl-default-suites=openssl \
--without-cxx-main \
--without-pymalloc \
include $(TOPDIR)/rules.mk
PKG_NAME:=cJSON
-PKG_VERSION:=1.7.14
+PKG_VERSION:=1.7.15
PKG_RELEASE:=3
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://codeload.github.com/DaveGamble/cJSON/tar.gz/v$(PKG_VERSION)?
-PKG_HASH:=fb50a663eefdc76bafa80c82bc045af13b1363e8f45cec8b442007aef6a41343
+PKG_HASH:=5308fd4bd90cef7aa060558514de6a1a4a0819974a26e6ed13973c5f624c24b2
PKG_MAINTAINER:=Karl Palsson <karlp@etactica.com>
PKG_LICENSE:=MIT
include $(TOPDIR)/rules.mk
PKG_NAME:=expat
-PKG_VERSION:=2.4.7
+PKG_VERSION:=2.4.9
PKG_RELEASE:=$(AUTORELEASE)
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.xz
PKG_SOURCE_URL:=@SF/expat
-PKG_HASH:=9875621085300591f1e64c18fd3da3a0eeca4a74f884b9abac2758ad1bd07a7d
+PKG_HASH:=6e8c0728fe5c7cd3f93a6acce43046c5e4736c7b4b68e032e9350daa0efc0354
PKG_MAINTAINER:=Ted Hess <thess@kitschensync.net>
PKG_LICENSE:=MIT
include $(TOPDIR)/rules.mk
PKG_NAME:=glib2
-PKG_VERSION:=2.70.5
+PKG_VERSION:=2.74.0
PKG_RELEASE:=$(AUTORELEASE)
PKG_SOURCE:=glib-$(PKG_VERSION).tar.xz
-PKG_SOURCE_URL:=@GNOME/glib/2.70
-PKG_HASH:=f70bf76ebcc84e0705722f038be8e2f9a58d17e1a700810c635fcc18b8974b7e
+PKG_SOURCE_URL:=@GNOME/glib/$(word 1,$(subst ., ,$(PKG_VERSION))).$(word 2,$(subst ., ,$(PKG_VERSION)))
+PKG_HASH:=3652c7f072d7b031a6b5edd623f77ebc5dcd2ae698598abcc89ff39ca75add30
PKG_MAINTAINER:=Peter Wagner <tripolar@gmx.at>
PKG_LICENSE:=LGPL-2.1-or-later
PKG_BUILD_DIR:=$(BUILD_DIR)/glib-$(PKG_VERSION)
HOST_BUILD_DIR:=$(BUILD_DIR_HOST)/glib-$(PKG_VERSION)
-HOST_BUILD_DEPENDS:=libffi/host pcre/host
PKG_CONFIG_DEPENDS:=CONFIG_BUILD_NLS
PKG_FORTIFY_SOURCE:=0
define Package/glib2
SECTION:=libs
CATEGORY:=Libraries
- DEPENDS:=$(ICONV_DEPENDS) $(INTL_DEPENDS) +zlib +libpthread +libffi +libattr +libpcre
+ DEPENDS:=$(ICONV_DEPENDS) $(INTL_DEPENDS) +zlib +libpthread +libffi +libattr +libpcre2
TITLE:=glib 2.0
URL:=http://www.gtk.org/
endef
-Dgtk_doc=false \
-Dbsymbolic_functions=true \
-Dforce_posix_threads=true \
- -Dfam=false \
-Dtests=false \
-Dinstalled_tests=false \
-Doss_fuzz=disabled \
-Dglib_checks=true \
-Dlibelf=disabled
-MESON_HOST_ARGS += $(COMP_ARGS) -Dxattr=false -Ddefault_library=static -Dnls=disabled
+MESON_HOST_ARGS += $(COMP_ARGS) -Dxattr=false -Ddefault_library=static -Dnls=disabled -Dwrap_mode=forcefallback
MESON_ARGS += $(COMP_ARGS) -Dxattr=true -Db_lto=true -Ddefault_library=both -Dnls=$(if $(CONFIG_BUILD_NLS),en,dis)abled
define Build/InstallDev
+++ /dev/null
---- a/meson.build
-+++ b/meson.build
-@@ -948,7 +948,7 @@ if host_system == 'windows' and (cc.get_
- glib_conf.set('HAVE_C99_SNPRINTF', false)
- glib_conf.set('HAVE_C99_VSNPRINTF', false)
- glib_conf.set('HAVE_UNIX98_PRINTF', false)
--elif not cc_can_run and host_system in ['ios', 'darwin']
-+elif true
- # All these are true when compiling natively on macOS, so we should use good
- # defaults when building for iOS and tvOS.
- glib_conf.set('HAVE_C99_SNPRINTF', true)
+++ /dev/null
-From 750687bf2824fcaf8976fb8b558d583f29acdfeb Mon Sep 17 00:00:00 2001
-From: Eli Schwartz <eschwartz@archlinux.org>
-Date: Tue, 7 Jun 2022 16:14:04 -0400
-Subject: [PATCH 1/2] meson: simplify iconv lookups using Meson's builtin
- dependency lookup
-
-iconv is complicated to look up. That complexity now resides in
-Meson, since 0.60.0, via a `dependency('iconv')` lookup, so use that
-instead.
-
-No effort is made to support the old option for which type of iconv to
-use. It was a false choice, because if only one was available, then
-that's the only one you can use, and if both are available, the external
-iconv shadows the builtin one and renders the builtin one unusable,
-so there is still only one you can use.
-
-This meant that when configuring glib with -Diconv=libc on systems that
-had an external iconv, the configure check would detect a valid libc
-iconv, try to use it, and then fail during the build because iconv.h
-belongs to the external iconv and generates machine code using the
-external iconv ABI, but fails to link to the iconv `find_library()`.
-Meson handles this transparently.
----
- meson.build | 20 +-------------------
- meson_options.txt | 8 +-------
- 2 files changed, 2 insertions(+), 26 deletions(-)
-
---- a/meson.build
-+++ b/meson.build
-@@ -1958,28 +1958,10 @@ glibconfig_conf.set10('G_HAVE_GROWING_ST
- # We should never use the MinGW C library's iconv because it may not be
- # available in the actual runtime environment. On Windows, we always use
- # the built-in implementation
--iconv_opt = get_option('iconv')
- if host_system == 'windows'
- libiconv = []
-- # We have a #include "win_iconv.c" in gconvert.c on Windows, so we don't need
-- # any external library for it
-- if iconv_opt != 'auto'
-- warning('-Diconv was set to @0@, which was ignored')
-- endif
- else
-- found_iconv = false
-- if ['auto', 'libc'].contains(iconv_opt) and cc.has_function('iconv_open')
-- libiconv = []
-- found_iconv = true
-- endif
-- if not found_iconv and ['auto', 'external'].contains(iconv_opt) and cc.has_header_symbol('iconv.h', 'iconv_open')
-- libiconv = [cc.find_library('iconv')]
-- found_iconv = true
-- endif
--
-- if not found_iconv
-- error('iconv implementation "@0@" not found'.format(iconv_opt))
-- endif
-+ libiconv = dependency('iconv')
- endif
-
- pcre = dependency('libpcre', version: '>= 8.31', required : false) # Should check for Unicode support, too. FIXME
-@@ -2046,42 +2028,37 @@ endif
- # FIXME: glib-gettext.m4 has much more checks to detect broken/uncompatible
- # implementations. This could be extended if issues are found in some platforms.
- libintl_deps = []
--if cc.has_function('ngettext', args : osx_ldflags)
-- have_bind_textdomain_codeset = cc.has_function('bind_textdomain_codeset')
--else
-- # First just find the bare library.
-- libintl = cc.find_library('intl', required : false)
-- # The bare library probably won't link without help if it's static.
-- if libintl.found() and not cc.has_function('ngettext', args : osx_ldflags, dependencies : libintl)
-- libintl_iconv = cc.find_library('iconv', required : false)
-- # libintl supports different threading APIs, which may not
-- # require additional flags, but it defaults to using pthreads if
-- # found. Meson's "threads" dependency does not allow you to
-- # prefer pthreads. We may not be using pthreads for glib itself
-- # either so just link the library to satisfy libintl rather than
-- # also defining the macros with the -pthread flag.
-- libintl_pthread = cc.find_library('pthread', required : false)
-- # Try linking with just libiconv.
-- if libintl_iconv.found() and cc.has_function('ngettext', args : osx_ldflags, dependencies : [libintl, libintl_iconv])
-- libintl_deps += [libintl_iconv]
-- # Then also try linking with pthreads.
-- elif libintl_iconv.found() and libintl_pthread.found() and cc.has_function('ngettext', args : osx_ldflags, dependencies : [libintl, libintl_iconv, libintl_pthread])
-- libintl_deps += [libintl_iconv, libintl_pthread]
-- else
-- libintl = disabler()
-- endif
-- endif
-- if not libintl.found()
-- libintl = subproject('proxy-libintl').get_variable('intl_dep')
-- libintl_deps = [libintl] + libintl_deps
-- have_bind_textdomain_codeset = true # proxy-libintl supports it
-+libintl = dependency('intl', required: false)
-+if libintl.found()
-+ # libintl supports different threading APIs, which may not
-+ # require additional flags, but it defaults to using pthreads if
-+ # found. Meson's "threads" dependency does not allow you to
-+ # prefer pthreads. We may not be using pthreads for glib itself
-+ # either so just link the library to satisfy libintl rather than
-+ # also defining the macros with the -pthread flag.
-+ #
-+ # Meson's builtin dependency lookup as of 0.60.0 doesn't check for
-+ # pthread, so we do this manually here.
-+ if cc.has_function('ngettext', dependencies : libintl)
-+ libintl_deps += [libintl]
- else
-- libintl_deps = [libintl] + libintl_deps
-- have_bind_textdomain_codeset = cc.has_function('bind_textdomain_codeset', args : osx_ldflags,
-- dependencies : libintl_deps)
-+ libintl_pthread = cc.find_library('pthread', required : false)
-+ if libintl_pthread.found() and cc.has_function('ngettext', dependencies : [libintl, libintl_pthread])
-+ libintl_deps += [libintl, libintl_pthread]
-+ else
-+ libintl = disabler()
-+ endif
- endif
- endif
-
-+if libintl.found()
-+ have_bind_textdomain_codeset = cc.has_function('bind_textdomain_codeset', dependencies: libintl_deps)
-+else
-+ libintl = subproject('proxy-libintl').get_variable('intl_dep')
-+ libintl_deps = [libintl]
-+ have_bind_textdomain_codeset = true # proxy-libintl supports it
-+endif
-+
- glib_conf.set('HAVE_BIND_TEXTDOMAIN_CODESET', have_bind_textdomain_codeset)
-
- # We require gettext to always be present
---- a/meson_options.txt
-+++ b/meson_options.txt
-@@ -3,12 +3,6 @@ option('runtime_libdir',
- value : '',
- description : 'install runtime libraries relative to libdir')
-
--option('iconv',
-- type : 'combo',
-- choices : ['auto', 'libc', 'external'],
-- value : 'auto',
-- description : 'iconv implementation to use (\'libc\' = \'Part of the C library\'; \'external\' = \'External libiconv\'; \'auto\' = \'Auto-detect which iconv is available\')')
--
- option('charsetalias_dir',
- type : 'string',
- value : '',
--- /dev/null
+--- a/glib/meson.build
++++ b/glib/meson.build
+@@ -365,6 +365,7 @@ pcre2_static_args = []
+
+ if use_pcre2_static_flag
+ pcre2_static_args = ['-DPCRE2_STATIC']
++ pcre2 = pcre2.as_link_whole()
+ endif
+
+ glib_c_args = ['-DG_LOG_DOMAIN="GLib"', '-DGLIB_COMPILATION'] + pcre2_static_args + glib_hidden_visibility_args
--- /dev/null
+From ebcc3c01db27b79af38b42c3c52a79d0225f744c Mon Sep 17 00:00:00 2001
+From: Seungha Yang <seungha@centricular.com>
+Date: Sun, 14 Aug 2022 04:56:20 +0900
+Subject: [PATCH] glib-mkenums: Specify output encoding as UTF-8 explicitly for
+ non-English locale
+
+Fixup regression introduced by
+https://gitlab.gnome.org/GNOME/glib/-/merge_requests/2797
+---
+ gobject/glib-mkenums.in | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/gobject/glib-mkenums.in
++++ b/gobject/glib-mkenums.in
+@@ -19,6 +19,9 @@ import errno
+ import codecs
+ import locale
+
++# Non-English locale systems might complain about unrecognized characters
++sys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding='utf-8')
++
+ VERSION_STR = '''glib-mkenums version @VERSION@
+ glib-mkenums comes with ABSOLUTELY NO WARRANTY.
+ You may redistribute copies of glib-mkenums under the terms of
PKG_LICENSE:=Apache-2.0
PKG_LICENSE_FILES:=COPYING
-PKG_BUILD_DEPENDS:=openssl
-
include $(INCLUDE_DIR)/package.mk
include $(INCLUDE_DIR)/meson.mk
+define Package/libjose
+ SECTION:=libs
+ TITLE:=Provides a full crypto stack including key generation, signing and encryption.
+ DEPENDS:=+zlib +jansson +libopenssl +libpthread
+ URL:=https://github.com/latchset/jose
+endef
+
define Package/jose
SECTION:=utils
TITLE:=Provides a full crypto stack including key generation, signing and encryption.
- DEPENDS:=+jansson
+ DEPENDS:=+libjose
URL:=https://github.com/latchset/jose
endef
crypto stack including key generation, signing and encryption.
endef
-MESON_ARGS += \
- -Ddefault_library=static
+define Package/libjose/description
+ libjose is a library for performing various tasks on JSON
+ Object Signing and Encryption (JOSE) objects. José provides a full
+ crypto stack including key generation, signing and encryption.
+endef
define Build/InstallDev
$(INSTALL_DIR) $(1)/usr/lib
$(INSTALL_DIR) $(1)/usr/include
$(INSTALL_DIR) $(1)/usr/include/$(PKG_NAME)
$(INSTALL_DIR) $(1)/usr/lib/pkgconfig
- $(CP) $(PKG_INSTALL_DIR)/usr/lib/lib$(PKG_NAME).a $(1)/usr/lib
+ $(CP) $(PKG_INSTALL_DIR)/usr/lib/lib$(PKG_NAME).so* $(1)/usr/lib
$(CP) $(PKG_INSTALL_DIR)/usr/include/$(PKG_NAME)/*.h $(1)/usr/include/$(PKG_NAME)
$(CP) $(PKG_INSTALL_DIR)/usr/lib/pkgconfig/*.pc $(1)/usr/lib/pkgconfig
endef
+define Package/libjose/install
+ $(INSTALL_DIR) $(1)/usr/lib
+ $(CP) $(PKG_INSTALL_DIR)/usr/lib/lib$(PKG_NAME).so* $(1)/usr/lib/
+endef
+
define Package/jose/install
$(INSTALL_DIR) $(1)/usr/bin
$(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/bin/$(PKG_NAME) $(1)/usr/bin/
endef
+$(eval $(call BuildPackage,libjose))
$(eval $(call BuildPackage,jose))
--- /dev/null
+# Copyright (C) 2022 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+#
+# SPDX-License-Identifier: GPL-2.0-or-later
+
+include $(TOPDIR)/rules.mk
+
+PKG_NAME:=libcli
+
+PKG_SOURCE_PROTO:=git
+PKG_SOURCE_URL=https://github.com/dparrish/libcli.git
+PKG_SOURCE_DATE:=2022-07-06
+PKG_SOURCE_VERSION:=V1.10.7
+PKG_MIRROR_HASH:=7698898364d91a1abf9e19c588a19c9587615106cf1caaf2a7e135f9ce1b1b45
+
+PKG_MAINTAINER:=Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+PKG_LICENSE:=LGPL-2.1
+PKG_LICENSE_FILES:=COPYING
+
+PKG_BUILD_PARALLEL:=1
+
+include $(INCLUDE_DIR)/package.mk
+
+define Package/libcli
+ SECTION:=libs
+ CATEGORY:=Libraries
+ TITLE:=libcli
+ URL:=https://dparrish.com/link/libcli
+ DEPENDS:=+libc
+endef
+
+define Package/libcli/description
+ Libcli provides a shared library for including a Cisco-like
+ command-line interface into other software.
+endef
+
+define Build/Compile
+ $(MAKE) -C $(PKG_BUILD_DIR) \
+ CC="$(TARGET_CC)" \
+ LDFLAGS="$(TARGET_LDFLAGS)" \
+ DESTDIR="$(PKG_INSTALL_DIR)" \
+ TESTS=0 \
+ all install
+endef
+
+define Build/InstallDev
+ $(INSTALL_DIR) $(1)/usr/include
+ $(CP) $(PKG_INSTALL_DIR)/usr/local/include/libcli.h $(1)/usr/include/
+ $(INSTALL_DIR) $(1)/usr/lib
+ $(CP) $(PKG_INSTALL_DIR)/usr/local/lib/libcli.so* $(1)/usr/lib/
+endef
+
+define Package/libcli/install
+ $(INSTALL_DIR) $(1)/usr/lib
+ $(CP) $(PKG_INSTALL_DIR)/usr/local/lib/libcli.so* $(1)/usr/lib/
+endef
+
+$(eval $(call BuildPackage,libcli))
include $(TOPDIR)/rules.mk
PKG_NAME:=libmodbus
-PKG_VERSION:=3.1.7
+PKG_VERSION:=3.1.8
PKG_RELEASE:=1
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
-PKG_SOURCE_URL:=http://libmodbus.org/releases
-PKG_HASH:=7dfe958431d0570b271e1a5b329b76a658e89c614cf119eb5aadb725c87f8fbd
+PKG_SOURCE_URL:=https://github.com/stephane/libmodbus/releases/download/v$(PKG_VERSION)/
+PKG_HASH:=b122f2bc29f749702a22c0a760a7ca2182d541f5fa26bf25e3431f907b606f3c
PKG_MAINTAINER:=Michael Heimpold <mhei@heimpold.de>
A Modbus library for Linux, Mac OS X, FreeBSD, QNX and Win32.
endef
-CONFIGURE_ARGS += --without-documentation --disable-tests
+CONFIGURE_ARGS += --disable-tests
TARGET_CFLAGS += $(FPIC)
define Build/InstallDev
--- /dev/null
+#
+# Copyright (C) 2021 TDT AG <development@tdt.de>
+#
+# This is free software, licensed under the GNU General Public License v2.
+# See https://www.gnu.org/licenses/gpl-2.0.txt for more information.
+#
+
+include $(TOPDIR)/rules.mk
+
+PKG_NAME:=libmspack
+PKG_VERSION:=0.10.1alpha
+PKG_RELEASE:=1
+
+PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
+PKG_SOURCE_URL:=https://www.cabextract.org.uk/$(PKG_NAME)/
+PKG_HASH:=bac862dee6e0fc10d92c70212441d9f8ad9b0222edc9a708c3ead4adb1b24a8e
+
+PKG_MAINTAINER:=Florian Eckert <fe@dev.tdt.de>
+PKG_LICENSE:=LGPL-2.1-or-later
+PKG_LICENSE_FILES:=COPYING.LIB
+
+PKG_FIXUP:=autoreconf
+PKG_INSTALL:=1
+
+include $(INCLUDE_DIR)/package.mk
+
+define Package/libmspack
+ SECTION:=libs
+ CATEGORY:=Libraries
+ TITLE:=Compressors and decompressors for Microsoft formats
+ DEPENDS:=@TARGET_x86
+ URL:=https://github.com/kyz/libmspack
+endef
+
+define Package/libmspack/description
+ The purpose of libmspack is to provide compressors and decompressors,
+ archivers and dearchivers for Microsoft compression formats: CAB, CHM, WIM,
+ LIT, HLP, KWAJ and SZDD. It is also designed to be easily embeddable,
+ stable, robust and resource-efficient.
+endef
+
+define Build/InstallDev
+ $(INSTALL_DIR) $(1)/usr/include
+ $(CP) $(PKG_INSTALL_DIR)/usr/include/mspack.h \
+ $(1)/usr/include/
+
+ $(INSTALL_DIR) $(1)/usr/lib/pkgconfig
+ $(CP) $(PKG_INSTALL_DIR)/usr/lib/pkgconfig/*.pc \
+ $(1)/usr/lib/pkgconfig
+
+ $(INSTALL_DIR) $(1)/usr/lib
+ $(CP) $(PKG_INSTALL_DIR)/usr/lib/*.so* \
+ $(1)/usr/lib
+endef
+
+define Package/libmspack/install
+ $(INSTALL_DIR) $(1)/usr/lib
+ $(CP) $(PKG_INSTALL_DIR)/usr/lib/*.so* \
+ $(1)/usr/lib
+endef
+
+$(eval $(call BuildPackage,libmspack))
PKG_NAME:=libowfat
PKG_VERSION:=0.32
-PKG_RELEASE:=4
+PKG_RELEASE:=5
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.xz
PKG_SOURCE_URL:=https://www.fefe.de/libowfat
--- /dev/null
+--- a/byte.h
++++ b/byte.h
+@@ -9,17 +9,24 @@
+ extern "C" {
+ #endif
+
+-#ifndef __pure__
+-#define __pure__
++#ifndef ATTR_PURE
++# if defined __has_attribute
++# if __has_attribute (pure)
++# define ATTR_PURE __attribute__ ((pure))
++# endif
++# endif
++#endif
++#ifndef ATTR_PURE
++# define ATTR_PURE
+ #endif
+
+ /* byte_chr returns the smallest integer i between 0 and len-1
+ * inclusive such that one[i] equals needle, or len if not found. */
+-size_t byte_chr(const void* haystack, size_t len, char needle) __pure__;
++size_t byte_chr(const void* haystack, size_t len, char needle) ATTR_PURE;
+
+ /* byte_rchr returns the largest integer i between 0 and len-1 inclusive
+ * such that one[i] equals needle, or len if not found. */
+-size_t byte_rchr(const void* haystack,size_t len,char needle) __pure__;
++size_t byte_rchr(const void* haystack,size_t len,char needle) ATTR_PURE;
+
+ /* byte_copy copies in[0] to out[0], in[1] to out[1], ... and in[len-1]
+ * to out[len-1]. */
+@@ -34,14 +41,14 @@ void byte_copyr(void* out, size_t len, c
+ * than, equal to, or greater than the string b[0], b[1], ...,
+ * b[len-1]. When the strings are different, byte_diff does not read
+ * bytes past the first difference. */
+-int byte_diff(const void* a, size_t len, const void* b) __pure__;
++int byte_diff(const void* a, size_t len, const void* b) ATTR_PURE;
+
+ /* byte_zero sets the bytes out[0], out[1], ..., out[len-1] to 0 */
+ void byte_zero(void* out, size_t len);
+
+ #define byte_equal(s,n,t) (!byte_diff((s),(n),(t)))
+
+-int byte_equal_notimingattack(const void* a, size_t len,const void* b) __pure__;
++int byte_equal_notimingattack(const void* a, size_t len,const void* b) ATTR_PURE;
+
+ #if defined(__i386__) || defined(__x86_64__)
+ #define UNALIGNED_ACCESS_OK
+--- a/critbit.h
++++ b/critbit.h
+@@ -5,18 +5,25 @@
+ extern "C" {
+ #endif
+
+-/* for __pure__ if we are compiling under dietlibc */
++/* for ATTR_PURE if we are compiling under dietlibc */
+ #include <stddef.h>
+
+-#ifndef __pure__
+-#define __pure__
++#ifndef ATTR_PURE
++# if defined __has_attribute
++# if __has_attribute (pure)
++# define ATTR_PURE __attribute__ ((pure))
++# endif
++# endif
++#endif
++#ifndef ATTR_PURE
++# define ATTR_PURE
+ #endif
+
+ typedef struct {
+ void *root;
+ } critbit0_tree;
+
+-int critbit0_contains(critbit0_tree *t, const char *u) __pure__;
++int critbit0_contains(critbit0_tree *t, const char *u) ATTR_PURE;
+ int critbit0_insert(critbit0_tree *t, const char *u);
+ int critbit0_delete(critbit0_tree *t, const char *u);
+ void critbit0_clear(critbit0_tree *t);
+--- a/scan.h
++++ b/scan.h
+@@ -15,8 +15,15 @@
+ extern "C" {
+ #endif
+
+-#ifndef __pure__
+-#define __pure__
++#ifndef ATTR_PURE
++# if defined __has_attribute
++# if __has_attribute (pure)
++# define ATTR_PURE __attribute__ ((pure))
++# endif
++# endif
++#endif
++#ifndef ATTR_PURE
++# define ATTR_PURE
+ #endif
+
+ /* This file declared functions used to decode / scan / unmarshal
+@@ -84,18 +91,18 @@ size_t scan_double(const char *in, doubl
+ size_t scan_plusminus(const char *src,signed int *dest);
+
+ /* return the highest integer n<=limit so that isspace(in[i]) for all 0<=i<=n */
+-size_t scan_whitenskip(const char *in,size_t limit) __pure__;
++size_t scan_whitenskip(const char *in,size_t limit) ATTR_PURE;
+
+ /* return the highest integer n<=limit so that !isspace(in[i]) for all 0<=i<=n */
+-size_t scan_nonwhitenskip(const char *in,size_t limit) __pure__;
++size_t scan_nonwhitenskip(const char *in,size_t limit) ATTR_PURE;
+
+ /* return the highest integer n<=limit so that in[i] is element of
+ * charset (ASCIIZ string) for all 0<=i<=n */
+-size_t scan_charsetnskip(const char *in,const char *charset,size_t limit) __pure__;
++size_t scan_charsetnskip(const char *in,const char *charset,size_t limit) ATTR_PURE;
+
+ /* return the highest integer n<=limit so that in[i] is not element of
+ * charset (ASCIIZ string) for all 0<=i<=n */
+-size_t scan_noncharsetnskip(const char *in,const char *charset,size_t limit) __pure__;
++size_t scan_noncharsetnskip(const char *in,const char *charset,size_t limit) ATTR_PURE;
+
+ /* try to parse ASCII GMT date; does not understand time zones. */
+ /* example dates:
+@@ -103,17 +110,17 @@ size_t scan_noncharsetnskip(const char *
+ * "Sunday, 06-Nov-94 08:49:37 GMT"
+ * "Sun Nov 6 08:49:37 1994"
+ */
+-size_t scan_httpdate(const char *in,time_t *t) __pure__;
++size_t scan_httpdate(const char *in,time_t *t) ATTR_PURE;
+
+ /* try to parse ASCII ISO-8601 date; does not understand time zones. */
+ /* example date: "2014-05-27T19:22:16Z" */
+-size_t scan_iso8601(const char* in,struct timespec* t) __pure__;
++size_t scan_iso8601(const char* in,struct timespec* t) ATTR_PURE;
+
+ /* some variable length encodings for integers */
+-size_t scan_utf8(const char* in,size_t len,uint32_t* n) __pure__;
+-size_t scan_utf8_sem(const char* in,size_t len,uint32_t* n) __pure__;
+-size_t scan_asn1derlength(const char* in,size_t len,unsigned long long* n) __pure__;
+-size_t scan_asn1dertag(const char* in,size_t len,unsigned long long* n) __pure__;
++size_t scan_utf8(const char* in,size_t len,uint32_t* n) ATTR_PURE;
++size_t scan_utf8_sem(const char* in,size_t len,uint32_t* n) ATTR_PURE;
++size_t scan_asn1derlength(const char* in,size_t len,unsigned long long* n) ATTR_PURE;
++size_t scan_asn1dertag(const char* in,size_t len,unsigned long long* n) ATTR_PURE;
+
+ /* Google protocol buffers */
+ /* A protocol buffer is a sequence of (tag,value).
+@@ -122,15 +129,15 @@ size_t scan_asn1dertag(const char* in,si
+ * 0, double type 1, strings type 2 and floats type 5. However, you
+ * have to check this yourself.
+ */
+-size_t scan_varint(const char* in,size_t len, unsigned long long* n) __pure__; /* internal */
+-size_t scan_pb_tag(const char* in,size_t len, size_t* fieldno,unsigned char* type) __pure__;
++size_t scan_varint(const char* in,size_t len, unsigned long long* n) ATTR_PURE; /* internal */
++size_t scan_pb_tag(const char* in,size_t len, size_t* fieldno,unsigned char* type) ATTR_PURE;
+
+ /* Then, depending on the field number, validate the type and call the
+ * corresponding of these functions to parse the value */
+-size_t scan_pb_type0_int(const char* in,size_t len,unsigned long long* l) __pure__;
+-size_t scan_pb_type0_sint(const char* in,size_t len,signed long long* l) __pure__;
+-size_t scan_pb_type1_double(const char* in,size_t len,double* d) __pure__;
+-size_t scan_pb_type1_fixed64(const char* in,size_t len,uint64_t* b) __pure__;
++size_t scan_pb_type0_int(const char* in,size_t len,unsigned long long* l) ATTR_PURE;
++size_t scan_pb_type0_sint(const char* in,size_t len,signed long long* l) ATTR_PURE;
++size_t scan_pb_type1_double(const char* in,size_t len,double* d) ATTR_PURE;
++size_t scan_pb_type1_fixed64(const char* in,size_t len,uint64_t* b) ATTR_PURE;
+ /* NOTE: scan_pb_type2_stringlen only parses the length of the string,
+ * not the string itself. It will return the number of bytes parsed in
+ * the length, then set slen to the value of the length integer it just
+@@ -141,9 +148,9 @@ size_t scan_pb_type1_fixed64(const char*
+ * parsing early without having to read and allocate memory for the rest
+ * (potentially gigabytes) of the data announced by one unreasonable
+ * string length value. */
+-size_t scan_pb_type2_stringlen(const char* in,size_t len,const char** string, size_t* slen) __pure__;
+-size_t scan_pb_type5_float(const char* in,size_t len,float* f) __pure__;
+-size_t scan_pb_type5_fixed32(const char* in,size_t len,uint32_t* b) __pure__;
++size_t scan_pb_type2_stringlen(const char* in,size_t len,const char** string, size_t* slen) ATTR_PURE;
++size_t scan_pb_type5_float(const char* in,size_t len,float* f) ATTR_PURE;
++size_t scan_pb_type5_fixed32(const char* in,size_t len,uint32_t* b) ATTR_PURE;
+
+ /* parse a netstring, input buffer is in (len bytes).
+ * if parsing is successful:
+@@ -153,7 +160,7 @@ size_t scan_pb_type5_fixed32(const char*
+ * return 0
+ * Note: *dest will point inside the input buffer!
+ */
+-size_t scan_netstring(const char* in,size_t len,char** dest,size_t* slen) __pure__;
++size_t scan_netstring(const char* in,size_t len,char** dest,size_t* slen) ATTR_PURE;
+
+ /* internal function that might be useful independently */
+ /* convert from hex ASCII, return 0 to 15 for success or -1 for failure */
+--- a/str.h
++++ b/str.h
+@@ -8,8 +8,15 @@
+ extern "C" {
+ #endif
+
+-#ifndef __pure__
+-#define __pure__
++#ifndef ATTR_PURE
++# if defined __has_attribute
++# if __has_attribute (pure)
++# define ATTR_PURE __attribute__ ((pure))
++# endif
++# endif
++#endif
++#ifndef ATTR_PURE
++# define ATTR_PURE
+ #endif
+
+ /* str_copy copies leading bytes from in to out until \0.
+@@ -21,7 +28,7 @@ size_t str_copy(char *out,const char *in
+ * equal to, or greater than the string b[0], b[1], ..., b[m-1]=='\0'.
+ * If the strings are different, str_diff does not read bytes past the
+ * first difference. */
+-int str_diff(const char *a,const char *b) __pure__;
++int str_diff(const char *a,const char *b) ATTR_PURE;
+
+ /* str_diffn returns negative, 0, or positive, depending on whether the
+ * string a[0], a[1], ..., a[n]=='\0' is lexicographically smaller than,
+@@ -29,24 +36,24 @@ int str_diff(const char *a,const char *b
+ * If the strings are different, str_diffn does not read bytes past the
+ * first difference. The strings will be considered equal if the first
+ * limit characters match. */
+-int str_diffn(const char *a,const char *b,size_t limit) __pure__;
++int str_diffn(const char *a,const char *b,size_t limit) ATTR_PURE;
+
+ #ifdef __dietlibc__
+ #include <string.h>
+ #define str_len(foo) strlen(foo)
+ #else
+ /* str_len returns the index of \0 in s */
+-size_t str_len(const char *s) __pure__;
++size_t str_len(const char *s) ATTR_PURE;
+ #endif
+
+ /* str_chr returns the index of the first occurance of needle or \0 in haystack */
+-size_t str_chr(const char *haystack,char needle) __pure__;
++size_t str_chr(const char *haystack,char needle) ATTR_PURE;
+
+ /* str_rchr returns the index of the last occurance of needle or \0 in haystack */
+-size_t str_rchr(const char *haystack,char needle) __pure__;
++size_t str_rchr(const char *haystack,char needle) ATTR_PURE;
+
+ /* str_start returns 1 if the b is a prefix of a, 0 otherwise */
+-int str_start(const char *a,const char *b) __pure__;
++int str_start(const char *a,const char *b) ATTR_PURE;
+
+ /* convenience shortcut to test for string equality */
+ #define str_equal(s,t) (!str_diff((s),(t)))
+--- a/stralloc.h
++++ b/stralloc.h
+@@ -8,8 +8,15 @@
+ extern "C" {
+ #endif
+
+-#ifndef __pure__
+-#define __pure__
++#ifndef ATTR_PURE
++# if defined __has_attribute
++# if __has_attribute (pure)
++# define ATTR_PURE __attribute__ ((pure))
++# endif
++# endif
++#endif
++#ifndef ATTR_PURE
++# define ATTR_PURE
+ #endif
+
+ /* stralloc is the internal data structure all functions are working on.
+@@ -101,17 +108,17 @@ static inline int stralloc_APPEND(strall
+ /* stralloc_starts returns 1 if the \0-terminated string in "in", without
+ * the terminating \0, is a prefix of the string stored in sa. Otherwise
+ * it returns 0. sa must already be allocated. */
+-int stralloc_starts(stralloc* sa,const char* in) __pure__;
++int stralloc_starts(stralloc* sa,const char* in) ATTR_PURE;
+
+ /* stralloc_diff returns negative, 0, or positive, depending on whether
+ * a is lexicographically smaller than, equal to, or greater than the
+ * string b. */
+-int stralloc_diff(const stralloc* a,const stralloc* b) __pure__;
++int stralloc_diff(const stralloc* a,const stralloc* b) ATTR_PURE;
+
+ /* stralloc_diffs returns negative, 0, or positive, depending on whether
+ * a is lexicographically smaller than, equal to, or greater than the
+ * string b[0], b[1], ..., b[n]=='\0'. */
+-int stralloc_diffs(const stralloc* a,const char* b) __pure__;
++int stralloc_diffs(const stralloc* a,const char* b) ATTR_PURE;
+
+ #define stralloc_equal(a,b) (!stralloc_diff((a),(b)))
+ #define stralloc_equals(a,b) (!stralloc_diffs((a),(b)))
--- /dev/null
+#
+# This is free software, licensed under the GNU General Public License v2.
+# See /LICENSE for more information.
+#
+
+include $(TOPDIR)/rules.mk
+
+PKG_NAME:=libsoup3
+PKG_VERSION:=3.0.6
+PKG_RELEASE:=$(AUTORELEASE)
+
+PKG_SOURCE:=libsoup-$(PKG_VERSION).tar.xz
+PKG_SOURCE_URL:=@GNOME/libsoup/3.0
+PKG_HASH:=b45d59f840b9acf9bb45fd45854e3ef672f57e3ab957401c3ad8d7502ac23da6
+PKG_BUILD_DIR:=$(BUILD_DIR)/libsoup-$(PKG_VERSION)
+
+PKG_MAINTAINER:=W. Michael Petullo <mike@flyn.org>
+PKG_LICENSE:=LGPL-2.1-or-later
+PKG_LICENSE_FILES:=COPYING
+PKG_CPE_ID:=cpe:/a:gnome:libsoup
+
+PKG_BUILD_DEPENDS:=glib2/host
+
+include $(INCLUDE_DIR)/package.mk
+include $(INCLUDE_DIR)/nls.mk
+include $(INCLUDE_DIR)/meson.mk
+
+define Package/libsoup3
+ SECTION:=libs
+ CATEGORY:=Libraries
+ TITLE:=libsoup3
+ URL:=https://wiki.gnome.org/Projects/libsoup
+ DEPENDS:=+glib2 +libxml2 +libgnutls +libsqlite3 +libpsl +libnghttp2
+endef
+
+MESON_ARGS += \
+ -Dgssapi=disabled \
+ -Dntlm=disabled \
+ -Dbrotli=disabled \
+ -Dtls_check=false \
+ -Dintrospection=disabled \
+ -Dvapi=disabled \
+ -Dgtk_doc=false \
+ -Dtests=false \
+ -Dinstalled_tests=false \
+ -Dsysprof=disabled
+
+define Package/libsoup3/description
+Libsoup is an HTTP library implementation in C
+endef
+
+define Build/InstallDev
+ $(INSTALL_DIR) $(1)/usr/{lib/pkgconfig,include/libsoup-3.0/libsoup}
+
+ $(CP) \
+ $(PKG_INSTALL_DIR)/usr/lib/libsoup-3.0.so* \
+ $(1)/usr/lib/
+
+ $(INSTALL_DATA) \
+ $(PKG_INSTALL_DIR)/usr/lib/pkgconfig/* \
+ $(1)/usr/lib/pkgconfig/
+
+ $(INSTALL_DATA) \
+ $(PKG_INSTALL_DIR)/usr/include/libsoup-3.0/libsoup/*.h \
+ $(1)/usr/include/libsoup-3.0/libsoup/
+endef
+
+define Package/libsoup3/install
+ $(INSTALL_DIR) $(1)/usr/lib
+ $(CP) \
+ $(PKG_INSTALL_DIR)/usr/lib/libsoup-3.0.* \
+ $(1)/usr/lib/
+endef
+
+$(eval $(call BuildPackage,libsoup3))
PKG_NAME:=libuhttpd
PKG_VERSION:=3.12.1
-PKG_RELEASE:=3
+PKG_RELEASE:=4
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL=https://github.com/zhaojh329/libuhttpd/releases/download/v$(PKG_VERSION)
PKG_NAME:=libuwsc
PKG_VERSION:=3.3.5
-PKG_RELEASE:=$(AUTORELEASE)
+PKG_RELEASE:=$(AUTORELEASE).1
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL=https://github.com/zhaojh329/libuwsc/releases/download/v$(PKG_VERSION)
include $(TOPDIR)/rules.mk
PKG_NAME:=libwebsockets
-PKG_VERSION:=4.2.1
-PKG_RELEASE:=3
+PKG_VERSION:=4.3.2
+PKG_RELEASE:=1
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_SUBDIR:=$(PKG_NAME)-$(PKG_VERSION)
PKG_SOURCE_URL:=https://codeload.github.com/warmcat/libwebsockets/tar.gz/v$(PKG_VERSION)?
-PKG_HASH:=842da21f73ccba2be59e680de10a8cce7928313048750eb6ad73b6fa50763c51
+PKG_HASH:=6a85a1bccf25acc7e8e5383e4934c9b32a102880d1e4c37c70b27ae2a42406e1
PKG_SOURCE_VERSION:=v$(PKG_VERSION)
+++ /dev/null
-From 1f9925b44379941065690f0dce8ca0c79414b9f1 Mon Sep 17 00:00:00 2001
-From: Rosen Penev <rosenp@gmail.com>
-Date: Tue, 1 Feb 2022 09:13:11 +0000
-Subject: [PATCH] genec: show correct nid when not allowed
-
-As noticed by gcc11 warning
-
-https://github.com/warmcat/libwebsockets/pull/2551
----
- lib/tls/lws-genec-common.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/lib/tls/lws-genec-common.c
-+++ b/lib/tls/lws-genec-common.c
-@@ -94,7 +94,7 @@ lws_genec_confirm_curve_allowed_by_tls_i
- }
- } while (e > 0);
-
-- lwsl_err("%s: unsupported curve group nid %d\n", __func__, n);
-+ lwsl_err("%s: unsupported curve group nid %d\n", __func__, id);
-
- return -1;
- }
+++ /dev/null
-From 6352fee2194a41179a808e9e2b805e0f5c86f8cf Mon Sep 17 00:00:00 2001
-From: Andy Green <andy@warmcat.com>
-Date: Sun, 16 Jan 2022 06:49:38 +0000
-Subject: [PATCH] sort-dns: fix scope comparison
-
-https://github.com/warmcat/libwebsockets/issues/2537
----
- lib/core-net/client/sort-dns.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/lib/core-net/client/sort-dns.c
-+++ b/lib/core-net/client/sort-dns.c
-@@ -406,7 +406,7 @@ lws_sort_dns_dcomp(const lws_dns_sort_t
- */
-
- scopea = lws_ipv6_unicast_scope(to_v6_sa(&da->dest));
-- scopeb = lws_ipv6_unicast_scope(to_v6_sa(&db));
-+ scopeb = lws_ipv6_unicast_scope(to_v6_sa(&db->dest));
- scope_srca = lws_ipv6_unicast_scope(to_v6_sa(&da->source));
- scope_srcb = lws_ipv6_unicast_scope(to_v6_sa(&db->source));
-
include $(TOPDIR)/rules.mk
PKG_NAME:=libxml2
-PKG_VERSION:=2.9.14
+PKG_VERSION:=2.10.2
PKG_RELEASE:=1
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.xz
PKG_SOURCE_URL:=@GNOME/libxml2/$(basename $(PKG_VERSION))
-PKG_HASH:=60d74a257d1ccec0475e749cba2f21559e48139efba6ff28224357c7c798dfee
+PKG_HASH:=d240abe6da9c65cb1900dd9bf3a3501ccf88b3c2a1cb98317d03f272dda5b265
PKG_LICENSE:=MIT
PKG_LICENSE_FILES:=COPYING
PKG_MAINTAINER:=Michael Heimpold <mhei@heimpold.de>
-PKG_FIXUP:=autoreconf
PKG_INSTALL:=1
PKG_BUILD_PARALLEL:=0
--with-c14n \
--without-catalog \
--with-debug \
- --without-docbook \
--with-html \
--without-ftp \
--without-http \
--with-c14n \
--without-catalog \
--with-debug \
- --without-docbook \
--with-html \
--without-ftp \
--without-http \
include $(TOPDIR)/rules.mk
PKG_NAME:=OpenBLAS
-PKG_VERSION:=0.3.19
+PKG_VERSION:=0.3.21
PKG_RELEASE:=$(AUTORELEASE)
PKG_SOURCE:=OpenBLAS-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://github.com/xianyi/OpenBLAS/releases/download/v$(PKG_VERSION)/
-PKG_HASH:=947f51bfe50c2a0749304fbe373e00e7637600b0a47b78a51382aeb30ca08562
+PKG_HASH:=f36ba3d7a60e7c8bcc54cd9aaa9b1223dd42eaf02c811791c37e8ca707c241ca
PKG_LICENSE:=BSD 3-Clause
PKG_MAINTAINER:=Alexandru Ardelean <ardeleanalex@gmail.com>
include $(TOPDIR)/rules.mk
PKG_NAME:=ffmpeg
-PKG_VERSION:=5.1.1
+PKG_VERSION:=5.1.2
PKG_RELEASE:=1
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.xz
PKG_SOURCE_URL:=https://ffmpeg.org/releases/
-PKG_HASH:=95bf3ff8c496511e71e958fb249e663c8c9c3de583c5bebc0f5a9745abbc0435
+PKG_HASH:=619e706d662c8420859832ddc259cd4d4096a48a2ce1eefd052db9e440eef3dc
PKG_MAINTAINER:=Ted Hess <thess@kitschensync.net>, \
Ian Leonard <antonlacon@gmail.com>
--- /dev/null
+include $(TOPDIR)/rules.mk
+
+PKG_NAME:=midisport-firmware
+PKG_VERSION:=1.2
+PKG_RELEASE:=1
+
+PKG_SOURCE_URL:=@SF/usb-midi-fw/$(PKG_NAME)/$(PKG_VERSION)
+PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
+PKG_HASH:=2aa82ef0bf26647fbdda4c2e9ed0033b41bd0f1b4020b87fa073e4462a048b2d
+
+PKG_MAINTAINER:=Daniel Golle <daniel@makrotopia.org>
+PKG_LICENSE_FILES:=LICENCE
+
+include $(INCLUDE_DIR)/package.mk
+
+define Package/midisport-firmware
+ SECTION:=firmware
+ CATEGORY:=Firmware
+ URL:=https://sourceforge.net/projects/usb-midi-fw
+ TITLE:=M-Audio USB MIDI Firmware
+ DEPENDS:=+fxload
+ PKGARCH:=all
+endef
+
+define Package/midisport-firmware/description
+This package allows you to use the MidiSport USB MIDI interfaces from
+M-Audio/Midiman.
+
+Supported devices:
+- MidiSport 1x1
+- MidiSport 2x2
+- MidiSport 4x4
+- MidiSport 8x8
+- MidiSport Uno
+- Keystation
+- Oxygen
+- Radium
+
+(You don't need a firmware download for the USB Audio Quattro, Duo, or
+MidiSport 2x4.)
+endef
+
+define Build/Configure
+ true
+endef
+
+define Build/Compile
+ true
+endef
+
+define Package/midisport-firmware/install
+ $(INSTALL_DIR) $(1)/usr/share/usb/maudio $(1)/etc/hotplug.d/usb
+ $(INSTALL_DATA) $(PKG_BUILD_DIR)/*.ihx $(1)/usr/share/usb/maudio
+ $(INSTALL_DATA) ./files/midisport-firmware.hotplug $(1)/etc/hotplug.d/usb/50-midisport-firmware
+endef
+
+$(eval $(call BuildPackage,midisport-firmware))
--- /dev/null
+#!/bin/sh
+
+midisport_load() {
+	local MIDISPORT_FWDIR="/usr/share/usb/maudio"
+	local FXLOAD="/usr/sbin/fxload"
+	case "$DEVPATH" in "/"*.[1-9]) return 0 ;; esac # skip interface events; bracket must be unquoted to glob
+	[ "$BUSNUM" ] || return 19 # ENODEV: no bus number from hotplug env
+	[ "$DEVNUM" ] || return 19 # ENODEV: no device number from hotplug env
+	[ -x "$FXLOAD" ] || return 2 # ENOENT: fxload missing or not executable
+	"$FXLOAD" -V | grep -q libusb || return 95 # EOPNOTSUPP: need libusb-enabled fxload
+	$FXLOAD -t an21 -p $BUSNUM,$DEVNUM \
+		-s "$MIDISPORT_FWDIR/MidiSportLoader.ihx" \
+		-I "$MIDISPORT_FWDIR/MidiSport${1}.ihx"
+}
+
+if [ "$ACTION" = "add" ]; then
+ case "$PRODUCT" in
+ "763/1001/"*)
+ midisport_load 2x2
+ ;;
+ "763/1010/"*)
+ midisport_load 1x1
+ ;;
+ "763/1014/"*)
+ midisport_load KS
+ ;;
+ "763/1020/"*)
+ midisport_load 4x4
+ ;;
+ "763/1031/110")
+ midisport_load 8x8-2.10
+ ;;
+ "763/1031/121")
+ midisport_load 8x8-2.21
+ ;;
+ esac
+fi
PKG_NAME:=adblock
PKG_VERSION:=4.1.4
-PKG_RELEASE:=2
+PKG_RELEASE:=5
PKG_LICENSE:=GPL-3.0-or-later
PKG_MAINTAINER:=Dirk Brenken <dev@brenken.org>
| firetv_tracking | | S | tracking | [Link](https://github.com/Perflyst/PiHoleBlocklist) |
| games_tracking | | S | tracking | [Link](https://www.gameindustry.eu) |
| hblock | | XL | compilation | [Link](https://hblock.molinero.dev) |
+| lightswitch05 | | XL | compilation | [Link](https://github.com/lightswitch05/hosts) |
| notracking | | XL | tracking | [Link](https://github.com/notracking/hosts-blocklists) |
| oisd_basic | | L | general | [Link](https://oisd.nl) |
| oisd_nsfw | | XL | general | [Link](https://oisd.nl) |
# restart dns backend
#
f_dnsup() {
- local dns_service dns_up dns_pid restart_rc cnt="0" out_rc="4" in_rc="${1:-0}"
+ local rset dns_service dns_up dns_pid restart_rc cnt="0" out_rc="4" in_rc="${1:-0}"
if [ "${adb_dns}" = "raw" ]; then
out_rc="0"
fi
fi
if [ "${restart_rc}" = "0" ]; then
+ rset="/^([[:alnum:]_-]{1,63}\\.)+[[:alpha:]]+([[:space:]]|$)/{print tolower(\$1)}"
while [ "${cnt}" -le "${adb_dnstimeout}" ]; do
dns_service="$(ubus -S call service list "{\"name\":\"${adb_dns}\"}")"
dns_up="$(printf "%s" "${dns_service}" | jsonfilter -l1 -e "@[\"${adb_dns}\"].instances.*.running")"
dns_pid="$(printf "%s" "${dns_service}" | jsonfilter -l1 -e "@[\"${adb_dns}\"].instances.*.pid")"
if [ "${dns_up}" = "true" ] && [ -n "${dns_pid}" ] && ! ls "/proc/${dns_pid}/fd/${adb_dnsdir}/${adb_dnsfile}" >/dev/null 2>&1; then
- if [ -x "${adb_lookupcmd}" ] && [ "${adb_lookupdomain}" != "false" ]; then
+ if [ -x "${adb_lookupcmd}" ] && [ -n "$(printf "%s" "${adb_lookupdomain}" | "${adb_awk}" "${rset}")" ]; then
if "${adb_lookupcmd}" "${adb_lookupdomain}" >/dev/null 2>&1; then
out_rc="0"
break
rm -f "${adb_tmpdir}/tmp.raw.${src_name}"
elif [ "${src_name}" = "whitelist" ] && [ -f "${adb_whitelist}" ]; then
rset="/^([[:alnum:]_-]{1,63}\\.)+[[:alpha:]]+([[:space:]]|$)/{print tolower(\$1)}"
- "${adb_awk}" "${rset}" "${adb_whitelist}" >"${adb_tmpdir}/tmp.raw.${src_name}"
+ printf "%s\n" "${adb_lookupdomain}" | "${adb_awk}" "${rset}" >"${adb_tmpdir}/tmp.raw.${src_name}"
+ "${adb_awk}" "${rset}" "${adb_whitelist}" >>"${adb_tmpdir}/tmp.raw.${src_name}"
out_rc="${?}"
if [ "${out_rc}" = "0" ]; then
rset="/^([[:alnum:]_-]{1,63}\\.)+[[:alpha:]]+([[:space:]]|$)/{gsub(\"\\\\.\",\"\\\\.\",\$1);print tolower(\"^(|.*\\\\.)\"\$1\"$\")}"
;;
"unbound")
prefix=".*[\"\\.]"
- suffix="(static)"
+ suffix="(always_nxdomain)"
field="3"
;;
"named")
"focus": "general",
"descurl": "https://disconnect.me"
},
+ "doh_blocklist": {
+ "url": "https://raw.githubusercontent.com/dibdot/DoH-IP-blocklists/master/doh-domains_overall.txt",
+ "rule": "/^([[:alnum:]_-]{1,63}\\.)+[[:alpha:]]+([[:space:]]|$)/{print tolower($1)}",
+ "size": "S",
+ "focus": "doh_server",
+ "descurl": "https://github.com/dibdot/DoH-IP-blocklists"
+ },
"energized": {
"url": "https://block.energized.pro/",
"rule": "/^([[:alnum:]_-]{1,63}\\.)+[[:alpha:]]+([[:space:]]|$)/{print tolower($1)}",
"size": "XL",
"focus": "compilation",
"descurl": "https://hblock.molinero.dev"
+ },
+ "lightswitch05": {
+ "url": "https://www.github.developerdan.com/hosts/lists/ads-and-tracking-extended.txt",
+ "rule": "/^0\\.0\\.0\\.0[[:space:]]+([[:alnum:]_-]{1,63}\\.)+[[:alpha:]]+([[:space:]]|$)/{print tolower($2)}",
+ "size": "XL",
+ "focus": "compilation",
+ "descurl": "https://github.com/lightswitch05/hosts"
},
"notracking": {
"url": "https://raw.githubusercontent.com/notracking/hosts-blocklists/master/dnscrypt-proxy/dnscrypt-proxy.blacklist.txt",
include $(TOPDIR)/rules.mk
PKG_NAME:=bind
-PKG_VERSION:=9.18.4
+PKG_VERSION:=9.18.7
PKG_RELEASE:=$(AUTORELEASE)
USERID:=bind=57:bind=57
PKG_SOURCE_URL:= \
https://www.mirrorservice.org/sites/ftp.isc.org/isc/bind9/$(PKG_VERSION) \
https://ftp.isc.org/isc/bind9/$(PKG_VERSION)
-PKG_HASH:=f277ae50159a00c300eb926a9c5d51953038a936bd8242d6913dfb6eac42761d
+PKG_HASH:=9e2acf1698f49d70ad12ffbad39ec6716a7da524e9ebd98429c7c70ba1262981
PKG_FIXUP:=autoreconf
PKG_REMOVE_FILES:=aclocal.m4 libtool.m4
include $(TOPDIR)/rules.mk
PKG_NAME:=cloudflared
-PKG_VERSION:=2022.9.0
+PKG_VERSION:=2022.9.1
PKG_RELEASE:=$(AUTORELEASE)
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://codeload.github.com/cloudflare/cloudflared/tar.gz/$(PKG_VERSION)?
-PKG_HASH:=be1362bb66071aa8d5c55c60db2be558a69a87054048827ed633e1bf4e98ed70
+PKG_HASH:=4919eef5814b4ecc41e1359f9546389688194c9e893b4de3c40b0e3cd2919605
PKG_LICENSE:=Apache-2.0
PKG_LICENSE_FILES:=LICENSE
PKG_NAME:=coova-chilli
PKG_VERSION:=1.6
-PKG_RELEASE:=$(AUTORELEASE)
+PKG_RELEASE:=$(AUTORELEASE).1
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://codeload.github.com/coova/coova-chilli/tar.gz/$(PKG_VERSION)?
include $(TOPDIR)/rules.mk
PKG_NAME:=curl
-PKG_VERSION:=7.83.1
-PKG_RELEASE:=$(AUTORELEASE)
+PKG_VERSION:=7.85.0
+PKG_RELEASE:=$(AUTORELEASE).1
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.xz
-PKG_SOURCE_URL:=https://dl.uxnr.de/mirror/curl/ \
+PKG_SOURCE_URL:=https://github.com/curl/curl/releases/download/curl-$(subst .,_,$(PKG_VERSION))/ \
+ https://dl.uxnr.de/mirror/curl/ \
https://curl.askapache.com/download/ \
https://curl.se/download/
-PKG_HASH:=2cb9c2356e7263a1272fd1435ef7cdebf2cd21400ec287b068396deb705c22c4
+PKG_HASH:=88b54a6d4b9a48cb4d873c7056dcba997ddd5b7be5a2d537a4acb55c20b04be6
PKG_LICENSE:=MIT
PKG_LICENSE_FILES:=COPYING
+++ /dev/null
---- a/m4/curl-wolfssl.m4
-+++ b/m4/curl-wolfssl.m4
-@@ -93,6 +93,7 @@ if test "x$OPT_WOLFSSL" != xno; then
- They are set up properly later if it is detected. */
- #undef SIZEOF_LONG
- #undef SIZEOF_LONG_LONG
-+#include <wolfssl/options.h>
- #include <wolfssl/ssl.h>
- ]],[[
- return wolfSSL_Init();
--- a/Makefile.am
+++ b/Makefile.am
-@@ -155,7 +155,7 @@ CLEANFILES = $(VC10_LIBVCXPROJ) $(VC10_S
+@@ -158,7 +158,7 @@ CLEANFILES = $(VC10_LIBVCXPROJ) $(VC10_S
bin_SCRIPTS = curl-config
SUBDIRS = lib src
pkgconfigdir = $(libdir)/pkgconfig
pkgconfig_DATA = libcurl.pc
-@@ -269,8 +269,6 @@ cygwinbin:
+@@ -272,8 +272,6 @@ cygwinbin:
# We extend the standard install with a custom hook:
install-data-hook:
(cd include && $(MAKE) install)
PKG_NAME:=ddns-scripts
PKG_VERSION:=2.8.2
-PKG_RELEASE:=26
+PKG_RELEASE:=27
PKG_LICENSE:=GPL-2.0
fi
SECTION_ID="" # hold config's section name
VERBOSE=0 # default mode is log to console, but easily changed with parameter
+DRY_RUN=0 # run without actually doing (sending) any changes
MYPROG=$(basename $0) # my program call name
LOGFILE="" # logfile - all files are set in dynamic_dns_updater.sh
NEXT_TIME=0 # calculated time for next FORCED update
EPOCH_TIME=0 # seconds since 1.1.1970 00:00:00
+CURRENT_IP="" # holds the current IP read from the box
REGISTERED_IP="" # holds the IP read from DNS
-LOCAL_IP="" # holds the local IP read from the box
URL_USER="" # url encoded $username from config file
URL_PASS="" # url encoded $password from config file
UPD_ANSWER="" # Answer given by service on success
ERR_LAST=0 # used to save $? return code of program and function calls
-ERR_UPDATE=0 # error counter on different local and registered ip
+RETRY_COUNT=0 # error counter on different current and registered IPs
PID_SLEEP=0 # ProcessID of current background "sleep"
return $__ERR
elif [ $__ERR -ne 0 ]; then
__CNT=$(( $__CNT + 1 )) # increment error counter
- # if error count > retry_count leave here
- [ $retry_count -gt 0 -a $__CNT -gt $retry_count ] && \
- write_log 14 "Verify DNS server '$1' failed after $retry_count retries"
+ # if error count > retry_max_count leave here
+ [ $retry_max_count -gt 0 -a $__CNT -gt $retry_max_count ] && \
+ write_log 14 "Verify DNS server '$1' failed after $retry_max_count retries"
- write_log 4 "Verify DNS server '$1' failed - retry $__CNT/$retry_count in $RETRY_SECONDS seconds"
+ write_log 4 "Verify DNS server '$1' failed - retry $__CNT/$retry_max_count in $RETRY_SECONDS seconds"
sleep $RETRY_SECONDS &
PID_SLEEP=$!
wait $PID_SLEEP # enable trap-handler
return $__ERR
elif [ $__ERR -gt 0 ]; then
__CNT=$(( $__CNT + 1 )) # increment error counter
- # if error count > retry_count leave here
- [ $retry_count -gt 0 -a $__CNT -gt $retry_count ] && \
- write_log 14 "Verify Proxy server '$1' failed after $retry_count retries"
+ # if error count > retry_max_count leave here
+ [ $retry_max_count -gt 0 -a $__CNT -gt $retry_max_count ] && \
+ write_log 14 "Verify Proxy server '$1' failed after $retry_max_count retries"
- write_log 4 "Verify Proxy server '$1' failed - retry $__CNT/$retry_count in $RETRY_SECONDS seconds"
+ write_log 4 "Verify Proxy server '$1' failed - retry $__CNT/$retry_max_count in $RETRY_SECONDS seconds"
sleep $RETRY_SECONDS &
PID_SLEEP=$!
wait $PID_SLEEP # enable trap-handler
# set correct program to detect IP
[ $use_ipv6 -eq 0 ] && __RUNPROG="network_get_ipaddr" || __RUNPROG="network_get_ipaddr6"
eval "$__RUNPROG __BINDIP $bind_network" || \
- write_log 13 "Can not detect local IP using '$__RUNPROG $bind_network' - Error: '$?'"
+ write_log 13 "Can not detect current IP using '$__RUNPROG $bind_network' - Error: '$?'"
write_log 7 "Force communication via IP '$__BINDIP'"
__PROG="$__PROG --bind-address=$__BINDIP"
fi
}
__CNT=$(( $__CNT + 1 )) # increment error counter
- # if error count > retry_count leave here
- [ $retry_count -gt 0 -a $__CNT -gt $retry_count ] && \
- write_log 14 "Transfer failed after $retry_count retries"
+ # if error count > retry_max_count leave here
+ [ $retry_max_count -gt 0 -a $__CNT -gt $retry_max_count ] && \
+ write_log 14 "Transfer failed after $retry_max_count retries"
- write_log 4 "Transfer failed - retry $__CNT/$retry_count in $RETRY_SECONDS seconds"
+ write_log 4 "Transfer failed - retry $__CNT/$retry_max_count in $RETRY_SECONDS seconds"
sleep $RETRY_SECONDS &
PID_SLEEP=$!
wait $PID_SLEEP # enable trap-handler
fi
}
-get_local_ip () {
- # $1 Name of Variable to store local IP (LOCAL_IP)
+get_current_ip () {
+ # $1 Name of Variable to store current IP
local __CNT=0 # error counter
local __RUNPROG __DATA __URL __ERR
- [ $# -ne 1 ] && write_log 12 "Error calling 'get_local_ip()' - wrong number of parameters"
- write_log 7 "Detect local IP on '$ip_source'"
+ [ $# -ne 1 ] && write_log 12 "Error calling 'get_current_ip()' - wrong number of parameters"
+ write_log 7 "Detect current IP on '$ip_source'"
while : ; do
if [ -n "$ip_network" -a "$ip_source" = "network" ]; then
[ $use_ipv6 -eq 0 ] && __RUNPROG="network_get_ipaddr" \
|| __RUNPROG="network_get_ipaddr6"
eval "$__RUNPROG __DATA $ip_network" || \
- write_log 13 "Can not detect local IP using $__RUNPROG '$ip_network' - Error: '$?'"
- [ -n "$__DATA" ] && write_log 7 "Local IP '$__DATA' detected on network '$ip_network'"
+ write_log 13 "Can not detect current IP using $__RUNPROG '$ip_network' - Error: '$?'"
+ [ -n "$__DATA" ] && write_log 7 "Current IP '$__DATA' detected on network '$ip_network'"
elif [ -n "$ip_interface" -a "$ip_source" = "interface" ]; then
local __DATA4=""; local __DATA6=""
if [ -n "$(command -v ip)" ]; then # ip program installed
fi
fi
[ $use_ipv6 -eq 0 ] && __DATA="$__DATA4" || __DATA="$__DATA6"
- [ -n "$__DATA" ] && write_log 7 "Local IP '$__DATA' detected on interface '$ip_interface'"
+ [ -n "$__DATA" ] && write_log 7 "Current IP '$__DATA' detected on interface '$ip_interface'"
elif [ -n "$ip_script" -a "$ip_source" = "script" ]; then
write_log 7 "#> $ip_script >$DATFILE 2>$ERRFILE"
eval $ip_script >$DATFILE 2>$ERRFILE
__ERR=$?
if [ $__ERR -eq 0 ]; then
__DATA=$(cat $DATFILE)
- [ -n "$__DATA" ] && write_log 7 "Local IP '$__DATA' detected via script '$ip_script'"
+ [ -n "$__DATA" ] && write_log 7 "Current IP '$__DATA' detected via script '$ip_script'"
else
write_log 3 "$ip_script Error: '$__ERR'"
write_log 7 "$(cat $ERRFILE)" # report error
[ $use_ipv6 -eq 0 ] \
&& __DATA=$(grep -m 1 -o "$IPV4_REGEX" $DATFILE) \
|| __DATA=$(grep -m 1 -o "$IPV6_REGEX" $DATFILE)
- [ -n "$__DATA" ] && write_log 7 "Local IP '$__DATA' detected on web at '$ip_url'"
+ [ -n "$__DATA" ] && write_log 7 "Current IP '$__DATA' detected on web at '$ip_url'"
else
- write_log 12 "Error in 'get_local_ip()' - unhandled ip_source '$ip_source'"
+ write_log 12 "Error in 'get_current_ip()' - unhandled ip_source '$ip_source'"
fi
# valid data found return here
[ -n "$__DATA" ] && {
[ $VERBOSE -gt 1 ] && {
# VERBOSE > 1 then NO retry
- write_log 4 "Get local IP via '$ip_source' failed - Verbose Mode: $VERBOSE - NO retry on error"
+ write_log 4 "Get current IP via '$ip_source' failed - Verbose Mode: $VERBOSE - NO retry on error"
return 1
}
__CNT=$(( $__CNT + 1 )) # increment error counter
- # if error count > retry_count leave here
- [ $retry_count -gt 0 -a $__CNT -gt $retry_count ] && \
- write_log 14 "Get local IP via '$ip_source' failed after $retry_count retries"
- write_log 4 "Get local IP via '$ip_source' failed - retry $__CNT/$retry_count in $RETRY_SECONDS seconds"
+ # if error count > retry_max_count leave here
+ [ $retry_max_count -gt 0 -a $__CNT -gt $retry_max_count ] && \
+ write_log 14 "Get current IP via '$ip_source' failed after $retry_max_count retries"
+ write_log 4 "Get current IP via '$ip_source' failed - retry $__CNT/$retry_max_count in $RETRY_SECONDS seconds"
sleep $RETRY_SECONDS &
PID_SLEEP=$!
wait $PID_SLEEP # enable trap-handler
PID_SLEEP=0
done
# we should never come here there must be a programming error
- write_log 12 "Error in 'get_local_ip()' - program coding error"
+ write_log 12 "Error in 'get_current_ip()' - program coding error"
}
get_registered_ip() {
}
__CNT=$(( $__CNT + 1 )) # increment error counter
- # if error count > retry_count leave here
- [ $retry_count -gt 0 -a $__CNT -gt $retry_count ] && \
- write_log 14 "Get registered/public IP for '$lookup_host' failed after $retry_count retries"
+ # if error count > retry_max_count leave here
+ [ $retry_max_count -gt 0 -a $__CNT -gt $retry_max_count ] && \
+ write_log 14 "Get registered/public IP for '$lookup_host' failed after $retry_max_count retries"
- write_log 4 "Get registered/public IP for '$lookup_host' failed - retry $__CNT/$retry_count in $RETRY_SECONDS seconds"
+ write_log 4 "Get registered/public IP for '$lookup_host' failed - retry $__CNT/$retry_max_count in $RETRY_SECONDS seconds"
sleep $RETRY_SECONDS &
PID_SLEEP=$!
wait $PID_SLEEP # enable trap-handler
if [ "$ip_source" = "web" -o "$ip_source" = "script" ]; then
# we wait only 3 seconds for an
# answer from "web" or "script"
- write_log 7 "-----> timeout 3 -- get_local_ip IP"
- timeout 3 -- get_local_ip IP
+ write_log 7 "-----> timeout 3 -- get_current_ip IP"
+ timeout 3 -- get_current_ip IP
else
- write_log 7 "-----> get_local_ip IP"
- get_local_ip IP
+ write_log 7 "-----> get_current_ip IP"
+ get_current_ip IP
fi
__RET=$?
;;
'1' output to console
'2' output to console AND logfile
+ run once WITHOUT retry on error
- '3' output to console AND logfile
- + run once WITHOUT retry on error
- + NOT sending update to DDNS service
+ -d dry run (don't send any changes)
EOF
}
exit 1
}
-while getopts ":hv:n:S:V" OPT; do
+while getopts ":hv:dn:S:V" OPT; do
case "$OPT" in
h) usage; exit 0;;
v) VERBOSE=$OPTARG;;
+ d) DRY_RUN=1;;
n) NETWORK=$OPTARG;;
S) SECTION_ID=$OPTARG;;
V) printf %s\\n "ddns-scripts $VERSION"; exit 0;;
# only with this data of this run for easier diagnostic
# new one created by write_log function
[ $VERBOSE -gt 1 -a -f $LOGFILE ] && rm -f $LOGFILE
+# Previously -v 3 could be used for dry run
+[ $VERBOSE -ge 3 ] && DRY_RUN=1
# TRAP handler
trap "trap_handler 0 \$?" 0 # handle script exit with exit status
#
# use_syslog log activity to syslog
#
-# ip_source source to detect current local IP ('network' or 'web' or 'script' or 'interface')
+# ip_source source to detect current IP ('network' or 'web' or 'script' or 'interface')
# ip_network local defined network to read IP from i.e. 'wan' or 'wan6'
-# ip_url URL to read local address from i.e. http://checkip.dyndns.com/ or http://checkipv6.dyndns.com/
-# ip_script full path and name of your script to detect local IP
+# ip_url URL to read current IP from i.e. http://checkip.dyndns.com/ or http://checkipv6.dyndns.com/
+# ip_script full path and name of your script to detect current IP
# ip_interface physical interface to use for detecting
#
# check_interval check for changes every !!! checks below 10 minutes make no sense because the Internet
#
# retry_interval if error was detected retry in
# retry_unit 'days' 'hours' 'minutes' 'seconds'
-# retry_count number of retries before scripts stops
+# retry_max_count number of retries before scripts stops
#
# use_ipv6 detecting/sending IPv6 address
# force_ipversion force usage of IPv4 or IPv6 for the whole detection and update communication
# dns_server using a non default dns server to get Registered IP from Internet
# force_dnstcp force communication with DNS server via TCP instead of default UDP
-# proxy using a proxy for communication !!! ALSO used to detect local IP via web => return proxy's IP !!!
+# proxy using a proxy for communication !!! ALSO used to detect current IP via web => return proxy's IP !!!
# use_logfile self-explanatory "/var/log/ddns/$SECTION_ID.log"
# is_glue the record that should be updated is a glue record
#
# set defaults if not defined
[ -z "$enabled" ] && enabled=0
-[ -z "$retry_count" ] && retry_count=0 # endless retry
+[ -z "$retry_max_count" ] && retry_max_count=0 # endless retry
[ -z "$use_syslog" ] && use_syslog=2 # syslog "Notice"
[ -z "$use_https" ] && use_https=0 # not use https
[ -z "$use_logfile" ] && use_logfile=1 # use logfile by default
0) write_log 7 "verbose mode : 0 - run normal, NO console output";;
1) write_log 7 "verbose mode : 1 - run normal, console mode";;
2) write_log 7 "verbose mode : 2 - run once, NO retry on error";;
- 3) write_log 7 "verbose mode : 3 - run once, NO retry on error, NOT sending update";;
*) write_log 14 "error detecting VERBOSE '$VERBOSE'";;
esac
+[ $DRY_RUN -ge 1 ] && write_log 7 "Dry Run: NOT sending update"
# check enabled state otherwise we don't need to continue
[ $enabled -eq 0 ] && write_log 14 "Service section disabled!"
# verify ip_source 'script' if script is configured and executable
if [ "$ip_source" = "script" ]; then
set -- $ip_script #handling script with parameters, we need a trick
- [ -z "$1" ] && write_log 14 "No script defined to detect local IP!"
- [ -x "$1" ] || write_log 14 "Script to detect local IP not executable!"
+ [ -z "$1" ] && write_log 14 "No script defined to detect current IP!"
+ [ -x "$1" ] || write_log 14 "Script to detect current IP not executable!"
fi
# compute update interval in seconds
write_log 7 "check interval: $CHECK_SECONDS seconds"
write_log 7 "force interval: $FORCE_SECONDS seconds"
write_log 7 "retry interval: $RETRY_SECONDS seconds"
-write_log 7 "retry counter : $retry_count times"
+write_log 7 "retry max count: $retry_max_count times"
# kill old process if it exists & set new pid file
stop_section_processes "$SECTION_ID"
write_log 6 "Starting main loop at $(eval $DATE_PROG)"
while : ; do
- get_local_ip LOCAL_IP # read local IP
- [ $use_ipv6 -eq 1 ] && expand_ipv6 "$LOCAL_IP" LOCAL_IP # on IPv6 we use expanded version
+ get_current_ip CURRENT_IP # read current IP
+ [ $use_ipv6 -eq 1 ] && expand_ipv6 "$CURRENT_IP" CURRENT_IP # on IPv6 we use expanded version
# prepare update
# never updated or forced immediate then NEXT_TIME = 0
get_uptime CURR_TIME # get current uptime
- # send update when current time > next time or local ip different from registered ip
- if [ $CURR_TIME -ge $NEXT_TIME -o "$LOCAL_IP" != "$REGISTERED_IP" ]; then
- if [ $VERBOSE -gt 2 ]; then
- write_log 7 "Verbose Mode: $VERBOSE - NO UPDATE send"
- elif [ "$LOCAL_IP" != "$REGISTERED_IP" ]; then
- write_log 7 "Update needed - L: '$LOCAL_IP' <> R: '$REGISTERED_IP'"
+ # send update when current time > next time or current ip different from registered ip
+ if [ $CURR_TIME -ge $NEXT_TIME -o "$CURRENT_IP" != "$REGISTERED_IP" ]; then
+ if [ $DRY_RUN -ge 1 ]; then
+ write_log 7 "Dry Run: NO UPDATE send"
+ elif [ "$CURRENT_IP" != "$REGISTERED_IP" ]; then
+ write_log 7 "Update needed - L: '$CURRENT_IP' <> R: '$REGISTERED_IP'"
else
- write_log 7 "Forced Update - L: '$LOCAL_IP' == R: '$REGISTERED_IP'"
+ write_log 7 "Forced Update - L: '$CURRENT_IP' == R: '$REGISTERED_IP'"
fi
ERR_LAST=0
- [ $VERBOSE -lt 3 ] && {
- # only send if VERBOSE < 3
- send_update "$LOCAL_IP"
+ [ $DRY_RUN -eq 0 ] && {
+ send_update "$CURRENT_IP"
ERR_LAST=$? # save return value
}
- # error sending local IP to provider
+ # error sending current IP to provider
# we have no communication error (handled inside send_update/do_transfer)
# but update was not recognized
# do NOT retry after RETRY_SECONDS, do retry after CHECK_SECONDS
if [ $ERR_LAST -eq 0 ]; then
get_uptime LAST_TIME # we send update, so
echo $LAST_TIME > $UPDFILE # save LASTTIME to file
- [ "$LOCAL_IP" != "$REGISTERED_IP" ] \
- && write_log 6 "Update successful - IP '$LOCAL_IP' send" \
- || write_log 6 "Forced update successful - IP: '$LOCAL_IP' send"
+ [ "$CURRENT_IP" != "$REGISTERED_IP" ] \
+ && write_log 6 "Update successful - IP '$CURRENT_IP' send" \
+ || write_log 6 "Forced update successful - IP: '$CURRENT_IP' send"
elif [ $ERR_LAST -eq 127 ]; then
write_log 3 "No update send to DDNS Provider"
else
fi
# now we wait for check interval before testing if update was recognized
- # only sleep if VERBOSE <= 2 because otherwise nothing was send
- [ $VERBOSE -le 2 ] && {
+ [ $DRY_RUN -eq 0 ] && {
write_log 7 "Waiting $CHECK_SECONDS seconds (Check Interval)"
sleep $CHECK_SECONDS &
PID_SLEEP=$!
wait $PID_SLEEP # enable trap-handler
PID_SLEEP=0
- } || write_log 7 "Verbose Mode: $VERBOSE - NO Check Interval waiting"
+ } || write_log 7 "Dry Run: NO Check Interval waiting"
REGISTERED_IP="" # clear variable
get_registered_ip REGISTERED_IP # get registered/public IP
[ $use_ipv6 -eq 1 ] && expand_ipv6 "$REGISTERED_IP" REGISTERED_IP # on IPv6 we use expanded version
# IP's are still different
- if [ "$LOCAL_IP" != "$REGISTERED_IP" ]; then
+ if [ "$CURRENT_IP" != "$REGISTERED_IP" ]; then
if [ $VERBOSE -le 1 ]; then # VERBOSE <=1 then retry
- ERR_UPDATE=$(( $ERR_UPDATE + 1 ))
- [ $retry_count -gt 0 -a $ERR_UPDATE -gt $retry_count ] && \
- write_log 14 "Updating IP at DDNS provider failed after $retry_count retries"
- write_log 4 "Updating IP at DDNS provider failed - starting retry $ERR_UPDATE/$retry_count"
+ RETRY_COUNT=$(( $RETRY_COUNT + 1 ))
+ [ $retry_max_count -gt 0 -a $RETRY_COUNT -gt $retry_max_count ] && \
+ write_log 14 "Updating IP at DDNS provider failed after $retry_max_count retries"
+ write_log 4 "Updating IP at DDNS provider failed - starting retry $RETRY_COUNT/$retry_max_count"
continue # loop to beginning
else
write_log 4 "Updating IP at DDNS provider failed"
fi
else
# we checked successful the last update
- ERR_UPDATE=0 # reset error counter
+ RETRY_COUNT=0 # reset error counter
fi
# force_update=0 or VERBOSE > 1 - leave here
include $(TOPDIR)/rules.mk
PKG_NAME:=dnslookup
-PKG_VERSION:=1.7.3
+PKG_VERSION:=1.8.0
PKG_RELEASE:=$(AUTORELEASE)
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://codeload.github.com/ameshkov/dnslookup/tar.gz/v$(PKG_VERSION)?
-PKG_HASH:=c63d2dc8c357045e28f29ec716e3c20e39a2c1be4dc4313c6c2ab62838e5e2db
+PKG_HASH:=8358291240a27f20f4d635f9a27a9373144b723c4d73ee01374a9ed5c02126bd
PKG_MAINTAINER:=Tianling Shen <cnsztl@immortalwrt.org>
PKG_LICENSE:=MIT
include $(TOPDIR)/rules.mk
PKG_NAME:=dnsproxy
-PKG_VERSION:=0.44.0
+PKG_VERSION:=0.45.2
PKG_RELEASE:=$(AUTORELEASE)
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://codeload.github.com/AdguardTeam/dnsproxy/tar.gz/v$(PKG_VERSION)?
-PKG_HASH:=a9617296fe668296d121baa266bd6beba5582834fce3bf9375d084de683cd664
+PKG_HASH:=1c9c20f86621adebee6b61ee4bfba4b05b5e42a9ef66f01425d4e45987ff8d35
PKG_MAINTAINER:=Tianling Shen <cnsztl@immortalwrt.org>
PKG_LICENSE:=Apache-2.0
--- /dev/null
+include $(TOPDIR)/rules.mk
+
+PKG_NAME:=gatling
+PKG_VERSION:=0.16
+PKG_RELEASE:=1
+
+PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.xz
+PKG_SOURCE_URL:=https://www.fefe.de/gatling/
+PKG_HASH:=5f96438ee201d7f1f6c2e0849ff273b196bdc7493f29a719ce8ed08c8be6365b
+
+PKG_BUILD_DEPENDS:=px5g-mbedtls libowfat
+
+PKG_MAINTAINER:=Martin Hübner <martin.hubner@web.de>
+PKG_LICENSE:=GPL-2.0-only
+PKG_LICENSE_FILES:=LICENSE
+
+include $(INCLUDE_DIR)/package.mk
+
+define Package/gatling
+ SECTION:=net
+ CATEGORY:=Network
+ SUBMENU:=Web Servers/Proxies
+ TITLE:=gatling - a high performance web server
+ URL:=https://www.fefe.de/gatling/
+ DEPENDS:=+zlib +libcap +libopenssl
+endef
+
+define Package/gatling/description
+ gatling - a high performance web server
+ with a rather small binary size but a decent set of extra features
+ like ftp, basic htaccess and basic DoS-Protection.
+ Gatling is particularly good in situations with very high load.
+endef
+
+define Package/gatling/install
+ $(INSTALL_DIR) $(1)/usr/bin
+ $(INSTALL_BIN) $(PKG_BUILD_DIR)/gatling $(1)/usr/bin/
+endef
+
+$(eval $(call BuildPackage,gatling))
--- /dev/null
+gatling: decrease optimisation
+
+When compiling on MIPS architectures with -O2, some interlinking
+errors occur. The linker will fail with:
+
+```
+Unsupported jump between ISA modes; consider recompiling with interlinking enabled
+```
+
+Reducing optimisation works around these errors and looks like
+a fairly common solution to this problem.
+https://forum.openwrt.org/t/unsupported-jump-between-isa-modes-consider-recompiling-with-interlinking-enabled/55786
+
+Signed-off-by: Martin Hübner <martin.hubner@web.de>
+--- a/GNUmakefile
++++ b/GNUmakefile
+@@ -47,7 +47,7 @@ ifneq ($(DEBUG),)
+ CFLAGS+=-g -Og
+ LDFLAGS+=-g
+ else
+-CFLAGS+=-O2 -fomit-frame-pointer -I/usr/local/include
++CFLAGS+=-O1 -fomit-frame-pointer -I/usr/local/include
+ LDFLAGS+=-s
+ ifneq ($(DIET),)
+ DIET+=-Os
--- /dev/null
+gatling: configure features
+
+Disables SMB-support, as only deprecated SMBv1 is supported,
+which most users wouldn't use anyway. Avoids potential
+vulnerabilities.
+
+Activates threaded opening: Thus gatling will open files in
+threads to open kernel I/O scheduling opportunities.
+
+Signed-off-by: Martin Hübner <martin.hubner@web.de>
+--- a/gatling_features.h
++++ b/gatling_features.h
+@@ -7,7 +7,7 @@
+
+ #define SUPPORT_SERVERSTATUS
+ // #define SUPPORT_DAV
+-#define SUPPORT_SMB
++// #define SUPPORT_SMB
+ #define SUPPORT_FTP
+ #define SUPPORT_PROXY
+ /* #define DEBUG to enable more verbose debug messages for tracking fd
+@@ -36,7 +36,7 @@
+ #define SUPPORT_FALLBACK_REDIR
+
+ /* open files in threads to open kernel I/O scheduling opportunities */
+-#undef SUPPORT_THREADED_OPEN
++#define SUPPORT_THREADED_OPEN
+
+ /* try to divine MIME type by looking at content */
+ #define SUPPORT_MIMEMAGIC
--- /dev/null
+#!/bin/sh
+
+gatling -h
include $(TOPDIR)/rules.mk
PKG_NAME:=gg
-PKG_VERSION:=0.2.9
+PKG_VERSION:=0.2.13
PKG_RELEASE:=$(AUTORELEASE)
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://codeload.github.com/mzz2017/gg/tar.gz/v$(PKG_VERSION)?
-PKG_HASH:=a500b148c5e0404672062f7c41fe8cd78dd3dc3cc0376e1b8983bca41dd155e8
+PKG_HASH:=73d624f6cfcc003a1d1cac61b9a314dd29da745570c73660a4a5f9e201ec7b7f
PKG_MAINTAINER:=Tianling Shen <cnsztl@immortalwrt.org>
PKG_LICENSE:=AGPL-3.0-only
include $(TOPDIR)/rules.mk
PKG_NAME:=haproxy
-PKG_VERSION:=2.6.4
+PKG_VERSION:=2.6.6
PKG_RELEASE:=$(AUTORELEASE)
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://www.haproxy.org/download/2.6/src
-PKG_HASH:=f07d67ada2ff3a999fed4e34459c0489536331a549665ac90cb6a8df91f4a289
+PKG_HASH:=d0c80c90c04ae79598b58b9749d53787f00f7b515175e7d8203f2796e6a6594d
PKG_MAINTAINER:=Thomas Heil <heil@terminal-consulting.de>, \
Christian Lachner <gladiac@gmail.com>
#!/bin/sh
CLONEURL=https://git.haproxy.org/git/haproxy-2.6.git
-BASE_TAG=v2.6.4
+BASE_TAG=v2.6.6
TMP_REPODIR=tmprepo
PATCHESDIR=patches
--- /dev/null
+#
+# Copyright (C) 2022 Alexander E. Patrakov
+#
+# This is free software, licensed under the GNU General Public License v2.
+# See /LICENSE for more information.
+#
+# Special thanks to the Debian and Fedora projects for their hping3
+# packaging, elements of which (patches, list of dependencies, etc.) are
+# reused here.
+
+include $(TOPDIR)/rules.mk
+
+PKG_NAME:=hping3
+PKG_SOURCE_DATE:=2014-12-26
+PKG_SOURCE_VERSION:=3547c7691742c6eaa31f8402e0ccbb81387c1b99
+PKG_RELEASE:=1
+
+PKG_SOURCE_PROTO:=git
+PKG_SOURCE_URL:=https://github.com/antirez/hping
+PKG_MIRROR_HASH:=0d65eaa8e55ab10025fb2d615eb9567255288b3b9eb66465f54d0e1718bb80ad
+
+PKG_MAINTAINER:=Alexander E. Patrakov <patrakov@gmail.com>
+PKG_LICENSE:=GPL-2.0-only
+
+PKG_INSTALL:=1
+PKG_BUILD_PARALLEL:=1
+
+include $(INCLUDE_DIR)/package.mk
+
+define Package/hping3
+ SECTION:=net
+ CATEGORY:=Network
+ TITLE:=Active Network Smashing Tool
+ URL:=https://github.com/antirez/hping
+ DEPENDS:= +libpcap
+endef
+
+define Package/hping3/description
+ hping3 is a network tool able to send custom ICMP/UDP/TCP packets
+ and to display target replies like ping does with ICMP replies. It
+ handles fragmentation and arbitrary packet body and size, and can
+ be used to transfer files under supported protocols. Using hping3,
+ you can test firewall rules, perform (spoofed) port scanning, test
+ network performance using different protocols, do path MTU
+ discovery, perform traceroute-like actions under different
+ protocols, fingerprint remote operating systems, audit TCP/IP
+ stacks, etc. Tcl support is not included.
+endef
+
+define Package/hping3/install
+ $(INSTALL_DIR) $(1)/usr/sbin
+ $(INSTALL_BIN) $(PKG_BUILD_DIR)/hping3 $(1)/usr/sbin/
+endef
+
+$(eval $(call BuildPackage,hping3))
--- /dev/null
+--- a/Makefile.in
++++ b/Makefile.in
+@@ -9,8 +9,8 @@
+ CC= gcc
+ AR=/usr/bin/ar
+ RANLIB=/usr/bin/ranlib
+-CCOPT= -O2 -Wall @PCAP_INCLUDE@ @TCL_INC@ @USE_TCL@
+-DEBUG= -g
++CCOPT= $(CFLAGS) @PCAP_INCLUDE@ @TCL_INC@ @USE_TCL@
++DEBUG=
+ #uncomment the following if you need libpcap based build under linux
+ #(not raccomanded)
+ COMPILE_TIME=
+@@ -50,17 +50,13 @@ libars.a: $(ARSOBJ)
+ $(RANLIB) $@
+
+ hping3: byteorder.h $(OBJ)
+- $(CC) -o hping3 $(CCOPT) $(DEBUG) $(OBJ) -L/usr/local/lib $(PCAP) @SOLARISLIB@ @TCL_LIB@
++ $(CC) -o hping3 $(CCOPT) $(DEBUG) $(OBJ) $(PCAP) @SOLARISLIB@ @TCL_LIB@
+ @echo
+- ./hping3 -v
+ @echo "use \`make strip' to strip hping3 binary"
+ @echo "use \`make install' to install hping3"
+
+ hping3-static: byteorder.h $(OBJ)
+- $(CC) -static -o hping3-static $(CCOPT) $(DEBUG) $(OBJ) -L/usr/local/lib $(PCAP) @SOLARISLIB@ @TCL_LIB@ -ldl
+-
+-byteorder.h:
+- ./configure
++ $(CC) -static -o hping3-static $(CCOPT) $(DEBUG) $(OBJ) $(PCAP) @SOLARISLIB@ @TCL_LIB@ -ldl
+
+ .c.o:
+ $(CC) -c $(CCOPT) $(DEBUG) $(COMPILE_TIME) $<
+@@ -72,6 +68,8 @@ distclean:
+ rm -rf hping3 *.o byteorder byteorder.h systype.h Makefile libars.a .depend
+
+ install: hping3
++
++dont-install:
+ cp -f hping3 /usr/sbin/
+ chmod 755 /usr/sbin/hping3
+ ln -s /usr/sbin/hping3 /usr/sbin/hping
+--- /dev/null
++++ b/byteorder.h
+@@ -0,0 +1,15 @@
++
++#ifndef __BYTEORDER_H
++#define __BYTEORDER_H
++
++#if defined(__BYTE_ORDER__)
++ #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
++ #define __BIG_ENDIAN_BITFIELD
++ #else
++ #define __LITTLE_ENDIAN_BITFIELD
++ #endif
++#else
++ #error Byte order not set
++#endif
++
++#endif /* __BYTEORDER_H */
+--- a/configure
++++ b/configure
+@@ -1,5 +1,24 @@
+ #!/bin/sh
+
++# This fake configure script applies hard-coded settings for OpenWRT
++
++echo creating Makefile...
++sed -e "s^@PCAP@^PCAP=-lpcap^g" \
++ -e "s^@PCAP_INCLUDE@^^g" \
++ -e "s^@MANPATH@^/usr/share/man^g" \
++ -e "s^@SOLARISLIB@^^g" \
++ -e "s^@USE_TCL@^^g" \
++ -e "s^@TCL_INC@^^g" \
++ -e "s^@TCL_VER@^^g" \
++ -e "s^@TCL_LIB@^^g" \
++ <Makefile.in > Makefile
++
++echo now you can try \`make\'
++exit 0
++
++# Original file follows, to make the patch smaller
++#!/bin/sh
++
+ show_help()
+ {
+ echo configure help:
+--- /dev/null
++++ b/systype.h
+@@ -0,0 +1,6 @@
++#ifndef __SYSTYPE_H
++#define __SYSTYPE_H
++
++#define OSTYPE_LINUX
++
++#endif /* SYSTYPE_H */
--- /dev/null
+--- a/libpcap_stuff.c
++++ b/libpcap_stuff.c
+@@ -16,7 +16,7 @@
+ #include <string.h>
+ #include <stdlib.h>
+ #include <sys/ioctl.h>
+-#include <net/bpf.h>
++#include <pcap-bpf.h>
+ #include <pcap.h>
+
+ #include "globals.h"
+--- a/script.c
++++ b/script.c
+@@ -23,7 +23,7 @@
+ #include <sched.h>
+
+ #include <sys/ioctl.h>
+-#include <net/bpf.h>
++#include <pcap-bpf.h>
+ #include <pcap.h>
+
+ #include "release.h"
--- /dev/null
+Subject: Various spelling fixes.
+Author: Romain Francoise <rfrancoise@debian.org>
+Last-Updated: 2020-09-06
+
+--- a/antigetopt.c
++++ b/antigetopt.c
+@@ -142,7 +142,7 @@ chain_start:
+ #define UNK_LONG_ERRSTRING "unrecognized option `--%s'\n"
+ #define ARG_SHORT_ERRSTRING "option requires an argument -- %c\n"
+ #define ARG_LONG_ERRSTRING "option `--%s' requires an argument\n"
+-#define AMB_ERRSTRING "option `--%s' is ambiguos\n"
++#define AMB_ERRSTRING "option `--%s' is ambiguous\n"
+ #define IERR_ERRSTRING "internal error. ago_gnu_error() called with " \
+ "a bad error code (%d)\n"
+ void ago_gnu_error(char *pname, int error)
+--- a/statistics.c
++++ b/statistics.c
+@@ -30,7 +30,7 @@ void print_statistics(int signal_id)
+ lossrate = 100;
+
+ fprintf(stderr, "\n--- %s hping statistic ---\n", targetname);
+- fprintf(stderr, "%d packets tramitted, %d packets received, "
++ fprintf(stderr, "%d packets transmitted, %d packets received, "
+ "%d%% packet loss\n", sent_pkt, recv_pkt, lossrate);
+ if (out_of_sequence_pkt)
+ fprintf(stderr, "%d out of sequence packets received\n",
+--- a/ars.c
++++ b/ars.c
+@@ -361,7 +361,7 @@ void *ars_add_data(struct ars_packet *pk
+ static void *ptr = "zzappt"; /* we can't return NULL for size == 0 */
+
+ if (size < 0) {
+- ars_set_error(pkt, "Tryed to add a DATA layer with size < 0");
++ ars_set_error(pkt, "Tried to add a DATA layer with size < 0");
+ return NULL;
+ }
+ retval = ars_add_generic(pkt, size, ARS_TYPE_DATA);
--- /dev/null
+Subject: Fix "-N" command line argument
+Author: Guillaume Delacour <gui@iroqwa.org>
+Bug-Debian: http://bugs.debian.org/482844
+Last-Update: 2009-05-13
+
+--- a/docs/hping2.8
++++ b/docs/hping2.8
+@@ -343,7 +343,7 @@ options. If in doubt try
+ .I -N --id
+ Set ip->id field. Default id is random but if fragmentation is turned on
+ and id isn't specified it will be
+-.BR "getpid() & 0xFF" ,
++.BR "getpid() & 0xFFFF" ,
+ to implement a better solution is in TODO list.
+ .TP
+ .I -H --ipproto
+@@ -714,4 +714,4 @@ On solaris hping does not work on the lo
+ a solaris problem, as stated in the tcpdump-workers mailing list,
+ so the libpcap can't do nothing to handle it properly.
+ .SH SEE ALSO
+-ping(8), traceroute(8), ifconfig(8), nmap(1)
+\ No newline at end of file
++ping(8), traceroute(8), ifconfig(8), nmap(1)
+--- a/docs/hping3.8
++++ b/docs/hping3.8
+@@ -352,7 +352,7 @@ options. If in doubt try
+ .I -N --id
+ Set ip->id field. Default id is random but if fragmentation is turned on
+ and id isn't specified it will be
+-.BR "getpid() & 0xFF" ,
++.BR "getpid() & 0xFFFF" ,
+ to implement a better solution is in TODO list.
+ .TP
+ .I -H --ipproto
+--- a/hping2.h
++++ b/hping2.h
+@@ -121,7 +121,7 @@
+ #define DEFAULT_ICMP_IP_IHL (IPHDR_SIZE >> 2)
+ #define DEFAULT_ICMP_IP_TOS 0
+ #define DEFAULT_ICMP_IP_TOT_LEN 0 /* computed by send_icmp_*() */
+-#define DEFAULT_ICMP_IP_ID 0 /* rand */
++#define DEFAULT_ICMP_IP_ID -1 /* rand */
+ #define DEFAULT_ICMP_CKSUM -1 /* -1 means compute the cksum */
+ #define DEFAULT_ICMP_IP_PROTOCOL 6 /* TCP */
+ #define DEFAULT_RAW_IP_PROTOCOL 6 /* TCP */
+--- a/parseoptions.c
++++ b/parseoptions.c
+@@ -468,6 +468,10 @@ int parse_options(int argc, char **argv)
+ break;
+ case OPT_ICMP_IPID:
+ icmp_ip_id = strtol(ago_optarg, NULL, 0);
++ if (icmp_ip_id < 0 || icmp_ip_id > 0xffff) {
++ fprintf(stderr, "Bad ICMP IP ID, resetting to random.\n");
++ icmp_ip_id = DEFAULT_ICMP_IP_ID;
++ }
+ break;
+ case OPT_ICMP_IPPROTO:
+ icmp_ip_protocol = strtol(ago_optarg, NULL, 0);
+--- a/sendicmp.c
++++ b/sendicmp.c
+@@ -83,7 +83,7 @@ void send_icmp_echo(void)
+ icmp->type = opt_icmptype; /* echo replay or echo request */
+ icmp->code = opt_icmpcode; /* should be indifferent */
+ icmp->checksum = 0;
+- icmp->un.echo.id = getpid() & 0xffff;
++ icmp->un.echo.id = icmp_ip_id == DEFAULT_ICMP_IP_ID ? getpid() & 0xffff : icmp_ip_id;
+ icmp->un.echo.sequence = _icmp_seq;
+
+ /* data */
--- /dev/null
+Subject: Off-by-one error with --dontfrag
+Author: Christian Mock <cm@coretec.at>
+Bug-Debian: http://bugs.debian.org/537704
+Last-Update: 2009-09-04
+
+--- a/sendip_handler.c
++++ b/sendip_handler.c
+@@ -19,7 +19,7 @@ void send_ip_handler(char *packet, unsig
+ {
+ ip_optlen = ip_opt_build(ip_opt);
+
+- if (!opt_fragment && (size+ip_optlen+20 >= h_if_mtu))
++ if (!opt_fragment && (size+ip_optlen+20 > h_if_mtu))
+ {
+ /* auto-activate fragmentation */
+ virtual_mtu = h_if_mtu-20;
--- /dev/null
+Subject: Add RTT handling for ICMP destination unreachable packets.
+Author: Baruch Even <baruch@debian.org>
+Bug-Debian: http://bugs.debian.org/248273
+Last-Update: 2009-09-04
+
+--- a/waitpacket.c
++++ b/waitpacket.c
+@@ -229,6 +229,7 @@ int recv_icmp(void *packet, size_t size)
+ {
+ struct myicmphdr icmp;
+ struct myiphdr quoted_ip;
++ struct myudphdr quoted_udp;
+
+ /* Check if the packet can contain the ICMP header */
+ if (size < ICMPHDR_SIZE) {
+@@ -286,6 +287,18 @@ int recv_icmp(void *packet, size_t size)
+ case 3:
+ if (!opt_quiet)
+ log_icmp_unreach(inet_ntoa(src), icmp.code);
++ if (quoted_ip.protocol == IPPROTO_UDP)
++ {
++ int sequence = 0, status;
++ float ms_delay = 0.0;
++ unsigned short port;
++
++ /* Get RTT for UDP packet */
++ memcpy(&quoted_udp, packet+ICMPHDR_SIZE+(quoted_ip.ihl<<2), sizeof(quoted_udp));
++ port = ntohs(quoted_udp.uh_sport);
++ status = rtt(&sequence, port, &ms_delay);
++ printf("status=%d port=%d seq=%d\n", status, port, sequence);
++ }
+ return 1;
+ case 11:
+ if (opt_traceroute)
--- /dev/null
+Subject: Fix spelling error in binary (lintian -IE)
+Author: Romain Francoise <rfrancoise@debian.org>
+Last-Update: 2005-03-12
+
+--- a/parseoptions.c
++++ b/parseoptions.c
+@@ -179,7 +179,7 @@ void parse_route(unsigned char *route, u
+ str[j++] = '/';
+ break;
+ }
+- fprintf(stderr, "invalid IP adress in route\n");
++ fprintf(stderr, "invalid IP address in route\n");
+ fail_parse_route();
+ case ':':
+ if ((!i) && j && j < 4)
+--- a/getifname.c
++++ b/getifname.c
+@@ -206,7 +206,7 @@ int get_if_name(void)
+ printf("DEBUG: Output interface address: %s\n",
+ inet_ntoa(output_if_addr.sin_addr));
+ /* Put something in saved_ifname in order to tell
+- that the output adress is known */
++ that the output address is known */
+ saved_ifname[0] = 'X'; saved_ifname[1] = 0;
+ } else {
+ fprintf(stderr, "Warning: Unable to guess the output "
+--- a/usage.c
++++ b/usage.c
+@@ -50,7 +50,7 @@ void show_usage(void)
+ " -r --rel relativize id field (to estimate host traffic)\n"
+ " -f --frag split packets in more frag. (may pass weak acl)\n"
+ " -x --morefrag set more fragments flag\n"
+-" -y --dontfrag set dont fragment flag\n"
++" -y --dontfrag set don't fragment flag\n"
+ " -g --fragoff set the fragment offset\n"
+ " -m --mtu set virtual mtu, implies --frag if packet size > mtu\n"
+ " -o --tos type of service (default 0x00), try --tos help\n"
+@@ -131,8 +131,8 @@ void icmp_help(void)
+ "ICMP help:\n"
+ " ICMP concerned packet options:\n"
+ " --icmp-ipver set ip version ( default 4 )\n"
+-" --icmp-iphlen set ip header lenght ( default IPHDR_SIZE >> 2)\n"
+-" --icmp-iplen set ip total lengtht ( default real lenght )\n"
++" --icmp-iphlen set ip header length ( default IPHDR_SIZE >> 2)\n"
++" --icmp-iplen set ip total length ( default real length )\n"
+ " --icmp-ipid set ip id ( default random )\n"
+ " --icmp-ipproto set ip protocol ( default IPPROTO_TCP )\n"
+ " --icmp-ipsrc set ip source ( default 0.0.0.0 )\n"
+--- a/sendtcp.c
++++ b/sendtcp.c
+@@ -50,7 +50,7 @@ void send_tcp(void)
+ memcpy(&pseudoheader->saddr, &local.sin_addr.s_addr, 4);
+ memcpy(&pseudoheader->daddr, &remote.sin_addr.s_addr, 4);
+ pseudoheader->protocol = 6; /* tcp */
+- pseudoheader->lenght = htons(TCPHDR_SIZE+tcp_opt_size+data_size);
++ pseudoheader->length = htons(TCPHDR_SIZE+tcp_opt_size+data_size);
+
+ /* tcp header */
+ tcp->th_dport = htons(dst_port);
+--- a/sendudp.c
++++ b/sendudp.c
+@@ -46,7 +46,7 @@ void send_udp(void)
+ memcpy(&pseudoheader->saddr, &local.sin_addr.s_addr, 4);
+ memcpy(&pseudoheader->daddr, &remote.sin_addr.s_addr, 4);
+ pseudoheader->protocol = 17; /* udp */
+- pseudoheader->lenght = htons(packet_size);
++ pseudoheader->length = htons(packet_size);
+
+ /* udp header */
+ udp->uh_dport = htons(dst_port);
+--- a/sendicmp.c
++++ b/sendicmp.c
+@@ -243,7 +243,7 @@ void send_icmp_other(void)
+ memcpy(&pseudoheader->saddr, &icmp_ip_src.sin_addr.s_addr, 4);
+ memcpy(&pseudoheader->daddr, &icmp_ip_dst.sin_addr.s_addr, 4);
+ pseudoheader->protocol = icmp_ip.protocol;
+- pseudoheader->lenght = icmp_ip.tot_len;
++ pseudoheader->length = icmp_ip.tot_len;
+ icmp_udp->uh_sport = htons(icmp_ip_srcport);
+ icmp_udp->uh_dport = htons(icmp_ip_dstport);
+ icmp_udp->uh_ulen = htons(UDPHDR_SIZE + udp_data_len);
+--- a/hping2.h
++++ b/hping2.h
+@@ -138,7 +138,7 @@
+
+ /* fragmentation defines */
+ #define MF ((unsigned short)0x2000) /* more fragments */
+-#define DF ((unsigned short)0x4000) /* dont fragment */
++#define DF ((unsigned short)0x4000) /* don't fragment */
+ #define NF ((unsigned short)0x0000) /* no more fragments */
+
+ /* ip options defines */
+@@ -341,7 +341,7 @@ struct pseudohdr
+ __u32 daddr;
+ __u8 zero;
+ __u8 protocol;
+- __u16 lenght;
++ __u16 length;
+ };
+
+ #define PSEUDOHDR_SIZE sizeof(struct pseudohdr)
+--- a/ars.c
++++ b/ars.c
+@@ -698,7 +698,7 @@ int ars_udptcp_cksum(struct ars_packet *
+ memcpy(&pseudo.daddr, &ip->daddr, 4);
+ pseudo.protocol = (pkt->p_layer[layer].l_type == ARS_TYPE_TCP)
+ ? ARS_IPPROTO_TCP : ARS_IPPROTO_UDP;
+- pseudo.lenght = htons(ars_relative_size(pkt, layer));
++ pseudo.length = htons(ars_relative_size(pkt, layer));
+
+ /* Finally do the checksum */
+ ars_multi_cksum(&mc, ARS_MC_INIT, NULL, 0);
+--- a/datahandler.c
++++ b/datahandler.c
+@@ -26,7 +26,7 @@ void data_handler(char *data, int data_s
+ }
+
+ if (opt_sign) {
+- memcpy(data, sign, signlen); /* lenght pre-checked */
++ memcpy(data, sign, signlen); /* length pre-checked */
+ data+=signlen;
+ data_size-=signlen;
+ }
+--- a/ars.h
++++ b/ars.h
+@@ -67,7 +67,7 @@
+ #define ARS_MAX_IP_SIZE 65535
+
+ #define ARS_IP_MF ((unsigned short)0x2000) /* more fragments */
+-#define ARS_IP_DF ((unsigned short)0x4000) /* dont fragment */
++#define ARS_IP_DF ((unsigned short)0x4000) /* don't fragment */
+ #define ARS_IP_RF ((unsigned short)0x8000) /* reserved fragment flag */
+
+ #define ARS_IPOPT_COPY 0x80
+@@ -308,7 +308,7 @@ struct ars_pseudohdr
+ __u32 daddr;
+ __u8 zero;
+ __u8 protocol;
+- __u16 lenght;
++ __u16 length;
+ };
+
+ /* The IGRP header structure */
+--- a/sendip_handler.c
++++ b/sendip_handler.c
+@@ -35,7 +35,7 @@ void send_ip_handler(char *packet, unsig
+ unsigned short fragment_flag = 0;
+
+ if (opt_mf) fragment_flag |= MF; /* more fragments */
+- if (opt_df) fragment_flag |= DF; /* dont fragment */
++ if (opt_df) fragment_flag |= DF; /* don't fragment */
+ send_ip((char*)&local.sin_addr,
+ (char*)&remote.sin_addr,
+ packet, size, fragment_flag, ip_frag_offset,
--- /dev/null
+Subject: Fix incorrect data size check for UDP.
+Author: Luca Bigliardi <shammash@artha.org>
+Bug-Debian: http://bugs.debian.org/546590
+Last-Update: 2009-09-19
+
+--- a/parseoptions.c
++++ b/parseoptions.c
+@@ -606,9 +606,12 @@ int parse_options(int argc, char **argv)
+ if (opt_numeric == TRUE) opt_gethost = FALSE;
+
+ /* some error condition */
+- if (data_size+IPHDR_SIZE+TCPHDR_SIZE > 65535) {
++ if (data_size+IPHDR_SIZE+
++ (opt_udpmode?UDPHDR_SIZE:TCPHDR_SIZE) > 65535) {
+ printf("Option error: sorry, data size must be <= %lu\n",
+- (unsigned long)(65535-IPHDR_SIZE+TCPHDR_SIZE));
++ (unsigned long)(65535-(IPHDR_SIZE+
++ (opt_udpmode?UDPHDR_SIZE:TCPHDR_SIZE)))
++ );
+ exit(1);
+ }
+ else if (count <= 0 && count != -1) {
--- /dev/null
+Subject: Add tcp-mss support.
+Origin: http://cvsweb.netbsd.org/bsdweb.cgi/pkgsrc/net/hping/patches/
+Bug-Debian: http://bugs.debian.org/409391
+Last-Update: 2009-09-19
+
+--- a/main.c
++++ b/main.c
+@@ -44,7 +44,8 @@ unsigned int
+ signlen,
+ lsr_length = 0,
+ ssr_length = 0,
+- tcp_ack;
++ tcp_ack,
++ tcp_mss;
+
+
+ unsigned short int
+@@ -95,6 +96,7 @@ int
+ opt_tcpexitcode = FALSE,
+ opt_badcksum = FALSE,
+ opt_tr_keep_ttl = FALSE,
++ opt_tcp_mss = FALSE,
+ opt_tcp_timestamp = FALSE,
+ opt_clock_skew = FALSE,
+ cs_window = DEFAULT_CS_WINDOW,
+--- a/globals.h
++++ b/globals.h
+@@ -32,7 +32,8 @@ extern unsigned int
+ tcp_seqnum,
+ set_ack,
+ ip_header_length,
+- tcp_ack;
++ tcp_ack,
++ tcp_mss;
+
+ extern unsigned short int
+ data_size;
+@@ -77,6 +78,7 @@ extern int opt_debug,
+ opt_tcpexitcode,
+ opt_badcksum,
+ opt_tr_keep_ttl,
++ opt_tcp_mss,
+ opt_tcp_timestamp,
+ opt_clock_skew,
+ cs_window,
+--- a/parseoptions.c
++++ b/parseoptions.c
+@@ -31,7 +31,7 @@ enum { OPT_COUNT, OPT_INTERVAL, OPT_NUME
+ OPT_RROUTE, OPT_IPPROTO, OPT_ICMP_IPVER, OPT_ICMP_IPHLEN,
+ OPT_ICMP_IPLEN, OPT_ICMP_IPID, OPT_ICMP_IPPROTO, OPT_ICMP_CKSUM,
+ OPT_ICMP_TS, OPT_ICMP_ADDR, OPT_TCPEXITCODE, OPT_FAST, OPT_TR_KEEP_TTL,
+- OPT_TCP_TIMESTAMP, OPT_TR_STOP, OPT_TR_NO_RTT, OPT_ICMP_HELP,
++ OPT_TCP_TIMESTAMP, OPT_TCP_MSS, OPT_TR_STOP, OPT_TR_NO_RTT, OPT_ICMP_HELP,
+ OPT_RAND_DEST, OPT_RAND_SOURCE, OPT_LSRR, OPT_SSRR, OPT_ROUTE_HELP,
+ OPT_ICMP_IPSRC, OPT_ICMP_IPDST, OPT_ICMP_SRCPORT, OPT_ICMP_DSTPORT,
+ OPT_ICMP_GW, OPT_FORCE_ICMP, OPT_APD_SEND, OPT_SCAN, OPT_FASTER,
+@@ -125,6 +125,7 @@ static struct ago_optlist hping_optlist[
+ { '\0', "force-icmp", OPT_FORCE_ICMP, AGO_NOARG },
+ { '\0', "beep", OPT_BEEP, AGO_NOARG },
+ { '\0', "flood", OPT_FLOOD, AGO_NOARG },
++ { '\0', "tcp-mss", OPT_TCP_MSS, AGO_NEEDARG|AGO_EXCEPT0 },
+ { '\0', "clock-skew", OPT_CLOCK_SKEW, AGO_NOARG },
+ { '\0', "clock-skew-win", OPT_CS_WINDOW, AGO_NEEDARG},
+ { '\0', "clock-skew-win-shift", OPT_CS_WINDOW_SHIFT, AGO_NEEDARG},
+@@ -561,6 +562,10 @@ int parse_options(int argc, char **argv)
+ case OPT_FLOOD:
+ opt_flood = TRUE;
+ break;
++ case OPT_TCP_MSS:
++ opt_tcp_mss = TRUE;
++ tcp_mss = strtoul(ago_optarg, NULL, 0);
++ break;
+ case OPT_CLOCK_SKEW:
+ opt_tcp_timestamp = TRUE;
+ opt_clock_skew = TRUE;
+--- a/usage.c
++++ b/usage.c
+@@ -87,6 +87,7 @@ void show_usage(void)
+ " -X --xmas set X unused flag (0x40)\n"
+ " -Y --ymas set Y unused flag (0x80)\n"
+ " --tcpexitcode use last tcp->th_flags as exit code\n"
++" --tcp-mss enable the TCP MSS option with the given value\n"
+ " --tcp-timestamp enable the TCP timestamp option to guess the HZ/uptime\n"
+ "Clock skew detection\n"
+ " --clock-skew enable clock skew detection. Try with -S against open port\n"
+--- a/sendtcp.c
++++ b/sendtcp.c
+@@ -28,10 +28,12 @@ void send_tcp(void)
+ char *packet, *data;
+ struct mytcphdr *tcp;
+ struct pseudohdr *pseudoheader;
+- unsigned char *tstamp;
++ unsigned char *opts;
+
++ if (opt_tcp_mss)
++ tcp_opt_size += 4;
+ if (opt_tcp_timestamp)
+- tcp_opt_size = 12;
++ tcp_opt_size += 12;
+
+ packet_size = TCPHDR_SIZE + tcp_opt_size + data_size;
+ packet = malloc(PSEUDOHDR_SIZE + packet_size);
+@@ -41,7 +43,7 @@ void send_tcp(void)
+ }
+ pseudoheader = (struct pseudohdr*) packet;
+ tcp = (struct mytcphdr*) (packet+PSEUDOHDR_SIZE);
+- tstamp = (unsigned char*) (packet+PSEUDOHDR_SIZE+TCPHDR_SIZE);
++ opts = (unsigned char*) (packet+PSEUDOHDR_SIZE+TCPHDR_SIZE);
+ data = (char*) (packet+PSEUDOHDR_SIZE+TCPHDR_SIZE+tcp_opt_size);
+
+ memset(packet, 0, PSEUDOHDR_SIZE+packet_size);
+@@ -64,14 +66,24 @@ void send_tcp(void)
+ tcp->th_win = htons(src_winsize);
+ tcp->th_flags = tcp_th_flags;
+
++ /* tcp MSS option */
++ if (opt_tcp_mss) {
++ opts[0] = 2;
++ opts[1] = 4; /* 4 bytes, kind+len+MSS */
++ opts[2] = tcp_mss >> 8;
++ opts[3] = tcp_mss & 0xff;
++ opts += 4;
++ }
++
+ /* tcp timestamp option */
+ if (opt_tcp_timestamp) {
+ __u32 randts = rand() ^ (rand() << 16);
+- tstamp[0] = tstamp[1] = 1; /* NOOP */
+- tstamp[2] = 8;
+- tstamp[3] = 10; /* 10 bytes, kind+len+T1+T2 */
+- memcpy(tstamp+4, &randts, 4); /* random */
+- memset(tstamp+8, 0, 4); /* zero */
++ opts[0] = opts[1] = 1; /* NOOP */
++ opts[2] = 8;
++ opts[3] = 10; /* 10 bytes, kind+len+T1+T2 */
++ memcpy(opts+4, &randts, 4); /* random */
++ memset(opts+8, 0, 4); /* zero */
++ opts += 12;
+ }
+
+ /* data */
+--- a/docs/hping3.8
++++ b/docs/hping3.8
+@@ -98,6 +98,8 @@ hping2 \- send (almost) arbitrary TCP/IP
+ ] [
+ .B \-\-tcpexitcode
+ ] [
++.B \-\-tcp-mss
++] [
+ .B \-\-tcp-timestamp
+ ] [
+ .B \-\-tr-stop
+@@ -510,6 +512,9 @@ numbers are predictable.
+ .I -b --badcksum
+ Send packets with a bad UDP/TCP checksum.
+ .TP
++.I --tcp-mss
++Enable the TCP MSS option and set it to the given value.
++.TP
+ .I --tcp-timestamp
+ Enable the TCP timestamp option, and try to guess the timestamp update
+ frequency and the remote system uptime.
+--- a/docs/french/hping2-fr.8
++++ b/docs/french/hping2-fr.8
+@@ -99,6 +99,8 @@ hping2 \- envoie des paquets TCP/IP (pre
+ ] [
+ .B \-\-tcpexitcode
+ ] [
++.B \-\-tcp-mss
++] [
+ .B \-\-tcp-timestamp
+ ] [
+ .B \-\-tr-stop
+@@ -538,6 +540,9 @@ pouvez le voir les numéros de séquence d
+ .I -b --badcksum
+ Envoie des paquets avec une mauvaise somme de contrôle UDP/TCP
+ .TP
++.I --tcp-mss
++Active l'option TCP MSS et la fixe avec la valeur donnée.
++.TP
+ .I --tcp-timestamp
+ Active l'option TCP timestamp, et essaye de deviner la fréquence de mise Ã
+ jour du timestamp et l'uptime du système distant.
--- /dev/null
+Author: Michael Tautschnig <mt@debian.org>
+Subject: Conflicting types for variable ip_optlen
+ - globals.h: extern char ip_optlen;
+ - main.c: unsigned ip_optlen;
+ Undefined behaviour if the value of ip_optlen exceeds 127 for
+ any architecture with signed char type.
+Bug-Debian: https://bugs.debian.org/688458
+
+--- a/main.c
++++ b/main.c
+@@ -156,7 +156,7 @@ unsigned char
+ lsr [255] = {0},
+ ssr [255] = {0};
+
+-unsigned
++char
+ ip_optlen = 0;
+
+ struct sockaddr_in
--- /dev/null
+Description: Fixes declaration of global variable
+Author: Marcio de Souza Oliveira <marciosouza@debian.org>
+Last-Update: 2020-09-02
+--- a/hping2.h
++++ b/hping2.h
+@@ -357,7 +357,7 @@ struct delaytable_element {
+ int status;
+ };
+
+-volatile struct delaytable_element delaytable[TABLESIZE];
++extern volatile struct delaytable_element delaytable[TABLESIZE];
+
+ /* protos */
+ void nop(void); /* nop */
--- /dev/null
+Description: Fixes replies when icmp-ipid is used
+Author: David Eisner <deisner@gmail.com>
+Last-Update: 2015-12-18
+--- a/waitpacket.c
++++ b/waitpacket.c
+@@ -244,7 +244,8 @@ int recv_icmp(void *packet, size_t size)
+ if ((icmp.type == ICMP_ECHOREPLY ||
+ icmp.type == ICMP_TIMESTAMPREPLY ||
+ icmp.type == ICMP_ADDRESSREPLY) &&
+- icmp.un.echo.id == (getpid() & 0xffff))
++ icmp.un.echo.id == ((icmp_ip_id == DEFAULT_ICMP_IP_ID) ?
++ getpid() & 0xffff : icmp_ip_id) )
+ {
+ int icmp_seq = icmp.un.echo.sequence;
+ int status;
--- /dev/null
+--- a/getifname.c
++++ b/getifname.c
+@@ -59,7 +59,7 @@ int get_if_name(void)
+ known_output_if = 1;
+ if (opt_debug)
+ printf("DEBUG: Output interface address: %s\n",
+- inet_ntoa(sa.sin_addr));
++ inet_ntoa(output_if_addr.sin_addr));
+ } else {
+ fprintf(stderr, "Warning: Unable to guess the output "
+ "interface\n");
PKG_NAME:=https-dns-proxy
PKG_VERSION:=2021-11-22
-PKG_RELEASE:=3
+PKG_RELEASE:=7
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://github.com/aarond10/https_dns_proxy/
config main 'config'
option update_dnsmasq_config '*'
+ option canary_domains_icloud '1'
+ option canary_domains_mozilla '1'
option force_dns '1'
list force_dns_port '53'
list force_dns_port '853'
readonly _FAIL_='\033[0;31m\xe2\x9c\x97\033[0m'
readonly PROG=/usr/sbin/https-dns-proxy
readonly DEFAULT_BOOTSTRAP='1.1.1.1,1.0.0.1,2606:4700:4700::1111,2606:4700:4700::1001,8.8.8.8,8.8.4.4,2001:4860:4860::8888,2001:4860:4860::8844'
-readonly canaryDomains='use-application-dns.net'
-dnsmasqConfig=''; forceDNS=''; forceDNSPorts='';
+readonly canaryDomainsMozilla='use-application-dns.net'
+readonly canaryDomainsiCloud='mask.icloud.com mask-h2.icloud.com'
+canaryDomains=
+canary_domains_icloud=
+canary_domains_mozilla=
+update_dnsmasq_config=
+force_dns=
+force_dns_port=
str_contains() { [ -n "$1" ] &&[ -n "$2" ] && [ "${1//$2}" != "$1" ]; }
is_mac_address() { expr "$1" : '[0-9A-F][0-9A-F]:[0-9A-F][0-9A-F]:[0-9A-F][0-9A-F]:[0-9A-F][0-9A-F]:[0-9A-F][0-9A-F]:[0-9A-F][0-9A-F]$' >/dev/null; }
output_fail() { output "$_FAIL_"; }
output_failn() { output "${_FAIL_}\\n"; }
uci_add_list_if_new() {
- local key="$1" value="$2" i
- if [ -z "$value" ]; then
- value="${key#*=}"
- key="${key%=*}"
- fi
- [ -n "$key" ] && [ -n "$value" ] || return 1
- for i in $(uci -q get "$key"); do
- [ "$i" = "$value" ] && return 0
+ local PACKAGE="$1"
+ local CONFIG="$2"
+ local OPTION="$3"
+ local VALUE="$4"
+ local i
+ [ -n "$PACKAGE" ] && [ -n "$CONFIG" ] && [ -n "$OPTION" ] && [ -n "$VALUE" ] || return 1
+ for i in $(uci_get "$PACKAGE" "$CONFIG" "$OPTION"); do
+ [ "$i" = "$VALUE" ] && return 0
done
- uci -q add_list "${key}=${value}"
+ uci_add_list "$PACKAGE" "$CONFIG" "$OPTION" "$VALUE"
+}
+uci_changes() {
+ local PACKAGE="$1"
+ local CONFIG="$2"
+ local OPTION="$3"
+ /sbin/uci ${UCI_CONFIG_DIR:+-c $UCI_CONFIG_DIR} changes "$PACKAGE${CONFIG:+.$CONFIG}${OPTION:+.$OPTION}"
}
dnsmasq_restart() { [ -x /etc/init.d/dnsmasq ] || return 0; /etc/init.d/dnsmasq restart >/dev/null 2>&1; }
procd_set_param stderr 1
procd_set_param stdout 1
procd_set_param respawn
- if [ "$forceDNS" -ne 0 ]; then
+ if [ "$force_dns" -ne 0 ]; then
procd_open_data
json_add_array firewall
- for p in $forceDNSPorts; do
+ for p in $force_dns_port; do
if netstat -tuln | grep 'LISTEN' | grep ":${p}" >/dev/null 2>&1 || [ "$p" = '53' ]; then
json_add_object ''
json_add_string type redirect
if [ "$?" ]; then
config_get listen_addr "$cfg" 'listen_addr' '127.0.0.1'
config_get listen_port "$cfg" 'listen_port' "$port"
- if [ "$dnsmasqConfig" = '*' ]; then
+ if [ "$update_dnsmasq_config" = '*' ]; then
config_load 'dhcp'
config_foreach dnsmasq_doh_server 'dnsmasq' 'add' "${listen_addr}" "${listen_port}"
- elif [ -n "$dnsmasqConfig" ]; then
- for i in $dnsmasqConfig; do
- if [ -n "$(uci -q get "dhcp.@dnsmasq[$i]")" ]; then
+ elif [ -n "$update_dnsmasq_config" ]; then
+ for i in $update_dnsmasq_config; do
+ if [ -n "$(uci_get 'dhcp' "@dnsmasq[$i]")" ]; then
dnsmasq_doh_server "@dnsmasq[$i]" 'add' "${listen_addr}" "${listen_port}"
- elif [ -n "$(uci -q get "dhcp.${i}")" ]; then
+ elif [ -n "$(uci_get 'dhcp' "$i")" ]; then
dnsmasq_doh_server "${i}" 'add' "${listen_addr}" "${listen_port}"
fi
done
fi
output_ok
port="$((port+1))"
- forceDNS=0
+ force_dns=0
else
output_fail
fi
local port=5053
output "Starting $serviceName "
config_load "$packageName"
- config_get dnsmasqConfig 'config' 'update_dnsmasq_config' '*'
- config_get_bool forceDNS 'config' 'force_dns' '1'
- config_get forceDNSPorts 'config' 'force_dns_port' '53 853'
+ config_get update_dnsmasq_config 'config' 'update_dnsmasq_config' '*'
+ config_get_bool canary_domains_icloud 'config' 'canary_domains_icloud' '1'
+ config_get_bool canary_domains_mozilla 'config' 'canary_domains_mozilla' '1'
+ config_get_bool force_dns 'config' 'force_dns' '1'
+ config_get force_dns_port 'config' 'force_dns_port' '53 853'
+ if [ "$canary_domains_icloud" -ne 0 ]; then
+ canaryDomains="${canaryDomains:+$canaryDomains }${canaryDomainsiCloud}"
+ fi
+ if [ "$canary_domains_mozilla" -ne 0 ]; then
+ canaryDomains="${canaryDomains:+$canaryDomains }${canaryDomainsMozilla}"
+ fi
dhcp_backup 'create'
config_load "$packageName"
config_foreach start_instance "$packageName"
- if [ -n "$(uci -q changes dhcp)" ]; then
- uci -q commit dhcp
+ if [ -n "$(uci_changes dhcp)" ]; then
+ uci_commit 'dhcp'
dnsmasq_restart
fi
output "\\n"
local s=0
output "Stopping $serviceName "
config_load "$packageName"
- config_get dnsmasqConfig 'config' 'update_dnsmasq_config' '*'
+ config_get update_dnsmasq_config 'config' 'update_dnsmasq_config' '*'
dhcp_backup 'restore'
- if [ -n "$(uci -q changes dhcp)" ]; then
- uci -q commit dhcp
+ if [ -n "$(uci_changes dhcp)" ]; then
+ uci_commit 'dhcp'
dnsmasq_restart || s=1
fi
# shellcheck disable=SC2015
local cfg="$1" param="$2" address="${3:-127.0.0.1}" port="$4" i
case "$param" in
add)
- if [ "$forceDNS" -ne 0 ]; then
+ if [ "$force_dns" -ne 0 ]; then
for i in $canaryDomains; do
- uci_add_list_if_new "dhcp.${cfg}.server" "/${i}/"
+ uci_add_list_if_new 'dhcp' "$cfg" 'server' "/${i}/"
done
fi
case $address in
0.0.0.0|::ffff:0.0.0.0) address='127.0.0.1';;
::) address='::1';;
esac
- uci_add_list_if_new "dhcp.${cfg}.server" "${address}#${port}"
+ uci_add_list_if_new 'dhcp' "$cfg" 'server' "${address}#${port}"
;;
remove)
eval "$(ubus call service list "{ 'verbose': true, 'name': '$packageName' }" | jsonfilter -F '# ' -e 'TUPLES=@[*].instances[*].command[4,6]')"
for i in $TUPLES; do
- uci -q del_list "dhcp.${cfg}.server=${i}"
+ uci_remove_list 'dhcp' "$cfg" 'server' "$i"
+ done
+ for i in $canaryDomains; do
+ uci_remove_list 'dhcp' "$cfg" 'server' "/${i}/"
done
;;
esac
dnsmasq_create_server_backup() {
local cfg="$1" i
- uci -q get "dhcp.${cfg}" >/dev/null || return 1
- if ! uci -q get "dhcp.${cfg}.doh_backup_noresolv" >/dev/null; then
- if [ -z "$(uci -q get "dhcp.${cfg}.noresolv")" ]; then
- uci -q set "dhcp.${cfg}.noresolv=1"
- uci -q set "dhcp.${cfg}.doh_backup_noresolv=-1"
- elif [ "$(uci -q get "dhcp.${cfg}.noresolv")" != "1" ]; then
- uci -q set "dhcp.${cfg}.noresolv=1"
- uci -q set "dhcp.${cfg}.doh_backup_noresolv=0"
+ [ -n "$(uci_get 'dhcp' "$cfg")" ] || return 1
+ if [ -z "$(uci_get 'dhcp' "$cfg" 'doh_backup_noresolv')" ]; then
+ if [ -z "$(uci_get 'dhcp' "$cfg" 'noresolv')" ]; then
+ uci_set 'dhcp' "$cfg" 'doh_backup_noresolv' '-1'
+ else
+ uci_set 'dhcp' "$cfg" 'doh_backup_noresolv' "$(uci_get 'dhcp' "$cfg" noresolv)"
fi
+ uci_set 'dhcp' "$cfg" 'noresolv' 1
fi
- if ! uci -q get "dhcp.${cfg}.doh_backup_server" >/dev/null; then
- if [ -z "$(uci -q get "dhcp.${cfg}.server")" ]; then
- uci -q add_list "dhcp.${cfg}.doh_backup_server="
+ if [ -z "$(uci_get 'dhcp' "$cfg" 'doh_backup_server')" ]; then
+ if [ -z "$(uci_get 'dhcp' "$cfg" 'server')" ]; then
+ uci_add_list 'dhcp' "$cfg" 'doh_backup_server' ""
fi
- for i in $(uci -q get "dhcp.${cfg}.server"); do
- uci -q add_list "dhcp.${cfg}.doh_backup_server=$i"
+ for i in $(uci_get 'dhcp' "$cfg" 'server'); do
+ uci_add_list 'dhcp' "$cfg" 'doh_backup_server' "$i"
if [ "$i" = "$(echo "$i" | tr -d /\#)" ]; then
- uci -q del_list "dhcp.${cfg}.server=$i"
+ uci_remove_list 'dhcp' "$cfg" 'server' "$i"
fi
done
fi
dnsmasq_restore_server_backup() {
local cfg="$1" i
- uci -q get "dhcp.${cfg}" >/dev/null || return 0
- if uci -q get "dhcp.${cfg}.doh_backup_noresolv" >/dev/null; then
- if [ "$(uci -q get "dhcp.${cfg}.doh_backup_noresolv")" = "0" ]; then
- uci -q set "dhcp.${cfg}.noresolv=0"
+ [ -n "$(uci_get 'dhcp' "$cfg")" ] || return 0
+ if [ -n "$(uci_get 'dhcp' "$cfg" 'doh_backup_noresolv')" ]; then
+ if [ "$(uci_get 'dhcp' "$cfg" 'doh_backup_noresolv')" = "-1" ]; then
+ uci_remove 'dhcp' "$cfg" 'noresolv'
else
- uci -q del "dhcp.${cfg}.noresolv"
+ uci_set 'dhcp' "$cfg" 'noresolv' "$(uci_get 'dhcp' "$cfg" 'doh_backup_noresolv')"
fi
- uci -q del "dhcp.${cfg}.doh_backup_noresolv"
+ uci_remove 'dhcp' "$cfg" 'doh_backup_noresolv'
fi
- if uci -q get "dhcp.${cfg}.doh_backup_server" >/dev/null; then
+ if [ -n "$(uci_get 'dhcp' "$cfg" 'doh_backup_server')" ]; then
dnsmasq_doh_server "$cfg" 'remove'
- for i in $(uci -q get "dhcp.${cfg}.doh_backup_server"); do
- uci_add_list_if_new "dhcp.${cfg}.server" "$i"
+ for i in $(uci_get 'dhcp' "$cfg" 'doh_backup_server'); do
+ uci_add_list_if_new 'dhcp' "$cfg" 'server' "$i"
done
- uci -q del "dhcp.${cfg}.doh_backup_server"
+ uci_remove 'dhcp' "$cfg" 'doh_backup_server'
fi
}
config_load 'dhcp'
case "$1" in
create)
- if [ "$dnsmasqConfig" = "*" ]; then
+ if [ "$update_dnsmasq_config" = "*" ]; then
config_foreach dnsmasq_create_server_backup 'dnsmasq'
- elif [ -n "$dnsmasqConfig" ]; then
- for i in $dnsmasqConfig; do
- if [ -n "$(uci -q get "dhcp.@dnsmasq[$i]")" ]; then
+ elif [ -n "$update_dnsmasq_config" ]; then
+ for i in $update_dnsmasq_config; do
+ if [ -n "$(uci_get 'dhcp' "@dnsmasq[$i]")" ]; then
dnsmasq_create_server_backup "@dnsmasq[$i]"
- elif [ -n "$(uci -q get "dhcp.${i}")" ]; then
+ elif [ -n "$(uci_get 'dhcp' "$i")" ]; then
dnsmasq_create_server_backup "$i"
fi
done
create_empty_zone "$mynet.in-addr.arpa"
done
+ local need_reload=
+
+ cp -p $conf_local_file ${conf_local_file}_
+ cmp -s $conf_local_file ${conf_local_file}_ || need_reload=1
+ rm -f ${conf_local_file}_
+
cat <<EOF > $conf_local_file
zone "$domain" {
type master;
EOF
done
- /etc/init.d/named reload
+ [ -n "$need_reload" ] && /etc/init.d/named reload
sleep 1
cat <<EOF
include $(TOPDIR)/rules.mk
PKG_NAME:=knot-resolver
-PKG_VERSION:=5.5.1
+PKG_VERSION:=5.5.3
PKG_RELEASE:=1
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.xz
PKG_SOURCE_URL:=https://secure.nic.cz/files/knot-resolver
-PKG_HASH:=9bad1edfd6631446da2d2331bd869887d7fe502f6eeaf62b2e43e2c113f02b6d
+PKG_HASH:=a38f57c68b7d237d662784d8406e6098aad66a148f44dcf498d1e9664c5fed2d
PKG_MAINTAINER:=Jan Pavlinec <jan.pavlinec1@gmail.com>
PKG_LICENSE:=GPL-3.0-later
This is just workaround. It should be removed after proper fixing in upstream.
-Patch fixes problem with lmdb pkg config. It also adds workaround for get_option
+Patch fixes problem with lmdb pkg config. It also adds workaround for get_option
which is now not propagated in OpenWrt meson host package.
--- a/meson.build
+++ b/meson.build
It reintroduces a fix which enables policy related hack (knot/knot-resolver#205 (comment 94566) )
--- a/modules/policy/policy.lua
+++ b/modules/policy/policy.lua
-@@ -1098,7 +1098,7 @@ policy.layer = {
+@@ -1093,7 +1093,7 @@ policy.layer = {
if bit.band(state, bit.bor(kres.FAIL, kres.DONE)) ~= 0 then return state end
local qry = req:initial() -- same as :current() but more descriptive
return policy.evaluate(policy.rules, req, qry, state)
include $(TOPDIR)/rules.mk
PKG_NAME:=lighttpd
-PKG_VERSION:=1.4.66
-PKG_RELEASE:=1
+PKG_VERSION:=1.4.67
+PKG_RELEASE:=2
# release candidate ~rcX testing; remove for release
-#PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-1.4.66
+#PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-1.4.67
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.xz
PKG_SOURCE_URL:=https://download.lighttpd.net/lighttpd/releases-1.4.x
-PKG_HASH:=47ac6e60271aa0196e65472d02d019556dc7c6d09df3b65df2c1ab6866348e3b
+PKG_HASH:=7e04d767f51a8d824b32e2483ef2950982920d427d1272ef4667f49d6f89f358
PKG_MAINTAINER:=W. Michael Petullo <mike@flyn.org>
PKG_LICENSE:=BSD-3-Clause
$(eval $(call BuildPlugin,alias,Directory alias,,30))
$(eval $(call BuildPlugin,cgi,CGI,,30))
$(eval $(call BuildPlugin,deflate,Compress dynamic output,+PACKAGE_lighttpd-mod-deflate:zlib,30))
-$(eval $(call BuildPlugin,evasive,Evasive,,30))
$(eval $(call BuildPlugin,evhost,Enhanced Virtual-Hosting,,30))
$(eval $(call BuildPlugin,expire,Expire,,30))
$(eval $(call BuildPlugin,extforward,Extract client,,30))
$(eval $(call BuildPlugin,rewrite,URL rewriting,+PACKAGE_lighttpd-mod-rewrite:libpcre2,30))
$(eval $(call BuildPlugin,rrdtool,RRDtool,,30))
$(eval $(call BuildPlugin,scgi,SCGI,,30))
-$(eval $(call BuildPlugin,secdownload,Secure and fast download,+PACKAGE_lighttpd-mod-secdownload:libnettle,30))
$(eval $(call BuildPlugin,setenv,Environment variable setting,,30))
$(eval $(call BuildPlugin,simple_vhost,Simple virtual hosting,,30))
$(eval $(call BuildPlugin,sockproxy,sockproxy,,30))
$(eval $(call BuildPlugin,ssi,SSI,,30))
$(eval $(call BuildPlugin,staticfile,staticfile,,30))
$(eval $(call BuildPlugin,status,Server status display,,30))
-$(eval $(call BuildPlugin,uploadprogress,Upload Progress,,30))
$(eval $(call BuildPlugin,userdir,User directory,,30))
-$(eval $(call BuildPlugin,usertrack,User tracking,+PACKAGE_lighttpd-mod-usertrack:libnettle,30))
$(eval $(call BuildPlugin,vhostdb,Virtual Host Database,,30))
$(eval $(call BuildPlugin,vhostdb_dbi,Virtual Host Database (DBI),lighttpd-mod-vhostdb +PACKAGE_lighttpd-mod-vhostdb_dbi:libdbi,30))
$(eval $(call BuildPlugin,vhostdb_ldap,Virtual Host Database (LDAP),lighttpd-mod-vhostdb +PACKAGE_lighttpd-mod-vhostdb_ldap:libopenldap,30))
include $(TOPDIR)/rules.mk
PKG_NAME:=mdio-tools
-PKG_RELEASE:=2
+PKG_RELEASE:=1
PKG_SOURCE_URL:=https://github.com/wkz/mdio-tools
PKG_SOURCE_PROTO:=git
-PKG_SOURCE_VERSION:=1.1.1
-PKG_MIRROR_HASH:=aec5a5d8031de166a5ff38dc5442cfbf5de002b11c0a256ea4a03ae047040d03
+PKG_SOURCE_VERSION:=1.2.0
+PKG_MIRROR_HASH:=396fd48662e0f4182b9c9db6e34d71dafd9e557d5021e8dc28edfc99d6ebc387
PKG_FIXUP:=autoreconf
+++ /dev/null
-From 7da5b168152987806e295ed3b7e97b77ffa93cb9 Mon Sep 17 00:00:00 2001
-From: Robert Marko <robert.marko@sartura.hr>
-Date: Tue, 7 Jun 2022 13:34:40 +0200
-Subject: [PATCH] mdio: bench: make time_t prints portable
-
-Using %ld to print time_t will work fine on 64 bit platforms, however
-now musl libc defines time_t to be 64 even on 32bit platforms.
-
-This will make the compilation fail with:
-mdio.c: In function 'mdio_common_bench_cb':
-mdio.c:555:27: error: format '%ld' expects argument of type 'long int', but argument 2 has type 'time_t' {aka 'long long int'} [-Werror=format=]
- 555 | printf("%ld.%2.2lds\n", end.tv_sec, end.tv_nsec / 10000000);
- | ~~^ ~~~~~~~~~~
- | | |
- | long int time_t {aka long long int}
- | %lld
-
-So, replace the %ld in prints with the PRId64 from inttypes.h and cast
-tv_sec and tv_nsec to int64_t.
-
-This makes it compile and work on 32 bit ARMv7 fine.
-
-Signed-off-by: Robert Marko <robert.marko@sartura.hr>
----
- src/mdio/mdio.c | 8 ++++----
- 1 file changed, 4 insertions(+), 4 deletions(-)
-
---- a/src/mdio/mdio.c
-+++ b/src/mdio/mdio.c
-@@ -552,13 +552,13 @@ int mdio_common_bench_cb(uint32_t *data,
- printf("Performed 1000 reads in ");
-
- if (end.tv_sec)
-- printf("%ld.%2.2lds\n", end.tv_sec, end.tv_nsec / 10000000);
-+ printf("%"PRId64".%2.2"PRId64"s\n", (int64_t)end.tv_sec, (int64_t)end.tv_nsec / 10000000);
- else if (end.tv_nsec > 1000000)
-- printf("%ldms\n", end.tv_nsec / 1000000);
-+ printf("%"PRId64"ms\n", (int64_t)end.tv_nsec / 1000000);
- else if (end.tv_nsec > 1000)
-- printf("%ldus\n", end.tv_nsec / 1000);
-+ printf("%"PRId64"us\n", (int64_t)end.tv_nsec / 1000);
- else
-- printf("%ldns\n", end.tv_nsec);
-+ printf("%"PRId64"ns\n", (int64_t)end.tv_nsec);
-
- return err;
- }
include $(TOPDIR)/rules.mk
PKG_NAME:=mosquitto
-PKG_VERSION:=2.0.13
+PKG_VERSION:=2.0.15
PKG_RELEASE:=1
PKG_LICENSE:=EPL-2.0
PKG_LICENSE_FILES:=LICENSE.txt
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://mosquitto.org/files/source/
-PKG_HASH:=9cf83a558f1588e8798edd8647b1632af88e88ef2cd45054e903c517df5bd2cd
+PKG_HASH:=4735b1d32e3f91c7a8896741d88a3022e89730a1ee897946decfa0df27039ac6
include $(INCLUDE_DIR)/package.mk
option use_uci 0
config mosquitto mosquitto
+
+config persistence persistence
include $(TOPDIR)/rules.mk
PKG_NAME:=mwan3
-PKG_VERSION:=2.11.1
-PKG_RELEASE:=2
+PKG_VERSION:=2.11.2
+PKG_RELEASE:=3
PKG_MAINTAINER:=Florian Eckert <fe@dev.tdt.de>, \
Aaron Goodman <aaronjg@alumni.stanford.edu>
PKG_LICENSE:=GPL-2.0
config interface 'wan'
option enabled '1'
- list track_ip '8.8.4.4'
- list track_ip '8.8.8.8'
+ list track_ip '1.0.0.1'
+ list track_ip '1.1.1.1'
list track_ip '208.67.222.222'
list track_ip '208.67.220.220'
option family 'ipv4'
config interface 'wan6'
option enabled '0'
- list track_ip '2001:4860:4860::8844'
- list track_ip '2001:4860:4860::8888'
+ list track_ip '2606:4700:4700::1001'
+ list track_ip '2606:4700:4700::1111'
list track_ip '2620:0:ccd::2'
list track_ip '2620:0:ccc::2'
option family 'ipv6'
config interface 'wanb'
option enabled '0'
- list track_ip '8.8.4.4'
- list track_ip '8.8.8.8'
+ list track_ip '1.0.0.1'
+ list track_ip '1.1.1.1'
list track_ip '208.67.222.222'
list track_ip '208.67.220.220'
option family 'ipv4'
config interface 'wanb6'
option enabled '0'
- list track_ip '2001:4860:4860::8844'
- list track_ip '2001:4860:4860::8888'
+ list track_ip '2606:4700:4700::1001'
+ list track_ip '2606:4700:4700::1111'
list track_ip '2620:0:ccd::2'
list track_ip '2620:0:ccc::2'
option family 'ipv6'
# remove "linkdown", expiry and source based routing modifiers from route lines
config_get_bool source_routing globals source_routing 0
[ $source_routing -eq 1 ] && unset source_routing
- MWAN3_ROUTE_LINE_EXP="s/linkdown //; s/expires [0-9]\+sec//; s/error [0-9]\+//; ${source_routing:+s/default\(.*\) from [^ ]*/default\1/;} p"
+ MWAN3_ROUTE_LINE_EXP="s/offload//; s/linkdown //; s/expires [0-9]\+sec//; s/error [0-9]\+//; ${source_routing:+s/default\(.*\) from [^ ]*/default\1/;} p"
# mark mask constants
bitcnt=$(mwan3_count_one_bits MMX_MASK)
PKG_NAME:=netsniff-ng
PKG_VERSION:=0.6.8
-PKG_RELEASE:=1
+PKG_RELEASE:=2
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://codeload.github.com/netsniff-ng/netsniff-ng/tar.gz/v$(PKG_VERSION)?
PKG_USE_MIPS16:=0
PKG_BUILD_PARALLEL:=1
+PKG_CONFIG_DEPENDS:= \
+ CONFIG_PACKAGE_mausezahn \
+ CONFIG_PACKAGE_netsniff-ng
+
include $(INCLUDE_DIR)/package.mk
-define Package/netsniff-ng
+define Package/netsniff-ng/Default
SECTION:=net
CATEGORY:=Network
- DEPENDS:=+libpthread +libpcap +libncurses +zlib +liburcu +libsodium +libnetfilter-conntrack
- TITLE:=netsniff-ng
+ DEPENDS:=+libpthread +libpcap
URL:=https://github.com/netsniff-ng/netsniff-ng
endef
+define Package/netsniff-ng
+ $(call Package/netsniff-ng/Default)
+ DEPENDS+=+libncurses +libnetfilter-conntrack +libsodium +liburcu +zlib
+ TITLE:=netsniff-ng
+endef
+
define Package/netsniff-ng/description
netsniff-ng is a free, performant Linux network analyzer and
networking toolkit. If you will, the Swiss army knife for network
packets.
endef
+define Package/mausezahn
+ $(call Package/netsniff-ng/Default)
+ DEPENDS+=+libcli +libnet-1.2.x
+ TITLE:=mausezahn
+endef
+
+define Package/mausezahn/description
+  Mausezahn is a traffic generator written in C which allows sending
+ nearly every possible and impossible packet. It is mainly used
+ to test VoIP or multicast networks.
+endef
+
define Build/Configure
( cd $(PKG_BUILD_DIR); \
CC="$(TARGET_CC)" \
$(INSTALL_BIN) $(PKG_BUILD_DIR)/curvetun/curvetun $(1)/usr/sbin/
endef
+define Package/mausezahn/install
+ $(INSTALL_DIR) $(1)/usr/sbin
+ $(INSTALL_BIN) $(PKG_BUILD_DIR)/mausezahn/mausezahn $(1)/usr/sbin/
+endef
+
$(eval $(call BuildPackage,netsniff-ng))
+$(eval $(call BuildPackage,mausezahn))
PKG_NAME:=nut
PKG_VERSION:=2.8.0
-PKG_RELEASE:=1
+PKG_RELEASE:=$(AUTORELEASE)
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=http://www.networkupstools.org/source/2.8/
--with$(if $(CONFIG_PACKAGE_nut-web-cgi),,out)-cgi \
--without-ipmi \
--without-freeipmi \
+ --without-linux-i2c \
--$(if $(CONFIG_NUT_SSL),with,without)-ssl $(if $(CONFIG_NUT_SSL),--with-openssl) \
--without-libltdl \
--without-macosx_ups \
--- /dev/null
+From cafd77993ec5e16634b774b65bf6da9b34a21fc5 Mon Sep 17 00:00:00 2001
+From: Jim Klimov <jimklimov+nut@gmail.com>
+Date: Wed, 31 Aug 2022 11:24:19 +0200
+Subject: [PATCH] clients/upsclient.h: ensure time_t is defined
+
+
+--- a/clients/upsclient.h
++++ b/clients/upsclient.h
+@@ -41,6 +41,18 @@
+ #include <limits.h>
+ #endif
+
++/* Not including NUT timehead.h because this is part of end-user API */
++#ifdef TIME_WITH_SYS_TIME
++# include <sys/time.h>
++# include <time.h>
++#else
++# ifdef HAVE_SYS_TIME_H
++# include <sys/time.h>
++# else
++# include <time.h>
++# endif
++#endif
++
+ #ifdef __cplusplus
+ /* *INDENT-OFF* */
+ extern "C" {
PKG_NAME:=openvpn
PKG_VERSION:=2.5.7
-PKG_RELEASE:=2
+PKG_RELEASE:=3
PKG_SOURCE_URL:=\
https://build.openvpn.net/downloads/releases/ \
PKG_NAME:=pagekitec
PKG_REV:=0.91.201110
PKG_VERSION:=$(PKG_REV)C
-PKG_RELEASE:=1
+PKG_RELEASE:=2
PKG_LICENSE:=Apache-2.0
PKG_SOURCE_SUBDIR:=$(PKG_NAME)-$(PKG_VERSION)
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.xz
--- /dev/null
+From 37be74afa0e49749796591cd6f3c87536a8e8c3f Mon Sep 17 00:00:00 2001
+From: Lukas Zeller <luz@plan44.ch>
+Date: Sat, 4 Jun 2022 19:27:07 +0200
+Subject: [PATCH 1/2] Fix to work ok with 64 bit time_t (time64)
+
+In several places, time_t arguments were formatted using the "%d"
+format specifier, which is not compatible with 64bit time_t
+(as for example in musl >=1.2.0)
+
+This commit now uses "%lld" with an explicit cast of the
+arguments to long long. The cast ensures that the code is still
+safe to compile with 32 bit time_t (but introduces a slight
+performance penalty in that case).
+---
+ libpagekite/pklogging.c | 10 +++++-----
+ libpagekite/pkmanager.c | 8 ++++----
+ libpagekite/pkproto.c | 4 ++--
+ 3 files changed, 11 insertions(+), 11 deletions(-)
+
+diff --git a/libpagekite/pklogging.c b/libpagekite/pklogging.c
+index 6129c30..1b1835c 100644
+--- a/libpagekite/pklogging.c
++++ b/libpagekite/pklogging.c
+@@ -208,9 +208,9 @@ void pk_dump_conn(char* prefix, struct pk_conn* conn)
+ if (conn->sockfd < 0) return;
+
+ pk_log(PK_LOG_MANAGER_DEBUG, "%s/sockfd: %d", prefix, conn->sockfd);
+- pk_log(PK_LOG_MANAGER_DEBUG, "%s/activity: %x (%ds ago)", prefix,
+- conn->activity,
+- pk_time(0) - conn->activity);
++ pk_log(PK_LOG_MANAGER_DEBUG, "%s/activity: %llx (%llds ago)", prefix,
++ (long long)conn->activity,
++ (long long)(pk_time(0) - conn->activity));
+ pk_log(PK_LOG_MANAGER_DEBUG, "%s/read_bytes: %d", prefix, conn->read_bytes);
+ pk_log(PK_LOG_MANAGER_DEBUG, "%s/read_kb: %d", prefix, conn->read_kb);
+ pk_log(PK_LOG_MANAGER_DEBUG, "%s/sent_kb: %d", prefix, conn->sent_kb);
+@@ -281,8 +281,8 @@ void pk_dump_state(struct pk_manager* pkm)
+ pk_log(LL, "pk_manager/kite_max: %d", pkm->kite_max);
+ pk_log(LL, "pk_manager/tunnel_max: %d", pkm->tunnel_max);
+ pk_log(LL, "pk_manager/be_conn_max: %d", pkm->be_conn_max);
+- pk_log(LL, "pk_manager/last_world_update: %x", pkm->last_world_update);
+- pk_log(LL, "pk_manager/next_tick: %d", pkm->next_tick);
++ pk_log(LL, "pk_manager/last_world_update: %llx", (long long)pkm->last_world_update);
++ pk_log(LL, "pk_manager/next_tick: %lld", (long long)pkm->next_tick);
+ pk_log(LL, "pk_manager/enable_timer: %d", 0 < pkm->enable_timer);
+ pk_log(LL, "pk_manager/fancy_pagekite_net_rejection_url: %s", pkm->fancy_pagekite_net_rejection_url);
+ pk_log(LL, "pk_manager/want_spare_frontends: %d", pkm->want_spare_frontends);
+diff --git a/libpagekite/pkmanager.c b/libpagekite/pkmanager.c
+index 2e526c5..007f078 100644
+--- a/libpagekite/pkmanager.c
++++ b/libpagekite/pkmanager.c
+@@ -1070,8 +1070,8 @@ static void pkm_tick_cb(EV_P_ ev_async* w, int revents)
+ pkm->timer.repeat = pkm->next_tick;
+ ev_timer_again(pkm->loop, &(pkm->timer));
+ pk_log(PK_LOG_MANAGER_INFO,
+- "Tick! [repeating=%s, next=%d, status=%d, tunnels=%d, v=%s]",
+- pkm->enable_timer ? "yes" : "no", pkm->next_tick,
++ "Tick! [repeating=%s, next=%lld, status=%d, tunnels=%d, v=%s]",
++ pkm->enable_timer ? "yes" : "no", (long long)pkm->next_tick,
+ pkm->status, pk_state.live_tunnels, PK_VERSION);
+
+ /* We slow down exponentially by default... */
+@@ -1122,8 +1122,8 @@ static void pkm_tick_cb(EV_P_ ev_async* w, int revents)
+ fe->last_ping = now;
+ pkc_write(&(fe->conn), ping, pingsize);
+ pk_log(PK_LOG_TUNNEL_DATA,
+- "%d: Sent PING (idle=%ds>%ds)",
+- fe->conn.sockfd, now - fe->conn.activity, now - inactive);
++ "%d: Sent PING (idle=%llds>%llds)",
++ fe->conn.sockfd, (long long)(now - fe->conn.activity), (long long)(now - inactive));
+ next_tick = 1 + pkm->housekeeping_interval_min;
+ }
+ }
+diff --git a/libpagekite/pkproto.c b/libpagekite/pkproto.c
+index fda41d2..02a47c7 100644
+--- a/libpagekite/pkproto.c
++++ b/libpagekite/pkproto.c
+@@ -577,7 +577,7 @@ int pk_make_salt(char* salt) {
+ char* pk_sign(const char* token, const char* secret, time_t ts,
+ const char* payload, int length, char *buffer)
+ {
+- char tbuffer[128], tsbuf[16], scratch[10240];
++ char tbuffer[128], tsbuf[32], scratch[10240];
+
+ PK_TRACE_FUNCTION;
+
+@@ -606,7 +606,7 @@ char* pk_sign(const char* token, const char* secret, time_t ts,
+
+ /* Optionally embed a timestamp to the resolution of 10 minutes */
+ if (ts > 0) {
+- sprintf(tsbuf, "%lx", ts / 600);
++ sprintf(tsbuf, "%llx", (long long)(ts / 600));
+ buffer[0] = 't';
+ }
+ else tsbuf[0] = '\0';
+--
+2.37.3
+
--- /dev/null
+From 3dc6bd45838bb170ee4d95f014f0588c5d084d31 Mon Sep 17 00:00:00 2001
+From: Karl Palsson <karlp@etactica.com>
+Date: Mon, 19 Sep 2022 17:03:58 +0000
+Subject: [PATCH 2/2] pagekitec: status files: correctly support 64bit time
+
+This explicitly casts to long long, as in #78, which may or may not
+be desired globally. It's certainly required in OpenWrt with musl 1.2+
+
+See also: https://github.com/pagekite/libpagekite/pull/78
+
+Fixes: e8cfa5f (incomplete)
+Signed-off-by: Karl Palsson <karlp@etactica.com>
+---
+ contrib/backends/pagekitec.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/contrib/backends/pagekitec.c b/contrib/backends/pagekitec.c
+index ce39e30..76a78ac 100644
+--- a/contrib/backends/pagekitec.c
++++ b/contrib/backends/pagekitec.c
+@@ -54,7 +54,7 @@ Note: For alternate license terms, see the file COPYING.md.
+ " \"pagekitec_status_code\": %d,\n" \
+ " \"pagekitec_pid\": %d,\n" \
+ " \"pagekitec_update_min_interval\": %d,\n" \
+- " \"pagekitec_update_ts\": %ld\n" \
++ " \"pagekitec_update_ts\": %lld\n" \
+ "}\n")
+
+ /* Enable this format using -Y text:/path/to/file/ */
+@@ -64,7 +64,7 @@ Note: For alternate license terms, see the file COPYING.md.
+ "pagekitec_status_code: %d\n" \
+ "pagekitec_pid: %d\n" \
+ "pagekitec_update_min_interval: %d\n" \
+- "pagekitec_update_ts: %ld\n")
++ "pagekitec_update_ts: %lld\n")
+
+
+ pagekite_mgr m;
+@@ -153,7 +153,7 @@ int summarize_status(const char* fpath, const char* format, const char *status_m
+ }
+
+ fprintf(fd, format,
+- PK_VERSION, status_msg, status, getpid(), STATUS_MIN_INTERVAL, now);
++ PK_VERSION, status_msg, status, getpid(), STATUS_MIN_INTERVAL, (long long)now);
+
+ if (0 == ftruncate(fileno(fd), ftell(fd))) fflush(fd);
+ fclose(fd);
+--
+2.37.3
+
include $(TOPDIR)/rules.mk
PKG_NAME:=pdns-recursor
-PKG_VERSION:=4.7.2
+PKG_VERSION:=4.7.3
PKG_RELEASE:=$(AUTORELEASE)
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.bz2
PKG_SOURCE_URL:=https://downloads.powerdns.com/releases/
-PKG_HASH:=bdb4190790fe759778d6f0515afbbcc0a28b3e7e1b83c570caaf38419d57820d
+PKG_HASH:=206d766cc8f0189f79d69af64d8d937ecc61a4d13e8ea6594d78fe30e61405f2
PKG_MAINTAINER:=Peter van Dijk <peter.van.dijk@powerdns.com>
PKG_LICENCE:=GPL-2.0-only
include $(TOPDIR)/rules.mk
PKG_NAME:=samba
-PKG_VERSION:=4.14.12
+PKG_VERSION:=4.14.14
PKG_RELEASE:=$(AUTORELEASE)
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
http://www.nic.funet.fi/index/samba/pub/samba/stable/ \
http://samba.mirror.bit.nl/samba/ftp/stable/ \
https://download.samba.org/pub/samba/stable/
-PKG_HASH:=155d9c2dfb06a18104422987590858bfe5e9783ebebe63882e7e7f07eaaa512d
+PKG_HASH:=abd5e9e6aa45e55114b188ba189ebdfc8fd3d7718d43f749e477ce7f791e5519
PKG_LICENSE:=GPL-3.0-only
PKG_LICENSE_FILES:=COPYING
Checking whether the realpath function allows a NULL argument: OK
Checking for ftruncate extend: OK
getcwd takes a NULL argument: OK
+Checking for readlink breakage: OK
Checking whether setreuid is available: NO
Checking whether setresuid is available: NO
Checking whether seteuid is available: NO
+Checking for readlink breakage: OK
Checking whether the realpath function allows a NULL argument: OK
Checking for ftruncate extend: OK
getcwd takes a NULL argument: OK
+Checking for readlink breakage: OK
Checking whether the realpath function allows a NULL argument: OK
Checking for ftruncate extend: OK
getcwd takes a NULL argument: OK
+Checking for readlink breakage: OK
Checking whether the realpath function allows a NULL argument: OK
Checking for ftruncate extend: OK
getcwd takes a NULL argument: OK
+Checking for readlink breakage: OK
Checking whether the realpath function allows a NULL argument: OK
Checking for ftruncate extend: OK
getcwd takes a NULL argument: OK
+Checking for readlink breakage: OK
Checking whether the realpath function allows a NULL argument: OK
Checking for ftruncate extend: OK
getcwd takes a NULL argument: OK
+Checking for readlink breakage: OK
Checking whether the realpath function allows a NULL argument: OK
Checking for ftruncate extend: OK
getcwd takes a NULL argument: OK
+Checking for readlink breakage: OK
Checking whether the realpath function allows a NULL argument: OK
Checking for ftruncate extend: OK
getcwd takes a NULL argument: OK
+Checking for readlink breakage: OK
Checking whether the realpath function allows a NULL argument: OK
Checking for ftruncate extend: OK
getcwd takes a NULL argument: OK
+Checking for readlink breakage: OK
Checking whether the realpath function allows a NULL argument: OK
Checking for ftruncate extend: OK
getcwd takes a NULL argument: OK
+Checking for readlink breakage: OK
Checking whether the realpath function allows a NULL argument: OK
Checking for ftruncate extend: OK
getcwd takes a NULL argument: OK
+Checking for readlink breakage: OK
Checking whether the realpath function allows a NULL argument: OK
Checking for ftruncate extend: OK
getcwd takes a NULL argument: OK
+Checking for readlink breakage: OK
include $(TOPDIR)/rules.mk
PKG_NAME:=simple-adblock
-PKG_VERSION:=1.9.0
-PKG_RELEASE:=1
+PKG_VERSION:=1.9.1
+PKG_RELEASE:=2
PKG_MAINTAINER:=Stan Grishin <stangri@melmac.ca>
PKG_LICENSE:=GPL-3.0-or-later
# list force_dns_port '4434'
# list force_dns_port '5443'
# list force_dns_port '8443'
+ option canary_domains_icloud '0'
+ option canary_domains_mozilla '0'
option led 'none'
option boot_delay '120'
option download_timeout '10'
option compressed_cache '0'
list allowed_domain 'cdn.jsdelivr.net'
-# Thu Oct 3 17:54:04 PDT 2019
-# File size: 4.0K
- list blocked_domains_url 'https://s3.amazonaws.com/lists.disconnect.me/simple_tracking.txt'
-
-# File size: 12.0K
- list blocked_domains_url 'https://cdn.jsdelivr.net/gh/paulgb/BarbBlock/blacklists/domain-list.txt'
-
-# File size: 44.0K
- list blocked_domains_url 'https://s3.amazonaws.com/lists.disconnect.me/simple_ad.txt'
-
-# File size: 44.0K
- list blocked_domains_url 'https://s3.amazonaws.com/lists.disconnect.me/simple_malvertising.txt'
-
# File size: 16.0K
list blocked_hosts_url 'https://adaway.org/hosts.txt'
-
+
# File size: 20.0K
list blocked_hosts_url 'https://cdn.jsdelivr.net/gh/hoshsadiq/adblock-nocoin-list/hosts.txt'
# block-list may be too big for some routers
list blocked_hosts_url 'https://someonewhocares.org/hosts/hosts'
+# File size: 613.0K
+# block-list may be too big for some routers
+ list blocked_domains_url 'https://cdn.jsdelivr.net/gh/AdguardTeam/cname-trackers@master/combined_disguised_trackers_justdomains.txt'
+
# File size: 624.0K
# block-list too big for most routers
# list blocked_hosts_url 'http://sysctl.org/cameleon/hosts'
\|dshield.org|d
\|www.malwaredomainlist.com/hostslist/hosts.txt|d
\|https://mirror1.malwaredomains.com/files/justdomains|d
+\|lists.disconnect.me|d
+\|https://cdn.jsdelivr.net/gh/paulgb/BarbBlock/blacklists/domain-list.txt|d
LC_ALL=C
if type extra_command 1>/dev/null 2>&1; then
- extra_command 'allow' 'Allows domain(s) in current block-list and config'
+ extra_command 'allow' 'Allows domain in current block-list and config'
extra_command 'check' 'Checks if specified domain is found in current block-list'
extra_command 'dl' 'Force-downloads all enabled block-list'
extra_command 'killcache' 'Delete all cached files'
readonly packageName='simple-adblock'
readonly serviceName="$packageName $PKG_VERSION"
+readonly packageConfigFile="/etc/config/${packageName}"
readonly dnsmasqAddnhostsFile="/var/run/${packageName}/dnsmasq.addnhosts"
readonly dnsmasqAddnhostsCache="/var/run/${packageName}/dnsmasq.addnhosts.cache"
readonly dnsmasqAddnhostsGzip="/etc/${packageName}.dnsmasq.addnhosts.gz"
readonly dnsmasqNftsetFile="/tmp/dnsmasq.d/${packageName}.nftset"
readonly dnsmasqNftsetCache="/var/run/${packageName}/dnsmasq.nftset.cache"
readonly dnsmasqNftsetGzip="/etc/${packageName}.dnsmasq.nftset.gz"
-readonly dnsmasqNftsetFilter='s|^|nftset=/|;s|$|/adb|'
+readonly dnsmasqNftsetFilter='s|^|nftset=/|;s|$|/4#inet#fw4#adb4|'
+readonly dnsmasqNftsetFilterIPv6='s|^|nftset=/|;s|$|/4#inet#fw4#adb4,6#inet#fw4#adb6|'
readonly dnsmasqServersFile="/var/run/${packageName}/dnsmasq.servers"
readonly dnsmasqServersCache="/var/run/${packageName}/dnsmasq.servers.cache"
readonly dnsmasqServersGzip="/etc/${packageName}.dnsmasq.servers.gz"
readonly __FAIL__='\033[0;31m[\xe2\x9c\x97]\033[0m'
readonly _ERROR_='\033[0;31mERROR\033[0m'
readonly _WARNING_='\033[0;33mWARNING\033[0m'
+# shellcheck disable=SC2155
+readonly ipset="$(command -v ipset)"
+# shellcheck disable=SC2155
+readonly nft="$(command -v nft)"
+readonly canaryDomainsMozilla='use-application-dns.net'
+readonly canaryDomainsiCloud='mask.icloud.com mask-h2.icloud.com'
+# readonly canaryDomains="$canaryDomainsMozilla $canaryDomainsiCloud"
-serviceEnabled=
-forceDNS=
-forceDNSPorts=
-parallelDL=
-debug=
-compressedCache=
-ipv6Enabled=
-configUpdateEnabled=
-configUpdateURL=
-bootDelay=
-dlTimeout=
-curlRetry=
-verbosity=
-led=
-targetDNS=
-dnsInstance=
-allowed_domains=
-allowed_domains_urls=
-blocked_domains=
-blocked_domains_urls=
-blocked_hosts_urls=
-dl_command=
-dl_flag=
-isSSLSupported=
-allowIDN=
-outputFilter=
-outputFilterIPv6=
-outputFile=
-outputGzip=
-outputCache=
-awk='awk'
+debug() { local i j; for i in "$@"; do eval "j=\$$i"; echo "${i}: ${j} "; done; }
uci_add_list_if_new() {
local PACKAGE="$1"
/sbin/uci ${UCI_CONFIG_DIR:+-c $UCI_CONFIG_DIR} changes "$PACKAGE${CONFIG:+.$CONFIG}${OPTION:+.$OPTION}"
}
-version() { echo "$PKG_VERSION"; }
+ipset() { "$ipset" "$@" >/dev/null 2>&1; }
+nft() { "$nft" "$@" >/dev/null 2>&1; }
-getStatusText() {
+get_status_text() {
local _ret
case "$1" in
statusNoInstall) _ret="$serviceName is not installed or not found";;
esac
printf "%b" "$_ret"
}
-getErrorText() {
+get_error_text() {
local _ret
case "$1" in
errorOutputFileCreate) _ret="failed to create $outputFile file";;
# Can take a single parameter (text) to be output at any verbosity
# Or target verbosity level and text to be output at specifc verbosity
local msg memmsg logmsg
+ verbosity="${verbosity:-2}"
if [ $# -ne 1 ]; then
if [ $((verbosity & $1)) -gt 0 ] || [ "$verbosity" = "$1" ]; then shift; else return 0; fi
fi
fi
}
-load_package_config() {
- local i
- config_load "$packageName"
- config_get_bool serviceEnabled 'config' 'enabled' 1
- config_get_bool forceDNS 'config' 'force_dns' 1
- config_get_bool parallelDL 'config' 'parallel_downloads' 1
- config_get_bool debug 'config' 'debug' 0
- config_get_bool compressedCache 'config' 'compressed_cache' 0
- config_get_bool ipv6Enabled 'config' 'ipv6_enabled' 0
- config_get_bool configUpdateEnabled 'config' 'config_update_enabled' 0
- config_get forceDNSPorts 'config' 'force_dns_port' '53 853'
- config_get bootDelay 'config' 'boot_delay' '120'
- config_get dlTimeout 'config' 'download_timeout' '20'
- config_get curlRetry 'config' 'curl_retry' '3'
- config_get verbosity 'config' 'verbosity' '2'
- config_get led 'config' 'led'
- config_get targetDNS 'config' 'dns' 'dnsmasq.servers'
- config_get dnsInstance 'config' 'dns_instance' '0'
- config_get allowed_domains 'config' 'allowed_domain'
- config_get allowed_domains_urls 'config' 'allowed_domains_url'
- config_get blocked_domains 'config' 'blocked_domain'
- config_get blocked_domains_urls 'config' 'blocked_domains_url'
- config_get blocked_hosts_urls 'config' 'blocked_hosts_url'
- config_get configUpdateURL 'config' 'config_update_url' 'https://cdn.jsdelivr.net/gh/openwrt/packages/net/simple-adblock/files/simple-adblock.conf.update'
+load_environment() {
+ local i j wan_if wan_gw
+ local validation_result="$1"
+
+ if [ "$validation_result" != '0' ]; then
+ output "${_ERROR_}: $packageName config validation failed!\\n"
+ output "Please check if the '$packageConfigFile' contains correct values for config options.\\n"
+ exit 1
+ fi
+
+ if [ "$enabled" -eq 0 ]; then
+ output "$packageName is currently disabled.\\n"
+ output "Run the following commands before starting service again:\\n"
+ output "uci set ${packageName}.config.enabled='1'; uci commit $packageName;\\n"
+ return 1
+ fi
- if [ "$targetDNS" != 'dnsmasq.addnhosts' ] && [ "$targetDNS" != 'dnsmasq.conf' ] && \
- [ "$targetDNS" != 'dnsmasq.servers' ] && [ "$targetDNS" != 'unbound.adb_list' ] && \
- [ "$targetDNS" != 'dnsmasq.ipset' ] && [ "$targetDNS" != 'dnsmasq.nftset' ] ; then
- targetDNS='dnsmasq.servers'
+ if [ "$debug" -ne 0 ]; then
+ exec 1>>/tmp/simple-adblock.log
+ exec 2>&1
+ set -x
fi
- case "$targetDNS" in
+ case $dns in
+ dnsmasq.addnhosts|dnsmasq.conf|dnsmasq.ipset|dnsmasq.nftset|dnsmasq.servers)
+ if dnsmasq -v 2>/dev/null | grep -q 'no-IDN' || ! dnsmasq -v 2>/dev/null | grep -q -w 'IDN'; then
+ allow_non_ascii=0
+ fi
+ ;;
+ unbound.adb_list)
+ allow_non_ascii=1;;
+ esac
+
+ case $dns in
+ dnsmasq.ipset)
+ if dnsmasq -v 2>/dev/null | grep -q 'no-ipset' || ! dnsmasq -v 2>/dev/null | grep -q -w 'ipset'; then
+ output "$_ERROR_: dnsmasq ipset support is enabled in $packageName, but dnsmasq is either not installed or installed dnsmasq does not support ipset!\\n"
+ dns='dnsmasq.servers'
+ fi
+ if ! ipset help hash:net; then
+ output "$_ERROR_: dnsmasq ipset support is enabled in $packageName, but ipset is either not installed or installed ipset does not support 'hash:net' type!\\n"
+ dns='dnsmasq.servers'
+ fi
+ ;;
+ dnsmasq.nftset)
+ if dnsmasq -v 2>/dev/null | grep -q 'no-nftset' || ! dnsmasq -v 2>/dev/null | grep -q -w 'nftset'; then
+ output "$_ERROR_: dnsmasq nft sets support is enabled in $packageName, but dnsmasq is either not installed or installed dnsmasq does not support nft sets!\\n"
+ dns='dnsmasq.servers'
+ fi
+ if [ -z "$nft" ]; then
+ output "$_ERROR_: dnsmasq nft sets support is enabled in $packageName, but nft is not installed!\\n"
+ dns='dnsmasq.servers'
+ fi
+ ;;
+ esac
+
+ case "$dns" in
dnsmasq.addnhosts)
outputFilter="$dnsmasqAddnhostsFilter"
outputFile="$dnsmasqAddnhostsFile"
outputCache="$dnsmasqAddnhostsCache"
outputGzip="$dnsmasqAddnhostsGzip"
- [ "$ipv6Enabled" -gt 0 ] && outputFilterIPv6="$dnsmasqAddnhostsFilterIPv6"
+ if [ "$ipv6_enabled" -ne 0 ]; then
+ outputFilterIPv6="$dnsmasqAddnhostsFilterIPv6"
+ fi
rm -f "$dnsmasqConfFile" "$dnsmasqConfCache" "$dnsmasqConfGzip"
rm -f "$dnsmasqIpsetFile" "$dnsmasqIpsetCache" "$dnsmasqIpsetGzip"
rm -f "$dnsmasqNftsetFile" "$dnsmasqNftsetCache" "$dnsmasqNftsetGzip"
rm -f "$dnsmasqServersFile" "$dnsmasqServersCache" "$dnsmasqServersGzip"
rm -f "$unboundFile" "$unboundCache" "$unboundGzip"
- ;;
+ ;;
dnsmasq.conf)
outputFilter="$dnsmasqConfFilter"
outputFile="$dnsmasqConfFile"
rm -f "$dnsmasqNftsetFile" "$dnsmasqNftsetCache" "$dnsmasqNftsetGzip"
rm -f "$dnsmasqServersFile" "$dnsmasqServersCache" "$dnsmasqServersGzip"
rm -f "$unboundFile" "$unboundCache" "$unboundGzip"
- ;;
+ ;;
dnsmasq.ipset)
outputFilter="$dnsmasqIpsetFilter"
outputFile="$dnsmasqIpsetFile"
rm -f "$dnsmasqNftsetFile" "$dnsmasqNftsetCache" "$dnsmasqNftsetGzip"
rm -f "$dnsmasqServersFile" "$dnsmasqServersCache" "$dnsmasqServersGzip"
rm -f "$unboundFile" "$unboundCache" "$unboundGzip"
- ;;
+ ;;
dnsmasq.nftset)
- outputFilter="$dnsmasqNftsetFilter"
+ if [ "$ipv6_enabled" -ne 0 ]; then
+ outputFilter="$dnsmasqNftsetFilterIPv6"
+ else
+ outputFilter="$dnsmasqNftsetFilter"
+ fi
outputFile="$dnsmasqNftsetFile"
outputCache="$dnsmasqNftsetCache"
outputGzip="$dnsmasqNftsetGzip"
rm -f "$dnsmasqIpsetFile" "$dnsmasqIpsetCache" "$dnsmasqIpsetGzip"
rm -f "$dnsmasqServersFile" "$dnsmasqServersCache" "$dnsmasqServersGzip"
rm -f "$unboundFile" "$unboundCache" "$unboundGzip"
- ;;
+ ;;
dnsmasq.servers)
outputFilter="$dnsmasqServersFilter"
outputFile="$dnsmasqServersFile"
rm -f "$dnsmasqIpsetFile" "$dnsmasqIpsetCache" "$dnsmasqIpsetGzip"
rm -f "$dnsmasqNftsetFile" "$dnsmasqNftsetCache" "$dnsmasqNftsetGzip"
rm -f "$unboundFile" "$unboundCache" "$unboundGzip"
- ;;
+ ;;
unbound.adb_list)
outputFilter="$unboundFilter"
outputFile="$unboundFile"
rm -f "$dnsmasqIpsetFile" "$dnsmasqIpsetCache" "$dnsmasqIpsetGzip"
rm -f "$dnsmasqNftsetFile" "$dnsmasqNftsetCache" "$dnsmasqNftsetGzip"
rm -f "$dnsmasqServersFile" "$dnsmasqServersCache" "$dnsmasqServersGzip"
- ;;
+ ;;
esac
- if [ -z "${verbosity##*[!0-9]*}" ] || [ "$verbosity" -lt 0 ] || [ "$verbosity" -gt 2 ]; then
- verbosity=1
- fi
+
+ for i in "$outputFile" "$outputCache" "$outputGzip"; do
+ if ! mkdir -p "$(dirname "$i")"; then
+ output "$_ERROR_: Unable to create directory for $i!\\n"
+ json add error "errorOutputFileCreate"
+ fi
+ done
+
. /lib/functions/network.sh
. /usr/share/libubox/jshn.sh
+
is_present 'gawk' && awk='gawk'
if ! is_present '/usr/libexec/grep-gnu' || ! is_present '/usr/libexec/sed-gnu' || \
! is_present '/usr/libexec/sort-coreutils' || ! is_present 'gawk'; then
fi
# Prefer curl because it supports the file:// scheme.
if is_present 'curl'; then
- dl_command="curl --insecure --retry $curlRetry --connect-timeout $dlTimeout --silent"
+ dl_command="curl --insecure --retry $curl_retry --connect-timeout $download_timeout --silent"
dl_flag="-o"
elif is_present '/usr/libexec/wget-ssl'; then
- dl_command="/usr/libexec/wget-ssl --no-check-certificate --timeout $dlTimeout -q"
+ dl_command="/usr/libexec/wget-ssl --no-check-certificate --timeout $download_timeout -q"
dl_flag="-O"
elif is_present wget && wget --version 2>/dev/null | grep -q "+https"; then
- dl_command="wget --no-check-certificate --timeout $dlTimeout -q"
+ dl_command="wget --no-check-certificate --timeout $download_timeout -q"
dl_flag="-O"
else
- dl_command="uclient-fetch --no-check-certificate --timeout $dlTimeout -q"
+ dl_command="uclient-fetch --no-check-certificate --timeout $download_timeout -q"
dl_flag="-O"
fi
led="${led:+/sys/class/leds/$led}"
else
unset isSSLSupported
fi
-}
-
-is_enabled() {
- wan_if=
- wan_gw=
- load_package_config
-
- if [ "$debug" -ne 0 ]; then
- exec 1>>/tmp/simple-adblock.log
- exec 2>&1
- set -x
- fi
-
- if [ "$serviceEnabled" -eq 0 ]; then
- case "$1" in
- on_start)
- output "$packageName is currently disabled.\\n"
- output "Run the following commands before starting service again:\\n"
- output "uci set ${packageName}.config.enabled='1'; uci commit $packageName;\\n"
- ;;
- esac
- return 1
- fi
-
- case $targetDNS in
- dnsmasq.addnhosts|dnsmasq.conf|dnsmasq.ipset|dnsmasq.nftset|dnsmasq.servers)
- if dnsmasq -v 2>/dev/null | grep -q 'no-IDN' || ! dnsmasq -v 2>/dev/null | grep -q -w 'IDN'; then
- allowIDN=0
- else
- allowIDN=1
- fi
- ;;
- unbound.adb_list)
- allowIDN=1;;
- esac
-
- case $targetDNS in
- dnsmasq.ipset)
- if dnsmasq -v 2>/dev/null | grep -q 'no-ipset' || ! dnsmasq -v 2>/dev/null | grep -q -w 'ipset'; then
- output "$_ERROR_: DNSMASQ ipset support is enabled in $packageName, but DNSMASQ is either not installed or installed DNSMASQ does not support ipset!\\n"
- targetDNS='dnsmasq.servers'
- fi
- if ! ipset help hash:net >/dev/null 2>&1; then
- output "$_ERROR_: DNSMASQ ipset support is enabled in $packageName, but ipset is either not installed or installed ipset does not support 'hash:net' type!\\n"
- targetDNS='dnsmasq.servers'
- fi
- ;;
- dnsmasq.nftset)
- if dnsmasq -v 2>/dev/null | grep -q 'no-nftset' || ! dnsmasq -v 2>/dev/null | grep -q -w 'nftset'; then
- output "$_ERROR_: DNSMASQ nftset support is enabled in $packageName, but DNSMASQ is either not installed or installed DNSMASQ does not support nftset!\\n"
- targetDNS='dnsmasq.servers'
- fi
- if [ -z "$(command -v nft)" ]; then
- output "$_ERROR_: DNSMASQ nftset support is enabled in $packageName, but nft is not installed!\\n"
- targetDNS='dnsmasq.servers'
- fi
- ;;
- esac
-
- for i in "$outputFile" "$outputCache" "$outputGzip"; do
- if ! mkdir -p "$(dirname "$i")"; then
- output "$_ERROR_: Unable to create directory for $i!\\n"
- jsonOps add error "errorOutputFileCreate"
- fi
- done
-
- cacheOps 'testGzip' && return 0
+ cache 'test_gzip' && return 0
network_flush_cache; network_find_wan wan_if; network_get_gateway wan_gw "$wan_if";
[ -n "$wan_gw" ] && return 0
output "$_ERROR_: $serviceName failed to discover WAN gateway.\\n"; return 1;
}
-dnsmasqOps() {
+dnsmasq() {
local cfg="$1" param="$2"
case "$param" in
dnsmasq.addnhosts)
uci_remove 'dhcp' "$cfg" 'serversfile'
fi
uci_add_list_if_new 'dhcp' "$cfg" 'addnhosts' "$dnsmasqAddnhostsFile"
- ;;
- dnsmasq.conf|dnsmasq.ipset|dnsmasq.nftset|unbound.adb_list|cleanup)
+ ;;
+ cleanup|dnsmasq.conf|dnsmasq.ipset|dnsmasq.nftset|unbound.adb_list)
uci_remove_list 'dhcp' "$cfg" 'addnhosts' "$dnsmasqAddnhostsFile"
if [ "$(uci_get 'dhcp' "$cfg" 'serversfile')" = "$dnsmasqServersFile" ]; then
uci_remove 'dhcp' "$cfg" 'serversfile'
fi
- ;;
+ ;;
dnsmasq.servers)
uci_remove_list 'dhcp' "$cfg" 'addnhosts' "$dnsmasqAddnhostsFile"
if [ "$(uci_get 'dhcp' "$cfg" 'serversfile')" != "$dnsmasqServersFile" ]; then
uci_set 'dhcp' "$cfg" 'serversfile' "$dnsmasqServersFile"
fi
- ;;
+ ;;
esac
}
-dnsOps() {
+dns() {
local param output_text i
case $1 in
on_start)
if [ ! -s "$outputFile" ]; then
- jsonOps set status "statusFail"
- jsonOps add error "errorOutputFileCreate"
- output "$_ERROR_: $(getErrorText 'errorOutputFileCreate')!\\n"
+ json set status "statusFail"
+ json add error "errorOutputFileCreate"
+ output "$_ERROR_: $(get_error_text 'errorOutputFileCreate')!\\n"
return 1
fi
config_load 'dhcp'
- if [ "$dnsInstance" = "*" ]; then
- config_foreach dnsmasqOps 'dnsmasq' "$targetDNS"
- elif [ -n "$dnsInstance" ]; then
- for i in $dnsInstance; do
- dnsmasqOps "@dnsmasq[$i]" "$targetDNS"
+ if [ "$dns_instance" = "*" ]; then
+ config_foreach dnsmasq 'dnsmasq' "$dns"
+ elif [ -n "$dns_instance" ]; then
+ for i in $dns_instance; do
+ dnsmasq "@dnsmasq[$i]" "$dns" || dnsmasq "$i" "$dns"
done
fi
- case "$targetDNS" in
+ case "$dns" in
dnsmasq.addnhosts|dnsmasq.servers)
- param=dnsmasq_hup
- output_text='Reloading DNSMASQ'
- ;;
+ param=dnsmasq_restart
+ output_text='Reloading dnsmasq'
+ ;;
dnsmasq.conf|dnsmasq.ipset|dnsmasq.nftset)
param=dnsmasq_restart
- output_text='Restarting DNSMASQ'
- ;;
+ output_text='Restarting dnsmasq'
+ ;;
unbound.adb_list)
param=unbound_restart
output_text='Restarting Unbound'
- ;;
+ ;;
esac
if [ -n "$(uci_changes dhcp)" ]; then
uci_commit dhcp
if [ "$param" = 'unbound_restart' ]; then
param='dnsmasq_restart; unbound_restart;'
- output_text='Restarting Unbound/DNSMASQ'
+ output_text='Restarting Unbound/dnsmasq'
else
param=dnsmasq_restart
- output_text='Restarting DNSMASQ'
+ output_text='Restarting dnsmasq'
fi
fi
output 1 "$output_text "
output 2 "$output_text "
- jsonOps set message "$output_text"
+ json set message "$output_text"
if eval "$param"; then
- jsonOps set status "statusSuccess"
+ json set status "statusSuccess"
led_on "$led"
output_okn
else
output_fail
- jsonOps set status "statusFail"
- jsonOps add error "errorDNSReload"
- output "$_ERROR_: $(getErrorText 'errorDNSReload')!\\n"
+ json set status "statusFail"
+ json add error "errorDNSReload"
+ output "$_ERROR_: $(get_error_text 'errorDNSReload')!\\n"
return 1
fi
- ;;
+ ;;
on_stop)
- case "$targetDNS" in
+ case "$dns" in
dnsmasq.addnhosts|dnsmasq.servers)
- param=dnsmasq_hup
- ;;
+ param=dnsmasq_restart
+ ;;
dnsmasq.conf|dnsmasq.ipset|dnsmasq.nftset)
param=dnsmasq_restart
- ;;
+ ;;
unbound.adb_list)
param=unbound_restart
- ;;
+ ;;
esac
if [ -n "$(uci_changes dhcp)" ]; then
uci_commit dhcp
fi
eval "$param"
return $?
- ;;
+ ;;
quiet)
- case "$targetDNS" in
+ case "$dns" in
dnsmasq.addnhosts|dnsmasq.conf|dnsmasq.ipset|dnsmasq.nftset|dnsmasq.servers)
param=dnsmasq_restart
- ;;
+ ;;
unbound.adb_list)
param=unbound_restart
- ;;
+ ;;
esac
eval "$param"
return $?
- ;;
+ ;;
esac
}
-jsonOps() {
+json() {
# shellcheck disable=SC2034
local action="$1" param="$2" value="$3"
local status message error stats
get)
case "$param" in
triggers)
- curReload="$parallelDL $debug $dlTimeout $allowed_domains $blocked_domains $allowed_domains_urls $blocked_domains_urls $blocked_hosts_urls $targetDNS $configUpdateEnabled $configUpdateURL"
- curRestart="$compressedCache $forceDNS $led $forceDNSPorts"
+ curReload="$parallel_downloads $debug $download_timeout $allowed_domain $blocked_domain $allowed_domains_url $blocked_domains_url $blocked_hosts_url $dns $config_update_enabled $config_update_url"
+ curRestart="$compressed_cache $force_dns $led $force_dns_port"
if [ ! -s "$jsonFile" ]; then
ret='on_boot'
elif [ "$curReload" != "$reload" ]; then
*)
printf "%b" "$(eval echo "\$$param")"; return;;
esac
- ;;
+ ;;
add)
if [ -n "$(eval echo "\$$param")" ]; then
value="$(eval echo "\$$param") ${value}"
fi
eval "$param"='${value}'
- ;;
+ ;;
del)
case "$param" in
all)
*)
unset "$param";;
esac
- ;;
+ ;;
set)
case "$param" in
triggers)
- reload="$parallelDL $debug $dlTimeout $allowed_domains $blocked_domains $allowed_domains_urls $blocked_domains_urls $blocked_hosts_urls $targetDNS $configUpdateEnabled $configUpdateURL"
- restart="$compressedCache $forceDNS $led $forceDNSPorts"
- ;;
+ reload="$parallel_downloads $debug $download_timeout $allowed_domain $blocked_domain $allowed_domains_url $blocked_domains_url $blocked_hosts_url $dns $config_update_enabled $config_update_url"
+ restart="$compressed_cache $force_dns $led $force_dns_port"
+ ;;
*)
eval "$param"='$value';;
esac
- ;;
+ ;;
esac
json_init
json_add_object 'data'
sync
}
-cacheOps() {
+cache() {
local R_TMP
case "$1" in
create|backup)
[ -s "$outputFile" ] && { mv -f "$outputFile" "$outputCache"; true > "$outputFile"; } >/dev/null 2>/dev/null
return $?
- ;;
+ ;;
restore|use)
[ -s "$outputCache" ] && mv "$outputCache" "$outputFile" >/dev/null 2>/dev/null
return $?
- ;;
+ ;;
test)
[ -s "$outputCache" ]
return $?
- ;;
- testGzip)
+ ;;
+ test_gzip)
[ -s "$outputGzip" ] && gzip -t -c "$outputGzip"
return $?
- ;;
- createGzip)
+ ;;
+ create_gzip)
+ rm -f "$outputGzip" >/dev/null 2>/dev/null
R_TMP="$(mktemp -u -q -t ${packageName}_tmp.XXXXXXXX)"
if gzip < "$outputFile" > "$R_TMP"; then
if mv "$R_TMP" "$outputGzip"; then
else
return 1
fi
- ;;
- expand|unpack|expandGzip|unpackGzip)
+ ;;
+ expand|unpack|unpack_gzip)
[ -s "$outputGzip" ] && gzip -dc < "$outputGzip" > "$outputCache"
return $?
- ;;
+ ;;
esac
}
return 0
}
-process_config_update() {
- local label R_TMP
- [ "$configUpdateEnabled" -eq 0 ] && return 0
- label="${1##*//}"; label="${label%%/*}";
- while [ -z "$R_TMP" ] || [ -e "$R_TMP" ]; do
- R_TMP="$(mktemp -u -q -t ${packageName}_tmp.XXXXXXXX)"
- done
- if ! $dl_command "$1" $dl_flag "$R_TMP" 2>/dev/null || [ ! -s "$R_TMP" ]; then
- output 1 "$_FAIL_"
- output 2 "[DL] Config Update: $label $__FAIL__\\n"
- jsonOps add error "errorDownloadingConfigUpdate"
- else
- if [ -s "$R_TMP" ] && sed -f "$R_TMP" -i /etc/config/simple-adblock 2>/dev/null; then
- output 1 "$_OK_"
- output 2 "[DL] Config Update: $label $__OK__\\n"
- else
- output 1 "$_FAIL_"
- output 2 "[DL] Config Update: $label $__FAIL__\\n"
- jsonOps add error "errorParsingConfigUpdate"
- fi
- fi
- rm -f "$R_TMP"
- return 0
-}
-
download_lists() {
local hf w_filter j=0 R_TMP
- jsonOps set message "$(getStatusText "statusDownloading")..."
- jsonOps set status "statusDownloading"
+ json set message "$(get_status_text "statusDownloading")..."
+ json set status "statusDownloading"
rm -f "$A_TMP" "$B_TMP" "$outputFile" "$outputCache" "$outputGzip"
if [ "$($awk '/^MemFree/ {print int($2/1000)}' "/proc/meminfo")" -lt 32 ]; then
- output 3 'Low free memory, restarting resolver... '
- if dnsOps 'quiet'; then
+ output 3 'Low free memory, restarting resolver '
+ if dns 'quiet'; then
output_okn
else
output_fail
fi
touch $A_TMP; touch $B_TMP;
output 1 'Downloading lists '
- process_config_update "$configUpdateURL"
rm -f "$sharedMemoryError"
- if [ -n "$blocked_hosts_urls" ]; then
- for hf in ${blocked_hosts_urls}; do
- if [ "$parallelDL" -gt 0 ]; then
- process_url "$hf" 'hosts' 'blocked' &
- else
- process_url "$hf" 'hosts' 'blocked'
- fi
- done
- fi
- if [ -n "$blocked_domains_urls" ]; then
- for hf in ${blocked_domains_urls}; do
- if [ "$parallelDL" -gt 0 ]; then
- process_url "$hf" 'domains' 'blocked' &
- else
- process_url "$hf" 'domains' 'blocked'
- fi
- done
- fi
- if [ -n "$allowed_domains_urls" ]; then
- for hf in ${allowed_domains_urls}; do
- if [ "$parallelDL" -gt 0 ]; then
- process_url "$hf" 'domains' 'allowed' &
- else
- process_url "$hf" 'domains' 'allowed'
- fi
- done
- fi
+ for hf in ${blocked_hosts_url}; do
+ if [ "$parallel_downloads" -gt 0 ]; then
+ process_url "$hf" 'hosts' 'blocked' &
+ else
+ process_url "$hf" 'hosts' 'blocked'
+ fi
+ done
+ for hf in ${blocked_domains_url}; do
+ if [ "$parallel_downloads" -gt 0 ]; then
+ process_url "$hf" 'domains' 'blocked' &
+ else
+ process_url "$hf" 'domains' 'blocked'
+ fi
+ done
+ for hf in ${allowed_domains_url}; do
+ if [ "$parallel_downloads" -gt 0 ]; then
+ process_url "$hf" 'domains' 'allowed' &
+ else
+ process_url "$hf" 'domains' 'allowed'
+ fi
+ done
wait
output 1 '\n'
if [ -s "$sharedMemoryError" ]; then
while IFS= read -r line; do
- jsonOps add error "$line"
+ json add error "$line"
done < "$sharedMemoryError"
rm -f "$sharedMemoryError"
fi
- [ -n "$blocked_domains" ] && for hf in ${blocked_domains}; do echo "$hf" | sed "$domainsFilter" >> $B_TMP; done
- allowed_domains="${allowed_domains}
+ if [ "$canary_domains_icloud" -ne 0 ]; then
+ canaryDomains="${canaryDomains:+$canaryDomains }${canaryDomainsiCloud}"
+ fi
+ if [ "$canary_domains_mozilla" -ne 0 ]; then
+ canaryDomains="${canaryDomains:+$canaryDomains }${canaryDomainsMozilla}"
+ fi
+
+ for hf in $blocked_domain $canaryDomains; do echo "$hf" | sed "$domainsFilter" >> $B_TMP; done
+ allowed_domain="${allowed_domain}
$(cat $A_TMP)"
- [ -n "$allowed_domains" ] && for hf in ${allowed_domains}; do hf="$(echo "$hf" | sed 's/\./\\./g')"; w_filter="$w_filter/${hf}$/d;"; done
+ for hf in ${allowed_domain}; do hf="$(echo "$hf" | sed 's/\./\\./g')"; w_filter="$w_filter/^${hf}$/d;/\.${hf}$/d;"; done
[ ! -s "$B_TMP" ] && return 1
output 1 'Processing downloads '
output 2 'Sorting combined list '
- jsonOps set message "$(getStatusText "statusProcessing"): sorting combined list"
- if [ "$allowIDN" -gt 0 ]; then
+ json set message "$(get_status_text "statusProcessing"): sorting combined list"
+ if [ "$allow_non_ascii" -gt 0 ]; then
if sort -u "$B_TMP" > "$A_TMP"; then
output_ok
else
output_failn
- jsonOps add error "errorSorting"
+ json add error "errorSorting"
fi
else
if sort -u "$B_TMP" | grep -E -v '[^a-zA-Z0-9=/.-]' > "$A_TMP"; then
output_ok
else
output_failn
- jsonOps add error "errorSorting"
+ json add error "errorSorting"
fi
fi
- if [ "$targetDNS" = 'dnsmasq.conf' ] || \
- [ "$targetDNS" = 'dnsmasq.ipset' ] || \
- [ "$targetDNS" = 'dnsmasq.nftset' ] || \
- [ "$targetDNS" = 'dnsmasq.servers' ] || \
- [ "$targetDNS" = 'unbound.adb_list' ]; then
+ if [ "$dns" = 'dnsmasq.conf' ] || \
+ [ "$dns" = 'dnsmasq.ipset' ] || \
+ [ "$dns" = 'dnsmasq.nftset' ] || \
+ [ "$dns" = 'dnsmasq.servers' ] || \
+ [ "$dns" = 'unbound.adb_list' ]; then
# TLD optimization written by Dirk Brenken (dev@brenken.org)
output 2 'Optimizing combined list '
- jsonOps set message "$(getStatusText "statusProcessing"): optimizing combined list"
+ json set message "$(get_status_text "statusProcessing"): optimizing combined list"
# sed -E 'G;:t;s/(.*)(\.)(.*)(\n)(.*)/\1\4\5\2\3/;tt;s/(.*)\n(\.)(.*)/\3\2\1/' is actually slower than command below
if $awk -F "." '{for(f=NF;f>1;f--)printf "%s.",$f;print $1}' "$A_TMP" > "$B_TMP"; then
if sort "$B_TMP" > "$A_TMP"; then
output_ok
else
output_failn
- jsonOps add error "errorOptimization"
+ json add error "errorOptimization"
mv "$A_TMP" "$B_TMP"
fi
else
output_failn
- jsonOps add error "errorOptimization"
+ json add error "errorOptimization"
fi
else
output_failn
- jsonOps add error "errorOptimization"
+ json add error "errorOptimization"
mv "$A_TMP" "$B_TMP"
fi
else
output_failn
- jsonOps add error "errorOptimization"
+ json add error "errorOptimization"
fi
else
output_failn
- jsonOps add error "errorOptimization"
+ json add error "errorOptimization"
mv "$A_TMP" "$B_TMP"
fi
else
fi
output 2 'Allowing domains '
- jsonOps set message "$(getStatusText "statusProcessing"): allowing domains"
+ json set message "$(get_status_text "statusProcessing"): allowing domains"
if sed -i "$w_filter" "$B_TMP"; then
output_ok
else
output_failn
- jsonOps add error "errorAllowListProcessing"
+ json add error "errorAllowListProcessing"
fi
output 2 'Formatting merged file '
- jsonOps set message "$(getStatusText "statusProcessing"): formatting merged file"
+ json set message "$(get_status_text "statusProcessing"): formatting merged file"
if [ -z "$outputFilterIPv6" ]; then
if sed "$outputFilter" "$B_TMP" > "$A_TMP"; then
output_ok
else
output_failn
- jsonOps add error "errorDataFileFormatting"
+ json add error "errorDataFileFormatting"
fi
else
- case "$targetDNS" in
+ case "$dns" in
dnsmasq.addnhosts)
if sed "$outputFilter" "$B_TMP" > "$A_TMP" && \
sed "$outputFilterIPv6" "$B_TMP" >> "$A_TMP"; then
output_ok
else
output_failn
- jsonOps add error "errorDataFileFormatting"
+ json add error "errorDataFileFormatting"
fi
- ;;
+ ;;
esac
fi
- case "$targetDNS" in
+ case "$dns" in
dnsmasq.addnhosts)
- output 2 'Creating DNSMASQ addnhosts file '
- jsonOps set message "$(getStatusText "statusProcessing"): creating DNSMASQ addnhosts file"
- ;;
+ output 2 'Creating dnsmasq addnhosts file '
+ json set message "$(get_status_text "statusProcessing"): creating dnsmasq addnhosts file"
+ ;;
dnsmasq.conf)
- output 2 'Creating DNSMASQ config file '
- jsonOps set message "$(getStatusText "statusProcessing"): creating DNSMASQ config file"
- ;;
+ output 2 'Creating dnsmasq config file '
+ json set message "$(get_status_text "statusProcessing"): creating dnsmasq config file"
+ ;;
dnsmasq.ipset)
- output 2 'Creating DNSMASQ ipset file '
- jsonOps set message "$(getStatusText "statusProcessing"): creating DNSMASQ ipset file"
- ;;
+ output 2 'Creating dnsmasq ipset file '
+ json set message "$(get_status_text "statusProcessing"): creating dnsmasq ipset file"
+ ;;
dnsmasq.nftset)
- output 2 'Creating DNSMASQ nftset file '
- jsonOps set message "$(getStatusText "statusProcessing"): creating DNSMASQ nftset file"
- ;;
+ output 2 'Creating dnsmasq nft set file '
+ json set message "$(get_status_text "statusProcessing"): creating dnsmasq nft set file"
+ ;;
dnsmasq.servers)
- output 2 'Creating DNSMASQ servers file '
- jsonOps set message "$(getStatusText "statusProcessing"): creating DNSMASQ servers file"
- ;;
+ output 2 'Creating dnsmasq servers file '
+ json set message "$(get_status_text "statusProcessing"): creating dnsmasq servers file"
+ ;;
unbound.adb_list)
output 2 'Creating Unbound adb_list file '
- jsonOps set message "$(getStatusText "statusProcessing"): creating Unbound adb_list file"
- ;;
+ json set message "$(get_status_text "statusProcessing"): creating Unbound adb_list file"
+ ;;
esac
+
if mv "$A_TMP" "$outputFile"; then
output_ok
else
output_failn
- jsonOps add error "errorMovingDataFile"
+ json add error "errorMovingDataFile"
fi
- if [ "$compressedCache" -gt 0 ]; then
+ if [ "$compressed_cache" -gt 0 ]; then
output 2 'Creating compressed cache '
- jsonOps set message "$(getStatusText "statusProcessing"): creating compressed cache"
- if cacheOps 'createGzip'; then
+ json set message "$(get_status_text "statusProcessing"): creating compressed cache"
+ if cache 'create_gzip'; then
output_ok
else
output_failn
- jsonOps add error "errorCreatingCompressedCache"
+ json add error "errorCreatingCompressedCache"
fi
else
rm -f "$outputGzip"
fi
output 2 'Removing temporary files '
- jsonOps set message "$(getStatusText "statusProcessing"): removing temporary files"
+ json set message "$(get_status_text "statusProcessing"): removing temporary files"
rm -f "/tmp/${packageName}_tmp.*" "$A_TMP" "$B_TMP" "$outputCache" || j=1
if [ $j -eq 0 ]; then
output_ok
else
output_failn
- jsonOps add error "errorRemovingTempFiles"
+ json add error "errorRemovingTempFiles"
fi
output 1 '\n'
}
-boot() {
- load_package_config
- sleep "$bootDelay"
- rc_procd start_service 'on_boot' && rc_procd service_triggers
+adb_allow() {
+ local c hf string="$1"
+ local validation_result="$3"
+ load_environment "$validation_result"
+ if [ ! -s "$outputFile" ]; then
+ output "No block-list ('$outputFile') found.\\n"
+ elif [ -z "$string" ]; then
+ output "Usage: /etc/init.d/${packageName} allow 'domain' ...\\n"
+ else
+ case "$dns" in
+ dnsmasq.addnhosts|dnsmasq.conf|dnsmasq.ipset|dnsmasq.nftset|dnsmasq.servers)
+ output 1 "Allowing domain(s) and restarting dnsmasq "
+ output 2 "Allowing domain(s) \\n"
+ for c in $string; do
+ output 2 " $c "
+ hf="$(echo "$c" | sed 's/\./\\./g')"
+ if sed -i "/^${hf}$/d;/\.${hf}$/d;" "$outputFile" && \
+ uci_add_list_if_new "${packageName}" 'config' 'allowed_domain' "$c"; then
+ output_ok
+ else
+ output_fail
+ fi
+ done
+ if [ "$compressed_cache" -gt 0 ]; then
+ output 2 'Creating compressed cache '
+ if cache 'create_gzip'; then
+ output_ok
+ else
+ output_failn
+ fi
+ fi
+ output 2 "Committing changes to config "
+ if [ -n "$(uci_changes "$packageName")" ] && uci_commit "$packageName"; then
+ allowed_domain="$(uci_get "$packageName" 'config' 'allowed_domain')"
+ json set triggers
+ json set stats "$serviceName is blocking $(wc -l < "$outputFile") domains (with ${dns})"
+ output_ok;
+ if [ "$dns" = 'dnsmasq.ipset' ]; then
+ output 2 "Flushing adb ipset "
+ if ipset -q -! flush adb; then output_ok; else output_fail; fi
+ fi
+ if [ "$dns" = 'dnsmasq.nftset' ]; then
+ output 2 "Flushing adb nft sets "
+ nft flush set inet fw4 adb6
+ if nft flush set inet fw4 adb4; then output_ok; else output_fail; fi
+ fi
+ output 2 "Restarting dnsmasq "
+ if dnsmasq_restart; then output_okn; else output_failn; fi
+ else
+ output_fail;
+ fi
+ ;;
+ unbound.adb_list)
+ output 1 "Allowing domain(s) and restarting Unbound "
+ output 2 "Allowing domain(s) \\n"
+ for c in $string; do
+ output 2 " $c "
+					if sed -i "/${c}/d" "$outputFile" && \
+						uci_add_list_if_new "$packageName" 'config' 'allowed_domain' "$c"; then
+ output_ok
+ else
+ output_fail
+ fi
+ done
+ if [ "$compressed_cache" -gt 0 ]; then
+ output 2 'Creating compressed cache '
+ if cache 'create_gzip'; then
+ output_ok
+ else
+ output_failn
+ fi
+ fi
+ output 2 "Committing changes to config "
+ if [ -n "$(uci_changes "$packageName")" ] && uci_commit "$packageName"; then
+ allowed_domain="$(uci_get "$packageName" 'config' 'allowed_domain')"
+ json set triggers
+ json set stats "$serviceName is blocking $(wc -l < "$outputFile") domains (with ${dns})"
+ output_ok;
+ output 2 "Restarting Unbound "
+ if unbound_restart; then output_okn; else output_failn; fi
+ else
+ output_fail;
+ fi
+ ;;
+ esac
+ fi
}
-start_service() {
- is_enabled 'on_start' || return 1
+adb_check() {
+ local c param="$1"
+ local validation_result="$3"
+ load_environment "$validation_result"
+ if [ ! -s "$outputFile" ]; then
+ output "No block-list ('$outputFile') found.\\n"
+ elif [ -z "$param" ]; then
+ output "Usage: /etc/init.d/${packageName} check 'domain' ...\\n"
+ else
+ for string in ${param}; do
+ c="$(grep -c "$string" "$outputFile")"
+ if [ "$c" -gt 0 ]; then
+ if [ "$c" -eq 1 ]; then
+ output "Found 1 match for '$string' in '$outputFile'.\\n"
+ else
+ output "Found $c matches for '$string' in '$outputFile'.\\n"
+ fi
+ if [ "$c" -le 20 ]; then
+ case "$dns" in
+ dnsmasq.addnhosts)
+ grep "$string" "$outputFile" | sed 's|^127.0.0.1 ||;s|^:: ||;';;
+ dnsmasq.conf)
+ grep "$string" "$outputFile" | sed 's|local=/||;s|/$||;';;
+ dnsmasq.ipset)
+ grep "$string" "$outputFile" | sed 's|ipset=/||;s|/adb$||;';;
+ dnsmasq.nftset)
+ grep "$string" "$outputFile" | sed 's|nftset=/||;s|/4#inet#adb#adb4||;';;
+ dnsmasq.servers)
+ grep "$string" "$outputFile" | sed 's|server=/||;s|/$||;';;
+ unbound.adb_list)
+ grep "$string" "$outputFile" | sed 's|^local-zone: "||;s|" static$||;';;
+ esac
+ fi
+ else
+ output "The '$string' is not found in current block-list ('$outputFile').\\n"
+ fi
+ done
+ fi
+}
+
+adb_config_update() {
+ local R_TMP label
+ local param="$1" validation_result="$3"
+ load_environment "$validation_result"
+ label="${config_update_url##*//}"
+ label="${label%%/*}";
+ [ "$config_update_enabled" -ne 0 ] || return 0
+
+ if [ "$param" != 'download' ]; then
+ cache 'test' && return 0
+ cache 'test_gzip' && return 0
+ fi
+ output 1 'Updating config '
+ while [ -z "$R_TMP" ] || [ -e "$R_TMP" ]; do
+ R_TMP="$(mktemp -u -q -t ${packageName}_tmp.XXXXXXXX)"
+ done
+ if ! $dl_command "$config_update_url" $dl_flag "$R_TMP" 2>/dev/null || [ ! -s "$R_TMP" ]; then
+ output 1 "$_FAIL_\\n"
+ output 2 "[DL] Config Update: $label $__FAIL__\\n"
+ json add error "errorDownloadingConfigUpdate"
+ else
+ if [ -s "$R_TMP" ] && sed -f "$R_TMP" -i "$packageConfigFile" 2>/dev/null; then
+ output 1 "$_OK_\\n"
+ output 2 "[DL] Config Update: $label $__OK__\\n"
+ else
+ output 1 "$_FAIL_\\n"
+ output 2 "[DL] Config Update: $label $__FAIL__\\n"
+ json add error "errorParsingConfigUpdate"
+ fi
+ fi
+ rm -f "$R_TMP"
+ return 0
+}
+
+adb_sizes() {
+ local i
+ local validation_result="$3"
+ load_environment "$validation_result"
+
+ echo "# $(date)"
+ for i in $blocked_domains_url; do
+ [ "${i//melmac}" != "$i" ] && continue
+ if $dl_command "$i" $dl_flag /tmp/sast 2>/dev/null && [ -s /tmp/sast ]; then
+ echo "# File size: $(du -sh /tmp/sast | $awk '{print $1}')"
+ if is_greater "$(du -sk /tmp/sast)" "500"; then
+ echo "# block-list too big for most routers"
+ elif is_greater "$(du -sk /tmp/sast)" "100"; then
+ echo "# block-list may be too big for some routers"
+ fi
+ rm -rf /tmp/sast
+ echo " list blocked_domains_url '$i'"
+ echo ""
+ else
+ echo "# site was down on last check"
+ echo "# list blocked_domains_url '$i'"
+ echo ""
+ fi
+ done
+ for i in $blocked_hosts_url; do
+ if $dl_command "$i" $dl_flag /tmp/sast 2>/dev/null && [ -s /tmp/sast ]; then
+ echo "# File size: $(du -sh /tmp/sast | $awk '{print $1}')"
+ if is_greater "$(du -sk /tmp/sast)" "500"; then
+ echo "# block-list too big for most routers"
+ elif is_greater "$(du -sk /tmp/sast)" "100"; then
+ echo "# block-list may be too big for some routers"
+ fi
+ rm -rf /tmp/sast
+ echo " list blocked_hosts_url '$i'"
+ echo ""
+ else
+ echo "# site was down on last check"
+ echo "# list blocked_hosts_url '$i'"
+ echo ""
+ fi
+ done
+}
+
+adb_start() {
local action status error message stats c
+ local validation_result="$3"
+
+ load_environment "$validation_result"
- status="$(jsonOps get status)"
- error="$(jsonOps get error)"
- message="$(jsonOps get message)"
- stats="$(jsonOps get stats)"
- action="$(jsonOps get triggers)"
+ status="$(json get status)"
+ error="$(json get error)"
+ message="$(json get message)"
+ stats="$(json get stats)"
+ action="$(json get triggers)"
if [ "$action" = 'on_boot' ] || [ "$1" = 'on_boot' ]; then
- if cacheOps 'testGzip' || cacheOps 'test'; then
+ if cache 'test_gzip' || cache 'test'; then
action='restore'
else
action='download'
elif [ "$action" = 'download' ] || [ "$1" = 'download' ] || [ -n "$error" ]; then
action='download'
elif [ ! -s "$outputFile" ]; then
- if cacheOps 'testGzip' || cacheOps 'test'; then
+ if cache 'test_gzip' || cache 'test'; then
action='restore'
else
action='download'
action='download'
fi
- jsonOps del all
- jsonOps set triggers
+ json del all
+ json set triggers
procd_open_instance 'main'
procd_set_param command /bin/true
procd_set_param stderr 1
procd_open_data
json_add_array firewall
- if [ "$forceDNS" -ne 0 ]; then
- for c in $forceDNSPorts; do
+ if [ "$force_dns" -ne 0 ]; then
+ for c in $force_dns_port; do
if netstat -tuln | grep LISTEN | grep ":${c}" >/dev/null 2>&1; then
json_add_object ""
json_add_string type redirect
fi
done
fi
- if [ "$targetDNS" = 'dnsmasq.ipset' ]; then
- json_add_object ""
- json_add_string type ipset
- json_add_string name adb
- json_add_string match dest_net
- json_add_string storage hash
- json_close_object
- json_add_object ""
- json_add_string type rule
- json_add_string ipset adb
- json_add_string src lan
- json_add_string dest "*"
- json_add_string proto "tcp udp"
- json_add_string target REJECT
- json_close_object
- fi
+ case "$dns" in
+ dnsmasq.ipset)
+ json_add_object ""
+ json_add_string type ipset
+ json_add_string name adb
+ json_add_string match dest_net
+ json_add_string storage hash
+ json_close_object
+ json_add_object ""
+ json_add_string type rule
+ json_add_string ipset adb
+ json_add_string src lan
+ json_add_string dest "*"
+ json_add_string proto "tcp udp"
+ json_add_string target REJECT
+ json_close_object
+ ;;
+ dnsmasq.nftset)
+ json_add_object ""
+ json_add_string type ipset
+ json_add_string name adb4
+ json_add_string family 4
+ json_add_string match dest_net
+ json_close_object
+ json_add_object ""
+ json_add_string type rule
+ json_add_string ipset adb4
+ json_add_string src lan
+ json_add_string dest "*"
+ json_add_string proto "tcp udp"
+ json_add_string target REJECT
+ json_close_object
+ if [ "$ipv6_enabled" -ne 0 ]; then
+ json_add_object ""
+ json_add_string type ipset
+ json_add_string name adb6
+ json_add_string family 6
+ json_add_string match dest_net
+ json_close_object
+ json_add_object ""
+ json_add_string type rule
+ json_add_string ipset adb6
+ json_add_string src lan
+ json_add_string dest "*"
+ json_add_string proto "tcp udp"
+ json_add_string target REJECT
+ json_close_object
+ fi
+ ;;
+ esac
json_close_array
procd_close_data
procd_close_instance
if [ "$action" = 'restore' ]; then
output 0 "Starting $serviceName... "
output 3 "Starting $serviceName...\\n"
- jsonOps set status "statusStarting"
- if cacheOps 'testGzip' && ! cacheOps 'test' && [ ! -s "$outputFile" ]; then
+ json set status "statusStarting"
+ if cache 'test_gzip' && ! cache 'test' && [ ! -s "$outputFile" ]; then
output 3 'Found compressed cache file, unpacking it '
- jsonOps set message 'found compressed cache file, unpacking it.'
- if cacheOps 'unpackGzip'; then
+ json set message 'found compressed cache file, unpacking it.'
+ if cache 'unpack_gzip'; then
output_okn
else
- output_fail
- jsonOps add error "errorRestoreCompressedCache"
- output "$_ERROR_: $(getErrorText 'errorRestoreCompressedCache')!\\n"
+ output_failn
+ json add error "errorRestoreCompressedCache"
+ output "$_ERROR_: $(get_error_text 'errorRestoreCompressedCache')!\\n"
action='download'
fi
fi
- if cacheOps 'test' && [ ! -s "$outputFile" ]; then
+ if cache 'test' && [ ! -s "$outputFile" ]; then
output 3 'Found cache file, reusing it '
- jsonOps set message 'found cache file, reusing it.'
- if cacheOps 'restore'; then
+ json set message 'found cache file, reusing it.'
+ if cache 'restore'; then
output_okn
- dnsOps 'on_start'
+ dns 'on_start'
else
- output_fail
- jsonOps add error "errorRestoreCache"
- output "$_ERROR_: $(getErrorText 'errorRestoreCache')!\\n"
+ output_failn
+ json add error "errorRestoreCache"
+ output "$_ERROR_: $(get_error_text 'errorRestoreCache')!\\n"
action='download'
fi
fi
fi
if [ "$action" = 'download' ]; then
- if [ -s "$outputFile" ] || cacheOps 'test' || cacheOps 'testGzip'; then
+ if [ -s "$outputFile" ] || cache 'test' || cache 'test_gzip'; then
output 0 "Force-reloading $serviceName... "
output 3 "Force-reloading $serviceName...\\n"
- jsonOps set status "statusForceReloading"
+ json set status "statusForceReloading"
else
output 0 "Starting $serviceName... "
output 3 "Starting $serviceName...\\n"
- jsonOps set status "statusStarting"
+ json set status "statusStarting"
fi
download_lists
- dnsOps 'on_start'
+ dns 'on_start'
fi
if [ "$action" = 'restart' ]; then
output 0 "Restarting $serviceName... "
output 3 "Restarting $serviceName...\\n"
- jsonOps set status "statusRestarting"
- dnsOps 'on_start'
+ json set status "statusRestarting"
+ dns 'on_start'
fi
if [ "$action" = 'start' ]; then
output 0 "Starting $serviceName... "
output 3 "Starting $serviceName...\\n"
- jsonOps set status "statusStarting"
- dnsOps 'on_start'
+ json set status "statusStarting"
+ dns 'on_start'
fi
- if [ -s "$outputFile" ] && [ "$(jsonOps get status)" != "statusFail" ]; then
+ if [ -s "$outputFile" ] && [ "$(json get status)" != "statusFail" ]; then
output 0 "$__OK__\\n";
- jsonOps del message
- jsonOps set status "statusSuccess"
- jsonOps set stats "$serviceName is blocking $(wc -l < "$outputFile") domains (with ${targetDNS})"
+ json del message
+ json set status "statusSuccess"
+ json set stats "$serviceName is blocking $(wc -l < "$outputFile") domains (with ${dns})"
status_service
else
output 0 "$__FAIL__\\n";
- jsonOps set status "statusFail"
- jsonOps add error "errorOhSnap"
+ json set status "statusFail"
+ json add error "errorOhSnap"
status_service
fi
}
-service_started() { procd_set_config_changed firewall; }
-service_stopped() { procd_set_config_changed firewall; }
-
-restart_service() { rc_procd start_service 'restart'; }
-reload_service() { rc_procd start_service 'restart'; }
-dl() { rc_procd start_service 'download'; }
-
-killcache() {
- rm -f "$dnsmasqAddnhostsCache" "$dnsmasqAddnhostsGzip"
- rm -f "$dnsmasqConfCache" "$dnsmasqConfGzip"
- rm -f "$dnsmasqIpsetCache" "$dnsmasqIpsetGzip"
- rm -f "$dnsmasqNftsetCache" "$dnsmasqNftsetGzip"
- rm -f "$dnsmasqServersCache" "$dnsmasqServersGzip"
- rm -f "$unboundCache" "$unboundGzip"
- config_load 'dhcp'
- config_foreach dnsmasqOps 'dnsmasq' 'cleanup'
- uci_commit 'dhcp'
- return 0
-}
-
-status_service() {
+adb_status() {
local c url status message error stats
- config_load "$packageName"
- config_get verbosity 'config' 'verbosity' '2'
- status="$(jsonOps get status)"
- message="$(jsonOps get message)"
- error="$(jsonOps get error)"
- stats="$(jsonOps get stats)"
+ local validation_result="$3"
+ load_environment "$validation_result"
+ status="$(json get status)"
+ message="$(json get message)"
+ error="$(json get error)"
+ stats="$(json get stats)"
if [ "$status" = "statusSuccess" ]; then
output "$stats "; output_okn;
else
- [ -n "$status" ] && status="$(getStatusText "$status")"
+ [ -n "$status" ] && status="$(get_status_text "$status")"
if [ -n "$status" ] && [ -n "$message" ]; then
status="${status}: $message"
fi
c="${c%|*}"
case "$c" in
errorDownloadingList|errorParsingList)
- output "$_ERROR_: $(getErrorText "$c") $url!\\n";;
+ output "$_ERROR_: $(get_error_text "$c") $url!\\n";;
*)
- output "$_ERROR_: $(getErrorText "$c")!\\n";;
+ output "$_ERROR_: $(get_error_text "$c")!\\n";;
esac
n=$((n+1))
done
fi
}
-stop_service() {
- load_package_config
+adb_stop() {
+ local validation_result="$3"
+ load_environment "$validation_result"
if [ -s "$outputFile" ]; then
output "Stopping $serviceName... "
- cacheOps 'create'
- if dnsOps 'on_stop'; then
+ cache 'create'
+ if dns 'on_stop'; then
+ ipset -q -! flush adb
+ ipset -q -! destroy adb
+			nft delete set inet fw4 adb4 >/dev/null 2>&1
+			nft delete set inet fw4 adb6 >/dev/null 2>&1
led_off "$led"
output 0 "$__OK__\\n"; output_okn;
- jsonOps set status "statusStopped"
- jsonOps del message
+ json set status "statusStopped"
+ json del message
else
output 0 "$__FAIL__\\n"; output_fail;
- jsonOps set status "statusFail"
- jsonOps add error "errorStopping"
- output "$_ERROR_: $(getErrorText 'errorStopping')!\\n"
+ json set status "statusFail"
+ json add error "errorStopping"
+ output "$_ERROR_: $(get_error_text 'errorStopping')!\\n"
fi
fi
}
+boot() {
+ local boot_delay
+ config_load "$packageName"
+ config_get boot_delay 'config' 'boot_delay' '120'
+ sleep "$boot_delay" >/dev/null 2>&1
+ rc_procd start_service 'on_boot' && rc_procd service_triggers
+}
+service_started() { procd_set_config_changed firewall; }
+service_stopped() { procd_set_config_changed firewall; }
+restart_service() { rc_procd start_service 'restart'; }
+reload_service() { rc_procd start_service 'restart'; }
+start_service() {
+ load_validate_config 'config' adb_config_update "'$*'"
+ load_validate_config 'config' adb_start "'$*'"
+}
+stop_service() { load_validate_config 'config' adb_stop "'$*'"; }
+status_service() { load_validate_config 'config' adb_status "''"; }
service_triggers() {
procd_open_trigger
- procd_add_config_trigger 'config.change' "${packageName}" /etc/init.d/${packageName} reload
- procd_add_reload_interface_trigger 'wan'
- procd_add_interface_trigger 'interface.*.up' 'wan' /etc/init.d/${packageName} reload
+ procd_add_config_trigger 'config.change' "${packageName}" /etc/init.d/${packageName} reload
+ procd_add_reload_interface_trigger 'wan'
+ procd_add_interface_trigger 'interface.*.up' 'wan' /etc/init.d/${packageName} reload
procd_close_trigger
}
-
-allow() {
- load_package_config
- local c string="$1"
- if [ ! -s "$outputFile" ]; then
- echo "No block-list ('$outputFile') found."
- elif [ -z "$string" ]; then
- echo "Usage: /etc/init.d/${packageName} allow domain(s)"
- else
- case "$targetDNS" in
- dnsmasq.addnhosts|dnsmasq.conf|dnsmasq.ipset|dnsmasq.nftset|dnsmasq.servers)
- output 1 "Allowing domain(s) and restarting DNSMASQ... "
- output 2 "Allowing domain(s)... \\n"
- for c in $string; do
- output 2 " $c "
- if sed -i "/${string}/d" "$outputFile" && \
- uci_add_list "${packageName}" 'config' 'allowed_domain' "$string"; then
- output_ok
- else
- output_fail
- fi
- done
- output 2 "Committing changes to config... "
- if [ -n "$(uci_changes "$packageName")" ] && uci_commit "$packageName"; then
- allowed_domains="$(uci_get "$packageName" 'config' 'allowed_domain')"
- jsonOps set triggers
- jsonOps set stats "$serviceName is blocking $(wc -l < "$outputFile") domains (with ${targetDNS})"
- output_ok;
- if [ "$targetDNS" = 'dnsmasq.ipset' ]; then
- output 2 "Flushing adb ipset... "
- if command -v ipset && ipset flush adb; then output_ok; else output_fail; fi
- fi
- output 2 "Restarting DNSMASQ... "
- if dnsmasq_restart; then output_okn; else output_failn; fi
- else
- output_fail;
- fi
- ;;
- unbound.adb_list)
- output 1 "Allowing domain(s) and restarting Unbound... "
- output 2 "Allowing domain(s)... \\n"
- for c in $string; do
- output 2 " $c "
- if sed -i "/${string}/d" "$outputFile" && \
- uci_add_list "$packageName" 'config' 'allowed_domain' "$string"; then
- output_ok
- else
- output_fail
- fi
- done
- output 2 "Committing changes to config... "
- if [ -n "$(uci_changes "$packageName")" ] && uci_commit "$packageName"; then
- allowed_domains="$(uci_get "$packageName" 'config' 'allowed_domain')"
- jsonOps set triggers
- jsonOps set stats "$serviceName is blocking $(wc -l < "$outputFile") domains (with ${targetDNS})"
- output_ok;
- output 2 "Restarting Unbound... "
- if unbound_restart; then output_okn; else output_failn; fi
- else
- output_fail;
- fi
- esac
- fi
-}
-
-check() {
- load_package_config
- local c string="$1"
- if [ ! -s "$outputFile" ]; then
- echo "No block-list ('$outputFile') found."
- elif [ -z "$string" ]; then
- echo "Usage: /etc/init.d/${packageName} check string"
- else
- c="$(grep -c "$string" "$outputFile")"
- if [ "$c" -gt 0 ]; then
- if [ "$c" -eq 1 ]; then
- echo "Found 1 match for '$string' in '$outputFile':"
- else
- echo "Found $c matches for '$string' in '$outputFile'."
- fi
- if [ "$c" -le 20 ]; then
- case "$targetDNS" in
- dnsmasq.addnhosts)
- grep "$string" "$outputFile" | sed 's|^127.0.0.1 ||;s|^:: ||;';;
- dnsmasq.conf)
- grep "$string" "$outputFile" | sed 's|local=/||;s|/$||;';;
- dnsmasq.ipset)
- grep "$string" "$outputFile" | sed 's|ipset=/||;s|/adb$||;';;
- dnsmasq.nftset)
- grep "$string" "$outputFile" | sed 's|nftset=/||;s|/adb$||;';;
- dnsmasq.servers)
- grep "$string" "$outputFile" | sed 's|server=/||;s|/$||;';;
- unbound.adb_list)
- grep "$string" "$outputFile" | sed 's|^local-zone: "||;s|" static$||;';;
- esac
- fi
- else
- echo "The '$string' is not found in current block-list ('$outputFile')."
- fi
- fi
+allow() { load_validate_config 'config' adb_allow "'$*'"; }
+check() { load_validate_config 'config' adb_check "'$*'"; }
+dl() { rc_procd start_service 'download'; }
+killcache() {
+ rm -f "$dnsmasqAddnhostsCache" "$dnsmasqAddnhostsGzip"
+ rm -f "$dnsmasqConfCache" "$dnsmasqConfGzip"
+ rm -f "$dnsmasqIpsetCache" "$dnsmasqIpsetGzip"
+ rm -f "$dnsmasqNftsetCache" "$dnsmasqNftsetGzip"
+ rm -f "$dnsmasqServersCache" "$dnsmasqServersGzip"
+ rm -f "$unboundCache" "$unboundGzip"
+ config_load 'dhcp'
+ config_foreach dnsmasq 'dnsmasq' 'cleanup'
+ uci_commit 'dhcp'
+ return 0
}
+sizes() { load_validate_config 'config' adb_sizes "''"; }
+version() { echo "$PKG_VERSION"; }
-sizes() {
- local i
- load_package_config
- echo "# $(date)"
-
- for i in $blocked_domains_urls; do
- [ "${i//melmac}" != "$i" ] && continue
- if $dl_command "$i" $dl_flag /tmp/sast 2>/dev/null && [ -s /tmp/sast ]; then
- echo "# File size: $(du -sh /tmp/sast | $awk '{print $1}')"
- if is_greater "$(du -sk /tmp/sast)" "500"; then
- echo "# block-list too big for most routers"
- elif is_greater "$(du -sk /tmp/sast)" "100"; then
- echo "# block-list may be too big for some routers"
- fi
- rm -rf /tmp/sast
- echo " list blocked_domains_url '$i'"
- echo ""
- else
- echo "# site was down on last check"
- echo "# list blocked_domains_url '$i'"
- echo ""
- fi
- done
-
- for i in $blocked_hosts_urls; do
- if $dl_command "$i" $dl_flag /tmp/sast 2>/dev/null && [ -s /tmp/sast ]; then
- echo "# File size: $(du -sh /tmp/sast | $awk '{print $1}')"
- if is_greater "$(du -sk /tmp/sast)" "500"; then
- echo "# block-list too big for most routers"
- elif is_greater "$(du -sk /tmp/sast)" "100"; then
- echo "# block-list may be too big for some routers"
- fi
- rm -rf /tmp/sast
- echo " list blocked_hosts_url '$i'"
- echo ""
- else
- echo "# site was down on last check"
- echo "# list blocked_hosts_url '$i'"
- echo ""
- fi
- done
+load_validate_config() {
+ local dl_command
+ local dl_flag
+ local isSSLSupported
+ local outputFilter
+ local outputFilterIPv6
+ local outputFile
+ local outputGzip
+ local outputCache
+ local awk='awk'
+ local enabled
+ local force_dns
+ local force_dns_port
+ local parallel_downloads
+ local debug
+ local compressed_cache
+ local ipv6_enabled
+ local allow_non_ascii
+ local canary_domains_icloud
+ local canary_domains_mozilla
+ local config_update_enabled
+ local config_update_url
+ local boot_delay
+ local download_timeout
+ local curl_retry
+ local verbosity
+ local led
+ local dns
+ local dns_instance
+ local allowed_domain
+ local allowed_domains_url
+ local blocked_domain
+ local blocked_domains_url
+ local blocked_hosts_url
+ uci_load_validate "$packageName" "$packageName" "$1" "${2}${3:+ $3}" \
+ 'enabled:bool:0' \
+ 'force_dns:bool:1' \
+ 'force_dns_port:list(integer):53 853' \
+ 'parallel_downloads:bool:1' \
+ 'debug:bool:0' \
+ 'compressed_cache:bool:0' \
+ 'ipv6_enabled:bool:0' \
+ 'allow_non_ascii:bool:0' \
+ 'canary_domains_icloud:bool:0' \
+ 'canary_domains_mozilla:bool:0' \
+ 'config_update_enabled:bool:0' \
+ 'config_update_url:string:https://cdn.jsdelivr.net/gh/openwrt/packages/net/simple-adblock/files/simple-adblock.conf.update' \
+ 'boot_delay:range(0,240):120' \
+ 'download_timeout:range(1,40):20' \
+ 'curl_retry:range(1,5):3' \
+ 'verbosity:range(0,2):2' \
+ 'led:or("", "none", file, device, string)' \
+	'dns:or("dnsmasq.addnhosts", "dnsmasq.conf", "dnsmasq.ipset", "dnsmasq.nftset", "dnsmasq.servers", "unbound.adb_list"):dnsmasq.servers' \
+ 'dns_instance:or(list(integer, string)):0' \
+ 'allowed_domain:list(string)' \
+ 'allowed_domains_url:list(string)' \
+ 'blocked_domain:list(string)' \
+ 'blocked_domains_url:list(string)' \
+ 'blocked_hosts_url:list(string)'
}
--- /dev/null
+include $(TOPDIR)/rules.mk
+
+PKG_NAME:=snowflake
+PKG_VERSION:=2.3.0
+PKG_RELEASE:=2
+
+PKG_SOURCE_PROTO:=git
+PKG_SOURCE_URL:=https://git.torproject.org/pluggable-transports/snowflake.git
+PKG_SOURCE_VERSION:=v$(PKG_VERSION)
+PKG_MIRROR_HASH:=e323598c959ea336927bf8dc4a96b02e88de73cb8cf165221465ca2b76cd2c48
+
+PKG_LICENSE:=BSD-3-Clause
+PKG_LICENSE_FILES:=LICENSE
+PKG_MAINTAINER:=Daniel Golle <daniel@makrotopia.org>, Nick Hainke <vincent@systemli.org>
+
+PKG_BUILD_DEPENDS:=golang/host
+PKG_BUILD_PARALLEL:=1
+PKG_USE_MIPS16:=0
+
+GO_PKG:=git.torproject.org/pluggable-transports/snowflake.git/v2
+
+include $(INCLUDE_DIR)/package.mk
+include ../../lang/golang/golang-package.mk
+
+define Package/snowflake/Default
+ TITLE:=Snowflake
+ URL:=https://snowflake.torproject.org/
+ DEPENDS:=$(GO_ARCH_DEPENDS)
+ SECTION:=net
+ CATEGORY:=Network
+ USERID:=snowflake:snowflake
+endef
+
+define Package/snowflake-broker
+$(call Package/snowflake/Default)
+ TITLE+= Broker
+endef
+
+define Package/snowflake-client
+$(call Package/snowflake/Default)
+ TITLE+= Client
+endef
+
+define Package/snowflake-distinctcounter
+$(call Package/snowflake/Default)
+ TITLE+= Distinct Counter
+endef
+
+define Package/snowflake-probetest
+$(call Package/snowflake/Default)
+ TITLE+= Probe test
+endef
+
+define Package/snowflake-proxy
+$(call Package/snowflake/Default)
+ TITLE+= Proxy
+endef
+
+define Package/snowflake-server
+$(call Package/snowflake/Default)
+ TITLE+= Server
+endef
+
+define Package/snowflake/description/Default
+Snowflake is a system that allows people from all over the world to
+access censored websites and applications. Similar to how VPNs assist
+users in getting around Internet censorship, Snowflake helps you avoid
+being noticed by Internet censors by making your Internet activity appear
+as though you're using the Internet for a regular video or voice call.
+endef
+
+define Package/snowflake-broker/description
+$(call Package/snowflake/description/Default)
+
+This package provides the Snowflake broker service.
+endef
+
+define Package/snowflake-client/description
+$(call Package/snowflake/description/Default)
+
+This package contains the Snowflake client which provides the bridge to TOR.
+endef
+
+define Package/snowflake-distinctcounter/description
+$(call Package/snowflake/description/Default)
+
+This package provides the Snowflake distinct counter service.
+endef
+
+define Package/snowflake-probetest/description
+$(call Package/snowflake/description/Default)
+
+This package provides the Snowflake probe test.
+endef
+
+define Package/snowflake-proxy/description
+$(call Package/snowflake/description/Default)
+
+This package provides the standalone Snowflake proxy.
+endef
+
+define Package/snowflake-server/description
+$(call Package/snowflake/description/Default)
+
+This package provides the Snowflake server.
+endef
+
+define Package/snowflake-broker/install
+ $(INSTALL_DIR) $(1)/usr/bin
+ $(INSTALL_BIN) $(GO_PKG_BUILD_BIN_DIR)/broker $(1)/usr/bin/snowflake-broker
+endef
+
+define Package/snowflake-client/install
+ $(INSTALL_DIR) $(1)/usr/bin
+ $(INSTALL_BIN) $(GO_PKG_BUILD_BIN_DIR)/client $(1)/usr/bin/snowflake-client
+endef
+
+define Package/snowflake-distinctcounter/install
+ $(INSTALL_DIR) $(1)/usr/bin
+ $(INSTALL_BIN) $(GO_PKG_BUILD_BIN_DIR)/distinctcounter $(1)/usr/bin/snowflake-distinctcounter
+endef
+
+define Package/snowflake-probetest/install
+ $(INSTALL_DIR) $(1)/usr/bin
+ $(INSTALL_BIN) $(GO_PKG_BUILD_BIN_DIR)/probetest $(1)/usr/bin/snowflake-probetest
+endef
+
+define Package/snowflake-proxy/install
+ $(INSTALL_DIR) $(1)/usr/bin $(1)/etc/init.d
+ $(INSTALL_BIN) $(GO_PKG_BUILD_BIN_DIR)/proxy $(1)/usr/bin/snowflake-proxy
+ $(INSTALL_BIN) ./files/snowflake-proxy.init $(1)/etc/init.d/snowflake-proxy
+endef
+
+define Package/snowflake-server/install
+ $(INSTALL_DIR) $(1)/usr/bin
+ $(INSTALL_BIN) $(GO_PKG_BUILD_BIN_DIR)/server $(1)/usr/bin/snowflake-server
+endef
+
+$(eval $(call BuildPackage,snowflake-broker))
+$(eval $(call BuildPackage,snowflake-client))
+$(eval $(call BuildPackage,snowflake-distinctcounter))
+$(eval $(call BuildPackage,snowflake-probetest))
+$(eval $(call BuildPackage,snowflake-proxy))
+$(eval $(call BuildPackage,snowflake-server))
--- /dev/null
+#!/bin/sh /etc/rc.common
+
+START=99
+
+USE_PROCD=1
+NAME=snowflake-proxy
+PROG=/usr/bin/$NAME
+
+start_service() {
+ procd_open_instance
+ procd_set_param command "$PROG" -verbose
+ procd_set_param stdout 1
+ procd_set_param stderr 1
+ procd_set_param user snowflake
+ procd_set_param group snowflake
+ procd_set_param respawn
+ [ -x /sbin/ujail ] && {
+ procd_add_jail snowflake-proxy ronly
+ procd_add_jail_mount /etc/ssl/certs
+ procd_set_param no_new_privs 1
+ }
+ procd_close_instance
+}
PKG_NAME:=strongswan
PKG_VERSION:=5.9.7
-PKG_RELEASE:=$(AUTORELEASE)
+PKG_RELEASE:=$(AUTORELEASE).1
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.bz2
PKG_SOURCE_URL:=https://download.strongswan.org/ https://download2.strongswan.org/
include $(TOPDIR)/rules.mk
PKG_NAME:=tor
-PKG_VERSION:=0.4.7.8
+PKG_VERSION:=0.4.7.10
PKG_RELEASE:=1
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://dist.torproject.org/ \
https://archive.torproject.org/tor-package-archive
-PKG_HASH:=9e9a5c67ad2acdd5f0f8be14ed591fed076b1708abf8344066990a0fa66fe195
+PKG_HASH:=647e56dfa59ea36dab052027fcfc7663905c826c03509363c456900ecd435a5b
PKG_MAINTAINER:=Hauke Mehrtens <hauke@hauke-m.de> \
Peter Wagner <tripolar@gmx.at>
PKG_LICENSE_FILES:=LICENSE
include $(TOPDIR)/rules.mk
PKG_NAME:=unbound
-PKG_VERSION:=1.16.2
+PKG_VERSION:=1.16.3
PKG_RELEASE:=$(AUTORELEASE)
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://nlnetlabs.nl/downloads/unbound
-PKG_HASH:=2e32f283820c24c51ca1dd8afecfdb747c7385a137abe865c99db4b257403581
+PKG_HASH:=ea0c6665e2c3325b769eac1dfccd60fe1828d5fcf662650039eccb3f67edb28e
PKG_MAINTAINER:=Eric Luehrsen <ericluehrsen@gmail.com>
PKG_LICENSE:=BSD-3-Clause
target. Use "uname" on host only if "UNAME" variable is empty.
--- a/configure.ac
+++ b/configure.ac
-@@ -814,7 +814,7 @@ if test x_$ub_test_python != x_no; then
+@@ -815,7 +815,7 @@ if test x_$ub_test_python != x_no; then
fi
fi
include $(INCLUDE_DIR)/package.mk
-GEOIP_VER:=202208250104
+GEOIP_VER:=202209170841
GEOIP_FILE:=geoip.dat.$(GEOIP_VER)
define Download/geoip
URL:=https://github.com/v2fly/geoip/releases/download/$(GEOIP_VER)/
URL_FILE:=geoip.dat
FILE:=$(GEOIP_FILE)
- HASH:=8fadefdcbb973c5294f81a2142ffcfb0d138e6f8285e643f929d2fe035096075
+ HASH:=ceb0cfdf0fab39141e807fe7bb8a0972c6b3f616abcd1097ac30c26368f368a5
endef
-GEOSITE_VER:=20220829045350
+GEOSITE_VER:=20220918140014
GEOSITE_FILE:=dlc.dat.$(GEOSITE_VER)
define Download/geosite
URL:=https://github.com/v2fly/domain-list-community/releases/download/$(GEOSITE_VER)/
URL_FILE:=dlc.dat
FILE:=$(GEOSITE_FILE)
- HASH:=107a52601a94baf02fe0d877f0a0f469606c87b9a0df2b7569630004dcb8f86e
+ HASH:=8a69b68f02d422ab05f351772c871f367bf387fa78dc37c4f8c1e421a13540a6
endef
define Package/v2ray-geodata/template
PKG_NAME:=vpnc-scripts
PKG_VERSION:=20151220
-PKG_RELEASE:=2
+PKG_RELEASE:=3
include $(INCLUDE_DIR)/package.mk
}
do_disconnect() {
- rm -f "/tmp/dnsmasq.d/openconnect.$TUNDEV"
+ if [ -f "/tmp/dnsmasq.d/openconnect.$TUNDEV" ]; then
+ rm -f "/tmp/dnsmasq.d/openconnect.$TUNDEV"
+ /etc/init.d/dnsmasq restart
+ fi
+
proto_init_update "$TUNDEV" 0
proto_send_update "$INTERFACE"
}
include $(TOPDIR)/rules.mk
PKG_NAME:=wifi-presence
-PKG_VERSION:=0.1.2
-PKG_RELEASE:=2
+PKG_VERSION:=0.2.0
+PKG_RELEASE:=1
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://codeload.github.com/awilliams/wifi-presence/tar.gz/v$(PKG_VERSION)?
-PKG_HASH:=f5cf8bf36e3c17ad4b0486007532030760c22c5f63bd63707d911ab94bd744df
+PKG_HASH:=d3b4f2e33ba423e353ad17a000f67690c7c84b136726e683a9cb24be53889407
PKG_LICENSE:=MIT
PKG_MAINTAINER:=Adam Williams <pwnfactory@gmail.com>
PKG_NAME:=wifidog
PKG_VERSION:=1.3.0
-PKG_RELEASE:=7
+PKG_RELEASE:=8
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://github.com/wifidog/wifidog-gateway
include $(TOPDIR)/rules.mk
PKG_NAME:=xray-core
-PKG_VERSION:=1.5.10
+PKG_VERSION:=1.6.0
PKG_RELEASE:=$(AUTORELEASE)
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://codeload.github.com/XTLS/Xray-core/tar.gz/v$(PKG_VERSION)?
-PKG_HASH:=0cce205187a38d7e13dc4e503e9a8667c9cf438844e091bd91989aaac8f2c411
+PKG_HASH:=b65375090a2d48d358a582837d485bfaa9572e4d1f5a649895b9fd83d0f69e43
PKG_MAINTAINER:=Tianling Shen <cnsztl@immortalwrt.org>
PKG_LICENSE:=MPL-2.0
PKG_NAME:=bandwidthd
PKG_VERSION:=2.0.1-35
-PKG_RELEASE:=5
+PKG_RELEASE:=6
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://codeload.github.com/NethServer/bandwidthd/tar.gz/$(PKG_VERSION)?
--- /dev/null
+--- a/bandwidthd.c
++++ b/bandwidthd.c
+@@ -893,7 +893,7 @@ void StoreIPDataInCDF(struct IPData IncD
+ {
+ IPData = &IncData[counter];
+ HostIp2CharIp(IPData->ip, IPBuffer);
+- fprintf(cdf, "%s,%lu,", IPBuffer, IPData->timestamp);
++ fprintf(cdf, "%s,%" PRId64 ",", IPBuffer, IPData->timestamp);
+ Stats = &(IPData->Send);
+ fprintf(cdf, "%llu,%llu,%llu,%llu,%llu,%llu,%llu,", Stats->total, Stats->icmp, Stats->udp, Stats->tcp, Stats->ftp, Stats->http, Stats->p2p);
+ Stats = &(IPData->Receive);
+@@ -1089,7 +1089,7 @@ int RCDF_Test(char *filename)
+ if (fseek(cdf, -2, SEEK_CUR) == -1)
+ break;
+ }
+-	if(fscanf(cdf, " %15[0-9.],%lu,", ipaddrBuffer, &timestamp) != 2)
++	if(fscanf(cdf, " %15[0-9.],%" PRId64 ",", ipaddrBuffer, &timestamp) != 2)
+ {
+ syslog(LOG_ERR, "%s is corrupted, skipping", filename);
+ return FALSE;
+@@ -1122,7 +1122,7 @@ void RCDF_PositionStream(FILE *cdf)
+ }
+ while (fgetc(cdf) != '\n' && !feof(cdf)); // Read to next line
+ ungetc('\n', cdf); // Just so the fscanf mask stays identical
+-	if(fscanf(cdf, " %15[0-9.],%lu,", ipaddrBuffer, &timestamp) != 2)
++	if(fscanf(cdf, " %15[0-9.],%" PRId64 ",", ipaddrBuffer, &timestamp) != 2)
+ {
+ syslog(LOG_ERR, "Unknown error while scanning for beginning of data...\n");
+ return;
+@@ -1144,7 +1144,7 @@ void RCDF_Load(FILE *cdf)
+
+ for(Counter = 0; !feof(cdf) && !ferror(cdf); Counter++)
+ {
+-	if(fscanf(cdf, " %15[0-9.],%lu,", ipaddrBuffer, &timestamp) != 2)
++	if(fscanf(cdf, " %15[0-9.],%" PRId64 ",", ipaddrBuffer, &timestamp) != 2)
+ goto End_RecoverDataFromCdf;
+
+ if (!timestamp) // First run through loop
$(eval $(call BuildPlugin,write-http,HTTP POST output,write_http,+PACKAGE_collectd-mod-write-http:libcurl))
$(eval $(call BuildScriptPlugin,sqm,SQM/qdisc collection,sqm_collectd,+PACKAGE_collectd-mod-sqm:collectd-mod-exec))
-$(eval $(call BuildScriptLuaPlugin,ltq-dsl,Lantiq DSL collection,dsl,@TARGET_lantiq_xrx200 +PACKAGE_collectd-mod-ltq-dsl:collectd-mod-lua +libubus-lua))
+$(eval $(call BuildScriptLuaPlugin,ltq-dsl,Lantiq DSL collection,dsl,ltq-dsl-app +PACKAGE_collectd-mod-ltq-dsl:collectd-mod-lua +libubus-lua))
printf "</Plugin>\n\n" >> "$COLLECTD_CONF"
}
+process_network_server() {
+ local cfg="$1"
+ local SecurityLevel="$2"
+
+ local Username Password ResolveInterval
+
+ config_get Username "$cfg" Username
+ [ -z "$Username" ] && {
+ $LOG notice "SecurityLevel set to '$SecurityLevel' but no option Username found in config '$cfg'"
+ return 1
+ }
+ printf "\\t\\tUsername \"%s\"\n" "${Username}" >> "$COLLECTD_CONF"
+
+ config_get Password "$cfg" Password
+ [ -z "$Password" ] && {
+ $LOG notice "SecurityLevel set to '$SecurityLevel' but no option Password found in config '$cfg'"
+ return 2
+ }
+ printf "\\t\\tPassword \"%s\"\n" "${Password}" >> "$COLLECTD_CONF"
+
+ config_get ResolveInterval "$cfg" ResolveInterval
+ [ -z "$ResolveInterval" ] || {
+ printf "\\t\\tResolveInterval \"%s\"\n" "${ResolveInterval}" >> "$COLLECTD_CONF"
+ }
+}
+
+process_network_listen() {
+ local cfg="$1"
+
+ local auth_file="/tmp/collectd-auth-${cfg}.conf"
+ local auth_set
+
+ rm -rf "${auth_file}"
+ add_auth() {
+ echo "$1" >> "${auth_file}"
+ auth_set=1
+ }
+ config_list_foreach "$cfg" auth add_auth
+
+ [ -z "$auth_set" ] && {
+ $LOG notice "SecurityLevel set to '$SecurityLevel' but no list option auth found in config '$cfg'"
+ return 1
+ }
+
+ printf "\\t\\tAuthFile \"%s\"\n" "${auth_file}" >> "$COLLECTD_CONF"
+}
+
process_network_sections() {
local cfg="$1"
local section="$2"
- local host port output
+ local host port output rvalue SecurityLevel Interface
config_get host "$cfg" host
[ -z "$host" ] && {
config_get port "$cfg" port
if [ -z "$port" ]; then
- printf "\\t%s\n" "${output}" >> "$COLLECTD_CONF"
+ printf "\\t<%s>\n" "${output}" >> "$COLLECTD_CONF"
+ else
+ printf "\\t<%s \"%s\">\n" "${output}" "${port}" >> "$COLLECTD_CONF"
+ fi
+
+ config_get SecurityLevel "$cfg" SecurityLevel 'None'
+ [ -z "$SecurityLevel" ] || {
+ printf "\\t\\tSecurityLevel \"%s\"\n" "${SecurityLevel}" >> "$COLLECTD_CONF"
+ }
+
+ if [ "$SecurityLevel" != "None" ]; then
+ case "$section" in
+ server)
+ process_network_server "$cfg" "$SecurityLevel"
+ rvalue="$?"
+ [ "$rvalue" != 0 ] && return 0
+ ;;
+ listen)
+ process_network_listen "$cfg" "$SecurityLevel"
+ rvalue="$?"
+ [ "$rvalue" != 0 ] && return 0
+ ;;
+ esac
+ else
+ $LOG notice "SecurityLevel set to 'None' for '$cfg'"
+ fi
+
+ config_get Interface "$cfg" Interface
+ [ -z "$Interface" ] || {
+ printf "\\t\\tInterface \"%s\"\n" "${Interface}" >> "$COLLECTD_CONF"
+ }
+
+ if [ "$section" = "server" ]; then
+ printf "\\t</Server>\n" >> "$COLLECTD_CONF"
else
- printf "\\t%s \"%s\"\n" "${output}" "${port}" >> "$COLLECTD_CONF"
+ printf "\\t</Listen>\n" >> "$COLLECTD_CONF"
fi
}
include $(TOPDIR)/rules.mk
PKG_NAME:=containerd
-PKG_VERSION:=1.6.6
-PKG_RELEASE:=1
+PKG_VERSION:=1.6.8
+PKG_RELEASE:=$(AUTORELEASE)
PKG_LICENSE:=Apache-2.0
PKG_LICENSE_FILES:=LICENSE
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://codeload.github.com/containerd/containerd/tar.gz/v${PKG_VERSION}?
-PKG_HASH:=27afb673c20d53aa5c31aec07b38eb7e4dc911e7e1f0c76fac9513bbf070bd24
+PKG_HASH:=f5f938513c28377f64f85e84f2750d39f26b01262f3a062b7e8ce35b560ca407
PKG_MAINTAINER:=Gerard Ryan <G.M0N3Y.2503@gmail.com>
include $(TOPDIR)/rules.mk
PKG_NAME:=compose
-PKG_VERSION:=2.11.0
+PKG_VERSION:=2.11.2
PKG_RELEASE:=$(AUTORELEASE)
PKG_LICENSE:=Apache-2.0
PKG_LICENSE_FILES:=LICENSE
PKG_SOURCE:=v$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://codeload.github.com/docker/compose/tar.gz/v${PKG_VERSION}?
-PKG_HASH:=fe01c85ad31767c22d5f6ea24e6809f5b675c9253d534138ee4095428dfcb96a
+PKG_HASH:=592e712f568938046602c0d4c225bc3c333e2b77574634fa0f39a8c066d04561
PKG_MAINTAINER:=Javier Marcet <javier@marcet.info>
include $(TOPDIR)/rules.mk
PKG_NAME:=docker
-PKG_VERSION:=20.10.17
+PKG_VERSION:=20.10.18
PKG_RELEASE:=$(AUTORELEASE)
PKG_LICENSE:=Apache-2.0
PKG_LICENSE_FILES:=LICENSE
PKG_GIT_URL:=github.com/docker/cli
PKG_GIT_REF:=v$(PKG_VERSION)
PKG_SOURCE_URL:=https://codeload.$(PKG_GIT_URL)/tar.gz/$(PKG_GIT_REF)?
-PKG_HASH:=ab2b59c2302017fea9ad2f70827e8a6f0204b557ce28e66bcb80fea262c9fbdc
-PKG_GIT_SHORT_COMMIT:=100c701 # SHA1 used within the docker executables
+PKG_HASH:=87ba64f76d3bb98666fa44552c3eb0b1c10e5e9c43010e8dbc180ba22690413a
+PKG_GIT_SHORT_COMMIT:=b40c2f6 # SHA1 used within the docker executables
PKG_MAINTAINER:=Gerard Ryan <G.M0N3Y.2503@gmail.com>
include $(TOPDIR)/rules.mk
PKG_NAME:=dockerd
-PKG_VERSION:=20.10.17
+PKG_VERSION:=20.10.18
PKG_RELEASE:=$(AUTORELEASE)
PKG_LICENSE:=Apache-2.0
PKG_LICENSE_FILES:=LICENSE
PKG_GIT_URL:=github.com/moby/moby
PKG_GIT_REF:=v$(PKG_VERSION)
PKG_SOURCE_URL:=https://codeload.$(PKG_GIT_URL)/tar.gz/$(PKG_GIT_REF)?
-PKG_HASH:=061cf8579aa3c813c353c80fa480744e2f6cca2e6392f546bd0942a6a10c7a14
-PKG_GIT_SHORT_COMMIT:=a89b842 # SHA1 used within the docker executables
+PKG_HASH:=9907aaaf39fb1c2c3fd427192e4a63d7adf8ddc9fb0e29c692a6ca10de9c34f6
+PKG_GIT_SHORT_COMMIT:=e42327a # SHA1 used within the docker executables
PKG_MAINTAINER:=Gerard Ryan <G.M0N3Y.2503@gmail.com>
include $(TOPDIR)/rules.mk
PKG_NAME:=grep
-PKG_VERSION:=3.7
+PKG_VERSION:=3.8
PKG_RELEASE:=2
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.xz
PKG_SOURCE_URL:=@GNU/grep
-PKG_HASH:=5c10da312460aec721984d5d83246d24520ec438dd48d7ab5a05dbc0d6d6823c
+PKG_HASH:=498d7cc1b4fb081904d87343febb73475cf771e424fb7e6141aff66013abc382
PKG_MAINTAINER:=Julen Landa Alustiza <julen@zokormazo.info>
PKG_LICENSE:=GPL-3.0-or-later
SECTION:=utils
CATEGORY:=Utilities
TITLE:=grep search utility - full version
- DEPENDS:=+libpcre
+ DEPENDS:=+libpcre2
URL:=https://www.gnu.org/software/grep/
ALTERNATIVES:=\
300:/bin/egrep:/usr/libexec/egrep-gnu \
https://github.com/void-linux/void-packages/commit/0d9556b8593d6e67027fb1c83d176b7f899547e5
--- a/lib/sigsegv.c
+++ b/lib/sigsegv.c
-@@ -221,8 +221,10 @@ int libsigsegv_version = LIBSIGSEGV_VERS
- /* both should be equivalent */
- # if 0
- # define SIGSEGV_FAULT_STACKPOINTER ((ucontext_t *) ucp)->uc_mcontext.regs->gpr[1]
--# else
-+# elif defined(__GLIBC__)
- # define SIGSEGV_FAULT_STACKPOINTER ((ucontext_t *) ucp)->uc_mcontext.uc_regs->gregs[1]
-+# else
+@@ -246,8 +246,10 @@ int libsigsegv_version = LIBSIGSEGV_VERS
+ /* Because of the union, both definitions should be equivalent. */
+ # if 0
+ # define SIGSEGV_FAULT_STACKPOINTER ((ucontext_t *) ucp)->uc_mcontext.regs->gpr[1]
+-# else
++# elif defined(__GLIBC__)
+ # define SIGSEGV_FAULT_STACKPOINTER ((ucontext_t *) ucp)->uc_mcontext.uc_regs->gregs[1]
++# else
+# define SIGSEGV_FAULT_STACKPOINTER ((ucontext_t *) ucp)->uc_mcontext.gregs[1]
+ # endif
# endif
# endif
-
include $(TOPDIR)/rules.mk
PKG_NAME:=libnetwork
-PKG_RELEASE:=1
+PKG_RELEASE:=$(AUTORELEASE)
PKG_LICENSE:=Apache-2.0
PKG_LICENSE_FILES:=LICENSE
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://$(GO_PKG)
-PKG_SOURCE_VERSION:=f6ccccb1c082a432c2a5814aaedaca56af33d9ea
-PKG_SOURCE_DATE:=2022-06-02
-PKG_MIRROR_HASH:=92a23ff620c3cf3b05bc01a06721a9f78dfb6bc5b048dbcf70ea9542cec8e7a2
+PKG_SOURCE_VERSION:=0dde5c895075df6e3630e76f750a447cf63f4789
+PKG_SOURCE_DATE:=2022-07-16
+PKG_MIRROR_HASH:=ff5c7f30777c5fec50a59b79ff0e056fd09e3e55f3475816a21376bf72812d42
PKG_MAINTAINER:=Gerard Ryan <G.M0N3Y.2503@gmail.com>
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://git.kernel.org/pub/scm/utils/mmc/mmc-utils.git
-PKG_SOURCE_DATE:=2022-04-17
-PKG_SOURCE_VERSION:=b7e4d5a6ae9942d26a11de9b05ae7d52c0802802
-PKG_MIRROR_HASH:=7865294ec7619d6696bb06a6e2ad4a3803a0bfbd9754b7d0d617bfb30ab828a1
+PKG_SOURCE_DATE:=2022-09-20
+PKG_SOURCE_VERSION:=2a934139cfeef1249e46f50fbec548f11c2dd8ca
+PKG_MIRROR_HASH:=3ddf26ca9ddf5bb02a749e7dbc8455a924a0340e3fa7dd4e17fac0c4ff1d5883
PKG_LICENSE:=GPL-2.0-only
PKG_LICENSE_FILES:=
define Package/open-vm-tools
$(call Package/open-vm-tools/Default)
- DEPENDS:=@TARGET_x86 +glib2 +libpthread +libtirpc
+ DEPENDS:=@TARGET_x86 \
+ +glib2 \
+ +libpthread \
+ +libtirpc \
+ +libmspack \
+ +libdnet
TITLE:=open-vm-tools
URL:=https://github.com/vmware/open-vm-tools
MAINTAINER:=Yuhei OKAWA <tochiro.srchack@gmail.com>
--without-pam \
--disable-grabbitmqproxy \
--disable-vgauth \
- --disable-deploypkg \
--without-root-privileges \
--without-kernel-modules \
- --without-dnet \
--with-tirpc \
--without-x \
--without-gtk2 \
$(INSTALL_DIR) $(1)/sbin/
$(INSTALL_BIN) ./files/shutdown $(1)/sbin/
+ $(INSTALL_BIN) ./files/telinit $(1)/sbin/
$(INSTALL_DIR) $(1)/lib/
$(CP) $(PKG_INSTALL_DIR)/usr/lib/libguestlib.so* $(1)/lib/
$(CP) $(PKG_INSTALL_DIR)/usr/lib/libhgfs.so* $(1)/lib/
$(CP) $(PKG_INSTALL_DIR)/usr/lib/libvmtools.so* $(1)/lib/
+ $(CP) $(PKG_INSTALL_DIR)/usr/lib/libDeployPkg.so* $(1)/lib/
$(INSTALL_DIR) $(1)/usr/lib/open-vm-tools/plugins/common/
$(INSTALL_DATA) $(PKG_INSTALL_DIR)/usr/lib/open-vm-tools/plugins/common/libhgfsServer.so $(1)/usr/lib/open-vm-tools/plugins/common/
$(INSTALL_DATA) $(PKG_INSTALL_DIR)/usr/lib/open-vm-tools/plugins/vmsvc/libpowerOps.so $(1)/usr/lib/open-vm-tools/plugins/vmsvc/
$(INSTALL_DATA) $(PKG_INSTALL_DIR)/usr/lib/open-vm-tools/plugins/vmsvc/libtimeSync.so $(1)/usr/lib/open-vm-tools/plugins/vmsvc/
$(INSTALL_DATA) $(PKG_INSTALL_DIR)/usr/lib/open-vm-tools/plugins/vmsvc/libvmbackup.so $(1)/usr/lib/open-vm-tools/plugins/vmsvc/
+ $(INSTALL_DATA) $(PKG_INSTALL_DIR)/usr/lib/open-vm-tools/plugins/vmsvc/libdeployPkgPlugin.so $(1)/usr/lib/open-vm-tools/plugins/vmsvc/
$(INSTALL_DIR) $(1)/etc/hotplug.d/block/
$(INSTALL_BIN) ./files/vmware-scsi.hotplug $(1)/etc/hotplug.d/block/80-vmware-scsi
--- /dev/null
+#!/bin/sh
+#compatibility script for traditional customization
+
+/sbin/reboot
[guestinfo]
disable-perf-mon=1
+
+[logging]
+log=true
+vmtoolsd.level=debug
+vmsvc.level=debug
+vmusr.level=debug
+toolboxcmd.level=debug
of sudo with a fraction of the codebase.
endef
+CONFIGURE_ARGS += $(if $(CONFIG_BUSYBOX_CONFIG_PAM),--with,--without)-pam
+
define Package/opendoas/install
$(INSTALL_DIR) $(1)/usr/bin
$(CP) $(PKG_INSTALL_DIR)/usr/bin/doas $(1)/usr/bin/
include $(TOPDIR)/rules.mk
PKG_NAME:=poemgr
-PKG_SOURCE_DATE:=2022-03-19
-PKG_SOURCE_VERSION:=77395242f9b3cb6ef003c5ed74f9b5be78404450
+PKG_SOURCE_DATE:=2022-09-20
+PKG_SOURCE_VERSION:=8988247aacdf18013daa7d7931754e338e08b1bd
PKG_RELEASE:=2
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL=https://github.com/blocktrron/poemgr.git
-PKG_MIRROR_HASH:=68034f886af8a4ebfd869af940510f755d660362a6069ac49c1ae6ce16cbd950
+PKG_MIRROR_HASH:=39b355954931299ba5a481d23af7144dad7349354df1b3303836b77fd1fdfbb6
PKG_MAINTAINER:=David Bauer <mail@david-bauer.net>
PKG_LICENSE:=GPL-2.0-only
define Package/prometheus-node-exporter-lua-ltq-dsl
$(call Package/prometheus-node-exporter-lua/Default)
TITLE+= (lantiq dsl collector)
- DEPENDS:=prometheus-node-exporter-lua @(PACKAGE_ltq-adsl-app||PACKAGE_ltq-vdsl-app)
+ DEPENDS:=prometheus-node-exporter-lua ltq-dsl-app
endef
define Package/prometheus-node-exporter-lua-ltq-dsl/install
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://github.com/Hurricos/realtek-poe.git
-PKG_SOURCE_VERSION:=382c60e7549948399b4244f7149cf76eaf5f0a7d
-PKG_MIRROR_HASH:=442a01521c9a056e137e010be492ed5be0a39c3b68a2fa0ca0f0dbb765d9c74c
+PKG_SOURCE_VERSION:=39c93d39dd10da77b4fe48bc1d6bdd3c5978f866
+PKG_MIRROR_HASH:=ad9652dda8d77281e4724e0104552e18e521cedd4e24f56b0483cf6c5ee5ff69
CMAKE_SOURCE_SUBDIR:=src
include $(INCLUDE_DIR)/package.mk
PKG_NAME:=rtty
PKG_VERSION:=8.0.1
-PKG_RELEASE:=1
+PKG_RELEASE:=2
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL=https://github.com/zhaojh329/rtty/releases/download/v$(PKG_VERSION)
include $(TOPDIR)/rules.mk
PKG_NAME:=runc
-PKG_VERSION:=1.1.2
-PKG_RELEASE:=1
+PKG_VERSION:=1.1.4
+PKG_RELEASE:=$(AUTORELEASE)
PKG_LICENSE:=Apache-2.0
PKG_LICENSE_FILES:=LICENSE
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://codeload.github.com/opencontainers/runc/tar.gz/v${PKG_VERSION}?
-PKG_HASH:=0ccce82b1d9c058d8fd7443d261c96fd7a803f2775bcb1fec2bdb725bc7640f6
+PKG_HASH:=4f02077432642eebd768fc857318ae7929290b3a3511eb1be338005e360cfa34
PKG_MAINTAINER:=Gerard Ryan <G.M0N3Y.2503@gmail.com>
wait = no
user = root
server = /usr/libexec/tangdw
- server_args = /usr/share/tang/cache
+ server_args = /usr/share/tang/db
log_on_success += USERID
log_on_failure += USERID
disable = no
PKG_NAME:=telldus-core
PKG_VERSION:=2.1.2
-PKG_RELEASE:=7
+PKG_RELEASE:=8
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=http://download.telldus.com/TellStick/Software/telldus-core/
Added missing includes required by openwrt. Expected to be portable.
--- a/common/Socket_unix.cpp
+++ b/common/Socket_unix.cpp
-@@ -8,6 +8,7 @@
+@@ -8,9 +8,11 @@
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <fcntl.h>
#include <math.h>
++#include <cstring>
+ #include <string>
+
+ #include "common/Socket.h"
--- a/service/ConnectionListener_unix.cpp
+++ b/service/ConnectionListener_unix.cpp
@@ -13,6 +13,7 @@
include $(TOPDIR)/rules.mk
PKG_NAME:=vim
-PKG_VERSION:=8.2
-PKG_RELEASE:=5
-VIMVER:=82
+PKG_VERSION:=9.0
+PKG_RELEASE:=1
+VIMVER:=90
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.bz2
PKG_SOURCE_URL:=http://ftp.vim.org/pub/vim/unix
-PKG_HASH:=f087f821831b4fece16a0461d574ccd55a8279f64d635510a1e10225966ced3b
+PKG_HASH:=a6456bc154999d83d0c20d968ac7ba6e7df0d02f3cb6427fb248660bacfb336e
PKG_MAINTAINER:=Marko Ratkaj <markoratkaj@gmail.com>
PKG_CPE_ID:=cpe:/a:vim:vim
--disable-cscope \
--disable-gpm \
--disable-acl \
+ --disable-libsodium \
--disable-selinux \
--with-tlib=ncurses \
--with-compiledby="non-existent-hostname-compiled"
+++ /dev/null
-From: James McCoy <jamessan@jamessan.com>
-Date: Thu, 28 Jan 2016 10:55:11 -0500
-Subject: Support defining compilation date in $SOURCE_DATE_EPOCH
-
-There is an ongoing effort[0] to make FOSS software reproducibly
-buildable. In order to make Vim build reproducibly, it is necessary to
-allow defining the date/time that is part of VIM_VERSION_LONG as part of
-the build process.
-
-This commit enables that by adding support for the SOURCE_DATE_EPOCH
-spec[1]. When the $SOURCE_DATE_EPOCH environment variable is defined,
-it will be used to populate the BUILD_DATE preprocessor define.
-
-If BUILD_DATE is not defined, the existing behavior of relying on the
-preprocessor's __DATE__/__TIME__ symbols will be used.
-
-[0]: https://reproducible-builds.org/
-[1]: https://reproducible-builds.org/specs/source-date-epoch/
----
- src/config.h.in | 3 +++
- src/configure.ac | 10 ++++++++++
- src/version.c | 8 ++++++++
- 3 files changed, 21 insertions(+)
-
---- a/src/config.h.in
-+++ b/src/config.h.in
-@@ -30,6 +30,9 @@
- /* Define when __DATE__ " " __TIME__ can be used */
- #undef HAVE_DATE_TIME
-
-+/* Defined as the date of last modification */
-+#undef BUILD_DATE
-+
- /* Define when __attribute__((unused)) can be used */
- #undef HAVE_ATTRIBUTE_UNUSED
-
---- a/src/configure.ac
-+++ b/src/configure.ac
-@@ -62,6 +62,16 @@ if test x"$ac_cv_prog_cc_c99" != xno; th
- fi
- fi
-
-+dnl If $SOURCE_DATE_EPOCH is present in the environment, use that as the
-+dnl "compiled" timestamp in :version's output. Attempt to get the formatted
-+dnl date using GNU date syntax, BSD date syntax, and finally falling back to
-+dnl just using the current time.
-+if test -n "$SOURCE_DATE_EPOCH"; then
-+ DATE_FMT="%b %d %Y %H:%M:%S"
-+ BUILD_DATE=$(LC_ALL=C date -u -d "@$SOURCE_DATE_EPOCH" "+$DATE_FMT" 2>/dev/null || LC_ALL=C date -u -r "$SOURCE_DATE_EPOCH" "+$DATE_FMT" 2>/dev/null || LC_ALL=C date -u "+$DATE_FMT")
-+ AC_DEFINE_UNQUOTED(BUILD_DATE, ["$BUILD_DATE"])
-+fi
-+
- dnl Check for the flag that fails if stuff are missing.
-
- AC_MSG_CHECKING(--enable-fail-if-missing argument)
---- a/src/version.c
-+++ b/src/version.c
-@@ -44,9 +44,13 @@ init_longVersion(void)
- * VAX C can't concatenate strings in the preprocessor.
- */
- strcpy(longVersion, VIM_VERSION_LONG_DATE);
-+#ifdef BUILD_DATE
-+ strcat(longVersion, BUILD_DATE);
-+#else
- strcat(longVersion, __DATE__);
- strcat(longVersion, " ");
- strcat(longVersion, __TIME__);
-+#endif
- strcat(longVersion, ")");
- }
-
-@@ -54,7 +58,11 @@ init_longVersion(void)
- void
- init_longVersion(void)
- {
-+#ifdef BUILD_DATE
-+ char *date_time = BUILD_DATE;
-+#else
- char *date_time = __DATE__ " " __TIME__;
-+#endif
- char *msg = _("%s (%s, compiled %s)");
- size_t len = strlen(msg)
- + strlen(VIM_VERSION_LONG_ONLY)
--- a/runtime/doc/Makefile
+++ b/runtime/doc/Makefile
-@@ -323,10 +323,6 @@ all: tags vim.man evim.man vimdiff.man v
+@@ -345,7 +345,6 @@ all: tags vim.man evim.man vimdiff.man v
# Use Vim to generate the tags file. Can only be used when Vim has been
# compiled and installed. Supports multiple languages.
vimtags: $(DOCS)
-- @if command -v $(VIMEXE); then \
-- $(VIMEXE) --clean -eX -u doctags.vim >/dev/null && \
-- echo "help tags updated"; \
-- else echo "vim executable $(VIMEXE) not found; help tags not updated"; fi
+- @$(VIMEXE) --clean -esX -V1 -u doctags.vim
# Use "doctags" to generate the tags file. Only works for English!
tags: doctags $(DOCS)
+++ /dev/null
-From 5289783e0b07cfc3f92ee933261ca4c4acdca007 Mon Sep 17 00:00:00 2001
-From: Bram Moolenaar <Bram@vim.org>
-Date: Thu, 2 Jul 2020 22:50:37 +0200
-Subject: [PATCH] patch 8.2.1119: configure fails with Xcode 12 beta
-
-Problem: Configure fails with Xcode 12 beta.
-Solution: use "return" instead of "exit()". (Nico Weber, closes #6381)
----
- src/auto/configure | 4 ++--
- src/configure.ac | 4 ++--
- src/version.c | 2 ++
- 3 files changed, 6 insertions(+), 4 deletions(-)
-
---- a/src/auto/configure
-+++ b/src/auto/configure
-@@ -14005,8 +14005,8 @@ else
- main() {
- uint32_t nr1 = (uint32_t)-1;
- uint32_t nr2 = (uint32_t)0xffffffffUL;
-- if (sizeof(uint32_t) != 4 || nr1 != 0xffffffffUL || nr2 + 1 != 0) exit(1);
-- exit(0);
-+ if (sizeof(uint32_t) != 4 || nr1 != 0xffffffffUL || nr2 + 1 != 0) return 1;
-+ return 0;
- }
- _ACEOF
- if ac_fn_c_try_run "$LINENO"; then :
---- a/src/configure.ac
-+++ b/src/configure.ac
-@@ -4129,8 +4129,8 @@ AC_TRY_RUN([
- main() {
- uint32_t nr1 = (uint32_t)-1;
- uint32_t nr2 = (uint32_t)0xffffffffUL;
-- if (sizeof(uint32_t) != 4 || nr1 != 0xffffffffUL || nr2 + 1 != 0) exit(1);
-- exit(0);
-+ if (sizeof(uint32_t) != 4 || nr1 != 0xffffffffUL || nr2 + 1 != 0) return 1;
-+ return 0;
- }],
- AC_MSG_RESULT(ok),
- AC_MSG_ERROR([WRONG! uint32_t not defined correctly.]),
include $(TOPDIR)/rules.mk
PKG_NAME:=xz
-PKG_VERSION:=5.2.5
-PKG_RELEASE:=3
+PKG_VERSION:=5.2.6
+PKG_RELEASE:=1
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.xz
PKG_SOURCE_URL:=@SF/lzmautils
-PKG_HASH:=3e1e518ffc912f86608a8cb35e4bd41ad1aec210df2a47aaa1f95e7f5576ef56
+PKG_HASH:=e076ba3439cb7cfc45b908c869f51a8c89f3c9ee9ee982fde28849c015e723a7
PKG_MAINTAINER:=
PKG_LICENSE:=Public-Domain LGPL-2.1-or-later GPL-2.0-or-later GPL-3.0-or-later
include $(TOPDIR)/rules.mk
PKG_NAME:=yq
-PKG_VERSION:=4.27.3
+PKG_VERSION:=4.27.5
PKG_RELEASE:=$(AUTORELEASE)
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://codeload.github.com/mikefarah/yq/tar.gz/v$(PKG_VERSION)?
-PKG_HASH:=21865c7db6f0aa4d019106f8b7bfceb0ca746ca0265f4d61c855edb0ed41b17d
+PKG_HASH:=0b9ed8759c53534978a661786845eb3c6ec425aee15bab4742d1bead73e28150
PKG_MAINTAINER:=Tianling Shen <cnsztl@immortalwrt.org>
PKG_LICENSE:=MIT
include $(TOPDIR)/rules.mk
PKG_NAME:=zoneinfo
-PKG_VERSION:=2022c
+PKG_VERSION:=2022d
PKG_RELEASE:=1
#As i couldn't find real license used "Public Domain"
PKG_SOURCE:=tzdata$(PKG_VERSION).tar.gz
PKG_SOURCE_CODE:=tzcode$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=http://www.iana.org/time-zones/repository/releases
-PKG_HASH:=6974f4e348bf2323274b56dff9e7500247e3159eaa4b485dfa0cd66e75c14bfe
+PKG_HASH:=6ecdbee27fa43dcfa49f3d4fd8bb1dfef54c90da1abcd82c9abcf2dc4f321de0
include $(INCLUDE_DIR)/package.mk
define Download/tzcode
FILE=$(PKG_SOURCE_CODE)
URL=$(PKG_SOURCE_URL)
- HASH:=3e7ce1f3620cc0481907c7e074d69910793285bffe0ca331ef1a6d1ae3ea90cc
+ HASH:=d644ba0f938899374ea8cb554e35fb4afa0f7bd7b716c61777cd00500b8759e0
endef
$(eval $(call Download,tzcode))