PKG_NPM_NAME:=javascript-obfuscator
PKG_NAME:=node-$(PKG_NPM_NAME)
-PKG_VERSION:=0.24.0
+PKG_VERSION:=0.27.2
PKG_RELEASE:=1
PKG_SOURCE:=$(PKG_NPM_NAME)-$(PKG_VERSION).tgz
PKG_SOURCE_URL:=https://registry.npmjs.org/$(PKG_NPM_NAME)/-/
-PKG_HASH:=2312220a65b8d03195837bb8e2555115c9c9910eb08d3077297f5b9133570c26
+PKG_HASH:=0180bff7f2bbebc353d7c117d87cd66f7b136b7ea3f4c21b0f39bff5e5c99917
PKG_BUILD_DEPENDS:=node/host
HOST_BUILD_PARALLEL:=1
PECL_NAME:=krb5
PECL_LONGNAME:=Bindings for the Kerberos library
-PKG_VERSION:=1.1.2
-PKG_RELEASE:=4
-PKG_HASH:=3301e047fc7dc3574da19b2a4b18e15feca5ad39db9335c3353a8e16b855c35b
+PKG_VERSION:=1.1.3
+PKG_RELEASE:=1
+PKG_HASH:=3a65bb5f7d64ed5f6fbcf07250c17b0de29f693ac88f96cd1cc9737b996da120
PKG_NAME:=php7-pecl-krb5
PKG_SOURCE:=$(PECL_NAME)-$(PKG_VERSION).tgz
PECL_NAME:=redis
PECL_LONGNAME:=PHP extension for interfacing with Redis
-PKG_VERSION:=5.1.1
-PKG_RELEASE:=2
-PKG_HASH:=621c9d2b4054c797b0e5d5bc5e0f1eeb49bedb37f20e46f838aa4d17d2fe8180
+PKG_VERSION:=5.2.1
+PKG_RELEASE:=1
+PKG_HASH:=c05af6254d704844303fbe8b13b2d54bdb4372b1a081934e67cac42ec79ecbbe
PKG_NAME:=php7-pecl-redis
PKG_SOURCE:=$(PECL_NAME)-$(PKG_VERSION).tgz
include $(TOPDIR)/rules.mk
PKG_NAME:=python-wcwidth
-PKG_VERSION:=0.1.8
+PKG_VERSION:=0.1.9
PKG_RELEASE:=1
PYPI_NAME:=wcwidth
-PKG_HASH:=f28b3e8a6483e5d49e7f8949ac1a78314e740333ae305b4ba5defd3e74fb37a8
+PKG_HASH:=ee73862862a156bf77ff92b09034fc4825dd3af9cf81bc5b360668d425f3c5f1
PKG_MAINTAINER:=Jan Pavlinec <jan.pavlinec@nic.cz>
PKG_LICENSE:=MIT
include $(TOPDIR)/rules.mk
PKG_NAME:=vala
-PKG_VERSION:=0.48.0
+PKG_VERSION:=0.48.2
PKG_RELEASE:=1
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.xz
PKG_SOURCE_URL:=@GNOME/vala/0.48
-PKG_HASH:=0926b29614c82a67e36e95996e905ad052f7f7b02fe855b2d17bd14e155e10cc
+PKG_HASH:=f095b0e624b8f4e5a426028ac255e477fad8c3b4c8cbbdebda8d6cd95bf79477
PKG_MAINTAINER:=
PKG_LICENSE:=LGPL-2.1-or-later
--disable-valadoc \
--without-cgraph
+HOST_LDFLAGS += \
+ -Wl,--rpath-link=$(STAGING_DIR_HOSTPKG)/lib
+
$(eval $(call HostBuild))
$(eval $(call BuildPackage,vala))
- log
- math
- program_options
- - python
- python3
- random
- regex
See more at http://www.boost.org/doc/libs/1_72_0/
endef
-PKG_BUILD_DEPENDS:=boost/host PACKAGE_python:python PACKAGE_python3:python3
-
-include ../../lang/python/python-version.mk
-BOOST_PYTHON_VER=$(PYTHON_VERSION)
+PKG_BUILD_DEPENDS:=boost/host PACKAGE_python3:python3
include ../../lang/python/python3-version.mk
BOOST_PYTHON3_VER=$(PYTHON3_VERSION)
$(foreach lib,$(BOOST_LIBS), \
config PACKAGE_boost-$(lib)
- prompt "Boost $(lib) $(if $(findstring python,$(lib)),$(paren_left)v$(if $(findstring 3,$(lib)),$(BOOST_PYTHON3_VER),$(BOOST_PYTHON_VER))$(paren_right) ,)library."
+ prompt "Boost $(lib) $(if $(findstring python3,$(lib)),$(paren_left)v$(BOOST_PYTHON3_VER)$(paren_right) ,)library."
default m if ALL
$(if $(findstring locale,$(lib)),depends on BUILD_NLS,)\
- $(if $(findstring python,$(lib)),depends on PACKAGE_$(lib),)\
+ $(if $(findstring python3,$(lib)),depends on PACKAGE_python3,)\
$(if $(findstring fiber,$(lib)),depends on !boost-fiber-exclude,)\
$(if $(findstring context,$(lib)),depends on !boost-context-exclude,)
$(if $(findstring coroutine,$(lib)),depends on !boost-coroutine-exclude,)
$(eval $(call DefineBoostLibrary,math))
#$(eval $(call DefineBoostLibrary,mpi,,)) # OpenMPI does not exist in OpenWrt at this time.
$(eval $(call DefineBoostLibrary,program_options))
-$(eval $(call DefineBoostLibrary,python,,,PACKAGE_python))
$(eval $(call DefineBoostLibrary,python3,,,PACKAGE_python3))
$(eval $(call DefineBoostLibrary,random,system))
$(eval $(call DefineBoostLibrary,regex))
$(if $(CONFIG_PACKAGE_boost-test),,--without-test) \
--without-python \
$(foreach lib,$(BOOST_LIBS), \
- $(if $(findstring python,$(lib)),, \
+ $(if $(findstring python3,$(lib)),, \
$(if $(CONFIG_PACKAGE_boost-$(lib)),, \
$(if $(findstring wserialization,$(lib)),,--without-$(lib)) \
) \
$(if $(CONFIG_PACKAGE_boost-iostreams),-sNO_BZIP2=1 -sZLIB_INCLUDE=$(STAGING_DIR)/usr/include \
-sZLIB_LIBPATH=$(STAGING_DIR)/usr/lib) \
install ;\
- $(if $(CONFIG_PACKAGE_boost-python), \
- echo "using gcc : $(GCC_VERSION) : $(GNU_TARGET_NAME)-gcc : <compileflags>\"$(TARGET_CFLAGS) -I$(STAGING_DIR)/usr/include/python$(BOOST_PYTHON_VER)/ \" <cxxflags>\"$(TARGET_CXXFLAGS) $(EXTRA_CXXFLAGS)\" <linkflags>\"$(TARGET_LDFLAGS)\" ;" > \
- tools/build/src/user-config.jam ; \
- echo "using python : $(BOOST_PYTHON_VER) : : $(STAGING_DIR)/usr/include/python$(BOOST_PYTHON_VER)/ : $(STAGING_DIR)/usr/lib/libpython$(BOOST_PYTHON_VER).so ;" >> \
- tools/build/src/user-config.jam; \
- b2 -a \
- $(CONFIGURE_ARGS) \
- --ignore-site-config \
- --toolset=gcc abi=$(BOOST_ABI) \
- --disable-long-double \
- $(if $(CONFIG_boost-variant-release), variant=release,) \
- $(if $(CONFIG_boost-variant-debug), variant=debug,) \
- $(if $(CONFIG_boost-variant-profile), variant=profile,) \
- $(if $(CONFIG_boost-use-name-tags),--layout=tagged,--layout=system) \
- $(if $(CONFIG_boost-build-type-complete),--build-type=complete,--build-type=minimal) \
- $(if $(CONFIG_boost-shared-libs),link=shared,) \
- $(if $(CONFIG_boost-static-libs),link=static,) \
- $(if $(CONFIG_boost-static-and-shared-libs),link=static$(comma)shared,) \
- $(if $(CONFIG_boost-runtime-shared),runtime-link=shared,) \
- $(if $(CONFIG_boost-runtime-static),runtime-link=static,) \
- $(if $(CONFIG_boost-runtime-static-and-shared),runtime-link=shared$(comma)static,) \
- $(if $(CONFIG_boost-single-thread),threading=single,) \
- threading=multi \
- --with-python \
- install ;\
- ,) \
$(if $(CONFIG_PACKAGE_boost-python3), \
echo "using gcc : $(GCC_VERSION) : $(GNU_TARGET_NAME)-gcc : <compileflags>\"$(TARGET_CFLAGS) -I$(STAGING_DIR)/usr/include/python$(BOOST_PYTHON3_VER)/ \" <cxxflags>\"$(TARGET_CXXFLAGS) $(EXTRA_CXXFLAGS)\" <linkflags>\"$(TARGET_LDFLAGS)\" ;" > \
tools/build/src/user-config.jam ; \
define Package/boost/Default/install
$(INSTALL_DIR) $(1)/usr/lib
- $(if $(findstring python,$(2)), $(if $(findstring 3,$(2)), \
- $(CP) $(PKG_INSTALL_DIR)/lib/libboost_python3*.so* $(1)/usr/lib/ , \
- $(CP) $(PKG_INSTALL_DIR)/lib/libboost_python2*.so* $(1)/usr/lib/ ), \
- $(CP) $(PKG_INSTALL_DIR)/lib/libboost_$(2)*.so* $(1)/usr/lib/ )
+ $(CP) $(PKG_INSTALL_DIR)/lib/libboost_$(2)*.so* $(1)/usr/lib/
endef
define Package/boost-test/install
PKG_LICENSE:=BSD-3-Clause
PKG_LICENSE_FILES:=LICENSE.md
PKG_VERSION:=0.8.21
-PKG_RELEASE:=4
+PKG_RELEASE:=5
# Use this for official releases
PKG_HASH:=51892570f18d1667d0da4d0908a091e41b41c20db9835765677109a3d150cd26
This package contains support for storing the key database as yaml files.
endef
-
-define Package/libelektra-python2
- $(call Package/libelektra/Default)
- TITLE:=Elektra python2 plugin
- DEPENDS:=+libelektra-core +python-light +libstdcpp
-endef
-
-define Package/libelektra-python2/description
-$(call Package/libelektra/Default-description)
-
-This package adds python2 support to elektra.
-endef
-
define Package/libelektra-python3
$(call Package/libelektra/Default)
TITLE:=Elektra python3 plugin
-DKDB_DEFAULT_RESOLVER=resolver_fm_pb_b \
-DKDB_DEFAULT_STORAGE=ini \
-DENABLE_OPTIMIZATIONS=OFF \
- -DPLUGINS="ALL;-multifile" \
+ -DPLUGINS="ALL;-multifile;-python2" \
-DICONV_FIND_REQUIRED=ON \
-DICONV_INCLUDE_DIR="$(ICONV_PREFIX)/include" \
-DICONV_LIBRARY="$(ICONV_PREFIX)/lib"
$(CP) $(PKG_INSTALL_DIR)/usr/lib/libelektra-yamlcpp.so $(1)/usr/lib/
endef
-define Package/libelektra-python2/install
- $(INSTALL_DIR) $(1)/usr/lib/
- $(CP) $(PKG_INSTALL_DIR)/usr/lib/libelektra-python2.so $(1)/usr/lib/
-endef
-
define Package/libelektra-python3/install
$(INSTALL_DIR) $(1)/usr/lib/
$(CP) $(PKG_INSTALL_DIR)/usr/lib/libelektra-python.so $(1)/usr/lib/
$(eval $(call BuildPackage,libelektra-yamlcpp))
$(eval $(call BuildPackage,libelektra-xml))
$(eval $(call BuildPackage,libelektra-yajl))
-$(eval $(call BuildPackage,libelektra-python2))
$(eval $(call BuildPackage,libelektra-python3))
$(eval $(call BuildPackage,libelektra-lua))
$(eval $(call BuildPackage,libelektra-extra))
include $(TOPDIR)/rules.mk
PKG_NAME:=gnutls
-PKG_VERSION:=3.6.12
+PKG_VERSION:=3.6.13
PKG_RELEASE:=1
PKG_USE_MIPS16:=0
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.xz
PKG_SOURCE_URL:=https://www.gnupg.org/ftp/gcrypt/gnutls/v3.6
-PKG_HASH:=bfacf16e342949ffd977a9232556092c47164bd26e166736cf3459a870506c4b
+PKG_HASH:=32041df447d9f4644570cf573c9f60358e865637d69b7e59d1159b7240b52f38
#PKG_FIXUP:=autoreconf gettext-version
PKG_MAINTAINER:=Nikos Mavrogiannopoulos <nmav@gnutls.org>
PKG_LICENSE:=LGPL-2.1-or-later
+++ /dev/null
-#
-# Copyright (C) 2011-2017 OpenWrt.org
-#
-# This is free software, licensed under the GNU General Public License v2.
-# See /LICENSE for more information.
-#
-
-include $(TOPDIR)/rules.mk
-
-PKG_NAME:=jansson
-PKG_VERSION:=2.12
-PKG_RELEASE:=1
-PKG_LICENSE:=MIT
-
-PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.bz2
-PKG_SOURCE_URL:=http://www.digip.org/jansson/releases/
-PKG_HASH:=645d72cc5dbebd4df608d33988e55aa42a7661039e19a379fcbe5c79d1aee1d2
-
-PKG_INSTALL:=1
-PKG_BUILD_PARALLEL:=1
-
-include $(INCLUDE_DIR)/package.mk
-
-define Package/jansson
- SECTION:=libs
- CATEGORY:=Libraries
- TITLE:=Jansson library
- URL:=http://www.digip.org/jansson/
- MAINTAINER:=Gergely Kiss <mail.gery@gmail.com>
-endef
-
-define Package/jansson/description
- Jansson is a C library for encoding, decoding and manipulating JSON data
-endef
-
-TARGET_CFLAGS += $(FPIC)
-TARGET_LDFLAGS += -Wl,-rpath-link=$(STAGING_DIR)/usr/lib -lm
-
-define Build/InstallDev
- $(INSTALL_DIR) $(1)/usr/{lib,include}
- $(INSTALL_DIR) $(1)/usr/lib/pkgconfig
- $(CP) $(PKG_INSTALL_DIR)/usr/lib/libjansson* $(1)/usr/lib
- $(CP) $(PKG_INSTALL_DIR)/usr/lib/pkgconfig/* $(1)/usr/lib/pkgconfig
- $(CP) $(PKG_INSTALL_DIR)/usr/include/* $(1)/usr/include/
-endef
-
-define Package/jansson/install
- $(INSTALL_DIR) $(1)/usr/lib
- $(CP) $(PKG_INSTALL_DIR)/usr/lib/libjansson*so* $(1)/usr/lib/
-endef
-
-$(eval $(call BuildPackage,jansson))
include $(TOPDIR)/rules.mk
PKG_NAME:=libgd
-PKG_VERSION:=2.2.5
-PKG_RELEASE:=6
+PKG_VERSION:=2.3.0
+PKG_RELEASE:=1
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.xz
PKG_SOURCE_URL:=https://github.com/$(PKG_NAME)/$(PKG_NAME)/releases/download/gd-$(PKG_VERSION)/
-PKG_HASH:=8c302ccbf467faec732f0741a859eef4ecae22fea2d2ab87467be940842bde51
+PKG_HASH:=ecd9155b9a417fb3f837f29e5966323796de247789163761dd72dbf83bfcac58
PKG_MAINTAINER:=Jo-Philipp Wich <jo@mein.io>
PKG_LICENSE:=MIT
PKG_LICENSE_FILES:=COPYING
-PKG_CPE_ID:=cpe:/a:libgd:gd_graphics_library
+PKG_CPE_ID:=cpe:/a:libgd:libgd
CMAKE_INSTALL:=1
PKG_BUILD_PARALLEL:=1
-DENABLE_JPEG=ON \
-DENABLE_LIQ=OFF \
-DENABLE_PNG=ON \
+ -DENABLE_TIFF=$(if $(CONFIG_LIBGD_TIFF),ON,OFF) \
-DENABLE_WEBP=ON \
-DENABLE_XPM=OFF \
-DZLIB_INCLUDE_DIR="$(STAGING_DIR)/usr"
else
-ifdef CONFIG_LIBGD_TIFF
- CMAKE_OPTIONS += \
- -DENABLE_TIFF=ON
-else
- CMAKE_OPTIONS += \
- -DENABLE_TIFF=OFF
-endif
-
ifdef CONFIG_LIBGD_FREETYPE
CMAKE_OPTIONS += \
-DFREETYPE_INCLUDE_DIRS=$(STAGING_DIR)/usr/include/freetype2/ \
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
-@@ -23,8 +23,6 @@ SET (LIBGD_SRC_FILES
+@@ -22,8 +22,6 @@ SET (LIBGD_SRC_FILES
gd_io_dp.c
gd_io_file.c
gd_io_ss.c
gd_jpeg.c
gd_matrix.c
gd_nnquant.c
-@@ -60,8 +58,6 @@ SET (LIBGD_SRC_FILES
+@@ -59,8 +57,6 @@ SET (LIBGD_SRC_FILES
gdhelpers.c
gdhelpers.h
gdkanji.c
gdtables.c
gdxpm.c
jisx0208.h
-@@ -175,5 +171,4 @@ install(FILES
+@@ -198,7 +194,6 @@ install(FILES
gdfonts.h
gdfontt.h
gdfx.h
- gdpp.h
DESTINATION include)
+
+ CONFIGURE_FILE(../config/gdlib.pc.cmake gdlib.pc @ONLY)
+++ /dev/null
-From 5ebbd50cffc013a7dd0f3b1eaaa83d199e8e47fd Mon Sep 17 00:00:00 2001
-From: Mike Frysinger <vapier@gentoo.org>
-Date: Sun, 24 Jul 2016 00:14:20 +0530
-Subject: [PATCH] cmake: add soname info to libgd.so
-
-Pull out the library versioning info out of configure and into a common
-script that both cmake & autotools can run. This way we have a single
-source of truth for the versioning info.
----
- CMakeLists.txt | 11 +++++++++++
- config/getlib.sh | 42 ++++++++++++++++++++++++++++++++++++++++++
- configure.ac | 25 +++++++++++--------------
- src/CMakeLists.txt | 2 ++
- 4 files changed, 66 insertions(+), 14 deletions(-)
- create mode 100755 config/getlib.sh
-
-diff --git a/CMakeLists.txt b/CMakeLists.txt
-index 7c8ad34b..9fe2eb4e 100644
---- a/CMakeLists.txt
-+++ b/CMakeLists.txt
-@@ -78,6 +78,17 @@ else (USE_EXT_GD)
-
- SET(GD_VERSION_INT "2020555")
-
-+ MACRO(GV_LT VER VAR)
-+ execute_process(
-+ COMMAND sh ${CMAKE_CURRENT_SOURCE_DIR}/config/getlib.sh ${VER}
-+ OUTPUT_VARIABLE ${VAR}
-+ )
-+ ENDMACRO(GV_LT)
-+
-+ GV_LT(SONAME GDLIB_LIB_SOVERSION)
-+ GV_LT(VERSION GDLIB_LIB_VERSION)
-+ MESSAGE(STATUS "gd shared lib version ${GDLIB_LIB_SOVERSION} (${GDLIB_LIB_VERSION})")
-+
- SET(CMAKE_REQUIRED_INCLUDES "/usr/include" "/usr/local/include")
-
- include(CheckIncludeFiles)
-diff --git a/config/getlib.sh b/config/getlib.sh
-new file mode 100755
-index 00000000..4835cf6c
---- /dev/null
-+++ b/config/getlib.sh
-@@ -0,0 +1,42 @@
-+#!/bin/sh
-+
-+GETVER="${0%/*}/getver.pl"
-+GDLIB_MAJOR=$("${GETVER}" MAJOR)
-+GDLIB_MINOR=$("${GETVER}" MINOR)
-+GDLIB_REVISION=$("${GETVER}" RELEASE)
-+
-+# Dynamic library version information
-+# See http://www.gnu.org/software/libtool/manual/libtool.html#Updating-version-info
-+
-+GDLIB_LT_CURRENT=3
-+# This is the version where the soname (current above) changes. We use it
-+# to reset the revision base back to zero. It's a bit of a pain, but some
-+# systems restrict the revision range below to [0..255] (like OS X).
-+GDLIB_PREV_MAJOR=2
-+GDLIB_PREV_MINOR=2
-+# This isn't 100% correct, but it tends to be a close enough approximation
-+# for how we manage the codebase. It's rare to do a release that doesn't
-+# modify the library since this project is centered around the library.
-+GDLIB_LT_REVISION=$(( ((GDLIB_MAJOR - GDLIB_PREV_MAJOR) << 6) | ((GDLIB_MINOR - GDLIB_PREV_MINOR) << 3) | GDLIB_REVISION ))
-+GDLIB_LT_AGE=0
-+
-+# The first three fields we feed into libtool and the OS target determines how
-+# they get used. The last two fields we feed into cmake. We use the same rules
-+# as Linux SONAME versioning in libtool, but cmake should handle it for us.
-+case $1 in
-+CURRENT)
-+ printf '%s' "${GDLIB_LT_CURRENT}"
-+ ;;
-+REVISION)
-+ printf '%s' "${GDLIB_LT_REVISION}"
-+ ;;
-+AGE)
-+ printf '%s' "${GDLIB_LT_AGE}"
-+ ;;
-+VERSION)
-+ printf '%s' "$(( GDLIB_LT_CURRENT - GDLIB_LT_AGE )).${GDLIB_LT_AGE}.${GDLIB_LT_REVISION}"
-+ ;;
-+SONAME)
-+ printf '%s' "$(( GDLIB_LT_CURRENT - GDLIB_LT_AGE ))"
-+ ;;
-+esac
-diff --git a/configure.ac b/configure.ac
-index 91643bd6..c3fb034e 100644
---- a/configure.ac
-+++ b/configure.ac
-@@ -34,20 +34,17 @@ AC_SUBST(GDLIB_REVISION)
- AC_SUBST(GDLIB_EXTRA)
- AC_SUBST(GDLIB_VERSION)
-
--# Dynamic library version information
--# See http://www.gnu.org/software/libtool/manual/libtool.html#Updating-version-info
--
--GDLIB_LT_CURRENT=3
--dnl This is the version where the soname (current above) changes. We use it
--dnl to reset the revision base back to zero. It's a bit of a pain, but some
--dnl systems restrict the revision range below to [0..255] (like OS X).
--GDLIB_PREV_MAJOR=2
--GDLIB_PREV_MINOR=2
--dnl This isn't 100% correct, but it tends to be a close enough approximation
--dnl for how we manage the codebase. It's rare to do a release that doesn't
--dnl modify the library since this project is centered around the library.
--GDLIB_LT_REVISION=$(( ((GDLIB_MAJOR - GDLIB_PREV_MAJOR) << 6) | ((GDLIB_MINOR - GDLIB_PREV_MINOR) << 3) | GDLIB_REVISION ))
--GDLIB_LT_AGE=0
-+dnl Keep the libtool version details in an external script so cmake can
-+dnl access the values too.
-+define([lt_gv], [config/getlib.sh ]$1)
-+m4_define([gd_LT_CURRENT], esyscmd(lt_gv(CURRENT)))
-+m4_define([gd_LT_REVISION], esyscmd(lt_gv(REVISION)))
-+m4_define([gd_LT_AGE], esyscmd(lt_gv(AGE)))
-+
-+GDLIB_LT_CURRENT=gd_LT_CURRENT
-+GDLIB_LT_REVISION=gd_LT_REVISION
-+GDLIB_LT_AGE=gd_LT_AGE
-+
- AC_SUBST(GDLIB_LT_CURRENT)
- AC_SUBST(GDLIB_LT_REVISION)
- AC_SUBST(GDLIB_LT_AGE)
-diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
-index 08fd6991..a621fe1e 100644
---- a/src/CMakeLists.txt
-+++ b/src/CMakeLists.txt
-@@ -76,6 +76,8 @@ include(GNUInstallDirs)
- if (BUILD_SHARED_LIBS)
- add_library(${GD_LIB} ${LIBGD_SRC_FILES})
- set_target_properties(${GD_LIB} PROPERTIES
-+ SOVERSION ${GDLIB_LIB_SOVERSION}
-+ VERSION ${GDLIB_LIB_VERSION}
- C_VISIBILITY_PRESET hidden
- CXX_VISIBILITY_PRESET hidden
- )
+++ /dev/null
-From dd76e8fcf2a2d7e122110444695ad20f2549420e Mon Sep 17 00:00:00 2001
-From: Mike Frysinger <vapier@gentoo.org>
-Date: Wed, 10 Jan 2018 01:56:10 -0500
-Subject: [PATCH] generate & install gdlib.pc for cmake builds too #164
-
----
- config/gdlib.pc.cmake | 9 +++++++++
- src/CMakeLists.txt | 3 +++
- 2 files changed, 12 insertions(+)
- create mode 100644 config/gdlib.pc.cmake
-
-diff --git a/config/gdlib.pc.cmake b/config/gdlib.pc.cmake
-new file mode 100644
-index 00000000..fb828882
---- /dev/null
-+++ b/config/gdlib.pc.cmake
-@@ -0,0 +1,11 @@
-+prefix=@CMAKE_INSTALL_PREFIX@
-+exec_prefix=@CMAKE_INSTALL_PREFIX@
-+libdir=${exec_prefix}/@CMAKE_INSTALL_LIBDIR@
-+includedir=${prefix}/@CMAKE_INSTALL_INCLUDEDIR@
-+
-+Name: gd
-+Description: GD graphics library
-+Version: @GDLIB_VERSION@
-+Cflags: -I${includedir}
-+Libs.private: @LIBGD_DEP_LIBS@
-+Libs: -L${libdir} -lgd
-diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
-index a621fe1e..38fc9c42 100644
---- a/src/CMakeLists.txt
-+++ b/src/CMakeLists.txt
-@@ -174,3 +174,6 @@ install(FILES
- gdfontt.h
- gdfx.h
- DESTINATION include)
-+
-+CONFIGURE_FILE(../config/gdlib.pc.cmake gdlib.pc @ONLY)
-+INSTALL(FILES "${CMAKE_CURRENT_BINARY_DIR}/gdlib.pc" DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig")
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
-@@ -89,8 +89,6 @@
+@@ -94,8 +94,6 @@ else (USE_EXT_GD)
GV_LT(VERSION GDLIB_LIB_VERSION)
MESSAGE(STATUS "gd shared lib version ${GDLIB_LIB_SOVERSION} (${GDLIB_LIB_VERSION})")
+++ /dev/null
---- a/config/gdlib.pc.cmake
-+++ b/config/gdlib.pc.cmake
-@@ -5,7 +5,7 @@ includedir=${prefix}/@CMAKE_INSTALL_INCL
-
- Name: gd
- Description: GD graphics library
--Version: @GDLIB_VERSION@
-+Version: @GD_VERSION@
- Cflags: -I${includedir}
- Libs.private: @LIBGD_DEP_LIBS@
- Libs: -L${libdir} -lgd
include $(TOPDIR)/rules.mk
PKG_NAME:=libhttp-parser
-PKG_VERSION:=2.9.3
+PKG_VERSION:=2.9.4
PKG_RELEASE:=1
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://codeload.github.com/nodejs/http-parser/tar.gz/v$(PKG_VERSION)?
-PKG_HASH:=8fa0ab8770fd8425a9b431fdbf91623c4d7a9cdb842b9339289bd2b0b01b0d3d
+PKG_HASH:=467b9e30fd0979ee301065e70f637d525c28193449e1b13fbcb1b1fab3ad224f
PKG_BUILD_DIR:=$(BUILD_DIR)/http-parser-$(PKG_VERSION)
PKG_MAINTAINER:=Ramanathan Sivagurunathan <ramzthecoder@gmail.com>, Hirokazu MORIKAWA <morikw2@gmail.com>
@@ -25,11 +25,7 @@
SOMAJOR = 2
SOMINOR = 9
- SOREV = 3
+ SOREV = 4
-ifeq (darwin,$(PLATFORM))
-SOEXT ?= dylib
-SONAME ?= $(SOLIBNAME).$(SOMAJOR).$(SOMINOR).$(SOEXT)
include $(INCLUDE_DIR)/package.mk
include $(INCLUDE_DIR)/cmake.mk
-include $(TOPDIR)/feeds/packages/lang/python/python-package.mk
include $(TOPDIR)/feeds/packages/lang/python/python3-package.mk
CMAKE_OPTIONS=-DENABLEEXAMPLES=0 \
This package contains the Node.js libraries.
endef
-define Package/libmraa-python
- $(call Package/libmraa/Default)
- TITLE:=Eclipse MRAA lowlevel IO Python library
- DEPENDS:=+libmraa +python-light
-endef
-
-define Package/libmraa-python/description
-$(call Package/libmraa/Default/description)
-
-This package contains the Python libraries.
-endef
-
define Package/libmraa-python3
$(call Package/libmraa/Default)
TITLE:=Eclipse MRAA lowlevel IO Python3 library
$(CP) $(PKG_INSTALL_DIR)/usr/lib/node_modules/mraa/* $(1)/usr/lib/node/mraa/
endef
-define Package/libmraa-python/install
- $(INSTALL_DIR) $(1)/usr/lib/python$(PYTHON_VERSION)/site-packages
- $(CP) $(PKG_INSTALL_DIR)/usr/lib/python$(PYTHON_VERSION)/site-packages/* \
- $(1)/usr/lib/python$(PYTHON_VERSION)/site-packages/
-endef
-
define Package/libmraa-python3/install
$(INSTALL_DIR) $(1)/usr/lib/python$(PYTHON3_VERSION)/site-packages
$(CP) $(PKG_INSTALL_DIR)/usr/lib/python$(PYTHON3_VERSION)/site-packages/* \
$(eval $(call BuildPackage,libmraa))
$(eval $(call BuildPackage,libmraa-node))
-$(eval $(call BuildPackage,libmraa-python))
$(eval $(call BuildPackage,libmraa-python3))
PKG_NAME:=libsearpc
PKG_VERSION:=3.2.0
-PKG_RELEASE:=1
+PKG_RELEASE:=2
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://codeload.github.com/haiwen/libsearpc/tar.gz/v$(PKG_VERSION)?
include $(INCLUDE_DIR)/package.mk
include $(INCLUDE_DIR)/nls.mk
-include ../../lang/python/python-package.mk
include ../../lang/python/python3-package.mk
define Package/libsearpc
$(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/bin/searpc-codegen.py $(1)/usr/bin/
endef
-# Include Python 2 bindings in this package to maintain compatibility with previous versions
define Package/libsearpc/install
$(INSTALL_DIR) $(1)/usr/lib
- $(INSTALL_DIR) $(1)$(PYTHON_PKG_DIR)/pysearpc
$(CP) $(PKG_INSTALL_DIR)/usr/lib/libsearpc.so* $(1)/usr/lib/
- $(INSTALL_DATA) $(PKG_BUILD_DIR)/pysearpc/*.py $(1)$(PYTHON_PKG_DIR)/pysearpc/
- rm -f $(1)$(PYTHON_PKG_DIR)/pysearpc/{pygencode,test_pysearpc}.py
endef
define Py3Build/Compile
PKG_NAME:=talloc
PKG_VERSION:=2.3.1
MAJOR_VERSION:=2
-PKG_RELEASE:=1
+PKG_RELEASE:=2
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://www.samba.org/ftp/talloc
PKG_BUILD_PARALLEL:=0
-HOST_BUILD_DEPENDS:=python/host
-
include $(INCLUDE_DIR)/package.mk
include $(INCLUDE_DIR)/kernel.mk
include $(INCLUDE_DIR)/package.mk
include $(INCLUDE_DIR)/cmake.mk
-include $(TOPDIR)/feeds/packages/lang/python/python-package.mk
include $(TOPDIR)/feeds/packages/lang/python/python3-package.mk
UPM_MODULES:= \
DEPENDS+=+libjpeg +libmodbus +openzwave
DEPENDS+= $(foreach module, $(UPM_MODULES), +libupm-$(module))
DEPENDS+= $(foreach module, $(UPM_MODULES), +libupm-$(module)-node)
- DEPENDS+= $(foreach module, $(UPM_MODULES), +libupm-$(module)-python)
DEPENDS+= $(foreach module, $(UPM_MODULES), +libupm-$(module)-python3)
endef
$(CP) $(PKG_INSTALL_DIR)/usr/lib/node_modules/jsupm_$(2) $(1)/usr/lib/node/ ;
endef
-define Package/libupm/install/Default-python
- $(INSTALL_DIR) $(1)/usr/lib/python$(PYTHON_VERSION)/site-packages/upm
- $(CP) $(CMAKE_BINARY_DIR)/$(if $(filter interfaces, $(2)),,src/)$(2)/python$(PYTHON_VERSION)/pyupm_$(2).py \
- $(1)/usr/lib/python$(PYTHON_VERSION)/site-packages/upm/
- $(CP) $(PKG_INSTALL_DIR)/usr/lib/python$(PYTHON_VERSION)/site-packages/upm/_pyupm_$(2).so \
- $(1)/usr/lib/python$(PYTHON_VERSION)/site-packages/upm/ ;
-endef
-
define Package/libupm/install/Default-python3
$(INSTALL_DIR) $(1)/usr/lib/python$(PYTHON3_VERSION)/site-packages/upm
$(CP) $(CMAKE_BINARY_DIR)/$(if $(filter interfaces, $(2)),,src/)$(2)/python$(PYTHON3_VERSION)/pyupm_$(2).py \
$(foreach module, $(UPM_MODULES), \
$(call Package/libupm/install/Default,$(1),$(module)) \
$(call Package/libupm/install/Default-node,$(1),$(module)) \
- $(call Package/libupm/install/Default-python,$(1),$(module)) \
$(call Package/libupm/install/Default-python3,$(1),$(module)))
endef
This package contains $(1) sensor Node.js library
endef
-define Package/libupm-$(1)-python
- $(call Package/libupm/Default)
- $(call UpmPackage/depends,$(1))
- TITLE:=$(1) Python library
- DEPENDS+=+libupm-$(1) +libmraa-python +python-light
-endef
-
-define Package/libupm-$(1)-python/description
-$(call Package/libupm/Default/description)
-
-This package contains $(1) sensor Python library
-endef
-
define Package/libupm-$(1)-python3
$(call Package/libupm/Default)
$(call UpmPackage/depends,$(1))
define Package/libupm-$(1)-node/install
$(call Package/libupm/install/Default-node,$$(1),$(1))
endef
-define Package/libupm-$(1)-python/install
- $(call Package/libupm/install/Default-python,$$(1),$(1))
-endef
define Package/libupm-$(1)-python3/install
$(call Package/libupm/install/Default-python3,$$(1),$(1))
endef
$(eval $(call UpmPackage,$(module))) \
$(eval $(call BuildPackage,libupm-$(module))) \
$(eval $(call BuildPackage,libupm-$(module)-node)) \
- $(eval $(call BuildPackage,libupm-$(module)-python)) \
$(eval $(call BuildPackage,libupm-$(module)-python3)))
include $(TOPDIR)/rules.mk
PKG_NAME:=libuwsc
-PKG_VERSION:=3.3.2
+PKG_VERSION:=3.3.4
PKG_RELEASE:=1
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL=https://github.com/zhaojh329/libuwsc/releases/download/v$(PKG_VERSION)
-PKG_HASH:=bae2cd13eda86876ebcf99a38a069f5e8c01717713d2fec25031051b9c47624b
+PKG_HASH:=ef789ff35e537e5191ec0d13f3e0df54187a54eace611e283ad4172d4411d08b
PKG_MAINTAINER:=Jianhui Zhao <jianhuizhao329@gmail.com>
PKG_LICENSE:=MIT
PKG_NAME:=v4l-utils
PKG_VERSION:=1.16.6
-PKG_RELEASE:=1
+PKG_RELEASE:=2
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.bz2
PKG_SOURCE_URL:=https://www.linuxtv.org/downloads/v4l-utils
--- /dev/null
+--- a/utils/keytable/keytable.c
++++ b/utils/keytable/keytable.c
+@@ -1686,7 +1686,7 @@ static void test_event(struct rc_device *rc_dev, int fd)
+
+ for (i = 0; i < rd / sizeof(struct input_event); i++) {
+ printf(_("%ld.%06ld: event type %s(0x%02x)"),
+- ev[i].time.tv_sec, ev[i].time.tv_usec,
++ ev[i].input_event_sec, ev[i].input_event_usec,
+ get_event_name(events_type, ev[i].type), ev[i].type);
+
+ switch (ev[i].type) {
include $(TOPDIR)/rules.mk
PKG_NAME:=libxslt
-PKG_VERSION:=1.1.33
-PKG_RELEASE:=4
+PKG_VERSION:=1.1.34
+PKG_RELEASE:=1
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:= \
http://xmlsoft.org/sources/ \
ftp://fr.rpmfind.net/pub/libxml/
-PKG_HASH:=8e36605144409df979cab43d835002f63988f3dc94d5d3537c12796db90e38c8
+PKG_HASH:=98b1bd46d6792925ad2dfe9a87452ea2adebf69dcb9919ffd55bf926a7f93f7f
+PKG_MAINTAINER:=Jiri Slachta <jiri@slachta.eu>
PKG_LICENSE:=MIT
PKG_LICENSE_FILES:=COPYING
PKG_CPE_ID:=cpe:/a:xmlsoft:libxslt
-PKG_MAINTAINER:=Jiri Slachta <jiri@slachta.eu>
-
-PKG_FIXUP:=autoreconf
PKG_INSTALL:=1
-
+PKG_BUILD_PARALLEL:=1
HOST_BUILD_DEPENDS:=libxml2/host
include $(INCLUDE_DIR)/package.mk
XSLT XML transformation utility.
endef
-CONFIGURE_ARGS+= \
- --disable-silent-rules \
+CONFIGURE_ARGS += \
--enable-shared \
--enable-static \
--without-python \
--without-debug \
--without-mem-debug \
--without-debugger \
+ --without-profiler \
+ --without-plugins
+
+HOST_CONFIGURE_ARGS += \
+ --with-libxml-prefix=$(STAGING_DIR_HOSTPKG) \
+ --without-python \
+ --without-crypto \
+ --without-debug \
+ --without-mem-debug \
+ --without-debugger \
+ --without-profiler \
+ --without-plugins
define Build/InstallDev/Xslt
$(INSTALL_DIR) $(1)/usr/bin $(2)/bin $(1)/usr/include/libxslt \
$(if $(CONFIG_PACKAGE_libexslt),$(call Build/InstallDev/Exslt,$(1),$(2)))
endef
-HOST_CONFIGURE_ARGS+= \
- --disable-silent-rules \
- --enable-static \
- --with-libxml-prefix=$(STAGING_DIR_HOSTPKG) \
- --without-python \
- --without-crypto \
- --without-debug \
- --without-mem-debug \
- --without-debugger
-
define Package/libxslt/install
$(INSTALL_DIR) $(1)/usr/lib
$(CP) \
+++ /dev/null
-From e03553605b45c88f0b4b2980adfbbb8f6fca2fd6 Mon Sep 17 00:00:00 2001
-From: Nick Wellnhofer <wellnhofer@aevum.de>
-Date: Sun, 24 Mar 2019 09:51:39 +0100
-Subject: [PATCH] Fix security framework bypass
-
-xsltCheckRead and xsltCheckWrite return -1 in case of error but callers
-don't check for this condition and allow access. With a specially
-crafted URL, xsltCheckRead could be tricked into returning an error
-because of a supposedly invalid URL that would still be loaded
-succesfully later on.
-
-Fixes #12.
-
-Thanks to Felix Wilhelm for the report.
----
- libxslt/documents.c | 18 ++++++++++--------
- libxslt/imports.c | 9 +++++----
- libxslt/transform.c | 9 +++++----
- libxslt/xslt.c | 9 +++++----
- 4 files changed, 25 insertions(+), 20 deletions(-)
-
-diff --git a/libxslt/documents.c b/libxslt/documents.c
-index 3f3a7312..4aad11bb 100644
---- a/libxslt/documents.c
-+++ b/libxslt/documents.c
-@@ -296,10 +296,11 @@ xsltLoadDocument(xsltTransformContextPtr ctxt, const xmlChar *URI) {
- int res;
-
- res = xsltCheckRead(ctxt->sec, ctxt, URI);
-- if (res == 0) {
-- xsltTransformError(ctxt, NULL, NULL,
-- "xsltLoadDocument: read rights for %s denied\n",
-- URI);
-+ if (res <= 0) {
-+ if (res == 0)
-+ xsltTransformError(ctxt, NULL, NULL,
-+ "xsltLoadDocument: read rights for %s denied\n",
-+ URI);
- return(NULL);
- }
- }
-@@ -372,10 +373,11 @@ xsltLoadStyleDocument(xsltStylesheetPtr style, const xmlChar *URI) {
- int res;
-
- res = xsltCheckRead(sec, NULL, URI);
-- if (res == 0) {
-- xsltTransformError(NULL, NULL, NULL,
-- "xsltLoadStyleDocument: read rights for %s denied\n",
-- URI);
-+ if (res <= 0) {
-+ if (res == 0)
-+ xsltTransformError(NULL, NULL, NULL,
-+ "xsltLoadStyleDocument: read rights for %s denied\n",
-+ URI);
- return(NULL);
- }
- }
-diff --git a/libxslt/imports.c b/libxslt/imports.c
-index 874870cc..3783b247 100644
---- a/libxslt/imports.c
-+++ b/libxslt/imports.c
-@@ -130,10 +130,11 @@ xsltParseStylesheetImport(xsltStylesheetPtr style, xmlNodePtr cur) {
- int secres;
-
- secres = xsltCheckRead(sec, NULL, URI);
-- if (secres == 0) {
-- xsltTransformError(NULL, NULL, NULL,
-- "xsl:import: read rights for %s denied\n",
-- URI);
-+ if (secres <= 0) {
-+ if (secres == 0)
-+ xsltTransformError(NULL, NULL, NULL,
-+ "xsl:import: read rights for %s denied\n",
-+ URI);
- goto error;
- }
- }
-diff --git a/libxslt/transform.c b/libxslt/transform.c
-index 13793914..0636dbd0 100644
---- a/libxslt/transform.c
-+++ b/libxslt/transform.c
-@@ -3493,10 +3493,11 @@ xsltDocumentElem(xsltTransformContextPtr ctxt, xmlNodePtr node,
- */
- if (ctxt->sec != NULL) {
- ret = xsltCheckWrite(ctxt->sec, ctxt, filename);
-- if (ret == 0) {
-- xsltTransformError(ctxt, NULL, inst,
-- "xsltDocumentElem: write rights for %s denied\n",
-- filename);
-+ if (ret <= 0) {
-+ if (ret == 0)
-+ xsltTransformError(ctxt, NULL, inst,
-+ "xsltDocumentElem: write rights for %s denied\n",
-+ filename);
- xmlFree(URL);
- xmlFree(filename);
- return;
-diff --git a/libxslt/xslt.c b/libxslt/xslt.c
-index 780a5ad7..a234eb79 100644
---- a/libxslt/xslt.c
-+++ b/libxslt/xslt.c
-@@ -6763,10 +6763,11 @@ xsltParseStylesheetFile(const xmlChar* filename) {
- int res;
-
- res = xsltCheckRead(sec, NULL, filename);
-- if (res == 0) {
-- xsltTransformError(NULL, NULL, NULL,
-- "xsltParseStylesheetFile: read rights for %s denied\n",
-- filename);
-+ if (res <= 0) {
-+ if (res == 0)
-+ xsltTransformError(NULL, NULL, NULL,
-+ "xsltParseStylesheetFile: read rights for %s denied\n",
-+ filename);
- return(NULL);
- }
- }
---
-2.18.1
-
+++ /dev/null
-From c5eb6cf3aba0af048596106ed839b4ae17ecbcb1 Mon Sep 17 00:00:00 2001
-From: Nick Wellnhofer <wellnhofer@aevum.de>
-Date: Sat, 27 Apr 2019 11:19:48 +0200
-Subject: [PATCH] Fix uninitialized read of xsl:number token
-
-Found by OSS-Fuzz.
----
- libxslt/numbers.c | 5 ++++-
- 1 file changed, 4 insertions(+), 1 deletion(-)
-
-diff --git a/libxslt/numbers.c b/libxslt/numbers.c
-index 89e1f668..75c31eba 100644
---- a/libxslt/numbers.c
-+++ b/libxslt/numbers.c
-@@ -382,7 +382,10 @@ xsltNumberFormatTokenize(const xmlChar *format,
- tokens->tokens[tokens->nTokens].token = val - 1;
- ix += len;
- val = xmlStringCurrentChar(NULL, format+ix, &len);
-- }
-+ } else {
-+ tokens->tokens[tokens->nTokens].token = (xmlChar)'0';
-+ tokens->tokens[tokens->nTokens].width = 1;
-+ }
- } else if ( (val == (xmlChar)'A') ||
- (val == (xmlChar)'a') ||
- (val == (xmlChar)'I') ||
---
-2.21.0
-
+++ /dev/null
-From 6ce8de69330783977dd14f6569419489875fb71b Mon Sep 17 00:00:00 2001
-From: Nick Wellnhofer <wellnhofer@aevum.de>
-Date: Mon, 3 Jun 2019 13:14:45 +0200
-Subject: [PATCH] Fix uninitialized read with UTF-8 grouping chars
-
-The character type in xsltFormatNumberConversion was too narrow and
-an invalid character/length combination could be passed to
-xsltNumberFormatDecimal, resulting in an uninitialized read.
-
-Found by OSS-Fuzz.
----
- libxslt/numbers.c | 5 +++--
- tests/docs/bug-222.xml | 1 +
- tests/general/bug-222.out | 2 ++
- tests/general/bug-222.xsl | 6 ++++++
- 4 files changed, 12 insertions(+), 2 deletions(-)
- create mode 100644 tests/docs/bug-222.xml
- create mode 100644 tests/general/bug-222.out
- create mode 100644 tests/general/bug-222.xsl
-
-diff --git a/libxslt/numbers.c b/libxslt/numbers.c
-index f1ed8846..20b99d5a 100644
---- a/libxslt/numbers.c
-+++ b/libxslt/numbers.c
-@@ -1298,13 +1298,14 @@ OUTPUT_NUMBER:
- number = floor((scale * number + 0.5)) / scale;
- if ((self->grouping != NULL) &&
- (self->grouping[0] != 0)) {
-+ int gchar;
-
- len = xmlStrlen(self->grouping);
-- pchar = xsltGetUTF8Char(self->grouping, &len);
-+ gchar = xsltGetUTF8Char(self->grouping, &len);
- xsltNumberFormatDecimal(buffer, floor(number), self->zeroDigit[0],
- format_info.integer_digits,
- format_info.group,
-- pchar, len);
-+ gchar, len);
- } else
- xsltNumberFormatDecimal(buffer, floor(number), self->zeroDigit[0],
- format_info.integer_digits,
-diff --git a/tests/docs/bug-222.xml b/tests/docs/bug-222.xml
-new file mode 100644
-index 00000000..69d62f2c
---- /dev/null
-+++ b/tests/docs/bug-222.xml
-@@ -0,0 +1 @@
-+<doc/>
-diff --git a/tests/general/bug-222.out b/tests/general/bug-222.out
-new file mode 100644
-index 00000000..e3139698
---- /dev/null
-+++ b/tests/general/bug-222.out
-@@ -0,0 +1,2 @@
-+<?xml version="1.0"?>
-+1â ¢0
-diff --git a/tests/general/bug-222.xsl b/tests/general/bug-222.xsl
-new file mode 100644
-index 00000000..e32dc473
---- /dev/null
-+++ b/tests/general/bug-222.xsl
-@@ -0,0 +1,6 @@
-+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
-+ <xsl:decimal-format name="f" grouping-separator="â ¢"/>
-+ <xsl:template match="/">
-+ <xsl:value-of select="format-number(10,'#â ¢0','f')"/>
-+ </xsl:template>
-+</xsl:stylesheet>
---
-2.21.0
-
PKG_NAME:=nspr
PKG_VERSION:=4.25
-PKG_RELEASE:=1
+PKG_RELEASE:=2
PKG_MAINTAINER:=Lucian Cristian <lucian.cristian@gmail.com>
PKG_LICENCE:=MPL-2.0
--- /dev/null
+--- a/nspr/pr/include/md/_linux.cfg
++++ b/nspr/pr/include/md/_linux.cfg
+@@ -499,7 +499,7 @@
+ #elif defined(__mips__)
+
+ /* For _ABI64 */
+-#include <sgidefs.h>
++#include <asm/sgidefs.h>
+
+ #ifdef __MIPSEB__
+ #define IS_BIG_ENDIAN 1
+++ /dev/null
-#
-# Copyright (C) 2014 OpenWrt.org
-#
-# This is free software, licensed under the GNU General Public License v2.
-# See /LICENSE for more information.
-#
-
-include $(TOPDIR)/rules.mk
-
-PKG_NAME:=mailman
-PKG_VERSION:=2.1.29
-PKG_RELEASE:=2
-
-PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tgz
-PKG_SOURCE_URL:=@GNU/$(PKG_NAME)
-PKG_HASH:=838872713601e8a124146e550f53709482c1ef168f1e16d201465c651cbf0d2c
-
-PKG_MAINTAINER:=Denis Shulyaka <Shulyaka@gmail.com>
-PKG_LICENSE:=GPL-2.0-or-later
-PKG_LICENSE_FILES:=gnu-COPYING-GPL
-PKG_CPE_ID:=cpe:2.3:a:gnu:mailman
-
-include $(INCLUDE_DIR)/package.mk
-
-define Package/mailman
- SECTION:=mail
- CATEGORY:=Mail
- TITLE:=The GNU Mailing List Manager
- URL:=https://www.gnu.org/software/mailman/
- DEPENDS:=+postfix +python +uhttpd +python-dns
-endef
-
-define Package/mailman/description
- Mailman is free software for managing electronic mail discussion and e-newsletter lists.
-endef
-
-prefix=/usr/local/mailman
-
-define Package/mailman/conffiles
-$(prefix)/Mailman/mm_cfg.py
-endef
-
-CONFIGURE_ARGS += \
- --prefix="$(prefix)" \
- --exec-prefix="$(prefix)" \
- --with-username="root" \
- --with-groupname="root" \
- --with-mail-gid="nogroup" \
- --with-cgi-gid="root" \
- --without-permcheck \
-
-define Build/Compile
-endef
-
-define Package/mailman/install
- $(INSTALL_DIR) $(1)$(prefix)
- cd $(PKG_BUILD_DIR); $(MAKE) DESTDIR=$(1) install
- $(INSTALL_DIR) $(1)/etc/init.d
- $(INSTALL_BIN) ./files/mailman.init $(1)/etc/init.d/mailman
- $(INSTALL_DIR) $(1)/usr/www
- ln -s $(prefix)/cgi-bin/ $(1)/usr/www/mailman
- ln -s $(prefix)/archives/public/ $(1)/usr/www/pipermail
- ln -s $(prefix)/icons $(1)/usr/www/icons
-endef
-
-define Package/mailman/postinst
-#!/bin/sh
-# check if we are on real system
-if [ -z "$${IPKG_INSTROOT}" ]; then
-
- if [ `postconf alias_maps | grep -ci mailman` -eq 0 ]
- then
- postconf -e "`postconf alias_maps`, cdb:$(prefix)/data/aliases"
- fi
- cd $(prefix)
- hostname=`cat /proc/sys/kernel/hostname`
- if [ ! -f data/aliases ]
- then
- ./bin/genaliases
- fi
- newaliases
- if [ `grep -c DEFAULT_URL_HOST Mailman/mm_cfg.py` -eq 0 ]
- then
- echo "DEFAULT_EMAIL_HOST = '$$hostname'" >> Mailman/mm_cfg.py
- echo "DEFAULT_URL_HOST = '$$hostname'" >> Mailman/mm_cfg.py
- echo "add_virtualhost(DEFAULT_URL_HOST, DEFAULT_EMAIL_HOST)" >> Mailman/mm_cfg.py
- echo "QRUNNERS.remove(('NewsRunner',1))" >> Mailman/mm_cfg.py
- fi
- if [ `./bin/list_lists | grep -ci mailman` -eq 0 ]
- then
- ./bin/newlist --urlhost=$$hostname --emailhost=$$hostname --quiet mailman root@$$hostname mailman
- ./bin/config_list -i data/sitelist.cfg mailman
- echo "NOTE: A default site-wide mailing list Mailman with password 'mailman' has been created. Please review it and change the password."
- ./bin/mmsitepass mailman
- echo "NOTE: The default site password 'mailman' has been created."
- fi
- # /etc/init.d/mailman enable
- if [ `ps | grep "mailman/bin/qrunner" | grep -cv grep` -gt 0 ]
- then
- $(prefix)/bin/mailmanctl -q restart
- fi
- if [ `grep -c mailman /etc/config/uhttpd` -eq 0 ]
- then #we assume that the server is not configured yet, thus print out some help for the first time:
- echo "NOTE: Please set the site password using $(prefix)/bin/mmsitepass <your-site-password>"
- echo "Please add uhttpd config section to your /etc/config/uhttpd like this:"
- echo "config uhttpd mailman"
- echo " list listen_http 0.0.0.0:80"
- echo " option home /usr/www"
- echo " option cgi_prefix /mailman"
- echo " no_symlinks 0"
- echo "Don't forget to setup firewall for accessing this website!"
- echo "To add a mailing list go to http://$$hostname/mailman/create."
- fi
-fi
-endef
-
-define Package/mailman/prerm
-#!/bin/sh
-# check if we are on real system
-if [ -z "$${IPKG_INSTROOT}" ]; then
-
- if [ `ps | grep "mailman/bin/qrunner" | grep -cv grep` -gt 0 ]
- then
- $(prefix)/bin/mailmanctl stop
- fi
-fi
-endef
-
-$(eval $(call BuildPackage,mailman))
+++ /dev/null
-#!/bin/sh /etc/rc.common
-# Copyright (C) 2014 OpenWrt.org
-
-START=50
-STOP=50
-
-PYTHON=/usr/bin/python
-MAILMANHOME=/usr/local/mailman
-MAILMANCTL=$MAILMANHOME/bin/mailmanctl
-
-start() {
- #rm -f $MAILMANHOME/locks/*
- $PYTHON $MAILMANCTL -s -q start
-}
-
-stop() {
- $PYTHON $MAILMANCTL -q stop
-}
-
-restart() {
- $PYTHON $MAILMANCTL -q restart
-}
+++ /dev/null
---- a/Mailman/Defaults.py.in
-+++ b/Mailman/Defaults.py.in
-@@ -506,7 +506,7 @@ SMTPLIB_DEBUG_LEVEL = 0
- # standard out (or send an email to the site list owner) for manual twiddling
- # of an /etc/aliases style file. Use 'Postfix' if you are using the Postfix
- # MTA -- but then also see POSTFIX_STYLE_VIRTUAL_DOMAINS.
--MTA = 'Manual'
-+MTA = 'Postfix'
-
- # If you set MTA='Postfix', then you also want to set the following variable,
- # depending on whether you're using virtual domains in Postfix, and which
+++ /dev/null
---- a/configure
-+++ b/configure
-@@ -2255,35 +2255,35 @@ fi
- { $as_echo "$as_me:${as_lineno-$LINENO}: result: $version" >&5
- $as_echo "$version" >&6; }
-
--# See if dnspython is installed.
--{ $as_echo "$as_me:${as_lineno-$LINENO}: checking dnspython" >&5
--$as_echo_n "checking dnspython... " >&6; }
--
--cat > conftest.py <<EOF
--try:
-- import dns.resolver
-- res = 'ok'
--except ImportError:
-- res = 'no'
--fp = open("conftest.out", "w")
--fp.write("%s\n" % res)
--fp.close()
--EOF
--
--$PYTHON conftest.py
--havednspython=`cat conftest.out`
--rm -f conftest.out conftest.py
--if test "$havednspython" = "no"
--then
-- as_fn_error $? "
--
--***** dnspython not found. It is required for the new
--***** dmarc_moderation_action featurer. Get it from
--***** <http://www.dnspython.org/> or
--***** <https://pypi.python.org/pypi/dnspython/>" "$LINENO" 5
--fi
--{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $havednspython" >&5
--$as_echo "$havednspython" >&6; }
-+## See if dnspython is installed.
-+#{ $as_echo "$as_me:${as_lineno-$LINENO}: checking dnspython" >&5
-+#$as_echo_n "checking dnspython... " >&6; }
-+#
-+#cat > conftest.py <<EOF
-+#try:
-+# import dns.resolver
-+# res = 'ok'
-+#except ImportError:
-+# res = 'no'
-+#fp = open("conftest.out", "w")
-+#fp.write("%s\n" % res)
-+#fp.close()
-+#EOF
-+#
-+#$PYTHON conftest.py
-+#havednspython=`cat conftest.out`
-+#rm -f conftest.out conftest.py
-+#if test "$havednspython" = "no"
-+#then
-+# as_fn_error $? "
-+#
-+#***** dnspython not found. It is required for the new
-+#***** dmarc_moderation_action featurer. Get it from
-+#***** <http://www.dnspython.org/> or
-+#***** <https://pypi.python.org/pypi/dnspython/>" "$LINENO" 5
-+#fi
-+#{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $havednspython" >&5
-+#$as_echo "$havednspython" >&6; }
-
- # Check the email package version.
- { $as_echo "$as_me:${as_lineno-$LINENO}: checking Python's email package" >&5
+++ /dev/null
---- a/configure
-+++ b/configure
-@@ -3957,6 +3957,8 @@ fi
- $as_echo "$URLHOST" >&6; }
- rm -f conftest.out conftest.py
-
-+PYTHON=/usr/bin/python
-+
- # Checks for libraries.
-
- for ac_func in strerror setregid syslog
+++ /dev/null
---- a/Mailman/MailList.py
-+++ b/Mailman/MailList.py
-@@ -30,7 +30,7 @@ import re
- import shutil
- import socket
- import urllib
--import cPickle
-+import pickle as cPickle
-
- from cStringIO import StringIO
- from UserDict import UserDict
---- a/misc/paths.py.in
-+++ b/misc/paths.py.in
-@@ -71,14 +71,14 @@ sys.path.append(distdir)
- # In a normal interactive Python environment, the japanese.pth and korean.pth
- # files would be imported automatically. But because we inhibit the importing
- # of the site module, we need to be explicit about importing these codecs.
--if not jaok:
-- import japanese
-+#if not jaok:
-+# import japanese
- # As of KoreanCodecs 2.0.5, you had to do the second import to get the Korean
- # codecs installed, however leave the first import in there in case an upgrade
- # changes this.
--if not kook:
-- import korean
-- import korean.aliases
-+#if not kook:
-+# import korean
-+# import korean.aliases
- # Arabic and Hebrew (RFC-1556) encoding aliases. (temporary solution)
- import encodings.aliases
- encodings.aliases.aliases.update({
PKG_NAME:=grilo-plugins
PKG_VERSION:=0.3.11
-PKG_RELEASE:=1
+PKG_RELEASE:=2
PKG_MAINTAINER:=W. Michael Petullo <mike@flyn.org>
PKG_LICENSE:=LGPLv2.1
PKG_SOURCE_URL:=@GNOME/grilo-plugins/0.3/
PKG_HASH:=dde2e605b1994341c6bf012493e056b406b08571834dea3b3c671d5b8b1dcd73
-PKG_BUILD_PARALLEL:=1
PKG_INSTALL:=1
PKG_BUILD_DEPENDS:=meson/host
-Denable-thetvdb=no \
-Denable-tmdb=no \
-Denable-freebox=no \
+ --wrap-mode=nodownload
define Package/grilo-plugins/install
$(INSTALL_DIR) $(1)/usr/lib/grilo-0.3
include $(TOPDIR)/rules.mk
PKG_NAME:=adblock
-PKG_VERSION:=4.0.0
-PKG_RELEASE:=1
+PKG_VERSION:=4.0.1
+PKG_RELEASE:=2
PKG_LICENSE:=GPL-3.0-or-later
PKG_MAINTAINER:=Dirk Brenken <dev@brenken.org>
SECTION:=net
CATEGORY:=Network
TITLE:=Powerful adblock script to block ad/abuse domains by using DNS
- DEPENDS:=+jshn +jsonfilter +coreutils +coreutils-sort @(PACKAGE_aria2||PACKAGE_curl||PACKAGE_wget||PACKAGE_uclient-fetch&&(PACKAGE_libustream-mbedtls||PACKAGE_libustream-openssl||PACKAGE_libustream-wolfssl))
+ DEPENDS:=+jshn +jsonfilter +coreutils +coreutils-sort
PKGARCH:=all
endef
| :---------------- | :------------------------ | :--------------------------------------------------------------------------------------------- |
| adb_enabled | 1, enabled | set to 0 to disable the adblock service |
| adb_dns | -, auto-detected | 'dnsmasq', 'unbound', 'named', 'kresd' or 'raw' |
-| adb_dnsdir | -, auto-detected | path for the generated blocklist file 'adb_list.overall' |
| adb_fetchutil | -, auto-detected | 'uclient-fetch', 'wget', 'curl' or 'aria2c' |
| adb_fetchparm | -, auto-detected | special config options for the selected download utility |
| adb_trigger | -, not set | trigger network interface or 'not set' to use a time-based startup |
| adb_nice | 0, standard prio. | valid nice level range 0-19 of the adblock processes |
| adb_forcedns | 0, disabled | set to 1 to force DNS requests to the local resolver |
| adb_maxqueue | 4 | size of the download queue to handle downloads & list processing in parallel |
+| adb_dnsdir | -, auto-detected | path for the generated blocklist file 'adb_list.overall' |
+| adb_dnstimeout | 10 | timeout in seconds to wait for a successful DNS backend restart |
+| adb_dnsinstance | 0, first instance | set to the relevant dns backend instance used by adblock (dnsmasq only) |
| adb_dnsfilereset | 0, disabled | set to 1 to purge the final DNS blocklist file after DNS backend loading |
| adb_dnsflush | 0, disabled | set to 1 to flush the DNS Cache before & after adblock processing |
| adb_dnsinotify | -, not set | set to 1 to prevent adblock triggered restarts for DNS backends with autoload functions |
| adb_dnsallow | -, not set | set to 1 to disable selective DNS whitelisting (RPZ pass through) |
-| adb_dnstimeout | 10 | timeout in seconds to wait for a successful DNS backend restart |
| adb_lookupdomain | example.com | external domain to check for a successful DNS backend restart or 'false' to disable this check |
| adb_portlist | 53 853 5353 | space separated list of firewall ports which should be redirected locally |
| adb_report | 0, disabled | set to 1 to enable the background tcpdump gathering process for reporting |
+ yoyo x S general https://pgl.yoyo.org
</code></pre>
-To add new or edit existing sources extract the compressed JSON file _gunzip /etc/adblock/adblock.sources.gz_.
+To add new or edit existing sources extract the compressed JSON file _gunzip /etc/adblock/adblock.sources.gz_.
A valid JSON source object contains the following required information, e.g.:
<pre><code>
[...]
export LC_ALL=C
export PATH="/usr/sbin:/usr/bin:/sbin:/bin"
set -o pipefail
-adb_ver="4.0.0"
+adb_ver="4.0.1"
adb_enabled=0
adb_debug=0
adb_forcedns=0
esac
fi
- if [ -z "${adb_dns}" ] || [ ! -x "$(command -v "${adb_dns}")" ]
+ if [ "${adb_dns}" != "raw" ] && { [ -z "${adb_dns}" ] || [ ! -x "$(command -v "${adb_dns}")" ]; }
then
f_log "err" "dns backend not found, please set 'adb_dns' manually"
fi
if [ "${adb_dns}" != "raw" ] && { [ "${adb_dnsdir}" = "${adb_tmpbase}" ] || [ "${adb_dnsdir}" = "${adb_backupdir}" ] || \
- [ "${adb_dnsdir}" = "${adb_reportdir}" ] || [ "${adb_dnsdir}" = "${adb_jaildir}" ] ; }
+ [ "${adb_dnsdir}" = "${adb_reportdir}" ] || [ "${adb_dnsdir}" = "${adb_jaildir}" ]; }
then
f_log "err" "dns directory '${adb_dnsdir}' has been misconfigured, it must not point to the 'adb_tmpbase', 'adb_backupdir', 'adb_reportdir' or 'adb_jaildir'"
fi
do
if { [ "${util}" = "uclient-fetch" ] && [ -n "$(printf "%s" "${adb_packages}" | grep "^libustream-")" ]; } || \
{ [ "${util}" = "wget" ] && [ -n "$(printf "%s" "${adb_packages}" | grep "^wget -")" ]; } || \
- { [ "${util}" != "uclient-fetch" ] && [ "${util}" != "wget" ]; }
+ [ "${util}" = "curl" ] || [ "${util}" = "aria2c" ]
then
if [ -x "$(command -v "${util}")" ]
then
#
f_query()
{
- local search result prefix suffix field domain="${1}" tld="${1#*.}"
+ local search result prefix suffix field query_start query_end query_timeout=30 domain="${1}" tld="${1#*.}"
if [ -z "${domain}" ] || [ "${domain}" = "${tld}" ]
then
field=1
;;
esac
+ query_start="$(date "+%s")"
if [ "${adb_dnsfilereset}" -eq 0 ]
then
while [ "${domain}" != "${tld}" ]
if [ "${?}" -eq 0 ]
then
result="true"
+ query_end="$(date "+%s")"
+ if [ "$((query_end-query_start))" -gt "${query_timeout}" ]
+ then
+ printf "%s\\n\\n" " - [...]"
+ break
+ fi
fi
done
if [ "${result}" != "true" ]
json_close_object
done
json_close_array
- json_add_string "dns_backend" "${adb_dns}, ${adb_dnsdir}"
+ json_add_string "dns_backend" "${adb_dns:-"-"}, ${adb_dnsdir:-"-"}"
json_add_string "run_utils" "${utils:-"-"}"
json_add_string "run_ifaces" "trigger: ${adb_trigger:-"-"}, report: ${adb_repiface:-"-"}"
json_add_string "run_directories" "base: ${adb_tmpbase}, backup: ${adb_backupdir}, report: ${adb_reportdir}, jail: ${adb_jaildir}"
include $(TOPDIR)/rules.mk
PKG_NAME:=proto-bonding
-PKG_VERSION:=2020-03-03
+PKG_VERSION:=2020-03-30
PKG_RELEASE:=1
PKG_LICENSE:=GPL-2.0
# Check for loaded kernel bonding driver (/sys/class/net/bonding_masters exists)
[ -f "$BONDING_MASTERS" ] || {
echo "$cfg" "setup: bonding_masters does not exist in sysfs (kernel module not loaded?)"
- proto_notify_error "$cfg" "setup: bonding_masters does not exist in sysfs (kernel module not loaded?)"
+ proto_notify_error "$cfg" BONDING_MASTER_DOES_NOT_EXIST
proto_block_restart "$cfg"
return
}
for slave in $slaves; do
if [ "$(cat /proc/net/dev |grep "$slave")" == "" ]; then
- echo "$cfg" "ERROR IN CONFIGURATION - $slave: No such device"
- proto_notify_error "$cfg" "ERROR IN CONFIGURATION - $slave: No such device"
+ echo "$cfg" "No slave device $slave found"
+ proto_notify_error "$cfg" NO_DEVICE
proto_block_restart "$cfg"
return
fi
# For static configuration we _MUST_ have an IP address
[ -z "$ipaddr" ] && {
- echo "$cfg" "INVALID LOCAL ADDRESS"
- proto_notify_error "$cfg" "INVALID_LOCAL_ADDRESS"
+ echo "$cfg" "No local IP address defined"
+ proto_notify_error "$cfg" INVALID_LOCAL_ADDRESS
proto_block_restart "$cfg"
return
}
# Check for loaded kernel bonding driver (/sys/class/net/bonding_masters exists)
[ -f "$BONDING_MASTERS" ] || {
echo "$cfg" "teardown: bonding_masters does not exist in sysfs (kernel module not loaded?)"
- proto_notify_error "$cfg" "teardown: bonding_masters does not exist in sysfs (kernel module not loaded?)"
+ proto_notify_error "$cfg" BONDING_MASTER_DOES_NOT_EXIST
proto_block_restart "$cfg"
return
}
+++ /dev/null
-#
-# Copyright (C) 2017 Andrew McConachie
-#
-# This is free software, licensed under the GNU General Public License v2.
-# See /LICENSE for more information.
-#
-
-include $(TOPDIR)/rules.mk
-
-PKG_NAME:=danish
-PKG_VERSION:=0.1
-PKG_RELEASE:=2
-PKG_MAINTAINER:=Andrew McConachie <andrew@depht.com>
-PKG_LICENSE:=GPL-3.0
-
-PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
-PKG_SOURCE_PROTO:=git
-PKG_SOURCE_URL:=https://github.com/smutt/danish.git
-PKG_SOURCE_VERSION:=f726656a4e37c46c037f61334ac48b35b7de25be
-PKG_MIRROR_HASH:=cb0b4bb7a210091d807ee8e4a2876865f32a9bf195836eb2e1edf6c5975d1130
-PKG_SOURCE_SUBDIR:=$(PKG_NAME)-$(PKG_VERSION)
-
-include $(INCLUDE_DIR)/package.mk
-include ../../lang/python/python-package.mk
-
-define Package/danish
- SECTION:=net
- CATEGORY:=Network
- SUBMENU:=IP Addresses and Names
- TITLE:=A middle box implementation of RFC 6698 for HTTPS.
- URL:=https://github.com/smutt/danish
- DEPENDS:=+python +python-dns +python-pcapy +python-dpkt +kmod-ipt-filter +iptables-mod-filter
-endef
-
-define Package/danish/description
- Danish is an experiment in middle-box DANE (RFC 6698) for HTTPS.
- https://www.middlebox-dane.org/
-endef
-
-define Build/Compile
- $(call Build/Compile/PyMod,,\
- install --prefix=/usr --root="$(PKG_INSTALL_DIR)", \
- )
-endef
-
-define Package/danish/install
- $(INSTALL_DIR) $(1)/usr/sbin
- $(CP) $(PKG_BUILD_DIR)/danish.py $(1)/usr/sbin/danish
- $(INSTALL_DIR) $(1)/etc/config
- $(CP) $(PKG_BUILD_DIR)/danish.conf $(1)/etc/config/danish
- $(INSTALL_DIR) $(1)/etc/init.d
- $(CP) $(PKG_BUILD_DIR)/danish.init $(1)/etc/init.d/danish
-endef
-
-$(eval $(call BuildPackage,danish))
include $(TOPDIR)/rules.mk
PKG_NAME:=dawn
-PKG_SOURCE_DATE:=2020-03-28
+PKG_SOURCE_DATE:=2020-03-29
PKG_RELEASE:=1
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://github.com/berlin-open-wireless-lab/DAWN.git
-PKG_SOURCE_VERSION:=b967c1e0b68bc05ec5456ab4ecb1a7f0ae75a1b7
-PKG_MIRROR_HASH:=78a2c0350ba7e8bbbb5045a057e13dbbb661622bcafc9ec96f4cdc20c998e028
+PKG_SOURCE_VERSION:=a25bb91f3751dea3a43ae3b3835a5a6bf7239259
+PKG_MIRROR_HASH:=7f60d026b15f6768a950d31007a247ad7bfb6c8a3b67bd938a1f345be62ca0c4
PKG_MAINTAINER:=Nick Hainke <vincent@systemli.org>
PKG_LICENSE:=GPL-2.0-only
PKG_NAME:=freeradius3
PKG_VERSION:=release_3_0_20
-PKG_RELEASE:=2
+PKG_RELEASE:=3
PKG_SOURCE:=$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://github.com/FreeRADIUS/freeradius-server/archive
CFLAGS += $(FPIC)
include $(INCLUDE_DIR)/package.mk
-include $(TOPDIR)/feeds/packages/lang/python/python-package.mk
include $(TOPDIR)/feeds/packages/lang/python/python3-package.mk
define Package/freeradius3/config
/etc/freeradius3/mods-enabled/preprocess
endef
-define Package/freeradius3-mod-python
- $(call Package/freeradius3/Default)
- DEPENDS:=freeradius3 +python
- TITLE:=Python module
-endef
-
-define Package/freeradius3-mod-python/conffiles
-/etc/freeradius3/mods-available/python
-endef
-
define Package/freeradius3-mod-python3
$(call Package/freeradius3/Default)
DEPENDS:=freeradius3 +python3
--without-rlm_opendirectory \
--without-rlm_pam \
--without-rlm_perl \
+ --without-rlm_python \
--without-rlm_redis \
--without-rlm_rediswho \
--without-rlm_rest \
CONFIGURE_ARGS+= --without-rlm_eap_pwd
endif
-ifneq ($(SDK)$(CONFIG_PACKAGE_freeradius3-mod-python),)
- CFLAGS+= -fPIC
- CONFIGURE_ARGS+= \
- --with-rlm_python \
- --with-rlm-python-include-dir="$(PYTHON_INC_DIR)" \
- --with-rlm-python-lib-dir="$(PYTHON_LIB_DIR)"
- CONFIGURE_VARS+= \
- OPENWRTTARGET_PY_PREFIX="$(PYTHON_DIR)" \
- OPENWRTTARGET_PY_SYS_VERSION="$(PYTHON_VERSION)"
-else
- CONFIGURE_ARGS+= --without-rlm_python
-endif
-
ifneq ($(SDK)$(CONFIG_PACKAGE_freeradius3-mod-python3),)
CFLAGS+= -fPIC
CONFIGURE_ARGS+= \
$(eval $(call BuildPlugin,freeradius3-mod-pap,rlm_pap,))
$(eval $(call BuildPlugin,freeradius3-mod-passwd,rlm_passwd,))
$(eval $(call BuildPlugin,freeradius3-mod-preprocess,rlm_preprocess,))
-$(eval $(call PyPackage,freeradius3-mod-python))
-$(eval $(call BuildPlugin,freeradius3-mod-python,rlm_python,))
$(eval $(call Py3Package,freeradius3-mod-python3))
$(eval $(call BuildPlugin,freeradius3-mod-python3,rlm_python3,))
$(eval $(call BuildPlugin,freeradius3-mod-radutmp,rlm_radutmp,))
---- a/src/modules/rlm_python/configure
-+++ b/src/modules/rlm_python/configure
-@@ -2928,15 +2928,15 @@ fi
-
-
- if test x$fail = x; then
-- PY_PREFIX=`${PYTHON_BIN} -c 'import sys ; print(sys.prefix)'`
-+ PY_PREFIX="$OPENWRTTARGET_PY_PREFIX"
- { $as_echo "$as_me:${as_lineno-$LINENO}: Python sys.prefix \"${PY_PREFIX}\"" >&5
- $as_echo "$as_me: Python sys.prefix \"${PY_PREFIX}\"" >&6;}
-
-- PY_EXEC_PREFIX=`${PYTHON_BIN} -c 'import sys ; print(sys.exec_prefix)'`
-+ PY_EXEC_PREFIX="$OPENWRTTARGET_PY_PREFIX"
- { $as_echo "$as_me:${as_lineno-$LINENO}: Python sys.exec_prefix \"${PY_EXEC_PREFIX}\"" >&5
- $as_echo "$as_me: Python sys.exec_prefix \"${PY_EXEC_PREFIX}\"" >&6;}
-
-- PY_SYS_VERSION=`${PYTHON_BIN} -c 'import sys ; print(sys.version[0:3])'`
-+ PY_SYS_VERSION="$OPENWRTTARGET_PY_SYS_VERSION"
- { $as_echo "$as_me:${as_lineno-$LINENO}: Python sys.version \"${PY_SYS_VERSION}\"" >&5
- $as_echo "$as_me: Python sys.version \"${PY_SYS_VERSION}\"" >&6;}
-
---- a/src/modules/rlm_python/configure.ac
-+++ b/src/modules/rlm_python/configure.ac
-@@ -65,13 +65,13 @@ if test x$with_[]modname != xno; then
- )
-
- if test x$fail = x; then
-- PY_PREFIX=`${PYTHON_BIN} -c 'import sys ; print(sys.prefix)'`
-+ PY_PREFIX="$OPENWRTTARGET_PY_PREFIX"
- AC_MSG_NOTICE([Python sys.prefix \"${PY_PREFIX}\"])
-
-- PY_EXEC_PREFIX=`${PYTHON_BIN} -c 'import sys ; print(sys.exec_prefix)'`
-+ PY_EXEC_PREFIX="$OPENWRTTARGET_PY_PREFIX"
- AC_MSG_NOTICE([Python sys.exec_prefix \"${PY_EXEC_PREFIX}\"])
-
-- PY_SYS_VERSION=`${PYTHON_BIN} -c 'import sys ; print(sys.version[[0:3]])'`
-+ PY_SYS_VERSION="$OPENWRTTARGET_PY_SYS_VERSION"
- AC_MSG_NOTICE([Python sys.version \"${PY_SYS_VERSION}\"])
-
- if test "x$PY_LIB_DIR" = "x"; then
--- a/src/modules/rlm_python3/configure
+++ b/src/modules/rlm_python3/configure
@@ -2928,15 +2928,15 @@ fi
include $(TOPDIR)/rules.mk
PKG_NAME:=frr
PKG_VERSION:=7.3
-PKG_RELEASE:=1
+PKG_RELEASE:=2
PKG_SOURCE_URL:=https://github.com/FRRouting/frr/releases/download/$(PKG_NAME)-$(PKG_VERSION)/
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.xz
define Package/frr-libfrr
$(call Package/frr/Default)
TITLE:=zebra library
- DEPENDS+=+librt +libjson-c +libyang +FRR_OPENSSL:libopenssl
+ DEPENDS+=+librt +libatomic +libjson-c +libyang +FRR_OPENSSL:libopenssl
CONFLICTS:=quagga-libzebra
endef
/etc/frr/
endef
+TARGET_LDFLAGS += -latomic
+
define Build/Prepare
$(Build/Prepare/Default)
mkdir -p $(PKG_BUILD_DIR)/build
include $(INCLUDE_DIR)/kernel.mk
PKG_NAME:=jool
-PKG_VERSION:=4.0.7
+PKG_VERSION:=4.0.8
PKG_RELEASE:=1
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://github.com/NICMx/Jool/releases/download/v$(PKG_VERSION)
-PKG_HASH:=3ea1b8ab7435de4776405ccf83c936b2028f20f860b7d0a462d39e4524564363
+PKG_HASH:=73dd65a14feedc8bd0f650d3316bca346474c8420d60b48ca95bd1ca8846f1d4
PKG_MAINTAINER:=
PKG_LICENSE:=GPL-2.0-only
--with-log4cplus="$(STAGING_DIR_HOSTPKG)" \
--with-openssl="$(STAGING_DIR_HOST)" \
--enable-boost-headers-only \
- --enable-static-link \
--enable-generate-messages
HOST_LDFLAGS += \
PKG_NAME:=miniupnpd
PKG_VERSION:=2.1.20191006
-PKG_RELEASE:=2
+PKG_RELEASE:=3
PKG_SOURCE_URL:=https://miniupnp.tuxfamily.org/files
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
config_get serial_number config serial_number
config_get model_number config model_number
config_get uuid config uuid
+ config_get use_stun config use_stun 0
config_get stun_host config stun_host
config_get stun_port config stun_port
config_get notify_interval config notify_interval
define Package/nft-qos
SECTION:=utils
CATEGORY:=Base system
- DEPENDS:=+nftables +kmod-nft-netdev +kmod-nft-bridge
+ DEPENDS:=+kmod-nft-netdev +kmod-nft-bridge +nftables
TITLE:=QoS scripts over nftables
PKGARCH:=all
endef
include $(TOPDIR)/rules.mk
PKG_NAME:=nmap
-PKG_VERSION:=7.70
-PKG_RELEASE:=2
+PKG_VERSION:=7.80
+PKG_RELEASE:=1
PKG_MAINTAINER:=Nuno Goncalves <nunojpg@gmail.com>
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.bz2
PKG_SOURCE_URL:=https://nmap.org/dist/
-PKG_HASH:=847b068955f792f4cc247593aca6dc3dc4aae12976169873247488de147a6e18
-PKG_LICENSE:=GPL-2.0
+PKG_HASH:=fcfa5a0e42099e12e4bf7a68ebe6fde05553383a682e816a7ec9256ab4773faa
+PKG_LICENSE:=GPL-2.0-only
PKG_LICENSE_FILES:=COPYING
PKG_CPE_ID:=cpe:/a:nmap:nmap
+PKG_BUILD_PARALLEL:=1
PKG_INSTALL:=1
include $(INCLUDE_DIR)/package.mk
+include ../../lang/python/python3-package.mk
NMAP_DEPENDS:=+libpcap +libstdcpp +zlib +libpcre
NCAT_DEPENDS:=+libpcap
NPING_DEPENDS:=+libpcap +libpthread +libstdcpp
-NDIFF_DEPENDS:=+python
define Package/nmap/default
SUBMENU:=NMAP Suite
SECTION:=net
CATEGORY:=Network
- URL:=http://nmap.org/
+ URL:=https://nmap.org/
endef
define Package/nmap
TITLE:=Nmap (with OpenSSL support)
endef
+define Package/nmap-full
+$(call Package/nmap/default)
+ DEPENDS:=$(NMAP_DEPENDS) +libopenssl +liblua5.3 +libssh2
+ VARIANT:=full
+ TITLE:=Nmap (with OpenSSL and scripting support)
+endef
+
define Package/ncat
$(call Package/nmap/default)
DEPENDS:=$(NCAT_DEPENDS)
TITLE:=Ncat (with OpenSSL support)
endef
+define Package/ncat-full
+$(call Package/nmap/default)
+ DEPENDS:=$(NCAT_DEPENDS) +libopenssl +liblua5.3
+ VARIANT:=full
+ TITLE:=Ncat (with OpenSSL and scripting support)
+endef
+
define Package/nping
$(call Package/nmap/default)
DEPENDS:=$(NPING_DEPENDS)
TITLE:=Network packet generation tool / ping utility
endef
+define Package/nping-ssl
+$(call Package/nmap/default)
+ DEPENDS:=$(NPING_DEPENDS) +libopenssl
+ VARIANT:=ssl
+ TITLE:=Nping (with OpenSSL support)
+endef
+
define Package/ndiff
$(call Package/nmap/default)
- DEPENDS:=$(NDIFF_DEPENDS)
- VARIANT:=nossl
+ DEPENDS:=+python3-light +python3-xml
+ VARIANT:=python3
TITLE:=Utility to compare the results of Nmap scans
endef
CONFIGURE_ARGS += \
--with-libdnet=included \
- --with-libpcre="$(STAGING_DIR)/usr" \
+ --with-liblinear=included \
--with-libpcap="$(STAGING_DIR)/usr" \
- --without-liblua \
+ --with-libpcre="$(STAGING_DIR)/usr" \
+ --with-libz="$(STAGING_DIR)/usr" \
+ --with-ncat \
+ --without-localdirs \
+ --without-ndiff \
+ --without-nmap-update \
+ --without-subversion \
--without-zenmap
+ # --with-libnbase=included
+ # --with-libnsock=included
+ # --without-apr
+
+ifeq ($(BUILD_VARIANT),full)
+ CONFIGURE_ARGS += \
+ --with-liblua="$(STAGING_DIR)/usr" \
+ --with-libssh2="$(STAGING_DIR)/usr" \
+ --with-openssl="$(STAGING_DIR)/usr" \
+ --without-nping
+
+else ifeq ($(BUILD_VARIANT),ssl)
+ CONFIGURE_ARGS += \
+ --with-nping \
+ --with-openssl="$(STAGING_DIR)/usr" \
+ --without-liblua \
+ --without-libssh2
+
+else # nossl
+ CONFIGURE_ARGS += \
+ --with-nping \
+ --without-liblua \
+ --without-libssh2 \
+ --without-openssl
+endif
CONFIGURE_VARS += \
- ac_cv_dnet_bsd_bpf=no \
- CXXFLAGS="$$$$CXXFLAGS -fno-builtin"
+ ac_cv_dnet_bsd_bpf=no
+
+PYTHON3_PKG_SETUP_DIR:=ndiff
+PYTHON3_PKG_SETUP_ARGS:=
-ifeq ($(BUILD_VARIANT),ssl)
- CONFIGURE_ARGS += --with-openssl="$(STAGING_DIR)/usr" --without-libssh2
-else
- CONFIGURE_ARGS += --without-openssl
+ifeq ($(BUILD_VARIANT),python3)
+ Build/Configure:=:
+ Build/Install:=:
endif
define Package/nmap/install
$(INSTALL_DIR) $(1)/usr/bin
$(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/bin/nmap $(1)/usr/bin/
$(INSTALL_DIR) $(1)/usr/share/nmap
- $(CP) $(PKG_INSTALL_DIR)/usr/share/nmap/* $(1)/usr/share/nmap/
+ $(INSTALL_DATA) $(PKG_INSTALL_DIR)/usr/share/nmap/nmap* $(1)/usr/share/nmap/
endef
-define Package/nmap-ssl/install
- $(INSTALL_DIR) $(1)/usr/bin
- $(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/bin/nmap $(1)/usr/bin/
- $(INSTALL_DIR) $(1)/usr/share/nmap
- $(CP) $(PKG_INSTALL_DIR)/usr/share/nmap/* $(1)/usr/share/nmap/
+Package/nmap-ssl/install=$(Package/nmap/install)
+
+define Package/nmap-full/install
+ $(call Package/nmap/install,$(1))
+ $(INSTALL_DATA) $(PKG_INSTALL_DIR)/usr/share/nmap/nse_main.lua $(1)/usr/share/nmap/
+ $(CP) $(PKG_INSTALL_DIR)/usr/share/nmap/{nselib,scripts} $(1)/usr/share/nmap/
endef
define Package/ncat/install
endef
define Package/ncat-ssl/install
- $(INSTALL_DIR) $(1)/usr/bin
- $(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/bin/ncat $(1)/usr/bin/
+ $(call Package/ncat/install,$(1))
$(INSTALL_DIR) $(1)/usr/share/ncat
- $(CP) $(PKG_INSTALL_DIR)/usr/share/ncat/ca-bundle.crt $(1)/usr/share/ncat/
+ $(INSTALL_DATA) $(PKG_INSTALL_DIR)/usr/share/ncat/ca-bundle.crt $(1)/usr/share/ncat/
endef
-define Package/ndiff/install
- $(INSTALL_DIR) $(1)/usr/bin
- $(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/bin/ndiff $(1)/usr/bin/
-endef
+Package/ncat-full/install=$(Package/ncat-ssl/install)
define Package/nping/install
$(INSTALL_DIR) $(1)/usr/bin
$(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/bin/nping $(1)/usr/bin/
endef
+Package/nping-ssl/install=$(Package/nping/install)
+
+define Py3Package/ndiff/install
+ $(INSTALL_DIR) $(1)/usr/bin
+ $(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/bin/ndiff $(1)/usr/bin/
+endef
$(eval $(call BuildPackage,nmap))
$(eval $(call BuildPackage,nmap-ssl))
+$(eval $(call BuildPackage,nmap-full))
$(eval $(call BuildPackage,ncat))
$(eval $(call BuildPackage,ncat-ssl))
+$(eval $(call BuildPackage,ncat-full))
$(eval $(call BuildPackage,nping))
+$(eval $(call BuildPackage,nping-ssl))
+
+$(eval $(call Py3Package,ndiff))
$(eval $(call BuildPackage,ndiff))
+$(eval $(call BuildPackage,ndiff-src))
--- /dev/null
+From ea4e2d6657103a2c3d6f543a1a8619eb4d4472c8 Mon Sep 17 00:00:00 2001
+From: dmiller <dmiller@e0a8ed71-7df4-0310-8962-fdc924857419>
+Date: Mon, 30 Dec 2019 04:03:03 +0000
+Subject: [PATCH] Avoid 'using namespace std' causing name conflicts. Fixes
+ #1363, fixes #1867
+
+---
+ CHANGELOG | 4 ++++
+ nping/EchoServer.cc | 2 +-
+ nping/EchoServer.h | 4 +---
+ nping/NEPContext.h | 3 +--
+ nping/NpingTargets.h | 4 +---
+ nping/ProbeMode.h | 2 --
+ nping/nping.cc | 1 -
+ nping/utils.h | 2 --
+ 8 files changed, 8 insertions(+), 14 deletions(-)
+
+diff --git a/nping/EchoServer.cc b/nping/EchoServer.cc
+index ccdcf9c2d0..a824340cd2 100644
+--- a/nping/EchoServer.cc
++++ b/nping/EchoServer.cc
+@@ -199,7 +199,7 @@ NEPContext *EchoServer::getClientContext(nsock_iod iod){
+ * the context could not be found. */
+ int EchoServer::destroyClientContext(clientid_t clnt){
+ bool deleted=false;
+- vector<NEPContext>::iterator it;
++ std::vector<NEPContext>::iterator it;
+ /* Iterate through the context array and delete the one that belongs to clnt */
+ for ( it=this->client_ctx.begin(); it<this->client_ctx.end(); it++){
+ if(it->getIdentifier()==clnt){
+diff --git a/nping/EchoServer.h b/nping/EchoServer.h
+index c3dece6341..c9fee6de9e 100644
+--- a/nping/EchoServer.h
++++ b/nping/EchoServer.h
+@@ -136,15 +136,13 @@
+ #include <vector>
+ #include "NEPContext.h"
+
+-using namespace std;
+-
+ #define LISTEN_QUEUE_SIZE 10
+
+ class EchoServer {
+
+ private:
+ /* Attributes */
+- vector<NEPContext> client_ctx;
++ std::vector<NEPContext> client_ctx;
+ clientid_t client_id_count;
+
+ /* Methods */
+diff --git a/nping/NEPContext.h b/nping/NEPContext.h
+index 5e470d7551..32b8be48d6 100644
+--- a/nping/NEPContext.h
++++ b/nping/NEPContext.h
+@@ -135,7 +135,6 @@
+ #include "nsock.h"
+ #include "EchoHeader.h"
+ #include <vector>
+-using namespace std;
+
+ /* SERVER STATE MACHINE */
+ /* _ */
+@@ -204,7 +203,7 @@ class NEPContext {
+ u8 client_nonce[NONCE_LEN];
+ bool server_nonce_set;
+ bool client_nonce_set;
+- vector<fspec_t> fspecs;
++ std::vector<fspec_t> fspecs;
+ struct sockaddr_storage clnt_addr;
+
+ u8 *generateKey(int key_type, size_t *final_len);
+diff --git a/nping/NpingTargets.h b/nping/NpingTargets.h
+index 61bb356f39..3a9a2145af 100644
+--- a/nping/NpingTargets.h
++++ b/nping/NpingTargets.h
+@@ -137,8 +137,6 @@
+ #include "NpingTarget.h"
+ #include <vector>
+
+-using namespace std;
+-
+ #define MAX_NPING_HOSTNAME_LEN 512 /**< Max length for named hosts */
+
+ class NpingTargets {
+@@ -176,7 +174,7 @@ class NpingTargets {
+
+ /* TODO: Make private */
+ NpingTarget *currenths;
+- vector<NpingTarget *> Targets;
++ std::vector<NpingTarget *> Targets;
+
+ }; /* End of class NpingTargets */
+
+diff --git a/nping/ProbeMode.h b/nping/ProbeMode.h
+index aa86939e02..313776d862 100644
+--- a/nping/ProbeMode.h
++++ b/nping/ProbeMode.h
+@@ -135,11 +135,9 @@
+
+ #include "nping.h"
+ #include "nsock.h"
+-#include <vector>
+ #include "NpingTarget.h"
+ #include "utils_net.h"
+ #include "utils.h"
+-using namespace std;
+
+ #define PKT_TYPE_TCP_CONNECT 1
+ #define PKT_TYPE_UDP_NORMAL 2
+diff --git a/nping/nping.cc b/nping/nping.cc
+index 9de151a7be..40df912a88 100644
+--- a/nping/nping.cc
++++ b/nping/nping.cc
+@@ -150,7 +150,6 @@
+ #include <signal.h>
+ #include <time.h>
+
+-using namespace std;
+ NpingOps o;
+ EchoClient ec;
+ EchoServer es;
+diff --git a/nping/utils.h b/nping/utils.h
+index c3516cf29f..5de6b64b89 100644
+--- a/nping/utils.h
++++ b/nping/utils.h
+@@ -143,8 +143,6 @@
+ #endif
+
+ #include "global_structures.h"
+-#include <vector>
+-using namespace std;
+
+ /* Function prototypes */
+ bool contains(const char *source, const char *substring);
+++ /dev/null
---- a/nmap_error.cc
-+++ b/nmap_error.cc
-@@ -135,6 +135,7 @@
- #include "xml.h"
-
- #include <errno.h>
-+#include <time.h>
-
- extern NmapOps o;
-
---- a/nping/EchoServer.cc
-+++ b/nping/EchoServer.cc
-@@ -131,6 +131,7 @@
- #include "EchoServer.h"
- #include "EchoHeader.h"
- #include "NEPContext.h"
-+#include <ctime>
- #include <vector>
- #include "nsock.h"
- #include "output.h"
-@@ -281,12 +282,12 @@ int EchoServer::nep_listen_socket(){
- server_addr6.sin6_len = sizeof(struct sockaddr_in6);
- #endif
- /* Bind to local address and the specified port */
-- if( bind(master_sd, (struct sockaddr *)&server_addr6, sizeof(server_addr6)) != 0 ){
-+ if( ::bind(master_sd, (struct sockaddr *)&server_addr6, sizeof(server_addr6)) != 0 ){
- nping_warning(QT_3, "Failed to bind to source address %s. Trying to bind to port %d...", IPtoa(server_addr6.sin6_addr), port);
- /* If the bind failed for the supplied address, just try again with in6addr_any */
- if( o.spoofSource() ){
- server_addr6.sin6_addr = in6addr_any;
-- if( bind(master_sd, (struct sockaddr *)&server_addr6, sizeof(server_addr6)) != 0 ){
-+ if( ::bind(master_sd, (struct sockaddr *)&server_addr6, sizeof(server_addr6)) != 0 ){
- nping_fatal(QT_3, "Could not bind to port %d (%s).", port, strerror(errno));
- }else{
- nping_print(VB_1, "Server bound to port %d", port);
-@@ -319,12 +320,12 @@ int EchoServer::nep_listen_socket(){
- #endif
-
- /* Bind to local address and the specified port */
-- if( bind(master_sd, (struct sockaddr *)&server_addr4, sizeof(server_addr4)) != 0 ){
-+ if( ::bind(master_sd, (struct sockaddr *)&server_addr4, sizeof(server_addr4)) != 0 ){
- nping_warning(QT_3, "Failed to bind to source address %s. Trying to bind to port %d...", IPtoa(server_addr4.sin_addr), port);
- /* If the bind failed for the supplied address, just try again with in6addr_any */
- if( o.spoofSource() ){
- server_addr4.sin_addr.s_addr=INADDR_ANY;
-- if( bind(master_sd, (struct sockaddr *)&server_addr4, sizeof(server_addr4)) != 0 ){
-+ if( ::bind(master_sd, (struct sockaddr *)&server_addr4, sizeof(server_addr4)) != 0 ){
- nping_fatal(QT_3, "Could not bind to port %d (%s).", port, strerror(errno));
- }else{
- nping_print(VB_1, "Server bound to port %d", port);
---- a/osscan.cc
-+++ b/osscan.cc
-@@ -151,6 +151,7 @@
- #endif
-
- #include <algorithm>
-+#include <ctime>
- #include <list>
- #include <set>
-
---- a/osscan2.cc
-+++ b/osscan2.cc
-@@ -145,6 +145,7 @@
-
- #include "struct_ip.h"
-
-+#include <ctime>
- #include <list>
- #include <math.h>
-
---- a/service_scan.cc
-+++ b/service_scan.cc
-@@ -173,6 +173,7 @@
- #endif
-
- #include <algorithm>
-+#include <ctime>
- #include <list>
-
- extern NmapOps o;
--- /dev/null
+From: Bryan Quigley <bryan.quigley@canonical.com>
+Date: Sat, 2 Nov 2019 21:06:44 -0700
+Subject: Python3 port of ndiff
+
+Ported all python scripts in ndiff/ except setup.py
+
+Some hints on cmp taken from #1484
+
+Minor tweaks to Makefile to support python3, but unsure if
+there is a better way to do that.
+
+Separated .travis.yml commands for easier debugging where it breaks.
+
+This closes the easy half of #1176
+
+Resolves: #1484
+---
+ .travis.yml | 8 +-
+ Makefile.in | 6 +-
+ ndiff/ndiff.py | 495 +++++++++++++++++++++---------------------
+ ndiff/ndifftest.py | 94 ++++----
+ ndiff/scripts/ndiff | 14 +-
+ ndiff/setup.py | 34 +--
+ ndiff/test-scans/anonymize.py | 18 +-
+ 7 files changed, 337 insertions(+), 332 deletions(-)
+ mode change 100644 => 100755 ndiff/setup.py
+
+diff --git a/.travis.yml b/.travis.yml
+index 145ebc6..9bb50d6 100644
+--- a/.travis.yml
++++ b/.travis.yml
+@@ -4,7 +4,13 @@ compiler:
+ - clang
+ # Change this to your needs
+ sudo: false
+-script: mkdir /tmp/n && ./configure $SSL_FLAG $LUA_FLAG --prefix=/tmp/n && make && make check && make install && /tmp/n/bin/nmap -A localhost
++script:
++ - "mkdir /tmp/n"
++ - "./configure $SSL_FLAG $LUA_FLAG --prefix=/tmp/n"
++ - "make"
++ - "make check"
++ - "make install"
++ - "/tmp/n/bin/nmap -A localhost"
+
+ env:
+ - SSL_FLAG="--without-openssl" LUA_FLAG="--without-liblua"
+diff --git a/Makefile.in b/Makefile.in
+index 7ac5ae5..a0152f4 100644
+--- a/Makefile.in
++++ b/Makefile.in
+@@ -35,6 +35,7 @@ ZENMAPDIR = @ZENMAPDIR@
+ NDIFFDIR = @NDIFFDIR@
+ NPINGDIR = @NPINGDIR@
+ PYTHON = @PYTHON@
++PYTHON3 = /usr/bin/env python3
+ DEFS = @DEFS@ -DNMAP_PLATFORM=\"$(NMAP_PLATFORM)\" -DNMAPDATADIR=\"$(nmapdatadir)\"
+ # With GCC, add extra security checks to source code.
+ # http://gcc.gnu.org/ml/gcc-patches/2004-09/msg02055.html
+@@ -368,6 +369,7 @@ tests/check_dns: $(OBJS)
+ # this as the location of the interpreter whenever we're not doing a
+ # local installation.
+ DEFAULT_PYTHON_PATH = /usr/bin/env python
++DEFAULT_PYTHON3_PATH = /usr/bin/env python3
+
+ build-zenmap: $(ZENMAPDIR)/setup.py $(ZENMAPDIR)/zenmapCore/Version.py
+ # When DESTDIR is defined, assume we're building an executable
+@@ -388,7 +390,7 @@ install-zenmap: $(ZENMAPDIR)/setup.py
+ ln -sf zenmap $(DESTDIR)$(bindir)/xnmap
+
+ build-ndiff:
+- cd $(NDIFFDIR) && $(PYTHON) setup.py build $(if $(DESTDIR),--executable "$(DEFAULT_PYTHON_PATH)")
++ cd $(NDIFFDIR) && $(PYTHON) setup.py build $(if $(DESTDIR),--executable "$(DEFAULT_PYTHON3_PATH)")
+
+ build-nping: $(NPINGDIR)/Makefile build-nbase build-nsock build-netutil $(NPINGDIR)/nping.h @DNET_BUILD@ @PCAP_BUILD@
+ @cd $(NPINGDIR) && $(MAKE)
+@@ -458,7 +460,7 @@ check-ncat:
+ @cd $(NCATDIR) && $(MAKE) check
+
+ check-ndiff:
+- @cd $(NDIFFDIR) && $(PYTHON) ndifftest.py
++ @cd $(NDIFFDIR) && $(PYTHON3) ndifftest.py
+
+ check-nsock:
+ @cd $(NSOCKDIR)/src && $(MAKE) check
+diff --git a/ndiff/ndiff.py b/ndiff/ndiff.py
+index 043273f..abbd1c5 100755
+--- a/ndiff/ndiff.py
++++ b/ndiff/ndiff.py
+@@ -1,4 +1,4 @@
+-#!/usr/bin/env python
++#!/usr/bin/env python3
+
+ # Ndiff
+ #
+@@ -26,11 +26,11 @@ xml.__path__ = [x for x in xml.__path__ if "_xmlplus" not in x]
+ import xml.sax
+ import xml.sax.saxutils
+ import xml.dom.minidom
+-from StringIO import StringIO
++from io import StringIO
+
+ verbose = False
+
+-NDIFF_XML_VERSION = u"1"
++NDIFF_XML_VERSION = "1"
+
+
+ class OverrideEntityResolver(xml.sax.handler.EntityResolver):
+@@ -78,35 +78,35 @@ class Scan(object):
+ def write_nmaprun_open(self, writer):
+ attrs = {}
+ if self.scanner is not None:
+- attrs[u"scanner"] = self.scanner
++ attrs["scanner"] = self.scanner
+ if self.args is not None:
+- attrs[u"args"] = self.args
++ attrs["args"] = self.args
+ if self.start_date is not None:
+- attrs[u"start"] = "%d" % time.mktime(self.start_date.timetuple())
+- attrs[u"startstr"] = self.start_date.strftime(
++ attrs["start"] = "%d" % time.mktime(self.start_date.timetuple())
++ attrs["startstr"] = self.start_date.strftime(
+ "%a %b %d %H:%M:%S %Y")
+ if self.version is not None:
+- attrs[u"version"] = self.version
+- writer.startElement(u"nmaprun", attrs)
++ attrs["version"] = self.version
++ writer.startElement("nmaprun", attrs)
+
+ def write_nmaprun_close(self, writer):
+- writer.endElement(u"nmaprun")
++ writer.endElement("nmaprun")
+
+ def nmaprun_to_dom_fragment(self, document):
+ frag = document.createDocumentFragment()
+- elem = document.createElement(u"nmaprun")
++ elem = document.createElement("nmaprun")
+ if self.scanner is not None:
+- elem.setAttribute(u"scanner", self.scanner)
++ elem.setAttribute("scanner", self.scanner)
+ if self.args is not None:
+- elem.setAttribute(u"args", self.args)
++ elem.setAttribute("args", self.args)
+ if self.start_date is not None:
+ elem.setAttribute(
+- u"start", "%d" % time.mktime(self.start_date.timetuple()))
++ "start", "%d" % time.mktime(self.start_date.timetuple()))
+ elem.setAttribute(
+- u"startstr",
++ "startstr",
+ self.start_date.strftime("%a %b %d %H:%M:%S %Y"))
+ if self.version is not None:
+- elem.setAttribute(u"version", self.version)
++ elem.setAttribute("version", self.version)
+ frag.appendChild(elem)
+ return frag
+
+@@ -136,17 +136,17 @@ class Host(object):
+
+ def format_name(self):
+ """Return a human-readable identifier for this host."""
+- address_s = u", ".join(a.s for a in sorted(self.addresses))
+- hostname_s = u", ".join(sorted(self.hostnames))
++ address_s = ", ".join(a.s for a in sorted(self.addresses))
++ hostname_s = ", ".join(sorted(self.hostnames))
+ if len(hostname_s) > 0:
+ if len(address_s) > 0:
+- return u"%s (%s)" % (hostname_s, address_s)
++ return "%s (%s)" % (hostname_s, address_s)
+ else:
+ return hostname_s
+ elif len(address_s) > 0:
+ return address_s
+ else:
+- return u"<no name>"
++ return "<no name>"
+
+ def add_port(self, port):
+ self.ports[port.spec] = port
+@@ -163,46 +163,46 @@ class Host(object):
+ return state is None or state in self.extraports
+
+ def extraports_string(self):
+- list = [(count, state) for (state, count) in self.extraports.items()]
++ locallist = [(count, state) for (state, count) in list(self.extraports.items())]
+ # Reverse-sort by count.
+- list.sort(reverse=True)
+- return u", ".join(
+- [u"%d %s ports" % (count, state) for (count, state) in list])
++ locallist.sort(reverse=True)
++ return ", ".join(
++ ["%d %s ports" % (count, state) for (count, state) in locallist])
+
+ def state_to_dom_fragment(self, document):
+ frag = document.createDocumentFragment()
+ if self.state is not None:
+- elem = document.createElement(u"status")
+- elem.setAttribute(u"state", self.state)
++ elem = document.createElement("status")
++ elem.setAttribute("state", self.state)
+ frag.appendChild(elem)
+ return frag
+
+ def hostname_to_dom_fragment(self, document, hostname):
+ frag = document.createDocumentFragment()
+- elem = document.createElement(u"hostname")
+- elem.setAttribute(u"name", hostname)
++ elem = document.createElement("hostname")
++ elem.setAttribute("name", hostname)
+ frag.appendChild(elem)
+ return frag
+
+ def extraports_to_dom_fragment(self, document):
+ frag = document.createDocumentFragment()
+- for state, count in self.extraports.items():
+- elem = document.createElement(u"extraports")
+- elem.setAttribute(u"state", state)
+- elem.setAttribute(u"count", unicode(count))
++ for state, count in list(self.extraports.items()):
++ elem = document.createElement("extraports")
++ elem.setAttribute("state", state)
++ elem.setAttribute("count", str(count))
+ frag.appendChild(elem)
+ return frag
+
+ def os_to_dom_fragment(self, document, os):
+ frag = document.createDocumentFragment()
+- elem = document.createElement(u"osmatch")
+- elem.setAttribute(u"name", os)
++ elem = document.createElement("osmatch")
++ elem.setAttribute("name", os)
+ frag.appendChild(elem)
+ return frag
+
+ def to_dom_fragment(self, document):
+ frag = document.createDocumentFragment()
+- elem = document.createElement(u"host")
++ elem = document.createElement("host")
+
+ if self.state is not None:
+ elem.appendChild(self.state_to_dom_fragment(document))
+@@ -211,13 +211,13 @@ class Host(object):
+ elem.appendChild(addr.to_dom_fragment(document))
+
+ if len(self.hostnames) > 0:
+- hostnames_elem = document.createElement(u"hostnames")
++ hostnames_elem = document.createElement("hostnames")
+ for hostname in self.hostnames:
+ hostnames_elem.appendChild(
+ self.hostname_to_dom_fragment(document, hostname))
+ elem.appendChild(hostnames_elem)
+
+- ports_elem = document.createElement(u"ports")
++ ports_elem = document.createElement("ports")
+ ports_elem.appendChild(self.extraports_to_dom_fragment(document))
+ for port in sorted(self.ports.values()):
+ if not self.is_extraports(port.state):
+@@ -226,13 +226,13 @@ class Host(object):
+ elem.appendChild(ports_elem)
+
+ if len(self.os) > 0:
+- os_elem = document.createElement(u"os")
++ os_elem = document.createElement("os")
+ for os in self.os:
+ os_elem.appendChild(self.os_to_dom_fragment(document, os))
+ elem.appendChild(os_elem)
+
+ if len(self.script_results) > 0:
+- hostscript_elem = document.createElement(u"hostscript")
++ hostscript_elem = document.createElement("hostscript")
+ for sr in self.script_results:
+ hostscript_elem.appendChild(sr.to_dom_fragment(document))
+ elem.appendChild(hostscript_elem)
+@@ -246,7 +246,7 @@ class Address(object):
+ self.s = s
+
+ def __eq__(self, other):
+- return self.__cmp__(other) == 0
++ return self.sort_key() == other.sort_key()
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+@@ -254,8 +254,8 @@ class Address(object):
+ def __hash__(self):
+ return hash(self.sort_key())
+
+- def __cmp__(self, other):
+- return cmp(self.sort_key(), other.sort_key())
++ def __lt__(self, other):
++ return self.sort_key() < other.sort_key()
+
+ def __str__(self):
+ return str(self.s)
+@@ -264,21 +264,21 @@ class Address(object):
+ return self.s
+
+ def new(type, s):
+- if type == u"ipv4":
++ if type == "ipv4":
+ return IPv4Address(s)
+- elif type == u"ipv6":
++ elif type == "ipv6":
+ return IPv6Address(s)
+- elif type == u"mac":
++ elif type == "mac":
+ return MACAddress(s)
+ else:
+- raise ValueError(u"Unknown address type %s." % type)
++ raise ValueError("Unknown address type %s." % type)
+ new = staticmethod(new)
+
+ def to_dom_fragment(self, document):
+ frag = document.createDocumentFragment()
+- elem = document.createElement(u"address")
+- elem.setAttribute(u"addr", self.s)
+- elem.setAttribute(u"addrtype", self.type)
++ elem = document.createElement("address")
++ elem.setAttribute("addr", self.s)
++ elem.setAttribute("addrtype", self.type)
+ frag.appendChild(elem)
+ return frag
+
+@@ -287,21 +287,21 @@ class Address(object):
+
+
+ class IPv4Address(Address):
+- type = property(lambda self: u"ipv4")
++ type = property(lambda self: "ipv4")
+
+ def sort_key(self):
+ return (0, self.s)
+
+
+ class IPv6Address(Address):
+- type = property(lambda self: u"ipv6")
++ type = property(lambda self: "ipv6")
+
+ def sort_key(self):
+ return (1, self.s)
+
+
+ class MACAddress(Address):
+- type = property(lambda self: u"mac")
++ type = property(lambda self: "mac")
+
+ def sort_key(self):
+ return (2, self.s)
+@@ -320,28 +320,25 @@ class Port(object):
+
+ def state_string(self):
+ if self.state is None:
+- return u"unknown"
++ return "unknown"
+ else:
+- return unicode(self.state)
++ return str(self.state)
+
+ def spec_string(self):
+- return u"%d/%s" % self.spec
++ return "%d/%s" % self.spec
+
+- def __cmp__(self, other):
+- d = cmp(self.spec, other.spec)
+- if d != 0:
+- return d
+- return cmp((self.spec, self.service, self.script_results),
+- (other.spec, other.service, other.script_results))
++ def __lt__(self, other):
++ return (self.spec, self.service, self.script_results) < (
++ other.spec, other.service, other.script_results)
+
+ def to_dom_fragment(self, document):
+ frag = document.createDocumentFragment()
+- elem = document.createElement(u"port")
+- elem.setAttribute(u"portid", unicode(self.spec[0]))
+- elem.setAttribute(u"protocol", self.spec[1])
++ elem = document.createElement("port")
++ elem.setAttribute("portid", str(self.spec[0]))
++ elem.setAttribute("protocol", self.spec[1])
+ if self.state is not None:
+- state_elem = document.createElement(u"state")
+- state_elem.setAttribute(u"state", self.state)
++ state_elem = document.createElement("state")
++ state_elem.setAttribute("state", self.state)
+ elem.appendChild(state_elem)
+ elem.appendChild(self.service.to_dom_fragment(document))
+ for sr in self.script_results:
+@@ -385,7 +382,7 @@ class Service(object):
+ if len(parts) == 0:
+ return None
+ else:
+- return u"/".join(parts)
++ return "/".join(parts)
+
+ def version_string(self):
+ """Get a string like in the VERSION column of Nmap output."""
+@@ -395,17 +392,17 @@ class Service(object):
+ if self.version is not None:
+ parts.append(self.version)
+ if self.extrainfo is not None:
+- parts.append(u"(%s)" % self.extrainfo)
++ parts.append("(%s)" % self.extrainfo)
+
+ if len(parts) == 0:
+ return None
+ else:
+- return u" ".join(parts)
++ return " ".join(parts)
+
+ def to_dom_fragment(self, document):
+ frag = document.createDocumentFragment()
+- elem = document.createElement(u"service")
+- for attr in (u"name", u"product", u"version", u"extrainfo", u"tunnel"):
++ elem = document.createElement("service")
++ for attr in ("name", "product", "version", "extrainfo", "tunnel"):
+ v = getattr(self, attr)
+ if v is None:
+ continue
+@@ -435,53 +432,53 @@ class ScriptResult(object):
+ result = []
+ lines = self.output.splitlines()
+ if len(lines) > 0:
+- lines[0] = self.id + u": " + lines[0]
++ lines[0] = self.id + ": " + lines[0]
+ for line in lines[:-1]:
+- result.append(u"| " + line)
++ result.append("| " + line)
+ if len(lines) > 0:
+- result.append(u"|_ " + lines[-1])
++ result.append("|_ " + lines[-1])
+ return result
+
+ def to_dom_fragment(self, document):
+ frag = document.createDocumentFragment()
+- elem = document.createElement(u"script")
+- elem.setAttribute(u"id", self.id)
+- elem.setAttribute(u"output", self.output)
++ elem = document.createElement("script")
++ elem.setAttribute("id", self.id)
++ elem.setAttribute("output", self.output)
+ frag.appendChild(elem)
+ return frag
+
+
+ def format_banner(scan):
+ """Format a startup banner more or less like Nmap does."""
+- scanner = u"Nmap"
+- if scan.scanner is not None and scan.scanner != u"nmap":
++ scanner = "Nmap"
++ if scan.scanner is not None and scan.scanner != "nmap":
+ scanner = scan.scanner
+ parts = [scanner]
+ if scan.version is not None:
+ parts.append(scan.version)
+- parts.append(u"scan")
++ parts.append("scan")
+ if scan.start_date is not None:
+- parts.append(u"initiated %s" % scan.start_date.strftime(
++ parts.append("initiated %s" % scan.start_date.strftime(
+ "%a %b %d %H:%M:%S %Y"))
+ if scan.args is not None:
+- parts.append(u"as: %s" % scan.args)
+- return u" ".join(parts)
++ parts.append("as: %s" % scan.args)
++ return " ".join(parts)
+
+
+ def print_script_result_diffs_text(title, script_results_a, script_results_b,
+ script_result_diffs, f=sys.stdout):
+- table = Table(u"*")
++ table = Table("*")
+ for sr_diff in script_result_diffs:
+ sr_diff.append_to_port_table(table)
+ if len(table) > 0:
+- print >> f
++ print(file=f)
+ if len(script_results_b) == 0:
+- print >> f, u"-%s:" % title
++ print("-%s:" % title, file=f)
+ elif len(script_results_a) == 0:
+- print >> f, u"+%s:" % title
++ print("+%s:" % title, file=f)
+ else:
+- print >> f, u" %s:" % title
+- print >> f, table
++ print(" %s:" % title, file=f)
++ print(table, file=f)
+
+
+ def script_result_diffs_to_dom_fragment(elem, script_results_a,
+@@ -489,13 +486,13 @@ def script_result_diffs_to_dom_fragment(elem, script_results_a,
+ if len(script_results_a) == 0 and len(script_results_b) == 0:
+ return document.createDocumentFragment()
+ elif len(script_results_b) == 0:
+- a_elem = document.createElement(u"a")
++ a_elem = document.createElement("a")
+ for sr in script_results_a:
+ elem.appendChild(sr.to_dom_fragment(document))
+ a_elem.appendChild(elem)
+ return a_elem
+ elif len(script_results_a) == 0:
+- b_elem = document.createElement(u"b")
++ b_elem = document.createElement("b")
+ for sr in script_results_b:
+ elem.appendChild(sr.to_dom_fragment(document))
+ b_elem.appendChild(elem)
+@@ -581,10 +578,10 @@ class ScanDiffText(ScanDiff):
+ banner_a = format_banner(self.scan_a)
+ banner_b = format_banner(self.scan_b)
+ if banner_a != banner_b:
+- print >> self.f, u"-%s" % banner_a
+- print >> self.f, u"+%s" % banner_b
++ print("-%s" % banner_a, file=self.f)
++ print("+%s" % banner_b, file=self.f)
+ elif verbose:
+- print >> self.f, u" %s" % banner_a
++ print(" %s" % banner_a, file=self.f)
+
+ def output_pre_scripts(self, pre_script_result_diffs):
+ print_script_result_diffs_text("Pre-scan script results",
+@@ -597,7 +594,7 @@ class ScanDiffText(ScanDiff):
+ post_script_result_diffs, self.f)
+
+ def output_host_diff(self, h_diff):
+- print >> self.f
++ print(file=self.f)
+ h_diff.print_text(self.f)
+
+ def output_ending(self):
+@@ -622,8 +619,8 @@ class ScanDiffXML(ScanDiff):
+
+ def output_beginning(self):
+ self.writer.startDocument()
+- self.writer.startElement(u"nmapdiff", {u"version": NDIFF_XML_VERSION})
+- self.writer.startElement(u"scandiff", {})
++ self.writer.startElement("nmapdiff", {"version": NDIFF_XML_VERSION})
++ self.writer.startElement("scandiff", {})
+
+ if self.nmaprun_differs():
+ self.writer.frag_a(
+@@ -636,7 +633,7 @@ class ScanDiffXML(ScanDiff):
+
+ def output_pre_scripts(self, pre_script_result_diffs):
+ if len(pre_script_result_diffs) > 0 or verbose:
+- prescript_elem = self.document.createElement(u"prescript")
++ prescript_elem = self.document.createElement("prescript")
+ frag = script_result_diffs_to_dom_fragment(
+ prescript_elem, self.scan_a.pre_script_results,
+ self.scan_b.pre_script_results, pre_script_result_diffs,
+@@ -646,7 +643,7 @@ class ScanDiffXML(ScanDiff):
+
+ def output_post_scripts(self, post_script_result_diffs):
+ if len(post_script_result_diffs) > 0 or verbose:
+- postscript_elem = self.document.createElement(u"postscript")
++ postscript_elem = self.document.createElement("postscript")
+ frag = script_result_diffs_to_dom_fragment(
+ postscript_elem, self.scan_a.post_script_results,
+ self.scan_b.post_script_results, post_script_result_diffs,
+@@ -660,8 +657,8 @@ class ScanDiffXML(ScanDiff):
+ frag.unlink()
+
+ def output_ending(self):
+- self.writer.endElement(u"scandiff")
+- self.writer.endElement(u"nmapdiff")
++ self.writer.endElement("scandiff")
++ self.writer.endElement("nmapdiff")
+ self.writer.endDocument()
+
+
+@@ -719,9 +716,9 @@ class HostDiff(object):
+ self.cost += os_cost
+
+ extraports_a = tuple((count, state)
+- for (state, count) in self.host_a.extraports.items())
++ for (state, count) in list(self.host_a.extraports.items()))
+ extraports_b = tuple((count, state)
+- for (state, count) in self.host_b.extraports.items())
++ for (state, count) in list(self.host_b.extraports.items()))
+ if extraports_a != extraports_b:
+ self.extraports_changed = True
+ self.cost += 1
+@@ -747,69 +744,69 @@ class HostDiff(object):
+ # Names and addresses.
+ if self.id_changed:
+ if host_a.state is not None:
+- print >> f, u"-%s:" % host_a.format_name()
++ print("-%s:" % host_a.format_name(), file=f)
+ if self.host_b.state is not None:
+- print >> f, u"+%s:" % host_b.format_name()
++ print("+%s:" % host_b.format_name(), file=f)
+ else:
+- print >> f, u" %s:" % host_a.format_name()
++ print(" %s:" % host_a.format_name(), file=f)
+
+ # State.
+ if self.state_changed:
+ if host_a.state is not None:
+- print >> f, u"-Host is %s." % host_a.state
++ print("-Host is %s." % host_a.state, file=f)
+ if host_b.state is not None:
+- print >> f, u"+Host is %s." % host_b.state
++ print("+Host is %s." % host_b.state, file=f)
+ elif verbose:
+- print >> f, u" Host is %s." % host_b.state
++ print(" Host is %s." % host_b.state, file=f)
+
+ # Extraports.
+ if self.extraports_changed:
+ if len(host_a.extraports) > 0:
+- print >> f, u"-Not shown: %s" % host_a.extraports_string()
++ print("-Not shown: %s" % host_a.extraports_string(), file=f)
+ if len(host_b.extraports) > 0:
+- print >> f, u"+Not shown: %s" % host_b.extraports_string()
++ print("+Not shown: %s" % host_b.extraports_string(), file=f)
+ elif verbose:
+ if len(host_a.extraports) > 0:
+- print >> f, u" Not shown: %s" % host_a.extraports_string()
++ print(" Not shown: %s" % host_a.extraports_string(), file=f)
+
+ # Port table.
+- port_table = Table(u"** * * *")
++ port_table = Table("** * * *")
+ if host_a.state is None:
+- mark = u"+"
++ mark = "+"
+ elif host_b.state is None:
+- mark = u"-"
++ mark = "-"
+ else:
+- mark = u" "
+- port_table.append((mark, u"PORT", u"STATE", u"SERVICE", u"VERSION"))
++ mark = " "
++ port_table.append((mark, "PORT", "STATE", "SERVICE", "VERSION"))
+
+ for port in self.ports:
+ port_diff = self.port_diffs[port]
+ port_diff.append_to_port_table(port_table, host_a, host_b)
+
+ if len(port_table) > 1:
+- print >> f, port_table
++ print(port_table, file=f)
+
+ # OS changes.
+ if self.os_changed or verbose:
+ if len(host_a.os) > 0:
+ if len(host_b.os) > 0:
+- print >> f, u" OS details:"
++ print(" OS details:", file=f)
+ else:
+- print >> f, u"-OS details:"
++ print("-OS details:", file=f)
+ elif len(host_b.os) > 0:
+- print >> f, u"+OS details:"
++ print("+OS details:", file=f)
+ # os_diffs is a list of 5-tuples returned by
+ # difflib.SequenceMatcher.
+ for op, i1, i2, j1, j2 in self.os_diffs:
+ if op == "replace" or op == "delete":
+ for i in range(i1, i2):
+- print >> f, "- %s" % host_a.os[i]
++ print("- %s" % host_a.os[i], file=f)
+ if op == "replace" or op == "insert":
+ for i in range(j1, j2):
+- print >> f, "+ %s" % host_b.os[i]
++ print("+ %s" % host_b.os[i], file=f)
+ if op == "equal":
+ for i in range(i1, i2):
+- print >> f, " %s" % host_a.os[i]
++ print(" %s" % host_a.os[i], file=f)
+
+ print_script_result_diffs_text("Host script results",
+ host_a.script_results, host_b.script_results,
+@@ -820,32 +817,32 @@ class HostDiff(object):
+ host_b = self.host_b
+
+ frag = document.createDocumentFragment()
+- hostdiff_elem = document.createElement(u"hostdiff")
++ hostdiff_elem = document.createElement("hostdiff")
+ frag.appendChild(hostdiff_elem)
+
+ if host_a.state is None or host_b.state is None:
+ # The host is missing in one scan. Output the whole thing.
+ if host_a.state is not None:
+- a_elem = document.createElement(u"a")
++ a_elem = document.createElement("a")
+ a_elem.appendChild(host_a.to_dom_fragment(document))
+ hostdiff_elem.appendChild(a_elem)
+ elif host_b.state is not None:
+- b_elem = document.createElement(u"b")
++ b_elem = document.createElement("b")
+ b_elem.appendChild(host_b.to_dom_fragment(document))
+ hostdiff_elem.appendChild(b_elem)
+ return frag
+
+- host_elem = document.createElement(u"host")
++ host_elem = document.createElement("host")
+
+ # State.
+ if host_a.state == host_b.state:
+ if verbose:
+ host_elem.appendChild(host_a.state_to_dom_fragment(document))
+ else:
+- a_elem = document.createElement(u"a")
++ a_elem = document.createElement("a")
+ a_elem.appendChild(host_a.state_to_dom_fragment(document))
+ host_elem.appendChild(a_elem)
+- b_elem = document.createElement(u"b")
++ b_elem = document.createElement("b")
+ b_elem.appendChild(host_b.state_to_dom_fragment(document))
+ host_elem.appendChild(b_elem)
+
+@@ -854,31 +851,31 @@ class HostDiff(object):
+ addrset_b = set(host_b.addresses)
+ for addr in sorted(addrset_a.intersection(addrset_b)):
+ host_elem.appendChild(addr.to_dom_fragment(document))
+- a_elem = document.createElement(u"a")
++ a_elem = document.createElement("a")
+ for addr in sorted(addrset_a - addrset_b):
+ a_elem.appendChild(addr.to_dom_fragment(document))
+ if a_elem.hasChildNodes():
+ host_elem.appendChild(a_elem)
+- b_elem = document.createElement(u"b")
++ b_elem = document.createElement("b")
+ for addr in sorted(addrset_b - addrset_a):
+ b_elem.appendChild(addr.to_dom_fragment(document))
+ if b_elem.hasChildNodes():
+ host_elem.appendChild(b_elem)
+
+ # Host names.
+- hostnames_elem = document.createElement(u"hostnames")
++ hostnames_elem = document.createElement("hostnames")
+ hostnameset_a = set(host_a.hostnames)
+ hostnameset_b = set(host_b.hostnames)
+ for hostname in sorted(hostnameset_a.intersection(hostnameset_b)):
+ hostnames_elem.appendChild(
+ host_a.hostname_to_dom_fragment(document, hostname))
+- a_elem = document.createElement(u"a")
++ a_elem = document.createElement("a")
+ for hostname in sorted(hostnameset_a - hostnameset_b):
+ a_elem.appendChild(
+ host_a.hostname_to_dom_fragment(document, hostname))
+ if a_elem.hasChildNodes():
+ hostnames_elem.appendChild(a_elem)
+- b_elem = document.createElement(u"b")
++ b_elem = document.createElement("b")
+ for hostname in sorted(hostnameset_b - hostnameset_a):
+ b_elem.appendChild(
+ host_b.hostname_to_dom_fragment(document, hostname))
+@@ -887,15 +884,15 @@ class HostDiff(object):
+ if hostnames_elem.hasChildNodes():
+ host_elem.appendChild(hostnames_elem)
+
+- ports_elem = document.createElement(u"ports")
++ ports_elem = document.createElement("ports")
+ # Extraports.
+ if host_a.extraports == host_b.extraports:
+ ports_elem.appendChild(host_a.extraports_to_dom_fragment(document))
+ else:
+- a_elem = document.createElement(u"a")
++ a_elem = document.createElement("a")
+ a_elem.appendChild(host_a.extraports_to_dom_fragment(document))
+ ports_elem.appendChild(a_elem)
+- b_elem = document.createElement(u"b")
++ b_elem = document.createElement("b")
+ b_elem.appendChild(host_b.extraports_to_dom_fragment(document))
+ ports_elem.appendChild(b_elem)
+ # Port list.
+@@ -911,18 +908,18 @@ class HostDiff(object):
+
+ # OS changes.
+ if self.os_changed or verbose:
+- os_elem = document.createElement(u"os")
++ os_elem = document.createElement("os")
+ # os_diffs is a list of 5-tuples returned by
+ # difflib.SequenceMatcher.
+ for op, i1, i2, j1, j2 in self.os_diffs:
+ if op == "replace" or op == "delete":
+- a_elem = document.createElement(u"a")
++ a_elem = document.createElement("a")
+ for i in range(i1, i2):
+ a_elem.appendChild(host_a.os_to_dom_fragment(
+ document, host_a.os[i]))
+ os_elem.appendChild(a_elem)
+ if op == "replace" or op == "insert":
+- b_elem = document.createElement(u"b")
++ b_elem = document.createElement("b")
+ for i in range(j1, j2):
+ b_elem.appendChild(host_b.os_to_dom_fragment(
+ document, host_b.os[i]))
+@@ -936,7 +933,7 @@ class HostDiff(object):
+
+ # Host script changes.
+ if len(self.script_result_diffs) > 0 or verbose:
+- hostscript_elem = document.createElement(u"hostscript")
++ hostscript_elem = document.createElement("hostscript")
+ host_elem.appendChild(script_result_diffs_to_dom_fragment(
+ hostscript_elem, host_a.script_results,
+ host_b.script_results, self.script_result_diffs,
+@@ -989,38 +986,38 @@ class PortDiff(object):
+ self.port_b.service.version_string()]
+ if a_columns == b_columns:
+ if verbose or self.script_result_diffs > 0:
+- table.append([u" "] + a_columns)
++ table.append([" "] + a_columns)
+ else:
+ if not host_a.is_extraports(self.port_a.state):
+- table.append([u"-"] + a_columns)
++ table.append(["-"] + a_columns)
+ if not host_b.is_extraports(self.port_b.state):
+- table.append([u"+"] + b_columns)
++ table.append(["+"] + b_columns)
+
+ for sr_diff in self.script_result_diffs:
+ sr_diff.append_to_port_table(table)
+
+ def to_dom_fragment(self, document):
+ frag = document.createDocumentFragment()
+- portdiff_elem = document.createElement(u"portdiff")
++ portdiff_elem = document.createElement("portdiff")
+ frag.appendChild(portdiff_elem)
+ if (self.port_a.spec == self.port_b.spec and
+ self.port_a.state == self.port_b.state):
+- port_elem = document.createElement(u"port")
+- port_elem.setAttribute(u"portid", unicode(self.port_a.spec[0]))
+- port_elem.setAttribute(u"protocol", self.port_a.spec[1])
++ port_elem = document.createElement("port")
++ port_elem.setAttribute("portid", str(self.port_a.spec[0]))
++ port_elem.setAttribute("protocol", self.port_a.spec[1])
+ if self.port_a.state is not None:
+- state_elem = document.createElement(u"state")
+- state_elem.setAttribute(u"state", self.port_a.state)
++ state_elem = document.createElement("state")
++ state_elem.setAttribute("state", self.port_a.state)
+ port_elem.appendChild(state_elem)
+ if self.port_a.service == self.port_b.service:
+ port_elem.appendChild(
+ self.port_a.service.to_dom_fragment(document))
+ else:
+- a_elem = document.createElement(u"a")
++ a_elem = document.createElement("a")
+ a_elem.appendChild(
+ self.port_a.service.to_dom_fragment(document))
+ port_elem.appendChild(a_elem)
+- b_elem = document.createElement(u"b")
++ b_elem = document.createElement("b")
+ b_elem.appendChild(
+ self.port_b.service.to_dom_fragment(document))
+ port_elem.appendChild(b_elem)
+@@ -1028,10 +1025,10 @@ class PortDiff(object):
+ port_elem.appendChild(sr_diff.to_dom_fragment(document))
+ portdiff_elem.appendChild(port_elem)
+ else:
+- a_elem = document.createElement(u"a")
++ a_elem = document.createElement("a")
+ a_elem.appendChild(self.port_a.to_dom_fragment(document))
+ portdiff_elem.appendChild(a_elem)
+- b_elem = document.createElement(u"b")
++ b_elem = document.createElement("b")
+ b_elem.appendChild(self.port_b.to_dom_fragment(document))
+ portdiff_elem.appendChild(b_elem)
+
+@@ -1086,13 +1083,13 @@ class ScriptResultDiff(object):
+ for op, i1, i2, j1, j2 in diffs.get_opcodes():
+ if op == "replace" or op == "delete":
+ for k in range(i1, i2):
+- table.append_raw(u"-" + a_lines[k])
++ table.append_raw("-" + a_lines[k])
+ if op == "replace" or op == "insert":
+ for k in range(j1, j2):
+- table.append_raw(u"+" + b_lines[k])
++ table.append_raw("+" + b_lines[k])
+ if op == "equal":
+ for k in range(i1, i2):
+- table.append_raw(u" " + a_lines[k])
++ table.append_raw(" " + a_lines[k])
+
+ def to_dom_fragment(self, document):
+ frag = document.createDocumentFragment()
+@@ -1102,11 +1099,11 @@ class ScriptResultDiff(object):
+ frag.appendChild(self.sr_a.to_dom_fragment(document))
+ else:
+ if self.sr_a is not None:
+- a_elem = document.createElement(u"a")
++ a_elem = document.createElement("a")
+ a_elem.appendChild(self.sr_a.to_dom_fragment(document))
+ frag.appendChild(a_elem)
+ if self.sr_b is not None:
+- b_elem = document.createElement(u"b")
++ b_elem = document.createElement("b")
+ b_elem.appendChild(self.sr_b.to_dom_fragment(document))
+ frag.appendChild(b_elem)
+ return frag
+@@ -1120,7 +1117,7 @@ class Table(object):
+ copied to the output."""
+ self.widths = []
+ self.rows = []
+- self.prefix = u""
++ self.prefix = ""
+ self.padding = []
+ j = 0
+ while j < len(template) and template[j] != "*":
+@@ -1145,7 +1142,7 @@ class Table(object):
+
+ for i in range(len(row)):
+ if row[i] is None:
+- s = u""
++ s = ""
+ else:
+ s = str(row[i])
+ if i == len(self.widths):
+@@ -1167,7 +1164,7 @@ class Table(object):
+ for row in self.rows:
+ parts = [self.prefix]
+ i = 0
+- if isinstance(row, basestring):
++ if isinstance(row, str):
+ # A raw string.
+ lines.append(row)
+ else:
+@@ -1176,13 +1173,13 @@ class Table(object):
+ if i < len(self.padding):
+ parts.append(self.padding[i])
+ i += 1
+- lines.append(u"".join(parts).rstrip())
+- return u"\n".join(lines)
++ lines.append("".join(parts).rstrip())
++ return "\n".join(lines)
+
+
+ def warn(str):
+ """Print a warning to stderr."""
+- print >> sys.stderr, str
++ print(str, file=sys.stderr)
+
+
+ class NmapContentHandler(xml.sax.handler.ContentHandler):
+@@ -1200,22 +1197,22 @@ class NmapContentHandler(xml.sax.handler.ContentHandler):
+ self.current_port = None
+
+ self._start_elem_handlers = {
+- u"nmaprun": self._start_nmaprun,
+- u"host": self._start_host,
+- u"status": self._start_status,
+- u"address": self._start_address,
+- u"hostname": self._start_hostname,
+- u"extraports": self._start_extraports,
+- u"port": self._start_port,
+- u"state": self._start_state,
+- u"service": self._start_service,
+- u"script": self._start_script,
+- u"osmatch": self._start_osmatch,
+- u"finished": self._start_finished,
++ "nmaprun": self._start_nmaprun,
++ "host": self._start_host,
++ "status": self._start_status,
++ "address": self._start_address,
++ "hostname": self._start_hostname,
++ "extraports": self._start_extraports,
++ "port": self._start_port,
++ "state": self._start_state,
++ "service": self._start_service,
++ "script": self._start_script,
++ "osmatch": self._start_osmatch,
++ "finished": self._start_finished,
+ }
+ self._end_elem_handlers = {
+- u'host': self._end_host,
+- u'port': self._end_port,
++ 'host': self._end_host,
++ 'port': self._end_port,
+ }
+
+ def parent_element(self):
+@@ -1245,68 +1242,68 @@ class NmapContentHandler(xml.sax.handler.ContentHandler):
+ def _start_nmaprun(self, name, attrs):
+ assert self.parent_element() is None
+ if "start" in attrs:
+- start_timestamp = int(attrs.get(u"start"))
++ start_timestamp = int(attrs.get("start"))
+ self.scan.start_date = datetime.datetime.fromtimestamp(
+ start_timestamp)
+- self.scan.scanner = attrs.get(u"scanner")
+- self.scan.args = attrs.get(u"args")
+- self.scan.version = attrs.get(u"version")
++ self.scan.scanner = attrs.get("scanner")
++ self.scan.args = attrs.get("args")
++ self.scan.version = attrs.get("version")
+
+ def _start_host(self, name, attrs):
+- assert self.parent_element() == u"nmaprun"
++ assert self.parent_element() == "nmaprun"
+ self.current_host = Host()
+ self.scan.hosts.append(self.current_host)
+
+ def _start_status(self, name, attrs):
+- assert self.parent_element() == u"host"
++ assert self.parent_element() == "host"
+ assert self.current_host is not None
+- state = attrs.get(u"state")
++ state = attrs.get("state")
+ if state is None:
+ warn(u'%s element of host %s is missing the "state" attribute; '
+- 'assuming \unknown\.' % (
++ r'assuming \unknown\.' % (
+ name, self.current_host.format_name()))
+ return
+ self.current_host.state = state
+
+ def _start_address(self, name, attrs):
+- assert self.parent_element() == u"host"
++ assert self.parent_element() == "host"
+ assert self.current_host is not None
+- addr = attrs.get(u"addr")
++ addr = attrs.get("addr")
+ if addr is None:
+- warn(u'%s element of host %s is missing the "addr" '
++ warn('%s element of host %s is missing the "addr" '
+ 'attribute; skipping.' % (
+ name, self.current_host.format_name()))
+ return
+- addrtype = attrs.get(u"addrtype", u"ipv4")
++ addrtype = attrs.get("addrtype", "ipv4")
+ self.current_host.add_address(Address.new(addrtype, addr))
+
+ def _start_hostname(self, name, attrs):
+- assert self.parent_element() == u"hostnames"
++ assert self.parent_element() == "hostnames"
+ assert self.current_host is not None
+- hostname = attrs.get(u"name")
++ hostname = attrs.get("name")
+ if hostname is None:
+- warn(u'%s element of host %s is missing the "name" '
++ warn('%s element of host %s is missing the "name" '
+ 'attribute; skipping.' % (
+ name, self.current_host.format_name()))
+ return
+ self.current_host.add_hostname(hostname)
+
+ def _start_extraports(self, name, attrs):
+- assert self.parent_element() == u"ports"
++ assert self.parent_element() == "ports"
+ assert self.current_host is not None
+- state = attrs.get(u"state")
++ state = attrs.get("state")
+ if state is None:
+- warn(u'%s element of host %s is missing the "state" '
++ warn('%s element of host %s is missing the "state" '
+ 'attribute; assuming "unknown".' % (
+ name, self.current_host.format_name()))
+ state = None
+ if state in self.current_host.extraports:
+- warn(u'Duplicate extraports state "%s" in host %s.' % (
++ warn('Duplicate extraports state "%s" in host %s.' % (
+ state, self.current_host.format_name()))
+
+- count = attrs.get(u"count")
++ count = attrs.get("count")
+ if count is None:
+- warn(u'%s element of host %s is missing the "count" '
++ warn('%s element of host %s is missing the "count" '
+ 'attribute; assuming 0.' % (
+ name, self.current_host.format_name()))
+ count = 0
+@@ -1314,99 +1311,99 @@ class NmapContentHandler(xml.sax.handler.ContentHandler):
+ try:
+ count = int(count)
+ except ValueError:
+- warn(u"Can't convert extraports count \"%s\" "
++ warn("Can't convert extraports count \"%s\" "
+ "to an integer in host %s; assuming 0." % (
+- attrs[u"count"], self.current_host.format_name()))
++ attrs["count"], self.current_host.format_name()))
+ count = 0
+ self.current_host.extraports[state] = count
+
+ def _start_port(self, name, attrs):
+- assert self.parent_element() == u"ports"
++ assert self.parent_element() == "ports"
+ assert self.current_host is not None
+- portid_str = attrs.get(u"portid")
++ portid_str = attrs.get("portid")
+ if portid_str is None:
+- warn(u'%s element of host %s missing the "portid" '
++ warn('%s element of host %s missing the "portid" '
+ 'attribute; skipping.' % (
+ name, self.current_host.format_name()))
+ return
+ try:
+ portid = int(portid_str)
+ except ValueError:
+- warn(u"Can't convert portid \"%s\" to an integer "
++ warn("Can't convert portid \"%s\" to an integer "
+ "in host %s; skipping port." % (
+ portid_str, self.current_host.format_name()))
+ return
+- protocol = attrs.get(u"protocol")
++ protocol = attrs.get("protocol")
+ if protocol is None:
+- warn(u'%s element of host %s missing the "protocol" '
++ warn('%s element of host %s missing the "protocol" '
+ 'attribute; skipping.' % (
+ name, self.current_host.format_name()))
+ return
+ self.current_port = Port((portid, protocol))
+
+ def _start_state(self, name, attrs):
+- assert self.parent_element() == u"port"
++ assert self.parent_element() == "port"
+ assert self.current_host is not None
+ if self.current_port is None:
+ return
+ if "state" not in attrs:
+- warn(u'%s element of port %s is missing the "state" '
++ warn('%s element of port %s is missing the "state" '
+ 'attribute; assuming "unknown".' % (
+ name, self.current_port.spec_string()))
+ return
+- self.current_port.state = attrs[u"state"]
++ self.current_port.state = attrs["state"]
+ self.current_host.add_port(self.current_port)
+
+ def _start_service(self, name, attrs):
+- assert self.parent_element() == u"port"
++ assert self.parent_element() == "port"
+ assert self.current_host is not None
+ if self.current_port is None:
+ return
+- self.current_port.service.name = attrs.get(u"name")
+- self.current_port.service.product = attrs.get(u"product")
+- self.current_port.service.version = attrs.get(u"version")
+- self.current_port.service.extrainfo = attrs.get(u"extrainfo")
+- self.current_port.service.tunnel = attrs.get(u"tunnel")
++ self.current_port.service.name = attrs.get("name")
++ self.current_port.service.product = attrs.get("product")
++ self.current_port.service.version = attrs.get("version")
++ self.current_port.service.extrainfo = attrs.get("extrainfo")
++ self.current_port.service.tunnel = attrs.get("tunnel")
+
+ def _start_script(self, name, attrs):
+ result = ScriptResult()
+- result.id = attrs.get(u"id")
++ result.id = attrs.get("id")
+ if result.id is None:
+- warn(u'%s element missing the "id" attribute; skipping.' % name)
++ warn('%s element missing the "id" attribute; skipping.' % name)
+ return
+
+- result.output = attrs.get(u"output")
++ result.output = attrs.get("output")
+ if result.output is None:
+- warn(u'%s element missing the "output" attribute; skipping.'
++ warn('%s element missing the "output" attribute; skipping.'
+ % name)
+ return
+- if self.parent_element() == u"prescript":
++ if self.parent_element() == "prescript":
+ self.scan.pre_script_results.append(result)
+- elif self.parent_element() == u"postscript":
++ elif self.parent_element() == "postscript":
+ self.scan.post_script_results.append(result)
+- elif self.parent_element() == u"hostscript":
++ elif self.parent_element() == "hostscript":
+ self.current_host.script_results.append(result)
+- elif self.parent_element() == u"port":
++ elif self.parent_element() == "port":
+ self.current_port.script_results.append(result)
+ else:
+- warn(u"%s element not inside prescript, postscript, hostscript, "
++ warn("%s element not inside prescript, postscript, hostscript, "
+ "or port element; ignoring." % name)
+ return
+
+ def _start_osmatch(self, name, attrs):
+- assert self.parent_element() == u"os"
++ assert self.parent_element() == "os"
+ assert self.current_host is not None
+ if "name" not in attrs:
+- warn(u'%s element of host %s is missing the "name" '
++ warn('%s element of host %s is missing the "name" '
+ 'attribute; skipping.' % (
+ name, self.current_host.format_name()))
+ return
+- self.current_host.os.append(attrs[u"name"])
++ self.current_host.os.append(attrs["name"])
+
+ def _start_finished(self, name, attrs):
+- assert self.parent_element() == u"runstats"
++ assert self.parent_element() == "runstats"
+ if "time" in attrs:
+- end_timestamp = int(attrs.get(u"time"))
++ end_timestamp = int(attrs.get("time"))
+ self.scan.end_date = datetime.datetime.fromtimestamp(end_timestamp)
+
+ def _end_host(self, name):
+@@ -1425,23 +1422,23 @@ class XMLWriter (xml.sax.saxutils.XMLGenerator):
+
+ def frag(self, frag):
+ for node in frag.childNodes:
+- node.writexml(self.f, newl=u"\n")
++ node.writexml(self.f, newl="\n")
+
+ def frag_a(self, frag):
+- self.startElement(u"a", {})
++ self.startElement("a", {})
+ for node in frag.childNodes:
+- node.writexml(self.f, newl=u"\n")
+- self.endElement(u"a")
++ node.writexml(self.f, newl="\n")
++ self.endElement("a")
+
+ def frag_b(self, frag):
+- self.startElement(u"b", {})
++ self.startElement("b", {})
+ for node in frag.childNodes:
+- node.writexml(self.f, newl=u"\n")
+- self.endElement(u"b")
++ node.writexml(self.f, newl="\n")
++ self.endElement("b")
+
+
+ def usage():
+- print u"""\
++ print("""\
+ Usage: %s [option] FILE1 FILE2
+ Compare two Nmap XML files and display a list of their differences.
+ Differences include host state changes, port state changes, and changes to
+@@ -1451,7 +1448,7 @@ service and OS detection.
+ -v, --verbose also show hosts and ports that haven't changed.
+ --text display output in text format (default)
+ --xml display output in XML format\
+-""" % sys.argv[0]
++""" % sys.argv[0])
+
+ EXIT_EQUAL = 0
+ EXIT_DIFFERENT = 1
+@@ -1459,8 +1456,8 @@ EXIT_ERROR = 2
+
+
+ def usage_error(msg):
+- print >> sys.stderr, u"%s: %s" % (sys.argv[0], msg)
+- print >> sys.stderr, u"Try '%s -h' for help." % sys.argv[0]
++ print("%s: %s" % (sys.argv[0], msg), file=sys.stderr)
++ print("Try '%s -h' for help." % sys.argv[0], file=sys.stderr)
+ sys.exit(EXIT_ERROR)
+
+
+@@ -1471,7 +1468,7 @@ def main():
+ try:
+ opts, input_filenames = getopt.gnu_getopt(
+ sys.argv[1:], "hv", ["help", "text", "verbose", "xml"])
+- except getopt.GetoptError, e:
++ except getopt.GetoptError as e:
+ usage_error(e.msg)
+ for o, a in opts:
+ if o == "-h" or o == "--help":
+@@ -1481,15 +1478,15 @@ def main():
+ verbose = True
+ elif o == "--text":
+ if output_format is not None and output_format != "text":
+- usage_error(u"contradictory output format options.")
++ usage_error("contradictory output format options.")
+ output_format = "text"
+ elif o == "--xml":
+ if output_format is not None and output_format != "xml":
+- usage_error(u"contradictory output format options.")
++ usage_error("contradictory output format options.")
+ output_format = "xml"
+
+ if len(input_filenames) != 2:
+- usage_error(u"need exactly two input filenames.")
++ usage_error("need exactly two input filenames.")
+
+ if output_format is None:
+ output_format = "text"
+@@ -1502,8 +1499,8 @@ def main():
+ scan_a.load_from_file(filename_a)
+ scan_b = Scan()
+ scan_b.load_from_file(filename_b)
+- except IOError, e:
+- print >> sys.stderr, u"Can't open file: %s" % str(e)
++ except IOError as e:
++ print("Can't open file: %s" % str(e), file=sys.stderr)
+ sys.exit(EXIT_ERROR)
+
+ if output_format == "text":
+diff --git a/ndiff/ndifftest.py b/ndiff/ndifftest.py
+index 2fa4ae0..27fc525 100755
+--- a/ndiff/ndifftest.py
++++ b/ndiff/ndifftest.py
+@@ -1,4 +1,4 @@
+-#!/usr/bin/env python
++#!/usr/bin/env python3
+
+ # Unit tests for Ndiff.
+
+@@ -22,7 +22,7 @@ for x in dir(ndiff):
+ sys.dont_write_bytecode = dont_write_bytecode
+ del dont_write_bytecode
+
+-import StringIO
++import io
+
+
+ class scan_test(unittest.TestCase):
+@@ -52,7 +52,7 @@ class scan_test(unittest.TestCase):
+ scan.load_from_file("test-scans/single.xml")
+ host = scan.hosts[0]
+ self.assertEqual(len(host.ports), 5)
+- self.assertEqual(host.extraports.items(), [("filtered", 95)])
++ self.assertEqual(list(host.extraports.items()), [("filtered", 95)])
+
+ def test_extraports_multi(self):
+ """Test that the correct number of known ports is returned when there
+@@ -68,9 +68,9 @@ class scan_test(unittest.TestCase):
+ """Test that nmaprun information is recorded."""
+ scan = Scan()
+ scan.load_from_file("test-scans/empty.xml")
+- self.assertEqual(scan.scanner, u"nmap")
+- self.assertEqual(scan.version, u"4.90RC2")
+- self.assertEqual(scan.args, u"nmap -oX empty.xml -p 1-100")
++ self.assertEqual(scan.scanner, "nmap")
++ self.assertEqual(scan.version, "4.90RC2")
++ self.assertEqual(scan.args, "nmap -oX empty.xml -p 1-100")
+
+ def test_addresses(self):
+ """Test that addresses are recorded."""
+@@ -84,7 +84,7 @@ class scan_test(unittest.TestCase):
+ scan = Scan()
+ scan.load_from_file("test-scans/simple.xml")
+ host = scan.hosts[0]
+- self.assertEqual(host.hostnames, [u"scanme.nmap.org"])
++ self.assertEqual(host.hostnames, ["scanme.nmap.org"])
+
+ def test_os(self):
+ """Test that OS information is recorded."""
+@@ -99,7 +99,7 @@ class scan_test(unittest.TestCase):
+ scan.load_from_file("test-scans/complex.xml")
+ host = scan.hosts[0]
+ self.assertTrue(len(host.script_results) > 0)
+- self.assertTrue(len(host.ports[(22, u"tcp")].script_results) > 0)
++ self.assertTrue(len(host.ports[(22, "tcp")].script_results) > 0)
+
+ # This test is commented out because Nmap XML doesn't store any information
+ # about down hosts, not even the fact that they are down. Recovering the list
+@@ -128,16 +128,16 @@ class host_test(unittest.TestCase):
+
+ def test_format_name(self):
+ h = Host()
+- self.assertTrue(isinstance(h.format_name(), basestring))
+- h.add_address(IPv4Address(u"127.0.0.1"))
+- self.assertTrue(u"127.0.0.1" in h.format_name())
++ self.assertTrue(isinstance(h.format_name(), str))
++ h.add_address(IPv4Address("127.0.0.1"))
++ self.assertTrue("127.0.0.1" in h.format_name())
+ h.add_address(IPv6Address("::1"))
+- self.assertTrue(u"127.0.0.1" in h.format_name())
+- self.assertTrue(u"::1" in h.format_name())
+- h.add_hostname(u"localhost")
+- self.assertTrue(u"127.0.0.1" in h.format_name())
+- self.assertTrue(u"::1" in h.format_name())
+- self.assertTrue(u"localhost" in h.format_name())
++ self.assertTrue("127.0.0.1" in h.format_name())
++ self.assertTrue("::1" in h.format_name())
++ h.add_hostname("localhost")
++ self.assertTrue("127.0.0.1" in h.format_name())
++ self.assertTrue("::1" in h.format_name())
++ self.assertTrue("localhost" in h.format_name())
+
+ def test_empty_get_port(self):
+ h = Host()
+@@ -197,8 +197,8 @@ class host_test(unittest.TestCase):
+ h = s.hosts[0]
+ self.assertEqual(len(h.ports), 5)
+ self.assertEqual(len(h.extraports), 1)
+- self.assertEqual(h.extraports.keys()[0], u"filtered")
+- self.assertEqual(h.extraports.values()[0], 95)
++ self.assertEqual(list(h.extraports.keys())[0], "filtered")
++ self.assertEqual(list(h.extraports.values())[0], 95)
+ self.assertEqual(h.state, "up")
+
+
+@@ -241,13 +241,13 @@ class port_test(unittest.TestCase):
+ """Test the Port class."""
+ def test_spec_string(self):
+ p = Port((10, "tcp"))
+- self.assertEqual(p.spec_string(), u"10/tcp")
++ self.assertEqual(p.spec_string(), "10/tcp")
+ p = Port((100, "ip"))
+- self.assertEqual(p.spec_string(), u"100/ip")
++ self.assertEqual(p.spec_string(), "100/ip")
+
+ def test_state_string(self):
+ p = Port((10, "tcp"))
+- self.assertEqual(p.state_string(), u"unknown")
++ self.assertEqual(p.state_string(), "unknown")
+
+
+ class service_test(unittest.TestCase):
+@@ -255,47 +255,47 @@ class service_test(unittest.TestCase):
+ def test_compare(self):
+ """Test that services with the same contents compare equal."""
+ a = Service()
+- a.name = u"ftp"
+- a.product = u"FooBar FTP"
+- a.version = u"1.1.1"
+- a.tunnel = u"ssl"
++ a.name = "ftp"
++ a.product = "FooBar FTP"
++ a.version = "1.1.1"
++ a.tunnel = "ssl"
+ self.assertEqual(a, a)
+ b = Service()
+- b.name = u"ftp"
+- b.product = u"FooBar FTP"
+- b.version = u"1.1.1"
+- b.tunnel = u"ssl"
++ b.name = "ftp"
++ b.product = "FooBar FTP"
++ b.version = "1.1.1"
++ b.tunnel = "ssl"
+ self.assertEqual(a, b)
+- b.name = u"http"
++ b.name = "http"
+ self.assertNotEqual(a, b)
+ c = Service()
+ self.assertNotEqual(a, c)
+
+ def test_tunnel(self):
+ serv = Service()
+- serv.name = u"http"
+- serv.tunnel = u"ssl"
+- self.assertEqual(serv.name_string(), u"ssl/http")
++ serv.name = "http"
++ serv.tunnel = "ssl"
++ self.assertEqual(serv.name_string(), "ssl/http")
+
+ def test_version_string(self):
+ serv = Service()
+- serv.product = u"FooBar"
++ serv.product = "FooBar"
+ self.assertTrue(len(serv.version_string()) > 0)
+ serv = Service()
+- serv.version = u"1.2.3"
++ serv.version = "1.2.3"
+ self.assertTrue(len(serv.version_string()) > 0)
+ serv = Service()
+- serv.extrainfo = u"misconfigured"
++ serv.extrainfo = "misconfigured"
+ self.assertTrue(len(serv.version_string()) > 0)
+ serv = Service()
+- serv.product = u"FooBar"
+- serv.version = u"1.2.3"
++ serv.product = "FooBar"
++ serv.version = "1.2.3"
+ # Must match Nmap output.
+ self.assertEqual(serv.version_string(),
+- u"%s %s" % (serv.product, serv.version))
+- serv.extrainfo = u"misconfigured"
++ "%s %s" % (serv.product, serv.version))
++ serv.extrainfo = "misconfigured"
+ self.assertEqual(serv.version_string(),
+- u"%s %s (%s)" % (serv.product, serv.version, serv.extrainfo))
++ "%s %s (%s)" % (serv.product, serv.version, serv.extrainfo))
+
+
+ class ScanDiffSub(ScanDiff):
+@@ -703,7 +703,7 @@ class scan_diff_xml_test(unittest.TestCase):
+ a.load_from_file("test-scans/empty.xml")
+ b = Scan()
+ b.load_from_file("test-scans/simple.xml")
+- f = StringIO.StringIO()
++ f = io.StringIO()
+ self.scan_diff = ScanDiffXML(a, b, f)
+ self.scan_diff.output()
+ self.xml = f.getvalue()
+@@ -712,8 +712,8 @@ class scan_diff_xml_test(unittest.TestCase):
+ def test_well_formed(self):
+ try:
+ document = xml.dom.minidom.parseString(self.xml)
+- except Exception, e:
+- self.fail(u"Parsing XML diff output caused the exception: %s"
++ except Exception as e:
++ self.fail("Parsing XML diff output caused the exception: %s"
+ % str(e))
+
+
+@@ -739,8 +739,8 @@ def host_apply_diff(host, diff):
+ host.os = diff.host_b.os[:]
+
+ if diff.extraports_changed:
+- for state in host.extraports.keys():
+- for port in host.ports.values():
++ for state in list(host.extraports.keys()):
++ for port in list(host.ports.values()):
+ if port.state == state:
+ del host.ports[port.spec]
+ host.extraports = diff.host_b.extraports.copy()
+diff --git a/ndiff/scripts/ndiff b/ndiff/scripts/ndiff
+index 8517c07..4671e73 100755
+--- a/ndiff/scripts/ndiff
++++ b/ndiff/scripts/ndiff
+@@ -1,4 +1,4 @@
+-#!/usr/bin/env python
++#!/usr/bin/env python3
+
+ # Ndiff
+ #
+@@ -67,15 +67,15 @@ if INSTALL_LIB is not None and is_secure_dir(INSTALL_LIB):
+
+ try:
+ import ndiff
+-except ImportError, e:
+- print >> sys.stderr, """\
++except ImportError as e:
++ print("""\
+ Could not import the ndiff module: %s.
+-I checked in these directories:""" % repr(e.message)
++I checked in these directories:""" % repr(e), file=sys.stderr)
+ for dir in sys.path:
+- print >> sys.stderr, " %s" % dir
+- print >> sys.stderr, """\
++ print(" %s" % dir, file=sys.stderr)
++ print("""\
+ If you installed Ndiff in another directory, you may have to add the
+-modules directory to the PYTHONPATH environment variable."""
++modules directory to the PYTHONPATH environment variable.""", file=sys.stderr)
+ sys.exit(1)
+
+ import ndiff
+diff --git a/ndiff/setup.py b/ndiff/setup.py
+old mode 100644
+new mode 100755
+index b5e254c..c49bcf3
+--- a/ndiff/setup.py
++++ b/ndiff/setup.py
+@@ -94,7 +94,7 @@ class checked_install(distutils.command.install.install):
+ self.saved_prefix = sys.prefix
+ try:
+ distutils.command.install.install.finalize_options(self)
+- except distutils.errors.DistutilsPlatformError, e:
++ except distutils.errors.DistutilsPlatformError as e:
+ raise distutils.errors.DistutilsPlatformError(str(e) + """
+ Installing your distribution's python-dev package may solve this problem.""")
+
+@@ -155,13 +155,13 @@ Installing your distribution's python-dev package may solve this problem.""")
+ #!/usr/bin/env python
+ import errno, os, os.path, sys
+
+-print 'Uninstall %(name)s'
++print('Uninstall %(name)s')
+
+ answer = raw_input('Are you sure that you want to uninstall '
+ '%(name)s (yes/no) ')
+
+ if answer != 'yes' and answer != 'y':
+- print 'Not uninstalling.'
++ print('Not uninstalling.')
+ sys.exit(0)
+
+ """ % {'name': APP_NAME}
+@@ -177,8 +177,8 @@ if answer != 'yes' and answer != 'y':
+ # This should never happen (everything gets installed
+ # inside the root), but if it does, be safe and don't
+ # delete anything.
+- uninstaller += ("print '%s was not installed inside "
+- "the root %s; skipping.'\n" % (output, self.root))
++ uninstaller += ("print('%s was not installed inside "
++ "the root %s; skipping.')\n" % (output, self.root))
+ continue
+ output = path_strip_prefix(output, self.root)
+ assert os.path.isabs(output)
+@@ -202,24 +202,24 @@ for path in INSTALLED_FILES:
+ dirs.append(path)
+ # Delete the files.
+ for file in files:
+- print "Removing '%s'." % file
++ print("Removing '%s'." % file)
+ try:
+ os.remove(file)
+- except OSError, e:
+- print >> sys.stderr, ' Error: %s.' % str(e)
++ except OSError as e:
++ print(' Error: %s.' % str(e), file=sys.stderr)
+ # Delete the directories. First reverse-sort the normalized paths by
+ # length so that child directories are deleted before their parents.
+ dirs = [os.path.normpath(dir) for dir in dirs]
+ dirs.sort(key = len, reverse = True)
+ for dir in dirs:
+ try:
+- print "Removing the directory '%s'." % dir
++ print("Removing the directory '%s'." % dir)
+ os.rmdir(dir)
+- except OSError, e:
++ except OSError as e:
+ if e.errno == errno.ENOTEMPTY:
+- print "Directory '%s' not empty; not removing." % dir
++ print("Directory '%s' not empty; not removing." % dir)
+ else:
+- print >> sys.stderr, str(e)
++ print(str(e), file=sys.stderr)
+ """
+
+ uninstaller_file = open(uninstaller_filename, 'w')
+@@ -227,7 +227,7 @@ for dir in dirs:
+ uninstaller_file.close()
+
+ # Set exec bit for uninstaller
+- mode = ((os.stat(uninstaller_filename)[ST_MODE]) | 0555) & 07777
++ mode = ((os.stat(uninstaller_filename)[ST_MODE]) | 0o555) & 0o7777
+ os.chmod(uninstaller_filename, mode)
+
+ def write_installed_files(self):
+@@ -242,7 +242,7 @@ for dir in dirs:
+ try:
+ for output in self.get_installed_files():
+ assert "\n" not in output
+- print >> f, output
++ print(output, file=f)
+ finally:
+ f.close()
+
+@@ -266,7 +266,7 @@ class my_uninstall(distutils.cmd.Command):
+ # Read the list of installed files.
+ try:
+ f = open(INSTALLED_FILES_NAME, "r")
+- except IOError, e:
++ except IOError as e:
+ if e.errno == errno.ENOENT:
+ log.error("Couldn't open the installation record '%s'. "
+ "Have you installed yet?" % INSTALLED_FILES_NAME)
+@@ -289,7 +289,7 @@ class my_uninstall(distutils.cmd.Command):
+ try:
+ if not self.dry_run:
+ os.remove(file)
+- except OSError, e:
++ except OSError as e:
+ log.error(str(e))
+ # Delete the directories. First reverse-sort the normalized paths by
+ # length so that child directories are deleted before their parents.
+@@ -300,7 +300,7 @@ class my_uninstall(distutils.cmd.Command):
+ log.info("Removing the directory '%s'." % dir)
+ if not self.dry_run:
+ os.rmdir(dir)
+- except OSError, e:
++ except OSError as e:
+ if e.errno == errno.ENOTEMPTY:
+ log.info("Directory '%s' not empty; not removing." % dir)
+ else:
+diff --git a/ndiff/test-scans/anonymize.py b/ndiff/test-scans/anonymize.py
+index 9ba612a..fd251fe 100755
+--- a/ndiff/test-scans/anonymize.py
++++ b/ndiff/test-scans/anonymize.py
+@@ -1,4 +1,4 @@
+-#!/usr/bin/env python
++#!/usr/bin/env python3
+
+ # Anonymize an Nmap XML file, replacing host name and IP addresses with random
+ # anonymous ones. Anonymized names will be consistent between runs of the
+@@ -20,20 +20,20 @@ r = random.Random()
+
+
+ def hash(s):
+- digest = hashlib.sha512(s).hexdigest()
++ digest = hashlib.sha512(s.encode()).hexdigest()
+ return int(digest, 16)
+
+
+ def anonymize_mac_address(addr):
+ r.seed(hash(addr))
+ nums = (0, 0, 0) + tuple(r.randrange(256) for i in range(3))
+- return u":".join(u"%02X" % x for x in nums)
++ return ":".join("%02X" % x for x in nums)
+
+
+ def anonymize_ipv4_address(addr):
+ r.seed(hash(addr))
+ nums = (10,) + tuple(r.randrange(256) for i in range(3))
+- return u".".join(unicode(x) for x in nums)
++ return ".".join(str(x) for x in nums)
+
+
+ def anonymize_ipv6_address(addr):
+@@ -41,7 +41,7 @@ def anonymize_ipv6_address(addr):
+ # RFC 4193.
+ nums = (0xFD00 + r.randrange(256),)
+ nums = nums + tuple(r.randrange(65536) for i in range(7))
+- return u":".join("%04X" % x for x in nums)
++ return ":".join("%04X" % x for x in nums)
+
+ # Maps to memoize address and host name conversions.
+ hostname_map = {}
+@@ -54,11 +54,11 @@ def anonymize_hostname(name):
+ LETTERS = "acbdefghijklmnopqrstuvwxyz"
+ r.seed(hash(name))
+ length = r.randrange(5, 10)
+- prefix = u"".join(r.sample(LETTERS, length))
++ prefix = "".join(r.sample(LETTERS, length))
+ num = r.randrange(1000)
+- hostname_map[name] = u"%s-%d.example.com" % (prefix, num)
++ hostname_map[name] = "%s-%d.example.com" % (prefix, num)
+ if VERBOSE:
+- print >> sys.stderr, "Replace %s with %s" % (name, hostname_map[name])
++ print("Replace %s with %s" % (name, hostname_map[name]), file=sys.stderr)
+ return hostname_map[name]
+
+ mac_re = re.compile(r'\b([0-9a-fA-F]{2}:){5}[0-9a-fA-F]{2}\b')
+@@ -78,7 +78,7 @@ def anonymize_address(addr):
+ else:
+ assert False
+ if VERBOSE:
+- print >> sys.stderr, "Replace %s with %s" % (addr, address_map[addr])
++ print("Replace %s with %s" % (addr, address_map[addr]), file=sys.stderr)
+ return address_map[addr]
+
+
+++ /dev/null
-#
-# Copyright (C) 2006-2016 OpenWrt.org
-#
-# This is free software, licensed under the GNU General Public License v2.
-# See /LICENSE for more information.
-#
-
-include $(TOPDIR)/rules.mk
-
-PKG_NAME:=obfsproxy
-PKG_VERSION:=0.2.13
-PKG_RELEASE:=3
-
-PYPI_NAME:=$(PKG_NAME)
-PKG_HASH:=1e26c2faef1cfcf856ddf60e9647058a7c78fb0d47f05b58a0f847ed7cc41a66
-
-PKG_LICENSE:=BSD-3-Clause
-PKG_LICENSE_FILES:=LICENSE
-PKG_MAINTAINER:=Jeffery To <jeffery.to@gmail.com>
-
-include ../../lang/python/pypi.mk
-include $(INCLUDE_DIR)/package.mk
-include ../../lang/python/python-package.mk
-
-define Package/obfsproxy
- SECTION:=net
- CATEGORY:=Network
- TITLE:=A pluggable transport proxy written in Python
- URL:=https://gitweb.torproject.org/pluggable-transports/obfsproxy.git/
- DEPENDS:= \
- +python-light \
- +python-crypto \
- +python-pyptlib \
- +python-setuptools \
- +python-twisted \
- +python-yaml
- VARIANT:=python
-endef
-
-define Package/obfsproxy/description
-obfsproxy is a tool that attempts to circumvent censorship, by
-transforming the Tor traffic between the client and the bridge. This
-way, censors, who usually monitor traffic between the client and the
-bridge, will see innocent-looking transformed traffic instead of the
-actual Tor traffic.
-endef
-
-define Package/obfsproxy/conffiles
-/etc/config/obfsproxy
-endef
-
-define PyPackage/obfsproxy/install
- $(INSTALL_DIR) $(1)/usr/bin
- $(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/bin/obfsproxy $(1)/usr/bin/
- $(INSTALL_DIR) $(1)/etc/config
- $(INSTALL_CONF) ./files/obfsproxy.conf $(1)/etc/config/obfsproxy
- $(INSTALL_DIR) $(1)/etc/init.d
- $(INSTALL_BIN) ./files/obfsproxy.init $(1)/etc/init.d/obfsproxy
-endef
-
-$(eval $(call PyPackage,obfsproxy))
-$(eval $(call BuildPackage,obfsproxy))
-$(eval $(call BuildPackage,obfsproxy-src))
+++ /dev/null
-# To use Obfsproxy with Tor, follow the instructions at:
-#
-# https://www.torproject.org/projects/obfsproxy-instructions.html.en
-#
-# instead of setting up a separate instance of Obfsproxy.
-
-config obfsproxy 'obfsproxy'
- # Set to 1 to enable this instance
- option enabled 0
-
- # One of: managed, dummy, b64, obfs2, obfs3, scramblesuit
- option transport 'scramblesuit'
-
- # Shared secret / password
- # For obfs2 (as the shared secret parameter) and scramblesuit only
- option password 'EXAMPLEPASSWORDNOTREAL'
-
- # One of: server, ext_server, client, socks
- option mode 'socks'
-
- # Destination address
- # Required for all modes except 'socks'
- #option dest_host '0.0.0.0'
- #option dest_port '80'
-
- # Extended ORPort authentication cookie file location
- # Required for 'ext_server' mode
- #option ext_cookie_file ''
-
- # Listener address
- option listen_host '127.0.0.1'
- option listen_port '8080'
-
- # Set to log to a file instead of syslog
- #option log_file '/var/log/obfsproxy.log'
-
- # Minimum logging severity
- # One of: error, warning, info, debug
- #option log_min_severity 'info'
-
- # Set to 1 to disable logging
- #option no_log 0
-
- # Set to 1 to disable safe (scrubbed address) logging
- #option no_safe_logging 0
-
- # Run as a different user
- #option user 'nobody'
-
- # Outgoing proxy
- # proxy_scheme is one of: socks4a, socks5, http
- # txsocksx is required for socks4a or socks5
- #option proxy_scheme ''
- #option proxy_username ''
- #option proxy_password ''
- #option proxy_host ''
- #option proxy_port ''
+++ /dev/null
-#!/bin/sh /etc/rc.common
-# Copyright (C) 2016 OpenWrt.org
-
-START=80
-STOP=20
-USE_PROCD=1
-
-PROG=/usr/bin/obfsproxy
-
-append_arg() {
- local cfg="$1"
- local var="$2"
- local opt="$3"
- local def="$4"
- local val
-
- config_get val "$cfg" "$var"
- [ -n "$val" -o -n "$def" ] && procd_append_param command "$opt" "${val:-$def}"
-}
-
-append_bool() {
- local cfg="$1"
- local var="$2"
- local opt="$3"
- local def="$4"
- local val
-
- config_get_bool val "$cfg" "$var" "$def"
- [ "$val" = 1 ] && procd_append_param command "$opt"
-}
-
-append_plain() {
- procd_append_param command "$1"
-}
-
-append_param() {
- local cfg="$1"
- local var="$2"
- local opt="$3"
- local def="$4"
- local val
-
- config_get val "$cfg" "$var"
- [ -n "$val" -o -n "$def" ] && procd_append_param "$opt" "${val:-$def}"
-}
-
-append_proxy_spec() {
- local cfg="$1"
- local scheme_var="$2"
- local username_var="$3"
- local password_var="$4"
- local host_var="$5"
- local port_var="$6"
- local opt="$7"
- local def="$8"
- local scheme
- local username
- local password
- local host
- local port
- local login
- local val
-
- config_get scheme "$cfg" "$scheme_var"
- config_get username "$cfg" "$username_var"
- config_get password "$cfg" "$password_var"
- config_get host "$cfg" "$host_var"
- config_get port "$cfg" "$port_var"
- [ "$username" ] && login="$username${password:+:$password}@"
- [ -n "$scheme" -a -n "$host" -a -n "$port" ] && val="$scheme://$login$host:$port"
- [ -n "$val" -o -n "$def" ] && procd_append_param command "$opt" "${val:-$def}"
-}
-
-append_host_port() {
- local cfg="$1"
- local host_var="$2"
- local port_var="$3"
- local opt="$4"
- local def="$5"
- local host
- local port
- local val
-
- config_get host "$cfg" "$host_var"
- config_get port "$cfg" "$port_var"
- [ -n "$host" -a -n "$port" ] && val="$host:$port"
- [ -n "$val" -o -n "$def" ] && {
- [ "$opt" ] && procd_append_param command "$opt"
- procd_append_param command "${val:-$def}"
- }
-}
-
-start_instance() {
- local cfg="$1"
- local lib_dir="/var/lib/obfsproxy/$cfg"
- local redirect=0
- local enabled
- local user
- local transport
- local password
- local log_min_severity
-
- config_get_bool enabled "$cfg" 'enabled' '0'
- [ "$enabled" = 0 ] && return 1
-
- config_get user "$cfg" 'user' 'root'
- config_get transport "$cfg" 'transport'
- [ "$transport" = "scramblesuit" ] && config_get password "$cfg" 'password'
-
- [ -d "$lib_dir" ] || {
- mkdir -m 0755 -p "$lib_dir/data"
- chmod -R 0700 "$lib_dir"
- }
- [ "$password" ] && {
- echo "$password" > "$lib_dir/secret"
- chmod 0600 "$lib_dir/secret"
- }
- chown -R "$user:" "$lib_dir"
-
- config_get log_min_severity "$cfg" 'log_min_severity'
- [ "$log_min_severity" = "debug" ] && redirect=1
-
- procd_open_instance
-
- procd_set_param command "$PROG" --data-dir "$lib_dir/data" --syslog "obfsproxy($cfg)"
-
- append_arg "$cfg" log_file "--log-file"
- append_arg "$cfg" log_min_severity "--log-min-severity"
- append_bool "$cfg" no_log "--no-log"
- append_bool "$cfg" no_safe_logging "--no-safe-logging"
- append_proxy_spec "$cfg" proxy_scheme proxy_username proxy_password proxy_host proxy_port "--proxy"
-
- append_param "$cfg" transport command
- [ "$transport" = "obfs2" ] && append_arg "$cfg" password "--shared-secret"
- [ "$password" ] && procd_append_param command "--password-file" "$lib_dir/secret"
-
- append_param "$cfg" mode command
- append_host_port "$cfg" dest_host dest_port "--dest"
- append_arg "$cfg" ext_cookie_file "--ext-cookie-file"
-
- append_host_port "$cfg" listen_host listen_port
-
- procd_set_param respawn
- procd_set_param stdout $redirect
- procd_set_param stderr $redirect
- append_param "$cfg" user user
-
- procd_close_instance
-}
-
-service_triggers() {
- procd_add_reload_trigger obfsproxy
-}
-
-start_service() {
- config_load obfsproxy
- config_foreach start_instance obfsproxy
-}
+++ /dev/null
-diff --git a/setup.py b/setup.py
-index 2353a29..9d2a9a9 100644
---- a/setup.py
-+++ b/setup.py
-@@ -21,7 +21,7 @@ setup(
- version=versioneer.get_version(),
- cmdclass=versioneer.get_cmdclass(),
-
-- packages = find_packages(),
-+ packages = find_packages(exclude=['*.test', '*.test.*']),
- entry_points = {
- 'console_scripts': [
- 'obfsproxy = obfsproxy.pyobfsproxy:run'
+++ /dev/null
-diff --git a/setup.py b/setup.py
-index 2353a29..8d3d56d 100644
---- a/setup.py
-+++ b/setup.py
-@@ -27,6 +27,11 @@ setup(
- 'obfsproxy = obfsproxy.pyobfsproxy:run'
- ]
- },
-+ options = {
-+ 'build_scripts': {
-+ 'executable': '/usr/bin/python'
-+ },
-+ },
-
- install_requires = [
- 'setuptools',
+++ /dev/null
-diff --git a/setup.py b/setup.py
-index 2353a29..e04c5f5 100644
---- a/setup.py
-+++ b/setup.py
-@@ -32,7 +32,6 @@ setup(
- 'setuptools',
- 'PyCrypto',
- 'Twisted',
-- 'argparse',
- 'pyptlib >= 0.0.6',
- 'pyyaml'
- ],
+++ /dev/null
-diff --git a/obfsproxy/common/log.py b/obfsproxy/common/log.py
-index bb30296..79193d2 100644
---- a/obfsproxy/common/log.py
-+++ b/obfsproxy/common/log.py
-@@ -1,5 +1,6 @@
- """obfsproxy logging code"""
- import logging
-+import logging.handlers
- import sys
-
- from twisted.python import log
-@@ -50,6 +51,18 @@ class ObfsLogger(object):
-
- self.obfslogger.addHandler(log_handler)
-
-+ def set_syslog(self, progname):
-+ """Set up our logger so that it starts logging to syslog instead."""
-+
-+ # remove the default handler, and add the SysLogHandler:
-+ self.obfslogger.removeHandler(self.default_handler)
-+
-+ log_handler = logging.handlers.SysLogHandler(address='/dev/log')
-+ formatter = logging.Formatter(progname + "[%(process)d]: %(message)s")
-+ log_handler.setFormatter(formatter)
-+
-+ self.obfslogger.addHandler(log_handler)
-+
-
- def set_log_severity(self, sev_string):
- """Update our minimum logging severity to 'sev_string'."""
-diff --git a/obfsproxy/pyobfsproxy.py b/obfsproxy/pyobfsproxy.py
-index 4a2faf6..eaf8a44 100755
---- a/obfsproxy/pyobfsproxy.py
-+++ b/obfsproxy/pyobfsproxy.py
-@@ -42,6 +42,7 @@ def set_up_cli_parsing():
-
- parser.add_argument('-v', '--version', action='version', version=__version__)
- parser.add_argument('--log-file', help='set logfile')
-+ parser.add_argument('--syslog', metavar='PROGNAME', help='use syslog')
- parser.add_argument('--log-min-severity',
- choices=['error', 'warning', 'info', 'debug'],
- help='set minimum logging severity (default: %(default)s)')
-@@ -110,6 +111,8 @@ def consider_cli_args(args):
-
- if args.log_file:
- log.set_log_file(args.log_file)
-+ elif args.syslog:
-+ log.set_syslog(args.syslog)
- if args.log_min_severity:
- log.set_log_severity(args.log_min_severity)
- if args.no_log:
PKG_NAME:=prosody
PKG_VERSION:=0.11.3
-PKG_RELEASE:=1
+PKG_RELEASE:=2
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://prosody.im/downloads/source
}
[ -d /var/log/prosody ] && {
- chown -R prosody:prosody /etc/prosody
+ chown -R root:prosody /etc/prosody
+ chown -R prosody:prosody /etc/prosody/data
}
[ -f /sbin/paxctl ] && {
PKG_NAME:=seafile-ccnet
PKG_VERSION:=7.1.2
-PKG_RELEASE:=1
+PKG_RELEASE:=2
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://codeload.github.com/haiwen/ccnet-server/tar.gz/v$(PKG_VERSION)-server?
include $(INCLUDE_DIR)/package.mk
include $(INCLUDE_DIR)/nls.mk
-include ../../lang/python/python-package.mk
include ../../lang/python/python3-package.mk
define Package/seafile-ccnet
--enable-python \
--with-mysql="$(STAGING_DIR)/usr/bin/mysql_config"
-# Include Python 2 bindings in this package to maintain compatibility with previous versions
define Package/seafile-ccnet/install
$(INSTALL_DIR) $(1)/usr/{lib,libexec}
- $(INSTALL_DIR) $(1)$(PYTHON_PKG_DIR)/ccnet
$(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/bin/* $(1)/usr/libexec/
- $(INSTALL_DATA) $(PKG_BUILD_DIR)/python/ccnet/*.py $(1)$(PYTHON_PKG_DIR)/ccnet/
$(CP) $(PKG_INSTALL_DIR)/usr/lib/libccnet.so* $(1)/usr/lib/
endef
PKG_NAME:=seafile-server
PKG_VERSION:=7.1.2
-PKG_RELEASE:=1
+PKG_RELEASE:=2
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://codeload.github.com/haiwen/seafile-server/tar.gz/v$(PKG_VERSION)-server?
include $(INCLUDE_DIR)/package.mk
include $(INCLUDE_DIR)/nls.mk
-include ../../lang/python/python-package.mk
include ../../lang/python/python3-package.mk
# Check that the actual Makefile version-relase match the above.
--enable-python \
--with-mysql="$(STAGING_DIR)/usr/bin/mysql_config"
-# This is required as python[3]-package.mk overrides the default setting of having interlinking enabled
+# This is required as python3-package.mk overrides the default setting of having interlinking enabled
ifdef CONFIG_USE_MIPS16
TARGET_CFLAGS += -minterlink-mips16
endif
setup-seafile-mysql.py \
sqlite2mysql.py
-# Include Python 2 bindings in this package to maintain compatibility with previous versions
define Package/seafile-server/install
$(INSTALL_DIR) $(1)/usr/{bin,libexec}
- $(INSTALL_DIR) $(1)$(PYTHON_PKG_DIR)/{seafile,seaserv}
$(INSTALL_DIR) $(1)/usr/share/seafile/seafile-server
$(INSTALL_DIR) $(1)/etc/{config,init.d,seafile}
$(INSTALL_DIR) $(1)/lib/upgrade/keep.d
$(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/bin/$$$$bin $(1)/usr/libexec/ ; \
done
- $(INSTALL_DATA) $(PKG_BUILD_DIR)/python/seafile/*.py $(1)$(PYTHON_PKG_DIR)/seafile/
- $(INSTALL_DATA) $(PKG_BUILD_DIR)/python/seaserv/*.py $(1)$(PYTHON_PKG_DIR)/seaserv/
-
for sh in $(SEAFILE_SERVER_SHELL_SCRIPTS) ; do \
$(INSTALL_BIN) $(PKG_BUILD_DIR)/scripts/$$$$sh $(1)/usr/bin/$$$${sh%.sh} ; \
done
define Build/InstallDev
$(INSTALL_DIR) $(1)/usr/include/seafile
$(INSTALL_DIR) $(1)/usr/lib/pkgconfig
- $(INSTALL_DIR) $(1)$(PYTHON_PKG_DIR)
$(INSTALL_DATA) $(PKG_INSTALL_DIR)/usr/include/seafile/*.h $(1)/usr/include/seafile/
$(INSTALL_DATA) $(PKG_INSTALL_DIR)/usr/lib/pkgconfig/libseafile.pc $(1)/usr/lib/pkgconfig/
endef
include $(TOPDIR)/rules.mk
PKG_NAME:=simple-adblock
-PKG_VERSION:=1.8.2
-PKG_RELEASE:=2
+PKG_VERSION:=1.8.3
+PKG_RELEASE:=1
PKG_MAINTAINER:=Stan Grishin <stangri@melmac.net>
PKG_LICENSE:=GPL-3.0-or-later
# check if we are on real system
if [ -z "$${IPKG_INSTROOT}" ]; then
/etc/init.d/simple-adblock enable
- while uci -q del ucitrack.@simple-adblock[-1]; do :; done
- uci -q batch <<-EOF >/dev/null
- add ucitrack simple-adblock
- set ucitrack.@simple-adblock[0].init='simple-adblock'
- commit ucitrack
- EOF
fi
exit 0
endef
#!/bin/sh
# check if we are on real system
if [ -z "$${IPKG_INSTROOT}" ]; then
- while uci -q del ucitrack.@simple-adblock[-1]; do :; done
echo "Stopping service and removing rc.d symlink for simple-adblock"
- uci -q del_list dhcp.@dnsmasq[0].addnhosts="/var/run/simple-adblock.addnhosts" || true
- if [ "$$(uci -q get dhcp.@dnsmasq[0].serversfile)" == "/var/run/simple-adblock.servers" ]; then
- uci -q del dhcp.@dnsmasq[0].serversfile || true
- fi
/etc/init.d/simple-adblock stop || true
/etc/init.d/simple-adblock killcache || true
/etc/init.d/simple-adblock disable || true
|Basic|force_dns|boolean|1|Force router's DNS to local devices which may have different/hardcoded DNS server settings. If enabled, creates a firewall rule to intercept DNS requests from local devices to external DNS servers and redirect them to router.|
|Basic|led|string|none|Use one of the router LEDs to indicate the AdBlocking status.|
|Advanced|dns|string|dnsmasq.servers|DNS resolution option. See [table below](#dns-resolution-option) for addtional information.|
+||dns_instance|string|0|String of space-separated DNSMASQ instance numbers (or '*' for all) to be affected by the service. See [table below](#dns-resolution-option) for additional information.|
|Advanced|ipv6_enabled|boolean|0|Add IPv6 entries to block-list if ```dnsmasq.addnhosts``` is used. This option is only visible in Web UI if the ```dnsmasq.addnhosts``` is selected as the DNS resolution option.|
|Advanced|boot_delay|integer|120|Delay service activation for that many seconds on boot up. You can shorten it to 10-30 seconds on modern fast routers. Routers with built-in modems may require longer boot delay.|
|Advanced|download_timeout|integer|10|Time-out downloads if no reply received within that many last seconds.|
|Option|Explanation|
| --- | --- |
-|```dnsmasq.addnhosts```|Creates the DNSMASQ additional hosts file ```/var/run/simple-adblock.addnhosts``` and modifies DNSMASQ settings, so that DNSMASQ resolves all blocked domains to "local machine": 127.0.0.1. This option doesn't allow block-list optimization (by removing secondary level domains if the top-level domain is also in the block-list), so it results in a much larger block-list file, but, unlike other DNSMASQ-based options, it has almost no effect on the DNS look up speed. This option also allows quick reloads of DNSMASQ on block-list updates.|
+|```dnsmasq.addnhosts```|Creates the DNSMASQ additional hosts file ```/var/run/simple-adblock.addnhosts``` and modifies DNSMASQ settings, so that DNSMASQ resolves all blocked domains to "local machine": 127.0.0.1. This option doesn't allow block-list optimization (by removing secondary level domains if the top-level domain is also in the block-list), so it results in a much larger block-list file, but, unlike other DNSMASQ-based options, it has almost no effect on the DNS look up speed. This option also allows quick reloads of DNSMASQ on block-list updates. This setting also allows you to configure which DNSMASQ instances would be affected by AdBlocking via ```dns_instance``` option.|
|```dnsmasq.conf```|Creates the DNSMASQ config file ```/var/dnsmasq.d/simple-adblock``` so that DNSMASQ replies with NXDOMAIN: "domain not found". This option allows the block-list optimization (by removing secondary level domains if the top-level domain is also in the block-list), resulting in the smaller block-list file. This option will slow down DNS look up speed somewhat.|
|```dnsmasq.ipset```|Creates the DNSMASQ ipset file ```/var/dnsmasq.d/simple-adblock.ipset``` and the firewall rule to reject the matching requests. This is the only option for AdBlocking if you're using a browser with [DNS-over-HTTPS proxy](https://en.wikipedia.org/wiki/DNS_over_HTTPS) built-in, like [Mozilla Firefox](https://support.mozilla.org/en-US/kb/firefox-dns-over-https#w_about-dns-over-https) or [Google Chrome/Chromium](https://blog.chromium.org/2019/09/experimenting-with-same-provider-dns.html). This option allows the block-list optimization (by removing secondary level domains if the top-level domain is also in the block-list), resulting in the smaller block-list file. This option requires you install ```dnsmasq-full``` and ```ipset``` [as described here](#how-to-use-dnsmasq-ipset).<br/>PLEASE NOTE, that unlike other options which are truly domain name based blocking, this is essentially an IP address based blocking, ie: if you try to block ```google-analytics.com``` with this option, it may also block/break things like YouTube, Hangouts and other Google services if they share IP address(es) with ```google-analytics.com```.|
-|```dnsmasq.servers```|Creates the DNSMASQ servers file ```/var/run/simple-adblock.servers``` and modifies DNSMASQ settings so that DNSMASQ replies with NXDOMAIN: "domain not found". This option allows the block-list optimization (by removing secondary level domains if the top-level domain is also in the block-list), resulting in the smaller block-list file. This option will slow down DNS look up speed somewhat. This is a default setting as it results in the smaller block-file and allows quick reloads of DNSMASQ.|
+|```dnsmasq.servers```|Creates the DNSMASQ servers file ```/var/run/simple-adblock.servers``` and modifies DNSMASQ settings so that DNSMASQ replies with NXDOMAIN: "domain not found". This option allows the block-list optimization (by removing secondary level domains if the top-level domain is also in the block-list), resulting in the smaller block-list file. This option will slow down DNS look up speed somewhat. This is a default setting as it results in the smaller block-file and allows quick reloads of DNSMASQ. This setting also allows you to configure which DNSMASQ instances would be affected by AdBlocking via ```dns_instance``` option.|
|```unbound.adb_list```|Creates the Unbound config file ```/var/lib/unbound/adb_list.simple-adblock``` so that Unbound replies with NXDOMAIN: "domain not found". This option allows the block-list optimization (by removing secondary level domains if the top-level domain is also in the block-list), resulting in the smaller block-list file.|
## How Does It Work
- Enabling ```simple-adblock```'s ```force_dns``` setting to override the hardcoded DNS on your device.
4. By using the DNS-over-TLS, DNS-over-HTTPS or DNSCrypt on your local device or (if supported) by browser on your local device. You can fix this only by:
- Stopping/removing/disabling DNS-over-TLS, DNS-over-HTTPS or DNSCrypt on your local device and using the secure DNS on your router instead. There are merits to all three of the options above, I can recommend the ```https_dns_proxy``` and ```luci-app-https_dns_proxy``` packages for enabling DNS-over-HTTPS on your router.
+ 5. If you are running a WireGuard "server" on your router and remote clients connect to it, the AdBlocking may not work properly for your remote clients until you add the following to ```/etc/config/network``` (credit to [dibdot](https://forum.openwrt.org/t/wireguard-and-adblock/49351/6)):
+
+ ```sh
+ config route
+ option interface 'wg0'
+ option target '192.168.1.0'
+ option netmask '255.255.255.0'
+ ```
## Documentation / Discussion
option parallel_downloads '1'
option debug '0'
option compressed_cache '0'
- list whitelist_domain 'raw.githubusercontent.com'
+ list whitelist_domain 'raw.githubusercontent.com'
# Thu Oct 3 17:54:04 PDT 2019
# File size: 4.0K
- list blacklist_domains_url 'https://s3.amazonaws.com/lists.disconnect.me/simple_tracking.txt'
+ list blacklist_domains_url 'https://s3.amazonaws.com/lists.disconnect.me/simple_tracking.txt'
# File size: 4.0K
- list blacklist_domains_url 'https://dshield.org/feeds/suspiciousdomains_High.txt'
+# list blacklist_domains_url 'https://www.dshield.org/feeds/suspiciousdomains_High.txt'
# File size: 12.0K
- list blacklist_domains_url 'https://ssl.bblck.me/blacklists/domain-list.txt'
+ list blacklist_domains_url 'https://ssl.bblck.me/blacklists/domain-list.txt'
# File size: 44.0K
- list blacklist_domains_url 'https://s3.amazonaws.com/lists.disconnect.me/simple_ad.txt'
+ list blacklist_domains_url 'https://s3.amazonaws.com/lists.disconnect.me/simple_ad.txt'
# File size: 44.0K
- list blacklist_domains_url 'https://s3.amazonaws.com/lists.disconnect.me/simple_malvertising.txt'
-
-# File size: 52.0K
- list blacklist_domains_url 'https://ransomwaretracker.abuse.ch/downloads/RW_DOMBL.txt'
+ list blacklist_domains_url 'https://s3.amazonaws.com/lists.disconnect.me/simple_malvertising.txt'
# File size: 60.0K
-# use just one of the dshield.org blocklists
-# list blacklist_domains_url 'https://dshield.org/feeds/suspiciousdomains_Medium.txt'
+# use just one of the www.dshield.org blocklists
+ list blacklist_domains_url 'https://www.dshield.org/feeds/suspiciousdomains_Medium.txt'
# File size: 64.0K
-# use just one of the dshield.org blocklists
-# list blacklist_domains_url 'https://dshield.org/feeds/suspiciousdomains_Low.txt'
+# use just one of the www.dshield.org blocklists
+# list blacklist_domains_url 'https://www.dshield.org/feeds/suspiciousdomains_Low.txt'
# File size: 584.0K
# blocklist too big for most routers
-# list blacklist_domains_url 'https://mirror1.malwaredomains.com/files/justdomains'
+# list blacklist_domains_url 'https://mirror1.malwaredomains.com/files/justdomains'
# File size: 16.0K
- list blacklist_hosts_url 'https://adaway.org/hosts.txt'
-
+ list blacklist_hosts_url 'https://adaway.org/hosts.txt'
+
# File size: 20.0K
- list blacklist_hosts_url 'https://raw.githubusercontent.com/hoshsadiq/adblock-nocoin-list/master/hosts.txt'
+ list blacklist_hosts_url 'https://raw.githubusercontent.com/hoshsadiq/adblock-nocoin-list/master/hosts.txt'
# File size: 36.0K
- list blacklist_hosts_url 'https://www.malwaredomainlist.com/hostslist/hosts.txt'
+ list blacklist_hosts_url 'https://www.malwaredomainlist.com/hostslist/hosts.txt'
# File size: 80.0K
- list blacklist_hosts_url 'https://pgl.yoyo.org/as/serverlist.php?hostformat=hosts&showintro=1&mimetype=plaintext'
+ list blacklist_hosts_url 'https://pgl.yoyo.org/as/serverlist.php?hostformat=hosts&showintro=1&mimetype=plaintext'
# File size: 388.0K
# blocklist may be too big for some routers
# blocklist may block some video-streaming content
-# list blacklist_hosts_url 'https://raw.githubusercontent.com/jawz101/MobileAdTrackers/master/hosts'
+# list blacklist_hosts_url 'https://raw.githubusercontent.com/jawz101/MobileAdTrackers/master/hosts'
# File size: 424.0K
# blocklist may be too big for some routers
- list blacklist_hosts_url 'http://winhelp2002.mvps.org/hosts.txt'
+ list blacklist_hosts_url 'http://winhelp2002.mvps.org/hosts.txt'
# File size: 432.0K
# blocklist may be too big for some routers
- list blacklist_hosts_url 'https://someonewhocares.org/hosts/hosts'
+ list blacklist_hosts_url 'https://someonewhocares.org/hosts/hosts'
# File size: 624.0K
# blocklist too big for most routers
# File size: 1.7M
# blocklist too big for most routers
-# list blacklist_hosts_url 'https://hosts-file.net/ad_servers.txt'
+# list blacklist_hosts_url 'https://hosts-file.net/ad_servers.txt'
# File size: 3.1M
# blocklist too big for most routers
-# list blacklist_hosts_url 'https://hostsfile.mine.nu/Hosts'
+# list blacklist_hosts_url 'https://hostsfile.mine.nu/Hosts'
# site was down on last check
-# list blacklist_domains_url 'http://support.it-mate.co.uk/downloads/hosts.txt'
+# list blacklist_domains_url 'http://support.it-mate.co.uk/downloads/hosts.txt'
#!/bin/sh
if [ "$ACTION" = "ifup" ]; then
- sleep 10 && /etc/init.d/simple-adblock start hotplug &
+ sleep 10 && /etc/init.d/simple-adblock start hotplug &
fi
export USE_PROCD=1
export LC_ALL=C
-export EXTRA_COMMANDS='check dl killcache sizes status'
+export EXTRA_COMMANDS='check dl killcache sizes show'
export EXTRA_HELP=' check Checks if specified domain is found in current blacklist
- dl Force-redownloads all the list
- sizes Shows the file-sizes of enabled block-lists (by downloading them one by one)
- status Shows the service last-run status'
+ dl Force-downloads all enabled block-list
+ sizes Displays the file-sizes of enabled block-lists
+ show Shows the service last-run status'
readonly packageName='simple-adblock'
readonly serviceName="$packageName $PKG_VERSION"
readonly B_TMP="/var/${packageName}.hosts.b.tmp"
readonly PIDFile="/var/run/${packageName}.pid"
readonly jsonFile="/var/run/${packageName}.json"
+readonly sharedMemoryError="/dev/shm/$packageName-error"
+readonly sharedMemoryOutput="/dev/shm/$packageName-output"
readonly hostsFilter='/localhost/d;/^#/d;/^[^0-9]/d;s/^0\.0\.0\.0.//;s/^127\.0\.0\.1.//;s/[[:space:]]*#.*$//;s/[[:cntrl:]]$//;s/[[:space:]]//g;/[`~!@#\$%\^&\*()=+;:"'\'',<>?/\|[{}]/d;/]/d;/\./!d;/^$/d;/[^[:alnum:]_.-]/d;'
readonly domainsFilter='/^#/d;s/[[:space:]]*#.*$//;s/[[:space:]]*$//;s/[[:cntrl:]]$//;/[[:space:]]/d;/[`~!@#\$%\^&\*()=+;:"'\'',<>?/\|[{}]/d;/]/d;/\./!d;/^$/d;/[^[:alnum:]_.-]/d;'
readonly checkmark='\xe2\x9c\x93'
readonly __OK__='\033[0;32m[\xe2\x9c\x93]\033[0m'
readonly __FAIL__='\033[0;31m[\xe2\x9c\x97]\033[0m'
readonly _ERROR_='\033[0;31mERROR\033[0m'
-readonly statusSuccess='Success'
-readonly statusFail='Fail'
-readonly statusDownloading='Downloading'
-readonly statusReloading='Reloading'
-readonly statusRestarting='Restarting'
-readonly statusStarting='Starting'
-readonly statusForceReloading='Force-Reloading'
-readonly statusProcessing='Processing'
-readonly statusStopped='Stopped'
-readonly sharedMemoryError="/dev/shm/$packageName-error"
-readonly sharedMemoryOutput="/dev/shm/$packageName-output"
+
+readonly messageSuccess='Success'
+readonly messageFail='Fail'
+readonly messageDownloading='Downloading'
+readonly messageReloading='Reloading'
+readonly messageRestarting='Restarting'
+readonly messageStarting='Starting'
+readonly messageForceReloading='Force-Reloading'
+readonly messageProcessing='Processing'
+readonly messageStopped='Stopped'
+getStatusText() {
+ local _ret
+ case "$1" in
+ statusNoInstall) _ret="$serviceName is not installed or not found";;
+ statusStopped) _ret="Stopped";;
+ statusStarting) _ret="Starting";;
+ statusRestarting) _ret="Restarting";;
+ statusForceReloading) _ret="Force Reloading";;
+ statusDownloading) _ret="Downloading";;
+ statusError) _ret="Error";;
+ statusWarning) _ret="Warning";;
+ statusFail) _ret="Fail";;
+ statusSuccess) _ret="Success";;
+ esac
+ printf "%b" "$_ret"
+}
+getErrorText() {
+ local _ret
+ case "$1" in
+ errorOutputFileCreate) _ret="failed to create $outputFile file";;
+ errorFailDNSReload) _ret="failed to restart/reload DNS resolver";;
+ errorSharedMemory) _ret="failed to access shared memory";;
+ errorSorting) _ret="failed to sort data file";;
+ errorOptimization) _ret="failed to optimize data file";;
+ errorWhitelistProcessing) _ret="failed to process whitelist";;
+ errorDataFileFormatting) _ret="failed to format data file";;
+ errorMovingDataFile) _ret="failed to move data file '${A_TMP}' to '${outputFile}'";;
+ errorCreatingCompressedCache) _ret="failed to create compressed cache";;
+ errorRemovingTempFiles) _ret="failed to remove temporary files";;
+ errorRestoreCompressedCache) _ret="failed to unpack compressed cache";;
+ errorRestoreCache) _ret="failed to move '$outputCache' to '$outputFile'";;
+ errorOhSnap) _ret="failed to create blocklist or restart DNS resolver";;
+ errorStopping) _ret="failed to stop $serviceName";;
+ errorDNSReload) _ret="failed to reload/restart DNS resolver";;
+ errorDownloadingList) _ret="failed to download";;
+ errorParsingList) _ret="failed to parse";;
+ esac
+ printf "%b" "$_ret"
+}
create_lock() { [ -e "$PIDFile" ] && return 1; touch "$PIDFile"; }
remove_lock() { [ -e "$PIDFile" ] && rm -f "$PIDFile"; }
output_okn() { output 1 "$_OK_\\n"; output 2 "$__OK__\\n"; }
output_fail() { output 1 "$_FAIL_"; output 2 "$__FAIL__\\n"; }
output_failn() { output 1 "$_FAIL_\\n"; output 2 "$__FAIL__\\n"; }
-str_replace() { printf "%b" "$1" | sed -e "s/$(printf "%b" "$2")/$(printf "%b" "$3")/g"; }
-str_contains() { test "$1" != "$(str_replace "$1" "$2" '')"; }
+# str_replace() { printf "%b" "$1" | sed -e "s/$(printf "%b" "$2")/$(printf "%b" "$3")/g"; }
+# str_contains() { test "$1" != "$(str_replace "$1" "$2" '')"; }
compare_versions() { test "$(printf '%s\n' "$@" | sort -V | head -n 1)" != "$1"; }
is_chaos_calmer() { ubus -S call system board | grep -q 'Chaos Calmer'; }
is_ipset_procd() { compare_versions "$(sed -ne 's/^Version: //p' /usr/lib/opkg/info/firewall.control)" "2019-09-18"; }
if [ $((verbosity & $1)) -gt 0 ] || [ "$verbosity" = "$1" ]; then shift; else return 0; fi
fi
[ -t 1 ] && printf "%b" "$1"
- msg="$(printf "%s" "$(str_replace "$1" "$serviceName " "service ")" | sed 's|\\033\[[0-9]\?;\?[0-9]\?[0-9]\?m||g')";
+ msg="${1//$serviceName /service }";
if [ "$(printf "%b" "$msg" | wc -l)" -gt 0 ]; then
[ -s "$sharedMemoryOutput" ] && memmsg="$(cat "$sharedMemoryOutput")"
logger -t "${packageName:-service} [$$]" "$(printf "%b" "${memmsg}${msg}")"
}
export serviceEnabled forceDNS parallelDL debug allowIDN compressedCache
-export targetDNS bootDelay dlTimeout curlRetry verbosity led dnsInstance
+export targetDNS bootDelay dlTimeout curlRetry verbosity=1 led dnsInstance
export whitelist_domains blacklist_domains
export whitelist_domains_urls blacklist_domains_urls blacklist_hosts_urls
export wan_if wan_gw wanphysdev dl_command serviceStatus dl_flag
. /lib/functions/network.sh
. /usr/share/libubox/jshn.sh
# Prefer curl because it supports the file: scheme.
- if [ -x /usr/bin/curl ] ; then
+ if [ -x /usr/bin/curl ]; then
dl_command="curl --insecure --retry $curlRetry --connect-timeout $dlTimeout --silent"
dl_flag="-o"
- else
+ elif wget -V 2>/dev/null | grep -q "+ssl"; then
dl_command="wget --no-check-certificate --timeout $dlTimeout -q"
dl_flag="-O"
+ else
+ dl_command="uclient-fetch --no-check-certificate --timeout $dlTimeout -q"
+ dl_flag="-O"
fi
led="${led:+/sys/class/leds/$led}"
}
output "$_ERROR_: $serviceName failed to discover WAN gateway.\\n"; return 1;
}
+dnsmasqOps() {
+ local cfg="$1" param="$2"
+ case "$param" in
+ dnsmasq.addnhosts)
+ if [ "$(uci -q get dhcp."$cfg".serversfile)" = "$serversFile" ]; then
+ uci -q del dhcp."$cfg".serversfile
+ fi
+ if ! uci -q get dhcp."$cfg".addnhosts | grep -q "$addnhostsFile"; then
+ uci add_list dhcp."$cfg".addnhosts="$addnhostsFile"
+ fi
+ ;;
+ dnsmasq.conf|dnsmasq.ipset|unbound.adb_list|cleanup)
+ uci -q del_list dhcp."$cfg".addnhosts="$addnhostsFile"
+ if [ "$(uci -q get dhcp."$cfg".serversfile)" = "$serversFile" ]; then
+ uci -q del dhcp."$cfg".serversfile
+ fi
+ ;;
+ dnsmasq.servers)
+ uci -q del_list dhcp."$cfg".addnhosts="$addnhostsFile"
+ if [ "$(uci -q get dhcp."$cfg".serversfile)" != "$serversFile" ]; then
+ uci set dhcp."$cfg".serversfile="$serversFile"
+ fi
+ ;;
+ esac
+}
+
dnsOps() {
- local param output_text
+ local param output_text i
case $1 in
on_start)
if [ ! -s "$outputFile" ]; then
- tmpfs set status "$statusFail"
- tmpfs add error "Error: Failed to create $outputFile file."
- output "$_ERROR_: $serviceName failed to create its data file!\\n"
+ tmpfs set status "statusFail"
+ tmpfs add error "errorOutputFileCreate"
+ output "$_ERROR_: $(getErrorText 'errorOutputFileCreate')!\\n"
return 1
fi
+
+ config_load 'dhcp'
+ if [ "$dnsInstance" = "*" ]; then
+ config_foreach dnsmasqOps 'dnsmasq' "$targetDNS"
+ elif [ -n "$dnsInstance" ]; then
+ for i in $dnsInstance; do
+ dnsmasqOps "@dnsmasq[$i]" "$targetDNS"
+ done
+ fi
+
case "$targetDNS" in
- dnsmasq.addnhosts)
- if [ "$(uci -q get dhcp.@dnsmasq["$dnsInstance"].serversfile)" = "$serversFile" ]; then
- uci -q del dhcp.@dnsmasq["$dnsInstance"].serversfile
- fi
- if ! uci -q get dhcp.@dnsmasq["$dnsInstance"].addnhosts | grep -q "$addnhostsFile"; then
- uci add_list dhcp.@dnsmasq["$dnsInstance"].addnhosts="$addnhostsFile"
- fi
+ dnsmasq.addnhosts|dnsmasq.servers)
param=dnsmasq_hup
output_text='Reloading DNSMASQ'
;;
- dnsmasq.conf)
- uci -q del_list dhcp.@dnsmasq["$dnsInstance"].addnhosts="$addnhostsFile"
- if [ "$(uci -q get dhcp.@dnsmasq["$dnsInstance"].serversfile)" = "$serversFile" ]; then
- uci -q del dhcp.@dnsmasq["$dnsInstance"].serversfile
- fi
- param=dnsmasq_restart
- output_text='Restarting DNSMASQ'
- ;;
- dnsmasq.ipset)
- uci -q del_list dhcp.@dnsmasq["$dnsInstance"].addnhosts="$addnhostsFile"
- if [ "$(uci -q get dhcp.@dnsmasq["$dnsInstance"].serversfile)" = "$serversFile" ]; then
- uci -q del dhcp.@dnsmasq["$dnsInstance"].serversfile
- fi
+ dnsmasq.conf|dnsmasq.ipset)
param=dnsmasq_restart
output_text='Restarting DNSMASQ'
;;
- dnsmasq.servers)
- uci -q del_list dhcp.@dnsmasq["$dnsInstance"].addnhosts="$addnhostsFile"
- if [ "$(uci -q get dhcp.@dnsmasq["$dnsInstance"].serversfile)" != "$serversFile" ]; then
- uci set dhcp.@dnsmasq["$dnsInstance"].serversfile="$serversFile"
- fi
- param=dnsmasq_hup
- output_text='Reloading DNSMASQ'
- ;;
unbound.adb_list)
- uci -q del_list dhcp.@dnsmasq["$dnsInstance"].addnhosts="$addnhostsFile"
- if [ "$(uci -q get dhcp.@dnsmasq["$dnsInstance"].serversfile)" = "$serversFile" ]; then
- uci -q del dhcp.@dnsmasq["$dnsInstance"].serversfile
- fi
param=unbound_restart
output_text='Restarting Unbound'
;;
esac
+
if [ -n "$(uci changes dhcp)" ]; then
uci commit dhcp
if [ "$param" = 'unbound_restart' ]; then
output 2 "$output_text "
tmpfs set message "$output_text"
if eval "$param"; then
- tmpfs set status "$statusSuccess"
+ tmpfs set status "statusSuccess"
led_on "$led"
output_okn
else
output_fail
- tmpfs set status "$statusFail"
- tmpfs add error "Error: $output_text error."
- output "$_ERROR_: $serviceName $output_text error!\\n"
+ tmpfs set status "statusFail"
+ tmpfs add error "errorDNSReload"
+ output "$_ERROR_: $(getErrorText 'errorDNSReload')!\\n"
return 1
fi
;;
}
tmpfs() {
- local action="$1" instance="$2" value="$3"
- local status message error stats
+ local action="$1" instance="$2" value="$3"
+ local status message error stats
local readReload readRestart curReload curRestart ret
if [ -s "$jsonFile" ]; then
status="$(jsonfilter -i $jsonFile -l1 -e "@['data']['status']")"
get)
case "$instance" in
status)
- echo "$status"; return;;
+ printf "%b" "$status"; return;;
message)
- echo "$message"; return;;
+ printf "%b" "$message"; return;;
error)
- echo "$error"; return;;
+ printf "%b" "$error"; return;;
stats)
- echo "$stats"; return;;
+ printf "%b" "$stats"; return;;
triggers)
curReload="$parallelDL $debug $dlTimeout $whitelist_domains $blacklist_domains $whitelist_domains_urls $blacklist_domains_urls $blacklist_hosts_urls $targetDNS"
curRestart="$compressedCache $forceDNS $led"
elif [ "$curRestart" != "$readRestart" ]; then
ret='restart'
fi
- echo "$ret"
+ printf "%b" "$ret"
return;;
esac
;;
status)
[ -n "$status" ] && status="$status $value" || status="$value";;
message)
- [ -n "$message" ] && message="${message} ${value}" || message="$value";;
- error)
+ [ -n "$message" ] && message="$message $value" || message="$value";;
+ error)
[ -n "$error" ] && error="$error $value" || error="$value";;
stats)
[ -n "$stats" ] && stats="$stats $value" || stats="$value";;
esac
;;
esac
- json_init
- json_add_object 'data'
- json_add_string version "$PKG_VERSION"
- json_add_string status "$status"
- json_add_string message "$message"
- json_add_string error "$error"
- json_add_string stats "$stats"
- json_add_string reload "$readReload"
- json_add_string restart "$readRestart"
- json_close_object
- json_dump > "$jsonFile"
+ json_init
+ json_add_object 'data'
+ json_add_string version "$PKG_VERSION"
+ json_add_string status "$status"
+ json_add_string message "$message"
+ json_add_string error "$error"
+ json_add_string stats "$stats"
+ json_add_string reload "$readReload"
+ json_add_string restart "$readRestart"
+ json_close_object
+ json_dump > "$jsonFile"
sync
}
case "$param" in
dns_redirect) uci -q del firewall.simple_adblock_dns_redirect;;
ipset) uci -q del firewall.simple_adblock_ipset
- uci -q del firewall.simple_adblock_ipset_rule;;
+ uci -q del firewall.simple_adblock_ipset_rule;;
*)
uci -q del firewall.simple_adblock_dns_redirect
uci -q del firewall.simple_adblock_ipset
if ! $dl_command "$1" $dl_flag "$R_TMP" 2>/dev/null || [ ! -s "$R_TMP" ]; then
output 1 "$_FAIL_"
output 2 "[DL] $type $label $__FAIL__\\n"
- echo "Error: downloading '${1}'." >> "$sharedMemoryError"
+ echo "errorDownloadingList=${1}" >> "$sharedMemoryError"
else
sed -i "$filter" "$R_TMP"
if [ ! -s "$R_TMP" ]; then
output 1 "$_FAIL_"
output 2 "[DL] $type $label $__FAIL__\\n"
- echo "Error: parsing '${1}'." >> "$sharedMemoryError"
+ echo "errorParsingList=${1}" >> "$sharedMemoryError"
else
cat "${R_TMP}" >> "$D_TMP"
output 1 "$_OK_"
download_lists() {
local hf w_filter j=0 R_TMP
- tmpfs set message "${statusDownloading}..."
+ tmpfs set message "${messageDownloading}..."
+ tmpfs set status "statusDownloading"
+
rm -f "$A_TMP" "$B_TMP" "$outputFile" "$outputCache" "$outputGzip"
if [ "$(awk '/^MemFree/ {print int($2/1000)}' "/proc/meminfo")" -lt 32 ]; then
output 3 'Low free memory, restarting resolver... '
done
fi
wait
- [ -s "$sharedMemoryError" ] && tmpfs add error "$(cat "$sharedMemoryError")"
- rm -f "$sharedMemoryError"
output 1 '\n'
+ if [ -s "$sharedMemoryError" ]; then
+ while IFS= read -r line; do
+ tmpfs add error "$line"
+ done < "$sharedMemoryError"
+ rm -f "$sharedMemoryError"
+ fi
[ -n "$blacklist_domains" ] && for hf in ${blacklist_domains}; do echo "$hf" | sed "$domainsFilter" >> $B_TMP; done
whitelist_domains="${whitelist_domains}
output 1 'Processing downloads '
output 2 'Sorting combined list '
- tmpfs set message "$statusProcessing: sorting combined list"
+ tmpfs set message "$messageProcessing: sorting combined list"
if [ "$allowIDN" -gt 0 ]; then
if sort -u "$B_TMP" > "$A_TMP"; then
output_ok
else
output_failn
- tmpfs add error 'Error: Sorting error.'
+ tmpfs add error "errorSorting"
fi
else
if sort -u "$B_TMP" | grep -E -v '[^a-zA-Z0-9=/.-]' > "$A_TMP"; then
output_ok
else
output_failn
- tmpfs add error 'Error: Sorting error.'
+ tmpfs add error "errorSorting"
fi
fi
if [ "$targetDNS" = 'dnsmasq.conf' ] || \
- [ "$targetDNS" = 'dnsmasq.ipset' ] || \
- [ "$targetDNS" = 'dnsmasq.servers' ] || \
+ [ "$targetDNS" = 'dnsmasq.ipset' ] || \
+ [ "$targetDNS" = 'dnsmasq.servers' ] || \
[ "$targetDNS" = 'unbound.adb_list' ]; then
# TLD optimization written by Dirk Brenken (dev@brenken.org)
output 2 'Optimizing combined list '
- tmpfs set message "$statusProcessing: optimizing combined list"
+ tmpfs set message "$messageProcessing: optimizing combined list"
# sed -E 'G;:t;s/(.*)(\.)(.*)(\n)(.*)/\1\4\5\2\3/;tt;s/(.*)\n(\.)(.*)/\3\2\1/' is actually slower than awk
if awk -F "." '{for(f=NF;f>1;f--)printf "%s.",$f;print $1}' "$A_TMP" > "$B_TMP"; then
if sort "$B_TMP" > "$A_TMP"; then
output_ok
else
output_failn
- tmpfs add error 'Error: Data file optimization.'
+ tmpfs add error "errorOptimization"
mv "$A_TMP" "$B_TMP"
fi
else
output_failn
- tmpfs add error 'Error: Data file optimization.'
+ tmpfs add error "errorOptimization"
fi
else
output_failn
- tmpfs add error 'Error: Data file optimization.'
+ tmpfs add error "errorOptimization"
mv "$A_TMP" "$B_TMP"
fi
else
output_failn
- tmpfs add error 'Error: Data file optimization.'
+ tmpfs add error "errorOptimization"
fi
else
output_failn
- tmpfs add error 'Error: Data file optimization.'
+ tmpfs add error "errorOptimization"
mv "$A_TMP" "$B_TMP"
fi
else
fi
output 2 'Whitelisting domains '
- tmpfs set message "$statusProcessing: whitelisting domains"
+ tmpfs set message "$messageProcessing: whitelisting domains"
if sed -i "$w_filter" "$B_TMP"; then
output_ok
else
output_failn
- tmpfs add error 'Error: Whitelist processing.'
+ tmpfs add error "errorWhitelistProcessing"
fi
output 2 'Formatting merged file '
- tmpfs set message "$statusProcessing: formatting merged file"
+ tmpfs set message "$messageProcessing: formatting merged file"
if [ -z "$outputFilterIPv6" ]; then
if sed "$outputFilter" "$B_TMP" > "$A_TMP"; then
output_ok
else
output_failn
- tmpfs add error 'Error: Data file formatting.'
+ tmpfs add error "errorDataFileFormatting"
fi
else
case "$targetDNS" in
output_ok
else
output_failn
- tmpfs add error 'Error: Data file formatting.'
+ tmpfs add error "errorDataFileFormatting"
fi
;;
esac
case "$targetDNS" in
dnsmasq.addnhosts)
output 2 'Creating DNSMASQ addnhosts file '
- tmpfs set message "$statusProcessing: creating DNSMASQ addnhosts file"
+ tmpfs set message "$messageProcessing: creating DNSMASQ addnhosts file"
;;
dnsmasq.conf)
output 2 'Creating DNSMASQ config file '
- tmpfs set message "$statusProcessing: creating DNSMASQ config file"
+ tmpfs set message "$messageProcessing: creating DNSMASQ config file"
;;
dnsmasq.ipset)
output 2 'Creating DNSMASQ ipset file '
- tmpfs set message "$statusProcessing: creating DNSMASQ ipset file"
+ tmpfs set message "$messageProcessing: creating DNSMASQ ipset file"
;;
dnsmasq.servers)
output 2 'Creating DNSMASQ servers file '
- tmpfs set message "$statusProcessing: creating DNSMASQ servers file"
+ tmpfs set message "$messageProcessing: creating DNSMASQ servers file"
;;
unbound.adb_list)
output 2 'Creating Unbound adb_list file '
- tmpfs set message "$statusProcessing: creating Unbound adb_list file"
+ tmpfs set message "$messageProcessing: creating Unbound adb_list file"
;;
esac
if mv "$A_TMP" "$outputFile"; then
output_ok
else
output_failn
- tmpfs add error "Error: moving data file '${A_TMP}' to '${outputFile}'."
+ tmpfs add error "errorMovingDataFile"
fi
if [ "$compressedCache" -gt 0 ]; then
output 2 'Creating compressed cache '
- tmpfs set message "$statusProcessing: creating compressed cache"
+ tmpfs set message "$messageProcessing: creating compressed cache"
if cacheOps 'createGzip'; then
output_ok
else
output_failn
- tmpfs add error 'Error: creating compressed cache.'
+ tmpfs add error "errorCreatingCompressedCache"
fi
else
rm -f "$outputGzip"
fi
output 2 'Removing temporary files '
- tmpfs set message "$statusProcessing: removing temporary files"
+ tmpfs set message "$messageProcessing: removing temporary files"
rm -f "/tmp/${packageName}_tmp.*" "$A_TMP" "$B_TMP" "$outputCache" || j=1
if [ $j -eq 0 ]; then
output_ok
else
output_failn
- tmpfs add error 'Error: removing temporary files.'
+ tmpfs add error "errorRemovingTempFiles"
fi
output 1 '\n'
}
start_service() {
is_enabled 'on_start' || return 1
- local action status error message stats
+ local action status error message stats c
if ! create_lock; then
output 3 "$serviceName: another instance is starting up "; output_fail
return 0
action='restore'
elif [ "$action" = 'restart' ] || [ "$1" = 'restart' ]; then
action='restart'
- elif [ -s "$outputFile" ] && [ -n "$status" ] && [ -z "$error" ]; then
- if [ "$1" != 'hotplug' ]; then status; fi
+ elif [ -s "$outputFile" ] && [ "$status" = "statusSuccess" ] && [ -z "$error" ]; then
+ [ "$1" != 'hotplug' ] && showstatus
exit 0
else
action='download'
if [ "$action" = 'restore' ]; then
output 0 "Starting $serviceName... "
output 3 "Starting $serviceName...\\n"
- tmpfs set status "$statusStarting"
+ tmpfs set status "statusStarting"
if cacheOps 'testGzip' && ! cacheOps 'test' && [ ! -s "$outputFile" ]; then
output 3 'Found compressed cache file, unpacking it '
tmpfs set message 'found compressed cache file, unpacking it.'
output_okn
else
output_fail
- output "$_ERROR_: $serviceName failed to unpack compressed cache!\\n"
+ tmpfs add error "errorRestoreCompressedCache"
+ output "$_ERROR_: $(getErrorText 'errorRestoreCompressedCache')!\\n"
action='download'
fi
fi
if cacheOps 'restore'; then
output_okn
dnsOps 'on_start'
- else
+ else
output_fail
- output "$_ERROR_: $serviceName failed to move '$outputCache' to '$outputFile'!\\n"
+ tmpfs add error "errorRestoreCache"
+ output "$_ERROR_: $(getErrorText 'errorRestoreCache')!\\n"
action='download'
fi
fi
if [ -s "$outputFile" ] || cacheOps 'test' || cacheOps 'testGzip'; then
output 0 "Force-reloading $serviceName... "
output 3 "Force-reloading $serviceName...\\n"
- tmpfs set status "$statusForceReloading"
+ tmpfs set status "statusForceReloading"
else
output 0 "Starting $serviceName... "
output 3 "Starting $serviceName...\\n"
- tmpfs set status "$statusStarting"
+ tmpfs set status "statusStarting"
fi
download_lists
dnsOps 'on_start'
restart)
output 0 "Restarting $serviceName... "
output 3 "Restarting $serviceName...\\n"
- tmpfs set status "$statusRestarting"
+ tmpfs set status "statusRestarting"
dnsOps 'on_start'
;;
start)
output 0 "Starting $serviceName... "
output 3 "Starting $serviceName...\\n"
- tmpfs set status "$statusStarting"
+ tmpfs set status "statusStarting"
dnsOps 'on_start'
;;
esac
- if [ -s "$outputFile" ] && [ "$(tmpfs get status)" != "$statusFail" ]; then
+ if [ -s "$outputFile" ] && [ "$(tmpfs get status)" != "statusFail" ]; then
output 0 "$__OK__\\n";
- c="$(wc -l < "$outputFile")"
- output 3 "$serviceName is blocking $c domains (with ${targetDNS}) "; output_okn
tmpfs del message
- tmpfs set status "$statusSuccess: $c domains blocked (with ${targetDNS})."
- error="$(tmpfs get error)"
- if [ -n "$error" ]; then
- output "$(str_replace "$error" "Error:" "$_ERROR_:")\\n"
- fi
+ tmpfs set status "statusSuccess"
+ c="$(wc -l < "$outputFile")"
+ tmpfs set stats "$serviceName is blocking $c domains (with ${targetDNS})"
+ showstatus
else
output 0 "$__FAIL__\\n";
- tmpfs set status "$statusFail"
- tmpfs add error 'Error: Failed to create blocklist or restart DNS resolver.'
+ tmpfs set status "statusFail"
+ tmpfs add error "errorOhSnap"
+ showstatus
fi
remove_lock
}
restart() { restart_service; }
reload() { restart_service; }
dl() { rc_procd start_service 'download'; }
-
killcache() {
rm -f "$addnhostsCache" "$addnhostsGzip"
rm -f "$dnsmasqCache" "$dnsmasqGzip"
rm -f "$ipsetCache" "$ipsetGzip"
rm -f "$serversCache" "$serversGzip"
rm -f "$unboundCache" "$unboundGzip"
+ config_load 'dhcp'
+ config_foreach dnsmasqOps 'dnsmasq' 'cleanup'
+ uci -q commit 'dhcp'
return 0
}
-status_service() {
- local status="$(tmpfs get status)" error="$(tmpfs get error)" message="$(tmpfs get message)"
- if [ -n "$status" ] && [ -n "$message" ]; then
- status="${status}: $message"
+show() { showstatus; }
+status_service() { showstatus; }
+showstatus() {
+ local status="$(tmpfs get status)"
+ local message="$(tmpfs get message)"
+ local error="$(tmpfs get error)"
+ local stats="$(tmpfs get stats)"
+ local c url
+ if [ "$status" = "statusSuccess" ]; then
+ output "$stats "; output_okn;
+ else
+ [ -n "$status" ] && status="$(getStatusText "$status")"
+ if [ -n "$status" ] && [ -n "$message" ]; then
+ status="${status}: $message"
+ fi
+ [ -n "$status" ] && output "$serviceName $status\\n"
+ fi
+ if [ -n "$error" ]; then
+ for c in $error; do
+ url="${c##*=}"
+ c="${c%=*}"
+ case "$c" in
+ errorDownloadingList|errorParsingList)
+ output "$_ERROR_: $(getErrorText "$c") $url!\\n";;
+ *)
+ output "$_ERROR_: $(getErrorText "$c")!\\n";;
+ esac
+ let n=n+1
+ done
fi
- [ -n "$status" ] && output "$serviceName $status\\n"
- [ -n "$error" ] && output "$error\\n"
}
stop_service() {
cacheOps 'create'
if dnsOps 'on_stop'; then
led_off "$led"
- output 0 "$__OK__\\n"; output_okn
- tmpfs set status "$statusStopped"
+ output 0 "$__OK__\\n"; output_okn;
+ tmpfs set status "statusStopped"
tmpfs del message
else
- output 0 "$__FAIL__\\n"; output_fail
- tmpfs set status "$statusFail"
- tmpfs add error "Error: error stopping $serviceName."
- output "$_ERROR_: error stopping $serviceName!\\n"
+ output 0 "$__FAIL__\\n"; output_fail;
+ tmpfs set status "statusFail"
+ tmpfs add error "errorStopping"
+ output "$_ERROR_: $(getErrorText 'errorStopping')!\\n"
fi
fi
}
+service_triggers() {
+ procd_add_reload_trigger 'simple-adblock'
+}
+
check() {
load_package_config
local string="$1"
PKG_NAME:=sqm-scripts
PKG_SOURCE_VERSION:=ab763cba8b1516b3afa99760e0ca884f8b8d93b8
PKG_VERSION:=1.4.0
-PKG_RELEASE:=5
+PKG_RELEASE:=6
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://github.com/tohojo/sqm-scripts
define Package/sqm-scripts
SECTION:=net
CATEGORY:=Base system
- DEPENDS:=+tc +kmod-sched-core +kmod-sched-cake +kmod-ifb +iptables \
- +iptables-mod-ipopt +iptables-mod-conntrack-extra
+ DEPENDS:=+tc +kmod-sched-core +kmod-ifb +iptables \
+ +iptables-mod-ipopt +iptables-mod-conntrack-extra \
+ +!LINUX_4_14:kmod-sched-cake +LINUX_4_14:kmod-sched-cake-oot
TITLE:=SQM Scripts (QoS)
PKGARCH:=all
endef
PKG_NAME:=strongswan
PKG_VERSION:=5.8.2
-PKG_RELEASE:=2
+PKG_RELEASE:=3
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.bz2
PKG_SOURCE_URL:=https://download.strongswan.org/ https://download2.strongswan.org/
EXTRA_LDFLAGS+= -Wl,-rpath-link,$(STAGING_DIR)/usr/lib
define Package/strongswan/conffiles
-/etc/ipsec.d/
-/etc/ipsec.conf
-/etc/ipsec.secrets
-/etc/ipsec.user
/etc/strongswan.conf
/etc/strongswan.d/
endef
define Package/strongswan/install
$(INSTALL_DIR) $(1)/etc
- $(INSTALL_CONF) $(PKG_INSTALL_DIR)/etc/ipsec.conf $(1)/etc/
$(INSTALL_CONF) $(PKG_INSTALL_DIR)/etc/strongswan.conf $(1)/etc/
$(INSTALL_DIR) $(1)/usr/lib/ipsec
$(CP) $(PKG_INSTALL_DIR)/usr/lib/ipsec/libstrongswan.so.* $(1)/usr/lib/ipsec/
$(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/sbin/charon-cmd $(1)/usr/sbin/
endef
+define Package/strongswan-ipsec/conffiles
+/etc/ipsec.d/
+/etc/ipsec.conf
+/etc/ipsec.secrets
+/etc/ipsec.user
+endef
+
define Package/strongswan-ipsec/install
- $(INSTALL_DIR) $(1)/usr/sbin
+ $(INSTALL_DIR) $(1)/etc/ $(1)/usr/sbin
+ $(INSTALL_CONF) $(PKG_INSTALL_DIR)/etc/ipsec.conf $(1)/etc/
$(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/sbin/ipsec $(1)/usr/sbin/
endef
PKG_NAME:=tinyproxy
PKG_VERSION:=1.10.0
-PKG_RELEASE:=1
+PKG_RELEASE:=2
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.xz
PKG_SOURCE_URL:=https://github.com/tinyproxy/tinyproxy/releases/download/$(PKG_VERSION)
# Copyright (C) 2008-2011 OpenWrt.org
START=50
+USE_PROCD=1
CFGFILE=/var/etc/tinyproxy.conf
+PROG=/usr/bin/tinyproxy
section_enabled() {
local enabled
[ "$enabled" -gt 0 ]
}
-start() {
- config_load 'tinyproxy'
- config_foreach start_proxy 'tinyproxy'
-}
-
-stop() {
- service_stop /usr/sbin/tinyproxy
-}
-
-start_proxy() {
- section_enabled "$1" || return 1
-
- mkdir -p /var/etc
- chmod 0755 /var/etc
- {
- echo '### AUTOGENERATED CONFIGURATION'
- echo '### DO NOT EDIT'
- echo '### SEE /etc/config/tinyproxy INSTEAD'
- echo ''
-
- proxy_atom "$1" User
- proxy_atom "$1" Group
- proxy_atom "$1" Port 8888
- proxy_atom "$1" Listen
- proxy_atom "$1" Bind
- proxy_atom "$1" Timeout
-
- proxy_string "$1" ErrorFile_400 "ErrorFile 400"
- proxy_string "$1" ErrorFile_403 "ErrorFile 403"
- proxy_string "$1" ErrorFile_404 "ErrorFile 404"
- proxy_string "$1" ErrorFile_408 "ErrorFile 408"
- proxy_string "$1" ErrorFile_503 "ErrorFile 503"
-
- proxy_string "$1" DefaultErrorFile
- proxy_string "$1" StatHost StatHost 127.0.0.1
- proxy_string "$1" StatFile
- proxy_string "$1" LogFile
-
- proxy_flag "$1" Syslog
-
- proxy_atom "$1" LogLevel
-
- proxy_flag "$1" XTinyproxy
-
- proxy_atom "$1" MaxClients
- proxy_atom "$1" MinSpareServers
- proxy_atom "$1" MaxSpareServers
- proxy_atom "$1" StartServers
- proxy_atom "$1" MaxRequestsPerChild
- proxy_list "$1" Allow
-
- proxy_string "$1" ViaProxyName
- proxy_string "$1" Filter
-
- proxy_flag "$1" FilterURLs
- proxy_flag "$1" FilterExtended
- proxy_flag "$1" FilterCaseSensitive
- proxy_flag "$1" FilterDefaultDeny Yes No
-
- proxy_list "$1" Anonymous '"'
- proxy_list "$1" ConnectPort
-
- config_foreach write_upstream upstream
- } > "$CFGFILE"
-
- service_start /usr/sbin/tinyproxy -c "$CFGFILE"
-}
-
write_upstream() {
local type
local via
done
}
}
+
+start_proxy() {
+ section_enabled "$1" || return 1
+
+ mkdir -p /var/etc
+ chmod 0755 /var/etc
+ {
+ echo '### AUTOGENERATED CONFIGURATION'
+ echo '### DO NOT EDIT'
+ echo '### SEE /etc/config/tinyproxy INSTEAD'
+ echo ''
+
+ proxy_atom "$1" User
+ proxy_atom "$1" Group
+ proxy_atom "$1" Port 8888
+ proxy_atom "$1" Listen
+ proxy_atom "$1" Bind
+ proxy_atom "$1" Timeout
+
+ proxy_string "$1" ErrorFile_400 "ErrorFile 400"
+ proxy_string "$1" ErrorFile_403 "ErrorFile 403"
+ proxy_string "$1" ErrorFile_404 "ErrorFile 404"
+ proxy_string "$1" ErrorFile_408 "ErrorFile 408"
+ proxy_string "$1" ErrorFile_503 "ErrorFile 503"
+
+ proxy_string "$1" DefaultErrorFile
+ proxy_string "$1" StatHost StatHost 127.0.0.1
+ proxy_string "$1" StatFile
+ proxy_string "$1" LogFile
+
+ proxy_flag "$1" Syslog
+
+ proxy_atom "$1" LogLevel
+
+ proxy_flag "$1" XTinyproxy
+
+ proxy_atom "$1" MaxClients
+ proxy_atom "$1" MinSpareServers
+ proxy_atom "$1" MaxSpareServers
+ proxy_atom "$1" StartServers
+ proxy_atom "$1" MaxRequestsPerChild
+ proxy_list "$1" Allow
+
+ proxy_string "$1" ViaProxyName
+ proxy_string "$1" Filter
+
+ proxy_flag "$1" FilterURLs
+ proxy_flag "$1" FilterExtended
+ proxy_flag "$1" FilterCaseSensitive
+ proxy_flag "$1" FilterDefaultDeny Yes No
+
+ proxy_list "$1" Anonymous '"'
+ proxy_list "$1" ConnectPort
+
+ config_foreach write_upstream upstream
+ } > "$CFGFILE"
+
+ procd_open_instance
+ procd_set_param command "$PROG"
+ procd_append_param command -c "$CFGFILE"
+ procd_append_param command -d
+ procd_close_instance
+}
+
+start_service() {
+ config_load 'tinyproxy'
+ config_foreach start_proxy 'tinyproxy'
+}
PKG_NAME:=vpn-policy-routing
PKG_VERSION:=0.2.1
-PKG_RELEASE:=3
+PKG_RELEASE:=7
PKG_LICENSE:=GPL-3.0-or-later
PKG_MAINTAINER:=Stan Grishin <stangri@melmac.net>
|Advanced|supported_interface|list/string||Allows to specify the space-separated list of interface names (in lower case) to be explicitly supported by the ```vpn-policy-routing``` service. Can be useful if your OpenVPN tunnels have dev option other than tun\* or tap\*.|
|Advanced|ignored_interface|list/string||Allows to specify the space-separated list of interface names (in lower case) to be ignored by the ```vpn-policy-routing``` service. Can be useful if running both VPN server and VPN client on the router.|
|Advanced|boot_timeout|number|30|Allows to specify the time (in seconds) for ```vpn-policy-routing``` service to wait for WAN gateway discovery on boot. Can be useful on devices with ADSL modem built in.|
-|Advanced|iptables_rule_option|append/insert|append|Allows to specify the iptables parameter for rules: ```-A``` for ```append``` and ```-I``` for ```insert```. Append is generally speaking more compatible with other packages/firewall rules. Recommended to change to ```insert``` only to improve compatibility with the ```mwan3``` package.|
+|Advanced|iptables_rule_option|append/insert|append|Allows to specify the iptables parameter for rules: ```-A``` for ```append``` and ```-I``` for ```insert```. Append is generally speaking more compatible with other packages/firewall rules. Recommended to change to ```insert``` only to enable compatibility with the ```mwan3``` package.|
|Advanced|iprule_enabled|boolean|0|Add an ```ip rule```, not an ```iptables``` entry for policies with just the local address. Use with caution to manipulate policies priorities.|
|Advanced|icmp_interface|string||Set the default ICMP protocol interface (interface name in lower case). Use with caution.|
|Advanced|append_src_rules|string||Append local IP Tables rules. Can be used to exclude local IP addresses from destinations for policies with local address set.|
|src_port||List of space-separated local/source ports or port-ranges.|
|dest_addr||List of space-separated remote/target IP addresses, CIDRs or hostnames/domain names.|
|dest_port||List of space-separated remote/target ports or port-ranges.|
-|proto|all|Policy protocol, can be any valid protocol from ```/etc/protocols``` for CLI/uci or can be selected from the values set in ```webui_supported_protocol```. To display the ```Protocol``` column for policies in the WebUI, make sure to select ```Enabled``` for ```Show Protocol Column``` in the ```Web UI``` tab.|
+|proto|auto|Policy protocol, can be any valid protocol from ```/etc/protocols``` for CLI/uci or can be selected from the values set in ```webui_supported_protocol```. To display the ```Protocol``` column for policies in the WebUI, make sure to select ```Enabled``` for ```Show Protocol Column``` in the ```Web UI``` tab.<br/>Special cases: ```auto``` will try to intelligently insert a protocol-agnostic policy and fall back to TCP/UDP if the protocol must be selected for a specific policy; ```all``` will always insert a protocol-agnostic policy (which may fail depending on the policy).|
|chain|PREROUTING|Policy chain, can be either ```PREROUTING```, ```FORWARDING```, ```INPUT``` or ```OUTPUT```. This setting is case-sensitive. To display the ```Chain``` column for policies in the WebUI, make sure to select ```Enabled``` for ```Show Chain Column``` in the ```Web UI``` tab.|
### Custom User Files Include Options
config policy
option name 'Wireguard Server'
option interface 'wan'
- option proto 'tcp'
+ option proto 'udp'
option src_port '61820'
option chain 'OUTPUT'
```
Some browsers, like [Mozilla Firefox](https://support.mozilla.org/en-US/kb/firefox-dns-over-https#w_about-dns-over-https) or [Google Chrome/Chromium](https://blog.chromium.org/2019/09/experimenting-with-same-provider-dns.html) have [DNS-over-HTTPS proxy](https://en.wikipedia.org/wiki/DNS_over_HTTPS) built-in. Their requests to web-sites cannot be affected if the ```dnsmasq.ipset``` is set for the ```dest_ipset``` option. To fix this, you can try either of the following:
- 1. Disable the DNS-over-HTTPS support in your browser and use the OpenWrt's [net/https-dns-proxy](https://github.com/openwrt/packages/tree/master/net/https-dns-proxy) package and set it up either [manually](https://openwrt.org/docs/guide-user/services/dns/doh_dnsmasq_https-dns-proxy?s[]=https&s[]=dns&s[]=proxy) or auto-magically with [https-dns-proxy luci app](https://github.com/openwrt/luci/tree/master/applications/luci-app-https_dns_proxy). You can then continue to use ```dnsmasq.ipset``` setting for the ```dest_ipset``` in VPN Policy Routing.
+ 1. Disable the DNS-over-HTTPS support in your browser and use the OpenWrt's [net/https-dns-proxy](https://github.com/openwrt/packages/tree/master/net/https-dns-proxy) package with optional [https-dns-proxy luci app](https://github.com/openwrt/luci/tree/master/applications/luci-app-https_dns_proxy). You can then continue to use ```dnsmasq.ipset``` setting for the ```dest_ipset``` in VPN Policy Routing.
 2. Continue using DNS-over-HTTPS in your browser (which, by the way, also limits your options for router-level AdBlocking as described [in ```dnsmasq.ipset``` option description here](https://github.com/openwrt/packages/tree/master/net/simple-adblock/files#dns-resolution-option)), you then would have to either disable the ```dest_ipset``` or switch it to ```ipset```. Please note, you will lose all the benefits of [```dnsmasq.ipset```](#use-dnsmasq-ipset) option.
## Thanks
-I'd like to thank everyone who helped create, test and troubleshoot this service. Without contributions from [@hnyman](https://github.com/hnyman), [@dibdot](https://github.com/dibdot), [@danrl](https://github.com/danrl), [@tohojo](https://github.com/tohojo), [@cybrnook](https://github.com/cybrnook), [@nidstigator](https://github.com/nidstigator), [@AndreBL](https://github.com/AndreBL) and [@dz0ny](https://github.com/dz0ny) and rigorous testing/bugreporting by [@dziny](https://github.com/dziny), [@bluenote73](https://github.com/bluenote73), [@buckaroo](https://github.com/pgera), [@Alexander-r](https://github.com/Alexander-r), [n8v8R](https://github.com/n8v8R) and [psherman](https://forum.openwrt.org/u/psherman) it wouldn't have been possible. Wireguard/IPv6 support is courtesy of [Mullvad](https://www.mullvad.net), [IVPN](https://www.ivpn.net/) and [WireVPN](https://www.wirevpn.net).
+I'd like to thank everyone who helped create, test and troubleshoot this service. Without contributions from [@hnyman](https://github.com/hnyman), [@dibdot](https://github.com/dibdot), [@danrl](https://github.com/danrl), [@tohojo](https://github.com/tohojo), [@cybrnook](https://github.com/cybrnook), [@nidstigator](https://github.com/nidstigator), [@AndreBL](https://github.com/AndreBL), [@dz0ny](https://github.com/dz0ny), rigorous testing/bugreporting by [@dziny](https://github.com/dziny), [@bluenote73](https://github.com/bluenote73), [@buckaroo](https://github.com/pgera), [@Alexander-r](https://github.com/Alexander-r), [n8v8R](https://github.com/n8v8R), [psherman](https://forum.openwrt.org/u/psherman), multiple contributions from [dl12345](https://github.com/dl12345), [trendy](https://forum.openwrt.org/u/trendy) and feedback from other OpenWrt users it wouldn't have been possible. Wireguard/IPv6 support is courtesy of [Mullvad](https://www.mullvad.net), [IVPN](https://www.ivpn.net/) and [WireVPN](https://www.wirevpn.net).
#!/bin/sh /etc/rc.common
# Copyright 2017-2019 Stan Grishin (stangri@melmac.net)
-# shellcheck disable=SC2039
-# shellcheck disable=SC1091
+# shellcheck disable=SC2039,SC1091,SC2018,SC2019
PKG_VERSION='dev-test'
export START=94
# str_contains() { [ "$1" != "$(str_replace "$1" "$2" "")" ]; }
# shellcheck disable=SC2018,SC2019
str_to_lower() { echo "$1" | tr 'A-Z' 'a-z'; }
+str_extras_to_underscore() { echo "$1" | tr '[\. ~`!@#$%^&*()\+/,<>?//;:]' '_'; }
+str_extras_to_space() { echo "$1" | tr ';{}' ' '; }
+
output() {
# Can take a single parameter (text) to be output at any verbosity
# Or target verbosity level and text to be output at specific verbosity
else
if [[ -z "$appendix" && -z "$remoteIpset" ]] || \
[[ -n "$appendix" && "$localIpset" -eq 0 ]]; then
- return 1
+ return 1
fi
fi
[ -n "$lport" ] && param="$param -p tcp -m multiport --sport ${lport//-/:}"
[ -n "$raddr" ] && param="$param -d $raddr"
[ -n "$rport" ] && param="$param -p $proto -m multiport --dport ${rport//-/:}"
- [ -n "$comment" ] && param="$param -m comment --comment $(echo "$comment" | tr '[\. ~`!@#$%^&*()\+/,<>?//;:]' '_')"
+ [ -n "$comment" ] && param="$param -m comment --comment $(str_extras_to_underscore "$comment")"
# Here be dragons
return 0
}
return 0
fi
- if [ -z "$proto" ] || [ "$proto" = 'all' ]; then
- if [ -z "${lport}${raddr}${rport}" ] && [ -n "$laddr" ]; then
- proto='all'
- elif [ -z "${laddr}${lport}${rport}" ] && [ -n "$raddr" ]; then
- proto='all'
- elif [ -n "$lport" ] || [ -n "$rport" ]; then
+ if [ -z "$proto" ]; then
+ if [ -n "$lport" ] || [ -n "$rport" ]; then
proto='tcp udp'
else
- proto='tcp'
+ proto='all'
fi
fi
param="$param -m multiport $valueNeg --dport ${value//-/:}"
fi
- [ -n "$comment" ] && param="$param -m comment --comment $(echo "$comment" | tr '[\. ~`!@#$%^&*()\+/,<>?//;:]' '_')"
+ [ -n "$comment" ] && param="$param -m comment --comment $(str_extras_to_underscore "$comment")"
ipt "$param" || processPolicyError="${processPolicyError}${_ERROR_}: iptables $param\\n"
done
return 0
r_process_policy(){
local comment="$1" iface="$2" laddr="$3" lport="$4" raddr="$5" rport="$6" proto="$7" chain="$8" resolved_laddr resolved_raddr i ipsFailFlag
if [ "${laddr//[ ;\{\}]/}" != "$laddr" ]; then
- for i in $(echo "$laddr" | tr ';{}' ' '); do [ -n "$i" ] && r_process_policy "$comment" "$iface" "$i" "$lport" "$raddr" "$rport" "$proto" "$chain"; done
+ for i in $(str_extras_to_space "$laddr"); do [ -n "$i" ] && r_process_policy "$comment" "$iface" "$i" "$lport" "$raddr" "$rport" "$proto" "$chain"; done
return 0
elif [ "${lport//[ ;\{\}]/}" != "$lport" ]; then
- for i in $(echo "$lport" | tr ';{}' ' '); do [ -n "$i" ] && r_process_policy "$comment" "$iface" "$laddr" "$i" "$raddr" "$rport" "$proto" "$chain"; done
+ for i in $(str_extras_to_space "$lport"); do [ -n "$i" ] && r_process_policy "$comment" "$iface" "$laddr" "$i" "$raddr" "$rport" "$proto" "$chain"; done
return 0
elif [ "${raddr//[ ;\{\}]/}" != "$raddr" ]; then
- for i in $(echo "$raddr" | tr ';{}' ' '); do [ -n "$i" ] && r_process_policy "$comment" "$iface" "$laddr" "$lport" "$i" "$rport" "$proto" "$chain"; done
+ for i in $(str_extras_to_space "$raddr"); do [ -n "$i" ] && r_process_policy "$comment" "$iface" "$laddr" "$lport" "$i" "$rport" "$proto" "$chain"; done
return 0
elif [ "${rport//[ ;\{\}]/}" != "$rport" ]; then
- for i in $(echo "$rport" | tr ';{}' ' '); do [ -n "$i" ] && r_process_policy "$comment" "$iface" "$laddr" "$lport" "$raddr" "$i" "$proto" "$chain"; done
+ for i in $(str_extras_to_space "$rport"); do [ -n "$i" ] && r_process_policy "$comment" "$iface" "$laddr" "$lport" "$raddr" "$i" "$proto" "$chain"; done
return 0
fi
config_get_bool enabled "$1" 'enabled' 1
[ "$enabled" -gt 0 ] || return 0
+ proto="$(str_to_lower "$proto")"
[ "$proto" = 'auto' ] && unset proto
- [ "$proto" = 'AUTO' ] && unset proto
comment="${comment:-$name}"
output 2 "Routing '$comment' via $iface "
if [ -z "$comment" ]; then
errorSummary="${errorSummary}${_ERROR_}: Policy name is empty\\n"
output_fail; return 1;
- fi
+ fi
if [ -z "${laddr}${lport}${raddr}${rport}" ]; then
errorSummary="${errorSummary}${_ERROR_}: Policy '$comment' missing all IPs/ports\\n"
output_fail; return 1;
fi
done
ip -4 route flush cache || ipv4_error=1
- ip -4 rule add fwmark "$mark" table "$tid" || ipv4_error=1
+ ip -4 rule add fwmark "${mark}/${fwMask}" table "$tid" || ipv4_error=1
fi
if [ "$ipv6Enabled" -ne 0 ]; then
done
fi
ip -6 route flush cache || ipv6_error=1
- ip -6 rule add fwmark "$mark" table "$tid" || ipv6_error=1
+ ip -6 rule add fwmark "${mark}/${fwMask}" table "$tid" || ipv6_error=1
fi
fi
done
if [ "$modprobeStatus" -gt 0 ] && ! is_chaos_calmer; then
- errorSummary="${errorSummary}${_ERROR_}: Failed to load kernel modules\\n"
+ errorSummary="${errorSummary}${_ERROR_}: Failed to load kernel modules\\n"
fi
for i in PREROUTING FORWARD INPUT OUTPUT; do
[ "$dnsmasqNewHash" != "$dnsmasqStoredHash" ] && dnsmasq_restart
if [ -z "$gatewaySummary" ]; then
- errorSummary="${errorSummary}${_ERROR_}: failed to set up any gateway\\n"
+ errorSummary="${errorSummary}${_ERROR_}: failed to set up any gateway\\n"
else
output "$serviceName started with gateways:\\n${gatewaySummary}"
[ -n "$errorSummary" ] && output "${errorSummary}"
}
input() { local data; while read -r data; do echo "$data" | tee -a /var/${packageName}-support; done; }
+status_service() { support "$@"; }
support() {
local dist vers out id s param status set_d set_p tableCount i=0 dev dev6
is_enabled
include $(TOPDIR)/rules.mk
PKG_NAME:=wsdd2
-PKG_RELEASE:=5
+PKG_RELEASE:=6
PKG_SOURCE_PROTO:=git
PKG_SOURCE_URL:=https://github.com/Andy2244/wsdd2.git
SMB_CONF=""
BIND_IF_PARM=""
-NB_PARM=""
-WG_PARM=""
+NB_PARM="$(cat /proc/sys/kernel/hostname)"
+WG_PARM="WORKGROUP"
BI_PARM=""
start_service() {
SMB_CONF="/etc/ksmbd/smb.conf"
fi
- if [ -e /etc/samba/smb.conf ]; then
+ [ -e /etc/samba/smb.conf ] && {
if [ -e /etc/init.d/samba4 ] && /etc/init.d/samba4 running; then
SMB_CONF="/etc/samba/smb.conf"
elif [ -e /etc/init.d/samba ] && /etc/init.d/samba running; then
SMB_CONF="/etc/samba/smb.conf"
fi
- fi
-
- if [ -z "$SMB_CONF" ]; then
+ }
+
+ [ -z "$SMB_CONF" ] && {
logger -p daemon.error -t 'wsdd2' "samba36/4 or ksmbd is not running, can't start wsdd2!"
exit 1
- fi
-
+ }
+
local nb_name
nb_name="$(grep -i 'netbios name' $SMB_CONF | awk -F'=' '{print $2}' | tr -d ' \n')"
- if [ -n "$nb_name" ]; then
- NB_PARM="-N $nb_name"
- else
- local hostname
- hostname="$(cat /proc/sys/kernel/hostname)"
- NB_PARM="-N $hostname"
- fi
+ [ -n "$nb_name" ] && NB_PARM="$nb_name"
local wg_name
wg_name="$(grep -i 'workgroup' $SMB_CONF | awk -F'=' '{print $2}' | tr -d ' \n')"
- if [ -n "$wg_name" ]; then
- WG_PARM="-G $wg_name"
- else
- WG_PARM="-G WORKGROUP"
- fi
+ [ -n "$wg_name" ] && WG_PARM="$wg_name"
# resolve lan interface (BUG: No multi-interface binds atm)
local ifname
- if network_get_device ifname lan; then
- BIND_IF_PARM="-i $ifname"
- fi
+ network_get_device ifname lan
local board_vendor
local board_model
local board_sku
- if [ -e /tmp/sysinfo/board_name ]; then
- board_vendor="$(cat /tmp/sysinfo/board_name | awk -F',' '{print $1}' | tr ' ' '_' | tr -d ' \n')"
- board_sku="$(cat /tmp/sysinfo/board_name | awk -F',' '{print $2}' | tr ' ' '_' | tr -d ' \n')"
- fi
- if [ -e /tmp/sysinfo/model ]; then
- board_model="$(cat /tmp/sysinfo/model | tr ' ' '_' | tr -d ' \n')"
- fi
- if [ -n "$board_vendor" ] && [ -n "$board_model" ]; then
+ [ -e /tmp/sysinfo/board_name ] && {
+ board_vendor="$(awk -F',' '{print $1}' /tmp/sysinfo/board_name | tr ' ' '_' | tr -d ' \n')"
+ board_sku="$(awk -F',' '{print $2}' /tmp/sysinfo/board_name | tr ' ' '_' | tr -d ' \n')"
+ }
+
+ [ -e /tmp/sysinfo/model ] && {
+ board_model="$(awk -F':' '{print $1}' /tmp/sysinfo/model | tr ' ' '_' | tr -d ' \n')"
+ }
+
+ [ -n "$board_vendor" ] && [ -n "$board_model" ] && {
if [ -n "$board_sku" ]; then
- BI_PARM="-b vendor:$board_vendor,model:$board_model,sku:$board_sku"
+ BI_PARM="vendor:$board_vendor,model:$board_model,sku:$board_sku"
else
- BI_PARM="-b vendor:$board_vendor,model:$board_model"
+ BI_PARM="vendor:$board_vendor,model:$board_model"
fi
- fi
+ }
procd_open_instance
- procd_set_param command /usr/bin/wsdd2 -w $BIND_IF_PARM $NB_PARM $WG_PARM $BI_PARM
- procd_set_param respawn
- procd_set_param file $SMB_CONF
- procd_close_instance
+ procd_set_param command /usr/bin/wsdd2
+ [ -n "$ifname" ] && procd_append_param command -i "$ifname"
+ procd_append_param command -N "$NB_PARM"
+ procd_append_param command -G "$WG_PARM"
+ procd_append_param command -b "$BI_PARM"
+ procd_set_param respawn
+ procd_set_param file "$SMB_CONF"
+ procd_close_instance
}
service_triggers() {
PKG_NAME:=zerotier
PKG_VERSION:=1.4.6
-PKG_RELEASE:=2
+PKG_RELEASE:=3
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://codeload.github.com/zerotier/ZeroTierOne/tar.gz/$(PKG_VERSION)?
--- /dev/null
+--- a/osdep/OSUtils.hpp
++++ b/osdep/OSUtils.hpp
+@@ -37,9 +37,6 @@
+ #include <sys/time.h>
+ #include <sys/stat.h>
+ #include <arpa/inet.h>
+-#ifdef __LINUX__
+-#include <sys/syscall.h>
+-#endif
+ #endif
+
+ #ifndef OMIT_JSON_SUPPORT
+@@ -211,11 +208,8 @@ public:
+ return (int64_t)( ((tmp.QuadPart - 116444736000000000LL) / 10000L) + st.wMilliseconds );
+ #else
+ struct timeval tv;
+-#ifdef __LINUX__
+- syscall(SYS_gettimeofday,&tv,0); /* fix for musl libc broken gettimeofday bug */
+-#else
+ gettimeofday(&tv,(struct timezone *)0);
+-#endif
++
+ return ( (1000LL * (int64_t)tv.tv_sec) + (int64_t)(tv.tv_usec / 1000) );
+ #endif
+ };
PKG_NAME:=forked-daapd
PKG_VERSION:=27.1
-PKG_RELEASE:=1
+PKG_RELEASE:=2
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.xz
PKG_SOURCE_URL:=https://github.com/ejurgensen/$(PKG_NAME)/releases/download/$(PKG_VERSION)/
--- /dev/null
+--- a/src/websocket.c
++++ b/src/websocket.c
+@@ -25,6 +25,7 @@
+ #ifdef HAVE_PTHREAD_NP_H
+ # include <pthread_np.h>
+ #endif
++#include <errno.h>
+ #include <stdbool.h>
+ #include <stdio.h>
+ #include <string.h>
PKG_NAME:=gammu
PKG_VERSION:=1.41.0
-PKG_RELEASE:=2
+PKG_RELEASE:=3
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.xz
PKG_SOURCE_URL:=https://dl.cihar.com/gammu/releases
PKG_BUILD_PARALLEL:=1
include $(INCLUDE_DIR)/package.mk
-include $(INCLUDE_DIR)/nls.mk
include $(INCLUDE_DIR)/cmake.mk
define Package/gammu
CATEGORY:=Utilities
TITLE:=Cell phone/modem SMS and control tool
URL:=https://wammu.eu/gammu/
- DEPENDS:=+libpthread +libcurl +glib2 $(ICONV_DEPENDS) $(INTL_DEPENDS)
+ DEPENDS:=+libpthread +libcurl +glib2
DEPENDS+=+PACKAGE_bluez-libs:bluez-libs
DEPENDS+=+PACKAGE_libmariadb:libmariadb +PACKAGE_unixodbc:unixodbc +PACKAGE_libpq:libpq
DEPENDS+=+PACKAGE_libusb-1.0:libusb-1.0
endef
-CMAKE_OPTIONS += -DWITH_LibDBI:BOOL=OFF
-ifneq ($(INTL_FULL),)
-CMAKE_OPTIONS += -DLIBINTL_INCLUDE_DIRS:PATH=$(INTL_PREFIX)/include
-CMAKE_OPTIONS += -DLIBINTL_LIBRARIES:STRING=intl
-endif
-ifneq ($(ICONV_FULL),)
-CMAKE_OPTIONS += -DDISABLE_ICONV=OFF
-CMAKE_OPTIONS += -DICONV_INCLUDE_DIRS:PATH=$(ICONV_PREFIX)/include
-CMAKE_OPTIONS += -DICONV_LIBRARIES:STRING=iconv
-endif
+CMAKE_OPTIONS += \
+ -DWITH_LibDBI:BOOL=OFF
define Package/gammu/install
$(INSTALL_DIR) $(1)/usr/bin
local conffile="$CONFPFX-$conf.conf"
config_get_bool item "$conf" enabled 0
- [ $item = 0 ] && return
+ [ "$item" = 0 ] && return
- echo -e "; don't edit. instead look into /etc/config/gammu\n[gammu]\n" > $conffile
+ {
+ printf "; don't edit. instead look into /etc/config/gammu\n[gammu]\n\n"
config_get item "$conf" device
[ -z "$item" ] && return
- echo "device = $item" >> $conffile
+ echo "device = $item"
config_get item "$conf" connection
[ -z "$item" ] && return
- echo "connection = $item" >> $conffile
+ echo "connection = $item"
config_get item "$conf" logfile
[ -z "$item" ] && item=syslog
- echo "logfile = $item" >> $conffile
+ echo "logfile = $item"
- echo -e "\n[smsd]\nphoneid = $conf\nservice = files\nlogfile = $item" >> $conffile
- echo -e "checkbattery = 0\nhangupcalls = 1" >> $conffile
+ printf "\n[smsd]\nphoneid = %s\nservice = files\nlogfile = %s\n" "$conf" "$item"
+ printf "checkbattery = 0\nhangupcalls = 1\n"
config_get item "$conf" pin
- [ -n "$item" ] && echo "pin = $item" >> $conffile
-
+ [ -n "$item" ] && echo "pin = $item"
config_get item "$conf" phonecode
- [ -n "$item" ] && echo "phonecode = $item" >> $conffile
+ [ -n "$item" ] && echo "phonecode = $item"
config_get item "$conf" send
- [ -n "$item" ] && echo "send = $item" >> $conffile
+ [ -n "$item" ] && echo "send = $item"
config_get item "$conf" receive
- [ -n "$item" ] && echo "receive = $item" >> $conffile
+ [ -n "$item" ] && echo "receive = $item"
config_get item "$conf" timeout
- [ -n "$item" ] && echo "commtimeout = $item" >> $conffile
+ [ -n "$item" ] && echo "commtimeout = $item"
config_get item "$conf" sleep
- [ -n "$item" ] && echo "loopsleep = $item" >> $conffile
+ [ -n "$item" ] && echo "loopsleep = $item"
config_get item "$conf" report
- [ -n "$item" ] && echo "deliveryreport = $item" >> $conffile
+ [ -n "$item" ] && echo "deliveryreport = $item"
config_get item "$conf" onreceive
- [ -n "$item" ] && echo "runonreceive = $item" >> $conffile
+ [ -n "$item" ] && echo "runonreceive = $item"
config_get item "$conf" onerror
- [ -n "$item" ] && echo "runonfailure = $item" >> $conffile
+ [ -n "$item" ] && echo "runonfailure = $item"
config_get item "$conf" spool
[ -z "$item" ] && item=/var/sms
- [ ! -d $item ] && {
- mkdir $item
- mkdir $item/inbox
- mkdir $item/outbox
- mkdir $item/sent
- mkdir $item/error
+ [ -d "$item" ] || {
+ mkdir "$item"
+ mkdir "$item/inbox"
+ mkdir "$item/outbox"
+ mkdir "$item/sent"
+ mkdir "$item/error"
}
- echo -e "inboxpath = $item/inbox/\noutboxpath = $item/outbox/" >> $conffile
- echo -e "sentsmspath = $item/sent/\nerrorsmspath = $item/error/" >> $conffile
+ printf "inboxpath = %s/inbox/\noutboxpath = %s/outbox/\n" "$item" "$item"
+ printf "sentsmspath = %s/sent/\nerrorsmspath = %s/error/\n" "$item" "$item"
+ } > "$conffile"
config_get item "$conf" allow
[ -n "$item" ] && {
local data
local allowfile=$CONFPFX-$conf.allow
for data in $item ; do
- echo $data >> $allowfile
+ echo $data >> "$allowfile"
done
- echo "includenumbersfile = $allowfile" >> $conffile
+ echo "includenumbersfile = $allowfile" >> "$conffile"
}
config_get item "$conf" deny
local data
local denyfile=$CONFPFX-$conf.deny
for data in $item ; do
- echo $data >> $denyfile
+ echo $data >> "$denyfile"
done
- echo "excludenumbersfile = $denyfile" >> $conffile
+ echo "excludenumbersfile = $denyfile" >> "$conffile"
}
procd_open_instance
config_get_bool item "$conf" respawn 0
- [ $item != 0 ] && procd_set_param respawn
+ [ "$item" = 0 ] || procd_set_param respawn
- procd_set_param command $PROG --config=$conffile
+ procd_set_param command $PROG --config="$conffile"
config_get item "$conf" pidfile
[ -n "$item" ] && procd_append_param command --pid=$item
+++ /dev/null
---- a/cmake/FindIconv.cmake
-+++ b/cmake/FindIconv.cmake
-@@ -9,10 +9,10 @@
- include(CheckCCompilerFlag)
- include(CheckCSourceCompiles)
-
--IF (ICONV_INCLUDE_DIR AND ICONV_LIBRARIES)
-+IF(NOT DISABLE_ICONV AND ICONV_INCLUDE_DIR AND ICONV_LIBRARIES)
- # Already in cache, be silent
- SET(ICONV_FIND_QUIETLY TRUE)
--ENDIF (ICONV_INCLUDE_DIR AND ICONV_LIBRARIES)
-+ENDIF(NOT DISABLE_ICONV AND ICONV_INCLUDE_DIR AND ICONV_LIBRARIES)
-
- IF(APPLE)
- FIND_PATH(ICONV_INCLUDE_DIR iconv.h
---- a/configure
-+++ b/configure
-@@ -33,6 +33,7 @@ Usage: ./configure [options]
- --enable-backup enable backup support
- --enable-win32 enable mingw crosscomilation
- --enable-protection enable compile time protections
-+--disable-iconv disable iconv support
- --without-gnapplet disable installation of gnapplet
- --without-completion disable installation of bash completion script
-
-@@ -57,6 +58,7 @@ CMAKE_CROSS=
- CMAKE_PROTECTION=
- CMAKE_GNAP=
- CMAKE_COMPLETE=
-+CMAKE_ICONV=
-
- # process command line
- while [ "$#" -gt 0 ] ; do
-@@ -91,6 +93,12 @@ while [ "$#" -gt 0 ] ; do
- --disable-protection)
- CMAKE_PROTECTION="-DENABLE_PROTECTION=OFF"
- ;;
-+ --enable-iconv)
-+ CMAKE_ICONV="-DDISABLE_ICONV=OFF"
-+ ;;
-+ --disable-iconv)
-+ CMAKE_ICONV="-DDISABLE_ICONV=ON"
-+ ;;
- --enable-debug)
- CMAKE_DEBUG="-DCMAKE_BUILD_TYPE=Debug"
- ;;
-@@ -139,4 +147,4 @@ fi
- cd "$BUILD_DIR"
-
- # invoke cmake to do configuration
--cmake $SOURCE_DIR $CMAKE_PREFIX $CMAKE_SHARED $CMAKE_DEBUG $CMAKE_BACKUP $CMAKE_CROSS $CMAKE_PROTECTION $CMAKE_GNAP $CMAKE_COMPLETE
-+cmake $SOURCE_DIR $CMAKE_PREFIX $CMAKE_SHARED $CMAKE_DEBUG $CMAKE_BACKUP $CMAKE_CROSS $CMAKE_PROTECTION $CMAKE_GNAP $CMAKE_COMPLETE $CMAKE_ICONV
+++ /dev/null
---- /dev/null
-+++ b/cmake/Toolchain-cross.cmake
-@@ -0,0 +1,5 @@
-+# search for programs in the build host directories
-+SET(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
-+# for libraries and headers in the target directories
-+SET(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
-+SET(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
---- a/configure
-+++ b/configure
-@@ -27,6 +27,7 @@ Usage: ./configure [options]
-
- --help|-h shows this help
- --prefix=<path> installation prefix
-+--cross-root=<path> cross-compilation prefix
- --enable-shared enables shared build
- --enable-debug enables debug build
- --enable-tiger enables Mac OS X 10.4 (Tiger) build
-@@ -51,6 +52,7 @@ BUILD_DIR="$SOURCE_DIR/build-configure"
-
- # cmake parameters
- CMAKE_PREFIX=
-+CMAKE_ROOT=
- CMAKE_SHARED=
- CMAKE_DEBUG=
- CMAKE_BACKUP=
-@@ -69,6 +71,10 @@ while [ "$#" -gt 0 ] ; do
- --prefix=*)
- CMAKE_PREFIX="-DCMAKE_INSTALL_PREFIX=${1##--prefix=}"
- ;;
-+ --cross-root=*)
-+ CMAKE_ROOT="-DCMAKE_TOOLCHAIN_FILE=$SOURCE_DIR/cmake/Toolchain-cross.cmake"
-+ echo "SET(CMAKE_FIND_ROOT_PATH ${1##--cross-root=})" >> $SOURCE_DIR/cmake/Toolchain-cross.cmake
-+ ;;
- --enable-backup)
- CMAKE_BACKUP="-DWITH_BACKUP=ON"
- ;;
-@@ -147,4 +153,4 @@ fi
- cd "$BUILD_DIR"
-
- # invoke cmake to do configuration
--cmake $SOURCE_DIR $CMAKE_PREFIX $CMAKE_SHARED $CMAKE_DEBUG $CMAKE_BACKUP $CMAKE_CROSS $CMAKE_PROTECTION $CMAKE_GNAP $CMAKE_COMPLETE $CMAKE_ICONV
-+cmake $SOURCE_DIR $CMAKE_ROOT $CMAKE_PREFIX $CMAKE_SHARED $CMAKE_DEBUG $CMAKE_BACKUP $CMAKE_CROSS $CMAKE_PROTECTION $CMAKE_GNAP $CMAKE_COMPLETE $CMAKE_ICONV
+++ /dev/null
---- a/configure
-+++ b/configure
-@@ -37,6 +37,7 @@ Usage: ./configure [options]
- --disable-iconv disable iconv support
- --without-gnapplet disable installation of gnapplet
- --without-completion disable installation of bash completion script
-+--without-libdbi disable libdbi support
-
- All enable params have their disable counterparts.
-
-@@ -61,6 +62,7 @@ CMAKE_PROTECTION=
- CMAKE_GNAP=
- CMAKE_COMPLETE=
- CMAKE_ICONV=
-+CMAKE_LIBDBI=
-
- # process command line
- while [ "$#" -gt 0 ] ; do
-@@ -117,6 +119,9 @@ while [ "$#" -gt 0 ] ; do
- --without-completion)
- CMAKE_COMPLETE="-DINSTALL_BASH_COMPLETION=OFF"
- ;;
-+ --without-libdbi)
-+ CMAKE_LIBDBI="-DWITH_LibDBI=OFF"
-+ ;;
- --build=*)
- ;;
- --disable-dependency-tracking)
-@@ -153,4 +158,4 @@ fi
- cd "$BUILD_DIR"
-
- # invoke cmake to do configuration
--cmake $SOURCE_DIR $CMAKE_ROOT $CMAKE_PREFIX $CMAKE_SHARED $CMAKE_DEBUG $CMAKE_BACKUP $CMAKE_CROSS $CMAKE_PROTECTION $CMAKE_GNAP $CMAKE_COMPLETE $CMAKE_ICONV
-+cmake $SOURCE_DIR $CMAKE_ROOT $CMAKE_PREFIX $CMAKE_SHARED $CMAKE_DEBUG $CMAKE_BACKUP $CMAKE_CROSS $CMAKE_PROTECTION $CMAKE_GNAP $CMAKE_COMPLETE $CMAKE_ICONV $CMAKE_LIBDBI
PKG_NAME:=hfsprogs
PKG_VERSION:=332.25
-PKG_RELEASE:=3
-
-PKG_LICENSE:=GPL-3.0-or-later
-PKG_LICENSE_FILES:=COPYING
-
-PKG_MAINTAINER:=
+PKG_RELEASE:=2
PKG_SOURCE:=$(PKG_NAME)_$(PKG_VERSION).orig.tar.gz
PKG_SOURCE_URL:=http://archive.ubuntu.com/ubuntu/pool/universe/h/$(PKG_NAME)
-PKG_BUILD_DIR:=$(BUILD_DIR)/diskdev_cmds-$(PKG_VERSION)
PKG_HASH:=74c9aeca899ed7f4bf155c65fc45bf0f250c0f6d57360ea953b1d536d9aa45e6
-PKG_LICENSE:=APSL 2.0
+PKG_BUILD_DIR:=$(BUILD_DIR)/diskdev_cmds-$(PKG_VERSION)
+
+PKG_MAINTAINER:=
+PKG_LICENSE:=APSL-2.0
+PKG_LICENSE_FILES:=COPYING
include $(INCLUDE_DIR)/package.mk
include $(INCLUDE_DIR)/nls.mk
--- /dev/null
+--- a/fsck_hfs.tproj/fsck_hfs.h
++++ b/fsck_hfs.tproj/fsck_hfs.h
+@@ -24,6 +24,7 @@
+
+ #include "cache.h"
+
++#include <sys/cdefs.h>
+
+ const extern char *cdevname; /* name of device being checked */
+ extern char *progname;
+--- a/newfs_hfs.tproj/makehfs.c
++++ b/newfs_hfs.tproj/makehfs.c
+@@ -28,6 +28,7 @@
+
+ */
+
++#include <sys/cdefs.h>
+ #include <sys/param.h>
+ #include <sys/types.h>
+ #include <sys/time.h>
+--- a/newfs_hfs.tproj/newfs_hfs.c
++++ b/newfs_hfs.tproj/newfs_hfs.c
+@@ -34,6 +34,7 @@
+ #include <syslog.h>
+ #include <unistd.h>
+
++#include <sys/cdefs.h>
+ #include <sys/ioctl.h>
+ #include <sys/mount.h>
+ #include <sys/param.h>
PKG_BUILD_DIR:=$(BUILD_DIR)/$(BUILD_VARIANT)-i2c-tools-$(PKG_VERSION)
include $(INCLUDE_DIR)/package.mk
-include ../../lang/python/python-package.mk
include ../../lang/python/python3-package.mk
PKG_UNPACK:=$(HOST_TAR) -C $(PKG_BUILD_DIR) --strip-components=1 -xJf $(DL_DIR)/$(PKG_SOURCE)
VARIANT:=bin
endef
-define Package/python-smbus
- $(call Package/i2c/Default)
- SUBMENU:=Python
- SECTION:=lang
- CATEGORY:=Languages
- TITLE:=Python bindings for the SMBUS
- DEPENDS:=+PACKAGE_python-smbus:libi2c +PACKAGE_python-smbus:python-light
- VARIANT:=python
-endef
-
define Package/python3-smbus
$(call Package/i2c/Default)
SUBMENU:=Python
SECTION:=lang
CATEGORY:=Languages
TITLE:=Python bindings for the SMBUS
- DEPENDS:=+PACKAGE_python3-smbus:libi2c +PACKAGE_python3-smbus:python3-light
+ DEPENDS:=+libi2c +python3-light
VARIANT:=python3
endef
were originally part of the lm-sensors package.
endef
-define Package/python-smbus/description
- This package contain the python bindings for Linux SMBus access through i2c-dev.
-endef
-
define Package/python3-smbus/description
 This package contains the Python3 bindings for Linux SMBus access through i2c-dev.
endef
endif # ifeq
-PYTHON_PKG_SETUP_ARGS:=
PYTHON3_PKG_SETUP_ARGS:=
-
-PYTHON_PKG_SETUP_DIR:=py-smbus
PYTHON3_PKG_SETUP_DIR:=py-smbus
define Package/libi2c/install
$(eval $(call BuildPackage,libi2c))
$(eval $(call BuildPackage,i2c-tools))
-$(eval $(call PyPackage,python-smbus))
-$(eval $(call BuildPackage,python-smbus))
$(eval $(call Py3Package,python3-smbus))
$(eval $(call BuildPackage,python3-smbus))
PKG_NAME:=qemu
PKG_VERSION:=4.2.0
-PKG_RELEASE:=2
+PKG_RELEASE:=3
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.xz
PKG_HASH:=d3481d4108ce211a053ef15be69af1bdd9dde1510fda80d92be0f6c3e98768f0
PKG_SOURCE_URL:=http://download.qemu.org/
From d4fcdea769a4629c874ebe1801d83e854c94d5e4 Mon Sep 17 00:00:00 2001
From: Yousong Zhou <yszhou4tech@gmail.com>
Date: Sat, 24 Feb 2018 13:43:19 +0800
-Subject: [PATCH 1/5] configure: allow disable fortify_source
+Subject: [PATCH] configure: allow disable fortify_source
Tell build system of qemu to not add _FORTIFY_SOURCE options and let the
OpenWrt base build system decide flavor of fortify_source to use
From 2a6ab8342245c8dc2a09478d8eb0292e2dbcecf2 Mon Sep 17 00:00:00 2001
From: Yousong Zhou <yszhou4tech@gmail.com>
Date: Tue, 2 Apr 2019 06:31:31 +0000
-Subject: [PATCH 2/5] configure: allow enabling/disabling libudev from command
- line
+Subject: [PATCH] configure: allow enabling/disabling libudev from command line
---
configure | 4 ++++
From 296215421441b73bc6eb487f1d4e7e15e0510a77 Mon Sep 17 00:00:00 2001
From: Yousong Zhou <yszhou4tech@gmail.com>
Date: Fri, 7 Feb 2020 03:02:44 +0800
-Subject: [PATCH 3/5] configure: enable guest_agent no matter whether softmmu
- is enabled
+Subject: [PATCH] configure: enable guest_agent no matter whether softmmu is
+ enabled
guest_agent as a tool to be run on guest machines does not depend on
whether there is a softmmu is to be built at this configure/make run
From 3ac531a6bdeecbe40741a76f1dc2b7fa6c11f8ef Mon Sep 17 00:00:00 2001
From: Yousong Zhou <yszhou4tech@gmail.com>
Date: Sat, 24 Feb 2018 13:45:25 +0800
-Subject: [PATCH 4/5] disas: fix compilation failure when isnan is a macro
+Subject: [PATCH] disas: fix compilation failure when isnan is a macro
---
disas/libvixl/vixl/utils.h | 16 +++++++++++-----
From b6223a90ebbb5729e41b4fcb3bc9ac309ec04784 Mon Sep 17 00:00:00 2001
From: Yousong Zhou <yszhou4tech@gmail.com>
Date: Sat, 24 Feb 2018 13:46:31 +0800
-Subject: [PATCH 5/5] pc-bios: fix compilation when $(AS) is actually gcc
- driver
+Subject: [PATCH] pc-bios: fix compilation when $(AS) is actually gcc driver
---
pc-bios/optionrom/Makefile | 4 ++--
--- /dev/null
+From f385b623c14b5208df88b0be479a9ab30ab68c72 Mon Sep 17 00:00:00 2001
+From: Yousong Zhou <yszhou4tech@gmail.com>
+Date: Mon, 30 Mar 2020 12:48:58 +0800
+Subject: [PATCH] util/mmap-alloc: fix missing MAP_SYNC
+
+Quote musl-libc commit 9b57db3f958 ("add MAP_SYNC and
+MAP_SHARED_VALIDATE from linux v4.15")
+
+ > for synchronous page faults, new in linux commit
+ > 1c9725974074a047f6080eecc62c50a8e840d050 and
+ > b6fb293f2497a9841d94f6b57bd2bb2cd222da43
+ > note that only targets that use asm-generic/mman.h have this new
+ > flag defined, so undef it on other targets (mips*, powerpc*).
+
+Fixes 119906afa5c ("util/mmap-alloc: support MAP_SYNC in
+qemu_ram_mmap()")
+
+Signed-off-by: Yousong Zhou <yszhou4tech@gmail.com>
+---
+ util/mmap-alloc.c | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+diff --git a/util/mmap-alloc.c b/util/mmap-alloc.c
+index 27dcccd8ec..e133e38d21 100644
+--- a/util/mmap-alloc.c
++++ b/util/mmap-alloc.c
+@@ -12,9 +12,6 @@
+
+ #ifdef CONFIG_LINUX
+ #include <linux/mman.h>
+-#else /* !CONFIG_LINUX */
+-#define MAP_SYNC 0x0
+-#define MAP_SHARED_VALIDATE 0x0
+ #endif /* CONFIG_LINUX */
+
+ #include "qemu/osdep.h"
+@@ -27,6 +24,13 @@
+ #include <sys/vfs.h>
+ #endif
+
++#ifndef MAP_SYNC
++#define MAP_SYNC 0x0
++#endif
++#ifndef MAP_SHARED_VALIDATE
++#define MAP_SHARED_VALIDATE 0x0
++#endif
++
+ size_t qemu_fd_getpagesize(int fd)
+ {
+ #ifdef CONFIG_LINUX
include $(TOPDIR)/rules.mk
PKG_NAME:=xz
-PKG_VERSION:=5.2.4
-PKG_RELEASE:=5
+PKG_VERSION:=5.2.5
+PKG_RELEASE:=1
-PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.bz2
+PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.xz
PKG_SOURCE_URL:=@SF/lzmautils
-PKG_HASH:=3313fd2a95f43d88e44264e6b015e7d03053e681860b0d5d3f9baca79c57b7bf
+PKG_HASH:=3e1e518ffc912f86608a8cb35e4bd41ad1aec210df2a47aaa1f95e7f5576ef56
PKG_MAINTAINER:=
PKG_LICENSE:=Public-Domain LGPL-2.1-or-later GPL-2.0-or-later GPL-3.0-or-later
+++ /dev/null
-#
-# Copyright (C) 2006-2011 OpenWrt.org
-#
-# This is free software, licensed under the GNU General Public License v2.
-# See /LICENSE for more information.
-#
-
-include $(TOPDIR)/rules.mk
-
-PKG_NAME:=yunbridge
-PKG_VERSION:=1.6.0
-PKG_RELEASE:=1
-
-PKG_SOURCE_URL:=https://codeload.github.com/arduino/YunBridge/tar.gz/$(PKG_VERSION)?
-PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
-PKG_HASH:=9e5ffc7a0d0cc2c92e972e425adcc49b77cf39da075d31728e7755d316d910d8
-PKG_BUILD_DIR:=$(BUILD_DIR)/YunBridge-$(PKG_VERSION)
-
-PKG_MAINTAINER:=John Crispin <blogic@openwrt.org>
-PKG_LICENSE:=GPL-2.0
-
-include $(INCLUDE_DIR)/package.mk
-
-define Package/yunbridge
- SECTION:=utils
- CATEGORY:=Utilities
- TITLE:=Arduino YUN bridge library
- URL:=http://arduino.cc/
- DEPENDS:=+python
-endef
-
-define Package/yunbridge/description
- Arduino YUN bridge library
-endef
-
-define Build/Compile
- true
-endef
-
-define Package/yunbridge/install
- mkdir -p $(1)/usr/lib/python2.7/bridge
- $(CP) $(PKG_BUILD_DIR)/bridge/*.py $(1)/usr/lib/python2.7/bridge/
- $(CP) ./files/* $(1)
-endef
-
-$(eval $(call BuildPackage,yunbridge))
+++ /dev/null
-config bridge config
- option socket_timeout 5
- option secure_rest_api false
-
- # remove this line to activae the yunbridge
- option disabled 1
+++ /dev/null
-#!/bin/sh /etc/rc.common
-# Copyright (C) 2013 OpenWrt.org
-
-# start after and stop before networking
-START=20
-STOP=89
-
-USE_PROCD=1
-
-service_triggers()
-{
- procd_add_reload_trigger "yunbridge"
-}
-
-start_service()
-{
- [ "$(uci -q get yunbridge.config.disabled)" = "1" ] && return 0
- procd_open_instance
- procd_set_param command "/sbin/yunbridge"
- procd_set_param respawn
- procd_close_instance
-}
+++ /dev/null
-#!/bin/sh
-stty -F /dev/ttyS0 2500000 clocal cread cs8 -cstopb -parenb
-exec < /dev/ttyS0
-exec > /dev/ttyS0
-exec 2> /dev/ttyS0
-askfirst bin/ash --login
+++ /dev/null
-#!/usr/bin/lua
-
-local function get_basic_net_info(network, iface, accumulator)
- local net = network:get_network(iface)
- local device = net and net:get_interface()
-
- if device then
- accumulator["uptime"] = net:uptime()
- accumulator["iface"] = device:name()
- accumulator["mac"] = device:mac()
- accumulator["rx_bytes"] = device:rx_bytes()
- accumulator["tx_bytes"] = device:tx_bytes()
- accumulator["ipaddrs"] = {}
-
- for _, ipaddr in ipairs(device:ipaddrs()) do
- accumulator.ipaddrs[#accumulator.ipaddrs + 1] = {
- addr = ipaddr:host():string(),
- netmask = ipaddr:mask():string()
- }
- end
- end
-end
-
-local function get_wifi_info(network, iface, accumulator)
- local net = network:get_wifinet(iface)
-
- if net then
- local dev = net:get_device()
- if dev then
- accumulator["mode"] = net:active_mode()
- accumulator["ssid"] = net:active_ssid()
- accumulator["encryption"] = net:active_encryption()
- accumulator["quality"] = net:signal_percent()
- end
- end
-end
-
-local function collect_wifi_info()
- local network = require"luci.model.network".init()
- local accumulator = {}
- get_basic_net_info(network, "lan", accumulator)
- get_wifi_info(network, "wlan0", accumulator)
- return accumulator
-end
-
-local info = collect_wifi_info()
-
-print("Current WiFi configuration")
-if info.ssid then
- print("SSID: " .. info.ssid)
-end
-if info.mode then
- print("Mode: " .. info.mode)
-end
-if info.quality then
- print("Signal: " .. info.quality .. "%")
-end
-if info.encryption then
- print("Encryption method: " .. info.encryption)
-end
-if info.iface then
- print("Interface name: " .. info.iface)
-end
-if info.uptime then
- print("Active for: " .. math.floor(info.uptime / 60) .. " minutes")
-end
-if #info.ipaddrs > 0 then
- print("IP address: " .. info.ipaddrs[1].addr .. "/" .. info.ipaddrs[1].netmask)
-end
-if info.mac then
- print("MAC address: " .. info.mac)
-end
-if info.rx_bytes and info.tx_bytes then
- print("RX/TX: " .. math.floor(info.rx_bytes / 1024) .. "/" .. math.floor(info.tx_bytes / 1024) .. " KBs")
-end
+++ /dev/null
---[[
-This file is part of YunWebUI.
-
-YunWebUI is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; either version 2 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with this program; if not, write to the Free Software
-Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
-
-As a special exception, you may use this file as part of a free software
-library without restriction. Specifically, if other files instantiate
-templates or use macros or inline functions from this file, or you compile
-this file and link it with other files to produce an executable, this
-file does not by itself cause the resulting executable to be covered by
-the GNU General Public License. This exception does not however
-invalidate any other reasons why the executable file might be covered by
-the GNU General Public License.
-
-Copyright 2013 Arduino LLC (http://www.arduino.cc/)
-]]
-
-module("luci.controller.arduino.index", package.seeall)
-
-local function not_nil_or_empty(value)
- return value and value ~= ""
-end
-
-local function get_first(cursor, config, type, option)
- return cursor:get_first(config, type, option)
-end
-
-local function set_first(cursor, config, type, option, value)
- cursor:foreach(config, type, function(s)
- if s[".type"] == type then
- cursor:set(config, s[".name"], option, value)
- end
- end)
-end
-
-
-local function to_key_value(s)
- local parts = luci.util.split(s, ":")
- parts[1] = luci.util.trim(parts[1])
- parts[2] = luci.util.trim(parts[2])
- return parts[1], parts[2]
-end
-
-function http_error(code, text)
- luci.http.prepare_content("text/plain")
- luci.http.status(code)
- if text then
- luci.http.write(text)
- end
-end
-
-function index()
- function luci.dispatcher.authenticator.arduinoauth(validator, accs, default)
- require("luci.controller.arduino.index")
-
- local user = luci.http.formvalue("username")
- local pass = luci.http.formvalue("password")
- local basic_auth = luci.http.getenv("HTTP_AUTHORIZATION")
-
- if user and validator(user, pass) then
- return user
- end
-
- if basic_auth and basic_auth ~= "" then
- local decoded_basic_auth = nixio.bin.b64decode(string.sub(basic_auth, 7))
- user = string.sub(decoded_basic_auth, 0, string.find(decoded_basic_auth, ":") - 1)
- pass = string.sub(decoded_basic_auth, string.find(decoded_basic_auth, ":") + 1)
- end
-
- if user then
- if #pass ~= 64 and validator(user, pass) then
- return user
- elseif #pass == 64 then
- local uci = luci.model.uci.cursor()
- uci:load("yunbridge")
- local stored_encrypted_pass = uci:get_first("yunbridge", "bridge", "password")
- if pass == stored_encrypted_pass then
- return user
- end
- end
- end
-
- luci.http.header("WWW-Authenticate", "Basic realm=\"yunbridge\"")
- luci.http.status(401)
-
- return false
- end
-
- local function make_entry(path, target, title, order)
- local page = entry(path, target, title, order)
- page.leaf = true
- return page
- end
-
- -- web panel
- local webpanel = entry({ "webpanel" }, alias("webpanel", "go_to_homepage"), _("%s Web Panel") % luci.sys.hostname(), 10)
- webpanel.sysauth = "root"
- webpanel.sysauth_authenticator = "arduinoauth"
-
- make_entry({ "webpanel", "go_to_homepage" }, call("go_to_homepage"), nil)
-
- --api security level
- local uci = luci.model.uci.cursor()
- uci:load("yunbridge")
- local secure_rest_api = uci:get_first("yunbridge", "bridge", "secure_rest_api")
- local rest_api_sysauth = false
- if secure_rest_api == "true" then
- rest_api_sysauth = webpanel.sysauth
- end
-
- --storage api
- local data_api = node("data")
- data_api.sysauth = rest_api_sysauth
- data_api.sysauth_authenticator = webpanel.sysauth_authenticator
- make_entry({ "data", "get" }, call("storage_send_request"), nil).sysauth = rest_api_sysauth
- make_entry({ "data", "put" }, call("storage_send_request"), nil).sysauth = rest_api_sysauth
- make_entry({ "data", "delete" }, call("storage_send_request"), nil).sysauth = rest_api_sysauth
- local mailbox_api = node("mailbox")
- mailbox_api.sysauth = rest_api_sysauth
- mailbox_api.sysauth_authenticator = webpanel.sysauth_authenticator
- make_entry({ "mailbox" }, call("build_bridge_mailbox_request"), nil).sysauth = rest_api_sysauth
-
- --plain socket endpoint
- local plain_socket_endpoint = make_entry({ "arduino" }, call("board_plain_socket"), nil)
- plain_socket_endpoint.sysauth = rest_api_sysauth
- plain_socket_endpoint.sysauth_authenticator = webpanel.sysauth_authenticator
-end
-
-function go_to_homepage()
- luci.http.redirect("/index.html")
-end
-
-local function build_bridge_request(command, params)
-
- local bridge_request = {
- command = command
- }
-
- if command == "raw" then
- params = table.concat(params, "/")
- if not_nil_or_empty(params) then
- bridge_request["data"] = params
- end
- return bridge_request
- end
-
- if command == "get" then
- if not_nil_or_empty(params[1]) then
- bridge_request["key"] = params[1]
- end
- return bridge_request
- end
-
- if command == "put" and not_nil_or_empty(params[1]) and params[2] then
- bridge_request["key"] = params[1]
- bridge_request["value"] = params[2]
- return bridge_request
- end
-
- if command == "delete" and not_nil_or_empty(params[1]) then
- bridge_request["key"] = params[1]
- return bridge_request
- end
-
- return nil
-end
-
-local function extract_jsonp_param(query_string)
- if not not_nil_or_empty(query_string) then
- return nil
- end
-
- local qs_parts = string.split(query_string, "&")
- for idx, value in ipairs(qs_parts) do
- if string.find(value, "jsonp") == 1 or string.find(value, "callback") == 1 then
- return string.sub(value, string.find(value, "=") + 1)
- end
- end
-end
-
-local function parts_after(url_part)
- local url = luci.http.getenv("PATH_INFO")
- local url_after_part = string.find(url, "/", string.find(url, url_part) + 1)
- if not url_after_part then
- return {}
- end
- return luci.util.split(string.sub(url, url_after_part + 1), "/")
-end
-
-function storage_send_request()
- local method = luci.http.getenv("REQUEST_METHOD")
- local jsonp_callback = extract_jsonp_param(luci.http.getenv("QUERY_STRING"))
- local parts = parts_after("data")
- local command = parts[1]
- if not command or command == "" then
- luci.http.status(404)
- return
- end
- local params = {}
- for idx, param in ipairs(parts) do
- if idx > 1 and not_nil_or_empty(param) then
- table.insert(params, param)
- end
- end
-
- -- TODO check method?
- local bridge_request = build_bridge_request(command, params)
- if not bridge_request then
- luci.http.status(403)
- return
- end
-
- local uci = luci.model.uci.cursor()
- uci:load("yunbridge")
- local socket_timeout = uci:get_first("yunbridge", "bridge", "socket_timeout", 5)
-
- local sock, code, msg = nixio.connect("127.0.0.1", 5700)
- if not sock then
- code = code or ""
- msg = msg or ""
- http_error(500, "nil socket, " .. code .. " " .. msg)
- return
- end
-
- sock:setopt("socket", "sndtimeo", socket_timeout)
- sock:setopt("socket", "rcvtimeo", socket_timeout)
- sock:setopt("tcp", "nodelay", 1)
-
- local json = require("luci.json")
-
- sock:write(json.encode(bridge_request))
- sock:writeall("\n")
-
- local response_text = {}
- while true do
- local bytes = sock:recv(4096)
- if bytes and #bytes > 0 then
- table.insert(response_text, bytes)
- end
-
- local json_response = json.decode(table.concat(response_text))
- if json_response then
- sock:close()
- luci.http.status(200)
- if jsonp_callback then
- luci.http.prepare_content("application/javascript")
- luci.http.write(jsonp_callback)
- luci.http.write("(")
- luci.http.write_json(json_response)
- luci.http.write(");")
- else
- luci.http.prepare_content("application/json")
- luci.http.write(json.encode(json_response))
- end
- return
- end
-
- if not bytes or #response_text == 0 then
- sock:close()
- http_error(500, "Empty response")
- return
- end
- end
-
- sock:close()
-end
-
-function board_plain_socket()
- local function send_response(response_text, jsonp_callback)
- if not response_text then
- luci.http.status(500)
- return
- end
-
- local rows = luci.util.split(response_text, "\r\n")
- if #rows == 1 or string.find(rows[1], "Status") ~= 1 then
- luci.http.prepare_content("text/plain")
- luci.http.status(200)
- luci.http.write(response_text)
- return
- end
-
- local body_start_at_idx = -1
- local content_type = "text/plain"
- for idx, row in ipairs(rows) do
- if row == "" then
- body_start_at_idx = idx
- break
- end
-
- local key, value = to_key_value(row)
- if string.lower(key) == "status" then
- luci.http.status(tonumber(value))
- elseif string.lower(key) == "content-type" then
- content_type = value
- else
- luci.http.header(key, value)
- end
- end
-
- local response_body = table.concat(rows, "\r\n", body_start_at_idx + 1)
- if content_type == "application/json" and jsonp_callback then
- local json = require("luci.json")
- luci.http.prepare_content("application/javascript")
- luci.http.write(jsonp_callback)
- luci.http.write("(")
- luci.http.write_json(json.decode(response_body))
- luci.http.write(");")
- else
- luci.http.prepare_content(content_type)
- luci.http.write(response_body)
- end
- end
-
- local method = luci.http.getenv("REQUEST_METHOD")
- local jsonp_callback = extract_jsonp_param(luci.http.getenv("QUERY_STRING"))
- local parts = parts_after("arduino")
- local params = {}
- for idx, param in ipairs(parts) do
- if not_nil_or_empty(param) then
- table.insert(params, param)
- end
- end
-
- if #params == 0 then
- luci.http.status(404)
- return
- end
-
- params = table.concat(params, "/")
-
- local uci = luci.model.uci.cursor()
- uci:load("yunbridge")
- local socket_timeout = uci:get_first("yunbridge", "bridge", "socket_timeout", 5)
-
- local sock, code, msg = nixio.connect("127.0.0.1", 5555)
- if not sock then
- code = code or ""
- msg = msg or ""
- http_error(500, "Could not connect to YunServer " .. code .. " " .. msg)
- return
- end
-
- sock:setopt("socket", "sndtimeo", socket_timeout)
- sock:setopt("socket", "rcvtimeo", socket_timeout)
- sock:setopt("tcp", "nodelay", 1)
-
- sock:write(params)
- sock:writeall("\r\n")
-
- local response_text = sock:readall()
- sock:close()
-
- send_response(response_text, jsonp_callback)
-end
-
-function build_bridge_mailbox_request()
- local method = luci.http.getenv("REQUEST_METHOD")
- local jsonp_callback = extract_jsonp_param(luci.http.getenv("QUERY_STRING"))
- local parts = parts_after("mailbox")
- local params = {}
- for idx, param in ipairs(parts) do
- if not_nil_or_empty(param) then
- table.insert(params, param)
- end
- end
-
- if #params == 0 then
- luci.http.status(400)
- return
- end
-
- local bridge_request = build_bridge_request("raw", params)
- if not bridge_request then
- luci.http.status(403)
- return
- end
-
- local uci = luci.model.uci.cursor()
- uci:load("yunbridge")
- local socket_timeout = uci:get_first("yunbridge", "bridge", "socket_timeout", 5)
-
- local sock, code, msg = nixio.connect("127.0.0.1", 5700)
- if not sock then
- code = code or ""
- msg = msg or ""
- http_error(500, "nil socket, " .. code .. " " .. msg)
- return
- end
-
- sock:setopt("socket", "sndtimeo", socket_timeout)
- sock:setopt("socket", "rcvtimeo", socket_timeout)
- sock:setopt("tcp", "nodelay", 1)
-
- local json = require("luci.json")
-
- sock:write(json.encode(bridge_request))
- sock:writeall("\n")
- sock:close()
-
- luci.http.status(200)
-end
+++ /dev/null
---
--- Code merged by gravityscore at http://pastebin.com/gsFrNjbt
---
--- Adaptation of the Secure Hashing Algorithm (SHA-244/256)
--- Found Here: http://lua-users.org/wiki/SecureHashAlgorithm
---
--- Using an adapted version of the bit library
--- Found Here: https://bitbucket.org/Boolsheet/bslf/src/1ee664885805/bit.lua
---
-
-module("luci.sha256", package.seeall)
-
-local MOD = 2 ^ 32
-local MODM = MOD - 1
-
-local function memoize(f)
- local mt = {}
- local t = setmetatable({}, mt)
- function mt:__index(k)
- local v = f(k)
- t[k] = v
- return v
- end
-
- return t
-end
-
-local function make_bitop_uncached(t, m)
- local function bitop(a, b)
- local res, p = 0, 1
- while a ~= 0 and b ~= 0 do
- local am, bm = a % m, b % m
- res = res + t[am][bm] * p
- a = (a - am) / m
- b = (b - bm) / m
- p = p * m
- end
- res = res + (a + b) * p
- return res
- end
-
- return bitop
-end
-
-local function make_bitop(t)
- local op1 = make_bitop_uncached(t, 2 ^ 1)
- local op2 = memoize(function(a) return memoize(function(b) return op1(a, b) end) end)
- return make_bitop_uncached(op2, 2 ^ (t.n or 1))
-end
-
-local bxor1 = make_bitop({ [0] = { [0] = 0, [1] = 1 }, [1] = { [0] = 1, [1] = 0 }, n = 4 })
-
-local function bxor(a, b, c, ...)
- local z = nil
- if b then
- a = a % MOD
- b = b % MOD
- z = bxor1(a, b)
- if c then z = bxor(z, c, ...) end
- return z
- elseif a then return a % MOD
- else return 0
- end
-end
-
-local function band(a, b, c, ...)
- local z
- if b then
- a = a % MOD
- b = b % MOD
- z = ((a + b) - bxor1(a, b)) / 2
- if c then z = bit32_band(z, c, ...) end
- return z
- elseif a then return a % MOD
- else return MODM
- end
-end
-
-local function bnot(x) return (-1 - x) % MOD end
-
-local function rshift1(a, disp)
- if disp < 0 then return lshift(a, -disp) end
- return math.floor(a % 2 ^ 32 / 2 ^ disp)
-end
-
-local function rshift(x, disp)
- if disp > 31 or disp < -31 then return 0 end
- return rshift1(x % MOD, disp)
-end
-
-local function lshift(a, disp)
- if disp < 0 then return rshift(a, -disp) end
- return (a * 2 ^ disp) % 2 ^ 32
-end
-
-local function rrotate(x, disp)
- x = x % MOD
- disp = disp % 32
- local low = band(x, 2 ^ disp - 1)
- return rshift(x, disp) + lshift(low, 32 - disp)
-end
-
-local k = {
- 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
- 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
- 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
- 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
- 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
- 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
- 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
- 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
- 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
- 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
- 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
- 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
- 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
- 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
- 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
- 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2,
-}
-
-local function str2hexa(s)
- return (string.gsub(s, ".", function(c) return string.format("%02x", string.byte(c)) end))
-end
-
-local function num2s(l, n)
- local s = ""
- for i = 1, n do
- local rem = l % 256
- s = string.char(rem) .. s
- l = (l - rem) / 256
- end
- return s
-end
-
-local function s232num(s, i)
- local n = 0
- for i = i, i + 3 do n = n * 256 + string.byte(s, i) end
- return n
-end
-
-local function preproc(msg, len)
- local extra = 64 - ((len + 9) % 64)
- len = num2s(8 * len, 8)
- msg = msg .. "\128" .. string.rep("\0", extra) .. len
- assert(#msg % 64 == 0)
- return msg
-end
-
-local function initH256(H)
- H[1] = 0x6a09e667
- H[2] = 0xbb67ae85
- H[3] = 0x3c6ef372
- H[4] = 0xa54ff53a
- H[5] = 0x510e527f
- H[6] = 0x9b05688c
- H[7] = 0x1f83d9ab
- H[8] = 0x5be0cd19
- return H
-end
-
-local function digestblock(msg, i, H)
- local w = {}
- for j = 1, 16 do w[j] = s232num(msg, i + (j - 1) * 4) end
- for j = 17, 64 do
- local v = w[j - 15]
- local s0 = bxor(rrotate(v, 7), rrotate(v, 18), rshift(v, 3))
- v = w[j - 2]
- w[j] = w[j - 16] + s0 + w[j - 7] + bxor(rrotate(v, 17), rrotate(v, 19), rshift(v, 10))
- end
-
- local a, b, c, d, e, f, g, h = H[1], H[2], H[3], H[4], H[5], H[6], H[7], H[8]
- for i = 1, 64 do
- local s0 = bxor(rrotate(a, 2), rrotate(a, 13), rrotate(a, 22))
- local maj = bxor(band(a, b), band(a, c), band(b, c))
- local t2 = s0 + maj
- local s1 = bxor(rrotate(e, 6), rrotate(e, 11), rrotate(e, 25))
- local ch = bxor(band(e, f), band(bnot(e), g))
- local t1 = h + s1 + ch + k[i] + w[i]
- h, g, f, e, d, c, b, a = g, f, e, d + t1, c, b, a, t1 + t2
- end
-
- H[1] = band(H[1] + a)
- H[2] = band(H[2] + b)
- H[3] = band(H[3] + c)
- H[4] = band(H[4] + d)
- H[5] = band(H[5] + e)
- H[6] = band(H[6] + f)
- H[7] = band(H[7] + g)
- H[8] = band(H[8] + h)
-end
-
-function sha256(msg)
- msg = preproc(msg, #msg)
- local H = initH256({})
- for i = 1, #msg, 64 do digestblock(msg, i, H) end
- return str2hexa(num2s(H[1], 4) .. num2s(H[2], 4) .. num2s(H[3], 4) .. num2s(H[4], 4) ..
- num2s(H[5], 4) .. num2s(H[6], 4) .. num2s(H[7], 4) .. num2s(H[8], 4))
-end
\ No newline at end of file
+++ /dev/null
---- a/bridge/packet.py
-+++ b/bridge/packet.py
-@@ -93,12 +93,12 @@
-
- def run(self, data):
- if data[0] != 'X':
-- call(['/usr/bin/blink-start', '100'])
-+ #call(['/usr/bin/blink-start', '100'])
- return chr(1)
- if data[1:4] != '100':
-- call(['/usr/bin/blink-start', '100'])
-+ #call(['/usr/bin/blink-start', '100'])
- return chr(2)
-- call(['/usr/bin/blink-stop'])
-+ #call(['/usr/bin/blink-stop'])
- return chr(0) + '160' # send the actual bridge version
-
- class PacketReader: