Compare commits
158 Commits
Author | SHA1 | Date |
---|---|---|
Frank Denis | 0059194a9e | |
Frank Denis | 35d7aa0603 | |
Frank Denis | 8dadd61730 | |
dependabot[bot] | f7da81cf29 | |
Frank Denis | 0efce55895 | |
Frank Denis | 5a1d94d506 | |
Frank Denis | 271943c158 | |
Frank Denis | 34a1f2ebf5 | |
Frank Denis | f8ce22d9b9 | |
Frank Denis | 249dba391d | |
Frank Denis | 987ae216e3 | |
Frank Denis | 7fba32651b | |
Frank Denis | 6ae388e646 | |
Frank Denis | 0af88bc875 | |
Frank Denis | d36edeb612 | |
Frank Denis | 041a6c7d7f | |
cuibuwei | 2c6416d5ae | |
Frank Denis | 4d1cd67d4d | |
Frank Denis | 363d44919f | |
Frank Denis | a88076d06f | |
Frank Denis | 119bc0b660 | |
Robert Edmonds | 49000cd4f4 | |
Frank Denis | ec46e09360 | |
dependabot[bot] | ea5808e024 | |
Frank Denis | 79a1aa8325 | |
Alison Winters | 4c442f5dbb | |
Alison Winters | f7e13502c0 | |
YX Hao | 8d43ce9b56 | |
YX Hao | ac5087315c | |
Frank Denis | ad80d81d43 | |
dependabot[bot] | a7fb13ba4e | |
Frank Denis | 006619837f | |
Frank Denis | 093936f7ab | |
Frank Denis | 7462961980 | |
Frank Denis | 0b559bb54f | |
Frank Denis | 658835b4ff | |
Frank Denis | 90c3017793 | |
dependabot[bot] | e371138b86 | |
Frank Denis | bcbf2db4ff | |
YX Hao | 64fa90839c | |
Frank Denis | f2484f5bd5 | |
Frank Denis | 63f8d9b30d | |
Xiaotong Liu | 49e3570c2c | |
lifenjoiner | 3be53642fe | |
YX Hao | 13e7077200 | |
Frank Denis | f5912d7ca9 | |
dependabot[bot] | 0196d7d2ab | |
Frank Denis | 898ded9c52 | |
Frank Denis | e782207911 | |
Frank Denis | 0f1f635ec1 | |
keatonLiu | 956a14ee21 | |
dependabot[bot] | 22731786a2 | |
Frank Denis | 42b6ae9052 | |
YX Hao | 0d5e52bb16 | |
Frank Denis | 0ba728b6ce | |
Frank Denis | cb80bf33e8 | |
Frank Denis | 88207560a7 | |
Jeffrey Damick | 4a361dbb05 | |
Frank Denis | b37a5c991a | |
Frank Denis | 0232870870 | |
Frank Denis | 1a9bf8a286 | |
Frank Denis | 7fb58720fb | |
Frank Denis | f85b3e81ec | |
Frank Denis | 79779cf744 | |
Frank Denis | 8bea679e7b | |
Frank Denis | 96f21f1bff | |
dependabot[bot] | 21097686c1 | |
Frank Denis | 87571d4a7f | |
Frank Denis | f531c8fffb | |
Frank Denis | 5ae83c1592 | |
Frank Denis | c86e9a90cc | |
Frank Denis | d48c811ea9 | |
Frank Denis | f2b1edcec2 | |
Frank Denis | 1b65fe62b0 | |
Frank Denis | 194752e829 | |
Frank Denis | 808f2dfa0e | |
Frank Denis | 7dd79d5f96 | |
Frank Denis | 5088d8fae1 | |
Frank Denis | aff09648bb | |
Frank Denis | 7bca9a6c0a | |
Frank Denis | 98d0938815 | |
Frank Denis | 50780421a8 | |
RadhaKrishna | be7d5d1277 | |
Frank Denis | c3dd761b81 | |
Frank Denis | d8aec47a72 | |
Frank Denis | cfd6ced134 | |
Frank Denis | bdf27330c9 | |
Frank Denis | a108d048d8 | |
Frank Denis | afcfd566c9 | |
Frank Denis | ce55d1c5bb | |
Frank Denis | 2481fbebd7 | |
Frank Denis | 32aad7bb34 | |
Frank Denis | 7033f242c0 | |
Frank Denis | 2675d73b13 | |
Frank Denis | 5085a22903 | |
Frank Denis | 7cc5a051c7 | |
Frank Denis | 894d20191f | |
Frank Denis | 0a98be94a7 | |
Frank Denis | 1792c06bc7 | |
Expertcoderz | 63e414021b | |
Frank Denis | d659a801c2 | |
Frank Denis | a4eda39563 | |
Expertcoderz | 4114f032c3 | |
Frank Denis | a352a3035c | |
Frank Denis | 60684f8ee4 | |
YX Hao | be369a1f7a | |
YX Hao | 89ccc59f0e | |
Frank Denis | 16b2c84147 | |
Carlo Teubner | b46775ae0c | |
Frank Denis | cef4b041d7 | |
Carlo Teubner | d8b1f4e7cd | |
Frank Denis | 23a6cd7504 | |
Frank Denis | f42b7dad17 | |
Frank Denis | 4f3ce0cbae | |
Frank Denis | 0f1e3b4ba8 | |
Frank Denis | 62ef5c9d02 | |
Frank Denis | f9f68cf0a3 | |
Frank Denis | 0c26d1637a | |
Frank Denis | 9f86ffdd1e | |
lifenjoiner | 9b2c674744 | |
Frank Denis | d381af5510 | |
Frank Denis | c66023c7d7 | |
Frank Denis | 5b8e7d4114 | |
KOLANICH | f4007f709d | |
lifenjoiner | dd1c066724 | |
lifenjoiner | 5d551e54ce | |
Thad Guidry | fbc7817366 | |
Frank Denis | 9b61b73852 | |
Frank Denis | af6340df09 | |
Frank Denis | 9c73ab3070 | |
Frank Denis | ea3625bcfd | |
Frank Denis | f567f57150 | |
Frank Denis | c03f1a31eb | |
Frank Denis | c3c51bb435 | |
Frank Denis | 0f30b3b028 | |
lifenjoiner | 6d826afac5 | |
Frank Denis | b341c21dbd | |
Frank Denis | 92ed5b95e0 | |
Frank Denis | b898e07066 | |
dependabot[bot] | 92063aa76d | |
dependabot[bot] | 4be5264529 | |
Frank Denis | 13d78c042b | |
Frank Denis | 36c17eb59a | |
Frank Denis | b9f8f78c6e | |
Frank Denis | fc16e3c31c | |
lifenjoiner | b3318a94b7 | |
Frank Denis | ca0f353087 | |
Frank Denis | cf7d60a704 | |
Frank Denis | a47f7fe750 | |
Frank Denis | beb002335f | |
Frank Denis | 15c87a68a1 | |
Frank Denis | 47e6a56b16 | |
Frank Denis | 03c6f92a5f | |
lifenjoiner | 24a301b1af | |
lifenjoiner | a8d1c2fd24 | |
Frank Denis | 96ffb21228 | |
Frank Denis | acc25fcefb | |
Frank Denis | 07b4ec33c5 |
|
@ -0,0 +1,58 @@
|
||||||
|
#! /bin/sh
|
||||||
|
|
||||||
|
PACKAGE_VERSION="$1"
|
||||||
|
|
||||||
|
cd dnscrypt-proxy || exit 1
|
||||||
|
|
||||||
|
# setup the environment
|
||||||
|
|
||||||
|
sudo apt-get update -y
|
||||||
|
sudo apt-get install -y wget wine dotnet-sdk-6.0
|
||||||
|
sudo dpkg --add-architecture i386 && sudo apt-get update && sudo apt-get install -y wine32
|
||||||
|
|
||||||
|
sudo apt-get install -y unzip
|
||||||
|
|
||||||
|
export WINEPREFIX="$HOME"/.wine32
|
||||||
|
export WINEARCH=win32
|
||||||
|
export WINEDEBUG=-all
|
||||||
|
|
||||||
|
wget https://dl.winehq.org/wine/wine-mono/8.1.0/wine-mono-8.1.0-x86.msi
|
||||||
|
WINEPREFIX="$HOME/.wine32" WINEARCH=win32 wineboot --init
|
||||||
|
WINEPREFIX="$HOME/.wine32" WINEARCH=win32 wine msiexec /i wine-mono-8.1.0-x86.msi
|
||||||
|
|
||||||
|
mkdir "$HOME"/.wine32/drive_c/temp
|
||||||
|
mkdir -p "$HOME"/.wine/drive_c/temp
|
||||||
|
wget https://github.com/wixtoolset/wix3/releases/download/wix3112rtm/wix311-binaries.zip -nv -O wix.zip
|
||||||
|
|
||||||
|
unzip wix.zip -d "$HOME"/wix
|
||||||
|
rm -f wix.zip
|
||||||
|
|
||||||
|
builddir=$(pwd)
|
||||||
|
srcdir=$(
|
||||||
|
cd ..
|
||||||
|
pwd
|
||||||
|
)
|
||||||
|
version=$PACKAGE_VERSION
|
||||||
|
|
||||||
|
cd "$HOME"/wix || exit
|
||||||
|
|
||||||
|
ln -s "$builddir" "$HOME"/wix/build
|
||||||
|
ln -s "$srcdir"/contrib/msi "$HOME"/wix/wixproj
|
||||||
|
echo "builddir: $builddir"
|
||||||
|
|
||||||
|
# build the msi's
|
||||||
|
#################
|
||||||
|
for arch in x64 x86; do
|
||||||
|
binpath="win32"
|
||||||
|
if [ "$arch" = "x64" ]; then
|
||||||
|
binpath="win64"
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo $arch
|
||||||
|
|
||||||
|
wine candle.exe -dVersion="$version" -dPlatform=$arch -dPath=build\\$binpath -arch $arch wixproj\\dnscrypt.wxs -out build\\dnscrypt-$arch.wixobj
|
||||||
|
wine light.exe -out build\\dnscrypt-proxy-$arch-"$version".msi build\\dnscrypt-$arch.wixobj -sval
|
||||||
|
|
||||||
|
done
|
||||||
|
|
||||||
|
cd "$builddir" || exit
|
|
@ -66,11 +66,11 @@ t || dig -p${DNS_PORT} +dnssec darpa.mil @127.0.0.1 2>&1 | grep -Fvq 'RRSIG' ||
|
||||||
t || dig -p${DNS_PORT} +dnssec www.darpa.mil @127.0.0.1 2>&1 | grep -Fvq 'RRSIG' || fail
|
t || dig -p${DNS_PORT} +dnssec www.darpa.mil @127.0.0.1 2>&1 | grep -Fvq 'RRSIG' || fail
|
||||||
|
|
||||||
section
|
section
|
||||||
t || dig -p${DNS_PORT} +short cloaked.com @127.0.0.1 | grep -Eq '1.1.1.1|1.0.0.1' || fail
|
t || dig -p${DNS_PORT} +short cloakedunregistered.com @127.0.0.1 | grep -Eq '1.1.1.1|1.0.0.1' || fail
|
||||||
t || dig -p${DNS_PORT} +short MX cloaked.com @127.0.0.1 | grep -Fq 'locally blocked' || fail
|
t || dig -p${DNS_PORT} +short MX cloakedunregistered.com @127.0.0.1 | grep -Fq 'locally blocked' || fail
|
||||||
t || dig -p${DNS_PORT} +short MX example.com @127.0.0.1 | grep -Fvq 'locally blocked' || fail
|
t || dig -p${DNS_PORT} +short MX example.com @127.0.0.1 | grep -Fvq 'locally blocked' || fail
|
||||||
t || dig -p${DNS_PORT} NS cloaked.com @127.0.0.1 | grep -Fiq 'gtld-servers.net' || fail
|
t || dig -p${DNS_PORT} NS cloakedunregistered.com @127.0.0.1 | grep -Fiq 'gtld-servers.net' || fail
|
||||||
t || dig -p${DNS_PORT} +short www.cloaked2.com @127.0.0.1 | grep -Eq '1.1.1.1|1.0.0.1' || fail
|
t || dig -p${DNS_PORT} +short www.cloakedunregistered2.com @127.0.0.1 | grep -Eq '1.1.1.1|1.0.0.1' || fail
|
||||||
t || dig -p${DNS_PORT} +short www.dnscrypt-test @127.0.0.1 | grep -Fq '192.168.100.100' || fail
|
t || dig -p${DNS_PORT} +short www.dnscrypt-test @127.0.0.1 | grep -Fq '192.168.100.100' || fail
|
||||||
t || dig -p${DNS_PORT} a.www.dnscrypt-test @127.0.0.1 | grep -Fq 'NXDOMAIN' || fail
|
t || dig -p${DNS_PORT} a.www.dnscrypt-test @127.0.0.1 | grep -Fq 'NXDOMAIN' || fail
|
||||||
t || dig -p${DNS_PORT} +short ptr 101.100.168.192.in-addr.arpa. @127.0.0.1 | grep -Eq 'www.dnscrypt-test.com' || fail
|
t || dig -p${DNS_PORT} +short ptr 101.100.168.192.in-addr.arpa. @127.0.0.1 | grep -Eq 'www.dnscrypt-test.com' || fail
|
||||||
|
@ -122,8 +122,8 @@ t || grep -Eq 'invalid.*SYNTH' query.log || fail
|
||||||
t || grep -Eq '168.192.in-addr.arpa.*SYNTH' query.log || fail
|
t || grep -Eq '168.192.in-addr.arpa.*SYNTH' query.log || fail
|
||||||
t || grep -Eq 'darpa.mil.*FORWARD' query.log || fail
|
t || grep -Eq 'darpa.mil.*FORWARD' query.log || fail
|
||||||
t || grep -Eq 'www.darpa.mil.*FORWARD' query.log || fail
|
t || grep -Eq 'www.darpa.mil.*FORWARD' query.log || fail
|
||||||
t || grep -Eq 'cloaked.com.*CLOAK' query.log || fail
|
t || grep -Eq 'cloakedunregistered.com.*CLOAK' query.log || fail
|
||||||
t || grep -Eq 'www.cloaked2.com.*CLOAK' query.log || fail
|
t || grep -Eq 'www.cloakedunregistered2.com.*CLOAK' query.log || fail
|
||||||
t || grep -Eq 'www.dnscrypt-test.*CLOAK' query.log || fail
|
t || grep -Eq 'www.dnscrypt-test.*CLOAK' query.log || fail
|
||||||
t || grep -Eq 'a.www.dnscrypt-test.*NXDOMAIN' query.log || fail
|
t || grep -Eq 'a.www.dnscrypt-test.*NXDOMAIN' query.log || fail
|
||||||
t || grep -Eq 'telemetry.example.*REJECT' query.log || fail
|
t || grep -Eq 'telemetry.example.*REJECT' query.log || fail
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
cloaked.* one.one.one.one
|
cloakedunregistered.* one.one.one.one
|
||||||
*.cloaked2.* one.one.one.one # inline comment
|
*.cloakedunregistered2.* one.one.one.one # inline comment
|
||||||
=www.dnscrypt-test 192.168.100.100
|
=www.dnscrypt-test 192.168.100.100
|
||||||
=www.dnscrypt-test.com 192.168.100.101
|
=www.dnscrypt-test.com 192.168.100.101
|
||||||
=ipv6.dnscrypt-test.com fd02::1
|
=ipv6.dnscrypt-test.com fd02::1
|
||||||
|
|
|
@ -9,7 +9,7 @@ file = 'query.log'
|
||||||
stamp = 'sdns://BQcAAAAAAAAADm9kb2guY3J5cHRvLnN4Ci9kbnMtcXVlcnk'
|
stamp = 'sdns://BQcAAAAAAAAADm9kb2guY3J5cHRvLnN4Ci9kbnMtcXVlcnk'
|
||||||
|
|
||||||
[static.'odohrelay']
|
[static.'odohrelay']
|
||||||
stamp = 'sdns://hQcAAAAAAAAAACCi3jNJDEdtNW4tvHN8J3lpIklSa2Wrj7qaNCgEgci9_BpvZG9oLXJlbGF5LmVkZ2Vjb21wdXRlLmFwcAEv'
|
stamp = 'sdns://hQcAAAAAAAAADDg5LjM4LjEzMS4zOAAYb2RvaC1ubC5hbGVrYmVyZy5uZXQ6NDQzBi9wcm94eQ'
|
||||||
|
|
||||||
[anonymized_dns]
|
[anonymized_dns]
|
||||||
routes = [
|
routes = [
|
||||||
|
|
|
@ -13,9 +13,7 @@ cache = true
|
||||||
[query_log]
|
[query_log]
|
||||||
file = 'query.log'
|
file = 'query.log'
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
[static]
|
[static]
|
||||||
|
|
||||||
[static.'myserver']
|
[static.'myserver']
|
||||||
stamp = 'sdns://AQcAAAAAAAAADjIxMi40Ny4yMjguMTM2IOgBuE6mBr-wusDOQ0RbsV66ZLAvo8SqMa4QY2oHkDJNHzIuZG5zY3J5cHQtY2VydC5mci5kbnNjcnlwdC5vcmc'
|
stamp = 'sdns://AQcAAAAAAAAADjIxMi40Ny4yMjguMTM2IOgBuE6mBr-wusDOQ0RbsV66ZLAvo8SqMa4QY2oHkDJNHzIuZG5zY3J5cHQtY2VydC5mci5kbnNjcnlwdC5vcmc'
|
||||||
|
|
|
@ -13,15 +13,20 @@ jobs:
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout repository
|
- name: Checkout repository
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v4
|
||||||
with:
|
with:
|
||||||
fetch-depth: 2
|
fetch-depth: 2
|
||||||
|
|
||||||
|
- name: Setup Go
|
||||||
|
uses: actions/setup-go@v5
|
||||||
|
with:
|
||||||
|
go-version-file: 'go.mod'
|
||||||
|
|
||||||
- name: Initialize CodeQL
|
- name: Initialize CodeQL
|
||||||
uses: github/codeql-action/init@v2
|
uses: github/codeql-action/init@v3
|
||||||
|
|
||||||
- name: Autobuild
|
- name: Autobuild
|
||||||
uses: github/codeql-action/autobuild@v2
|
uses: github/codeql-action/autobuild@v3
|
||||||
|
|
||||||
- name: Perform CodeQL Analysis
|
- name: Perform CodeQL Analysis
|
||||||
uses: github/codeql-action/analyze@v2
|
uses: github/codeql-action/analyze@v3
|
||||||
|
|
|
@ -27,19 +27,19 @@ jobs:
|
||||||
id: get_version
|
id: get_version
|
||||||
run: echo "VERSION=${GITHUB_REF/refs\/tags\//}" >> $GITHUB_OUTPUT
|
run: echo "VERSION=${GITHUB_REF/refs\/tags\//}" >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
|
- name: Check out code
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
- name: Set up Go
|
- name: Set up Go
|
||||||
uses: actions/setup-go@v3
|
uses: actions/setup-go@v5
|
||||||
with:
|
with:
|
||||||
go-version: 1
|
go-version: 1
|
||||||
|
check-latest: true
|
||||||
id: go
|
id: go
|
||||||
|
|
||||||
- name: Check out code into the Go module directory
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
|
|
||||||
- name: Test suite
|
- name: Test suite
|
||||||
run: |
|
run: |
|
||||||
go version
|
go version
|
||||||
go mod vendor
|
|
||||||
cd .ci
|
cd .ci
|
||||||
./ci-test.sh
|
./ci-test.sh
|
||||||
cd -
|
cd -
|
||||||
|
@ -49,6 +49,11 @@ jobs:
|
||||||
run: |
|
run: |
|
||||||
.ci/ci-build.sh "${{ steps.get_version.outputs.VERSION }}"
|
.ci/ci-build.sh "${{ steps.get_version.outputs.VERSION }}"
|
||||||
|
|
||||||
|
- name: Package
|
||||||
|
if: startsWith(github.ref, 'refs/tags/')
|
||||||
|
run: |
|
||||||
|
.ci/ci-package.sh "${{ steps.get_version.outputs.VERSION }}"
|
||||||
|
|
||||||
- name: Install minisign and sign
|
- name: Install minisign and sign
|
||||||
if: startsWith(github.ref, 'refs/tags/')
|
if: startsWith(github.ref, 'refs/tags/')
|
||||||
run: |
|
run: |
|
||||||
|
@ -78,7 +83,7 @@ jobs:
|
||||||
prerelease: false
|
prerelease: false
|
||||||
|
|
||||||
- name: Upload release assets
|
- name: Upload release assets
|
||||||
uses: softprops/action-gh-release@d4e8205d7e959a9107da6396278b2f1f07af0f9b
|
uses: softprops/action-gh-release@69320dbe05506a9a39fc8ae11030b214ec2d1f87
|
||||||
if: startsWith(github.ref, 'refs/tags/')
|
if: startsWith(github.ref, 'refs/tags/')
|
||||||
env:
|
env:
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
@ -87,3 +92,4 @@ jobs:
|
||||||
dnscrypt-proxy/*.zip
|
dnscrypt-proxy/*.zip
|
||||||
dnscrypt-proxy/*.tar.gz
|
dnscrypt-proxy/*.tar.gz
|
||||||
dnscrypt-proxy/*.minisig
|
dnscrypt-proxy/*.minisig
|
||||||
|
dnscrypt-proxy/*.msi
|
||||||
|
|
|
@ -6,7 +6,7 @@ jobs:
|
||||||
Scan-Build:
|
Scan-Build:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v4
|
||||||
|
|
||||||
- name: Perform ShiftLeft Scan
|
- name: Perform ShiftLeft Scan
|
||||||
uses: ShiftLeftSecurity/scan-action@master
|
uses: ShiftLeftSecurity/scan-action@master
|
||||||
|
@ -18,6 +18,6 @@ jobs:
|
||||||
output: reports
|
output: reports
|
||||||
|
|
||||||
- name: Upload report
|
- name: Upload report
|
||||||
uses: github/codeql-action/upload-sarif@v2
|
uses: github/codeql-action/upload-sarif@v3
|
||||||
with:
|
with:
|
||||||
sarif_file: reports
|
sarif_file: reports
|
||||||
|
|
|
@ -14,3 +14,6 @@ dnscrypt-proxy/dnscrypt-proxy
|
||||||
.ci/*.md
|
.ci/*.md
|
||||||
.ci/*.md.minisig
|
.ci/*.md.minisig
|
||||||
.ci/test-dnscrypt-proxy.toml
|
.ci/test-dnscrypt-proxy.toml
|
||||||
|
contrib/msi/*.msi
|
||||||
|
contrib/msi/*.wixpdb
|
||||||
|
contrib/msi/*.wixobj
|
||||||
|
|
|
@ -1,3 +1,11 @@
|
||||||
|
# Version 2.1.5
|
||||||
|
- dnscrypt-proxy can be compiled with Go 1.21.0+
|
||||||
|
- Responses to blocked queries now include extended error codes
|
||||||
|
- Reliability of connections using HTTP/3 has been improved
|
||||||
|
- New configuration directive: `tls_key_log_file`. When defined, this
|
||||||
|
is the path to a file where TLS secret keys will be written to, so
|
||||||
|
that DoH traffic can be locally inspected.
|
||||||
|
|
||||||
# Version 2.1.4
|
# Version 2.1.4
|
||||||
- Fixes a regression from version 2.1.3: when cloaking was enabled,
|
- Fixes a regression from version 2.1.3: when cloaking was enabled,
|
||||||
blocked responses were returned for records that were not A/AAAA/PTR
|
blocked responses were returned for records that were not A/AAAA/PTR
|
||||||
|
|
|
@ -0,0 +1,21 @@
|
||||||
|
FROM ubuntu:latest
|
||||||
|
MAINTAINER dnscrypt-authors
|
||||||
|
|
||||||
|
RUN apt-get update && \
|
||||||
|
apt-get install -y wget wine dotnet-sdk-6.0 && \
|
||||||
|
dpkg --add-architecture i386 && apt-get update && apt-get install -y wine32
|
||||||
|
|
||||||
|
|
||||||
|
ENV WINEPREFIX=/root/.wine32 WINEARCH=win32 WINEDEBUG=-all
|
||||||
|
|
||||||
|
RUN wget https://dl.winehq.org/wine/wine-mono/8.1.0/wine-mono-8.1.0-x86.msi && \
|
||||||
|
WINEPREFIX="$HOME/.wine32" WINEARCH=win32 wineboot --init && \
|
||||||
|
WINEPREFIX="$HOME/.wine32" WINEARCH=win32 wine msiexec /i wine-mono-8.1.0-x86.msi && \
|
||||||
|
mkdir $WINEPREFIX/drive_c/temp && \
|
||||||
|
apt-get install -y unzip && \
|
||||||
|
wget https://github.com/wixtoolset/wix3/releases/download/wix3112rtm/wix311-binaries.zip -nv -O wix.zip && \
|
||||||
|
unzip wix.zip -d /wix && \
|
||||||
|
rm -f wix.zip
|
||||||
|
|
||||||
|
WORKDIR /wix
|
||||||
|
|
|
@ -0,0 +1,13 @@
|
||||||
|
# Scripts and utilities related to building an .msi (Microsoft Standard Installer) file.
|
||||||
|
|
||||||
|
## Docker test image for building an MSI locally
|
||||||
|
|
||||||
|
```sh
|
||||||
|
docker build . -f Dockerfile -t ubuntu:dnscrypt-msi
|
||||||
|
```
|
||||||
|
|
||||||
|
## Test building msi files for intel win32 & win64
|
||||||
|
|
||||||
|
```sh
|
||||||
|
./build.sh
|
||||||
|
```
|
|
@ -0,0 +1,30 @@
|
||||||
|
#! /bin/sh
|
||||||
|
|
||||||
|
version=0.0.0
|
||||||
|
gitver=$(git describe --tags --always --match="[0-9]*.[0-9]*.[0-9]*" --exclude='*[^0-9.]*')
|
||||||
|
if [ "$gitver" != "" ]; then
|
||||||
|
version=$gitver
|
||||||
|
fi
|
||||||
|
|
||||||
|
# build the image by running: docker build . -f Dockerfile -t ubuntu:dnscrypt-msi
|
||||||
|
if [ "$(docker image list -q ubuntu:dnscrypt-msi)" = "" ]; then
|
||||||
|
docker build . -f Dockerfile -t ubuntu:dnscrypt-msi
|
||||||
|
fi
|
||||||
|
|
||||||
|
image=ubuntu:dnscrypt-msi
|
||||||
|
|
||||||
|
for arch in x64 x86; do
|
||||||
|
binpath="win32"
|
||||||
|
if [ "$arch" = "x64" ]; then
|
||||||
|
binpath="win64"
|
||||||
|
fi
|
||||||
|
src=$(
|
||||||
|
cd ../../dnscrypt-proxy/$binpath || exit
|
||||||
|
pwd
|
||||||
|
)
|
||||||
|
echo "$src"
|
||||||
|
|
||||||
|
docker run --rm -v "$(pwd)":/wixproj -v "$src":/src $image wine candle.exe -dVersion="$version" -dPlatform=$arch -dPath=\\src -arch $arch \\wixproj\\dnscrypt.wxs -out \\wixproj\\dnscrypt-$arch.wixobj
|
||||||
|
docker run --rm -v "$(pwd)":/wixproj -v "$src":/src $image wine light.exe -out \\wixproj\\dnscrypt-proxy-$arch-"$version".msi \\wixproj\\dnscrypt-$arch.wixobj -sval
|
||||||
|
|
||||||
|
done
|
|
@ -0,0 +1,60 @@
|
||||||
|
<?xml version="1.0"?>
|
||||||
|
|
||||||
|
<?if $(var.Platform)="x64" ?>
|
||||||
|
<?define Program_Files="ProgramFiles64Folder"?>
|
||||||
|
<?else ?>
|
||||||
|
<?define Program_Files="ProgramFilesFolder"?>
|
||||||
|
<?endif ?>
|
||||||
|
<?ifndef var.Version?>
|
||||||
|
<?error Undefined Version variable?>
|
||||||
|
<?endif ?>
|
||||||
|
<?ifndef var.Path?>
|
||||||
|
<?error Undefined Path variable?>
|
||||||
|
<?endif ?>
|
||||||
|
|
||||||
|
<Wix xmlns="http://schemas.microsoft.com/wix/2006/wi">
|
||||||
|
<Product Id="*"
|
||||||
|
UpgradeCode="fbf99dd8-c21e-4f9b-a632-de53bb64c45e"
|
||||||
|
Name="dnscrypt-proxy"
|
||||||
|
Version="$(var.Version)"
|
||||||
|
Manufacturer="DNSCrypt"
|
||||||
|
Language="1033">
|
||||||
|
|
||||||
|
<Package InstallerVersion="200" Compressed="yes" Comments="Windows Installer Package" InstallScope="perMachine" />
|
||||||
|
<Media Id="1" Cabinet="product.cab" EmbedCab="yes" />
|
||||||
|
<MajorUpgrade DowngradeErrorMessage="A later version of [ProductName] is already installed. Setup will now exit." />
|
||||||
|
|
||||||
|
<Upgrade Id="fbf99dd8-c21e-4f9b-a632-de53bb64c45e">
|
||||||
|
<UpgradeVersion Minimum="$(var.Version)" OnlyDetect="yes" Property="NEWERVERSIONDETECTED" />
|
||||||
|
<UpgradeVersion Minimum="2.1.0" Maximum="$(var.Version)" IncludeMinimum="yes" IncludeMaximum="no" Property="OLDERVERSIONBEINGUPGRADED" />
|
||||||
|
</Upgrade>
|
||||||
|
<Condition Message="A newer version of this software is already installed.">NOT NEWERVERSIONDETECTED</Condition>
|
||||||
|
|
||||||
|
<Directory Id="TARGETDIR" Name="SourceDir">
|
||||||
|
<Directory Id="$(var.Program_Files)">
|
||||||
|
<Directory Id="INSTALLDIR" Name="DNSCrypt">
|
||||||
|
<Component Id="ApplicationFiles" Guid="7d693c0b-71d8-436a-9c84-60a11dc74092">
|
||||||
|
<File Id="dnscryptproxy.exe" KeyPath="yes" Source="$(var.Path)\dnscrypt-proxy.exe" DiskId="1"/>
|
||||||
|
<File Source="$(var.Path)\LICENSE"></File>
|
||||||
|
<File Source="$(var.Path)\service-install.bat"></File>
|
||||||
|
<File Source="$(var.Path)\service-restart.bat"></File>
|
||||||
|
<File Source="$(var.Path)\service-uninstall.bat"></File>
|
||||||
|
<File Source="$(var.Path)\example-dnscrypt-proxy.toml"></File>
|
||||||
|
</Component>
|
||||||
|
<Component Id="ConfigInstall" Guid="db7b691e-f7c7-4c9a-92e1-c6f21ce6430f" KeyPath="yes">
|
||||||
|
<Condition><![CDATA[CONFIGFILE]]></Condition>
|
||||||
|
<CopyFile Id="dnscryptproxytoml" DestinationDirectory="INSTALLDIR" DestinationName="dnscrypt-proxy.toml" SourceProperty="CONFIGFILE">
|
||||||
|
</CopyFile>
|
||||||
|
<RemoveFile Id="RemoveConfig" Directory="INSTALLDIR" Name="dnscrypt-proxy.toml" On="uninstall" />
|
||||||
|
</Component>
|
||||||
|
</Directory>
|
||||||
|
</Directory>
|
||||||
|
</Directory>
|
||||||
|
|
||||||
|
<Feature Id="Complete" Level="1">
|
||||||
|
<ComponentRef Id="ApplicationFiles" />
|
||||||
|
<ComponentRef Id="ConfigInstall" />
|
||||||
|
</Feature>
|
||||||
|
|
||||||
|
</Product>
|
||||||
|
</Wix>
|
|
@ -4,6 +4,7 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"net"
|
"net"
|
||||||
"strings"
|
"strings"
|
||||||
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/jedisct1/dlog"
|
"github.com/jedisct1/dlog"
|
||||||
|
@ -15,17 +16,13 @@ type CaptivePortalEntryIPs []net.IP
|
||||||
type CaptivePortalMap map[string]CaptivePortalEntryIPs
|
type CaptivePortalMap map[string]CaptivePortalEntryIPs
|
||||||
|
|
||||||
type CaptivePortalHandler struct {
|
type CaptivePortalHandler struct {
|
||||||
|
wg sync.WaitGroup
|
||||||
cancelChannel chan struct{}
|
cancelChannel chan struct{}
|
||||||
countChannel chan struct{}
|
|
||||||
channelCount int
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (captivePortalHandler *CaptivePortalHandler) Stop() {
|
func (captivePortalHandler *CaptivePortalHandler) Stop() {
|
||||||
close(captivePortalHandler.cancelChannel)
|
close(captivePortalHandler.cancelChannel)
|
||||||
for len(captivePortalHandler.countChannel) < captivePortalHandler.channelCount {
|
captivePortalHandler.wg.Wait()
|
||||||
time.Sleep(time.Millisecond)
|
|
||||||
}
|
|
||||||
close(captivePortalHandler.countChannel)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ipsMap *CaptivePortalMap) GetEntry(msg *dns.Msg) (*dns.Question, *CaptivePortalEntryIPs) {
|
func (ipsMap *CaptivePortalMap) GetEntry(msg *dns.Msg) (*dns.Question, *CaptivePortalEntryIPs) {
|
||||||
|
@ -120,24 +117,29 @@ func handleColdStartClient(clientPc *net.UDPConn, cancelChannel chan struct{}, i
|
||||||
}
|
}
|
||||||
|
|
||||||
func addColdStartListener(
|
func addColdStartListener(
|
||||||
proxy *Proxy,
|
|
||||||
ipsMap *CaptivePortalMap,
|
ipsMap *CaptivePortalMap,
|
||||||
listenAddrStr string,
|
listenAddrStr string,
|
||||||
captivePortalHandler *CaptivePortalHandler,
|
captivePortalHandler *CaptivePortalHandler,
|
||||||
) error {
|
) error {
|
||||||
listenUDPAddr, err := net.ResolveUDPAddr("udp", listenAddrStr)
|
network := "udp"
|
||||||
|
isIPv4 := isDigit(listenAddrStr[0])
|
||||||
|
if isIPv4 {
|
||||||
|
network = "udp4"
|
||||||
|
}
|
||||||
|
listenUDPAddr, err := net.ResolveUDPAddr(network, listenAddrStr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
clientPc, err := net.ListenUDP("udp", listenUDPAddr)
|
clientPc, err := net.ListenUDP(network, listenUDPAddr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
captivePortalHandler.wg.Add(1)
|
||||||
go func() {
|
go func() {
|
||||||
for !handleColdStartClient(clientPc, captivePortalHandler.cancelChannel, ipsMap) {
|
for !handleColdStartClient(clientPc, captivePortalHandler.cancelChannel, ipsMap) {
|
||||||
}
|
}
|
||||||
clientPc.Close()
|
clientPc.Close()
|
||||||
captivePortalHandler.countChannel <- struct{}{}
|
captivePortalHandler.wg.Done()
|
||||||
}()
|
}()
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -146,13 +148,13 @@ func ColdStart(proxy *Proxy) (*CaptivePortalHandler, error) {
|
||||||
if len(proxy.captivePortalMapFile) == 0 {
|
if len(proxy.captivePortalMapFile) == 0 {
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
bin, err := ReadTextFile(proxy.captivePortalMapFile)
|
lines, err := ReadTextFile(proxy.captivePortalMapFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
dlog.Warn(err)
|
dlog.Warn(err)
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
ipsMap := make(CaptivePortalMap)
|
ipsMap := make(CaptivePortalMap)
|
||||||
for lineNo, line := range strings.Split(string(bin), "\n") {
|
for lineNo, line := range strings.Split(lines, "\n") {
|
||||||
line = TrimAndStripInlineComments(line)
|
line = TrimAndStripInlineComments(line)
|
||||||
if len(line) == 0 {
|
if len(line) == 0 {
|
||||||
continue
|
continue
|
||||||
|
@ -185,14 +187,17 @@ func ColdStart(proxy *Proxy) (*CaptivePortalHandler, error) {
|
||||||
listenAddrStrs := proxy.listenAddresses
|
listenAddrStrs := proxy.listenAddresses
|
||||||
captivePortalHandler := CaptivePortalHandler{
|
captivePortalHandler := CaptivePortalHandler{
|
||||||
cancelChannel: make(chan struct{}),
|
cancelChannel: make(chan struct{}),
|
||||||
countChannel: make(chan struct{}, len(listenAddrStrs)),
|
|
||||||
channelCount: 0,
|
|
||||||
}
|
}
|
||||||
|
ok := false
|
||||||
for _, listenAddrStr := range listenAddrStrs {
|
for _, listenAddrStr := range listenAddrStrs {
|
||||||
if err := addColdStartListener(proxy, &ipsMap, listenAddrStr, &captivePortalHandler); err == nil {
|
err = addColdStartListener(&ipsMap, listenAddrStr, &captivePortalHandler)
|
||||||
captivePortalHandler.channelCount++
|
if err == nil {
|
||||||
|
ok = true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if ok {
|
||||||
|
err = nil
|
||||||
|
}
|
||||||
proxy.captivePortalMap = &ipsMap
|
proxy.captivePortalMap = &ipsMap
|
||||||
return &captivePortalHandler, nil
|
return &captivePortalHandler, err
|
||||||
}
|
}
|
||||||
|
|
|
@ -6,9 +6,12 @@ import (
|
||||||
"errors"
|
"errors"
|
||||||
"net"
|
"net"
|
||||||
"os"
|
"os"
|
||||||
|
"path"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"unicode"
|
"unicode"
|
||||||
|
|
||||||
|
"github.com/jedisct1/dlog"
|
||||||
)
|
)
|
||||||
|
|
||||||
type CryptoConstruction uint16
|
type CryptoConstruction uint16
|
||||||
|
@ -46,11 +49,6 @@ const (
|
||||||
InheritedDescriptorsBase = uintptr(50)
|
InheritedDescriptorsBase = uintptr(50)
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
|
||||||
IPv4Arpa = "in-addr.arpa"
|
|
||||||
IPv6Arpa = "ip6.arpa"
|
|
||||||
)
|
|
||||||
|
|
||||||
func PrefixWithSize(packet []byte) ([]byte, error) {
|
func PrefixWithSize(packet []byte) ([]byte, error) {
|
||||||
packetLen := len(packet)
|
packetLen := len(packet)
|
||||||
if packetLen > 0xffff {
|
if packetLen > 0xffff {
|
||||||
|
@ -167,3 +165,33 @@ func ReadTextFile(filename string) (string, error) {
|
||||||
bin = bytes.TrimPrefix(bin, []byte{0xef, 0xbb, 0xbf})
|
bin = bytes.TrimPrefix(bin, []byte{0xef, 0xbb, 0xbf})
|
||||||
return string(bin), nil
|
return string(bin), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func isDigit(b byte) bool { return b >= '0' && b <= '9' }
|
||||||
|
|
||||||
|
func maybeWritableByOtherUsers(p string) (bool, string, error) {
|
||||||
|
p = path.Clean(p)
|
||||||
|
for p != "/" && p != "." {
|
||||||
|
st, err := os.Stat(p)
|
||||||
|
if err != nil {
|
||||||
|
return false, p, err
|
||||||
|
}
|
||||||
|
mode := st.Mode()
|
||||||
|
if mode.Perm()&2 != 0 && !(st.IsDir() && mode&os.ModeSticky == os.ModeSticky) {
|
||||||
|
return true, p, nil
|
||||||
|
}
|
||||||
|
p = path.Dir(p)
|
||||||
|
}
|
||||||
|
return false, "", nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func WarnIfMaybeWritableByOtherUsers(p string) {
|
||||||
|
if ok, px, err := maybeWritableByOtherUsers(p); ok {
|
||||||
|
if px == p {
|
||||||
|
dlog.Criticalf("[%s] is writable by other system users - If this is not intentional, it is recommended to fix the access permissions", p)
|
||||||
|
} else {
|
||||||
|
dlog.Warnf("[%s] can be modified by other system users because [%s] is writable by other users - If this is not intentional, it is recommended to fix the access permissions", p, px)
|
||||||
|
}
|
||||||
|
} else if err != nil {
|
||||||
|
dlog.Warnf("Error while checking if [%s] is accessible: [%s] : [%s]", p, px, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
|
@ -42,6 +42,7 @@ type Config struct {
|
||||||
Timeout int `toml:"timeout"`
|
Timeout int `toml:"timeout"`
|
||||||
KeepAlive int `toml:"keepalive"`
|
KeepAlive int `toml:"keepalive"`
|
||||||
Proxy string `toml:"proxy"`
|
Proxy string `toml:"proxy"`
|
||||||
|
CertRefreshConcurrency int `toml:"cert_refresh_concurrency"`
|
||||||
CertRefreshDelay int `toml:"cert_refresh_delay"`
|
CertRefreshDelay int `toml:"cert_refresh_delay"`
|
||||||
CertIgnoreTimestamp bool `toml:"cert_ignore_timestamp"`
|
CertIgnoreTimestamp bool `toml:"cert_ignore_timestamp"`
|
||||||
EphemeralKeys bool `toml:"dnscrypt_ephemeral_keys"`
|
EphemeralKeys bool `toml:"dnscrypt_ephemeral_keys"`
|
||||||
|
@ -92,6 +93,7 @@ type Config struct {
|
||||||
LogMaxBackups int `toml:"log_files_max_backups"`
|
LogMaxBackups int `toml:"log_files_max_backups"`
|
||||||
TLSDisableSessionTickets bool `toml:"tls_disable_session_tickets"`
|
TLSDisableSessionTickets bool `toml:"tls_disable_session_tickets"`
|
||||||
TLSCipherSuite []uint16 `toml:"tls_cipher_suite"`
|
TLSCipherSuite []uint16 `toml:"tls_cipher_suite"`
|
||||||
|
TLSKeyLogFile string `toml:"tls_key_log_file"`
|
||||||
NetprobeAddress string `toml:"netprobe_address"`
|
NetprobeAddress string `toml:"netprobe_address"`
|
||||||
NetprobeTimeout int `toml:"netprobe_timeout"`
|
NetprobeTimeout int `toml:"netprobe_timeout"`
|
||||||
OfflineMode bool `toml:"offline_mode"`
|
OfflineMode bool `toml:"offline_mode"`
|
||||||
|
@ -115,6 +117,7 @@ func newConfig() Config {
|
||||||
LocalDoH: LocalDoHConfig{Path: "/dns-query"},
|
LocalDoH: LocalDoHConfig{Path: "/dns-query"},
|
||||||
Timeout: 5000,
|
Timeout: 5000,
|
||||||
KeepAlive: 5,
|
KeepAlive: 5,
|
||||||
|
CertRefreshConcurrency: 10,
|
||||||
CertRefreshDelay: 240,
|
CertRefreshDelay: 240,
|
||||||
HTTP3: false,
|
HTTP3: false,
|
||||||
CertIgnoreTimestamp: false,
|
CertIgnoreTimestamp: false,
|
||||||
|
@ -143,6 +146,7 @@ func newConfig() Config {
|
||||||
LogMaxBackups: 1,
|
LogMaxBackups: 1,
|
||||||
TLSDisableSessionTickets: false,
|
TLSDisableSessionTickets: false,
|
||||||
TLSCipherSuite: nil,
|
TLSCipherSuite: nil,
|
||||||
|
TLSKeyLogFile: "",
|
||||||
NetprobeTimeout: 60,
|
NetprobeTimeout: 60,
|
||||||
OfflineMode: false,
|
OfflineMode: false,
|
||||||
RefusedCodeInResponses: false,
|
RefusedCodeInResponses: false,
|
||||||
|
@ -257,7 +261,7 @@ type ServerSummary struct {
|
||||||
IPv6 bool `json:"ipv6"`
|
IPv6 bool `json:"ipv6"`
|
||||||
Addrs []string `json:"addrs,omitempty"`
|
Addrs []string `json:"addrs,omitempty"`
|
||||||
Ports []int `json:"ports"`
|
Ports []int `json:"ports"`
|
||||||
DNSSEC bool `json:"dnssec"`
|
DNSSEC *bool `json:"dnssec,omitempty"`
|
||||||
NoLog bool `json:"nolog"`
|
NoLog bool `json:"nolog"`
|
||||||
NoFilter bool `json:"nofilter"`
|
NoFilter bool `json:"nofilter"`
|
||||||
Description string `json:"description,omitempty"`
|
Description string `json:"description,omitempty"`
|
||||||
|
@ -288,6 +292,7 @@ type ConfigFlags struct {
|
||||||
Resolve *string
|
Resolve *string
|
||||||
List *bool
|
List *bool
|
||||||
ListAll *bool
|
ListAll *bool
|
||||||
|
IncludeRelays *bool
|
||||||
JSONOutput *bool
|
JSONOutput *bool
|
||||||
Check *bool
|
Check *bool
|
||||||
ConfigFile *string
|
ConfigFile *string
|
||||||
|
@ -321,6 +326,7 @@ func ConfigLoad(proxy *Proxy, flags *ConfigFlags) error {
|
||||||
*flags.ConfigFile,
|
*flags.ConfigFile,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
WarnIfMaybeWritableByOtherUsers(foundConfigFile)
|
||||||
config := newConfig()
|
config := newConfig()
|
||||||
md, err := toml.DecodeFile(foundConfigFile, &config)
|
md, err := toml.DecodeFile(foundConfigFile, &config)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -434,6 +440,7 @@ func ConfigLoad(proxy *Proxy, flags *ConfigFlags) error {
|
||||||
if config.ForceTCP {
|
if config.ForceTCP {
|
||||||
proxy.mainProto = "tcp"
|
proxy.mainProto = "tcp"
|
||||||
}
|
}
|
||||||
|
proxy.certRefreshConcurrency = Max(1, config.CertRefreshConcurrency)
|
||||||
proxy.certRefreshDelay = time.Duration(Max(60, config.CertRefreshDelay)) * time.Minute
|
proxy.certRefreshDelay = time.Duration(Max(60, config.CertRefreshDelay)) * time.Minute
|
||||||
proxy.certRefreshDelayAfterFailure = time.Duration(10 * time.Second)
|
proxy.certRefreshDelayAfterFailure = time.Duration(10 * time.Second)
|
||||||
proxy.certIgnoreTimestamp = config.CertIgnoreTimestamp
|
proxy.certIgnoreTimestamp = config.CertIgnoreTimestamp
|
||||||
|
@ -628,6 +635,16 @@ func ConfigLoad(proxy *Proxy, flags *ConfigFlags) error {
|
||||||
proxy.skipAnonIncompatibleResolvers = config.AnonymizedDNS.SkipIncompatible
|
proxy.skipAnonIncompatibleResolvers = config.AnonymizedDNS.SkipIncompatible
|
||||||
proxy.anonDirectCertFallback = config.AnonymizedDNS.DirectCertFallback
|
proxy.anonDirectCertFallback = config.AnonymizedDNS.DirectCertFallback
|
||||||
|
|
||||||
|
if len(config.TLSKeyLogFile) > 0 {
|
||||||
|
f, err := os.OpenFile(config.TLSKeyLogFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o600)
|
||||||
|
if err != nil {
|
||||||
|
dlog.Fatalf("Unable to create key log file [%s]: [%s]", config.TLSKeyLogFile, err)
|
||||||
|
}
|
||||||
|
dlog.Warnf("TLS key log file [%s] enabled", config.TLSKeyLogFile)
|
||||||
|
proxy.xTransport.keyLogWriter = f
|
||||||
|
proxy.xTransport.rebuildTransport()
|
||||||
|
}
|
||||||
|
|
||||||
if config.DoHClientX509AuthLegacy.Creds != nil {
|
if config.DoHClientX509AuthLegacy.Creds != nil {
|
||||||
return errors.New("[tls_client_auth] has been renamed to [doh_client_x509_auth] - Update your config file")
|
return errors.New("[tls_client_auth] has been renamed to [doh_client_x509_auth] - Update your config file")
|
||||||
}
|
}
|
||||||
|
@ -730,7 +747,7 @@ func ConfigLoad(proxy *Proxy, flags *ConfigFlags) error {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if *flags.List || *flags.ListAll {
|
if *flags.List || *flags.ListAll {
|
||||||
if err := config.printRegisteredServers(proxy, *flags.JSONOutput); err != nil {
|
if err := config.printRegisteredServers(proxy, *flags.JSONOutput, *flags.IncludeRelays); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
os.Exit(0)
|
os.Exit(0)
|
||||||
|
@ -766,8 +783,47 @@ func ConfigLoad(proxy *Proxy, flags *ConfigFlags) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (config *Config) printRegisteredServers(proxy *Proxy, jsonOutput bool) error {
|
func (config *Config) printRegisteredServers(proxy *Proxy, jsonOutput bool, includeRelays bool) error {
|
||||||
var summary []ServerSummary
|
var summary []ServerSummary
|
||||||
|
if includeRelays {
|
||||||
|
for _, registeredRelay := range proxy.registeredRelays {
|
||||||
|
addrStr, port := registeredRelay.stamp.ServerAddrStr, stamps.DefaultPort
|
||||||
|
var hostAddr string
|
||||||
|
hostAddr, port = ExtractHostAndPort(addrStr, port)
|
||||||
|
addrs := make([]string, 0)
|
||||||
|
if (registeredRelay.stamp.Proto == stamps.StampProtoTypeDoH || registeredRelay.stamp.Proto == stamps.StampProtoTypeODoHTarget) &&
|
||||||
|
len(registeredRelay.stamp.ProviderName) > 0 {
|
||||||
|
providerName := registeredRelay.stamp.ProviderName
|
||||||
|
var host string
|
||||||
|
host, port = ExtractHostAndPort(providerName, port)
|
||||||
|
addrs = append(addrs, host)
|
||||||
|
}
|
||||||
|
if len(addrStr) > 0 {
|
||||||
|
addrs = append(addrs, hostAddr)
|
||||||
|
}
|
||||||
|
nolog := true
|
||||||
|
nofilter := true
|
||||||
|
if registeredRelay.stamp.Proto == stamps.StampProtoTypeODoHRelay {
|
||||||
|
nolog = registeredRelay.stamp.Props&stamps.ServerInformalPropertyNoLog != 0
|
||||||
|
}
|
||||||
|
serverSummary := ServerSummary{
|
||||||
|
Name: registeredRelay.name,
|
||||||
|
Proto: registeredRelay.stamp.Proto.String(),
|
||||||
|
IPv6: strings.HasPrefix(addrStr, "["),
|
||||||
|
Ports: []int{port},
|
||||||
|
Addrs: addrs,
|
||||||
|
NoLog: nolog,
|
||||||
|
NoFilter: nofilter,
|
||||||
|
Description: registeredRelay.description,
|
||||||
|
Stamp: registeredRelay.stamp.String(),
|
||||||
|
}
|
||||||
|
if jsonOutput {
|
||||||
|
summary = append(summary, serverSummary)
|
||||||
|
} else {
|
||||||
|
fmt.Println(serverSummary.Name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
for _, registeredServer := range proxy.registeredServers {
|
for _, registeredServer := range proxy.registeredServers {
|
||||||
addrStr, port := registeredServer.stamp.ServerAddrStr, stamps.DefaultPort
|
addrStr, port := registeredServer.stamp.ServerAddrStr, stamps.DefaultPort
|
||||||
var hostAddr string
|
var hostAddr string
|
||||||
|
@ -783,13 +839,14 @@ func (config *Config) printRegisteredServers(proxy *Proxy, jsonOutput bool) erro
|
||||||
if len(addrStr) > 0 {
|
if len(addrStr) > 0 {
|
||||||
addrs = append(addrs, hostAddr)
|
addrs = append(addrs, hostAddr)
|
||||||
}
|
}
|
||||||
|
dnssec := registeredServer.stamp.Props&stamps.ServerInformalPropertyDNSSEC != 0
|
||||||
serverSummary := ServerSummary{
|
serverSummary := ServerSummary{
|
||||||
Name: registeredServer.name,
|
Name: registeredServer.name,
|
||||||
Proto: registeredServer.stamp.Proto.String(),
|
Proto: registeredServer.stamp.Proto.String(),
|
||||||
IPv6: strings.HasPrefix(addrStr, "["),
|
IPv6: strings.HasPrefix(addrStr, "["),
|
||||||
Ports: []int{port},
|
Ports: []int{port},
|
||||||
Addrs: addrs,
|
Addrs: addrs,
|
||||||
DNSSEC: registeredServer.stamp.Props&stamps.ServerInformalPropertyDNSSEC != 0,
|
DNSSEC: &dnssec,
|
||||||
NoLog: registeredServer.stamp.Props&stamps.ServerInformalPropertyNoLog != 0,
|
NoLog: registeredServer.stamp.Props&stamps.ServerInformalPropertyNoLog != 0,
|
||||||
NoFilter: registeredServer.stamp.Props&stamps.ServerInformalPropertyNoFilter != 0,
|
NoFilter: registeredServer.stamp.Props&stamps.ServerInformalPropertyNoFilter != 0,
|
||||||
Description: registeredServer.description,
|
Description: registeredServer.description,
|
||||||
|
@ -849,7 +906,9 @@ func (config *Config) loadSources(proxy *Proxy) error {
|
||||||
}
|
}
|
||||||
proxy.registeredServers = append(proxy.registeredServers, RegisteredServer{name: serverName, stamp: stamp})
|
proxy.registeredServers = append(proxy.registeredServers, RegisteredServer{name: serverName, stamp: stamp})
|
||||||
}
|
}
|
||||||
proxy.updateRegisteredServers()
|
if err := proxy.updateRegisteredServers(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
rs1 := proxy.registeredServers
|
rs1 := proxy.registeredServers
|
||||||
rs2 := proxy.serversInfo.registeredServers
|
rs2 := proxy.serversInfo.registeredServers
|
||||||
rand.Shuffle(len(rs1), func(i, j int) {
|
rand.Shuffle(len(rs1), func(i, j int) {
|
||||||
|
@ -880,9 +939,8 @@ func (config *Config) loadSource(proxy *Proxy, cfgSourceName string, cfgSource *
|
||||||
}
|
}
|
||||||
if cfgSource.RefreshDelay <= 0 {
|
if cfgSource.RefreshDelay <= 0 {
|
||||||
cfgSource.RefreshDelay = 72
|
cfgSource.RefreshDelay = 72
|
||||||
} else if cfgSource.RefreshDelay > 168 {
|
|
||||||
cfgSource.RefreshDelay = 168
|
|
||||||
}
|
}
|
||||||
|
cfgSource.RefreshDelay = Min(168, Max(24, cfgSource.RefreshDelay))
|
||||||
source, err := NewSource(
|
source, err := NewSource(
|
||||||
cfgSourceName,
|
cfgSourceName,
|
||||||
proxy.xTransport,
|
proxy.xTransport,
|
||||||
|
@ -894,7 +952,7 @@ func (config *Config) loadSource(proxy *Proxy, cfgSourceName string, cfgSource *
|
||||||
cfgSource.Prefix,
|
cfgSource.Prefix,
|
||||||
)
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if len(source.in) <= 0 {
|
if len(source.bin) <= 0 {
|
||||||
dlog.Criticalf("Unable to retrieve source [%s]: [%s]", cfgSourceName, err)
|
dlog.Criticalf("Unable to retrieve source [%s]: [%s]", cfgSourceName, err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
|
@ -5,7 +5,6 @@ import (
|
||||||
crypto_rand "crypto/rand"
|
crypto_rand "crypto/rand"
|
||||||
"crypto/sha512"
|
"crypto/sha512"
|
||||||
"errors"
|
"errors"
|
||||||
"math/rand"
|
|
||||||
|
|
||||||
"github.com/jedisct1/dlog"
|
"github.com/jedisct1/dlog"
|
||||||
"github.com/jedisct1/xsecretbox"
|
"github.com/jedisct1/xsecretbox"
|
||||||
|
@ -79,7 +78,9 @@ func (proxy *Proxy) Encrypt(
|
||||||
proto string,
|
proto string,
|
||||||
) (sharedKey *[32]byte, encrypted []byte, clientNonce []byte, err error) {
|
) (sharedKey *[32]byte, encrypted []byte, clientNonce []byte, err error) {
|
||||||
nonce, clientNonce := make([]byte, NonceSize), make([]byte, HalfNonceSize)
|
nonce, clientNonce := make([]byte, NonceSize), make([]byte, HalfNonceSize)
|
||||||
crypto_rand.Read(clientNonce)
|
if _, err := crypto_rand.Read(clientNonce); err != nil {
|
||||||
|
return nil, nil, nil, err
|
||||||
|
}
|
||||||
copy(nonce, clientNonce)
|
copy(nonce, clientNonce)
|
||||||
var publicKey *[PublicKeySize]byte
|
var publicKey *[PublicKeySize]byte
|
||||||
if proxy.ephemeralKeys {
|
if proxy.ephemeralKeys {
|
||||||
|
@ -102,7 +103,9 @@ func (proxy *Proxy) Encrypt(
|
||||||
minQuestionSize = Max(proxy.questionSizeEstimator.MinQuestionSize(), minQuestionSize)
|
minQuestionSize = Max(proxy.questionSizeEstimator.MinQuestionSize(), minQuestionSize)
|
||||||
} else {
|
} else {
|
||||||
var xpad [1]byte
|
var xpad [1]byte
|
||||||
rand.Read(xpad[:])
|
if _, err := crypto_rand.Read(xpad[:]); err != nil {
|
||||||
|
return nil, nil, nil, err
|
||||||
|
}
|
||||||
minQuestionSize += int(xpad[0])
|
minQuestionSize += int(xpad[0])
|
||||||
}
|
}
|
||||||
paddedLength := Min(MaxDNSUDPPacketSize, (Max(minQuestionSize, QueryOverhead)+1+63) & ^63)
|
paddedLength := Min(MaxDNSUDPPacketSize, (Max(minQuestionSize, QueryOverhead)+1+63) & ^63)
|
||||||
|
|
|
@ -35,7 +35,7 @@ func FetchCurrentDNSCryptCert(
|
||||||
return CertInfo{}, 0, false, errors.New("Invalid public key length")
|
return CertInfo{}, 0, false, errors.New("Invalid public key length")
|
||||||
}
|
}
|
||||||
if !strings.HasSuffix(providerName, ".") {
|
if !strings.HasSuffix(providerName, ".") {
|
||||||
providerName = providerName + "."
|
providerName += "."
|
||||||
}
|
}
|
||||||
if serverName == nil {
|
if serverName == nil {
|
||||||
serverName = &providerName
|
serverName = &providerName
|
||||||
|
@ -183,7 +183,7 @@ func FetchCurrentDNSCryptCert(
|
||||||
certCountStr = " - additional certificate"
|
certCountStr = " - additional certificate"
|
||||||
}
|
}
|
||||||
if certInfo.CryptoConstruction == UndefinedConstruction {
|
if certInfo.CryptoConstruction == UndefinedConstruction {
|
||||||
return certInfo, 0, fragmentsBlocked, errors.New("No useable certificate found")
|
return certInfo, 0, fragmentsBlocked, errors.New("No usable certificate found")
|
||||||
}
|
}
|
||||||
return certInfo, int(rtt.Nanoseconds() / 1000000), fragmentsBlocked, nil
|
return certInfo, int(rtt.Nanoseconds() / 1000000), fragmentsBlocked, nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -40,6 +40,11 @@ func TruncatedResponse(packet []byte) ([]byte, error) {
|
||||||
|
|
||||||
func RefusedResponseFromMessage(srcMsg *dns.Msg, refusedCode bool, ipv4 net.IP, ipv6 net.IP, ttl uint32) *dns.Msg {
|
func RefusedResponseFromMessage(srcMsg *dns.Msg, refusedCode bool, ipv4 net.IP, ipv6 net.IP, ttl uint32) *dns.Msg {
|
||||||
dstMsg := EmptyResponseFromMessage(srcMsg)
|
dstMsg := EmptyResponseFromMessage(srcMsg)
|
||||||
|
ede := new(dns.EDNS0_EDE)
|
||||||
|
if edns0 := dstMsg.IsEdns0(); edns0 != nil {
|
||||||
|
edns0.Option = append(edns0.Option, ede)
|
||||||
|
}
|
||||||
|
ede.InfoCode = dns.ExtendedErrorCodeFiltered
|
||||||
if refusedCode {
|
if refusedCode {
|
||||||
dstMsg.Rcode = dns.RcodeRefused
|
dstMsg.Rcode = dns.RcodeRefused
|
||||||
} else {
|
} else {
|
||||||
|
@ -58,6 +63,7 @@ func RefusedResponseFromMessage(srcMsg *dns.Msg, refusedCode bool, ipv4 net.IP,
|
||||||
if rr.A != nil {
|
if rr.A != nil {
|
||||||
dstMsg.Answer = []dns.RR{rr}
|
dstMsg.Answer = []dns.RR{rr}
|
||||||
sendHInfoResponse = false
|
sendHInfoResponse = false
|
||||||
|
ede.InfoCode = dns.ExtendedErrorCodeForgedAnswer
|
||||||
}
|
}
|
||||||
} else if ipv6 != nil && question.Qtype == dns.TypeAAAA {
|
} else if ipv6 != nil && question.Qtype == dns.TypeAAAA {
|
||||||
rr := new(dns.AAAA)
|
rr := new(dns.AAAA)
|
||||||
|
@ -66,18 +72,24 @@ func RefusedResponseFromMessage(srcMsg *dns.Msg, refusedCode bool, ipv4 net.IP,
|
||||||
if rr.AAAA != nil {
|
if rr.AAAA != nil {
|
||||||
dstMsg.Answer = []dns.RR{rr}
|
dstMsg.Answer = []dns.RR{rr}
|
||||||
sendHInfoResponse = false
|
sendHInfoResponse = false
|
||||||
|
ede.InfoCode = dns.ExtendedErrorCodeForgedAnswer
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if sendHInfoResponse {
|
if sendHInfoResponse {
|
||||||
hinfo := new(dns.HINFO)
|
hinfo := new(dns.HINFO)
|
||||||
hinfo.Hdr = dns.RR_Header{Name: question.Name, Rrtype: dns.TypeHINFO,
|
hinfo.Hdr = dns.RR_Header{
|
||||||
Class: dns.ClassINET, Ttl: ttl}
|
Name: question.Name, Rrtype: dns.TypeHINFO,
|
||||||
|
Class: dns.ClassINET, Ttl: ttl,
|
||||||
|
}
|
||||||
hinfo.Cpu = "This query has been locally blocked"
|
hinfo.Cpu = "This query has been locally blocked"
|
||||||
hinfo.Os = "by dnscrypt-proxy"
|
hinfo.Os = "by dnscrypt-proxy"
|
||||||
dstMsg.Answer = []dns.RR{hinfo}
|
dstMsg.Answer = []dns.RR{hinfo}
|
||||||
|
} else {
|
||||||
|
ede.ExtraText = "This query has been locally blocked by dnscrypt-proxy"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return dstMsg
|
return dstMsg
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -262,8 +274,6 @@ func removeEDNS0Options(msg *dns.Msg) bool {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
func isDigit(b byte) bool { return b >= '0' && b <= '9' }
|
|
||||||
|
|
||||||
func dddToByte(s []byte) byte {
|
func dddToByte(s []byte) byte {
|
||||||
return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0'))
|
return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0'))
|
||||||
}
|
}
|
||||||
|
|
|
@ -7,7 +7,7 @@
|
||||||
## going through a captive portal.
|
## going through a captive portal.
|
||||||
##
|
##
|
||||||
## This is a list of hard-coded IP addresses that will be returned when queries
|
## This is a list of hard-coded IP addresses that will be returned when queries
|
||||||
## for these names are received, even before the operating system an interface
|
## for these names are received, even before the operating system reports an interface
|
||||||
## as usable for reaching the Internet.
|
## as usable for reaching the Internet.
|
||||||
##
|
##
|
||||||
## Note that IPv6 addresses don't need to be specified within brackets,
|
## Note that IPv6 addresses don't need to be specified within brackets,
|
||||||
|
|
|
@ -183,6 +183,12 @@ keepalive = 30
|
||||||
# use_syslog = true
|
# use_syslog = true
|
||||||
|
|
||||||
|
|
||||||
|
## The maximum concurrency to reload certificates from the resolvers.
|
||||||
|
## Default is 10.
|
||||||
|
|
||||||
|
# cert_refresh_concurrency = 10
|
||||||
|
|
||||||
|
|
||||||
## Delay, in minutes, after which certificates are reloaded
|
## Delay, in minutes, after which certificates are reloaded
|
||||||
|
|
||||||
cert_refresh_delay = 240
|
cert_refresh_delay = 240
|
||||||
|
@ -207,24 +213,30 @@ cert_refresh_delay = 240
|
||||||
# tls_disable_session_tickets = false
|
# tls_disable_session_tickets = false
|
||||||
|
|
||||||
|
|
||||||
## DoH: Use a specific cipher suite instead of the server preference
|
## DoH: Use TLS 1.2 and specific cipher suite instead of the server preference
|
||||||
## 49199 = TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
|
## 49199 = TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
|
||||||
## 49195 = TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
|
## 49195 = TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256
|
||||||
## 52392 = TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305
|
## 52392 = TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305
|
||||||
## 52393 = TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
|
## 52393 = TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
|
||||||
## 4865 = TLS_AES_128_GCM_SHA256
|
|
||||||
## 4867 = TLS_CHACHA20_POLY1305_SHA256
|
|
||||||
##
|
##
|
||||||
## On non-Intel CPUs such as MIPS routers and ARM systems (Android, Raspberry Pi...),
|
## On non-Intel CPUs such as MIPS routers and ARM systems (Android, Raspberry Pi...),
|
||||||
## the following suite improves performance.
|
## the following suite improves performance.
|
||||||
## This may also help on Intel CPUs running 32-bit operating systems.
|
## This may also help on Intel CPUs running 32-bit operating systems.
|
||||||
##
|
##
|
||||||
## Keep tls_cipher_suite empty if you have issues fetching sources or
|
## Keep tls_cipher_suite empty if you have issues fetching sources or
|
||||||
## connecting to some DoH servers. Google and Cloudflare are fine with it.
|
## connecting to some DoH servers.
|
||||||
|
|
||||||
# tls_cipher_suite = [52392, 49199]
|
# tls_cipher_suite = [52392, 49199]
|
||||||
|
|
||||||
|
|
||||||
|
## Log TLS key material to a file, for debugging purposes only.
|
||||||
|
## This file will contain the TLS master key, which can be used to decrypt
|
||||||
|
## all TLS traffic to/from DoH servers.
|
||||||
|
## Never ever enable except for debugging purposes with a tool such as mitmproxy.
|
||||||
|
|
||||||
|
# tls_key_log_file = '/tmp/keylog.txt'
|
||||||
|
|
||||||
|
|
||||||
## Bootstrap resolvers
|
## Bootstrap resolvers
|
||||||
##
|
##
|
||||||
## These are normal, non-encrypted DNS resolvers, that will be only used
|
## These are normal, non-encrypted DNS resolvers, that will be only used
|
||||||
|
@ -258,7 +270,17 @@ cert_refresh_delay = 240
|
||||||
bootstrap_resolvers = ['9.9.9.11:53', '8.8.8.8:53']
|
bootstrap_resolvers = ['9.9.9.11:53', '8.8.8.8:53']
|
||||||
|
|
||||||
|
|
||||||
## Always use the bootstrap resolver before the system DNS settings.
|
## When internal DNS resolution is required, for example to retrieve
|
||||||
|
## the resolvers list:
|
||||||
|
##
|
||||||
|
## - queries will be sent to dnscrypt-proxy itself, if it is already
|
||||||
|
## running with active servers (*)
|
||||||
|
## - or else, queries will be sent to fallback servers
|
||||||
|
## - finally, if `ignore_system_dns` is `false`, queries will be sent
|
||||||
|
## to the system DNS
|
||||||
|
##
|
||||||
|
## (*) this is incompatible with systemd sockets.
|
||||||
|
## `listen_addrs` must not be empty.
|
||||||
|
|
||||||
ignore_system_dns = true
|
ignore_system_dns = true
|
||||||
|
|
||||||
|
@ -332,6 +354,7 @@ block_ipv6 = false
|
||||||
|
|
||||||
|
|
||||||
## Immediately respond to A and AAAA queries for host names without a domain name
|
## Immediately respond to A and AAAA queries for host names without a domain name
|
||||||
|
## This also prevents "dotless domain names" from being resolved upstream.
|
||||||
|
|
||||||
block_unqualified = true
|
block_unqualified = true
|
||||||
|
|
||||||
|
@ -453,6 +476,8 @@ cache_neg_max_ttl = 600
|
||||||
|
|
||||||
|
|
||||||
## Certificate file and key - Note that the certificate has to be trusted.
|
## Certificate file and key - Note that the certificate has to be trusted.
|
||||||
|
## Can be generated using the following command:
|
||||||
|
## openssl req -x509 -nodes -newkey rsa:2048 -days 5000 -sha256 -keyout localhost.pem -out localhost.pem
|
||||||
## See the documentation (wiki) for more information.
|
## See the documentation (wiki) for more information.
|
||||||
|
|
||||||
# cert_file = 'localhost.pem'
|
# cert_file = 'localhost.pem'
|
||||||
|
@ -677,16 +702,16 @@ format = 'tsv'
|
||||||
## If the `urls` property is missing, cache files and valid signatures
|
## If the `urls` property is missing, cache files and valid signatures
|
||||||
## must already be present. This doesn't prevent these cache files from
|
## must already be present. This doesn't prevent these cache files from
|
||||||
## expiring after `refresh_delay` hours.
|
## expiring after `refresh_delay` hours.
|
||||||
## Cache freshness is checked every 24 hours, so values for 'refresh_delay'
|
## `refreshed_delay` must be in the [24..168] interval.
|
||||||
## of less than 24 hours will have no effect.
|
## The minimum delay of 24 hours (1 day) avoids unnecessary requests to servers.
|
||||||
## A maximum delay of 168 hours (1 week) is imposed to ensure cache freshness.
|
## The maximum delay of 168 hours (1 week) ensures cache freshness.
|
||||||
|
|
||||||
[sources]
|
[sources]
|
||||||
|
|
||||||
### An example of a remote source from https://github.com/DNSCrypt/dnscrypt-resolvers
|
### An example of a remote source from https://github.com/DNSCrypt/dnscrypt-resolvers
|
||||||
|
|
||||||
[sources.public-resolvers]
|
[sources.public-resolvers]
|
||||||
urls = ['https://raw.githubusercontent.com/DNSCrypt/dnscrypt-resolvers/master/v3/public-resolvers.md', 'https://download.dnscrypt.info/resolvers-list/v3/public-resolvers.md', 'https://ipv6.download.dnscrypt.info/resolvers-list/v3/public-resolvers.md']
|
urls = ['https://raw.githubusercontent.com/DNSCrypt/dnscrypt-resolvers/master/v3/public-resolvers.md', 'https://download.dnscrypt.info/resolvers-list/v3/public-resolvers.md']
|
||||||
cache_file = 'public-resolvers.md'
|
cache_file = 'public-resolvers.md'
|
||||||
minisign_key = 'RWQf6LRCGA9i53mlYecO4IzT51TGPpvWucNSCh1CBM0QTaLn73Y7GFO3'
|
minisign_key = 'RWQf6LRCGA9i53mlYecO4IzT51TGPpvWucNSCh1CBM0QTaLn73Y7GFO3'
|
||||||
refresh_delay = 72
|
refresh_delay = 72
|
||||||
|
@ -695,7 +720,7 @@ format = 'tsv'
|
||||||
### Anonymized DNS relays
|
### Anonymized DNS relays
|
||||||
|
|
||||||
[sources.relays]
|
[sources.relays]
|
||||||
urls = ['https://raw.githubusercontent.com/DNSCrypt/dnscrypt-resolvers/master/v3/relays.md', 'https://download.dnscrypt.info/resolvers-list/v3/relays.md', 'https://ipv6.download.dnscrypt.info/resolvers-list/v3/relays.md']
|
urls = ['https://raw.githubusercontent.com/DNSCrypt/dnscrypt-resolvers/master/v3/relays.md', 'https://download.dnscrypt.info/resolvers-list/v3/relays.md']
|
||||||
cache_file = 'relays.md'
|
cache_file = 'relays.md'
|
||||||
minisign_key = 'RWQf6LRCGA9i53mlYecO4IzT51TGPpvWucNSCh1CBM0QTaLn73Y7GFO3'
|
minisign_key = 'RWQf6LRCGA9i53mlYecO4IzT51TGPpvWucNSCh1CBM0QTaLn73Y7GFO3'
|
||||||
refresh_delay = 72
|
refresh_delay = 72
|
||||||
|
@ -704,13 +729,13 @@ format = 'tsv'
|
||||||
### ODoH (Oblivious DoH) servers and relays
|
### ODoH (Oblivious DoH) servers and relays
|
||||||
|
|
||||||
# [sources.odoh-servers]
|
# [sources.odoh-servers]
|
||||||
# urls = ['https://raw.githubusercontent.com/DNSCrypt/dnscrypt-resolvers/master/v3/odoh-servers.md', 'https://download.dnscrypt.info/resolvers-list/v3/odoh-servers.md', 'https://ipv6.download.dnscrypt.info/resolvers-list/v3/odoh-servers.md']
|
# urls = ['https://raw.githubusercontent.com/DNSCrypt/dnscrypt-resolvers/master/v3/odoh-servers.md', 'https://download.dnscrypt.info/resolvers-list/v3/odoh-servers.md']
|
||||||
# cache_file = 'odoh-servers.md'
|
# cache_file = 'odoh-servers.md'
|
||||||
# minisign_key = 'RWQf6LRCGA9i53mlYecO4IzT51TGPpvWucNSCh1CBM0QTaLn73Y7GFO3'
|
# minisign_key = 'RWQf6LRCGA9i53mlYecO4IzT51TGPpvWucNSCh1CBM0QTaLn73Y7GFO3'
|
||||||
# refresh_delay = 24
|
# refresh_delay = 24
|
||||||
# prefix = ''
|
# prefix = ''
|
||||||
# [sources.odoh-relays]
|
# [sources.odoh-relays]
|
||||||
# urls = ['https://raw.githubusercontent.com/DNSCrypt/dnscrypt-resolvers/master/v3/odoh-relays.md', 'https://download.dnscrypt.info/resolvers-list/v3/odoh-relays.md', 'https://ipv6.download.dnscrypt.info/resolvers-list/v3/odoh-relays.md']
|
# urls = ['https://raw.githubusercontent.com/DNSCrypt/dnscrypt-resolvers/master/v3/odoh-relays.md', 'https://download.dnscrypt.info/resolvers-list/v3/odoh-relays.md']
|
||||||
# cache_file = 'odoh-relays.md'
|
# cache_file = 'odoh-relays.md'
|
||||||
# minisign_key = 'RWQf6LRCGA9i53mlYecO4IzT51TGPpvWucNSCh1CBM0QTaLn73Y7GFO3'
|
# minisign_key = 'RWQf6LRCGA9i53mlYecO4IzT51TGPpvWucNSCh1CBM0QTaLn73Y7GFO3'
|
||||||
# refresh_delay = 24
|
# refresh_delay = 24
|
||||||
|
@ -728,10 +753,18 @@ format = 'tsv'
|
||||||
### This is a subset of the `public-resolvers` list, so enabling both is useless.
|
### This is a subset of the `public-resolvers` list, so enabling both is useless.
|
||||||
|
|
||||||
# [sources.parental-control]
|
# [sources.parental-control]
|
||||||
# urls = ['https://raw.githubusercontent.com/DNSCrypt/dnscrypt-resolvers/master/v3/parental-control.md', 'https://download.dnscrypt.info/resolvers-list/v3/parental-control.md', 'https://ipv6.download.dnscrypt.info/resolvers-list/v3/parental-control.md']
|
# urls = ['https://raw.githubusercontent.com/DNSCrypt/dnscrypt-resolvers/master/v3/parental-control.md', 'https://download.dnscrypt.info/resolvers-list/v3/parental-control.md']
|
||||||
# cache_file = 'parental-control.md'
|
# cache_file = 'parental-control.md'
|
||||||
# minisign_key = 'RWQf6LRCGA9i53mlYecO4IzT51TGPpvWucNSCh1CBM0QTaLn73Y7GFO3'
|
# minisign_key = 'RWQf6LRCGA9i53mlYecO4IzT51TGPpvWucNSCh1CBM0QTaLn73Y7GFO3'
|
||||||
|
|
||||||
|
### dnscry.pt servers - See https://www.dnscry.pt
|
||||||
|
|
||||||
|
# [sources.dnscry-pt-resolvers]
|
||||||
|
# urls = ["https://www.dnscry.pt/resolvers.md"]
|
||||||
|
# minisign_key = "RWQM31Nwkqh01x88SvrBL8djp1NH56Rb4mKLHz16K7qsXgEomnDv6ziQ"
|
||||||
|
# cache_file = "dnscry.pt-resolvers.md"
|
||||||
|
# refresh_delay = 72
|
||||||
|
# prefix = "dnscry.pt-"
|
||||||
|
|
||||||
|
|
||||||
#########################################
|
#########################################
|
||||||
|
|
|
@ -23,4 +23,14 @@
|
||||||
# 192.in-addr.arpa 192.168.1.1
|
# 192.in-addr.arpa 192.168.1.1
|
||||||
|
|
||||||
## Forward queries for example.com and *.example.com to 9.9.9.9 and 8.8.8.8
|
## Forward queries for example.com and *.example.com to 9.9.9.9 and 8.8.8.8
|
||||||
# example.com 9.9.9.9,8.8.8.8
|
# example.com 9.9.9.9,8.8.8.8
|
||||||
|
|
||||||
|
## Forward queries to a resolver using IPv6
|
||||||
|
# ipv6.example.com [2001:DB8::42]:53
|
||||||
|
|
||||||
|
## Forward queries for .onion names to a local Tor client
|
||||||
|
## Tor must be configured with the following in the torrc file:
|
||||||
|
## DNSPort 9053
|
||||||
|
## AutomapHostsOnResolve 1
|
||||||
|
|
||||||
|
# onion 127.0.0.1:9053
|
||||||
|
|
|
@ -5,8 +5,9 @@ package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
stamps "github.com/jedisct1/go-dnsstamps"
|
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
stamps "github.com/jedisct1/go-dnsstamps"
|
||||||
)
|
)
|
||||||
|
|
||||||
func FuzzParseODoHTargetConfigs(f *testing.F) {
|
func FuzzParseODoHTargetConfigs(f *testing.F) {
|
||||||
|
|
|
@ -16,12 +16,17 @@ func Logger(logMaxSize int, logMaxAge int, logMaxBackups int, fileName string) i
|
||||||
if st.Mode().IsDir() {
|
if st.Mode().IsDir() {
|
||||||
dlog.Fatalf("[%v] is a directory", fileName)
|
dlog.Fatalf("[%v] is a directory", fileName)
|
||||||
}
|
}
|
||||||
fp, err := os.OpenFile(fileName, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)
|
fp, err := os.OpenFile(fileName, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0o644)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
dlog.Fatalf("Unable to access [%v]: [%v]", fileName, err)
|
dlog.Fatalf("Unable to access [%v]: [%v]", fileName, err)
|
||||||
}
|
}
|
||||||
return fp
|
return fp
|
||||||
}
|
}
|
||||||
|
if fp, err := os.OpenFile(fileName, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0o644); err == nil {
|
||||||
|
fp.Close()
|
||||||
|
} else {
|
||||||
|
dlog.Errorf("Unable to create [%v]: [%v]", fileName, err)
|
||||||
|
}
|
||||||
logger := &lumberjack.Logger{
|
logger := &lumberjack.Logger{
|
||||||
LocalTime: true,
|
LocalTime: true,
|
||||||
MaxSize: logMaxSize,
|
MaxSize: logMaxSize,
|
||||||
|
|
|
@ -15,7 +15,7 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
AppVersion = "2.1.4"
|
AppVersion = "2.1.5"
|
||||||
DefaultConfigFileName = "dnscrypt-proxy.toml"
|
DefaultConfigFileName = "dnscrypt-proxy.toml"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -27,13 +27,18 @@ type App struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
TimezoneSetup()
|
tzErr := TimezoneSetup()
|
||||||
dlog.Init("dnscrypt-proxy", dlog.SeverityNotice, "DAEMON")
|
dlog.Init("dnscrypt-proxy", dlog.SeverityNotice, "DAEMON")
|
||||||
|
if tzErr != nil {
|
||||||
|
dlog.Warnf("Timezone setup failed: [%v]", tzErr)
|
||||||
|
}
|
||||||
runtime.MemProfileRate = 0
|
runtime.MemProfileRate = 0
|
||||||
|
|
||||||
seed := make([]byte, 8)
|
seed := make([]byte, 8)
|
||||||
crypto_rand.Read(seed)
|
if _, err := crypto_rand.Read(seed); err != nil {
|
||||||
rand.Seed(int64(binary.LittleEndian.Uint64(seed[:])))
|
dlog.Fatal(err)
|
||||||
|
}
|
||||||
|
rand.Seed(int64(binary.LittleEndian.Uint64(seed)))
|
||||||
|
|
||||||
pwd, err := os.Getwd()
|
pwd, err := os.Getwd()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -46,6 +51,7 @@ func main() {
|
||||||
flags.Resolve = flag.String("resolve", "", "resolve a DNS name (string can be <name> or <name>,<resolver address>)")
|
flags.Resolve = flag.String("resolve", "", "resolve a DNS name (string can be <name> or <name>,<resolver address>)")
|
||||||
flags.List = flag.Bool("list", false, "print the list of available resolvers for the enabled filters")
|
flags.List = flag.Bool("list", false, "print the list of available resolvers for the enabled filters")
|
||||||
flags.ListAll = flag.Bool("list-all", false, "print the complete list of available resolvers, ignoring filters")
|
flags.ListAll = flag.Bool("list-all", false, "print the complete list of available resolvers, ignoring filters")
|
||||||
|
flags.IncludeRelays = flag.Bool("include-relays", false, "include the list of available relays in the output of -list and -list-all")
|
||||||
flags.JSONOutput = flag.Bool("json", false, "output list as JSON")
|
flags.JSONOutput = flag.Bool("json", false, "output list as JSON")
|
||||||
flags.Check = flag.Bool("check", false, "check the configuration file and exit")
|
flags.Check = flag.Bool("check", false, "check the configuration file and exit")
|
||||||
flags.ConfigFile = flag.String("config", DefaultConfigFileName, "Path to the configuration file")
|
flags.ConfigFile = flag.String("config", DefaultConfigFileName, "Path to the configuration file")
|
||||||
|
@ -60,6 +66,10 @@ func main() {
|
||||||
os.Exit(0)
|
os.Exit(0)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if fullexecpath, err := os.Executable(); err == nil {
|
||||||
|
WarnIfMaybeWritableByOtherUsers(fullexecpath)
|
||||||
|
}
|
||||||
|
|
||||||
app := &App{
|
app := &App{
|
||||||
flags: &flags,
|
flags: &flags,
|
||||||
}
|
}
|
||||||
|
@ -124,7 +134,7 @@ func (app *App) AppMain() {
|
||||||
dlog.Fatal(err)
|
dlog.Fatal(err)
|
||||||
}
|
}
|
||||||
if err := PidFileCreate(); err != nil {
|
if err := PidFileCreate(); err != nil {
|
||||||
dlog.Criticalf("Unable to create the PID file: %v", err)
|
dlog.Errorf("Unable to create the PID file: [%v]", err)
|
||||||
}
|
}
|
||||||
if err := app.proxy.InitPluginsGlobals(); err != nil {
|
if err := app.proxy.InitPluginsGlobals(); err != nil {
|
||||||
dlog.Fatal(err)
|
dlog.Fatal(err)
|
||||||
|
@ -139,7 +149,9 @@ func (app *App) AppMain() {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (app *App) Stop(service service.Service) error {
|
func (app *App) Stop(service service.Service) error {
|
||||||
PidFileRemove()
|
if err := PidFileRemove(); err != nil {
|
||||||
|
dlog.Warnf("Failed to remove the PID file: [%v]", err)
|
||||||
|
}
|
||||||
dlog.Notice("Stopped.")
|
dlog.Notice("Stopped.")
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -181,7 +181,7 @@ func (q ODoHQuery) decryptResponse(response []byte) ([]byte, error) {
|
||||||
responseLength := binary.BigEndian.Uint16(responsePlaintext[0:2])
|
responseLength := binary.BigEndian.Uint16(responsePlaintext[0:2])
|
||||||
valid := 1
|
valid := 1
|
||||||
for i := 4 + int(responseLength); i < len(responsePlaintext); i++ {
|
for i := 4 + int(responseLength); i < len(responsePlaintext); i++ {
|
||||||
valid = valid & subtle.ConstantTimeByteEq(response[i], 0x00)
|
valid &= subtle.ConstantTimeByteEq(response[i], 0x00)
|
||||||
}
|
}
|
||||||
if valid != 1 {
|
if valid != 1 {
|
||||||
return nil, fmt.Errorf("Malformed response")
|
return nil, fmt.Errorf("Malformed response")
|
||||||
|
|
|
@ -15,10 +15,10 @@ func PidFileCreate() error {
|
||||||
if pidFile == nil || len(*pidFile) == 0 {
|
if pidFile == nil || len(*pidFile) == 0 {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
if err := os.MkdirAll(filepath.Dir(*pidFile), 0755); err != nil {
|
if err := os.MkdirAll(filepath.Dir(*pidFile), 0o755); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return safefile.WriteFile(*pidFile, []byte(strconv.Itoa(os.Getpid())), 0644)
|
return safefile.WriteFile(*pidFile, []byte(strconv.Itoa(os.Getpid())), 0o644)
|
||||||
}
|
}
|
||||||
|
|
||||||
func PidFileRemove() error {
|
func PidFileRemove() error {
|
||||||
|
|
|
@ -30,13 +30,13 @@ func (plugin *PluginAllowedIP) Description() string {
|
||||||
|
|
||||||
func (plugin *PluginAllowedIP) Init(proxy *Proxy) error {
|
func (plugin *PluginAllowedIP) Init(proxy *Proxy) error {
|
||||||
dlog.Noticef("Loading the set of allowed IP rules from [%s]", proxy.allowedIPFile)
|
dlog.Noticef("Loading the set of allowed IP rules from [%s]", proxy.allowedIPFile)
|
||||||
bin, err := ReadTextFile(proxy.allowedIPFile)
|
lines, err := ReadTextFile(proxy.allowedIPFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
plugin.allowedPrefixes = iradix.New()
|
plugin.allowedPrefixes = iradix.New()
|
||||||
plugin.allowedIPs = make(map[string]interface{})
|
plugin.allowedIPs = make(map[string]interface{})
|
||||||
for lineNo, line := range strings.Split(string(bin), "\n") {
|
for lineNo, line := range strings.Split(lines, "\n") {
|
||||||
line = TrimAndStripInlineComments(line)
|
line = TrimAndStripInlineComments(line)
|
||||||
if len(line) == 0 {
|
if len(line) == 0 {
|
||||||
continue
|
continue
|
||||||
|
@ -119,10 +119,14 @@ func (plugin *PluginAllowedIP) Eval(pluginsState *PluginsState, msg *dns.Msg) er
|
||||||
if plugin.logger != nil {
|
if plugin.logger != nil {
|
||||||
qName := pluginsState.qName
|
qName := pluginsState.qName
|
||||||
var clientIPStr string
|
var clientIPStr string
|
||||||
if pluginsState.clientProto == "udp" {
|
switch pluginsState.clientProto {
|
||||||
|
case "udp":
|
||||||
clientIPStr = (*pluginsState.clientAddr).(*net.UDPAddr).IP.String()
|
clientIPStr = (*pluginsState.clientAddr).(*net.UDPAddr).IP.String()
|
||||||
} else {
|
case "tcp", "local_doh":
|
||||||
clientIPStr = (*pluginsState.clientAddr).(*net.TCPAddr).IP.String()
|
clientIPStr = (*pluginsState.clientAddr).(*net.TCPAddr).IP.String()
|
||||||
|
default:
|
||||||
|
// Ignore internal flow.
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
var line string
|
var line string
|
||||||
if plugin.format == "tsv" {
|
if plugin.format == "tsv" {
|
||||||
|
|
|
@ -29,13 +29,13 @@ func (plugin *PluginAllowName) Description() string {
|
||||||
|
|
||||||
func (plugin *PluginAllowName) Init(proxy *Proxy) error {
|
func (plugin *PluginAllowName) Init(proxy *Proxy) error {
|
||||||
dlog.Noticef("Loading the set of allowed names from [%s]", proxy.allowNameFile)
|
dlog.Noticef("Loading the set of allowed names from [%s]", proxy.allowNameFile)
|
||||||
bin, err := ReadTextFile(proxy.allowNameFile)
|
lines, err := ReadTextFile(proxy.allowNameFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
plugin.allWeeklyRanges = proxy.allWeeklyRanges
|
plugin.allWeeklyRanges = proxy.allWeeklyRanges
|
||||||
plugin.patternMatcher = NewPatternMatcher()
|
plugin.patternMatcher = NewPatternMatcher()
|
||||||
for lineNo, line := range strings.Split(string(bin), "\n") {
|
for lineNo, line := range strings.Split(lines, "\n") {
|
||||||
line = TrimAndStripInlineComments(line)
|
line = TrimAndStripInlineComments(line)
|
||||||
if len(line) == 0 {
|
if len(line) == 0 {
|
||||||
continue
|
continue
|
||||||
|
@ -96,10 +96,14 @@ func (plugin *PluginAllowName) Eval(pluginsState *PluginsState, msg *dns.Msg) er
|
||||||
pluginsState.sessionData["whitelisted"] = true
|
pluginsState.sessionData["whitelisted"] = true
|
||||||
if plugin.logger != nil {
|
if plugin.logger != nil {
|
||||||
var clientIPStr string
|
var clientIPStr string
|
||||||
if pluginsState.clientProto == "udp" {
|
switch pluginsState.clientProto {
|
||||||
|
case "udp":
|
||||||
clientIPStr = (*pluginsState.clientAddr).(*net.UDPAddr).IP.String()
|
clientIPStr = (*pluginsState.clientAddr).(*net.UDPAddr).IP.String()
|
||||||
} else {
|
case "tcp", "local_doh":
|
||||||
clientIPStr = (*pluginsState.clientAddr).(*net.TCPAddr).IP.String()
|
clientIPStr = (*pluginsState.clientAddr).(*net.TCPAddr).IP.String()
|
||||||
|
default:
|
||||||
|
// Ignore internal flow.
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
var line string
|
var line string
|
||||||
if plugin.format == "tsv" {
|
if plugin.format == "tsv" {
|
||||||
|
|
|
@ -30,13 +30,13 @@ func (plugin *PluginBlockIP) Description() string {
|
||||||
|
|
||||||
func (plugin *PluginBlockIP) Init(proxy *Proxy) error {
|
func (plugin *PluginBlockIP) Init(proxy *Proxy) error {
|
||||||
dlog.Noticef("Loading the set of IP blocking rules from [%s]", proxy.blockIPFile)
|
dlog.Noticef("Loading the set of IP blocking rules from [%s]", proxy.blockIPFile)
|
||||||
bin, err := ReadTextFile(proxy.blockIPFile)
|
lines, err := ReadTextFile(proxy.blockIPFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
plugin.blockedPrefixes = iradix.New()
|
plugin.blockedPrefixes = iradix.New()
|
||||||
plugin.blockedIPs = make(map[string]interface{})
|
plugin.blockedIPs = make(map[string]interface{})
|
||||||
for lineNo, line := range strings.Split(string(bin), "\n") {
|
for lineNo, line := range strings.Split(lines, "\n") {
|
||||||
line = TrimAndStripInlineComments(line)
|
line = TrimAndStripInlineComments(line)
|
||||||
if len(line) == 0 {
|
if len(line) == 0 {
|
||||||
continue
|
continue
|
||||||
|
@ -123,10 +123,14 @@ func (plugin *PluginBlockIP) Eval(pluginsState *PluginsState, msg *dns.Msg) erro
|
||||||
if plugin.logger != nil {
|
if plugin.logger != nil {
|
||||||
qName := pluginsState.qName
|
qName := pluginsState.qName
|
||||||
var clientIPStr string
|
var clientIPStr string
|
||||||
if pluginsState.clientProto == "udp" {
|
switch pluginsState.clientProto {
|
||||||
|
case "udp":
|
||||||
clientIPStr = (*pluginsState.clientAddr).(*net.UDPAddr).IP.String()
|
clientIPStr = (*pluginsState.clientAddr).(*net.UDPAddr).IP.String()
|
||||||
} else {
|
case "tcp", "local_doh":
|
||||||
clientIPStr = (*pluginsState.clientAddr).(*net.TCPAddr).IP.String()
|
clientIPStr = (*pluginsState.clientAddr).(*net.TCPAddr).IP.String()
|
||||||
|
default:
|
||||||
|
// Ignore internal flow.
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
var line string
|
var line string
|
||||||
if plugin.format == "tsv" {
|
if plugin.format == "tsv" {
|
||||||
|
|
|
@ -35,8 +35,10 @@ func (plugin *PluginBlockIPv6) Eval(pluginsState *PluginsState, msg *dns.Msg) er
|
||||||
}
|
}
|
||||||
synth := EmptyResponseFromMessage(msg)
|
synth := EmptyResponseFromMessage(msg)
|
||||||
hinfo := new(dns.HINFO)
|
hinfo := new(dns.HINFO)
|
||||||
hinfo.Hdr = dns.RR_Header{Name: question.Name, Rrtype: dns.TypeHINFO,
|
hinfo.Hdr = dns.RR_Header{
|
||||||
Class: dns.ClassINET, Ttl: 86400}
|
Name: question.Name, Rrtype: dns.TypeHINFO,
|
||||||
|
Class: dns.ClassINET, Ttl: 86400,
|
||||||
|
}
|
||||||
hinfo.Cpu = "AAAA queries have been locally blocked by dnscrypt-proxy"
|
hinfo.Cpu = "AAAA queries have been locally blocked by dnscrypt-proxy"
|
||||||
hinfo.Os = "Set block_ipv6 to false to disable that feature"
|
hinfo.Os = "Set block_ipv6 to false to disable that feature"
|
||||||
synth.Answer = []dns.RR{hinfo}
|
synth.Answer = []dns.RR{hinfo}
|
||||||
|
@ -54,8 +56,10 @@ func (plugin *PluginBlockIPv6) Eval(pluginsState *PluginsState, msg *dns.Msg) er
|
||||||
soa.Minttl = 2400
|
soa.Minttl = 2400
|
||||||
soa.Expire = 604800
|
soa.Expire = 604800
|
||||||
soa.Retry = 300
|
soa.Retry = 300
|
||||||
soa.Hdr = dns.RR_Header{Name: parentZone, Rrtype: dns.TypeSOA,
|
soa.Hdr = dns.RR_Header{
|
||||||
Class: dns.ClassINET, Ttl: 60}
|
Name: parentZone, Rrtype: dns.TypeSOA,
|
||||||
|
Class: dns.ClassINET, Ttl: 60,
|
||||||
|
}
|
||||||
synth.Ns = []dns.RR{soa}
|
synth.Ns = []dns.RR{soa}
|
||||||
pluginsState.synthResponse = synth
|
pluginsState.synthResponse = synth
|
||||||
pluginsState.action = PluginsActionSynth
|
pluginsState.action = PluginsActionSynth
|
||||||
|
|
|
@ -44,10 +44,14 @@ func (blockedNames *BlockedNames) check(pluginsState *PluginsState, qName string
|
||||||
pluginsState.returnCode = PluginsReturnCodeReject
|
pluginsState.returnCode = PluginsReturnCodeReject
|
||||||
if blockedNames.logger != nil {
|
if blockedNames.logger != nil {
|
||||||
var clientIPStr string
|
var clientIPStr string
|
||||||
if pluginsState.clientProto == "udp" {
|
switch pluginsState.clientProto {
|
||||||
|
case "udp":
|
||||||
clientIPStr = (*pluginsState.clientAddr).(*net.UDPAddr).IP.String()
|
clientIPStr = (*pluginsState.clientAddr).(*net.UDPAddr).IP.String()
|
||||||
} else {
|
case "tcp", "local_doh":
|
||||||
clientIPStr = (*pluginsState.clientAddr).(*net.TCPAddr).IP.String()
|
clientIPStr = (*pluginsState.clientAddr).(*net.TCPAddr).IP.String()
|
||||||
|
default:
|
||||||
|
// Ignore internal flow.
|
||||||
|
return false, nil
|
||||||
}
|
}
|
||||||
var line string
|
var line string
|
||||||
if blockedNames.format == "tsv" {
|
if blockedNames.format == "tsv" {
|
||||||
|
@ -71,8 +75,7 @@ func (blockedNames *BlockedNames) check(pluginsState *PluginsState, qName string
|
||||||
|
|
||||||
// ---
|
// ---
|
||||||
|
|
||||||
type PluginBlockName struct {
|
type PluginBlockName struct{}
|
||||||
}
|
|
||||||
|
|
||||||
func (plugin *PluginBlockName) Name() string {
|
func (plugin *PluginBlockName) Name() string {
|
||||||
return "block_name"
|
return "block_name"
|
||||||
|
@ -84,7 +87,7 @@ func (plugin *PluginBlockName) Description() string {
|
||||||
|
|
||||||
func (plugin *PluginBlockName) Init(proxy *Proxy) error {
|
func (plugin *PluginBlockName) Init(proxy *Proxy) error {
|
||||||
dlog.Noticef("Loading the set of blocking rules from [%s]", proxy.blockNameFile)
|
dlog.Noticef("Loading the set of blocking rules from [%s]", proxy.blockNameFile)
|
||||||
bin, err := ReadTextFile(proxy.blockNameFile)
|
lines, err := ReadTextFile(proxy.blockNameFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -92,7 +95,7 @@ func (plugin *PluginBlockName) Init(proxy *Proxy) error {
|
||||||
allWeeklyRanges: proxy.allWeeklyRanges,
|
allWeeklyRanges: proxy.allWeeklyRanges,
|
||||||
patternMatcher: NewPatternMatcher(),
|
patternMatcher: NewPatternMatcher(),
|
||||||
}
|
}
|
||||||
for lineNo, line := range strings.Split(string(bin), "\n") {
|
for lineNo, line := range strings.Split(lines, "\n") {
|
||||||
line = TrimAndStripInlineComments(line)
|
line = TrimAndStripInlineComments(line)
|
||||||
if len(line) == 0 {
|
if len(line) == 0 {
|
||||||
continue
|
continue
|
||||||
|
@ -148,8 +151,7 @@ func (plugin *PluginBlockName) Eval(pluginsState *PluginsState, msg *dns.Msg) er
|
||||||
|
|
||||||
// ---
|
// ---
|
||||||
|
|
||||||
type PluginBlockNameResponse struct {
|
type PluginBlockNameResponse struct{}
|
||||||
}
|
|
||||||
|
|
||||||
func (plugin *PluginBlockNameResponse) Name() string {
|
func (plugin *PluginBlockNameResponse) Name() string {
|
||||||
return "block_name"
|
return "block_name"
|
||||||
|
|
|
@ -119,9 +119,11 @@ var undelegatedSet = []string{
|
||||||
"envoy",
|
"envoy",
|
||||||
"example",
|
"example",
|
||||||
"f.f.ip6.arpa",
|
"f.f.ip6.arpa",
|
||||||
|
"fritz.box",
|
||||||
"grp",
|
"grp",
|
||||||
"gw==",
|
"gw==",
|
||||||
"home",
|
"home",
|
||||||
|
"home.arpa",
|
||||||
"hub",
|
"hub",
|
||||||
"internal",
|
"internal",
|
||||||
"intra",
|
"intra",
|
||||||
|
@ -134,6 +136,7 @@ var undelegatedSet = []string{
|
||||||
"localdomain",
|
"localdomain",
|
||||||
"localhost",
|
"localhost",
|
||||||
"localnet",
|
"localnet",
|
||||||
|
"mail",
|
||||||
"modem",
|
"modem",
|
||||||
"mynet",
|
"mynet",
|
||||||
"myrouter",
|
"myrouter",
|
||||||
|
|
|
@ -6,8 +6,7 @@ import (
|
||||||
"github.com/miekg/dns"
|
"github.com/miekg/dns"
|
||||||
)
|
)
|
||||||
|
|
||||||
type PluginBlockUnqualified struct {
|
type PluginBlockUnqualified struct{}
|
||||||
}
|
|
||||||
|
|
||||||
func (plugin *PluginBlockUnqualified) Name() string {
|
func (plugin *PluginBlockUnqualified) Name() string {
|
||||||
return "block_unqualified"
|
return "block_unqualified"
|
||||||
|
|
|
@ -6,8 +6,8 @@ import (
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
lru "github.com/hashicorp/golang-lru"
|
|
||||||
"github.com/miekg/dns"
|
"github.com/miekg/dns"
|
||||||
|
sieve "github.com/opencoff/go-sieve"
|
||||||
)
|
)
|
||||||
|
|
||||||
const StaleResponseTTL = 30 * time.Second
|
const StaleResponseTTL = 30 * time.Second
|
||||||
|
@ -19,7 +19,7 @@ type CachedResponse struct {
|
||||||
|
|
||||||
type CachedResponses struct {
|
type CachedResponses struct {
|
||||||
sync.RWMutex
|
sync.RWMutex
|
||||||
cache *lru.ARCCache
|
cache *sieve.Sieve[[32]byte, CachedResponse]
|
||||||
}
|
}
|
||||||
|
|
||||||
var cachedResponses CachedResponses
|
var cachedResponses CachedResponses
|
||||||
|
@ -45,8 +45,7 @@ func computeCacheKey(pluginsState *PluginsState, msg *dns.Msg) [32]byte {
|
||||||
|
|
||||||
// ---
|
// ---
|
||||||
|
|
||||||
type PluginCache struct {
|
type PluginCache struct{}
|
||||||
}
|
|
||||||
|
|
||||||
func (plugin *PluginCache) Name() string {
|
func (plugin *PluginCache) Name() string {
|
||||||
return "cache"
|
return "cache"
|
||||||
|
@ -76,12 +75,11 @@ func (plugin *PluginCache) Eval(pluginsState *PluginsState, msg *dns.Msg) error
|
||||||
cachedResponses.RUnlock()
|
cachedResponses.RUnlock()
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
cachedAny, ok := cachedResponses.cache.Get(cacheKey)
|
cached, ok := cachedResponses.cache.Get(cacheKey)
|
||||||
if !ok {
|
if !ok {
|
||||||
cachedResponses.RUnlock()
|
cachedResponses.RUnlock()
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
cached := cachedAny.(CachedResponse)
|
|
||||||
expiration := cached.expiration
|
expiration := cached.expiration
|
||||||
synth := cached.msg.Copy()
|
synth := cached.msg.Copy()
|
||||||
cachedResponses.RUnlock()
|
cachedResponses.RUnlock()
|
||||||
|
@ -108,8 +106,7 @@ func (plugin *PluginCache) Eval(pluginsState *PluginsState, msg *dns.Msg) error
|
||||||
|
|
||||||
// ---
|
// ---
|
||||||
|
|
||||||
type PluginCacheResponse struct {
|
type PluginCacheResponse struct{}
|
||||||
}
|
|
||||||
|
|
||||||
func (plugin *PluginCacheResponse) Name() string {
|
func (plugin *PluginCacheResponse) Name() string {
|
||||||
return "cache_response"
|
return "cache_response"
|
||||||
|
@ -153,8 +150,8 @@ func (plugin *PluginCacheResponse) Eval(pluginsState *PluginsState, msg *dns.Msg
|
||||||
cachedResponses.Lock()
|
cachedResponses.Lock()
|
||||||
if cachedResponses.cache == nil {
|
if cachedResponses.cache == nil {
|
||||||
var err error
|
var err error
|
||||||
cachedResponses.cache, err = lru.NewARC(pluginsState.cacheSize)
|
cachedResponses.cache = sieve.New[[32]byte, CachedResponse](pluginsState.cacheSize)
|
||||||
if err != nil {
|
if cachedResponses.cache == nil {
|
||||||
cachedResponses.Unlock()
|
cachedResponses.Unlock()
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
|
@ -39,7 +39,7 @@ func (plugin *PluginCloak) Description() string {
|
||||||
|
|
||||||
func (plugin *PluginCloak) Init(proxy *Proxy) error {
|
func (plugin *PluginCloak) Init(proxy *Proxy) error {
|
||||||
dlog.Noticef("Loading the set of cloaking rules from [%s]", proxy.cloakFile)
|
dlog.Noticef("Loading the set of cloaking rules from [%s]", proxy.cloakFile)
|
||||||
bin, err := ReadTextFile(proxy.cloakFile)
|
lines, err := ReadTextFile(proxy.cloakFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -47,7 +47,7 @@ func (plugin *PluginCloak) Init(proxy *Proxy) error {
|
||||||
plugin.createPTR = proxy.cloakedPTR
|
plugin.createPTR = proxy.cloakedPTR
|
||||||
plugin.patternMatcher = NewPatternMatcher()
|
plugin.patternMatcher = NewPatternMatcher()
|
||||||
cloakedNames := make(map[string]*CloakedName)
|
cloakedNames := make(map[string]*CloakedName)
|
||||||
for lineNo, line := range strings.Split(string(bin), "\n") {
|
for lineNo, line := range strings.Split(lines, "\n") {
|
||||||
line = TrimAndStripInlineComments(line)
|
line = TrimAndStripInlineComments(line)
|
||||||
if len(line) == 0 {
|
if len(line) == 0 {
|
||||||
continue
|
continue
|
||||||
|
@ -73,9 +73,9 @@ func (plugin *PluginCloak) Init(proxy *Proxy) error {
|
||||||
ip := net.ParseIP(target)
|
ip := net.ParseIP(target)
|
||||||
if ip != nil {
|
if ip != nil {
|
||||||
if ipv4 := ip.To4(); ipv4 != nil {
|
if ipv4 := ip.To4(); ipv4 != nil {
|
||||||
cloakedName.ipv4 = append((*cloakedName).ipv4, ipv4)
|
cloakedName.ipv4 = append(cloakedName.ipv4, ipv4)
|
||||||
} else if ipv6 := ip.To16(); ipv6 != nil {
|
} else if ipv6 := ip.To16(); ipv6 != nil {
|
||||||
cloakedName.ipv6 = append((*cloakedName).ipv6, ipv6)
|
cloakedName.ipv6 = append(cloakedName.ipv6, ipv6)
|
||||||
} else {
|
} else {
|
||||||
dlog.Errorf("Invalid IP address in cloaking rule at line %d", 1+lineNo)
|
dlog.Errorf("Invalid IP address in cloaking rule at line %d", 1+lineNo)
|
||||||
continue
|
continue
|
||||||
|
|
|
@ -49,7 +49,7 @@ func (plugin *PluginDNS64) Init(proxy *Proxy) error {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
dlog.Infof("Registered DNS64 prefix [%s]", pref.String())
|
dlog.Noticef("Registered DNS64 prefix [%s]", pref.String())
|
||||||
plugin.pref64 = append(plugin.pref64, pref)
|
plugin.pref64 = append(plugin.pref64, pref)
|
||||||
}
|
}
|
||||||
} else if len(proxy.dns64Resolvers) != 0 {
|
} else if len(proxy.dns64Resolvers) != 0 {
|
||||||
|
@ -57,7 +57,10 @@ func (plugin *PluginDNS64) Init(proxy *Proxy) error {
|
||||||
if err := plugin.refreshPref64(); err != nil {
|
if err := plugin.refreshPref64(); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
} else {
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
dlog.Notice("DNS64 map enabled")
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -105,7 +108,7 @@ func (plugin *PluginDNS64) Eval(pluginsState *PluginsState, msg *dns.Msg) error
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if err != nil || resp.Rcode != dns.RcodeSuccess {
|
if resp.Rcode != dns.RcodeSuccess {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -152,11 +155,10 @@ func (plugin *PluginDNS64) Eval(pluginsState *PluginsState, msg *dns.Msg) error
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
synth := EmptyResponseFromMessage(msg)
|
msg.Answer = synth64
|
||||||
synth.Answer = append(synth.Answer, synth64...)
|
msg.AuthenticatedData = false
|
||||||
|
msg.SetEdns0(uint16(MaxDNSUDPSafePacketSize), false)
|
||||||
|
|
||||||
pluginsState.synthResponse = synth
|
|
||||||
pluginsState.action = PluginsActionSynth
|
|
||||||
pluginsState.returnCode = PluginsReturnCodeCloak
|
pluginsState.returnCode = PluginsReturnCodeCloak
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
|
@ -191,7 +193,6 @@ func (plugin *PluginDNS64) fetchPref64(resolver string) error {
|
||||||
|
|
||||||
client := new(dns.Client)
|
client := new(dns.Client)
|
||||||
resp, _, err := client.Exchange(msg, resolver)
|
resp, _, err := client.Exchange(msg, resolver)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -209,17 +210,17 @@ func (plugin *PluginDNS64) fetchPref64(resolver string) error {
|
||||||
prefEnd := 0
|
prefEnd := 0
|
||||||
|
|
||||||
if wka := net.IPv4(ipv6[12], ipv6[13], ipv6[14], ipv6[15]); wka.Equal(rfc7050WKA1) ||
|
if wka := net.IPv4(ipv6[12], ipv6[13], ipv6[14], ipv6[15]); wka.Equal(rfc7050WKA1) ||
|
||||||
wka.Equal(rfc7050WKA2) { //96
|
wka.Equal(rfc7050WKA2) { // 96
|
||||||
prefEnd = 12
|
prefEnd = 12
|
||||||
} else if wka := net.IPv4(ipv6[9], ipv6[10], ipv6[11], ipv6[12]); wka.Equal(rfc7050WKA1) || wka.Equal(rfc7050WKA2) { //64
|
} else if wka := net.IPv4(ipv6[9], ipv6[10], ipv6[11], ipv6[12]); wka.Equal(rfc7050WKA1) || wka.Equal(rfc7050WKA2) { // 64
|
||||||
prefEnd = 8
|
prefEnd = 8
|
||||||
} else if wka := net.IPv4(ipv6[7], ipv6[9], ipv6[10], ipv6[11]); wka.Equal(rfc7050WKA1) || wka.Equal(rfc7050WKA2) { //56
|
} else if wka := net.IPv4(ipv6[7], ipv6[9], ipv6[10], ipv6[11]); wka.Equal(rfc7050WKA1) || wka.Equal(rfc7050WKA2) { // 56
|
||||||
prefEnd = 7
|
prefEnd = 7
|
||||||
} else if wka := net.IPv4(ipv6[6], ipv6[7], ipv6[9], ipv6[10]); wka.Equal(rfc7050WKA1) || wka.Equal(rfc7050WKA2) { //48
|
} else if wka := net.IPv4(ipv6[6], ipv6[7], ipv6[9], ipv6[10]); wka.Equal(rfc7050WKA1) || wka.Equal(rfc7050WKA2) { // 48
|
||||||
prefEnd = 6
|
prefEnd = 6
|
||||||
} else if wka := net.IPv4(ipv6[5], ipv6[6], ipv6[7], ipv6[9]); wka.Equal(rfc7050WKA1) || wka.Equal(rfc7050WKA2) { //40
|
} else if wka := net.IPv4(ipv6[5], ipv6[6], ipv6[7], ipv6[9]); wka.Equal(rfc7050WKA1) || wka.Equal(rfc7050WKA2) { // 40
|
||||||
prefEnd = 5
|
prefEnd = 5
|
||||||
} else if wka := net.IPv4(ipv6[4], ipv6[5], ipv6[6], ipv6[7]); wka.Equal(rfc7050WKA1) || wka.Equal(rfc7050WKA2) { //32
|
} else if wka := net.IPv4(ipv6[4], ipv6[5], ipv6[6], ipv6[7]); wka.Equal(rfc7050WKA1) || wka.Equal(rfc7050WKA2) { // 32
|
||||||
prefEnd = 4
|
prefEnd = 4
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -9,8 +9,7 @@ import (
|
||||||
"github.com/miekg/dns"
|
"github.com/miekg/dns"
|
||||||
)
|
)
|
||||||
|
|
||||||
type PluginFirefox struct {
|
type PluginFirefox struct{}
|
||||||
}
|
|
||||||
|
|
||||||
func (plugin *PluginFirefox) Name() string {
|
func (plugin *PluginFirefox) Name() string {
|
||||||
return "firefox"
|
return "firefox"
|
||||||
|
|
|
@ -29,11 +29,11 @@ func (plugin *PluginForward) Description() string {
|
||||||
|
|
||||||
func (plugin *PluginForward) Init(proxy *Proxy) error {
|
func (plugin *PluginForward) Init(proxy *Proxy) error {
|
||||||
dlog.Noticef("Loading the set of forwarding rules from [%s]", proxy.forwardFile)
|
dlog.Noticef("Loading the set of forwarding rules from [%s]", proxy.forwardFile)
|
||||||
bin, err := ReadTextFile(proxy.forwardFile)
|
lines, err := ReadTextFile(proxy.forwardFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
for lineNo, line := range strings.Split(string(bin), "\n") {
|
for lineNo, line := range strings.Split(lines, "\n") {
|
||||||
line = TrimAndStripInlineComments(line)
|
line = TrimAndStripInlineComments(line)
|
||||||
if len(line) == 0 {
|
if len(line) == 0 {
|
||||||
continue
|
continue
|
||||||
|
@ -49,9 +49,16 @@ func (plugin *PluginForward) Init(proxy *Proxy) error {
|
||||||
var servers []string
|
var servers []string
|
||||||
for _, server := range strings.Split(serversStr, ",") {
|
for _, server := range strings.Split(serversStr, ",") {
|
||||||
server = strings.TrimSpace(server)
|
server = strings.TrimSpace(server)
|
||||||
if net.ParseIP(server) != nil {
|
server = strings.TrimPrefix(server, "[")
|
||||||
server = fmt.Sprintf("%s:%d", server, 53)
|
server = strings.TrimSuffix(server, "]")
|
||||||
|
if ip := net.ParseIP(server); ip != nil {
|
||||||
|
if ip.To4() != nil {
|
||||||
|
server = fmt.Sprintf("%s:%d", server, 53)
|
||||||
|
} else {
|
||||||
|
server = fmt.Sprintf("[%s]:%d", server, 53)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
dlog.Infof("Forwarding [%s] to %s", domain, server)
|
||||||
servers = append(servers, server)
|
servers = append(servers, server)
|
||||||
}
|
}
|
||||||
if len(servers) == 0 {
|
if len(servers) == 0 {
|
||||||
|
@ -82,8 +89,9 @@ func (plugin *PluginForward) Eval(pluginsState *PluginsState, msg *dns.Msg) erro
|
||||||
if candidateLen > qNameLen {
|
if candidateLen > qNameLen {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if qName[qNameLen-candidateLen:] == candidate.domain &&
|
if (qName[qNameLen-candidateLen:] == candidate.domain &&
|
||||||
(candidateLen == qNameLen || (qName[qNameLen-candidateLen-1] == '.')) {
|
(candidateLen == qNameLen || (qName[qNameLen-candidateLen-1] == '.'))) ||
|
||||||
|
(candidate.domain == ".") {
|
||||||
servers = candidate.servers
|
servers = candidate.servers
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|
|
@ -43,17 +43,21 @@ func (plugin *PluginNxLog) Eval(pluginsState *PluginsState, msg *dns.Msg) error
|
||||||
if msg.Rcode != dns.RcodeNameError {
|
if msg.Rcode != dns.RcodeNameError {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
var clientIPStr string
|
||||||
|
switch pluginsState.clientProto {
|
||||||
|
case "udp":
|
||||||
|
clientIPStr = (*pluginsState.clientAddr).(*net.UDPAddr).IP.String()
|
||||||
|
case "tcp", "local_doh":
|
||||||
|
clientIPStr = (*pluginsState.clientAddr).(*net.TCPAddr).IP.String()
|
||||||
|
default:
|
||||||
|
// Ignore internal flow.
|
||||||
|
return nil
|
||||||
|
}
|
||||||
question := msg.Question[0]
|
question := msg.Question[0]
|
||||||
qType, ok := dns.TypeToString[question.Qtype]
|
qType, ok := dns.TypeToString[question.Qtype]
|
||||||
if !ok {
|
if !ok {
|
||||||
qType = string(qType)
|
qType = string(qType)
|
||||||
}
|
}
|
||||||
var clientIPStr string
|
|
||||||
if pluginsState.clientProto == "udp" {
|
|
||||||
clientIPStr = (*pluginsState.clientAddr).(*net.UDPAddr).IP.String()
|
|
||||||
} else {
|
|
||||||
clientIPStr = (*pluginsState.clientAddr).(*net.TCPAddr).IP.String()
|
|
||||||
}
|
|
||||||
qName := pluginsState.qName
|
qName := pluginsState.qName
|
||||||
|
|
||||||
var line string
|
var line string
|
||||||
|
|
|
@ -43,6 +43,16 @@ func (plugin *PluginQueryLog) Reload() error {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (plugin *PluginQueryLog) Eval(pluginsState *PluginsState, msg *dns.Msg) error {
|
func (plugin *PluginQueryLog) Eval(pluginsState *PluginsState, msg *dns.Msg) error {
|
||||||
|
var clientIPStr string
|
||||||
|
switch pluginsState.clientProto {
|
||||||
|
case "udp":
|
||||||
|
clientIPStr = (*pluginsState.clientAddr).(*net.UDPAddr).IP.String()
|
||||||
|
case "tcp", "local_doh":
|
||||||
|
clientIPStr = (*pluginsState.clientAddr).(*net.TCPAddr).IP.String()
|
||||||
|
default:
|
||||||
|
// Ignore internal flow.
|
||||||
|
return nil
|
||||||
|
}
|
||||||
question := msg.Question[0]
|
question := msg.Question[0]
|
||||||
qType, ok := dns.TypeToString[question.Qtype]
|
qType, ok := dns.TypeToString[question.Qtype]
|
||||||
if !ok {
|
if !ok {
|
||||||
|
@ -55,12 +65,6 @@ func (plugin *PluginQueryLog) Eval(pluginsState *PluginsState, msg *dns.Msg) err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
var clientIPStr string
|
|
||||||
if pluginsState.clientProto == "udp" {
|
|
||||||
clientIPStr = (*pluginsState.clientAddr).(*net.UDPAddr).IP.String()
|
|
||||||
} else {
|
|
||||||
clientIPStr = (*pluginsState.clientAddr).(*net.TCPAddr).IP.String()
|
|
||||||
}
|
|
||||||
qName := pluginsState.qName
|
qName := pluginsState.qName
|
||||||
|
|
||||||
if pluginsState.cacheHit {
|
if pluginsState.cacheHit {
|
||||||
|
|
|
@ -18,8 +18,10 @@ func (plugin *PluginQueryMeta) Description() string {
|
||||||
|
|
||||||
func (plugin *PluginQueryMeta) Init(proxy *Proxy) error {
|
func (plugin *PluginQueryMeta) Init(proxy *Proxy) error {
|
||||||
queryMetaRR := new(dns.TXT)
|
queryMetaRR := new(dns.TXT)
|
||||||
queryMetaRR.Hdr = dns.RR_Header{Name: ".", Rrtype: dns.TypeTXT,
|
queryMetaRR.Hdr = dns.RR_Header{
|
||||||
Class: dns.ClassINET, Ttl: 86400}
|
Name: ".", Rrtype: dns.TypeTXT,
|
||||||
|
Class: dns.ClassINET, Ttl: 86400,
|
||||||
|
}
|
||||||
queryMetaRR.Txt = proxy.queryMeta
|
queryMetaRR.Txt = proxy.queryMeta
|
||||||
plugin.queryMetaRR = queryMetaRR
|
plugin.queryMetaRR = queryMetaRR
|
||||||
return nil
|
return nil
|
||||||
|
|
|
@ -189,11 +189,11 @@ func parseBlockedQueryResponse(blockedResponse string, pluginsGlobals *PluginsGl
|
||||||
|
|
||||||
if strings.HasPrefix(blockedResponse, "a:") {
|
if strings.HasPrefix(blockedResponse, "a:") {
|
||||||
blockedIPStrings := strings.Split(blockedResponse, ",")
|
blockedIPStrings := strings.Split(blockedResponse, ",")
|
||||||
(*pluginsGlobals).respondWithIPv4 = net.ParseIP(strings.TrimPrefix(blockedIPStrings[0], "a:"))
|
pluginsGlobals.respondWithIPv4 = net.ParseIP(strings.TrimPrefix(blockedIPStrings[0], "a:"))
|
||||||
|
|
||||||
if (*pluginsGlobals).respondWithIPv4 == nil {
|
if pluginsGlobals.respondWithIPv4 == nil {
|
||||||
dlog.Notice("Error parsing IPv4 response given in blocked_query_response option, defaulting to `hinfo`")
|
dlog.Notice("Error parsing IPv4 response given in blocked_query_response option, defaulting to `hinfo`")
|
||||||
(*pluginsGlobals).refusedCodeInResponses = false
|
pluginsGlobals.refusedCodeInResponses = false
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -203,9 +203,9 @@ func parseBlockedQueryResponse(blockedResponse string, pluginsGlobals *PluginsGl
|
||||||
if strings.HasPrefix(ipv6Response, "[") {
|
if strings.HasPrefix(ipv6Response, "[") {
|
||||||
ipv6Response = strings.Trim(ipv6Response, "[]")
|
ipv6Response = strings.Trim(ipv6Response, "[]")
|
||||||
}
|
}
|
||||||
(*pluginsGlobals).respondWithIPv6 = net.ParseIP(ipv6Response)
|
pluginsGlobals.respondWithIPv6 = net.ParseIP(ipv6Response)
|
||||||
|
|
||||||
if (*pluginsGlobals).respondWithIPv6 == nil {
|
if pluginsGlobals.respondWithIPv6 == nil {
|
||||||
dlog.Notice(
|
dlog.Notice(
|
||||||
"Error parsing IPv6 response given in blocked_query_response option, defaulting to IPv4",
|
"Error parsing IPv6 response given in blocked_query_response option, defaulting to IPv4",
|
||||||
)
|
)
|
||||||
|
@ -215,18 +215,18 @@ func parseBlockedQueryResponse(blockedResponse string, pluginsGlobals *PluginsGl
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (*pluginsGlobals).respondWithIPv6 == nil {
|
if pluginsGlobals.respondWithIPv6 == nil {
|
||||||
(*pluginsGlobals).respondWithIPv6 = (*pluginsGlobals).respondWithIPv4
|
pluginsGlobals.respondWithIPv6 = pluginsGlobals.respondWithIPv4
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
switch blockedResponse {
|
switch blockedResponse {
|
||||||
case "refused":
|
case "refused":
|
||||||
(*pluginsGlobals).refusedCodeInResponses = true
|
pluginsGlobals.refusedCodeInResponses = true
|
||||||
case "hinfo":
|
case "hinfo":
|
||||||
(*pluginsGlobals).refusedCodeInResponses = false
|
pluginsGlobals.refusedCodeInResponses = false
|
||||||
default:
|
default:
|
||||||
dlog.Noticef("Invalid blocked_query_response option [%s], defaulting to `hinfo`", blockedResponse)
|
dlog.Noticef("Invalid blocked_query_response option [%s], defaulting to `hinfo`", blockedResponse)
|
||||||
(*pluginsGlobals).refusedCodeInResponses = false
|
pluginsGlobals.refusedCodeInResponses = false
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -74,6 +74,7 @@ type Proxy struct {
|
||||||
certRefreshDelayAfterFailure time.Duration
|
certRefreshDelayAfterFailure time.Duration
|
||||||
timeout time.Duration
|
timeout time.Duration
|
||||||
certRefreshDelay time.Duration
|
certRefreshDelay time.Duration
|
||||||
|
certRefreshConcurrency int
|
||||||
cacheSize int
|
cacheSize int
|
||||||
logMaxBackups int
|
logMaxBackups int
|
||||||
logMaxAge int
|
logMaxAge int
|
||||||
|
@ -117,11 +118,18 @@ func (proxy *Proxy) registerLocalDoHListener(listener *net.TCPListener) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (proxy *Proxy) addDNSListener(listenAddrStr string) {
|
func (proxy *Proxy) addDNSListener(listenAddrStr string) {
|
||||||
listenUDPAddr, err := net.ResolveUDPAddr("udp", listenAddrStr)
|
udp := "udp"
|
||||||
|
tcp := "tcp"
|
||||||
|
isIPv4 := isDigit(listenAddrStr[0])
|
||||||
|
if isIPv4 {
|
||||||
|
udp = "udp4"
|
||||||
|
tcp = "tcp4"
|
||||||
|
}
|
||||||
|
listenUDPAddr, err := net.ResolveUDPAddr(udp, listenAddrStr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
dlog.Fatal(err)
|
dlog.Fatal(err)
|
||||||
}
|
}
|
||||||
listenTCPAddr, err := net.ResolveTCPAddr("tcp", listenAddrStr)
|
listenTCPAddr, err := net.ResolveTCPAddr(tcp, listenAddrStr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
dlog.Fatal(err)
|
dlog.Fatal(err)
|
||||||
}
|
}
|
||||||
|
@ -140,11 +148,11 @@ func (proxy *Proxy) addDNSListener(listenAddrStr string) {
|
||||||
// if 'userName' is set and we are the parent process
|
// if 'userName' is set and we are the parent process
|
||||||
if !proxy.child {
|
if !proxy.child {
|
||||||
// parent
|
// parent
|
||||||
listenerUDP, err := net.ListenUDP("udp", listenUDPAddr)
|
listenerUDP, err := net.ListenUDP(udp, listenUDPAddr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
dlog.Fatal(err)
|
dlog.Fatal(err)
|
||||||
}
|
}
|
||||||
listenerTCP, err := net.ListenTCP("tcp", listenTCPAddr)
|
listenerTCP, err := net.ListenTCP(tcp, listenTCPAddr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
dlog.Fatal(err)
|
dlog.Fatal(err)
|
||||||
}
|
}
|
||||||
|
@ -185,7 +193,12 @@ func (proxy *Proxy) addDNSListener(listenAddrStr string) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (proxy *Proxy) addLocalDoHListener(listenAddrStr string) {
|
func (proxy *Proxy) addLocalDoHListener(listenAddrStr string) {
|
||||||
listenTCPAddr, err := net.ResolveTCPAddr("tcp", listenAddrStr)
|
network := "tcp"
|
||||||
|
isIPv4 := isDigit(listenAddrStr[0])
|
||||||
|
if isIPv4 {
|
||||||
|
network = "tcp4"
|
||||||
|
}
|
||||||
|
listenTCPAddr, err := net.ResolveTCPAddr(network, listenAddrStr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
dlog.Fatal(err)
|
dlog.Fatal(err)
|
||||||
}
|
}
|
||||||
|
@ -201,7 +214,7 @@ func (proxy *Proxy) addLocalDoHListener(listenAddrStr string) {
|
||||||
// if 'userName' is set and we are the parent process
|
// if 'userName' is set and we are the parent process
|
||||||
if !proxy.child {
|
if !proxy.child {
|
||||||
// parent
|
// parent
|
||||||
listenerTCP, err := net.ListenTCP("tcp", listenTCPAddr)
|
listenerTCP, err := net.ListenTCP(network, listenTCPAddr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
dlog.Fatal(err)
|
dlog.Fatal(err)
|
||||||
}
|
}
|
||||||
|
@ -242,6 +255,8 @@ func (proxy *Proxy) StartProxy() {
|
||||||
dlog.Fatal(err)
|
dlog.Fatal(err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
proxy.xTransport.internalResolverReady = false
|
||||||
|
proxy.xTransport.internalResolvers = proxy.listenAddresses
|
||||||
liveServers, err := proxy.serversInfo.refresh(proxy)
|
liveServers, err := proxy.serversInfo.refresh(proxy)
|
||||||
if liveServers > 0 {
|
if liveServers > 0 {
|
||||||
proxy.certIgnoreTimestamp = false
|
proxy.certIgnoreTimestamp = false
|
||||||
|
@ -439,7 +454,13 @@ func (proxy *Proxy) udpListenerFromAddr(listenAddr *net.UDPAddr) error {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
clientPc, err := listenConfig.ListenPacket(context.Background(), "udp", listenAddr.String())
|
listenAddrStr := listenAddr.String()
|
||||||
|
network := "udp"
|
||||||
|
isIPv4 := isDigit(listenAddrStr[0])
|
||||||
|
if isIPv4 {
|
||||||
|
network = "udp4"
|
||||||
|
}
|
||||||
|
clientPc, err := listenConfig.ListenPacket(context.Background(), network, listenAddrStr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -453,7 +474,13 @@ func (proxy *Proxy) tcpListenerFromAddr(listenAddr *net.TCPAddr) error {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
acceptPc, err := listenConfig.Listen(context.Background(), "tcp", listenAddr.String())
|
listenAddrStr := listenAddr.String()
|
||||||
|
network := "tcp"
|
||||||
|
isIPv4 := isDigit(listenAddrStr[0])
|
||||||
|
if isIPv4 {
|
||||||
|
network = "tcp4"
|
||||||
|
}
|
||||||
|
acceptPc, err := listenConfig.Listen(context.Background(), network, listenAddrStr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -467,7 +494,13 @@ func (proxy *Proxy) localDoHListenerFromAddr(listenAddr *net.TCPAddr) error {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
acceptPc, err := listenConfig.Listen(context.Background(), "tcp", listenAddr.String())
|
listenAddrStr := listenAddr.String()
|
||||||
|
network := "tcp"
|
||||||
|
isIPv4 := isDigit(listenAddrStr[0])
|
||||||
|
if isIPv4 {
|
||||||
|
network = "tcp4"
|
||||||
|
}
|
||||||
|
acceptPc, err := listenConfig.Listen(context.Background(), network, listenAddrStr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -515,7 +548,7 @@ func (proxy *Proxy) exchangeWithUDPServer(
|
||||||
var pc net.Conn
|
var pc net.Conn
|
||||||
proxyDialer := proxy.xTransport.proxyDialer
|
proxyDialer := proxy.xTransport.proxyDialer
|
||||||
if proxyDialer == nil {
|
if proxyDialer == nil {
|
||||||
pc, err = net.DialUDP("udp", nil, upstreamAddr)
|
pc, err = net.DialTimeout("udp", upstreamAddr.String(), serverInfo.Timeout)
|
||||||
} else {
|
} else {
|
||||||
pc, err = (*proxyDialer).Dial("udp", upstreamAddr.String())
|
pc, err = (*proxyDialer).Dial("udp", upstreamAddr.String())
|
||||||
}
|
}
|
||||||
|
@ -558,7 +591,7 @@ func (proxy *Proxy) exchangeWithTCPServer(
|
||||||
var pc net.Conn
|
var pc net.Conn
|
||||||
proxyDialer := proxy.xTransport.proxyDialer
|
proxyDialer := proxy.xTransport.proxyDialer
|
||||||
if proxyDialer == nil {
|
if proxyDialer == nil {
|
||||||
pc, err = net.DialTCP("tcp", nil, upstreamAddr)
|
pc, err = net.DialTimeout("tcp", upstreamAddr.String(), serverInfo.Timeout)
|
||||||
} else {
|
} else {
|
||||||
pc, err = (*proxyDialer).Dial("tcp", upstreamAddr.String())
|
pc, err = (*proxyDialer).Dial("tcp", upstreamAddr.String())
|
||||||
}
|
}
|
||||||
|
@ -617,7 +650,7 @@ func (proxy *Proxy) processIncomingQuery(
|
||||||
start time.Time,
|
start time.Time,
|
||||||
onlyCached bool,
|
onlyCached bool,
|
||||||
) []byte {
|
) []byte {
|
||||||
var response []byte = nil
|
var response []byte
|
||||||
if len(query) < MinDNSPacketSize {
|
if len(query) < MinDNSPacketSize {
|
||||||
return response
|
return response
|
||||||
}
|
}
|
||||||
|
|
|
@ -11,8 +11,10 @@ import (
|
||||||
"github.com/miekg/dns"
|
"github.com/miekg/dns"
|
||||||
)
|
)
|
||||||
|
|
||||||
const myResolverHost string = "resolver.dnscrypt.info."
|
const (
|
||||||
const nonexistentName string = "nonexistent-zone.dnscrypt-test."
|
myResolverHost string = "resolver.dnscrypt.info."
|
||||||
|
nonexistentName string = "nonexistent-zone.dnscrypt-test."
|
||||||
|
)
|
||||||
|
|
||||||
func resolveQuery(server string, qName string, qType uint16, sendClientSubnet bool) (*dns.Msg, error) {
|
func resolveQuery(server string, qName string, qType uint16, sendClientSubnet bool) (*dns.Msg, error) {
|
||||||
client := new(dns.Client)
|
client := new(dns.Client)
|
||||||
|
@ -139,6 +141,7 @@ func Resolve(server string, name string, singleResolver bool) {
|
||||||
fmt.Printf("Lying : ")
|
fmt.Printf("Lying : ")
|
||||||
response, err := resolveQuery(server, nonexistentName, dns.TypeA, false)
|
response, err := resolveQuery(server, nonexistentName, dns.TypeA, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
fmt.Printf("[%v]", err)
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
if response.Rcode == dns.RcodeSuccess {
|
if response.Rcode == dns.RcodeSuccess {
|
||||||
|
|
|
@ -224,16 +224,34 @@ func (serversInfo *ServersInfo) refresh(proxy *Proxy) (int, error) {
|
||||||
dlog.Debug("Refreshing certificates")
|
dlog.Debug("Refreshing certificates")
|
||||||
serversInfo.RLock()
|
serversInfo.RLock()
|
||||||
// Appending registeredServers slice from sources may allocate new memory.
|
// Appending registeredServers slice from sources may allocate new memory.
|
||||||
registeredServers := make([]RegisteredServer, len(serversInfo.registeredServers))
|
serversCount := len(serversInfo.registeredServers)
|
||||||
|
registeredServers := make([]RegisteredServer, serversCount)
|
||||||
copy(registeredServers, serversInfo.registeredServers)
|
copy(registeredServers, serversInfo.registeredServers)
|
||||||
serversInfo.RUnlock()
|
serversInfo.RUnlock()
|
||||||
|
countChannel := make(chan struct{}, proxy.certRefreshConcurrency)
|
||||||
|
errorChannel := make(chan error, serversCount)
|
||||||
|
for i := range registeredServers {
|
||||||
|
countChannel <- struct{}{}
|
||||||
|
go func(registeredServer *RegisteredServer) {
|
||||||
|
err := serversInfo.refreshServer(proxy, registeredServer.name, registeredServer.stamp)
|
||||||
|
if err == nil {
|
||||||
|
proxy.xTransport.internalResolverReady = true
|
||||||
|
}
|
||||||
|
errorChannel <- err
|
||||||
|
<-countChannel
|
||||||
|
}(®isteredServers[i])
|
||||||
|
}
|
||||||
liveServers := 0
|
liveServers := 0
|
||||||
var err error
|
var err error
|
||||||
for _, registeredServer := range registeredServers {
|
for i := 0; i < serversCount; i++ {
|
||||||
if err = serversInfo.refreshServer(proxy, registeredServer.name, registeredServer.stamp); err == nil {
|
err = <-errorChannel
|
||||||
|
if err == nil {
|
||||||
liveServers++
|
liveServers++
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if liveServers > 0 {
|
||||||
|
err = nil
|
||||||
|
}
|
||||||
serversInfo.Lock()
|
serversInfo.Lock()
|
||||||
sort.SliceStable(serversInfo.inner, func(i, j int) bool {
|
sort.SliceStable(serversInfo.inner, func(i, j int) bool {
|
||||||
return serversInfo.inner[i].initialRtt < serversInfo.inner[j].initialRtt
|
return serversInfo.inner[i].initialRtt < serversInfo.inner[j].initialRtt
|
||||||
|
@ -311,7 +329,7 @@ func (serversInfo *ServersInfo) getOne() *ServerInfo {
|
||||||
serversInfo.estimatorUpdate(candidate)
|
serversInfo.estimatorUpdate(candidate)
|
||||||
}
|
}
|
||||||
serverInfo := serversInfo.inner[candidate]
|
serverInfo := serversInfo.inner[candidate]
|
||||||
dlog.Debugf("Using candidate [%s] RTT: %d", (*serverInfo).Name, int((*serverInfo).rtt.Value()))
|
dlog.Debugf("Using candidate [%s] RTT: %d", serverInfo.Name, int(serverInfo.rtt.Value()))
|
||||||
serversInfo.Unlock()
|
serversInfo.Unlock()
|
||||||
|
|
||||||
return serverInfo
|
return serverInfo
|
||||||
|
@ -344,7 +362,7 @@ func findFarthestRoute(proxy *Proxy, name string, relayStamps []stamps.ServerSta
|
||||||
server := proxy.serversInfo.registeredServers[serverIdx]
|
server := proxy.serversInfo.registeredServers[serverIdx]
|
||||||
proxy.serversInfo.RUnlock()
|
proxy.serversInfo.RUnlock()
|
||||||
|
|
||||||
// Fall back to random relays until the logic is implementeed for non-DNSCrypt relays
|
// Fall back to random relays until the logic is implemented for non-DNSCrypt relays
|
||||||
if server.stamp.Proto == stamps.StampProtoTypeODoHTarget {
|
if server.stamp.Proto == stamps.StampProtoTypeODoHTarget {
|
||||||
candidates := make([]int, 0)
|
candidates := make([]int, 0)
|
||||||
for relayIdx, relayStamp := range relayStamps {
|
for relayIdx, relayStamp := range relayStamps {
|
||||||
|
@ -533,7 +551,7 @@ func route(proxy *Proxy, name string, serverProto stamps.StampProtoType) (*Relay
|
||||||
|
|
||||||
func fetchDNSCryptServerInfo(proxy *Proxy, name string, stamp stamps.ServerStamp, isNew bool) (ServerInfo, error) {
|
func fetchDNSCryptServerInfo(proxy *Proxy, name string, stamp stamps.ServerStamp, isNew bool) (ServerInfo, error) {
|
||||||
if len(stamp.ServerPk) != ed25519.PublicKeySize {
|
if len(stamp.ServerPk) != ed25519.PublicKeySize {
|
||||||
serverPk, err := hex.DecodeString(strings.Replace(string(stamp.ServerPk), ":", "", -1))
|
serverPk, err := hex.DecodeString(strings.ReplaceAll(string(stamp.ServerPk), ":", ""))
|
||||||
if err != nil || len(serverPk) != ed25519.PublicKeySize {
|
if err != nil || len(serverPk) != ed25519.PublicKeySize {
|
||||||
dlog.Fatalf("Unsupported public key for [%s]: [%s]", name, stamp.ServerPk)
|
dlog.Fatalf("Unsupported public key for [%s]: [%s]", name, stamp.ServerPk)
|
||||||
}
|
}
|
||||||
|
@ -614,7 +632,7 @@ func dohTestPacket(msgID uint16) []byte {
|
||||||
msg.SetEdns0(uint16(MaxDNSPacketSize), false)
|
msg.SetEdns0(uint16(MaxDNSPacketSize), false)
|
||||||
ext := new(dns.EDNS0_PADDING)
|
ext := new(dns.EDNS0_PADDING)
|
||||||
ext.Padding = make([]byte, 16)
|
ext.Padding = make([]byte, 16)
|
||||||
crypto_rand.Read(ext.Padding)
|
_, _ = crypto_rand.Read(ext.Padding)
|
||||||
edns0 := msg.IsEdns0()
|
edns0 := msg.IsEdns0()
|
||||||
edns0.Option = append(edns0.Option, ext)
|
edns0.Option = append(edns0.Option, ext)
|
||||||
body, err := msg.Pack()
|
body, err := msg.Pack()
|
||||||
|
@ -637,7 +655,7 @@ func dohNXTestPacket(msgID uint16) []byte {
|
||||||
msg.SetEdns0(uint16(MaxDNSPacketSize), false)
|
msg.SetEdns0(uint16(MaxDNSPacketSize), false)
|
||||||
ext := new(dns.EDNS0_PADDING)
|
ext := new(dns.EDNS0_PADDING)
|
||||||
ext.Padding = make([]byte, 16)
|
ext.Padding = make([]byte, 16)
|
||||||
crypto_rand.Read(ext.Padding)
|
_, _ = crypto_rand.Read(ext.Padding)
|
||||||
edns0 := msg.IsEdns0()
|
edns0 := msg.IsEdns0()
|
||||||
edns0.Option = append(edns0.Option, ext)
|
edns0.Option = append(edns0.Option, ext)
|
||||||
body, err := msg.Pack()
|
body, err := msg.Pack()
|
||||||
|
@ -853,10 +871,17 @@ func _fetchODoHTargetInfo(proxy *Proxy, name string, stamp stamps.ServerStamp, i
|
||||||
if msg.Rcode != dns.RcodeNameError {
|
if msg.Rcode != dns.RcodeNameError {
|
||||||
dlog.Criticalf("[%s] may be a lying resolver", name)
|
dlog.Criticalf("[%s] may be a lying resolver", name)
|
||||||
}
|
}
|
||||||
|
protocol := "http"
|
||||||
protocol := tls.NegotiatedProtocol
|
tlsVersion := uint16(0)
|
||||||
if len(protocol) == 0 {
|
tlsCipherSuite := uint16(0)
|
||||||
protocol = "http/1.x"
|
if tls != nil {
|
||||||
|
protocol = tls.NegotiatedProtocol
|
||||||
|
if len(protocol) == 0 {
|
||||||
|
protocol = "http/1.x"
|
||||||
|
} else {
|
||||||
|
tlsVersion = tls.Version
|
||||||
|
tlsCipherSuite = tls.CipherSuite
|
||||||
|
}
|
||||||
}
|
}
|
||||||
if strings.HasPrefix(protocol, "http/1.") {
|
if strings.HasPrefix(protocol, "http/1.") {
|
||||||
dlog.Warnf("[%s] does not support HTTP/2", name)
|
dlog.Warnf("[%s] does not support HTTP/2", name)
|
||||||
|
@ -864,37 +889,39 @@ func _fetchODoHTargetInfo(proxy *Proxy, name string, stamp stamps.ServerStamp, i
|
||||||
dlog.Infof(
|
dlog.Infof(
|
||||||
"[%s] TLS version: %x - Protocol: %v - Cipher suite: %v",
|
"[%s] TLS version: %x - Protocol: %v - Cipher suite: %v",
|
||||||
name,
|
name,
|
||||||
tls.Version,
|
tlsVersion,
|
||||||
protocol,
|
protocol,
|
||||||
tls.CipherSuite,
|
tlsCipherSuite,
|
||||||
)
|
)
|
||||||
showCerts := proxy.showCerts
|
showCerts := proxy.showCerts
|
||||||
found := false
|
found := false
|
||||||
var wantedHash [32]byte
|
var wantedHash [32]byte
|
||||||
for _, cert := range tls.PeerCertificates {
|
if tls != nil {
|
||||||
h := sha256.Sum256(cert.RawTBSCertificate)
|
for _, cert := range tls.PeerCertificates {
|
||||||
if showCerts {
|
h := sha256.Sum256(cert.RawTBSCertificate)
|
||||||
dlog.Noticef("Advertised relay cert: [%s] [%x]", cert.Subject, h)
|
if showCerts {
|
||||||
} else {
|
dlog.Noticef("Advertised relay cert: [%s] [%x]", cert.Subject, h)
|
||||||
dlog.Debugf("Advertised relay cert: [%s] [%x]", cert.Subject, h)
|
} else {
|
||||||
}
|
dlog.Debugf("Advertised relay cert: [%s] [%x]", cert.Subject, h)
|
||||||
for _, hash := range stamp.Hashes {
|
}
|
||||||
if len(hash) == len(wantedHash) {
|
for _, hash := range stamp.Hashes {
|
||||||
copy(wantedHash[:], hash)
|
if len(hash) == len(wantedHash) {
|
||||||
if h == wantedHash {
|
copy(wantedHash[:], hash)
|
||||||
found = true
|
if h == wantedHash {
|
||||||
break
|
found = true
|
||||||
|
break
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if found {
|
||||||
|
break
|
||||||
|
}
|
||||||
}
|
}
|
||||||
if found {
|
if !found && len(stamp.Hashes) > 0 {
|
||||||
break
|
dlog.Criticalf("[%s] Certificate hash [%x] not found", name, wantedHash)
|
||||||
|
return ServerInfo{}, fmt.Errorf("Certificate hash not found")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if !found && len(stamp.Hashes) > 0 {
|
|
||||||
dlog.Criticalf("[%s] Certificate hash [%x] not found", name, wantedHash)
|
|
||||||
return ServerInfo{}, fmt.Errorf("Certificate hash not found")
|
|
||||||
}
|
|
||||||
if len(serverResponse) < MinDNSPacketSize || len(serverResponse) > MaxDNSPacketSize ||
|
if len(serverResponse) < MinDNSPacketSize || len(serverResponse) > MaxDNSPacketSize ||
|
||||||
serverResponse[0] != 0xca || serverResponse[1] != 0xfe || serverResponse[4] != 0x00 || serverResponse[5] != 0x01 {
|
serverResponse[0] != 0xca || serverResponse[1] != 0xfe || serverResponse[4] != 0x00 || serverResponse[5] != 0x01 {
|
||||||
dlog.Info("Webserver returned an unexpected response")
|
dlog.Info("Webserver returned an unexpected response")
|
||||||
|
|
|
@ -32,7 +32,7 @@ type Source struct {
|
||||||
name string
|
name string
|
||||||
urls []*url.URL
|
urls []*url.URL
|
||||||
format SourceFormat
|
format SourceFormat
|
||||||
in []byte
|
bin []byte
|
||||||
minisignKey *minisign.PublicKey
|
minisignKey *minisign.PublicKey
|
||||||
cacheFile string
|
cacheFile string
|
||||||
cacheTTL, prefetchDelay time.Duration
|
cacheTTL, prefetchDelay time.Duration
|
||||||
|
@ -40,83 +40,84 @@ type Source struct {
|
||||||
prefix string
|
prefix string
|
||||||
}
|
}
|
||||||
|
|
||||||
func (source *Source) checkSignature(bin, sig []byte) (err error) {
|
// timeNow() is replaced by tests to provide a static value
|
||||||
var signature minisign.Signature
|
var timeNow = time.Now
|
||||||
if signature, err = minisign.DecodeSignature(string(sig)); err == nil {
|
|
||||||
|
func (source *Source) checkSignature(bin, sig []byte) error {
|
||||||
|
signature, err := minisign.DecodeSignature(string(sig))
|
||||||
|
if err == nil {
|
||||||
_, err = source.minisignKey.Verify(bin, signature)
|
_, err = source.minisignKey.Verify(bin, signature)
|
||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// timeNow can be replaced by tests to provide a static value
|
func (source *Source) fetchFromCache(now time.Time) (time.Duration, error) {
|
||||||
var timeNow = time.Now
|
var err error
|
||||||
|
|
||||||
func (source *Source) fetchFromCache(now time.Time) (delay time.Duration, err error) {
|
|
||||||
var bin, sig []byte
|
var bin, sig []byte
|
||||||
if bin, err = os.ReadFile(source.cacheFile); err != nil {
|
if bin, err = os.ReadFile(source.cacheFile); err != nil {
|
||||||
return
|
return 0, err
|
||||||
}
|
}
|
||||||
if sig, err = os.ReadFile(source.cacheFile + ".minisig"); err != nil {
|
if sig, err = os.ReadFile(source.cacheFile + ".minisig"); err != nil {
|
||||||
return
|
return 0, err
|
||||||
}
|
}
|
||||||
if err = source.checkSignature(bin, sig); err != nil {
|
if err = source.checkSignature(bin, sig); err != nil {
|
||||||
return
|
return 0, err
|
||||||
}
|
}
|
||||||
source.in = bin
|
source.bin = bin
|
||||||
var fi os.FileInfo
|
var fi os.FileInfo
|
||||||
if fi, err = os.Stat(source.cacheFile); err != nil {
|
if fi, err = os.Stat(source.cacheFile); err != nil {
|
||||||
return
|
return 0, err
|
||||||
}
|
}
|
||||||
|
var ttl time.Duration = 0
|
||||||
if elapsed := now.Sub(fi.ModTime()); elapsed < source.cacheTTL {
|
if elapsed := now.Sub(fi.ModTime()); elapsed < source.cacheTTL {
|
||||||
delay = source.prefetchDelay - elapsed
|
ttl = source.prefetchDelay - elapsed
|
||||||
dlog.Debugf("Source [%s] cache file [%s] is still fresh, next update: %v", source.name, source.cacheFile, delay)
|
dlog.Debugf("Source [%s] cache file [%s] is still fresh, next update: %v", source.name, source.cacheFile, ttl)
|
||||||
} else {
|
} else {
|
||||||
dlog.Debugf("Source [%s] cache file [%s] needs to be refreshed", source.name, source.cacheFile)
|
dlog.Debugf("Source [%s] cache file [%s] needs to be refreshed", source.name, source.cacheFile)
|
||||||
}
|
}
|
||||||
return
|
return ttl, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func writeSource(f string, bin, sig []byte) (err error) {
|
func writeSource(f string, bin, sig []byte) error {
|
||||||
|
var err error
|
||||||
var fSrc, fSig *safefile.File
|
var fSrc, fSig *safefile.File
|
||||||
if fSrc, err = safefile.Create(f, 0644); err != nil {
|
if fSrc, err = safefile.Create(f, 0o644); err != nil {
|
||||||
return
|
return err
|
||||||
}
|
}
|
||||||
defer fSrc.Close()
|
defer fSrc.Close()
|
||||||
if fSig, err = safefile.Create(f+".minisig", 0644); err != nil {
|
if fSig, err = safefile.Create(f+".minisig", 0o644); err != nil {
|
||||||
return
|
return err
|
||||||
}
|
}
|
||||||
defer fSig.Close()
|
defer fSig.Close()
|
||||||
if _, err = fSrc.Write(bin); err != nil {
|
if _, err = fSrc.Write(bin); err != nil {
|
||||||
return
|
return err
|
||||||
}
|
}
|
||||||
if _, err = fSig.Write(sig); err != nil {
|
if _, err = fSig.Write(sig); err != nil {
|
||||||
return
|
return err
|
||||||
}
|
}
|
||||||
if err = fSrc.Commit(); err != nil {
|
if err = fSrc.Commit(); err != nil {
|
||||||
return
|
return err
|
||||||
}
|
}
|
||||||
return fSig.Commit()
|
return fSig.Commit()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (source *Source) writeToCache(bin, sig []byte, now time.Time) {
|
func (source *Source) updateCache(bin, sig []byte, now time.Time) {
|
||||||
f := source.cacheFile
|
file := source.cacheFile
|
||||||
var writeErr error // an error writing cache isn't fatal
|
absPath := file
|
||||||
defer func() {
|
if resolved, err := filepath.Abs(file); err != nil {
|
||||||
source.in = bin
|
absPath = resolved
|
||||||
if writeErr == nil {
|
}
|
||||||
return
|
|
||||||
}
|
if !bytes.Equal(source.bin, bin) {
|
||||||
if absPath, absErr := filepath.Abs(f); absErr == nil {
|
if err := writeSource(file, bin, sig); err != nil {
|
||||||
f = absPath
|
dlog.Warnf("Couldn't write cache file [%s]: %s", absPath, err) // an error writing to the cache isn't fatal
|
||||||
}
|
|
||||||
dlog.Warnf("%s: %s", f, writeErr)
|
|
||||||
}()
|
|
||||||
if !bytes.Equal(source.in, bin) {
|
|
||||||
if writeErr = writeSource(f, bin, sig); writeErr != nil {
|
|
||||||
return
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
writeErr = os.Chtimes(f, now, now)
|
if err := os.Chtimes(file, now, now); err != nil {
|
||||||
|
dlog.Warnf("Couldn't update cache file [%s]: %s", absPath, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
source.bin = bin
|
||||||
}
|
}
|
||||||
|
|
||||||
func (source *Source) parseURLs(urls []string) {
|
func (source *Source) parseURLs(urls []string) {
|
||||||
|
@ -129,28 +130,32 @@ func (source *Source) parseURLs(urls []string) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func fetchFromURL(xTransport *XTransport, u *url.URL) (bin []byte, err error) {
|
func fetchFromURL(xTransport *XTransport, u *url.URL) ([]byte, error) {
|
||||||
bin, _, _, _, err = xTransport.Get(u, "", DefaultTimeout)
|
bin, _, _, _, err := xTransport.GetWithCompression(u, "", DefaultTimeout)
|
||||||
return bin, err
|
return bin, err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (source *Source) fetchWithCache(xTransport *XTransport, now time.Time) (delay time.Duration, err error) {
|
func (source *Source) fetchWithCache(xTransport *XTransport, now time.Time) (time.Duration, error) {
|
||||||
if delay, err = source.fetchFromCache(now); err != nil {
|
var err error
|
||||||
|
var ttl time.Duration
|
||||||
|
if ttl, err = source.fetchFromCache(now); err != nil {
|
||||||
if len(source.urls) == 0 {
|
if len(source.urls) == 0 {
|
||||||
dlog.Errorf("Source [%s] cache file [%s] not present and no valid URL", source.name, source.cacheFile)
|
dlog.Errorf("Source [%s] cache file [%s] not present and no valid URL", source.name, source.cacheFile)
|
||||||
return
|
return 0, err
|
||||||
}
|
}
|
||||||
dlog.Debugf("Source [%s] cache file [%s] not present", source.name, source.cacheFile)
|
dlog.Debugf("Source [%s] cache file [%s] not present", source.name, source.cacheFile)
|
||||||
}
|
}
|
||||||
if len(source.urls) > 0 {
|
|
||||||
defer func() {
|
if len(source.urls) == 0 {
|
||||||
source.refresh = now.Add(delay)
|
return 0, err
|
||||||
}()
|
|
||||||
}
|
}
|
||||||
if len(source.urls) == 0 || delay > 0 {
|
if ttl > 0 {
|
||||||
return
|
source.refresh = now.Add(ttl)
|
||||||
|
return 0, err
|
||||||
}
|
}
|
||||||
delay = MinimumPrefetchInterval
|
|
||||||
|
ttl = MinimumPrefetchInterval
|
||||||
|
source.refresh = now.Add(ttl)
|
||||||
var bin, sig []byte
|
var bin, sig []byte
|
||||||
for _, srcURL := range source.urls {
|
for _, srcURL := range source.urls {
|
||||||
dlog.Infof("Source [%s] loading from URL [%s]", source.name, srcURL)
|
dlog.Infof("Source [%s] loading from URL [%s]", source.name, srcURL)
|
||||||
|
@ -165,17 +170,19 @@ func (source *Source) fetchWithCache(xTransport *XTransport, now time.Time) (del
|
||||||
dlog.Debugf("Source [%s] failed to download signature from URL [%s]", source.name, sigURL)
|
dlog.Debugf("Source [%s] failed to download signature from URL [%s]", source.name, sigURL)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if err = source.checkSignature(bin, sig); err == nil {
|
if err = source.checkSignature(bin, sig); err != nil {
|
||||||
break // valid signature
|
dlog.Debugf("Source [%s] failed signature check using URL [%s]", source.name, srcURL)
|
||||||
} // above err check inverted to make use of implicit continue
|
continue
|
||||||
dlog.Debugf("Source [%s] failed signature check using URL [%s]", source.name, srcURL)
|
}
|
||||||
|
break // valid signature
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return 0, err
|
||||||
}
|
}
|
||||||
source.writeToCache(bin, sig, now)
|
source.updateCache(bin, sig, now)
|
||||||
delay = source.prefetchDelay
|
ttl = source.prefetchDelay
|
||||||
return
|
source.refresh = now.Add(ttl)
|
||||||
|
return ttl, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewSource loads a new source using the given cacheFile and urls, ensuring it has a valid signature
|
// NewSource loads a new source using the given cacheFile and urls, ensuring it has a valid signature
|
||||||
|
@ -188,11 +195,11 @@ func NewSource(
|
||||||
formatStr string,
|
formatStr string,
|
||||||
refreshDelay time.Duration,
|
refreshDelay time.Duration,
|
||||||
prefix string,
|
prefix string,
|
||||||
) (source *Source, err error) {
|
) (*Source, error) {
|
||||||
if refreshDelay < DefaultPrefetchDelay {
|
if refreshDelay < DefaultPrefetchDelay {
|
||||||
refreshDelay = DefaultPrefetchDelay
|
refreshDelay = DefaultPrefetchDelay
|
||||||
}
|
}
|
||||||
source = &Source{
|
source := &Source{
|
||||||
name: name,
|
name: name,
|
||||||
urls: []*url.URL{},
|
urls: []*url.URL{},
|
||||||
cacheFile: cacheFile,
|
cacheFile: cacheFile,
|
||||||
|
@ -211,10 +218,11 @@ func NewSource(
|
||||||
return source, err
|
return source, err
|
||||||
}
|
}
|
||||||
source.parseURLs(urls)
|
source.parseURLs(urls)
|
||||||
if _, err = source.fetchWithCache(xTransport, timeNow()); err == nil {
|
_, err := source.fetchWithCache(xTransport, timeNow())
|
||||||
|
if err == nil {
|
||||||
dlog.Noticef("Source [%s] loaded", name)
|
dlog.Noticef("Source [%s] loaded", name)
|
||||||
}
|
}
|
||||||
return
|
return source, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// PrefetchSources downloads latest versions of given sources, ensuring they have a valid signature before caching
|
// PrefetchSources downloads latest versions of given sources, ensuring they have a valid signature before caching
|
||||||
|
@ -229,7 +237,7 @@ func PrefetchSources(xTransport *XTransport, sources []*Source) time.Duration {
|
||||||
if delay, err := source.fetchWithCache(xTransport, now); err != nil {
|
if delay, err := source.fetchWithCache(xTransport, now); err != nil {
|
||||||
dlog.Infof("Prefetching [%s] failed: %v, will retry in %v", source.name, err, interval)
|
dlog.Infof("Prefetching [%s] failed: %v, will retry in %v", source.name, err, interval)
|
||||||
} else {
|
} else {
|
||||||
dlog.Debugf("Prefetching [%s] succeeded, next update: %v", source.name, delay)
|
dlog.Debugf("Prefetching [%s] succeeded, next update in %v min", source.name, delay)
|
||||||
if delay >= MinimumPrefetchInterval && (interval == MinimumPrefetchInterval || interval > delay) {
|
if delay >= MinimumPrefetchInterval && (interval == MinimumPrefetchInterval || interval > delay) {
|
||||||
interval = delay
|
interval = delay
|
||||||
}
|
}
|
||||||
|
@ -254,7 +262,7 @@ func (source *Source) parseV2() ([]RegisteredServer, error) {
|
||||||
stampErrs = append(stampErrs, stampErr)
|
stampErrs = append(stampErrs, stampErr)
|
||||||
dlog.Warn(stampErr)
|
dlog.Warn(stampErr)
|
||||||
}
|
}
|
||||||
in := string(source.in)
|
in := string(source.bin)
|
||||||
parts := strings.Split(in, "## ")
|
parts := strings.Split(in, "## ")
|
||||||
if len(parts) < 2 {
|
if len(parts) < 2 {
|
||||||
return registeredServers, fmt.Errorf("Invalid format for source at [%v]", source.urls)
|
return registeredServers, fmt.Errorf("Invalid format for source at [%v]", source.urls)
|
||||||
|
|
|
@ -14,9 +14,9 @@ import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/hectane/go-acl"
|
"github.com/hectane/go-acl"
|
||||||
"github.com/powerman/check"
|
"github.com/jedisct1/dlog"
|
||||||
|
|
||||||
"github.com/jedisct1/go-minisign"
|
"github.com/jedisct1/go-minisign"
|
||||||
|
"github.com/powerman/check"
|
||||||
)
|
)
|
||||||
|
|
||||||
type SourceFixture struct {
|
type SourceFixture struct {
|
||||||
|
@ -83,7 +83,7 @@ func writeSourceCache(t *testing.T, e *SourceTestExpect) {
|
||||||
path := e.cachePath + f.suffix
|
path := e.cachePath + f.suffix
|
||||||
perms := f.perms
|
perms := f.perms
|
||||||
if perms == 0 {
|
if perms == 0 {
|
||||||
perms = 0644
|
perms = 0o644
|
||||||
}
|
}
|
||||||
if err := os.WriteFile(path, f.content, perms); err != nil {
|
if err := os.WriteFile(path, f.content, perms); err != nil {
|
||||||
t.Fatalf("Unable to write cache file %s: %v", path, err)
|
t.Fatalf("Unable to write cache file %s: %v", path, err)
|
||||||
|
@ -107,7 +107,7 @@ func writeSourceCache(t *testing.T, e *SourceTestExpect) {
|
||||||
func checkSourceCache(c *check.C, e *SourceTestExpect) {
|
func checkSourceCache(c *check.C, e *SourceTestExpect) {
|
||||||
for _, f := range e.cache {
|
for _, f := range e.cache {
|
||||||
path := e.cachePath + f.suffix
|
path := e.cachePath + f.suffix
|
||||||
_ = acl.Chmod(path, 0644) // don't worry if this fails, reading it will catch the same problem
|
_ = acl.Chmod(path, 0o644) // don't worry if this fails, reading it will catch the same problem
|
||||||
got, err := os.ReadFile(path)
|
got, err := os.ReadFile(path)
|
||||||
c.DeepEqual(got, f.content, "Unexpected content for cache file '%s', err %v", path, err)
|
c.DeepEqual(got, f.content, "Unexpected content for cache file '%s', err %v", path, err)
|
||||||
if f.suffix != "" {
|
if f.suffix != "" {
|
||||||
|
@ -144,7 +144,7 @@ func loadTestSourceNames(t *testing.T, d *SourceTestData) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func generateFixtureState(t *testing.T, d *SourceTestData, suffix, file string, state SourceTestState) {
|
func generateFixtureState(_ *testing.T, d *SourceTestData, suffix, file string, state SourceTestState) {
|
||||||
if _, ok := d.fixtures[state]; !ok {
|
if _, ok := d.fixtures[state]; !ok {
|
||||||
d.fixtures[state] = map[string]SourceFixture{}
|
d.fixtures[state] = map[string]SourceFixture{}
|
||||||
}
|
}
|
||||||
|
@ -164,7 +164,7 @@ func generateFixtureState(t *testing.T, d *SourceTestData, suffix, file string,
|
||||||
case TestStateReadErr, TestStateReadSigErr:
|
case TestStateReadErr, TestStateReadSigErr:
|
||||||
f.content, f.length = []byte{}, "1"
|
f.content, f.length = []byte{}, "1"
|
||||||
case TestStateOpenErr, TestStateOpenSigErr:
|
case TestStateOpenErr, TestStateOpenSigErr:
|
||||||
f.content, f.perms = d.fixtures[TestStateCorrect][file].content[:1], 0200
|
f.content, f.perms = d.fixtures[TestStateCorrect][file].content[:1], 0o200
|
||||||
}
|
}
|
||||||
d.fixtures[state][file] = f
|
d.fixtures[state][file] = f
|
||||||
}
|
}
|
||||||
|
@ -284,9 +284,9 @@ func prepSourceTestCache(t *testing.T, d *SourceTestData, e *SourceTestExpect, s
|
||||||
e.cache = []SourceFixture{d.fixtures[state][source], d.fixtures[state][source+".minisig"]}
|
e.cache = []SourceFixture{d.fixtures[state][source], d.fixtures[state][source+".minisig"]}
|
||||||
switch state {
|
switch state {
|
||||||
case TestStateCorrect:
|
case TestStateCorrect:
|
||||||
e.Source.in, e.success = e.cache[0].content, true
|
e.Source.bin, e.success = e.cache[0].content, true
|
||||||
case TestStateExpired:
|
case TestStateExpired:
|
||||||
e.Source.in = e.cache[0].content
|
e.Source.bin = e.cache[0].content
|
||||||
case TestStatePartial, TestStatePartialSig:
|
case TestStatePartial, TestStatePartialSig:
|
||||||
e.err = "signature"
|
e.err = "signature"
|
||||||
case TestStateMissing, TestStateMissingSig, TestStateOpenErr, TestStateOpenSigErr:
|
case TestStateMissing, TestStateMissingSig, TestStateOpenErr, TestStateOpenSigErr:
|
||||||
|
@ -296,7 +296,7 @@ func prepSourceTestCache(t *testing.T, d *SourceTestData, e *SourceTestExpect, s
|
||||||
}
|
}
|
||||||
|
|
||||||
func prepSourceTestDownload(
|
func prepSourceTestDownload(
|
||||||
t *testing.T,
|
_ *testing.T,
|
||||||
d *SourceTestData,
|
d *SourceTestData,
|
||||||
e *SourceTestExpect,
|
e *SourceTestExpect,
|
||||||
source string,
|
source string,
|
||||||
|
@ -339,7 +339,7 @@ func prepSourceTestDownload(
|
||||||
switch state {
|
switch state {
|
||||||
case TestStateCorrect:
|
case TestStateCorrect:
|
||||||
e.cache = []SourceFixture{d.fixtures[state][source], d.fixtures[state][source+".minisig"]}
|
e.cache = []SourceFixture{d.fixtures[state][source], d.fixtures[state][source+".minisig"]}
|
||||||
e.Source.in, e.success = e.cache[0].content, true
|
e.Source.bin, e.success = e.cache[0].content, true
|
||||||
fallthrough
|
fallthrough
|
||||||
case TestStateMissingSig, TestStatePartial, TestStatePartialSig, TestStateReadSigErr:
|
case TestStateMissingSig, TestStatePartial, TestStatePartialSig, TestStateReadSigErr:
|
||||||
d.reqExpect[path+".minisig"]++
|
d.reqExpect[path+".minisig"]++
|
||||||
|
@ -362,14 +362,17 @@ func prepSourceTestDownload(
|
||||||
}
|
}
|
||||||
|
|
||||||
func setupSourceTestCase(t *testing.T, d *SourceTestData, i int,
|
func setupSourceTestCase(t *testing.T, d *SourceTestData, i int,
|
||||||
cacheTest *SourceTestState, downloadTest []SourceTestState) (id string, e *SourceTestExpect) {
|
cacheTest *SourceTestState, downloadTest []SourceTestState,
|
||||||
|
) (id string, e *SourceTestExpect) {
|
||||||
id = strconv.Itoa(d.n) + "-" + strconv.Itoa(i)
|
id = strconv.Itoa(d.n) + "-" + strconv.Itoa(i)
|
||||||
e = &SourceTestExpect{
|
e = &SourceTestExpect{
|
||||||
cachePath: filepath.Join(d.tempDir, id),
|
cachePath: filepath.Join(d.tempDir, id),
|
||||||
mtime: d.timeNow,
|
mtime: d.timeNow,
|
||||||
}
|
}
|
||||||
e.Source = &Source{name: id, urls: []*url.URL{}, format: SourceFormatV2, minisignKey: d.key,
|
e.Source = &Source{
|
||||||
cacheFile: e.cachePath, cacheTTL: DefaultPrefetchDelay * 3, prefetchDelay: DefaultPrefetchDelay}
|
name: id, urls: []*url.URL{}, format: SourceFormatV2, minisignKey: d.key,
|
||||||
|
cacheFile: e.cachePath, cacheTTL: DefaultPrefetchDelay * 3, prefetchDelay: DefaultPrefetchDelay,
|
||||||
|
}
|
||||||
if cacheTest != nil {
|
if cacheTest != nil {
|
||||||
prepSourceTestCache(t, d, e, d.sources[i], *cacheTest)
|
prepSourceTestCache(t, d, e, d.sources[i], *cacheTest)
|
||||||
i = (i + 1) % len(d.sources) // make the cached and downloaded fixtures different
|
i = (i + 1) % len(d.sources) // make the cached and downloaded fixtures different
|
||||||
|
@ -379,6 +382,10 @@ func setupSourceTestCase(t *testing.T, d *SourceTestData, i int,
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestNewSource(t *testing.T) {
|
func TestNewSource(t *testing.T) {
|
||||||
|
if testing.Verbose() {
|
||||||
|
dlog.SetLogLevel(dlog.SeverityDebug)
|
||||||
|
dlog.UseSyslog(false)
|
||||||
|
}
|
||||||
teardown, d := setupSourceTest(t)
|
teardown, d := setupSourceTest(t)
|
||||||
defer teardown()
|
defer teardown()
|
||||||
checkResult := func(t *testing.T, e *SourceTestExpect, got *Source, err error) {
|
checkResult := func(t *testing.T, e *SourceTestExpect, got *Source, err error) {
|
||||||
|
@ -440,6 +447,10 @@ func TestNewSource(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestPrefetchSources(t *testing.T) {
|
func TestPrefetchSources(t *testing.T) {
|
||||||
|
if testing.Verbose() {
|
||||||
|
dlog.SetLogLevel(dlog.SeverityDebug)
|
||||||
|
dlog.UseSyslog(false)
|
||||||
|
}
|
||||||
teardown, d := setupSourceTest(t)
|
teardown, d := setupSourceTest(t)
|
||||||
defer teardown()
|
defer teardown()
|
||||||
checkResult := func(t *testing.T, expects []*SourceTestExpect, got time.Duration) {
|
checkResult := func(t *testing.T, expects []*SourceTestExpect, got time.Duration) {
|
||||||
|
@ -466,7 +477,7 @@ func TestPrefetchSources(t *testing.T) {
|
||||||
e.mtime = d.timeUpd
|
e.mtime = d.timeUpd
|
||||||
s := &Source{}
|
s := &Source{}
|
||||||
*s = *e.Source
|
*s = *e.Source
|
||||||
s.in = nil
|
s.bin = nil
|
||||||
sources = append(sources, s)
|
sources = append(sources, s)
|
||||||
expects = append(expects, e)
|
expects = append(expects, e)
|
||||||
}
|
}
|
||||||
|
|
|
@ -2,6 +2,7 @@ package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"compress/gzip"
|
||||||
"context"
|
"context"
|
||||||
"crypto/sha512"
|
"crypto/sha512"
|
||||||
"crypto/tls"
|
"crypto/tls"
|
||||||
|
@ -60,9 +61,11 @@ type XTransport struct {
|
||||||
timeout time.Duration
|
timeout time.Duration
|
||||||
cachedIPs CachedIPs
|
cachedIPs CachedIPs
|
||||||
altSupport AltSupport
|
altSupport AltSupport
|
||||||
|
internalResolvers []string
|
||||||
bootstrapResolvers []string
|
bootstrapResolvers []string
|
||||||
mainProto string
|
mainProto string
|
||||||
ignoreSystemDNS bool
|
ignoreSystemDNS bool
|
||||||
|
internalResolverReady bool
|
||||||
useIPv4 bool
|
useIPv4 bool
|
||||||
useIPv6 bool
|
useIPv6 bool
|
||||||
http3 bool
|
http3 bool
|
||||||
|
@ -71,6 +74,7 @@ type XTransport struct {
|
||||||
proxyDialer *netproxy.Dialer
|
proxyDialer *netproxy.Dialer
|
||||||
httpProxyFunction func(*http.Request) (*url.URL, error)
|
httpProxyFunction func(*http.Request) (*url.URL, error)
|
||||||
tlsClientCreds DOHClientCreds
|
tlsClientCreds DOHClientCreds
|
||||||
|
keyLogWriter io.Writer
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewXTransport() *XTransport {
|
func NewXTransport() *XTransport {
|
||||||
|
@ -89,6 +93,7 @@ func NewXTransport() *XTransport {
|
||||||
useIPv6: false,
|
useIPv6: false,
|
||||||
tlsDisableSessionTickets: false,
|
tlsDisableSessionTickets: false,
|
||||||
tlsCipherSuite: nil,
|
tlsCipherSuite: nil,
|
||||||
|
keyLogWriter: nil,
|
||||||
}
|
}
|
||||||
return &xTransport
|
return &xTransport
|
||||||
}
|
}
|
||||||
|
@ -132,7 +137,7 @@ func (xTransport *XTransport) loadCachedIP(host string) (ip net.IP, expired bool
|
||||||
func (xTransport *XTransport) rebuildTransport() {
|
func (xTransport *XTransport) rebuildTransport() {
|
||||||
dlog.Debug("Rebuilding transport")
|
dlog.Debug("Rebuilding transport")
|
||||||
if xTransport.transport != nil {
|
if xTransport.transport != nil {
|
||||||
(*xTransport.transport).CloseIdleConnections()
|
xTransport.transport.CloseIdleConnections()
|
||||||
}
|
}
|
||||||
timeout := xTransport.timeout
|
timeout := xTransport.timeout
|
||||||
transport := &http.Transport{
|
transport := &http.Transport{
|
||||||
|
@ -175,6 +180,10 @@ func (xTransport *XTransport) rebuildTransport() {
|
||||||
tlsClientConfig := tls.Config{}
|
tlsClientConfig := tls.Config{}
|
||||||
certPool, certPoolErr := x509.SystemCertPool()
|
certPool, certPoolErr := x509.SystemCertPool()
|
||||||
|
|
||||||
|
if xTransport.keyLogWriter != nil {
|
||||||
|
tlsClientConfig.KeyLogWriter = xTransport.keyLogWriter
|
||||||
|
}
|
||||||
|
|
||||||
if clientCreds.rootCA != "" {
|
if clientCreds.rootCA != "" {
|
||||||
if certPool == nil {
|
if certPool == nil {
|
||||||
dlog.Fatalf("Additional CAs not supported on this platform: %v", certPoolErr)
|
dlog.Fatalf("Additional CAs not supported on this platform: %v", certPoolErr)
|
||||||
|
@ -188,7 +197,7 @@ func (xTransport *XTransport) rebuildTransport() {
|
||||||
|
|
||||||
if certPool != nil {
|
if certPool != nil {
|
||||||
// Some operating systems don't include Let's Encrypt ISRG Root X1 certificate yet
|
// Some operating systems don't include Let's Encrypt ISRG Root X1 certificate yet
|
||||||
var letsEncryptX1Cert = []byte(`-----BEGIN CERTIFICATE-----
|
letsEncryptX1Cert := []byte(`-----BEGIN CERTIFICATE-----
|
||||||
MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAwTzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2VhcmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJuZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBYMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygch77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6UA5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sWT8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyHB5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UCB5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUvKBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWnOlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTnjh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbwqHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CIrU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkqhkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZLubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KKNFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7UrTkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdCjNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVcoyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPAmRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57demyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
|
MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAwTzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2VhcmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJuZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBYMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygch77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6UA5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sWT8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyHB5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UCB5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUvKBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWnOlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTnjh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbwqHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CIrU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkqhkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZLubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KKNFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7UrTkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdCjNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVcoyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPAmRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57demyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
|
||||||
-----END CERTIFICATE-----`)
|
-----END CERTIFICATE-----`)
|
||||||
certPool.AppendCertsFromPEM(letsEncryptX1Cert)
|
certPool.AppendCertsFromPEM(letsEncryptX1Cert)
|
||||||
|
@ -214,7 +223,32 @@ func (xTransport *XTransport) rebuildTransport() {
|
||||||
tlsClientConfig.ClientSessionCache = tls.NewLRUClientSessionCache(10)
|
tlsClientConfig.ClientSessionCache = tls.NewLRUClientSessionCache(10)
|
||||||
}
|
}
|
||||||
if xTransport.tlsCipherSuite != nil {
|
if xTransport.tlsCipherSuite != nil {
|
||||||
|
tlsClientConfig.PreferServerCipherSuites = false
|
||||||
tlsClientConfig.CipherSuites = xTransport.tlsCipherSuite
|
tlsClientConfig.CipherSuites = xTransport.tlsCipherSuite
|
||||||
|
|
||||||
|
// Go doesn't allow changing the cipher suite with TLS 1.3
|
||||||
|
// So, check if the requested set of ciphers matches the TLS 1.3 suite.
|
||||||
|
// If it doesn't, downgrade to TLS 1.2
|
||||||
|
compatibleSuitesCount := 0
|
||||||
|
for _, suite := range tls.CipherSuites() {
|
||||||
|
if suite.Insecure {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
for _, supportedVersion := range suite.SupportedVersions {
|
||||||
|
if supportedVersion != tls.VersionTLS13 {
|
||||||
|
for _, expectedSuiteID := range xTransport.tlsCipherSuite {
|
||||||
|
if expectedSuiteID == suite.ID {
|
||||||
|
compatibleSuitesCount += 1
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if compatibleSuitesCount != len(tls.CipherSuites()) {
|
||||||
|
dlog.Notice("Explicit cipher suite configured - downgrading to TLS 1.2")
|
||||||
|
tlsClientConfig.MaxVersion = tls.VersionTLS12
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
transport.TLSClientConfig = &tlsClientConfig
|
transport.TLSClientConfig = &tlsClientConfig
|
||||||
|
@ -224,31 +258,42 @@ func (xTransport *XTransport) rebuildTransport() {
|
||||||
}
|
}
|
||||||
xTransport.transport = transport
|
xTransport.transport = transport
|
||||||
if xTransport.http3 {
|
if xTransport.http3 {
|
||||||
h3Transport := &http3.RoundTripper{DisableCompression: true, TLSClientConfig: &tlsClientConfig, Dial: func(ctx context.Context, addrStr string, tlsCfg *tls.Config, cfg *quic.Config) (quic.EarlyConnection, error) {
|
dial := func(ctx context.Context, addrStr string, tlsCfg *tls.Config, cfg *quic.Config) (quic.EarlyConnection, error) {
|
||||||
dlog.Debugf("Dialing for H3: [%v]", addrStr)
|
dlog.Debugf("Dialing for H3: [%v]", addrStr)
|
||||||
host, port := ExtractHostAndPort(addrStr, stamps.DefaultPort)
|
host, port := ExtractHostAndPort(addrStr, stamps.DefaultPort)
|
||||||
ipOnly := host
|
ipOnly := host
|
||||||
cachedIP, _ := xTransport.loadCachedIP(host)
|
cachedIP, _ := xTransport.loadCachedIP(host)
|
||||||
|
network := "udp4"
|
||||||
if cachedIP != nil {
|
if cachedIP != nil {
|
||||||
if ipv4 := cachedIP.To4(); ipv4 != nil {
|
if ipv4 := cachedIP.To4(); ipv4 != nil {
|
||||||
ipOnly = ipv4.String()
|
ipOnly = ipv4.String()
|
||||||
} else {
|
} else {
|
||||||
ipOnly = "[" + cachedIP.String() + "]"
|
ipOnly = "[" + cachedIP.String() + "]"
|
||||||
|
network = "udp6"
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
dlog.Debugf("[%s] IP address was not cached in H3 DialContext", host)
|
dlog.Debugf("[%s] IP address was not cached in H3 context", host)
|
||||||
|
if xTransport.useIPv6 {
|
||||||
|
if xTransport.useIPv4 {
|
||||||
|
network = "udp"
|
||||||
|
} else {
|
||||||
|
network = "udp6"
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
addrStr = ipOnly + ":" + strconv.Itoa(port)
|
addrStr = ipOnly + ":" + strconv.Itoa(port)
|
||||||
udpAddr, err := net.ResolveUDPAddr("udp", addrStr)
|
udpAddr, err := net.ResolveUDPAddr(network, addrStr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
udpConn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4zero, Port: 0})
|
udpConn, err := net.ListenUDP(network, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return quic.DialEarlyContext(ctx, udpConn, udpAddr, host, tlsCfg, cfg)
|
tlsCfg.ServerName = host
|
||||||
}}
|
return quic.DialEarly(ctx, udpConn, udpAddr, tlsCfg, cfg)
|
||||||
|
}
|
||||||
|
h3Transport := &http3.RoundTripper{DisableCompression: true, TLSClientConfig: &tlsClientConfig, Dial: dial}
|
||||||
xTransport.h3Transport = h3Transport
|
xTransport.h3Transport = h3Transport
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -333,16 +378,17 @@ func (xTransport *XTransport) resolveUsingResolvers(
|
||||||
proto, host string,
|
proto, host string,
|
||||||
resolvers []string,
|
resolvers []string,
|
||||||
) (ip net.IP, ttl time.Duration, err error) {
|
) (ip net.IP, ttl time.Duration, err error) {
|
||||||
|
err = errors.New("Empty resolvers")
|
||||||
for i, resolver := range resolvers {
|
for i, resolver := range resolvers {
|
||||||
ip, ttl, err = xTransport.resolveUsingResolver(proto, host, resolver)
|
ip, ttl, err = xTransport.resolveUsingResolver(proto, host, resolver)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
if i > 0 {
|
if i > 0 {
|
||||||
dlog.Infof("Resolution succeeded with bootstrap resolver %s[%s]", proto, resolver)
|
dlog.Infof("Resolution succeeded with resolver %s[%s]", proto, resolver)
|
||||||
resolvers[0], resolvers[i] = resolvers[i], resolvers[0]
|
resolvers[0], resolvers[i] = resolvers[i], resolvers[0]
|
||||||
}
|
}
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
dlog.Infof("Unable to resolve [%s] using bootstrap resolver %s[%s]: %v", host, proto, resolver, err)
|
dlog.Infof("Unable to resolve [%s] using resolver [%s] (%s): %v", host, resolver, proto, err)
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -362,23 +408,37 @@ func (xTransport *XTransport) resolveAndUpdateCache(host string) error {
|
||||||
var foundIP net.IP
|
var foundIP net.IP
|
||||||
var ttl time.Duration
|
var ttl time.Duration
|
||||||
var err error
|
var err error
|
||||||
if !xTransport.ignoreSystemDNS {
|
protos := []string{"udp", "tcp"}
|
||||||
foundIP, ttl, err = xTransport.resolveUsingSystem(host)
|
if xTransport.mainProto == "tcp" {
|
||||||
|
protos = []string{"tcp", "udp"}
|
||||||
}
|
}
|
||||||
if xTransport.ignoreSystemDNS || err != nil {
|
if xTransport.ignoreSystemDNS {
|
||||||
protos := []string{"udp", "tcp"}
|
if xTransport.internalResolverReady {
|
||||||
if xTransport.mainProto == "tcp" {
|
for _, proto := range protos {
|
||||||
protos = []string{"tcp", "udp"}
|
foundIP, ttl, err = xTransport.resolveUsingResolvers(proto, host, xTransport.internalResolvers)
|
||||||
|
if err == nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
err = errors.New("Service is not usable yet")
|
||||||
|
dlog.Notice(err)
|
||||||
}
|
}
|
||||||
|
} else {
|
||||||
|
foundIP, ttl, err = xTransport.resolveUsingSystem(host)
|
||||||
|
if err != nil {
|
||||||
|
err = errors.New("System DNS is not usable yet")
|
||||||
|
dlog.Notice(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
for _, proto := range protos {
|
for _, proto := range protos {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
dlog.Noticef(
|
dlog.Noticef(
|
||||||
"System DNS configuration not usable yet, exceptionally resolving [%s] using bootstrap resolvers over %s",
|
"Resolving server host [%s] using bootstrap resolvers over %s",
|
||||||
host,
|
host,
|
||||||
proto,
|
proto,
|
||||||
)
|
)
|
||||||
} else {
|
|
||||||
dlog.Debugf("Resolving [%s] using bootstrap resolvers over %s", host, proto)
|
|
||||||
}
|
}
|
||||||
foundIP, ttl, err = xTransport.resolveUsingResolvers(proto, host, xTransport.bootstrapResolvers)
|
foundIP, ttl, err = xTransport.resolveUsingResolvers(proto, host, xTransport.bootstrapResolvers)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
|
@ -402,6 +462,15 @@ func (xTransport *XTransport) resolveAndUpdateCache(host string) error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if foundIP == nil {
|
||||||
|
if !xTransport.useIPv4 && xTransport.useIPv6 {
|
||||||
|
dlog.Warnf("no IPv6 address found for [%s]", host)
|
||||||
|
} else if xTransport.useIPv4 && !xTransport.useIPv6 {
|
||||||
|
dlog.Warnf("no IPv4 address found for [%s]", host)
|
||||||
|
} else {
|
||||||
|
dlog.Errorf("no IP address found for [%s]", host)
|
||||||
|
}
|
||||||
|
}
|
||||||
xTransport.saveCachedIP(host, foundIP, ttl)
|
xTransport.saveCachedIP(host, foundIP, ttl)
|
||||||
dlog.Debugf("[%s] IP address [%s] added to the cache, valid for %v", host, foundIP, ttl)
|
dlog.Debugf("[%s] IP address [%s] added to the cache, valid for %v", host, foundIP, ttl)
|
||||||
return nil
|
return nil
|
||||||
|
@ -414,6 +483,7 @@ func (xTransport *XTransport) Fetch(
|
||||||
contentType string,
|
contentType string,
|
||||||
body *[]byte,
|
body *[]byte,
|
||||||
timeout time.Duration,
|
timeout time.Duration,
|
||||||
|
compress bool,
|
||||||
) ([]byte, int, *tls.ConnectionState, time.Duration, error) {
|
) ([]byte, int, *tls.ConnectionState, time.Duration, error) {
|
||||||
if timeout <= 0 {
|
if timeout <= 0 {
|
||||||
timeout = xTransport.timeout
|
timeout = xTransport.timeout
|
||||||
|
@ -462,6 +532,9 @@ func (xTransport *XTransport) Fetch(
|
||||||
)
|
)
|
||||||
return nil, 0, nil, 0, err
|
return nil, 0, nil, 0, err
|
||||||
}
|
}
|
||||||
|
if compress && body == nil {
|
||||||
|
header["Accept-Encoding"] = []string{"gzip"}
|
||||||
|
}
|
||||||
req := &http.Request{
|
req := &http.Request{
|
||||||
Method: method,
|
Method: method,
|
||||||
URL: url,
|
URL: url,
|
||||||
|
@ -482,8 +555,8 @@ func (xTransport *XTransport) Fetch(
|
||||||
err = errors.New(resp.Status)
|
err = errors.New(resp.Status)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
dlog.Debugf("HTTP client error: [%v] - closing idle H3 connections", err)
|
dlog.Debugf("HTTP client error: [%v] - closing idle connections", err)
|
||||||
(*xTransport.transport).CloseIdleConnections()
|
xTransport.transport.CloseIdleConnections()
|
||||||
}
|
}
|
||||||
statusCode := 503
|
statusCode := 503
|
||||||
if resp != nil {
|
if resp != nil {
|
||||||
|
@ -528,7 +601,17 @@ func (xTransport *XTransport) Fetch(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
tls := resp.TLS
|
tls := resp.TLS
|
||||||
bin, err := io.ReadAll(io.LimitReader(resp.Body, MaxHTTPBodyLength))
|
|
||||||
|
var bodyReader io.ReadCloser = resp.Body
|
||||||
|
if compress && resp.Header.Get("Content-Encoding") == "gzip" {
|
||||||
|
bodyReader, err = gzip.NewReader(io.LimitReader(resp.Body, MaxHTTPBodyLength))
|
||||||
|
if err != nil {
|
||||||
|
return nil, statusCode, tls, rtt, err
|
||||||
|
}
|
||||||
|
defer bodyReader.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
bin, err := io.ReadAll(io.LimitReader(bodyReader, MaxHTTPBodyLength))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, statusCode, tls, rtt, err
|
return nil, statusCode, tls, rtt, err
|
||||||
}
|
}
|
||||||
|
@ -536,12 +619,20 @@ func (xTransport *XTransport) Fetch(
|
||||||
return bin, statusCode, tls, rtt, err
|
return bin, statusCode, tls, rtt, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (xTransport *XTransport) GetWithCompression(
|
||||||
|
url *url.URL,
|
||||||
|
accept string,
|
||||||
|
timeout time.Duration,
|
||||||
|
) ([]byte, int, *tls.ConnectionState, time.Duration, error) {
|
||||||
|
return xTransport.Fetch("GET", url, accept, "", nil, timeout, true)
|
||||||
|
}
|
||||||
|
|
||||||
func (xTransport *XTransport) Get(
|
func (xTransport *XTransport) Get(
|
||||||
url *url.URL,
|
url *url.URL,
|
||||||
accept string,
|
accept string,
|
||||||
timeout time.Duration,
|
timeout time.Duration,
|
||||||
) ([]byte, int, *tls.ConnectionState, time.Duration, error) {
|
) ([]byte, int, *tls.ConnectionState, time.Duration, error) {
|
||||||
return xTransport.Fetch("GET", url, accept, "", nil, timeout)
|
return xTransport.Fetch("GET", url, accept, "", nil, timeout, false)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (xTransport *XTransport) Post(
|
func (xTransport *XTransport) Post(
|
||||||
|
@ -551,7 +642,7 @@ func (xTransport *XTransport) Post(
|
||||||
body *[]byte,
|
body *[]byte,
|
||||||
timeout time.Duration,
|
timeout time.Duration,
|
||||||
) ([]byte, int, *tls.ConnectionState, time.Duration, error) {
|
) ([]byte, int, *tls.ConnectionState, time.Duration, error) {
|
||||||
return xTransport.Fetch("POST", url, accept, contentType, body, timeout)
|
return xTransport.Fetch("POST", url, accept, contentType, body, timeout, false)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (xTransport *XTransport) dohLikeQuery(
|
func (xTransport *XTransport) dohLikeQuery(
|
||||||
|
|
185
go.mod
185
go.mod
|
@ -1,179 +1,52 @@
|
||||||
module github.com/dnscrypt/dnscrypt-proxy
|
module github.com/dnscrypt/dnscrypt-proxy
|
||||||
|
|
||||||
go 1.19
|
go 1.22
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/BurntSushi/toml v1.2.1
|
github.com/BurntSushi/toml v1.4.0
|
||||||
github.com/VividCortex/ewma v1.2.0
|
github.com/VividCortex/ewma v1.2.0
|
||||||
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf
|
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf
|
||||||
github.com/dchest/safefile v0.0.0-20151022103144-855e8d98f185
|
github.com/dchest/safefile v0.0.0-20151022103144-855e8d98f185
|
||||||
github.com/hashicorp/go-immutable-radix v1.3.1
|
github.com/hashicorp/go-immutable-radix v1.3.1
|
||||||
github.com/hashicorp/golang-lru v0.5.4
|
|
||||||
github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb
|
github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb
|
||||||
github.com/jedisct1/dlog v0.0.0-20210927135244-3381aa132e7f
|
github.com/jedisct1/dlog v0.0.0-20230811132706-443b333ff1b3
|
||||||
github.com/jedisct1/go-clocksmith v0.0.0-20210101121932-da382b963868
|
github.com/jedisct1/go-clocksmith v0.0.0-20230211133011-392c1afea73e
|
||||||
github.com/jedisct1/go-dnsstamps v0.0.0-20220328103132-6fb2da762370
|
github.com/jedisct1/go-dnsstamps v0.0.0-20240423203910-07a0735c7774
|
||||||
github.com/jedisct1/go-hpke-compact v0.0.0-20210930135406-0763750339f0
|
github.com/jedisct1/go-hpke-compact v0.0.0-20230811132953-4ee502b61f80
|
||||||
github.com/jedisct1/go-minisign v0.0.0-20211028175153-1c139d1cc84b
|
github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267
|
||||||
github.com/jedisct1/xsecretbox v0.0.0-20210927135450-ebe41aef7bef
|
github.com/jedisct1/xsecretbox v0.0.0-20230811132812-b950633f9f1f
|
||||||
github.com/k-sone/critbitgo v1.4.0
|
github.com/k-sone/critbitgo v1.4.0
|
||||||
github.com/kardianos/service v1.2.2
|
github.com/kardianos/service v1.2.2
|
||||||
github.com/miekg/dns v1.1.50
|
github.com/miekg/dns v1.1.59
|
||||||
github.com/powerman/check v1.6.0
|
github.com/opencoff/go-sieve v0.2.1
|
||||||
github.com/quic-go/quic-go v0.32.0
|
github.com/powerman/check v1.7.0
|
||||||
golang.org/x/crypto v0.5.0
|
github.com/quic-go/quic-go v0.44.0
|
||||||
golang.org/x/net v0.5.0
|
golang.org/x/crypto v0.23.0
|
||||||
golang.org/x/sys v0.5.0
|
golang.org/x/net v0.25.0
|
||||||
|
golang.org/x/sys v0.20.0
|
||||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1
|
gopkg.in/natefinch/lumberjack.v2 v2.2.1
|
||||||
)
|
)
|
||||||
|
|
||||||
require (
|
require (
|
||||||
4d63.com/gochecknoglobals v0.0.0-20201008074935-acfc0b28355a // indirect
|
|
||||||
github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect
|
|
||||||
github.com/Masterminds/semver v1.5.0 // indirect
|
|
||||||
github.com/OpenPeeDeeP/depguard v1.0.1 // indirect
|
|
||||||
github.com/alexkohler/prealloc v1.0.0 // indirect
|
|
||||||
github.com/ashanbrown/forbidigo v1.2.0 // indirect
|
|
||||||
github.com/ashanbrown/makezero v0.0.0-20210520155254-b6261585ddde // indirect
|
|
||||||
github.com/beorn7/perks v1.0.1 // indirect
|
|
||||||
github.com/bkielbasa/cyclop v1.2.0 // indirect
|
|
||||||
github.com/bombsimon/wsl/v3 v3.3.0 // indirect
|
|
||||||
github.com/cespare/xxhash/v2 v2.1.1 // indirect
|
|
||||||
github.com/charithe/durationcheck v0.0.8 // indirect
|
|
||||||
github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af // indirect
|
|
||||||
github.com/daixiang0/gci v0.2.8 // indirect
|
|
||||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||||
github.com/denis-tingajkin/go-header v0.4.2 // indirect
|
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
|
||||||
github.com/esimonov/ifshort v1.0.2 // indirect
|
github.com/golang/protobuf v1.5.3 // indirect
|
||||||
github.com/ettle/strcase v0.1.1 // indirect
|
|
||||||
github.com/fatih/color v1.12.0 // indirect
|
|
||||||
github.com/fatih/structtag v1.2.0 // indirect
|
|
||||||
github.com/fsnotify/fsnotify v1.4.9 // indirect
|
|
||||||
github.com/fzipp/gocyclo v0.3.1 // indirect
|
|
||||||
github.com/go-critic/go-critic v0.5.6 // indirect
|
|
||||||
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect
|
|
||||||
github.com/go-toolsmith/astcast v1.0.0 // indirect
|
|
||||||
github.com/go-toolsmith/astcopy v1.0.0 // indirect
|
|
||||||
github.com/go-toolsmith/astequal v1.0.0 // indirect
|
|
||||||
github.com/go-toolsmith/astfmt v1.0.0 // indirect
|
|
||||||
github.com/go-toolsmith/astp v1.0.0 // indirect
|
|
||||||
github.com/go-toolsmith/strparse v1.0.0 // indirect
|
|
||||||
github.com/go-toolsmith/typep v1.0.2 // indirect
|
|
||||||
github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b // indirect
|
|
||||||
github.com/gobwas/glob v0.2.3 // indirect
|
|
||||||
github.com/gofrs/flock v0.8.0 // indirect
|
|
||||||
github.com/golang/mock v1.6.0 // indirect
|
|
||||||
github.com/golang/protobuf v1.5.2 // indirect
|
|
||||||
github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 // indirect
|
|
||||||
github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect
|
|
||||||
github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613 // indirect
|
|
||||||
github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a // indirect
|
|
||||||
github.com/golangci/golangci-lint v1.41.1 // indirect
|
|
||||||
github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 // indirect
|
|
||||||
github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca // indirect
|
|
||||||
github.com/golangci/misspell v0.3.5 // indirect
|
|
||||||
github.com/golangci/revgrep v0.0.0-20210208091834-cd28932614b5 // indirect
|
|
||||||
github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 // indirect
|
|
||||||
github.com/google/go-cmp v0.5.8 // indirect
|
|
||||||
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 // indirect
|
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 // indirect
|
||||||
github.com/gordonklaus/ineffassign v0.0.0-20210225214923-2e10b2664254 // indirect
|
|
||||||
github.com/gostaticanalysis/analysisutil v0.4.1 // indirect
|
|
||||||
github.com/gostaticanalysis/comment v1.4.1 // indirect
|
|
||||||
github.com/gostaticanalysis/forcetypeassert v0.0.0-20200621232751-01d4955beaa5 // indirect
|
|
||||||
github.com/gostaticanalysis/nilerr v0.1.1 // indirect
|
|
||||||
github.com/hashicorp/errwrap v1.0.0 // indirect
|
|
||||||
github.com/hashicorp/go-multierror v1.1.1 // indirect
|
|
||||||
github.com/hashicorp/go-syslog v1.0.0 // indirect
|
github.com/hashicorp/go-syslog v1.0.0 // indirect
|
||||||
github.com/hashicorp/hcl v1.0.0 // indirect
|
github.com/hashicorp/golang-lru v0.5.0 // indirect
|
||||||
github.com/inconshreveable/mousetrap v1.0.0 // indirect
|
github.com/onsi/ginkgo/v2 v2.9.5 // indirect
|
||||||
github.com/jgautheron/goconst v1.5.1 // indirect
|
|
||||||
github.com/jingyugao/rowserrcheck v1.1.0 // indirect
|
|
||||||
github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af // indirect
|
|
||||||
github.com/julz/importas v0.0.0-20210419104244-841f0c0fe66d // indirect
|
|
||||||
github.com/kisielk/errcheck v1.6.0 // indirect
|
|
||||||
github.com/kisielk/gotool v1.0.0 // indirect
|
|
||||||
github.com/kulti/thelper v0.4.0 // indirect
|
|
||||||
github.com/kunwardeep/paralleltest v1.0.2 // indirect
|
|
||||||
github.com/kyoh86/exportloopref v0.1.8 // indirect
|
|
||||||
github.com/ldez/gomoddirectives v0.2.1 // indirect
|
|
||||||
github.com/ldez/tagliatelle v0.2.0 // indirect
|
|
||||||
github.com/magiconair/properties v1.8.1 // indirect
|
|
||||||
github.com/maratori/testpackage v1.0.1 // indirect
|
|
||||||
github.com/matoous/godox v0.0.0-20210227103229-6504466cf951 // indirect
|
|
||||||
github.com/mattn/go-colorable v0.1.8 // indirect
|
|
||||||
github.com/mattn/go-isatty v0.0.12 // indirect
|
|
||||||
github.com/mattn/go-runewidth v0.0.9 // indirect
|
|
||||||
github.com/mattn/goveralls v0.0.9 // indirect
|
|
||||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
|
|
||||||
github.com/mbilski/exhaustivestruct v1.2.0 // indirect
|
|
||||||
github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81 // indirect
|
|
||||||
github.com/mgechev/revive v1.0.7 // indirect
|
|
||||||
github.com/mitchellh/go-homedir v1.1.0 // indirect
|
|
||||||
github.com/mitchellh/mapstructure v1.1.2 // indirect
|
|
||||||
github.com/moricho/tparallel v0.2.1 // indirect
|
|
||||||
github.com/nakabonne/nestif v0.3.0 // indirect
|
|
||||||
github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 // indirect
|
|
||||||
github.com/nishanths/exhaustive v0.1.0 // indirect
|
|
||||||
github.com/nishanths/predeclared v0.2.1 // indirect
|
|
||||||
github.com/olekukonko/tablewriter v0.0.5 // indirect
|
|
||||||
github.com/onsi/ginkgo/v2 v2.2.0 // indirect
|
|
||||||
github.com/pelletier/go-toml v1.2.0 // indirect
|
|
||||||
github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d // indirect
|
|
||||||
github.com/pkg/errors v0.9.1 // indirect
|
github.com/pkg/errors v0.9.1 // indirect
|
||||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||||
github.com/polyfloyd/go-errorlint v0.0.0-20210510181950-ab96adb96fea // indirect
|
|
||||||
github.com/powerman/deepequal v0.1.0 // indirect
|
github.com/powerman/deepequal v0.1.0 // indirect
|
||||||
github.com/prometheus/client_golang v1.7.1 // indirect
|
|
||||||
github.com/prometheus/client_model v0.2.0 // indirect
|
|
||||||
github.com/prometheus/common v0.10.0 // indirect
|
|
||||||
github.com/prometheus/procfs v0.1.3 // indirect
|
|
||||||
github.com/quasilyte/go-ruleguard v0.3.4 // indirect
|
|
||||||
github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95 // indirect
|
|
||||||
github.com/quic-go/qpack v0.4.0 // indirect
|
github.com/quic-go/qpack v0.4.0 // indirect
|
||||||
github.com/quic-go/qtls-go1-18 v0.2.0 // indirect
|
github.com/smartystreets/goconvey v1.7.2 // indirect
|
||||||
github.com/quic-go/qtls-go1-19 v0.2.0 // indirect
|
go.uber.org/mock v0.4.0 // indirect
|
||||||
github.com/quic-go/qtls-go1-20 v0.1.0 // indirect
|
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect
|
||||||
github.com/ryancurrah/gomodguard v1.2.2 // indirect
|
golang.org/x/mod v0.17.0 // indirect
|
||||||
github.com/ryanrolds/sqlclosecheck v0.3.0 // indirect
|
golang.org/x/sync v0.7.0 // indirect
|
||||||
github.com/sanposhiho/wastedassign/v2 v2.0.6 // indirect
|
golang.org/x/text v0.15.0 // indirect
|
||||||
github.com/securego/gosec/v2 v2.8.0 // indirect
|
golang.org/x/tools v0.21.0 // indirect
|
||||||
github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect
|
google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect
|
||||||
github.com/sirupsen/logrus v1.8.1 // indirect
|
google.golang.org/grpc v1.53.0 // indirect
|
||||||
github.com/smartystreets/goconvey v1.6.4 // indirect
|
google.golang.org/protobuf v1.30.0 // indirect
|
||||||
github.com/sonatard/noctx v0.0.1 // indirect
|
|
||||||
github.com/sourcegraph/go-diff v0.6.1 // indirect
|
|
||||||
github.com/spf13/afero v1.1.2 // indirect
|
|
||||||
github.com/spf13/cast v1.3.0 // indirect
|
|
||||||
github.com/spf13/cobra v1.1.3 // indirect
|
|
||||||
github.com/spf13/jwalterweatherman v1.0.0 // indirect
|
|
||||||
github.com/spf13/pflag v1.0.5 // indirect
|
|
||||||
github.com/spf13/viper v1.7.1 // indirect
|
|
||||||
github.com/ssgreg/nlreturn/v2 v2.1.0 // indirect
|
|
||||||
github.com/stretchr/objx v0.1.1 // indirect
|
|
||||||
github.com/stretchr/testify v1.7.0 // indirect
|
|
||||||
github.com/subosito/gotenv v1.2.0 // indirect
|
|
||||||
github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b // indirect
|
|
||||||
github.com/tetafro/godot v1.4.7 // indirect
|
|
||||||
github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94 // indirect
|
|
||||||
github.com/tomarrell/wrapcheck/v2 v2.1.0 // indirect
|
|
||||||
github.com/tommy-muehle/go-mnd/v2 v2.4.0 // indirect
|
|
||||||
github.com/ultraware/funlen v0.0.3 // indirect
|
|
||||||
github.com/ultraware/whitespace v0.0.4 // indirect
|
|
||||||
github.com/uudashr/gocognit v1.0.1 // indirect
|
|
||||||
github.com/yeya24/promlinter v0.1.0 // indirect
|
|
||||||
golang.org/x/exp v0.0.0-20221205204356-47842c84f3db // indirect
|
|
||||||
golang.org/x/mod v0.6.0 // indirect
|
|
||||||
golang.org/x/text v0.6.0 // indirect
|
|
||||||
golang.org/x/tools v0.2.0 // indirect
|
|
||||||
google.golang.org/genproto v0.0.0-20200707001353-8e8330bf89df // indirect
|
|
||||||
google.golang.org/grpc v1.38.0 // indirect
|
|
||||||
google.golang.org/protobuf v1.28.0 // indirect
|
|
||||||
gopkg.in/ini.v1 v1.51.0 // indirect
|
|
||||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
|
||||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
|
||||||
honnef.co/go/tools v0.2.0 // indirect
|
|
||||||
mvdan.cc/gofumpt v0.1.1 // indirect
|
|
||||||
mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed // indirect
|
|
||||||
mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b // indirect
|
|
||||||
mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7 // indirect
|
|
||||||
)
|
)
|
||||||
|
|
|
@ -1,21 +0,0 @@
|
||||||
MIT License
|
|
||||||
|
|
||||||
Copyright (c) 2018 Leigh McCulloch
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
|
||||||
in the Software without restriction, including without limitation the rights
|
|
||||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
copies of the Software, and to permit persons to whom the Software is
|
|
||||||
furnished to do so, subject to the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be included in all
|
|
||||||
copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
||||||
SOFTWARE.
|
|
|
@ -1,154 +0,0 @@
|
||||||
package checknoglobals
|
|
||||||
|
|
||||||
import (
|
|
||||||
"flag"
|
|
||||||
"fmt"
|
|
||||||
"go/ast"
|
|
||||||
"go/token"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"golang.org/x/tools/go/analysis"
|
|
||||||
)
|
|
||||||
|
|
||||||
// allowedExpression is a struct representing packages and methods that will
|
|
||||||
// be an allowed combination to use as a global variable, f.ex. Name `regexp`
|
|
||||||
// and SelName `MustCompile`.
|
|
||||||
type allowedExpression struct {
|
|
||||||
Name string
|
|
||||||
SelName string
|
|
||||||
}
|
|
||||||
|
|
||||||
const Doc = `check that no global variables exist
|
|
||||||
|
|
||||||
This analyzer checks for global variables and errors on any found.
|
|
||||||
|
|
||||||
A global variable is a variable declared in package scope and that can be read
|
|
||||||
and written to by any function within the package. Global variables can cause
|
|
||||||
side effects which are difficult to keep track of. A code in one function may
|
|
||||||
change the variables state while another unrelated chunk of code may be
|
|
||||||
effected by it.`
|
|
||||||
|
|
||||||
// Analyzer provides an Analyzer that checks that there are no global
|
|
||||||
// variables, except for errors and variables containing regular
|
|
||||||
// expressions.
|
|
||||||
func Analyzer() *analysis.Analyzer {
|
|
||||||
return &analysis.Analyzer{
|
|
||||||
Name: "gochecknoglobals",
|
|
||||||
Doc: Doc,
|
|
||||||
Run: checkNoGlobals,
|
|
||||||
Flags: flags(),
|
|
||||||
RunDespiteErrors: true,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func flags() flag.FlagSet {
|
|
||||||
flags := flag.NewFlagSet("", flag.ExitOnError)
|
|
||||||
flags.Bool("t", false, "Include tests")
|
|
||||||
|
|
||||||
return *flags
|
|
||||||
}
|
|
||||||
|
|
||||||
func isAllowed(v ast.Node) bool {
|
|
||||||
switch i := v.(type) {
|
|
||||||
case *ast.Ident:
|
|
||||||
return i.Name == "_" || i.Name == "version" || looksLikeError(i)
|
|
||||||
case *ast.CallExpr:
|
|
||||||
if expr, ok := i.Fun.(*ast.SelectorExpr); ok {
|
|
||||||
return isAllowedSelectorExpression(expr)
|
|
||||||
}
|
|
||||||
case *ast.CompositeLit:
|
|
||||||
if expr, ok := i.Type.(*ast.SelectorExpr); ok {
|
|
||||||
return isAllowedSelectorExpression(expr)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func isAllowedSelectorExpression(v *ast.SelectorExpr) bool {
|
|
||||||
x, ok := v.X.(*ast.Ident)
|
|
||||||
if !ok {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
allowList := []allowedExpression{
|
|
||||||
{Name: "regexp", SelName: "MustCompile"},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, i := range allowList {
|
|
||||||
if x.Name == i.Name && v.Sel.Name == i.SelName {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// looksLikeError returns true if the AST identifier starts
|
|
||||||
// with 'err' or 'Err', or false otherwise.
|
|
||||||
//
|
|
||||||
// TODO: https://github.com/leighmcculloch/gochecknoglobals/issues/5
|
|
||||||
func looksLikeError(i *ast.Ident) bool {
|
|
||||||
prefix := "err"
|
|
||||||
if i.IsExported() {
|
|
||||||
prefix = "Err"
|
|
||||||
}
|
|
||||||
return strings.HasPrefix(i.Name, prefix)
|
|
||||||
}
|
|
||||||
|
|
||||||
func checkNoGlobals(pass *analysis.Pass) (interface{}, error) {
|
|
||||||
includeTests := pass.Analyzer.Flags.Lookup("t").Value.(flag.Getter).Get().(bool)
|
|
||||||
|
|
||||||
for _, file := range pass.Files {
|
|
||||||
filename := pass.Fset.Position(file.Pos()).Filename
|
|
||||||
if !strings.HasSuffix(filename, ".go") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if !includeTests && strings.HasSuffix(filename, "_test.go") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, decl := range file.Decls {
|
|
||||||
genDecl, ok := decl.(*ast.GenDecl)
|
|
||||||
if !ok {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if genDecl.Tok != token.VAR {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
for _, spec := range genDecl.Specs {
|
|
||||||
valueSpec := spec.(*ast.ValueSpec)
|
|
||||||
onlyAllowedValues := false
|
|
||||||
|
|
||||||
for _, vn := range valueSpec.Values {
|
|
||||||
if isAllowed(vn) {
|
|
||||||
onlyAllowedValues = true
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
onlyAllowedValues = false
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
if onlyAllowedValues {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, vn := range valueSpec.Names {
|
|
||||||
if isAllowed(vn) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
message := fmt.Sprintf("%s is a global variable", vn.Name)
|
|
||||||
pass.Report(analysis.Diagnostic{
|
|
||||||
Pos: vn.Pos(),
|
|
||||||
Category: "global",
|
|
||||||
Message: message,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
|
@ -9,7 +9,7 @@ See the [releases page](https://github.com/BurntSushi/toml/releases) for a
|
||||||
changelog; this information is also in the git tag annotations (e.g. `git show
|
changelog; this information is also in the git tag annotations (e.g. `git show
|
||||||
v0.4.0`).
|
v0.4.0`).
|
||||||
|
|
||||||
This library requires Go 1.13 or newer; add it to your go.mod with:
|
This library requires Go 1.18 or newer; add it to your go.mod with:
|
||||||
|
|
||||||
% go get github.com/BurntSushi/toml@latest
|
% go get github.com/BurntSushi/toml@latest
|
||||||
|
|
||||||
|
|
|
@ -6,7 +6,7 @@ import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/fs"
|
||||||
"math"
|
"math"
|
||||||
"os"
|
"os"
|
||||||
"reflect"
|
"reflect"
|
||||||
|
@ -18,13 +18,13 @@ import (
|
||||||
// Unmarshaler is the interface implemented by objects that can unmarshal a
|
// Unmarshaler is the interface implemented by objects that can unmarshal a
|
||||||
// TOML description of themselves.
|
// TOML description of themselves.
|
||||||
type Unmarshaler interface {
|
type Unmarshaler interface {
|
||||||
UnmarshalTOML(interface{}) error
|
UnmarshalTOML(any) error
|
||||||
}
|
}
|
||||||
|
|
||||||
// Unmarshal decodes the contents of data in TOML format into a pointer v.
|
// Unmarshal decodes the contents of data in TOML format into a pointer v.
|
||||||
//
|
//
|
||||||
// See [Decoder] for a description of the decoding process.
|
// See [Decoder] for a description of the decoding process.
|
||||||
func Unmarshal(data []byte, v interface{}) error {
|
func Unmarshal(data []byte, v any) error {
|
||||||
_, err := NewDecoder(bytes.NewReader(data)).Decode(v)
|
_, err := NewDecoder(bytes.NewReader(data)).Decode(v)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -32,12 +32,12 @@ func Unmarshal(data []byte, v interface{}) error {
|
||||||
// Decode the TOML data in to the pointer v.
|
// Decode the TOML data in to the pointer v.
|
||||||
//
|
//
|
||||||
// See [Decoder] for a description of the decoding process.
|
// See [Decoder] for a description of the decoding process.
|
||||||
func Decode(data string, v interface{}) (MetaData, error) {
|
func Decode(data string, v any) (MetaData, error) {
|
||||||
return NewDecoder(strings.NewReader(data)).Decode(v)
|
return NewDecoder(strings.NewReader(data)).Decode(v)
|
||||||
}
|
}
|
||||||
|
|
||||||
// DecodeFile reads the contents of a file and decodes it with [Decode].
|
// DecodeFile reads the contents of a file and decodes it with [Decode].
|
||||||
func DecodeFile(path string, v interface{}) (MetaData, error) {
|
func DecodeFile(path string, v any) (MetaData, error) {
|
||||||
fp, err := os.Open(path)
|
fp, err := os.Open(path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return MetaData{}, err
|
return MetaData{}, err
|
||||||
|
@ -46,6 +46,17 @@ func DecodeFile(path string, v interface{}) (MetaData, error) {
|
||||||
return NewDecoder(fp).Decode(v)
|
return NewDecoder(fp).Decode(v)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DecodeFS reads the contents of a file from [fs.FS] and decodes it with
|
||||||
|
// [Decode].
|
||||||
|
func DecodeFS(fsys fs.FS, path string, v any) (MetaData, error) {
|
||||||
|
fp, err := fsys.Open(path)
|
||||||
|
if err != nil {
|
||||||
|
return MetaData{}, err
|
||||||
|
}
|
||||||
|
defer fp.Close()
|
||||||
|
return NewDecoder(fp).Decode(v)
|
||||||
|
}
|
||||||
|
|
||||||
// Primitive is a TOML value that hasn't been decoded into a Go value.
|
// Primitive is a TOML value that hasn't been decoded into a Go value.
|
||||||
//
|
//
|
||||||
// This type can be used for any value, which will cause decoding to be delayed.
|
// This type can be used for any value, which will cause decoding to be delayed.
|
||||||
|
@ -58,7 +69,7 @@ func DecodeFile(path string, v interface{}) (MetaData, error) {
|
||||||
// overhead of reflection. They can be useful when you don't know the exact type
|
// overhead of reflection. They can be useful when you don't know the exact type
|
||||||
// of TOML data until runtime.
|
// of TOML data until runtime.
|
||||||
type Primitive struct {
|
type Primitive struct {
|
||||||
undecoded interface{}
|
undecoded any
|
||||||
context Key
|
context Key
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -91,7 +102,7 @@ const (
|
||||||
// UnmarshalText method. See the Unmarshaler example for a demonstration with
|
// UnmarshalText method. See the Unmarshaler example for a demonstration with
|
||||||
// email addresses.
|
// email addresses.
|
||||||
//
|
//
|
||||||
// ### Key mapping
|
// # Key mapping
|
||||||
//
|
//
|
||||||
// TOML keys can map to either keys in a Go map or field names in a Go struct.
|
// TOML keys can map to either keys in a Go map or field names in a Go struct.
|
||||||
// The special `toml` struct tag can be used to map TOML keys to struct fields
|
// The special `toml` struct tag can be used to map TOML keys to struct fields
|
||||||
|
@ -122,7 +133,7 @@ var (
|
||||||
)
|
)
|
||||||
|
|
||||||
// Decode TOML data in to the pointer `v`.
|
// Decode TOML data in to the pointer `v`.
|
||||||
func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
|
func (dec *Decoder) Decode(v any) (MetaData, error) {
|
||||||
rv := reflect.ValueOf(v)
|
rv := reflect.ValueOf(v)
|
||||||
if rv.Kind() != reflect.Ptr {
|
if rv.Kind() != reflect.Ptr {
|
||||||
s := "%q"
|
s := "%q"
|
||||||
|
@ -136,8 +147,8 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
|
||||||
return MetaData{}, fmt.Errorf("toml: cannot decode to nil value of %q", reflect.TypeOf(v))
|
return MetaData{}, fmt.Errorf("toml: cannot decode to nil value of %q", reflect.TypeOf(v))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check if this is a supported type: struct, map, interface{}, or something
|
// Check if this is a supported type: struct, map, any, or something that
|
||||||
// that implements UnmarshalTOML or UnmarshalText.
|
// implements UnmarshalTOML or UnmarshalText.
|
||||||
rv = indirect(rv)
|
rv = indirect(rv)
|
||||||
rt := rv.Type()
|
rt := rv.Type()
|
||||||
if rv.Kind() != reflect.Struct && rv.Kind() != reflect.Map &&
|
if rv.Kind() != reflect.Struct && rv.Kind() != reflect.Map &&
|
||||||
|
@ -148,7 +159,7 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
|
||||||
|
|
||||||
// TODO: parser should read from io.Reader? Or at the very least, make it
|
// TODO: parser should read from io.Reader? Or at the very least, make it
|
||||||
// read from []byte rather than string
|
// read from []byte rather than string
|
||||||
data, err := ioutil.ReadAll(dec.r)
|
data, err := io.ReadAll(dec.r)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return MetaData{}, err
|
return MetaData{}, err
|
||||||
}
|
}
|
||||||
|
@ -179,7 +190,7 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
|
||||||
// will only reflect keys that were decoded. Namely, any keys hidden behind a
|
// will only reflect keys that were decoded. Namely, any keys hidden behind a
|
||||||
// Primitive will be considered undecoded. Executing this method will update the
|
// Primitive will be considered undecoded. Executing this method will update the
|
||||||
// undecoded keys in the meta data. (See the example.)
|
// undecoded keys in the meta data. (See the example.)
|
||||||
func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
|
func (md *MetaData) PrimitiveDecode(primValue Primitive, v any) error {
|
||||||
md.context = primValue.context
|
md.context = primValue.context
|
||||||
defer func() { md.context = nil }()
|
defer func() { md.context = nil }()
|
||||||
return md.unify(primValue.undecoded, rvalue(v))
|
return md.unify(primValue.undecoded, rvalue(v))
|
||||||
|
@ -190,7 +201,7 @@ func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
|
||||||
//
|
//
|
||||||
// Any type mismatch produces an error. Finding a type that we don't know
|
// Any type mismatch produces an error. Finding a type that we don't know
|
||||||
// how to handle produces an unsupported type error.
|
// how to handle produces an unsupported type error.
|
||||||
func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
|
func (md *MetaData) unify(data any, rv reflect.Value) error {
|
||||||
// Special case. Look for a `Primitive` value.
|
// Special case. Look for a `Primitive` value.
|
||||||
// TODO: #76 would make this superfluous after implemented.
|
// TODO: #76 would make this superfluous after implemented.
|
||||||
if rv.Type() == primitiveType {
|
if rv.Type() == primitiveType {
|
||||||
|
@ -207,7 +218,11 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
|
||||||
|
|
||||||
rvi := rv.Interface()
|
rvi := rv.Interface()
|
||||||
if v, ok := rvi.(Unmarshaler); ok {
|
if v, ok := rvi.(Unmarshaler); ok {
|
||||||
return v.UnmarshalTOML(data)
|
err := v.UnmarshalTOML(data)
|
||||||
|
if err != nil {
|
||||||
|
return md.parseErr(err)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
if v, ok := rvi.(encoding.TextUnmarshaler); ok {
|
if v, ok := rvi.(encoding.TextUnmarshaler); ok {
|
||||||
return md.unifyText(data, v)
|
return md.unifyText(data, v)
|
||||||
|
@ -227,14 +242,6 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
|
||||||
return md.unifyInt(data, rv)
|
return md.unifyInt(data, rv)
|
||||||
}
|
}
|
||||||
switch k {
|
switch k {
|
||||||
case reflect.Ptr:
|
|
||||||
elem := reflect.New(rv.Type().Elem())
|
|
||||||
err := md.unify(data, reflect.Indirect(elem))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
rv.Set(elem)
|
|
||||||
return nil
|
|
||||||
case reflect.Struct:
|
case reflect.Struct:
|
||||||
return md.unifyStruct(data, rv)
|
return md.unifyStruct(data, rv)
|
||||||
case reflect.Map:
|
case reflect.Map:
|
||||||
|
@ -248,7 +255,7 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
|
||||||
case reflect.Bool:
|
case reflect.Bool:
|
||||||
return md.unifyBool(data, rv)
|
return md.unifyBool(data, rv)
|
||||||
case reflect.Interface:
|
case reflect.Interface:
|
||||||
if rv.NumMethod() > 0 { // Only support empty interfaces are supported.
|
if rv.NumMethod() > 0 { /// Only empty interfaces are supported.
|
||||||
return md.e("unsupported type %s", rv.Type())
|
return md.e("unsupported type %s", rv.Type())
|
||||||
}
|
}
|
||||||
return md.unifyAnything(data, rv)
|
return md.unifyAnything(data, rv)
|
||||||
|
@ -258,14 +265,13 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
|
||||||
return md.e("unsupported type %s", rv.Kind())
|
return md.e("unsupported type %s", rv.Kind())
|
||||||
}
|
}
|
||||||
|
|
||||||
func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
|
func (md *MetaData) unifyStruct(mapping any, rv reflect.Value) error {
|
||||||
tmap, ok := mapping.(map[string]interface{})
|
tmap, ok := mapping.(map[string]any)
|
||||||
if !ok {
|
if !ok {
|
||||||
if mapping == nil {
|
if mapping == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return md.e("type mismatch for %s: expected table but found %T",
|
return md.e("type mismatch for %s: expected table but found %s", rv.Type().String(), fmtType(mapping))
|
||||||
rv.Type().String(), mapping)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
for key, datum := range tmap {
|
for key, datum := range tmap {
|
||||||
|
@ -304,14 +310,14 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
|
func (md *MetaData) unifyMap(mapping any, rv reflect.Value) error {
|
||||||
keyType := rv.Type().Key().Kind()
|
keyType := rv.Type().Key().Kind()
|
||||||
if keyType != reflect.String && keyType != reflect.Interface {
|
if keyType != reflect.String && keyType != reflect.Interface {
|
||||||
return fmt.Errorf("toml: cannot decode to a map with non-string key type (%s in %q)",
|
return fmt.Errorf("toml: cannot decode to a map with non-string key type (%s in %q)",
|
||||||
keyType, rv.Type())
|
keyType, rv.Type())
|
||||||
}
|
}
|
||||||
|
|
||||||
tmap, ok := mapping.(map[string]interface{})
|
tmap, ok := mapping.(map[string]any)
|
||||||
if !ok {
|
if !ok {
|
||||||
if tmap == nil {
|
if tmap == nil {
|
||||||
return nil
|
return nil
|
||||||
|
@ -347,7 +353,7 @@ func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
|
func (md *MetaData) unifyArray(data any, rv reflect.Value) error {
|
||||||
datav := reflect.ValueOf(data)
|
datav := reflect.ValueOf(data)
|
||||||
if datav.Kind() != reflect.Slice {
|
if datav.Kind() != reflect.Slice {
|
||||||
if !datav.IsValid() {
|
if !datav.IsValid() {
|
||||||
|
@ -361,7 +367,7 @@ func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
|
||||||
return md.unifySliceArray(datav, rv)
|
return md.unifySliceArray(datav, rv)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
|
func (md *MetaData) unifySlice(data any, rv reflect.Value) error {
|
||||||
datav := reflect.ValueOf(data)
|
datav := reflect.ValueOf(data)
|
||||||
if datav.Kind() != reflect.Slice {
|
if datav.Kind() != reflect.Slice {
|
||||||
if !datav.IsValid() {
|
if !datav.IsValid() {
|
||||||
|
@ -388,7 +394,7 @@ func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
|
func (md *MetaData) unifyString(data any, rv reflect.Value) error {
|
||||||
_, ok := rv.Interface().(json.Number)
|
_, ok := rv.Interface().(json.Number)
|
||||||
if ok {
|
if ok {
|
||||||
if i, ok := data.(int64); ok {
|
if i, ok := data.(int64); ok {
|
||||||
|
@ -408,7 +414,7 @@ func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
|
||||||
return md.badtype("string", data)
|
return md.badtype("string", data)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
|
func (md *MetaData) unifyFloat64(data any, rv reflect.Value) error {
|
||||||
rvk := rv.Kind()
|
rvk := rv.Kind()
|
||||||
|
|
||||||
if num, ok := data.(float64); ok {
|
if num, ok := data.(float64); ok {
|
||||||
|
@ -429,7 +435,7 @@ func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
|
||||||
if num, ok := data.(int64); ok {
|
if num, ok := data.(int64); ok {
|
||||||
if (rvk == reflect.Float32 && (num < -maxSafeFloat32Int || num > maxSafeFloat32Int)) ||
|
if (rvk == reflect.Float32 && (num < -maxSafeFloat32Int || num > maxSafeFloat32Int)) ||
|
||||||
(rvk == reflect.Float64 && (num < -maxSafeFloat64Int || num > maxSafeFloat64Int)) {
|
(rvk == reflect.Float64 && (num < -maxSafeFloat64Int || num > maxSafeFloat64Int)) {
|
||||||
return md.parseErr(errParseRange{i: num, size: rvk.String()})
|
return md.parseErr(errUnsafeFloat{i: num, size: rvk.String()})
|
||||||
}
|
}
|
||||||
rv.SetFloat(float64(num))
|
rv.SetFloat(float64(num))
|
||||||
return nil
|
return nil
|
||||||
|
@ -438,7 +444,7 @@ func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
|
||||||
return md.badtype("float", data)
|
return md.badtype("float", data)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
|
func (md *MetaData) unifyInt(data any, rv reflect.Value) error {
|
||||||
_, ok := rv.Interface().(time.Duration)
|
_, ok := rv.Interface().(time.Duration)
|
||||||
if ok {
|
if ok {
|
||||||
// Parse as string duration, and fall back to regular integer parsing
|
// Parse as string duration, and fall back to regular integer parsing
|
||||||
|
@ -481,7 +487,7 @@ func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
|
func (md *MetaData) unifyBool(data any, rv reflect.Value) error {
|
||||||
if b, ok := data.(bool); ok {
|
if b, ok := data.(bool); ok {
|
||||||
rv.SetBool(b)
|
rv.SetBool(b)
|
||||||
return nil
|
return nil
|
||||||
|
@ -489,12 +495,12 @@ func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
|
||||||
return md.badtype("boolean", data)
|
return md.badtype("boolean", data)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
|
func (md *MetaData) unifyAnything(data any, rv reflect.Value) error {
|
||||||
rv.Set(reflect.ValueOf(data))
|
rv.Set(reflect.ValueOf(data))
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) error {
|
func (md *MetaData) unifyText(data any, v encoding.TextUnmarshaler) error {
|
||||||
var s string
|
var s string
|
||||||
switch sdata := data.(type) {
|
switch sdata := data.(type) {
|
||||||
case Marshaler:
|
case Marshaler:
|
||||||
|
@ -523,13 +529,13 @@ func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) erro
|
||||||
return md.badtype("primitive (string-like)", data)
|
return md.badtype("primitive (string-like)", data)
|
||||||
}
|
}
|
||||||
if err := v.UnmarshalText([]byte(s)); err != nil {
|
if err := v.UnmarshalText([]byte(s)); err != nil {
|
||||||
return err
|
return md.parseErr(err)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (md *MetaData) badtype(dst string, data interface{}) error {
|
func (md *MetaData) badtype(dst string, data any) error {
|
||||||
return md.e("incompatible types: TOML value has type %T; destination has type %s", data, dst)
|
return md.e("incompatible types: TOML value has type %s; destination has type %s", fmtType(data), dst)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (md *MetaData) parseErr(err error) error {
|
func (md *MetaData) parseErr(err error) error {
|
||||||
|
@ -543,7 +549,7 @@ func (md *MetaData) parseErr(err error) error {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (md *MetaData) e(format string, args ...interface{}) error {
|
func (md *MetaData) e(format string, args ...any) error {
|
||||||
f := "toml: "
|
f := "toml: "
|
||||||
if len(md.context) > 0 {
|
if len(md.context) > 0 {
|
||||||
f = fmt.Sprintf("toml: (last key %q): ", md.context)
|
f = fmt.Sprintf("toml: (last key %q): ", md.context)
|
||||||
|
@ -556,7 +562,7 @@ func (md *MetaData) e(format string, args ...interface{}) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
// rvalue returns a reflect.Value of `v`. All pointers are resolved.
|
// rvalue returns a reflect.Value of `v`. All pointers are resolved.
|
||||||
func rvalue(v interface{}) reflect.Value {
|
func rvalue(v any) reflect.Value {
|
||||||
return indirect(reflect.ValueOf(v))
|
return indirect(reflect.ValueOf(v))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -600,3 +606,8 @@ func isUnifiable(rv reflect.Value) bool {
|
||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// fmt %T with "interface {}" replaced with "any", which is far more readable.
|
||||||
|
func fmtType(t any) string {
|
||||||
|
return strings.ReplaceAll(fmt.Sprintf("%T", t), "interface {}", "any")
|
||||||
|
}
|
||||||
|
|
|
@ -1,19 +0,0 @@
|
||||||
//go:build go1.16
|
|
||||||
// +build go1.16
|
|
||||||
|
|
||||||
package toml
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io/fs"
|
|
||||||
)
|
|
||||||
|
|
||||||
// DecodeFS reads the contents of a file from [fs.FS] and decodes it with
|
|
||||||
// [Decode].
|
|
||||||
func DecodeFS(fsys fs.FS, path string, v interface{}) (MetaData, error) {
|
|
||||||
fp, err := fsys.Open(path)
|
|
||||||
if err != nil {
|
|
||||||
return MetaData{}, err
|
|
||||||
}
|
|
||||||
defer fp.Close()
|
|
||||||
return NewDecoder(fp).Decode(v)
|
|
||||||
}
|
|
|
@ -5,17 +5,25 @@ import (
|
||||||
"io"
|
"io"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// TextMarshaler is an alias for encoding.TextMarshaler.
|
||||||
|
//
|
||||||
// Deprecated: use encoding.TextMarshaler
|
// Deprecated: use encoding.TextMarshaler
|
||||||
type TextMarshaler encoding.TextMarshaler
|
type TextMarshaler encoding.TextMarshaler
|
||||||
|
|
||||||
|
// TextUnmarshaler is an alias for encoding.TextUnmarshaler.
|
||||||
|
//
|
||||||
// Deprecated: use encoding.TextUnmarshaler
|
// Deprecated: use encoding.TextUnmarshaler
|
||||||
type TextUnmarshaler encoding.TextUnmarshaler
|
type TextUnmarshaler encoding.TextUnmarshaler
|
||||||
|
|
||||||
|
// DecodeReader is an alias for NewDecoder(r).Decode(v).
|
||||||
|
//
|
||||||
|
// Deprecated: use NewDecoder(reader).Decode(&value).
|
||||||
|
func DecodeReader(r io.Reader, v any) (MetaData, error) { return NewDecoder(r).Decode(v) }
|
||||||
|
|
||||||
|
// PrimitiveDecode is an alias for MetaData.PrimitiveDecode().
|
||||||
|
//
|
||||||
// Deprecated: use MetaData.PrimitiveDecode.
|
// Deprecated: use MetaData.PrimitiveDecode.
|
||||||
func PrimitiveDecode(primValue Primitive, v interface{}) error {
|
func PrimitiveDecode(primValue Primitive, v any) error {
|
||||||
md := MetaData{decoded: make(map[string]struct{})}
|
md := MetaData{decoded: make(map[string]struct{})}
|
||||||
return md.unify(primValue.undecoded, rvalue(v))
|
return md.unify(primValue.undecoded, rvalue(v))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Deprecated: use NewDecoder(reader).Decode(&value).
|
|
||||||
func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { return NewDecoder(r).Decode(v) }
|
|
||||||
|
|
|
@ -2,9 +2,6 @@
|
||||||
//
|
//
|
||||||
// This package supports TOML v1.0.0, as specified at https://toml.io
|
// This package supports TOML v1.0.0, as specified at https://toml.io
|
||||||
//
|
//
|
||||||
// There is also support for delaying decoding with the Primitive type, and
|
|
||||||
// querying the set of keys in a TOML document with the MetaData type.
|
|
||||||
//
|
|
||||||
// The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator,
|
// The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator,
|
||||||
// and can be used to verify if TOML document is valid. It can also be used to
|
// and can be used to verify if TOML document is valid. It can also be used to
|
||||||
// print the type of each key.
|
// print the type of each key.
|
||||||
|
|
|
@ -2,6 +2,7 @@ package toml
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
|
"bytes"
|
||||||
"encoding"
|
"encoding"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
|
@ -76,6 +77,17 @@ type Marshaler interface {
|
||||||
MarshalTOML() ([]byte, error)
|
MarshalTOML() ([]byte, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Marshal returns a TOML representation of the Go value.
|
||||||
|
//
|
||||||
|
// See [Encoder] for a description of the encoding process.
|
||||||
|
func Marshal(v any) ([]byte, error) {
|
||||||
|
buff := new(bytes.Buffer)
|
||||||
|
if err := NewEncoder(buff).Encode(v); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return buff.Bytes(), nil
|
||||||
|
}
|
||||||
|
|
||||||
// Encoder encodes a Go to a TOML document.
|
// Encoder encodes a Go to a TOML document.
|
||||||
//
|
//
|
||||||
// The mapping between Go values and TOML values should be precisely the same as
|
// The mapping between Go values and TOML values should be precisely the same as
|
||||||
|
@ -115,28 +127,24 @@ type Marshaler interface {
|
||||||
// NOTE: only exported keys are encoded due to the use of reflection. Unexported
|
// NOTE: only exported keys are encoded due to the use of reflection. Unexported
|
||||||
// keys are silently discarded.
|
// keys are silently discarded.
|
||||||
type Encoder struct {
|
type Encoder struct {
|
||||||
// String to use for a single indentation level; default is two spaces.
|
Indent string // string for a single indentation level; default is two spaces.
|
||||||
Indent string
|
hasWritten bool // written any output to w yet?
|
||||||
|
|
||||||
w *bufio.Writer
|
w *bufio.Writer
|
||||||
hasWritten bool // written any output to w yet?
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewEncoder create a new Encoder.
|
// NewEncoder create a new Encoder.
|
||||||
func NewEncoder(w io.Writer) *Encoder {
|
func NewEncoder(w io.Writer) *Encoder {
|
||||||
return &Encoder{
|
return &Encoder{w: bufio.NewWriter(w), Indent: " "}
|
||||||
w: bufio.NewWriter(w),
|
|
||||||
Indent: " ",
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Encode writes a TOML representation of the Go value to the [Encoder]'s writer.
|
// Encode writes a TOML representation of the Go value to the [Encoder]'s writer.
|
||||||
//
|
//
|
||||||
// An error is returned if the value given cannot be encoded to a valid TOML
|
// An error is returned if the value given cannot be encoded to a valid TOML
|
||||||
// document.
|
// document.
|
||||||
func (enc *Encoder) Encode(v interface{}) error {
|
func (enc *Encoder) Encode(v any) error {
|
||||||
rv := eindirect(reflect.ValueOf(v))
|
rv := eindirect(reflect.ValueOf(v))
|
||||||
if err := enc.safeEncode(Key([]string{}), rv); err != nil {
|
err := enc.safeEncode(Key([]string{}), rv)
|
||||||
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return enc.w.Flush()
|
return enc.w.Flush()
|
||||||
|
@ -279,18 +287,30 @@ func (enc *Encoder) eElement(rv reflect.Value) {
|
||||||
case reflect.Float32:
|
case reflect.Float32:
|
||||||
f := rv.Float()
|
f := rv.Float()
|
||||||
if math.IsNaN(f) {
|
if math.IsNaN(f) {
|
||||||
|
if math.Signbit(f) {
|
||||||
|
enc.wf("-")
|
||||||
|
}
|
||||||
enc.wf("nan")
|
enc.wf("nan")
|
||||||
} else if math.IsInf(f, 0) {
|
} else if math.IsInf(f, 0) {
|
||||||
enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)])
|
if math.Signbit(f) {
|
||||||
|
enc.wf("-")
|
||||||
|
}
|
||||||
|
enc.wf("inf")
|
||||||
} else {
|
} else {
|
||||||
enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 32)))
|
enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 32)))
|
||||||
}
|
}
|
||||||
case reflect.Float64:
|
case reflect.Float64:
|
||||||
f := rv.Float()
|
f := rv.Float()
|
||||||
if math.IsNaN(f) {
|
if math.IsNaN(f) {
|
||||||
|
if math.Signbit(f) {
|
||||||
|
enc.wf("-")
|
||||||
|
}
|
||||||
enc.wf("nan")
|
enc.wf("nan")
|
||||||
} else if math.IsInf(f, 0) {
|
} else if math.IsInf(f, 0) {
|
||||||
enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)])
|
if math.Signbit(f) {
|
||||||
|
enc.wf("-")
|
||||||
|
}
|
||||||
|
enc.wf("inf")
|
||||||
} else {
|
} else {
|
||||||
enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 64)))
|
enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 64)))
|
||||||
}
|
}
|
||||||
|
@ -303,7 +323,7 @@ func (enc *Encoder) eElement(rv reflect.Value) {
|
||||||
case reflect.Interface:
|
case reflect.Interface:
|
||||||
enc.eElement(rv.Elem())
|
enc.eElement(rv.Elem())
|
||||||
default:
|
default:
|
||||||
encPanic(fmt.Errorf("unexpected type: %T", rv.Interface()))
|
encPanic(fmt.Errorf("unexpected type: %s", fmtType(rv.Interface())))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -457,6 +477,16 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
|
||||||
|
|
||||||
frv := eindirect(rv.Field(i))
|
frv := eindirect(rv.Field(i))
|
||||||
|
|
||||||
|
if is32Bit {
|
||||||
|
// Copy so it works correct on 32bit archs; not clear why this
|
||||||
|
// is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4
|
||||||
|
// This also works fine on 64bit, but 32bit archs are somewhat
|
||||||
|
// rare and this is a wee bit faster.
|
||||||
|
copyStart := make([]int, len(start))
|
||||||
|
copy(copyStart, start)
|
||||||
|
start = copyStart
|
||||||
|
}
|
||||||
|
|
||||||
// Treat anonymous struct fields with tag names as though they are
|
// Treat anonymous struct fields with tag names as though they are
|
||||||
// not anonymous, like encoding/json does.
|
// not anonymous, like encoding/json does.
|
||||||
//
|
//
|
||||||
|
@ -471,17 +501,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
|
||||||
if typeIsTable(tomlTypeOfGo(frv)) {
|
if typeIsTable(tomlTypeOfGo(frv)) {
|
||||||
fieldsSub = append(fieldsSub, append(start, f.Index...))
|
fieldsSub = append(fieldsSub, append(start, f.Index...))
|
||||||
} else {
|
} else {
|
||||||
// Copy so it works correct on 32bit archs; not clear why this
|
fieldsDirect = append(fieldsDirect, append(start, f.Index...))
|
||||||
// is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4
|
|
||||||
// This also works fine on 64bit, but 32bit archs are somewhat
|
|
||||||
// rare and this is a wee bit faster.
|
|
||||||
if is32Bit {
|
|
||||||
copyStart := make([]int, len(start))
|
|
||||||
copy(copyStart, start)
|
|
||||||
fieldsDirect = append(fieldsDirect, append(copyStart, f.Index...))
|
|
||||||
} else {
|
|
||||||
fieldsDirect = append(fieldsDirect, append(start, f.Index...))
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -490,24 +510,27 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
|
||||||
writeFields := func(fields [][]int) {
|
writeFields := func(fields [][]int) {
|
||||||
for _, fieldIndex := range fields {
|
for _, fieldIndex := range fields {
|
||||||
fieldType := rt.FieldByIndex(fieldIndex)
|
fieldType := rt.FieldByIndex(fieldIndex)
|
||||||
fieldVal := eindirect(rv.FieldByIndex(fieldIndex))
|
fieldVal := rv.FieldByIndex(fieldIndex)
|
||||||
|
|
||||||
if isNil(fieldVal) { /// Don't write anything for nil fields.
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
opts := getOptions(fieldType.Tag)
|
opts := getOptions(fieldType.Tag)
|
||||||
if opts.skip {
|
if opts.skip {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
if opts.omitempty && isEmpty(fieldVal) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
fieldVal = eindirect(fieldVal)
|
||||||
|
|
||||||
|
if isNil(fieldVal) { /// Don't write anything for nil fields.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
keyName := fieldType.Name
|
keyName := fieldType.Name
|
||||||
if opts.name != "" {
|
if opts.name != "" {
|
||||||
keyName = opts.name
|
keyName = opts.name
|
||||||
}
|
}
|
||||||
|
|
||||||
if opts.omitempty && enc.isEmpty(fieldVal) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if opts.omitzero && isZero(fieldVal) {
|
if opts.omitzero && isZero(fieldVal) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
@ -649,7 +672,7 @@ func isZero(rv reflect.Value) bool {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
func (enc *Encoder) isEmpty(rv reflect.Value) bool {
|
func isEmpty(rv reflect.Value) bool {
|
||||||
switch rv.Kind() {
|
switch rv.Kind() {
|
||||||
case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
|
case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
|
||||||
return rv.Len() == 0
|
return rv.Len() == 0
|
||||||
|
@ -664,13 +687,15 @@ func (enc *Encoder) isEmpty(rv reflect.Value) bool {
|
||||||
// type b struct{ s []string }
|
// type b struct{ s []string }
|
||||||
// s := a{field: b{s: []string{"AAA"}}}
|
// s := a{field: b{s: []string{"AAA"}}}
|
||||||
for i := 0; i < rv.NumField(); i++ {
|
for i := 0; i < rv.NumField(); i++ {
|
||||||
if !enc.isEmpty(rv.Field(i)) {
|
if !isEmpty(rv.Field(i)) {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return true
|
return true
|
||||||
case reflect.Bool:
|
case reflect.Bool:
|
||||||
return !rv.Bool()
|
return !rv.Bool()
|
||||||
|
case reflect.Ptr:
|
||||||
|
return rv.IsNil()
|
||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
@ -693,8 +718,11 @@ func (enc *Encoder) newline() {
|
||||||
// v v v v vv
|
// v v v v vv
|
||||||
// key = {k = 1, k2 = 2}
|
// key = {k = 1, k2 = 2}
|
||||||
func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) {
|
func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) {
|
||||||
|
/// Marshaler used on top-level document; call eElement() to just call
|
||||||
|
/// Marshal{TOML,Text}.
|
||||||
if len(key) == 0 {
|
if len(key) == 0 {
|
||||||
encPanic(errNoKey)
|
enc.eElement(val)
|
||||||
|
return
|
||||||
}
|
}
|
||||||
enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
|
enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
|
||||||
enc.eElement(val)
|
enc.eElement(val)
|
||||||
|
@ -703,7 +731,7 @@ func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (enc *Encoder) wf(format string, v ...interface{}) {
|
func (enc *Encoder) wf(format string, v ...any) {
|
||||||
_, err := fmt.Fprintf(enc.w, format, v...)
|
_, err := fmt.Fprintf(enc.w, format, v...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
encPanic(err)
|
encPanic(err)
|
||||||
|
|
|
@ -84,7 +84,7 @@ func (pe ParseError) Error() string {
|
||||||
pe.Position.Line, pe.LastKey, msg)
|
pe.Position.Line, pe.LastKey, msg)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ErrorWithUsage() returns the error with detailed location context.
|
// ErrorWithPosition returns the error with detailed location context.
|
||||||
//
|
//
|
||||||
// See the documentation on [ParseError].
|
// See the documentation on [ParseError].
|
||||||
func (pe ParseError) ErrorWithPosition() string {
|
func (pe ParseError) ErrorWithPosition() string {
|
||||||
|
@ -114,17 +114,26 @@ func (pe ParseError) ErrorWithPosition() string {
|
||||||
msg, pe.Position.Line, col, col+pe.Position.Len)
|
msg, pe.Position.Line, col, col+pe.Position.Len)
|
||||||
}
|
}
|
||||||
if pe.Position.Line > 2 {
|
if pe.Position.Line > 2 {
|
||||||
fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, lines[pe.Position.Line-3])
|
fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, expandTab(lines[pe.Position.Line-3]))
|
||||||
}
|
}
|
||||||
if pe.Position.Line > 1 {
|
if pe.Position.Line > 1 {
|
||||||
fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-1, lines[pe.Position.Line-2])
|
fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-1, expandTab(lines[pe.Position.Line-2]))
|
||||||
}
|
}
|
||||||
fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, lines[pe.Position.Line-1])
|
|
||||||
fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col), strings.Repeat("^", pe.Position.Len))
|
/// Expand tabs, so that the ^^^s are at the correct position, but leave
|
||||||
|
/// "column 10-13" intact. Adjusting this to the visual column would be
|
||||||
|
/// better, but we don't know the tabsize of the user in their editor, which
|
||||||
|
/// can be 8, 4, 2, or something else. We can't know. So leaving it as the
|
||||||
|
/// character index is probably the "most correct".
|
||||||
|
expanded := expandTab(lines[pe.Position.Line-1])
|
||||||
|
diff := len(expanded) - len(lines[pe.Position.Line-1])
|
||||||
|
|
||||||
|
fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, expanded)
|
||||||
|
fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col+diff), strings.Repeat("^", pe.Position.Len))
|
||||||
return b.String()
|
return b.String()
|
||||||
}
|
}
|
||||||
|
|
||||||
// ErrorWithUsage() returns the error with detailed location context and usage
|
// ErrorWithUsage returns the error with detailed location context and usage
|
||||||
// guidance.
|
// guidance.
|
||||||
//
|
//
|
||||||
// See the documentation on [ParseError].
|
// See the documentation on [ParseError].
|
||||||
|
@ -159,17 +168,47 @@ func (pe ParseError) column(lines []string) int {
|
||||||
return col
|
return col
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func expandTab(s string) string {
|
||||||
|
var (
|
||||||
|
b strings.Builder
|
||||||
|
l int
|
||||||
|
fill = func(n int) string {
|
||||||
|
b := make([]byte, n)
|
||||||
|
for i := range b {
|
||||||
|
b[i] = ' '
|
||||||
|
}
|
||||||
|
return string(b)
|
||||||
|
}
|
||||||
|
)
|
||||||
|
b.Grow(len(s))
|
||||||
|
for _, r := range s {
|
||||||
|
switch r {
|
||||||
|
case '\t':
|
||||||
|
tw := 8 - l%8
|
||||||
|
b.WriteString(fill(tw))
|
||||||
|
l += tw
|
||||||
|
default:
|
||||||
|
b.WriteRune(r)
|
||||||
|
l += 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return b.String()
|
||||||
|
}
|
||||||
|
|
||||||
type (
|
type (
|
||||||
errLexControl struct{ r rune }
|
errLexControl struct{ r rune }
|
||||||
errLexEscape struct{ r rune }
|
errLexEscape struct{ r rune }
|
||||||
errLexUTF8 struct{ b byte }
|
errLexUTF8 struct{ b byte }
|
||||||
errLexInvalidNum struct{ v string }
|
errParseDate struct{ v string }
|
||||||
errLexInvalidDate struct{ v string }
|
|
||||||
errLexInlineTableNL struct{}
|
errLexInlineTableNL struct{}
|
||||||
errLexStringNL struct{}
|
errLexStringNL struct{}
|
||||||
errParseRange struct {
|
errParseRange struct {
|
||||||
i interface{} // int or float
|
i any // int or float
|
||||||
size string // "int64", "uint16", etc.
|
size string // "int64", "uint16", etc.
|
||||||
|
}
|
||||||
|
errUnsafeFloat struct {
|
||||||
|
i interface{} // float32 or float64
|
||||||
|
size string // "float32" or "float64"
|
||||||
}
|
}
|
||||||
errParseDuration struct{ d string }
|
errParseDuration struct{ d string }
|
||||||
)
|
)
|
||||||
|
@ -183,18 +222,20 @@ func (e errLexEscape) Error() string { return fmt.Sprintf(`invalid escape
|
||||||
func (e errLexEscape) Usage() string { return usageEscape }
|
func (e errLexEscape) Usage() string { return usageEscape }
|
||||||
func (e errLexUTF8) Error() string { return fmt.Sprintf("invalid UTF-8 byte: 0x%02x", e.b) }
|
func (e errLexUTF8) Error() string { return fmt.Sprintf("invalid UTF-8 byte: 0x%02x", e.b) }
|
||||||
func (e errLexUTF8) Usage() string { return "" }
|
func (e errLexUTF8) Usage() string { return "" }
|
||||||
func (e errLexInvalidNum) Error() string { return fmt.Sprintf("invalid number: %q", e.v) }
|
func (e errParseDate) Error() string { return fmt.Sprintf("invalid datetime: %q", e.v) }
|
||||||
func (e errLexInvalidNum) Usage() string { return "" }
|
func (e errParseDate) Usage() string { return usageDate }
|
||||||
func (e errLexInvalidDate) Error() string { return fmt.Sprintf("invalid date: %q", e.v) }
|
|
||||||
func (e errLexInvalidDate) Usage() string { return "" }
|
|
||||||
func (e errLexInlineTableNL) Error() string { return "newlines not allowed within inline tables" }
|
func (e errLexInlineTableNL) Error() string { return "newlines not allowed within inline tables" }
|
||||||
func (e errLexInlineTableNL) Usage() string { return usageInlineNewline }
|
func (e errLexInlineTableNL) Usage() string { return usageInlineNewline }
|
||||||
func (e errLexStringNL) Error() string { return "strings cannot contain newlines" }
|
func (e errLexStringNL) Error() string { return "strings cannot contain newlines" }
|
||||||
func (e errLexStringNL) Usage() string { return usageStringNewline }
|
func (e errLexStringNL) Usage() string { return usageStringNewline }
|
||||||
func (e errParseRange) Error() string { return fmt.Sprintf("%v is out of range for %s", e.i, e.size) }
|
func (e errParseRange) Error() string { return fmt.Sprintf("%v is out of range for %s", e.i, e.size) }
|
||||||
func (e errParseRange) Usage() string { return usageIntOverflow }
|
func (e errParseRange) Usage() string { return usageIntOverflow }
|
||||||
func (e errParseDuration) Error() string { return fmt.Sprintf("invalid duration: %q", e.d) }
|
func (e errUnsafeFloat) Error() string {
|
||||||
func (e errParseDuration) Usage() string { return usageDuration }
|
return fmt.Sprintf("%v is out of the safe %s range", e.i, e.size)
|
||||||
|
}
|
||||||
|
func (e errUnsafeFloat) Usage() string { return usageUnsafeFloat }
|
||||||
|
func (e errParseDuration) Error() string { return fmt.Sprintf("invalid duration: %q", e.d) }
|
||||||
|
func (e errParseDuration) Usage() string { return usageDuration }
|
||||||
|
|
||||||
const usageEscape = `
|
const usageEscape = `
|
||||||
A '\' inside a "-delimited string is interpreted as an escape character.
|
A '\' inside a "-delimited string is interpreted as an escape character.
|
||||||
|
@ -251,19 +292,35 @@ bug in the program that uses too small of an integer.
|
||||||
The maximum and minimum values are:
|
The maximum and minimum values are:
|
||||||
|
|
||||||
size │ lowest │ highest
|
size │ lowest │ highest
|
||||||
───────┼────────────────┼──────────
|
───────┼────────────────┼──────────────
|
||||||
int8 │ -128 │ 127
|
int8 │ -128 │ 127
|
||||||
int16 │ -32,768 │ 32,767
|
int16 │ -32,768 │ 32,767
|
||||||
int32 │ -2,147,483,648 │ 2,147,483,647
|
int32 │ -2,147,483,648 │ 2,147,483,647
|
||||||
int64 │ -9.2 × 10¹⁷ │ 9.2 × 10¹⁷
|
int64 │ -9.2 × 10¹⁷ │ 9.2 × 10¹⁷
|
||||||
uint8 │ 0 │ 255
|
uint8 │ 0 │ 255
|
||||||
uint16 │ 0 │ 65535
|
uint16 │ 0 │ 65,535
|
||||||
uint32 │ 0 │ 4294967295
|
uint32 │ 0 │ 4,294,967,295
|
||||||
uint64 │ 0 │ 1.8 × 10¹⁸
|
uint64 │ 0 │ 1.8 × 10¹⁸
|
||||||
|
|
||||||
int refers to int32 on 32-bit systems and int64 on 64-bit systems.
|
int refers to int32 on 32-bit systems and int64 on 64-bit systems.
|
||||||
`
|
`
|
||||||
|
|
||||||
|
const usageUnsafeFloat = `
|
||||||
|
This number is outside of the "safe" range for floating point numbers; whole
|
||||||
|
(non-fractional) numbers outside the below range can not always be represented
|
||||||
|
accurately in a float, leading to some loss of accuracy.
|
||||||
|
|
||||||
|
Explicitly mark a number as a fractional unit by adding ".0", which will incur
|
||||||
|
some loss of accuracy; for example:
|
||||||
|
|
||||||
|
f = 2_000_000_000.0
|
||||||
|
|
||||||
|
Accuracy ranges:
|
||||||
|
|
||||||
|
float32 = 16,777,215
|
||||||
|
float64 = 9,007,199,254,740,991
|
||||||
|
`
|
||||||
|
|
||||||
const usageDuration = `
|
const usageDuration = `
|
||||||
A duration must be as "number<unit>", without any spaces. Valid units are:
|
A duration must be as "number<unit>", without any spaces. Valid units are:
|
||||||
|
|
||||||
|
@ -277,3 +334,23 @@ A duration must be as "number<unit>", without any spaces. Valid units are:
|
||||||
You can combine multiple units; for example "5m10s" for 5 minutes and 10
|
You can combine multiple units; for example "5m10s" for 5 minutes and 10
|
||||||
seconds.
|
seconds.
|
||||||
`
|
`
|
||||||
|
|
||||||
|
const usageDate = `
|
||||||
|
A TOML datetime must be in one of the following formats:
|
||||||
|
|
||||||
|
2006-01-02T15:04:05Z07:00 Date and time, with timezone.
|
||||||
|
2006-01-02T15:04:05 Date and time, but without timezone.
|
||||||
|
2006-01-02 Date without a time or timezone.
|
||||||
|
15:04:05 Just a time, without any timezone.
|
||||||
|
|
||||||
|
Seconds may optionally have a fraction, up to nanosecond precision:
|
||||||
|
|
||||||
|
15:04:05.123
|
||||||
|
15:04:05.856018510
|
||||||
|
`
|
||||||
|
|
||||||
|
// TOML 1.1:
|
||||||
|
// The seconds part in times is optional, and may be omitted:
|
||||||
|
// 2006-01-02T15:04Z07:00
|
||||||
|
// 2006-01-02T15:04
|
||||||
|
// 15:04
|
||||||
|
|
|
@ -17,6 +17,7 @@ const (
|
||||||
itemEOF
|
itemEOF
|
||||||
itemText
|
itemText
|
||||||
itemString
|
itemString
|
||||||
|
itemStringEsc
|
||||||
itemRawString
|
itemRawString
|
||||||
itemMultilineString
|
itemMultilineString
|
||||||
itemRawMultilineString
|
itemRawMultilineString
|
||||||
|
@ -46,12 +47,14 @@ func (p Position) String() string {
|
||||||
}
|
}
|
||||||
|
|
||||||
type lexer struct {
|
type lexer struct {
|
||||||
input string
|
input string
|
||||||
start int
|
start int
|
||||||
pos int
|
pos int
|
||||||
line int
|
line int
|
||||||
state stateFn
|
state stateFn
|
||||||
items chan item
|
items chan item
|
||||||
|
tomlNext bool
|
||||||
|
esc bool
|
||||||
|
|
||||||
// Allow for backing up up to 4 runes. This is necessary because TOML
|
// Allow for backing up up to 4 runes. This is necessary because TOML
|
||||||
// contains 3-rune tokens (""" and ''').
|
// contains 3-rune tokens (""" and ''').
|
||||||
|
@ -87,13 +90,14 @@ func (lx *lexer) nextItem() item {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func lex(input string) *lexer {
|
func lex(input string, tomlNext bool) *lexer {
|
||||||
lx := &lexer{
|
lx := &lexer{
|
||||||
input: input,
|
input: input,
|
||||||
state: lexTop,
|
state: lexTop,
|
||||||
items: make(chan item, 10),
|
items: make(chan item, 10),
|
||||||
stack: make([]stateFn, 0, 10),
|
stack: make([]stateFn, 0, 10),
|
||||||
line: 1,
|
line: 1,
|
||||||
|
tomlNext: tomlNext,
|
||||||
}
|
}
|
||||||
return lx
|
return lx
|
||||||
}
|
}
|
||||||
|
@ -162,7 +166,7 @@ func (lx *lexer) next() (r rune) {
|
||||||
}
|
}
|
||||||
|
|
||||||
r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
|
r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
|
||||||
if r == utf8.RuneError {
|
if r == utf8.RuneError && w == 1 {
|
||||||
lx.error(errLexUTF8{lx.input[lx.pos]})
|
lx.error(errLexUTF8{lx.input[lx.pos]})
|
||||||
return utf8.RuneError
|
return utf8.RuneError
|
||||||
}
|
}
|
||||||
|
@ -268,7 +272,7 @@ func (lx *lexer) errorPos(start, length int, err error) stateFn {
|
||||||
}
|
}
|
||||||
|
|
||||||
// errorf is like error, and creates a new error.
|
// errorf is like error, and creates a new error.
|
||||||
func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
|
func (lx *lexer) errorf(format string, values ...any) stateFn {
|
||||||
if lx.atEOF {
|
if lx.atEOF {
|
||||||
pos := lx.getPos()
|
pos := lx.getPos()
|
||||||
pos.Line--
|
pos.Line--
|
||||||
|
@ -331,9 +335,7 @@ func lexTopEnd(lx *lexer) stateFn {
|
||||||
lx.emit(itemEOF)
|
lx.emit(itemEOF)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return lx.errorf(
|
return lx.errorf("expected a top-level item to end with a newline, comment, or EOF, but got %q instead", r)
|
||||||
"expected a top-level item to end with a newline, comment, or EOF, but got %q instead",
|
|
||||||
r)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// lexTable lexes the beginning of a table. Namely, it makes sure that
|
// lexTable lexes the beginning of a table. Namely, it makes sure that
|
||||||
|
@ -408,7 +410,7 @@ func lexTableNameEnd(lx *lexer) stateFn {
|
||||||
// Lexes only one part, e.g. only 'a' inside 'a.b'.
|
// Lexes only one part, e.g. only 'a' inside 'a.b'.
|
||||||
func lexBareName(lx *lexer) stateFn {
|
func lexBareName(lx *lexer) stateFn {
|
||||||
r := lx.next()
|
r := lx.next()
|
||||||
if isBareKeyChar(r) {
|
if isBareKeyChar(r, lx.tomlNext) {
|
||||||
return lexBareName
|
return lexBareName
|
||||||
}
|
}
|
||||||
lx.backup()
|
lx.backup()
|
||||||
|
@ -618,6 +620,9 @@ func lexInlineTableValue(lx *lexer) stateFn {
|
||||||
case isWhitespace(r):
|
case isWhitespace(r):
|
||||||
return lexSkip(lx, lexInlineTableValue)
|
return lexSkip(lx, lexInlineTableValue)
|
||||||
case isNL(r):
|
case isNL(r):
|
||||||
|
if lx.tomlNext {
|
||||||
|
return lexSkip(lx, lexInlineTableValue)
|
||||||
|
}
|
||||||
return lx.errorPrevLine(errLexInlineTableNL{})
|
return lx.errorPrevLine(errLexInlineTableNL{})
|
||||||
case r == '#':
|
case r == '#':
|
||||||
lx.push(lexInlineTableValue)
|
lx.push(lexInlineTableValue)
|
||||||
|
@ -640,6 +645,9 @@ func lexInlineTableValueEnd(lx *lexer) stateFn {
|
||||||
case isWhitespace(r):
|
case isWhitespace(r):
|
||||||
return lexSkip(lx, lexInlineTableValueEnd)
|
return lexSkip(lx, lexInlineTableValueEnd)
|
||||||
case isNL(r):
|
case isNL(r):
|
||||||
|
if lx.tomlNext {
|
||||||
|
return lexSkip(lx, lexInlineTableValueEnd)
|
||||||
|
}
|
||||||
return lx.errorPrevLine(errLexInlineTableNL{})
|
return lx.errorPrevLine(errLexInlineTableNL{})
|
||||||
case r == '#':
|
case r == '#':
|
||||||
lx.push(lexInlineTableValueEnd)
|
lx.push(lexInlineTableValueEnd)
|
||||||
|
@ -648,6 +656,9 @@ func lexInlineTableValueEnd(lx *lexer) stateFn {
|
||||||
lx.ignore()
|
lx.ignore()
|
||||||
lx.skip(isWhitespace)
|
lx.skip(isWhitespace)
|
||||||
if lx.peek() == '}' {
|
if lx.peek() == '}' {
|
||||||
|
if lx.tomlNext {
|
||||||
|
return lexInlineTableValueEnd
|
||||||
|
}
|
||||||
return lx.errorf("trailing comma not allowed in inline tables")
|
return lx.errorf("trailing comma not allowed in inline tables")
|
||||||
}
|
}
|
||||||
return lexInlineTableValue
|
return lexInlineTableValue
|
||||||
|
@ -687,7 +698,12 @@ func lexString(lx *lexer) stateFn {
|
||||||
return lexStringEscape
|
return lexStringEscape
|
||||||
case r == '"':
|
case r == '"':
|
||||||
lx.backup()
|
lx.backup()
|
||||||
lx.emit(itemString)
|
if lx.esc {
|
||||||
|
lx.esc = false
|
||||||
|
lx.emit(itemStringEsc)
|
||||||
|
} else {
|
||||||
|
lx.emit(itemString)
|
||||||
|
}
|
||||||
lx.next()
|
lx.next()
|
||||||
lx.ignore()
|
lx.ignore()
|
||||||
return lx.pop()
|
return lx.pop()
|
||||||
|
@ -737,6 +753,7 @@ func lexMultilineString(lx *lexer) stateFn {
|
||||||
lx.backup() /// backup: don't include the """ in the item.
|
lx.backup() /// backup: don't include the """ in the item.
|
||||||
lx.backup()
|
lx.backup()
|
||||||
lx.backup()
|
lx.backup()
|
||||||
|
lx.esc = false
|
||||||
lx.emit(itemMultilineString)
|
lx.emit(itemMultilineString)
|
||||||
lx.next() /// Read over ''' again and discard it.
|
lx.next() /// Read over ''' again and discard it.
|
||||||
lx.next()
|
lx.next()
|
||||||
|
@ -770,8 +787,8 @@ func lexRawString(lx *lexer) stateFn {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
|
// lexMultilineRawString consumes a raw string. Nothing can be escaped in such a
|
||||||
// a string. It assumes that the beginning ''' has already been consumed and
|
// string. It assumes that the beginning triple-' has already been consumed and
|
||||||
// ignored.
|
// ignored.
|
||||||
func lexMultilineRawString(lx *lexer) stateFn {
|
func lexMultilineRawString(lx *lexer) stateFn {
|
||||||
r := lx.next()
|
r := lx.next()
|
||||||
|
@ -826,8 +843,14 @@ func lexMultilineStringEscape(lx *lexer) stateFn {
|
||||||
}
|
}
|
||||||
|
|
||||||
func lexStringEscape(lx *lexer) stateFn {
|
func lexStringEscape(lx *lexer) stateFn {
|
||||||
|
lx.esc = true
|
||||||
r := lx.next()
|
r := lx.next()
|
||||||
switch r {
|
switch r {
|
||||||
|
case 'e':
|
||||||
|
if !lx.tomlNext {
|
||||||
|
return lx.error(errLexEscape{r})
|
||||||
|
}
|
||||||
|
fallthrough
|
||||||
case 'b':
|
case 'b':
|
||||||
fallthrough
|
fallthrough
|
||||||
case 't':
|
case 't':
|
||||||
|
@ -846,6 +869,11 @@ func lexStringEscape(lx *lexer) stateFn {
|
||||||
fallthrough
|
fallthrough
|
||||||
case '\\':
|
case '\\':
|
||||||
return lx.pop()
|
return lx.pop()
|
||||||
|
case 'x':
|
||||||
|
if !lx.tomlNext {
|
||||||
|
return lx.error(errLexEscape{r})
|
||||||
|
}
|
||||||
|
return lexHexEscape
|
||||||
case 'u':
|
case 'u':
|
||||||
return lexShortUnicodeEscape
|
return lexShortUnicodeEscape
|
||||||
case 'U':
|
case 'U':
|
||||||
|
@ -854,14 +882,23 @@ func lexStringEscape(lx *lexer) stateFn {
|
||||||
return lx.error(errLexEscape{r})
|
return lx.error(errLexEscape{r})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func lexHexEscape(lx *lexer) stateFn {
|
||||||
|
var r rune
|
||||||
|
for i := 0; i < 2; i++ {
|
||||||
|
r = lx.next()
|
||||||
|
if !isHex(r) {
|
||||||
|
return lx.errorf(`expected two hexadecimal digits after '\x', but got %q instead`, lx.current())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return lx.pop()
|
||||||
|
}
|
||||||
|
|
||||||
func lexShortUnicodeEscape(lx *lexer) stateFn {
|
func lexShortUnicodeEscape(lx *lexer) stateFn {
|
||||||
var r rune
|
var r rune
|
||||||
for i := 0; i < 4; i++ {
|
for i := 0; i < 4; i++ {
|
||||||
r = lx.next()
|
r = lx.next()
|
||||||
if !isHexadecimal(r) {
|
if !isHex(r) {
|
||||||
return lx.errorf(
|
return lx.errorf(`expected four hexadecimal digits after '\u', but got %q instead`, lx.current())
|
||||||
`expected four hexadecimal digits after '\u', but got %q instead`,
|
|
||||||
lx.current())
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return lx.pop()
|
return lx.pop()
|
||||||
|
@ -871,10 +908,8 @@ func lexLongUnicodeEscape(lx *lexer) stateFn {
|
||||||
var r rune
|
var r rune
|
||||||
for i := 0; i < 8; i++ {
|
for i := 0; i < 8; i++ {
|
||||||
r = lx.next()
|
r = lx.next()
|
||||||
if !isHexadecimal(r) {
|
if !isHex(r) {
|
||||||
return lx.errorf(
|
return lx.errorf(`expected eight hexadecimal digits after '\U', but got %q instead`, lx.current())
|
||||||
`expected eight hexadecimal digits after '\U', but got %q instead`,
|
|
||||||
lx.current())
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return lx.pop()
|
return lx.pop()
|
||||||
|
@ -941,7 +976,7 @@ func lexDatetime(lx *lexer) stateFn {
|
||||||
// lexHexInteger consumes a hexadecimal integer after seeing the '0x' prefix.
|
// lexHexInteger consumes a hexadecimal integer after seeing the '0x' prefix.
|
||||||
func lexHexInteger(lx *lexer) stateFn {
|
func lexHexInteger(lx *lexer) stateFn {
|
||||||
r := lx.next()
|
r := lx.next()
|
||||||
if isHexadecimal(r) {
|
if isHex(r) {
|
||||||
return lexHexInteger
|
return lexHexInteger
|
||||||
}
|
}
|
||||||
switch r {
|
switch r {
|
||||||
|
@ -1075,7 +1110,7 @@ func lexBaseNumberOrDate(lx *lexer) stateFn {
|
||||||
return lexOctalInteger
|
return lexOctalInteger
|
||||||
case 'x':
|
case 'x':
|
||||||
r = lx.peek()
|
r = lx.peek()
|
||||||
if !isHexadecimal(r) {
|
if !isHex(r) {
|
||||||
lx.errorf("not a hexidecimal number: '%s%c'", lx.current(), r)
|
lx.errorf("not a hexidecimal number: '%s%c'", lx.current(), r)
|
||||||
}
|
}
|
||||||
return lexHexInteger
|
return lexHexInteger
|
||||||
|
@ -1173,7 +1208,7 @@ func (itype itemType) String() string {
|
||||||
return "EOF"
|
return "EOF"
|
||||||
case itemText:
|
case itemText:
|
||||||
return "Text"
|
return "Text"
|
||||||
case itemString, itemRawString, itemMultilineString, itemRawMultilineString:
|
case itemString, itemStringEsc, itemRawString, itemMultilineString, itemRawMultilineString:
|
||||||
return "String"
|
return "String"
|
||||||
case itemBool:
|
case itemBool:
|
||||||
return "Bool"
|
return "Bool"
|
||||||
|
@ -1206,7 +1241,7 @@ func (itype itemType) String() string {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (item item) String() string {
|
func (item item) String() string {
|
||||||
return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
|
return fmt.Sprintf("(%s, %s)", item.typ, item.val)
|
||||||
}
|
}
|
||||||
|
|
||||||
func isWhitespace(r rune) bool { return r == '\t' || r == ' ' }
|
func isWhitespace(r rune) bool { return r == '\t' || r == ' ' }
|
||||||
|
@ -1222,10 +1257,23 @@ func isControl(r rune) bool { // Control characters except \t, \r, \n
|
||||||
func isDigit(r rune) bool { return r >= '0' && r <= '9' }
|
func isDigit(r rune) bool { return r >= '0' && r <= '9' }
|
||||||
func isBinary(r rune) bool { return r == '0' || r == '1' }
|
func isBinary(r rune) bool { return r == '0' || r == '1' }
|
||||||
func isOctal(r rune) bool { return r >= '0' && r <= '7' }
|
func isOctal(r rune) bool { return r >= '0' && r <= '7' }
|
||||||
func isHexadecimal(r rune) bool {
|
func isHex(r rune) bool { return (r >= '0' && r <= '9') || (r|0x20 >= 'a' && r|0x20 <= 'f') }
|
||||||
return (r >= '0' && r <= '9') || (r >= 'a' && r <= 'f') || (r >= 'A' && r <= 'F')
|
func isBareKeyChar(r rune, tomlNext bool) bool {
|
||||||
}
|
if tomlNext {
|
||||||
func isBareKeyChar(r rune) bool {
|
return (r >= 'A' && r <= 'Z') ||
|
||||||
|
(r >= 'a' && r <= 'z') ||
|
||||||
|
(r >= '0' && r <= '9') ||
|
||||||
|
r == '_' || r == '-' ||
|
||||||
|
r == 0xb2 || r == 0xb3 || r == 0xb9 || (r >= 0xbc && r <= 0xbe) ||
|
||||||
|
(r >= 0xc0 && r <= 0xd6) || (r >= 0xd8 && r <= 0xf6) || (r >= 0xf8 && r <= 0x037d) ||
|
||||||
|
(r >= 0x037f && r <= 0x1fff) ||
|
||||||
|
(r >= 0x200c && r <= 0x200d) || (r >= 0x203f && r <= 0x2040) ||
|
||||||
|
(r >= 0x2070 && r <= 0x218f) || (r >= 0x2460 && r <= 0x24ff) ||
|
||||||
|
(r >= 0x2c00 && r <= 0x2fef) || (r >= 0x3001 && r <= 0xd7ff) ||
|
||||||
|
(r >= 0xf900 && r <= 0xfdcf) || (r >= 0xfdf0 && r <= 0xfffd) ||
|
||||||
|
(r >= 0x10000 && r <= 0xeffff)
|
||||||
|
}
|
||||||
|
|
||||||
return (r >= 'A' && r <= 'Z') ||
|
return (r >= 'A' && r <= 'Z') ||
|
||||||
(r >= 'a' && r <= 'z') ||
|
(r >= 'a' && r <= 'z') ||
|
||||||
(r >= '0' && r <= '9') ||
|
(r >= '0' && r <= '9') ||
|
||||||
|
|
|
@ -13,7 +13,7 @@ type MetaData struct {
|
||||||
context Key // Used only during decoding.
|
context Key // Used only during decoding.
|
||||||
|
|
||||||
keyInfo map[string]keyInfo
|
keyInfo map[string]keyInfo
|
||||||
mapping map[string]interface{}
|
mapping map[string]any
|
||||||
keys []Key
|
keys []Key
|
||||||
decoded map[string]struct{}
|
decoded map[string]struct{}
|
||||||
data []byte // Input file; for errors.
|
data []byte // Input file; for errors.
|
||||||
|
@ -31,12 +31,12 @@ func (md *MetaData) IsDefined(key ...string) bool {
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
hash map[string]interface{}
|
hash map[string]any
|
||||||
ok bool
|
ok bool
|
||||||
hashOrVal interface{} = md.mapping
|
hashOrVal any = md.mapping
|
||||||
)
|
)
|
||||||
for _, k := range key {
|
for _, k := range key {
|
||||||
if hash, ok = hashOrVal.(map[string]interface{}); !ok {
|
if hash, ok = hashOrVal.(map[string]any); !ok {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
if hashOrVal, ok = hash[k]; !ok {
|
if hashOrVal, ok = hash[k]; !ok {
|
||||||
|
@ -94,28 +94,55 @@ func (md *MetaData) Undecoded() []Key {
|
||||||
type Key []string
|
type Key []string
|
||||||
|
|
||||||
func (k Key) String() string {
|
func (k Key) String() string {
|
||||||
ss := make([]string, len(k))
|
// This is called quite often, so it's a bit funky to make it faster.
|
||||||
for i := range k {
|
var b strings.Builder
|
||||||
ss[i] = k.maybeQuoted(i)
|
b.Grow(len(k) * 25)
|
||||||
|
outer:
|
||||||
|
for i, kk := range k {
|
||||||
|
if i > 0 {
|
||||||
|
b.WriteByte('.')
|
||||||
|
}
|
||||||
|
if kk == "" {
|
||||||
|
b.WriteString(`""`)
|
||||||
|
} else {
|
||||||
|
for _, r := range kk {
|
||||||
|
// "Inline" isBareKeyChar
|
||||||
|
if !((r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '_' || r == '-') {
|
||||||
|
b.WriteByte('"')
|
||||||
|
b.WriteString(dblQuotedReplacer.Replace(kk))
|
||||||
|
b.WriteByte('"')
|
||||||
|
continue outer
|
||||||
|
}
|
||||||
|
}
|
||||||
|
b.WriteString(kk)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return strings.Join(ss, ".")
|
return b.String()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (k Key) maybeQuoted(i int) string {
|
func (k Key) maybeQuoted(i int) string {
|
||||||
if k[i] == "" {
|
if k[i] == "" {
|
||||||
return `""`
|
return `""`
|
||||||
}
|
}
|
||||||
for _, c := range k[i] {
|
for _, r := range k[i] {
|
||||||
if !isBareKeyChar(c) {
|
if (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '_' || r == '-' {
|
||||||
return `"` + dblQuotedReplacer.Replace(k[i]) + `"`
|
continue
|
||||||
}
|
}
|
||||||
|
return `"` + dblQuotedReplacer.Replace(k[i]) + `"`
|
||||||
}
|
}
|
||||||
return k[i]
|
return k[i]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Like append(), but only increase the cap by 1.
|
||||||
func (k Key) add(piece string) Key {
|
func (k Key) add(piece string) Key {
|
||||||
|
if cap(k) > len(k) {
|
||||||
|
return append(k, piece)
|
||||||
|
}
|
||||||
newKey := make(Key, len(k)+1)
|
newKey := make(Key, len(k)+1)
|
||||||
copy(newKey, k)
|
copy(newKey, k)
|
||||||
newKey[len(k)] = piece
|
newKey[len(k)] = piece
|
||||||
return newKey
|
return newKey
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (k Key) parent() Key { return k[:len(k)-1] } // all except the last piece.
|
||||||
|
func (k Key) last() string { return k[len(k)-1] } // last piece of this key.
|
||||||
|
|
|
@ -2,6 +2,8 @@ package toml
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"os"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
@ -15,12 +17,13 @@ type parser struct {
|
||||||
context Key // Full key for the current hash in scope.
|
context Key // Full key for the current hash in scope.
|
||||||
currentKey string // Base key name for everything except hashes.
|
currentKey string // Base key name for everything except hashes.
|
||||||
pos Position // Current position in the TOML file.
|
pos Position // Current position in the TOML file.
|
||||||
|
tomlNext bool
|
||||||
|
|
||||||
ordered []Key // List of keys in the order that they appear in the TOML data.
|
ordered []Key // List of keys in the order that they appear in the TOML data.
|
||||||
|
|
||||||
keyInfo map[string]keyInfo // Map keyname → info about the TOML key.
|
keyInfo map[string]keyInfo // Map keyname → info about the TOML key.
|
||||||
mapping map[string]interface{} // Map keyname → key value.
|
mapping map[string]any // Map keyname → key value.
|
||||||
implicits map[string]struct{} // Record implicit keys (e.g. "key.group.names").
|
implicits map[string]struct{} // Record implicit keys (e.g. "key.group.names").
|
||||||
}
|
}
|
||||||
|
|
||||||
type keyInfo struct {
|
type keyInfo struct {
|
||||||
|
@ -29,6 +32,8 @@ type keyInfo struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
func parse(data string) (p *parser, err error) {
|
func parse(data string) (p *parser, err error) {
|
||||||
|
_, tomlNext := os.LookupEnv("BURNTSUSHI_TOML_110")
|
||||||
|
|
||||||
defer func() {
|
defer func() {
|
||||||
if r := recover(); r != nil {
|
if r := recover(); r != nil {
|
||||||
if pErr, ok := r.(ParseError); ok {
|
if pErr, ok := r.(ParseError); ok {
|
||||||
|
@ -41,9 +46,13 @@ func parse(data string) (p *parser, err error) {
|
||||||
}()
|
}()
|
||||||
|
|
||||||
// Read over BOM; do this here as the lexer calls utf8.DecodeRuneInString()
|
// Read over BOM; do this here as the lexer calls utf8.DecodeRuneInString()
|
||||||
// which mangles stuff.
|
// which mangles stuff. UTF-16 BOM isn't strictly valid, but some tools add
|
||||||
if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") {
|
// it anyway.
|
||||||
|
if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { // UTF-16
|
||||||
data = data[2:]
|
data = data[2:]
|
||||||
|
//lint:ignore S1017 https://github.com/dominikh/go-tools/issues/1447
|
||||||
|
} else if strings.HasPrefix(data, "\xef\xbb\xbf") { // UTF-8
|
||||||
|
data = data[3:]
|
||||||
}
|
}
|
||||||
|
|
||||||
// Examine first few bytes for NULL bytes; this probably means it's a UTF-16
|
// Examine first few bytes for NULL bytes; this probably means it's a UTF-16
|
||||||
|
@ -64,10 +73,11 @@ func parse(data string) (p *parser, err error) {
|
||||||
|
|
||||||
p = &parser{
|
p = &parser{
|
||||||
keyInfo: make(map[string]keyInfo),
|
keyInfo: make(map[string]keyInfo),
|
||||||
mapping: make(map[string]interface{}),
|
mapping: make(map[string]any),
|
||||||
lx: lex(data),
|
lx: lex(data, tomlNext),
|
||||||
ordered: make([]Key, 0),
|
ordered: make([]Key, 0),
|
||||||
implicits: make(map[string]struct{}),
|
implicits: make(map[string]struct{}),
|
||||||
|
tomlNext: tomlNext,
|
||||||
}
|
}
|
||||||
for {
|
for {
|
||||||
item := p.next()
|
item := p.next()
|
||||||
|
@ -89,7 +99,7 @@ func (p *parser) panicErr(it item, err error) {
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *parser) panicItemf(it item, format string, v ...interface{}) {
|
func (p *parser) panicItemf(it item, format string, v ...any) {
|
||||||
panic(ParseError{
|
panic(ParseError{
|
||||||
Message: fmt.Sprintf(format, v...),
|
Message: fmt.Sprintf(format, v...),
|
||||||
Position: it.pos,
|
Position: it.pos,
|
||||||
|
@ -98,7 +108,7 @@ func (p *parser) panicItemf(it item, format string, v ...interface{}) {
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *parser) panicf(format string, v ...interface{}) {
|
func (p *parser) panicf(format string, v ...any) {
|
||||||
panic(ParseError{
|
panic(ParseError{
|
||||||
Message: fmt.Sprintf(format, v...),
|
Message: fmt.Sprintf(format, v...),
|
||||||
Position: p.pos,
|
Position: p.pos,
|
||||||
|
@ -131,7 +141,7 @@ func (p *parser) nextPos() item {
|
||||||
return it
|
return it
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *parser) bug(format string, v ...interface{}) {
|
func (p *parser) bug(format string, v ...any) {
|
||||||
panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
|
panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -186,20 +196,21 @@ func (p *parser) topLevel(item item) {
|
||||||
p.assertEqual(itemKeyEnd, k.typ)
|
p.assertEqual(itemKeyEnd, k.typ)
|
||||||
|
|
||||||
/// The current key is the last part.
|
/// The current key is the last part.
|
||||||
p.currentKey = key[len(key)-1]
|
p.currentKey = key.last()
|
||||||
|
|
||||||
/// All the other parts (if any) are the context; need to set each part
|
/// All the other parts (if any) are the context; need to set each part
|
||||||
/// as implicit.
|
/// as implicit.
|
||||||
context := key[:len(key)-1]
|
context := key.parent()
|
||||||
for i := range context {
|
for i := range context {
|
||||||
p.addImplicitContext(append(p.context, context[i:i+1]...))
|
p.addImplicitContext(append(p.context, context[i:i+1]...))
|
||||||
}
|
}
|
||||||
|
p.ordered = append(p.ordered, p.context.add(p.currentKey))
|
||||||
|
|
||||||
/// Set value.
|
/// Set value.
|
||||||
vItem := p.next()
|
vItem := p.next()
|
||||||
val, typ := p.value(vItem, false)
|
val, typ := p.value(vItem, false)
|
||||||
p.set(p.currentKey, val, typ, vItem.pos)
|
p.setValue(p.currentKey, val)
|
||||||
p.ordered = append(p.ordered, p.context.add(p.currentKey))
|
p.setType(p.currentKey, typ, vItem.pos)
|
||||||
|
|
||||||
/// Remove the context we added (preserving any context from [tbl] lines).
|
/// Remove the context we added (preserving any context from [tbl] lines).
|
||||||
p.context = outerContext
|
p.context = outerContext
|
||||||
|
@ -214,7 +225,7 @@ func (p *parser) keyString(it item) string {
|
||||||
switch it.typ {
|
switch it.typ {
|
||||||
case itemText:
|
case itemText:
|
||||||
return it.val
|
return it.val
|
||||||
case itemString, itemMultilineString,
|
case itemString, itemStringEsc, itemMultilineString,
|
||||||
itemRawString, itemRawMultilineString:
|
itemRawString, itemRawMultilineString:
|
||||||
s, _ := p.value(it, false)
|
s, _ := p.value(it, false)
|
||||||
return s.(string)
|
return s.(string)
|
||||||
|
@ -231,12 +242,14 @@ var datetimeRepl = strings.NewReplacer(
|
||||||
|
|
||||||
// value translates an expected value from the lexer into a Go value wrapped
|
// value translates an expected value from the lexer into a Go value wrapped
|
||||||
// as an empty interface.
|
// as an empty interface.
|
||||||
func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) {
|
func (p *parser) value(it item, parentIsArray bool) (any, tomlType) {
|
||||||
switch it.typ {
|
switch it.typ {
|
||||||
case itemString:
|
case itemString:
|
||||||
|
return it.val, p.typeOfPrimitive(it)
|
||||||
|
case itemStringEsc:
|
||||||
return p.replaceEscapes(it, it.val), p.typeOfPrimitive(it)
|
return p.replaceEscapes(it, it.val), p.typeOfPrimitive(it)
|
||||||
case itemMultilineString:
|
case itemMultilineString:
|
||||||
return p.replaceEscapes(it, stripFirstNewline(p.stripEscapedNewlines(it.val))), p.typeOfPrimitive(it)
|
return p.replaceEscapes(it, p.stripEscapedNewlines(stripFirstNewline(it.val))), p.typeOfPrimitive(it)
|
||||||
case itemRawString:
|
case itemRawString:
|
||||||
return it.val, p.typeOfPrimitive(it)
|
return it.val, p.typeOfPrimitive(it)
|
||||||
case itemRawMultilineString:
|
case itemRawMultilineString:
|
||||||
|
@ -266,7 +279,7 @@ func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) {
|
||||||
panic("unreachable")
|
panic("unreachable")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *parser) valueInteger(it item) (interface{}, tomlType) {
|
func (p *parser) valueInteger(it item) (any, tomlType) {
|
||||||
if !numUnderscoresOK(it.val) {
|
if !numUnderscoresOK(it.val) {
|
||||||
p.panicItemf(it, "Invalid integer %q: underscores must be surrounded by digits", it.val)
|
p.panicItemf(it, "Invalid integer %q: underscores must be surrounded by digits", it.val)
|
||||||
}
|
}
|
||||||
|
@ -290,7 +303,7 @@ func (p *parser) valueInteger(it item) (interface{}, tomlType) {
|
||||||
return num, p.typeOfPrimitive(it)
|
return num, p.typeOfPrimitive(it)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *parser) valueFloat(it item) (interface{}, tomlType) {
|
func (p *parser) valueFloat(it item) (any, tomlType) {
|
||||||
parts := strings.FieldsFunc(it.val, func(r rune) bool {
|
parts := strings.FieldsFunc(it.val, func(r rune) bool {
|
||||||
switch r {
|
switch r {
|
||||||
case '.', 'e', 'E':
|
case '.', 'e', 'E':
|
||||||
|
@ -314,7 +327,9 @@ func (p *parser) valueFloat(it item) (interface{}, tomlType) {
|
||||||
p.panicItemf(it, "Invalid float %q: '.' must be followed by one or more digits", it.val)
|
p.panicItemf(it, "Invalid float %q: '.' must be followed by one or more digits", it.val)
|
||||||
}
|
}
|
||||||
val := strings.Replace(it.val, "_", "", -1)
|
val := strings.Replace(it.val, "_", "", -1)
|
||||||
if val == "+nan" || val == "-nan" { // Go doesn't support this, but TOML spec does.
|
signbit := false
|
||||||
|
if val == "+nan" || val == "-nan" {
|
||||||
|
signbit = val == "-nan"
|
||||||
val = "nan"
|
val = "nan"
|
||||||
}
|
}
|
||||||
num, err := strconv.ParseFloat(val, 64)
|
num, err := strconv.ParseFloat(val, 64)
|
||||||
|
@ -325,20 +340,29 @@ func (p *parser) valueFloat(it item) (interface{}, tomlType) {
|
||||||
p.panicItemf(it, "Invalid float value: %q", it.val)
|
p.panicItemf(it, "Invalid float value: %q", it.val)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if signbit {
|
||||||
|
num = math.Copysign(num, -1)
|
||||||
|
}
|
||||||
return num, p.typeOfPrimitive(it)
|
return num, p.typeOfPrimitive(it)
|
||||||
}
|
}
|
||||||
|
|
||||||
var dtTypes = []struct {
|
var dtTypes = []struct {
|
||||||
fmt string
|
fmt string
|
||||||
zone *time.Location
|
zone *time.Location
|
||||||
|
next bool
|
||||||
}{
|
}{
|
||||||
{time.RFC3339Nano, time.Local},
|
{time.RFC3339Nano, time.Local, false},
|
||||||
{"2006-01-02T15:04:05.999999999", internal.LocalDatetime},
|
{"2006-01-02T15:04:05.999999999", internal.LocalDatetime, false},
|
||||||
{"2006-01-02", internal.LocalDate},
|
{"2006-01-02", internal.LocalDate, false},
|
||||||
{"15:04:05.999999999", internal.LocalTime},
|
{"15:04:05.999999999", internal.LocalTime, false},
|
||||||
|
|
||||||
|
// tomlNext
|
||||||
|
{"2006-01-02T15:04Z07:00", time.Local, true},
|
||||||
|
{"2006-01-02T15:04", internal.LocalDatetime, true},
|
||||||
|
{"15:04", internal.LocalTime, true},
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *parser) valueDatetime(it item) (interface{}, tomlType) {
|
func (p *parser) valueDatetime(it item) (any, tomlType) {
|
||||||
it.val = datetimeRepl.Replace(it.val)
|
it.val = datetimeRepl.Replace(it.val)
|
||||||
var (
|
var (
|
||||||
t time.Time
|
t time.Time
|
||||||
|
@ -346,28 +370,49 @@ func (p *parser) valueDatetime(it item) (interface{}, tomlType) {
|
||||||
err error
|
err error
|
||||||
)
|
)
|
||||||
for _, dt := range dtTypes {
|
for _, dt := range dtTypes {
|
||||||
|
if dt.next && !p.tomlNext {
|
||||||
|
continue
|
||||||
|
}
|
||||||
t, err = time.ParseInLocation(dt.fmt, it.val, dt.zone)
|
t, err = time.ParseInLocation(dt.fmt, it.val, dt.zone)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
|
if missingLeadingZero(it.val, dt.fmt) {
|
||||||
|
p.panicErr(it, errParseDate{it.val})
|
||||||
|
}
|
||||||
ok = true
|
ok = true
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if !ok {
|
if !ok {
|
||||||
p.panicItemf(it, "Invalid TOML Datetime: %q.", it.val)
|
p.panicErr(it, errParseDate{it.val})
|
||||||
}
|
}
|
||||||
return t, p.typeOfPrimitive(it)
|
return t, p.typeOfPrimitive(it)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *parser) valueArray(it item) (interface{}, tomlType) {
|
// Go's time.Parse() will accept numbers without a leading zero; there isn't any
|
||||||
|
// way to require it. https://github.com/golang/go/issues/29911
|
||||||
|
//
|
||||||
|
// Depend on the fact that the separators (- and :) should always be at the same
|
||||||
|
// location.
|
||||||
|
func missingLeadingZero(d, l string) bool {
|
||||||
|
for i, c := range []byte(l) {
|
||||||
|
if c == '.' || c == 'Z' {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if (c < '0' || c > '9') && d[i] != c {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *parser) valueArray(it item) (any, tomlType) {
|
||||||
p.setType(p.currentKey, tomlArray, it.pos)
|
p.setType(p.currentKey, tomlArray, it.pos)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
types []tomlType
|
// Initialize to a non-nil slice to make it consistent with how S = []
|
||||||
|
// decodes into a non-nil slice inside something like struct { S
|
||||||
// Initialize to a non-nil empty slice. This makes it consistent with
|
// []string }. See #338
|
||||||
// how S = [] decodes into a non-nil slice inside something like struct
|
array = make([]any, 0, 2)
|
||||||
// { S []string }. See #338
|
|
||||||
array = []interface{}{}
|
|
||||||
)
|
)
|
||||||
for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
|
for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
|
||||||
if it.typ == itemCommentStart {
|
if it.typ == itemCommentStart {
|
||||||
|
@ -377,20 +422,20 @@ func (p *parser) valueArray(it item) (interface{}, tomlType) {
|
||||||
|
|
||||||
val, typ := p.value(it, true)
|
val, typ := p.value(it, true)
|
||||||
array = append(array, val)
|
array = append(array, val)
|
||||||
types = append(types, typ)
|
|
||||||
|
|
||||||
// XXX: types isn't used here, we need it to record the accurate type
|
// XXX: type isn't used here, we need it to record the accurate type
|
||||||
// information.
|
// information.
|
||||||
//
|
//
|
||||||
// Not entirely sure how to best store this; could use "key[0]",
|
// Not entirely sure how to best store this; could use "key[0]",
|
||||||
// "key[1]" notation, or maybe store it on the Array type?
|
// "key[1]" notation, or maybe store it on the Array type?
|
||||||
|
_ = typ
|
||||||
}
|
}
|
||||||
return array, tomlArray
|
return array, tomlArray
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tomlType) {
|
func (p *parser) valueInlineTable(it item, parentIsArray bool) (any, tomlType) {
|
||||||
var (
|
var (
|
||||||
hash = make(map[string]interface{})
|
topHash = make(map[string]any)
|
||||||
outerContext = p.context
|
outerContext = p.context
|
||||||
outerKey = p.currentKey
|
outerKey = p.currentKey
|
||||||
)
|
)
|
||||||
|
@ -418,19 +463,33 @@ func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tom
|
||||||
p.assertEqual(itemKeyEnd, k.typ)
|
p.assertEqual(itemKeyEnd, k.typ)
|
||||||
|
|
||||||
/// The current key is the last part.
|
/// The current key is the last part.
|
||||||
p.currentKey = key[len(key)-1]
|
p.currentKey = key.last()
|
||||||
|
|
||||||
/// All the other parts (if any) are the context; need to set each part
|
/// All the other parts (if any) are the context; need to set each part
|
||||||
/// as implicit.
|
/// as implicit.
|
||||||
context := key[:len(key)-1]
|
context := key.parent()
|
||||||
for i := range context {
|
for i := range context {
|
||||||
p.addImplicitContext(append(p.context, context[i:i+1]...))
|
p.addImplicitContext(append(p.context, context[i:i+1]...))
|
||||||
}
|
}
|
||||||
|
p.ordered = append(p.ordered, p.context.add(p.currentKey))
|
||||||
|
|
||||||
/// Set the value.
|
/// Set the value.
|
||||||
val, typ := p.value(p.next(), false)
|
val, typ := p.value(p.next(), false)
|
||||||
p.set(p.currentKey, val, typ, it.pos)
|
p.setValue(p.currentKey, val)
|
||||||
p.ordered = append(p.ordered, p.context.add(p.currentKey))
|
p.setType(p.currentKey, typ, it.pos)
|
||||||
|
|
||||||
|
hash := topHash
|
||||||
|
for _, c := range context {
|
||||||
|
h, ok := hash[c]
|
||||||
|
if !ok {
|
||||||
|
h = make(map[string]any)
|
||||||
|
hash[c] = h
|
||||||
|
}
|
||||||
|
hash, ok = h.(map[string]any)
|
||||||
|
if !ok {
|
||||||
|
p.panicf("%q is not a table", p.context)
|
||||||
|
}
|
||||||
|
}
|
||||||
hash[p.currentKey] = val
|
hash[p.currentKey] = val
|
||||||
|
|
||||||
/// Restore context.
|
/// Restore context.
|
||||||
|
@ -438,7 +497,7 @@ func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tom
|
||||||
}
|
}
|
||||||
p.context = outerContext
|
p.context = outerContext
|
||||||
p.currentKey = outerKey
|
p.currentKey = outerKey
|
||||||
return hash, tomlHash
|
return topHash, tomlHash
|
||||||
}
|
}
|
||||||
|
|
||||||
// numHasLeadingZero checks if this number has leading zeroes, allowing for '0',
|
// numHasLeadingZero checks if this number has leading zeroes, allowing for '0',
|
||||||
|
@ -468,9 +527,9 @@ func numUnderscoresOK(s string) bool {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// isHexadecimal is a superset of all the permissable characters
|
// isHexis a superset of all the permissable characters surrounding an
|
||||||
// surrounding an underscore.
|
// underscore.
|
||||||
accept = isHexadecimal(r)
|
accept = isHex(r)
|
||||||
}
|
}
|
||||||
return accept
|
return accept
|
||||||
}
|
}
|
||||||
|
@ -493,21 +552,19 @@ func numPeriodsOK(s string) bool {
|
||||||
// Establishing the context also makes sure that the key isn't a duplicate, and
|
// Establishing the context also makes sure that the key isn't a duplicate, and
|
||||||
// will create implicit hashes automatically.
|
// will create implicit hashes automatically.
|
||||||
func (p *parser) addContext(key Key, array bool) {
|
func (p *parser) addContext(key Key, array bool) {
|
||||||
var ok bool
|
/// Always start at the top level and drill down for our context.
|
||||||
|
|
||||||
// Always start at the top level and drill down for our context.
|
|
||||||
hashContext := p.mapping
|
hashContext := p.mapping
|
||||||
keyContext := make(Key, 0)
|
keyContext := make(Key, 0, len(key)-1)
|
||||||
|
|
||||||
// We only need implicit hashes for key[0:-1]
|
/// We only need implicit hashes for the parents.
|
||||||
for _, k := range key[0 : len(key)-1] {
|
for _, k := range key.parent() {
|
||||||
_, ok = hashContext[k]
|
_, ok := hashContext[k]
|
||||||
keyContext = append(keyContext, k)
|
keyContext = append(keyContext, k)
|
||||||
|
|
||||||
// No key? Make an implicit hash and move on.
|
// No key? Make an implicit hash and move on.
|
||||||
if !ok {
|
if !ok {
|
||||||
p.addImplicit(keyContext)
|
p.addImplicit(keyContext)
|
||||||
hashContext[k] = make(map[string]interface{})
|
hashContext[k] = make(map[string]any)
|
||||||
}
|
}
|
||||||
|
|
||||||
// If the hash context is actually an array of tables, then set
|
// If the hash context is actually an array of tables, then set
|
||||||
|
@ -516,9 +573,9 @@ func (p *parser) addContext(key Key, array bool) {
|
||||||
// Otherwise, it better be a table, since this MUST be a key group (by
|
// Otherwise, it better be a table, since this MUST be a key group (by
|
||||||
// virtue of it not being the last element in a key).
|
// virtue of it not being the last element in a key).
|
||||||
switch t := hashContext[k].(type) {
|
switch t := hashContext[k].(type) {
|
||||||
case []map[string]interface{}:
|
case []map[string]any:
|
||||||
hashContext = t[len(t)-1]
|
hashContext = t[len(t)-1]
|
||||||
case map[string]interface{}:
|
case map[string]any:
|
||||||
hashContext = t
|
hashContext = t
|
||||||
default:
|
default:
|
||||||
p.panicf("Key '%s' was already created as a hash.", keyContext)
|
p.panicf("Key '%s' was already created as a hash.", keyContext)
|
||||||
|
@ -529,40 +586,33 @@ func (p *parser) addContext(key Key, array bool) {
|
||||||
if array {
|
if array {
|
||||||
// If this is the first element for this array, then allocate a new
|
// If this is the first element for this array, then allocate a new
|
||||||
// list of tables for it.
|
// list of tables for it.
|
||||||
k := key[len(key)-1]
|
k := key.last()
|
||||||
if _, ok := hashContext[k]; !ok {
|
if _, ok := hashContext[k]; !ok {
|
||||||
hashContext[k] = make([]map[string]interface{}, 0, 4)
|
hashContext[k] = make([]map[string]any, 0, 4)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add a new table. But make sure the key hasn't already been used
|
// Add a new table. But make sure the key hasn't already been used
|
||||||
// for something else.
|
// for something else.
|
||||||
if hash, ok := hashContext[k].([]map[string]interface{}); ok {
|
if hash, ok := hashContext[k].([]map[string]any); ok {
|
||||||
hashContext[k] = append(hash, make(map[string]interface{}))
|
hashContext[k] = append(hash, make(map[string]any))
|
||||||
} else {
|
} else {
|
||||||
p.panicf("Key '%s' was already created and cannot be used as an array.", key)
|
p.panicf("Key '%s' was already created and cannot be used as an array.", key)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
p.setValue(key[len(key)-1], make(map[string]interface{}))
|
p.setValue(key.last(), make(map[string]any))
|
||||||
}
|
}
|
||||||
p.context = append(p.context, key[len(key)-1])
|
p.context = append(p.context, key.last())
|
||||||
}
|
|
||||||
|
|
||||||
// set calls setValue and setType.
|
|
||||||
func (p *parser) set(key string, val interface{}, typ tomlType, pos Position) {
|
|
||||||
p.setValue(key, val)
|
|
||||||
p.setType(key, typ, pos)
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// setValue sets the given key to the given value in the current context.
|
// setValue sets the given key to the given value in the current context.
|
||||||
// It will make sure that the key hasn't already been defined, account for
|
// It will make sure that the key hasn't already been defined, account for
|
||||||
// implicit key groups.
|
// implicit key groups.
|
||||||
func (p *parser) setValue(key string, value interface{}) {
|
func (p *parser) setValue(key string, value any) {
|
||||||
var (
|
var (
|
||||||
tmpHash interface{}
|
tmpHash any
|
||||||
ok bool
|
ok bool
|
||||||
hash = p.mapping
|
hash = p.mapping
|
||||||
keyContext Key
|
keyContext = make(Key, 0, len(p.context)+1)
|
||||||
)
|
)
|
||||||
for _, k := range p.context {
|
for _, k := range p.context {
|
||||||
keyContext = append(keyContext, k)
|
keyContext = append(keyContext, k)
|
||||||
|
@ -570,11 +620,11 @@ func (p *parser) setValue(key string, value interface{}) {
|
||||||
p.bug("Context for key '%s' has not been established.", keyContext)
|
p.bug("Context for key '%s' has not been established.", keyContext)
|
||||||
}
|
}
|
||||||
switch t := tmpHash.(type) {
|
switch t := tmpHash.(type) {
|
||||||
case []map[string]interface{}:
|
case []map[string]any:
|
||||||
// The context is a table of hashes. Pick the most recent table
|
// The context is a table of hashes. Pick the most recent table
|
||||||
// defined as the current hash.
|
// defined as the current hash.
|
||||||
hash = t[len(t)-1]
|
hash = t[len(t)-1]
|
||||||
case map[string]interface{}:
|
case map[string]any:
|
||||||
hash = t
|
hash = t
|
||||||
default:
|
default:
|
||||||
p.panicf("Key '%s' has already been defined.", keyContext)
|
p.panicf("Key '%s' has already been defined.", keyContext)
|
||||||
|
@ -601,9 +651,8 @@ func (p *parser) setValue(key string, value interface{}) {
|
||||||
p.removeImplicit(keyContext)
|
p.removeImplicit(keyContext)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
// Otherwise, we have a concrete key trying to override a previous key,
|
||||||
// Otherwise, we have a concrete key trying to override a previous
|
// which is *always* wrong.
|
||||||
// key, which is *always* wrong.
|
|
||||||
p.panicf("Key '%s' has already been defined.", keyContext)
|
p.panicf("Key '%s' has already been defined.", keyContext)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -632,14 +681,11 @@ func (p *parser) setType(key string, typ tomlType, pos Position) {
|
||||||
|
|
||||||
// Implicit keys need to be created when tables are implied in "a.b.c.d = 1" and
|
// Implicit keys need to be created when tables are implied in "a.b.c.d = 1" and
|
||||||
// "[a.b.c]" (the "a", "b", and "c" hashes are never created explicitly).
|
// "[a.b.c]" (the "a", "b", and "c" hashes are never created explicitly).
|
||||||
func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = struct{}{} }
|
func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = struct{}{} }
|
||||||
func (p *parser) removeImplicit(key Key) { delete(p.implicits, key.String()) }
|
func (p *parser) removeImplicit(key Key) { delete(p.implicits, key.String()) }
|
||||||
func (p *parser) isImplicit(key Key) bool { _, ok := p.implicits[key.String()]; return ok }
|
func (p *parser) isImplicit(key Key) bool { _, ok := p.implicits[key.String()]; return ok }
|
||||||
func (p *parser) isArray(key Key) bool { return p.keyInfo[key.String()].tomlType == tomlArray }
|
func (p *parser) isArray(key Key) bool { return p.keyInfo[key.String()].tomlType == tomlArray }
|
||||||
func (p *parser) addImplicitContext(key Key) {
|
func (p *parser) addImplicitContext(key Key) { p.addImplicit(key); p.addContext(key, false) }
|
||||||
p.addImplicit(key)
|
|
||||||
p.addContext(key, false)
|
|
||||||
}
|
|
||||||
|
|
||||||
// current returns the full key name of the current context.
|
// current returns the full key name of the current context.
|
||||||
func (p *parser) current() string {
|
func (p *parser) current() string {
|
||||||
|
@ -662,114 +708,131 @@ func stripFirstNewline(s string) string {
|
||||||
return s
|
return s
|
||||||
}
|
}
|
||||||
|
|
||||||
// Remove newlines inside triple-quoted strings if a line ends with "\".
|
// stripEscapedNewlines removes whitespace after line-ending backslashes in
|
||||||
|
// multiline strings.
|
||||||
|
//
|
||||||
|
// A line-ending backslash is an unescaped \ followed only by whitespace until
|
||||||
|
// the next newline. After a line-ending backslash, all whitespace is removed
|
||||||
|
// until the next non-whitespace character.
|
||||||
func (p *parser) stripEscapedNewlines(s string) string {
|
func (p *parser) stripEscapedNewlines(s string) string {
|
||||||
split := strings.Split(s, "\n")
|
var (
|
||||||
if len(split) < 1 {
|
b strings.Builder
|
||||||
return s
|
i int
|
||||||
}
|
)
|
||||||
|
b.Grow(len(s))
|
||||||
|
for {
|
||||||
|
ix := strings.Index(s[i:], `\`)
|
||||||
|
if ix < 0 {
|
||||||
|
b.WriteString(s)
|
||||||
|
return b.String()
|
||||||
|
}
|
||||||
|
i += ix
|
||||||
|
|
||||||
escNL := false // Keep track of the last non-blank line was escaped.
|
if len(s) > i+1 && s[i+1] == '\\' {
|
||||||
for i, line := range split {
|
// Escaped backslash.
|
||||||
line = strings.TrimRight(line, " \t\r")
|
i += 2
|
||||||
|
continue
|
||||||
if len(line) == 0 || line[len(line)-1] != '\\' {
|
}
|
||||||
split[i] = strings.TrimRight(split[i], "\r")
|
// Scan until the next non-whitespace.
|
||||||
if !escNL && i != len(split)-1 {
|
j := i + 1
|
||||||
split[i] += "\n"
|
whitespaceLoop:
|
||||||
|
for ; j < len(s); j++ {
|
||||||
|
switch s[j] {
|
||||||
|
case ' ', '\t', '\r', '\n':
|
||||||
|
default:
|
||||||
|
break whitespaceLoop
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
if j == i+1 {
|
||||||
|
// Not a whitespace escape.
|
||||||
|
i++
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
if !strings.Contains(s[i:j], "\n") {
|
||||||
escBS := true
|
// This is not a line-ending backslash. (It's a bad escape sequence,
|
||||||
for j := len(line) - 1; j >= 0 && line[j] == '\\'; j-- {
|
// but we can let replaceEscapes catch it.)
|
||||||
escBS = !escBS
|
i++
|
||||||
}
|
|
||||||
if escNL {
|
|
||||||
line = strings.TrimLeft(line, " \t\r")
|
|
||||||
}
|
|
||||||
escNL = !escBS
|
|
||||||
|
|
||||||
if escBS {
|
|
||||||
split[i] += "\n"
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
b.WriteString(s[:i])
|
||||||
if i == len(split)-1 {
|
s = s[j:]
|
||||||
p.panicf("invalid escape: '\\ '")
|
i = 0
|
||||||
}
|
|
||||||
|
|
||||||
split[i] = line[:len(line)-1] // Remove \
|
|
||||||
if len(split)-1 > i {
|
|
||||||
split[i+1] = strings.TrimLeft(split[i+1], " \t\r")
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return strings.Join(split, "")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *parser) replaceEscapes(it item, str string) string {
|
func (p *parser) replaceEscapes(it item, str string) string {
|
||||||
replaced := make([]rune, 0, len(str))
|
var (
|
||||||
s := []byte(str)
|
b strings.Builder
|
||||||
r := 0
|
skip = 0
|
||||||
for r < len(s) {
|
)
|
||||||
if s[r] != '\\' {
|
b.Grow(len(str))
|
||||||
c, size := utf8.DecodeRune(s[r:])
|
for i, c := range str {
|
||||||
r += size
|
if skip > 0 {
|
||||||
replaced = append(replaced, c)
|
skip--
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
r += 1
|
if c != '\\' {
|
||||||
if r >= len(s) {
|
b.WriteRune(c)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if i >= len(str) {
|
||||||
p.bug("Escape sequence at end of string.")
|
p.bug("Escape sequence at end of string.")
|
||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
switch s[r] {
|
switch str[i+1] {
|
||||||
default:
|
default:
|
||||||
p.bug("Expected valid escape code after \\, but got %q.", s[r])
|
p.bug("Expected valid escape code after \\, but got %q.", str[i+1])
|
||||||
case ' ', '\t':
|
case ' ', '\t':
|
||||||
p.panicItemf(it, "invalid escape: '\\%c'", s[r])
|
p.panicItemf(it, "invalid escape: '\\%c'", str[i+1])
|
||||||
case 'b':
|
case 'b':
|
||||||
replaced = append(replaced, rune(0x0008))
|
b.WriteByte(0x08)
|
||||||
r += 1
|
skip = 1
|
||||||
case 't':
|
case 't':
|
||||||
replaced = append(replaced, rune(0x0009))
|
b.WriteByte(0x09)
|
||||||
r += 1
|
skip = 1
|
||||||
case 'n':
|
case 'n':
|
||||||
replaced = append(replaced, rune(0x000A))
|
b.WriteByte(0x0a)
|
||||||
r += 1
|
skip = 1
|
||||||
case 'f':
|
case 'f':
|
||||||
replaced = append(replaced, rune(0x000C))
|
b.WriteByte(0x0c)
|
||||||
r += 1
|
skip = 1
|
||||||
case 'r':
|
case 'r':
|
||||||
replaced = append(replaced, rune(0x000D))
|
b.WriteByte(0x0d)
|
||||||
r += 1
|
skip = 1
|
||||||
|
case 'e':
|
||||||
|
if p.tomlNext {
|
||||||
|
b.WriteByte(0x1b)
|
||||||
|
skip = 1
|
||||||
|
}
|
||||||
case '"':
|
case '"':
|
||||||
replaced = append(replaced, rune(0x0022))
|
b.WriteByte(0x22)
|
||||||
r += 1
|
skip = 1
|
||||||
case '\\':
|
case '\\':
|
||||||
replaced = append(replaced, rune(0x005C))
|
b.WriteByte(0x5c)
|
||||||
r += 1
|
skip = 1
|
||||||
|
// The lexer guarantees the correct number of characters are present;
|
||||||
|
// don't need to check here.
|
||||||
|
case 'x':
|
||||||
|
if p.tomlNext {
|
||||||
|
escaped := p.asciiEscapeToUnicode(it, str[i+2:i+4])
|
||||||
|
b.WriteRune(escaped)
|
||||||
|
skip = 3
|
||||||
|
}
|
||||||
case 'u':
|
case 'u':
|
||||||
// At this point, we know we have a Unicode escape of the form
|
escaped := p.asciiEscapeToUnicode(it, str[i+2:i+6])
|
||||||
// `uXXXX` at [r, r+5). (Because the lexer guarantees this
|
b.WriteRune(escaped)
|
||||||
// for us.)
|
skip = 5
|
||||||
escaped := p.asciiEscapeToUnicode(it, s[r+1:r+5])
|
|
||||||
replaced = append(replaced, escaped)
|
|
||||||
r += 5
|
|
||||||
case 'U':
|
case 'U':
|
||||||
// At this point, we know we have a Unicode escape of the form
|
escaped := p.asciiEscapeToUnicode(it, str[i+2:i+10])
|
||||||
// `uXXXX` at [r, r+9). (Because the lexer guarantees this
|
b.WriteRune(escaped)
|
||||||
// for us.)
|
skip = 9
|
||||||
escaped := p.asciiEscapeToUnicode(it, s[r+1:r+9])
|
|
||||||
replaced = append(replaced, escaped)
|
|
||||||
r += 9
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return string(replaced)
|
return b.String()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *parser) asciiEscapeToUnicode(it item, bs []byte) rune {
|
func (p *parser) asciiEscapeToUnicode(it item, s string) rune {
|
||||||
s := string(bs)
|
|
||||||
hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
|
hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
p.bug("Could not parse '%s' as a hexadecimal number, but the lexer claims it's OK: %s", s, err)
|
p.bug("Could not parse '%s' as a hexadecimal number, but the lexer claims it's OK: %s", s, err)
|
||||||
|
|
|
@ -25,10 +25,8 @@ type field struct {
|
||||||
// breaking ties with index sequence.
|
// breaking ties with index sequence.
|
||||||
type byName []field
|
type byName []field
|
||||||
|
|
||||||
func (x byName) Len() int { return len(x) }
|
func (x byName) Len() int { return len(x) }
|
||||||
|
|
||||||
func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||||
|
|
||||||
func (x byName) Less(i, j int) bool {
|
func (x byName) Less(i, j int) bool {
|
||||||
if x[i].name != x[j].name {
|
if x[i].name != x[j].name {
|
||||||
return x[i].name < x[j].name
|
return x[i].name < x[j].name
|
||||||
|
@ -45,10 +43,8 @@ func (x byName) Less(i, j int) bool {
|
||||||
// byIndex sorts field by index sequence.
|
// byIndex sorts field by index sequence.
|
||||||
type byIndex []field
|
type byIndex []field
|
||||||
|
|
||||||
func (x byIndex) Len() int { return len(x) }
|
func (x byIndex) Len() int { return len(x) }
|
||||||
|
|
||||||
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||||
|
|
||||||
func (x byIndex) Less(i, j int) bool {
|
func (x byIndex) Less(i, j int) bool {
|
||||||
for k, xik := range x[i].index {
|
for k, xik := range x[i].index {
|
||||||
if k >= len(x[j].index) {
|
if k >= len(x[j].index) {
|
||||||
|
|
|
@ -22,13 +22,8 @@ func typeIsTable(t tomlType) bool {
|
||||||
|
|
||||||
type tomlBaseType string
|
type tomlBaseType string
|
||||||
|
|
||||||
func (btype tomlBaseType) typeString() string {
|
func (btype tomlBaseType) typeString() string { return string(btype) }
|
||||||
return string(btype)
|
func (btype tomlBaseType) String() string { return btype.typeString() }
|
||||||
}
|
|
||||||
|
|
||||||
func (btype tomlBaseType) String() string {
|
|
||||||
return btype.typeString()
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
var (
|
||||||
tomlInteger tomlBaseType = "Integer"
|
tomlInteger tomlBaseType = "Integer"
|
||||||
|
@ -54,7 +49,7 @@ func (p *parser) typeOfPrimitive(lexItem item) tomlType {
|
||||||
return tomlFloat
|
return tomlFloat
|
||||||
case itemDatetime:
|
case itemDatetime:
|
||||||
return tomlDatetime
|
return tomlDatetime
|
||||||
case itemString:
|
case itemString, itemStringEsc:
|
||||||
return tomlString
|
return tomlString
|
||||||
case itemMultilineString:
|
case itemMultilineString:
|
||||||
return tomlString
|
return tomlString
|
||||||
|
|
|
@ -1,15 +0,0 @@
|
||||||
# Binaries for programs and plugins
|
|
||||||
*.exe
|
|
||||||
*.exe~
|
|
||||||
*.dll
|
|
||||||
*.so
|
|
||||||
*.dylib
|
|
||||||
|
|
||||||
# Test binary, built with `go test -c`
|
|
||||||
*.test
|
|
||||||
|
|
||||||
# Output of the go coverage tool, specifically when used with LiteIDE
|
|
||||||
*.out
|
|
||||||
|
|
||||||
# Dependency directories (remove the comment below to include it)
|
|
||||||
# vendor/
|
|
|
@ -1,150 +0,0 @@
|
||||||
# This file contains all available configuration options
|
|
||||||
# with their default values.
|
|
||||||
|
|
||||||
# options for analysis running
|
|
||||||
run:
|
|
||||||
# default concurrency is a available CPU number
|
|
||||||
concurrency: 4
|
|
||||||
|
|
||||||
# timeout for analysis, e.g. 30s, 5m, default is 1m
|
|
||||||
deadline: 15m
|
|
||||||
|
|
||||||
# exit code when at least one issue was found, default is 1
|
|
||||||
issues-exit-code: 1
|
|
||||||
|
|
||||||
# include test files or not, default is true
|
|
||||||
tests: false
|
|
||||||
|
|
||||||
# list of build tags, all linters use it. Default is empty list.
|
|
||||||
#build-tags:
|
|
||||||
# - mytag
|
|
||||||
|
|
||||||
# which dirs to skip: they won't be analyzed;
|
|
||||||
# can use regexp here: generated.*, regexp is applied on full path;
|
|
||||||
# default value is empty list, but next dirs are always skipped independently
|
|
||||||
# from this option's value:
|
|
||||||
# vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
|
|
||||||
skip-dirs:
|
|
||||||
- /gen$
|
|
||||||
|
|
||||||
# which files to skip: they will be analyzed, but issues from them
|
|
||||||
# won't be reported. Default value is empty list, but there is
|
|
||||||
# no need to include all autogenerated files, we confidently recognize
|
|
||||||
# autogenerated files. If it's not please let us know.
|
|
||||||
skip-files:
|
|
||||||
- ".*\\.my\\.go$"
|
|
||||||
- lib/bad.go
|
|
||||||
- ".*\\.template\\.go$"
|
|
||||||
|
|
||||||
# output configuration options
|
|
||||||
output:
|
|
||||||
# colored-line-number|line-number|json|tab|checkstyle, default is "colored-line-number"
|
|
||||||
format: colored-line-number
|
|
||||||
|
|
||||||
# print lines of code with issue, default is true
|
|
||||||
print-issued-lines: true
|
|
||||||
|
|
||||||
# print linter name in the end of issue text, default is true
|
|
||||||
print-linter-name: true
|
|
||||||
|
|
||||||
# all available settings of specific linters
|
|
||||||
linters-settings:
|
|
||||||
errcheck:
|
|
||||||
# report about not checking of errors in type assetions: `a := b.(MyStruct)`;
|
|
||||||
# default is false: such cases aren't reported by default.
|
|
||||||
check-type-assertions: false
|
|
||||||
|
|
||||||
# report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`;
|
|
||||||
# default is false: such cases aren't reported by default.
|
|
||||||
check-blank: false
|
|
||||||
govet:
|
|
||||||
# report about shadowed variables
|
|
||||||
check-shadowing: true
|
|
||||||
|
|
||||||
# Obtain type information from installed (to $GOPATH/pkg) package files:
|
|
||||||
# golangci-lint will execute `go install -i` and `go test -i` for analyzed packages
|
|
||||||
# before analyzing them.
|
|
||||||
# By default this option is disabled and govet gets type information by loader from source code.
|
|
||||||
# Loading from source code is slow, but it's done only once for all linters.
|
|
||||||
# Go-installing of packages first time is much slower than loading them from source code,
|
|
||||||
# therefore this option is disabled by default.
|
|
||||||
# But repeated installation is fast in go >= 1.10 because of build caching.
|
|
||||||
# Enable this option only if all conditions are met:
|
|
||||||
# 1. you use only "fast" linters (--fast e.g.): no program loading occurs
|
|
||||||
# 2. you use go >= 1.10
|
|
||||||
# 3. you do repeated runs (false for CI) or cache $GOPATH/pkg or `go env GOCACHE` dir in CI.
|
|
||||||
use-installed-packages: false
|
|
||||||
golint:
|
|
||||||
# minimal confidence for issues, default is 0.8
|
|
||||||
min-confidence: 0.8
|
|
||||||
gofmt:
|
|
||||||
# simplify code: gofmt with `-s` option, true by default
|
|
||||||
simplify: true
|
|
||||||
gocyclo:
|
|
||||||
# minimal code complexity to report, 30 by default (but we recommend 10-20)
|
|
||||||
min-complexity: 10
|
|
||||||
maligned:
|
|
||||||
# print struct with more effective memory layout or not, false by default
|
|
||||||
suggest-new: true
|
|
||||||
dupl:
|
|
||||||
# tokens count to trigger issue, 150 by default
|
|
||||||
threshold: 100
|
|
||||||
goconst:
|
|
||||||
# minimal length of string constant, 3 by default
|
|
||||||
min-len: 3
|
|
||||||
# minimal occurrences count to trigger, 3 by default
|
|
||||||
min-occurrences: 3
|
|
||||||
depguard:
|
|
||||||
list-type: blacklist
|
|
||||||
include-go-root: false
|
|
||||||
packages:
|
|
||||||
- github.com/davecgh/go-spew/spew
|
|
||||||
|
|
||||||
linters:
|
|
||||||
#enable:
|
|
||||||
# - staticcheck
|
|
||||||
# - unused
|
|
||||||
# - gosimple
|
|
||||||
enable-all: true
|
|
||||||
disable:
|
|
||||||
- lll
|
|
||||||
disable-all: false
|
|
||||||
#presets:
|
|
||||||
# - bugs
|
|
||||||
# - unused
|
|
||||||
fast: false
|
|
||||||
|
|
||||||
issues:
|
|
||||||
# List of regexps of issue texts to exclude, empty list by default.
|
|
||||||
# But independently from this option we use default exclude patterns,
|
|
||||||
# it can be disabled by `exclude-use-default: false`. To list all
|
|
||||||
# excluded by default patterns execute `golangci-lint run --help`
|
|
||||||
exclude:
|
|
||||||
- "`parseTained` is unused"
|
|
||||||
- "`parseState` is unused"
|
|
||||||
|
|
||||||
# Independently from option `exclude` we use default exclude patterns,
|
|
||||||
# it can be disabled by this option. To list all
|
|
||||||
# excluded by default patterns execute `golangci-lint run --help`.
|
|
||||||
# Default value for this option is false.
|
|
||||||
exclude-use-default: false
|
|
||||||
|
|
||||||
# Maximum issues count per one linter. Set to 0 to disable. Default is 50.
|
|
||||||
max-per-linter: 0
|
|
||||||
|
|
||||||
# Maximum count of issues with the same text. Set to 0 to disable. Default is 3.
|
|
||||||
max-same: 0
|
|
||||||
|
|
||||||
# Show only new issues: if there are unstaged changes or untracked files,
|
|
||||||
# only those changes are analyzed, else only changes in HEAD~ are analyzed.
|
|
||||||
# It's a super-useful option for integration of golangci-lint into existing
|
|
||||||
# large codebase. It's not practical to fix all existing issues at the moment
|
|
||||||
# of integration: much better don't allow issues in new code.
|
|
||||||
# Default is false.
|
|
||||||
new: false
|
|
||||||
|
|
||||||
# Show only new issues created after git revision `REV`
|
|
||||||
#new-from-rev: REV
|
|
||||||
|
|
||||||
# Show only new issues created in git patch with set file path.
|
|
||||||
#new-from-patch: path/to/patch/file
|
|
|
@ -1,24 +0,0 @@
|
||||||
language: go
|
|
||||||
|
|
||||||
go:
|
|
||||||
- "1.13"
|
|
||||||
- "1.14"
|
|
||||||
- tip
|
|
||||||
|
|
||||||
env:
|
|
||||||
- GO111MODULE=on
|
|
||||||
|
|
||||||
before_install:
|
|
||||||
- go get github.com/axw/gocov/gocov
|
|
||||||
- go get github.com/mattn/goveralls
|
|
||||||
- go get golang.org/x/tools/cmd/cover
|
|
||||||
- go get golang.org/x/tools/cmd/goimports
|
|
||||||
- wget -O - -q https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh| sh
|
|
||||||
|
|
||||||
script:
|
|
||||||
- test -z "$(goimports -d ./ 2>&1)"
|
|
||||||
- ./bin/golangci-lint run
|
|
||||||
- go test -v -race ./...
|
|
||||||
|
|
||||||
after_success:
|
|
||||||
- test "$TRAVIS_GO_VERSION" = "1.14" && goveralls -service=travis-ci
|
|
|
@ -1,21 +0,0 @@
|
||||||
MIT License
|
|
||||||
|
|
||||||
Copyright (c) 2020 Djarvur
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
|
||||||
in the Software without restriction, including without limitation the rights
|
|
||||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
copies of the Software, and to permit persons to whom the Software is
|
|
||||||
furnished to do so, subject to the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be included in all
|
|
||||||
copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
||||||
SOFTWARE.
|
|
|
@ -1,75 +0,0 @@
|
||||||
= err113 image:https://godoc.org/github.com/Djarvur/go-err113?status.svg["GoDoc",link="http://godoc.org/github.com/Djarvur/go-err113"] image:https://travis-ci.org/Djarvur/go-err113.svg["Build Status",link="https://travis-ci.org/Djarvur/go-err113"] image:https://coveralls.io/repos/Djarvur/go-err113/badge.svg?branch=master&service=github["Coverage Status",link="https://coveralls.io/github/Djarvur/go-err113?branch=master"]
|
|
||||||
Daniel Podolsky
|
|
||||||
:toc:
|
|
||||||
|
|
||||||
Golang linter to check the errors handling expressions
|
|
||||||
|
|
||||||
== Details
|
|
||||||
|
|
||||||
Starting from Go 1.13 the standard `error` type behaviour was changed: one `error` could be derived from another with `fmt.Errorf()` method using `%w` format specifier.
|
|
||||||
|
|
||||||
So the errors hierarchy could be built for flexible and responsible errors processing.
|
|
||||||
|
|
||||||
And to make this possible at least two simple rules should be followed:
|
|
||||||
|
|
||||||
1. `error` values should not be compared directly but with `errors.Is()` method.
|
|
||||||
1. `error` should not be created dynamically from scratch but by the wrapping the static (package-level) error.
|
|
||||||
|
|
||||||
This linter is checking the code for these 2 rules compliance.
|
|
||||||
|
|
||||||
=== Reports
|
|
||||||
|
|
||||||
So, `err113` reports every `==` and `!=` comparison for exact `error` type variables except comparison to `nil` and `io.EOF`.
|
|
||||||
|
|
||||||
Also, any call of `errors.New()` and `fmt.Errorf()` methods are reported except the calls used to initialise package-level variables and the `fmt.Errorf()` calls wrapping the other errors.
|
|
||||||
|
|
||||||
Note: non-standard packages, like `github.com/pkg/errors` are ignored completely.
|
|
||||||
|
|
||||||
== Install
|
|
||||||
|
|
||||||
```
|
|
||||||
go get -u github.com/Djarvur/go-err113/cmd/err113
|
|
||||||
```
|
|
||||||
|
|
||||||
== Usage
|
|
||||||
|
|
||||||
Defined by link:https://pkg.go.dev/golang.org/x/tools/go/analysis/singlechecker[singlechecker] package.
|
|
||||||
|
|
||||||
```
|
|
||||||
err113: checks the error handling rules according to the Go 1.13 new error type
|
|
||||||
|
|
||||||
Usage: err113 [-flag] [package]
|
|
||||||
|
|
||||||
|
|
||||||
Flags:
|
|
||||||
-V print version and exit
|
|
||||||
-all
|
|
||||||
no effect (deprecated)
|
|
||||||
-c int
|
|
||||||
display offending line with this many lines of context (default -1)
|
|
||||||
-cpuprofile string
|
|
||||||
write CPU profile to this file
|
|
||||||
-debug string
|
|
||||||
debug flags, any subset of "fpstv"
|
|
||||||
-fix
|
|
||||||
apply all suggested fixes
|
|
||||||
-flags
|
|
||||||
print analyzer flags in JSON
|
|
||||||
-json
|
|
||||||
emit JSON output
|
|
||||||
-memprofile string
|
|
||||||
write memory profile to this file
|
|
||||||
-source
|
|
||||||
no effect (deprecated)
|
|
||||||
-tags string
|
|
||||||
no effect (deprecated)
|
|
||||||
-trace string
|
|
||||||
write trace log to this file
|
|
||||||
-v no effect (deprecated)
|
|
||||||
```
|
|
||||||
|
|
||||||
== Thanks
|
|
||||||
|
|
||||||
To link:https://github.com/quasilyte[Iskander (Alex) Sharipov] for the really useful advices.
|
|
||||||
|
|
||||||
To link:https://github.com/jackwhelpton[Jack Whelpton] for the bugfix provided.
|
|
|
@ -1,123 +0,0 @@
|
||||||
package err113
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"go/ast"
|
|
||||||
"go/token"
|
|
||||||
"go/types"
|
|
||||||
|
|
||||||
"golang.org/x/tools/go/analysis"
|
|
||||||
)
|
|
||||||
|
|
||||||
func inspectComparision(pass *analysis.Pass, n ast.Node) bool { // nolint: unparam
|
|
||||||
// check whether the call expression matches time.Now().Sub()
|
|
||||||
be, ok := n.(*ast.BinaryExpr)
|
|
||||||
if !ok {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// check if it is a comparison operation
|
|
||||||
if be.Op != token.EQL && be.Op != token.NEQ {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
if !areBothErrors(be.X, be.Y, pass.TypesInfo) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
oldExpr := render(pass.Fset, be)
|
|
||||||
|
|
||||||
negate := ""
|
|
||||||
if be.Op == token.NEQ {
|
|
||||||
negate = "!"
|
|
||||||
}
|
|
||||||
|
|
||||||
newExpr := fmt.Sprintf("%s%s.Is(%s, %s)", negate, "errors", rawString(be.X), rawString(be.Y))
|
|
||||||
|
|
||||||
pass.Report(
|
|
||||||
analysis.Diagnostic{
|
|
||||||
Pos: be.Pos(),
|
|
||||||
Message: fmt.Sprintf("do not compare errors directly %q, use %q instead", oldExpr, newExpr),
|
|
||||||
SuggestedFixes: []analysis.SuggestedFix{
|
|
||||||
{
|
|
||||||
Message: fmt.Sprintf("should replace %q with %q", oldExpr, newExpr),
|
|
||||||
TextEdits: []analysis.TextEdit{
|
|
||||||
{
|
|
||||||
Pos: be.Pos(),
|
|
||||||
End: be.End(),
|
|
||||||
NewText: []byte(newExpr),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
)
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func isError(v ast.Expr, info *types.Info) bool {
|
|
||||||
if intf, ok := info.TypeOf(v).Underlying().(*types.Interface); ok {
|
|
||||||
return intf.NumMethods() == 1 && intf.Method(0).FullName() == "(error).Error"
|
|
||||||
}
|
|
||||||
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func isEOF(ex ast.Expr, info *types.Info) bool {
|
|
||||||
se, ok := ex.(*ast.SelectorExpr)
|
|
||||||
if !ok || se.Sel.Name != "EOF" {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
if ep, ok := asImportedName(se.X, info); !ok || ep != "io" {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func asImportedName(ex ast.Expr, info *types.Info) (string, bool) {
|
|
||||||
ei, ok := ex.(*ast.Ident)
|
|
||||||
if !ok {
|
|
||||||
return "", false
|
|
||||||
}
|
|
||||||
|
|
||||||
ep, ok := info.ObjectOf(ei).(*types.PkgName)
|
|
||||||
if !ok {
|
|
||||||
return "", false
|
|
||||||
}
|
|
||||||
|
|
||||||
return ep.Imported().Path(), true
|
|
||||||
}
|
|
||||||
|
|
||||||
func areBothErrors(x, y ast.Expr, typesInfo *types.Info) bool {
|
|
||||||
// check that both left and right hand side are not nil
|
|
||||||
if typesInfo.Types[x].IsNil() || typesInfo.Types[y].IsNil() {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// check that both left and right hand side are not io.EOF
|
|
||||||
if isEOF(x, typesInfo) || isEOF(y, typesInfo) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// check that both left and right hand side are errors
|
|
||||||
if !isError(x, typesInfo) && !isError(y, typesInfo) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func rawString(x ast.Expr) string {
|
|
||||||
switch t := x.(type) {
|
|
||||||
case *ast.Ident:
|
|
||||||
return t.Name
|
|
||||||
case *ast.SelectorExpr:
|
|
||||||
return fmt.Sprintf("%s.%s", rawString(t.X), t.Sel.Name)
|
|
||||||
case *ast.CallExpr:
|
|
||||||
return fmt.Sprintf("%s()", rawString(t.Fun))
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("%s", x)
|
|
||||||
}
|
|
|
@ -1,74 +0,0 @@
|
||||||
package err113
|
|
||||||
|
|
||||||
import (
|
|
||||||
"go/ast"
|
|
||||||
"go/types"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"golang.org/x/tools/go/analysis"
|
|
||||||
)
|
|
||||||
|
|
||||||
var methods2check = map[string]map[string]func(*ast.CallExpr, *types.Info) bool{ // nolint: gochecknoglobals
|
|
||||||
"errors": {"New": justTrue},
|
|
||||||
"fmt": {"Errorf": checkWrap},
|
|
||||||
}
|
|
||||||
|
|
||||||
// justTrue is a predicate that flags every call unconditionally.
func justTrue(*ast.CallExpr, *types.Info) bool {
	return true
}
|
|
||||||
|
|
||||||
func checkWrap(ce *ast.CallExpr, info *types.Info) bool {
|
|
||||||
return !(len(ce.Args) > 0 && strings.Contains(toString(ce.Args[0], info), `%w`))
|
|
||||||
}
|
|
||||||
|
|
||||||
func inspectDefinition(pass *analysis.Pass, tlds map[*ast.CallExpr]struct{}, n ast.Node) bool { //nolint: unparam
|
|
||||||
// check whether the call expression matches time.Now().Sub()
|
|
||||||
ce, ok := n.(*ast.CallExpr)
|
|
||||||
if !ok {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, ok = tlds[ce]; ok {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
fn, ok := ce.Fun.(*ast.SelectorExpr)
|
|
||||||
if !ok {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
fxName, ok := asImportedName(fn.X, pass.TypesInfo)
|
|
||||||
if !ok {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
methods, ok := methods2check[fxName]
|
|
||||||
if !ok {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
checkFunc, ok := methods[fn.Sel.Name]
|
|
||||||
if !ok {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
if !checkFunc(ce, pass.TypesInfo) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
pass.Reportf(
|
|
||||||
ce.Pos(),
|
|
||||||
"do not define dynamic errors, use wrapped static errors instead: %q",
|
|
||||||
render(pass.Fset, ce),
|
|
||||||
)
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func toString(ex ast.Expr, info *types.Info) string {
|
|
||||||
if tv, ok := info.Types[ex]; ok && tv.Value != nil {
|
|
||||||
return tv.Value.ExactString()
|
|
||||||
}
|
|
||||||
|
|
||||||
return ""
|
|
||||||
}
|
|
|
@ -1,90 +0,0 @@
|
||||||
// Package err113 is a Golang linter to check the errors handling expressions
|
|
||||||
package err113
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"go/ast"
|
|
||||||
"go/printer"
|
|
||||||
"go/token"
|
|
||||||
|
|
||||||
"golang.org/x/tools/go/analysis"
|
|
||||||
)
|
|
||||||
|
|
||||||
// NewAnalyzer creates a new analysis.Analyzer instance tuned to run err113 checks.
|
|
||||||
func NewAnalyzer() *analysis.Analyzer {
|
|
||||||
return &analysis.Analyzer{
|
|
||||||
Name: "err113",
|
|
||||||
Doc: "checks the error handling rules according to the Go 1.13 new error type",
|
|
||||||
Run: run,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func run(pass *analysis.Pass) (interface{}, error) {
|
|
||||||
for _, file := range pass.Files {
|
|
||||||
tlds := enumerateFileDecls(file)
|
|
||||||
|
|
||||||
ast.Inspect(
|
|
||||||
file,
|
|
||||||
func(n ast.Node) bool {
|
|
||||||
return inspectComparision(pass, n) &&
|
|
||||||
inspectDefinition(pass, tlds, n)
|
|
||||||
},
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// render returns the pretty-print of the given node.
|
|
||||||
func render(fset *token.FileSet, x interface{}) string {
|
|
||||||
var buf bytes.Buffer
|
|
||||||
if err := printer.Fprint(&buf, fset, x); err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return buf.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
func enumerateFileDecls(f *ast.File) map[*ast.CallExpr]struct{} {
|
|
||||||
res := make(map[*ast.CallExpr]struct{})
|
|
||||||
|
|
||||||
var ces []*ast.CallExpr // nolint: prealloc
|
|
||||||
|
|
||||||
for _, d := range f.Decls {
|
|
||||||
ces = append(ces, enumerateDeclVars(d)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, ce := range ces {
|
|
||||||
res[ce] = struct{}{}
|
|
||||||
}
|
|
||||||
|
|
||||||
return res
|
|
||||||
}
|
|
||||||
|
|
||||||
func enumerateDeclVars(d ast.Decl) (res []*ast.CallExpr) {
|
|
||||||
td, ok := d.(*ast.GenDecl)
|
|
||||||
if !ok || td.Tok != token.VAR {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, s := range td.Specs {
|
|
||||||
res = append(res, enumerateSpecValues(s)...)
|
|
||||||
}
|
|
||||||
|
|
||||||
return res
|
|
||||||
}
|
|
||||||
|
|
||||||
func enumerateSpecValues(s ast.Spec) (res []*ast.CallExpr) {
|
|
||||||
vs, ok := s.(*ast.ValueSpec)
|
|
||||||
if !ok {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, v := range vs.Values {
|
|
||||||
if ce, ok := v.(*ast.CallExpr); ok {
|
|
||||||
res = append(res, ce)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return res
|
|
||||||
}
|
|
|
@ -1,29 +0,0 @@
|
||||||
language: go
|
|
||||||
|
|
||||||
go:
|
|
||||||
- 1.6.x
|
|
||||||
- 1.7.x
|
|
||||||
- 1.8.x
|
|
||||||
- 1.9.x
|
|
||||||
- 1.10.x
|
|
||||||
- 1.11.x
|
|
||||||
- 1.12.x
|
|
||||||
- tip
|
|
||||||
|
|
||||||
# Setting sudo access to false will let Travis CI use containers rather than
|
|
||||||
# VMs to run the tests. For more details see:
|
|
||||||
# - http://docs.travis-ci.com/user/workers/container-based-infrastructure/
|
|
||||||
# - http://docs.travis-ci.com/user/workers/standard-infrastructure/
|
|
||||||
sudo: false
|
|
||||||
|
|
||||||
script:
|
|
||||||
- make setup
|
|
||||||
- make test
|
|
||||||
|
|
||||||
notifications:
|
|
||||||
webhooks:
|
|
||||||
urls:
|
|
||||||
- https://webhooks.gitter.im/e/06e3328629952dabe3e0
|
|
||||||
on_success: change # options: [always|never|change] default: always
|
|
||||||
on_failure: always # options: [always|never|change] default: always
|
|
||||||
on_start: never # options: [always|never|change] default: always
|
|
|
@ -1,109 +0,0 @@
|
||||||
# 1.5.0 (2019-09-11)
|
|
||||||
|
|
||||||
## Added
|
|
||||||
|
|
||||||
- #103: Add basic fuzzing for `NewVersion()` (thanks @jesse-c)
|
|
||||||
|
|
||||||
## Changed
|
|
||||||
|
|
||||||
- #82: Clarify wildcard meaning in range constraints and update tests for it (thanks @greysteil)
|
|
||||||
- #83: Clarify caret operator range for pre-1.0.0 dependencies (thanks @greysteil)
|
|
||||||
- #72: Adding docs comment pointing to vert for a cli
|
|
||||||
- #71: Update the docs on pre-release comparator handling
|
|
||||||
- #89: Test with new go versions (thanks @thedevsaddam)
|
|
||||||
- #87: Added $ to ValidPrerelease for better validation (thanks @jeremycarroll)
|
|
||||||
|
|
||||||
## Fixed
|
|
||||||
|
|
||||||
- #78: Fix unchecked error in example code (thanks @ravron)
|
|
||||||
- #70: Fix the handling of pre-releases and the 0.0.0 release edge case
|
|
||||||
- #97: Fixed copyright file for proper display on GitHub
|
|
||||||
- #107: Fix handling prerelease when sorting alphanum and num
|
|
||||||
- #109: Fixed where Validate sometimes returns wrong message on error
|
|
||||||
|
|
||||||
# 1.4.2 (2018-04-10)
|
|
||||||
|
|
||||||
## Changed
|
|
||||||
- #72: Updated the docs to point to vert for a console application
|
|
||||||
- #71: Update the docs on pre-release comparator handling
|
|
||||||
|
|
||||||
## Fixed
|
|
||||||
- #70: Fix the handling of pre-releases and the 0.0.0 release edge case
|
|
||||||
|
|
||||||
# 1.4.1 (2018-04-02)
|
|
||||||
|
|
||||||
## Fixed
|
|
||||||
- Fixed #64: Fix pre-release precedence issue (thanks @uudashr)
|
|
||||||
|
|
||||||
# 1.4.0 (2017-10-04)
|
|
||||||
|
|
||||||
## Changed
|
|
||||||
- #61: Update NewVersion to parse ints with a 64bit int size (thanks @zknill)
|
|
||||||
|
|
||||||
# 1.3.1 (2017-07-10)
|
|
||||||
|
|
||||||
## Fixed
|
|
||||||
- Fixed #57: number comparisons in prerelease sometimes inaccurate
|
|
||||||
|
|
||||||
# 1.3.0 (2017-05-02)
|
|
||||||
|
|
||||||
## Added
|
|
||||||
- #45: Added json (un)marshaling support (thanks @mh-cbon)
|
|
||||||
- Stability marker. See https://masterminds.github.io/stability/
|
|
||||||
|
|
||||||
## Fixed
|
|
||||||
- #51: Fix handling of single digit tilde constraint (thanks @dgodd)
|
|
||||||
|
|
||||||
## Changed
|
|
||||||
- #55: The godoc icon moved from png to svg
|
|
||||||
|
|
||||||
# 1.2.3 (2017-04-03)
|
|
||||||
|
|
||||||
## Fixed
|
|
||||||
- #46: Fixed 0.x.x and 0.0.x in constraints being treated as *
|
|
||||||
|
|
||||||
# Release 1.2.2 (2016-12-13)
|
|
||||||
|
|
||||||
## Fixed
|
|
||||||
- #34: Fixed issue where hyphen range was not working with pre-release parsing.
|
|
||||||
|
|
||||||
# Release 1.2.1 (2016-11-28)
|
|
||||||
|
|
||||||
## Fixed
|
|
||||||
- #24: Fixed edge case issue where constraint "> 0" does not handle "0.0.1-alpha"
|
|
||||||
properly.
|
|
||||||
|
|
||||||
# Release 1.2.0 (2016-11-04)
|
|
||||||
|
|
||||||
## Added
|
|
||||||
- #20: Added MustParse function for versions (thanks @adamreese)
|
|
||||||
- #15: Added increment methods on versions (thanks @mh-cbon)
|
|
||||||
|
|
||||||
## Fixed
|
|
||||||
- Issue #21: Per the SemVer spec (section 9) a pre-release is unstable and
|
|
||||||
might not satisfy the intended compatibility. The change here ignores pre-releases
|
|
||||||
on constraint checks (e.g., ~ or ^) when a pre-release is not part of the
|
|
||||||
constraint. For example, `^1.2.3` will ignore pre-releases while
|
|
||||||
`^1.2.3-alpha` will include them.
|
|
||||||
|
|
||||||
# Release 1.1.1 (2016-06-30)
|
|
||||||
|
|
||||||
## Changed
|
|
||||||
- Issue #9: Speed up version comparison performance (thanks @sdboyer)
|
|
||||||
- Issue #8: Added benchmarks (thanks @sdboyer)
|
|
||||||
- Updated Go Report Card URL to new location
|
|
||||||
- Updated Readme to add code snippet formatting (thanks @mh-cbon)
|
|
||||||
- Updating tagging to v[SemVer] structure for compatibility with other tools.
|
|
||||||
|
|
||||||
# Release 1.1.0 (2016-03-11)
|
|
||||||
|
|
||||||
- Issue #2: Implemented validation to provide reasons a versions failed a
|
|
||||||
constraint.
|
|
||||||
|
|
||||||
# Release 1.0.1 (2015-12-31)
|
|
||||||
|
|
||||||
- Fixed #1: * constraint failing on valid versions.
|
|
||||||
|
|
||||||
# Release 1.0.0 (2015-10-20)
|
|
||||||
|
|
||||||
- Initial release
|
|
|
@ -1,19 +0,0 @@
|
||||||
Copyright (C) 2014-2019, Matt Butcher and Matt Farina
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
|
||||||
in the Software without restriction, including without limitation the rights
|
|
||||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
copies of the Software, and to permit persons to whom the Software is
|
|
||||||
furnished to do so, subject to the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be included in
|
|
||||||
all copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
|
||||||
THE SOFTWARE.
|
|
|
@ -1,36 +0,0 @@
|
||||||
.PHONY: setup
|
|
||||||
setup:
|
|
||||||
go get -u gopkg.in/alecthomas/gometalinter.v1
|
|
||||||
gometalinter.v1 --install
|
|
||||||
|
|
||||||
.PHONY: test
|
|
||||||
test: validate lint
|
|
||||||
@echo "==> Running tests"
|
|
||||||
go test -v
|
|
||||||
|
|
||||||
.PHONY: validate
|
|
||||||
validate:
|
|
||||||
@echo "==> Running static validations"
|
|
||||||
@gometalinter.v1 \
|
|
||||||
--disable-all \
|
|
||||||
--enable deadcode \
|
|
||||||
--severity deadcode:error \
|
|
||||||
--enable gofmt \
|
|
||||||
--enable gosimple \
|
|
||||||
--enable ineffassign \
|
|
||||||
--enable misspell \
|
|
||||||
--enable vet \
|
|
||||||
--tests \
|
|
||||||
--vendor \
|
|
||||||
--deadline 60s \
|
|
||||||
./... || exit_code=1
|
|
||||||
|
|
||||||
.PHONY: lint
|
|
||||||
lint:
|
|
||||||
@echo "==> Running linters"
|
|
||||||
@gometalinter.v1 \
|
|
||||||
--disable-all \
|
|
||||||
--enable golint \
|
|
||||||
--vendor \
|
|
||||||
--deadline 60s \
|
|
||||||
./... || :
|
|
|
@ -1,194 +0,0 @@
|
||||||
# SemVer
|
|
||||||
|
|
||||||
The `semver` package provides the ability to work with [Semantic Versions](http://semver.org) in Go. Specifically it provides the ability to:
|
|
||||||
|
|
||||||
* Parse semantic versions
|
|
||||||
* Sort semantic versions
|
|
||||||
* Check if a semantic version fits within a set of constraints
|
|
||||||
* Optionally work with a `v` prefix
|
|
||||||
|
|
||||||
[![Stability:
|
|
||||||
Active](https://masterminds.github.io/stability/active.svg)](https://masterminds.github.io/stability/active.html)
|
|
||||||
[![Build Status](https://travis-ci.org/Masterminds/semver.svg)](https://travis-ci.org/Masterminds/semver) [![Build status](https://ci.appveyor.com/api/projects/status/jfk66lib7hb985k8/branch/master?svg=true&passingText=windows%20build%20passing&failingText=windows%20build%20failing)](https://ci.appveyor.com/project/mattfarina/semver/branch/master) [![GoDoc](https://godoc.org/github.com/Masterminds/semver?status.svg)](https://godoc.org/github.com/Masterminds/semver) [![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/semver)](https://goreportcard.com/report/github.com/Masterminds/semver)
|
|
||||||
|
|
||||||
If you are looking for a command line tool for version comparisons please see
|
|
||||||
[vert](https://github.com/Masterminds/vert) which uses this library.
|
|
||||||
|
|
||||||
## Parsing Semantic Versions
|
|
||||||
|
|
||||||
To parse a semantic version use the `NewVersion` function. For example,
|
|
||||||
|
|
||||||
```go
|
|
||||||
v, err := semver.NewVersion("1.2.3-beta.1+build345")
|
|
||||||
```
|
|
||||||
|
|
||||||
If there is an error the version wasn't parseable. The version object has methods
|
|
||||||
to get the parts of the version, compare it to other versions, convert the
|
|
||||||
version back into a string, and get the original string. For more details
|
|
||||||
please see the [documentation](https://godoc.org/github.com/Masterminds/semver).
|
|
||||||
|
|
||||||
## Sorting Semantic Versions
|
|
||||||
|
|
||||||
A set of versions can be sorted using the [`sort`](https://golang.org/pkg/sort/)
|
|
||||||
package from the standard library. For example,
|
|
||||||
|
|
||||||
```go
|
|
||||||
raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",}
|
|
||||||
vs := make([]*semver.Version, len(raw))
|
|
||||||
for i, r := range raw {
|
|
||||||
v, err := semver.NewVersion(r)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("Error parsing version: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
vs[i] = v
|
|
||||||
}
|
|
||||||
|
|
||||||
sort.Sort(semver.Collection(vs))
|
|
||||||
```
|
|
||||||
|
|
||||||
## Checking Version Constraints
|
|
||||||
|
|
||||||
Checking a version against version constraints is one of the most featureful
|
|
||||||
parts of the package.
|
|
||||||
|
|
||||||
```go
|
|
||||||
c, err := semver.NewConstraint(">= 1.2.3")
|
|
||||||
if err != nil {
|
|
||||||
// Handle constraint not being parseable.
|
|
||||||
}
|
|
||||||
|
|
||||||
v, _ := semver.NewVersion("1.3")
|
|
||||||
if err != nil {
|
|
||||||
// Handle version not being parseable.
|
|
||||||
}
|
|
||||||
// Check if the version meets the constraints. The a variable will be true.
|
|
||||||
a := c.Check(v)
|
|
||||||
```
|
|
||||||
|
|
||||||
## Basic Comparisons
|
|
||||||
|
|
||||||
There are two elements to the comparisons. First, a comparison string is a list
|
|
||||||
of comma separated and comparisons. These are then separated by || separated or
|
|
||||||
comparisons. For example, `">= 1.2, < 3.0.0 || >= 4.2.3"` is looking for a
|
|
||||||
comparison that's greater than or equal to 1.2 and less than 3.0.0 or is
|
|
||||||
greater than or equal to 4.2.3.
|
|
||||||
|
|
||||||
The basic comparisons are:
|
|
||||||
|
|
||||||
* `=`: equal (aliased to no operator)
|
|
||||||
* `!=`: not equal
|
|
||||||
* `>`: greater than
|
|
||||||
* `<`: less than
|
|
||||||
* `>=`: greater than or equal to
|
|
||||||
* `<=`: less than or equal to
|
|
||||||
|
|
||||||
## Working With Pre-release Versions
|
|
||||||
|
|
||||||
Pre-releases, for those not familiar with them, are used for software releases
|
|
||||||
prior to stable or generally available releases. Examples of pre-releases include
|
|
||||||
development, alpha, beta, and release candidate releases. A pre-release may be
|
|
||||||
a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the
|
|
||||||
order of precedence, pre-releases come before their associated releases. In this
|
|
||||||
example `1.2.3-beta.1 < 1.2.3`.
|
|
||||||
|
|
||||||
According to the Semantic Version specification pre-releases may not be
|
|
||||||
API compliant with their release counterpart. It says,
|
|
||||||
|
|
||||||
> A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version.
|
|
||||||
|
|
||||||
SemVer comparisons without a pre-release comparator will skip pre-release versions.
|
|
||||||
For example, `>=1.2.3` will skip pre-releases when looking at a list of releases
|
|
||||||
while `>=1.2.3-0` will evaluate and find pre-releases.
|
|
||||||
|
|
||||||
The reason for the `0` as a pre-release version in the example comparison is
|
|
||||||
because pre-releases can only contain ASCII alphanumerics and hyphens (along with
|
|
||||||
`.` separators), per the spec. Sorting happens in ASCII sort order, again per the spec. The lowest character is a `0` in ASCII sort order (see an [ASCII Table](http://www.asciitable.com/))
|
|
||||||
|
|
||||||
Understanding ASCII sort ordering is important because A-Z comes before a-z. That
|
|
||||||
means `>=1.2.3-BETA` will return `1.2.3-alpha`. What you might expect from case
|
|
||||||
sensitivity doesn't apply here. This is due to ASCII sort ordering which is what
|
|
||||||
the spec specifies.
|
|
||||||
|
|
||||||
## Hyphen Range Comparisons
|
|
||||||
|
|
||||||
There are multiple methods to handle ranges and the first is hyphens ranges.
|
|
||||||
These look like:
|
|
||||||
|
|
||||||
* `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5`
|
|
||||||
* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4, <= 4.5`
|
|
||||||
|
|
||||||
## Wildcards In Comparisons
|
|
||||||
|
|
||||||
The `x`, `X`, and `*` characters can be used as a wildcard character. This works
|
|
||||||
for all comparison operators. When used on the `=` operator it falls
|
|
||||||
back to the patch level comparison (see tilde below). For example,
|
|
||||||
|
|
||||||
* `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
|
|
||||||
* `>= 1.2.x` is equivalent to `>= 1.2.0`
|
|
||||||
* `<= 2.x` is equivalent to `< 3`
|
|
||||||
* `*` is equivalent to `>= 0.0.0`
|
|
||||||
|
|
||||||
## Tilde Range Comparisons (Patch)
|
|
||||||
|
|
||||||
The tilde (`~`) comparison operator is for patch level ranges when a minor
|
|
||||||
version is specified and major level changes when the minor number is missing.
|
|
||||||
For example,
|
|
||||||
|
|
||||||
* `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0`
|
|
||||||
* `~1` is equivalent to `>= 1, < 2`
|
|
||||||
* `~2.3` is equivalent to `>= 2.3, < 2.4`
|
|
||||||
* `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
|
|
||||||
* `~1.x` is equivalent to `>= 1, < 2`
|
|
||||||
|
|
||||||
## Caret Range Comparisons (Major)
|
|
||||||
|
|
||||||
The caret (`^`) comparison operator is for major level changes. This is useful
|
|
||||||
when comparisons of API versions as a major change is API breaking. For example,
|
|
||||||
|
|
||||||
* `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0`
|
|
||||||
* `^0.0.1` is equivalent to `>= 0.0.1, < 1.0.0`
|
|
||||||
* `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0`
|
|
||||||
* `^2.3` is equivalent to `>= 2.3, < 3`
|
|
||||||
* `^2.x` is equivalent to `>= 2.0.0, < 3`
|
|
||||||
|
|
||||||
# Validation
|
|
||||||
|
|
||||||
In addition to testing a version against a constraint, a version can be validated
|
|
||||||
against a constraint. When validation fails a slice of errors containing why a
|
|
||||||
version didn't meet the constraint is returned. For example,
|
|
||||||
|
|
||||||
```go
|
|
||||||
c, err := semver.NewConstraint("<= 1.2.3, >= 1.4")
|
|
||||||
if err != nil {
|
|
||||||
// Handle constraint not being parseable.
|
|
||||||
}
|
|
||||||
|
|
||||||
v, _ := semver.NewVersion("1.3")
|
|
||||||
if err != nil {
|
|
||||||
// Handle version not being parseable.
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validate a version against a constraint.
|
|
||||||
a, msgs := c.Validate(v)
|
|
||||||
// a is false
|
|
||||||
for _, m := range msgs {
|
|
||||||
fmt.Println(m)
|
|
||||||
|
|
||||||
// Loops over the errors which would read
|
|
||||||
// "1.3 is greater than 1.2.3"
|
|
||||||
// "1.3 is less than 1.4"
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
# Fuzzing
|
|
||||||
|
|
||||||
[dvyukov/go-fuzz](https://github.com/dvyukov/go-fuzz) is used for fuzzing.
|
|
||||||
|
|
||||||
1. `go-fuzz-build`
|
|
||||||
2. `go-fuzz -workdir=fuzz`
|
|
||||||
|
|
||||||
# Contribute
|
|
||||||
|
|
||||||
If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues)
|
|
||||||
or [create a pull request](https://github.com/Masterminds/semver/pulls).
|
|
|
@ -1,44 +0,0 @@
|
||||||
version: build-{build}.{branch}
|
|
||||||
|
|
||||||
clone_folder: C:\gopath\src\github.com\Masterminds\semver
|
|
||||||
shallow_clone: true
|
|
||||||
|
|
||||||
environment:
|
|
||||||
GOPATH: C:\gopath
|
|
||||||
|
|
||||||
platform:
|
|
||||||
- x64
|
|
||||||
|
|
||||||
install:
|
|
||||||
- go version
|
|
||||||
- go env
|
|
||||||
- go get -u gopkg.in/alecthomas/gometalinter.v1
|
|
||||||
- set PATH=%PATH%;%GOPATH%\bin
|
|
||||||
- gometalinter.v1.exe --install
|
|
||||||
|
|
||||||
build_script:
|
|
||||||
- go install -v ./...
|
|
||||||
|
|
||||||
test_script:
|
|
||||||
- "gometalinter.v1 \
|
|
||||||
--disable-all \
|
|
||||||
--enable deadcode \
|
|
||||||
--severity deadcode:error \
|
|
||||||
--enable gofmt \
|
|
||||||
--enable gosimple \
|
|
||||||
--enable ineffassign \
|
|
||||||
--enable misspell \
|
|
||||||
--enable vet \
|
|
||||||
--tests \
|
|
||||||
--vendor \
|
|
||||||
--deadline 60s \
|
|
||||||
./... || exit_code=1"
|
|
||||||
- "gometalinter.v1 \
|
|
||||||
--disable-all \
|
|
||||||
--enable golint \
|
|
||||||
--vendor \
|
|
||||||
--deadline 60s \
|
|
||||||
./... || :"
|
|
||||||
- go test -v
|
|
||||||
|
|
||||||
deploy: off
|
|
|
@ -1,24 +0,0 @@
|
||||||
package semver
|
|
||||||
|
|
||||||
// Collection is a collection of Version instances and implements the sort
|
|
||||||
// interface. See the sort package for more details.
|
|
||||||
// https://golang.org/pkg/sort/
|
|
||||||
type Collection []*Version
|
|
||||||
|
|
||||||
// Len returns the length of a collection. The number of Version instances
|
|
||||||
// on the slice.
|
|
||||||
func (c Collection) Len() int {
|
|
||||||
return len(c)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Less is needed for the sort interface to compare two Version objects on the
|
|
||||||
// slice. If checks if one is less than the other.
|
|
||||||
func (c Collection) Less(i, j int) bool {
|
|
||||||
return c[i].LessThan(c[j])
|
|
||||||
}
|
|
||||||
|
|
||||||
// Swap is needed for the sort interface to replace the Version objects
|
|
||||||
// at two different positions in the slice.
|
|
||||||
func (c Collection) Swap(i, j int) {
|
|
||||||
c[i], c[j] = c[j], c[i]
|
|
||||||
}
|
|
|
@ -1,423 +0,0 @@
|
||||||
package semver
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"regexp"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Constraints is one or more constraint that a semantic version can be
|
|
||||||
// checked against.
|
|
||||||
type Constraints struct {
|
|
||||||
constraints [][]*constraint
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewConstraint returns a Constraints instance that a Version instance can
|
|
||||||
// be checked against. If there is a parse error it will be returned.
|
|
||||||
func NewConstraint(c string) (*Constraints, error) {
|
|
||||||
|
|
||||||
// Rewrite - ranges into a comparison operation.
|
|
||||||
c = rewriteRange(c)
|
|
||||||
|
|
||||||
ors := strings.Split(c, "||")
|
|
||||||
or := make([][]*constraint, len(ors))
|
|
||||||
for k, v := range ors {
|
|
||||||
cs := strings.Split(v, ",")
|
|
||||||
result := make([]*constraint, len(cs))
|
|
||||||
for i, s := range cs {
|
|
||||||
pc, err := parseConstraint(s)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
result[i] = pc
|
|
||||||
}
|
|
||||||
or[k] = result
|
|
||||||
}
|
|
||||||
|
|
||||||
o := &Constraints{constraints: or}
|
|
||||||
return o, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check tests if a version satisfies the constraints.
|
|
||||||
func (cs Constraints) Check(v *Version) bool {
|
|
||||||
// loop over the ORs and check the inner ANDs
|
|
||||||
for _, o := range cs.constraints {
|
|
||||||
joy := true
|
|
||||||
for _, c := range o {
|
|
||||||
if !c.check(v) {
|
|
||||||
joy = false
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if joy {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Validate checks if a version satisfies a constraint. If not a slice of
|
|
||||||
// reasons for the failure are returned in addition to a bool.
|
|
||||||
func (cs Constraints) Validate(v *Version) (bool, []error) {
|
|
||||||
// loop over the ORs and check the inner ANDs
|
|
||||||
var e []error
|
|
||||||
|
|
||||||
// Capture the prerelease message only once. When it happens the first time
|
|
||||||
// this var is marked
|
|
||||||
var prerelesase bool
|
|
||||||
for _, o := range cs.constraints {
|
|
||||||
joy := true
|
|
||||||
for _, c := range o {
|
|
||||||
// Before running the check handle the case there the version is
|
|
||||||
// a prerelease and the check is not searching for prereleases.
|
|
||||||
if c.con.pre == "" && v.pre != "" {
|
|
||||||
if !prerelesase {
|
|
||||||
em := fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v)
|
|
||||||
e = append(e, em)
|
|
||||||
prerelesase = true
|
|
||||||
}
|
|
||||||
joy = false
|
|
||||||
|
|
||||||
} else {
|
|
||||||
|
|
||||||
if !c.check(v) {
|
|
||||||
em := fmt.Errorf(c.msg, v, c.orig)
|
|
||||||
e = append(e, em)
|
|
||||||
joy = false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if joy {
|
|
||||||
return true, []error{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return false, e
|
|
||||||
}
|
|
||||||
|
|
||||||
var constraintOps map[string]cfunc
|
|
||||||
var constraintMsg map[string]string
|
|
||||||
var constraintRegex *regexp.Regexp
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
constraintOps = map[string]cfunc{
|
|
||||||
"": constraintTildeOrEqual,
|
|
||||||
"=": constraintTildeOrEqual,
|
|
||||||
"!=": constraintNotEqual,
|
|
||||||
">": constraintGreaterThan,
|
|
||||||
"<": constraintLessThan,
|
|
||||||
">=": constraintGreaterThanEqual,
|
|
||||||
"=>": constraintGreaterThanEqual,
|
|
||||||
"<=": constraintLessThanEqual,
|
|
||||||
"=<": constraintLessThanEqual,
|
|
||||||
"~": constraintTilde,
|
|
||||||
"~>": constraintTilde,
|
|
||||||
"^": constraintCaret,
|
|
||||||
}
|
|
||||||
|
|
||||||
constraintMsg = map[string]string{
|
|
||||||
"": "%s is not equal to %s",
|
|
||||||
"=": "%s is not equal to %s",
|
|
||||||
"!=": "%s is equal to %s",
|
|
||||||
">": "%s is less than or equal to %s",
|
|
||||||
"<": "%s is greater than or equal to %s",
|
|
||||||
">=": "%s is less than %s",
|
|
||||||
"=>": "%s is less than %s",
|
|
||||||
"<=": "%s is greater than %s",
|
|
||||||
"=<": "%s is greater than %s",
|
|
||||||
"~": "%s does not have same major and minor version as %s",
|
|
||||||
"~>": "%s does not have same major and minor version as %s",
|
|
||||||
"^": "%s does not have same major version as %s",
|
|
||||||
}
|
|
||||||
|
|
||||||
ops := make([]string, 0, len(constraintOps))
|
|
||||||
for k := range constraintOps {
|
|
||||||
ops = append(ops, regexp.QuoteMeta(k))
|
|
||||||
}
|
|
||||||
|
|
||||||
constraintRegex = regexp.MustCompile(fmt.Sprintf(
|
|
||||||
`^\s*(%s)\s*(%s)\s*$`,
|
|
||||||
strings.Join(ops, "|"),
|
|
||||||
cvRegex))
|
|
||||||
|
|
||||||
constraintRangeRegex = regexp.MustCompile(fmt.Sprintf(
|
|
||||||
`\s*(%s)\s+-\s+(%s)\s*`,
|
|
||||||
cvRegex, cvRegex))
|
|
||||||
}
|
|
||||||
|
|
||||||
// An individual constraint
|
|
||||||
type constraint struct {
|
|
||||||
// The callback function for the restraint. It performs the logic for
|
|
||||||
// the constraint.
|
|
||||||
function cfunc
|
|
||||||
|
|
||||||
msg string
|
|
||||||
|
|
||||||
// The version used in the constraint check. For example, if a constraint
|
|
||||||
// is '<= 2.0.0' the con a version instance representing 2.0.0.
|
|
||||||
con *Version
|
|
||||||
|
|
||||||
// The original parsed version (e.g., 4.x from != 4.x)
|
|
||||||
orig string
|
|
||||||
|
|
||||||
// When an x is used as part of the version (e.g., 1.x)
|
|
||||||
minorDirty bool
|
|
||||||
dirty bool
|
|
||||||
patchDirty bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if a version meets the constraint
|
|
||||||
func (c *constraint) check(v *Version) bool {
|
|
||||||
return c.function(v, c)
|
|
||||||
}
|
|
||||||
|
|
||||||
// cfunc is the signature shared by all operator comparison functions:
// it reports whether v satisfies constraint c.
type cfunc func(v *Version, c *constraint) bool
|
|
||||||
|
|
||||||
func parseConstraint(c string) (*constraint, error) {
|
|
||||||
m := constraintRegex.FindStringSubmatch(c)
|
|
||||||
if m == nil {
|
|
||||||
return nil, fmt.Errorf("improper constraint: %s", c)
|
|
||||||
}
|
|
||||||
|
|
||||||
ver := m[2]
|
|
||||||
orig := ver
|
|
||||||
minorDirty := false
|
|
||||||
patchDirty := false
|
|
||||||
dirty := false
|
|
||||||
if isX(m[3]) {
|
|
||||||
ver = "0.0.0"
|
|
||||||
dirty = true
|
|
||||||
} else if isX(strings.TrimPrefix(m[4], ".")) || m[4] == "" {
|
|
||||||
minorDirty = true
|
|
||||||
dirty = true
|
|
||||||
ver = fmt.Sprintf("%s.0.0%s", m[3], m[6])
|
|
||||||
} else if isX(strings.TrimPrefix(m[5], ".")) {
|
|
||||||
dirty = true
|
|
||||||
patchDirty = true
|
|
||||||
ver = fmt.Sprintf("%s%s.0%s", m[3], m[4], m[6])
|
|
||||||
}
|
|
||||||
|
|
||||||
con, err := NewVersion(ver)
|
|
||||||
if err != nil {
|
|
||||||
|
|
||||||
// The constraintRegex should catch any regex parsing errors. So,
|
|
||||||
// we should never get here.
|
|
||||||
return nil, errors.New("constraint Parser Error")
|
|
||||||
}
|
|
||||||
|
|
||||||
cs := &constraint{
|
|
||||||
function: constraintOps[m[1]],
|
|
||||||
msg: constraintMsg[m[1]],
|
|
||||||
con: con,
|
|
||||||
orig: orig,
|
|
||||||
minorDirty: minorDirty,
|
|
||||||
patchDirty: patchDirty,
|
|
||||||
dirty: dirty,
|
|
||||||
}
|
|
||||||
return cs, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Constraint functions
|
|
||||||
func constraintNotEqual(v *Version, c *constraint) bool {
|
|
||||||
if c.dirty {
|
|
||||||
|
|
||||||
// If there is a pre-release on the version but the constraint isn't looking
|
|
||||||
// for them assume that pre-releases are not compatible. See issue 21 for
|
|
||||||
// more details.
|
|
||||||
if v.Prerelease() != "" && c.con.Prerelease() == "" {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
if c.con.Major() != v.Major() {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if c.con.Minor() != v.Minor() && !c.minorDirty {
|
|
||||||
return true
|
|
||||||
} else if c.minorDirty {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
return !v.Equal(c.con)
|
|
||||||
}
|
|
||||||
|
|
||||||
func constraintGreaterThan(v *Version, c *constraint) bool {
|
|
||||||
|
|
||||||
// If there is a pre-release on the version but the constraint isn't looking
|
|
||||||
// for them assume that pre-releases are not compatible. See issue 21 for
|
|
||||||
// more details.
|
|
||||||
if v.Prerelease() != "" && c.con.Prerelease() == "" {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
return v.Compare(c.con) == 1
|
|
||||||
}
|
|
||||||
|
|
||||||
func constraintLessThan(v *Version, c *constraint) bool {
|
|
||||||
// If there is a pre-release on the version but the constraint isn't looking
|
|
||||||
// for them assume that pre-releases are not compatible. See issue 21 for
|
|
||||||
// more details.
|
|
||||||
if v.Prerelease() != "" && c.con.Prerelease() == "" {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
if !c.dirty {
|
|
||||||
return v.Compare(c.con) < 0
|
|
||||||
}
|
|
||||||
|
|
||||||
if v.Major() > c.con.Major() {
|
|
||||||
return false
|
|
||||||
} else if v.Minor() > c.con.Minor() && !c.minorDirty {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func constraintGreaterThanEqual(v *Version, c *constraint) bool {
|
|
||||||
|
|
||||||
// If there is a pre-release on the version but the constraint isn't looking
|
|
||||||
// for them assume that pre-releases are not compatible. See issue 21 for
|
|
||||||
// more details.
|
|
||||||
if v.Prerelease() != "" && c.con.Prerelease() == "" {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
return v.Compare(c.con) >= 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func constraintLessThanEqual(v *Version, c *constraint) bool {
|
|
||||||
// If there is a pre-release on the version but the constraint isn't looking
|
|
||||||
// for them assume that pre-releases are not compatible. See issue 21 for
|
|
||||||
// more details.
|
|
||||||
if v.Prerelease() != "" && c.con.Prerelease() == "" {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
if !c.dirty {
|
|
||||||
return v.Compare(c.con) <= 0
|
|
||||||
}
|
|
||||||
|
|
||||||
if v.Major() > c.con.Major() {
|
|
||||||
return false
|
|
||||||
} else if v.Minor() > c.con.Minor() && !c.minorDirty {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// ~*, ~>* --> >= 0.0.0 (any)
|
|
||||||
// ~2, ~2.x, ~2.x.x, ~>2, ~>2.x ~>2.x.x --> >=2.0.0, <3.0.0
|
|
||||||
// ~2.0, ~2.0.x, ~>2.0, ~>2.0.x --> >=2.0.0, <2.1.0
|
|
||||||
// ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0, <1.3.0
|
|
||||||
// ~1.2.3, ~>1.2.3 --> >=1.2.3, <1.3.0
|
|
||||||
// ~1.2.0, ~>1.2.0 --> >=1.2.0, <1.3.0
|
|
||||||
func constraintTilde(v *Version, c *constraint) bool {
|
|
||||||
// If there is a pre-release on the version but the constraint isn't looking
|
|
||||||
// for them assume that pre-releases are not compatible. See issue 21 for
|
|
||||||
// more details.
|
|
||||||
if v.Prerelease() != "" && c.con.Prerelease() == "" {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
if v.LessThan(c.con) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// ~0.0.0 is a special case where all constraints are accepted. It's
|
|
||||||
// equivalent to >= 0.0.0.
|
|
||||||
if c.con.Major() == 0 && c.con.Minor() == 0 && c.con.Patch() == 0 &&
|
|
||||||
!c.minorDirty && !c.patchDirty {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
if v.Major() != c.con.Major() {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
if v.Minor() != c.con.Minor() && !c.minorDirty {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// When there is a .x (dirty) status it automatically opts in to ~. Otherwise
|
|
||||||
// it's a straight =
|
|
||||||
func constraintTildeOrEqual(v *Version, c *constraint) bool {
|
|
||||||
// If there is a pre-release on the version but the constraint isn't looking
|
|
||||||
// for them assume that pre-releases are not compatible. See issue 21 for
|
|
||||||
// more details.
|
|
||||||
if v.Prerelease() != "" && c.con.Prerelease() == "" {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
if c.dirty {
|
|
||||||
c.msg = constraintMsg["~"]
|
|
||||||
return constraintTilde(v, c)
|
|
||||||
}
|
|
||||||
|
|
||||||
return v.Equal(c.con)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ^* --> (any)
|
|
||||||
// ^2, ^2.x, ^2.x.x --> >=2.0.0, <3.0.0
|
|
||||||
// ^2.0, ^2.0.x --> >=2.0.0, <3.0.0
|
|
||||||
// ^1.2, ^1.2.x --> >=1.2.0, <2.0.0
|
|
||||||
// ^1.2.3 --> >=1.2.3, <2.0.0
|
|
||||||
// ^1.2.0 --> >=1.2.0, <2.0.0
|
|
||||||
func constraintCaret(v *Version, c *constraint) bool {
|
|
||||||
// If there is a pre-release on the version but the constraint isn't looking
|
|
||||||
// for them assume that pre-releases are not compatible. See issue 21 for
|
|
||||||
// more details.
|
|
||||||
if v.Prerelease() != "" && c.con.Prerelease() == "" {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
if v.LessThan(c.con) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
if v.Major() != c.con.Major() {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// constraintRangeRegex matches hyphen ranges ("A - B"); compiled in init().
var constraintRangeRegex *regexp.Regexp

// cvRegex is the version pattern used inside constraints. Unlike
// SemVerRegex it also permits the wildcard characters x, X, and *.
const cvRegex string = `v?([0-9|x|X|\*]+)(\.[0-9|x|X|\*]+)?(\.[0-9|x|X|\*]+)?` +
	`(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
	`(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?`
|
|
||||||
|
|
||||||
// isX reports whether the given version component is a wildcard
// character (x, X, or *).
func isX(x string) bool {
	return x == "x" || x == "X" || x == "*"
}
|
|
||||||
|
|
||||||
func rewriteRange(i string) string {
|
|
||||||
m := constraintRangeRegex.FindAllStringSubmatch(i, -1)
|
|
||||||
if m == nil {
|
|
||||||
return i
|
|
||||||
}
|
|
||||||
o := i
|
|
||||||
for _, v := range m {
|
|
||||||
t := fmt.Sprintf(">= %s, <= %s", v[1], v[11])
|
|
||||||
o = strings.Replace(o, v[0], t, 1)
|
|
||||||
}
|
|
||||||
|
|
||||||
return o
|
|
||||||
}
|
|
|
@ -1,115 +0,0 @@
|
||||||
/*
|
|
||||||
Package semver provides the ability to work with Semantic Versions (http://semver.org) in Go.
|
|
||||||
|
|
||||||
Specifically it provides the ability to:
|
|
||||||
|
|
||||||
* Parse semantic versions
|
|
||||||
* Sort semantic versions
|
|
||||||
* Check if a semantic version fits within a set of constraints
|
|
||||||
* Optionally work with a `v` prefix
|
|
||||||
|
|
||||||
Parsing Semantic Versions
|
|
||||||
|
|
||||||
To parse a semantic version use the `NewVersion` function. For example,
|
|
||||||
|
|
||||||
v, err := semver.NewVersion("1.2.3-beta.1+build345")
|
|
||||||
|
|
||||||
If there is an error the version wasn't parseable. The version object has methods
|
|
||||||
to get the parts of the version, compare it to other versions, convert the
|
|
||||||
version back into a string, and get the original string. For more details
|
|
||||||
please see the documentation at https://godoc.org/github.com/Masterminds/semver.
|
|
||||||
|
|
||||||
Sorting Semantic Versions
|
|
||||||
|
|
||||||
A set of versions can be sorted using the `sort` package from the standard library.
|
|
||||||
For example,
|
|
||||||
|
|
||||||
raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",}
|
|
||||||
vs := make([]*semver.Version, len(raw))
|
|
||||||
for i, r := range raw {
|
|
||||||
v, err := semver.NewVersion(r)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("Error parsing version: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
vs[i] = v
|
|
||||||
}
|
|
||||||
|
|
||||||
sort.Sort(semver.Collection(vs))
|
|
||||||
|
|
||||||
Checking Version Constraints
|
|
||||||
|
|
||||||
Checking a version against version constraints is one of the most featureful
|
|
||||||
parts of the package.
|
|
||||||
|
|
||||||
c, err := semver.NewConstraint(">= 1.2.3")
|
|
||||||
if err != nil {
|
|
||||||
// Handle constraint not being parseable.
|
|
||||||
}
|
|
||||||
|
|
||||||
v, err := semver.NewVersion("1.3")
|
|
||||||
if err != nil {
|
|
||||||
// Handle version not being parseable.
|
|
||||||
}
|
|
||||||
// Check if the version meets the constraints. The a variable will be true.
|
|
||||||
a := c.Check(v)
|
|
||||||
|
|
||||||
Basic Comparisons
|
|
||||||
|
|
||||||
There are two elements to the comparisons. First, a comparison string is a list
|
|
||||||
of comma separated and comparisons. These are then separated by || separated or
|
|
||||||
comparisons. For example, `">= 1.2, < 3.0.0 || >= 4.2.3"` is looking for a
|
|
||||||
comparison that's greater than or equal to 1.2 and less than 3.0.0 or is
|
|
||||||
greater than or equal to 4.2.3.
|
|
||||||
|
|
||||||
The basic comparisons are:
|
|
||||||
|
|
||||||
* `=`: equal (aliased to no operator)
|
|
||||||
* `!=`: not equal
|
|
||||||
* `>`: greater than
|
|
||||||
* `<`: less than
|
|
||||||
* `>=`: greater than or equal to
|
|
||||||
* `<=`: less than or equal to
|
|
||||||
|
|
||||||
Hyphen Range Comparisons
|
|
||||||
|
|
||||||
There are multiple methods to handle ranges and the first is hyphens ranges.
|
|
||||||
These look like:
|
|
||||||
|
|
||||||
* `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5`
|
|
||||||
* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4, <= 4.5`
|
|
||||||
|
|
||||||
Wildcards In Comparisons
|
|
||||||
|
|
||||||
The `x`, `X`, and `*` characters can be used as a wildcard character. This works
|
|
||||||
for all comparison operators. When used on the `=` operator it falls
|
|
||||||
back to the patch level comparison (see tilde below). For example,
|
|
||||||
|
|
||||||
* `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
|
|
||||||
* `>= 1.2.x` is equivalent to `>= 1.2.0`
|
|
||||||
* `<= 2.x` is equivalent to `<= 3`
|
|
||||||
* `*` is equivalent to `>= 0.0.0`
|
|
||||||
|
|
||||||
Tilde Range Comparisons (Patch)
|
|
||||||
|
|
||||||
The tilde (`~`) comparison operator is for patch level ranges when a minor
|
|
||||||
version is specified and major level changes when the minor number is missing.
|
|
||||||
For example,
|
|
||||||
|
|
||||||
* `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0`
|
|
||||||
* `~1` is equivalent to `>= 1, < 2`
|
|
||||||
* `~2.3` is equivalent to `>= 2.3, < 2.4`
|
|
||||||
* `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
|
|
||||||
* `~1.x` is equivalent to `>= 1, < 2`
|
|
||||||
|
|
||||||
Caret Range Comparisons (Major)
|
|
||||||
|
|
||||||
The caret (`^`) comparison operator is for major level changes. This is useful
|
|
||||||
when comparisons of API versions as a major change is API breaking. For example,
|
|
||||||
|
|
||||||
* `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0`
|
|
||||||
* `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0`
|
|
||||||
* `^2.3` is equivalent to `>= 2.3, < 3`
|
|
||||||
* `^2.x` is equivalent to `>= 2.0.0, < 3`
|
|
||||||
*/
|
|
||||||
package semver
|
|
|
@ -1,425 +0,0 @@
|
||||||
package semver
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"regexp"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// The compiled version of the regex created at init() is cached here so it
// only needs to be created once.
var versionRegex *regexp.Regexp

// validPrereleaseRegex validates prerelease and metadata strings; it is
// also compiled once at init().
var validPrereleaseRegex *regexp.Regexp
|
|
||||||
|
|
||||||
var (
	// ErrInvalidSemVer is returned when a version is found to be invalid
	// when being parsed.
	ErrInvalidSemVer = errors.New("Invalid Semantic Version")

	// ErrInvalidMetadata is returned when the metadata is an invalid format.
	ErrInvalidMetadata = errors.New("Invalid Metadata string")

	// ErrInvalidPrerelease is returned when the pre-release is an invalid format.
	ErrInvalidPrerelease = errors.New("Invalid Prerelease string")
)
|
|
||||||
|
|
||||||
// SemVerRegex is the regular expression used to parse a semantic version.
// It captures, in order: major, .minor, .patch, the -prerelease group,
// and the +metadata group; an optional leading lowercase "v" is allowed.
const SemVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` +
	`(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
	`(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?`
|
|
||||||
|
|
||||||
// ValidPrerelease is the regular expression which validates
// both prerelease and metadata values: dot-separated groups of
// alphanumerics and hyphens.
const ValidPrerelease string = `^([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*)$`
|
|
||||||
|
|
||||||
// Version represents a single semantic version.
type Version struct {
	// Numeric components, e.g. 1.2.3.
	major, minor, patch int64
	// pre holds the pre-release portion (the part after '-'), if any.
	pre string
	// metadata holds the build metadata (the part after '+'), if any.
	metadata string
	// original is the exact input string this version was parsed from.
	original string
}
|
|
||||||
|
|
||||||
// init compiles the package's version and prerelease regular expressions
// once so every parse reuses them.
func init() {
	versionRegex = regexp.MustCompile("^" + SemVerRegex + "$")
	validPrereleaseRegex = regexp.MustCompile(ValidPrerelease)
}
|
|
||||||
|
|
||||||
// NewVersion parses a given version and returns an instance of Version or
|
|
||||||
// an error if unable to parse the version.
|
|
||||||
func NewVersion(v string) (*Version, error) {
|
|
||||||
m := versionRegex.FindStringSubmatch(v)
|
|
||||||
if m == nil {
|
|
||||||
return nil, ErrInvalidSemVer
|
|
||||||
}
|
|
||||||
|
|
||||||
sv := &Version{
|
|
||||||
metadata: m[8],
|
|
||||||
pre: m[5],
|
|
||||||
original: v,
|
|
||||||
}
|
|
||||||
|
|
||||||
var temp int64
|
|
||||||
temp, err := strconv.ParseInt(m[1], 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("Error parsing version segment: %s", err)
|
|
||||||
}
|
|
||||||
sv.major = temp
|
|
||||||
|
|
||||||
if m[2] != "" {
|
|
||||||
temp, err = strconv.ParseInt(strings.TrimPrefix(m[2], "."), 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("Error parsing version segment: %s", err)
|
|
||||||
}
|
|
||||||
sv.minor = temp
|
|
||||||
} else {
|
|
||||||
sv.minor = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
if m[3] != "" {
|
|
||||||
temp, err = strconv.ParseInt(strings.TrimPrefix(m[3], "."), 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("Error parsing version segment: %s", err)
|
|
||||||
}
|
|
||||||
sv.patch = temp
|
|
||||||
} else {
|
|
||||||
sv.patch = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
return sv, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// MustParse parses a given version and panics on error.
|
|
||||||
func MustParse(v string) *Version {
|
|
||||||
sv, err := NewVersion(v)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
return sv
|
|
||||||
}
|
|
||||||
|
|
||||||
// String converts a Version object to a string.
|
|
||||||
// Note, if the original version contained a leading v this version will not.
|
|
||||||
// See the Original() method to retrieve the original value. Semantic Versions
|
|
||||||
// don't contain a leading v per the spec. Instead it's optional on
|
|
||||||
// implementation.
|
|
||||||
func (v *Version) String() string {
|
|
||||||
var buf bytes.Buffer
|
|
||||||
|
|
||||||
fmt.Fprintf(&buf, "%d.%d.%d", v.major, v.minor, v.patch)
|
|
||||||
if v.pre != "" {
|
|
||||||
fmt.Fprintf(&buf, "-%s", v.pre)
|
|
||||||
}
|
|
||||||
if v.metadata != "" {
|
|
||||||
fmt.Fprintf(&buf, "+%s", v.metadata)
|
|
||||||
}
|
|
||||||
|
|
||||||
return buf.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Original returns the original value passed in to be parsed.
func (v *Version) Original() string {
	return v.original
}

// Major returns the major version.
func (v *Version) Major() int64 {
	return v.major
}

// Minor returns the minor version.
func (v *Version) Minor() int64 {
	return v.minor
}

// Patch returns the patch version.
func (v *Version) Patch() int64 {
	return v.patch
}

// Prerelease returns the pre-release version (the part after '-'),
// or an empty string when there is none.
func (v *Version) Prerelease() string {
	return v.pre
}

// Metadata returns the build metadata on the version (the part after
// '+'), or an empty string when there is none.
func (v *Version) Metadata() string {
	return v.metadata
}
|
|
||||||
|
|
||||||
// originalVPrefix returns the original 'v' prefix if any.
|
|
||||||
func (v *Version) originalVPrefix() string {
|
|
||||||
|
|
||||||
// Note, only lowercase v is supported as a prefix by the parser.
|
|
||||||
if v.original != "" && v.original[:1] == "v" {
|
|
||||||
return v.original[:1]
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// IncPatch produces the next patch version.
|
|
||||||
// If the current version does not have prerelease/metadata information,
|
|
||||||
// it unsets metadata and prerelease values, increments patch number.
|
|
||||||
// If the current version has any of prerelease or metadata information,
|
|
||||||
// it unsets both values and keeps curent patch value
|
|
||||||
func (v Version) IncPatch() Version {
|
|
||||||
vNext := v
|
|
||||||
// according to http://semver.org/#spec-item-9
|
|
||||||
// Pre-release versions have a lower precedence than the associated normal version.
|
|
||||||
// according to http://semver.org/#spec-item-10
|
|
||||||
// Build metadata SHOULD be ignored when determining version precedence.
|
|
||||||
if v.pre != "" {
|
|
||||||
vNext.metadata = ""
|
|
||||||
vNext.pre = ""
|
|
||||||
} else {
|
|
||||||
vNext.metadata = ""
|
|
||||||
vNext.pre = ""
|
|
||||||
vNext.patch = v.patch + 1
|
|
||||||
}
|
|
||||||
vNext.original = v.originalVPrefix() + "" + vNext.String()
|
|
||||||
return vNext
|
|
||||||
}
|
|
||||||
|
|
||||||
// IncMinor produces the next minor version.
|
|
||||||
// Sets patch to 0.
|
|
||||||
// Increments minor number.
|
|
||||||
// Unsets metadata.
|
|
||||||
// Unsets prerelease status.
|
|
||||||
func (v Version) IncMinor() Version {
|
|
||||||
vNext := v
|
|
||||||
vNext.metadata = ""
|
|
||||||
vNext.pre = ""
|
|
||||||
vNext.patch = 0
|
|
||||||
vNext.minor = v.minor + 1
|
|
||||||
vNext.original = v.originalVPrefix() + "" + vNext.String()
|
|
||||||
return vNext
|
|
||||||
}
|
|
||||||
|
|
||||||
// IncMajor produces the next major version.
|
|
||||||
// Sets patch to 0.
|
|
||||||
// Sets minor to 0.
|
|
||||||
// Increments major number.
|
|
||||||
// Unsets metadata.
|
|
||||||
// Unsets prerelease status.
|
|
||||||
func (v Version) IncMajor() Version {
|
|
||||||
vNext := v
|
|
||||||
vNext.metadata = ""
|
|
||||||
vNext.pre = ""
|
|
||||||
vNext.patch = 0
|
|
||||||
vNext.minor = 0
|
|
||||||
vNext.major = v.major + 1
|
|
||||||
vNext.original = v.originalVPrefix() + "" + vNext.String()
|
|
||||||
return vNext
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetPrerelease defines the prerelease value.
|
|
||||||
// Value must not include the required 'hypen' prefix.
|
|
||||||
func (v Version) SetPrerelease(prerelease string) (Version, error) {
|
|
||||||
vNext := v
|
|
||||||
if len(prerelease) > 0 && !validPrereleaseRegex.MatchString(prerelease) {
|
|
||||||
return vNext, ErrInvalidPrerelease
|
|
||||||
}
|
|
||||||
vNext.pre = prerelease
|
|
||||||
vNext.original = v.originalVPrefix() + "" + vNext.String()
|
|
||||||
return vNext, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetMetadata defines metadata value.
|
|
||||||
// Value must not include the required 'plus' prefix.
|
|
||||||
func (v Version) SetMetadata(metadata string) (Version, error) {
|
|
||||||
vNext := v
|
|
||||||
if len(metadata) > 0 && !validPrereleaseRegex.MatchString(metadata) {
|
|
||||||
return vNext, ErrInvalidMetadata
|
|
||||||
}
|
|
||||||
vNext.metadata = metadata
|
|
||||||
vNext.original = v.originalVPrefix() + "" + vNext.String()
|
|
||||||
return vNext, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// LessThan tests if one version is less than another one.
// It is equivalent to v.Compare(o) < 0.
func (v *Version) LessThan(o *Version) bool {
	return v.Compare(o) < 0
}
|
|
||||||
|
|
||||||
// GreaterThan tests if one version is greater than another one.
// It is equivalent to v.Compare(o) > 0.
func (v *Version) GreaterThan(o *Version) bool {
	return v.Compare(o) > 0
}
|
|
||||||
|
|
||||||
// Equal tests if two versions are equal to each other.
// Note, versions can be equal with different metadata since metadata
// is not considered part of the comparable version.
func (v *Version) Equal(o *Version) bool {
	return v.Compare(o) == 0
}
|
|
||||||
|
|
||||||
// Compare compares this version to another one. It returns -1, 0, or 1 if
|
|
||||||
// the version smaller, equal, or larger than the other version.
|
|
||||||
//
|
|
||||||
// Versions are compared by X.Y.Z. Build metadata is ignored. Prerelease is
|
|
||||||
// lower than the version without a prerelease.
|
|
||||||
func (v *Version) Compare(o *Version) int {
|
|
||||||
// Compare the major, minor, and patch version for differences. If a
|
|
||||||
// difference is found return the comparison.
|
|
||||||
if d := compareSegment(v.Major(), o.Major()); d != 0 {
|
|
||||||
return d
|
|
||||||
}
|
|
||||||
if d := compareSegment(v.Minor(), o.Minor()); d != 0 {
|
|
||||||
return d
|
|
||||||
}
|
|
||||||
if d := compareSegment(v.Patch(), o.Patch()); d != 0 {
|
|
||||||
return d
|
|
||||||
}
|
|
||||||
|
|
||||||
// At this point the major, minor, and patch versions are the same.
|
|
||||||
ps := v.pre
|
|
||||||
po := o.Prerelease()
|
|
||||||
|
|
||||||
if ps == "" && po == "" {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
if ps == "" {
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
if po == "" {
|
|
||||||
return -1
|
|
||||||
}
|
|
||||||
|
|
||||||
return comparePrerelease(ps, po)
|
|
||||||
}
|
|
||||||
|
|
||||||
// UnmarshalJSON implements JSON.Unmarshaler interface.
|
|
||||||
func (v *Version) UnmarshalJSON(b []byte) error {
|
|
||||||
var s string
|
|
||||||
if err := json.Unmarshal(b, &s); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
temp, err := NewVersion(s)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
v.major = temp.major
|
|
||||||
v.minor = temp.minor
|
|
||||||
v.patch = temp.patch
|
|
||||||
v.pre = temp.pre
|
|
||||||
v.metadata = temp.metadata
|
|
||||||
v.original = temp.original
|
|
||||||
temp = nil
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// MarshalJSON implements JSON.Marshaler interface. The version is encoded
// as its canonical string form (see String).
func (v *Version) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.String())
}
|
|
||||||
|
|
||||||
// compareSegment returns -1, 0, or 1 depending on whether v is less
// than, equal to, or greater than o.
func compareSegment(v, o int64) int {
	switch {
	case v < o:
		return -1
	case v > o:
		return 1
	default:
		return 0
	}
}
|
|
||||||
|
|
||||||
func comparePrerelease(v, o string) int {
|
|
||||||
|
|
||||||
// split the prelease versions by their part. The separator, per the spec,
|
|
||||||
// is a .
|
|
||||||
sparts := strings.Split(v, ".")
|
|
||||||
oparts := strings.Split(o, ".")
|
|
||||||
|
|
||||||
// Find the longer length of the parts to know how many loop iterations to
|
|
||||||
// go through.
|
|
||||||
slen := len(sparts)
|
|
||||||
olen := len(oparts)
|
|
||||||
|
|
||||||
l := slen
|
|
||||||
if olen > slen {
|
|
||||||
l = olen
|
|
||||||
}
|
|
||||||
|
|
||||||
// Iterate over each part of the prereleases to compare the differences.
|
|
||||||
for i := 0; i < l; i++ {
|
|
||||||
// Since the lentgh of the parts can be different we need to create
|
|
||||||
// a placeholder. This is to avoid out of bounds issues.
|
|
||||||
stemp := ""
|
|
||||||
if i < slen {
|
|
||||||
stemp = sparts[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
otemp := ""
|
|
||||||
if i < olen {
|
|
||||||
otemp = oparts[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
d := comparePrePart(stemp, otemp)
|
|
||||||
if d != 0 {
|
|
||||||
return d
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reaching here means two versions are of equal value but have different
|
|
||||||
// metadata (the part following a +). They are not identical in string form
|
|
||||||
// but the version comparison finds them to be equal.
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// comparePrePart compares a single pre-release identifier from each side
// and returns -1, 0, or 1. Numeric identifiers are compared by value
// (so "2" < "10") and always sort below alphanumeric identifiers.
func comparePrePart(s, o string) int {
	// Fastpath if they are equal
	if s == o {
		return 0
	}

	// An empty identifier means that side ran out of parts; the shorter
	// pre-release sorts lower.
	if s == "" {
		if o != "" {
			return -1
		}
		return 1
	}
	if o == "" {
		if s != "" {
			return 1
		}
		return -1
	}

	// Try to treat both identifiers as numbers. Per the semver spec
	// numbers are always positive (a leading '-' as in "-99" makes it an
	// alphanum), which is why ParseUint is used and failures simply mean
	// "alphanumeric".
	oNum, oErr := strconv.ParseUint(o, 10, 64)
	sNum, sErr := strconv.ParseUint(s, 10, 64)

	switch {
	case oErr != nil && sErr != nil:
		// Both alphanumeric: plain lexical comparison.
		if s > o {
			return 1
		}
		return -1
	case oErr != nil:
		// s is a number, o is alphanumeric: numbers sort first.
		return -1
	case sErr != nil:
		// s is alphanumeric, o is a number.
		return 1
	}

	// Both numeric: compare by value.
	if sNum > oNum {
		return 1
	}
	return -1
}
|
|
|
@ -1,10 +0,0 @@
|
||||||
// +build gofuzz
|
|
||||||
|
|
||||||
package semver
|
|
||||||
|
|
||||||
// Fuzz is the go-fuzz entry point for NewVersion: it returns 1 when the
// input parses as a valid semantic version (interesting corpus input)
// and 0 otherwise.
func Fuzz(data []byte) int {
	if _, err := NewVersion(string(data)); err != nil {
		return 0
	}
	return 1
}
|
|
|
@ -1,14 +0,0 @@
|
||||||
# Binaries for programs and plugins
|
|
||||||
*.exe
|
|
||||||
*.exe~
|
|
||||||
*.dll
|
|
||||||
*.so
|
|
||||||
*.dylib
|
|
||||||
|
|
||||||
# Test binary, build with `go test -c`
|
|
||||||
*.test
|
|
||||||
|
|
||||||
# Output of the go coverage tool, specifically when used with LiteIDE
|
|
||||||
*.out
|
|
||||||
|
|
||||||
.idea
|
|
|
@ -1,674 +0,0 @@
|
||||||
GNU GENERAL PUBLIC LICENSE
|
|
||||||
Version 3, 29 June 2007
|
|
||||||
|
|
||||||
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
|
|
||||||
Everyone is permitted to copy and distribute verbatim copies
|
|
||||||
of this license document, but changing it is not allowed.
|
|
||||||
|
|
||||||
Preamble
|
|
||||||
|
|
||||||
The GNU General Public License is a free, copyleft license for
|
|
||||||
software and other kinds of works.
|
|
||||||
|
|
||||||
The licenses for most software and other practical works are designed
|
|
||||||
to take away your freedom to share and change the works. By contrast,
|
|
||||||
the GNU General Public License is intended to guarantee your freedom to
|
|
||||||
share and change all versions of a program--to make sure it remains free
|
|
||||||
software for all its users. We, the Free Software Foundation, use the
|
|
||||||
GNU General Public License for most of our software; it applies also to
|
|
||||||
any other work released this way by its authors. You can apply it to
|
|
||||||
your programs, too.
|
|
||||||
|
|
||||||
When we speak of free software, we are referring to freedom, not
|
|
||||||
price. Our General Public Licenses are designed to make sure that you
|
|
||||||
have the freedom to distribute copies of free software (and charge for
|
|
||||||
them if you wish), that you receive source code or can get it if you
|
|
||||||
want it, that you can change the software or use pieces of it in new
|
|
||||||
free programs, and that you know you can do these things.
|
|
||||||
|
|
||||||
To protect your rights, we need to prevent others from denying you
|
|
||||||
these rights or asking you to surrender the rights. Therefore, you have
|
|
||||||
certain responsibilities if you distribute copies of the software, or if
|
|
||||||
you modify it: responsibilities to respect the freedom of others.
|
|
||||||
|
|
||||||
For example, if you distribute copies of such a program, whether
|
|
||||||
gratis or for a fee, you must pass on to the recipients the same
|
|
||||||
freedoms that you received. You must make sure that they, too, receive
|
|
||||||
or can get the source code. And you must show them these terms so they
|
|
||||||
know their rights.
|
|
||||||
|
|
||||||
Developers that use the GNU GPL protect your rights with two steps:
|
|
||||||
(1) assert copyright on the software, and (2) offer you this License
|
|
||||||
giving you legal permission to copy, distribute and/or modify it.
|
|
||||||
|
|
||||||
For the developers' and authors' protection, the GPL clearly explains
|
|
||||||
that there is no warranty for this free software. For both users' and
|
|
||||||
authors' sake, the GPL requires that modified versions be marked as
|
|
||||||
changed, so that their problems will not be attributed erroneously to
|
|
||||||
authors of previous versions.
|
|
||||||
|
|
||||||
Some devices are designed to deny users access to install or run
|
|
||||||
modified versions of the software inside them, although the manufacturer
|
|
||||||
can do so. This is fundamentally incompatible with the aim of
|
|
||||||
protecting users' freedom to change the software. The systematic
|
|
||||||
pattern of such abuse occurs in the area of products for individuals to
|
|
||||||
use, which is precisely where it is most unacceptable. Therefore, we
|
|
||||||
have designed this version of the GPL to prohibit the practice for those
|
|
||||||
products. If such problems arise substantially in other domains, we
|
|
||||||
stand ready to extend this provision to those domains in future versions
|
|
||||||
of the GPL, as needed to protect the freedom of users.
|
|
||||||
|
|
||||||
Finally, every program is threatened constantly by software patents.
|
|
||||||
States should not allow patents to restrict development and use of
|
|
||||||
software on general-purpose computers, but in those that do, we wish to
|
|
||||||
avoid the special danger that patents applied to a free program could
|
|
||||||
make it effectively proprietary. To prevent this, the GPL assures that
|
|
||||||
patents cannot be used to render the program non-free.
|
|
||||||
|
|
||||||
The precise terms and conditions for copying, distribution and
|
|
||||||
modification follow.
|
|
||||||
|
|
||||||
TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
0. Definitions.
|
|
||||||
|
|
||||||
"This License" refers to version 3 of the GNU General Public License.
|
|
||||||
|
|
||||||
"Copyright" also means copyright-like laws that apply to other kinds of
|
|
||||||
works, such as semiconductor masks.
|
|
||||||
|
|
||||||
"The Program" refers to any copyrightable work licensed under this
|
|
||||||
License. Each licensee is addressed as "you". "Licensees" and
|
|
||||||
"recipients" may be individuals or organizations.
|
|
||||||
|
|
||||||
To "modify" a work means to copy from or adapt all or part of the work
|
|
||||||
in a fashion requiring copyright permission, other than the making of an
|
|
||||||
exact copy. The resulting work is called a "modified version" of the
|
|
||||||
earlier work or a work "based on" the earlier work.
|
|
||||||
|
|
||||||
A "covered work" means either the unmodified Program or a work based
|
|
||||||
on the Program.
|
|
||||||
|
|
||||||
To "propagate" a work means to do anything with it that, without
|
|
||||||
permission, would make you directly or secondarily liable for
|
|
||||||
infringement under applicable copyright law, except executing it on a
|
|
||||||
computer or modifying a private copy. Propagation includes copying,
|
|
||||||
distribution (with or without modification), making available to the
|
|
||||||
public, and in some countries other activities as well.
|
|
||||||
|
|
||||||
To "convey" a work means any kind of propagation that enables other
|
|
||||||
parties to make or receive copies. Mere interaction with a user through
|
|
||||||
a computer network, with no transfer of a copy, is not conveying.
|
|
||||||
|
|
||||||
An interactive user interface displays "Appropriate Legal Notices"
|
|
||||||
to the extent that it includes a convenient and prominently visible
|
|
||||||
feature that (1) displays an appropriate copyright notice, and (2)
|
|
||||||
tells the user that there is no warranty for the work (except to the
|
|
||||||
extent that warranties are provided), that licensees may convey the
|
|
||||||
work under this License, and how to view a copy of this License. If
|
|
||||||
the interface presents a list of user commands or options, such as a
|
|
||||||
menu, a prominent item in the list meets this criterion.
|
|
||||||
|
|
||||||
1. Source Code.
|
|
||||||
|
|
||||||
The "source code" for a work means the preferred form of the work
|
|
||||||
for making modifications to it. "Object code" means any non-source
|
|
||||||
form of a work.
|
|
||||||
|
|
||||||
A "Standard Interface" means an interface that either is an official
|
|
||||||
standard defined by a recognized standards body, or, in the case of
|
|
||||||
interfaces specified for a particular programming language, one that
|
|
||||||
is widely used among developers working in that language.
|
|
||||||
|
|
||||||
The "System Libraries" of an executable work include anything, other
|
|
||||||
than the work as a whole, that (a) is included in the normal form of
|
|
||||||
packaging a Major Component, but which is not part of that Major
|
|
||||||
Component, and (b) serves only to enable use of the work with that
|
|
||||||
Major Component, or to implement a Standard Interface for which an
|
|
||||||
implementation is available to the public in source code form. A
|
|
||||||
"Major Component", in this context, means a major essential component
|
|
||||||
(kernel, window system, and so on) of the specific operating system
|
|
||||||
(if any) on which the executable work runs, or a compiler used to
|
|
||||||
produce the work, or an object code interpreter used to run it.
|
|
||||||
|
|
||||||
The "Corresponding Source" for a work in object code form means all
|
|
||||||
the source code needed to generate, install, and (for an executable
|
|
||||||
work) run the object code and to modify the work, including scripts to
|
|
||||||
control those activities. However, it does not include the work's
|
|
||||||
System Libraries, or general-purpose tools or generally available free
|
|
||||||
programs which are used unmodified in performing those activities but
|
|
||||||
which are not part of the work. For example, Corresponding Source
|
|
||||||
includes interface definition files associated with source files for
|
|
||||||
the work, and the source code for shared libraries and dynamically
|
|
||||||
linked subprograms that the work is specifically designed to require,
|
|
||||||
such as by intimate data communication or control flow between those
|
|
||||||
subprograms and other parts of the work.
|
|
||||||
|
|
||||||
The Corresponding Source need not include anything that users
|
|
||||||
can regenerate automatically from other parts of the Corresponding
|
|
||||||
Source.
|
|
||||||
|
|
||||||
The Corresponding Source for a work in source code form is that
|
|
||||||
same work.
|
|
||||||
|
|
||||||
2. Basic Permissions.
|
|
||||||
|
|
||||||
All rights granted under this License are granted for the term of
|
|
||||||
copyright on the Program, and are irrevocable provided the stated
|
|
||||||
conditions are met. This License explicitly affirms your unlimited
|
|
||||||
permission to run the unmodified Program. The output from running a
|
|
||||||
covered work is covered by this License only if the output, given its
|
|
||||||
content, constitutes a covered work. This License acknowledges your
|
|
||||||
rights of fair use or other equivalent, as provided by copyright law.
|
|
||||||
|
|
||||||
You may make, run and propagate covered works that you do not
|
|
||||||
convey, without conditions so long as your license otherwise remains
|
|
||||||
in force. You may convey covered works to others for the sole purpose
|
|
||||||
of having them make modifications exclusively for you, or provide you
|
|
||||||
with facilities for running those works, provided that you comply with
|
|
||||||
the terms of this License in conveying all material for which you do
|
|
||||||
not control copyright. Those thus making or running the covered works
|
|
||||||
for you must do so exclusively on your behalf, under your direction
|
|
||||||
and control, on terms that prohibit them from making any copies of
|
|
||||||
your copyrighted material outside their relationship with you.
|
|
||||||
|
|
||||||
Conveying under any other circumstances is permitted solely under
|
|
||||||
the conditions stated below. Sublicensing is not allowed; section 10
|
|
||||||
makes it unnecessary.
|
|
||||||
|
|
||||||
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
|
|
||||||
|
|
||||||
No covered work shall be deemed part of an effective technological
|
|
||||||
measure under any applicable law fulfilling obligations under article
|
|
||||||
11 of the WIPO copyright treaty adopted on 20 December 1996, or
|
|
||||||
similar laws prohibiting or restricting circumvention of such
|
|
||||||
measures.
|
|
||||||
|
|
||||||
When you convey a covered work, you waive any legal power to forbid
|
|
||||||
circumvention of technological measures to the extent such circumvention
|
|
||||||
is effected by exercising rights under this License with respect to
|
|
||||||
the covered work, and you disclaim any intention to limit operation or
|
|
||||||
modification of the work as a means of enforcing, against the work's
|
|
||||||
users, your or third parties' legal rights to forbid circumvention of
|
|
||||||
technological measures.
|
|
||||||
|
|
||||||
4. Conveying Verbatim Copies.
|
|
||||||
|
|
||||||
You may convey verbatim copies of the Program's source code as you
|
|
||||||
receive it, in any medium, provided that you conspicuously and
|
|
||||||
appropriately publish on each copy an appropriate copyright notice;
|
|
||||||
keep intact all notices stating that this License and any
|
|
||||||
non-permissive terms added in accord with section 7 apply to the code;
|
|
||||||
keep intact all notices of the absence of any warranty; and give all
|
|
||||||
recipients a copy of this License along with the Program.
|
|
||||||
|
|
||||||
You may charge any price or no price for each copy that you convey,
|
|
||||||
and you may offer support or warranty protection for a fee.
|
|
||||||
|
|
||||||
5. Conveying Modified Source Versions.
|
|
||||||
|
|
||||||
You may convey a work based on the Program, or the modifications to
|
|
||||||
produce it from the Program, in the form of source code under the
|
|
||||||
terms of section 4, provided that you also meet all of these conditions:
|
|
||||||
|
|
||||||
a) The work must carry prominent notices stating that you modified
|
|
||||||
it, and giving a relevant date.
|
|
||||||
|
|
||||||
b) The work must carry prominent notices stating that it is
|
|
||||||
released under this License and any conditions added under section
|
|
||||||
7. This requirement modifies the requirement in section 4 to
|
|
||||||
"keep intact all notices".
|
|
||||||
|
|
||||||
c) You must license the entire work, as a whole, under this
|
|
||||||
License to anyone who comes into possession of a copy. This
|
|
||||||
License will therefore apply, along with any applicable section 7
|
|
||||||
additional terms, to the whole of the work, and all its parts,
|
|
||||||
regardless of how they are packaged. This License gives no
|
|
||||||
permission to license the work in any other way, but it does not
|
|
||||||
invalidate such permission if you have separately received it.
|
|
||||||
|
|
||||||
d) If the work has interactive user interfaces, each must display
|
|
||||||
Appropriate Legal Notices; however, if the Program has interactive
|
|
||||||
interfaces that do not display Appropriate Legal Notices, your
|
|
||||||
work need not make them do so.
|
|
||||||
|
|
||||||
A compilation of a covered work with other separate and independent
|
|
||||||
works, which are not by their nature extensions of the covered work,
|
|
||||||
and which are not combined with it such as to form a larger program,
|
|
||||||
in or on a volume of a storage or distribution medium, is called an
|
|
||||||
"aggregate" if the compilation and its resulting copyright are not
|
|
||||||
used to limit the access or legal rights of the compilation's users
|
|
||||||
beyond what the individual works permit. Inclusion of a covered work
|
|
||||||
in an aggregate does not cause this License to apply to the other
|
|
||||||
parts of the aggregate.
|
|
||||||
|
|
||||||
6. Conveying Non-Source Forms.
|
|
||||||
|
|
||||||
You may convey a covered work in object code form under the terms
|
|
||||||
of sections 4 and 5, provided that you also convey the
|
|
||||||
machine-readable Corresponding Source under the terms of this License,
|
|
||||||
in one of these ways:
|
|
||||||
|
|
||||||
a) Convey the object code in, or embodied in, a physical product
|
|
||||||
(including a physical distribution medium), accompanied by the
|
|
||||||
Corresponding Source fixed on a durable physical medium
|
|
||||||
customarily used for software interchange.
|
|
||||||
|
|
||||||
b) Convey the object code in, or embodied in, a physical product
|
|
||||||
(including a physical distribution medium), accompanied by a
|
|
||||||
written offer, valid for at least three years and valid for as
|
|
||||||
long as you offer spare parts or customer support for that product
|
|
||||||
model, to give anyone who possesses the object code either (1) a
|
|
||||||
copy of the Corresponding Source for all the software in the
|
|
||||||
product that is covered by this License, on a durable physical
|
|
||||||
medium customarily used for software interchange, for a price no
|
|
||||||
more than your reasonable cost of physically performing this
|
|
||||||
conveying of source, or (2) access to copy the
|
|
||||||
Corresponding Source from a network server at no charge.
|
|
||||||
|
|
||||||
c) Convey individual copies of the object code with a copy of the
|
|
||||||
written offer to provide the Corresponding Source. This
|
|
||||||
alternative is allowed only occasionally and noncommercially, and
|
|
||||||
only if you received the object code with such an offer, in accord
|
|
||||||
with subsection 6b.
|
|
||||||
|
|
||||||
d) Convey the object code by offering access from a designated
|
|
||||||
place (gratis or for a charge), and offer equivalent access to the
|
|
||||||
Corresponding Source in the same way through the same place at no
|
|
||||||
further charge. You need not require recipients to copy the
|
|
||||||
Corresponding Source along with the object code. If the place to
|
|
||||||
copy the object code is a network server, the Corresponding Source
|
|
||||||
may be on a different server (operated by you or a third party)
|
|
||||||
that supports equivalent copying facilities, provided you maintain
|
|
||||||
clear directions next to the object code saying where to find the
|
|
||||||
Corresponding Source. Regardless of what server hosts the
|
|
||||||
Corresponding Source, you remain obligated to ensure that it is
|
|
||||||
available for as long as needed to satisfy these requirements.
|
|
||||||
|
|
||||||
e) Convey the object code using peer-to-peer transmission, provided
|
|
||||||
you inform other peers where the object code and Corresponding
|
|
||||||
Source of the work are being offered to the general public at no
|
|
||||||
charge under subsection 6d.
|
|
||||||
|
|
||||||
A separable portion of the object code, whose source code is excluded
|
|
||||||
from the Corresponding Source as a System Library, need not be
|
|
||||||
included in conveying the object code work.
|
|
||||||
|
|
||||||
A "User Product" is either (1) a "consumer product", which means any
|
|
||||||
tangible personal property which is normally used for personal, family,
|
|
||||||
or household purposes, or (2) anything designed or sold for incorporation
|
|
||||||
into a dwelling. In determining whether a product is a consumer product,
|
|
||||||
doubtful cases shall be resolved in favor of coverage. For a particular
|
|
||||||
product received by a particular user, "normally used" refers to a
|
|
||||||
typical or common use of that class of product, regardless of the status
|
|
||||||
of the particular user or of the way in which the particular user
|
|
||||||
actually uses, or expects or is expected to use, the product. A product
|
|
||||||
is a consumer product regardless of whether the product has substantial
|
|
||||||
commercial, industrial or non-consumer uses, unless such uses represent
|
|
||||||
the only significant mode of use of the product.
|
|
||||||
|
|
||||||
"Installation Information" for a User Product means any methods,
|
|
||||||
procedures, authorization keys, or other information required to install
|
|
||||||
and execute modified versions of a covered work in that User Product from
|
|
||||||
a modified version of its Corresponding Source. The information must
|
|
||||||
suffice to ensure that the continued functioning of the modified object
|
|
||||||
code is in no case prevented or interfered with solely because
|
|
||||||
modification has been made.
|
|
||||||
|
|
||||||
If you convey an object code work under this section in, or with, or
|
|
||||||
specifically for use in, a User Product, and the conveying occurs as
|
|
||||||
part of a transaction in which the right of possession and use of the
|
|
||||||
User Product is transferred to the recipient in perpetuity or for a
|
|
||||||
fixed term (regardless of how the transaction is characterized), the
|
|
||||||
Corresponding Source conveyed under this section must be accompanied
|
|
||||||
by the Installation Information. But this requirement does not apply
|
|
||||||
if neither you nor any third party retains the ability to install
|
|
||||||
modified object code on the User Product (for example, the work has
|
|
||||||
been installed in ROM).
|
|
||||||
|
|
||||||
The requirement to provide Installation Information does not include a
|
|
||||||
requirement to continue to provide support service, warranty, or updates
|
|
||||||
for a work that has been modified or installed by the recipient, or for
|
|
||||||
the User Product in which it has been modified or installed. Access to a
|
|
||||||
network may be denied when the modification itself materially and
|
|
||||||
adversely affects the operation of the network or violates the rules and
|
|
||||||
protocols for communication across the network.
|
|
||||||
|
|
||||||
Corresponding Source conveyed, and Installation Information provided,
|
|
||||||
in accord with this section must be in a format that is publicly
|
|
||||||
documented (and with an implementation available to the public in
|
|
||||||
source code form), and must require no special password or key for
|
|
||||||
unpacking, reading or copying.
|
|
||||||
|
|
||||||
7. Additional Terms.
|
|
||||||
|
|
||||||
"Additional permissions" are terms that supplement the terms of this
|
|
||||||
License by making exceptions from one or more of its conditions.
|
|
||||||
Additional permissions that are applicable to the entire Program shall
|
|
||||||
be treated as though they were included in this License, to the extent
|
|
||||||
that they are valid under applicable law. If additional permissions
|
|
||||||
apply only to part of the Program, that part may be used separately
|
|
||||||
under those permissions, but the entire Program remains governed by
|
|
||||||
this License without regard to the additional permissions.
|
|
||||||
|
|
||||||
When you convey a copy of a covered work, you may at your option
|
|
||||||
remove any additional permissions from that copy, or from any part of
|
|
||||||
it. (Additional permissions may be written to require their own
|
|
||||||
removal in certain cases when you modify the work.) You may place
|
|
||||||
additional permissions on material, added by you to a covered work,
|
|
||||||
for which you have or can give appropriate copyright permission.
|
|
||||||
|
|
||||||
Notwithstanding any other provision of this License, for material you
|
|
||||||
add to a covered work, you may (if authorized by the copyright holders of
|
|
||||||
that material) supplement the terms of this License with terms:
|
|
||||||
|
|
||||||
a) Disclaiming warranty or limiting liability differently from the
|
|
||||||
terms of sections 15 and 16 of this License; or
|
|
||||||
|
|
||||||
b) Requiring preservation of specified reasonable legal notices or
|
|
||||||
author attributions in that material or in the Appropriate Legal
|
|
||||||
Notices displayed by works containing it; or
|
|
||||||
|
|
||||||
c) Prohibiting misrepresentation of the origin of that material, or
|
|
||||||
requiring that modified versions of such material be marked in
|
|
||||||
reasonable ways as different from the original version; or
|
|
||||||
|
|
||||||
d) Limiting the use for publicity purposes of names of licensors or
|
|
||||||
authors of the material; or
|
|
||||||
|
|
||||||
e) Declining to grant rights under trademark law for use of some
|
|
||||||
trade names, trademarks, or service marks; or
|
|
||||||
|
|
||||||
f) Requiring indemnification of licensors and authors of that
|
|
||||||
material by anyone who conveys the material (or modified versions of
|
|
||||||
it) with contractual assumptions of liability to the recipient, for
|
|
||||||
any liability that these contractual assumptions directly impose on
|
|
||||||
those licensors and authors.
|
|
||||||
|
|
||||||
All other non-permissive additional terms are considered "further
|
|
||||||
restrictions" within the meaning of section 10. If the Program as you
|
|
||||||
received it, or any part of it, contains a notice stating that it is
|
|
||||||
governed by this License along with a term that is a further
|
|
||||||
restriction, you may remove that term. If a license document contains
|
|
||||||
a further restriction but permits relicensing or conveying under this
|
|
||||||
License, you may add to a covered work material governed by the terms
|
|
||||||
of that license document, provided that the further restriction does
|
|
||||||
not survive such relicensing or conveying.
|
|
||||||
|
|
||||||
If you add terms to a covered work in accord with this section, you
|
|
||||||
must place, in the relevant source files, a statement of the
|
|
||||||
additional terms that apply to those files, or a notice indicating
|
|
||||||
where to find the applicable terms.
|
|
||||||
|
|
||||||
Additional terms, permissive or non-permissive, may be stated in the
|
|
||||||
form of a separately written license, or stated as exceptions;
|
|
||||||
the above requirements apply either way.
|
|
||||||
|
|
||||||
8. Termination.
|
|
||||||
|
|
||||||
You may not propagate or modify a covered work except as expressly
|
|
||||||
provided under this License. Any attempt otherwise to propagate or
|
|
||||||
modify it is void, and will automatically terminate your rights under
|
|
||||||
this License (including any patent licenses granted under the third
|
|
||||||
paragraph of section 11).
|
|
||||||
|
|
||||||
However, if you cease all violation of this License, then your
|
|
||||||
license from a particular copyright holder is reinstated (a)
|
|
||||||
provisionally, unless and until the copyright holder explicitly and
|
|
||||||
finally terminates your license, and (b) permanently, if the copyright
|
|
||||||
holder fails to notify you of the violation by some reasonable means
|
|
||||||
prior to 60 days after the cessation.
|
|
||||||
|
|
||||||
Moreover, your license from a particular copyright holder is
|
|
||||||
reinstated permanently if the copyright holder notifies you of the
|
|
||||||
violation by some reasonable means, this is the first time you have
|
|
||||||
received notice of violation of this License (for any work) from that
|
|
||||||
copyright holder, and you cure the violation prior to 30 days after
|
|
||||||
your receipt of the notice.
|
|
||||||
|
|
||||||
Termination of your rights under this section does not terminate the
|
|
||||||
licenses of parties who have received copies or rights from you under
|
|
||||||
this License. If your rights have been terminated and not permanently
|
|
||||||
reinstated, you do not qualify to receive new licenses for the same
|
|
||||||
material under section 10.
|
|
||||||
|
|
||||||
9. Acceptance Not Required for Having Copies.
|
|
||||||
|
|
||||||
You are not required to accept this License in order to receive or
|
|
||||||
run a copy of the Program. Ancillary propagation of a covered work
|
|
||||||
occurring solely as a consequence of using peer-to-peer transmission
|
|
||||||
to receive a copy likewise does not require acceptance. However,
|
|
||||||
nothing other than this License grants you permission to propagate or
|
|
||||||
modify any covered work. These actions infringe copyright if you do
|
|
||||||
not accept this License. Therefore, by modifying or propagating a
|
|
||||||
covered work, you indicate your acceptance of this License to do so.
|
|
||||||
|
|
||||||
10. Automatic Licensing of Downstream Recipients.
|
|
||||||
|
|
||||||
Each time you convey a covered work, the recipient automatically
|
|
||||||
receives a license from the original licensors, to run, modify and
|
|
||||||
propagate that work, subject to this License. You are not responsible
|
|
||||||
for enforcing compliance by third parties with this License.
|
|
||||||
|
|
||||||
An "entity transaction" is a transaction transferring control of an
|
|
||||||
organization, or substantially all assets of one, or subdividing an
|
|
||||||
organization, or merging organizations. If propagation of a covered
|
|
||||||
work results from an entity transaction, each party to that
|
|
||||||
transaction who receives a copy of the work also receives whatever
|
|
||||||
licenses to the work the party's predecessor in interest had or could
|
|
||||||
give under the previous paragraph, plus a right to possession of the
|
|
||||||
Corresponding Source of the work from the predecessor in interest, if
|
|
||||||
the predecessor has it or can get it with reasonable efforts.
|
|
||||||
|
|
||||||
You may not impose any further restrictions on the exercise of the
|
|
||||||
rights granted or affirmed under this License. For example, you may
|
|
||||||
not impose a license fee, royalty, or other charge for exercise of
|
|
||||||
rights granted under this License, and you may not initiate litigation
|
|
||||||
(including a cross-claim or counterclaim in a lawsuit) alleging that
|
|
||||||
any patent claim is infringed by making, using, selling, offering for
|
|
||||||
sale, or importing the Program or any portion of it.
|
|
||||||
|
|
||||||
11. Patents.
|
|
||||||
|
|
||||||
A "contributor" is a copyright holder who authorizes use under this
|
|
||||||
License of the Program or a work on which the Program is based. The
|
|
||||||
work thus licensed is called the contributor's "contributor version".
|
|
||||||
|
|
||||||
A contributor's "essential patent claims" are all patent claims
|
|
||||||
owned or controlled by the contributor, whether already acquired or
|
|
||||||
hereafter acquired, that would be infringed by some manner, permitted
|
|
||||||
by this License, of making, using, or selling its contributor version,
|
|
||||||
but do not include claims that would be infringed only as a
|
|
||||||
consequence of further modification of the contributor version. For
|
|
||||||
purposes of this definition, "control" includes the right to grant
|
|
||||||
patent sublicenses in a manner consistent with the requirements of
|
|
||||||
this License.
|
|
||||||
|
|
||||||
Each contributor grants you a non-exclusive, worldwide, royalty-free
|
|
||||||
patent license under the contributor's essential patent claims, to
|
|
||||||
make, use, sell, offer for sale, import and otherwise run, modify and
|
|
||||||
propagate the contents of its contributor version.
|
|
||||||
|
|
||||||
In the following three paragraphs, a "patent license" is any express
|
|
||||||
agreement or commitment, however denominated, not to enforce a patent
|
|
||||||
(such as an express permission to practice a patent or covenant not to
|
|
||||||
sue for patent infringement). To "grant" such a patent license to a
|
|
||||||
party means to make such an agreement or commitment not to enforce a
|
|
||||||
patent against the party.
|
|
||||||
|
|
||||||
If you convey a covered work, knowingly relying on a patent license,
|
|
||||||
and the Corresponding Source of the work is not available for anyone
|
|
||||||
to copy, free of charge and under the terms of this License, through a
|
|
||||||
publicly available network server or other readily accessible means,
|
|
||||||
then you must either (1) cause the Corresponding Source to be so
|
|
||||||
available, or (2) arrange to deprive yourself of the benefit of the
|
|
||||||
patent license for this particular work, or (3) arrange, in a manner
|
|
||||||
consistent with the requirements of this License, to extend the patent
|
|
||||||
license to downstream recipients. "Knowingly relying" means you have
|
|
||||||
actual knowledge that, but for the patent license, your conveying the
|
|
||||||
covered work in a country, or your recipient's use of the covered work
|
|
||||||
in a country, would infringe one or more identifiable patents in that
|
|
||||||
country that you have reason to believe are valid.
|
|
||||||
|
|
||||||
If, pursuant to or in connection with a single transaction or
|
|
||||||
arrangement, you convey, or propagate by procuring conveyance of, a
|
|
||||||
covered work, and grant a patent license to some of the parties
|
|
||||||
receiving the covered work authorizing them to use, propagate, modify
|
|
||||||
or convey a specific copy of the covered work, then the patent license
|
|
||||||
you grant is automatically extended to all recipients of the covered
|
|
||||||
work and works based on it.
|
|
||||||
|
|
||||||
A patent license is "discriminatory" if it does not include within
|
|
||||||
the scope of its coverage, prohibits the exercise of, or is
|
|
||||||
conditioned on the non-exercise of one or more of the rights that are
|
|
||||||
specifically granted under this License. You may not convey a covered
|
|
||||||
work if you are a party to an arrangement with a third party that is
|
|
||||||
in the business of distributing software, under which you make payment
|
|
||||||
to the third party based on the extent of your activity of conveying
|
|
||||||
the work, and under which the third party grants, to any of the
|
|
||||||
parties who would receive the covered work from you, a discriminatory
|
|
||||||
patent license (a) in connection with copies of the covered work
|
|
||||||
conveyed by you (or copies made from those copies), or (b) primarily
|
|
||||||
for and in connection with specific products or compilations that
|
|
||||||
contain the covered work, unless you entered into that arrangement,
|
|
||||||
or that patent license was granted, prior to 28 March 2007.
|
|
||||||
|
|
||||||
Nothing in this License shall be construed as excluding or limiting
|
|
||||||
any implied license or other defenses to infringement that may
|
|
||||||
otherwise be available to you under applicable patent law.
|
|
||||||
|
|
||||||
12. No Surrender of Others' Freedom.
|
|
||||||
|
|
||||||
If conditions are imposed on you (whether by court order, agreement or
|
|
||||||
otherwise) that contradict the conditions of this License, they do not
|
|
||||||
excuse you from the conditions of this License. If you cannot convey a
|
|
||||||
covered work so as to satisfy simultaneously your obligations under this
|
|
||||||
License and any other pertinent obligations, then as a consequence you may
|
|
||||||
not convey it at all. For example, if you agree to terms that obligate you
|
|
||||||
to collect a royalty for further conveying from those to whom you convey
|
|
||||||
the Program, the only way you could satisfy both those terms and this
|
|
||||||
License would be to refrain entirely from conveying the Program.
|
|
||||||
|
|
||||||
13. Use with the GNU Affero General Public License.
|
|
||||||
|
|
||||||
Notwithstanding any other provision of this License, you have
|
|
||||||
permission to link or combine any covered work with a work licensed
|
|
||||||
under version 3 of the GNU Affero General Public License into a single
|
|
||||||
combined work, and to convey the resulting work. The terms of this
|
|
||||||
License will continue to apply to the part which is the covered work,
|
|
||||||
but the special requirements of the GNU Affero General Public License,
|
|
||||||
section 13, concerning interaction through a network will apply to the
|
|
||||||
combination as such.
|
|
||||||
|
|
||||||
14. Revised Versions of this License.
|
|
||||||
|
|
||||||
The Free Software Foundation may publish revised and/or new versions of
|
|
||||||
the GNU General Public License from time to time. Such new versions will
|
|
||||||
be similar in spirit to the present version, but may differ in detail to
|
|
||||||
address new problems or concerns.
|
|
||||||
|
|
||||||
Each version is given a distinguishing version number. If the
|
|
||||||
Program specifies that a certain numbered version of the GNU General
|
|
||||||
Public License "or any later version" applies to it, you have the
|
|
||||||
option of following the terms and conditions either of that numbered
|
|
||||||
version or of any later version published by the Free Software
|
|
||||||
Foundation. If the Program does not specify a version number of the
|
|
||||||
GNU General Public License, you may choose any version ever published
|
|
||||||
by the Free Software Foundation.
|
|
||||||
|
|
||||||
If the Program specifies that a proxy can decide which future
|
|
||||||
versions of the GNU General Public License can be used, that proxy's
|
|
||||||
public statement of acceptance of a version permanently authorizes you
|
|
||||||
to choose that version for the Program.
|
|
||||||
|
|
||||||
Later license versions may give you additional or different
|
|
||||||
permissions. However, no additional obligations are imposed on any
|
|
||||||
author or copyright holder as a result of your choosing to follow a
|
|
||||||
later version.
|
|
||||||
|
|
||||||
15. Disclaimer of Warranty.
|
|
||||||
|
|
||||||
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
|
|
||||||
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
|
|
||||||
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
|
|
||||||
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
|
|
||||||
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
|
||||||
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
|
|
||||||
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
|
|
||||||
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
|
||||||
|
|
||||||
16. Limitation of Liability.
|
|
||||||
|
|
||||||
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
|
||||||
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
|
|
||||||
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
|
|
||||||
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
|
|
||||||
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
|
|
||||||
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
|
|
||||||
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
|
|
||||||
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
|
|
||||||
SUCH DAMAGES.
|
|
||||||
|
|
||||||
17. Interpretation of Sections 15 and 16.
|
|
||||||
|
|
||||||
If the disclaimer of warranty and limitation of liability provided
|
|
||||||
above cannot be given local legal effect according to their terms,
|
|
||||||
reviewing courts shall apply local law that most closely approximates
|
|
||||||
an absolute waiver of all civil liability in connection with the
|
|
||||||
Program, unless a warranty or assumption of liability accompanies a
|
|
||||||
copy of the Program in return for a fee.
|
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
How to Apply These Terms to Your New Programs
|
|
||||||
|
|
||||||
If you develop a new program, and you want it to be of the greatest
|
|
||||||
possible use to the public, the best way to achieve this is to make it
|
|
||||||
free software which everyone can redistribute and change under these terms.
|
|
||||||
|
|
||||||
To do so, attach the following notices to the program. It is safest
|
|
||||||
to attach them to the start of each source file to most effectively
|
|
||||||
state the exclusion of warranty; and each file should have at least
|
|
||||||
the "copyright" line and a pointer to where the full notice is found.
|
|
||||||
|
|
||||||
<one line to give the program's name and a brief idea of what it does.>
|
|
||||||
Copyright (C) <year> <name of author>
|
|
||||||
|
|
||||||
This program is free software: you can redistribute it and/or modify
|
|
||||||
it under the terms of the GNU General Public License as published by
|
|
||||||
the Free Software Foundation, either version 3 of the License, or
|
|
||||||
(at your option) any later version.
|
|
||||||
|
|
||||||
This program is distributed in the hope that it will be useful,
|
|
||||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
GNU General Public License for more details.
|
|
||||||
|
|
||||||
You should have received a copy of the GNU General Public License
|
|
||||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
Also add information on how to contact you by electronic and paper mail.
|
|
||||||
|
|
||||||
If the program does terminal interaction, make it output a short
|
|
||||||
notice like this when it starts in an interactive mode:
|
|
||||||
|
|
||||||
<program> Copyright (C) <year> <name of author>
|
|
||||||
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
|
|
||||||
This is free software, and you are welcome to redistribute it
|
|
||||||
under certain conditions; type `show c' for details.
|
|
||||||
|
|
||||||
The hypothetical commands `show w' and `show c' should show the appropriate
|
|
||||||
parts of the General Public License. Of course, your program's commands
|
|
||||||
might be different; for a GUI interface, you would use an "about box".
|
|
||||||
|
|
||||||
You should also get your employer (if you work as a programmer) or school,
|
|
||||||
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
|
||||||
For more information on this, and how to apply and follow the GNU GPL, see
|
|
||||||
<http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
The GNU General Public License does not permit incorporating your program
|
|
||||||
into proprietary programs. If your program is a subroutine library, you
|
|
||||||
may consider it more useful to permit linking proprietary applications with
|
|
||||||
the library. If this is what you want to do, use the GNU Lesser General
|
|
||||||
Public License instead of this License. But first, please read
|
|
||||||
<http://www.gnu.org/philosophy/why-not-lgpl.html>.
|
|
|
@ -1,77 +0,0 @@
|
||||||
# Depguard
|
|
||||||
|
|
||||||
Go linter that checks package imports are in a list of acceptable packages. It
|
|
||||||
supports a white list and black list option and can do prefix or glob matching.
|
|
||||||
This allows you to allow imports from a whole organization or only
|
|
||||||
allow specific packages within a repository. It is recommended to use prefix
|
|
||||||
matching as it is faster than glob matching. The fewer glob matches the better.
|
|
||||||
|
|
||||||
> If a pattern is matched by prefix it does not try to match via glob.
|
|
||||||
|
|
||||||
## Install
|
|
||||||
|
|
||||||
```bash
|
|
||||||
go get -u github.com/OpenPeeDeeP/depguard
|
|
||||||
```
|
|
||||||
|
|
||||||
## Config
|
|
||||||
|
|
||||||
By default, Depguard looks for a file named `.depguard.json` in the current
|
|
||||||
current working directory. If it is somewhere else, pass in the `-c` flag with
|
|
||||||
the location of your configuration file.
|
|
||||||
|
|
||||||
The following is an example configuration file.
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"type": "whitelist",
|
|
||||||
"packages": ["github.com/OpenPeeDeeP/depguard"],
|
|
||||||
"packageErrorMessages": {
|
|
||||||
"github.com/OpenPeeDeeP/depguards": "Please use \"github.com/OpenPeeDeeP/depguard\","
|
|
||||||
},
|
|
||||||
"inTests": ["github.com/stretchr/testify"],
|
|
||||||
"includeGoStdLib": true
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
- `type` can be either `whitelist` or `blacklist`. This check is case insensitive.
|
|
||||||
If not specified the default is `blacklist`.
|
|
||||||
- `packages` is a list of packages for the list type specified.
|
|
||||||
- `packageErrorMessages` is a mapping from packages to the error message to display
|
|
||||||
- `inTests` is a list of packages allowed/disallowed only in test files.
|
|
||||||
- Set `includeGoStdLib` (`includeGoRoot` for backwards compatability) to true if you want to check the list against standard lib.
|
|
||||||
If not specified the default is false.
|
|
||||||
|
|
||||||
## Gometalinter
|
|
||||||
|
|
||||||
The binary installation of this linter can be used with
|
|
||||||
[Gometalinter](github.com/alecthomas/gometalinter).
|
|
||||||
|
|
||||||
If you use a configuration file for Gometalinter then the following will need to
|
|
||||||
be added to your configuration file.
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"linters": {
|
|
||||||
"depguard": {
|
|
||||||
"command": "depguard -c path/to/config.json",
|
|
||||||
"pattern": "PATH:LINE:COL:MESSAGE",
|
|
||||||
"installFrom": "github.com/OpenPeeDeeP/depguard",
|
|
||||||
"isFast": true,
|
|
||||||
"partitionStrategy": "packages"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
If you prefer the command line way the following will work for you as well.
|
|
||||||
|
|
||||||
```bash
|
|
||||||
gometalinter --linter='depguard:depguard -c path/to/config.json:PATH:LINE:COL:MESSAGE'
|
|
||||||
```
|
|
||||||
|
|
||||||
## Golangci-lint
|
|
||||||
|
|
||||||
This linter was built with
|
|
||||||
[Golangci-lint](https://github.com/golangci/golangci-lint) in mind. It is compatable
|
|
||||||
and read their docs to see how to implement all their linters, including this one.
|
|
|
@ -1,241 +0,0 @@
|
||||||
package depguard
|
|
||||||
|
|
||||||
import (
|
|
||||||
"go/build"
|
|
||||||
"go/token"
|
|
||||||
"io/ioutil"
|
|
||||||
"path"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/gobwas/glob"
|
|
||||||
"golang.org/x/tools/go/loader"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ListType states what kind of list is passed in.
|
|
||||||
type ListType int
|
|
||||||
|
|
||||||
const (
|
|
||||||
// LTBlacklist states the list given is a blacklist. (default)
|
|
||||||
LTBlacklist ListType = iota
|
|
||||||
// LTWhitelist states the list given is a whitelist.
|
|
||||||
LTWhitelist
|
|
||||||
)
|
|
||||||
|
|
||||||
// StringToListType makes it easier to turn a string into a ListType.
|
|
||||||
// It assumes that the string representation is lower case.
|
|
||||||
var StringToListType = map[string]ListType{
|
|
||||||
"whitelist": LTWhitelist,
|
|
||||||
"blacklist": LTBlacklist,
|
|
||||||
}
|
|
||||||
|
|
||||||
// Issue with the package with PackageName at the Position.
|
|
||||||
type Issue struct {
|
|
||||||
PackageName string
|
|
||||||
Position token.Position
|
|
||||||
}
|
|
||||||
|
|
||||||
// Depguard checks imports to make sure they follow the given list and constraints.
|
|
||||||
type Depguard struct {
|
|
||||||
ListType ListType
|
|
||||||
IncludeGoRoot bool
|
|
||||||
|
|
||||||
Packages []string
|
|
||||||
prefixPackages []string
|
|
||||||
globPackages []glob.Glob
|
|
||||||
|
|
||||||
TestPackages []string
|
|
||||||
prefixTestPackages []string
|
|
||||||
globTestPackages []glob.Glob
|
|
||||||
|
|
||||||
prefixRoot []string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Run checks for dependencies given the program and validates them against
|
|
||||||
// Packages.
|
|
||||||
func (dg *Depguard) Run(config *loader.Config, prog *loader.Program) ([]*Issue, error) {
|
|
||||||
// Shortcut execution on an empty blacklist as that means every package is allowed
|
|
||||||
if dg.ListType == LTBlacklist && len(dg.Packages) == 0 {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := dg.initialize(config, prog); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
directImports, err := dg.createImportMap(prog)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
var issues []*Issue
|
|
||||||
for pkg, positions := range directImports {
|
|
||||||
for _, pos := range positions {
|
|
||||||
|
|
||||||
prefixList, globList := dg.prefixPackages, dg.globPackages
|
|
||||||
if len(dg.TestPackages) > 0 && strings.Index(pos.Filename, "_test.go") != -1 {
|
|
||||||
prefixList, globList = dg.prefixTestPackages, dg.globTestPackages
|
|
||||||
}
|
|
||||||
|
|
||||||
if dg.flagIt(pkg, prefixList, globList) {
|
|
||||||
issues = append(issues, &Issue{
|
|
||||||
PackageName: pkg,
|
|
||||||
Position: pos,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return issues, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (dg *Depguard) initialize(config *loader.Config, prog *loader.Program) error {
|
|
||||||
// parse ordinary guarded packages
|
|
||||||
for _, pkg := range dg.Packages {
|
|
||||||
if strings.ContainsAny(pkg, "!?*[]{}") {
|
|
||||||
g, err := glob.Compile(pkg, '/')
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
dg.globPackages = append(dg.globPackages, g)
|
|
||||||
} else {
|
|
||||||
dg.prefixPackages = append(dg.prefixPackages, pkg)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sort the packages so we can have a faster search in the array
|
|
||||||
sort.Strings(dg.prefixPackages)
|
|
||||||
|
|
||||||
// parse guarded tests packages
|
|
||||||
for _, pkg := range dg.TestPackages {
|
|
||||||
if strings.ContainsAny(pkg, "!?*[]{}") {
|
|
||||||
g, err := glob.Compile(pkg, '/')
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
dg.globTestPackages = append(dg.globTestPackages, g)
|
|
||||||
} else {
|
|
||||||
dg.prefixTestPackages = append(dg.prefixTestPackages, pkg)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sort the test packages so we can have a faster search in the array
|
|
||||||
sort.Strings(dg.prefixTestPackages)
|
|
||||||
|
|
||||||
if !dg.IncludeGoRoot {
|
|
||||||
var err error
|
|
||||||
dg.prefixRoot, err = listRootPrefixs(config.Build)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (dg *Depguard) createImportMap(prog *loader.Program) (map[string][]token.Position, error) {
|
|
||||||
importMap := make(map[string][]token.Position)
|
|
||||||
// For the directly imported packages
|
|
||||||
for _, imported := range prog.InitialPackages() {
|
|
||||||
// Go through their files
|
|
||||||
for _, file := range imported.Files {
|
|
||||||
// And populate a map of all direct imports and their positions
|
|
||||||
// This will filter out GoRoot depending on the Depguard.IncludeGoRoot
|
|
||||||
for _, fileImport := range file.Imports {
|
|
||||||
fileImportPath := cleanBasicLitString(fileImport.Path.Value)
|
|
||||||
if !dg.IncludeGoRoot && dg.isRoot(fileImportPath) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
position := prog.Fset.Position(fileImport.Pos())
|
|
||||||
positions, found := importMap[fileImportPath]
|
|
||||||
if !found {
|
|
||||||
importMap[fileImportPath] = []token.Position{
|
|
||||||
position,
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
importMap[fileImportPath] = append(positions, position)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return importMap, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func pkgInList(pkg string, prefixList []string, globList []glob.Glob) bool {
|
|
||||||
if pkgInPrefixList(pkg, prefixList) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return pkgInGlobList(pkg, globList)
|
|
||||||
}
|
|
||||||
|
|
||||||
func pkgInPrefixList(pkg string, prefixList []string) bool {
|
|
||||||
// Idx represents where in the package slice the passed in package would go
|
|
||||||
// when sorted. -1 Just means that it would be at the very front of the slice.
|
|
||||||
idx := sort.Search(len(prefixList), func(i int) bool {
|
|
||||||
return prefixList[i] > pkg
|
|
||||||
}) - 1
|
|
||||||
// This means that the package passed in has no way to be prefixed by anything
|
|
||||||
// in the package list as it is already smaller then everything
|
|
||||||
if idx == -1 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return strings.HasPrefix(pkg, prefixList[idx])
|
|
||||||
}
|
|
||||||
|
|
||||||
func pkgInGlobList(pkg string, globList []glob.Glob) bool {
|
|
||||||
for _, g := range globList {
|
|
||||||
if g.Match(pkg) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// InList | WhiteList | BlackList
|
|
||||||
// y | | x
|
|
||||||
// n | x |
|
|
||||||
func (dg *Depguard) flagIt(pkg string, prefixList []string, globList []glob.Glob) bool {
|
|
||||||
return pkgInList(pkg, prefixList, globList) == (dg.ListType == LTBlacklist)
|
|
||||||
}
|
|
||||||
|
|
||||||
func cleanBasicLitString(value string) string {
|
|
||||||
return strings.Trim(value, "\"\\")
|
|
||||||
}
|
|
||||||
|
|
||||||
// We can do this as all imports that are not root are either prefixed with a domain
|
|
||||||
// or prefixed with `./` or `/` to dictate it is a local file reference
|
|
||||||
func listRootPrefixs(buildCtx *build.Context) ([]string, error) {
|
|
||||||
if buildCtx == nil {
|
|
||||||
buildCtx = &build.Default
|
|
||||||
}
|
|
||||||
root := path.Join(buildCtx.GOROOT, "src")
|
|
||||||
fs, err := ioutil.ReadDir(root)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
var pkgPrefix []string
|
|
||||||
for _, f := range fs {
|
|
||||||
if !f.IsDir() {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
pkgPrefix = append(pkgPrefix, f.Name())
|
|
||||||
}
|
|
||||||
return pkgPrefix, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (dg *Depguard) isRoot(importPath string) bool {
|
|
||||||
// Idx represents where in the package slice the passed in package would go
|
|
||||||
// when sorted. -1 Just means that it would be at the very front of the slice.
|
|
||||||
idx := sort.Search(len(dg.prefixRoot), func(i int) bool {
|
|
||||||
return dg.prefixRoot[i] > importPath
|
|
||||||
}) - 1
|
|
||||||
// This means that the package passed in has no way to be prefixed by anything
|
|
||||||
// in the package list as it is already smaller then everything
|
|
||||||
if idx == -1 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
// if it is prefixed by a root prefix we need to check if it is an exact match
|
|
||||||
// or prefix with `/` as this could return false posative if the domain was
|
|
||||||
// `archive.com` for example as `archive` is a go root package.
|
|
||||||
if strings.HasPrefix(importPath, dg.prefixRoot[idx]) {
|
|
||||||
return strings.HasPrefix(importPath, dg.prefixRoot[idx]+"/") || importPath == dg.prefixRoot[idx]
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
|
@ -1,21 +0,0 @@
|
||||||
MIT License
|
|
||||||
|
|
||||||
Copyright (c) 2017 Alex Kohler
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
|
||||||
in the Software without restriction, including without limitation the rights
|
|
||||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
copies of the Software, and to permit persons to whom the Software is
|
|
||||||
furnished to do so, subject to the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be included in all
|
|
||||||
copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
||||||
SOFTWARE.
|
|
|
@ -1,267 +0,0 @@
|
||||||
package pkg
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"go/ast"
|
|
||||||
"go/token"
|
|
||||||
)
|
|
||||||
|
|
||||||
type sliceDeclaration struct {
|
|
||||||
name string
|
|
||||||
// sType string
|
|
||||||
genD *ast.GenDecl
|
|
||||||
}
|
|
||||||
|
|
||||||
type returnsVisitor struct {
|
|
||||||
// flags
|
|
||||||
simple bool
|
|
||||||
includeRangeLoops bool
|
|
||||||
includeForLoops bool
|
|
||||||
// visitor fields
|
|
||||||
sliceDeclarations []*sliceDeclaration
|
|
||||||
preallocHints []Hint
|
|
||||||
returnsInsideOfLoop bool
|
|
||||||
arrayTypes []string
|
|
||||||
}
|
|
||||||
|
|
||||||
func Check(files []*ast.File, simple, includeRangeLoops, includeForLoops bool) []Hint {
|
|
||||||
hints := []Hint{}
|
|
||||||
for _, f := range files {
|
|
||||||
retVis := &returnsVisitor{
|
|
||||||
simple: simple,
|
|
||||||
includeRangeLoops: includeRangeLoops,
|
|
||||||
includeForLoops: includeForLoops,
|
|
||||||
}
|
|
||||||
ast.Walk(retVis, f)
|
|
||||||
// if simple is true, then we actually have to check if we had returns
|
|
||||||
// inside of our loop. Otherwise, we can just report all messages.
|
|
||||||
if !retVis.simple || !retVis.returnsInsideOfLoop {
|
|
||||||
hints = append(hints, retVis.preallocHints...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return hints
|
|
||||||
}
|
|
||||||
|
|
||||||
func contains(slice []string, item string) bool {
|
|
||||||
for _, s := range slice {
|
|
||||||
if s == item {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (v *returnsVisitor) Visit(node ast.Node) ast.Visitor {
|
|
||||||
|
|
||||||
v.sliceDeclarations = nil
|
|
||||||
v.returnsInsideOfLoop = false
|
|
||||||
|
|
||||||
switch n := node.(type) {
|
|
||||||
case *ast.TypeSpec:
|
|
||||||
if _, ok := n.Type.(*ast.ArrayType); ok {
|
|
||||||
if n.Name != nil {
|
|
||||||
v.arrayTypes = append(v.arrayTypes, n.Name.Name)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case *ast.FuncDecl:
|
|
||||||
if n.Body != nil {
|
|
||||||
for _, stmt := range n.Body.List {
|
|
||||||
switch s := stmt.(type) {
|
|
||||||
// Find non pre-allocated slices
|
|
||||||
case *ast.DeclStmt:
|
|
||||||
genD, ok := s.Decl.(*ast.GenDecl)
|
|
||||||
if !ok {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if genD.Tok == token.TYPE {
|
|
||||||
for _, spec := range genD.Specs {
|
|
||||||
tSpec, ok := spec.(*ast.TypeSpec)
|
|
||||||
if !ok {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, ok := tSpec.Type.(*ast.ArrayType); ok {
|
|
||||||
if tSpec.Name != nil {
|
|
||||||
v.arrayTypes = append(v.arrayTypes, tSpec.Name.Name)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else if genD.Tok == token.VAR {
|
|
||||||
for _, spec := range genD.Specs {
|
|
||||||
vSpec, ok := spec.(*ast.ValueSpec)
|
|
||||||
if !ok {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
var isArrType bool
|
|
||||||
switch val := vSpec.Type.(type) {
|
|
||||||
case *ast.ArrayType:
|
|
||||||
isArrType = true
|
|
||||||
case *ast.Ident:
|
|
||||||
isArrType = contains(v.arrayTypes, val.Name)
|
|
||||||
}
|
|
||||||
if isArrType {
|
|
||||||
if vSpec.Names != nil {
|
|
||||||
/*atID, ok := arrayType.Elt.(*ast.Ident)
|
|
||||||
if !ok {
|
|
||||||
continue
|
|
||||||
}*/
|
|
||||||
|
|
||||||
// We should handle multiple slices declared on same line e.g. var mySlice1, mySlice2 []uint32
|
|
||||||
for _, vName := range vSpec.Names {
|
|
||||||
v.sliceDeclarations = append(v.sliceDeclarations, &sliceDeclaration{name: vName.Name /*sType: atID.Name,*/, genD: genD})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
case *ast.RangeStmt:
|
|
||||||
if v.includeRangeLoops {
|
|
||||||
if len(v.sliceDeclarations) == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// Check the value being ranged over and ensure it's not a channel (we cannot offer any recommendations on channel ranges).
|
|
||||||
rangeIdent, ok := s.X.(*ast.Ident)
|
|
||||||
if ok && rangeIdent.Obj != nil {
|
|
||||||
valueSpec, ok := rangeIdent.Obj.Decl.(*ast.ValueSpec)
|
|
||||||
if ok {
|
|
||||||
if _, rangeTargetIsChannel := valueSpec.Type.(*ast.ChanType); rangeTargetIsChannel {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if s.Body != nil {
|
|
||||||
v.handleLoops(s.Body)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
case *ast.ForStmt:
|
|
||||||
if v.includeForLoops {
|
|
||||||
if len(v.sliceDeclarations) == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if s.Body != nil {
|
|
||||||
v.handleLoops(s.Body)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
// handleLoops is a helper function to share the logic required for both *ast.RangeLoops and *ast.ForLoops
|
|
||||||
func (v *returnsVisitor) handleLoops(blockStmt *ast.BlockStmt) {
|
|
||||||
|
|
||||||
for _, stmt := range blockStmt.List {
|
|
||||||
switch bodyStmt := stmt.(type) {
|
|
||||||
case *ast.AssignStmt:
|
|
||||||
asgnStmt := bodyStmt
|
|
||||||
for index, expr := range asgnStmt.Rhs {
|
|
||||||
if index >= len(asgnStmt.Lhs) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
lhsIdent, ok := asgnStmt.Lhs[index].(*ast.Ident)
|
|
||||||
if !ok {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
callExpr, ok := expr.(*ast.CallExpr)
|
|
||||||
if !ok {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
rhsFuncIdent, ok := callExpr.Fun.(*ast.Ident)
|
|
||||||
if !ok {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if rhsFuncIdent.Name != "append" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// e.g., `x = append(x)`
|
|
||||||
// Pointless, but pre-allocation will not help.
|
|
||||||
if len(callExpr.Args) < 2 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
rhsIdent, ok := callExpr.Args[0].(*ast.Ident)
|
|
||||||
if !ok {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// e.g., `x = append(y, a)`
|
|
||||||
// This is weird (and maybe a logic error),
|
|
||||||
// but we cannot recommend pre-allocation.
|
|
||||||
if lhsIdent.Name != rhsIdent.Name {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// e.g., `x = append(x, y...)`
|
|
||||||
// we should ignore this. Pre-allocating in this case
|
|
||||||
// is confusing, and is not possible in general.
|
|
||||||
if callExpr.Ellipsis.IsValid() {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, sliceDecl := range v.sliceDeclarations {
|
|
||||||
if sliceDecl.name == lhsIdent.Name {
|
|
||||||
// This is a potential mark, we just need to make sure there are no returns/continues in the
|
|
||||||
// range loop.
|
|
||||||
// now we just need to grab whatever we're ranging over
|
|
||||||
/*sxIdent, ok := s.X.(*ast.Ident)
|
|
||||||
if !ok {
|
|
||||||
continue
|
|
||||||
}*/
|
|
||||||
|
|
||||||
v.preallocHints = append(v.preallocHints, Hint{
|
|
||||||
Pos: sliceDecl.genD.Pos(),
|
|
||||||
DeclaredSliceName: sliceDecl.name,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case *ast.IfStmt:
|
|
||||||
ifStmt := bodyStmt
|
|
||||||
if ifStmt.Body != nil {
|
|
||||||
for _, ifBodyStmt := range ifStmt.Body.List {
|
|
||||||
// TODO should probably handle embedded ifs here
|
|
||||||
switch /*ift :=*/ ifBodyStmt.(type) {
|
|
||||||
case *ast.BranchStmt, *ast.ReturnStmt:
|
|
||||||
v.returnsInsideOfLoop = true
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
default:
|
|
||||||
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// Hint stores the information about an occurrence of a slice that could be
|
|
||||||
// preallocated.
|
|
||||||
type Hint struct {
|
|
||||||
Pos token.Pos
|
|
||||||
DeclaredSliceName string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h Hint) String() string {
|
|
||||||
return fmt.Sprintf("%v: Consider preallocating %v", h.Pos, h.DeclaredSliceName)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h Hint) StringFromFS(f *token.FileSet) string {
|
|
||||||
file := f.File(h.Pos)
|
|
||||||
lineNumber := file.Position(h.Pos).Line
|
|
||||||
|
|
||||||
return fmt.Sprintf("%v:%v Consider preallocating %v", file.Name(), lineNumber, h.DeclaredSliceName)
|
|
||||||
}
|
|
|
@ -1,13 +0,0 @@
|
||||||
Copyright 2019 Andrew Shannon Brown
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
|
@ -1,45 +0,0 @@
|
||||||
package forbidigo
|
|
||||||
|
|
||||||
// Code generated by github.com/launchdarkly/go-options. DO NOT EDIT.
|
|
||||||
|
|
||||||
type ApplyOptionFunc func(c *config) error
|
|
||||||
|
|
||||||
func (f ApplyOptionFunc) apply(c *config) error {
|
|
||||||
return f(c)
|
|
||||||
}
|
|
||||||
|
|
||||||
func newConfig(options ...Option) (config, error) {
|
|
||||||
var c config
|
|
||||||
err := applyConfigOptions(&c, options...)
|
|
||||||
return c, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func applyConfigOptions(c *config, options ...Option) error {
|
|
||||||
c.ExcludeGodocExamples = true
|
|
||||||
for _, o := range options {
|
|
||||||
if err := o.apply(c); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type Option interface {
|
|
||||||
apply(*config) error
|
|
||||||
}
|
|
||||||
|
|
||||||
// OptionExcludeGodocExamples don't check inside Godoc examples (see https://blog.golang.org/examples)
|
|
||||||
func OptionExcludeGodocExamples(o bool) ApplyOptionFunc {
|
|
||||||
return func(c *config) error {
|
|
||||||
c.ExcludeGodocExamples = o
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// OptionIgnorePermitDirectives don't check for `permit` directives(for example, in favor of `nolint`)
|
|
||||||
func OptionIgnorePermitDirectives(o bool) ApplyOptionFunc {
|
|
||||||
return func(c *config) error {
|
|
||||||
c.IgnorePermitDirectives = o
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,193 +0,0 @@
|
||||||
// forbidigo provides a linter for forbidding the use of specific identifiers
|
|
||||||
package forbidigo
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"go/ast"
|
|
||||||
"go/printer"
|
|
||||||
"go/token"
|
|
||||||
"log"
|
|
||||||
"regexp"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
type Issue interface {
|
|
||||||
Details() string
|
|
||||||
Position() token.Position
|
|
||||||
String() string
|
|
||||||
}
|
|
||||||
|
|
||||||
type UsedIssue struct {
|
|
||||||
identifier string
|
|
||||||
pattern string
|
|
||||||
position token.Position
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a UsedIssue) Details() string {
|
|
||||||
return fmt.Sprintf("use of `%s` forbidden by pattern `%s`", a.identifier, a.pattern)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a UsedIssue) Position() token.Position {
|
|
||||||
return a.position
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a UsedIssue) String() string { return toString(a) }
|
|
||||||
|
|
||||||
func toString(i Issue) string {
|
|
||||||
return fmt.Sprintf("%s at %s", i.Details(), i.Position())
|
|
||||||
}
|
|
||||||
|
|
||||||
type Linter struct {
|
|
||||||
cfg config
|
|
||||||
patterns []*regexp.Regexp
|
|
||||||
}
|
|
||||||
|
|
||||||
func DefaultPatterns() []string {
|
|
||||||
return []string{`^(fmt\.Print(|f|ln)|print|println)$`}
|
|
||||||
}
|
|
||||||
|
|
||||||
//go:generate go-options config
|
|
||||||
type config struct {
|
|
||||||
// don't check inside Godoc examples (see https://blog.golang.org/examples)
|
|
||||||
ExcludeGodocExamples bool `options:",true"`
|
|
||||||
IgnorePermitDirectives bool // don't check for `permit` directives(for example, in favor of `nolint`)
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewLinter(patterns []string, options ...Option) (*Linter, error) {
|
|
||||||
cfg, err := newConfig(options...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Wrapf(err, "failed to process options")
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(patterns) == 0 {
|
|
||||||
patterns = DefaultPatterns()
|
|
||||||
}
|
|
||||||
compiledPatterns := make([]*regexp.Regexp, 0, len(patterns))
|
|
||||||
for _, p := range patterns {
|
|
||||||
re, err := regexp.Compile(p)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("unable to compile pattern `%s`: %s", p, err)
|
|
||||||
}
|
|
||||||
compiledPatterns = append(compiledPatterns, re)
|
|
||||||
}
|
|
||||||
return &Linter{
|
|
||||||
cfg: cfg,
|
|
||||||
patterns: compiledPatterns,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type visitor struct {
|
|
||||||
cfg config
|
|
||||||
isTestFile bool // godoc only runs on test files
|
|
||||||
|
|
||||||
linter *Linter
|
|
||||||
comments []*ast.CommentGroup
|
|
||||||
|
|
||||||
fset *token.FileSet
|
|
||||||
issues []Issue
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *Linter) Run(fset *token.FileSet, nodes ...ast.Node) ([]Issue, error) {
|
|
||||||
var issues []Issue //nolint:prealloc // we don't know how many there will be
|
|
||||||
for _, node := range nodes {
|
|
||||||
var comments []*ast.CommentGroup
|
|
||||||
isTestFile := false
|
|
||||||
isWholeFileExample := false
|
|
||||||
if file, ok := node.(*ast.File); ok {
|
|
||||||
comments = file.Comments
|
|
||||||
fileName := fset.Position(file.Pos()).Filename
|
|
||||||
isTestFile = strings.HasSuffix(fileName, "_test.go")
|
|
||||||
|
|
||||||
// From https://blog.golang.org/examples, a "whole file example" is:
|
|
||||||
// a file that ends in _test.go and contains exactly one example function,
|
|
||||||
// no test or benchmark functions, and at least one other package-level declaration.
|
|
||||||
if l.cfg.ExcludeGodocExamples && isTestFile && len(file.Decls) > 1 {
|
|
||||||
numExamples := 0
|
|
||||||
numTestsAndBenchmarks := 0
|
|
||||||
for _, decl := range file.Decls {
|
|
||||||
funcDecl, isFuncDecl := decl.(*ast.FuncDecl)
|
|
||||||
// consider only functions, not methods
|
|
||||||
if !isFuncDecl || funcDecl.Recv != nil || funcDecl.Name == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
funcName := funcDecl.Name.Name
|
|
||||||
if strings.HasPrefix(funcName, "Test") || strings.HasPrefix(funcName, "Benchmark") {
|
|
||||||
numTestsAndBenchmarks++
|
|
||||||
break // not a whole file example
|
|
||||||
}
|
|
||||||
if strings.HasPrefix(funcName, "Example") {
|
|
||||||
numExamples++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// if this is a whole file example, skip this node
|
|
||||||
isWholeFileExample = numExamples == 1 && numTestsAndBenchmarks == 0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if isWholeFileExample {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
visitor := visitor{
|
|
||||||
cfg: l.cfg,
|
|
||||||
isTestFile: isTestFile,
|
|
||||||
linter: l,
|
|
||||||
fset: fset,
|
|
||||||
comments: comments,
|
|
||||||
}
|
|
||||||
ast.Walk(&visitor, node)
|
|
||||||
issues = append(issues, visitor.issues...)
|
|
||||||
}
|
|
||||||
return issues, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (v *visitor) Visit(node ast.Node) ast.Visitor {
|
|
||||||
switch node := node.(type) {
|
|
||||||
case *ast.FuncDecl:
|
|
||||||
// don't descend into godoc examples if we are ignoring them
|
|
||||||
isGodocExample := v.isTestFile && node.Recv == nil && node.Name != nil && strings.HasPrefix(node.Name.Name, "Example")
|
|
||||||
if isGodocExample && v.cfg.ExcludeGodocExamples {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
case *ast.SelectorExpr:
|
|
||||||
case *ast.Ident:
|
|
||||||
default:
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
for _, p := range v.linter.patterns {
|
|
||||||
if p.MatchString(v.textFor(node)) && !v.permit(node) {
|
|
||||||
v.issues = append(v.issues, UsedIssue{
|
|
||||||
identifier: v.textFor(node),
|
|
||||||
pattern: p.String(),
|
|
||||||
position: v.fset.Position(node.Pos()),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (v *visitor) textFor(node ast.Node) string {
|
|
||||||
buf := new(bytes.Buffer)
|
|
||||||
if err := printer.Fprint(buf, v.fset, node); err != nil {
|
|
||||||
log.Fatalf("ERROR: unable to print node at %s: %s", v.fset.Position(node.Pos()), err)
|
|
||||||
}
|
|
||||||
return buf.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (v *visitor) permit(node ast.Node) bool {
|
|
||||||
if v.cfg.IgnorePermitDirectives {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
nodePos := v.fset.Position(node.Pos())
|
|
||||||
var nolint = regexp.MustCompile(fmt.Sprintf(`^//\s?permit:%s\b`, regexp.QuoteMeta(v.textFor(node))))
|
|
||||||
for _, c := range v.comments {
|
|
||||||
commentPos := v.fset.Position(c.Pos())
|
|
||||||
if commentPos.Line == nodePos.Line && len(c.List) > 0 && nolint.MatchString(c.List[0].Text) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
|
@ -1,13 +0,0 @@
|
||||||
Copyright 2019 Andrew Shannon Brown
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
|
@ -1,200 +0,0 @@
|
||||||
// makezero provides a linter for appends to slices initialized with non-zero length.
|
|
||||||
package makezero
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"go/ast"
|
|
||||||
"go/printer"
|
|
||||||
"go/token"
|
|
||||||
"go/types"
|
|
||||||
"log"
|
|
||||||
"regexp"
|
|
||||||
)
|
|
||||||
|
|
||||||
type Issue interface {
|
|
||||||
Details() string
|
|
||||||
Position() token.Position
|
|
||||||
String() string
|
|
||||||
}
|
|
||||||
|
|
||||||
type AppendIssue struct {
|
|
||||||
name string
|
|
||||||
position token.Position
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a AppendIssue) Details() string {
|
|
||||||
return fmt.Sprintf("append to slice `%s` with non-zero initialized length", a.name)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a AppendIssue) Position() token.Position {
|
|
||||||
return a.position
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a AppendIssue) String() string { return toString(a) }
|
|
||||||
|
|
||||||
type MustHaveNonZeroInitLenIssue struct {
|
|
||||||
name string
|
|
||||||
position token.Position
|
|
||||||
}
|
|
||||||
|
|
||||||
func (i MustHaveNonZeroInitLenIssue) Details() string {
|
|
||||||
return fmt.Sprintf("slice `%s` does not have non-zero initial length", i.name)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (i MustHaveNonZeroInitLenIssue) Position() token.Position {
|
|
||||||
return i.position
|
|
||||||
}
|
|
||||||
|
|
||||||
func (i MustHaveNonZeroInitLenIssue) String() string { return toString(i) }
|
|
||||||
|
|
||||||
func toString(i Issue) string {
|
|
||||||
return fmt.Sprintf("%s at %s", i.Details(), i.Position())
|
|
||||||
}
|
|
||||||
|
|
||||||
type visitor struct {
|
|
||||||
initLenMustBeZero bool
|
|
||||||
|
|
||||||
comments []*ast.CommentGroup // comments to apply during this visit
|
|
||||||
info *types.Info
|
|
||||||
|
|
||||||
nonZeroLengthSliceDecls map[interface{}]struct{}
|
|
||||||
fset *token.FileSet
|
|
||||||
issues []Issue
|
|
||||||
}
|
|
||||||
|
|
||||||
type Linter struct {
|
|
||||||
initLenMustBeZero bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewLinter(initialLengthMustBeZero bool) *Linter {
|
|
||||||
return &Linter{
|
|
||||||
initLenMustBeZero: initialLengthMustBeZero,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l Linter) Run(fset *token.FileSet, info *types.Info, nodes ...ast.Node) ([]Issue, error) {
|
|
||||||
var issues []Issue // nolint:prealloc // don't know how many there will be
|
|
||||||
for _, node := range nodes {
|
|
||||||
var comments []*ast.CommentGroup
|
|
||||||
if file, ok := node.(*ast.File); ok {
|
|
||||||
comments = file.Comments
|
|
||||||
}
|
|
||||||
visitor := visitor{
|
|
||||||
nonZeroLengthSliceDecls: make(map[interface{}]struct{}),
|
|
||||||
initLenMustBeZero: l.initLenMustBeZero,
|
|
||||||
info: info,
|
|
||||||
fset: fset,
|
|
||||||
comments: comments,
|
|
||||||
}
|
|
||||||
ast.Walk(&visitor, node)
|
|
||||||
issues = append(issues, visitor.issues...)
|
|
||||||
}
|
|
||||||
return issues, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (v *visitor) Visit(node ast.Node) ast.Visitor {
|
|
||||||
switch node := node.(type) {
|
|
||||||
case *ast.CallExpr:
|
|
||||||
fun, ok := node.Fun.(*ast.Ident)
|
|
||||||
if !ok || fun.Name != "append" {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if sliceIdent, ok := node.Args[0].(*ast.Ident); ok &&
|
|
||||||
v.hasNonZeroInitialLength(sliceIdent) &&
|
|
||||||
!v.hasNoLintOnSameLine(fun) {
|
|
||||||
v.issues = append(v.issues, AppendIssue{name: sliceIdent.Name, position: v.fset.Position(fun.Pos())})
|
|
||||||
}
|
|
||||||
case *ast.AssignStmt:
|
|
||||||
for i, right := range node.Rhs {
|
|
||||||
if right, ok := right.(*ast.CallExpr); ok {
|
|
||||||
fun, ok := right.Fun.(*ast.Ident)
|
|
||||||
if !ok || fun.Name != "make" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
left := node.Lhs[i]
|
|
||||||
if len(right.Args) == 2 {
|
|
||||||
// ignore if not a slice or it has explicit zero length
|
|
||||||
if !v.isSlice(right.Args[0]) {
|
|
||||||
break
|
|
||||||
} else if lit, ok := right.Args[1].(*ast.BasicLit); ok && lit.Kind == token.INT && lit.Value == "0" {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if v.initLenMustBeZero && !v.hasNoLintOnSameLine(fun) {
|
|
||||||
v.issues = append(v.issues, MustHaveNonZeroInitLenIssue{
|
|
||||||
name: v.textFor(left),
|
|
||||||
position: v.fset.Position(node.Pos()),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
v.recordNonZeroLengthSlices(left)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
func (v *visitor) textFor(node ast.Node) string {
|
|
||||||
typeBuf := new(bytes.Buffer)
|
|
||||||
if err := printer.Fprint(typeBuf, v.fset, node); err != nil {
|
|
||||||
log.Fatalf("ERROR: unable to print type: %s", err)
|
|
||||||
}
|
|
||||||
return typeBuf.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (v *visitor) hasNonZeroInitialLength(ident *ast.Ident) bool {
|
|
||||||
if ident.Obj == nil {
|
|
||||||
log.Printf("WARNING: could not determine with %q at %s is a slice (missing object type)",
|
|
||||||
ident.Name, v.fset.Position(ident.Pos()).String())
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
_, exists := v.nonZeroLengthSliceDecls[ident.Obj.Decl]
|
|
||||||
return exists
|
|
||||||
}
|
|
||||||
|
|
||||||
func (v *visitor) recordNonZeroLengthSlices(node ast.Node) {
|
|
||||||
ident, ok := node.(*ast.Ident)
|
|
||||||
if !ok {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if ident.Obj == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
v.nonZeroLengthSliceDecls[ident.Obj.Decl] = struct{}{}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (v *visitor) isSlice(node ast.Node) bool {
|
|
||||||
// determine type if this is a user-defined type
|
|
||||||
if ident, ok := node.(*ast.Ident); ok {
|
|
||||||
obj := ident.Obj
|
|
||||||
if obj == nil {
|
|
||||||
if v.info != nil {
|
|
||||||
_, ok := v.info.ObjectOf(ident).Type().(*types.Slice)
|
|
||||||
return ok
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
spec, ok := obj.Decl.(*ast.TypeSpec)
|
|
||||||
if !ok {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
node = spec.Type
|
|
||||||
}
|
|
||||||
|
|
||||||
if node, ok := node.(*ast.ArrayType); ok {
|
|
||||||
return node.Len == nil // only slices have zero length
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (v *visitor) hasNoLintOnSameLine(node ast.Node) bool {
|
|
||||||
var nolint = regexp.MustCompile(`^\s*nozero\b`)
|
|
||||||
nodePos := v.fset.Position(node.Pos())
|
|
||||||
for _, c := range v.comments {
|
|
||||||
commentPos := v.fset.Position(c.Pos())
|
|
||||||
if commentPos.Line == nodePos.Line && nolint.MatchString(c.Text()) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
|
@ -1,20 +0,0 @@
|
||||||
Copyright (C) 2013 Blake Mizerany
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining
|
|
||||||
a copy of this software and associated documentation files (the
|
|
||||||
"Software"), to deal in the Software without restriction, including
|
|
||||||
without limitation the rights to use, copy, modify, merge, publish,
|
|
||||||
distribute, sublicense, and/or sell copies of the Software, and to
|
|
||||||
permit persons to whom the Software is furnished to do so, subject to
|
|
||||||
the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be
|
|
||||||
included in all copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
|
||||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
|
||||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
|
||||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
|
||||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
|
||||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
|
||||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|
File diff suppressed because it is too large
Load Diff
|
@ -1,316 +0,0 @@
|
||||||
// Package quantile computes approximate quantiles over an unbounded data
|
|
||||||
// stream within low memory and CPU bounds.
|
|
||||||
//
|
|
||||||
// A small amount of accuracy is traded to achieve the above properties.
|
|
||||||
//
|
|
||||||
// Multiple streams can be merged before calling Query to generate a single set
|
|
||||||
// of results. This is meaningful when the streams represent the same type of
|
|
||||||
// data. See Merge and Samples.
|
|
||||||
//
|
|
||||||
// For more detailed information about the algorithm used, see:
|
|
||||||
//
|
|
||||||
// Effective Computation of Biased Quantiles over Data Streams
|
|
||||||
//
|
|
||||||
// http://www.cs.rutgers.edu/~muthu/bquant.pdf
|
|
||||||
package quantile
|
|
||||||
|
|
||||||
import (
|
|
||||||
"math"
|
|
||||||
"sort"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Sample holds an observed value and meta information for compression. JSON
|
|
||||||
// tags have been added for convenience.
|
|
||||||
type Sample struct {
|
|
||||||
Value float64 `json:",string"`
|
|
||||||
Width float64 `json:",string"`
|
|
||||||
Delta float64 `json:",string"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// Samples represents a slice of samples. It implements sort.Interface.
|
|
||||||
type Samples []Sample
|
|
||||||
|
|
||||||
func (a Samples) Len() int { return len(a) }
|
|
||||||
func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
|
|
||||||
func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
|
||||||
|
|
||||||
type invariant func(s *stream, r float64) float64
|
|
||||||
|
|
||||||
// NewLowBiased returns an initialized Stream for low-biased quantiles
|
|
||||||
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
|
|
||||||
// error guarantees can still be given even for the lower ranks of the data
|
|
||||||
// distribution.
|
|
||||||
//
|
|
||||||
// The provided epsilon is a relative error, i.e. the true quantile of a value
|
|
||||||
// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
|
|
||||||
//
|
|
||||||
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
|
|
||||||
// properties.
|
|
||||||
func NewLowBiased(epsilon float64) *Stream {
|
|
||||||
ƒ := func(s *stream, r float64) float64 {
|
|
||||||
return 2 * epsilon * r
|
|
||||||
}
|
|
||||||
return newStream(ƒ)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewHighBiased returns an initialized Stream for high-biased quantiles
|
|
||||||
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
|
|
||||||
// error guarantees can still be given even for the higher ranks of the data
|
|
||||||
// distribution.
|
|
||||||
//
|
|
||||||
// The provided epsilon is a relative error, i.e. the true quantile of a value
|
|
||||||
// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
|
|
||||||
//
|
|
||||||
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
|
|
||||||
// properties.
|
|
||||||
func NewHighBiased(epsilon float64) *Stream {
|
|
||||||
ƒ := func(s *stream, r float64) float64 {
|
|
||||||
return 2 * epsilon * (s.n - r)
|
|
||||||
}
|
|
||||||
return newStream(ƒ)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewTargeted returns an initialized Stream concerned with a particular set of
|
|
||||||
// quantile values that are supplied a priori. Knowing these a priori reduces
|
|
||||||
// space and computation time. The targets map maps the desired quantiles to
|
|
||||||
// their absolute errors, i.e. the true quantile of a value returned by a query
|
|
||||||
// is guaranteed to be within (Quantile±Epsilon).
|
|
||||||
//
|
|
||||||
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
|
|
||||||
func NewTargeted(targetMap map[float64]float64) *Stream {
|
|
||||||
// Convert map to slice to avoid slow iterations on a map.
|
|
||||||
// ƒ is called on the hot path, so converting the map to a slice
|
|
||||||
// beforehand results in significant CPU savings.
|
|
||||||
targets := targetMapToSlice(targetMap)
|
|
||||||
|
|
||||||
ƒ := func(s *stream, r float64) float64 {
|
|
||||||
var m = math.MaxFloat64
|
|
||||||
var f float64
|
|
||||||
for _, t := range targets {
|
|
||||||
if t.quantile*s.n <= r {
|
|
||||||
f = (2 * t.epsilon * r) / t.quantile
|
|
||||||
} else {
|
|
||||||
f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
|
|
||||||
}
|
|
||||||
if f < m {
|
|
||||||
m = f
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return m
|
|
||||||
}
|
|
||||||
return newStream(ƒ)
|
|
||||||
}
|
|
||||||
|
|
||||||
type target struct {
|
|
||||||
quantile float64
|
|
||||||
epsilon float64
|
|
||||||
}
|
|
||||||
|
|
||||||
func targetMapToSlice(targetMap map[float64]float64) []target {
|
|
||||||
targets := make([]target, 0, len(targetMap))
|
|
||||||
|
|
||||||
for quantile, epsilon := range targetMap {
|
|
||||||
t := target{
|
|
||||||
quantile: quantile,
|
|
||||||
epsilon: epsilon,
|
|
||||||
}
|
|
||||||
targets = append(targets, t)
|
|
||||||
}
|
|
||||||
|
|
||||||
return targets
|
|
||||||
}
|
|
||||||
|
|
||||||
// Stream computes quantiles for a stream of float64s. It is not thread-safe by
|
|
||||||
// design. Take care when using across multiple goroutines.
|
|
||||||
type Stream struct {
|
|
||||||
*stream
|
|
||||||
b Samples
|
|
||||||
sorted bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func newStream(ƒ invariant) *Stream {
|
|
||||||
x := &stream{ƒ: ƒ}
|
|
||||||
return &Stream{x, make(Samples, 0, 500), true}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Insert inserts v into the stream.
|
|
||||||
func (s *Stream) Insert(v float64) {
|
|
||||||
s.insert(Sample{Value: v, Width: 1})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Stream) insert(sample Sample) {
|
|
||||||
s.b = append(s.b, sample)
|
|
||||||
s.sorted = false
|
|
||||||
if len(s.b) == cap(s.b) {
|
|
||||||
s.flush()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Query returns the computed qth percentiles value. If s was created with
|
|
||||||
// NewTargeted, and q is not in the set of quantiles provided a priori, Query
|
|
||||||
// will return an unspecified result.
|
|
||||||
func (s *Stream) Query(q float64) float64 {
|
|
||||||
if !s.flushed() {
|
|
||||||
// Fast path when there hasn't been enough data for a flush;
|
|
||||||
// this also yields better accuracy for small sets of data.
|
|
||||||
l := len(s.b)
|
|
||||||
if l == 0 {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
i := int(math.Ceil(float64(l) * q))
|
|
||||||
if i > 0 {
|
|
||||||
i -= 1
|
|
||||||
}
|
|
||||||
s.maybeSort()
|
|
||||||
return s.b[i].Value
|
|
||||||
}
|
|
||||||
s.flush()
|
|
||||||
return s.stream.query(q)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Merge merges samples into the underlying streams samples. This is handy when
|
|
||||||
// merging multiple streams from separate threads, database shards, etc.
|
|
||||||
//
|
|
||||||
// ATTENTION: This method is broken and does not yield correct results. The
|
|
||||||
// underlying algorithm is not capable of merging streams correctly.
|
|
||||||
func (s *Stream) Merge(samples Samples) {
|
|
||||||
sort.Sort(samples)
|
|
||||||
s.stream.merge(samples)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reset reinitializes and clears the list reusing the samples buffer memory.
|
|
||||||
func (s *Stream) Reset() {
|
|
||||||
s.stream.reset()
|
|
||||||
s.b = s.b[:0]
|
|
||||||
}
|
|
||||||
|
|
||||||
// Samples returns stream samples held by s.
|
|
||||||
func (s *Stream) Samples() Samples {
|
|
||||||
if !s.flushed() {
|
|
||||||
return s.b
|
|
||||||
}
|
|
||||||
s.flush()
|
|
||||||
return s.stream.samples()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Count returns the total number of samples observed in the stream
|
|
||||||
// since initialization.
|
|
||||||
func (s *Stream) Count() int {
|
|
||||||
return len(s.b) + s.stream.count()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Stream) flush() {
|
|
||||||
s.maybeSort()
|
|
||||||
s.stream.merge(s.b)
|
|
||||||
s.b = s.b[:0]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Stream) maybeSort() {
|
|
||||||
if !s.sorted {
|
|
||||||
s.sorted = true
|
|
||||||
sort.Sort(s.b)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Stream) flushed() bool {
|
|
||||||
return len(s.stream.l) > 0
|
|
||||||
}
|
|
||||||
|
|
||||||
type stream struct {
|
|
||||||
n float64
|
|
||||||
l []Sample
|
|
||||||
ƒ invariant
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *stream) reset() {
|
|
||||||
s.l = s.l[:0]
|
|
||||||
s.n = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *stream) insert(v float64) {
|
|
||||||
s.merge(Samples{{v, 1, 0}})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *stream) merge(samples Samples) {
|
|
||||||
// TODO(beorn7): This tries to merge not only individual samples, but
|
|
||||||
// whole summaries. The paper doesn't mention merging summaries at
|
|
||||||
// all. Unittests show that the merging is inaccurate. Find out how to
|
|
||||||
// do merges properly.
|
|
||||||
var r float64
|
|
||||||
i := 0
|
|
||||||
for _, sample := range samples {
|
|
||||||
for ; i < len(s.l); i++ {
|
|
||||||
c := s.l[i]
|
|
||||||
if c.Value > sample.Value {
|
|
||||||
// Insert at position i.
|
|
||||||
s.l = append(s.l, Sample{})
|
|
||||||
copy(s.l[i+1:], s.l[i:])
|
|
||||||
s.l[i] = Sample{
|
|
||||||
sample.Value,
|
|
||||||
sample.Width,
|
|
||||||
math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
|
|
||||||
// TODO(beorn7): How to calculate delta correctly?
|
|
||||||
}
|
|
||||||
i++
|
|
||||||
goto inserted
|
|
||||||
}
|
|
||||||
r += c.Width
|
|
||||||
}
|
|
||||||
s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
|
|
||||||
i++
|
|
||||||
inserted:
|
|
||||||
s.n += sample.Width
|
|
||||||
r += sample.Width
|
|
||||||
}
|
|
||||||
s.compress()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *stream) count() int {
|
|
||||||
return int(s.n)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *stream) query(q float64) float64 {
|
|
||||||
t := math.Ceil(q * s.n)
|
|
||||||
t += math.Ceil(s.ƒ(s, t) / 2)
|
|
||||||
p := s.l[0]
|
|
||||||
var r float64
|
|
||||||
for _, c := range s.l[1:] {
|
|
||||||
r += p.Width
|
|
||||||
if r+c.Width+c.Delta > t {
|
|
||||||
return p.Value
|
|
||||||
}
|
|
||||||
p = c
|
|
||||||
}
|
|
||||||
return p.Value
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *stream) compress() {
|
|
||||||
if len(s.l) < 2 {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
x := s.l[len(s.l)-1]
|
|
||||||
xi := len(s.l) - 1
|
|
||||||
r := s.n - 1 - x.Width
|
|
||||||
|
|
||||||
for i := len(s.l) - 2; i >= 0; i-- {
|
|
||||||
c := s.l[i]
|
|
||||||
if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
|
|
||||||
x.Width += c.Width
|
|
||||||
s.l[xi] = x
|
|
||||||
// Remove element at i.
|
|
||||||
copy(s.l[i:], s.l[i+1:])
|
|
||||||
s.l = s.l[:len(s.l)-1]
|
|
||||||
xi -= 1
|
|
||||||
} else {
|
|
||||||
x = c
|
|
||||||
xi = i
|
|
||||||
}
|
|
||||||
r -= c.Width
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *stream) samples() Samples {
|
|
||||||
samples := make(Samples, len(s.l))
|
|
||||||
copy(samples, s.l)
|
|
||||||
return samples
|
|
||||||
}
|
|
|
@ -1,21 +0,0 @@
|
||||||
MIT License
|
|
||||||
|
|
||||||
Copyright (c) 2020 Bartłomiej Klimczak
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
|
||||||
in the Software without restriction, including without limitation the rights
|
|
||||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
copies of the Software, and to permit persons to whom the Software is
|
|
||||||
furnished to do so, subject to the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be included in all
|
|
||||||
copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
||||||
SOFTWARE.
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue