diff --git a/README.md b/README.md index b883e96d1..eaf4889ee 100644 --- a/README.md +++ b/README.md @@ -261,6 +261,7 @@ The following open source libraries, frameworks, and tools are used by GoToSocia - [gruf/go-debug](https://codeberg.org/gruf/go-debug); debug build tag. [MIT License](https://spdx.org/licenses/MIT.html). - [gruf/go-errors](https://codeberg.org/gruf/go-errors); context-like error w/ value wrapping [MIT License](https://spdx.org/licenses/MIT.html). - [gruf/go-fastcopy](https://codeberg.org/gruf/go-fastcopy); performant (buffer pooled) I/O copying [MIT License](https://spdx.org/licenses/MIT.html). + - [gruf/go-ffmpreg](https://codeberg.org/gruf/go-ffmpreg); embedded ffmpeg / ffprobe WASM binaries [GPL-3.0 License](https://spdx.org/licenses/GPL-3.0-only.html). - [gruf/go-kv](https://codeberg.org/gruf/go-kv); log field formatting. [MIT License](https://spdx.org/licenses/MIT.html). - [gruf/go-list](https://codeberg.org/gruf/go-list); generic doubly linked list. [MIT License](https://spdx.org/licenses/MIT.html). - [gruf/go-mutexes](https://codeberg.org/gruf/go-mutexes); safemutex & mutex map. [MIT License](https://spdx.org/licenses/MIT.html). 
diff --git a/cmd/gotosocial/action/server/server.go b/cmd/gotosocial/action/server/server.go index 14db67795..828b9c875 100644 --- a/cmd/gotosocial/action/server/server.go +++ b/cmd/gotosocial/action/server/server.go @@ -24,12 +24,14 @@ import ( "net/http" "os" "os/signal" + "runtime" "strings" "syscall" "time" "github.com/KimMachineGun/automemlimit/memlimit" "github.com/gin-gonic/gin" + "github.com/ncruces/go-sqlite3" "github.com/superseriousbusiness/gotosocial/cmd/gotosocial/action" "github.com/superseriousbusiness/gotosocial/internal/api" apiutil "github.com/superseriousbusiness/gotosocial/internal/api/util" @@ -37,6 +39,7 @@ import ( "github.com/superseriousbusiness/gotosocial/internal/filter/spam" "github.com/superseriousbusiness/gotosocial/internal/filter/visibility" "github.com/superseriousbusiness/gotosocial/internal/gtserror" + "github.com/superseriousbusiness/gotosocial/internal/media/ffmpeg" "github.com/superseriousbusiness/gotosocial/internal/messages" "github.com/superseriousbusiness/gotosocial/internal/metrics" "github.com/superseriousbusiness/gotosocial/internal/middleware" @@ -66,14 +69,15 @@ import ( // Start creates and starts a gotosocial server var Start action.GTSAction = func(ctx context.Context) error { - if _, err := maxprocs.Set(maxprocs.Logger(nil)); err != nil { - log.Warnf(ctx, "could not set CPU limits from cgroup: %s", err) - } + // Set GOMAXPROCS / GOMEMLIMIT + // to match container limits. + setLimits(ctx) - if _, err := memlimit.SetGoMemLimitWithOpts(); err != nil { - if !strings.Contains(err.Error(), "cgroup mountpoint does not exist") { - log.Warnf(ctx, "could not set Memory limits from cgroup: %s", err) - } + // Compile WASM modules ahead of first use + // to prevent unexpected initial slowdowns. 
+ log.Info(ctx, "precompiling WebAssembly") + if err := precompileWASM(ctx); err != nil { + return err } var ( @@ -429,3 +433,30 @@ var Start action.GTSAction = func(ctx context.Context) error { return nil } + +func setLimits(ctx context.Context) { + if _, err := maxprocs.Set(maxprocs.Logger(nil)); err != nil { + log.Warnf(ctx, "could not set CPU limits from cgroup: %s", err) + } + + if _, err := memlimit.SetGoMemLimitWithOpts(); err != nil { + if !strings.Contains(err.Error(), "cgroup mountpoint does not exist") { + log.Warnf(ctx, "could not set Memory limits from cgroup: %s", err) + } + } +} + +func precompileWASM(ctx context.Context) error { + // TODO: make max number instances configurable + maxprocs := runtime.GOMAXPROCS(0) + if err := sqlite3.Initialize(); err != nil { + return gtserror.Newf("error compiling sqlite3: %w", err) + } + if err := ffmpeg.InitFfmpeg(ctx, maxprocs); err != nil { + return gtserror.Newf("error compiling ffmpeg: %w", err) + } + if err := ffmpeg.InitFfprobe(ctx, maxprocs); err != nil { + return gtserror.Newf("error compiling ffprobe: %w", err) + } + return nil +} diff --git a/cmd/process-emoji/main.go b/cmd/process-emoji/main.go new file mode 100644 index 000000000..62253bbdf --- /dev/null +++ b/cmd/process-emoji/main.go @@ -0,0 +1,122 @@ +// GoToSocial +// Copyright (C) GoToSocial Authors admin@gotosocial.org +// SPDX-License-Identifier: AGPL-3.0-or-later +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. 
+// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package main + +import ( + "context" + "io" + "os" + "os/signal" + "syscall" + + "codeberg.org/gruf/go-storage/memory" + "github.com/superseriousbusiness/gotosocial/internal/config" + "github.com/superseriousbusiness/gotosocial/internal/db/bundb" + "github.com/superseriousbusiness/gotosocial/internal/log" + "github.com/superseriousbusiness/gotosocial/internal/media" + "github.com/superseriousbusiness/gotosocial/internal/state" + "github.com/superseriousbusiness/gotosocial/internal/storage" + "github.com/superseriousbusiness/gotosocial/internal/util" +) + +func main() { + ctx := context.Background() + ctx, cncl := signal.NotifyContext(ctx, syscall.SIGTERM, syscall.SIGINT) + defer cncl() + + if len(os.Args) != 3 { + log.Panic(ctx, "Usage: go run ./cmd/process-emoji ") + } + + var st storage.Driver + st.Storage = memory.Open(10, true) + + var state state.State + state.Storage = &st + + state.Caches.Init() + + var err error + + config.SetHost("example.com") + config.SetStorageBackend("disk") + config.SetStorageLocalBasePath("/tmp/gotosocial") + config.SetDbType("sqlite") + config.SetDbAddress(":memory:") + + state.DB, err = bundb.NewBunDBService(ctx, &state) + if err != nil { + log.Panic(ctx, err) + } + + if err := state.DB.CreateInstanceAccount(ctx); err != nil { + log.Panicf(ctx, "error creating instance account: %s", err) + } + + if err := state.DB.CreateInstanceInstance(ctx); err != nil { + log.Panicf(ctx, "error creating instance instance: %s", err) + } + + if err := state.DB.CreateInstanceApplication(ctx); err != nil { + log.Panicf(ctx, "error creating instance application: %s", err) + } + + mgr := media.NewManager(&state) + + processing, err := mgr.CreateEmoji(ctx, + "emoji", + "example.com", + func(ctx context.Context) (reader io.ReadCloser, err error) { + return os.Open(os.Args[1]) + }, + media.AdditionalEmojiInfo{ + URI: 
util.Ptr("example.com/emoji"), + }, + ) + if err != nil { + log.Panic(ctx, err) + } + + emoji, err := processing.Load(ctx) + if err != nil { + log.Panic(ctx, err) + } + + copyFile(ctx, &st, emoji.ImageStaticPath, os.Args[2]) +} + +func copyFile(ctx context.Context, st *storage.Driver, key string, path string) { + rc, err := st.GetStream(ctx, key) + if err != nil { + log.Panic(ctx, err) + } + defer rc.Close() + + _ = os.Remove(path) + + output, err := os.Create(path) + if err != nil { + log.Panic(ctx, err) + } + defer output.Close() + + _, err = io.Copy(output, rc) + if err != nil { + log.Panic(ctx, err) + } +} diff --git a/cmd/process-media/main.go b/cmd/process-media/main.go new file mode 100644 index 000000000..2f5a43f31 --- /dev/null +++ b/cmd/process-media/main.go @@ -0,0 +1,124 @@ +// GoToSocial +// Copyright (C) GoToSocial Authors admin@gotosocial.org +// SPDX-License-Identifier: AGPL-3.0-or-later +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . 
+ +package main + +import ( + "context" + "io" + "os" + "os/signal" + "syscall" + + "codeberg.org/gruf/go-storage/memory" + "github.com/superseriousbusiness/gotosocial/internal/config" + "github.com/superseriousbusiness/gotosocial/internal/db/bundb" + "github.com/superseriousbusiness/gotosocial/internal/log" + "github.com/superseriousbusiness/gotosocial/internal/media" + "github.com/superseriousbusiness/gotosocial/internal/state" + "github.com/superseriousbusiness/gotosocial/internal/storage" +) + +func main() { + ctx := context.Background() + ctx, cncl := signal.NotifyContext(ctx, syscall.SIGTERM, syscall.SIGINT) + defer cncl() + + if len(os.Args) != 4 { + log.Panic(ctx, "Usage: go run ./cmd/process-media ") + } + + var st storage.Driver + st.Storage = memory.Open(10, true) + + var state state.State + state.Storage = &st + + state.Caches.Init() + + var err error + + config.SetHost("example.com") + config.SetStorageBackend("disk") + config.SetStorageLocalBasePath("/tmp/gotosocial") + config.SetDbType("sqlite") + config.SetDbAddress(":memory:") + + state.DB, err = bundb.NewBunDBService(ctx, &state) + if err != nil { + log.Panic(ctx, err) + } + + if err := state.DB.CreateInstanceAccount(ctx); err != nil { + log.Panicf(ctx, "error creating instance account: %s", err) + } + + if err := state.DB.CreateInstanceInstance(ctx); err != nil { + log.Panicf(ctx, "error creating instance instance: %s", err) + } + + if err := state.DB.CreateInstanceApplication(ctx); err != nil { + log.Panicf(ctx, "error creating instance application: %s", err) + } + + account, err := state.DB.GetInstanceAccount(ctx, "") + if err != nil { + log.Panic(ctx, err) + } + + mgr := media.NewManager(&state) + + processing, err := mgr.CreateMedia(ctx, + account.ID, + func(ctx context.Context) (reader io.ReadCloser, err error) { + return os.Open(os.Args[1]) + }, + media.AdditionalMediaInfo{}, + ) + if err != nil { + log.Panic(ctx, err) + } + + media, err := processing.Load(ctx) + if err != nil { + 
log.Panic(ctx, err) + } + + copyFile(ctx, &st, media.File.Path, os.Args[2]) + copyFile(ctx, &st, media.Thumbnail.Path, os.Args[3]) +} + +func copyFile(ctx context.Context, st *storage.Driver, key string, path string) { + rc, err := st.GetStream(ctx, key) + if err != nil { + log.Panic(ctx, err) + } + defer rc.Close() + + _ = os.Remove(path) + + output, err := os.Create(path) + if err != nil { + log.Panic(ctx, err) + } + defer output.Close() + + _, err = io.Copy(output, rc) + if err != nil { + log.Panic(ctx, err) + } +} diff --git a/docs/configuration/media.md b/docs/configuration/media.md index 656c93315..e07463320 100644 --- a/docs/configuration/media.md +++ b/docs/configuration/media.md @@ -7,25 +7,24 @@ ##### MEDIA CONFIG ##### ######################## -# Config pertaining to media uploads (videos, image, image descriptions, emoji). +# Config pertaining to media uploads (media, image descriptions, emoji). -# Size. Maximum allowed image upload size in bytes. -# -# Raising this limit may cause other servers to not fetch media -# attached to a post. -# -# Examples: [2097152, 10485760, 10MB, 10MiB] -# Default: 10MiB (10485760 bytes) -media-image-max-size: 10MiB - -# Size. Maximum allowed video upload size in bytes. +# Size. Max size in bytes of media uploads via API. # # Raising this limit may cause other servers to not fetch media # attached to a post. # # Examples: [2097152, 10485760, 40MB, 40MiB] # Default: 40MiB (41943040 bytes) -media-video-max-size: 40MiB +media-local-max-size: 40MiB + +# Size. Max size in bytes of media to download from other instances. +# +# Lowering this limit may cause your instance not to fetch post media. +# +# Examples: [2097152, 10485760, 40MB, 40MiB] +# Default: 40MiB (41943040 bytes) +media-remote-max-size: 40MiB # Int. Minimum amount of characters required as an image or video description. 
# Examples: [500, 1000, 1500] diff --git a/example/config.yaml b/example/config.yaml index 6ef9f7a6c..75d0587cf 100644 --- a/example/config.yaml +++ b/example/config.yaml @@ -444,25 +444,24 @@ accounts-custom-css-length: 10000 ##### MEDIA CONFIG ##### ######################## -# Config pertaining to media uploads (videos, image, image descriptions, emoji). +# Config pertaining to media uploads (media, image descriptions, emoji). -# Size. Maximum allowed image upload size in bytes. -# -# Raising this limit may cause other servers to not fetch media -# attached to a post. -# -# Examples: [2097152, 10485760, 10MB, 10MiB] -# Default: 10MiB (10485760 bytes) -media-image-max-size: 10MiB - -# Size. Maximum allowed video upload size in bytes. +# Size. Max size in bytes of media uploads via API. # # Raising this limit may cause other servers to not fetch media # attached to a post. # # Examples: [2097152, 10485760, 40MB, 40MiB] # Default: 40MiB (41943040 bytes) -media-video-max-size: 40MiB +media-local-max-size: 40MiB + +# Size. Max size in bytes of media to download from other instances. +# +# Lowering this limit may cause your instance not to fetch post media. +# +# Examples: [2097152, 10485760, 40MB, 40MiB] +# Default: 40MiB (41943040 bytes) +media-remote-max-size: 40MiB # Int. Minimum amount of characters required as an image or video description. 
# Examples: [500, 1000, 1500] diff --git a/go.mod b/go.mod index c20cce74d..32ca66e37 100644 --- a/go.mod +++ b/go.mod @@ -12,20 +12,20 @@ require ( codeberg.org/gruf/go-debug v1.3.0 codeberg.org/gruf/go-errors/v2 v2.3.2 codeberg.org/gruf/go-fastcopy v1.1.2 - codeberg.org/gruf/go-iotools v0.0.0-20230811115124-5d4223615a7f + codeberg.org/gruf/go-ffmpreg v0.2.2 + codeberg.org/gruf/go-iotools v0.0.0-20240710125620-934ae9c654cf codeberg.org/gruf/go-kv v1.6.4 codeberg.org/gruf/go-list v0.0.0-20240425093752-494db03d641f codeberg.org/gruf/go-logger/v2 v2.2.1 codeberg.org/gruf/go-mempool v0.0.0-20240507125005-cef10d64a760 + codeberg.org/gruf/go-mimetypes v1.2.0 codeberg.org/gruf/go-mutexes v1.5.1 codeberg.org/gruf/go-runners v1.6.2 codeberg.org/gruf/go-sched v1.2.3 - codeberg.org/gruf/go-storage v0.1.1 + codeberg.org/gruf/go-storage v0.1.2 codeberg.org/gruf/go-structr v0.8.7 - codeberg.org/superseriousbusiness/exif-terminator v0.7.0 github.com/DmitriyVTitov/size v1.5.0 github.com/KimMachineGun/automemlimit v0.6.1 - github.com/abema/go-mp4 v1.2.0 github.com/buckket/go-blurhash v1.1.0 github.com/coreos/go-oidc/v3 v3.10.0 github.com/disintegration/imaging v1.6.2 @@ -39,7 +39,6 @@ require ( github.com/google/uuid v1.6.0 github.com/gorilla/feeds v1.2.0 github.com/gorilla/websocket v1.5.2 - github.com/h2non/filetype v1.1.3 github.com/jackc/pgx/v5 v5.6.0 github.com/microcosm-cc/bluemonday v1.0.27 github.com/miekg/dns v1.1.61 @@ -56,6 +55,7 @@ require ( github.com/superseriousbusiness/oauth2/v4 v4.3.2-SSB.0.20230227143000-f4900831d6c8 github.com/tdewolff/minify/v2 v2.20.34 github.com/technologize/otel-go-contrib v1.1.1 + github.com/tetratelabs/wazero v1.7.3 github.com/tomnomnom/linkheader v0.0.0-20180905144013-02ca5825eb80 github.com/ulule/limiter/v3 v3.11.2 github.com/uptrace/bun v1.2.1 @@ -74,7 +74,6 @@ require ( go.opentelemetry.io/otel/trace v1.26.0 go.uber.org/automaxprocs v1.5.3 golang.org/x/crypto v0.25.0 - golang.org/x/image v0.18.0 golang.org/x/net v0.27.0 
golang.org/x/oauth2 v0.21.0 golang.org/x/text v0.16.0 @@ -107,17 +106,11 @@ require ( github.com/coreos/go-systemd/v22 v22.3.2 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/docker/go-units v0.5.0 // indirect - github.com/dsoprea/go-exif/v3 v3.0.0-20210625224831-a6301f85c82b // indirect - github.com/dsoprea/go-iptc v0.0.0-20200610044640-bc9ca208b413 // indirect - github.com/dsoprea/go-logging v0.0.0-20200710184922-b02d349568dd // indirect - github.com/dsoprea/go-photoshop-info-format v0.0.0-20200610045659-121dd752914d // indirect - github.com/dsoprea/go-utility/v2 v2.0.0-20200717064901-2fccff4aa15e // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/gabriel-vasile/mimetype v1.4.3 // indirect github.com/gin-contrib/sse v0.1.0 // indirect - github.com/go-errors/errors v1.4.1 // indirect github.com/go-fed/httpsig v1.1.0 // indirect github.com/go-ini/ini v1.67.0 // indirect github.com/go-jose/go-jose/v4 v4.0.1 // indirect @@ -137,11 +130,9 @@ require ( github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect github.com/go-playground/validator/v10 v10.20.0 // indirect - github.com/go-xmlfmt/xmlfmt v0.0.0-20211206191508-7fd73a941850 // indirect github.com/goccy/go-json v0.10.3 // indirect github.com/godbus/dbus/v5 v5.0.4 // indirect github.com/golang-jwt/jwt v3.2.2+incompatible // indirect - github.com/golang/geo v0.0.0-20210211234256-740aa86cb551 // indirect github.com/gorilla/context v1.1.2 // indirect github.com/gorilla/css v1.0.1 // indirect github.com/gorilla/handlers v1.5.2 // indirect @@ -196,10 +187,7 @@ require ( github.com/spf13/cast v1.6.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/subosito/gotenv v1.6.0 // indirect - github.com/superseriousbusiness/go-jpeg-image-structure/v2 v2.0.0-20220321154430-d89a106fdabe // 
indirect - github.com/superseriousbusiness/go-png-image-structure/v2 v2.0.1-SSB // indirect github.com/tdewolff/parse/v2 v2.7.15 // indirect - github.com/tetratelabs/wazero v1.7.3 // indirect github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc // indirect github.com/toqueteos/webbrowser v1.2.0 // indirect github.com/twitchyliquid64/golang-asm v0.15.1 // indirect @@ -213,6 +201,7 @@ require ( go.uber.org/multierr v1.11.0 // indirect golang.org/x/arch v0.8.0 // indirect golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect + golang.org/x/image v0.18.0 // indirect golang.org/x/mod v0.18.0 // indirect golang.org/x/sync v0.7.0 // indirect golang.org/x/sys v0.22.0 // indirect diff --git a/go.sum b/go.sum index 42087dc35..7bc8a8906 100644 --- a/go.sum +++ b/go.sum @@ -52,8 +52,10 @@ codeberg.org/gruf/go-fastcopy v1.1.2 h1:YwmYXPsyOcRBxKEE2+w1bGAZfclHVaPijFsOVOcn codeberg.org/gruf/go-fastcopy v1.1.2/go.mod h1:GDDYR0Cnb3U/AIfGM3983V/L+GN+vuwVMvrmVABo21s= codeberg.org/gruf/go-fastpath/v2 v2.0.0 h1:iAS9GZahFhyWEH0KLhFEJR+txx1ZhMXxYzu2q5Qo9c0= codeberg.org/gruf/go-fastpath/v2 v2.0.0/go.mod h1:3pPqu5nZjpbRrOqvLyAK7puS1OfEtQvjd6342Cwz56Q= -codeberg.org/gruf/go-iotools v0.0.0-20230811115124-5d4223615a7f h1:Kazm/PInN2m1SannRMRe3DQGQc9V2EuetsQ9KAi+pBQ= -codeberg.org/gruf/go-iotools v0.0.0-20230811115124-5d4223615a7f/go.mod h1:B8uq4yHtIcKXhBZT9C/SYisz25lldLHMVpwZPz4ADLQ= +codeberg.org/gruf/go-ffmpreg v0.2.2 h1:K4I/7+BuzPLOVjL3hzTFdL8Z9wC0oRCK3xMKNVE86TE= +codeberg.org/gruf/go-ffmpreg v0.2.2/go.mod h1:oPMfBkOK7xmR/teT/dKW6SeMFpRos9ceR/OuUrxBfcQ= +codeberg.org/gruf/go-iotools v0.0.0-20240710125620-934ae9c654cf h1:84s/ii8N6lYlskZjHH+DG6jyia8w2mXMZlRwFn8Gs3A= +codeberg.org/gruf/go-iotools v0.0.0-20240710125620-934ae9c654cf/go.mod h1:zZAICsp5rY7+hxnws2V0ePrWxE0Z2Z/KXcN3p/RQCfk= codeberg.org/gruf/go-kv v1.6.4 h1:3NZiW8HVdBM3kpOiLb7XfRiihnzZWMAixdCznguhILk= codeberg.org/gruf/go-kv v1.6.4/go.mod h1:O/YkSvKiS9XsRolM3rqCd9YJmND7dAXu9z+PrlYO4bc= codeberg.org/gruf/go-list 
v0.0.0-20240425093752-494db03d641f h1:Ss6Z+vygy+jOGhj96d/GwsYYDd22QmIcH74zM7/nQkw= @@ -68,18 +70,18 @@ codeberg.org/gruf/go-maps v1.0.3 h1:VDwhnnaVNUIy5O93CvkcE2IZXnMB1+IJjzfop9V12es= codeberg.org/gruf/go-maps v1.0.3/go.mod h1:D5LNDxlC9rsDuVQVM6JObaVGAdHB6g2dTdOdkh1aXWA= codeberg.org/gruf/go-mempool v0.0.0-20240507125005-cef10d64a760 h1:m2/UCRXhjDwAg4vyji6iKCpomKw6P4PmBOUi5DvAMH4= codeberg.org/gruf/go-mempool v0.0.0-20240507125005-cef10d64a760/go.mod h1:E3RcaCFNq4zXpvaJb8lfpPqdUAmSkP5F1VmMiEUYTEk= +codeberg.org/gruf/go-mimetypes v1.2.0 h1:3rZGXY/SkNYbamiddWXs2gETXIBkGIeWYnbWpp2OEbc= +codeberg.org/gruf/go-mimetypes v1.2.0/go.mod h1:YiUWRj/nAdJQc+UFRvcsL6xXZsbc6b6Ic739ycEO8Yg= codeberg.org/gruf/go-mutexes v1.5.1 h1:xICU0WXhWr6wf+Iror4eE3xT+xnXNPrO6o77D/G6QuY= codeberg.org/gruf/go-mutexes v1.5.1/go.mod h1:rPEqQ/y6CmGITaZ3GPTMQVsoZAOzbsAHyIaLsJcOqVE= codeberg.org/gruf/go-runners v1.6.2 h1:oQef9niahfHu/wch14xNxlRMP8i+ABXH1Cb9PzZ4oYo= codeberg.org/gruf/go-runners v1.6.2/go.mod h1:Tq5PrZ/m/rBXbLZz0u5if+yP3nG5Sf6S8O/GnyEePeQ= codeberg.org/gruf/go-sched v1.2.3 h1:H5ViDxxzOBR3uIyGBCf0eH8b1L8wMybOXcdtUUTXZHk= codeberg.org/gruf/go-sched v1.2.3/go.mod h1:vT9uB6KWFIIwnG9vcPY2a0alYNoqdL1mSzRM8I+PK7A= -codeberg.org/gruf/go-storage v0.1.1 h1:CSX1PMMg/7vqqK8aCFtq94xCrOB3xhj7eWIvzILdLpY= -codeberg.org/gruf/go-storage v0.1.1/go.mod h1:145IWMUOc6YpIiZIiCIEwkkNZZPiSbwMnZxRjSc5q6c= +codeberg.org/gruf/go-storage v0.1.2 h1:dIOVOKq1CJpRmuhbB8Zok3mmo8V6VV/nX5GLIm6hywA= +codeberg.org/gruf/go-storage v0.1.2/go.mod h1:LRDpFHqRJi0f+35c3ltBH2e/pGfwY5dGlNlgCJ/R1DA= codeberg.org/gruf/go-structr v0.8.7 h1:agYCI6tSXU4JHVYPwZk3Og5rrBePNVv5iPWsDu7ZJIw= codeberg.org/gruf/go-structr v0.8.7/go.mod h1:O0FTNgzUnUKwWey4dEW99QD8rPezKPi5sxCVxYOJ1Fg= -codeberg.org/superseriousbusiness/exif-terminator v0.7.0 h1:Y6VApSXhKqExG0H2hZ2JelRK4xmWdjDQjn13CpEfzko= -codeberg.org/superseriousbusiness/exif-terminator v0.7.0/go.mod h1:gCWKduudUWFzsnixoMzu0FYVdxHWG+AbXnZ50DqxsUE= dmitri.shuralyov.com/gpu/mtl 
v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= @@ -94,8 +96,6 @@ github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0 github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= -github.com/abema/go-mp4 v1.2.0 h1:gi4X8xg/m179N/J15Fn5ugywN9vtI6PLk6iLldHGLAk= -github.com/abema/go-mp4 v1.2.0/go.mod h1:vPl9t5ZK7K0x68jh12/+ECWBCXoWuIDtNgPtU2f04ws= github.com/ajg/form v1.5.1 h1:t9c7v8JUKu/XxOGBU0yjNpaMloxGEJhUkqFRq0ibGeU= github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= @@ -138,7 +138,6 @@ github.com/coreos/go-oidc/v3 v3.10.0/go.mod h1:5j11xcw0D3+SGxn6Z/WFADsgcWVMyNAlS github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -148,22 +147,6 @@ github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1 github.com/disintegration/imaging v1.6.2/go.mod 
h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/dsoprea/go-exif/v2 v2.0.0-20200321225314-640175a69fe4/go.mod h1:Lm2lMM2zx8p4a34ZemkaUV95AnMl4ZvLbCUbwOvLC2E= -github.com/dsoprea/go-exif/v3 v3.0.0-20200717053412-08f1b6708903/go.mod h1:0nsO1ce0mh5czxGeLo4+OCZ/C6Eo6ZlMWsz7rH/Gxv8= -github.com/dsoprea/go-exif/v3 v3.0.0-20210428042052-dca55bf8ca15/go.mod h1:cg5SNYKHMmzxsr9X6ZeLh/nfBRHHp5PngtEPcujONtk= -github.com/dsoprea/go-exif/v3 v3.0.0-20210625224831-a6301f85c82b h1:NgNuLvW/gAFKU30ULWW0gtkCt56JfB7FrZ2zyo0wT8I= -github.com/dsoprea/go-exif/v3 v3.0.0-20210625224831-a6301f85c82b/go.mod h1:cg5SNYKHMmzxsr9X6ZeLh/nfBRHHp5PngtEPcujONtk= -github.com/dsoprea/go-iptc v0.0.0-20200610044640-bc9ca208b413 h1:YDRiMEm32T60Kpm35YzOK9ZHgjsS1Qrid+XskNcsdp8= -github.com/dsoprea/go-iptc v0.0.0-20200610044640-bc9ca208b413/go.mod h1:kYIdx9N9NaOyD7U6D+YtExN7QhRm+5kq7//yOsRXQtM= -github.com/dsoprea/go-logging v0.0.0-20190624164917-c4f10aab7696/go.mod h1:Nm/x2ZUNRW6Fe5C3LxdY1PyZY5wmDv/s5dkPJ/VB3iA= -github.com/dsoprea/go-logging v0.0.0-20200517223158-a10564966e9d/go.mod h1:7I+3Pe2o/YSU88W0hWlm9S22W7XI1JFNJ86U0zPKMf8= -github.com/dsoprea/go-logging v0.0.0-20200710184922-b02d349568dd h1:l+vLbuxptsC6VQyQsfD7NnEC8BZuFpz45PgY+pH8YTg= -github.com/dsoprea/go-logging v0.0.0-20200710184922-b02d349568dd/go.mod h1:7I+3Pe2o/YSU88W0hWlm9S22W7XI1JFNJ86U0zPKMf8= -github.com/dsoprea/go-photoshop-info-format v0.0.0-20200610045659-121dd752914d h1:dg6UMHa50VI01WuPWXPbNJpO8QSyvIF5T5n2IZiqX3A= -github.com/dsoprea/go-photoshop-info-format v0.0.0-20200610045659-121dd752914d/go.mod h1:pqKB+ijp27cEcrHxhXVgUUMlSDRuGJJp1E+20Lj5H0E= -github.com/dsoprea/go-utility v0.0.0-20200711062821-fab8125e9bdf/go.mod h1:95+K3z2L0mqsVYd6yveIv1lmtT3tcQQ3dVakPySffW8= -github.com/dsoprea/go-utility/v2 v2.0.0-20200717064901-2fccff4aa15e 
h1:IxIbA7VbCNrwumIYjDoMOdf4KOSkMC6NJE4s8oRbE7E= -github.com/dsoprea/go-utility/v2 v2.0.0-20200717064901-2fccff4aa15e/go.mod h1:uAzdkPTub5Y9yQwXe8W4m2XuP0tK4a9Q/dantD0+uaU= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -197,11 +180,6 @@ github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= github.com/gin-gonic/gin v1.10.0 h1:nTuyha1TYqgedzytsKYqna+DfLos46nTv2ygFy86HFU= github.com/gin-gonic/gin v1.10.0/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y= -github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= -github.com/go-errors/errors v1.0.2/go.mod h1:psDX2osz5VnTOnFWbDeWwS7yejl+uV3FEWEp4lssFEs= -github.com/go-errors/errors v1.1.1/go.mod h1:psDX2osz5VnTOnFWbDeWwS7yejl+uV3FEWEp4lssFEs= -github.com/go-errors/errors v1.4.1 h1:IvVlgbzSsaUNudsw5dcXSzF3EWyXTi5XrAdngnuhRyg= -github.com/go-errors/errors v1.4.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-fed/httpsig v1.1.0 h1:9M+hb0jkEICD8/cAiNqEB66R87tTINszBRTjwjQzWcI= github.com/go-fed/httpsig v1.1.0/go.mod h1:RCMrTZvN1bJYtofsG4rd5NaO5obxQ5xBkdiS7xsT7bM= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= @@ -254,8 +232,6 @@ github.com/go-swagger/go-swagger v0.31.0 h1:H8eOYQnY2u7vNKWDNykv2xJP3pBhRG/R+SOC github.com/go-swagger/go-swagger v0.31.0/go.mod h1:WSigRRWEig8zV6t6Sm8Y+EmUjlzA/HoaZJ5edupq7po= github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM= github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= -github.com/go-xmlfmt/xmlfmt v0.0.0-20211206191508-7fd73a941850 
h1:PSPmmucxGiFBtbQcttHTUc4LQ3P09AW+ldO2qspyKdY= -github.com/go-xmlfmt/xmlfmt v0.0.0-20211206191508-7fd73a941850/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA= @@ -263,10 +239,6 @@ github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= -github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= -github.com/golang/geo v0.0.0-20200319012246-673a6f80352d/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= -github.com/golang/geo v0.0.0-20210211234256-740aa86cb551 h1:gtexQ/VGyN+VVFRXSFiguSNcXmS6rkKT+X7FdIrTtfo= -github.com/golang/geo v0.0.0-20210211234256-740aa86cb551/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -324,7 +296,6 @@ github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd h1:gbpYu9NMq8jhDVbvlG github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 
-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= @@ -348,8 +319,6 @@ github.com/gorilla/websocket v1.5.2 h1:qoW6V1GT3aZxybsbC6oLnailWnB+qTMVwMreOso9X github.com/gorilla/websocket v1.5.2/go.mod h1:0n9H61RBAcf5/38py2MCYbxzPIY9rOkpvvMT24Rqs30= github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 h1:/c3QmbOGMGTOumP2iT/rCwB7b0QDGLKzqOmktBjT+Is= github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1/go.mod h1:5SN9VR2LTsRFsrEC6FHgRbTWrTHu6tqPeKxEQv15giM= -github.com/h2non/filetype v1.1.3 h1:FKkx9QbD7HR/zjK1Ia5XiBsq9zdLi5Kf3zGyFTAFkGg= -github.com/h2non/filetype v1.1.3/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= @@ -376,7 +345,6 @@ github.com/jackc/pgx/v5 v5.6.0 h1:SWJzexBzPL5jb0GEsrPMLIsi/3jOo7RHlzTjcAeDrPY= github.com/jackc/pgx/v5 v5.6.0/go.mod h1:DNZ/vlrUnhWCoFGxHAG8U2ljioxukquj7utPDgtQdTw= github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk= github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= -github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= @@ -404,7 +372,6 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN 
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= @@ -459,8 +426,6 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/orcaman/writerseeker v0.0.0-20200621085525-1d3f536ff85e h1:s2RNOM/IGdY0Y6qfTeUKhDawdHDpK9RGBdx80qN4Ttw= -github.com/orcaman/writerseeker v0.0.0-20200621085525-1d3f536ff85e/go.mod h1:nBdnFKj15wFbf94Rwfq4m30eAcyY9V/IyKAGQFtqkW0= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= @@ -540,13 +505,8 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= -github.com/sunfish-shogi/bufseekio v0.0.0-20210207115823-a4185644b365/go.mod h1:dEzdXgvImkQ3WLI+0KQpmEx8T/C/ma9KeS3AfmU899I= 
github.com/superseriousbusiness/activity v1.7.0-gts h1:DsCvzksTWptn7JUDTFIIiJ7xkh0A22VZs5KI3q67p+4= github.com/superseriousbusiness/activity v1.7.0-gts/go.mod h1:AZw0Xb4Oju8rmaJCZ21gc5CPg47MmNgyac+Hx5jo8VM= -github.com/superseriousbusiness/go-jpeg-image-structure/v2 v2.0.0-20220321154430-d89a106fdabe h1:ksl2oCx/Qo8sNDc3Grb8WGKBM9nkvhCm25uvlT86azE= -github.com/superseriousbusiness/go-jpeg-image-structure/v2 v2.0.0-20220321154430-d89a106fdabe/go.mod h1:gH4P6gN1V+wmIw5o97KGaa1RgXB/tVpC2UNzijhg3E4= -github.com/superseriousbusiness/go-png-image-structure/v2 v2.0.1-SSB h1:8psprYSK1KdOSH7yQ4PbJq0YYaGQY+gzdW/B0ExDb/8= -github.com/superseriousbusiness/go-png-image-structure/v2 v2.0.1-SSB/go.mod h1:ymKGfy9kg4dIdraeZRAdobMS/flzLk3VcRPLpEWOAXg= github.com/superseriousbusiness/httpsig v1.2.0-SSB h1:BinBGKbf2LSuVT5+MuH0XynHN9f0XVshx2CTDtkaWj0= github.com/superseriousbusiness/httpsig v1.2.0-SSB/go.mod h1:+rxfATjFaDoDIVaJOTSP0gj6UrbicaYPEptvCLC9F28= github.com/superseriousbusiness/oauth2/v4 v4.3.2-SSB.0.20230227143000-f4900831d6c8 h1:nTIhuP157oOFcscuoK1kCme1xTeGIzztSw70lX9NrDQ= @@ -739,7 +699,6 @@ golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200320220750-118fecf932d8/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= @@ -972,13 +931,9 @@ gopkg.in/ini.v1 v1.67.0 
h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/mcuadros/go-syslog.v2 v2.3.0 h1:kcsiS+WsTKyIEPABJBJtoG0KkOS6yzvJ+/eZlhD79kk= gopkg.in/mcuadros/go-syslog.v2 v2.3.0/go.mod h1:l5LPIyOOyIdQquNg+oU6Z3524YwrcqEm0aKH+5zpt2U= -gopkg.in/src-d/go-billy.v4 v4.3.2 h1:0SQA1pRztfTFx2miS8sA97XvooFeNOmvUenF4o0EcVg= -gopkg.in/src-d/go-billy.v4 v4.3.2/go.mod h1:nDjArDMp+XMs1aFAESLRjfGSgfvoYN0hDfzEk0GjC98= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= diff --git a/internal/api/client/admin/emojicreate_test.go b/internal/api/client/admin/emojicreate_test.go index be39ebdf5..a687fb0af 100644 --- a/internal/api/client/admin/emojicreate_test.go +++ b/internal/api/client/admin/emojicreate_test.go @@ -90,10 +90,10 @@ func (suite *EmojiCreateTestSuite) TestEmojiCreateNewCategory() { suite.Equal(apiEmoji.StaticURL, dbEmoji.ImageStaticURL) suite.NotEmpty(dbEmoji.ImagePath) suite.NotEmpty(dbEmoji.ImageStaticPath) - suite.Equal("image/png", dbEmoji.ImageContentType) + suite.Equal("image/apng", dbEmoji.ImageContentType) suite.Equal("image/png", dbEmoji.ImageStaticContentType) suite.Equal(36702, dbEmoji.ImageFileSize) - suite.Equal(10413, dbEmoji.ImageStaticFileSize) + suite.Equal(6092, dbEmoji.ImageStaticFileSize) suite.False(*dbEmoji.Disabled) suite.NotEmpty(dbEmoji.URI) suite.True(*dbEmoji.VisibleInPicker) @@ 
-163,10 +163,10 @@ func (suite *EmojiCreateTestSuite) TestEmojiCreateExistingCategory() { suite.Equal(apiEmoji.StaticURL, dbEmoji.ImageStaticURL) suite.NotEmpty(dbEmoji.ImagePath) suite.NotEmpty(dbEmoji.ImageStaticPath) - suite.Equal("image/png", dbEmoji.ImageContentType) + suite.Equal("image/apng", dbEmoji.ImageContentType) suite.Equal("image/png", dbEmoji.ImageStaticContentType) suite.Equal(36702, dbEmoji.ImageFileSize) - suite.Equal(10413, dbEmoji.ImageStaticFileSize) + suite.Equal(6092, dbEmoji.ImageStaticFileSize) suite.False(*dbEmoji.Disabled) suite.NotEmpty(dbEmoji.URI) suite.True(*dbEmoji.VisibleInPicker) @@ -236,10 +236,10 @@ func (suite *EmojiCreateTestSuite) TestEmojiCreateNoCategory() { suite.Equal(apiEmoji.StaticURL, dbEmoji.ImageStaticURL) suite.NotEmpty(dbEmoji.ImagePath) suite.NotEmpty(dbEmoji.ImageStaticPath) - suite.Equal("image/png", dbEmoji.ImageContentType) + suite.Equal("image/apng", dbEmoji.ImageContentType) suite.Equal("image/png", dbEmoji.ImageStaticContentType) suite.Equal(36702, dbEmoji.ImageFileSize) - suite.Equal(10413, dbEmoji.ImageStaticFileSize) + suite.Equal(6092, dbEmoji.ImageStaticFileSize) suite.False(*dbEmoji.Disabled) suite.NotEmpty(dbEmoji.URI) suite.True(*dbEmoji.VisibleInPicker) diff --git a/internal/api/client/admin/emojidelete_test.go b/internal/api/client/admin/emojidelete_test.go index 10cf3fe8d..88e929b55 100644 --- a/internal/api/client/admin/emojidelete_test.go +++ b/internal/api/client/admin/emojidelete_test.go @@ -62,7 +62,7 @@ func (suite *EmojiDeleteTestSuite) TestEmojiDelete1() { "id": "01F8MH9H8E4VG3KDYJR9EGPXCQ", "disabled": false, "updated_at": "2021-09-20T10:40:37.000Z", - "total_file_size": 47115, + "total_file_size": 42794, "content_type": "image/png", "uri": "http://localhost:8080/emoji/01F8MH9H8E4VG3KDYJR9EGPXCQ" }`, dst.String()) diff --git a/internal/api/client/admin/emojiget_test.go b/internal/api/client/admin/emojiget_test.go index b8bad2536..d6b2924ab 100644 --- 
a/internal/api/client/admin/emojiget_test.go +++ b/internal/api/client/admin/emojiget_test.go @@ -60,7 +60,7 @@ func (suite *EmojiGetTestSuite) TestEmojiGet1() { "id": "01F8MH9H8E4VG3KDYJR9EGPXCQ", "disabled": false, "updated_at": "2021-09-20T10:40:37.000Z", - "total_file_size": 47115, + "total_file_size": 42794, "content_type": "image/png", "uri": "http://localhost:8080/emoji/01F8MH9H8E4VG3KDYJR9EGPXCQ" }`, dst.String()) @@ -92,7 +92,7 @@ func (suite *EmojiGetTestSuite) TestEmojiGet2() { "disabled": false, "domain": "fossbros-anonymous.io", "updated_at": "2020-03-18T12:12:00.000Z", - "total_file_size": 21697, + "total_file_size": 19854, "content_type": "image/png", "uri": "http://fossbros-anonymous.io/emoji/01GD5KP5CQEE1R3X43Y1EHS2CW" }`, dst.String()) diff --git a/internal/api/client/admin/emojiupdate_test.go b/internal/api/client/admin/emojiupdate_test.go index 11beaeaa9..073e3cec0 100644 --- a/internal/api/client/admin/emojiupdate_test.go +++ b/internal/api/client/admin/emojiupdate_test.go @@ -100,19 +100,19 @@ func (suite *EmojiUpdateTestSuite) TestEmojiUpdateNewCategory() { suite.Equal("image/png", dbEmoji.ImageContentType) suite.Equal("image/png", dbEmoji.ImageStaticContentType) suite.Equal(36702, dbEmoji.ImageFileSize) - suite.Equal(10413, dbEmoji.ImageStaticFileSize) + suite.Equal(6092, dbEmoji.ImageStaticFileSize) suite.False(*dbEmoji.Disabled) suite.NotEmpty(dbEmoji.URI) suite.True(*dbEmoji.VisibleInPicker) suite.NotEmpty(dbEmoji.CategoryID) // emoji should be in storage - emojiBytes, err := suite.storage.Get(ctx, dbEmoji.ImagePath) + entry, err := suite.storage.Storage.Stat(ctx, dbEmoji.ImagePath) suite.NoError(err) - suite.Len(emojiBytes, dbEmoji.ImageFileSize) - emojiStaticBytes, err := suite.storage.Get(ctx, dbEmoji.ImageStaticPath) + suite.Equal(int64(dbEmoji.ImageFileSize), entry.Size) + entry, err = suite.storage.Storage.Stat(ctx, dbEmoji.ImageStaticPath) suite.NoError(err) - suite.Len(emojiStaticBytes, dbEmoji.ImageStaticFileSize) + 
suite.Equal(int64(dbEmoji.ImageStaticFileSize), entry.Size) } func (suite *EmojiUpdateTestSuite) TestEmojiUpdateSwitchCategory() { @@ -177,19 +177,19 @@ func (suite *EmojiUpdateTestSuite) TestEmojiUpdateSwitchCategory() { suite.Equal("image/png", dbEmoji.ImageContentType) suite.Equal("image/png", dbEmoji.ImageStaticContentType) suite.Equal(36702, dbEmoji.ImageFileSize) - suite.Equal(10413, dbEmoji.ImageStaticFileSize) + suite.Equal(6092, dbEmoji.ImageStaticFileSize) suite.False(*dbEmoji.Disabled) suite.NotEmpty(dbEmoji.URI) suite.True(*dbEmoji.VisibleInPicker) suite.NotEmpty(dbEmoji.CategoryID) // emoji should be in storage - emojiBytes, err := suite.storage.Get(ctx, dbEmoji.ImagePath) + entry, err := suite.storage.Storage.Stat(ctx, dbEmoji.ImagePath) suite.NoError(err) - suite.Len(emojiBytes, dbEmoji.ImageFileSize) - emojiStaticBytes, err := suite.storage.Get(ctx, dbEmoji.ImageStaticPath) + suite.Equal(int64(dbEmoji.ImageFileSize), entry.Size) + entry, err = suite.storage.Storage.Stat(ctx, dbEmoji.ImageStaticPath) suite.NoError(err) - suite.Len(emojiStaticBytes, dbEmoji.ImageStaticFileSize) + suite.Equal(int64(dbEmoji.ImageStaticFileSize), entry.Size) } func (suite *EmojiUpdateTestSuite) TestEmojiUpdateCopyRemoteToLocal() { @@ -255,19 +255,19 @@ func (suite *EmojiUpdateTestSuite) TestEmojiUpdateCopyRemoteToLocal() { suite.Equal("image/png", dbEmoji.ImageContentType) suite.Equal("image/png", dbEmoji.ImageStaticContentType) suite.Equal(10889, dbEmoji.ImageFileSize) - suite.Equal(10672, dbEmoji.ImageStaticFileSize) + suite.Equal(8965, dbEmoji.ImageStaticFileSize) suite.False(*dbEmoji.Disabled) suite.NotEmpty(dbEmoji.URI) suite.True(*dbEmoji.VisibleInPicker) suite.NotEmpty(dbEmoji.CategoryID) // emoji should be in storage - emojiBytes, err := suite.storage.Get(ctx, dbEmoji.ImagePath) + entry, err := suite.storage.Storage.Stat(ctx, dbEmoji.ImagePath) suite.NoError(err) - suite.Len(emojiBytes, dbEmoji.ImageFileSize) - emojiStaticBytes, err := suite.storage.Get(ctx, 
dbEmoji.ImageStaticPath) + suite.Equal(int64(dbEmoji.ImageFileSize), entry.Size) + entry, err = suite.storage.Storage.Stat(ctx, dbEmoji.ImageStaticPath) suite.NoError(err) - suite.Len(emojiStaticBytes, dbEmoji.ImageStaticFileSize) + suite.Equal(int64(dbEmoji.ImageStaticFileSize), entry.Size) } func (suite *EmojiUpdateTestSuite) TestEmojiUpdateDisableEmoji() { diff --git a/internal/api/client/instance/instancepatch.go b/internal/api/client/instance/instancepatch.go index afddc5a50..64263caf6 100644 --- a/internal/api/client/instance/instancepatch.go +++ b/internal/api/client/instance/instancepatch.go @@ -182,13 +182,6 @@ func validateInstanceUpdate(form *apimodel.InstanceSettingsUpdateRequest) error return errors.New("empty form submitted") } - if form.Avatar != nil { - maxImageSize := config.GetMediaImageMaxSize() - if size := form.Avatar.Size; size > int64(maxImageSize) { - return fmt.Errorf("file size limit exceeded: limit is %d bytes but desired instance avatar was %d bytes", maxImageSize, size) - } - } - if form.AvatarDescription != nil { maxDescriptionChars := config.GetMediaDescriptionMaxChars() if length := len([]rune(*form.AvatarDescription)); length > maxDescriptionChars { diff --git a/internal/api/client/instance/instancepatch_test.go b/internal/api/client/instance/instancepatch_test.go index 936d6efd9..605b056b9 100644 --- a/internal/api/client/instance/instancepatch_test.go +++ b/internal/api/client/instance/instancepatch_test.go @@ -109,7 +109,7 @@ func (suite *InstancePatchTestSuite) TestInstancePatch1() { "image/webp", "video/mp4" ], - "image_size_limit": 10485760, + "image_size_limit": 41943040, "image_matrix_limit": 16777216, "video_size_limit": 41943040, "video_frame_rate_limit": 60, @@ -230,7 +230,7 @@ func (suite *InstancePatchTestSuite) TestInstancePatch2() { "image/webp", "video/mp4" ], - "image_size_limit": 10485760, + "image_size_limit": 41943040, "image_matrix_limit": 16777216, "video_size_limit": 41943040, "video_frame_rate_limit": 60, @@ 
-351,7 +351,7 @@ func (suite *InstancePatchTestSuite) TestInstancePatch3() { "image/webp", "video/mp4" ], - "image_size_limit": 10485760, + "image_size_limit": 41943040, "image_matrix_limit": 16777216, "video_size_limit": 41943040, "video_frame_rate_limit": 60, @@ -523,7 +523,7 @@ func (suite *InstancePatchTestSuite) TestInstancePatch6() { "image/webp", "video/mp4" ], - "image_size_limit": 10485760, + "image_size_limit": 41943040, "image_matrix_limit": 16777216, "video_size_limit": 41943040, "video_frame_rate_limit": 60, @@ -666,7 +666,7 @@ func (suite *InstancePatchTestSuite) TestInstancePatch8() { "image/webp", "video/mp4" ], - "image_size_limit": 10485760, + "image_size_limit": 41943040, "image_matrix_limit": 16777216, "video_size_limit": 41943040, "video_frame_rate_limit": 60, @@ -754,7 +754,7 @@ func (suite *InstancePatchTestSuite) TestInstancePatch8() { "url": "http://localhost:8080/fileserver/01AY6P665V14JJR0AFVRT7311Y/attachment/original/`+instanceAccount.AvatarMediaAttachment.ID+`.gif",`+` "thumbnail_type": "image/gif", "thumbnail_description": "A bouncing little green peglin.", - "blurhash": "LG9t;qRS4YtO.4WDRlt5IXoxtPj[" + "blurhash": "LtJ[eKxu_4xt9Yj]M{WBt8WBM{WB" }`, string(instanceV2ThumbnailJson)) // double extra special bonus: now update the image description without changing the image @@ -824,7 +824,7 @@ func (suite *InstancePatchTestSuite) TestInstancePatch9() { "image/webp", "video/mp4" ], - "image_size_limit": 10485760, + "image_size_limit": 41943040, "image_matrix_limit": 16777216, "video_size_limit": 41943040, "video_frame_rate_limit": 60, diff --git a/internal/api/client/media/mediacreate.go b/internal/api/client/media/mediacreate.go index eef945d21..efe567f13 100644 --- a/internal/api/client/media/mediacreate.go +++ b/internal/api/client/media/mediacreate.go @@ -153,22 +153,9 @@ func validateCreateMedia(form *apimodel.AttachmentRequest) error { return errors.New("no attachment given") } - maxVideoSize := config.GetMediaVideoMaxSize() - 
maxImageSize := config.GetMediaImageMaxSize() minDescriptionChars := config.GetMediaDescriptionMinChars() maxDescriptionChars := config.GetMediaDescriptionMaxChars() - // a very superficial check to see if no size limits are exceeded - // we still don't actually know which media types we're dealing with but the other handlers will go into more detail there - maxSize := maxVideoSize - if maxImageSize > maxSize { - maxSize = maxImageSize - } - - if form.File.Size > int64(maxSize) { - return fmt.Errorf("file size limit exceeded: limit is %d bytes but attachment was %d bytes", maxSize, form.File.Size) - } - if length := len([]rune(form.Description)); length > maxDescriptionChars { return fmt.Errorf("image description length must be between %d and %d characters (inclusive), but provided image description was %d chars", minDescriptionChars, maxDescriptionChars, length) } diff --git a/internal/api/client/media/mediacreate_test.go b/internal/api/client/media/mediacreate_test.go index c2871aff0..2f6813a7c 100644 --- a/internal/api/client/media/mediacreate_test.go +++ b/internal/api/client/media/mediacreate_test.go @@ -206,7 +206,7 @@ func (suite *MediaCreateTestSuite) TestMediaCreateSuccessful() { Y: 0.5, }, }, *attachmentReply.Meta) - suite.Equal("LiBzRk#6V[WF_NvzV@WY_3rqV@a$", *attachmentReply.Blurhash) + suite.Equal("LjCGfG#6RkRn_NvzRjWF?urqV@a$", *attachmentReply.Blurhash) suite.NotEmpty(attachmentReply.ID) suite.NotEmpty(attachmentReply.URL) suite.NotEmpty(attachmentReply.PreviewURL) @@ -291,7 +291,7 @@ func (suite *MediaCreateTestSuite) TestMediaCreateSuccessfulV2() { Y: 0.5, }, }, *attachmentReply.Meta) - suite.Equal("LiBzRk#6V[WF_NvzV@WY_3rqV@a$", *attachmentReply.Blurhash) + suite.Equal("LjCGfG#6RkRn_NvzRjWF?urqV@a$", *attachmentReply.Blurhash) suite.NotEmpty(attachmentReply.ID) suite.Nil(attachmentReply.URL) suite.NotEmpty(attachmentReply.PreviewURL) diff --git a/internal/cleaner/media_test.go b/internal/cleaner/media_test.go index acb5416f7..46c6edcd4 100644 --- 
a/internal/cleaner/media_test.go +++ b/internal/cleaner/media_test.go @@ -373,13 +373,13 @@ func (suite *MediaTestSuite) TestUncacheAndRecache() { suite.True(storage.IsNotFound(err)) // now recache the image.... - data := func(_ context.Context) (io.ReadCloser, int64, error) { + data := func(_ context.Context) (io.ReadCloser, error) { // load bytes from a test image b, err := os.ReadFile("../../testrig/media/thoughtsofdog-original.jpg") if err != nil { panic(err) } - return io.NopCloser(bytes.NewBuffer(b)), int64(len(b)), nil + return io.NopCloser(bytes.NewBuffer(b)), nil } for _, original := range []*gtsmodel.MediaAttachment{ diff --git a/internal/config/config.go b/internal/config/config.go index 015213184..bffa5b455 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -92,13 +92,13 @@ type Configuration struct { AccountsAllowCustomCSS bool `name:"accounts-allow-custom-css" usage:"Allow accounts to enable custom CSS for their profile pages and statuses."` AccountsCustomCSSLength int `name:"accounts-custom-css-length" usage:"Maximum permitted length (characters) of custom CSS for accounts."` - MediaImageMaxSize bytesize.Size `name:"media-image-max-size" usage:"Max size of accepted images in bytes"` - MediaVideoMaxSize bytesize.Size `name:"media-video-max-size" usage:"Max size of accepted videos in bytes"` MediaDescriptionMinChars int `name:"media-description-min-chars" usage:"Min required chars for an image description"` MediaDescriptionMaxChars int `name:"media-description-max-chars" usage:"Max permitted chars for an image description"` MediaRemoteCacheDays int `name:"media-remote-cache-days" usage:"Number of days to locally cache media from remote instances. 
If set to 0, remote media will be kept indefinitely."` MediaEmojiLocalMaxSize bytesize.Size `name:"media-emoji-local-max-size" usage:"Max size in bytes of emojis uploaded to this instance via the admin API."` MediaEmojiRemoteMaxSize bytesize.Size `name:"media-emoji-remote-max-size" usage:"Max size in bytes of emojis to download from other instances."` + MediaLocalMaxSize bytesize.Size `name:"media-local-max-size" usage:"Max size in bytes of media uploaded to this instance via API"` + MediaRemoteMaxSize bytesize.Size `name:"media-remote-max-size" usage:"Max size in bytes of media to download from other instances"` MediaCleanupFrom string `name:"media-cleanup-from" usage:"Time of day from which to start running media cleanup/prune jobs. Should be in the format 'hh:mm:ss', eg., '15:04:05'."` MediaCleanupEvery time.Duration `name:"media-cleanup-every" usage:"Period to elapse between cleanups, starting from media-cleanup-at."` diff --git a/internal/config/defaults.go b/internal/config/defaults.go index ba068761e..267e7b4bc 100644 --- a/internal/config/defaults.go +++ b/internal/config/defaults.go @@ -71,11 +71,11 @@ var Defaults = Configuration{ AccountsAllowCustomCSS: false, AccountsCustomCSSLength: 10000, - MediaImageMaxSize: 10 * bytesize.MiB, - MediaVideoMaxSize: 40 * bytesize.MiB, MediaDescriptionMinChars: 0, MediaDescriptionMaxChars: 1500, MediaRemoteCacheDays: 7, + MediaLocalMaxSize: 40 * bytesize.MiB, + MediaRemoteMaxSize: 40 * bytesize.MiB, MediaEmojiLocalMaxSize: 50 * bytesize.KiB, MediaEmojiRemoteMaxSize: 100 * bytesize.KiB, MediaCleanupFrom: "00:00", // Midnight. 
diff --git a/internal/config/flags.go b/internal/config/flags.go index 042621afe..f96709e70 100644 --- a/internal/config/flags.go +++ b/internal/config/flags.go @@ -97,11 +97,11 @@ func (s *ConfigState) AddServerFlags(cmd *cobra.Command) { cmd.Flags().Bool(AccountsAllowCustomCSSFlag(), cfg.AccountsAllowCustomCSS, fieldtag("AccountsAllowCustomCSS", "usage")) // Media - cmd.Flags().Uint64(MediaImageMaxSizeFlag(), uint64(cfg.MediaImageMaxSize), fieldtag("MediaImageMaxSize", "usage")) - cmd.Flags().Uint64(MediaVideoMaxSizeFlag(), uint64(cfg.MediaVideoMaxSize), fieldtag("MediaVideoMaxSize", "usage")) cmd.Flags().Int(MediaDescriptionMinCharsFlag(), cfg.MediaDescriptionMinChars, fieldtag("MediaDescriptionMinChars", "usage")) cmd.Flags().Int(MediaDescriptionMaxCharsFlag(), cfg.MediaDescriptionMaxChars, fieldtag("MediaDescriptionMaxChars", "usage")) cmd.Flags().Int(MediaRemoteCacheDaysFlag(), cfg.MediaRemoteCacheDays, fieldtag("MediaRemoteCacheDays", "usage")) + cmd.Flags().Uint64(MediaLocalMaxSizeFlag(), uint64(cfg.MediaLocalMaxSize), fieldtag("MediaLocalMaxSize", "usage")) + cmd.Flags().Uint64(MediaRemoteMaxSizeFlag(), uint64(cfg.MediaRemoteMaxSize), fieldtag("MediaRemoteMaxSize", "usage")) cmd.Flags().Uint64(MediaEmojiLocalMaxSizeFlag(), uint64(cfg.MediaEmojiLocalMaxSize), fieldtag("MediaEmojiLocalMaxSize", "usage")) cmd.Flags().Uint64(MediaEmojiRemoteMaxSizeFlag(), uint64(cfg.MediaEmojiRemoteMaxSize), fieldtag("MediaEmojiRemoteMaxSize", "usage")) cmd.Flags().String(MediaCleanupFromFlag(), cfg.MediaCleanupFrom, fieldtag("MediaCleanupFrom", "usage")) diff --git a/internal/config/helpers.gen.go b/internal/config/helpers.gen.go index 8dab7ac6a..8c27da439 100644 --- a/internal/config/helpers.gen.go +++ b/internal/config/helpers.gen.go @@ -1075,56 +1075,6 @@ func GetAccountsCustomCSSLength() int { return global.GetAccountsCustomCSSLength // SetAccountsCustomCSSLength safely sets the value for global configuration 'AccountsCustomCSSLength' field func 
SetAccountsCustomCSSLength(v int) { global.SetAccountsCustomCSSLength(v) } -// GetMediaImageMaxSize safely fetches the Configuration value for state's 'MediaImageMaxSize' field -func (st *ConfigState) GetMediaImageMaxSize() (v bytesize.Size) { - st.mutex.RLock() - v = st.config.MediaImageMaxSize - st.mutex.RUnlock() - return -} - -// SetMediaImageMaxSize safely sets the Configuration value for state's 'MediaImageMaxSize' field -func (st *ConfigState) SetMediaImageMaxSize(v bytesize.Size) { - st.mutex.Lock() - defer st.mutex.Unlock() - st.config.MediaImageMaxSize = v - st.reloadToViper() -} - -// MediaImageMaxSizeFlag returns the flag name for the 'MediaImageMaxSize' field -func MediaImageMaxSizeFlag() string { return "media-image-max-size" } - -// GetMediaImageMaxSize safely fetches the value for global configuration 'MediaImageMaxSize' field -func GetMediaImageMaxSize() bytesize.Size { return global.GetMediaImageMaxSize() } - -// SetMediaImageMaxSize safely sets the value for global configuration 'MediaImageMaxSize' field -func SetMediaImageMaxSize(v bytesize.Size) { global.SetMediaImageMaxSize(v) } - -// GetMediaVideoMaxSize safely fetches the Configuration value for state's 'MediaVideoMaxSize' field -func (st *ConfigState) GetMediaVideoMaxSize() (v bytesize.Size) { - st.mutex.RLock() - v = st.config.MediaVideoMaxSize - st.mutex.RUnlock() - return -} - -// SetMediaVideoMaxSize safely sets the Configuration value for state's 'MediaVideoMaxSize' field -func (st *ConfigState) SetMediaVideoMaxSize(v bytesize.Size) { - st.mutex.Lock() - defer st.mutex.Unlock() - st.config.MediaVideoMaxSize = v - st.reloadToViper() -} - -// MediaVideoMaxSizeFlag returns the flag name for the 'MediaVideoMaxSize' field -func MediaVideoMaxSizeFlag() string { return "media-video-max-size" } - -// GetMediaVideoMaxSize safely fetches the value for global configuration 'MediaVideoMaxSize' field -func GetMediaVideoMaxSize() bytesize.Size { return global.GetMediaVideoMaxSize() } - -// 
SetMediaVideoMaxSize safely sets the value for global configuration 'MediaVideoMaxSize' field -func SetMediaVideoMaxSize(v bytesize.Size) { global.SetMediaVideoMaxSize(v) } - // GetMediaDescriptionMinChars safely fetches the Configuration value for state's 'MediaDescriptionMinChars' field func (st *ConfigState) GetMediaDescriptionMinChars() (v int) { st.mutex.RLock() @@ -1250,6 +1200,56 @@ func GetMediaEmojiRemoteMaxSize() bytesize.Size { return global.GetMediaEmojiRem // SetMediaEmojiRemoteMaxSize safely sets the value for global configuration 'MediaEmojiRemoteMaxSize' field func SetMediaEmojiRemoteMaxSize(v bytesize.Size) { global.SetMediaEmojiRemoteMaxSize(v) } +// GetMediaLocalMaxSize safely fetches the Configuration value for state's 'MediaLocalMaxSize' field +func (st *ConfigState) GetMediaLocalMaxSize() (v bytesize.Size) { + st.mutex.RLock() + v = st.config.MediaLocalMaxSize + st.mutex.RUnlock() + return +} + +// SetMediaLocalMaxSize safely sets the Configuration value for state's 'MediaLocalMaxSize' field +func (st *ConfigState) SetMediaLocalMaxSize(v bytesize.Size) { + st.mutex.Lock() + defer st.mutex.Unlock() + st.config.MediaLocalMaxSize = v + st.reloadToViper() +} + +// MediaLocalMaxSizeFlag returns the flag name for the 'MediaLocalMaxSize' field +func MediaLocalMaxSizeFlag() string { return "media-local-max-size" } + +// GetMediaLocalMaxSize safely fetches the value for global configuration 'MediaLocalMaxSize' field +func GetMediaLocalMaxSize() bytesize.Size { return global.GetMediaLocalMaxSize() } + +// SetMediaLocalMaxSize safely sets the value for global configuration 'MediaLocalMaxSize' field +func SetMediaLocalMaxSize(v bytesize.Size) { global.SetMediaLocalMaxSize(v) } + +// GetMediaRemoteMaxSize safely fetches the Configuration value for state's 'MediaRemoteMaxSize' field +func (st *ConfigState) GetMediaRemoteMaxSize() (v bytesize.Size) { + st.mutex.RLock() + v = st.config.MediaRemoteMaxSize + st.mutex.RUnlock() + return +} + +// 
SetMediaRemoteMaxSize safely sets the Configuration value for state's 'MediaRemoteMaxSize' field +func (st *ConfigState) SetMediaRemoteMaxSize(v bytesize.Size) { + st.mutex.Lock() + defer st.mutex.Unlock() + st.config.MediaRemoteMaxSize = v + st.reloadToViper() +} + +// MediaRemoteMaxSizeFlag returns the flag name for the 'MediaRemoteMaxSize' field +func MediaRemoteMaxSizeFlag() string { return "media-remote-max-size" } + +// GetMediaRemoteMaxSize safely fetches the value for global configuration 'MediaRemoteMaxSize' field +func GetMediaRemoteMaxSize() bytesize.Size { return global.GetMediaRemoteMaxSize() } + +// SetMediaRemoteMaxSize safely sets the value for global configuration 'MediaRemoteMaxSize' field +func SetMediaRemoteMaxSize(v bytesize.Size) { global.SetMediaRemoteMaxSize(v) } + // GetMediaCleanupFrom safely fetches the Configuration value for state's 'MediaCleanupFrom' field func (st *ConfigState) GetMediaCleanupFrom() (v string) { st.mutex.RLock() diff --git a/internal/federation/dereferencing/emoji.go b/internal/federation/dereferencing/emoji.go index 16f5acf25..806a3f5ee 100644 --- a/internal/federation/dereferencing/emoji.go +++ b/internal/federation/dereferencing/emoji.go @@ -23,6 +23,7 @@ import ( "io" "net/url" + "github.com/superseriousbusiness/gotosocial/internal/config" "github.com/superseriousbusiness/gotosocial/internal/db" "github.com/superseriousbusiness/gotosocial/internal/gtserror" "github.com/superseriousbusiness/gotosocial/internal/gtsmodel" @@ -90,9 +91,12 @@ func (d *Dereferencer) GetEmoji( return nil, err } + // Get maximum supported remote emoji size. + maxsz := config.GetMediaEmojiRemoteMaxSize() + // Prepare data function to dereference remote emoji media. - data := func(context.Context) (io.ReadCloser, int64, error) { - return tsport.DereferenceMedia(ctx, url) + data := func(context.Context) (io.ReadCloser, error) { + return tsport.DereferenceMedia(ctx, url, int64(maxsz)) } // Pass along for safe processing. 
@@ -171,9 +175,12 @@ func (d *Dereferencer) RefreshEmoji( return nil, err } + // Get maximum supported remote emoji size. + maxsz := config.GetMediaEmojiRemoteMaxSize() + // Prepare data function to dereference remote emoji media. - data := func(context.Context) (io.ReadCloser, int64, error) { - return tsport.DereferenceMedia(ctx, url) + data := func(context.Context) (io.ReadCloser, error) { + return tsport.DereferenceMedia(ctx, url, int64(maxsz)) } // Pass along for safe processing. diff --git a/internal/federation/dereferencing/emoji_test.go b/internal/federation/dereferencing/emoji_test.go index fdb815762..12965207c 100644 --- a/internal/federation/dereferencing/emoji_test.go +++ b/internal/federation/dereferencing/emoji_test.go @@ -75,7 +75,7 @@ func (suite *EmojiTestSuite) TestDereferenceEmojiBlocking() { suite.Equal("image/gif", emoji.ImageContentType) suite.Equal("image/png", emoji.ImageStaticContentType) suite.Equal(37796, emoji.ImageFileSize) - suite.Equal(7951, emoji.ImageStaticFileSize) + suite.Equal(9824, emoji.ImageStaticFileSize) suite.WithinDuration(time.Now(), emoji.UpdatedAt, 10*time.Second) suite.False(*emoji.Disabled) suite.Equal(emojiURI, emoji.URI) diff --git a/internal/federation/dereferencing/media.go b/internal/federation/dereferencing/media.go index 874107b13..956866e94 100644 --- a/internal/federation/dereferencing/media.go +++ b/internal/federation/dereferencing/media.go @@ -22,6 +22,7 @@ import ( "io" "net/url" + "github.com/superseriousbusiness/gotosocial/internal/config" "github.com/superseriousbusiness/gotosocial/internal/gtserror" "github.com/superseriousbusiness/gotosocial/internal/gtsmodel" "github.com/superseriousbusiness/gotosocial/internal/media" @@ -69,12 +70,15 @@ func (d *Dereferencer) GetMedia( return nil, gtserror.Newf("failed getting transport for %s: %w", requestUser, err) } + // Get maximum supported remote media size. + maxsz := config.GetMediaRemoteMaxSize() + // Start processing remote attachment at URL. 
processing, err := d.mediaManager.CreateMedia( ctx, accountID, - func(ctx context.Context) (io.ReadCloser, int64, error) { - return tsport.DereferenceMedia(ctx, url) + func(ctx context.Context) (io.ReadCloser, error) { + return tsport.DereferenceMedia(ctx, url, int64(maxsz)) }, info, ) @@ -163,11 +167,14 @@ func (d *Dereferencer) RefreshMedia( return nil, gtserror.Newf("failed getting transport for %s: %w", requestUser, err) } + // Get maximum supported remote media size. + maxsz := config.GetMediaRemoteMaxSize() + // Start processing remote attachment recache. processing := d.mediaManager.RecacheMedia( media, - func(ctx context.Context) (io.ReadCloser, int64, error) { - return tsport.DereferenceMedia(ctx, url) + func(ctx context.Context) (io.ReadCloser, error) { + return tsport.DereferenceMedia(ctx, url, int64(maxsz)) }, ) diff --git a/internal/httpclient/client.go b/internal/httpclient/client.go index ba8760091..b78dbc2d9 100644 --- a/internal/httpclient/client.go +++ b/internal/httpclient/client.go @@ -31,7 +31,6 @@ import ( "strings" "time" - "codeberg.org/gruf/go-bytesize" "codeberg.org/gruf/go-cache/v3" errorsv2 "codeberg.org/gruf/go-errors/v2" "codeberg.org/gruf/go-iotools" @@ -89,9 +88,6 @@ type Config struct { // WriteBufferSize: see http.Transport{}.WriteBufferSize. WriteBufferSize int - // MaxBodySize determines the maximum fetchable body size. - MaxBodySize int64 - // Timeout: see http.Client{}.Timeout. Timeout time.Duration @@ -111,7 +107,6 @@ type Config struct { type Client struct { client http.Client badHosts cache.TTLCache[string, struct{}] - bodyMax int64 retries uint } @@ -137,11 +132,6 @@ func New(cfg Config) *Client { cfg.MaxIdleConns = cfg.MaxOpenConnsPerHost * 10 } - if cfg.MaxBodySize <= 0 { - // By default set this to a reasonable 40MB. - cfg.MaxBodySize = int64(40 * bytesize.MiB) - } - // Protect the dialer // with IP range sanitizer. d.Control = (&Sanitizer{ @@ -151,7 +141,6 @@ func New(cfg Config) *Client { // Prepare client fields. 
c.client.Timeout = cfg.Timeout - c.bodyMax = cfg.MaxBodySize // Prepare transport TLS config. tlsClientConfig := &tls.Config{ @@ -377,31 +366,15 @@ func (c *Client) do(r *Request) (rsp *http.Response, retry bool, err error) { rbody := (io.Reader)(rsp.Body) cbody := (io.Closer)(rsp.Body) - var limit int64 - - if limit = rsp.ContentLength; limit < 0 { - // If unknown, use max as reader limit. - limit = c.bodyMax - } - - // Don't trust them, limit body reads. - rbody = io.LimitReader(rbody, limit) - - // Wrap closer to ensure entire body drained BEFORE close. + // Wrap closer to ensure body drained BEFORE close. cbody = iotools.CloserAfterCallback(cbody, func() { _, _ = discard.ReadFrom(rbody) }) - // Wrap body with limit. - rsp.Body = &struct { - io.Reader - io.Closer - }{rbody, cbody} - - // Check response body not too large. - if rsp.ContentLength > c.bodyMax { - _ = rsp.Body.Close() - return nil, false, ErrBodyTooLarge + // Set the wrapped response body. + rsp.Body = &iotools.ReadCloserType{ + Reader: rbody, + Closer: cbody, } return rsp, true, nil diff --git a/internal/httpclient/client_test.go b/internal/httpclient/client_test.go index f0ec01ec3..2e36a6e90 100644 --- a/internal/httpclient/client_test.go +++ b/internal/httpclient/client_test.go @@ -48,44 +48,19 @@ var bodies = []string{ "body with\r\nnewlines", } -func TestHTTPClientSmallBody(t *testing.T) { +func TestHTTPClientBody(t *testing.T) { for _, body := range bodies { - _TestHTTPClientWithBody(t, []byte(body), int(^uint16(0))) + testHTTPClientWithBody(t, []byte(body)) } } -func TestHTTPClientExactBody(t *testing.T) { - for _, body := range bodies { - _TestHTTPClientWithBody(t, []byte(body), len(body)) - } -} - -func TestHTTPClientLargeBody(t *testing.T) { - for _, body := range bodies { - _TestHTTPClientWithBody(t, []byte(body), len(body)-1) - } -} - -func _TestHTTPClientWithBody(t *testing.T, body []byte, max int) { +func testHTTPClientWithBody(t *testing.T, body []byte) { var ( handler 
http.HandlerFunc - - expect []byte - - expectErr error ) - // If this is a larger body, reslice and - // set error so we know what to expect - expect = body - if max < len(body) { - expect = expect[:max] - expectErr = httpclient.ErrBodyTooLarge - } - // Create new HTTP client with maximum body size client := httpclient.New(httpclient.Config{ - MaxBodySize: int64(max), DisableCompression: true, AllowRanges: []netip.Prefix{ // Loopback (used by server) @@ -110,10 +85,8 @@ func _TestHTTPClientWithBody(t *testing.T, body []byte, max int) { // Perform the test request rsp, err := client.Do(req) - if !errors.Is(err, expectErr) { + if err != nil { t.Fatalf("error performing client request: %v", err) - } else if err != nil { - return // expected error } defer rsp.Body.Close() @@ -124,8 +97,8 @@ func _TestHTTPClientWithBody(t *testing.T, body []byte, max int) { } // Check actual response body matches expected - if !bytes.Equal(expect, check) { - t.Errorf("response body did not match expected: expect=%q actual=%q", string(expect), string(check)) + if !bytes.Equal(body, check) { + t.Errorf("response body did not match expected: expect=%q actual=%q", string(body), string(check)) } } diff --git a/internal/media/ffmpeg.go b/internal/media/ffmpeg.go new file mode 100644 index 000000000..eb94849f0 --- /dev/null +++ b/internal/media/ffmpeg.go @@ -0,0 +1,313 @@ +// GoToSocial +// Copyright (C) GoToSocial Authors admin@gotosocial.org +// SPDX-License-Identifier: AGPL-3.0-or-later +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package media + +import ( + "context" + "encoding/json" + "errors" + "os" + "path" + "strconv" + "strings" + + "codeberg.org/gruf/go-byteutil" + + "codeberg.org/gruf/go-ffmpreg/wasm" + _ffmpeg "github.com/superseriousbusiness/gotosocial/internal/media/ffmpeg" + + "github.com/superseriousbusiness/gotosocial/internal/gtserror" + "github.com/superseriousbusiness/gotosocial/internal/gtsmodel" + "github.com/tetratelabs/wazero" +) + +// ffmpegClearMetadata generates a copy (in-place) of input media with all metadata cleared. +func ffmpegClearMetadata(ctx context.Context, filepath string, ext string) error { + // Get directory from filepath. + dirpath := path.Dir(filepath) + + // Generate output file path with ext. + outpath := filepath + "_cleaned." + ext + + // Clear metadata with ffmpeg. + if err := ffmpeg(ctx, dirpath, + "-loglevel", "error", + "-i", filepath, + "-map_metadata", "-1", + "-codec", "copy", + "-y", + outpath, + ); err != nil { + return err + } + + // Move the new output file path to original location. + if err := os.Rename(outpath, filepath); err != nil { + return gtserror.Newf("error renaming %s: %w", outpath, err) + } + + return nil +} + +// ffmpegGenerateThumb generates a thumbnail jpeg from input media of any type, useful for any media. +func ffmpegGenerateThumb(ctx context.Context, filepath string, width, height int) (string, error) { + // Get directory from filepath. + dirpath := path.Dir(filepath) + + // Generate output frame file path. + outpath := filepath + "_thumb.jpg" + + // Generate thumb with ffmpeg. 
+	if err := ffmpeg(ctx, dirpath,
+		"-loglevel", "error",
+		"-i", filepath,
+		// NOTE: filters combined into one graph, as a second "-filter:v" would override the first.
+		"-filter:v", "thumbnail=n=10,scale="+strconv.Itoa(width)+":"+strconv.Itoa(height),
+		"-qscale:v", "12", // ~ 70% quality
+		"-frames:v", "1",
+		"-y",
+		outpath,
+	); err != nil {
+		return "", err
+	}
+
+	return outpath, nil
+}
+
+// ffmpegGenerateStatic generates a static png from input image of any type, useful for emoji.
+func ffmpegGenerateStatic(ctx context.Context, filepath string) (string, error) {
+	// Get directory from filepath.
+	dirpath := path.Dir(filepath)
+
+	// Generate output static file path.
+	outpath := filepath + "_static.png"
+
+	// Generate static with ffmpeg.
+	if err := ffmpeg(ctx, dirpath,
+		"-loglevel", "error",
+		"-i", filepath,
+		"-codec:v", "png", // specifically NOT 'apng'
+		"-frames:v", "1", // in case animated, only take 1 frame
+		"-y",
+		outpath,
+	); err != nil {
+		return "", err
+	}
+
+	return outpath, nil
+}
+
+// ffmpeg calls `ffmpeg [args...]` (WASM) with directory path mounted in runtime.
+func ffmpeg(ctx context.Context, dirpath string, args ...string) error {
+	var stderr byteutil.Buffer
+	rc, err := _ffmpeg.Ffmpeg(ctx, wasm.Args{
+		Stderr: &stderr,
+		Args:   args,
+		Config: func(modcfg wazero.ModuleConfig) wazero.ModuleConfig {
+			fscfg := wazero.NewFSConfig()
+			fscfg = fscfg.WithDirMount(dirpath, dirpath)
+			modcfg = modcfg.WithFSConfig(fscfg)
+			return modcfg
+		},
+	})
+	if err != nil {
+		return gtserror.Newf("error running: %w", err)
+	} else if rc != 0 {
+		return gtserror.Newf("non-zero return code %d (%s)", rc, stderr.B)
+	}
+	return nil
+}
+
+// ffprobe calls `ffprobe` (WASM) on filepath, returning parsed JSON output.
+func ffprobe(ctx context.Context, filepath string) (*ffprobeResult, error) {
+	var stdout byteutil.Buffer
+
+	// Get directory from filepath.
+	dirpath := path.Dir(filepath)
+
+	// Run ffprobe on our given file at path.
+ _, err := _ffmpeg.Ffprobe(ctx, wasm.Args{ + Stdout: &stdout, + + Args: []string{ + "-i", filepath, + "-loglevel", "quiet", + "-print_format", "json", + "-show_streams", + "-show_format", + "-show_error", + }, + + Config: func(modcfg wazero.ModuleConfig) wazero.ModuleConfig { + fscfg := wazero.NewFSConfig() + fscfg = fscfg.WithReadOnlyDirMount(dirpath, dirpath) + modcfg = modcfg.WithFSConfig(fscfg) + return modcfg + }, + }) + if err != nil { + return nil, gtserror.Newf("error running: %w", err) + } + + var result ffprobeResult + + // Unmarshal the ffprobe output as our result type. + if err := json.Unmarshal(stdout.B, &result); err != nil { + return nil, gtserror.Newf("error unmarshaling json: %w", err) + } + + return &result, nil +} + +// ffprobeResult contains parsed JSON data from +// result of calling `ffprobe` on a media file. +type ffprobeResult struct { + Streams []ffprobeStream `json:"streams"` + Format *ffprobeFormat `json:"format"` + Error *ffprobeError `json:"error"` +} + +// ImageMeta extracts image metadata contained within ffprobe'd media result streams. +func (res *ffprobeResult) ImageMeta() (width int, height int, err error) { + for _, stream := range res.Streams { + if stream.Width > width { + width = stream.Width + } + if stream.Height > height { + height = stream.Height + } + } + if width == 0 || height == 0 { + err = errors.New("invalid image stream(s)") + } + return +} + +// VideoMeta extracts video metadata contained within ffprobe'd media result streams. 
+func (res *ffprobeResult) VideoMeta() (width, height int, framerate float32, err error) {
+	for _, stream := range res.Streams {
+		if stream.Width > width {
+			width = stream.Width
+		}
+		if stream.Height > height {
+			height = stream.Height
+		}
+		if fr := stream.GetFrameRate(); fr > 0 {
+			if framerate == 0 || fr < framerate {
+				framerate = fr
+			}
+		}
+	}
+	if width == 0 || height == 0 || framerate == 0 {
+		err = errors.New("invalid video stream(s)")
+	}
+	return
+}
+
+type ffprobeStream struct {
+	CodecName    string `json:"codec_name"`
+	AvgFrameRate string `json:"avg_frame_rate"`
+	Width        int    `json:"width"`
+	Height       int    `json:"height"`
+	// + unused fields.
+}
+
+// GetFrameRate calculates float32 framerate value from stream json string.
+func (str *ffprobeStream) GetFrameRate() float32 {
+	if str.AvgFrameRate != "" {
+		// denominator defaults to 1 so that a plain
+		// (non-fractional) value parses as itself.
+		var num, den float32 = 0, 1
+
+		// Check for a fraction, i.e. numerator / denominator.
+		if p := strings.SplitN(str.AvgFrameRate, "/", 2); len(p) == 2 {
+			n, _ := strconv.ParseFloat(p[0], 32)
+			d, _ := strconv.ParseFloat(p[1], 32)
+			num, den = float32(n), float32(d)
+		} else {
+			n, _ := strconv.ParseFloat(p[0], 32)
+			num = float32(n)
+		}
+
+		// Guard against division by zero from a
+		// zero-valued or malformed denominator.
+		if den > 0 {
+			return num / den
+		}
+	}
+	return 0
+}
+
+type ffprobeFormat struct {
+	Filename   string `json:"filename"`
+	FormatName string `json:"format_name"`
+	Duration   string `json:"duration"`
+	BitRate    string `json:"bit_rate"`
+	// + unused fields
+}
+
+// GetFileType determines file type and extension to use for media data.
+func (fmt *ffprobeFormat) GetFileType() (gtsmodel.FileType, string) { + switch fmt.FormatName { + case "mov,mp4,m4a,3gp,3g2,mj2": + return gtsmodel.FileTypeVideo, "mp4" + case "apng": + return gtsmodel.FileTypeImage, "apng" + case "png_pipe": + return gtsmodel.FileTypeImage, "png" + case "image2", "jpeg_pipe": + return gtsmodel.FileTypeImage, "jpeg" + case "webp_pipe": + return gtsmodel.FileTypeImage, "webp" + case "gif": + return gtsmodel.FileTypeImage, "gif" + case "mp3": + return gtsmodel.FileTypeAudio, "mp3" + case "ogg": + return gtsmodel.FileTypeAudio, "ogg" + default: + return gtsmodel.FileTypeUnknown, fmt.FormatName + } +} + +// GetDuration calculates float32 framerate value from format json string. +func (fmt *ffprobeFormat) GetDuration() float32 { + if fmt.Duration != "" { + dur, _ := strconv.ParseFloat(fmt.Duration, 32) + return float32(dur) + } + return 0 +} + +// GetBitRate calculates uint64 bitrate value from format json string. +func (fmt *ffprobeFormat) GetBitRate() uint64 { + if fmt.BitRate != "" { + r, _ := strconv.ParseUint(fmt.BitRate, 10, 64) + return r + } + return 0 +} + +type ffprobeError struct { + Code int `json:"code"` + String string `json:"string"` +} + +func (err *ffprobeError) Error() string { + return err.String + " (" + strconv.Itoa(err.Code) + ")" +} diff --git a/internal/media/ffmpeg/cache.go b/internal/media/ffmpeg/cache.go new file mode 100644 index 000000000..371d409dc --- /dev/null +++ b/internal/media/ffmpeg/cache.go @@ -0,0 +1,46 @@ +// GoToSocial +// Copyright (C) GoToSocial Authors admin@gotosocial.org +// SPDX-License-Identifier: AGPL-3.0-or-later +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package ffmpeg + +import ( + "os" + + "github.com/tetratelabs/wazero" +) + +// shared WASM compilation cache. +var cache wazero.CompilationCache + +func initCache() { + if cache != nil { + return + } + + if dir := os.Getenv("WAZERO_COMPILATION_CACHE"); dir != "" { + var err error + + // Use on-filesystem compilation cache given by env. + cache, err = wazero.NewCompilationCacheWithDir(dir) + if err != nil { + panic(err) + } + } else { + // Use in-memory compilation cache. + cache = wazero.NewCompilationCache() + } +} diff --git a/internal/media/ffmpeg/ffmpeg.go b/internal/media/ffmpeg/ffmpeg.go new file mode 100644 index 000000000..357289fcc --- /dev/null +++ b/internal/media/ffmpeg/ffmpeg.go @@ -0,0 +1,92 @@ +// GoToSocial +// Copyright (C) GoToSocial Authors admin@gotosocial.org +// SPDX-License-Identifier: AGPL-3.0-or-later +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . 
+ +package ffmpeg + +import ( + "context" + + ffmpeglib "codeberg.org/gruf/go-ffmpreg/embed/ffmpeg" + "codeberg.org/gruf/go-ffmpreg/util" + "codeberg.org/gruf/go-ffmpreg/wasm" + + "github.com/tetratelabs/wazero" + "github.com/tetratelabs/wazero/api" + "github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1" +) + +// InitFfmpeg initializes the ffmpeg WebAssembly instance pool, +// with given maximum limiting the number of concurrent instances. +func InitFfmpeg(ctx context.Context, max int) error { + initCache() // ensure compilation cache initialized + return ffmpegPool.Init(ctx, max) +} + +// Ffmpeg runs the given arguments with an instance of ffmpeg. +func Ffmpeg(ctx context.Context, args wasm.Args) (uint32, error) { + return ffmpegPool.Run(ctx, args) +} + +var ffmpegPool = wasmInstancePool{ + inst: wasm.Instantiator{ + + // WASM module name. + Module: "ffmpeg", + + // Per-instance WebAssembly runtime (with shared cache). + Runtime: func(ctx context.Context) wazero.Runtime { + + // Prepare config with cache. + cfg := wazero.NewRuntimeConfig() + cfg = cfg.WithCoreFeatures(ffmpeglib.CoreFeatures) + cfg = cfg.WithCompilationCache(cache) + + // Instantiate runtime with our config. + rt := wazero.NewRuntimeWithConfig(ctx, cfg) + + // Prepare default "env" host module. + env := rt.NewHostModuleBuilder("env") + env = env.NewFunctionBuilder(). + WithGoModuleFunction( + api.GoModuleFunc(util.Wasm_Tempnam), + []api.ValueType{api.ValueTypeI32, api.ValueTypeI32}, + []api.ValueType{api.ValueTypeI32}, + ). + Export("tempnam") + + // Instantiate "env" module in our runtime. + _, err := env.Instantiate(context.Background()) + if err != nil { + panic(err) + } + + // Instantiate the wasi snapshot preview 1 in runtime. + _, err = wasi_snapshot_preview1.Instantiate(ctx, rt) + if err != nil { + panic(err) + } + + return rt + }, + + // Per-run module configuration. + Config: wazero.NewModuleConfig, + + // Embedded WASM. 
+ Source: ffmpeglib.B, + }, +} diff --git a/internal/media/ffmpeg/ffprobe.go b/internal/media/ffmpeg/ffprobe.go new file mode 100644 index 000000000..0b9660e60 --- /dev/null +++ b/internal/media/ffmpeg/ffprobe.go @@ -0,0 +1,92 @@ +// GoToSocial +// Copyright (C) GoToSocial Authors admin@gotosocial.org +// SPDX-License-Identifier: AGPL-3.0-or-later +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package ffmpeg + +import ( + "context" + + ffprobelib "codeberg.org/gruf/go-ffmpreg/embed/ffprobe" + "codeberg.org/gruf/go-ffmpreg/util" + "codeberg.org/gruf/go-ffmpreg/wasm" + + "github.com/tetratelabs/wazero" + "github.com/tetratelabs/wazero/api" + "github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1" +) + +// InitFfprobe initializes the ffprobe WebAssembly instance pool, +// with given maximum limiting the number of concurrent instances. +func InitFfprobe(ctx context.Context, max int) error { + initCache() // ensure compilation cache initialized + return ffprobePool.Init(ctx, max) +} + +// Ffprobe runs the given arguments with an instance of ffprobe. +func Ffprobe(ctx context.Context, args wasm.Args) (uint32, error) { + return ffprobePool.Run(ctx, args) +} + +var ffprobePool = wasmInstancePool{ + inst: wasm.Instantiator{ + + // WASM module name. + Module: "ffprobe", + + // Per-instance WebAssembly runtime (with shared cache). 
+ Runtime: func(ctx context.Context) wazero.Runtime { + + // Prepare config with cache. + cfg := wazero.NewRuntimeConfig() + cfg = cfg.WithCoreFeatures(ffprobelib.CoreFeatures) + cfg = cfg.WithCompilationCache(cache) + + // Instantiate runtime with our config. + rt := wazero.NewRuntimeWithConfig(ctx, cfg) + + // Prepare default "env" host module. + env := rt.NewHostModuleBuilder("env") + env = env.NewFunctionBuilder(). + WithGoModuleFunction( + api.GoModuleFunc(util.Wasm_Tempnam), + []api.ValueType{api.ValueTypeI32, api.ValueTypeI32}, + []api.ValueType{api.ValueTypeI32}, + ). + Export("tempnam") + + // Instantiate "env" module in our runtime. + _, err := env.Instantiate(context.Background()) + if err != nil { + panic(err) + } + + // Instantiate the wasi snapshot preview 1 in runtime. + _, err = wasi_snapshot_preview1.Instantiate(ctx, rt) + if err != nil { + panic(err) + } + + return rt + }, + + // Per-run module configuration. + Config: wazero.NewModuleConfig, + + // Embedded WASM. + Source: ffprobelib.B, + }, +} diff --git a/internal/media/ffmpeg/pool.go b/internal/media/ffmpeg/pool.go new file mode 100644 index 000000000..9f6446be3 --- /dev/null +++ b/internal/media/ffmpeg/pool.go @@ -0,0 +1,75 @@ +// GoToSocial +// Copyright (C) GoToSocial Authors admin@gotosocial.org +// SPDX-License-Identifier: AGPL-3.0-or-later +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . 
+ +package ffmpeg + +import ( + "context" + + "codeberg.org/gruf/go-ffmpreg/wasm" +) + +// wasmInstancePool wraps a wasm.Instantiator{} and a +// channel of wasm.Instance{}s to provide a concurrency +// safe pool of WebAssembly module instances capable of +// compiling new instances on-the-fly, with a predetermined +// maximum number of concurrent instances at any one time. +type wasmInstancePool struct { + inst wasm.Instantiator + pool chan *wasm.Instance +} + +func (p *wasmInstancePool) Init(ctx context.Context, sz int) error { + p.pool = make(chan *wasm.Instance, sz) + for i := 0; i < sz; i++ { + inst, err := p.inst.New(ctx) + if err != nil { + return err + } + p.pool <- inst + } + return nil +} + +func (p *wasmInstancePool) Run(ctx context.Context, args wasm.Args) (uint32, error) { + var inst *wasm.Instance + + select { + // Context canceled. + case <-ctx.Done(): + return 0, ctx.Err() + + // Acquire instance. + case inst = <-p.pool: + + // Ensure instance is + // ready for running. + if inst.IsClosed() { + var err error + inst, err = p.inst.New(ctx) + if err != nil { + return 0, err + } + } + } + + // Release instance to pool on end. + defer func() { p.pool <- inst }() + + // Pass args to instance. + return inst.Run(ctx, args) +} diff --git a/internal/media/image.go b/internal/media/image.go deleted file mode 100644 index 8a34e5062..000000000 --- a/internal/media/image.go +++ /dev/null @@ -1,189 +0,0 @@ -// GoToSocial -// Copyright (C) GoToSocial Authors admin@gotosocial.org -// SPDX-License-Identifier: AGPL-3.0-or-later -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
-// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -package media - -import ( - "bufio" - "image" - "image/color" - "image/draw" - "image/jpeg" - "image/png" - "io" - "sync" - - "github.com/buckket/go-blurhash" - "github.com/disintegration/imaging" - "github.com/superseriousbusiness/gotosocial/internal/iotools" - - // import to init webp encode/decoding. - _ "golang.org/x/image/webp" -) - -var ( - // pngEncoder provides our global PNG encoding with - // specified compression level, and memory pooled buffers. - pngEncoder = png.Encoder{ - CompressionLevel: png.DefaultCompression, - BufferPool: &pngEncoderBufferPool{}, - } - - // jpegBufferPool is a memory pool - // of byte buffers for JPEG encoding. - jpegBufferPool sync.Pool -) - -// gtsImage is a thin wrapper around the standard library image -// interface to provide our own useful helper functions for image -// size and aspect ratio calculations, streamed encoding to various -// types, and creating reduced size thumbnail images. -type gtsImage struct{ image image.Image } - -// blankImage generates a blank image of given dimensions. -func blankImage(width int, height int) *gtsImage { - // create a rectangle with the same dimensions as the video - img := image.NewRGBA(image.Rect(0, 0, width, height)) - - // fill the rectangle with our desired fill color. - draw.Draw(img, img.Bounds(), &image.Uniform{ - color.RGBA{42, 43, 47, 0}, - }, image.Point{}, draw.Src) - - return >sImage{image: img} -} - -// decodeImage will decode image from reader stream and return image wrapped in our own gtsImage{} type. 
-func decodeImage(r io.Reader, opts ...imaging.DecodeOption) (*gtsImage, error) { - img, err := imaging.Decode(r, opts...) - if err != nil { - return nil, err - } - return >sImage{image: img}, nil -} - -// Width returns the image width in pixels. -func (m *gtsImage) Width() int { - return m.image.Bounds().Size().X -} - -// Height returns the image height in pixels. -func (m *gtsImage) Height() int { - return m.image.Bounds().Size().Y -} - -// Size returns the total number of image pixels. -func (m *gtsImage) Size() int { - return m.image.Bounds().Size().X * - m.image.Bounds().Size().Y -} - -// AspectRatio returns the image ratio of width:height. -func (m *gtsImage) AspectRatio() float32 { - - // note: we cast bounds to float64 to prevent truncation - // and only at the end aspect ratio do we cast to float32 - // (as the sizes are likely to be much larger than ratio). - return float32(float64(m.image.Bounds().Size().X) / - float64(m.image.Bounds().Size().Y)) -} - -// Thumbnail returns a small sized copy of gtsImage{}, limited to 512x512 if not small enough. -func (m *gtsImage) Thumbnail() *gtsImage { - const ( - // max thumb - // dimensions. - maxWidth = 512 - maxHeight = 512 - ) - - // Check the receiving image is within max thumnail bounds. - if m.Width() <= maxWidth && m.Height() <= maxHeight { - return >sImage{image: imaging.Clone(m.image)} - } - - // Image is too large, needs to be resized to thumbnail max. - img := imaging.Fit(m.image, maxWidth, maxHeight, imaging.Linear) - return >sImage{image: img} -} - -// Blurhash calculates the blurhash for the receiving image data. -func (m *gtsImage) Blurhash() (string, error) { - // for generating blurhashes, it's more cost effective to - // lose detail since it's blurry, so make a tiny version. 
- tiny := imaging.Resize(m.image, 32, 0, imaging.NearestNeighbor) - - // Encode blurhash from resized version - return blurhash.Encode(4, 3, tiny) -} - -// ToJPEG creates a new streaming JPEG encoder from receiving image, and a size ptr -// which stores the number of bytes written during the image encoding process. -func (m *gtsImage) ToJPEG(opts *jpeg.Options) io.Reader { - return iotools.StreamWriteFunc(func(w io.Writer) error { - // Get encoding buffer - bw := getJPEGBuffer(w) - - // Encode JPEG to buffered writer. - err := jpeg.Encode(bw, m.image, opts) - - // Replace buffer. - // - // NOTE: jpeg.Encode() already - // performs a bufio.Writer.Flush(). - putJPEGBuffer(bw) - - return err - }) -} - -// ToPNG creates a new streaming PNG encoder from receiving image, and a size ptr -// which stores the number of bytes written during the image encoding process. -func (m *gtsImage) ToPNG() io.Reader { - return iotools.StreamWriteFunc(func(w io.Writer) error { - return pngEncoder.Encode(w, m.image) - }) -} - -// getJPEGBuffer fetches a reset JPEG encoding buffer from global JPEG buffer pool. -func getJPEGBuffer(w io.Writer) *bufio.Writer { - v := jpegBufferPool.Get() - if v == nil { - v = bufio.NewWriter(nil) - } - buf := v.(*bufio.Writer) - buf.Reset(w) - return buf -} - -// putJPEGBuffer resets the given bufio writer and places in global JPEG buffer pool. -func putJPEGBuffer(buf *bufio.Writer) { - buf.Reset(nil) - jpegBufferPool.Put(buf) -} - -// pngEncoderBufferPool implements png.EncoderBufferPool. 
-type pngEncoderBufferPool sync.Pool - -func (p *pngEncoderBufferPool) Get() *png.EncoderBuffer { - buf, _ := (*sync.Pool)(p).Get().(*png.EncoderBuffer) - return buf -} - -func (p *pngEncoderBufferPool) Put(buf *png.EncoderBuffer) { - (*sync.Pool)(p).Put(buf) -} diff --git a/internal/media/manager.go b/internal/media/manager.go index ea126e460..aaf9448b8 100644 --- a/internal/media/manager.go +++ b/internal/media/manager.go @@ -314,21 +314,26 @@ func (m *Manager) RefreshEmoji( // Since this is a refresh we will end up storing new images at new // paths, so we should wrap closer to delete old paths at completion. - wrapped := func(ctx context.Context) (io.ReadCloser, int64, error) { + wrapped := func(ctx context.Context) (io.ReadCloser, error) { - // Call original data func. - rc, sz, err := data(ctx) + // Call original func. + rc, err := data(ctx) if err != nil { - return nil, 0, err + return nil, err } - // Wrap closer to cleanup old data. - c := iotools.CloserFunc(func() error { + // Cast as separated reader / closer types. + rct, ok := rc.(*iotools.ReadCloserType) - // First try close original. - if rc.Close(); err != nil { - return err - } + if !ok { + // Allocate new read closer type. + rct = new(iotools.ReadCloserType) + rct.Reader = rc + rct.Closer = rc + } + + // Wrap underlying io.Closer type to cleanup old data. + rct.Closer = iotools.CloserCallback(rct.Closer, func() { // Remove any *old* emoji image file path now stream is closed. if err := m.state.Storage.Delete(ctx, oldPath); err != nil && @@ -341,12 +346,9 @@ func (m *Manager) RefreshEmoji( !storage.IsNotFound(err) { log.Errorf(ctx, "error deleting old static emoji %s from storage: %v", shortcodeDomain, err) } - - return nil }) - // Return newly wrapped readcloser and size. 
- return iotools.ReadCloser(rc, c), sz, nil + return rct, nil } // Use a new ID to create a new path diff --git a/internal/media/manager_test.go b/internal/media/manager_test.go index 53c08eed8..a099d2b95 100644 --- a/internal/media/manager_test.go +++ b/internal/media/manager_test.go @@ -20,12 +20,14 @@ package media_test import ( "bytes" "context" + "crypto/md5" "fmt" "io" "os" "testing" "time" + "codeberg.org/gruf/go-iotools" "codeberg.org/gruf/go-storage/disk" "github.com/stretchr/testify/suite" gtsmodel "github.com/superseriousbusiness/gotosocial/internal/gtsmodel" @@ -33,6 +35,7 @@ import ( "github.com/superseriousbusiness/gotosocial/internal/state" "github.com/superseriousbusiness/gotosocial/internal/storage" gtsstorage "github.com/superseriousbusiness/gotosocial/internal/storage" + "github.com/superseriousbusiness/gotosocial/internal/util" "github.com/superseriousbusiness/gotosocial/testrig" ) @@ -43,13 +46,13 @@ type ManagerTestSuite struct { func (suite *ManagerTestSuite) TestEmojiProcess() { ctx := context.Background() - data := func(_ context.Context) (io.ReadCloser, int64, error) { + data := func(_ context.Context) (io.ReadCloser, error) { // load bytes from a test image b, err := os.ReadFile("./test/rainbow-original.png") if err != nil { panic(err) } - return io.NopCloser(bytes.NewBuffer(b)), int64(len(b)), nil + return io.NopCloser(bytes.NewBuffer(b)), nil } processing, err := suite.manager.CreateEmoji(ctx, @@ -66,7 +69,7 @@ func (suite *ManagerTestSuite) TestEmojiProcess() { suite.NotNil(emoji) // file meta should be correctly derived from the image - suite.Equal("image/png", emoji.ImageContentType) + suite.Equal("image/apng", emoji.ImageContentType) suite.Equal("image/png", emoji.ImageStaticContentType) suite.Equal(36702, emoji.ImageFileSize) @@ -75,29 +78,9 @@ func (suite *ManagerTestSuite) TestEmojiProcess() { suite.NoError(err) suite.NotNil(dbEmoji) - // make sure the processed emoji file is in storage - processedFullBytes, err := 
suite.storage.Get(ctx, emoji.ImagePath) - suite.NoError(err) - suite.NotEmpty(processedFullBytes) - - // load the processed bytes from our test folder, to compare - processedFullBytesExpected, err := os.ReadFile("./test/rainbow-original.png") - suite.NoError(err) - suite.NotEmpty(processedFullBytesExpected) - - // the bytes in storage should be what we expected - suite.Equal(processedFullBytesExpected, processedFullBytes) - - // now do the same for the thumbnail and make sure it's what we expected - processedStaticBytes, err := suite.storage.Get(ctx, emoji.ImageStaticPath) - suite.NoError(err) - suite.NotEmpty(processedStaticBytes) - - processedStaticBytesExpected, err := os.ReadFile("./test/rainbow-static.png") - suite.NoError(err) - suite.NotEmpty(processedStaticBytesExpected) - - suite.Equal(processedStaticBytesExpected, processedStaticBytes) + // ensure the files contain the expected data. + equalFiles(suite.T(), suite.state.Storage, dbEmoji.ImagePath, "./test/rainbow-original.png") + equalFiles(suite.T(), suite.state.Storage, dbEmoji.ImageStaticPath, "./test/rainbow-static.png") } func (suite *ManagerTestSuite) TestEmojiProcessRefresh() { @@ -114,12 +97,12 @@ func (suite *ManagerTestSuite) TestEmojiProcessRefresh() { oldEmojiImagePath := emojiToUpdate.ImagePath oldEmojiImageStaticPath := emojiToUpdate.ImageStaticPath - data := func(_ context.Context) (io.ReadCloser, int64, error) { + data := func(_ context.Context) (io.ReadCloser, error) { b, err := os.ReadFile("./test/gts_pixellated-original.png") if err != nil { panic(err) } - return io.NopCloser(bytes.NewBuffer(b)), int64(len(b)), nil + return io.NopCloser(bytes.NewBuffer(b)), nil } processing, err := suite.manager.RefreshEmoji(ctx, @@ -151,29 +134,9 @@ func (suite *ManagerTestSuite) TestEmojiProcessRefresh() { suite.NoError(err) suite.NotNil(dbEmoji) - // make sure the processed emoji file is in storage - processedFullBytes, err := suite.storage.Get(ctx, emoji.ImagePath) - suite.NoError(err) - 
suite.NotEmpty(processedFullBytes) - - // load the processed bytes from our test folder, to compare - processedFullBytesExpected, err := os.ReadFile("./test/gts_pixellated-original.png") - suite.NoError(err) - suite.NotEmpty(processedFullBytesExpected) - - // the bytes in storage should be what we expected - suite.Equal(processedFullBytesExpected, processedFullBytes) - - // now do the same for the thumbnail and make sure it's what we expected - processedStaticBytes, err := suite.storage.Get(ctx, emoji.ImageStaticPath) - suite.NoError(err) - suite.NotEmpty(processedStaticBytes) - - processedStaticBytesExpected, err := os.ReadFile("./test/gts_pixellated-static.png") - suite.NoError(err) - suite.NotEmpty(processedStaticBytesExpected) - - suite.Equal(processedStaticBytesExpected, processedStaticBytes) + // ensure the files contain the expected data. + equalFiles(suite.T(), suite.state.Storage, dbEmoji.ImagePath, "./test/gts_pixellated-original.png") + equalFiles(suite.T(), suite.state.Storage, dbEmoji.ImageStaticPath, "./test/gts_pixellated-static.png") // most fields should be different on the emoji now from what they were before suite.Equal(originalEmoji.ID, dbEmoji.ID) @@ -197,124 +160,47 @@ func (suite *ManagerTestSuite) TestEmojiProcessRefresh() { func (suite *ManagerTestSuite) TestEmojiProcessTooLarge() { ctx := context.Background() - data := func(_ context.Context) (io.ReadCloser, int64, error) { - // load bytes from a test image - b, err := os.ReadFile("./test/big-panda.gif") - if err != nil { - panic(err) - } - return io.NopCloser(bytes.NewBuffer(b)), int64(len(b)), nil + // Open test image as file for reading. + file, err := os.Open("./test/big-panda.gif") + if err != nil { + panic(err) } + // Get file size info. + stat, err := file.Stat() + if err != nil { + panic(err) + } + + // Set max allowed size UNDER image size. 
+ lr := io.LimitReader(file, stat.Size()-10) + rc := iotools.ReadCloser(lr, file) + processing, err := suite.manager.CreateEmoji(ctx, "big_panda", "", - data, + func(ctx context.Context) (reader io.ReadCloser, err error) { + return rc, nil + }, media.AdditionalEmojiInfo{}, ) suite.NoError(err) // do a blocking call to fetch the emoji _, err = processing.Load(ctx) - suite.EqualError(err, "store: given emoji size 630kiB greater than max allowed 50.0kiB") -} - -func (suite *ManagerTestSuite) TestEmojiProcessTooLargeNoSizeGiven() { - ctx := context.Background() - - data := func(_ context.Context) (io.ReadCloser, int64, error) { - // load bytes from a test image - b, err := os.ReadFile("./test/big-panda.gif") - if err != nil { - panic(err) - } - return io.NopCloser(bytes.NewBuffer(b)), -1, nil - } - - processing, err := suite.manager.CreateEmoji(ctx, - "big_panda", - "", - data, - media.AdditionalEmojiInfo{}, - ) - suite.NoError(err) - - // do a blocking call to fetch the emoji - _, err = processing.Load(ctx) - suite.EqualError(err, "store: written emoji size 630kiB greater than max allowed 50.0kiB") -} - -func (suite *ManagerTestSuite) TestEmojiProcessNoFileSizeGiven() { - ctx := context.Background() - - data := func(_ context.Context) (io.ReadCloser, int64, error) { - // load bytes from a test image - b, err := os.ReadFile("./test/rainbow-original.png") - if err != nil { - panic(err) - } - return io.NopCloser(bytes.NewBuffer(b)), -1, nil - } - - // process the media with no additional info provided - processing, err := suite.manager.CreateEmoji(ctx, - "rainbow_test", - "", - data, - media.AdditionalEmojiInfo{}, - ) - suite.NoError(err) - - // do a blocking call to fetch the emoji - emoji, err := processing.Load(ctx) - suite.NoError(err) - suite.NotNil(emoji) - - // file meta should be correctly derived from the image - suite.Equal("image/png", emoji.ImageContentType) - suite.Equal("image/png", emoji.ImageStaticContentType) - suite.Equal(36702, emoji.ImageFileSize) - - 
// now make sure the emoji is in the database - dbEmoji, err := suite.db.GetEmojiByID(ctx, emoji.ID) - suite.NoError(err) - suite.NotNil(dbEmoji) - - // make sure the processed emoji file is in storage - processedFullBytes, err := suite.storage.Get(ctx, emoji.ImagePath) - suite.NoError(err) - suite.NotEmpty(processedFullBytes) - - // load the processed bytes from our test folder, to compare - processedFullBytesExpected, err := os.ReadFile("./test/rainbow-original.png") - suite.NoError(err) - suite.NotEmpty(processedFullBytesExpected) - - // the bytes in storage should be what we expected - suite.Equal(processedFullBytesExpected, processedFullBytes) - - // now do the same for the thumbnail and make sure it's what we expected - processedStaticBytes, err := suite.storage.Get(ctx, emoji.ImageStaticPath) - suite.NoError(err) - suite.NotEmpty(processedStaticBytes) - - processedStaticBytesExpected, err := os.ReadFile("./test/rainbow-static.png") - suite.NoError(err) - suite.NotEmpty(processedStaticBytesExpected) - - suite.Equal(processedStaticBytesExpected, processedStaticBytes) + suite.EqualError(err, "store: error draining data to tmp: reached read limit 630kiB") } func (suite *ManagerTestSuite) TestEmojiWebpProcess() { ctx := context.Background() - data := func(_ context.Context) (io.ReadCloser, int64, error) { + data := func(_ context.Context) (io.ReadCloser, error) { // load bytes from a test image b, err := os.ReadFile("./test/nb-flag-original.webp") if err != nil { panic(err) } - return io.NopCloser(bytes.NewBuffer(b)), int64(len(b)), nil + return io.NopCloser(bytes.NewBuffer(b)), nil } // process the media with no additional info provided @@ -341,41 +227,21 @@ func (suite *ManagerTestSuite) TestEmojiWebpProcess() { suite.NoError(err) suite.NotNil(dbEmoji) - // make sure the processed emoji file is in storage - processedFullBytes, err := suite.storage.Get(ctx, emoji.ImagePath) - suite.NoError(err) - suite.NotEmpty(processedFullBytes) - - // load the processed bytes 
from our test folder, to compare - processedFullBytesExpected, err := os.ReadFile("./test/nb-flag-original.webp") - suite.NoError(err) - suite.NotEmpty(processedFullBytesExpected) - - // the bytes in storage should be what we expected - suite.Equal(processedFullBytesExpected, processedFullBytes) - - // now do the same for the thumbnail and make sure it's what we expected - processedStaticBytes, err := suite.storage.Get(ctx, emoji.ImageStaticPath) - suite.NoError(err) - suite.NotEmpty(processedStaticBytes) - - processedStaticBytesExpected, err := os.ReadFile("./test/nb-flag-static.png") - suite.NoError(err) - suite.NotEmpty(processedStaticBytesExpected) - - suite.Equal(processedStaticBytesExpected, processedStaticBytes) + // ensure files are equal + equalFiles(suite.T(), suite.state.Storage, dbEmoji.ImagePath, "./test/nb-flag-original.webp") + equalFiles(suite.T(), suite.state.Storage, dbEmoji.ImageStaticPath, "./test/nb-flag-static.png") } func (suite *ManagerTestSuite) TestSimpleJpegProcess() { ctx := context.Background() - data := func(_ context.Context) (io.ReadCloser, int64, error) { + data := func(_ context.Context) (io.ReadCloser, error) { // load bytes from a test image b, err := os.ReadFile("./test/test-jpeg.jpg") if err != nil { panic(err) } - return io.NopCloser(bytes.NewBuffer(b)), int64(len(b)), nil + return io.NopCloser(bytes.NewBuffer(b)), nil } accountID := "01FS1X72SK9ZPW0J1QQ68BD264" @@ -409,117 +275,66 @@ func (suite *ManagerTestSuite) TestSimpleJpegProcess() { suite.Equal("image/jpeg", attachment.File.ContentType) suite.Equal("image/jpeg", attachment.Thumbnail.ContentType) suite.Equal(269739, attachment.File.FileSize) - suite.Equal("LiBzRk#6V[WF_NvzV@WY_3rqV@a$", attachment.Blurhash) + suite.Equal("LjCGfG#6RkRn_NvzRjWF?urqV@a$", attachment.Blurhash) // now make sure the attachment is in the database dbAttachment, err := suite.db.GetAttachmentByID(ctx, attachment.ID) suite.NoError(err) suite.NotNil(dbAttachment) - // make sure the processed file 
is in storage - processedFullBytes, err := suite.storage.Get(ctx, attachment.File.Path) - suite.NoError(err) - suite.NotEmpty(processedFullBytes) - - // load the processed bytes from our test folder, to compare - processedFullBytesExpected, err := os.ReadFile("./test/test-jpeg-processed.jpg") - suite.NoError(err) - suite.NotEmpty(processedFullBytesExpected) - - // the bytes in storage should be what we expected - suite.Equal(processedFullBytesExpected, processedFullBytes) - - // now do the same for the thumbnail and make sure it's what we expected - processedThumbnailBytes, err := suite.storage.Get(ctx, attachment.Thumbnail.Path) - suite.NoError(err) - suite.NotEmpty(processedThumbnailBytes) - - processedThumbnailBytesExpected, err := os.ReadFile("./test/test-jpeg-thumbnail.jpg") - suite.NoError(err) - suite.NotEmpty(processedThumbnailBytesExpected) - - suite.Equal(processedThumbnailBytesExpected, processedThumbnailBytes) + // ensure the files contain the expected data. + equalFiles(suite.T(), suite.state.Storage, dbAttachment.File.Path, "./test/test-jpeg-processed.jpg") + equalFiles(suite.T(), suite.state.Storage, dbAttachment.Thumbnail.Path, "./test/test-jpeg-thumbnail.jpg") } -func (suite *ManagerTestSuite) TestSimpleJpegProcessPartial() { +func (suite *ManagerTestSuite) TestSimpleJpegProcessTooLarge() { ctx := context.Background() - data := func(_ context.Context) (io.ReadCloser, int64, error) { - // load bytes from a test image - b, err := os.ReadFile("./test/test-jpeg.jpg") - if err != nil { - panic(err) - } - - // Fuck up the bytes a bit by cutting - // off the second half, tee hee! - b = b[:len(b)/2] - - return io.NopCloser(bytes.NewBuffer(b)), int64(len(b)), nil + // Open test image as file for reading. + file, err := os.Open("./test/test-jpeg.jpg") + if err != nil { + panic(err) } + // Get file size info. + stat, err := file.Stat() + if err != nil { + panic(err) + } + + // Set max allowed size UNDER image size. 
+ lr := io.LimitReader(file, stat.Size()-10) + rc := iotools.ReadCloser(lr, file) + accountID := "01FS1X72SK9ZPW0J1QQ68BD264" // process the media with no additional info provided processing, err := suite.manager.CreateMedia(ctx, accountID, - data, + func(ctx context.Context) (reader io.ReadCloser, err error) { + return rc, nil + }, media.AdditionalMediaInfo{}, ) suite.NoError(err) suite.NotNil(processing) // do a blocking call to fetch the attachment - attachment, err := processing.Load(ctx) - - // Since we're cutting off the byte stream - // halfway through, we should get an error here. - suite.EqualError(err, "store: error writing media to storage: scan-data is unbounded; EOI not encountered before EOF") - suite.NotNil(attachment) - - // make sure it's got the stuff set on it that we expect - // the attachment ID and accountID we expect - suite.Equal(processing.ID(), attachment.ID) - suite.Equal(accountID, attachment.AccountID) - - // file meta should be correctly derived from the image - suite.Zero(attachment.FileMeta) - suite.Equal("image/jpeg", attachment.File.ContentType) - suite.Empty(attachment.Blurhash) - - // now make sure the attachment is in the database - dbAttachment, err := suite.db.GetAttachmentByID(ctx, attachment.ID) - suite.NoError(err) - suite.NotNil(dbAttachment) - - // Attachment should have type unknown - suite.Equal(gtsmodel.FileTypeUnknown, dbAttachment.Type) - - // Nothing should be in storage for this attachment. 
- stored, err := suite.storage.Has(ctx, attachment.File.Path) - if err != nil { - suite.FailNow(err.Error()) - } - suite.False(stored) - - stored, err = suite.storage.Has(ctx, attachment.Thumbnail.Path) - if err != nil { - suite.FailNow(err.Error()) - } - suite.False(stored) + _, err = processing.Load(ctx) + suite.EqualError(err, "store: error draining data to tmp: reached read limit 263kiB") } func (suite *ManagerTestSuite) TestPDFProcess() { ctx := context.Background() - data := func(_ context.Context) (io.ReadCloser, int64, error) { + data := func(_ context.Context) (io.ReadCloser, error) { // load bytes from Frantz b, err := os.ReadFile("./test/Frantz-Fanon-The-Wretched-of-the-Earth-1965.pdf") if err != nil { panic(err) } - return io.NopCloser(bytes.NewBuffer(b)), int64(len(b)), nil + return io.NopCloser(bytes.NewBuffer(b)), nil } accountID := "01FS1X72SK9ZPW0J1QQ68BD264" @@ -545,7 +360,7 @@ func (suite *ManagerTestSuite) TestPDFProcess() { // file meta should be correctly derived from the image suite.Zero(attachment.FileMeta) - suite.Equal("application/pdf", attachment.File.ContentType) + suite.Equal("application/octet-stream", attachment.File.ContentType) suite.Equal("image/jpeg", attachment.Thumbnail.ContentType) suite.Empty(attachment.Blurhash) @@ -559,28 +374,24 @@ func (suite *ManagerTestSuite) TestPDFProcess() { // Nothing should be in storage for this attachment. 
stored, err := suite.storage.Has(ctx, attachment.File.Path) - if err != nil { - suite.FailNow(err.Error()) - } + suite.NoError(err) suite.False(stored) stored, err = suite.storage.Has(ctx, attachment.Thumbnail.Path) - if err != nil { - suite.FailNow(err.Error()) - } + suite.NoError(err) suite.False(stored) } func (suite *ManagerTestSuite) TestSlothVineProcess() { ctx := context.Background() - data := func(_ context.Context) (io.ReadCloser, int64, error) { + data := func(_ context.Context) (io.ReadCloser, error) { // load bytes from a test video b, err := os.ReadFile("./test/test-mp4-original.mp4") if err != nil { panic(err) } - return io.NopCloser(bytes.NewBuffer(b)), int64(len(b)), nil + return io.NopCloser(bytes.NewBuffer(b)), nil } accountID := "01FS1X72SK9ZPW0J1QQ68BD264" @@ -609,57 +420,37 @@ func (suite *ManagerTestSuite) TestSlothVineProcess() { suite.Equal(240, attachment.FileMeta.Original.Height) suite.Equal(81120, attachment.FileMeta.Original.Size) suite.EqualValues(float32(1.4083333), attachment.FileMeta.Original.Aspect) - suite.EqualValues(float32(6.640907), *attachment.FileMeta.Original.Duration) - suite.EqualValues(float32(29.000029), *attachment.FileMeta.Original.Framerate) - suite.EqualValues(0x59e74, *attachment.FileMeta.Original.Bitrate) + suite.EqualValues(float32(6.641), *attachment.FileMeta.Original.Duration) + suite.EqualValues(float32(29.00003), *attachment.FileMeta.Original.Framerate) + suite.EqualValues(0x5be18, *attachment.FileMeta.Original.Bitrate) suite.EqualValues(gtsmodel.Small{ Width: 338, Height: 240, Size: 81120, Aspect: 1.4083333333333334, }, attachment.FileMeta.Small) suite.Equal("video/mp4", attachment.File.ContentType) suite.Equal("image/jpeg", attachment.Thumbnail.ContentType) - suite.Equal(312413, attachment.File.FileSize) - suite.Equal("L00000fQfQfQfQfQfQfQfQfQfQfQ", attachment.Blurhash) + suite.Equal(312453, attachment.File.FileSize) + suite.Equal("LrJuJat6NZkBt7ayW.j[_4WBsWoL", attachment.Blurhash) // now make sure the 
attachment is in the database dbAttachment, err := suite.db.GetAttachmentByID(ctx, attachment.ID) suite.NoError(err) suite.NotNil(dbAttachment) - // make sure the processed file is in storage - processedFullBytes, err := suite.storage.Get(ctx, attachment.File.Path) - suite.NoError(err) - suite.NotEmpty(processedFullBytes) - - // load the processed bytes from our test folder, to compare - processedFullBytesExpected, err := os.ReadFile("./test/test-mp4-processed.mp4") - suite.NoError(err) - suite.NotEmpty(processedFullBytesExpected) - - // the bytes in storage should be what we expected - suite.Equal(processedFullBytesExpected, processedFullBytes) - - // now do the same for the thumbnail and make sure it's what we expected - processedThumbnailBytes, err := suite.storage.Get(ctx, attachment.Thumbnail.Path) - suite.NoError(err) - suite.NotEmpty(processedThumbnailBytes) - - processedThumbnailBytesExpected, err := os.ReadFile("./test/test-mp4-thumbnail.jpg") - suite.NoError(err) - suite.NotEmpty(processedThumbnailBytesExpected) - - suite.Equal(processedThumbnailBytesExpected, processedThumbnailBytes) + // ensure the files contain the expected data. 
+ equalFiles(suite.T(), suite.state.Storage, dbAttachment.File.Path, "./test/test-mp4-processed.mp4") + equalFiles(suite.T(), suite.state.Storage, dbAttachment.Thumbnail.Path, "./test/test-mp4-thumbnail.jpg") } func (suite *ManagerTestSuite) TestLongerMp4Process() { ctx := context.Background() - data := func(_ context.Context) (io.ReadCloser, int64, error) { + data := func(_ context.Context) (io.ReadCloser, error) { // load bytes from a test video b, err := os.ReadFile("./test/longer-mp4-original.mp4") if err != nil { panic(err) } - return io.NopCloser(bytes.NewBuffer(b)), int64(len(b)), nil + return io.NopCloser(bytes.NewBuffer(b)), nil } accountID := "01FS1X72SK9ZPW0J1QQ68BD264" @@ -690,55 +481,35 @@ func (suite *ManagerTestSuite) TestLongerMp4Process() { suite.EqualValues(float32(1.8181819), attachment.FileMeta.Original.Aspect) suite.EqualValues(float32(16.6), *attachment.FileMeta.Original.Duration) suite.EqualValues(float32(10), *attachment.FileMeta.Original.Framerate) - suite.EqualValues(0xc8fb, *attachment.FileMeta.Original.Bitrate) + suite.EqualValues(0xce3a, *attachment.FileMeta.Original.Bitrate) suite.EqualValues(gtsmodel.Small{ Width: 512, Height: 281, Size: 143872, Aspect: 1.822064, }, attachment.FileMeta.Small) suite.Equal("video/mp4", attachment.File.ContentType) suite.Equal("image/jpeg", attachment.Thumbnail.ContentType) - suite.Equal(109549, attachment.File.FileSize) - suite.Equal("L00000fQfQfQfQfQfQfQfQfQfQfQ", attachment.Blurhash) + suite.Equal(109569, attachment.File.FileSize) + suite.Equal("LASY{q~qD%_3~qD%ofRjM{ofofRj", attachment.Blurhash) // now make sure the attachment is in the database dbAttachment, err := suite.db.GetAttachmentByID(ctx, attachment.ID) suite.NoError(err) suite.NotNil(dbAttachment) - // make sure the processed file is in storage - processedFullBytes, err := suite.storage.Get(ctx, attachment.File.Path) - suite.NoError(err) - suite.NotEmpty(processedFullBytes) - - // load the processed bytes from our test folder, to compare - 
processedFullBytesExpected, err := os.ReadFile("./test/longer-mp4-processed.mp4") - suite.NoError(err) - suite.NotEmpty(processedFullBytesExpected) - - // the bytes in storage should be what we expected - suite.Equal(processedFullBytesExpected, processedFullBytes) - - // now do the same for the thumbnail and make sure it's what we expected - processedThumbnailBytes, err := suite.storage.Get(ctx, attachment.Thumbnail.Path) - suite.NoError(err) - suite.NotEmpty(processedThumbnailBytes) - - processedThumbnailBytesExpected, err := os.ReadFile("./test/longer-mp4-thumbnail.jpg") - suite.NoError(err) - suite.NotEmpty(processedThumbnailBytesExpected) - - suite.Equal(processedThumbnailBytesExpected, processedThumbnailBytes) + // ensure the files contain the expected data. + equalFiles(suite.T(), suite.state.Storage, dbAttachment.File.Path, "./test/longer-mp4-processed.mp4") + equalFiles(suite.T(), suite.state.Storage, dbAttachment.Thumbnail.Path, "./test/longer-mp4-thumbnail.jpg") } func (suite *ManagerTestSuite) TestBirdnestMp4Process() { ctx := context.Background() - data := func(_ context.Context) (io.ReadCloser, int64, error) { + data := func(_ context.Context) (io.ReadCloser, error) { // load bytes from a test video b, err := os.ReadFile("./test/birdnest-original.mp4") if err != nil { panic(err) } - return io.NopCloser(bytes.NewBuffer(b)), int64(len(b)), nil + return io.NopCloser(bytes.NewBuffer(b)), nil } accountID := "01FS1X72SK9ZPW0J1QQ68BD264" @@ -767,93 +538,37 @@ func (suite *ManagerTestSuite) TestBirdnestMp4Process() { suite.Equal(720, attachment.FileMeta.Original.Height) suite.Equal(290880, attachment.FileMeta.Original.Size) suite.EqualValues(float32(0.5611111), attachment.FileMeta.Original.Aspect) - suite.EqualValues(float32(9.822041), *attachment.FileMeta.Original.Duration) + suite.EqualValues(float32(9.823), *attachment.FileMeta.Original.Duration) suite.EqualValues(float32(30), *attachment.FileMeta.Original.Framerate) - suite.EqualValues(0x117c79, 
*attachment.FileMeta.Original.Bitrate) + suite.EqualValues(0x11844c, *attachment.FileMeta.Original.Bitrate) suite.EqualValues(gtsmodel.Small{ Width: 287, Height: 512, Size: 146944, Aspect: 0.5605469, }, attachment.FileMeta.Small) suite.Equal("video/mp4", attachment.File.ContentType) suite.Equal("image/jpeg", attachment.Thumbnail.ContentType) - suite.Equal(1409577, attachment.File.FileSize) - suite.Equal("L00000fQfQfQfQfQfQfQfQfQfQfQ", attachment.Blurhash) + suite.Equal(1409625, attachment.File.FileSize) + suite.Equal("LOGb||RjRO.99DRORPaetkV?afMw", attachment.Blurhash) // now make sure the attachment is in the database dbAttachment, err := suite.db.GetAttachmentByID(ctx, attachment.ID) suite.NoError(err) suite.NotNil(dbAttachment) - // make sure the processed file is in storage - processedFullBytes, err := suite.storage.Get(ctx, attachment.File.Path) - suite.NoError(err) - suite.NotEmpty(processedFullBytes) - - // load the processed bytes from our test folder, to compare - processedFullBytesExpected, err := os.ReadFile("./test/birdnest-processed.mp4") - suite.NoError(err) - suite.NotEmpty(processedFullBytesExpected) - - // the bytes in storage should be what we expected - suite.Equal(processedFullBytesExpected, processedFullBytes) - - // now do the same for the thumbnail and make sure it's what we expected - processedThumbnailBytes, err := suite.storage.Get(ctx, attachment.Thumbnail.Path) - suite.NoError(err) - suite.NotEmpty(processedThumbnailBytes) - - processedThumbnailBytesExpected, err := os.ReadFile("./test/birdnest-thumbnail.jpg") - suite.NoError(err) - suite.NotEmpty(processedThumbnailBytesExpected) - - suite.Equal(processedThumbnailBytesExpected, processedThumbnailBytes) + // ensure the files contain the expected data. 
+ equalFiles(suite.T(), suite.state.Storage, dbAttachment.File.Path, "./test/birdnest-processed.mp4") + equalFiles(suite.T(), suite.state.Storage, dbAttachment.Thumbnail.Path, "./test/birdnest-thumbnail.jpg") } -func (suite *ManagerTestSuite) TestNotAnMp4Process() { - // try to load an 'mp4' that's actually an mkv in disguise - +func (suite *ManagerTestSuite) TestOpusProcess() { ctx := context.Background() - data := func(_ context.Context) (io.ReadCloser, int64, error) { - // load bytes from a test video - b, err := os.ReadFile("./test/not-an.mp4") - if err != nil { - panic(err) - } - return io.NopCloser(bytes.NewBuffer(b)), int64(len(b)), nil - } - - accountID := "01FS1X72SK9ZPW0J1QQ68BD264" - - // pre processing should go fine but... - processing, err := suite.manager.CreateMedia(ctx, - accountID, - data, - media.AdditionalMediaInfo{}, - ) - suite.NoError(err) - suite.NotNil(processing) - - // we should get an error while loading - attachment, err := processing.Load(ctx) - suite.EqualError(err, "finish: error decoding video: error determining video metadata: [width height framerate]") - - // partial attachment should be - // returned, with 'unknown' type. 
- suite.NotNil(attachment) - suite.Equal(gtsmodel.FileTypeUnknown, attachment.Type) -} - -func (suite *ManagerTestSuite) TestSimpleJpegProcessNoContentLengthGiven() { - ctx := context.Background() - - data := func(_ context.Context) (io.ReadCloser, int64, error) { + data := func(_ context.Context) (io.ReadCloser, error) { // load bytes from a test image - b, err := os.ReadFile("./test/test-jpeg.jpg") + b, err := os.ReadFile("./test/test-opus-original.opus") if err != nil { panic(err) } - // give length as -1 to indicate unknown - return io.NopCloser(bytes.NewBuffer(b)), -1, nil + return io.NopCloser(bytes.NewBuffer(b)), nil } accountID := "01FS1X72SK9ZPW0J1QQ68BD264" @@ -879,132 +594,33 @@ func (suite *ManagerTestSuite) TestSimpleJpegProcessNoContentLengthGiven() { // file meta should be correctly derived from the image suite.EqualValues(gtsmodel.Original{ - Width: 1920, Height: 1080, Size: 2073600, Aspect: 1.7777777777777777, + Duration: util.Ptr(float32(122.10006)), + Bitrate: util.Ptr(uint64(116426)), }, attachment.FileMeta.Original) - suite.EqualValues(gtsmodel.Small{ - Width: 512, Height: 288, Size: 147456, Aspect: 1.7777777777777777, - }, attachment.FileMeta.Small) - suite.Equal("image/jpeg", attachment.File.ContentType) - suite.Equal("image/jpeg", attachment.Thumbnail.ContentType) - suite.Equal(269739, attachment.File.FileSize) - suite.Equal("LiBzRk#6V[WF_NvzV@WY_3rqV@a$", attachment.Blurhash) + suite.Equal("audio/ogg", attachment.File.ContentType) + suite.Equal(1776956, attachment.File.FileSize) + suite.Empty(attachment.Blurhash) // now make sure the attachment is in the database dbAttachment, err := suite.db.GetAttachmentByID(ctx, attachment.ID) suite.NoError(err) suite.NotNil(dbAttachment) - // make sure the processed file is in storage - processedFullBytes, err := suite.storage.Get(ctx, attachment.File.Path) - suite.NoError(err) - suite.NotEmpty(processedFullBytes) - - // load the processed bytes from our test folder, to compare - 
processedFullBytesExpected, err := os.ReadFile("./test/test-jpeg-processed.jpg") - suite.NoError(err) - suite.NotEmpty(processedFullBytesExpected) - - // the bytes in storage should be what we expected - suite.Equal(processedFullBytesExpected, processedFullBytes) - - // now do the same for the thumbnail and make sure it's what we expected - processedThumbnailBytes, err := suite.storage.Get(ctx, attachment.Thumbnail.Path) - suite.NoError(err) - suite.NotEmpty(processedThumbnailBytes) - - processedThumbnailBytesExpected, err := os.ReadFile("./test/test-jpeg-thumbnail.jpg") - suite.NoError(err) - suite.NotEmpty(processedThumbnailBytesExpected) - - suite.Equal(processedThumbnailBytesExpected, processedThumbnailBytes) -} - -func (suite *ManagerTestSuite) TestSimpleJpegProcessReadCloser() { - ctx := context.Background() - - data := func(_ context.Context) (io.ReadCloser, int64, error) { - // open test image as a file - f, err := os.Open("./test/test-jpeg.jpg") - if err != nil { - panic(err) - } - // give length as -1 to indicate unknown - return f, -1, nil - } - - accountID := "01FS1X72SK9ZPW0J1QQ68BD264" - - // process the media with no additional info provided - processing, err := suite.manager.CreateMedia(ctx, - accountID, - data, - media.AdditionalMediaInfo{}, - ) - suite.NoError(err) - suite.NotNil(processing) - - // do a blocking call to fetch the attachment - attachment, err := processing.Load(ctx) - suite.NoError(err) - suite.NotNil(attachment) - - // make sure it's got the stuff set on it that we expect - // the attachment ID and accountID we expect - suite.Equal(processing.ID(), attachment.ID) - suite.Equal(accountID, attachment.AccountID) - - // file meta should be correctly derived from the image - suite.EqualValues(gtsmodel.Original{ - Width: 1920, Height: 1080, Size: 2073600, Aspect: 1.7777777777777777, - }, attachment.FileMeta.Original) - suite.EqualValues(gtsmodel.Small{ - Width: 512, Height: 288, Size: 147456, Aspect: 1.7777777777777777, - }, 
attachment.FileMeta.Small) - suite.Equal("image/jpeg", attachment.File.ContentType) - suite.Equal("image/jpeg", attachment.Thumbnail.ContentType) - suite.Equal(269739, attachment.File.FileSize) - suite.Equal("LiBzRk#6V[WF_NvzV@WY_3rqV@a$", attachment.Blurhash) - - // now make sure the attachment is in the database - dbAttachment, err := suite.db.GetAttachmentByID(ctx, attachment.ID) - suite.NoError(err) - suite.NotNil(dbAttachment) - - // make sure the processed file is in storage - processedFullBytes, err := suite.storage.Get(ctx, attachment.File.Path) - suite.NoError(err) - suite.NotEmpty(processedFullBytes) - - // load the processed bytes from our test folder, to compare - processedFullBytesExpected, err := os.ReadFile("./test/test-jpeg-processed.jpg") - suite.NoError(err) - suite.NotEmpty(processedFullBytesExpected) - - // the bytes in storage should be what we expected - suite.Equal(processedFullBytesExpected, processedFullBytes) - - // now do the same for the thumbnail and make sure it's what we expected - processedThumbnailBytes, err := suite.storage.Get(ctx, attachment.Thumbnail.Path) - suite.NoError(err) - suite.NotEmpty(processedThumbnailBytes) - - processedThumbnailBytesExpected, err := os.ReadFile("./test/test-jpeg-thumbnail.jpg") - suite.NoError(err) - suite.NotEmpty(processedThumbnailBytesExpected) - - suite.Equal(processedThumbnailBytesExpected, processedThumbnailBytes) + // ensure the files contain the expected data. 
+ equalFiles(suite.T(), suite.state.Storage, dbAttachment.File.Path, "./test/test-opus-processed.opus") + suite.Zero(dbAttachment.Thumbnail.FileSize) } func (suite *ManagerTestSuite) TestPngNoAlphaChannelProcess() { ctx := context.Background() - data := func(_ context.Context) (io.ReadCloser, int64, error) { + data := func(_ context.Context) (io.ReadCloser, error) { // load bytes from a test image b, err := os.ReadFile("./test/test-png-noalphachannel.png") if err != nil { panic(err) } - return io.NopCloser(bytes.NewBuffer(b)), int64(len(b)), nil + return io.NopCloser(bytes.NewBuffer(b)), nil } accountID := "01FS1X72SK9ZPW0J1QQ68BD264" @@ -1038,48 +654,28 @@ func (suite *ManagerTestSuite) TestPngNoAlphaChannelProcess() { suite.Equal("image/png", attachment.File.ContentType) suite.Equal("image/jpeg", attachment.Thumbnail.ContentType) suite.Equal(17471, attachment.File.FileSize) - suite.Equal("LFQT7e.A%O%4?co$M}M{_1W9~TxV", attachment.Blurhash) + suite.Equal("LDQJl?%i-?WG%go#RURP~of3~UxV", attachment.Blurhash) // now make sure the attachment is in the database dbAttachment, err := suite.db.GetAttachmentByID(ctx, attachment.ID) suite.NoError(err) suite.NotNil(dbAttachment) - // make sure the processed file is in storage - processedFullBytes, err := suite.storage.Get(ctx, attachment.File.Path) - suite.NoError(err) - suite.NotEmpty(processedFullBytes) - - // load the processed bytes from our test folder, to compare - processedFullBytesExpected, err := os.ReadFile("./test/test-png-noalphachannel-processed.png") - suite.NoError(err) - suite.NotEmpty(processedFullBytesExpected) - - // the bytes in storage should be what we expected - suite.Equal(processedFullBytesExpected, processedFullBytes) - - // now do the same for the thumbnail and make sure it's what we expected - processedThumbnailBytes, err := suite.storage.Get(ctx, attachment.Thumbnail.Path) - suite.NoError(err) - suite.NotEmpty(processedThumbnailBytes) - - processedThumbnailBytesExpected, err := 
os.ReadFile("./test/test-png-noalphachannel-thumbnail.jpg") - suite.NoError(err) - suite.NotEmpty(processedThumbnailBytesExpected) - - suite.Equal(processedThumbnailBytesExpected, processedThumbnailBytes) + // ensure the files contain the expected data. + equalFiles(suite.T(), suite.state.Storage, dbAttachment.File.Path, "./test/test-png-noalphachannel-processed.png") + equalFiles(suite.T(), suite.state.Storage, dbAttachment.Thumbnail.Path, "./test/test-png-noalphachannel-thumbnail.jpg") } func (suite *ManagerTestSuite) TestPngAlphaChannelProcess() { ctx := context.Background() - data := func(_ context.Context) (io.ReadCloser, int64, error) { + data := func(_ context.Context) (io.ReadCloser, error) { // load bytes from a test image b, err := os.ReadFile("./test/test-png-alphachannel.png") if err != nil { panic(err) } - return io.NopCloser(bytes.NewBuffer(b)), int64(len(b)), nil + return io.NopCloser(bytes.NewBuffer(b)), nil } accountID := "01FS1X72SK9ZPW0J1QQ68BD264" @@ -1113,48 +709,28 @@ func (suite *ManagerTestSuite) TestPngAlphaChannelProcess() { suite.Equal("image/png", attachment.File.ContentType) suite.Equal("image/jpeg", attachment.Thumbnail.ContentType) suite.Equal(18904, attachment.File.FileSize) - suite.Equal("LFQT7e.A%O%4?co$M}M{_1W9~TxV", attachment.Blurhash) + suite.Equal("LDQJl?%i-?WG%go#RURP~of3~UxV", attachment.Blurhash) // now make sure the attachment is in the database dbAttachment, err := suite.db.GetAttachmentByID(ctx, attachment.ID) suite.NoError(err) suite.NotNil(dbAttachment) - // make sure the processed file is in storage - processedFullBytes, err := suite.storage.Get(ctx, attachment.File.Path) - suite.NoError(err) - suite.NotEmpty(processedFullBytes) - - // load the processed bytes from our test folder, to compare - processedFullBytesExpected, err := os.ReadFile("./test/test-png-alphachannel-processed.png") - suite.NoError(err) - suite.NotEmpty(processedFullBytesExpected) - - // the bytes in storage should be what we expected - 
suite.Equal(processedFullBytesExpected, processedFullBytes) - - // now do the same for the thumbnail and make sure it's what we expected - processedThumbnailBytes, err := suite.storage.Get(ctx, attachment.Thumbnail.Path) - suite.NoError(err) - suite.NotEmpty(processedThumbnailBytes) - - processedThumbnailBytesExpected, err := os.ReadFile("./test/test-png-alphachannel-thumbnail.jpg") - suite.NoError(err) - suite.NotEmpty(processedThumbnailBytesExpected) - - suite.Equal(processedThumbnailBytesExpected, processedThumbnailBytes) + // ensure the files contain the expected data. + equalFiles(suite.T(), suite.state.Storage, dbAttachment.File.Path, "./test/test-png-alphachannel-processed.png") + equalFiles(suite.T(), suite.state.Storage, dbAttachment.Thumbnail.Path, "./test/test-png-alphachannel-thumbnail.jpg") } func (suite *ManagerTestSuite) TestSimpleJpegProcessWithCallback() { ctx := context.Background() - data := func(_ context.Context) (io.ReadCloser, int64, error) { + data := func(_ context.Context) (io.ReadCloser, error) { // load bytes from a test image b, err := os.ReadFile("./test/test-jpeg.jpg") if err != nil { panic(err) } - return io.NopCloser(bytes.NewBuffer(b)), int64(len(b)), nil + return io.NopCloser(bytes.NewBuffer(b)), nil } accountID := "01FS1X72SK9ZPW0J1QQ68BD264" @@ -1188,53 +764,33 @@ func (suite *ManagerTestSuite) TestSimpleJpegProcessWithCallback() { suite.Equal("image/jpeg", attachment.File.ContentType) suite.Equal("image/jpeg", attachment.Thumbnail.ContentType) suite.Equal(269739, attachment.File.FileSize) - suite.Equal("LiBzRk#6V[WF_NvzV@WY_3rqV@a$", attachment.Blurhash) + suite.Equal("LjCGfG#6RkRn_NvzRjWF?urqV@a$", attachment.Blurhash) // now make sure the attachment is in the database dbAttachment, err := suite.db.GetAttachmentByID(ctx, attachment.ID) suite.NoError(err) suite.NotNil(dbAttachment) - // make sure the processed file is in storage - processedFullBytes, err := suite.storage.Get(ctx, attachment.File.Path) - suite.NoError(err) - 
suite.NotEmpty(processedFullBytes) - - // load the processed bytes from our test folder, to compare - processedFullBytesExpected, err := os.ReadFile("./test/test-jpeg-processed.jpg") - suite.NoError(err) - suite.NotEmpty(processedFullBytesExpected) - - // the bytes in storage should be what we expected - suite.Equal(processedFullBytesExpected, processedFullBytes) - - // now do the same for the thumbnail and make sure it's what we expected - processedThumbnailBytes, err := suite.storage.Get(ctx, attachment.Thumbnail.Path) - suite.NoError(err) - suite.NotEmpty(processedThumbnailBytes) - - processedThumbnailBytesExpected, err := os.ReadFile("./test/test-jpeg-thumbnail.jpg") - suite.NoError(err) - suite.NotEmpty(processedThumbnailBytesExpected) - - suite.Equal(processedThumbnailBytesExpected, processedThumbnailBytes) + // ensure the files contain the expected data. + equalFiles(suite.T(), suite.state.Storage, dbAttachment.File.Path, "./test/test-jpeg-processed.jpg") + equalFiles(suite.T(), suite.state.Storage, dbAttachment.Thumbnail.Path, "./test/test-jpeg-thumbnail.jpg") } func (suite *ManagerTestSuite) TestSimpleJpegProcessWithDiskStorage() { ctx := context.Background() - data := func(_ context.Context) (io.ReadCloser, int64, error) { + data := func(_ context.Context) (io.ReadCloser, error) { // load bytes from a test image b, err := os.ReadFile("./test/test-jpeg.jpg") if err != nil { panic(err) } - return io.NopCloser(bytes.NewBuffer(b)), int64(len(b)), nil + return io.NopCloser(bytes.NewBuffer(b)), nil } accountID := "01FS1X72SK9ZPW0J1QQ68BD264" - temp := fmt.Sprintf("%s/gotosocial-test", os.TempDir()) + temp := fmt.Sprintf("./%s/gotosocial-test", os.TempDir()) defer os.RemoveAll(temp) disk, err := disk.Open(temp, nil) @@ -1285,36 +841,16 @@ func (suite *ManagerTestSuite) TestSimpleJpegProcessWithDiskStorage() { suite.Equal("image/jpeg", attachment.File.ContentType) suite.Equal("image/jpeg", attachment.Thumbnail.ContentType) suite.Equal(269739, 
attachment.File.FileSize) - suite.Equal("LiBzRk#6V[WF_NvzV@WY_3rqV@a$", attachment.Blurhash) + suite.Equal("LjCGfG#6RkRn_NvzRjWF?urqV@a$", attachment.Blurhash) // now make sure the attachment is in the database dbAttachment, err := suite.db.GetAttachmentByID(ctx, attachment.ID) suite.NoError(err) suite.NotNil(dbAttachment) - // make sure the processed file is in storage - processedFullBytes, err := storage.Get(ctx, attachment.File.Path) - suite.NoError(err) - suite.NotEmpty(processedFullBytes) - - // load the processed bytes from our test folder, to compare - processedFullBytesExpected, err := os.ReadFile("./test/test-jpeg-processed.jpg") - suite.NoError(err) - suite.NotEmpty(processedFullBytesExpected) - - // the bytes in storage should be what we expected - suite.Equal(processedFullBytesExpected, processedFullBytes) - - // now do the same for the thumbnail and make sure it's what we expected - processedThumbnailBytes, err := storage.Get(ctx, attachment.Thumbnail.Path) - suite.NoError(err) - suite.NotEmpty(processedThumbnailBytes) - - processedThumbnailBytesExpected, err := os.ReadFile("./test/test-jpeg-thumbnail.jpg") - suite.NoError(err) - suite.NotEmpty(processedThumbnailBytesExpected) - - suite.Equal(processedThumbnailBytesExpected, processedThumbnailBytes) + // ensure the files contain the expected data. 
+ equalFiles(suite.T(), storage, dbAttachment.File.Path, "./test/test-jpeg-processed.jpg") + equalFiles(suite.T(), storage, dbAttachment.Thumbnail.Path, "./test/test-jpeg-thumbnail.jpg") } func (suite *ManagerTestSuite) TestSmallSizedMediaTypeDetection_issue2263() { @@ -1348,12 +884,12 @@ func (suite *ManagerTestSuite) TestSmallSizedMediaTypeDetection_issue2263() { ctx, cncl := context.WithTimeout(context.Background(), time.Second*60) defer cncl() - data := func(_ context.Context) (io.ReadCloser, int64, error) { + data := func(_ context.Context) (io.ReadCloser, error) { // load bytes from a test image b, err := os.ReadFile(test.path) suite.NoError(err, "Test %d: failed during test setup", index+1) - return io.NopCloser(bytes.NewBuffer(b)), int64(len(b)), nil + return io.NopCloser(bytes.NewBuffer(b)), nil } accountID := "01FS1X72SK9ZPW0J1QQ68BD264" @@ -1390,78 +926,23 @@ func (suite *ManagerTestSuite) TestSmallSizedMediaTypeDetection_issue2263() { } } -func (suite *ManagerTestSuite) TestMisreportedSmallMedia() { - const accountID = "01FS1X72SK9ZPW0J1QQ68BD264" - var actualSize int - - data := func(_ context.Context) (io.ReadCloser, int64, error) { - // Load bytes from small png. - b, err := os.ReadFile("./test/test-png-alphachannel-1x1px.png") - if err != nil { - suite.FailNow(err.Error()) - } - - actualSize = len(b) - - // Report media as twice its actual size. This should be corrected. 
- return io.NopCloser(bytes.NewBuffer(b)), int64(2 * actualSize), nil - } - - ctx := context.Background() - - // process the media with no additional info provided - processing, err := suite.manager.CreateMedia(ctx, - accountID, - data, - media.AdditionalMediaInfo{}, - ) - suite.NoError(err) - suite.NotNil(processing) - - // do a blocking call to fetch the attachment - attachment, err := processing.Load(ctx) - suite.NoError(err) - suite.NotNil(attachment) - - suite.Equal(actualSize, attachment.File.FileSize) -} - -func (suite *ManagerTestSuite) TestNoReportedSizeSmallMedia() { - const accountID = "01FS1X72SK9ZPW0J1QQ68BD264" - var actualSize int - - data := func(_ context.Context) (io.ReadCloser, int64, error) { - // Load bytes from small png. - b, err := os.ReadFile("./test/test-png-alphachannel-1x1px.png") - if err != nil { - suite.FailNow(err.Error()) - } - - actualSize = len(b) - - // Return zero for media size. This should be detected. - return io.NopCloser(bytes.NewBuffer(b)), 0, nil - } - - ctx := context.Background() - - // process the media with no additional info provided - processing, err := suite.manager.CreateMedia(ctx, - accountID, - data, - media.AdditionalMediaInfo{}, - ) - suite.NoError(err) - suite.NotNil(processing) - - // do a blocking call to fetch the attachment - attachment, err := processing.Load(ctx) - suite.NoError(err) - suite.NotNil(attachment) - - suite.Equal(actualSize, attachment.File.FileSize) -} - func TestManagerTestSuite(t *testing.T) { suite.Run(t, &ManagerTestSuite{}) } + +// equalFiles checks whether +func equalFiles(t *testing.T, st *storage.Driver, storagePath, testPath string) { + b1, err := st.Get(context.Background(), storagePath) + if err != nil { + t.Fatalf("error reading file %s: %v", storagePath, err) + } + + b2, err := os.ReadFile(testPath) + if err != nil { + t.Fatalf("error reading file %s: %v", testPath, err) + } + + if md5.Sum(b1) != md5.Sum(b2) { + t.Errorf("%s != %s", storagePath, testPath) + } +} diff --git 
a/internal/media/png-stripper.go b/internal/media/png-stripper.go deleted file mode 100644 index 09126f6a5..000000000 --- a/internal/media/png-stripper.go +++ /dev/null @@ -1,211 +0,0 @@ -// GoToSocial -// Copyright (C) GoToSocial Authors admin@gotosocial.org -// SPDX-License-Identifier: AGPL-3.0-or-later -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -package media - -/* - The code in this file is taken from the following source: - https://github.com/google/wuffs/blob/414a011491ff513b86d8694c5d71800f3cb5a715/script/strip-png-ancillary-chunks.go - - It presents a workaround for this issue: https://github.com/golang/go/issues/43382 - - The license for the copied code is reproduced below: - - Copyright 2021 The Wuffs Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -// strip-png-ancillary-chunks.go copies PNG data from stdin to stdout, removing -// any ancillary chunks. 
-// -// Specification-compliant PNG decoders are required to honor critical chunks -// but may ignore ancillary (non-critical) chunks. Stripping out ancillary -// chunks before decoding should mean that different PNG decoders will agree on -// the decoded output regardless of which ancillary chunk types they choose to -// honor. Specifically, some PNG decoders may implement color and gamma -// correction but not all do. -// -// This program will strip out all ancillary chunks, but it should be -// straightforward to copy-paste-and-modify it to strip out only certain chunk -// types (e.g. only "tRNS" transparency chunks). -// -// -------- -// -// A PNG file consists of an 8-byte magic identifier and then a series of -// chunks. Each chunk is: -// -// - a 4-byte uint32 payload length N. -// - a 4-byte chunk type (e.g. "gAMA" for gamma correction metadata). -// - an N-byte payload. -// - a 4-byte CRC-32 checksum of the previous (N + 4) bytes, including the -// chunk type but excluding the payload length. -// -// Chunk types consist of 4 ASCII letters. The upper-case / lower-case bit of -// the first letter denote critical or ancillary chunks: "IDAT" and "PLTE" are -// critical, "gAMA" and "tEXt" are ancillary. See -// https://www.w3.org/TR/2003/REC-PNG-20031110/#5Chunk-naming-conventions -// -// -------- - -import ( - "encoding/binary" - "io" -) - -const ( - chunkTypeIHDR = 0x49484452 - chunkTypePLTE = 0x504C5445 - chunkTypeIDAT = 0x49444154 - chunkTypeIEND = 0x49454E44 - chunkTypeTRNS = 0x74524e53 -) - -func isNecessaryChunkType(chunkType uint32) bool { - switch chunkType { - case chunkTypeIHDR: - return true - case chunkTypePLTE: - return true - case chunkTypeIDAT: - return true - case chunkTypeIEND: - return true - case chunkTypeTRNS: - return true - } - return false -} - -// pngAncillaryChunkStripper wraps another io.Reader to strip ancillary chunks, -// if the data is in the PNG file format. If the data isn't PNG, it is passed -// through unmodified. 
-type pngAncillaryChunkStripper struct { - // Reader is the wrapped io.Reader. - Reader io.Reader - - // stickyErr is the first error returned from the wrapped io.Reader. - stickyErr error - - // buffer[rIndex:wIndex] holds data read from the wrapped io.Reader that - // wasn't passed through yet. - buffer [8]byte - rIndex int - wIndex int - - // pending and discard is the number of remaining bytes for (and whether to - // discard or pass through) the current chunk-in-progress. - pending int64 - discard bool - - // notPNG is set true if the data stream doesn't start with the 8-byte PNG - // magic identifier. If true, the wrapped io.Reader's data (including the - // first up-to-8 bytes) is passed through without modification. - notPNG bool - - // seenMagic is whether we've seen the 8-byte PNG magic identifier. - seenMagic bool -} - -// Read implements io.Reader. -func (r *pngAncillaryChunkStripper) Read(p []byte) (int, error) { - for { - // If the wrapped io.Reader returned a non-nil error, drain r.buffer - // (what data we have) and return that error (if fully drained). - if r.stickyErr != nil { - n := copy(p, r.buffer[r.rIndex:r.wIndex]) - r.rIndex += n - if r.rIndex < r.wIndex { - return n, nil - } - return n, r.stickyErr - } - - // Handle trivial requests, including draining our buffer. - if len(p) == 0 { - return 0, nil - } else if r.rIndex < r.wIndex { - n := copy(p, r.buffer[r.rIndex:r.wIndex]) - r.rIndex += n - return n, nil - } - - // From here onwards, our buffer is drained: r.rIndex == r.wIndex. - - // Handle non-PNG input. - if r.notPNG { - return r.Reader.Read(p) - } - - // Continue processing any PNG chunk that's in progress, whether - // discarding it or passing it through. 
- for r.pending > 0 { - if int64(len(p)) > r.pending { - p = p[:r.pending] - } - n, err := r.Reader.Read(p) - r.pending -= int64(n) - r.stickyErr = err - if r.discard { - continue - } - return n, err - } - - // We're either expecting the 8-byte PNG magic identifier or the 4-byte - // PNG chunk length + 4-byte PNG chunk type. Either way, read 8 bytes. - r.rIndex = 0 - r.wIndex, r.stickyErr = io.ReadFull(r.Reader, r.buffer[:8]) - if r.stickyErr != nil { - // Undo io.ReadFull converting io.EOF to io.ErrUnexpectedEOF. - if r.stickyErr == io.ErrUnexpectedEOF { - r.stickyErr = io.EOF - } - continue - } - - // Process those 8 bytes, either: - // - a PNG chunk (if we've already seen the PNG magic identifier), - // - the PNG magic identifier itself (if the input is a PNG) or - // - something else (if it's not a PNG). - //nolint:gocritic - if r.seenMagic { - // The number of pending bytes is equal to (N + 4) because of the 4 - // byte trailer, a checksum. - r.pending = int64(binary.BigEndian.Uint32(r.buffer[:4])) + 4 - chunkType := binary.BigEndian.Uint32(r.buffer[4:]) - r.discard = !isNecessaryChunkType(chunkType) - if r.discard { - r.rIndex = r.wIndex - } - } else if string(r.buffer[:8]) == "\x89PNG\x0D\x0A\x1A\x0A" { - r.seenMagic = true - } else { - r.notPNG = true - } - } -} diff --git a/internal/media/processingemoji.go b/internal/media/processingemoji.go index d61043523..cca456837 100644 --- a/internal/media/processingemoji.go +++ b/internal/media/processingemoji.go @@ -18,16 +18,10 @@ package media import ( - "bytes" "context" - "io" - "slices" - "codeberg.org/gruf/go-bytesize" errorsv2 "codeberg.org/gruf/go-errors/v2" "codeberg.org/gruf/go-runners" - "github.com/h2non/filetype" - "github.com/superseriousbusiness/gotosocial/internal/config" "github.com/superseriousbusiness/gotosocial/internal/gtscontext" "github.com/superseriousbusiness/gotosocial/internal/gtserror" "github.com/superseriousbusiness/gotosocial/internal/gtsmodel" @@ -125,19 +119,8 @@ func (p 
*ProcessingEmoji) load(ctx context.Context) ( // full-size media attachment details. // // This will update p.emoji as it goes. - if err = p.store(ctx); err != nil { - return err - } - - // Finish processing by reloading media into - // memory to get dimension and generate a thumb. - // - // This will update p.emoji as it goes. - if err = p.finish(ctx); err != nil { - return err //nolint:revive - } - - return nil + err = p.store(ctx) + return err }) emoji = p.emoji return @@ -147,80 +130,66 @@ func (p *ProcessingEmoji) load(ctx context.Context) ( // and updates the underlying attachment fields as necessary. It will then stream // bytes from p's reader directly into storage so that it can be retrieved later. func (p *ProcessingEmoji) store(ctx context.Context) error { - // Load media from provided data fun - rc, sz, err := p.dataFn(ctx) + // Load media from data func. + rc, err := p.dataFn(ctx) if err != nil { return gtserror.Newf("error executing data function: %w", err) } + var ( + // predfine temporary media + // file path variables so we + // can remove them on error. + temppath string + staticpath string + ) + defer func() { - // Ensure data reader gets closed on return. - if err := rc.Close(); err != nil { - log.Errorf(ctx, "error closing data reader: %v", err) + if err := remove(temppath, staticpath); err != nil { + log.Errorf(ctx, "error(s) cleaning up files: %v", err) } }() - var maxSize bytesize.Size - - if p.emoji.IsLocal() { - // this is a local emoji upload - maxSize = config.GetMediaEmojiLocalMaxSize() - } else { - // this is a remote incoming emoji - maxSize = config.GetMediaEmojiRemoteMaxSize() - } - - // Check that provided size isn't beyond max. We check beforehand - // so that we don't attempt to stream the emoji into storage if not needed. 
- if sz > 0 && sz > int64(maxSize) { - sz := bytesize.Size(sz) // improves log readability - return gtserror.Newf("given emoji size %s greater than max allowed %s", sz, maxSize) - } - - // Prepare to read bytes from - // file header or magic number. - fileSize := int(sz) - hdrBuf := newHdrBuf(fileSize) - - // Read into buffer as much as possible. - // - // UnexpectedEOF means we couldn't read up to the - // given size, but we may still have read something. - // - // EOF means we couldn't read anything at all. - // - // Any other error likely means the connection messed up. - // - // In other words, rather counterintuitively, we - // can only proceed on no error or unexpected error! - n, err := io.ReadFull(rc, hdrBuf) + // Drain reader to tmp file + // (this reader handles close). + temppath, err = drainToTmp(rc) if err != nil { - if err != io.ErrUnexpectedEOF { - return gtserror.Newf("error reading first bytes of incoming media: %w", err) - } - - // Initial file size was misreported, so we didn't read - // fully into hdrBuf. Reslice it to the size we did read. - hdrBuf = hdrBuf[:n] - fileSize = n - p.emoji.ImageFileSize = fileSize + return gtserror.Newf("error draining data to tmp: %w", err) } - // Parse file type info from header buffer. - // This should only ever error if the buffer - // is empty (ie., the attachment is 0 bytes). - info, err := filetype.Match(hdrBuf) + // Pass input file through ffprobe to + // parse further metadata information. + result, err := ffprobe(ctx, temppath) if err != nil { - return gtserror.Newf("error parsing file type: %w", err) + return gtserror.Newf("error ffprobing data: %w", err) } - // Ensure supported emoji img type. - if !slices.Contains(SupportedEmojiMIMETypes, info.MIME.Value) { - return gtserror.Newf("unsupported emoji filetype: %s", info.Extension) + switch { + // No errors parsing data. + case result.Error == nil: + + // Data type unhandleable by ffprobe. 
+ case result.Error.Code == -1094995529: + log.Warn(ctx, "unsupported data type") + return nil + + default: + return gtserror.Newf("ffprobe error: %w", err) } - // Recombine header bytes with remaining stream - r := io.MultiReader(bytes.NewReader(hdrBuf), rc) + var ext string + + // Set media type from ffprobe format data. + fileType, ext := result.Format.GetFileType() + if fileType != gtsmodel.FileTypeImage { + return gtserror.Newf("unsupported emoji filetype: %s (%s)", fileType, ext) + } + + // Generate a static image from input emoji path. + staticpath, err = ffmpegGenerateStatic(ctx, temppath) + if err != nil { + return gtserror.Newf("error generating emoji static: %w", err) + } var pathID string if p.newPathID != "" { @@ -244,95 +213,50 @@ func (p *ProcessingEmoji) store(ctx context.Context) error { string(TypeEmoji), string(SizeOriginal), pathID, - info.Extension, + ext, ) - // File shouldn't already exist in storage at this point, - // but we do a check as it's worth logging / cleaning up. - if have, _ := p.mgr.state.Storage.Has(ctx, p.emoji.ImagePath); have { - log.Warnf(ctx, "emoji already exists at: %s", p.emoji.ImagePath) - - // Attempt to remove existing emoji at storage path (might be broken / out-of-date) - if err := p.mgr.state.Storage.Delete(ctx, p.emoji.ImagePath); err != nil { - return gtserror.Newf("error removing emoji %s from storage: %v", p.emoji.ImagePath, err) - } - } - - // Write the final image reader stream to our storage. - sz, err = p.mgr.state.Storage.PutStream(ctx, p.emoji.ImagePath, r) + // Copy temporary file into storage at path. + filesz, err := p.mgr.state.Storage.PutFile(ctx, + p.emoji.ImagePath, + temppath, + ) if err != nil { return gtserror.Newf("error writing emoji to storage: %w", err) } - // Perform final size check in case none was - // given previously, or size was mis-reported. - // (error here will later perform p.cleanup()). 
- if sz > int64(maxSize) { - sz := bytesize.Size(sz) // improves log readability - return gtserror.Newf("written emoji size %s greater than max allowed %s", sz, maxSize) + // Copy static emoji file into storage at path. + staticsz, err := p.mgr.state.Storage.PutFile(ctx, + p.emoji.ImageStaticPath, + staticpath, + ) + if err != nil { + return gtserror.Newf("error writing static to storage: %w", err) } + // Set final determined file sizes. + p.emoji.ImageFileSize = int(filesz) + p.emoji.ImageStaticFileSize = int(staticsz) + // Fill in remaining emoji data now it's stored. p.emoji.ImageURL = uris.URIForAttachment( instanceAccID, string(TypeEmoji), string(SizeOriginal), pathID, - info.Extension, + ext, ) - p.emoji.ImageContentType = info.MIME.Value - p.emoji.ImageFileSize = int(sz) + + // Get mimetype for the file container + // type, falling back to generic data. + p.emoji.ImageContentType = getMimeType(ext) + + // We can now consider this cached. p.emoji.Cached = util.Ptr(true) return nil } -func (p *ProcessingEmoji) finish(ctx context.Context) error { - // Get a stream to the original file for further processing. - rc, err := p.mgr.state.Storage.GetStream(ctx, p.emoji.ImagePath) - if err != nil { - return gtserror.Newf("error loading file from storage: %w", err) - } - defer rc.Close() - - // Decode the image from storage. - staticImg, err := decodeImage(rc) - if err != nil { - return gtserror.Newf("error decoding image: %w", err) - } - - // staticImg should be in-memory by - // now so we're done with storage. - if err := rc.Close(); err != nil { - return gtserror.Newf("error closing file: %w", err) - } - - // Static img shouldn't exist in storage at this point, - // but we do a check as it's worth logging / cleaning up. - if have, _ := p.mgr.state.Storage.Has(ctx, p.emoji.ImageStaticPath); have { - log.Warnf(ctx, "static emoji already exists at: %s", p.emoji.ImageStaticPath) - - // Attempt to remove existing thumbnail (might be broken / out-of-date). 
- if err := p.mgr.state.Storage.Delete(ctx, p.emoji.ImageStaticPath); err != nil { - return gtserror.Newf("error removing static emoji %s from storage: %v", p.emoji.ImageStaticPath, err) - } - } - - // Create emoji PNG encoder stream. - enc := staticImg.ToPNG() - - // Stream-encode the PNG static emoji image into our storage driver. - sz, err := p.mgr.state.Storage.PutStream(ctx, p.emoji.ImageStaticPath, enc) - if err != nil { - return gtserror.Newf("error stream-encoding static emoji to storage: %w", err) - } - - // Set final written thumb size. - p.emoji.ImageStaticFileSize = int(sz) - - return nil -} - // cleanup will remove any traces of processing emoji from storage, // and perform any other necessary cleanup steps after failure. func (p *ProcessingEmoji) cleanup(ctx context.Context) { diff --git a/internal/media/processingmedia.go b/internal/media/processingmedia.go index 466c3443f..43e153a4d 100644 --- a/internal/media/processingmedia.go +++ b/internal/media/processingmedia.go @@ -18,18 +18,12 @@ package media import ( - "bytes" - "cmp" "context" - "image/jpeg" - "io" "time" errorsv2 "codeberg.org/gruf/go-errors/v2" "codeberg.org/gruf/go-runners" - terminator "codeberg.org/superseriousbusiness/exif-terminator" - "github.com/disintegration/imaging" - "github.com/h2non/filetype" + "github.com/superseriousbusiness/gotosocial/internal/gtscontext" "github.com/superseriousbusiness/gotosocial/internal/gtserror" "github.com/superseriousbusiness/gotosocial/internal/gtsmodel" @@ -145,19 +139,8 @@ func (p *ProcessingMedia) load(ctx context.Context) ( // full-size media attachment details. // // This will update p.media as it goes. - if err = p.store(ctx); err != nil { - return err - } - - // Finish processing by reloading media into - // memory to get dimension and generate a thumb. - // - // This will update p.media as it goes. 
- if err = p.finish(ctx); err != nil { - return err //nolint:revive - } - - return nil + err = p.store(ctx) + return err }) media = p.media return @@ -167,89 +150,224 @@ func (p *ProcessingMedia) load(ctx context.Context) ( // and updates the underlying attachment fields as necessary. It will then stream // bytes from p's reader directly into storage so that it can be retrieved later. func (p *ProcessingMedia) store(ctx context.Context) error { - // Load media from provided data fun - rc, sz, err := p.dataFn(ctx) + // Load media from data func. + rc, err := p.dataFn(ctx) if err != nil { return gtserror.Newf("error executing data function: %w", err) } + var ( + // predfine temporary media + // file path variables so we + // can remove them on error. + temppath string + thumbpath string + ) + defer func() { - // Ensure data reader gets closed on return. - if err := rc.Close(); err != nil { - log.Errorf(ctx, "error closing data reader: %v", err) + if err := remove(temppath, thumbpath); err != nil { + log.Errorf(ctx, "error(s) cleaning up files: %v", err) } }() - // Assume we're given correct file - // size, we can overwrite this later - // once we know THE TRUTH. - fileSize := int(sz) - p.media.File.FileSize = fileSize - - // Prepare to read bytes from - // file header or magic number. - hdrBuf := newHdrBuf(fileSize) - - // Read into buffer as much as possible. - // - // UnexpectedEOF means we couldn't read up to the - // given size, but we may still have read something. - // - // EOF means we couldn't read anything at all. - // - // Any other error likely means the connection messed up. - // - // In other words, rather counterintuitively, we - // can only proceed on no error or unexpected error! - n, err := io.ReadFull(rc, hdrBuf) + // Drain reader to tmp file + // (this reader handles close). 
+ temppath, err = drainToTmp(rc) if err != nil { - if err != io.ErrUnexpectedEOF { - return gtserror.Newf("error reading first bytes of incoming media: %w", err) + return gtserror.Newf("error draining data to tmp: %w", err) + } + + // Pass input file through ffprobe to + // parse further metadata information. + result, err := ffprobe(ctx, temppath) + if err != nil { + return gtserror.Newf("error ffprobing data: %w", err) + } + + switch { + // No errors parsing data. + case result.Error == nil: + + // Data type unhandleable by ffprobe. + case result.Error.Code == -1094995529: + log.Warn(ctx, "unsupported data type") + return nil + + default: + return gtserror.Newf("ffprobe error: %w", err) + } + + var ext string + + // Set the media type from ffprobe format data. + p.media.Type, ext = result.Format.GetFileType() + if p.media.Type == gtsmodel.FileTypeUnknown { + + // Return early (deleting file) + // for unhandled file types. + return nil + } + + switch p.media.Type { + case gtsmodel.FileTypeImage: + // Pass file through ffmpeg clearing + // any excess metadata (e.g. EXIF). + if err := ffmpegClearMetadata(ctx, + temppath, ext, + ); err != nil { + return gtserror.Newf("error cleaning metadata: %w", err) } - // Initial file size was misreported, so we didn't read - // fully into hdrBuf. Reslice it to the size we did read. - hdrBuf = hdrBuf[:n] - fileSize = n - p.media.File.FileSize = fileSize - } + // Extract image metadata from streams. + width, height, err := result.ImageMeta() + if err != nil { + return err + } + p.media.FileMeta.Original.Width = width + p.media.FileMeta.Original.Height = height + p.media.FileMeta.Original.Size = (width * height) + p.media.FileMeta.Original.Aspect = float32(width) / float32(height) - // Parse file type info from header buffer. - // This should only ever error if the buffer - // is empty (ie., the attachment is 0 bytes). 
- info, err := filetype.Match(hdrBuf) - if err != nil { - return gtserror.Newf("error parsing file type: %w", err) - } + // Determine thumbnail dimensions to use. + thumbWidth, thumbHeight := thumbSize(width, height) + p.media.FileMeta.Small.Width = thumbWidth + p.media.FileMeta.Small.Height = thumbHeight + p.media.FileMeta.Small.Size = (thumbWidth * thumbHeight) + p.media.FileMeta.Small.Aspect = float32(thumbWidth) / float32(thumbHeight) - // Recombine header bytes with remaining stream - r := io.MultiReader(bytes.NewReader(hdrBuf), rc) + // Generate a thumbnail image from input image path. + thumbpath, err = ffmpegGenerateThumb(ctx, temppath, + thumbWidth, + thumbHeight, + ) + if err != nil { + return gtserror.Newf("error generating image thumb: %w", err) + } - // Assume we'll put - // this file in storage. - store := true + case gtsmodel.FileTypeVideo: + // Pass file through ffmpeg clearing + // any excess metadata (e.g. EXIF). + if err := ffmpegClearMetadata(ctx, + temppath, ext, + ); err != nil { + return gtserror.Newf("error cleaning metadata: %w", err) + } - switch info.Extension { - case "mp4": - // No problem. + // Extract video metadata we can from streams. + width, height, framerate, err := result.VideoMeta() + if err != nil { + return err + } + p.media.FileMeta.Original.Width = width + p.media.FileMeta.Original.Height = height + p.media.FileMeta.Original.Size = (width * height) + p.media.FileMeta.Original.Aspect = float32(width) / float32(height) + p.media.FileMeta.Original.Framerate = &framerate - case "gif": - // No problem + // Extract total duration from format. + duration := result.Format.GetDuration() + p.media.FileMeta.Original.Duration = &duration - case "jpg", "jpeg", "png", "webp": - if fileSize > 0 { - // A file size was provided so we can clean - // exif data from image as we're streaming it. - r, err = terminator.Terminate(r, fileSize, info.Extension) + // Extract total bitrate from format. 
+ bitrate := result.Format.GetBitRate() + p.media.FileMeta.Original.Bitrate = &bitrate + + // Determine thumbnail dimensions to use. + thumbWidth, thumbHeight := thumbSize(width, height) + p.media.FileMeta.Small.Width = thumbWidth + p.media.FileMeta.Small.Height = thumbHeight + p.media.FileMeta.Small.Size = (thumbWidth * thumbHeight) + p.media.FileMeta.Small.Aspect = float32(thumbWidth) / float32(thumbHeight) + + // Extract a thumbnail frame from input video path. + thumbpath, err = ffmpegGenerateThumb(ctx, temppath, + thumbWidth, + thumbHeight, + ) + if err != nil { + return gtserror.Newf("error extracting video frame: %w", err) + } + + case gtsmodel.FileTypeAudio: + // Extract total duration from format. + duration := result.Format.GetDuration() + p.media.FileMeta.Original.Duration = &duration + + // Extract total bitrate from format. + bitrate := result.Format.GetBitRate() + p.media.FileMeta.Original.Bitrate = &bitrate + + // Extract image metadata from streams (if any), + // this will only exist for embedded album art. + width, height, _ := result.ImageMeta() + if width > 0 && height > 0 { + + // Determine thumbnail dimensions to use. + thumbWidth, thumbHeight := thumbSize(width, height) + p.media.FileMeta.Small.Width = thumbWidth + p.media.FileMeta.Small.Height = thumbHeight + p.media.FileMeta.Small.Size = (thumbWidth * thumbHeight) + p.media.FileMeta.Small.Aspect = float32(thumbWidth) / float32(thumbHeight) + + // Generate a thumbnail image from input image path. + thumbpath, err = ffmpegGenerateThumb(ctx, temppath, + thumbWidth, + thumbHeight, + ) if err != nil { - return gtserror.Newf("error cleaning exif data: %w", err) + return gtserror.Newf("error generating image thumb: %w", err) } } default: - // The file is not a supported format that we can process, so we can't do much with it. 
- log.Warnf(ctx, "unsupported media extension '%s'; not caching locally", info.Extension) - store = false + log.Warnf(ctx, "unsupported type: %s (%s)", p.media.Type, result.Format.FormatName) + return nil + } + + // Calculate final media attachment file path. + p.media.File.Path = uris.StoragePathForAttachment( + p.media.AccountID, + string(TypeAttachment), + string(SizeOriginal), + p.media.ID, + ext, + ) + + // Copy temporary file into storage at path. + filesz, err := p.mgr.state.Storage.PutFile(ctx, + p.media.File.Path, + temppath, + ) + if err != nil { + return gtserror.Newf("error writing media to storage: %w", err) + } + + // Set final determined file size. + p.media.File.FileSize = int(filesz) + + if thumbpath != "" { + // Note that neither thumbnail storage + // nor a blurhash are needed for audio. + + if p.media.Blurhash == "" { + // Generate blurhash (if not already) from thumbnail. + p.media.Blurhash, err = generateBlurhash(thumbpath) + if err != nil { + return gtserror.Newf("error generating thumb blurhash: %w", err) + } + } + + // Copy thumbnail file into storage at path. + thumbsz, err := p.mgr.state.Storage.PutFile(ctx, + p.media.Thumbnail.Path, + thumbpath, + ) + if err != nil { + return gtserror.Newf("error writing thumb to storage: %w", err) + } + + // Set final determined thumbnail size. + p.media.Thumbnail.FileSize = int(thumbsz) } // Fill in correct attachment @@ -259,194 +377,17 @@ func (p *ProcessingMedia) store(ctx context.Context) error { string(TypeAttachment), string(SizeOriginal), p.media.ID, - info.Extension, + ext, ) - // Prefer discovered MIME, fallback to generic data stream. - mime := cmp.Or(info.MIME.Value, "application/octet-stream") - p.media.File.ContentType = mime - - // Calculate final media attachment file path. 
- p.media.File.Path = uris.StoragePathForAttachment( - p.media.AccountID, - string(TypeAttachment), - string(SizeOriginal), - p.media.ID, - info.Extension, - ) - - // We should only try to store the file if it's - // a format we can keep processing, otherwise be - // a bit cheeky: don't store it and let users - // click through to the remote server instead. - if !store { - return nil - } - - // File shouldn't already exist in storage at this point, - // but we do a check as it's worth logging / cleaning up. - if have, _ := p.mgr.state.Storage.Has(ctx, p.media.File.Path); have { - log.Warnf(ctx, "media already exists at: %s", p.media.File.Path) - - // Attempt to remove existing media at storage path (might be broken / out-of-date) - if err := p.mgr.state.Storage.Delete(ctx, p.media.File.Path); err != nil { - return gtserror.Newf("error removing media %s from storage: %v", p.media.File.Path, err) - } - } - - // Write the final reader stream to our storage driver. - sz, err = p.mgr.state.Storage.PutStream(ctx, p.media.File.Path, r) - if err != nil { - return gtserror.Newf("error writing media to storage: %w", err) - } - - // Set actual written size - // as authoritative file size. - p.media.File.FileSize = int(sz) + // Get mimetype for the file container + // type, falling back to generic data. + p.media.File.ContentType = getMimeType(ext) // We can now consider this cached. p.media.Cached = util.Ptr(true) - return nil -} - -func (p *ProcessingMedia) finish(ctx context.Context) error { - // Nothing else to do if - // media was not cached. - if !*p.media.Cached { - return nil - } - - // Get a stream to the original file for further processing. - rc, err := p.mgr.state.Storage.GetStream(ctx, p.media.File.Path) - if err != nil { - return gtserror.Newf("error loading file from storage: %w", err) - } - defer rc.Close() - - // fullImg is the processed version of - // the original (stripped + reoriented). 
- var fullImg *gtsImage - - // Depending on the content type, we - // can do various types of decoding. - switch p.media.File.ContentType { - - // .jpeg, .gif, .webp image type - case mimeImageJpeg, mimeImageGif, mimeImageWebp: - fullImg, err = decodeImage(rc, - imaging.AutoOrientation(true), - ) - if err != nil { - return gtserror.Newf("error decoding image: %w", err) - } - - // Mark as no longer unknown type now - // we know for sure we can decode it. - p.media.Type = gtsmodel.FileTypeImage - - // .png image (requires ancillary chunk stripping) - case mimeImagePng: - fullImg, err = decodeImage( - &pngAncillaryChunkStripper{Reader: rc}, - imaging.AutoOrientation(true), - ) - if err != nil { - return gtserror.Newf("error decoding image: %w", err) - } - - // Mark as no longer unknown type now - // we know for sure we can decode it. - p.media.Type = gtsmodel.FileTypeImage - - // .mp4 video type - case mimeVideoMp4: - video, err := decodeVideoFrame(rc) - if err != nil { - return gtserror.Newf("error decoding video: %w", err) - } - - // Set video frame as image. - fullImg = video.frame - - // Set video metadata in attachment info. - p.media.FileMeta.Original.Duration = &video.duration - p.media.FileMeta.Original.Framerate = &video.framerate - p.media.FileMeta.Original.Bitrate = &video.bitrate - - // Mark as no longer unknown type now - // we know for sure we can decode it. - p.media.Type = gtsmodel.FileTypeVideo - } - - // fullImg should be in-memory by - // now so we're done with storage. - if err := rc.Close(); err != nil { - return gtserror.Newf("error closing file: %w", err) - } - - // Set full-size dimensions in attachment info. 
- p.media.FileMeta.Original.Width = fullImg.Width() - p.media.FileMeta.Original.Height = fullImg.Height() - p.media.FileMeta.Original.Size = fullImg.Size() - p.media.FileMeta.Original.Aspect = fullImg.AspectRatio() - - // Get smaller thumbnail image - thumbImg := fullImg.Thumbnail() - - // Garbage collector, you may - // now take our large son. - fullImg = nil - - // Only generate blurhash - // from thumb if necessary. - if p.media.Blurhash == "" { - hash, err := thumbImg.Blurhash() - if err != nil { - return gtserror.Newf("error generating blurhash: %w", err) - } - - // Set the attachment blurhash. - p.media.Blurhash = hash - } - - // Thumbnail shouldn't exist in storage at this point, - // but we do a check as it's worth logging / cleaning up. - if have, _ := p.mgr.state.Storage.Has(ctx, p.media.Thumbnail.Path); have { - log.Warnf(ctx, "thumbnail already exists at: %s", p.media.Thumbnail.Path) - - // Attempt to remove existing thumbnail (might be broken / out-of-date). - if err := p.mgr.state.Storage.Delete(ctx, p.media.Thumbnail.Path); err != nil { - return gtserror.Newf("error removing thumbnail %s from storage: %v", p.media.Thumbnail.Path, err) - } - } - - // Create a thumbnail JPEG encoder stream. - enc := thumbImg.ToJPEG(&jpeg.Options{ - - // Good enough for - // a thumbnail. - Quality: 70, - }) - - // Stream-encode the JPEG thumbnail image into our storage driver. - sz, err := p.mgr.state.Storage.PutStream(ctx, p.media.Thumbnail.Path, enc) - if err != nil { - return gtserror.Newf("error stream-encoding thumbnail to storage: %w", err) - } - - // Set final written thumb size. - p.media.Thumbnail.FileSize = int(sz) - - // Set thumbnail dimensions in attachment info. - p.media.FileMeta.Small = gtsmodel.Small{ - Width: thumbImg.Width(), - Height: thumbImg.Height(), - Size: thumbImg.Size(), - Aspect: thumbImg.AspectRatio(), - } - - // Finally set the attachment as processed. + // Finally set the attachment as finished processing. 
p.media.Processing = gtsmodel.ProcessingStatusProcessed return nil diff --git a/internal/media/refetch.go b/internal/media/refetch.go index d02f14872..e5b91d56f 100644 --- a/internal/media/refetch.go +++ b/internal/media/refetch.go @@ -24,12 +24,13 @@ import ( "io" "net/url" + "github.com/superseriousbusiness/gotosocial/internal/config" "github.com/superseriousbusiness/gotosocial/internal/db" "github.com/superseriousbusiness/gotosocial/internal/gtsmodel" "github.com/superseriousbusiness/gotosocial/internal/log" ) -type DereferenceMedia func(ctx context.Context, iri *url.URL) (io.ReadCloser, int64, error) +type DereferenceMedia func(ctx context.Context, iri *url.URL, maxsz int64) (io.ReadCloser, error) // RefetchEmojis iterates through remote emojis (for the given domain, or all if domain is empty string). // @@ -48,6 +49,9 @@ func (m *Manager) RefetchEmojis(ctx context.Context, domain string, dereferenceM refetchIDs []string ) + // Get max supported remote emoji media size. + maxsz := config.GetMediaEmojiRemoteMaxSize() + // page through emojis 20 at a time, looking for those with missing images for { // Fetch next block of emojis from database @@ -107,8 +111,8 @@ func (m *Manager) RefetchEmojis(ctx context.Context, domain string, dereferenceM continue } - dataFunc := func(ctx context.Context) (reader io.ReadCloser, fileSize int64, err error) { - return dereferenceMedia(ctx, emojiImageIRI) + dataFunc := func(ctx context.Context) (reader io.ReadCloser, err error) { + return dereferenceMedia(ctx, emojiImageIRI, int64(maxsz)) } processingEmoji, err := m.RefreshEmoji(ctx, emoji, dataFunc, AdditionalEmojiInfo{ diff --git a/internal/media/test/birdnest-processed.mp4 b/internal/media/test/birdnest-processed.mp4 index 2ecc075cd..ed9d73a7d 100644 Binary files a/internal/media/test/birdnest-processed.mp4 and b/internal/media/test/birdnest-processed.mp4 differ diff --git a/internal/media/test/birdnest-thumbnail.jpg b/internal/media/test/birdnest-thumbnail.jpg index 
b20de32a3..d9d4fc0c9 100644 Binary files a/internal/media/test/birdnest-thumbnail.jpg and b/internal/media/test/birdnest-thumbnail.jpg differ diff --git a/internal/media/test/gts_pixellated-static.png b/internal/media/test/gts_pixellated-static.png index c6dcb0f4a..530b628bf 100644 Binary files a/internal/media/test/gts_pixellated-static.png and b/internal/media/test/gts_pixellated-static.png differ diff --git a/internal/media/test/longer-mp4-processed.mp4 b/internal/media/test/longer-mp4-processed.mp4 index cfb596612..d792dc3c5 100644 Binary files a/internal/media/test/longer-mp4-processed.mp4 and b/internal/media/test/longer-mp4-processed.mp4 differ diff --git a/internal/media/test/longer-mp4-thumbnail.jpg b/internal/media/test/longer-mp4-thumbnail.jpg index 076db8251..1700b0cb1 100644 Binary files a/internal/media/test/longer-mp4-thumbnail.jpg and b/internal/media/test/longer-mp4-thumbnail.jpg differ diff --git a/internal/media/test/nb-flag-static.png b/internal/media/test/nb-flag-static.png index 399eae5e5..384ee53f7 100644 Binary files a/internal/media/test/nb-flag-static.png and b/internal/media/test/nb-flag-static.png differ diff --git a/internal/media/test/rainbow-static.png b/internal/media/test/rainbow-static.png index 79ed5c03a..f762a0470 100644 Binary files a/internal/media/test/rainbow-static.png and b/internal/media/test/rainbow-static.png differ diff --git a/internal/media/test/test-jpeg-thumbnail.jpg b/internal/media/test/test-jpeg-thumbnail.jpg index c11569fe6..e2251afec 100644 Binary files a/internal/media/test/test-jpeg-thumbnail.jpg and b/internal/media/test/test-jpeg-thumbnail.jpg differ diff --git a/internal/media/test/test-mp4-processed.mp4 b/internal/media/test/test-mp4-processed.mp4 index f78f51de6..2bd33ba48 100644 Binary files a/internal/media/test/test-mp4-processed.mp4 and b/internal/media/test/test-mp4-processed.mp4 differ diff --git a/internal/media/test/test-mp4-thumbnail.jpg b/internal/media/test/test-mp4-thumbnail.jpg index 
6d33c1b78..35dc7b619 100644 Binary files a/internal/media/test/test-mp4-thumbnail.jpg and b/internal/media/test/test-mp4-thumbnail.jpg differ diff --git a/internal/media/test/test-opus-original.opus b/internal/media/test/test-opus-original.opus new file mode 100644 index 000000000..1dc6f28fa Binary files /dev/null and b/internal/media/test/test-opus-original.opus differ diff --git a/internal/media/test/test-opus-processed.opus b/internal/media/test/test-opus-processed.opus new file mode 100644 index 000000000..1dc6f28fa Binary files /dev/null and b/internal/media/test/test-opus-processed.opus differ diff --git a/internal/media/test/test-png-alphachannel-processed.png b/internal/media/test/test-png-alphachannel-processed.png index 9d05d45ef..cb3857e9c 100644 Binary files a/internal/media/test/test-png-alphachannel-processed.png and b/internal/media/test/test-png-alphachannel-processed.png differ diff --git a/internal/media/test/test-png-alphachannel-thumbnail.jpg b/internal/media/test/test-png-alphachannel-thumbnail.jpg index 8342157be..f98e69800 100644 Binary files a/internal/media/test/test-png-alphachannel-thumbnail.jpg and b/internal/media/test/test-png-alphachannel-thumbnail.jpg differ diff --git a/internal/media/test/test-png-noalphachannel-thumbnail.jpg b/internal/media/test/test-png-noalphachannel-thumbnail.jpg index 8342157be..7e54ebae7 100644 Binary files a/internal/media/test/test-png-noalphachannel-thumbnail.jpg and b/internal/media/test/test-png-noalphachannel-thumbnail.jpg differ diff --git a/internal/media/types.go b/internal/media/types.go index cea026b98..2d19b84cc 100644 --- a/internal/media/types.go +++ b/internal/media/types.go @@ -144,4 +144,4 @@ type AdditionalEmojiInfo struct { } // DataFunc represents a function used to retrieve the raw bytes of a piece of media. 
-type DataFunc func(ctx context.Context) (reader io.ReadCloser, fileSize int64, err error) +type DataFunc func(ctx context.Context) (reader io.ReadCloser, err error) diff --git a/internal/media/util.go b/internal/media/util.go index 296bdb883..4a31c9f8e 100644 --- a/internal/media/util.go +++ b/internal/media/util.go @@ -17,25 +17,161 @@ package media -// newHdrBuf returns a buffer of suitable size to -// read bytes from a file header or magic number. -// -// File header is *USUALLY* 261 bytes at the start -// of a file; magic number can be much less than -// that (just a few bytes). -// -// To cover both cases, this function returns a buffer -// suitable for whichever is smallest: the first 261 -// bytes of the file, or the whole file. -// -// See: -// -// - https://en.wikipedia.org/wiki/File_format#File_header -// - https://github.com/h2non/filetype. -func newHdrBuf(fileSize int) []byte { - bufSize := 261 - if fileSize > 0 && fileSize < bufSize { - bufSize = fileSize +import ( + "cmp" + "errors" + "fmt" + "image" + "image/jpeg" + "io" + "os" + + "codeberg.org/gruf/go-bytesize" + "codeberg.org/gruf/go-iotools" + "codeberg.org/gruf/go-mimetypes" + "github.com/buckket/go-blurhash" + "github.com/disintegration/imaging" +) + +// thumbSize returns the dimensions to use for an input +// image of given width / height, for its outgoing thumbnail. +// This maintains the original image aspect ratio. +func thumbSize(width, height int) (int, int) { + const ( + maxThumbWidth = 512 + maxThumbHeight = 512 + ) + switch { + // Simplest case, within bounds! + case width < maxThumbWidth && + height < maxThumbHeight: + return width, height + + // Width is larger side. + case width > height: + p := float32(width) / float32(maxThumbWidth) + return maxThumbWidth, int(float32(height) / p) + + // Height is larger side. + case height > width: + p := float32(height) / float32(maxThumbHeight) + return int(float32(width) / p), maxThumbHeight + + // Square. 
+ default: + return maxThumbWidth, maxThumbHeight } - return make([]byte, bufSize) +} + +// jpegDecode decodes the JPEG at filepath into parsed image.Image. +func jpegDecode(filepath string) (image.Image, error) { + // Open the file at given path. + file, err := os.Open(filepath) + if err != nil { + return nil, err + } + + // Decode image from file. + img, err := jpeg.Decode(file) + + // Done with file. + _ = file.Close() + + return img, err +} + +// generateBlurhash generates a blurhash for JPEG at filepath. +func generateBlurhash(filepath string) (string, error) { + // Decode JPEG file at given path. + img, err := jpegDecode(filepath) + if err != nil { + return "", err + } + + // for generating blurhashes, it's more cost effective to + // lose detail since it's blurry, so make a tiny version. + tiny := imaging.Resize(img, 64, 64, imaging.NearestNeighbor) + + // Drop the larger image + // ref as soon as possible + // to allow GC to claim. + img = nil //nolint + + // Generate blurhash for thumbnail. + return blurhash.Encode(4, 3, tiny) +} + +// getMimeType returns a suitable mimetype for file extension. +func getMimeType(ext string) string { + const defaultType = "application/octet-stream" + return cmp.Or(mimetypes.MimeTypes[ext], defaultType) +} + +// drainToTmp drains data from given reader into a new temp file +// and closes it, returning the path of the resulting temp file. +// +// Note that this function specifically makes attempts to unwrap the +// io.ReadCloser as much as it can to underlying type, to maximise +// chance that Linux's sendfile syscall can be utilised for optimal +// draining of data source to temporary file storage. +func drainToTmp(rc io.ReadCloser) (string, error) { + tmp, err := os.CreateTemp(os.TempDir(), "gotosocial-*") + if err != nil { + return "", err + } + + // Close readers + // on func return. + defer tmp.Close() + defer rc.Close() + + // Extract file path. + path := tmp.Name() + + // Limited reader (if any). 
+ var lr *io.LimitedReader + var limit int64 + + // Reader type to use + // for draining to tmp. + rd := (io.Reader)(rc) + + // Check if reader is actually wrapped, + // (as our http client wraps close func). + rct, ok := rc.(*iotools.ReadCloserType) + if ok { + + // Get unwrapped. + rd = rct.Reader + + // Extract limited reader if wrapped. + lr, limit = iotools.GetReaderLimit(rd) + } + + // Drain reader into tmp. + _, err = tmp.ReadFrom(rd) + if err != nil { + return path, err + } + + // Check to see if limit was reached, + // (produces more useful error messages). + if lr != nil && !iotools.AtEOF(lr.R) { + return path, fmt.Errorf("reached read limit %s", bytesize.Size(limit)) + } + + return path, nil +} + +// remove only removes paths if not-empty. +func remove(paths ...string) error { + var errs []error + for _, path := range paths { + if path != "" { + if err := os.Remove(path); err != nil { + errs = append(errs, fmt.Errorf("error removing %s: %w", path, err)) + } + } + } + return errors.Join(errs...) } diff --git a/internal/media/video.go b/internal/media/video.go deleted file mode 100644 index 5068be636..000000000 --- a/internal/media/video.go +++ /dev/null @@ -1,141 +0,0 @@ -// GoToSocial -// Copyright (C) GoToSocial Authors admin@gotosocial.org -// SPDX-License-Identifier: AGPL-3.0-or-later -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . 
- -package media - -import ( - "fmt" - "io" - - "github.com/abema/go-mp4" - "github.com/superseriousbusiness/gotosocial/internal/iotools" - "github.com/superseriousbusiness/gotosocial/internal/log" -) - -type gtsVideo struct { - frame *gtsImage - duration float32 // in seconds - bitrate uint64 - framerate float32 -} - -// decodeVideoFrame decodes and returns an image from a single frame in the given video stream. -// (note: currently this only returns a blank image resized to fit video dimensions). -func decodeVideoFrame(r io.Reader) (*gtsVideo, error) { - // Check if video stream supports - // seeking, usually when *os.File. - rsc, ok := r.(io.ReadSeekCloser) - if !ok { - var err error - - // Store stream to temporary location - // in order that we can get seek-reads. - rsc, err = iotools.TempFileSeeker(r) - if err != nil { - return nil, fmt.Errorf("error creating temp file seeker: %w", err) - } - - defer func() { - // Ensure temp. read seeker closed. - if err := rsc.Close(); err != nil { - log.Errorf(nil, "error closing temp file seeker: %s", err) - } - }() - } - - // probe the video file to extract useful metadata from it; for methodology, see: - // https://github.com/abema/go-mp4/blob/7d8e5a7c5e644e0394261b0cf72fef79ce246d31/mp4tool/probe/probe.go#L85-L154 - info, err := mp4.Probe(rsc) - if err != nil { - return nil, fmt.Errorf("error during mp4 probe: %w", err) - } - - var ( - width int - height int - videoBitrate uint64 - audioBitrate uint64 - video gtsVideo - ) - - for _, tr := range info.Tracks { - if tr.AVC == nil { - // audio track - if br := tr.Samples.GetBitrate(tr.Timescale); br > audioBitrate { - audioBitrate = br - } else if br := info.Segments.GetBitrate(tr.TrackID, tr.Timescale); br > audioBitrate { - audioBitrate = br - } - - if d := float64(tr.Duration) / float64(tr.Timescale); d > float64(video.duration) { - video.duration = float32(d) - } - continue - } - - // video track - if w := int(tr.AVC.Width); w > width { - width = w - } - - if h := 
int(tr.AVC.Height); h > height { - height = h - } - - if br := tr.Samples.GetBitrate(tr.Timescale); br > videoBitrate { - videoBitrate = br - } else if br := info.Segments.GetBitrate(tr.TrackID, tr.Timescale); br > videoBitrate { - videoBitrate = br - } - - if d := float64(tr.Duration) / float64(tr.Timescale); d > float64(video.duration) { - video.framerate = float32(len(tr.Samples)) / float32(d) - video.duration = float32(d) - } - } - - // overall bitrate should be audio + video combined - // (since they're both playing at the same time) - video.bitrate = audioBitrate + videoBitrate - - // Check for empty video metadata. - var empty []string - if width == 0 { - empty = append(empty, "width") - } - if height == 0 { - empty = append(empty, "height") - } - if video.duration == 0 { - empty = append(empty, "duration") - } - if video.framerate == 0 { - empty = append(empty, "framerate") - } - if video.bitrate == 0 { - empty = append(empty, "bitrate") - } - if len(empty) > 0 { - return nil, fmt.Errorf("error determining video metadata: %v", empty) - } - - // Create new empty "frame" image. - // TODO: decode frame from video file. 
- video.frame = blankImage(width, height) - - return &video, nil -} diff --git a/internal/processing/account/update.go b/internal/processing/account/update.go index ba9360c36..fda871bd5 100644 --- a/internal/processing/account/update.go +++ b/internal/processing/account/update.go @@ -24,7 +24,7 @@ import ( "io" "mime/multipart" - "codeberg.org/gruf/go-bytesize" + "codeberg.org/gruf/go-iotools" "github.com/superseriousbusiness/gotosocial/internal/ap" apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model" "github.com/superseriousbusiness/gotosocial/internal/config" @@ -365,21 +365,31 @@ func (p *Processor) UpdateAvatar( *gtsmodel.MediaAttachment, gtserror.WithCode, ) { - max := config.GetMediaImageMaxSize() - if sz := bytesize.Size(avatar.Size); sz > max { - text := fmt.Sprintf("size %s exceeds max media size %s", sz, max) + // Get maximum supported local media size. + maxsz := config.GetMediaLocalMaxSize() + + // Ensure media within size bounds. + if avatar.Size > int64(maxsz) { + text := fmt.Sprintf("media exceeds configured max size: %s", maxsz) return nil, gtserror.NewErrorBadRequest(errors.New(text), text) } - data := func(_ context.Context) (io.ReadCloser, int64, error) { - f, err := avatar.Open() - return f, avatar.Size, err + // Open multipart file reader. + mpfile, err := avatar.Open() + if err != nil { + err := gtserror.Newf("error opening multipart file: %w", err) + return nil, gtserror.NewErrorInternalError(err) } + // Wrap the multipart file reader to ensure is limited to max. + rc, _, _ := iotools.UpdateReadCloserLimit(mpfile, int64(maxsz)) + // Write to instance storage. 
return p.c.StoreLocalMedia(ctx, account.ID, - data, + func(ctx context.Context) (reader io.ReadCloser, err error) { + return rc, nil + }, media.AdditionalMediaInfo{ Avatar: util.Ptr(true), Description: description, @@ -400,21 +410,31 @@ func (p *Processor) UpdateHeader( *gtsmodel.MediaAttachment, gtserror.WithCode, ) { - max := config.GetMediaImageMaxSize() - if sz := bytesize.Size(header.Size); sz > max { - text := fmt.Sprintf("size %s exceeds max media size %s", sz, max) + // Get maximum supported local media size. + maxsz := config.GetMediaLocalMaxSize() + + // Ensure media within size bounds. + if header.Size > int64(maxsz) { + text := fmt.Sprintf("media exceeds configured max size: %s", maxsz) return nil, gtserror.NewErrorBadRequest(errors.New(text), text) } - data := func(_ context.Context) (io.ReadCloser, int64, error) { - f, err := header.Open() - return f, header.Size, err + // Open multipart file reader. + mpfile, err := header.Open() + if err != nil { + err := gtserror.Newf("error opening multipart file: %w", err) + return nil, gtserror.NewErrorInternalError(err) } + // Wrap the multipart file reader to ensure is limited to max. + rc, _, _ := iotools.UpdateReadCloserLimit(mpfile, int64(maxsz)) + // Write to instance storage. 
return p.c.StoreLocalMedia(ctx, account.ID, - data, + func(ctx context.Context) (reader io.ReadCloser, err error) { + return rc, nil + }, media.AdditionalMediaInfo{ Header: util.Ptr(true), Description: description, diff --git a/internal/processing/admin/emoji.go b/internal/processing/admin/emoji.go index c023fabd8..cf5bacef8 100644 --- a/internal/processing/admin/emoji.go +++ b/internal/processing/admin/emoji.go @@ -25,7 +25,10 @@ import ( "mime/multipart" "strings" + "codeberg.org/gruf/go-bytesize" + "codeberg.org/gruf/go-iotools" apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model" + "github.com/superseriousbusiness/gotosocial/internal/config" "github.com/superseriousbusiness/gotosocial/internal/db" "github.com/superseriousbusiness/gotosocial/internal/gtserror" "github.com/superseriousbusiness/gotosocial/internal/gtsmodel" @@ -41,10 +44,26 @@ func (p *Processor) EmojiCreate( form *apimodel.EmojiCreateRequest, ) (*apimodel.Emoji, gtserror.WithCode) { - // Simply read provided form data for emoji data source. - data := func(_ context.Context) (io.ReadCloser, int64, error) { - f, err := form.Image.Open() - return f, form.Image.Size, err + // Get maximum supported local emoji size. + maxsz := config.GetMediaEmojiLocalMaxSize() + + // Ensure media within size bounds. + if form.Image.Size > int64(maxsz) { + text := fmt.Sprintf("emoji exceeds configured max size: %s", maxsz) + return nil, gtserror.NewErrorBadRequest(errors.New(text), text) + } + + // Open multipart file reader. + mpfile, err := form.Image.Open() + if err != nil { + err := gtserror.Newf("error opening multipart file: %w", err) + return nil, gtserror.NewErrorInternalError(err) + } + + // Wrap the multipart file reader to ensure is limited to max. + rc, _, _ := iotools.UpdateReadCloserLimit(mpfile, int64(maxsz)) + data := func(context.Context) (io.ReadCloser, error) { + return rc, nil } // Attempt to create the new local emoji. 
@@ -285,14 +304,23 @@ func (p *Processor) emojiUpdateCopy( return nil, gtserror.NewErrorNotFound(err) } + // Get maximum supported local emoji size. + maxsz := config.GetMediaEmojiLocalMaxSize() + + // Ensure target emoji image within size bounds. + if bytesize.Size(target.ImageFileSize) > maxsz { + text := fmt.Sprintf("emoji exceeds configured max size: %s", maxsz) + return nil, gtserror.NewErrorBadRequest(errors.New(text), text) + } + // Data function for copying just streams media // out of storage into an additional location. // // This means that data for the copy persists even // if the remote copied emoji gets deleted at some point. - data := func(ctx context.Context) (io.ReadCloser, int64, error) { + data := func(ctx context.Context) (io.ReadCloser, error) { rc, err := p.state.Storage.GetStream(ctx, target.ImagePath) - return rc, int64(target.ImageFileSize), err + return rc, err } // Attempt to create the new local emoji. @@ -413,10 +441,26 @@ func (p *Processor) emojiUpdateModify( // Updating image and maybe categoryID. // We can do both at the same time :) - // Simply read provided form data for emoji data source. - data := func(_ context.Context) (io.ReadCloser, int64, error) { - f, err := image.Open() - return f, image.Size, err + // Get maximum supported local emoji size. + maxsz := config.GetMediaEmojiLocalMaxSize() + + // Ensure media within size bounds. + if image.Size > int64(maxsz) { + text := fmt.Sprintf("emoji exceeds configured max size: %s", maxsz) + return nil, gtserror.NewErrorBadRequest(errors.New(text), text) + } + + // Open multipart file reader. + mpfile, err := image.Open() + if err != nil { + err := gtserror.Newf("error opening multipart file: %w", err) + return nil, gtserror.NewErrorInternalError(err) + } + + // Wrap the multipart file reader to ensure is limited to max. 
+ rc, _, _ := iotools.UpdateReadCloserLimit(mpfile, int64(maxsz)) + data := func(context.Context) (io.ReadCloser, error) { + return rc, nil } // Prepare emoji model for recache from new data. diff --git a/internal/processing/admin/media.go b/internal/processing/admin/media.go index edbcbe349..9cd68d88b 100644 --- a/internal/processing/admin/media.go +++ b/internal/processing/admin/media.go @@ -21,6 +21,7 @@ import ( "context" "fmt" + "github.com/superseriousbusiness/gotosocial/internal/gtscontext" "github.com/superseriousbusiness/gotosocial/internal/gtserror" "github.com/superseriousbusiness/gotosocial/internal/gtsmodel" "github.com/superseriousbusiness/gotosocial/internal/log" @@ -35,8 +36,9 @@ func (p *Processor) MediaRefetch(ctx context.Context, requestingAccount *gtsmode } go func() { + ctx := gtscontext.WithValues(context.Background(), ctx) log.Info(ctx, "starting emoji refetch") - refetched, err := p.media.RefetchEmojis(context.Background(), domain, transport.DereferenceMedia) + refetched, err := p.media.RefetchEmojis(ctx, domain, transport.DereferenceMedia) if err != nil { log.Errorf(ctx, "error refetching emojis: %s", err) } else { diff --git a/internal/processing/media/create.go b/internal/processing/media/create.go index 0dbe997de..b3a7d6052 100644 --- a/internal/processing/media/create.go +++ b/internal/processing/media/create.go @@ -19,10 +19,13 @@ package media import ( "context" + "errors" "fmt" "io" + "codeberg.org/gruf/go-iotools" apimodel "github.com/superseriousbusiness/gotosocial/internal/api/model" + "github.com/superseriousbusiness/gotosocial/internal/config" "github.com/superseriousbusiness/gotosocial/internal/gtserror" "github.com/superseriousbusiness/gotosocial/internal/gtsmodel" "github.com/superseriousbusiness/gotosocial/internal/media" @@ -30,21 +33,39 @@ import ( // Create creates a new media attachment belonging to the given account, using the request form. 
func (p *Processor) Create(ctx context.Context, account *gtsmodel.Account, form *apimodel.AttachmentRequest) (*apimodel.Attachment, gtserror.WithCode) { - data := func(_ context.Context) (io.ReadCloser, int64, error) { - f, err := form.File.Open() - return f, form.File.Size, err + + // Get maximum supported local media size. + maxsz := config.GetMediaLocalMaxSize() + + // Ensure media within size bounds. + if form.File.Size > int64(maxsz) { + text := fmt.Sprintf("media exceeds configured max size: %s", maxsz) + return nil, gtserror.NewErrorBadRequest(errors.New(text), text) } + // Parse focus details from API form input. focusX, focusY, err := parseFocus(form.Focus) if err != nil { - err := fmt.Errorf("could not parse focus value %s: %s", form.Focus, err) - return nil, gtserror.NewErrorBadRequest(err, err.Error()) + text := fmt.Sprintf("could not parse focus value %s: %s", form.Focus, err) + return nil, gtserror.NewErrorBadRequest(errors.New(text), text) } + // Open multipart file reader. + mpfile, err := form.File.Open() + if err != nil { + err := gtserror.Newf("error opening multipart file: %w", err) + return nil, gtserror.NewErrorInternalError(err) + } + + // Wrap the multipart file reader to ensure is limited to max. + rc, _, _ := iotools.UpdateReadCloserLimit(mpfile, int64(maxsz)) + // Create local media and write to instance storage. 
attachment, errWithCode := p.c.StoreLocalMedia(ctx, account.ID, - data, + func(ctx context.Context) (reader io.ReadCloser, err error) { + return rc, nil + }, media.AdditionalMediaInfo{ Description: &form.Description, FocusX: &focusX, diff --git a/internal/processing/media/getfile_test.go b/internal/processing/media/getfile_test.go index f0517b339..34f5d99a2 100644 --- a/internal/processing/media/getfile_test.go +++ b/internal/processing/media/getfile_test.go @@ -18,7 +18,6 @@ package media_test import ( - "bytes" "context" "io" "path" @@ -87,9 +86,9 @@ func (suite *GetFileTestSuite) TestGetRemoteFileUncached() { MediaSize: string(media.SizeOriginal), FileName: fileName, }) - suite.NoError(errWithCode) suite.NotNil(content) + b, err := io.ReadAll(content.Content) suite.NoError(err) suite.NoError(content.Content.Close()) @@ -111,7 +110,7 @@ func (suite *GetFileTestSuite) TestGetRemoteFileUncached() { suite.True(*dbAttachment.Cached) // the file should be back in storage at the same path as before - refreshedBytes, err := suite.storage.Get(ctx, testAttachment.File.Path) + refreshedBytes, err := suite.storage.Get(ctx, dbAttachment.File.Path) suite.NoError(err) suite.Equal(suite.testRemoteAttachments[testAttachment.RemoteURL].Data, refreshedBytes) } @@ -139,32 +138,26 @@ func (suite *GetFileTestSuite) TestGetRemoteFileUncachedInterrupted() { MediaSize: string(media.SizeOriginal), FileName: fileName, }) - suite.NoError(errWithCode) suite.NotNil(content) - // only read the first kilobyte and then stop - b := make([]byte, 0, 1024) - if !testrig.WaitFor(func() bool { - read, err := io.CopyN(bytes.NewBuffer(b), content.Content, 1024) - return err == nil && read == 1024 - }) { - suite.FailNow("timed out trying to read first 1024 bytes") - } + _, err = io.CopyN(io.Discard, content.Content, 1024) + suite.NoError(err) - // close the reader - suite.NoError(content.Content.Close()) + err = content.Content.Close() + suite.NoError(err) // the attachment should still be updated in 
the database even though the caller hung up + var dbAttachment *gtsmodel.MediaAttachment if !testrig.WaitFor(func() bool { - dbAttachment, _ := suite.db.GetAttachmentByID(ctx, testAttachment.ID) + dbAttachment, _ = suite.db.GetAttachmentByID(ctx, testAttachment.ID) return *dbAttachment.Cached }) { suite.FailNow("timed out waiting for attachment to be updated") } // the file should be back in storage at the same path as before - refreshedBytes, err := suite.storage.Get(ctx, testAttachment.File.Path) + refreshedBytes, err := suite.storage.Get(ctx, dbAttachment.File.Path) suite.NoError(err) suite.Equal(suite.testRemoteAttachments[testAttachment.RemoteURL].Data, refreshedBytes) } @@ -196,9 +189,9 @@ func (suite *GetFileTestSuite) TestGetRemoteFileThumbnailUncached() { MediaSize: string(media.SizeSmall), FileName: fileName, }) - suite.NoError(errWithCode) suite.NotNil(content) + b, err := io.ReadAll(content.Content) suite.NoError(err) suite.NoError(content.Content.Close()) diff --git a/internal/storage/storage.go b/internal/storage/storage.go index 55ec0d167..d05fe3519 100644 --- a/internal/storage/storage.go +++ b/internal/storage/storage.go @@ -24,6 +24,7 @@ import ( "io" "mime" "net/url" + "os" "path" "syscall" "time" @@ -95,6 +96,30 @@ func (d *Driver) PutStream(ctx context.Context, key string, r io.Reader) (int64, return d.Storage.WriteStream(ctx, key, r) } +// PutFile moves the contents of file at path, to storage.Driver{} under given key. +func (d *Driver) PutFile(ctx context.Context, key string, filepath string) (int64, error) { + // Open file at path for reading. + file, err := os.Open(filepath) + if err != nil { + return 0, gtserror.Newf("error opening file %s: %w", filepath, err) + } + + // Write the file data to storage under key. Note + // that for disk.DiskStorage{} this should end up + // being a highly optimized Linux sendfile syscall. 
+ sz, err := d.Storage.WriteStream(ctx, key, file) + if err != nil { + err = gtserror.Newf("error writing file %s: %w", key, err) + } + + // Close the file: done with it. + if e := file.Close(); e != nil { + log.Errorf(ctx, "error closing file %s: %v", filepath, e) + } + + return sz, err +} + // Delete attempts to remove the supplied key (and corresponding value) from storage. func (d *Driver) Delete(ctx context.Context, key string) error { return d.Storage.Remove(ctx, key) diff --git a/internal/transport/derefmedia.go b/internal/transport/derefmedia.go index 265a9e77e..873032f39 100644 --- a/internal/transport/derefmedia.go +++ b/internal/transport/derefmedia.go @@ -23,30 +23,42 @@ import ( "net/http" "net/url" + "codeberg.org/gruf/go-bytesize" + "codeberg.org/gruf/go-iotools" "github.com/superseriousbusiness/gotosocial/internal/gtserror" ) -func (t *transport) DereferenceMedia(ctx context.Context, iri *url.URL) (io.ReadCloser, int64, error) { +func (t *transport) DereferenceMedia(ctx context.Context, iri *url.URL, maxsz int64) (io.ReadCloser, error) { // Build IRI just once iriStr := iri.String() // Prepare HTTP request to this media's IRI req, err := http.NewRequestWithContext(ctx, "GET", iriStr, nil) if err != nil { - return nil, 0, err + return nil, err } req.Header.Add("Accept", "*/*") // we don't know what kind of media we're going to get here // Perform the HTTP request rsp, err := t.GET(req) if err != nil { - return nil, 0, err + return nil, err } // Check for an expected status code if rsp.StatusCode != http.StatusOK { - return nil, 0, gtserror.NewFromResponse(rsp) + return nil, gtserror.NewFromResponse(rsp) } - return rsp.Body, rsp.ContentLength, nil + // Check media within size limit. + if rsp.ContentLength > maxsz { + _ = rsp.Body.Close() // close early. + sz := bytesize.Size(maxsz) // nicer log format + return nil, gtserror.Newf("media body exceeds max size %s", sz) + } + + // Update response body with maximum supported media size. 
+ rsp.Body, _, _ = iotools.UpdateReadCloserLimit(rsp.Body, maxsz) + + return rsp.Body, nil } diff --git a/internal/transport/transport.go b/internal/transport/transport.go index 110c19b3d..2971ca603 100644 --- a/internal/transport/transport.go +++ b/internal/transport/transport.go @@ -67,8 +67,8 @@ type Transport interface { // Dereference fetches the ActivityStreams object located at this IRI with a GET request. Dereference(ctx context.Context, iri *url.URL) (*http.Response, error) - // DereferenceMedia fetches the given media attachment IRI, returning the reader and filesize. - DereferenceMedia(ctx context.Context, iri *url.URL) (io.ReadCloser, int64, error) + // DereferenceMedia fetches the given media attachment IRI, returning the reader limited to given max. + DereferenceMedia(ctx context.Context, iri *url.URL, maxsz int64) (io.ReadCloser, error) // DereferenceInstance dereferences remote instance information, first by checking /api/v1/instance, and then by checking /.well-known/nodeinfo. 
DereferenceInstance(ctx context.Context, iri *url.URL) (*gtsmodel.Instance, error) diff --git a/internal/typeutils/internaltofrontend.go b/internal/typeutils/internaltofrontend.go index 733a21b75..c0cd3d7e7 100644 --- a/internal/typeutils/internaltofrontend.go +++ b/internal/typeutils/internaltofrontend.go @@ -1385,9 +1385,9 @@ func (c *Converter) InstanceToAPIV1Instance(ctx context.Context, i *gtsmodel.Ins instance.Configuration.Statuses.CharactersReservedPerURL = instanceStatusesCharactersReservedPerURL instance.Configuration.Statuses.SupportedMimeTypes = instanceStatusesSupportedMimeTypes instance.Configuration.MediaAttachments.SupportedMimeTypes = media.SupportedMIMETypes - instance.Configuration.MediaAttachments.ImageSizeLimit = int(config.GetMediaImageMaxSize()) + instance.Configuration.MediaAttachments.ImageSizeLimit = int(config.GetMediaRemoteMaxSize()) instance.Configuration.MediaAttachments.ImageMatrixLimit = instanceMediaAttachmentsImageMatrixLimit - instance.Configuration.MediaAttachments.VideoSizeLimit = int(config.GetMediaVideoMaxSize()) + instance.Configuration.MediaAttachments.VideoSizeLimit = int(config.GetMediaRemoteMaxSize()) instance.Configuration.MediaAttachments.VideoFrameRateLimit = instanceMediaAttachmentsVideoFrameRateLimit instance.Configuration.MediaAttachments.VideoMatrixLimit = instanceMediaAttachmentsVideoMatrixLimit instance.Configuration.Polls.MaxOptions = config.GetStatusesPollMaxOptions() @@ -1525,9 +1525,9 @@ func (c *Converter) InstanceToAPIV2Instance(ctx context.Context, i *gtsmodel.Ins instance.Configuration.Statuses.CharactersReservedPerURL = instanceStatusesCharactersReservedPerURL instance.Configuration.Statuses.SupportedMimeTypes = instanceStatusesSupportedMimeTypes instance.Configuration.MediaAttachments.SupportedMimeTypes = media.SupportedMIMETypes - instance.Configuration.MediaAttachments.ImageSizeLimit = int(config.GetMediaImageMaxSize()) + instance.Configuration.MediaAttachments.ImageSizeLimit = 
int(config.GetMediaRemoteMaxSize()) instance.Configuration.MediaAttachments.ImageMatrixLimit = instanceMediaAttachmentsImageMatrixLimit - instance.Configuration.MediaAttachments.VideoSizeLimit = int(config.GetMediaVideoMaxSize()) + instance.Configuration.MediaAttachments.VideoSizeLimit = int(config.GetMediaRemoteMaxSize()) instance.Configuration.MediaAttachments.VideoFrameRateLimit = instanceMediaAttachmentsVideoFrameRateLimit instance.Configuration.MediaAttachments.VideoMatrixLimit = instanceMediaAttachmentsVideoMatrixLimit instance.Configuration.Polls.MaxOptions = config.GetStatusesPollMaxOptions() diff --git a/internal/typeutils/internaltofrontend_test.go b/internal/typeutils/internaltofrontend_test.go index 522bf6401..1195bc137 100644 --- a/internal/typeutils/internaltofrontend_test.go +++ b/internal/typeutils/internaltofrontend_test.go @@ -1217,7 +1217,7 @@ func (suite *InternalToFrontendTestSuite) TestInstanceV1ToFrontend() { "image/webp", "video/mp4" ], - "image_size_limit": 10485760, + "image_size_limit": 41943040, "image_matrix_limit": 16777216, "video_size_limit": 41943040, "video_frame_rate_limit": 60, @@ -1342,7 +1342,7 @@ func (suite *InternalToFrontendTestSuite) TestInstanceV2ToFrontend() { "image/webp", "video/mp4" ], - "image_size_limit": 10485760, + "image_size_limit": 41943040, "image_matrix_limit": 16777216, "video_size_limit": 41943040, "video_frame_rate_limit": 60, @@ -1433,7 +1433,7 @@ func (suite *InternalToFrontendTestSuite) TestEmojiToFrontendAdmin1() { "id": "01F8MH9H8E4VG3KDYJR9EGPXCQ", "disabled": false, "updated_at": "2021-09-20T10:40:37.000Z", - "total_file_size": 47115, + "total_file_size": 42794, "content_type": "image/png", "uri": "http://localhost:8080/emoji/01F8MH9H8E4VG3KDYJR9EGPXCQ" }`, string(b)) @@ -1455,7 +1455,7 @@ func (suite *InternalToFrontendTestSuite) TestEmojiToFrontendAdmin2() { "disabled": false, "domain": "fossbros-anonymous.io", "updated_at": "2020-03-18T12:12:00.000Z", - "total_file_size": 21697, + 
"total_file_size": 19854, "content_type": "image/png", "uri": "http://fossbros-anonymous.io/emoji/01GD5KP5CQEE1R3X43Y1EHS2CW" }`, string(b)) diff --git a/test/envparsing.sh b/test/envparsing.sh index 29403011e..22abff48a 100755 --- a/test/envparsing.sh +++ b/test/envparsing.sh @@ -122,9 +122,9 @@ EXPECT=$(cat << "EOF" "media-description-min-chars": 69, "media-emoji-local-max-size": 420, "media-emoji-remote-max-size": 420, - "media-image-max-size": 420, + "media-local-max-size": 420, "media-remote-cache-days": 30, - "media-video-max-size": 420, + "media-remote-max-size": 420, "metrics-auth-enabled": false, "metrics-auth-password": "", "metrics-auth-username": "", @@ -233,10 +233,10 @@ GTS_ACCOUNTS_ALLOW_CUSTOM_CSS=true \ GTS_ACCOUNTS_CUSTOM_CSS_LENGTH=5000 \ GTS_ACCOUNTS_REGISTRATION_OPEN=true \ GTS_ACCOUNTS_REASON_REQUIRED=false \ -GTS_MEDIA_IMAGE_MAX_SIZE=420 \ -GTS_MEDIA_VIDEO_MAX_SIZE=420 \ GTS_MEDIA_DESCRIPTION_MIN_CHARS=69 \ GTS_MEDIA_DESCRIPTION_MAX_CHARS=5000 \ +GTS_MEDIA_LOCAL_MAX_SIZE=420 \ +GTS_MEDIA_REMOTE_MAX_SIZE=420 \ GTS_MEDIA_REMOTE_CACHE_DAYS=30 \ GTS_MEDIA_EMOJI_LOCAL_MAX_SIZE=420 \ GTS_MEDIA_EMOJI_REMOTE_MAX_SIZE=420 \ diff --git a/testrig/config.go b/testrig/config.go index 30beaa910..ed98798d6 100644 --- a/testrig/config.go +++ b/testrig/config.go @@ -18,6 +18,7 @@ package testrig import ( + "context" "os" "strconv" "time" @@ -26,8 +27,23 @@ import ( "github.com/coreos/go-oidc/v3/oidc" "github.com/superseriousbusiness/gotosocial/internal/config" "github.com/superseriousbusiness/gotosocial/internal/language" + "github.com/superseriousbusiness/gotosocial/internal/media/ffmpeg" ) +func init() { + ctx := context.Background() + + // Ensure global ffmpeg WASM pool initialized. + if err := ffmpeg.InitFfmpeg(ctx, 1); err != nil { + panic(err) + } + + // Ensure global ffmpeg WASM pool initialized. + if err := ffmpeg.InitFfprobe(ctx, 1); err != nil { + panic(err) + } +} + // InitTestConfig initializes viper // configuration with test defaults. 
func InitTestConfig() { @@ -86,11 +102,11 @@ func testDefaults() config.Configuration { AccountsAllowCustomCSS: true, AccountsCustomCSSLength: 10000, - MediaImageMaxSize: 10485760, // 10MiB - MediaVideoMaxSize: 41943040, // 40MiB MediaDescriptionMinChars: 0, MediaDescriptionMaxChars: 500, MediaRemoteCacheDays: 7, + MediaLocalMaxSize: 40 * bytesize.MiB, + MediaRemoteMaxSize: 40 * bytesize.MiB, MediaEmojiLocalMaxSize: 51200, // 50KiB MediaEmojiRemoteMaxSize: 102400, // 100KiB MediaCleanupFrom: "00:00", // midnight. diff --git a/testrig/media/cowlick-small.jpeg b/testrig/media/cowlick-small.jpeg index b3cd2f647..4061653ab 100644 Binary files a/testrig/media/cowlick-small.jpeg and b/testrig/media/cowlick-small.jpeg differ diff --git a/testrig/media/kip-static.png b/testrig/media/kip-static.png index 1ba296687..c60b94c5c 100644 Binary files a/testrig/media/kip-static.png and b/testrig/media/kip-static.png differ diff --git a/testrig/media/ohyou-small.jpg b/testrig/media/ohyou-small.jpg old mode 100755 new mode 100644 index f561884d1..c862be1ff Binary files a/testrig/media/ohyou-small.jpg and b/testrig/media/ohyou-small.jpg differ diff --git a/testrig/media/rainbow-static.png b/testrig/media/rainbow-static.png old mode 100755 new mode 100644 index 79ed5c03a..f762a0470 Binary files a/testrig/media/rainbow-static.png and b/testrig/media/rainbow-static.png differ diff --git a/testrig/media/sloth-small.jpg b/testrig/media/sloth-small.jpg index 2787d535c..60f64a2fd 100644 Binary files a/testrig/media/sloth-small.jpg and b/testrig/media/sloth-small.jpg differ diff --git a/testrig/media/team-fortress-small.jpg b/testrig/media/team-fortress-small.jpg index f6773b9a0..8615834b6 100644 Binary files a/testrig/media/team-fortress-small.jpg and b/testrig/media/team-fortress-small.jpg differ diff --git a/testrig/media/thoughtsofdog-small.jpg b/testrig/media/thoughtsofdog-small.jpg index 98801d235..5f303f939 100644 Binary files a/testrig/media/thoughtsofdog-small.jpg and 
b/testrig/media/thoughtsofdog-small.jpg differ diff --git a/testrig/media/trent-small.jpg b/testrig/media/trent-small.jpg old mode 100755 new mode 100644 index 726c1aed0..4e9ac26b6 Binary files a/testrig/media/trent-small.jpg and b/testrig/media/trent-small.jpg differ diff --git a/testrig/media/welcome-small.jpg b/testrig/media/welcome-small.jpg old mode 100755 new mode 100644 index b1a585169..04179ccc9 Binary files a/testrig/media/welcome-small.jpg and b/testrig/media/welcome-small.jpg differ diff --git a/testrig/media/yell-static.png b/testrig/media/yell-static.png index 9b5d2837e..b60b9f62b 100644 Binary files a/testrig/media/yell-static.png and b/testrig/media/yell-static.png differ diff --git a/testrig/media/zork-small.jpg b/testrig/media/zork-small.jpg index 60be12564..e8fffe9cb 100644 Binary files a/testrig/media/zork-small.jpg and b/testrig/media/zork-small.jpg differ diff --git a/testrig/testmodels.go b/testrig/testmodels.go index 90c200585..5f41ed190 100644 --- a/testrig/testmodels.go +++ b/testrig/testmodels.go @@ -1028,7 +1028,7 @@ func NewTestAttachments() map[string]*gtsmodel.MediaAttachment { Thumbnail: gtsmodel.Thumbnail{ Path: "01F8MH5ZK5VRH73AKHQM6Y9VNX/attachment/small/01FVW7RXPQ8YJHTEXYPE7Q8ZY0.jpg", ContentType: "image/jpeg", - FileSize: 19312, + FileSize: 11751, URL: "http://localhost:8080/fileserver/01F8MH5ZK5VRH73AKHQM6Y9VNX/attachment/small/01FVW7RXPQ8YJHTEXYPE7Q8ZY0.jpg", RemoteURL: "http://fossbros-anonymous.io/attachments/small/a499f55b-2d1e-4acd-98d2-1ac2ba6d79b9.jpg", }, @@ -1205,7 +1205,7 @@ func NewTestEmojis() map[string]*gtsmodel.Emoji { ImageContentType: "image/png", ImageStaticContentType: "image/png", ImageFileSize: 36702, - ImageStaticFileSize: 10413, + ImageStaticFileSize: 6092, Disabled: util.Ptr(false), URI: "http://localhost:8080/emoji/01F8MH9H8E4VG3KDYJR9EGPXCQ", VisibleInPicker: util.Ptr(true), @@ -1227,7 +1227,7 @@ func NewTestEmojis() map[string]*gtsmodel.Emoji { ImageContentType: "image/png", ImageStaticContentType: 
"image/png", ImageFileSize: 10889, - ImageStaticFileSize: 10808, + ImageStaticFileSize: 8965, Disabled: util.Ptr(false), URI: "http://fossbros-anonymous.io/emoji/01GD5KP5CQEE1R3X43Y1EHS2CW", VisibleInPicker: util.Ptr(false), diff --git a/vendor/codeberg.org/superseriousbusiness/exif-terminator/LICENSE b/vendor/codeberg.org/gruf/go-ffmpreg/LICENSE similarity index 86% rename from vendor/codeberg.org/superseriousbusiness/exif-terminator/LICENSE rename to vendor/codeberg.org/gruf/go-ffmpreg/LICENSE index dba13ed2d..f288702d2 100644 --- a/vendor/codeberg.org/superseriousbusiness/exif-terminator/LICENSE +++ b/vendor/codeberg.org/gruf/go-ffmpreg/LICENSE @@ -1,21 +1,23 @@ - GNU AFFERO GENERAL PUBLIC LICENSE - Version 3, 19 November 2007 + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 - Copyright (C) 2007 Free Software Foundation, Inc. + Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble - The GNU Affero General Public License is a free, copyleft license for -software and other kinds of works, specifically designed to ensure -cooperation with the community in the case of network server software. + The GNU General Public License is a free, copyleft license for +software and other kinds of works. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, -our General Public Licenses are intended to guarantee your freedom to +the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free -software for all its users. +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. 
When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you @@ -24,34 +26,44 @@ them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. - Developers that use our General Public Licenses protect your rights -with two steps: (1) assert copyright on the software, and (2) offer -you this License which gives you legal permission to copy, distribute -and/or modify the software. + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. - A secondary benefit of defending all users' freedom is that -improvements made in alternate versions of the program, if they -receive widespread use, become available for other developers to -incorporate. Many developers of free software are heartened and -encouraged by the resulting cooperation. However, in the case of -software used on network servers, this result may fail to come about. -The GNU General Public License permits making a modified version and -letting the public access it on a server without ever releasing its -source code to the public. + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. - The GNU Affero General Public License is designed specifically to -ensure that, in such cases, the modified source code becomes available -to the community. 
It requires the operator of a network server to -provide the source code of the modified version running there to the -users of that server. Therefore, public use of a modified version, on -a publicly accessible server, gives the public access to the source -code of the modified version. + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. - An older license, called the Affero General Public License and -published by Affero, was designed to accomplish similar goals. This is -a different license, not a version of the Affero GPL, but Affero has -released a new version of the Affero GPL which permits relicensing under -this license. + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. 
+States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. The precise terms and conditions for copying, distribution and modification follow. @@ -60,7 +72,7 @@ modification follow. 0. Definitions. - "This License" refers to version 3 of the GNU Affero General Public License. + "This License" refers to version 3 of the GNU General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. @@ -537,45 +549,35 @@ to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. - 13. Remote Network Interaction; Use with the GNU General Public License. - - Notwithstanding any other provision of this License, if you modify the -Program, your modified version must prominently offer all users -interacting with it remotely through a computer network (if your version -supports such interaction) an opportunity to receive the Corresponding -Source of your version by providing access to the Corresponding Source -from a network server at no charge, through some standard or customary -means of facilitating copying of software. This Corresponding Source -shall include the Corresponding Source for any work covered by version 3 -of the GNU General Public License that is incorporated pursuant to the -following paragraph. + 13. Use with the GNU Affero General Public License. 
Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed -under version 3 of the GNU General Public License into a single +under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, -but the work with which it is combined will remain governed by version -3 of the GNU General Public License. +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of -the GNU Affero General Public License from time to time. Such new versions -will be similar in spirit to the present version, but may differ in detail to +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU Affero General +Program specifies that a certain numbered version of the GNU General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the -GNU Affero General Public License, you may choose any version ever published +GNU General Public License, you may choose any version ever published by the Free Software Foundation. 
If the Program specifies that a proxy can decide which future -versions of the GNU Affero General Public License can be used, that proxy's +versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. @@ -633,29 +635,40 @@ the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software: you can redistribute it and/or modify - it under the terms of the GNU Affero General Public License as published by + it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU Affero General Public License for more details. + GNU General Public License for more details. - You should have received a copy of the GNU Affero General Public License - along with this program. If not, see . + You should have received a copy of the GNU General Public License + along with this program. If not, see . Also add information on how to contact you by electronic and paper mail. - If your software can interact with users remotely through a computer -network, you should also make sure that it provides a way for users to -get its source. For example, if your program is a web application, its -interface could display a "Source" link that leads users to an archive -of the code. There are many ways you could offer source, and different -solutions will be better for different programs; see section 13 for the -specific requirements. 
+ If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU AGPL, see -. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. diff --git a/vendor/codeberg.org/gruf/go-ffmpreg/embed/ffmpeg/ffmpeg.wasm b/vendor/codeberg.org/gruf/go-ffmpreg/embed/ffmpeg/ffmpeg.wasm new file mode 100644 index 000000000..36626ca05 Binary files /dev/null and b/vendor/codeberg.org/gruf/go-ffmpreg/embed/ffmpeg/ffmpeg.wasm differ diff --git a/vendor/codeberg.org/gruf/go-ffmpreg/embed/ffmpeg/lib.go b/vendor/codeberg.org/gruf/go-ffmpreg/embed/ffmpeg/lib.go new file mode 100644 index 000000000..4c1400114 --- /dev/null +++ b/vendor/codeberg.org/gruf/go-ffmpreg/embed/ffmpeg/lib.go @@ -0,0 +1,38 @@ +package ffmpeg + +import ( + _ "embed" + "os" + + "github.com/tetratelabs/wazero/api" + "github.com/tetratelabs/wazero/experimental" +) + +func init() { + // Check for WASM source file path. 
+ path := os.Getenv("FFMPEG_WASM") + if path == "" { + return + } + + var err error + + // Read file into memory. + B, err = os.ReadFile(path) + if err != nil { + panic(err) + } +} + +// CoreFeatures is the WebAssembly Core specification +// features this embedded binary was compiled with. +const CoreFeatures = api.CoreFeatureSIMD | + api.CoreFeatureBulkMemoryOperations | + api.CoreFeatureNonTrappingFloatToIntConversion | + api.CoreFeatureMutableGlobal | + api.CoreFeatureReferenceTypes | + api.CoreFeatureSignExtensionOps | + experimental.CoreFeaturesThreads + +//go:embed ffmpeg.wasm +var B []byte diff --git a/vendor/codeberg.org/gruf/go-ffmpreg/embed/ffprobe/ffprobe.wasm b/vendor/codeberg.org/gruf/go-ffmpreg/embed/ffprobe/ffprobe.wasm new file mode 100644 index 000000000..ebef6767a Binary files /dev/null and b/vendor/codeberg.org/gruf/go-ffmpreg/embed/ffprobe/ffprobe.wasm differ diff --git a/vendor/codeberg.org/gruf/go-ffmpreg/embed/ffprobe/lib.go b/vendor/codeberg.org/gruf/go-ffmpreg/embed/ffprobe/lib.go new file mode 100644 index 000000000..b2ffb3c54 --- /dev/null +++ b/vendor/codeberg.org/gruf/go-ffmpreg/embed/ffprobe/lib.go @@ -0,0 +1,38 @@ +package ffprobe + +import ( + _ "embed" + "os" + + "github.com/tetratelabs/wazero/api" + "github.com/tetratelabs/wazero/experimental" +) + +func init() { + // Check for WASM source file path. + path := os.Getenv("FFPROBE_WASM") + if path == "" { + return + } + + var err error + + // Read file into memory. + B, err = os.ReadFile(path) + if err != nil { + panic(err) + } +} + +// CoreFeatures is the WebAssembly Core specification +// features this embedded binary was compiled with. 
+const CoreFeatures = api.CoreFeatureSIMD | + api.CoreFeatureBulkMemoryOperations | + api.CoreFeatureNonTrappingFloatToIntConversion | + api.CoreFeatureMutableGlobal | + api.CoreFeatureReferenceTypes | + api.CoreFeatureSignExtensionOps | + experimental.CoreFeaturesThreads + +//go:embed ffprobe.wasm +var B []byte diff --git a/vendor/codeberg.org/gruf/go-ffmpreg/ffmpeg/ffmpeg.go b/vendor/codeberg.org/gruf/go-ffmpreg/ffmpeg/ffmpeg.go new file mode 100644 index 000000000..d5f4b2b9c --- /dev/null +++ b/vendor/codeberg.org/gruf/go-ffmpreg/ffmpeg/ffmpeg.go @@ -0,0 +1,109 @@ +package ffmpeg + +import ( + "context" + + "codeberg.org/gruf/go-ffmpreg/embed/ffmpeg" + "codeberg.org/gruf/go-ffmpreg/internal" + "codeberg.org/gruf/go-ffmpreg/util" + "codeberg.org/gruf/go-ffmpreg/wasm" + "github.com/tetratelabs/wazero" + "github.com/tetratelabs/wazero/api" + "github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1" +) + +// pool of WASM module instances. +var pool = wasm.InstancePool{ + Instantiator: wasm.Instantiator{ + + // WASM module name. + Module: "ffmpeg", + + // Per-instance WebAssembly runtime (with shared cache). + Runtime: func(ctx context.Context) wazero.Runtime { + + // Prepare config with cache. + cfg := wazero.NewRuntimeConfig() + cfg = cfg.WithCoreFeatures(ffmpeg.CoreFeatures) + cfg = cfg.WithCompilationCache(internal.Cache) + + // Instantiate runtime with our config. + rt := wazero.NewRuntimeWithConfig(ctx, cfg) + + // Prepare default "env" host module. + env := rt.NewHostModuleBuilder("env") + env = env.NewFunctionBuilder(). + WithGoModuleFunction( + api.GoModuleFunc(util.Wasm_Tempnam), + []api.ValueType{api.ValueTypeI32, api.ValueTypeI32}, + []api.ValueType{api.ValueTypeI32}, + ). + Export("tempnam") + + // Instantiate "env" module in our runtime. + _, err := env.Instantiate(context.Background()) + if err != nil { + panic(err) + } + + // Instantiate the wasi snapshot preview 1 in runtime. 
+ _, err = wasi_snapshot_preview1.Instantiate(ctx, rt) + if err != nil { + panic(err) + } + + return rt + }, + + // Per-run module configuration. + Config: wazero.NewModuleConfig, + + // Embedded WASM. + Source: ffmpeg.B, + }, +} + +// Precompile ensures at least compiled ffmpeg +// instance is available in the global pool. +func Precompile(ctx context.Context) error { + inst, err := pool.Get(ctx) + if err != nil { + return err + } + pool.Put(inst) + return nil +} + +// Get fetches new ffmpeg instance from pool, prefering cached if available. +func Get(ctx context.Context) (*wasm.Instance, error) { return pool.Get(ctx) } + +// Put places the given ffmpeg instance in pool. +func Put(inst *wasm.Instance) { pool.Put(inst) } + +// Run will run the given args against an ffmpeg instance from pool. +func Run(ctx context.Context, args wasm.Args) (uint32, error) { + inst, err := pool.Get(ctx) + if err != nil { + return 0, err + } + rc, err := inst.Run(ctx, args) + pool.Put(inst) + return rc, err +} + +// Cached returns a cached instance (if any) from pool. +func Cached() *wasm.Instance { return pool.Cached() } + +// Free drops all instances +// cached in instance pool. +func Free() { + ctx := context.Background() + for { + inst := pool.Cached() + if inst == nil { + return + } + _ = inst.Close(ctx) + } + +} diff --git a/vendor/codeberg.org/gruf/go-ffmpreg/ffprobe/ffprobe.go b/vendor/codeberg.org/gruf/go-ffmpreg/ffprobe/ffprobe.go new file mode 100644 index 000000000..e8178e97a --- /dev/null +++ b/vendor/codeberg.org/gruf/go-ffmpreg/ffprobe/ffprobe.go @@ -0,0 +1,108 @@ +package ffprobe + +import ( + "context" + + "codeberg.org/gruf/go-ffmpreg/embed/ffprobe" + "codeberg.org/gruf/go-ffmpreg/internal" + "codeberg.org/gruf/go-ffmpreg/util" + "codeberg.org/gruf/go-ffmpreg/wasm" + "github.com/tetratelabs/wazero" + "github.com/tetratelabs/wazero/api" + "github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1" +) + +// pool of WASM module instances. 
+var pool = wasm.InstancePool{ + Instantiator: wasm.Instantiator{ + + // WASM module name. + Module: "ffprobe", + + // Per-instance WebAssembly runtime (with shared cache). + Runtime: func(ctx context.Context) wazero.Runtime { + + // Prepare config with cache. + cfg := wazero.NewRuntimeConfig() + cfg = cfg.WithCoreFeatures(ffprobe.CoreFeatures) + cfg = cfg.WithCompilationCache(internal.Cache) + + // Instantiate runtime with our config. + rt := wazero.NewRuntimeWithConfig(ctx, cfg) + + // Prepare default "env" host module. + env := rt.NewHostModuleBuilder("env") + env = env.NewFunctionBuilder(). + WithGoModuleFunction( + api.GoModuleFunc(util.Wasm_Tempnam), + []api.ValueType{api.ValueTypeI32, api.ValueTypeI32}, + []api.ValueType{api.ValueTypeI32}, + ). + Export("tempnam") + + // Instantiate "env" module in our runtime. + _, err := env.Instantiate(context.Background()) + if err != nil { + panic(err) + } + + // Instantiate the wasi snapshot preview 1 in runtime. + _, err = wasi_snapshot_preview1.Instantiate(ctx, rt) + if err != nil { + panic(err) + } + + return rt + }, + + // Per-run module configuration. + Config: wazero.NewModuleConfig, + + // Embedded WASM. + Source: ffprobe.B, + }, +} + +// Precompile ensures at least compiled ffprobe +// instance is available in the global pool. +func Precompile(ctx context.Context) error { + inst, err := pool.Get(ctx) + if err != nil { + return err + } + pool.Put(inst) + return nil +} + +// Get fetches new ffprobe instance from pool, prefering cached if available. +func Get(ctx context.Context) (*wasm.Instance, error) { return pool.Get(ctx) } + +// Put places the given ffprobe instance in pool. +func Put(inst *wasm.Instance) { pool.Put(inst) } + +// Run will run the given args against an ffprobe instance from pool. 
+func Run(ctx context.Context, args wasm.Args) (uint32, error) { + inst, err := pool.Get(ctx) + if err != nil { + return 0, err + } + rc, err := inst.Run(ctx, args) + pool.Put(inst) + return rc, err +} + +// Cached returns a cached instance (if any) from pool. +func Cached() *wasm.Instance { return pool.Cached() } + +// Free drops all instances +// cached in instance pool. +func Free() { + ctx := context.Background() + for { + inst := pool.Cached() + if inst == nil { + return + } + _ = inst.Close(ctx) + } +} diff --git a/vendor/codeberg.org/gruf/go-ffmpreg/internal/wasm.go b/vendor/codeberg.org/gruf/go-ffmpreg/internal/wasm.go new file mode 100644 index 000000000..94fdfbdde --- /dev/null +++ b/vendor/codeberg.org/gruf/go-ffmpreg/internal/wasm.go @@ -0,0 +1,25 @@ +package internal + +import ( + "os" + + "github.com/tetratelabs/wazero" +) + +func init() { + var err error + + if dir := os.Getenv("WAZERO_COMPILATION_CACHE"); dir != "" { + // Use on-filesystem compilation cache given by env. + Cache, err = wazero.NewCompilationCacheWithDir(dir) + if err != nil { + panic(err) + } + } else { + // Use in-memory compilation cache. + Cache = wazero.NewCompilationCache() + } +} + +// Shared WASM compilation cache. +var Cache wazero.CompilationCache diff --git a/vendor/codeberg.org/gruf/go-ffmpreg/util/funcs.go b/vendor/codeberg.org/gruf/go-ffmpreg/util/funcs.go new file mode 100644 index 000000000..4b584cf41 --- /dev/null +++ b/vendor/codeberg.org/gruf/go-ffmpreg/util/funcs.go @@ -0,0 +1,65 @@ +package util + +import ( + "context" + "os" + "path" + "strconv" + "time" + + "github.com/tetratelabs/wazero/api" +) + +// Wasm_Tempnam wraps Go_Tempnam to fulfill wazero's api.GoModuleFunc, +// the argument definition is (i32, i32) and return definition is (i32). +// NOTE: the calling module MUST have access to exported malloc / free. 
+func Wasm_Tempnam(ctx context.Context, mod api.Module, stack []uint64) { + dirptr := api.DecodeU32(stack[0]) + pfxptr := api.DecodeU32(stack[1]) + dir := readString(ctx, mod, dirptr, 0) + pfx := readString(ctx, mod, pfxptr, 0) + tmpstr := Go_Tempnam(dir, pfx) + tmpptr := writeString(ctx, mod, tmpstr) + stack[0] = api.EncodeU32(tmpptr) +} + +// Go_Tempname is functionally similar to C's tempnam. +func Go_Tempnam(dir, prefix string) string { + now := time.Now().Unix() + prefix = path.Join(dir, prefix) + for i := 0; i < 1000; i++ { + n := murmur2(uint32(now + int64(i))) + name := prefix + strconv.FormatUint(uint64(n), 10) + _, err := os.Stat(name) + if err == nil { + continue + } else if os.IsNotExist(err) { + return name + } else { + panic(err) + } + } + panic("too many attempts") +} + +// murmur2 is a simple uint32 murmur2 hash +// impl with fixed seed and input size. +func murmur2(k uint32) (h uint32) { + const ( + // seed ^ bitlen + s = uint32(2147483647) ^ 8 + + M = 0x5bd1e995 + R = 24 + ) + h = s + k *= M + k ^= k >> R + k *= M + h *= M + h ^= k + h ^= h >> 13 + h *= M + h ^= h >> 15 + return +} diff --git a/vendor/codeberg.org/gruf/go-ffmpreg/util/wasm.go b/vendor/codeberg.org/gruf/go-ffmpreg/util/wasm.go new file mode 100644 index 000000000..fce41d0a0 --- /dev/null +++ b/vendor/codeberg.org/gruf/go-ffmpreg/util/wasm.go @@ -0,0 +1,81 @@ +package util + +import ( + "bytes" + "context" + + "github.com/tetratelabs/wazero/api" +) + +// NOTE: +// the below functions are not very well optimized +// for repeated calls. this is relying on the fact +// that the only place they get used (tempnam), is +// not called very often, should only be once per run +// so calls to ExportedFunction() and Call() instead +// of caching api.Function and using CallWithStack() +// will work out the same (if only called once). + +// maxaddr is the maximum +// wasm32 memory address. 
+const maxaddr = ^uint32(0) + +func malloc(ctx context.Context, mod api.Module, sz uint32) uint32 { + stack, err := mod.ExportedFunction("malloc").Call(ctx, uint64(sz)) + if err != nil { + panic(err) + } + ptr := api.DecodeU32(stack[0]) + if ptr == 0 { + panic("out of memory") + } + return ptr +} + +func free(ctx context.Context, mod api.Module, ptr uint32) { + if ptr != 0 { + mod.ExportedFunction("free").Call(ctx, uint64(ptr)) + } +} + +func view(ctx context.Context, mod api.Module, ptr uint32, n uint32) []byte { + if n == 0 { + n = maxaddr - ptr + } + mem := mod.Memory() + b, ok := mem.Read(ptr, n) + if !ok { + panic("out of range") + } + return b +} + +func read(ctx context.Context, mod api.Module, ptr, n uint32) []byte { + return bytes.Clone(view(ctx, mod, ptr, n)) +} + +func readString(ctx context.Context, mod api.Module, ptr, n uint32) string { + return string(view(ctx, mod, ptr, n)) +} + +func write(ctx context.Context, mod api.Module, b []byte) uint32 { + mem := mod.Memory() + len := uint32(len(b)) + ptr := malloc(ctx, mod, len) + ok := mem.Write(ptr, b) + if !ok { + panic("out of range") + } + return ptr +} + +func writeString(ctx context.Context, mod api.Module, str string) uint32 { + mem := mod.Memory() + len := uint32(len(str) + 1) + ptr := malloc(ctx, mod, len) + ok := mem.WriteString(ptr, str) + if !ok { + panic("out of range") + } + return ptr +} diff --git a/vendor/codeberg.org/gruf/go-ffmpreg/wasm/instance.go b/vendor/codeberg.org/gruf/go-ffmpreg/wasm/instance.go new file mode 100644 index 000000000..21a080ce9 --- /dev/null +++ b/vendor/codeberg.org/gruf/go-ffmpreg/wasm/instance.go @@ -0,0 +1,181 @@ +package wasm + +import ( + "context" + "errors" + "io" + "sync" + + "github.com/tetratelabs/wazero" + "github.com/tetratelabs/wazero/sys" +) + +type Args struct { + // Standard FDs. + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer + + // CLI args. + Args []string + + // Optional further module configuration function. + // (e.g. 
to mount filesystem dir, set env vars, etc). + Config func(wazero.ModuleConfig) wazero.ModuleConfig +} + +type Instantiator struct { + // Module ... + Module string + + // Runtime ... + Runtime func(context.Context) wazero.Runtime + + // Config ... + Config func() wazero.ModuleConfig + + // Source ... + Source []byte +} + +func (inst *Instantiator) New(ctx context.Context) (*Instance, error) { + switch { + case inst.Module == "": + panic("missing module name") + case inst.Runtime == nil: + panic("missing runtime instantiator") + case inst.Config == nil: + panic("missing module configuration") + case len(inst.Source) == 0: + panic("missing module source") + } + + // Create new host runtime. + rt := inst.Runtime(ctx) + + // Compile guest module from WebAssembly source. + mod, err := rt.CompileModule(ctx, inst.Source) + if err != nil { + return nil, err + } + + return &Instance{ + inst: inst, + wzrt: rt, + cmod: mod, + }, nil +} + +type InstancePool struct { + Instantiator + + pool []*Instance + lock sync.Mutex +} + +func (p *InstancePool) Get(ctx context.Context) (*Instance, error) { + for { + // Check for cached. + inst := p.Cached() + if inst == nil { + break + } + + // Check if closed. + if inst.IsClosed() { + continue + } + + return inst, nil + } + + // Must create new instance. + return p.Instantiator.New(ctx) +} + +func (p *InstancePool) Put(inst *Instance) { + if inst.inst != &p.Instantiator { + panic("instance and pool instantiators do not match") + } + p.lock.Lock() + p.pool = append(p.pool, inst) + p.lock.Unlock() +} + +func (p *InstancePool) Cached() *Instance { + var inst *Instance + p.lock.Lock() + if len(p.pool) > 0 { + inst = p.pool[len(p.pool)-1] + p.pool = p.pool[:len(p.pool)-1] + } + p.lock.Unlock() + return inst +} + +// Instance ... +// +// NOTE: Instance is NOT concurrency +// safe. One at a time please!! 
+type Instance struct { + inst *Instantiator + wzrt wazero.Runtime + cmod wazero.CompiledModule +} + +func (inst *Instance) Run(ctx context.Context, args Args) (uint32, error) { + if inst.inst == nil { + panic("not initialized") + } + + // Check instance open. + if inst.IsClosed() { + return 0, errors.New("instance closed") + } + + // Prefix binary name as argv0 to args. + cargs := make([]string, len(args.Args)+1) + copy(cargs[1:], args.Args) + cargs[0] = inst.inst.Module + + // Create base module config. + modcfg := inst.inst.Config() + modcfg = modcfg.WithName(inst.inst.Module) + modcfg = modcfg.WithArgs(cargs...) + modcfg = modcfg.WithStdin(args.Stdin) + modcfg = modcfg.WithStdout(args.Stdout) + modcfg = modcfg.WithStderr(args.Stderr) + + if args.Config != nil { + // Pass through config fn. + modcfg = args.Config(modcfg) + } + + // Instantiate the module from precompiled wasm module data. + mod, err := inst.wzrt.InstantiateModule(ctx, inst.cmod, modcfg) + + if mod != nil { + // Close module. + mod.Close(ctx) + } + + // Check for a returned exit code error. + if err, ok := err.(*sys.ExitError); ok { + return err.ExitCode(), nil + } + + return 0, err +} + +func (inst *Instance) IsClosed() bool { + return (inst.wzrt == nil || inst.cmod == nil) +} + +func (inst *Instance) Close(ctx context.Context) error { + if inst.IsClosed() { + return nil + } + err1 := inst.cmod.Close(ctx) + err2 := inst.wzrt.Close(ctx) + return errors.Join(err1, err2) +} diff --git a/vendor/codeberg.org/gruf/go-iotools/close.go b/vendor/codeberg.org/gruf/go-iotools/close.go index 3f0ee7780..f3d4814ba 100644 --- a/vendor/codeberg.org/gruf/go-iotools/close.go +++ b/vendor/codeberg.org/gruf/go-iotools/close.go @@ -2,6 +2,13 @@ package iotools import "io" +// NopCloser is an empty +// implementation of io.Closer, +// that simply does nothing! 
+type NopCloser struct{} + +func (NopCloser) Close() error { return nil } + // CloserFunc is a function signature which allows // a function to implement the io.Closer type. type CloserFunc func() error @@ -10,6 +17,7 @@ func (c CloserFunc) Close() error { return c() } +// CloserCallback wraps io.Closer to add a callback deferred to call just after Close(). func CloserCallback(c io.Closer, cb func()) io.Closer { return CloserFunc(func() error { defer cb() @@ -17,6 +25,7 @@ func CloserCallback(c io.Closer, cb func()) io.Closer { }) } +// CloserAfterCallback wraps io.Closer to add a callback called just before Close(). func CloserAfterCallback(c io.Closer, cb func()) io.Closer { return CloserFunc(func() (err error) { defer func() { err = c.Close() }() diff --git a/vendor/codeberg.org/gruf/go-iotools/helpers.go b/vendor/codeberg.org/gruf/go-iotools/helpers.go new file mode 100644 index 000000000..0e50e05e0 --- /dev/null +++ b/vendor/codeberg.org/gruf/go-iotools/helpers.go @@ -0,0 +1,85 @@ +package iotools + +import "io" + +// AtEOF returns true when reader at EOF, +// this is checked with a 0 length read. +func AtEOF(r io.Reader) bool { + _, err := r.Read(nil) + return (err == io.EOF) +} + +// GetReadCloserLimit attempts to cast io.Reader to access its io.LimitedReader with limit. +func GetReaderLimit(r io.Reader) (*io.LimitedReader, int64) { + lr, ok := r.(*io.LimitedReader) + if !ok { + return nil, -1 + } + return lr, lr.N +} + +// UpdateReaderLimit attempts to update the limit of a reader for existing, newly wrapping if necessary. +func UpdateReaderLimit(r io.Reader, limit int64) (*io.LimitedReader, int64) { + lr, ok := r.(*io.LimitedReader) + if !ok { + lr = &io.LimitedReader{r, limit} + return lr, limit + } + + if limit < lr.N { + // Update existing. + lr.N = limit + } + + return lr, lr.N +} + +// GetReadCloserLimit attempts to unwrap io.ReadCloser to access its io.LimitedReader with limit. 
+func GetReadCloserLimit(rc io.ReadCloser) (*io.LimitedReader, int64) { + rct, ok := rc.(*ReadCloserType) + if !ok { + return nil, -1 + } + lr, ok := rct.Reader.(*io.LimitedReader) + if !ok { + return nil, -1 + } + return lr, lr.N +} + +// UpdateReadCloserLimit attempts to update the limit of a readcloser for existing, newly wrapping if necessary. +func UpdateReadCloserLimit(rc io.ReadCloser, limit int64) (io.ReadCloser, *io.LimitedReader, int64) { + + // Check for our wrapped ReadCloserType. + if rct, ok := rc.(*ReadCloserType); ok { + + // Attempt to update existing wrapped limit reader. + if lr, ok := rct.Reader.(*io.LimitedReader); ok { + + if limit < lr.N { + // Update existing. + lr.N = limit + } + + return rct, lr, lr.N + } + + // Wrap the reader type with new limit. + lr := &io.LimitedReader{rct.Reader, limit} + rct.Reader = lr + + return rct, lr, lr.N + } + + // Wrap separated types. + rct := &ReadCloserType{ + Reader: rc, + Closer: rc, + } + + // Wrap separated reader part with limit. + lr := &io.LimitedReader{rct.Reader, limit} + rct.Reader = lr + + return rct, lr, lr.N +} diff --git a/vendor/codeberg.org/gruf/go-iotools/read.go b/vendor/codeberg.org/gruf/go-iotools/read.go index 6ce2789a7..13c5e21ac 100644 --- a/vendor/codeberg.org/gruf/go-iotools/read.go +++ b/vendor/codeberg.org/gruf/go-iotools/read.go @@ -4,6 +4,16 @@ import ( "io" ) +// ReadCloserType implements io.ReadCloser +// by combining the two underlying interfaces, +// while providing an exported type to still +// access the underlying original io.Reader or +// io.Closer separately (e.g. without wrapping). +type ReadCloserType struct { + io.Reader + io.Closer +} + // ReaderFunc is a function signature which allows // a function to implement the io.Reader type. type ReaderFunc func([]byte) (int, error) @@ -22,15 +32,10 @@ func (rf ReaderFromFunc) ReadFrom(r io.Reader) (int64, error) { // ReadCloser wraps an io.Reader and io.Closer in order to implement io.ReadCloser. 
func ReadCloser(r io.Reader, c io.Closer) io.ReadCloser { - return &struct { - io.Reader - io.Closer - }{r, c} + return &ReadCloserType{r, c} } -// NopReadCloser wraps an io.Reader to implement io.ReadCloser with empty io.Closer implementation. +// NopReadCloser wraps io.Reader with NopCloser{} in ReadCloserType. func NopReadCloser(r io.Reader) io.ReadCloser { - return ReadCloser(r, CloserFunc(func() error { - return nil - })) + return &ReadCloserType{r, NopCloser{}} } diff --git a/vendor/codeberg.org/gruf/go-iotools/size.go b/vendor/codeberg.org/gruf/go-iotools/size.go new file mode 100644 index 000000000..f3841facf --- /dev/null +++ b/vendor/codeberg.org/gruf/go-iotools/size.go @@ -0,0 +1,25 @@ +package iotools + +type Sizer interface { + Size() int64 +} + +// SizerFunc is a function signature which allows +// a function to implement the Sizer type. +type SizerFunc func() int64 + +func (s SizerFunc) Size() int64 { + return s() +} + +type Lengther interface { + Len() int +} + +// LengthFunc is a function signature which allows +// a function to implement the Lengther type. +type LengthFunc func() int + +func (l LengthFunc) Len() int { + return l() +} diff --git a/vendor/codeberg.org/gruf/go-iotools/write.go b/vendor/codeberg.org/gruf/go-iotools/write.go index e1b44db24..2037c42af 100644 --- a/vendor/codeberg.org/gruf/go-iotools/write.go +++ b/vendor/codeberg.org/gruf/go-iotools/write.go @@ -28,7 +28,10 @@ func WriteCloser(w io.Writer, c io.Closer) io.WriteCloser { // NopWriteCloser wraps an io.Writer to implement io.WriteCloser with empty io.Closer implementation. func NopWriteCloser(w io.Writer) io.WriteCloser { - return WriteCloser(w, CloserFunc(func() error { - return nil - })) + return &nopWriteCloser{w} } + +// nopWriteCloser implements io.WriteCloser with a no-op Close(). 
+type nopWriteCloser struct{ io.Writer } + +func (wc *nopWriteCloser) Close() error { return nil } diff --git a/vendor/codeberg.org/gruf/go-mimetypes/README.md b/vendor/codeberg.org/gruf/go-mimetypes/README.md new file mode 100644 index 000000000..faf0fe4bb --- /dev/null +++ b/vendor/codeberg.org/gruf/go-mimetypes/README.md @@ -0,0 +1,5 @@ +# go-mimetypes + +A generated lookup map of file extensions to mimetypes, from data provided at: https://raw.githubusercontent.com/micnic/mime.json/master/index.json + +This allows determining mimetype without relying on OS mimetype lookups. \ No newline at end of file diff --git a/vendor/codeberg.org/gruf/go-mimetypes/get-mime-types.sh b/vendor/codeberg.org/gruf/go-mimetypes/get-mime-types.sh new file mode 100644 index 000000000..0ee088c73 --- /dev/null +++ b/vendor/codeberg.org/gruf/go-mimetypes/get-mime-types.sh @@ -0,0 +1,42 @@ +#!/bin/sh + +# Mime types JSON source +URL='https://raw.githubusercontent.com/micnic/mime.json/master/index.json' + +# Define intro to file +FILE=' +// This is an automatically generated file, do not edit +package mimetypes + + +// MimeTypes is a map of file extensions to mime types. 
+var MimeTypes = map[string]string{ +' + +# Set break on new-line +IFS=' +' + +for line in $(curl -fL "$URL" | grep -E '".+"\s*:\s*".+"'); do + # Trim final whitespace + line=$(echo "$line" | sed -e 's|\s*$||') + + # Ensure it ends in a comma + [ "${line%,}" = "$line" ] && line="${line}," + + # Add to file + FILE="${FILE}${line} +" +done + +# Add final statement to file +FILE="${FILE} +} + +" + +# Write to file +echo "$FILE" > 'mime.gen.go' + +# Check for valid go +gofumpt -w 'mime.gen.go' \ No newline at end of file diff --git a/vendor/codeberg.org/gruf/go-mimetypes/mime.gen.go b/vendor/codeberg.org/gruf/go-mimetypes/mime.gen.go new file mode 100644 index 000000000..54e5b2249 --- /dev/null +++ b/vendor/codeberg.org/gruf/go-mimetypes/mime.gen.go @@ -0,0 +1,1207 @@ +// This is an automatically generated file, do not edit +package mimetypes + +// MimeTypes is a map of file extensions to mime types. +var MimeTypes = map[string]string{ + "123": "application/vnd.lotus-1-2-3", + "1km": "application/vnd.1000minds.decision-model+xml", + "3dml": "text/vnd.in3d.3dml", + "3ds": "image/x-3ds", + "3g2": "video/3gpp2", + "3gp": "video/3gpp", + "3gpp": "video/3gpp", + "3mf": "model/3mf", + "7z": "application/x-7z-compressed", + "aab": "application/x-authorware-bin", + "aac": "audio/x-aac", + "aam": "application/x-authorware-map", + "aas": "application/x-authorware-seg", + "abw": "application/x-abiword", + "ac": "application/vnd.nokia.n-gage.ac+xml", + "acc": "application/vnd.americandynamics.acc", + "ace": "application/x-ace-compressed", + "acu": "application/vnd.acucobol", + "acutc": "application/vnd.acucorp", + "adp": "audio/adpcm", + "adts": "audio/aac", + "aep": "application/vnd.audiograph", + "afm": "application/x-font-type1", + "afp": "application/vnd.ibm.modcap", + "age": "application/vnd.age", + "ahead": "application/vnd.ahead.space", + "ai": "application/postscript", + "aif": "audio/x-aiff", + "aifc": "audio/x-aiff", + "aiff": "audio/x-aiff", + "air": 
"application/vnd.adobe.air-application-installer-package+zip", + "ait": "application/vnd.dvb.ait", + "ami": "application/vnd.amiga.ami", + "aml": "application/automationml-aml+xml", + "amlx": "application/automationml-amlx+zip", + "amr": "audio/amr", + "apk": "application/vnd.android.package-archive", + "apng": "image/apng", + "appcache": "text/cache-manifest", + "appinstaller": "application/appinstaller", + "application": "application/x-ms-application", + "appx": "application/appx", + "appxbundle": "application/appxbundle", + "apr": "application/vnd.lotus-approach", + "arc": "application/x-freearc", + "arj": "application/x-arj", + "asc": "application/pgp-signature", + "asf": "video/x-ms-asf", + "asm": "text/x-asm", + "aso": "application/vnd.accpac.simply.aso", + "asx": "video/x-ms-asf", + "atc": "application/vnd.acucorp", + "atom": "application/atom+xml", + "atomcat": "application/atomcat+xml", + "atomdeleted": "application/atomdeleted+xml", + "atomsvc": "application/atomsvc+xml", + "atx": "application/vnd.antix.game-component", + "au": "audio/basic", + "avci": "image/avci", + "avcs": "image/avcs", + "avi": "video/x-msvideo", + "avif": "image/avif", + "aw": "application/applixware", + "azf": "application/vnd.airzip.filesecure.azf", + "azs": "application/vnd.airzip.filesecure.azs", + "azv": "image/vnd.airzip.accelerator.azv", + "azw": "application/vnd.amazon.ebook", + "b16": "image/vnd.pco.b16", + "bat": "application/x-msdownload", + "bcpio": "application/x-bcpio", + "bdf": "application/x-font-bdf", + "bdm": "application/vnd.syncml.dm+wbxml", + "bdoc": "application/x-bdoc", + "bed": "application/vnd.realvnc.bed", + "bh2": "application/vnd.fujitsu.oasysprs", + "bin": "application/octet-stream", + "blb": "application/x-blorb", + "blorb": "application/x-blorb", + "bmi": "application/vnd.bmi", + "bmml": "application/vnd.balsamiq.bmml+xml", + "bmp": "image/x-ms-bmp", + "book": "application/vnd.framemaker", + "box": "application/vnd.previewsystems.box", + "boz": 
"application/x-bzip2", + "bpk": "application/octet-stream", + "bsp": "model/vnd.valve.source.compiled-map", + "btf": "image/prs.btif", + "btif": "image/prs.btif", + "buffer": "application/octet-stream", + "bz": "application/x-bzip", + "bz2": "application/x-bzip2", + "c": "text/x-c", + "c11amc": "application/vnd.cluetrust.cartomobile-config", + "c11amz": "application/vnd.cluetrust.cartomobile-config-pkg", + "c4d": "application/vnd.clonk.c4group", + "c4f": "application/vnd.clonk.c4group", + "c4g": "application/vnd.clonk.c4group", + "c4p": "application/vnd.clonk.c4group", + "c4u": "application/vnd.clonk.c4group", + "cab": "application/vnd.ms-cab-compressed", + "caf": "audio/x-caf", + "cap": "application/vnd.tcpdump.pcap", + "car": "application/vnd.curl.car", + "cat": "application/vnd.ms-pki.seccat", + "cb7": "application/x-cbr", + "cba": "application/x-cbr", + "cbr": "application/x-cbr", + "cbt": "application/x-cbr", + "cbz": "application/x-cbr", + "cc": "text/x-c", + "cco": "application/x-cocoa", + "cct": "application/x-director", + "ccxml": "application/ccxml+xml", + "cdbcmsg": "application/vnd.contact.cmsg", + "cdf": "application/x-netcdf", + "cdfx": "application/cdfx+xml", + "cdkey": "application/vnd.mediastation.cdkey", + "cdmia": "application/cdmi-capability", + "cdmic": "application/cdmi-container", + "cdmid": "application/cdmi-domain", + "cdmio": "application/cdmi-object", + "cdmiq": "application/cdmi-queue", + "cdx": "chemical/x-cdx", + "cdxml": "application/vnd.chemdraw+xml", + "cdy": "application/vnd.cinderella", + "cer": "application/pkix-cert", + "cfs": "application/x-cfs-compressed", + "cgm": "image/cgm", + "chat": "application/x-chat", + "chm": "application/vnd.ms-htmlhelp", + "chrt": "application/vnd.kde.kchart", + "cif": "chemical/x-cif", + "cii": "application/vnd.anser-web-certificate-issue-initiation", + "cil": "application/vnd.ms-artgalry", + "cjs": "application/node", + "cla": "application/vnd.claymore", + "class": "application/java-vm", + "cld": 
"model/vnd.cld", + "clkk": "application/vnd.crick.clicker.keyboard", + "clkp": "application/vnd.crick.clicker.palette", + "clkt": "application/vnd.crick.clicker.template", + "clkw": "application/vnd.crick.clicker.wordbank", + "clkx": "application/vnd.crick.clicker", + "clp": "application/x-msclip", + "cmc": "application/vnd.cosmocaller", + "cmdf": "chemical/x-cmdf", + "cml": "chemical/x-cml", + "cmp": "application/vnd.yellowriver-custom-menu", + "cmx": "image/x-cmx", + "cod": "application/vnd.rim.cod", + "coffee": "text/coffeescript", + "com": "application/x-msdownload", + "conf": "text/plain", + "cpio": "application/x-cpio", + "cpl": "application/cpl+xml", + "cpp": "text/x-c", + "cpt": "application/mac-compactpro", + "crd": "application/x-mscardfile", + "crl": "application/pkix-crl", + "crt": "application/x-x509-ca-cert", + "crx": "application/x-chrome-extension", + "cryptonote": "application/vnd.rig.cryptonote", + "csh": "application/x-csh", + "csl": "application/vnd.citationstyles.style+xml", + "csml": "chemical/x-csml", + "csp": "application/vnd.commonspace", + "css": "text/css", + "cst": "application/x-director", + "csv": "text/csv", + "cu": "application/cu-seeme", + "curl": "text/vnd.curl", + "cwl": "application/cwl", + "cww": "application/prs.cww", + "cxt": "application/x-director", + "cxx": "text/x-c", + "dae": "model/vnd.collada+xml", + "daf": "application/vnd.mobius.daf", + "dart": "application/vnd.dart", + "dataless": "application/vnd.fdsn.seed", + "davmount": "application/davmount+xml", + "dbf": "application/vnd.dbf", + "dbk": "application/docbook+xml", + "dcr": "application/x-director", + "dcurl": "text/vnd.curl.dcurl", + "dd2": "application/vnd.oma.dd2+xml", + "ddd": "application/vnd.fujixerox.ddd", + "ddf": "application/vnd.syncml.dmddf+xml", + "dds": "image/vnd.ms-dds", + "deb": "application/x-debian-package", + "def": "text/plain", + "deploy": "application/octet-stream", + "der": "application/x-x509-ca-cert", + "dfac": 
"application/vnd.dreamfactory", + "dgc": "application/x-dgc-compressed", + "dib": "image/bmp", + "dic": "text/x-c", + "dir": "application/x-director", + "dis": "application/vnd.mobius.dis", + "disposition-notification": "message/disposition-notification", + "dist": "application/octet-stream", + "distz": "application/octet-stream", + "djv": "image/vnd.djvu", + "djvu": "image/vnd.djvu", + "dll": "application/x-msdownload", + "dmg": "application/x-apple-diskimage", + "dmp": "application/vnd.tcpdump.pcap", + "dms": "application/octet-stream", + "dna": "application/vnd.dna", + "doc": "application/msword", + "docm": "application/vnd.ms-word.document.macroenabled.12", + "docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document", + "dot": "application/msword", + "dotm": "application/vnd.ms-word.template.macroenabled.12", + "dotx": "application/vnd.openxmlformats-officedocument.wordprocessingml.template", + "dp": "application/vnd.osgi.dp", + "dpg": "application/vnd.dpgraph", + "dpx": "image/dpx", + "dra": "audio/vnd.dra", + "drle": "image/dicom-rle", + "dsc": "text/prs.lines.tag", + "dssc": "application/dssc+der", + "dtb": "application/x-dtbook+xml", + "dtd": "application/xml-dtd", + "dts": "audio/vnd.dts", + "dtshd": "audio/vnd.dts.hd", + "dump": "application/octet-stream", + "dvb": "video/vnd.dvb.file", + "dvi": "application/x-dvi", + "dwd": "application/atsc-dwd+xml", + "dwf": "model/vnd.dwf", + "dwg": "image/vnd.dwg", + "dxf": "image/vnd.dxf", + "dxp": "application/vnd.spotfire.dxp", + "dxr": "application/x-director", + "ear": "application/java-archive", + "ecelp4800": "audio/vnd.nuera.ecelp4800", + "ecelp7470": "audio/vnd.nuera.ecelp7470", + "ecelp9600": "audio/vnd.nuera.ecelp9600", + "ecma": "application/ecmascript", + "edm": "application/vnd.novadigm.edm", + "edx": "application/vnd.novadigm.edx", + "efif": "application/vnd.picsel", + "ei6": "application/vnd.pg.osasli", + "elc": "application/octet-stream", + "emf": "image/emf", + "eml": 
"message/rfc822", + "emma": "application/emma+xml", + "emotionml": "application/emotionml+xml", + "emz": "application/x-msmetafile", + "eol": "audio/vnd.digital-winds", + "eot": "application/vnd.ms-fontobject", + "eps": "application/postscript", + "epub": "application/epub+zip", + "es3": "application/vnd.eszigno3+xml", + "esa": "application/vnd.osgi.subsystem", + "esf": "application/vnd.epson.esf", + "et3": "application/vnd.eszigno3+xml", + "etx": "text/x-setext", + "eva": "application/x-eva", + "evy": "application/x-envoy", + "exe": "application/x-msdownload", + "exi": "application/exi", + "exp": "application/express", + "exr": "image/aces", + "ext": "application/vnd.novadigm.ext", + "ez": "application/andrew-inset", + "ez2": "application/vnd.ezpix-album", + "ez3": "application/vnd.ezpix-package", + "f": "text/x-fortran", + "f4v": "video/x-f4v", + "f77": "text/x-fortran", + "f90": "text/x-fortran", + "fbs": "image/vnd.fastbidsheet", + "fcdt": "application/vnd.adobe.formscentral.fcdt", + "fcs": "application/vnd.isac.fcs", + "fdf": "application/vnd.fdf", + "fdt": "application/fdt+xml", + "fe_launch": "application/vnd.denovo.fcselayout-link", + "fg5": "application/vnd.fujitsu.oasysgp", + "fgd": "application/x-director", + "fh": "image/x-freehand", + "fh4": "image/x-freehand", + "fh5": "image/x-freehand", + "fh7": "image/x-freehand", + "fhc": "image/x-freehand", + "fig": "application/x-xfig", + "fits": "image/fits", + "flac": "audio/x-flac", + "fli": "video/x-fli", + "flo": "application/vnd.micrografx.flo", + "flv": "video/x-flv", + "flw": "application/vnd.kde.kivio", + "flx": "text/vnd.fmi.flexstor", + "fly": "text/vnd.fly", + "fm": "application/vnd.framemaker", + "fnc": "application/vnd.frogans.fnc", + "fo": "application/vnd.software602.filler.form+xml", + "for": "text/x-fortran", + "fpx": "image/vnd.fpx", + "frame": "application/vnd.framemaker", + "fsc": "application/vnd.fsc.weblaunch", + "fst": "image/vnd.fst", + "ftc": "application/vnd.fluxtime.clip", + "fti": 
"application/vnd.anser-web-funds-transfer-initiation", + "fvt": "video/vnd.fvt", + "fxp": "application/vnd.adobe.fxp", + "fxpl": "application/vnd.adobe.fxp", + "fzs": "application/vnd.fuzzysheet", + "g2w": "application/vnd.geoplan", + "g3": "image/g3fax", + "g3w": "application/vnd.geospace", + "gac": "application/vnd.groove-account", + "gam": "application/x-tads", + "gbr": "application/rpki-ghostbusters", + "gca": "application/x-gca-compressed", + "gdl": "model/vnd.gdl", + "gdoc": "application/vnd.google-apps.document", + "ged": "text/vnd.familysearch.gedcom", + "geo": "application/vnd.dynageo", + "geojson": "application/geo+json", + "gex": "application/vnd.geometry-explorer", + "ggb": "application/vnd.geogebra.file", + "ggt": "application/vnd.geogebra.tool", + "ghf": "application/vnd.groove-help", + "gif": "image/gif", + "gim": "application/vnd.groove-identity-message", + "glb": "model/gltf-binary", + "gltf": "model/gltf+json", + "gml": "application/gml+xml", + "gmx": "application/vnd.gmx", + "gnumeric": "application/x-gnumeric", + "gph": "application/vnd.flographit", + "gpx": "application/gpx+xml", + "gqf": "application/vnd.grafeq", + "gqs": "application/vnd.grafeq", + "gram": "application/srgs", + "gramps": "application/x-gramps-xml", + "gre": "application/vnd.geometry-explorer", + "grv": "application/vnd.groove-injector", + "grxml": "application/srgs+xml", + "gsf": "application/x-font-ghostscript", + "gsheet": "application/vnd.google-apps.spreadsheet", + "gslides": "application/vnd.google-apps.presentation", + "gtar": "application/x-gtar", + "gtm": "application/vnd.groove-tool-message", + "gtw": "model/vnd.gtw", + "gv": "text/vnd.graphviz", + "gxf": "application/gxf", + "gxt": "application/vnd.geonext", + "gz": "application/gzip", + "h": "text/x-c", + "h261": "video/h261", + "h263": "video/h263", + "h264": "video/h264", + "hal": "application/vnd.hal+xml", + "hbci": "application/vnd.hbci", + "hbs": "text/x-handlebars-template", + "hdd": 
"application/x-virtualbox-hdd", + "hdf": "application/x-hdf", + "heic": "image/heic", + "heics": "image/heic-sequence", + "heif": "image/heif", + "heifs": "image/heif-sequence", + "hej2": "image/hej2k", + "held": "application/atsc-held+xml", + "hh": "text/x-c", + "hjson": "application/hjson", + "hlp": "application/winhlp", + "hpgl": "application/vnd.hp-hpgl", + "hpid": "application/vnd.hp-hpid", + "hps": "application/vnd.hp-hps", + "hqx": "application/mac-binhex40", + "hsj2": "image/hsj2", + "htc": "text/x-component", + "htke": "application/vnd.kenameaapp", + "htm": "text/html", + "html": "text/html", + "hvd": "application/vnd.yamaha.hv-dic", + "hvp": "application/vnd.yamaha.hv-voice", + "hvs": "application/vnd.yamaha.hv-script", + "i2g": "application/vnd.intergeo", + "icc": "application/vnd.iccprofile", + "ice": "x-conference/x-cooltalk", + "icm": "application/vnd.iccprofile", + "ico": "image/x-icon", + "ics": "text/calendar", + "ief": "image/ief", + "ifb": "text/calendar", + "ifm": "application/vnd.shana.informed.formdata", + "iges": "model/iges", + "igl": "application/vnd.igloader", + "igm": "application/vnd.insors.igm", + "igs": "model/iges", + "igx": "application/vnd.micrografx.igx", + "iif": "application/vnd.shana.informed.interchange", + "img": "application/octet-stream", + "imp": "application/vnd.accpac.simply.imp", + "ims": "application/vnd.ms-ims", + "in": "text/plain", + "ini": "text/plain", + "ink": "application/inkml+xml", + "inkml": "application/inkml+xml", + "install": "application/x-install-instructions", + "iota": "application/vnd.astraea-software.iota", + "ipfix": "application/ipfix", + "ipk": "application/vnd.shana.informed.package", + "irm": "application/vnd.ibm.rights-management", + "irp": "application/vnd.irepository.package+xml", + "iso": "application/x-iso9660-image", + "itp": "application/vnd.shana.informed.formtemplate", + "its": "application/its+xml", + "ivp": "application/vnd.immervision-ivp", + "ivu": "application/vnd.immervision-ivu", 
+ "jad": "text/vnd.sun.j2me.app-descriptor", + "jade": "text/jade", + "jam": "application/vnd.jam", + "jar": "application/java-archive", + "jardiff": "application/x-java-archive-diff", + "java": "text/x-java-source", + "jhc": "image/jphc", + "jisp": "application/vnd.jisp", + "jls": "image/jls", + "jlt": "application/vnd.hp-jlyt", + "jng": "image/x-jng", + "jnlp": "application/x-java-jnlp-file", + "joda": "application/vnd.joost.joda-archive", + "jp2": "image/jp2", + "jpe": "image/jpeg", + "jpeg": "image/jpeg", + "jpf": "image/jpx", + "jpg": "image/jpeg", + "jpg2": "image/jp2", + "jpgm": "video/jpm", + "jpgv": "video/jpeg", + "jph": "image/jph", + "jpm": "video/jpm", + "jpx": "image/jpx", + "js": "text/javascript", + "json": "application/json", + "json5": "application/json5", + "jsonld": "application/ld+json", + "jsonml": "application/jsonml+json", + "jsx": "text/jsx", + "jt": "model/jt", + "jxr": "image/jxr", + "jxra": "image/jxra", + "jxrs": "image/jxrs", + "jxs": "image/jxs", + "jxsc": "image/jxsc", + "jxsi": "image/jxsi", + "jxss": "image/jxss", + "kar": "audio/midi", + "karbon": "application/vnd.kde.karbon", + "kdbx": "application/x-keepass2", + "key": "application/x-iwork-keynote-sffkey", + "kfo": "application/vnd.kde.kformula", + "kia": "application/vnd.kidspiration", + "kml": "application/vnd.google-earth.kml+xml", + "kmz": "application/vnd.google-earth.kmz", + "kne": "application/vnd.kinar", + "knp": "application/vnd.kinar", + "kon": "application/vnd.kde.kontour", + "kpr": "application/vnd.kde.kpresenter", + "kpt": "application/vnd.kde.kpresenter", + "kpxx": "application/vnd.ds-keypoint", + "ksp": "application/vnd.kde.kspread", + "ktr": "application/vnd.kahootz", + "ktx": "image/ktx", + "ktx2": "image/ktx2", + "ktz": "application/vnd.kahootz", + "kwd": "application/vnd.kde.kword", + "kwt": "application/vnd.kde.kword", + "lasxml": "application/vnd.las.las+xml", + "latex": "application/x-latex", + "lbd": "application/vnd.llamagraphics.life-balance.desktop", + 
"lbe": "application/vnd.llamagraphics.life-balance.exchange+xml", + "les": "application/vnd.hhe.lesson-player", + "less": "text/less", + "lgr": "application/lgr+xml", + "lha": "application/x-lzh-compressed", + "link66": "application/vnd.route66.link66+xml", + "list": "text/plain", + "list3820": "application/vnd.ibm.modcap", + "listafp": "application/vnd.ibm.modcap", + "litcoffee": "text/coffeescript", + "lnk": "application/x-ms-shortcut", + "log": "text/plain", + "lostxml": "application/lost+xml", + "lrf": "application/octet-stream", + "lrm": "application/vnd.ms-lrm", + "ltf": "application/vnd.frogans.ltf", + "lua": "text/x-lua", + "luac": "application/x-lua-bytecode", + "lvp": "audio/vnd.lucent.voice", + "lwp": "application/vnd.lotus-wordpro", + "lzh": "application/x-lzh-compressed", + "m13": "application/x-msmediaview", + "m14": "application/x-msmediaview", + "m1v": "video/mpeg", + "m21": "application/mp21", + "m2a": "audio/mpeg", + "m2v": "video/mpeg", + "m3a": "audio/mpeg", + "m3u": "audio/x-mpegurl", + "m3u8": "application/vnd.apple.mpegurl", + "m4a": "audio/x-m4a", + "m4p": "application/mp4", + "m4s": "video/iso.segment", + "m4u": "video/vnd.mpegurl", + "m4v": "video/x-m4v", + "ma": "application/mathematica", + "mads": "application/mads+xml", + "maei": "application/mmt-aei+xml", + "mag": "application/vnd.ecowin.chart", + "maker": "application/vnd.framemaker", + "man": "text/troff", + "manifest": "text/cache-manifest", + "map": "application/json", + "mar": "application/octet-stream", + "markdown": "text/markdown", + "mathml": "application/mathml+xml", + "mb": "application/mathematica", + "mbk": "application/vnd.mobius.mbk", + "mbox": "application/mbox", + "mc1": "application/vnd.medcalcdata", + "mcd": "application/vnd.mcd", + "mcurl": "text/vnd.curl.mcurl", + "md": "text/markdown", + "mdb": "application/x-msaccess", + "mdi": "image/vnd.ms-modi", + "mdx": "text/mdx", + "me": "text/troff", + "mesh": "model/mesh", + "meta4": "application/metalink4+xml", + 
"metalink": "application/metalink+xml", + "mets": "application/mets+xml", + "mfm": "application/vnd.mfmp", + "mft": "application/rpki-manifest", + "mgp": "application/vnd.osgeo.mapguide.package", + "mgz": "application/vnd.proteus.magazine", + "mid": "audio/midi", + "midi": "audio/midi", + "mie": "application/x-mie", + "mif": "application/vnd.mif", + "mime": "message/rfc822", + "mj2": "video/mj2", + "mjp2": "video/mj2", + "mjs": "text/javascript", + "mk3d": "video/x-matroska", + "mka": "audio/x-matroska", + "mkd": "text/x-markdown", + "mks": "video/x-matroska", + "mkv": "video/x-matroska", + "mlp": "application/vnd.dolby.mlp", + "mmd": "application/vnd.chipnuts.karaoke-mmd", + "mmf": "application/vnd.smaf", + "mml": "text/mathml", + "mmr": "image/vnd.fujixerox.edmics-mmr", + "mng": "video/x-mng", + "mny": "application/x-msmoney", + "mobi": "application/x-mobipocket-ebook", + "mods": "application/mods+xml", + "mov": "video/quicktime", + "movie": "video/x-sgi-movie", + "mp2": "audio/mpeg", + "mp21": "application/mp21", + "mp2a": "audio/mpeg", + "mp3": "audio/mpeg", + "mp4": "video/mp4", + "mp4a": "audio/mp4", + "mp4s": "application/mp4", + "mp4v": "video/mp4", + "mpc": "application/vnd.mophun.certificate", + "mpd": "application/dash+xml", + "mpe": "video/mpeg", + "mpeg": "video/mpeg", + "mpf": "application/media-policy-dataset+xml", + "mpg": "video/mpeg", + "mpg4": "video/mp4", + "mpga": "audio/mpeg", + "mpkg": "application/vnd.apple.installer+xml", + "mpm": "application/vnd.blueice.multipass", + "mpn": "application/vnd.mophun.application", + "mpp": "application/vnd.ms-project", + "mpt": "application/vnd.ms-project", + "mpy": "application/vnd.ibm.minipay", + "mqy": "application/vnd.mobius.mqy", + "mrc": "application/marc", + "mrcx": "application/marcxml+xml", + "ms": "text/troff", + "mscml": "application/mediaservercontrol+xml", + "mseed": "application/vnd.fdsn.mseed", + "mseq": "application/vnd.mseq", + "msf": "application/vnd.epson.msf", + "msg": 
"application/vnd.ms-outlook", + "msh": "model/mesh", + "msi": "application/x-msdownload", + "msix": "application/msix", + "msixbundle": "application/msixbundle", + "msl": "application/vnd.mobius.msl", + "msm": "application/octet-stream", + "msp": "application/octet-stream", + "msty": "application/vnd.muvee.style", + "mtl": "model/mtl", + "mts": "model/vnd.mts", + "mus": "application/vnd.musician", + "musd": "application/mmt-usd+xml", + "musicxml": "application/vnd.recordare.musicxml+xml", + "mvb": "application/x-msmediaview", + "mvt": "application/vnd.mapbox-vector-tile", + "mwf": "application/vnd.mfer", + "mxf": "application/mxf", + "mxl": "application/vnd.recordare.musicxml", + "mxmf": "audio/mobile-xmf", + "mxml": "application/xv+xml", + "mxs": "application/vnd.triscape.mxs", + "mxu": "video/vnd.mpegurl", + "n-gage": "application/vnd.nokia.n-gage.symbian.install", + "n3": "text/n3", + "nb": "application/mathematica", + "nbp": "application/vnd.wolfram.player", + "nc": "application/x-netcdf", + "ncx": "application/x-dtbncx+xml", + "nfo": "text/x-nfo", + "ngdat": "application/vnd.nokia.n-gage.data", + "nitf": "application/vnd.nitf", + "nlu": "application/vnd.neurolanguage.nlu", + "nml": "application/vnd.enliven", + "nnd": "application/vnd.noblenet-directory", + "nns": "application/vnd.noblenet-sealer", + "nnw": "application/vnd.noblenet-web", + "npx": "image/vnd.net-fpx", + "nq": "application/n-quads", + "nsc": "application/x-conference", + "nsf": "application/vnd.lotus-notes", + "nt": "application/n-triples", + "ntf": "application/vnd.nitf", + "numbers": "application/x-iwork-numbers-sffnumbers", + "nzb": "application/x-nzb", + "oa2": "application/vnd.fujitsu.oasys2", + "oa3": "application/vnd.fujitsu.oasys3", + "oas": "application/vnd.fujitsu.oasys", + "obd": "application/x-msbinder", + "obgx": "application/vnd.openblox.game+xml", + "obj": "model/obj", + "oda": "application/oda", + "odb": "application/vnd.oasis.opendocument.database", + "odc": 
"application/vnd.oasis.opendocument.chart", + "odf": "application/vnd.oasis.opendocument.formula", + "odft": "application/vnd.oasis.opendocument.formula-template", + "odg": "application/vnd.oasis.opendocument.graphics", + "odi": "application/vnd.oasis.opendocument.image", + "odm": "application/vnd.oasis.opendocument.text-master", + "odp": "application/vnd.oasis.opendocument.presentation", + "ods": "application/vnd.oasis.opendocument.spreadsheet", + "odt": "application/vnd.oasis.opendocument.text", + "oga": "audio/ogg", + "ogex": "model/vnd.opengex", + "ogg": "audio/ogg", + "ogv": "video/ogg", + "ogx": "application/ogg", + "omdoc": "application/omdoc+xml", + "onepkg": "application/onenote", + "onetmp": "application/onenote", + "onetoc": "application/onenote", + "onetoc2": "application/onenote", + "opf": "application/oebps-package+xml", + "opml": "text/x-opml", + "oprc": "application/vnd.palm", + "opus": "audio/ogg", + "org": "text/x-org", + "osf": "application/vnd.yamaha.openscoreformat", + "osfpvg": "application/vnd.yamaha.openscoreformat.osfpvg+xml", + "osm": "application/vnd.openstreetmap.data+xml", + "otc": "application/vnd.oasis.opendocument.chart-template", + "otf": "font/otf", + "otg": "application/vnd.oasis.opendocument.graphics-template", + "oth": "application/vnd.oasis.opendocument.text-web", + "oti": "application/vnd.oasis.opendocument.image-template", + "otp": "application/vnd.oasis.opendocument.presentation-template", + "ots": "application/vnd.oasis.opendocument.spreadsheet-template", + "ott": "application/vnd.oasis.opendocument.text-template", + "ova": "application/x-virtualbox-ova", + "ovf": "application/x-virtualbox-ovf", + "owl": "application/rdf+xml", + "oxps": "application/oxps", + "oxt": "application/vnd.openofficeorg.extension", + "p": "text/x-pascal", + "p10": "application/pkcs10", + "p12": "application/x-pkcs12", + "p7b": "application/x-pkcs7-certificates", + "p7c": "application/pkcs7-mime", + "p7m": "application/pkcs7-mime", + "p7r": 
"application/x-pkcs7-certreqresp", + "p7s": "application/pkcs7-signature", + "p8": "application/pkcs8", + "pac": "application/x-ns-proxy-autoconfig", + "pages": "application/x-iwork-pages-sffpages", + "pas": "text/x-pascal", + "paw": "application/vnd.pawaafile", + "pbd": "application/vnd.powerbuilder6", + "pbm": "image/x-portable-bitmap", + "pcap": "application/vnd.tcpdump.pcap", + "pcf": "application/x-font-pcf", + "pcl": "application/vnd.hp-pcl", + "pclxl": "application/vnd.hp-pclxl", + "pct": "image/x-pict", + "pcurl": "application/vnd.curl.pcurl", + "pcx": "image/x-pcx", + "pdb": "application/x-pilot", + "pde": "text/x-processing", + "pdf": "application/pdf", + "pem": "application/x-x509-ca-cert", + "pfa": "application/x-font-type1", + "pfb": "application/x-font-type1", + "pfm": "application/x-font-type1", + "pfr": "application/font-tdpfr", + "pfx": "application/x-pkcs12", + "pgm": "image/x-portable-graymap", + "pgn": "application/x-chess-pgn", + "pgp": "application/pgp-encrypted", + "php": "application/x-httpd-php", + "pic": "image/x-pict", + "pkg": "application/octet-stream", + "pki": "application/pkixcmp", + "pkipath": "application/pkix-pkipath", + "pkpass": "application/vnd.apple.pkpass", + "pl": "application/x-perl", + "plb": "application/vnd.3gpp.pic-bw-large", + "plc": "application/vnd.mobius.plc", + "plf": "application/vnd.pocketlearn", + "pls": "application/pls+xml", + "pm": "application/x-perl", + "pml": "application/vnd.ctc-posml", + "png": "image/png", + "pnm": "image/x-portable-anymap", + "portpkg": "application/vnd.macports.portpkg", + "pot": "application/vnd.ms-powerpoint", + "potm": "application/vnd.ms-powerpoint.template.macroenabled.12", + "potx": "application/vnd.openxmlformats-officedocument.presentationml.template", + "ppam": "application/vnd.ms-powerpoint.addin.macroenabled.12", + "ppd": "application/vnd.cups-ppd", + "ppm": "image/x-portable-pixmap", + "pps": "application/vnd.ms-powerpoint", + "ppsm": 
"application/vnd.ms-powerpoint.slideshow.macroenabled.12", + "ppsx": "application/vnd.openxmlformats-officedocument.presentationml.slideshow", + "ppt": "application/vnd.ms-powerpoint", + "pptm": "application/vnd.ms-powerpoint.presentation.macroenabled.12", + "pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation", + "pqa": "application/vnd.palm", + "prc": "model/prc", + "pre": "application/vnd.lotus-freelance", + "prf": "application/pics-rules", + "provx": "application/provenance+xml", + "ps": "application/postscript", + "psb": "application/vnd.3gpp.pic-bw-small", + "psd": "image/vnd.adobe.photoshop", + "psf": "application/x-font-linux-psf", + "pskcxml": "application/pskc+xml", + "pti": "image/prs.pti", + "ptid": "application/vnd.pvi.ptid1", + "pub": "application/x-mspublisher", + "pvb": "application/vnd.3gpp.pic-bw-var", + "pwn": "application/vnd.3m.post-it-notes", + "pya": "audio/vnd.ms-playready.media.pya", + "pyo": "model/vnd.pytha.pyox", + "pyox": "model/vnd.pytha.pyox", + "pyv": "video/vnd.ms-playready.media.pyv", + "qam": "application/vnd.epson.quickanime", + "qbo": "application/vnd.intu.qbo", + "qfx": "application/vnd.intu.qfx", + "qps": "application/vnd.publishare-delta-tree", + "qt": "video/quicktime", + "qwd": "application/vnd.quark.quarkxpress", + "qwt": "application/vnd.quark.quarkxpress", + "qxb": "application/vnd.quark.quarkxpress", + "qxd": "application/vnd.quark.quarkxpress", + "qxl": "application/vnd.quark.quarkxpress", + "qxt": "application/vnd.quark.quarkxpress", + "ra": "audio/x-realaudio", + "ram": "audio/x-pn-realaudio", + "raml": "application/raml+yaml", + "rapd": "application/route-apd+xml", + "rar": "application/x-rar-compressed", + "ras": "image/x-cmu-raster", + "rcprofile": "application/vnd.ipunplugged.rcprofile", + "rdf": "application/rdf+xml", + "rdz": "application/vnd.data-vision.rdz", + "relo": "application/p2p-overlay+xml", + "rep": "application/vnd.businessobjects", + "res": 
"application/x-dtbresource+xml", + "rgb": "image/x-rgb", + "rif": "application/reginfo+xml", + "rip": "audio/vnd.rip", + "ris": "application/x-research-info-systems", + "rl": "application/resource-lists+xml", + "rlc": "image/vnd.fujixerox.edmics-rlc", + "rld": "application/resource-lists-diff+xml", + "rm": "application/vnd.rn-realmedia", + "rmi": "audio/midi", + "rmp": "audio/x-pn-realaudio-plugin", + "rms": "application/vnd.jcp.javame.midlet-rms", + "rmvb": "application/vnd.rn-realmedia-vbr", + "rnc": "application/relax-ng-compact-syntax", + "rng": "application/xml", + "roa": "application/rpki-roa", + "roff": "text/troff", + "rp9": "application/vnd.cloanto.rp9", + "rpm": "application/x-redhat-package-manager", + "rpss": "application/vnd.nokia.radio-presets", + "rpst": "application/vnd.nokia.radio-preset", + "rq": "application/sparql-query", + "rs": "application/rls-services+xml", + "rsat": "application/atsc-rsat+xml", + "rsd": "application/rsd+xml", + "rsheet": "application/urc-ressheet+xml", + "rss": "application/rss+xml", + "rtf": "text/rtf", + "rtx": "text/richtext", + "run": "application/x-makeself", + "rusd": "application/route-usd+xml", + "s": "text/x-asm", + "s3m": "audio/s3m", + "saf": "application/vnd.yamaha.smaf-audio", + "sass": "text/x-sass", + "sbml": "application/sbml+xml", + "sc": "application/vnd.ibm.secure-container", + "scd": "application/x-msschedule", + "scm": "application/vnd.lotus-screencam", + "scq": "application/scvp-cv-request", + "scs": "application/scvp-cv-response", + "scss": "text/x-scss", + "scurl": "text/vnd.curl.scurl", + "sda": "application/vnd.stardivision.draw", + "sdc": "application/vnd.stardivision.calc", + "sdd": "application/vnd.stardivision.impress", + "sdkd": "application/vnd.solent.sdkm+xml", + "sdkm": "application/vnd.solent.sdkm+xml", + "sdp": "application/sdp", + "sdw": "application/vnd.stardivision.writer", + "sea": "application/x-sea", + "see": "application/vnd.seemail", + "seed": "application/vnd.fdsn.seed", + 
"sema": "application/vnd.sema", + "semd": "application/vnd.semd", + "semf": "application/vnd.semf", + "senmlx": "application/senml+xml", + "sensmlx": "application/sensml+xml", + "ser": "application/java-serialized-object", + "setpay": "application/set-payment-initiation", + "setreg": "application/set-registration-initiation", + "sfd-hdstx": "application/vnd.hydrostatix.sof-data", + "sfs": "application/vnd.spotfire.sfs", + "sfv": "text/x-sfv", + "sgi": "image/sgi", + "sgl": "application/vnd.stardivision.writer-global", + "sgm": "text/sgml", + "sgml": "text/sgml", + "sh": "application/x-sh", + "shar": "application/x-shar", + "shex": "text/shex", + "shf": "application/shf+xml", + "shtml": "text/html", + "sid": "image/x-mrsid-image", + "sieve": "application/sieve", + "sig": "application/pgp-signature", + "sil": "audio/silk", + "silo": "model/mesh", + "sis": "application/vnd.symbian.install", + "sisx": "application/vnd.symbian.install", + "sit": "application/x-stuffit", + "sitx": "application/x-stuffitx", + "siv": "application/sieve", + "skd": "application/vnd.koan", + "skm": "application/vnd.koan", + "skp": "application/vnd.koan", + "skt": "application/vnd.koan", + "sldm": "application/vnd.ms-powerpoint.slide.macroenabled.12", + "sldx": "application/vnd.openxmlformats-officedocument.presentationml.slide", + "slim": "text/slim", + "slm": "text/slim", + "sls": "application/route-s-tsid+xml", + "slt": "application/vnd.epson.salt", + "sm": "application/vnd.stepmania.stepchart", + "smf": "application/vnd.stardivision.math", + "smi": "application/smil+xml", + "smil": "application/smil+xml", + "smv": "video/x-smv", + "smzip": "application/vnd.stepmania.package", + "snd": "audio/basic", + "snf": "application/x-font-snf", + "so": "application/octet-stream", + "spc": "application/x-pkcs7-certificates", + "spdx": "text/spdx", + "spf": "application/vnd.yamaha.smaf-phrase", + "spl": "application/x-futuresplash", + "spot": "text/vnd.in3d.spot", + "spp": 
"application/scvp-vp-response", + "spq": "application/scvp-vp-request", + "spx": "audio/ogg", + "sql": "application/x-sql", + "src": "application/x-wais-source", + "srt": "application/x-subrip", + "sru": "application/sru+xml", + "srx": "application/sparql-results+xml", + "ssdl": "application/ssdl+xml", + "sse": "application/vnd.kodak-descriptor", + "ssf": "application/vnd.epson.ssf", + "ssml": "application/ssml+xml", + "st": "application/vnd.sailingtracker.track", + "stc": "application/vnd.sun.xml.calc.template", + "std": "application/vnd.sun.xml.draw.template", + "stf": "application/vnd.wt.stf", + "sti": "application/vnd.sun.xml.impress.template", + "stk": "application/hyperstudio", + "stl": "model/stl", + "stpx": "model/step+xml", + "stpxz": "model/step-xml+zip", + "stpz": "model/step+zip", + "str": "application/vnd.pg.format", + "stw": "application/vnd.sun.xml.writer.template", + "styl": "text/stylus", + "stylus": "text/stylus", + "sub": "text/vnd.dvb.subtitle", + "sus": "application/vnd.sus-calendar", + "susp": "application/vnd.sus-calendar", + "sv4cpio": "application/x-sv4cpio", + "sv4crc": "application/x-sv4crc", + "svc": "application/vnd.dvb.service", + "svd": "application/vnd.svd", + "svg": "image/svg+xml", + "svgz": "image/svg+xml", + "swa": "application/x-director", + "swf": "application/x-shockwave-flash", + "swi": "application/vnd.aristanetworks.swi", + "swidtag": "application/swid+xml", + "sxc": "application/vnd.sun.xml.calc", + "sxd": "application/vnd.sun.xml.draw", + "sxg": "application/vnd.sun.xml.writer.global", + "sxi": "application/vnd.sun.xml.impress", + "sxm": "application/vnd.sun.xml.math", + "sxw": "application/vnd.sun.xml.writer", + "t": "text/troff", + "t3": "application/x-t3vm-image", + "t38": "image/t38", + "taglet": "application/vnd.mynfc", + "tao": "application/vnd.tao.intent-module-archive", + "tap": "image/vnd.tencent.tap", + "tar": "application/x-tar", + "tcap": "application/vnd.3gpp2.tcap", + "tcl": "application/x-tcl", + "td": 
"application/urc-targetdesc+xml", + "teacher": "application/vnd.smart.teacher", + "tei": "application/tei+xml", + "teicorpus": "application/tei+xml", + "tex": "application/x-tex", + "texi": "application/x-texinfo", + "texinfo": "application/x-texinfo", + "text": "text/plain", + "tfi": "application/thraud+xml", + "tfm": "application/x-tex-tfm", + "tfx": "image/tiff-fx", + "tga": "image/x-tga", + "thmx": "application/vnd.ms-officetheme", + "tif": "image/tiff", + "tiff": "image/tiff", + "tk": "application/x-tcl", + "tmo": "application/vnd.tmobile-livetv", + "toml": "application/toml", + "torrent": "application/x-bittorrent", + "tpl": "application/vnd.groove-tool-template", + "tpt": "application/vnd.trid.tpt", + "tr": "text/troff", + "tra": "application/vnd.trueapp", + "trig": "application/trig", + "trm": "application/x-msterminal", + "ts": "video/mp2t", + "tsd": "application/timestamped-data", + "tsv": "text/tab-separated-values", + "ttc": "font/collection", + "ttf": "font/ttf", + "ttl": "text/turtle", + "ttml": "application/ttml+xml", + "twd": "application/vnd.simtech-mindmapper", + "twds": "application/vnd.simtech-mindmapper", + "txd": "application/vnd.genomatix.tuxedo", + "txf": "application/vnd.mobius.txf", + "txt": "text/plain", + "u32": "application/x-authorware-bin", + "u3d": "model/u3d", + "u8dsn": "message/global-delivery-status", + "u8hdr": "message/global-headers", + "u8mdn": "message/global-disposition-notification", + "u8msg": "message/global", + "ubj": "application/ubjson", + "udeb": "application/x-debian-package", + "ufd": "application/vnd.ufdl", + "ufdl": "application/vnd.ufdl", + "ulx": "application/x-glulx", + "umj": "application/vnd.umajin", + "unityweb": "application/vnd.unity", + "uo": "application/vnd.uoml+xml", + "uoml": "application/vnd.uoml+xml", + "uri": "text/uri-list", + "uris": "text/uri-list", + "urls": "text/uri-list", + "usda": "model/vnd.usda", + "usdz": "model/vnd.usdz+zip", + "ustar": "application/x-ustar", + "utz": 
"application/vnd.uiq.theme", + "uu": "text/x-uuencode", + "uva": "audio/vnd.dece.audio", + "uvd": "application/vnd.dece.data", + "uvf": "application/vnd.dece.data", + "uvg": "image/vnd.dece.graphic", + "uvh": "video/vnd.dece.hd", + "uvi": "image/vnd.dece.graphic", + "uvm": "video/vnd.dece.mobile", + "uvp": "video/vnd.dece.pd", + "uvs": "video/vnd.dece.sd", + "uvt": "application/vnd.dece.ttml+xml", + "uvu": "video/vnd.uvvu.mp4", + "uvv": "video/vnd.dece.video", + "uvva": "audio/vnd.dece.audio", + "uvvd": "application/vnd.dece.data", + "uvvf": "application/vnd.dece.data", + "uvvg": "image/vnd.dece.graphic", + "uvvh": "video/vnd.dece.hd", + "uvvi": "image/vnd.dece.graphic", + "uvvm": "video/vnd.dece.mobile", + "uvvp": "video/vnd.dece.pd", + "uvvs": "video/vnd.dece.sd", + "uvvt": "application/vnd.dece.ttml+xml", + "uvvu": "video/vnd.uvvu.mp4", + "uvvv": "video/vnd.dece.video", + "uvvx": "application/vnd.dece.unspecified", + "uvvz": "application/vnd.dece.zip", + "uvx": "application/vnd.dece.unspecified", + "uvz": "application/vnd.dece.zip", + "vbox": "application/x-virtualbox-vbox", + "vbox-extpack": "application/x-virtualbox-vbox-extpack", + "vcard": "text/vcard", + "vcd": "application/x-cdlink", + "vcf": "text/x-vcard", + "vcg": "application/vnd.groove-vcard", + "vcs": "text/x-vcalendar", + "vcx": "application/vnd.vcx", + "vdi": "application/x-virtualbox-vdi", + "vds": "model/vnd.sap.vds", + "vhd": "application/x-virtualbox-vhd", + "vis": "application/vnd.visionary", + "viv": "video/vnd.vivo", + "vmdk": "application/x-virtualbox-vmdk", + "vob": "video/x-ms-vob", + "vor": "application/vnd.stardivision.writer", + "vox": "application/x-authorware-bin", + "vrml": "model/vrml", + "vsd": "application/vnd.visio", + "vsf": "application/vnd.vsf", + "vss": "application/vnd.visio", + "vst": "application/vnd.visio", + "vsw": "application/vnd.visio", + "vtf": "image/vnd.valve.source.texture", + "vtt": "text/vtt", + "vtu": "model/vnd.vtu", + "vxml": "application/voicexml+xml", + 
"w3d": "application/x-director", + "wad": "application/x-doom", + "wadl": "application/vnd.sun.wadl+xml", + "war": "application/java-archive", + "wasm": "application/wasm", + "wav": "audio/x-wav", + "wax": "audio/x-ms-wax", + "wbmp": "image/vnd.wap.wbmp", + "wbs": "application/vnd.criticaltools.wbs+xml", + "wbxml": "application/vnd.wap.wbxml", + "wcm": "application/vnd.ms-works", + "wdb": "application/vnd.ms-works", + "wdp": "image/vnd.ms-photo", + "weba": "audio/webm", + "webapp": "application/x-web-app-manifest+json", + "webm": "video/webm", + "webmanifest": "application/manifest+json", + "webp": "image/webp", + "wg": "application/vnd.pmi.widget", + "wgsl": "text/wgsl", + "wgt": "application/widget", + "wif": "application/watcherinfo+xml", + "wks": "application/vnd.ms-works", + "wm": "video/x-ms-wm", + "wma": "audio/x-ms-wma", + "wmd": "application/x-ms-wmd", + "wmf": "image/wmf", + "wml": "text/vnd.wap.wml", + "wmlc": "application/vnd.wap.wmlc", + "wmls": "text/vnd.wap.wmlscript", + "wmlsc": "application/vnd.wap.wmlscriptc", + "wmv": "video/x-ms-wmv", + "wmx": "video/x-ms-wmx", + "wmz": "application/x-msmetafile", + "woff": "font/woff", + "woff2": "font/woff2", + "wpd": "application/vnd.wordperfect", + "wpl": "application/vnd.ms-wpl", + "wps": "application/vnd.ms-works", + "wqd": "application/vnd.wqd", + "wri": "application/x-mswrite", + "wrl": "model/vrml", + "wsc": "message/vnd.wfa.wsc", + "wsdl": "application/wsdl+xml", + "wspolicy": "application/wspolicy+xml", + "wtb": "application/vnd.webturbo", + "wvx": "video/x-ms-wvx", + "x32": "application/x-authorware-bin", + "x3d": "model/x3d+xml", + "x3db": "model/x3d+fastinfoset", + "x3dbz": "model/x3d+binary", + "x3dv": "model/x3d-vrml", + "x3dvz": "model/x3d+vrml", + "x3dz": "model/x3d+xml", + "x_b": "model/vnd.parasolid.transmit.binary", + "x_t": "model/vnd.parasolid.transmit.text", + "xaml": "application/xaml+xml", + "xap": "application/x-silverlight-app", + "xar": "application/vnd.xara", + "xav": 
"application/xcap-att+xml", + "xbap": "application/x-ms-xbap", + "xbd": "application/vnd.fujixerox.docuworks.binder", + "xbm": "image/x-xbitmap", + "xca": "application/xcap-caps+xml", + "xcs": "application/calendar+xml", + "xdf": "application/xcap-diff+xml", + "xdm": "application/vnd.syncml.dm+xml", + "xdp": "application/vnd.adobe.xdp+xml", + "xdssc": "application/dssc+xml", + "xdw": "application/vnd.fujixerox.docuworks", + "xel": "application/xcap-el+xml", + "xenc": "application/xenc+xml", + "xer": "application/patch-ops-error+xml", + "xfdf": "application/xfdf", + "xfdl": "application/vnd.xfdl", + "xht": "application/xhtml+xml", + "xhtm": "application/vnd.pwg-xhtml-print+xml", + "xhtml": "application/xhtml+xml", + "xhvml": "application/xv+xml", + "xif": "image/vnd.xiff", + "xla": "application/vnd.ms-excel", + "xlam": "application/vnd.ms-excel.addin.macroenabled.12", + "xlc": "application/vnd.ms-excel", + "xlf": "application/xliff+xml", + "xlm": "application/vnd.ms-excel", + "xls": "application/vnd.ms-excel", + "xlsb": "application/vnd.ms-excel.sheet.binary.macroenabled.12", + "xlsm": "application/vnd.ms-excel.sheet.macroenabled.12", + "xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", + "xlt": "application/vnd.ms-excel", + "xltm": "application/vnd.ms-excel.template.macroenabled.12", + "xltx": "application/vnd.openxmlformats-officedocument.spreadsheetml.template", + "xlw": "application/vnd.ms-excel", + "xm": "audio/xm", + "xml": "text/xml", + "xns": "application/xcap-ns+xml", + "xo": "application/vnd.olpc-sugar", + "xop": "application/xop+xml", + "xpi": "application/x-xpinstall", + "xpl": "application/xproc+xml", + "xpm": "image/x-xpixmap", + "xpr": "application/vnd.is-xpr", + "xps": "application/vnd.ms-xpsdocument", + "xpw": "application/vnd.intercon.formnet", + "xpx": "application/vnd.intercon.formnet", + "xsd": "application/xml", + "xsf": "application/prs.xsf+xml", + "xsl": "application/xslt+xml", + "xslt": "application/xslt+xml", + 
"xsm": "application/vnd.syncml+xml", + "xspf": "application/xspf+xml", + "xul": "application/vnd.mozilla.xul+xml", + "xvm": "application/xv+xml", + "xvml": "application/xv+xml", + "xwd": "image/x-xwindowdump", + "xyz": "chemical/x-xyz", + "xz": "application/x-xz", + "yaml": "text/yaml", + "yang": "application/yang", + "yin": "application/yin+xml", + "yml": "text/yaml", + "ymp": "text/x-suse-ymp", + "z1": "application/x-zmachine", + "z2": "application/x-zmachine", + "z3": "application/x-zmachine", + "z4": "application/x-zmachine", + "z5": "application/x-zmachine", + "z6": "application/x-zmachine", + "z7": "application/x-zmachine", + "z8": "application/x-zmachine", + "zaz": "application/vnd.zzazz.deck+xml", + "zip": "application/zip", + "zir": "application/vnd.zul", + "zirz": "application/vnd.zul", + "zmm": "application/vnd.handheld-entertainment+xml", +} diff --git a/vendor/codeberg.org/gruf/go-mimetypes/mime.go b/vendor/codeberg.org/gruf/go-mimetypes/mime.go new file mode 100644 index 000000000..028092da6 --- /dev/null +++ b/vendor/codeberg.org/gruf/go-mimetypes/mime.go @@ -0,0 +1,47 @@ +package mimetypes + +import "path" + +// PreferredExts defines preferred file +// extensions for input mime types (as there +// can be multiple extensions per mime type). +var PreferredExts = map[string]string{ + MimeTypes["mp3"]: "mp3", // audio/mpeg + MimeTypes["mpeg"]: "mpeg", // video/mpeg +} + +// GetForFilename returns mimetype for given filename. +func GetForFilename(filename string) (string, bool) { + ext := path.Ext(filename) + if len(ext) < 1 { + return "", false + } + mime, ok := MimeTypes[ext[1:]] + return mime, ok +} + +// GetFileExt returns the file extension to use for mimetype. Relying first upon +// the 'PreferredExts' map. It simply returns the first match there may multiple. 
+func GetFileExt(mimeType string) (string, bool) { + ext, ok := PreferredExts[mimeType] + if ok { + return ext, true + } + for ext, mime := range MimeTypes { + if mime == mimeType { + return ext, true + } + } + return "", false +} + +// GetFileExts returns known file extensions used for mimetype. +func GetFileExts(mimeType string) []string { + var exts []string + for ext, mime := range MimeTypes { + if mime == mimeType { + exts = append(exts, ext) + } + } + return exts +} diff --git a/vendor/codeberg.org/gruf/go-storage/memory/memory.go b/vendor/codeberg.org/gruf/go-storage/memory/memory.go index 55728b827..db3199338 100644 --- a/vendor/codeberg.org/gruf/go-storage/memory/memory.go +++ b/vendor/codeberg.org/gruf/go-storage/memory/memory.go @@ -7,7 +7,6 @@ import ( "strings" "sync" - "codeberg.org/gruf/go-iotools" "codeberg.org/gruf/go-storage" "codeberg.org/gruf/go-storage/internal" @@ -93,7 +92,7 @@ func (st *MemoryStorage) ReadStream(ctx context.Context, key string) (io.ReadClo // Wrap in readcloser. r := bytes.NewReader(b) - return iotools.NopReadCloser(r), nil + return io.NopCloser(r), nil } // WriteBytes: implements Storage.WriteBytes(). diff --git a/vendor/codeberg.org/superseriousbusiness/exif-terminator/README.md b/vendor/codeberg.org/superseriousbusiness/exif-terminator/README.md deleted file mode 100644 index ddb97fbb6..000000000 --- a/vendor/codeberg.org/superseriousbusiness/exif-terminator/README.md +++ /dev/null @@ -1,122 +0,0 @@ -# exif-terminator - -`exif-terminator` removes exif data from images (jpeg and png currently supported) in a streaming manner. All you need to do is provide a reader of the image in, and exif-terminator will provide a reader of the image out. - -Hasta la vista, baby! - -```text - .,lddxococ. - ..',lxO0Oo;'. - . .. .,coodO0klc:. - .,. ..','. .. .,..'. .':llxKXk' - .;c:cc;;,... .''.,l:cc. .....:l:,,:oo:.. - .,:ll'. .,;cox0OxOKKXX0kOOxlcld0X0d;,,,'. - .:xkl. .':cdKNWWWWMMMMMMMMMMWWNXK0KWNd. 
- .coxo,..:ollk0KKXNWMMMMMMMMMMWWXXXOoOM0; - ,oc,. .;cloxOKXXWWMMMMMMMMMMMWNXk;;OWO' - . ..;cdOKXNNWWMMMMMMMMMMMMWO,,ONO' - ...... ....;okOO000XWWMMMMMMMMMWXx;,ONNx. -.;c;. .:l'ckl. ..';looooolldolloooodolcc:;'.;oo:. -.oxl. ;:..OO. .. .. .,' .;. -.oko. .cc.'Ok. .:; .:,..';. -.cdc. .;;lc.,Ox. . .',,'..','. .dN0; .. .c:,,':. -.:oc. ,dxkl.,0x. . .. . .oNMMKc.. ...:l. -.:o:. cKXKl.,Ox. .. .lKWMMMXo,. ...''. -.:l; c0KKo.,0x. ...........';:lk0OKNNXKkl,..,;cxd' -.::' ;k00l.;0d. .. .,cloooddddxxddol;:ddloxdc,:odOWNc -.;,. ,ONKc.;0d. 'l,.. .:clllllllokKOl::cllclkKx'.lolxx' -.,. '0W0:.;0d. .:l,. .,:ccc:::oOXNXOkxdook0NWNx,,;c;. -... .kX0c.;0d. .loc' .,::;;;;lk0kddoooooddooO0o',ld; -.. .oOkk:cKd. .... .;:,',;cxK0o::ldkOkkOkxod:';oKx. -.. :dlOolKO, '::'.';:oOK0xdddoollooxOx::ccOx. -.. ';:o,.xKo. .,;'...';lddolooodkkkdol:,::lc. -.. ...:..oOl. ........';:codxxOXKKKk;':;:kl -.. .,..lOc. .. ....,codxkxxxxxo:,,;lKO. .,;'.. -... .. ck: ';,'. .;:cllloc,;;;colOK; .;odxxoc;. -...,.... . :x; .;:cc;'. .,;::c:'..,kXk:xNc .':oook00x:. - . cKx. .'.. ':clllc,...'';:::cc:;.,kOo:xNx. .'codddoox - .. ,xxl;',col:;. .:cccccc;;;:lxkkOOkdc,,lolcxWO' ;kNKc.' - .,. .c' ':dkO0O; .. .;ccccccc:::cldxkxoll:;oolcdN0:.. .xWNk; - .:' .c',xXNKkOXo .,. .,:cccccllc::lloooolc:;lo:;oXKc,::. .kWWX - ,' .cONMWMWkco, ', .';::ccclolc:llolollcccodo;:KXl..cl,. ;KWN - '. .xWWWWMKc;; ....;' ',;::::coolclloooollc:,:o;;0Xx, .,:;... ,0Ko - . ,kKNWWXd,cdd0NXKk:,;;;'';::::coollllllllllc;;ccl0Nkc. ..';loOx' - 'lxXWMXOOXNMMMMWWNNNWXkc;;;;;:cllccccccccc::lllkNWXd,. .cxO0Ol' - ,xKNWWXkkXWM0dxKNWWWMWNX0OOkl;;:c::cccc:,...:oONMMXOo;. :kOkOkl; - .;,;:;...,::. .;lokXKKNMMMWNOc,;;;,::;'...lOKNWNKkol:,..cKdcO0do - .:;... .. .,:okO0KNN0:.',,''''. ':xNMWKkxxOKXd,.cNk,:l:o -``` - -## Why? - -Exif removal is a pain in the arse. Most other libraries seem to parse the whole image into memory, then remove the exif data, then encode the image again. 
- -`exif-terminator` differs in that it removes exif data *while scanning through the image bytes*, and it doesn't do any reencoding of the image. Bytes of exif data are simply all set to 0, and the image data is piped back out again into the returned reader. - -The only exception is orientation data: if an image contains orientation data, this and only this data will be preserved since it's *actually useful*. - -## Example - -You can run the following example with `go run ./example/main.go`: - -```go -package main - -import ( - "io" - "os" - - terminator "codeberg.org/superseriousbusiness/exif-terminator" -) - -func main() { - // open a file - sloth, err := os.Open("./images/sloth.jpg") - if err != nil { - panic(err) - } - defer sloth.Close() - - // get the length of the file - stat, err := sloth.Stat() - if err != nil { - panic(err) - } - - // terminate! - out, err := terminator.Terminate(sloth, int(stat.Size()), "jpeg") - if err != nil { - panic(err) - } - - // read the bytes from the reader - b, err := io.ReadAll(out) - if err != nil { - panic(err) - } - - // save the file somewhere - if err := os.WriteFile("./images/sloth-clean.jpg", b, 0666); err != nil { - panic(err) - } -} -``` - -## Credits - -### Libraries - -`exif-terminator` borrows heavily from the two [`dsoprea`](https://github.com/dsoprea) libraries credited below. In fact, it's basically a hack on top of those libraries. Thanks `dsoprea`! - -- [dsoprea/go-exif](https://github.com/dsoprea/go-exif): exif header reconstruction. [MIT License](https://spdx.org/licenses/MIT.html). -- [dsoprea/go-jpeg-image-structure](https://github.com/dsoprea/go-jpeg-image-structure): jpeg structure parsing. [MIT License](https://spdx.org/licenses/MIT.html). -- [dsoprea/go-png-image-structure](https://github.com/dsoprea/go-png-image-structure): png structure parsing. [MIT License](https://spdx.org/licenses/MIT.html). -- [stretchr/testify](https://github.com/stretchr/testify); test framework. 
[MIT License](https://spdx.org/licenses/MIT.html). - -## License - -![the gnu AGPL logo](https://www.gnu.org/graphics/agplv3-155x51.png) - -`exif-terminator` is free software, licensed under the [GNU AGPL v3 LICENSE](LICENSE). - -Copyright (C) 2022-2024 SuperSeriousBusiness. diff --git a/vendor/codeberg.org/superseriousbusiness/exif-terminator/jpeg.go b/vendor/codeberg.org/superseriousbusiness/exif-terminator/jpeg.go deleted file mode 100644 index 576c4e430..000000000 --- a/vendor/codeberg.org/superseriousbusiness/exif-terminator/jpeg.go +++ /dev/null @@ -1,295 +0,0 @@ -/* - exif-terminator - Copyright (C) 2022 SuperSeriousBusiness admin@gotosocial.org - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU Affero General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU Affero General Public License for more details. - - You should have received a copy of the GNU Affero General Public License - along with this program. If not, see . 
-*/ - -package terminator - -import ( - "bytes" - "encoding/binary" - "fmt" - "io" - - exif "github.com/dsoprea/go-exif/v3" - jpegstructure "github.com/superseriousbusiness/go-jpeg-image-structure/v2" -) - -var markerLen = map[byte]int{ - 0x00: 0, - 0x01: 0, - 0xd0: 0, - 0xd1: 0, - 0xd2: 0, - 0xd3: 0, - 0xd4: 0, - 0xd5: 0, - 0xd6: 0, - 0xd7: 0, - 0xd8: 0, - 0xd9: 0, - 0xda: 0, - - // J2C - 0x30: 0, - 0x31: 0, - 0x32: 0, - 0x33: 0, - 0x34: 0, - 0x35: 0, - 0x36: 0, - 0x37: 0, - 0x38: 0, - 0x39: 0, - 0x3a: 0, - 0x3b: 0, - 0x3c: 0, - 0x3d: 0, - 0x3e: 0, - 0x3f: 0, - 0x4f: 0, - 0x92: 0, - 0x93: 0, - - // J2C extensions - 0x74: 4, - 0x75: 4, - 0x77: 4, -} - -type jpegVisitor struct { - js *jpegstructure.JpegSplitter - writer io.Writer - expectedFileSize int - writtenTotalBytes int -} - -// HandleSegment satisfies the visitor interface{} of the jpegstructure library. -// -// We don't really care about many of the parameters, since all we're interested -// in here is the very last segment that was scanned. 
-func (v *jpegVisitor) HandleSegment(segmentMarker byte, _ string, _ int, _ bool) error { - // get the most recent segment scanned (ie., last in the segments list) - segmentList := v.js.Segments() - segments := segmentList.Segments() - mostRecentSegment := segments[len(segments)-1] - - // check if we've written the expected number of bytes by EOI - if segmentMarker == jpegstructure.MARKER_EOI { - // take account of the last 2 bytes taken up by the EOI - eoiLength := 2 - - // this is the total file size we will - // have written including the EOI - willHaveWritten := v.writtenTotalBytes + eoiLength - - if willHaveWritten < v.expectedFileSize { - // if we won't have written enough, - // pad the final segment before EOI - // so that we meet expected file size - missingBytes := make([]byte, v.expectedFileSize-willHaveWritten) - if _, err := v.writer.Write(missingBytes); err != nil { - return err - } - } - } - - // process the segment - return v.writeSegment(mostRecentSegment) -} - -func (v *jpegVisitor) writeSegment(s *jpegstructure.Segment) error { - var writtenSegmentData int - w := v.writer - - defer func() { - // whatever happens, when we finished then evict data from the segment; - // once we've written it we don't want it in memory anymore - s.Data = s.Data[:0] - }() - - // The scan-data will have a marker-ID of (0) because it doesn't have a marker-ID or length. 
- if s.MarkerId != 0 { - markerIDWritten, err := w.Write([]byte{0xff, s.MarkerId}) - if err != nil { - return err - } - writtenSegmentData += markerIDWritten - - sizeLen, found := markerLen[s.MarkerId] - if !found || sizeLen == 2 { - sizeLen = 2 - l := uint16(len(s.Data) + sizeLen) - - if err := binary.Write(w, binary.BigEndian, &l); err != nil { - return err - } - - writtenSegmentData += 2 - } else if sizeLen == 4 { - l := uint32(len(s.Data) + sizeLen) - - if err := binary.Write(w, binary.BigEndian, &l); err != nil { - return err - } - - writtenSegmentData += 4 - } else if sizeLen != 0 { - return fmt.Errorf("not a supported marker-size: MARKER-ID=(0x%02x) MARKER-SIZE-LEN=(%d)", s.MarkerId, sizeLen) - } - } - - if !s.IsExif() { - // if this isn't exif data just copy it over and bail - writtenNormalData, err := w.Write(s.Data) - if err != nil { - return err - } - - writtenSegmentData += writtenNormalData - v.writtenTotalBytes += writtenSegmentData - return nil - } - - ifd, _, err := s.Exif() - if err != nil { - return err - } - - // amount of bytes we've writtenExifData into the exif body, we'll update this as we go - var writtenExifData int - - if orientationEntries, err := ifd.FindTagWithName("Orientation"); err == nil && len(orientationEntries) == 1 { - // If we have an orientation entry, we don't want to completely obliterate the exif data. - // Instead, we want to surgically obliterate everything *except* the orientation tag, so - // that the image will still be rotated correctly when shown in client applications etc. - // - // To accomplish this, we're going to extract just the bytes that we need and write them - // in according to the exif specification, then fill in the rest of the space with empty - // bytes. - // - // First we need to write the exif prefix for this segment. - // - // Then we write the exif header which contains the byte order and offset of the first ifd. - // - // Then we write the ifd0 entry which contains the orientation data. 
- // - // After that we just fill. - - newExifData := &bytes.Buffer{} - byteOrder := ifd.ByteOrder() - - // 1. Write exif prefix. - // https://www.ozhiker.com/electronics/pjmt/jpeg_info/app_segments.html - prefix := []byte{'E', 'x', 'i', 'f', 0, 0} - if err := binary.Write(newExifData, byteOrder, &prefix); err != nil { - return err - } - writtenExifData += len(prefix) - - // 2. Write exif header, taking the existing byte order. - exifHeader, err := exif.BuildExifHeader(byteOrder, exif.ExifDefaultFirstIfdOffset) - if err != nil { - return err - } - hWritten, err := newExifData.Write(exifHeader) - if err != nil { - return err - } - writtenExifData += hWritten - - // 3. Write in the new ifd - // - // An ifd with one orientation entry is structured like this: - // 2 bytes: the number of entries in the ifd uint16(1) - // 2 bytes: the tag id uint16(274) - // 2 bytes: the tag type uint16(3) - // 4 bytes: the tag count uint32(1) - // 4 bytes: the tag value offset: uint32(one of the below with padding on the end) - // 1 = Horizontal (normal) - // 2 = Mirror horizontal - // 3 = Rotate 180 - // 4 = Mirror vertical - // 5 = Mirror horizontal and rotate 270 CW - // 6 = Rotate 90 CW - // 7 = Mirror horizontal and rotate 90 CW - // 8 = Rotate 270 CW - // - // see https://web.archive.org/web/20190624045241if_/http://www.cipa.jp:80/std/documents/e/DC-008-Translation-2019-E.pdf - p24-25 - orientationEntry := orientationEntries[0] - - ifdCount := uint16(1) // we're only adding one entry into the ifd - if err := binary.Write(newExifData, byteOrder, &ifdCount); err != nil { - return err - } - writtenExifData += 2 - - tagID := orientationEntry.TagId() - if err := binary.Write(newExifData, byteOrder, &tagID); err != nil { - return err - } - writtenExifData += 2 - - tagType := uint16(orientationEntry.TagType()) - if err := binary.Write(newExifData, byteOrder, &tagType); err != nil { - return err - } - writtenExifData += 2 - - tagCount := orientationEntry.UnitCount() - if err := 
binary.Write(newExifData, byteOrder, &tagCount); err != nil { - return err - } - writtenExifData += 4 - - valueOffset, err := orientationEntry.GetRawBytes() - if err != nil { - return err - } - - vWritten, err := newExifData.Write(valueOffset) - if err != nil { - return err - } - writtenExifData += vWritten - - valuePad := make([]byte, 4-vWritten) - pWritten, err := newExifData.Write(valuePad) - if err != nil { - return err - } - writtenExifData += pWritten - - // write all the new data into the writer from the segment - writtenNewExifData, err := io.Copy(w, newExifData) - if err != nil { - return err - } - - writtenSegmentData += int(writtenNewExifData) - } - - // fill in any remaining exif body with blank bytes - blank := make([]byte, len(s.Data)-writtenExifData) - writtenPadding, err := w.Write(blank) - if err != nil { - return err - } - - writtenSegmentData += writtenPadding - v.writtenTotalBytes += writtenSegmentData - return nil -} diff --git a/vendor/codeberg.org/superseriousbusiness/exif-terminator/png.go b/vendor/codeberg.org/superseriousbusiness/exif-terminator/png.go deleted file mode 100644 index 774ec0ed6..000000000 --- a/vendor/codeberg.org/superseriousbusiness/exif-terminator/png.go +++ /dev/null @@ -1,93 +0,0 @@ -/* - exif-terminator - Copyright (C) 2022 SuperSeriousBusiness admin@gotosocial.org - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU Affero General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU Affero General Public License for more details. - - You should have received a copy of the GNU Affero General Public License - along with this program. If not, see . 
-*/ - -package terminator - -import ( - "io" - - pngstructure "github.com/superseriousbusiness/go-png-image-structure/v2" -) - -type pngVisitor struct { - ps *pngstructure.PngSplitter - writer io.Writer - lastWrittenChunk int -} - -func (v *pngVisitor) split(data []byte, atEOF bool) (int, []byte, error) { - // execute the ps split function to read in data - advance, token, err := v.ps.Split(data, atEOF) - if err != nil { - return advance, token, err - } - - // if we haven't written anything at all yet, then write the png header back into the writer first - if v.lastWrittenChunk == -1 { - if _, err := v.writer.Write(pngstructure.PngSignature[:]); err != nil { - return advance, token, err - } - } - - // Check if the splitter now has - // any new chunks in it for us. - chunkSlice, err := v.ps.Chunks() - if err != nil { - return advance, token, err - } - - // Write each chunk by passing it - // through our custom write func, - // which strips out exif and fixes - // the CRC of each chunk. - chunks := chunkSlice.Chunks() - for i, chunk := range chunks { - if i <= v.lastWrittenChunk { - // Skip already - // written chunks. - continue - } - - // Write this new chunk. - if err := v.writeChunk(chunk); err != nil { - return advance, token, err - } - v.lastWrittenChunk = i - - // Zero data; here you - // go garbage collector. - chunk.Data = nil - } - - return advance, token, err -} - -func (v *pngVisitor) writeChunk(chunk *pngstructure.Chunk) error { - if chunk.Type == pngstructure.EXifChunkType { - // Replace exif data - // with zero bytes. - clear(chunk.Data) - } - - // Fix CRC of each chunk. - chunk.UpdateCrc32() - - // finally, write chunk to writer. 
- _, err := chunk.WriteTo(v.writer) - return err -} diff --git a/vendor/codeberg.org/superseriousbusiness/exif-terminator/terminator.go b/vendor/codeberg.org/superseriousbusiness/exif-terminator/terminator.go deleted file mode 100644 index 7dd3d9ad7..000000000 --- a/vendor/codeberg.org/superseriousbusiness/exif-terminator/terminator.go +++ /dev/null @@ -1,158 +0,0 @@ -/* - exif-terminator - Copyright (C) 2022 SuperSeriousBusiness admin@gotosocial.org - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU Affero General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU Affero General Public License for more details. - - You should have received a copy of the GNU Affero General Public License - along with this program. If not, see . -*/ - -package terminator - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "io" - - jpegstructure "github.com/superseriousbusiness/go-jpeg-image-structure/v2" - pngstructure "github.com/superseriousbusiness/go-png-image-structure/v2" -) - -func Terminate(in io.Reader, fileSize int, mediaType string) (io.Reader, error) { - // To avoid keeping too much stuff - // in memory we want to pipe data - // directly to the reader. - pipeReader, pipeWriter := io.Pipe() - - // We don't know ahead of time how long - // segments might be: they could be as - // large as the file itself, so we need - // a buffer with generous overhead. 
- scanner := bufio.NewScanner(in) - scanner.Buffer([]byte{}, fileSize) - - var err error - switch mediaType { - case "image/jpeg", "jpeg", "jpg": - err = terminateJpeg(scanner, pipeWriter, fileSize) - - case "image/webp", "webp": - err = terminateWebp(scanner, pipeWriter) - - case "image/png", "png": - // For pngs we need to skip the header bytes, so read - // them in and check we're really dealing with a png. - header := make([]byte, len(pngstructure.PngSignature)) - if _, headerError := in.Read(header); headerError != nil { - err = headerError - break - } - - if !bytes.Equal(header, pngstructure.PngSignature[:]) { - err = errors.New("could not decode png: invalid header") - break - } - - err = terminatePng(scanner, pipeWriter) - default: - err = fmt.Errorf("mediaType %s cannot be processed", mediaType) - } - - return pipeReader, err -} - -func terminateJpeg(scanner *bufio.Scanner, writer *io.PipeWriter, expectedFileSize int) error { - v := &jpegVisitor{ - writer: writer, - expectedFileSize: expectedFileSize, - } - - // Provide the visitor to the splitter so - // that it triggers on every section scan. - js := jpegstructure.NewJpegSplitter(v) - - // The visitor also needs to read back the - // list of segments: for this it needs to - // know what jpeg splitter it's attached to, - // so give it a pointer to the splitter. - v.js = js - - // Jpeg visitor's 'split' function - // satisfies bufio.SplitFunc{}. - scanner.Split(js.Split) - - go scanAndClose(scanner, writer) - return nil -} - -func terminateWebp(scanner *bufio.Scanner, writer *io.PipeWriter) error { - v := &webpVisitor{ - writer: writer, - } - - // Webp visitor's 'split' function - // satisfies bufio.SplitFunc{}. - scanner.Split(v.split) - - go scanAndClose(scanner, writer) - return nil -} - -func terminatePng(scanner *bufio.Scanner, writer *io.PipeWriter) error { - ps := pngstructure.NewPngSplitter() - - // Don't bother checking CRC; - // we're overwriting it anyway. 
- ps.DoCheckCrc(false) - - v := &pngVisitor{ - ps: ps, - writer: writer, - lastWrittenChunk: -1, - } - - // Png visitor's 'split' function - // satisfies bufio.SplitFunc{}. - scanner.Split(v.split) - - go scanAndClose(scanner, writer) - return nil -} - -// scanAndClose scans through the given scanner until there's -// nothing left to scan, and then closes the writer so that the -// reader on the other side of the pipe knows that we're done. -// -// Any error encountered when scanning will be logged by terminator. -// -// Due to the nature of io.Pipe, writing won't actually work -// until the pipeReader starts being read by the caller, which -// is why this function should always be called asynchronously. -func scanAndClose(scanner *bufio.Scanner, writer *io.PipeWriter) { - var err error - - defer func() { - // Always close writer, using returned - // scanner error (if any). If err is nil - // then the standard io.EOF will be used. - // (this will not overwrite existing). - writer.CloseWithError(err) - }() - - for scanner.Scan() { - } - - // Set error on return. - err = scanner.Err() -} diff --git a/vendor/codeberg.org/superseriousbusiness/exif-terminator/webp.go b/vendor/codeberg.org/superseriousbusiness/exif-terminator/webp.go deleted file mode 100644 index 392c4871d..000000000 --- a/vendor/codeberg.org/superseriousbusiness/exif-terminator/webp.go +++ /dev/null @@ -1,101 +0,0 @@ -/* - exif-terminator - Copyright (C) 2022 SuperSeriousBusiness admin@gotosocial.org - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU Affero General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU Affero General Public License for more details. 
- - You should have received a copy of the GNU Affero General Public License - along with this program. If not, see . -*/ - -package terminator - -import ( - "encoding/binary" - "errors" - "io" -) - -const ( - riffHeaderSize = 4 * 3 -) - -var ( - riffHeader = [4]byte{'R', 'I', 'F', 'F'} - webpHeader = [4]byte{'W', 'E', 'B', 'P'} - exifFourcc = [4]byte{'E', 'X', 'I', 'F'} - xmpFourcc = [4]byte{'X', 'M', 'P', ' '} - - errNoRiffHeader = errors.New("no RIFF header") - errNoWebpHeader = errors.New("not a WEBP file") -) - -type webpVisitor struct { - writer io.Writer - doneHeader bool -} - -func fourCC(b []byte) [4]byte { - return [4]byte{b[0], b[1], b[2], b[3]} -} - -func (v *webpVisitor) split(data []byte, atEOF bool) (advance int, token []byte, err error) { - // parse/write the header first - if !v.doneHeader { - if len(data) < riffHeaderSize { - // need the full header - return - } - if fourCC(data) != riffHeader { - err = errNoRiffHeader - return - } - if fourCC(data[8:]) != webpHeader { - err = errNoWebpHeader - return - } - if _, err = v.writer.Write(data[:riffHeaderSize]); err != nil { - return - } - advance += riffHeaderSize - data = data[riffHeaderSize:] - v.doneHeader = true - } - - // need enough for fourcc and size - if len(data) < 8 { - return - } - size := int64(binary.LittleEndian.Uint32(data[4:])) - if (size & 1) != 0 { - // odd chunk size - extra padding byte - size++ - } - // wait until there is enough - if int64(len(data)-8) < size { - return - } - - fourcc := fourCC(data) - rawChunkData := data[8 : 8+size] - if fourcc == exifFourcc || fourcc == xmpFourcc { - // replace exif/xmp with blank - rawChunkData = make([]byte, size) - } - - if _, err = v.writer.Write(data[:8]); err == nil { - if _, err = v.writer.Write(rawChunkData); err == nil { - advance += 8 + int(size) - } - } - - return -} diff --git a/vendor/github.com/abema/go-mp4/.gitignore b/vendor/github.com/abema/go-mp4/.gitignore deleted file mode 100644 index 22d0d82f8..000000000 --- 
a/vendor/github.com/abema/go-mp4/.gitignore +++ /dev/null @@ -1 +0,0 @@ -vendor diff --git a/vendor/github.com/abema/go-mp4/LICENSE b/vendor/github.com/abema/go-mp4/LICENSE deleted file mode 100644 index c06ca63d3..000000000 --- a/vendor/github.com/abema/go-mp4/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2020 AbemaTV - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
diff --git a/vendor/github.com/abema/go-mp4/README.md b/vendor/github.com/abema/go-mp4/README.md deleted file mode 100644 index 2f3e690be..000000000 --- a/vendor/github.com/abema/go-mp4/README.md +++ /dev/null @@ -1,159 +0,0 @@ -go-mp4 ------- - -[![Go Reference](https://pkg.go.dev/badge/github.com/abema/go-mp4.svg)](https://pkg.go.dev/github.com/abema/go-mp4) -![Test](https://github.com/abema/go-mp4/actions/workflows/test.yml/badge.svg) -[![Coverage Status](https://coveralls.io/repos/github/abema/go-mp4/badge.svg)](https://coveralls.io/github/abema/go-mp4) -[![Go Report Card](https://goreportcard.com/badge/github.com/abema/go-mp4)](https://goreportcard.com/report/github.com/abema/go-mp4) - -go-mp4 is Go library which provides low-level I/O interfaces of MP4. -This library supports you to parse or build any MP4 boxes(atoms) directly. - -go-mp4 provides very flexible interfaces for reading boxes. -If you want to read only specific parts of MP4 file, this library extracts those boxes via io.ReadSeeker interface. - -On the other hand, this library is not suitable for complex data conversions. - -## Integration with your Go application - -### Reading - -You can parse MP4 file as follows: - -```go -// expand all boxes -_, err := mp4.ReadBoxStructure(file, func(h *mp4.ReadHandle) (interface{}, error) { - fmt.Println("depth", len(h.Path)) - - // Box Type (e.g. 
"mdhd", "tfdt", "mdat") - fmt.Println("type", h.BoxInfo.Type.String()) - - // Box Size - fmt.Println("size", h.BoxInfo.Size) - - if h.BoxInfo.IsSupportedType() { - // Payload - box, _, err := h.ReadPayload() - if err != nil { - return nil, err - } - str, err := mp4.Stringify(box, h.BoxInfo.Context) - if err != nil { - return nil, err - } - fmt.Println("payload", str) - - // Expands children - return h.Expand() - } - return nil, nil -}) -``` - -```go -// extract specific boxes -boxes, err := mp4.ExtractBoxWithPayload(file, nil, mp4.BoxPath{mp4.BoxTypeMoov(), mp4.BoxTypeTrak(), mp4.BoxTypeTkhd()}) -if err != nil { - : -} -for _, box := range boxes { - tkhd := box.Payload.(*mp4.Tkhd) - fmt.Println("track ID:", tkhd.TrackID) -} -``` - -```go -// get basic informations -info, err := mp4.Probe(bufseekio.NewReadSeeker(file, 1024, 4)) -if err != nil { - : -} -fmt.Println("track num:", len(info.Tracks)) -``` - -### Writing - -Writer helps you to write box tree. -The following sample code edits emsg box and writes to another file. 
- -```go -r := bufseekio.NewReadSeeker(inputFile, 128*1024, 4) -w := mp4.NewWriter(outputFile) -_, err = mp4.ReadBoxStructure(r, func(h *mp4.ReadHandle) (interface{}, error) { - switch h.BoxInfo.Type { - case mp4.BoxTypeEmsg(): - // write box size and box type - _, err := w.StartBox(&h.BoxInfo) - if err != nil { - return nil, err - } - // read payload - box, _, err := h.ReadPayload() - if err != nil { - return nil, err - } - // update MessageData - emsg := box.(*mp4.Emsg) - emsg.MessageData = []byte("hello world") - // write box playload - if _, err := mp4.Marshal(w, emsg, h.BoxInfo.Context); err != nil { - return nil, err - } - // rewrite box size - _, err = w.EndBox() - return nil, err - default: - // copy all - return nil, w.CopyBox(r, &h.BoxInfo) - } -}) -``` - -### User-defined Boxes - -You can create additional box definition as follows: - -```go -func BoxTypeXxxx() BoxType { return mp4.StrToBoxType("xxxx") } - -func init() { - mp4.AddBoxDef(&Xxxx{}, 0) -} - -type Xxxx struct { - FullBox `mp4:"0,extend"` - UI32 uint32 `mp4:"1,size=32"` - ByteArray []byte `mp4:"2,size=8,len=dynamic"` -} - -func (*Xxxx) GetType() BoxType { - return BoxTypeXxxx() -} -``` - -### Buffering - -go-mp4 has no buffering feature for I/O. -If you should reduce Read function calls, you can wrap the io.ReadSeeker by [bufseekio](https://github.com/sunfish-shogi/bufseekio). - -## Command Line Tool - -Install mp4tool as follows: - -```sh -go install github.com/abema/go-mp4/cmd/mp4tool@latest - -mp4tool -help -``` - -For example, `mp4tool dump MP4_FILE_NAME` command prints MP4 box tree as follows: - -``` -[moof] Size=504 - [mfhd] Size=16 Version=0 Flags=0x000000 SequenceNumber=1 - [traf] Size=480 - [tfhd] Size=28 Version=0 Flags=0x020038 TrackID=1 DefaultSampleDuration=9000 DefaultSampleSize=33550 DefaultSampleFlags=0x1010000 - [tfdt] Size=20 Version=1 Flags=0x000000 BaseMediaDecodeTimeV1=0 - [trun] Size=424 ... (use -a option to show all) -[mdat] Size=44569 Data=[...] 
(use -mdat option to expand) -``` diff --git a/vendor/github.com/abema/go-mp4/anytype.go b/vendor/github.com/abema/go-mp4/anytype.go deleted file mode 100644 index d995f59b6..000000000 --- a/vendor/github.com/abema/go-mp4/anytype.go +++ /dev/null @@ -1,19 +0,0 @@ -package mp4 - -type IAnyType interface { - IBox - SetType(BoxType) -} - -type AnyTypeBox struct { - Box - Type BoxType -} - -func (e *AnyTypeBox) GetType() BoxType { - return e.Type -} - -func (e *AnyTypeBox) SetType(boxType BoxType) { - e.Type = boxType -} diff --git a/vendor/github.com/abema/go-mp4/box.go b/vendor/github.com/abema/go-mp4/box.go deleted file mode 100644 index c69e89257..000000000 --- a/vendor/github.com/abema/go-mp4/box.go +++ /dev/null @@ -1,188 +0,0 @@ -package mp4 - -import ( - "errors" - "io" - "math" - - "github.com/abema/go-mp4/internal/bitio" -) - -const LengthUnlimited = math.MaxUint32 - -type ICustomFieldObject interface { - // GetFieldSize returns size of dynamic field - GetFieldSize(name string, ctx Context) uint - - // GetFieldLength returns length of dynamic field - GetFieldLength(name string, ctx Context) uint - - // IsOptFieldEnabled check whether if the optional field is enabled - IsOptFieldEnabled(name string, ctx Context) bool - - // StringifyField returns field value as string - StringifyField(name string, indent string, depth int, ctx Context) (string, bool) - - IsPString(name string, bytes []byte, remainingSize uint64, ctx Context) bool - - BeforeUnmarshal(r io.ReadSeeker, size uint64, ctx Context) (n uint64, override bool, err error) - - OnReadField(name string, r bitio.ReadSeeker, leftBits uint64, ctx Context) (rbits uint64, override bool, err error) - - OnWriteField(name string, w bitio.Writer, ctx Context) (wbits uint64, override bool, err error) -} - -type BaseCustomFieldObject struct { -} - -// GetFieldSize returns size of dynamic field -func (box *BaseCustomFieldObject) GetFieldSize(string, Context) uint { - panic(errors.New("GetFieldSize not implemented")) -} 
- -// GetFieldLength returns length of dynamic field -func (box *BaseCustomFieldObject) GetFieldLength(string, Context) uint { - panic(errors.New("GetFieldLength not implemented")) -} - -// IsOptFieldEnabled check whether if the optional field is enabled -func (box *BaseCustomFieldObject) IsOptFieldEnabled(string, Context) bool { - return false -} - -// StringifyField returns field value as string -func (box *BaseCustomFieldObject) StringifyField(string, string, int, Context) (string, bool) { - return "", false -} - -func (*BaseCustomFieldObject) IsPString(name string, bytes []byte, remainingSize uint64, ctx Context) bool { - return true -} - -func (*BaseCustomFieldObject) BeforeUnmarshal(io.ReadSeeker, uint64, Context) (uint64, bool, error) { - return 0, false, nil -} - -func (*BaseCustomFieldObject) OnReadField(string, bitio.ReadSeeker, uint64, Context) (uint64, bool, error) { - return 0, false, nil -} - -func (*BaseCustomFieldObject) OnWriteField(string, bitio.Writer, Context) (uint64, bool, error) { - return 0, false, nil -} - -// IImmutableBox is common interface of box -type IImmutableBox interface { - ICustomFieldObject - - // GetVersion returns the box version - GetVersion() uint8 - - // GetFlags returns the flags - GetFlags() uint32 - - // CheckFlag checks the flag status - CheckFlag(uint32) bool - - // GetType returns the BoxType - GetType() BoxType -} - -// IBox is common interface of box -type IBox interface { - IImmutableBox - - // SetVersion sets the box version - SetVersion(uint8) - - // SetFlags sets the flags - SetFlags(uint32) - - // AddFlag adds the flag - AddFlag(uint32) - - // RemoveFlag removes the flag - RemoveFlag(uint32) -} - -type Box struct { - BaseCustomFieldObject -} - -// GetVersion returns the box version -func (box *Box) GetVersion() uint8 { - return 0 -} - -// SetVersion sets the box version -func (box *Box) SetVersion(uint8) { -} - -// GetFlags returns the flags -func (box *Box) GetFlags() uint32 { - return 0x000000 -} - -// 
CheckFlag checks the flag status -func (box *Box) CheckFlag(flag uint32) bool { - return true -} - -// SetFlags sets the flags -func (box *Box) SetFlags(uint32) { -} - -// AddFlag adds the flag -func (box *Box) AddFlag(flag uint32) { -} - -// RemoveFlag removes the flag -func (box *Box) RemoveFlag(flag uint32) { -} - -// FullBox is ISOBMFF FullBox -type FullBox struct { - BaseCustomFieldObject - Version uint8 `mp4:"0,size=8"` - Flags [3]byte `mp4:"1,size=8"` -} - -// GetVersion returns the box version -func (box *FullBox) GetVersion() uint8 { - return box.Version -} - -// SetVersion sets the box version -func (box *FullBox) SetVersion(version uint8) { - box.Version = version -} - -// GetFlags returns the flags -func (box *FullBox) GetFlags() uint32 { - flag := uint32(box.Flags[0]) << 16 - flag ^= uint32(box.Flags[1]) << 8 - flag ^= uint32(box.Flags[2]) - return flag -} - -// CheckFlag checks the flag status -func (box *FullBox) CheckFlag(flag uint32) bool { - return box.GetFlags()&flag != 0 -} - -// SetFlags sets the flags -func (box *FullBox) SetFlags(flags uint32) { - box.Flags[0] = byte(flags >> 16) - box.Flags[1] = byte(flags >> 8) - box.Flags[2] = byte(flags) -} - -// AddFlag adds the flag -func (box *FullBox) AddFlag(flag uint32) { - box.SetFlags(box.GetFlags() | flag) -} - -// RemoveFlag removes the flag -func (box *FullBox) RemoveFlag(flag uint32) { - box.SetFlags(box.GetFlags() & (^flag)) -} diff --git a/vendor/github.com/abema/go-mp4/box_info.go b/vendor/github.com/abema/go-mp4/box_info.go deleted file mode 100644 index 1cb4b8aa7..000000000 --- a/vendor/github.com/abema/go-mp4/box_info.go +++ /dev/null @@ -1,162 +0,0 @@ -package mp4 - -import ( - "bytes" - "encoding/binary" - "fmt" - "io" - "math" -) - -type Context struct { - // IsQuickTimeCompatible represents whether ftyp.compatible_brands contains "qt ". 
- IsQuickTimeCompatible bool - - // QuickTimeKeysMetaEntryCount the expected number of items under the ilst box as observed from the keys box - QuickTimeKeysMetaEntryCount int - - // UnderWave represents whether current box is under the wave box. - UnderWave bool - - // UnderIlst represents whether current box is under the ilst box. - UnderIlst bool - - // UnderIlstMeta represents whether current box is under the metadata box under the ilst box. - UnderIlstMeta bool - - // UnderIlstFreeMeta represents whether current box is under "----" box. - UnderIlstFreeMeta bool - - // UnderUdta represents whether current box is under the udta box. - UnderUdta bool -} - -// BoxInfo has common infomations of box -type BoxInfo struct { - // Offset specifies an offset of the box in a file. - Offset uint64 - - // Size specifies size(bytes) of box. - Size uint64 - - // HeaderSize specifies size(bytes) of common fields which are defined as "Box" class member at ISO/IEC 14496-12. - HeaderSize uint64 - - // Type specifies box type which is represented by 4 characters. - Type BoxType - - // ExtendToEOF is set true when Box.size is zero. It means that end of box equals to end of file. - ExtendToEOF bool - - // Context would be set by ReadBoxStructure, not ReadBoxInfo. - Context -} - -func (bi *BoxInfo) IsSupportedType() bool { - return bi.Type.IsSupported(bi.Context) -} - -const ( - SmallHeaderSize = 8 - LargeHeaderSize = 16 -) - -// WriteBoxInfo writes common fields which are defined as "Box" class member at ISO/IEC 14496-12. -// This function ignores bi.Offset and returns BoxInfo which contains real Offset and recalculated Size/HeaderSize. 
-func WriteBoxInfo(w io.WriteSeeker, bi *BoxInfo) (*BoxInfo, error) { - offset, err := w.Seek(0, io.SeekCurrent) - if err != nil { - return nil, err - } - - var data []byte - if bi.ExtendToEOF { - data = make([]byte, SmallHeaderSize) - } else if bi.Size <= math.MaxUint32 && bi.HeaderSize != LargeHeaderSize { - data = make([]byte, SmallHeaderSize) - binary.BigEndian.PutUint32(data, uint32(bi.Size)) - } else { - data = make([]byte, LargeHeaderSize) - binary.BigEndian.PutUint32(data, 1) - binary.BigEndian.PutUint64(data[SmallHeaderSize:], bi.Size) - } - data[4] = bi.Type[0] - data[5] = bi.Type[1] - data[6] = bi.Type[2] - data[7] = bi.Type[3] - - if _, err := w.Write(data); err != nil { - return nil, err - } - - return &BoxInfo{ - Offset: uint64(offset), - Size: bi.Size - bi.HeaderSize + uint64(len(data)), - HeaderSize: uint64(len(data)), - Type: bi.Type, - ExtendToEOF: bi.ExtendToEOF, - }, nil -} - -// ReadBoxInfo reads common fields which are defined as "Box" class member at ISO/IEC 14496-12. 
-func ReadBoxInfo(r io.ReadSeeker) (*BoxInfo, error) { - offset, err := r.Seek(0, io.SeekCurrent) - if err != nil { - return nil, err - } - - bi := &BoxInfo{ - Offset: uint64(offset), - } - - // read 8 bytes - buf := bytes.NewBuffer(make([]byte, 0, SmallHeaderSize)) - if _, err := io.CopyN(buf, r, SmallHeaderSize); err != nil { - return nil, err - } - bi.HeaderSize += SmallHeaderSize - - // pick size and type - data := buf.Bytes() - bi.Size = uint64(binary.BigEndian.Uint32(data)) - bi.Type = BoxType{data[4], data[5], data[6], data[7]} - - if bi.Size == 0 { - // box extends to end of file - offsetEOF, err := r.Seek(0, io.SeekEnd) - if err != nil { - return nil, err - } - bi.Size = uint64(offsetEOF) - bi.Offset - bi.ExtendToEOF = true - if _, err := bi.SeekToPayload(r); err != nil { - return nil, err - } - } else if bi.Size == 1 { - // read more 8 bytes - buf.Reset() - if _, err := io.CopyN(buf, r, LargeHeaderSize-SmallHeaderSize); err != nil { - return nil, err - } - bi.HeaderSize += LargeHeaderSize - SmallHeaderSize - bi.Size = binary.BigEndian.Uint64(buf.Bytes()) - } - - if bi.Size == 0 { - return nil, fmt.Errorf("invalid size") - } - - return bi, nil -} - -func (bi *BoxInfo) SeekToStart(s io.Seeker) (int64, error) { - return s.Seek(int64(bi.Offset), io.SeekStart) -} - -func (bi *BoxInfo) SeekToPayload(s io.Seeker) (int64, error) { - return s.Seek(int64(bi.Offset+bi.HeaderSize), io.SeekStart) -} - -func (bi *BoxInfo) SeekToEnd(s io.Seeker) (int64, error) { - return s.Seek(int64(bi.Offset+bi.Size), io.SeekStart) -} diff --git a/vendor/github.com/abema/go-mp4/box_types_3gpp.go b/vendor/github.com/abema/go-mp4/box_types_3gpp.go deleted file mode 100644 index d19640c51..000000000 --- a/vendor/github.com/abema/go-mp4/box_types_3gpp.go +++ /dev/null @@ -1,24 +0,0 @@ -package mp4 - -var udta3GppMetaBoxTypes = []BoxType{ - StrToBoxType("titl"), - StrToBoxType("dscp"), - StrToBoxType("cprt"), - StrToBoxType("perf"), - StrToBoxType("auth"), - StrToBoxType("gnre"), -} - 
-func init() { - for _, bt := range udta3GppMetaBoxTypes { - AddAnyTypeBoxDefEx(&Udta3GppString{}, bt, isUnderUdta, 0) - } -} - -type Udta3GppString struct { - AnyTypeBox - FullBox `mp4:"0,extend"` - Pad bool `mp4:"1,size=1,hidden"` - Language [3]byte `mp4:"2,size=5,iso639-2"` // ISO-639-2/T language code - Data []byte `mp4:"3,size=8,string"` -} diff --git a/vendor/github.com/abema/go-mp4/box_types_av1.go b/vendor/github.com/abema/go-mp4/box_types_av1.go deleted file mode 100644 index 7b929e72b..000000000 --- a/vendor/github.com/abema/go-mp4/box_types_av1.go +++ /dev/null @@ -1,44 +0,0 @@ -package mp4 - -/*************************** av01 ****************************/ - -// https://aomediacodec.github.io/av1-isobmff - -func BoxTypeAv01() BoxType { return StrToBoxType("av01") } - -func init() { - AddAnyTypeBoxDef(&VisualSampleEntry{}, BoxTypeAv01()) -} - -/*************************** av1C ****************************/ - -// https://aomediacodec.github.io/av1-isobmff - -func BoxTypeAv1C() BoxType { return StrToBoxType("av1C") } - -func init() { - AddBoxDef(&Av1C{}) -} - -type Av1C struct { - Box - Marker uint8 `mp4:"0,size=1,const=1"` - Version uint8 `mp4:"1,size=7,const=1"` - SeqProfile uint8 `mp4:"2,size=3"` - SeqLevelIdx0 uint8 `mp4:"3,size=5"` - SeqTier0 uint8 `mp4:"4,size=1"` - HighBitdepth uint8 `mp4:"5,size=1"` - TwelveBit uint8 `mp4:"6,size=1"` - Monochrome uint8 `mp4:"7,size=1"` - ChromaSubsamplingX uint8 `mp4:"8,size=1"` - ChromaSubsamplingY uint8 `mp4:"9,size=1"` - ChromaSamplePosition uint8 `mp4:"10,size=2"` - Reserved uint8 `mp4:"11,size=3,const=0"` - InitialPresentationDelayPresent uint8 `mp4:"12,size=1"` - InitialPresentationDelayMinusOne uint8 `mp4:"13,size=4"` - ConfigOBUs []uint8 `mp4:"14,size=8"` -} - -func (Av1C) GetType() BoxType { - return BoxTypeAv1C() -} diff --git a/vendor/github.com/abema/go-mp4/box_types_etsi_ts_102_366.go b/vendor/github.com/abema/go-mp4/box_types_etsi_ts_102_366.go deleted file mode 100644 index 7436e1833..000000000 --- 
a/vendor/github.com/abema/go-mp4/box_types_etsi_ts_102_366.go +++ /dev/null @@ -1,36 +0,0 @@ -package mp4 - -/*************************** ac-3 ****************************/ - -// https://www.etsi.org/deliver/etsi_ts/102300_102399/102366/01.04.01_60/ts_102366v010401p.pdf - -func BoxTypeAC3() BoxType { return StrToBoxType("ac-3") } - -func init() { - AddAnyTypeBoxDef(&AudioSampleEntry{}, BoxTypeAC3()) -} - -/*************************** dac3 ****************************/ - -// https://www.etsi.org/deliver/etsi_ts/102300_102399/102366/01.04.01_60/ts_102366v010401p.pdf - -func BoxTypeDAC3() BoxType { return StrToBoxType("dac3") } - -func init() { - AddBoxDef(&Dac3{}) -} - -type Dac3 struct { - Box - Fscod uint8 `mp4:"0,size=2"` - Bsid uint8 `mp4:"1,size=5"` - Bsmod uint8 `mp4:"2,size=3"` - Acmod uint8 `mp4:"3,size=3"` - LfeOn uint8 `mp4:"4,size=1"` - BitRateCode uint8 `mp4:"5,size=5"` - Reserved uint8 `mp4:"6,size=5,const=0"` -} - -func (Dac3) GetType() BoxType { - return BoxTypeDAC3() -} diff --git a/vendor/github.com/abema/go-mp4/box_types_iso14496_12.go b/vendor/github.com/abema/go-mp4/box_types_iso14496_12.go deleted file mode 100644 index 017fd918d..000000000 --- a/vendor/github.com/abema/go-mp4/box_types_iso14496_12.go +++ /dev/null @@ -1,2460 +0,0 @@ -package mp4 - -import ( - "errors" - "fmt" - "io" - - "github.com/abema/go-mp4/internal/bitio" - "github.com/abema/go-mp4/internal/util" -) - -/*************************** btrt ****************************/ - -func BoxTypeBtrt() BoxType { return StrToBoxType("btrt") } - -func init() { - AddBoxDef(&Btrt{}, 0) -} - -type Btrt struct { - Box - BufferSizeDB uint32 `mp4:"0,size=32"` - MaxBitrate uint32 `mp4:"1,size=32"` - AvgBitrate uint32 `mp4:"2,size=32"` -} - -// GetType returns the BoxType -func (*Btrt) GetType() BoxType { - return BoxTypeBtrt() -} - -/*************************** co64 ****************************/ - -func BoxTypeCo64() BoxType { return StrToBoxType("co64") } - -func init() { - AddBoxDef(&Co64{}, 0) 
-} - -type Co64 struct { - FullBox `mp4:"0,extend"` - EntryCount uint32 `mp4:"1,size=32"` - ChunkOffset []uint64 `mp4:"2,size=64,len=dynamic"` -} - -// GetType returns the BoxType -func (*Co64) GetType() BoxType { - return BoxTypeCo64() -} - -// GetFieldLength returns length of dynamic field -func (co64 *Co64) GetFieldLength(name string, ctx Context) uint { - switch name { - case "ChunkOffset": - return uint(co64.EntryCount) - } - panic(fmt.Errorf("invalid name of dynamic-length field: boxType=co64 fieldName=%s", name)) -} - -/*************************** colr ****************************/ - -func BoxTypeColr() BoxType { return StrToBoxType("colr") } - -func init() { - AddBoxDef(&Colr{}) -} - -type Colr struct { - Box - ColourType [4]byte `mp4:"0,size=8,string"` - ColourPrimaries uint16 `mp4:"1,size=16,opt=dynamic"` - TransferCharacteristics uint16 `mp4:"2,size=16,opt=dynamic"` - MatrixCoefficients uint16 `mp4:"3,size=16,opt=dynamic"` - FullRangeFlag bool `mp4:"4,size=1,opt=dynamic"` - Reserved uint8 `mp4:"5,size=7,opt=dynamic"` - Profile []byte `mp4:"6,size=8,opt=dynamic"` - Unknown []byte `mp4:"7,size=8,opt=dynamic"` -} - -func (colr *Colr) IsOptFieldEnabled(name string, ctx Context) bool { - switch colr.ColourType { - case [4]byte{'n', 'c', 'l', 'x'}: - switch name { - case "ColourType", - "ColourPrimaries", - "TransferCharacteristics", - "MatrixCoefficients", - "FullRangeFlag", - "Reserved": - return true - default: - return false - } - case [4]byte{'r', 'I', 'C', 'C'}, [4]byte{'p', 'r', 'o', 'f'}: - return name == "Profile" - default: - return name == "Unknown" - } -} - -// GetType returns the BoxType -func (*Colr) GetType() BoxType { - return BoxTypeColr() -} - -/*************************** cslg ****************************/ - -func BoxTypeCslg() BoxType { return StrToBoxType("cslg") } - -func init() { - AddBoxDef(&Cslg{}, 0, 1) -} - -type Cslg struct { - FullBox `mp4:"0,extend"` - CompositionToDTSShiftV0 int32 `mp4:"1,size=32,ver=0"` - 
LeastDecodeToDisplayDeltaV0 int32 `mp4:"2,size=32,ver=0"` - GreatestDecodeToDisplayDeltaV0 int32 `mp4:"3,size=32,ver=0"` - CompositionStartTimeV0 int32 `mp4:"4,size=32,ver=0"` - CompositionEndTimeV0 int32 `mp4:"5,size=32,ver=0"` - CompositionToDTSShiftV1 int64 `mp4:"6,size=64,nver=0"` - LeastDecodeToDisplayDeltaV1 int64 `mp4:"7,size=64,nver=0"` - GreatestDecodeToDisplayDeltaV1 int64 `mp4:"8,size=64,nver=0"` - CompositionStartTimeV1 int64 `mp4:"9,size=64,nver=0"` - CompositionEndTimeV1 int64 `mp4:"10,size=64,nver=0"` -} - -// GetType returns the BoxType -func (*Cslg) GetType() BoxType { - return BoxTypeCslg() -} - -func (cslg *Cslg) GetCompositionToDTSShift() int64 { - switch cslg.GetVersion() { - case 0: - return int64(cslg.CompositionToDTSShiftV0) - case 1: - return cslg.CompositionToDTSShiftV1 - default: - return 0 - } -} - -func (cslg *Cslg) GetLeastDecodeToDisplayDelta() int64 { - switch cslg.GetVersion() { - case 0: - return int64(cslg.LeastDecodeToDisplayDeltaV0) - case 1: - return cslg.LeastDecodeToDisplayDeltaV1 - default: - return 0 - } -} - -func (cslg *Cslg) GetGreatestDecodeToDisplayDelta() int64 { - switch cslg.GetVersion() { - case 0: - return int64(cslg.GreatestDecodeToDisplayDeltaV0) - case 1: - return cslg.GreatestDecodeToDisplayDeltaV1 - default: - return 0 - } -} - -func (cslg *Cslg) GetCompositionStartTime() int64 { - switch cslg.GetVersion() { - case 0: - return int64(cslg.CompositionStartTimeV0) - case 1: - return cslg.CompositionStartTimeV1 - default: - return 0 - } -} - -func (cslg *Cslg) GetCompositionEndTime() int64 { - switch cslg.GetVersion() { - case 0: - return int64(cslg.CompositionEndTimeV0) - case 1: - return cslg.CompositionEndTimeV1 - default: - return 0 - } -} - -/*************************** ctts ****************************/ - -func BoxTypeCtts() BoxType { return StrToBoxType("ctts") } - -func init() { - AddBoxDef(&Ctts{}, 0, 1) -} - -type Ctts struct { - FullBox `mp4:"0,extend"` - EntryCount uint32 `mp4:"1,size=32"` - Entries 
[]CttsEntry `mp4:"2,len=dynamic,size=64"` -} - -type CttsEntry struct { - SampleCount uint32 `mp4:"0,size=32"` - SampleOffsetV0 uint32 `mp4:"1,size=32,ver=0"` - SampleOffsetV1 int32 `mp4:"2,size=32,ver=1"` -} - -// GetType returns the BoxType -func (*Ctts) GetType() BoxType { - return BoxTypeCtts() -} - -// GetFieldLength returns length of dynamic field -func (ctts *Ctts) GetFieldLength(name string, ctx Context) uint { - switch name { - case "Entries": - return uint(ctts.EntryCount) - } - panic(fmt.Errorf("invalid name of dynamic-length field: boxType=ctts fieldName=%s", name)) -} - -func (ctts *Ctts) GetSampleOffset(index int) int64 { - switch ctts.GetVersion() { - case 0: - return int64(ctts.Entries[index].SampleOffsetV0) - case 1: - return int64(ctts.Entries[index].SampleOffsetV1) - default: - return 0 - } -} - -/*************************** dinf ****************************/ - -func BoxTypeDinf() BoxType { return StrToBoxType("dinf") } - -func init() { - AddBoxDef(&Dinf{}) -} - -// Dinf is ISOBMFF dinf box type -type Dinf struct { - Box -} - -// GetType returns the BoxType -func (*Dinf) GetType() BoxType { - return BoxTypeDinf() -} - -/*************************** dref ****************************/ - -func BoxTypeDref() BoxType { return StrToBoxType("dref") } -func BoxTypeUrl() BoxType { return StrToBoxType("url ") } -func BoxTypeUrn() BoxType { return StrToBoxType("urn ") } - -func init() { - AddBoxDef(&Dref{}, 0) - AddBoxDef(&Url{}, 0) - AddBoxDef(&Urn{}, 0) -} - -// Dref is ISOBMFF dref box type -type Dref struct { - FullBox `mp4:"0,extend"` - EntryCount uint32 `mp4:"1,size=32"` -} - -// GetType returns the BoxType -func (*Dref) GetType() BoxType { - return BoxTypeDref() -} - -type Url struct { - FullBox `mp4:"0,extend"` - Location string `mp4:"1,string,nopt=0x000001"` -} - -func (*Url) GetType() BoxType { - return BoxTypeUrl() -} - -const UrlSelfContained = 0x000001 - -type Urn struct { - FullBox `mp4:"0,extend"` - Name string `mp4:"1,string,nopt=0x000001"` - 
Location string `mp4:"2,string,nopt=0x000001"` -} - -func (*Urn) GetType() BoxType { - return BoxTypeUrn() -} - -const UrnSelfContained = 0x000001 - -/*************************** edts ****************************/ - -func BoxTypeEdts() BoxType { return StrToBoxType("edts") } - -func init() { - AddBoxDef(&Edts{}) -} - -// Edts is ISOBMFF edts box type -type Edts struct { - Box -} - -// GetType returns the BoxType -func (*Edts) GetType() BoxType { - return BoxTypeEdts() -} - -/*************************** elst ****************************/ - -func BoxTypeElst() BoxType { return StrToBoxType("elst") } - -func init() { - AddBoxDef(&Elst{}, 0, 1) -} - -// Elst is ISOBMFF elst box type -type Elst struct { - FullBox `mp4:"0,extend"` - EntryCount uint32 `mp4:"1,size=32"` - Entries []ElstEntry `mp4:"2,len=dynamic,size=dynamic"` -} - -type ElstEntry struct { - SegmentDurationV0 uint32 `mp4:"0,size=32,ver=0"` - MediaTimeV0 int32 `mp4:"1,size=32,ver=0"` - SegmentDurationV1 uint64 `mp4:"2,size=64,ver=1"` - MediaTimeV1 int64 `mp4:"3,size=64,ver=1"` - MediaRateInteger int16 `mp4:"4,size=16"` - MediaRateFraction int16 `mp4:"5,size=16,const=0"` -} - -// GetType returns the BoxType -func (*Elst) GetType() BoxType { - return BoxTypeElst() -} - -// GetFieldSize returns size of dynamic field -func (elst *Elst) GetFieldSize(name string, ctx Context) uint { - switch name { - case "Entries": - switch elst.GetVersion() { - case 0: - return 0 + - /* segmentDurationV0 */ 32 + - /* mediaTimeV0 */ 32 + - /* mediaRateInteger */ 16 + - /* mediaRateFraction */ 16 - case 1: - return 0 + - /* segmentDurationV1 */ 64 + - /* mediaTimeV1 */ 64 + - /* mediaRateInteger */ 16 + - /* mediaRateFraction */ 16 - } - } - panic(fmt.Errorf("invalid name of dynamic-size field: boxType=elst fieldName=%s", name)) -} - -// GetFieldLength returns length of dynamic field -func (elst *Elst) GetFieldLength(name string, ctx Context) uint { - switch name { - case "Entries": - return uint(elst.EntryCount) - } - 
panic(fmt.Errorf("invalid name of dynamic-length field: boxType=elst fieldName=%s", name)) -} - -func (elst *Elst) GetSegmentDuration(index int) uint64 { - switch elst.GetVersion() { - case 0: - return uint64(elst.Entries[index].SegmentDurationV0) - case 1: - return elst.Entries[index].SegmentDurationV1 - default: - return 0 - } -} - -func (elst *Elst) GetMediaTime(index int) int64 { - switch elst.GetVersion() { - case 0: - return int64(elst.Entries[index].MediaTimeV0) - case 1: - return elst.Entries[index].MediaTimeV1 - default: - return 0 - } -} - -/*************************** emsg ****************************/ - -func BoxTypeEmsg() BoxType { return StrToBoxType("emsg") } - -func init() { - AddBoxDef(&Emsg{}, 0, 1) -} - -// Emsg is ISOBMFF emsg box type -type Emsg struct { - FullBox `mp4:"0,extend"` - SchemeIdUri string `mp4:"1,string"` - Value string `mp4:"2,string"` - Timescale uint32 `mp4:"3,size=32"` - PresentationTimeDelta uint32 `mp4:"4,size=32,ver=0"` - PresentationTime uint64 `mp4:"5,size=64,ver=1"` - EventDuration uint32 `mp4:"6,size=32"` - Id uint32 `mp4:"7,size=32"` - MessageData []byte `mp4:"8,size=8,string"` -} - -func (emsg *Emsg) OnReadField(name string, r bitio.ReadSeeker, leftBits uint64, ctx Context) (rbits uint64, override bool, err error) { - if emsg.GetVersion() == 0 { - return - } - switch name { - case "SchemeIdUri", "Value": - override = true - return - case "MessageData": - emsg.SchemeIdUri, err = util.ReadString(r) - if err != nil { - return - } - emsg.Value, err = util.ReadString(r) - if err != nil { - return - } - rbits += uint64(len(emsg.SchemeIdUri)+len(emsg.Value)+2) * 8 - return - default: - return - } -} - -func (emsg *Emsg) OnWriteField(name string, w bitio.Writer, ctx Context) (wbits uint64, override bool, err error) { - if emsg.GetVersion() == 0 { - return - } - switch name { - case "SchemeIdUri", "Value": - override = true - return - case "MessageData": - if err = util.WriteString(w, emsg.SchemeIdUri); err != nil { - return - 
} - if err = util.WriteString(w, emsg.Value); err != nil { - return - } - wbits += uint64(len(emsg.SchemeIdUri)+len(emsg.Value)+2) * 8 - return - default: - return - } -} - -// GetType returns the BoxType -func (*Emsg) GetType() BoxType { - return BoxTypeEmsg() -} - -/*************************** fiel ****************************/ - -func BoxTypeFiel() BoxType { return StrToBoxType("fiel") } - -func init() { - AddBoxDef(&Fiel{}) -} - -type Fiel struct { - Box - FieldCount uint8 `mp4:"0,size=8"` - FieldOrdering uint8 `mp4:"1,size=8"` -} - -func (Fiel) GetType() BoxType { - return BoxTypeFiel() -} - -/************************ free, skip *************************/ - -func BoxTypeFree() BoxType { return StrToBoxType("free") } -func BoxTypeSkip() BoxType { return StrToBoxType("skip") } - -func init() { - AddBoxDef(&Free{}) - AddBoxDef(&Skip{}) -} - -type FreeSpace struct { - Box - Data []uint8 `mp4:"0,size=8"` -} - -type Free FreeSpace - -func (*Free) GetType() BoxType { - return BoxTypeFree() -} - -type Skip FreeSpace - -func (*Skip) GetType() BoxType { - return BoxTypeSkip() -} - -/*************************** frma ****************************/ - -func BoxTypeFrma() BoxType { return StrToBoxType("frma") } - -func init() { - AddBoxDef(&Frma{}) -} - -// Frma is ISOBMFF frma box type -type Frma struct { - Box - DataFormat [4]byte `mp4:"0,size=8,string"` -} - -// GetType returns the BoxType -func (*Frma) GetType() BoxType { - return BoxTypeFrma() -} - -/*************************** ftyp ****************************/ - -func BoxTypeFtyp() BoxType { return StrToBoxType("ftyp") } - -func init() { - AddBoxDef(&Ftyp{}) -} - -func BrandQT() [4]byte { return [4]byte{'q', 't', ' ', ' '} } -func BrandISOM() [4]byte { return [4]byte{'i', 's', 'o', 'm'} } -func BrandISO2() [4]byte { return [4]byte{'i', 's', 'o', '2'} } -func BrandISO3() [4]byte { return [4]byte{'i', 's', 'o', '3'} } -func BrandISO4() [4]byte { return [4]byte{'i', 's', 'o', '4'} } -func BrandISO5() [4]byte { return 
[4]byte{'i', 's', 'o', '5'} } -func BrandISO6() [4]byte { return [4]byte{'i', 's', 'o', '6'} } -func BrandISO7() [4]byte { return [4]byte{'i', 's', 'o', '7'} } -func BrandISO8() [4]byte { return [4]byte{'i', 's', 'o', '8'} } -func BrandISO9() [4]byte { return [4]byte{'i', 's', 'o', '9'} } -func BrandAVC1() [4]byte { return [4]byte{'a', 'v', 'c', '1'} } -func BrandMP41() [4]byte { return [4]byte{'m', 'p', '4', '1'} } -func BrandMP71() [4]byte { return [4]byte{'m', 'p', '7', '1'} } - -// Ftyp is ISOBMFF ftyp box type -type Ftyp struct { - Box - MajorBrand [4]byte `mp4:"0,size=8,string"` - MinorVersion uint32 `mp4:"1,size=32"` - CompatibleBrands []CompatibleBrandElem `mp4:"2,size=32"` // reach to end of the box -} - -type CompatibleBrandElem struct { - CompatibleBrand [4]byte `mp4:"0,size=8,string"` -} - -func (ftyp *Ftyp) AddCompatibleBrand(cb [4]byte) { - if !ftyp.HasCompatibleBrand(cb) { - ftyp.CompatibleBrands = append(ftyp.CompatibleBrands, CompatibleBrandElem{ - CompatibleBrand: cb, - }) - } -} - -func (ftyp *Ftyp) RemoveCompatibleBrand(cb [4]byte) { - for i := 0; i < len(ftyp.CompatibleBrands); { - if ftyp.CompatibleBrands[i].CompatibleBrand != cb { - i++ - continue - } - ftyp.CompatibleBrands[i] = ftyp.CompatibleBrands[len(ftyp.CompatibleBrands)-1] - ftyp.CompatibleBrands = ftyp.CompatibleBrands[:len(ftyp.CompatibleBrands)-1] - } -} - -func (ftyp *Ftyp) HasCompatibleBrand(cb [4]byte) bool { - for i := range ftyp.CompatibleBrands { - if ftyp.CompatibleBrands[i].CompatibleBrand == cb { - return true - } - } - return false -} - -// GetType returns the BoxType -func (*Ftyp) GetType() BoxType { - return BoxTypeFtyp() -} - -/*************************** hdlr ****************************/ - -func BoxTypeHdlr() BoxType { return StrToBoxType("hdlr") } - -func init() { - AddBoxDef(&Hdlr{}, 0) -} - -// Hdlr is ISOBMFF hdlr box type -type Hdlr struct { - FullBox `mp4:"0,extend"` - // Predefined corresponds to component_type of QuickTime. 
- // pre_defined of ISO-14496 has always zero, - // however component_type has "mhlr" or "dhlr". - PreDefined uint32 `mp4:"1,size=32"` - HandlerType [4]byte `mp4:"2,size=8,string"` - Reserved [3]uint32 `mp4:"3,size=32,const=0"` - Name string `mp4:"4,string"` -} - -// GetType returns the BoxType -func (*Hdlr) GetType() BoxType { - return BoxTypeHdlr() -} - -func (hdlr *Hdlr) OnReadField(name string, r bitio.ReadSeeker, leftBits uint64, ctx Context) (rbits uint64, override bool, err error) { - switch name { - case "Name": - return hdlr.OnReadName(r, leftBits, ctx) - default: - return 0, false, nil - } -} - -func (hdlr *Hdlr) OnReadName(r bitio.ReadSeeker, leftBits uint64, ctx Context) (rbits uint64, override bool, err error) { - size := leftBits / 8 - if size == 0 { - hdlr.Name = "" - return 0, true, nil - } - - if !readerHasSize(r, size) { - return 0, false, fmt.Errorf("not enough bits") - } - - buf := make([]byte, size) - if _, err := io.ReadFull(r, buf); err != nil { - return 0, false, err - } - - plen := buf[0] - if hdlr.PreDefined != 0 && size >= 2 && size == uint64(plen+1) { - // Pascal-style String - hdlr.Name = string(buf[1 : plen+1]) - } else { - // C-style String - clen := 0 - for _, c := range buf { - if c == 0x00 { - break - } - clen++ - } - hdlr.Name = string(buf[:clen]) - } - return leftBits, true, nil -} - -/*************************** hvcC ****************************/ - -func BoxTypeHvcC() BoxType { return StrToBoxType("hvcC") } - -func init() { - AddBoxDef(&HvcC{}) -} - -type HEVCNalu struct { - BaseCustomFieldObject - Length uint16 `mp4:"0,size=16"` - NALUnit []byte `mp4:"1,size=8,len=dynamic"` -} - -func (s HEVCNalu) GetFieldLength(name string, ctx Context) uint { - switch name { - case "NALUnit": - return uint(s.Length) - } - return 0 -} - -type HEVCNaluArray struct { - BaseCustomFieldObject - Completeness bool `mp4:"0,size=1"` - Reserved bool `mp4:"1,size=1"` - NaluType uint8 `mp4:"2,size=6"` - NumNalus uint16 `mp4:"3,size=16"` - Nalus 
[]HEVCNalu `mp4:"4,len=dynamic"` -} - -func (a HEVCNaluArray) GetFieldLength(name string, ctx Context) uint { - switch name { - case "Nalus": - return uint(a.NumNalus) - } - return 0 -} - -type HvcC struct { - Box - ConfigurationVersion uint8 `mp4:"0,size=8"` - GeneralProfileSpace uint8 `mp4:"1,size=2"` - GeneralTierFlag bool `mp4:"2,size=1"` - GeneralProfileIdc uint8 `mp4:"3,size=5"` - GeneralProfileCompatibility [32]bool `mp4:"4,size=1"` - GeneralConstraintIndicator [6]uint8 `mp4:"5,size=8"` - GeneralLevelIdc uint8 `mp4:"6,size=8"` - Reserved1 uint8 `mp4:"7,size=4,const=15"` - MinSpatialSegmentationIdc uint16 `mp4:"8,size=12"` - Reserved2 uint8 `mp4:"9,size=6,const=63"` - ParallelismType uint8 `mp4:"10,size=2"` - Reserved3 uint8 `mp4:"11,size=6,const=63"` - ChromaFormatIdc uint8 `mp4:"12,size=2"` - Reserved4 uint8 `mp4:"13,size=5,const=31"` - BitDepthLumaMinus8 uint8 `mp4:"14,size=3"` - Reserved5 uint8 `mp4:"15,size=5,const=31"` - BitDepthChromaMinus8 uint8 `mp4:"16,size=3"` - AvgFrameRate uint16 `mp4:"17,size=16"` - ConstantFrameRate uint8 `mp4:"18,size=2"` - NumTemporalLayers uint8 `mp4:"19,size=2"` - TemporalIdNested uint8 `mp4:"20,size=2"` - LengthSizeMinusOne uint8 `mp4:"21,size=2"` - NumOfNaluArrays uint8 `mp4:"22,size=8"` - NaluArrays []HEVCNaluArray `mp4:"23,len=dynamic"` -} - -func (HvcC) GetType() BoxType { - return BoxTypeHvcC() -} - -func (hvcc HvcC) GetFieldLength(name string, ctx Context) uint { - switch name { - case "NaluArrays": - return uint(hvcc.NumOfNaluArrays) - } - return 0 -} - -/*************************** mdat ****************************/ - -func BoxTypeMdat() BoxType { return StrToBoxType("mdat") } - -func init() { - AddBoxDef(&Mdat{}) -} - -// Mdat is ISOBMFF mdat box type -type Mdat struct { - Box - Data []byte `mp4:"0,size=8"` -} - -// GetType returns the BoxType -func (*Mdat) GetType() BoxType { - return BoxTypeMdat() -} - -/*************************** mdhd ****************************/ - -func BoxTypeMdhd() BoxType { return 
StrToBoxType("mdhd") } - -func init() { - AddBoxDef(&Mdhd{}, 0, 1) -} - -// Mdhd is ISOBMFF mdhd box type -type Mdhd struct { - FullBox `mp4:"0,extend"` - CreationTimeV0 uint32 `mp4:"1,size=32,ver=0"` - ModificationTimeV0 uint32 `mp4:"2,size=32,ver=0"` - CreationTimeV1 uint64 `mp4:"3,size=64,ver=1"` - ModificationTimeV1 uint64 `mp4:"4,size=64,ver=1"` - Timescale uint32 `mp4:"5,size=32"` - DurationV0 uint32 `mp4:"6,size=32,ver=0"` - DurationV1 uint64 `mp4:"7,size=64,ver=1"` - // - Pad bool `mp4:"8,size=1,hidden"` - Language [3]byte `mp4:"9,size=5,iso639-2"` // ISO-639-2/T language code - PreDefined uint16 `mp4:"10,size=16"` -} - -// GetType returns the BoxType -func (*Mdhd) GetType() BoxType { - return BoxTypeMdhd() -} - -func (mdhd *Mdhd) GetCreationTime() uint64 { - switch mdhd.GetVersion() { - case 0: - return uint64(mdhd.CreationTimeV0) - case 1: - return mdhd.CreationTimeV1 - default: - return 0 - } -} - -func (mdhd *Mdhd) GetModificationTime() uint64 { - switch mdhd.GetVersion() { - case 0: - return uint64(mdhd.ModificationTimeV0) - case 1: - return mdhd.ModificationTimeV1 - default: - return 0 - } -} - -func (mdhd *Mdhd) GetDuration() uint64 { - switch mdhd.GetVersion() { - case 0: - return uint64(mdhd.DurationV0) - case 1: - return mdhd.DurationV1 - default: - return 0 - } -} - -/*************************** mdia ****************************/ - -func BoxTypeMdia() BoxType { return StrToBoxType("mdia") } - -func init() { - AddBoxDef(&Mdia{}) -} - -// Mdia is ISOBMFF mdia box type -type Mdia struct { - Box -} - -// GetType returns the BoxType -func (*Mdia) GetType() BoxType { - return BoxTypeMdia() -} - -/*************************** mehd ****************************/ - -func BoxTypeMehd() BoxType { return StrToBoxType("mehd") } - -func init() { - AddBoxDef(&Mehd{}, 0, 1) -} - -// Mehd is ISOBMFF mehd box type -type Mehd struct { - FullBox `mp4:"0,extend"` - FragmentDurationV0 uint32 `mp4:"1,size=32,ver=0"` - FragmentDurationV1 uint64 `mp4:"2,size=64,ver=1"` -} 
- -// GetType returns the BoxType -func (*Mehd) GetType() BoxType { - return BoxTypeMehd() -} - -func (mdhd *Mehd) GetFragmentDuration() uint64 { - switch mdhd.GetVersion() { - case 0: - return uint64(mdhd.FragmentDurationV0) - case 1: - return mdhd.FragmentDurationV1 - default: - return 0 - } -} - -/*************************** meta ****************************/ - -func BoxTypeMeta() BoxType { return StrToBoxType("meta") } - -func init() { - AddBoxDef(&Meta{}, 0) -} - -// Meta is ISOBMFF meta box type -type Meta struct { - FullBox `mp4:"0,extend"` -} - -// GetType returns the BoxType -func (*Meta) GetType() BoxType { - return BoxTypeMeta() -} - -func (meta *Meta) BeforeUnmarshal(r io.ReadSeeker, size uint64, ctx Context) (n uint64, override bool, err error) { - // for Apple Quick Time - buf := make([]byte, 4) - if _, err := io.ReadFull(r, buf); err != nil { - return 0, false, err - } - if _, err := r.Seek(-int64(len(buf)), io.SeekCurrent); err != nil { - return 0, false, err - } - if buf[0]|buf[1]|buf[2]|buf[3] != 0x00 { - meta.Version = 0 - meta.Flags = [3]byte{0, 0, 0} - return 0, true, nil - } - return 0, false, nil -} - -/*************************** mfhd ****************************/ - -func BoxTypeMfhd() BoxType { return StrToBoxType("mfhd") } - -func init() { - AddBoxDef(&Mfhd{}, 0) -} - -// Mfhd is ISOBMFF mfhd box type -type Mfhd struct { - FullBox `mp4:"0,extend"` - SequenceNumber uint32 `mp4:"1,size=32"` -} - -// GetType returns the BoxType -func (*Mfhd) GetType() BoxType { - return BoxTypeMfhd() -} - -/*************************** mfra ****************************/ - -func BoxTypeMfra() BoxType { return StrToBoxType("mfra") } - -func init() { - AddBoxDef(&Mfra{}) -} - -// Mfra is ISOBMFF mfra box type -type Mfra struct { - Box -} - -// GetType returns the BoxType -func (*Mfra) GetType() BoxType { - return BoxTypeMfra() -} - -/*************************** mfro ****************************/ - -func BoxTypeMfro() BoxType { return StrToBoxType("mfro") } - 
-func init() { - AddBoxDef(&Mfro{}, 0) -} - -// Mfro is ISOBMFF mfro box type -type Mfro struct { - FullBox `mp4:"0,extend"` - Size uint32 `mp4:"1,size=32"` -} - -// GetType returns the BoxType -func (*Mfro) GetType() BoxType { - return BoxTypeMfro() -} - -/*************************** minf ****************************/ - -func BoxTypeMinf() BoxType { return StrToBoxType("minf") } - -func init() { - AddBoxDef(&Minf{}) -} - -// Minf is ISOBMFF minf box type -type Minf struct { - Box -} - -// GetType returns the BoxType -func (*Minf) GetType() BoxType { - return BoxTypeMinf() -} - -/*************************** moof ****************************/ - -func BoxTypeMoof() BoxType { return StrToBoxType("moof") } - -func init() { - AddBoxDef(&Moof{}) -} - -// Moof is ISOBMFF moof box type -type Moof struct { - Box -} - -// GetType returns the BoxType -func (*Moof) GetType() BoxType { - return BoxTypeMoof() -} - -/*************************** moov ****************************/ - -func BoxTypeMoov() BoxType { return StrToBoxType("moov") } - -func init() { - AddBoxDef(&Moov{}) -} - -// Moov is ISOBMFF moov box type -type Moov struct { - Box -} - -// GetType returns the BoxType -func (*Moov) GetType() BoxType { - return BoxTypeMoov() -} - -/*************************** mvex ****************************/ - -func BoxTypeMvex() BoxType { return StrToBoxType("mvex") } - -func init() { - AddBoxDef(&Mvex{}) -} - -// Mvex is ISOBMFF mvex box type -type Mvex struct { - Box -} - -// GetType returns the BoxType -func (*Mvex) GetType() BoxType { - return BoxTypeMvex() -} - -/*************************** mvhd ****************************/ - -func BoxTypeMvhd() BoxType { return StrToBoxType("mvhd") } - -func init() { - AddBoxDef(&Mvhd{}, 0, 1) -} - -// Mvhd is ISOBMFF mvhd box type -type Mvhd struct { - FullBox `mp4:"0,extend"` - CreationTimeV0 uint32 `mp4:"1,size=32,ver=0"` - ModificationTimeV0 uint32 `mp4:"2,size=32,ver=0"` - CreationTimeV1 uint64 `mp4:"3,size=64,ver=1"` - ModificationTimeV1 
uint64 `mp4:"4,size=64,ver=1"` - Timescale uint32 `mp4:"5,size=32"` - DurationV0 uint32 `mp4:"6,size=32,ver=0"` - DurationV1 uint64 `mp4:"7,size=64,ver=1"` - Rate int32 `mp4:"8,size=32"` // fixed-point 16.16 - template=0x00010000 - Volume int16 `mp4:"9,size=16"` // template=0x0100 - Reserved int16 `mp4:"10,size=16,const=0"` - Reserved2 [2]uint32 `mp4:"11,size=32,const=0"` - Matrix [9]int32 `mp4:"12,size=32,hex"` // template={ 0x00010000,0,0,0,0x00010000,0,0,0,0x40000000 } - PreDefined [6]int32 `mp4:"13,size=32"` - NextTrackID uint32 `mp4:"14,size=32"` -} - -// GetType returns the BoxType -func (*Mvhd) GetType() BoxType { - return BoxTypeMvhd() -} - -// StringifyField returns field value as string -func (mvhd *Mvhd) StringifyField(name string, indent string, depth int, ctx Context) (string, bool) { - switch name { - case "Rate": - return util.FormatSignedFixedFloat1616(mvhd.Rate), true - default: - return "", false - } -} - -func (mvhd *Mvhd) GetCreationTime() uint64 { - switch mvhd.GetVersion() { - case 0: - return uint64(mvhd.CreationTimeV0) - case 1: - return mvhd.CreationTimeV1 - default: - return 0 - } -} - -func (mvhd *Mvhd) GetModificationTime() uint64 { - switch mvhd.GetVersion() { - case 0: - return uint64(mvhd.ModificationTimeV0) - case 1: - return mvhd.ModificationTimeV1 - default: - return 0 - } -} - -func (mvhd *Mvhd) GetDuration() uint64 { - switch mvhd.GetVersion() { - case 0: - return uint64(mvhd.DurationV0) - case 1: - return mvhd.DurationV1 - default: - return 0 - } -} - -// GetRate returns value of rate as float64 -func (mvhd *Mvhd) GetRate() float64 { - return float64(mvhd.Rate) / (1 << 16) -} - -// GetRateInt returns value of rate as int16 -func (mvhd *Mvhd) GetRateInt() int16 { - return int16(mvhd.Rate >> 16) -} - -/*************************** saio ****************************/ - -func BoxTypeSaio() BoxType { return StrToBoxType("saio") } - -func init() { - AddBoxDef(&Saio{}, 0, 1) -} - -type Saio struct { - FullBox `mp4:"0,extend"` - 
AuxInfoType [4]byte `mp4:"1,size=8,opt=0x000001,string"` - AuxInfoTypeParameter uint32 `mp4:"2,size=32,opt=0x000001,hex"` - EntryCount uint32 `mp4:"3,size=32"` - OffsetV0 []uint32 `mp4:"4,size=32,ver=0,len=dynamic"` - OffsetV1 []uint64 `mp4:"5,size=64,nver=0,len=dynamic"` -} - -func (saio *Saio) GetFieldLength(name string, ctx Context) uint { - switch name { - case "OffsetV0", "OffsetV1": - return uint(saio.EntryCount) - } - panic(fmt.Errorf("invalid name of dynamic-length field: boxType=saio fieldName=%s", name)) -} - -func (*Saio) GetType() BoxType { - return BoxTypeSaio() -} - -func (saio *Saio) GetOffset(index int) uint64 { - switch saio.GetVersion() { - case 0: - return uint64(saio.OffsetV0[index]) - case 1: - return saio.OffsetV1[index] - default: - return 0 - } -} - -/*************************** saiz ****************************/ - -func BoxTypeSaiz() BoxType { return StrToBoxType("saiz") } - -func init() { - AddBoxDef(&Saiz{}, 0) -} - -type Saiz struct { - FullBox `mp4:"0,extend"` - AuxInfoType [4]byte `mp4:"1,size=8,opt=0x000001,string"` - AuxInfoTypeParameter uint32 `mp4:"2,size=32,opt=0x000001,hex"` - DefaultSampleInfoSize uint8 `mp4:"3,size=8,dec"` - SampleCount uint32 `mp4:"4,size=32"` - SampleInfoSize []uint8 `mp4:"5,size=8,opt=dynamic,len=dynamic,dec"` -} - -func (saiz *Saiz) IsOptFieldEnabled(name string, ctx Context) bool { - switch name { - case "SampleInfoSize": - return saiz.DefaultSampleInfoSize == 0 - } - return false -} - -func (saiz *Saiz) GetFieldLength(name string, ctx Context) uint { - switch name { - case "SampleInfoSize": - return uint(saiz.SampleCount) - } - panic(fmt.Errorf("invalid name of dynamic-length field: boxType=saiz fieldName=%s", name)) -} - -func (*Saiz) GetType() BoxType { - return BoxTypeSaiz() -} - -/*********************** SampleEntry *************************/ - -func BoxTypeMp4v() BoxType { return StrToBoxType("mp4v") } -func BoxTypeAvc1() BoxType { return StrToBoxType("avc1") } -func BoxTypeEncv() BoxType { return 
StrToBoxType("encv") } -func BoxTypeHev1() BoxType { return StrToBoxType("hev1") } -func BoxTypeHvc1() BoxType { return StrToBoxType("hvc1") } -func BoxTypeMp4a() BoxType { return StrToBoxType("mp4a") } -func BoxTypeEnca() BoxType { return StrToBoxType("enca") } -func BoxTypeAvcC() BoxType { return StrToBoxType("avcC") } -func BoxTypePasp() BoxType { return StrToBoxType("pasp") } - -func init() { - AddAnyTypeBoxDef(&VisualSampleEntry{}, BoxTypeMp4v()) - AddAnyTypeBoxDef(&VisualSampleEntry{}, BoxTypeAvc1()) - AddAnyTypeBoxDef(&VisualSampleEntry{}, BoxTypeEncv()) - AddAnyTypeBoxDef(&VisualSampleEntry{}, BoxTypeHev1()) - AddAnyTypeBoxDef(&VisualSampleEntry{}, BoxTypeHvc1()) - AddAnyTypeBoxDef(&AudioSampleEntry{}, BoxTypeMp4a()) - AddAnyTypeBoxDef(&AudioSampleEntry{}, BoxTypeEnca()) - AddAnyTypeBoxDef(&AVCDecoderConfiguration{}, BoxTypeAvcC()) - AddAnyTypeBoxDef(&PixelAspectRatioBox{}, BoxTypePasp()) -} - -type SampleEntry struct { - AnyTypeBox - Reserved [6]uint8 `mp4:"0,size=8,const=0"` - DataReferenceIndex uint16 `mp4:"1,size=16"` -} - -type VisualSampleEntry struct { - SampleEntry `mp4:"0,extend"` - PreDefined uint16 `mp4:"1,size=16"` - Reserved uint16 `mp4:"2,size=16,const=0"` - PreDefined2 [3]uint32 `mp4:"3,size=32"` - Width uint16 `mp4:"4,size=16"` - Height uint16 `mp4:"5,size=16"` - Horizresolution uint32 `mp4:"6,size=32"` - Vertresolution uint32 `mp4:"7,size=32"` - Reserved2 uint32 `mp4:"8,size=32,const=0"` - FrameCount uint16 `mp4:"9,size=16"` - Compressorname [32]byte `mp4:"10,size=8"` - Depth uint16 `mp4:"11,size=16"` - PreDefined3 int16 `mp4:"12,size=16"` -} - -// StringifyField returns field value as string -func (vse *VisualSampleEntry) StringifyField(name string, indent string, depth int, ctx Context) (string, bool) { - switch name { - case "Compressorname": - if vse.Compressorname[0] <= 31 { - return `"` + util.EscapeUnprintables(string(vse.Compressorname[1:vse.Compressorname[0]+1])) + `"`, true - } - return "", false - default: - return "", false - } 
-} - -type AudioSampleEntry struct { - SampleEntry `mp4:"0,extend,opt=dynamic"` - EntryVersion uint16 `mp4:"1,size=16,opt=dynamic"` - Reserved [3]uint16 `mp4:"2,size=16,opt=dynamic,const=0"` - ChannelCount uint16 `mp4:"3,size=16,opt=dynamic"` - SampleSize uint16 `mp4:"4,size=16,opt=dynamic"` - PreDefined uint16 `mp4:"5,size=16,opt=dynamic"` - Reserved2 uint16 `mp4:"6,size=16,opt=dynamic,const=0"` - SampleRate uint32 `mp4:"7,size=32,opt=dynamic"` // fixed-point 16.16 - QuickTimeData []byte `mp4:"8,size=8,opt=dynamic,len=dynamic"` -} - -func (ase *AudioSampleEntry) IsOptFieldEnabled(name string, ctx Context) bool { - if name == "QuickTimeData" { - return ctx.IsQuickTimeCompatible && (ctx.UnderWave || ase.EntryVersion == 1 || ase.EntryVersion == 2) - } - if ctx.IsQuickTimeCompatible && ctx.UnderWave { - return false - } - return true -} - -func (ase *AudioSampleEntry) GetFieldLength(name string, ctx Context) uint { - if name == "QuickTimeData" && ctx.IsQuickTimeCompatible { - if ctx.UnderWave { - return LengthUnlimited - } else if ase.EntryVersion == 1 { - return 16 - } else if ase.EntryVersion == 2 { - return 36 - } - } - return 0 -} - -// StringifyField returns field value as string -func (ase *AudioSampleEntry) StringifyField(name string, indent string, depth int, ctx Context) (string, bool) { - switch name { - case "SampleRate": - return util.FormatUnsignedFixedFloat1616(ase.SampleRate), true - default: - return "", false - } -} - -func (ase *AudioSampleEntry) GetSampleRate() float64 { - return float64(ase.SampleRate) / (1 << 16) -} - -func (ase *AudioSampleEntry) GetSampleRateInt() uint16 { - return uint16(ase.SampleRate >> 16) -} - -const ( - AVCBaselineProfile uint8 = 66 // 0x42 - AVCMainProfile uint8 = 77 // 0x4d - AVCExtendedProfile uint8 = 88 // 0x58 - AVCHighProfile uint8 = 100 // 0x64 - AVCHigh10Profile uint8 = 110 // 0x6e - AVCHigh422Profile uint8 = 122 // 0x7a -) - -type AVCDecoderConfiguration struct { - AnyTypeBox - ConfigurationVersion uint8 
`mp4:"0,size=8"` - Profile uint8 `mp4:"1,size=8"` - ProfileCompatibility uint8 `mp4:"2,size=8"` - Level uint8 `mp4:"3,size=8"` - Reserved uint8 `mp4:"4,size=6,const=63"` - LengthSizeMinusOne uint8 `mp4:"5,size=2"` - Reserved2 uint8 `mp4:"6,size=3,const=7"` - NumOfSequenceParameterSets uint8 `mp4:"7,size=5"` - SequenceParameterSets []AVCParameterSet `mp4:"8,len=dynamic"` - NumOfPictureParameterSets uint8 `mp4:"9,size=8"` - PictureParameterSets []AVCParameterSet `mp4:"10,len=dynamic"` - HighProfileFieldsEnabled bool `mp4:"11,hidden"` - Reserved3 uint8 `mp4:"12,size=6,opt=dynamic,const=63"` - ChromaFormat uint8 `mp4:"13,size=2,opt=dynamic"` - Reserved4 uint8 `mp4:"14,size=5,opt=dynamic,const=31"` - BitDepthLumaMinus8 uint8 `mp4:"15,size=3,opt=dynamic"` - Reserved5 uint8 `mp4:"16,size=5,opt=dynamic,const=31"` - BitDepthChromaMinus8 uint8 `mp4:"17,size=3,opt=dynamic"` - NumOfSequenceParameterSetExt uint8 `mp4:"18,size=8,opt=dynamic"` - SequenceParameterSetsExt []AVCParameterSet `mp4:"19,len=dynamic,opt=dynamic"` -} - -func (avcc *AVCDecoderConfiguration) GetFieldLength(name string, ctx Context) uint { - switch name { - case "SequenceParameterSets": - return uint(avcc.NumOfSequenceParameterSets) - case "PictureParameterSets": - return uint(avcc.NumOfPictureParameterSets) - case "SequenceParameterSetsExt": - return uint(avcc.NumOfSequenceParameterSetExt) - } - return 0 -} - -func (avcc *AVCDecoderConfiguration) IsOptFieldEnabled(name string, ctx Context) bool { - switch name { - case "Reserved3", - "ChromaFormat", - "Reserved4", - "BitDepthLumaMinus8", - "Reserved5", - "BitDepthChromaMinus8", - "NumOfSequenceParameterSetExt", - "SequenceParameterSetsExt": - return avcc.HighProfileFieldsEnabled - } - return false -} - -func (avcc *AVCDecoderConfiguration) OnReadField(name string, r bitio.ReadSeeker, leftBits uint64, ctx Context) (rbits uint64, override bool, err error) { - if name == "HighProfileFieldsEnabled" { - avcc.HighProfileFieldsEnabled = leftBits >= 32 && - 
(avcc.Profile == AVCHighProfile || - avcc.Profile == AVCHigh10Profile || - avcc.Profile == AVCHigh422Profile || - avcc.Profile == 144) - return 0, true, nil - } - return 0, false, nil -} - -func (avcc *AVCDecoderConfiguration) OnWriteField(name string, w bitio.Writer, ctx Context) (wbits uint64, override bool, err error) { - if name == "HighProfileFieldsEnabled" { - if avcc.HighProfileFieldsEnabled && - avcc.Profile != AVCHighProfile && - avcc.Profile != AVCHigh10Profile && - avcc.Profile != AVCHigh422Profile && - avcc.Profile != 144 { - return 0, false, errors.New("each values of Profile and HighProfileFieldsEnabled are inconsistent") - } - return 0, true, nil - } - return 0, false, nil -} - -type AVCParameterSet struct { - BaseCustomFieldObject - Length uint16 `mp4:"0,size=16"` - NALUnit []byte `mp4:"1,size=8,len=dynamic"` -} - -func (s *AVCParameterSet) GetFieldLength(name string, ctx Context) uint { - switch name { - case "NALUnit": - return uint(s.Length) - } - return 0 -} - -type PixelAspectRatioBox struct { - AnyTypeBox - HSpacing uint32 `mp4:"0,size=32"` - VSpacing uint32 `mp4:"1,size=32"` -} - -/*************************** sbgp ****************************/ - -func BoxTypeSbgp() BoxType { return StrToBoxType("sbgp") } - -func init() { - AddBoxDef(&Sbgp{}, 0, 1) -} - -type Sbgp struct { - FullBox `mp4:"0,extend"` - GroupingType uint32 `mp4:"1,size=32"` - GroupingTypeParameter uint32 `mp4:"2,size=32,ver=1"` - EntryCount uint32 `mp4:"3,size=32"` - Entries []SbgpEntry `mp4:"4,len=dynamic,size=64"` -} - -type SbgpEntry struct { - SampleCount uint32 `mp4:"0,size=32"` - GroupDescriptionIndex uint32 `mp4:"1,size=32"` -} - -func (sbgp *Sbgp) GetFieldLength(name string, ctx Context) uint { - switch name { - case "Entries": - return uint(sbgp.EntryCount) - } - panic(fmt.Errorf("invalid name of dynamic-length field: boxType=sbgp fieldName=%s", name)) -} - -func (*Sbgp) GetType() BoxType { - return BoxTypeSbgp() -} - -/*************************** schi 
****************************/ - -func BoxTypeSchi() BoxType { return StrToBoxType("schi") } - -func init() { - AddBoxDef(&Schi{}) -} - -type Schi struct { - Box -} - -func (*Schi) GetType() BoxType { - return BoxTypeSchi() -} - -/*************************** schm ****************************/ - -func BoxTypeSchm() BoxType { return StrToBoxType("schm") } - -func init() { - AddBoxDef(&Schm{}, 0) -} - -type Schm struct { - FullBox `mp4:"0,extend"` - SchemeType [4]byte `mp4:"1,size=8,string"` - SchemeVersion uint32 `mp4:"2,size=32,hex"` - SchemeUri []byte `mp4:"3,size=8,opt=0x000001,string"` -} - -func (*Schm) GetType() BoxType { - return BoxTypeSchm() -} - -/*************************** sdtp ****************************/ - -func BoxTypeSdtp() BoxType { return StrToBoxType("sdtp") } - -func init() { - AddBoxDef(&Sdtp{}, 0) -} - -type Sdtp struct { - FullBox `mp4:"0,extend"` - Samples []SdtpSampleElem `mp4:"1,size=8"` -} - -type SdtpSampleElem struct { - IsLeading uint8 `mp4:"0,size=2"` - SampleDependsOn uint8 `mp4:"1,size=2"` - SampleIsDependedOn uint8 `mp4:"2,size=2"` - SampleHasRedundancy uint8 `mp4:"3,size=2"` -} - -func (*Sdtp) GetType() BoxType { - return BoxTypeSdtp() -} - -/*************************** sgpd ****************************/ - -func BoxTypeSgpd() BoxType { return StrToBoxType("sgpd") } - -func init() { - AddBoxDef(&Sgpd{}, 1, 2) // version 0 is deprecated by ISO/IEC 14496-12 -} - -type Sgpd struct { - FullBox `mp4:"0,extend"` - GroupingType [4]byte `mp4:"1,size=8,string"` - DefaultLength uint32 `mp4:"2,size=32,ver=1"` - DefaultSampleDescriptionIndex uint32 `mp4:"3,size=32,ver=2"` - EntryCount uint32 `mp4:"4,size=32"` - RollDistances []int16 `mp4:"5,size=16,opt=dynamic"` - RollDistancesL []RollDistanceWithLength `mp4:"6,size=16,opt=dynamic"` - AlternativeStartupEntries []AlternativeStartupEntry `mp4:"7,size=dynamic,len=dynamic,opt=dynamic"` - AlternativeStartupEntriesL []AlternativeStartupEntryL `mp4:"8,len=dynamic,opt=dynamic"` - 
VisualRandomAccessEntries []VisualRandomAccessEntry `mp4:"9,len=dynamic,opt=dynamic"` - VisualRandomAccessEntriesL []VisualRandomAccessEntryL `mp4:"10,len=dynamic,opt=dynamic"` - TemporalLevelEntries []TemporalLevelEntry `mp4:"11,len=dynamic,opt=dynamic"` - TemporalLevelEntriesL []TemporalLevelEntryL `mp4:"12,len=dynamic,opt=dynamic"` - Unsupported []byte `mp4:"13,size=8,opt=dynamic"` -} - -type RollDistanceWithLength struct { - DescriptionLength uint32 `mp4:"0,size=32"` - RollDistance int16 `mp4:"1,size=16"` -} - -type AlternativeStartupEntry struct { - BaseCustomFieldObject - RollCount uint16 `mp4:"0,size=16"` - FirstOutputSample uint16 `mp4:"1,size=16"` - SampleOffset []uint32 `mp4:"2,size=32,len=dynamic"` - Opts []AlternativeStartupEntryOpt `mp4:"3,size=32"` -} - -type AlternativeStartupEntryL struct { - DescriptionLength uint32 `mp4:"0,size=32"` - AlternativeStartupEntry `mp4:"1,extend,size=dynamic"` -} - -type AlternativeStartupEntryOpt struct { - NumOutputSamples uint16 `mp4:"0,size=16"` - NumTotalSamples uint16 `mp4:"1,size=16"` -} - -type VisualRandomAccessEntry struct { - NumLeadingSamplesKnown bool `mp4:"0,size=1"` - NumLeadingSamples uint8 `mp4:"1,size=7"` -} - -type VisualRandomAccessEntryL struct { - DescriptionLength uint32 `mp4:"0,size=32"` - VisualRandomAccessEntry `mp4:"1,extend"` -} - -type TemporalLevelEntry struct { - LevelIndependentlyDecodable bool `mp4:"0,size=1"` - Reserved uint8 `mp4:"1,size=7,const=0"` -} - -type TemporalLevelEntryL struct { - DescriptionLength uint32 `mp4:"0,size=32"` - TemporalLevelEntry `mp4:"1,extend"` -} - -func (sgpd *Sgpd) GetFieldSize(name string, ctx Context) uint { - switch name { - case "AlternativeStartupEntries": - return uint(sgpd.DefaultLength * 8) - } - return 0 -} - -func (sgpd *Sgpd) GetFieldLength(name string, ctx Context) uint { - switch name { - case "RollDistances", "RollDistancesL", - "AlternativeStartupEntries", "AlternativeStartupEntriesL", - "VisualRandomAccessEntries", 
"VisualRandomAccessEntriesL", - "TemporalLevelEntries", "TemporalLevelEntriesL": - return uint(sgpd.EntryCount) - } - return 0 -} - -func (sgpd *Sgpd) IsOptFieldEnabled(name string, ctx Context) bool { - noDefaultLength := sgpd.Version == 1 && sgpd.DefaultLength == 0 - rollDistances := sgpd.GroupingType == [4]byte{'r', 'o', 'l', 'l'} || - sgpd.GroupingType == [4]byte{'p', 'r', 'o', 'l'} - alternativeStartupEntries := sgpd.GroupingType == [4]byte{'a', 'l', 's', 't'} - visualRandomAccessEntries := sgpd.GroupingType == [4]byte{'r', 'a', 'p', ' '} - temporalLevelEntries := sgpd.GroupingType == [4]byte{'t', 'e', 'l', 'e'} - switch name { - case "RollDistances": - return rollDistances && !noDefaultLength - case "RollDistancesL": - return rollDistances && noDefaultLength - case "AlternativeStartupEntries": - return alternativeStartupEntries && !noDefaultLength - case "AlternativeStartupEntriesL": - return alternativeStartupEntries && noDefaultLength - case "VisualRandomAccessEntries": - return visualRandomAccessEntries && !noDefaultLength - case "VisualRandomAccessEntriesL": - return visualRandomAccessEntries && noDefaultLength - case "TemporalLevelEntries": - return temporalLevelEntries && !noDefaultLength - case "TemporalLevelEntriesL": - return temporalLevelEntries && noDefaultLength - case "Unsupported": - return !rollDistances && - !alternativeStartupEntries && - !visualRandomAccessEntries && - !temporalLevelEntries - default: - return false - } -} - -func (*Sgpd) GetType() BoxType { - return BoxTypeSgpd() -} - -func (entry *AlternativeStartupEntry) GetFieldLength(name string, ctx Context) uint { - switch name { - case "SampleOffset": - return uint(entry.RollCount) - } - return 0 -} - -func (entry *AlternativeStartupEntryL) GetFieldSize(name string, ctx Context) uint { - switch name { - case "AlternativeStartupEntry": - return uint(entry.DescriptionLength * 8) - } - return 0 -} - -/*************************** sidx ****************************/ - -func BoxTypeSidx() 
BoxType { return StrToBoxType("sidx") } - -func init() { - AddBoxDef(&Sidx{}, 0, 1) -} - -type Sidx struct { - FullBox `mp4:"0,extend"` - ReferenceID uint32 `mp4:"1,size=32"` - Timescale uint32 `mp4:"2,size=32"` - EarliestPresentationTimeV0 uint32 `mp4:"3,size=32,ver=0"` - FirstOffsetV0 uint32 `mp4:"4,size=32,ver=0"` - EarliestPresentationTimeV1 uint64 `mp4:"5,size=64,nver=0"` - FirstOffsetV1 uint64 `mp4:"6,size=64,nver=0"` - Reserved uint16 `mp4:"7,size=16,const=0"` - ReferenceCount uint16 `mp4:"8,size=16"` - References []SidxReference `mp4:"9,size=96,len=dynamic"` -} - -type SidxReference struct { - ReferenceType bool `mp4:"0,size=1"` - ReferencedSize uint32 `mp4:"1,size=31"` - SubsegmentDuration uint32 `mp4:"2,size=32"` - StartsWithSAP bool `mp4:"3,size=1"` - SAPType uint32 `mp4:"4,size=3"` - SAPDeltaTime uint32 `mp4:"5,size=28"` -} - -func (*Sidx) GetType() BoxType { - return BoxTypeSidx() -} - -func (sidx *Sidx) GetFieldLength(name string, ctx Context) uint { - switch name { - case "References": - return uint(sidx.ReferenceCount) - } - panic(fmt.Errorf("invalid name of dynamic-length field: boxType=sidx fieldName=%s", name)) -} - -func (sidx *Sidx) GetEarliestPresentationTime() uint64 { - switch sidx.GetVersion() { - case 0: - return uint64(sidx.EarliestPresentationTimeV0) - case 1: - return sidx.EarliestPresentationTimeV1 - default: - return 0 - } -} - -func (sidx *Sidx) GetFirstOffset() uint64 { - switch sidx.GetVersion() { - case 0: - return uint64(sidx.FirstOffsetV0) - case 1: - return sidx.FirstOffsetV1 - default: - return 0 - } -} - -/*************************** sinf ****************************/ - -func BoxTypeSinf() BoxType { return StrToBoxType("sinf") } - -func init() { - AddBoxDef(&Sinf{}) -} - -type Sinf struct { - Box -} - -func (*Sinf) GetType() BoxType { - return BoxTypeSinf() -} - -/*************************** smhd ****************************/ - -func BoxTypeSmhd() BoxType { return StrToBoxType("smhd") } - -func init() { - AddBoxDef(&Smhd{}, 
0) -} - -type Smhd struct { - FullBox `mp4:"0,extend"` - Balance int16 `mp4:"1,size=16"` // fixed-point 8.8 template=0 - Reserved uint16 `mp4:"2,size=16,const=0"` -} - -func (*Smhd) GetType() BoxType { - return BoxTypeSmhd() -} - -// StringifyField returns field value as string -func (smhd *Smhd) StringifyField(name string, indent string, depth int, ctx Context) (string, bool) { - switch name { - case "Balance": - return util.FormatSignedFixedFloat88(smhd.Balance), true - default: - return "", false - } -} - -// GetBalance returns value of width as float32 -func (smhd *Smhd) GetBalance() float32 { - return float32(smhd.Balance) / (1 << 8) -} - -// GetBalanceInt returns value of width as int8 -func (smhd *Smhd) GetBalanceInt() int8 { - return int8(smhd.Balance >> 8) -} - -/*************************** stbl ****************************/ - -func BoxTypeStbl() BoxType { return StrToBoxType("stbl") } - -func init() { - AddBoxDef(&Stbl{}) -} - -// Stbl is ISOBMFF stbl box type -type Stbl struct { - Box -} - -// GetType returns the BoxType -func (*Stbl) GetType() BoxType { - return BoxTypeStbl() -} - -/*************************** stco ****************************/ - -func BoxTypeStco() BoxType { return StrToBoxType("stco") } - -func init() { - AddBoxDef(&Stco{}, 0) -} - -// Stco is ISOBMFF stco box type -type Stco struct { - FullBox `mp4:"0,extend"` - EntryCount uint32 `mp4:"1,size=32"` - ChunkOffset []uint32 `mp4:"2,size=32,len=dynamic"` -} - -// GetType returns the BoxType -func (*Stco) GetType() BoxType { - return BoxTypeStco() -} - -// GetFieldLength returns length of dynamic field -func (stco *Stco) GetFieldLength(name string, ctx Context) uint { - switch name { - case "ChunkOffset": - return uint(stco.EntryCount) - } - panic(fmt.Errorf("invalid name of dynamic-length field: boxType=stco fieldName=%s", name)) -} - -/*************************** stsc ****************************/ - -func BoxTypeStsc() BoxType { return StrToBoxType("stsc") } - -func init() { - 
AddBoxDef(&Stsc{}, 0) -} - -// Stsc is ISOBMFF stsc box type -type Stsc struct { - FullBox `mp4:"0,extend"` - EntryCount uint32 `mp4:"1,size=32"` - Entries []StscEntry `mp4:"2,len=dynamic,size=96"` -} - -type StscEntry struct { - FirstChunk uint32 `mp4:"0,size=32"` - SamplesPerChunk uint32 `mp4:"1,size=32"` - SampleDescriptionIndex uint32 `mp4:"2,size=32"` -} - -// GetType returns the BoxType -func (*Stsc) GetType() BoxType { - return BoxTypeStsc() -} - -// GetFieldLength returns length of dynamic field -func (stsc *Stsc) GetFieldLength(name string, ctx Context) uint { - switch name { - case "Entries": - return uint(stsc.EntryCount) - } - panic(fmt.Errorf("invalid name of dynamic-length field: boxType=stsc fieldName=%s", name)) -} - -/*************************** stsd ****************************/ - -func BoxTypeStsd() BoxType { return StrToBoxType("stsd") } - -func init() { - AddBoxDef(&Stsd{}, 0) -} - -// Stsd is ISOBMFF stsd box type -type Stsd struct { - FullBox `mp4:"0,extend"` - EntryCount uint32 `mp4:"1,size=32"` -} - -// GetType returns the BoxType -func (*Stsd) GetType() BoxType { - return BoxTypeStsd() -} - -/*************************** stss ****************************/ - -func BoxTypeStss() BoxType { return StrToBoxType("stss") } - -func init() { - AddBoxDef(&Stss{}, 0) -} - -type Stss struct { - FullBox `mp4:"0,extend"` - EntryCount uint32 `mp4:"1,size=32"` - SampleNumber []uint32 `mp4:"2,len=dynamic,size=32"` -} - -// GetType returns the BoxType -func (*Stss) GetType() BoxType { - return BoxTypeStss() -} - -// GetFieldLength returns length of dynamic field -func (stss *Stss) GetFieldLength(name string, ctx Context) uint { - switch name { - case "SampleNumber": - return uint(stss.EntryCount) - } - panic(fmt.Errorf("invalid name of dynamic-length field: boxType=stss fieldName=%s", name)) -} - -/*************************** stsz ****************************/ - -func BoxTypeStsz() BoxType { return StrToBoxType("stsz") } - -func init() { - AddBoxDef(&Stsz{}, 
0) -} - -// Stsz is ISOBMFF stsz box type -type Stsz struct { - FullBox `mp4:"0,extend"` - SampleSize uint32 `mp4:"1,size=32"` - SampleCount uint32 `mp4:"2,size=32"` - EntrySize []uint32 `mp4:"3,size=32,len=dynamic"` -} - -// GetType returns the BoxType -func (*Stsz) GetType() BoxType { - return BoxTypeStsz() -} - -// GetFieldLength returns length of dynamic field -func (stsz *Stsz) GetFieldLength(name string, ctx Context) uint { - switch name { - case "EntrySize": - if stsz.SampleSize == 0 { - return uint(stsz.SampleCount) - } - return 0 - } - panic(fmt.Errorf("invalid name of dynamic-length field: boxType=stsz fieldName=%s", name)) -} - -/*************************** stts ****************************/ - -func BoxTypeStts() BoxType { return StrToBoxType("stts") } - -func init() { - AddBoxDef(&Stts{}, 0) -} - -// Stts is ISOBMFF stts box type -type Stts struct { - FullBox `mp4:"0,extend"` - EntryCount uint32 `mp4:"1,size=32"` - Entries []SttsEntry `mp4:"2,len=dynamic,size=64"` -} - -type SttsEntry struct { - SampleCount uint32 `mp4:"0,size=32"` - SampleDelta uint32 `mp4:"1,size=32"` -} - -// GetType returns the BoxType -func (*Stts) GetType() BoxType { - return BoxTypeStts() -} - -// GetFieldLength returns length of dynamic field -func (stts *Stts) GetFieldLength(name string, ctx Context) uint { - switch name { - case "Entries": - return uint(stts.EntryCount) - } - panic(fmt.Errorf("invalid name of dynamic-length field: boxType=stts fieldName=%s", name)) -} - -/*************************** styp ****************************/ - -func BoxTypeStyp() BoxType { return StrToBoxType("styp") } - -func init() { - AddBoxDef(&Styp{}) -} - -type Styp struct { - Box - MajorBrand [4]byte `mp4:"0,size=8,string"` - MinorVersion uint32 `mp4:"1,size=32"` - CompatibleBrands []CompatibleBrandElem `mp4:"2,size=32"` // reach to end of the box -} - -func (*Styp) GetType() BoxType { - return BoxTypeStyp() -} - -/*************************** tfdt ****************************/ - -func 
BoxTypeTfdt() BoxType { return StrToBoxType("tfdt") } - -func init() { - AddBoxDef(&Tfdt{}, 0, 1) -} - -// Tfdt is ISOBMFF tfdt box type -type Tfdt struct { - FullBox `mp4:"0,extend"` - BaseMediaDecodeTimeV0 uint32 `mp4:"1,size=32,ver=0"` - BaseMediaDecodeTimeV1 uint64 `mp4:"2,size=64,ver=1"` -} - -// GetType returns the BoxType -func (*Tfdt) GetType() BoxType { - return BoxTypeTfdt() -} - -func (tfdt *Tfdt) GetBaseMediaDecodeTime() uint64 { - switch tfdt.GetVersion() { - case 0: - return uint64(tfdt.BaseMediaDecodeTimeV0) - case 1: - return tfdt.BaseMediaDecodeTimeV1 - default: - return 0 - } -} - -/*************************** tfhd ****************************/ - -func BoxTypeTfhd() BoxType { return StrToBoxType("tfhd") } - -func init() { - AddBoxDef(&Tfhd{}, 0) -} - -// Tfhd is ISOBMFF tfhd box type -type Tfhd struct { - FullBox `mp4:"0,extend"` - TrackID uint32 `mp4:"1,size=32"` - - // optional - BaseDataOffset uint64 `mp4:"2,size=64,opt=0x000001"` - SampleDescriptionIndex uint32 `mp4:"3,size=32,opt=0x000002"` - DefaultSampleDuration uint32 `mp4:"4,size=32,opt=0x000008"` - DefaultSampleSize uint32 `mp4:"5,size=32,opt=0x000010"` - DefaultSampleFlags uint32 `mp4:"6,size=32,opt=0x000020,hex"` -} - -const ( - TfhdBaseDataOffsetPresent = 0x000001 - TfhdSampleDescriptionIndexPresent = 0x000002 - TfhdDefaultSampleDurationPresent = 0x000008 - TfhdDefaultSampleSizePresent = 0x000010 - TfhdDefaultSampleFlagsPresent = 0x000020 - TfhdDurationIsEmpty = 0x010000 - TfhdDefaultBaseIsMoof = 0x020000 -) - -// GetType returns the BoxType -func (*Tfhd) GetType() BoxType { - return BoxTypeTfhd() -} - -/*************************** tfra ****************************/ - -func BoxTypeTfra() BoxType { return StrToBoxType("tfra") } - -func init() { - AddBoxDef(&Tfra{}, 0, 1) -} - -// Tfra is ISOBMFF tfra box type -type Tfra struct { - FullBox `mp4:"0,extend"` - TrackID uint32 `mp4:"1,size=32"` - Reserved uint32 `mp4:"2,size=26,const=0"` - LengthSizeOfTrafNum byte `mp4:"3,size=2"` - 
LengthSizeOfTrunNum byte `mp4:"4,size=2"` - LengthSizeOfSampleNum byte `mp4:"5,size=2"` - NumberOfEntry uint32 `mp4:"6,size=32"` - Entries []TfraEntry `mp4:"7,len=dynamic,size=dynamic"` -} - -type TfraEntry struct { - TimeV0 uint32 `mp4:"0,size=32,ver=0"` - MoofOffsetV0 uint32 `mp4:"1,size=32,ver=0"` - TimeV1 uint64 `mp4:"2,size=64,ver=1"` - MoofOffsetV1 uint64 `mp4:"3,size=64,ver=1"` - TrafNumber uint32 `mp4:"4,size=dynamic"` - TrunNumber uint32 `mp4:"5,size=dynamic"` - SampleNumber uint32 `mp4:"6,size=dynamic"` -} - -// GetType returns the BoxType -func (*Tfra) GetType() BoxType { - return BoxTypeTfra() -} - -// GetFieldSize returns size of dynamic field -func (tfra *Tfra) GetFieldSize(name string, ctx Context) uint { - switch name { - case "TrafNumber": - return (uint(tfra.LengthSizeOfTrafNum) + 1) * 8 - case "TrunNumber": - return (uint(tfra.LengthSizeOfTrunNum) + 1) * 8 - case "SampleNumber": - return (uint(tfra.LengthSizeOfSampleNum) + 1) * 8 - case "Entries": - switch tfra.GetVersion() { - case 0: - return 0 + - /* TimeV0 */ 32 + - /* MoofOffsetV0 */ 32 + - /* TrafNumber */ (uint(tfra.LengthSizeOfTrafNum)+1)*8 + - /* TrunNumber */ (uint(tfra.LengthSizeOfTrunNum)+1)*8 + - /* SampleNumber */ (uint(tfra.LengthSizeOfSampleNum)+1)*8 - case 1: - return 0 + - /* TimeV1 */ 64 + - /* MoofOffsetV1 */ 64 + - /* TrafNumber */ (uint(tfra.LengthSizeOfTrafNum)+1)*8 + - /* TrunNumber */ (uint(tfra.LengthSizeOfTrunNum)+1)*8 + - /* SampleNumber */ (uint(tfra.LengthSizeOfSampleNum)+1)*8 - } - } - panic(fmt.Errorf("invalid name of dynamic-size field: boxType=tfra fieldName=%s", name)) -} - -// GetFieldLength returns length of dynamic field -func (tfra *Tfra) GetFieldLength(name string, ctx Context) uint { - switch name { - case "Entries": - return uint(tfra.NumberOfEntry) - } - panic(fmt.Errorf("invalid name of dynamic-length field: boxType=tfra fieldName=%s", name)) -} - -func (tfra *Tfra) GetTime(index int) uint64 { - switch tfra.GetVersion() { - case 0: - return 
uint64(tfra.Entries[index].TimeV0) - case 1: - return tfra.Entries[index].TimeV1 - default: - return 0 - } -} - -func (tfra *Tfra) GetMoofOffset(index int) uint64 { - switch tfra.GetVersion() { - case 0: - return uint64(tfra.Entries[index].MoofOffsetV0) - case 1: - return tfra.Entries[index].MoofOffsetV1 - default: - return 0 - } -} - -/*************************** tkhd ****************************/ - -func BoxTypeTkhd() BoxType { return StrToBoxType("tkhd") } - -func init() { - AddBoxDef(&Tkhd{}, 0, 1) -} - -// Tkhd is ISOBMFF tkhd box type -type Tkhd struct { - FullBox `mp4:"0,extend"` - CreationTimeV0 uint32 `mp4:"1,size=32,ver=0"` - ModificationTimeV0 uint32 `mp4:"2,size=32,ver=0"` - CreationTimeV1 uint64 `mp4:"3,size=64,ver=1"` - ModificationTimeV1 uint64 `mp4:"4,size=64,ver=1"` - TrackID uint32 `mp4:"5,size=32"` - Reserved0 uint32 `mp4:"6,size=32,const=0"` - DurationV0 uint32 `mp4:"7,size=32,ver=0"` - DurationV1 uint64 `mp4:"8,size=64,ver=1"` - // - Reserved1 [2]uint32 `mp4:"9,size=32,const=0"` - Layer int16 `mp4:"10,size=16"` // template=0 - AlternateGroup int16 `mp4:"11,size=16"` // template=0 - Volume int16 `mp4:"12,size=16"` // template={if track_is_audio 0x0100 else 0} - Reserved2 uint16 `mp4:"13,size=16,const=0"` - Matrix [9]int32 `mp4:"14,size=32,hex"` // template={ 0x00010000,0,0,0,0x00010000,0,0,0,0x40000000 }; - Width uint32 `mp4:"15,size=32"` // fixed-point 16.16 - Height uint32 `mp4:"16,size=32"` // fixed-point 16.16 -} - -// GetType returns the BoxType -func (*Tkhd) GetType() BoxType { - return BoxTypeTkhd() -} - -// StringifyField returns field value as string -func (tkhd *Tkhd) StringifyField(name string, indent string, depth int, ctx Context) (string, bool) { - switch name { - case "Width": - return util.FormatUnsignedFixedFloat1616(tkhd.Width), true - case "Height": - return util.FormatUnsignedFixedFloat1616(tkhd.Height), true - default: - return "", false - } -} - -func (tkhd *Tkhd) GetCreationTime() uint64 { - switch tkhd.GetVersion() { - 
case 0: - return uint64(tkhd.CreationTimeV0) - case 1: - return tkhd.CreationTimeV1 - default: - return 0 - } -} - -func (tkhd *Tkhd) GetModificationTime() uint64 { - switch tkhd.GetVersion() { - case 0: - return uint64(tkhd.ModificationTimeV0) - case 1: - return tkhd.ModificationTimeV1 - default: - return 0 - } -} - -func (tkhd *Tkhd) GetDuration() uint64 { - switch tkhd.GetVersion() { - case 0: - return uint64(tkhd.DurationV0) - case 1: - return tkhd.DurationV1 - default: - return 0 - } -} - -// GetWidth returns value of width as float64 -func (tkhd *Tkhd) GetWidth() float64 { - return float64(tkhd.Width) / (1 << 16) -} - -// GetWidthInt returns value of width as uint16 -func (tkhd *Tkhd) GetWidthInt() uint16 { - return uint16(tkhd.Width >> 16) -} - -// GetHeight returns value of height as float64 -func (tkhd *Tkhd) GetHeight() float64 { - return float64(tkhd.Height) / (1 << 16) -} - -// GetHeightInt returns value of height as uint16 -func (tkhd *Tkhd) GetHeightInt() uint16 { - return uint16(tkhd.Height >> 16) -} - -/*************************** traf ****************************/ - -func BoxTypeTraf() BoxType { return StrToBoxType("traf") } - -func init() { - AddBoxDef(&Traf{}) -} - -// Traf is ISOBMFF traf box type -type Traf struct { - Box -} - -// GetType returns the BoxType -func (*Traf) GetType() BoxType { - return BoxTypeTraf() -} - -/*************************** trak ****************************/ - -func BoxTypeTrak() BoxType { return StrToBoxType("trak") } - -func init() { - AddBoxDef(&Trak{}) -} - -// Trak is ISOBMFF trak box type -type Trak struct { - Box -} - -// GetType returns the BoxType -func (*Trak) GetType() BoxType { - return BoxTypeTrak() -} - -/*************************** trep ****************************/ - -func BoxTypeTrep() BoxType { return StrToBoxType("trep") } - -func init() { - AddBoxDef(&Trep{}, 0) -} - -// Trep is ISOBMFF trep box type -type Trep struct { - FullBox `mp4:"0,extend"` - TrackID uint32 `mp4:"1,size=32"` -} - -// GetType 
returns the BoxType -func (*Trep) GetType() BoxType { - return BoxTypeTrep() -} - -/*************************** trex ****************************/ - -func BoxTypeTrex() BoxType { return StrToBoxType("trex") } - -func init() { - AddBoxDef(&Trex{}, 0) -} - -// Trex is ISOBMFF trex box type -type Trex struct { - FullBox `mp4:"0,extend"` - TrackID uint32 `mp4:"1,size=32"` - DefaultSampleDescriptionIndex uint32 `mp4:"2,size=32"` - DefaultSampleDuration uint32 `mp4:"3,size=32"` - DefaultSampleSize uint32 `mp4:"4,size=32"` - DefaultSampleFlags uint32 `mp4:"5,size=32,hex"` -} - -// GetType returns the BoxType -func (*Trex) GetType() BoxType { - return BoxTypeTrex() -} - -/*************************** trun ****************************/ - -func BoxTypeTrun() BoxType { return StrToBoxType("trun") } - -func init() { - AddBoxDef(&Trun{}, 0, 1) -} - -// Trun is ISOBMFF trun box type -type Trun struct { - FullBox `mp4:"0,extend"` - SampleCount uint32 `mp4:"1,size=32"` - - // optional fields - DataOffset int32 `mp4:"2,size=32,opt=0x000001"` - FirstSampleFlags uint32 `mp4:"3,size=32,opt=0x000004,hex"` - Entries []TrunEntry `mp4:"4,len=dynamic,size=dynamic"` -} - -type TrunEntry struct { - SampleDuration uint32 `mp4:"0,size=32,opt=0x000100"` - SampleSize uint32 `mp4:"1,size=32,opt=0x000200"` - SampleFlags uint32 `mp4:"2,size=32,opt=0x000400,hex"` - SampleCompositionTimeOffsetV0 uint32 `mp4:"3,size=32,opt=0x000800,ver=0"` - SampleCompositionTimeOffsetV1 int32 `mp4:"4,size=32,opt=0x000800,nver=0"` -} - -// GetType returns the BoxType -func (*Trun) GetType() BoxType { - return BoxTypeTrun() -} - -// GetFieldSize returns size of dynamic field -func (trun *Trun) GetFieldSize(name string, ctx Context) uint { - switch name { - case "Entries": - var size uint - flags := trun.GetFlags() - if flags&0x100 != 0 { - size += 32 // SampleDuration - } - if flags&0x200 != 0 { - size += 32 // SampleSize - } - if flags&0x400 != 0 { - size += 32 // SampleFlags - } - if flags&0x800 != 0 { - size += 32 // 
SampleCompositionTimeOffsetV0 or V1 - } - return size - } - panic(fmt.Errorf("invalid name of dynamic-size field: boxType=trun fieldName=%s", name)) -} - -// GetFieldLength returns length of dynamic field -func (trun *Trun) GetFieldLength(name string, ctx Context) uint { - switch name { - case "Entries": - return uint(trun.SampleCount) - } - panic(fmt.Errorf("invalid name of dynamic-length field: boxType=trun fieldName=%s", name)) -} - -func (trun *Trun) GetSampleCompositionTimeOffset(index int) int64 { - switch trun.GetVersion() { - case 0: - return int64(trun.Entries[index].SampleCompositionTimeOffsetV0) - case 1: - return int64(trun.Entries[index].SampleCompositionTimeOffsetV1) - default: - return 0 - } -} - -/*************************** udta ****************************/ - -func BoxTypeUdta() BoxType { return StrToBoxType("udta") } - -func init() { - AddBoxDef(&Udta{}) -} - -// Udta is ISOBMFF udta box type -type Udta struct { - Box -} - -// GetType returns the BoxType -func (*Udta) GetType() BoxType { - return BoxTypeUdta() -} - -func isUnderUdta(ctx Context) bool { - return ctx.UnderUdta -} - -/*************************** vmhd ****************************/ - -func BoxTypeVmhd() BoxType { return StrToBoxType("vmhd") } - -func init() { - AddBoxDef(&Vmhd{}, 0) -} - -// Vmhd is ISOBMFF vmhd box type -type Vmhd struct { - FullBox `mp4:"0,extend"` - Graphicsmode uint16 `mp4:"1,size=16"` // template=0 - Opcolor [3]uint16 `mp4:"2,size=16"` // template={0, 0, 0} -} - -// GetType returns the BoxType -func (*Vmhd) GetType() BoxType { - return BoxTypeVmhd() -} - -/*************************** wave ****************************/ - -func BoxTypeWave() BoxType { return StrToBoxType("wave") } - -func init() { - AddBoxDef(&Wave{}) -} - -// Wave is QuickTime wave box -type Wave struct { - Box -} - -// GetType returns the BoxType -func (*Wave) GetType() BoxType { - return BoxTypeWave() -} diff --git a/vendor/github.com/abema/go-mp4/box_types_iso14496_14.go 
b/vendor/github.com/abema/go-mp4/box_types_iso14496_14.go deleted file mode 100644 index fe9880ca0..000000000 --- a/vendor/github.com/abema/go-mp4/box_types_iso14496_14.go +++ /dev/null @@ -1,126 +0,0 @@ -package mp4 - -import "fmt" - -/*************************** esds ****************************/ - -// https://developer.apple.com/library/content/documentation/QuickTime/QTFF/QTFFChap3/qtff3.html - -func BoxTypeEsds() BoxType { return StrToBoxType("esds") } - -func init() { - AddBoxDef(&Esds{}, 0) -} - -const ( - ESDescrTag = 0x03 - DecoderConfigDescrTag = 0x04 - DecSpecificInfoTag = 0x05 - SLConfigDescrTag = 0x06 -) - -// Esds is ES descripter box -type Esds struct { - FullBox `mp4:"0,extend"` - Descriptors []Descriptor `mp4:"1,array"` -} - -// GetType returns the BoxType -func (*Esds) GetType() BoxType { - return BoxTypeEsds() -} - -type Descriptor struct { - BaseCustomFieldObject - Tag int8 `mp4:"0,size=8"` // must be 0x03 - Size uint32 `mp4:"1,varint"` - ESDescriptor *ESDescriptor `mp4:"2,extend,opt=dynamic"` - DecoderConfigDescriptor *DecoderConfigDescriptor `mp4:"3,extend,opt=dynamic"` - Data []byte `mp4:"4,size=8,opt=dynamic,len=dynamic"` -} - -// GetFieldLength returns length of dynamic field -func (ds *Descriptor) GetFieldLength(name string, ctx Context) uint { - switch name { - case "Data": - return uint(ds.Size) - } - panic(fmt.Errorf("invalid name of dynamic-length field: boxType=esds fieldName=%s", name)) -} - -func (ds *Descriptor) IsOptFieldEnabled(name string, ctx Context) bool { - switch ds.Tag { - case ESDescrTag: - return name == "ESDescriptor" - case DecoderConfigDescrTag: - return name == "DecoderConfigDescriptor" - default: - return name == "Data" - } -} - -// StringifyField returns field value as string -func (ds *Descriptor) StringifyField(name string, indent string, depth int, ctx Context) (string, bool) { - switch name { - case "Tag": - switch ds.Tag { - case ESDescrTag: - return "ESDescr", true - case DecoderConfigDescrTag: - return 
"DecoderConfigDescr", true - case DecSpecificInfoTag: - return "DecSpecificInfo", true - case SLConfigDescrTag: - return "SLConfigDescr", true - default: - return "", false - } - default: - return "", false - } -} - -type ESDescriptor struct { - BaseCustomFieldObject - ESID uint16 `mp4:"0,size=16"` - StreamDependenceFlag bool `mp4:"1,size=1"` - UrlFlag bool `mp4:"2,size=1"` - OcrStreamFlag bool `mp4:"3,size=1"` - StreamPriority int8 `mp4:"4,size=5"` - DependsOnESID uint16 `mp4:"5,size=16,opt=dynamic"` - URLLength uint8 `mp4:"6,size=8,opt=dynamic"` - URLString []byte `mp4:"7,size=8,len=dynamic,opt=dynamic,string"` - OCRESID uint16 `mp4:"8,size=16,opt=dynamic"` -} - -func (esds *ESDescriptor) GetFieldLength(name string, ctx Context) uint { - switch name { - case "URLString": - return uint(esds.URLLength) - } - panic(fmt.Errorf("invalid name of dynamic-length field: boxType=ESDescriptor fieldName=%s", name)) -} - -func (esds *ESDescriptor) IsOptFieldEnabled(name string, ctx Context) bool { - switch name { - case "DependsOnESID": - return esds.StreamDependenceFlag - case "URLLength", "URLString": - return esds.UrlFlag - case "OCRESID": - return esds.OcrStreamFlag - default: - return false - } -} - -type DecoderConfigDescriptor struct { - BaseCustomFieldObject - ObjectTypeIndication byte `mp4:"0,size=8"` - StreamType int8 `mp4:"1,size=6"` - UpStream bool `mp4:"2,size=1"` - Reserved bool `mp4:"3,size=1"` - BufferSizeDB uint32 `mp4:"4,size=24"` - MaxBitrate uint32 `mp4:"5,size=32"` - AvgBitrate uint32 `mp4:"6,size=32"` -} diff --git a/vendor/github.com/abema/go-mp4/box_types_iso23001_5.go b/vendor/github.com/abema/go-mp4/box_types_iso23001_5.go deleted file mode 100644 index 849411ad1..000000000 --- a/vendor/github.com/abema/go-mp4/box_types_iso23001_5.go +++ /dev/null @@ -1,35 +0,0 @@ -package mp4 - -/*************************** ipcm ****************************/ - -func BoxTypeIpcm() BoxType { return StrToBoxType("ipcm") } - -func init() { - 
AddAnyTypeBoxDef(&AudioSampleEntry{}, BoxTypeIpcm()) -} - -/*************************** fpcm ****************************/ - -func BoxTypeFpcm() BoxType { return StrToBoxType("fpcm") } - -func init() { - AddAnyTypeBoxDef(&AudioSampleEntry{}, BoxTypeFpcm()) -} - -/*************************** pcmC ****************************/ - -func BoxTypePcmC() BoxType { return StrToBoxType("pcmC") } - -func init() { - AddBoxDef(&PcmC{}, 0, 1) -} - -type PcmC struct { - FullBox `mp4:"0,extend"` - FormatFlags uint8 `mp4:"1,size=8"` - PCMSampleSize uint8 `mp4:"1,size=8"` -} - -func (PcmC) GetType() BoxType { - return BoxTypePcmC() -} diff --git a/vendor/github.com/abema/go-mp4/box_types_iso23001_7.go b/vendor/github.com/abema/go-mp4/box_types_iso23001_7.go deleted file mode 100644 index 766c348b0..000000000 --- a/vendor/github.com/abema/go-mp4/box_types_iso23001_7.go +++ /dev/null @@ -1,108 +0,0 @@ -package mp4 - -import ( - "bytes" - "fmt" - - "github.com/google/uuid" -) - -/*************************** pssh ****************************/ - -func BoxTypePssh() BoxType { return StrToBoxType("pssh") } - -func init() { - AddBoxDef(&Pssh{}, 0, 1) -} - -// Pssh is ISOBMFF pssh box type -type Pssh struct { - FullBox `mp4:"0,extend"` - SystemID [16]byte `mp4:"1,size=8,uuid"` - KIDCount uint32 `mp4:"2,size=32,nver=0"` - KIDs []PsshKID `mp4:"3,nver=0,len=dynamic,size=128"` - DataSize int32 `mp4:"4,size=32"` - Data []byte `mp4:"5,size=8,len=dynamic"` -} - -type PsshKID struct { - KID [16]byte `mp4:"0,size=8,uuid"` -} - -// GetFieldLength returns length of dynamic field -func (pssh *Pssh) GetFieldLength(name string, ctx Context) uint { - switch name { - case "KIDs": - return uint(pssh.KIDCount) - case "Data": - return uint(pssh.DataSize) - } - panic(fmt.Errorf("invalid name of dynamic-length field: boxType=pssh fieldName=%s", name)) -} - -// StringifyField returns field value as string -func (pssh *Pssh) StringifyField(name string, indent string, depth int, ctx Context) (string, bool) { - 
switch name { - case "KIDs": - buf := bytes.NewBuffer(nil) - buf.WriteString("[") - for i, e := range pssh.KIDs { - if i != 0 { - buf.WriteString(", ") - } - buf.WriteString(uuid.UUID(e.KID).String()) - } - buf.WriteString("]") - return buf.String(), true - - default: - return "", false - } -} - -// GetType returns the BoxType -func (*Pssh) GetType() BoxType { - return BoxTypePssh() -} - -/*************************** tenc ****************************/ - -func BoxTypeTenc() BoxType { return StrToBoxType("tenc") } - -func init() { - AddBoxDef(&Tenc{}, 0, 1) -} - -// Tenc is ISOBMFF tenc box type -type Tenc struct { - FullBox `mp4:"0,extend"` - Reserved uint8 `mp4:"1,size=8,dec"` - DefaultCryptByteBlock uint8 `mp4:"2,size=4,dec"` // always 0 on version 0 - DefaultSkipByteBlock uint8 `mp4:"3,size=4,dec"` // always 0 on version 0 - DefaultIsProtected uint8 `mp4:"4,size=8,dec"` - DefaultPerSampleIVSize uint8 `mp4:"5,size=8,dec"` - DefaultKID [16]byte `mp4:"6,size=8,uuid"` - DefaultConstantIVSize uint8 `mp4:"7,size=8,opt=dynamic,dec"` - DefaultConstantIV []byte `mp4:"8,size=8,opt=dynamic,len=dynamic"` -} - -func (tenc *Tenc) IsOptFieldEnabled(name string, ctx Context) bool { - switch name { - case "DefaultConstantIVSize", "DefaultConstantIV": - return tenc.DefaultIsProtected == 1 && tenc.DefaultPerSampleIVSize == 0 - } - return false -} - -func (tenc *Tenc) GetFieldLength(name string, ctx Context) uint { - switch name { - case "DefaultConstantIV": - return uint(tenc.DefaultConstantIVSize) - } - panic(fmt.Errorf("invalid name of dynamic-length field: boxType=tenc fieldName=%s", name)) -} - -// GetType returns the BoxType -func (*Tenc) GetType() BoxType { - return BoxTypeTenc() -} diff --git a/vendor/github.com/abema/go-mp4/box_types_metadata.go b/vendor/github.com/abema/go-mp4/box_types_metadata.go deleted file mode 100644 index 7baba2242..000000000 --- a/vendor/github.com/abema/go-mp4/box_types_metadata.go +++ /dev/null @@ -1,257 +0,0 @@ -package mp4 - -import ( - "fmt" - 
- "github.com/abema/go-mp4/internal/util" -) - -/*************************** ilst ****************************/ - -func BoxTypeIlst() BoxType { return StrToBoxType("ilst") } -func BoxTypeData() BoxType { return StrToBoxType("data") } - -var ilstMetaBoxTypes = []BoxType{ - StrToBoxType("----"), - StrToBoxType("aART"), - StrToBoxType("akID"), - StrToBoxType("apID"), - StrToBoxType("atID"), - StrToBoxType("cmID"), - StrToBoxType("cnID"), - StrToBoxType("covr"), - StrToBoxType("cpil"), - StrToBoxType("cprt"), - StrToBoxType("desc"), - StrToBoxType("disk"), - StrToBoxType("egid"), - StrToBoxType("geID"), - StrToBoxType("gnre"), - StrToBoxType("pcst"), - StrToBoxType("pgap"), - StrToBoxType("plID"), - StrToBoxType("purd"), - StrToBoxType("purl"), - StrToBoxType("rtng"), - StrToBoxType("sfID"), - StrToBoxType("soaa"), - StrToBoxType("soal"), - StrToBoxType("soar"), - StrToBoxType("soco"), - StrToBoxType("sonm"), - StrToBoxType("sosn"), - StrToBoxType("stik"), - StrToBoxType("tmpo"), - StrToBoxType("trkn"), - StrToBoxType("tven"), - StrToBoxType("tves"), - StrToBoxType("tvnn"), - StrToBoxType("tvsh"), - StrToBoxType("tvsn"), - {0xA9, 'A', 'R', 'T'}, - {0xA9, 'a', 'l', 'b'}, - {0xA9, 'c', 'm', 't'}, - {0xA9, 'c', 'o', 'm'}, - {0xA9, 'd', 'a', 'y'}, - {0xA9, 'g', 'e', 'n'}, - {0xA9, 'g', 'r', 'p'}, - {0xA9, 'n', 'a', 'm'}, - {0xA9, 't', 'o', 'o'}, - {0xA9, 'w', 'r', 't'}, -} - -func IsIlstMetaBoxType(boxType BoxType) bool { - for _, bt := range ilstMetaBoxTypes { - if boxType == bt { - return true - } - } - return false -} - -func init() { - AddBoxDef(&Ilst{}) - AddBoxDefEx(&Data{}, isUnderIlstMeta) - for _, bt := range ilstMetaBoxTypes { - AddAnyTypeBoxDefEx(&IlstMetaContainer{}, bt, isIlstMetaContainer) - } - AddAnyTypeBoxDefEx(&StringData{}, StrToBoxType("mean"), isUnderIlstFreeFormat) - AddAnyTypeBoxDefEx(&StringData{}, StrToBoxType("name"), isUnderIlstFreeFormat) -} - -type Ilst struct { - Box -} - -// GetType returns the BoxType -func (*Ilst) GetType() BoxType { - 
return BoxTypeIlst() -} - -type IlstMetaContainer struct { - AnyTypeBox -} - -func isIlstMetaContainer(ctx Context) bool { - return ctx.UnderIlst && !ctx.UnderIlstMeta -} - -const ( - DataTypeBinary = 0 - DataTypeStringUTF8 = 1 - DataTypeStringUTF16 = 2 - DataTypeStringMac = 3 - DataTypeStringJPEG = 14 - DataTypeSignedIntBigEndian = 21 - DataTypeFloat32BigEndian = 22 - DataTypeFloat64BigEndian = 23 -) - -// Data is a Value BoxType -// https://developer.apple.com/documentation/quicktime-file-format/value_atom -type Data struct { - Box - DataType uint32 `mp4:"0,size=32"` - DataLang uint32 `mp4:"1,size=32"` - Data []byte `mp4:"2,size=8"` -} - -// GetType returns the BoxType -func (*Data) GetType() BoxType { - return BoxTypeData() -} - -func isUnderIlstMeta(ctx Context) bool { - return ctx.UnderIlstMeta -} - -// StringifyField returns field value as string -func (data *Data) StringifyField(name string, indent string, depth int, ctx Context) (string, bool) { - switch name { - case "DataType": - switch data.DataType { - case DataTypeBinary: - return "BINARY", true - case DataTypeStringUTF8: - return "UTF8", true - case DataTypeStringUTF16: - return "UTF16", true - case DataTypeStringMac: - return "MAC_STR", true - case DataTypeStringJPEG: - return "JPEG", true - case DataTypeSignedIntBigEndian: - return "INT", true - case DataTypeFloat32BigEndian: - return "FLOAT32", true - case DataTypeFloat64BigEndian: - return "FLOAT64", true - } - case "Data": - switch data.DataType { - case DataTypeStringUTF8: - return fmt.Sprintf("\"%s\"", util.EscapeUnprintables(string(data.Data))), true - } - } - return "", false -} - -type StringData struct { - AnyTypeBox - Data []byte `mp4:"0,size=8"` -} - -// StringifyField returns field value as string -func (sd *StringData) StringifyField(name string, indent string, depth int, ctx Context) (string, bool) { - if name == "Data" { - return fmt.Sprintf("\"%s\"", util.EscapeUnprintables(string(sd.Data))), true - } - return "", false -} - 
-/*************************** numbered items ****************************/ - -// Item is a numbered item under an item list atom -// https://developer.apple.com/documentation/quicktime-file-format/metadata_item_list_atom/item_list -type Item struct { - AnyTypeBox - Version uint8 `mp4:"0,size=8"` - Flags [3]byte `mp4:"1,size=8"` - ItemName []byte `mp4:"2,size=8,len=4"` - Data Data `mp4:"3"` -} - -// StringifyField returns field value as string -func (i *Item) StringifyField(name string, indent string, depth int, ctx Context) (string, bool) { - switch name { - case "ItemName": - return fmt.Sprintf("\"%s\"", util.EscapeUnprintables(string(i.ItemName))), true - } - return "", false -} - -func isUnderIlstFreeFormat(ctx Context) bool { - return ctx.UnderIlstFreeMeta -} - -func BoxTypeKeys() BoxType { return StrToBoxType("keys") } - -func init() { - AddBoxDef(&Keys{}) -} - -/*************************** keys ****************************/ - -// Keys is the Keys BoxType -// https://developer.apple.com/documentation/quicktime-file-format/metadata_item_keys_atom -type Keys struct { - FullBox `mp4:"0,extend"` - EntryCount int32 `mp4:"1,size=32"` - Entries []Key `mp4:"2,len=dynamic"` -} - -// GetType implements the IBox interface and returns the BoxType -func (*Keys) GetType() BoxType { - return BoxTypeKeys() -} - -// GetFieldLength implements the ICustomFieldObject interface and returns the length of dynamic fields -func (k *Keys) GetFieldLength(name string, ctx Context) uint { - switch name { - case "Entries": - return uint(k.EntryCount) - } - panic(fmt.Errorf("invalid name of dynamic-length field: boxType=keys fieldName=%s", name)) -} - -/*************************** key ****************************/ - -// Key is a key value field in the Keys BoxType -// https://developer.apple.com/documentation/quicktime-file-format/metadata_item_keys_atom/key_value_key_size-8 -type Key struct { - BaseCustomFieldObject - KeySize int32 `mp4:"0,size=32"` - KeyNamespace []byte 
`mp4:"1,size=8,len=4"` - KeyValue []byte `mp4:"2,size=8,len=dynamic"` -} - -// GetFieldLength implements the ICustomFieldObject interface and returns the length of dynamic fields -func (k *Key) GetFieldLength(name string, ctx Context) uint { - switch name { - case "KeyValue": - // sizeOf(KeySize)+sizeOf(KeyNamespace) = 8 bytes - return uint(k.KeySize) - 8 - } - panic(fmt.Errorf("invalid name of dynamic-length field: boxType=key fieldName=%s", name)) -} - -// StringifyField returns field value as string -func (k *Key) StringifyField(name string, indent string, depth int, ctx Context) (string, bool) { - switch name { - case "KeyNamespace": - return fmt.Sprintf("\"%s\"", util.EscapeUnprintables(string(k.KeyNamespace))), true - case "KeyValue": - return fmt.Sprintf("\"%s\"", util.EscapeUnprintables(string(k.KeyValue))), true - } - return "", false -} diff --git a/vendor/github.com/abema/go-mp4/box_types_opus.go b/vendor/github.com/abema/go-mp4/box_types_opus.go deleted file mode 100644 index 5d02d365d..000000000 --- a/vendor/github.com/abema/go-mp4/box_types_opus.go +++ /dev/null @@ -1,54 +0,0 @@ -package mp4 - -/*************************** Opus ****************************/ - -// https://opus-codec.org/docs/opus_in_isobmff.html - -func BoxTypeOpus() BoxType { return StrToBoxType("Opus") } - -func init() { - AddAnyTypeBoxDef(&AudioSampleEntry{}, BoxTypeOpus()) -} - -/*************************** dOps ****************************/ - -// https://opus-codec.org/docs/opus_in_isobmff.html - -func BoxTypeDOps() BoxType { return StrToBoxType("dOps") } - -func init() { - AddBoxDef(&DOps{}) -} - -type DOps struct { - Box - Version uint8 `mp4:"0,size=8"` - OutputChannelCount uint8 `mp4:"1,size=8"` - PreSkip uint16 `mp4:"2,size=16"` - InputSampleRate uint32 `mp4:"3,size=32"` - OutputGain int16 `mp4:"4,size=16"` - ChannelMappingFamily uint8 `mp4:"5,size=8"` - StreamCount uint8 `mp4:"6,opt=dynamic,size=8"` - CoupledCount uint8 `mp4:"7,opt=dynamic,size=8"` - ChannelMapping []uint8 
`mp4:"8,opt=dynamic,size=8,len=dynamic"` -} - -func (DOps) GetType() BoxType { - return BoxTypeDOps() -} - -func (dops DOps) IsOptFieldEnabled(name string, ctx Context) bool { - switch name { - case "StreamCount", "CoupledCount", "ChannelMapping": - return dops.ChannelMappingFamily != 0 - } - return false -} - -func (ops DOps) GetFieldLength(name string, ctx Context) uint { - switch name { - case "ChannelMapping": - return uint(ops.OutputChannelCount) - } - return 0 -} diff --git a/vendor/github.com/abema/go-mp4/box_types_vp.go b/vendor/github.com/abema/go-mp4/box_types_vp.go deleted file mode 100644 index 6927b2190..000000000 --- a/vendor/github.com/abema/go-mp4/box_types_vp.go +++ /dev/null @@ -1,53 +0,0 @@ -package mp4 - -// https://www.webmproject.org/vp9/mp4/ - -/*************************** vp08 ****************************/ - -func BoxTypeVp08() BoxType { return StrToBoxType("vp08") } - -func init() { - AddAnyTypeBoxDef(&VisualSampleEntry{}, BoxTypeVp08()) -} - -/*************************** vp09 ****************************/ - -func BoxTypeVp09() BoxType { return StrToBoxType("vp09") } - -func init() { - AddAnyTypeBoxDef(&VisualSampleEntry{}, BoxTypeVp09()) -} - -/*************************** VpcC ****************************/ - -func BoxTypeVpcC() BoxType { return StrToBoxType("vpcC") } - -func init() { - AddBoxDef(&VpcC{}) -} - -type VpcC struct { - FullBox `mp4:"0,extend"` - Profile uint8 `mp4:"1,size=8"` - Level uint8 `mp4:"2,size=8"` - BitDepth uint8 `mp4:"3,size=4"` - ChromaSubsampling uint8 `mp4:"4,size=3"` - VideoFullRangeFlag uint8 `mp4:"5,size=1"` - ColourPrimaries uint8 `mp4:"6,size=8"` - TransferCharacteristics uint8 `mp4:"7,size=8"` - MatrixCoefficients uint8 `mp4:"8,size=8"` - CodecInitializationDataSize uint16 `mp4:"9,size=16"` - CodecInitializationData []uint8 `mp4:"10,size=8,len=dynamic"` -} - -func (VpcC) GetType() BoxType { - return BoxTypeVpcC() -} - -func (vpcc VpcC) GetFieldLength(name string, ctx Context) uint { - switch name { - case 
"CodecInitializationData": - return uint(vpcc.CodecInitializationDataSize) - } - return 0 -} diff --git a/vendor/github.com/abema/go-mp4/extract.go b/vendor/github.com/abema/go-mp4/extract.go deleted file mode 100644 index 7de36b06a..000000000 --- a/vendor/github.com/abema/go-mp4/extract.go +++ /dev/null @@ -1,98 +0,0 @@ -package mp4 - -import ( - "errors" - "io" -) - -type BoxInfoWithPayload struct { - Info BoxInfo - Payload IBox -} - -func ExtractBoxWithPayload(r io.ReadSeeker, parent *BoxInfo, path BoxPath) ([]*BoxInfoWithPayload, error) { - return ExtractBoxesWithPayload(r, parent, []BoxPath{path}) -} - -func ExtractBoxesWithPayload(r io.ReadSeeker, parent *BoxInfo, paths []BoxPath) ([]*BoxInfoWithPayload, error) { - bis, err := ExtractBoxes(r, parent, paths) - if err != nil { - return nil, err - } - - bs := make([]*BoxInfoWithPayload, 0, len(bis)) - for _, bi := range bis { - if _, err := bi.SeekToPayload(r); err != nil { - return nil, err - } - - var ctx Context - if parent != nil { - ctx = parent.Context - } - box, _, err := UnmarshalAny(r, bi.Type, bi.Size-bi.HeaderSize, ctx) - if err != nil { - return nil, err - } - bs = append(bs, &BoxInfoWithPayload{ - Info: *bi, - Payload: box, - }) - } - return bs, nil -} - -func ExtractBox(r io.ReadSeeker, parent *BoxInfo, path BoxPath) ([]*BoxInfo, error) { - return ExtractBoxes(r, parent, []BoxPath{path}) -} - -func ExtractBoxes(r io.ReadSeeker, parent *BoxInfo, paths []BoxPath) ([]*BoxInfo, error) { - if len(paths) == 0 { - return nil, nil - } - - for i := range paths { - if len(paths[i]) == 0 { - return nil, errors.New("box path must not be empty") - } - } - - boxes := make([]*BoxInfo, 0, 8) - - handler := func(handle *ReadHandle) (interface{}, error) { - path := handle.Path - if parent != nil { - path = path[1:] - } - if handle.BoxInfo.Type == BoxTypeAny() { - return nil, nil - } - fm, m := matchPath(paths, path) - if m { - boxes = append(boxes, &handle.BoxInfo) - } - - if fm { - if _, err := handle.Expand(); err 
!= nil { - return nil, err - } - } - return nil, nil - } - - if parent != nil { - _, err := ReadBoxStructureFromInternal(r, parent, handler) - return boxes, err - } - _, err := ReadBoxStructure(r, handler) - return boxes, err -} - -func matchPath(paths []BoxPath, path BoxPath) (forwardMatch bool, match bool) { - for i := range paths { - fm, m := path.compareWith(paths[i]) - forwardMatch = forwardMatch || fm - match = match || m - } - return -} diff --git a/vendor/github.com/abema/go-mp4/field.go b/vendor/github.com/abema/go-mp4/field.go deleted file mode 100644 index 585833e0d..000000000 --- a/vendor/github.com/abema/go-mp4/field.go +++ /dev/null @@ -1,290 +0,0 @@ -package mp4 - -import ( - "fmt" - "os" - "reflect" - "sort" - "strconv" - "strings" -) - -type ( - stringType uint8 - fieldFlag uint16 -) - -const ( - stringType_C stringType = iota - stringType_C_P - - fieldString fieldFlag = 1 << iota // 0 - fieldExtend // 1 - fieldDec // 2 - fieldHex // 3 - fieldISO639_2 // 4 - fieldUUID // 5 - fieldHidden // 6 - fieldOptDynamic // 7 - fieldVarint // 8 - fieldSizeDynamic // 9 - fieldLengthDynamic // 10 -) - -type field struct { - children []*field - name string - cnst string - order int - optFlag uint32 - nOptFlag uint32 - size uint - length uint - flags fieldFlag - strType stringType - version uint8 - nVersion uint8 -} - -func (f *field) set(flag fieldFlag) { - f.flags |= flag -} - -func (f *field) is(flag fieldFlag) bool { - return f.flags&flag != 0 -} - -func buildFields(box IImmutableBox) []*field { - t := reflect.TypeOf(box).Elem() - return buildFieldsStruct(t) -} - -func buildFieldsStruct(t reflect.Type) []*field { - fs := make([]*field, 0, 8) - for i := 0; i < t.NumField(); i++ { - ft := t.Field(i).Type - tag, ok := t.Field(i).Tag.Lookup("mp4") - if !ok { - continue - } - f := buildField(t.Field(i).Name, tag) - f.children = buildFieldsAny(ft) - fs = append(fs, f) - } - sort.SliceStable(fs, func(i, j int) bool { - return fs[i].order < fs[j].order - }) - return 
fs -} - -func buildFieldsAny(t reflect.Type) []*field { - switch t.Kind() { - case reflect.Struct: - return buildFieldsStruct(t) - case reflect.Ptr, reflect.Array, reflect.Slice: - return buildFieldsAny(t.Elem()) - default: - return nil - } -} - -func buildField(fieldName string, tag string) *field { - f := &field{ - name: fieldName, - } - tagMap := parseFieldTag(tag) - for key, val := range tagMap { - if val != "" { - continue - } - if order, err := strconv.Atoi(key); err == nil { - f.order = order - break - } - } - - if val, contained := tagMap["string"]; contained { - f.set(fieldString) - if val == "c_p" { - f.strType = stringType_C_P - fmt.Fprint(os.Stderr, "go-mp4: string=c_p tag is deprecated!! See https://github.com/abema/go-mp4/issues/76\n") - } - } - - if _, contained := tagMap["varint"]; contained { - f.set(fieldVarint) - } - - if val, contained := tagMap["opt"]; contained { - if val == "dynamic" { - f.set(fieldOptDynamic) - } else { - base := 10 - if strings.HasPrefix(val, "0x") { - val = val[2:] - base = 16 - } - opt, err := strconv.ParseUint(val, base, 32) - if err != nil { - panic(err) - } - f.optFlag = uint32(opt) - } - } - - if val, contained := tagMap["nopt"]; contained { - base := 10 - if strings.HasPrefix(val, "0x") { - val = val[2:] - base = 16 - } - nopt, err := strconv.ParseUint(val, base, 32) - if err != nil { - panic(err) - } - f.nOptFlag = uint32(nopt) - } - - if _, contained := tagMap["extend"]; contained { - f.set(fieldExtend) - } - - if _, contained := tagMap["dec"]; contained { - f.set(fieldDec) - } - - if _, contained := tagMap["hex"]; contained { - f.set(fieldHex) - } - - if _, contained := tagMap["iso639-2"]; contained { - f.set(fieldISO639_2) - } - - if _, contained := tagMap["uuid"]; contained { - f.set(fieldUUID) - } - - if _, contained := tagMap["hidden"]; contained { - f.set(fieldHidden) - } - - if val, contained := tagMap["const"]; contained { - f.cnst = val - } - - f.version = anyVersion - if val, contained := tagMap["ver"]; 
contained { - ver, err := strconv.Atoi(val) - if err != nil { - panic(err) - } - f.version = uint8(ver) - } - - f.nVersion = anyVersion - if val, contained := tagMap["nver"]; contained { - ver, err := strconv.Atoi(val) - if err != nil { - panic(err) - } - f.nVersion = uint8(ver) - } - - if val, contained := tagMap["size"]; contained { - if val == "dynamic" { - f.set(fieldSizeDynamic) - } else { - size, err := strconv.ParseUint(val, 10, 32) - if err != nil { - panic(err) - } - f.size = uint(size) - } - } - - f.length = LengthUnlimited - if val, contained := tagMap["len"]; contained { - if val == "dynamic" { - f.set(fieldLengthDynamic) - } else { - l, err := strconv.ParseUint(val, 10, 32) - if err != nil { - panic(err) - } - f.length = uint(l) - } - } - - return f -} - -func parseFieldTag(str string) map[string]string { - tag := make(map[string]string, 8) - - list := strings.Split(str, ",") - for _, e := range list { - kv := strings.SplitN(e, "=", 2) - if len(kv) == 2 { - tag[strings.Trim(kv[0], " ")] = strings.Trim(kv[1], " ") - } else { - tag[strings.Trim(kv[0], " ")] = "" - } - } - - return tag -} - -type fieldInstance struct { - field - cfo ICustomFieldObject -} - -func resolveFieldInstance(f *field, box IImmutableBox, parent reflect.Value, ctx Context) *fieldInstance { - fi := fieldInstance{ - field: *f, - } - - cfo, ok := parent.Addr().Interface().(ICustomFieldObject) - if ok { - fi.cfo = cfo - } else { - fi.cfo = box - } - - if fi.is(fieldSizeDynamic) { - fi.size = fi.cfo.GetFieldSize(f.name, ctx) - } - - if fi.is(fieldLengthDynamic) { - fi.length = fi.cfo.GetFieldLength(f.name, ctx) - } - - return &fi -} - -func isTargetField(box IImmutableBox, fi *fieldInstance, ctx Context) bool { - if box.GetVersion() != anyVersion { - if fi.version != anyVersion && box.GetVersion() != fi.version { - return false - } - - if fi.nVersion != anyVersion && box.GetVersion() == fi.nVersion { - return false - } - } - - if fi.optFlag != 0 && box.GetFlags()&fi.optFlag == 0 { - 
return false - } - - if fi.nOptFlag != 0 && box.GetFlags()&fi.nOptFlag != 0 { - return false - } - - if fi.is(fieldOptDynamic) && !fi.cfo.IsOptFieldEnabled(fi.name, ctx) { - return false - } - - return true -} diff --git a/vendor/github.com/abema/go-mp4/internal/bitio/bitio.go b/vendor/github.com/abema/go-mp4/internal/bitio/bitio.go deleted file mode 100644 index 404fd1b82..000000000 --- a/vendor/github.com/abema/go-mp4/internal/bitio/bitio.go +++ /dev/null @@ -1,8 +0,0 @@ -package bitio - -import "errors" - -var ( - ErrInvalidAlignment = errors.New("invalid alignment") - ErrDiscouragedReader = errors.New("discouraged reader implementation") -) diff --git a/vendor/github.com/abema/go-mp4/internal/bitio/read.go b/vendor/github.com/abema/go-mp4/internal/bitio/read.go deleted file mode 100644 index 4da76eae6..000000000 --- a/vendor/github.com/abema/go-mp4/internal/bitio/read.go +++ /dev/null @@ -1,97 +0,0 @@ -package bitio - -import "io" - -type Reader interface { - io.Reader - - // alignment: - // |-1-byte-block-|--------------|--------------|--------------| - // |<-offset->|<-------------------width---------------------->| - ReadBits(width uint) (data []byte, err error) - - ReadBit() (bit bool, err error) -} - -type ReadSeeker interface { - Reader - io.Seeker -} - -type reader struct { - reader io.Reader - octet byte - width uint -} - -func NewReader(r io.Reader) Reader { - return &reader{reader: r} -} - -func (r *reader) Read(p []byte) (n int, err error) { - if r.width != 0 { - return 0, ErrInvalidAlignment - } - return r.reader.Read(p) -} - -func (r *reader) ReadBits(size uint) ([]byte, error) { - bytes := (size + 7) / 8 - data := make([]byte, bytes) - offset := (bytes * 8) - (size) - - for i := uint(0); i < size; i++ { - bit, err := r.ReadBit() - if err != nil { - return nil, err - } - - byteIdx := (offset + i) / 8 - bitIdx := 7 - (offset+i)%8 - if bit { - data[byteIdx] |= 0x1 << bitIdx - } - } - - return data, nil -} - -func (r *reader) ReadBit() (bool, error) { 
- if r.width == 0 { - buf := make([]byte, 1) - if n, err := r.reader.Read(buf); err != nil { - return false, err - } else if n != 1 { - return false, ErrDiscouragedReader - } - r.octet = buf[0] - r.width = 8 - } - - r.width-- - return (r.octet>>r.width)&0x01 != 0, nil -} - -type readSeeker struct { - reader - seeker io.Seeker -} - -func NewReadSeeker(r io.ReadSeeker) ReadSeeker { - return &readSeeker{ - reader: reader{reader: r}, - seeker: r, - } -} - -func (r *readSeeker) Seek(offset int64, whence int) (int64, error) { - if whence == io.SeekCurrent && r.reader.width != 0 { - return 0, ErrInvalidAlignment - } - n, err := r.seeker.Seek(offset, whence) - if err != nil { - return n, err - } - r.reader.width = 0 - return n, nil -} diff --git a/vendor/github.com/abema/go-mp4/internal/bitio/write.go b/vendor/github.com/abema/go-mp4/internal/bitio/write.go deleted file mode 100644 index 5f63dd2d2..000000000 --- a/vendor/github.com/abema/go-mp4/internal/bitio/write.go +++ /dev/null @@ -1,61 +0,0 @@ -package bitio - -import ( - "io" -) - -type Writer interface { - io.Writer - - // alignment: - // |-1-byte-block-|--------------|--------------|--------------| - // |<-offset->|<-------------------width---------------------->| - WriteBits(data []byte, width uint) error - - WriteBit(bit bool) error -} - -type writer struct { - writer io.Writer - octet byte - width uint -} - -func NewWriter(w io.Writer) Writer { - return &writer{writer: w} -} - -func (w *writer) Write(p []byte) (n int, err error) { - if w.width != 0 { - return 0, ErrInvalidAlignment - } - return w.writer.Write(p) -} - -func (w *writer) WriteBits(data []byte, width uint) error { - length := uint(len(data)) * 8 - offset := length - width - for i := offset; i < length; i++ { - oi := i / 8 - if err := w.WriteBit((data[oi]>>(7-i%8))&0x01 != 0); err != nil { - return err - } - } - return nil -} - -func (w *writer) WriteBit(bit bool) error { - if bit { - w.octet |= 0x1 << (7 - w.width) - } - w.width++ - - if w.width == 
8 { - if _, err := w.writer.Write([]byte{w.octet}); err != nil { - return err - } - w.octet = 0x00 - w.width = 0 - } - return nil -} diff --git a/vendor/github.com/abema/go-mp4/internal/util/io.go b/vendor/github.com/abema/go-mp4/internal/util/io.go deleted file mode 100644 index 1e4681186..000000000 --- a/vendor/github.com/abema/go-mp4/internal/util/io.go +++ /dev/null @@ -1,30 +0,0 @@ -package util - -import ( - "bytes" - "io" -) - -func ReadString(r io.Reader) (string, error) { - b := make([]byte, 1) - buf := bytes.NewBuffer(nil) - for { - if _, err := r.Read(b); err != nil { - return "", err - } - if b[0] == 0 { - return buf.String(), nil - } - buf.Write(b) - } -} - -func WriteString(w io.Writer, s string) error { - if _, err := w.Write([]byte(s)); err != nil { - return err - } - if _, err := w.Write([]byte{0}); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/abema/go-mp4/internal/util/string.go b/vendor/github.com/abema/go-mp4/internal/util/string.go deleted file mode 100644 index b38251bb3..000000000 --- a/vendor/github.com/abema/go-mp4/internal/util/string.go +++ /dev/null @@ -1,42 +0,0 @@ -package util - -import ( - "strconv" - "strings" - "unicode" -) - -func FormatSignedFixedFloat1616(val int32) string { - if val&0xffff == 0 { - return strconv.Itoa(int(val >> 16)) - } else { - return strconv.FormatFloat(float64(val)/(1<<16), 'f', 5, 64) - } -} - -func FormatUnsignedFixedFloat1616(val uint32) string { - if val&0xffff == 0 { - return strconv.Itoa(int(val >> 16)) - } else { - return strconv.FormatFloat(float64(val)/(1<<16), 'f', 5, 64) - } -} - -func FormatSignedFixedFloat88(val int16) string { - if val&0xff == 0 { - return strconv.Itoa(int(val >> 8)) - } else { - return strconv.FormatFloat(float64(val)/(1<<8), 'f', 3, 32) - } -} - -func EscapeUnprintable(r rune) rune { - if unicode.IsGraphic(r) { - return r - } - return rune('.') -} - -func EscapeUnprintables(src string) string { - return strings.Map(EscapeUnprintable, src) -} 
diff --git a/vendor/github.com/abema/go-mp4/marshaller.go b/vendor/github.com/abema/go-mp4/marshaller.go deleted file mode 100644 index ff6c64c32..000000000 --- a/vendor/github.com/abema/go-mp4/marshaller.go +++ /dev/null @@ -1,663 +0,0 @@ -package mp4 - -import ( - "bytes" - "errors" - "fmt" - "io" - "math" - "reflect" - - "github.com/abema/go-mp4/internal/bitio" -) - -const ( - anyVersion = math.MaxUint8 -) - -var ErrUnsupportedBoxVersion = errors.New("unsupported box version") - -func readerHasSize(reader bitio.ReadSeeker, size uint64) bool { - pre, err := reader.Seek(0, io.SeekCurrent) - if err != nil { - return false - } - - end, err := reader.Seek(0, io.SeekEnd) - if err != nil { - return false - } - - if uint64(end-pre) < size { - return false - } - - _, err = reader.Seek(pre, io.SeekStart) - if err != nil { - return false - } - - return true -} - -type marshaller struct { - writer bitio.Writer - wbits uint64 - src IImmutableBox - ctx Context -} - -func Marshal(w io.Writer, src IImmutableBox, ctx Context) (n uint64, err error) { - boxDef := src.GetType().getBoxDef(ctx) - if boxDef == nil { - return 0, ErrBoxInfoNotFound - } - - v := reflect.ValueOf(src).Elem() - - m := &marshaller{ - writer: bitio.NewWriter(w), - src: src, - ctx: ctx, - } - - if err := m.marshalStruct(v, boxDef.fields); err != nil { - return 0, err - } - - if m.wbits%8 != 0 { - return 0, fmt.Errorf("box size is not multiple of 8 bits: type=%s, bits=%d", src.GetType().String(), m.wbits) - } - - return m.wbits / 8, nil -} - -func (m *marshaller) marshal(v reflect.Value, fi *fieldInstance) error { - switch v.Type().Kind() { - case reflect.Ptr: - return m.marshalPtr(v, fi) - case reflect.Struct: - return m.marshalStruct(v, fi.children) - case reflect.Array: - return m.marshalArray(v, fi) - case reflect.Slice: - return m.marshalSlice(v, fi) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return m.marshalInt(v, fi) - case reflect.Uint, reflect.Uint8, 
reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return m.marshalUint(v, fi) - case reflect.Bool: - return m.marshalBool(v, fi) - case reflect.String: - return m.marshalString(v) - default: - return fmt.Errorf("unsupported type: %s", v.Type().Kind()) - } -} - -func (m *marshaller) marshalPtr(v reflect.Value, fi *fieldInstance) error { - return m.marshal(v.Elem(), fi) -} - -func (m *marshaller) marshalStruct(v reflect.Value, fs []*field) error { - for _, f := range fs { - fi := resolveFieldInstance(f, m.src, v, m.ctx) - - if !isTargetField(m.src, fi, m.ctx) { - continue - } - - wbits, override, err := fi.cfo.OnWriteField(f.name, m.writer, m.ctx) - if err != nil { - return err - } - m.wbits += wbits - if override { - continue - } - - err = m.marshal(v.FieldByName(f.name), fi) - if err != nil { - return err - } - } - - return nil -} - -func (m *marshaller) marshalArray(v reflect.Value, fi *fieldInstance) error { - size := v.Type().Size() - for i := 0; i < int(size)/int(v.Type().Elem().Size()); i++ { - var err error - err = m.marshal(v.Index(i), fi) - if err != nil { - return err - } - } - return nil -} - -func (m *marshaller) marshalSlice(v reflect.Value, fi *fieldInstance) error { - length := uint64(v.Len()) - if fi.length != LengthUnlimited { - if length < uint64(fi.length) { - return fmt.Errorf("the slice has too few elements: required=%d actual=%d", fi.length, length) - } - length = uint64(fi.length) - } - - elemType := v.Type().Elem() - if elemType.Kind() == reflect.Uint8 && fi.size == 8 && m.wbits%8 == 0 { - if _, err := io.CopyN(m.writer, bytes.NewBuffer(v.Bytes()), int64(length)); err != nil { - return err - } - m.wbits += length * 8 - return nil - } - - for i := 0; i < int(length); i++ { - m.marshal(v.Index(i), fi) - } - return nil -} - -func (m *marshaller) marshalInt(v reflect.Value, fi *fieldInstance) error { - signed := v.Int() - - if fi.is(fieldVarint) { - return errors.New("signed varint is unsupported") - } - - signBit := signed < 0 - 
val := uint64(signed) - for i := uint(0); i < fi.size; i += 8 { - v := val - size := uint(8) - if fi.size > i+8 { - v = v >> (fi.size - (i + 8)) - } else if fi.size < i+8 { - size = fi.size - i - } - - // set sign bit - if i == 0 { - if signBit { - v |= 0x1 << (size - 1) - } else { - v &= 0x1<<(size-1) - 1 - } - } - - if err := m.writer.WriteBits([]byte{byte(v)}, size); err != nil { - return err - } - m.wbits += uint64(size) - } - - return nil -} - -func (m *marshaller) marshalUint(v reflect.Value, fi *fieldInstance) error { - val := v.Uint() - - if fi.is(fieldVarint) { - m.writeUvarint(val) - return nil - } - - for i := uint(0); i < fi.size; i += 8 { - v := val - size := uint(8) - if fi.size > i+8 { - v = v >> (fi.size - (i + 8)) - } else if fi.size < i+8 { - size = fi.size - i - } - if err := m.writer.WriteBits([]byte{byte(v)}, size); err != nil { - return err - } - m.wbits += uint64(size) - } - - return nil -} - -func (m *marshaller) marshalBool(v reflect.Value, fi *fieldInstance) error { - var val byte - if v.Bool() { - val = 0xff - } else { - val = 0x00 - } - if err := m.writer.WriteBits([]byte{val}, fi.size); err != nil { - return err - } - m.wbits += uint64(fi.size) - return nil -} - -func (m *marshaller) marshalString(v reflect.Value) error { - data := []byte(v.String()) - for _, b := range data { - if err := m.writer.WriteBits([]byte{b}, 8); err != nil { - return err - } - m.wbits += 8 - } - // null character - if err := m.writer.WriteBits([]byte{0x00}, 8); err != nil { - return err - } - m.wbits += 8 - return nil -} - -func (m *marshaller) writeUvarint(u uint64) error { - for i := 21; i > 0; i -= 7 { - if err := m.writer.WriteBits([]byte{(byte(u >> uint(i))) | 0x80}, 8); err != nil { - return err - } - m.wbits += 8 - } - - if err := m.writer.WriteBits([]byte{byte(u) & 0x7f}, 8); err != nil { - return err - } - m.wbits += 8 - - return nil -} - -type unmarshaller struct { - reader bitio.ReadSeeker - dst IBox - size uint64 - rbits uint64 - ctx Context -} - 
-func UnmarshalAny(r io.ReadSeeker, boxType BoxType, payloadSize uint64, ctx Context) (box IBox, n uint64, err error) { - dst, err := boxType.New(ctx) - if err != nil { - return nil, 0, err - } - n, err = Unmarshal(r, payloadSize, dst, ctx) - return dst, n, err -} - -func Unmarshal(r io.ReadSeeker, payloadSize uint64, dst IBox, ctx Context) (n uint64, err error) { - boxDef := dst.GetType().getBoxDef(ctx) - if boxDef == nil { - return 0, ErrBoxInfoNotFound - } - - v := reflect.ValueOf(dst).Elem() - - dst.SetVersion(anyVersion) - - u := &unmarshaller{ - reader: bitio.NewReadSeeker(r), - dst: dst, - size: payloadSize, - ctx: ctx, - } - - if n, override, err := dst.BeforeUnmarshal(r, payloadSize, u.ctx); err != nil { - return 0, err - } else if override { - return n, nil - } else { - u.rbits = n * 8 - } - - sn, err := r.Seek(0, io.SeekCurrent) - if err != nil { - return 0, err - } - - if err := u.unmarshalStruct(v, boxDef.fields); err != nil { - if err == ErrUnsupportedBoxVersion { - r.Seek(sn, io.SeekStart) - } - return 0, err - } - - if u.rbits%8 != 0 { - return 0, fmt.Errorf("box size is not multiple of 8 bits: type=%s, size=%d, bits=%d", dst.GetType().String(), u.size, u.rbits) - } - - if u.rbits > u.size*8 { - return 0, fmt.Errorf("overrun error: type=%s, size=%d, bits=%d", dst.GetType().String(), u.size, u.rbits) - } - - return u.rbits / 8, nil -} - -func (u *unmarshaller) unmarshal(v reflect.Value, fi *fieldInstance) error { - var err error - switch v.Type().Kind() { - case reflect.Ptr: - err = u.unmarshalPtr(v, fi) - case reflect.Struct: - err = u.unmarshalStructInternal(v, fi) - case reflect.Array: - err = u.unmarshalArray(v, fi) - case reflect.Slice: - err = u.unmarshalSlice(v, fi) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - err = u.unmarshalInt(v, fi) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - err = u.unmarshalUint(v, fi) - case reflect.Bool: - err = 
u.unmarshalBool(v, fi) - case reflect.String: - err = u.unmarshalString(v, fi) - default: - return fmt.Errorf("unsupported type: %s", v.Type().Kind()) - } - return err -} - -func (u *unmarshaller) unmarshalPtr(v reflect.Value, fi *fieldInstance) error { - v.Set(reflect.New(v.Type().Elem())) - return u.unmarshal(v.Elem(), fi) -} - -func (u *unmarshaller) unmarshalStructInternal(v reflect.Value, fi *fieldInstance) error { - if fi.size != 0 && fi.size%8 == 0 { - u2 := *u - u2.size = uint64(fi.size / 8) - u2.rbits = 0 - if err := u2.unmarshalStruct(v, fi.children); err != nil { - return err - } - u.rbits += u2.rbits - if u2.rbits != uint64(fi.size) { - return errors.New("invalid alignment") - } - return nil - } - - return u.unmarshalStruct(v, fi.children) -} - -func (u *unmarshaller) unmarshalStruct(v reflect.Value, fs []*field) error { - for _, f := range fs { - fi := resolveFieldInstance(f, u.dst, v, u.ctx) - - if !isTargetField(u.dst, fi, u.ctx) { - continue - } - - rbits, override, err := fi.cfo.OnReadField(f.name, u.reader, u.size*8-u.rbits, u.ctx) - if err != nil { - return err - } - u.rbits += rbits - if override { - continue - } - - err = u.unmarshal(v.FieldByName(f.name), fi) - if err != nil { - return err - } - - if v.FieldByName(f.name).Type() == reflect.TypeOf(FullBox{}) && !u.dst.GetType().IsSupportedVersion(u.dst.GetVersion(), u.ctx) { - return ErrUnsupportedBoxVersion - } - } - - return nil -} - -func (u *unmarshaller) unmarshalArray(v reflect.Value, fi *fieldInstance) error { - size := v.Type().Size() - for i := 0; i < int(size)/int(v.Type().Elem().Size()); i++ { - var err error - err = u.unmarshal(v.Index(i), fi) - if err != nil { - return err - } - } - return nil -} - -func (u *unmarshaller) unmarshalSlice(v reflect.Value, fi *fieldInstance) error { - var slice reflect.Value - elemType := v.Type().Elem() - - length := uint64(fi.length) - if fi.length == LengthUnlimited { - if fi.size != 0 { - left := (u.size)*8 - u.rbits - if left%uint64(fi.size) != 0 
{ - return errors.New("invalid alignment") - } - length = left / uint64(fi.size) - } else { - length = 0 - } - } - - if u.rbits%8 == 0 && elemType.Kind() == reflect.Uint8 && fi.size == 8 { - totalSize := length * uint64(fi.size) / 8 - - if !readerHasSize(u.reader, totalSize) { - return fmt.Errorf("not enough bits") - } - - buf := bytes.NewBuffer(make([]byte, 0, totalSize)) - if _, err := io.CopyN(buf, u.reader, int64(totalSize)); err != nil { - return err - } - slice = reflect.ValueOf(buf.Bytes()) - u.rbits += uint64(totalSize) * 8 - - } else { - slice = reflect.MakeSlice(v.Type(), 0, 0) - for i := 0; ; i++ { - if fi.length != LengthUnlimited && uint(i) >= fi.length { - break - } - if fi.length == LengthUnlimited && u.rbits >= u.size*8 { - break - } - slice = reflect.Append(slice, reflect.Zero(elemType)) - if err := u.unmarshal(slice.Index(i), fi); err != nil { - return err - } - if u.rbits > u.size*8 { - return fmt.Errorf("failed to read array completely: fieldName=\"%s\"", fi.name) - } - } - } - - v.Set(slice) - return nil -} - -func (u *unmarshaller) unmarshalInt(v reflect.Value, fi *fieldInstance) error { - if fi.is(fieldVarint) { - return errors.New("signed varint is unsupported") - } - - if fi.size == 0 { - return fmt.Errorf("size must not be zero: %s", fi.name) - } - - data, err := u.reader.ReadBits(fi.size) - if err != nil { - return err - } - u.rbits += uint64(fi.size) - - signBit := false - if len(data) > 0 { - signMask := byte(0x01) << ((fi.size - 1) % 8) - signBit = data[0]&signMask != 0 - if signBit { - data[0] |= ^(signMask - 1) - } - } - - var val uint64 - if signBit { - val = ^uint64(0) - } - for i := range data { - val <<= 8 - val |= uint64(data[i]) - } - v.SetInt(int64(val)) - return nil -} - -func (u *unmarshaller) unmarshalUint(v reflect.Value, fi *fieldInstance) error { - if fi.is(fieldVarint) { - val, err := u.readUvarint() - if err != nil { - return err - } - v.SetUint(val) - return nil - } - - if fi.size == 0 { - return fmt.Errorf("size must 
not be zero: %s", fi.name) - } - - data, err := u.reader.ReadBits(fi.size) - if err != nil { - return err - } - u.rbits += uint64(fi.size) - - val := uint64(0) - for i := range data { - val <<= 8 - val |= uint64(data[i]) - } - v.SetUint(val) - - return nil -} - -func (u *unmarshaller) unmarshalBool(v reflect.Value, fi *fieldInstance) error { - if fi.size == 0 { - return fmt.Errorf("size must not be zero: %s", fi.name) - } - - data, err := u.reader.ReadBits(fi.size) - if err != nil { - return err - } - u.rbits += uint64(fi.size) - - val := false - for _, b := range data { - val = val || (b != byte(0)) - } - v.SetBool(val) - - return nil -} - -func (u *unmarshaller) unmarshalString(v reflect.Value, fi *fieldInstance) error { - switch fi.strType { - case stringType_C: - return u.unmarshalStringC(v) - case stringType_C_P: - return u.unmarshalStringCP(v, fi) - default: - return fmt.Errorf("unknown string type: %d", fi.strType) - } -} - -func (u *unmarshaller) unmarshalStringC(v reflect.Value) error { - data := make([]byte, 0, 16) - for { - if u.rbits >= u.size*8 { - break - } - - c, err := u.reader.ReadBits(8) - if err != nil { - return err - } - u.rbits += 8 - - if c[0] == 0 { - break // null character - } - - data = append(data, c[0]) - } - v.SetString(string(data)) - - return nil -} - -func (u *unmarshaller) unmarshalStringCP(v reflect.Value, fi *fieldInstance) error { - if ok, err := u.tryReadPString(v, fi); err != nil { - return err - } else if ok { - return nil - } - return u.unmarshalStringC(v) -} - -func (u *unmarshaller) tryReadPString(v reflect.Value, fi *fieldInstance) (ok bool, err error) { - remainingSize := (u.size*8 - u.rbits) / 8 - if remainingSize < 2 { - return false, nil - } - - offset, err := u.reader.Seek(0, io.SeekCurrent) - if err != nil { - return false, err - } - defer func() { - if err == nil && !ok { - _, err = u.reader.Seek(offset, io.SeekStart) - } - }() - - buf0 := make([]byte, 1) - if _, err := io.ReadFull(u.reader, buf0); err != nil { - 
return false, err - } - remainingSize-- - plen := buf0[0] - if uint64(plen) > remainingSize { - return false, nil - } - buf := make([]byte, int(plen)) - if _, err := io.ReadFull(u.reader, buf); err != nil { - return false, err - } - remainingSize -= uint64(plen) - if fi.cfo.IsPString(fi.name, buf, remainingSize, u.ctx) { - u.rbits += uint64(len(buf)+1) * 8 - v.SetString(string(buf)) - return true, nil - } - return false, nil -} - -func (u *unmarshaller) readUvarint() (uint64, error) { - var val uint64 - for { - octet, err := u.reader.ReadBits(8) - if err != nil { - return 0, err - } - u.rbits += 8 - - val = (val << 7) + uint64(octet[0]&0x7f) - - if octet[0]&0x80 == 0 { - return val, nil - } - } -} diff --git a/vendor/github.com/abema/go-mp4/mp4.go b/vendor/github.com/abema/go-mp4/mp4.go deleted file mode 100644 index 2fab24da7..000000000 --- a/vendor/github.com/abema/go-mp4/mp4.go +++ /dev/null @@ -1,171 +0,0 @@ -package mp4 - -import ( - "encoding/binary" - "errors" - "fmt" - "reflect" - "strings" -) - -var ErrBoxInfoNotFound = errors.New("box info not found") - -// BoxType is mpeg box type -type BoxType [4]byte - -func StrToBoxType(code string) BoxType { - if len(code) != 4 { - panic(fmt.Errorf("invalid box type id length: [%s]", code)) - } - return BoxType{code[0], code[1], code[2], code[3]} -} - -// Uint32ToBoxType returns a new BoxType from the provied uint32 -func Uint32ToBoxType(i uint32) BoxType { - b := make([]byte, 4) - binary.BigEndian.PutUint32(b, i) - return BoxType{b[0], b[1], b[2], b[3]} -} - -func (boxType BoxType) String() string { - if isPrintable(boxType[0]) && isPrintable(boxType[1]) && isPrintable(boxType[2]) && isPrintable(boxType[3]) { - s := string([]byte{boxType[0], boxType[1], boxType[2], boxType[3]}) - s = strings.ReplaceAll(s, string([]byte{0xa9}), "(c)") - return s - } - return fmt.Sprintf("0x%02x%02x%02x%02x", boxType[0], boxType[1], boxType[2], boxType[3]) -} - -func isASCII(c byte) bool { - return c >= 0x20 && c <= 0x7e -} - -func 
isPrintable(c byte) bool { - return isASCII(c) || c == 0xa9 -} - -func (lhs BoxType) MatchWith(rhs BoxType) bool { - if lhs == boxTypeAny || rhs == boxTypeAny { - return true - } - return lhs == rhs -} - -var boxTypeAny = BoxType{0x00, 0x00, 0x00, 0x00} - -func BoxTypeAny() BoxType { - return boxTypeAny -} - -type boxDef struct { - dataType reflect.Type - versions []uint8 - isTarget func(Context) bool - fields []*field -} - -var boxMap = make(map[BoxType][]boxDef, 64) - -func AddBoxDef(payload IBox, versions ...uint8) { - boxMap[payload.GetType()] = append(boxMap[payload.GetType()], boxDef{ - dataType: reflect.TypeOf(payload).Elem(), - versions: versions, - fields: buildFields(payload), - }) -} - -func AddBoxDefEx(payload IBox, isTarget func(Context) bool, versions ...uint8) { - boxMap[payload.GetType()] = append(boxMap[payload.GetType()], boxDef{ - dataType: reflect.TypeOf(payload).Elem(), - versions: versions, - isTarget: isTarget, - fields: buildFields(payload), - }) -} - -func AddAnyTypeBoxDef(payload IAnyType, boxType BoxType, versions ...uint8) { - boxMap[boxType] = append(boxMap[boxType], boxDef{ - dataType: reflect.TypeOf(payload).Elem(), - versions: versions, - fields: buildFields(payload), - }) -} - -func AddAnyTypeBoxDefEx(payload IAnyType, boxType BoxType, isTarget func(Context) bool, versions ...uint8) { - boxMap[boxType] = append(boxMap[boxType], boxDef{ - dataType: reflect.TypeOf(payload).Elem(), - versions: versions, - isTarget: isTarget, - fields: buildFields(payload), - }) -} - -var itemBoxFields = buildFields(&Item{}) - -func (boxType BoxType) getBoxDef(ctx Context) *boxDef { - boxDefs := boxMap[boxType] - for i := len(boxDefs) - 1; i >= 0; i-- { - boxDef := &boxDefs[i] - if boxDef.isTarget == nil || boxDef.isTarget(ctx) { - return boxDef - } - } - if ctx.UnderIlst { - typeID := int(binary.BigEndian.Uint32(boxType[:])) - if typeID >= 1 && typeID <= ctx.QuickTimeKeysMetaEntryCount { - return &boxDef{ - dataType: reflect.TypeOf(Item{}), - isTarget: 
isIlstMetaContainer, - fields: itemBoxFields, - } - } - } - return nil -} - -func (boxType BoxType) IsSupported(ctx Context) bool { - return boxType.getBoxDef(ctx) != nil -} - -func (boxType BoxType) New(ctx Context) (IBox, error) { - boxDef := boxType.getBoxDef(ctx) - if boxDef == nil { - return nil, ErrBoxInfoNotFound - } - - box, ok := reflect.New(boxDef.dataType).Interface().(IBox) - if !ok { - return nil, fmt.Errorf("box type not implements IBox interface: %s", boxType.String()) - } - - anyTypeBox, ok := box.(IAnyType) - if ok { - anyTypeBox.SetType(boxType) - } - - return box, nil -} - -func (boxType BoxType) GetSupportedVersions(ctx Context) ([]uint8, error) { - boxDef := boxType.getBoxDef(ctx) - if boxDef == nil { - return nil, ErrBoxInfoNotFound - } - return boxDef.versions, nil -} - -func (boxType BoxType) IsSupportedVersion(ver uint8, ctx Context) bool { - boxDef := boxType.getBoxDef(ctx) - if boxDef == nil { - return false - } - if len(boxDef.versions) == 0 { - return true - } - for _, sver := range boxDef.versions { - if ver == sver { - return true - } - } - return false -} diff --git a/vendor/github.com/abema/go-mp4/probe.go b/vendor/github.com/abema/go-mp4/probe.go deleted file mode 100644 index 2ffaf7aca..000000000 --- a/vendor/github.com/abema/go-mp4/probe.go +++ /dev/null @@ -1,684 +0,0 @@ -package mp4 - -import ( - "bytes" - "errors" - "io" - - "github.com/abema/go-mp4/internal/bitio" -) - -type ProbeInfo struct { - MajorBrand [4]byte - MinorVersion uint32 - CompatibleBrands [][4]byte - FastStart bool - Timescale uint32 - Duration uint64 - Tracks Tracks - Segments Segments -} - -// Deprecated: replace with ProbeInfo -type FraProbeInfo = ProbeInfo - -type Tracks []*Track - -// Deprecated: replace with Track -type TrackInfo = Track - -type Track struct { - TrackID uint32 - Timescale uint32 - Duration uint64 - Codec Codec - Encrypted bool - EditList EditList - Samples Samples - Chunks Chunks - AVC *AVCDecConfigInfo - MP4A *MP4AInfo -} - -type Codec 
int - -const ( - CodecUnknown Codec = iota - CodecAVC1 - CodecMP4A -) - -type EditList []*EditListEntry - -type EditListEntry struct { - MediaTime int64 - SegmentDuration uint64 -} - -type Samples []*Sample - -type Sample struct { - Size uint32 - TimeDelta uint32 - CompositionTimeOffset int64 -} - -type Chunks []*Chunk - -type Chunk struct { - DataOffset uint64 - SamplesPerChunk uint32 -} - -type AVCDecConfigInfo struct { - ConfigurationVersion uint8 - Profile uint8 - ProfileCompatibility uint8 - Level uint8 - LengthSize uint16 - Width uint16 - Height uint16 -} - -type MP4AInfo struct { - OTI uint8 - AudOTI uint8 - ChannelCount uint16 -} - -type Segments []*Segment - -// Deprecated: replace with Segment -type SegmentInfo = Segment - -type Segment struct { - TrackID uint32 - MoofOffset uint64 - BaseMediaDecodeTime uint64 - DefaultSampleDuration uint32 - SampleCount uint32 - Duration uint32 - CompositionTimeOffset int32 - Size uint32 -} - -// Probe probes MP4 file -func Probe(r io.ReadSeeker) (*ProbeInfo, error) { - probeInfo := &ProbeInfo{ - Tracks: make([]*Track, 0, 8), - Segments: make([]*Segment, 0, 8), - } - bis, err := ExtractBoxes(r, nil, []BoxPath{ - {BoxTypeFtyp()}, - {BoxTypeMoov()}, - {BoxTypeMoov(), BoxTypeMvhd()}, - {BoxTypeMoov(), BoxTypeTrak()}, - {BoxTypeMoof()}, - {BoxTypeMdat()}, - }) - if err != nil { - return nil, err - } - var mdatAppeared bool - for _, bi := range bis { - switch bi.Type { - case BoxTypeFtyp(): - var ftyp Ftyp - if _, err := bi.SeekToPayload(r); err != nil { - return nil, err - } - if _, err := Unmarshal(r, bi.Size-bi.HeaderSize, &ftyp, bi.Context); err != nil { - return nil, err - } - probeInfo.MajorBrand = ftyp.MajorBrand - probeInfo.MinorVersion = ftyp.MinorVersion - probeInfo.CompatibleBrands = make([][4]byte, 0, len(ftyp.CompatibleBrands)) - for _, entry := range ftyp.CompatibleBrands { - probeInfo.CompatibleBrands = append(probeInfo.CompatibleBrands, entry.CompatibleBrand) - } - case BoxTypeMoov(): - probeInfo.FastStart = 
!mdatAppeared - case BoxTypeMvhd(): - var mvhd Mvhd - if _, err := bi.SeekToPayload(r); err != nil { - return nil, err - } - if _, err := Unmarshal(r, bi.Size-bi.HeaderSize, &mvhd, bi.Context); err != nil { - return nil, err - } - probeInfo.Timescale = mvhd.Timescale - if mvhd.GetVersion() == 0 { - probeInfo.Duration = uint64(mvhd.DurationV0) - } else { - probeInfo.Duration = mvhd.DurationV1 - } - case BoxTypeTrak(): - track, err := probeTrak(r, bi) - if err != nil { - return nil, err - } - probeInfo.Tracks = append(probeInfo.Tracks, track) - case BoxTypeMoof(): - segment, err := probeMoof(r, bi) - if err != nil { - return nil, err - } - probeInfo.Segments = append(probeInfo.Segments, segment) - case BoxTypeMdat(): - mdatAppeared = true - } - } - return probeInfo, nil -} - -// ProbeFra probes fragmented MP4 file -// Deprecated: replace with Probe -func ProbeFra(r io.ReadSeeker) (*FraProbeInfo, error) { - probeInfo, err := Probe(r) - return (*FraProbeInfo)(probeInfo), err -} - -func probeTrak(r io.ReadSeeker, bi *BoxInfo) (*Track, error) { - track := new(Track) - - bips, err := ExtractBoxesWithPayload(r, bi, []BoxPath{ - {BoxTypeTkhd()}, - {BoxTypeEdts(), BoxTypeElst()}, - {BoxTypeMdia(), BoxTypeMdhd()}, - {BoxTypeMdia(), BoxTypeMinf(), BoxTypeStbl(), BoxTypeStsd(), BoxTypeAvc1()}, - {BoxTypeMdia(), BoxTypeMinf(), BoxTypeStbl(), BoxTypeStsd(), BoxTypeAvc1(), BoxTypeAvcC()}, - {BoxTypeMdia(), BoxTypeMinf(), BoxTypeStbl(), BoxTypeStsd(), BoxTypeEncv()}, - {BoxTypeMdia(), BoxTypeMinf(), BoxTypeStbl(), BoxTypeStsd(), BoxTypeEncv(), BoxTypeAvcC()}, - {BoxTypeMdia(), BoxTypeMinf(), BoxTypeStbl(), BoxTypeStsd(), BoxTypeMp4a()}, - {BoxTypeMdia(), BoxTypeMinf(), BoxTypeStbl(), BoxTypeStsd(), BoxTypeMp4a(), BoxTypeEsds()}, - {BoxTypeMdia(), BoxTypeMinf(), BoxTypeStbl(), BoxTypeStsd(), BoxTypeMp4a(), BoxTypeWave(), BoxTypeEsds()}, - {BoxTypeMdia(), BoxTypeMinf(), BoxTypeStbl(), BoxTypeStsd(), BoxTypeEnca()}, - {BoxTypeMdia(), BoxTypeMinf(), BoxTypeStbl(), BoxTypeStsd(), 
BoxTypeEnca(), BoxTypeEsds()}, - {BoxTypeMdia(), BoxTypeMinf(), BoxTypeStbl(), BoxTypeStco()}, - {BoxTypeMdia(), BoxTypeMinf(), BoxTypeStbl(), BoxTypeCo64()}, - {BoxTypeMdia(), BoxTypeMinf(), BoxTypeStbl(), BoxTypeStts()}, - {BoxTypeMdia(), BoxTypeMinf(), BoxTypeStbl(), BoxTypeCtts()}, - {BoxTypeMdia(), BoxTypeMinf(), BoxTypeStbl(), BoxTypeStsc()}, - {BoxTypeMdia(), BoxTypeMinf(), BoxTypeStbl(), BoxTypeStsz()}, - }) - if err != nil { - return nil, err - } - var tkhd *Tkhd - var elst *Elst - var mdhd *Mdhd - var avc1 *VisualSampleEntry - var avcC *AVCDecoderConfiguration - var audioSampleEntry *AudioSampleEntry - var esds *Esds - var stco *Stco - var stts *Stts - var stsc *Stsc - var ctts *Ctts - var stsz *Stsz - var co64 *Co64 - for _, bip := range bips { - switch bip.Info.Type { - case BoxTypeTkhd(): - tkhd = bip.Payload.(*Tkhd) - case BoxTypeElst(): - elst = bip.Payload.(*Elst) - case BoxTypeMdhd(): - mdhd = bip.Payload.(*Mdhd) - case BoxTypeAvc1(): - track.Codec = CodecAVC1 - avc1 = bip.Payload.(*VisualSampleEntry) - case BoxTypeAvcC(): - avcC = bip.Payload.(*AVCDecoderConfiguration) - case BoxTypeEncv(): - track.Codec = CodecAVC1 - track.Encrypted = true - case BoxTypeMp4a(): - track.Codec = CodecMP4A - audioSampleEntry = bip.Payload.(*AudioSampleEntry) - case BoxTypeEnca(): - track.Codec = CodecMP4A - track.Encrypted = true - audioSampleEntry = bip.Payload.(*AudioSampleEntry) - case BoxTypeEsds(): - esds = bip.Payload.(*Esds) - case BoxTypeStco(): - stco = bip.Payload.(*Stco) - case BoxTypeStts(): - stts = bip.Payload.(*Stts) - case BoxTypeStsc(): - stsc = bip.Payload.(*Stsc) - case BoxTypeCtts(): - ctts = bip.Payload.(*Ctts) - case BoxTypeStsz(): - stsz = bip.Payload.(*Stsz) - case BoxTypeCo64(): - co64 = bip.Payload.(*Co64) - } - } - - if tkhd == nil { - return nil, errors.New("tkhd box not found") - } - track.TrackID = tkhd.TrackID - - if elst != nil { - editList := make([]*EditListEntry, 0, len(elst.Entries)) - for i := range elst.Entries { - editList = 
append(editList, &EditListEntry{ - MediaTime: elst.GetMediaTime(i), - SegmentDuration: elst.GetSegmentDuration(i), - }) - } - track.EditList = editList - } - - if mdhd == nil { - return nil, errors.New("mdhd box not found") - } - track.Timescale = mdhd.Timescale - track.Duration = mdhd.GetDuration() - - if avc1 != nil && avcC != nil { - track.AVC = &AVCDecConfigInfo{ - ConfigurationVersion: avcC.ConfigurationVersion, - Profile: avcC.Profile, - ProfileCompatibility: avcC.ProfileCompatibility, - Level: avcC.Level, - LengthSize: uint16(avcC.LengthSizeMinusOne) + 1, - Width: avc1.Width, - Height: avc1.Height, - } - } - - if audioSampleEntry != nil && esds != nil { - oti, audOTI, err := detectAACProfile(esds) - if err != nil { - return nil, err - } - track.MP4A = &MP4AInfo{ - OTI: oti, - AudOTI: audOTI, - ChannelCount: audioSampleEntry.ChannelCount, - } - } - - track.Chunks = make([]*Chunk, 0) - if stco != nil { - for _, offset := range stco.ChunkOffset { - track.Chunks = append(track.Chunks, &Chunk{ - DataOffset: uint64(offset), - }) - } - } else if co64 != nil { - for _, offset := range co64.ChunkOffset { - track.Chunks = append(track.Chunks, &Chunk{ - DataOffset: offset, - }) - } - } else { - return nil, errors.New("stco/co64 box not found") - } - - if stts == nil { - return nil, errors.New("stts box not found") - } - track.Samples = make([]*Sample, 0) - for _, entry := range stts.Entries { - for i := uint32(0); i < entry.SampleCount; i++ { - track.Samples = append(track.Samples, &Sample{ - TimeDelta: entry.SampleDelta, - }) - } - } - - if stsc == nil { - return nil, errors.New("stsc box not found") - } - for si, entry := range stsc.Entries { - end := uint32(len(track.Chunks)) - if si != len(stsc.Entries)-1 && stsc.Entries[si+1].FirstChunk-1 < end { - end = stsc.Entries[si+1].FirstChunk - 1 - } - for ci := entry.FirstChunk - 1; ci < end; ci++ { - track.Chunks[ci].SamplesPerChunk = entry.SamplesPerChunk - } - } - - if ctts != nil { - var si uint32 - for ci, entry := 
range ctts.Entries { - for i := uint32(0); i < entry.SampleCount; i++ { - if si >= uint32(len(track.Samples)) { - break - } - track.Samples[si].CompositionTimeOffset = ctts.GetSampleOffset(ci) - si++ - } - } - } - - if stsz != nil { - for i := 0; i < len(stsz.EntrySize) && i < len(track.Samples); i++ { - track.Samples[i].Size = stsz.EntrySize[i] - } - } - - return track, nil -} - -func detectAACProfile(esds *Esds) (oti, audOTI uint8, err error) { - configDscr := findDescriptorByTag(esds.Descriptors, DecoderConfigDescrTag) - if configDscr == nil || configDscr.DecoderConfigDescriptor == nil { - return 0, 0, nil - } - if configDscr.DecoderConfigDescriptor.ObjectTypeIndication != 0x40 { - return configDscr.DecoderConfigDescriptor.ObjectTypeIndication, 0, nil - } - - specificDscr := findDescriptorByTag(esds.Descriptors, DecSpecificInfoTag) - if specificDscr == nil { - return 0, 0, errors.New("DecoderSpecificationInfoDescriptor not found") - } - - r := bitio.NewReader(bytes.NewReader(specificDscr.Data)) - remaining := len(specificDscr.Data) * 8 - - // audio object type - audioObjectType, read, err := getAudioObjectType(r) - if err != nil { - return 0, 0, err - } - remaining -= read - - // sampling frequency index - samplingFrequencyIndex, err := r.ReadBits(4) - if err != nil { - return 0, 0, err - } - remaining -= 4 - if samplingFrequencyIndex[0] == 0x0f { - if _, err = r.ReadBits(24); err != nil { - return 0, 0, err - } - remaining -= 24 - } - - if audioObjectType == 2 && remaining >= 20 { - if _, err = r.ReadBits(4); err != nil { - return 0, 0, err - } - remaining -= 4 - syncExtensionType, err := r.ReadBits(11) - if err != nil { - return 0, 0, err - } - remaining -= 11 - if syncExtensionType[0] == 0x2 && syncExtensionType[1] == 0xb7 { - extAudioObjectType, _, err := getAudioObjectType(r) - if err != nil { - return 0, 0, err - } - if extAudioObjectType == 5 || extAudioObjectType == 22 { - sbr, err := r.ReadBits(1) - if err != nil { - return 0, 0, err - } - remaining-- - 
if sbr[0] != 0 { - if extAudioObjectType == 5 { - sfi, err := r.ReadBits(4) - if err != nil { - return 0, 0, err - } - remaining -= 4 - if sfi[0] == 0xf { - if _, err := r.ReadBits(24); err != nil { - return 0, 0, err - } - remaining -= 24 - } - if remaining >= 12 { - syncExtensionType, err := r.ReadBits(11) - if err != nil { - return 0, 0, err - } - if syncExtensionType[0] == 0x5 && syncExtensionType[1] == 0x48 { - ps, err := r.ReadBits(1) - if err != nil { - return 0, 0, err - } - if ps[0] != 0 { - return 0x40, 29, nil - } - } - } - } - return 0x40, 5, nil - } - } - } - } - return 0x40, audioObjectType, nil -} - -func findDescriptorByTag(dscrs []Descriptor, tag int8) *Descriptor { - for _, dscr := range dscrs { - if dscr.Tag == tag { - return &dscr - } - } - return nil -} - -func getAudioObjectType(r bitio.Reader) (byte, int, error) { - audioObjectType, err := r.ReadBits(5) - if err != nil { - return 0, 0, err - } - if audioObjectType[0] != 0x1f { - return audioObjectType[0], 5, nil - } - audioObjectType, err = r.ReadBits(6) - if err != nil { - return 0, 0, err - } - return audioObjectType[0] + 32, 11, nil -} - -func probeMoof(r io.ReadSeeker, bi *BoxInfo) (*Segment, error) { - bips, err := ExtractBoxesWithPayload(r, bi, []BoxPath{ - {BoxTypeTraf(), BoxTypeTfhd()}, - {BoxTypeTraf(), BoxTypeTfdt()}, - {BoxTypeTraf(), BoxTypeTrun()}, - }) - if err != nil { - return nil, err - } - - var tfhd *Tfhd - var tfdt *Tfdt - var trun *Trun - - segment := &Segment{ - MoofOffset: bi.Offset, - } - for _, bip := range bips { - switch bip.Info.Type { - case BoxTypeTfhd(): - tfhd = bip.Payload.(*Tfhd) - case BoxTypeTfdt(): - tfdt = bip.Payload.(*Tfdt) - case BoxTypeTrun(): - trun = bip.Payload.(*Trun) - } - } - - if tfhd == nil { - return nil, errors.New("tfhd not found") - } - segment.TrackID = tfhd.TrackID - segment.DefaultSampleDuration = tfhd.DefaultSampleDuration - - if tfdt != nil { - if tfdt.Version == 0 { - segment.BaseMediaDecodeTime = uint64(tfdt.BaseMediaDecodeTimeV0) - 
} else { - segment.BaseMediaDecodeTime = tfdt.BaseMediaDecodeTimeV1 - } - } - - if trun != nil { - segment.SampleCount = trun.SampleCount - - if trun.CheckFlag(0x000100) { - segment.Duration = 0 - for ei := range trun.Entries { - segment.Duration += trun.Entries[ei].SampleDuration - } - } else { - segment.Duration = tfhd.DefaultSampleDuration * segment.SampleCount - } - - if trun.CheckFlag(0x000200) { - segment.Size = 0 - for ei := range trun.Entries { - segment.Size += trun.Entries[ei].SampleSize - } - } else { - segment.Size = tfhd.DefaultSampleSize * segment.SampleCount - } - - var duration uint32 - for ei := range trun.Entries { - offset := int32(duration) + int32(trun.GetSampleCompositionTimeOffset(ei)) - if ei == 0 || offset < segment.CompositionTimeOffset { - segment.CompositionTimeOffset = offset - } - if trun.CheckFlag(0x000100) { - duration += trun.Entries[ei].SampleDuration - } else { - duration += tfhd.DefaultSampleDuration - } - } - } - - return segment, nil -} - -func FindIDRFrames(r io.ReadSeeker, trackInfo *TrackInfo) ([]int, error) { - if trackInfo.AVC == nil { - return nil, nil - } - lengthSize := uint32(trackInfo.AVC.LengthSize) - - var si int - idxs := make([]int, 0, 8) - for _, chunk := range trackInfo.Chunks { - end := si + int(chunk.SamplesPerChunk) - dataOffset := chunk.DataOffset - for ; si < end && si < len(trackInfo.Samples); si++ { - sample := trackInfo.Samples[si] - if sample.Size == 0 { - continue - } - for nalOffset := uint32(0); nalOffset+lengthSize+1 <= sample.Size; { - if _, err := r.Seek(int64(dataOffset+uint64(nalOffset)), io.SeekStart); err != nil { - return nil, err - } - data := make([]byte, lengthSize+1) - if _, err := io.ReadFull(r, data); err != nil { - return nil, err - } - var length uint32 - for i := 0; i < int(lengthSize); i++ { - length = (length << 8) + uint32(data[i]) - } - nalHeader := data[lengthSize] - nalType := nalHeader & 0x1f - if nalType == 5 { - idxs = append(idxs, si) - break - } - nalOffset += lengthSize + 
length - } - dataOffset += uint64(sample.Size) - } - } - return idxs, nil -} - -func (samples Samples) GetBitrate(timescale uint32) uint64 { - var totalSize uint64 - var totalDuration uint64 - for _, sample := range samples { - totalSize += uint64(sample.Size) - totalDuration += uint64(sample.TimeDelta) - } - if totalDuration == 0 { - return 0 - } - return 8 * totalSize * uint64(timescale) / totalDuration -} - -func (samples Samples) GetMaxBitrate(timescale uint32, timeDelta uint64) uint64 { - if timeDelta == 0 { - return 0 - } - var maxBitrate uint64 - var size uint64 - var duration uint64 - var begin int - var end int - for end < len(samples) { - for { - size += uint64(samples[end].Size) - duration += uint64(samples[end].TimeDelta) - end++ - if duration >= timeDelta || end == len(samples) { - break - } - } - bitrate := 8 * size * uint64(timescale) / duration - if bitrate > maxBitrate { - maxBitrate = bitrate - } - for { - size -= uint64(samples[begin].Size) - duration -= uint64(samples[begin].TimeDelta) - begin++ - if duration < timeDelta { - break - } - } - } - return maxBitrate -} - -func (segments Segments) GetBitrate(trackID uint32, timescale uint32) uint64 { - var totalSize uint64 - var totalDuration uint64 - for _, segment := range segments { - if segment.TrackID == trackID { - totalSize += uint64(segment.Size) - totalDuration += uint64(segment.Duration) - } - } - if totalDuration == 0 { - return 0 - } - return 8 * totalSize * uint64(timescale) / totalDuration -} - -func (segments Segments) GetMaxBitrate(trackID uint32, timescale uint32) uint64 { - var maxBitrate uint64 - for _, segment := range segments { - if segment.TrackID == trackID && segment.Duration != 0 { - bitrate := 8 * uint64(segment.Size) * uint64(timescale) / uint64(segment.Duration) - if bitrate > maxBitrate { - maxBitrate = bitrate - } - } - } - return maxBitrate -} diff --git a/vendor/github.com/abema/go-mp4/read.go b/vendor/github.com/abema/go-mp4/read.go deleted file mode 100644 index 
7118d802a..000000000 --- a/vendor/github.com/abema/go-mp4/read.go +++ /dev/null @@ -1,199 +0,0 @@ -package mp4 - -import ( - "errors" - "fmt" - "io" -) - -type BoxPath []BoxType - -func (lhs BoxPath) compareWith(rhs BoxPath) (forwardMatch bool, match bool) { - if len(lhs) > len(rhs) { - return false, false - } - for i := 0; i < len(lhs); i++ { - if !lhs[i].MatchWith(rhs[i]) { - return false, false - } - } - if len(lhs) < len(rhs) { - return true, false - } - return false, true -} - -type ReadHandle struct { - Params []interface{} - BoxInfo BoxInfo - Path BoxPath - ReadPayload func() (box IBox, n uint64, err error) - ReadData func(io.Writer) (n uint64, err error) - Expand func(params ...interface{}) (vals []interface{}, err error) -} - -type ReadHandler func(handle *ReadHandle) (val interface{}, err error) - -func ReadBoxStructure(r io.ReadSeeker, handler ReadHandler, params ...interface{}) ([]interface{}, error) { - if _, err := r.Seek(0, io.SeekStart); err != nil { - return nil, err - } - return readBoxStructure(r, 0, true, nil, Context{}, handler, params) -} - -func ReadBoxStructureFromInternal(r io.ReadSeeker, bi *BoxInfo, handler ReadHandler, params ...interface{}) (interface{}, error) { - return readBoxStructureFromInternal(r, bi, nil, handler, params) -} - -func readBoxStructureFromInternal(r io.ReadSeeker, bi *BoxInfo, path BoxPath, handler ReadHandler, params []interface{}) (interface{}, error) { - if _, err := bi.SeekToPayload(r); err != nil { - return nil, err - } - - // check comatible-brands - if len(path) == 0 && bi.Type == BoxTypeFtyp() { - var ftyp Ftyp - if _, err := Unmarshal(r, bi.Size-bi.HeaderSize, &ftyp, bi.Context); err != nil { - return nil, err - } - if ftyp.HasCompatibleBrand(BrandQT()) { - bi.IsQuickTimeCompatible = true - } - if _, err := bi.SeekToPayload(r); err != nil { - return nil, err - } - } - - // parse numbered ilst items after keys box by saving EntryCount field to context - if bi.Type == BoxTypeKeys() { - var keys Keys - if _, 
err := Unmarshal(r, bi.Size-bi.HeaderSize, &keys, bi.Context); err != nil { - return nil, err - } - bi.QuickTimeKeysMetaEntryCount = int(keys.EntryCount) - if _, err := bi.SeekToPayload(r); err != nil { - return nil, err - } - } - - ctx := bi.Context - if bi.Type == BoxTypeWave() { - ctx.UnderWave = true - } else if bi.Type == BoxTypeIlst() { - ctx.UnderIlst = true - } else if bi.UnderIlst && !bi.UnderIlstMeta && IsIlstMetaBoxType(bi.Type) { - ctx.UnderIlstMeta = true - if bi.Type == StrToBoxType("----") { - ctx.UnderIlstFreeMeta = true - } - } else if bi.Type == BoxTypeUdta() { - ctx.UnderUdta = true - } - - newPath := make(BoxPath, len(path)+1) - copy(newPath, path) - newPath[len(path)] = bi.Type - - h := &ReadHandle{ - Params: params, - BoxInfo: *bi, - Path: newPath, - } - - var childrenOffset uint64 - - h.ReadPayload = func() (IBox, uint64, error) { - if _, err := bi.SeekToPayload(r); err != nil { - return nil, 0, err - } - - box, n, err := UnmarshalAny(r, bi.Type, bi.Size-bi.HeaderSize, bi.Context) - if err != nil { - return nil, 0, err - } - childrenOffset = bi.Offset + bi.HeaderSize + n - return box, n, nil - } - - h.ReadData = func(w io.Writer) (uint64, error) { - if _, err := bi.SeekToPayload(r); err != nil { - return 0, err - } - - size := bi.Size - bi.HeaderSize - if _, err := io.CopyN(w, r, int64(size)); err != nil { - return 0, err - } - return size, nil - } - - h.Expand = func(params ...interface{}) ([]interface{}, error) { - if childrenOffset == 0 { - if _, err := bi.SeekToPayload(r); err != nil { - return nil, err - } - - _, n, err := UnmarshalAny(r, bi.Type, bi.Size-bi.HeaderSize, bi.Context) - if err != nil { - return nil, err - } - childrenOffset = bi.Offset + bi.HeaderSize + n - } else { - if _, err := r.Seek(int64(childrenOffset), io.SeekStart); err != nil { - return nil, err - } - } - - childrenSize := bi.Offset + bi.Size - childrenOffset - return readBoxStructure(r, childrenSize, false, newPath, ctx, handler, params) - } - - if val, err := 
handler(h); err != nil { - return nil, err - } else if _, err := bi.SeekToEnd(r); err != nil { - return nil, err - } else { - return val, nil - } -} - -func readBoxStructure(r io.ReadSeeker, totalSize uint64, isRoot bool, path BoxPath, ctx Context, handler ReadHandler, params []interface{}) ([]interface{}, error) { - vals := make([]interface{}, 0, 8) - - for isRoot || totalSize >= SmallHeaderSize { - bi, err := ReadBoxInfo(r) - if isRoot && err == io.EOF { - return vals, nil - } else if err != nil { - return nil, err - } - - if !isRoot && bi.Size > totalSize { - return nil, fmt.Errorf("too large box size: type=%s, size=%d, actualBufSize=%d", bi.Type.String(), bi.Size, totalSize) - } - totalSize -= bi.Size - - bi.Context = ctx - - val, err := readBoxStructureFromInternal(r, bi, path, handler, params) - if err != nil { - return nil, err - } - vals = append(vals, val) - - if bi.IsQuickTimeCompatible { - ctx.IsQuickTimeCompatible = true - } - - // preserve keys entry count on context for subsequent ilst number item box - if bi.Type == BoxTypeKeys() { - ctx.QuickTimeKeysMetaEntryCount = bi.QuickTimeKeysMetaEntryCount - } - } - - if totalSize != 0 && !ctx.IsQuickTimeCompatible { - return nil, errors.New("Unexpected EOF") - } - - return vals, nil -} diff --git a/vendor/github.com/abema/go-mp4/string.go b/vendor/github.com/abema/go-mp4/string.go deleted file mode 100644 index 4bc368e42..000000000 --- a/vendor/github.com/abema/go-mp4/string.go +++ /dev/null @@ -1,261 +0,0 @@ -package mp4 - -import ( - "bytes" - "fmt" - "io" - "reflect" - "strconv" - - "github.com/abema/go-mp4/internal/util" -) - -type stringifier struct { - buf *bytes.Buffer - src IImmutableBox - indent string - ctx Context -} - -func Stringify(src IImmutableBox, ctx Context) (string, error) { - return StringifyWithIndent(src, "", ctx) -} - -func StringifyWithIndent(src IImmutableBox, indent string, ctx Context) (string, error) { - boxDef := src.GetType().getBoxDef(ctx) - if boxDef == nil { - return "", 
ErrBoxInfoNotFound - } - - v := reflect.ValueOf(src).Elem() - - m := &stringifier{ - buf: bytes.NewBuffer(nil), - src: src, - indent: indent, - ctx: ctx, - } - - err := m.stringifyStruct(v, boxDef.fields, 0, true) - if err != nil { - return "", err - } - - return m.buf.String(), nil -} - -func (m *stringifier) stringify(v reflect.Value, fi *fieldInstance, depth int) error { - switch v.Type().Kind() { - case reflect.Ptr: - return m.stringifyPtr(v, fi, depth) - case reflect.Struct: - return m.stringifyStruct(v, fi.children, depth, fi.is(fieldExtend)) - case reflect.Array: - return m.stringifyArray(v, fi, depth) - case reflect.Slice: - return m.stringifySlice(v, fi, depth) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return m.stringifyInt(v, fi, depth) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return m.stringifyUint(v, fi, depth) - case reflect.Bool: - return m.stringifyBool(v, depth) - case reflect.String: - return m.stringifyString(v, depth) - default: - return fmt.Errorf("unsupported type: %s", v.Type().Kind()) - } -} - -func (m *stringifier) stringifyPtr(v reflect.Value, fi *fieldInstance, depth int) error { - return m.stringify(v.Elem(), fi, depth) -} - -func (m *stringifier) stringifyStruct(v reflect.Value, fs []*field, depth int, extended bool) error { - if !extended { - m.buf.WriteString("{") - if m.indent != "" { - m.buf.WriteString("\n") - } - depth++ - } - - for _, f := range fs { - fi := resolveFieldInstance(f, m.src, v, m.ctx) - - if !isTargetField(m.src, fi, m.ctx) { - continue - } - - if f.cnst != "" || f.is(fieldHidden) { - continue - } - - if !f.is(fieldExtend) { - if m.indent != "" { - writeIndent(m.buf, m.indent, depth+1) - } else if m.buf.Len() != 0 && m.buf.Bytes()[m.buf.Len()-1] != '{' { - m.buf.WriteString(" ") - } - m.buf.WriteString(f.name) - m.buf.WriteString("=") - } - - str, ok := fi.cfo.StringifyField(f.name, m.indent, depth+1, m.ctx) - if ok { 
- m.buf.WriteString(str) - if !f.is(fieldExtend) && m.indent != "" { - m.buf.WriteString("\n") - } - continue - } - - if f.name == "Version" { - m.buf.WriteString(strconv.Itoa(int(m.src.GetVersion()))) - } else if f.name == "Flags" { - fmt.Fprintf(m.buf, "0x%06x", m.src.GetFlags()) - } else { - err := m.stringify(v.FieldByName(f.name), fi, depth) - if err != nil { - return err - } - } - - if !f.is(fieldExtend) && m.indent != "" { - m.buf.WriteString("\n") - } - } - - if !extended { - if m.indent != "" { - writeIndent(m.buf, m.indent, depth) - } - m.buf.WriteString("}") - } - - return nil -} - -func (m *stringifier) stringifyArray(v reflect.Value, fi *fieldInstance, depth int) error { - begin, sep, end := "[", ", ", "]" - if fi.is(fieldString) || fi.is(fieldISO639_2) { - begin, sep, end = "\"", "", "\"" - } else if fi.is(fieldUUID) { - begin, sep, end = "", "", "" - } - - m.buf.WriteString(begin) - - m2 := *m - if fi.is(fieldString) { - m2.buf = bytes.NewBuffer(nil) - } - size := v.Type().Size() - for i := 0; i < int(size)/int(v.Type().Elem().Size()); i++ { - if i != 0 { - m2.buf.WriteString(sep) - } - - if err := m2.stringify(v.Index(i), fi, depth+1); err != nil { - return err - } - - if fi.is(fieldUUID) && (i == 3 || i == 5 || i == 7 || i == 9) { - m.buf.WriteString("-") - } - } - if fi.is(fieldString) { - m.buf.WriteString(util.EscapeUnprintables(m2.buf.String())) - } - - m.buf.WriteString(end) - - return nil -} - -func (m *stringifier) stringifySlice(v reflect.Value, fi *fieldInstance, depth int) error { - begin, sep, end := "[", ", ", "]" - if fi.is(fieldString) || fi.is(fieldISO639_2) { - begin, sep, end = "\"", "", "\"" - } - - m.buf.WriteString(begin) - - m2 := *m - if fi.is(fieldString) { - m2.buf = bytes.NewBuffer(nil) - } - for i := 0; i < v.Len(); i++ { - if fi.length != LengthUnlimited && uint(i) >= fi.length { - break - } - - if i != 0 { - m2.buf.WriteString(sep) - } - - if err := m2.stringify(v.Index(i), fi, depth+1); err != nil { - return err - } - } 
- if fi.is(fieldString) { - m.buf.WriteString(util.EscapeUnprintables(m2.buf.String())) - } - - m.buf.WriteString(end) - - return nil -} - -func (m *stringifier) stringifyInt(v reflect.Value, fi *fieldInstance, depth int) error { - if fi.is(fieldHex) { - val := v.Int() - if val >= 0 { - m.buf.WriteString("0x") - m.buf.WriteString(strconv.FormatInt(val, 16)) - } else { - m.buf.WriteString("-0x") - m.buf.WriteString(strconv.FormatInt(-val, 16)) - } - } else { - m.buf.WriteString(strconv.FormatInt(v.Int(), 10)) - } - return nil -} - -func (m *stringifier) stringifyUint(v reflect.Value, fi *fieldInstance, depth int) error { - if fi.is(fieldISO639_2) { - m.buf.WriteString(string([]byte{byte(v.Uint() + 0x60)})) - } else if fi.is(fieldUUID) { - fmt.Fprintf(m.buf, "%02x", v.Uint()) - } else if fi.is(fieldString) { - m.buf.WriteString(string([]byte{byte(v.Uint())})) - } else if fi.is(fieldHex) || (!fi.is(fieldDec) && v.Type().Kind() == reflect.Uint8) || v.Type().Kind() == reflect.Uintptr { - m.buf.WriteString("0x") - m.buf.WriteString(strconv.FormatUint(v.Uint(), 16)) - } else { - m.buf.WriteString(strconv.FormatUint(v.Uint(), 10)) - } - - return nil -} - -func (m *stringifier) stringifyBool(v reflect.Value, depth int) error { - m.buf.WriteString(strconv.FormatBool(v.Bool())) - - return nil -} - -func (m *stringifier) stringifyString(v reflect.Value, depth int) error { - m.buf.WriteString("\"") - m.buf.WriteString(util.EscapeUnprintables(v.String())) - m.buf.WriteString("\"") - - return nil -} - -func writeIndent(w io.Writer, indent string, depth int) { - for i := 0; i < depth; i++ { - io.WriteString(w, indent) - } -} diff --git a/vendor/github.com/abema/go-mp4/write.go b/vendor/github.com/abema/go-mp4/write.go deleted file mode 100644 index 72d464444..000000000 --- a/vendor/github.com/abema/go-mp4/write.go +++ /dev/null @@ -1,68 +0,0 @@ -package mp4 - -import ( - "errors" - "io" -) - -type Writer struct { - writer io.WriteSeeker - biStack []*BoxInfo -} - -func NewWriter(w 
io.WriteSeeker) *Writer { - return &Writer{ - writer: w, - } -} - -func (w *Writer) Write(p []byte) (int, error) { - return w.writer.Write(p) -} - -func (w *Writer) Seek(offset int64, whence int) (int64, error) { - return w.writer.Seek(offset, whence) -} - -func (w *Writer) StartBox(bi *BoxInfo) (*BoxInfo, error) { - bi, err := WriteBoxInfo(w.writer, bi) - if err != nil { - return nil, err - } - w.biStack = append(w.biStack, bi) - return bi, nil -} - -func (w *Writer) EndBox() (*BoxInfo, error) { - bi := w.biStack[len(w.biStack)-1] - w.biStack = w.biStack[:len(w.biStack)-1] - end, err := w.writer.Seek(0, io.SeekCurrent) - if err != nil { - return nil, err - } - bi.Size = uint64(end) - bi.Offset - if _, err = bi.SeekToStart(w.writer); err != nil { - return nil, err - } - if bi2, err := WriteBoxInfo(w.writer, bi); err != nil { - return nil, err - } else if bi.HeaderSize != bi2.HeaderSize { - return nil, errors.New("header size changed") - } - if _, err := w.writer.Seek(end, io.SeekStart); err != nil { - return nil, err - } - return bi, nil -} - -func (w *Writer) CopyBox(r io.ReadSeeker, bi *BoxInfo) error { - if _, err := bi.SeekToStart(r); err != nil { - return err - } - if n, err := io.CopyN(w, r, int64(bi.Size)); err != nil { - return err - } else if n != int64(bi.Size) { - return errors.New("failed to copy box") - } - return nil -} diff --git a/vendor/github.com/dsoprea/go-exif/v3/.MODULE_ROOT b/vendor/github.com/dsoprea/go-exif/v3/.MODULE_ROOT deleted file mode 100644 index e69de29bb..000000000 diff --git a/vendor/github.com/dsoprea/go-exif/v3/LICENSE b/vendor/github.com/dsoprea/go-exif/v3/LICENSE deleted file mode 100644 index 0b9358a3a..000000000 --- a/vendor/github.com/dsoprea/go-exif/v3/LICENSE +++ /dev/null @@ -1,9 +0,0 @@ -MIT LICENSE - -Copyright 2019 Dustin Oprea - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, 
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/dsoprea/go-exif/v3/common/ifd.go b/vendor/github.com/dsoprea/go-exif/v3/common/ifd.go deleted file mode 100644 index 01886e966..000000000 --- a/vendor/github.com/dsoprea/go-exif/v3/common/ifd.go +++ /dev/null @@ -1,651 +0,0 @@ -package exifcommon - -import ( - "errors" - "fmt" - "strings" - - "github.com/dsoprea/go-logging" -) - -var ( - ifdLogger = log.NewLogger("exifcommon.ifd") -) - -var ( - ErrChildIfdNotMapped = errors.New("no child-IFD for that tag-ID under parent") -) - -// MappedIfd is one node in the IFD-mapping. -type MappedIfd struct { - ParentTagId uint16 - Placement []uint16 - Path []string - - Name string - TagId uint16 - Children map[uint16]*MappedIfd -} - -// String returns a descriptive string. -func (mi *MappedIfd) String() string { - pathPhrase := mi.PathPhrase() - return fmt.Sprintf("MappedIfd<(0x%04X) [%s] PATH=[%s]>", mi.TagId, mi.Name, pathPhrase) -} - -// PathPhrase returns a non-fully-qualified IFD path. -func (mi *MappedIfd) PathPhrase() string { - return strings.Join(mi.Path, "/") -} - -// TODO(dustin): Refactor this to use IfdIdentity structs. 
- -// IfdMapping describes all of the IFDs that we currently recognize. -type IfdMapping struct { - rootNode *MappedIfd -} - -// NewIfdMapping returns a new IfdMapping struct. -func NewIfdMapping() (ifdMapping *IfdMapping) { - rootNode := &MappedIfd{ - Path: make([]string, 0), - Children: make(map[uint16]*MappedIfd), - } - - return &IfdMapping{ - rootNode: rootNode, - } -} - -// NewIfdMappingWithStandard retruns a new IfdMapping struct preloaded with the -// standard IFDs. -func NewIfdMappingWithStandard() (ifdMapping *IfdMapping, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - im := NewIfdMapping() - - err = LoadStandardIfds(im) - log.PanicIf(err) - - return im, nil -} - -// Get returns the node given the path slice. -func (im *IfdMapping) Get(parentPlacement []uint16) (childIfd *MappedIfd, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - ptr := im.rootNode - for _, tagId := range parentPlacement { - if descendantPtr, found := ptr.Children[tagId]; found == false { - log.Panicf("ifd child with tag-ID (%04x) not registered: [%s]", tagId, ptr.PathPhrase()) - } else { - ptr = descendantPtr - } - } - - return ptr, nil -} - -// GetWithPath returns the node given the path string. 
-func (im *IfdMapping) GetWithPath(pathPhrase string) (mi *MappedIfd, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - if pathPhrase == "" { - log.Panicf("path-phrase is empty") - } - - path := strings.Split(pathPhrase, "/") - ptr := im.rootNode - - for _, name := range path { - var hit *MappedIfd - for _, mi := range ptr.Children { - if mi.Name == name { - hit = mi - break - } - } - - if hit == nil { - log.Panicf("ifd child with name [%s] not registered: [%s]", name, ptr.PathPhrase()) - } - - ptr = hit - } - - return ptr, nil -} - -// GetChild is a convenience function to get the child path for a given parent -// placement and child tag-ID. -func (im *IfdMapping) GetChild(parentPathPhrase string, tagId uint16) (mi *MappedIfd, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - mi, err = im.GetWithPath(parentPathPhrase) - log.PanicIf(err) - - for _, childMi := range mi.Children { - if childMi.TagId == tagId { - return childMi, nil - } - } - - // Whether or not an IFD is defined in data, such an IFD is not registered - // and would be unknown. - log.Panic(ErrChildIfdNotMapped) - return nil, nil -} - -// IfdTagIdAndIndex represents a specific part of the IFD path. -// -// This is a legacy type. -type IfdTagIdAndIndex struct { - Name string - TagId uint16 - Index int -} - -// String returns a descriptive string. -func (itii IfdTagIdAndIndex) String() string { - return fmt.Sprintf("IfdTagIdAndIndex", itii.Name, itii.TagId, itii.Index) -} - -// ResolvePath takes a list of names, which can also be suffixed with indices -// (to identify the second, third, etc.. sibling IFD) and returns a list of -// tag-IDs and those indices. -// -// Example: -// -// - IFD/Exif/Iop -// - IFD0/Exif/Iop -// -// This is the only call that supports adding the numeric indices. 
-func (im *IfdMapping) ResolvePath(pathPhrase string) (lineage []IfdTagIdAndIndex, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - pathPhrase = strings.TrimSpace(pathPhrase) - - if pathPhrase == "" { - log.Panicf("can not resolve empty path-phrase") - } - - path := strings.Split(pathPhrase, "/") - lineage = make([]IfdTagIdAndIndex, len(path)) - - ptr := im.rootNode - empty := IfdTagIdAndIndex{} - for i, name := range path { - indexByte := name[len(name)-1] - index := 0 - if indexByte >= '0' && indexByte <= '9' { - index = int(indexByte - '0') - name = name[:len(name)-1] - } - - itii := IfdTagIdAndIndex{} - for _, mi := range ptr.Children { - if mi.Name != name { - continue - } - - itii.Name = name - itii.TagId = mi.TagId - itii.Index = index - - ptr = mi - - break - } - - if itii == empty { - log.Panicf("ifd child with name [%s] not registered: [%s]", name, pathPhrase) - } - - lineage[i] = itii - } - - return lineage, nil -} - -// FqPathPhraseFromLineage returns the fully-qualified IFD path from the slice. -func (im *IfdMapping) FqPathPhraseFromLineage(lineage []IfdTagIdAndIndex) (fqPathPhrase string) { - fqPathParts := make([]string, len(lineage)) - for i, itii := range lineage { - if itii.Index > 0 { - fqPathParts[i] = fmt.Sprintf("%s%d", itii.Name, itii.Index) - } else { - fqPathParts[i] = itii.Name - } - } - - return strings.Join(fqPathParts, "/") -} - -// PathPhraseFromLineage returns the non-fully-qualified IFD path from the -// slice. -func (im *IfdMapping) PathPhraseFromLineage(lineage []IfdTagIdAndIndex) (pathPhrase string) { - pathParts := make([]string, len(lineage)) - for i, itii := range lineage { - pathParts[i] = itii.Name - } - - return strings.Join(pathParts, "/") -} - -// StripPathPhraseIndices returns a non-fully-qualified path-phrase (no -// indices). 
-func (im *IfdMapping) StripPathPhraseIndices(pathPhrase string) (strippedPathPhrase string, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - lineage, err := im.ResolvePath(pathPhrase) - log.PanicIf(err) - - strippedPathPhrase = im.PathPhraseFromLineage(lineage) - return strippedPathPhrase, nil -} - -// Add puts the given IFD at the given position of the tree. The position of the -// tree is referred to as the placement and is represented by a set of tag-IDs, -// where the leftmost is the root tag and the tags going to the right are -// progressive descendants. -func (im *IfdMapping) Add(parentPlacement []uint16, tagId uint16, name string) (err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // TODO(dustin): !! It would be nicer to provide a list of names in the placement rather than tag-IDs. - - ptr, err := im.Get(parentPlacement) - log.PanicIf(err) - - path := make([]string, len(parentPlacement)+1) - if len(parentPlacement) > 0 { - copy(path, ptr.Path) - } - - path[len(path)-1] = name - - placement := make([]uint16, len(parentPlacement)+1) - if len(placement) > 0 { - copy(placement, ptr.Placement) - } - - placement[len(placement)-1] = tagId - - childIfd := &MappedIfd{ - ParentTagId: ptr.TagId, - Path: path, - Placement: placement, - Name: name, - TagId: tagId, - Children: make(map[uint16]*MappedIfd), - } - - if _, found := ptr.Children[tagId]; found == true { - log.Panicf("child IFD with tag-ID (%04x) already registered under IFD [%s] with tag-ID (%04x)", tagId, ptr.Name, ptr.TagId) - } - - ptr.Children[tagId] = childIfd - - return nil -} - -func (im *IfdMapping) dumpLineages(stack []*MappedIfd, input []string) (output []string, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - currentIfd := stack[len(stack)-1] - - output = input - for _, childIfd := range 
currentIfd.Children { - stackCopy := make([]*MappedIfd, len(stack)+1) - - copy(stackCopy, stack) - stackCopy[len(stack)] = childIfd - - // Add to output, but don't include the obligatory root node. - parts := make([]string, len(stackCopy)-1) - for i, mi := range stackCopy[1:] { - parts[i] = mi.Name - } - - output = append(output, strings.Join(parts, "/")) - - output, err = im.dumpLineages(stackCopy, output) - log.PanicIf(err) - } - - return output, nil -} - -// DumpLineages returns a slice of strings representing all mappings. -func (im *IfdMapping) DumpLineages() (output []string, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - stack := []*MappedIfd{im.rootNode} - output = make([]string, 0) - - output, err = im.dumpLineages(stack, output) - log.PanicIf(err) - - return output, nil -} - -// LoadStandardIfds loads the standard IFDs into the mapping. -func LoadStandardIfds(im *IfdMapping) (err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - err = im.Add( - []uint16{}, - IfdStandardIfdIdentity.TagId(), IfdStandardIfdIdentity.Name()) - - log.PanicIf(err) - - err = im.Add( - []uint16{IfdStandardIfdIdentity.TagId()}, - IfdExifStandardIfdIdentity.TagId(), IfdExifStandardIfdIdentity.Name()) - - log.PanicIf(err) - - err = im.Add( - []uint16{IfdStandardIfdIdentity.TagId(), IfdExifStandardIfdIdentity.TagId()}, - IfdExifIopStandardIfdIdentity.TagId(), IfdExifIopStandardIfdIdentity.Name()) - - log.PanicIf(err) - - err = im.Add( - []uint16{IfdStandardIfdIdentity.TagId()}, - IfdGpsInfoStandardIfdIdentity.TagId(), IfdGpsInfoStandardIfdIdentity.Name()) - - log.PanicIf(err) - - return nil -} - -// IfdTag describes a single IFD tag and its parent (if any). 
-type IfdTag struct { - parentIfdTag *IfdTag - tagId uint16 - name string -} - -func NewIfdTag(parentIfdTag *IfdTag, tagId uint16, name string) IfdTag { - return IfdTag{ - parentIfdTag: parentIfdTag, - tagId: tagId, - name: name, - } -} - -// ParentIfd returns the IfdTag of this IFD's parent. -func (it IfdTag) ParentIfd() *IfdTag { - return it.parentIfdTag -} - -// TagId returns the tag-ID of this IFD. -func (it IfdTag) TagId() uint16 { - return it.tagId -} - -// Name returns the simple name of this IFD. -func (it IfdTag) Name() string { - return it.name -} - -// String returns a descriptive string. -func (it IfdTag) String() string { - parentIfdPhrase := "" - if it.parentIfdTag != nil { - parentIfdPhrase = fmt.Sprintf(" PARENT=(0x%04x)[%s]", it.parentIfdTag.tagId, it.parentIfdTag.name) - } - - return fmt.Sprintf("IfdTag", it.tagId, it.name, parentIfdPhrase) -} - -var ( - // rootStandardIfd is the standard root IFD. - rootStandardIfd = NewIfdTag(nil, 0x0000, "IFD") // IFD - - // exifStandardIfd is the standard "Exif" IFD. - exifStandardIfd = NewIfdTag(&rootStandardIfd, 0x8769, "Exif") // IFD/Exif - - // iopStandardIfd is the standard "Iop" IFD. - iopStandardIfd = NewIfdTag(&exifStandardIfd, 0xA005, "Iop") // IFD/Exif/Iop - - // gpsInfoStandardIfd is the standard "GPS" IFD. - gpsInfoStandardIfd = NewIfdTag(&rootStandardIfd, 0x8825, "GPSInfo") // IFD/GPSInfo -) - -// IfdIdentityPart represents one component in an IFD path. -type IfdIdentityPart struct { - Name string - Index int -} - -// String returns a fully-qualified IFD path. -func (iip IfdIdentityPart) String() string { - if iip.Index > 0 { - return fmt.Sprintf("%s%d", iip.Name, iip.Index) - } else { - return iip.Name - } -} - -// UnindexedString returned a non-fully-qualified IFD path. -func (iip IfdIdentityPart) UnindexedString() string { - return iip.Name -} - -// IfdIdentity represents a single IFD path and provides access to various -// information and representations. 
-// -// Only global instances can be used for equality checks. -type IfdIdentity struct { - ifdTag IfdTag - parts []IfdIdentityPart - ifdPath string - fqIfdPath string -} - -// NewIfdIdentity returns a new IfdIdentity struct. -func NewIfdIdentity(ifdTag IfdTag, parts ...IfdIdentityPart) (ii *IfdIdentity) { - ii = &IfdIdentity{ - ifdTag: ifdTag, - parts: parts, - } - - ii.ifdPath = ii.getIfdPath() - ii.fqIfdPath = ii.getFqIfdPath() - - return ii -} - -// NewIfdIdentityFromString parses a string like "IFD/Exif" or "IFD1" or -// something more exotic with custom IFDs ("SomeIFD4/SomeChildIFD6"). Note that -// this will valid the unindexed IFD structure (because the standard tags from -// the specification are unindexed), but not, obviously, any indices (e.g. -// the numbers in "IFD0", "IFD1", "SomeIFD4/SomeChildIFD6"). It is -// required for the caller to check whether these specific instances -// were actually parsed out of the stream. -func NewIfdIdentityFromString(im *IfdMapping, fqIfdPath string) (ii *IfdIdentity, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - lineage, err := im.ResolvePath(fqIfdPath) - log.PanicIf(err) - - var lastIt *IfdTag - identityParts := make([]IfdIdentityPart, len(lineage)) - for i, itii := range lineage { - // Build out the tag that will eventually point to the IFD represented - // by the right-most part in the IFD path. - - it := &IfdTag{ - parentIfdTag: lastIt, - tagId: itii.TagId, - name: itii.Name, - } - - lastIt = it - - // Create the next IfdIdentity part. - - iip := IfdIdentityPart{ - Name: itii.Name, - Index: itii.Index, - } - - identityParts[i] = iip - } - - ii = NewIfdIdentity(*lastIt, identityParts...) 
- return ii, nil -} - -func (ii *IfdIdentity) getFqIfdPath() string { - partPhrases := make([]string, len(ii.parts)) - for i, iip := range ii.parts { - partPhrases[i] = iip.String() - } - - return strings.Join(partPhrases, "/") -} - -func (ii *IfdIdentity) getIfdPath() string { - partPhrases := make([]string, len(ii.parts)) - for i, iip := range ii.parts { - partPhrases[i] = iip.UnindexedString() - } - - return strings.Join(partPhrases, "/") -} - -// String returns a fully-qualified IFD path. -func (ii *IfdIdentity) String() string { - return ii.fqIfdPath -} - -// UnindexedString returns a non-fully-qualified IFD path. -func (ii *IfdIdentity) UnindexedString() string { - return ii.ifdPath -} - -// IfdTag returns the tag struct behind this IFD. -func (ii *IfdIdentity) IfdTag() IfdTag { - return ii.ifdTag -} - -// TagId returns the tag-ID of the IFD. -func (ii *IfdIdentity) TagId() uint16 { - return ii.ifdTag.TagId() -} - -// LeafPathPart returns the last right-most path-part, which represents the -// current IFD. -func (ii *IfdIdentity) LeafPathPart() IfdIdentityPart { - return ii.parts[len(ii.parts)-1] -} - -// Name returns the simple name of this IFD. -func (ii *IfdIdentity) Name() string { - return ii.LeafPathPart().Name -} - -// Index returns the index of this IFD (more then one IFD under a parent IFD -// will be numbered [0..n]). -func (ii *IfdIdentity) Index() int { - return ii.LeafPathPart().Index -} - -// Equals returns true if the two IfdIdentity instances are effectively -// identical. -// -// Since there's no way to get a specific fully-qualified IFD path without a -// certain slice of parts and all other fields are also derived from this, -// checking that the fully-qualified IFD path is equals is sufficient. -func (ii *IfdIdentity) Equals(ii2 *IfdIdentity) bool { - return ii.String() == ii2.String() -} - -// NewChild creates an IfdIdentity for an IFD that is a child of the current -// IFD. 
-func (ii *IfdIdentity) NewChild(childIfdTag IfdTag, index int) (iiChild *IfdIdentity) { - if *childIfdTag.parentIfdTag != ii.ifdTag { - log.Panicf("can not add child; we are not the parent:\nUS=%v\nCHILD=%v", ii.ifdTag, childIfdTag) - } - - childPart := IfdIdentityPart{childIfdTag.name, index} - childParts := append(ii.parts, childPart) - - iiChild = NewIfdIdentity(childIfdTag, childParts...) - return iiChild -} - -// NewSibling creates an IfdIdentity for an IFD that is a sibling to the current -// one. -func (ii *IfdIdentity) NewSibling(index int) (iiSibling *IfdIdentity) { - parts := make([]IfdIdentityPart, len(ii.parts)) - - copy(parts, ii.parts) - parts[len(parts)-1].Index = index - - iiSibling = NewIfdIdentity(ii.ifdTag, parts...) - return iiSibling -} - -var ( - // IfdStandardIfdIdentity represents the IFD path for IFD0. - IfdStandardIfdIdentity = NewIfdIdentity(rootStandardIfd, IfdIdentityPart{"IFD", 0}) - - // IfdExifStandardIfdIdentity represents the IFD path for IFD0/Exif0. - IfdExifStandardIfdIdentity = IfdStandardIfdIdentity.NewChild(exifStandardIfd, 0) - - // IfdExifIopStandardIfdIdentity represents the IFD path for IFD0/Exif0/Iop0. - IfdExifIopStandardIfdIdentity = IfdExifStandardIfdIdentity.NewChild(iopStandardIfd, 0) - - // IfdGPSInfoStandardIfdIdentity represents the IFD path for IFD0/GPSInfo0. - IfdGpsInfoStandardIfdIdentity = IfdStandardIfdIdentity.NewChild(gpsInfoStandardIfd, 0) - - // Ifd1StandardIfdIdentity represents the IFD path for IFD1. 
- Ifd1StandardIfdIdentity = NewIfdIdentity(rootStandardIfd, IfdIdentityPart{"IFD", 1}) -) diff --git a/vendor/github.com/dsoprea/go-exif/v3/common/parser.go b/vendor/github.com/dsoprea/go-exif/v3/common/parser.go deleted file mode 100644 index 76e8ef425..000000000 --- a/vendor/github.com/dsoprea/go-exif/v3/common/parser.go +++ /dev/null @@ -1,280 +0,0 @@ -package exifcommon - -import ( - "bytes" - "errors" - "math" - - "encoding/binary" - - "github.com/dsoprea/go-logging" -) - -var ( - parserLogger = log.NewLogger("exifcommon.parser") -) - -var ( - ErrParseFail = errors.New("parse failure") -) - -// Parser knows how to parse all well-defined, encoded EXIF types. -type Parser struct { -} - -// ParseBytesknows how to parse a byte-type value. -func (p *Parser) ParseBytes(data []byte, unitCount uint32) (value []uint8, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // TODO(dustin): Add test - - count := int(unitCount) - - if len(data) < (TypeByte.Size() * count) { - log.Panic(ErrNotEnoughData) - } - - value = []uint8(data[:count]) - - return value, nil -} - -// ParseAscii returns a string and auto-strips the trailing NUL character that -// should be at the end of the encoding. -func (p *Parser) ParseAscii(data []byte, unitCount uint32) (value string, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // TODO(dustin): Add test - - count := int(unitCount) - - if len(data) < (TypeAscii.Size() * count) { - log.Panic(ErrNotEnoughData) - } - - if len(data) == 0 || data[count-1] != 0 { - s := string(data[:count]) - parserLogger.Warningf(nil, "ASCII not terminated with NUL as expected: [%v]", s) - - for i, c := range s { - if c > 127 { - // Binary - - t := s[:i] - parserLogger.Warningf(nil, "ASCII also had binary characters. Truncating: [%v]->[%s]", s, t) - - return t, nil - } - } - - return s, nil - } - - // Auto-strip the NUL from the end. 
It serves no purpose outside of - // encoding semantics. - - return string(data[:count-1]), nil -} - -// ParseAsciiNoNul returns a string without any consideration for a trailing NUL -// character. -func (p *Parser) ParseAsciiNoNul(data []byte, unitCount uint32) (value string, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // TODO(dustin): Add test - - count := int(unitCount) - - if len(data) < (TypeAscii.Size() * count) { - log.Panic(ErrNotEnoughData) - } - - return string(data[:count]), nil -} - -// ParseShorts knows how to parse an encoded list of shorts. -func (p *Parser) ParseShorts(data []byte, unitCount uint32, byteOrder binary.ByteOrder) (value []uint16, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // TODO(dustin): Add test - - count := int(unitCount) - - if len(data) < (TypeShort.Size() * count) { - log.Panic(ErrNotEnoughData) - } - - value = make([]uint16, count) - for i := 0; i < count; i++ { - value[i] = byteOrder.Uint16(data[i*2:]) - } - - return value, nil -} - -// ParseLongs knows how to encode an encoded list of unsigned longs. -func (p *Parser) ParseLongs(data []byte, unitCount uint32, byteOrder binary.ByteOrder) (value []uint32, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // TODO(dustin): Add test - - count := int(unitCount) - - if len(data) < (TypeLong.Size() * count) { - log.Panic(ErrNotEnoughData) - } - - value = make([]uint32, count) - for i := 0; i < count; i++ { - value[i] = byteOrder.Uint32(data[i*4:]) - } - - return value, nil -} - -// ParseFloats knows how to encode an encoded list of floats. 
-func (p *Parser) ParseFloats(data []byte, unitCount uint32, byteOrder binary.ByteOrder) (value []float32, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - count := int(unitCount) - - if len(data) != (TypeFloat.Size() * count) { - log.Panic(ErrNotEnoughData) - } - - value = make([]float32, count) - for i := 0; i < count; i++ { - value[i] = math.Float32frombits(byteOrder.Uint32(data[i*4 : (i+1)*4])) - } - - return value, nil -} - -// ParseDoubles knows how to encode an encoded list of doubles. -func (p *Parser) ParseDoubles(data []byte, unitCount uint32, byteOrder binary.ByteOrder) (value []float64, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - count := int(unitCount) - - if len(data) != (TypeDouble.Size() * count) { - log.Panic(ErrNotEnoughData) - } - - value = make([]float64, count) - for i := 0; i < count; i++ { - value[i] = math.Float64frombits(byteOrder.Uint64(data[i*8 : (i+1)*8])) - } - - return value, nil -} - -// ParseRationals knows how to parse an encoded list of unsigned rationals. -func (p *Parser) ParseRationals(data []byte, unitCount uint32, byteOrder binary.ByteOrder) (value []Rational, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // TODO(dustin): Add test - - count := int(unitCount) - - if len(data) < (TypeRational.Size() * count) { - log.Panic(ErrNotEnoughData) - } - - value = make([]Rational, count) - for i := 0; i < count; i++ { - value[i].Numerator = byteOrder.Uint32(data[i*8:]) - value[i].Denominator = byteOrder.Uint32(data[i*8+4:]) - } - - return value, nil -} - -// ParseSignedLongs knows how to parse an encoded list of signed longs. 
-func (p *Parser) ParseSignedLongs(data []byte, unitCount uint32, byteOrder binary.ByteOrder) (value []int32, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // TODO(dustin): Add test - - count := int(unitCount) - - if len(data) < (TypeSignedLong.Size() * count) { - log.Panic(ErrNotEnoughData) - } - - b := bytes.NewBuffer(data) - - value = make([]int32, count) - for i := 0; i < count; i++ { - err := binary.Read(b, byteOrder, &value[i]) - log.PanicIf(err) - } - - return value, nil -} - -// ParseSignedRationals knows how to parse an encoded list of signed -// rationals. -func (p *Parser) ParseSignedRationals(data []byte, unitCount uint32, byteOrder binary.ByteOrder) (value []SignedRational, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // TODO(dustin): Add test - - count := int(unitCount) - - if len(data) < (TypeSignedRational.Size() * count) { - log.Panic(ErrNotEnoughData) - } - - b := bytes.NewBuffer(data) - - value = make([]SignedRational, count) - for i := 0; i < count; i++ { - err = binary.Read(b, byteOrder, &value[i].Numerator) - log.PanicIf(err) - - err = binary.Read(b, byteOrder, &value[i].Denominator) - log.PanicIf(err) - } - - return value, nil -} diff --git a/vendor/github.com/dsoprea/go-exif/v3/common/testing_common.go b/vendor/github.com/dsoprea/go-exif/v3/common/testing_common.go deleted file mode 100644 index f04fa22b6..000000000 --- a/vendor/github.com/dsoprea/go-exif/v3/common/testing_common.go +++ /dev/null @@ -1,88 +0,0 @@ -package exifcommon - -import ( - "os" - "path" - - "encoding/binary" - "io/ioutil" - - "github.com/dsoprea/go-logging" -) - -var ( - moduleRootPath = "" - - testExifData []byte = nil - - // EncodeDefaultByteOrder is the default byte-order for encoding operations. - EncodeDefaultByteOrder = binary.BigEndian - - // Default byte order for tests. 
- TestDefaultByteOrder = binary.BigEndian -) - -func GetModuleRootPath() string { - if moduleRootPath == "" { - moduleRootPath = os.Getenv("EXIF_MODULE_ROOT_PATH") - if moduleRootPath != "" { - return moduleRootPath - } - - currentWd, err := os.Getwd() - log.PanicIf(err) - - currentPath := currentWd - - visited := make([]string, 0) - - for { - tryStampFilepath := path.Join(currentPath, ".MODULE_ROOT") - - _, err := os.Stat(tryStampFilepath) - if err != nil && os.IsNotExist(err) != true { - log.Panic(err) - } else if err == nil { - break - } - - visited = append(visited, tryStampFilepath) - - currentPath = path.Dir(currentPath) - if currentPath == "/" { - log.Panicf("could not find module-root: %v", visited) - } - } - - moduleRootPath = currentPath - } - - return moduleRootPath -} - -func GetTestAssetsPath() string { - moduleRootPath := GetModuleRootPath() - assetsPath := path.Join(moduleRootPath, "assets") - - return assetsPath -} - -func getTestImageFilepath() string { - assetsPath := GetTestAssetsPath() - testImageFilepath := path.Join(assetsPath, "NDM_8901.jpg") - return testImageFilepath -} - -func getTestExifData() []byte { - if testExifData == nil { - assetsPath := GetTestAssetsPath() - filepath := path.Join(assetsPath, "NDM_8901.jpg.exif") - - var err error - - testExifData, err = ioutil.ReadFile(filepath) - log.PanicIf(err) - } - - return testExifData -} diff --git a/vendor/github.com/dsoprea/go-exif/v3/common/type.go b/vendor/github.com/dsoprea/go-exif/v3/common/type.go deleted file mode 100644 index e79bcb9a1..000000000 --- a/vendor/github.com/dsoprea/go-exif/v3/common/type.go +++ /dev/null @@ -1,482 +0,0 @@ -package exifcommon - -import ( - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "unicode" - - "encoding/binary" - - "github.com/dsoprea/go-logging" -) - -var ( - typeLogger = log.NewLogger("exif.type") -) - -var ( - // ErrNotEnoughData is used when there isn't enough data to accommodate what - // we're trying to parse (sizeof(type) * 
unit_count). - ErrNotEnoughData = errors.New("not enough data for type") - - // ErrWrongType is used when we try to parse anything other than the - // current type. - ErrWrongType = errors.New("wrong type, can not parse") - - // ErrUnhandledUndefinedTypedTag is used when we try to parse a tag that's - // recorded as an "unknown" type but not a documented tag (therefore - // leaving us not knowning how to read it). - ErrUnhandledUndefinedTypedTag = errors.New("not a standard unknown-typed tag") -) - -// TagTypePrimitive is a type-alias that let's us easily lookup type properties. -type TagTypePrimitive uint16 - -const ( - // TypeByte describes an encoded list of bytes. - TypeByte TagTypePrimitive = 1 - - // TypeAscii describes an encoded list of characters that is terminated - // with a NUL in its encoded form. - TypeAscii TagTypePrimitive = 2 - - // TypeShort describes an encoded list of shorts. - TypeShort TagTypePrimitive = 3 - - // TypeLong describes an encoded list of longs. - TypeLong TagTypePrimitive = 4 - - // TypeRational describes an encoded list of rationals. - TypeRational TagTypePrimitive = 5 - - // TypeUndefined describes an encoded value that has a complex/non-clearcut - // interpretation. - TypeUndefined TagTypePrimitive = 7 - - // We've seen type-8, but have no documentation on it. - - // TypeSignedLong describes an encoded list of signed longs. - TypeSignedLong TagTypePrimitive = 9 - - // TypeSignedRational describes an encoded list of signed rationals. - TypeSignedRational TagTypePrimitive = 10 - - // TypeFloat describes an encoded list of floats - TypeFloat TagTypePrimitive = 11 - - // TypeDouble describes an encoded list of doubles. - TypeDouble TagTypePrimitive = 12 - - // TypeAsciiNoNul is just a pseudo-type, for our own purposes. 
- TypeAsciiNoNul TagTypePrimitive = 0xf0 -) - -// String returns the name of the type -func (typeType TagTypePrimitive) String() string { - return TypeNames[typeType] -} - -// Size returns the size of one atomic unit of the type. -func (tagType TagTypePrimitive) Size() int { - switch tagType { - case TypeByte, TypeAscii, TypeAsciiNoNul: - return 1 - case TypeShort: - return 2 - case TypeLong, TypeSignedLong, TypeFloat: - return 4 - case TypeRational, TypeSignedRational, TypeDouble: - return 8 - default: - log.Panicf("can not determine tag-value size for type (%d): [%s]", - tagType, - TypeNames[tagType]) - // Never called. - return 0 - } -} - -// IsValid returns true if tagType is a valid type. -func (tagType TagTypePrimitive) IsValid() bool { - - // TODO(dustin): Add test - - return tagType == TypeByte || - tagType == TypeAscii || - tagType == TypeAsciiNoNul || - tagType == TypeShort || - tagType == TypeLong || - tagType == TypeRational || - tagType == TypeSignedLong || - tagType == TypeSignedRational || - tagType == TypeFloat || - tagType == TypeDouble || - tagType == TypeUndefined -} - -var ( - // TODO(dustin): Rename TypeNames() to typeNames() and add getter. - TypeNames = map[TagTypePrimitive]string{ - TypeByte: "BYTE", - TypeAscii: "ASCII", - TypeShort: "SHORT", - TypeLong: "LONG", - TypeRational: "RATIONAL", - TypeUndefined: "UNDEFINED", - TypeSignedLong: "SLONG", - TypeSignedRational: "SRATIONAL", - TypeFloat: "FLOAT", - TypeDouble: "DOUBLE", - - TypeAsciiNoNul: "_ASCII_NO_NUL", - } - - typeNamesR = map[string]TagTypePrimitive{} -) - -// Rational describes an unsigned rational value. -type Rational struct { - // Numerator is the numerator of the rational value. - Numerator uint32 - - // Denominator is the numerator of the rational value. - Denominator uint32 -} - -// SignedRational describes a signed rational value. -type SignedRational struct { - // Numerator is the numerator of the rational value. 
- Numerator int32 - - // Denominator is the numerator of the rational value. - Denominator int32 -} - -func isPrintableText(s string) bool { - for _, c := range s { - // unicode.IsPrint() returns false for newline characters. - if c == 0x0d || c == 0x0a { - continue - } else if unicode.IsPrint(rune(c)) == false { - return false - } - } - - return true -} - -// Format returns a stringified value for the given encoding. Automatically -// parses. Automatically calculates count based on type size. This function -// also supports undefined-type values (the ones that we support, anyway) by -// way of the String() method that they all require. We can't be more specific -// because we're a base package and we can't refer to it. -func FormatFromType(value interface{}, justFirst bool) (phrase string, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // TODO(dustin): !! Add test - - switch t := value.(type) { - case []byte: - return DumpBytesToString(t), nil - case string: - for i, c := range t { - if c == 0 { - t = t[:i] - break - } - } - - if isPrintableText(t) == false { - phrase = fmt.Sprintf("string with binary data (%d bytes)", len(t)) - return phrase, nil - } - - return t, nil - case []uint16, []uint32, []int32, []float64, []float32: - val := reflect.ValueOf(t) - - if val.Len() == 0 { - return "", nil - } - - if justFirst == true { - var valueSuffix string - if val.Len() > 1 { - valueSuffix = "..." - } - - return fmt.Sprintf("%v%s", val.Index(0), valueSuffix), nil - } - - return fmt.Sprintf("%v", val), nil - case []Rational: - if len(t) == 0 { - return "", nil - } - - parts := make([]string, len(t)) - for i, r := range t { - parts[i] = fmt.Sprintf("%d/%d", r.Numerator, r.Denominator) - - if justFirst == true { - break - } - } - - if justFirst == true { - var valueSuffix string - if len(t) > 1 { - valueSuffix = "..." 
- } - - return fmt.Sprintf("%v%s", parts[0], valueSuffix), nil - } - - return fmt.Sprintf("%v", parts), nil - case []SignedRational: - if len(t) == 0 { - return "", nil - } - - parts := make([]string, len(t)) - for i, r := range t { - parts[i] = fmt.Sprintf("%d/%d", r.Numerator, r.Denominator) - - if justFirst == true { - break - } - } - - if justFirst == true { - var valueSuffix string - if len(t) > 1 { - valueSuffix = "..." - } - - return fmt.Sprintf("%v%s", parts[0], valueSuffix), nil - } - - return fmt.Sprintf("%v", parts), nil - case fmt.Stringer: - s := t.String() - if isPrintableText(s) == false { - phrase = fmt.Sprintf("stringable with binary data (%d bytes)", len(s)) - return phrase, nil - } - - // An undefined value that is documented (or that we otherwise support). - return s, nil - default: - // Affects only "unknown" values, in general. - log.Panicf("type can not be formatted into string: %v", reflect.TypeOf(value).Name()) - - // Never called. - return "", nil - } -} - -// Format returns a stringified value for the given encoding. Automatically -// parses. Automatically calculates count based on type size. -func FormatFromBytes(rawBytes []byte, tagType TagTypePrimitive, justFirst bool, byteOrder binary.ByteOrder) (phrase string, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // TODO(dustin): !! Add test - - typeSize := tagType.Size() - - if len(rawBytes)%typeSize != 0 { - log.Panicf("byte-count (%d) does not align for [%s] type with a size of (%d) bytes", len(rawBytes), TypeNames[tagType], typeSize) - } - - // unitCount is the calculated unit-count. This should equal the original - // value from the tag (pre-resolution). - unitCount := uint32(len(rawBytes) / typeSize) - - // Truncate the items if it's not bytes or a string and we just want the first. 
- - var value interface{} - - switch tagType { - case TypeByte: - var err error - - value, err = parser.ParseBytes(rawBytes, unitCount) - log.PanicIf(err) - case TypeAscii: - var err error - - value, err = parser.ParseAscii(rawBytes, unitCount) - log.PanicIf(err) - case TypeAsciiNoNul: - var err error - - value, err = parser.ParseAsciiNoNul(rawBytes, unitCount) - log.PanicIf(err) - case TypeShort: - var err error - - value, err = parser.ParseShorts(rawBytes, unitCount, byteOrder) - log.PanicIf(err) - case TypeLong: - var err error - - value, err = parser.ParseLongs(rawBytes, unitCount, byteOrder) - log.PanicIf(err) - case TypeFloat: - var err error - - value, err = parser.ParseFloats(rawBytes, unitCount, byteOrder) - log.PanicIf(err) - case TypeDouble: - var err error - - value, err = parser.ParseDoubles(rawBytes, unitCount, byteOrder) - log.PanicIf(err) - case TypeRational: - var err error - - value, err = parser.ParseRationals(rawBytes, unitCount, byteOrder) - log.PanicIf(err) - case TypeSignedLong: - var err error - - value, err = parser.ParseSignedLongs(rawBytes, unitCount, byteOrder) - log.PanicIf(err) - case TypeSignedRational: - var err error - - value, err = parser.ParseSignedRationals(rawBytes, unitCount, byteOrder) - log.PanicIf(err) - default: - // Affects only "unknown" values, in general. - log.Panicf("value of type [%s] can not be formatted into string", tagType.String()) - - // Never called. - return "", nil - } - - phrase, err = FormatFromType(value, justFirst) - log.PanicIf(err) - - return phrase, nil -} - -// TranslateStringToType converts user-provided strings to properly-typed -// values. If a string, returns a string. Else, assumes that it's a single -// number. If a list needs to be processed, it is the caller's responsibility to -// split it (according to whichever convention has been established). 
-func TranslateStringToType(tagType TagTypePrimitive, valueString string) (value interface{}, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - if tagType == TypeUndefined { - // The caller should just call String() on the decoded type. - log.Panicf("undefined-type values are not supported") - } - - if tagType == TypeByte { - wide, err := strconv.ParseInt(valueString, 16, 8) - log.PanicIf(err) - - return byte(wide), nil - } else if tagType == TypeAscii || tagType == TypeAsciiNoNul { - // Whether or not we're putting an NUL on the end is only relevant for - // byte-level encoding. This function really just supports a user - // interface. - - return valueString, nil - } else if tagType == TypeShort { - n, err := strconv.ParseUint(valueString, 10, 16) - log.PanicIf(err) - - return uint16(n), nil - } else if tagType == TypeLong { - n, err := strconv.ParseUint(valueString, 10, 32) - log.PanicIf(err) - - return uint32(n), nil - } else if tagType == TypeRational { - parts := strings.SplitN(valueString, "/", 2) - - numerator, err := strconv.ParseUint(parts[0], 10, 32) - log.PanicIf(err) - - denominator, err := strconv.ParseUint(parts[1], 10, 32) - log.PanicIf(err) - - return Rational{ - Numerator: uint32(numerator), - Denominator: uint32(denominator), - }, nil - } else if tagType == TypeSignedLong { - n, err := strconv.ParseInt(valueString, 10, 32) - log.PanicIf(err) - - return int32(n), nil - } else if tagType == TypeFloat { - n, err := strconv.ParseFloat(valueString, 32) - log.PanicIf(err) - - return float32(n), nil - } else if tagType == TypeDouble { - n, err := strconv.ParseFloat(valueString, 64) - log.PanicIf(err) - - return float64(n), nil - } else if tagType == TypeSignedRational { - parts := strings.SplitN(valueString, "/", 2) - - numerator, err := strconv.ParseInt(parts[0], 10, 32) - log.PanicIf(err) - - denominator, err := strconv.ParseInt(parts[1], 10, 32) - log.PanicIf(err) - - return 
SignedRational{ - Numerator: int32(numerator), - Denominator: int32(denominator), - }, nil - } - - log.Panicf("from-string encoding for type not supported; this shouldn't happen: [%s]", tagType.String()) - return nil, nil -} - -// GetTypeByName returns the `TagTypePrimitive` for the given type name. -// Returns (0) if not valid. -func GetTypeByName(typeName string) (tagType TagTypePrimitive, found bool) { - tagType, found = typeNamesR[typeName] - return tagType, found -} - -// BasicTag describes a single tag for any purpose. -type BasicTag struct { - // FqIfdPath is the fully-qualified IFD-path. - FqIfdPath string - - // IfdPath is the unindexed IFD-path. - IfdPath string - - // TagId is the tag-ID. - TagId uint16 -} - -func init() { - for typeId, typeName := range TypeNames { - typeNamesR[typeName] = typeId - } -} diff --git a/vendor/github.com/dsoprea/go-exif/v3/common/utility.go b/vendor/github.com/dsoprea/go-exif/v3/common/utility.go deleted file mode 100644 index 575049706..000000000 --- a/vendor/github.com/dsoprea/go-exif/v3/common/utility.go +++ /dev/null @@ -1,148 +0,0 @@ -package exifcommon - -import ( - "bytes" - "fmt" - "reflect" - "strconv" - "strings" - "time" - - "github.com/dsoprea/go-logging" -) - -var ( - timeType = reflect.TypeOf(time.Time{}) -) - -// DumpBytes prints a list of hex-encoded bytes. -func DumpBytes(data []byte) { - fmt.Printf("DUMP: ") - for _, x := range data { - fmt.Printf("%02x ", x) - } - - fmt.Printf("\n") -} - -// DumpBytesClause prints a list like DumpBytes(), but encapsulated in -// "[]byte { ... }". -func DumpBytesClause(data []byte) { - fmt.Printf("DUMP: ") - - fmt.Printf("[]byte { ") - - for i, x := range data { - fmt.Printf("0x%02x", x) - - if i < len(data)-1 { - fmt.Printf(", ") - } - } - - fmt.Printf(" }\n") -} - -// DumpBytesToString returns a stringified list of hex-encoded bytes. 
-func DumpBytesToString(data []byte) string { - b := new(bytes.Buffer) - - for i, x := range data { - _, err := b.WriteString(fmt.Sprintf("%02x", x)) - log.PanicIf(err) - - if i < len(data)-1 { - _, err := b.WriteRune(' ') - log.PanicIf(err) - } - } - - return b.String() -} - -// DumpBytesClauseToString returns a comma-separated list of hex-encoded bytes. -func DumpBytesClauseToString(data []byte) string { - b := new(bytes.Buffer) - - for i, x := range data { - _, err := b.WriteString(fmt.Sprintf("0x%02x", x)) - log.PanicIf(err) - - if i < len(data)-1 { - _, err := b.WriteString(", ") - log.PanicIf(err) - } - } - - return b.String() -} - -// ExifFullTimestampString produces a string like "2018:11:30 13:01:49" from a -// `time.Time` struct. It will attempt to convert to UTC first. -func ExifFullTimestampString(t time.Time) (fullTimestampPhrase string) { - t = t.UTC() - - return fmt.Sprintf("%04d:%02d:%02d %02d:%02d:%02d", t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second()) -} - -// ParseExifFullTimestamp parses dates like "2018:11:30 13:01:49" into a UTC -// `time.Time` struct. -func ParseExifFullTimestamp(fullTimestampPhrase string) (timestamp time.Time, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - parts := strings.Split(fullTimestampPhrase, " ") - datestampValue, timestampValue := parts[0], parts[1] - - // Normalize the separators. 
- datestampValue = strings.ReplaceAll(datestampValue, "-", ":") - timestampValue = strings.ReplaceAll(timestampValue, "-", ":") - - dateParts := strings.Split(datestampValue, ":") - - year, err := strconv.ParseUint(dateParts[0], 10, 16) - if err != nil { - log.Panicf("could not parse year") - } - - month, err := strconv.ParseUint(dateParts[1], 10, 8) - if err != nil { - log.Panicf("could not parse month") - } - - day, err := strconv.ParseUint(dateParts[2], 10, 8) - if err != nil { - log.Panicf("could not parse day") - } - - timeParts := strings.Split(timestampValue, ":") - - hour, err := strconv.ParseUint(timeParts[0], 10, 8) - if err != nil { - log.Panicf("could not parse hour") - } - - minute, err := strconv.ParseUint(timeParts[1], 10, 8) - if err != nil { - log.Panicf("could not parse minute") - } - - second, err := strconv.ParseUint(timeParts[2], 10, 8) - if err != nil { - log.Panicf("could not parse second") - } - - timestamp = time.Date(int(year), time.Month(month), int(day), int(hour), int(minute), int(second), 0, time.UTC) - return timestamp, nil -} - -// IsTime returns true if the value is a `time.Time`. -func IsTime(v interface{}) bool { - - // TODO(dustin): Add test - - return reflect.TypeOf(v) == timeType -} diff --git a/vendor/github.com/dsoprea/go-exif/v3/common/value_context.go b/vendor/github.com/dsoprea/go-exif/v3/common/value_context.go deleted file mode 100644 index b9e634106..000000000 --- a/vendor/github.com/dsoprea/go-exif/v3/common/value_context.go +++ /dev/null @@ -1,464 +0,0 @@ -package exifcommon - -import ( - "errors" - "io" - - "encoding/binary" - - "github.com/dsoprea/go-logging" -) - -var ( - parser *Parser -) - -var ( - // ErrNotFarValue indicates that an offset-based lookup was attempted for a - // non-offset-based (embedded) value. - ErrNotFarValue = errors.New("not a far value") -) - -// ValueContext embeds all of the parameters required to find and extract the -// actual tag value. 
-type ValueContext struct { - unitCount uint32 - valueOffset uint32 - rawValueOffset []byte - rs io.ReadSeeker - - tagType TagTypePrimitive - byteOrder binary.ByteOrder - - // undefinedValueTagType is the effective type to use if this is an - // "undefined" value. - undefinedValueTagType TagTypePrimitive - - ifdPath string - tagId uint16 -} - -// TODO(dustin): We can update newValueContext() to derive `valueOffset` itself (from `rawValueOffset`). - -// NewValueContext returns a new ValueContext struct. -func NewValueContext(ifdPath string, tagId uint16, unitCount, valueOffset uint32, rawValueOffset []byte, rs io.ReadSeeker, tagType TagTypePrimitive, byteOrder binary.ByteOrder) *ValueContext { - return &ValueContext{ - unitCount: unitCount, - valueOffset: valueOffset, - rawValueOffset: rawValueOffset, - rs: rs, - - tagType: tagType, - byteOrder: byteOrder, - - ifdPath: ifdPath, - tagId: tagId, - } -} - -// SetUndefinedValueType sets the effective type if this is an unknown-type tag. -func (vc *ValueContext) SetUndefinedValueType(tagType TagTypePrimitive) { - if vc.tagType != TypeUndefined { - log.Panicf("can not set effective type for unknown-type tag because this is *not* an unknown-type tag") - } - - vc.undefinedValueTagType = tagType -} - -// UnitCount returns the embedded unit-count. -func (vc *ValueContext) UnitCount() uint32 { - return vc.unitCount -} - -// ValueOffset returns the value-offset decoded as a `uint32`. -func (vc *ValueContext) ValueOffset() uint32 { - return vc.valueOffset -} - -// RawValueOffset returns the uninterpreted value-offset. This is used for -// embedded values (values small enough to fit within the offset bytes rather -// than needing to be stored elsewhere and referred to by an actual offset). -func (vc *ValueContext) RawValueOffset() []byte { - return vc.rawValueOffset -} - -// AddressableData returns the block of data that we can dereference into. 
-func (vc *ValueContext) AddressableData() io.ReadSeeker { - - // RELEASE)dustin): Rename from AddressableData() to ReadSeeker() - - return vc.rs -} - -// ByteOrder returns the byte-order of numbers. -func (vc *ValueContext) ByteOrder() binary.ByteOrder { - return vc.byteOrder -} - -// IfdPath returns the path of the IFD containing this tag. -func (vc *ValueContext) IfdPath() string { - return vc.ifdPath -} - -// TagId returns the ID of the tag that we represent. -func (vc *ValueContext) TagId() uint16 { - return vc.tagId -} - -// isEmbedded returns whether the value is embedded or a reference. This can't -// be precalculated since the size is not defined for all types (namely the -// "undefined" types). -func (vc *ValueContext) isEmbedded() bool { - tagType := vc.effectiveValueType() - - return (tagType.Size() * int(vc.unitCount)) <= 4 -} - -// SizeInBytes returns the number of bytes that this value requires. The -// underlying call will panic if the type is UNDEFINED. It is the -// responsibility of the caller to preemptively check that. -func (vc *ValueContext) SizeInBytes() int { - tagType := vc.effectiveValueType() - - return tagType.Size() * int(vc.unitCount) -} - -// effectiveValueType returns the effective type of the unknown-type tag or, if -// not unknown, the actual type. -func (vc *ValueContext) effectiveValueType() (tagType TagTypePrimitive) { - if vc.tagType == TypeUndefined { - tagType = vc.undefinedValueTagType - - if tagType == 0 { - log.Panicf("undefined-value type not set") - } - } else { - tagType = vc.tagType - } - - return tagType -} - -// readRawEncoded returns the encoded bytes for the value that we represent. 
-func (vc *ValueContext) readRawEncoded() (rawBytes []byte, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - tagType := vc.effectiveValueType() - - unitSizeRaw := uint32(tagType.Size()) - - if vc.isEmbedded() == true { - byteLength := unitSizeRaw * vc.unitCount - return vc.rawValueOffset[:byteLength], nil - } - - _, err = vc.rs.Seek(int64(vc.valueOffset), io.SeekStart) - log.PanicIf(err) - - rawBytes = make([]byte, vc.unitCount*unitSizeRaw) - - _, err = io.ReadFull(vc.rs, rawBytes) - log.PanicIf(err) - - return rawBytes, nil -} - -// GetFarOffset returns the offset if the value is not embedded [within the -// pointer itself] or an error if an embedded value. -func (vc *ValueContext) GetFarOffset() (offset uint32, err error) { - if vc.isEmbedded() == true { - return 0, ErrNotFarValue - } - - return vc.valueOffset, nil -} - -// ReadRawEncoded returns the encoded bytes for the value that we represent. -func (vc *ValueContext) ReadRawEncoded() (rawBytes []byte, err error) { - - // TODO(dustin): Remove this method and rename readRawEncoded in its place. - - return vc.readRawEncoded() -} - -// Format returns a string representation for the value. -// -// Where the type is not ASCII, `justFirst` indicates whether to just stringify -// the first item in the slice (or return an empty string if the slice is -// empty). -// -// Since this method lacks the information to process undefined-type tags (e.g. -// byte-order, tag-ID, IFD type), it will return an error if attempted. See -// `Undefined()`. 
-func (vc *ValueContext) Format() (value string, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - rawBytes, err := vc.readRawEncoded() - log.PanicIf(err) - - phrase, err := FormatFromBytes(rawBytes, vc.effectiveValueType(), false, vc.byteOrder) - log.PanicIf(err) - - return phrase, nil -} - -// FormatFirst is similar to `Format` but only gets and stringifies the first -// item. -func (vc *ValueContext) FormatFirst() (value string, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - rawBytes, err := vc.readRawEncoded() - log.PanicIf(err) - - phrase, err := FormatFromBytes(rawBytes, vc.tagType, true, vc.byteOrder) - log.PanicIf(err) - - return phrase, nil -} - -// ReadBytes parses the encoded byte-array from the value-context. -func (vc *ValueContext) ReadBytes() (value []byte, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - rawValue, err := vc.readRawEncoded() - log.PanicIf(err) - - value, err = parser.ParseBytes(rawValue, vc.unitCount) - log.PanicIf(err) - - return value, nil -} - -// ReadAscii parses the encoded NUL-terminated ASCII string from the value- -// context. -func (vc *ValueContext) ReadAscii() (value string, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - rawValue, err := vc.readRawEncoded() - log.PanicIf(err) - - value, err = parser.ParseAscii(rawValue, vc.unitCount) - log.PanicIf(err) - - return value, nil -} - -// ReadAsciiNoNul parses the non-NUL-terminated encoded ASCII string from the -// value-context. 
-func (vc *ValueContext) ReadAsciiNoNul() (value string, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - rawValue, err := vc.readRawEncoded() - log.PanicIf(err) - - value, err = parser.ParseAsciiNoNul(rawValue, vc.unitCount) - log.PanicIf(err) - - return value, nil -} - -// ReadShorts parses the list of encoded shorts from the value-context. -func (vc *ValueContext) ReadShorts() (value []uint16, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - rawValue, err := vc.readRawEncoded() - log.PanicIf(err) - - value, err = parser.ParseShorts(rawValue, vc.unitCount, vc.byteOrder) - log.PanicIf(err) - - return value, nil -} - -// ReadLongs parses the list of encoded, unsigned longs from the value-context. -func (vc *ValueContext) ReadLongs() (value []uint32, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - rawValue, err := vc.readRawEncoded() - log.PanicIf(err) - - value, err = parser.ParseLongs(rawValue, vc.unitCount, vc.byteOrder) - log.PanicIf(err) - - return value, nil -} - -// ReadFloats parses the list of encoded, floats from the value-context. -func (vc *ValueContext) ReadFloats() (value []float32, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - rawValue, err := vc.readRawEncoded() - log.PanicIf(err) - - value, err = parser.ParseFloats(rawValue, vc.unitCount, vc.byteOrder) - log.PanicIf(err) - - return value, nil -} - -// ReadDoubles parses the list of encoded, doubles from the value-context. 
-func (vc *ValueContext) ReadDoubles() (value []float64, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - rawValue, err := vc.readRawEncoded() - log.PanicIf(err) - - value, err = parser.ParseDoubles(rawValue, vc.unitCount, vc.byteOrder) - log.PanicIf(err) - - return value, nil -} - -// ReadRationals parses the list of encoded, unsigned rationals from the value- -// context. -func (vc *ValueContext) ReadRationals() (value []Rational, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - rawValue, err := vc.readRawEncoded() - log.PanicIf(err) - - value, err = parser.ParseRationals(rawValue, vc.unitCount, vc.byteOrder) - log.PanicIf(err) - - return value, nil -} - -// ReadSignedLongs parses the list of encoded, signed longs from the value-context. -func (vc *ValueContext) ReadSignedLongs() (value []int32, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - rawValue, err := vc.readRawEncoded() - log.PanicIf(err) - - value, err = parser.ParseSignedLongs(rawValue, vc.unitCount, vc.byteOrder) - log.PanicIf(err) - - return value, nil -} - -// ReadSignedRationals parses the list of encoded, signed rationals from the -// value-context. -func (vc *ValueContext) ReadSignedRationals() (value []SignedRational, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - rawValue, err := vc.readRawEncoded() - log.PanicIf(err) - - value, err = parser.ParseSignedRationals(rawValue, vc.unitCount, vc.byteOrder) - log.PanicIf(err) - - return value, nil -} - -// Values knows how to resolve the given value. This value is always a list -// (undefined-values aside), so we're named accordingly. -// -// Since this method lacks the information to process unknown-type tags (e.g. 
-// byte-order, tag-ID, IFD type), it will return an error if attempted. See -// `Undefined()`. -func (vc *ValueContext) Values() (values interface{}, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - if vc.tagType == TypeByte { - values, err = vc.ReadBytes() - log.PanicIf(err) - } else if vc.tagType == TypeAscii { - values, err = vc.ReadAscii() - log.PanicIf(err) - } else if vc.tagType == TypeAsciiNoNul { - values, err = vc.ReadAsciiNoNul() - log.PanicIf(err) - } else if vc.tagType == TypeShort { - values, err = vc.ReadShorts() - log.PanicIf(err) - } else if vc.tagType == TypeLong { - values, err = vc.ReadLongs() - log.PanicIf(err) - } else if vc.tagType == TypeRational { - values, err = vc.ReadRationals() - log.PanicIf(err) - } else if vc.tagType == TypeSignedLong { - values, err = vc.ReadSignedLongs() - log.PanicIf(err) - } else if vc.tagType == TypeSignedRational { - values, err = vc.ReadSignedRationals() - log.PanicIf(err) - } else if vc.tagType == TypeFloat { - values, err = vc.ReadFloats() - log.PanicIf(err) - } else if vc.tagType == TypeDouble { - values, err = vc.ReadDoubles() - log.PanicIf(err) - } else if vc.tagType == TypeUndefined { - log.Panicf("will not parse undefined-type value") - - // Never called. - return nil, nil - } else { - log.Panicf("value of type [%s] is unparseable", vc.tagType) - // Never called. 
- return nil, nil - } - - return values, nil -} - -func init() { - parser = new(Parser) -} diff --git a/vendor/github.com/dsoprea/go-exif/v3/common/value_encoder.go b/vendor/github.com/dsoprea/go-exif/v3/common/value_encoder.go deleted file mode 100644 index 2cd26cc7b..000000000 --- a/vendor/github.com/dsoprea/go-exif/v3/common/value_encoder.go +++ /dev/null @@ -1,273 +0,0 @@ -package exifcommon - -import ( - "bytes" - "math" - "reflect" - "time" - - "encoding/binary" - - "github.com/dsoprea/go-logging" -) - -var ( - typeEncodeLogger = log.NewLogger("exif.type_encode") -) - -// EncodedData encapsulates the compound output of an encoding operation. -type EncodedData struct { - Type TagTypePrimitive - Encoded []byte - - // TODO(dustin): Is this really necessary? We might have this just to correlate to the incoming stream format (raw bytes and a unit-count both for incoming and outgoing). - UnitCount uint32 -} - -// ValueEncoder knows how to encode values of every type to bytes. -type ValueEncoder struct { - byteOrder binary.ByteOrder -} - -// NewValueEncoder returns a new ValueEncoder. -func NewValueEncoder(byteOrder binary.ByteOrder) *ValueEncoder { - return &ValueEncoder{ - byteOrder: byteOrder, - } -} - -func (ve *ValueEncoder) encodeBytes(value []uint8) (ed EncodedData, err error) { - ed.Type = TypeByte - ed.Encoded = []byte(value) - ed.UnitCount = uint32(len(value)) - - return ed, nil -} - -func (ve *ValueEncoder) encodeAscii(value string) (ed EncodedData, err error) { - ed.Type = TypeAscii - - ed.Encoded = []byte(value) - ed.Encoded = append(ed.Encoded, 0) - - ed.UnitCount = uint32(len(ed.Encoded)) - - return ed, nil -} - -// encodeAsciiNoNul returns a string encoded as a byte-string without a trailing -// NUL byte. -// -// Note that: -// -// 1. This type can not be automatically encoded using `Encode()`. The default -// mode is to encode *with* a trailing NUL byte using `encodeAscii`. 
Only -// certain undefined-type tags using an unterminated ASCII string and these -// are exceptional in nature. -// -// 2. The presence of this method allows us to completely test the complimentary -// no-nul parser. -// -func (ve *ValueEncoder) encodeAsciiNoNul(value string) (ed EncodedData, err error) { - ed.Type = TypeAsciiNoNul - ed.Encoded = []byte(value) - ed.UnitCount = uint32(len(ed.Encoded)) - - return ed, nil -} - -func (ve *ValueEncoder) encodeShorts(value []uint16) (ed EncodedData, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - ed.UnitCount = uint32(len(value)) - ed.Encoded = make([]byte, ed.UnitCount*2) - - for i := uint32(0); i < ed.UnitCount; i++ { - ve.byteOrder.PutUint16(ed.Encoded[i*2:(i+1)*2], value[i]) - } - - ed.Type = TypeShort - - return ed, nil -} - -func (ve *ValueEncoder) encodeLongs(value []uint32) (ed EncodedData, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - ed.UnitCount = uint32(len(value)) - ed.Encoded = make([]byte, ed.UnitCount*4) - - for i := uint32(0); i < ed.UnitCount; i++ { - ve.byteOrder.PutUint32(ed.Encoded[i*4:(i+1)*4], value[i]) - } - - ed.Type = TypeLong - - return ed, nil -} - -func (ve *ValueEncoder) encodeFloats(value []float32) (ed EncodedData, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - ed.UnitCount = uint32(len(value)) - ed.Encoded = make([]byte, ed.UnitCount*4) - - for i := uint32(0); i < ed.UnitCount; i++ { - ve.byteOrder.PutUint32(ed.Encoded[i*4:(i+1)*4], math.Float32bits(value[i])) - } - - ed.Type = TypeFloat - - return ed, nil -} - -func (ve *ValueEncoder) encodeDoubles(value []float64) (ed EncodedData, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - ed.UnitCount = uint32(len(value)) - ed.Encoded = make([]byte, ed.UnitCount*8) - - for i 
:= uint32(0); i < ed.UnitCount; i++ { - ve.byteOrder.PutUint64(ed.Encoded[i*8:(i+1)*8], math.Float64bits(value[i])) - } - - ed.Type = TypeDouble - - return ed, nil -} - -func (ve *ValueEncoder) encodeRationals(value []Rational) (ed EncodedData, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - ed.UnitCount = uint32(len(value)) - ed.Encoded = make([]byte, ed.UnitCount*8) - - for i := uint32(0); i < ed.UnitCount; i++ { - ve.byteOrder.PutUint32(ed.Encoded[i*8+0:i*8+4], value[i].Numerator) - ve.byteOrder.PutUint32(ed.Encoded[i*8+4:i*8+8], value[i].Denominator) - } - - ed.Type = TypeRational - - return ed, nil -} - -func (ve *ValueEncoder) encodeSignedLongs(value []int32) (ed EncodedData, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - ed.UnitCount = uint32(len(value)) - - b := bytes.NewBuffer(make([]byte, 0, 8*ed.UnitCount)) - - for i := uint32(0); i < ed.UnitCount; i++ { - err := binary.Write(b, ve.byteOrder, value[i]) - log.PanicIf(err) - } - - ed.Type = TypeSignedLong - ed.Encoded = b.Bytes() - - return ed, nil -} - -func (ve *ValueEncoder) encodeSignedRationals(value []SignedRational) (ed EncodedData, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - ed.UnitCount = uint32(len(value)) - - b := bytes.NewBuffer(make([]byte, 0, 8*ed.UnitCount)) - - for i := uint32(0); i < ed.UnitCount; i++ { - err := binary.Write(b, ve.byteOrder, value[i].Numerator) - log.PanicIf(err) - - err = binary.Write(b, ve.byteOrder, value[i].Denominator) - log.PanicIf(err) - } - - ed.Type = TypeSignedRational - ed.Encoded = b.Bytes() - - return ed, nil -} - -// Encode returns bytes for the given value, infering type from the actual -// value. This does not support `TypeAsciiNoNull` (all strings are encoded as -// `TypeAscii`). 
-func (ve *ValueEncoder) Encode(value interface{}) (ed EncodedData, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - switch t := value.(type) { - case []byte: - ed, err = ve.encodeBytes(t) - log.PanicIf(err) - case string: - ed, err = ve.encodeAscii(t) - log.PanicIf(err) - case []uint16: - ed, err = ve.encodeShorts(t) - log.PanicIf(err) - case []uint32: - ed, err = ve.encodeLongs(t) - log.PanicIf(err) - case []float32: - ed, err = ve.encodeFloats(t) - log.PanicIf(err) - case []float64: - ed, err = ve.encodeDoubles(t) - log.PanicIf(err) - case []Rational: - ed, err = ve.encodeRationals(t) - log.PanicIf(err) - case []int32: - ed, err = ve.encodeSignedLongs(t) - log.PanicIf(err) - case []SignedRational: - ed, err = ve.encodeSignedRationals(t) - log.PanicIf(err) - case time.Time: - // For convenience, if the user doesn't want to deal with translation - // semantics with timestamps. - - s := ExifFullTimestampString(t) - - ed, err = ve.encodeAscii(s) - log.PanicIf(err) - default: - log.Panicf("value not encodable: [%s] [%v]", reflect.TypeOf(value), value) - } - - return ed, nil -} diff --git a/vendor/github.com/dsoprea/go-exif/v3/data_layer.go b/vendor/github.com/dsoprea/go-exif/v3/data_layer.go deleted file mode 100644 index 7883752cc..000000000 --- a/vendor/github.com/dsoprea/go-exif/v3/data_layer.go +++ /dev/null @@ -1,50 +0,0 @@ -package exif - -import ( - "io" - - "github.com/dsoprea/go-logging" - "github.com/dsoprea/go-utility/v2/filesystem" -) - -type ExifBlobSeeker interface { - GetReadSeeker(initialOffset int64) (rs io.ReadSeeker, err error) -} - -// ExifReadSeeker knows how to retrieve data from the EXIF blob relative to the -// beginning of the blob (so, absolute position (0) is the first byte of the -// EXIF data). 
-type ExifReadSeeker struct { - rs io.ReadSeeker -} - -func NewExifReadSeeker(rs io.ReadSeeker) *ExifReadSeeker { - return &ExifReadSeeker{ - rs: rs, - } -} - -func NewExifReadSeekerWithBytes(exifData []byte) *ExifReadSeeker { - sb := rifs.NewSeekableBufferWithBytes(exifData) - edbs := NewExifReadSeeker(sb) - - return edbs -} - -// Fork creates a new ReadSeeker instead that wraps a BouncebackReader to -// maintain its own position in the stream. -func (edbs *ExifReadSeeker) GetReadSeeker(initialOffset int64) (rs io.ReadSeeker, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - br, err := rifs.NewBouncebackReader(edbs.rs) - log.PanicIf(err) - - _, err = br.Seek(initialOffset, io.SeekStart) - log.PanicIf(err) - - return br, nil -} diff --git a/vendor/github.com/dsoprea/go-exif/v3/error.go b/vendor/github.com/dsoprea/go-exif/v3/error.go deleted file mode 100644 index 2f00b08a4..000000000 --- a/vendor/github.com/dsoprea/go-exif/v3/error.go +++ /dev/null @@ -1,14 +0,0 @@ -package exif - -import ( - "errors" -) - -var ( - // ErrTagNotFound indicates that the tag was not found. - ErrTagNotFound = errors.New("tag not found") - - // ErrTagNotKnown indicates that the tag is not registered with us as a - // known tag. - ErrTagNotKnown = errors.New("tag is not known") -) diff --git a/vendor/github.com/dsoprea/go-exif/v3/exif.go b/vendor/github.com/dsoprea/go-exif/v3/exif.go deleted file mode 100644 index f66e839d9..000000000 --- a/vendor/github.com/dsoprea/go-exif/v3/exif.go +++ /dev/null @@ -1,333 +0,0 @@ -package exif - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "io" - "os" - - "encoding/binary" - "io/ioutil" - - "github.com/dsoprea/go-logging" - - "github.com/dsoprea/go-exif/v3/common" -) - -const ( - // ExifAddressableAreaStart is the absolute offset in the file that all - // offsets are relative to. 
- ExifAddressableAreaStart = uint32(0x0) - - // ExifDefaultFirstIfdOffset is essentially the number of bytes in addition - // to `ExifAddressableAreaStart` that you have to move in order to escape - // the rest of the header and get to the earliest point where we can put - // stuff (which has to be the first IFD). This is the size of the header - // sequence containing the two-character byte-order, two-character fixed- - // bytes, and the four bytes describing the first-IFD offset. - ExifDefaultFirstIfdOffset = uint32(2 + 2 + 4) -) - -const ( - // ExifSignatureLength is the number of bytes in the EXIF signature (which - // customarily includes the first IFD offset). - ExifSignatureLength = 8 -) - -var ( - exifLogger = log.NewLogger("exif.exif") - - ExifBigEndianSignature = [4]byte{'M', 'M', 0x00, 0x2a} - ExifLittleEndianSignature = [4]byte{'I', 'I', 0x2a, 0x00} -) - -var ( - ErrNoExif = errors.New("no exif data") - ErrExifHeaderError = errors.New("exif header error") -) - -// SearchAndExtractExif searches for an EXIF blob in the byte-slice. -func SearchAndExtractExif(data []byte) (rawExif []byte, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - b := bytes.NewBuffer(data) - - rawExif, err = SearchAndExtractExifWithReader(b) - if err != nil { - if err == ErrNoExif { - return nil, err - } - - log.Panic(err) - } - - return rawExif, nil -} - -// SearchAndExtractExifN searches for an EXIF blob in the byte-slice, but skips -// the given number of EXIF blocks first. This is a forensics tool that helps -// identify multiple EXIF blocks in a file. 
-func SearchAndExtractExifN(data []byte, n int) (rawExif []byte, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - skips := 0 - totalDiscarded := 0 - for { - b := bytes.NewBuffer(data) - - var discarded int - - rawExif, discarded, err = searchAndExtractExifWithReaderWithDiscarded(b) - if err != nil { - if err == ErrNoExif { - return nil, err - } - - log.Panic(err) - } - - exifLogger.Debugf(nil, "Read EXIF block (%d).", skips) - - totalDiscarded += discarded - - if skips >= n { - exifLogger.Debugf(nil, "Reached requested EXIF block (%d).", n) - break - } - - nextOffset := discarded + 1 - exifLogger.Debugf(nil, "Skipping EXIF block (%d) by seeking to position (%d).", skips, nextOffset) - - data = data[nextOffset:] - skips++ - } - - exifLogger.Debugf(nil, "Found EXIF blob (%d) bytes from initial position.", totalDiscarded) - return rawExif, nil -} - -// searchAndExtractExifWithReaderWithDiscarded searches for an EXIF blob using -// an `io.Reader`. We can't know how much long the EXIF data is without parsing -// it, so this will likely grab up a lot of the image-data, too. -// -// This function returned the count of preceding bytes. -func searchAndExtractExifWithReaderWithDiscarded(r io.Reader) (rawExif []byte, discarded int, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // Search for the beginning of the EXIF information. The EXIF is near the - // beginning of most JPEGs, so this likely doesn't have a high cost (at - // least, again, with JPEGs). - - br := bufio.NewReader(r) - - for { - window, err := br.Peek(ExifSignatureLength) - if err != nil { - if err == io.EOF { - return nil, 0, ErrNoExif - } - - log.Panic(err) - } - - _, err = ParseExifHeader(window) - if err != nil { - if log.Is(err, ErrNoExif) == true { - // No EXIF. Move forward by one byte. 
- - _, err := br.Discard(1) - log.PanicIf(err) - - discarded++ - - continue - } - - // Some other error. - log.Panic(err) - } - - break - } - - exifLogger.Debugf(nil, "Found EXIF blob (%d) bytes from initial position.", discarded) - - rawExif, err = ioutil.ReadAll(br) - log.PanicIf(err) - - return rawExif, discarded, nil -} - -// RELEASE(dustin): We should replace the implementation of SearchAndExtractExifWithReader with searchAndExtractExifWithReaderWithDiscarded and drop the latter. - -// SearchAndExtractExifWithReader searches for an EXIF blob using an -// `io.Reader`. We can't know how much long the EXIF data is without parsing it, -// so this will likely grab up a lot of the image-data, too. -func SearchAndExtractExifWithReader(r io.Reader) (rawExif []byte, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - rawExif, _, err = searchAndExtractExifWithReaderWithDiscarded(r) - if err != nil { - if err == ErrNoExif { - return nil, err - } - - log.Panic(err) - } - - return rawExif, nil -} - -// SearchFileAndExtractExif returns a slice from the beginning of the EXIF data -// to the end of the file (it's not practical to try and calculate where the -// data actually ends). -func SearchFileAndExtractExif(filepath string) (rawExif []byte, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // Open the file. - - f, err := os.Open(filepath) - log.PanicIf(err) - - defer f.Close() - - rawExif, err = SearchAndExtractExifWithReader(f) - log.PanicIf(err) - - return rawExif, nil -} - -type ExifHeader struct { - ByteOrder binary.ByteOrder - FirstIfdOffset uint32 -} - -func (eh ExifHeader) String() string { - return fmt.Sprintf("ExifHeader", eh.ByteOrder, eh.FirstIfdOffset) -} - -// ParseExifHeader parses the bytes at the very top of the header. 
-// -// This will panic with ErrNoExif on any data errors so that we can double as -// an EXIF-detection routine. -func ParseExifHeader(data []byte) (eh ExifHeader, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // Good reference: - // - // CIPA DC-008-2016; JEITA CP-3451D - // -> http://www.cipa.jp/std/documents/e/DC-008-Translation-2016-E.pdf - - if len(data) < ExifSignatureLength { - exifLogger.Warningf(nil, "Not enough data for EXIF header: (%d)", len(data)) - return eh, ErrNoExif - } - - if bytes.Equal(data[:4], ExifBigEndianSignature[:]) == true { - exifLogger.Debugf(nil, "Byte-order is big-endian.") - eh.ByteOrder = binary.BigEndian - } else if bytes.Equal(data[:4], ExifLittleEndianSignature[:]) == true { - eh.ByteOrder = binary.LittleEndian - exifLogger.Debugf(nil, "Byte-order is little-endian.") - } else { - return eh, ErrNoExif - } - - eh.FirstIfdOffset = eh.ByteOrder.Uint32(data[4:8]) - - return eh, nil -} - -// Visit recursively invokes a callback for every tag. -func Visit(rootIfdIdentity *exifcommon.IfdIdentity, ifdMapping *exifcommon.IfdMapping, tagIndex *TagIndex, exifData []byte, visitor TagVisitorFn, so *ScanOptions) (eh ExifHeader, furthestOffset uint32, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - eh, err = ParseExifHeader(exifData) - log.PanicIf(err) - - ebs := NewExifReadSeekerWithBytes(exifData) - ie := NewIfdEnumerate(ifdMapping, tagIndex, ebs, eh.ByteOrder) - - _, err = ie.Scan(rootIfdIdentity, eh.FirstIfdOffset, visitor, so) - log.PanicIf(err) - - furthestOffset = ie.FurthestOffset() - - return eh, furthestOffset, nil -} - -// Collect recursively builds a static structure of all IFDs and tags. 
-func Collect(ifdMapping *exifcommon.IfdMapping, tagIndex *TagIndex, exifData []byte) (eh ExifHeader, index IfdIndex, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - eh, err = ParseExifHeader(exifData) - log.PanicIf(err) - - ebs := NewExifReadSeekerWithBytes(exifData) - ie := NewIfdEnumerate(ifdMapping, tagIndex, ebs, eh.ByteOrder) - - index, err = ie.Collect(eh.FirstIfdOffset) - log.PanicIf(err) - - return eh, index, nil -} - -// BuildExifHeader constructs the bytes that go at the front of the stream. -func BuildExifHeader(byteOrder binary.ByteOrder, firstIfdOffset uint32) (headerBytes []byte, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - b := new(bytes.Buffer) - - var signatureBytes []byte - if byteOrder == binary.BigEndian { - signatureBytes = ExifBigEndianSignature[:] - } else { - signatureBytes = ExifLittleEndianSignature[:] - } - - _, err = b.Write(signatureBytes) - log.PanicIf(err) - - err = binary.Write(b, byteOrder, firstIfdOffset) - log.PanicIf(err) - - return b.Bytes(), nil -} diff --git a/vendor/github.com/dsoprea/go-exif/v3/gps.go b/vendor/github.com/dsoprea/go-exif/v3/gps.go deleted file mode 100644 index 7a61cd94d..000000000 --- a/vendor/github.com/dsoprea/go-exif/v3/gps.go +++ /dev/null @@ -1,117 +0,0 @@ -package exif - -import ( - "errors" - "fmt" - "time" - - "github.com/dsoprea/go-logging" - "github.com/golang/geo/s2" - - "github.com/dsoprea/go-exif/v3/common" -) - -var ( - // ErrGpsCoordinatesNotValid means that some part of the geographic data was - // unparseable. - ErrGpsCoordinatesNotValid = errors.New("GPS coordinates not valid") -) - -// GpsDegrees is a high-level struct representing geographic data. -type GpsDegrees struct { - // Orientation describes the N/E/S/W direction that this position is - // relative to. 
- Orientation byte - - // Degrees is a simple float representing the underlying rational degrees - // amount. - Degrees float64 - - // Minutes is a simple float representing the underlying rational minutes - // amount. - Minutes float64 - - // Seconds is a simple float representing the underlying ration seconds - // amount. - Seconds float64 -} - -// NewGpsDegreesFromRationals returns a GpsDegrees struct given the EXIF-encoded -// information. The refValue is the N/E/S/W direction that this position is -// relative to. -func NewGpsDegreesFromRationals(refValue string, rawCoordinate []exifcommon.Rational) (gd GpsDegrees, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - if len(rawCoordinate) != 3 { - log.Panicf("new GpsDegrees struct requires a raw-coordinate with exactly three rationals") - } - - gd = GpsDegrees{ - Orientation: refValue[0], - Degrees: float64(rawCoordinate[0].Numerator) / float64(rawCoordinate[0].Denominator), - Minutes: float64(rawCoordinate[1].Numerator) / float64(rawCoordinate[1].Denominator), - Seconds: float64(rawCoordinate[2].Numerator) / float64(rawCoordinate[2].Denominator), - } - - return gd, nil -} - -// String provides returns a descriptive string. -func (d GpsDegrees) String() string { - return fmt.Sprintf("Degrees", string([]byte{d.Orientation}), d.Degrees, d.Minutes, d.Seconds) -} - -// Decimal calculates and returns the simplified float representation of the -// component degrees. -func (d GpsDegrees) Decimal() float64 { - decimal := float64(d.Degrees) + float64(d.Minutes)/60.0 + float64(d.Seconds)/3600.0 - - if d.Orientation == 'S' || d.Orientation == 'W' { - return -decimal - } - - return decimal -} - -// Raw returns a Rational struct that can be used to *write* coordinates. In -// practice, the denominator are typically (1) in the original EXIF data, and, -// that being the case, this will best preserve precision. 
-func (d GpsDegrees) Raw() []exifcommon.Rational { - return []exifcommon.Rational{ - {Numerator: uint32(d.Degrees), Denominator: 1}, - {Numerator: uint32(d.Minutes), Denominator: 1}, - {Numerator: uint32(d.Seconds), Denominator: 1}, - } -} - -// GpsInfo encapsulates all of the geographic information in one place. -type GpsInfo struct { - Latitude, Longitude GpsDegrees - Altitude int - Timestamp time.Time -} - -// String returns a descriptive string. -func (gi *GpsInfo) String() string { - return fmt.Sprintf("GpsInfo", - gi.Latitude.Decimal(), gi.Longitude.Decimal(), gi.Altitude, gi.Timestamp) -} - -// S2CellId returns the cell-ID of the geographic location on the earth. -func (gi *GpsInfo) S2CellId() s2.CellID { - latitude := gi.Latitude.Decimal() - longitude := gi.Longitude.Decimal() - - ll := s2.LatLngFromDegrees(latitude, longitude) - cellId := s2.CellIDFromLatLng(ll) - - if cellId.IsValid() == false { - panic(ErrGpsCoordinatesNotValid) - } - - return cellId -} diff --git a/vendor/github.com/dsoprea/go-exif/v3/ifd_builder.go b/vendor/github.com/dsoprea/go-exif/v3/ifd_builder.go deleted file mode 100644 index a404b362a..000000000 --- a/vendor/github.com/dsoprea/go-exif/v3/ifd_builder.go +++ /dev/null @@ -1,1199 +0,0 @@ -package exif - -// NOTES: -// -// The thumbnail offset and length tags shouldn't be set directly. Use the -// (*IfdBuilder).SetThumbnail() method instead. 
- -import ( - "errors" - "fmt" - "strings" - - "encoding/binary" - - "github.com/dsoprea/go-logging" - - "github.com/dsoprea/go-exif/v3/common" - "github.com/dsoprea/go-exif/v3/undefined" -) - -var ( - ifdBuilderLogger = log.NewLogger("exif.ifd_builder") -) - -var ( - ErrTagEntryNotFound = errors.New("tag entry not found") - ErrChildIbNotFound = errors.New("child IB not found") -) - -type IfdBuilderTagValue struct { - valueBytes []byte - ib *IfdBuilder -} - -func (ibtv IfdBuilderTagValue) String() string { - if ibtv.IsBytes() == true { - var valuePhrase string - if len(ibtv.valueBytes) <= 8 { - valuePhrase = fmt.Sprintf("%v", ibtv.valueBytes) - } else { - valuePhrase = fmt.Sprintf("%v...", ibtv.valueBytes[:8]) - } - - return fmt.Sprintf("IfdBuilderTagValue", valuePhrase, len(ibtv.valueBytes)) - } else if ibtv.IsIb() == true { - return fmt.Sprintf("IfdBuilderTagValue", ibtv.ib) - } else { - log.Panicf("IBTV state undefined") - return "" - } -} - -func NewIfdBuilderTagValueFromBytes(valueBytes []byte) *IfdBuilderTagValue { - return &IfdBuilderTagValue{ - valueBytes: valueBytes, - } -} - -func NewIfdBuilderTagValueFromIfdBuilder(ib *IfdBuilder) *IfdBuilderTagValue { - return &IfdBuilderTagValue{ - ib: ib, - } -} - -// IsBytes returns true if the bytes are populated. This is always the case -// when we're loaded from a tag in an existing IFD. 
-func (ibtv IfdBuilderTagValue) IsBytes() bool { - return ibtv.valueBytes != nil -} - -func (ibtv IfdBuilderTagValue) Bytes() []byte { - if ibtv.IsBytes() == false { - log.Panicf("this tag is not a byte-slice value") - } else if ibtv.IsIb() == true { - log.Panicf("this tag is an IFD-builder value not a byte-slice") - } - - return ibtv.valueBytes -} - -func (ibtv IfdBuilderTagValue) IsIb() bool { - return ibtv.ib != nil -} - -func (ibtv IfdBuilderTagValue) Ib() *IfdBuilder { - if ibtv.IsIb() == false { - log.Panicf("this tag is not an IFD-builder value") - } else if ibtv.IsBytes() == true { - log.Panicf("this tag is a byte-slice, not a IFD-builder") - } - - return ibtv.ib -} - -type BuilderTag struct { - // ifdPath is the path of the IFD that hosts this tag. - ifdPath string - - tagId uint16 - typeId exifcommon.TagTypePrimitive - - // value is either a value that can be encoded, an IfdBuilder instance (for - // child IFDs), or an IfdTagEntry instance representing an existing, - // previously-stored tag. - value *IfdBuilderTagValue - - // byteOrder is the byte order. It's chiefly/originally here to support - // printing the value. 
- byteOrder binary.ByteOrder -} - -func NewBuilderTag(ifdPath string, tagId uint16, typeId exifcommon.TagTypePrimitive, value *IfdBuilderTagValue, byteOrder binary.ByteOrder) *BuilderTag { - return &BuilderTag{ - ifdPath: ifdPath, - tagId: tagId, - typeId: typeId, - value: value, - byteOrder: byteOrder, - } -} - -func NewChildIfdBuilderTag(ifdPath string, tagId uint16, value *IfdBuilderTagValue) *BuilderTag { - return &BuilderTag{ - ifdPath: ifdPath, - tagId: tagId, - typeId: exifcommon.TypeLong, - value: value, - } -} - -func (bt *BuilderTag) Value() (value *IfdBuilderTagValue) { - return bt.value -} - -func (bt *BuilderTag) String() string { - var valueString string - - if bt.value.IsBytes() == true { - var err error - - valueString, err = exifcommon.FormatFromBytes(bt.value.Bytes(), bt.typeId, false, bt.byteOrder) - log.PanicIf(err) - } else { - valueString = fmt.Sprintf("%v", bt.value) - } - - return fmt.Sprintf("BuilderTag", bt.ifdPath, bt.tagId, bt.typeId.String(), valueString) -} - -func (bt *BuilderTag) SetValue(byteOrder binary.ByteOrder, value interface{}) (err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // TODO(dustin): !! Add test. - - var ed exifcommon.EncodedData - if bt.typeId == exifcommon.TypeUndefined { - encodeable := value.(exifundefined.EncodeableValue) - - encoded, unitCount, err := exifundefined.Encode(encodeable, byteOrder) - log.PanicIf(err) - - ed = exifcommon.EncodedData{ - Type: exifcommon.TypeUndefined, - Encoded: encoded, - UnitCount: unitCount, - } - } else { - ve := exifcommon.NewValueEncoder(byteOrder) - - var err error - - ed, err = ve.Encode(value) - log.PanicIf(err) - } - - bt.value = NewIfdBuilderTagValueFromBytes(ed.Encoded) - - return nil -} - -// NewStandardBuilderTag constructs a `BuilderTag` instance. The type is looked -// up. `ii` is the type of IFD that owns this tag. 
-func NewStandardBuilderTag(ifdPath string, it *IndexedTag, byteOrder binary.ByteOrder, value interface{}) *BuilderTag { - // If there is more than one supported type, we'll go with the larger to - // encode with. It'll use the same amount of fixed-space, and we'll - // eliminate unnecessary overflows/issues. - tagType := it.GetEncodingType(value) - - var rawBytes []byte - if it.DoesSupportType(exifcommon.TypeUndefined) == true { - encodeable := value.(exifundefined.EncodeableValue) - - var err error - - rawBytes, _, err = exifundefined.Encode(encodeable, byteOrder) - log.PanicIf(err) - } else { - ve := exifcommon.NewValueEncoder(byteOrder) - - ed, err := ve.Encode(value) - log.PanicIf(err) - - rawBytes = ed.Encoded - } - - tagValue := NewIfdBuilderTagValueFromBytes(rawBytes) - - return NewBuilderTag( - ifdPath, - it.Id, - tagType, - tagValue, - byteOrder) -} - -type IfdBuilder struct { - ifdIdentity *exifcommon.IfdIdentity - - byteOrder binary.ByteOrder - - // Includes both normal tags and IFD tags (which point to child IFDs). - // TODO(dustin): Keep a separate list of children like with `Ifd`. - // TODO(dustin): Either rename this or `Entries` in `Ifd` to be the same thing. - tags []*BuilderTag - - // existingOffset will be the offset that this IFD is currently found at if - // it represents an IFD that has previously been stored (or 0 if not). - existingOffset uint32 - - // nextIb represents the next link if we're chaining to another. - nextIb *IfdBuilder - - // thumbnailData is populated with thumbnail data if there was thumbnail - // data. Otherwise, it's nil. 
- thumbnailData []byte - - ifdMapping *exifcommon.IfdMapping - tagIndex *TagIndex -} - -func NewIfdBuilder(ifdMapping *exifcommon.IfdMapping, tagIndex *TagIndex, ii *exifcommon.IfdIdentity, byteOrder binary.ByteOrder) (ib *IfdBuilder) { - ib = &IfdBuilder{ - ifdIdentity: ii, - - byteOrder: byteOrder, - tags: make([]*BuilderTag, 0), - - ifdMapping: ifdMapping, - tagIndex: tagIndex, - } - - return ib -} - -// NewIfdBuilderWithExistingIfd creates a new IB using the same header type -// information as the given IFD. -func NewIfdBuilderWithExistingIfd(ifd *Ifd) (ib *IfdBuilder) { - ib = &IfdBuilder{ - ifdIdentity: ifd.IfdIdentity(), - - byteOrder: ifd.ByteOrder(), - existingOffset: ifd.Offset(), - ifdMapping: ifd.ifdMapping, - tagIndex: ifd.tagIndex, - } - - return ib -} - -// NewIfdBuilderFromExistingChain creates a chain of IB instances from an -// IFD chain generated from real data. -func NewIfdBuilderFromExistingChain(rootIfd *Ifd) (firstIb *IfdBuilder) { - var lastIb *IfdBuilder - i := 0 - for thisExistingIfd := rootIfd; thisExistingIfd != nil; thisExistingIfd = thisExistingIfd.nextIfd { - newIb := NewIfdBuilder( - rootIfd.ifdMapping, - rootIfd.tagIndex, - rootIfd.ifdIdentity, - thisExistingIfd.ByteOrder()) - - if firstIb == nil { - firstIb = newIb - } else { - lastIb.SetNextIb(newIb) - } - - err := newIb.AddTagsFromExisting(thisExistingIfd, nil, nil) - log.PanicIf(err) - - lastIb = newIb - i++ - } - - return firstIb -} - -func (ib *IfdBuilder) IfdIdentity() *exifcommon.IfdIdentity { - return ib.ifdIdentity -} - -func (ib *IfdBuilder) NextIb() (nextIb *IfdBuilder, err error) { - return ib.nextIb, nil -} - -func (ib *IfdBuilder) ChildWithTagId(childIfdTagId uint16) (childIb *IfdBuilder, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - for _, bt := range ib.tags { - if bt.value.IsIb() == false { - continue - } - - childIbThis := bt.value.Ib() - - if childIbThis.IfdIdentity().TagId() == childIfdTagId { 
- return childIbThis, nil - } - } - - log.Panic(ErrChildIbNotFound) - - // Never reached. - return nil, nil -} - -func getOrCreateIbFromRootIbInner(rootIb *IfdBuilder, parentIb *IfdBuilder, currentLineage []exifcommon.IfdTagIdAndIndex) (ib *IfdBuilder, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // TODO(dustin): !! Add test. - - thisIb := rootIb - - // Since we're calling ourselves recursively with incrementally different - // paths, the FQ IFD-path of the parent that called us needs to be passed - // in, in order for us to know it. - var parentLineage []exifcommon.IfdTagIdAndIndex - if parentIb != nil { - var err error - - parentLineage, err = thisIb.ifdMapping.ResolvePath(parentIb.IfdIdentity().String()) - log.PanicIf(err) - } - - // Process the current path part. - currentItIi := currentLineage[0] - - // Make sure the leftmost part of the FQ IFD-path agrees with the IB we - // were given. - - expectedFqRootIfdPath := "" - if parentLineage != nil { - expectedLineage := append(parentLineage, currentItIi) - expectedFqRootIfdPath = thisIb.ifdMapping.PathPhraseFromLineage(expectedLineage) - } else { - expectedFqRootIfdPath = thisIb.ifdMapping.PathPhraseFromLineage(currentLineage[:1]) - } - - if expectedFqRootIfdPath != thisIb.IfdIdentity().String() { - log.Panicf("the FQ IFD-path [%s] we were given does not match the builder's FQ IFD-path [%s]", expectedFqRootIfdPath, thisIb.IfdIdentity().String()) - } - - // If we actually wanted a sibling (currentItIi.Index > 0) then seek to it, - // appending new siblings, as required, until we get there. - for i := 0; i < currentItIi.Index; i++ { - if thisIb.nextIb == nil { - // Generate an FQ IFD-path for the sibling. It'll use the same - // non-FQ IFD-path as the current IB. 
- - iiSibling := thisIb.IfdIdentity().NewSibling(i + 1) - thisIb.nextIb = NewIfdBuilder(thisIb.ifdMapping, thisIb.tagIndex, iiSibling, thisIb.byteOrder) - } - - thisIb = thisIb.nextIb - } - - // There is no child IFD to process. We're done. - if len(currentLineage) == 1 { - return thisIb, nil - } - - // Establish the next child to be processed. - - childItii := currentLineage[1] - - var foundChild *IfdBuilder - for _, bt := range thisIb.tags { - if bt.value.IsIb() == false { - continue - } - - childIb := bt.value.Ib() - - if childIb.IfdIdentity().TagId() == childItii.TagId { - foundChild = childIb - break - } - } - - // If we didn't find the child, add it. - - if foundChild == nil { - currentIfdTag := thisIb.IfdIdentity().IfdTag() - - childIfdTag := - exifcommon.NewIfdTag( - ¤tIfdTag, - childItii.TagId, - childItii.Name) - - iiChild := thisIb.IfdIdentity().NewChild(childIfdTag, 0) - - foundChild = - NewIfdBuilder( - thisIb.ifdMapping, - thisIb.tagIndex, - iiChild, - thisIb.byteOrder) - - err = thisIb.AddChildIb(foundChild) - log.PanicIf(err) - } - - finalIb, err := getOrCreateIbFromRootIbInner(foundChild, thisIb, currentLineage[1:]) - log.PanicIf(err) - - return finalIb, nil -} - -// GetOrCreateIbFromRootIb returns an IB representing the requested IFD, even if -// an IB doesn't already exist for it. This function may call itself -// recursively. -func GetOrCreateIbFromRootIb(rootIb *IfdBuilder, fqIfdPath string) (ib *IfdBuilder, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // lineage is a necessity of our recursion process. It doesn't include any - // parent IFDs on its left-side; it starts with the current IB only. 
- lineage, err := rootIb.ifdMapping.ResolvePath(fqIfdPath) - log.PanicIf(err) - - ib, err = getOrCreateIbFromRootIbInner(rootIb, nil, lineage) - log.PanicIf(err) - - return ib, nil -} - -func (ib *IfdBuilder) String() string { - nextIfdPhrase := "" - if ib.nextIb != nil { - // TODO(dustin): We were setting this to ii.String(), but we were getting hex-data when printing this after building from an existing chain. - nextIfdPhrase = ib.nextIb.IfdIdentity().UnindexedString() - } - - return fmt.Sprintf("IfdBuilder", ib.IfdIdentity().UnindexedString(), ib.IfdIdentity().TagId(), len(ib.tags), ib.existingOffset, nextIfdPhrase) -} - -func (ib *IfdBuilder) Tags() (tags []*BuilderTag) { - return ib.tags -} - -// SetThumbnail sets thumbnail data. -// -// NOTES: -// -// - We don't manage any facet of the thumbnail data. This is the -// responsibility of the user/developer. -// - This method will fail unless the thumbnail is set on a the root IFD. -// However, in order to be valid, it must be set on the second one, linked to -// by the first, as per the EXIF/TIFF specification. -// - We set the offset to (0) now but will allocate the data and properly assign -// the offset when the IB is encoded (later). -func (ib *IfdBuilder) SetThumbnail(data []byte) (err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - if ib.IfdIdentity().UnindexedString() != exifcommon.IfdStandardIfdIdentity.UnindexedString() { - log.Panicf("thumbnails can only go into a root Ifd (and only the second one)") - } - - // TODO(dustin): !! Add a test for this function. 
- - if data == nil || len(data) == 0 { - log.Panic("thumbnail is empty") - } - - ib.thumbnailData = data - - ibtvfb := NewIfdBuilderTagValueFromBytes(ib.thumbnailData) - offsetBt := - NewBuilderTag( - ib.IfdIdentity().UnindexedString(), - ThumbnailOffsetTagId, - exifcommon.TypeLong, - ibtvfb, - ib.byteOrder) - - err = ib.Set(offsetBt) - log.PanicIf(err) - - thumbnailSizeIt, err := ib.tagIndex.Get(ib.IfdIdentity(), ThumbnailSizeTagId) - log.PanicIf(err) - - sizeBt := NewStandardBuilderTag(ib.IfdIdentity().UnindexedString(), thumbnailSizeIt, ib.byteOrder, []uint32{uint32(len(ib.thumbnailData))}) - - err = ib.Set(sizeBt) - log.PanicIf(err) - - return nil -} - -func (ib *IfdBuilder) Thumbnail() []byte { - return ib.thumbnailData -} - -func (ib *IfdBuilder) printTagTree(levels int) { - indent := strings.Repeat(" ", levels*2) - - i := 0 - for currentIb := ib; currentIb != nil; currentIb = currentIb.nextIb { - prefix := " " - if i > 0 { - prefix = ">" - } - - if levels == 0 { - fmt.Printf("%s%sIFD: %s INDEX=(%d)\n", indent, prefix, currentIb, i) - } else { - fmt.Printf("%s%sChild IFD: %s\n", indent, prefix, currentIb) - } - - if len(currentIb.tags) > 0 { - fmt.Printf("\n") - - for i, tag := range currentIb.tags { - isChildIb := false - _, err := ib.ifdMapping.GetChild(currentIb.IfdIdentity().UnindexedString(), tag.tagId) - if err == nil { - isChildIb = true - } else if log.Is(err, exifcommon.ErrChildIfdNotMapped) == false { - log.Panic(err) - } - - tagName := "" - - // If a normal tag (not a child IFD) get the name. 
- if isChildIb == true { - tagName = "" - } else { - it, err := ib.tagIndex.Get(ib.ifdIdentity, tag.tagId) - if log.Is(err, ErrTagNotFound) == true { - tagName = "" - } else if err != nil { - log.Panic(err) - } else { - tagName = it.Name - } - } - - value := tag.Value() - - if value.IsIb() == true { - fmt.Printf("%s (%d): [%s] %s\n", indent, i, tagName, value.Ib()) - } else { - fmt.Printf("%s (%d): [%s] %s\n", indent, i, tagName, tag) - } - - if isChildIb == true { - if tag.value.IsIb() == false { - log.Panicf("tag-ID (0x%04x) is an IFD but the tag value is not an IB instance: %v", tag.tagId, tag) - } - - fmt.Printf("\n") - - childIb := tag.value.Ib() - childIb.printTagTree(levels + 1) - } - } - - fmt.Printf("\n") - } - - i++ - } -} - -func (ib *IfdBuilder) PrintTagTree() { - ib.printTagTree(0) -} - -func (ib *IfdBuilder) printIfdTree(levels int) { - indent := strings.Repeat(" ", levels*2) - - i := 0 - for currentIb := ib; currentIb != nil; currentIb = currentIb.nextIb { - prefix := " " - if i > 0 { - prefix = ">" - } - - fmt.Printf("%s%s%s\n", indent, prefix, currentIb) - - if len(currentIb.tags) > 0 { - for _, tag := range currentIb.tags { - isChildIb := false - _, err := ib.ifdMapping.GetChild(currentIb.IfdIdentity().UnindexedString(), tag.tagId) - if err == nil { - isChildIb = true - } else if log.Is(err, exifcommon.ErrChildIfdNotMapped) == false { - log.Panic(err) - } - - if isChildIb == true { - if tag.value.IsIb() == false { - log.Panicf("tag-ID (0x%04x) is an IFD but the tag value is not an IB instance: %v", tag.tagId, tag) - } - - childIb := tag.value.Ib() - childIb.printIfdTree(levels + 1) - } - } - } - - i++ - } -} - -func (ib *IfdBuilder) PrintIfdTree() { - ib.printIfdTree(0) -} - -func (ib *IfdBuilder) dumpToStrings(thisIb *IfdBuilder, prefix string, tagId uint16, lines []string) (linesOutput []string) { - if lines == nil { - linesOutput = make([]string, 0) - } else { - linesOutput = lines - } - - siblingIfdIndex := 0 - for ; thisIb != nil; thisIb = 
thisIb.nextIb { - line := fmt.Sprintf("IFD", prefix, thisIb.IfdIdentity().String(), siblingIfdIndex, thisIb.IfdIdentity().TagId(), tagId) - linesOutput = append(linesOutput, line) - - for i, tag := range thisIb.tags { - var childIb *IfdBuilder - childIfdName := "" - if tag.value.IsIb() == true { - childIb = tag.value.Ib() - childIfdName = childIb.IfdIdentity().UnindexedString() - } - - line := fmt.Sprintf("TAG", prefix, thisIb.IfdIdentity().String(), thisIb.IfdIdentity().TagId(), childIfdName, i, tag.tagId) - linesOutput = append(linesOutput, line) - - if childIb == nil { - continue - } - - childPrefix := "" - if prefix == "" { - childPrefix = fmt.Sprintf("%s", thisIb.IfdIdentity().UnindexedString()) - } else { - childPrefix = fmt.Sprintf("%s->%s", prefix, thisIb.IfdIdentity().UnindexedString()) - } - - linesOutput = thisIb.dumpToStrings(childIb, childPrefix, tag.tagId, linesOutput) - } - - siblingIfdIndex++ - } - - return linesOutput -} - -func (ib *IfdBuilder) DumpToStrings() (lines []string) { - return ib.dumpToStrings(ib, "", 0, lines) -} - -func (ib *IfdBuilder) SetNextIb(nextIb *IfdBuilder) (err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - ib.nextIb = nextIb - - return nil -} - -func (ib *IfdBuilder) DeleteN(tagId uint16, n int) (err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - if n < 1 { - log.Panicf("N must be at least 1: (%d)", n) - } - - for n > 0 { - j := -1 - for i, bt := range ib.tags { - if bt.tagId == tagId { - j = i - break - } - } - - if j == -1 { - log.Panic(ErrTagEntryNotFound) - } - - ib.tags = append(ib.tags[:j], ib.tags[j+1:]...) 
- n-- - } - - return nil -} - -func (ib *IfdBuilder) DeleteFirst(tagId uint16) (err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - err = ib.DeleteN(tagId, 1) - log.PanicIf(err) - - return nil -} - -func (ib *IfdBuilder) DeleteAll(tagId uint16) (n int, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - for { - err = ib.DeleteN(tagId, 1) - if log.Is(err, ErrTagEntryNotFound) == true { - break - } else if err != nil { - log.Panic(err) - } - - n++ - } - - return n, nil -} - -func (ib *IfdBuilder) ReplaceAt(position int, bt *BuilderTag) (err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - if position < 0 { - log.Panicf("replacement position must be 0 or greater") - } else if position >= len(ib.tags) { - log.Panicf("replacement position does not exist") - } - - ib.tags[position] = bt - - return nil -} - -func (ib *IfdBuilder) Replace(tagId uint16, bt *BuilderTag) (err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - position, err := ib.Find(tagId) - log.PanicIf(err) - - ib.tags[position] = bt - - return nil -} - -// Set will add a new entry or update an existing entry. 
-func (ib *IfdBuilder) Set(bt *BuilderTag) (err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - position, err := ib.Find(bt.tagId) - if err == nil { - ib.tags[position] = bt - } else if log.Is(err, ErrTagEntryNotFound) == true { - err = ib.add(bt) - log.PanicIf(err) - } else { - log.Panic(err) - } - - return nil -} - -func (ib *IfdBuilder) FindN(tagId uint16, maxFound int) (found []int, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - found = make([]int, 0) - - for i, bt := range ib.tags { - if bt.tagId == tagId { - found = append(found, i) - if maxFound == 0 || len(found) >= maxFound { - break - } - } - } - - return found, nil -} - -func (ib *IfdBuilder) Find(tagId uint16) (position int, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - found, err := ib.FindN(tagId, 1) - log.PanicIf(err) - - if len(found) == 0 { - log.Panic(ErrTagEntryNotFound) - } - - return found[0], nil -} - -func (ib *IfdBuilder) FindTag(tagId uint16) (bt *BuilderTag, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - found, err := ib.FindN(tagId, 1) - log.PanicIf(err) - - if len(found) == 0 { - log.Panic(ErrTagEntryNotFound) - } - - position := found[0] - - return ib.tags[position], nil -} - -func (ib *IfdBuilder) FindTagWithName(tagName string) (bt *BuilderTag, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - it, err := ib.tagIndex.GetWithName(ib.IfdIdentity(), tagName) - log.PanicIf(err) - - found, err := ib.FindN(it.Id, 1) - log.PanicIf(err) - - if len(found) == 0 { - log.Panic(ErrTagEntryNotFound) - } - - position := found[0] - - return ib.tags[position], nil -} - -func (ib *IfdBuilder) add(bt *BuilderTag) (err error) { - defer func() { - if state := recover(); 
state != nil { - err = log.Wrap(state.(error)) - } - }() - - if bt.ifdPath == "" { - log.Panicf("BuilderTag ifdPath is not set: %s", bt) - } else if bt.typeId == 0x0 { - log.Panicf("BuilderTag type-ID is not set: %s", bt) - } else if bt.value == nil { - log.Panicf("BuilderTag value is not set: %s", bt) - } - - ib.tags = append(ib.tags, bt) - return nil -} - -func (ib *IfdBuilder) Add(bt *BuilderTag) (err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - if bt.value.IsIb() == true { - log.Panicf("child IfdBuilders must be added via AddChildIb() or AddTagsFromExisting(), not Add()") - } - - err = ib.add(bt) - log.PanicIf(err) - - return nil -} - -// AddChildIb adds a tag that branches to a new IFD. -func (ib *IfdBuilder) AddChildIb(childIb *IfdBuilder) (err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - if childIb.IfdIdentity().TagId() == 0 { - log.Panicf("IFD can not be used as a child IFD (not associated with a tag-ID): %v", childIb) - } else if childIb.byteOrder != ib.byteOrder { - log.Panicf("Child IFD does not have the same byte-order: [%s] != [%s]", childIb.byteOrder, ib.byteOrder) - } - - // Since no standard IFDs supports occur`ring more than once, check that a - // tag of this type has not been previously added. Note that we just search - // the current IFD and *not every* IFD. 
- for _, bt := range childIb.tags { - if bt.tagId == childIb.IfdIdentity().TagId() { - log.Panicf("child-IFD already added: %v", childIb.IfdIdentity().UnindexedString()) - } - } - - bt := ib.NewBuilderTagFromBuilder(childIb) - ib.tags = append(ib.tags, bt) - - return nil -} - -func (ib *IfdBuilder) NewBuilderTagFromBuilder(childIb *IfdBuilder) (bt *BuilderTag) { - defer func() { - if state := recover(); state != nil { - err := log.Wrap(state.(error)) - log.Panic(err) - } - }() - - value := NewIfdBuilderTagValueFromIfdBuilder(childIb) - - bt = NewChildIfdBuilderTag( - ib.IfdIdentity().UnindexedString(), - childIb.IfdIdentity().TagId(), - value) - - return bt -} - -// AddTagsFromExisting does a verbatim copy of the entries in `ifd` to this -// builder. It excludes child IFDs. These must be added explicitly via -// `AddChildIb()`. -func (ib *IfdBuilder) AddTagsFromExisting(ifd *Ifd, includeTagIds []uint16, excludeTagIds []uint16) (err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - thumbnailData, err := ifd.Thumbnail() - if err == nil { - err = ib.SetThumbnail(thumbnailData) - log.PanicIf(err) - } else if log.Is(err, ErrNoThumbnail) == false { - log.Panic(err) - } - - for i, ite := range ifd.Entries() { - if ite.IsThumbnailOffset() == true || ite.IsThumbnailSize() { - // These will be added on-the-fly when we encode. - continue - } - - if excludeTagIds != nil && len(excludeTagIds) > 0 { - found := false - for _, excludedTagId := range excludeTagIds { - if excludedTagId == ite.TagId() { - found = true - } - } - - if found == true { - continue - } - } - - if includeTagIds != nil && len(includeTagIds) > 0 { - // Whether or not there was a list of excludes, if there is a list - // of includes than the current tag has to be in it. 
- - found := false - for _, includedTagId := range includeTagIds { - if includedTagId == ite.TagId() { - found = true - break - } - } - - if found == false { - continue - } - } - - var bt *BuilderTag - - if ite.ChildIfdPath() != "" { - // If we want to add an IFD tag, we'll have to build it first and - // *then* add it via a different method. - - // Figure out which of the child-IFDs that are associated with - // this IFD represents this specific child IFD. - - var childIfd *Ifd - for _, thisChildIfd := range ifd.Children() { - if thisChildIfd.ParentTagIndex() != i { - continue - } else if thisChildIfd.ifdIdentity.TagId() != 0xffff && thisChildIfd.ifdIdentity.TagId() != ite.TagId() { - log.Panicf("child-IFD tag is not correct: TAG-POSITION=(%d) ITE=%s CHILD-IFD=%s", thisChildIfd.ParentTagIndex(), ite, thisChildIfd) - } - - childIfd = thisChildIfd - break - } - - if childIfd == nil { - childTagIds := make([]string, len(ifd.Children())) - for j, childIfd := range ifd.Children() { - childTagIds[j] = fmt.Sprintf("0x%04x (parent tag-position %d)", childIfd.ifdIdentity.TagId(), childIfd.ParentTagIndex()) - } - - log.Panicf("could not find child IFD for child ITE: IFD-PATH=[%s] TAG-ID=(0x%04x) CURRENT-TAG-POSITION=(%d) CHILDREN=%v", ite.IfdPath(), ite.TagId(), i, childTagIds) - } - - childIb := NewIfdBuilderFromExistingChain(childIfd) - bt = ib.NewBuilderTagFromBuilder(childIb) - } else { - // Non-IFD tag. - - rawBytes, err := ite.GetRawBytes() - log.PanicIf(err) - - value := NewIfdBuilderTagValueFromBytes(rawBytes) - - bt = NewBuilderTag( - ifd.ifdIdentity.UnindexedString(), - ite.TagId(), - ite.TagType(), - value, - ib.byteOrder) - } - - err := ib.add(bt) - log.PanicIf(err) - } - - return nil -} - -// AddStandard quickly and easily composes and adds the tag using the -// information already known about a tag. Only works with standard tags. 
-func (ib *IfdBuilder) AddStandard(tagId uint16, value interface{}) (err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - it, err := ib.tagIndex.Get(ib.IfdIdentity(), tagId) - log.PanicIf(err) - - bt := NewStandardBuilderTag(ib.IfdIdentity().UnindexedString(), it, ib.byteOrder, value) - - err = ib.add(bt) - log.PanicIf(err) - - return nil -} - -// AddStandardWithName quickly and easily composes and adds the tag using the -// information already known about a tag (using the name). Only works with -// standard tags. -func (ib *IfdBuilder) AddStandardWithName(tagName string, value interface{}) (err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - it, err := ib.tagIndex.GetWithName(ib.IfdIdentity(), tagName) - log.PanicIf(err) - - bt := NewStandardBuilderTag(ib.IfdIdentity().UnindexedString(), it, ib.byteOrder, value) - - err = ib.add(bt) - log.PanicIf(err) - - return nil -} - -// SetStandard quickly and easily composes and adds or replaces the tag using -// the information already known about a tag. Only works with standard tags. -func (ib *IfdBuilder) SetStandard(tagId uint16, value interface{}) (err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // TODO(dustin): !! Add test for this function. - - it, err := ib.tagIndex.Get(ib.IfdIdentity(), tagId) - log.PanicIf(err) - - bt := NewStandardBuilderTag(ib.IfdIdentity().UnindexedString(), it, ib.byteOrder, value) - - i, err := ib.Find(tagId) - if err != nil { - if log.Is(err, ErrTagEntryNotFound) == false { - log.Panic(err) - } - - ib.tags = append(ib.tags, bt) - } else { - ib.tags[i] = bt - } - - return nil -} - -// SetStandardWithName quickly and easily composes and adds or replaces the -// tag using the information already known about a tag (using the name). Only -// works with standard tags. 
-func (ib *IfdBuilder) SetStandardWithName(tagName string, value interface{}) (err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // TODO(dustin): !! Add test for this function. - - it, err := ib.tagIndex.GetWithName(ib.IfdIdentity(), tagName) - log.PanicIf(err) - - bt := NewStandardBuilderTag(ib.IfdIdentity().UnindexedString(), it, ib.byteOrder, value) - - i, err := ib.Find(bt.tagId) - if err != nil { - if log.Is(err, ErrTagEntryNotFound) == false { - log.Panic(err) - } - - ib.tags = append(ib.tags, bt) - } else { - ib.tags[i] = bt - } - - return nil -} diff --git a/vendor/github.com/dsoprea/go-exif/v3/ifd_builder_encode.go b/vendor/github.com/dsoprea/go-exif/v3/ifd_builder_encode.go deleted file mode 100644 index a0f4ff79c..000000000 --- a/vendor/github.com/dsoprea/go-exif/v3/ifd_builder_encode.go +++ /dev/null @@ -1,532 +0,0 @@ -package exif - -import ( - "bytes" - "fmt" - "strings" - - "encoding/binary" - - "github.com/dsoprea/go-logging" - - "github.com/dsoprea/go-exif/v3/common" -) - -const ( - // Tag-ID + Tag-Type + Unit-Count + Value/Offset. 
- IfdTagEntrySize = uint32(2 + 2 + 4 + 4) -) - -type ByteWriter struct { - b *bytes.Buffer - byteOrder binary.ByteOrder -} - -func NewByteWriter(b *bytes.Buffer, byteOrder binary.ByteOrder) (bw *ByteWriter) { - return &ByteWriter{ - b: b, - byteOrder: byteOrder, - } -} - -func (bw ByteWriter) writeAsBytes(value interface{}) (err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - err = binary.Write(bw.b, bw.byteOrder, value) - log.PanicIf(err) - - return nil -} - -func (bw ByteWriter) WriteUint32(value uint32) (err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - err = bw.writeAsBytes(value) - log.PanicIf(err) - - return nil -} - -func (bw ByteWriter) WriteUint16(value uint16) (err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - err = bw.writeAsBytes(value) - log.PanicIf(err) - - return nil -} - -func (bw ByteWriter) WriteFourBytes(value []byte) (err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - len_ := len(value) - if len_ != 4 { - log.Panicf("value is not four-bytes: (%d)", len_) - } - - _, err = bw.b.Write(value) - log.PanicIf(err) - - return nil -} - -// ifdOffsetIterator keeps track of where the next IFD should be written by -// keeping track of where the offsets start, the data that has been added, and -// bumping the offset *when* the data is added. 
-type ifdDataAllocator struct { - offset uint32 - b bytes.Buffer -} - -func newIfdDataAllocator(ifdDataAddressableOffset uint32) *ifdDataAllocator { - return &ifdDataAllocator{ - offset: ifdDataAddressableOffset, - } -} - -func (ida *ifdDataAllocator) Allocate(value []byte) (offset uint32, err error) { - _, err = ida.b.Write(value) - log.PanicIf(err) - - offset = ida.offset - ida.offset += uint32(len(value)) - - return offset, nil -} - -func (ida *ifdDataAllocator) NextOffset() uint32 { - return ida.offset -} - -func (ida *ifdDataAllocator) Bytes() []byte { - return ida.b.Bytes() -} - -// IfdByteEncoder converts an IB to raw bytes (for writing) while also figuring -// out all of the allocations and indirection that is required for extended -// data. -type IfdByteEncoder struct { - // journal holds a list of actions taken while encoding. - journal [][3]string -} - -func NewIfdByteEncoder() (ibe *IfdByteEncoder) { - return &IfdByteEncoder{ - journal: make([][3]string, 0), - } -} - -func (ibe *IfdByteEncoder) Journal() [][3]string { - return ibe.journal -} - -func (ibe *IfdByteEncoder) TableSize(entryCount int) uint32 { - // Tag-Count + (Entry-Size * Entry-Count) + Next-IFD-Offset. - return uint32(2) + (IfdTagEntrySize * uint32(entryCount)) + uint32(4) -} - -func (ibe *IfdByteEncoder) pushToJournal(where, direction, format string, args ...interface{}) { - event := [3]string{ - direction, - where, - fmt.Sprintf(format, args...), - } - - ibe.journal = append(ibe.journal, event) -} - -// PrintJournal prints a hierarchical representation of the steps taken during -// encoding. 
-func (ibe *IfdByteEncoder) PrintJournal() { - maxWhereLength := 0 - for _, event := range ibe.journal { - where := event[1] - - len_ := len(where) - if len_ > maxWhereLength { - maxWhereLength = len_ - } - } - - level := 0 - for i, event := range ibe.journal { - direction := event[0] - where := event[1] - message := event[2] - - if direction != ">" && direction != "<" && direction != "-" { - log.Panicf("journal operation not valid: [%s]", direction) - } - - if direction == "<" { - if level <= 0 { - log.Panicf("journal operations unbalanced (too many closes)") - } - - level-- - } - - indent := strings.Repeat(" ", level) - - fmt.Printf("%3d %s%s %s: %s\n", i, indent, direction, where, message) - - if direction == ">" { - level++ - } - } - - if level != 0 { - log.Panicf("journal operations unbalanced (too many opens)") - } -} - -// encodeTagToBytes encodes the given tag to a byte stream. If -// `nextIfdOffsetToWrite` is more than (0), recurse into child IFDs -// (`nextIfdOffsetToWrite` is required in order for them to know where the its -// IFD data will be written, in order for them to know the offset of where -// their allocated-data block will start, which follows right behind). -func (ibe *IfdByteEncoder) encodeTagToBytes(ib *IfdBuilder, bt *BuilderTag, bw *ByteWriter, ida *ifdDataAllocator, nextIfdOffsetToWrite uint32) (childIfdBlock []byte, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // Write tag-ID. - err = bw.WriteUint16(bt.tagId) - log.PanicIf(err) - - // Works for both values and child IFDs (which have an official size of - // LONG). - err = bw.WriteUint16(uint16(bt.typeId)) - log.PanicIf(err) - - // Write unit-count. 
- - if bt.value.IsBytes() == true { - effectiveType := bt.typeId - if bt.typeId == exifcommon.TypeUndefined { - effectiveType = exifcommon.TypeByte - } - - // It's a non-unknown value.Calculate the count of values of - // the type that we're writing and the raw bytes for the whole list. - - typeSize := uint32(effectiveType.Size()) - - valueBytes := bt.value.Bytes() - - len_ := len(valueBytes) - unitCount := uint32(len_) / typeSize - - if _, found := tagsWithoutAlignment[bt.tagId]; found == false { - remainder := uint32(len_) % typeSize - - if remainder > 0 { - log.Panicf("tag (0x%04x) value of (%d) bytes not evenly divisible by type-size (%d)", bt.tagId, len_, typeSize) - } - } - - err = bw.WriteUint32(unitCount) - log.PanicIf(err) - - // Write four-byte value/offset. - - if len_ > 4 { - offset, err := ida.Allocate(valueBytes) - log.PanicIf(err) - - err = bw.WriteUint32(offset) - log.PanicIf(err) - } else { - fourBytes := make([]byte, 4) - copy(fourBytes, valueBytes) - - err = bw.WriteFourBytes(fourBytes) - log.PanicIf(err) - } - } else { - if bt.value.IsIb() == false { - log.Panicf("tag value is not a byte-slice but also not a child IB: %v", bt) - } - - // Write unit-count (one LONG representing one offset). - err = bw.WriteUint32(1) - log.PanicIf(err) - - if nextIfdOffsetToWrite > 0 { - var err error - - ibe.pushToJournal("encodeTagToBytes", ">", "[%s]->[%s]", ib.IfdIdentity().UnindexedString(), bt.value.Ib().IfdIdentity().UnindexedString()) - - // Create the block of IFD data and everything it requires. - childIfdBlock, err = ibe.encodeAndAttachIfd(bt.value.Ib(), nextIfdOffsetToWrite) - log.PanicIf(err) - - ibe.pushToJournal("encodeTagToBytes", "<", "[%s]->[%s]", bt.value.Ib().IfdIdentity().UnindexedString(), ib.IfdIdentity().UnindexedString()) - - // Use the next-IFD offset for it. The IFD will actually get - // attached after we return. - err = bw.WriteUint32(nextIfdOffsetToWrite) - log.PanicIf(err) - - } else { - // No child-IFDs are to be allocated. 
Finish the entry with a NULL - // pointer. - - ibe.pushToJournal("encodeTagToBytes", "-", "*Not* descending to child: [%s]", bt.value.Ib().IfdIdentity().UnindexedString()) - - err = bw.WriteUint32(0) - log.PanicIf(err) - } - } - - return childIfdBlock, nil -} - -// encodeIfdToBytes encodes the given IB to a byte-slice. We are given the -// offset at which this IFD will be written. This method is used called both to -// pre-determine how big the table is going to be (so that we can calculate the -// address to allocate data at) as well as to write the final table. -// -// It is necessary to fully realize the table in order to predetermine its size -// because it is not enough to know the size of the table: If there are child -// IFDs, we will not be able to allocate them without first knowing how much -// data we need to allocate for the current IFD. -func (ibe *IfdByteEncoder) encodeIfdToBytes(ib *IfdBuilder, ifdAddressableOffset uint32, nextIfdOffsetToWrite uint32, setNextIb bool) (data []byte, tableSize uint32, dataSize uint32, childIfdSizes []uint32, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - ibe.pushToJournal("encodeIfdToBytes", ">", "%s", ib) - - tableSize = ibe.TableSize(len(ib.tags)) - - b := new(bytes.Buffer) - bw := NewByteWriter(b, ib.byteOrder) - - // Write tag count. - err = bw.WriteUint16(uint16(len(ib.tags))) - log.PanicIf(err) - - ida := newIfdDataAllocator(ifdAddressableOffset) - - childIfdBlocks := make([][]byte, 0) - - // Write raw bytes for each tag entry. Allocate larger data to be referred - // to in the follow-up data-block as required. Any "unknown"-byte tags that - // we can't parse will not be present here (using AddTagsFromExisting(), at - // least). 
- for _, bt := range ib.tags { - childIfdBlock, err := ibe.encodeTagToBytes(ib, bt, bw, ida, nextIfdOffsetToWrite) - log.PanicIf(err) - - if childIfdBlock != nil { - // We aren't allowed to have non-nil child IFDs if we're just - // sizing things up. - if nextIfdOffsetToWrite == 0 { - log.Panicf("no IFD offset provided for child-IFDs; no new child-IFDs permitted") - } - - nextIfdOffsetToWrite += uint32(len(childIfdBlock)) - childIfdBlocks = append(childIfdBlocks, childIfdBlock) - } - } - - dataBytes := ida.Bytes() - dataSize = uint32(len(dataBytes)) - - childIfdSizes = make([]uint32, len(childIfdBlocks)) - childIfdsTotalSize := uint32(0) - for i, childIfdBlock := range childIfdBlocks { - len_ := uint32(len(childIfdBlock)) - childIfdSizes[i] = len_ - childIfdsTotalSize += len_ - } - - // N the link from this IFD to the next IFD that will be written in the - // next cycle. - if setNextIb == true { - // Write address of next IFD in chain. This will be the original - // allocation offset plus the size of everything we have allocated for - // this IFD and its child-IFDs. - // - // It is critical that this number is stepped properly. We experienced - // an issue whereby it first looked like we were duplicating the IFD and - // then that we were duplicating the tags in the wrong IFD, and then - // finally we determined that the next-IFD offset for the first IFD was - // accidentally pointing back to the EXIF IFD, so we were visiting it - // twice when visiting through the tags after decoding. It was an - // expensive bug to find. - - ibe.pushToJournal("encodeIfdToBytes", "-", "Setting 'next' IFD to (0x%08x).", nextIfdOffsetToWrite) - - err := bw.WriteUint32(nextIfdOffsetToWrite) - log.PanicIf(err) - } else { - err := bw.WriteUint32(0) - log.PanicIf(err) - } - - _, err = b.Write(dataBytes) - log.PanicIf(err) - - // Append any child IFD blocks after our table and data blocks. 
These IFDs - // were equipped with the appropriate offset information so it's expected - // that all offsets referred to by these will be correct. - // - // Note that child-IFDs are append after the current IFD and before the - // next IFD, as opposed to the root IFDs, which are chained together but - // will be interrupted by these child-IFDs (which is expected, per the - // standard). - - for _, childIfdBlock := range childIfdBlocks { - _, err = b.Write(childIfdBlock) - log.PanicIf(err) - } - - ibe.pushToJournal("encodeIfdToBytes", "<", "%s", ib) - - return b.Bytes(), tableSize, dataSize, childIfdSizes, nil -} - -// encodeAndAttachIfd is a reentrant function that processes the IFD chain. -func (ibe *IfdByteEncoder) encodeAndAttachIfd(ib *IfdBuilder, ifdAddressableOffset uint32) (data []byte, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - ibe.pushToJournal("encodeAndAttachIfd", ">", "%s", ib) - - b := new(bytes.Buffer) - - i := 0 - - for thisIb := ib; thisIb != nil; thisIb = thisIb.nextIb { - - // Do a dry-run in order to pre-determine its size requirement. 
- - ibe.pushToJournal("encodeAndAttachIfd", ">", "Beginning encoding process: (%d) [%s]", i, thisIb.IfdIdentity().UnindexedString()) - - ibe.pushToJournal("encodeAndAttachIfd", ">", "Calculating size: (%d) [%s]", i, thisIb.IfdIdentity().UnindexedString()) - - _, tableSize, allocatedDataSize, _, err := ibe.encodeIfdToBytes(thisIb, ifdAddressableOffset, 0, false) - log.PanicIf(err) - - ibe.pushToJournal("encodeAndAttachIfd", "<", "Finished calculating size: (%d) [%s]", i, thisIb.IfdIdentity().UnindexedString()) - - ifdAddressableOffset += tableSize - nextIfdOffsetToWrite := ifdAddressableOffset + allocatedDataSize - - ibe.pushToJournal("encodeAndAttachIfd", ">", "Next IFD will be written at offset (0x%08x)", nextIfdOffsetToWrite) - - // Write our IFD as well as any child-IFDs (now that we know the offset - // where new IFDs and their data will be allocated). - - setNextIb := thisIb.nextIb != nil - - ibe.pushToJournal("encodeAndAttachIfd", ">", "Encoding starting: (%d) [%s] NEXT-IFD-OFFSET-TO-WRITE=(0x%08x)", i, thisIb.IfdIdentity().UnindexedString(), nextIfdOffsetToWrite) - - tableAndAllocated, effectiveTableSize, effectiveAllocatedDataSize, childIfdSizes, err := - ibe.encodeIfdToBytes(thisIb, ifdAddressableOffset, nextIfdOffsetToWrite, setNextIb) - - log.PanicIf(err) - - if effectiveTableSize != tableSize { - log.Panicf("written table size does not match the pre-calculated table size: (%d) != (%d) %s", effectiveTableSize, tableSize, ib) - } else if effectiveAllocatedDataSize != allocatedDataSize { - log.Panicf("written allocated-data size does not match the pre-calculated allocated-data size: (%d) != (%d) %s", effectiveAllocatedDataSize, allocatedDataSize, ib) - } - - ibe.pushToJournal("encodeAndAttachIfd", "<", "Encoding done: (%d) [%s]", i, thisIb.IfdIdentity().UnindexedString()) - - totalChildIfdSize := uint32(0) - for _, childIfdSize := range childIfdSizes { - totalChildIfdSize += childIfdSize - } - - if len(tableAndAllocated) != 
int(tableSize+allocatedDataSize+totalChildIfdSize) { - log.Panicf("IFD table and data is not a consistent size: (%d) != (%d)", len(tableAndAllocated), tableSize+allocatedDataSize+totalChildIfdSize) - } - - // TODO(dustin): We might want to verify the original tableAndAllocated length, too. - - _, err = b.Write(tableAndAllocated) - log.PanicIf(err) - - // Advance past what we've allocated, thus far. - - ifdAddressableOffset += allocatedDataSize + totalChildIfdSize - - ibe.pushToJournal("encodeAndAttachIfd", "<", "Finishing encoding process: (%d) [%s] [FINAL:] NEXT-IFD-OFFSET-TO-WRITE=(0x%08x)", i, ib.IfdIdentity().UnindexedString(), nextIfdOffsetToWrite) - - i++ - } - - ibe.pushToJournal("encodeAndAttachIfd", "<", "%s", ib) - - return b.Bytes(), nil -} - -// EncodeToExifPayload is the base encoding step that transcribes the entire IB -// structure to its on-disk layout. -func (ibe *IfdByteEncoder) EncodeToExifPayload(ib *IfdBuilder) (data []byte, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - data, err = ibe.encodeAndAttachIfd(ib, ExifDefaultFirstIfdOffset) - log.PanicIf(err) - - return data, nil -} - -// EncodeToExif calls EncodeToExifPayload and then packages the result into a -// complete EXIF block. -func (ibe *IfdByteEncoder) EncodeToExif(ib *IfdBuilder) (data []byte, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - encodedIfds, err := ibe.EncodeToExifPayload(ib) - log.PanicIf(err) - - // Wrap the IFD in a formal EXIF block. 
- - b := new(bytes.Buffer) - - headerBytes, err := BuildExifHeader(ib.byteOrder, ExifDefaultFirstIfdOffset) - log.PanicIf(err) - - _, err = b.Write(headerBytes) - log.PanicIf(err) - - _, err = b.Write(encodedIfds) - log.PanicIf(err) - - return b.Bytes(), nil -} diff --git a/vendor/github.com/dsoprea/go-exif/v3/ifd_enumerate.go b/vendor/github.com/dsoprea/go-exif/v3/ifd_enumerate.go deleted file mode 100644 index 3167596ef..000000000 --- a/vendor/github.com/dsoprea/go-exif/v3/ifd_enumerate.go +++ /dev/null @@ -1,1672 +0,0 @@ -package exif - -import ( - "bytes" - "errors" - "fmt" - "io" - "strconv" - "strings" - "time" - - "encoding/binary" - - "github.com/dsoprea/go-logging" - - "github.com/dsoprea/go-exif/v3/common" - "github.com/dsoprea/go-exif/v3/undefined" -) - -var ( - ifdEnumerateLogger = log.NewLogger("exif.ifd_enumerate") -) - -var ( - // ErrNoThumbnail means that no thumbnail was found. - ErrNoThumbnail = errors.New("no thumbnail") - - // ErrNoGpsTags means that no GPS info was found. - ErrNoGpsTags = errors.New("no gps tags") - - // ErrTagTypeNotValid means that the tag-type is not valid. - ErrTagTypeNotValid = errors.New("tag type invalid") - - // ErrOffsetInvalid means that the file offset is not valid. - ErrOffsetInvalid = errors.New("file offset invalid") -) - -var ( - // ValidGpsVersions is the list of recognized EXIF GPS versions/signatures. - ValidGpsVersions = [][4]byte{ - // 2.0.0.0 appears to have a very similar format to 2.2.0.0, so enabling - // it under that assumption. - // - // IFD-PATH=[IFD] ID=(0x8825) NAME=[GPSTag] COUNT=(1) TYPE=[LONG] VALUE=[114] - // IFD-PATH=[IFD/GPSInfo] ID=(0x0000) NAME=[GPSVersionID] COUNT=(4) TYPE=[BYTE] VALUE=[02 00 00 00] - // IFD-PATH=[IFD/GPSInfo] ID=(0x0001) NAME=[GPSLatitudeRef] COUNT=(2) TYPE=[ASCII] VALUE=[S] - // IFD-PATH=[IFD/GPSInfo] ID=(0x0002) NAME=[GPSLatitude] COUNT=(3) TYPE=[RATIONAL] VALUE=[38/1...] 
- // IFD-PATH=[IFD/GPSInfo] ID=(0x0003) NAME=[GPSLongitudeRef] COUNT=(2) TYPE=[ASCII] VALUE=[E] - // IFD-PATH=[IFD/GPSInfo] ID=(0x0004) NAME=[GPSLongitude] COUNT=(3) TYPE=[RATIONAL] VALUE=[144/1...] - // IFD-PATH=[IFD/GPSInfo] ID=(0x0012) NAME=[GPSMapDatum] COUNT=(7) TYPE=[ASCII] VALUE=[WGS-84] - // - {2, 0, 0, 0}, - - {2, 2, 0, 0}, - - // Suddenly appeared at the default in 2.31: https://home.jeita.or.jp/tsc/std-pdf/CP-3451D.pdf - // - // Note that the presence of 2.3.0.0 doesn't seem to guarantee - // coordinates. In some cases, we seen just the following: - // - // GPS Tag Version |2.3.0.0 - // GPS Receiver Status |V - // Geodetic Survey Data|WGS-84 - // GPS Differential Cor|0 - // - {2, 3, 0, 0}, - } -) - -// byteParser knows how to decode an IFD and all of the tags it -// describes. -// -// The IFDs and the actual values can float throughout the EXIF block, but the -// IFD itself is just a minor header followed by a set of repeating, -// statically-sized records. So, the tags (though notnecessarily their values) -// are fairly simple to enumerate. -type byteParser struct { - byteOrder binary.ByteOrder - rs io.ReadSeeker - ifdOffset uint32 - currentOffset uint32 -} - -// newByteParser returns a new byteParser struct. -// -// initialOffset is for arithmetic-based tracking of where we should be at in -// the stream. -func newByteParser(rs io.ReadSeeker, byteOrder binary.ByteOrder, initialOffset uint32) (bp *byteParser, err error) { - // TODO(dustin): Add test - - bp = &byteParser{ - rs: rs, - byteOrder: byteOrder, - currentOffset: initialOffset, - } - - return bp, nil -} - -// getUint16 reads a uint16 and advances both our current and our current -// accumulator (which allows us to know how far to seek to the beginning of the -// next IFD when it's time to jump). 
-func (bp *byteParser) getUint16() (value uint16, raw []byte, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // TODO(dustin): Add test - - needBytes := 2 - - raw = make([]byte, needBytes) - - _, err = io.ReadFull(bp.rs, raw) - log.PanicIf(err) - - value = bp.byteOrder.Uint16(raw) - - bp.currentOffset += uint32(needBytes) - - return value, raw, nil -} - -// getUint32 reads a uint32 and advances both our current and our current -// accumulator (which allows us to know how far to seek to the beginning of the -// next IFD when it's time to jump). -func (bp *byteParser) getUint32() (value uint32, raw []byte, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // TODO(dustin): Add test - - needBytes := 4 - - raw = make([]byte, needBytes) - - _, err = io.ReadFull(bp.rs, raw) - log.PanicIf(err) - - value = bp.byteOrder.Uint32(raw) - - bp.currentOffset += uint32(needBytes) - - return value, raw, nil -} - -// CurrentOffset returns the starting offset but the number of bytes that we -// have parsed. This is arithmetic-based tracking, not a seek(0) operation. -func (bp *byteParser) CurrentOffset() uint32 { - return bp.currentOffset -} - -// IfdEnumerate is the main enumeration type. It knows how to parse the IFD -// containers in the EXIF blob. -type IfdEnumerate struct { - ebs ExifBlobSeeker - byteOrder binary.ByteOrder - tagIndex *TagIndex - ifdMapping *exifcommon.IfdMapping - furthestOffset uint32 - - visitedIfdOffsets map[uint32]struct{} -} - -// NewIfdEnumerate returns a new instance of IfdEnumerate. 
-func NewIfdEnumerate(ifdMapping *exifcommon.IfdMapping, tagIndex *TagIndex, ebs ExifBlobSeeker, byteOrder binary.ByteOrder) *IfdEnumerate { - return &IfdEnumerate{ - ebs: ebs, - byteOrder: byteOrder, - ifdMapping: ifdMapping, - tagIndex: tagIndex, - - visitedIfdOffsets: make(map[uint32]struct{}), - } -} - -func (ie *IfdEnumerate) getByteParser(ifdOffset uint32) (bp *byteParser, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - initialOffset := ExifAddressableAreaStart + ifdOffset - - rs, err := ie.ebs.GetReadSeeker(int64(initialOffset)) - log.PanicIf(err) - - bp, err = - newByteParser( - rs, - ie.byteOrder, - initialOffset) - - if err != nil { - if err == ErrOffsetInvalid { - return nil, err - } - - log.Panic(err) - } - - return bp, nil -} - -func (ie *IfdEnumerate) parseTag(ii *exifcommon.IfdIdentity, tagPosition int, bp *byteParser) (ite *IfdTagEntry, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - tagId, _, err := bp.getUint16() - log.PanicIf(err) - - tagTypeRaw, _, err := bp.getUint16() - log.PanicIf(err) - - tagType := exifcommon.TagTypePrimitive(tagTypeRaw) - - unitCount, _, err := bp.getUint32() - log.PanicIf(err) - - valueOffset, rawValueOffset, err := bp.getUint32() - log.PanicIf(err) - - // Check whether the embedded type indicator is valid. - - if tagType.IsValid() == false { - // Technically, we have the type on-file in the tags-index, but - // if the type stored alongside the data disagrees with it, - // which it apparently does, all bets are off. - ifdEnumerateLogger.Warningf(nil, - "Tag (0x%04x) in IFD [%s] at position (%d) has invalid type (0x%04x) and will be skipped.", - tagId, ii, tagPosition, int(tagType)) - - ite = &IfdTagEntry{ - tagId: tagId, - tagType: tagType, - } - - return ite, ErrTagTypeNotValid - } - - // Check whether the embedded type is listed among the supported types for - // the registered tag. 
If not, skip processing the tag. - - it, err := ie.tagIndex.Get(ii, tagId) - if err != nil { - if log.Is(err, ErrTagNotFound) == true { - ifdEnumerateLogger.Warningf(nil, "Tag (0x%04x) is not known and will be skipped.", tagId) - - ite = &IfdTagEntry{ - tagId: tagId, - } - - return ite, ErrTagNotFound - } - - log.Panic(err) - } - - // If we're trying to be as forgiving as possible then use whatever type was - // reported in the format. Otherwise, only accept a type that's expected for - // this tag. - if ie.tagIndex.UniversalSearch() == false && it.DoesSupportType(tagType) == false { - // The type in the stream disagrees with the type that this tag is - // expected to have. This can present issues with how we handle the - // special-case tags (e.g. thumbnails, GPS, etc..) when those tags - // suddenly have data that we no longer manipulate correctly/ - // accurately. - ifdEnumerateLogger.Warningf(nil, - "Tag (0x%04x) in IFD [%s] at position (%d) has unsupported type (0x%02x) and will be skipped.", - tagId, ii, tagPosition, int(tagType)) - - return nil, ErrTagTypeNotValid - } - - // Construct tag struct. - - rs, err := ie.ebs.GetReadSeeker(0) - log.PanicIf(err) - - ite = newIfdTagEntry( - ii, - tagId, - tagPosition, - tagType, - unitCount, - valueOffset, - rawValueOffset, - rs, - ie.byteOrder) - - ifdPath := ii.UnindexedString() - - // If it's an IFD but not a standard one, it'll just be seen as a LONG - // (the standard IFD tag type), later, unless we skip it because it's - // [likely] not even in the standard list of known tags. - mi, err := ie.ifdMapping.GetChild(ifdPath, tagId) - if err == nil { - currentIfdTag := ii.IfdTag() - - childIt := exifcommon.NewIfdTag(¤tIfdTag, tagId, mi.Name) - iiChild := ii.NewChild(childIt, 0) - ite.SetChildIfd(iiChild) - - // We also need to set `tag.ChildFqIfdPath` but can't do it here - // because we don't have the IFD index. 
- } else if log.Is(err, exifcommon.ErrChildIfdNotMapped) == false { - log.Panic(err) - } - - return ite, nil -} - -// TagVisitorFn is called for each tag when enumerating through the EXIF. -type TagVisitorFn func(ite *IfdTagEntry) (err error) - -// tagPostParse do some tag-level processing here following the parse of each. -func (ie *IfdEnumerate) tagPostParse(ite *IfdTagEntry, med *MiscellaneousExifData) (err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // TODO(dustin): Add test - - ii := ite.IfdIdentity() - - tagId := ite.TagId() - tagType := ite.TagType() - - it, err := ie.tagIndex.Get(ii, tagId) - if err == nil { - ite.setTagName(it.Name) - } else { - if err != ErrTagNotFound { - log.Panic(err) - } - - // This is an unknown tag. - - originalBt := exifcommon.BasicTag{ - FqIfdPath: ii.String(), - IfdPath: ii.UnindexedString(), - TagId: tagId, - } - - if med != nil { - med.unknownTags[originalBt] = exifcommon.BasicTag{} - } - - utilityLogger.Debugf(nil, - "Tag (0x%04x) is not valid for IFD [%s]. Attempting secondary "+ - "lookup.", tagId, ii.String()) - - // This will overwrite the existing `it` and `err`. Since `FindFirst()` - // might generate different Errors than `Get()`, the log message above - // is import to try and mitigate confusion in that case. - it, err = ie.tagIndex.FindFirst(tagId, tagType, nil) - if err != nil { - if err != ErrTagNotFound { - log.Panic(err) - } - - // This is supposed to be a convenience function and if we were - // to keep the name empty or set it to some placeholder, it - // might be mismanaged by the package that is calling us. If - // they want to specifically manage these types of tags, they - // can use more advanced functionality to specifically -handle - // unknown tags. 
- utilityLogger.Warningf(nil, - "Tag with ID (0x%04x) in IFD [%s] is not recognized and "+ - "will be ignored.", tagId, ii.String()) - - return ErrTagNotFound - } - - ite.setTagName(it.Name) - - utilityLogger.Warningf(nil, - "Tag with ID (0x%04x) is not valid for IFD [%s], but it *is* "+ - "valid as tag [%s] under IFD [%s] and has the same type "+ - "[%s], so we will use that. This EXIF blob was probably "+ - "written by a buggy implementation.", - tagId, ii.UnindexedString(), it.Name, it.IfdPath, - tagType) - - if med != nil { - med.unknownTags[originalBt] = exifcommon.BasicTag{ - IfdPath: it.IfdPath, - TagId: tagId, - } - } - } - - // This is a known tag (from the standard, unless the user did - // something different). - - // Skip any tags that have a type that doesn't match the type in the - // index (which is loaded with the standard and accept tag - // information unless configured otherwise). - // - // We've run into multiple instances of the same tag, where a) no - // tag should ever be repeated, and b) all but one had an incorrect - // type and caused parsing/conversion woes. So, this is a quick fix - // for those scenarios. - if ie.tagIndex.UniversalSearch() == false && it.DoesSupportType(tagType) == false { - ifdEnumerateLogger.Warningf(nil, - "Skipping tag [%s] (0x%04x) [%s] with an unexpected type: %v ∉ %v", - ii.UnindexedString(), tagId, it.Name, - tagType, it.SupportedTypes) - - return ErrTagNotFound - } - - return nil -} - -// parseIfd decodes the IFD block that we're currently sitting on the first -// byte of. 
-func (ie *IfdEnumerate) parseIfd(ii *exifcommon.IfdIdentity, bp *byteParser, visitor TagVisitorFn, doDescend bool, med *MiscellaneousExifData) (nextIfdOffset uint32, entries []*IfdTagEntry, thumbnailData []byte, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - tagCount, _, err := bp.getUint16() - log.PanicIf(err) - - ifdEnumerateLogger.Debugf(nil, "IFD [%s] tag-count: (%d)", ii.String(), tagCount) - - entries = make([]*IfdTagEntry, 0) - - var enumeratorThumbnailOffset *IfdTagEntry - var enumeratorThumbnailSize *IfdTagEntry - - for i := 0; i < int(tagCount); i++ { - ite, err := ie.parseTag(ii, i, bp) - if err != nil { - if log.Is(err, ErrTagNotFound) == true || log.Is(err, ErrTagTypeNotValid) == true { - // These tags should've been fully logged in parseTag(). The - // ITE returned is nil so we can't print anything about them, now. - continue - } - - log.Panic(err) - } - - err = ie.tagPostParse(ite, med) - if err == nil { - if err == ErrTagNotFound { - continue - } - - log.PanicIf(err) - } - - tagId := ite.TagId() - - if visitor != nil { - err := visitor(ite) - log.PanicIf(err) - } - - if ite.IsThumbnailOffset() == true { - ifdEnumerateLogger.Debugf(nil, "Skipping the thumbnail offset tag (0x%04x). Use accessors to get it or set it.", tagId) - - enumeratorThumbnailOffset = ite - entries = append(entries, ite) - - continue - } else if ite.IsThumbnailSize() == true { - ifdEnumerateLogger.Debugf(nil, "Skipping the thumbnail size tag (0x%04x). Use accessors to get it or set it.", tagId) - - enumeratorThumbnailSize = ite - entries = append(entries, ite) - - continue - } - - if ite.TagType() != exifcommon.TypeUndefined { - // If this tag's value is an offset, bump our max-offset value to - // what that offset is plus however large that value is. 
- - vc := ite.getValueContext() - - farOffset, err := vc.GetFarOffset() - if err == nil { - candidateOffset := farOffset + uint32(vc.SizeInBytes()) - if candidateOffset > ie.furthestOffset { - ie.furthestOffset = candidateOffset - } - } else if err != exifcommon.ErrNotFarValue { - log.PanicIf(err) - } - } - - // If it's an IFD but not a standard one, it'll just be seen as a LONG - // (the standard IFD tag type), later, unless we skip it because it's - // [likely] not even in the standard list of known tags. - if ite.ChildIfdPath() != "" { - if doDescend == true { - ifdEnumerateLogger.Debugf(nil, "Descending from IFD [%s] to IFD [%s].", ii, ite.ChildIfdPath()) - - currentIfdTag := ii.IfdTag() - - childIfdTag := - exifcommon.NewIfdTag( - ¤tIfdTag, - ite.TagId(), - ite.ChildIfdName()) - - iiChild := ii.NewChild(childIfdTag, 0) - - err := ie.scan(iiChild, ite.getValueOffset(), visitor, med) - log.PanicIf(err) - - ifdEnumerateLogger.Debugf(nil, "Ascending from IFD [%s] to IFD [%s].", ite.ChildIfdPath(), ii) - } - } - - entries = append(entries, ite) - } - - if enumeratorThumbnailOffset != nil && enumeratorThumbnailSize != nil { - thumbnailData, err = ie.parseThumbnail(enumeratorThumbnailOffset, enumeratorThumbnailSize) - if err != nil { - ifdEnumerateLogger.Errorf( - nil, err, - "We tried to bump our furthest-offset counter but there was an issue first seeking past the thumbnail.") - } else { - // In this case, the value is always an offset. - offset := enumeratorThumbnailOffset.getValueOffset() - - // This this case, the value is always a length. - length := enumeratorThumbnailSize.getValueOffset() - - ifdEnumerateLogger.Debugf(nil, "Found thumbnail in IFD [%s]. 
Its offset is (%d) and is (%d) bytes.", ii, offset, length) - - furthestOffset := offset + length - - if furthestOffset > ie.furthestOffset { - ie.furthestOffset = furthestOffset - } - } - } - - nextIfdOffset, _, err = bp.getUint32() - log.PanicIf(err) - - _, alreadyVisited := ie.visitedIfdOffsets[nextIfdOffset] - - if alreadyVisited == true { - ifdEnumerateLogger.Warningf(nil, "IFD at offset (0x%08x) has been linked-to more than once. There might be a cycle in the IFD chain. Not reparsing.", nextIfdOffset) - nextIfdOffset = 0 - } - - if nextIfdOffset != 0 { - ie.visitedIfdOffsets[nextIfdOffset] = struct{}{} - ifdEnumerateLogger.Debugf(nil, "[%s] Next IFD at offset: (0x%08x)", ii.String(), nextIfdOffset) - } else { - ifdEnumerateLogger.Debugf(nil, "[%s] IFD chain has terminated.", ii.String()) - } - - return nextIfdOffset, entries, thumbnailData, nil -} - -func (ie *IfdEnumerate) parseThumbnail(offsetIte, lengthIte *IfdTagEntry) (thumbnailData []byte, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - vRaw, err := lengthIte.Value() - log.PanicIf(err) - - vList := vRaw.([]uint32) - if len(vList) != 1 { - log.Panicf("not exactly one long: (%d)", len(vList)) - } - - length := vList[0] - - // The tag is official a LONG type, but it's actually an offset to a blob of bytes. - offsetIte.updateTagType(exifcommon.TypeByte) - offsetIte.updateUnitCount(length) - - thumbnailData, err = offsetIte.GetRawBytes() - log.PanicIf(err) - - return thumbnailData, nil -} - -// scan parses and enumerates the different IFD blocks and invokes a visitor -// callback for each tag. No information is kept or returned. 
-func (ie *IfdEnumerate) scan(iiGeneral *exifcommon.IfdIdentity, ifdOffset uint32, visitor TagVisitorFn, med *MiscellaneousExifData) (err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // TODO(dustin): Add test - - for ifdIndex := 0; ; ifdIndex++ { - iiSibling := iiGeneral.NewSibling(ifdIndex) - - ifdEnumerateLogger.Debugf(nil, "Parsing IFD [%s] at offset (0x%04x) (scan).", iiSibling.String(), ifdOffset) - - bp, err := ie.getByteParser(ifdOffset) - if err != nil { - if err == ErrOffsetInvalid { - ifdEnumerateLogger.Errorf(nil, nil, "IFD [%s] at offset (0x%04x) is unreachable. Terminating scan.", iiSibling.String(), ifdOffset) - break - } - - log.Panic(err) - } - - nextIfdOffset, _, _, err := ie.parseIfd(iiSibling, bp, visitor, true, med) - log.PanicIf(err) - - currentOffset := bp.CurrentOffset() - if currentOffset > ie.furthestOffset { - ie.furthestOffset = currentOffset - } - - if nextIfdOffset == 0 { - break - } - - ifdOffset = nextIfdOffset - } - - return nil -} - -// MiscellaneousExifData is reports additional data collected during the parse. -type MiscellaneousExifData struct { - // UnknownTags contains all tags that were invalid for their containing - // IFDs. The values represent alternative IFDs that were correctly matched - // to those tags and used instead. - unknownTags map[exifcommon.BasicTag]exifcommon.BasicTag -} - -// UnknownTags returns the unknown tags encountered during the scan. -func (med *MiscellaneousExifData) UnknownTags() map[exifcommon.BasicTag]exifcommon.BasicTag { - return med.unknownTags -} - -// ScanOptions tweaks parser behavior/choices. -type ScanOptions struct { - // NOTE(dustin): Reserved for future usage. -} - -// Scan enumerates the different EXIF blocks (called IFDs). `rootIfdName` will -// be "IFD" in the TIFF standard. 
-func (ie *IfdEnumerate) Scan(iiRoot *exifcommon.IfdIdentity, ifdOffset uint32, visitor TagVisitorFn, so *ScanOptions) (med *MiscellaneousExifData, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // TODO(dustin): Add test - - med = &MiscellaneousExifData{ - unknownTags: make(map[exifcommon.BasicTag]exifcommon.BasicTag), - } - - err = ie.scan(iiRoot, ifdOffset, visitor, med) - log.PanicIf(err) - - ifdEnumerateLogger.Debugf(nil, "Scan: It looks like the furthest offset that contained EXIF data in the EXIF blob was (%d) (Scan).", ie.FurthestOffset()) - - return med, nil -} - -// Ifd represents a single, parsed IFD. -type Ifd struct { - ifdIdentity *exifcommon.IfdIdentity - - ifdMapping *exifcommon.IfdMapping - tagIndex *TagIndex - - offset uint32 - byteOrder binary.ByteOrder - id int - - parentIfd *Ifd - - // ParentTagIndex is our tag position in the parent IFD, if we had a parent - // (if `ParentIfd` is not nil and we weren't an IFD referenced as a sibling - // instead of as a child). - parentTagIndex int - - entries []*IfdTagEntry - entriesByTagId map[uint16][]*IfdTagEntry - - children []*Ifd - childIfdIndex map[string]*Ifd - - thumbnailData []byte - - nextIfdOffset uint32 - nextIfd *Ifd -} - -// IfdIdentity returns IFD identity that this struct represents. -func (ifd *Ifd) IfdIdentity() *exifcommon.IfdIdentity { - return ifd.ifdIdentity -} - -// Entries returns a flat list of all tags for this IFD. -func (ifd *Ifd) Entries() []*IfdTagEntry { - - // TODO(dustin): Add test - - return ifd.entries -} - -// EntriesByTagId returns a map of all tags for this IFD. -func (ifd *Ifd) EntriesByTagId() map[uint16][]*IfdTagEntry { - - // TODO(dustin): Add test - - return ifd.entriesByTagId -} - -// Children returns a flat list of all child IFDs of this IFD. 
-func (ifd *Ifd) Children() []*Ifd { - - // TODO(dustin): Add test - - return ifd.children -} - -// ChildWithIfdPath returns a map of all child IFDs of this IFD. -func (ifd *Ifd) ChildIfdIndex() map[string]*Ifd { - - // TODO(dustin): Add test - - return ifd.childIfdIndex -} - -// ParentTagIndex returns the position of this IFD's tag in its parent IFD (*if* -// there is a parent). -func (ifd *Ifd) ParentTagIndex() int { - - // TODO(dustin): Add test - - return ifd.parentTagIndex -} - -// Offset returns the offset of the IFD in the stream. -func (ifd *Ifd) Offset() uint32 { - - // TODO(dustin): Add test - - return ifd.offset -} - -// Offset returns the offset of the IFD in the stream. -func (ifd *Ifd) ByteOrder() binary.ByteOrder { - - // TODO(dustin): Add test - - return ifd.byteOrder -} - -// NextIfd returns the Ifd struct for the next IFD in the chain. -func (ifd *Ifd) NextIfd() *Ifd { - - // TODO(dustin): Add test - - return ifd.nextIfd -} - -// ChildWithIfdPath returns an `Ifd` struct for the given child of the current -// IFD. -func (ifd *Ifd) ChildWithIfdPath(iiChild *exifcommon.IfdIdentity) (childIfd *Ifd, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // TODO(dustin): This is a bridge while we're introducing the IFD type-system. We should be able to use the (IfdIdentity).Equals() method for this. - ifdPath := iiChild.UnindexedString() - - for _, childIfd := range ifd.children { - if childIfd.ifdIdentity.UnindexedString() == ifdPath { - return childIfd, nil - } - } - - log.Panic(ErrTagNotFound) - return nil, nil -} - -// FindTagWithId returns a list of tags (usually just zero or one) that match -// the given tag ID. This is efficient. 
-func (ifd *Ifd) FindTagWithId(tagId uint16) (results []*IfdTagEntry, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - results, found := ifd.entriesByTagId[tagId] - if found != true { - log.Panic(ErrTagNotFound) - } - - return results, nil -} - -// FindTagWithName returns a list of tags (usually just zero or one) that match -// the given tag name. This is not efficient (though the labor is trivial). -func (ifd *Ifd) FindTagWithName(tagName string) (results []*IfdTagEntry, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - it, err := ifd.tagIndex.GetWithName(ifd.ifdIdentity, tagName) - if log.Is(err, ErrTagNotFound) == true { - log.Panic(ErrTagNotKnown) - } else if err != nil { - log.Panic(err) - } - - results = make([]*IfdTagEntry, 0) - for _, ite := range ifd.entries { - if ite.TagId() == it.Id { - results = append(results, ite) - } - } - - if len(results) == 0 { - log.Panic(ErrTagNotFound) - } - - return results, nil -} - -// String returns a description string. -func (ifd *Ifd) String() string { - parentOffset := uint32(0) - if ifd.parentIfd != nil { - parentOffset = ifd.parentIfd.offset - } - - return fmt.Sprintf("Ifd", ifd.id, ifd.ifdIdentity.UnindexedString(), ifd.ifdIdentity.Index(), len(ifd.entries), ifd.offset, len(ifd.children), parentOffset, ifd.nextIfdOffset) -} - -// Thumbnail returns the raw thumbnail bytes. This is typically directly -// readable by any standard image viewer. -func (ifd *Ifd) Thumbnail() (data []byte, err error) { - - if ifd.thumbnailData == nil { - return nil, ErrNoThumbnail - } - - return ifd.thumbnailData, nil -} - -// dumpTags recursively builds a list of tags from an IFD. -func (ifd *Ifd) dumpTags(tags []*IfdTagEntry) []*IfdTagEntry { - if tags == nil { - tags = make([]*IfdTagEntry, 0) - } - - // Now, print the tags while also descending to child-IFDS as we encounter them. 
- - ifdsFoundCount := 0 - - for _, ite := range ifd.entries { - tags = append(tags, ite) - - childIfdPath := ite.ChildIfdPath() - if childIfdPath != "" { - ifdsFoundCount++ - - childIfd, found := ifd.childIfdIndex[childIfdPath] - if found != true { - log.Panicf("alien child IFD referenced by a tag: [%s]", childIfdPath) - } - - tags = childIfd.dumpTags(tags) - } - } - - if len(ifd.children) != ifdsFoundCount { - log.Panicf("have one or more dangling child IFDs: (%d) != (%d)", len(ifd.children), ifdsFoundCount) - } - - if ifd.nextIfd != nil { - tags = ifd.nextIfd.dumpTags(tags) - } - - return tags -} - -// DumpTags prints the IFD hierarchy. -func (ifd *Ifd) DumpTags() []*IfdTagEntry { - return ifd.dumpTags(nil) -} - -func (ifd *Ifd) printTagTree(populateValues bool, index, level int, nextLink bool) { - indent := strings.Repeat(" ", level*2) - - prefix := " " - if nextLink { - prefix = ">" - } - - fmt.Printf("%s%sIFD: %s\n", indent, prefix, ifd) - - // Now, print the tags while also descending to child-IFDS as we encounter them. - - ifdsFoundCount := 0 - - for _, ite := range ifd.entries { - if ite.ChildIfdPath() != "" { - fmt.Printf("%s - TAG: %s\n", indent, ite) - } else { - // This will just add noise to the output (byte-tags are fully - // dumped). 
- if ite.IsThumbnailOffset() == true || ite.IsThumbnailSize() == true { - continue - } - - it, err := ifd.tagIndex.Get(ifd.ifdIdentity, ite.TagId()) - - tagName := "" - if err == nil { - tagName = it.Name - } - - var valuePhrase string - if populateValues == true { - var err error - - valuePhrase, err = ite.Format() - if err != nil { - if log.Is(err, exifcommon.ErrUnhandledUndefinedTypedTag) == true { - ifdEnumerateLogger.Warningf(nil, "Skipping non-standard undefined tag: [%s] (%04x)", ifd.ifdIdentity.UnindexedString(), ite.TagId()) - continue - } else if err == exifundefined.ErrUnparseableValue { - ifdEnumerateLogger.Warningf(nil, "Skipping unparseable undefined tag: [%s] (%04x) [%s]", ifd.ifdIdentity.UnindexedString(), ite.TagId(), it.Name) - continue - } - - log.Panic(err) - } - } else { - valuePhrase = "!UNRESOLVED" - } - - fmt.Printf("%s - TAG: %s NAME=[%s] VALUE=[%v]\n", indent, ite, tagName, valuePhrase) - } - - childIfdPath := ite.ChildIfdPath() - if childIfdPath != "" { - ifdsFoundCount++ - - childIfd, found := ifd.childIfdIndex[childIfdPath] - if found != true { - log.Panicf("alien child IFD referenced by a tag: [%s]", childIfdPath) - } - - childIfd.printTagTree(populateValues, 0, level+1, false) - } - } - - if len(ifd.children) != ifdsFoundCount { - log.Panicf("have one or more dangling child IFDs: (%d) != (%d)", len(ifd.children), ifdsFoundCount) - } - - if ifd.nextIfd != nil { - ifd.nextIfd.printTagTree(populateValues, index+1, level, true) - } -} - -// PrintTagTree prints the IFD hierarchy. -func (ifd *Ifd) PrintTagTree(populateValues bool) { - ifd.printTagTree(populateValues, 0, 0, false) -} - -func (ifd *Ifd) printIfdTree(level int, nextLink bool) { - indent := strings.Repeat(" ", level*2) - - prefix := " " - if nextLink { - prefix = ">" - } - - fmt.Printf("%s%s%s\n", indent, prefix, ifd) - - // Now, print the tags while also descending to child-IFDS as we encounter them. 
- - ifdsFoundCount := 0 - - for _, ite := range ifd.entries { - childIfdPath := ite.ChildIfdPath() - if childIfdPath != "" { - ifdsFoundCount++ - - childIfd, found := ifd.childIfdIndex[childIfdPath] - if found != true { - log.Panicf("alien child IFD referenced by a tag: [%s]", childIfdPath) - } - - childIfd.printIfdTree(level+1, false) - } - } - - if len(ifd.children) != ifdsFoundCount { - log.Panicf("have one or more dangling child IFDs: (%d) != (%d)", len(ifd.children), ifdsFoundCount) - } - - if ifd.nextIfd != nil { - ifd.nextIfd.printIfdTree(level, true) - } -} - -// PrintIfdTree prints the IFD hierarchy. -func (ifd *Ifd) PrintIfdTree() { - ifd.printIfdTree(0, false) -} - -func (ifd *Ifd) dumpTree(tagsDump []string, level int) []string { - if tagsDump == nil { - tagsDump = make([]string, 0) - } - - indent := strings.Repeat(" ", level*2) - - var ifdPhrase string - if ifd.parentIfd != nil { - ifdPhrase = fmt.Sprintf("[%s]->[%s]:(%d)", ifd.parentIfd.ifdIdentity.UnindexedString(), ifd.ifdIdentity.UnindexedString(), ifd.ifdIdentity.Index()) - } else { - ifdPhrase = fmt.Sprintf("[ROOT]->[%s]:(%d)", ifd.ifdIdentity.UnindexedString(), ifd.ifdIdentity.Index()) - } - - startBlurb := fmt.Sprintf("%s> IFD %s TOP", indent, ifdPhrase) - tagsDump = append(tagsDump, startBlurb) - - ifdsFoundCount := 0 - for _, ite := range ifd.entries { - tagsDump = append(tagsDump, fmt.Sprintf("%s - (0x%04x)", indent, ite.TagId())) - - childIfdPath := ite.ChildIfdPath() - if childIfdPath != "" { - ifdsFoundCount++ - - childIfd, found := ifd.childIfdIndex[childIfdPath] - if found != true { - log.Panicf("alien child IFD referenced by a tag: [%s]", childIfdPath) - } - - tagsDump = childIfd.dumpTree(tagsDump, level+1) - } - } - - if len(ifd.children) != ifdsFoundCount { - log.Panicf("have one or more dangling child IFDs: (%d) != (%d)", len(ifd.children), ifdsFoundCount) - } - - finishBlurb := fmt.Sprintf("%s< IFD %s BOTTOM", indent, ifdPhrase) - tagsDump = append(tagsDump, finishBlurb) - - if 
ifd.nextIfd != nil { - siblingBlurb := fmt.Sprintf("%s* LINKING TO SIBLING IFD [%s]:(%d)", indent, ifd.nextIfd.ifdIdentity.UnindexedString(), ifd.nextIfd.ifdIdentity.Index()) - tagsDump = append(tagsDump, siblingBlurb) - - tagsDump = ifd.nextIfd.dumpTree(tagsDump, level) - } - - return tagsDump -} - -// DumpTree returns a list of strings describing the IFD hierarchy. -func (ifd *Ifd) DumpTree() []string { - return ifd.dumpTree(nil, 0) -} - -// GpsInfo parses and consolidates the GPS info. This can only be called on the -// GPS IFD. -func (ifd *Ifd) GpsInfo() (gi *GpsInfo, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - gi = new(GpsInfo) - - if ifd.ifdIdentity.Equals(exifcommon.IfdGpsInfoStandardIfdIdentity) == false { - log.Panicf("GPS can only be read on GPS IFD: [%s]", ifd.ifdIdentity.UnindexedString()) - } - - if tags, found := ifd.entriesByTagId[TagGpsVersionId]; found == false { - // We've seen this. We'll just have to default to assuming we're in a - // 2.2.0.0 format. - ifdEnumerateLogger.Warningf(nil, "No GPS version tag (0x%04x) found.", TagGpsVersionId) - } else { - versionBytes, err := tags[0].GetRawBytes() - log.PanicIf(err) - - hit := false - for _, acceptedGpsVersion := range ValidGpsVersions { - if bytes.Compare(versionBytes, acceptedGpsVersion[:]) == 0 { - hit = true - break - } - } - - if hit != true { - ifdEnumerateLogger.Warningf(nil, "GPS version not supported: %v", versionBytes) - log.Panic(ErrNoGpsTags) - } - } - - tags, found := ifd.entriesByTagId[TagLatitudeId] - if found == false { - ifdEnumerateLogger.Warningf(nil, "latitude not found") - log.Panic(ErrNoGpsTags) - } - - latitudeValue, err := tags[0].Value() - log.PanicIf(err) - - // Look for whether North or South. 
- tags, found = ifd.entriesByTagId[TagLatitudeRefId] - if found == false { - ifdEnumerateLogger.Warningf(nil, "latitude-ref not found") - log.Panic(ErrNoGpsTags) - } - - latitudeRefValue, err := tags[0].Value() - log.PanicIf(err) - - tags, found = ifd.entriesByTagId[TagLongitudeId] - if found == false { - ifdEnumerateLogger.Warningf(nil, "longitude not found") - log.Panic(ErrNoGpsTags) - } - - longitudeValue, err := tags[0].Value() - log.PanicIf(err) - - // Look for whether West or East. - tags, found = ifd.entriesByTagId[TagLongitudeRefId] - if found == false { - ifdEnumerateLogger.Warningf(nil, "longitude-ref not found") - log.Panic(ErrNoGpsTags) - } - - longitudeRefValue, err := tags[0].Value() - log.PanicIf(err) - - // Parse location. - - latitudeRaw := latitudeValue.([]exifcommon.Rational) - - gi.Latitude, err = NewGpsDegreesFromRationals(latitudeRefValue.(string), latitudeRaw) - log.PanicIf(err) - - longitudeRaw := longitudeValue.([]exifcommon.Rational) - - gi.Longitude, err = NewGpsDegreesFromRationals(longitudeRefValue.(string), longitudeRaw) - log.PanicIf(err) - - // Parse altitude. 
- - altitudeTags, foundAltitude := ifd.entriesByTagId[TagAltitudeId] - altitudeRefTags, foundAltitudeRef := ifd.entriesByTagId[TagAltitudeRefId] - - if foundAltitude == true && foundAltitudeRef == true { - altitudePhrase, err := altitudeTags[0].Format() - log.PanicIf(err) - - ifdEnumerateLogger.Debugf(nil, "Altitude is [%s].", altitudePhrase) - - altitudeValue, err := altitudeTags[0].Value() - log.PanicIf(err) - - altitudeRefPhrase, err := altitudeRefTags[0].Format() - log.PanicIf(err) - - ifdEnumerateLogger.Debugf(nil, "Altitude-reference is [%s].", altitudeRefPhrase) - - altitudeRefValue, err := altitudeRefTags[0].Value() - log.PanicIf(err) - - altitudeRaw := altitudeValue.([]exifcommon.Rational) - if altitudeRaw[0].Denominator > 0 { - altitude := int(altitudeRaw[0].Numerator / altitudeRaw[0].Denominator) - - if altitudeRefValue.([]byte)[0] == 1 { - altitude *= -1 - } - - gi.Altitude = altitude - } - } - - // Parse timestamp from separate date and time tags. - - timestampTags, foundTimestamp := ifd.entriesByTagId[TagTimestampId] - datestampTags, foundDatestamp := ifd.entriesByTagId[TagDatestampId] - - if foundTimestamp == true && foundDatestamp == true { - datestampValue, err := datestampTags[0].Value() - log.PanicIf(err) - - datePhrase := datestampValue.(string) - ifdEnumerateLogger.Debugf(nil, "Date tag value is [%s].", datePhrase) - - // Normalize the separators. 
- datePhrase = strings.ReplaceAll(datePhrase, "-", ":") - - dateParts := strings.Split(datePhrase, ":") - - year, err1 := strconv.ParseUint(dateParts[0], 10, 16) - month, err2 := strconv.ParseUint(dateParts[1], 10, 8) - day, err3 := strconv.ParseUint(dateParts[2], 10, 8) - - if err1 == nil && err2 == nil && err3 == nil { - timestampValue, err := timestampTags[0].Value() - log.PanicIf(err) - - timePhrase, err := timestampTags[0].Format() - log.PanicIf(err) - - ifdEnumerateLogger.Debugf(nil, "Time tag value is [%s].", timePhrase) - - timestampRaw := timestampValue.([]exifcommon.Rational) - - hour := int(timestampRaw[0].Numerator / timestampRaw[0].Denominator) - minute := int(timestampRaw[1].Numerator / timestampRaw[1].Denominator) - second := int(timestampRaw[2].Numerator / timestampRaw[2].Denominator) - - gi.Timestamp = time.Date(int(year), time.Month(month), int(day), hour, minute, second, 0, time.UTC) - } - } - - return gi, nil -} - -// ParsedTagVisitor is a callback used if wanting to visit through all tags and -// child IFDs from the current IFD and going down. -type ParsedTagVisitor func(*Ifd, *IfdTagEntry) error - -// EnumerateTagsRecursively calls the given visitor function for every tag and -// IFD in the current IFD, recursively. -func (ifd *Ifd) EnumerateTagsRecursively(visitor ParsedTagVisitor) (err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - for ptr := ifd; ptr != nil; ptr = ptr.nextIfd { - for _, ite := range ifd.entries { - childIfdPath := ite.ChildIfdPath() - if childIfdPath != "" { - childIfd := ifd.childIfdIndex[childIfdPath] - - err := childIfd.EnumerateTagsRecursively(visitor) - log.PanicIf(err) - } else { - err := visitor(ifd, ite) - log.PanicIf(err) - } - } - } - - return nil -} - -// QueuedIfd is one IFD that has been identified but yet to be processed. 
-type QueuedIfd struct { - IfdIdentity *exifcommon.IfdIdentity - - Offset uint32 - Parent *Ifd - - // ParentTagIndex is our tag position in the parent IFD, if we had a parent - // (if `ParentIfd` is not nil and we weren't an IFD referenced as a sibling - // instead of as a child). - ParentTagIndex int -} - -// IfdIndex collects a bunch of IFD and tag information stored in several -// different ways in order to provide convenient lookups. -type IfdIndex struct { - RootIfd *Ifd - Ifds []*Ifd - Tree map[int]*Ifd - Lookup map[string]*Ifd -} - -// Collect enumerates the different EXIF blocks (called IFDs) and builds out an -// index struct for referencing all of the parsed data. -func (ie *IfdEnumerate) Collect(rootIfdOffset uint32) (index IfdIndex, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // TODO(dustin): Add MiscellaneousExifData to IfdIndex - - tree := make(map[int]*Ifd) - ifds := make([]*Ifd, 0) - lookup := make(map[string]*Ifd) - - queue := []QueuedIfd{ - { - IfdIdentity: exifcommon.IfdStandardIfdIdentity, - Offset: rootIfdOffset, - }, - } - - edges := make(map[uint32]*Ifd) - - for { - if len(queue) == 0 { - break - } - - qi := queue[0] - ii := qi.IfdIdentity - - offset := qi.Offset - parentIfd := qi.Parent - - queue = queue[1:] - - ifdEnumerateLogger.Debugf(nil, "Parsing IFD [%s] (%d) at offset (0x%04x) (Collect).", ii.String(), ii.Index(), offset) - - bp, err := ie.getByteParser(offset) - if err != nil { - if err == ErrOffsetInvalid { - return index, err - } - - log.Panic(err) - } - - // TODO(dustin): We don't need to pass the index in as a separate argument. Get from the II. 
- - nextIfdOffset, entries, thumbnailData, err := ie.parseIfd(ii, bp, nil, false, nil) - log.PanicIf(err) - - currentOffset := bp.CurrentOffset() - if currentOffset > ie.furthestOffset { - ie.furthestOffset = currentOffset - } - - id := len(ifds) - - entriesByTagId := make(map[uint16][]*IfdTagEntry) - for _, ite := range entries { - tagId := ite.TagId() - - tags, found := entriesByTagId[tagId] - if found == false { - tags = make([]*IfdTagEntry, 0) - } - - entriesByTagId[tagId] = append(tags, ite) - } - - ifd := &Ifd{ - ifdIdentity: ii, - - byteOrder: ie.byteOrder, - - id: id, - - parentIfd: parentIfd, - parentTagIndex: qi.ParentTagIndex, - - offset: offset, - entries: entries, - entriesByTagId: entriesByTagId, - - // This is populated as each child is processed. - children: make([]*Ifd, 0), - - nextIfdOffset: nextIfdOffset, - thumbnailData: thumbnailData, - - ifdMapping: ie.ifdMapping, - tagIndex: ie.tagIndex, - } - - // Add ourselves to a big list of IFDs. - ifds = append(ifds, ifd) - - // Install ourselves into a by-id lookup table (keys are unique). - tree[id] = ifd - - // Install into by-name buckets. - lookup[ii.String()] = ifd - - // Add a link from the previous IFD in the chain to us. - if previousIfd, found := edges[offset]; found == true { - previousIfd.nextIfd = ifd - } - - // Attach as a child to our parent (where we appeared as a tag in - // that IFD). - if parentIfd != nil { - parentIfd.children = append(parentIfd.children, ifd) - } - - // Determine if any of our entries is a child IFD and queue it. 
- for i, ite := range entries { - if ite.ChildIfdPath() == "" { - continue - } - - tagId := ite.TagId() - childIfdName := ite.ChildIfdName() - - currentIfdTag := ii.IfdTag() - - childIfdTag := - exifcommon.NewIfdTag( - ¤tIfdTag, - tagId, - childIfdName) - - iiChild := ii.NewChild(childIfdTag, 0) - - qi := QueuedIfd{ - IfdIdentity: iiChild, - - Offset: ite.getValueOffset(), - Parent: ifd, - ParentTagIndex: i, - } - - queue = append(queue, qi) - } - - // If there's another IFD in the chain. - if nextIfdOffset != 0 { - iiSibling := ii.NewSibling(ii.Index() + 1) - - // Allow the next link to know what the previous link was. - edges[nextIfdOffset] = ifd - - qi := QueuedIfd{ - IfdIdentity: iiSibling, - Offset: nextIfdOffset, - } - - queue = append(queue, qi) - } - } - - index.RootIfd = tree[0] - index.Ifds = ifds - index.Tree = tree - index.Lookup = lookup - - err = ie.setChildrenIndex(index.RootIfd) - log.PanicIf(err) - - ifdEnumerateLogger.Debugf(nil, "Collect: It looks like the furthest offset that contained EXIF data in the EXIF blob was (%d).", ie.FurthestOffset()) - - return index, nil -} - -func (ie *IfdEnumerate) setChildrenIndex(ifd *Ifd) (err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - childIfdIndex := make(map[string]*Ifd) - for _, childIfd := range ifd.children { - childIfdIndex[childIfd.ifdIdentity.UnindexedString()] = childIfd - } - - ifd.childIfdIndex = childIfdIndex - - for _, childIfd := range ifd.children { - err := ie.setChildrenIndex(childIfd) - log.PanicIf(err) - } - - return nil -} - -// FurthestOffset returns the furthest offset visited in the EXIF blob. This -// *does not* account for the locations of any undefined tags since we always -// evaluate the furthest offset, whether or not the user wants to know it. 
-// -// We are not willing to incur the cost of actually parsing those tags just to -// know their length when there are still undefined tags that are out there -// that we still won't have any idea how to parse, thus making this an -// approximation regardless of how clever we get. -func (ie *IfdEnumerate) FurthestOffset() uint32 { - - // TODO(dustin): Add test - - return ie.furthestOffset -} - -// parseOneIfd is a hack to use an IE to parse a raw IFD block. Can be used for -// testing. The fqIfdPath ("fully-qualified IFD path") will be less qualified -// in that the numeric index will always be zero (the zeroth child) rather than -// the proper number (if its actually a sibling to the first child, for -// instance). -func parseOneIfd(ifdMapping *exifcommon.IfdMapping, tagIndex *TagIndex, ii *exifcommon.IfdIdentity, byteOrder binary.ByteOrder, ifdBlock []byte, visitor TagVisitorFn) (nextIfdOffset uint32, entries []*IfdTagEntry, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // TODO(dustin): Add test - - ebs := NewExifReadSeekerWithBytes(ifdBlock) - - rs, err := ebs.GetReadSeeker(0) - log.PanicIf(err) - - bp, err := newByteParser(rs, byteOrder, 0) - if err != nil { - if err == ErrOffsetInvalid { - return 0, nil, err - } - - log.Panic(err) - } - - dummyEbs := NewExifReadSeekerWithBytes([]byte{}) - ie := NewIfdEnumerate(ifdMapping, tagIndex, dummyEbs, byteOrder) - - nextIfdOffset, entries, _, err = ie.parseIfd(ii, bp, visitor, true, nil) - log.PanicIf(err) - - return nextIfdOffset, entries, nil -} - -// parseOneTag is a hack to use an IE to parse a raw tag block. 
-func parseOneTag(ifdMapping *exifcommon.IfdMapping, tagIndex *TagIndex, ii *exifcommon.IfdIdentity, byteOrder binary.ByteOrder, tagBlock []byte) (ite *IfdTagEntry, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // TODO(dustin): Add test - - ebs := NewExifReadSeekerWithBytes(tagBlock) - - rs, err := ebs.GetReadSeeker(0) - log.PanicIf(err) - - bp, err := newByteParser(rs, byteOrder, 0) - if err != nil { - if err == ErrOffsetInvalid { - return nil, err - } - - log.Panic(err) - } - - dummyEbs := NewExifReadSeekerWithBytes([]byte{}) - ie := NewIfdEnumerate(ifdMapping, tagIndex, dummyEbs, byteOrder) - - ite, err = ie.parseTag(ii, 0, bp) - log.PanicIf(err) - - err = ie.tagPostParse(ite, nil) - if err != nil { - if err == ErrTagNotFound { - return nil, err - } - - log.Panic(err) - } - - return ite, nil -} - -// FindIfdFromRootIfd returns the given `Ifd` given the root-IFD and path of the -// desired IFD. -func FindIfdFromRootIfd(rootIfd *Ifd, ifdPath string) (ifd *Ifd, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // TODO(dustin): !! Add test. - - lineage, err := rootIfd.ifdMapping.ResolvePath(ifdPath) - log.PanicIf(err) - - // Confirm the first IFD is our root IFD type, and then prune it because - // from then on we'll be searching down through our children. - - if len(lineage) == 0 { - log.Panicf("IFD path must be non-empty.") - } else if lineage[0].Name != exifcommon.IfdStandardIfdIdentity.Name() { - log.Panicf("First IFD path item must be [%s].", exifcommon.IfdStandardIfdIdentity.Name()) - } - - desiredRootIndex := lineage[0].Index - lineage = lineage[1:] - - // TODO(dustin): !! This is a poorly conceived fix that just doubles the work we already have to do below, which then interacts badly with the indices not being properly represented in the IFD-phrase. - // TODO(dustin): !! 
<-- However, we're not sure whether we shouldn't store a secondary IFD-path with the indices. Some IFDs may not necessarily restrict which IFD indices they can be a child of (only the IFD itself matters). Validation should be delegated to the caller. - thisIfd := rootIfd - for currentRootIndex := 0; currentRootIndex < desiredRootIndex; currentRootIndex++ { - if thisIfd.nextIfd == nil { - log.Panicf("Root-IFD index (%d) does not exist in the data.", currentRootIndex) - } - - thisIfd = thisIfd.nextIfd - } - - for _, itii := range lineage { - var hit *Ifd - for _, childIfd := range thisIfd.children { - if childIfd.ifdIdentity.TagId() == itii.TagId { - hit = childIfd - break - } - } - - // If we didn't find the child, add it. - if hit == nil { - log.Panicf("IFD [%s] in [%s] not found: %s", itii.Name, ifdPath, thisIfd.children) - } - - thisIfd = hit - - // If we didn't find the sibling, add it. - for i := 0; i < itii.Index; i++ { - if thisIfd.nextIfd == nil { - log.Panicf("IFD [%s] does not have (%d) occurrences/siblings", thisIfd.ifdIdentity.UnindexedString(), itii.Index) - } - - thisIfd = thisIfd.nextIfd - } - } - - return thisIfd, nil -} diff --git a/vendor/github.com/dsoprea/go-exif/v3/ifd_tag_entry.go b/vendor/github.com/dsoprea/go-exif/v3/ifd_tag_entry.go deleted file mode 100644 index ed6ba2291..000000000 --- a/vendor/github.com/dsoprea/go-exif/v3/ifd_tag_entry.go +++ /dev/null @@ -1,298 +0,0 @@ -package exif - -import ( - "fmt" - "io" - - "encoding/binary" - - "github.com/dsoprea/go-logging" - - "github.com/dsoprea/go-exif/v3/common" - "github.com/dsoprea/go-exif/v3/undefined" -) - -var ( - iteLogger = log.NewLogger("exif.ifd_tag_entry") -) - -// IfdTagEntry refers to a tag in the loaded EXIF block. -type IfdTagEntry struct { - tagId uint16 - tagIndex int - tagType exifcommon.TagTypePrimitive - unitCount uint32 - valueOffset uint32 - rawValueOffset []byte - - // childIfdName is the right most atom in the IFD-path. 
We need this to - // construct the fully-qualified IFD-path. - childIfdName string - - // childIfdPath is the IFD-path of the child if this tag represents a child - // IFD. - childIfdPath string - - // childFqIfdPath is the IFD-path of the child if this tag represents a - // child IFD. Includes indices. - childFqIfdPath string - - // TODO(dustin): !! IB's host the child-IBs directly in the tag, but that's not the case here. Refactor to accommodate it for a consistent experience. - - ifdIdentity *exifcommon.IfdIdentity - - isUnhandledUnknown bool - - rs io.ReadSeeker - byteOrder binary.ByteOrder - - tagName string -} - -func newIfdTagEntry(ii *exifcommon.IfdIdentity, tagId uint16, tagIndex int, tagType exifcommon.TagTypePrimitive, unitCount uint32, valueOffset uint32, rawValueOffset []byte, rs io.ReadSeeker, byteOrder binary.ByteOrder) *IfdTagEntry { - return &IfdTagEntry{ - ifdIdentity: ii, - tagId: tagId, - tagIndex: tagIndex, - tagType: tagType, - unitCount: unitCount, - valueOffset: valueOffset, - rawValueOffset: rawValueOffset, - rs: rs, - byteOrder: byteOrder, - } -} - -// String returns a stringified representation of the struct. -func (ite *IfdTagEntry) String() string { - return fmt.Sprintf("IfdTagEntry", ite.ifdIdentity.String(), ite.tagId, ite.tagType.String(), ite.unitCount) -} - -// TagName returns the name of the tag. This is determined else and set after -// the parse (since it's not actually stored in the stream). If it's empty, it -// is because it is an unknown tag (nonstandard or otherwise unavailable in the -// tag-index). -func (ite *IfdTagEntry) TagName() string { - return ite.tagName -} - -// setTagName sets the tag-name. This provides the name for convenience and -// efficiency by determining it when most efficient while we're parsing rather -// than delegating it to the caller (or, worse, the user). 
-func (ite *IfdTagEntry) setTagName(tagName string) { - ite.tagName = tagName -} - -// IfdPath returns the fully-qualified path of the IFD that owns this tag. -func (ite *IfdTagEntry) IfdPath() string { - return ite.ifdIdentity.String() -} - -// TagId returns the ID of the tag that we represent. The combination of -// (IfdPath(), TagId()) is unique. -func (ite *IfdTagEntry) TagId() uint16 { - return ite.tagId -} - -// IsThumbnailOffset returns true if the tag has the IFD and tag-ID of a -// thumbnail offset. -func (ite *IfdTagEntry) IsThumbnailOffset() bool { - return ite.tagId == ThumbnailOffsetTagId && ite.ifdIdentity.String() == ThumbnailFqIfdPath -} - -// IsThumbnailSize returns true if the tag has the IFD and tag-ID of a thumbnail -// size. -func (ite *IfdTagEntry) IsThumbnailSize() bool { - return ite.tagId == ThumbnailSizeTagId && ite.ifdIdentity.String() == ThumbnailFqIfdPath -} - -// TagType is the type of value for this tag. -func (ite *IfdTagEntry) TagType() exifcommon.TagTypePrimitive { - return ite.tagType -} - -// updateTagType sets an alternatively interpreted tag-type. -func (ite *IfdTagEntry) updateTagType(tagType exifcommon.TagTypePrimitive) { - ite.tagType = tagType -} - -// UnitCount returns the unit-count of the tag's value. -func (ite *IfdTagEntry) UnitCount() uint32 { - return ite.unitCount -} - -// updateUnitCount sets an alternatively interpreted unit-count. -func (ite *IfdTagEntry) updateUnitCount(unitCount uint32) { - ite.unitCount = unitCount -} - -// getValueOffset is the four-byte offset converted to an integer to point to -// the location of its value in the EXIF block. The "get" parameter is obviously -// used in order to differentiate the naming of the method from the field. -func (ite *IfdTagEntry) getValueOffset() uint32 { - return ite.valueOffset -} - -// GetRawBytes renders a specific list of bytes from the value in this tag. 
-func (ite *IfdTagEntry) GetRawBytes() (rawBytes []byte, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - valueContext := ite.getValueContext() - - if ite.tagType == exifcommon.TypeUndefined { - value, err := exifundefined.Decode(valueContext) - if err != nil { - if err == exifcommon.ErrUnhandledUndefinedTypedTag { - ite.setIsUnhandledUnknown(true) - return nil, exifundefined.ErrUnparseableValue - } else if err == exifundefined.ErrUnparseableValue { - return nil, err - } else { - log.Panic(err) - } - } - - // Encode it back, in order to get the raw bytes. This is the best, - // general way to do it with an undefined tag. - - rawBytes, _, err := exifundefined.Encode(value, ite.byteOrder) - log.PanicIf(err) - - return rawBytes, nil - } - - rawBytes, err = valueContext.ReadRawEncoded() - log.PanicIf(err) - - return rawBytes, nil -} - -// Value returns the specific, parsed, typed value from the tag. -func (ite *IfdTagEntry) Value() (value interface{}, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - valueContext := ite.getValueContext() - - if ite.tagType == exifcommon.TypeUndefined { - var err error - - value, err = exifundefined.Decode(valueContext) - if err != nil { - if err == exifcommon.ErrUnhandledUndefinedTypedTag || err == exifundefined.ErrUnparseableValue { - return nil, err - } - - log.Panic(err) - } - } else { - var err error - - value, err = valueContext.Values() - log.PanicIf(err) - } - - return value, nil -} - -// Format returns the tag's value as a string. 
-func (ite *IfdTagEntry) Format() (phrase string, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - value, err := ite.Value() - if err != nil { - if err == exifcommon.ErrUnhandledUndefinedTypedTag { - return exifundefined.UnparseableUnknownTagValuePlaceholder, nil - } else if err == exifundefined.ErrUnparseableValue { - return exifundefined.UnparseableHandledTagValuePlaceholder, nil - } - - log.Panic(err) - } - - phrase, err = exifcommon.FormatFromType(value, false) - log.PanicIf(err) - - return phrase, nil -} - -// FormatFirst returns the same as Format() but only the first item. -func (ite *IfdTagEntry) FormatFirst() (phrase string, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // TODO(dustin): We should add a convenience type "timestamp", to simplify translating to and from the physical ASCII and provide validation. - - value, err := ite.Value() - if err != nil { - if err == exifcommon.ErrUnhandledUndefinedTypedTag { - return exifundefined.UnparseableUnknownTagValuePlaceholder, nil - } - - log.Panic(err) - } - - phrase, err = exifcommon.FormatFromType(value, true) - log.PanicIf(err) - - return phrase, nil -} - -func (ite *IfdTagEntry) setIsUnhandledUnknown(isUnhandledUnknown bool) { - ite.isUnhandledUnknown = isUnhandledUnknown -} - -// SetChildIfd sets child-IFD information (if we represent a child IFD). -func (ite *IfdTagEntry) SetChildIfd(ii *exifcommon.IfdIdentity) { - ite.childFqIfdPath = ii.String() - ite.childIfdPath = ii.UnindexedString() - ite.childIfdName = ii.Name() -} - -// ChildIfdName returns the name of the child IFD -func (ite *IfdTagEntry) ChildIfdName() string { - return ite.childIfdName -} - -// ChildIfdPath returns the path of the child IFD. 
-func (ite *IfdTagEntry) ChildIfdPath() string { - return ite.childIfdPath -} - -// ChildFqIfdPath returns the complete path of the child IFD along with the -// numeric suffixes differentiating sibling occurrences of the same type. "0" -// indices are omitted. -func (ite *IfdTagEntry) ChildFqIfdPath() string { - return ite.childFqIfdPath -} - -// IfdIdentity returns the IfdIdentity associated with this tag. -func (ite *IfdTagEntry) IfdIdentity() *exifcommon.IfdIdentity { - return ite.ifdIdentity -} - -func (ite *IfdTagEntry) getValueContext() *exifcommon.ValueContext { - return exifcommon.NewValueContext( - ite.ifdIdentity.String(), - ite.tagId, - ite.unitCount, - ite.valueOffset, - ite.rawValueOffset, - ite.rs, - ite.tagType, - ite.byteOrder) -} diff --git a/vendor/github.com/dsoprea/go-exif/v3/package.go b/vendor/github.com/dsoprea/go-exif/v3/package.go deleted file mode 100644 index 428f74e3a..000000000 --- a/vendor/github.com/dsoprea/go-exif/v3/package.go +++ /dev/null @@ -1,8 +0,0 @@ -// Package exif parses raw EXIF information given a block of raw EXIF data. It -// can also construct new EXIF information, and provides tools for doing so. -// This package is not involved with the parsing of particular file-formats. -// -// The EXIF data must first be extracted and then provided to us. Conversely, -// when constructing new EXIF data, the caller is responsible for packaging -// this in whichever format they require. -package exif diff --git a/vendor/github.com/dsoprea/go-exif/v3/tags.go b/vendor/github.com/dsoprea/go-exif/v3/tags.go deleted file mode 100644 index aca902c5d..000000000 --- a/vendor/github.com/dsoprea/go-exif/v3/tags.go +++ /dev/null @@ -1,475 +0,0 @@ -package exif - -import ( - "fmt" - "sync" - - "github.com/dsoprea/go-logging" - "gopkg.in/yaml.v2" - - "github.com/dsoprea/go-exif/v3/common" -) - -const ( - // IFD1 - - // ThumbnailFqIfdPath is the fully-qualified IFD path that the thumbnail - // must be found in. 
- ThumbnailFqIfdPath = "IFD1" - - // ThumbnailOffsetTagId returns the tag-ID of the thumbnail offset. - ThumbnailOffsetTagId = 0x0201 - - // ThumbnailSizeTagId returns the tag-ID of the thumbnail size. - ThumbnailSizeTagId = 0x0202 -) - -const ( - // GPS - - // TagGpsVersionId is the ID of the GPS version tag. - TagGpsVersionId = 0x0000 - - // TagLatitudeId is the ID of the GPS latitude tag. - TagLatitudeId = 0x0002 - - // TagLatitudeRefId is the ID of the GPS latitude orientation tag. - TagLatitudeRefId = 0x0001 - - // TagLongitudeId is the ID of the GPS longitude tag. - TagLongitudeId = 0x0004 - - // TagLongitudeRefId is the ID of the GPS longitude-orientation tag. - TagLongitudeRefId = 0x0003 - - // TagTimestampId is the ID of the GPS time tag. - TagTimestampId = 0x0007 - - // TagDatestampId is the ID of the GPS date tag. - TagDatestampId = 0x001d - - // TagAltitudeId is the ID of the GPS altitude tag. - TagAltitudeId = 0x0006 - - // TagAltitudeRefId is the ID of the GPS altitude-orientation tag. - TagAltitudeRefId = 0x0005 -) - -var ( - // tagsWithoutAlignment is a tag-lookup for tags whose value size won't - // necessarily be a multiple of its tag-type. - tagsWithoutAlignment = map[uint16]struct{}{ - // The thumbnail offset is stored as a long, but its data is a binary - // blob (not a slice of longs). - ThumbnailOffsetTagId: {}, - } -) - -var ( - tagsLogger = log.NewLogger("exif.tags") -) - -// File structures. - -type encodedTag struct { - // id is signed, here, because YAML doesn't have enough information to - // support unsigned. - Id int `yaml:"id"` - Name string `yaml:"name"` - TypeName string `yaml:"type_name"` - TypeNames []string `yaml:"type_names"` -} - -// Indexing structures. - -// IndexedTag describes one index lookup result. -type IndexedTag struct { - // Id is the tag-ID. - Id uint16 - - // Name is the tag name. - Name string - - // IfdPath is the proper IFD path of this tag. This is not fully-qualified. 
- IfdPath string - - // SupportedTypes is an unsorted list of allowed tag-types. - SupportedTypes []exifcommon.TagTypePrimitive -} - -// String returns a descriptive string. -func (it *IndexedTag) String() string { - return fmt.Sprintf("TAG", it.Id, it.Name, it.IfdPath) -} - -// IsName returns true if this tag matches the given tag name. -func (it *IndexedTag) IsName(ifdPath, name string) bool { - return it.Name == name && it.IfdPath == ifdPath -} - -// Is returns true if this tag matched the given tag ID. -func (it *IndexedTag) Is(ifdPath string, id uint16) bool { - return it.Id == id && it.IfdPath == ifdPath -} - -// GetEncodingType returns the largest type that this tag's value can occupy. -func (it *IndexedTag) GetEncodingType(value interface{}) exifcommon.TagTypePrimitive { - // For convenience, we handle encoding a `time.Time` directly. - if exifcommon.IsTime(value) == true { - // Timestamps are encoded as ASCII. - value = "" - } - - if len(it.SupportedTypes) == 0 { - log.Panicf("IndexedTag [%s] (%d) has no supported types.", it.IfdPath, it.Id) - } else if len(it.SupportedTypes) == 1 { - return it.SupportedTypes[0] - } - - supportsLong := false - supportsShort := false - supportsRational := false - supportsSignedRational := false - for _, supportedType := range it.SupportedTypes { - if supportedType == exifcommon.TypeLong { - supportsLong = true - } else if supportedType == exifcommon.TypeShort { - supportsShort = true - } else if supportedType == exifcommon.TypeRational { - supportsRational = true - } else if supportedType == exifcommon.TypeSignedRational { - supportsSignedRational = true - } - } - - // We specifically check for the cases that we know to expect. 
- - if supportsLong == true && supportsShort == true { - return exifcommon.TypeLong - } else if supportsRational == true && supportsSignedRational == true { - if value == nil { - log.Panicf("GetEncodingType: require value to be given") - } - - if _, ok := value.(exifcommon.SignedRational); ok == true { - return exifcommon.TypeSignedRational - } - - return exifcommon.TypeRational - } - - log.Panicf("WidestSupportedType() case is not handled for tag [%s] (0x%04x): %v", it.IfdPath, it.Id, it.SupportedTypes) - return 0 -} - -// DoesSupportType returns true if this tag can be found/decoded with this type. -func (it *IndexedTag) DoesSupportType(tagType exifcommon.TagTypePrimitive) bool { - // This is always a very small collection. So, we keep it unsorted. - for _, thisTagType := range it.SupportedTypes { - if thisTagType == tagType { - return true - } - } - - return false -} - -// TagIndex is a tag-lookup facility. -type TagIndex struct { - tagsByIfd map[string]map[uint16]*IndexedTag - tagsByIfdR map[string]map[string]*IndexedTag - - mutex sync.Mutex - - doUniversalSearch bool -} - -// NewTagIndex returns a new TagIndex struct. -func NewTagIndex() *TagIndex { - ti := new(TagIndex) - - ti.tagsByIfd = make(map[string]map[uint16]*IndexedTag) - ti.tagsByIfdR = make(map[string]map[string]*IndexedTag) - - return ti -} - -// SetUniversalSearch enables a fallback to matching tags under *any* IFD. -func (ti *TagIndex) SetUniversalSearch(flag bool) { - ti.doUniversalSearch = flag -} - -// UniversalSearch enables a fallback to matching tags under *any* IFD. -func (ti *TagIndex) UniversalSearch() bool { - return ti.doUniversalSearch -} - -// Add registers a new tag to be recognized during the parse. -func (ti *TagIndex) Add(it *IndexedTag) (err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - ti.mutex.Lock() - defer ti.mutex.Unlock() - - // Store by ID. 
- - family, found := ti.tagsByIfd[it.IfdPath] - if found == false { - family = make(map[uint16]*IndexedTag) - ti.tagsByIfd[it.IfdPath] = family - } - - if _, found := family[it.Id]; found == true { - log.Panicf("tag-ID defined more than once for IFD [%s]: (%02x)", it.IfdPath, it.Id) - } - - family[it.Id] = it - - // Store by name. - - familyR, found := ti.tagsByIfdR[it.IfdPath] - if found == false { - familyR = make(map[string]*IndexedTag) - ti.tagsByIfdR[it.IfdPath] = familyR - } - - if _, found := familyR[it.Name]; found == true { - log.Panicf("tag-name defined more than once for IFD [%s]: (%s)", it.IfdPath, it.Name) - } - - familyR[it.Name] = it - - return nil -} - -func (ti *TagIndex) getOne(ifdPath string, id uint16) (it *IndexedTag, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - if len(ti.tagsByIfd) == 0 { - err := LoadStandardTags(ti) - log.PanicIf(err) - } - - ti.mutex.Lock() - defer ti.mutex.Unlock() - - family, found := ti.tagsByIfd[ifdPath] - if found == false { - return nil, ErrTagNotFound - } - - it, found = family[id] - if found == false { - return nil, ErrTagNotFound - } - - return it, nil -} - -// Get returns information about the non-IFD tag given a tag ID. `ifdPath` must -// not be fully-qualified. -func (ti *TagIndex) Get(ii *exifcommon.IfdIdentity, id uint16) (it *IndexedTag, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - ifdPath := ii.UnindexedString() - - it, err = ti.getOne(ifdPath, id) - if err == nil { - return it, nil - } else if err != ErrTagNotFound { - log.Panic(err) - } - - if ti.doUniversalSearch == false { - return nil, ErrTagNotFound - } - - // We've been told to fallback to look for the tag in other IFDs. - - skipIfdPath := ii.UnindexedString() - - for currentIfdPath, _ := range ti.tagsByIfd { - if currentIfdPath == skipIfdPath { - // Skip the primary IFD, which has already been checked. 
- continue - } - - it, err = ti.getOne(currentIfdPath, id) - if err == nil { - tagsLogger.Warningf(nil, - "Found tag (0x%02x) in the wrong IFD: [%s] != [%s]", - id, currentIfdPath, ifdPath) - - return it, nil - } else if err != ErrTagNotFound { - log.Panic(err) - } - } - - return nil, ErrTagNotFound -} - -var ( - // tagGuessDefaultIfdIdentities describes which IFDs we'll look for a given - // tag-ID in, if it's not found where it's supposed to be. We suppose that - // Exif-IFD tags might be found in IFD0 or IFD1, or IFD0/IFD1 tags might be - // found in the Exif IFD. This is the only thing we've seen so far. So, this - // is the limit of our guessing. - tagGuessDefaultIfdIdentities = []*exifcommon.IfdIdentity{ - exifcommon.IfdExifStandardIfdIdentity, - exifcommon.IfdStandardIfdIdentity, - } -) - -// FindFirst looks for the given tag-ID in each of the given IFDs in the given -// order. If `fqIfdPaths` is `nil` then use a default search order. This defies -// the standard, which requires each tag to exist in certain IFDs. This is a -// contingency to make recommendations for malformed data. -// -// Things *can* end badly here, in that the same tag-ID in different IFDs might -// describe different data and different ata-types, and our decode might then -// produce binary and non-printable data. -func (ti *TagIndex) FindFirst(id uint16, typeId exifcommon.TagTypePrimitive, ifdIdentities []*exifcommon.IfdIdentity) (it *IndexedTag, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - if ifdIdentities == nil { - ifdIdentities = tagGuessDefaultIfdIdentities - } - - for _, ii := range ifdIdentities { - it, err := ti.Get(ii, id) - if err != nil { - if err == ErrTagNotFound { - continue - } - - log.Panic(err) - } - - // Even though the tag might be mislocated, the type should still be the - // same. Check this so we don't accidentally end-up on a complete - // irrelevant tag with a totally different data type. 
This attempts to - // mitigate producing garbage. - for _, supportedType := range it.SupportedTypes { - if supportedType == typeId { - return it, nil - } - } - } - - return nil, ErrTagNotFound -} - -// GetWithName returns information about the non-IFD tag given a tag name. -func (ti *TagIndex) GetWithName(ii *exifcommon.IfdIdentity, name string) (it *IndexedTag, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - if len(ti.tagsByIfdR) == 0 { - err := LoadStandardTags(ti) - log.PanicIf(err) - } - - ifdPath := ii.UnindexedString() - - it, found := ti.tagsByIfdR[ifdPath][name] - if found != true { - log.Panic(ErrTagNotFound) - } - - return it, nil -} - -// LoadStandardTags registers the tags that all devices/applications should -// support. -func LoadStandardTags(ti *TagIndex) (err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // Read static data. - - encodedIfds := make(map[string][]encodedTag) - - err = yaml.Unmarshal([]byte(tagsYaml), encodedIfds) - log.PanicIf(err) - - // Load structure. - - count := 0 - for ifdPath, tags := range encodedIfds { - for _, tagInfo := range tags { - tagId := uint16(tagInfo.Id) - tagName := tagInfo.Name - tagTypeName := tagInfo.TypeName - tagTypeNames := tagInfo.TypeNames - - if tagTypeNames == nil { - if tagTypeName == "" { - log.Panicf("no tag-types were given when registering standard tag [%s] (0x%04x) [%s]", ifdPath, tagId, tagName) - } - - tagTypeNames = []string{ - tagTypeName, - } - } else if tagTypeName != "" { - log.Panicf("both 'type_names' and 'type_name' were given when registering standard tag [%s] (0x%04x) [%s]", ifdPath, tagId, tagName) - } - - tagTypes := make([]exifcommon.TagTypePrimitive, 0) - for _, tagTypeName := range tagTypeNames { - - // TODO(dustin): Discard unsupported types. 
This helps us with non-standard types that have actually been found in real data, that we ignore for right now. e.g. SSHORT, FLOAT, DOUBLE - tagTypeId, found := exifcommon.GetTypeByName(tagTypeName) - if found == false { - tagsLogger.Warningf(nil, "Type [%s] for tag [%s] being loaded is not valid and is being ignored.", tagTypeName, tagName) - continue - } - - tagTypes = append(tagTypes, tagTypeId) - } - - if len(tagTypes) == 0 { - tagsLogger.Warningf(nil, "Tag [%s] (0x%04x) [%s] being loaded does not have any supported types and will not be registered.", ifdPath, tagId, tagName) - continue - } - - it := &IndexedTag{ - IfdPath: ifdPath, - Id: tagId, - Name: tagName, - SupportedTypes: tagTypes, - } - - err = ti.Add(it) - log.PanicIf(err) - - count++ - } - } - - tagsLogger.Debugf(nil, "(%d) tags loaded.", count) - - return nil -} diff --git a/vendor/github.com/dsoprea/go-exif/v3/tags_data.go b/vendor/github.com/dsoprea/go-exif/v3/tags_data.go deleted file mode 100644 index dcf0cc4f4..000000000 --- a/vendor/github.com/dsoprea/go-exif/v3/tags_data.go +++ /dev/null @@ -1,968 +0,0 @@ -package exif - -var ( - // From assets/tags.yaml . Needs to be here so it's embedded in the binary. - tagsYaml = ` -# Notes: -# -# This file was produced from http://www.exiv2.org/tags.html, using the included -# tool, though that document appears to have some duplicates when all IDs are -# supposed to be unique (EXIF information only has IDs, not IFDs; IFDs are -# determined by our pre-existing knowledge of those tags). -# -# The webpage that we've produced this file from appears to indicate that -# ImageWidth is represented by both 0x0100 and 0x0001 depending on whether the -# encoding is RGB or YCbCr. 
-IFD/Exif: -- id: 0x829a - name: ExposureTime - type_name: RATIONAL -- id: 0x829d - name: FNumber - type_name: RATIONAL -- id: 0x8822 - name: ExposureProgram - type_name: SHORT -- id: 0x8824 - name: SpectralSensitivity - type_name: ASCII -- id: 0x8827 - name: ISOSpeedRatings - type_name: SHORT -- id: 0x8828 - name: OECF - type_name: UNDEFINED -- id: 0x8830 - name: SensitivityType - type_name: SHORT -- id: 0x8831 - name: StandardOutputSensitivity - type_name: LONG -- id: 0x8832 - name: RecommendedExposureIndex - type_name: LONG -- id: 0x8833 - name: ISOSpeed - type_name: LONG -- id: 0x8834 - name: ISOSpeedLatitudeyyy - type_name: LONG -- id: 0x8835 - name: ISOSpeedLatitudezzz - type_name: LONG -- id: 0x9000 - name: ExifVersion - type_name: UNDEFINED -- id: 0x9003 - name: DateTimeOriginal - type_name: ASCII -- id: 0x9004 - name: DateTimeDigitized - type_name: ASCII -- id: 0x9010 - name: OffsetTime - type_name: ASCII -- id: 0x9011 - name: OffsetTimeOriginal - type_name: ASCII -- id: 0x9012 - name: OffsetTimeDigitized - type_name: ASCII -- id: 0x9101 - name: ComponentsConfiguration - type_name: UNDEFINED -- id: 0x9102 - name: CompressedBitsPerPixel - type_name: RATIONAL -- id: 0x9201 - name: ShutterSpeedValue - type_name: SRATIONAL -- id: 0x9202 - name: ApertureValue - type_name: RATIONAL -- id: 0x9203 - name: BrightnessValue - type_name: SRATIONAL -- id: 0x9204 - name: ExposureBiasValue - type_name: SRATIONAL -- id: 0x9205 - name: MaxApertureValue - type_name: RATIONAL -- id: 0x9206 - name: SubjectDistance - type_name: RATIONAL -- id: 0x9207 - name: MeteringMode - type_name: SHORT -- id: 0x9208 - name: LightSource - type_name: SHORT -- id: 0x9209 - name: Flash - type_name: SHORT -- id: 0x920a - name: FocalLength - type_name: RATIONAL -- id: 0x9214 - name: SubjectArea - type_name: SHORT -- id: 0x927c - name: MakerNote - type_name: UNDEFINED -- id: 0x9286 - name: UserComment - type_name: UNDEFINED -- id: 0x9290 - name: SubSecTime - type_name: ASCII -- id: 0x9291 - name: 
SubSecTimeOriginal - type_name: ASCII -- id: 0x9292 - name: SubSecTimeDigitized - type_name: ASCII -- id: 0xa000 - name: FlashpixVersion - type_name: UNDEFINED -- id: 0xa001 - name: ColorSpace - type_name: SHORT -- id: 0xa002 - name: PixelXDimension - type_names: [LONG, SHORT] -- id: 0xa003 - name: PixelYDimension - type_names: [LONG, SHORT] -- id: 0xa004 - name: RelatedSoundFile - type_name: ASCII -- id: 0xa005 - name: InteroperabilityTag - type_name: LONG -- id: 0xa20b - name: FlashEnergy - type_name: RATIONAL -- id: 0xa20c - name: SpatialFrequencyResponse - type_name: UNDEFINED -- id: 0xa20e - name: FocalPlaneXResolution - type_name: RATIONAL -- id: 0xa20f - name: FocalPlaneYResolution - type_name: RATIONAL -- id: 0xa210 - name: FocalPlaneResolutionUnit - type_name: SHORT -- id: 0xa214 - name: SubjectLocation - type_name: SHORT -- id: 0xa215 - name: ExposureIndex - type_name: RATIONAL -- id: 0xa217 - name: SensingMethod - type_name: SHORT -- id: 0xa300 - name: FileSource - type_name: UNDEFINED -- id: 0xa301 - name: SceneType - type_name: UNDEFINED -- id: 0xa302 - name: CFAPattern - type_name: UNDEFINED -- id: 0xa401 - name: CustomRendered - type_name: SHORT -- id: 0xa402 - name: ExposureMode - type_name: SHORT -- id: 0xa403 - name: WhiteBalance - type_name: SHORT -- id: 0xa404 - name: DigitalZoomRatio - type_name: RATIONAL -- id: 0xa405 - name: FocalLengthIn35mmFilm - type_name: SHORT -- id: 0xa406 - name: SceneCaptureType - type_name: SHORT -- id: 0xa407 - name: GainControl - type_name: SHORT -- id: 0xa408 - name: Contrast - type_name: SHORT -- id: 0xa409 - name: Saturation - type_name: SHORT -- id: 0xa40a - name: Sharpness - type_name: SHORT -- id: 0xa40b - name: DeviceSettingDescription - type_name: UNDEFINED -- id: 0xa40c - name: SubjectDistanceRange - type_name: SHORT -- id: 0xa420 - name: ImageUniqueID - type_name: ASCII -- id: 0xa430 - name: CameraOwnerName - type_name: ASCII -- id: 0xa431 - name: BodySerialNumber - type_name: ASCII -- id: 0xa432 - name: 
LensSpecification - type_name: RATIONAL -- id: 0xa433 - name: LensMake - type_name: ASCII -- id: 0xa434 - name: LensModel - type_name: ASCII -- id: 0xa435 - name: LensSerialNumber - type_name: ASCII -IFD/GPSInfo: -- id: 0x0000 - name: GPSVersionID - type_name: BYTE -- id: 0x0001 - name: GPSLatitudeRef - type_name: ASCII -- id: 0x0002 - name: GPSLatitude - type_name: RATIONAL -- id: 0x0003 - name: GPSLongitudeRef - type_name: ASCII -- id: 0x0004 - name: GPSLongitude - type_name: RATIONAL -- id: 0x0005 - name: GPSAltitudeRef - type_name: BYTE -- id: 0x0006 - name: GPSAltitude - type_name: RATIONAL -- id: 0x0007 - name: GPSTimeStamp - type_name: RATIONAL -- id: 0x0008 - name: GPSSatellites - type_name: ASCII -- id: 0x0009 - name: GPSStatus - type_name: ASCII -- id: 0x000a - name: GPSMeasureMode - type_name: ASCII -- id: 0x000b - name: GPSDOP - type_name: RATIONAL -- id: 0x000c - name: GPSSpeedRef - type_name: ASCII -- id: 0x000d - name: GPSSpeed - type_name: RATIONAL -- id: 0x000e - name: GPSTrackRef - type_name: ASCII -- id: 0x000f - name: GPSTrack - type_name: RATIONAL -- id: 0x0010 - name: GPSImgDirectionRef - type_name: ASCII -- id: 0x0011 - name: GPSImgDirection - type_name: RATIONAL -- id: 0x0012 - name: GPSMapDatum - type_name: ASCII -- id: 0x0013 - name: GPSDestLatitudeRef - type_name: ASCII -- id: 0x0014 - name: GPSDestLatitude - type_name: RATIONAL -- id: 0x0015 - name: GPSDestLongitudeRef - type_name: ASCII -- id: 0x0016 - name: GPSDestLongitude - type_name: RATIONAL -- id: 0x0017 - name: GPSDestBearingRef - type_name: ASCII -- id: 0x0018 - name: GPSDestBearing - type_name: RATIONAL -- id: 0x0019 - name: GPSDestDistanceRef - type_name: ASCII -- id: 0x001a - name: GPSDestDistance - type_name: RATIONAL -- id: 0x001b - name: GPSProcessingMethod - type_name: UNDEFINED -- id: 0x001c - name: GPSAreaInformation - type_name: UNDEFINED -- id: 0x001d - name: GPSDateStamp - type_name: ASCII -- id: 0x001e - name: GPSDifferential - type_name: SHORT -IFD: -- id: 0x000b - 
name: ProcessingSoftware - type_name: ASCII -- id: 0x00fe - name: NewSubfileType - type_name: LONG -- id: 0x00ff - name: SubfileType - type_name: SHORT -- id: 0x0100 - name: ImageWidth - type_names: [LONG, SHORT] -- id: 0x0101 - name: ImageLength - type_names: [LONG, SHORT] -- id: 0x0102 - name: BitsPerSample - type_name: SHORT -- id: 0x0103 - name: Compression - type_name: SHORT -- id: 0x0106 - name: PhotometricInterpretation - type_name: SHORT -- id: 0x0107 - name: Thresholding - type_name: SHORT -- id: 0x0108 - name: CellWidth - type_name: SHORT -- id: 0x0109 - name: CellLength - type_name: SHORT -- id: 0x010a - name: FillOrder - type_name: SHORT -- id: 0x010d - name: DocumentName - type_name: ASCII -- id: 0x010e - name: ImageDescription - type_name: ASCII -- id: 0x010f - name: Make - type_name: ASCII -- id: 0x0110 - name: Model - type_name: ASCII -- id: 0x0111 - name: StripOffsets - type_names: [LONG, SHORT] -- id: 0x0112 - name: Orientation - type_name: SHORT -- id: 0x0115 - name: SamplesPerPixel - type_name: SHORT -- id: 0x0116 - name: RowsPerStrip - type_names: [LONG, SHORT] -- id: 0x0117 - name: StripByteCounts - type_names: [LONG, SHORT] -- id: 0x011a - name: XResolution - type_name: RATIONAL -- id: 0x011b - name: YResolution - type_name: RATIONAL -- id: 0x011c - name: PlanarConfiguration - type_name: SHORT -- id: 0x0122 - name: GrayResponseUnit - type_name: SHORT -- id: 0x0123 - name: GrayResponseCurve - type_name: SHORT -- id: 0x0124 - name: T4Options - type_name: LONG -- id: 0x0125 - name: T6Options - type_name: LONG -- id: 0x0128 - name: ResolutionUnit - type_name: SHORT -- id: 0x0129 - name: PageNumber - type_name: SHORT -- id: 0x012d - name: TransferFunction - type_name: SHORT -- id: 0x0131 - name: Software - type_name: ASCII -- id: 0x0132 - name: DateTime - type_name: ASCII -- id: 0x013b - name: Artist - type_name: ASCII -- id: 0x013c - name: HostComputer - type_name: ASCII -- id: 0x013d - name: Predictor - type_name: SHORT -- id: 0x013e - name: 
WhitePoint - type_name: RATIONAL -- id: 0x013f - name: PrimaryChromaticities - type_name: RATIONAL -- id: 0x0140 - name: ColorMap - type_name: SHORT -- id: 0x0141 - name: HalftoneHints - type_name: SHORT -- id: 0x0142 - name: TileWidth - type_name: SHORT -- id: 0x0143 - name: TileLength - type_name: SHORT -- id: 0x0144 - name: TileOffsets - type_name: SHORT -- id: 0x0145 - name: TileByteCounts - type_name: SHORT -- id: 0x014a - name: SubIFDs - type_name: LONG -- id: 0x014c - name: InkSet - type_name: SHORT -- id: 0x014d - name: InkNames - type_name: ASCII -- id: 0x014e - name: NumberOfInks - type_name: SHORT -- id: 0x0150 - name: DotRange - type_name: BYTE -- id: 0x0151 - name: TargetPrinter - type_name: ASCII -- id: 0x0152 - name: ExtraSamples - type_name: SHORT -- id: 0x0153 - name: SampleFormat - type_name: SHORT -- id: 0x0154 - name: SMinSampleValue - type_name: SHORT -- id: 0x0155 - name: SMaxSampleValue - type_name: SHORT -- id: 0x0156 - name: TransferRange - type_name: SHORT -- id: 0x0157 - name: ClipPath - type_name: BYTE -- id: 0x015a - name: Indexed - type_name: SHORT -- id: 0x015b - name: JPEGTables - type_name: UNDEFINED -- id: 0x015f - name: OPIProxy - type_name: SHORT -- id: 0x0200 - name: JPEGProc - type_name: LONG -- id: 0x0201 - name: JPEGInterchangeFormat - type_name: LONG -- id: 0x0202 - name: JPEGInterchangeFormatLength - type_name: LONG -- id: 0x0203 - name: JPEGRestartInterval - type_name: SHORT -- id: 0x0205 - name: JPEGLosslessPredictors - type_name: SHORT -- id: 0x0206 - name: JPEGPointTransforms - type_name: SHORT -- id: 0x0207 - name: JPEGQTables - type_name: LONG -- id: 0x0208 - name: JPEGDCTables - type_name: LONG -- id: 0x0209 - name: JPEGACTables - type_name: LONG -- id: 0x0211 - name: YCbCrCoefficients - type_name: RATIONAL -- id: 0x0212 - name: YCbCrSubSampling - type_name: SHORT -- id: 0x0213 - name: YCbCrPositioning - type_name: SHORT -- id: 0x0214 - name: ReferenceBlackWhite - type_name: RATIONAL -- id: 0x02bc - name: XMLPacket - 
type_name: BYTE -- id: 0x4746 - name: Rating - type_name: SHORT -- id: 0x4749 - name: RatingPercent - type_name: SHORT -- id: 0x800d - name: ImageID - type_name: ASCII -- id: 0x828d - name: CFARepeatPatternDim - type_name: SHORT -- id: 0x828e - name: CFAPattern - type_name: BYTE -- id: 0x828f - name: BatteryLevel - type_name: RATIONAL -- id: 0x8298 - name: Copyright - type_name: ASCII -- id: 0x829a - name: ExposureTime -# NOTE(dustin): SRATIONAL isn't mentioned in the standard, but we have seen it in real data. - type_names: [RATIONAL, SRATIONAL] -- id: 0x829d - name: FNumber -# NOTE(dustin): SRATIONAL isn't mentioned in the standard, but we have seen it in real data. - type_names: [RATIONAL, SRATIONAL] -- id: 0x83bb - name: IPTCNAA - type_name: LONG -- id: 0x8649 - name: ImageResources - type_name: BYTE -- id: 0x8769 - name: ExifTag - type_name: LONG -- id: 0x8773 - name: InterColorProfile - type_name: UNDEFINED -- id: 0x8822 - name: ExposureProgram - type_name: SHORT -- id: 0x8824 - name: SpectralSensitivity - type_name: ASCII -- id: 0x8825 - name: GPSTag - type_name: LONG -- id: 0x8827 - name: ISOSpeedRatings - type_name: SHORT -- id: 0x8828 - name: OECF - type_name: UNDEFINED -- id: 0x8829 - name: Interlace - type_name: SHORT -- id: 0x882b - name: SelfTimerMode - type_name: SHORT -- id: 0x9003 - name: DateTimeOriginal - type_name: ASCII -- id: 0x9102 - name: CompressedBitsPerPixel - type_name: RATIONAL -- id: 0x9201 - name: ShutterSpeedValue - type_name: SRATIONAL -- id: 0x9202 - name: ApertureValue - type_name: RATIONAL -- id: 0x9203 - name: BrightnessValue - type_name: SRATIONAL -- id: 0x9204 - name: ExposureBiasValue - type_name: SRATIONAL -- id: 0x9205 - name: MaxApertureValue - type_name: RATIONAL -- id: 0x9206 - name: SubjectDistance - type_name: SRATIONAL -- id: 0x9207 - name: MeteringMode - type_name: SHORT -- id: 0x9208 - name: LightSource - type_name: SHORT -- id: 0x9209 - name: Flash - type_name: SHORT -- id: 0x920a - name: FocalLength - type_name: 
RATIONAL -- id: 0x920b - name: FlashEnergy - type_name: RATIONAL -- id: 0x920c - name: SpatialFrequencyResponse - type_name: UNDEFINED -- id: 0x920d - name: Noise - type_name: UNDEFINED -- id: 0x920e - name: FocalPlaneXResolution - type_name: RATIONAL -- id: 0x920f - name: FocalPlaneYResolution - type_name: RATIONAL -- id: 0x9210 - name: FocalPlaneResolutionUnit - type_name: SHORT -- id: 0x9211 - name: ImageNumber - type_name: LONG -- id: 0x9212 - name: SecurityClassification - type_name: ASCII -- id: 0x9213 - name: ImageHistory - type_name: ASCII -- id: 0x9214 - name: SubjectLocation - type_name: SHORT -- id: 0x9215 - name: ExposureIndex - type_name: RATIONAL -- id: 0x9216 - name: TIFFEPStandardID - type_name: BYTE -- id: 0x9217 - name: SensingMethod - type_name: SHORT -- id: 0x9c9b - name: XPTitle - type_name: BYTE -- id: 0x9c9c - name: XPComment - type_name: BYTE -- id: 0x9c9d - name: XPAuthor - type_name: BYTE -- id: 0x9c9e - name: XPKeywords - type_name: BYTE -- id: 0x9c9f - name: XPSubject - type_name: BYTE -- id: 0xc4a5 - name: PrintImageMatching - type_name: UNDEFINED -- id: 0xc612 - name: DNGVersion - type_name: BYTE -- id: 0xc613 - name: DNGBackwardVersion - type_name: BYTE -- id: 0xc614 - name: UniqueCameraModel - type_name: ASCII -- id: 0xc615 - name: LocalizedCameraModel - type_name: BYTE -- id: 0xc616 - name: CFAPlaneColor - type_name: BYTE -- id: 0xc617 - name: CFALayout - type_name: SHORT -- id: 0xc618 - name: LinearizationTable - type_name: SHORT -- id: 0xc619 - name: BlackLevelRepeatDim - type_name: SHORT -- id: 0xc61a - name: BlackLevel - type_name: RATIONAL -- id: 0xc61b - name: BlackLevelDeltaH - type_name: SRATIONAL -- id: 0xc61c - name: BlackLevelDeltaV - type_name: SRATIONAL -- id: 0xc61d - name: WhiteLevel - type_name: SHORT -- id: 0xc61e - name: DefaultScale - type_name: RATIONAL -- id: 0xc61f - name: DefaultCropOrigin - type_name: SHORT -- id: 0xc620 - name: DefaultCropSize - type_name: SHORT -- id: 0xc621 - name: ColorMatrix1 - 
type_name: SRATIONAL -- id: 0xc622 - name: ColorMatrix2 - type_name: SRATIONAL -- id: 0xc623 - name: CameraCalibration1 - type_name: SRATIONAL -- id: 0xc624 - name: CameraCalibration2 - type_name: SRATIONAL -- id: 0xc625 - name: ReductionMatrix1 - type_name: SRATIONAL -- id: 0xc626 - name: ReductionMatrix2 - type_name: SRATIONAL -- id: 0xc627 - name: AnalogBalance - type_name: RATIONAL -- id: 0xc628 - name: AsShotNeutral - type_name: SHORT -- id: 0xc629 - name: AsShotWhiteXY - type_name: RATIONAL -- id: 0xc62a - name: BaselineExposure - type_name: SRATIONAL -- id: 0xc62b - name: BaselineNoise - type_name: RATIONAL -- id: 0xc62c - name: BaselineSharpness - type_name: RATIONAL -- id: 0xc62d - name: BayerGreenSplit - type_name: LONG -- id: 0xc62e - name: LinearResponseLimit - type_name: RATIONAL -- id: 0xc62f - name: CameraSerialNumber - type_name: ASCII -- id: 0xc630 - name: LensInfo - type_name: RATIONAL -- id: 0xc631 - name: ChromaBlurRadius - type_name: RATIONAL -- id: 0xc632 - name: AntiAliasStrength - type_name: RATIONAL -- id: 0xc633 - name: ShadowScale - type_name: SRATIONAL -- id: 0xc634 - name: DNGPrivateData - type_name: BYTE -- id: 0xc635 - name: MakerNoteSafety - type_name: SHORT -- id: 0xc65a - name: CalibrationIlluminant1 - type_name: SHORT -- id: 0xc65b - name: CalibrationIlluminant2 - type_name: SHORT -- id: 0xc65c - name: BestQualityScale - type_name: RATIONAL -- id: 0xc65d - name: RawDataUniqueID - type_name: BYTE -- id: 0xc68b - name: OriginalRawFileName - type_name: BYTE -- id: 0xc68c - name: OriginalRawFileData - type_name: UNDEFINED -- id: 0xc68d - name: ActiveArea - type_name: SHORT -- id: 0xc68e - name: MaskedAreas - type_name: SHORT -- id: 0xc68f - name: AsShotICCProfile - type_name: UNDEFINED -- id: 0xc690 - name: AsShotPreProfileMatrix - type_name: SRATIONAL -- id: 0xc691 - name: CurrentICCProfile - type_name: UNDEFINED -- id: 0xc692 - name: CurrentPreProfileMatrix - type_name: SRATIONAL -- id: 0xc6bf - name: ColorimetricReference - 
type_name: SHORT -- id: 0xc6f3 - name: CameraCalibrationSignature - type_name: BYTE -- id: 0xc6f4 - name: ProfileCalibrationSignature - type_name: BYTE -- id: 0xc6f6 - name: AsShotProfileName - type_name: BYTE -- id: 0xc6f7 - name: NoiseReductionApplied - type_name: RATIONAL -- id: 0xc6f8 - name: ProfileName - type_name: BYTE -- id: 0xc6f9 - name: ProfileHueSatMapDims - type_name: LONG -- id: 0xc6fd - name: ProfileEmbedPolicy - type_name: LONG -- id: 0xc6fe - name: ProfileCopyright - type_name: BYTE -- id: 0xc714 - name: ForwardMatrix1 - type_name: SRATIONAL -- id: 0xc715 - name: ForwardMatrix2 - type_name: SRATIONAL -- id: 0xc716 - name: PreviewApplicationName - type_name: BYTE -- id: 0xc717 - name: PreviewApplicationVersion - type_name: BYTE -- id: 0xc718 - name: PreviewSettingsName - type_name: BYTE -- id: 0xc719 - name: PreviewSettingsDigest - type_name: BYTE -- id: 0xc71a - name: PreviewColorSpace - type_name: LONG -- id: 0xc71b - name: PreviewDateTime - type_name: ASCII -- id: 0xc71c - name: RawImageDigest - type_name: UNDEFINED -- id: 0xc71d - name: OriginalRawFileDigest - type_name: UNDEFINED -- id: 0xc71e - name: SubTileBlockSize - type_name: LONG -- id: 0xc71f - name: RowInterleaveFactor - type_name: LONG -- id: 0xc725 - name: ProfileLookTableDims - type_name: LONG -- id: 0xc740 - name: OpcodeList1 - type_name: UNDEFINED -- id: 0xc741 - name: OpcodeList2 - type_name: UNDEFINED -- id: 0xc74e - name: OpcodeList3 - type_name: UNDEFINED -# This tag may be used to specify the size of raster pixel spacing in the -# model space units, when the raster space can be embedded in the model space -# coordinate system without rotation, and consists of the following 3 values: -# ModelPixelScaleTag = (ScaleX, ScaleY, ScaleZ) -# where ScaleX and ScaleY give the horizontal and vertical spacing of raster -# pixels. 
The ScaleZ is primarily used to map the pixel value of a digital -# elevation model into the correct Z-scale, and so for most other purposes -# this value should be zero (since most model spaces are 2-D, with Z=0). -# Source: http://geotiff.maptools.org/spec/geotiff2.6.html#2.6.1 -- id: 0x830e - name: ModelPixelScaleTag - type_name: DOUBLE -# This tag stores raster->model tiepoint pairs in the order -# ModelTiepointTag = (...,I,J,K, X,Y,Z...), -# where (I,J,K) is the point at location (I,J) in raster space with -# pixel-value K, and (X,Y,Z) is a vector in model space. In most cases the -# model space is only two-dimensional, in which case both K and Z should be -# set to zero; this third dimension is provided in anticipation of future -# support for 3D digital elevation models and vertical coordinate systems. -# Source: http://geotiff.maptools.org/spec/geotiff2.6.html#2.6.1 -- id: 0x8482 - name: ModelTiepointTag - type_name: DOUBLE -# This tag may be used to specify the transformation matrix between the -# raster space (and its dependent pixel-value space) and the (possibly 3D) -# model space. 
-# Source: http://geotiff.maptools.org/spec/geotiff2.6.html#2.6.1 -- id: 0x85d8 - name: ModelTransformationTag - type_name: DOUBLE -IFD/Exif/Iop: -- id: 0x0001 - name: InteroperabilityIndex - type_name: ASCII -- id: 0x0002 - name: InteroperabilityVersion - type_name: UNDEFINED -- id: 0x1000 - name: RelatedImageFileFormat - type_name: ASCII -- id: 0x1001 - name: RelatedImageWidth - type_name: LONG -- id: 0x1002 - name: RelatedImageLength - type_name: LONG -` -) diff --git a/vendor/github.com/dsoprea/go-exif/v3/testing_common.go b/vendor/github.com/dsoprea/go-exif/v3/testing_common.go deleted file mode 100644 index 061276430..000000000 --- a/vendor/github.com/dsoprea/go-exif/v3/testing_common.go +++ /dev/null @@ -1,188 +0,0 @@ -package exif - -import ( - "path" - "reflect" - "testing" - - "io/ioutil" - - "github.com/dsoprea/go-logging" - - "github.com/dsoprea/go-exif/v3/common" -) - -var ( - testExifData []byte -) - -func getExifSimpleTestIb() *IfdBuilder { - defer func() { - if state := recover(); state != nil { - err := log.Wrap(state.(error)) - log.Panic(err) - } - }() - - im := exifcommon.NewIfdMapping() - - err := exifcommon.LoadStandardIfds(im) - log.PanicIf(err) - - ti := NewTagIndex() - ib := NewIfdBuilder(im, ti, exifcommon.IfdStandardIfdIdentity, exifcommon.TestDefaultByteOrder) - - err = ib.AddStandard(0x000b, "asciivalue") - log.PanicIf(err) - - err = ib.AddStandard(0x00ff, []uint16{0x1122}) - log.PanicIf(err) - - err = ib.AddStandard(0x0100, []uint32{0x33445566}) - log.PanicIf(err) - - err = ib.AddStandard(0x013e, []exifcommon.Rational{{Numerator: 0x11112222, Denominator: 0x33334444}}) - log.PanicIf(err) - - return ib -} - -func getExifSimpleTestIbBytes() []byte { - defer func() { - if state := recover(); state != nil { - err := log.Wrap(state.(error)) - log.Panic(err) - } - }() - - im := exifcommon.NewIfdMapping() - - err := exifcommon.LoadStandardIfds(im) - log.PanicIf(err) - - ti := NewTagIndex() - ib := NewIfdBuilder(im, ti, 
exifcommon.IfdStandardIfdIdentity, exifcommon.TestDefaultByteOrder) - - err = ib.AddStandard(0x000b, "asciivalue") - log.PanicIf(err) - - err = ib.AddStandard(0x00ff, []uint16{0x1122}) - log.PanicIf(err) - - err = ib.AddStandard(0x0100, []uint32{0x33445566}) - log.PanicIf(err) - - err = ib.AddStandard(0x013e, []exifcommon.Rational{{Numerator: 0x11112222, Denominator: 0x33334444}}) - log.PanicIf(err) - - ibe := NewIfdByteEncoder() - - exifData, err := ibe.EncodeToExif(ib) - log.PanicIf(err) - - return exifData -} - -func validateExifSimpleTestIb(exifData []byte, t *testing.T) { - defer func() { - if state := recover(); state != nil { - err := log.Wrap(state.(error)) - log.Panic(err) - } - }() - - im := exifcommon.NewIfdMapping() - - err := exifcommon.LoadStandardIfds(im) - log.PanicIf(err) - - ti := NewTagIndex() - - eh, index, err := Collect(im, ti, exifData) - log.PanicIf(err) - - if eh.ByteOrder != exifcommon.TestDefaultByteOrder { - t.Fatalf("EXIF byte-order is not correct: %v", eh.ByteOrder) - } else if eh.FirstIfdOffset != ExifDefaultFirstIfdOffset { - t.Fatalf("EXIF first IFD-offset not correct: (0x%02x)", eh.FirstIfdOffset) - } - - if len(index.Ifds) != 1 { - t.Fatalf("There wasn't exactly one IFD decoded: (%d)", len(index.Ifds)) - } - - ifd := index.RootIfd - - if ifd.ByteOrder() != exifcommon.TestDefaultByteOrder { - t.Fatalf("IFD byte-order not correct.") - } else if ifd.ifdIdentity.UnindexedString() != exifcommon.IfdStandardIfdIdentity.UnindexedString() { - t.Fatalf("IFD name not correct.") - } else if ifd.ifdIdentity.Index() != 0 { - t.Fatalf("IFD index not zero: (%d)", ifd.ifdIdentity.Index()) - } else if ifd.Offset() != uint32(0x0008) { - t.Fatalf("IFD offset not correct.") - } else if len(ifd.Entries()) != 4 { - t.Fatalf("IFD number of entries not correct: (%d)", len(ifd.Entries())) - } else if ifd.nextIfdOffset != uint32(0) { - t.Fatalf("Next-IFD offset is non-zero.") - } else if ifd.nextIfd != nil { - t.Fatalf("Next-IFD pointer is non-nil.") - } - 
- // Verify the values by using the actual, original types (this is awesome). - - expected := []struct { - tagId uint16 - value interface{} - }{ - {tagId: 0x000b, value: "asciivalue"}, - {tagId: 0x00ff, value: []uint16{0x1122}}, - {tagId: 0x0100, value: []uint32{0x33445566}}, - {tagId: 0x013e, value: []exifcommon.Rational{{Numerator: 0x11112222, Denominator: 0x33334444}}}, - } - - for i, ite := range ifd.Entries() { - if ite.TagId() != expected[i].tagId { - t.Fatalf("Tag-ID for entry (%d) not correct: (0x%02x) != (0x%02x)", i, ite.TagId(), expected[i].tagId) - } - - value, err := ite.Value() - log.PanicIf(err) - - if reflect.DeepEqual(value, expected[i].value) != true { - t.Fatalf("Value for entry (%d) not correct: [%v] != [%v]", i, value, expected[i].value) - } - } -} - -func getTestImageFilepath() string { - assetsPath := exifcommon.GetTestAssetsPath() - testImageFilepath := path.Join(assetsPath, "NDM_8901.jpg") - return testImageFilepath -} - -func getTestExifData() []byte { - if testExifData == nil { - assetsPath := exifcommon.GetTestAssetsPath() - filepath := path.Join(assetsPath, "NDM_8901.jpg.exif") - - var err error - - testExifData, err = ioutil.ReadFile(filepath) - log.PanicIf(err) - } - - return testExifData -} - -func getTestGpsImageFilepath() string { - assetsPath := exifcommon.GetTestAssetsPath() - testGpsImageFilepath := path.Join(assetsPath, "gps.jpg") - return testGpsImageFilepath -} - -func getTestGeotiffFilepath() string { - assetsPath := exifcommon.GetTestAssetsPath() - testGeotiffFilepath := path.Join(assetsPath, "geotiff_example.tif") - return testGeotiffFilepath -} diff --git a/vendor/github.com/dsoprea/go-exif/v3/undefined/README.md b/vendor/github.com/dsoprea/go-exif/v3/undefined/README.md deleted file mode 100644 index d2caa6e51..000000000 --- a/vendor/github.com/dsoprea/go-exif/v3/undefined/README.md +++ /dev/null @@ -1,4 +0,0 @@ - -## 0xa40b - -The specification is not specific/clear enough to be handled. 
Without a working example ,we're deferring until some point in the future when either we or someone else has a better understanding. diff --git a/vendor/github.com/dsoprea/go-exif/v3/undefined/accessor.go b/vendor/github.com/dsoprea/go-exif/v3/undefined/accessor.go deleted file mode 100644 index 11a21e1f0..000000000 --- a/vendor/github.com/dsoprea/go-exif/v3/undefined/accessor.go +++ /dev/null @@ -1,62 +0,0 @@ -package exifundefined - -import ( - "encoding/binary" - - "github.com/dsoprea/go-logging" - - "github.com/dsoprea/go-exif/v3/common" -) - -// Encode encodes the given encodeable undefined value to bytes. -func Encode(value EncodeableValue, byteOrder binary.ByteOrder) (encoded []byte, unitCount uint32, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - encoderName := value.EncoderName() - - encoder, found := encoders[encoderName] - if found == false { - log.Panicf("no encoder registered for type [%s]", encoderName) - } - - encoded, unitCount, err = encoder.Encode(value, byteOrder) - log.PanicIf(err) - - return encoded, unitCount, nil -} - -// Decode constructs a value from raw encoded bytes -func Decode(valueContext *exifcommon.ValueContext) (value EncodeableValue, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - uth := UndefinedTagHandle{ - IfdPath: valueContext.IfdPath(), - TagId: valueContext.TagId(), - } - - decoder, found := decoders[uth] - if found == false { - // We have no choice but to return the error. We have no way of knowing how - // much data there is without already knowing what data-type this tag is. 
- return nil, exifcommon.ErrUnhandledUndefinedTypedTag - } - - value, err = decoder.Decode(valueContext) - if err != nil { - if err == ErrUnparseableValue { - return nil, err - } - - log.Panic(err) - } - - return value, nil -} diff --git a/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_8828_oecf.go b/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_8828_oecf.go deleted file mode 100644 index 26f3675ab..000000000 --- a/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_8828_oecf.go +++ /dev/null @@ -1,148 +0,0 @@ -package exifundefined - -import ( - "bytes" - "fmt" - - "encoding/binary" - - "github.com/dsoprea/go-logging" - - "github.com/dsoprea/go-exif/v3/common" -) - -type Tag8828Oecf struct { - Columns uint16 - Rows uint16 - ColumnNames []string - Values []exifcommon.SignedRational -} - -func (oecf Tag8828Oecf) String() string { - return fmt.Sprintf("Tag8828Oecf", oecf.Columns, oecf.Rows) -} - -func (oecf Tag8828Oecf) EncoderName() string { - return "Codec8828Oecf" -} - -type Codec8828Oecf struct { -} - -func (Codec8828Oecf) Encode(value interface{}, byteOrder binary.ByteOrder) (encoded []byte, unitCount uint32, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // TODO(dustin): Add test - - oecf, ok := value.(Tag8828Oecf) - if ok == false { - log.Panicf("can only encode a Tag8828Oecf") - } - - b := new(bytes.Buffer) - - err = binary.Write(b, byteOrder, oecf.Columns) - log.PanicIf(err) - - err = binary.Write(b, byteOrder, oecf.Rows) - log.PanicIf(err) - - for _, name := range oecf.ColumnNames { - _, err := b.Write([]byte(name)) - log.PanicIf(err) - - _, err = b.Write([]byte{0}) - log.PanicIf(err) - } - - ve := exifcommon.NewValueEncoder(byteOrder) - - ed, err := ve.Encode(oecf.Values) - log.PanicIf(err) - - _, err = b.Write(ed.Encoded) - log.PanicIf(err) - - return b.Bytes(), uint32(b.Len()), nil -} - -func (Codec8828Oecf) Decode(valueContext *exifcommon.ValueContext) (value 
EncodeableValue, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // TODO(dustin): Add test using known good data. - - valueContext.SetUndefinedValueType(exifcommon.TypeByte) - - valueBytes, err := valueContext.ReadBytes() - log.PanicIf(err) - - oecf := Tag8828Oecf{} - - oecf.Columns = valueContext.ByteOrder().Uint16(valueBytes[0:2]) - oecf.Rows = valueContext.ByteOrder().Uint16(valueBytes[2:4]) - - columnNames := make([]string, oecf.Columns) - - // startAt is where the current column name starts. - startAt := 4 - - // offset is our current position. - offset := startAt - - currentColumnNumber := uint16(0) - - for currentColumnNumber < oecf.Columns { - if valueBytes[offset] == 0 { - columnName := string(valueBytes[startAt:offset]) - if len(columnName) == 0 { - log.Panicf("SFR column (%d) has zero length", currentColumnNumber) - } - - columnNames[currentColumnNumber] = columnName - currentColumnNumber++ - - offset++ - startAt = offset - continue - } - - offset++ - } - - oecf.ColumnNames = columnNames - - rawRationalBytes := valueBytes[offset:] - - rationalSize := exifcommon.TypeSignedRational.Size() - if len(rawRationalBytes)%rationalSize > 0 { - log.Panicf("OECF signed-rationals not aligned: (%d) %% (%d) > 0", len(rawRationalBytes), rationalSize) - } - - rationalCount := len(rawRationalBytes) / rationalSize - - parser := new(exifcommon.Parser) - - byteOrder := valueContext.ByteOrder() - - items, err := parser.ParseSignedRationals(rawRationalBytes, uint32(rationalCount), byteOrder) - log.PanicIf(err) - - oecf.Values = items - - return oecf, nil -} - -func init() { - registerDecoder( - exifcommon.IfdExifStandardIfdIdentity.UnindexedString(), - 0x8828, - Codec8828Oecf{}) -} diff --git a/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_9000_exif_version.go b/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_9000_exif_version.go deleted file mode 100644 index 8f18c8114..000000000 --- 
a/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_9000_exif_version.go +++ /dev/null @@ -1,69 +0,0 @@ -package exifundefined - -import ( - "encoding/binary" - - "github.com/dsoprea/go-logging" - - "github.com/dsoprea/go-exif/v3/common" -) - -type Tag9000ExifVersion struct { - ExifVersion string -} - -func (Tag9000ExifVersion) EncoderName() string { - return "Codec9000ExifVersion" -} - -func (ev Tag9000ExifVersion) String() string { - return ev.ExifVersion -} - -type Codec9000ExifVersion struct { -} - -func (Codec9000ExifVersion) Encode(value interface{}, byteOrder binary.ByteOrder) (encoded []byte, unitCount uint32, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - s, ok := value.(Tag9000ExifVersion) - if ok == false { - log.Panicf("can only encode a Tag9000ExifVersion") - } - - return []byte(s.ExifVersion), uint32(len(s.ExifVersion)), nil -} - -func (Codec9000ExifVersion) Decode(valueContext *exifcommon.ValueContext) (value EncodeableValue, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - valueContext.SetUndefinedValueType(exifcommon.TypeAsciiNoNul) - - valueString, err := valueContext.ReadAsciiNoNul() - log.PanicIf(err) - - ev := Tag9000ExifVersion{ - ExifVersion: valueString, - } - - return ev, nil -} - -func init() { - registerEncoder( - Tag9000ExifVersion{}, - Codec9000ExifVersion{}) - - registerDecoder( - exifcommon.IfdExifStandardIfdIdentity.UnindexedString(), - 0x9000, - Codec9000ExifVersion{}) -} diff --git a/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_9101_components_configuration.go b/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_9101_components_configuration.go deleted file mode 100644 index e357fe0a6..000000000 --- a/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_9101_components_configuration.go +++ /dev/null @@ -1,124 +0,0 @@ -package exifundefined - -import ( - "bytes" - "fmt" - - 
"encoding/binary" - - "github.com/dsoprea/go-logging" - - "github.com/dsoprea/go-exif/v3/common" -) - -const ( - TagUndefinedType_9101_ComponentsConfiguration_Channel_Y = 0x1 - TagUndefinedType_9101_ComponentsConfiguration_Channel_Cb = 0x2 - TagUndefinedType_9101_ComponentsConfiguration_Channel_Cr = 0x3 - TagUndefinedType_9101_ComponentsConfiguration_Channel_R = 0x4 - TagUndefinedType_9101_ComponentsConfiguration_Channel_G = 0x5 - TagUndefinedType_9101_ComponentsConfiguration_Channel_B = 0x6 -) - -const ( - TagUndefinedType_9101_ComponentsConfiguration_OTHER = iota - TagUndefinedType_9101_ComponentsConfiguration_RGB = iota - TagUndefinedType_9101_ComponentsConfiguration_YCBCR = iota -) - -var ( - TagUndefinedType_9101_ComponentsConfiguration_Names = map[int]string{ - TagUndefinedType_9101_ComponentsConfiguration_OTHER: "OTHER", - TagUndefinedType_9101_ComponentsConfiguration_RGB: "RGB", - TagUndefinedType_9101_ComponentsConfiguration_YCBCR: "YCBCR", - } - - TagUndefinedType_9101_ComponentsConfiguration_Configurations = map[int][]byte{ - TagUndefinedType_9101_ComponentsConfiguration_RGB: { - TagUndefinedType_9101_ComponentsConfiguration_Channel_R, - TagUndefinedType_9101_ComponentsConfiguration_Channel_G, - TagUndefinedType_9101_ComponentsConfiguration_Channel_B, - 0, - }, - - TagUndefinedType_9101_ComponentsConfiguration_YCBCR: { - TagUndefinedType_9101_ComponentsConfiguration_Channel_Y, - TagUndefinedType_9101_ComponentsConfiguration_Channel_Cb, - TagUndefinedType_9101_ComponentsConfiguration_Channel_Cr, - 0, - }, - } -) - -type TagExif9101ComponentsConfiguration struct { - ConfigurationId int - ConfigurationBytes []byte -} - -func (TagExif9101ComponentsConfiguration) EncoderName() string { - return "CodecExif9101ComponentsConfiguration" -} - -func (cc TagExif9101ComponentsConfiguration) String() string { - return fmt.Sprintf("Exif9101ComponentsConfiguration", TagUndefinedType_9101_ComponentsConfiguration_Names[cc.ConfigurationId], cc.ConfigurationBytes) -} - 
-type CodecExif9101ComponentsConfiguration struct { -} - -func (CodecExif9101ComponentsConfiguration) Encode(value interface{}, byteOrder binary.ByteOrder) (encoded []byte, unitCount uint32, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - cc, ok := value.(TagExif9101ComponentsConfiguration) - if ok == false { - log.Panicf("can only encode a TagExif9101ComponentsConfiguration") - } - - return cc.ConfigurationBytes, uint32(len(cc.ConfigurationBytes)), nil -} - -func (CodecExif9101ComponentsConfiguration) Decode(valueContext *exifcommon.ValueContext) (value EncodeableValue, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - valueContext.SetUndefinedValueType(exifcommon.TypeByte) - - valueBytes, err := valueContext.ReadBytes() - log.PanicIf(err) - - for configurationId, configurationBytes := range TagUndefinedType_9101_ComponentsConfiguration_Configurations { - if bytes.Equal(configurationBytes, valueBytes) == true { - cc := TagExif9101ComponentsConfiguration{ - ConfigurationId: configurationId, - ConfigurationBytes: valueBytes, - } - - return cc, nil - } - } - - cc := TagExif9101ComponentsConfiguration{ - ConfigurationId: TagUndefinedType_9101_ComponentsConfiguration_OTHER, - ConfigurationBytes: valueBytes, - } - - return cc, nil -} - -func init() { - registerEncoder( - TagExif9101ComponentsConfiguration{}, - CodecExif9101ComponentsConfiguration{}) - - registerDecoder( - exifcommon.IfdExifStandardIfdIdentity.UnindexedString(), - 0x9101, - CodecExif9101ComponentsConfiguration{}) -} diff --git a/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_927C_maker_note.go b/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_927C_maker_note.go deleted file mode 100644 index f9cd2788e..000000000 --- a/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_927C_maker_note.go +++ /dev/null @@ -1,114 +0,0 @@ -package exifundefined - -import ( - "fmt" - 
"strings" - - "crypto/sha1" - "encoding/binary" - - "github.com/dsoprea/go-logging" - - "github.com/dsoprea/go-exif/v3/common" -) - -type Tag927CMakerNote struct { - MakerNoteType []byte - MakerNoteBytes []byte -} - -func (Tag927CMakerNote) EncoderName() string { - return "Codec927CMakerNote" -} - -func (mn Tag927CMakerNote) String() string { - parts := make([]string, len(mn.MakerNoteType)) - - for i, c := range mn.MakerNoteType { - parts[i] = fmt.Sprintf("%02x", c) - } - - h := sha1.New() - - _, err := h.Write(mn.MakerNoteBytes) - log.PanicIf(err) - - digest := h.Sum(nil) - - return fmt.Sprintf("MakerNote", strings.Join(parts, " "), len(mn.MakerNoteBytes), digest) -} - -type Codec927CMakerNote struct { -} - -func (Codec927CMakerNote) Encode(value interface{}, byteOrder binary.ByteOrder) (encoded []byte, unitCount uint32, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - mn, ok := value.(Tag927CMakerNote) - if ok == false { - log.Panicf("can only encode a Tag927CMakerNote") - } - - // TODO(dustin): Confirm this size against the specification. - - return mn.MakerNoteBytes, uint32(len(mn.MakerNoteBytes)), nil -} - -func (Codec927CMakerNote) Decode(valueContext *exifcommon.ValueContext) (value EncodeableValue, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // MakerNote - // TODO(dustin): !! This is the Wild Wild West. This very well might be a child IFD, but any and all OEM's define their own formats. If we're going to be writing changes and this is complete EXIF (which may not have the first eight bytes), it might be fine. However, if these are just IFDs they'll be relative to the main EXIF, this will invalidate the MakerNote data for IFDs and any other implementations that use offsets unless we can interpret them all. 
It be best to return to this later and just exclude this from being written for now, though means a loss of a wealth of image metadata. - // -> We can also just blindly try to interpret as an IFD and just validate that it's looks good (maybe it will even have a 'next ifd' pointer that we can validate is 0x0). - - valueContext.SetUndefinedValueType(exifcommon.TypeByte) - - valueBytes, err := valueContext.ReadBytes() - log.PanicIf(err) - - // TODO(dustin): Doesn't work, but here as an example. - // ie := NewIfdEnumerate(valueBytes, byteOrder) - - // // TODO(dustin): !! Validate types (might have proprietary types, but it might be worth splitting the list between valid and not valid; maybe fail if a certain proportion are invalid, or maybe aren't less then a certain small integer)? - // ii, err := ie.Collect(0x0) - - // for _, entry := range ii.RootIfd.Entries { - // fmt.Printf("ENTRY: 0x%02x %d\n", entry.TagId, entry.TagType) - // } - - var makerNoteType []byte - if len(valueBytes) >= 20 { - makerNoteType = valueBytes[:20] - } else { - makerNoteType = valueBytes - } - - mn := Tag927CMakerNote{ - MakerNoteType: makerNoteType, - - // MakerNoteBytes has the whole length of bytes. There's always - // the chance that the first 20 bytes includes actual data. 
- MakerNoteBytes: valueBytes, - } - - return mn, nil -} - -func init() { - registerEncoder( - Tag927CMakerNote{}, - Codec927CMakerNote{}) - - registerDecoder( - exifcommon.IfdExifStandardIfdIdentity.UnindexedString(), - 0x927c, - Codec927CMakerNote{}) -} diff --git a/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_9286_user_comment.go b/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_9286_user_comment.go deleted file mode 100644 index 320edc145..000000000 --- a/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_9286_user_comment.go +++ /dev/null @@ -1,142 +0,0 @@ -package exifundefined - -import ( - "bytes" - "fmt" - - "encoding/binary" - - "github.com/dsoprea/go-logging" - - "github.com/dsoprea/go-exif/v3/common" -) - -var ( - exif9286Logger = log.NewLogger("exifundefined.exif_9286_user_comment") -) - -const ( - TagUndefinedType_9286_UserComment_Encoding_ASCII = iota - TagUndefinedType_9286_UserComment_Encoding_JIS = iota - TagUndefinedType_9286_UserComment_Encoding_UNICODE = iota - TagUndefinedType_9286_UserComment_Encoding_UNDEFINED = iota -) - -var ( - TagUndefinedType_9286_UserComment_Encoding_Names = map[int]string{ - TagUndefinedType_9286_UserComment_Encoding_ASCII: "ASCII", - TagUndefinedType_9286_UserComment_Encoding_JIS: "JIS", - TagUndefinedType_9286_UserComment_Encoding_UNICODE: "UNICODE", - TagUndefinedType_9286_UserComment_Encoding_UNDEFINED: "UNDEFINED", - } - - TagUndefinedType_9286_UserComment_Encodings = map[int][]byte{ - TagUndefinedType_9286_UserComment_Encoding_ASCII: {'A', 'S', 'C', 'I', 'I', 0, 0, 0}, - TagUndefinedType_9286_UserComment_Encoding_JIS: {'J', 'I', 'S', 0, 0, 0, 0, 0}, - TagUndefinedType_9286_UserComment_Encoding_UNICODE: {'U', 'n', 'i', 'c', 'o', 'd', 'e', 0}, - TagUndefinedType_9286_UserComment_Encoding_UNDEFINED: {0, 0, 0, 0, 0, 0, 0, 0}, - } -) - -type Tag9286UserComment struct { - EncodingType int - EncodingBytes []byte -} - -func (Tag9286UserComment) EncoderName() string { - return "Codec9286UserComment" -} - -func 
(uc Tag9286UserComment) String() string { - var valuePhrase string - - if uc.EncodingType == TagUndefinedType_9286_UserComment_Encoding_ASCII { - return fmt.Sprintf("[ASCII] %s", string(uc.EncodingBytes)) - } else { - if len(uc.EncodingBytes) <= 8 { - valuePhrase = fmt.Sprintf("%v", uc.EncodingBytes) - } else { - valuePhrase = fmt.Sprintf("%v...", uc.EncodingBytes[:8]) - } - } - - return fmt.Sprintf("UserComment", len(uc.EncodingBytes), TagUndefinedType_9286_UserComment_Encoding_Names[uc.EncodingType], valuePhrase, len(uc.EncodingBytes)) -} - -type Codec9286UserComment struct { -} - -func (Codec9286UserComment) Encode(value interface{}, byteOrder binary.ByteOrder) (encoded []byte, unitCount uint32, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - uc, ok := value.(Tag9286UserComment) - if ok == false { - log.Panicf("can only encode a Tag9286UserComment") - } - - encodingTypeBytes, found := TagUndefinedType_9286_UserComment_Encodings[uc.EncodingType] - if found == false { - log.Panicf("encoding-type not valid for unknown-type tag 9286 (UserComment): (%d)", uc.EncodingType) - } - - encoded = make([]byte, len(uc.EncodingBytes)+8) - - copy(encoded[:8], encodingTypeBytes) - copy(encoded[8:], uc.EncodingBytes) - - // TODO(dustin): Confirm this size against the specification. 
- - return encoded, uint32(len(encoded)), nil -} - -func (Codec9286UserComment) Decode(valueContext *exifcommon.ValueContext) (value EncodeableValue, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - valueContext.SetUndefinedValueType(exifcommon.TypeByte) - - valueBytes, err := valueContext.ReadBytes() - log.PanicIf(err) - - if len(valueBytes) < 8 { - return nil, ErrUnparseableValue - } - - unknownUc := Tag9286UserComment{ - EncodingType: TagUndefinedType_9286_UserComment_Encoding_UNDEFINED, - EncodingBytes: []byte{}, - } - - encoding := valueBytes[:8] - for encodingIndex, encodingBytes := range TagUndefinedType_9286_UserComment_Encodings { - if bytes.Compare(encoding, encodingBytes) == 0 { - uc := Tag9286UserComment{ - EncodingType: encodingIndex, - EncodingBytes: valueBytes[8:], - } - - return uc, nil - } - } - - exif9286Logger.Warningf(nil, "User-comment encoding not valid. Returning 'unknown' type (the default).") - return unknownUc, nil -} - -func init() { - registerEncoder( - Tag9286UserComment{}, - Codec9286UserComment{}) - - registerDecoder( - exifcommon.IfdExifStandardIfdIdentity.UnindexedString(), - 0x9286, - Codec9286UserComment{}) -} diff --git a/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_A000_flashpix_version.go b/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_A000_flashpix_version.go deleted file mode 100644 index 4a0fefad7..000000000 --- a/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_A000_flashpix_version.go +++ /dev/null @@ -1,69 +0,0 @@ -package exifundefined - -import ( - "encoding/binary" - - "github.com/dsoprea/go-logging" - - "github.com/dsoprea/go-exif/v3/common" -) - -type TagA000FlashpixVersion struct { - FlashpixVersion string -} - -func (TagA000FlashpixVersion) EncoderName() string { - return "CodecA000FlashpixVersion" -} - -func (fv TagA000FlashpixVersion) String() string { - return fv.FlashpixVersion -} - -type CodecA000FlashpixVersion struct { -} - 
-func (CodecA000FlashpixVersion) Encode(value interface{}, byteOrder binary.ByteOrder) (encoded []byte, unitCount uint32, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - s, ok := value.(TagA000FlashpixVersion) - if ok == false { - log.Panicf("can only encode a TagA000FlashpixVersion") - } - - return []byte(s.FlashpixVersion), uint32(len(s.FlashpixVersion)), nil -} - -func (CodecA000FlashpixVersion) Decode(valueContext *exifcommon.ValueContext) (value EncodeableValue, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - valueContext.SetUndefinedValueType(exifcommon.TypeAsciiNoNul) - - valueString, err := valueContext.ReadAsciiNoNul() - log.PanicIf(err) - - fv := TagA000FlashpixVersion{ - FlashpixVersion: valueString, - } - - return fv, nil -} - -func init() { - registerEncoder( - TagA000FlashpixVersion{}, - CodecA000FlashpixVersion{}) - - registerDecoder( - exifcommon.IfdExifStandardIfdIdentity.UnindexedString(), - 0xa000, - CodecA000FlashpixVersion{}) -} diff --git a/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_A20C_spatial_frequency_response.go b/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_A20C_spatial_frequency_response.go deleted file mode 100644 index 0311175d6..000000000 --- a/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_A20C_spatial_frequency_response.go +++ /dev/null @@ -1,160 +0,0 @@ -package exifundefined - -import ( - "bytes" - "fmt" - - "encoding/binary" - - "github.com/dsoprea/go-logging" - - "github.com/dsoprea/go-exif/v3/common" -) - -type TagA20CSpatialFrequencyResponse struct { - Columns uint16 - Rows uint16 - ColumnNames []string - Values []exifcommon.Rational -} - -func (TagA20CSpatialFrequencyResponse) EncoderName() string { - return "CodecA20CSpatialFrequencyResponse" -} - -func (sfr TagA20CSpatialFrequencyResponse) String() string { - return fmt.Sprintf("CodecA20CSpatialFrequencyResponse", 
sfr.Columns, sfr.Rows) -} - -type CodecA20CSpatialFrequencyResponse struct { -} - -func (CodecA20CSpatialFrequencyResponse) Encode(value interface{}, byteOrder binary.ByteOrder) (encoded []byte, unitCount uint32, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // TODO(dustin): Add test. - - sfr, ok := value.(TagA20CSpatialFrequencyResponse) - if ok == false { - log.Panicf("can only encode a TagA20CSpatialFrequencyResponse") - } - - b := new(bytes.Buffer) - - err = binary.Write(b, byteOrder, sfr.Columns) - log.PanicIf(err) - - err = binary.Write(b, byteOrder, sfr.Rows) - log.PanicIf(err) - - // Write columns. - - for _, name := range sfr.ColumnNames { - _, err := b.WriteString(name) - log.PanicIf(err) - - err = b.WriteByte(0) - log.PanicIf(err) - } - - // Write values. - - ve := exifcommon.NewValueEncoder(byteOrder) - - ed, err := ve.Encode(sfr.Values) - log.PanicIf(err) - - _, err = b.Write(ed.Encoded) - log.PanicIf(err) - - encoded = b.Bytes() - - // TODO(dustin): Confirm this size against the specification. - - return encoded, uint32(len(encoded)), nil -} - -func (CodecA20CSpatialFrequencyResponse) Decode(valueContext *exifcommon.ValueContext) (value EncodeableValue, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // TODO(dustin): Add test using known good data. - - byteOrder := valueContext.ByteOrder() - - valueContext.SetUndefinedValueType(exifcommon.TypeByte) - - valueBytes, err := valueContext.ReadBytes() - log.PanicIf(err) - - sfr := TagA20CSpatialFrequencyResponse{} - - sfr.Columns = byteOrder.Uint16(valueBytes[0:2]) - sfr.Rows = byteOrder.Uint16(valueBytes[2:4]) - - columnNames := make([]string, sfr.Columns) - - // startAt is where the current column name starts. - startAt := 4 - - // offset is our current position. 
- offset := 4 - - currentColumnNumber := uint16(0) - - for currentColumnNumber < sfr.Columns { - if valueBytes[offset] == 0 { - columnName := string(valueBytes[startAt:offset]) - if len(columnName) == 0 { - log.Panicf("SFR column (%d) has zero length", currentColumnNumber) - } - - columnNames[currentColumnNumber] = columnName - currentColumnNumber++ - - offset++ - startAt = offset - continue - } - - offset++ - } - - sfr.ColumnNames = columnNames - - rawRationalBytes := valueBytes[offset:] - - rationalSize := exifcommon.TypeRational.Size() - if len(rawRationalBytes)%rationalSize > 0 { - log.Panicf("SFR rationals not aligned: (%d) %% (%d) > 0", len(rawRationalBytes), rationalSize) - } - - rationalCount := len(rawRationalBytes) / rationalSize - - parser := new(exifcommon.Parser) - - items, err := parser.ParseRationals(rawRationalBytes, uint32(rationalCount), byteOrder) - log.PanicIf(err) - - sfr.Values = items - - return sfr, nil -} - -func init() { - registerEncoder( - TagA20CSpatialFrequencyResponse{}, - CodecA20CSpatialFrequencyResponse{}) - - registerDecoder( - exifcommon.IfdExifStandardIfdIdentity.UnindexedString(), - 0xa20c, - CodecA20CSpatialFrequencyResponse{}) -} diff --git a/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_A300_file_source.go b/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_A300_file_source.go deleted file mode 100644 index f4f3a49f9..000000000 --- a/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_A300_file_source.go +++ /dev/null @@ -1,79 +0,0 @@ -package exifundefined - -import ( - "fmt" - - "encoding/binary" - - "github.com/dsoprea/go-logging" - - "github.com/dsoprea/go-exif/v3/common" -) - -type TagExifA300FileSource uint32 - -func (TagExifA300FileSource) EncoderName() string { - return "CodecExifA300FileSource" -} - -func (af TagExifA300FileSource) String() string { - return fmt.Sprintf("0x%08x", uint32(af)) -} - -const ( - TagUndefinedType_A300_SceneType_Others TagExifA300FileSource = 0 - 
TagUndefinedType_A300_SceneType_ScannerOfTransparentType TagExifA300FileSource = 1 - TagUndefinedType_A300_SceneType_ScannerOfReflexType TagExifA300FileSource = 2 - TagUndefinedType_A300_SceneType_Dsc TagExifA300FileSource = 3 -) - -type CodecExifA300FileSource struct { -} - -func (CodecExifA300FileSource) Encode(value interface{}, byteOrder binary.ByteOrder) (encoded []byte, unitCount uint32, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - st, ok := value.(TagExifA300FileSource) - if ok == false { - log.Panicf("can only encode a TagExifA300FileSource") - } - - ve := exifcommon.NewValueEncoder(byteOrder) - - ed, err := ve.Encode([]uint32{uint32(st)}) - log.PanicIf(err) - - // TODO(dustin): Confirm this size against the specification. It's non-specific about what type it is, but it looks to be no more than a single integer scalar. So, we're assuming it's a LONG. - - return ed.Encoded, 1, nil -} - -func (CodecExifA300FileSource) Decode(valueContext *exifcommon.ValueContext) (value EncodeableValue, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - valueContext.SetUndefinedValueType(exifcommon.TypeLong) - - valueLongs, err := valueContext.ReadLongs() - log.PanicIf(err) - - return TagExifA300FileSource(valueLongs[0]), nil -} - -func init() { - registerEncoder( - TagExifA300FileSource(0), - CodecExifA300FileSource{}) - - registerDecoder( - exifcommon.IfdExifStandardIfdIdentity.UnindexedString(), - 0xa300, - CodecExifA300FileSource{}) -} diff --git a/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_A301_scene_type.go b/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_A301_scene_type.go deleted file mode 100644 index a29fd7673..000000000 --- a/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_A301_scene_type.go +++ /dev/null @@ -1,76 +0,0 @@ -package exifundefined - -import ( - "fmt" - - "encoding/binary" - - 
"github.com/dsoprea/go-logging" - - "github.com/dsoprea/go-exif/v3/common" -) - -type TagExifA301SceneType uint32 - -func (TagExifA301SceneType) EncoderName() string { - return "CodecExifA301SceneType" -} - -func (st TagExifA301SceneType) String() string { - return fmt.Sprintf("0x%08x", uint32(st)) -} - -const ( - TagUndefinedType_A301_SceneType_DirectlyPhotographedImage TagExifA301SceneType = 1 -) - -type CodecExifA301SceneType struct { -} - -func (CodecExifA301SceneType) Encode(value interface{}, byteOrder binary.ByteOrder) (encoded []byte, unitCount uint32, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - st, ok := value.(TagExifA301SceneType) - if ok == false { - log.Panicf("can only encode a TagExif9101ComponentsConfiguration") - } - - ve := exifcommon.NewValueEncoder(byteOrder) - - ed, err := ve.Encode([]uint32{uint32(st)}) - log.PanicIf(err) - - // TODO(dustin): Confirm this size against the specification. It's non-specific about what type it is, but it looks to be no more than a single integer scalar. So, we're assuming it's a LONG. 
- - return ed.Encoded, uint32(int(ed.UnitCount)), nil -} - -func (CodecExifA301SceneType) Decode(valueContext *exifcommon.ValueContext) (value EncodeableValue, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - valueContext.SetUndefinedValueType(exifcommon.TypeLong) - - valueLongs, err := valueContext.ReadLongs() - log.PanicIf(err) - - return TagExifA301SceneType(valueLongs[0]), nil -} - -func init() { - registerEncoder( - TagExifA301SceneType(0), - CodecExifA301SceneType{}) - - registerDecoder( - exifcommon.IfdExifStandardIfdIdentity.UnindexedString(), - 0xa301, - CodecExifA301SceneType{}) -} diff --git a/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_A302_cfa_pattern.go b/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_A302_cfa_pattern.go deleted file mode 100644 index 88976296d..000000000 --- a/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_A302_cfa_pattern.go +++ /dev/null @@ -1,97 +0,0 @@ -package exifundefined - -import ( - "bytes" - "fmt" - - "encoding/binary" - - "github.com/dsoprea/go-logging" - - "github.com/dsoprea/go-exif/v3/common" -) - -type TagA302CfaPattern struct { - HorizontalRepeat uint16 - VerticalRepeat uint16 - CfaValue []byte -} - -func (TagA302CfaPattern) EncoderName() string { - return "CodecA302CfaPattern" -} - -func (cp TagA302CfaPattern) String() string { - return fmt.Sprintf("TagA302CfaPattern", cp.HorizontalRepeat, cp.VerticalRepeat, len(cp.CfaValue)) -} - -type CodecA302CfaPattern struct { -} - -func (CodecA302CfaPattern) Encode(value interface{}, byteOrder binary.ByteOrder) (encoded []byte, unitCount uint32, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // TODO(dustin): Add test. 
- - cp, ok := value.(TagA302CfaPattern) - if ok == false { - log.Panicf("can only encode a TagA302CfaPattern") - } - - b := new(bytes.Buffer) - - err = binary.Write(b, byteOrder, cp.HorizontalRepeat) - log.PanicIf(err) - - err = binary.Write(b, byteOrder, cp.VerticalRepeat) - log.PanicIf(err) - - _, err = b.Write(cp.CfaValue) - log.PanicIf(err) - - encoded = b.Bytes() - - // TODO(dustin): Confirm this size against the specification. - - return encoded, uint32(len(encoded)), nil -} - -func (CodecA302CfaPattern) Decode(valueContext *exifcommon.ValueContext) (value EncodeableValue, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // TODO(dustin): Add test using known good data. - - valueContext.SetUndefinedValueType(exifcommon.TypeByte) - - valueBytes, err := valueContext.ReadBytes() - log.PanicIf(err) - - cp := TagA302CfaPattern{} - - cp.HorizontalRepeat = valueContext.ByteOrder().Uint16(valueBytes[0:2]) - cp.VerticalRepeat = valueContext.ByteOrder().Uint16(valueBytes[2:4]) - - expectedLength := int(cp.HorizontalRepeat * cp.VerticalRepeat) - cp.CfaValue = valueBytes[4 : 4+expectedLength] - - return cp, nil -} - -func init() { - registerEncoder( - TagA302CfaPattern{}, - CodecA302CfaPattern{}) - - registerDecoder( - exifcommon.IfdExifStandardIfdIdentity.UnindexedString(), - 0xa302, - CodecA302CfaPattern{}) -} diff --git a/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_iop_0002_interop_version.go b/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_iop_0002_interop_version.go deleted file mode 100644 index 09ec98703..000000000 --- a/vendor/github.com/dsoprea/go-exif/v3/undefined/exif_iop_0002_interop_version.go +++ /dev/null @@ -1,69 +0,0 @@ -package exifundefined - -import ( - "encoding/binary" - - "github.com/dsoprea/go-logging" - - "github.com/dsoprea/go-exif/v3/common" -) - -type Tag0002InteropVersion struct { - InteropVersion string -} - -func (Tag0002InteropVersion) EncoderName() string { - 
return "Codec0002InteropVersion" -} - -func (iv Tag0002InteropVersion) String() string { - return iv.InteropVersion -} - -type Codec0002InteropVersion struct { -} - -func (Codec0002InteropVersion) Encode(value interface{}, byteOrder binary.ByteOrder) (encoded []byte, unitCount uint32, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - s, ok := value.(Tag0002InteropVersion) - if ok == false { - log.Panicf("can only encode a Tag0002InteropVersion") - } - - return []byte(s.InteropVersion), uint32(len(s.InteropVersion)), nil -} - -func (Codec0002InteropVersion) Decode(valueContext *exifcommon.ValueContext) (value EncodeableValue, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - valueContext.SetUndefinedValueType(exifcommon.TypeAsciiNoNul) - - valueString, err := valueContext.ReadAsciiNoNul() - log.PanicIf(err) - - iv := Tag0002InteropVersion{ - InteropVersion: valueString, - } - - return iv, nil -} - -func init() { - registerEncoder( - Tag0002InteropVersion{}, - Codec0002InteropVersion{}) - - registerDecoder( - exifcommon.IfdExifIopStandardIfdIdentity.UnindexedString(), - 0x0002, - Codec0002InteropVersion{}) -} diff --git a/vendor/github.com/dsoprea/go-exif/v3/undefined/gps_001B_gps_processing_method.go b/vendor/github.com/dsoprea/go-exif/v3/undefined/gps_001B_gps_processing_method.go deleted file mode 100644 index 6f54d2fc6..000000000 --- a/vendor/github.com/dsoprea/go-exif/v3/undefined/gps_001B_gps_processing_method.go +++ /dev/null @@ -1,65 +0,0 @@ -package exifundefined - -import ( - "encoding/binary" - - "github.com/dsoprea/go-logging" - - "github.com/dsoprea/go-exif/v3/common" -) - -type Tag001BGPSProcessingMethod struct { - string -} - -func (Tag001BGPSProcessingMethod) EncoderName() string { - return "Codec001BGPSProcessingMethod" -} - -func (gpm Tag001BGPSProcessingMethod) String() string { - return gpm.string -} - -type 
Codec001BGPSProcessingMethod struct { -} - -func (Codec001BGPSProcessingMethod) Encode(value interface{}, byteOrder binary.ByteOrder) (encoded []byte, unitCount uint32, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - s, ok := value.(Tag001BGPSProcessingMethod) - if ok == false { - log.Panicf("can only encode a Tag001BGPSProcessingMethod") - } - - return []byte(s.string), uint32(len(s.string)), nil -} - -func (Codec001BGPSProcessingMethod) Decode(valueContext *exifcommon.ValueContext) (value EncodeableValue, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - valueContext.SetUndefinedValueType(exifcommon.TypeAsciiNoNul) - - valueString, err := valueContext.ReadAsciiNoNul() - log.PanicIf(err) - - return Tag001BGPSProcessingMethod{valueString}, nil -} - -func init() { - registerEncoder( - Tag001BGPSProcessingMethod{}, - Codec001BGPSProcessingMethod{}) - - registerDecoder( - exifcommon.IfdGpsInfoStandardIfdIdentity.UnindexedString(), - 0x001b, - Codec001BGPSProcessingMethod{}) -} diff --git a/vendor/github.com/dsoprea/go-exif/v3/undefined/gps_001C_gps_area_information.go b/vendor/github.com/dsoprea/go-exif/v3/undefined/gps_001C_gps_area_information.go deleted file mode 100644 index ffdeb905b..000000000 --- a/vendor/github.com/dsoprea/go-exif/v3/undefined/gps_001C_gps_area_information.go +++ /dev/null @@ -1,65 +0,0 @@ -package exifundefined - -import ( - "encoding/binary" - - "github.com/dsoprea/go-logging" - - "github.com/dsoprea/go-exif/v3/common" -) - -type Tag001CGPSAreaInformation struct { - string -} - -func (Tag001CGPSAreaInformation) EncoderName() string { - return "Codec001CGPSAreaInformation" -} - -func (gai Tag001CGPSAreaInformation) String() string { - return gai.string -} - -type Codec001CGPSAreaInformation struct { -} - -func (Codec001CGPSAreaInformation) Encode(value interface{}, byteOrder binary.ByteOrder) (encoded []byte, 
unitCount uint32, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - s, ok := value.(Tag001CGPSAreaInformation) - if ok == false { - log.Panicf("can only encode a Tag001CGPSAreaInformation") - } - - return []byte(s.string), uint32(len(s.string)), nil -} - -func (Codec001CGPSAreaInformation) Decode(valueContext *exifcommon.ValueContext) (value EncodeableValue, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - valueContext.SetUndefinedValueType(exifcommon.TypeAsciiNoNul) - - valueString, err := valueContext.ReadAsciiNoNul() - log.PanicIf(err) - - return Tag001CGPSAreaInformation{valueString}, nil -} - -func init() { - registerEncoder( - Tag001CGPSAreaInformation{}, - Codec001CGPSAreaInformation{}) - - registerDecoder( - exifcommon.IfdGpsInfoStandardIfdIdentity.UnindexedString(), - 0x001c, - Codec001CGPSAreaInformation{}) -} diff --git a/vendor/github.com/dsoprea/go-exif/v3/undefined/registration.go b/vendor/github.com/dsoprea/go-exif/v3/undefined/registration.go deleted file mode 100644 index cccc20a82..000000000 --- a/vendor/github.com/dsoprea/go-exif/v3/undefined/registration.go +++ /dev/null @@ -1,42 +0,0 @@ -package exifundefined - -import ( - "github.com/dsoprea/go-logging" -) - -// UndefinedTagHandle defines one undefined-type tag with a corresponding -// decoder. 
-type UndefinedTagHandle struct { - IfdPath string - TagId uint16 -} - -func registerEncoder(entity EncodeableValue, encoder UndefinedValueEncoder) { - typeName := entity.EncoderName() - - _, found := encoders[typeName] - if found == true { - log.Panicf("encoder already registered: %v", typeName) - } - - encoders[typeName] = encoder -} - -func registerDecoder(ifdPath string, tagId uint16, decoder UndefinedValueDecoder) { - uth := UndefinedTagHandle{ - IfdPath: ifdPath, - TagId: tagId, - } - - _, found := decoders[uth] - if found == true { - log.Panicf("decoder already registered: %v", uth) - } - - decoders[uth] = decoder -} - -var ( - encoders = make(map[string]UndefinedValueEncoder) - decoders = make(map[UndefinedTagHandle]UndefinedValueDecoder) -) diff --git a/vendor/github.com/dsoprea/go-exif/v3/undefined/type.go b/vendor/github.com/dsoprea/go-exif/v3/undefined/type.go deleted file mode 100644 index ff6ac2b4c..000000000 --- a/vendor/github.com/dsoprea/go-exif/v3/undefined/type.go +++ /dev/null @@ -1,44 +0,0 @@ -package exifundefined - -import ( - "errors" - - "encoding/binary" - - "github.com/dsoprea/go-exif/v3/common" -) - -const ( - // UnparseableUnknownTagValuePlaceholder is the string to use for an unknown - // undefined tag. - UnparseableUnknownTagValuePlaceholder = "!UNKNOWN" - - // UnparseableHandledTagValuePlaceholder is the string to use for a known - // value that is not parseable. - UnparseableHandledTagValuePlaceholder = "!MALFORMED" -) - -var ( - // ErrUnparseableValue is the error for a value that we should have been - // able to parse but were not able to. - ErrUnparseableValue = errors.New("unparseable undefined tag") -) - -// UndefinedValueEncoder knows how to encode an undefined-type tag's value to -// bytes. 
-type UndefinedValueEncoder interface { - Encode(value interface{}, byteOrder binary.ByteOrder) (encoded []byte, unitCount uint32, err error) -} - -// EncodeableValue wraps a value with the information that will be needed to re- -// encode it later. -type EncodeableValue interface { - EncoderName() string - String() string -} - -// UndefinedValueDecoder knows how to decode an undefined-type tag's value from -// bytes. -type UndefinedValueDecoder interface { - Decode(valueContext *exifcommon.ValueContext) (value EncodeableValue, err error) -} diff --git a/vendor/github.com/dsoprea/go-exif/v3/utility.go b/vendor/github.com/dsoprea/go-exif/v3/utility.go deleted file mode 100644 index f0b5e6383..000000000 --- a/vendor/github.com/dsoprea/go-exif/v3/utility.go +++ /dev/null @@ -1,237 +0,0 @@ -package exif - -import ( - "fmt" - "io" - "math" - - "github.com/dsoprea/go-logging" - "github.com/dsoprea/go-utility/v2/filesystem" - - "github.com/dsoprea/go-exif/v3/common" - "github.com/dsoprea/go-exif/v3/undefined" -) - -var ( - utilityLogger = log.NewLogger("exif.utility") -) - -// ExifTag is one simple representation of a tag in a flat list of all of them. -type ExifTag struct { - // IfdPath is the fully-qualified IFD path (even though it is not named as - // such). - IfdPath string `json:"ifd_path"` - - // TagId is the tag-ID. - TagId uint16 `json:"id"` - - // TagName is the tag-name. This is never empty. - TagName string `json:"name"` - - // UnitCount is the recorded number of units constution of the value. - UnitCount uint32 `json:"unit_count"` - - // TagTypeId is the type-ID. - TagTypeId exifcommon.TagTypePrimitive `json:"type_id"` - - // TagTypeName is the type name. - TagTypeName string `json:"type_name"` - - // Value is the decoded value. - Value interface{} `json:"value"` - - // ValueBytes is the raw, encoded value. - ValueBytes []byte `json:"value_bytes"` - - // Formatted is the human representation of the first value (tag values are - // always an array). 
- FormattedFirst string `json:"formatted_first"` - - // Formatted is the human representation of the complete value. - Formatted string `json:"formatted"` - - // ChildIfdPath is the name of the child IFD this tag represents (if it - // represents any). Otherwise, this is empty. - ChildIfdPath string `json:"child_ifd_path"` -} - -// String returns a string representation. -func (et ExifTag) String() string { - return fmt.Sprintf( - "ExifTag<"+ - "IFD-PATH=[%s] "+ - "TAG-ID=(0x%02x) "+ - "TAG-NAME=[%s] "+ - "TAG-TYPE=[%s] "+ - "VALUE=[%v] "+ - "VALUE-BYTES=(%d) "+ - "CHILD-IFD-PATH=[%s]", - et.IfdPath, et.TagId, et.TagName, et.TagTypeName, et.FormattedFirst, - len(et.ValueBytes), et.ChildIfdPath) -} - -// GetFlatExifData returns a simple, flat representation of all tags. -func GetFlatExifData(exifData []byte, so *ScanOptions) (exifTags []ExifTag, med *MiscellaneousExifData, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - sb := rifs.NewSeekableBufferWithBytes(exifData) - - exifTags, med, err = getFlatExifDataUniversalSearchWithReadSeeker(sb, so, false) - log.PanicIf(err) - - return exifTags, med, nil -} - -// RELEASE(dustin): GetFlatExifDataUniversalSearch is a kludge to allow univeral tag searching in a backwards-compatible manner. For the next release, undo this and simply add the flag to GetFlatExifData. - -// GetFlatExifDataUniversalSearch returns a simple, flat representation of all -// tags. 
-func GetFlatExifDataUniversalSearch(exifData []byte, so *ScanOptions, doUniversalSearch bool) (exifTags []ExifTag, med *MiscellaneousExifData, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - sb := rifs.NewSeekableBufferWithBytes(exifData) - - exifTags, med, err = getFlatExifDataUniversalSearchWithReadSeeker(sb, so, doUniversalSearch) - log.PanicIf(err) - - return exifTags, med, nil -} - -// RELEASE(dustin): GetFlatExifDataUniversalSearchWithReadSeeker is a kludge to allow using a ReadSeeker in a backwards-compatible manner. For the next release, drop this and refactor GetFlatExifDataUniversalSearch to take a ReadSeeker. - -// GetFlatExifDataUniversalSearchWithReadSeeker returns a simple, flat -// representation of all tags given a ReadSeeker. -func GetFlatExifDataUniversalSearchWithReadSeeker(rs io.ReadSeeker, so *ScanOptions, doUniversalSearch bool) (exifTags []ExifTag, med *MiscellaneousExifData, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - exifTags, med, err = getFlatExifDataUniversalSearchWithReadSeeker(rs, so, doUniversalSearch) - log.PanicIf(err) - - return exifTags, med, nil -} - -// getFlatExifDataUniversalSearchWithReadSeeker returns a simple, flat -// representation of all tags given a ReadSeeker. 
-func getFlatExifDataUniversalSearchWithReadSeeker(rs io.ReadSeeker, so *ScanOptions, doUniversalSearch bool) (exifTags []ExifTag, med *MiscellaneousExifData, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - headerData := make([]byte, ExifSignatureLength) - if _, err = io.ReadFull(rs, headerData); err != nil { - if err == io.EOF { - return nil, nil, err - } - - log.Panic(err) - } - - eh, err := ParseExifHeader(headerData) - log.PanicIf(err) - - im, err := exifcommon.NewIfdMappingWithStandard() - log.PanicIf(err) - - ti := NewTagIndex() - - if doUniversalSearch == true { - ti.SetUniversalSearch(true) - } - - ebs := NewExifReadSeeker(rs) - ie := NewIfdEnumerate(im, ti, ebs, eh.ByteOrder) - - exifTags = make([]ExifTag, 0) - - visitor := func(ite *IfdTagEntry) (err error) { - // This encodes down to base64. Since this an example tool and we do not - // expect to ever decode the output, we are not worried about - // specifically base64-encoding it in order to have a measure of - // control. 
- valueBytes, err := ite.GetRawBytes() - if err != nil { - if err == exifundefined.ErrUnparseableValue { - return nil - } - - log.Panic(err) - } - - value, err := ite.Value() - if err != nil { - if err == exifcommon.ErrUnhandledUndefinedTypedTag { - value = exifundefined.UnparseableUnknownTagValuePlaceholder - } else if log.Is(err, exifcommon.ErrParseFail) == true { - utilityLogger.Warningf(nil, - "Could not parse value for tag [%s] (%04x) [%s].", - ite.IfdPath(), ite.TagId(), ite.TagName()) - - return nil - } else { - log.Panic(err) - } - } - - et := ExifTag{ - IfdPath: ite.IfdPath(), - TagId: ite.TagId(), - TagName: ite.TagName(), - UnitCount: ite.UnitCount(), - TagTypeId: ite.TagType(), - TagTypeName: ite.TagType().String(), - Value: value, - ValueBytes: valueBytes, - ChildIfdPath: ite.ChildIfdPath(), - } - - et.Formatted, err = ite.Format() - log.PanicIf(err) - - et.FormattedFirst, err = ite.FormatFirst() - log.PanicIf(err) - - exifTags = append(exifTags, et) - - return nil - } - - med, err = ie.Scan(exifcommon.IfdStandardIfdIdentity, eh.FirstIfdOffset, visitor, nil) - log.PanicIf(err) - - return exifTags, med, nil -} - -// GpsDegreesEquals returns true if the two `GpsDegrees` are identical. 
-func GpsDegreesEquals(gi1, gi2 GpsDegrees) bool { - if gi2.Orientation != gi1.Orientation { - return false - } - - degreesRightBound := math.Nextafter(gi1.Degrees, gi1.Degrees+1) - minutesRightBound := math.Nextafter(gi1.Minutes, gi1.Minutes+1) - secondsRightBound := math.Nextafter(gi1.Seconds, gi1.Seconds+1) - - if gi2.Degrees < gi1.Degrees || gi2.Degrees >= degreesRightBound { - return false - } else if gi2.Minutes < gi1.Minutes || gi2.Minutes >= minutesRightBound { - return false - } else if gi2.Seconds < gi1.Seconds || gi2.Seconds >= secondsRightBound { - return false - } - - return true -} diff --git a/vendor/github.com/dsoprea/go-iptc/.MODULE_ROOT b/vendor/github.com/dsoprea/go-iptc/.MODULE_ROOT deleted file mode 100644 index e69de29bb..000000000 diff --git a/vendor/github.com/dsoprea/go-iptc/.travis.yml b/vendor/github.com/dsoprea/go-iptc/.travis.yml deleted file mode 100644 index 710e46b39..000000000 --- a/vendor/github.com/dsoprea/go-iptc/.travis.yml +++ /dev/null @@ -1,14 +0,0 @@ -language: go -go: - - master - - stable - - "1.13" - - "1.12" -env: - - GO111MODULE=on -install: - - go get -t ./... - - go get github.com/mattn/goveralls -script: - - go test -v ./... 
- - goveralls -v -service=travis-ci diff --git a/vendor/github.com/dsoprea/go-iptc/LICENSE b/vendor/github.com/dsoprea/go-iptc/LICENSE deleted file mode 100644 index d92c04268..000000000 --- a/vendor/github.com/dsoprea/go-iptc/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2020 Dustin Oprea - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
diff --git a/vendor/github.com/dsoprea/go-iptc/README.md b/vendor/github.com/dsoprea/go-iptc/README.md deleted file mode 100644 index 8065d16e4..000000000 --- a/vendor/github.com/dsoprea/go-iptc/README.md +++ /dev/null @@ -1,8 +0,0 @@ -[![Build Status](https://travis-ci.org/dsoprea/go-iptc.svg?branch=master)](https://travis-ci.org/dsoprea/go-iptc) -[![Coverage Status](https://coveralls.io/repos/github/dsoprea/go-iptc/badge.svg?branch=master)](https://coveralls.io/github/dsoprea/go-iptc?branch=master) -[![Go Report Card](https://goreportcard.com/badge/github.com/dsoprea/go-iptc)](https://goreportcard.com/report/github.com/dsoprea/go-iptc) -[![GoDoc](https://godoc.org/github.com/dsoprea/go-iptc?status.svg)](https://godoc.org/github.com/dsoprea/go-iptc) - -# Overview - -This project provides functionality to parse a series of IPTC records/datasets. It also provides name resolution, but other constraints/validation is not yet implemented (though there is structure present that can accommodate this when desired/required). diff --git a/vendor/github.com/dsoprea/go-iptc/standard.go b/vendor/github.com/dsoprea/go-iptc/standard.go deleted file mode 100644 index 307aa5a87..000000000 --- a/vendor/github.com/dsoprea/go-iptc/standard.go +++ /dev/null @@ -1,101 +0,0 @@ -package iptc - -import ( - "errors" -) - -// StreamTagInfo encapsulates the properties of each tag. -type StreamTagInfo struct { - // Description is the human-readable description of the tag. 
- Description string -} - -var ( - standardTags = map[StreamTagKey]StreamTagInfo{ - {1, 120}: {"ARM Identifier"}, - - {1, 122}: {"ARM Version"}, - {2, 0}: {"Record Version"}, - {2, 3}: {"Object Type Reference"}, - {2, 4}: {"Object Attribute Reference"}, - {2, 5}: {"Object Name"}, - {2, 7}: {"Edit Status"}, - {2, 8}: {"Editorial Update"}, - {2, 10}: {"Urgency"}, - {2, 12}: {"Subject Reference"}, - {2, 15}: {"Category"}, - {2, 20}: {"Supplemental Category"}, - {2, 22}: {"Fixture Identifier"}, - {2, 25}: {"Keywords"}, - {2, 26}: {"Content Location Code"}, - {2, 27}: {"Content Location Name"}, - {2, 30}: {"Release Date"}, - {2, 35}: {"Release Time"}, - {2, 37}: {"Expiration Date"}, - {2, 38}: {"Expiration Time"}, - {2, 40}: {"Special Instructions"}, - {2, 42}: {"Action Advised"}, - {2, 45}: {"Reference Service"}, - {2, 47}: {"Reference Date"}, - {2, 50}: {"Reference Number"}, - {2, 55}: {"Date Created"}, - {2, 60}: {"Time Created"}, - {2, 62}: {"Digital Creation Date"}, - {2, 63}: {"Digital Creation Time"}, - {2, 65}: {"Originating Program"}, - {2, 70}: {"Program Version"}, - {2, 75}: {"Object Cycle"}, - {2, 80}: {"By-line"}, - {2, 85}: {"By-line Title"}, - {2, 90}: {"City"}, - {2, 92}: {"Sublocation"}, - {2, 95}: {"Province/State"}, - {2, 100}: {"Country/Primary Location Code"}, - {2, 101}: {"Country/Primary Location Name"}, - {2, 103}: {"Original Transmission Reference"}, - {2, 105}: {"Headline"}, - {2, 110}: {"Credit"}, - {2, 115}: {"Source"}, - {2, 116}: {"Copyright Notice"}, - {2, 118}: {"Contact"}, - {2, 120}: {"Caption/Abstract"}, - {2, 122}: {"Writer/Editor"}, - {2, 125}: {"Rasterized Caption"}, - {2, 130}: {"Image Type"}, - {2, 131}: {"Image Orientation"}, - {2, 135}: {"Language Identifier"}, - {2, 150}: {"Audio Type"}, - {2, 151}: {"Audio Sampling Rate"}, - {2, 152}: {"Audio Sampling Resolution"}, - {2, 153}: {"Audio Duration"}, - {2, 154}: {"Audio Outcue"}, - {2, 200}: {"ObjectData Preview File Format"}, - {2, 201}: {"ObjectData Preview File Format 
Version"}, - {2, 202}: {"ObjectData Preview Data"}, - {7, 10}: {"Size Mode"}, - {7, 20}: {"Max Subfile Size"}, - {7, 90}: {"ObjectData Size Announced"}, - {7, 95}: {"Maximum ObjectData Size"}, - {8, 10}: {"Subfile"}, - {9, 10}: {"Confirmed ObjectData Size"}, - } -) - -var ( - // ErrTagNotStandard indicates that the given tag is not known among the - // documented standard set. - ErrTagNotStandard = errors.New("not a standard tag") -) - -// GetTagInfo return the info for the given tag. Returns ErrTagNotStandard if -// not known. -func GetTagInfo(recordNumber, datasetNumber int) (sti StreamTagInfo, err error) { - stk := StreamTagKey{uint8(recordNumber), uint8(datasetNumber)} - - sti, found := standardTags[stk] - if found == false { - return sti, ErrTagNotStandard - } - - return sti, nil -} diff --git a/vendor/github.com/dsoprea/go-iptc/tag.go b/vendor/github.com/dsoprea/go-iptc/tag.go deleted file mode 100644 index 4ceabf41d..000000000 --- a/vendor/github.com/dsoprea/go-iptc/tag.go +++ /dev/null @@ -1,277 +0,0 @@ -package iptc - -import ( - "errors" - "fmt" - "io" - "strings" - "unicode" - - "encoding/binary" - - "github.com/dsoprea/go-logging" -) - -var ( - // TODO(dustin): We're still not sure if this is the right endianness. No search to IPTC or IIM seems to state one or the other. - - // DefaultEncoding is the standard encoding for the IPTC format. - defaultEncoding = binary.BigEndian -) - -var ( - // ErrInvalidTagMarker indicates that the tag can not be parsed because the - // tag boundary marker is not the expected value. - ErrInvalidTagMarker = errors.New("invalid tag marker") -) - -// Tag describes one tag read from the stream. -type Tag struct { - recordNumber uint8 - datasetNumber uint8 - dataSize uint64 -} - -// String expresses state as a string. -func (tag *Tag) String() string { - return fmt.Sprintf( - "Tag", - tag.recordNumber, tag.datasetNumber, tag.dataSize) -} - -// DecodeTag parses one tag from the stream. 
-func DecodeTag(r io.Reader) (tag Tag, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - tagMarker := uint8(0) - err = binary.Read(r, defaultEncoding, &tagMarker) - if err != nil { - if err == io.EOF { - return tag, err - } - - log.Panic(err) - } - - if tagMarker != 0x1c { - return tag, ErrInvalidTagMarker - } - - recordNumber := uint8(0) - err = binary.Read(r, defaultEncoding, &recordNumber) - log.PanicIf(err) - - datasetNumber := uint8(0) - err = binary.Read(r, defaultEncoding, &datasetNumber) - log.PanicIf(err) - - dataSize16Raw := uint16(0) - err = binary.Read(r, defaultEncoding, &dataSize16Raw) - log.PanicIf(err) - - var dataSize uint64 - - if dataSize16Raw < 32768 { - // We only had 16-bits (has the MSB set to (0)). - dataSize = uint64(dataSize16Raw) - } else { - // This field is just the length of the length (has the MSB set to (1)). - - // Clear the MSB. - lengthLength := dataSize16Raw & 32767 - - if lengthLength == 4 { - dataSize32Raw := uint32(0) - err := binary.Read(r, defaultEncoding, &dataSize32Raw) - log.PanicIf(err) - - dataSize = uint64(dataSize32Raw) - } else if lengthLength == 8 { - err := binary.Read(r, defaultEncoding, &dataSize) - log.PanicIf(err) - } else { - // No specific sizes or limits are specified in the specification - // so we need to impose our own limits in order to implement. - - log.Panicf("extended data-set tag size is not supported: (%d)", lengthLength) - } - } - - tag = Tag{ - recordNumber: recordNumber, - datasetNumber: datasetNumber, - dataSize: dataSize, - } - - return tag, nil -} - -// StreamTagKey is a convenience type that lets us key our index with a high- -// level type. -type StreamTagKey struct { - // RecordNumber is the major classification of the dataset. - RecordNumber uint8 - - // DatasetNumber is the minor classification of the dataset. - DatasetNumber uint8 -} - -// String returns a descriptive string. 
-func (stk StreamTagKey) String() string { - return fmt.Sprintf("%d:%d", stk.RecordNumber, stk.DatasetNumber) -} - -// TagData is a convenience wrapper around a byte-slice. -type TagData []byte - -// IsPrintable returns true if all characters are printable. -func (tg TagData) IsPrintable() bool { - for _, b := range tg { - r := rune(b) - - // Newline characters aren't considered printable. - if r == 0x0d || r == 0x0a { - continue - } - - if unicode.IsGraphic(r) == false || unicode.IsPrint(r) == false { - return false - } - } - - return true -} - -// String returns a descriptive string. If the data doesn't include any non- -// printable characters, it will include the value itself. -func (tg TagData) String() string { - if tg.IsPrintable() == true { - return string(tg) - } - - return fmt.Sprintf("BINARY<(%d) bytes>", len(tg)) -} - -// ParsedTags is the complete, unordered set of tags parsed from the stream. -type ParsedTags map[StreamTagKey][]TagData - -// ParseStream parses a serial sequence of tags and tag data out of the stream. -func ParseStream(r io.Reader) (tags map[StreamTagKey][]TagData, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - tags = make(ParsedTags) - - for { - tag, err := DecodeTag(r) - if err != nil { - if err == io.EOF { - break - } - - log.Panic(err) - } - - raw := make([]byte, tag.dataSize) - - _, err = io.ReadFull(r, raw) - log.PanicIf(err) - - data := TagData(raw) - - stk := StreamTagKey{ - RecordNumber: tag.recordNumber, - DatasetNumber: tag.datasetNumber, - } - - if existing, found := tags[stk]; found == true { - tags[stk] = append(existing, data) - } else { - tags[stk] = []TagData{data} - } - } - - return tags, nil -} - -// GetSimpleDictionaryFromParsedTags returns a dictionary of tag names to tag -// values, where all values are strings and any tag that had a non-printable -// value is omitted. 
We will also only return the first value, therefore -// dropping any follow-up values for repeatable tags. This will ignore non- -// standard tags. This will trim whitespace from the ends of strings. -// -// This is a convenience function for quickly displaying only the summary IPTC -// metadata that a user might actually be interested in at first glance. -func GetSimpleDictionaryFromParsedTags(pt ParsedTags) (distilled map[string]string) { - distilled = make(map[string]string) - - for stk, dataSlice := range pt { - sti, err := GetTagInfo(int(stk.RecordNumber), int(stk.DatasetNumber)) - if err != nil { - if err == ErrTagNotStandard { - continue - } else { - log.Panic(err) - } - } - - data := dataSlice[0] - - if data.IsPrintable() == false { - continue - } - - // TODO(dustin): Trim leading whitespace, too. - distilled[sti.Description] = strings.Trim(string(data), "\r\n") - } - - return distilled -} - -// GetDictionaryFromParsedTags returns all tags. It will keep non-printable -// values, though will not print a placeholder instead. This will keep non- -// standard tags (and print the fully-qualified dataset ID rather than the -// name). It will keep repeated values (with the counter value appended to the -// end). 
-func GetDictionaryFromParsedTags(pt ParsedTags) (distilled map[string]string) { - distilled = make(map[string]string) - for stk, dataSlice := range pt { - var keyPhrase string - - sti, err := GetTagInfo(int(stk.RecordNumber), int(stk.DatasetNumber)) - if err != nil { - if err == ErrTagNotStandard { - keyPhrase = fmt.Sprintf("%s (not a standard tag)", stk.String()) - } else { - log.Panic(err) - } - } else { - keyPhrase = sti.Description - } - - for i, data := range dataSlice { - currentKeyPhrase := keyPhrase - if len(dataSlice) > 1 { - currentKeyPhrase = fmt.Sprintf("%s (%d)", currentKeyPhrase, i+1) - } - - var presentable string - if data.IsPrintable() == false { - presentable = fmt.Sprintf("[BINARY] %s", DumpBytesToString(data)) - } else { - presentable = string(data) - } - - distilled[currentKeyPhrase] = presentable - } - } - - return distilled -} diff --git a/vendor/github.com/dsoprea/go-iptc/testing_common.go b/vendor/github.com/dsoprea/go-iptc/testing_common.go deleted file mode 100644 index b54b9b8f3..000000000 --- a/vendor/github.com/dsoprea/go-iptc/testing_common.go +++ /dev/null @@ -1,73 +0,0 @@ -package iptc - -import ( - "os" - "path" - - "github.com/dsoprea/go-logging" -) - -var ( - testDataRelFilepath = "iptc.data" -) - -var ( - moduleRootPath = "" - assetsPath = "" -) - -// GetModuleRootPath returns the root-path of the module. 
-func GetModuleRootPath() string { - if moduleRootPath == "" { - moduleRootPath = os.Getenv("IPTC_MODULE_ROOT_PATH") - if moduleRootPath != "" { - return moduleRootPath - } - - currentWd, err := os.Getwd() - log.PanicIf(err) - - currentPath := currentWd - visited := make([]string, 0) - - for { - tryStampFilepath := path.Join(currentPath, ".MODULE_ROOT") - - _, err := os.Stat(tryStampFilepath) - if err != nil && os.IsNotExist(err) != true { - log.Panic(err) - } else if err == nil { - break - } - - visited = append(visited, tryStampFilepath) - - currentPath = path.Dir(currentPath) - if currentPath == "/" { - log.Panicf("could not find module-root: %v", visited) - } - } - - moduleRootPath = currentPath - } - - return moduleRootPath -} - -// GetTestAssetsPath returns the path of the test-assets. -func GetTestAssetsPath() string { - if assetsPath == "" { - moduleRootPath := GetModuleRootPath() - assetsPath = path.Join(moduleRootPath, "assets") - } - - return assetsPath -} - -// GetTestDataFilepath returns the file-path of the common test-data. -func GetTestDataFilepath() string { - assetsPath := GetTestAssetsPath() - filepath := path.Join(assetsPath, testDataRelFilepath) - - return filepath -} diff --git a/vendor/github.com/dsoprea/go-iptc/utility.go b/vendor/github.com/dsoprea/go-iptc/utility.go deleted file mode 100644 index 5a4a10ad3..000000000 --- a/vendor/github.com/dsoprea/go-iptc/utility.go +++ /dev/null @@ -1,25 +0,0 @@ -package iptc - -import ( - "bytes" - "fmt" - - "github.com/dsoprea/go-logging" -) - -// DumpBytesToString returns a stringified list of hex-encoded bytes. 
-func DumpBytesToString(data []byte) string { - b := new(bytes.Buffer) - - for i, x := range data { - _, err := b.WriteString(fmt.Sprintf("%02x", x)) - log.PanicIf(err) - - if i < len(data)-1 { - _, err := b.WriteRune(' ') - log.PanicIf(err) - } - } - - return b.String() -} diff --git a/vendor/github.com/dsoprea/go-logging/.travis.yml b/vendor/github.com/dsoprea/go-logging/.travis.yml deleted file mode 100644 index e37da4ba8..000000000 --- a/vendor/github.com/dsoprea/go-logging/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go -go: - - tip -install: - - go get -t ./... - - go get github.com/mattn/goveralls -script: -# v1 - - go test -v . -# v2 - - cd v2 - - goveralls -v -service=travis-ci diff --git a/vendor/github.com/dsoprea/go-logging/LICENSE b/vendor/github.com/dsoprea/go-logging/LICENSE deleted file mode 100644 index 163291ed6..000000000 --- a/vendor/github.com/dsoprea/go-logging/LICENSE +++ /dev/null @@ -1,9 +0,0 @@ -MIT LICENSE - -Copyright 2020 Dustin Oprea - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/dsoprea/go-logging/README.md b/vendor/github.com/dsoprea/go-logging/README.md deleted file mode 100644 index 820cd9dc0..000000000 --- a/vendor/github.com/dsoprea/go-logging/README.md +++ /dev/null @@ -1,223 +0,0 @@ -[![Build Status](https://travis-ci.org/dsoprea/go-logging.svg?branch=master)](https://travis-ci.org/dsoprea/go-logging) -[![Coverage Status](https://coveralls.io/repos/github/dsoprea/go-logging/badge.svg?branch=master)](https://coveralls.io/github/dsoprea/go-logging?branch=master) -[![Go Report Card](https://goreportcard.com/badge/github.com/dsoprea/go-logging/v2)](https://goreportcard.com/report/github.com/dsoprea/go-logging/v2) -[![GoDoc](https://godoc.org/github.com/dsoprea/go-logging/v2?status.svg)](https://godoc.org/github.com/dsoprea/go-logging/v2) - -## Introduction - -This project bridges several gaps that are present in the standard logging support in Go: - -- Equips errors with stacktraces and provides a facility for printing them -- Inherently supports the ability for each Go file to print its messages with a prefix representing that file/package -- Adds some functions to specifically log messages of different levels (e.g. 
debug, error) -- Adds a `PanicIf()` function that can be used to conditionally manage errors depending on whether an error variable is `nil` or actually has an error -- Adds support for pluggable logging adapters (so the output can be sent somewhere other than the console) -- Adds configuration (such as the logging level or adapter) that can be driven from the environment -- Supports filtering to show/hide the logging of certain places of the application -- The loggers can be definded at the package level, so you can determine which Go file any log message came from. - -When used with the Panic-Defer-Recover pattern in Go, even panics rising from the Go runtime will be caught and wrapped with a stacktrace. This compartmentalizes which function they could have originated from, which is, otherwise, potentially non-trivial to figure out. - -## AppEngine - -Go under AppEngine is very stripped down, such as there being no logging type (e.g. `Logger` in native Go) and there is no support for prefixing. As each logging call from this project takes a `Context`, this works cooperatively to bridge the additional gaps in AppEngine's logging support. - -With standard console logging outside of this context, that parameter will take a`nil`. - - -## Getting Started - -The simplest, possible example: - -```go -package thispackage - -import ( - "context" - "errors" - - "github.com/dsoprea/go-logging/v2" -) - -var ( - thisfileLog = log.NewLogger("thispackage.thisfile") -) - -func a_cry_for_help(ctx context.Context) { - err := errors.New("a big error") - thisfileLog.Errorf(ctx, err, "How big is my problem: %s", "pretty big") -} - -func init() { - cla := log.NewConsoleLogAdapter() - log.AddAdapter("console", cla) -} -``` - -Notice two things: - -1. We register the "console" adapter at the bottom. The first adapter registered will be used by default. -2. We pass-in a prefix (what we refer to as a "noun") to `log.NewLogger()`. 
This is a simple, descriptive name that represents the subject of the file. By convention, we construct this by dot-separating the current package and the name of the file. We recommend that you define a different log for every file at the package level, but it is your choice whether you want to do this or share the same logger over the entire package, define one in each struct, etc.. - - -### Example Output - -Example output from a real application (not from the above): - -``` -2016/09/09 12:57:44 DEBUG: user: User revisiting: [test@example.com] -2016/09/09 12:57:44 DEBUG: context: Session already inited: [DCRBDGRY6RMWANCSJXVLD7GULDH4NZEB6SBAQ3KSFIGA2LP45IIQ] -2016/09/09 12:57:44 DEBUG: session_data: Session save not necessary: [DCRBDGRY6RMWANCSJXVLD7GULDH4NZEB6SBAQ3KSFIGA2LP45IIQ] -2016/09/09 12:57:44 DEBUG: context: Got session: [DCRBDGRY6RMWANCSJXVLD7GULDH4NZEB6SBAQ3KSFIGA2LP45IIQ] -2016/09/09 12:57:44 DEBUG: session_data: Found user in session. -2016/09/09 12:57:44 DEBUG: cache: Cache miss: [geo.geocode.reverse:dhxp15x] -``` - - -## Adapters - -This project provides one built-in logging adapter, "console", which prints to the screen. To register it: - -```go -cla := log.NewConsoleLogAdapter() -log.AddAdapter("console", cla) -``` - -### Custom Adapters - -If you would like to implement your own logger, just create a struct type that satisfies the LogAdapter interface. - -```go -type LogAdapter interface { - Debugf(lc *LogContext, message *string) error - Infof(lc *LogContext, message *string) error - Warningf(lc *LogContext, message *string) error - Errorf(lc *LogContext, message *string) error -} -``` - -The *LogContext* struct passed in provides additional information that you may need in order to do what you need to do: - -```go -type LogContext struct { - Logger *Logger - Ctx context.Context -} -``` - -`Logger` represents your Logger instance. 
- -Adapter example: - -```go -type DummyLogAdapter struct { - -} - -func (dla *DummyLogAdapter) Debugf(lc *LogContext, message *string) error { - -} - -func (dla *DummyLogAdapter) Infof(lc *LogContext, message *string) error { - -} - -func (dla *DummyLogAdapter) Warningf(lc *LogContext, message *string) error { - -} - -func (dla *DummyLogAdapter) Errorf(lc *LogContext, message *string) error { - -} -``` - -Then, register it: - -```go -func init() { - log.AddAdapter("dummy", new(DummyLogAdapter)) -} -``` - -If this is a task-specific implementation, just register it from the `init()` of the file that defines it. - -If this is the first adapter you've registered, it will be the default one used. Otherwise, you'll have to deliberately specify it when you are creating a logger: Instead of calling `log.NewLogger(noun string)`, call `log.NewLoggerWithAdapterName(noun string, adapterName string)`. - -We discuss how to configure the adapter from configuration in the "Configuration" section below. - - -### Adapter Notes - -- The `Logger` instance exports `Noun()` in the event you want to discriminate where your log entries go in your adapter. It also exports `Adapter()` for if you need to access the adapter instance from your application. -- If no adapter is registered (specifically, the default adapter-name remains empty), logging calls will be a no-op. This allows libraries to implement *go-logging* where the larger application doesn't. - - -## Filters - -We support the ability to exclusively log for a specific set of nouns (we'll exclude any not specified): - -```go -log.AddIncludeFilter("nountoshow1") -log.AddIncludeFilter("nountoshow2") -``` - -Depending on your needs, you might just want to exclude a couple and include the rest: - -```go -log.AddExcludeFilter("nountohide1") -log.AddExcludeFilter("nountohide2") -``` - -We'll first hit the include-filters. If it's in there, we'll forward the log item to the adapter. 
If not, and there is at least one include filter in the list, we won't do anything. If the list of include filters is empty but the noun appears in the exclude list, we won't do anything. - -It is a good convention to exclude the nouns of any library you are writing whose logging you do not want to generally be aware of unless you are debugging. You might call `AddExcludeFilter()` from the `init()` function at the bottom of those files unless there is some configuration variable, such as "(LibraryNameHere)DoShowLogging", that has been defined and set to TRUE. - - -## Configuration - -The following configuration items are available: - -- *Format*: The default format used to build the message that gets sent to the adapter. It is assumed that the adapter already prefixes the message with time and log-level (since the default AppEngine logger does). The default value is: `{{.Noun}}: [{{.Level}}] {{if eq .ExcludeBypass true}} [BYPASS]{{end}} {{.Message}}`. The available tokens are "Level", "Noun", "ExcludeBypass", and "Message". -- *DefaultAdapterName*: The default name of the adapter to use when NewLogger() is called (if this isn't defined then the name of the first registered adapter will be used). -- *LevelName*: The priority-level of messages permitted to be logged (all others will be discarded). By default, it is "info". Other levels are: "debug", "warning", "error", "critical" -- *IncludeNouns*: Comma-separated list of nouns to log for. All others will be ignored. -- *ExcludeNouns*: Comma-separated list on nouns to exclude from logging. -- *ExcludeBypassLevelName*: The log-level at which we will show logging for nouns that have been excluded. Allows you to hide excessive, unimportant logging for nouns but to still see their warnings, errors, etc... - - -### Configuration Providers - -You provide the configuration by setting a configuration-provider. Configuration providers must satisfy the `ConfigurationProvider` interface. 
The following are provided with the project: - -- `EnvironmentConfigurationProvider`: Read values from the environment. -- `StaticConfigurationProvider`: Set values directly on the struct. - -**The configuration provider must be applied before doing any logging (otherwise it will have no effect).** - -Environments such as AppEngine work best with `EnvironmentConfigurationProvider` as this is generally how configuration is exposed *by* AppEngine *to* the application. You can define this configuration directly in *that* configuration. - -By default, no configuration-provider is applied, the level is defaulted to INFO and the format is defaulted to "{{.Noun}}:{{if eq .ExcludeBypass true}} [BYPASS]{{end}} {{.Message}}". - -Again, if a configuration-provider does not provide a log-level or format, they will be defaulted (or left alone, if already set). If it does not provide an adapter-name, the adapter-name of the first registered adapter will be used. - -Usage instructions of both follow. - - -### Environment-Based Configuration - -```go -ecp := log.NewEnvironmentConfigurationProvider() -log.LoadConfiguration(ecp) -``` - -Each of the items listed at the top of the "Configuration" section can be specified in the environment using a prefix of "Log" (e.g. LogDefaultAdapterName). - - -### Static Configuration - -```go -scp := log.NewStaticConfigurationProvider() -scp.SetLevelName(log.LevelNameWarning) - -log.LoadConfiguration(scp) -``` diff --git a/vendor/github.com/dsoprea/go-logging/config.go b/vendor/github.com/dsoprea/go-logging/config.go deleted file mode 100644 index 20896e342..000000000 --- a/vendor/github.com/dsoprea/go-logging/config.go +++ /dev/null @@ -1,246 +0,0 @@ -package log - -import ( - "fmt" - "os" -) - -// Config keys. 
-const ( - ckFormat = "LogFormat" - ckDefaultAdapterName = "LogDefaultAdapterName" - ckLevelName = "LogLevelName" - ckIncludeNouns = "LogIncludeNouns" - ckExcludeNouns = "LogExcludeNouns" - ckExcludeBypassLevelName = "LogExcludeBypassLevelName" -) - -// Other constants -const ( - defaultFormat = "{{.Noun}}: [{{.Level}}] {{if eq .ExcludeBypass true}} [BYPASS]{{end}} {{.Message}}" - defaultLevelName = LevelNameInfo -) - -// Config -var ( - // Alternative format. - format = defaultFormat - - // Alternative adapter. - defaultAdapterName = "" - - // Alternative level at which to display log-items - levelName = defaultLevelName - - // Configuration-driven comma-separated list of nouns to include. - includeNouns = "" - - // Configuration-driven comma-separated list of nouns to exclude. - excludeNouns = "" - - // Level at which to disregard exclusion (if the severity of a message - // meets or exceed this, always display). - excludeBypassLevelName = "" -) - -// Other -var ( - configurationLoaded = false -) - -// Return the current default adapter name. -func GetDefaultAdapterName() string { - return defaultAdapterName -} - -// The adapter will automatically be the first one registered. This overrides -// that. 
-func SetDefaultAdapterName(name string) { - defaultAdapterName = name -} - -func LoadConfiguration(cp ConfigurationProvider) { - configuredDefaultAdapterName := cp.DefaultAdapterName() - - if configuredDefaultAdapterName != "" { - defaultAdapterName = configuredDefaultAdapterName - } - - includeNouns = cp.IncludeNouns() - excludeNouns = cp.ExcludeNouns() - excludeBypassLevelName = cp.ExcludeBypassLevelName() - - f := cp.Format() - if f != "" { - format = f - } - - ln := cp.LevelName() - if ln != "" { - levelName = ln - } - - configurationLoaded = true -} - -func getConfigState() map[string]interface{} { - return map[string]interface{}{ - "format": format, - "defaultAdapterName": defaultAdapterName, - "levelName": levelName, - "includeNouns": includeNouns, - "excludeNouns": excludeNouns, - "excludeBypassLevelName": excludeBypassLevelName, - } -} - -func setConfigState(config map[string]interface{}) { - format = config["format"].(string) - - defaultAdapterName = config["defaultAdapterName"].(string) - levelName = config["levelName"].(string) - includeNouns = config["includeNouns"].(string) - excludeNouns = config["excludeNouns"].(string) - excludeBypassLevelName = config["excludeBypassLevelName"].(string) -} - -func getConfigDump() string { - return fmt.Sprintf( - "Current configuration:\n"+ - " FORMAT=[%s]\n"+ - " DEFAULT-ADAPTER-NAME=[%s]\n"+ - " LEVEL-NAME=[%s]\n"+ - " INCLUDE-NOUNS=[%s]\n"+ - " EXCLUDE-NOUNS=[%s]\n"+ - " EXCLUDE-BYPASS-LEVEL-NAME=[%s]", - format, defaultAdapterName, levelName, includeNouns, excludeNouns, excludeBypassLevelName) -} - -func IsConfigurationLoaded() bool { - return configurationLoaded -} - -type ConfigurationProvider interface { - // Alternative format (defaults to . - Format() string - - // Alternative adapter (defaults to "appengine"). - DefaultAdapterName() string - - // Alternative level at which to display log-items (defaults to - // "info"). 
- LevelName() string - - // Configuration-driven comma-separated list of nouns to include. Defaults - // to empty. - IncludeNouns() string - - // Configuration-driven comma-separated list of nouns to exclude. Defaults - // to empty. - ExcludeNouns() string - - // Level at which to disregard exclusion (if the severity of a message - // meets or exceed this, always display). Defaults to empty. - ExcludeBypassLevelName() string -} - -// Environment configuration-provider. -type EnvironmentConfigurationProvider struct { -} - -func NewEnvironmentConfigurationProvider() *EnvironmentConfigurationProvider { - return new(EnvironmentConfigurationProvider) -} - -func (ecp *EnvironmentConfigurationProvider) Format() string { - return os.Getenv(ckFormat) -} - -func (ecp *EnvironmentConfigurationProvider) DefaultAdapterName() string { - return os.Getenv(ckDefaultAdapterName) -} - -func (ecp *EnvironmentConfigurationProvider) LevelName() string { - return os.Getenv(ckLevelName) -} - -func (ecp *EnvironmentConfigurationProvider) IncludeNouns() string { - return os.Getenv(ckIncludeNouns) -} - -func (ecp *EnvironmentConfigurationProvider) ExcludeNouns() string { - return os.Getenv(ckExcludeNouns) -} - -func (ecp *EnvironmentConfigurationProvider) ExcludeBypassLevelName() string { - return os.Getenv(ckExcludeBypassLevelName) -} - -// Static configuration-provider. 
-type StaticConfigurationProvider struct { - format string - defaultAdapterName string - levelName string - includeNouns string - excludeNouns string - excludeBypassLevelName string -} - -func NewStaticConfigurationProvider() *StaticConfigurationProvider { - return new(StaticConfigurationProvider) -} - -func (scp *StaticConfigurationProvider) SetFormat(format string) { - scp.format = format -} - -func (scp *StaticConfigurationProvider) SetDefaultAdapterName(adapterName string) { - scp.defaultAdapterName = adapterName -} - -func (scp *StaticConfigurationProvider) SetLevelName(levelName string) { - scp.levelName = levelName -} - -func (scp *StaticConfigurationProvider) SetIncludeNouns(includeNouns string) { - scp.includeNouns = includeNouns -} - -func (scp *StaticConfigurationProvider) SetExcludeNouns(excludeNouns string) { - scp.excludeNouns = excludeNouns -} - -func (scp *StaticConfigurationProvider) SetExcludeBypassLevelName(excludeBypassLevelName string) { - scp.excludeBypassLevelName = excludeBypassLevelName -} - -func (scp *StaticConfigurationProvider) Format() string { - return scp.format -} - -func (scp *StaticConfigurationProvider) DefaultAdapterName() string { - return scp.defaultAdapterName -} - -func (scp *StaticConfigurationProvider) LevelName() string { - return scp.levelName -} - -func (scp *StaticConfigurationProvider) IncludeNouns() string { - return scp.includeNouns -} - -func (scp *StaticConfigurationProvider) ExcludeNouns() string { - return scp.excludeNouns -} - -func (scp *StaticConfigurationProvider) ExcludeBypassLevelName() string { - return scp.excludeBypassLevelName -} - -func init() { - // Do the initial configuration-load from the environment. We gotta seed it - // with something for simplicity's sake. 
- ecp := NewEnvironmentConfigurationProvider() - LoadConfiguration(ecp) -} diff --git a/vendor/github.com/dsoprea/go-logging/console_adapter.go b/vendor/github.com/dsoprea/go-logging/console_adapter.go deleted file mode 100644 index c63a2911c..000000000 --- a/vendor/github.com/dsoprea/go-logging/console_adapter.go +++ /dev/null @@ -1,36 +0,0 @@ -package log - -import ( - golog "log" -) - -type ConsoleLogAdapter struct { -} - -func NewConsoleLogAdapter() LogAdapter { - return new(ConsoleLogAdapter) -} - -func (cla *ConsoleLogAdapter) Debugf(lc *LogContext, message *string) error { - golog.Println(*message) - - return nil -} - -func (cla *ConsoleLogAdapter) Infof(lc *LogContext, message *string) error { - golog.Println(*message) - - return nil -} - -func (cla *ConsoleLogAdapter) Warningf(lc *LogContext, message *string) error { - golog.Println(*message) - - return nil -} - -func (cla *ConsoleLogAdapter) Errorf(lc *LogContext, message *string) error { - golog.Println(*message) - - return nil -} diff --git a/vendor/github.com/dsoprea/go-logging/log.go b/vendor/github.com/dsoprea/go-logging/log.go deleted file mode 100644 index 84117a92e..000000000 --- a/vendor/github.com/dsoprea/go-logging/log.go +++ /dev/null @@ -1,537 +0,0 @@ -package log - -import ( - "bytes" - e "errors" - "fmt" - "strings" - "sync" - - "text/template" - - "github.com/go-errors/errors" - "golang.org/x/net/context" -) - -// TODO(dustin): Finish symbol documentation - -// Config severity integers. -const ( - LevelDebug = iota - LevelInfo = iota - LevelWarning = iota - LevelError = iota -) - -// Config severity names. -const ( - LevelNameDebug = "debug" - LevelNameInfo = "info" - LevelNameWarning = "warning" - LevelNameError = "error" -) - -// Seveirty name->integer map. 
-var ( - LevelNameMap = map[string]int{ - LevelNameDebug: LevelDebug, - LevelNameInfo: LevelInfo, - LevelNameWarning: LevelWarning, - LevelNameError: LevelError, - } - - LevelNameMapR = map[int]string{ - LevelDebug: LevelNameDebug, - LevelInfo: LevelNameInfo, - LevelWarning: LevelNameWarning, - LevelError: LevelNameError, - } -) - -// Errors -var ( - ErrAdapterAlreadyRegistered = e.New("adapter already registered") - ErrFormatEmpty = e.New("format is empty") - ErrExcludeLevelNameInvalid = e.New("exclude bypass-level is invalid") - ErrNoAdapterConfigured = e.New("no default adapter configured") - ErrAdapterIsNil = e.New("adapter is nil") - ErrConfigurationNotLoaded = e.New("can not configure because configuration is not loaded") -) - -// Other -var ( - includeFilters = make(map[string]bool) - useIncludeFilters = false - excludeFilters = make(map[string]bool) - useExcludeFilters = false - - adapters = make(map[string]LogAdapter) - - // TODO(dustin): !! Finish implementing this. - excludeBypassLevel = -1 -) - -// Add global include filter. -func AddIncludeFilter(noun string) { - includeFilters[noun] = true - useIncludeFilters = true -} - -// Remove global include filter. -func RemoveIncludeFilter(noun string) { - delete(includeFilters, noun) - if len(includeFilters) == 0 { - useIncludeFilters = false - } -} - -// Add global exclude filter. -func AddExcludeFilter(noun string) { - excludeFilters[noun] = true - useExcludeFilters = true -} - -// Remove global exclude filter. 
-func RemoveExcludeFilter(noun string) { - delete(excludeFilters, noun) - if len(excludeFilters) == 0 { - useExcludeFilters = false - } -} - -func AddAdapter(name string, la LogAdapter) { - if _, found := adapters[name]; found == true { - Panic(ErrAdapterAlreadyRegistered) - } - - if la == nil { - Panic(ErrAdapterIsNil) - } - - adapters[name] = la - - if GetDefaultAdapterName() == "" { - SetDefaultAdapterName(name) - } -} - -func ClearAdapters() { - adapters = make(map[string]LogAdapter) - SetDefaultAdapterName("") -} - -type LogAdapter interface { - Debugf(lc *LogContext, message *string) error - Infof(lc *LogContext, message *string) error - Warningf(lc *LogContext, message *string) error - Errorf(lc *LogContext, message *string) error -} - -// TODO(dustin): !! Also populate whether we've bypassed an exception so that -// we can add a template macro to prefix an exclamation of -// some sort. -type MessageContext struct { - Level *string - Noun *string - Message *string - ExcludeBypass bool -} - -type LogContext struct { - Logger *Logger - Ctx context.Context -} - -type Logger struct { - isConfigured bool - an string - la LogAdapter - t *template.Template - systemLevel int - noun string -} - -func NewLoggerWithAdapterName(noun string, adapterName string) (l *Logger) { - l = &Logger{ - noun: noun, - an: adapterName, - } - - return l -} - -func NewLogger(noun string) (l *Logger) { - l = NewLoggerWithAdapterName(noun, "") - - return l -} - -func (l *Logger) Noun() string { - return l.noun -} - -func (l *Logger) Adapter() LogAdapter { - return l.la -} - -var ( - configureMutex sync.Mutex -) - -func (l *Logger) doConfigure(force bool) { - configureMutex.Lock() - defer configureMutex.Unlock() - - if l.isConfigured == true && force == false { - return - } - - if IsConfigurationLoaded() == false { - Panic(ErrConfigurationNotLoaded) - } - - if l.an == "" { - l.an = GetDefaultAdapterName() - } - - // If this is empty, then no specific adapter was given or no system - // 
default was configured (which implies that no adapters were registered). - // All of our logging will be skipped. - if l.an != "" { - la, found := adapters[l.an] - if found == false { - Panic(fmt.Errorf("adapter is not valid: %s", l.an)) - } - - l.la = la - } - - // Set the level. - - systemLevel, found := LevelNameMap[levelName] - if found == false { - Panic(fmt.Errorf("log-level not valid: [%s]", levelName)) - } - - l.systemLevel = systemLevel - - // Set the form. - - if format == "" { - Panic(ErrFormatEmpty) - } - - if t, err := template.New("logItem").Parse(format); err != nil { - Panic(err) - } else { - l.t = t - } - - l.isConfigured = true -} - -func (l *Logger) flattenMessage(lc *MessageContext, format *string, args []interface{}) (string, error) { - m := fmt.Sprintf(*format, args...) - - lc.Message = &m - - var b bytes.Buffer - if err := l.t.Execute(&b, *lc); err != nil { - return "", err - } - - return b.String(), nil -} - -func (l *Logger) allowMessage(noun string, level int) bool { - if _, found := includeFilters[noun]; found == true { - return true - } - - // If we didn't hit an include filter and we *had* include filters, filter - // it out. - if useIncludeFilters == true { - return false - } - - if _, found := excludeFilters[noun]; found == true { - return false - } - - return true -} - -func (l *Logger) makeLogContext(ctx context.Context) *LogContext { - return &LogContext{ - Ctx: ctx, - Logger: l, - } -} - -type LogMethod func(lc *LogContext, message *string) error - -func (l *Logger) log(ctx context.Context, level int, lm LogMethod, format string, args []interface{}) error { - if l.systemLevel > level { - return nil - } - - // Preempt the normal filter checks if we can unconditionally allow at a - // certain level and we've hit that level. - // - // Notice that this is only relevant if the system-log level is letting - // *anything* show logs at the level we came in with. 
- canExcludeBypass := level >= excludeBypassLevel && excludeBypassLevel != -1 - didExcludeBypass := false - - n := l.Noun() - - if l.allowMessage(n, level) == false { - if canExcludeBypass == false { - return nil - } else { - didExcludeBypass = true - } - } - - levelName, found := LevelNameMapR[level] - if found == false { - Panic(fmt.Errorf("level not valid: (%d)", level)) - } - - levelName = strings.ToUpper(levelName) - - lc := &MessageContext{ - Level: &levelName, - Noun: &n, - ExcludeBypass: didExcludeBypass, - } - - if s, err := l.flattenMessage(lc, &format, args); err != nil { - return err - } else { - lc := l.makeLogContext(ctx) - if err := lm(lc, &s); err != nil { - panic(err) - } - - return e.New(s) - } -} - -func (l *Logger) Debugf(ctx context.Context, format string, args ...interface{}) { - l.doConfigure(false) - - if l.la != nil { - l.log(ctx, LevelDebug, l.la.Debugf, format, args) - } -} - -func (l *Logger) Infof(ctx context.Context, format string, args ...interface{}) { - l.doConfigure(false) - - if l.la != nil { - l.log(ctx, LevelInfo, l.la.Infof, format, args) - } -} - -func (l *Logger) Warningf(ctx context.Context, format string, args ...interface{}) { - l.doConfigure(false) - - if l.la != nil { - l.log(ctx, LevelWarning, l.la.Warningf, format, args) - } -} - -func (l *Logger) mergeStack(err interface{}, format string, args []interface{}) (string, []interface{}) { - if format != "" { - format += "\n%s" - } else { - format = "%s" - } - - var stackified *errors.Error - stackified, ok := err.(*errors.Error) - if ok == false { - stackified = errors.Wrap(err, 2) - } - - args = append(args, stackified.ErrorStack()) - - return format, args -} - -func (l *Logger) Errorf(ctx context.Context, errRaw interface{}, format string, args ...interface{}) { - l.doConfigure(false) - - var err interface{} - - if errRaw != nil { - _, ok := errRaw.(*errors.Error) - if ok == true { - err = errRaw - } else { - err = errors.Wrap(errRaw, 1) - } - } - - if l.la != nil { - if 
errRaw != nil { - format, args = l.mergeStack(err, format, args) - } - - l.log(ctx, LevelError, l.la.Errorf, format, args) - } -} - -func (l *Logger) ErrorIff(ctx context.Context, errRaw interface{}, format string, args ...interface{}) { - if errRaw == nil { - return - } - - var err interface{} - - _, ok := errRaw.(*errors.Error) - if ok == true { - err = errRaw - } else { - err = errors.Wrap(errRaw, 1) - } - - l.Errorf(ctx, err, format, args...) -} - -func (l *Logger) Panicf(ctx context.Context, errRaw interface{}, format string, args ...interface{}) { - l.doConfigure(false) - - var err interface{} - - _, ok := errRaw.(*errors.Error) - if ok == true { - err = errRaw - } else { - err = errors.Wrap(errRaw, 1) - } - - if l.la != nil { - format, args = l.mergeStack(err, format, args) - err = l.log(ctx, LevelError, l.la.Errorf, format, args) - } - - Panic(err.(error)) -} - -func (l *Logger) PanicIff(ctx context.Context, errRaw interface{}, format string, args ...interface{}) { - if errRaw == nil { - return - } - - var err interface{} - - _, ok := errRaw.(*errors.Error) - if ok == true { - err = errRaw - } else { - err = errors.Wrap(errRaw, 1) - } - - l.Panicf(ctx, err.(error), format, args...) -} - -func Wrap(err interface{}) *errors.Error { - es, ok := err.(*errors.Error) - if ok == true { - return es - } else { - return errors.Wrap(err, 1) - } -} - -func Errorf(message string, args ...interface{}) *errors.Error { - err := fmt.Errorf(message, args...) - return errors.Wrap(err, 1) -} - -func Panic(err interface{}) { - _, ok := err.(*errors.Error) - if ok == true { - panic(err) - } else { - panic(errors.Wrap(err, 1)) - } -} - -func Panicf(message string, args ...interface{}) { - err := Errorf(message, args...) 
- Panic(err) -} - -func PanicIf(err interface{}) { - if err == nil { - return - } - - _, ok := err.(*errors.Error) - if ok == true { - panic(err) - } else { - panic(errors.Wrap(err, 1)) - } -} - -// Is checks if the left ("actual") error equals the right ("against") error. -// The right must be an unwrapped error (the kind that you'd initialize as a -// global variable). The left can be a wrapped or unwrapped error. -func Is(actual, against error) bool { - // If it's an unwrapped error. - if _, ok := actual.(*errors.Error); ok == false { - return actual == against - } - - return errors.Is(actual, against) -} - -// Print is a utility function to prevent the caller from having to import the -// third-party library. -func PrintError(err error) { - wrapped := Wrap(err) - fmt.Printf("Stack:\n\n%s\n", wrapped.ErrorStack()) -} - -// PrintErrorf is a utility function to prevent the caller from having to -// import the third-party library. -func PrintErrorf(err error, format string, args ...interface{}) { - wrapped := Wrap(err) - - fmt.Printf(format, args...) 
- fmt.Printf("\n") - fmt.Printf("Stack:\n\n%s\n", wrapped.ErrorStack()) -} - -func init() { - if format == "" { - format = defaultFormat - } - - if levelName == "" { - levelName = defaultLevelName - } - - if includeNouns != "" { - for _, noun := range strings.Split(includeNouns, ",") { - AddIncludeFilter(noun) - } - } - - if excludeNouns != "" { - for _, noun := range strings.Split(excludeNouns, ",") { - AddExcludeFilter(noun) - } - } - - if excludeBypassLevelName != "" { - var found bool - if excludeBypassLevel, found = LevelNameMap[excludeBypassLevelName]; found == false { - panic(ErrExcludeLevelNameInvalid) - } - } -} diff --git a/vendor/github.com/dsoprea/go-photoshop-info-format/.MODULE_ROOT b/vendor/github.com/dsoprea/go-photoshop-info-format/.MODULE_ROOT deleted file mode 100644 index e69de29bb..000000000 diff --git a/vendor/github.com/dsoprea/go-photoshop-info-format/.travis.yml b/vendor/github.com/dsoprea/go-photoshop-info-format/.travis.yml deleted file mode 100644 index 710e46b39..000000000 --- a/vendor/github.com/dsoprea/go-photoshop-info-format/.travis.yml +++ /dev/null @@ -1,14 +0,0 @@ -language: go -go: - - master - - stable - - "1.13" - - "1.12" -env: - - GO111MODULE=on -install: - - go get -t ./... - - go get github.com/mattn/goveralls -script: - - go test -v ./... 
- - goveralls -v -service=travis-ci diff --git a/vendor/github.com/dsoprea/go-photoshop-info-format/LICENSE b/vendor/github.com/dsoprea/go-photoshop-info-format/LICENSE deleted file mode 100644 index d92c04268..000000000 --- a/vendor/github.com/dsoprea/go-photoshop-info-format/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2020 Dustin Oprea - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
diff --git a/vendor/github.com/dsoprea/go-photoshop-info-format/README.md b/vendor/github.com/dsoprea/go-photoshop-info-format/README.md deleted file mode 100644 index abbfca67a..000000000 --- a/vendor/github.com/dsoprea/go-photoshop-info-format/README.md +++ /dev/null @@ -1,8 +0,0 @@ -[![Build Status](https://travis-ci.org/dsoprea/go-photoshop-info-format.svg?branch=master)](https://travis-ci.org/dsoprea/go-photoshop-info-format) -[![Coverage Status](https://coveralls.io/repos/github/dsoprea/go-photoshop-info-format/badge.svg?branch=master)](https://coveralls.io/github/dsoprea/go-photoshop-info-format?branch=master) -[![Go Report Card](https://goreportcard.com/badge/github.com/dsoprea/go-photoshop-info-format)](https://goreportcard.com/report/github.com/dsoprea/go-photoshop-info-format) -[![GoDoc](https://godoc.org/github.com/dsoprea/go-photoshop-info-format?status.svg)](https://godoc.org/github.com/dsoprea/go-photoshop-info-format) - -# Overview - -This is a minimal Photoshop format implementation to allow IPTC data to be extracted from a JPEG image. This project primarily services [go-jpeg-image-structure](https://github.com/dsoprea/go-jpeg-image-structure). diff --git a/vendor/github.com/dsoprea/go-photoshop-info-format/info.go b/vendor/github.com/dsoprea/go-photoshop-info-format/info.go deleted file mode 100644 index 7f17fa6c0..000000000 --- a/vendor/github.com/dsoprea/go-photoshop-info-format/info.go +++ /dev/null @@ -1,119 +0,0 @@ -package photoshopinfo - -import ( - "fmt" - "io" - - "encoding/binary" - - "github.com/dsoprea/go-logging" -) - -var ( - defaultByteOrder = binary.BigEndian -) - -// Photoshop30InfoRecord is the data for one parsed Photoshop-info record. -type Photoshop30InfoRecord struct { - // RecordType is the record-type. - RecordType string - - // ImageResourceId is the image resource-ID. - ImageResourceId uint16 - - // Name is the name of the record. It is optional and will be an empty- - // string if not present. 
- Name string - - // Data is the raw record data. - Data []byte -} - -// String returns a descriptive string. -func (pir Photoshop30InfoRecord) String() string { - return fmt.Sprintf("RECORD-TYPE=[%s] IMAGE-RESOURCE-ID=[0x%04x] NAME=[%s] DATA-SIZE=(%d)", pir.RecordType, pir.ImageResourceId, pir.Name, len(pir.Data)) -} - -// ReadPhotoshop30InfoRecord parses a single photoshop-info record. -func ReadPhotoshop30InfoRecord(r io.Reader) (pir Photoshop30InfoRecord, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - recordType := make([]byte, 4) - _, err = io.ReadFull(r, recordType) - if err != nil { - if err == io.EOF { - return pir, err - } - - log.Panic(err) - } - - // TODO(dustin): Move BigEndian to constant/config. - - irId := uint16(0) - err = binary.Read(r, defaultByteOrder, &irId) - log.PanicIf(err) - - nameSize := uint8(0) - err = binary.Read(r, defaultByteOrder, &nameSize) - log.PanicIf(err) - - // Add an extra byte if the two length+data size is odd to make the total - // bytes read even. - doAddPadding := (1+nameSize)%2 == 1 - if doAddPadding == true { - nameSize++ - } - - name := make([]byte, nameSize) - _, err = io.ReadFull(r, name) - log.PanicIf(err) - - // If the last byte is padding, truncate it. - if doAddPadding == true { - name = name[:nameSize-1] - } - - dataSize := uint32(0) - err = binary.Read(r, defaultByteOrder, &dataSize) - log.PanicIf(err) - - data := make([]byte, dataSize+dataSize%2) - _, err = io.ReadFull(r, data) - log.PanicIf(err) - - data = data[:dataSize] - - pir = Photoshop30InfoRecord{ - RecordType: string(recordType), - ImageResourceId: irId, - Name: string(name), - Data: data, - } - - return pir, nil -} - -// ReadPhotoshop30Info parses a sequence of photoship-info records from the stream. 
-func ReadPhotoshop30Info(r io.Reader) (pirIndex map[uint16]Photoshop30InfoRecord, err error) { - pirIndex = make(map[uint16]Photoshop30InfoRecord) - - for { - pir, err := ReadPhotoshop30InfoRecord(r) - if err != nil { - if err == io.EOF { - break - } - - log.Panic(err) - } - - pirIndex[pir.ImageResourceId] = pir - } - - return pirIndex, nil -} diff --git a/vendor/github.com/dsoprea/go-photoshop-info-format/testing_common.go b/vendor/github.com/dsoprea/go-photoshop-info-format/testing_common.go deleted file mode 100644 index 681b117ec..000000000 --- a/vendor/github.com/dsoprea/go-photoshop-info-format/testing_common.go +++ /dev/null @@ -1,73 +0,0 @@ -package photoshopinfo - -import ( - "os" - "path" - - "github.com/dsoprea/go-logging" -) - -var ( - testDataRelFilepath = "photoshop.data" -) - -var ( - moduleRootPath = "" - assetsPath = "" -) - -// GetModuleRootPath returns the root-path of the module. -func GetModuleRootPath() string { - if moduleRootPath == "" { - moduleRootPath = os.Getenv("PHOTOSHOPINFO_MODULE_ROOT_PATH") - if moduleRootPath != "" { - return moduleRootPath - } - - currentWd, err := os.Getwd() - log.PanicIf(err) - - currentPath := currentWd - visited := make([]string, 0) - - for { - tryStampFilepath := path.Join(currentPath, ".MODULE_ROOT") - - _, err := os.Stat(tryStampFilepath) - if err != nil && os.IsNotExist(err) != true { - log.Panic(err) - } else if err == nil { - break - } - - visited = append(visited, tryStampFilepath) - - currentPath = path.Dir(currentPath) - if currentPath == "/" { - log.Panicf("could not find module-root: %v", visited) - } - } - - moduleRootPath = currentPath - } - - return moduleRootPath -} - -// GetTestAssetsPath returns the path of the test-assets. -func GetTestAssetsPath() string { - if assetsPath == "" { - moduleRootPath := GetModuleRootPath() - assetsPath = path.Join(moduleRootPath, "assets") - } - - return assetsPath -} - -// GetTestDataFilepath returns the file-path of the common test-data. 
-func GetTestDataFilepath() string { - assetsPath := GetTestAssetsPath() - filepath := path.Join(assetsPath, testDataRelFilepath) - - return filepath -} diff --git a/vendor/github.com/dsoprea/go-utility/v2/LICENSE b/vendor/github.com/dsoprea/go-utility/v2/LICENSE deleted file mode 100644 index 8941063e1..000000000 --- a/vendor/github.com/dsoprea/go-utility/v2/LICENSE +++ /dev/null @@ -1,7 +0,0 @@ -Copyright 2019 Random Ingenuity InformationWorks - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/dsoprea/go-utility/v2/filesystem/README.md b/vendor/github.com/dsoprea/go-utility/v2/filesystem/README.md deleted file mode 100644 index eb03fea7c..000000000 --- a/vendor/github.com/dsoprea/go-utility/v2/filesystem/README.md +++ /dev/null @@ -1,64 +0,0 @@ -[![GoDoc](https://godoc.org/github.com/dsoprea/go-utility/filesystem?status.svg)](https://godoc.org/github.com/dsoprea/go-utility/filesystem) -[![Build Status](https://travis-ci.org/dsoprea/go-utility.svg?branch=master)](https://travis-ci.org/dsoprea/go-utility) -[![Coverage Status](https://coveralls.io/repos/github/dsoprea/go-utility/badge.svg?branch=master)](https://coveralls.io/github/dsoprea/go-utility?branch=master) -[![Go Report Card](https://goreportcard.com/badge/github.com/dsoprea/go-utility)](https://goreportcard.com/report/github.com/dsoprea/go-utility) - -# bounceback - -An `io.ReadSeeker` and `io.WriteSeeker` that returns to the right place before -reading or writing. Useful when the same file resource is being reused for reads -or writes throughout that file. - -# list_files - -A recursive path walker that supports filters. - -# seekable_buffer - -A memory structure that satisfies `io.ReadWriteSeeker`. - -# copy_bytes_between_positions - -Given an `io.ReadWriteSeeker`, copy N bytes from one position to an earlier -position. - -# read_counter, write_counter - -Wrap `io.Reader` and `io.Writer` structs in order to report how many bytes were -transferred. - -# readseekwritecloser - -Provides the ReadWriteSeekCloser interface that combines a RWS and a Closer. -Also provides a no-op wrapper to augment a plain RWS with a closer. - -# boundedreadwriteseek - -Wraps a ReadWriteSeeker such that no seeks can be at an offset less than a -specific-offset. - -# calculateseek - -Provides a reusable function with which to calculate seek offsets. - -# progress_wrapper - -Provides `io.Reader` and `io.Writer` wrappers that also trigger callbacks after -each call. 
The reader wrapper also invokes the callback upon EOF. - -# does_exist - -Check whether a file/directory exists using a file-path. - -# graceful_copy - -Do a copy but correctly handle short-writes and reads that might return a non- -zero read count *and* EOF. - -# readseeker_to_readerat - -A wrapper that allows an `io.ReadSeeker` to be used as a `io.ReaderAt`. - -# simplefileinfo - -An implementation of `os.FileInfo` to support testing. diff --git a/vendor/github.com/dsoprea/go-utility/v2/filesystem/bounceback.go b/vendor/github.com/dsoprea/go-utility/v2/filesystem/bounceback.go deleted file mode 100644 index 1112a10ef..000000000 --- a/vendor/github.com/dsoprea/go-utility/v2/filesystem/bounceback.go +++ /dev/null @@ -1,273 +0,0 @@ -package rifs - -import ( - "fmt" - "io" - - "github.com/dsoprea/go-logging" -) - -// BouncebackStats describes operation counts. -type BouncebackStats struct { - reads int - writes int - seeks int - syncs int -} - -func (bbs BouncebackStats) String() string { - return fmt.Sprintf( - "BouncebackStats", - bbs.reads, bbs.writes, bbs.seeks, bbs.syncs) -} - -type bouncebackBase struct { - currentPosition int64 - - stats BouncebackStats -} - -// Position returns the position that we're supposed to be at. -func (bb *bouncebackBase) Position() int64 { - - // TODO(dustin): Add test - - return bb.currentPosition -} - -// StatsReads returns the number of reads that have been attempted. -func (bb *bouncebackBase) StatsReads() int { - - // TODO(dustin): Add test - - return bb.stats.reads -} - -// StatsWrites returns the number of write operations. -func (bb *bouncebackBase) StatsWrites() int { - - // TODO(dustin): Add test - - return bb.stats.writes -} - -// StatsSeeks returns the number of seeks. -func (bb *bouncebackBase) StatsSeeks() int { - - // TODO(dustin): Add test - - return bb.stats.seeks -} - -// StatsSyncs returns the number of corrective seeks ("bounce-backs"). 
-func (bb *bouncebackBase) StatsSyncs() int { - - // TODO(dustin): Add test - - return bb.stats.syncs -} - -// Seek does a seek to an arbitrary place in the `io.ReadSeeker`. -func (bb *bouncebackBase) seek(s io.Seeker, offset int64, whence int) (newPosition int64, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // If the seek is relative, make sure we're where we're supposed to be *first*. - if whence != io.SeekStart { - err = bb.checkPosition(s) - log.PanicIf(err) - } - - bb.stats.seeks++ - - newPosition, err = s.Seek(offset, whence) - log.PanicIf(err) - - // Update our internal tracking. - bb.currentPosition = newPosition - - return newPosition, nil -} - -func (bb *bouncebackBase) checkPosition(s io.Seeker) (err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // Make sure we're where we're supposed to be. - - // This should have no overhead, and enables us to collect stats. - realCurrentPosition, err := s.Seek(0, io.SeekCurrent) - log.PanicIf(err) - - if realCurrentPosition != bb.currentPosition { - bb.stats.syncs++ - - _, err = s.Seek(bb.currentPosition, io.SeekStart) - log.PanicIf(err) - } - - return nil -} - -// BouncebackReader wraps a ReadSeeker, keeps track of our position, and -// seeks back to it before writing. This allows an underlying ReadWriteSeeker -// with an unstable position can still be used for a prolonged series of writes. -type BouncebackReader struct { - rs io.ReadSeeker - - bouncebackBase -} - -// NewBouncebackReader returns a `*BouncebackReader` struct. 
-func NewBouncebackReader(rs io.ReadSeeker) (br *BouncebackReader, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - initialPosition, err := rs.Seek(0, io.SeekCurrent) - log.PanicIf(err) - - bb := bouncebackBase{ - currentPosition: initialPosition, - } - - br = &BouncebackReader{ - rs: rs, - bouncebackBase: bb, - } - - return br, nil -} - -// Seek does a seek to an arbitrary place in the `io.ReadSeeker`. -func (br *BouncebackReader) Seek(offset int64, whence int) (newPosition int64, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - newPosition, err = br.bouncebackBase.seek(br.rs, offset, whence) - log.PanicIf(err) - - return newPosition, nil -} - -// Seek does a standard read. -func (br *BouncebackReader) Read(p []byte) (n int, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - br.bouncebackBase.stats.reads++ - - err = br.bouncebackBase.checkPosition(br.rs) - log.PanicIf(err) - - // Do read. - - n, err = br.rs.Read(p) - if err != nil { - if err == io.EOF { - return 0, io.EOF - } - - log.Panic(err) - } - - // Update our internal tracking. - br.bouncebackBase.currentPosition += int64(n) - - return n, nil -} - -// BouncebackWriter wraps a WriteSeeker, keeps track of our position, and -// seeks back to it before writing. This allows an underlying ReadWriteSeeker -// with an unstable position can still be used for a prolonged series of writes. -type BouncebackWriter struct { - ws io.WriteSeeker - - bouncebackBase -} - -// NewBouncebackWriter returns a new `BouncebackWriter` struct. 
-func NewBouncebackWriter(ws io.WriteSeeker) (bw *BouncebackWriter, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - initialPosition, err := ws.Seek(0, io.SeekCurrent) - log.PanicIf(err) - - bb := bouncebackBase{ - currentPosition: initialPosition, - } - - bw = &BouncebackWriter{ - ws: ws, - bouncebackBase: bb, - } - - return bw, nil -} - -// Seek puts us at a specific position in the internal writer for the next -// write/seek. -func (bw *BouncebackWriter) Seek(offset int64, whence int) (newPosition int64, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - newPosition, err = bw.bouncebackBase.seek(bw.ws, offset, whence) - log.PanicIf(err) - - return newPosition, nil -} - -// Write performs a write against the internal `WriteSeeker` starting at the -// position that we're supposed to be at. -func (bw *BouncebackWriter) Write(p []byte) (n int, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - bw.bouncebackBase.stats.writes++ - - // Make sure we're where we're supposed to be. - - realCurrentPosition, err := bw.ws.Seek(0, io.SeekCurrent) - log.PanicIf(err) - - if realCurrentPosition != bw.bouncebackBase.currentPosition { - bw.bouncebackBase.stats.seeks++ - - _, err = bw.ws.Seek(bw.bouncebackBase.currentPosition, io.SeekStart) - log.PanicIf(err) - } - - // Do write. - - n, err = bw.ws.Write(p) - log.PanicIf(err) - - // Update our internal tracking. 
- bw.bouncebackBase.currentPosition += int64(n) - - return n, nil -} diff --git a/vendor/github.com/dsoprea/go-utility/v2/filesystem/boundedreadwriteseekcloser.go b/vendor/github.com/dsoprea/go-utility/v2/filesystem/boundedreadwriteseekcloser.go deleted file mode 100644 index 3d2e840fa..000000000 --- a/vendor/github.com/dsoprea/go-utility/v2/filesystem/boundedreadwriteseekcloser.go +++ /dev/null @@ -1,95 +0,0 @@ -package rifs - -import ( - "io" - - "github.com/dsoprea/go-logging" -) - -// BoundedReadWriteSeekCloser wraps a RWS that is also a closer with boundaries. -// This proxies the RWS methods to the inner BRWS inside. -type BoundedReadWriteSeekCloser struct { - io.Closer - *BoundedReadWriteSeeker -} - -// NewBoundedReadWriteSeekCloser returns a new BoundedReadWriteSeekCloser. -func NewBoundedReadWriteSeekCloser(rwsc ReadWriteSeekCloser, minimumOffset int64, staticFileSize int64) (brwsc *BoundedReadWriteSeekCloser, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - bs, err := NewBoundedReadWriteSeeker(rwsc, minimumOffset, staticFileSize) - log.PanicIf(err) - - brwsc = &BoundedReadWriteSeekCloser{ - Closer: rwsc, - BoundedReadWriteSeeker: bs, - } - - return brwsc, nil -} - -// Seek forwards calls to the inner RWS. -func (rwsc *BoundedReadWriteSeekCloser) Seek(offset int64, whence int) (newOffset int64, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - newOffset, err = rwsc.BoundedReadWriteSeeker.Seek(offset, whence) - log.PanicIf(err) - - return newOffset, nil -} - -// Read forwards calls to the inner RWS. 
-func (rwsc *BoundedReadWriteSeekCloser) Read(buffer []byte) (readCount int, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - readCount, err = rwsc.BoundedReadWriteSeeker.Read(buffer) - if err != nil { - if err == io.EOF { - return 0, err - } - - log.Panic(err) - } - - return readCount, nil -} - -// Write forwards calls to the inner RWS. -func (rwsc *BoundedReadWriteSeekCloser) Write(buffer []byte) (writtenCount int, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - writtenCount, err = rwsc.BoundedReadWriteSeeker.Write(buffer) - log.PanicIf(err) - - return writtenCount, nil -} - -// Close forwards calls to the inner RWS. -func (rwsc *BoundedReadWriteSeekCloser) Close() (err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - err = rwsc.Closer.Close() - log.PanicIf(err) - - return nil -} diff --git a/vendor/github.com/dsoprea/go-utility/v2/filesystem/boundedreadwriteseeker.go b/vendor/github.com/dsoprea/go-utility/v2/filesystem/boundedreadwriteseeker.go deleted file mode 100644 index d29657b05..000000000 --- a/vendor/github.com/dsoprea/go-utility/v2/filesystem/boundedreadwriteseeker.go +++ /dev/null @@ -1,156 +0,0 @@ -package rifs - -import ( - "errors" - "io" - "os" - - "github.com/dsoprea/go-logging" -) - -var ( - // ErrSeekBeyondBound is returned when a seek is requested beyond the - // statically-given file-size. No writes or seeks beyond boundaries are - // supported with a statically-given file size. - ErrSeekBeyondBound = errors.New("seek beyond boundary") -) - -// BoundedReadWriteSeeker is a thin filter that ensures that no seeks can be done -// to offsets smaller than the one we were given. This supports libraries that -// might be expecting to read from the front of the stream being used on data -// that is in the middle of a stream instead. 
-type BoundedReadWriteSeeker struct { - io.ReadWriteSeeker - - currentOffset int64 - minimumOffset int64 - - staticFileSize int64 -} - -// NewBoundedReadWriteSeeker returns a new BoundedReadWriteSeeker instance. -func NewBoundedReadWriteSeeker(rws io.ReadWriteSeeker, minimumOffset int64, staticFileSize int64) (brws *BoundedReadWriteSeeker, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - if minimumOffset < 0 { - log.Panicf("BoundedReadWriteSeeker minimum offset must be zero or larger: (%d)", minimumOffset) - } - - // We'll always started at a relative offset of zero. - _, err = rws.Seek(minimumOffset, os.SEEK_SET) - log.PanicIf(err) - - brws = &BoundedReadWriteSeeker{ - ReadWriteSeeker: rws, - - currentOffset: 0, - minimumOffset: minimumOffset, - - staticFileSize: staticFileSize, - } - - return brws, nil -} - -// Seek moves the offset to the given offset. Prevents offset from ever being -// moved left of `brws.minimumOffset`. -func (brws *BoundedReadWriteSeeker) Seek(offset int64, whence int) (updatedOffset int64, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - fileSize := brws.staticFileSize - - // If we weren't given a static file-size, look it up whenever it is needed. - if whence == os.SEEK_END && fileSize == 0 { - realFileSizeRaw, err := brws.ReadWriteSeeker.Seek(0, os.SEEK_END) - log.PanicIf(err) - - fileSize = realFileSizeRaw - brws.minimumOffset - } - - updatedOffset, err = CalculateSeek(brws.currentOffset, offset, whence, fileSize) - log.PanicIf(err) - - if brws.staticFileSize != 0 && updatedOffset > brws.staticFileSize { - //updatedOffset = int64(brws.staticFileSize) - - // NOTE(dustin): Presumably, this will only be disruptive to writes that are beyond the boundaries, which, if we're being used at all, should already account for the boundary and prevent this error from ever happening. 
So, time will tell how disruptive this is. - return 0, ErrSeekBeyondBound - } - - if updatedOffset != brws.currentOffset { - updatedRealOffset := updatedOffset + brws.minimumOffset - - _, err = brws.ReadWriteSeeker.Seek(updatedRealOffset, os.SEEK_SET) - log.PanicIf(err) - - brws.currentOffset = updatedOffset - } - - return updatedOffset, nil -} - -// Read forwards writes to the inner RWS. -func (brws *BoundedReadWriteSeeker) Read(buffer []byte) (readCount int, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - if brws.staticFileSize != 0 { - availableCount := brws.staticFileSize - brws.currentOffset - if availableCount == 0 { - return 0, io.EOF - } - - if int64(len(buffer)) > availableCount { - buffer = buffer[:availableCount] - } - } - - readCount, err = brws.ReadWriteSeeker.Read(buffer) - brws.currentOffset += int64(readCount) - - if err != nil { - if err == io.EOF { - return 0, err - } - - log.Panic(err) - } - - return readCount, nil -} - -// Write forwards writes to the inner RWS. -func (brws *BoundedReadWriteSeeker) Write(buffer []byte) (writtenCount int, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - if brws.staticFileSize != 0 { - log.Panicf("writes can not be performed if a static file-size was given") - } - - writtenCount, err = brws.ReadWriteSeeker.Write(buffer) - brws.currentOffset += int64(writtenCount) - - log.PanicIf(err) - - return writtenCount, nil -} - -// MinimumOffset returns the configured minimum-offset. 
-func (brws *BoundedReadWriteSeeker) MinimumOffset() int64 { - return brws.minimumOffset -} diff --git a/vendor/github.com/dsoprea/go-utility/v2/filesystem/calculate_seek.go b/vendor/github.com/dsoprea/go-utility/v2/filesystem/calculate_seek.go deleted file mode 100644 index cd59d727c..000000000 --- a/vendor/github.com/dsoprea/go-utility/v2/filesystem/calculate_seek.go +++ /dev/null @@ -1,52 +0,0 @@ -package rifs - -import ( - "io" - "os" - - "github.com/dsoprea/go-logging" -) - -// SeekType is a convenience type to associate the different seek-types with -// printable descriptions. -type SeekType int - -// String returns a descriptive string. -func (n SeekType) String() string { - if n == io.SeekCurrent { - return "SEEK-CURRENT" - } else if n == io.SeekEnd { - return "SEEK-END" - } else if n == io.SeekStart { - return "SEEK-START" - } - - log.Panicf("unknown seek-type: (%d)", n) - return "" -} - -// CalculateSeek calculates an offset in a file-stream given the parameters. -func CalculateSeek(currentOffset int64, delta int64, whence int, fileSize int64) (finalOffset int64, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - finalOffset = 0 - } - }() - - if whence == os.SEEK_SET { - finalOffset = delta - } else if whence == os.SEEK_CUR { - finalOffset = currentOffset + delta - } else if whence == os.SEEK_END { - finalOffset = fileSize + delta - } else { - log.Panicf("whence not valid: (%d)", whence) - } - - if finalOffset < 0 { - finalOffset = 0 - } - - return finalOffset, nil -} diff --git a/vendor/github.com/dsoprea/go-utility/v2/filesystem/common.go b/vendor/github.com/dsoprea/go-utility/v2/filesystem/common.go deleted file mode 100644 index 256333d40..000000000 --- a/vendor/github.com/dsoprea/go-utility/v2/filesystem/common.go +++ /dev/null @@ -1,15 +0,0 @@ -package rifs - -import ( - "os" - "path" -) - -var ( - appPath string -) - -func init() { - goPath := os.Getenv("GOPATH") - appPath = path.Join(goPath, 
"src", "github.com", "dsoprea", "go-utility", "filesystem") -} diff --git a/vendor/github.com/dsoprea/go-utility/v2/filesystem/copy_bytes_between_positions.go b/vendor/github.com/dsoprea/go-utility/v2/filesystem/copy_bytes_between_positions.go deleted file mode 100644 index 89ee9a92c..000000000 --- a/vendor/github.com/dsoprea/go-utility/v2/filesystem/copy_bytes_between_positions.go +++ /dev/null @@ -1,40 +0,0 @@ -package rifs - -import ( - "io" - "os" - - "github.com/dsoprea/go-logging" -) - -// CopyBytesBetweenPositions will copy bytes from one position in the given RWS -// to an earlier position in the same RWS. -func CopyBytesBetweenPositions(rws io.ReadWriteSeeker, fromPosition, toPosition int64, count int) (n int, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - if fromPosition <= toPosition { - log.Panicf("from position (%d) must be larger than to position (%d)", fromPosition, toPosition) - } - - br, err := NewBouncebackReader(rws) - log.PanicIf(err) - - _, err = br.Seek(fromPosition, os.SEEK_SET) - log.PanicIf(err) - - bw, err := NewBouncebackWriter(rws) - log.PanicIf(err) - - _, err = bw.Seek(toPosition, os.SEEK_SET) - log.PanicIf(err) - - written, err := io.CopyN(bw, br, int64(count)) - log.PanicIf(err) - - n = int(written) - return n, nil -} diff --git a/vendor/github.com/dsoprea/go-utility/v2/filesystem/does_exist.go b/vendor/github.com/dsoprea/go-utility/v2/filesystem/does_exist.go deleted file mode 100644 index f5e6cd20a..000000000 --- a/vendor/github.com/dsoprea/go-utility/v2/filesystem/does_exist.go +++ /dev/null @@ -1,19 +0,0 @@ -package rifs - -import ( - "os" -) - -// DoesExist returns true if we can open the given file/path without error. We -// can't simply use `os.IsNotExist()` because we'll get a different error when -// the parent directory doesn't exist, and really the only important thing is if -// it exists *and* it's readable. 
-func DoesExist(filepath string) bool { - f, err := os.Open(filepath) - if err != nil { - return false - } - - f.Close() - return true -} diff --git a/vendor/github.com/dsoprea/go-utility/v2/filesystem/graceful_copy.go b/vendor/github.com/dsoprea/go-utility/v2/filesystem/graceful_copy.go deleted file mode 100644 index 8705e5fe0..000000000 --- a/vendor/github.com/dsoprea/go-utility/v2/filesystem/graceful_copy.go +++ /dev/null @@ -1,54 +0,0 @@ -package rifs - -import ( - "fmt" - "io" -) - -const ( - defaultCopyBufferSize = 1024 * 1024 -) - -// GracefulCopy willcopy while enduring lesser normal issues. -// -// - We'll ignore EOF if the read byte-count is more than zero. Only an EOF when -// zero bytes were read will terminate the loop. -// -// - Ignore short-writes. If less bytes were written than the bytes that were -// given, we'll keep trying until done. -func GracefulCopy(w io.Writer, r io.Reader, buffer []byte) (copyCount int, err error) { - if buffer == nil { - buffer = make([]byte, defaultCopyBufferSize) - } - - for { - readCount, err := r.Read(buffer) - if err != nil { - if err != io.EOF { - err = fmt.Errorf("read error: %s", err.Error()) - return 0, err - } - - // Only break on EOF if no bytes were actually read. 
- if readCount == 0 { - break - } - } - - writeBuffer := buffer[:readCount] - - for len(writeBuffer) > 0 { - writtenCount, err := w.Write(writeBuffer) - if err != nil { - err = fmt.Errorf("write error: %s", err.Error()) - return 0, err - } - - writeBuffer = writeBuffer[writtenCount:] - } - - copyCount += readCount - } - - return copyCount, nil -} diff --git a/vendor/github.com/dsoprea/go-utility/v2/filesystem/list_files.go b/vendor/github.com/dsoprea/go-utility/v2/filesystem/list_files.go deleted file mode 100644 index bcdbd67cb..000000000 --- a/vendor/github.com/dsoprea/go-utility/v2/filesystem/list_files.go +++ /dev/null @@ -1,143 +0,0 @@ -package rifs - -import ( - "io" - "os" - "path" - - "github.com/dsoprea/go-logging" -) - -// FileListFilterPredicate is the callback predicate used for filtering. -type FileListFilterPredicate func(parent string, child os.FileInfo) (hit bool, err error) - -// VisitedFile is one visited file. -type VisitedFile struct { - Filepath string - Info os.FileInfo - Index int -} - -// ListFiles feeds a continuous list of files from a recursive folder scan. An -// optional predicate can be provided in order to filter. When done, the -// `filesC` channel is closed. If there's an error, the `errC` channel will -// receive it. -func ListFiles(rootPath string, cb FileListFilterPredicate) (filesC chan VisitedFile, count int, errC chan error) { - defer func() { - if state := recover(); state != nil { - err := log.Wrap(state.(error)) - log.Panic(err) - } - }() - - // Make sure the path exists. - - f, err := os.Open(rootPath) - log.PanicIf(err) - - f.Close() - - // Do our thing. - - filesC = make(chan VisitedFile, 100) - errC = make(chan error, 1) - index := 0 - - go func() { - defer func() { - if state := recover(); state != nil { - err := log.Wrap(state.(error)) - errC <- err - } - }() - - queue := []string{rootPath} - for len(queue) > 0 { - // Pop the next folder to process off the queue. 
- var thisPath string - thisPath, queue = queue[0], queue[1:] - - // Skip path if a symlink. - - fi, err := os.Lstat(thisPath) - log.PanicIf(err) - - if (fi.Mode() & os.ModeSymlink) > 0 { - continue - } - - // Read information. - - folderF, err := os.Open(thisPath) - if err != nil { - errC <- log.Wrap(err) - return - } - - // Iterate through children. - - for { - children, err := folderF.Readdir(1000) - if err == io.EOF { - break - } else if err != nil { - errC <- log.Wrap(err) - return - } - - for _, child := range children { - filepath := path.Join(thisPath, child.Name()) - - // Skip if a file symlink. - - fi, err := os.Lstat(filepath) - log.PanicIf(err) - - if (fi.Mode() & os.ModeSymlink) > 0 { - continue - } - - // If a predicate was given, determine if this child will be - // left behind. - if cb != nil { - hit, err := cb(thisPath, child) - - if err != nil { - errC <- log.Wrap(err) - return - } - - if hit == false { - continue - } - } - - index++ - - // Push file to channel. - - vf := VisitedFile{ - Filepath: filepath, - Info: child, - Index: index, - } - - filesC <- vf - - // If a folder, queue for later processing. - - if child.IsDir() == true { - queue = append(queue, filepath) - } - } - } - - folderF.Close() - } - - close(filesC) - close(errC) - }() - - return filesC, index, errC -} diff --git a/vendor/github.com/dsoprea/go-utility/v2/filesystem/progress_wrapper.go b/vendor/github.com/dsoprea/go-utility/v2/filesystem/progress_wrapper.go deleted file mode 100644 index 0a064c53d..000000000 --- a/vendor/github.com/dsoprea/go-utility/v2/filesystem/progress_wrapper.go +++ /dev/null @@ -1,93 +0,0 @@ -package rifs - -import ( - "io" - "time" - - "github.com/dsoprea/go-logging" -) - -// ProgressFunc receives progress updates. -type ProgressFunc func(n int, duration time.Duration, isEof bool) error - -// WriteProgressWrapper wraps a reader and calls a callback after each read with -// count and duration info. 
-type WriteProgressWrapper struct { - w io.Writer - progressCb ProgressFunc -} - -// NewWriteProgressWrapper returns a new WPW instance. -func NewWriteProgressWrapper(w io.Writer, progressCb ProgressFunc) io.Writer { - return &WriteProgressWrapper{ - w: w, - progressCb: progressCb, - } -} - -// Write does a write and calls the callback. -func (wpw *WriteProgressWrapper) Write(buffer []byte) (n int, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - startAt := time.Now() - - n, err = wpw.w.Write(buffer) - log.PanicIf(err) - - duration := time.Since(startAt) - - err = wpw.progressCb(n, duration, false) - log.PanicIf(err) - - return n, nil -} - -// ReadProgressWrapper wraps a reader and calls a callback after each read with -// count and duration info. -type ReadProgressWrapper struct { - r io.Reader - progressCb ProgressFunc -} - -// NewReadProgressWrapper returns a new RPW instance. -func NewReadProgressWrapper(r io.Reader, progressCb ProgressFunc) io.Reader { - return &ReadProgressWrapper{ - r: r, - progressCb: progressCb, - } -} - -// Read reads data and calls the callback. 
-func (rpw *ReadProgressWrapper) Read(buffer []byte) (n int, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - startAt := time.Now() - - n, err = rpw.r.Read(buffer) - - duration := time.Since(startAt) - - if err != nil { - if err == io.EOF { - errInner := rpw.progressCb(n, duration, true) - log.PanicIf(errInner) - - return n, err - } - - log.Panic(err) - } - - err = rpw.progressCb(n, duration, false) - log.PanicIf(err) - - return n, nil -} diff --git a/vendor/github.com/dsoprea/go-utility/v2/filesystem/read_counter.go b/vendor/github.com/dsoprea/go-utility/v2/filesystem/read_counter.go deleted file mode 100644 index d878ca4e6..000000000 --- a/vendor/github.com/dsoprea/go-utility/v2/filesystem/read_counter.go +++ /dev/null @@ -1,36 +0,0 @@ -package rifs - -import ( - "io" -) - -// ReadCounter proxies read requests and maintains a counter of bytes read. -type ReadCounter struct { - r io.Reader - counter int -} - -// NewReadCounter returns a new `ReadCounter` struct wrapping a `Reader`. -func NewReadCounter(r io.Reader) *ReadCounter { - return &ReadCounter{ - r: r, - } -} - -// Count returns the total number of bytes read. -func (rc *ReadCounter) Count() int { - return rc.counter -} - -// Reset resets the counter to zero. -func (rc *ReadCounter) Reset() { - rc.counter = 0 -} - -// Read forwards a read to the underlying `Reader` while bumping the counter. 
-func (rc *ReadCounter) Read(b []byte) (n int, err error) { - n, err = rc.r.Read(b) - rc.counter += n - - return n, err -} diff --git a/vendor/github.com/dsoprea/go-utility/v2/filesystem/readseeker_to_readerat.go b/vendor/github.com/dsoprea/go-utility/v2/filesystem/readseeker_to_readerat.go deleted file mode 100644 index 3f3ec44dd..000000000 --- a/vendor/github.com/dsoprea/go-utility/v2/filesystem/readseeker_to_readerat.go +++ /dev/null @@ -1,63 +0,0 @@ -package rifs - -import ( - "io" - - "github.com/dsoprea/go-logging" -) - -// ReadSeekerToReaderAt is a wrapper that allows a ReadSeeker to masquerade as a -// ReaderAt. -type ReadSeekerToReaderAt struct { - rs io.ReadSeeker -} - -// NewReadSeekerToReaderAt returns a new ReadSeekerToReaderAt instance. -func NewReadSeekerToReaderAt(rs io.ReadSeeker) *ReadSeekerToReaderAt { - return &ReadSeekerToReaderAt{ - rs: rs, - } -} - -// ReadAt is a wrapper that satisfies the ReaderAt interface. -// -// Note that a requirement of ReadAt is that it doesn't have an effect on the -// offset in the underlying resource as well as that concurrent calls can be -// made to it. Since we're capturing the current offset in the underlying -// resource and then seeking back to it before returning, it is the -// responsibility of the caller to serialize (i.e. use a mutex with) these -// requests in order to eliminate race-conditions in the parallel-usage -// scenario. -// -// Note also that, since ReadAt() is going to be called on a particular -// instance, that instance is going to internalize a file resource, that file- -// resource is provided by the OS, and [most] OSs are only gonna support one -// file-position per resource, locking is already going to be a necessary -// internal semantic of a ReaderAt implementation. 
-func (rstra *ReadSeekerToReaderAt) ReadAt(p []byte, offset int64) (n int, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - originalOffset, err := rstra.rs.Seek(0, io.SeekCurrent) - log.PanicIf(err) - - defer func() { - _, err := rstra.rs.Seek(originalOffset, io.SeekStart) - log.PanicIf(err) - }() - - _, err = rstra.rs.Seek(offset, io.SeekStart) - log.PanicIf(err) - - // Note that all errors will be wrapped, here. The usage of this method is - // such that typically no specific errors would be expected as part of - // normal operation (in which case we'd check for those first and return - // them directly). - n, err = io.ReadFull(rstra.rs, p) - log.PanicIf(err) - - return n, nil -} diff --git a/vendor/github.com/dsoprea/go-utility/v2/filesystem/readwriteseekcloser.go b/vendor/github.com/dsoprea/go-utility/v2/filesystem/readwriteseekcloser.go deleted file mode 100644 index c583a8024..000000000 --- a/vendor/github.com/dsoprea/go-utility/v2/filesystem/readwriteseekcloser.go +++ /dev/null @@ -1,29 +0,0 @@ -package rifs - -import ( - "io" -) - -// ReadWriteSeekCloser satisfies `io.ReadWriteSeeker` and `io.Closer` -// interfaces. -type ReadWriteSeekCloser interface { - io.ReadWriteSeeker - io.Closer -} - -type readWriteSeekNoopCloser struct { - io.ReadWriteSeeker -} - -// ReadWriteSeekNoopCloser wraps a `io.ReadWriteSeeker` with a no-op Close() -// call. 
-func ReadWriteSeekNoopCloser(rws io.ReadWriteSeeker) ReadWriteSeekCloser { - return readWriteSeekNoopCloser{ - ReadWriteSeeker: rws, - } -} - -// Close does nothing but allows the RWS to satisfy `io.Closer`.:wq -func (readWriteSeekNoopCloser) Close() (err error) { - return nil -} diff --git a/vendor/github.com/dsoprea/go-utility/v2/filesystem/seekable_buffer.go b/vendor/github.com/dsoprea/go-utility/v2/filesystem/seekable_buffer.go deleted file mode 100644 index 5d41bb5df..000000000 --- a/vendor/github.com/dsoprea/go-utility/v2/filesystem/seekable_buffer.go +++ /dev/null @@ -1,146 +0,0 @@ -package rifs - -import ( - "io" - "os" - - "github.com/dsoprea/go-logging" -) - -// SeekableBuffer is a simple memory structure that satisfies -// `io.ReadWriteSeeker`. -type SeekableBuffer struct { - data []byte - position int64 -} - -// NewSeekableBuffer is a factory that returns a `*SeekableBuffer`. -func NewSeekableBuffer() *SeekableBuffer { - data := make([]byte, 0) - - return &SeekableBuffer{ - data: data, - } -} - -// NewSeekableBufferWithBytes is a factory that returns a `*SeekableBuffer`. -func NewSeekableBufferWithBytes(originalData []byte) *SeekableBuffer { - data := make([]byte, len(originalData)) - copy(data, originalData) - - return &SeekableBuffer{ - data: data, - } -} - -func len64(data []byte) int64 { - return int64(len(data)) -} - -// Bytes returns the underlying slice. -func (sb *SeekableBuffer) Bytes() []byte { - return sb.data -} - -// Len returns the number of bytes currently stored. -func (sb *SeekableBuffer) Len() int { - return len(sb.data) -} - -// Write does a standard write to the internal slice. -func (sb *SeekableBuffer) Write(p []byte) (n int, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // The current position we're already at is past the end of the data we - // actually have. Extend our buffer up to our current position. 
- if sb.position > len64(sb.data) { - extra := make([]byte, sb.position-len64(sb.data)) - sb.data = append(sb.data, extra...) - } - - positionFromEnd := len64(sb.data) - sb.position - tailCount := positionFromEnd - len64(p) - - var tailBytes []byte - if tailCount > 0 { - tailBytes = sb.data[len64(sb.data)-tailCount:] - sb.data = append(sb.data[:sb.position], p...) - } else { - sb.data = append(sb.data[:sb.position], p...) - } - - if tailBytes != nil { - sb.data = append(sb.data, tailBytes...) - } - - dataSize := len64(p) - sb.position += dataSize - - return int(dataSize), nil -} - -// Read does a standard read against the internal slice. -func (sb *SeekableBuffer) Read(p []byte) (n int, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - if sb.position >= len64(sb.data) { - return 0, io.EOF - } - - n = copy(p, sb.data[sb.position:]) - sb.position += int64(n) - - return n, nil -} - -// Truncate either chops or extends the internal buffer. -func (sb *SeekableBuffer) Truncate(size int64) (err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - sizeInt := int(size) - if sizeInt < len(sb.data)-1 { - sb.data = sb.data[:sizeInt] - } else { - new := make([]byte, sizeInt-len(sb.data)) - sb.data = append(sb.data, new...) - } - - return nil -} - -// Seek does a standard seek on the internal slice. 
-func (sb *SeekableBuffer) Seek(offset int64, whence int) (n int64, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - if whence == os.SEEK_SET { - sb.position = offset - } else if whence == os.SEEK_END { - sb.position = len64(sb.data) + offset - } else if whence == os.SEEK_CUR { - sb.position += offset - } else { - log.Panicf("seek whence is not valid: (%d)", whence) - } - - if sb.position < 0 { - sb.position = 0 - } - - return sb.position, nil -} diff --git a/vendor/github.com/dsoprea/go-utility/v2/filesystem/simplefileinfo.go b/vendor/github.com/dsoprea/go-utility/v2/filesystem/simplefileinfo.go deleted file mode 100644 index a227b0b00..000000000 --- a/vendor/github.com/dsoprea/go-utility/v2/filesystem/simplefileinfo.go +++ /dev/null @@ -1,69 +0,0 @@ -package rifs - -import ( - "os" - "time" -) - -// SimpleFileInfo is a simple `os.FileInfo` implementation useful for testing -// with the bare minimum. -type SimpleFileInfo struct { - filename string - isDir bool - size int64 - mode os.FileMode - modTime time.Time -} - -// NewSimpleFileInfoWithFile returns a new file-specific SimpleFileInfo. -func NewSimpleFileInfoWithFile(filename string, size int64, mode os.FileMode, modTime time.Time) *SimpleFileInfo { - return &SimpleFileInfo{ - filename: filename, - isDir: false, - size: size, - mode: mode, - modTime: modTime, - } -} - -// NewSimpleFileInfoWithDirectory returns a new directory-specific -// SimpleFileInfo. -func NewSimpleFileInfoWithDirectory(filename string, modTime time.Time) *SimpleFileInfo { - return &SimpleFileInfo{ - filename: filename, - isDir: true, - mode: os.ModeDir, - modTime: modTime, - } -} - -// Name returns the base name of the file. -func (sfi *SimpleFileInfo) Name() string { - return sfi.filename -} - -// Size returns the length in bytes for regular files; system-dependent for -// others. 
-func (sfi *SimpleFileInfo) Size() int64 { - return sfi.size -} - -// Mode returns the file mode bits. -func (sfi *SimpleFileInfo) Mode() os.FileMode { - return sfi.mode -} - -// ModTime returns the modification time. -func (sfi *SimpleFileInfo) ModTime() time.Time { - return sfi.modTime -} - -// IsDir returns true if a directory. -func (sfi *SimpleFileInfo) IsDir() bool { - return sfi.isDir -} - -// Sys returns internal state. -func (sfi *SimpleFileInfo) Sys() interface{} { - return nil -} diff --git a/vendor/github.com/dsoprea/go-utility/v2/filesystem/utility.go b/vendor/github.com/dsoprea/go-utility/v2/filesystem/utility.go deleted file mode 100644 index 4b33b41a9..000000000 --- a/vendor/github.com/dsoprea/go-utility/v2/filesystem/utility.go +++ /dev/null @@ -1,17 +0,0 @@ -package rifs - -import ( - "io" - "os" - - "github.com/dsoprea/go-logging" -) - -// GetOffset returns the current offset of the Seeker and just panics if unable -// to find it. -func GetOffset(s io.Seeker) int64 { - offsetRaw, err := s.Seek(0, os.SEEK_CUR) - log.PanicIf(err) - - return offsetRaw -} diff --git a/vendor/github.com/dsoprea/go-utility/v2/filesystem/write_counter.go b/vendor/github.com/dsoprea/go-utility/v2/filesystem/write_counter.go deleted file mode 100644 index dc39901d5..000000000 --- a/vendor/github.com/dsoprea/go-utility/v2/filesystem/write_counter.go +++ /dev/null @@ -1,36 +0,0 @@ -package rifs - -import ( - "io" -) - -// WriteCounter proxies write requests and maintains a counter of bytes written. -type WriteCounter struct { - w io.Writer - counter int -} - -// NewWriteCounter returns a new `WriteCounter` struct wrapping a `Writer`. -func NewWriteCounter(w io.Writer) *WriteCounter { - return &WriteCounter{ - w: w, - } -} - -// Count returns the total number of bytes read. -func (wc *WriteCounter) Count() int { - return wc.counter -} - -// Reset resets the counter to zero. 
-func (wc *WriteCounter) Reset() { - wc.counter = 0 -} - -// Write forwards a write to the underlying `Writer` while bumping the counter. -func (wc *WriteCounter) Write(b []byte) (n int, err error) { - n, err = wc.w.Write(b) - wc.counter += n - - return n, err -} diff --git a/vendor/github.com/dsoprea/go-utility/v2/image/README.md b/vendor/github.com/dsoprea/go-utility/v2/image/README.md deleted file mode 100644 index 1509ff666..000000000 --- a/vendor/github.com/dsoprea/go-utility/v2/image/README.md +++ /dev/null @@ -1,9 +0,0 @@ -[![GoDoc](https://godoc.org/github.com/dsoprea/go-utility/image?status.svg)](https://godoc.org/github.com/dsoprea/go-utility/image) -[![Build Status](https://travis-ci.org/dsoprea/go-utility.svg?branch=master)](https://travis-ci.org/dsoprea/go-utility) -[![Coverage Status](https://coveralls.io/repos/github/dsoprea/go-utility/badge.svg?branch=master)](https://coveralls.io/github/dsoprea/go-utility?branch=master) -[![Go Report Card](https://goreportcard.com/badge/github.com/dsoprea/go-utility)](https://goreportcard.com/report/github.com/dsoprea/go-utility) - -# media_parser_type - -Common image-parsing interfaces. Used for JPEG, PNG, and HEIC parsers used by -go-exif-knife. diff --git a/vendor/github.com/dsoprea/go-utility/v2/image/media_parser_type.go b/vendor/github.com/dsoprea/go-utility/v2/image/media_parser_type.go deleted file mode 100644 index 8776a1fdd..000000000 --- a/vendor/github.com/dsoprea/go-utility/v2/image/media_parser_type.go +++ /dev/null @@ -1,34 +0,0 @@ -package riimage - -import ( - "io" - - "github.com/dsoprea/go-exif/v3" -) - -// MediaContext is an accessor that knows how to extract specific metadata from -// the media. -type MediaContext interface { - // Exif returns the EXIF's root IFD. - Exif() (rootIfd *exif.Ifd, data []byte, err error) -} - -// MediaParser prescribes a specific structure for the parser types that are -// imported from other projects. 
We don't use it directly, but we use this to -// impose structure. -type MediaParser interface { - // Parse parses a stream using an `io.ReadSeeker`. `mc` should *actually* be - // a `ExifContext`. - Parse(r io.ReadSeeker, size int) (mc MediaContext, err error) - - // ParseFile parses a stream using a file. `mc` should *actually* be a - // `ExifContext`. - ParseFile(filepath string) (mc MediaContext, err error) - - // ParseBytes parses a stream direct from bytes. `mc` should *actually* be - // a `ExifContext`. - ParseBytes(data []byte) (mc MediaContext, err error) - - // Parses the data to determine if it's a compatible format. - LooksLikeFormat(data []byte) bool -} diff --git a/vendor/github.com/go-errors/errors/.travis.yml b/vendor/github.com/go-errors/errors/.travis.yml deleted file mode 100644 index 77a6bccf7..000000000 --- a/vendor/github.com/go-errors/errors/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -language: go - -go: - - "1.8.x" - - "1.10.x" - - "1.13.x" - - "1.14.x" - - "1.16.x" diff --git a/vendor/github.com/go-errors/errors/LICENSE.MIT b/vendor/github.com/go-errors/errors/LICENSE.MIT deleted file mode 100644 index c9a5b2eeb..000000000 --- a/vendor/github.com/go-errors/errors/LICENSE.MIT +++ /dev/null @@ -1,7 +0,0 @@ -Copyright (c) 2015 Conrad Irwin - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/go-errors/errors/README.md b/vendor/github.com/go-errors/errors/README.md deleted file mode 100644 index 2ee13f117..000000000 --- a/vendor/github.com/go-errors/errors/README.md +++ /dev/null @@ -1,81 +0,0 @@ -go-errors/errors -================ - -[![Build Status](https://travis-ci.org/go-errors/errors.svg?branch=master)](https://travis-ci.org/go-errors/errors) - -Package errors adds stacktrace support to errors in go. - -This is particularly useful when you want to understand the state of execution -when an error was returned unexpectedly. - -It provides the type \*Error which implements the standard golang error -interface, so you can use this library interchangably with code that is -expecting a normal error return. 
- -Usage ------ - -Full documentation is available on -[godoc](https://godoc.org/github.com/go-errors/errors), but here's a simple -example: - -```go -package crashy - -import "github.com/go-errors/errors" - -var Crashed = errors.Errorf("oh dear") - -func Crash() error { - return errors.New(Crashed) -} -``` - -This can be called as follows: - -```go -package main - -import ( - "crashy" - "fmt" - "github.com/go-errors/errors" -) - -func main() { - err := crashy.Crash() - if err != nil { - if errors.Is(err, crashy.Crashed) { - fmt.Println(err.(*errors.Error).ErrorStack()) - } else { - panic(err) - } - } -} -``` - -Meta-fu -------- - -This package was original written to allow reporting to -[Bugsnag](https://bugsnag.com/) from -[bugsnag-go](https://github.com/bugsnag/bugsnag-go), but after I found similar -packages by Facebook and Dropbox, it was moved to one canonical location so -everyone can benefit. - -This package is licensed under the MIT license, see LICENSE.MIT for details. - - -## Changelog -* v1.1.0 updated to use go1.13's standard-library errors.Is method instead of == in errors.Is -* v1.2.0 added `errors.As` from the standard library. -* v1.3.0 *BREAKING* updated error methods to return `error` instead of `*Error`. -> Code that needs access to the underlying `*Error` can use the new errors.AsError(e) -> ``` -> // before -> errors.New(err).ErrorStack() -> // after ->. errors.AsError(errors.Wrap(err)).ErrorStack() -> ``` -* v1.4.0 *BREAKING* v1.4.0 reverted all changes from v1.3.0 and is identical to v1.2.0 -* v1.4.1 no code change, but now without an unnecessary cover.out file. diff --git a/vendor/github.com/go-errors/errors/error.go b/vendor/github.com/go-errors/errors/error.go deleted file mode 100644 index ccbc2e427..000000000 --- a/vendor/github.com/go-errors/errors/error.go +++ /dev/null @@ -1,209 +0,0 @@ -// Package errors provides errors that have stack-traces. 
-// -// This is particularly useful when you want to understand the -// state of execution when an error was returned unexpectedly. -// -// It provides the type *Error which implements the standard -// golang error interface, so you can use this library interchangably -// with code that is expecting a normal error return. -// -// For example: -// -// package crashy -// -// import "github.com/go-errors/errors" -// -// var Crashed = errors.Errorf("oh dear") -// -// func Crash() error { -// return errors.New(Crashed) -// } -// -// This can be called as follows: -// -// package main -// -// import ( -// "crashy" -// "fmt" -// "github.com/go-errors/errors" -// ) -// -// func main() { -// err := crashy.Crash() -// if err != nil { -// if errors.Is(err, crashy.Crashed) { -// fmt.Println(err.(*errors.Error).ErrorStack()) -// } else { -// panic(err) -// } -// } -// } -// -// This package was original written to allow reporting to Bugsnag, -// but after I found similar packages by Facebook and Dropbox, it -// was moved to one canonical location so everyone can benefit. -package errors - -import ( - "bytes" - "fmt" - "reflect" - "runtime" -) - -// The maximum number of stackframes on any error. -var MaxStackDepth = 50 - -// Error is an error with an attached stacktrace. It can be used -// wherever the builtin error interface is expected. -type Error struct { - Err error - stack []uintptr - frames []StackFrame - prefix string -} - -// New makes an Error from the given value. If that value is already an -// error then it will be used directly, if not, it will be passed to -// fmt.Errorf("%v"). The stacktrace will point to the line of code that -// called New. 
-func New(e interface{}) *Error { - var err error - - switch e := e.(type) { - case error: - err = e - default: - err = fmt.Errorf("%v", e) - } - - stack := make([]uintptr, MaxStackDepth) - length := runtime.Callers(2, stack[:]) - return &Error{ - Err: err, - stack: stack[:length], - } -} - -// Wrap makes an Error from the given value. If that value is already an -// error then it will be used directly, if not, it will be passed to -// fmt.Errorf("%v"). The skip parameter indicates how far up the stack -// to start the stacktrace. 0 is from the current call, 1 from its caller, etc. -func Wrap(e interface{}, skip int) *Error { - if e == nil { - return nil - } - - var err error - - switch e := e.(type) { - case *Error: - return e - case error: - err = e - default: - err = fmt.Errorf("%v", e) - } - - stack := make([]uintptr, MaxStackDepth) - length := runtime.Callers(2+skip, stack[:]) - return &Error{ - Err: err, - stack: stack[:length], - } -} - -// WrapPrefix makes an Error from the given value. If that value is already an -// error then it will be used directly, if not, it will be passed to -// fmt.Errorf("%v"). The prefix parameter is used to add a prefix to the -// error message when calling Error(). The skip parameter indicates how far -// up the stack to start the stacktrace. 0 is from the current call, -// 1 from its caller, etc. -func WrapPrefix(e interface{}, prefix string, skip int) *Error { - if e == nil { - return nil - } - - err := Wrap(e, 1+skip) - - if err.prefix != "" { - prefix = fmt.Sprintf("%s: %s", prefix, err.prefix) - } - - return &Error{ - Err: err.Err, - stack: err.stack, - prefix: prefix, - } - -} - -// Errorf creates a new error with the given message. You can use it -// as a drop-in replacement for fmt.Errorf() to provide descriptive -// errors in return values. -func Errorf(format string, a ...interface{}) *Error { - return Wrap(fmt.Errorf(format, a...), 1) -} - -// Error returns the underlying error's message. 
-func (err *Error) Error() string { - - msg := err.Err.Error() - if err.prefix != "" { - msg = fmt.Sprintf("%s: %s", err.prefix, msg) - } - - return msg -} - -// Stack returns the callstack formatted the same way that go does -// in runtime/debug.Stack() -func (err *Error) Stack() []byte { - buf := bytes.Buffer{} - - for _, frame := range err.StackFrames() { - buf.WriteString(frame.String()) - } - - return buf.Bytes() -} - -// Callers satisfies the bugsnag ErrorWithCallerS() interface -// so that the stack can be read out. -func (err *Error) Callers() []uintptr { - return err.stack -} - -// ErrorStack returns a string that contains both the -// error message and the callstack. -func (err *Error) ErrorStack() string { - return err.TypeName() + " " + err.Error() + "\n" + string(err.Stack()) -} - -// StackFrames returns an array of frames containing information about the -// stack. -func (err *Error) StackFrames() []StackFrame { - if err.frames == nil { - err.frames = make([]StackFrame, len(err.stack)) - - for i, pc := range err.stack { - err.frames[i] = NewStackFrame(pc) - } - } - - return err.frames -} - -// TypeName returns the type this error. e.g. *errors.stringError. -func (err *Error) TypeName() string { - if _, ok := err.Err.(uncaughtPanic); ok { - return "panic" - } - return reflect.TypeOf(err.Err).String() -} - -// Return the wrapped error (implements api for As function). -func (err *Error) Unwrap() error { - return err.Err -} diff --git a/vendor/github.com/go-errors/errors/error_1_13.go b/vendor/github.com/go-errors/errors/error_1_13.go deleted file mode 100644 index 0af2fc806..000000000 --- a/vendor/github.com/go-errors/errors/error_1_13.go +++ /dev/null @@ -1,31 +0,0 @@ -// +build go1.13 - -package errors - -import ( - baseErrors "errors" -) - -// find error in any wrapped error -func As(err error, target interface{}) bool { - return baseErrors.As(err, target) -} - -// Is detects whether the error is equal to a given error. 
Errors -// are considered equal by this function if they are matched by errors.Is -// or if their contained errors are matched through errors.Is -func Is(e error, original error) bool { - if baseErrors.Is(e, original) { - return true - } - - if e, ok := e.(*Error); ok { - return Is(e.Err, original) - } - - if original, ok := original.(*Error); ok { - return Is(e, original.Err) - } - - return false -} diff --git a/vendor/github.com/go-errors/errors/error_backward.go b/vendor/github.com/go-errors/errors/error_backward.go deleted file mode 100644 index 80b0695e7..000000000 --- a/vendor/github.com/go-errors/errors/error_backward.go +++ /dev/null @@ -1,57 +0,0 @@ -// +build !go1.13 - -package errors - -import ( - "reflect" -) - -type unwrapper interface { - Unwrap() error -} - -// As assigns error or any wrapped error to the value target points -// to. If there is no value of the target type of target As returns -// false. -func As(err error, target interface{}) bool { - targetType := reflect.TypeOf(target) - - for { - errType := reflect.TypeOf(err) - - if errType == nil { - return false - } - - if reflect.PtrTo(errType) == targetType { - reflect.ValueOf(target).Elem().Set(reflect.ValueOf(err)) - return true - } - - wrapped, ok := err.(unwrapper) - if ok { - err = wrapped.Unwrap() - } else { - return false - } - } -} - -// Is detects whether the error is equal to a given error. Errors -// are considered equal by this function if they are the same object, -// or if they both contain the same error inside an errors.Error. 
-func Is(e error, original error) bool { - if e == original { - return true - } - - if e, ok := e.(*Error); ok { - return Is(e.Err, original) - } - - if original, ok := original.(*Error); ok { - return Is(e, original.Err) - } - - return false -} diff --git a/vendor/github.com/go-errors/errors/parse_panic.go b/vendor/github.com/go-errors/errors/parse_panic.go deleted file mode 100644 index cc37052d7..000000000 --- a/vendor/github.com/go-errors/errors/parse_panic.go +++ /dev/null @@ -1,127 +0,0 @@ -package errors - -import ( - "strconv" - "strings" -) - -type uncaughtPanic struct{ message string } - -func (p uncaughtPanic) Error() string { - return p.message -} - -// ParsePanic allows you to get an error object from the output of a go program -// that panicked. This is particularly useful with https://github.com/mitchellh/panicwrap. -func ParsePanic(text string) (*Error, error) { - lines := strings.Split(text, "\n") - - state := "start" - - var message string - var stack []StackFrame - - for i := 0; i < len(lines); i++ { - line := lines[i] - - if state == "start" { - if strings.HasPrefix(line, "panic: ") { - message = strings.TrimPrefix(line, "panic: ") - state = "seek" - } else { - return nil, Errorf("bugsnag.panicParser: Invalid line (no prefix): %s", line) - } - - } else if state == "seek" { - if strings.HasPrefix(line, "goroutine ") && strings.HasSuffix(line, "[running]:") { - state = "parsing" - } - - } else if state == "parsing" { - if line == "" { - state = "done" - break - } - createdBy := false - if strings.HasPrefix(line, "created by ") { - line = strings.TrimPrefix(line, "created by ") - createdBy = true - } - - i++ - - if i >= len(lines) { - return nil, Errorf("bugsnag.panicParser: Invalid line (unpaired): %s", line) - } - - frame, err := parsePanicFrame(line, lines[i], createdBy) - if err != nil { - return nil, err - } - - stack = append(stack, *frame) - if createdBy { - state = "done" - break - } - } - } - - if state == "done" || state == "parsing" { - 
return &Error{Err: uncaughtPanic{message}, frames: stack}, nil - } - return nil, Errorf("could not parse panic: %v", text) -} - -// The lines we're passing look like this: -// -// main.(*foo).destruct(0xc208067e98) -// /0/go/src/github.com/bugsnag/bugsnag-go/pan/main.go:22 +0x151 -func parsePanicFrame(name string, line string, createdBy bool) (*StackFrame, error) { - idx := strings.LastIndex(name, "(") - if idx == -1 && !createdBy { - return nil, Errorf("bugsnag.panicParser: Invalid line (no call): %s", name) - } - if idx != -1 { - name = name[:idx] - } - pkg := "" - - if lastslash := strings.LastIndex(name, "/"); lastslash >= 0 { - pkg += name[:lastslash] + "/" - name = name[lastslash+1:] - } - if period := strings.Index(name, "."); period >= 0 { - pkg += name[:period] - name = name[period+1:] - } - - name = strings.Replace(name, "·", ".", -1) - - if !strings.HasPrefix(line, "\t") { - return nil, Errorf("bugsnag.panicParser: Invalid line (no tab): %s", line) - } - - idx = strings.LastIndex(line, ":") - if idx == -1 { - return nil, Errorf("bugsnag.panicParser: Invalid line (no line number): %s", line) - } - file := line[1:idx] - - number := line[idx+1:] - if idx = strings.Index(number, " +"); idx > -1 { - number = number[:idx] - } - - lno, err := strconv.ParseInt(number, 10, 32) - if err != nil { - return nil, Errorf("bugsnag.panicParser: Invalid line (bad line number): %s", line) - } - - return &StackFrame{ - File: file, - LineNumber: int(lno), - Package: pkg, - Name: name, - }, nil -} diff --git a/vendor/github.com/go-errors/errors/stackframe.go b/vendor/github.com/go-errors/errors/stackframe.go deleted file mode 100644 index f420849d2..000000000 --- a/vendor/github.com/go-errors/errors/stackframe.go +++ /dev/null @@ -1,114 +0,0 @@ -package errors - -import ( - "bufio" - "bytes" - "fmt" - "os" - "runtime" - "strings" -) - -// A StackFrame contains all necessary information about to generate a line -// in a callstack. 
-type StackFrame struct { - // The path to the file containing this ProgramCounter - File string - // The LineNumber in that file - LineNumber int - // The Name of the function that contains this ProgramCounter - Name string - // The Package that contains this function - Package string - // The underlying ProgramCounter - ProgramCounter uintptr -} - -// NewStackFrame popoulates a stack frame object from the program counter. -func NewStackFrame(pc uintptr) (frame StackFrame) { - - frame = StackFrame{ProgramCounter: pc} - if frame.Func() == nil { - return - } - frame.Package, frame.Name = packageAndName(frame.Func()) - - // pc -1 because the program counters we use are usually return addresses, - // and we want to show the line that corresponds to the function call - frame.File, frame.LineNumber = frame.Func().FileLine(pc - 1) - return - -} - -// Func returns the function that contained this frame. -func (frame *StackFrame) Func() *runtime.Func { - if frame.ProgramCounter == 0 { - return nil - } - return runtime.FuncForPC(frame.ProgramCounter) -} - -// String returns the stackframe formatted in the same way as go does -// in runtime/debug.Stack() -func (frame *StackFrame) String() string { - str := fmt.Sprintf("%s:%d (0x%x)\n", frame.File, frame.LineNumber, frame.ProgramCounter) - - source, err := frame.SourceLine() - if err != nil { - return str - } - - return str + fmt.Sprintf("\t%s: %s\n", frame.Name, source) -} - -// SourceLine gets the line of code (from File and Line) of the original source if possible. 
-func (frame *StackFrame) SourceLine() (string, error) { - if frame.LineNumber <= 0 { - return "???", nil - } - - file, err := os.Open(frame.File) - if err != nil { - return "", New(err) - } - defer file.Close() - - scanner := bufio.NewScanner(file) - currentLine := 1 - for scanner.Scan() { - if currentLine == frame.LineNumber { - return string(bytes.Trim(scanner.Bytes(), " \t")), nil - } - currentLine++ - } - if err := scanner.Err(); err != nil { - return "", New(err) - } - - return "???", nil -} - -func packageAndName(fn *runtime.Func) (string, string) { - name := fn.Name() - pkg := "" - - // The name includes the path name to the package, which is unnecessary - // since the file name is already included. Plus, it has center dots. - // That is, we see - // runtime/debug.*T·ptrmethod - // and want - // *T.ptrmethod - // Since the package path might contains dots (e.g. code.google.com/...), - // we first remove the path prefix if there is one. - if lastslash := strings.LastIndex(name, "/"); lastslash >= 0 { - pkg += name[:lastslash] + "/" - name = name[lastslash+1:] - } - if period := strings.Index(name, "."); period >= 0 { - pkg += name[:period] - name = name[period+1:] - } - - name = strings.Replace(name, "·", ".", -1) - return pkg, name -} diff --git a/vendor/github.com/go-xmlfmt/xmlfmt/LICENSE b/vendor/github.com/go-xmlfmt/xmlfmt/LICENSE deleted file mode 100644 index 890776ab7..000000000 --- a/vendor/github.com/go-xmlfmt/xmlfmt/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2016 go-xmlfmt - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above 
copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/go-xmlfmt/xmlfmt/README.md b/vendor/github.com/go-xmlfmt/xmlfmt/README.md deleted file mode 100644 index da9aa0763..000000000 --- a/vendor/github.com/go-xmlfmt/xmlfmt/README.md +++ /dev/null @@ -1,240 +0,0 @@ -# Go XML Formatter - -[![MIT License](http://img.shields.io/badge/License-MIT-blue.svg)](LICENSE) -[![Go Doc](https://img.shields.io/badge/godoc-reference-4b68a3.svg)](https://godoc.org/github.com/go-xmlfmt/xmlfmt) -[![Go Report Card](https://goreportcard.com/badge/github.com/go-xmlfmt/xmlfmt)](https://goreportcard.com/report/github.com/go-xmlfmt/xmlfmt) -[![Codeship Status](https://codeship.com/projects/c49f02b0-a384-0134-fb20-2e0351080565/status?branch=master)](https://codeship.com/projects/190297) - -## Synopsis - -The Go XML Formatter, xmlfmt, will format the XML string in a readable way. 
- -```go -package main - -import "github.com/go-xmlfmt/xmlfmt" - -func main() { - xmlfmt.NL = "\n" - xml1 := `aSome org-or-otherWouldnt you like to knowPatCalifia` - x := xmlfmt.FormatXML(xml1, "\t", " ") - print(x) - - // If the XML Comments have nested tags in them - xml1 = ` Fred - - 23456 ` - x = xmlfmt.FormatXML(xml1, "", " ", true) - print(x) -} - -``` - -Output: - -```xml - - - a - - - - - - Some org-or-other - - Wouldnt you like to know - - - - Pat - - Califia - - - - - - - - - Fred - - - 23456 - - -``` - -There is no XML decoding and encoding involved, only pure regular expression matching and replacing. So it is much faster than going through decoding and encoding procedures. Moreover, the exact XML source string is preserved, instead of being changed by the encoder. This is why this package exists in the first place. - -Note that - -- the XML is mainly used in Windows environments, thus the default line ending is in Windows' `CRLF` format. To change the default line ending, see the above sample code (first line). -- the case of XML comments nested within XML comments is ***not*** supported. Please avoid them or use any other tools to correct them before using this package. -- don't turn on the `nestedTagsInComments` parameter blindly, as the code has become 10+ times more complicated because of it. 
- -## Command - -To use it on command line, check out [xmlfmt](https://github.com/AntonioSun/xmlfmt): - - -``` -$ xmlfmt -XML Formatter -Version 1.1.0 built on 2021-12-06 -Copyright (C) 2021, Antonio Sun - -The xmlfmt will format the XML string without rewriting the document - -Options: - - -h, --help display help information - -f, --file *The xml file to read from (or stdin) - -p, --prefix each element begins on a new line and this prefix - -i, --indent[= ] indent string for nested elements - -n, --nested nested tags in comments - -$ xmlfmt -f https://pastebin.com/raw/z3euQ5PR - - - - a - - - - - - Some org-or-other - - Wouldnt you like to know - - - - Pat - - Califia - - - - - - -$ xmlfmt -f https://pastebin.com/raw/Zs0qy0qz -n - - - Fred - - - 23456 - - -``` - - -## Justification - -### The format - -The Go XML Formatter is not called XML Beautifier because the result is not *exactly* as what people would expect -- some, but not all, closing tags stays on the same line, just as shown above. Having been looking at the result and thinking over it, I now think it is actually a better way to present it, as those closing tags on the same line are better stay that way in my opinion. I.e., - -When it comes to very big XML strings, which is what I’m dealing every day, saving spaces by not allowing those closing tags taking extra lines is plus instead of negative to me. - -### The alternative - -To format it “properly”, i.e., as what people would normally see, is very hard using pure regular expression. In fact, according to Sam Whited from the go-nuts mlist, - -> Regular expression is, well, regular. This means that they can parse regular grammars, but can't parse context free grammars (like XML). It is actually impossible to use a regex to do this task; it will always be fragile, unfortunately. - -So if the output format is so important to you, then unfortunately you have to go through decoding and encoding procedures. 
But there are some drawbacks as well, as put by James McGill, in http://stackoverflow.com/questions/21117161, besides such method being slow: - -> I like this solution, but am still in search of a Golang XML formatter/prettyprinter that doesn't rewrite the document (other than formatting whitespace). Marshalling or using the Encoder will change namespace declarations. -> -> For example an element like "< ns1:Element />" will be translated to something like '< Element xmlns="http://bla...bla/ns1" >< /Element >' which seems harmless enough except when the intent is to not alter the xml other than formatting. -- James McGill Nov 12 '15 - -Using Sam's code as an example, - -https://play.golang.org/p/JUqQY3WpW5 - -The above code formats the following XML - -```xml - - - - - - 123 - John Brown - - - - -``` - -into this: - -```xml - -
- - - - 123 - John Brown - - - -
-``` - -I know they are syntactically the same, however the problem is that they *look* totally different. - -That's why there is this package, an XML Beautifier that doesn't rewrite the document. - -## Credit - -The credit goes to **diotalevi** from his post at http://www.perlmonks.org/?node_id=261292. - -However, it does not work for all cases. For example, - -```sh -$ echo '
123John Brown
' | perl -pe 's/(?<=>)\s+(?=<)//g; s(<(/?)([^/>]+)(/?)>\s*(?=(".($1&&($4 eq"
-123 -John Brown - - - - -``` - -I simplified the algorithm, and now it should work for all cases: - -```sh -echo '
123John Brown
' | perl -pe 's/(?<=>)\s+(?=<)//g; s(<(/?)([^>]+)(/?)>)($indent+=$3?0:$1?-1:1;"<$1$2$3>"."\n".(" "x$indent))ge' -``` -```xml - -
-
- - - - - 123 - - John Brown - - - -
-``` - -This package is a direct translate from above Perl code into Go, -then further enhanced by @ruandao. diff --git a/vendor/github.com/go-xmlfmt/xmlfmt/xmlfmt.go b/vendor/github.com/go-xmlfmt/xmlfmt/xmlfmt.go deleted file mode 100644 index 8b5a9e422..000000000 --- a/vendor/github.com/go-xmlfmt/xmlfmt/xmlfmt.go +++ /dev/null @@ -1,77 +0,0 @@ -//////////////////////////////////////////////////////////////////////////// -// Porgram: xmlfmt.go -// Purpose: Go XML Beautify from XML string using pure string manipulation -// Authors: Antonio Sun (c) 2016-2021, All rights reserved -//////////////////////////////////////////////////////////////////////////// - -package xmlfmt - -import ( - "html" - "regexp" - "strings" -) - -var ( - reg = regexp.MustCompile(`<([/!]?)([^>]+?)(/?)>`) - // NL is the newline string used in XML output, define for DOS-convenient. - NL = "\r\n" -) - -// FormatXML will (purly) reformat the XML string in a readable way, without any rewriting/altering the structure. -// If your XML Comments have nested tags in them, or you're not 100% sure otherwise, pass `true` as the third parameter to this function. But don't turn it on blindly, as the code has become ten times more complicated because of it. 
-func FormatXML(xmls, prefix, indent string, nestedTagsInComments ...bool) string { - nestedTagsInComment := false - if len(nestedTagsInComments) > 0 { - nestedTagsInComment = nestedTagsInComments[0] - } - reXmlComments := regexp.MustCompile(`(?s)()`) - src := regexp.MustCompile(`(?s)>\s+<`).ReplaceAllString(xmls, "><") - if nestedTagsInComment { - src = reXmlComments.ReplaceAllStringFunc(src, func(m string) string { - parts := reXmlComments.FindStringSubmatch(m) - p2 := regexp.MustCompile(`\r*\n`).ReplaceAllString(parts[2], " ") - return parts[1] + html.EscapeString(p2) + parts[3] - }) - } - rf := replaceTag(prefix, indent) - r := prefix + reg.ReplaceAllStringFunc(src, rf) - if nestedTagsInComment { - r = reXmlComments.ReplaceAllStringFunc(r, func(m string) string { - parts := reXmlComments.FindStringSubmatch(m) - return parts[1] + html.UnescapeString(parts[2]) + parts[3] - }) - } - - return r -} - -// replaceTag returns a closure function to do 's/(?<=>)\s+(?=<)//g; s(<(/?)([^>]+?)(/?)>)($indent+=$3?0:$1?-1:1;"<$1$2$3>"."\n".(" "x$indent))ge' as in Perl -// and deal with comments as well -func replaceTag(prefix, indent string) func(string) string { - indentLevel := 0 - return func(m string) string { - // head elem - if strings.HasPrefix(m, "") { - return NL + prefix + strings.Repeat(indent, indentLevel) + m - } - // comment elem - if strings.HasPrefix(m, " Hi then the interval is empty. -type Interval struct { - Lo, Hi float64 -} - -// EmptyInterval returns an empty interval. -func EmptyInterval() Interval { return Interval{1, 0} } - -// IntervalFromPoint returns an interval representing a single point. -func IntervalFromPoint(p float64) Interval { return Interval{p, p} } - -// IsEmpty reports whether the interval is empty. -func (i Interval) IsEmpty() bool { return i.Lo > i.Hi } - -// Equal returns true iff the interval contains the same points as oi. 
-func (i Interval) Equal(oi Interval) bool { - return i == oi || i.IsEmpty() && oi.IsEmpty() -} - -// Center returns the midpoint of the interval. -// It is undefined for empty intervals. -func (i Interval) Center() float64 { return 0.5 * (i.Lo + i.Hi) } - -// Length returns the length of the interval. -// The length of an empty interval is negative. -func (i Interval) Length() float64 { return i.Hi - i.Lo } - -// Contains returns true iff the interval contains p. -func (i Interval) Contains(p float64) bool { return i.Lo <= p && p <= i.Hi } - -// ContainsInterval returns true iff the interval contains oi. -func (i Interval) ContainsInterval(oi Interval) bool { - if oi.IsEmpty() { - return true - } - return i.Lo <= oi.Lo && oi.Hi <= i.Hi -} - -// InteriorContains returns true iff the interval strictly contains p. -func (i Interval) InteriorContains(p float64) bool { - return i.Lo < p && p < i.Hi -} - -// InteriorContainsInterval returns true iff the interval strictly contains oi. -func (i Interval) InteriorContainsInterval(oi Interval) bool { - if oi.IsEmpty() { - return true - } - return i.Lo < oi.Lo && oi.Hi < i.Hi -} - -// Intersects returns true iff the interval contains any points in common with oi. -func (i Interval) Intersects(oi Interval) bool { - if i.Lo <= oi.Lo { - return oi.Lo <= i.Hi && oi.Lo <= oi.Hi // oi.Lo ∈ i and oi is not empty - } - return i.Lo <= oi.Hi && i.Lo <= i.Hi // i.Lo ∈ oi and i is not empty -} - -// InteriorIntersects returns true iff the interior of the interval contains any points in common with oi, including the latter's boundary. -func (i Interval) InteriorIntersects(oi Interval) bool { - return oi.Lo < i.Hi && i.Lo < oi.Hi && i.Lo < i.Hi && oi.Lo <= oi.Hi -} - -// Intersection returns the interval containing all points common to i and j. -func (i Interval) Intersection(j Interval) Interval { - // Empty intervals do not need to be special-cased. 
- return Interval{ - Lo: math.Max(i.Lo, j.Lo), - Hi: math.Min(i.Hi, j.Hi), - } -} - -// AddPoint returns the interval expanded so that it contains the given point. -func (i Interval) AddPoint(p float64) Interval { - if i.IsEmpty() { - return Interval{p, p} - } - if p < i.Lo { - return Interval{p, i.Hi} - } - if p > i.Hi { - return Interval{i.Lo, p} - } - return i -} - -// ClampPoint returns the closest point in the interval to the given point "p". -// The interval must be non-empty. -func (i Interval) ClampPoint(p float64) float64 { - return math.Max(i.Lo, math.Min(i.Hi, p)) -} - -// Expanded returns an interval that has been expanded on each side by margin. -// If margin is negative, then the function shrinks the interval on -// each side by margin instead. The resulting interval may be empty. Any -// expansion of an empty interval remains empty. -func (i Interval) Expanded(margin float64) Interval { - if i.IsEmpty() { - return i - } - return Interval{i.Lo - margin, i.Hi + margin} -} - -// Union returns the smallest interval that contains this interval and the given interval. -func (i Interval) Union(other Interval) Interval { - if i.IsEmpty() { - return other - } - if other.IsEmpty() { - return i - } - return Interval{math.Min(i.Lo, other.Lo), math.Max(i.Hi, other.Hi)} -} - -func (i Interval) String() string { return fmt.Sprintf("[%.7f, %.7f]", i.Lo, i.Hi) } - -const ( - // epsilon is a small number that represents a reasonable level of noise between two - // values that can be considered to be equal. - epsilon = 1e-15 - // dblEpsilon is a smaller number for values that require more precision. - // This is the C++ DBL_EPSILON equivalent. - dblEpsilon = 2.220446049250313e-16 -) - -// ApproxEqual reports whether the interval can be transformed into the -// given interval by moving each endpoint a small distance. 
-// The empty interval is considered to be positioned arbitrarily on the -// real line, so any interval with a small enough length will match -// the empty interval. -func (i Interval) ApproxEqual(other Interval) bool { - if i.IsEmpty() { - return other.Length() <= 2*epsilon - } - if other.IsEmpty() { - return i.Length() <= 2*epsilon - } - return math.Abs(other.Lo-i.Lo) <= epsilon && - math.Abs(other.Hi-i.Hi) <= epsilon -} - -// DirectedHausdorffDistance returns the Hausdorff distance to the given interval. For two -// intervals x and y, this distance is defined as -// h(x, y) = max_{p in x} min_{q in y} d(p, q). -func (i Interval) DirectedHausdorffDistance(other Interval) float64 { - if i.IsEmpty() { - return 0 - } - if other.IsEmpty() { - return math.Inf(1) - } - return math.Max(0, math.Max(i.Hi-other.Hi, other.Lo-i.Lo)) -} diff --git a/vendor/github.com/golang/geo/r2/doc.go b/vendor/github.com/golang/geo/r2/doc.go deleted file mode 100644 index 05b155543..000000000 --- a/vendor/github.com/golang/geo/r2/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package r2 implements types and functions for working with geometry in ℝ². - -See package s2 for a more detailed overview. 
-*/ -package r2 diff --git a/vendor/github.com/golang/geo/r2/rect.go b/vendor/github.com/golang/geo/r2/rect.go deleted file mode 100644 index 495545bba..000000000 --- a/vendor/github.com/golang/geo/r2/rect.go +++ /dev/null @@ -1,255 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package r2 - -import ( - "fmt" - "math" - - "github.com/golang/geo/r1" -) - -// Point represents a point in ℝ². -type Point struct { - X, Y float64 -} - -// Add returns the sum of p and op. -func (p Point) Add(op Point) Point { return Point{p.X + op.X, p.Y + op.Y} } - -// Sub returns the difference of p and op. -func (p Point) Sub(op Point) Point { return Point{p.X - op.X, p.Y - op.Y} } - -// Mul returns the scalar product of p and m. -func (p Point) Mul(m float64) Point { return Point{m * p.X, m * p.Y} } - -// Ortho returns a counterclockwise orthogonal point with the same norm. -func (p Point) Ortho() Point { return Point{-p.Y, p.X} } - -// Dot returns the dot product between p and op. -func (p Point) Dot(op Point) float64 { return p.X*op.X + p.Y*op.Y } - -// Cross returns the cross product of p and op. -func (p Point) Cross(op Point) float64 { return p.X*op.Y - p.Y*op.X } - -// Norm returns the vector's norm. -func (p Point) Norm() float64 { return math.Hypot(p.X, p.Y) } - -// Normalize returns a unit point in the same direction as p. 
-func (p Point) Normalize() Point { - if p.X == 0 && p.Y == 0 { - return p - } - return p.Mul(1 / p.Norm()) -} - -func (p Point) String() string { return fmt.Sprintf("(%.12f, %.12f)", p.X, p.Y) } - -// Rect represents a closed axis-aligned rectangle in the (x,y) plane. -type Rect struct { - X, Y r1.Interval -} - -// RectFromPoints constructs a rect that contains the given points. -func RectFromPoints(pts ...Point) Rect { - // Because the default value on interval is 0,0, we need to manually - // define the interval from the first point passed in as our starting - // interval, otherwise we end up with the case of passing in - // Point{0.2, 0.3} and getting the starting Rect of {0, 0.2}, {0, 0.3} - // instead of the Rect {0.2, 0.2}, {0.3, 0.3} which is not correct. - if len(pts) == 0 { - return Rect{} - } - - r := Rect{ - X: r1.Interval{Lo: pts[0].X, Hi: pts[0].X}, - Y: r1.Interval{Lo: pts[0].Y, Hi: pts[0].Y}, - } - - for _, p := range pts[1:] { - r = r.AddPoint(p) - } - return r -} - -// RectFromCenterSize constructs a rectangle with the given center and size. -// Both dimensions of size must be non-negative. -func RectFromCenterSize(center, size Point) Rect { - return Rect{ - r1.Interval{Lo: center.X - size.X/2, Hi: center.X + size.X/2}, - r1.Interval{Lo: center.Y - size.Y/2, Hi: center.Y + size.Y/2}, - } -} - -// EmptyRect constructs the canonical empty rectangle. Use IsEmpty() to test -// for empty rectangles, since they have more than one representation. A Rect{} -// is not the same as the EmptyRect. -func EmptyRect() Rect { - return Rect{r1.EmptyInterval(), r1.EmptyInterval()} -} - -// IsValid reports whether the rectangle is valid. -// This requires the width to be empty iff the height is empty. -func (r Rect) IsValid() bool { - return r.X.IsEmpty() == r.Y.IsEmpty() -} - -// IsEmpty reports whether the rectangle is empty. -func (r Rect) IsEmpty() bool { - return r.X.IsEmpty() -} - -// Vertices returns all four vertices of the rectangle. 
Vertices are returned in -// CCW direction starting with the lower left corner. -func (r Rect) Vertices() [4]Point { - return [4]Point{ - {r.X.Lo, r.Y.Lo}, - {r.X.Hi, r.Y.Lo}, - {r.X.Hi, r.Y.Hi}, - {r.X.Lo, r.Y.Hi}, - } -} - -// VertexIJ returns the vertex in direction i along the X-axis (0=left, 1=right) and -// direction j along the Y-axis (0=down, 1=up). -func (r Rect) VertexIJ(i, j int) Point { - x := r.X.Lo - if i == 1 { - x = r.X.Hi - } - y := r.Y.Lo - if j == 1 { - y = r.Y.Hi - } - return Point{x, y} -} - -// Lo returns the low corner of the rect. -func (r Rect) Lo() Point { - return Point{r.X.Lo, r.Y.Lo} -} - -// Hi returns the high corner of the rect. -func (r Rect) Hi() Point { - return Point{r.X.Hi, r.Y.Hi} -} - -// Center returns the center of the rectangle in (x,y)-space -func (r Rect) Center() Point { - return Point{r.X.Center(), r.Y.Center()} -} - -// Size returns the width and height of this rectangle in (x,y)-space. Empty -// rectangles have a negative width and height. -func (r Rect) Size() Point { - return Point{r.X.Length(), r.Y.Length()} -} - -// ContainsPoint reports whether the rectangle contains the given point. -// Rectangles are closed regions, i.e. they contain their boundary. -func (r Rect) ContainsPoint(p Point) bool { - return r.X.Contains(p.X) && r.Y.Contains(p.Y) -} - -// InteriorContainsPoint returns true iff the given point is contained in the interior -// of the region (i.e. the region excluding its boundary). -func (r Rect) InteriorContainsPoint(p Point) bool { - return r.X.InteriorContains(p.X) && r.Y.InteriorContains(p.Y) -} - -// Contains reports whether the rectangle contains the given rectangle. -func (r Rect) Contains(other Rect) bool { - return r.X.ContainsInterval(other.X) && r.Y.ContainsInterval(other.Y) -} - -// InteriorContains reports whether the interior of this rectangle contains all of the -// points of the given other rectangle (including its boundary). 
-func (r Rect) InteriorContains(other Rect) bool { - return r.X.InteriorContainsInterval(other.X) && r.Y.InteriorContainsInterval(other.Y) -} - -// Intersects reports whether this rectangle and the other rectangle have any points in common. -func (r Rect) Intersects(other Rect) bool { - return r.X.Intersects(other.X) && r.Y.Intersects(other.Y) -} - -// InteriorIntersects reports whether the interior of this rectangle intersects -// any point (including the boundary) of the given other rectangle. -func (r Rect) InteriorIntersects(other Rect) bool { - return r.X.InteriorIntersects(other.X) && r.Y.InteriorIntersects(other.Y) -} - -// AddPoint expands the rectangle to include the given point. The rectangle is -// expanded by the minimum amount possible. -func (r Rect) AddPoint(p Point) Rect { - return Rect{r.X.AddPoint(p.X), r.Y.AddPoint(p.Y)} -} - -// AddRect expands the rectangle to include the given rectangle. This is the -// same as replacing the rectangle by the union of the two rectangles, but -// is more efficient. -func (r Rect) AddRect(other Rect) Rect { - return Rect{r.X.Union(other.X), r.Y.Union(other.Y)} -} - -// ClampPoint returns the closest point in the rectangle to the given point. -// The rectangle must be non-empty. -func (r Rect) ClampPoint(p Point) Point { - return Point{r.X.ClampPoint(p.X), r.Y.ClampPoint(p.Y)} -} - -// Expanded returns a rectangle that has been expanded in the x-direction -// by margin.X, and in y-direction by margin.Y. If either margin is empty, -// then shrink the interval on the corresponding sides instead. The resulting -// rectangle may be empty. Any expansion of an empty rectangle remains empty. -func (r Rect) Expanded(margin Point) Rect { - xx := r.X.Expanded(margin.X) - yy := r.Y.Expanded(margin.Y) - if xx.IsEmpty() || yy.IsEmpty() { - return EmptyRect() - } - return Rect{xx, yy} -} - -// ExpandedByMargin returns a Rect that has been expanded by the amount on all sides. 
-func (r Rect) ExpandedByMargin(margin float64) Rect { - return r.Expanded(Point{margin, margin}) -} - -// Union returns the smallest rectangle containing the union of this rectangle and -// the given rectangle. -func (r Rect) Union(other Rect) Rect { - return Rect{r.X.Union(other.X), r.Y.Union(other.Y)} -} - -// Intersection returns the smallest rectangle containing the intersection of this -// rectangle and the given rectangle. -func (r Rect) Intersection(other Rect) Rect { - xx := r.X.Intersection(other.X) - yy := r.Y.Intersection(other.Y) - if xx.IsEmpty() || yy.IsEmpty() { - return EmptyRect() - } - - return Rect{xx, yy} -} - -// ApproxEqual returns true if the x- and y-intervals of the two rectangles are -// the same up to the given tolerance. -func (r Rect) ApproxEqual(r2 Rect) bool { - return r.X.ApproxEqual(r2.X) && r.Y.ApproxEqual(r2.Y) -} - -func (r Rect) String() string { return fmt.Sprintf("[Lo%s, Hi%s]", r.Lo(), r.Hi()) } diff --git a/vendor/github.com/golang/geo/r3/doc.go b/vendor/github.com/golang/geo/r3/doc.go deleted file mode 100644 index 1eb4710c8..000000000 --- a/vendor/github.com/golang/geo/r3/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package r3 implements types and functions for working with geometry in ℝ³. - -See ../s2 for a more detailed overview. 
-*/ -package r3 diff --git a/vendor/github.com/golang/geo/r3/precisevector.go b/vendor/github.com/golang/geo/r3/precisevector.go deleted file mode 100644 index b13393dbc..000000000 --- a/vendor/github.com/golang/geo/r3/precisevector.go +++ /dev/null @@ -1,198 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package r3 - -import ( - "fmt" - "math/big" -) - -const ( - // prec is the number of bits of precision to use for the Float values. - // To keep things simple, we use the maximum allowable precision on big - // values. This allows us to handle all values we expect in the s2 library. - prec = big.MaxPrec -) - -// define some commonly referenced values. -var ( - precise0 = precInt(0) - precise1 = precInt(1) -) - -// precStr wraps the conversion from a string into a big.Float. For results that -// actually can be represented exactly, this should only be used on values that -// are integer multiples of integer powers of 2. -func precStr(s string) *big.Float { - // Explicitly ignoring the bool return for this usage. 
- f, _ := new(big.Float).SetPrec(prec).SetString(s) - return f -} - -func precInt(i int64) *big.Float { - return new(big.Float).SetPrec(prec).SetInt64(i) -} - -func precFloat(f float64) *big.Float { - return new(big.Float).SetPrec(prec).SetFloat64(f) -} - -func precAdd(a, b *big.Float) *big.Float { - return new(big.Float).SetPrec(prec).Add(a, b) -} - -func precSub(a, b *big.Float) *big.Float { - return new(big.Float).SetPrec(prec).Sub(a, b) -} - -func precMul(a, b *big.Float) *big.Float { - return new(big.Float).SetPrec(prec).Mul(a, b) -} - -// PreciseVector represents a point in ℝ³ using high-precision values. -// Note that this is NOT a complete implementation because there are some -// operations that Vector supports that are not feasible with arbitrary precision -// math. (e.g., methods that need division like Normalize, or methods needing a -// square root operation such as Norm) -type PreciseVector struct { - X, Y, Z *big.Float -} - -// PreciseVectorFromVector creates a high precision vector from the given Vector. -func PreciseVectorFromVector(v Vector) PreciseVector { - return NewPreciseVector(v.X, v.Y, v.Z) -} - -// NewPreciseVector creates a high precision vector from the given floating point values. -func NewPreciseVector(x, y, z float64) PreciseVector { - return PreciseVector{ - X: precFloat(x), - Y: precFloat(y), - Z: precFloat(z), - } -} - -// Vector returns this precise vector converted to a Vector. -func (v PreciseVector) Vector() Vector { - // The accuracy flag is ignored on these conversions back to float64. - x, _ := v.X.Float64() - y, _ := v.Y.Float64() - z, _ := v.Z.Float64() - return Vector{x, y, z}.Normalize() -} - -// Equal reports whether v and ov are equal. -func (v PreciseVector) Equal(ov PreciseVector) bool { - return v.X.Cmp(ov.X) == 0 && v.Y.Cmp(ov.Y) == 0 && v.Z.Cmp(ov.Z) == 0 -} - -func (v PreciseVector) String() string { - return fmt.Sprintf("(%10g, %10g, %10g)", v.X, v.Y, v.Z) -} - -// Norm2 returns the square of the norm. 
-func (v PreciseVector) Norm2() *big.Float { return v.Dot(v) } - -// IsUnit reports whether this vector is of unit length. -func (v PreciseVector) IsUnit() bool { - return v.Norm2().Cmp(precise1) == 0 -} - -// Abs returns the vector with nonnegative components. -func (v PreciseVector) Abs() PreciseVector { - return PreciseVector{ - X: new(big.Float).Abs(v.X), - Y: new(big.Float).Abs(v.Y), - Z: new(big.Float).Abs(v.Z), - } -} - -// Add returns the standard vector sum of v and ov. -func (v PreciseVector) Add(ov PreciseVector) PreciseVector { - return PreciseVector{ - X: precAdd(v.X, ov.X), - Y: precAdd(v.Y, ov.Y), - Z: precAdd(v.Z, ov.Z), - } -} - -// Sub returns the standard vector difference of v and ov. -func (v PreciseVector) Sub(ov PreciseVector) PreciseVector { - return PreciseVector{ - X: precSub(v.X, ov.X), - Y: precSub(v.Y, ov.Y), - Z: precSub(v.Z, ov.Z), - } -} - -// Mul returns the standard scalar product of v and f. -func (v PreciseVector) Mul(f *big.Float) PreciseVector { - return PreciseVector{ - X: precMul(v.X, f), - Y: precMul(v.Y, f), - Z: precMul(v.Z, f), - } -} - -// MulByFloat64 returns the standard scalar product of v and f. -func (v PreciseVector) MulByFloat64(f float64) PreciseVector { - return v.Mul(precFloat(f)) -} - -// Dot returns the standard dot product of v and ov. -func (v PreciseVector) Dot(ov PreciseVector) *big.Float { - return precAdd(precMul(v.X, ov.X), precAdd(precMul(v.Y, ov.Y), precMul(v.Z, ov.Z))) -} - -// Cross returns the standard cross product of v and ov. -func (v PreciseVector) Cross(ov PreciseVector) PreciseVector { - return PreciseVector{ - X: precSub(precMul(v.Y, ov.Z), precMul(v.Z, ov.Y)), - Y: precSub(precMul(v.Z, ov.X), precMul(v.X, ov.Z)), - Z: precSub(precMul(v.X, ov.Y), precMul(v.Y, ov.X)), - } -} - -// LargestComponent returns the axis that represents the largest component in this vector. 
-func (v PreciseVector) LargestComponent() Axis { - t := v.Abs() - - if t.X.Cmp(t.Y) > 0 { - if t.X.Cmp(t.Z) > 0 { - return XAxis - } - return ZAxis - } - if t.Y.Cmp(t.Z) > 0 { - return YAxis - } - return ZAxis -} - -// SmallestComponent returns the axis that represents the smallest component in this vector. -func (v PreciseVector) SmallestComponent() Axis { - t := v.Abs() - - if t.X.Cmp(t.Y) < 0 { - if t.X.Cmp(t.Z) < 0 { - return XAxis - } - return ZAxis - } - if t.Y.Cmp(t.Z) < 0 { - return YAxis - } - return ZAxis -} diff --git a/vendor/github.com/golang/geo/r3/vector.go b/vendor/github.com/golang/geo/r3/vector.go deleted file mode 100644 index ccda622f4..000000000 --- a/vendor/github.com/golang/geo/r3/vector.go +++ /dev/null @@ -1,183 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package r3 - -import ( - "fmt" - "math" - - "github.com/golang/geo/s1" -) - -// Vector represents a point in ℝ³. -type Vector struct { - X, Y, Z float64 -} - -// ApproxEqual reports whether v and ov are equal within a small epsilon. -func (v Vector) ApproxEqual(ov Vector) bool { - const epsilon = 1e-16 - return math.Abs(v.X-ov.X) < epsilon && math.Abs(v.Y-ov.Y) < epsilon && math.Abs(v.Z-ov.Z) < epsilon -} - -func (v Vector) String() string { return fmt.Sprintf("(%0.24f, %0.24f, %0.24f)", v.X, v.Y, v.Z) } - -// Norm returns the vector's norm. 
-func (v Vector) Norm() float64 { return math.Sqrt(v.Dot(v)) } - -// Norm2 returns the square of the norm. -func (v Vector) Norm2() float64 { return v.Dot(v) } - -// Normalize returns a unit vector in the same direction as v. -func (v Vector) Normalize() Vector { - n2 := v.Norm2() - if n2 == 0 { - return Vector{0, 0, 0} - } - return v.Mul(1 / math.Sqrt(n2)) -} - -// IsUnit returns whether this vector is of approximately unit length. -func (v Vector) IsUnit() bool { - const epsilon = 5e-14 - return math.Abs(v.Norm2()-1) <= epsilon -} - -// Abs returns the vector with nonnegative components. -func (v Vector) Abs() Vector { return Vector{math.Abs(v.X), math.Abs(v.Y), math.Abs(v.Z)} } - -// Add returns the standard vector sum of v and ov. -func (v Vector) Add(ov Vector) Vector { return Vector{v.X + ov.X, v.Y + ov.Y, v.Z + ov.Z} } - -// Sub returns the standard vector difference of v and ov. -func (v Vector) Sub(ov Vector) Vector { return Vector{v.X - ov.X, v.Y - ov.Y, v.Z - ov.Z} } - -// Mul returns the standard scalar product of v and m. -func (v Vector) Mul(m float64) Vector { return Vector{m * v.X, m * v.Y, m * v.Z} } - -// Dot returns the standard dot product of v and ov. -func (v Vector) Dot(ov Vector) float64 { return v.X*ov.X + v.Y*ov.Y + v.Z*ov.Z } - -// Cross returns the standard cross product of v and ov. -func (v Vector) Cross(ov Vector) Vector { - return Vector{ - v.Y*ov.Z - v.Z*ov.Y, - v.Z*ov.X - v.X*ov.Z, - v.X*ov.Y - v.Y*ov.X, - } -} - -// Distance returns the Euclidean distance between v and ov. -func (v Vector) Distance(ov Vector) float64 { return v.Sub(ov).Norm() } - -// Angle returns the angle between v and ov. -func (v Vector) Angle(ov Vector) s1.Angle { - return s1.Angle(math.Atan2(v.Cross(ov).Norm(), v.Dot(ov))) * s1.Radian -} - -// Axis enumerates the 3 axes of ℝ³. -type Axis int - -// The three axes of ℝ³. -const ( - XAxis Axis = iota - YAxis - ZAxis -) - -// Ortho returns a unit vector that is orthogonal to v. 
-// Ortho(-v) = -Ortho(v) for all v. -func (v Vector) Ortho() Vector { - ov := Vector{0.012, 0.0053, 0.00457} - switch v.LargestComponent() { - case XAxis: - ov.Z = 1 - case YAxis: - ov.X = 1 - default: - ov.Y = 1 - } - return v.Cross(ov).Normalize() -} - -// LargestComponent returns the axis that represents the largest component in this vector. -func (v Vector) LargestComponent() Axis { - t := v.Abs() - - if t.X > t.Y { - if t.X > t.Z { - return XAxis - } - return ZAxis - } - if t.Y > t.Z { - return YAxis - } - return ZAxis -} - -// SmallestComponent returns the axis that represents the smallest component in this vector. -func (v Vector) SmallestComponent() Axis { - t := v.Abs() - - if t.X < t.Y { - if t.X < t.Z { - return XAxis - } - return ZAxis - } - if t.Y < t.Z { - return YAxis - } - return ZAxis -} - -// Cmp compares v and ov lexicographically and returns: -// -// -1 if v < ov -// 0 if v == ov -// +1 if v > ov -// -// This method is based on C++'s std::lexicographical_compare. Two entities -// are compared element by element with the given operator. The first mismatch -// defines which is less (or greater) than the other. If both have equivalent -// values they are lexicographically equal. -func (v Vector) Cmp(ov Vector) int { - if v.X < ov.X { - return -1 - } - if v.X > ov.X { - return 1 - } - - // First elements were the same, try the next. - if v.Y < ov.Y { - return -1 - } - if v.Y > ov.Y { - return 1 - } - - // Second elements were the same return the final compare. - if v.Z < ov.Z { - return -1 - } - if v.Z > ov.Z { - return 1 - } - - // Both are equal - return 0 -} diff --git a/vendor/github.com/golang/geo/s1/angle.go b/vendor/github.com/golang/geo/s1/angle.go deleted file mode 100644 index 747b23dea..000000000 --- a/vendor/github.com/golang/geo/s1/angle.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package s1 - -import ( - "math" - "strconv" -) - -// Angle represents a 1D angle. The internal representation is a double precision -// value in radians, so conversion to and from radians is exact. -// Conversions between E5, E6, E7, and Degrees are not always -// exact. For example, Degrees(3.1) is different from E6(3100000) or E7(31000000). -// -// The following conversions between degrees and radians are exact: -// -// Degree*180 == Radian*math.Pi -// Degree*(180/n) == Radian*(math.Pi/n) for n == 0..8 -// -// These identities hold when the arguments are scaled up or down by any power -// of 2. Some similar identities are also true, for example, -// -// Degree*60 == Radian*(math.Pi/3) -// -// But be aware that this type of identity does not hold in general. For example, -// -// Degree*3 != Radian*(math.Pi/60) -// -// Similarly, the conversion to radians means that (Angle(x)*Degree).Degrees() -// does not always equal x. For example, -// -// (Angle(45*n)*Degree).Degrees() == 45*n for n == 0..8 -// -// but -// -// (60*Degree).Degrees() != 60 -// -// When testing for equality, you should allow for numerical errors (ApproxEqual) -// or convert to discrete E5/E6/E7 values first. -type Angle float64 - -// Angle units. -const ( - Radian Angle = 1 - Degree = (math.Pi / 180) * Radian - - E5 = 1e-5 * Degree - E6 = 1e-6 * Degree - E7 = 1e-7 * Degree -) - -// Radians returns the angle in radians. 
-func (a Angle) Radians() float64 { return float64(a) } - -// Degrees returns the angle in degrees. -func (a Angle) Degrees() float64 { return float64(a / Degree) } - -// round returns the value rounded to nearest as an int32. -// This does not match C++ exactly for the case of x.5. -func round(val float64) int32 { - if val < 0 { - return int32(val - 0.5) - } - return int32(val + 0.5) -} - -// InfAngle returns an angle larger than any finite angle. -func InfAngle() Angle { - return Angle(math.Inf(1)) -} - -// isInf reports whether this Angle is infinite. -func (a Angle) isInf() bool { - return math.IsInf(float64(a), 0) -} - -// E5 returns the angle in hundred thousandths of degrees. -func (a Angle) E5() int32 { return round(a.Degrees() * 1e5) } - -// E6 returns the angle in millionths of degrees. -func (a Angle) E6() int32 { return round(a.Degrees() * 1e6) } - -// E7 returns the angle in ten millionths of degrees. -func (a Angle) E7() int32 { return round(a.Degrees() * 1e7) } - -// Abs returns the absolute value of the angle. -func (a Angle) Abs() Angle { return Angle(math.Abs(float64(a))) } - -// Normalized returns an equivalent angle in (-π, π]. -func (a Angle) Normalized() Angle { - rad := math.Remainder(float64(a), 2*math.Pi) - if rad <= -math.Pi { - rad = math.Pi - } - return Angle(rad) -} - -func (a Angle) String() string { - return strconv.FormatFloat(a.Degrees(), 'f', 7, 64) // like "%.7f" -} - -// ApproxEqual reports whether the two angles are the same up to a small tolerance. -func (a Angle) ApproxEqual(other Angle) bool { - return math.Abs(float64(a)-float64(other)) <= epsilon -} - -// BUG(dsymonds): The major differences from the C++ version are: -// - no unsigned E5/E6/E7 methods diff --git a/vendor/github.com/golang/geo/s1/chordangle.go b/vendor/github.com/golang/geo/s1/chordangle.go deleted file mode 100644 index 77d71648f..000000000 --- a/vendor/github.com/golang/geo/s1/chordangle.go +++ /dev/null @@ -1,320 +0,0 @@ -// Copyright 2015 Google Inc. 
All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package s1 - -import ( - "math" -) - -// ChordAngle represents the angle subtended by a chord (i.e., the straight -// line segment connecting two points on the sphere). Its representation -// makes it very efficient for computing and comparing distances, but unlike -// Angle it is only capable of representing angles between 0 and π radians. -// Generally, ChordAngle should only be used in loops where many angles need -// to be calculated and compared. Otherwise it is simpler to use Angle. -// -// ChordAngle loses some accuracy as the angle approaches π radians. -// There are several different ways to measure this error, including the -// representational error (i.e., how accurately ChordAngle can represent -// angles near π radians), the conversion error (i.e., how much precision is -// lost when an Angle is converted to an ChordAngle), and the measurement -// error (i.e., how accurate the ChordAngle(a, b) constructor is when the -// points A and B are separated by angles close to π radians). All of these -// errors differ by a small constant factor. -// -// For the measurement error (which is the largest of these errors and also -// the most important in practice), let the angle between A and B be (π - x) -// radians, i.e. A and B are within "x" radians of being antipodal. 
The -// corresponding chord length is -// -// r = 2 * sin((π - x) / 2) = 2 * cos(x / 2) -// -// For values of x not close to π the relative error in the squared chord -// length is at most 4.5 * dblEpsilon (see MaxPointError below). -// The relative error in "r" is thus at most 2.25 * dblEpsilon ~= 5e-16. To -// convert this error into an equivalent angle, we have -// -// |dr / dx| = sin(x / 2) -// -// and therefore -// -// |dx| = dr / sin(x / 2) -// = 5e-16 * (2 * cos(x / 2)) / sin(x / 2) -// = 1e-15 / tan(x / 2) -// -// The maximum error is attained when -// -// x = |dx| -// = 1e-15 / tan(x / 2) -// ~= 1e-15 / (x / 2) -// ~= sqrt(2e-15) -// -// In summary, the measurement error for an angle (π - x) is at most -// -// dx = min(1e-15 / tan(x / 2), sqrt(2e-15)) -// (~= min(2e-15 / x, sqrt(2e-15)) when x is small) -// -// On the Earth's surface (assuming a radius of 6371km), this corresponds to -// the following worst-case measurement errors: -// -// Accuracy: Unless antipodal to within: -// --------- --------------------------- -// 6.4 nanometers 10,000 km (90 degrees) -// 1 micrometer 81.2 kilometers -// 1 millimeter 81.2 meters -// 1 centimeter 8.12 meters -// 28.5 centimeters 28.5 centimeters -// -// The representational and conversion errors referred to earlier are somewhat -// smaller than this. For example, maximum distance between adjacent -// representable ChordAngle values is only 13.5 cm rather than 28.5 cm. To -// see this, observe that the closest representable value to r^2 = 4 is -// r^2 = 4 * (1 - dblEpsilon / 2). Thus r = 2 * (1 - dblEpsilon / 4) and -// the angle between these two representable values is -// -// x = 2 * acos(r / 2) -// = 2 * acos(1 - dblEpsilon / 4) -// ~= 2 * asin(sqrt(dblEpsilon / 2) -// ~= sqrt(2 * dblEpsilon) -// ~= 2.1e-8 -// -// which is 13.5 cm on the Earth's surface. -// -// The worst case rounding error occurs when the value halfway between these -// two representable values is rounded up to 4. 
This halfway value is -// r^2 = (4 * (1 - dblEpsilon / 4)), thus r = 2 * (1 - dblEpsilon / 8) and -// the worst case rounding error is -// -// x = 2 * acos(r / 2) -// = 2 * acos(1 - dblEpsilon / 8) -// ~= 2 * asin(sqrt(dblEpsilon / 4) -// ~= sqrt(dblEpsilon) -// ~= 1.5e-8 -// -// which is 9.5 cm on the Earth's surface. -type ChordAngle float64 - -const ( - // NegativeChordAngle represents a chord angle smaller than the zero angle. - // The only valid operations on a NegativeChordAngle are comparisons, - // Angle conversions, and Successor/Predecessor. - NegativeChordAngle = ChordAngle(-1) - - // RightChordAngle represents a chord angle of 90 degrees (a "right angle"). - RightChordAngle = ChordAngle(2) - - // StraightChordAngle represents a chord angle of 180 degrees (a "straight angle"). - // This is the maximum finite chord angle. - StraightChordAngle = ChordAngle(4) - - // maxLength2 is the square of the maximum length allowed in a ChordAngle. - maxLength2 = 4.0 -) - -// ChordAngleFromAngle returns a ChordAngle from the given Angle. -func ChordAngleFromAngle(a Angle) ChordAngle { - if a < 0 { - return NegativeChordAngle - } - if a.isInf() { - return InfChordAngle() - } - l := 2 * math.Sin(0.5*math.Min(math.Pi, a.Radians())) - return ChordAngle(l * l) -} - -// ChordAngleFromSquaredLength returns a ChordAngle from the squared chord length. -// Note that the argument is automatically clamped to a maximum of 4 to -// handle possible roundoff errors. The argument must be non-negative. -func ChordAngleFromSquaredLength(length2 float64) ChordAngle { - if length2 > maxLength2 { - return StraightChordAngle - } - return ChordAngle(length2) -} - -// Expanded returns a new ChordAngle that has been adjusted by the given error -// bound (which can be positive or negative). Error should be the value -// returned by either MaxPointError or MaxAngleError. 
For example: -// a := ChordAngleFromPoints(x, y) -// a1 := a.Expanded(a.MaxPointError()) -func (c ChordAngle) Expanded(e float64) ChordAngle { - // If the angle is special, don't change it. Otherwise clamp it to the valid range. - if c.isSpecial() { - return c - } - return ChordAngle(math.Max(0.0, math.Min(maxLength2, float64(c)+e))) -} - -// Angle converts this ChordAngle to an Angle. -func (c ChordAngle) Angle() Angle { - if c < 0 { - return -1 * Radian - } - if c.isInf() { - return InfAngle() - } - return Angle(2 * math.Asin(0.5*math.Sqrt(float64(c)))) -} - -// InfChordAngle returns a chord angle larger than any finite chord angle. -// The only valid operations on an InfChordAngle are comparisons, Angle -// conversions, and Successor/Predecessor. -func InfChordAngle() ChordAngle { - return ChordAngle(math.Inf(1)) -} - -// isInf reports whether this ChordAngle is infinite. -func (c ChordAngle) isInf() bool { - return math.IsInf(float64(c), 1) -} - -// isSpecial reports whether this ChordAngle is one of the special cases. -func (c ChordAngle) isSpecial() bool { - return c < 0 || c.isInf() -} - -// isValid reports whether this ChordAngle is valid or not. -func (c ChordAngle) isValid() bool { - return (c >= 0 && c <= maxLength2) || c.isSpecial() -} - -// Successor returns the smallest representable ChordAngle larger than this one. -// This can be used to convert a "<" comparison to a "<=" comparison. -// -// Note the following special cases: -// NegativeChordAngle.Successor == 0 -// StraightChordAngle.Successor == InfChordAngle -// InfChordAngle.Successor == InfChordAngle -func (c ChordAngle) Successor() ChordAngle { - if c >= maxLength2 { - return InfChordAngle() - } - if c < 0 { - return 0 - } - return ChordAngle(math.Nextafter(float64(c), 10.0)) -} - -// Predecessor returns the largest representable ChordAngle less than this one. 
-// -// Note the following special cases: -// InfChordAngle.Predecessor == StraightChordAngle -// ChordAngle(0).Predecessor == NegativeChordAngle -// NegativeChordAngle.Predecessor == NegativeChordAngle -func (c ChordAngle) Predecessor() ChordAngle { - if c <= 0 { - return NegativeChordAngle - } - if c > maxLength2 { - return StraightChordAngle - } - - return ChordAngle(math.Nextafter(float64(c), -10.0)) -} - -// MaxPointError returns the maximum error size for a ChordAngle constructed -// from 2 Points x and y, assuming that x and y are normalized to within the -// bounds guaranteed by s2.Point.Normalize. The error is defined with respect to -// the true distance after the points are projected to lie exactly on the sphere. -func (c ChordAngle) MaxPointError() float64 { - // There is a relative error of (2.5*dblEpsilon) when computing the squared - // distance, plus a relative error of 2 * dblEpsilon, plus an absolute error - // of (16 * dblEpsilon**2) because the lengths of the input points may differ - // from 1 by up to (2*dblEpsilon) each. (This is the maximum error in Normalize). - return 4.5*dblEpsilon*float64(c) + 16*dblEpsilon*dblEpsilon -} - -// MaxAngleError returns the maximum error for a ChordAngle constructed -// as an Angle distance. -func (c ChordAngle) MaxAngleError() float64 { - return dblEpsilon * float64(c) -} - -// Add adds the other ChordAngle to this one and returns the resulting value. -// This method assumes the ChordAngles are not special. -func (c ChordAngle) Add(other ChordAngle) ChordAngle { - // Note that this method (and Sub) is much more efficient than converting - // the ChordAngle to an Angle and adding those and converting back. It - // requires only one square root plus a few additions and multiplications. - - // Optimization for the common case where b is an error tolerance - // parameter that happens to be set to zero. - if other == 0 { - return c - } - - // Clamp the angle sum to at most 180 degrees. 
- if c+other >= maxLength2 { - return StraightChordAngle - } - - // Let a and b be the (non-squared) chord lengths, and let c = a+b. - // Let A, B, and C be the corresponding half-angles (a = 2*sin(A), etc). - // Then the formula below can be derived from c = 2 * sin(A+B) and the - // relationships sin(A+B) = sin(A)*cos(B) + sin(B)*cos(A) - // cos(X) = sqrt(1 - sin^2(X)) - x := float64(c * (1 - 0.25*other)) - y := float64(other * (1 - 0.25*c)) - return ChordAngle(math.Min(maxLength2, x+y+2*math.Sqrt(x*y))) -} - -// Sub subtracts the other ChordAngle from this one and returns the resulting -// value. This method assumes the ChordAngles are not special. -func (c ChordAngle) Sub(other ChordAngle) ChordAngle { - if other == 0 { - return c - } - if c <= other { - return 0 - } - x := float64(c * (1 - 0.25*other)) - y := float64(other * (1 - 0.25*c)) - return ChordAngle(math.Max(0.0, x+y-2*math.Sqrt(x*y))) -} - -// Sin returns the sine of this chord angle. This method is more efficient -// than converting to Angle and performing the computation. -func (c ChordAngle) Sin() float64 { - return math.Sqrt(c.Sin2()) -} - -// Sin2 returns the square of the sine of this chord angle. -// It is more efficient than Sin. -func (c ChordAngle) Sin2() float64 { - // Let a be the (non-squared) chord length, and let A be the corresponding - // half-angle (a = 2*sin(A)). The formula below can be derived from: - // sin(2*A) = 2 * sin(A) * cos(A) - // cos^2(A) = 1 - sin^2(A) - // This is much faster than converting to an angle and computing its sine. - return float64(c * (1 - 0.25*c)) -} - -// Cos returns the cosine of this chord angle. This method is more efficient -// than converting to Angle and performing the computation. -func (c ChordAngle) Cos() float64 { - // cos(2*A) = cos^2(A) - sin^2(A) = 1 - 2*sin^2(A) - return float64(1 - 0.5*c) -} - -// Tan returns the tangent of this chord angle. 
-func (c ChordAngle) Tan() float64 { - return c.Sin() / c.Cos() -} - -// TODO(roberts): Differences from C++: -// Helpers to/from E5/E6/E7 -// Helpers to/from degrees and radians directly. -// FastUpperBoundFrom(angle Angle) diff --git a/vendor/github.com/golang/geo/s1/doc.go b/vendor/github.com/golang/geo/s1/doc.go deleted file mode 100644 index 52a2c526d..000000000 --- a/vendor/github.com/golang/geo/s1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package s1 implements types and functions for working with geometry in S¹ (circular geometry). - -See ../s2 for a more detailed overview. -*/ -package s1 diff --git a/vendor/github.com/golang/geo/s1/interval.go b/vendor/github.com/golang/geo/s1/interval.go deleted file mode 100644 index 6fea5221f..000000000 --- a/vendor/github.com/golang/geo/s1/interval.go +++ /dev/null @@ -1,462 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package s1 - -import ( - "math" - "strconv" -) - -// An Interval represents a closed interval on a unit circle (also known -// as a 1-dimensional sphere). It is capable of representing the empty -// interval (containing no points), the full interval (containing all -// points), and zero-length intervals (containing a single point). -// -// Points are represented by the angle they make with the positive x-axis in -// the range [-π, π]. An interval is represented by its lower and upper -// bounds (both inclusive, since the interval is closed). The lower bound may -// be greater than the upper bound, in which case the interval is "inverted" -// (i.e. it passes through the point (-1, 0)). -// -// The point (-1, 0) has two valid representations, π and -π. The -// normalized representation of this point is π, so that endpoints -// of normal intervals are in the range (-π, π]. We normalize the latter to -// the former in IntervalFromEndpoints. However, we take advantage of the point -// -π to construct two special intervals: -// The full interval is [-π, π] -// The empty interval is [π, -π]. -// -// Treat the exported fields as read-only. -type Interval struct { - Lo, Hi float64 -} - -// IntervalFromEndpoints constructs a new interval from endpoints. -// Both arguments must be in the range [-π,π]. This function allows inverted intervals -// to be created. -func IntervalFromEndpoints(lo, hi float64) Interval { - i := Interval{lo, hi} - if lo == -math.Pi && hi != math.Pi { - i.Lo = math.Pi - } - if hi == -math.Pi && lo != math.Pi { - i.Hi = math.Pi - } - return i -} - -// IntervalFromPointPair returns the minimal interval containing the two given points. -// Both arguments must be in [-π,π]. 
-func IntervalFromPointPair(a, b float64) Interval { - if a == -math.Pi { - a = math.Pi - } - if b == -math.Pi { - b = math.Pi - } - if positiveDistance(a, b) <= math.Pi { - return Interval{a, b} - } - return Interval{b, a} -} - -// EmptyInterval returns an empty interval. -func EmptyInterval() Interval { return Interval{math.Pi, -math.Pi} } - -// FullInterval returns a full interval. -func FullInterval() Interval { return Interval{-math.Pi, math.Pi} } - -// IsValid reports whether the interval is valid. -func (i Interval) IsValid() bool { - return (math.Abs(i.Lo) <= math.Pi && math.Abs(i.Hi) <= math.Pi && - !(i.Lo == -math.Pi && i.Hi != math.Pi) && - !(i.Hi == -math.Pi && i.Lo != math.Pi)) -} - -// IsFull reports whether the interval is full. -func (i Interval) IsFull() bool { return i.Lo == -math.Pi && i.Hi == math.Pi } - -// IsEmpty reports whether the interval is empty. -func (i Interval) IsEmpty() bool { return i.Lo == math.Pi && i.Hi == -math.Pi } - -// IsInverted reports whether the interval is inverted; that is, whether Lo > Hi. -func (i Interval) IsInverted() bool { return i.Lo > i.Hi } - -// Invert returns the interval with endpoints swapped. -func (i Interval) Invert() Interval { - return Interval{i.Hi, i.Lo} -} - -// Center returns the midpoint of the interval. -// It is undefined for full and empty intervals. -func (i Interval) Center() float64 { - c := 0.5 * (i.Lo + i.Hi) - if !i.IsInverted() { - return c - } - if c <= 0 { - return c + math.Pi - } - return c - math.Pi -} - -// Length returns the length of the interval. -// The length of an empty interval is negative. -func (i Interval) Length() float64 { - l := i.Hi - i.Lo - if l >= 0 { - return l - } - l += 2 * math.Pi - if l > 0 { - return l - } - return -1 -} - -// Assumes p ∈ (-π,π]. -func (i Interval) fastContains(p float64) bool { - if i.IsInverted() { - return (p >= i.Lo || p <= i.Hi) && !i.IsEmpty() - } - return p >= i.Lo && p <= i.Hi -} - -// Contains returns true iff the interval contains p. 
-// Assumes p ∈ [-π,π]. -func (i Interval) Contains(p float64) bool { - if p == -math.Pi { - p = math.Pi - } - return i.fastContains(p) -} - -// ContainsInterval returns true iff the interval contains oi. -func (i Interval) ContainsInterval(oi Interval) bool { - if i.IsInverted() { - if oi.IsInverted() { - return oi.Lo >= i.Lo && oi.Hi <= i.Hi - } - return (oi.Lo >= i.Lo || oi.Hi <= i.Hi) && !i.IsEmpty() - } - if oi.IsInverted() { - return i.IsFull() || oi.IsEmpty() - } - return oi.Lo >= i.Lo && oi.Hi <= i.Hi -} - -// InteriorContains returns true iff the interior of the interval contains p. -// Assumes p ∈ [-π,π]. -func (i Interval) InteriorContains(p float64) bool { - if p == -math.Pi { - p = math.Pi - } - if i.IsInverted() { - return p > i.Lo || p < i.Hi - } - return (p > i.Lo && p < i.Hi) || i.IsFull() -} - -// InteriorContainsInterval returns true iff the interior of the interval contains oi. -func (i Interval) InteriorContainsInterval(oi Interval) bool { - if i.IsInverted() { - if oi.IsInverted() { - return (oi.Lo > i.Lo && oi.Hi < i.Hi) || oi.IsEmpty() - } - return oi.Lo > i.Lo || oi.Hi < i.Hi - } - if oi.IsInverted() { - return i.IsFull() || oi.IsEmpty() - } - return (oi.Lo > i.Lo && oi.Hi < i.Hi) || i.IsFull() -} - -// Intersects returns true iff the interval contains any points in common with oi. -func (i Interval) Intersects(oi Interval) bool { - if i.IsEmpty() || oi.IsEmpty() { - return false - } - if i.IsInverted() { - return oi.IsInverted() || oi.Lo <= i.Hi || oi.Hi >= i.Lo - } - if oi.IsInverted() { - return oi.Lo <= i.Hi || oi.Hi >= i.Lo - } - return oi.Lo <= i.Hi && oi.Hi >= i.Lo -} - -// InteriorIntersects returns true iff the interior of the interval contains any points in common with oi, including the latter's boundary. 
-func (i Interval) InteriorIntersects(oi Interval) bool { - if i.IsEmpty() || oi.IsEmpty() || i.Lo == i.Hi { - return false - } - if i.IsInverted() { - return oi.IsInverted() || oi.Lo < i.Hi || oi.Hi > i.Lo - } - if oi.IsInverted() { - return oi.Lo < i.Hi || oi.Hi > i.Lo - } - return (oi.Lo < i.Hi && oi.Hi > i.Lo) || i.IsFull() -} - -// Compute distance from a to b in [0,2π], in a numerically stable way. -func positiveDistance(a, b float64) float64 { - d := b - a - if d >= 0 { - return d - } - return (b + math.Pi) - (a - math.Pi) -} - -// Union returns the smallest interval that contains both the interval and oi. -func (i Interval) Union(oi Interval) Interval { - if oi.IsEmpty() { - return i - } - if i.fastContains(oi.Lo) { - if i.fastContains(oi.Hi) { - // Either oi ⊂ i, or i ∪ oi is the full interval. - if i.ContainsInterval(oi) { - return i - } - return FullInterval() - } - return Interval{i.Lo, oi.Hi} - } - if i.fastContains(oi.Hi) { - return Interval{oi.Lo, i.Hi} - } - - // Neither endpoint of oi is in i. Either i ⊂ oi, or i and oi are disjoint. - if i.IsEmpty() || oi.fastContains(i.Lo) { - return oi - } - - // This is the only hard case where we need to find the closest pair of endpoints. - if positiveDistance(oi.Hi, i.Lo) < positiveDistance(i.Hi, oi.Lo) { - return Interval{oi.Lo, i.Hi} - } - return Interval{i.Lo, oi.Hi} -} - -// Intersection returns the smallest interval that contains the intersection of the interval and oi. -func (i Interval) Intersection(oi Interval) Interval { - if oi.IsEmpty() { - return EmptyInterval() - } - if i.fastContains(oi.Lo) { - if i.fastContains(oi.Hi) { - // Either oi ⊂ i, or i and oi intersect twice. Neither are empty. - // In the first case we want to return i (which is shorter than oi). - // In the second case one of them is inverted, and the smallest interval - // that covers the two disjoint pieces is the shorter of i and oi. - // We thus want to pick the shorter of i and oi in both cases. 
- if oi.Length() < i.Length() { - return oi - } - return i - } - return Interval{oi.Lo, i.Hi} - } - if i.fastContains(oi.Hi) { - return Interval{i.Lo, oi.Hi} - } - - // Neither endpoint of oi is in i. Either i ⊂ oi, or i and oi are disjoint. - if oi.fastContains(i.Lo) { - return i - } - return EmptyInterval() -} - -// AddPoint returns the interval expanded by the minimum amount necessary such -// that it contains the given point "p" (an angle in the range [-π, π]). -func (i Interval) AddPoint(p float64) Interval { - if math.Abs(p) > math.Pi { - return i - } - if p == -math.Pi { - p = math.Pi - } - if i.fastContains(p) { - return i - } - if i.IsEmpty() { - return Interval{p, p} - } - if positiveDistance(p, i.Lo) < positiveDistance(i.Hi, p) { - return Interval{p, i.Hi} - } - return Interval{i.Lo, p} -} - -// Define the maximum rounding error for arithmetic operations. Depending on the -// platform the mantissa precision may be different than others, so we choose to -// use specific values to be consistent across all. -// The values come from the C++ implementation. -var ( - // epsilon is a small number that represents a reasonable level of noise between two - // values that can be considered to be equal. - epsilon = 1e-15 - // dblEpsilon is a smaller number for values that require more precision. - dblEpsilon = 2.220446049e-16 -) - -// Expanded returns an interval that has been expanded on each side by margin. -// If margin is negative, then the function shrinks the interval on -// each side by margin instead. The resulting interval may be empty or -// full. Any expansion (positive or negative) of a full interval remains -// full, and any expansion of an empty interval remains empty. -func (i Interval) Expanded(margin float64) Interval { - if margin >= 0 { - if i.IsEmpty() { - return i - } - // Check whether this interval will be full after expansion, allowing - // for a rounding error when computing each endpoint. 
- if i.Length()+2*margin+2*dblEpsilon >= 2*math.Pi { - return FullInterval() - } - } else { - if i.IsFull() { - return i - } - // Check whether this interval will be empty after expansion, allowing - // for a rounding error when computing each endpoint. - if i.Length()+2*margin-2*dblEpsilon <= 0 { - return EmptyInterval() - } - } - result := IntervalFromEndpoints( - math.Remainder(i.Lo-margin, 2*math.Pi), - math.Remainder(i.Hi+margin, 2*math.Pi), - ) - if result.Lo <= -math.Pi { - result.Lo = math.Pi - } - return result -} - -// ApproxEqual reports whether this interval can be transformed into the given -// interval by moving each endpoint by at most ε, without the -// endpoints crossing (which would invert the interval). Empty and full -// intervals are considered to start at an arbitrary point on the unit circle, -// so any interval with (length <= 2*ε) matches the empty interval, and -// any interval with (length >= 2*π - 2*ε) matches the full interval. -func (i Interval) ApproxEqual(other Interval) bool { - // Full and empty intervals require special cases because the endpoints - // are considered to be positioned arbitrarily. - if i.IsEmpty() { - return other.Length() <= 2*epsilon - } - if other.IsEmpty() { - return i.Length() <= 2*epsilon - } - if i.IsFull() { - return other.Length() >= 2*(math.Pi-epsilon) - } - if other.IsFull() { - return i.Length() >= 2*(math.Pi-epsilon) - } - - // The purpose of the last test below is to verify that moving the endpoints - // does not invert the interval, e.g. [-1e20, 1e20] vs. [1e20, -1e20]. 
- return (math.Abs(math.Remainder(other.Lo-i.Lo, 2*math.Pi)) <= epsilon && - math.Abs(math.Remainder(other.Hi-i.Hi, 2*math.Pi)) <= epsilon && - math.Abs(i.Length()-other.Length()) <= 2*epsilon) - -} - -func (i Interval) String() string { - // like "[%.7f, %.7f]" - return "[" + strconv.FormatFloat(i.Lo, 'f', 7, 64) + ", " + strconv.FormatFloat(i.Hi, 'f', 7, 64) + "]" -} - -// Complement returns the complement of the interior of the interval. An interval and -// its complement have the same boundary but do not share any interior -// values. The complement operator is not a bijection, since the complement -// of a singleton interval (containing a single value) is the same as the -// complement of an empty interval. -func (i Interval) Complement() Interval { - if i.Lo == i.Hi { - // Singleton. The interval just contains a single point. - return FullInterval() - } - // Handles empty and full. - return Interval{i.Hi, i.Lo} -} - -// ComplementCenter returns the midpoint of the complement of the interval. For full and empty -// intervals, the result is arbitrary. For a singleton interval (containing a -// single point), the result is its antipodal point on S1. -func (i Interval) ComplementCenter() float64 { - if i.Lo != i.Hi { - return i.Complement().Center() - } - // Singleton. The interval just contains a single point. - if i.Hi <= 0 { - return i.Hi + math.Pi - } - return i.Hi - math.Pi -} - -// DirectedHausdorffDistance returns the Hausdorff distance to the given interval. -// For two intervals i and y, this distance is defined by -// h(i, y) = max_{p in i} min_{q in y} d(p, q), -// where d(.,.) is measured along S1. -func (i Interval) DirectedHausdorffDistance(y Interval) Angle { - if y.ContainsInterval(i) { - return 0 // This includes the case i is empty. - } - if y.IsEmpty() { - return Angle(math.Pi) // maximum possible distance on s1. 
- } - yComplementCenter := y.ComplementCenter() - if i.Contains(yComplementCenter) { - return Angle(positiveDistance(y.Hi, yComplementCenter)) - } - - // The Hausdorff distance is realized by either two i.Hi endpoints or two - // i.Lo endpoints, whichever is farther apart. - hiHi := 0.0 - if IntervalFromEndpoints(y.Hi, yComplementCenter).Contains(i.Hi) { - hiHi = positiveDistance(y.Hi, i.Hi) - } - - loLo := 0.0 - if IntervalFromEndpoints(yComplementCenter, y.Lo).Contains(i.Lo) { - loLo = positiveDistance(i.Lo, y.Lo) - } - - return Angle(math.Max(hiHi, loLo)) -} - -// Project returns the closest point in the interval to the given point p. -// The interval must be non-empty. -func (i Interval) Project(p float64) float64 { - if p == -math.Pi { - p = math.Pi - } - if i.fastContains(p) { - return p - } - // Compute distance from p to each endpoint. - dlo := positiveDistance(p, i.Lo) - dhi := positiveDistance(i.Hi, p) - if dlo < dhi { - return i.Lo - } - return i.Hi -} diff --git a/vendor/github.com/golang/geo/s2/bits_go18.go b/vendor/github.com/golang/geo/s2/bits_go18.go deleted file mode 100644 index 10a674da5..000000000 --- a/vendor/github.com/golang/geo/s2/bits_go18.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2018 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !go1.9 - -package s2 - -// This file is for the bit manipulation code pre-Go 1.9. 
- -// findMSBSetNonZero64 returns the index (between 0 and 63) of the most -// significant set bit. Passing zero to this function returns zero. -func findMSBSetNonZero64(x uint64) int { - val := []uint64{0x2, 0xC, 0xF0, 0xFF00, 0xFFFF0000, 0xFFFFFFFF00000000} - shift := []uint64{1, 2, 4, 8, 16, 32} - var msbPos uint64 - for i := 5; i >= 0; i-- { - if x&val[i] != 0 { - x >>= shift[i] - msbPos |= shift[i] - } - } - return int(msbPos) -} - -const deBruijn64 = 0x03f79d71b4ca8b09 -const digitMask = uint64(1<<64 - 1) - -var deBruijn64Lookup = []byte{ - 0, 1, 56, 2, 57, 49, 28, 3, 61, 58, 42, 50, 38, 29, 17, 4, - 62, 47, 59, 36, 45, 43, 51, 22, 53, 39, 33, 30, 24, 18, 12, 5, - 63, 55, 48, 27, 60, 41, 37, 16, 46, 35, 44, 21, 52, 32, 23, 11, - 54, 26, 40, 15, 34, 20, 31, 10, 25, 14, 19, 9, 13, 8, 7, 6, -} - -// findLSBSetNonZero64 returns the index (between 0 and 63) of the least -// significant set bit. Passing zero to this function returns zero. -// -// This code comes from trailingZeroBits in https://golang.org/src/math/big/nat.go -// which references (Knuth, volume 4, section 7.3.1). -func findLSBSetNonZero64(x uint64) int { - return int(deBruijn64Lookup[((x&-x)*(deBruijn64&digitMask))>>58]) -} diff --git a/vendor/github.com/golang/geo/s2/bits_go19.go b/vendor/github.com/golang/geo/s2/bits_go19.go deleted file mode 100644 index 9532b377d..000000000 --- a/vendor/github.com/golang/geo/s2/bits_go19.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2018 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// +build go1.9 - -package s2 - -// This file is for the bit manipulation code post-Go 1.9. - -import "math/bits" - -// findMSBSetNonZero64 returns the index (between 0 and 63) of the most -// significant set bit. Passing zero to this function return zero. -func findMSBSetNonZero64(x uint64) int { - if x == 0 { - return 0 - } - return 63 - bits.LeadingZeros64(x) -} - -// findLSBSetNonZero64 returns the index (between 0 and 63) of the least -// significant set bit. Passing zero to this function return zero. -func findLSBSetNonZero64(x uint64) int { - if x == 0 { - return 0 - } - return bits.TrailingZeros64(x) -} diff --git a/vendor/github.com/golang/geo/s2/cap.go b/vendor/github.com/golang/geo/s2/cap.go deleted file mode 100644 index c4fb2e1e0..000000000 --- a/vendor/github.com/golang/geo/s2/cap.go +++ /dev/null @@ -1,519 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package s2 - -import ( - "fmt" - "io" - "math" - - "github.com/golang/geo/r1" - "github.com/golang/geo/s1" -) - -var ( - // centerPoint is the default center for Caps - centerPoint = PointFromCoords(1.0, 0, 0) -) - -// Cap represents a disc-shaped region defined by a center and radius. 
-// Technically this shape is called a "spherical cap" (rather than disc) -// because it is not planar; the cap represents a portion of the sphere that -// has been cut off by a plane. The boundary of the cap is the circle defined -// by the intersection of the sphere and the plane. For containment purposes, -// the cap is a closed set, i.e. it contains its boundary. -// -// For the most part, you can use a spherical cap wherever you would use a -// disc in planar geometry. The radius of the cap is measured along the -// surface of the sphere (rather than the straight-line distance through the -// interior). Thus a cap of radius π/2 is a hemisphere, and a cap of radius -// π covers the entire sphere. -// -// The center is a point on the surface of the unit sphere. (Hence the need for -// it to be of unit length.) -// -// A cap can also be defined by its center point and height. The height is the -// distance from the center point to the cutoff plane. There is also support for -// "empty" and "full" caps, which contain no points and all points respectively. -// -// Here are some useful relationships between the cap height (h), the cap -// radius (r), the maximum chord length from the cap's center (d), and the -// radius of cap's base (a). -// -// h = 1 - cos(r) -// = 2 * sin^2(r/2) -// d^2 = 2 * h -// = a^2 + h^2 -// -// The zero value of Cap is an invalid cap. Use EmptyCap to get a valid empty cap. -type Cap struct { - center Point - radius s1.ChordAngle -} - -// CapFromPoint constructs a cap containing a single point. -func CapFromPoint(p Point) Cap { - return CapFromCenterChordAngle(p, 0) -} - -// CapFromCenterAngle constructs a cap with the given center and angle. -func CapFromCenterAngle(center Point, angle s1.Angle) Cap { - return CapFromCenterChordAngle(center, s1.ChordAngleFromAngle(angle)) -} - -// CapFromCenterChordAngle constructs a cap where the angle is expressed as an -// s1.ChordAngle. This constructor is more efficient than using an s1.Angle. 
-func CapFromCenterChordAngle(center Point, radius s1.ChordAngle) Cap { - return Cap{ - center: center, - radius: radius, - } -} - -// CapFromCenterHeight constructs a cap with the given center and height. A -// negative height yields an empty cap; a height of 2 or more yields a full cap. -// The center should be unit length. -func CapFromCenterHeight(center Point, height float64) Cap { - return CapFromCenterChordAngle(center, s1.ChordAngleFromSquaredLength(2*height)) -} - -// CapFromCenterArea constructs a cap with the given center and surface area. -// Note that the area can also be interpreted as the solid angle subtended by the -// cap (because the sphere has unit radius). A negative area yields an empty cap; -// an area of 4*π or more yields a full cap. -func CapFromCenterArea(center Point, area float64) Cap { - return CapFromCenterChordAngle(center, s1.ChordAngleFromSquaredLength(area/math.Pi)) -} - -// EmptyCap returns a cap that contains no points. -func EmptyCap() Cap { - return CapFromCenterChordAngle(centerPoint, s1.NegativeChordAngle) -} - -// FullCap returns a cap that contains all points. -func FullCap() Cap { - return CapFromCenterChordAngle(centerPoint, s1.StraightChordAngle) -} - -// IsValid reports whether the Cap is considered valid. -func (c Cap) IsValid() bool { - return c.center.Vector.IsUnit() && c.radius <= s1.StraightChordAngle -} - -// IsEmpty reports whether the cap is empty, i.e. it contains no points. -func (c Cap) IsEmpty() bool { - return c.radius < 0 -} - -// IsFull reports whether the cap is full, i.e. it contains all points. -func (c Cap) IsFull() bool { - return c.radius == s1.StraightChordAngle -} - -// Center returns the cap's center point. -func (c Cap) Center() Point { - return c.center -} - -// Height returns the height of the cap. This is the distance from the center -// point to the cutoff plane. -func (c Cap) Height() float64 { - return float64(0.5 * c.radius) -} - -// Radius returns the cap radius as an s1.Angle. 
(Note that the cap angle -// is stored internally as a ChordAngle, so this method requires a trigonometric -// operation and may yield a slightly different result than the value passed -// to CapFromCenterAngle). -func (c Cap) Radius() s1.Angle { - return c.radius.Angle() -} - -// Area returns the surface area of the Cap on the unit sphere. -func (c Cap) Area() float64 { - return 2.0 * math.Pi * math.Max(0, c.Height()) -} - -// Contains reports whether this cap contains the other. -func (c Cap) Contains(other Cap) bool { - // In a set containment sense, every cap contains the empty cap. - if c.IsFull() || other.IsEmpty() { - return true - } - return c.radius >= ChordAngleBetweenPoints(c.center, other.center).Add(other.radius) -} - -// Intersects reports whether this cap intersects the other cap. -// i.e. whether they have any points in common. -func (c Cap) Intersects(other Cap) bool { - if c.IsEmpty() || other.IsEmpty() { - return false - } - - return c.radius.Add(other.radius) >= ChordAngleBetweenPoints(c.center, other.center) -} - -// InteriorIntersects reports whether this caps interior intersects the other cap. -func (c Cap) InteriorIntersects(other Cap) bool { - // Make sure this cap has an interior and the other cap is non-empty. - if c.radius <= 0 || other.IsEmpty() { - return false - } - - return c.radius.Add(other.radius) > ChordAngleBetweenPoints(c.center, other.center) -} - -// ContainsPoint reports whether this cap contains the point. -func (c Cap) ContainsPoint(p Point) bool { - return ChordAngleBetweenPoints(c.center, p) <= c.radius -} - -// InteriorContainsPoint reports whether the point is within the interior of this cap. -func (c Cap) InteriorContainsPoint(p Point) bool { - return c.IsFull() || ChordAngleBetweenPoints(c.center, p) < c.radius -} - -// Complement returns the complement of the interior of the cap. A cap and its -// complement have the same boundary but do not share any interior points. 
-// The complement operator is not a bijection because the complement of a -// singleton cap (containing a single point) is the same as the complement -// of an empty cap. -func (c Cap) Complement() Cap { - if c.IsFull() { - return EmptyCap() - } - if c.IsEmpty() { - return FullCap() - } - - return CapFromCenterChordAngle(Point{c.center.Mul(-1)}, s1.StraightChordAngle.Sub(c.radius)) -} - -// CapBound returns a bounding spherical cap. This is not guaranteed to be exact. -func (c Cap) CapBound() Cap { - return c -} - -// RectBound returns a bounding latitude-longitude rectangle. -// The bounds are not guaranteed to be tight. -func (c Cap) RectBound() Rect { - if c.IsEmpty() { - return EmptyRect() - } - - capAngle := c.Radius().Radians() - allLongitudes := false - lat := r1.Interval{ - Lo: latitude(c.center).Radians() - capAngle, - Hi: latitude(c.center).Radians() + capAngle, - } - lng := s1.FullInterval() - - // Check whether cap includes the south pole. - if lat.Lo <= -math.Pi/2 { - lat.Lo = -math.Pi / 2 - allLongitudes = true - } - - // Check whether cap includes the north pole. - if lat.Hi >= math.Pi/2 { - lat.Hi = math.Pi / 2 - allLongitudes = true - } - - if !allLongitudes { - // Compute the range of longitudes covered by the cap. We use the law - // of sines for spherical triangles. Consider the triangle ABC where - // A is the north pole, B is the center of the cap, and C is the point - // of tangency between the cap boundary and a line of longitude. Then - // C is a right angle, and letting a,b,c denote the sides opposite A,B,C, - // we have sin(a)/sin(A) = sin(c)/sin(C), or sin(A) = sin(a)/sin(c). - // Here "a" is the cap angle, and "c" is the colatitude (90 degrees - // minus the latitude). This formula also works for negative latitudes. - // - // The formula for sin(a) follows from the relationship h = 1 - cos(a). 
- sinA := c.radius.Sin() - sinC := math.Cos(latitude(c.center).Radians()) - if sinA <= sinC { - angleA := math.Asin(sinA / sinC) - lng.Lo = math.Remainder(longitude(c.center).Radians()-angleA, math.Pi*2) - lng.Hi = math.Remainder(longitude(c.center).Radians()+angleA, math.Pi*2) - } - } - return Rect{lat, lng} -} - -// Equal reports whether this cap is equal to the other cap. -func (c Cap) Equal(other Cap) bool { - return (c.radius == other.radius && c.center == other.center) || - (c.IsEmpty() && other.IsEmpty()) || - (c.IsFull() && other.IsFull()) -} - -// ApproxEqual reports whether this cap is equal to the other cap within the given tolerance. -func (c Cap) ApproxEqual(other Cap) bool { - const epsilon = 1e-14 - r2 := float64(c.radius) - otherR2 := float64(other.radius) - return c.center.ApproxEqual(other.center) && - math.Abs(r2-otherR2) <= epsilon || - c.IsEmpty() && otherR2 <= epsilon || - other.IsEmpty() && r2 <= epsilon || - c.IsFull() && otherR2 >= 2-epsilon || - other.IsFull() && r2 >= 2-epsilon -} - -// AddPoint increases the cap if necessary to include the given point. If this cap is empty, -// then the center is set to the point with a zero height. p must be unit-length. -func (c Cap) AddPoint(p Point) Cap { - if c.IsEmpty() { - c.center = p - c.radius = 0 - return c - } - - // After calling cap.AddPoint(p), cap.Contains(p) must be true. However - // we don't need to do anything special to achieve this because Contains() - // does exactly the same distance calculation that we do here. - if newRad := ChordAngleBetweenPoints(c.center, p); newRad > c.radius { - c.radius = newRad - } - return c -} - -// AddCap increases the cap height if necessary to include the other cap. If this cap is empty, -// it is set to the other cap. -func (c Cap) AddCap(other Cap) Cap { - if c.IsEmpty() { - return other - } - if other.IsEmpty() { - return c - } - - // We round up the distance to ensure that the cap is actually contained. 
- // TODO(roberts): Do some error analysis in order to guarantee this. - dist := ChordAngleBetweenPoints(c.center, other.center).Add(other.radius) - if newRad := dist.Expanded(dblEpsilon * float64(dist)); newRad > c.radius { - c.radius = newRad - } - return c -} - -// Expanded returns a new cap expanded by the given angle. If the cap is empty, -// it returns an empty cap. -func (c Cap) Expanded(distance s1.Angle) Cap { - if c.IsEmpty() { - return EmptyCap() - } - return CapFromCenterChordAngle(c.center, c.radius.Add(s1.ChordAngleFromAngle(distance))) -} - -func (c Cap) String() string { - return fmt.Sprintf("[Center=%v, Radius=%f]", c.center.Vector, c.Radius().Degrees()) -} - -// radiusToHeight converts an s1.Angle into the height of the cap. -func radiusToHeight(r s1.Angle) float64 { - if r.Radians() < 0 { - return float64(s1.NegativeChordAngle) - } - if r.Radians() >= math.Pi { - return float64(s1.RightChordAngle) - } - return float64(0.5 * s1.ChordAngleFromAngle(r)) - -} - -// ContainsCell reports whether the cap contains the given cell. -func (c Cap) ContainsCell(cell Cell) bool { - // If the cap does not contain all cell vertices, return false. - var vertices [4]Point - for k := 0; k < 4; k++ { - vertices[k] = cell.Vertex(k) - if !c.ContainsPoint(vertices[k]) { - return false - } - } - // Otherwise, return true if the complement of the cap does not intersect the cell. - return !c.Complement().intersects(cell, vertices) -} - -// IntersectsCell reports whether the cap intersects the cell. -func (c Cap) IntersectsCell(cell Cell) bool { - // If the cap contains any cell vertex, return true. - var vertices [4]Point - for k := 0; k < 4; k++ { - vertices[k] = cell.Vertex(k) - if c.ContainsPoint(vertices[k]) { - return true - } - } - return c.intersects(cell, vertices) -} - -// intersects reports whether the cap intersects any point of the cell excluding -// its vertices (which are assumed to already have been checked). 
-func (c Cap) intersects(cell Cell, vertices [4]Point) bool { - // If the cap is a hemisphere or larger, the cell and the complement of the cap - // are both convex. Therefore since no vertex of the cell is contained, no other - // interior point of the cell is contained either. - if c.radius >= s1.RightChordAngle { - return false - } - - // We need to check for empty caps due to the center check just below. - if c.IsEmpty() { - return false - } - - // Optimization: return true if the cell contains the cap center. This allows half - // of the edge checks below to be skipped. - if cell.ContainsPoint(c.center) { - return true - } - - // At this point we know that the cell does not contain the cap center, and the cap - // does not contain any cell vertex. The only way that they can intersect is if the - // cap intersects the interior of some edge. - sin2Angle := c.radius.Sin2() - for k := 0; k < 4; k++ { - edge := cell.Edge(k).Vector - dot := c.center.Vector.Dot(edge) - if dot > 0 { - // The center is in the interior half-space defined by the edge. We do not need - // to consider these edges, since if the cap intersects this edge then it also - // intersects the edge on the opposite side of the cell, because the center is - // not contained with the cell. - continue - } - - // The Norm2() factor is necessary because "edge" is not normalized. - if dot*dot > sin2Angle*edge.Norm2() { - return false - } - - // Otherwise, the great circle containing this edge intersects the interior of the cap. We just - // need to check whether the point of closest approach occurs between the two edge endpoints. - dir := edge.Cross(c.center.Vector) - if dir.Dot(vertices[k].Vector) < 0 && dir.Dot(vertices[(k+1)&3].Vector) > 0 { - return true - } - } - return false -} - -// CellUnionBound computes a covering of the Cap. In general the covering -// consists of at most 4 cells except for very large caps, which may need -// up to 6 cells. The output is not sorted. 
-func (c Cap) CellUnionBound() []CellID { - // TODO(roberts): The covering could be made quite a bit tighter by mapping - // the cap to a rectangle in (i,j)-space and finding a covering for that. - - // Find the maximum level such that the cap contains at most one cell vertex - // and such that CellID.AppendVertexNeighbors() can be called. - level := MinWidthMetric.MaxLevel(c.Radius().Radians()) - 1 - - // If level < 0, more than three face cells are required. - if level < 0 { - cellIDs := make([]CellID, 6) - for face := 0; face < 6; face++ { - cellIDs[face] = CellIDFromFace(face) - } - return cellIDs - } - // The covering consists of the 4 cells at the given level that share the - // cell vertex that is closest to the cap center. - return cellIDFromPoint(c.center).VertexNeighbors(level) -} - -// Centroid returns the true centroid of the cap multiplied by its surface area -// The result lies on the ray from the origin through the cap's center, but it -// is not unit length. Note that if you just want the "surface centroid", i.e. -// the normalized result, then it is simpler to call Center. -// -// The reason for multiplying the result by the cap area is to make it -// easier to compute the centroid of more complicated shapes. The centroid -// of a union of disjoint regions can be computed simply by adding their -// Centroid() results. Caveat: for caps that contain a single point -// (i.e., zero radius), this method always returns the origin (0, 0, 0). -// This is because shapes with no area don't affect the centroid of a -// union whose total area is positive. -func (c Cap) Centroid() Point { - // From symmetry, the centroid of the cap must be somewhere on the line - // from the origin to the center of the cap on the surface of the sphere. - // When a sphere is divided into slices of constant thickness by a set of - // parallel planes, all slices have the same surface area. 
This implies - // that the radial component of the centroid is simply the midpoint of the - // range of radial distances spanned by the cap. That is easily computed - // from the cap height. - if c.IsEmpty() { - return Point{} - } - r := 1 - 0.5*c.Height() - return Point{c.center.Mul(r * c.Area())} -} - -// Union returns the smallest cap which encloses this cap and other. -func (c Cap) Union(other Cap) Cap { - // If the other cap is larger, swap c and other for the rest of the computations. - if c.radius < other.radius { - c, other = other, c - } - - if c.IsFull() || other.IsEmpty() { - return c - } - - // TODO: This calculation would be more efficient using s1.ChordAngles. - cRadius := c.Radius() - otherRadius := other.Radius() - distance := c.center.Distance(other.center) - if cRadius >= distance+otherRadius { - return c - } - - resRadius := 0.5 * (distance + cRadius + otherRadius) - resCenter := InterpolateAtDistance(0.5*(distance-cRadius+otherRadius), c.center, other.center) - return CapFromCenterAngle(resCenter, resRadius) -} - -// Encode encodes the Cap. -func (c Cap) Encode(w io.Writer) error { - e := &encoder{w: w} - c.encode(e) - return e.err -} - -func (c Cap) encode(e *encoder) { - e.writeFloat64(c.center.X) - e.writeFloat64(c.center.Y) - e.writeFloat64(c.center.Z) - e.writeFloat64(float64(c.radius)) -} - -// Decode decodes the Cap. -func (c *Cap) Decode(r io.Reader) error { - d := &decoder{r: asByteReader(r)} - c.decode(d) - return d.err -} - -func (c *Cap) decode(d *decoder) { - c.center.X = d.readFloat64() - c.center.Y = d.readFloat64() - c.center.Z = d.readFloat64() - c.radius = s1.ChordAngle(d.readFloat64()) -} diff --git a/vendor/github.com/golang/geo/s2/cell.go b/vendor/github.com/golang/geo/s2/cell.go deleted file mode 100644 index 0a01a4f1f..000000000 --- a/vendor/github.com/golang/geo/s2/cell.go +++ /dev/null @@ -1,698 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package s2 - -import ( - "io" - "math" - - "github.com/golang/geo/r1" - "github.com/golang/geo/r2" - "github.com/golang/geo/r3" - "github.com/golang/geo/s1" -) - -// Cell is an S2 region object that represents a cell. Unlike CellIDs, -// it supports efficient containment and intersection tests. However, it is -// also a more expensive representation. -type Cell struct { - face int8 - level int8 - orientation int8 - id CellID - uv r2.Rect -} - -// CellFromCellID constructs a Cell corresponding to the given CellID. -func CellFromCellID(id CellID) Cell { - c := Cell{} - c.id = id - f, i, j, o := c.id.faceIJOrientation() - c.face = int8(f) - c.level = int8(c.id.Level()) - c.orientation = int8(o) - c.uv = ijLevelToBoundUV(i, j, int(c.level)) - return c -} - -// CellFromPoint constructs a cell for the given Point. -func CellFromPoint(p Point) Cell { - return CellFromCellID(cellIDFromPoint(p)) -} - -// CellFromLatLng constructs a cell for the given LatLng. -func CellFromLatLng(ll LatLng) Cell { - return CellFromCellID(CellIDFromLatLng(ll)) -} - -// Face returns the face this cell is on. -func (c Cell) Face() int { - return int(c.face) -} - -// oppositeFace returns the face opposite the given face. -func oppositeFace(face int) int { - return (face + 3) % 6 -} - -// Level returns the level of this cell. -func (c Cell) Level() int { - return int(c.level) -} - -// ID returns the CellID this cell represents. 
-func (c Cell) ID() CellID { - return c.id -} - -// IsLeaf returns whether this Cell is a leaf or not. -func (c Cell) IsLeaf() bool { - return c.level == maxLevel -} - -// SizeIJ returns the edge length of this cell in (i,j)-space. -func (c Cell) SizeIJ() int { - return sizeIJ(int(c.level)) -} - -// SizeST returns the edge length of this cell in (s,t)-space. -func (c Cell) SizeST() float64 { - return c.id.sizeST(int(c.level)) -} - -// Vertex returns the k-th vertex of the cell (k = 0,1,2,3) in CCW order -// (lower left, lower right, upper right, upper left in the UV plane). -func (c Cell) Vertex(k int) Point { - return Point{faceUVToXYZ(int(c.face), c.uv.Vertices()[k].X, c.uv.Vertices()[k].Y).Normalize()} -} - -// Edge returns the inward-facing normal of the great circle passing through -// the CCW ordered edge from vertex k to vertex k+1 (mod 4) (for k = 0,1,2,3). -func (c Cell) Edge(k int) Point { - switch k { - case 0: - return Point{vNorm(int(c.face), c.uv.Y.Lo).Normalize()} // Bottom - case 1: - return Point{uNorm(int(c.face), c.uv.X.Hi).Normalize()} // Right - case 2: - return Point{vNorm(int(c.face), c.uv.Y.Hi).Mul(-1.0).Normalize()} // Top - default: - return Point{uNorm(int(c.face), c.uv.X.Lo).Mul(-1.0).Normalize()} // Left - } -} - -// BoundUV returns the bounds of this cell in (u,v)-space. -func (c Cell) BoundUV() r2.Rect { - return c.uv -} - -// Center returns the direction vector corresponding to the center in -// (s,t)-space of the given cell. This is the point at which the cell is -// divided into four subcells; it is not necessarily the centroid of the -// cell in (u,v)-space or (x,y,z)-space -func (c Cell) Center() Point { - return Point{c.id.rawPoint().Normalize()} -} - -// Children returns the four direct children of this cell in traversal order -// and returns true. If this is a leaf cell, or the children could not be created, -// false is returned. -// The C++ method is called Subdivide. 
-func (c Cell) Children() ([4]Cell, bool) { - var children [4]Cell - - if c.id.IsLeaf() { - return children, false - } - - // Compute the cell midpoint in uv-space. - uvMid := c.id.centerUV() - - // Create four children with the appropriate bounds. - cid := c.id.ChildBegin() - for pos := 0; pos < 4; pos++ { - children[pos] = Cell{ - face: c.face, - level: c.level + 1, - orientation: c.orientation ^ int8(posToOrientation[pos]), - id: cid, - } - - // We want to split the cell in half in u and v. To decide which - // side to set equal to the midpoint value, we look at cell's (i,j) - // position within its parent. The index for i is in bit 1 of ij. - ij := posToIJ[c.orientation][pos] - i := ij >> 1 - j := ij & 1 - if i == 1 { - children[pos].uv.X.Hi = c.uv.X.Hi - children[pos].uv.X.Lo = uvMid.X - } else { - children[pos].uv.X.Lo = c.uv.X.Lo - children[pos].uv.X.Hi = uvMid.X - } - if j == 1 { - children[pos].uv.Y.Hi = c.uv.Y.Hi - children[pos].uv.Y.Lo = uvMid.Y - } else { - children[pos].uv.Y.Lo = c.uv.Y.Lo - children[pos].uv.Y.Hi = uvMid.Y - } - cid = cid.Next() - } - return children, true -} - -// ExactArea returns the area of this cell as accurately as possible. -func (c Cell) ExactArea() float64 { - v0, v1, v2, v3 := c.Vertex(0), c.Vertex(1), c.Vertex(2), c.Vertex(3) - return PointArea(v0, v1, v2) + PointArea(v0, v2, v3) -} - -// ApproxArea returns the approximate area of this cell. This method is accurate -// to within 3% percent for all cell sizes and accurate to within 0.1% for cells -// at level 5 or higher (i.e. squares 350km to a side or smaller on the Earth's -// surface). It is moderately cheap to compute. -func (c Cell) ApproxArea() float64 { - // All cells at the first two levels have the same area. - if c.level < 2 { - return c.AverageArea() - } - - // First, compute the approximate area of the cell when projected - // perpendicular to its normal. 
The cross product of its diagonals gives - // the normal, and the length of the normal is twice the projected area. - flatArea := 0.5 * (c.Vertex(2).Sub(c.Vertex(0).Vector). - Cross(c.Vertex(3).Sub(c.Vertex(1).Vector)).Norm()) - - // Now, compensate for the curvature of the cell surface by pretending - // that the cell is shaped like a spherical cap. The ratio of the - // area of a spherical cap to the area of its projected disc turns out - // to be 2 / (1 + sqrt(1 - r*r)) where r is the radius of the disc. - // For example, when r=0 the ratio is 1, and when r=1 the ratio is 2. - // Here we set Pi*r*r == flatArea to find the equivalent disc. - return flatArea * 2 / (1 + math.Sqrt(1-math.Min(1/math.Pi*flatArea, 1))) -} - -// AverageArea returns the average area of cells at the level of this cell. -// This is accurate to within a factor of 1.7. -func (c Cell) AverageArea() float64 { - return AvgAreaMetric.Value(int(c.level)) -} - -// IntersectsCell reports whether the intersection of this cell and the other cell is not nil. -func (c Cell) IntersectsCell(oc Cell) bool { - return c.id.Intersects(oc.id) -} - -// ContainsCell reports whether this cell contains the other cell. -func (c Cell) ContainsCell(oc Cell) bool { - return c.id.Contains(oc.id) -} - -// CellUnionBound computes a covering of the Cell. -func (c Cell) CellUnionBound() []CellID { - return c.CapBound().CellUnionBound() -} - -// latitude returns the latitude of the cell vertex in radians given by (i,j), -// where i and j indicate the Hi (1) or Lo (0) corner. 
-func (c Cell) latitude(i, j int) float64 { - var u, v float64 - switch { - case i == 0 && j == 0: - u = c.uv.X.Lo - v = c.uv.Y.Lo - case i == 0 && j == 1: - u = c.uv.X.Lo - v = c.uv.Y.Hi - case i == 1 && j == 0: - u = c.uv.X.Hi - v = c.uv.Y.Lo - case i == 1 && j == 1: - u = c.uv.X.Hi - v = c.uv.Y.Hi - default: - panic("i and/or j is out of bounds") - } - return latitude(Point{faceUVToXYZ(int(c.face), u, v)}).Radians() -} - -// longitude returns the longitude of the cell vertex in radians given by (i,j), -// where i and j indicate the Hi (1) or Lo (0) corner. -func (c Cell) longitude(i, j int) float64 { - var u, v float64 - switch { - case i == 0 && j == 0: - u = c.uv.X.Lo - v = c.uv.Y.Lo - case i == 0 && j == 1: - u = c.uv.X.Lo - v = c.uv.Y.Hi - case i == 1 && j == 0: - u = c.uv.X.Hi - v = c.uv.Y.Lo - case i == 1 && j == 1: - u = c.uv.X.Hi - v = c.uv.Y.Hi - default: - panic("i and/or j is out of bounds") - } - return longitude(Point{faceUVToXYZ(int(c.face), u, v)}).Radians() -} - -var ( - poleMinLat = math.Asin(math.Sqrt(1.0/3)) - 0.5*dblEpsilon -) - -// RectBound returns the bounding rectangle of this cell. -func (c Cell) RectBound() Rect { - if c.level > 0 { - // Except for cells at level 0, the latitude and longitude extremes are - // attained at the vertices. Furthermore, the latitude range is - // determined by one pair of diagonally opposite vertices and the - // longitude range is determined by the other pair. - // - // We first determine which corner (i,j) of the cell has the largest - // absolute latitude. To maximize latitude, we want to find the point in - // the cell that has the largest absolute z-coordinate and the smallest - // absolute x- and y-coordinates. To do this we look at each coordinate - // (u and v), and determine whether we want to minimize or maximize that - // coordinate based on the axis direction and the cell's (u,v) quadrant. 
- u := c.uv.X.Lo + c.uv.X.Hi - v := c.uv.Y.Lo + c.uv.Y.Hi - var i, j int - if uAxis(int(c.face)).Z == 0 { - if u < 0 { - i = 1 - } - } else if u > 0 { - i = 1 - } - if vAxis(int(c.face)).Z == 0 { - if v < 0 { - j = 1 - } - } else if v > 0 { - j = 1 - } - lat := r1.IntervalFromPoint(c.latitude(i, j)).AddPoint(c.latitude(1-i, 1-j)) - lng := s1.EmptyInterval().AddPoint(c.longitude(i, 1-j)).AddPoint(c.longitude(1-i, j)) - - // We grow the bounds slightly to make sure that the bounding rectangle - // contains LatLngFromPoint(P) for any point P inside the loop L defined by the - // four *normalized* vertices. Note that normalization of a vector can - // change its direction by up to 0.5 * dblEpsilon radians, and it is not - // enough just to add Normalize calls to the code above because the - // latitude/longitude ranges are not necessarily determined by diagonally - // opposite vertex pairs after normalization. - // - // We would like to bound the amount by which the latitude/longitude of a - // contained point P can exceed the bounds computed above. In the case of - // longitude, the normalization error can change the direction of rounding - // leading to a maximum difference in longitude of 2 * dblEpsilon. In - // the case of latitude, the normalization error can shift the latitude by - // up to 0.5 * dblEpsilon and the other sources of error can cause the - // two latitudes to differ by up to another 1.5 * dblEpsilon, which also - // leads to a maximum difference of 2 * dblEpsilon. - return Rect{lat, lng}.expanded(LatLng{s1.Angle(2 * dblEpsilon), s1.Angle(2 * dblEpsilon)}).PolarClosure() - } - - // The 4 cells around the equator extend to +/-45 degrees latitude at the - // midpoints of their top and bottom edges. The two cells covering the - // poles extend down to +/-35.26 degrees at their vertices. The maximum - // error in this calculation is 0.5 * dblEpsilon. 
- var bound Rect - switch c.face { - case 0: - bound = Rect{r1.Interval{-math.Pi / 4, math.Pi / 4}, s1.Interval{-math.Pi / 4, math.Pi / 4}} - case 1: - bound = Rect{r1.Interval{-math.Pi / 4, math.Pi / 4}, s1.Interval{math.Pi / 4, 3 * math.Pi / 4}} - case 2: - bound = Rect{r1.Interval{poleMinLat, math.Pi / 2}, s1.FullInterval()} - case 3: - bound = Rect{r1.Interval{-math.Pi / 4, math.Pi / 4}, s1.Interval{3 * math.Pi / 4, -3 * math.Pi / 4}} - case 4: - bound = Rect{r1.Interval{-math.Pi / 4, math.Pi / 4}, s1.Interval{-3 * math.Pi / 4, -math.Pi / 4}} - default: - bound = Rect{r1.Interval{-math.Pi / 2, -poleMinLat}, s1.FullInterval()} - } - - // Finally, we expand the bound to account for the error when a point P is - // converted to an LatLng to test for containment. (The bound should be - // large enough so that it contains the computed LatLng of any contained - // point, not just the infinite-precision version.) We don't need to expand - // longitude because longitude is calculated via a single call to math.Atan2, - // which is guaranteed to be semi-monotonic. - return bound.expanded(LatLng{s1.Angle(dblEpsilon), s1.Angle(0)}) -} - -// CapBound returns the bounding cap of this cell. -func (c Cell) CapBound() Cap { - // We use the cell center in (u,v)-space as the cap axis. This vector is very close - // to GetCenter() and faster to compute. Neither one of these vectors yields the - // bounding cap with minimal surface area, but they are both pretty close. - cap := CapFromPoint(Point{faceUVToXYZ(int(c.face), c.uv.Center().X, c.uv.Center().Y).Normalize()}) - for k := 0; k < 4; k++ { - cap = cap.AddPoint(c.Vertex(k)) - } - return cap -} - -// ContainsPoint reports whether this cell contains the given point. Note that -// unlike Loop/Polygon, a Cell is considered to be a closed set. This means -// that a point on a Cell's edge or vertex belong to the Cell and the relevant -// adjacent Cells too. 
-// -// If you want every point to be contained by exactly one Cell, -// you will need to convert the Cell to a Loop. -func (c Cell) ContainsPoint(p Point) bool { - var uv r2.Point - var ok bool - if uv.X, uv.Y, ok = faceXYZToUV(int(c.face), p); !ok { - return false - } - - // Expand the (u,v) bound to ensure that - // - // CellFromPoint(p).ContainsPoint(p) - // - // is always true. To do this, we need to account for the error when - // converting from (u,v) coordinates to (s,t) coordinates. In the - // normal case the total error is at most dblEpsilon. - return c.uv.ExpandedByMargin(dblEpsilon).ContainsPoint(uv) -} - -// Encode encodes the Cell. -func (c Cell) Encode(w io.Writer) error { - e := &encoder{w: w} - c.encode(e) - return e.err -} - -func (c Cell) encode(e *encoder) { - c.id.encode(e) -} - -// Decode decodes the Cell. -func (c *Cell) Decode(r io.Reader) error { - d := &decoder{r: asByteReader(r)} - c.decode(d) - return d.err -} - -func (c *Cell) decode(d *decoder) { - c.id.decode(d) - *c = CellFromCellID(c.id) -} - -// vertexChordDist2 returns the squared chord distance from point P to the -// given corner vertex specified by the Hi or Lo values of each. -func (c Cell) vertexChordDist2(p Point, xHi, yHi bool) s1.ChordAngle { - x := c.uv.X.Lo - y := c.uv.Y.Lo - if xHi { - x = c.uv.X.Hi - } - if yHi { - y = c.uv.Y.Hi - } - - return ChordAngleBetweenPoints(p, PointFromCoords(x, y, 1)) -} - -// uEdgeIsClosest reports whether a point P is closer to the interior of the specified -// Cell edge (either the lower or upper edge of the Cell) or to the endpoints. -func (c Cell) uEdgeIsClosest(p Point, vHi bool) bool { - u0 := c.uv.X.Lo - u1 := c.uv.X.Hi - v := c.uv.Y.Lo - if vHi { - v = c.uv.Y.Hi - } - // These are the normals to the planes that are perpendicular to the edge - // and pass through one of its two endpoints. 
- dir0 := r3.Vector{v*v + 1, -u0 * v, -u0} - dir1 := r3.Vector{v*v + 1, -u1 * v, -u1} - return p.Dot(dir0) > 0 && p.Dot(dir1) < 0 -} - -// vEdgeIsClosest reports whether a point P is closer to the interior of the specified -// Cell edge (either the right or left edge of the Cell) or to the endpoints. -func (c Cell) vEdgeIsClosest(p Point, uHi bool) bool { - v0 := c.uv.Y.Lo - v1 := c.uv.Y.Hi - u := c.uv.X.Lo - if uHi { - u = c.uv.X.Hi - } - dir0 := r3.Vector{-u * v0, u*u + 1, -v0} - dir1 := r3.Vector{-u * v1, u*u + 1, -v1} - return p.Dot(dir0) > 0 && p.Dot(dir1) < 0 -} - -// edgeDistance reports the distance from a Point P to a given Cell edge. The point -// P is given by its dot product, and the uv edge by its normal in the -// given coordinate value. -func edgeDistance(ij, uv float64) s1.ChordAngle { - // Let P by the target point and let R be the closest point on the given - // edge AB. The desired distance PR can be expressed as PR^2 = PQ^2 + QR^2 - // where Q is the point P projected onto the plane through the great circle - // through AB. We can compute the distance PQ^2 perpendicular to the plane - // from "dirIJ" (the dot product of the target point P with the edge - // normal) and the squared length the edge normal (1 + uv**2). - pq2 := (ij * ij) / (1 + uv*uv) - - // We can compute the distance QR as (1 - OQ) where O is the sphere origin, - // and we can compute OQ^2 = 1 - PQ^2 using the Pythagorean theorem. - // (This calculation loses accuracy as angle POQ approaches Pi/2.) - qr := 1 - math.Sqrt(1-pq2) - return s1.ChordAngleFromSquaredLength(pq2 + qr*qr) -} - -// distanceInternal reports the distance from the given point to the interior of -// the cell if toInterior is true or to the boundary of the cell otherwise. -func (c Cell) distanceInternal(targetXYZ Point, toInterior bool) s1.ChordAngle { - // All calculations are done in the (u,v,w) coordinates of this cell's face. 
- target := faceXYZtoUVW(int(c.face), targetXYZ) - - // Compute dot products with all four upward or rightward-facing edge - // normals. dirIJ is the dot product for the edge corresponding to axis - // I, endpoint J. For example, dir01 is the right edge of the Cell - // (corresponding to the upper endpoint of the u-axis). - dir00 := target.X - target.Z*c.uv.X.Lo - dir01 := target.X - target.Z*c.uv.X.Hi - dir10 := target.Y - target.Z*c.uv.Y.Lo - dir11 := target.Y - target.Z*c.uv.Y.Hi - inside := true - if dir00 < 0 { - inside = false // Target is to the left of the cell - if c.vEdgeIsClosest(target, false) { - return edgeDistance(-dir00, c.uv.X.Lo) - } - } - if dir01 > 0 { - inside = false // Target is to the right of the cell - if c.vEdgeIsClosest(target, true) { - return edgeDistance(dir01, c.uv.X.Hi) - } - } - if dir10 < 0 { - inside = false // Target is below the cell - if c.uEdgeIsClosest(target, false) { - return edgeDistance(-dir10, c.uv.Y.Lo) - } - } - if dir11 > 0 { - inside = false // Target is above the cell - if c.uEdgeIsClosest(target, true) { - return edgeDistance(dir11, c.uv.Y.Hi) - } - } - if inside { - if toInterior { - return s1.ChordAngle(0) - } - // Although you might think of Cells as rectangles, they are actually - // arbitrary quadrilaterals after they are projected onto the sphere. - // Therefore the simplest approach is just to find the minimum distance to - // any of the four edges. - return minChordAngle(edgeDistance(-dir00, c.uv.X.Lo), - edgeDistance(dir01, c.uv.X.Hi), - edgeDistance(-dir10, c.uv.Y.Lo), - edgeDistance(dir11, c.uv.Y.Hi)) - } - - // Otherwise, the closest point is one of the four cell vertices. Note that - // it is *not* trivial to narrow down the candidates based on the edge sign - // tests above, because (1) the edges don't meet at right angles and (2) - // there are points on the far side of the sphere that are both above *and* - // below the cell, etc. 
- return minChordAngle(c.vertexChordDist2(target, false, false), - c.vertexChordDist2(target, true, false), - c.vertexChordDist2(target, false, true), - c.vertexChordDist2(target, true, true)) -} - -// Distance reports the distance from the cell to the given point. Returns zero if -// the point is inside the cell. -func (c Cell) Distance(target Point) s1.ChordAngle { - return c.distanceInternal(target, true) -} - -// MaxDistance reports the maximum distance from the cell (including its interior) to the -// given point. -func (c Cell) MaxDistance(target Point) s1.ChordAngle { - // First check the 4 cell vertices. If all are within the hemisphere - // centered around target, the max distance will be to one of these vertices. - targetUVW := faceXYZtoUVW(int(c.face), target) - maxDist := maxChordAngle(c.vertexChordDist2(targetUVW, false, false), - c.vertexChordDist2(targetUVW, true, false), - c.vertexChordDist2(targetUVW, false, true), - c.vertexChordDist2(targetUVW, true, true)) - - if maxDist <= s1.RightChordAngle { - return maxDist - } - - // Otherwise, find the minimum distance dMin to the antipodal point and the - // maximum distance will be pi - dMin. - return s1.StraightChordAngle - c.BoundaryDistance(Point{target.Mul(-1)}) -} - -// BoundaryDistance reports the distance from the cell boundary to the given point. -func (c Cell) BoundaryDistance(target Point) s1.ChordAngle { - return c.distanceInternal(target, false) -} - -// DistanceToEdge returns the minimum distance from the cell to the given edge AB. Returns -// zero if the edge intersects the cell interior. -func (c Cell) DistanceToEdge(a, b Point) s1.ChordAngle { - // Possible optimizations: - // - Currently the (cell vertex, edge endpoint) distances are computed - // twice each, and the length of AB is computed 4 times. - // - To fix this, refactor GetDistance(target) so that it skips calculating - // the distance to each cell vertex. 
Instead, compute the cell vertices - // and distances in this function, and add a low-level UpdateMinDistance - // that allows the XA, XB, and AB distances to be passed in. - // - It might also be more efficient to do all calculations in UVW-space, - // since this would involve transforming 2 points rather than 4. - - // First, check the minimum distance to the edge endpoints A and B. - // (This also detects whether either endpoint is inside the cell.) - minDist := minChordAngle(c.Distance(a), c.Distance(b)) - if minDist == 0 { - return minDist - } - - // Otherwise, check whether the edge crosses the cell boundary. - crosser := NewChainEdgeCrosser(a, b, c.Vertex(3)) - for i := 0; i < 4; i++ { - if crosser.ChainCrossingSign(c.Vertex(i)) != DoNotCross { - return 0 - } - } - - // Finally, check whether the minimum distance occurs between a cell vertex - // and the interior of the edge AB. (Some of this work is redundant, since - // it also checks the distance to the endpoints A and B again.) - // - // Note that we don't need to check the distance from the interior of AB to - // the interior of a cell edge, because the only way that this distance can - // be minimal is if the two edges cross (already checked above). - for i := 0; i < 4; i++ { - minDist, _ = UpdateMinDistance(c.Vertex(i), a, b, minDist) - } - return minDist -} - -// MaxDistanceToEdge returns the maximum distance from the cell (including its interior) -// to the given edge AB. -func (c Cell) MaxDistanceToEdge(a, b Point) s1.ChordAngle { - // If the maximum distance from both endpoints to the cell is less than π/2 - // then the maximum distance from the edge to the cell is the maximum of the - // two endpoint distances. 
- maxDist := maxChordAngle(c.MaxDistance(a), c.MaxDistance(b)) - if maxDist <= s1.RightChordAngle { - return maxDist - } - - return s1.StraightChordAngle - c.DistanceToEdge(Point{a.Mul(-1)}, Point{b.Mul(-1)}) -} - -// DistanceToCell returns the minimum distance from this cell to the given cell. -// It returns zero if one cell contains the other. -func (c Cell) DistanceToCell(target Cell) s1.ChordAngle { - // If the cells intersect, the distance is zero. We use the (u,v) ranges - // rather than CellID intersects so that cells that share a partial edge or - // corner are considered to intersect. - if c.face == target.face && c.uv.Intersects(target.uv) { - return 0 - } - - // Otherwise, the minimum distance always occurs between a vertex of one - // cell and an edge of the other cell (including the edge endpoints). This - // represents a total of 32 possible (vertex, edge) pairs. - // - // TODO(roberts): This could be optimized to be at least 5x faster by pruning - // the set of possible closest vertex/edge pairs using the faces and (u,v) - // ranges of both cells. - var va, vb [4]Point - for i := 0; i < 4; i++ { - va[i] = c.Vertex(i) - vb[i] = target.Vertex(i) - } - minDist := s1.InfChordAngle() - for i := 0; i < 4; i++ { - for j := 0; j < 4; j++ { - minDist, _ = UpdateMinDistance(va[i], vb[j], vb[(j+1)&3], minDist) - minDist, _ = UpdateMinDistance(vb[i], va[j], va[(j+1)&3], minDist) - } - } - return minDist -} - -// MaxDistanceToCell returns the maximum distance from the cell (including its -// interior) to the given target cell. -func (c Cell) MaxDistanceToCell(target Cell) s1.ChordAngle { - // Need to check the antipodal target for intersection with the cell. If it - // intersects, the distance is the straight ChordAngle. - // antipodalUV is the transpose of the original UV, interpreted within the opposite face. 
- antipodalUV := r2.Rect{target.uv.Y, target.uv.X} - if int(c.face) == oppositeFace(int(target.face)) && c.uv.Intersects(antipodalUV) { - return s1.StraightChordAngle - } - - // Otherwise, the maximum distance always occurs between a vertex of one - // cell and an edge of the other cell (including the edge endpoints). This - // represents a total of 32 possible (vertex, edge) pairs. - // - // TODO(roberts): When the maximum distance is at most π/2, the maximum is - // always attained between a pair of vertices, and this could be made much - // faster by testing each vertex pair once rather than the current 4 times. - var va, vb [4]Point - for i := 0; i < 4; i++ { - va[i] = c.Vertex(i) - vb[i] = target.Vertex(i) - } - maxDist := s1.NegativeChordAngle - for i := 0; i < 4; i++ { - for j := 0; j < 4; j++ { - maxDist, _ = UpdateMaxDistance(va[i], vb[j], vb[(j+1)&3], maxDist) - maxDist, _ = UpdateMaxDistance(vb[i], va[j], va[(j+1)&3], maxDist) - } - } - return maxDist -} diff --git a/vendor/github.com/golang/geo/s2/cell_index.go b/vendor/github.com/golang/geo/s2/cell_index.go deleted file mode 100644 index ef16d0895..000000000 --- a/vendor/github.com/golang/geo/s2/cell_index.go +++ /dev/null @@ -1,498 +0,0 @@ -// Copyright 2020 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package s2 - -import ( - "sort" -) - -const ( - // A special label indicating that the ContentsIterator done is true. 
- cellIndexDoneContents = -1 -) - -// cellIndexNode represents a node in the CellIndex. Cells are organized in a -// tree such that the ancestors of a given node contain that node. -type cellIndexNode struct { - cellID CellID - label int32 - parent int32 -} - -// newCellIndexNode returns a node with the appropriate default values. -func newCellIndexNode() cellIndexNode { - return cellIndexNode{ - cellID: 0, - label: cellIndexDoneContents, - parent: -1, - } -} - -// A rangeNode represents a range of leaf CellIDs. The range starts at -// startID (a leaf cell) and ends at the startID field of the next -// rangeNode. contents points to the node of the CellIndex cellTree -// representing the cells that overlap this range. -type rangeNode struct { - startID CellID // First leaf cell contained by this range. - contents int32 // Contents of this node (an index within the cell tree). -} - -// CellIndexIterator is an iterator that visits the entire set of indexed -// (CellID, label) pairs in an unspecified order. -type CellIndexIterator struct { - // TODO(roberts): Implement -} - -// NewCellIndexIterator creates an iterator for the given CellIndex. -func NewCellIndexIterator(index *CellIndex) *CellIndexIterator { - return &CellIndexIterator{} -} - -// CellIndexRangeIterator is an iterator that seeks and iterates over a set of -// non-overlapping leaf cell ranges that cover the entire sphere. The indexed -// (CellID, label) pairs that intersect the current leaf cell range can be -// visited using CellIndexContentsIterator (see below). -type CellIndexRangeIterator struct { - rangeNodes []rangeNode - pos int - nonEmpty bool -} - -// NewCellIndexRangeIterator creates an iterator for the given CellIndex. -// The iterator is initially *unpositioned*; you must call a positioning method -// such as Begin() or Seek() before accessing its contents. 
-func NewCellIndexRangeIterator(index *CellIndex) *CellIndexRangeIterator { - return &CellIndexRangeIterator{ - rangeNodes: index.rangeNodes, - } -} - -// NewCellIndexNonEmptyRangeIterator creates an iterator for the given CellIndex. -// The iterator is initially *unpositioned*; you must call a positioning method such as -// Begin() or Seek() before accessing its contents. -func NewCellIndexNonEmptyRangeIterator(index *CellIndex) *CellIndexRangeIterator { - return &CellIndexRangeIterator{ - rangeNodes: index.rangeNodes, - nonEmpty: true, - } -} - -// StartID reports the CellID of the start of the current range of leaf CellIDs. -// -// If done is true, this returns the last possible CellID. This property means -// that most loops do not need to test done explicitly. -func (c *CellIndexRangeIterator) StartID() CellID { - return c.rangeNodes[c.pos].startID -} - -// LimitID reports the non-inclusive end of the current range of leaf CellIDs. -// -// This assumes the iterator is not done. -func (c *CellIndexRangeIterator) LimitID() CellID { - return c.rangeNodes[c.pos+1].startID -} - -// IsEmpty reports if no (CellID, label) pairs intersect this range. -// Also returns true if done() is true. -func (c *CellIndexRangeIterator) IsEmpty() bool { - return c.rangeNodes[c.pos].contents == cellIndexDoneContents -} - -// Begin positions the iterator at the first range of leaf cells (if any). -func (c *CellIndexRangeIterator) Begin() { - c.pos = 0 - for c.nonEmpty && c.IsEmpty() && !c.Done() { - c.pos++ - } -} - -// Prev positions the iterator at the previous entry and reports whether it was not -// already positioned at the beginning. -func (c *CellIndexRangeIterator) Prev() bool { - if c.nonEmpty { - return c.nonEmptyPrev() - } - return c.prev() -} - -// prev is used to position the iterator at the previous entry without checking -// if nonEmpty is true to prevent unwanted recursion. 
-func (c *CellIndexRangeIterator) prev() bool { - if c.pos == 0 { - return false - } - - c.pos-- - return true -} - -// Prev positions the iterator at the previous entry, and reports whether it was -// already positioned at the beginning. -func (c *CellIndexRangeIterator) nonEmptyPrev() bool { - for c.prev() { - if !c.IsEmpty() { - return true - } - } - - // Return the iterator to its original position. - if c.IsEmpty() && !c.Done() { - c.Next() - } - return false -} - -// Next advances the iterator to the next range of leaf cells. -// -// This assumes the iterator is not done. -func (c *CellIndexRangeIterator) Next() { - c.pos++ - for c.nonEmpty && c.IsEmpty() && !c.Done() { - c.pos++ - } -} - -// Advance reports if advancing would leave it positioned on a valid range. If -// the value would not be valid, the positioning is not changed. -func (c *CellIndexRangeIterator) Advance(n int) bool { - // Note that the last element of rangeNodes is a sentinel value. - if n >= len(c.rangeNodes)-1-c.pos { - return false - } - c.pos += n - return true -} - -// Finish positions the iterator so that done is true. -func (c *CellIndexRangeIterator) Finish() { - // Note that the last element of rangeNodes is a sentinel value. - c.pos = len(c.rangeNodes) - 1 -} - -// Done reports if the iterator is positioned beyond the last valid range. -func (c *CellIndexRangeIterator) Done() bool { - return c.pos >= len(c.rangeNodes)-1 -} - -// Seek positions the iterator at the first range with startID >= target. -// Such an entry always exists as long as "target" is a valid leaf cell. -// -// Note that it is valid to access startID even when done is true. -func (c *CellIndexRangeIterator) Seek(target CellID) { - c.pos = sort.Search(len(c.rangeNodes), func(i int) bool { - return c.rangeNodes[i].startID > target - }) - 1 - - // Ensure we don't go beyond the beginning. - if c.pos < 0 { - c.pos = 0 - } - - // Nonempty needs to find the next non-empty entry. 
- for c.nonEmpty && c.IsEmpty() && !c.Done() { - // c.Next() - c.pos++ - } -} - -// CellIndexContentsIterator is an iterator that visits the (CellID, label) pairs -// that cover a set of leaf cell ranges (see CellIndexRangeIterator). Note that -// when multiple leaf cell ranges are visited, this iterator only guarantees that -// each result will be reported at least once, i.e. duplicate values may be -// suppressed. If you want duplicate values to be reported again, be sure to call -// Clear first. -// -// In particular, the implementation guarantees that when multiple leaf -// cell ranges are visited in monotonically increasing order, then each -// (CellID, label) pair is reported exactly once. -type CellIndexContentsIterator struct { - // The maximum index within the cellTree slice visited during the - // previous call to StartUnion. This is used to eliminate duplicate - // values when StartUnion is called multiple times. - nodeCutoff int32 - - // The maximum index within the cellTree visited during the - // current call to StartUnion. This is used to update nodeCutoff. - nextNodeCutoff int32 - - // The value of startID from the previous call to StartUnion. - // This is used to check whether these values are monotonically - // increasing. - prevStartID CellID - - // The cell tree from CellIndex - cellTree []cellIndexNode - - // A copy of the current node in the cell tree. - node cellIndexNode -} - -// NewCellIndexContentsIterator returns a new contents iterator. -// -// Note that the iterator needs to be positioned using StartUnion before -// it can be safely used. -func NewCellIndexContentsIterator(index *CellIndex) *CellIndexContentsIterator { - it := &CellIndexContentsIterator{ - cellTree: index.cellTree, - prevStartID: 0, - nodeCutoff: -1, - nextNodeCutoff: -1, - node: cellIndexNode{label: cellIndexDoneContents}, - } - return it -} - -// Clear clears all state with respect to which range(s) have been visited. 
-func (c *CellIndexContentsIterator) Clear() { - c.prevStartID = 0 - c.nodeCutoff = -1 - c.nextNodeCutoff = -1 - c.node.label = cellIndexDoneContents -} - -// CellID returns the current CellID. -func (c *CellIndexContentsIterator) CellID() CellID { - return c.node.cellID -} - -// Label returns the current Label. -func (c *CellIndexContentsIterator) Label() int32 { - return c.node.label -} - -// Next advances the iterator to the next (CellID, label) pair covered by the -// current leaf cell range. -// -// This requires the iterator to not be done. -func (c *CellIndexContentsIterator) Next() { - if c.node.parent <= c.nodeCutoff { - // We have already processed this node and its ancestors. - c.nodeCutoff = c.nextNodeCutoff - c.node.label = cellIndexDoneContents - } else { - c.node = c.cellTree[c.node.parent] - } -} - -// Done reports if all (CellID, label) pairs have been visited. -func (c *CellIndexContentsIterator) Done() bool { - return c.node.label == cellIndexDoneContents -} - -// StartUnion positions the ContentsIterator at the first (cell_id, label) pair -// that covers the given leaf cell range. Note that when multiple leaf cell -// ranges are visited using the same ContentsIterator, duplicate values -// may be suppressed. If you don't want this behavior, call Reset() first. -func (c *CellIndexContentsIterator) StartUnion(r *CellIndexRangeIterator) { - if r.StartID() < c.prevStartID { - c.nodeCutoff = -1 // Can't automatically eliminate duplicates. - } - c.prevStartID = r.StartID() - - contents := r.rangeNodes[r.pos].contents - if contents <= c.nodeCutoff { - c.node.label = cellIndexDoneContents - } else { - c.node = c.cellTree[contents] - } - - // When visiting ancestors, we can stop as soon as the node index is smaller - // than any previously visited node index. Because indexes are assigned - // using a preorder traversal, such nodes are guaranteed to have already - // been reported. 
- c.nextNodeCutoff = contents -} - -// CellIndex stores a collection of (CellID, label) pairs. -// -// The CellIDs may be overlapping or contain duplicate values. For example, a -// CellIndex could store a collection of CellUnions, where each CellUnion -// gets its own non-negative int32 label. -// -// Similar to ShapeIndex and PointIndex which map each stored element to an -// identifier, CellIndex stores a label that is typically used to map the -// results of queries back to client's specific data. -// -// The zero value for a CellIndex is sufficient when constructing a CellIndex. -// -// To build a CellIndex where each Cell has a distinct label, call Add for each -// (CellID, label) pair, and then Build the index. For example: -// -// // contents is a mapping of an identifier in my system (restaurantID, -// // vehicleID, etc) to a CellID -// var contents = map[int32]CellID{...} -// -// for key, val := range contents { -// index.Add(val, key) -// } -// -// index.Build() -// -// There is also a helper method that adds all elements of CellUnion with the -// same label: -// -// index.AddCellUnion(cellUnion, label) -// -// Note that the index is not dynamic; the contents of the index cannot be -// changed once it has been built. Adding more after calling Build results in -// undefined behavior of the index. -// -// There are several options for retrieving data from the index. The simplest -// is to use a built-in method such as IntersectingLabels (which returns -// the labels of all cells that intersect a given target CellUnion): -// -// labels := index.IntersectingLabels(targetUnion); -// -// Alternatively, you can use a ClosestCellQuery which computes the cell(s) -// that are closest to a given target geometry. 
-// -// For example, here is how to find all cells that are closer than -// distanceLimit to a given target point: -// -// query := NewClosestCellQuery(cellIndex, opts) -// target := NewMinDistanceToPointTarget(targetPoint); -// for result := range query.FindCells(target) { -// // result.Distance() is the distance to the target. -// // result.CellID() is the indexed CellID. -// // result.Label() is the label associated with the CellID. -// DoSomething(targetPoint, result); -// } -// -// Internally, the index consists of a set of non-overlapping leaf cell ranges -// that subdivide the sphere and such that each range intersects a particular -// set of (cellID, label) pairs. -// -// Most clients should use either the methods such as VisitIntersectingCells -// and IntersectingLabels, or a helper such as ClosestCellQuery. -type CellIndex struct { - // A tree of (cellID, label) pairs such that if X is an ancestor of Y, then - // X.cellID contains Y.cellID. The contents of a given range of leaf - // cells can be represented by pointing to a node of this tree. - cellTree []cellIndexNode - - // The last element of rangeNodes is a sentinel value, which is necessary - // in order to represent the range covered by the previous element. - rangeNodes []rangeNode -} - -// Add adds the given CellID and Label to the index. -func (c *CellIndex) Add(id CellID, label int32) { - if label < 0 { - panic("labels must be non-negative") - } - c.cellTree = append(c.cellTree, cellIndexNode{cellID: id, label: label, parent: -1}) -} - -// AddCellUnion adds all of the elements of the given CellUnion to the index with the same label. -func (c *CellIndex) AddCellUnion(cu CellUnion, label int32) { - if label < 0 { - panic("labels must be non-negative") - } - for _, cell := range cu { - c.Add(cell, label) - } -} - -// Build builds the index for use. This method should only be called once. 
-func (c *CellIndex) Build() { - // To build the cell tree and leaf cell ranges, we maintain a stack of - // (CellID, label) pairs that contain the current leaf cell. This struct - // represents an instruction to push or pop a (cellID, label) pair. - // - // If label >= 0, the (cellID, label) pair is pushed on the stack. - // If CellID == SentinelCellID, a pair is popped from the stack. - // Otherwise the stack is unchanged but a rangeNode is still emitted. - - // delta represents an entry in a stack of (CellID, label) pairs used in the - // construction of the CellIndex structure. - type delta struct { - startID CellID - cellID CellID - label int32 - } - - deltas := make([]delta, 0, 2*len(c.cellTree)+2) - - // Create two deltas for each (cellID, label) pair: one to add the pair to - // the stack (at the start of its leaf cell range), and one to remove it from - // the stack (at the end of its leaf cell range). - for _, node := range c.cellTree { - deltas = append(deltas, delta{ - startID: node.cellID.RangeMin(), - cellID: node.cellID, - label: node.label, - }) - deltas = append(deltas, delta{ - startID: node.cellID.RangeMax().Next(), - cellID: SentinelCellID, - label: -1, - }) - } - - // We also create two special deltas to ensure that a RangeNode is emitted at - // the beginning and end of the CellID range. - deltas = append(deltas, delta{ - startID: CellIDFromFace(0).ChildBeginAtLevel(maxLevel), - cellID: CellID(0), - label: -1, - }) - deltas = append(deltas, delta{ - startID: CellIDFromFace(5).ChildEndAtLevel(maxLevel), - cellID: CellID(0), - label: -1, - }) - - sort.Slice(deltas, func(i, j int) bool { - // deltas are sorted first by startID, then in reverse order by cellID, - // and then by label. This is necessary to ensure that (1) larger cells - // are pushed on the stack before smaller cells, and (2) cells are popped - // off the stack before any new cells are added. 
- - if si, sj := deltas[i].startID, deltas[j].startID; si != sj { - return si < sj - } - if si, sj := deltas[i].cellID, deltas[j].cellID; si != sj { - return si > sj - } - return deltas[i].label < deltas[j].label - }) - - // Now walk through the deltas to build the leaf cell ranges and cell tree - // (which is essentially a permanent form of the "stack" described above). - c.cellTree = nil - c.rangeNodes = nil - contents := int32(-1) - for i := 0; i < len(deltas); { - startID := deltas[i].startID - // Process all the deltas associated with the current startID. - for ; i < len(deltas) && deltas[i].startID == startID; i++ { - if deltas[i].label >= 0 { - c.cellTree = append(c.cellTree, cellIndexNode{ - cellID: deltas[i].cellID, - label: deltas[i].label, - parent: contents}) - contents = int32(len(c.cellTree) - 1) - } else if deltas[i].cellID == SentinelCellID { - contents = c.cellTree[contents].parent - } - } - c.rangeNodes = append(c.rangeNodes, rangeNode{startID, contents}) - } -} - -// TODO(roberts): Differences from C++ -// IntersectingLabels -// VisitIntersectingCells -// CellIndexIterator diff --git a/vendor/github.com/golang/geo/s2/cellid.go b/vendor/github.com/golang/geo/s2/cellid.go deleted file mode 100644 index c6cbaf2db..000000000 --- a/vendor/github.com/golang/geo/s2/cellid.go +++ /dev/null @@ -1,944 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package s2 - -import ( - "bytes" - "fmt" - "io" - "math" - "sort" - "strconv" - "strings" - - "github.com/golang/geo/r1" - "github.com/golang/geo/r2" - "github.com/golang/geo/r3" - "github.com/golang/geo/s1" -) - -// CellID uniquely identifies a cell in the S2 cell decomposition. -// The most significant 3 bits encode the face number (0-5). The -// remaining 61 bits encode the position of the center of this cell -// along the Hilbert curve on that face. The zero value and the value -// (1<<64)-1 are invalid cell IDs. The first compares less than any -// valid cell ID, the second as greater than any valid cell ID. -// -// Sequentially increasing cell IDs follow a continuous space-filling curve -// over the entire sphere. They have the following properties: -// -// - The ID of a cell at level k consists of a 3-bit face number followed -// by k bit pairs that recursively select one of the four children of -// each cell. The next bit is always 1, and all other bits are 0. -// Therefore, the level of a cell is determined by the position of its -// lowest-numbered bit that is turned on (for a cell at level k, this -// position is 2 * (maxLevel - k)). -// -// - The ID of a parent cell is at the midpoint of the range of IDs spanned -// by its children (or by its descendants at any level). -// -// Leaf cells are often used to represent points on the unit sphere, and -// this type provides methods for converting directly between these two -// representations. For cells that represent 2D regions rather than -// discrete point, it is better to use Cells. -type CellID uint64 - -// SentinelCellID is an invalid cell ID guaranteed to be larger than any -// valid cell ID. It is used primarily by ShapeIndex. The value is also used -// by some S2 types when encoding data. -// Note that the sentinel's RangeMin == RangeMax == itself. -const SentinelCellID = CellID(^uint64(0)) - -// sortCellIDs sorts the slice of CellIDs in place. 
-func sortCellIDs(ci []CellID) { - sort.Sort(cellIDs(ci)) -} - -// cellIDs implements the Sort interface for slices of CellIDs. -type cellIDs []CellID - -func (c cellIDs) Len() int { return len(c) } -func (c cellIDs) Swap(i, j int) { c[i], c[j] = c[j], c[i] } -func (c cellIDs) Less(i, j int) bool { return c[i] < c[j] } - -// TODO(dsymonds): Some of these constants should probably be exported. -const ( - faceBits = 3 - numFaces = 6 - - // This is the number of levels needed to specify a leaf cell. - maxLevel = 30 - - // The extra position bit (61 rather than 60) lets us encode each cell as its - // Hilbert curve position at the cell center (which is halfway along the - // portion of the Hilbert curve that fills that cell). - posBits = 2*maxLevel + 1 - - // The maximum index of a valid leaf cell plus one. The range of valid leaf - // cell indices is [0..maxSize-1]. - maxSize = 1 << maxLevel - - wrapOffset = uint64(numFaces) << posBits -) - -// CellIDFromFacePosLevel returns a cell given its face in the range -// [0,5], the 61-bit Hilbert curve position pos within that face, and -// the level in the range [0,maxLevel]. The position in the cell ID -// will be truncated to correspond to the Hilbert curve position at -// the center of the returned cell. -func CellIDFromFacePosLevel(face int, pos uint64, level int) CellID { - return CellID(uint64(face)< 16 { - return CellID(0) - } - n, err := strconv.ParseUint(s, 16, 64) - if err != nil { - return CellID(0) - } - // Equivalent to right-padding string with zeros to 16 characters. - if len(s) < 16 { - n = n << (4 * uint(16-len(s))) - } - return CellID(n) -} - -// ToToken returns a hex-encoded string of the uint64 cell id, with leading -// zeros included but trailing zeros stripped. -func (ci CellID) ToToken() string { - s := strings.TrimRight(fmt.Sprintf("%016x", uint64(ci)), "0") - if len(s) == 0 { - return "X" - } - return s -} - -// IsValid reports whether ci represents a valid cell. 
-func (ci CellID) IsValid() bool { - return ci.Face() < numFaces && (ci.lsb()&0x1555555555555555 != 0) -} - -// Face returns the cube face for this cell ID, in the range [0,5]. -func (ci CellID) Face() int { return int(uint64(ci) >> posBits) } - -// Pos returns the position along the Hilbert curve of this cell ID, in the range [0,2^posBits-1]. -func (ci CellID) Pos() uint64 { return uint64(ci) & (^uint64(0) >> faceBits) } - -// Level returns the subdivision level of this cell ID, in the range [0, maxLevel]. -func (ci CellID) Level() int { - return maxLevel - findLSBSetNonZero64(uint64(ci))>>1 -} - -// IsLeaf returns whether this cell ID is at the deepest level; -// that is, the level at which the cells are smallest. -func (ci CellID) IsLeaf() bool { return uint64(ci)&1 != 0 } - -// ChildPosition returns the child position (0..3) of this cell's -// ancestor at the given level, relative to its parent. The argument -// should be in the range 1..kMaxLevel. For example, -// ChildPosition(1) returns the position of this cell's level-1 -// ancestor within its top-level face cell. -func (ci CellID) ChildPosition(level int) int { - return int(uint64(ci)>>uint64(2*(maxLevel-level)+1)) & 3 -} - -// lsbForLevel returns the lowest-numbered bit that is on for cells at the given level. -func lsbForLevel(level int) uint64 { return 1 << uint64(2*(maxLevel-level)) } - -// Parent returns the cell at the given level, which must be no greater than the current level. -func (ci CellID) Parent(level int) CellID { - lsb := lsbForLevel(level) - return CellID((uint64(ci) & -lsb) | lsb) -} - -// immediateParent is cheaper than Parent, but assumes !ci.isFace(). -func (ci CellID) immediateParent() CellID { - nlsb := CellID(ci.lsb() << 2) - return (ci & -nlsb) | nlsb -} - -// isFace returns whether this is a top-level (face) cell. -func (ci CellID) isFace() bool { return uint64(ci)&(lsbForLevel(0)-1) == 0 } - -// lsb returns the least significant bit that is set. 
-func (ci CellID) lsb() uint64 { return uint64(ci) & -uint64(ci) } - -// Children returns the four immediate children of this cell. -// If ci is a leaf cell, it returns four identical cells that are not the children. -func (ci CellID) Children() [4]CellID { - var ch [4]CellID - lsb := CellID(ci.lsb()) - ch[0] = ci - lsb + lsb>>2 - lsb >>= 1 - ch[1] = ch[0] + lsb - ch[2] = ch[1] + lsb - ch[3] = ch[2] + lsb - return ch -} - -func sizeIJ(level int) int { - return 1 << uint(maxLevel-level) -} - -// EdgeNeighbors returns the four cells that are adjacent across the cell's four edges. -// Edges 0, 1, 2, 3 are in the down, right, up, left directions in the face space. -// All neighbors are guaranteed to be distinct. -func (ci CellID) EdgeNeighbors() [4]CellID { - level := ci.Level() - size := sizeIJ(level) - f, i, j, _ := ci.faceIJOrientation() - return [4]CellID{ - cellIDFromFaceIJWrap(f, i, j-size).Parent(level), - cellIDFromFaceIJWrap(f, i+size, j).Parent(level), - cellIDFromFaceIJWrap(f, i, j+size).Parent(level), - cellIDFromFaceIJWrap(f, i-size, j).Parent(level), - } -} - -// VertexNeighbors returns the neighboring cellIDs with vertex closest to this cell at the given level. -// (Normally there are four neighbors, but the closest vertex may only have three neighbors if it is one of -// the 8 cube vertices.) 
-func (ci CellID) VertexNeighbors(level int) []CellID { - halfSize := sizeIJ(level + 1) - size := halfSize << 1 - f, i, j, _ := ci.faceIJOrientation() - - var isame, jsame bool - var ioffset, joffset int - if i&halfSize != 0 { - ioffset = size - isame = (i + size) < maxSize - } else { - ioffset = -size - isame = (i - size) >= 0 - } - if j&halfSize != 0 { - joffset = size - jsame = (j + size) < maxSize - } else { - joffset = -size - jsame = (j - size) >= 0 - } - - results := []CellID{ - ci.Parent(level), - cellIDFromFaceIJSame(f, i+ioffset, j, isame).Parent(level), - cellIDFromFaceIJSame(f, i, j+joffset, jsame).Parent(level), - } - - if isame || jsame { - results = append(results, cellIDFromFaceIJSame(f, i+ioffset, j+joffset, isame && jsame).Parent(level)) - } - - return results -} - -// AllNeighbors returns all neighbors of this cell at the given level. Two -// cells X and Y are neighbors if their boundaries intersect but their -// interiors do not. In particular, two cells that intersect at a single -// point are neighbors. Note that for cells adjacent to a face vertex, the -// same neighbor may be returned more than once. There could be up to eight -// neighbors including the diagonal ones that share the vertex. -// -// This requires level >= ci.Level(). -func (ci CellID) AllNeighbors(level int) []CellID { - var neighbors []CellID - - face, i, j, _ := ci.faceIJOrientation() - - // Find the coordinates of the lower left-hand leaf cell. We need to - // normalize (i,j) to a known position within the cell because level - // may be larger than this cell's level. - size := sizeIJ(ci.Level()) - i &= -size - j &= -size - - nbrSize := sizeIJ(level) - - // We compute the top-bottom, left-right, and diagonal neighbors in one - // pass. The loop test is at the end of the loop to avoid 32-bit overflow. 
- for k := -nbrSize; ; k += nbrSize { - var sameFace bool - if k < 0 { - sameFace = (j+k >= 0) - } else if k >= size { - sameFace = (j+k < maxSize) - } else { - sameFace = true - // Top and bottom neighbors. - neighbors = append(neighbors, cellIDFromFaceIJSame(face, i+k, j-nbrSize, - j-size >= 0).Parent(level)) - neighbors = append(neighbors, cellIDFromFaceIJSame(face, i+k, j+size, - j+size < maxSize).Parent(level)) - } - - // Left, right, and diagonal neighbors. - neighbors = append(neighbors, cellIDFromFaceIJSame(face, i-nbrSize, j+k, - sameFace && i-size >= 0).Parent(level)) - neighbors = append(neighbors, cellIDFromFaceIJSame(face, i+size, j+k, - sameFace && i+size < maxSize).Parent(level)) - - if k >= size { - break - } - } - - return neighbors -} - -// RangeMin returns the minimum CellID that is contained within this cell. -func (ci CellID) RangeMin() CellID { return CellID(uint64(ci) - (ci.lsb() - 1)) } - -// RangeMax returns the maximum CellID that is contained within this cell. -func (ci CellID) RangeMax() CellID { return CellID(uint64(ci) + (ci.lsb() - 1)) } - -// Contains returns true iff the CellID contains oci. -func (ci CellID) Contains(oci CellID) bool { - return uint64(ci.RangeMin()) <= uint64(oci) && uint64(oci) <= uint64(ci.RangeMax()) -} - -// Intersects returns true iff the CellID intersects oci. -func (ci CellID) Intersects(oci CellID) bool { - return uint64(oci.RangeMin()) <= uint64(ci.RangeMax()) && uint64(oci.RangeMax()) >= uint64(ci.RangeMin()) -} - -// String returns the string representation of the cell ID in the form "1/3210". 
-func (ci CellID) String() string { - if !ci.IsValid() { - return "Invalid: " + strconv.FormatInt(int64(ci), 16) - } - var b bytes.Buffer - b.WriteByte("012345"[ci.Face()]) // values > 5 will have been picked off by !IsValid above - b.WriteByte('/') - for level := 1; level <= ci.Level(); level++ { - b.WriteByte("0123"[ci.ChildPosition(level)]) - } - return b.String() -} - -// cellIDFromString returns a CellID from a string in the form "1/3210". -func cellIDFromString(s string) CellID { - level := len(s) - 2 - if level < 0 || level > maxLevel { - return CellID(0) - } - face := int(s[0] - '0') - if face < 0 || face > 5 || s[1] != '/' { - return CellID(0) - } - id := CellIDFromFace(face) - for i := 2; i < len(s); i++ { - childPos := s[i] - '0' - if childPos < 0 || childPos > 3 { - return CellID(0) - } - id = id.Children()[childPos] - } - return id -} - -// Point returns the center of the s2 cell on the sphere as a Point. -// The maximum directional error in Point (compared to the exact -// mathematical result) is 1.5 * dblEpsilon radians, and the maximum length -// error is 2 * dblEpsilon (the same as Normalize). -func (ci CellID) Point() Point { return Point{ci.rawPoint().Normalize()} } - -// LatLng returns the center of the s2 cell on the sphere as a LatLng. -func (ci CellID) LatLng() LatLng { return LatLngFromPoint(Point{ci.rawPoint()}) } - -// ChildBegin returns the first child in a traversal of the children of this cell, in Hilbert curve order. -// -// for ci := c.ChildBegin(); ci != c.ChildEnd(); ci = ci.Next() { -// ... -// } -func (ci CellID) ChildBegin() CellID { - ol := ci.lsb() - return CellID(uint64(ci) - ol + ol>>2) -} - -// ChildBeginAtLevel returns the first cell in a traversal of children a given level deeper than this cell, in -// Hilbert curve order. The given level must be no smaller than the cell's level. -// See ChildBegin for example use. 
-func (ci CellID) ChildBeginAtLevel(level int) CellID { - return CellID(uint64(ci) - ci.lsb() + lsbForLevel(level)) -} - -// ChildEnd returns the first cell after a traversal of the children of this cell in Hilbert curve order. -// The returned cell may be invalid. -func (ci CellID) ChildEnd() CellID { - ol := ci.lsb() - return CellID(uint64(ci) + ol + ol>>2) -} - -// ChildEndAtLevel returns the first cell after the last child in a traversal of children a given level deeper -// than this cell, in Hilbert curve order. -// The given level must be no smaller than the cell's level. -// The returned cell may be invalid. -func (ci CellID) ChildEndAtLevel(level int) CellID { - return CellID(uint64(ci) + ci.lsb() + lsbForLevel(level)) -} - -// Next returns the next cell along the Hilbert curve. -// This is expected to be used with ChildBegin and ChildEnd, -// or ChildBeginAtLevel and ChildEndAtLevel. -func (ci CellID) Next() CellID { - return CellID(uint64(ci) + ci.lsb()<<1) -} - -// Prev returns the previous cell along the Hilbert curve. -func (ci CellID) Prev() CellID { - return CellID(uint64(ci) - ci.lsb()<<1) -} - -// NextWrap returns the next cell along the Hilbert curve, wrapping from last to -// first as necessary. This should not be used with ChildBegin and ChildEnd. -func (ci CellID) NextWrap() CellID { - n := ci.Next() - if uint64(n) < wrapOffset { - return n - } - return CellID(uint64(n) - wrapOffset) -} - -// PrevWrap returns the previous cell along the Hilbert curve, wrapping around from -// first to last as necessary. This should not be used with ChildBegin and ChildEnd. -func (ci CellID) PrevWrap() CellID { - p := ci.Prev() - if uint64(p) < wrapOffset { - return p - } - return CellID(uint64(p) + wrapOffset) -} - -// AdvanceWrap advances or retreats the indicated number of steps along the -// Hilbert curve at the current level and returns the new position. The -// position wraps between the first and last faces as necessary. 
-func (ci CellID) AdvanceWrap(steps int64) CellID { - if steps == 0 { - return ci - } - - // We clamp the number of steps if necessary to ensure that we do not - // advance past the End() or before the Begin() of this level. - shift := uint(2*(maxLevel-ci.Level()) + 1) - if steps < 0 { - if min := -int64(uint64(ci) >> shift); steps < min { - wrap := int64(wrapOffset >> shift) - steps %= wrap - if steps < min { - steps += wrap - } - } - } else { - // Unlike Advance(), we don't want to return End(level). - if max := int64((wrapOffset - uint64(ci)) >> shift); steps > max { - wrap := int64(wrapOffset >> shift) - steps %= wrap - if steps > max { - steps -= wrap - } - } - } - - // If steps is negative, then shifting it left has undefined behavior. - // Cast to uint64 for a 2's complement answer. - return CellID(uint64(ci) + (uint64(steps) << shift)) -} - -// Encode encodes the CellID. -func (ci CellID) Encode(w io.Writer) error { - e := &encoder{w: w} - ci.encode(e) - return e.err -} - -func (ci CellID) encode(e *encoder) { - e.writeUint64(uint64(ci)) -} - -// Decode decodes the CellID. -func (ci *CellID) Decode(r io.Reader) error { - d := &decoder{r: asByteReader(r)} - ci.decode(d) - return d.err -} - -func (ci *CellID) decode(d *decoder) { - *ci = CellID(d.readUint64()) -} - -// TODO: the methods below are not exported yet. Settle on the entire API design -// before doing this. Do we want to mirror the C++ one as closely as possible? - -// distanceFromBegin returns the number of steps along the Hilbert curve that -// this cell is from the first node in the S2 hierarchy at our level. (i.e., -// FromFace(0).ChildBeginAtLevel(ci.Level())). This is analogous to Pos(), but -// for this cell's level. -// The return value is always non-negative. -func (ci CellID) distanceFromBegin() int64 { - return int64(ci >> uint64(2*(maxLevel-ci.Level())+1)) -} - -// rawPoint returns an unnormalized r3 vector from the origin through the center -// of the s2 cell on the sphere. 
-func (ci CellID) rawPoint() r3.Vector { - face, si, ti := ci.faceSiTi() - return faceUVToXYZ(face, stToUV((0.5/maxSize)*float64(si)), stToUV((0.5/maxSize)*float64(ti))) -} - -// faceSiTi returns the Face/Si/Ti coordinates of the center of the cell. -func (ci CellID) faceSiTi() (face int, si, ti uint32) { - face, i, j, _ := ci.faceIJOrientation() - delta := 0 - if ci.IsLeaf() { - delta = 1 - } else { - if (i^(int(ci)>>2))&1 != 0 { - delta = 2 - } - } - return face, uint32(2*i + delta), uint32(2*j + delta) -} - -// faceIJOrientation uses the global lookupIJ table to unfiddle the bits of ci. -func (ci CellID) faceIJOrientation() (f, i, j, orientation int) { - f = ci.Face() - orientation = f & swapMask - nbits := maxLevel - 7*lookupBits // first iteration - - // Each iteration maps 8 bits of the Hilbert curve position into - // 4 bits of "i" and "j". The lookup table transforms a key of the - // form "ppppppppoo" to a value of the form "iiiijjjjoo", where the - // letters [ijpo] represents bits of "i", "j", the Hilbert curve - // position, and the Hilbert curve orientation respectively. - // - // On the first iteration we need to be careful to clear out the bits - // representing the cube face. - for k := 7; k >= 0; k-- { - orientation += (int(uint64(ci)>>uint64(k*2*lookupBits+1)) & ((1 << uint(2*nbits)) - 1)) << 2 - orientation = lookupIJ[orientation] - i += (orientation >> (lookupBits + 2)) << uint(k*lookupBits) - j += ((orientation >> 2) & ((1 << lookupBits) - 1)) << uint(k*lookupBits) - orientation &= (swapMask | invertMask) - nbits = lookupBits // following iterations - } - - // The position of a non-leaf cell at level "n" consists of a prefix of - // 2*n bits that identifies the cell, followed by a suffix of - // 2*(maxLevel-n)+1 bits of the form 10*. If n==maxLevel, the suffix is - // just "1" and has no effect. Otherwise, it consists of "10", followed - // by (maxLevel-n-1) repetitions of "00", followed by "0". 
The "10" has - // no effect, while each occurrence of "00" has the effect of reversing - // the swapMask bit. - if ci.lsb()&0x1111111111111110 != 0 { - orientation ^= swapMask - } - - return -} - -// cellIDFromFaceIJ returns a leaf cell given its cube face (range 0..5) and IJ coordinates. -func cellIDFromFaceIJ(f, i, j int) CellID { - // Note that this value gets shifted one bit to the left at the end - // of the function. - n := uint64(f) << (posBits - 1) - // Alternating faces have opposite Hilbert curve orientations; this - // is necessary in order for all faces to have a right-handed - // coordinate system. - bits := f & swapMask - // Each iteration maps 4 bits of "i" and "j" into 8 bits of the Hilbert - // curve position. The lookup table transforms a 10-bit key of the form - // "iiiijjjjoo" to a 10-bit value of the form "ppppppppoo", where the - // letters [ijpo] denote bits of "i", "j", Hilbert curve position, and - // Hilbert curve orientation respectively. - for k := 7; k >= 0; k-- { - mask := (1 << lookupBits) - 1 - bits += ((i >> uint(k*lookupBits)) & mask) << (lookupBits + 2) - bits += ((j >> uint(k*lookupBits)) & mask) << 2 - bits = lookupPos[bits] - n |= uint64(bits>>2) << (uint(k) * 2 * lookupBits) - bits &= (swapMask | invertMask) - } - return CellID(n*2 + 1) -} - -func cellIDFromFaceIJWrap(f, i, j int) CellID { - // Convert i and j to the coordinates of a leaf cell just beyond the - // boundary of this face. This prevents 32-bit overflow in the case - // of finding the neighbors of a face cell. - i = clampInt(i, -1, maxSize) - j = clampInt(j, -1, maxSize) - - // We want to wrap these coordinates onto the appropriate adjacent face. - // The easiest way to do this is to convert the (i,j) coordinates to (x,y,z) - // (which yields a point outside the normal face boundary), and then call - // xyzToFaceUV to project back onto the correct face. 
- // - // The code below converts (i,j) to (si,ti), and then (si,ti) to (u,v) using - // the linear projection (u=2*s-1 and v=2*t-1). (The code further below - // converts back using the inverse projection, s=0.5*(u+1) and t=0.5*(v+1). - // Any projection would work here, so we use the simplest.) We also clamp - // the (u,v) coordinates so that the point is barely outside the - // [-1,1]x[-1,1] face rectangle, since otherwise the reprojection step - // (which divides by the new z coordinate) might change the other - // coordinates enough so that we end up in the wrong leaf cell. - const scale = 1.0 / maxSize - limit := math.Nextafter(1, 2) - u := math.Max(-limit, math.Min(limit, scale*float64((i<<1)+1-maxSize))) - v := math.Max(-limit, math.Min(limit, scale*float64((j<<1)+1-maxSize))) - - // Find the leaf cell coordinates on the adjacent face, and convert - // them to a cell id at the appropriate level. - f, u, v = xyzToFaceUV(faceUVToXYZ(f, u, v)) - return cellIDFromFaceIJ(f, stToIJ(0.5*(u+1)), stToIJ(0.5*(v+1))) -} - -func cellIDFromFaceIJSame(f, i, j int, sameFace bool) CellID { - if sameFace { - return cellIDFromFaceIJ(f, i, j) - } - return cellIDFromFaceIJWrap(f, i, j) -} - -// ijToSTMin converts the i- or j-index of a leaf cell to the minimum corresponding -// s- or t-value contained by that cell. The argument must be in the range -// [0..2**30], i.e. up to one position beyond the normal range of valid leaf -// cell indices. -func ijToSTMin(i int) float64 { - return float64(i) / float64(maxSize) -} - -// stToIJ converts value in ST coordinates to a value in IJ coordinates. -func stToIJ(s float64) int { - return clampInt(int(math.Floor(maxSize*s)), 0, maxSize-1) -} - -// cellIDFromPoint returns a leaf cell containing point p. Usually there is -// exactly one such cell, but for points along the edge of a cell, any -// adjacent cell may be (deterministically) chosen. This is because -// s2.CellIDs are considered to be closed sets. 
The returned cell will -// always contain the given point, i.e. -// -// CellFromPoint(p).ContainsPoint(p) -// -// is always true. -func cellIDFromPoint(p Point) CellID { - f, u, v := xyzToFaceUV(r3.Vector{p.X, p.Y, p.Z}) - i := stToIJ(uvToST(u)) - j := stToIJ(uvToST(v)) - return cellIDFromFaceIJ(f, i, j) -} - -// ijLevelToBoundUV returns the bounds in (u,v)-space for the cell at the given -// level containing the leaf cell with the given (i,j)-coordinates. -func ijLevelToBoundUV(i, j, level int) r2.Rect { - cellSize := sizeIJ(level) - xLo := i & -cellSize - yLo := j & -cellSize - - return r2.Rect{ - X: r1.Interval{ - Lo: stToUV(ijToSTMin(xLo)), - Hi: stToUV(ijToSTMin(xLo + cellSize)), - }, - Y: r1.Interval{ - Lo: stToUV(ijToSTMin(yLo)), - Hi: stToUV(ijToSTMin(yLo + cellSize)), - }, - } -} - -// Constants related to the bit mangling in the Cell ID. -const ( - lookupBits = 4 - swapMask = 0x01 - invertMask = 0x02 -) - -// The following lookup tables are used to convert efficiently between an -// (i,j) cell index and the corresponding position along the Hilbert curve. -// -// lookupPos maps 4 bits of "i", 4 bits of "j", and 2 bits representing the -// orientation of the current cell into 8 bits representing the order in which -// that subcell is visited by the Hilbert curve, plus 2 bits indicating the -// new orientation of the Hilbert curve within that subcell. (Cell -// orientations are represented as combination of swapMask and invertMask.) -// -// lookupIJ is an inverted table used for mapping in the opposite -// direction. -// -// We also experimented with looking up 16 bits at a time (14 bits of position -// plus 2 of orientation) but found that smaller lookup tables gave better -// performance. (2KB fits easily in the primary cache.) 
-var ( - ijToPos = [4][4]int{ - {0, 1, 3, 2}, // canonical order - {0, 3, 1, 2}, // axes swapped - {2, 3, 1, 0}, // bits inverted - {2, 1, 3, 0}, // swapped & inverted - } - posToIJ = [4][4]int{ - {0, 1, 3, 2}, // canonical order: (0,0), (0,1), (1,1), (1,0) - {0, 2, 3, 1}, // axes swapped: (0,0), (1,0), (1,1), (0,1) - {3, 2, 0, 1}, // bits inverted: (1,1), (1,0), (0,0), (0,1) - {3, 1, 0, 2}, // swapped & inverted: (1,1), (0,1), (0,0), (1,0) - } - posToOrientation = [4]int{swapMask, 0, 0, invertMask | swapMask} - lookupIJ [1 << (2*lookupBits + 2)]int - lookupPos [1 << (2*lookupBits + 2)]int -) - -func init() { - initLookupCell(0, 0, 0, 0, 0, 0) - initLookupCell(0, 0, 0, swapMask, 0, swapMask) - initLookupCell(0, 0, 0, invertMask, 0, invertMask) - initLookupCell(0, 0, 0, swapMask|invertMask, 0, swapMask|invertMask) -} - -// initLookupCell initializes the lookupIJ table at init time. -func initLookupCell(level, i, j, origOrientation, pos, orientation int) { - if level == lookupBits { - ij := (i << lookupBits) + j - lookupPos[(ij<<2)+origOrientation] = (pos << 2) + orientation - lookupIJ[(pos<<2)+origOrientation] = (ij << 2) + orientation - return - } - - level++ - i <<= 1 - j <<= 1 - pos <<= 2 - r := posToIJ[orientation] - initLookupCell(level, i+(r[0]>>1), j+(r[0]&1), origOrientation, pos, orientation^posToOrientation[0]) - initLookupCell(level, i+(r[1]>>1), j+(r[1]&1), origOrientation, pos+1, orientation^posToOrientation[1]) - initLookupCell(level, i+(r[2]>>1), j+(r[2]&1), origOrientation, pos+2, orientation^posToOrientation[2]) - initLookupCell(level, i+(r[3]>>1), j+(r[3]&1), origOrientation, pos+3, orientation^posToOrientation[3]) -} - -// CommonAncestorLevel returns the level of the common ancestor of the two S2 CellIDs. 
-func (ci CellID) CommonAncestorLevel(other CellID) (level int, ok bool) { - bits := uint64(ci ^ other) - if bits < ci.lsb() { - bits = ci.lsb() - } - if bits < other.lsb() { - bits = other.lsb() - } - - msbPos := findMSBSetNonZero64(bits) - if msbPos > 60 { - return 0, false - } - return (60 - msbPos) >> 1, true -} - -// Advance advances or retreats the indicated number of steps along the -// Hilbert curve at the current level, and returns the new position. The -// position is never advanced past End() or before Begin(). -func (ci CellID) Advance(steps int64) CellID { - if steps == 0 { - return ci - } - - // We clamp the number of steps if necessary to ensure that we do not - // advance past the End() or before the Begin() of this level. Note that - // minSteps and maxSteps always fit in a signed 64-bit integer. - stepShift := uint(2*(maxLevel-ci.Level()) + 1) - if steps < 0 { - minSteps := -int64(uint64(ci) >> stepShift) - if steps < minSteps { - steps = minSteps - } - } else { - maxSteps := int64((wrapOffset + ci.lsb() - uint64(ci)) >> stepShift) - if steps > maxSteps { - steps = maxSteps - } - } - return ci + CellID(steps)<= limit.RangeMin() { - return limit - } - - if ci.RangeMax() >= limit { - // The cell is too large, shrink it. Note that when generating coverings - // of CellID ranges, this loop usually executes only once. Also because - // ci.RangeMin() < limit.RangeMin(), we will always exit the loop by the - // time we reach a leaf cell. - for { - ci = ci.Children()[0] - if ci.RangeMax() < limit { - break - } - } - return ci - } - - // The cell may be too small. Grow it if necessary. Note that generally - // this loop only iterates once. - for !ci.isFace() { - parent := ci.immediateParent() - if parent.RangeMin() != start || parent.RangeMax() >= limit { - break - } - ci = parent - } - return ci -} - -// centerFaceSiTi returns the (face, si, ti) coordinates of the center of the cell. 
-// Note that although (si,ti) coordinates span the range [0,2**31] in general, -// the cell center coordinates are always in the range [1,2**31-1] and -// therefore can be represented using a signed 32-bit integer. -func (ci CellID) centerFaceSiTi() (face, si, ti int) { - // First we compute the discrete (i,j) coordinates of a leaf cell contained - // within the given cell. Given that cells are represented by the Hilbert - // curve position corresponding at their center, it turns out that the cell - // returned by faceIJOrientation is always one of two leaf cells closest - // to the center of the cell (unless the given cell is a leaf cell itself, - // in which case there is only one possibility). - // - // Given a cell of size s >= 2 (i.e. not a leaf cell), and letting (imin, - // jmin) be the coordinates of its lower left-hand corner, the leaf cell - // returned by faceIJOrientation is either (imin + s/2, jmin + s/2) - // (imin + s/2 - 1, jmin + s/2 - 1). The first case is the one we want. - // We can distinguish these two cases by looking at the low bit of i or - // j. In the second case the low bit is one, unless s == 2 (i.e. the - // level just above leaf cells) in which case the low bit is zero. - // - // In the code below, the expression ((i ^ (int(id) >> 2)) & 1) is true - // if we are in the second case described above. - face, i, j, _ := ci.faceIJOrientation() - delta := 0 - if ci.IsLeaf() { - delta = 1 - } else if (int64(i)^(int64(ci)>>2))&1 == 1 { - delta = 2 - } - - // Note that (2 * {i,j} + delta) will never overflow a 32-bit integer. - return face, 2*i + delta, 2*j + delta -} diff --git a/vendor/github.com/golang/geo/s2/cellunion.go b/vendor/github.com/golang/geo/s2/cellunion.go deleted file mode 100644 index 0654de973..000000000 --- a/vendor/github.com/golang/geo/s2/cellunion.go +++ /dev/null @@ -1,590 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package s2 - -import ( - "fmt" - "io" - "sort" - - "github.com/golang/geo/s1" -) - -// A CellUnion is a collection of CellIDs. -// -// It is normalized if it is sorted, and does not contain redundancy. -// Specifically, it may not contain the same CellID twice, nor a CellID that -// is contained by another, nor the four sibling CellIDs that are children of -// a single higher level CellID. -// -// CellUnions are not required to be normalized, but certain operations will -// return different results if they are not (e.g. Contains). -type CellUnion []CellID - -// CellUnionFromRange creates a CellUnion that covers the half-open range -// of leaf cells [begin, end). If begin == end the resulting union is empty. -// This requires that begin and end are both leaves, and begin <= end. -// To create a closed-ended range, pass in end.Next(). -func CellUnionFromRange(begin, end CellID) CellUnion { - // We repeatedly add the largest cell we can. - var cu CellUnion - for id := begin.MaxTile(end); id != end; id = id.Next().MaxTile(end) { - cu = append(cu, id) - } - // The output is normalized because the cells are added in order by the iteration. - return cu -} - -// CellUnionFromUnion creates a CellUnion from the union of the given CellUnions. -func CellUnionFromUnion(cellUnions ...CellUnion) CellUnion { - var cu CellUnion - for _, cellUnion := range cellUnions { - cu = append(cu, cellUnion...) 
- } - cu.Normalize() - return cu -} - -// CellUnionFromIntersection creates a CellUnion from the intersection of the given CellUnions. -func CellUnionFromIntersection(x, y CellUnion) CellUnion { - var cu CellUnion - - // This is a fairly efficient calculation that uses binary search to skip - // over sections of both input vectors. It takes constant time if all the - // cells of x come before or after all the cells of y in CellID order. - var i, j int - for i < len(x) && j < len(y) { - iMin := x[i].RangeMin() - jMin := y[j].RangeMin() - if iMin > jMin { - // Either j.Contains(i) or the two cells are disjoint. - if x[i] <= y[j].RangeMax() { - cu = append(cu, x[i]) - i++ - } else { - // Advance j to the first cell possibly contained by x[i]. - j = y.lowerBound(j+1, len(y), iMin) - // The previous cell y[j-1] may now contain x[i]. - if x[i] <= y[j-1].RangeMax() { - j-- - } - } - } else if jMin > iMin { - // Identical to the code above with i and j reversed. - if y[j] <= x[i].RangeMax() { - cu = append(cu, y[j]) - j++ - } else { - i = x.lowerBound(i+1, len(x), jMin) - if y[j] <= x[i-1].RangeMax() { - i-- - } - } - } else { - // i and j have the same RangeMin(), so one contains the other. - if x[i] < y[j] { - cu = append(cu, x[i]) - i++ - } else { - cu = append(cu, y[j]) - j++ - } - } - } - - // The output is generated in sorted order. - cu.Normalize() - return cu -} - -// CellUnionFromIntersectionWithCellID creates a CellUnion from the intersection -// of a CellUnion with the given CellID. This can be useful for splitting a -// CellUnion into chunks. 
-func CellUnionFromIntersectionWithCellID(x CellUnion, id CellID) CellUnion { - var cu CellUnion - if x.ContainsCellID(id) { - cu = append(cu, id) - cu.Normalize() - return cu - } - - idmax := id.RangeMax() - for i := x.lowerBound(0, len(x), id.RangeMin()); i < len(x) && x[i] <= idmax; i++ { - cu = append(cu, x[i]) - } - - cu.Normalize() - return cu -} - -// CellUnionFromDifference creates a CellUnion from the difference (x - y) -// of the given CellUnions. -func CellUnionFromDifference(x, y CellUnion) CellUnion { - // TODO(roberts): This is approximately O(N*log(N)), but could probably - // use similar techniques as CellUnionFromIntersectionWithCellID to be more efficient. - - var cu CellUnion - for _, xid := range x { - cu.cellUnionDifferenceInternal(xid, &y) - } - - // The output is generated in sorted order, and there should not be any - // cells that can be merged (provided that both inputs were normalized). - return cu -} - -// The C++ constructor methods FromNormalized and FromVerbatim are not necessary -// since they don't call Normalize, and just set the CellIDs directly on the object, -// so straight casting is sufficient in Go to replicate this behavior. - -// IsValid reports whether the cell union is valid, meaning that the CellIDs are -// valid, non-overlapping, and sorted in increasing order. -func (cu *CellUnion) IsValid() bool { - for i, cid := range *cu { - if !cid.IsValid() { - return false - } - if i == 0 { - continue - } - if (*cu)[i-1].RangeMax() >= cid.RangeMin() { - return false - } - } - return true -} - -// IsNormalized reports whether the cell union is normalized, meaning that it is -// satisfies IsValid and that no four cells have a common parent. -// Certain operations such as Contains will return a different -// result if the cell union is not normalized. 
-func (cu *CellUnion) IsNormalized() bool { - for i, cid := range *cu { - if !cid.IsValid() { - return false - } - if i == 0 { - continue - } - if (*cu)[i-1].RangeMax() >= cid.RangeMin() { - return false - } - if i < 3 { - continue - } - if areSiblings((*cu)[i-3], (*cu)[i-2], (*cu)[i-1], cid) { - return false - } - } - return true -} - -// Normalize normalizes the CellUnion. -func (cu *CellUnion) Normalize() { - sortCellIDs(*cu) - - output := make([]CellID, 0, len(*cu)) // the list of accepted cells - // Loop invariant: output is a sorted list of cells with no redundancy. - for _, ci := range *cu { - // The first two passes here either ignore this new candidate, - // or remove previously accepted cells that are covered by this candidate. - - // Ignore this cell if it is contained by the previous one. - // We only need to check the last accepted cell. The ordering of the - // cells implies containment (but not the converse), and output has no redundancy, - // so if this candidate is not contained by the last accepted cell - // then it cannot be contained by any previously accepted cell. - if len(output) > 0 && output[len(output)-1].Contains(ci) { - continue - } - - // Discard any previously accepted cells contained by this one. - // This could be any contiguous trailing subsequence, but it can't be - // a discontiguous subsequence because of the containment property of - // sorted S2 cells mentioned above. - j := len(output) - 1 // last index to keep - for j >= 0 { - if !ci.Contains(output[j]) { - break - } - j-- - } - output = output[:j+1] - - // See if the last three cells plus this one can be collapsed. - // We loop because collapsing three accepted cells and adding a higher level cell - // could cascade into previously accepted cells. - for len(output) >= 3 && areSiblings(output[len(output)-3], output[len(output)-2], output[len(output)-1], ci) { - // Replace four children by their parent cell. 
- output = output[:len(output)-3] - ci = ci.immediateParent() // checked !ci.isFace above - } - output = append(output, ci) - } - *cu = output -} - -// IntersectsCellID reports whether this CellUnion intersects the given cell ID. -func (cu *CellUnion) IntersectsCellID(id CellID) bool { - // Find index of array item that occurs directly after our probe cell: - i := sort.Search(len(*cu), func(i int) bool { return id < (*cu)[i] }) - - if i != len(*cu) && (*cu)[i].RangeMin() <= id.RangeMax() { - return true - } - return i != 0 && (*cu)[i-1].RangeMax() >= id.RangeMin() -} - -// ContainsCellID reports whether the CellUnion contains the given cell ID. -// Containment is defined with respect to regions, e.g. a cell contains its 4 children. -// -// CAVEAT: If you have constructed a non-normalized CellUnion, note that groups -// of 4 child cells are *not* considered to contain their parent cell. To get -// this behavior you must use one of the call Normalize() explicitly. -func (cu *CellUnion) ContainsCellID(id CellID) bool { - // Find index of array item that occurs directly after our probe cell: - i := sort.Search(len(*cu), func(i int) bool { return id < (*cu)[i] }) - - if i != len(*cu) && (*cu)[i].RangeMin() <= id { - return true - } - return i != 0 && (*cu)[i-1].RangeMax() >= id -} - -// Denormalize replaces this CellUnion with an expanded version of the -// CellUnion where any cell whose level is less than minLevel or where -// (level - minLevel) is not a multiple of levelMod is replaced by its -// children, until either both of these conditions are satisfied or the -// maximum level is reached. 
-func (cu *CellUnion) Denormalize(minLevel, levelMod int) { - var denorm CellUnion - for _, id := range *cu { - level := id.Level() - newLevel := level - if newLevel < minLevel { - newLevel = minLevel - } - if levelMod > 1 { - newLevel += (maxLevel - (newLevel - minLevel)) % levelMod - if newLevel > maxLevel { - newLevel = maxLevel - } - } - if newLevel == level { - denorm = append(denorm, id) - } else { - end := id.ChildEndAtLevel(newLevel) - for ci := id.ChildBeginAtLevel(newLevel); ci != end; ci = ci.Next() { - denorm = append(denorm, ci) - } - } - } - *cu = denorm -} - -// RectBound returns a Rect that bounds this entity. -func (cu *CellUnion) RectBound() Rect { - bound := EmptyRect() - for _, c := range *cu { - bound = bound.Union(CellFromCellID(c).RectBound()) - } - return bound -} - -// CapBound returns a Cap that bounds this entity. -func (cu *CellUnion) CapBound() Cap { - if len(*cu) == 0 { - return EmptyCap() - } - - // Compute the approximate centroid of the region. This won't produce the - // bounding cap of minimal area, but it should be close enough. - var centroid Point - - for _, ci := range *cu { - area := AvgAreaMetric.Value(ci.Level()) - centroid = Point{centroid.Add(ci.Point().Mul(area))} - } - - if zero := (Point{}); centroid == zero { - centroid = PointFromCoords(1, 0, 0) - } else { - centroid = Point{centroid.Normalize()} - } - - // Use the centroid as the cap axis, and expand the cap angle so that it - // contains the bounding caps of all the individual cells. Note that it is - // *not* sufficient to just bound all the cell vertices because the bounding - // cap may be concave (i.e. cover more than one hemisphere). - c := CapFromPoint(centroid) - for _, ci := range *cu { - c = c.AddCap(CellFromCellID(ci).CapBound()) - } - - return c -} - -// ContainsCell reports whether this cell union contains the given cell. 
-func (cu *CellUnion) ContainsCell(c Cell) bool { - return cu.ContainsCellID(c.id) -} - -// IntersectsCell reports whether this cell union intersects the given cell. -func (cu *CellUnion) IntersectsCell(c Cell) bool { - return cu.IntersectsCellID(c.id) -} - -// ContainsPoint reports whether this cell union contains the given point. -func (cu *CellUnion) ContainsPoint(p Point) bool { - return cu.ContainsCell(CellFromPoint(p)) -} - -// CellUnionBound computes a covering of the CellUnion. -func (cu *CellUnion) CellUnionBound() []CellID { - return cu.CapBound().CellUnionBound() -} - -// LeafCellsCovered reports the number of leaf cells covered by this cell union. -// This will be no more than 6*2^60 for the whole sphere. -func (cu *CellUnion) LeafCellsCovered() int64 { - var numLeaves int64 - for _, c := range *cu { - numLeaves += 1 << uint64((maxLevel-int64(c.Level()))<<1) - } - return numLeaves -} - -// Returns true if the given four cells have a common parent. -// This requires that the four CellIDs are distinct. -func areSiblings(a, b, c, d CellID) bool { - // A necessary (but not sufficient) condition is that the XOR of the - // four cell IDs must be zero. This is also very fast to test. - if (a ^ b ^ c) != d { - return false - } - - // Now we do a slightly more expensive but exact test. First, compute a - // mask that blocks out the two bits that encode the child position of - // "id" with respect to its parent, then check that the other three - // children all agree with "mask". - mask := d.lsb() << 1 - mask = ^(mask + (mask << 1)) - idMasked := (uint64(d) & mask) - return ((uint64(a)&mask) == idMasked && - (uint64(b)&mask) == idMasked && - (uint64(c)&mask) == idMasked && - !d.isFace()) -} - -// Contains reports whether this CellUnion contains all of the CellIDs of the given CellUnion. 
-func (cu *CellUnion) Contains(o CellUnion) bool { - // TODO(roberts): Investigate alternatives such as divide-and-conquer - // or alternating-skip-search that may be significantly faster in both - // the average and worst case. This applies to Intersects as well. - for _, id := range o { - if !cu.ContainsCellID(id) { - return false - } - } - - return true -} - -// Intersects reports whether this CellUnion intersects any of the CellIDs of the given CellUnion. -func (cu *CellUnion) Intersects(o CellUnion) bool { - for _, c := range *cu { - if o.IntersectsCellID(c) { - return true - } - } - - return false -} - -// lowerBound returns the index in this CellUnion to the first element whose value -// is not considered to go before the given cell id. (i.e., either it is equivalent -// or comes after the given id.) If there is no match, then end is returned. -func (cu *CellUnion) lowerBound(begin, end int, id CellID) int { - for i := begin; i < end; i++ { - if (*cu)[i] >= id { - return i - } - } - - return end -} - -// cellUnionDifferenceInternal adds the difference between the CellID and the union to -// the result CellUnion. If they intersect but the difference is non-empty, it divides -// and conquers. -func (cu *CellUnion) cellUnionDifferenceInternal(id CellID, other *CellUnion) { - if !other.IntersectsCellID(id) { - (*cu) = append((*cu), id) - return - } - - if !other.ContainsCellID(id) { - for _, child := range id.Children() { - cu.cellUnionDifferenceInternal(child, other) - } - } -} - -// ExpandAtLevel expands this CellUnion by adding a rim of cells at expandLevel -// around the unions boundary. -// -// For each cell c in the union, we add all cells at level -// expandLevel that abut c. There are typically eight of those -// (four edge-abutting and four sharing a vertex). However, if c is -// finer than expandLevel, we add all cells abutting -// c.Parent(expandLevel) as well as c.Parent(expandLevel) itself, -// as an expandLevel cell rarely abuts a smaller cell. 
-// -// Note that the size of the output is exponential in -// expandLevel. For example, if expandLevel == 20 and the input -// has a cell at level 10, there will be on the order of 4000 -// adjacent cells in the output. For most applications the -// ExpandByRadius method below is easier to use. -func (cu *CellUnion) ExpandAtLevel(level int) { - var output CellUnion - levelLsb := lsbForLevel(level) - for i := len(*cu) - 1; i >= 0; i-- { - id := (*cu)[i] - if id.lsb() < levelLsb { - id = id.Parent(level) - // Optimization: skip over any cells contained by this one. This is - // especially important when very small regions are being expanded. - for i > 0 && id.Contains((*cu)[i-1]) { - i-- - } - } - output = append(output, id) - output = append(output, id.AllNeighbors(level)...) - } - sortCellIDs(output) - - *cu = output - cu.Normalize() -} - -// ExpandByRadius expands this CellUnion such that it contains all points whose -// distance to the CellUnion is at most minRadius, but do not use cells that -// are more than maxLevelDiff levels higher than the largest cell in the input. -// The second parameter controls the tradeoff between accuracy and output size -// when a large region is being expanded by a small amount (e.g. expanding Canada -// by 1km). For example, if maxLevelDiff == 4 the region will always be expanded -// by approximately 1/16 the width of its largest cell. Note that in the worst case, -// the number of cells in the output can be up to 4 * (1 + 2 ** maxLevelDiff) times -// larger than the number of cells in the input. -func (cu *CellUnion) ExpandByRadius(minRadius s1.Angle, maxLevelDiff int) { - minLevel := maxLevel - for _, cid := range *cu { - minLevel = minInt(minLevel, cid.Level()) - } - - // Find the maximum level such that all cells are at least "minRadius" wide. 
- radiusLevel := MinWidthMetric.MaxLevel(minRadius.Radians()) - if radiusLevel == 0 && minRadius.Radians() > MinWidthMetric.Value(0) { - // The requested expansion is greater than the width of a face cell. - // The easiest way to handle this is to expand twice. - cu.ExpandAtLevel(0) - } - cu.ExpandAtLevel(minInt(minLevel+maxLevelDiff, radiusLevel)) -} - -// Equal reports whether the two CellUnions are equal. -func (cu CellUnion) Equal(o CellUnion) bool { - if len(cu) != len(o) { - return false - } - for i := 0; i < len(cu); i++ { - if cu[i] != o[i] { - return false - } - } - return true -} - -// AverageArea returns the average area of this CellUnion. -// This is accurate to within a factor of 1.7. -func (cu *CellUnion) AverageArea() float64 { - return AvgAreaMetric.Value(maxLevel) * float64(cu.LeafCellsCovered()) -} - -// ApproxArea returns the approximate area of this CellUnion. This method is accurate -// to within 3% percent for all cell sizes and accurate to within 0.1% for cells -// at level 5 or higher within the union. -func (cu *CellUnion) ApproxArea() float64 { - var area float64 - for _, id := range *cu { - area += CellFromCellID(id).ApproxArea() - } - return area -} - -// ExactArea returns the area of this CellUnion as accurately as possible. -func (cu *CellUnion) ExactArea() float64 { - var area float64 - for _, id := range *cu { - area += CellFromCellID(id).ExactArea() - } - return area -} - -// Encode encodes the CellUnion. -func (cu *CellUnion) Encode(w io.Writer) error { - e := &encoder{w: w} - cu.encode(e) - return e.err -} - -func (cu *CellUnion) encode(e *encoder) { - e.writeInt8(encodingVersion) - e.writeInt64(int64(len(*cu))) - for _, ci := range *cu { - ci.encode(e) - } -} - -// Decode decodes the CellUnion. 
-func (cu *CellUnion) Decode(r io.Reader) error { - d := &decoder{r: asByteReader(r)} - cu.decode(d) - return d.err -} - -func (cu *CellUnion) decode(d *decoder) { - version := d.readInt8() - if d.err != nil { - return - } - if version != encodingVersion { - d.err = fmt.Errorf("only version %d is supported", encodingVersion) - return - } - n := d.readInt64() - if d.err != nil { - return - } - const maxCells = 1000000 - if n > maxCells { - d.err = fmt.Errorf("too many cells (%d; max is %d)", n, maxCells) - return - } - *cu = make([]CellID, n) - for i := range *cu { - (*cu)[i].decode(d) - } -} diff --git a/vendor/github.com/golang/geo/s2/centroids.go b/vendor/github.com/golang/geo/s2/centroids.go deleted file mode 100644 index e8a91c442..000000000 --- a/vendor/github.com/golang/geo/s2/centroids.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright 2018 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package s2 - -import ( - "math" - - "github.com/golang/geo/r3" -) - -// There are several notions of the "centroid" of a triangle. First, there -// is the planar centroid, which is simply the centroid of the ordinary -// (non-spherical) triangle defined by the three vertices. Second, there is -// the surface centroid, which is defined as the intersection of the three -// medians of the spherical triangle. It is possible to show that this -// point is simply the planar centroid projected to the surface of the -// sphere. 
Finally, there is the true centroid (mass centroid), which is -// defined as the surface integral over the spherical triangle of (x,y,z) -// divided by the triangle area. This is the point that the triangle would -// rotate around if it was spinning in empty space. -// -// The best centroid for most purposes is the true centroid. Unlike the -// planar and surface centroids, the true centroid behaves linearly as -// regions are added or subtracted. That is, if you split a triangle into -// pieces and compute the average of their centroids (weighted by triangle -// area), the result equals the centroid of the original triangle. This is -// not true of the other centroids. -// -// Also note that the surface centroid may be nowhere near the intuitive -// "center" of a spherical triangle. For example, consider the triangle -// with vertices A=(1,eps,0), B=(0,0,1), C=(-1,eps,0) (a quarter-sphere). -// The surface centroid of this triangle is at S=(0, 2*eps, 1), which is -// within a distance of 2*eps of the vertex B. Note that the median from A -// (the segment connecting A to the midpoint of BC) passes through S, since -// this is the shortest path connecting the two endpoints. On the other -// hand, the true centroid is at M=(0, 0.5, 0.5), which when projected onto -// the surface is a much more reasonable interpretation of the "center" of -// this triangle. -// - -// TrueCentroid returns the true centroid of the spherical triangle ABC -// multiplied by the signed area of spherical triangle ABC. The reasons for -// multiplying by the signed area are (1) this is the quantity that needs to be -// summed to compute the centroid of a union or difference of triangles, and -// (2) it's actually easier to calculate this way. All points must have unit length. -// -// Note that the result of this function is defined to be Point(0, 0, 0) if -// the triangle is degenerate. -func TrueCentroid(a, b, c Point) Point { - // Use Distance to get accurate results for small triangles. 
- ra := float64(1) - if sa := float64(b.Distance(c)); sa != 0 { - ra = sa / math.Sin(sa) - } - rb := float64(1) - if sb := float64(c.Distance(a)); sb != 0 { - rb = sb / math.Sin(sb) - } - rc := float64(1) - if sc := float64(a.Distance(b)); sc != 0 { - rc = sc / math.Sin(sc) - } - - // Now compute a point M such that: - // - // [Ax Ay Az] [Mx] [ra] - // [Bx By Bz] [My] = 0.5 * det(A,B,C) * [rb] - // [Cx Cy Cz] [Mz] [rc] - // - // To improve the numerical stability we subtract the first row (A) from the - // other two rows; this reduces the cancellation error when A, B, and C are - // very close together. Then we solve it using Cramer's rule. - // - // The result is the true centroid of the triangle multiplied by the - // triangle's area. - // - // This code still isn't as numerically stable as it could be. - // The biggest potential improvement is to compute B-A and C-A more - // accurately so that (B-A)x(C-A) is always inside triangle ABC. - x := r3.Vector{a.X, b.X - a.X, c.X - a.X} - y := r3.Vector{a.Y, b.Y - a.Y, c.Y - a.Y} - z := r3.Vector{a.Z, b.Z - a.Z, c.Z - a.Z} - r := r3.Vector{ra, rb - ra, rc - ra} - - return Point{r3.Vector{y.Cross(z).Dot(r), z.Cross(x).Dot(r), x.Cross(y).Dot(r)}.Mul(0.5)} -} - -// EdgeTrueCentroid returns the true centroid of the spherical geodesic edge AB -// multiplied by the length of the edge AB. As with triangles, the true centroid -// of a collection of line segments may be computed simply by summing the result -// of this method for each segment. -// -// Note that the planar centroid of a line segment is simply 0.5 * (a + b), -// while the surface centroid is (a + b).Normalize(). However neither of -// these values is appropriate for computing the centroid of a collection of -// edges (such as a polyline). -// -// Also note that the result of this function is defined to be Point(0, 0, 0) -// if the edge is degenerate. 
-func EdgeTrueCentroid(a, b Point) Point { - // The centroid (multiplied by length) is a vector toward the midpoint - // of the edge, whose length is twice the sine of half the angle between - // the two vertices. Defining theta to be this angle, we have: - vDiff := a.Sub(b.Vector) // Length == 2*sin(theta) - vSum := a.Add(b.Vector) // Length == 2*cos(theta) - sin2 := vDiff.Norm2() - cos2 := vSum.Norm2() - if cos2 == 0 { - return Point{} // Ignore antipodal edges. - } - return Point{vSum.Mul(math.Sqrt(sin2 / cos2))} // Length == 2*sin(theta) -} - -// PlanarCentroid returns the centroid of the planar triangle ABC. This can be -// normalized to unit length to obtain the "surface centroid" of the corresponding -// spherical triangle, i.e. the intersection of the three medians. However, note -// that for large spherical triangles the surface centroid may be nowhere near -// the intuitive "center". -func PlanarCentroid(a, b, c Point) Point { - return Point{a.Add(b.Vector).Add(c.Vector).Mul(1. / 3)} -} diff --git a/vendor/github.com/golang/geo/s2/contains_point_query.go b/vendor/github.com/golang/geo/s2/contains_point_query.go deleted file mode 100644 index 3026f3601..000000000 --- a/vendor/github.com/golang/geo/s2/contains_point_query.go +++ /dev/null @@ -1,190 +0,0 @@ -// Copyright 2018 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package s2 - -// VertexModel defines whether shapes are considered to contain their vertices. 
-// Note that these definitions differ from the ones used by BooleanOperation. -// -// Note that points other than vertices are never contained by polylines. -// If you want need this behavior, use ClosestEdgeQuery's IsDistanceLess -// with a suitable distance threshold instead. -type VertexModel int - -const ( - // VertexModelOpen means no shapes contain their vertices (not even - // points). Therefore Contains(Point) returns true if and only if the - // point is in the interior of some polygon. - VertexModelOpen VertexModel = iota - - // VertexModelSemiOpen means that polygon point containment is defined - // such that if several polygons tile the region around a vertex, then - // exactly one of those polygons contains that vertex. Points and - // polylines still do not contain any vertices. - VertexModelSemiOpen - - // VertexModelClosed means all shapes contain their vertices (including - // points and polylines). - VertexModelClosed -) - -// ContainsPointQuery determines whether one or more shapes in a ShapeIndex -// contain a given Point. The ShapeIndex may contain any number of points, -// polylines, and/or polygons (possibly overlapping). Shape boundaries may be -// modeled as Open, SemiOpen, or Closed (this affects whether or not shapes are -// considered to contain their vertices). -// -// This type is not safe for concurrent use. -// -// However, note that if you need to do a large number of point containment -// tests, it is more efficient to re-use the query rather than creating a new -// one each time. -type ContainsPointQuery struct { - model VertexModel - index *ShapeIndex - iter *ShapeIndexIterator -} - -// NewContainsPointQuery creates a new instance of the ContainsPointQuery for the index -// and given vertex model choice. 
-func NewContainsPointQuery(index *ShapeIndex, model VertexModel) *ContainsPointQuery { - return &ContainsPointQuery{ - index: index, - model: model, - iter: index.Iterator(), - } -} - -// Contains reports whether any shape in the queries index contains the point p -// under the queries vertex model (Open, SemiOpen, or Closed). -func (q *ContainsPointQuery) Contains(p Point) bool { - if !q.iter.LocatePoint(p) { - return false - } - - cell := q.iter.IndexCell() - for _, clipped := range cell.shapes { - if q.shapeContains(clipped, q.iter.Center(), p) { - return true - } - } - return false -} - -// shapeContains reports whether the clippedShape from the iterator's center position contains -// the given point. -func (q *ContainsPointQuery) shapeContains(clipped *clippedShape, center, p Point) bool { - inside := clipped.containsCenter - numEdges := clipped.numEdges() - if numEdges <= 0 { - return inside - } - - shape := q.index.Shape(clipped.shapeID) - if shape.Dimension() != 2 { - // Points and polylines can be ignored unless the vertex model is Closed. - if q.model != VertexModelClosed { - return false - } - - // Otherwise, the point is contained if and only if it matches a vertex. - for _, edgeID := range clipped.edges { - edge := shape.Edge(edgeID) - if edge.V0 == p || edge.V1 == p { - return true - } - } - return false - } - - // Test containment by drawing a line segment from the cell center to the - // given point and counting edge crossings. - crosser := NewEdgeCrosser(center, p) - for _, edgeID := range clipped.edges { - edge := shape.Edge(edgeID) - sign := crosser.CrossingSign(edge.V0, edge.V1) - if sign == DoNotCross { - continue - } - if sign == MaybeCross { - // For the Open and Closed models, check whether p is a vertex. - if q.model != VertexModelSemiOpen && (edge.V0 == p || edge.V1 == p) { - return (q.model == VertexModelClosed) - } - // C++ plays fast and loose with the int <-> bool conversions here. 
- if VertexCrossing(crosser.a, crosser.b, edge.V0, edge.V1) { - sign = Cross - } else { - sign = DoNotCross - } - } - inside = inside != (sign == Cross) - } - - return inside -} - -// ShapeContains reports whether the given shape contains the point under this -// queries vertex model (Open, SemiOpen, or Closed). -// -// This requires the shape belongs to this queries index. -func (q *ContainsPointQuery) ShapeContains(shape Shape, p Point) bool { - if !q.iter.LocatePoint(p) { - return false - } - - clipped := q.iter.IndexCell().findByShapeID(q.index.idForShape(shape)) - if clipped == nil { - return false - } - return q.shapeContains(clipped, q.iter.Center(), p) -} - -// shapeVisitorFunc is a type of function that can be called against shaped in an index. -type shapeVisitorFunc func(shape Shape) bool - -// visitContainingShapes visits all shapes in the given index that contain the -// given point p, terminating early if the given visitor function returns false, -// in which case visitContainingShapes returns false. Each shape is -// visited at most once. -func (q *ContainsPointQuery) visitContainingShapes(p Point, f shapeVisitorFunc) bool { - // This function returns false only if the algorithm terminates early - // because the visitor function returned false. - if !q.iter.LocatePoint(p) { - return true - } - - cell := q.iter.IndexCell() - for _, clipped := range cell.shapes { - if q.shapeContains(clipped, q.iter.Center(), p) && - !f(q.index.Shape(clipped.shapeID)) { - return false - } - } - return true -} - -// ContainingShapes returns a slice of all shapes that contain the given point. 
-func (q *ContainsPointQuery) ContainingShapes(p Point) []Shape { - var shapes []Shape - q.visitContainingShapes(p, func(shape Shape) bool { - shapes = append(shapes, shape) - return true - }) - return shapes -} - -// TODO(roberts): Remaining methods from C++ -// type edgeVisitorFunc func(shape ShapeEdge) bool -// func (q *ContainsPointQuery) visitIncidentEdges(p Point, v edgeVisitorFunc) bool diff --git a/vendor/github.com/golang/geo/s2/contains_vertex_query.go b/vendor/github.com/golang/geo/s2/contains_vertex_query.go deleted file mode 100644 index 8e74f9e5b..000000000 --- a/vendor/github.com/golang/geo/s2/contains_vertex_query.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2017 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package s2 - -// ContainsVertexQuery is used to track the edges entering and leaving the -// given vertex of a Polygon in order to be able to determine if the point is -// contained by the Polygon. -// -// Point containment is defined according to the semi-open boundary model -// which means that if several polygons tile the region around a vertex, -// then exactly one of those polygons contains that vertex. -type ContainsVertexQuery struct { - target Point - edgeMap map[Point]int -} - -// NewContainsVertexQuery returns a new query for the given vertex whose -// containment will be determined. 
-func NewContainsVertexQuery(target Point) *ContainsVertexQuery { - return &ContainsVertexQuery{ - target: target, - edgeMap: make(map[Point]int), - } -} - -// AddEdge adds the edge between target and v with the given direction. -// (+1 = outgoing, -1 = incoming, 0 = degenerate). -func (q *ContainsVertexQuery) AddEdge(v Point, direction int) { - q.edgeMap[v] += direction -} - -// ContainsVertex reports a +1 if the target vertex is contained, -1 if it is -// not contained, and 0 if the incident edges consisted of matched sibling pairs. -func (q *ContainsVertexQuery) ContainsVertex() int { - // Find the unmatched edge that is immediately clockwise from Ortho(P). - referenceDir := Point{q.target.Ortho()} - - bestPoint := referenceDir - bestDir := 0 - - for k, v := range q.edgeMap { - if v == 0 { - continue // This is a "matched" edge. - } - if OrderedCCW(referenceDir, bestPoint, k, q.target) { - bestPoint = k - bestDir = v - } - } - return bestDir -} diff --git a/vendor/github.com/golang/geo/s2/convex_hull_query.go b/vendor/github.com/golang/geo/s2/convex_hull_query.go deleted file mode 100644 index 68539abb1..000000000 --- a/vendor/github.com/golang/geo/s2/convex_hull_query.go +++ /dev/null @@ -1,258 +0,0 @@ -// Copyright 2018 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package s2 - -import ( - "sort" - - "github.com/golang/geo/r3" -) - -// ConvexHullQuery builds the convex hull of any collection of points, -// polylines, loops, and polygons. It returns a single convex loop. -// -// The convex hull is defined as the smallest convex region on the sphere that -// contains all of your input geometry. Recall that a region is "convex" if -// for every pair of points inside the region, the straight edge between them -// is also inside the region. In our case, a "straight" edge is a geodesic, -// i.e. the shortest path on the sphere between two points. -// -// Containment of input geometry is defined as follows: -// -// - Each input loop and polygon is contained by the convex hull exactly -// (i.e., according to Polygon's Contains(Polygon)). -// -// - Each input point is either contained by the convex hull or is a vertex -// of the convex hull. (Recall that S2Loops do not necessarily contain their -// vertices.) -// -// - For each input polyline, the convex hull contains all of its vertices -// according to the rule for points above. (The definition of convexity -// then ensures that the convex hull also contains the polyline edges.) -// -// To use this type, call the various Add... methods to add your input geometry, and -// then call ConvexHull. Note that ConvexHull does *not* reset the -// state; you can continue adding geometry if desired and compute the convex -// hull again. If you want to start from scratch, simply create a new -// ConvexHullQuery value. -// -// This implement Andrew's monotone chain algorithm, which is a variant of the -// Graham scan (see https://en.wikipedia.org/wiki/Graham_scan). The time -// complexity is O(n log n), and the space required is O(n). In fact only the -// call to "sort" takes O(n log n) time; the rest of the algorithm is linear. 
-// -// Demonstration of the algorithm and code: -// en.wikibooks.org/wiki/Algorithm_Implementation/Geometry/Convex_hull/Monotone_chain -// -// This type is not safe for concurrent use. -type ConvexHullQuery struct { - bound Rect - points []Point -} - -// NewConvexHullQuery creates a new ConvexHullQuery. -func NewConvexHullQuery() *ConvexHullQuery { - return &ConvexHullQuery{ - bound: EmptyRect(), - } -} - -// AddPoint adds the given point to the input geometry. -func (q *ConvexHullQuery) AddPoint(p Point) { - q.bound = q.bound.AddPoint(LatLngFromPoint(p)) - q.points = append(q.points, p) -} - -// AddPolyline adds the given polyline to the input geometry. -func (q *ConvexHullQuery) AddPolyline(p *Polyline) { - q.bound = q.bound.Union(p.RectBound()) - q.points = append(q.points, (*p)...) -} - -// AddLoop adds the given loop to the input geometry. -func (q *ConvexHullQuery) AddLoop(l *Loop) { - q.bound = q.bound.Union(l.RectBound()) - if l.isEmptyOrFull() { - return - } - q.points = append(q.points, l.vertices...) -} - -// AddPolygon adds the given polygon to the input geometry. -func (q *ConvexHullQuery) AddPolygon(p *Polygon) { - q.bound = q.bound.Union(p.RectBound()) - for _, l := range p.loops { - // Only loops at depth 0 can contribute to the convex hull. - if l.depth == 0 { - q.AddLoop(l) - } - } -} - -// CapBound returns a bounding cap for the input geometry provided. -// -// Note that this method does not clear the geometry; you can continue -// adding to it and call this method again if desired. -func (q *ConvexHullQuery) CapBound() Cap { - // We keep track of a rectangular bound rather than a spherical cap because - // it is easy to compute a tight bound for a union of rectangles, whereas it - // is quite difficult to compute a tight bound around a union of caps. 
- // Also, polygons and polylines implement CapBound() in terms of - // RectBound() for this same reason, so it is much better to keep track - // of a rectangular bound as we go along and convert it at the end. - // - // TODO(roberts): We could compute an optimal bound by implementing Welzl's - // algorithm. However we would still need to have special handling of loops - // and polygons, since if a loop spans more than 180 degrees in any - // direction (i.e., if it contains two antipodal points), then it is not - // enough just to bound its vertices. In this case the only convex bounding - // cap is FullCap(), and the only convex bounding loop is the full loop. - return q.bound.CapBound() -} - -// ConvexHull returns a Loop representing the convex hull of the input geometry provided. -// -// If there is no geometry, this method returns an empty loop containing no -// points. -// -// If the geometry spans more than half of the sphere, this method returns a -// full loop containing the entire sphere. -// -// If the geometry contains 1 or 2 points, or a single edge, this method -// returns a very small loop consisting of three vertices (which are a -// superset of the input vertices). -// -// Note that this method does not clear the geometry; you can continue -// adding to the query and call this method again. -func (q *ConvexHullQuery) ConvexHull() *Loop { - c := q.CapBound() - if c.Height() >= 1 { - // The bounding cap is not convex. The current bounding cap - // implementation is not optimal, but nevertheless it is likely that the - // input geometry itself is not contained by any convex polygon. In any - // case, we need a convex bounding cap to proceed with the algorithm below - // (in order to construct a point "origin" that is definitely outside the - // convex hull). - return FullLoop() - } - - // Remove duplicates. We need to do this before checking whether there are - // fewer than 3 points. 
- x := make(map[Point]bool) - r, w := 0, 0 // read/write indexes - for ; r < len(q.points); r++ { - if x[q.points[r]] { - continue - } - q.points[w] = q.points[r] - x[q.points[r]] = true - w++ - } - q.points = q.points[:w] - - // This code implements Andrew's monotone chain algorithm, which is a simple - // variant of the Graham scan. Rather than sorting by x-coordinate, instead - // we sort the points in CCW order around an origin O such that all points - // are guaranteed to be on one side of some geodesic through O. This - // ensures that as we scan through the points, each new point can only - // belong at the end of the chain (i.e., the chain is monotone in terms of - // the angle around O from the starting point). - origin := Point{c.Center().Ortho()} - sort.Slice(q.points, func(i, j int) bool { - return RobustSign(origin, q.points[i], q.points[j]) == CounterClockwise - }) - - // Special cases for fewer than 3 points. - switch len(q.points) { - case 0: - return EmptyLoop() - case 1: - return singlePointLoop(q.points[0]) - case 2: - return singleEdgeLoop(q.points[0], q.points[1]) - } - - // Generate the lower and upper halves of the convex hull. Each half - // consists of the maximal subset of vertices such that the edge chain - // makes only left (CCW) turns. - lower := q.monotoneChain() - - // reverse the points - for left, right := 0, len(q.points)-1; left < right; left, right = left+1, right-1 { - q.points[left], q.points[right] = q.points[right], q.points[left] - } - upper := q.monotoneChain() - - // Remove the duplicate vertices and combine the chains. - lower = lower[:len(lower)-1] - upper = upper[:len(upper)-1] - lower = append(lower, upper...) - - return LoopFromPoints(lower) -} - -// monotoneChain iterates through the points, selecting the maximal subset of points -// such that the edge chain makes only left (CCW) turns. 
-func (q *ConvexHullQuery) monotoneChain() []Point { - var output []Point - for _, p := range q.points { - // Remove any points that would cause the chain to make a clockwise turn. - for len(output) >= 2 && RobustSign(output[len(output)-2], output[len(output)-1], p) != CounterClockwise { - output = output[:len(output)-1] - } - output = append(output, p) - } - return output -} - -// singlePointLoop constructs a 3-vertex polygon consisting of "p" and two nearby -// vertices. Note that ContainsPoint(p) may be false for the resulting loop. -func singlePointLoop(p Point) *Loop { - const offset = 1e-15 - d0 := p.Ortho() - d1 := p.Cross(d0) - vertices := []Point{ - p, - {p.Add(d0.Mul(offset)).Normalize()}, - {p.Add(d1.Mul(offset)).Normalize()}, - } - return LoopFromPoints(vertices) -} - -// singleEdgeLoop constructs a loop consisting of the two vertices and their midpoint. -func singleEdgeLoop(a, b Point) *Loop { - // If the points are exactly antipodal we return the full loop. - // - // Note that we could use the code below even in this case (which would - // return a zero-area loop that follows the edge AB), except that (1) the - // direction of AB is defined using symbolic perturbations and therefore is - // not predictable by ordinary users, and (2) Loop disallows anitpodal - // adjacent vertices and so we would need to use 4 vertices to define the - // degenerate loop. (Note that the Loop antipodal vertex restriction is - // historical and now could easily be removed, however it would still have - // the problem that the edge direction is not easily predictable.) - if a.Add(b.Vector) == (r3.Vector{}) { - return FullLoop() - } - - // Construct a loop consisting of the two vertices and their midpoint. We - // use Interpolate() to ensure that the midpoint is very close to - // the edge even when its endpoints nearly antipodal. 
- vertices := []Point{a, b, Interpolate(0.5, a, b)} - loop := LoopFromPoints(vertices) - // The resulting loop may be clockwise, so invert it if necessary. - loop.Normalize() - return loop -} diff --git a/vendor/github.com/golang/geo/s2/crossing_edge_query.go b/vendor/github.com/golang/geo/s2/crossing_edge_query.go deleted file mode 100644 index 51852dab4..000000000 --- a/vendor/github.com/golang/geo/s2/crossing_edge_query.go +++ /dev/null @@ -1,409 +0,0 @@ -// Copyright 2017 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package s2 - -import ( - "sort" - - "github.com/golang/geo/r2" -) - -// CrossingEdgeQuery is used to find the Edge IDs of Shapes that are crossed by -// a given edge(s). -// -// Note that if you need to query many edges, it is more efficient to declare -// a single CrossingEdgeQuery instance and reuse it. -// -// If you want to find *all* the pairs of crossing edges, it is more efficient to -// use the not yet implemented VisitCrossings in shapeutil. -type CrossingEdgeQuery struct { - index *ShapeIndex - - // temporary values used while processing a query. - a, b r2.Point - iter *ShapeIndexIterator - - // candidate cells generated when finding crossings. - cells []*ShapeIndexCell -} - -// NewCrossingEdgeQuery creates a CrossingEdgeQuery for the given index. 
-func NewCrossingEdgeQuery(index *ShapeIndex) *CrossingEdgeQuery { - c := &CrossingEdgeQuery{ - index: index, - iter: index.Iterator(), - } - return c -} - -// Crossings returns the set of edge of the shape S that intersect the given edge AB. -// If the CrossingType is Interior, then only intersections at a point interior to both -// edges are reported, while if it is CrossingTypeAll then edges that share a vertex -// are also reported. -func (c *CrossingEdgeQuery) Crossings(a, b Point, shape Shape, crossType CrossingType) []int { - edges := c.candidates(a, b, shape) - if len(edges) == 0 { - return nil - } - - crosser := NewEdgeCrosser(a, b) - out := 0 - n := len(edges) - - for in := 0; in < n; in++ { - b := shape.Edge(edges[in]) - sign := crosser.CrossingSign(b.V0, b.V1) - if crossType == CrossingTypeAll && (sign == MaybeCross || sign == Cross) || crossType != CrossingTypeAll && sign == Cross { - edges[out] = edges[in] - out++ - } - } - - if out < n { - edges = edges[0:out] - } - return edges -} - -// EdgeMap stores a sorted set of edge ids for each shape. -type EdgeMap map[Shape][]int - -// CrossingsEdgeMap returns the set of all edges in the index that intersect the given -// edge AB. If crossType is CrossingTypeInterior, then only intersections at a -// point interior to both edges are reported, while if it is CrossingTypeAll -// then edges that share a vertex are also reported. -// -// The edges are returned as a mapping from shape to the edges of that shape -// that intersect AB. Every returned shape has at least one crossing edge. 
-func (c *CrossingEdgeQuery) CrossingsEdgeMap(a, b Point, crossType CrossingType) EdgeMap { - edgeMap := c.candidatesEdgeMap(a, b) - if len(edgeMap) == 0 { - return nil - } - - crosser := NewEdgeCrosser(a, b) - for shape, edges := range edgeMap { - out := 0 - n := len(edges) - for in := 0; in < n; in++ { - edge := shape.Edge(edges[in]) - sign := crosser.CrossingSign(edge.V0, edge.V1) - if (crossType == CrossingTypeAll && (sign == MaybeCross || sign == Cross)) || (crossType != CrossingTypeAll && sign == Cross) { - edgeMap[shape][out] = edges[in] - out++ - } - } - - if out == 0 { - delete(edgeMap, shape) - } else { - if out < n { - edgeMap[shape] = edgeMap[shape][0:out] - } - } - } - return edgeMap -} - -// candidates returns a superset of the edges of the given shape that intersect -// the edge AB. -func (c *CrossingEdgeQuery) candidates(a, b Point, shape Shape) []int { - var edges []int - - // For small loops it is faster to use brute force. The threshold below was - // determined using benchmarks. - const maxBruteForceEdges = 27 - maxEdges := shape.NumEdges() - if maxEdges <= maxBruteForceEdges { - edges = make([]int, maxEdges) - for i := 0; i < maxEdges; i++ { - edges[i] = i - } - return edges - } - - // Compute the set of index cells intersected by the query edge. - c.getCellsForEdge(a, b) - if len(c.cells) == 0 { - return nil - } - - // Gather all the edges that intersect those cells and sort them. - // TODO(roberts): Shapes don't track their ID, so we need to range over - // the index to find the ID manually. - var shapeID int32 - for k, v := range c.index.shapes { - if v == shape { - shapeID = k - } - } - - for _, cell := range c.cells { - if cell == nil { - continue - } - clipped := cell.findByShapeID(shapeID) - if clipped == nil { - continue - } - edges = append(edges, clipped.edges...) - } - - if len(c.cells) > 1 { - edges = uniqueInts(edges) - } - - return edges -} - -// uniqueInts returns the sorted uniqued values from the given input. 
-func uniqueInts(in []int) []int { - var edges []int - m := make(map[int]bool) - for _, i := range in { - if m[i] { - continue - } - m[i] = true - edges = append(edges, i) - } - sort.Ints(edges) - return edges -} - -// candidatesEdgeMap returns a map from shapes to the superse of edges for that -// shape that intersect the edge AB. -// -// CAVEAT: This method may return shapes that have an empty set of candidate edges. -// However the return value is non-empty only if at least one shape has a candidate edge. -func (c *CrossingEdgeQuery) candidatesEdgeMap(a, b Point) EdgeMap { - edgeMap := make(EdgeMap) - - // If there are only a few edges then it's faster to use brute force. We - // only bother with this optimization when there is a single shape. - if len(c.index.shapes) == 1 { - // Typically this method is called many times, so it is worth checking - // whether the edge map is empty or already consists of a single entry for - // this shape, and skip clearing edge map in that case. - shape := c.index.Shape(0) - - // Note that we leave the edge map non-empty even if there are no candidates - // (i.e., there is a single entry with an empty set of edges). - edgeMap[shape] = c.candidates(a, b, shape) - return edgeMap - } - - // Compute the set of index cells intersected by the query edge. - c.getCellsForEdge(a, b) - if len(c.cells) == 0 { - return edgeMap - } - - // Gather all the edges that intersect those cells and sort them. - for _, cell := range c.cells { - for _, clipped := range cell.shapes { - s := c.index.Shape(clipped.shapeID) - for j := 0; j < clipped.numEdges(); j++ { - edgeMap[s] = append(edgeMap[s], clipped.edges[j]) - } - } - } - - if len(c.cells) > 1 { - for s, edges := range edgeMap { - edgeMap[s] = uniqueInts(edges) - } - } - - return edgeMap -} - -// getCells returns the set of ShapeIndexCells that might contain edges intersecting -// the edge AB in the given cell root. This method is used primarily by loop and shapeutil. 
-func (c *CrossingEdgeQuery) getCells(a, b Point, root *PaddedCell) []*ShapeIndexCell { - aUV, bUV, ok := ClipToFace(a, b, root.id.Face()) - if ok { - c.a = aUV - c.b = bUV - edgeBound := r2.RectFromPoints(c.a, c.b) - if root.Bound().Intersects(edgeBound) { - c.computeCellsIntersected(root, edgeBound) - } - } - - if len(c.cells) == 0 { - return nil - } - - return c.cells -} - -// getCellsForEdge populates the cells field to the set of index cells intersected by an edge AB. -func (c *CrossingEdgeQuery) getCellsForEdge(a, b Point) { - c.cells = nil - - segments := FaceSegments(a, b) - for _, segment := range segments { - c.a = segment.a - c.b = segment.b - - // Optimization: rather than always starting the recursive subdivision at - // the top level face cell, instead we start at the smallest S2CellId that - // contains the edge (the edge root cell). This typically lets us skip - // quite a few levels of recursion since most edges are short. - edgeBound := r2.RectFromPoints(c.a, c.b) - pcell := PaddedCellFromCellID(CellIDFromFace(segment.face), 0) - edgeRoot := pcell.ShrinkToFit(edgeBound) - - // Now we need to determine how the edge root cell is related to the cells - // in the spatial index (cellMap). There are three cases: - // - // 1. edgeRoot is an index cell or is contained within an index cell. - // In this case we only need to look at the contents of that cell. - // 2. edgeRoot is subdivided into one or more index cells. In this case - // we recursively subdivide to find the cells intersected by AB. - // 3. edgeRoot does not intersect any index cells. In this case there - // is nothing to do. - relation := c.iter.LocateCellID(edgeRoot) - if relation == Indexed { - // edgeRoot is an index cell or is contained by an index cell (case 1). - c.cells = append(c.cells, c.iter.IndexCell()) - } else if relation == Subdivided { - // edgeRoot is subdivided into one or more index cells (case 2). We - // find the cells intersected by AB using recursive subdivision. 
- if !edgeRoot.isFace() { - pcell = PaddedCellFromCellID(edgeRoot, 0) - } - c.computeCellsIntersected(pcell, edgeBound) - } - } -} - -// computeCellsIntersected computes the index cells intersected by the current -// edge that are descendants of pcell and adds them to this queries set of cells. -func (c *CrossingEdgeQuery) computeCellsIntersected(pcell *PaddedCell, edgeBound r2.Rect) { - - c.iter.seek(pcell.id.RangeMin()) - if c.iter.Done() || c.iter.CellID() > pcell.id.RangeMax() { - // The index does not contain pcell or any of its descendants. - return - } - if c.iter.CellID() == pcell.id { - // The index contains this cell exactly. - c.cells = append(c.cells, c.iter.IndexCell()) - return - } - - // Otherwise, split the edge among the four children of pcell. - center := pcell.Middle().Lo() - - if edgeBound.X.Hi < center.X { - // Edge is entirely contained in the two left children. - c.clipVAxis(edgeBound, center.Y, 0, pcell) - return - } else if edgeBound.X.Lo >= center.X { - // Edge is entirely contained in the two right children. - c.clipVAxis(edgeBound, center.Y, 1, pcell) - return - } - - childBounds := c.splitUBound(edgeBound, center.X) - if edgeBound.Y.Hi < center.Y { - // Edge is entirely contained in the two lower children. - c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, 0, 0), childBounds[0]) - c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, 1, 0), childBounds[1]) - } else if edgeBound.Y.Lo >= center.Y { - // Edge is entirely contained in the two upper children. - c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, 0, 1), childBounds[0]) - c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, 1, 1), childBounds[1]) - } else { - // The edge bound spans all four children. The edge itself intersects - // at most three children (since no padding is being used). 
- c.clipVAxis(childBounds[0], center.Y, 0, pcell) - c.clipVAxis(childBounds[1], center.Y, 1, pcell) - } -} - -// clipVAxis computes the intersected cells recursively for a given padded cell. -// Given either the left (i=0) or right (i=1) side of a padded cell pcell, -// determine whether the current edge intersects the lower child, upper child, -// or both children, and call c.computeCellsIntersected recursively on those children. -// The center is the v-coordinate at the center of pcell. -func (c *CrossingEdgeQuery) clipVAxis(edgeBound r2.Rect, center float64, i int, pcell *PaddedCell) { - if edgeBound.Y.Hi < center { - // Edge is entirely contained in the lower child. - c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, i, 0), edgeBound) - } else if edgeBound.Y.Lo >= center { - // Edge is entirely contained in the upper child. - c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, i, 1), edgeBound) - } else { - // The edge intersects both children. - childBounds := c.splitVBound(edgeBound, center) - c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, i, 0), childBounds[0]) - c.computeCellsIntersected(PaddedCellFromParentIJ(pcell, i, 1), childBounds[1]) - } -} - -// splitUBound returns the bound for two children as a result of spliting the -// current edge at the given value U. -func (c *CrossingEdgeQuery) splitUBound(edgeBound r2.Rect, u float64) [2]r2.Rect { - v := edgeBound.Y.ClampPoint(interpolateFloat64(u, c.a.X, c.b.X, c.a.Y, c.b.Y)) - // diag indicates which diagonal of the bounding box is spanned by AB: - // it is 0 if AB has positive slope, and 1 if AB has negative slope. - var diag int - if (c.a.X > c.b.X) != (c.a.Y > c.b.Y) { - diag = 1 - } - return splitBound(edgeBound, 0, diag, u, v) -} - -// splitVBound returns the bound for two children as a result of spliting the -// current edge into two child edges at the given value V. 
-func (c *CrossingEdgeQuery) splitVBound(edgeBound r2.Rect, v float64) [2]r2.Rect { - u := edgeBound.X.ClampPoint(interpolateFloat64(v, c.a.Y, c.b.Y, c.a.X, c.b.X)) - var diag int - if (c.a.X > c.b.X) != (c.a.Y > c.b.Y) { - diag = 1 - } - return splitBound(edgeBound, diag, 0, u, v) -} - -// splitBound returns the bounds for the two childrenn as a result of spliting -// the current edge into two child edges at the given point (u,v). uEnd and vEnd -// indicate which bound endpoints of the first child will be updated. -func splitBound(edgeBound r2.Rect, uEnd, vEnd int, u, v float64) [2]r2.Rect { - var childBounds = [2]r2.Rect{ - edgeBound, - edgeBound, - } - - if uEnd == 1 { - childBounds[0].X.Lo = u - childBounds[1].X.Hi = u - } else { - childBounds[0].X.Hi = u - childBounds[1].X.Lo = u - } - - if vEnd == 1 { - childBounds[0].Y.Lo = v - childBounds[1].Y.Hi = v - } else { - childBounds[0].Y.Hi = v - childBounds[1].Y.Lo = v - } - - return childBounds -} diff --git a/vendor/github.com/golang/geo/s2/distance_target.go b/vendor/github.com/golang/geo/s2/distance_target.go deleted file mode 100644 index 066bbacfa..000000000 --- a/vendor/github.com/golang/geo/s2/distance_target.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2019 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package s2 - -import ( - "github.com/golang/geo/s1" -) - -// The distance interface represents a set of common methods used by algorithms -// that compute distances between various S2 types. -type distance interface { - // chordAngle returns this type as a ChordAngle. - chordAngle() s1.ChordAngle - - // fromChordAngle is used to type convert a ChordAngle to this type. - // This is to work around needing to be clever in parts of the code - // where a distanceTarget interface method expects distances, but the - // user only supplies a ChordAngle, and we need to dynamically cast it - // to an appropriate distance interface types. - fromChordAngle(o s1.ChordAngle) distance - - // zero returns a zero distance. - zero() distance - // negative returns a value smaller than any valid value. - negative() distance - // infinity returns a value larger than any valid value. - infinity() distance - - // less is similar to the Less method in Sort. To get minimum values, - // this would be a less than type operation. For maximum, this would - // be a greater than type operation. - less(other distance) bool - - // sub subtracts the other value from this one and returns the new value. - // This is done as a method and not simple mathematical operation to - // allow closest and furthest to implement this in opposite ways. - sub(other distance) distance - - // chordAngleBound reports the upper bound on a ChordAngle corresponding - // to this distance. For example, if distance measures WGS84 ellipsoid - // distance then the corresponding angle needs to be 0.56% larger. - chordAngleBound() s1.ChordAngle - - // updateDistance may update the value this distance represents - // based on the given input. The updated value and a boolean reporting - // if the value was changed are returned. - updateDistance(other distance) (distance, bool) -} - -// distanceTarget is an interface that represents a geometric type to which distances -// are measured. 
-// -// For example, there are implementations that measure distances to a Point, -// an Edge, a Cell, a CellUnion, and even to an arbitrary collection of geometry -// stored in ShapeIndex. -// -// The distanceTarget types are provided for the benefit of types that measure -// distances and/or find nearby geometry, such as ClosestEdgeQuery, FurthestEdgeQuery, -// ClosestPointQuery, and ClosestCellQuery, etc. -type distanceTarget interface { - // capBound returns a Cap that bounds the set of points whose distance to the - // target is distance.zero(). - capBound() Cap - - // updateDistanceToPoint updates the distance if the distance to - // the point P is within than the given dist. - // The boolean reports if the value was updated. - updateDistanceToPoint(p Point, dist distance) (distance, bool) - - // updateDistanceToEdge updates the distance if the distance to - // the edge E is within than the given dist. - // The boolean reports if the value was updated. - updateDistanceToEdge(e Edge, dist distance) (distance, bool) - - // updateDistanceToCell updates the distance if the distance to the cell C - // (including its interior) is within than the given dist. - // The boolean reports if the value was updated. - updateDistanceToCell(c Cell, dist distance) (distance, bool) - - // setMaxError potentially updates the value of MaxError, and reports if - // the specific type supports altering it. Whenever one of the - // updateDistanceTo... methods above returns true, the returned distance - // is allowed to be up to maxError larger than the true minimum distance. - // In other words, it gives this target object permission to terminate its - // distance calculation as soon as it has determined that (1) the minimum - // distance is less than minDist and (2) the best possible further - // improvement is less than maxError. - // - // If the target takes advantage of maxError to optimize its distance - // calculation, this method must return true. 
(Most target types will - // default to return false.) - setMaxError(maxErr s1.ChordAngle) bool - - // maxBruteForceIndexSize reports the maximum number of indexed objects for - // which it is faster to compute the distance by brute force (e.g., by testing - // every edge) rather than by using an index. - // - // The following method is provided as a convenience for types that compute - // distances to a collection of indexed geometry, such as ClosestEdgeQuery - // and ClosestPointQuery. - // - // Types that do not support this should return a -1. - maxBruteForceIndexSize() int - - // distance returns an instance of the underlying distance type this - // target uses. This is to work around the use of Templates in the C++. - distance() distance - - // visitContainingShapes finds all polygons in the given index that - // completely contain a connected component of the target geometry. (For - // example, if the target consists of 10 points, this method finds - // polygons that contain any of those 10 points.) For each such polygon, - // the visit function is called with the Shape of the polygon along with - // a point of the target geometry that is contained by that polygon. - // - // Optionally, any polygon that intersects the target geometry may also be - // returned. In other words, this method returns all polygons that - // contain any connected component of the target, along with an arbitrary - // subset of the polygons that intersect the target. - // - // For example, suppose that the index contains two abutting polygons - // A and B. If the target consists of two points "a" contained by A and - // "b" contained by B, then both A and B are returned. But if the target - // consists of the edge "ab", then any subset of {A, B} could be returned - // (because both polygons intersect the target but neither one contains - // the edge "ab"). - // - // If the visit function returns false, this method terminates early and - // returns false as well. 
Otherwise returns true. - // - // NOTE(roberts): This method exists only for the purpose of implementing - // edgeQuery IncludeInteriors efficiently. - visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool -} - -// shapePointVisitorFunc defines a type of function the visitContainingShapes can call. -type shapePointVisitorFunc func(containingShape Shape, targetPoint Point) bool diff --git a/vendor/github.com/golang/geo/s2/doc.go b/vendor/github.com/golang/geo/s2/doc.go deleted file mode 100644 index 43e7a6344..000000000 --- a/vendor/github.com/golang/geo/s2/doc.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package s2 is a library for working with geometry in S² (spherical geometry). - -Its related packages, parallel to this one, are s1 (operates on S¹), r1 (operates on ℝ¹), -r2 (operates on ℝ²) and r3 (operates on ℝ³). - -This package provides types and functions for the S2 cell hierarchy and coordinate systems. -The S2 cell hierarchy is a hierarchical decomposition of the surface of a unit sphere (S²) -into ``cells''; it is highly efficient, scales from continental size to under 1 cm² -and preserves spatial locality (nearby cells have close IDs). 
- -More information including an in-depth introduction to S2 can be found on the -S2 website https://s2geometry.io/ -*/ -package s2 diff --git a/vendor/github.com/golang/geo/s2/edge_clipping.go b/vendor/github.com/golang/geo/s2/edge_clipping.go deleted file mode 100644 index 57a53bf0f..000000000 --- a/vendor/github.com/golang/geo/s2/edge_clipping.go +++ /dev/null @@ -1,672 +0,0 @@ -// Copyright 2017 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package s2 - -// This file contains a collection of methods for: -// -// (1) Robustly clipping geodesic edges to the faces of the S2 biunit cube -// (see s2stuv), and -// -// (2) Robustly clipping 2D edges against 2D rectangles. -// -// These functions can be used to efficiently find the set of CellIDs that -// are intersected by a geodesic edge (e.g., see CrossingEdgeQuery). - -import ( - "math" - - "github.com/golang/geo/r1" - "github.com/golang/geo/r2" - "github.com/golang/geo/r3" -) - -const ( - // edgeClipErrorUVCoord is the maximum error in a u- or v-coordinate - // compared to the exact result, assuming that the points A and B are in - // the rectangle [-1,1]x[1,1] or slightly outside it (by 1e-10 or less). - edgeClipErrorUVCoord = 2.25 * dblEpsilon - - // edgeClipErrorUVDist is the maximum distance from a clipped point to - // the corresponding exact result. It is equal to the error in a single - // coordinate because at most one coordinate is subject to error. 
- edgeClipErrorUVDist = 2.25 * dblEpsilon - - // faceClipErrorRadians is the maximum angle between a returned vertex - // and the nearest point on the exact edge AB. It is equal to the - // maximum directional error in PointCross, plus the error when - // projecting points onto a cube face. - faceClipErrorRadians = 3 * dblEpsilon - - // faceClipErrorDist is the same angle expressed as a maximum distance - // in (u,v)-space. In other words, a returned vertex is at most this far - // from the exact edge AB projected into (u,v)-space. - faceClipErrorUVDist = 9 * dblEpsilon - - // faceClipErrorUVCoord is the maximum angle between a returned vertex - // and the nearest point on the exact edge AB expressed as the maximum error - // in an individual u- or v-coordinate. In other words, for each - // returned vertex there is a point on the exact edge AB whose u- and - // v-coordinates differ from the vertex by at most this amount. - faceClipErrorUVCoord = 9.0 * (1.0 / math.Sqrt2) * dblEpsilon - - // intersectsRectErrorUVDist is the maximum error when computing if a point - // intersects with a given Rect. If some point of AB is inside the - // rectangle by at least this distance, the result is guaranteed to be true; - // if all points of AB are outside the rectangle by at least this distance, - // the result is guaranteed to be false. This bound assumes that rect is - // a subset of the rectangle [-1,1]x[-1,1] or extends slightly outside it - // (e.g., by 1e-10 or less). - intersectsRectErrorUVDist = 3 * math.Sqrt2 * dblEpsilon -) - -// ClipToFace returns the (u,v) coordinates for the portion of the edge AB that -// intersects the given face, or false if the edge AB does not intersect. -// This method guarantees that the clipped vertices lie within the [-1,1]x[-1,1] -// cube face rectangle and are within faceClipErrorUVDist of the line AB, but -// the results may differ from those produced by FaceSegments. 
-func ClipToFace(a, b Point, face int) (aUV, bUV r2.Point, intersects bool) { - return ClipToPaddedFace(a, b, face, 0.0) -} - -// ClipToPaddedFace returns the (u,v) coordinates for the portion of the edge AB that -// intersects the given face, but rather than clipping to the square [-1,1]x[-1,1] -// in (u,v) space, this method clips to [-R,R]x[-R,R] where R=(1+padding). -// Padding must be non-negative. -func ClipToPaddedFace(a, b Point, f int, padding float64) (aUV, bUV r2.Point, intersects bool) { - // Fast path: both endpoints are on the given face. - if face(a.Vector) == f && face(b.Vector) == f { - au, av := validFaceXYZToUV(f, a.Vector) - bu, bv := validFaceXYZToUV(f, b.Vector) - return r2.Point{au, av}, r2.Point{bu, bv}, true - } - - // Convert everything into the (u,v,w) coordinates of the given face. Note - // that the cross product *must* be computed in the original (x,y,z) - // coordinate system because PointCross (unlike the mathematical cross - // product) can produce different results in different coordinate systems - // when one argument is a linear multiple of the other, due to the use of - // symbolic perturbations. - normUVW := pointUVW(faceXYZtoUVW(f, a.PointCross(b))) - aUVW := pointUVW(faceXYZtoUVW(f, a)) - bUVW := pointUVW(faceXYZtoUVW(f, b)) - - // Padding is handled by scaling the u- and v-components of the normal. - // Letting R=1+padding, this means that when we compute the dot product of - // the normal with a cube face vertex (such as (-1,-1,1)), we will actually - // compute the dot product with the scaled vertex (-R,-R,1). This allows - // methods such as intersectsFace, exitAxis, etc, to handle padding - // with no further modifications. 
- scaleUV := 1 + padding - scaledN := pointUVW{r3.Vector{X: scaleUV * normUVW.X, Y: scaleUV * normUVW.Y, Z: normUVW.Z}} - if !scaledN.intersectsFace() { - return aUV, bUV, false - } - - // TODO(roberts): This is a workaround for extremely small vectors where some - // loss of precision can occur in Normalize causing underflow. When PointCross - // is updated to work around this, this can be removed. - if math.Max(math.Abs(normUVW.X), math.Max(math.Abs(normUVW.Y), math.Abs(normUVW.Z))) < math.Ldexp(1, -511) { - normUVW = pointUVW{normUVW.Mul(math.Ldexp(1, 563))} - } - - normUVW = pointUVW{normUVW.Normalize()} - - aTan := pointUVW{normUVW.Cross(aUVW.Vector)} - bTan := pointUVW{bUVW.Cross(normUVW.Vector)} - - // As described in clipDestination, if the sum of the scores from clipping the two - // endpoints is 3 or more, then the segment does not intersect this face. - aUV, aScore := clipDestination(bUVW, aUVW, pointUVW{scaledN.Mul(-1)}, bTan, aTan, scaleUV) - bUV, bScore := clipDestination(aUVW, bUVW, scaledN, aTan, bTan, scaleUV) - - return aUV, bUV, aScore+bScore < 3 -} - -// ClipEdge returns the portion of the edge defined by AB that is contained by the -// given rectangle. If there is no intersection, false is returned and aClip and bClip -// are undefined. -func ClipEdge(a, b r2.Point, clip r2.Rect) (aClip, bClip r2.Point, intersects bool) { - // Compute the bounding rectangle of AB, clip it, and then extract the new - // endpoints from the clipped bound. - bound := r2.RectFromPoints(a, b) - if bound, intersects = clipEdgeBound(a, b, clip, bound); !intersects { - return aClip, bClip, false - } - ai := 0 - if a.X > b.X { - ai = 1 - } - aj := 0 - if a.Y > b.Y { - aj = 1 - } - - return bound.VertexIJ(ai, aj), bound.VertexIJ(1-ai, 1-aj), true -} - -// The three functions below (sumEqual, intersectsFace, intersectsOppositeEdges) -// all compare a sum (u + v) to a third value w. 
They are implemented in such a -// way that they produce an exact result even though all calculations are done -// with ordinary floating-point operations. Here are the principles on which these -// functions are based: -// -// A. If u + v < w in floating-point, then u + v < w in exact arithmetic. -// -// B. If u + v < w in exact arithmetic, then at least one of the following -// expressions is true in floating-point: -// u + v < w -// u < w - v -// v < w - u -// -// Proof: By rearranging terms and substituting ">" for "<", we can assume -// that all values are non-negative. Now clearly "w" is not the smallest -// value, so assume WLOG that "u" is the smallest. We want to show that -// u < w - v in floating-point. If v >= w/2, the calculation of w - v is -// exact since the result is smaller in magnitude than either input value, -// so the result holds. Otherwise we have u <= v < w/2 and w - v >= w/2 -// (even in floating point), so the result also holds. - -// sumEqual reports whether u + v == w exactly. -func sumEqual(u, v, w float64) bool { - return (u+v == w) && (u == w-v) && (v == w-u) -} - -// pointUVW represents a Point in (u,v,w) coordinate space of a cube face. -type pointUVW Point - -// intersectsFace reports whether a given directed line L intersects the cube face F. -// The line L is defined by its normal N in the (u,v,w) coordinates of F. -func (p pointUVW) intersectsFace() bool { - // L intersects the [-1,1]x[-1,1] square in (u,v) if and only if the dot - // products of N with the four corner vertices (-1,-1,1), (1,-1,1), (1,1,1), - // and (-1,1,1) do not all have the same sign. This is true exactly when - // |Nu| + |Nv| >= |Nw|. The code below evaluates this expression exactly. - u := math.Abs(p.X) - v := math.Abs(p.Y) - w := math.Abs(p.Z) - - // We only need to consider the cases where u or v is the smallest value, - // since if w is the smallest then both expressions below will have a - // positive LHS and a negative RHS. 
- return (v >= w-u) && (u >= w-v) -} - -// intersectsOppositeEdges reports whether a directed line L intersects two -// opposite edges of a cube face F. This includs the case where L passes -// exactly through a corner vertex of F. The directed line L is defined -// by its normal N in the (u,v,w) coordinates of F. -func (p pointUVW) intersectsOppositeEdges() bool { - // The line L intersects opposite edges of the [-1,1]x[-1,1] (u,v) square if - // and only exactly two of the corner vertices lie on each side of L. This - // is true exactly when ||Nu| - |Nv|| >= |Nw|. The code below evaluates this - // expression exactly. - u := math.Abs(p.X) - v := math.Abs(p.Y) - w := math.Abs(p.Z) - - // If w is the smallest, the following line returns an exact result. - if math.Abs(u-v) != w { - return math.Abs(u-v) >= w - } - - // Otherwise u - v = w exactly, or w is not the smallest value. In either - // case the following returns the correct result. - if u >= v { - return u-w >= v - } - return v-w >= u -} - -// axis represents the possible results of exitAxis. -type axis int - -const ( - axisU axis = iota - axisV -) - -// exitAxis reports which axis the directed line L exits the cube face F on. -// The directed line L is represented by its CCW normal N in the (u,v,w) coordinates -// of F. It returns axisU if L exits through the u=-1 or u=+1 edge, and axisV if L exits -// through the v=-1 or v=+1 edge. Either result is acceptable if L exits exactly -// through a corner vertex of the cube face. -func (p pointUVW) exitAxis() axis { - if p.intersectsOppositeEdges() { - // The line passes through through opposite edges of the face. - // It exits through the v=+1 or v=-1 edge if the u-component of N has a - // larger absolute magnitude than the v-component. - if math.Abs(p.X) >= math.Abs(p.Y) { - return axisV - } - return axisU - } - - // The line passes through through two adjacent edges of the face. 
- // It exits the v=+1 or v=-1 edge if an even number of the components of N - // are negative. We test this using signbit() rather than multiplication - // to avoid the possibility of underflow. - var x, y, z int - if math.Signbit(p.X) { - x = 1 - } - if math.Signbit(p.Y) { - y = 1 - } - if math.Signbit(p.Z) { - z = 1 - } - - if x^y^z == 0 { - return axisV - } - return axisU -} - -// exitPoint returns the UV coordinates of the point where a directed line L (represented -// by the CCW normal of this point), exits the cube face this point is derived from along -// the given axis. -func (p pointUVW) exitPoint(a axis) r2.Point { - if a == axisU { - u := -1.0 - if p.Y > 0 { - u = 1.0 - } - return r2.Point{u, (-u*p.X - p.Z) / p.Y} - } - - v := -1.0 - if p.X < 0 { - v = 1.0 - } - return r2.Point{(-v*p.Y - p.Z) / p.X, v} -} - -// clipDestination returns a score which is used to indicate if the clipped edge AB -// on the given face intersects the face at all. This function returns the score for -// the given endpoint, which is an integer ranging from 0 to 3. If the sum of the scores -// from both of the endpoints is 3 or more, then edge AB does not intersect this face. -// -// First, it clips the line segment AB to find the clipped destination B' on a given -// face. (The face is specified implicitly by expressing *all arguments* in the (u,v,w) -// coordinates of that face.) Second, it partially computes whether the segment AB -// intersects this face at all. The actual condition is fairly complicated, but it -// turns out that it can be expressed as a "score" that can be computed independently -// when clipping the two endpoints A and B. -func clipDestination(a, b, scaledN, aTan, bTan pointUVW, scaleUV float64) (r2.Point, int) { - var uv r2.Point - - // Optimization: if B is within the safe region of the face, use it. 
- maxSafeUVCoord := 1 - faceClipErrorUVCoord - if b.Z > 0 { - uv = r2.Point{b.X / b.Z, b.Y / b.Z} - if math.Max(math.Abs(uv.X), math.Abs(uv.Y)) <= maxSafeUVCoord { - return uv, 0 - } - } - - // Otherwise find the point B' where the line AB exits the face. - uv = scaledN.exitPoint(scaledN.exitAxis()).Mul(scaleUV) - - p := pointUVW(Point{r3.Vector{uv.X, uv.Y, 1.0}}) - - // Determine if the exit point B' is contained within the segment. We do this - // by computing the dot products with two inward-facing tangent vectors at A - // and B. If either dot product is negative, we say that B' is on the "wrong - // side" of that point. As the point B' moves around the great circle AB past - // the segment endpoint B, it is initially on the wrong side of B only; as it - // moves further it is on the wrong side of both endpoints; and then it is on - // the wrong side of A only. If the exit point B' is on the wrong side of - // either endpoint, we can't use it; instead the segment is clipped at the - // original endpoint B. - // - // We reject the segment if the sum of the scores of the two endpoints is 3 - // or more. Here is what that rule encodes: - // - If B' is on the wrong side of A, then the other clipped endpoint A' - // must be in the interior of AB (otherwise AB' would go the wrong way - // around the circle). There is a similar rule for A'. - // - If B' is on the wrong side of either endpoint (and therefore we must - // use the original endpoint B instead), then it must be possible to - // project B onto this face (i.e., its w-coordinate must be positive). - // This rule is only necessary to handle certain zero-length edges (A=B). - score := 0 - if p.Sub(a.Vector).Dot(aTan.Vector) < 0 { - score = 2 // B' is on wrong side of A. - } else if p.Sub(b.Vector).Dot(bTan.Vector) < 0 { - score = 1 // B' is on wrong side of B. - } - - if score > 0 { // B' is not in the interior of AB. - if b.Z <= 0 { - score = 3 // B cannot be projected onto this face. 
- } else { - uv = r2.Point{b.X / b.Z, b.Y / b.Z} - } - } - - return uv, score -} - -// updateEndpoint returns the interval with the specified endpoint updated to -// the given value. If the value lies beyond the opposite endpoint, nothing is -// changed and false is returned. -func updateEndpoint(bound r1.Interval, highEndpoint bool, value float64) (r1.Interval, bool) { - if !highEndpoint { - if bound.Hi < value { - return bound, false - } - if bound.Lo < value { - bound.Lo = value - } - return bound, true - } - - if bound.Lo > value { - return bound, false - } - if bound.Hi > value { - bound.Hi = value - } - return bound, true -} - -// clipBoundAxis returns the clipped versions of the bounding intervals for the given -// axes for the line segment from (a0,a1) to (b0,b1) so that neither extends beyond the -// given clip interval. negSlope is a precomputed helper variable that indicates which -// diagonal of the bounding box is spanned by AB; it is false if AB has positive slope, -// and true if AB has negative slope. If the clipping interval doesn't overlap the bounds, -// false is returned. -func clipBoundAxis(a0, b0 float64, bound0 r1.Interval, a1, b1 float64, bound1 r1.Interval, - negSlope bool, clip r1.Interval) (bound0c, bound1c r1.Interval, updated bool) { - - if bound0.Lo < clip.Lo { - // If the upper bound is below the clips lower bound, there is nothing to do. - if bound0.Hi < clip.Lo { - return bound0, bound1, false - } - // narrow the intervals lower bound to the clip bound. - bound0.Lo = clip.Lo - if bound1, updated = updateEndpoint(bound1, negSlope, interpolateFloat64(clip.Lo, a0, b0, a1, b1)); !updated { - return bound0, bound1, false - } - } - - if bound0.Hi > clip.Hi { - // If the lower bound is above the clips upper bound, there is nothing to do. - if bound0.Lo > clip.Hi { - return bound0, bound1, false - } - // narrow the intervals upper bound to the clip bound. 
- bound0.Hi = clip.Hi - if bound1, updated = updateEndpoint(bound1, !negSlope, interpolateFloat64(clip.Hi, a0, b0, a1, b1)); !updated { - return bound0, bound1, false - } - } - return bound0, bound1, true -} - -// edgeIntersectsRect reports whether the edge defined by AB intersects the -// given closed rectangle to within the error bound. -func edgeIntersectsRect(a, b r2.Point, r r2.Rect) bool { - // First check whether the bounds of a Rect around AB intersects the given rect. - if !r.Intersects(r2.RectFromPoints(a, b)) { - return false - } - - // Otherwise AB intersects the rect if and only if all four vertices of rect - // do not lie on the same side of the extended line AB. We test this by finding - // the two vertices of rect with minimum and maximum projections onto the normal - // of AB, and computing their dot products with the edge normal. - n := b.Sub(a).Ortho() - - i := 0 - if n.X >= 0 { - i = 1 - } - j := 0 - if n.Y >= 0 { - j = 1 - } - - max := n.Dot(r.VertexIJ(i, j).Sub(a)) - min := n.Dot(r.VertexIJ(1-i, 1-j).Sub(a)) - - return (max >= 0) && (min <= 0) -} - -// clippedEdgeBound returns the bounding rectangle of the portion of the edge defined -// by AB intersected by clip. The resulting bound may be empty. This is a convenience -// function built on top of clipEdgeBound. -func clippedEdgeBound(a, b r2.Point, clip r2.Rect) r2.Rect { - bound := r2.RectFromPoints(a, b) - if b1, intersects := clipEdgeBound(a, b, clip, bound); intersects { - return b1 - } - return r2.EmptyRect() -} - -// clipEdgeBound clips an edge AB to sequence of rectangles efficiently. -// It represents the clipped edges by their bounding boxes rather than as a pair of -// endpoints. Specifically, let A'B' be some portion of an edge AB, and let bound be -// a tight bound of A'B'. This function returns the bound that is a tight bound -// of A'B' intersected with a given rectangle. If A'B' does not intersect clip, -// it returns false and the original bound. 
-func clipEdgeBound(a, b r2.Point, clip, bound r2.Rect) (r2.Rect, bool) { - // negSlope indicates which diagonal of the bounding box is spanned by AB: it - // is false if AB has positive slope, and true if AB has negative slope. This is - // used to determine which interval endpoints need to be updated each time - // the edge is clipped. - negSlope := (a.X > b.X) != (a.Y > b.Y) - - b0x, b0y, up1 := clipBoundAxis(a.X, b.X, bound.X, a.Y, b.Y, bound.Y, negSlope, clip.X) - if !up1 { - return bound, false - } - b1y, b1x, up2 := clipBoundAxis(a.Y, b.Y, b0y, a.X, b.X, b0x, negSlope, clip.Y) - if !up2 { - return r2.Rect{b0x, b0y}, false - } - return r2.Rect{X: b1x, Y: b1y}, true -} - -// interpolateFloat64 returns a value with the same combination of a1 and b1 as the -// given value x is of a and b. This function makes the following guarantees: -// - If x == a, then x1 = a1 (exactly). -// - If x == b, then x1 = b1 (exactly). -// - If a <= x <= b, then a1 <= x1 <= b1 (even if a1 == b1). -// This requires a != b. -func interpolateFloat64(x, a, b, a1, b1 float64) float64 { - // To get results that are accurate near both A and B, we interpolate - // starting from the closer of the two points. - if math.Abs(a-x) <= math.Abs(b-x) { - return a1 + (b1-a1)*(x-a)/(b-a) - } - return b1 + (a1-b1)*(x-b)/(a-b) -} - -// FaceSegment represents an edge AB clipped to an S2 cube face. It is -// represented by a face index and a pair of (u,v) coordinates. -type FaceSegment struct { - face int - a, b r2.Point -} - -// FaceSegments subdivides the given edge AB at every point where it crosses the -// boundary between two S2 cube faces and returns the corresponding FaceSegments. -// The segments are returned in order from A toward B. The input points must be -// unit length. -// -// This function guarantees that the returned segments form a continuous path -// from A to B, and that all vertices are within faceClipErrorUVDist of the -// line AB. 
All vertices lie within the [-1,1]x[-1,1] cube face rectangles. -// The results are consistent with Sign, i.e. the edge is well-defined even its -// endpoints are antipodal. -// TODO(roberts): Extend the implementation of PointCross so that this is true. -func FaceSegments(a, b Point) []FaceSegment { - var segment FaceSegment - - // Fast path: both endpoints are on the same face. - var aFace, bFace int - aFace, segment.a.X, segment.a.Y = xyzToFaceUV(a.Vector) - bFace, segment.b.X, segment.b.Y = xyzToFaceUV(b.Vector) - if aFace == bFace { - segment.face = aFace - return []FaceSegment{segment} - } - - // Starting at A, we follow AB from face to face until we reach the face - // containing B. The following code is designed to ensure that we always - // reach B, even in the presence of numerical errors. - // - // First we compute the normal to the plane containing A and B. This normal - // becomes the ultimate definition of the line AB; it is used to resolve all - // questions regarding where exactly the line goes. Unfortunately due to - // numerical errors, the line may not quite intersect the faces containing - // the original endpoints. We handle this by moving A and/or B slightly if - // necessary so that they are on faces intersected by the line AB. - ab := a.PointCross(b) - - aFace, segment.a = moveOriginToValidFace(aFace, a, ab, segment.a) - bFace, segment.b = moveOriginToValidFace(bFace, b, Point{ab.Mul(-1)}, segment.b) - - // Now we simply follow AB from face to face until we reach B. - var segments []FaceSegment - segment.face = aFace - bSaved := segment.b - - for face := aFace; face != bFace; { - // Complete the current segment by finding the point where AB - // exits the current face. 
- z := faceXYZtoUVW(face, ab) - n := pointUVW{z.Vector} - - exitAxis := n.exitAxis() - segment.b = n.exitPoint(exitAxis) - segments = append(segments, segment) - - // Compute the next face intersected by AB, and translate the exit - // point of the current segment into the (u,v) coordinates of the - // next face. This becomes the first point of the next segment. - exitXyz := faceUVToXYZ(face, segment.b.X, segment.b.Y) - face = nextFace(face, segment.b, exitAxis, n, bFace) - exitUvw := faceXYZtoUVW(face, Point{exitXyz}) - segment.face = face - segment.a = r2.Point{exitUvw.X, exitUvw.Y} - } - // Finish the last segment. - segment.b = bSaved - return append(segments, segment) -} - -// moveOriginToValidFace updates the origin point to a valid face if necessary. -// Given a line segment AB whose origin A has been projected onto a given cube -// face, determine whether it is necessary to project A onto a different face -// instead. This can happen because the normal of the line AB is not computed -// exactly, so that the line AB (defined as the set of points perpendicular to -// the normal) may not intersect the cube face containing A. Even if it does -// intersect the face, the exit point of the line from that face may be on -// the wrong side of A (i.e., in the direction away from B). If this happens, -// we reproject A onto the adjacent face where the line AB approaches A most -// closely. This moves the origin by a small amount, but never more than the -// error tolerances. -func moveOriginToValidFace(face int, a, ab Point, aUV r2.Point) (int, r2.Point) { - // Fast path: if the origin is sufficiently far inside the face, it is - // always safe to use it. - const maxSafeUVCoord = 1 - faceClipErrorUVCoord - if math.Max(math.Abs((aUV).X), math.Abs((aUV).Y)) <= maxSafeUVCoord { - return face, aUV - } - - // Otherwise check whether the normal AB even intersects this face. 
- z := faceXYZtoUVW(face, ab) - n := pointUVW{z.Vector} - if n.intersectsFace() { - // Check whether the point where the line AB exits this face is on the - // wrong side of A (by more than the acceptable error tolerance). - uv := n.exitPoint(n.exitAxis()) - exit := faceUVToXYZ(face, uv.X, uv.Y) - aTangent := ab.Normalize().Cross(a.Vector) - - // We can use the given face. - if exit.Sub(a.Vector).Dot(aTangent) >= -faceClipErrorRadians { - return face, aUV - } - } - - // Otherwise we reproject A to the nearest adjacent face. (If line AB does - // not pass through a given face, it must pass through all adjacent faces.) - var dir int - if math.Abs((aUV).X) >= math.Abs((aUV).Y) { - // U-axis - if aUV.X > 0 { - dir = 1 - } - face = uvwFace(face, 0, dir) - } else { - // V-axis - if aUV.Y > 0 { - dir = 1 - } - face = uvwFace(face, 1, dir) - } - - aUV.X, aUV.Y = validFaceXYZToUV(face, a.Vector) - aUV.X = math.Max(-1.0, math.Min(1.0, aUV.X)) - aUV.Y = math.Max(-1.0, math.Min(1.0, aUV.Y)) - - return face, aUV -} - -// nextFace returns the next face that should be visited by FaceSegments, given that -// we have just visited face and we are following the line AB (represented -// by its normal N in the (u,v,w) coordinates of that face). The other -// arguments include the point where AB exits face, the corresponding -// exit axis, and the target face containing the destination point B. -func nextFace(face int, exit r2.Point, axis axis, n pointUVW, targetFace int) int { - // this bit is to work around C++ cleverly casting bools to ints for you. - exitA := exit.X - exit1MinusA := exit.Y - - if axis == axisV { - exitA = exit.Y - exit1MinusA = exit.X - } - exitAPos := 0 - if exitA > 0 { - exitAPos = 1 - } - exit1MinusAPos := 0 - if exit1MinusA > 0 { - exit1MinusAPos = 1 - } - - // We return the face that is adjacent to the exit point along the given - // axis. If line AB exits *exactly* through a corner of the face, there are - // two possible next faces. 
If one is the target face containing B, then - // we guarantee that we advance to that face directly. - // - // The three conditions below check that (1) AB exits approximately through - // a corner, (2) the adjacent face along the non-exit axis is the target - // face, and (3) AB exits *exactly* through the corner. (The sumEqual - // code checks whether the dot product of (u,v,1) and n is exactly zero.) - if math.Abs(exit1MinusA) == 1 && - uvwFace(face, int(1-axis), exit1MinusAPos) == targetFace && - sumEqual(exit.X*n.X, exit.Y*n.Y, -n.Z) { - return targetFace - } - - // Otherwise return the face that is adjacent to the exit point in the - // direction of the exit axis. - return uvwFace(face, int(axis), exitAPos) -} diff --git a/vendor/github.com/golang/geo/s2/edge_crosser.go b/vendor/github.com/golang/geo/s2/edge_crosser.go deleted file mode 100644 index 69c6da6b9..000000000 --- a/vendor/github.com/golang/geo/s2/edge_crosser.go +++ /dev/null @@ -1,227 +0,0 @@ -// Copyright 2017 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package s2 - -import ( - "math" -) - -// EdgeCrosser allows edges to be efficiently tested for intersection with a -// given fixed edge AB. It is especially efficient when testing for -// intersection with an edge chain connecting vertices v0, v1, v2, ... 
-// -// Example usage: -// -// func CountIntersections(a, b Point, edges []Edge) int { -// count := 0 -// crosser := NewEdgeCrosser(a, b) -// for _, edge := range edges { -// if crosser.CrossingSign(&edge.First, &edge.Second) != DoNotCross { -// count++ -// } -// } -// return count -// } -// -type EdgeCrosser struct { - a Point - b Point - aXb Point - - // To reduce the number of calls to expensiveSign, we compute an - // outward-facing tangent at A and B if necessary. If the plane - // perpendicular to one of these tangents separates AB from CD (i.e., one - // edge on each side) then there is no intersection. - aTangent Point // Outward-facing tangent at A. - bTangent Point // Outward-facing tangent at B. - - // The fields below are updated for each vertex in the chain. - c Point // Previous vertex in the vertex chain. - acb Direction // The orientation of triangle ACB. -} - -// NewEdgeCrosser returns an EdgeCrosser with the fixed edge AB. -func NewEdgeCrosser(a, b Point) *EdgeCrosser { - norm := a.PointCross(b) - return &EdgeCrosser{ - a: a, - b: b, - aXb: Point{a.Cross(b.Vector)}, - aTangent: Point{a.Cross(norm.Vector)}, - bTangent: Point{norm.Cross(b.Vector)}, - } -} - -// CrossingSign reports whether the edge AB intersects the edge CD. If any two -// vertices from different edges are the same, returns MaybeCross. If either edge -// is degenerate (A == B or C == D), returns either DoNotCross or MaybeCross. -// -// Properties of CrossingSign: -// -// (1) CrossingSign(b,a,c,d) == CrossingSign(a,b,c,d) -// (2) CrossingSign(c,d,a,b) == CrossingSign(a,b,c,d) -// (3) CrossingSign(a,b,c,d) == MaybeCross if a==c, a==d, b==c, b==d -// (3) CrossingSign(a,b,c,d) == DoNotCross or MaybeCross if a==b or c==d -// -// Note that if you want to check an edge against a chain of other edges, -// it is slightly more efficient to use the single-argument version -// ChainCrossingSign below. 
-func (e *EdgeCrosser) CrossingSign(c, d Point) Crossing { - if c != e.c { - e.RestartAt(c) - } - return e.ChainCrossingSign(d) -} - -// EdgeOrVertexCrossing reports whether if CrossingSign(c, d) > 0, or AB and -// CD share a vertex and VertexCrossing(a, b, c, d) is true. -// -// This method extends the concept of a "crossing" to the case where AB -// and CD have a vertex in common. The two edges may or may not cross, -// according to the rules defined in VertexCrossing above. The rules -// are designed so that point containment tests can be implemented simply -// by counting edge crossings. Similarly, determining whether one edge -// chain crosses another edge chain can be implemented by counting. -func (e *EdgeCrosser) EdgeOrVertexCrossing(c, d Point) bool { - if c != e.c { - e.RestartAt(c) - } - return e.EdgeOrVertexChainCrossing(d) -} - -// NewChainEdgeCrosser is a convenience constructor that uses AB as the fixed edge, -// and C as the first vertex of the vertex chain (equivalent to calling RestartAt(c)). -// -// You don't need to use this or any of the chain functions unless you're trying to -// squeeze out every last drop of performance. Essentially all you are saving is a test -// whether the first vertex of the current edge is the same as the second vertex of the -// previous edge. -func NewChainEdgeCrosser(a, b, c Point) *EdgeCrosser { - e := NewEdgeCrosser(a, b) - e.RestartAt(c) - return e -} - -// RestartAt sets the current point of the edge crosser to be c. -// Call this method when your chain 'jumps' to a new place. -// The argument must point to a value that persists until the next call. -func (e *EdgeCrosser) RestartAt(c Point) { - e.c = c - e.acb = -triageSign(e.a, e.b, e.c) -} - -// ChainCrossingSign is like CrossingSign, but uses the last vertex passed to one of -// the crossing methods (or RestartAt) as the first vertex of the current edge. 
-func (e *EdgeCrosser) ChainCrossingSign(d Point) Crossing { - // For there to be an edge crossing, the triangles ACB, CBD, BDA, DAC must - // all be oriented the same way (CW or CCW). We keep the orientation of ACB - // as part of our state. When each new point D arrives, we compute the - // orientation of BDA and check whether it matches ACB. This checks whether - // the points C and D are on opposite sides of the great circle through AB. - - // Recall that triageSign is invariant with respect to rotating its - // arguments, i.e. ABD has the same orientation as BDA. - bda := triageSign(e.a, e.b, d) - if e.acb == -bda && bda != Indeterminate { - // The most common case -- triangles have opposite orientations. Save the - // current vertex D as the next vertex C, and also save the orientation of - // the new triangle ACB (which is opposite to the current triangle BDA). - e.c = d - e.acb = -bda - return DoNotCross - } - return e.crossingSign(d, bda) -} - -// EdgeOrVertexChainCrossing is like EdgeOrVertexCrossing, but uses the last vertex -// passed to one of the crossing methods (or RestartAt) as the first vertex of the current edge. -func (e *EdgeCrosser) EdgeOrVertexChainCrossing(d Point) bool { - // We need to copy e.c since it is clobbered by ChainCrossingSign. - c := e.c - switch e.ChainCrossingSign(d) { - case DoNotCross: - return false - case Cross: - return true - } - return VertexCrossing(e.a, e.b, c, d) -} - -// crossingSign handle the slow path of CrossingSign. -func (e *EdgeCrosser) crossingSign(d Point, bda Direction) Crossing { - // Compute the actual result, and then save the current vertex D as the next - // vertex C, and save the orientation of the next triangle ACB (which is - // opposite to the current triangle BDA). - defer func() { - e.c = d - e.acb = -bda - }() - - // At this point, a very common situation is that A,B,C,D are four points on - // a line such that AB does not overlap CD. 
(For example, this happens when - // a line or curve is sampled finely, or when geometry is constructed by - // computing the union of S2CellIds.) Most of the time, we can determine - // that AB and CD do not intersect using the two outward-facing - // tangents at A and B (parallel to AB) and testing whether AB and CD are on - // opposite sides of the plane perpendicular to one of these tangents. This - // is moderately expensive but still much cheaper than expensiveSign. - - // The error in RobustCrossProd is insignificant. The maximum error in - // the call to CrossProd (i.e., the maximum norm of the error vector) is - // (0.5 + 1/sqrt(3)) * dblEpsilon. The maximum error in each call to - // DotProd below is dblEpsilon. (There is also a small relative error - // term that is insignificant because we are comparing the result against a - // constant that is very close to zero.) - maxError := (1.5 + 1/math.Sqrt(3)) * dblEpsilon - if (e.c.Dot(e.aTangent.Vector) > maxError && d.Dot(e.aTangent.Vector) > maxError) || (e.c.Dot(e.bTangent.Vector) > maxError && d.Dot(e.bTangent.Vector) > maxError) { - return DoNotCross - } - - // Otherwise, eliminate the cases where two vertices from different edges are - // equal. (These cases could be handled in the code below, but we would rather - // avoid calling ExpensiveSign if possible.) - if e.a == e.c || e.a == d || e.b == e.c || e.b == d { - return MaybeCross - } - - // Eliminate the cases where an input edge is degenerate. (Note that in - // most cases, if CD is degenerate then this method is not even called - // because acb and bda have different signs.) - if e.a == e.b || e.c == d { - return DoNotCross - } - - // Otherwise it's time to break out the big guns. 
- if e.acb == Indeterminate { - e.acb = -expensiveSign(e.a, e.b, e.c) - } - if bda == Indeterminate { - bda = expensiveSign(e.a, e.b, d) - } - - if bda != e.acb { - return DoNotCross - } - - cbd := -RobustSign(e.c, d, e.b) - if cbd != e.acb { - return DoNotCross - } - dac := RobustSign(e.c, d, e.a) - if dac != e.acb { - return DoNotCross - } - return Cross -} diff --git a/vendor/github.com/golang/geo/s2/edge_crossings.go b/vendor/github.com/golang/geo/s2/edge_crossings.go deleted file mode 100644 index a98ec76ff..000000000 --- a/vendor/github.com/golang/geo/s2/edge_crossings.go +++ /dev/null @@ -1,396 +0,0 @@ -// Copyright 2017 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package s2 - -import ( - "fmt" - "math" - - "github.com/golang/geo/r3" - "github.com/golang/geo/s1" -) - -const ( - // intersectionError can be set somewhat arbitrarily, because the algorithm - // uses more precision if necessary in order to achieve the specified error. - // The only strict requirement is that intersectionError >= dblEpsilon - // radians. However, using a larger error tolerance makes the algorithm more - // efficient because it reduces the number of cases where exact arithmetic is - // needed. - intersectionError = s1.Angle(8 * dblError) - - // intersectionMergeRadius is used to ensure that intersection points that - // are supposed to be coincident are merged back together into a single - // vertex. 
This is required in order for various polygon operations (union, - // intersection, etc) to work correctly. It is twice the intersection error - // because two coincident intersection points might have errors in - // opposite directions. - intersectionMergeRadius = 2 * intersectionError -) - -// A Crossing indicates how edges cross. -type Crossing int - -const ( - // Cross means the edges cross. - Cross Crossing = iota - // MaybeCross means two vertices from different edges are the same. - MaybeCross - // DoNotCross means the edges do not cross. - DoNotCross -) - -func (c Crossing) String() string { - switch c { - case Cross: - return "Cross" - case MaybeCross: - return "MaybeCross" - case DoNotCross: - return "DoNotCross" - default: - return fmt.Sprintf("(BAD CROSSING %d)", c) - } -} - -// CrossingSign reports whether the edge AB intersects the edge CD. -// If AB crosses CD at a point that is interior to both edges, Cross is returned. -// If any two vertices from different edges are the same it returns MaybeCross. -// Otherwise it returns DoNotCross. -// If either edge is degenerate (A == B or C == D), the return value is MaybeCross -// if two vertices from different edges are the same and DoNotCross otherwise. -// -// Properties of CrossingSign: -// -// (1) CrossingSign(b,a,c,d) == CrossingSign(a,b,c,d) -// (2) CrossingSign(c,d,a,b) == CrossingSign(a,b,c,d) -// (3) CrossingSign(a,b,c,d) == MaybeCross if a==c, a==d, b==c, b==d -// (3) CrossingSign(a,b,c,d) == DoNotCross or MaybeCross if a==b or c==d -// -// This method implements an exact, consistent perturbation model such -// that no three points are ever considered to be collinear. This means -// that even if you have 4 points A, B, C, D that lie exactly in a line -// (say, around the equator), C and D will be treated as being slightly to -// one side or the other of AB. This is done in a way such that the -// results are always consistent (see RobustSign). 
-func CrossingSign(a, b, c, d Point) Crossing { - crosser := NewChainEdgeCrosser(a, b, c) - return crosser.ChainCrossingSign(d) -} - -// VertexCrossing reports whether two edges "cross" in such a way that point-in-polygon -// containment tests can be implemented by counting the number of edge crossings. -// -// Given two edges AB and CD where at least two vertices are identical -// (i.e. CrossingSign(a,b,c,d) == 0), the basic rule is that a "crossing" -// occurs if AB is encountered after CD during a CCW sweep around the shared -// vertex starting from a fixed reference point. -// -// Note that according to this rule, if AB crosses CD then in general CD -// does not cross AB. However, this leads to the correct result when -// counting polygon edge crossings. For example, suppose that A,B,C are -// three consecutive vertices of a CCW polygon. If we now consider the edge -// crossings of a segment BP as P sweeps around B, the crossing number -// changes parity exactly when BP crosses BA or BC. -// -// Useful properties of VertexCrossing (VC): -// -// (1) VC(a,a,c,d) == VC(a,b,c,c) == false -// (2) VC(a,b,a,b) == VC(a,b,b,a) == true -// (3) VC(a,b,c,d) == VC(a,b,d,c) == VC(b,a,c,d) == VC(b,a,d,c) -// (3) If exactly one of a,b equals one of c,d, then exactly one of -// VC(a,b,c,d) and VC(c,d,a,b) is true -// -// It is an error to call this method with 4 distinct vertices. -func VertexCrossing(a, b, c, d Point) bool { - // If A == B or C == D there is no intersection. We need to check this - // case first in case 3 or more input points are identical. - if a == b || c == d { - return false - } - - // If any other pair of vertices is equal, there is a crossing if and only - // if OrderedCCW indicates that the edge AB is further CCW around the - // shared vertex O (either A or B) than the edge CD, starting from an - // arbitrary fixed reference point. - - // Optimization: if AB=CD or AB=DC, we can avoid most of the calculations. 
- switch { - case a == c: - return (b == d) || OrderedCCW(Point{a.Ortho()}, d, b, a) - case b == d: - return OrderedCCW(Point{b.Ortho()}, c, a, b) - case a == d: - return (b == c) || OrderedCCW(Point{a.Ortho()}, c, b, a) - case b == c: - return OrderedCCW(Point{b.Ortho()}, d, a, b) - } - - return false -} - -// EdgeOrVertexCrossing is a convenience function that calls CrossingSign to -// handle cases where all four vertices are distinct, and VertexCrossing to -// handle cases where two or more vertices are the same. This defines a crossing -// function such that point-in-polygon containment tests can be implemented -// by simply counting edge crossings. -func EdgeOrVertexCrossing(a, b, c, d Point) bool { - switch CrossingSign(a, b, c, d) { - case DoNotCross: - return false - case Cross: - return true - default: - return VertexCrossing(a, b, c, d) - } -} - -// Intersection returns the intersection point of two edges AB and CD that cross -// (CrossingSign(a,b,c,d) == Crossing). -// -// Useful properties of Intersection: -// -// (1) Intersection(b,a,c,d) == Intersection(a,b,d,c) == Intersection(a,b,c,d) -// (2) Intersection(c,d,a,b) == Intersection(a,b,c,d) -// -// The returned intersection point X is guaranteed to be very close to the -// true intersection point of AB and CD, even if the edges intersect at a -// very small angle. -func Intersection(a0, a1, b0, b1 Point) Point { - // It is difficult to compute the intersection point of two edges accurately - // when the angle between the edges is very small. Previously we handled - // this by only guaranteeing that the returned intersection point is within - // intersectionError of each edge. However, this means that when the edges - // cross at a very small angle, the computed result may be very far from the - // true intersection point. - // - // Instead this function now guarantees that the result is always within - // intersectionError of the true intersection. 
This requires using more - // sophisticated techniques and in some cases extended precision. - // - // - intersectionStable computes the intersection point using - // projection and interpolation, taking care to minimize cancellation - // error. - // - // - intersectionExact computes the intersection point using precision - // arithmetic and converts the final result back to an Point. - pt, ok := intersectionStable(a0, a1, b0, b1) - if !ok { - pt = intersectionExact(a0, a1, b0, b1) - } - - // Make sure the intersection point is on the correct side of the sphere. - // Since all vertices are unit length, and edges are less than 180 degrees, - // (a0 + a1) and (b0 + b1) both have positive dot product with the - // intersection point. We use the sum of all vertices to make sure that the - // result is unchanged when the edges are swapped or reversed. - if pt.Dot((a0.Add(a1.Vector)).Add(b0.Add(b1.Vector))) < 0 { - pt = Point{pt.Mul(-1)} - } - - return pt -} - -// Computes the cross product of two vectors, normalized to be unit length. -// Also returns the length of the cross -// product before normalization, which is useful for estimating the amount of -// error in the result. For numerical stability, the vectors should both be -// approximately unit length. -func robustNormalWithLength(x, y r3.Vector) (r3.Vector, float64) { - var pt r3.Vector - // This computes 2 * (x.Cross(y)), but has much better numerical - // stability when x and y are unit length. - tmp := x.Sub(y).Cross(x.Add(y)) - length := tmp.Norm() - if length != 0 { - pt = tmp.Mul(1 / length) - } - return pt, 0.5 * length // Since tmp == 2 * (x.Cross(y)) -} - -/* -// intersectionSimple is not used by the C++ so it is skipped here. -*/ - -// projection returns the projection of aNorm onto X (x.Dot(aNorm)), and a bound -// on the error in the result. aNorm is not necessarily unit length. 
-// -// The remaining parameters (the length of aNorm (aNormLen) and the edge endpoints -// a0 and a1) allow this dot product to be computed more accurately and efficiently. -func projection(x, aNorm r3.Vector, aNormLen float64, a0, a1 Point) (proj, bound float64) { - // The error in the dot product is proportional to the lengths of the input - // vectors, so rather than using x itself (a unit-length vector) we use - // the vectors from x to the closer of the two edge endpoints. This - // typically reduces the error by a huge factor. - x0 := x.Sub(a0.Vector) - x1 := x.Sub(a1.Vector) - x0Dist2 := x0.Norm2() - x1Dist2 := x1.Norm2() - - // If both distances are the same, we need to be careful to choose one - // endpoint deterministically so that the result does not change if the - // order of the endpoints is reversed. - var dist float64 - if x0Dist2 < x1Dist2 || (x0Dist2 == x1Dist2 && x0.Cmp(x1) == -1) { - dist = math.Sqrt(x0Dist2) - proj = x0.Dot(aNorm) - } else { - dist = math.Sqrt(x1Dist2) - proj = x1.Dot(aNorm) - } - - // This calculation bounds the error from all sources: the computation of - // the normal, the subtraction of one endpoint, and the dot product itself. - // dblError appears because the input points are assumed to be - // normalized in double precision. - // - // For reference, the bounds that went into this calculation are: - // ||N'-N|| <= ((1 + 2 * sqrt(3))||N|| + 32 * sqrt(3) * dblError) * epsilon - // |(A.B)'-(A.B)| <= (1.5 * (A.B) + 1.5 * ||A|| * ||B||) * epsilon - // ||(X-Y)'-(X-Y)|| <= ||X-Y|| * epsilon - bound = (((3.5+2*math.Sqrt(3))*aNormLen+32*math.Sqrt(3)*dblError)*dist + 1.5*math.Abs(proj)) * epsilon - return proj, bound -} - -// compareEdges reports whether (a0,a1) is less than (b0,b1) with respect to a total -// ordering on edges that is invariant under edge reversals. 
-func compareEdges(a0, a1, b0, b1 Point) bool { - if a0.Cmp(a1.Vector) != -1 { - a0, a1 = a1, a0 - } - if b0.Cmp(b1.Vector) != -1 { - b0, b1 = b1, b0 - } - return a0.Cmp(b0.Vector) == -1 || (a0 == b0 && b0.Cmp(b1.Vector) == -1) -} - -// intersectionStable returns the intersection point of the edges (a0,a1) and -// (b0,b1) if it can be computed to within an error of at most intersectionError -// by this function. -// -// The intersection point is not guaranteed to have the correct sign because we -// choose to use the longest of the two edges first. The sign is corrected by -// Intersection. -func intersectionStable(a0, a1, b0, b1 Point) (Point, bool) { - // Sort the two edges so that (a0,a1) is longer, breaking ties in a - // deterministic way that does not depend on the ordering of the endpoints. - // This is desirable for two reasons: - // - So that the result doesn't change when edges are swapped or reversed. - // - It reduces error, since the first edge is used to compute the edge - // normal (where a longer edge means less error), and the second edge - // is used for interpolation (where a shorter edge means less error). - aLen2 := a1.Sub(a0.Vector).Norm2() - bLen2 := b1.Sub(b0.Vector).Norm2() - if aLen2 < bLen2 || (aLen2 == bLen2 && compareEdges(a0, a1, b0, b1)) { - return intersectionStableSorted(b0, b1, a0, a1) - } - return intersectionStableSorted(a0, a1, b0, b1) -} - -// intersectionStableSorted is a helper function for intersectionStable. -// It expects that the edges (a0,a1) and (b0,b1) have been sorted so that -// the first edge passed in is longer. -func intersectionStableSorted(a0, a1, b0, b1 Point) (Point, bool) { - var pt Point - - // Compute the normal of the plane through (a0, a1) in a stable way. - aNorm := a0.Sub(a1.Vector).Cross(a0.Add(a1.Vector)) - aNormLen := aNorm.Norm() - bLen := b1.Sub(b0.Vector).Norm() - - // Compute the projection (i.e., signed distance) of b0 and b1 onto the - // plane through (a0, a1). 
Distances are scaled by the length of aNorm. - b0Dist, b0Error := projection(b0.Vector, aNorm, aNormLen, a0, a1) - b1Dist, b1Error := projection(b1.Vector, aNorm, aNormLen, a0, a1) - - // The total distance from b0 to b1 measured perpendicularly to (a0,a1) is - // |b0Dist - b1Dist|. Note that b0Dist and b1Dist generally have - // opposite signs because b0 and b1 are on opposite sides of (a0, a1). The - // code below finds the intersection point by interpolating along the edge - // (b0, b1) to a fractional distance of b0Dist / (b0Dist - b1Dist). - // - // It can be shown that the maximum error in the interpolation fraction is - // - // (b0Dist * b1Error - b1Dist * b0Error) / (distSum * (distSum - errorSum)) - // - // We save ourselves some work by scaling the result and the error bound by - // "distSum", since the result is normalized to be unit length anyway. - distSum := math.Abs(b0Dist - b1Dist) - errorSum := b0Error + b1Error - if distSum <= errorSum { - return pt, false // Error is unbounded in this case. - } - - x := b1.Mul(b0Dist).Sub(b0.Mul(b1Dist)) - err := bLen*math.Abs(b0Dist*b1Error-b1Dist*b0Error)/ - (distSum-errorSum) + 2*distSum*epsilon - - // Finally we normalize the result, compute the corresponding error, and - // check whether the total error is acceptable. - xLen := x.Norm() - maxError := intersectionError - if err > (float64(maxError)-epsilon)*xLen { - return pt, false - } - - return Point{x.Mul(1 / xLen)}, true -} - -// intersectionExact returns the intersection point of (a0, a1) and (b0, b1) -// using precise arithmetic. Note that the result is not exact because it is -// rounded down to double precision at the end. Also, the intersection point -// is not guaranteed to have the correct sign (i.e., the return value may need -// to be negated). -func intersectionExact(a0, a1, b0, b1 Point) Point { - // Since we are using presice arithmetic, we don't need to worry about - // numerical stability. 
- a0P := r3.PreciseVectorFromVector(a0.Vector) - a1P := r3.PreciseVectorFromVector(a1.Vector) - b0P := r3.PreciseVectorFromVector(b0.Vector) - b1P := r3.PreciseVectorFromVector(b1.Vector) - aNormP := a0P.Cross(a1P) - bNormP := b0P.Cross(b1P) - xP := aNormP.Cross(bNormP) - - // The final Normalize() call is done in double precision, which creates a - // directional error of up to 2*dblError. (Precise conversion and Normalize() - // each contribute up to dblError of directional error.) - x := xP.Vector() - - if x == (r3.Vector{}) { - // The two edges are exactly collinear, but we still consider them to be - // "crossing" because of simulation of simplicity. Out of the four - // endpoints, exactly two lie in the interior of the other edge. Of - // those two we return the one that is lexicographically smallest. - x = r3.Vector{10, 10, 10} // Greater than any valid S2Point - - aNorm := Point{aNormP.Vector()} - bNorm := Point{bNormP.Vector()} - if OrderedCCW(b0, a0, b1, bNorm) && a0.Cmp(x) == -1 { - return a0 - } - if OrderedCCW(b0, a1, b1, bNorm) && a1.Cmp(x) == -1 { - return a1 - } - if OrderedCCW(a0, b0, a1, aNorm) && b0.Cmp(x) == -1 { - return b0 - } - if OrderedCCW(a0, b1, a1, aNorm) && b1.Cmp(x) == -1 { - return b1 - } - } - - return Point{x} -} diff --git a/vendor/github.com/golang/geo/s2/edge_distances.go b/vendor/github.com/golang/geo/s2/edge_distances.go deleted file mode 100644 index ca197af1d..000000000 --- a/vendor/github.com/golang/geo/s2/edge_distances.go +++ /dev/null @@ -1,408 +0,0 @@ -// Copyright 2017 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package s2 - -// This file defines a collection of methods for computing the distance to an edge, -// interpolating along an edge, projecting points onto edges, etc. - -import ( - "math" - - "github.com/golang/geo/s1" -) - -// DistanceFromSegment returns the distance of point X from line segment AB. -// The points are expected to be normalized. The result is very accurate for small -// distances but may have some numerical error if the distance is large -// (approximately pi/2 or greater). The case A == B is handled correctly. -func DistanceFromSegment(x, a, b Point) s1.Angle { - var minDist s1.ChordAngle - minDist, _ = updateMinDistance(x, a, b, minDist, true) - return minDist.Angle() -} - -// IsDistanceLess reports whether the distance from X to the edge AB is less -// than limit. (For less than or equal to, specify limit.Successor()). -// This method is faster than DistanceFromSegment(). If you want to -// compare against a fixed s1.Angle, you should convert it to an s1.ChordAngle -// once and save the value, since this conversion is relatively expensive. -func IsDistanceLess(x, a, b Point, limit s1.ChordAngle) bool { - _, less := UpdateMinDistance(x, a, b, limit) - return less -} - -// UpdateMinDistance checks if the distance from X to the edge AB is less -// than minDist, and if so, returns the updated value and true. -// The case A == B is handled correctly. -// -// Use this method when you want to compute many distances and keep track of -// the minimum. 
It is significantly faster than using DistanceFromSegment -// because (1) using s1.ChordAngle is much faster than s1.Angle, and (2) it -// can save a lot of work by not actually computing the distance when it is -// obviously larger than the current minimum. -func UpdateMinDistance(x, a, b Point, minDist s1.ChordAngle) (s1.ChordAngle, bool) { - return updateMinDistance(x, a, b, minDist, false) -} - -// UpdateMaxDistance checks if the distance from X to the edge AB is greater -// than maxDist, and if so, returns the updated value and true. -// Otherwise it returns false. The case A == B is handled correctly. -func UpdateMaxDistance(x, a, b Point, maxDist s1.ChordAngle) (s1.ChordAngle, bool) { - dist := maxChordAngle(ChordAngleBetweenPoints(x, a), ChordAngleBetweenPoints(x, b)) - if dist > s1.RightChordAngle { - dist, _ = updateMinDistance(Point{x.Mul(-1)}, a, b, dist, true) - dist = s1.StraightChordAngle - dist - } - if maxDist < dist { - return dist, true - } - - return maxDist, false -} - -// IsInteriorDistanceLess reports whether the minimum distance from X to the edge -// AB is attained at an interior point of AB (i.e., not an endpoint), and that -// distance is less than limit. (Specify limit.Successor() for less than or equal to). -func IsInteriorDistanceLess(x, a, b Point, limit s1.ChordAngle) bool { - _, less := UpdateMinInteriorDistance(x, a, b, limit) - return less -} - -// UpdateMinInteriorDistance reports whether the minimum distance from X to AB -// is attained at an interior point of AB (i.e., not an endpoint), and that distance -// is less than minDist. If so, the value of minDist is updated and true is returned. -// Otherwise it is unchanged and returns false. -func UpdateMinInteriorDistance(x, a, b Point, minDist s1.ChordAngle) (s1.ChordAngle, bool) { - return interiorDist(x, a, b, minDist, false) -} - -// Project returns the point along the edge AB that is closest to the point X. 
-// The fractional distance of this point along the edge AB can be obtained -// using DistanceFraction. -// -// This requires that all points are unit length. -func Project(x, a, b Point) Point { - aXb := a.PointCross(b) - // Find the closest point to X along the great circle through AB. - p := x.Sub(aXb.Mul(x.Dot(aXb.Vector) / aXb.Vector.Norm2())) - - // If this point is on the edge AB, then it's the closest point. - if Sign(aXb, a, Point{p}) && Sign(Point{p}, b, aXb) { - return Point{p.Normalize()} - } - - // Otherwise, the closest point is either A or B. - if x.Sub(a.Vector).Norm2() <= x.Sub(b.Vector).Norm2() { - return a - } - return b -} - -// DistanceFraction returns the distance ratio of the point X along an edge AB. -// If X is on the line segment AB, this is the fraction T such -// that X == Interpolate(T, A, B). -// -// This requires that A and B are distinct. -func DistanceFraction(x, a, b Point) float64 { - d0 := x.Angle(a.Vector) - d1 := x.Angle(b.Vector) - return float64(d0 / (d0 + d1)) -} - -// Interpolate returns the point X along the line segment AB whose distance from A -// is the given fraction "t" of the distance AB. Does NOT require that "t" be -// between 0 and 1. Note that all distances are measured on the surface of -// the sphere, so this is more complicated than just computing (1-t)*a + t*b -// and normalizing the result. -func Interpolate(t float64, a, b Point) Point { - if t == 0 { - return a - } - if t == 1 { - return b - } - ab := a.Angle(b.Vector) - return InterpolateAtDistance(s1.Angle(t)*ab, a, b) -} - -// InterpolateAtDistance returns the point X along the line segment AB whose -// distance from A is the angle ax. -func InterpolateAtDistance(ax s1.Angle, a, b Point) Point { - aRad := ax.Radians() - - // Use PointCross to compute the tangent vector at A towards B. The - // result is always perpendicular to A, even if A=B or A=-B, but it is not - // necessarily unit length. (We effectively normalize it below.) 
- normal := a.PointCross(b) - tangent := normal.Vector.Cross(a.Vector) - - // Now compute the appropriate linear combination of A and "tangent". With - // infinite precision the result would always be unit length, but we - // normalize it anyway to ensure that the error is within acceptable bounds. - // (Otherwise errors can build up when the result of one interpolation is - // fed into another interpolation.) - return Point{(a.Mul(math.Cos(aRad)).Add(tangent.Mul(math.Sin(aRad) / tangent.Norm()))).Normalize()} -} - -// minUpdateDistanceMaxError returns the maximum error in the result of -// UpdateMinDistance (and the associated functions such as -// UpdateMinInteriorDistance, IsDistanceLess, etc), assuming that all -// input points are normalized to within the bounds guaranteed by r3.Vector's -// Normalize. The error can be added or subtracted from an s1.ChordAngle -// using its Expanded method. -func minUpdateDistanceMaxError(dist s1.ChordAngle) float64 { - // There are two cases for the maximum error in UpdateMinDistance(), - // depending on whether the closest point is interior to the edge. - return math.Max(minUpdateInteriorDistanceMaxError(dist), dist.MaxPointError()) -} - -// minUpdateInteriorDistanceMaxError returns the maximum error in the result of -// UpdateMinInteriorDistance, assuming that all input points are normalized -// to within the bounds guaranteed by Point's Normalize. The error can be added -// or subtracted from an s1.ChordAngle using its Expanded method. -// -// Note that accuracy goes down as the distance approaches 0 degrees or 180 -// degrees (for different reasons). Near 0 degrees the error is acceptable -// for all practical purposes (about 1.2e-15 radians ~= 8 nanometers). 
For -// exactly antipodal points the maximum error is quite high (0.5 meters), -// but this error drops rapidly as the points move away from antipodality -// (approximately 1 millimeter for points that are 50 meters from antipodal, -// and 1 micrometer for points that are 50km from antipodal). -// -// TODO(roberts): Currently the error bound does not hold for edges whose endpoints -// are antipodal to within about 1e-15 radians (less than 1 micron). This could -// be fixed by extending PointCross to use higher precision when necessary. -func minUpdateInteriorDistanceMaxError(dist s1.ChordAngle) float64 { - // If a point is more than 90 degrees from an edge, then the minimum - // distance is always to one of the endpoints, not to the edge interior. - if dist >= s1.RightChordAngle { - return 0.0 - } - - // This bound includes all source of error, assuming that the input points - // are normalized. a and b are components of chord length that are - // perpendicular and parallel to a plane containing the edge respectively. - b := math.Min(1.0, 0.5*float64(dist)) - a := math.Sqrt(b * (2 - b)) - return ((2.5+2*math.Sqrt(3)+8.5*a)*a + - (2+2*math.Sqrt(3)/3+6.5*(1-b))*b + - (23+16/math.Sqrt(3))*dblEpsilon) * dblEpsilon -} - -// updateMinDistance computes the distance from a point X to a line segment AB, -// and if either the distance was less than the given minDist, or alwaysUpdate is -// true, the value and whether it was updated are returned. -func updateMinDistance(x, a, b Point, minDist s1.ChordAngle, alwaysUpdate bool) (s1.ChordAngle, bool) { - if d, ok := interiorDist(x, a, b, minDist, alwaysUpdate); ok { - // Minimum distance is attained along the edge interior. - return d, true - } - - // Otherwise the minimum distance is to one of the endpoints. 
- xa2, xb2 := (x.Sub(a.Vector)).Norm2(), x.Sub(b.Vector).Norm2() - dist := s1.ChordAngle(math.Min(xa2, xb2)) - if !alwaysUpdate && dist >= minDist { - return minDist, false - } - return dist, true -} - -// interiorDist returns the shortest distance from point x to edge ab, assuming -// that the closest point to X is interior to AB. If the closest point is not -// interior to AB, interiorDist returns (minDist, false). If alwaysUpdate is set to -// false, the distance is only updated when the value exceeds certain the given minDist. -func interiorDist(x, a, b Point, minDist s1.ChordAngle, alwaysUpdate bool) (s1.ChordAngle, bool) { - // Chord distance of x to both end points a and b. - xa2, xb2 := (x.Sub(a.Vector)).Norm2(), x.Sub(b.Vector).Norm2() - - // The closest point on AB could either be one of the two vertices (the - // vertex case) or in the interior (the interior case). Let C = A x B. - // If X is in the spherical wedge extending from A to B around the axis - // through C, then we are in the interior case. Otherwise we are in the - // vertex case. - // - // Check whether we might be in the interior case. For this to be true, XAB - // and XBA must both be acute angles. Checking this condition exactly is - // expensive, so instead we consider the planar triangle ABX (which passes - // through the sphere's interior). The planar angles XAB and XBA are always - // less than the corresponding spherical angles, so if we are in the - // interior case then both of these angles must be acute. - // - // We check this by computing the squared edge lengths of the planar - // triangle ABX, and testing whether angles XAB and XBA are both acute using - // the law of cosines: - // - // | XA^2 - XB^2 | < AB^2 (*) - // - // This test must be done conservatively (taking numerical errors into - // account) since otherwise we might miss a situation where the true minimum - // distance is achieved by a point on the edge interior. 
- // - // There are two sources of error in the expression above (*). The first is - // that points are not normalized exactly; they are only guaranteed to be - // within 2 * dblEpsilon of unit length. Under the assumption that the two - // sides of (*) are nearly equal, the total error due to normalization errors - // can be shown to be at most - // - // 2 * dblEpsilon * (XA^2 + XB^2 + AB^2) + 8 * dblEpsilon ^ 2 . - // - // The other source of error is rounding of results in the calculation of (*). - // Each of XA^2, XB^2, AB^2 has a maximum relative error of 2.5 * dblEpsilon, - // plus an additional relative error of 0.5 * dblEpsilon in the final - // subtraction which we further bound as 0.25 * dblEpsilon * (XA^2 + XB^2 + - // AB^2) for convenience. This yields a final error bound of - // - // 4.75 * dblEpsilon * (XA^2 + XB^2 + AB^2) + 8 * dblEpsilon ^ 2 . - ab2 := a.Sub(b.Vector).Norm2() - maxError := (4.75*dblEpsilon*(xa2+xb2+ab2) + 8*dblEpsilon*dblEpsilon) - if math.Abs(xa2-xb2) >= ab2+maxError { - return minDist, false - } - - // The minimum distance might be to a point on the edge interior. Let R - // be closest point to X that lies on the great circle through AB. Rather - // than computing the geodesic distance along the surface of the sphere, - // instead we compute the "chord length" through the sphere's interior. - // - // The squared chord length XR^2 can be expressed as XQ^2 + QR^2, where Q - // is the point X projected onto the plane through the great circle AB. - // The distance XQ^2 can be written as (X.C)^2 / |C|^2 where C = A x B. - // We ignore the QR^2 term and instead use XQ^2 as a lower bound, since it - // is faster and the corresponding distance on the Earth's surface is - // accurate to within 1% for distances up to about 1800km. - c := a.PointCross(b) - c2 := c.Norm2() - xDotC := x.Dot(c.Vector) - xDotC2 := xDotC * xDotC - if !alwaysUpdate && xDotC2 > c2*float64(minDist) { - // The closest point on the great circle AB is too far away. 
We need to - // test this using ">" rather than ">=" because the actual minimum bound - // on the distance is (xDotC2 / c2), which can be rounded differently - // than the (more efficient) multiplicative test above. - return minDist, false - } - - // Otherwise we do the exact, more expensive test for the interior case. - // This test is very likely to succeed because of the conservative planar - // test we did initially. - // - // TODO(roberts): Ensure that the errors in test are accurately reflected in the - // minUpdateInteriorDistanceMaxError. - cx := c.Cross(x.Vector) - if a.Sub(x.Vector).Dot(cx) >= 0 || b.Sub(x.Vector).Dot(cx) <= 0 { - return minDist, false - } - - // Compute the squared chord length XR^2 = XQ^2 + QR^2 (see above). - // This calculation has good accuracy for all chord lengths since it - // is based on both the dot product and cross product (rather than - // deriving one from the other). However, note that the chord length - // representation itself loses accuracy as the angle approaches π. - qr := 1 - math.Sqrt(cx.Norm2()/c2) - dist := s1.ChordAngle((xDotC2 / c2) + (qr * qr)) - - if !alwaysUpdate && dist >= minDist { - return minDist, false - } - - return dist, true -} - -// updateEdgePairMinDistance computes the minimum distance between the given -// pair of edges. If the two edges cross, the distance is zero. The cases -// a0 == a1 and b0 == b1 are handled correctly. -func updateEdgePairMinDistance(a0, a1, b0, b1 Point, minDist s1.ChordAngle) (s1.ChordAngle, bool) { - if minDist == 0 { - return 0, false - } - if CrossingSign(a0, a1, b0, b1) == Cross { - minDist = 0 - return 0, true - } - - // Otherwise, the minimum distance is achieved at an endpoint of at least - // one of the two edges. We ensure that all four possibilities are always checked. - // - // The calculation below computes each of the six vertex-vertex distances - // twice (this could be optimized). 
- var ok1, ok2, ok3, ok4 bool - minDist, ok1 = UpdateMinDistance(a0, b0, b1, minDist) - minDist, ok2 = UpdateMinDistance(a1, b0, b1, minDist) - minDist, ok3 = UpdateMinDistance(b0, a0, a1, minDist) - minDist, ok4 = UpdateMinDistance(b1, a0, a1, minDist) - return minDist, ok1 || ok2 || ok3 || ok4 -} - -// updateEdgePairMaxDistance reports the minimum distance between the given pair of edges. -// If one edge crosses the antipodal reflection of the other, the distance is pi. -func updateEdgePairMaxDistance(a0, a1, b0, b1 Point, maxDist s1.ChordAngle) (s1.ChordAngle, bool) { - if maxDist == s1.StraightChordAngle { - return s1.StraightChordAngle, false - } - if CrossingSign(a0, a1, Point{b0.Mul(-1)}, Point{b1.Mul(-1)}) == Cross { - return s1.StraightChordAngle, true - } - - // Otherwise, the maximum distance is achieved at an endpoint of at least - // one of the two edges. We ensure that all four possibilities are always checked. - // - // The calculation below computes each of the six vertex-vertex distances - // twice (this could be optimized). - var ok1, ok2, ok3, ok4 bool - maxDist, ok1 = UpdateMaxDistance(a0, b0, b1, maxDist) - maxDist, ok2 = UpdateMaxDistance(a1, b0, b1, maxDist) - maxDist, ok3 = UpdateMaxDistance(b0, a0, a1, maxDist) - maxDist, ok4 = UpdateMaxDistance(b1, a0, a1, maxDist) - return maxDist, ok1 || ok2 || ok3 || ok4 -} - -// EdgePairClosestPoints returns the pair of points (a, b) that achieves the -// minimum distance between edges a0a1 and b0b1, where a is a point on a0a1 and -// b is a point on b0b1. If the two edges intersect, a and b are both equal to -// the intersection point. Handles a0 == a1 and b0 == b1 correctly. -func EdgePairClosestPoints(a0, a1, b0, b1 Point) (Point, Point) { - if CrossingSign(a0, a1, b0, b1) == Cross { - x := Intersection(a0, a1, b0, b1) - return x, x - } - // We save some work by first determining which vertex/edge pair achieves - // the minimum distance, and then computing the closest point on that edge. 
- var minDist s1.ChordAngle - var ok bool - - minDist, ok = updateMinDistance(a0, b0, b1, minDist, true) - closestVertex := 0 - if minDist, ok = UpdateMinDistance(a1, b0, b1, minDist); ok { - closestVertex = 1 - } - if minDist, ok = UpdateMinDistance(b0, a0, a1, minDist); ok { - closestVertex = 2 - } - if minDist, ok = UpdateMinDistance(b1, a0, a1, minDist); ok { - closestVertex = 3 - } - switch closestVertex { - case 0: - return a0, Project(a0, b0, b1) - case 1: - return a1, Project(a1, b0, b1) - case 2: - return Project(b0, a0, a1), b0 - case 3: - return Project(b1, a0, a1), b1 - default: - panic("illegal case reached") - } -} diff --git a/vendor/github.com/golang/geo/s2/edge_query.go b/vendor/github.com/golang/geo/s2/edge_query.go deleted file mode 100644 index 2d443d1ce..000000000 --- a/vendor/github.com/golang/geo/s2/edge_query.go +++ /dev/null @@ -1,803 +0,0 @@ -// Copyright 2019 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package s2 - -import ( - "sort" - - "github.com/golang/geo/s1" -) - -// EdgeQueryOptions holds the options for controlling how EdgeQuery operates. -// -// Options can be chained together builder-style: -// -// opts = NewClosestEdgeQueryOptions(). -// MaxResults(1). -// DistanceLimit(s1.ChordAngleFromAngle(3 * s1.Degree)). 
-// MaxError(s1.ChordAngleFromAngle(0.001 * s1.Degree)) -// query = NewClosestEdgeQuery(index, opts) -// -// or set individually: -// -// opts = NewClosestEdgeQueryOptions() -// opts.IncludeInteriors(true) -// -// or just inline: -// -// query = NewClosestEdgeQuery(index, NewClosestEdgeQueryOptions().MaxResults(3)) -// -// If you pass a nil as the options you get the default values for the options. -type EdgeQueryOptions struct { - common *queryOptions -} - -// DistanceLimit specifies that only edges whose distance to the target is -// within, this distance should be returned. Edges whose distance is equal -// are not returned. To include values that are equal, specify the limit with -// the next largest representable distance. i.e. limit.Successor(). -func (e *EdgeQueryOptions) DistanceLimit(limit s1.ChordAngle) *EdgeQueryOptions { - e.common = e.common.DistanceLimit(limit) - return e -} - -// IncludeInteriors specifies whether polygon interiors should be -// included when measuring distances. -func (e *EdgeQueryOptions) IncludeInteriors(x bool) *EdgeQueryOptions { - e.common = e.common.IncludeInteriors(x) - return e -} - -// UseBruteForce sets or disables the use of brute force in a query. -func (e *EdgeQueryOptions) UseBruteForce(x bool) *EdgeQueryOptions { - e.common = e.common.UseBruteForce(x) - return e -} - -// MaxError specifies that edges up to dist away than the true -// matching edges may be substituted in the result set, as long as such -// edges satisfy all the remaining search criteria (such as DistanceLimit). -// This option only has an effect if MaxResults is also specified; -// otherwise all edges closer than MaxDistance will always be returned. -func (e *EdgeQueryOptions) MaxError(dist s1.ChordAngle) *EdgeQueryOptions { - e.common = e.common.MaxError(dist) - return e -} - -// MaxResults specifies that at most MaxResults edges should be returned. -// This must be at least 1. 
-func (e *EdgeQueryOptions) MaxResults(n int) *EdgeQueryOptions { - e.common = e.common.MaxResults(n) - return e -} - -// NewClosestEdgeQueryOptions returns a set of edge query options suitable -// for performing closest edge queries. -func NewClosestEdgeQueryOptions() *EdgeQueryOptions { - return &EdgeQueryOptions{ - common: newQueryOptions(minDistance(0)), - } -} - -// NewFurthestEdgeQueryOptions returns a set of edge query options suitable -// for performing furthest edge queries. -func NewFurthestEdgeQueryOptions() *EdgeQueryOptions { - return &EdgeQueryOptions{ - common: newQueryOptions(maxDistance(0)), - } -} - -// EdgeQueryResult represents an edge that meets the target criteria for the -// query. Note the following special cases: -// -// - ShapeID >= 0 && EdgeID < 0 represents the interior of a shape. -// Such results may be returned when the option IncludeInteriors is true. -// -// - ShapeID < 0 && EdgeID < 0 is returned to indicate that no edge -// satisfies the requested query options. -type EdgeQueryResult struct { - distance distance - shapeID int32 - edgeID int32 -} - -// Distance reports the distance between the edge in this shape that satisfied -// the query's parameters. -func (e EdgeQueryResult) Distance() s1.ChordAngle { return e.distance.chordAngle() } - -// ShapeID reports the ID of the Shape this result is for. -func (e EdgeQueryResult) ShapeID() int32 { return e.shapeID } - -// EdgeID reports the ID of the edge in the results Shape. -func (e EdgeQueryResult) EdgeID() int32 { return e.edgeID } - -// newEdgeQueryResult returns a result instance with default values. -func newEdgeQueryResult(target distanceTarget) EdgeQueryResult { - return EdgeQueryResult{ - distance: target.distance().infinity(), - shapeID: -1, - edgeID: -1, - } -} - -// IsInterior reports if this result represents the interior of a Shape. 
-func (e EdgeQueryResult) IsInterior() bool { - return e.shapeID >= 0 && e.edgeID < 0 -} - -// IsEmpty reports if this has no edge that satisfies the given edge query options. -// This result is only returned in one special case, namely when FindEdge() does -// not find any suitable edges. -func (e EdgeQueryResult) IsEmpty() bool { - return e.shapeID < 0 -} - -// Less reports if this results is less that the other first by distance, -// then by (shapeID, edgeID). This is used for sorting. -func (e EdgeQueryResult) Less(other EdgeQueryResult) bool { - if e.distance.chordAngle() != other.distance.chordAngle() { - return e.distance.less(other.distance) - } - if e.shapeID != other.shapeID { - return e.shapeID < other.shapeID - } - return e.edgeID < other.edgeID -} - -// EdgeQuery is used to find the edge(s) between two geometries that match a -// given set of options. It is flexible enough so that it can be adapted to -// compute maximum distances and even potentially Hausdorff distances. -// -// By using the appropriate options, this type can answer questions such as: -// -// - Find the minimum distance between two geometries A and B. -// - Find all edges of geometry A that are within a distance D of geometry B. -// - Find the k edges of geometry A that are closest to a given point P. -// -// You can also specify whether polygons should include their interiors (i.e., -// if a point is contained by a polygon, should the distance be zero or should -// it be measured to the polygon boundary?) -// -// The input geometries may consist of any number of points, polylines, and -// polygons (collectively referred to as "shapes"). Shapes do not need to be -// disjoint; they may overlap or intersect arbitrarily. The implementation is -// designed to be fast for both simple and complex geometries. 
-type EdgeQuery struct { - index *ShapeIndex - opts *queryOptions - target distanceTarget - - // True if opts.maxError must be subtracted from ShapeIndex cell distances - // in order to ensure that such distances are measured conservatively. This - // is true only if the target takes advantage of maxError in order to - // return faster results, and 0 < maxError < distanceLimit. - useConservativeCellDistance bool - - // The decision about whether to use the brute force algorithm is based on - // counting the total number of edges in the index. However if the index - // contains a large number of shapes, this in itself might take too long. - // So instead we only count edges up to (maxBruteForceIndexSize() + 1) - // for the current target type (stored as indexNumEdgesLimit). - indexNumEdges int - indexNumEdgesLimit int - - // The distance beyond which we can safely ignore further candidate edges. - // (Candidates that are exactly at the limit are ignored; this is more - // efficient for UpdateMinDistance and should not affect clients since - // distance measurements have a small amount of error anyway.) - // - // Initially this is the same as the maximum distance specified by the user, - // but it can also be updated by the algorithm (see maybeAddResult). - distanceLimit distance - - // The current set of results of the query. - results []EdgeQueryResult - - // This field is true when duplicates must be avoided explicitly. This - // is achieved by maintaining a separate set keyed by (shapeID, edgeID) - // only, and checking whether each edge is in that set before computing the - // distance to it. - avoidDuplicates bool - - // testedEdges tracks the set of shape and edges that have already been tested. - testedEdges map[ShapeEdgeID]uint32 - - // For the optimized algorihm we precompute the top-level CellIDs that - // will be added to the priority queue. There can be at most 6 of these - // cells. 
Essentially this is just a covering of the indexed edges, except - // that we also store pointers to the corresponding ShapeIndexCells to - // reduce the number of index seeks required. - indexCovering []CellID - indexCells []*ShapeIndexCell - - // The algorithm maintains a priority queue of unprocessed CellIDs, sorted - // in increasing order of distance from the target. - queue *queryQueue - - iter *ShapeIndexIterator - maxDistanceCovering []CellID - initialCells []CellID -} - -// NewClosestEdgeQuery returns an EdgeQuery that is used for finding the -// closest edge(s) to a given Point, Edge, Cell, or geometry collection. -// -// You can find either the k closest edges, or all edges within a given -// radius, or both (i.e., the k closest edges up to a given maximum radius). -// E.g. to find all the edges within 5 kilometers, set the DistanceLimit in -// the options. -// -// By default *all* edges are returned, so you should always specify either -// MaxResults or DistanceLimit options or both. -// -// Note that by default, distances are measured to the boundary and interior -// of polygons. For example, if a point is inside a polygon then its distance -// is zero. To change this behavior, set the IncludeInteriors option to false. -// -// If you only need to test whether the distance is above or below a given -// threshold (e.g., 10 km), you can use the IsDistanceLess() method. This is -// much faster than actually calculating the distance with FindEdge, -// since the implementation can stop as soon as it can prove that the minimum -// distance is either above or below the threshold. 
-func NewClosestEdgeQuery(index *ShapeIndex, opts *EdgeQueryOptions) *EdgeQuery { - if opts == nil { - opts = NewClosestEdgeQueryOptions() - } - e := &EdgeQuery{ - testedEdges: make(map[ShapeEdgeID]uint32), - index: index, - opts: opts.common, - queue: newQueryQueue(), - } - - return e -} - -// NewFurthestEdgeQuery returns an EdgeQuery that is used for finding the -// furthest edge(s) to a given Point, Edge, Cell, or geometry collection. -// -// The furthest edge is defined as the one which maximizes the -// distance from any point on that edge to any point on the target geometry. -// -// Similar to the example in NewClosestEdgeQuery, to find the 5 furthest edges -// from a given Point: -func NewFurthestEdgeQuery(index *ShapeIndex, opts *EdgeQueryOptions) *EdgeQuery { - if opts == nil { - opts = NewFurthestEdgeQueryOptions() - } - e := &EdgeQuery{ - testedEdges: make(map[ShapeEdgeID]uint32), - index: index, - opts: opts.common, - queue: newQueryQueue(), - } - - return e -} - -// Reset resets the state of this EdgeQuery. -func (e *EdgeQuery) Reset() { - e.indexNumEdges = 0 - e.indexNumEdgesLimit = 0 - e.indexCovering = nil - e.indexCells = nil -} - -// FindEdges returns the edges for the given target that satisfy the current options. -// -// Note that if opts.IncludeInteriors is true, the results may include some -// entries with edge_id == -1. This indicates that the target intersects -// the indexed polygon with the given ShapeID. -func (e *EdgeQuery) FindEdges(target distanceTarget) []EdgeQueryResult { - return e.findEdges(target, e.opts) -} - -// Distance reports the distance to the target. If the index or target is empty, -// returns the EdgeQuery's maximal sentinel. -// -// Use IsDistanceLess()/IsDistanceGreater() if you only want to compare the -// distance against a threshold value, since it is often much faster. 
-func (e *EdgeQuery) Distance(target distanceTarget) s1.ChordAngle { - return e.findEdge(target, e.opts).Distance() -} - -// IsDistanceLess reports if the distance to target is less than the given limit. -// -// This method is usually much faster than Distance(), since it is much -// less work to determine whether the minimum distance is above or below a -// threshold than it is to calculate the actual minimum distance. -// -// If you wish to check if the distance is less than or equal to the limit, use: -// -// query.IsDistanceLess(target, limit.Successor()) -// -func (e *EdgeQuery) IsDistanceLess(target distanceTarget, limit s1.ChordAngle) bool { - opts := e.opts - opts = opts.MaxResults(1). - DistanceLimit(limit). - MaxError(s1.StraightChordAngle) - return !e.findEdge(target, opts).IsEmpty() -} - -// IsDistanceGreater reports if the distance to target is greater than limit. -// -// This method is usually much faster than Distance, since it is much -// less work to determine whether the maximum distance is above or below a -// threshold than it is to calculate the actual maximum distance. -// If you wish to check if the distance is less than or equal to the limit, use: -// -// query.IsDistanceGreater(target, limit.Predecessor()) -// -func (e *EdgeQuery) IsDistanceGreater(target distanceTarget, limit s1.ChordAngle) bool { - return e.IsDistanceLess(target, limit) -} - -// IsConservativeDistanceLessOrEqual reports if the distance to target is less -// or equal to the limit, where the limit has been expanded by the maximum error -// for the distance calculation. -// -// For example, suppose that we want to test whether two geometries might -// intersect each other after they are snapped together using Builder -// (using the IdentitySnapFunction with a given "snap radius"). Since -// Builder uses exact distance predicates (s2predicates), we need to -// measure the distance between the two geometries conservatively. 
If the -// distance is definitely greater than "snap radius", then the geometries -// are guaranteed to not intersect after snapping. -func (e *EdgeQuery) IsConservativeDistanceLessOrEqual(target distanceTarget, limit s1.ChordAngle) bool { - return e.IsDistanceLess(target, limit.Expanded(minUpdateDistanceMaxError(limit))) -} - -// IsConservativeDistanceGreaterOrEqual reports if the distance to the target is greater -// than or equal to the given limit with some small tolerance. -func (e *EdgeQuery) IsConservativeDistanceGreaterOrEqual(target distanceTarget, limit s1.ChordAngle) bool { - return e.IsDistanceGreater(target, limit.Expanded(-minUpdateDistanceMaxError(limit))) -} - -// findEdges returns the closest edges to the given target that satisfy the given options. -// -// Note that if opts.includeInteriors is true, the results may include some -// entries with edgeID == -1. This indicates that the target intersects the -// indexed polygon with the given shapeID. -func (e *EdgeQuery) findEdges(target distanceTarget, opts *queryOptions) []EdgeQueryResult { - e.findEdgesInternal(target, opts) - // TODO(roberts): Revisit this if there is a heap or other sorted and - // uniquing datastructure we can use instead of just a slice. - e.results = sortAndUniqueResults(e.results) - if len(e.results) > e.opts.maxResults { - e.results = e.results[:e.opts.maxResults] - } - return e.results -} - -func sortAndUniqueResults(results []EdgeQueryResult) []EdgeQueryResult { - if len(results) <= 1 { - return results - } - sort.Slice(results, func(i, j int) bool { return results[i].Less(results[j]) }) - j := 0 - for i := 1; i < len(results); i++ { - if results[j] == results[i] { - continue - } - j++ - results[j] = results[i] - } - return results[:j+1] -} - -// findEdge is a convenience method that returns exactly one edge, and if no -// edges satisfy the given search criteria, then a default Result is returned. 
-// -// This is primarily to ease the usage of a number of the methods in the DistanceTargets -// and in EdgeQuery. -func (e *EdgeQuery) findEdge(target distanceTarget, opts *queryOptions) EdgeQueryResult { - opts.MaxResults(1) - e.findEdges(target, opts) - if len(e.results) > 0 { - return e.results[0] - } - - return newEdgeQueryResult(target) -} - -// findEdgesInternal does the actual work for find edges that match the given options. -func (e *EdgeQuery) findEdgesInternal(target distanceTarget, opts *queryOptions) { - e.target = target - e.opts = opts - - e.testedEdges = make(map[ShapeEdgeID]uint32) - e.distanceLimit = target.distance().fromChordAngle(opts.distanceLimit) - e.results = make([]EdgeQueryResult, 0) - - if e.distanceLimit == target.distance().zero() { - return - } - - if opts.includeInteriors { - shapeIDs := map[int32]struct{}{} - e.target.visitContainingShapes(e.index, func(containingShape Shape, targetPoint Point) bool { - shapeIDs[e.index.idForShape(containingShape)] = struct{}{} - return len(shapeIDs) < opts.maxResults - }) - for shapeID := range shapeIDs { - e.addResult(EdgeQueryResult{target.distance().zero(), shapeID, -1}) - } - - if e.distanceLimit == target.distance().zero() { - return - } - } - - // If maxError > 0 and the target takes advantage of this, then we may - // need to adjust the distance estimates to ShapeIndex cells to ensure - // that they are always a lower bound on the true distance. For example, - // suppose max_distance == 100, maxError == 30, and we compute the distance - // to the target from some cell C0 as d(C0) == 80. Then because the target - // takes advantage of maxError, the true distance could be as low as 50. - // In order not to miss edges contained by such cells, we need to subtract - // maxError from the distance estimates. This behavior is controlled by - // the useConservativeCellDistance flag. 
- // - // However there is one important case where this adjustment is not - // necessary, namely when distanceLimit < maxError, This is because - // maxError only affects the algorithm once at least maxEdges edges - // have been found that satisfy the given distance limit. At that point, - // maxError is subtracted from distanceLimit in order to ensure that - // any further matches are closer by at least that amount. But when - // distanceLimit < maxError, this reduces the distance limit to 0, - // i.e. all remaining candidate cells and edges can safely be discarded. - // (This is how IsDistanceLess() and friends are implemented.) - targetUsesMaxError := opts.maxError != target.distance().zero().chordAngle() && - e.target.setMaxError(opts.maxError) - - // Note that we can't compare maxError and distanceLimit directly - // because one is a Delta and one is a Distance. Instead we subtract them. - e.useConservativeCellDistance = targetUsesMaxError && - (e.distanceLimit == target.distance().infinity() || - target.distance().zero().less(e.distanceLimit.sub(target.distance().fromChordAngle(opts.maxError)))) - - // Use the brute force algorithm if the index is small enough. To avoid - // spending too much time counting edges when there are many shapes, we stop - // counting once there are too many edges. We may need to recount the edges - // if we later see a target with a larger brute force edge threshold. - minOptimizedEdges := e.target.maxBruteForceIndexSize() + 1 - if minOptimizedEdges > e.indexNumEdgesLimit && e.indexNumEdges >= e.indexNumEdgesLimit { - e.indexNumEdges = e.index.NumEdgesUpTo(minOptimizedEdges) - e.indexNumEdgesLimit = minOptimizedEdges - } - - if opts.useBruteForce || e.indexNumEdges < minOptimizedEdges { - // The brute force algorithm already considers each edge exactly once. - e.avoidDuplicates = false - e.findEdgesBruteForce() - } else { - // If the target takes advantage of maxError then we need to avoid - // duplicate edges explicitly. 
(Otherwise it happens automatically.) - e.avoidDuplicates = targetUsesMaxError && opts.maxResults > 1 - e.findEdgesOptimized() - } -} - -func (e *EdgeQuery) addResult(r EdgeQueryResult) { - e.results = append(e.results, r) - if e.opts.maxResults == 1 { - // Optimization for the common case where only the closest edge is wanted. - e.distanceLimit = r.distance.sub(e.target.distance().fromChordAngle(e.opts.maxError)) - } - // TODO(roberts): Add the other if/else cases when a different data structure - // is used for the results. -} - -func (e *EdgeQuery) maybeAddResult(shape Shape, edgeID int32) { - if _, ok := e.testedEdges[ShapeEdgeID{e.index.idForShape(shape), edgeID}]; e.avoidDuplicates && !ok { - return - } - edge := shape.Edge(int(edgeID)) - dist := e.distanceLimit - - if dist, ok := e.target.updateDistanceToEdge(edge, dist); ok { - e.addResult(EdgeQueryResult{dist, e.index.idForShape(shape), edgeID}) - } -} - -func (e *EdgeQuery) findEdgesBruteForce() { - // Range over all shapes in the index. Does order matter here? if so - // switch to for i = 0 .. n? - for _, shape := range e.index.shapes { - // TODO(roberts): can this happen if we are only ranging over current entries? - if shape == nil { - continue - } - for edgeID := int32(0); edgeID < int32(shape.NumEdges()); edgeID++ { - e.maybeAddResult(shape, edgeID) - } - } -} - -func (e *EdgeQuery) findEdgesOptimized() { - e.initQueue() - // Repeatedly find the closest Cell to "target" and either split it into - // its four children or process all of its edges. - for e.queue.size() > 0 { - // We need to copy the top entry before removing it, and we need to - // remove it before adding any new entries to the queue. - entry := e.queue.pop() - - if !entry.distance.less(e.distanceLimit) { - e.queue.reset() // Clear any remaining entries. - break - } - // If this is already known to be an index cell, just process it. 
- if entry.indexCell != nil { - e.processEdges(entry) - continue - } - // Otherwise split the cell into its four children. Before adding a - // child back to the queue, we first check whether it is empty. We do - // this in two seek operations rather than four by seeking to the key - // between children 0 and 1 and to the key between children 2 and 3. - id := entry.id - ch := id.Children() - e.iter.seek(ch[1].RangeMin()) - - if !e.iter.Done() && e.iter.CellID() <= ch[1].RangeMax() { - e.processOrEnqueueCell(ch[1]) - } - if e.iter.Prev() && e.iter.CellID() >= id.RangeMin() { - e.processOrEnqueueCell(ch[0]) - } - - e.iter.seek(ch[3].RangeMin()) - if !e.iter.Done() && e.iter.CellID() <= id.RangeMax() { - e.processOrEnqueueCell(ch[3]) - } - if e.iter.Prev() && e.iter.CellID() >= ch[2].RangeMin() { - e.processOrEnqueueCell(ch[2]) - } - } -} - -func (e *EdgeQuery) processOrEnqueueCell(id CellID) { - if e.iter.CellID() == id { - e.processOrEnqueue(id, e.iter.IndexCell()) - } else { - e.processOrEnqueue(id, nil) - } -} - -func (e *EdgeQuery) initQueue() { - if len(e.indexCovering) == 0 { - // We delay iterator initialization until now to make queries on very - // small indexes a bit faster (i.e., where brute force is used). - e.iter = NewShapeIndexIterator(e.index) - } - - // Optimization: if the user is searching for just the closest edge, and the - // center of the target's bounding cap happens to intersect an index cell, - // then we try to limit the search region to a small disc by first - // processing the edges in that cell. This sets distance_limit_ based on - // the closest edge in that cell, which we can then use to limit the search - // area. This means that the cell containing "target" will be processed - // twice, but in general this is still faster. - // - // TODO(roberts): Even if the cap center is not contained, we could still - // process one or both of the adjacent index cells in CellID order, - // provided that those cells are closer than distanceLimit. 
- cb := e.target.capBound() - if cb.IsEmpty() { - return // Empty target. - } - - if e.opts.maxResults == 1 && e.iter.LocatePoint(cb.Center()) { - e.processEdges(&queryQueueEntry{ - distance: e.target.distance().zero(), - id: e.iter.CellID(), - indexCell: e.iter.IndexCell(), - }) - // Skip the rest of the algorithm if we found an intersecting edge. - if e.distanceLimit == e.target.distance().zero() { - return - } - } - if len(e.indexCovering) == 0 { - e.initCovering() - } - if e.distanceLimit == e.target.distance().infinity() { - // Start with the precomputed index covering. - for i := range e.indexCovering { - e.processOrEnqueue(e.indexCovering[i], e.indexCells[i]) - } - } else { - // Compute a covering of the search disc and intersect it with the - // precomputed index covering. - coverer := &RegionCoverer{MaxCells: 4, LevelMod: 1, MaxLevel: maxLevel} - - radius := cb.Radius() + e.distanceLimit.chordAngleBound().Angle() - searchCB := CapFromCenterAngle(cb.Center(), radius) - maxDistCover := coverer.FastCovering(searchCB) - e.initialCells = CellUnionFromIntersection(e.indexCovering, maxDistCover) - - // Now we need to clean up the initial cells to ensure that they all - // contain at least one cell of the ShapeIndex. (Some may not intersect - // the index at all, while other may be descendants of an index cell.) - i, j := 0, 0 - for i < len(e.initialCells) { - idI := e.initialCells[i] - // Find the top-level cell that contains this initial cell. - for e.indexCovering[j].RangeMax() < idI { - j++ - } - - idJ := e.indexCovering[j] - if idI == idJ { - // This initial cell is one of the top-level cells. Use the - // precomputed ShapeIndexCell pointer to avoid an index seek. - e.processOrEnqueue(idJ, e.indexCells[j]) - i++ - j++ - } else { - // This initial cell is a proper descendant of a top-level cell. - // Check how it is related to the cells of the ShapeIndex. - r := e.iter.LocateCellID(idI) - if r == Indexed { - // This cell is a descendant of an index cell. 
- // Enqueue it and skip any other initial cells - // that are also descendants of this cell. - e.processOrEnqueue(e.iter.CellID(), e.iter.IndexCell()) - lastID := e.iter.CellID().RangeMax() - for i < len(e.initialCells) && e.initialCells[i] <= lastID { - i++ - } - } else { - // Enqueue the cell only if it contains at least one index cell. - if r == Subdivided { - e.processOrEnqueue(idI, nil) - } - i++ - } - } - } - } -} - -func (e *EdgeQuery) initCovering() { - // Find the range of Cells spanned by the index and choose a level such - // that the entire index can be covered with just a few cells. These are - // the "top-level" cells. There are two cases: - // - // - If the index spans more than one face, then there is one top-level cell - // per spanned face, just big enough to cover the index cells on that face. - // - // - If the index spans only one face, then we find the smallest cell "C" - // that covers the index cells on that face (just like the case above). - // Then for each of the 4 children of "C", if the child contains any index - // cells then we create a top-level cell that is big enough to just fit - // those index cells (i.e., shrinking the child as much as possible to fit - // its contents). This essentially replicates what would happen if we - // started with "C" as the top-level cell, since "C" would immediately be - // split, except that we take the time to prune the children further since - // this will save work on every subsequent query. - e.indexCovering = make([]CellID, 0, 6) - - // TODO(roberts): Use a single iterator below and save position - // information using pair {CellID, ShapeIndexCell}. - next := NewShapeIndexIterator(e.index, IteratorBegin) - last := NewShapeIndexIterator(e.index, IteratorEnd) - last.Prev() - if next.CellID() != last.CellID() { - // The index has at least two cells. 
Choose a level such that the entire - // index can be spanned with at most 6 cells (if the index spans multiple - // faces) or 4 cells (it the index spans a single face). - level, ok := next.CellID().CommonAncestorLevel(last.CellID()) - if !ok { - level = 0 - } else { - level++ - } - - // Visit each potential top-level cell except the last (handled below). - lastID := last.CellID().Parent(level) - for id := next.CellID().Parent(level); id != lastID; id = id.Next() { - // Skip any top-level cells that don't contain any index cells. - if id.RangeMax() < next.CellID() { - continue - } - - // Find the range of index cells contained by this top-level cell and - // then shrink the cell if necessary so that it just covers them. - cellFirst := next.clone() - next.seek(id.RangeMax().Next()) - cellLast := next.clone() - cellLast.Prev() - e.addInitialRange(cellFirst, cellLast) - break - } - - } - e.addInitialRange(next, last) -} - -// addInitialRange adds an entry to the indexCovering and indexCells that covers the given -// inclusive range of cells. -// -// This requires that first and last cells have a common ancestor. -func (e *EdgeQuery) addInitialRange(first, last *ShapeIndexIterator) { - if first.CellID() == last.CellID() { - // The range consists of a single index cell. - e.indexCovering = append(e.indexCovering, first.CellID()) - e.indexCells = append(e.indexCells, first.IndexCell()) - } else { - // Add the lowest common ancestor of the given range. - level, _ := first.CellID().CommonAncestorLevel(last.CellID()) - e.indexCovering = append(e.indexCovering, first.CellID().Parent(level)) - e.indexCells = append(e.indexCells, nil) - } -} - -// processEdges processes all the edges of the given index cell. 
-func (e *EdgeQuery) processEdges(entry *queryQueueEntry) { - for _, clipped := range entry.indexCell.shapes { - shape := e.index.Shape(clipped.shapeID) - for j := 0; j < clipped.numEdges(); j++ { - e.maybeAddResult(shape, int32(clipped.edges[j])) - } - } -} - -// processOrEnqueue the given cell id and indexCell. -func (e *EdgeQuery) processOrEnqueue(id CellID, indexCell *ShapeIndexCell) { - if indexCell != nil { - // If this index cell has only a few edges, then it is faster to check - // them directly rather than computing the minimum distance to the Cell - // and inserting it into the queue. - const minEdgesToEnqueue = 10 - numEdges := indexCell.numEdges() - if numEdges == 0 { - return - } - if numEdges < minEdgesToEnqueue { - // Set "distance" to zero to avoid the expense of computing it. - e.processEdges(&queryQueueEntry{ - distance: e.target.distance().zero(), - id: id, - indexCell: indexCell, - }) - return - } - } - - // Otherwise compute the minimum distance to any point in the cell and add - // it to the priority queue. - cell := CellFromCellID(id) - dist := e.distanceLimit - var ok bool - if dist, ok = e.target.updateDistanceToCell(cell, dist); !ok { - return - } - if e.useConservativeCellDistance { - // Ensure that "distance" is a lower bound on the true distance to the cell. - dist = dist.sub(e.target.distance().fromChordAngle(e.opts.maxError)) - } - - e.queue.push(&queryQueueEntry{ - distance: dist, - id: id, - indexCell: indexCell, - }) -} - -// TODO(roberts): Remaining pieces -// GetEdge -// Project diff --git a/vendor/github.com/golang/geo/s2/edge_tessellator.go b/vendor/github.com/golang/geo/s2/edge_tessellator.go deleted file mode 100644 index 1d5805c26..000000000 --- a/vendor/github.com/golang/geo/s2/edge_tessellator.go +++ /dev/null @@ -1,291 +0,0 @@ -// Copyright 2018 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package s2 - -import ( - "github.com/golang/geo/r2" - "github.com/golang/geo/s1" -) - -// Tessellation is implemented by subdividing the edge until the estimated -// maximum error is below the given tolerance. Estimating error is a hard -// problem, especially when the only methods available are point evaluation of -// the projection and its inverse. (These are the only methods that -// Projection provides, which makes it easier and less error-prone to -// implement new projections.) -// -// One technique that significantly increases robustness is to treat the -// geodesic and projected edges as parametric curves rather than geometric ones. -// Given a spherical edge AB and a projection p:S2->R2, let f(t) be the -// normalized arc length parametrization of AB and let g(t) be the normalized -// arc length parameterization of the projected edge p(A)p(B). (In other words, -// f(0)=A, f(1)=B, g(0)=p(A), g(1)=p(B).) We now define the geometric error as -// the maximum distance from the point p^-1(g(t)) to the geodesic edge AB for -// any t in [0,1], where p^-1 denotes the inverse projection. In other words, -// the geometric error is the maximum distance from any point on the projected -// edge (mapped back onto the sphere) to the geodesic edge AB. On the other -// hand we define the parametric error as the maximum distance between the -// points f(t) and p^-1(g(t)) for any t in [0,1], i.e. the maximum distance -// (measured on the sphere) between the geodesic and projected points at the -// same interpolation fraction t. 
-// -// The easiest way to estimate the parametric error is to simply evaluate both -// edges at their midpoints and measure the distance between them (the "midpoint -// method"). This is very fast and works quite well for most edges, however it -// has one major drawback: it doesn't handle points of inflection (i.e., points -// where the curvature changes sign). For example, edges in the Mercator and -// Plate Carree projections always curve towards the equator relative to the -// corresponding geodesic edge, so in these projections there is a point of -// inflection whenever the projected edge crosses the equator. The worst case -// occurs when the edge endpoints have different longitudes but the same -// absolute latitude, since in that case the error is non-zero but the edges -// have exactly the same midpoint (on the equator). -// -// One solution to this problem is to split the input edges at all inflection -// points (i.e., along the equator in the case of the Mercator and Plate Carree -// projections). However for general projections these inflection points can -// occur anywhere on the sphere (e.g., consider the Transverse Mercator -// projection). This could be addressed by adding methods to the S2Projection -// interface to split edges at inflection points but this would make it harder -// and more error-prone to implement new projections. -// -// Another problem with this approach is that the midpoint method sometimes -// underestimates the true error even when edges do not cross the equator. -// For the Plate Carree and Mercator projections, the midpoint method can -// underestimate the error by up to 3%. -// -// Both of these problems can be solved as follows. We assume that the error -// can be modeled as a convex combination of two worst-case functions, one -// where the error is maximized at the edge midpoint and another where the -// error is *minimized* (i.e., zero) at the edge midpoint. 
For example, we -// could choose these functions as: -// -// E1(x) = 1 - x^2 -// E2(x) = x * (1 - x^2) -// -// where for convenience we use an interpolation parameter "x" in the range -// [-1, 1] rather than the original "t" in the range [0, 1]. Note that both -// error functions must have roots at x = {-1, 1} since the error must be zero -// at the edge endpoints. E1 is simply a parabola whose maximum value is 1 -// attained at x = 0, while E2 is a cubic with an additional root at x = 0, -// and whose maximum value is 2 * sqrt(3) / 9 attained at x = 1 / sqrt(3). -// -// Next, it is convenient to scale these functions so that the both have a -// maximum value of 1. E1 already satisfies this requirement, and we simply -// redefine E2 as -// -// E2(x) = x * (1 - x^2) / (2 * sqrt(3) / 9) -// -// Now define x0 to be the point where these two functions intersect, i.e. the -// point in the range (-1, 1) where E1(x0) = E2(x0). This value has the very -// convenient property that if we evaluate the actual error E(x0), then the -// maximum error on the entire interval [-1, 1] is bounded by -// -// E(x) <= E(x0) / E1(x0) -// -// since whether the error is modeled using E1 or E2, the resulting function -// has the same maximum value (namely E(x0) / E1(x0)). If it is modeled as -// some other convex combination of E1 and E2, the maximum value can only -// decrease. -// -// Finally, since E2 is not symmetric about the y-axis, we must also allow for -// the possibility that the error is a convex combination of E1 and -E2. This -// can be handled by evaluating the error at E(-x0) as well, and then -// computing the final error bound as -// -// E(x) <= max(E(x0), E(-x0)) / E1(x0) . -// -// Effectively, this method is simply evaluating the error at two points about -// 1/3 and 2/3 of the way along the edges, and then scaling the maximum of -// these two errors by a constant factor. 
Intuitively, the reason this works -// is that if the two edges cross somewhere in the interior, then at least one -// of these points will be far from the crossing. -// -// The actual algorithm implemented below has some additional refinements. -// First, edges longer than 90 degrees are always subdivided; this avoids -// various unusual situations that can happen with very long edges, and there -// is really no reason to avoid adding vertices to edges that are so long. -// -// Second, the error function E1 above needs to be modified to take into -// account spherical distortions. (It turns out that spherical distortions are -// beneficial in the case of E2, i.e. they only make its error estimates -// slightly more conservative.) To do this, we model E1 as the maximum error -// in a Plate Carree edge of length 90 degrees or less. This turns out to be -// an edge from 45:-90 to 45:90 (in lat:lng format). The corresponding error -// as a function of "x" in the range [-1, 1] can be computed as the distance -// between the Plate Caree edge point (45, 90 * x) and the geodesic -// edge point (90 - 45 * abs(x), 90 * sgn(x)). Using the Haversine formula, -// the corresponding function E1 (normalized to have a maximum value of 1) is: -// -// E1(x) = -// asin(sqrt(sin(Pi / 8 * (1 - x)) ^ 2 + -// sin(Pi / 4 * (1 - x)) ^ 2 * cos(Pi / 4) * sin(Pi / 4 * x))) / -// asin(sqrt((1 - 1 / sqrt(2)) / 2)) -// -// Note that this function does not need to be evaluated at runtime, it -// simply affects the calculation of the value x0 where E1(x0) = E2(x0) -// and the corresponding scaling factor C = 1 / E1(x0). -// -// ------------------------------------------------------------------ -// -// In the case of the Mercator and Plate Carree projections this strategy -// produces a conservative upper bound (verified using 10 million random -// edges). Furthermore the bound is nearly tight; the scaling constant is -// C = 1.19289, whereas the maximum observed value was 1.19254. 
-// -// Compared to the simpler midpoint evaluation method, this strategy requires -// more function evaluations (currently twice as many, but with a smarter -// tessellation algorithm it will only be 50% more). It also results in a -// small amount of additional tessellation (about 1.5%) compared to the -// midpoint method, but this is due almost entirely to the fact that the -// midpoint method does not yield conservative error estimates. -// -// For random edges with a tolerance of 1 meter, the expected amount of -// overtessellation is as follows: -// -// Midpoint Method Cubic Method -// Plate Carree 1.8% 3.0% -// Mercator 15.8% 17.4% - -const ( - // tessellationInterpolationFraction is the fraction at which the two edges - // are evaluated in order to measure the error between them. (Edges are - // evaluated at two points measured this fraction from either end.) - tessellationInterpolationFraction = 0.31215691082248312 - tessellationScaleFactor = 0.83829992569888509 - - // minTessellationTolerance is the minimum supported tolerance (which - // corresponds to a distance less than 1 micrometer on the Earth's - // surface, but is still much larger than the expected projection and - // interpolation errors). - minTessellationTolerance s1.Angle = 1e-13 -) - -// EdgeTessellator converts an edge in a given projection (e.g., Mercator) into -// a chain of spherical geodesic edges such that the maximum distance between -// the original edge and the geodesic edge chain is at most the requested -// tolerance. Similarly, it can convert a spherical geodesic edge into a chain -// of edges in a given 2D projection such that the maximum distance between the -// geodesic edge and the chain of projected edges is at most the requested tolerance. 
-// -// Method | Input | Output -// ------------|------------------------|----------------------- -// Projected | S2 geodesics | Planar projected edges -// Unprojected | Planar projected edges | S2 geodesics -type EdgeTessellator struct { - projection Projection - - // The given tolerance scaled by a constant fraction so that it can be - // compared against the result returned by estimateMaxError. - scaledTolerance s1.ChordAngle -} - -// NewEdgeTessellator creates a new edge tessellator for the given projection and tolerance. -func NewEdgeTessellator(p Projection, tolerance s1.Angle) *EdgeTessellator { - return &EdgeTessellator{ - projection: p, - scaledTolerance: s1.ChordAngleFromAngle(maxAngle(tolerance, minTessellationTolerance)), - } -} - -// AppendProjected converts the spherical geodesic edge AB to a chain of planar edges -// in the given projection and returns the corresponding vertices. -// -// If the given projection has one or more coordinate axes that wrap, then -// every vertex's coordinates will be as close as possible to the previous -// vertex's coordinates. Note that this may yield vertices whose -// coordinates are outside the usual range. For example, tessellating the -// edge (0:170, 0:-170) (in lat:lng notation) yields (0:170, 0:190). -func (e *EdgeTessellator) AppendProjected(a, b Point, vertices []r2.Point) []r2.Point { - pa := e.projection.Project(a) - if len(vertices) == 0 { - vertices = []r2.Point{pa} - } else { - pa = e.projection.WrapDestination(vertices[len(vertices)-1], pa) - } - - pb := e.projection.Project(b) - return e.appendProjected(pa, a, pb, b, vertices) -} - -// appendProjected splits a geodesic edge AB as necessary and returns the -// projected vertices appended to the given vertices. 
-// -// The maximum recursion depth is (math.Pi / minTessellationTolerance) < 45 -func (e *EdgeTessellator) appendProjected(pa r2.Point, a Point, pbIn r2.Point, b Point, vertices []r2.Point) []r2.Point { - pb := e.projection.WrapDestination(pa, pbIn) - if e.estimateMaxError(pa, a, pb, b) <= e.scaledTolerance { - return append(vertices, pb) - } - - mid := Point{a.Add(b.Vector).Normalize()} - pmid := e.projection.WrapDestination(pa, e.projection.Project(mid)) - vertices = e.appendProjected(pa, a, pmid, mid, vertices) - return e.appendProjected(pmid, mid, pb, b, vertices) -} - -// AppendUnprojected converts the planar edge AB in the given projection to a chain of -// spherical geodesic edges and returns the vertices. -// -// Note that to construct a Loop, you must eliminate the duplicate first and last -// vertex. Note also that if the given projection involves coordinate wrapping -// (e.g. across the 180 degree meridian) then the first and last vertices may not -// be exactly the same. -func (e *EdgeTessellator) AppendUnprojected(pa, pb r2.Point, vertices []Point) []Point { - a := e.projection.Unproject(pa) - b := e.projection.Unproject(pb) - - if len(vertices) == 0 { - vertices = []Point{a} - } - - // Note that coordinate wrapping can create a small amount of error. For - // example in the edge chain "0:-175, 0:179, 0:-177", the first edge is - // transformed into "0:-175, 0:-181" while the second is transformed into - // "0:179, 0:183". The two coordinate pairs for the middle vertex - // ("0:-181" and "0:179") may not yield exactly the same S2Point. - return e.appendUnprojected(pa, a, pb, b, vertices) -} - -// appendUnprojected interpolates a projected edge and appends the corresponding -// points on the sphere. 
-func (e *EdgeTessellator) appendUnprojected(pa r2.Point, a Point, pbIn r2.Point, b Point, vertices []Point) []Point { - pb := e.projection.WrapDestination(pa, pbIn) - if e.estimateMaxError(pa, a, pb, b) <= e.scaledTolerance { - return append(vertices, b) - } - - pmid := e.projection.Interpolate(0.5, pa, pb) - mid := e.projection.Unproject(pmid) - - vertices = e.appendUnprojected(pa, a, pmid, mid, vertices) - return e.appendUnprojected(pmid, mid, pb, b, vertices) -} - -func (e *EdgeTessellator) estimateMaxError(pa r2.Point, a Point, pb r2.Point, b Point) s1.ChordAngle { - // See the algorithm description at the top of this file. - // We always tessellate edges longer than 90 degrees on the sphere, since the - // approximation below is not robust enough to handle such edges. - if a.Dot(b.Vector) < -1e-14 { - return s1.InfChordAngle() - } - t1 := tessellationInterpolationFraction - t2 := 1 - tessellationInterpolationFraction - mid1 := Interpolate(t1, a, b) - mid2 := Interpolate(t2, a, b) - pmid1 := e.projection.Unproject(e.projection.Interpolate(t1, pa, pb)) - pmid2 := e.projection.Unproject(e.projection.Interpolate(t2, pa, pb)) - return maxChordAngle(ChordAngleBetweenPoints(mid1, pmid1), ChordAngleBetweenPoints(mid2, pmid2)) -} diff --git a/vendor/github.com/golang/geo/s2/encode.go b/vendor/github.com/golang/geo/s2/encode.go deleted file mode 100644 index 00d0adc71..000000000 --- a/vendor/github.com/golang/geo/s2/encode.go +++ /dev/null @@ -1,224 +0,0 @@ -// Copyright 2017 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package s2 - -import ( - "encoding/binary" - "io" - "math" -) - -const ( - // encodingVersion is the current version of the encoding - // format that is compatible with C++ and other S2 libraries. - encodingVersion = int8(1) - - // encodingCompressedVersion is the current version of the - // compressed format. - encodingCompressedVersion = int8(4) -) - -// encoder handles the specifics of encoding for S2 types. -type encoder struct { - w io.Writer // the real writer passed to Encode - err error -} - -func (e *encoder) writeUvarint(x uint64) { - if e.err != nil { - return - } - var buf [binary.MaxVarintLen64]byte - n := binary.PutUvarint(buf[:], x) - _, e.err = e.w.Write(buf[:n]) -} - -func (e *encoder) writeBool(x bool) { - if e.err != nil { - return - } - var val int8 - if x { - val = 1 - } - e.err = binary.Write(e.w, binary.LittleEndian, val) -} - -func (e *encoder) writeInt8(x int8) { - if e.err != nil { - return - } - e.err = binary.Write(e.w, binary.LittleEndian, x) -} - -func (e *encoder) writeInt16(x int16) { - if e.err != nil { - return - } - e.err = binary.Write(e.w, binary.LittleEndian, x) -} - -func (e *encoder) writeInt32(x int32) { - if e.err != nil { - return - } - e.err = binary.Write(e.w, binary.LittleEndian, x) -} - -func (e *encoder) writeInt64(x int64) { - if e.err != nil { - return - } - e.err = binary.Write(e.w, binary.LittleEndian, x) -} - -func (e *encoder) writeUint8(x uint8) { - if e.err != nil { - return - } - _, e.err = e.w.Write([]byte{x}) -} - -func (e *encoder) writeUint32(x uint32) { - if e.err != nil { - return - } - e.err = binary.Write(e.w, binary.LittleEndian, x) -} - -func (e *encoder) writeUint64(x uint64) { - if e.err != nil { - return - } - e.err = binary.Write(e.w, binary.LittleEndian, x) -} - -func (e *encoder) writeFloat32(x float32) { - if e.err != nil { - return - } - e.err = binary.Write(e.w, binary.LittleEndian, 
x) -} - -func (e *encoder) writeFloat64(x float64) { - if e.err != nil { - return - } - e.err = binary.Write(e.w, binary.LittleEndian, x) -} - -type byteReader interface { - io.Reader - io.ByteReader -} - -// byteReaderAdapter embellishes an io.Reader with a ReadByte method, -// so that it implements the io.ByteReader interface. -type byteReaderAdapter struct { - io.Reader -} - -func (b byteReaderAdapter) ReadByte() (byte, error) { - buf := []byte{0} - _, err := io.ReadFull(b, buf) - return buf[0], err -} - -func asByteReader(r io.Reader) byteReader { - if br, ok := r.(byteReader); ok { - return br - } - return byteReaderAdapter{r} -} - -type decoder struct { - r byteReader // the real reader passed to Decode - err error - buf []byte -} - -// Get a buffer of size 8, to avoid allocating over and over. -func (d *decoder) buffer() []byte { - if d.buf == nil { - d.buf = make([]byte, 8) - } - return d.buf -} - -func (d *decoder) readBool() (x bool) { - if d.err != nil { - return - } - var val int8 - d.err = binary.Read(d.r, binary.LittleEndian, &val) - return val == 1 -} - -func (d *decoder) readInt8() (x int8) { - if d.err != nil { - return - } - d.err = binary.Read(d.r, binary.LittleEndian, &x) - return -} - -func (d *decoder) readInt64() (x int64) { - if d.err != nil { - return - } - d.err = binary.Read(d.r, binary.LittleEndian, &x) - return -} - -func (d *decoder) readUint8() (x uint8) { - if d.err != nil { - return - } - x, d.err = d.r.ReadByte() - return -} - -func (d *decoder) readUint32() (x uint32) { - if d.err != nil { - return - } - d.err = binary.Read(d.r, binary.LittleEndian, &x) - return -} - -func (d *decoder) readUint64() (x uint64) { - if d.err != nil { - return - } - d.err = binary.Read(d.r, binary.LittleEndian, &x) - return -} - -func (d *decoder) readFloat64() float64 { - if d.err != nil { - return 0 - } - buf := d.buffer() - _, d.err = io.ReadFull(d.r, buf) - return math.Float64frombits(binary.LittleEndian.Uint64(buf)) -} - -func (d *decoder) 
readUvarint() (x uint64) { - if d.err != nil { - return - } - x, d.err = binary.ReadUvarint(d.r) - return -} diff --git a/vendor/github.com/golang/geo/s2/interleave.go b/vendor/github.com/golang/geo/s2/interleave.go deleted file mode 100644 index 6ac6ef58d..000000000 --- a/vendor/github.com/golang/geo/s2/interleave.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2017 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package s2 - -/* -The lookup table below can convert a sequence of interleaved 8 bits into -non-interleaved 4 bits. The table can convert both odd and even bits at the -same time, and lut[x & 0x55] converts the even bits (bits 0, 2, 4 and 6), -while lut[x & 0xaa] converts the odd bits (bits 1, 3, 5 and 7). 
- -The lookup table below was generated using the following python code: - - def deinterleave(bits): - if bits == 0: return 0 - if bits < 4: return 1 - return deinterleave(bits / 4) * 2 + deinterleave(bits & 3) - - for i in range(256): print "0x%x," % deinterleave(i), -*/ -var deinterleaveLookup = [256]uint32{ - 0x0, 0x1, 0x1, 0x1, 0x2, 0x3, 0x3, 0x3, - 0x2, 0x3, 0x3, 0x3, 0x2, 0x3, 0x3, 0x3, - 0x4, 0x5, 0x5, 0x5, 0x6, 0x7, 0x7, 0x7, - 0x6, 0x7, 0x7, 0x7, 0x6, 0x7, 0x7, 0x7, - 0x4, 0x5, 0x5, 0x5, 0x6, 0x7, 0x7, 0x7, - 0x6, 0x7, 0x7, 0x7, 0x6, 0x7, 0x7, 0x7, - 0x4, 0x5, 0x5, 0x5, 0x6, 0x7, 0x7, 0x7, - 0x6, 0x7, 0x7, 0x7, 0x6, 0x7, 0x7, 0x7, - - 0x8, 0x9, 0x9, 0x9, 0xa, 0xb, 0xb, 0xb, - 0xa, 0xb, 0xb, 0xb, 0xa, 0xb, 0xb, 0xb, - 0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf, - 0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf, - 0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf, - 0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf, - 0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf, - 0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf, - - 0x8, 0x9, 0x9, 0x9, 0xa, 0xb, 0xb, 0xb, - 0xa, 0xb, 0xb, 0xb, 0xa, 0xb, 0xb, 0xb, - 0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf, - 0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf, - 0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf, - 0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf, - 0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf, - 0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf, - - 0x8, 0x9, 0x9, 0x9, 0xa, 0xb, 0xb, 0xb, - 0xa, 0xb, 0xb, 0xb, 0xa, 0xb, 0xb, 0xb, - 0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf, - 0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf, - 0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf, - 0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf, - 0xc, 0xd, 0xd, 0xd, 0xe, 0xf, 0xf, 0xf, - 0xe, 0xf, 0xf, 0xf, 0xe, 0xf, 0xf, 0xf, -} - -// deinterleaveUint32 decodes the interleaved values. 
-func deinterleaveUint32(code uint64) (uint32, uint32) { - x := (deinterleaveLookup[code&0x55]) | - (deinterleaveLookup[(code>>8)&0x55] << 4) | - (deinterleaveLookup[(code>>16)&0x55] << 8) | - (deinterleaveLookup[(code>>24)&0x55] << 12) | - (deinterleaveLookup[(code>>32)&0x55] << 16) | - (deinterleaveLookup[(code>>40)&0x55] << 20) | - (deinterleaveLookup[(code>>48)&0x55] << 24) | - (deinterleaveLookup[(code>>56)&0x55] << 28) - y := (deinterleaveLookup[code&0xaa]) | - (deinterleaveLookup[(code>>8)&0xaa] << 4) | - (deinterleaveLookup[(code>>16)&0xaa] << 8) | - (deinterleaveLookup[(code>>24)&0xaa] << 12) | - (deinterleaveLookup[(code>>32)&0xaa] << 16) | - (deinterleaveLookup[(code>>40)&0xaa] << 20) | - (deinterleaveLookup[(code>>48)&0xaa] << 24) | - (deinterleaveLookup[(code>>56)&0xaa] << 28) - return x, y -} - -var interleaveLookup = [256]uint64{ - 0x0000, 0x0001, 0x0004, 0x0005, 0x0010, 0x0011, 0x0014, 0x0015, - 0x0040, 0x0041, 0x0044, 0x0045, 0x0050, 0x0051, 0x0054, 0x0055, - 0x0100, 0x0101, 0x0104, 0x0105, 0x0110, 0x0111, 0x0114, 0x0115, - 0x0140, 0x0141, 0x0144, 0x0145, 0x0150, 0x0151, 0x0154, 0x0155, - 0x0400, 0x0401, 0x0404, 0x0405, 0x0410, 0x0411, 0x0414, 0x0415, - 0x0440, 0x0441, 0x0444, 0x0445, 0x0450, 0x0451, 0x0454, 0x0455, - 0x0500, 0x0501, 0x0504, 0x0505, 0x0510, 0x0511, 0x0514, 0x0515, - 0x0540, 0x0541, 0x0544, 0x0545, 0x0550, 0x0551, 0x0554, 0x0555, - - 0x1000, 0x1001, 0x1004, 0x1005, 0x1010, 0x1011, 0x1014, 0x1015, - 0x1040, 0x1041, 0x1044, 0x1045, 0x1050, 0x1051, 0x1054, 0x1055, - 0x1100, 0x1101, 0x1104, 0x1105, 0x1110, 0x1111, 0x1114, 0x1115, - 0x1140, 0x1141, 0x1144, 0x1145, 0x1150, 0x1151, 0x1154, 0x1155, - 0x1400, 0x1401, 0x1404, 0x1405, 0x1410, 0x1411, 0x1414, 0x1415, - 0x1440, 0x1441, 0x1444, 0x1445, 0x1450, 0x1451, 0x1454, 0x1455, - 0x1500, 0x1501, 0x1504, 0x1505, 0x1510, 0x1511, 0x1514, 0x1515, - 0x1540, 0x1541, 0x1544, 0x1545, 0x1550, 0x1551, 0x1554, 0x1555, - - 0x4000, 0x4001, 0x4004, 0x4005, 0x4010, 0x4011, 0x4014, 0x4015, - 0x4040, 
0x4041, 0x4044, 0x4045, 0x4050, 0x4051, 0x4054, 0x4055, - 0x4100, 0x4101, 0x4104, 0x4105, 0x4110, 0x4111, 0x4114, 0x4115, - 0x4140, 0x4141, 0x4144, 0x4145, 0x4150, 0x4151, 0x4154, 0x4155, - 0x4400, 0x4401, 0x4404, 0x4405, 0x4410, 0x4411, 0x4414, 0x4415, - 0x4440, 0x4441, 0x4444, 0x4445, 0x4450, 0x4451, 0x4454, 0x4455, - 0x4500, 0x4501, 0x4504, 0x4505, 0x4510, 0x4511, 0x4514, 0x4515, - 0x4540, 0x4541, 0x4544, 0x4545, 0x4550, 0x4551, 0x4554, 0x4555, - - 0x5000, 0x5001, 0x5004, 0x5005, 0x5010, 0x5011, 0x5014, 0x5015, - 0x5040, 0x5041, 0x5044, 0x5045, 0x5050, 0x5051, 0x5054, 0x5055, - 0x5100, 0x5101, 0x5104, 0x5105, 0x5110, 0x5111, 0x5114, 0x5115, - 0x5140, 0x5141, 0x5144, 0x5145, 0x5150, 0x5151, 0x5154, 0x5155, - 0x5400, 0x5401, 0x5404, 0x5405, 0x5410, 0x5411, 0x5414, 0x5415, - 0x5440, 0x5441, 0x5444, 0x5445, 0x5450, 0x5451, 0x5454, 0x5455, - 0x5500, 0x5501, 0x5504, 0x5505, 0x5510, 0x5511, 0x5514, 0x5515, - 0x5540, 0x5541, 0x5544, 0x5545, 0x5550, 0x5551, 0x5554, 0x5555, -} - -// interleaveUint32 interleaves the given arguments into the return value. -// -// The 0-bit in val0 will be the 0-bit in the return value. -// The 0-bit in val1 will be the 1-bit in the return value. -// The 1-bit of val0 will be the 2-bit in the return value, and so on. -func interleaveUint32(x, y uint32) uint64 { - return (interleaveLookup[x&0xff]) | - (interleaveLookup[(x>>8)&0xff] << 16) | - (interleaveLookup[(x>>16)&0xff] << 32) | - (interleaveLookup[x>>24] << 48) | - (interleaveLookup[y&0xff] << 1) | - (interleaveLookup[(y>>8)&0xff] << 17) | - (interleaveLookup[(y>>16)&0xff] << 33) | - (interleaveLookup[y>>24] << 49) -} diff --git a/vendor/github.com/golang/geo/s2/latlng.go b/vendor/github.com/golang/geo/s2/latlng.go deleted file mode 100644 index a750304ab..000000000 --- a/vendor/github.com/golang/geo/s2/latlng.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package s2 - -import ( - "fmt" - "math" - - "github.com/golang/geo/r3" - "github.com/golang/geo/s1" -) - -const ( - northPoleLat = s1.Angle(math.Pi/2) * s1.Radian - southPoleLat = -northPoleLat -) - -// LatLng represents a point on the unit sphere as a pair of angles. -type LatLng struct { - Lat, Lng s1.Angle -} - -// LatLngFromDegrees returns a LatLng for the coordinates given in degrees. -func LatLngFromDegrees(lat, lng float64) LatLng { - return LatLng{s1.Angle(lat) * s1.Degree, s1.Angle(lng) * s1.Degree} -} - -// IsValid returns true iff the LatLng is normalized, with Lat ∈ [-π/2,π/2] and Lng ∈ [-π,π]. -func (ll LatLng) IsValid() bool { - return math.Abs(ll.Lat.Radians()) <= math.Pi/2 && math.Abs(ll.Lng.Radians()) <= math.Pi -} - -// Normalized returns the normalized version of the LatLng, -// with Lat clamped to [-π/2,π/2] and Lng wrapped in [-π,π]. -func (ll LatLng) Normalized() LatLng { - lat := ll.Lat - if lat > northPoleLat { - lat = northPoleLat - } else if lat < southPoleLat { - lat = southPoleLat - } - lng := s1.Angle(math.Remainder(ll.Lng.Radians(), 2*math.Pi)) * s1.Radian - return LatLng{lat, lng} -} - -func (ll LatLng) String() string { return fmt.Sprintf("[%v, %v]", ll.Lat, ll.Lng) } - -// Distance returns the angle between two LatLngs. -func (ll LatLng) Distance(ll2 LatLng) s1.Angle { - // Haversine formula, as used in C++ S2LatLng::GetDistance. 
- lat1, lat2 := ll.Lat.Radians(), ll2.Lat.Radians() - lng1, lng2 := ll.Lng.Radians(), ll2.Lng.Radians() - dlat := math.Sin(0.5 * (lat2 - lat1)) - dlng := math.Sin(0.5 * (lng2 - lng1)) - x := dlat*dlat + dlng*dlng*math.Cos(lat1)*math.Cos(lat2) - return s1.Angle(2*math.Atan2(math.Sqrt(x), math.Sqrt(math.Max(0, 1-x)))) * s1.Radian -} - -// NOTE(mikeperrow): The C++ implementation publicly exposes latitude/longitude -// functions. Let's see if that's really necessary before exposing the same functionality. - -func latitude(p Point) s1.Angle { - return s1.Angle(math.Atan2(p.Z, math.Sqrt(p.X*p.X+p.Y*p.Y))) * s1.Radian -} - -func longitude(p Point) s1.Angle { - return s1.Angle(math.Atan2(p.Y, p.X)) * s1.Radian -} - -// PointFromLatLng returns an Point for the given LatLng. -// The maximum error in the result is 1.5 * dblEpsilon. (This does not -// include the error of converting degrees, E5, E6, or E7 into radians.) -func PointFromLatLng(ll LatLng) Point { - phi := ll.Lat.Radians() - theta := ll.Lng.Radians() - cosphi := math.Cos(phi) - return Point{r3.Vector{math.Cos(theta) * cosphi, math.Sin(theta) * cosphi, math.Sin(phi)}} -} - -// LatLngFromPoint returns an LatLng for a given Point. -func LatLngFromPoint(p Point) LatLng { - return LatLng{latitude(p), longitude(p)} -} - -// ApproxEqual reports whether the latitude and longitude of the two LatLngs -// are the same up to a small tolerance. -func (ll LatLng) ApproxEqual(other LatLng) bool { - return ll.Lat.ApproxEqual(other.Lat) && ll.Lng.ApproxEqual(other.Lng) -} diff --git a/vendor/github.com/golang/geo/s2/lexicon.go b/vendor/github.com/golang/geo/s2/lexicon.go deleted file mode 100644 index 41cbffdc2..000000000 --- a/vendor/github.com/golang/geo/s2/lexicon.go +++ /dev/null @@ -1,175 +0,0 @@ -// Copyright 2020 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package s2 - -import ( - "encoding/binary" - "hash/adler32" - "math" - "sort" -) - -// TODO(roberts): If any of these are worth making public, change the -// method signatures and type names. - -// emptySetID represents the last ID that will ever be generated. -// (Non-negative IDs are reserved for singleton sets.) -var emptySetID = int32(math.MinInt32) - -// idSetLexicon compactly represents a set of non-negative -// integers such as array indices ("ID sets"). It is especially suitable when -// either (1) there are many duplicate sets, or (2) there are many singleton -// or empty sets. See also sequenceLexicon. -// -// Each distinct ID set is mapped to a 32-bit integer. Empty and singleton -// sets take up no additional space; the set itself is represented -// by the unique ID assigned to the set. Duplicate sets are automatically -// eliminated. Note also that ID sets are referred to using 32-bit integers -// rather than pointers. -type idSetLexicon struct { - idSets *sequenceLexicon -} - -func newIDSetLexicon() *idSetLexicon { - return &idSetLexicon{ - idSets: newSequenceLexicon(), - } -} - -// add adds the given set of integers to the lexicon if it is not already -// present, and return the unique ID for this set. The values are automatically -// sorted and duplicates are removed. -// -// The primary difference between this and sequenceLexicon are: -// 1. Empty and singleton sets are represented implicitly; they use no space. -// 2. 
Sets are represented rather than sequences; the ordering of values is -// not important and duplicates are removed. -// 3. The values must be 32-bit non-negative integers only. -func (l *idSetLexicon) add(ids ...int32) int32 { - // Empty sets have a special ID chosen not to conflict with other IDs. - if len(ids) == 0 { - return emptySetID - } - - // Singleton sets are represented by their element. - if len(ids) == 1 { - return ids[0] - } - - // Canonicalize the set by sorting and removing duplicates. - // - // Creates a new slice in order to not alter the supplied values. - set := uniqueInt32s(ids) - - // Non-singleton sets are represented by the bitwise complement of the ID - // returned by the sequenceLexicon - return ^l.idSets.add(set) -} - -// idSet returns the set of integers corresponding to an ID returned by add. -func (l *idSetLexicon) idSet(setID int32) []int32 { - if setID >= 0 { - return []int32{setID} - } - if setID == emptySetID { - return []int32{} - } - - return l.idSets.sequence(^setID) -} - -func (l *idSetLexicon) clear() { - l.idSets.clear() -} - -// sequenceLexicon compactly represents a sequence of values (e.g., tuples). -// It automatically eliminates duplicates slices, and maps the remaining -// sequences to sequentially increasing integer IDs. See also idSetLexicon. -// -// Each distinct sequence is mapped to a 32-bit integer. -type sequenceLexicon struct { - values []int32 - begins []uint32 - - // idSet is a mapping of a sequence hash to sequence index in the lexicon. - idSet map[uint32]int32 -} - -func newSequenceLexicon() *sequenceLexicon { - return &sequenceLexicon{ - begins: []uint32{0}, - idSet: make(map[uint32]int32), - } -} - -// clears all data from the lexicon. -func (l *sequenceLexicon) clear() { - l.values = nil - l.begins = []uint32{0} - l.idSet = make(map[uint32]int32) -} - -// add adds the given value to the lexicon if it is not already present, and -// returns its ID. IDs are assigned sequentially starting from zero. 
-func (l *sequenceLexicon) add(ids []int32) int32 { - if id, ok := l.idSet[hashSet(ids)]; ok { - return id - } - for _, v := range ids { - l.values = append(l.values, v) - } - l.begins = append(l.begins, uint32(len(l.values))) - - id := int32(len(l.begins)) - 2 - l.idSet[hashSet(ids)] = id - - return id -} - -// sequence returns the original sequence of values for the given ID. -func (l *sequenceLexicon) sequence(id int32) []int32 { - return l.values[l.begins[id]:l.begins[id+1]] -} - -// size reports the number of value sequences in the lexicon. -func (l *sequenceLexicon) size() int { - // Subtract one because the list of begins starts out with the first element set to 0. - return len(l.begins) - 1 -} - -// hash returns a hash of this sequence of int32s. -func hashSet(s []int32) uint32 { - // TODO(roberts): We just need a way to nicely hash all the values down to - // a 32-bit value. To ensure no unnecessary dependencies we use the core - // library types available to do this. Is there a better option? - a := adler32.New() - binary.Write(a, binary.LittleEndian, s) - return a.Sum32() -} - -// uniqueInt32s returns the sorted and uniqued set of int32s from the input. -func uniqueInt32s(in []int32) []int32 { - var vals []int32 - m := make(map[int32]bool) - for _, i := range in { - if m[i] { - continue - } - m[i] = true - vals = append(vals, i) - } - sort.Slice(vals, func(i, j int) bool { return vals[i] < vals[j] }) - return vals -} diff --git a/vendor/github.com/golang/geo/s2/loop.go b/vendor/github.com/golang/geo/s2/loop.go deleted file mode 100644 index bfb55ec1d..000000000 --- a/vendor/github.com/golang/geo/s2/loop.go +++ /dev/null @@ -1,1833 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package s2 - -import ( - "fmt" - "io" - "math" - - "github.com/golang/geo/r1" - "github.com/golang/geo/r3" - "github.com/golang/geo/s1" -) - -// Loop represents a simple spherical polygon. It consists of a sequence -// of vertices where the first vertex is implicitly connected to the -// last. All loops are defined to have a CCW orientation, i.e. the interior of -// the loop is on the left side of the edges. This implies that a clockwise -// loop enclosing a small area is interpreted to be a CCW loop enclosing a -// very large area. -// -// Loops are not allowed to have any duplicate vertices (whether adjacent or -// not). Non-adjacent edges are not allowed to intersect, and furthermore edges -// of length 180 degrees are not allowed (i.e., adjacent vertices cannot be -// antipodal). Loops must have at least 3 vertices (except for the "empty" and -// "full" loops discussed below). -// -// There are two special loops: the "empty" loop contains no points and the -// "full" loop contains all points. These loops do not have any edges, but to -// preserve the invariant that every loop can be represented as a vertex -// chain, they are defined as having exactly one vertex each (see EmptyLoop -// and FullLoop). -type Loop struct { - vertices []Point - - // originInside keeps a precomputed value whether this loop contains the origin - // versus computing from the set of vertices every time. 
- originInside bool - - // depth is the nesting depth of this Loop if it is contained by a Polygon - // or other shape and is used to determine if this loop represents a hole - // or a filled in portion. - depth int - - // bound is a conservative bound on all points contained by this loop. - // If l.ContainsPoint(P), then l.bound.ContainsPoint(P). - bound Rect - - // Since bound is not exact, it is possible that a loop A contains - // another loop B whose bounds are slightly larger. subregionBound - // has been expanded sufficiently to account for this error, i.e. - // if A.Contains(B), then A.subregionBound.Contains(B.bound). - subregionBound Rect - - // index is the spatial index for this Loop. - index *ShapeIndex -} - -// LoopFromPoints constructs a loop from the given points. -func LoopFromPoints(pts []Point) *Loop { - l := &Loop{ - vertices: pts, - index: NewShapeIndex(), - } - - l.initOriginAndBound() - return l -} - -// LoopFromCell constructs a loop corresponding to the given cell. -// -// Note that the loop and cell *do not* contain exactly the same set of -// points, because Loop and Cell have slightly different definitions of -// point containment. For example, a Cell vertex is contained by all -// four neighboring Cells, but it is contained by exactly one of four -// Loops constructed from those cells. As another example, the cell -// coverings of cell and LoopFromCell(cell) will be different, because the -// loop contains points on its boundary that actually belong to other cells -// (i.e., the covering will include a layer of neighboring cells). -func LoopFromCell(c Cell) *Loop { - l := &Loop{ - vertices: []Point{ - c.Vertex(0), - c.Vertex(1), - c.Vertex(2), - c.Vertex(3), - }, - index: NewShapeIndex(), - } - - l.initOriginAndBound() - return l -} - -// These two points are used for the special Empty and Full loops. 
-var ( - emptyLoopPoint = Point{r3.Vector{X: 0, Y: 0, Z: 1}} - fullLoopPoint = Point{r3.Vector{X: 0, Y: 0, Z: -1}} -) - -// EmptyLoop returns a special "empty" loop. -func EmptyLoop() *Loop { - return LoopFromPoints([]Point{emptyLoopPoint}) -} - -// FullLoop returns a special "full" loop. -func FullLoop() *Loop { - return LoopFromPoints([]Point{fullLoopPoint}) -} - -// initOriginAndBound sets the origin containment for the given point and then calls -// the initialization for the bounds objects and the internal index. -func (l *Loop) initOriginAndBound() { - if len(l.vertices) < 3 { - // Check for the special "empty" and "full" loops (which have one vertex). - if !l.isEmptyOrFull() { - l.originInside = false - return - } - - // This is the special empty or full loop, so the origin depends on if - // the vertex is in the southern hemisphere or not. - l.originInside = l.vertices[0].Z < 0 - } else { - // Point containment testing is done by counting edge crossings starting - // at a fixed point on the sphere (OriginPoint). We need to know whether - // the reference point (OriginPoint) is inside or outside the loop before - // we can construct the ShapeIndex. We do this by first guessing that - // it is outside, and then seeing whether we get the correct containment - // result for vertex 1. If the result is incorrect, the origin must be - // inside the loop. - // - // A loop with consecutive vertices A,B,C contains vertex B if and only if - // the fixed vector R = B.Ortho is contained by the wedge ABC. The - // wedge is closed at A and open at C, i.e. the point B is inside the loop - // if A = R but not if C = R. This convention is required for compatibility - // with VertexCrossing. (Note that we can't use OriginPoint - // as the fixed vector because of the possibility that B == OriginPoint.) 
- l.originInside = false - v1Inside := OrderedCCW(Point{l.vertices[1].Ortho()}, l.vertices[0], l.vertices[2], l.vertices[1]) - if v1Inside != l.ContainsPoint(l.vertices[1]) { - l.originInside = true - } - } - - // We *must* call initBound before initializing the index, because - // initBound calls ContainsPoint which does a bounds check before using - // the index. - l.initBound() - - // Create a new index and add us to it. - l.index = NewShapeIndex() - l.index.Add(l) -} - -// initBound sets up the approximate bounding Rects for this loop. -func (l *Loop) initBound() { - if len(l.vertices) == 0 { - *l = *EmptyLoop() - return - } - // Check for the special "empty" and "full" loops. - if l.isEmptyOrFull() { - if l.IsEmpty() { - l.bound = EmptyRect() - } else { - l.bound = FullRect() - } - l.subregionBound = l.bound - return - } - - // The bounding rectangle of a loop is not necessarily the same as the - // bounding rectangle of its vertices. First, the maximal latitude may be - // attained along the interior of an edge. Second, the loop may wrap - // entirely around the sphere (e.g. a loop that defines two revolutions of a - // candy-cane stripe). Third, the loop may include one or both poles. - // Note that a small clockwise loop near the equator contains both poles. - bounder := NewRectBounder() - for i := 0; i <= len(l.vertices); i++ { // add vertex 0 twice - bounder.AddPoint(l.Vertex(i)) - } - b := bounder.RectBound() - - if l.ContainsPoint(Point{r3.Vector{0, 0, 1}}) { - b = Rect{r1.Interval{b.Lat.Lo, math.Pi / 2}, s1.FullInterval()} - } - // If a loop contains the south pole, then either it wraps entirely - // around the sphere (full longitude range), or it also contains the - // north pole in which case b.Lng.IsFull() due to the test above. - // Either way, we only need to do the south pole containment test if - // b.Lng.IsFull(). 
- if b.Lng.IsFull() && l.ContainsPoint(Point{r3.Vector{0, 0, -1}}) { - b.Lat.Lo = -math.Pi / 2 - } - l.bound = b - l.subregionBound = ExpandForSubregions(l.bound) -} - -// Validate checks whether this is a valid loop. -func (l *Loop) Validate() error { - if err := l.findValidationErrorNoIndex(); err != nil { - return err - } - - // Check for intersections between non-adjacent edges (including at vertices) - // TODO(roberts): Once shapeutil gets findAnyCrossing uncomment this. - // return findAnyCrossing(l.index) - - return nil -} - -// findValidationErrorNoIndex reports whether this is not a valid loop, but -// skips checks that would require a ShapeIndex to be built for the loop. This -// is primarily used by Polygon to do validation so it doesn't trigger the -// creation of unneeded ShapeIndices. -func (l *Loop) findValidationErrorNoIndex() error { - // All vertices must be unit length. - for i, v := range l.vertices { - if !v.IsUnit() { - return fmt.Errorf("vertex %d is not unit length", i) - } - } - - // Loops must have at least 3 vertices (except for empty and full). - if len(l.vertices) < 3 { - if l.isEmptyOrFull() { - return nil // Skip remaining tests. - } - return fmt.Errorf("non-empty, non-full loops must have at least 3 vertices") - } - - // Loops are not allowed to have any duplicate vertices or edge crossings. - // We split this check into two parts. First we check that no edge is - // degenerate (identical endpoints). Then we check that there are no - // intersections between non-adjacent edges (including at vertices). The - // second check needs the ShapeIndex, so it does not fall within the scope - // of this method. - for i, v := range l.vertices { - if v == l.Vertex(i+1) { - return fmt.Errorf("edge %d is degenerate (duplicate vertex)", i) - } - - // Antipodal vertices are not allowed. 
- if other := (Point{l.Vertex(i + 1).Mul(-1)}); v == other { - return fmt.Errorf("vertices %d and %d are antipodal", i, - (i+1)%len(l.vertices)) - } - } - - return nil -} - -// Contains reports whether the region contained by this loop is a superset of the -// region contained by the given other loop. -func (l *Loop) Contains(o *Loop) bool { - // For a loop A to contain the loop B, all of the following must - // be true: - // - // (1) There are no edge crossings between A and B except at vertices. - // - // (2) At every vertex that is shared between A and B, the local edge - // ordering implies that A contains B. - // - // (3) If there are no shared vertices, then A must contain a vertex of B - // and B must not contain a vertex of A. (An arbitrary vertex may be - // chosen in each case.) - // - // The second part of (3) is necessary to detect the case of two loops whose - // union is the entire sphere, i.e. two loops that contains each other's - // boundaries but not each other's interiors. - if !l.subregionBound.Contains(o.bound) { - return false - } - - // Special cases to handle either loop being empty or full. - if l.isEmptyOrFull() || o.isEmptyOrFull() { - return l.IsFull() || o.IsEmpty() - } - - // Check whether there are any edge crossings, and also check the loop - // relationship at any shared vertices. - relation := &containsRelation{} - if hasCrossingRelation(l, o, relation) { - return false - } - - // There are no crossings, and if there are any shared vertices then A - // contains B locally at each shared vertex. - if relation.foundSharedVertex { - return true - } - - // Since there are no edge intersections or shared vertices, we just need to - // test condition (3) above. We can skip this test if we discovered that A - // contains at least one point of B while checking for edge crossings. - if !l.ContainsPoint(o.Vertex(0)) { - return false - } - - // We still need to check whether (A union B) is the entire sphere. 
- // Normally this check is very cheap due to the bounding box precondition. - if (o.subregionBound.Contains(l.bound) || o.bound.Union(l.bound).IsFull()) && - o.ContainsPoint(l.Vertex(0)) { - return false - } - return true -} - -// Intersects reports whether the region contained by this loop intersects the region -// contained by the other loop. -func (l *Loop) Intersects(o *Loop) bool { - // Given two loops, A and B, A.Intersects(B) if and only if !A.Complement().Contains(B). - // - // This code is similar to Contains, but is optimized for the case - // where both loops enclose less than half of the sphere. - if !l.bound.Intersects(o.bound) { - return false - } - - // Check whether there are any edge crossings, and also check the loop - // relationship at any shared vertices. - relation := &intersectsRelation{} - if hasCrossingRelation(l, o, relation) { - return true - } - if relation.foundSharedVertex { - return false - } - - // Since there are no edge intersections or shared vertices, the loops - // intersect only if A contains B, B contains A, or the two loops contain - // each other's boundaries. These checks are usually cheap because of the - // bounding box preconditions. Note that neither loop is empty (because of - // the bounding box check above), so it is safe to access vertex(0). - - // Check whether A contains B, or A and B contain each other's boundaries. - // (Note that A contains all the vertices of B in either case.) - if l.subregionBound.Contains(o.bound) || l.bound.Union(o.bound).IsFull() { - if l.ContainsPoint(o.Vertex(0)) { - return true - } - } - // Check whether B contains A. - if o.subregionBound.Contains(l.bound) { - if o.ContainsPoint(l.Vertex(0)) { - return true - } - } - return false -} - -// Equal reports whether two loops have the same vertices in the same linear order -// (i.e., cyclic rotations are not allowed). 
-func (l *Loop) Equal(other *Loop) bool { - if len(l.vertices) != len(other.vertices) { - return false - } - - for i, v := range l.vertices { - if v != other.Vertex(i) { - return false - } - } - return true -} - -// BoundaryEqual reports whether the two loops have the same boundary. This is -// true if and only if the loops have the same vertices in the same cyclic order -// (i.e., the vertices may be cyclically rotated). The empty and full loops are -// considered to have different boundaries. -func (l *Loop) BoundaryEqual(o *Loop) bool { - if len(l.vertices) != len(o.vertices) { - return false - } - - // Special case to handle empty or full loops. Since they have the same - // number of vertices, if one loop is empty/full then so is the other. - if l.isEmptyOrFull() { - return l.IsEmpty() == o.IsEmpty() - } - - // Loop through the vertices to find the first of ours that matches the - // starting vertex of the other loop. Use that offset to then 'align' the - // vertices for comparison. - for offset, vertex := range l.vertices { - if vertex == o.Vertex(0) { - // There is at most one starting offset since loop vertices are unique. - for i := 0; i < len(l.vertices); i++ { - if l.Vertex(i+offset) != o.Vertex(i) { - return false - } - } - return true - } - } - return false -} - -// compareBoundary returns +1 if this loop contains the boundary of the other loop, -// -1 if it excludes the boundary of the other, and 0 if the boundaries of the two -// loops cross. Shared edges are handled as follows: -// -// If XY is a shared edge, define Reversed(XY) to be true if XY -// appears in opposite directions in both loops. -// Then this loop contains XY if and only if Reversed(XY) == the other loop is a hole. -// (Intuitively, this checks whether this loop contains a vanishingly small region -// extending from the boundary of the other toward the interior of the polygon to -// which the other belongs.) 
-// -// This function is used for testing containment and intersection of -// multi-loop polygons. Note that this method is not symmetric, since the -// result depends on the direction of this loop but not on the direction of -// the other loop (in the absence of shared edges). -// -// This requires that neither loop is empty, and if other loop IsFull, then it must not -// be a hole. -func (l *Loop) compareBoundary(o *Loop) int { - // The bounds must intersect for containment or crossing. - if !l.bound.Intersects(o.bound) { - return -1 - } - - // Full loops are handled as though the loop surrounded the entire sphere. - if l.IsFull() { - return 1 - } - if o.IsFull() { - return -1 - } - - // Check whether there are any edge crossings, and also check the loop - // relationship at any shared vertices. - relation := newCompareBoundaryRelation(o.IsHole()) - if hasCrossingRelation(l, o, relation) { - return 0 - } - if relation.foundSharedVertex { - if relation.containsEdge { - return 1 - } - return -1 - } - - // There are no edge intersections or shared vertices, so we can check - // whether A contains an arbitrary vertex of B. - if l.ContainsPoint(o.Vertex(0)) { - return 1 - } - return -1 -} - -// ContainsOrigin reports true if this loop contains s2.OriginPoint(). -func (l *Loop) ContainsOrigin() bool { - return l.originInside -} - -// ReferencePoint returns the reference point for this loop. -func (l *Loop) ReferencePoint() ReferencePoint { - return OriginReferencePoint(l.originInside) -} - -// NumEdges returns the number of edges in this shape. -func (l *Loop) NumEdges() int { - if l.isEmptyOrFull() { - return 0 - } - return len(l.vertices) -} - -// Edge returns the endpoints for the given edge index. -func (l *Loop) Edge(i int) Edge { - return Edge{l.Vertex(i), l.Vertex(i + 1)} -} - -// NumChains reports the number of contiguous edge chains in the Loop. 
-func (l *Loop) NumChains() int { - if l.IsEmpty() { - return 0 - } - return 1 -} - -// Chain returns the i-th edge chain in the Shape. -func (l *Loop) Chain(chainID int) Chain { - return Chain{0, l.NumEdges()} -} - -// ChainEdge returns the j-th edge of the i-th edge chain. -func (l *Loop) ChainEdge(chainID, offset int) Edge { - return Edge{l.Vertex(offset), l.Vertex(offset + 1)} -} - -// ChainPosition returns a ChainPosition pair (i, j) such that edgeID is the -// j-th edge of the Loop. -func (l *Loop) ChainPosition(edgeID int) ChainPosition { - return ChainPosition{0, edgeID} -} - -// Dimension returns the dimension of the geometry represented by this Loop. -func (l *Loop) Dimension() int { return 2 } - -func (l *Loop) typeTag() typeTag { return typeTagNone } - -func (l *Loop) privateInterface() {} - -// IsEmpty reports true if this is the special empty loop that contains no points. -func (l *Loop) IsEmpty() bool { - return l.isEmptyOrFull() && !l.ContainsOrigin() -} - -// IsFull reports true if this is the special full loop that contains all points. -func (l *Loop) IsFull() bool { - return l.isEmptyOrFull() && l.ContainsOrigin() -} - -// isEmptyOrFull reports true if this loop is either the "empty" or "full" special loops. -func (l *Loop) isEmptyOrFull() bool { - return len(l.vertices) == 1 -} - -// Vertices returns the vertices in the loop. -func (l *Loop) Vertices() []Point { - return l.vertices -} - -// RectBound returns a tight bounding rectangle. If the loop contains the point, -// the bound also contains it. -func (l *Loop) RectBound() Rect { - return l.bound -} - -// CapBound returns a bounding cap that may have more padding than the corresponding -// RectBound. The bound is conservative such that if the loop contains a point P, -// the bound also contains it. -func (l *Loop) CapBound() Cap { - return l.bound.CapBound() -} - -// Vertex returns the vertex for the given index. 
For convenience, the vertex indices -// wrap automatically for methods that do index math such as Edge. -// i.e., Vertex(NumEdges() + n) is the same as Vertex(n). -func (l *Loop) Vertex(i int) Point { - return l.vertices[i%len(l.vertices)] -} - -// OrientedVertex returns the vertex in reverse order if the loop represents a polygon -// hole. For example, arguments 0, 1, 2 are mapped to vertices n-1, n-2, n-3, where -// n == len(vertices). This ensures that the interior of the polygon is always to -// the left of the vertex chain. -// -// This requires: 0 <= i < 2 * len(vertices) -func (l *Loop) OrientedVertex(i int) Point { - j := i - len(l.vertices) - if j < 0 { - j = i - } - if l.IsHole() { - j = len(l.vertices) - 1 - j - } - return l.Vertex(j) -} - -// NumVertices returns the number of vertices in this loop. -func (l *Loop) NumVertices() int { - return len(l.vertices) -} - -// bruteForceContainsPoint reports if the given point is contained by this loop. -// This method does not use the ShapeIndex, so it is only preferable below a certain -// size of loop. -func (l *Loop) bruteForceContainsPoint(p Point) bool { - origin := OriginPoint() - inside := l.originInside - crosser := NewChainEdgeCrosser(origin, p, l.Vertex(0)) - for i := 1; i <= len(l.vertices); i++ { // add vertex 0 twice - inside = inside != crosser.EdgeOrVertexChainCrossing(l.Vertex(i)) - } - return inside -} - -// ContainsPoint returns true if the loop contains the point. -func (l *Loop) ContainsPoint(p Point) bool { - if !l.index.IsFresh() && !l.bound.ContainsPoint(p) { - return false - } - - // For small loops it is faster to just check all the crossings. We also - // use this method during loop initialization because InitOriginAndBound() - // calls Contains() before InitIndex(). Otherwise, we keep track of the - // number of calls to Contains() and only build the index when enough calls - // have been made so that we think it is worth the effort. 
Note that the - // code below is structured so that if many calls are made in parallel only - // one thread builds the index, while the rest continue using brute force - // until the index is actually available. - - const maxBruteForceVertices = 32 - // TODO(roberts): add unindexed contains calls tracking - - if len(l.index.shapes) == 0 || // Index has not been initialized yet. - len(l.vertices) <= maxBruteForceVertices { - return l.bruteForceContainsPoint(p) - } - - // Otherwise, look up the point in the index. - it := l.index.Iterator() - if !it.LocatePoint(p) { - return false - } - return l.iteratorContainsPoint(it, p) -} - -// ContainsCell reports whether the given Cell is contained by this Loop. -func (l *Loop) ContainsCell(target Cell) bool { - it := l.index.Iterator() - relation := it.LocateCellID(target.ID()) - - // If "target" is disjoint from all index cells, it is not contained. - // Similarly, if "target" is subdivided into one or more index cells then it - // is not contained, since index cells are subdivided only if they (nearly) - // intersect a sufficient number of edges. (But note that if "target" itself - // is an index cell then it may be contained, since it could be a cell with - // no edges in the loop interior.) - if relation != Indexed { - return false - } - - // Otherwise check if any edges intersect "target". - if l.boundaryApproxIntersects(it, target) { - return false - } - - // Otherwise check if the loop contains the center of "target". - return l.iteratorContainsPoint(it, target.Center()) -} - -// IntersectsCell reports whether this Loop intersects the given cell. -func (l *Loop) IntersectsCell(target Cell) bool { - it := l.index.Iterator() - relation := it.LocateCellID(target.ID()) - - // If target does not overlap any index cell, there is no intersection. 
- if relation == Disjoint { - return false - } - // If target is subdivided into one or more index cells, there is an - // intersection to within the ShapeIndex error bound (see Contains). - if relation == Subdivided { - return true - } - // If target is an index cell, there is an intersection because index cells - // are created only if they have at least one edge or they are entirely - // contained by the loop. - if it.CellID() == target.id { - return true - } - // Otherwise check if any edges intersect target. - if l.boundaryApproxIntersects(it, target) { - return true - } - // Otherwise check if the loop contains the center of target. - return l.iteratorContainsPoint(it, target.Center()) -} - -// CellUnionBound computes a covering of the Loop. -func (l *Loop) CellUnionBound() []CellID { - return l.CapBound().CellUnionBound() -} - -// boundaryApproxIntersects reports if the loop's boundary intersects target. -// It may also return true when the loop boundary does not intersect target but -// some edge comes within the worst-case error tolerance. -// -// This requires that it.Locate(target) returned Indexed. -func (l *Loop) boundaryApproxIntersects(it *ShapeIndexIterator, target Cell) bool { - aClipped := it.IndexCell().findByShapeID(0) - - // If there are no edges, there is no intersection. - if len(aClipped.edges) == 0 { - return false - } - - // We can save some work if target is the index cell itself. - if it.CellID() == target.ID() { - return true - } - - // Otherwise check whether any of the edges intersect target. 
- maxError := (faceClipErrorUVCoord + intersectsRectErrorUVDist) - bound := target.BoundUV().ExpandedByMargin(maxError) - for _, ai := range aClipped.edges { - v0, v1, ok := ClipToPaddedFace(l.Vertex(ai), l.Vertex(ai+1), target.Face(), maxError) - if ok && edgeIntersectsRect(v0, v1, bound) { - return true - } - } - return false -} - -// iteratorContainsPoint reports if the iterator that is positioned at the ShapeIndexCell -// that may contain p, contains the point p. -func (l *Loop) iteratorContainsPoint(it *ShapeIndexIterator, p Point) bool { - // Test containment by drawing a line segment from the cell center to the - // given point and counting edge crossings. - aClipped := it.IndexCell().findByShapeID(0) - inside := aClipped.containsCenter - if len(aClipped.edges) > 0 { - center := it.Center() - crosser := NewEdgeCrosser(center, p) - aiPrev := -2 - for _, ai := range aClipped.edges { - if ai != aiPrev+1 { - crosser.RestartAt(l.Vertex(ai)) - } - aiPrev = ai - inside = inside != crosser.EdgeOrVertexChainCrossing(l.Vertex(ai+1)) - } - } - return inside -} - -// RegularLoop creates a loop with the given number of vertices, all -// located on a circle of the specified radius around the given center. -func RegularLoop(center Point, radius s1.Angle, numVertices int) *Loop { - return RegularLoopForFrame(getFrame(center), radius, numVertices) -} - -// RegularLoopForFrame creates a loop centered around the z-axis of the given -// coordinate frame, with the first vertex in the direction of the positive x-axis. -func RegularLoopForFrame(frame matrix3x3, radius s1.Angle, numVertices int) *Loop { - return LoopFromPoints(regularPointsForFrame(frame, radius, numVertices)) -} - -// CanonicalFirstVertex returns a first index and a direction (either +1 or -1) -// such that the vertex sequence (first, first+dir, ..., first+(n-1)*dir) does -// not change when the loop vertex order is rotated or inverted. This allows the -// loop vertices to be traversed in a canonical order. 
The return values are -// chosen such that (first, ..., first+n*dir) are in the range [0, 2*n-1] as -// expected by the Vertex method. -func (l *Loop) CanonicalFirstVertex() (firstIdx, direction int) { - firstIdx = 0 - n := len(l.vertices) - for i := 1; i < n; i++ { - if l.Vertex(i).Cmp(l.Vertex(firstIdx).Vector) == -1 { - firstIdx = i - } - } - - // 0 <= firstIdx <= n-1, so (firstIdx+n*dir) <= 2*n-1. - if l.Vertex(firstIdx+1).Cmp(l.Vertex(firstIdx+n-1).Vector) == -1 { - return firstIdx, 1 - } - - // n <= firstIdx <= 2*n-1, so (firstIdx+n*dir) >= 0. - firstIdx += n - return firstIdx, -1 -} - -// TurningAngle returns the sum of the turning angles at each vertex. The return -// value is positive if the loop is counter-clockwise, negative if the loop is -// clockwise, and zero if the loop is a great circle. Degenerate and -// nearly-degenerate loops are handled consistently with Sign. So for example, -// if a loop has zero area (i.e., it is a very small CCW loop) then the turning -// angle will always be negative. -// -// This quantity is also called the "geodesic curvature" of the loop. -func (l *Loop) TurningAngle() float64 { - // For empty and full loops, we return the limit value as the loop area - // approaches 0 or 4*Pi respectively. - if l.isEmptyOrFull() { - if l.ContainsOrigin() { - return -2 * math.Pi - } - return 2 * math.Pi - } - - // Don't crash even if the loop is not well-defined. - if len(l.vertices) < 3 { - return 0 - } - - // To ensure that we get the same result when the vertex order is rotated, - // and that the result is negated when the vertex order is reversed, we need - // to add up the individual turn angles in a consistent order. (In general, - // adding up a set of numbers in a different order can change the sum due to - // rounding errors.) - // - // Furthermore, if we just accumulate an ordinary sum then the worst-case - // error is quadratic in the number of vertices. 
(This can happen with - // spiral shapes, where the partial sum of the turning angles can be linear - // in the number of vertices.) To avoid this we use the Kahan summation - // algorithm (http://en.wikipedia.org/wiki/Kahan_summation_algorithm). - n := len(l.vertices) - i, dir := l.CanonicalFirstVertex() - sum := TurnAngle(l.Vertex((i+n-dir)%n), l.Vertex(i), l.Vertex((i+dir)%n)) - - compensation := s1.Angle(0) - for n-1 > 0 { - i += dir - angle := TurnAngle(l.Vertex(i-dir), l.Vertex(i), l.Vertex(i+dir)) - oldSum := sum - angle += compensation - sum += angle - compensation = (oldSum - sum) + angle - n-- - } - - const maxCurvature = 2*math.Pi - 4*dblEpsilon - - return math.Max(-maxCurvature, math.Min(maxCurvature, float64(dir)*float64(sum+compensation))) -} - -// turningAngleMaxError return the maximum error in TurningAngle. The value is not -// constant; it depends on the loop. -func (l *Loop) turningAngleMaxError() float64 { - // The maximum error can be bounded as follows: - // 3.00 * dblEpsilon for RobustCrossProd(b, a) - // 3.00 * dblEpsilon for RobustCrossProd(c, b) - // 3.25 * dblEpsilon for Angle() - // 2.00 * dblEpsilon for each addition in the Kahan summation - // ------------------ - // 11.25 * dblEpsilon - maxErrorPerVertex := 11.25 * dblEpsilon - return maxErrorPerVertex * float64(len(l.vertices)) -} - -// IsHole reports whether this loop represents a hole in its containing polygon. -func (l *Loop) IsHole() bool { return l.depth&1 != 0 } - -// Sign returns -1 if this Loop represents a hole in its containing polygon, and +1 otherwise. -func (l *Loop) Sign() int { - if l.IsHole() { - return -1 - } - return 1 -} - -// IsNormalized reports whether the loop area is at most 2*pi. Degenerate loops are -// handled consistently with Sign, i.e., if a loop can be -// expressed as the union of degenerate or nearly-degenerate CCW triangles, -// then it will always be considered normalized. 
-func (l *Loop) IsNormalized() bool { - // Optimization: if the longitude span is less than 180 degrees, then the - // loop covers less than half the sphere and is therefore normalized. - if l.bound.Lng.Length() < math.Pi { - return true - } - - // We allow some error so that hemispheres are always considered normalized. - // TODO(roberts): This is no longer required by the Polygon implementation, - // so alternatively we could create the invariant that a loop is normalized - // if and only if its complement is not normalized. - return l.TurningAngle() >= -l.turningAngleMaxError() -} - -// Normalize inverts the loop if necessary so that the area enclosed by the loop -// is at most 2*pi. -func (l *Loop) Normalize() { - if !l.IsNormalized() { - l.Invert() - } -} - -// Invert reverses the order of the loop vertices, effectively complementing the -// region represented by the loop. For example, the loop ABCD (with edges -// AB, BC, CD, DA) becomes the loop DCBA (with edges DC, CB, BA, AD). -// Notice that the last edge is the same in both cases except that its -// direction has been reversed. -func (l *Loop) Invert() { - l.index.Reset() - if l.isEmptyOrFull() { - if l.IsFull() { - l.vertices[0] = emptyLoopPoint - } else { - l.vertices[0] = fullLoopPoint - } - } else { - // For non-special loops, reverse the slice of vertices. - for i := len(l.vertices)/2 - 1; i >= 0; i-- { - opp := len(l.vertices) - 1 - i - l.vertices[i], l.vertices[opp] = l.vertices[opp], l.vertices[i] - } - } - - // originInside must be set correctly before building the ShapeIndex. - l.originInside = !l.originInside - if l.bound.Lat.Lo > -math.Pi/2 && l.bound.Lat.Hi < math.Pi/2 { - // The complement of this loop contains both poles. - l.bound = FullRect() - l.subregionBound = l.bound - } else { - l.initBound() - } - l.index.Add(l) -} - -// findVertex returns the index of the vertex at the given Point in the range -// 1..numVertices, and a boolean indicating if a vertex was found. 
-func (l *Loop) findVertex(p Point) (index int, ok bool) { - const notFound = 0 - if len(l.vertices) < 10 { - // Exhaustive search for loops below a small threshold. - for i := 1; i <= len(l.vertices); i++ { - if l.Vertex(i) == p { - return i, true - } - } - return notFound, false - } - - it := l.index.Iterator() - if !it.LocatePoint(p) { - return notFound, false - } - - aClipped := it.IndexCell().findByShapeID(0) - for i := aClipped.numEdges() - 1; i >= 0; i-- { - ai := aClipped.edges[i] - if l.Vertex(ai) == p { - if ai == 0 { - return len(l.vertices), true - } - return ai, true - } - - if l.Vertex(ai+1) == p { - return ai + 1, true - } - } - return notFound, false -} - -// ContainsNested reports whether the given loops is contained within this loop. -// This function does not test for edge intersections. The two loops must meet -// all of the Polygon requirements; for example this implies that their -// boundaries may not cross or have any shared edges (although they may have -// shared vertices). -func (l *Loop) ContainsNested(other *Loop) bool { - if !l.subregionBound.Contains(other.bound) { - return false - } - - // Special cases to handle either loop being empty or full. Also bail out - // when B has no vertices to avoid heap overflow on the vertex(1) call - // below. (This method is called during polygon initialization before the - // client has an opportunity to call IsValid().) - if l.isEmptyOrFull() || other.NumVertices() < 2 { - return l.IsFull() || other.IsEmpty() - } - - // We are given that A and B do not share any edges, and that either one - // loop contains the other or they do not intersect. - m, ok := l.findVertex(other.Vertex(1)) - if !ok { - // Since other.vertex(1) is not shared, we can check whether A contains it. - return l.ContainsPoint(other.Vertex(1)) - } - - // Check whether the edge order around other.Vertex(1) is compatible with - // A containing B. 
- return WedgeContains(l.Vertex(m-1), l.Vertex(m), l.Vertex(m+1), other.Vertex(0), other.Vertex(2)) -} - -// surfaceIntegralFloat64 computes the oriented surface integral of some quantity f(x) -// over the loop interior, given a function f(A,B,C) that returns the -// corresponding integral over the spherical triangle ABC. Here "oriented -// surface integral" means: -// -// (1) f(A,B,C) must be the integral of f if ABC is counterclockwise, -// and the integral of -f if ABC is clockwise. -// -// (2) The result of this function is *either* the integral of f over the -// loop interior, or the integral of (-f) over the loop exterior. -// -// Note that there are at least two common situations where it easy to work -// around property (2) above: -// -// - If the integral of f over the entire sphere is zero, then it doesn't -// matter which case is returned because they are always equal. -// -// - If f is non-negative, then it is easy to detect when the integral over -// the loop exterior has been returned, and the integral over the loop -// interior can be obtained by adding the integral of f over the entire -// unit sphere (a constant) to the result. -// -// Any changes to this method may need corresponding changes to surfaceIntegralPoint as well. -func (l *Loop) surfaceIntegralFloat64(f func(a, b, c Point) float64) float64 { - // We sum f over a collection T of oriented triangles, possibly - // overlapping. Let the sign of a triangle be +1 if it is CCW and -1 - // otherwise, and let the sign of a point x be the sum of the signs of the - // triangles containing x. Then the collection of triangles T is chosen - // such that either: - // - // (1) Each point in the loop interior has sign +1, and sign 0 otherwise; or - // (2) Each point in the loop exterior has sign -1, and sign 0 otherwise. - // - // The triangles basically consist of a fan from vertex 0 to every loop - // edge that does not include vertex 0. These triangles will always satisfy - // either (1) or (2). 
However, what makes this a bit tricky is that - // spherical edges become numerically unstable as their length approaches - // 180 degrees. Of course there is not much we can do if the loop itself - // contains such edges, but we would like to make sure that all the triangle - // edges under our control (i.e., the non-loop edges) are stable. For - // example, consider a loop around the equator consisting of four equally - // spaced points. This is a well-defined loop, but we cannot just split it - // into two triangles by connecting vertex 0 to vertex 2. - // - // We handle this type of situation by moving the origin of the triangle fan - // whenever we are about to create an unstable edge. We choose a new - // location for the origin such that all relevant edges are stable. We also - // create extra triangles with the appropriate orientation so that the sum - // of the triangle signs is still correct at every point. - - // The maximum length of an edge for it to be considered numerically stable. - // The exact value is fairly arbitrary since it depends on the stability of - // the function f. The value below is quite conservative but could be - // reduced further if desired. - const maxLength = math.Pi - 1e-5 - - var sum float64 - origin := l.Vertex(0) - for i := 1; i+1 < len(l.vertices); i++ { - // Let V_i be vertex(i), let O be the current origin, and let length(A,B) - // be the length of edge (A,B). At the start of each loop iteration, the - // "leading edge" of the triangle fan is (O,V_i), and we want to extend - // the triangle fan so that the leading edge is (O,V_i+1). - // - // Invariants: - // 1. length(O,V_i) < maxLength for all (i > 1). - // 2. Either O == V_0, or O is approximately perpendicular to V_0. - // 3. "sum" is the oriented integral of f over the area defined by - // (O, V_0, V_1, ..., V_i). - if l.Vertex(i+1).Angle(origin.Vector) > maxLength { - // We are about to create an unstable edge, so choose a new origin O' - // for the triangle fan. 
- oldOrigin := origin - if origin == l.Vertex(0) { - // The following point is well-separated from V_i and V_0 (and - // therefore V_i+1 as well). - origin = Point{l.Vertex(0).PointCross(l.Vertex(i)).Normalize()} - } else if l.Vertex(i).Angle(l.Vertex(0).Vector) < maxLength { - // All edges of the triangle (O, V_0, V_i) are stable, so we can - // revert to using V_0 as the origin. - origin = l.Vertex(0) - } else { - // (O, V_i+1) and (V_0, V_i) are antipodal pairs, and O and V_0 are - // perpendicular. Therefore V_0.CrossProd(O) is approximately - // perpendicular to all of {O, V_0, V_i, V_i+1}, and we can choose - // this point O' as the new origin. - origin = Point{l.Vertex(0).Cross(oldOrigin.Vector)} - - // Advance the edge (V_0,O) to (V_0,O'). - sum += f(l.Vertex(0), oldOrigin, origin) - } - // Advance the edge (O,V_i) to (O',V_i). - sum += f(oldOrigin, l.Vertex(i), origin) - } - // Advance the edge (O,V_i) to (O,V_i+1). - sum += f(origin, l.Vertex(i), l.Vertex(i+1)) - } - // If the origin is not V_0, we need to sum one more triangle. - if origin != l.Vertex(0) { - // Advance the edge (O,V_n-1) to (O,V_0). - sum += f(origin, l.Vertex(len(l.vertices)-1), l.Vertex(0)) - } - return sum -} - -// surfaceIntegralPoint mirrors the surfaceIntegralFloat64 method but over Points; -// see that method for commentary. The C++ version uses a templated method. -// Any changes to this method may need corresponding changes to surfaceIntegralFloat64 as well. 
-func (l *Loop) surfaceIntegralPoint(f func(a, b, c Point) Point) Point { - const maxLength = math.Pi - 1e-5 - var sum r3.Vector - - origin := l.Vertex(0) - for i := 1; i+1 < len(l.vertices); i++ { - if l.Vertex(i+1).Angle(origin.Vector) > maxLength { - oldOrigin := origin - if origin == l.Vertex(0) { - origin = Point{l.Vertex(0).PointCross(l.Vertex(i)).Normalize()} - } else if l.Vertex(i).Angle(l.Vertex(0).Vector) < maxLength { - origin = l.Vertex(0) - } else { - origin = Point{l.Vertex(0).Cross(oldOrigin.Vector)} - sum = sum.Add(f(l.Vertex(0), oldOrigin, origin).Vector) - } - sum = sum.Add(f(oldOrigin, l.Vertex(i), origin).Vector) - } - sum = sum.Add(f(origin, l.Vertex(i), l.Vertex(i+1)).Vector) - } - if origin != l.Vertex(0) { - sum = sum.Add(f(origin, l.Vertex(len(l.vertices)-1), l.Vertex(0)).Vector) - } - return Point{sum} -} - -// Area returns the area of the loop interior, i.e. the region on the left side of -// the loop. The return value is between 0 and 4*pi. (Note that the return -// value is not affected by whether this loop is a "hole" or a "shell".) -func (l *Loop) Area() float64 { - // It is surprisingly difficult to compute the area of a loop robustly. The - // main issues are (1) whether degenerate loops are considered to be CCW or - // not (i.e., whether their area is close to 0 or 4*pi), and (2) computing - // the areas of small loops with good relative accuracy. - // - // With respect to degeneracies, we would like Area to be consistent - // with ContainsPoint in that loops that contain many points - // should have large areas, and loops that contain few points should have - // small areas. For example, if a degenerate triangle is considered CCW - // according to s2predicates Sign, then it will contain very few points and - // its area should be approximately zero. On the other hand if it is - // considered clockwise, then it will contain virtually all points and so - // its area should be approximately 4*pi. 
- // - // More precisely, let U be the set of Points for which IsUnitLength - // is true, let P(U) be the projection of those points onto the mathematical - // unit sphere, and let V(P(U)) be the Voronoi diagram of the projected - // points. Then for every loop x, we would like Area to approximately - // equal the sum of the areas of the Voronoi regions of the points p for - // which x.ContainsPoint(p) is true. - // - // The second issue is that we want to compute the area of small loops - // accurately. This requires having good relative precision rather than - // good absolute precision. For example, if the area of a loop is 1e-12 and - // the error is 1e-15, then the area only has 3 digits of accuracy. (For - // reference, 1e-12 is about 40 square meters on the surface of the earth.) - // We would like to have good relative accuracy even for small loops. - // - // To achieve these goals, we combine two different methods of computing the - // area. This first method is based on the Gauss-Bonnet theorem, which says - // that the area enclosed by the loop equals 2*pi minus the total geodesic - // curvature of the loop (i.e., the sum of the "turning angles" at all the - // loop vertices). The big advantage of this method is that as long as we - // use Sign to compute the turning angle at each vertex, then - // degeneracies are always handled correctly. In other words, if a - // degenerate loop is CCW according to the symbolic perturbations used by - // Sign, then its turning angle will be approximately 2*pi. - // - // The disadvantage of the Gauss-Bonnet method is that its absolute error is - // about 2e-15 times the number of vertices (see turningAngleMaxError). - // So, it cannot compute the area of small loops accurately. - // - // The second method is based on splitting the loop into triangles and - // summing the area of each triangle. 
To avoid the difficulty and expense - // of decomposing the loop into a union of non-overlapping triangles, - // instead we compute a signed sum over triangles that may overlap (see the - // comments for surfaceIntegral). The advantage of this method - // is that the area of each triangle can be computed with much better - // relative accuracy (using l'Huilier's theorem). The disadvantage is that - // the result is a signed area: CCW loops may yield a small positive value, - // while CW loops may yield a small negative value (which is converted to a - // positive area by adding 4*pi). This means that small errors in computing - // the signed area may translate into a very large error in the result (if - // the sign of the sum is incorrect). - // - // So, our strategy is to combine these two methods as follows. First we - // compute the area using the "signed sum over triangles" approach (since it - // is generally more accurate). We also estimate the maximum error in this - // result. If the signed area is too close to zero (i.e., zero is within - // the error bounds), then we double-check the sign of the result using the - // Gauss-Bonnet method. (In fact we just call IsNormalized, which is - // based on this method.) If the two methods disagree, we return either 0 - // or 4*pi based on the result of IsNormalized. Otherwise we return the - // area that we computed originally. - if l.isEmptyOrFull() { - if l.ContainsOrigin() { - return 4 * math.Pi - } - return 0 - } - area := l.surfaceIntegralFloat64(SignedArea) - - // TODO(roberts): This error estimate is very approximate. There are two - // issues: (1) SignedArea needs some improvements to ensure that its error - // is actually never higher than GirardArea, and (2) although the number of - // triangles in the sum is typically N-2, in theory it could be as high as - // 2*N for pathological inputs. 
But in other respects this error bound is - // very conservative since it assumes that the maximum error is achieved on - // every triangle. - maxError := l.turningAngleMaxError() - - // The signed area should be between approximately -4*pi and 4*pi. - if area < 0 { - // We have computed the negative of the area of the loop exterior. - area += 4 * math.Pi - } - - if area > 4*math.Pi { - area = 4 * math.Pi - } - if area < 0 { - area = 0 - } - - // If the area is close enough to zero or 4*pi so that the loop orientation - // is ambiguous, then we compute the loop orientation explicitly. - if area < maxError && !l.IsNormalized() { - return 4 * math.Pi - } else if area > (4*math.Pi-maxError) && l.IsNormalized() { - return 0 - } - - return area -} - -// Centroid returns the true centroid of the loop multiplied by the area of the -// loop. The result is not unit length, so you may want to normalize it. Also -// note that in general, the centroid may not be contained by the loop. -// -// We prescale by the loop area for two reasons: (1) it is cheaper to -// compute this way, and (2) it makes it easier to compute the centroid of -// more complicated shapes (by splitting them into disjoint regions and -// adding their centroids). -// -// Note that the return value is not affected by whether this loop is a -// "hole" or a "shell". -func (l *Loop) Centroid() Point { - // surfaceIntegralPoint() returns either the integral of position over loop - // interior, or the negative of the integral of position over the loop - // exterior. But these two values are the same (!), because the integral of - // position over the entire sphere is (0, 0, 0). - return l.surfaceIntegralPoint(TrueCentroid) -} - -// Encode encodes the Loop. 
-func (l Loop) Encode(w io.Writer) error { - e := &encoder{w: w} - l.encode(e) - return e.err -} - -func (l Loop) encode(e *encoder) { - e.writeInt8(encodingVersion) - e.writeUint32(uint32(len(l.vertices))) - for _, v := range l.vertices { - e.writeFloat64(v.X) - e.writeFloat64(v.Y) - e.writeFloat64(v.Z) - } - - e.writeBool(l.originInside) - e.writeInt32(int32(l.depth)) - - // Encode the bound. - l.bound.encode(e) -} - -// Decode decodes a loop. -func (l *Loop) Decode(r io.Reader) error { - *l = Loop{} - d := &decoder{r: asByteReader(r)} - l.decode(d) - return d.err -} - -func (l *Loop) decode(d *decoder) { - version := int8(d.readUint8()) - if d.err != nil { - return - } - if version != encodingVersion { - d.err = fmt.Errorf("cannot decode version %d", version) - return - } - - // Empty loops are explicitly allowed here: a newly created loop has zero vertices - // and such loops encode and decode properly. - nvertices := d.readUint32() - if nvertices > maxEncodedVertices { - if d.err == nil { - d.err = fmt.Errorf("too many vertices (%d; max is %d)", nvertices, maxEncodedVertices) - - } - return - } - l.vertices = make([]Point, nvertices) - for i := range l.vertices { - l.vertices[i].X = d.readFloat64() - l.vertices[i].Y = d.readFloat64() - l.vertices[i].Z = d.readFloat64() - } - l.index = NewShapeIndex() - l.originInside = d.readBool() - l.depth = int(d.readUint32()) - l.bound.decode(d) - l.subregionBound = ExpandForSubregions(l.bound) - - l.index.Add(l) -} - -// Bitmasks to read from properties. 
-const ( - originInside = 1 << iota - boundEncoded -) - -func (l *Loop) xyzFaceSiTiVertices() []xyzFaceSiTi { - ret := make([]xyzFaceSiTi, len(l.vertices)) - for i, v := range l.vertices { - ret[i].xyz = v - ret[i].face, ret[i].si, ret[i].ti, ret[i].level = xyzToFaceSiTi(v) - } - return ret -} - -func (l *Loop) encodeCompressed(e *encoder, snapLevel int, vertices []xyzFaceSiTi) { - if len(l.vertices) != len(vertices) { - panic("encodeCompressed: vertices must be the same length as l.vertices") - } - if len(vertices) > maxEncodedVertices { - if e.err == nil { - e.err = fmt.Errorf("too many vertices (%d; max is %d)", len(vertices), maxEncodedVertices) - } - return - } - e.writeUvarint(uint64(len(vertices))) - encodePointsCompressed(e, vertices, snapLevel) - - props := l.compressedEncodingProperties() - e.writeUvarint(props) - e.writeUvarint(uint64(l.depth)) - if props&boundEncoded != 0 { - l.bound.encode(e) - } -} - -func (l *Loop) compressedEncodingProperties() uint64 { - var properties uint64 - if l.originInside { - properties |= originInside - } - - // Write whether there is a bound so we can change the threshold later. - // Recomputing the bound multiplies the decode time taken per vertex - // by a factor of about 3.5. Without recomputing the bound, decode - // takes approximately 125 ns / vertex. A loop with 63 vertices - // encoded without the bound will take ~30us to decode, which is - // acceptable. At ~3.5 bytes / vertex without the bound, adding - // the bound will increase the size by <15%, which is also acceptable. 
- const minVerticesForBound = 64 - if len(l.vertices) >= minVerticesForBound { - properties |= boundEncoded - } - - return properties -} - -func (l *Loop) decodeCompressed(d *decoder, snapLevel int) { - nvertices := d.readUvarint() - if d.err != nil { - return - } - if nvertices > maxEncodedVertices { - d.err = fmt.Errorf("too many vertices (%d; max is %d)", nvertices, maxEncodedVertices) - return - } - l.vertices = make([]Point, nvertices) - decodePointsCompressed(d, snapLevel, l.vertices) - properties := d.readUvarint() - - // Make sure values are valid before using. - if d.err != nil { - return - } - - l.index = NewShapeIndex() - l.originInside = (properties & originInside) != 0 - - l.depth = int(d.readUvarint()) - - if (properties & boundEncoded) != 0 { - l.bound.decode(d) - if d.err != nil { - return - } - l.subregionBound = ExpandForSubregions(l.bound) - } else { - l.initBound() - } - - l.index.Add(l) -} - -// crossingTarget is an enum representing the possible crossing target cases for relations. -type crossingTarget int - -const ( - crossingTargetDontCare crossingTarget = iota - crossingTargetDontCross - crossingTargetCross -) - -// loopRelation defines the interface for checking a type of relationship between two loops. -// Some examples of relations are Contains, Intersects, or CompareBoundary. -type loopRelation interface { - // Optionally, aCrossingTarget and bCrossingTarget can specify an early-exit - // condition for the loop relation. If any point P is found such that - // - // A.ContainsPoint(P) == aCrossingTarget() && - // B.ContainsPoint(P) == bCrossingTarget() - // - // then the loop relation is assumed to be the same as if a pair of crossing - // edges were found. 
For example, the ContainsPoint relation has - // - // aCrossingTarget() == crossingTargetDontCross - // bCrossingTarget() == crossingTargetCross - // - // because if A.ContainsPoint(P) == false and B.ContainsPoint(P) == true - // for any point P, then it is equivalent to finding an edge crossing (i.e., - // since Contains returns false in both cases). - // - // Loop relations that do not have an early-exit condition of this form - // should return crossingTargetDontCare for both crossing targets. - - // aCrossingTarget reports whether loop A crosses the target point with - // the given relation type. - aCrossingTarget() crossingTarget - // bCrossingTarget reports whether loop B crosses the target point with - // the given relation type. - bCrossingTarget() crossingTarget - - // wedgesCross reports if a shared vertex ab1 and the two associated wedges - // (a0, ab1, b2) and (b0, ab1, b2) are equivalent to an edge crossing. - // The loop relation is also allowed to maintain its own internal state, and - // can return true if it observes any sequence of wedges that are equivalent - // to an edge crossing. - wedgesCross(a0, ab1, a2, b0, b2 Point) bool -} - -// loopCrosser is a helper type for determining whether two loops cross. -// It is instantiated twice for each pair of loops to be tested, once for the -// pair (A,B) and once for the pair (B,A), in order to be able to process -// edges in either loop nesting order. -type loopCrosser struct { - a, b *Loop - relation loopRelation - swapped bool - aCrossingTarget crossingTarget - bCrossingTarget crossingTarget - - // state maintained by startEdge and edgeCrossesCell. - crosser *EdgeCrosser - aj, bjPrev int - - // temporary data declared here to avoid repeated memory allocations. - bQuery *CrossingEdgeQuery - bCells []*ShapeIndexCell -} - -// newLoopCrosser creates a loopCrosser from the given values. If swapped is true, -// the loops A and B have been swapped. 
This affects how arguments are passed to -// the given loop relation, since for example A.Contains(B) is not the same as -// B.Contains(A). -func newLoopCrosser(a, b *Loop, relation loopRelation, swapped bool) *loopCrosser { - l := &loopCrosser{ - a: a, - b: b, - relation: relation, - swapped: swapped, - aCrossingTarget: relation.aCrossingTarget(), - bCrossingTarget: relation.bCrossingTarget(), - bQuery: NewCrossingEdgeQuery(b.index), - } - if swapped { - l.aCrossingTarget, l.bCrossingTarget = l.bCrossingTarget, l.aCrossingTarget - } - - return l -} - -// startEdge sets the crossers state for checking the given edge of loop A. -func (l *loopCrosser) startEdge(aj int) { - l.crosser = NewEdgeCrosser(l.a.Vertex(aj), l.a.Vertex(aj+1)) - l.aj = aj - l.bjPrev = -2 -} - -// edgeCrossesCell reports whether the current edge of loop A has any crossings with -// edges of the index cell of loop B. -func (l *loopCrosser) edgeCrossesCell(bClipped *clippedShape) bool { - // Test the current edge of A against all edges of bClipped - bNumEdges := bClipped.numEdges() - for j := 0; j < bNumEdges; j++ { - bj := bClipped.edges[j] - if bj != l.bjPrev+1 { - l.crosser.RestartAt(l.b.Vertex(bj)) - } - l.bjPrev = bj - if crossing := l.crosser.ChainCrossingSign(l.b.Vertex(bj + 1)); crossing == DoNotCross { - continue - } else if crossing == Cross { - return true - } - - // We only need to check each shared vertex once, so we only - // consider the case where l.aVertex(l.aj+1) == l.b.Vertex(bj+1). - if l.a.Vertex(l.aj+1) == l.b.Vertex(bj+1) { - if l.swapped { - if l.relation.wedgesCross(l.b.Vertex(bj), l.b.Vertex(bj+1), l.b.Vertex(bj+2), l.a.Vertex(l.aj), l.a.Vertex(l.aj+2)) { - return true - } - } else { - if l.relation.wedgesCross(l.a.Vertex(l.aj), l.a.Vertex(l.aj+1), l.a.Vertex(l.aj+2), l.b.Vertex(bj), l.b.Vertex(bj+2)) { - return true - } - } - } - } - - return false -} - -// cellCrossesCell reports whether there are any edge crossings or wedge crossings -// within the two given cells. 
-func (l *loopCrosser) cellCrossesCell(aClipped, bClipped *clippedShape) bool { - // Test all edges of aClipped against all edges of bClipped. - for _, edge := range aClipped.edges { - l.startEdge(edge) - if l.edgeCrossesCell(bClipped) { - return true - } - } - - return false -} - -// cellCrossesAnySubcell reports whether given an index cell of A, if there are any -// edge or wedge crossings with any index cell of B contained within bID. -func (l *loopCrosser) cellCrossesAnySubcell(aClipped *clippedShape, bID CellID) bool { - // Test all edges of aClipped against all edges of B. The relevant B - // edges are guaranteed to be children of bID, which lets us find the - // correct index cells more efficiently. - bRoot := PaddedCellFromCellID(bID, 0) - for _, aj := range aClipped.edges { - // Use an CrossingEdgeQuery starting at bRoot to find the index cells - // of B that might contain crossing edges. - l.bCells = l.bQuery.getCells(l.a.Vertex(aj), l.a.Vertex(aj+1), bRoot) - if len(l.bCells) == 0 { - continue - } - l.startEdge(aj) - for c := 0; c < len(l.bCells); c++ { - if l.edgeCrossesCell(l.bCells[c].shapes[0]) { - return true - } - } - } - - return false -} - -// hasCrossing reports whether given two iterators positioned such that -// ai.cellID().ContainsCellID(bi.cellID()), there is an edge or wedge crossing -// anywhere within ai.cellID(). This function advances bi only past ai.cellID(). -func (l *loopCrosser) hasCrossing(ai, bi *rangeIterator) bool { - // If ai.CellID() intersects many edges of B, then it is faster to use - // CrossingEdgeQuery to narrow down the candidates. But if it intersects - // only a few edges, it is faster to check all the crossings directly. - // We handle this by advancing bi and keeping track of how many edges we - // would need to test. - const edgeQueryMinEdges = 20 // Tuned from benchmarks. 
- var totalEdges int - l.bCells = nil - - for { - if n := bi.it.IndexCell().shapes[0].numEdges(); n > 0 { - totalEdges += n - if totalEdges >= edgeQueryMinEdges { - // There are too many edges to test them directly, so use CrossingEdgeQuery. - if l.cellCrossesAnySubcell(ai.it.IndexCell().shapes[0], ai.cellID()) { - return true - } - bi.seekBeyond(ai) - return false - } - l.bCells = append(l.bCells, bi.indexCell()) - } - bi.next() - if bi.cellID() > ai.rangeMax { - break - } - } - - // Test all the edge crossings directly. - for _, c := range l.bCells { - if l.cellCrossesCell(ai.it.IndexCell().shapes[0], c.shapes[0]) { - return true - } - } - - return false -} - -// containsCenterMatches reports if the clippedShapes containsCenter boolean corresponds -// to the crossing target type given. (This is to work around C++ allowing false == 0, -// true == 1 type implicit conversions and comparisons) -func containsCenterMatches(a *clippedShape, target crossingTarget) bool { - return (!a.containsCenter && target == crossingTargetDontCross) || - (a.containsCenter && target == crossingTargetCross) -} - -// hasCrossingRelation reports whether given two iterators positioned such that -// ai.cellID().ContainsCellID(bi.cellID()), there is a crossing relationship -// anywhere within ai.cellID(). Specifically, this method returns true if there -// is an edge crossing, a wedge crossing, or a point P that matches both relations -// crossing targets. This function advances both iterators past ai.cellID. -func (l *loopCrosser) hasCrossingRelation(ai, bi *rangeIterator) bool { - aClipped := ai.it.IndexCell().shapes[0] - if aClipped.numEdges() != 0 { - // The current cell of A has at least one edge, so check for crossings. - if l.hasCrossing(ai, bi) { - return true - } - ai.next() - return false - } - - if containsCenterMatches(aClipped, l.aCrossingTarget) { - // The crossing target for A is not satisfied, so we skip over these cells of B. 
- bi.seekBeyond(ai) - ai.next() - return false - } - - // All points within ai.cellID() satisfy the crossing target for A, so it's - // worth iterating through the cells of B to see whether any cell - // centers also satisfy the crossing target for B. - for bi.cellID() <= ai.rangeMax { - bClipped := bi.it.IndexCell().shapes[0] - if containsCenterMatches(bClipped, l.bCrossingTarget) { - return true - } - bi.next() - } - ai.next() - return false -} - -// hasCrossingRelation checks all edges of loop A for intersection against all edges -// of loop B and reports if there are any that satisfy the given relation. If there -// is any shared vertex, the wedges centered at this vertex are sent to the given -// relation to be tested. -// -// If the two loop boundaries cross, this method is guaranteed to return -// true. It also returns true in certain cases if the loop relationship is -// equivalent to crossing. For example, if the relation is Contains and a -// point P is found such that B contains P but A does not contain P, this -// method will return true to indicate that the result is the same as though -// a pair of crossing edges were found (since Contains returns false in -// both cases). -// -// See Contains, Intersects and CompareBoundary for the three uses of this function. -func hasCrossingRelation(a, b *Loop, relation loopRelation) bool { - // We look for CellID ranges where the indexes of A and B overlap, and - // then test those edges for crossings. - ai := newRangeIterator(a.index) - bi := newRangeIterator(b.index) - - ab := newLoopCrosser(a, b, relation, false) // Tests edges of A against B - ba := newLoopCrosser(b, a, relation, true) // Tests edges of B against A - - for !ai.done() || !bi.done() { - if ai.rangeMax < bi.rangeMin { - // The A and B cells don't overlap, and A precedes B. - ai.seekTo(bi) - } else if bi.rangeMax < ai.rangeMin { - // The A and B cells don't overlap, and B precedes A. - bi.seekTo(ai) - } else { - // One cell contains the other. 
Determine which cell is larger. - abRelation := int64(ai.it.CellID().lsb() - bi.it.CellID().lsb()) - if abRelation > 0 { - // A's index cell is larger. - if ab.hasCrossingRelation(ai, bi) { - return true - } - } else if abRelation < 0 { - // B's index cell is larger. - if ba.hasCrossingRelation(bi, ai) { - return true - } - } else { - // The A and B cells are the same. Since the two cells - // have the same center point P, check whether P satisfies - // the crossing targets. - aClipped := ai.it.IndexCell().shapes[0] - bClipped := bi.it.IndexCell().shapes[0] - if containsCenterMatches(aClipped, ab.aCrossingTarget) && - containsCenterMatches(bClipped, ab.bCrossingTarget) { - return true - } - // Otherwise test all the edge crossings directly. - if aClipped.numEdges() > 0 && bClipped.numEdges() > 0 && ab.cellCrossesCell(aClipped, bClipped) { - return true - } - ai.next() - bi.next() - } - } - } - return false -} - -// containsRelation implements loopRelation for a contains operation. If -// A.ContainsPoint(P) == false && B.ContainsPoint(P) == true, it is equivalent -// to having an edge crossing (i.e., Contains returns false). -type containsRelation struct { - foundSharedVertex bool -} - -func (c *containsRelation) aCrossingTarget() crossingTarget { return crossingTargetDontCross } -func (c *containsRelation) bCrossingTarget() crossingTarget { return crossingTargetCross } -func (c *containsRelation) wedgesCross(a0, ab1, a2, b0, b2 Point) bool { - c.foundSharedVertex = true - return !WedgeContains(a0, ab1, a2, b0, b2) -} - -// intersectsRelation implements loopRelation for an intersects operation. Given -// two loops, A and B, if A.ContainsPoint(P) == true && B.ContainsPoint(P) == true, -// it is equivalent to having an edge crossing (i.e., Intersects returns true). 
-type intersectsRelation struct { - foundSharedVertex bool -} - -func (i *intersectsRelation) aCrossingTarget() crossingTarget { return crossingTargetCross } -func (i *intersectsRelation) bCrossingTarget() crossingTarget { return crossingTargetCross } -func (i *intersectsRelation) wedgesCross(a0, ab1, a2, b0, b2 Point) bool { - i.foundSharedVertex = true - return WedgeIntersects(a0, ab1, a2, b0, b2) -} - -// compareBoundaryRelation implements loopRelation for comparing boundaries. -// -// The compare boundary relation does not have a useful early-exit condition, -// so we return crossingTargetDontCare for both crossing targets. -// -// Aside: A possible early exit condition could be based on the following. -// If A contains a point of both B and ~B, then A intersects Boundary(B). -// If ~A contains a point of both B and ~B, then ~A intersects Boundary(B). -// So if the intersections of {A, ~A} with {B, ~B} are all non-empty, -// the return value is 0, i.e., Boundary(A) intersects Boundary(B). -// Unfortunately it isn't worth detecting this situation because by the -// time we have seen a point in all four intersection regions, we are also -// guaranteed to have seen at least one pair of crossing edges. -type compareBoundaryRelation struct { - reverse bool // True if the other loop should be reversed. - foundSharedVertex bool // True if any wedge was processed. - containsEdge bool // True if any edge of the other loop is contained by this loop. - excludesEdge bool // True if any edge of the other loop is excluded by this loop. 
-} - -func newCompareBoundaryRelation(reverse bool) *compareBoundaryRelation { - return &compareBoundaryRelation{reverse: reverse} -} - -func (c *compareBoundaryRelation) aCrossingTarget() crossingTarget { return crossingTargetDontCare } -func (c *compareBoundaryRelation) bCrossingTarget() crossingTarget { return crossingTargetDontCare } -func (c *compareBoundaryRelation) wedgesCross(a0, ab1, a2, b0, b2 Point) bool { - // Because we don't care about the interior of the other, only its boundary, - // it is sufficient to check whether this one contains the semiwedge (ab1, b2). - c.foundSharedVertex = true - if wedgeContainsSemiwedge(a0, ab1, a2, b2, c.reverse) { - c.containsEdge = true - } else { - c.excludesEdge = true - } - return c.containsEdge && c.excludesEdge -} - -// wedgeContainsSemiwedge reports whether the wedge (a0, ab1, a2) contains the -// "semiwedge" defined as any non-empty open set of rays immediately CCW from -// the edge (ab1, b2). If reverse is true, then substitute clockwise for CCW; -// this simulates what would happen if the direction of the other loop was reversed. -func wedgeContainsSemiwedge(a0, ab1, a2, b2 Point, reverse bool) bool { - if b2 == a0 || b2 == a2 { - // We have a shared or reversed edge. - return (b2 == a0) == reverse - } - return OrderedCCW(a0, a2, b2, ab1) -} - -// containsNonCrossingBoundary reports whether given two loops whose boundaries -// do not cross (see compareBoundary), if this loop contains the boundary of the -// other loop. If reverse is true, the boundary of the other loop is reversed -// first (which only affects the result when there are shared edges). This method -// is cheaper than compareBoundary because it does not test for edge intersections. -// -// This function requires that neither loop is empty, and that if the other is full, -// then reverse == false. -func (l *Loop) containsNonCrossingBoundary(other *Loop, reverseOther bool) bool { - // The bounds must intersect for containment. 
- if !l.bound.Intersects(other.bound) { - return false - } - - // Full loops are handled as though the loop surrounded the entire sphere. - if l.IsFull() { - return true - } - if other.IsFull() { - return false - } - - m, ok := l.findVertex(other.Vertex(0)) - if !ok { - // Since the other loops vertex 0 is not shared, we can check if this contains it. - return l.ContainsPoint(other.Vertex(0)) - } - // Otherwise check whether the edge (b0, b1) is contained by this loop. - return wedgeContainsSemiwedge(l.Vertex(m-1), l.Vertex(m), l.Vertex(m+1), - other.Vertex(1), reverseOther) -} - -// TODO(roberts): Differences from the C++ version: -// DistanceToPoint -// DistanceToBoundary -// Project -// ProjectToBoundary -// BoundaryApproxEqual -// BoundaryNear diff --git a/vendor/github.com/golang/geo/s2/matrix3x3.go b/vendor/github.com/golang/geo/s2/matrix3x3.go deleted file mode 100644 index 01696fe83..000000000 --- a/vendor/github.com/golang/geo/s2/matrix3x3.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package s2 - -import ( - "fmt" - - "github.com/golang/geo/r3" -) - -// matrix3x3 represents a traditional 3x3 matrix of floating point values. -// This is not a full fledged matrix. It only contains the pieces needed -// to satisfy the computations done within the s2 package. -type matrix3x3 [3][3]float64 - -// col returns the given column as a Point. 
-func (m *matrix3x3) col(col int) Point { - return Point{r3.Vector{m[0][col], m[1][col], m[2][col]}} -} - -// row returns the given row as a Point. -func (m *matrix3x3) row(row int) Point { - return Point{r3.Vector{m[row][0], m[row][1], m[row][2]}} -} - -// setCol sets the specified column to the value in the given Point. -func (m *matrix3x3) setCol(col int, p Point) *matrix3x3 { - m[0][col] = p.X - m[1][col] = p.Y - m[2][col] = p.Z - - return m -} - -// setRow sets the specified row to the value in the given Point. -func (m *matrix3x3) setRow(row int, p Point) *matrix3x3 { - m[row][0] = p.X - m[row][1] = p.Y - m[row][2] = p.Z - - return m -} - -// scale multiplies the matrix by the given value. -func (m *matrix3x3) scale(f float64) *matrix3x3 { - return &matrix3x3{ - [3]float64{f * m[0][0], f * m[0][1], f * m[0][2]}, - [3]float64{f * m[1][0], f * m[1][1], f * m[1][2]}, - [3]float64{f * m[2][0], f * m[2][1], f * m[2][2]}, - } -} - -// mul returns the multiplication of m by the Point p and converts the -// resulting 1x3 matrix into a Point. -func (m *matrix3x3) mul(p Point) Point { - return Point{r3.Vector{ - m[0][0]*p.X + m[0][1]*p.Y + m[0][2]*p.Z, - m[1][0]*p.X + m[1][1]*p.Y + m[1][2]*p.Z, - m[2][0]*p.X + m[2][1]*p.Y + m[2][2]*p.Z, - }} -} - -// det returns the determinant of this matrix. -func (m *matrix3x3) det() float64 { - // | a b c | - // det | d e f | = aei + bfg + cdh - ceg - bdi - afh - // | g h i | - return m[0][0]*m[1][1]*m[2][2] + m[0][1]*m[1][2]*m[2][0] + m[0][2]*m[1][0]*m[2][1] - - m[0][2]*m[1][1]*m[2][0] - m[0][1]*m[1][0]*m[2][2] - m[0][0]*m[1][2]*m[2][1] -} - -// transpose reflects the matrix along its diagonal and returns the result. -func (m *matrix3x3) transpose() *matrix3x3 { - m[0][1], m[1][0] = m[1][0], m[0][1] - m[0][2], m[2][0] = m[2][0], m[0][2] - m[1][2], m[2][1] = m[2][1], m[1][2] - - return m -} - -// String formats the matrix into an easier to read layout. 
-func (m *matrix3x3) String() string { - return fmt.Sprintf("[ %0.4f %0.4f %0.4f ] [ %0.4f %0.4f %0.4f ] [ %0.4f %0.4f %0.4f ]", - m[0][0], m[0][1], m[0][2], - m[1][0], m[1][1], m[1][2], - m[2][0], m[2][1], m[2][2], - ) -} - -// getFrame returns the orthonormal frame for the given point on the unit sphere. -func getFrame(p Point) matrix3x3 { - // Given the point p on the unit sphere, extend this into a right-handed - // coordinate frame of unit-length column vectors m = (x,y,z). Note that - // the vectors (x,y) are an orthonormal frame for the tangent space at point p, - // while p itself is an orthonormal frame for the normal space at p. - m := matrix3x3{} - m.setCol(2, p) - m.setCol(1, Point{p.Ortho()}) - m.setCol(0, Point{m.col(1).Cross(p.Vector)}) - return m -} - -// toFrame returns the coordinates of the given point with respect to its orthonormal basis m. -// The resulting point q satisfies the identity (m * q == p). -func toFrame(m matrix3x3, p Point) Point { - // The inverse of an orthonormal matrix is its transpose. - return m.transpose().mul(p) -} - -// fromFrame returns the coordinates of the given point in standard axis-aligned basis -// from its orthonormal basis m. -// The resulting point p satisfies the identity (p == m * q). -func fromFrame(m matrix3x3, q Point) Point { - return m.mul(q) -} diff --git a/vendor/github.com/golang/geo/s2/max_distance_targets.go b/vendor/github.com/golang/geo/s2/max_distance_targets.go deleted file mode 100644 index 92e916d98..000000000 --- a/vendor/github.com/golang/geo/s2/max_distance_targets.go +++ /dev/null @@ -1,306 +0,0 @@ -// Copyright 2019 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package s2 - -import ( - "math" - - "github.com/golang/geo/s1" -) - -// maxDistance implements distance as the supplementary distance (Pi - x) to find -// results that are the furthest using the distance related algorithms. -type maxDistance s1.ChordAngle - -func (m maxDistance) chordAngle() s1.ChordAngle { return s1.ChordAngle(m) } -func (m maxDistance) zero() distance { return maxDistance(s1.StraightChordAngle) } -func (m maxDistance) negative() distance { return maxDistance(s1.InfChordAngle()) } -func (m maxDistance) infinity() distance { return maxDistance(s1.NegativeChordAngle) } -func (m maxDistance) less(other distance) bool { return m.chordAngle() > other.chordAngle() } -func (m maxDistance) sub(other distance) distance { - return maxDistance(m.chordAngle() + other.chordAngle()) -} -func (m maxDistance) chordAngleBound() s1.ChordAngle { - return s1.StraightChordAngle - m.chordAngle() -} -func (m maxDistance) updateDistance(dist distance) (distance, bool) { - if dist.less(m) { - m = maxDistance(dist.chordAngle()) - return m, true - } - return m, false -} - -func (m maxDistance) fromChordAngle(o s1.ChordAngle) distance { - return maxDistance(o) -} - -// MaxDistanceToPointTarget is used for computing the maximum distance to a Point. -type MaxDistanceToPointTarget struct { - point Point - dist distance -} - -// NewMaxDistanceToPointTarget returns a new target for the given Point. 
-func NewMaxDistanceToPointTarget(point Point) *MaxDistanceToPointTarget { - m := maxDistance(0) - return &MaxDistanceToPointTarget{point: point, dist: &m} -} - -func (m *MaxDistanceToPointTarget) capBound() Cap { - return CapFromCenterChordAngle(Point{m.point.Mul(-1)}, (s1.ChordAngle(0))) -} - -func (m *MaxDistanceToPointTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) { - return dist.updateDistance(maxDistance(ChordAngleBetweenPoints(p, m.point))) -} - -func (m *MaxDistanceToPointTarget) updateDistanceToEdge(edge Edge, dist distance) (distance, bool) { - if d, ok := UpdateMaxDistance(m.point, edge.V0, edge.V1, dist.chordAngle()); ok { - dist, _ = dist.updateDistance(maxDistance(d)) - return dist, true - } - return dist, false -} - -func (m *MaxDistanceToPointTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) { - return dist.updateDistance(maxDistance(cell.MaxDistance(m.point))) -} - -func (m *MaxDistanceToPointTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool { - // For furthest points, we visit the polygons whose interior contains - // the antipode of the target point. These are the polygons whose - // distance to the target is maxDistance.zero() - q := NewContainsPointQuery(index, VertexModelSemiOpen) - return q.visitContainingShapes(Point{m.point.Mul(-1)}, func(shape Shape) bool { - return v(shape, m.point) - }) -} - -func (m *MaxDistanceToPointTarget) setMaxError(maxErr s1.ChordAngle) bool { return false } -func (m *MaxDistanceToPointTarget) maxBruteForceIndexSize() int { return 30 } -func (m *MaxDistanceToPointTarget) distance() distance { return m.dist } - -// MaxDistanceToEdgeTarget is used for computing the maximum distance to an Edge. -type MaxDistanceToEdgeTarget struct { - e Edge - dist distance -} - -// NewMaxDistanceToEdgeTarget returns a new target for the given Edge. 
-func NewMaxDistanceToEdgeTarget(e Edge) *MaxDistanceToEdgeTarget { - m := maxDistance(0) - return &MaxDistanceToEdgeTarget{e: e, dist: m} -} - -// capBound returns a Cap that bounds the antipode of the target. (This -// is the set of points whose maxDistance to the target is maxDistance.zero) -func (m *MaxDistanceToEdgeTarget) capBound() Cap { - // The following computes a radius equal to half the edge length in an - // efficient and numerically stable way. - d2 := float64(ChordAngleBetweenPoints(m.e.V0, m.e.V1)) - r2 := (0.5 * d2) / (1 + math.Sqrt(1-0.25*d2)) - return CapFromCenterChordAngle(Point{m.e.V0.Add(m.e.V1.Vector).Mul(-1).Normalize()}, s1.ChordAngleFromSquaredLength(r2)) -} - -func (m *MaxDistanceToEdgeTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) { - if d, ok := UpdateMaxDistance(p, m.e.V0, m.e.V1, dist.chordAngle()); ok { - dist, _ = dist.updateDistance(maxDistance(d)) - return dist, true - } - return dist, false -} - -func (m *MaxDistanceToEdgeTarget) updateDistanceToEdge(edge Edge, dist distance) (distance, bool) { - if d, ok := updateEdgePairMaxDistance(m.e.V0, m.e.V1, edge.V0, edge.V1, dist.chordAngle()); ok { - dist, _ = dist.updateDistance(maxDistance(d)) - return dist, true - } - return dist, false -} - -func (m *MaxDistanceToEdgeTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) { - return dist.updateDistance(maxDistance(cell.MaxDistanceToEdge(m.e.V0, m.e.V1))) -} - -func (m *MaxDistanceToEdgeTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool { - // We only need to test one edge point. That is because the method *must* - // visit a polygon if it fully contains the target, and *is allowed* to - // visit a polygon if it intersects the target. If the tested vertex is not - // contained, we know the full edge is not contained; if the tested vertex is - // contained, then the edge either is fully contained (must be visited) or it - // intersects (is allowed to be visited). 
We visit the center of the edge so - // that edge AB gives identical results to BA. - target := NewMaxDistanceToPointTarget(Point{m.e.V0.Add(m.e.V1.Vector).Normalize()}) - return target.visitContainingShapes(index, v) -} - -func (m *MaxDistanceToEdgeTarget) setMaxError(maxErr s1.ChordAngle) bool { return false } -func (m *MaxDistanceToEdgeTarget) maxBruteForceIndexSize() int { return 30 } -func (m *MaxDistanceToEdgeTarget) distance() distance { return m.dist } - -// MaxDistanceToCellTarget is used for computing the maximum distance to a Cell. -type MaxDistanceToCellTarget struct { - cell Cell - dist distance -} - -// NewMaxDistanceToCellTarget returns a new target for the given Cell. -func NewMaxDistanceToCellTarget(cell Cell) *MaxDistanceToCellTarget { - m := maxDistance(0) - return &MaxDistanceToCellTarget{cell: cell, dist: m} -} - -func (m *MaxDistanceToCellTarget) capBound() Cap { - c := m.cell.CapBound() - return CapFromCenterAngle(Point{c.Center().Mul(-1)}, c.Radius()) -} - -func (m *MaxDistanceToCellTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) { - return dist.updateDistance(maxDistance(m.cell.MaxDistance(p))) -} - -func (m *MaxDistanceToCellTarget) updateDistanceToEdge(edge Edge, dist distance) (distance, bool) { - return dist.updateDistance(maxDistance(m.cell.MaxDistanceToEdge(edge.V0, edge.V1))) -} - -func (m *MaxDistanceToCellTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) { - return dist.updateDistance(maxDistance(m.cell.MaxDistanceToCell(cell))) -} - -func (m *MaxDistanceToCellTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool { - // We only need to check one point here - cell center is simplest. - // See comment at MaxDistanceToEdgeTarget's visitContainingShapes. 
- target := NewMaxDistanceToPointTarget(m.cell.Center()) - return target.visitContainingShapes(index, v) -} - -func (m *MaxDistanceToCellTarget) setMaxError(maxErr s1.ChordAngle) bool { return false } -func (m *MaxDistanceToCellTarget) maxBruteForceIndexSize() int { return 30 } -func (m *MaxDistanceToCellTarget) distance() distance { return m.dist } - -// MaxDistanceToShapeIndexTarget is used for computing the maximum distance to a ShapeIndex. -type MaxDistanceToShapeIndexTarget struct { - index *ShapeIndex - query *EdgeQuery - dist distance -} - -// NewMaxDistanceToShapeIndexTarget returns a new target for the given ShapeIndex. -func NewMaxDistanceToShapeIndexTarget(index *ShapeIndex) *MaxDistanceToShapeIndexTarget { - m := maxDistance(0) - return &MaxDistanceToShapeIndexTarget{ - index: index, - dist: m, - query: NewFurthestEdgeQuery(index, NewFurthestEdgeQueryOptions()), - } -} - -// capBound returns a Cap that bounds the antipode of the target. This -// is the set of points whose maxDistance to the target is maxDistance.zero() -func (m *MaxDistanceToShapeIndexTarget) capBound() Cap { - // TODO(roberts): Depends on ShapeIndexRegion - // c := makeShapeIndexRegion(m.index).CapBound() - // return CapFromCenterRadius(Point{c.Center.Mul(-1)}, c.Radius()) - panic("not implemented yet") -} - -func (m *MaxDistanceToShapeIndexTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) { - m.query.opts.distanceLimit = dist.chordAngle() - target := NewMaxDistanceToPointTarget(p) - r := m.query.findEdge(target, m.query.opts) - if r.shapeID < 0 { - return dist, false - } - return r.distance, true -} - -func (m *MaxDistanceToShapeIndexTarget) updateDistanceToEdge(edge Edge, dist distance) (distance, bool) { - m.query.opts.distanceLimit = dist.chordAngle() - target := NewMaxDistanceToEdgeTarget(edge) - r := m.query.findEdge(target, m.query.opts) - if r.shapeID < 0 { - return dist, false - } - return r.distance, true -} - -func (m *MaxDistanceToShapeIndexTarget) 
updateDistanceToCell(cell Cell, dist distance) (distance, bool) { - m.query.opts.distanceLimit = dist.chordAngle() - target := NewMaxDistanceToCellTarget(cell) - r := m.query.findEdge(target, m.query.opts) - if r.shapeID < 0 { - return dist, false - } - return r.distance, true -} - -// visitContainingShapes returns the polygons containing the antipodal -// reflection of *any* connected component for target types consisting of -// multiple connected components. It is sufficient to test containment of -// one vertex per connected component, since this allows us to also return -// any polygon whose boundary has distance.zero() to the target. -func (m *MaxDistanceToShapeIndexTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool { - // It is sufficient to find the set of chain starts in the target index - // (i.e., one vertex per connected component of edges) that are contained by - // the query index, except for one special case to handle full polygons. - // - // TODO(roberts): Do this by merge-joining the two ShapeIndexes and share - // the code with BooleanOperation. - for _, shape := range m.index.shapes { - numChains := shape.NumChains() - // Shapes that don't have any edges require a special case (below). - testedPoint := false - for c := 0; c < numChains; c++ { - chain := shape.Chain(c) - if chain.Length == 0 { - continue - } - testedPoint = true - target := NewMaxDistanceToPointTarget(shape.ChainEdge(c, 0).V0) - if !target.visitContainingShapes(index, v) { - return false - } - } - if !testedPoint { - // Special case to handle full polygons. 
- ref := shape.ReferencePoint() - if !ref.Contained { - continue - } - target := NewMaxDistanceToPointTarget(ref.Point) - if !target.visitContainingShapes(index, v) { - return false - } - } - } - return true -} - -func (m *MaxDistanceToShapeIndexTarget) setMaxError(maxErr s1.ChordAngle) bool { - m.query.opts.maxError = maxErr - return true -} -func (m *MaxDistanceToShapeIndexTarget) maxBruteForceIndexSize() int { return 30 } -func (m *MaxDistanceToShapeIndexTarget) distance() distance { return m.dist } -func (m *MaxDistanceToShapeIndexTarget) setIncludeInteriors(b bool) { - m.query.opts.includeInteriors = b -} -func (m *MaxDistanceToShapeIndexTarget) setUseBruteForce(b bool) { m.query.opts.useBruteForce = b } - -// TODO(roberts): Remaining methods -// -// func (m *MaxDistanceToShapeIndexTarget) capBound() Cap { -// CellUnionTarget diff --git a/vendor/github.com/golang/geo/s2/metric.go b/vendor/github.com/golang/geo/s2/metric.go deleted file mode 100644 index 53db3d317..000000000 --- a/vendor/github.com/golang/geo/s2/metric.go +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package s2 - -// This file implements functions for various S2 measurements. - -import "math" - -// A Metric is a measure for cells. It is used to describe the shape and size -// of cells. They are useful for deciding which cell level to use in order to -// satisfy a given condition (e.g. 
that cell vertices must be no further than -// "x" apart). You can use the Value(level) method to compute the corresponding -// length or area on the unit sphere for cells at a given level. The minimum -// and maximum bounds are valid for cells at all levels, but they may be -// somewhat conservative for very large cells (e.g. face cells). -type Metric struct { - // Dim is either 1 or 2, for a 1D or 2D metric respectively. - Dim int - // Deriv is the scaling factor for the metric. - Deriv float64 -} - -// Defined metrics. -// Of the projection methods defined in C++, Go only supports the quadratic projection. - -// Each cell is bounded by four planes passing through its four edges and -// the center of the sphere. These metrics relate to the angle between each -// pair of opposite bounding planes, or equivalently, between the planes -// corresponding to two different s-values or two different t-values. -var ( - MinAngleSpanMetric = Metric{1, 4.0 / 3} - AvgAngleSpanMetric = Metric{1, math.Pi / 2} - MaxAngleSpanMetric = Metric{1, 1.704897179199218452} -) - -// The width of geometric figure is defined as the distance between two -// parallel bounding lines in a given direction. For cells, the minimum -// width is always attained between two opposite edges, and the maximum -// width is attained between two opposite vertices. However, for our -// purposes we redefine the width of a cell as the perpendicular distance -// between a pair of opposite edges. A cell therefore has two widths, one -// in each direction. The minimum width according to this definition agrees -// with the classic geometric one, but the maximum width is different. (The -// maximum geometric width corresponds to MaxDiag defined below.) -// -// The average width in both directions for all cells at level k is approximately -// AvgWidthMetric.Value(k). 
-// -// The width is useful for bounding the minimum or maximum distance from a -// point on one edge of a cell to the closest point on the opposite edge. -// For example, this is useful when growing regions by a fixed distance. -var ( - MinWidthMetric = Metric{1, 2 * math.Sqrt2 / 3} - AvgWidthMetric = Metric{1, 1.434523672886099389} - MaxWidthMetric = Metric{1, MaxAngleSpanMetric.Deriv} -) - -// The edge length metrics can be used to bound the minimum, maximum, -// or average distance from the center of one cell to the center of one of -// its edge neighbors. In particular, it can be used to bound the distance -// between adjacent cell centers along the space-filling Hilbert curve for -// cells at any given level. -var ( - MinEdgeMetric = Metric{1, 2 * math.Sqrt2 / 3} - AvgEdgeMetric = Metric{1, 1.459213746386106062} - MaxEdgeMetric = Metric{1, MaxAngleSpanMetric.Deriv} - - // MaxEdgeAspect is the maximum edge aspect ratio over all cells at any level, - // where the edge aspect ratio of a cell is defined as the ratio of its longest - // edge length to its shortest edge length. - MaxEdgeAspect = 1.442615274452682920 - - MinAreaMetric = Metric{2, 8 * math.Sqrt2 / 9} - AvgAreaMetric = Metric{2, 4 * math.Pi / 6} - MaxAreaMetric = Metric{2, 2.635799256963161491} -) - -// The maximum diagonal is also the maximum diameter of any cell, -// and also the maximum geometric width (see the comment for widths). For -// example, the distance from an arbitrary point to the closest cell center -// at a given level is at most half the maximum diagonal length. -var ( - MinDiagMetric = Metric{1, 8 * math.Sqrt2 / 9} - AvgDiagMetric = Metric{1, 2.060422738998471683} - MaxDiagMetric = Metric{1, 2.438654594434021032} - - // MaxDiagAspect is the maximum diagonal aspect ratio over all cells at any - // level, where the diagonal aspect ratio of a cell is defined as the ratio - // of its longest diagonal length to its shortest diagonal length. 
- MaxDiagAspect = math.Sqrt(3) -) - -// Value returns the value of the metric at the given level. -func (m Metric) Value(level int) float64 { - return math.Ldexp(m.Deriv, -m.Dim*level) -} - -// MinLevel returns the minimum level such that the metric is at most -// the given value, or maxLevel (30) if there is no such level. -// -// For example, MinLevel(0.1) returns the minimum level such that all cell diagonal -// lengths are 0.1 or smaller. The returned value is always a valid level. -// -// In C++, this is called GetLevelForMaxValue. -func (m Metric) MinLevel(val float64) int { - if val < 0 { - return maxLevel - } - - level := -(math.Ilogb(val/m.Deriv) >> uint(m.Dim-1)) - if level > maxLevel { - level = maxLevel - } - if level < 0 { - level = 0 - } - return level -} - -// MaxLevel returns the maximum level such that the metric is at least -// the given value, or zero if there is no such level. -// -// For example, MaxLevel(0.1) returns the maximum level such that all cells have a -// minimum width of 0.1 or larger. The returned value is always a valid level. -// -// In C++, this is called GetLevelForMinValue. -func (m Metric) MaxLevel(val float64) int { - if val <= 0 { - return maxLevel - } - - level := math.Ilogb(m.Deriv/val) >> uint(m.Dim-1) - if level > maxLevel { - level = maxLevel - } - if level < 0 { - level = 0 - } - return level -} - -// ClosestLevel returns the level at which the metric has approximately the given -// value. The return value is always a valid level. For example, -// AvgEdgeMetric.ClosestLevel(0.1) returns the level at which the average cell edge -// length is approximately 0.1. 
-func (m Metric) ClosestLevel(val float64) int { - x := math.Sqrt2 - if m.Dim == 2 { - x = 2 - } - return m.MinLevel(x * val) -} diff --git a/vendor/github.com/golang/geo/s2/min_distance_targets.go b/vendor/github.com/golang/geo/s2/min_distance_targets.go deleted file mode 100644 index b4cbd43ef..000000000 --- a/vendor/github.com/golang/geo/s2/min_distance_targets.go +++ /dev/null @@ -1,362 +0,0 @@ -// Copyright 2019 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package s2 - -import ( - "math" - - "github.com/golang/geo/s1" -) - -// minDistance implements distance interface to find closest distance types. -type minDistance s1.ChordAngle - -func (m minDistance) chordAngle() s1.ChordAngle { return s1.ChordAngle(m) } -func (m minDistance) zero() distance { return minDistance(0) } -func (m minDistance) negative() distance { return minDistance(s1.NegativeChordAngle) } -func (m minDistance) infinity() distance { return minDistance(s1.InfChordAngle()) } -func (m minDistance) less(other distance) bool { return m.chordAngle() < other.chordAngle() } -func (m minDistance) sub(other distance) distance { - return minDistance(m.chordAngle() - other.chordAngle()) -} -func (m minDistance) chordAngleBound() s1.ChordAngle { - return m.chordAngle().Expanded(m.chordAngle().MaxAngleError()) -} - -// updateDistance updates its own value if the other value is less() than it is, -// and reports if it updated. 
-func (m minDistance) updateDistance(dist distance) (distance, bool) { - if dist.less(m) { - m = minDistance(dist.chordAngle()) - return m, true - } - return m, false -} - -func (m minDistance) fromChordAngle(o s1.ChordAngle) distance { - return minDistance(o) -} - -// MinDistanceToPointTarget is a type for computing the minimum distance to a Point. -type MinDistanceToPointTarget struct { - point Point - dist distance -} - -// NewMinDistanceToPointTarget returns a new target for the given Point. -func NewMinDistanceToPointTarget(point Point) *MinDistanceToPointTarget { - m := minDistance(0) - return &MinDistanceToPointTarget{point: point, dist: &m} -} - -func (m *MinDistanceToPointTarget) capBound() Cap { - return CapFromCenterChordAngle(m.point, s1.ChordAngle(0)) -} - -func (m *MinDistanceToPointTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) { - var ok bool - dist, ok = dist.updateDistance(minDistance(ChordAngleBetweenPoints(p, m.point))) - return dist, ok -} - -func (m *MinDistanceToPointTarget) updateDistanceToEdge(edge Edge, dist distance) (distance, bool) { - if d, ok := UpdateMinDistance(m.point, edge.V0, edge.V1, dist.chordAngle()); ok { - dist, _ = dist.updateDistance(minDistance(d)) - return dist, true - } - return dist, false -} - -func (m *MinDistanceToPointTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) { - var ok bool - dist, ok = dist.updateDistance(minDistance(cell.Distance(m.point))) - return dist, ok -} - -func (m *MinDistanceToPointTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool { - // For furthest points, we visit the polygons whose interior contains - // the antipode of the target point. 
These are the polygons whose - // distance to the target is maxDistance.zero() - q := NewContainsPointQuery(index, VertexModelSemiOpen) - return q.visitContainingShapes(m.point, func(shape Shape) bool { - return v(shape, m.point) - }) -} - -func (m *MinDistanceToPointTarget) setMaxError(maxErr s1.ChordAngle) bool { return false } -func (m *MinDistanceToPointTarget) maxBruteForceIndexSize() int { return 30 } -func (m *MinDistanceToPointTarget) distance() distance { return m.dist } - -// ---------------------------------------------------------- - -// MinDistanceToEdgeTarget is a type for computing the minimum distance to an Edge. -type MinDistanceToEdgeTarget struct { - e Edge - dist distance -} - -// NewMinDistanceToEdgeTarget returns a new target for the given Edge. -func NewMinDistanceToEdgeTarget(e Edge) *MinDistanceToEdgeTarget { - m := minDistance(0) - return &MinDistanceToEdgeTarget{e: e, dist: m} -} - -// capBound returns a Cap that bounds the antipode of the target. (This -// is the set of points whose maxDistance to the target is maxDistance.zero) -func (m *MinDistanceToEdgeTarget) capBound() Cap { - // The following computes a radius equal to half the edge length in an - // efficient and numerically stable way. 
- d2 := float64(ChordAngleBetweenPoints(m.e.V0, m.e.V1)) - r2 := (0.5 * d2) / (1 + math.Sqrt(1-0.25*d2)) - return CapFromCenterChordAngle(Point{m.e.V0.Add(m.e.V1.Vector).Normalize()}, s1.ChordAngleFromSquaredLength(r2)) -} - -func (m *MinDistanceToEdgeTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) { - if d, ok := UpdateMinDistance(p, m.e.V0, m.e.V1, dist.chordAngle()); ok { - dist, _ = dist.updateDistance(minDistance(d)) - return dist, true - } - return dist, false -} - -func (m *MinDistanceToEdgeTarget) updateDistanceToEdge(edge Edge, dist distance) (distance, bool) { - if d, ok := updateEdgePairMinDistance(m.e.V0, m.e.V1, edge.V0, edge.V1, dist.chordAngle()); ok { - dist, _ = dist.updateDistance(minDistance(d)) - return dist, true - } - return dist, false -} - -func (m *MinDistanceToEdgeTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) { - return dist.updateDistance(minDistance(cell.DistanceToEdge(m.e.V0, m.e.V1))) -} - -func (m *MinDistanceToEdgeTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool { - // We test the center of the edge in order to ensure that edge targets AB - // and BA yield identical results (which is not guaranteed by the API but - // users might expect). Other options would be to test both endpoints, or - // return different results for AB and BA in some cases. - target := NewMinDistanceToPointTarget(Point{m.e.V0.Add(m.e.V1.Vector).Normalize()}) - return target.visitContainingShapes(index, v) -} - -func (m *MinDistanceToEdgeTarget) setMaxError(maxErr s1.ChordAngle) bool { return false } -func (m *MinDistanceToEdgeTarget) maxBruteForceIndexSize() int { return 30 } -func (m *MinDistanceToEdgeTarget) distance() distance { return m.dist } - -// ---------------------------------------------------------- - -// MinDistanceToCellTarget is a type for computing the minimum distance to a Cell. 
-type MinDistanceToCellTarget struct { - cell Cell - dist distance -} - -// NewMinDistanceToCellTarget returns a new target for the given Cell. -func NewMinDistanceToCellTarget(cell Cell) *MinDistanceToCellTarget { - m := minDistance(0) - return &MinDistanceToCellTarget{cell: cell, dist: m} -} - -func (m *MinDistanceToCellTarget) capBound() Cap { - return m.cell.CapBound() -} - -func (m *MinDistanceToCellTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) { - return dist.updateDistance(minDistance(m.cell.Distance(p))) -} - -func (m *MinDistanceToCellTarget) updateDistanceToEdge(edge Edge, dist distance) (distance, bool) { - return dist.updateDistance(minDistance(m.cell.DistanceToEdge(edge.V0, edge.V1))) -} - -func (m *MinDistanceToCellTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) { - return dist.updateDistance(minDistance(m.cell.DistanceToCell(cell))) -} - -func (m *MinDistanceToCellTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool { - // The simplest approach is simply to return the polygons that contain the - // cell center. Alternatively, if the index cell is smaller than the target - // cell then we could return all polygons that are present in the - // shapeIndexCell, but since the index is built conservatively this may - // include some polygons that don't quite intersect the cell. So we would - // either need to recheck for intersection more accurately, or weaken the - // VisitContainingShapes contract so that it only guarantees approximate - // intersection, neither of which seems like a good tradeoff. 
- target := NewMinDistanceToPointTarget(m.cell.Center()) - return target.visitContainingShapes(index, v) -} -func (m *MinDistanceToCellTarget) setMaxError(maxErr s1.ChordAngle) bool { return false } -func (m *MinDistanceToCellTarget) maxBruteForceIndexSize() int { return 30 } -func (m *MinDistanceToCellTarget) distance() distance { return m.dist } - -// ---------------------------------------------------------- - -/* -// MinDistanceToCellUnionTarget is a type for computing the minimum distance to a CellUnion. -type MinDistanceToCellUnionTarget struct { - cu CellUnion - query *ClosestCellQuery - dist distance -} - -// NewMinDistanceToCellUnionTarget returns a new target for the given CellUnion. -func NewMinDistanceToCellUnionTarget(cu CellUnion) *MinDistanceToCellUnionTarget { - m := minDistance(0) - return &MinDistanceToCellUnionTarget{cu: cu, dist: m} -} - -func (m *MinDistanceToCellUnionTarget) capBound() Cap { - return m.cu.CapBound() -} - -func (m *MinDistanceToCellUnionTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) { - m.query.opts.DistanceLimit = dist.chordAngle() - target := NewMinDistanceToPointTarget(p) - r := m.query.findEdge(target) - if r.ShapeID < 0 { - return dist, false - } - return minDistance(r.Distance), true -} - -func (m *MinDistanceToCellUnionTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool { - // We test the center of the edge in order to ensure that edge targets AB - // and BA yield identical results (which is not guaranteed by the API but - // users might expect). Other options would be to test both endpoints, or - // return different results for AB and BA in some cases. 
- target := NewMinDistanceToPointTarget(Point{m.e.V0.Add(m.e.V1.Vector).Normalize()}) - return target.visitContainingShapes(index, v) -} -func (m *MinDistanceToCellUnionTarget) setMaxError(maxErr s1.ChordAngle) bool { - m.query.opts.MaxError = maxErr - return true -} -func (m *MinDistanceToCellUnionTarget) maxBruteForceIndexSize() int { return 30 } -func (m *MinDistanceToCellUnionTarget) distance() distance { return m.dist } -*/ - -// ---------------------------------------------------------- - -// MinDistanceToShapeIndexTarget is a type for computing the minimum distance to a ShapeIndex. -type MinDistanceToShapeIndexTarget struct { - index *ShapeIndex - query *EdgeQuery - dist distance -} - -// NewMinDistanceToShapeIndexTarget returns a new target for the given ShapeIndex. -func NewMinDistanceToShapeIndexTarget(index *ShapeIndex) *MinDistanceToShapeIndexTarget { - m := minDistance(0) - return &MinDistanceToShapeIndexTarget{ - index: index, - dist: m, - query: NewClosestEdgeQuery(index, NewClosestEdgeQueryOptions()), - } -} - -func (m *MinDistanceToShapeIndexTarget) capBound() Cap { - // TODO(roberts): Depends on ShapeIndexRegion existing. 
- // c := makeS2ShapeIndexRegion(m.index).CapBound() - // return CapFromCenterRadius(Point{c.Center.Mul(-1)}, c.Radius()) - panic("not implemented yet") -} - -func (m *MinDistanceToShapeIndexTarget) updateDistanceToPoint(p Point, dist distance) (distance, bool) { - m.query.opts.distanceLimit = dist.chordAngle() - target := NewMinDistanceToPointTarget(p) - r := m.query.findEdge(target, m.query.opts) - if r.shapeID < 0 { - return dist, false - } - return r.distance, true -} - -func (m *MinDistanceToShapeIndexTarget) updateDistanceToEdge(edge Edge, dist distance) (distance, bool) { - m.query.opts.distanceLimit = dist.chordAngle() - target := NewMinDistanceToEdgeTarget(edge) - r := m.query.findEdge(target, m.query.opts) - if r.shapeID < 0 { - return dist, false - } - return r.distance, true -} - -func (m *MinDistanceToShapeIndexTarget) updateDistanceToCell(cell Cell, dist distance) (distance, bool) { - m.query.opts.distanceLimit = dist.chordAngle() - target := NewMinDistanceToCellTarget(cell) - r := m.query.findEdge(target, m.query.opts) - if r.shapeID < 0 { - return dist, false - } - return r.distance, true -} - -// For target types consisting of multiple connected components (such as this one), -// this method should return the polygons containing the antipodal reflection of -// *any* connected component. (It is sufficient to test containment of one vertex per -// connected component, since this allows us to also return any polygon whose -// boundary has distance.zero() to the target.) -func (m *MinDistanceToShapeIndexTarget) visitContainingShapes(index *ShapeIndex, v shapePointVisitorFunc) bool { - // It is sufficient to find the set of chain starts in the target index - // (i.e., one vertex per connected component of edges) that are contained by - // the query index, except for one special case to handle full polygons. - // - // TODO(roberts): Do this by merge-joining the two ShapeIndexes. 
- for _, shape := range m.index.shapes { - numChains := shape.NumChains() - // Shapes that don't have any edges require a special case (below). - testedPoint := false - for c := 0; c < numChains; c++ { - chain := shape.Chain(c) - if chain.Length == 0 { - continue - } - testedPoint = true - target := NewMinDistanceToPointTarget(shape.ChainEdge(c, 0).V0) - if !target.visitContainingShapes(index, v) { - return false - } - } - if !testedPoint { - // Special case to handle full polygons. - ref := shape.ReferencePoint() - if !ref.Contained { - continue - } - target := NewMinDistanceToPointTarget(ref.Point) - if !target.visitContainingShapes(index, v) { - return false - } - } - } - return true -} - -func (m *MinDistanceToShapeIndexTarget) setMaxError(maxErr s1.ChordAngle) bool { - m.query.opts.maxError = maxErr - return true -} -func (m *MinDistanceToShapeIndexTarget) maxBruteForceIndexSize() int { return 25 } -func (m *MinDistanceToShapeIndexTarget) distance() distance { return m.dist } -func (m *MinDistanceToShapeIndexTarget) setIncludeInteriors(b bool) { - m.query.opts.includeInteriors = b -} -func (m *MinDistanceToShapeIndexTarget) setUseBruteForce(b bool) { m.query.opts.useBruteForce = b } - -// TODO(roberts): Remaining methods -// -// func (m *MinDistanceToShapeIndexTarget) capBound() Cap { -// CellUnionTarget diff --git a/vendor/github.com/golang/geo/s2/nthderivative.go b/vendor/github.com/golang/geo/s2/nthderivative.go deleted file mode 100644 index 73445d6c9..000000000 --- a/vendor/github.com/golang/geo/s2/nthderivative.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2017 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package s2 - -// nthDerivativeCoder provides Nth Derivative Coding. -// (In signal processing disciplines, this is known as N-th Delta Coding.) -// -// Good for varint coding integer sequences with polynomial trends. -// -// Instead of coding a sequence of values directly, code its nth-order discrete -// derivative. Overflow in integer addition and subtraction makes this a -// lossless transform. -// -// constant linear quadratic -// trend trend trend -// / \ / \ / \_ -// input |0 0 0 0 1 2 3 4 9 16 25 36 -// 0th derivative(identity) |0 0 0 0 1 2 3 4 9 16 25 36 -// 1st derivative(delta coding) | 0 0 0 1 1 1 1 5 7 9 11 -// 2nd derivative(linear prediction) | 0 0 1 0 0 0 4 2 2 2 -// ------------------------------------- -// 0 1 2 3 4 5 6 7 8 9 10 11 -// n in sequence -// -// Higher-order codings can break even or be detrimental on other sequences. -// -// random oscillating -// / \ / \_ -// input |5 9 6 1 8 8 2 -2 4 -4 6 -6 -// 0th derivative(identity) |5 9 6 1 8 8 2 -2 4 -4 6 -6 -// 1st derivative(delta coding) | 4 -3 -5 7 0 -6 -4 6 -8 10 -12 -// 2nd derivative(linear prediction) | -7 -2 12 -7 -6 2 10 -14 18 -22 -// --------------------------------------- -// 0 1 2 3 4 5 6 7 8 9 10 11 -// n in sequence -// -// Note that the nth derivative isn't available until sequence item n. Earlier -// values are coded at lower order. For the above table, read 5 4 -7 -2 12 ... 
-type nthDerivativeCoder struct { - n, m int - memory [10]int32 -} - -// newNthDerivativeCoder returns a new coder, where n is the derivative order of the encoder (the N in NthDerivative). -// n must be within [0,10]. -func newNthDerivativeCoder(n int) *nthDerivativeCoder { - c := &nthDerivativeCoder{n: n} - if n < 0 || n > len(c.memory) { - panic("unsupported n. Must be within [0,10].") - } - return c -} - -func (c *nthDerivativeCoder) encode(k int32) int32 { - for i := 0; i < c.m; i++ { - delta := k - c.memory[i] - c.memory[i] = k - k = delta - } - if c.m < c.n { - c.memory[c.m] = k - c.m++ - } - return k -} - -func (c *nthDerivativeCoder) decode(k int32) int32 { - if c.m < c.n { - c.m++ - } - for i := c.m - 1; i >= 0; i-- { - c.memory[i] += k - k = c.memory[i] - } - return k -} diff --git a/vendor/github.com/golang/geo/s2/paddedcell.go b/vendor/github.com/golang/geo/s2/paddedcell.go deleted file mode 100644 index ac304a6cc..000000000 --- a/vendor/github.com/golang/geo/s2/paddedcell.go +++ /dev/null @@ -1,252 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package s2 - -import ( - "github.com/golang/geo/r1" - "github.com/golang/geo/r2" -) - -// PaddedCell represents a Cell whose (u,v)-range has been expanded on -// all sides by a given amount of "padding". 
Unlike Cell, its methods and -// representation are optimized for clipping edges against Cell boundaries -// to determine which cells are intersected by a given set of edges. -type PaddedCell struct { - id CellID - padding float64 - bound r2.Rect - middle r2.Rect // A rect in (u, v)-space that belongs to all four children. - iLo, jLo int // Minimum (i,j)-coordinates of this cell before padding - orientation int // Hilbert curve orientation of this cell. - level int -} - -// PaddedCellFromCellID constructs a padded cell with the given padding. -func PaddedCellFromCellID(id CellID, padding float64) *PaddedCell { - p := &PaddedCell{ - id: id, - padding: padding, - middle: r2.EmptyRect(), - } - - // Fast path for constructing a top-level face (the most common case). - if id.isFace() { - limit := padding + 1 - p.bound = r2.Rect{r1.Interval{-limit, limit}, r1.Interval{-limit, limit}} - p.middle = r2.Rect{r1.Interval{-padding, padding}, r1.Interval{-padding, padding}} - p.orientation = id.Face() & 1 - return p - } - - _, p.iLo, p.jLo, p.orientation = id.faceIJOrientation() - p.level = id.Level() - p.bound = ijLevelToBoundUV(p.iLo, p.jLo, p.level).ExpandedByMargin(padding) - ijSize := sizeIJ(p.level) - p.iLo &= -ijSize - p.jLo &= -ijSize - - return p -} - -// PaddedCellFromParentIJ constructs the child of parent with the given (i,j) index. -// The four child cells have indices of (0,0), (0,1), (1,0), (1,1), where the i and j -// indices correspond to increasing u- and v-values respectively. -func PaddedCellFromParentIJ(parent *PaddedCell, i, j int) *PaddedCell { - // Compute the position and orientation of the child incrementally from the - // orientation of the parent. 
- pos := ijToPos[parent.orientation][2*i+j] - - p := &PaddedCell{ - id: parent.id.Children()[pos], - padding: parent.padding, - bound: parent.bound, - orientation: parent.orientation ^ posToOrientation[pos], - level: parent.level + 1, - middle: r2.EmptyRect(), - } - - ijSize := sizeIJ(p.level) - p.iLo = parent.iLo + i*ijSize - p.jLo = parent.jLo + j*ijSize - - // For each child, one corner of the bound is taken directly from the parent - // while the diagonally opposite corner is taken from middle(). - middle := parent.Middle() - if i == 1 { - p.bound.X.Lo = middle.X.Lo - } else { - p.bound.X.Hi = middle.X.Hi - } - if j == 1 { - p.bound.Y.Lo = middle.Y.Lo - } else { - p.bound.Y.Hi = middle.Y.Hi - } - - return p -} - -// CellID returns the CellID this padded cell represents. -func (p PaddedCell) CellID() CellID { - return p.id -} - -// Padding returns the amount of padding on this cell. -func (p PaddedCell) Padding() float64 { - return p.padding -} - -// Level returns the level this cell is at. -func (p PaddedCell) Level() int { - return p.level -} - -// Center returns the center of this cell. -func (p PaddedCell) Center() Point { - ijSize := sizeIJ(p.level) - si := uint32(2*p.iLo + ijSize) - ti := uint32(2*p.jLo + ijSize) - return Point{faceSiTiToXYZ(p.id.Face(), si, ti).Normalize()} -} - -// Middle returns the rectangle in the middle of this cell that belongs to -// all four of its children in (u,v)-space. -func (p *PaddedCell) Middle() r2.Rect { - // We compute this field lazily because it is not needed the majority of the - // time (i.e., for cells where the recursion terminates). - if p.middle.IsEmpty() { - ijSize := sizeIJ(p.level) - u := stToUV(siTiToST(uint32(2*p.iLo + ijSize))) - v := stToUV(siTiToST(uint32(2*p.jLo + ijSize))) - p.middle = r2.Rect{ - r1.Interval{u - p.padding, u + p.padding}, - r1.Interval{v - p.padding, v + p.padding}, - } - } - return p.middle -} - -// Bound returns the bounds for this cell in (u,v)-space including padding. 
-func (p PaddedCell) Bound() r2.Rect { - return p.bound -} - -// ChildIJ returns the (i,j) coordinates for the child cell at the given traversal -// position. The traversal position corresponds to the order in which child -// cells are visited by the Hilbert curve. -func (p PaddedCell) ChildIJ(pos int) (i, j int) { - ij := posToIJ[p.orientation][pos] - return ij >> 1, ij & 1 -} - -// EntryVertex return the vertex where the space-filling curve enters this cell. -func (p PaddedCell) EntryVertex() Point { - // The curve enters at the (0,0) vertex unless the axis directions are - // reversed, in which case it enters at the (1,1) vertex. - i := p.iLo - j := p.jLo - if p.orientation&invertMask != 0 { - ijSize := sizeIJ(p.level) - i += ijSize - j += ijSize - } - return Point{faceSiTiToXYZ(p.id.Face(), uint32(2*i), uint32(2*j)).Normalize()} -} - -// ExitVertex returns the vertex where the space-filling curve exits this cell. -func (p PaddedCell) ExitVertex() Point { - // The curve exits at the (1,0) vertex unless the axes are swapped or - // inverted but not both, in which case it exits at the (0,1) vertex. - i := p.iLo - j := p.jLo - ijSize := sizeIJ(p.level) - if p.orientation == 0 || p.orientation == swapMask+invertMask { - i += ijSize - } else { - j += ijSize - } - return Point{faceSiTiToXYZ(p.id.Face(), uint32(2*i), uint32(2*j)).Normalize()} -} - -// ShrinkToFit returns the smallest CellID that contains all descendants of this -// padded cell whose bounds intersect the given rect. For algorithms that use -// recursive subdivision to find the cells that intersect a particular object, this -// method can be used to skip all of the initial subdivision steps where only -// one child needs to be expanded. -// -// Note that this method is not the same as returning the smallest cell that contains -// the intersection of this cell with rect. 
Because of the padding, even if one child -// completely contains rect it is still possible that a neighboring child may also -// intersect the given rect. -// -// The provided Rect must intersect the bounds of this cell. -func (p *PaddedCell) ShrinkToFit(rect r2.Rect) CellID { - // Quick rejection test: if rect contains the center of this cell along - // either axis, then no further shrinking is possible. - if p.level == 0 { - // Fast path (most calls to this function start with a face cell). - if rect.X.Contains(0) || rect.Y.Contains(0) { - return p.id - } - } - - ijSize := sizeIJ(p.level) - if rect.X.Contains(stToUV(siTiToST(uint32(2*p.iLo+ijSize)))) || - rect.Y.Contains(stToUV(siTiToST(uint32(2*p.jLo+ijSize)))) { - return p.id - } - - // Otherwise we expand rect by the given padding on all sides and find - // the range of coordinates that it spans along the i- and j-axes. We then - // compute the highest bit position at which the min and max coordinates - // differ. This corresponds to the first cell level at which at least two - // children intersect rect. - - // Increase the padding to compensate for the error in uvToST. - // (The constant below is a provable upper bound on the additional error.) - padded := rect.ExpandedByMargin(p.padding + 1.5*dblEpsilon) - iMin, jMin := p.iLo, p.jLo // Min i- or j- coordinate spanned by padded - var iXor, jXor int // XOR of the min and max i- or j-coordinates - - if iMin < stToIJ(uvToST(padded.X.Lo)) { - iMin = stToIJ(uvToST(padded.X.Lo)) - } - if a, b := p.iLo+ijSize-1, stToIJ(uvToST(padded.X.Hi)); a <= b { - iXor = iMin ^ a - } else { - iXor = iMin ^ b - } - - if jMin < stToIJ(uvToST(padded.Y.Lo)) { - jMin = stToIJ(uvToST(padded.Y.Lo)) - } - if a, b := p.jLo+ijSize-1, stToIJ(uvToST(padded.Y.Hi)); a <= b { - jXor = jMin ^ a - } else { - jXor = jMin ^ b - } - - // Compute the highest bit position where the two i- or j-endpoints differ, - // and then choose the cell level that includes both of these endpoints. 
So - // if both pairs of endpoints are equal we choose maxLevel; if they differ - // only at bit 0, we choose (maxLevel - 1), and so on. - levelMSB := uint64(((iXor | jXor) << 1) + 1) - level := maxLevel - findMSBSetNonZero64(levelMSB) - if level <= p.level { - return p.id - } - - return cellIDFromFaceIJ(p.id.Face(), iMin, jMin).Parent(level) -} diff --git a/vendor/github.com/golang/geo/s2/point.go b/vendor/github.com/golang/geo/s2/point.go deleted file mode 100644 index 89e7ae0ed..000000000 --- a/vendor/github.com/golang/geo/s2/point.go +++ /dev/null @@ -1,258 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package s2 - -import ( - "fmt" - "io" - "math" - "sort" - - "github.com/golang/geo/r3" - "github.com/golang/geo/s1" -) - -// Point represents a point on the unit sphere as a normalized 3D vector. -// Fields should be treated as read-only. Use one of the factory methods for creation. -type Point struct { - r3.Vector -} - -// sortPoints sorts the slice of Points in place. -func sortPoints(e []Point) { - sort.Sort(points(e)) -} - -// points implements the Sort interface for slices of Point. -type points []Point - -func (p points) Len() int { return len(p) } -func (p points) Swap(i, j int) { p[i], p[j] = p[j], p[i] } -func (p points) Less(i, j int) bool { return p[i].Cmp(p[j].Vector) == -1 } - -// PointFromCoords creates a new normalized point from coordinates. 
-// -// This always returns a valid point. If the given coordinates can not be normalized -// the origin point will be returned. -// -// This behavior is different from the C++ construction of a S2Point from coordinates -// (i.e. S2Point(x, y, z)) in that in C++ they do not Normalize. -func PointFromCoords(x, y, z float64) Point { - if x == 0 && y == 0 && z == 0 { - return OriginPoint() - } - return Point{r3.Vector{x, y, z}.Normalize()} -} - -// OriginPoint returns a unique "origin" on the sphere for operations that need a fixed -// reference point. In particular, this is the "point at infinity" used for -// point-in-polygon testing (by counting the number of edge crossings). -// -// It should *not* be a point that is commonly used in edge tests in order -// to avoid triggering code to handle degenerate cases (this rules out the -// north and south poles). It should also not be on the boundary of any -// low-level S2Cell for the same reason. -func OriginPoint() Point { - return Point{r3.Vector{-0.0099994664350250197, 0.0025924542609324121, 0.99994664350250195}} -} - -// PointCross returns a Point that is orthogonal to both p and op. This is similar to -// p.Cross(op) (the true cross product) except that it does a better job of -// ensuring orthogonality when the Point is nearly parallel to op, it returns -// a non-zero result even when p == op or p == -op and the result is a Point. -// -// It satisfies the following properties (f == PointCross): -// -// (1) f(p, op) != 0 for all p, op -// (2) f(op,p) == -f(p,op) unless p == op or p == -op -// (3) f(-p,op) == -f(p,op) unless p == op or p == -op -// (4) f(p,-op) == -f(p,op) unless p == op or p == -op -func (p Point) PointCross(op Point) Point { - // NOTE(dnadasi): In the C++ API the equivalent method here was known as "RobustCrossProd", - // but PointCross more accurately describes how this method is used. - x := p.Add(op.Vector).Cross(op.Sub(p.Vector)) - - // Compare exactly to the 0 vector. 
- if x == (r3.Vector{}) { - // The only result that makes sense mathematically is to return zero, but - // we find it more convenient to return an arbitrary orthogonal vector. - return Point{p.Ortho()} - } - - return Point{x} -} - -// OrderedCCW returns true if the edges OA, OB, and OC are encountered in that -// order while sweeping CCW around the point O. -// -// You can think of this as testing whether A <= B <= C with respect to the -// CCW ordering around O that starts at A, or equivalently, whether B is -// contained in the range of angles (inclusive) that starts at A and extends -// CCW to C. Properties: -// -// (1) If OrderedCCW(a,b,c,o) && OrderedCCW(b,a,c,o), then a == b -// (2) If OrderedCCW(a,b,c,o) && OrderedCCW(a,c,b,o), then b == c -// (3) If OrderedCCW(a,b,c,o) && OrderedCCW(c,b,a,o), then a == b == c -// (4) If a == b or b == c, then OrderedCCW(a,b,c,o) is true -// (5) Otherwise if a == c, then OrderedCCW(a,b,c,o) is false -func OrderedCCW(a, b, c, o Point) bool { - sum := 0 - if RobustSign(b, o, a) != Clockwise { - sum++ - } - if RobustSign(c, o, b) != Clockwise { - sum++ - } - if RobustSign(a, o, c) == CounterClockwise { - sum++ - } - return sum >= 2 -} - -// Distance returns the angle between two points. -func (p Point) Distance(b Point) s1.Angle { - return p.Vector.Angle(b.Vector) -} - -// ApproxEqual reports whether the two points are similar enough to be equal. -func (p Point) ApproxEqual(other Point) bool { - return p.approxEqual(other, s1.Angle(epsilon)) -} - -// approxEqual reports whether the two points are within the given epsilon. -func (p Point) approxEqual(other Point, eps s1.Angle) bool { - return p.Vector.Angle(other.Vector) <= eps -} - -// ChordAngleBetweenPoints constructs a ChordAngle corresponding to the distance -// between the two given points. The points must be unit length. 
-func ChordAngleBetweenPoints(x, y Point) s1.ChordAngle { - return s1.ChordAngle(math.Min(4.0, x.Sub(y.Vector).Norm2())) -} - -// regularPoints generates a slice of points shaped as a regular polygon with -// the numVertices vertices, all located on a circle of the specified angular radius -// around the center. The radius is the actual distance from center to each vertex. -func regularPoints(center Point, radius s1.Angle, numVertices int) []Point { - return regularPointsForFrame(getFrame(center), radius, numVertices) -} - -// regularPointsForFrame generates a slice of points shaped as a regular polygon -// with numVertices vertices, all on a circle of the specified angular radius around -// the center. The radius is the actual distance from the center to each vertex. -func regularPointsForFrame(frame matrix3x3, radius s1.Angle, numVertices int) []Point { - // We construct the loop in the given frame coordinates, with the center at - // (0, 0, 1). For a loop of radius r, the loop vertices have the form - // (x, y, z) where x^2 + y^2 = sin(r) and z = cos(r). The distance on the - // sphere (arc length) from each vertex to the center is acos(cos(r)) = r. - z := math.Cos(radius.Radians()) - r := math.Sin(radius.Radians()) - radianStep := 2 * math.Pi / float64(numVertices) - var vertices []Point - - for i := 0; i < numVertices; i++ { - angle := float64(i) * radianStep - p := Point{r3.Vector{r * math.Cos(angle), r * math.Sin(angle), z}} - vertices = append(vertices, Point{fromFrame(frame, p).Normalize()}) - } - - return vertices -} - -// CapBound returns a bounding cap for this point. -func (p Point) CapBound() Cap { - return CapFromPoint(p) -} - -// RectBound returns a bounding latitude-longitude rectangle from this point. -func (p Point) RectBound() Rect { - return RectFromLatLng(LatLngFromPoint(p)) -} - -// ContainsCell returns false as Points do not contain any other S2 types. 
-func (p Point) ContainsCell(c Cell) bool { return false } - -// IntersectsCell reports whether this Point intersects the given cell. -func (p Point) IntersectsCell(c Cell) bool { - return c.ContainsPoint(p) -} - -// ContainsPoint reports if this Point contains the other Point. -// (This method is named to satisfy the Region interface.) -func (p Point) ContainsPoint(other Point) bool { - return p.Contains(other) -} - -// CellUnionBound computes a covering of the Point. -func (p Point) CellUnionBound() []CellID { - return p.CapBound().CellUnionBound() -} - -// Contains reports if this Point contains the other Point. -// (This method matches all other s2 types where the reflexive Contains -// method does not contain the type's name.) -func (p Point) Contains(other Point) bool { return p == other } - -// Encode encodes the Point. -func (p Point) Encode(w io.Writer) error { - e := &encoder{w: w} - p.encode(e) - return e.err -} - -func (p Point) encode(e *encoder) { - e.writeInt8(encodingVersion) - e.writeFloat64(p.X) - e.writeFloat64(p.Y) - e.writeFloat64(p.Z) -} - -// Decode decodes the Point. -func (p *Point) Decode(r io.Reader) error { - d := &decoder{r: asByteReader(r)} - p.decode(d) - return d.err -} - -func (p *Point) decode(d *decoder) { - version := d.readInt8() - if d.err != nil { - return - } - if version != encodingVersion { - d.err = fmt.Errorf("only version %d is supported", encodingVersion) - return - } - p.X = d.readFloat64() - p.Y = d.readFloat64() - p.Z = d.readFloat64() -} - -// Rotate the given point about the given axis by the given angle. p and -// axis must be unit length; angle has no restrictions (e.g., it can be -// positive, negative, greater than 360 degrees, etc). -func Rotate(p, axis Point, angle s1.Angle) Point { - // Let M be the plane through P that is perpendicular to axis, and let - // center be the point where M intersects axis. 
We construct a - // right-handed orthogonal frame (dx, dy, center) such that dx is the - // vector from center to P, and dy has the same length as dx. The - // result can then be expressed as (cos(angle)*dx + sin(angle)*dy + center). - center := axis.Mul(p.Dot(axis.Vector)) - dx := p.Sub(center) - dy := axis.Cross(p.Vector) - // Mathematically the result is unit length, but normalization is necessary - // to ensure that numerical errors don't accumulate. - return Point{dx.Mul(math.Cos(angle.Radians())).Add(dy.Mul(math.Sin(angle.Radians()))).Add(center).Normalize()} -} diff --git a/vendor/github.com/golang/geo/s2/point_measures.go b/vendor/github.com/golang/geo/s2/point_measures.go deleted file mode 100644 index 6fa9b7ae4..000000000 --- a/vendor/github.com/golang/geo/s2/point_measures.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2018 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package s2 - -import ( - "math" - - "github.com/golang/geo/s1" -) - -// PointArea returns the area of triangle ABC. This method combines two different -// algorithms to get accurate results for both large and small triangles. -// The maximum error is about 5e-15 (about 0.25 square meters on the Earth's -// surface), the same as GirardArea below, but unlike that method it is -// also accurate for small triangles. Example: when the true area is 100 -// square meters, PointArea yields an error about 1 trillion times smaller than -// GirardArea. 
-// -// All points should be unit length, and no two points should be antipodal. -// The area is always positive. -func PointArea(a, b, c Point) float64 { - // This method is based on l'Huilier's theorem, - // - // tan(E/4) = sqrt(tan(s/2) tan((s-a)/2) tan((s-b)/2) tan((s-c)/2)) - // - // where E is the spherical excess of the triangle (i.e. its area), - // a, b, c are the side lengths, and - // s is the semiperimeter (a + b + c) / 2. - // - // The only significant source of error using l'Huilier's method is the - // cancellation error of the terms (s-a), (s-b), (s-c). This leads to a - // *relative* error of about 1e-16 * s / min(s-a, s-b, s-c). This compares - // to a relative error of about 1e-15 / E using Girard's formula, where E is - // the true area of the triangle. Girard's formula can be even worse than - // this for very small triangles, e.g. a triangle with a true area of 1e-30 - // might evaluate to 1e-5. - // - // So, we prefer l'Huilier's formula unless dmin < s * (0.1 * E), where - // dmin = min(s-a, s-b, s-c). This basically includes all triangles - // except for extremely long and skinny ones. - // - // Since we don't know E, we would like a conservative upper bound on - // the triangle area in terms of s and dmin. It's possible to show that - // E <= k1 * s * sqrt(s * dmin), where k1 = 2*sqrt(3)/Pi (about 1). - // Using this, it's easy to show that we should always use l'Huilier's - // method if dmin >= k2 * s^5, where k2 is about 1e-2. Furthermore, - // if dmin < k2 * s^5, the triangle area is at most k3 * s^4, where - // k3 is about 0.1. Since the best case error using Girard's formula - // is about 1e-15, this means that we shouldn't even consider it unless - // s >= 3e-4 or so. - sa := float64(b.Angle(c.Vector)) - sb := float64(c.Angle(a.Vector)) - sc := float64(a.Angle(b.Vector)) - s := 0.5 * (sa + sb + sc) - if s >= 3e-4 { - // Consider whether Girard's formula might be more accurate. 
- dmin := s - math.Max(sa, math.Max(sb, sc)) - if dmin < 1e-2*s*s*s*s*s { - // This triangle is skinny enough to use Girard's formula. - area := GirardArea(a, b, c) - if dmin < s*0.1*area { - return area - } - } - } - - // Use l'Huilier's formula. - return 4 * math.Atan(math.Sqrt(math.Max(0.0, math.Tan(0.5*s)*math.Tan(0.5*(s-sa))* - math.Tan(0.5*(s-sb))*math.Tan(0.5*(s-sc))))) -} - -// GirardArea returns the area of the triangle computed using Girard's formula. -// All points should be unit length, and no two points should be antipodal. -// -// This method is about twice as fast as PointArea() but has poor relative -// accuracy for small triangles. The maximum error is about 5e-15 (about -// 0.25 square meters on the Earth's surface) and the average error is about -// 1e-15. These bounds apply to triangles of any size, even as the maximum -// edge length of the triangle approaches 180 degrees. But note that for -// such triangles, tiny perturbations of the input points can change the -// true mathematical area dramatically. -func GirardArea(a, b, c Point) float64 { - // This is equivalent to the usual Girard's formula but is slightly more - // accurate, faster to compute, and handles a == b == c without a special - // case. PointCross is necessary to get good accuracy when two of - // the input points are very close together. - ab := a.PointCross(b) - bc := b.PointCross(c) - ac := a.PointCross(c) - - area := float64(ab.Angle(ac.Vector) - ab.Angle(bc.Vector) + bc.Angle(ac.Vector)) - if area < 0 { - area = 0 - } - return area -} - -// SignedArea returns a positive value for counterclockwise triangles and a negative -// value otherwise (similar to PointArea). -func SignedArea(a, b, c Point) float64 { - return float64(RobustSign(a, b, c)) * PointArea(a, b, c) -} - -// Angle returns the interior angle at the vertex B in the triangle ABC. The -// return value is always in the range [0, pi]. All points should be -// normalized. 
Ensures that Angle(a,b,c) == Angle(c,b,a) for all a,b,c. -// -// The angle is undefined if A or C is diametrically opposite from B, and -// becomes numerically unstable as the length of edge AB or BC approaches -// 180 degrees. -func Angle(a, b, c Point) s1.Angle { - // PointCross is necessary to get good accuracy when two of the input - // points are very close together. - return a.PointCross(b).Angle(c.PointCross(b).Vector) -} - -// TurnAngle returns the exterior angle at vertex B in the triangle ABC. The -// return value is positive if ABC is counterclockwise and negative otherwise. -// If you imagine an ant walking from A to B to C, this is the angle that the -// ant turns at vertex B (positive = left = CCW, negative = right = CW). -// This quantity is also known as the "geodesic curvature" at B. -// -// Ensures that TurnAngle(a,b,c) == -TurnAngle(c,b,a) for all distinct -// a,b,c. The result is undefined if (a == b || b == c), but is either -// -Pi or Pi if (a == c). All points should be normalized. -func TurnAngle(a, b, c Point) s1.Angle { - // We use PointCross to get good accuracy when two points are very - // close together, and RobustSign to ensure that the sign is correct for - // turns that are close to 180 degrees. - angle := a.PointCross(b).Angle(b.PointCross(c).Vector) - - // Don't return RobustSign * angle because it is legal to have (a == c). - if RobustSign(a, b, c) == CounterClockwise { - return angle - } - return -angle -} diff --git a/vendor/github.com/golang/geo/s2/point_vector.go b/vendor/github.com/golang/geo/s2/point_vector.go deleted file mode 100644 index f8e6f65b5..000000000 --- a/vendor/github.com/golang/geo/s2/point_vector.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2017 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package s2 - -// Shape interface enforcement -var ( - _ Shape = (*PointVector)(nil) -) - -// PointVector is a Shape representing a set of Points. Each point -// is represented as a degenerate edge with the same starting and ending -// vertices. -// -// This type is useful for adding a collection of points to an ShapeIndex. -// -// Its methods are on *PointVector due to implementation details of ShapeIndex. -type PointVector []Point - -func (p *PointVector) NumEdges() int { return len(*p) } -func (p *PointVector) Edge(i int) Edge { return Edge{(*p)[i], (*p)[i]} } -func (p *PointVector) ReferencePoint() ReferencePoint { return OriginReferencePoint(false) } -func (p *PointVector) NumChains() int { return len(*p) } -func (p *PointVector) Chain(i int) Chain { return Chain{i, 1} } -func (p *PointVector) ChainEdge(i, j int) Edge { return Edge{(*p)[i], (*p)[j]} } -func (p *PointVector) ChainPosition(e int) ChainPosition { return ChainPosition{e, 0} } -func (p *PointVector) Dimension() int { return 0 } -func (p *PointVector) IsEmpty() bool { return defaultShapeIsEmpty(p) } -func (p *PointVector) IsFull() bool { return defaultShapeIsFull(p) } -func (p *PointVector) typeTag() typeTag { return typeTagPointVector } -func (p *PointVector) privateInterface() {} diff --git a/vendor/github.com/golang/geo/s2/pointcompression.go b/vendor/github.com/golang/geo/s2/pointcompression.go deleted file mode 100644 index 018381799..000000000 --- a/vendor/github.com/golang/geo/s2/pointcompression.go +++ /dev/null @@ -1,319 +0,0 @@ -// Copyright 2017 Google Inc. 
All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package s2 - -import ( - "errors" - "fmt" - - "github.com/golang/geo/r3" -) - -// maxEncodedVertices is the maximum number of vertices, in a row, to be encoded or decoded. -// On decode, this defends against malicious encodings that try and have us exceed RAM. -const maxEncodedVertices = 50000000 - -// xyzFaceSiTi represents the The XYZ and face,si,ti coordinates of a Point -// and, if this point is equal to the center of a Cell, the level of this cell -// (-1 otherwise). This is used for Loops and Polygons to store data in a more -// compressed format. -type xyzFaceSiTi struct { - xyz Point - face int - si, ti uint32 - level int -} - -const derivativeEncodingOrder = 2 - -func appendFace(faces []faceRun, face int) []faceRun { - if len(faces) == 0 || faces[len(faces)-1].face != face { - return append(faces, faceRun{face, 1}) - } - faces[len(faces)-1].count++ - return faces -} - -// encodePointsCompressed uses an optimized compressed format to encode the given values. 
-func encodePointsCompressed(e *encoder, vertices []xyzFaceSiTi, level int) { - var faces []faceRun - for _, v := range vertices { - faces = appendFace(faces, v.face) - } - encodeFaces(e, faces) - - type piQi struct { - pi, qi uint32 - } - verticesPiQi := make([]piQi, len(vertices)) - for i, v := range vertices { - verticesPiQi[i] = piQi{siTitoPiQi(v.si, level), siTitoPiQi(v.ti, level)} - } - piCoder, qiCoder := newNthDerivativeCoder(derivativeEncodingOrder), newNthDerivativeCoder(derivativeEncodingOrder) - for i, v := range verticesPiQi { - f := encodePointCompressed - if i == 0 { - // The first point will be just the (pi, qi) coordinates - // of the Point. NthDerivativeCoder will not save anything - // in that case, so we encode in fixed format rather than varint - // to avoid the varint overhead. - f = encodeFirstPointFixedLength - } - f(e, v.pi, v.qi, level, piCoder, qiCoder) - } - - var offCenter []int - for i, v := range vertices { - if v.level != level { - offCenter = append(offCenter, i) - } - } - e.writeUvarint(uint64(len(offCenter))) - for _, idx := range offCenter { - e.writeUvarint(uint64(idx)) - e.writeFloat64(vertices[idx].xyz.X) - e.writeFloat64(vertices[idx].xyz.Y) - e.writeFloat64(vertices[idx].xyz.Z) - } -} - -func encodeFirstPointFixedLength(e *encoder, pi, qi uint32, level int, piCoder, qiCoder *nthDerivativeCoder) { - // Do not ZigZagEncode the first point, since it cannot be negative. - codedPi, codedQi := piCoder.encode(int32(pi)), qiCoder.encode(int32(qi)) - // Interleave to reduce overhead from two partial bytes to one. - interleaved := interleaveUint32(uint32(codedPi), uint32(codedQi)) - - // Write as little endian. - bytesRequired := (level + 7) / 8 * 2 - for i := 0; i < bytesRequired; i++ { - e.writeUint8(uint8(interleaved)) - interleaved >>= 8 - } -} - -// encodePointCompressed encodes points into e. 
-// Given a sequence of Points assumed to be the center of level-k cells, -// compresses it into a stream using the following method: -// - decompose the points into (face, si, ti) tuples. -// - run-length encode the faces, combining face number and count into a -// varint32. See the faceRun struct. -// - right shift the (si, ti) to remove the part that's constant for all cells -// of level-k. The result is called the (pi, qi) space. -// - 2nd derivative encode the pi and qi sequences (linear prediction) -// - zig-zag encode all derivative values but the first, which cannot be -// negative -// - interleave the zig-zag encoded values -// - encode the first interleaved value in a fixed length encoding -// (varint would make this value larger) -// - encode the remaining interleaved values as varint64s, as the -// derivative encoding should make the values small. -// In addition, provides a lossless method to compress a sequence of points even -// if some points are not the center of level-k cells. These points are stored -// exactly, using 3 double precision values, after the above encoded string, -// together with their index in the sequence (this leads to some redundancy - it -// is expected that only a small fraction of the points are not cell centers). -// -// To encode leaf cells, this requires 8 bytes for the first vertex plus -// an average of 3.8 bytes for each additional vertex, when computed on -// Google's geographic repository. -func encodePointCompressed(e *encoder, pi, qi uint32, level int, piCoder, qiCoder *nthDerivativeCoder) { - // ZigZagEncode, as varint requires the maximum number of bytes for - // negative numbers. - zzPi := zigzagEncode(piCoder.encode(int32(pi))) - zzQi := zigzagEncode(qiCoder.encode(int32(qi))) - // Interleave to reduce overhead from two partial bytes to one. 
- interleaved := interleaveUint32(zzPi, zzQi) - e.writeUvarint(interleaved) -} - -type faceRun struct { - face, count int -} - -func decodeFaceRun(d *decoder) faceRun { - faceAndCount := d.readUvarint() - ret := faceRun{ - face: int(faceAndCount % numFaces), - count: int(faceAndCount / numFaces), - } - if ret.count <= 0 && d.err == nil { - d.err = errors.New("non-positive count for face run") - } - return ret -} - -func decodeFaces(numVertices int, d *decoder) []faceRun { - var frs []faceRun - for nparsed := 0; nparsed < numVertices; { - fr := decodeFaceRun(d) - if d.err != nil { - return nil - } - frs = append(frs, fr) - nparsed += fr.count - } - return frs -} - -// encodeFaceRun encodes each faceRun as a varint64 with value numFaces * count + face. -func encodeFaceRun(e *encoder, fr faceRun) { - // It isn't necessary to encode the number of faces left for the last run, - // but since this would only help if there were more than 21 faces, it will - // be a small overall savings, much smaller than the bound encoding. - coded := numFaces*uint64(fr.count) + uint64(fr.face) - e.writeUvarint(coded) -} - -func encodeFaces(e *encoder, frs []faceRun) { - for _, fr := range frs { - encodeFaceRun(e, fr) - } -} - -type facesIterator struct { - faces []faceRun - // How often have we yet shown the current face? - numCurrentFaceShown int - curFace int -} - -func (fi *facesIterator) next() (ok bool) { - if len(fi.faces) == 0 { - return false - } - fi.curFace = fi.faces[0].face - fi.numCurrentFaceShown++ - - // Advance fs if needed. 
- if fi.faces[0].count <= fi.numCurrentFaceShown { - fi.faces = fi.faces[1:] - fi.numCurrentFaceShown = 0 - } - - return true -} - -func decodePointsCompressed(d *decoder, level int, target []Point) { - faces := decodeFaces(len(target), d) - - piCoder := newNthDerivativeCoder(derivativeEncodingOrder) - qiCoder := newNthDerivativeCoder(derivativeEncodingOrder) - - iter := facesIterator{faces: faces} - for i := range target { - decodeFn := decodePointCompressed - if i == 0 { - decodeFn = decodeFirstPointFixedLength - } - pi, qi := decodeFn(d, level, piCoder, qiCoder) - if ok := iter.next(); !ok && d.err == nil { - d.err = fmt.Errorf("ran out of faces at target %d", i) - return - } - target[i] = Point{facePiQitoXYZ(iter.curFace, pi, qi, level)} - } - - numOffCenter := int(d.readUvarint()) - if d.err != nil { - return - } - if numOffCenter > len(target) { - d.err = fmt.Errorf("numOffCenter = %d, should be at most len(target) = %d", numOffCenter, len(target)) - return - } - for i := 0; i < numOffCenter; i++ { - idx := int(d.readUvarint()) - if d.err != nil { - return - } - if idx >= len(target) { - d.err = fmt.Errorf("off center index = %d, should be < len(target) = %d", idx, len(target)) - return - } - target[idx].X = d.readFloat64() - target[idx].Y = d.readFloat64() - target[idx].Z = d.readFloat64() - } -} - -func decodeFirstPointFixedLength(d *decoder, level int, piCoder, qiCoder *nthDerivativeCoder) (pi, qi uint32) { - bytesToRead := (level + 7) / 8 * 2 - var interleaved uint64 - for i := 0; i < bytesToRead; i++ { - rr := d.readUint8() - interleaved |= (uint64(rr) << uint(i*8)) - } - - piCoded, qiCoded := deinterleaveUint32(interleaved) - - return uint32(piCoder.decode(int32(piCoded))), uint32(qiCoder.decode(int32(qiCoded))) -} - -func zigzagEncode(x int32) uint32 { - return (uint32(x) << 1) ^ uint32(x>>31) -} - -func zigzagDecode(x uint32) int32 { - return int32((x >> 1) ^ uint32((int32(x&1)<<31)>>31)) -} - -func decodePointCompressed(d *decoder, level int, 
piCoder, qiCoder *nthDerivativeCoder) (pi, qi uint32) { - interleavedZigZagEncodedDerivPiQi := d.readUvarint() - piZigzag, qiZigzag := deinterleaveUint32(interleavedZigZagEncodedDerivPiQi) - return uint32(piCoder.decode(zigzagDecode(piZigzag))), uint32(qiCoder.decode(zigzagDecode(qiZigzag))) -} - -// We introduce a new coordinate system (pi, qi), which is (si, ti) -// with the bits that are constant for cells of that level shifted -// off to the right. -// si = round(s * 2^31) -// pi = si >> (31 - level) -// = floor(s * 2^level) -// If the point has been snapped to the level, the bits that are -// shifted off will be a 1 in the msb, then 0s after that, so the -// fractional part discarded by the cast is (close to) 0.5. - -// stToPiQi returns the value transformed to the PiQi coordinate space. -func stToPiQi(s float64, level uint) uint32 { - return uint32(s * float64(int(1)< max { - s = max - } - - return uint32(s >> (maxLevel + 1 - uint(level))) -} - -// piQiToST returns the value transformed to ST space. -func piQiToST(pi uint32, level int) float64 { - // We want to recover the position at the center of the cell. If the point - // was snapped to the center of the cell, then math.Modf(s * 2^level) == 0.5. - // Inverting STtoPiQi gives: - // s = (pi + 0.5) / 2^level. - return (float64(pi) + 0.5) / float64(int(1)< l.turningAngleMaxError() { - // Normalize the loop. - if angle < 0 { - l.Invert() - } - } else { - // Ensure that the loop does not contain the origin. - if l.ContainsOrigin() { - l.Invert() - } - } - } - - p := PolygonFromLoops(loops) - - if p.NumLoops() > 0 { - originLoop := p.Loop(0) - polygonContainsOrigin := false - for _, l := range p.Loops() { - if l.ContainsOrigin() { - polygonContainsOrigin = !polygonContainsOrigin - - originLoop = l - } - } - if containedOrigin[originLoop] != polygonContainsOrigin { - p.Invert() - } - } - - return p -} - -// Invert inverts the polygon (replaces it by its complement). 
-func (p *Polygon) Invert() { - // Inverting any one loop will invert the polygon. The best loop to invert - // is the one whose area is largest, since this yields the smallest area - // after inversion. The loop with the largest area is always at depth 0. - // The descendents of this loop all have their depth reduced by 1, while the - // former siblings of this loop all have their depth increased by 1. - - // The empty and full polygons are handled specially. - if p.IsEmpty() { - *p = *FullPolygon() - p.initLoopProperties() - return - } - if p.IsFull() { - *p = Polygon{} - p.initLoopProperties() - return - } - - // Find the loop whose area is largest (i.e., whose turning angle is - // smallest), minimizing calls to TurningAngle(). In particular, for - // polygons with a single shell at level 0 there is no need to call - // TurningAngle() at all. (This method is relatively expensive.) - best := 0 - const none = 10.0 // Flag that means "not computed yet" - bestAngle := none - for i := 1; i < p.NumLoops(); i++ { - if p.Loop(i).depth != 0 { - continue - } - // We defer computing the turning angle of loop 0 until we discover - // that the polygon has another top-level shell. - if bestAngle == none { - bestAngle = p.Loop(best).TurningAngle() - } - angle := p.Loop(i).TurningAngle() - // We break ties deterministically in order to avoid having the output - // depend on the input order of the loops. - if angle < bestAngle || (angle == bestAngle && compareLoops(p.Loop(i), p.Loop(best)) < 0) { - best = i - bestAngle = angle - } - } - // Build the new loops vector, starting with the inverted loop. - p.Loop(best).Invert() - newLoops := make([]*Loop, 0, p.NumLoops()) - // Add the former siblings of this loop as descendants. - lastBest := p.LastDescendant(best) - newLoops = append(newLoops, p.Loop(best)) - for i, l := range p.Loops() { - if i < best || i > lastBest { - l.depth++ - newLoops = append(newLoops, l) - } - } - // Add the former children of this loop as siblings. 
- for i, l := range p.Loops() { - if i > best && i <= lastBest { - l.depth-- - newLoops = append(newLoops, l) - } - } - - p.loops = newLoops - p.initLoopProperties() -} - -// Defines a total ordering on Loops that does not depend on the cyclic -// order of loop vertices. This function is used to choose which loop to -// invert in the case where several loops have exactly the same area. -func compareLoops(a, b *Loop) int { - if na, nb := a.NumVertices(), b.NumVertices(); na != nb { - return na - nb - } - ai, aDir := a.CanonicalFirstVertex() - bi, bDir := b.CanonicalFirstVertex() - if aDir != bDir { - return aDir - bDir - } - for n := a.NumVertices() - 1; n >= 0; n, ai, bi = n-1, ai+aDir, bi+bDir { - if cmp := a.Vertex(ai).Cmp(b.Vertex(bi).Vector); cmp != 0 { - return cmp - } - } - return 0 -} - -// PolygonFromCell returns a Polygon from a single loop created from the given Cell. -func PolygonFromCell(cell Cell) *Polygon { - return PolygonFromLoops([]*Loop{LoopFromCell(cell)}) -} - -// initNested takes the set of loops in this polygon and performs the nesting -// computations to set the proper nesting and parent/child relationships. -func (p *Polygon) initNested() { - if len(p.loops) == 1 { - p.initOneLoop() - return - } - - lm := make(loopMap) - - for _, l := range p.loops { - lm.insertLoop(l, nil) - } - // The loops have all been added to the loopMap for ordering. Clear the - // loops slice because we add all the loops in-order in initLoops. - p.loops = nil - - // Reorder the loops in depth-first traversal order. - p.initLoops(lm) - p.initLoopProperties() -} - -// loopMap is a map of a loop to its immediate children with respect to nesting. -// It is used to determine which loops are shells and which are holes. -type loopMap map[*Loop][]*Loop - -// insertLoop adds the given loop to the loop map under the specified parent. -// All children of the new entry are checked to see if the need to move up to -// a different level. 
-func (lm loopMap) insertLoop(newLoop, parent *Loop) { - var children []*Loop - for done := false; !done; { - children = lm[parent] - done = true - for _, child := range children { - if child.ContainsNested(newLoop) { - parent = child - done = false - break - } - } - } - - // Now, we have found a parent for this loop, it may be that some of the - // children of the parent of this loop may now be children of the new loop. - newChildren := lm[newLoop] - for i := 0; i < len(children); { - child := children[i] - if newLoop.ContainsNested(child) { - newChildren = append(newChildren, child) - children = append(children[0:i], children[i+1:]...) - } else { - i++ - } - } - - lm[newLoop] = newChildren - lm[parent] = append(children, newLoop) -} - -// loopStack simplifies access to the loops while being initialized. -type loopStack []*Loop - -func (s *loopStack) push(v *Loop) { - *s = append(*s, v) -} -func (s *loopStack) pop() *Loop { - l := len(*s) - r := (*s)[l-1] - *s = (*s)[:l-1] - return r -} - -// initLoops walks the mapping of loops to all of their children, and adds them in -// order into to the polygons set of loops. -func (p *Polygon) initLoops(lm loopMap) { - var stack loopStack - stack.push(nil) - depth := -1 - - for len(stack) > 0 { - loop := stack.pop() - if loop != nil { - depth = loop.depth - p.loops = append(p.loops, loop) - } - children := lm[loop] - for i := len(children) - 1; i >= 0; i-- { - child := children[i] - child.depth = depth + 1 - stack.push(child) - } - } -} - -// initOneLoop set the properties for a polygon made of a single loop. -// TODO(roberts): Can this be merged with initLoopProperties -func (p *Polygon) initOneLoop() { - p.hasHoles = false - p.numVertices = len(p.loops[0].vertices) - p.bound = p.loops[0].RectBound() - p.subregionBound = ExpandForSubregions(p.bound) - // Ensure the loops depth is set correctly. - p.loops[0].depth = 0 - - p.initEdgesAndIndex() -} - -// initLoopProperties sets the properties for polygons with multiple loops. 
-func (p *Polygon) initLoopProperties() { - p.numVertices = 0 - // the loops depths are set by initNested/initOriented prior to this. - p.bound = EmptyRect() - p.hasHoles = false - for _, l := range p.loops { - if l.IsHole() { - p.hasHoles = true - } else { - p.bound = p.bound.Union(l.RectBound()) - } - p.numVertices += l.NumVertices() - } - p.subregionBound = ExpandForSubregions(p.bound) - - p.initEdgesAndIndex() -} - -// initEdgesAndIndex performs the shape related initializations and adds the final -// polygon to the index. -func (p *Polygon) initEdgesAndIndex() { - p.numEdges = 0 - p.cumulativeEdges = nil - if p.IsFull() { - return - } - const maxLinearSearchLoops = 12 // Based on benchmarks. - if len(p.loops) > maxLinearSearchLoops { - p.cumulativeEdges = make([]int, 0, len(p.loops)) - } - - for _, l := range p.loops { - if p.cumulativeEdges != nil { - p.cumulativeEdges = append(p.cumulativeEdges, p.numEdges) - } - p.numEdges += len(l.vertices) - } - - p.index = NewShapeIndex() - p.index.Add(p) -} - -// FullPolygon returns a special "full" polygon. -func FullPolygon() *Polygon { - ret := &Polygon{ - loops: []*Loop{ - FullLoop(), - }, - numVertices: len(FullLoop().Vertices()), - bound: FullRect(), - subregionBound: FullRect(), - } - ret.initEdgesAndIndex() - return ret -} - -// Validate checks whether this is a valid polygon, -// including checking whether all the loops are themselves valid. -func (p *Polygon) Validate() error { - for i, l := range p.loops { - // Check for loop errors that don't require building a ShapeIndex. - if err := l.findValidationErrorNoIndex(); err != nil { - return fmt.Errorf("loop %d: %v", i, err) - } - // Check that no loop is empty, and that the full loop only appears in the - // full polygon. 
- if l.IsEmpty() { - return fmt.Errorf("loop %d: empty loops are not allowed", i) - } - if l.IsFull() && len(p.loops) > 1 { - return fmt.Errorf("loop %d: full loop appears in non-full polygon", i) - } - } - - // TODO(roberts): Uncomment the remaining checks when they are completed. - - // Check for loop self-intersections and loop pairs that cross - // (including duplicate edges and vertices). - // if findSelfIntersection(p.index) { - // return fmt.Errorf("polygon has loop pairs that cross") - // } - - // Check whether initOriented detected inconsistent loop orientations. - // if p.hasInconsistentLoopOrientations { - // return fmt.Errorf("inconsistent loop orientations detected") - // } - - // Finally, verify the loop nesting hierarchy. - return p.findLoopNestingError() -} - -// findLoopNestingError reports if there is an error in the loop nesting hierarchy. -func (p *Polygon) findLoopNestingError() error { - // First check that the loop depths make sense. - lastDepth := -1 - for i, l := range p.loops { - depth := l.depth - if depth < 0 || depth > lastDepth+1 { - return fmt.Errorf("loop %d: invalid loop depth (%d)", i, depth) - } - lastDepth = depth - } - // Then check that they correspond to the actual loop nesting. This test - // is quadratic in the number of loops but the cost per iteration is small. - for i, l := range p.loops { - last := p.LastDescendant(i) - for j, l2 := range p.loops { - if i == j { - continue - } - nested := (j >= i+1) && (j <= last) - const reverseB = false - - if l.containsNonCrossingBoundary(l2, reverseB) != nested { - nestedStr := "" - if !nested { - nestedStr = "not " - } - return fmt.Errorf("invalid nesting: loop %d should %scontain loop %d", i, nestedStr, j) - } - } - } - return nil -} - -// IsEmpty reports whether this is the special "empty" polygon (consisting of no loops). 
-func (p *Polygon) IsEmpty() bool { - return len(p.loops) == 0 -} - -// IsFull reports whether this is the special "full" polygon (consisting of a -// single loop that encompasses the entire sphere). -func (p *Polygon) IsFull() bool { - return len(p.loops) == 1 && p.loops[0].IsFull() -} - -// NumLoops returns the number of loops in this polygon. -func (p *Polygon) NumLoops() int { - return len(p.loops) -} - -// Loops returns the loops in this polygon. -func (p *Polygon) Loops() []*Loop { - return p.loops -} - -// Loop returns the loop at the given index. Note that during initialization, -// the given loops are reordered according to a pre-order traversal of the loop -// nesting hierarchy. This implies that every loop is immediately followed by -// its descendants. This hierarchy can be traversed using the methods Parent, -// LastDescendant, and Loop.depth. -func (p *Polygon) Loop(k int) *Loop { - return p.loops[k] -} - -// Parent returns the index of the parent of loop k. -// If the loop does not have a parent, ok=false is returned. -func (p *Polygon) Parent(k int) (index int, ok bool) { - // See where we are on the depth hierarchy. - depth := p.loops[k].depth - if depth == 0 { - return -1, false - } - - // There may be several loops at the same nesting level as us that share a - // parent loop with us. (Imagine a slice of swiss cheese, of which we are one loop. - // we don't know how many may be next to us before we get back to our parent loop.) - // Move up one position from us, and then begin traversing back through the set of loops - // until we find the one that is our parent or we get to the top of the polygon. - for k--; k >= 0 && p.loops[k].depth <= depth; k-- { - } - return k, true -} - -// LastDescendant returns the index of the last loop that is contained within loop k. -// If k is negative, it returns the last loop in the polygon. 
-// Note that loops are indexed according to a pre-order traversal of the nesting -// hierarchy, so the immediate children of loop k can be found by iterating over -// the loops (k+1)..LastDescendant(k) and selecting those whose depth is equal -// to Loop(k).depth+1. -func (p *Polygon) LastDescendant(k int) int { - if k < 0 { - return len(p.loops) - 1 - } - - depth := p.loops[k].depth - - // Find the next loop immediately past us in the set of loops, and then start - // moving down the list until we either get to the end or find the next loop - // that is higher up the hierarchy than we are. - for k++; k < len(p.loops) && p.loops[k].depth > depth; k++ { - } - return k - 1 -} - -// CapBound returns a bounding spherical cap. -func (p *Polygon) CapBound() Cap { return p.bound.CapBound() } - -// RectBound returns a bounding latitude-longitude rectangle. -func (p *Polygon) RectBound() Rect { return p.bound } - -// ContainsPoint reports whether the polygon contains the point. -func (p *Polygon) ContainsPoint(point Point) bool { - // NOTE: A bounds check slows down this function by about 50%. It is - // worthwhile only when it might allow us to delay building the index. - if !p.index.IsFresh() && !p.bound.ContainsPoint(point) { - return false - } - - // For small polygons, and during initial construction, it is faster to just - // check all the crossing. - const maxBruteForceVertices = 32 - if p.numVertices < maxBruteForceVertices || p.index == nil { - inside := false - for _, l := range p.loops { - // use loops bruteforce to avoid building the index on each loop. - inside = inside != l.bruteForceContainsPoint(point) - } - return inside - } - - // Otherwise we look up the ShapeIndex cell containing this point. - return NewContainsPointQuery(p.index, VertexModelSemiOpen).Contains(point) -} - -// ContainsCell reports whether the polygon contains the given cell. 
-func (p *Polygon) ContainsCell(cell Cell) bool { - it := p.index.Iterator() - relation := it.LocateCellID(cell.ID()) - - // If "cell" is disjoint from all index cells, it is not contained. - // Similarly, if "cell" is subdivided into one or more index cells then it - // is not contained, since index cells are subdivided only if they (nearly) - // intersect a sufficient number of edges. (But note that if "cell" itself - // is an index cell then it may be contained, since it could be a cell with - // no edges in the loop interior.) - if relation != Indexed { - return false - } - - // Otherwise check if any edges intersect "cell". - if p.boundaryApproxIntersects(it, cell) { - return false - } - - // Otherwise check if the loop contains the center of "cell". - return p.iteratorContainsPoint(it, cell.Center()) -} - -// IntersectsCell reports whether the polygon intersects the given cell. -func (p *Polygon) IntersectsCell(cell Cell) bool { - it := p.index.Iterator() - relation := it.LocateCellID(cell.ID()) - - // If cell does not overlap any index cell, there is no intersection. - if relation == Disjoint { - return false - } - // If cell is subdivided into one or more index cells, there is an - // intersection to within the S2ShapeIndex error bound (see Contains). - if relation == Subdivided { - return true - } - // If cell is an index cell, there is an intersection because index cells - // are created only if they have at least one edge or they are entirely - // contained by the loop. - if it.CellID() == cell.id { - return true - } - // Otherwise check if any edges intersect cell. - if p.boundaryApproxIntersects(it, cell) { - return true - } - // Otherwise check if the loop contains the center of cell. - return p.iteratorContainsPoint(it, cell.Center()) -} - -// CellUnionBound computes a covering of the Polygon. -func (p *Polygon) CellUnionBound() []CellID { - // TODO(roberts): Use ShapeIndexRegion when it's available. 
- return p.CapBound().CellUnionBound() -} - -// boundaryApproxIntersects reports whether the loop's boundary intersects cell. -// It may also return true when the loop boundary does not intersect cell but -// some edge comes within the worst-case error tolerance. -// -// This requires that it.Locate(cell) returned Indexed. -func (p *Polygon) boundaryApproxIntersects(it *ShapeIndexIterator, cell Cell) bool { - aClipped := it.IndexCell().findByShapeID(0) - - // If there are no edges, there is no intersection. - if len(aClipped.edges) == 0 { - return false - } - - // We can save some work if cell is the index cell itself. - if it.CellID() == cell.ID() { - return true - } - - // Otherwise check whether any of the edges intersect cell. - maxError := (faceClipErrorUVCoord + intersectsRectErrorUVDist) - bound := cell.BoundUV().ExpandedByMargin(maxError) - for _, e := range aClipped.edges { - edge := p.index.Shape(0).Edge(e) - v0, v1, ok := ClipToPaddedFace(edge.V0, edge.V1, cell.Face(), maxError) - if ok && edgeIntersectsRect(v0, v1, bound) { - return true - } - } - - return false -} - -// iteratorContainsPoint reports whether the iterator that is positioned at the -// ShapeIndexCell that may contain p, contains the point p. -func (p *Polygon) iteratorContainsPoint(it *ShapeIndexIterator, point Point) bool { - // Test containment by drawing a line segment from the cell center to the - // given point and counting edge crossings. - aClipped := it.IndexCell().findByShapeID(0) - inside := aClipped.containsCenter - - if len(aClipped.edges) == 0 { - return inside - } - - // This block requires ShapeIndex. - crosser := NewEdgeCrosser(it.Center(), point) - shape := p.index.Shape(0) - for _, e := range aClipped.edges { - edge := shape.Edge(e) - inside = inside != crosser.EdgeOrVertexCrossing(edge.V0, edge.V1) - } - - return inside -} - -// Shape Interface - -// NumEdges returns the number of edges in this shape. 
-func (p *Polygon) NumEdges() int { - return p.numEdges -} - -// Edge returns endpoints for the given edge index. -func (p *Polygon) Edge(e int) Edge { - var i int - - if len(p.cumulativeEdges) > 0 { - for i = range p.cumulativeEdges { - if i+1 >= len(p.cumulativeEdges) || e < p.cumulativeEdges[i+1] { - e -= p.cumulativeEdges[i] - break - } - } - } else { - // When the number of loops is small, use linear search. Most often - // there is exactly one loop and the code below executes zero times. - for i = 0; e >= len(p.Loop(i).vertices); i++ { - e -= len(p.Loop(i).vertices) - } - } - - return Edge{p.Loop(i).OrientedVertex(e), p.Loop(i).OrientedVertex(e + 1)} -} - -// ReferencePoint returns the reference point for this polygon. -func (p *Polygon) ReferencePoint() ReferencePoint { - containsOrigin := false - for _, l := range p.loops { - containsOrigin = containsOrigin != l.ContainsOrigin() - } - return OriginReferencePoint(containsOrigin) -} - -// NumChains reports the number of contiguous edge chains in the Polygon. -func (p *Polygon) NumChains() int { - return p.NumLoops() -} - -// Chain returns the i-th edge Chain (loop) in the Shape. -func (p *Polygon) Chain(chainID int) Chain { - if p.cumulativeEdges != nil { - return Chain{p.cumulativeEdges[chainID], len(p.Loop(chainID).vertices)} - } - e := 0 - for j := 0; j < chainID; j++ { - e += len(p.Loop(j).vertices) - } - - // Polygon represents a full loop as a loop with one vertex, while - // Shape represents a full loop as a chain with no vertices. - if numVertices := p.Loop(chainID).NumVertices(); numVertices != 1 { - return Chain{e, numVertices} - } - return Chain{e, 0} -} - -// ChainEdge returns the j-th edge of the i-th edge Chain (loop). -func (p *Polygon) ChainEdge(i, j int) Edge { - return Edge{p.Loop(i).OrientedVertex(j), p.Loop(i).OrientedVertex(j + 1)} -} - -// ChainPosition returns a pair (i, j) such that edgeID is the j-th edge -// of the i-th edge Chain. 
-func (p *Polygon) ChainPosition(edgeID int) ChainPosition { - var i int - - if len(p.cumulativeEdges) > 0 { - for i = range p.cumulativeEdges { - if i+1 >= len(p.cumulativeEdges) || edgeID < p.cumulativeEdges[i+1] { - edgeID -= p.cumulativeEdges[i] - break - } - } - } else { - // When the number of loops is small, use linear search. Most often - // there is exactly one loop and the code below executes zero times. - for i = 0; edgeID >= len(p.Loop(i).vertices); i++ { - edgeID -= len(p.Loop(i).vertices) - } - } - // TODO(roberts): unify this and Edge since they are mostly identical. - return ChainPosition{i, edgeID} -} - -// Dimension returns the dimension of the geometry represented by this Polygon. -func (p *Polygon) Dimension() int { return 2 } - -func (p *Polygon) typeTag() typeTag { return typeTagPolygon } - -func (p *Polygon) privateInterface() {} - -// Contains reports whether this polygon contains the other polygon. -// Specifically, it reports whether all the points in the other polygon -// are also in this polygon. -func (p *Polygon) Contains(o *Polygon) bool { - // If both polygons have one loop, use the more efficient Loop method. - // Note that Loop's Contains does its own bounding rectangle check. - if len(p.loops) == 1 && len(o.loops) == 1 { - return p.loops[0].Contains(o.loops[0]) - } - - // Otherwise if neither polygon has holes, we can still use the more - // efficient Loop's Contains method (rather than compareBoundary), - // but it's worthwhile to do our own bounds check first. - if !p.subregionBound.Contains(o.bound) { - // Even though Bound(A) does not contain Bound(B), it is still possible - // that A contains B. This can only happen when union of the two bounds - // spans all longitudes. For example, suppose that B consists of two - // shells with a longitude gap between them, while A consists of one shell - // that surrounds both shells of B but goes the other way around the - // sphere (so that it does not intersect the longitude gap). 
- if !p.bound.Lng.Union(o.bound.Lng).IsFull() { - return false - } - } - - if !p.hasHoles && !o.hasHoles { - for _, l := range o.loops { - if !p.anyLoopContains(l) { - return false - } - } - return true - } - - // Polygon A contains B iff B does not intersect the complement of A. From - // the intersection algorithm below, this means that the complement of A - // must exclude the entire boundary of B, and B must exclude all shell - // boundaries of the complement of A. (It can be shown that B must then - // exclude the entire boundary of the complement of A.) The first call - // below returns false if the boundaries cross, therefore the second call - // does not need to check for any crossing edges (which makes it cheaper). - return p.containsBoundary(o) && o.excludesNonCrossingComplementShells(p) -} - -// Intersects reports whether this polygon intersects the other polygon, i.e. -// if there is a point that is contained by both polygons. -func (p *Polygon) Intersects(o *Polygon) bool { - // If both polygons have one loop, use the more efficient Loop method. - // Note that Loop Intersects does its own bounding rectangle check. - if len(p.loops) == 1 && len(o.loops) == 1 { - return p.loops[0].Intersects(o.loops[0]) - } - - // Otherwise if neither polygon has holes, we can still use the more - // efficient Loop.Intersects method. The polygons intersect if and - // only if some pair of loop regions intersect. - if !p.bound.Intersects(o.bound) { - return false - } - - if !p.hasHoles && !o.hasHoles { - for _, l := range o.loops { - if p.anyLoopIntersects(l) { - return true - } - } - return false - } - - // Polygon A is disjoint from B if A excludes the entire boundary of B and B - // excludes all shell boundaries of A. (It can be shown that B must then - // exclude the entire boundary of A.) The first call below returns false if - // the boundaries cross, therefore the second call does not need to check - // for crossing edges. 
- return !p.excludesBoundary(o) || !o.excludesNonCrossingShells(p) -} - -// compareBoundary returns +1 if this polygon contains the boundary of B, -1 if A -// excludes the boundary of B, and 0 if the boundaries of A and B cross. -func (p *Polygon) compareBoundary(o *Loop) int { - result := -1 - for i := 0; i < len(p.loops) && result != 0; i++ { - // If B crosses any loop of A, the result is 0. Otherwise the result - // changes sign each time B is contained by a loop of A. - result *= -p.loops[i].compareBoundary(o) - } - return result -} - -// containsBoundary reports whether this polygon contains the entire boundary of B. -func (p *Polygon) containsBoundary(o *Polygon) bool { - for _, l := range o.loops { - if p.compareBoundary(l) <= 0 { - return false - } - } - return true -} - -// excludesBoundary reports whether this polygon excludes the entire boundary of B. -func (p *Polygon) excludesBoundary(o *Polygon) bool { - for _, l := range o.loops { - if p.compareBoundary(l) >= 0 { - return false - } - } - return true -} - -// containsNonCrossingBoundary reports whether polygon A contains the boundary of -// loop B. Shared edges are handled according to the rule described in loops -// containsNonCrossingBoundary. -func (p *Polygon) containsNonCrossingBoundary(o *Loop, reverse bool) bool { - var inside bool - for _, l := range p.loops { - x := l.containsNonCrossingBoundary(o, reverse) - inside = (inside != x) - } - return inside -} - -// excludesNonCrossingShells reports wheterh given two polygons A and B such that the -// boundary of A does not cross any loop of B, if A excludes all shell boundaries of B. 
-func (p *Polygon) excludesNonCrossingShells(o *Polygon) bool { - for _, l := range o.loops { - if l.IsHole() { - continue - } - if p.containsNonCrossingBoundary(l, false) { - return false - } - } - return true -} - -// excludesNonCrossingComplementShells reports whether given two polygons A and B -// such that the boundary of A does not cross any loop of B, if A excludes all -// shell boundaries of the complement of B. -func (p *Polygon) excludesNonCrossingComplementShells(o *Polygon) bool { - // Special case to handle the complement of the empty or full polygons. - if o.IsEmpty() { - return !p.IsFull() - } - if o.IsFull() { - return true - } - - // Otherwise the complement of B may be obtained by inverting loop(0) and - // then swapping the shell/hole status of all other loops. This implies - // that the shells of the complement consist of loop 0 plus all the holes of - // the original polygon. - for j, l := range o.loops { - if j > 0 && !l.IsHole() { - continue - } - - // The interior of the complement is to the right of loop 0, and to the - // left of the loops that were originally holes. - if p.containsNonCrossingBoundary(l, j == 0) { - return false - } - } - return true -} - -// anyLoopContains reports whether any loop in this polygon contains the given loop. -func (p *Polygon) anyLoopContains(o *Loop) bool { - for _, l := range p.loops { - if l.Contains(o) { - return true - } - } - return false -} - -// anyLoopIntersects reports whether any loop in this polygon intersects the given loop. -func (p *Polygon) anyLoopIntersects(o *Loop) bool { - for _, l := range p.loops { - if l.Intersects(o) { - return true - } - } - return false -} - -// Area returns the area of the polygon interior, i.e. the region on the left side -// of an odd number of loops. The return value is between 0 and 4*Pi. 
-func (p *Polygon) Area() float64 { - var area float64 - for _, loop := range p.loops { - area += float64(loop.Sign()) * loop.Area() - } - return area -} - -// Encode encodes the Polygon -func (p *Polygon) Encode(w io.Writer) error { - e := &encoder{w: w} - p.encode(e) - return e.err -} - -// encode only supports lossless encoding and not compressed format. -func (p *Polygon) encode(e *encoder) { - if p.numVertices == 0 { - p.encodeCompressed(e, maxLevel, nil) - return - } - - // Convert all the polygon vertices to XYZFaceSiTi format. - vs := make([]xyzFaceSiTi, 0, p.numVertices) - for _, l := range p.loops { - vs = append(vs, l.xyzFaceSiTiVertices()...) - } - - // Computes a histogram of the cell levels at which the vertices are snapped. - // (histogram[0] is the number of unsnapped vertices, histogram[i] the number - // of vertices snapped at level i-1). - histogram := make([]int, maxLevel+2) - for _, v := range vs { - histogram[v.level+1]++ - } - - // Compute the level at which most of the vertices are snapped. - // If multiple levels have the same maximum number of vertices - // snapped to it, the first one (lowest level number / largest - // area / smallest encoding length) will be chosen, so this - // is desired. - var snapLevel, numSnapped int - for level, h := range histogram[1:] { - if h > numSnapped { - snapLevel, numSnapped = level, h - } - } - - // Choose an encoding format based on the number of unsnapped vertices and a - // rough estimate of the encoded sizes. - numUnsnapped := p.numVertices - numSnapped // Number of vertices that won't be snapped at snapLevel. - const pointSize = 3 * 8 // s2.Point is an r3.Vector, which is 3 float64s. That's 3*8 = 24 bytes. - compressedSize := 4*p.numVertices + (pointSize+2)*numUnsnapped - losslessSize := pointSize * p.numVertices - if compressedSize < losslessSize { - p.encodeCompressed(e, snapLevel, vs) - } else { - p.encodeLossless(e) - } -} - -// encodeLossless encodes the polygon's Points as float64s. 
-func (p *Polygon) encodeLossless(e *encoder) { - e.writeInt8(encodingVersion) - e.writeBool(true) // a legacy c++ value. must be true. - e.writeBool(p.hasHoles) - e.writeUint32(uint32(len(p.loops))) - - if e.err != nil { - return - } - if len(p.loops) > maxEncodedLoops { - e.err = fmt.Errorf("too many loops (%d; max is %d)", len(p.loops), maxEncodedLoops) - return - } - for _, l := range p.loops { - l.encode(e) - } - - // Encode the bound. - p.bound.encode(e) -} - -func (p *Polygon) encodeCompressed(e *encoder, snapLevel int, vertices []xyzFaceSiTi) { - e.writeUint8(uint8(encodingCompressedVersion)) - e.writeUint8(uint8(snapLevel)) - e.writeUvarint(uint64(len(p.loops))) - - if e.err != nil { - return - } - if l := len(p.loops); l > maxEncodedLoops { - e.err = fmt.Errorf("too many loops to encode: %d; max is %d", l, maxEncodedLoops) - return - } - - for _, l := range p.loops { - l.encodeCompressed(e, snapLevel, vertices[:len(l.vertices)]) - vertices = vertices[len(l.vertices):] - } - // Do not write the bound, num_vertices, or has_holes_ as they can be - // cheaply recomputed by decodeCompressed. Microbenchmarks show the - // speed difference is inconsequential. -} - -// Decode decodes the Polygon. -func (p *Polygon) Decode(r io.Reader) error { - d := &decoder{r: asByteReader(r)} - version := int8(d.readUint8()) - var dec func(*decoder) - switch version { - case encodingVersion: - dec = p.decode - case encodingCompressedVersion: - dec = p.decodeCompressed - default: - return fmt.Errorf("unsupported version %d", version) - } - dec(d) - return d.err -} - -// maxEncodedLoops is the biggest supported number of loops in a polygon during encoding. -// Setting a maximum guards an allocation: it prevents an attacker from easily pushing us OOM. -const maxEncodedLoops = 10000000 - -func (p *Polygon) decode(d *decoder) { - *p = Polygon{} - d.readUint8() // Ignore irrelevant serialized owns_loops_ value. 
- - p.hasHoles = d.readBool() - - // Polygons with no loops are explicitly allowed here: a newly created - // polygon has zero loops and such polygons encode and decode properly. - nloops := d.readUint32() - if d.err != nil { - return - } - if nloops > maxEncodedLoops { - d.err = fmt.Errorf("too many loops (%d; max is %d)", nloops, maxEncodedLoops) - return - } - p.loops = make([]*Loop, nloops) - for i := range p.loops { - p.loops[i] = new(Loop) - p.loops[i].decode(d) - p.numVertices += len(p.loops[i].vertices) - } - - p.bound.decode(d) - if d.err != nil { - return - } - p.subregionBound = ExpandForSubregions(p.bound) - p.initEdgesAndIndex() -} - -func (p *Polygon) decodeCompressed(d *decoder) { - snapLevel := int(d.readUint8()) - - if snapLevel > maxLevel { - d.err = fmt.Errorf("snaplevel too big: %d", snapLevel) - return - } - // Polygons with no loops are explicitly allowed here: a newly created - // polygon has zero loops and such polygons encode and decode properly. - nloops := int(d.readUvarint()) - if nloops > maxEncodedLoops { - d.err = fmt.Errorf("too many loops (%d; max is %d)", nloops, maxEncodedLoops) - } - p.loops = make([]*Loop, nloops) - for i := range p.loops { - p.loops[i] = new(Loop) - p.loops[i].decodeCompressed(d, snapLevel) - } - p.initLoopProperties() -} - -// TODO(roberts): Differences from C++ -// Centroid -// SnapLevel -// DistanceToPoint -// DistanceToBoundary -// Project -// ProjectToBoundary -// ApproxContains/ApproxDisjoint for Polygons -// InitTo{Intersection/ApproxIntersection/Union/ApproxUnion/Diff/ApproxDiff} -// InitToSimplified -// InitToSnapped -// IntersectWithPolyline -// ApproxIntersectWithPolyline -// SubtractFromPolyline -// ApproxSubtractFromPolyline -// DestructiveUnion -// DestructiveApproxUnion -// InitToCellUnionBorder -// IsNormalized -// Equal/BoundaryEqual/BoundaryApproxEqual/BoundaryNear Polygons -// BreakEdgesAndAddToBuilder -// -// clearLoops -// findLoopNestingError -// initToSimplifiedInternal -// 
internalClipPolyline -// clipBoundary diff --git a/vendor/github.com/golang/geo/s2/polyline.go b/vendor/github.com/golang/geo/s2/polyline.go deleted file mode 100644 index 517968342..000000000 --- a/vendor/github.com/golang/geo/s2/polyline.go +++ /dev/null @@ -1,589 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package s2 - -import ( - "fmt" - "io" - "math" - - "github.com/golang/geo/s1" -) - -// Polyline represents a sequence of zero or more vertices connected by -// straight edges (geodesics). Edges of length 0 and 180 degrees are not -// allowed, i.e. adjacent vertices should not be identical or antipodal. -type Polyline []Point - -// PolylineFromLatLngs creates a new Polyline from the given LatLngs. -func PolylineFromLatLngs(points []LatLng) *Polyline { - p := make(Polyline, len(points)) - for k, v := range points { - p[k] = PointFromLatLng(v) - } - return &p -} - -// Reverse reverses the order of the Polyline vertices. -func (p *Polyline) Reverse() { - for i := 0; i < len(*p)/2; i++ { - (*p)[i], (*p)[len(*p)-i-1] = (*p)[len(*p)-i-1], (*p)[i] - } -} - -// Length returns the length of this Polyline. -func (p *Polyline) Length() s1.Angle { - var length s1.Angle - - for i := 1; i < len(*p); i++ { - length += (*p)[i-1].Distance((*p)[i]) - } - return length -} - -// Centroid returns the true centroid of the polyline multiplied by the length of the -// polyline. 
The result is not unit length, so you may wish to normalize it. -// -// Scaling by the Polyline length makes it easy to compute the centroid -// of several Polylines (by simply adding up their centroids). -func (p *Polyline) Centroid() Point { - var centroid Point - for i := 1; i < len(*p); i++ { - // The centroid (multiplied by length) is a vector toward the midpoint - // of the edge, whose length is twice the sin of half the angle between - // the two vertices. Defining theta to be this angle, we have: - vSum := (*p)[i-1].Add((*p)[i].Vector) // Length == 2*cos(theta) - vDiff := (*p)[i-1].Sub((*p)[i].Vector) // Length == 2*sin(theta) - - // Length == 2*sin(theta) - centroid = Point{centroid.Add(vSum.Mul(math.Sqrt(vDiff.Norm2() / vSum.Norm2())))} - } - return centroid -} - -// Equal reports whether the given Polyline is exactly the same as this one. -func (p *Polyline) Equal(b *Polyline) bool { - if len(*p) != len(*b) { - return false - } - for i, v := range *p { - if v != (*b)[i] { - return false - } - } - - return true -} - -// ApproxEqual reports whether two polylines have the same number of vertices, -// and corresponding vertex pairs are separated by no more the standard margin. -func (p *Polyline) ApproxEqual(o *Polyline) bool { - return p.approxEqual(o, s1.Angle(epsilon)) -} - -// approxEqual reports whether two polylines are equal within the given margin. -func (p *Polyline) approxEqual(o *Polyline, maxError s1.Angle) bool { - if len(*p) != len(*o) { - return false - } - for offset, val := range *p { - if !val.approxEqual((*o)[offset], maxError) { - return false - } - } - return true -} - -// CapBound returns the bounding Cap for this Polyline. -func (p *Polyline) CapBound() Cap { - return p.RectBound().CapBound() -} - -// RectBound returns the bounding Rect for this Polyline. 
-func (p *Polyline) RectBound() Rect { - rb := NewRectBounder() - for _, v := range *p { - rb.AddPoint(v) - } - return rb.RectBound() -} - -// ContainsCell reports whether this Polyline contains the given Cell. Always returns false -// because "containment" is not numerically well-defined except at the Polyline vertices. -func (p *Polyline) ContainsCell(cell Cell) bool { - return false -} - -// IntersectsCell reports whether this Polyline intersects the given Cell. -func (p *Polyline) IntersectsCell(cell Cell) bool { - if len(*p) == 0 { - return false - } - - // We only need to check whether the cell contains vertex 0 for correctness, - // but these tests are cheap compared to edge crossings so we might as well - // check all the vertices. - for _, v := range *p { - if cell.ContainsPoint(v) { - return true - } - } - - cellVertices := []Point{ - cell.Vertex(0), - cell.Vertex(1), - cell.Vertex(2), - cell.Vertex(3), - } - - for j := 0; j < 4; j++ { - crosser := NewChainEdgeCrosser(cellVertices[j], cellVertices[(j+1)&3], (*p)[0]) - for i := 1; i < len(*p); i++ { - if crosser.ChainCrossingSign((*p)[i]) != DoNotCross { - // There is a proper crossing, or two vertices were the same. - return true - } - } - } - return false -} - -// ContainsPoint returns false since Polylines are not closed. -func (p *Polyline) ContainsPoint(point Point) bool { - return false -} - -// CellUnionBound computes a covering of the Polyline. -func (p *Polyline) CellUnionBound() []CellID { - return p.CapBound().CellUnionBound() -} - -// NumEdges returns the number of edges in this shape. -func (p *Polyline) NumEdges() int { - if len(*p) == 0 { - return 0 - } - return len(*p) - 1 -} - -// Edge returns endpoints for the given edge index. -func (p *Polyline) Edge(i int) Edge { - return Edge{(*p)[i], (*p)[i+1]} -} - -// ReferencePoint returns the default reference point with negative containment because Polylines are not closed. 
-func (p *Polyline) ReferencePoint() ReferencePoint { - return OriginReferencePoint(false) -} - -// NumChains reports the number of contiguous edge chains in this Polyline. -func (p *Polyline) NumChains() int { - return minInt(1, p.NumEdges()) -} - -// Chain returns the i-th edge Chain in the Shape. -func (p *Polyline) Chain(chainID int) Chain { - return Chain{0, p.NumEdges()} -} - -// ChainEdge returns the j-th edge of the i-th edge Chain. -func (p *Polyline) ChainEdge(chainID, offset int) Edge { - return Edge{(*p)[offset], (*p)[offset+1]} -} - -// ChainPosition returns a pair (i, j) such that edgeID is the j-th edge -func (p *Polyline) ChainPosition(edgeID int) ChainPosition { - return ChainPosition{0, edgeID} -} - -// Dimension returns the dimension of the geometry represented by this Polyline. -func (p *Polyline) Dimension() int { return 1 } - -// IsEmpty reports whether this shape contains no points. -func (p *Polyline) IsEmpty() bool { return defaultShapeIsEmpty(p) } - -// IsFull reports whether this shape contains all points on the sphere. -func (p *Polyline) IsFull() bool { return defaultShapeIsFull(p) } - -func (p *Polyline) typeTag() typeTag { return typeTagPolyline } - -func (p *Polyline) privateInterface() {} - -// findEndVertex reports the maximal end index such that the line segment between -// the start index and this one such that the line segment between these two -// vertices passes within the given tolerance of all interior vertices, in order. -func findEndVertex(p Polyline, tolerance s1.Angle, index int) int { - // The basic idea is to keep track of the "pie wedge" of angles - // from the starting vertex such that a ray from the starting - // vertex at that angle will pass through the discs of radius - // tolerance centered around all vertices processed so far. - // - // First we define a coordinate frame for the tangent and normal - // spaces at the starting vertex. 
Essentially this means picking - // three orthonormal vectors X,Y,Z such that X and Y span the - // tangent plane at the starting vertex, and Z is up. We use - // the coordinate frame to define a mapping from 3D direction - // vectors to a one-dimensional ray angle in the range (-π, - // π]. The angle of a direction vector is computed by - // transforming it into the X,Y,Z basis, and then calculating - // atan2(y,x). This mapping allows us to represent a wedge of - // angles as a 1D interval. Since the interval wraps around, we - // represent it as an Interval, i.e. an interval on the unit - // circle. - origin := p[index] - frame := getFrame(origin) - - // As we go along, we keep track of the current wedge of angles - // and the distance to the last vertex (which must be - // non-decreasing). - currentWedge := s1.FullInterval() - var lastDistance s1.Angle - - for index++; index < len(p); index++ { - candidate := p[index] - distance := origin.Distance(candidate) - - // We don't allow simplification to create edges longer than - // 90 degrees, to avoid numeric instability as lengths - // approach 180 degrees. We do need to allow for original - // edges longer than 90 degrees, though. - if distance > math.Pi/2 && lastDistance > 0 { - break - } - - // Vertices must be in increasing order along the ray, except - // for the initial disc around the origin. - if distance < lastDistance && lastDistance > tolerance { - break - } - - lastDistance = distance - - // Points that are within the tolerance distance of the origin - // do not constrain the ray direction, so we can ignore them. - if distance <= tolerance { - continue - } - - // If the current wedge of angles does not contain the angle - // to this vertex, then stop right now. Note that the wedge - // of possible ray angles is not necessarily empty yet, but we - // can't continue unless we are willing to backtrack to the - // last vertex that was contained within the wedge (since we - // don't create new vertices). 
This would be more complicated - // and also make the worst-case running time more than linear. - direction := toFrame(frame, candidate) - center := math.Atan2(direction.Y, direction.X) - if !currentWedge.Contains(center) { - break - } - - // To determine how this vertex constrains the possible ray - // angles, consider the triangle ABC where A is the origin, B - // is the candidate vertex, and C is one of the two tangent - // points between A and the spherical cap of radius - // tolerance centered at B. Then from the spherical law of - // sines, sin(a)/sin(A) = sin(c)/sin(C), where a and c are - // the lengths of the edges opposite A and C. In our case C - // is a 90 degree angle, therefore A = asin(sin(a) / sin(c)). - // Angle A is the half-angle of the allowable wedge. - halfAngle := math.Asin(math.Sin(tolerance.Radians()) / math.Sin(distance.Radians())) - target := s1.IntervalFromPointPair(center, center).Expanded(halfAngle) - currentWedge = currentWedge.Intersection(target) - } - - // We break out of the loop when we reach a vertex index that - // can't be included in the line segment, so back up by one - // vertex. - return index - 1 -} - -// SubsampleVertices returns a subsequence of vertex indices such that the -// polyline connecting these vertices is never further than the given tolerance from -// the original polyline. Provided the first and last vertices are distinct, -// they are always preserved; if they are not, the subsequence may contain -// only a single index. -// -// Some useful properties of the algorithm: -// -// - It runs in linear time. -// -// - The output always represents a valid polyline. In particular, adjacent -// output vertices are never identical or antipodal. -// -// - The method is not optimal, but it tends to produce 2-3% fewer -// vertices than the Douglas-Peucker algorithm with the same tolerance. -// -// - The output is parametrically equivalent to the original polyline to -// within the given tolerance. 
For example, if a polyline backtracks on -// itself and then proceeds onwards, the backtracking will be preserved -// (to within the given tolerance). This is different than the -// Douglas-Peucker algorithm which only guarantees geometric equivalence. -func (p *Polyline) SubsampleVertices(tolerance s1.Angle) []int { - var result []int - - if len(*p) < 1 { - return result - } - - result = append(result, 0) - clampedTolerance := s1.Angle(math.Max(tolerance.Radians(), 0)) - - for index := 0; index+1 < len(*p); { - nextIndex := findEndVertex(*p, clampedTolerance, index) - // Don't create duplicate adjacent vertices. - if (*p)[nextIndex] != (*p)[index] { - result = append(result, nextIndex) - } - index = nextIndex - } - - return result -} - -// Encode encodes the Polyline. -func (p Polyline) Encode(w io.Writer) error { - e := &encoder{w: w} - p.encode(e) - return e.err -} - -func (p Polyline) encode(e *encoder) { - e.writeInt8(encodingVersion) - e.writeUint32(uint32(len(p))) - for _, v := range p { - e.writeFloat64(v.X) - e.writeFloat64(v.Y) - e.writeFloat64(v.Z) - } -} - -// Decode decodes the polyline. -func (p *Polyline) Decode(r io.Reader) error { - d := decoder{r: asByteReader(r)} - p.decode(d) - return d.err -} - -func (p *Polyline) decode(d decoder) { - version := d.readInt8() - if d.err != nil { - return - } - if int(version) != int(encodingVersion) { - d.err = fmt.Errorf("can't decode version %d; my version: %d", version, encodingVersion) - return - } - nvertices := d.readUint32() - if d.err != nil { - return - } - if nvertices > maxEncodedVertices { - d.err = fmt.Errorf("too many vertices (%d; max is %d)", nvertices, maxEncodedVertices) - return - } - *p = make([]Point, nvertices) - for i := range *p { - (*p)[i].X = d.readFloat64() - (*p)[i].Y = d.readFloat64() - (*p)[i].Z = d.readFloat64() - } -} - -// Project returns a point on the polyline that is closest to the given point, -// and the index of the next vertex after the projected point. 
The -// value of that index is always in the range [1, len(polyline)]. -// The polyline must not be empty. -func (p *Polyline) Project(point Point) (Point, int) { - if len(*p) == 1 { - // If there is only one vertex, it is always closest to any given point. - return (*p)[0], 1 - } - - // Initial value larger than any possible distance on the unit sphere. - minDist := 10 * s1.Radian - minIndex := -1 - - // Find the line segment in the polyline that is closest to the point given. - for i := 1; i < len(*p); i++ { - if dist := DistanceFromSegment(point, (*p)[i-1], (*p)[i]); dist < minDist { - minDist = dist - minIndex = i - } - } - - // Compute the point on the segment found that is closest to the point given. - closest := Project(point, (*p)[minIndex-1], (*p)[minIndex]) - if closest == (*p)[minIndex] { - minIndex++ - } - - return closest, minIndex -} - -// IsOnRight reports whether the point given is on the right hand side of the -// polyline, using a naive definition of "right-hand-sideness" where the point -// is on the RHS of the polyline iff the point is on the RHS of the line segment -// in the polyline which it is closest to. -// The polyline must have at least 2 vertices. -func (p *Polyline) IsOnRight(point Point) bool { - // If the closest point C is an interior vertex of the polyline, let B and D - // be the previous and next vertices. The given point P is on the right of - // the polyline (locally) if B, P, D are ordered CCW around vertex C. - closest, next := p.Project(point) - if closest == (*p)[next-1] && next > 1 && next < len(*p) { - if point == (*p)[next-1] { - // Polyline vertices are not on the RHS. - return false - } - return OrderedCCW((*p)[next-2], point, (*p)[next], (*p)[next-1]) - } - // Otherwise, the closest point C is incident to exactly one polyline edge. - // We test the point P against that edge. 
- if next == len(*p) { - next-- - } - return Sign(point, (*p)[next], (*p)[next-1]) -} - -// Validate checks whether this is a valid polyline or not. -func (p *Polyline) Validate() error { - // All vertices must be unit length. - for i, pt := range *p { - if !pt.IsUnit() { - return fmt.Errorf("vertex %d is not unit length", i) - } - } - - // Adjacent vertices must not be identical or antipodal. - for i := 1; i < len(*p); i++ { - prev, cur := (*p)[i-1], (*p)[i] - if prev == cur { - return fmt.Errorf("vertices %d and %d are identical", i-1, i) - } - if prev == (Point{cur.Mul(-1)}) { - return fmt.Errorf("vertices %d and %d are antipodal", i-1, i) - } - } - - return nil -} - -// Intersects reports whether this polyline intersects the given polyline. If -// the polylines share a vertex they are considered to be intersecting. When a -// polyline endpoint is the only intersection with the other polyline, the -// function may return true or false arbitrarily. -// -// The running time is quadratic in the number of vertices. -func (p *Polyline) Intersects(o *Polyline) bool { - if len(*p) == 0 || len(*o) == 0 { - return false - } - - if !p.RectBound().Intersects(o.RectBound()) { - return false - } - - // TODO(roberts): Use ShapeIndex here. - for i := 1; i < len(*p); i++ { - crosser := NewChainEdgeCrosser((*p)[i-1], (*p)[i], (*o)[0]) - for j := 1; j < len(*o); j++ { - if crosser.ChainCrossingSign((*o)[j]) != DoNotCross { - return true - } - } - } - return false -} - -// Interpolate returns the point whose distance from vertex 0 along the polyline is -// the given fraction of the polyline's total length, and the index of -// the next vertex after the interpolated point P. Fractions less than zero -// or greater than one are clamped. The return value is unit length. The cost of -// this function is currently linear in the number of vertices. 
-// -// This method allows the caller to easily construct a given suffix of the -// polyline by concatenating P with the polyline vertices starting at that next -// vertex. Note that P is guaranteed to be different than the point at the next -// vertex, so this will never result in a duplicate vertex. -// -// The polyline must not be empty. Note that if fraction >= 1.0, then the next -// vertex will be set to len(p) (indicating that no vertices from the polyline -// need to be appended). The value of the next vertex is always between 1 and -// len(p). -// -// This method can also be used to construct a prefix of the polyline, by -// taking the polyline vertices up to next vertex-1 and appending the -// returned point P if it is different from the last vertex (since in this -// case there is no guarantee of distinctness). -func (p *Polyline) Interpolate(fraction float64) (Point, int) { - // We intentionally let the (fraction >= 1) case fall through, since - // we need to handle it in the loop below in any case because of - // possible roundoff errors. - if fraction <= 0 { - return (*p)[0], 1 - } - target := s1.Angle(fraction) * p.Length() - - for i := 1; i < len(*p); i++ { - length := (*p)[i-1].Distance((*p)[i]) - if target < length { - // This interpolates with respect to arc length rather than - // straight-line distance, and produces a unit-length result. - result := InterpolateAtDistance(target, (*p)[i-1], (*p)[i]) - - // It is possible that (result == vertex(i)) due to rounding errors. - if result == (*p)[i] { - return result, i + 1 - } - return result, i - } - target -= length - } - - return (*p)[len(*p)-1], len(*p) -} - -// Uninterpolate is the inverse operation of Interpolate. Given a point on the -// polyline, it returns the ratio of the distance to the point from the -// beginning of the polyline over the length of the polyline. The return -// value is always betwen 0 and 1 inclusive. -// -// The polyline should not be empty. 
If it has fewer than 2 vertices, the -// return value is zero. -func (p *Polyline) Uninterpolate(point Point, nextVertex int) float64 { - if len(*p) < 2 { - return 0 - } - - var sum s1.Angle - for i := 1; i < nextVertex; i++ { - sum += (*p)[i-1].Distance((*p)[i]) - } - lengthToPoint := sum + (*p)[nextVertex-1].Distance(point) - for i := nextVertex; i < len(*p); i++ { - sum += (*p)[i-1].Distance((*p)[i]) - } - // The ratio can be greater than 1.0 due to rounding errors or because the - // point is not exactly on the polyline. - return minFloat64(1.0, float64(lengthToPoint/sum)) -} - -// TODO(roberts): Differences from C++. -// NearlyCoversPolyline -// InitToSnapped -// InitToSimplified -// SnapLevel -// encode/decode compressed diff --git a/vendor/github.com/golang/geo/s2/polyline_measures.go b/vendor/github.com/golang/geo/s2/polyline_measures.go deleted file mode 100644 index 38ce991b5..000000000 --- a/vendor/github.com/golang/geo/s2/polyline_measures.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2018 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package s2 - -// This file defines various measures for polylines on the sphere. These are -// low-level methods that work directly with arrays of Points. They are used to -// implement the methods in various other measures files. - -import ( - "github.com/golang/geo/r3" - "github.com/golang/geo/s1" -) - -// polylineLength returns the length of the given Polyline. 
-// It returns 0 for polylines with fewer than two vertices. -func polylineLength(p []Point) s1.Angle { - var length s1.Angle - - for i := 1; i < len(p); i++ { - length += p[i-1].Distance(p[i]) - } - return length -} - -// polylineCentroid returns the true centroid of the polyline multiplied by the -// length of the polyline. The result is not unit length, so you may wish to -// normalize it. -// -// Scaling by the Polyline length makes it easy to compute the centroid -// of several Polylines (by simply adding up their centroids). -// -// Note that for degenerate Polylines (e.g., AA) this returns Point(0, 0, 0). -// (This answer is correct; the result of this function is a line integral over -// the polyline, whose value is always zero if the polyline is degenerate.) -func polylineCentroid(p []Point) Point { - var centroid r3.Vector - for i := 1; i < len(p); i++ { - centroid = centroid.Add(EdgeTrueCentroid(p[i-1], p[i]).Vector) - } - return Point{centroid} -} diff --git a/vendor/github.com/golang/geo/s2/predicates.go b/vendor/github.com/golang/geo/s2/predicates.go deleted file mode 100644 index 9fc5e1751..000000000 --- a/vendor/github.com/golang/geo/s2/predicates.go +++ /dev/null @@ -1,701 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package s2 - -// This file contains various predicates that are guaranteed to produce -// correct, consistent results. They are also relatively efficient. 
This is -// achieved by computing conservative error bounds and falling back to high -// precision or even exact arithmetic when the result is uncertain. Such -// predicates are useful in implementing robust algorithms. -// -// See also EdgeCrosser, which implements various exact -// edge-crossing predicates more efficiently than can be done here. - -import ( - "math" - "math/big" - - "github.com/golang/geo/r3" - "github.com/golang/geo/s1" -) - -const ( - // If any other machine architectures need to be suppported, these next three - // values will need to be updated. - - // epsilon is a small number that represents a reasonable level of noise between two - // values that can be considered to be equal. - epsilon = 1e-15 - // dblEpsilon is a smaller number for values that require more precision. - // This is the C++ DBL_EPSILON equivalent. - dblEpsilon = 2.220446049250313e-16 - // dblError is the C++ value for S2 rounding_epsilon(). - dblError = 1.110223024625156e-16 - - // maxDeterminantError is the maximum error in computing (AxB).C where all vectors - // are unit length. Using standard inequalities, it can be shown that - // - // fl(AxB) = AxB + D where |D| <= (|AxB| + (2/sqrt(3))*|A|*|B|) * e - // - // where "fl()" denotes a calculation done in floating-point arithmetic, - // |x| denotes either absolute value or the L2-norm as appropriate, and - // e is a reasonably small value near the noise level of floating point - // number accuracy. Similarly, - // - // fl(B.C) = B.C + d where |d| <= (|B.C| + 2*|B|*|C|) * e . - // - // Applying these bounds to the unit-length vectors A,B,C and neglecting - // relative error (which does not affect the sign of the result), we get - // - // fl((AxB).C) = (AxB).C + d where |d| <= (3 + 2/sqrt(3)) * e - maxDeterminantError = 1.8274 * dblEpsilon - - // detErrorMultiplier is the factor to scale the magnitudes by when checking - // for the sign of set of points with certainty. 
Using a similar technique to - // the one used for maxDeterminantError, the error is at most: - // - // |d| <= (3 + 6/sqrt(3)) * |A-C| * |B-C| * e - // - // If the determinant magnitude is larger than this value then we know - // its sign with certainty. - detErrorMultiplier = 3.2321 * dblEpsilon -) - -// Direction is an indication of the ordering of a set of points. -type Direction int - -// These are the three options for the direction of a set of points. -const ( - Clockwise Direction = -1 - Indeterminate Direction = 0 - CounterClockwise Direction = 1 -) - -// newBigFloat constructs a new big.Float with maximum precision. -func newBigFloat() *big.Float { return new(big.Float).SetPrec(big.MaxPrec) } - -// Sign returns true if the points A, B, C are strictly counterclockwise, -// and returns false if the points are clockwise or collinear (i.e. if they are all -// contained on some great circle). -// -// Due to numerical errors, situations may arise that are mathematically -// impossible, e.g. ABC may be considered strictly CCW while BCA is not. -// However, the implementation guarantees the following: -// -// If Sign(a,b,c), then !Sign(c,b,a) for all a,b,c. -func Sign(a, b, c Point) bool { - // NOTE(dnadasi): In the C++ API the equivalent method here was known as "SimpleSign". - - // We compute the signed volume of the parallelepiped ABC. The usual - // formula for this is (A ⨯ B) · C, but we compute it here using (C ⨯ A) · B - // in order to ensure that ABC and CBA are not both CCW. This follows - // from the following identities (which are true numerically, not just - // mathematically): - // - // (1) x ⨯ y == -(y ⨯ x) - // (2) -x · y == -(x · y) - return c.Cross(a.Vector).Dot(b.Vector) > 0 -} - -// RobustSign returns a Direction representing the ordering of the points. 
-// CounterClockwise is returned if the points are in counter-clockwise order, -// Clockwise for clockwise, and Indeterminate if any two points are the same (collinear), -// or the sign could not completely be determined. -// -// This function has additional logic to make sure that the above properties hold even -// when the three points are coplanar, and to deal with the limitations of -// floating-point arithmetic. -// -// RobustSign satisfies the following conditions: -// -// (1) RobustSign(a,b,c) == Indeterminate if and only if a == b, b == c, or c == a -// (2) RobustSign(b,c,a) == RobustSign(a,b,c) for all a,b,c -// (3) RobustSign(c,b,a) == -RobustSign(a,b,c) for all a,b,c -// -// In other words: -// -// (1) The result is Indeterminate if and only if two points are the same. -// (2) Rotating the order of the arguments does not affect the result. -// (3) Exchanging any two arguments inverts the result. -// -// On the other hand, note that it is not true in general that -// RobustSign(-a,b,c) == -RobustSign(a,b,c), or any similar identities -// involving antipodal points. -func RobustSign(a, b, c Point) Direction { - sign := triageSign(a, b, c) - if sign == Indeterminate { - sign = expensiveSign(a, b, c) - } - return sign -} - -// stableSign reports the direction sign of the points in a numerically stable way. -// Unlike triageSign, this method can usually compute the correct determinant sign -// even when all three points are as collinear as possible. For example if three -// points are spaced 1km apart along a random line on the Earth's surface using -// the nearest representable points, there is only a 0.4% chance that this method -// will not be able to find the determinant sign. The probability of failure -// decreases as the points get closer together; if the collinear points are 1 meter -// apart, the failure rate drops to 0.0004%. 
-// -// This method could be extended to also handle nearly-antipodal points, but antipodal -// points are rare in practice so it seems better to simply fall back to -// exact arithmetic in that case. -func stableSign(a, b, c Point) Direction { - ab := b.Sub(a.Vector) - ab2 := ab.Norm2() - bc := c.Sub(b.Vector) - bc2 := bc.Norm2() - ca := a.Sub(c.Vector) - ca2 := ca.Norm2() - - // Now compute the determinant ((A-C)x(B-C)).C, where the vertices have been - // cyclically permuted if necessary so that AB is the longest edge. (This - // minimizes the magnitude of cross product.) At the same time we also - // compute the maximum error in the determinant. - - // The two shortest edges, pointing away from their common point. - var e1, e2, op r3.Vector - if ab2 >= bc2 && ab2 >= ca2 { - // AB is the longest edge. - e1, e2, op = ca, bc, c.Vector - } else if bc2 >= ca2 { - // BC is the longest edge. - e1, e2, op = ab, ca, a.Vector - } else { - // CA is the longest edge. - e1, e2, op = bc, ab, b.Vector - } - - det := -e1.Cross(e2).Dot(op) - maxErr := detErrorMultiplier * math.Sqrt(e1.Norm2()*e2.Norm2()) - - // If the determinant isn't zero, within maxErr, we know definitively the point ordering. - if det > maxErr { - return CounterClockwise - } - if det < -maxErr { - return Clockwise - } - return Indeterminate -} - -// triageSign returns the direction sign of the points. It returns Indeterminate if two -// points are identical or the result is uncertain. Uncertain cases can be resolved, if -// desired, by calling expensiveSign. -// -// The purpose of this method is to allow additional cheap tests to be done without -// calling expensiveSign. -func triageSign(a, b, c Point) Direction { - det := a.Cross(b.Vector).Dot(c.Vector) - if det > maxDeterminantError { - return CounterClockwise - } - if det < -maxDeterminantError { - return Clockwise - } - return Indeterminate -} - -// expensiveSign reports the direction sign of the points. 
It returns Indeterminate -// if two of the input points are the same. It uses multiple-precision arithmetic -// to ensure that its results are always self-consistent. -func expensiveSign(a, b, c Point) Direction { - // Return Indeterminate if and only if two points are the same. - // This ensures RobustSign(a,b,c) == Indeterminate if and only if a == b, b == c, or c == a. - // ie. Property 1 of RobustSign. - if a == b || b == c || c == a { - return Indeterminate - } - - // Next we try recomputing the determinant still using floating-point - // arithmetic but in a more precise way. This is more expensive than the - // simple calculation done by triageSign, but it is still *much* cheaper - // than using arbitrary-precision arithmetic. This optimization is able to - // compute the correct determinant sign in virtually all cases except when - // the three points are truly collinear (e.g., three points on the equator). - detSign := stableSign(a, b, c) - if detSign != Indeterminate { - return detSign - } - - // Otherwise fall back to exact arithmetic and symbolic permutations. - return exactSign(a, b, c, true) -} - -// exactSign reports the direction sign of the points computed using high-precision -// arithmetic and/or symbolic perturbations. -func exactSign(a, b, c Point, perturb bool) Direction { - // Sort the three points in lexicographic order, keeping track of the sign - // of the permutation. (Each exchange inverts the sign of the determinant.) - permSign := CounterClockwise - pa := &a - pb := &b - pc := &c - if pa.Cmp(pb.Vector) > 0 { - pa, pb = pb, pa - permSign = -permSign - } - if pb.Cmp(pc.Vector) > 0 { - pb, pc = pc, pb - permSign = -permSign - } - if pa.Cmp(pb.Vector) > 0 { - pa, pb = pb, pa - permSign = -permSign - } - - // Construct multiple-precision versions of the sorted points and compute - // their precise 3x3 determinant. 
- xa := r3.PreciseVectorFromVector(pa.Vector) - xb := r3.PreciseVectorFromVector(pb.Vector) - xc := r3.PreciseVectorFromVector(pc.Vector) - xbCrossXc := xb.Cross(xc) - det := xa.Dot(xbCrossXc) - - // The precision of big.Float is high enough that the result should always - // be exact enough (no rounding was performed). - - // If the exact determinant is non-zero, we're done. - detSign := Direction(det.Sign()) - if detSign == Indeterminate && perturb { - // Otherwise, we need to resort to symbolic perturbations to resolve the - // sign of the determinant. - detSign = symbolicallyPerturbedSign(xa, xb, xc, xbCrossXc) - } - return permSign * detSign -} - -// symbolicallyPerturbedSign reports the sign of the determinant of three points -// A, B, C under a model where every possible Point is slightly perturbed by -// a unique infinitesmal amount such that no three perturbed points are -// collinear and no four points are coplanar. The perturbations are so small -// that they do not change the sign of any determinant that was non-zero -// before the perturbations, and therefore can be safely ignored unless the -// determinant of three points is exactly zero (using multiple-precision -// arithmetic). This returns CounterClockwise or Clockwise according to the -// sign of the determinant after the symbolic perturbations are taken into account. -// -// Since the symbolic perturbation of a given point is fixed (i.e., the -// perturbation is the same for all calls to this method and does not depend -// on the other two arguments), the results of this method are always -// self-consistent. It will never return results that would correspond to an -// impossible configuration of non-degenerate points. -// -// This requires that the 3x3 determinant of A, B, C must be exactly zero. -// And the points must be distinct, with A < B < C in lexicographic order. -// -// Reference: -// "Simulation of Simplicity" (Edelsbrunner and Muecke, ACM Transactions on -// Graphics, 1990). 
-// -func symbolicallyPerturbedSign(a, b, c, bCrossC r3.PreciseVector) Direction { - // This method requires that the points are sorted in lexicographically - // increasing order. This is because every possible Point has its own - // symbolic perturbation such that if A < B then the symbolic perturbation - // for A is much larger than the perturbation for B. - // - // Alternatively, we could sort the points in this method and keep track of - // the sign of the permutation, but it is more efficient to do this before - // converting the inputs to the multi-precision representation, and this - // also lets us re-use the result of the cross product B x C. - // - // Every input coordinate x[i] is assigned a symbolic perturbation dx[i]. - // We then compute the sign of the determinant of the perturbed points, - // i.e. - // | a.X+da.X a.Y+da.Y a.Z+da.Z | - // | b.X+db.X b.Y+db.Y b.Z+db.Z | - // | c.X+dc.X c.Y+dc.Y c.Z+dc.Z | - // - // The perturbations are chosen such that - // - // da.Z > da.Y > da.X > db.Z > db.Y > db.X > dc.Z > dc.Y > dc.X - // - // where each perturbation is so much smaller than the previous one that we - // don't even need to consider it unless the coefficients of all previous - // perturbations are zero. In fact, it is so small that we don't need to - // consider it unless the coefficient of all products of the previous - // perturbations are zero. For example, we don't need to consider the - // coefficient of db.Y unless the coefficient of db.Z *da.X is zero. - // - // The follow code simply enumerates the coefficients of the perturbations - // (and products of perturbations) that appear in the determinant above, in - // order of decreasing perturbation magnitude. The first non-zero - // coefficient determines the sign of the result. 
The easiest way to - // enumerate the coefficients in the correct order is to pretend that each - // perturbation is some tiny value "eps" raised to a power of two: - // - // eps** 1 2 4 8 16 32 64 128 256 - // da.Z da.Y da.X db.Z db.Y db.X dc.Z dc.Y dc.X - // - // Essentially we can then just count in binary and test the corresponding - // subset of perturbations at each step. So for example, we must test the - // coefficient of db.Z*da.X before db.Y because eps**12 > eps**16. - // - // Of course, not all products of these perturbations appear in the - // determinant above, since the determinant only contains the products of - // elements in distinct rows and columns. Thus we don't need to consider - // da.Z*da.Y, db.Y *da.Y, etc. Furthermore, sometimes different pairs of - // perturbations have the same coefficient in the determinant; for example, - // da.Y*db.X and db.Y*da.X have the same coefficient (c.Z). Therefore - // we only need to test this coefficient the first time we encounter it in - // the binary order above (which will be db.Y*da.X). - // - // The sequence of tests below also appears in Table 4-ii of the paper - // referenced above, if you just want to look it up, with the following - // translations: [a,b,c] -> [i,j,k] and [0,1,2] -> [1,2,3]. Also note that - // some of the signs are different because the opposite cross product is - // used (e.g., B x C rather than C x B). 
- - detSign := bCrossC.Z.Sign() // da.Z - if detSign != 0 { - return Direction(detSign) - } - detSign = bCrossC.Y.Sign() // da.Y - if detSign != 0 { - return Direction(detSign) - } - detSign = bCrossC.X.Sign() // da.X - if detSign != 0 { - return Direction(detSign) - } - - detSign = newBigFloat().Sub(newBigFloat().Mul(c.X, a.Y), newBigFloat().Mul(c.Y, a.X)).Sign() // db.Z - if detSign != 0 { - return Direction(detSign) - } - detSign = c.X.Sign() // db.Z * da.Y - if detSign != 0 { - return Direction(detSign) - } - detSign = -(c.Y.Sign()) // db.Z * da.X - if detSign != 0 { - return Direction(detSign) - } - - detSign = newBigFloat().Sub(newBigFloat().Mul(c.Z, a.X), newBigFloat().Mul(c.X, a.Z)).Sign() // db.Y - if detSign != 0 { - return Direction(detSign) - } - detSign = c.Z.Sign() // db.Y * da.X - if detSign != 0 { - return Direction(detSign) - } - - // The following test is listed in the paper, but it is redundant because - // the previous tests guarantee that C == (0, 0, 0). - // (c.Y*a.Z - c.Z*a.Y).Sign() // db.X - - detSign = newBigFloat().Sub(newBigFloat().Mul(a.X, b.Y), newBigFloat().Mul(a.Y, b.X)).Sign() // dc.Z - if detSign != 0 { - return Direction(detSign) - } - detSign = -(b.X.Sign()) // dc.Z * da.Y - if detSign != 0 { - return Direction(detSign) - } - detSign = b.Y.Sign() // dc.Z * da.X - if detSign != 0 { - return Direction(detSign) - } - detSign = a.X.Sign() // dc.Z * db.Y - if detSign != 0 { - return Direction(detSign) - } - return CounterClockwise // dc.Z * db.Y * da.X -} - -// CompareDistances returns -1, 0, or +1 according to whether AX < BX, A == B, -// or AX > BX respectively. Distances are measured with respect to the positions -// of X, A, and B as though they were reprojected to lie exactly on the surface of -// the unit sphere. Furthermore, this method uses symbolic perturbations to -// ensure that the result is non-zero whenever A != B, even when AX == BX -// exactly, or even when A and B project to the same point on the sphere. 
-// Such results are guaranteed to be self-consistent, i.e. if AB < BC and -// BC < AC, then AB < AC. -func CompareDistances(x, a, b Point) int { - // We start by comparing distances using dot products (i.e., cosine of the - // angle), because (1) this is the cheapest technique, and (2) it is valid - // over the entire range of possible angles. (We can only use the sin^2 - // technique if both angles are less than 90 degrees or both angles are - // greater than 90 degrees.) - sign := triageCompareCosDistances(x, a, b) - if sign != 0 { - return sign - } - - // Optimization for (a == b) to avoid falling back to exact arithmetic. - if a == b { - return 0 - } - - // It is much better numerically to compare distances using cos(angle) if - // the distances are near 90 degrees and sin^2(angle) if the distances are - // near 0 or 180 degrees. We only need to check one of the two angles when - // making this decision because the fact that the test above failed means - // that angles "a" and "b" are very close together. - cosAX := a.Dot(x.Vector) - if cosAX > 1/math.Sqrt2 { - // Angles < 45 degrees. - sign = triageCompareSin2Distances(x, a, b) - } else if cosAX < -1/math.Sqrt2 { - // Angles > 135 degrees. sin^2(angle) is decreasing in this range. - sign = -triageCompareSin2Distances(x, a, b) - } - // C++ adds an additional check here using 80-bit floats. - // This is skipped in Go because we only have 32 and 64 bit floats. - - if sign != 0 { - return sign - } - - sign = exactCompareDistances(r3.PreciseVectorFromVector(x.Vector), r3.PreciseVectorFromVector(a.Vector), r3.PreciseVectorFromVector(b.Vector)) - if sign != 0 { - return sign - } - return symbolicCompareDistances(x, a, b) -} - -// cosDistance returns cos(XY) where XY is the angle between X and Y, and the -// maximum error amount in the result. This requires X and Y be normalized. 
-func cosDistance(x, y Point) (cos, err float64) { - cos = x.Dot(y.Vector) - return cos, 9.5*dblError*math.Abs(cos) + 1.5*dblError -} - -// sin2Distance returns sin**2(XY), where XY is the angle between X and Y, -// and the maximum error amount in the result. This requires X and Y be normalized. -func sin2Distance(x, y Point) (sin2, err float64) { - // The (x-y).Cross(x+y) trick eliminates almost all of error due to x - // and y being not quite unit length. This method is extremely accurate - // for small distances; the *relative* error in the result is O(dblError) for - // distances as small as dblError. - n := x.Sub(y.Vector).Cross(x.Add(y.Vector)) - sin2 = 0.25 * n.Norm2() - err = ((21+4*math.Sqrt(3))*dblError*sin2 + - 32*math.Sqrt(3)*dblError*dblError*math.Sqrt(sin2) + - 768*dblError*dblError*dblError*dblError) - return sin2, err -} - -// triageCompareCosDistances returns -1, 0, or +1 according to whether AX < BX, -// A == B, or AX > BX by comparing the distances between them using cosDistance. -func triageCompareCosDistances(x, a, b Point) int { - cosAX, cosAXerror := cosDistance(a, x) - cosBX, cosBXerror := cosDistance(b, x) - diff := cosAX - cosBX - err := cosAXerror + cosBXerror - if diff > err { - return -1 - } - if diff < -err { - return 1 - } - return 0 -} - -// triageCompareSin2Distances returns -1, 0, or +1 according to whether AX < BX, -// A == B, or AX > BX by comparing the distances between them using sin2Distance. -func triageCompareSin2Distances(x, a, b Point) int { - sin2AX, sin2AXerror := sin2Distance(a, x) - sin2BX, sin2BXerror := sin2Distance(b, x) - diff := sin2AX - sin2BX - err := sin2AXerror + sin2BXerror - if diff > err { - return 1 - } - if diff < -err { - return -1 - } - return 0 -} - -// exactCompareDistances returns -1, 0, or 1 after comparing using the values as -// PreciseVectors. 
-func exactCompareDistances(x, a, b r3.PreciseVector) int { - // This code produces the same result as though all points were reprojected - // to lie exactly on the surface of the unit sphere. It is based on testing - // whether x.Dot(a.Normalize()) < x.Dot(b.Normalize()), reformulated - // so that it can be evaluated using exact arithmetic. - cosAX := x.Dot(a) - cosBX := x.Dot(b) - - // If the two values have different signs, we need to handle that case now - // before squaring them below. - aSign := cosAX.Sign() - bSign := cosBX.Sign() - if aSign != bSign { - // If cos(AX) > cos(BX), then AX < BX. - if aSign > bSign { - return -1 - } - return 1 - } - cosAX2 := newBigFloat().Mul(cosAX, cosAX) - cosBX2 := newBigFloat().Mul(cosBX, cosBX) - cmp := newBigFloat().Sub(cosBX2.Mul(cosBX2, a.Norm2()), cosAX2.Mul(cosAX2, b.Norm2())) - return aSign * cmp.Sign() -} - -// symbolicCompareDistances returns -1, 0, or +1 given three points such that AX == BX -// (exactly) according to whether AX < BX, AX == BX, or AX > BX after symbolic -// perturbations are taken into account. -func symbolicCompareDistances(x, a, b Point) int { - // Our symbolic perturbation strategy is based on the following model. - // Similar to "simulation of simplicity", we assign a perturbation to every - // point such that if A < B, then the symbolic perturbation for A is much, - // much larger than the symbolic perturbation for B. We imagine that - // rather than projecting every point to lie exactly on the unit sphere, - // instead each point is positioned on its own tiny pedestal that raises it - // just off the surface of the unit sphere. This means that the distance AX - // is actually the true distance AX plus the (symbolic) heights of the - // pedestals for A and X. The pedestals are infinitesmally thin, so they do - // not affect distance measurements except at the two endpoints. 
If several - // points project to exactly the same point on the unit sphere, we imagine - // that they are placed on separate pedestals placed close together, where - // the distance between pedestals is much, much less than the height of any - // pedestal. (There are a finite number of Points, and therefore a finite - // number of pedestals, so this is possible.) - // - // If A < B, then A is on a higher pedestal than B, and therefore AX > BX. - switch a.Cmp(b.Vector) { - case -1: - return 1 - case 1: - return -1 - default: - return 0 - } -} - -var ( - // ca45Degrees is a predefined ChordAngle representing (approximately) 45 degrees. - ca45Degrees = s1.ChordAngleFromSquaredLength(2 - math.Sqrt2) -) - -// CompareDistance returns -1, 0, or +1 according to whether the distance XY is -// respectively less than, equal to, or greater than the provided chord angle. Distances are measured -// with respect to the positions of all points as though they are projected to lie -// exactly on the surface of the unit sphere. -func CompareDistance(x, y Point, r s1.ChordAngle) int { - // As with CompareDistances, we start by comparing dot products because - // the sin^2 method is only valid when the distance XY and the limit "r" are - // both less than 90 degrees. - sign := triageCompareCosDistance(x, y, float64(r)) - if sign != 0 { - return sign - } - - // Unlike with CompareDistances, it's not worth using the sin^2 method - // when the distance limit is near 180 degrees because the ChordAngle - // representation itself has has a rounding error of up to 2e-8 radians for - // distances near 180 degrees. 
- if r < ca45Degrees { - sign = triageCompareSin2Distance(x, y, float64(r)) - if sign != 0 { - return sign - } - } - return exactCompareDistance(r3.PreciseVectorFromVector(x.Vector), r3.PreciseVectorFromVector(y.Vector), big.NewFloat(float64(r)).SetPrec(big.MaxPrec)) -} - -// triageCompareCosDistance returns -1, 0, or +1 according to whether the distance XY is -// less than, equal to, or greater than r2 respectively using cos distance. -func triageCompareCosDistance(x, y Point, r2 float64) int { - cosXY, cosXYError := cosDistance(x, y) - cosR := 1.0 - 0.5*r2 - cosRError := 2.0 * dblError * cosR - diff := cosXY - cosR - err := cosXYError + cosRError - if diff > err { - return -1 - } - if diff < -err { - return 1 - } - return 0 -} - -// triageCompareSin2Distance returns -1, 0, or +1 according to whether the distance XY is -// less than, equal to, or greater than r2 respectively using sin^2 distance. -func triageCompareSin2Distance(x, y Point, r2 float64) int { - // Only valid for distance limits < 90 degrees. - sin2XY, sin2XYError := sin2Distance(x, y) - sin2R := r2 * (1.0 - 0.25*r2) - sin2RError := 3.0 * dblError * sin2R - diff := sin2XY - sin2R - err := sin2XYError + sin2RError - if diff > err { - return 1 - } - if diff < -err { - return -1 - } - return 0 -} - -var ( - bigOne = big.NewFloat(1.0).SetPrec(big.MaxPrec) - bigHalf = big.NewFloat(0.5).SetPrec(big.MaxPrec) -) - -// exactCompareDistance returns -1, 0, or +1 after comparing using PreciseVectors. -func exactCompareDistance(x, y r3.PreciseVector, r2 *big.Float) int { - // This code produces the same result as though all points were reprojected - // to lie exactly on the surface of the unit sphere. It is based on - // comparing the cosine of the angle XY (when both points are projected to - // lie exactly on the sphere) to the given threshold. 
- cosXY := x.Dot(y) - cosR := newBigFloat().Sub(bigOne, newBigFloat().Mul(bigHalf, r2)) - - // If the two values have different signs, we need to handle that case now - // before squaring them below. - xySign := cosXY.Sign() - rSign := cosR.Sign() - if xySign != rSign { - if xySign > rSign { - return -1 - } - return 1 // If cos(XY) > cos(r), then XY < r. - } - cmp := newBigFloat().Sub( - newBigFloat().Mul( - newBigFloat().Mul(cosR, cosR), newBigFloat().Mul(x.Norm2(), y.Norm2())), - newBigFloat().Mul(cosXY, cosXY)) - return xySign * cmp.Sign() -} - -// TODO(roberts): Differences from C++ -// CompareEdgeDistance -// CompareEdgeDirections -// EdgeCircumcenterSign -// GetVoronoiSiteExclusion -// GetClosestVertex -// TriageCompareLineSin2Distance -// TriageCompareLineCos2Distance -// TriageCompareLineDistance -// TriageCompareEdgeDistance -// ExactCompareLineDistance -// ExactCompareEdgeDistance -// TriageCompareEdgeDirections -// ExactCompareEdgeDirections -// ArePointsAntipodal -// ArePointsLinearlyDependent -// GetCircumcenter -// TriageEdgeCircumcenterSign -// ExactEdgeCircumcenterSign -// UnperturbedSign -// SymbolicEdgeCircumcenterSign -// ExactVoronoiSiteExclusion diff --git a/vendor/github.com/golang/geo/s2/projections.go b/vendor/github.com/golang/geo/s2/projections.go deleted file mode 100644 index f7273609c..000000000 --- a/vendor/github.com/golang/geo/s2/projections.go +++ /dev/null @@ -1,241 +0,0 @@ -// Copyright 2018 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package s2 - -import ( - "math" - - "github.com/golang/geo/r2" - "github.com/golang/geo/s1" -) - -// Projection defines an interface for different ways of mapping between s2 and r2 Points. -// It can also define the coordinate wrapping behavior along each axis. -type Projection interface { - // Project converts a point on the sphere to a projected 2D point. - Project(p Point) r2.Point - - // Unproject converts a projected 2D point to a point on the sphere. - // - // If wrapping is defined for a given axis (see below), then this method - // should accept any real number for the corresponding coordinate. - Unproject(p r2.Point) Point - - // FromLatLng is a convenience function equivalent to Project(LatLngToPoint(ll)), - // but the implementation is more efficient. - FromLatLng(ll LatLng) r2.Point - - // ToLatLng is a convenience function equivalent to LatLngFromPoint(Unproject(p)), - // but the implementation is more efficient. - ToLatLng(p r2.Point) LatLng - - // Interpolate returns the point obtained by interpolating the given - // fraction of the distance along the line from A to B. - // Fractions < 0 or > 1 result in extrapolation instead. - Interpolate(f float64, a, b r2.Point) r2.Point - - // WrapDistance reports the coordinate wrapping distance along each axis. - // If this value is non-zero for a given axis, the coordinates are assumed - // to "wrap" with the given period. For example, if WrapDistance.Y == 360 - // then (x, y) and (x, y + 360) should map to the same Point. - // - // This information is used to ensure that edges takes the shortest path - // between two given points. For example, if coordinates represent - // (latitude, longitude) pairs in degrees and WrapDistance().Y == 360, - // then the edge (5:179, 5:-179) would be interpreted as spanning 2 degrees - // of longitude rather than 358 degrees. 
- // - // If a given axis does not wrap, its WrapDistance should be set to zero. - WrapDistance() r2.Point - - // WrapDestination that wraps the coordinates of B if necessary in order to - // obtain the shortest edge AB. For example, suppose that A = [170, 20], - // B = [-170, 20], and the projection wraps so that [x, y] == [x + 360, y]. - // Then this function would return [190, 20] for point B (reducing the edge - // length in the "x" direction from 340 to 20). - WrapDestination(a, b r2.Point) r2.Point - - // We do not support implementations of this interface outside this package. - privateInterface() -} - -// PlateCarreeProjection defines the "plate carree" (square plate) projection, -// which converts points on the sphere to (longitude, latitude) pairs. -// Coordinates can be scaled so that they represent radians, degrees, etc, but -// the projection is always centered around (latitude=0, longitude=0). -// -// Note that (x, y) coordinates are backwards compared to the usual (latitude, -// longitude) ordering, in order to match the usual convention for graphs in -// which "x" is horizontal and "y" is vertical. -type PlateCarreeProjection struct { - xWrap float64 - toRadians float64 // Multiplier to convert coordinates to radians. - fromRadians float64 // Multiplier to convert coordinates from radians. -} - -// NewPlateCarreeProjection constructs a plate carree projection where the -// x-coordinates (lng) span [-xScale, xScale] and the y coordinates (lat) -// span [-xScale/2, xScale/2]. For example if xScale==180 then the x -// range is [-180, 180] and the y range is [-90, 90]. -// -// By default coordinates are expressed in radians, i.e. the x range is -// [-Pi, Pi] and the y range is [-Pi/2, Pi/2]. -func NewPlateCarreeProjection(xScale float64) Projection { - return &PlateCarreeProjection{ - xWrap: 2 * xScale, - toRadians: math.Pi / xScale, - fromRadians: xScale / math.Pi, - } -} - -// Project converts a point on the sphere to a projected 2D point. 
-func (p *PlateCarreeProjection) Project(pt Point) r2.Point { - return p.FromLatLng(LatLngFromPoint(pt)) -} - -// Unproject converts a projected 2D point to a point on the sphere. -func (p *PlateCarreeProjection) Unproject(pt r2.Point) Point { - return PointFromLatLng(p.ToLatLng(pt)) -} - -// FromLatLng returns the LatLng projected into an R2 Point. -func (p *PlateCarreeProjection) FromLatLng(ll LatLng) r2.Point { - return r2.Point{ - X: p.fromRadians * ll.Lng.Radians(), - Y: p.fromRadians * ll.Lat.Radians(), - } -} - -// ToLatLng returns the LatLng projected from the given R2 Point. -func (p *PlateCarreeProjection) ToLatLng(pt r2.Point) LatLng { - return LatLng{ - Lat: s1.Angle(p.toRadians * pt.Y), - Lng: s1.Angle(p.toRadians * math.Remainder(pt.X, p.xWrap)), - } -} - -// Interpolate returns the point obtained by interpolating the given -// fraction of the distance along the line from A to B. -func (p *PlateCarreeProjection) Interpolate(f float64, a, b r2.Point) r2.Point { - return a.Mul(1 - f).Add(b.Mul(f)) -} - -// WrapDistance reports the coordinate wrapping distance along each axis. -func (p *PlateCarreeProjection) WrapDistance() r2.Point { - return r2.Point{p.xWrap, 0} -} - -// WrapDestination wraps the points if needed to get the shortest edge. -func (p *PlateCarreeProjection) WrapDestination(a, b r2.Point) r2.Point { - return wrapDestination(a, b, p.WrapDistance) -} - -func (p *PlateCarreeProjection) privateInterface() {} - -// MercatorProjection defines the spherical Mercator projection. Google Maps -// uses this projection together with WGS84 coordinates, in which case it is -// known as the "Web Mercator" projection (see Wikipedia). This class makes -// no assumptions regarding the coordinate system of its input points, but -// simply applies the spherical Mercator projection to them. -// -// The Mercator projection is finite in width (x) but infinite in height (y). 
-// "x" corresponds to longitude, and spans a finite range such as [-180, 180] -// (with coordinate wrapping), while "y" is a function of latitude and spans -// an infinite range. (As "y" coordinates get larger, points get closer to -// the north pole but never quite reach it.) The north and south poles have -// infinite "y" values. (Note that this will cause problems if you tessellate -// a Mercator edge where one endpoint is a pole. If you need to do this, clip -// the edge first so that the "y" coordinate is no more than about 5 * maxX.) -type MercatorProjection struct { - xWrap float64 - toRadians float64 // Multiplier to convert coordinates to radians. - fromRadians float64 // Multiplier to convert coordinates from radians. -} - -// NewMercatorProjection constructs a Mercator projection with the given maximum -// longitude axis value corresponding to a range of [-maxLng, maxLng]. -// The horizontal and vertical axes are scaled equally. -func NewMercatorProjection(maxLng float64) Projection { - return &MercatorProjection{ - xWrap: 2 * maxLng, - toRadians: math.Pi / maxLng, - fromRadians: maxLng / math.Pi, - } -} - -// Project converts a point on the sphere to a projected 2D point. -func (p *MercatorProjection) Project(pt Point) r2.Point { - return p.FromLatLng(LatLngFromPoint(pt)) -} - -// Unproject converts a projected 2D point to a point on the sphere. -func (p *MercatorProjection) Unproject(pt r2.Point) Point { - return PointFromLatLng(p.ToLatLng(pt)) -} - -// FromLatLng returns the LatLng projected into an R2 Point. -func (p *MercatorProjection) FromLatLng(ll LatLng) r2.Point { - // This formula is more accurate near zero than the log(tan()) version. - // Note that latitudes of +/- 90 degrees yield "y" values of +/- infinity. - sinPhi := math.Sin(float64(ll.Lat)) - y := 0.5 * math.Log((1+sinPhi)/(1-sinPhi)) - return r2.Point{p.fromRadians * float64(ll.Lng), p.fromRadians * y} -} - -// ToLatLng returns the LatLng projected from the given R2 Point. 
-func (p *MercatorProjection) ToLatLng(pt r2.Point) LatLng { - // This formula is more accurate near zero than the atan(exp()) version. - x := p.toRadians * math.Remainder(pt.X, p.xWrap) - k := math.Exp(2 * p.toRadians * pt.Y) - var y float64 - if math.IsInf(k, 0) { - y = math.Pi / 2 - } else { - y = math.Asin((k - 1) / (k + 1)) - } - return LatLng{s1.Angle(y), s1.Angle(x)} -} - -// Interpolate returns the point obtained by interpolating the given -// fraction of the distance along the line from A to B. -func (p *MercatorProjection) Interpolate(f float64, a, b r2.Point) r2.Point { - return a.Mul(1 - f).Add(b.Mul(f)) -} - -// WrapDistance reports the coordinate wrapping distance along each axis. -func (p *MercatorProjection) WrapDistance() r2.Point { - return r2.Point{p.xWrap, 0} -} - -// WrapDestination wraps the points if needed to get the shortest edge. -func (p *MercatorProjection) WrapDestination(a, b r2.Point) r2.Point { - return wrapDestination(a, b, p.WrapDistance) -} - -func (p *MercatorProjection) privateInterface() {} - -func wrapDestination(a, b r2.Point, wrapDistance func() r2.Point) r2.Point { - wrap := wrapDistance() - x := b.X - y := b.Y - // The code below ensures that "b" is unmodified unless wrapping is required. - if wrap.X > 0 && math.Abs(x-a.X) > 0.5*wrap.X { - x = a.X + math.Remainder(x-a.X, wrap.X) - } - if wrap.Y > 0 && math.Abs(y-a.Y) > 0.5*wrap.Y { - y = a.Y + math.Remainder(y-a.Y, wrap.Y) - } - return r2.Point{x, y} -} diff --git a/vendor/github.com/golang/geo/s2/query_entry.go b/vendor/github.com/golang/geo/s2/query_entry.go deleted file mode 100644 index 65e819e3a..000000000 --- a/vendor/github.com/golang/geo/s2/query_entry.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2020 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package s2 - -import "container/heap" - -// A queryQueueEntry stores CellIDs and distance from a target. It is used by the -// different S2 Query types to efficiently build their internal priority queue -// in the optimized algorithm implementations. -type queryQueueEntry struct { - // A lower bound on the distance from the target to ID. This is the key - // of the priority queue. - distance distance - - // The cell being queued. - id CellID - - // If the CellID belongs to a ShapeIndex, this field stores the - // corresponding ShapeIndexCell. Otherwise ID is a proper ancestor of - // one or more ShapeIndexCells and this field stores is nil. - indexCell *ShapeIndexCell -} - -// queryQueue is used by the optimized algorithm to maintain a priority queue of -// unprocessed CellIDs, sorted in increasing order of distance from the target. -type queryQueue struct { - queue queryPQ -} - -// newQueryQueue returns a new initialized queryQueue. -func newQueryQueue() *queryQueue { - q := &queryQueue{ - queue: make(queryPQ, 0), - } - heap.Init(&q.queue) - return q -} - -// push adds the given entry to the top of this queue. -func (q *queryQueue) push(e *queryQueueEntry) { - heap.Push(&q.queue, e) -} - -// pop returns the top element of this queue. -func (q *queryQueue) pop() *queryQueueEntry { - return heap.Pop(&q.queue).(*queryQueueEntry) -} - -func (q *queryQueue) size() int { - return q.queue.Len() -} - -func (q *queryQueue) reset() { - q.queue = q.queue[:0] -} - -// queryPQ is a priority queue that implements the heap interface. 
-type queryPQ []*queryQueueEntry - -func (q queryPQ) Len() int { return len(q) } -func (q queryPQ) Less(i, j int) bool { - return q[i].distance.less(q[j].distance) -} - -// Swap swaps the two entries. -func (q queryPQ) Swap(i, j int) { - q[i], q[j] = q[j], q[i] -} - -// Push adds the given entry to the queue. -func (q *queryPQ) Push(x interface{}) { - item := x.(*queryQueueEntry) - *q = append(*q, item) -} - -// Pop returns the top element of the queue. -func (q *queryPQ) Pop() interface{} { - item := (*q)[len(*q)-1] - *q = (*q)[:len(*q)-1] - return item -} diff --git a/vendor/github.com/golang/geo/s2/query_options.go b/vendor/github.com/golang/geo/s2/query_options.go deleted file mode 100644 index 9b7e38d62..000000000 --- a/vendor/github.com/golang/geo/s2/query_options.go +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright 2019 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package s2 - -import ( - "math" - - "github.com/golang/geo/s1" -) - -const maxQueryResults = math.MaxInt32 - -// queryOptions represents the set of all configurable parameters used by all of -// the Query types. Most of these fields have non-zero defaults, so initialization -// is handled within each Query type. All of the exported methods accept user -// supplied sets of options to set or adjust as necessary. -// -// Several of the defaults depend on the distance interface type being used -// (e.g. minDistance, maxDistance, etc.) 
-// -// If a user sets an option value that a given query type doesn't use, it is ignored. -type queryOptions struct { - // maxResults specifies that at most MaxResults edges should be returned. - // This must be at least 1. - // - // The default value is to return all results. - maxResults int - - // distanceLimit specifies that only edges whose distance to the target is - // within this distance should be returned. - // - // Note that edges whose distance is exactly equal to this are - // not returned. In most cases this doesn't matter (since distances are - // not computed exactly in the first place), but if such edges are needed - // then you can retrieve them by specifying the distance as the next - // largest representable distance. i.e. distanceLimit.Successor(). - // - // The default value is the infinity value, such that all results will be - // returned. - distanceLimit s1.ChordAngle - - // maxError specifies that edges up to MaxError further away than the true - // closest edges may be substituted in the result set, as long as such - // edges satisfy all the remaining search criteria (such as DistanceLimit). - // This option only has an effect if MaxResults is also specified; - // otherwise all edges closer than MaxDistance will always be returned. - // - // Note that this does not affect how the distance between edges is - // computed; it simply gives the algorithm permission to stop the search - // early as soon as the best possible improvement drops below MaxError. - // - // This can be used to implement distance predicates efficiently. For - // example, to determine whether the minimum distance is less than D, set - // MaxResults == 1 and MaxDistance == MaxError == D. This causes - // the algorithm to terminate as soon as it finds any edge whose distance - // is less than D, rather than continuing to search for an edge that is - // even closer. - // - // The default value is zero. 
- maxError s1.ChordAngle - - // includeInteriors specifies that polygon interiors should be included - // when measuring distances. In other words, polygons that contain the target - // should have a distance of zero. (For targets consisting of multiple connected - // components, the distance is zero if any component is contained.) This - // is indicated in the results by returning a (ShapeID, EdgeID) pair - // with EdgeID == -1, i.e. this value denotes the polygons's interior. - // - // Note that for efficiency, any polygon that intersects the target may or - // may not have an EdgeID == -1 result. Such results are optional - // because in that case the distance to the polygon is already zero. - // - // The default value is true. - includeInteriors bool - - // specifies that distances should be computed by examining every edge - // rather than using the ShapeIndex. - // - // TODO(roberts): When optimized is implemented, update the default to false. - // The default value is true. - useBruteForce bool - - // region specifies that results must intersect the given Region. - // - // Note that if you want to set the region to a disc around a target - // point, it is faster to use a PointTarget with distanceLimit set - // instead. You can also set a distance limit and also require that results - // lie within a given rectangle. - // - // The default is nil (no region limits). - region Region -} - -// UseBruteForce sets or disables the use of brute force in a query. -func (q *queryOptions) UseBruteForce(x bool) *queryOptions { - q.useBruteForce = x - return q -} - -// IncludeInteriors specifies whether polygon interiors should be -// included when measuring distances. 
-func (q *queryOptions) IncludeInteriors(x bool) *queryOptions { - q.includeInteriors = x - return q -} - -// MaxError specifies that edges up to dist away than the true -// matching edges may be substituted in the result set, as long as such -// edges satisfy all the remaining search criteria (such as DistanceLimit). -// This option only has an effect if MaxResults is also specified; -// otherwise all edges closer than MaxDistance will always be returned. -func (q *queryOptions) MaxError(x s1.ChordAngle) *queryOptions { - q.maxError = x - return q -} - -// MaxResults specifies that at most MaxResults edges should be returned. -// This must be at least 1. -func (q *queryOptions) MaxResults(x int) *queryOptions { - // TODO(roberts): What should be done if the value is <= 0? - q.maxResults = int(x) - return q -} - -// DistanceLimit specifies that only edges whose distance to the target is -// within, this distance should be returned. Edges whose distance is equal -// are not returned. -// -// To include values that are equal, specify the limit with the next largest -// representable distance such as limit.Successor(), or set the option with -// Furthest/ClosestInclusiveDistanceLimit. -func (q *queryOptions) DistanceLimit(x s1.ChordAngle) *queryOptions { - q.distanceLimit = x - return q -} - -// ClosestInclusiveDistanceLimit sets the distance limit such that results whose -// distance is exactly equal to the limit are also returned. -func (q *queryOptions) ClosestInclusiveDistanceLimit(limit s1.ChordAngle) *queryOptions { - q.distanceLimit = limit.Successor() - return q -} - -// FurthestInclusiveDistanceLimit sets the distance limit such that results whose -// distance is exactly equal to the limit are also returned. 
-func (q *queryOptions) FurthestInclusiveDistanceLimit(limit s1.ChordAngle) *queryOptions { - q.distanceLimit = limit.Predecessor() - return q -} - -// ClosestConservativeDistanceLimit sets the distance limit such that results -// also incorporates the error in distance calculations. This ensures that all -// edges whose true distance is less than or equal to limit will be returned -// (along with some edges whose true distance is slightly greater). -// -// Algorithms that need to do exact distance comparisons can use this -// option to find a set of candidate edges that can then be filtered -// further (e.g., using CompareDistance). -func (q *queryOptions) ClosestConservativeDistanceLimit(limit s1.ChordAngle) *queryOptions { - q.distanceLimit = limit.Expanded(minUpdateDistanceMaxError(limit)) - return q -} - -// FurthestConservativeDistanceLimit sets the distance limit such that results -// also incorporates the error in distance calculations. This ensures that all -// edges whose true distance is greater than or equal to limit will be returned -// (along with some edges whose true distance is slightly less). -func (q *queryOptions) FurthestConservativeDistanceLimit(limit s1.ChordAngle) *queryOptions { - q.distanceLimit = limit.Expanded(-minUpdateDistanceMaxError(limit)) - return q -} - -// newQueryOptions returns a set of options using the given distance type -// with the proper default values. -func newQueryOptions(d distance) *queryOptions { - return &queryOptions{ - maxResults: maxQueryResults, - distanceLimit: d.infinity().chordAngle(), - maxError: 0, - includeInteriors: true, - useBruteForce: false, - region: nil, - } -} diff --git a/vendor/github.com/golang/geo/s2/rect.go b/vendor/github.com/golang/geo/s2/rect.go deleted file mode 100644 index f6b52a59e..000000000 --- a/vendor/github.com/golang/geo/s2/rect.go +++ /dev/null @@ -1,710 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package s2 - -import ( - "fmt" - "io" - "math" - - "github.com/golang/geo/r1" - "github.com/golang/geo/r3" - "github.com/golang/geo/s1" -) - -// Rect represents a closed latitude-longitude rectangle. -type Rect struct { - Lat r1.Interval - Lng s1.Interval -} - -var ( - validRectLatRange = r1.Interval{-math.Pi / 2, math.Pi / 2} - validRectLngRange = s1.FullInterval() -) - -// EmptyRect returns the empty rectangle. -func EmptyRect() Rect { return Rect{r1.EmptyInterval(), s1.EmptyInterval()} } - -// FullRect returns the full rectangle. -func FullRect() Rect { return Rect{validRectLatRange, validRectLngRange} } - -// RectFromLatLng constructs a rectangle containing a single point p. -func RectFromLatLng(p LatLng) Rect { - return Rect{ - Lat: r1.Interval{p.Lat.Radians(), p.Lat.Radians()}, - Lng: s1.Interval{p.Lng.Radians(), p.Lng.Radians()}, - } -} - -// RectFromCenterSize constructs a rectangle with the given size and center. -// center needs to be normalized, but size does not. The latitude -// interval of the result is clamped to [-90,90] degrees, and the longitude -// interval of the result is FullRect() if and only if the longitude size is -// 360 degrees or more. 
-// -// Examples of clamping (in degrees): -// center=(80,170), size=(40,60) -> lat=[60,90], lng=[140,-160] -// center=(10,40), size=(210,400) -> lat=[-90,90], lng=[-180,180] -// center=(-90,180), size=(20,50) -> lat=[-90,-80], lng=[155,-155] -func RectFromCenterSize(center, size LatLng) Rect { - half := LatLng{size.Lat / 2, size.Lng / 2} - return RectFromLatLng(center).expanded(half) -} - -// IsValid returns true iff the rectangle is valid. -// This requires Lat ⊆ [-π/2,π/2] and Lng ⊆ [-π,π], and Lat = ∅ iff Lng = ∅ -func (r Rect) IsValid() bool { - return math.Abs(r.Lat.Lo) <= math.Pi/2 && - math.Abs(r.Lat.Hi) <= math.Pi/2 && - r.Lng.IsValid() && - r.Lat.IsEmpty() == r.Lng.IsEmpty() -} - -// IsEmpty reports whether the rectangle is empty. -func (r Rect) IsEmpty() bool { return r.Lat.IsEmpty() } - -// IsFull reports whether the rectangle is full. -func (r Rect) IsFull() bool { return r.Lat.Equal(validRectLatRange) && r.Lng.IsFull() } - -// IsPoint reports whether the rectangle is a single point. -func (r Rect) IsPoint() bool { return r.Lat.Lo == r.Lat.Hi && r.Lng.Lo == r.Lng.Hi } - -// Vertex returns the i-th vertex of the rectangle (i = 0,1,2,3) in CCW order -// (lower left, lower right, upper right, upper left). -func (r Rect) Vertex(i int) LatLng { - var lat, lng float64 - - switch i { - case 0: - lat = r.Lat.Lo - lng = r.Lng.Lo - case 1: - lat = r.Lat.Lo - lng = r.Lng.Hi - case 2: - lat = r.Lat.Hi - lng = r.Lng.Hi - case 3: - lat = r.Lat.Hi - lng = r.Lng.Lo - } - return LatLng{s1.Angle(lat) * s1.Radian, s1.Angle(lng) * s1.Radian} -} - -// Lo returns one corner of the rectangle. -func (r Rect) Lo() LatLng { - return LatLng{s1.Angle(r.Lat.Lo) * s1.Radian, s1.Angle(r.Lng.Lo) * s1.Radian} -} - -// Hi returns the other corner of the rectangle. -func (r Rect) Hi() LatLng { - return LatLng{s1.Angle(r.Lat.Hi) * s1.Radian, s1.Angle(r.Lng.Hi) * s1.Radian} -} - -// Center returns the center of the rectangle. 
-func (r Rect) Center() LatLng { - return LatLng{s1.Angle(r.Lat.Center()) * s1.Radian, s1.Angle(r.Lng.Center()) * s1.Radian} -} - -// Size returns the size of the Rect. -func (r Rect) Size() LatLng { - return LatLng{s1.Angle(r.Lat.Length()) * s1.Radian, s1.Angle(r.Lng.Length()) * s1.Radian} -} - -// Area returns the surface area of the Rect. -func (r Rect) Area() float64 { - if r.IsEmpty() { - return 0 - } - capDiff := math.Abs(math.Sin(r.Lat.Hi) - math.Sin(r.Lat.Lo)) - return r.Lng.Length() * capDiff -} - -// AddPoint increases the size of the rectangle to include the given point. -func (r Rect) AddPoint(ll LatLng) Rect { - if !ll.IsValid() { - return r - } - return Rect{ - Lat: r.Lat.AddPoint(ll.Lat.Radians()), - Lng: r.Lng.AddPoint(ll.Lng.Radians()), - } -} - -// expanded returns a rectangle that has been expanded by margin.Lat on each side -// in the latitude direction, and by margin.Lng on each side in the longitude -// direction. If either margin is negative, then it shrinks the rectangle on -// the corresponding sides instead. The resulting rectangle may be empty. -// -// The latitude-longitude space has the topology of a cylinder. Longitudes -// "wrap around" at +/-180 degrees, while latitudes are clamped to range [-90, 90]. -// This means that any expansion (positive or negative) of the full longitude range -// remains full (since the "rectangle" is actually a continuous band around the -// cylinder), while expansion of the full latitude range remains full only if the -// margin is positive. -// -// If either the latitude or longitude interval becomes empty after -// expansion by a negative margin, the result is empty. -// -// Note that if an expanded rectangle contains a pole, it may not contain -// all possible lat/lng representations of that pole, e.g., both points [π/2,0] -// and [π/2,1] represent the same pole, but they might not be contained by the -// same Rect. 
-// -// If you are trying to grow a rectangle by a certain distance on the -// sphere (e.g. 5km), refer to the ExpandedByDistance() C++ method implementation -// instead. -func (r Rect) expanded(margin LatLng) Rect { - lat := r.Lat.Expanded(margin.Lat.Radians()) - lng := r.Lng.Expanded(margin.Lng.Radians()) - - if lat.IsEmpty() || lng.IsEmpty() { - return EmptyRect() - } - - return Rect{ - Lat: lat.Intersection(validRectLatRange), - Lng: lng, - } -} - -func (r Rect) String() string { return fmt.Sprintf("[Lo%v, Hi%v]", r.Lo(), r.Hi()) } - -// PolarClosure returns the rectangle unmodified if it does not include either pole. -// If it includes either pole, PolarClosure returns an expansion of the rectangle along -// the longitudinal range to include all possible representations of the contained poles. -func (r Rect) PolarClosure() Rect { - if r.Lat.Lo == -math.Pi/2 || r.Lat.Hi == math.Pi/2 { - return Rect{r.Lat, s1.FullInterval()} - } - return r -} - -// Union returns the smallest Rect containing the union of this rectangle and the given rectangle. -func (r Rect) Union(other Rect) Rect { - return Rect{ - Lat: r.Lat.Union(other.Lat), - Lng: r.Lng.Union(other.Lng), - } -} - -// Intersection returns the smallest rectangle containing the intersection of -// this rectangle and the given rectangle. Note that the region of intersection -// may consist of two disjoint rectangles, in which case a single rectangle -// spanning both of them is returned. -func (r Rect) Intersection(other Rect) Rect { - lat := r.Lat.Intersection(other.Lat) - lng := r.Lng.Intersection(other.Lng) - - if lat.IsEmpty() || lng.IsEmpty() { - return EmptyRect() - } - return Rect{lat, lng} -} - -// Intersects reports whether this rectangle and the other have any points in common. -func (r Rect) Intersects(other Rect) bool { - return r.Lat.Intersects(other.Lat) && r.Lng.Intersects(other.Lng) -} - -// CapBound returns a cap that contains Rect. 
-func (r Rect) CapBound() Cap { - // We consider two possible bounding caps, one whose axis passes - // through the center of the lat-long rectangle and one whose axis - // is the north or south pole. We return the smaller of the two caps. - - if r.IsEmpty() { - return EmptyCap() - } - - var poleZ, poleAngle float64 - if r.Lat.Hi+r.Lat.Lo < 0 { - // South pole axis yields smaller cap. - poleZ = -1 - poleAngle = math.Pi/2 + r.Lat.Hi - } else { - poleZ = 1 - poleAngle = math.Pi/2 - r.Lat.Lo - } - poleCap := CapFromCenterAngle(Point{r3.Vector{0, 0, poleZ}}, s1.Angle(poleAngle)*s1.Radian) - - // For bounding rectangles that span 180 degrees or less in longitude, the - // maximum cap size is achieved at one of the rectangle vertices. For - // rectangles that are larger than 180 degrees, we punt and always return a - // bounding cap centered at one of the two poles. - if math.Remainder(r.Lng.Hi-r.Lng.Lo, 2*math.Pi) >= 0 && r.Lng.Hi-r.Lng.Lo < 2*math.Pi { - midCap := CapFromPoint(PointFromLatLng(r.Center())).AddPoint(PointFromLatLng(r.Lo())).AddPoint(PointFromLatLng(r.Hi())) - if midCap.Height() < poleCap.Height() { - return midCap - } - } - return poleCap -} - -// RectBound returns itself. -func (r Rect) RectBound() Rect { - return r -} - -// Contains reports whether this Rect contains the other Rect. -func (r Rect) Contains(other Rect) bool { - return r.Lat.ContainsInterval(other.Lat) && r.Lng.ContainsInterval(other.Lng) -} - -// ContainsCell reports whether the given Cell is contained by this Rect. -func (r Rect) ContainsCell(c Cell) bool { - // A latitude-longitude rectangle contains a cell if and only if it contains - // the cell's bounding rectangle. This test is exact from a mathematical - // point of view, assuming that the bounds returned by Cell.RectBound() - // are tight. 
However, note that there can be a loss of precision when - // converting between representations -- for example, if an s2.Cell is - // converted to a polygon, the polygon's bounding rectangle may not contain - // the cell's bounding rectangle. This has some slightly unexpected side - // effects; for instance, if one creates an s2.Polygon from an s2.Cell, the - // polygon will contain the cell, but the polygon's bounding box will not. - return r.Contains(c.RectBound()) -} - -// ContainsLatLng reports whether the given LatLng is within the Rect. -func (r Rect) ContainsLatLng(ll LatLng) bool { - if !ll.IsValid() { - return false - } - return r.Lat.Contains(ll.Lat.Radians()) && r.Lng.Contains(ll.Lng.Radians()) -} - -// ContainsPoint reports whether the given Point is within the Rect. -func (r Rect) ContainsPoint(p Point) bool { - return r.ContainsLatLng(LatLngFromPoint(p)) -} - -// CellUnionBound computes a covering of the Rect. -func (r Rect) CellUnionBound() []CellID { - return r.CapBound().CellUnionBound() -} - -// intersectsLatEdge reports whether the edge AB intersects the given edge of constant -// latitude. Requires the points to have unit length. -func intersectsLatEdge(a, b Point, lat s1.Angle, lng s1.Interval) bool { - // Unfortunately, lines of constant latitude are curves on - // the sphere. They can intersect a straight edge in 0, 1, or 2 points. - - // First, compute the normal to the plane AB that points vaguely north. - z := Point{a.PointCross(b).Normalize()} - if z.Z < 0 { - z = Point{z.Mul(-1)} - } - - // Extend this to an orthonormal frame (x,y,z) where x is the direction - // where the great circle through AB achieves its maximium latitude. - y := Point{z.PointCross(PointFromCoords(0, 0, 1)).Normalize()} - x := y.Cross(z.Vector) - - // Compute the angle "theta" from the x-axis (in the x-y plane defined - // above) where the great circle intersects the given line of latitude. 
- sinLat := math.Sin(float64(lat)) - if math.Abs(sinLat) >= x.Z { - // The great circle does not reach the given latitude. - return false - } - - cosTheta := sinLat / x.Z - sinTheta := math.Sqrt(1 - cosTheta*cosTheta) - theta := math.Atan2(sinTheta, cosTheta) - - // The candidate intersection points are located +/- theta in the x-y - // plane. For an intersection to be valid, we need to check that the - // intersection point is contained in the interior of the edge AB and - // also that it is contained within the given longitude interval "lng". - - // Compute the range of theta values spanned by the edge AB. - abTheta := s1.IntervalFromPointPair( - math.Atan2(a.Dot(y.Vector), a.Dot(x)), - math.Atan2(b.Dot(y.Vector), b.Dot(x))) - - if abTheta.Contains(theta) { - // Check if the intersection point is also in the given lng interval. - isect := x.Mul(cosTheta).Add(y.Mul(sinTheta)) - if lng.Contains(math.Atan2(isect.Y, isect.X)) { - return true - } - } - - if abTheta.Contains(-theta) { - // Check if the other intersection point is also in the given lng interval. - isect := x.Mul(cosTheta).Sub(y.Mul(sinTheta)) - if lng.Contains(math.Atan2(isect.Y, isect.X)) { - return true - } - } - return false -} - -// intersectsLngEdge reports whether the edge AB intersects the given edge of constant -// longitude. Requires the points to have unit length. -func intersectsLngEdge(a, b Point, lat r1.Interval, lng s1.Angle) bool { - // The nice thing about edges of constant longitude is that - // they are straight lines on the sphere (geodesics). - return CrossingSign(a, b, PointFromLatLng(LatLng{s1.Angle(lat.Lo), lng}), - PointFromLatLng(LatLng{s1.Angle(lat.Hi), lng})) == Cross -} - -// IntersectsCell reports whether this rectangle intersects the given cell. This is an -// exact test and may be fairly expensive. -func (r Rect) IntersectsCell(c Cell) bool { - // First we eliminate the cases where one region completely contains the - // other. 
Once these are disposed of, then the regions will intersect - // if and only if their boundaries intersect. - if r.IsEmpty() { - return false - } - if r.ContainsPoint(Point{c.id.rawPoint()}) { - return true - } - if c.ContainsPoint(PointFromLatLng(r.Center())) { - return true - } - - // Quick rejection test (not required for correctness). - if !r.Intersects(c.RectBound()) { - return false - } - - // Precompute the cell vertices as points and latitude-longitudes. We also - // check whether the Cell contains any corner of the rectangle, or - // vice-versa, since the edge-crossing tests only check the edge interiors. - vertices := [4]Point{} - latlngs := [4]LatLng{} - - for i := range vertices { - vertices[i] = c.Vertex(i) - latlngs[i] = LatLngFromPoint(vertices[i]) - if r.ContainsLatLng(latlngs[i]) { - return true - } - if c.ContainsPoint(PointFromLatLng(r.Vertex(i))) { - return true - } - } - - // Now check whether the boundaries intersect. Unfortunately, a - // latitude-longitude rectangle does not have straight edges: two edges - // are curved, and at least one of them is concave. - for i := range vertices { - edgeLng := s1.IntervalFromEndpoints(latlngs[i].Lng.Radians(), latlngs[(i+1)&3].Lng.Radians()) - if !r.Lng.Intersects(edgeLng) { - continue - } - - a := vertices[i] - b := vertices[(i+1)&3] - if edgeLng.Contains(r.Lng.Lo) && intersectsLngEdge(a, b, r.Lat, s1.Angle(r.Lng.Lo)) { - return true - } - if edgeLng.Contains(r.Lng.Hi) && intersectsLngEdge(a, b, r.Lat, s1.Angle(r.Lng.Hi)) { - return true - } - if intersectsLatEdge(a, b, s1.Angle(r.Lat.Lo), r.Lng) { - return true - } - if intersectsLatEdge(a, b, s1.Angle(r.Lat.Hi), r.Lng) { - return true - } - } - return false -} - -// Encode encodes the Rect. 
-func (r Rect) Encode(w io.Writer) error { - e := &encoder{w: w} - r.encode(e) - return e.err -} - -func (r Rect) encode(e *encoder) { - e.writeInt8(encodingVersion) - e.writeFloat64(r.Lat.Lo) - e.writeFloat64(r.Lat.Hi) - e.writeFloat64(r.Lng.Lo) - e.writeFloat64(r.Lng.Hi) -} - -// Decode decodes a rectangle. -func (r *Rect) Decode(rd io.Reader) error { - d := &decoder{r: asByteReader(rd)} - r.decode(d) - return d.err -} - -func (r *Rect) decode(d *decoder) { - if version := d.readUint8(); int8(version) != encodingVersion && d.err == nil { - d.err = fmt.Errorf("can't decode version %d; my version: %d", version, encodingVersion) - return - } - r.Lat.Lo = d.readFloat64() - r.Lat.Hi = d.readFloat64() - r.Lng.Lo = d.readFloat64() - r.Lng.Hi = d.readFloat64() - return -} - -// DistanceToLatLng returns the minimum distance (measured along the surface of the sphere) -// from a given point to the rectangle (both its boundary and its interior). -// If r is empty, the result is meaningless. -// The latlng must be valid. -func (r Rect) DistanceToLatLng(ll LatLng) s1.Angle { - if r.Lng.Contains(float64(ll.Lng)) { - return maxAngle(0, ll.Lat-s1.Angle(r.Lat.Hi), s1.Angle(r.Lat.Lo)-ll.Lat) - } - - i := s1.IntervalFromEndpoints(r.Lng.Hi, r.Lng.ComplementCenter()) - rectLng := r.Lng.Lo - if i.Contains(float64(ll.Lng)) { - rectLng = r.Lng.Hi - } - - lo := LatLng{s1.Angle(r.Lat.Lo) * s1.Radian, s1.Angle(rectLng) * s1.Radian} - hi := LatLng{s1.Angle(r.Lat.Hi) * s1.Radian, s1.Angle(rectLng) * s1.Radian} - return DistanceFromSegment(PointFromLatLng(ll), PointFromLatLng(lo), PointFromLatLng(hi)) -} - -// DirectedHausdorffDistance returns the directed Hausdorff distance (measured along the -// surface of the sphere) to the given Rect. The directed Hausdorff -// distance from rectangle A to rectangle B is given by -// h(A, B) = max_{p in A} min_{q in B} d(p, q). 
-func (r Rect) DirectedHausdorffDistance(other Rect) s1.Angle { - if r.IsEmpty() { - return 0 * s1.Radian - } - if other.IsEmpty() { - return math.Pi * s1.Radian - } - - lng := r.Lng.DirectedHausdorffDistance(other.Lng) - return directedHausdorffDistance(lng, r.Lat, other.Lat) -} - -// HausdorffDistance returns the undirected Hausdorff distance (measured along the -// surface of the sphere) to the given Rect. -// The Hausdorff distance between rectangle A and rectangle B is given by -// H(A, B) = max{h(A, B), h(B, A)}. -func (r Rect) HausdorffDistance(other Rect) s1.Angle { - return maxAngle(r.DirectedHausdorffDistance(other), - other.DirectedHausdorffDistance(r)) -} - -// ApproxEqual reports whether the latitude and longitude intervals of the two rectangles -// are the same up to a small tolerance. -func (r Rect) ApproxEqual(other Rect) bool { - return r.Lat.ApproxEqual(other.Lat) && r.Lng.ApproxEqual(other.Lng) -} - -// directedHausdorffDistance returns the directed Hausdorff distance -// from one longitudinal edge spanning latitude range 'a' to the other -// longitudinal edge spanning latitude range 'b', with their longitudinal -// difference given by 'lngDiff'. -func directedHausdorffDistance(lngDiff s1.Angle, a, b r1.Interval) s1.Angle { - // By symmetry, we can assume a's longitude is 0 and b's longitude is - // lngDiff. Call b's two endpoints bLo and bHi. Let H be the hemisphere - // containing a and delimited by the longitude line of b. The Voronoi diagram - // of b on H has three edges (portions of great circles) all orthogonal to b - // and meeting at bLo cross bHi. - // E1: (bLo, bLo cross bHi) - // E2: (bHi, bLo cross bHi) - // E3: (-bMid, bLo cross bHi), where bMid is the midpoint of b - // - // They subdivide H into three Voronoi regions. Depending on how longitude 0 - // (which contains edge a) intersects these regions, we distinguish two cases: - // Case 1: it intersects three regions. This occurs when lngDiff <= π/2. 
- // Case 2: it intersects only two regions. This occurs when lngDiff > π/2. - // - // In the first case, the directed Hausdorff distance to edge b can only be - // realized by the following points on a: - // A1: two endpoints of a. - // A2: intersection of a with the equator, if b also intersects the equator. - // - // In the second case, the directed Hausdorff distance to edge b can only be - // realized by the following points on a: - // B1: two endpoints of a. - // B2: intersection of a with E3 - // B3: farthest point from bLo to the interior of D, and farthest point from - // bHi to the interior of U, if any, where D (resp. U) is the portion - // of edge a below (resp. above) the intersection point from B2. - - if lngDiff < 0 { - panic("impossible: negative lngDiff") - } - if lngDiff > math.Pi { - panic("impossible: lngDiff > Pi") - } - - if lngDiff == 0 { - return s1.Angle(a.DirectedHausdorffDistance(b)) - } - - // Assumed longitude of b. - bLng := lngDiff - // Two endpoints of b. - bLo := PointFromLatLng(LatLng{s1.Angle(b.Lo), bLng}) - bHi := PointFromLatLng(LatLng{s1.Angle(b.Hi), bLng}) - - // Cases A1 and B1. - aLo := PointFromLatLng(LatLng{s1.Angle(a.Lo), 0}) - aHi := PointFromLatLng(LatLng{s1.Angle(a.Hi), 0}) - maxDistance := maxAngle( - DistanceFromSegment(aLo, bLo, bHi), - DistanceFromSegment(aHi, bLo, bHi)) - - if lngDiff <= math.Pi/2 { - // Case A2. - if a.Contains(0) && b.Contains(0) { - maxDistance = maxAngle(maxDistance, lngDiff) - } - return maxDistance - } - - // Case B2. - p := bisectorIntersection(b, bLng) - pLat := LatLngFromPoint(p).Lat - if a.Contains(float64(pLat)) { - maxDistance = maxAngle(maxDistance, p.Angle(bLo.Vector)) - } - - // Case B3. 
- if pLat > s1.Angle(a.Lo) { - intDist, ok := interiorMaxDistance(r1.Interval{a.Lo, math.Min(float64(pLat), a.Hi)}, bLo) - if ok { - maxDistance = maxAngle(maxDistance, intDist) - } - } - if pLat < s1.Angle(a.Hi) { - intDist, ok := interiorMaxDistance(r1.Interval{math.Max(float64(pLat), a.Lo), a.Hi}, bHi) - if ok { - maxDistance = maxAngle(maxDistance, intDist) - } - } - - return maxDistance -} - -// interiorMaxDistance returns the max distance from a point b to the segment spanning latitude range -// aLat on longitude 0 if the max occurs in the interior of aLat. Otherwise, returns (0, false). -func interiorMaxDistance(aLat r1.Interval, b Point) (a s1.Angle, ok bool) { - // Longitude 0 is in the y=0 plane. b.X >= 0 implies that the maximum - // does not occur in the interior of aLat. - if aLat.IsEmpty() || b.X >= 0 { - return 0, false - } - - // Project b to the y=0 plane. The antipodal of the normalized projection is - // the point at which the maxium distance from b occurs, if it is contained - // in aLat. - intersectionPoint := PointFromCoords(-b.X, 0, -b.Z) - if !aLat.InteriorContains(float64(LatLngFromPoint(intersectionPoint).Lat)) { - return 0, false - } - return b.Angle(intersectionPoint.Vector), true -} - -// bisectorIntersection return the intersection of longitude 0 with the bisector of an edge -// on longitude 'lng' and spanning latitude range 'lat'. -func bisectorIntersection(lat r1.Interval, lng s1.Angle) Point { - lng = s1.Angle(math.Abs(float64(lng))) - latCenter := s1.Angle(lat.Center()) - - // A vector orthogonal to the bisector of the given longitudinal edge. - orthoBisector := LatLng{latCenter - math.Pi/2, lng} - if latCenter < 0 { - orthoBisector = LatLng{-latCenter - math.Pi/2, lng - math.Pi} - } - - // A vector orthogonal to longitude 0. - orthoLng := Point{r3.Vector{0, -1, 0}} - - return orthoLng.PointCross(PointFromLatLng(orthoBisector)) -} - -// Centroid returns the true centroid of the given Rect multiplied by its -// surface area. 
The result is not unit length, so you may want to normalize it. -// Note that in general the centroid is *not* at the center of the rectangle, and -// in fact it may not even be contained by the rectangle. (It is the "center of -// mass" of the rectangle viewed as subset of the unit sphere, i.e. it is the -// point in space about which this curved shape would rotate.) -// -// The reason for multiplying the result by the rectangle area is to make it -// easier to compute the centroid of more complicated shapes. The centroid -// of a union of disjoint regions can be computed simply by adding their -// Centroid results. -func (r Rect) Centroid() Point { - // When a sphere is divided into slices of constant thickness by a set - // of parallel planes, all slices have the same surface area. This - // implies that the z-component of the centroid is simply the midpoint - // of the z-interval spanned by the Rect. - // - // Similarly, it is easy to see that the (x,y) of the centroid lies in - // the plane through the midpoint of the rectangle's longitude interval. - // We only need to determine the distance "d" of this point from the - // z-axis. - // - // Let's restrict our attention to a particular z-value. In this - // z-plane, the Rect is a circular arc. The centroid of this arc - // lies on a radial line through the midpoint of the arc, and at a - // distance from the z-axis of - // - // r * (sin(alpha) / alpha) - // - // where r = sqrt(1-z^2) is the radius of the arc, and "alpha" is half - // of the arc length (i.e., the arc covers longitudes [-alpha, alpha]). - // - // To find the centroid distance from the z-axis for the entire - // rectangle, we just need to integrate over the z-interval. This gives - // - // d = Integrate[sqrt(1-z^2)*sin(alpha)/alpha, z1..z2] / (z2 - z1) - // - // where [z1, z2] is the range of z-values covered by the rectangle. 
- // This simplifies to - // - // d = sin(alpha)/(2*alpha*(z2-z1))*(z2*r2 - z1*r1 + theta2 - theta1) - // - // where [theta1, theta2] is the latitude interval, z1=sin(theta1), - // z2=sin(theta2), r1=cos(theta1), and r2=cos(theta2). - // - // Finally, we want to return not the centroid itself, but the centroid - // scaled by the area of the rectangle. The area of the rectangle is - // - // A = 2 * alpha * (z2 - z1) - // - // which fortunately appears in the denominator of "d". - - if r.IsEmpty() { - return Point{} - } - - z1 := math.Sin(r.Lat.Lo) - z2 := math.Sin(r.Lat.Hi) - r1 := math.Cos(r.Lat.Lo) - r2 := math.Cos(r.Lat.Hi) - - alpha := 0.5 * r.Lng.Length() - r0 := math.Sin(alpha) * (r2*z2 - r1*z1 + r.Lat.Length()) - lng := r.Lng.Center() - z := alpha * (z2 + z1) * (z2 - z1) // scaled by the area - - return Point{r3.Vector{r0 * math.Cos(lng), r0 * math.Sin(lng), z}} -} - -// BUG: The major differences from the C++ version are: -// - Get*Distance, Vertex, InteriorContains(LatLng|Rect|Point) diff --git a/vendor/github.com/golang/geo/s2/rect_bounder.go b/vendor/github.com/golang/geo/s2/rect_bounder.go deleted file mode 100644 index 419dea0c1..000000000 --- a/vendor/github.com/golang/geo/s2/rect_bounder.go +++ /dev/null @@ -1,352 +0,0 @@ -// Copyright 2017 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package s2 - -import ( - "math" - - "github.com/golang/geo/r1" - "github.com/golang/geo/r3" - "github.com/golang/geo/s1" -) - -// RectBounder is used to compute a bounding rectangle that contains all edges -// defined by a vertex chain (v0, v1, v2, ...). All vertices must be unit length. -// Note that the bounding rectangle of an edge can be larger than the bounding -// rectangle of its endpoints, e.g. consider an edge that passes through the North Pole. -// -// The bounds are calculated conservatively to account for numerical errors -// when points are converted to LatLngs. More precisely, this function -// guarantees the following: -// Let L be a closed edge chain (Loop) such that the interior of the loop does -// not contain either pole. Now if P is any point such that L.ContainsPoint(P), -// then RectBound(L).ContainsPoint(LatLngFromPoint(P)). -type RectBounder struct { - // The previous vertex in the chain. - a Point - // The previous vertex latitude longitude. - aLL LatLng - bound Rect -} - -// NewRectBounder returns a new instance of a RectBounder. -func NewRectBounder() *RectBounder { - return &RectBounder{ - bound: EmptyRect(), - } -} - -// maxErrorForTests returns the maximum error in RectBound provided that the -// result does not include either pole. It is only used for testing purposes -func (r *RectBounder) maxErrorForTests() LatLng { - // The maximum error in the latitude calculation is - // 3.84 * dblEpsilon for the PointCross calculation - // 0.96 * dblEpsilon for the Latitude calculation - // 5 * dblEpsilon added by AddPoint/RectBound to compensate for error - // ----------------- - // 9.80 * dblEpsilon maximum error in result - // - // The maximum error in the longitude calculation is dblEpsilon. RectBound - // does not do any expansion because this isn't necessary in order to - // bound the *rounded* longitudes of contained points. 
- return LatLng{10 * dblEpsilon * s1.Radian, 1 * dblEpsilon * s1.Radian} -} - -// AddPoint adds the given point to the chain. The Point must be unit length. -func (r *RectBounder) AddPoint(b Point) { - bLL := LatLngFromPoint(b) - - if r.bound.IsEmpty() { - r.a = b - r.aLL = bLL - r.bound = r.bound.AddPoint(bLL) - return - } - - // First compute the cross product N = A x B robustly. This is the normal - // to the great circle through A and B. We don't use RobustSign - // since that method returns an arbitrary vector orthogonal to A if the two - // vectors are proportional, and we want the zero vector in that case. - n := r.a.Sub(b.Vector).Cross(r.a.Add(b.Vector)) // N = 2 * (A x B) - - // The relative error in N gets large as its norm gets very small (i.e., - // when the two points are nearly identical or antipodal). We handle this - // by choosing a maximum allowable error, and if the error is greater than - // this we fall back to a different technique. Since it turns out that - // the other sources of error in converting the normal to a maximum - // latitude add up to at most 1.16 * dblEpsilon, and it is desirable to - // have the total error be a multiple of dblEpsilon, we have chosen to - // limit the maximum error in the normal to be 3.84 * dblEpsilon. - // It is possible to show that the error is less than this when - // - // n.Norm() >= 8 * sqrt(3) / (3.84 - 0.5 - sqrt(3)) * dblEpsilon - // = 1.91346e-15 (about 8.618 * dblEpsilon) - nNorm := n.Norm() - if nNorm < 1.91346e-15 { - // A and B are either nearly identical or nearly antipodal (to within - // 4.309 * dblEpsilon, or about 6 nanometers on the earth's surface). - if r.a.Dot(b.Vector) < 0 { - // The two points are nearly antipodal. The easiest solution is to - // assume that the edge between A and B could go in any direction - // around the sphere. - r.bound = FullRect() - } else { - // The two points are nearly identical (to within 4.309 * dblEpsilon). 
- // In this case we can just use the bounding rectangle of the points, - // since after the expansion done by GetBound this Rect is - // guaranteed to include the (lat,lng) values of all points along AB. - r.bound = r.bound.Union(RectFromLatLng(r.aLL).AddPoint(bLL)) - } - r.a = b - r.aLL = bLL - return - } - - // Compute the longitude range spanned by AB. - lngAB := s1.EmptyInterval().AddPoint(r.aLL.Lng.Radians()).AddPoint(bLL.Lng.Radians()) - if lngAB.Length() >= math.Pi-2*dblEpsilon { - // The points lie on nearly opposite lines of longitude to within the - // maximum error of the calculation. The easiest solution is to assume - // that AB could go on either side of the pole. - lngAB = s1.FullInterval() - } - - // Next we compute the latitude range spanned by the edge AB. We start - // with the range spanning the two endpoints of the edge: - latAB := r1.IntervalFromPoint(r.aLL.Lat.Radians()).AddPoint(bLL.Lat.Radians()) - - // This is the desired range unless the edge AB crosses the plane - // through N and the Z-axis (which is where the great circle through A - // and B attains its minimum and maximum latitudes). To test whether AB - // crosses this plane, we compute a vector M perpendicular to this - // plane and then project A and B onto it. - m := n.Cross(r3.Vector{0, 0, 1}) - mA := m.Dot(r.a.Vector) - mB := m.Dot(b.Vector) - - // We want to test the signs of "mA" and "mB", so we need to bound - // the error in these calculations. It is possible to show that the - // total error is bounded by - // - // (1 + sqrt(3)) * dblEpsilon * nNorm + 8 * sqrt(3) * (dblEpsilon**2) - // = 6.06638e-16 * nNorm + 6.83174e-31 - - mError := 6.06638e-16*nNorm + 6.83174e-31 - if mA*mB < 0 || math.Abs(mA) <= mError || math.Abs(mB) <= mError { - // Minimum/maximum latitude *may* occur in the edge interior. - // - // The maximum latitude is 90 degrees minus the latitude of N. We - // compute this directly using atan2 in order to get maximum accuracy - // near the poles. 
- // - // Our goal is compute a bound that contains the computed latitudes of - // all S2Points P that pass the point-in-polygon containment test. - // There are three sources of error we need to consider: - // - the directional error in N (at most 3.84 * dblEpsilon) - // - converting N to a maximum latitude - // - computing the latitude of the test point P - // The latter two sources of error are at most 0.955 * dblEpsilon - // individually, but it is possible to show by a more complex analysis - // that together they can add up to at most 1.16 * dblEpsilon, for a - // total error of 5 * dblEpsilon. - // - // We add 3 * dblEpsilon to the bound here, and GetBound() will pad - // the bound by another 2 * dblEpsilon. - maxLat := math.Min( - math.Atan2(math.Sqrt(n.X*n.X+n.Y*n.Y), math.Abs(n.Z))+3*dblEpsilon, - math.Pi/2) - - // In order to get tight bounds when the two points are close together, - // we also bound the min/max latitude relative to the latitudes of the - // endpoints A and B. First we compute the distance between A and B, - // and then we compute the maximum change in latitude between any two - // points along the great circle that are separated by this distance. - // This gives us a latitude change "budget". Some of this budget must - // be spent getting from A to B; the remainder bounds the round-trip - // distance (in latitude) from A or B to the min or max latitude - // attained along the edge AB. - latBudget := 2 * math.Asin(0.5*(r.a.Sub(b.Vector)).Norm()*math.Sin(maxLat)) - maxDelta := 0.5*(latBudget-latAB.Length()) + dblEpsilon - - // Test whether AB passes through the point of maximum latitude or - // minimum latitude. If the dot product(s) are small enough then the - // result may be ambiguous. 
- if mA <= mError && mB >= -mError { - latAB.Hi = math.Min(maxLat, latAB.Hi+maxDelta) - } - if mB <= mError && mA >= -mError { - latAB.Lo = math.Max(-maxLat, latAB.Lo-maxDelta) - } - } - r.a = b - r.aLL = bLL - r.bound = r.bound.Union(Rect{latAB, lngAB}) -} - -// RectBound returns the bounding rectangle of the edge chain that connects the -// vertices defined so far. This bound satisfies the guarantee made -// above, i.e. if the edge chain defines a Loop, then the bound contains -// the LatLng coordinates of all Points contained by the loop. -func (r *RectBounder) RectBound() Rect { - return r.bound.expanded(LatLng{s1.Angle(2 * dblEpsilon), 0}).PolarClosure() -} - -// ExpandForSubregions expands a bounding Rect so that it is guaranteed to -// contain the bounds of any subregion whose bounds are computed using -// ComputeRectBound. For example, consider a loop L that defines a square. -// GetBound ensures that if a point P is contained by this square, then -// LatLngFromPoint(P) is contained by the bound. But now consider a diamond -// shaped loop S contained by L. It is possible that GetBound returns a -// *larger* bound for S than it does for L, due to rounding errors. This -// method expands the bound for L so that it is guaranteed to contain the -// bounds of any subregion S. -// -// More precisely, if L is a loop that does not contain either pole, and S -// is a loop such that L.Contains(S), then -// -// ExpandForSubregions(L.RectBound).Contains(S.RectBound). -// -func ExpandForSubregions(bound Rect) Rect { - // Empty bounds don't need expansion. - if bound.IsEmpty() { - return bound - } - - // First we need to check whether the bound B contains any nearly-antipodal - // points (to within 4.309 * dblEpsilon). If so then we need to return - // FullRect, since the subregion might have an edge between two - // such points, and AddPoint returns Full for such edges. 
Note that - // this can happen even if B is not Full for example, consider a loop - // that defines a 10km strip straddling the equator extending from - // longitudes -100 to +100 degrees. - // - // It is easy to check whether B contains any antipodal points, but checking - // for nearly-antipodal points is trickier. Essentially we consider the - // original bound B and its reflection through the origin B', and then test - // whether the minimum distance between B and B' is less than 4.309 * dblEpsilon. - - // lngGap is a lower bound on the longitudinal distance between B and its - // reflection B'. (2.5 * dblEpsilon is the maximum combined error of the - // endpoint longitude calculations and the Length call.) - lngGap := math.Max(0, math.Pi-bound.Lng.Length()-2.5*dblEpsilon) - - // minAbsLat is the minimum distance from B to the equator (if zero or - // negative, then B straddles the equator). - minAbsLat := math.Max(bound.Lat.Lo, -bound.Lat.Hi) - - // latGapSouth and latGapNorth measure the minimum distance from B to the - // south and north poles respectively. - latGapSouth := math.Pi/2 + bound.Lat.Lo - latGapNorth := math.Pi/2 - bound.Lat.Hi - - if minAbsLat >= 0 { - // The bound B does not straddle the equator. In this case the minimum - // distance is between one endpoint of the latitude edge in B closest to - // the equator and the other endpoint of that edge in B'. The latitude - // distance between these two points is 2*minAbsLat, and the longitude - // distance is lngGap. We could compute the distance exactly using the - // Haversine formula, but then we would need to bound the errors in that - // calculation. Since we only need accuracy when the distance is very - // small (close to 4.309 * dblEpsilon), we substitute the Euclidean - // distance instead. This gives us a right triangle XYZ with two edges of - // length x = 2*minAbsLat and y ~= lngGap. 
The desired distance is the - // length of the third edge z, and we have - // - // z ~= sqrt(x^2 + y^2) >= (x + y) / sqrt(2) - // - // Therefore the region may contain nearly antipodal points only if - // - // 2*minAbsLat + lngGap < sqrt(2) * 4.309 * dblEpsilon - // ~= 1.354e-15 - // - // Note that because the given bound B is conservative, minAbsLat and - // lngGap are both lower bounds on their true values so we do not need - // to make any adjustments for their errors. - if 2*minAbsLat+lngGap < 1.354e-15 { - return FullRect() - } - } else if lngGap >= math.Pi/2 { - // B spans at most Pi/2 in longitude. The minimum distance is always - // between one corner of B and the diagonally opposite corner of B'. We - // use the same distance approximation that we used above; in this case - // we have an obtuse triangle XYZ with two edges of length x = latGapSouth - // and y = latGapNorth, and angle Z >= Pi/2 between them. We then have - // - // z >= sqrt(x^2 + y^2) >= (x + y) / sqrt(2) - // - // Unlike the case above, latGapSouth and latGapNorth are not lower bounds - // (because of the extra addition operation, and because math.Pi/2 is not - // exactly equal to Pi/2); they can exceed their true values by up to - // 0.75 * dblEpsilon. Putting this all together, the region may contain - // nearly antipodal points only if - // - // latGapSouth + latGapNorth < (sqrt(2) * 4.309 + 1.5) * dblEpsilon - // ~= 1.687e-15 - if latGapSouth+latGapNorth < 1.687e-15 { - return FullRect() - } - } else { - // Otherwise we know that (1) the bound straddles the equator and (2) its - // width in longitude is at least Pi/2. In this case the minimum - // distance can occur either between a corner of B and the diagonally - // opposite corner of B' (as in the case above), or between a corner of B - // and the opposite longitudinal edge reflected in B'. 
It is sufficient - // to only consider the corner-edge case, since this distance is also a - // lower bound on the corner-corner distance when that case applies. - - // Consider the spherical triangle XYZ where X is a corner of B with - // minimum absolute latitude, Y is the closest pole to X, and Z is the - // point closest to X on the opposite longitudinal edge of B'. This is a - // right triangle (Z = Pi/2), and from the spherical law of sines we have - // - // sin(z) / sin(Z) = sin(y) / sin(Y) - // sin(maxLatGap) / 1 = sin(dMin) / sin(lngGap) - // sin(dMin) = sin(maxLatGap) * sin(lngGap) - // - // where "maxLatGap" = max(latGapSouth, latGapNorth) and "dMin" is the - // desired minimum distance. Now using the facts that sin(t) >= (2/Pi)*t - // for 0 <= t <= Pi/2, that we only need an accurate approximation when - // at least one of "maxLatGap" or lngGap is extremely small (in which - // case sin(t) ~= t), and recalling that "maxLatGap" has an error of up - // to 0.75 * dblEpsilon, we want to test whether - // - // maxLatGap * lngGap < (4.309 + 0.75) * (Pi/2) * dblEpsilon - // ~= 1.765e-15 - if math.Max(latGapSouth, latGapNorth)*lngGap < 1.765e-15 { - return FullRect() - } - } - // Next we need to check whether the subregion might contain any edges that - // span (math.Pi - 2 * dblEpsilon) radians or more in longitude, since AddPoint - // sets the longitude bound to Full in that case. This corresponds to - // testing whether (lngGap <= 0) in lngExpansion below. - - // Otherwise, the maximum latitude error in AddPoint is 4.8 * dblEpsilon. - // In the worst case, the errors when computing the latitude bound for a - // subregion could go in the opposite direction as the errors when computing - // the bound for the original region, so we need to double this value. - // (More analysis shows that it's okay to round down to a multiple of - // dblEpsilon.) 
- // - // For longitude, we rely on the fact that atan2 is correctly rounded and - // therefore no additional bounds expansion is necessary. - - latExpansion := 9 * dblEpsilon - lngExpansion := 0.0 - if lngGap <= 0 { - lngExpansion = math.Pi - } - return bound.expanded(LatLng{s1.Angle(latExpansion), s1.Angle(lngExpansion)}).PolarClosure() -} diff --git a/vendor/github.com/golang/geo/s2/region.go b/vendor/github.com/golang/geo/s2/region.go deleted file mode 100644 index 9ea3de1ca..000000000 --- a/vendor/github.com/golang/geo/s2/region.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package s2 - -// A Region represents a two-dimensional region on the unit sphere. -// -// The purpose of this interface is to allow complex regions to be -// approximated as simpler regions. The interface is restricted to methods -// that are useful for computing approximations. -type Region interface { - // CapBound returns a bounding spherical cap. This is not guaranteed to be exact. - CapBound() Cap - - // RectBound returns a bounding latitude-longitude rectangle that contains - // the region. The bounds are not guaranteed to be tight. - RectBound() Rect - - // ContainsCell reports whether the region completely contains the given region. - // It returns false if containment could not be determined. 
- ContainsCell(c Cell) bool - - // IntersectsCell reports whether the region intersects the given cell or - // if intersection could not be determined. It returns false if the region - // does not intersect. - IntersectsCell(c Cell) bool - - // ContainsPoint reports whether the region contains the given point or not. - // The point should be unit length, although some implementations may relax - // this restriction. - ContainsPoint(p Point) bool - - // CellUnionBound returns a small collection of CellIDs whose union covers - // the region. The cells are not sorted, may have redundancies (such as cells - // that contain other cells), and may cover much more area than necessary. - // - // This method is not intended for direct use by client code. Clients - // should typically use Covering, which has options to control the size and - // accuracy of the covering. Alternatively, if you want a fast covering and - // don't care about accuracy, consider calling FastCovering (which returns a - // cleaned-up version of the covering computed by this method). - // - // CellUnionBound implementations should attempt to return a small - // covering (ideally 4 cells or fewer) that covers the region and can be - // computed quickly. The result is used by RegionCoverer as a starting - // point for further refinement. - CellUnionBound() []CellID -} - -// Enforce Region interface satisfaction. -var ( - _ Region = Cap{} - _ Region = Cell{} - _ Region = (*CellUnion)(nil) - _ Region = (*Loop)(nil) - _ Region = Point{} - _ Region = (*Polygon)(nil) - _ Region = (*Polyline)(nil) - _ Region = Rect{} -) diff --git a/vendor/github.com/golang/geo/s2/regioncoverer.go b/vendor/github.com/golang/geo/s2/regioncoverer.go deleted file mode 100644 index de5b0c20d..000000000 --- a/vendor/github.com/golang/geo/s2/regioncoverer.go +++ /dev/null @@ -1,615 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package s2 - -import ( - "container/heap" - "sort" -) - -// RegionCoverer allows arbitrary regions to be approximated as unions of cells (CellUnion). -// This is useful for implementing various sorts of search and precomputation operations. -// -// Typical usage: -// -// rc := &s2.RegionCoverer{MaxLevel: 30, MaxCells: 5} -// r := s2.Region(CapFromCenterArea(center, area)) -// covering := rc.Covering(r) -// -// This yields a CellUnion of at most 5 cells that is guaranteed to cover the -// given region (a disc-shaped region on the sphere). -// -// For covering, only cells where (level - MinLevel) is a multiple of LevelMod will be used. -// This effectively allows the branching factor of the S2 CellID hierarchy to be increased. -// Currently the only parameter values allowed are 1, 2, or 3, corresponding to -// branching factors of 4, 16, and 64 respectively. -// -// Note the following: -// -// - MinLevel takes priority over MaxCells, i.e. cells below the given level will -// never be used even if this causes a large number of cells to be returned. -// -// - For any setting of MaxCells, up to 6 cells may be returned if that -// is the minimum number of cells required (e.g. if the region intersects -// all six face cells). Up to 3 cells may be returned even for very tiny -// convex regions if they happen to be located at the intersection of -// three cube faces. 
-// -// - For any setting of MaxCells, an arbitrary number of cells may be -// returned if MinLevel is too high for the region being approximated. -// -// - If MaxCells is less than 4, the area of the covering may be -// arbitrarily large compared to the area of the original region even if -// the region is convex (e.g. a Cap or Rect). -// -// The approximation algorithm is not optimal but does a pretty good job in -// practice. The output does not always use the maximum number of cells -// allowed, both because this would not always yield a better approximation, -// and because MaxCells is a limit on how much work is done exploring the -// possible covering as well as a limit on the final output size. -// -// Because it is an approximation algorithm, one should not rely on the -// stability of the output. In particular, the output of the covering algorithm -// may change across different versions of the library. -// -// One can also generate interior coverings, which are sets of cells which -// are entirely contained within a region. Interior coverings can be -// empty, even for non-empty regions, if there are no cells that satisfy -// the provided constraints and are contained by the region. Note that for -// performance reasons, it is wise to specify a MaxLevel when computing -// interior coverings - otherwise for regions with small or zero area, the -// algorithm may spend a lot of time subdividing cells all the way to leaf -// level to try to find contained cells. -type RegionCoverer struct { - MinLevel int // the minimum cell level to be used. - MaxLevel int // the maximum cell level to be used. - LevelMod int // the LevelMod to be used. - MaxCells int // the maximum desired number of cells in the approximation. -} - -// NewRegionCoverer returns a region coverer with the appropriate defaults. 
-func NewRegionCoverer() *RegionCoverer { - return &RegionCoverer{ - MinLevel: 0, - MaxLevel: maxLevel, - LevelMod: 1, - MaxCells: 8, - } -} - -type coverer struct { - minLevel int // the minimum cell level to be used. - maxLevel int // the maximum cell level to be used. - levelMod int // the LevelMod to be used. - maxCells int // the maximum desired number of cells in the approximation. - region Region - result CellUnion - pq priorityQueue - interiorCovering bool -} - -type candidate struct { - cell Cell - terminal bool // Cell should not be expanded further. - numChildren int // Number of children that intersect the region. - children []*candidate // Actual size may be 0, 4, 16, or 64 elements. - priority int // Priority of the candidate. -} - -type priorityQueue []*candidate - -func (pq priorityQueue) Len() int { - return len(pq) -} - -func (pq priorityQueue) Less(i, j int) bool { - // We want Pop to give us the highest, not lowest, priority so we use greater than here. - return pq[i].priority > pq[j].priority -} - -func (pq priorityQueue) Swap(i, j int) { - pq[i], pq[j] = pq[j], pq[i] -} - -func (pq *priorityQueue) Push(x interface{}) { - item := x.(*candidate) - *pq = append(*pq, item) -} - -func (pq *priorityQueue) Pop() interface{} { - item := (*pq)[len(*pq)-1] - *pq = (*pq)[:len(*pq)-1] - return item -} - -func (pq *priorityQueue) Reset() { - *pq = (*pq)[:0] -} - -// newCandidate returns a new candidate with no children if the cell intersects the given region. -// The candidate is marked as terminal if it should not be expanded further. 
-func (c *coverer) newCandidate(cell Cell) *candidate { - if !c.region.IntersectsCell(cell) { - return nil - } - cand := &candidate{cell: cell} - level := int(cell.level) - if level >= c.minLevel { - if c.interiorCovering { - if c.region.ContainsCell(cell) { - cand.terminal = true - } else if level+c.levelMod > c.maxLevel { - return nil - } - } else if level+c.levelMod > c.maxLevel || c.region.ContainsCell(cell) { - cand.terminal = true - } - } - return cand -} - -// expandChildren populates the children of the candidate by expanding the given number of -// levels from the given cell. Returns the number of children that were marked "terminal". -func (c *coverer) expandChildren(cand *candidate, cell Cell, numLevels int) int { - numLevels-- - var numTerminals int - last := cell.id.ChildEnd() - for ci := cell.id.ChildBegin(); ci != last; ci = ci.Next() { - childCell := CellFromCellID(ci) - if numLevels > 0 { - if c.region.IntersectsCell(childCell) { - numTerminals += c.expandChildren(cand, childCell, numLevels) - } - continue - } - if child := c.newCandidate(childCell); child != nil { - cand.children = append(cand.children, child) - cand.numChildren++ - if child.terminal { - numTerminals++ - } - } - } - return numTerminals -} - -// addCandidate adds the given candidate to the result if it is marked as "terminal", -// otherwise expands its children and inserts it into the priority queue. -// Passing an argument of nil does nothing. -func (c *coverer) addCandidate(cand *candidate) { - if cand == nil { - return - } - - if cand.terminal { - c.result = append(c.result, cand.cell.id) - return - } - - // Expand one level at a time until we hit minLevel to ensure that we don't skip over it. 
- numLevels := c.levelMod - level := int(cand.cell.level) - if level < c.minLevel { - numLevels = 1 - } - - numTerminals := c.expandChildren(cand, cand.cell, numLevels) - maxChildrenShift := uint(2 * c.levelMod) - if cand.numChildren == 0 { - return - } else if !c.interiorCovering && numTerminals == 1<= c.minLevel { - // Optimization: add the parent cell rather than all of its children. - // We can't do this for interior coverings, since the children just - // intersect the region, but may not be contained by it - we need to - // subdivide them further. - cand.terminal = true - c.addCandidate(cand) - } else { - // We negate the priority so that smaller absolute priorities are returned - // first. The heuristic is designed to refine the largest cells first, - // since those are where we have the largest potential gain. Among cells - // of the same size, we prefer the cells with the fewest children. - // Finally, among cells with equal numbers of children we prefer those - // with the smallest number of children that cannot be refined further. - cand.priority = -(((level< 1 && level > c.minLevel { - level -= (level - c.minLevel) % c.levelMod - } - return level -} - -// adjustCellLevels ensures that all cells with level > minLevel also satisfy levelMod, -// by replacing them with an ancestor if necessary. Cell levels smaller -// than minLevel are not modified (see AdjustLevel). The output is -// then normalized to ensure that no redundant cells are present. 
-func (c *coverer) adjustCellLevels(cells *CellUnion) { - if c.levelMod == 1 { - return - } - - var out int - for _, ci := range *cells { - level := ci.Level() - newLevel := c.adjustLevel(level) - if newLevel != level { - ci = ci.Parent(newLevel) - } - if out > 0 && (*cells)[out-1].Contains(ci) { - continue - } - for out > 0 && ci.Contains((*cells)[out-1]) { - out-- - } - (*cells)[out] = ci - out++ - } - *cells = (*cells)[:out] -} - -// initialCandidates computes a set of initial candidates that cover the given region. -func (c *coverer) initialCandidates() { - // Optimization: start with a small (usually 4 cell) covering of the region's bounding cap. - temp := &RegionCoverer{MaxLevel: c.maxLevel, LevelMod: 1, MaxCells: minInt(4, c.maxCells)} - - cells := temp.FastCovering(c.region) - c.adjustCellLevels(&cells) - for _, ci := range cells { - c.addCandidate(c.newCandidate(CellFromCellID(ci))) - } -} - -// coveringInternal generates a covering and stores it in result. -// Strategy: Start with the 6 faces of the cube. Discard any -// that do not intersect the shape. Then repeatedly choose the -// largest cell that intersects the shape and subdivide it. -// -// result contains the cells that will be part of the output, while pq -// contains cells that we may still subdivide further. Cells that are -// entirely contained within the region are immediately added to the output, -// while cells that do not intersect the region are immediately discarded. -// Therefore pq only contains cells that partially intersect the region. -// Candidates are prioritized first according to cell size (larger cells -// first), then by the number of intersecting children they have (fewest -// children first), and then by the number of fully contained children -// (fewest children first). 
-func (c *coverer) coveringInternal(region Region) { - c.region = region - - c.initialCandidates() - for c.pq.Len() > 0 && (!c.interiorCovering || len(c.result) < c.maxCells) { - cand := heap.Pop(&c.pq).(*candidate) - - // For interior covering we keep subdividing no matter how many children - // candidate has. If we reach MaxCells before expanding all children, - // we will just use some of them. - // For exterior covering we cannot do this, because result has to cover the - // whole region, so all children have to be used. - // candidate.numChildren == 1 case takes care of the situation when we - // already have more than MaxCells in result (minLevel is too high). - // Subdividing of the candidate with one child does no harm in this case. - if c.interiorCovering || int(cand.cell.level) < c.minLevel || cand.numChildren == 1 || len(c.result)+c.pq.Len()+cand.numChildren <= c.maxCells { - for _, child := range cand.children { - if !c.interiorCovering || len(c.result) < c.maxCells { - c.addCandidate(child) - } - } - } else { - cand.terminal = true - c.addCandidate(cand) - } - } - - c.pq.Reset() - c.region = nil - - // Rather than just returning the raw list of cell ids, we construct a cell - // union and then denormalize it. This has the effect of replacing four - // child cells with their parent whenever this does not violate the covering - // parameters specified (min_level, level_mod, etc). This significantly - // reduces the number of cells returned in many cases, and it is cheap - // compared to computing the covering in the first place. - c.result.Normalize() - if c.minLevel > 0 || c.levelMod > 1 { - c.result.Denormalize(c.minLevel, c.levelMod) - } -} - -// newCoverer returns an instance of coverer. 
-func (rc *RegionCoverer) newCoverer() *coverer { - return &coverer{ - minLevel: maxInt(0, minInt(maxLevel, rc.MinLevel)), - maxLevel: maxInt(0, minInt(maxLevel, rc.MaxLevel)), - levelMod: maxInt(1, minInt(3, rc.LevelMod)), - maxCells: rc.MaxCells, - } -} - -// Covering returns a CellUnion that covers the given region and satisfies the various restrictions. -func (rc *RegionCoverer) Covering(region Region) CellUnion { - covering := rc.CellUnion(region) - covering.Denormalize(maxInt(0, minInt(maxLevel, rc.MinLevel)), maxInt(1, minInt(3, rc.LevelMod))) - return covering -} - -// InteriorCovering returns a CellUnion that is contained within the given region and satisfies the various restrictions. -func (rc *RegionCoverer) InteriorCovering(region Region) CellUnion { - intCovering := rc.InteriorCellUnion(region) - intCovering.Denormalize(maxInt(0, minInt(maxLevel, rc.MinLevel)), maxInt(1, minInt(3, rc.LevelMod))) - return intCovering -} - -// CellUnion returns a normalized CellUnion that covers the given region and -// satisfies the restrictions except for minLevel and levelMod. These criteria -// cannot be satisfied using a cell union because cell unions are -// automatically normalized by replacing four child cells with their parent -// whenever possible. (Note that the list of cell ids passed to the CellUnion -// constructor does in fact satisfy all the given restrictions.) -func (rc *RegionCoverer) CellUnion(region Region) CellUnion { - c := rc.newCoverer() - c.coveringInternal(region) - cu := c.result - cu.Normalize() - return cu -} - -// InteriorCellUnion returns a normalized CellUnion that is contained within the given region and -// satisfies the restrictions except for minLevel and levelMod. These criteria -// cannot be satisfied using a cell union because cell unions are -// automatically normalized by replacing four child cells with their parent -// whenever possible. 
(Note that the list of cell ids passed to the CellUnion -// constructor does in fact satisfy all the given restrictions.) -func (rc *RegionCoverer) InteriorCellUnion(region Region) CellUnion { - c := rc.newCoverer() - c.interiorCovering = true - c.coveringInternal(region) - cu := c.result - cu.Normalize() - return cu -} - -// FastCovering returns a CellUnion that covers the given region similar to Covering, -// except that this method is much faster and the coverings are not as tight. -// All of the usual parameters are respected (MaxCells, MinLevel, MaxLevel, and LevelMod), -// except that the implementation makes no attempt to take advantage of large values of -// MaxCells. (A small number of cells will always be returned.) -// -// This function is useful as a starting point for algorithms that -// recursively subdivide cells. -func (rc *RegionCoverer) FastCovering(region Region) CellUnion { - c := rc.newCoverer() - cu := CellUnion(region.CellUnionBound()) - c.normalizeCovering(&cu) - return cu -} - -// IsCanonical reports whether the given CellUnion represents a valid covering -// that conforms to the current covering parameters. In particular: -// -// - All CellIDs must be valid. -// -// - CellIDs must be sorted and non-overlapping. -// -// - CellID levels must satisfy MinLevel, MaxLevel, and LevelMod. -// -// - If the covering has more than MaxCells, there must be no two cells with -// a common ancestor at MinLevel or higher. -// -// - There must be no sequence of cells that could be replaced by an -// ancestor (i.e. with LevelMod == 1, the 4 child cells of a parent). -func (rc *RegionCoverer) IsCanonical(covering CellUnion) bool { - return rc.newCoverer().isCanonical(covering) -} - -// normalizeCovering normalizes the "covering" so that it conforms to the -// current covering parameters (maxCells, minLevel, maxLevel, and levelMod). -// This method makes no attempt to be optimal. 
In particular, if -// minLevel > 0 or levelMod > 1 then it may return more than the -// desired number of cells even when this isn't necessary. -// -// Note that when the covering parameters have their default values, almost -// all of the code in this function is skipped. -func (c *coverer) normalizeCovering(covering *CellUnion) { - // If any cells are too small, or don't satisfy levelMod, then replace them with ancestors. - if c.maxLevel < maxLevel || c.levelMod > 1 { - for i, ci := range *covering { - level := ci.Level() - newLevel := c.adjustLevel(minInt(level, c.maxLevel)) - if newLevel != level { - (*covering)[i] = ci.Parent(newLevel) - } - } - } - // Sort the cells and simplify them. - covering.Normalize() - - // Make sure that the covering satisfies minLevel and levelMod, - // possibly at the expense of satisfying MaxCells. - if c.minLevel > 0 || c.levelMod > 1 { - covering.Denormalize(c.minLevel, c.levelMod) - } - - // If there are too many cells and the covering is very large, use the - // RegionCoverer to compute a new covering. (This avoids possible O(n^2) - // behavior of the simpler algorithm below.) - excess := len(*covering) - c.maxCells - if excess <= 0 || c.isCanonical(*covering) { - return - } - if excess*len(*covering) > 10000 { - rc := NewRegionCoverer() - (*covering) = rc.Covering(covering) - return - } - - // If there are still too many cells, then repeatedly replace two adjacent - // cells in CellID order by their lowest common ancestor. - for len(*covering) > c.maxCells { - bestIndex := -1 - bestLevel := -1 - for i := 0; i+1 < len(*covering); i++ { - level, ok := (*covering)[i].CommonAncestorLevel((*covering)[i+1]) - if !ok { - continue - } - level = c.adjustLevel(level) - if level > bestLevel { - bestLevel = level - bestIndex = i - } - } - - if bestLevel < c.minLevel { - break - } - - // Replace all cells contained by the new ancestor cell. 
- id := (*covering)[bestIndex].Parent(bestLevel) - (*covering) = c.replaceCellsWithAncestor(*covering, id) - - // Now repeatedly check whether all children of the parent cell are - // present, in which case we can replace those cells with their parent. - for bestLevel > c.minLevel { - bestLevel -= c.levelMod - id = id.Parent(bestLevel) - if !c.containsAllChildren(*covering, id) { - break - } - (*covering) = c.replaceCellsWithAncestor(*covering, id) - } - } -} - -// isCanonical reports whether the covering is canonical. -func (c *coverer) isCanonical(covering CellUnion) bool { - trueMax := c.maxLevel - if c.levelMod != 1 { - trueMax = c.maxLevel - (c.maxLevel-c.minLevel)%c.levelMod - } - tooManyCells := len(covering) > c.maxCells - sameParentCount := 1 - - prevID := CellID(0) - for _, id := range covering { - if !id.IsValid() { - return false - } - - // Check that the CellID level is acceptable. - level := id.Level() - if level < c.minLevel || level > trueMax { - return false - } - if c.levelMod > 1 && (level-c.minLevel)%c.levelMod != 0 { - return false - } - - if prevID != 0 { - // Check that cells are sorted and non-overlapping. - if prevID.RangeMax() >= id.RangeMin() { - return false - } - - lev, ok := id.CommonAncestorLevel(prevID) - // If there are too many cells, check that no pair of adjacent cells - // could be replaced by an ancestor. - if tooManyCells && (ok && lev >= c.minLevel) { - return false - } - - // Check that there are no sequences of (4 ** level_mod) cells that all - // have the same parent (considering only multiples of "level_mod"). 
- pLevel := level - c.levelMod - if pLevel < c.minLevel || level != prevID.Level() || - id.Parent(pLevel) != prevID.Parent(pLevel) { - sameParentCount = 1 - } else { - sameParentCount++ - if sameParentCount == 1<= id.RangeMin() }) - level := id.Level() + c.levelMod - for child := id.ChildBeginAtLevel(level); child != id.ChildEndAtLevel(level); child = child.Next() { - if pos == len(covering) || covering[pos] != child { - return false - } - pos++ - } - return true -} - -// replaceCellsWithAncestor replaces all descendants of the given id in covering -// with id. This requires the covering contains at least one descendant of id. -func (c *coverer) replaceCellsWithAncestor(covering []CellID, id CellID) []CellID { - begin := sort.Search(len(covering), func(i int) bool { return covering[i] > id.RangeMin() }) - end := sort.Search(len(covering), func(i int) bool { return covering[i] > id.RangeMax() }) - - return append(append(covering[:begin], id), covering[end:]...) -} - -// SimpleRegionCovering returns a set of cells at the given level that cover -// the connected region and a starting point on the boundary or inside the -// region. The cells are returned in arbitrary order. -// -// Note that this method is not faster than the regular Covering -// method for most region types, such as Cap or Polygon, and in fact it -// can be much slower when the output consists of a large number of cells. -// Currently it can be faster at generating coverings of long narrow regions -// such as polylines, but this may change in the future. -func SimpleRegionCovering(region Region, start Point, level int) []CellID { - return FloodFillRegionCovering(region, cellIDFromPoint(start).Parent(level)) -} - -// FloodFillRegionCovering returns all edge-connected cells at the same level as -// the given CellID that intersect the given region, in arbitrary order. 
-func FloodFillRegionCovering(region Region, start CellID) []CellID { - var output []CellID - all := map[CellID]bool{ - start: true, - } - frontier := []CellID{start} - for len(frontier) > 0 { - id := frontier[len(frontier)-1] - frontier = frontier[:len(frontier)-1] - if !region.IntersectsCell(CellFromCellID(id)) { - continue - } - output = append(output, id) - for _, nbr := range id.EdgeNeighbors() { - if !all[nbr] { - all[nbr] = true - frontier = append(frontier, nbr) - } - } - } - - return output -} diff --git a/vendor/github.com/golang/geo/s2/regionunion.go b/vendor/github.com/golang/geo/s2/regionunion.go deleted file mode 100644 index 915b7c330..000000000 --- a/vendor/github.com/golang/geo/s2/regionunion.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2020 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package s2 - -// A RegionUnion represents a union of possibly overlapping regions. -// It is convenient for computing a covering of a set of regions. -type RegionUnion []Region - -// CapBound returns a bounding cap for this RegionUnion. -func (ru RegionUnion) CapBound() Cap { return ru.RectBound().CapBound() } - -// RectBound returns a bounding latitude-longitude rectangle for this RegionUnion. 
-func (ru RegionUnion) RectBound() Rect { - ret := EmptyRect() - for _, reg := range ru { - ret = ret.Union(reg.RectBound()) - } - return ret -} - -// ContainsCell reports whether the given Cell is contained by this RegionUnion. -func (ru RegionUnion) ContainsCell(c Cell) bool { - for _, reg := range ru { - if reg.ContainsCell(c) { - return true - } - } - return false -} - -// IntersectsCell reports whether this RegionUnion intersects the given cell. -func (ru RegionUnion) IntersectsCell(c Cell) bool { - for _, reg := range ru { - if reg.IntersectsCell(c) { - return true - } - } - return false -} - -// ContainsPoint reports whether this RegionUnion contains the Point. -func (ru RegionUnion) ContainsPoint(p Point) bool { - for _, reg := range ru { - if reg.ContainsPoint(p) { - return true - } - } - return false -} - -// CellUnionBound computes a covering of the RegionUnion. -func (ru RegionUnion) CellUnionBound() []CellID { - return ru.CapBound().CellUnionBound() -} diff --git a/vendor/github.com/golang/geo/s2/shape.go b/vendor/github.com/golang/geo/s2/shape.go deleted file mode 100644 index 2cbf170c3..000000000 --- a/vendor/github.com/golang/geo/s2/shape.go +++ /dev/null @@ -1,263 +0,0 @@ -// Copyright 2017 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package s2 - -import ( - "sort" -) - -// Edge represents a geodesic edge consisting of two vertices. 
Zero-length edges are -// allowed, and can be used to represent points. -type Edge struct { - V0, V1 Point -} - -// Cmp compares the two edges using the underlying Points Cmp method and returns -// -// -1 if e < other -// 0 if e == other -// +1 if e > other -// -// The two edges are compared by first vertex, and then by the second vertex. -func (e Edge) Cmp(other Edge) int { - if v0cmp := e.V0.Cmp(other.V0.Vector); v0cmp != 0 { - return v0cmp - } - return e.V1.Cmp(other.V1.Vector) -} - -// sortEdges sorts the slice of Edges in place. -func sortEdges(e []Edge) { - sort.Sort(edges(e)) -} - -// edges implements the Sort interface for slices of Edge. -type edges []Edge - -func (e edges) Len() int { return len(e) } -func (e edges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } -func (e edges) Less(i, j int) bool { return e[i].Cmp(e[j]) == -1 } - -// ShapeEdgeID is a unique identifier for an Edge within an ShapeIndex, -// consisting of a (shapeID, edgeID) pair. -type ShapeEdgeID struct { - ShapeID int32 - EdgeID int32 -} - -// Cmp compares the two ShapeEdgeIDs and returns -// -// -1 if s < other -// 0 if s == other -// +1 if s > other -// -// The two are compared first by shape id and then by edge id. -func (s ShapeEdgeID) Cmp(other ShapeEdgeID) int { - switch { - case s.ShapeID < other.ShapeID: - return -1 - case s.ShapeID > other.ShapeID: - return 1 - } - switch { - case s.EdgeID < other.EdgeID: - return -1 - case s.EdgeID > other.EdgeID: - return 1 - } - return 0 -} - -// ShapeEdge represents a ShapeEdgeID with the two endpoints of that Edge. -type ShapeEdge struct { - ID ShapeEdgeID - Edge Edge -} - -// Chain represents a range of edge IDs corresponding to a chain of connected -// edges, specified as a (start, length) pair. The chain is defined to consist of -// edge IDs {start, start + 1, ..., start + length - 1}. 
-type Chain struct { - Start, Length int -} - -// ChainPosition represents the position of an edge within a given edge chain, -// specified as a (chainID, offset) pair. Chains are numbered sequentially -// starting from zero, and offsets are measured from the start of each chain. -type ChainPosition struct { - ChainID, Offset int -} - -// A ReferencePoint consists of a point and a boolean indicating whether the point -// is contained by a particular shape. -type ReferencePoint struct { - Point Point - Contained bool -} - -// OriginReferencePoint returns a ReferencePoint with the given value for -// contained and the origin point. It should be used when all points or no -// points are contained. -func OriginReferencePoint(contained bool) ReferencePoint { - return ReferencePoint{Point: OriginPoint(), Contained: contained} -} - -// typeTag is a 32-bit tag that can be used to identify the type of an encoded -// Shape. All encodable types have a non-zero type tag. The tag associated with -type typeTag uint32 - -const ( - // Indicates that a given Shape type cannot be encoded. - typeTagNone typeTag = 0 - typeTagPolygon typeTag = 1 - typeTagPolyline typeTag = 2 - typeTagPointVector typeTag = 3 - typeTagLaxPolyline typeTag = 4 - typeTagLaxPolygon typeTag = 5 - - // The minimum allowable tag for future user-defined Shape types. - typeTagMinUser typeTag = 8192 -) - -// Shape represents polygonal geometry in a flexible way. It is organized as a -// collection of edges that optionally defines an interior. All geometry -// represented by a given Shape must have the same dimension, which means that -// an Shape can represent either a set of points, a set of polylines, or a set -// of polygons. -// -// Shape is defined as an interface in order to give clients control over the -// underlying data representation. Sometimes an Shape does not have any data of -// its own, but instead wraps some other type. 
-// -// Shape operations are typically defined on a ShapeIndex rather than -// individual shapes. An ShapeIndex is simply a collection of Shapes, -// possibly of different dimensions (e.g. 10 points and 3 polygons), organized -// into a data structure for efficient edge access. -// -// The edges of a Shape are indexed by a contiguous range of edge IDs -// starting at 0. The edges are further subdivided into chains, where each -// chain consists of a sequence of edges connected end-to-end (a polyline). -// For example, a Shape representing two polylines AB and CDE would have -// three edges (AB, CD, DE) grouped into two chains: (AB) and (CD, DE). -// Similarly, an Shape representing 5 points would have 5 chains consisting -// of one edge each. -// -// Shape has methods that allow edges to be accessed either using the global -// numbering (edge ID) or within a particular chain. The global numbering is -// sufficient for most purposes, but the chain representation is useful for -// certain algorithms such as intersection (see BooleanOperation). -type Shape interface { - // NumEdges returns the number of edges in this shape. - NumEdges() int - - // Edge returns the edge for the given edge index. - Edge(i int) Edge - - // ReferencePoint returns an arbitrary reference point for the shape. (The - // containment boolean value must be false for shapes that do not have an interior.) - // - // This reference point may then be used to compute the containment of other - // points by counting edge crossings. - ReferencePoint() ReferencePoint - - // NumChains reports the number of contiguous edge chains in the shape. - // For example, a shape whose edges are [AB, BC, CD, AE, EF] would consist - // of two chains (AB,BC,CD and AE,EF). Every chain is assigned a chain Id - // numbered sequentially starting from zero. - // - // Note that it is always acceptable to implement this method by returning - // NumEdges, i.e. 
every chain consists of a single edge, but this may - // reduce the efficiency of some algorithms. - NumChains() int - - // Chain returns the range of edge IDs corresponding to the given edge chain. - // Edge chains must form contiguous, non-overlapping ranges that cover - // the entire range of edge IDs. This is spelled out more formally below: - // - // 0 <= i < NumChains() - // Chain(i).length > 0, for all i - // Chain(0).start == 0 - // Chain(i).start + Chain(i).length == Chain(i+1).start, for i < NumChains()-1 - // Chain(i).start + Chain(i).length == NumEdges(), for i == NumChains()-1 - Chain(chainID int) Chain - - // ChainEdgeReturns the edge at offset "offset" within edge chain "chainID". - // Equivalent to "shape.Edge(shape.Chain(chainID).start + offset)" - // but more efficient. - ChainEdge(chainID, offset int) Edge - - // ChainPosition finds the chain containing the given edge, and returns the - // position of that edge as a ChainPosition(chainID, offset) pair. - // - // shape.Chain(pos.chainID).start + pos.offset == edgeID - // shape.Chain(pos.chainID+1).start > edgeID - // - // where pos == shape.ChainPosition(edgeID). - ChainPosition(edgeID int) ChainPosition - - // Dimension returns the dimension of the geometry represented by this shape, - // either 0, 1 or 2 for point, polyline and polygon geometry respectively. - // - // 0 - Point geometry. Each point is represented as a degenerate edge. - // - // 1 - Polyline geometry. Polyline edges may be degenerate. A shape may - // represent any number of polylines. Polylines edges may intersect. - // - // 2 - Polygon geometry. Edges should be oriented such that the polygon - // interior is always on the left. In theory the edges may be returned - // in any order, but typically the edges are organized as a collection - // of edge chains where each chain represents one polygon loop. 
- // Polygons may have degeneracies (e.g., degenerate edges or sibling - // pairs consisting of an edge and its corresponding reversed edge). - // A polygon loop may also be full (containing all points on the - // sphere); by convention this is represented as a chain with no edges. - // (See laxPolygon for details.) - // - // This method allows degenerate geometry of different dimensions - // to be distinguished, e.g. it allows a point to be distinguished from a - // polyline or polygon that has been simplified to a single point. - Dimension() int - - // IsEmpty reports whether the Shape contains no points. (Note that the full - // polygon is represented as a chain with zero edges.) - IsEmpty() bool - - // IsFull reports whether the Shape contains all points on the sphere. - IsFull() bool - - // typeTag returns a value that can be used to identify the type of an - // encoded Shape. - typeTag() typeTag - - // We do not support implementations of this interface outside this package. - privateInterface() -} - -// defaultShapeIsEmpty reports whether this shape contains no points. -func defaultShapeIsEmpty(s Shape) bool { - return s.NumEdges() == 0 && (s.Dimension() != 2 || s.NumChains() == 0) -} - -// defaultShapeIsFull reports whether this shape contains all points on the sphere. -func defaultShapeIsFull(s Shape) bool { - return s.NumEdges() == 0 && s.Dimension() == 2 && s.NumChains() > 0 -} - -// A minimal check for types that should satisfy the Shape interface. -var ( - _ Shape = &Loop{} - _ Shape = &Polygon{} - _ Shape = &Polyline{} -) diff --git a/vendor/github.com/golang/geo/s2/shapeindex.go b/vendor/github.com/golang/geo/s2/shapeindex.go deleted file mode 100644 index 6efa213ab..000000000 --- a/vendor/github.com/golang/geo/s2/shapeindex.go +++ /dev/null @@ -1,1526 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package s2 - -import ( - "math" - "sort" - "sync" - "sync/atomic" - - "github.com/golang/geo/r1" - "github.com/golang/geo/r2" -) - -// CellRelation describes the possible relationships between a target cell -// and the cells of the ShapeIndex. If the target is an index cell or is -// contained by an index cell, it is Indexed. If the target is subdivided -// into one or more index cells, it is Subdivided. Otherwise it is Disjoint. -type CellRelation int - -// The possible CellRelations for a ShapeIndex. -const ( - Indexed CellRelation = iota - Subdivided - Disjoint -) - -const ( - // cellPadding defines the total error when clipping an edge which comes - // from two sources: - // (1) Clipping the original spherical edge to a cube face (the face edge). - // The maximum error in this step is faceClipErrorUVCoord. - // (2) Clipping the face edge to the u- or v-coordinate of a cell boundary. - // The maximum error in this step is edgeClipErrorUVCoord. - // Finally, since we encounter the same errors when clipping query edges, we - // double the total error so that we only need to pad edges during indexing - // and not at query time. - cellPadding = 2.0 * (faceClipErrorUVCoord + edgeClipErrorUVCoord) - - // cellSizeToLongEdgeRatio defines the cell size relative to the length of an - // edge at which it is first considered to be long. Long edges do not - // contribute toward the decision to subdivide a cell further. 
For example, - // a value of 2.0 means that the cell must be at least twice the size of the - // edge in order for that edge to be counted. There are two reasons for not - // counting long edges: (1) such edges typically need to be propagated to - // several children, which increases time and memory costs without much benefit, - // and (2) in pathological cases, many long edges close together could force - // subdivision to continue all the way to the leaf cell level. - cellSizeToLongEdgeRatio = 1.0 -) - -// clippedShape represents the part of a shape that intersects a Cell. -// It consists of the set of edge IDs that intersect that cell and a boolean -// indicating whether the center of the cell is inside the shape (for shapes -// that have an interior). -// -// Note that the edges themselves are not clipped; we always use the original -// edges for intersection tests so that the results will be the same as the -// original shape. -type clippedShape struct { - // shapeID is the index of the shape this clipped shape is a part of. - shapeID int32 - - // containsCenter indicates if the center of the CellID this shape has been - // clipped to falls inside this shape. This is false for shapes that do not - // have an interior. - containsCenter bool - - // edges is the ordered set of ShapeIndex original edge IDs. Edges - // are stored in increasing order of edge ID. - edges []int -} - -// newClippedShape returns a new clipped shape for the given shapeID and number of expected edges. -func newClippedShape(id int32, numEdges int) *clippedShape { - return &clippedShape{ - shapeID: id, - edges: make([]int, numEdges), - } -} - -// numEdges returns the number of edges that intersect the CellID of the Cell this was clipped to. -func (c *clippedShape) numEdges() int { - return len(c.edges) -} - -// containsEdge reports if this clipped shape contains the given edge ID. 
-func (c *clippedShape) containsEdge(id int) bool { - // Linear search is fast because the number of edges per shape is typically - // very small (less than 10). - for _, e := range c.edges { - if e == id { - return true - } - } - return false -} - -// ShapeIndexCell stores the index contents for a particular CellID. -type ShapeIndexCell struct { - shapes []*clippedShape -} - -// NewShapeIndexCell creates a new cell that is sized to hold the given number of shapes. -func NewShapeIndexCell(numShapes int) *ShapeIndexCell { - return &ShapeIndexCell{ - shapes: make([]*clippedShape, numShapes), - } -} - -// numEdges reports the total number of edges in all clipped shapes in this cell. -func (s *ShapeIndexCell) numEdges() int { - var e int - for _, cs := range s.shapes { - e += cs.numEdges() - } - return e -} - -// add adds the given clipped shape to this index cell. -func (s *ShapeIndexCell) add(c *clippedShape) { - // C++ uses a set, so it's ordered and unique. We don't currently catch - // the case when a duplicate value is added. - s.shapes = append(s.shapes, c) -} - -// findByShapeID returns the clipped shape that contains the given shapeID, -// or nil if none of the clipped shapes contain it. -func (s *ShapeIndexCell) findByShapeID(shapeID int32) *clippedShape { - // Linear search is fine because the number of shapes per cell is typically - // very small (most often 1), and is large only for pathological inputs - // (e.g. very deeply nested loops). - for _, clipped := range s.shapes { - if clipped.shapeID == shapeID { - return clipped - } - } - return nil -} - -// faceEdge and clippedEdge store temporary edge data while the index is being -// updated. -// -// While it would be possible to combine all the edge information into one -// structure, there are two good reasons for separating it: -// -// - Memory usage. 
Separating the two means that we only need to -// store one copy of the per-face data no matter how many times an edge is -// subdivided, and it also lets us delay computing bounding boxes until -// they are needed for processing each face (when the dataset spans -// multiple faces). -// -// - Performance. UpdateEdges is significantly faster on large polygons when -// the data is separated, because it often only needs to access the data in -// clippedEdge and this data is cached more successfully. - -// faceEdge represents an edge that has been projected onto a given face, -type faceEdge struct { - shapeID int32 // The ID of shape that this edge belongs to - edgeID int // Edge ID within that shape - maxLevel int // Not desirable to subdivide this edge beyond this level - hasInterior bool // Belongs to a shape that has a dimension of 2 - a, b r2.Point // The edge endpoints, clipped to a given face - edge Edge // The original edge. -} - -// clippedEdge represents the portion of that edge that has been clipped to a given Cell. -type clippedEdge struct { - faceEdge *faceEdge // The original unclipped edge - bound r2.Rect // Bounding box for the clipped portion -} - -// ShapeIndexIteratorPos defines the set of possible iterator starting positions. By -// default iterators are unpositioned, since this avoids an extra seek in this -// situation where one of the seek methods (such as Locate) is immediately called. -type ShapeIndexIteratorPos int - -const ( - // IteratorBegin specifies the iterator should be positioned at the beginning of the index. - IteratorBegin ShapeIndexIteratorPos = iota - // IteratorEnd specifies the iterator should be positioned at the end of the index. - IteratorEnd -) - -// ShapeIndexIterator is an iterator that provides low-level access to -// the cells of the index. Cells are returned in increasing order of CellID. 
-// -// for it := index.Iterator(); !it.Done(); it.Next() { -// fmt.Print(it.CellID()) -// } -// -type ShapeIndexIterator struct { - index *ShapeIndex - position int - id CellID - cell *ShapeIndexCell -} - -// NewShapeIndexIterator creates a new iterator for the given index. If a starting -// position is specified, the iterator is positioned at the given spot. -func NewShapeIndexIterator(index *ShapeIndex, pos ...ShapeIndexIteratorPos) *ShapeIndexIterator { - s := &ShapeIndexIterator{ - index: index, - } - - if len(pos) > 0 { - if len(pos) > 1 { - panic("too many ShapeIndexIteratorPos arguments") - } - switch pos[0] { - case IteratorBegin: - s.Begin() - case IteratorEnd: - s.End() - default: - panic("unknown ShapeIndexIteratorPos value") - } - } - - return s -} - -func (s *ShapeIndexIterator) clone() *ShapeIndexIterator { - return &ShapeIndexIterator{ - index: s.index, - position: s.position, - id: s.id, - cell: s.cell, - } -} - -// CellID returns the CellID of the current index cell. -// If s.Done() is true, a value larger than any valid CellID is returned. -func (s *ShapeIndexIterator) CellID() CellID { - return s.id -} - -// IndexCell returns the current index cell. -func (s *ShapeIndexIterator) IndexCell() *ShapeIndexCell { - // TODO(roberts): C++ has this call a virtual method to allow subclasses - // of ShapeIndexIterator to do other work before returning the cell. Do - // we need such a thing? - return s.cell -} - -// Center returns the Point at the center of the current position of the iterator. -func (s *ShapeIndexIterator) Center() Point { - return s.CellID().Point() -} - -// Begin positions the iterator at the beginning of the index. -func (s *ShapeIndexIterator) Begin() { - if !s.index.IsFresh() { - s.index.maybeApplyUpdates() - } - s.position = 0 - s.refresh() -} - -// Next positions the iterator at the next index cell. 
-func (s *ShapeIndexIterator) Next() { - s.position++ - s.refresh() -} - -// Prev advances the iterator to the previous cell in the index and returns true to -// indicate it was not yet at the beginning of the index. If the iterator is at the -// first cell the call does nothing and returns false. -func (s *ShapeIndexIterator) Prev() bool { - if s.position <= 0 { - return false - } - - s.position-- - s.refresh() - return true -} - -// End positions the iterator at the end of the index. -func (s *ShapeIndexIterator) End() { - s.position = len(s.index.cells) - s.refresh() -} - -// Done reports if the iterator is positioned at or after the last index cell. -func (s *ShapeIndexIterator) Done() bool { - return s.id == SentinelCellID -} - -// refresh updates the stored internal iterator values. -func (s *ShapeIndexIterator) refresh() { - if s.position < len(s.index.cells) { - s.id = s.index.cells[s.position] - s.cell = s.index.cellMap[s.CellID()] - } else { - s.id = SentinelCellID - s.cell = nil - } -} - -// seek positions the iterator at the first cell whose ID >= target, or at the -// end of the index if no such cell exists. -func (s *ShapeIndexIterator) seek(target CellID) { - s.position = sort.Search(len(s.index.cells), func(i int) bool { - return s.index.cells[i] >= target - }) - s.refresh() -} - -// LocatePoint positions the iterator at the cell that contains the given Point. -// If no such cell exists, the iterator position is unspecified, and false is returned. -// The cell at the matched position is guaranteed to contain all edges that might -// intersect the line segment between target and the cell's center. -func (s *ShapeIndexIterator) LocatePoint(p Point) bool { - // Let I = cellMap.LowerBound(T), where T is the leaf cell containing - // point P. Then if T is contained by an index cell, then the - // containing cell is either I or I'. We test for containment by comparing - // the ranges of leaf cells spanned by T, I, and I'. 
- target := cellIDFromPoint(p) - s.seek(target) - if !s.Done() && s.CellID().RangeMin() <= target { - return true - } - - if s.Prev() && s.CellID().RangeMax() >= target { - return true - } - return false -} - -// LocateCellID attempts to position the iterator at the first matching index cell -// in the index that has some relation to the given CellID. Let T be the target CellID. -// If T is contained by (or equal to) some index cell I, then the iterator is positioned -// at I and returns Indexed. Otherwise if T contains one or more (smaller) index cells, -// then the iterator is positioned at the first such cell I and return Subdivided. -// Otherwise Disjoint is returned and the iterator position is undefined. -func (s *ShapeIndexIterator) LocateCellID(target CellID) CellRelation { - // Let T be the target, let I = cellMap.LowerBound(T.RangeMin()), and - // let I' be the predecessor of I. If T contains any index cells, then T - // contains I. Similarly, if T is contained by an index cell, then the - // containing cell is either I or I'. We test for containment by comparing - // the ranges of leaf cells spanned by T, I, and I'. - s.seek(target.RangeMin()) - if !s.Done() { - if s.CellID() >= target && s.CellID().RangeMin() <= target { - return Indexed - } - if s.CellID() <= target.RangeMax() { - return Subdivided - } - } - if s.Prev() && s.CellID().RangeMax() >= target { - return Indexed - } - return Disjoint -} - -// tracker keeps track of which shapes in a given set contain a particular point -// (the focus). It provides an efficient way to move the focus from one point -// to another and incrementally update the set of shapes which contain it. We use -// this to compute which shapes contain the center of every CellID in the index, -// by advancing the focus from one cell center to the next. -// -// Initially the focus is at the start of the CellID space-filling curve. 
We then -// visit all the cells that are being added to the ShapeIndex in increasing order -// of CellID. For each cell, we draw two edges: one from the entry vertex to the -// center, and another from the center to the exit vertex (where entry and exit -// refer to the points where the space-filling curve enters and exits the cell). -// By counting edge crossings we can incrementally compute which shapes contain -// the cell center. Note that the same set of shapes will always contain the exit -// point of one cell and the entry point of the next cell in the index, because -// either (a) these two points are actually the same, or (b) the intervening -// cells in CellID order are all empty, and therefore there are no edge crossings -// if we follow this path from one cell to the other. -// -// In C++, this is S2ShapeIndex::InteriorTracker. -type tracker struct { - isActive bool - a Point - b Point - nextCellID CellID - crosser *EdgeCrosser - shapeIDs []int32 - - // Shape ids saved by saveAndClearStateBefore. The state is never saved - // recursively so we don't need to worry about maintaining a stack. - savedIDs []int32 -} - -// newTracker returns a new tracker with the appropriate defaults. -func newTracker() *tracker { - // As shapes are added, we compute which ones contain the start of the - // CellID space-filling curve by drawing an edge from OriginPoint to this - // point and counting how many shape edges cross this edge. - t := &tracker{ - isActive: false, - b: trackerOrigin(), - nextCellID: CellIDFromFace(0).ChildBeginAtLevel(maxLevel), - } - t.drawTo(Point{faceUVToXYZ(0, -1, -1).Normalize()}) // CellID curve start - - return t -} - -// trackerOrigin returns the initial focus point when the tracker is created -// (corresponding to the start of the CellID space-filling curve). -func trackerOrigin() Point { - // The start of the S2CellId space-filling curve. 
- return Point{faceUVToXYZ(0, -1, -1).Normalize()} -} - -// focus returns the current focus point of the tracker. -func (t *tracker) focus() Point { return t.b } - -// addShape adds a shape whose interior should be tracked. containsOrigin indicates -// whether the current focus point is inside the shape. Alternatively, if -// the focus point is in the process of being moved (via moveTo/drawTo), you -// can also specify containsOrigin at the old focus point and call testEdge -// for every edge of the shape that might cross the current drawTo line. -// This updates the state to correspond to the new focus point. -// -// This requires shape.HasInterior -func (t *tracker) addShape(shapeID int32, containsFocus bool) { - t.isActive = true - if containsFocus { - t.toggleShape(shapeID) - } -} - -// moveTo moves the focus of the tracker to the given point. This method should -// only be used when it is known that there are no edge crossings between the old -// and new focus locations; otherwise use drawTo. -func (t *tracker) moveTo(b Point) { t.b = b } - -// drawTo moves the focus of the tracker to the given point. After this method is -// called, testEdge should be called with all edges that may cross the line -// segment between the old and new focus locations. -func (t *tracker) drawTo(b Point) { - t.a = t.b - t.b = b - // TODO: the edge crosser may need an in-place Init method if this gets expensive - t.crosser = NewEdgeCrosser(t.a, t.b) -} - -// testEdge checks if the given edge crosses the current edge, and if so, then -// toggle the state of the given shapeID. -// This requires shape to have an interior. -func (t *tracker) testEdge(shapeID int32, edge Edge) { - if t.crosser.EdgeOrVertexCrossing(edge.V0, edge.V1) { - t.toggleShape(shapeID) - } -} - -// setNextCellID is used to indicate that the last argument to moveTo or drawTo -// was the entry vertex of the given CellID, i.e. the tracker is positioned at the -// start of this cell. 
By using this method together with atCellID, the caller -// can avoid calling moveTo in cases where the exit vertex of the previous cell -// is the same as the entry vertex of the current cell. -func (t *tracker) setNextCellID(nextCellID CellID) { - t.nextCellID = nextCellID.RangeMin() -} - -// atCellID reports if the focus is already at the entry vertex of the given -// CellID (provided that the caller calls setNextCellID as each cell is processed). -func (t *tracker) atCellID(cellid CellID) bool { - return cellid.RangeMin() == t.nextCellID -} - -// toggleShape adds or removes the given shapeID from the set of IDs it is tracking. -func (t *tracker) toggleShape(shapeID int32) { - // Most shapeIDs slices are small, so special case the common steps. - - // If there is nothing here, add it. - if len(t.shapeIDs) == 0 { - t.shapeIDs = append(t.shapeIDs, shapeID) - return - } - - // If it's the first element, drop it from the slice. - if t.shapeIDs[0] == shapeID { - t.shapeIDs = t.shapeIDs[1:] - return - } - - for i, s := range t.shapeIDs { - if s < shapeID { - continue - } - - // If it's in the set, cut it out. - if s == shapeID { - copy(t.shapeIDs[i:], t.shapeIDs[i+1:]) // overwrite the ith element - t.shapeIDs = t.shapeIDs[:len(t.shapeIDs)-1] - return - } - - // We've got to a point in the slice where we should be inserted. - // (the given shapeID is now less than the current positions id.) - t.shapeIDs = append(t.shapeIDs[0:i], - append([]int32{shapeID}, t.shapeIDs[i:len(t.shapeIDs)]...)...) - return - } - - // We got to the end and didn't find it, so add it to the list. - t.shapeIDs = append(t.shapeIDs, shapeID) -} - -// saveAndClearStateBefore makes an internal copy of the state for shape ids below -// the given limit, and then clear the state for those shapes. This is used during -// incremental updates to track the state of added and removed shapes separately. 
-func (t *tracker) saveAndClearStateBefore(limitShapeID int32) { - limit := t.lowerBound(limitShapeID) - t.savedIDs = append([]int32(nil), t.shapeIDs[:limit]...) - t.shapeIDs = t.shapeIDs[limit:] -} - -// restoreStateBefore restores the state previously saved by saveAndClearStateBefore. -// This only affects the state for shapeIDs below "limitShapeID". -func (t *tracker) restoreStateBefore(limitShapeID int32) { - limit := t.lowerBound(limitShapeID) - t.shapeIDs = append(append([]int32(nil), t.savedIDs...), t.shapeIDs[limit:]...) - t.savedIDs = nil -} - -// lowerBound returns the shapeID of the first entry x where x >= shapeID. -func (t *tracker) lowerBound(shapeID int32) int32 { - panic("not implemented") -} - -// removedShape represents a set of edges from the given shape that is queued for removal. -type removedShape struct { - shapeID int32 - hasInterior bool - containsTrackerOrigin bool - edges []Edge -} - -// There are three basic states the index can be in. -const ( - stale int32 = iota // There are pending updates. - updating // Updates are currently being applied. - fresh // There are no pending updates. -) - -// ShapeIndex indexes a set of Shapes, where a Shape is some collection of edges -// that optionally defines an interior. It can be used to represent a set of -// points, a set of polylines, or a set of polygons. For Shapes that have -// interiors, the index makes it very fast to determine which Shape(s) contain -// a given point or region. -// -// The index can be updated incrementally by adding or removing shapes. It is -// designed to handle up to hundreds of millions of edges. All data structures -// are designed to be small, so the index is compact; generally it is smaller -// than the underlying data being indexed. The index is also fast to construct. -// -// Polygon, Loop, and Polyline implement Shape which allows these objects to -// be indexed easily. 
You can find useful query methods in CrossingEdgeQuery -// and ClosestEdgeQuery (Not yet implemented in Go). -// -// Example showing how to build an index of Polylines: -// -// index := NewShapeIndex() -// for _, polyline := range polylines { -// index.Add(polyline); -// } -// // Now you can use a CrossingEdgeQuery or ClosestEdgeQuery here. -// -type ShapeIndex struct { - // shapes is a map of shape ID to shape. - shapes map[int32]Shape - - // The maximum number of edges per cell. - // TODO(roberts): Update the comments when the usage of this is implemented. - maxEdgesPerCell int - - // nextID tracks the next ID to hand out. IDs are not reused when shapes - // are removed from the index. - nextID int32 - - // cellMap is a map from CellID to the set of clipped shapes that intersect that - // cell. The cell IDs cover a set of non-overlapping regions on the sphere. - // In C++, this is a BTree, so the cells are ordered naturally by the data structure. - cellMap map[CellID]*ShapeIndexCell - // Track the ordered list of cell IDs. - cells []CellID - - // The current status of the index; accessed atomically. - status int32 - - // Additions and removals are queued and processed on the first subsequent - // query. There are several reasons to do this: - // - // - It is significantly more efficient to process updates in batches if - // the amount of entities added grows. - // - Often the index will never be queried, in which case we can save both - // the time and memory required to build it. Examples: - // + Loops that are created simply to pass to an Polygon. (We don't - // need the Loop index, because Polygon builds its own index.) - // + Applications that load a database of geometry and then query only - // a small fraction of it. - // - // The main drawback is that we need to go to some extra work to ensure that - // some methods are still thread-safe. 
Note that the goal is *not* to - // make this thread-safe in general, but simply to hide the fact that - // we defer some of the indexing work until query time. - // - // This mutex protects all of following fields in the index. - mu sync.RWMutex - - // pendingAdditionsPos is the index of the first entry that has not been processed - // via applyUpdatesInternal. - pendingAdditionsPos int32 - - // The set of shapes that have been queued for removal but not processed yet by - // applyUpdatesInternal. - pendingRemovals []*removedShape -} - -// NewShapeIndex creates a new ShapeIndex. -func NewShapeIndex() *ShapeIndex { - return &ShapeIndex{ - maxEdgesPerCell: 10, - shapes: make(map[int32]Shape), - cellMap: make(map[CellID]*ShapeIndexCell), - cells: nil, - status: fresh, - } -} - -// Iterator returns an iterator for this index. -func (s *ShapeIndex) Iterator() *ShapeIndexIterator { - s.maybeApplyUpdates() - return NewShapeIndexIterator(s, IteratorBegin) -} - -// Begin positions the iterator at the first cell in the index. -func (s *ShapeIndex) Begin() *ShapeIndexIterator { - s.maybeApplyUpdates() - return NewShapeIndexIterator(s, IteratorBegin) -} - -// End positions the iterator at the last cell in the index. -func (s *ShapeIndex) End() *ShapeIndexIterator { - // TODO(roberts): It's possible that updates could happen to the index between - // the time this is called and the time the iterators position is used and this - // will be invalid or not the end. For now, things will be undefined if this - // happens. See about referencing the IsFresh to guard for this in the future. - s.maybeApplyUpdates() - return NewShapeIndexIterator(s, IteratorEnd) -} - -// Len reports the number of Shapes in this index. -func (s *ShapeIndex) Len() int { - return len(s.shapes) -} - -// Reset resets the index to its original state. 
-func (s *ShapeIndex) Reset() { - s.shapes = make(map[int32]Shape) - s.nextID = 0 - s.cellMap = make(map[CellID]*ShapeIndexCell) - s.cells = nil - atomic.StoreInt32(&s.status, fresh) -} - -// NumEdges returns the number of edges in this index. -func (s *ShapeIndex) NumEdges() int { - numEdges := 0 - for _, shape := range s.shapes { - numEdges += shape.NumEdges() - } - return numEdges -} - -// NumEdgesUpTo returns the number of edges in the given index, up to the given -// limit. If the limit is encountered, the current running total is returned, -// which may be more than the limit. -func (s *ShapeIndex) NumEdgesUpTo(limit int) int { - var numEdges int - // We choose to iterate over the shapes in order to match the counting - // up behavior in C++ and for test compatibility instead of using a - // more idiomatic range over the shape map. - for i := int32(0); i <= s.nextID; i++ { - s := s.Shape(i) - if s == nil { - continue - } - numEdges += s.NumEdges() - if numEdges >= limit { - break - } - } - - return numEdges -} - -// Shape returns the shape with the given ID, or nil if the shape has been removed from the index. -func (s *ShapeIndex) Shape(id int32) Shape { return s.shapes[id] } - -// idForShape returns the id of the given shape in this index, or -1 if it is -// not in the index. -// -// TODO(roberts): Need to figure out an appropriate way to expose this on a Shape. -// C++ allows a given S2 type (Loop, Polygon, etc) to be part of multiple indexes. -// By having each type extend S2Shape which has an id element, they all inherit their -// own id field rather than having to track it themselves. -func (s *ShapeIndex) idForShape(shape Shape) int32 { - for k, v := range s.shapes { - if v == shape { - return k - } - } - return -1 -} - -// Add adds the given shape to the index and returns the assigned ID.. 
-func (s *ShapeIndex) Add(shape Shape) int32 { - s.shapes[s.nextID] = shape - s.nextID++ - atomic.StoreInt32(&s.status, stale) - return s.nextID - 1 -} - -// Remove removes the given shape from the index. -func (s *ShapeIndex) Remove(shape Shape) { - // The index updates itself lazily because it is much more efficient to - // process additions and removals in batches. - id := s.idForShape(shape) - - // If the shape wasn't found, it's already been removed or was not in the index. - if s.shapes[id] == nil { - return - } - - // Remove the shape from the shapes map. - delete(s.shapes, id) - - // We are removing a shape that has not yet been added to the index, - // so there is nothing else to do. - if id >= s.pendingAdditionsPos { - return - } - - numEdges := shape.NumEdges() - removed := &removedShape{ - shapeID: id, - hasInterior: shape.Dimension() == 2, - containsTrackerOrigin: shape.ReferencePoint().Contained, - edges: make([]Edge, numEdges), - } - - for e := 0; e < numEdges; e++ { - removed.edges[e] = shape.Edge(e) - } - - s.pendingRemovals = append(s.pendingRemovals, removed) - atomic.StoreInt32(&s.status, stale) -} - -// Build triggers the update of the index. Calls to Add and Release are normally -// queued and processed on the first subsequent query. This has many advantages, -// the most important of which is that sometimes there *is* no subsequent -// query, which lets us avoid building the index completely. -// -// This method forces any pending updates to be applied immediately. -func (s *ShapeIndex) Build() { - s.maybeApplyUpdates() -} - -// IsFresh reports if there are no pending updates that need to be applied. -// This can be useful to avoid building the index unnecessarily, or for -// choosing between two different algorithms depending on whether the index -// is available. -// -// The returned index status may be slightly out of date if the index was -// built in a different thread. 
This is fine for the intended use (as an -// efficiency hint), but it should not be used by internal methods. -func (s *ShapeIndex) IsFresh() bool { - return atomic.LoadInt32(&s.status) == fresh -} - -// isFirstUpdate reports if this is the first update to the index. -func (s *ShapeIndex) isFirstUpdate() bool { - // Note that it is not sufficient to check whether cellMap is empty, since - // entries are added to it during the update process. - return s.pendingAdditionsPos == 0 -} - -// isShapeBeingRemoved reports if the shape with the given ID is currently slated for removal. -func (s *ShapeIndex) isShapeBeingRemoved(shapeID int32) bool { - // All shape ids being removed fall below the index position of shapes being added. - return shapeID < s.pendingAdditionsPos -} - -// maybeApplyUpdates checks if the index pieces have changed, and if so, applies pending updates. -func (s *ShapeIndex) maybeApplyUpdates() { - // TODO(roberts): To avoid acquiring and releasing the mutex on every - // query, we should use atomic operations when testing whether the status - // is fresh and when updating the status to be fresh. This guarantees - // that any thread that sees a status of fresh will also see the - // corresponding index updates. - if atomic.LoadInt32(&s.status) != fresh { - s.mu.Lock() - s.applyUpdatesInternal() - atomic.StoreInt32(&s.status, fresh) - s.mu.Unlock() - } -} - -// applyUpdatesInternal does the actual work of updating the index by applying all -// pending additions and removals. It does *not* update the indexes status. -func (s *ShapeIndex) applyUpdatesInternal() { - // TODO(roberts): Building the index can use up to 20x as much memory per - // edge as the final index memory size. If this causes issues, add in - // batched updating to limit the amount of items per batch to a - // configurable memory footprint overhead. - t := newTracker() - - // allEdges maps a Face to a collection of faceEdges. 
- allEdges := make([][]faceEdge, 6) - - for _, p := range s.pendingRemovals { - s.removeShapeInternal(p, allEdges, t) - } - - for id := s.pendingAdditionsPos; id < int32(len(s.shapes)); id++ { - s.addShapeInternal(id, allEdges, t) - } - - for face := 0; face < 6; face++ { - s.updateFaceEdges(face, allEdges[face], t) - } - - s.pendingRemovals = s.pendingRemovals[:0] - s.pendingAdditionsPos = int32(len(s.shapes)) - // It is the caller's responsibility to update the index status. -} - -// addShapeInternal clips all edges of the given shape to the six cube faces, -// adds the clipped edges to the set of allEdges, and starts tracking its -// interior if necessary. -func (s *ShapeIndex) addShapeInternal(shapeID int32, allEdges [][]faceEdge, t *tracker) { - shape, ok := s.shapes[shapeID] - if !ok { - // This shape has already been removed. - return - } - - faceEdge := faceEdge{ - shapeID: shapeID, - hasInterior: shape.Dimension() == 2, - } - - if faceEdge.hasInterior { - t.addShape(shapeID, containsBruteForce(shape, t.focus())) - } - - numEdges := shape.NumEdges() - for e := 0; e < numEdges; e++ { - edge := shape.Edge(e) - - faceEdge.edgeID = e - faceEdge.edge = edge - faceEdge.maxLevel = maxLevelForEdge(edge) - s.addFaceEdge(faceEdge, allEdges) - } -} - -// addFaceEdge adds the given faceEdge into the collection of all edges. -func (s *ShapeIndex) addFaceEdge(fe faceEdge, allEdges [][]faceEdge) { - aFace := face(fe.edge.V0.Vector) - // See if both endpoints are on the same face, and are far enough from - // the edge of the face that they don't intersect any (padded) adjacent face. 
- if aFace == face(fe.edge.V1.Vector) { - x, y := validFaceXYZToUV(aFace, fe.edge.V0.Vector) - fe.a = r2.Point{x, y} - x, y = validFaceXYZToUV(aFace, fe.edge.V1.Vector) - fe.b = r2.Point{x, y} - - maxUV := 1 - cellPadding - if math.Abs(fe.a.X) <= maxUV && math.Abs(fe.a.Y) <= maxUV && - math.Abs(fe.b.X) <= maxUV && math.Abs(fe.b.Y) <= maxUV { - allEdges[aFace] = append(allEdges[aFace], fe) - return - } - } - - // Otherwise, we simply clip the edge to all six faces. - for face := 0; face < 6; face++ { - if aClip, bClip, intersects := ClipToPaddedFace(fe.edge.V0, fe.edge.V1, face, cellPadding); intersects { - fe.a = aClip - fe.b = bClip - allEdges[face] = append(allEdges[face], fe) - } - } -} - -// updateFaceEdges adds or removes the various edges from the index. -// An edge is added if shapes[id] is not nil, and removed otherwise. -func (s *ShapeIndex) updateFaceEdges(face int, faceEdges []faceEdge, t *tracker) { - numEdges := len(faceEdges) - if numEdges == 0 && len(t.shapeIDs) == 0 { - return - } - - // Create the initial clippedEdge for each faceEdge. Additional clipped - // edges are created when edges are split between child cells. We create - // two arrays, one containing the edge data and another containing pointers - // to those edges, so that during the recursion we only need to copy - // pointers in order to propagate an edge to the correct child. - clippedEdges := make([]*clippedEdge, numEdges) - bound := r2.EmptyRect() - for e := 0; e < numEdges; e++ { - clipped := &clippedEdge{ - faceEdge: &faceEdges[e], - } - clipped.bound = r2.RectFromPoints(faceEdges[e].a, faceEdges[e].b) - clippedEdges[e] = clipped - bound = bound.AddRect(clipped.bound) - } - - // Construct the initial face cell containing all the edges, and then update - // all the edges in the index recursively. 
- faceID := CellIDFromFace(face) - pcell := PaddedCellFromCellID(faceID, cellPadding) - - disjointFromIndex := s.isFirstUpdate() - if numEdges > 0 { - shrunkID := s.shrinkToFit(pcell, bound) - if shrunkID != pcell.id { - // All the edges are contained by some descendant of the face cell. We - // can save a lot of work by starting directly with that cell, but if we - // are in the interior of at least one shape then we need to create - // index entries for the cells we are skipping over. - s.skipCellRange(faceID.RangeMin(), shrunkID.RangeMin(), t, disjointFromIndex) - pcell = PaddedCellFromCellID(shrunkID, cellPadding) - s.updateEdges(pcell, clippedEdges, t, disjointFromIndex) - s.skipCellRange(shrunkID.RangeMax().Next(), faceID.RangeMax().Next(), t, disjointFromIndex) - return - } - } - - // Otherwise (no edges, or no shrinking is possible), subdivide normally. - s.updateEdges(pcell, clippedEdges, t, disjointFromIndex) -} - -// shrinkToFit shrinks the PaddedCell to fit within the given bounds. -func (s *ShapeIndex) shrinkToFit(pcell *PaddedCell, bound r2.Rect) CellID { - shrunkID := pcell.ShrinkToFit(bound) - - if !s.isFirstUpdate() && shrunkID != pcell.CellID() { - // Don't shrink any smaller than the existing index cells, since we need - // to combine the new edges with those cells. - iter := s.Iterator() - if iter.LocateCellID(shrunkID) == Indexed { - shrunkID = iter.CellID() - } - } - return shrunkID -} - -// skipCellRange skips over the cells in the given range, creating index cells if we are -// currently in the interior of at least one shape. -func (s *ShapeIndex) skipCellRange(begin, end CellID, t *tracker, disjointFromIndex bool) { - // If we aren't in the interior of a shape, then skipping over cells is easy. - if len(t.shapeIDs) == 0 { - return - } - - // Otherwise generate the list of cell ids that we need to visit, and create - // an index entry for each one. 
- skipped := CellUnionFromRange(begin, end) - for _, cell := range skipped { - var clippedEdges []*clippedEdge - s.updateEdges(PaddedCellFromCellID(cell, cellPadding), clippedEdges, t, disjointFromIndex) - } -} - -// updateEdges adds or removes the given edges whose bounding boxes intersect a -// given cell. disjointFromIndex is an optimization hint indicating that cellMap -// does not contain any entries that overlap the given cell. -func (s *ShapeIndex) updateEdges(pcell *PaddedCell, edges []*clippedEdge, t *tracker, disjointFromIndex bool) { - // This function is recursive with a maximum recursion depth of 30 (maxLevel). - - // Incremental updates are handled as follows. All edges being added or - // removed are combined together in edges, and all shapes with interiors - // are tracked using tracker. We subdivide recursively as usual until we - // encounter an existing index cell. At this point we absorb the index - // cell as follows: - // - // - Edges and shapes that are being removed are deleted from edges and - // tracker. - // - All remaining edges and shapes from the index cell are added to - // edges and tracker. - // - Continue subdividing recursively, creating new index cells as needed. - // - When the recursion gets back to the cell that was absorbed, we - // restore edges and tracker to their previous state. - // - // Note that the only reason that we include removed shapes in the recursive - // subdivision process is so that we can find all of the index cells that - // contain those shapes efficiently, without maintaining an explicit list of - // index cells for each shape (which would be expensive in terms of memory). - indexCellAbsorbed := false - if !disjointFromIndex { - // There may be existing index cells contained inside pcell. If we - // encounter such a cell, we need to combine the edges being updated with - // the existing cell contents by absorbing the cell. 
- iter := s.Iterator() - r := iter.LocateCellID(pcell.id) - if r == Disjoint { - disjointFromIndex = true - } else if r == Indexed { - // Absorb the index cell by transferring its contents to edges and - // deleting it. We also start tracking the interior of any new shapes. - s.absorbIndexCell(pcell, iter, edges, t) - indexCellAbsorbed = true - disjointFromIndex = true - } else { - // DCHECK_EQ(SUBDIVIDED, r) - } - } - - // If there are existing index cells below us, then we need to keep - // subdividing so that we can merge with those cells. Otherwise, - // makeIndexCell checks if the number of edges is small enough, and creates - // an index cell if possible (returning true when it does so). - if !disjointFromIndex || !s.makeIndexCell(pcell, edges, t) { - // TODO(roberts): If it turns out to have memory problems when there - // are 10M+ edges in the index, look into pre-allocating space so we - // are not always appending. - childEdges := [2][2][]*clippedEdge{} // [i][j] - - // Compute the middle of the padded cell, defined as the rectangle in - // (u,v)-space that belongs to all four (padded) children. By comparing - // against the four boundaries of middle we can determine which children - // each edge needs to be propagated to. - middle := pcell.Middle() - - // Build up a vector edges to be passed to each child cell. The (i,j) - // directions are left (i=0), right (i=1), lower (j=0), and upper (j=1). - // Note that the vast majority of edges are propagated to a single child. - for _, edge := range edges { - if edge.bound.X.Hi <= middle.X.Lo { - // Edge is entirely contained in the two left children. - a, b := s.clipVAxis(edge, middle.Y) - if a != nil { - childEdges[0][0] = append(childEdges[0][0], a) - } - if b != nil { - childEdges[0][1] = append(childEdges[0][1], b) - } - } else if edge.bound.X.Lo >= middle.X.Hi { - // Edge is entirely contained in the two right children. 
- a, b := s.clipVAxis(edge, middle.Y) - if a != nil { - childEdges[1][0] = append(childEdges[1][0], a) - } - if b != nil { - childEdges[1][1] = append(childEdges[1][1], b) - } - } else if edge.bound.Y.Hi <= middle.Y.Lo { - // Edge is entirely contained in the two lower children. - if a := s.clipUBound(edge, 1, middle.X.Hi); a != nil { - childEdges[0][0] = append(childEdges[0][0], a) - } - if b := s.clipUBound(edge, 0, middle.X.Lo); b != nil { - childEdges[1][0] = append(childEdges[1][0], b) - } - } else if edge.bound.Y.Lo >= middle.Y.Hi { - // Edge is entirely contained in the two upper children. - if a := s.clipUBound(edge, 1, middle.X.Hi); a != nil { - childEdges[0][1] = append(childEdges[0][1], a) - } - if b := s.clipUBound(edge, 0, middle.X.Lo); b != nil { - childEdges[1][1] = append(childEdges[1][1], b) - } - } else { - // The edge bound spans all four children. The edge - // itself intersects either three or four padded children. - left := s.clipUBound(edge, 1, middle.X.Hi) - a, b := s.clipVAxis(left, middle.Y) - if a != nil { - childEdges[0][0] = append(childEdges[0][0], a) - } - if b != nil { - childEdges[0][1] = append(childEdges[0][1], b) - } - right := s.clipUBound(edge, 0, middle.X.Lo) - a, b = s.clipVAxis(right, middle.Y) - if a != nil { - childEdges[1][0] = append(childEdges[1][0], a) - } - if b != nil { - childEdges[1][1] = append(childEdges[1][1], b) - } - } - } - - // Now recursively update the edges in each child. We call the children in - // increasing order of CellID so that when the index is first constructed, - // all insertions into cellMap are at the end (which is much faster). - for pos := 0; pos < 4; pos++ { - i, j := pcell.ChildIJ(pos) - if len(childEdges[i][j]) > 0 || len(t.shapeIDs) > 0 { - s.updateEdges(PaddedCellFromParentIJ(pcell, i, j), childEdges[i][j], - t, disjointFromIndex) - } - } - } - - if indexCellAbsorbed { - // Restore the state for any edges being removed that we are tracking. 
- t.restoreStateBefore(s.pendingAdditionsPos) - } -} - -// makeIndexCell builds an indexCell from the given padded cell and set of edges and adds -// it to the index. If the cell or edges are empty, no cell is added. -func (s *ShapeIndex) makeIndexCell(p *PaddedCell, edges []*clippedEdge, t *tracker) bool { - // If the cell is empty, no index cell is needed. (In most cases this - // situation is detected before we get to this point, but this can happen - // when all shapes in a cell are removed.) - if len(edges) == 0 && len(t.shapeIDs) == 0 { - return true - } - - // Count the number of edges that have not reached their maximum level yet. - // Return false if there are too many such edges. - count := 0 - for _, ce := range edges { - if p.Level() < ce.faceEdge.maxLevel { - count++ - } - - if count > s.maxEdgesPerCell { - return false - } - } - - // Possible optimization: Continue subdividing as long as exactly one child - // of the padded cell intersects the given edges. This can be done by finding - // the bounding box of all the edges and calling ShrinkToFit: - // - // cellID = p.ShrinkToFit(RectBound(edges)); - // - // Currently this is not beneficial; it slows down construction by 4-25% - // (mainly computing the union of the bounding rectangles) and also slows - // down queries (since more recursive clipping is required to get down to - // the level of a spatial index cell). But it may be worth trying again - // once containsCenter is computed and all algorithms are modified to - // take advantage of it. - - // We update the InteriorTracker as follows. For every Cell in the index - // we construct two edges: one edge from entry vertex of the cell to its - // center, and one from the cell center to its exit vertex. Here entry - // and exit refer the CellID ordering, i.e. the order in which points - // are encountered along the 2 space-filling curve. 
The exit vertex then - // becomes the entry vertex for the next cell in the index, unless there are - // one or more empty intervening cells, in which case the InteriorTracker - // state is unchanged because the intervening cells have no edges. - - // Shift the InteriorTracker focus point to the center of the current cell. - if t.isActive && len(edges) != 0 { - if !t.atCellID(p.id) { - t.moveTo(p.EntryVertex()) - } - t.drawTo(p.Center()) - s.testAllEdges(edges, t) - } - - // Allocate and fill a new index cell. To get the total number of shapes we - // need to merge the shapes associated with the intersecting edges together - // with the shapes that happen to contain the cell center. - cshapeIDs := t.shapeIDs - numShapes := s.countShapes(edges, cshapeIDs) - cell := NewShapeIndexCell(numShapes) - - // To fill the index cell we merge the two sources of shapes: edge shapes - // (those that have at least one edge that intersects this cell), and - // containing shapes (those that contain the cell center). We keep track - // of the index of the next intersecting edge and the next containing shape - // as we go along. Both sets of shape ids are already sorted. - eNext := 0 - cNextIdx := 0 - for i := 0; i < numShapes; i++ { - var clipped *clippedShape - // advance to next value base + i - eshapeID := int32(s.Len()) - cshapeID := eshapeID // Sentinels - - if eNext != len(edges) { - eshapeID = edges[eNext].faceEdge.shapeID - } - if cNextIdx < len(cshapeIDs) { - cshapeID = cshapeIDs[cNextIdx] - } - eBegin := eNext - if cshapeID < eshapeID { - // The entire cell is in the shape interior. - clipped = newClippedShape(cshapeID, 0) - clipped.containsCenter = true - cNextIdx++ - } else { - // Count the number of edges for this shape and allocate space for them. 
- for eNext < len(edges) && edges[eNext].faceEdge.shapeID == eshapeID { - eNext++ - } - clipped = newClippedShape(eshapeID, eNext-eBegin) - for e := eBegin; e < eNext; e++ { - clipped.edges[e-eBegin] = edges[e].faceEdge.edgeID - } - if cshapeID == eshapeID { - clipped.containsCenter = true - cNextIdx++ - } - } - cell.shapes[i] = clipped - } - - // Add this cell to the map. - s.cellMap[p.id] = cell - s.cells = append(s.cells, p.id) - - // Shift the tracker focus point to the exit vertex of this cell. - if t.isActive && len(edges) != 0 { - t.drawTo(p.ExitVertex()) - s.testAllEdges(edges, t) - t.setNextCellID(p.id.Next()) - } - return true -} - -// updateBound updates the specified endpoint of the given clipped edge and returns the -// resulting clipped edge. -func (s *ShapeIndex) updateBound(edge *clippedEdge, uEnd int, u float64, vEnd int, v float64) *clippedEdge { - c := &clippedEdge{faceEdge: edge.faceEdge} - if uEnd == 0 { - c.bound.X.Lo = u - c.bound.X.Hi = edge.bound.X.Hi - } else { - c.bound.X.Lo = edge.bound.X.Lo - c.bound.X.Hi = u - } - - if vEnd == 0 { - c.bound.Y.Lo = v - c.bound.Y.Hi = edge.bound.Y.Hi - } else { - c.bound.Y.Lo = edge.bound.Y.Lo - c.bound.Y.Hi = v - } - - return c -} - -// clipUBound clips the given endpoint (lo=0, hi=1) of the u-axis so that -// it does not extend past the given value of the given edge. -func (s *ShapeIndex) clipUBound(edge *clippedEdge, uEnd int, u float64) *clippedEdge { - // First check whether the edge actually requires any clipping. (Sometimes - // this method is called when clipping is not necessary, e.g. when one edge - // endpoint is in the overlap area between two padded child cells.) - if uEnd == 0 { - if edge.bound.X.Lo >= u { - return edge - } - } else { - if edge.bound.X.Hi <= u { - return edge - } - } - // We interpolate the new v-value from the endpoints of the original edge. 
- // This has two advantages: (1) we don't need to store the clipped endpoints - // at all, just their bounding box; and (2) it avoids the accumulation of - // roundoff errors due to repeated interpolations. The result needs to be - // clamped to ensure that it is in the appropriate range. - e := edge.faceEdge - v := edge.bound.Y.ClampPoint(interpolateFloat64(u, e.a.X, e.b.X, e.a.Y, e.b.Y)) - - // Determine which endpoint of the v-axis bound to update. If the edge - // slope is positive we update the same endpoint, otherwise we update the - // opposite endpoint. - var vEnd int - positiveSlope := (e.a.X > e.b.X) == (e.a.Y > e.b.Y) - if (uEnd == 1) == positiveSlope { - vEnd = 1 - } - return s.updateBound(edge, uEnd, u, vEnd, v) -} - -// clipVBound clips the given endpoint (lo=0, hi=1) of the v-axis so that -// it does not extend past the given value of the given edge. -func (s *ShapeIndex) clipVBound(edge *clippedEdge, vEnd int, v float64) *clippedEdge { - if vEnd == 0 { - if edge.bound.Y.Lo >= v { - return edge - } - } else { - if edge.bound.Y.Hi <= v { - return edge - } - } - - // We interpolate the new v-value from the endpoints of the original edge. - // This has two advantages: (1) we don't need to store the clipped endpoints - // at all, just their bounding box; and (2) it avoids the accumulation of - // roundoff errors due to repeated interpolations. The result needs to be - // clamped to ensure that it is in the appropriate range. - e := edge.faceEdge - u := edge.bound.X.ClampPoint(interpolateFloat64(v, e.a.Y, e.b.Y, e.a.X, e.b.X)) - - // Determine which endpoint of the v-axis bound to update. If the edge - // slope is positive we update the same endpoint, otherwise we update the - // opposite endpoint. 
- var uEnd int - positiveSlope := (e.a.X > e.b.X) == (e.a.Y > e.b.Y) - if (vEnd == 1) == positiveSlope { - uEnd = 1 - } - return s.updateBound(edge, uEnd, u, vEnd, v) -} - -// cliupVAxis returns the given edge clipped to within the boundaries of the middle -// interval along the v-axis, and adds the result to its children. -func (s *ShapeIndex) clipVAxis(edge *clippedEdge, middle r1.Interval) (a, b *clippedEdge) { - if edge.bound.Y.Hi <= middle.Lo { - // Edge is entirely contained in the lower child. - return edge, nil - } else if edge.bound.Y.Lo >= middle.Hi { - // Edge is entirely contained in the upper child. - return nil, edge - } - // The edge bound spans both children. - return s.clipVBound(edge, 1, middle.Hi), s.clipVBound(edge, 0, middle.Lo) -} - -// absorbIndexCell absorbs an index cell by transferring its contents to edges -// and/or "tracker", and then delete this cell from the index. If edges includes -// any edges that are being removed, this method also updates their -// InteriorTracker state to correspond to the exit vertex of this cell. -func (s *ShapeIndex) absorbIndexCell(p *PaddedCell, iter *ShapeIndexIterator, edges []*clippedEdge, t *tracker) { - // When we absorb a cell, we erase all the edges that are being removed. - // However when we are finished with this cell, we want to restore the state - // of those edges (since that is how we find all the index cells that need - // to be updated). The edges themselves are restored automatically when - // UpdateEdges returns from its recursive call, but the InteriorTracker - // state needs to be restored explicitly. - // - // Here we first update the InteriorTracker state for removed edges to - // correspond to the exit vertex of this cell, and then save the - // InteriorTracker state. This state will be restored by UpdateEdges when - // it is finished processing the contents of this cell. 
- if t.isActive && len(edges) != 0 && s.isShapeBeingRemoved(edges[0].faceEdge.shapeID) { - // We probably need to update the tracker. ("Probably" because - // it's possible that all shapes being removed do not have interiors.) - if !t.atCellID(p.id) { - t.moveTo(p.EntryVertex()) - } - t.drawTo(p.ExitVertex()) - t.setNextCellID(p.id.Next()) - for _, edge := range edges { - fe := edge.faceEdge - if !s.isShapeBeingRemoved(fe.shapeID) { - break // All shapes being removed come first. - } - if fe.hasInterior { - t.testEdge(fe.shapeID, fe.edge) - } - } - } - - // Save the state of the edges being removed, so that it can be restored - // when we are finished processing this cell and its children. We don't - // need to save the state of the edges being added because they aren't being - // removed from "edges" and will therefore be updated normally as we visit - // this cell and its children. - t.saveAndClearStateBefore(s.pendingAdditionsPos) - - // Create a faceEdge for each edge in this cell that isn't being removed. - var faceEdges []*faceEdge - trackerMoved := false - - cell := iter.IndexCell() - for _, clipped := range cell.shapes { - shapeID := clipped.shapeID - shape := s.Shape(shapeID) - if shape == nil { - continue // This shape is being removed. - } - - numClipped := clipped.numEdges() - - // If this shape has an interior, start tracking whether we are inside the - // shape. updateEdges wants to know whether the entry vertex of this - // cell is inside the shape, but we only know whether the center of the - // cell is inside the shape, so we need to test all the edges against the - // line segment from the cell center to the entry vertex. 
- edge := &faceEdge{ - shapeID: shapeID, - hasInterior: shape.Dimension() == 2, - } - - if edge.hasInterior { - t.addShape(shapeID, clipped.containsCenter) - // There might not be any edges in this entire cell (i.e., it might be - // in the interior of all shapes), so we delay updating the tracker - // until we see the first edge. - if !trackerMoved && numClipped > 0 { - t.moveTo(p.Center()) - t.drawTo(p.EntryVertex()) - t.setNextCellID(p.id) - trackerMoved = true - } - } - for i := 0; i < numClipped; i++ { - edgeID := clipped.edges[i] - edge.edgeID = edgeID - edge.edge = shape.Edge(edgeID) - edge.maxLevel = maxLevelForEdge(edge.edge) - if edge.hasInterior { - t.testEdge(shapeID, edge.edge) - } - var ok bool - edge.a, edge.b, ok = ClipToPaddedFace(edge.edge.V0, edge.edge.V1, p.id.Face(), cellPadding) - if !ok { - panic("invariant failure in ShapeIndex") - } - faceEdges = append(faceEdges, edge) - } - } - // Now create a clippedEdge for each faceEdge, and put them in "new_edges". - var newEdges []*clippedEdge - for _, faceEdge := range faceEdges { - clipped := &clippedEdge{ - faceEdge: faceEdge, - bound: clippedEdgeBound(faceEdge.a, faceEdge.b, p.bound), - } - newEdges = append(newEdges, clipped) - } - - // Discard any edges from "edges" that are being removed, and append the - // remainder to "newEdges" (This keeps the edges sorted by shape id.) - for i, clipped := range edges { - if !s.isShapeBeingRemoved(clipped.faceEdge.shapeID) { - newEdges = append(newEdges, edges[i:]...) - break - } - } - - // Update the edge list and delete this cell from the index. - edges, newEdges = newEdges, edges - delete(s.cellMap, p.id) - // TODO(roberts): delete from s.Cells -} - -// testAllEdges calls the trackers testEdge on all edges from shapes that have interiors. 
-func (s *ShapeIndex) testAllEdges(edges []*clippedEdge, t *tracker) { - for _, edge := range edges { - if edge.faceEdge.hasInterior { - t.testEdge(edge.faceEdge.shapeID, edge.faceEdge.edge) - } - } -} - -// countShapes reports the number of distinct shapes that are either associated with the -// given edges, or that are currently stored in the InteriorTracker. -func (s *ShapeIndex) countShapes(edges []*clippedEdge, shapeIDs []int32) int { - count := 0 - lastShapeID := int32(-1) - - // next clipped shape id in the shapeIDs list. - clippedNext := int32(0) - // index of the current element in the shapeIDs list. - shapeIDidx := 0 - for _, edge := range edges { - if edge.faceEdge.shapeID == lastShapeID { - continue - } - - count++ - lastShapeID = edge.faceEdge.shapeID - - // Skip over any containing shapes up to and including this one, - // updating count as appropriate. - for ; shapeIDidx < len(shapeIDs); shapeIDidx++ { - clippedNext = shapeIDs[shapeIDidx] - if clippedNext > lastShapeID { - break - } - if clippedNext < lastShapeID { - count++ - } - } - } - - // Count any remaining containing shapes. - count += len(shapeIDs) - shapeIDidx - return count -} - -// maxLevelForEdge reports the maximum level for a given edge. -func maxLevelForEdge(edge Edge) int { - // Compute the maximum cell size for which this edge is considered long. - // The calculation does not need to be perfectly accurate, so we use Norm - // rather than Angle for speed. - cellSize := edge.V0.Sub(edge.V1.Vector).Norm() * cellSizeToLongEdgeRatio - // Now return the first level encountered during subdivision where the - // average cell size is at most cellSize. - return AvgEdgeMetric.MinLevel(cellSize) -} - -// removeShapeInternal does the actual work for removing a given shape from the index. -func (s *ShapeIndex) removeShapeInternal(removed *removedShape, allEdges [][]faceEdge, t *tracker) { - // TODO(roberts): finish the implementation of this. 
-} diff --git a/vendor/github.com/golang/geo/s2/shapeutil.go b/vendor/github.com/golang/geo/s2/shapeutil.go deleted file mode 100644 index 64245dfa1..000000000 --- a/vendor/github.com/golang/geo/s2/shapeutil.go +++ /dev/null @@ -1,228 +0,0 @@ -// Copyright 2017 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package s2 - -// CrossingType defines different ways of reporting edge intersections. -type CrossingType int - -const ( - // CrossingTypeInterior reports intersections that occur at a point - // interior to both edges (i.e., not at a vertex). - CrossingTypeInterior CrossingType = iota - - // CrossingTypeAll reports all intersections, even those where two edges - // intersect only because they share a common vertex. - CrossingTypeAll - - // CrossingTypeNonAdjacent reports all intersections except for pairs of - // the form (AB, BC) where both edges are from the same ShapeIndex. - CrossingTypeNonAdjacent -) - -// rangeIterator is a wrapper over ShapeIndexIterator with extra methods -// that are useful for merging the contents of two or more ShapeIndexes. -type rangeIterator struct { - it *ShapeIndexIterator - // The min and max leaf cell ids covered by the current cell. If done() is - // true, these methods return a value larger than any valid cell id. - rangeMin CellID - rangeMax CellID -} - -// newRangeIterator creates a new rangeIterator positioned at the first cell of the given index. 
-func newRangeIterator(index *ShapeIndex) *rangeIterator { - r := &rangeIterator{ - it: index.Iterator(), - } - r.refresh() - return r -} - -func (r *rangeIterator) cellID() CellID { return r.it.CellID() } -func (r *rangeIterator) indexCell() *ShapeIndexCell { return r.it.IndexCell() } -func (r *rangeIterator) next() { r.it.Next(); r.refresh() } -func (r *rangeIterator) done() bool { return r.it.Done() } - -// seekTo positions the iterator at the first cell that overlaps or follows -// the current range minimum of the target iterator, i.e. such that its -// rangeMax >= target.rangeMin. -func (r *rangeIterator) seekTo(target *rangeIterator) { - r.it.seek(target.rangeMin) - // If the current cell does not overlap target, it is possible that the - // previous cell is the one we are looking for. This can only happen when - // the previous cell contains target but has a smaller CellID. - if r.it.Done() || r.it.CellID().RangeMin() > target.rangeMax { - if r.it.Prev() && r.it.CellID().RangeMax() < target.cellID() { - r.it.Next() - } - } - r.refresh() -} - -// seekBeyond positions the iterator at the first cell that follows the current -// range minimum of the target iterator. i.e. the first cell such that its -// rangeMin > target.rangeMax. -func (r *rangeIterator) seekBeyond(target *rangeIterator) { - r.it.seek(target.rangeMax.Next()) - if !r.it.Done() && r.it.CellID().RangeMin() <= target.rangeMax { - r.it.Next() - } - r.refresh() -} - -// refresh updates the iterators min and max values. -func (r *rangeIterator) refresh() { - r.rangeMin = r.cellID().RangeMin() - r.rangeMax = r.cellID().RangeMax() -} - -// referencePointForShape is a helper function for implementing various Shapes -// ReferencePoint functions. -// -// Given a shape consisting of closed polygonal loops, the interior of the -// shape is defined as the region to the left of all edges (which must be -// oriented consistently). 
This function then chooses an arbitrary point and -// returns true if that point is contained by the shape. -// -// Unlike Loop and Polygon, this method allows duplicate vertices and -// edges, which requires some extra care with definitions. The rule that we -// apply is that an edge and its reverse edge cancel each other: the result -// is the same as if that edge pair were not present. Therefore shapes that -// consist only of degenerate loop(s) are either empty or full; by convention, -// the shape is considered full if and only if it contains an empty loop (see -// laxPolygon for details). -// -// Determining whether a loop on the sphere contains a point is harder than -// the corresponding problem in 2D plane geometry. It cannot be implemented -// just by counting edge crossings because there is no such thing as a point -// at infinity that is guaranteed to be outside the loop. -// -// This function requires that the given Shape have an interior. -func referencePointForShape(shape Shape) ReferencePoint { - if shape.NumEdges() == 0 { - // A shape with no edges is defined to be full if and only if it - // contains at least one chain. - return OriginReferencePoint(shape.NumChains() > 0) - } - // Define a "matched" edge as one that can be paired with a corresponding - // reversed edge. Define a vertex as "balanced" if all of its edges are - // matched. In order to determine containment, we must find an unbalanced - // vertex. Often every vertex is unbalanced, so we start by trying an - // arbitrary vertex. - edge := shape.Edge(0) - - if ref, ok := referencePointAtVertex(shape, edge.V0); ok { - return ref - } - - // That didn't work, so now we do some extra work to find an unbalanced - // vertex (if any). Essentially we gather a list of edges and a list of - // reversed edges, and then sort them. The first edge that appears in one - // list but not the other is guaranteed to be unmatched. 
- n := shape.NumEdges() - var edges = make([]Edge, n) - var revEdges = make([]Edge, n) - for i := 0; i < n; i++ { - edge := shape.Edge(i) - edges[i] = edge - revEdges[i] = Edge{V0: edge.V1, V1: edge.V0} - } - - sortEdges(edges) - sortEdges(revEdges) - - for i := 0; i < n; i++ { - if edges[i].Cmp(revEdges[i]) == -1 { // edges[i] is unmatched - if ref, ok := referencePointAtVertex(shape, edges[i].V0); ok { - return ref - } - } - if revEdges[i].Cmp(edges[i]) == -1 { // revEdges[i] is unmatched - if ref, ok := referencePointAtVertex(shape, revEdges[i].V0); ok { - return ref - } - } - } - - // All vertices are balanced, so this polygon is either empty or full except - // for degeneracies. By convention it is defined to be full if it contains - // any chain with no edges. - for i := 0; i < shape.NumChains(); i++ { - if shape.Chain(i).Length == 0 { - return OriginReferencePoint(true) - } - } - - return OriginReferencePoint(false) -} - -// referencePointAtVertex reports whether the given vertex is unbalanced, and -// returns a ReferencePoint indicating if the point is contained. -// Otherwise returns false. -func referencePointAtVertex(shape Shape, vTest Point) (ReferencePoint, bool) { - var ref ReferencePoint - - // Let P be an unbalanced vertex. Vertex P is defined to be inside the - // region if the region contains a particular direction vector starting from - // P, namely the direction p.Ortho(). This can be calculated using - // ContainsVertexQuery. - - containsQuery := NewContainsVertexQuery(vTest) - n := shape.NumEdges() - for e := 0; e < n; e++ { - edge := shape.Edge(e) - if edge.V0 == vTest { - containsQuery.AddEdge(edge.V1, 1) - } - if edge.V1 == vTest { - containsQuery.AddEdge(edge.V0, -1) - } - } - containsSign := containsQuery.ContainsVertex() - if containsSign == 0 { - return ref, false // There are no unmatched edges incident to this vertex. 
- } - ref.Point = vTest - ref.Contained = containsSign > 0 - - return ref, true -} - -// containsBruteForce reports whether the given shape contains the given point. -// Most clients should not use this method, since its running time is linear in -// the number of shape edges. Instead clients should create a ShapeIndex and use -// ContainsPointQuery, since this strategy is much more efficient when many -// points need to be tested. -// -// Polygon boundaries are treated as being semi-open (see ContainsPointQuery -// and VertexModel for other options). -func containsBruteForce(shape Shape, point Point) bool { - if shape.Dimension() != 2 { - return false - } - - refPoint := shape.ReferencePoint() - if refPoint.Point == point { - return refPoint.Contained - } - - crosser := NewEdgeCrosser(refPoint.Point, point) - inside := refPoint.Contained - for e := 0; e < shape.NumEdges(); e++ { - edge := shape.Edge(e) - inside = inside != crosser.EdgeOrVertexCrossing(edge.V0, edge.V1) - } - return inside -} diff --git a/vendor/github.com/golang/geo/s2/shapeutil_edge_iterator.go b/vendor/github.com/golang/geo/s2/shapeutil_edge_iterator.go deleted file mode 100644 index 2a0d82361..000000000 --- a/vendor/github.com/golang/geo/s2/shapeutil_edge_iterator.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2020 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package s2 - -// EdgeIterator is an iterator that advances through all edges in an ShapeIndex. -// This is different to the ShapeIndexIterator, which advances through the cells in the -// ShapeIndex. -type EdgeIterator struct { - index *ShapeIndex - shapeID int32 - numEdges int32 - edgeID int32 -} - -// NewEdgeIterator creates a new edge iterator for the given index. -func NewEdgeIterator(index *ShapeIndex) *EdgeIterator { - e := &EdgeIterator{ - index: index, - shapeID: -1, - edgeID: -1, - } - - e.Next() - return e -} - -// ShapeID returns the current shape ID. -func (e *EdgeIterator) ShapeID() int32 { return e.shapeID } - -// EdgeID returns the current edge ID. -func (e *EdgeIterator) EdgeID() int32 { return e.edgeID } - -// ShapeEdgeID returns the current (shapeID, edgeID). -func (e *EdgeIterator) ShapeEdgeID() ShapeEdgeID { return ShapeEdgeID{e.shapeID, e.edgeID} } - -// Edge returns the current edge. -func (e *EdgeIterator) Edge() Edge { - return e.index.Shape(e.shapeID).Edge(int(e.edgeID)) -} - -// Done reports if the iterator is positioned at or after the last index edge. -func (e *EdgeIterator) Done() bool { return e.shapeID >= int32(len(e.index.shapes)) } - -// Next positions the iterator at the next index edge. -func (e *EdgeIterator) Next() { - e.edgeID++ - for ; e.edgeID >= e.numEdges; e.edgeID++ { - e.shapeID++ - if e.shapeID >= int32(len(e.index.shapes)) { - break - } - shape := e.index.Shape(e.shapeID) - if shape == nil { - e.numEdges = 0 - } else { - e.numEdges = int32(shape.NumEdges()) - } - e.edgeID = -1 - } -} diff --git a/vendor/github.com/golang/geo/s2/stuv.go b/vendor/github.com/golang/geo/s2/stuv.go deleted file mode 100644 index 7663bb398..000000000 --- a/vendor/github.com/golang/geo/s2/stuv.go +++ /dev/null @@ -1,427 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package s2 - -import ( - "math" - - "github.com/golang/geo/r3" -) - -// -// This file contains documentation of the various coordinate systems used -// throughout the library. Most importantly, S2 defines a framework for -// decomposing the unit sphere into a hierarchy of "cells". Each cell is a -// quadrilateral bounded by four geodesics. The top level of the hierarchy is -// obtained by projecting the six faces of a cube onto the unit sphere, and -// lower levels are obtained by subdividing each cell into four children -// recursively. Cells are numbered such that sequentially increasing cells -// follow a continuous space-filling curve over the entire sphere. The -// transformation is designed to make the cells at each level fairly uniform -// in size. -// -////////////////////////// S2 Cell Decomposition ///////////////////////// -// -// The following methods define the cube-to-sphere projection used by -// the Cell decomposition. -// -// In the process of converting a latitude-longitude pair to a 64-bit cell -// id, the following coordinate systems are used: -// -// (id) -// An CellID is a 64-bit encoding of a face and a Hilbert curve position -// on that face. The Hilbert curve position implicitly encodes both the -// position of a cell and its subdivision level (see s2cellid.go). -// -// (face, i, j) -// Leaf-cell coordinates. "i" and "j" are integers in the range -// [0,(2**30)-1] that identify a particular leaf cell on the given face. 
-// The (i, j) coordinate system is right-handed on each face, and the -// faces are oriented such that Hilbert curves connect continuously from -// one face to the next. -// -// (face, s, t) -// Cell-space coordinates. "s" and "t" are real numbers in the range -// [0,1] that identify a point on the given face. For example, the point -// (s, t) = (0.5, 0.5) corresponds to the center of the top-level face -// cell. This point is also a vertex of exactly four cells at each -// subdivision level greater than zero. -// -// (face, si, ti) -// Discrete cell-space coordinates. These are obtained by multiplying -// "s" and "t" by 2**31 and rounding to the nearest unsigned integer. -// Discrete coordinates lie in the range [0,2**31]. This coordinate -// system can represent the edge and center positions of all cells with -// no loss of precision (including non-leaf cells). In binary, each -// coordinate of a level-k cell center ends with a 1 followed by -// (30 - k) 0s. The coordinates of its edges end with (at least) -// (31 - k) 0s. -// -// (face, u, v) -// Cube-space coordinates in the range [-1,1]. To make the cells at each -// level more uniform in size after they are projected onto the sphere, -// we apply a nonlinear transformation of the form u=f(s), v=f(t). -// The (u, v) coordinates after this transformation give the actual -// coordinates on the cube face (modulo some 90 degree rotations) before -// it is projected onto the unit sphere. -// -// (face, u, v, w) -// Per-face coordinate frame. This is an extension of the (face, u, v) -// cube-space coordinates that adds a third axis "w" in the direction of -// the face normal. It is always a right-handed 3D coordinate system. -// Cube-space coordinates can be converted to this frame by setting w=1, -// while (u,v,w) coordinates can be projected onto the cube face by -// dividing by w, i.e. (face, u/w, v/w). -// -// (x, y, z) -// Direction vector (Point). 
Direction vectors are not necessarily unit -// length, and are often chosen to be points on the biunit cube -// [-1,+1]x[-1,+1]x[-1,+1]. They can be be normalized to obtain the -// corresponding point on the unit sphere. -// -// (lat, lng) -// Latitude and longitude (LatLng). Latitudes must be between -90 and -// 90 degrees inclusive, and longitudes must be between -180 and 180 -// degrees inclusive. -// -// Note that the (i, j), (s, t), (si, ti), and (u, v) coordinate systems are -// right-handed on all six faces. -// -// -// There are a number of different projections from cell-space (s,t) to -// cube-space (u,v): linear, quadratic, and tangent. They have the following -// tradeoffs: -// -// Linear - This is the fastest transformation, but also produces the least -// uniform cell sizes. Cell areas vary by a factor of about 5.2, with the -// largest cells at the center of each face and the smallest cells in -// the corners. -// -// Tangent - Transforming the coordinates via Atan makes the cell sizes -// more uniform. The areas vary by a maximum ratio of 1.4 as opposed to a -// maximum ratio of 5.2. However, each call to Atan is about as expensive -// as all of the other calculations combined when converting from points to -// cell ids, i.e. it reduces performance by a factor of 3. -// -// Quadratic - This is an approximation of the tangent projection that -// is much faster and produces cells that are almost as uniform in size. -// It is about 3 times faster than the tangent projection for converting -// cell ids to points or vice versa. Cell areas vary by a maximum ratio of -// about 2.1. -// -// Here is a table comparing the cell uniformity using each projection. 
Area -// Ratio is the maximum ratio over all subdivision levels of the largest cell -// area to the smallest cell area at that level, Edge Ratio is the maximum -// ratio of the longest edge of any cell to the shortest edge of any cell at -// the same level, and Diag Ratio is the ratio of the longest diagonal of -// any cell to the shortest diagonal of any cell at the same level. -// -// Area Edge Diag -// Ratio Ratio Ratio -// ----------------------------------- -// Linear: 5.200 2.117 2.959 -// Tangent: 1.414 1.414 1.704 -// Quadratic: 2.082 1.802 1.932 -// -// The worst-case cell aspect ratios are about the same with all three -// projections. The maximum ratio of the longest edge to the shortest edge -// within the same cell is about 1.4 and the maximum ratio of the diagonals -// within the same cell is about 1.7. -// -// For Go we have chosen to use only the Quadratic approach. Other language -// implementations may offer other choices. - -const ( - // maxSiTi is the maximum value of an si- or ti-coordinate. - // It is one shift more than maxSize. The range of valid (si,ti) - // values is [0..maxSiTi]. - maxSiTi = maxSize << 1 -) - -// siTiToST converts an si- or ti-value to the corresponding s- or t-value. -// Value is capped at 1.0 because there is no DCHECK in Go. -func siTiToST(si uint32) float64 { - if si > maxSiTi { - return 1.0 - } - return float64(si) / float64(maxSiTi) -} - -// stToSiTi converts the s- or t-value to the nearest si- or ti-coordinate. -// The result may be outside the range of valid (si,ti)-values. Value of -// 0.49999999999999994 (math.NextAfter(0.5, -1)), will be incorrectly rounded up. -func stToSiTi(s float64) uint32 { - if s < 0 { - return uint32(s*maxSiTi - 0.5) - } - return uint32(s*maxSiTi + 0.5) -} - -// stToUV converts an s or t value to the corresponding u or v value. -// This is a non-linear transformation from [-1,1] to [-1,1] that -// attempts to make the cell sizes more uniform. 
-// This uses what the C++ version calls 'the quadratic transform'. -func stToUV(s float64) float64 { - if s >= 0.5 { - return (1 / 3.) * (4*s*s - 1) - } - return (1 / 3.) * (1 - 4*(1-s)*(1-s)) -} - -// uvToST is the inverse of the stToUV transformation. Note that it -// is not always true that uvToST(stToUV(x)) == x due to numerical -// errors. -func uvToST(u float64) float64 { - if u >= 0 { - return 0.5 * math.Sqrt(1+3*u) - } - return 1 - 0.5*math.Sqrt(1-3*u) -} - -// face returns face ID from 0 to 5 containing the r. For points on the -// boundary between faces, the result is arbitrary but deterministic. -func face(r r3.Vector) int { - f := r.LargestComponent() - switch { - case f == r3.XAxis && r.X < 0: - f += 3 - case f == r3.YAxis && r.Y < 0: - f += 3 - case f == r3.ZAxis && r.Z < 0: - f += 3 - } - return int(f) -} - -// validFaceXYZToUV given a valid face for the given point r (meaning that -// dot product of r with the face normal is positive), returns -// the corresponding u and v values, which may lie outside the range [-1,1]. -func validFaceXYZToUV(face int, r r3.Vector) (float64, float64) { - switch face { - case 0: - return r.Y / r.X, r.Z / r.X - case 1: - return -r.X / r.Y, r.Z / r.Y - case 2: - return -r.X / r.Z, -r.Y / r.Z - case 3: - return r.Z / r.X, r.Y / r.X - case 4: - return r.Z / r.Y, -r.X / r.Y - } - return -r.Y / r.Z, -r.X / r.Z -} - -// xyzToFaceUV converts a direction vector (not necessarily unit length) to -// (face, u, v) coordinates. -func xyzToFaceUV(r r3.Vector) (f int, u, v float64) { - f = face(r) - u, v = validFaceXYZToUV(f, r) - return f, u, v -} - -// faceUVToXYZ turns face and UV coordinates into an unnormalized 3 vector. 
-func faceUVToXYZ(face int, u, v float64) r3.Vector { - switch face { - case 0: - return r3.Vector{1, u, v} - case 1: - return r3.Vector{-u, 1, v} - case 2: - return r3.Vector{-u, -v, 1} - case 3: - return r3.Vector{-1, -v, -u} - case 4: - return r3.Vector{v, -1, -u} - default: - return r3.Vector{v, u, -1} - } -} - -// faceXYZToUV returns the u and v values (which may lie outside the range -// [-1, 1]) if the dot product of the point p with the given face normal is positive. -func faceXYZToUV(face int, p Point) (u, v float64, ok bool) { - switch face { - case 0: - if p.X <= 0 { - return 0, 0, false - } - case 1: - if p.Y <= 0 { - return 0, 0, false - } - case 2: - if p.Z <= 0 { - return 0, 0, false - } - case 3: - if p.X >= 0 { - return 0, 0, false - } - case 4: - if p.Y >= 0 { - return 0, 0, false - } - default: - if p.Z >= 0 { - return 0, 0, false - } - } - - u, v = validFaceXYZToUV(face, p.Vector) - return u, v, true -} - -// faceXYZtoUVW transforms the given point P to the (u,v,w) coordinate frame of the given -// face where the w-axis represents the face normal. -func faceXYZtoUVW(face int, p Point) Point { - // The result coordinates are simply the dot products of P with the (u,v,w) - // axes for the given face (see faceUVWAxes). - switch face { - case 0: - return Point{r3.Vector{p.Y, p.Z, p.X}} - case 1: - return Point{r3.Vector{-p.X, p.Z, p.Y}} - case 2: - return Point{r3.Vector{-p.X, -p.Y, p.Z}} - case 3: - return Point{r3.Vector{-p.Z, -p.Y, -p.X}} - case 4: - return Point{r3.Vector{-p.Z, p.X, -p.Y}} - default: - return Point{r3.Vector{p.Y, p.X, -p.Z}} - } -} - -// faceSiTiToXYZ transforms the (si, ti) coordinates to a (not necessarily -// unit length) Point on the given face. -func faceSiTiToXYZ(face int, si, ti uint32) Point { - return Point{faceUVToXYZ(face, stToUV(siTiToST(si)), stToUV(siTiToST(ti)))} -} - -// xyzToFaceSiTi transforms the (not necessarily unit length) Point to -// (face, si, ti) coordinates and the level the Point is at. 
-func xyzToFaceSiTi(p Point) (face int, si, ti uint32, level int) { - face, u, v := xyzToFaceUV(p.Vector) - si = stToSiTi(uvToST(u)) - ti = stToSiTi(uvToST(v)) - - // If the levels corresponding to si,ti are not equal, then p is not a cell - // center. The si,ti values of 0 and maxSiTi need to be handled specially - // because they do not correspond to cell centers at any valid level; they - // are mapped to level -1 by the code at the end. - level = maxLevel - findLSBSetNonZero64(uint64(si|maxSiTi)) - if level < 0 || level != maxLevel-findLSBSetNonZero64(uint64(ti|maxSiTi)) { - return face, si, ti, -1 - } - - // In infinite precision, this test could be changed to ST == SiTi. However, - // due to rounding errors, uvToST(xyzToFaceUV(faceUVToXYZ(stToUV(...)))) is - // not idempotent. On the other hand, the center is computed exactly the same - // way p was originally computed (if it is indeed the center of a Cell); - // the comparison can be exact. - if p.Vector == faceSiTiToXYZ(face, si, ti).Normalize() { - return face, si, ti, level - } - - return face, si, ti, -1 -} - -// uNorm returns the right-handed normal (not necessarily unit length) for an -// edge in the direction of the positive v-axis at the given u-value on -// the given face. (This vector is perpendicular to the plane through -// the sphere origin that contains the given edge.) -func uNorm(face int, u float64) r3.Vector { - switch face { - case 0: - return r3.Vector{u, -1, 0} - case 1: - return r3.Vector{1, u, 0} - case 2: - return r3.Vector{1, 0, u} - case 3: - return r3.Vector{-u, 0, 1} - case 4: - return r3.Vector{0, -u, 1} - default: - return r3.Vector{0, -1, -u} - } -} - -// vNorm returns the right-handed normal (not necessarily unit length) for an -// edge in the direction of the positive u-axis at the given v-value on -// the given face. 
-func vNorm(face int, v float64) r3.Vector { - switch face { - case 0: - return r3.Vector{-v, 0, 1} - case 1: - return r3.Vector{0, -v, 1} - case 2: - return r3.Vector{0, -1, -v} - case 3: - return r3.Vector{v, -1, 0} - case 4: - return r3.Vector{1, v, 0} - default: - return r3.Vector{1, 0, v} - } -} - -// faceUVWAxes are the U, V, and W axes for each face. -var faceUVWAxes = [6][3]Point{ - {Point{r3.Vector{0, 1, 0}}, Point{r3.Vector{0, 0, 1}}, Point{r3.Vector{1, 0, 0}}}, - {Point{r3.Vector{-1, 0, 0}}, Point{r3.Vector{0, 0, 1}}, Point{r3.Vector{0, 1, 0}}}, - {Point{r3.Vector{-1, 0, 0}}, Point{r3.Vector{0, -1, 0}}, Point{r3.Vector{0, 0, 1}}}, - {Point{r3.Vector{0, 0, -1}}, Point{r3.Vector{0, -1, 0}}, Point{r3.Vector{-1, 0, 0}}}, - {Point{r3.Vector{0, 0, -1}}, Point{r3.Vector{1, 0, 0}}, Point{r3.Vector{0, -1, 0}}}, - {Point{r3.Vector{0, 1, 0}}, Point{r3.Vector{1, 0, 0}}, Point{r3.Vector{0, 0, -1}}}, -} - -// faceUVWFaces are the precomputed neighbors of each face. -var faceUVWFaces = [6][3][2]int{ - {{4, 1}, {5, 2}, {3, 0}}, - {{0, 3}, {5, 2}, {4, 1}}, - {{0, 3}, {1, 4}, {5, 2}}, - {{2, 5}, {1, 4}, {0, 3}}, - {{2, 5}, {3, 0}, {1, 4}}, - {{4, 1}, {3, 0}, {2, 5}}, -} - -// uvwAxis returns the given axis of the given face. -func uvwAxis(face, axis int) Point { - return faceUVWAxes[face][axis] -} - -// uvwFaces returns the face in the (u,v,w) coordinate system on the given axis -// in the given direction. -func uvwFace(face, axis, direction int) int { - return faceUVWFaces[face][axis][direction] -} - -// uAxis returns the u-axis for the given face. -func uAxis(face int) Point { - return uvwAxis(face, 0) -} - -// vAxis returns the v-axis for the given face. -func vAxis(face int) Point { - return uvwAxis(face, 1) -} - -// Return the unit-length normal for the given face. 
-func unitNorm(face int) Point { - return uvwAxis(face, 2) -} diff --git a/vendor/github.com/golang/geo/s2/util.go b/vendor/github.com/golang/geo/s2/util.go deleted file mode 100644 index 7cab746d8..000000000 --- a/vendor/github.com/golang/geo/s2/util.go +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright 2017 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package s2 - -import "github.com/golang/geo/s1" - -// roundAngle returns the value rounded to nearest as an int32. -// This does not match C++ exactly for the case of x.5. -func roundAngle(val s1.Angle) int32 { - if val < 0 { - return int32(val - 0.5) - } - return int32(val + 0.5) -} - -// minAngle returns the smallest of the given values. -func minAngle(x s1.Angle, others ...s1.Angle) s1.Angle { - min := x - for _, y := range others { - if y < min { - min = y - } - } - return min -} - -// maxAngle returns the largest of the given values. -func maxAngle(x s1.Angle, others ...s1.Angle) s1.Angle { - max := x - for _, y := range others { - if y > max { - max = y - } - } - return max -} - -// minChordAngle returns the smallest of the given values. -func minChordAngle(x s1.ChordAngle, others ...s1.ChordAngle) s1.ChordAngle { - min := x - for _, y := range others { - if y < min { - min = y - } - } - return min -} - -// maxChordAngle returns the largest of the given values. 
-func maxChordAngle(x s1.ChordAngle, others ...s1.ChordAngle) s1.ChordAngle { - max := x - for _, y := range others { - if y > max { - max = y - } - } - return max -} - -// minFloat64 returns the smallest of the given values. -func minFloat64(x float64, others ...float64) float64 { - min := x - for _, y := range others { - if y < min { - min = y - } - } - return min -} - -// maxFloat64 returns the largest of the given values. -func maxFloat64(x float64, others ...float64) float64 { - max := x - for _, y := range others { - if y > max { - max = y - } - } - return max -} - -// minInt returns the smallest of the given values. -func minInt(x int, others ...int) int { - min := x - for _, y := range others { - if y < min { - min = y - } - } - return min -} - -// maxInt returns the largest of the given values. -func maxInt(x int, others ...int) int { - max := x - for _, y := range others { - if y > max { - max = y - } - } - return max -} - -// clampInt returns the number closest to x within the range min..max. -func clampInt(x, min, max int) int { - if x < min { - return min - } - if x > max { - return max - } - return x -} diff --git a/vendor/github.com/golang/geo/s2/wedge_relations.go b/vendor/github.com/golang/geo/s2/wedge_relations.go deleted file mode 100644 index d637bb68c..000000000 --- a/vendor/github.com/golang/geo/s2/wedge_relations.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2017 Google Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package s2 - -// WedgeRel enumerates the possible relation between two wedges A and B. -type WedgeRel int - -// Define the different possible relationships between two wedges. -// -// Given an edge chain (x0, x1, x2), the wedge at x1 is the region to the -// left of the edges. More precisely, it is the set of all rays from x1x0 -// (inclusive) to x1x2 (exclusive) in the *clockwise* direction. -const ( - WedgeEquals WedgeRel = iota // A and B are equal. - WedgeProperlyContains // A is a strict superset of B. - WedgeIsProperlyContained // A is a strict subset of B. - WedgeProperlyOverlaps // A-B, B-A, and A intersect B are non-empty. - WedgeIsDisjoint // A and B are disjoint. -) - -// WedgeRelation reports the relation between two non-empty wedges -// A=(a0, ab1, a2) and B=(b0, ab1, b2). -func WedgeRelation(a0, ab1, a2, b0, b2 Point) WedgeRel { - // There are 6 possible edge orderings at a shared vertex (all - // of these orderings are circular, i.e. abcd == bcda): - // - // (1) a2 b2 b0 a0: A contains B - // (2) a2 a0 b0 b2: B contains A - // (3) a2 a0 b2 b0: A and B are disjoint - // (4) a2 b0 a0 b2: A and B intersect in one wedge - // (5) a2 b2 a0 b0: A and B intersect in one wedge - // (6) a2 b0 b2 a0: A and B intersect in two wedges - // - // We do not distinguish between 4, 5, and 6. - // We pay extra attention when some of the edges overlap. When edges - // overlap, several of these orderings can be satisfied, and we take - // the most specific. - if a0 == b0 && a2 == b2 { - return WedgeEquals - } - - // Cases 1, 2, 5, and 6 - if OrderedCCW(a0, a2, b2, ab1) { - // The cases with this vertex ordering are 1, 5, and 6, - if OrderedCCW(b2, b0, a0, ab1) { - return WedgeProperlyContains - } - - // We are in case 5 or 6, or case 2 if a2 == b2. 
- if a2 == b2 { - return WedgeIsProperlyContained - } - return WedgeProperlyOverlaps - - } - // We are in case 2, 3, or 4. - if OrderedCCW(a0, b0, b2, ab1) { - return WedgeIsProperlyContained - } - - if OrderedCCW(a0, b0, a2, ab1) { - return WedgeIsDisjoint - } - return WedgeProperlyOverlaps -} - -// WedgeContains reports whether non-empty wedge A=(a0, ab1, a2) contains B=(b0, ab1, b2). -// Equivalent to WedgeRelation == WedgeProperlyContains || WedgeEquals. -func WedgeContains(a0, ab1, a2, b0, b2 Point) bool { - // For A to contain B (where each loop interior is defined to be its left - // side), the CCW edge order around ab1 must be a2 b2 b0 a0. We split - // this test into two parts that test three vertices each. - return OrderedCCW(a2, b2, b0, ab1) && OrderedCCW(b0, a0, a2, ab1) -} - -// WedgeIntersects reports whether non-empty wedge A=(a0, ab1, a2) intersects B=(b0, ab1, b2). -// Equivalent but faster than WedgeRelation != WedgeIsDisjoint -func WedgeIntersects(a0, ab1, a2, b0, b2 Point) bool { - // For A not to intersect B (where each loop interior is defined to be - // its left side), the CCW edge order around ab1 must be a0 b2 b0 a2. - // Note that it's important to write these conditions as negatives - // (!OrderedCCW(a,b,c,o) rather than Ordered(c,b,a,o)) to get correct - // results when two vertices are the same. 
- return !(OrderedCCW(a0, b2, b0, ab1) && OrderedCCW(b0, a2, a0, ab1)) -} diff --git a/vendor/github.com/h2non/filetype/.editorconfig b/vendor/github.com/h2non/filetype/.editorconfig deleted file mode 100644 index 000dc0a7a..000000000 --- a/vendor/github.com/h2non/filetype/.editorconfig +++ /dev/null @@ -1,12 +0,0 @@ -root = true - -[*] -indent_style = tabs -indent_size = 2 -end_of_line = lf -charset = utf-8 -trim_trailing_whitespace = true -insert_final_newline = true - -[*.md] -trim_trailing_whitespace = false diff --git a/vendor/github.com/h2non/filetype/.gitignore b/vendor/github.com/h2non/filetype/.gitignore deleted file mode 100644 index 6fefe6cce..000000000 --- a/vendor/github.com/h2non/filetype/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -bin -.DS_Store diff --git a/vendor/github.com/h2non/filetype/.travis.yml b/vendor/github.com/h2non/filetype/.travis.yml deleted file mode 100644 index c9cdbc8da..000000000 --- a/vendor/github.com/h2non/filetype/.travis.yml +++ /dev/null @@ -1,16 +0,0 @@ -language: go -arch: - - AMD64 - - ppc64le -go: - - "1.13" - - "1.14" - -before_install: - - go get -u -v golang.org/x/lint/golint - -script: - - diff -u <(echo -n) <(gofmt -s -d ./) - - diff -u <(echo -n) <(go vet ./...) - - diff -u <(echo -n) <(golint) - - go test -v -race ./... 
diff --git a/vendor/github.com/h2non/filetype/History.md b/vendor/github.com/h2non/filetype/History.md deleted file mode 100644 index f53f3d241..000000000 --- a/vendor/github.com/h2non/filetype/History.md +++ /dev/null @@ -1,163 +0,0 @@ - -v1.0.3 / 2021-11-21 -=================== - - * fix(#108): add application file matchers - * Merge pull request #106 from hannesbraun/aiff-support - * Add AIFF support - * fix(archive): format issue indentation - * feat(version): bump patch - * Merge pull request #100 from da2018/master - * Enhance Zstd support - * Merge pull request #98 from cfergeau/zstd - * Add zstd support - * Merge pull request #99 from cfergeau/byteprefixmatcher - * Introduce bytePrefixMatcher helper - -v1.1.0 / 2020-06-06 -=================== - - * feat: version bump v1.10 - * feat(ci): add go 1.14 - * Merge pull request #82 from andrewstucki/sqlite-update - * Merge pull request #84 from evanoberholster/master - * Better differentiation: between image/x-canon-cr2 and image/tiff - * Merge pull request #1 from h2non/master - * Update ico filetype per https://www.iana.org/assignments/media-types/image/vnd.microsoft.icon - * Update rar filetype per https://www.iana.org/assignments/media-types/application/vnd.rar - * Update exe filetype per https://www.iana.org/assignments/media-types/application/vnd.microsoft.portable-executable - * Update deb filetype per https://www.iana.org/assignments/media-types/application/vnd.debian.binary-package - * Update sqlite filetype per https://www.iana.org/assignments/media-types/application/vnd.sqlite3 - * Merge pull request #72 from turn88/master - * Update document.go - * Update document.go - * Update document.go - * add matchers for office 2003 - -v1.0.10 / 2019-08-06 -==================== - - * Merge pull request #76 from lex-r/fix-matroska-detection - * fix: mkv and webm types detection - -v1.0.9 / 2019-07-25 -=================== - - * Merge pull request #75 from Trane9991/master - * add video/3gpp support - * fix: use 
proper iso file mime type - * feat: add iso image format - * Merge pull request #65 from Fentonz/master - * Merge pull request #70 from fanpei91/master - * add image/vnd.dwg to README - * add image/vnd.dwg support - * Added support for .iso files - -v1.0.8 / 2019-02-10 -=================== - - * refactor(images): heic -> heif - * feat(docs): add heif format - * Merge pull request #60 from rikonor/master - * add heif/heic support - * fix(docs): dicom -> dcm - * feat: add dicom type - * Merge pull request #58 from Fentonz/master - * Merge pull request #59 from kmanley/master - * fix example; related to h2non/filetype#43 - * Added DICOM type to archive - - -v1.0.7 / 2019-02-09 -=================== - - * Merge pull request #56 from akupila/wasm - * add wasm to readme - * detect wasm file type - -v1.0.6 / 2019-01-22 -=================== - - * Merge pull request #55 from ivanlemeshev/master - * Added ftypmp4v to MP4 matcher - * Merge pull request #54 from aofei/master - * chore: add support for Go modules - * feat: add support for AAC (audio/aac) - * Merge pull request #53 from lynxbyorion/check-for-docoments - * Added checks for documents. - * Merge pull request #51 from eriken/master - * fixed bad mime and import paths - * Merge pull request #50 from eriken/jpeg2000_support - * fix import paths - * jpeg2000 support - * Merge pull request #47 from Ma124/master - * Merge pull request #49 from amoore614/master - * more robust check for .mov files - * bugfix: reverse order of matcher key list so user registered matchers appear first - * bugfix: store ptr to MatcherKeys in case user registered matchers are used. 
- * update comment - * Bump buffer size to 8K to allow for more custom file matching - * refactor(readme): update package import path - * Merge pull request #48 from kumakichi/support_msooxml - * do not use v1 - * ok, master already changed travis - * add fixtures, but MatchReader may not work for some msooxml files, 4096 bytes maybe not enough - * support ms ooxml, #40 - * Fixed misspells - * fix(travis): use string notation for matrix items - * Merge pull request #42 from bruth/patch-2 - * refactor(travis): remove Go 1.6, add Go 1.10 - * Change maximum bytes required for detection - * Merge pull request #36 from yiiTT/patch-1 - * Add MP4 dash and additional ISO formats - * Merge pull request #34 from RangelReale/fix-mp4-case - * Merge pull request #32 from yiiTT/fix-m4v - * Fixed mp4 detection case-sensitivity according to http://www.ftyps.com/ - * Fix M4v matcher - -v1.0.5 / 2017-12-12 -=================== - - * Merge pull request #30 from RangelReale/fix_mp4 - * Fix duplicated item in mp4 fix - * Fix MP4 matcher, with information from http://www.file-recovery.com/mp4-signature-format.htm - * Merge pull request #28 from ikovic/master - * Updated file header example. 
- -v1.0.4 / 2017-11-29 -=================== - - * fix: tests and document types matchers - * refactor(docs): remove codesponsor - * Merge pull request #26 from bienkma/master - * Add support check file type: .doc, .docx, .pptx, .ppt, .xls, .xlsx - * feat(docs): add code sponsor banner - * feat(travis): add go 1.9 - * Merge pull request #24 from strazzere/patch-1 - * Fix typo in unknown - -v1.0.3 / 2017-08-03 -=================== - - * Merge pull request #21 from elemeta/master - * Add Elf file as supported matcher archive type - -v1.0.2 / 2017-07-26 -=================== - - * Merge pull request #20 from marshyski/master - * Added RedHat RPM as supported matcher archive type - * Merge pull request #19 from nlamirault/patch-1 - * Fix typo in documentation - -v1.0.1 / 2017-02-24 -=================== - - * Merge pull request #18 from Impyy/enable-webm - * Enable the webm matcher - * feat(docs): add Go version badge - -1.0.0 / 2016-12-11 -================== - -- Initial stable version (v1.0.0). diff --git a/vendor/github.com/h2non/filetype/LICENSE b/vendor/github.com/h2non/filetype/LICENSE deleted file mode 100644 index 30ede59b6..000000000 --- a/vendor/github.com/h2non/filetype/LICENSE +++ /dev/null @@ -1,24 +0,0 @@ -The MIT License - -Copyright (c) Tomas Aparicio - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without -restriction, including without limitation the rights to use, -copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the -Software is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/h2non/filetype/README.md b/vendor/github.com/h2non/filetype/README.md deleted file mode 100644 index 64033e903..000000000 --- a/vendor/github.com/h2non/filetype/README.md +++ /dev/null @@ -1,294 +0,0 @@ -# filetype [![Build Status](https://travis-ci.org/h2non/filetype.svg)](https://travis-ci.org/h2non/filetype) [![GoDoc](https://godoc.org/github.com/h2non/filetype?status.svg)](https://godoc.org/github.com/h2non/filetype) [![Go Report Card](http://goreportcard.com/badge/h2non/filetype)](http://goreportcard.com/report/h2non/filetype) [![Go Version](https://img.shields.io/badge/go-v1.0+-green.svg?style=flat)](https://github.com/h2non/gentleman) - -Small and dependency free [Go](https://golang.org) package to infer file and MIME type checking the [magic numbers]() signature. - -For SVG file type checking, see [go-is-svg](https://github.com/h2non/go-is-svg) package. Python port: [filetype.py](https://github.com/h2non/filetype.py). - -## Features - -- Supports a [wide range](#supported-types) of file types -- Provides file extension and proper MIME type -- File discovery by extension or MIME type -- File discovery by class (image, video, audio...) 
-- Provides a bunch of helpers and file matching shortcuts -- [Pluggable](#add-additional-file-type-matchers): add custom new types and matchers -- Simple and semantic API -- [Blazing fast](#benchmarks), even processing large files -- Only first 262 bytes representing the max file header is required, so you can just [pass a slice](#file-header) -- Dependency free (just Go code, no C compilation needed) -- Cross-platform file recognition - -## Installation - -```bash -go get github.com/h2non/filetype -``` - -## API - -See [Godoc](https://godoc.org/github.com/h2non/filetype) reference. - -### Subpackages - -- [`github.com/h2non/filetype/types`](https://godoc.org/github.com/h2non/filetype/types) -- [`github.com/h2non/filetype/matchers`](https://godoc.org/github.com/h2non/filetype/matchers) - -## Examples - -#### Simple file type checking - -```go -package main - -import ( - "fmt" - "io/ioutil" - - "github.com/h2non/filetype" -) - -func main() { - buf, _ := ioutil.ReadFile("sample.jpg") - - kind, _ := filetype.Match(buf) - if kind == filetype.Unknown { - fmt.Println("Unknown file type") - return - } - - fmt.Printf("File type: %s. 
MIME: %s\n", kind.Extension, kind.MIME.Value) -} -``` - -#### Check type class - -```go -package main - -import ( - "fmt" - "io/ioutil" - - "github.com/h2non/filetype" -) - -func main() { - buf, _ := ioutil.ReadFile("sample.jpg") - - if filetype.IsImage(buf) { - fmt.Println("File is an image") - } else { - fmt.Println("Not an image") - } -} -``` - -#### Supported type - -```go -package main - -import ( - "fmt" - - "github.com/h2non/filetype" -) - -func main() { - // Check if file is supported by extension - if filetype.IsSupported("jpg") { - fmt.Println("Extension supported") - } else { - fmt.Println("Extension not supported") - } - - // Check if file is supported by extension - if filetype.IsMIMESupported("image/jpeg") { - fmt.Println("MIME type supported") - } else { - fmt.Println("MIME type not supported") - } -} -``` - -#### File header - -```go -package main - -import ( - "fmt" - "io/ioutil" - - "github.com/h2non/filetype" -) - -func main() { - // Open a file descriptor - file, _ := os.Open("movie.mp4") - - // We only have to pass the file header = first 261 bytes - head := make([]byte, 261) - file.Read(head) - - if filetype.IsImage(head) { - fmt.Println("File is an image") - } else { - fmt.Println("Not an image") - } -} -``` - -#### Add additional file type matchers - -```go -package main - -import ( - "fmt" - - "github.com/h2non/filetype" -) - -var fooType = filetype.NewType("foo", "foo/foo") - -func fooMatcher(buf []byte) bool { - return len(buf) > 1 && buf[0] == 0x01 && buf[1] == 0x02 -} - -func main() { - // Register the new matcher and its type - filetype.AddMatcher(fooType, fooMatcher) - - // Check if the new type is supported by extension - if filetype.IsSupported("foo") { - fmt.Println("New supported type: foo") - } - - // Check if the new type is supported by MIME - if filetype.IsMIMESupported("foo/foo") { - fmt.Println("New supported MIME type: foo/foo") - } - - // Try to match the file - fooFile := []byte{0x01, 0x02} - kind, _ := 
filetype.Match(fooFile) - if kind == filetype.Unknown { - fmt.Println("Unknown file type") - } else { - fmt.Printf("File type matched: %s\n", kind.Extension) - } -} -``` - -## Supported types - -#### Image - -- **jpg** - `image/jpeg` -- **png** - `image/png` -- **gif** - `image/gif` -- **webp** - `image/webp` -- **cr2** - `image/x-canon-cr2` -- **tif** - `image/tiff` -- **bmp** - `image/bmp` -- **heif** - `image/heif` -- **jxr** - `image/vnd.ms-photo` -- **psd** - `image/vnd.adobe.photoshop` -- **ico** - `image/vnd.microsoft.icon` -- **dwg** - `image/vnd.dwg` - -#### Video - -- **mp4** - `video/mp4` -- **m4v** - `video/x-m4v` -- **mkv** - `video/x-matroska` -- **webm** - `video/webm` -- **mov** - `video/quicktime` -- **avi** - `video/x-msvideo` -- **wmv** - `video/x-ms-wmv` -- **mpg** - `video/mpeg` -- **flv** - `video/x-flv` -- **3gp** - `video/3gpp` - -#### Audio - -- **mid** - `audio/midi` -- **mp3** - `audio/mpeg` -- **m4a** - `audio/m4a` -- **ogg** - `audio/ogg` -- **flac** - `audio/x-flac` -- **wav** - `audio/x-wav` -- **amr** - `audio/amr` -- **aac** - `audio/aac` -- **aiff** - `audio/x-aiff` - -#### Archive - -- **epub** - `application/epub+zip` -- **zip** - `application/zip` -- **tar** - `application/x-tar` -- **rar** - `application/vnd.rar` -- **gz** - `application/gzip` -- **bz2** - `application/x-bzip2` -- **7z** - `application/x-7z-compressed` -- **xz** - `application/x-xz` -- **zstd** - `application/zstd` -- **pdf** - `application/pdf` -- **exe** - `application/vnd.microsoft.portable-executable` -- **swf** - `application/x-shockwave-flash` -- **rtf** - `application/rtf` -- **iso** - `application/x-iso9660-image` -- **eot** - `application/octet-stream` -- **ps** - `application/postscript` -- **sqlite** - `application/vnd.sqlite3` -- **nes** - `application/x-nintendo-nes-rom` -- **crx** - `application/x-google-chrome-extension` -- **cab** - `application/vnd.ms-cab-compressed` -- **deb** - `application/vnd.debian.binary-package` -- **ar** - 
`application/x-unix-archive` -- **Z** - `application/x-compress` -- **lz** - `application/x-lzip` -- **rpm** - `application/x-rpm` -- **elf** - `application/x-executable` -- **dcm** - `application/dicom` - -#### Documents - -- **doc** - `application/msword` -- **docx** - `application/vnd.openxmlformats-officedocument.wordprocessingml.document` -- **xls** - `application/vnd.ms-excel` -- **xlsx** - `application/vnd.openxmlformats-officedocument.spreadsheetml.sheet` -- **ppt** - `application/vnd.ms-powerpoint` -- **pptx** - `application/vnd.openxmlformats-officedocument.presentationml.presentation` - -#### Font - -- **woff** - `application/font-woff` -- **woff2** - `application/font-woff` -- **ttf** - `application/font-sfnt` -- **otf** - `application/font-sfnt` - -#### Application - -- **wasm** - `application/wasm` -- **dex** - `application/vnd.android.dex` -- **dey** - `application/vnd.android.dey` - -## Benchmarks - -Measured using [real files](https://github.com/h2non/filetype/tree/master/fixtures). 
- -Environment: OSX x64 i7 2.7 Ghz - -```bash -BenchmarkMatchTar-8 1000000 1083 ns/op -BenchmarkMatchZip-8 1000000 1162 ns/op -BenchmarkMatchJpeg-8 1000000 1280 ns/op -BenchmarkMatchGif-8 1000000 1315 ns/op -BenchmarkMatchPng-8 1000000 1121 ns/op -``` - -## License - -MIT - Tomas Aparicio diff --git a/vendor/github.com/h2non/filetype/filetype.go b/vendor/github.com/h2non/filetype/filetype.go deleted file mode 100644 index c99691e5f..000000000 --- a/vendor/github.com/h2non/filetype/filetype.go +++ /dev/null @@ -1,102 +0,0 @@ -package filetype - -import ( - "errors" - - "github.com/h2non/filetype/matchers" - "github.com/h2non/filetype/types" -) - -// Types stores a map of supported types -var Types = types.Types - -// NewType creates and registers a new type -var NewType = types.NewType - -// Unknown represents an unknown file type -var Unknown = types.Unknown - -// ErrEmptyBuffer represents an empty buffer error -var ErrEmptyBuffer = errors.New("Empty buffer") - -// ErrUnknownBuffer represents a unknown buffer error -var ErrUnknownBuffer = errors.New("Unknown buffer type") - -// AddType registers a new file type -func AddType(ext, mime string) types.Type { - return types.NewType(ext, mime) -} - -// Is checks if a given buffer matches with the given file type extension -func Is(buf []byte, ext string) bool { - kind := types.Get(ext) - if kind != types.Unknown { - return IsType(buf, kind) - } - return false -} - -// IsExtension semantic alias to Is() -func IsExtension(buf []byte, ext string) bool { - return Is(buf, ext) -} - -// IsType checks if a given buffer matches with the given file type -func IsType(buf []byte, kind types.Type) bool { - matcher := matchers.Matchers[kind] - if matcher == nil { - return false - } - return matcher(buf) != types.Unknown -} - -// IsMIME checks if a given buffer matches with the given MIME type -func IsMIME(buf []byte, mime string) bool { - result := false - types.Types.Range(func(k, v interface{}) bool { - kind := v.(types.Type) - if 
kind.MIME.Value == mime { - matcher := matchers.Matchers[kind] - result = matcher(buf) != types.Unknown - return false - } - return true - }) - - return result -} - -// IsSupported checks if a given file extension is supported -func IsSupported(ext string) bool { - result := false - types.Types.Range(func(k, v interface{}) bool { - key := k.(string) - if key == ext { - result = true - return false - } - return true - }) - - return result -} - -// IsMIMESupported checks if a given MIME type is supported -func IsMIMESupported(mime string) bool { - result := false - types.Types.Range(func(k, v interface{}) bool { - kind := v.(types.Type) - if kind.MIME.Value == mime { - result = true - return false - } - return true - }) - - return result -} - -// GetType retrieves a Type by file extension -func GetType(ext string) types.Type { - return types.Get(ext) -} diff --git a/vendor/github.com/h2non/filetype/kind.go b/vendor/github.com/h2non/filetype/kind.go deleted file mode 100644 index 9608b0a7a..000000000 --- a/vendor/github.com/h2non/filetype/kind.go +++ /dev/null @@ -1,91 +0,0 @@ -package filetype - -import ( - "github.com/h2non/filetype/matchers" - "github.com/h2non/filetype/types" -) - -// Image tries to match a file as image type -func Image(buf []byte) (types.Type, error) { - return doMatchMap(buf, matchers.Image) -} - -// IsImage checks if the given buffer is an image type -func IsImage(buf []byte) bool { - kind, _ := Image(buf) - return kind != types.Unknown -} - -// Audio tries to match a file as audio type -func Audio(buf []byte) (types.Type, error) { - return doMatchMap(buf, matchers.Audio) -} - -// IsAudio checks if the given buffer is an audio type -func IsAudio(buf []byte) bool { - kind, _ := Audio(buf) - return kind != types.Unknown -} - -// Video tries to match a file as video type -func Video(buf []byte) (types.Type, error) { - return doMatchMap(buf, matchers.Video) -} - -// IsVideo checks if the given buffer is a video type -func IsVideo(buf []byte) bool 
{ - kind, _ := Video(buf) - return kind != types.Unknown -} - -// Font tries to match a file as text font type -func Font(buf []byte) (types.Type, error) { - return doMatchMap(buf, matchers.Font) -} - -// IsFont checks if the given buffer is a font type -func IsFont(buf []byte) bool { - kind, _ := Font(buf) - return kind != types.Unknown -} - -// Archive tries to match a file as generic archive type -func Archive(buf []byte) (types.Type, error) { - return doMatchMap(buf, matchers.Archive) -} - -// IsArchive checks if the given buffer is an archive type -func IsArchive(buf []byte) bool { - kind, _ := Archive(buf) - return kind != types.Unknown -} - -// Document tries to match a file as document type -func Document(buf []byte) (types.Type, error) { - return doMatchMap(buf, matchers.Document) -} - -// IsDocument checks if the given buffer is an document type -func IsDocument(buf []byte) bool { - kind, _ := Document(buf) - return kind != types.Unknown -} - -// Application tries to match a file as an application type -func Application(buf []byte) (types.Type, error) { - return doMatchMap(buf, matchers.Application) -} - -// IsApplication checks if the given buffer is an application type -func IsApplication(buf []byte) bool { - kind, _ := Application(buf) - return kind != types.Unknown -} - -func doMatchMap(buf []byte, machers matchers.Map) (types.Type, error) { - kind := MatchMap(buf, machers) - if kind != types.Unknown { - return kind, nil - } - return kind, ErrUnknownBuffer -} diff --git a/vendor/github.com/h2non/filetype/match.go b/vendor/github.com/h2non/filetype/match.go deleted file mode 100644 index 82cf80468..000000000 --- a/vendor/github.com/h2non/filetype/match.go +++ /dev/null @@ -1,90 +0,0 @@ -package filetype - -import ( - "io" - "os" - - "github.com/h2non/filetype/matchers" - "github.com/h2non/filetype/types" -) - -// Matchers is an alias to matchers.Matchers -var Matchers = matchers.Matchers - -// MatcherKeys is an alias to matchers.MatcherKeys -var 
MatcherKeys = &matchers.MatcherKeys - -// NewMatcher is an alias to matchers.NewMatcher -var NewMatcher = matchers.NewMatcher - -// Match infers the file type of a given buffer inspecting its magic numbers signature -func Match(buf []byte) (types.Type, error) { - length := len(buf) - if length == 0 { - return types.Unknown, ErrEmptyBuffer - } - - for _, kind := range *MatcherKeys { - checker := Matchers[kind] - match := checker(buf) - if match != types.Unknown && match.Extension != "" { - return match, nil - } - } - - return types.Unknown, nil -} - -// Get is an alias to Match() -func Get(buf []byte) (types.Type, error) { - return Match(buf) -} - -// MatchFile infers a file type for a file -func MatchFile(filepath string) (types.Type, error) { - file, err := os.Open(filepath) - if err != nil { - return types.Unknown, err - } - defer file.Close() - - return MatchReader(file) -} - -// MatchReader is convenient wrapper to Match() any Reader -func MatchReader(reader io.Reader) (types.Type, error) { - buffer := make([]byte, 8192) // 8K makes msooxml tests happy and allows for expanded custom file checks - - _, err := reader.Read(buffer) - if err != nil && err != io.EOF { - return types.Unknown, err - } - - return Match(buffer) -} - -// AddMatcher registers a new matcher type -func AddMatcher(fileType types.Type, matcher matchers.Matcher) matchers.TypeMatcher { - return matchers.NewMatcher(fileType, matcher) -} - -// Matches checks if the given buffer matches with some supported file type -func Matches(buf []byte) bool { - kind, _ := Match(buf) - return kind != types.Unknown -} - -// MatchMap performs a file matching against a map of match functions -func MatchMap(buf []byte, matchers matchers.Map) types.Type { - for kind, matcher := range matchers { - if matcher(buf) { - return kind - } - } - return types.Unknown -} - -// MatchesMap is an alias to Matches() but using matching against a map of match functions -func MatchesMap(buf []byte, matchers matchers.Map) bool { - 
return MatchMap(buf, matchers) != types.Unknown -} diff --git a/vendor/github.com/h2non/filetype/matchers/application.go b/vendor/github.com/h2non/filetype/matchers/application.go deleted file mode 100644 index 67fdab3d8..000000000 --- a/vendor/github.com/h2non/filetype/matchers/application.go +++ /dev/null @@ -1,43 +0,0 @@ -package matchers - -var ( - TypeWasm = newType("wasm", "application/wasm") - TypeDex = newType("dex", "application/vnd.android.dex") - TypeDey = newType("dey", "application/vnd.android.dey") -) - -var Application = Map{ - TypeWasm: Wasm, - TypeDex: Dex, - TypeDey: Dey, -} - -// Wasm detects a Web Assembly 1.0 filetype. -func Wasm(buf []byte) bool { - // WASM has starts with `\0asm`, followed by the version. - // http://webassembly.github.io/spec/core/binary/modules.html#binary-magic - return len(buf) >= 8 && - buf[0] == 0x00 && buf[1] == 0x61 && - buf[2] == 0x73 && buf[3] == 0x6D && - buf[4] == 0x01 && buf[5] == 0x00 && - buf[6] == 0x00 && buf[7] == 0x00 -} - -// Dex detects dalvik executable(DEX) -func Dex(buf []byte) bool { - // https://source.android.com/devices/tech/dalvik/dex-format#dex-file-magic - return len(buf) > 36 && - // magic - buf[0] == 0x64 && buf[1] == 0x65 && buf[2] == 0x78 && buf[3] == 0x0A && - // file sise - buf[36] == 0x70 -} - -// Dey Optimized Dalvik Executable(ODEX) -func Dey(buf []byte) bool { - return len(buf) > 100 && - // dey magic - buf[0] == 0x64 && buf[1] == 0x65 && buf[2] == 0x79 && buf[3] == 0x0A && - // dex - Dex(buf[40:100]) -} diff --git a/vendor/github.com/h2non/filetype/matchers/archive.go b/vendor/github.com/h2non/filetype/matchers/archive.go deleted file mode 100644 index ee618a925..000000000 --- a/vendor/github.com/h2non/filetype/matchers/archive.go +++ /dev/null @@ -1,211 +0,0 @@ -package matchers - -import "encoding/binary" - -const ( - ZstdMagicSkippableStart = 0x184D2A50 - ZstdMagicSkippableMask = 0xFFFFFFF0 -) - -var ( - TypeEpub = newType("epub", "application/epub+zip") - TypeZip = newType("zip", 
"application/zip") - TypeTar = newType("tar", "application/x-tar") - TypeRar = newType("rar", "application/vnd.rar") - TypeGz = newType("gz", "application/gzip") - TypeBz2 = newType("bz2", "application/x-bzip2") - Type7z = newType("7z", "application/x-7z-compressed") - TypeXz = newType("xz", "application/x-xz") - TypeZstd = newType("zst", "application/zstd") - TypePdf = newType("pdf", "application/pdf") - TypeExe = newType("exe", "application/vnd.microsoft.portable-executable") - TypeSwf = newType("swf", "application/x-shockwave-flash") - TypeRtf = newType("rtf", "application/rtf") - TypeEot = newType("eot", "application/octet-stream") - TypePs = newType("ps", "application/postscript") - TypeSqlite = newType("sqlite", "application/vnd.sqlite3") - TypeNes = newType("nes", "application/x-nintendo-nes-rom") - TypeCrx = newType("crx", "application/x-google-chrome-extension") - TypeCab = newType("cab", "application/vnd.ms-cab-compressed") - TypeDeb = newType("deb", "application/vnd.debian.binary-package") - TypeAr = newType("ar", "application/x-unix-archive") - TypeZ = newType("Z", "application/x-compress") - TypeLz = newType("lz", "application/x-lzip") - TypeRpm = newType("rpm", "application/x-rpm") - TypeElf = newType("elf", "application/x-executable") - TypeDcm = newType("dcm", "application/dicom") - TypeIso = newType("iso", "application/x-iso9660-image") - TypeMachO = newType("macho", "application/x-mach-binary") // Mach-O binaries have no common extension. 
-) - -var Archive = Map{ - TypeEpub: bytePrefixMatcher(epubMagic), - TypeZip: Zip, - TypeTar: Tar, - TypeRar: Rar, - TypeGz: bytePrefixMatcher(gzMagic), - TypeBz2: bytePrefixMatcher(bz2Magic), - Type7z: bytePrefixMatcher(sevenzMagic), - TypeXz: bytePrefixMatcher(xzMagic), - TypeZstd: Zst, - TypePdf: bytePrefixMatcher(pdfMagic), - TypeExe: bytePrefixMatcher(exeMagic), - TypeSwf: Swf, - TypeRtf: bytePrefixMatcher(rtfMagic), - TypeEot: Eot, - TypePs: bytePrefixMatcher(psMagic), - TypeSqlite: bytePrefixMatcher(sqliteMagic), - TypeNes: bytePrefixMatcher(nesMagic), - TypeCrx: bytePrefixMatcher(crxMagic), - TypeCab: Cab, - TypeDeb: bytePrefixMatcher(debMagic), - TypeAr: bytePrefixMatcher(arMagic), - TypeZ: Z, - TypeLz: bytePrefixMatcher(lzMagic), - TypeRpm: Rpm, - TypeElf: Elf, - TypeDcm: Dcm, - TypeIso: Iso, - TypeMachO: MachO, -} - -var ( - epubMagic = []byte{ - 0x50, 0x4B, 0x03, 0x04, 0x6D, 0x69, 0x6D, 0x65, - 0x74, 0x79, 0x70, 0x65, 0x61, 0x70, 0x70, 0x6C, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x2F, - 0x65, 0x70, 0x75, 0x62, 0x2B, 0x7A, 0x69, 0x70, - } - gzMagic = []byte{0x1F, 0x8B, 0x08} - bz2Magic = []byte{0x42, 0x5A, 0x68} - sevenzMagic = []byte{0x37, 0x7A, 0xBC, 0xAF, 0x27, 0x1C} - pdfMagic = []byte{0x25, 0x50, 0x44, 0x46} - exeMagic = []byte{0x4D, 0x5A} - rtfMagic = []byte{0x7B, 0x5C, 0x72, 0x74, 0x66} - nesMagic = []byte{0x4E, 0x45, 0x53, 0x1A} - crxMagic = []byte{0x43, 0x72, 0x32, 0x34} - psMagic = []byte{0x25, 0x21} - xzMagic = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00} - sqliteMagic = []byte{0x53, 0x51, 0x4C, 0x69} - debMagic = []byte{ - 0x21, 0x3C, 0x61, 0x72, 0x63, 0x68, 0x3E, 0x0A, - 0x64, 0x65, 0x62, 0x69, 0x61, 0x6E, 0x2D, 0x62, - 0x69, 0x6E, 0x61, 0x72, 0x79, - } - arMagic = []byte{0x21, 0x3C, 0x61, 0x72, 0x63, 0x68, 0x3E} - zstdMagic = []byte{0x28, 0xB5, 0x2F, 0xFD} - lzMagic = []byte{0x4C, 0x5A, 0x49, 0x50} -) - -func bytePrefixMatcher(magicPattern []byte) Matcher { - return func(data []byte) bool { - return compareBytes(data, magicPattern, 0) - 
} -} - -func Zip(buf []byte) bool { - return len(buf) > 3 && - buf[0] == 0x50 && buf[1] == 0x4B && - (buf[2] == 0x3 || buf[2] == 0x5 || buf[2] == 0x7) && - (buf[3] == 0x4 || buf[3] == 0x6 || buf[3] == 0x8) -} - -func Tar(buf []byte) bool { - return len(buf) > 261 && - buf[257] == 0x75 && buf[258] == 0x73 && - buf[259] == 0x74 && buf[260] == 0x61 && - buf[261] == 0x72 -} - -func Rar(buf []byte) bool { - return len(buf) > 6 && - buf[0] == 0x52 && buf[1] == 0x61 && buf[2] == 0x72 && - buf[3] == 0x21 && buf[4] == 0x1A && buf[5] == 0x7 && - (buf[6] == 0x0 || buf[6] == 0x1) -} - -func Swf(buf []byte) bool { - return len(buf) > 2 && - (buf[0] == 0x43 || buf[0] == 0x46) && - buf[1] == 0x57 && buf[2] == 0x53 -} - -func Cab(buf []byte) bool { - return len(buf) > 3 && - ((buf[0] == 0x4D && buf[1] == 0x53 && buf[2] == 0x43 && buf[3] == 0x46) || - (buf[0] == 0x49 && buf[1] == 0x53 && buf[2] == 0x63 && buf[3] == 0x28)) -} - -func Eot(buf []byte) bool { - return len(buf) > 35 && - buf[34] == 0x4C && buf[35] == 0x50 && - ((buf[8] == 0x02 && buf[9] == 0x00 && - buf[10] == 0x01) || (buf[8] == 0x01 && - buf[9] == 0x00 && buf[10] == 0x00) || - (buf[8] == 0x02 && buf[9] == 0x00 && - buf[10] == 0x02)) -} - -func Z(buf []byte) bool { - return len(buf) > 1 && - ((buf[0] == 0x1F && buf[1] == 0xA0) || - (buf[0] == 0x1F && buf[1] == 0x9D)) -} - -func Rpm(buf []byte) bool { - return len(buf) > 96 && - buf[0] == 0xED && buf[1] == 0xAB && - buf[2] == 0xEE && buf[3] == 0xDB -} - -func Elf(buf []byte) bool { - return len(buf) > 52 && - buf[0] == 0x7F && buf[1] == 0x45 && - buf[2] == 0x4C && buf[3] == 0x46 -} - -func Dcm(buf []byte) bool { - return len(buf) > 131 && - buf[128] == 0x44 && buf[129] == 0x49 && - buf[130] == 0x43 && buf[131] == 0x4D -} - -func Iso(buf []byte) bool { - return len(buf) > 32773 && - buf[32769] == 0x43 && buf[32770] == 0x44 && - buf[32771] == 0x30 && buf[32772] == 0x30 && - buf[32773] == 0x31 -} - -func MachO(buf []byte) bool { - return len(buf) > 3 && ((buf[0] == 0xFE && 
buf[1] == 0xED && buf[2] == 0xFA && buf[3] == 0xCF) || - (buf[0] == 0xFE && buf[1] == 0xED && buf[2] == 0xFA && buf[3] == 0xCE) || - (buf[0] == 0xBE && buf[1] == 0xBA && buf[2] == 0xFE && buf[3] == 0xCA) || - // Big endian versions below here... - (buf[0] == 0xCF && buf[1] == 0xFA && buf[2] == 0xED && buf[3] == 0xFE) || - (buf[0] == 0xCE && buf[1] == 0xFA && buf[2] == 0xED && buf[3] == 0xFE) || - (buf[0] == 0xCA && buf[1] == 0xFE && buf[2] == 0xBA && buf[3] == 0xBE)) -} - -// Zstandard compressed data is made of one or more frames. -// There are two frame formats defined by Zstandard: Zstandard frames and Skippable frames. -// See more details from https://tools.ietf.org/id/draft-kucherawy-dispatch-zstd-00.html#rfc.section.2 -func Zst(buf []byte) bool { - if compareBytes(buf, zstdMagic, 0) { - return true - } else { - // skippable frames - if len(buf) < 8 { - return false - } - if binary.LittleEndian.Uint32(buf[:4]) & ZstdMagicSkippableMask == ZstdMagicSkippableStart { - userDataLength := binary.LittleEndian.Uint32(buf[4:8]) - if len(buf) < 8 + int(userDataLength) { - return false - } - nextFrame := buf[8+userDataLength:] - return Zst(nextFrame) - } - return false - } -} diff --git a/vendor/github.com/h2non/filetype/matchers/audio.go b/vendor/github.com/h2non/filetype/matchers/audio.go deleted file mode 100644 index b34605aec..000000000 --- a/vendor/github.com/h2non/filetype/matchers/audio.go +++ /dev/null @@ -1,85 +0,0 @@ -package matchers - -var ( - TypeMidi = newType("mid", "audio/midi") - TypeMp3 = newType("mp3", "audio/mpeg") - TypeM4a = newType("m4a", "audio/m4a") - TypeOgg = newType("ogg", "audio/ogg") - TypeFlac = newType("flac", "audio/x-flac") - TypeWav = newType("wav", "audio/x-wav") - TypeAmr = newType("amr", "audio/amr") - TypeAac = newType("aac", "audio/aac") - TypeAiff = newType("aiff", "audio/x-aiff") -) - -var Audio = Map{ - TypeMidi: Midi, - TypeMp3: Mp3, - TypeM4a: M4a, - TypeOgg: Ogg, - TypeFlac: Flac, - TypeWav: Wav, - TypeAmr: Amr, - TypeAac: 
Aac, - TypeAiff: Aiff, -} - -func Midi(buf []byte) bool { - return len(buf) > 3 && - buf[0] == 0x4D && buf[1] == 0x54 && - buf[2] == 0x68 && buf[3] == 0x64 -} - -func Mp3(buf []byte) bool { - return len(buf) > 2 && - ((buf[0] == 0x49 && buf[1] == 0x44 && buf[2] == 0x33) || - (buf[0] == 0xFF && buf[1] == 0xfb)) -} - -func M4a(buf []byte) bool { - return len(buf) > 10 && - ((buf[4] == 0x66 && buf[5] == 0x74 && buf[6] == 0x79 && - buf[7] == 0x70 && buf[8] == 0x4D && buf[9] == 0x34 && buf[10] == 0x41) || - (buf[0] == 0x4D && buf[1] == 0x34 && buf[2] == 0x41 && buf[3] == 0x20)) -} - -func Ogg(buf []byte) bool { - return len(buf) > 3 && - buf[0] == 0x4F && buf[1] == 0x67 && - buf[2] == 0x67 && buf[3] == 0x53 -} - -func Flac(buf []byte) bool { - return len(buf) > 3 && - buf[0] == 0x66 && buf[1] == 0x4C && - buf[2] == 0x61 && buf[3] == 0x43 -} - -func Wav(buf []byte) bool { - return len(buf) > 11 && - buf[0] == 0x52 && buf[1] == 0x49 && - buf[2] == 0x46 && buf[3] == 0x46 && - buf[8] == 0x57 && buf[9] == 0x41 && - buf[10] == 0x56 && buf[11] == 0x45 -} - -func Amr(buf []byte) bool { - return len(buf) > 11 && - buf[0] == 0x23 && buf[1] == 0x21 && - buf[2] == 0x41 && buf[3] == 0x4D && - buf[4] == 0x52 && buf[5] == 0x0A -} - -func Aac(buf []byte) bool { - return len(buf) > 1 && - ((buf[0] == 0xFF && buf[1] == 0xF1) || - (buf[0] == 0xFF && buf[1] == 0xF9)) -} - -func Aiff(buf []byte) bool { - return len(buf) > 11 && - buf[0] == 0x46 && buf[1] == 0x4F && - buf[2] == 0x52 && buf[3] == 0x4D && - buf[8] == 0x41 && buf[9] == 0x49 && - buf[10] == 0x46 && buf[11] == 0x46 -} diff --git a/vendor/github.com/h2non/filetype/matchers/document.go b/vendor/github.com/h2non/filetype/matchers/document.go deleted file mode 100644 index b898c0ff7..000000000 --- a/vendor/github.com/h2non/filetype/matchers/document.go +++ /dev/null @@ -1,197 +0,0 @@ -package matchers - -import ( - "bytes" - "encoding/binary" -) - -var ( - TypeDoc = newType("doc", "application/msword") - TypeDocx = newType("docx", 
"application/vnd.openxmlformats-officedocument.wordprocessingml.document") - TypeXls = newType("xls", "application/vnd.ms-excel") - TypeXlsx = newType("xlsx", "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet") - TypePpt = newType("ppt", "application/vnd.ms-powerpoint") - TypePptx = newType("pptx", "application/vnd.openxmlformats-officedocument.presentationml.presentation") -) - -var Document = Map{ - TypeDoc: Doc, - TypeDocx: Docx, - TypeXls: Xls, - TypeXlsx: Xlsx, - TypePpt: Ppt, - TypePptx: Pptx, -} - -type docType int - -const ( - TYPE_DOC docType = iota - TYPE_DOCX - TYPE_XLS - TYPE_XLSX - TYPE_PPT - TYPE_PPTX - TYPE_OOXML -) - -//reference: https://bz.apache.org/ooo/show_bug.cgi?id=111457 -func Doc(buf []byte) bool { - if len(buf) > 513 { - return buf[0] == 0xD0 && buf[1] == 0xCF && - buf[2] == 0x11 && buf[3] == 0xE0 && - buf[512] == 0xEC && buf[513] == 0xA5 - } else { - return len(buf) > 3 && - buf[0] == 0xD0 && buf[1] == 0xCF && - buf[2] == 0x11 && buf[3] == 0xE0 - } -} - -func Docx(buf []byte) bool { - typ, ok := msooxml(buf) - return ok && typ == TYPE_DOCX -} - -func Xls(buf []byte) bool { - if len(buf) > 513 { - return buf[0] == 0xD0 && buf[1] == 0xCF && - buf[2] == 0x11 && buf[3] == 0xE0 && - buf[512] == 0x09 && buf[513] == 0x08 - } else { - return len(buf) > 3 && - buf[0] == 0xD0 && buf[1] == 0xCF && - buf[2] == 0x11 && buf[3] == 0xE0 - } -} - -func Xlsx(buf []byte) bool { - typ, ok := msooxml(buf) - return ok && typ == TYPE_XLSX -} - -func Ppt(buf []byte) bool { - if len(buf) > 513 { - return buf[0] == 0xD0 && buf[1] == 0xCF && - buf[2] == 0x11 && buf[3] == 0xE0 && - buf[512] == 0xA0 && buf[513] == 0x46 - } else { - return len(buf) > 3 && - buf[0] == 0xD0 && buf[1] == 0xCF && - buf[2] == 0x11 && buf[3] == 0xE0 - } -} - -func Pptx(buf []byte) bool { - typ, ok := msooxml(buf) - return ok && typ == TYPE_PPTX -} - -func msooxml(buf []byte) (typ docType, found bool) { - signature := []byte{'P', 'K', 0x03, 0x04} - - // start by checking for 
ZIP local file header signature - if ok := compareBytes(buf, signature, 0); !ok { - return - } - - // make sure the first file is correct - if v, ok := checkMSOoml(buf, 0x1E); ok { - return v, ok - } - - if !compareBytes(buf, []byte("[Content_Types].xml"), 0x1E) && - !compareBytes(buf, []byte("_rels/.rels"), 0x1E) && - !compareBytes(buf, []byte("docProps"), 0x1E) { - return - } - - // skip to the second local file header - // since some documents include a 520-byte extra field following the file - // header, we need to scan for the next header - startOffset := int(binary.LittleEndian.Uint32(buf[18:22]) + 49) - idx := search(buf, startOffset, 6000) - if idx == -1 { - return - } - - // now skip to the *third* local file header; again, we need to scan due to a - // 520-byte extra field following the file header - startOffset += idx + 4 + 26 - idx = search(buf, startOffset, 6000) - if idx == -1 { - return - } - - // and check the subdirectory name to determine which type of OOXML - // file we have. 
Correct the mimetype with the registered ones: - // http://technet.microsoft.com/en-us/library/cc179224.aspx - startOffset += idx + 4 + 26 - if typ, ok := checkMSOoml(buf, startOffset); ok { - return typ, ok - } - - // OpenOffice/Libreoffice orders ZIP entry differently, so check the 4th file - startOffset += 26 - idx = search(buf, startOffset, 6000) - if idx == -1 { - return TYPE_OOXML, true - } - - startOffset += idx + 4 + 26 - if typ, ok := checkMSOoml(buf, startOffset); ok { - return typ, ok - } else { - return TYPE_OOXML, true - } -} - -func compareBytes(slice, subSlice []byte, startOffset int) bool { - sl := len(subSlice) - - if startOffset+sl > len(slice) { - return false - } - - s := slice[startOffset : startOffset+sl] - for i := range s { - if subSlice[i] != s[i] { - return false - } - } - - return true -} - -func checkMSOoml(buf []byte, offset int) (typ docType, ok bool) { - ok = true - - switch { - case compareBytes(buf, []byte("word/"), offset): - typ = TYPE_DOCX - case compareBytes(buf, []byte("ppt/"), offset): - typ = TYPE_PPTX - case compareBytes(buf, []byte("xl/"), offset): - typ = TYPE_XLSX - default: - ok = false - } - - return -} - -func search(buf []byte, start, rangeNum int) int { - length := len(buf) - end := start + rangeNum - signature := []byte{'P', 'K', 0x03, 0x04} - - if end > length { - end = length - } - - if start >= end { - return -1 - } - - return bytes.Index(buf[start:end], signature) -} diff --git a/vendor/github.com/h2non/filetype/matchers/font.go b/vendor/github.com/h2non/filetype/matchers/font.go deleted file mode 100644 index f39171675..000000000 --- a/vendor/github.com/h2non/filetype/matchers/font.go +++ /dev/null @@ -1,45 +0,0 @@ -package matchers - -var ( - TypeWoff = newType("woff", "application/font-woff") - TypeWoff2 = newType("woff2", "application/font-woff") - TypeTtf = newType("ttf", "application/font-sfnt") - TypeOtf = newType("otf", "application/font-sfnt") -) - -var Font = Map{ - TypeWoff: Woff, - TypeWoff2: Woff2, 
- TypeTtf: Ttf, - TypeOtf: Otf, -} - -func Woff(buf []byte) bool { - return len(buf) > 7 && - buf[0] == 0x77 && buf[1] == 0x4F && - buf[2] == 0x46 && buf[3] == 0x46 && - buf[4] == 0x00 && buf[5] == 0x01 && - buf[6] == 0x00 && buf[7] == 0x00 -} - -func Woff2(buf []byte) bool { - return len(buf) > 7 && - buf[0] == 0x77 && buf[1] == 0x4F && - buf[2] == 0x46 && buf[3] == 0x32 && - buf[4] == 0x00 && buf[5] == 0x01 && - buf[6] == 0x00 && buf[7] == 0x00 -} - -func Ttf(buf []byte) bool { - return len(buf) > 4 && - buf[0] == 0x00 && buf[1] == 0x01 && - buf[2] == 0x00 && buf[3] == 0x00 && - buf[4] == 0x00 -} - -func Otf(buf []byte) bool { - return len(buf) > 4 && - buf[0] == 0x4F && buf[1] == 0x54 && - buf[2] == 0x54 && buf[3] == 0x4F && - buf[4] == 0x00 -} diff --git a/vendor/github.com/h2non/filetype/matchers/image.go b/vendor/github.com/h2non/filetype/matchers/image.go deleted file mode 100644 index 0465d0d68..000000000 --- a/vendor/github.com/h2non/filetype/matchers/image.go +++ /dev/null @@ -1,143 +0,0 @@ -package matchers - -import "github.com/h2non/filetype/matchers/isobmff" - -var ( - TypeJpeg = newType("jpg", "image/jpeg") - TypeJpeg2000 = newType("jp2", "image/jp2") - TypePng = newType("png", "image/png") - TypeGif = newType("gif", "image/gif") - TypeWebp = newType("webp", "image/webp") - TypeCR2 = newType("cr2", "image/x-canon-cr2") - TypeTiff = newType("tif", "image/tiff") - TypeBmp = newType("bmp", "image/bmp") - TypeJxr = newType("jxr", "image/vnd.ms-photo") - TypePsd = newType("psd", "image/vnd.adobe.photoshop") - TypeIco = newType("ico", "image/vnd.microsoft.icon") - TypeHeif = newType("heif", "image/heif") - TypeDwg = newType("dwg", "image/vnd.dwg") -) - -var Image = Map{ - TypeJpeg: Jpeg, - TypeJpeg2000: Jpeg2000, - TypePng: Png, - TypeGif: Gif, - TypeWebp: Webp, - TypeCR2: CR2, - TypeTiff: Tiff, - TypeBmp: Bmp, - TypeJxr: Jxr, - TypePsd: Psd, - TypeIco: Ico, - TypeHeif: Heif, - TypeDwg: Dwg, -} - -func Jpeg(buf []byte) bool { - return len(buf) > 2 && - 
buf[0] == 0xFF && - buf[1] == 0xD8 && - buf[2] == 0xFF -} - -func Jpeg2000(buf []byte) bool { - return len(buf) > 12 && - buf[0] == 0x0 && - buf[1] == 0x0 && - buf[2] == 0x0 && - buf[3] == 0xC && - buf[4] == 0x6A && - buf[5] == 0x50 && - buf[6] == 0x20 && - buf[7] == 0x20 && - buf[8] == 0xD && - buf[9] == 0xA && - buf[10] == 0x87 && - buf[11] == 0xA && - buf[12] == 0x0 -} - -func Png(buf []byte) bool { - return len(buf) > 3 && - buf[0] == 0x89 && buf[1] == 0x50 && - buf[2] == 0x4E && buf[3] == 0x47 -} - -func Gif(buf []byte) bool { - return len(buf) > 2 && - buf[0] == 0x47 && buf[1] == 0x49 && buf[2] == 0x46 -} - -func Webp(buf []byte) bool { - return len(buf) > 11 && - buf[8] == 0x57 && buf[9] == 0x45 && - buf[10] == 0x42 && buf[11] == 0x50 -} - -func CR2(buf []byte) bool { - return len(buf) > 10 && - ((buf[0] == 0x49 && buf[1] == 0x49 && buf[2] == 0x2A && buf[3] == 0x0) || // Little Endian - (buf[0] == 0x4D && buf[1] == 0x4D && buf[2] == 0x0 && buf[3] == 0x2A)) && // Big Endian - buf[8] == 0x43 && buf[9] == 0x52 && // CR2 magic word - buf[10] == 0x02 // CR2 major version -} - -func Tiff(buf []byte) bool { - return len(buf) > 10 && - ((buf[0] == 0x49 && buf[1] == 0x49 && buf[2] == 0x2A && buf[3] == 0x0) || // Little Endian - (buf[0] == 0x4D && buf[1] == 0x4D && buf[2] == 0x0 && buf[3] == 0x2A)) && // Big Endian - !CR2(buf) // To avoid conflicts differentiate Tiff from CR2 -} - -func Bmp(buf []byte) bool { - return len(buf) > 1 && - buf[0] == 0x42 && - buf[1] == 0x4D -} - -func Jxr(buf []byte) bool { - return len(buf) > 2 && - buf[0] == 0x49 && - buf[1] == 0x49 && - buf[2] == 0xBC -} - -func Psd(buf []byte) bool { - return len(buf) > 3 && - buf[0] == 0x38 && buf[1] == 0x42 && - buf[2] == 0x50 && buf[3] == 0x53 -} - -func Ico(buf []byte) bool { - return len(buf) > 3 && - buf[0] == 0x00 && buf[1] == 0x00 && - buf[2] == 0x01 && buf[3] == 0x00 -} - -func Heif(buf []byte) bool { - if !isobmff.IsISOBMFF(buf) { - return false - } - - majorBrand, _, compatibleBrands := 
isobmff.GetFtyp(buf) - if majorBrand == "heic" { - return true - } - - if majorBrand == "mif1" || majorBrand == "msf1" { - for _, compatibleBrand := range compatibleBrands { - if compatibleBrand == "heic" { - return true - } - } - } - - return false -} - -func Dwg(buf []byte) bool { - return len(buf) > 3 && - buf[0] == 0x41 && buf[1] == 0x43 && - buf[2] == 0x31 && buf[3] == 0x30 -} diff --git a/vendor/github.com/h2non/filetype/matchers/isobmff/isobmff.go b/vendor/github.com/h2non/filetype/matchers/isobmff/isobmff.go deleted file mode 100644 index b3e39bf59..000000000 --- a/vendor/github.com/h2non/filetype/matchers/isobmff/isobmff.go +++ /dev/null @@ -1,37 +0,0 @@ -package isobmff - -import "encoding/binary" - -// IsISOBMFF checks whether the given buffer represents ISO Base Media File Format data -func IsISOBMFF(buf []byte) bool { - if len(buf) < 16 || string(buf[4:8]) != "ftyp" { - return false - } - - if ftypLength := binary.BigEndian.Uint32(buf[0:4]); len(buf) < int(ftypLength) { - return false - } - - return true -} - -// GetFtyp returns the major brand, minor version and compatible brands of the ISO-BMFF data -func GetFtyp(buf []byte) (string, string, []string) { - if len(buf) < 17 { - return "", "", []string{""} - } - - ftypLength := binary.BigEndian.Uint32(buf[0:4]) - - majorBrand := string(buf[8:12]) - minorVersion := string(buf[12:16]) - - compatibleBrands := []string{} - for i := 16; i < int(ftypLength); i += 4 { - if len(buf) >= (i + 4) { - compatibleBrands = append(compatibleBrands, string(buf[i:i+4])) - } - } - - return majorBrand, minorVersion, compatibleBrands -} diff --git a/vendor/github.com/h2non/filetype/matchers/matchers.go b/vendor/github.com/h2non/filetype/matchers/matchers.go deleted file mode 100644 index 20d74d080..000000000 --- a/vendor/github.com/h2non/filetype/matchers/matchers.go +++ /dev/null @@ -1,51 +0,0 @@ -package matchers - -import ( - "github.com/h2non/filetype/types" -) - -// Internal shortcut to NewType -var newType = 
types.NewType - -// Matcher function interface as type alias -type Matcher func([]byte) bool - -// Type interface to store pairs of type with its matcher function -type Map map[types.Type]Matcher - -// Type specific matcher function interface -type TypeMatcher func([]byte) types.Type - -// Store registered file type matchers -var Matchers = make(map[types.Type]TypeMatcher) -var MatcherKeys []types.Type - -// Create and register a new type matcher function -func NewMatcher(kind types.Type, fn Matcher) TypeMatcher { - matcher := func(buf []byte) types.Type { - if fn(buf) { - return kind - } - return types.Unknown - } - - Matchers[kind] = matcher - // prepend here so any user defined matchers get added first - MatcherKeys = append([]types.Type{kind}, MatcherKeys...) - return matcher -} - -func register(matchers ...Map) { - MatcherKeys = MatcherKeys[:0] - for _, m := range matchers { - for kind, matcher := range m { - NewMatcher(kind, matcher) - } - } -} - -func init() { - // Arguments order is intentional - // Archive files will be checked last due to prepend above in func NewMatcher - register(Archive, Document, Font, Audio, Video, Image, Application) -} diff --git a/vendor/github.com/h2non/filetype/matchers/video.go b/vendor/github.com/h2non/filetype/matchers/video.go deleted file mode 100644 index e97cf28a1..000000000 --- a/vendor/github.com/h2non/filetype/matchers/video.go +++ /dev/null @@ -1,145 +0,0 @@ -package matchers - -import "bytes" - -var ( - TypeMp4 = newType("mp4", "video/mp4") - TypeM4v = newType("m4v", "video/x-m4v") - TypeMkv = newType("mkv", "video/x-matroska") - TypeWebm = newType("webm", "video/webm") - TypeMov = newType("mov", "video/quicktime") - TypeAvi = newType("avi", "video/x-msvideo") - TypeWmv = newType("wmv", "video/x-ms-wmv") - TypeMpeg = newType("mpg", "video/mpeg") - TypeFlv = newType("flv", "video/x-flv") - Type3gp = newType("3gp", "video/3gpp") -) - -var Video = Map{ - TypeMp4: Mp4, - TypeM4v: M4v, - TypeMkv: Mkv, - TypeWebm: Webm, - 
TypeMov: Mov, - TypeAvi: Avi, - TypeWmv: Wmv, - TypeMpeg: Mpeg, - TypeFlv: Flv, - Type3gp: Match3gp, -} - -func M4v(buf []byte) bool { - return len(buf) > 10 && - buf[4] == 0x66 && buf[5] == 0x74 && - buf[6] == 0x79 && buf[7] == 0x70 && - buf[8] == 0x4D && buf[9] == 0x34 && - buf[10] == 0x56 -} - -func Mkv(buf []byte) bool { - return len(buf) > 3 && - buf[0] == 0x1A && buf[1] == 0x45 && - buf[2] == 0xDF && buf[3] == 0xA3 && - containsMatroskaSignature(buf, []byte{'m', 'a', 't', 'r', 'o', 's', 'k', 'a'}) -} - -func Webm(buf []byte) bool { - return len(buf) > 3 && - buf[0] == 0x1A && buf[1] == 0x45 && - buf[2] == 0xDF && buf[3] == 0xA3 && - containsMatroskaSignature(buf, []byte{'w', 'e', 'b', 'm'}) -} - -func Mov(buf []byte) bool { - return len(buf) > 15 && ((buf[0] == 0x0 && buf[1] == 0x0 && - buf[2] == 0x0 && buf[3] == 0x14 && - buf[4] == 0x66 && buf[5] == 0x74 && - buf[6] == 0x79 && buf[7] == 0x70) || - (buf[4] == 0x6d && buf[5] == 0x6f && buf[6] == 0x6f && buf[7] == 0x76) || - (buf[4] == 0x6d && buf[5] == 0x64 && buf[6] == 0x61 && buf[7] == 0x74) || - (buf[12] == 0x6d && buf[13] == 0x64 && buf[14] == 0x61 && buf[15] == 0x74)) -} - -func Avi(buf []byte) bool { - return len(buf) > 10 && - buf[0] == 0x52 && buf[1] == 0x49 && - buf[2] == 0x46 && buf[3] == 0x46 && - buf[8] == 0x41 && buf[9] == 0x56 && - buf[10] == 0x49 -} - -func Wmv(buf []byte) bool { - return len(buf) > 9 && - buf[0] == 0x30 && buf[1] == 0x26 && - buf[2] == 0xB2 && buf[3] == 0x75 && - buf[4] == 0x8E && buf[5] == 0x66 && - buf[6] == 0xCF && buf[7] == 0x11 && - buf[8] == 0xA6 && buf[9] == 0xD9 -} - -func Mpeg(buf []byte) bool { - return len(buf) > 3 && - buf[0] == 0x0 && buf[1] == 0x0 && - buf[2] == 0x1 && buf[3] >= 0xb0 && - buf[3] <= 0xbf -} - -func Flv(buf []byte) bool { - return len(buf) > 3 && - buf[0] == 0x46 && buf[1] == 0x4C && - buf[2] == 0x56 && buf[3] == 0x01 -} - -func Mp4(buf []byte) bool { - return len(buf) > 11 && - (buf[4] == 'f' && buf[5] == 't' && buf[6] == 'y' && buf[7] == 'p') && - 
((buf[8] == 'a' && buf[9] == 'v' && buf[10] == 'c' && buf[11] == '1') || - (buf[8] == 'd' && buf[9] == 'a' && buf[10] == 's' && buf[11] == 'h') || - (buf[8] == 'i' && buf[9] == 's' && buf[10] == 'o' && buf[11] == '2') || - (buf[8] == 'i' && buf[9] == 's' && buf[10] == 'o' && buf[11] == '3') || - (buf[8] == 'i' && buf[9] == 's' && buf[10] == 'o' && buf[11] == '4') || - (buf[8] == 'i' && buf[9] == 's' && buf[10] == 'o' && buf[11] == '5') || - (buf[8] == 'i' && buf[9] == 's' && buf[10] == 'o' && buf[11] == '6') || - (buf[8] == 'i' && buf[9] == 's' && buf[10] == 'o' && buf[11] == 'm') || - (buf[8] == 'm' && buf[9] == 'm' && buf[10] == 'p' && buf[11] == '4') || - (buf[8] == 'm' && buf[9] == 'p' && buf[10] == '4' && buf[11] == '1') || - (buf[8] == 'm' && buf[9] == 'p' && buf[10] == '4' && buf[11] == '2') || - (buf[8] == 'm' && buf[9] == 'p' && buf[10] == '4' && buf[11] == 'v') || - (buf[8] == 'm' && buf[9] == 'p' && buf[10] == '7' && buf[11] == '1') || - (buf[8] == 'M' && buf[9] == 'S' && buf[10] == 'N' && buf[11] == 'V') || - (buf[8] == 'N' && buf[9] == 'D' && buf[10] == 'A' && buf[11] == 'S') || - (buf[8] == 'N' && buf[9] == 'D' && buf[10] == 'S' && buf[11] == 'C') || - (buf[8] == 'N' && buf[9] == 'S' && buf[10] == 'D' && buf[11] == 'C') || - (buf[8] == 'N' && buf[9] == 'D' && buf[10] == 'S' && buf[11] == 'H') || - (buf[8] == 'N' && buf[9] == 'D' && buf[10] == 'S' && buf[11] == 'M') || - (buf[8] == 'N' && buf[9] == 'D' && buf[10] == 'S' && buf[11] == 'P') || - (buf[8] == 'N' && buf[9] == 'D' && buf[10] == 'S' && buf[11] == 'S') || - (buf[8] == 'N' && buf[9] == 'D' && buf[10] == 'X' && buf[11] == 'C') || - (buf[8] == 'N' && buf[9] == 'D' && buf[10] == 'X' && buf[11] == 'H') || - (buf[8] == 'N' && buf[9] == 'D' && buf[10] == 'X' && buf[11] == 'M') || - (buf[8] == 'N' && buf[9] == 'D' && buf[10] == 'X' && buf[11] == 'P') || - (buf[8] == 'N' && buf[9] == 'D' && buf[10] == 'X' && buf[11] == 'S') || - (buf[8] == 'F' && buf[9] == '4' && buf[10] == 'V' && buf[11] == ' ') || - 
(buf[8] == 'F' && buf[9] == '4' && buf[10] == 'P' && buf[11] == ' ')) -} - -func Match3gp(buf []byte) bool { - return len(buf) > 10 && - buf[4] == 0x66 && buf[5] == 0x74 && buf[6] == 0x79 && - buf[7] == 0x70 && buf[8] == 0x33 && buf[9] == 0x67 && - buf[10] == 0x70 -} - -func containsMatroskaSignature(buf, subType []byte) bool { - limit := 4096 - if len(buf) < limit { - limit = len(buf) - } - - index := bytes.Index(buf[:limit], subType) - if index < 3 { - return false - } - - return buf[index-3] == 0x42 && buf[index-2] == 0x82 -} diff --git a/vendor/github.com/h2non/filetype/types/defaults.go b/vendor/github.com/h2non/filetype/types/defaults.go deleted file mode 100644 index 0d985a05d..000000000 --- a/vendor/github.com/h2non/filetype/types/defaults.go +++ /dev/null @@ -1,4 +0,0 @@ -package types - -// Unknown default type -var Unknown = NewType("unknown", "") diff --git a/vendor/github.com/h2non/filetype/types/mime.go b/vendor/github.com/h2non/filetype/types/mime.go deleted file mode 100644 index fe8ea822e..000000000 --- a/vendor/github.com/h2non/filetype/types/mime.go +++ /dev/null @@ -1,14 +0,0 @@ -package types - -// MIME stores the file MIME type values -type MIME struct { - Type string - Subtype string - Value string -} - -// Creates a new MIME type -func NewMIME(mime string) MIME { - kind, subtype := splitMime(mime) - return MIME{Type: kind, Subtype: subtype, Value: mime} -} diff --git a/vendor/github.com/h2non/filetype/types/split.go b/vendor/github.com/h2non/filetype/types/split.go deleted file mode 100644 index 68a5a8b3b..000000000 --- a/vendor/github.com/h2non/filetype/types/split.go +++ /dev/null @@ -1,11 +0,0 @@ -package types - -import "strings" - -func splitMime(s string) (string, string) { - x := strings.Split(s, "/") - if len(x) > 1 { - return x[0], x[1] - } - return x[0], "" -} diff --git a/vendor/github.com/h2non/filetype/types/type.go b/vendor/github.com/h2non/filetype/types/type.go deleted file mode 100644 index 5cf7dfc4b..000000000 --- 
a/vendor/github.com/h2non/filetype/types/type.go +++ /dev/null @@ -1,16 +0,0 @@ -package types - -// Type represents a file MIME type and its extension -type Type struct { - MIME MIME - Extension string -} - -// NewType creates a new Type -func NewType(ext, mime string) Type { - t := Type{ - MIME: NewMIME(mime), - Extension: ext, - } - return Add(t) -} diff --git a/vendor/github.com/h2non/filetype/types/types.go b/vendor/github.com/h2non/filetype/types/types.go deleted file mode 100644 index f59e256f0..000000000 --- a/vendor/github.com/h2non/filetype/types/types.go +++ /dev/null @@ -1,23 +0,0 @@ -package types - -import "sync" - -// Types Support concurrent map writes -var Types sync.Map - -// Add registers a new type in the package -func Add(t Type) Type { - Types.Store(t.Extension, t) - return t -} - -// Get retrieves a Type by extension -func Get(ext string) Type { - if tmp, ok := Types.Load(ext); ok { - kind := tmp.(Type) - if kind.Extension != "" { - return kind - } - } - return Unknown -} diff --git a/vendor/github.com/h2non/filetype/version.go b/vendor/github.com/h2non/filetype/version.go deleted file mode 100644 index d3730313f..000000000 --- a/vendor/github.com/h2non/filetype/version.go +++ /dev/null @@ -1,4 +0,0 @@ -package filetype - -// Version exposes the current package version. 
-const Version = "1.1.3" diff --git a/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/.MODULE_ROOT b/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/.MODULE_ROOT deleted file mode 100644 index e69de29bb..000000000 diff --git a/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/LICENSE b/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/LICENSE deleted file mode 100644 index 163291ed6..000000000 --- a/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/LICENSE +++ /dev/null @@ -1,9 +0,0 @@ -MIT LICENSE - -Copyright 2020 Dustin Oprea - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/README.md b/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/README.md deleted file mode 100644 index bf60ef504..000000000 --- a/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/README.md +++ /dev/null @@ -1,10 +0,0 @@ -[![Build Status](https://travis-ci.org/dsoprea/go-jpeg-image-structure/v2.svg?branch=master)](https://travis-ci.org/dsoprea/go-jpeg-image-structure/v2) -[![codecov](https://codecov.io/gh/dsoprea/go-jpeg-image-structure/branch/master/graph/badge.svg)](https://codecov.io/gh/dsoprea/go-jpeg-image-structure) -[![Go Report Card](https://goreportcard.com/badge/github.com/dsoprea/go-jpeg-image-structure/v2)](https://goreportcard.com/report/github.com/dsoprea/go-jpeg-image-structure/v2) -[![GoDoc](https://godoc.org/github.com/dsoprea/go-jpeg-image-structure/v2?status.svg)](https://godoc.org/github.com/dsoprea/go-jpeg-image-structure/v2) - -## Overview - -Parse raw JPEG data into individual segments of data. You can print or export this data, including hash digests for each. You can also parse/modify the EXIF data and write an updated image. - -EXIF, XMP, and IPTC data can also be extracted. The provided CLI tool can print this data as well. 
diff --git a/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/markers.go b/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/markers.go deleted file mode 100644 index a12171bd8..000000000 --- a/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/markers.go +++ /dev/null @@ -1,212 +0,0 @@ -package jpegstructure - -import ( - "github.com/dsoprea/go-logging" -) - -const ( - // MARKER_SOI marker - MARKER_SOI = 0xd8 - - // MARKER_EOI marker - MARKER_EOI = 0xd9 - - // MARKER_SOS marker - MARKER_SOS = 0xda - - // MARKER_SOD marker - MARKER_SOD = 0x93 - - // MARKER_DQT marker - MARKER_DQT = 0xdb - - // MARKER_APP0 marker - MARKER_APP0 = 0xe0 - - // MARKER_APP1 marker - MARKER_APP1 = 0xe1 - - // MARKER_APP2 marker - MARKER_APP2 = 0xe2 - - // MARKER_APP3 marker - MARKER_APP3 = 0xe3 - - // MARKER_APP4 marker - MARKER_APP4 = 0xe4 - - // MARKER_APP5 marker - MARKER_APP5 = 0xe5 - - // MARKER_APP6 marker - MARKER_APP6 = 0xe6 - - // MARKER_APP7 marker - MARKER_APP7 = 0xe7 - - // MARKER_APP8 marker - MARKER_APP8 = 0xe8 - - // MARKER_APP10 marker - MARKER_APP10 = 0xea - - // MARKER_APP12 marker - MARKER_APP12 = 0xec - - // MARKER_APP13 marker - MARKER_APP13 = 0xed - - // MARKER_APP14 marker - MARKER_APP14 = 0xee - - // MARKER_APP15 marker - MARKER_APP15 = 0xef - - // MARKER_COM marker - MARKER_COM = 0xfe - - // MARKER_CME marker - MARKER_CME = 0x64 - - // MARKER_SIZ marker - MARKER_SIZ = 0x51 - - // MARKER_DHT marker - MARKER_DHT = 0xc4 - - // MARKER_JPG marker - MARKER_JPG = 0xc8 - - // MARKER_DAC marker - MARKER_DAC = 0xcc - - // MARKER_SOF0 marker - MARKER_SOF0 = 0xc0 - - // MARKER_SOF1 marker - MARKER_SOF1 = 0xc1 - - // MARKER_SOF2 marker - MARKER_SOF2 = 0xc2 - - // MARKER_SOF3 marker - MARKER_SOF3 = 0xc3 - - // MARKER_SOF5 marker - MARKER_SOF5 = 0xc5 - - // MARKER_SOF6 marker - MARKER_SOF6 = 0xc6 - - // MARKER_SOF7 marker - MARKER_SOF7 = 0xc7 - - // MARKER_SOF9 marker - MARKER_SOF9 = 0xc9 - - // MARKER_SOF10 marker - MARKER_SOF10 = 
0xca - - // MARKER_SOF11 marker - MARKER_SOF11 = 0xcb - - // MARKER_SOF13 marker - MARKER_SOF13 = 0xcd - - // MARKER_SOF14 marker - MARKER_SOF14 = 0xce - - // MARKER_SOF15 marker - MARKER_SOF15 = 0xcf -) - -var ( - jpegLogger = log.NewLogger("jpegstructure.jpeg") - jpegMagicStandard = []byte{0xff, MARKER_SOI, 0xff} - jpegMagic2000 = []byte{0xff, 0x4f, 0xff} - - markerLen = map[byte]int{ - 0x00: 0, - 0x01: 0, - 0xd0: 0, - 0xd1: 0, - 0xd2: 0, - 0xd3: 0, - 0xd4: 0, - 0xd5: 0, - 0xd6: 0, - 0xd7: 0, - 0xd8: 0, - 0xd9: 0, - 0xda: 0, - - // J2C - 0x30: 0, - 0x31: 0, - 0x32: 0, - 0x33: 0, - 0x34: 0, - 0x35: 0, - 0x36: 0, - 0x37: 0, - 0x38: 0, - 0x39: 0, - 0x3a: 0, - 0x3b: 0, - 0x3c: 0, - 0x3d: 0, - 0x3e: 0, - 0x3f: 0, - 0x4f: 0, - 0x92: 0, - 0x93: 0, - - // J2C extensions - 0x74: 4, - 0x75: 4, - 0x77: 4, - } - - markerNames = map[byte]string{ - MARKER_SOI: "SOI", - MARKER_EOI: "EOI", - MARKER_SOS: "SOS", - MARKER_SOD: "SOD", - MARKER_DQT: "DQT", - MARKER_APP0: "APP0", - MARKER_APP1: "APP1", - MARKER_APP2: "APP2", - MARKER_APP3: "APP3", - MARKER_APP4: "APP4", - MARKER_APP5: "APP5", - MARKER_APP6: "APP6", - MARKER_APP7: "APP7", - MARKER_APP8: "APP8", - MARKER_APP10: "APP10", - MARKER_APP12: "APP12", - MARKER_APP13: "APP13", - MARKER_APP14: "APP14", - MARKER_APP15: "APP15", - MARKER_COM: "COM", - MARKER_CME: "CME", - MARKER_SIZ: "SIZ", - - MARKER_DHT: "DHT", - MARKER_JPG: "JPG", - MARKER_DAC: "DAC", - - MARKER_SOF0: "SOF0", - MARKER_SOF1: "SOF1", - MARKER_SOF2: "SOF2", - MARKER_SOF3: "SOF3", - MARKER_SOF5: "SOF5", - MARKER_SOF6: "SOF6", - MARKER_SOF7: "SOF7", - MARKER_SOF9: "SOF9", - MARKER_SOF10: "SOF10", - MARKER_SOF11: "SOF11", - MARKER_SOF13: "SOF13", - MARKER_SOF14: "SOF14", - MARKER_SOF15: "SOF15", - } -) diff --git a/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/media_parser.go b/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/media_parser.go deleted file mode 100644 index e6fc60bc4..000000000 --- 
a/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/media_parser.go +++ /dev/null @@ -1,139 +0,0 @@ -package jpegstructure - -import ( - "bufio" - "bytes" - "image" - "io" - "os" - - "image/jpeg" - - "github.com/dsoprea/go-logging" - "github.com/dsoprea/go-utility/v2/image" -) - -// JpegMediaParser is a `riimage.MediaParser` that knows how to parse JPEG -// images. -type JpegMediaParser struct { -} - -// NewJpegMediaParser returns a new JpegMediaParser. -func NewJpegMediaParser() *JpegMediaParser { - - // TODO(dustin): Add test - - return new(JpegMediaParser) -} - -// Parse parses a JPEG uses an `io.ReadSeeker`. Even if it fails, it will return -// the list of segments encountered prior to the failure. -func (jmp *JpegMediaParser) Parse(rs io.ReadSeeker, size int) (ec riimage.MediaContext, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - s := bufio.NewScanner(rs) - - // Since each segment can be any size, our buffer must allowed to grow as - // large as the file. - buffer := []byte{} - s.Buffer(buffer, size) - - js := NewJpegSplitter(nil) - s.Split(js.Split) - - for s.Scan() != false { - } - - // Always return the segments that were parsed, at least until there was an - // error. - ec = js.Segments() - - log.PanicIf(s.Err()) - - return ec, nil -} - -// ParseFile parses a JPEG file. Even if it fails, it will return the list of -// segments encountered prior to the failure. -func (jmp *JpegMediaParser) ParseFile(filepath string) (ec riimage.MediaContext, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // TODO(dustin): Add test - - f, err := os.Open(filepath) - log.PanicIf(err) - - defer f.Close() - - stat, err := f.Stat() - log.PanicIf(err) - - size := stat.Size() - - sl, err := jmp.Parse(f, int(size)) - - // Always return the segments that were parsed, at least until there was an - // error. 
- ec = sl - - log.PanicIf(err) - - return ec, nil -} - -// ParseBytes parses a JPEG byte-slice. Even if it fails, it will return the -// list of segments encountered prior to the failure. -func (jmp *JpegMediaParser) ParseBytes(data []byte) (ec riimage.MediaContext, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - br := bytes.NewReader(data) - - sl, err := jmp.Parse(br, len(data)) - - // Always return the segments that were parsed, at least until there was an - // error. - ec = sl - - log.PanicIf(err) - - return ec, nil -} - -// LooksLikeFormat indicates whether the data looks like a JPEG image. -func (jmp *JpegMediaParser) LooksLikeFormat(data []byte) bool { - if len(data) < 4 { - return false - } - - l := len(data) - if data[0] != 0xff || data[1] != MARKER_SOI || data[l-2] != 0xff || data[l-1] != MARKER_EOI { - return false - } - - return true -} - -// GetImage returns an image.Image-compatible struct. -func (jmp *JpegMediaParser) GetImage(r io.Reader) (img image.Image, err error) { - img, err = jpeg.Decode(r) - log.PanicIf(err) - - return img, nil -} - -var ( - // Enforce interface conformance. 
- _ riimage.MediaParser = new(JpegMediaParser) -) diff --git a/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/segment.go b/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/segment.go deleted file mode 100644 index 6b433bf1f..000000000 --- a/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/segment.go +++ /dev/null @@ -1,352 +0,0 @@ -package jpegstructure - -import ( - "bytes" - "errors" - "fmt" - - "crypto/sha1" - "encoding/hex" - - "github.com/dsoprea/go-exif/v3" - "github.com/dsoprea/go-exif/v3/common" - "github.com/dsoprea/go-iptc" - "github.com/dsoprea/go-logging" - "github.com/dsoprea/go-photoshop-info-format" - "github.com/dsoprea/go-utility/v2/image" -) - -const ( - pirIptcImageResourceId = uint16(0x0404) -) - -var ( - // exifPrefix is the prefix found at the top of an EXIF slice. This is JPEG- - // specific. - exifPrefix = []byte{'E', 'x', 'i', 'f', 0, 0} - - xmpPrefix = []byte("http://ns.adobe.com/xap/1.0/\000") - - ps30Prefix = []byte("Photoshop 3.0\000") -) - -var ( - // ErrNoXmp is returned if XMP data was requested but not found. - ErrNoXmp = errors.New("no XMP data") - - // ErrNoIptc is returned if IPTC data was requested but not found. - ErrNoIptc = errors.New("no IPTC data") - - // ErrNoPhotoshopData is returned if Photoshop info was requested but not - // found. - ErrNoPhotoshopData = errors.New("no photoshop data") -) - -// SofSegment has info read from a SOF segment. -type SofSegment struct { - // BitsPerSample is the bits-per-sample. - BitsPerSample byte - - // Width is the image width. - Width uint16 - - // Height is the image height. - Height uint16 - - // ComponentCount is the number of color components. - ComponentCount byte -} - -// String returns a string representation of the SOF segment. 
-func (ss SofSegment) String() string { - - // TODO(dustin): Add test - - return fmt.Sprintf("SOF", ss.BitsPerSample, ss.Width, ss.Height, ss.ComponentCount) -} - -// SegmentVisitor describes a segment-visitor struct. -type SegmentVisitor interface { - // HandleSegment is triggered for each segment encountered as well as the - // scan-data. - HandleSegment(markerId byte, markerName string, counter int, lastIsScanData bool) error -} - -// SofSegmentVisitor describes a visitor that is only called for each SOF -// segment. -type SofSegmentVisitor interface { - // HandleSof is called for each encountered SOF segment. - HandleSof(sof *SofSegment) error -} - -// Segment describes a single segment. -type Segment struct { - MarkerId byte - MarkerName string - Offset int - Data []byte - - photoshopInfo map[uint16]photoshopinfo.Photoshop30InfoRecord - iptcTags map[iptc.StreamTagKey][]iptc.TagData -} - -// SetExif encodes and sets EXIF data into this segment. -func (s *Segment) SetExif(ib *exif.IfdBuilder) (err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - ibe := exif.NewIfdByteEncoder() - - exifData, err := ibe.EncodeToExif(ib) - log.PanicIf(err) - - l := len(exifPrefix) - - s.Data = make([]byte, l+len(exifData)) - copy(s.Data[0:], exifPrefix) - copy(s.Data[l:], exifData) - - return nil -} - -// Exif returns an `exif.Ifd` instance for the EXIF data we currently have. 
-func (s *Segment) Exif() (rootIfd *exif.Ifd, data []byte, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - l := len(exifPrefix) - - rawExif := s.Data[l:] - - jpegLogger.Debugf(nil, "Attempting to parse (%d) byte EXIF blob (Exif).", len(rawExif)) - - im, err := exifcommon.NewIfdMappingWithStandard() - log.PanicIf(err) - - ti := exif.NewTagIndex() - - _, index, err := exif.Collect(im, ti, rawExif) - log.PanicIf(err) - - return index.RootIfd, rawExif, nil -} - -// FlatExif parses the EXIF data and just returns a list of tags. -func (s *Segment) FlatExif() (exifTags []exif.ExifTag, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // TODO(dustin): Add test - - l := len(exifPrefix) - - rawExif := s.Data[l:] - - jpegLogger.Debugf(nil, "Attempting to parse (%d) byte EXIF blob (FlatExif).", len(rawExif)) - - exifTags, _, err = exif.GetFlatExifData(rawExif, nil) - log.PanicIf(err) - - return exifTags, nil -} - -// EmbeddedString returns a string of properties that can be embedded into an -// longer string of properties. -func (s *Segment) EmbeddedString() string { - h := sha1.New() - h.Write(s.Data) - - // TODO(dustin): Add test - - digestString := hex.EncodeToString(h.Sum(nil)) - - return fmt.Sprintf("OFFSET=(0x%08x %10d) ID=(0x%02x) NAME=[%-5s] SIZE=(%10d) SHA1=[%s]", s.Offset, s.Offset, s.MarkerId, markerNames[s.MarkerId], len(s.Data), digestString) -} - -// String returns a descriptive string. -func (s *Segment) String() string { - - // TODO(dustin): Add test - - return fmt.Sprintf("Segment<%s>", s.EmbeddedString()) -} - -// IsExif returns true if EXIF data. 
-func (s *Segment) IsExif() bool { - if s.MarkerId != MARKER_APP1 { - return false - } - - // TODO(dustin): Add test - - l := len(exifPrefix) - - if len(s.Data) < l { - return false - } - - if bytes.Equal(s.Data[:l], exifPrefix) == false { - return false - } - - return true -} - -// IsXmp returns true if XMP data. -func (s *Segment) IsXmp() bool { - if s.MarkerId != MARKER_APP1 { - return false - } - - // TODO(dustin): Add test - - l := len(xmpPrefix) - - if len(s.Data) < l { - return false - } - - if bytes.Equal(s.Data[:l], xmpPrefix) == false { - return false - } - - return true -} - -// FormattedXmp returns a formatted XML string. This only makes sense for a -// segment comprised of XML data (like XMP). -func (s *Segment) FormattedXmp() (formatted string, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // TODO(dustin): Add test - - if s.IsXmp() != true { - log.Panicf("not an XMP segment") - } - - l := len(xmpPrefix) - - raw := string(s.Data[l:]) - - formatted, err = FormatXml(raw) - log.PanicIf(err) - - return formatted, nil -} - -func (s *Segment) parsePhotoshopInfo() (photoshopInfo map[uint16]photoshopinfo.Photoshop30InfoRecord, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - if s.photoshopInfo != nil { - return s.photoshopInfo, nil - } - - if s.MarkerId != MARKER_APP13 { - return nil, ErrNoPhotoshopData - } - - l := len(ps30Prefix) - - if len(s.Data) < l { - return nil, ErrNoPhotoshopData - } - - if bytes.Equal(s.Data[:l], ps30Prefix) == false { - return nil, ErrNoPhotoshopData - } - - data := s.Data[l:] - b := bytes.NewBuffer(data) - - // Parse it. - - pirIndex, err := photoshopinfo.ReadPhotoshop30Info(b) - log.PanicIf(err) - - s.photoshopInfo = pirIndex - - return s.photoshopInfo, nil -} - -// IsIptc returns true if XMP data. 
-func (s *Segment) IsIptc() bool { - // TODO(dustin): Add test - - // There's a cost to determining if there's IPTC data, so we won't do it - // more than once. - if s.iptcTags != nil { - return true - } - - photoshopInfo, err := s.parsePhotoshopInfo() - if err != nil { - if err == ErrNoPhotoshopData { - return false - } - - log.Panic(err) - } - - // Bail if the Photoshop info doesn't have IPTC data. - - _, found := photoshopInfo[pirIptcImageResourceId] - if found == false { - return false - } - - return true -} - -// Iptc parses Photoshop info (if present) and then parses the IPTC info inside -// it (if present). -func (s *Segment) Iptc() (tags map[iptc.StreamTagKey][]iptc.TagData, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // Cache the parse. - if s.iptcTags != nil { - return s.iptcTags, nil - } - - photoshopInfo, err := s.parsePhotoshopInfo() - log.PanicIf(err) - - iptcPir, found := photoshopInfo[pirIptcImageResourceId] - if found == false { - return nil, ErrNoIptc - } - - b := bytes.NewBuffer(iptcPir.Data) - - tags, err = iptc.ParseStream(b) - log.PanicIf(err) - - s.iptcTags = tags - - return tags, nil -} - -var ( - // Enforce interface conformance. - _ riimage.MediaContext = new(Segment) -) diff --git a/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/segment_list.go b/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/segment_list.go deleted file mode 100644 index b4f4d5810..000000000 --- a/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/segment_list.go +++ /dev/null @@ -1,416 +0,0 @@ -package jpegstructure - -import ( - "bytes" - "fmt" - "io" - - "crypto/sha1" - "encoding/binary" - - "github.com/dsoprea/go-exif/v3" - "github.com/dsoprea/go-exif/v3/common" - "github.com/dsoprea/go-iptc" - "github.com/dsoprea/go-logging" -) - -// SegmentList contains a slice of segments. 
-type SegmentList struct { - segments []*Segment -} - -// NewSegmentList returns a new SegmentList struct. -func NewSegmentList(segments []*Segment) (sl *SegmentList) { - if segments == nil { - segments = make([]*Segment, 0) - } - - return &SegmentList{ - segments: segments, - } -} - -// OffsetsEqual returns true is all segments have the same marker-IDs and were -// found at the same offsets. -func (sl *SegmentList) OffsetsEqual(o *SegmentList) bool { - if len(o.segments) != len(sl.segments) { - return false - } - - for i, s := range o.segments { - if s.MarkerId != sl.segments[i].MarkerId || s.Offset != sl.segments[i].Offset { - return false - } - } - - return true -} - -// Segments returns the underlying slice of segments. -func (sl *SegmentList) Segments() []*Segment { - return sl.segments -} - -// Add adds another segment. -func (sl *SegmentList) Add(s *Segment) { - sl.segments = append(sl.segments, s) -} - -// Print prints segment info. -func (sl *SegmentList) Print() { - if len(sl.segments) == 0 { - fmt.Printf("No segments.\n") - } else { - exifIndex, _, err := sl.FindExif() - if err != nil { - if err == exif.ErrNoExif { - exifIndex = -1 - } else { - log.Panic(err) - } - } - - xmpIndex, _, err := sl.FindXmp() - if err != nil { - if err == ErrNoXmp { - xmpIndex = -1 - } else { - log.Panic(err) - } - } - - iptcIndex, _, err := sl.FindIptc() - if err != nil { - if err == ErrNoIptc { - iptcIndex = -1 - } else { - log.Panic(err) - } - } - - for i, s := range sl.segments { - fmt.Printf("%2d: %s", i, s.EmbeddedString()) - - if i == exifIndex { - fmt.Printf(" [EXIF]") - } else if i == xmpIndex { - fmt.Printf(" [XMP]") - } else if i == iptcIndex { - fmt.Printf(" [IPTC]") - } - - fmt.Printf("\n") - } - } -} - -// Validate checks that all of the markers are actually located at all of the -// recorded offsets. 
-func (sl *SegmentList) Validate(data []byte) (err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - if len(sl.segments) < 2 { - log.Panicf("minimum segments not found") - } - - if sl.segments[0].MarkerId != MARKER_SOI { - log.Panicf("first segment not SOI") - } else if sl.segments[len(sl.segments)-1].MarkerId != MARKER_EOI { - log.Panicf("last segment not EOI") - } - - lastOffset := 0 - for i, s := range sl.segments { - if lastOffset != 0 && s.Offset <= lastOffset { - log.Panicf("segment offset not greater than the last: SEGMENT=(%d) (0x%08x) <= (0x%08x)", i, s.Offset, lastOffset) - } - - // The scan-data doesn't start with a marker. - if s.MarkerId == 0x0 { - continue - } - - o := s.Offset - if bytes.Compare(data[o:o+2], []byte{0xff, s.MarkerId}) != 0 { - log.Panicf("segment offset does not point to the start of a segment: SEGMENT=(%d) (0x%08x)", i, s.Offset) - } - - lastOffset = o - } - - return nil -} - -// FindExif returns the the segment that hosts the EXIF data (if present). -func (sl *SegmentList) FindExif() (index int, segment *Segment, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - for i, s := range sl.segments { - if s.IsExif() == true { - return i, s, nil - } - } - - return -1, nil, exif.ErrNoExif -} - -// FindXmp returns the the segment that hosts the XMP data (if present). -func (sl *SegmentList) FindXmp() (index int, segment *Segment, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - for i, s := range sl.segments { - if s.IsXmp() == true { - return i, s, nil - } - } - - return -1, nil, ErrNoXmp -} - -// FindIptc returns the the segment that hosts the IPTC data (if present). 
-func (sl *SegmentList) FindIptc() (index int, segment *Segment, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - for i, s := range sl.segments { - if s.IsIptc() == true { - return i, s, nil - } - } - - return -1, nil, ErrNoIptc -} - -// Exif returns an `exif.Ifd` instance for the EXIF data we currently have. -func (sl *SegmentList) Exif() (rootIfd *exif.Ifd, rawExif []byte, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - _, s, err := sl.FindExif() - log.PanicIf(err) - - rootIfd, rawExif, err = s.Exif() - log.PanicIf(err) - - return rootIfd, rawExif, nil -} - -// Iptc returns embedded IPTC data if present. -func (sl *SegmentList) Iptc() (tags map[iptc.StreamTagKey][]iptc.TagData, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // TODO(dustin): Add comment and return data. - - _, s, err := sl.FindIptc() - log.PanicIf(err) - - tags, err = s.Iptc() - log.PanicIf(err) - - return tags, nil -} - -// ConstructExifBuilder returns an `exif.IfdBuilder` instance (needed for -// modifying) preloaded with all existing tags. -func (sl *SegmentList) ConstructExifBuilder() (rootIb *exif.IfdBuilder, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - rootIfd, _, err := sl.Exif() - if log.Is(err, exif.ErrNoExif) == true { - // No EXIF. Just create a boilerplate builder. 
- - im := exifcommon.NewIfdMapping() - - err := exifcommon.LoadStandardIfds(im) - log.PanicIf(err) - - ti := exif.NewTagIndex() - - rootIb := - exif.NewIfdBuilder( - im, - ti, - exifcommon.IfdStandardIfdIdentity, - exifcommon.EncodeDefaultByteOrder) - - return rootIb, nil - } else if err != nil { - log.Panic(err) - } - - rootIb = exif.NewIfdBuilderFromExistingChain(rootIfd) - - return rootIb, nil -} - -// DumpExif returns an unstructured list of tags (useful when just reviewing). -func (sl *SegmentList) DumpExif() (segmentIndex int, segment *Segment, exifTags []exif.ExifTag, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - segmentIndex, s, err := sl.FindExif() - if err != nil { - if err == exif.ErrNoExif { - return 0, nil, nil, err - } - - log.Panic(err) - } - - exifTags, err = s.FlatExif() - log.PanicIf(err) - - return segmentIndex, s, exifTags, nil -} - -func makeEmptyExifSegment() (s *Segment) { - - // TODO(dustin): Add test - - return &Segment{ - MarkerId: MARKER_APP1, - } -} - -// SetExif encodes and sets EXIF data into the given segment. If `index` is -1, -// append a new segment. -func (sl *SegmentList) SetExif(ib *exif.IfdBuilder) (err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - _, s, err := sl.FindExif() - if err != nil { - if log.Is(err, exif.ErrNoExif) == false { - log.Panic(err) - } - - s = makeEmptyExifSegment() - - prefix := sl.segments[:1] - - // Install it near the beginning where we know it's safe. We can't - // insert it after the EOI segment, and there might be more than one - // depending on implementation and/or lax adherence to the standard. - tail := append([]*Segment{s}, sl.segments[1:]...) - - sl.segments = append(prefix, tail...) - } - - err = s.SetExif(ib) - log.PanicIf(err) - - return nil -} - -// DropExif will drop the EXIF data if present. 
-func (sl *SegmentList) DropExif() (wasDropped bool, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // TODO(dustin): Add test - - i, _, err := sl.FindExif() - if err == nil { - // Found. - sl.segments = append(sl.segments[:i], sl.segments[i+1:]...) - - return true, nil - } else if log.Is(err, exif.ErrNoExif) == false { - log.Panic(err) - } - - // Not found. - return false, nil -} - -// Write writes the segment data to the given `io.Writer`. -func (sl *SegmentList) Write(w io.Writer) (err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - offset := 0 - - for i, s := range sl.segments { - h := sha1.New() - h.Write(s.Data) - - // The scan-data will have a marker-ID of (0) because it doesn't have a - // marker-ID or length. - if s.MarkerId != 0 { - _, err := w.Write([]byte{0xff}) - log.PanicIf(err) - - offset++ - - _, err = w.Write([]byte{s.MarkerId}) - log.PanicIf(err) - - offset++ - - sizeLen, found := markerLen[s.MarkerId] - if found == false || sizeLen == 2 { - sizeLen = 2 - l := uint16(len(s.Data) + sizeLen) - - err = binary.Write(w, binary.BigEndian, &l) - log.PanicIf(err) - - offset += 2 - } else if sizeLen == 4 { - l := uint32(len(s.Data) + sizeLen) - - err = binary.Write(w, binary.BigEndian, &l) - log.PanicIf(err) - - offset += 4 - } else if sizeLen != 0 { - log.Panicf("not a supported marker-size: SEGMENT-INDEX=(%d) MARKER-ID=(0x%02x) MARKER-SIZE-LEN=(%d)", i, s.MarkerId, sizeLen) - } - } - - _, err := w.Write(s.Data) - log.PanicIf(err) - - offset += len(s.Data) - } - - return nil -} diff --git a/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/splitter.go b/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/splitter.go deleted file mode 100644 index 1856beddf..000000000 --- a/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/splitter.go +++ /dev/null @@ -1,437 +0,0 @@ -package 
jpegstructure - -import ( - "bufio" - "bytes" - "io" - - "encoding/binary" - - "github.com/dsoprea/go-logging" -) - -// JpegSplitter uses the Go stream splitter to divide the JPEG stream into -// segments. -type JpegSplitter struct { - lastMarkerId byte - lastMarkerName string - counter int - lastIsScanData bool - visitor interface{} - - currentOffset int - segments *SegmentList - - scandataOffset int -} - -// NewJpegSplitter returns a new JpegSplitter. -func NewJpegSplitter(visitor interface{}) *JpegSplitter { - return &JpegSplitter{ - segments: NewSegmentList(nil), - visitor: visitor, - } -} - -// Segments returns all found segments. -func (js *JpegSplitter) Segments() *SegmentList { - return js.segments -} - -// MarkerId returns the ID of the last processed marker. -func (js *JpegSplitter) MarkerId() byte { - return js.lastMarkerId -} - -// MarkerName returns the name of the last-processed marker. -func (js *JpegSplitter) MarkerName() string { - return js.lastMarkerName -} - -// Counter returns the number of processed segments. -func (js *JpegSplitter) Counter() int { - return js.counter -} - -// IsScanData returns whether the last processed segment was scan-data. -func (js *JpegSplitter) IsScanData() bool { - return js.lastIsScanData -} - -func (js *JpegSplitter) processScanData(data []byte) (advanceBytes int, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - // Search through the segment, past all 0xff's therein, until we encounter - // the EOI segment. - - dataLength := -1 - for i := js.scandataOffset; i < len(data); i++ { - thisByte := data[i] - - if i == 0 { - continue - } - - lastByte := data[i-1] - if lastByte != 0xff { - continue - } - - if thisByte == 0x00 || thisByte >= 0xd0 && thisByte <= 0xd8 { - continue - } - - // After all of the other checks, this means that we're on the EOF - // segment. 
- if thisByte != MARKER_EOI { - continue - } - - dataLength = i - 1 - break - } - - if dataLength == -1 { - // On the next pass, start on the last byte of this pass, just in case - // the first byte of the two-byte sequence is here. - js.scandataOffset = len(data) - 1 - - jpegLogger.Debugf(nil, "Scan-data not fully available (%d).", len(data)) - return 0, nil - } - - js.lastIsScanData = true - js.lastMarkerId = 0 - js.lastMarkerName = "" - - // Note that we don't increment the counter since this isn't an actual - // segment. - - jpegLogger.Debugf(nil, "End of scan-data.") - - err = js.handleSegment(0x0, "!SCANDATA", 0x0, data[:dataLength]) - log.PanicIf(err) - - return dataLength, nil -} - -func (js *JpegSplitter) readSegment(data []byte) (count int, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - if js.counter == 0 { - // Verify magic bytes. - - if len(data) < 3 { - jpegLogger.Debugf(nil, "Not enough (1)") - return 0, nil - } - - if data[0] == jpegMagic2000[0] && data[1] == jpegMagic2000[1] && data[2] == jpegMagic2000[2] { - // TODO(dustin): Revisit JPEG2000 support. - log.Panicf("JPEG2000 not supported") - } - - if data[0] != jpegMagicStandard[0] || data[1] != jpegMagicStandard[1] || data[2] != jpegMagicStandard[2] { - log.Panicf("file does not look like a JPEG: (%02x) (%02x) (%02x)", data[0], data[1], data[2]) - } - } - - chunkLength := len(data) - - jpegLogger.Debugf(nil, "SPLIT: LEN=(%d) COUNTER=(%d)", chunkLength, js.counter) - - if js.scanDataIsNext() == true { - // If the last segment was the SOS, we're currently sitting on scan data. - // Search for the EOI marker afterward in order to know how much data - // there is. Return this as its own token. 
- // - // REF: https://stackoverflow.com/questions/26715684/parsing-jpeg-sos-marker - - advanceBytes, err := js.processScanData(data) - log.PanicIf(err) - - // This will either return 0 and implicitly request that we need more - // data and then need to run again or will return an actual byte count - // to progress by. - - return advanceBytes, nil - } else if js.lastMarkerId == MARKER_EOI { - // We have more data following the EOI, which is unexpected. There - // might be non-standard cruft at the end of the file. Terminate the - // parse because the file-structure is, technically, complete at this - // point. - - return 0, io.EOF - } else { - js.lastIsScanData = false - } - - // If we're here, we're supposed to be sitting on the 0xff bytes at the - // beginning of a segment (just before the marker). - - if data[0] != 0xff { - log.Panicf("not on new segment marker @ (%d): (%02X)", js.currentOffset, data[0]) - } - - i := 0 - found := false - for ; i < chunkLength; i++ { - jpegLogger.Debugf(nil, "Prefix check: (%d) %02X", i, data[i]) - - if data[i] != 0xff { - found = true - break - } - } - - jpegLogger.Debugf(nil, "Skipped over leading 0xFF bytes: (%d)", i) - - if found == false || i >= chunkLength { - jpegLogger.Debugf(nil, "Not enough (3)") - return 0, nil - } - - markerId := data[i] - - js.lastMarkerName = markerNames[markerId] - - sizeLen, found := markerLen[markerId] - jpegLogger.Debugf(nil, "MARKER-ID=%x SIZELEN=%v FOUND=%v", markerId, sizeLen, found) - - i++ - - b := bytes.NewBuffer(data[i:]) - payloadLength := 0 - - // marker-ID + size => 2 + - headerSize := 2 + sizeLen - - if found == false { - - // It's not one of the static-length markers. Read the length. - // - // The length is an unsigned 16-bit network/big-endian. 
- - // marker-ID + size => 2 + 2 - headerSize = 2 + 2 - - if i+2 >= chunkLength { - jpegLogger.Debugf(nil, "Not enough (4)") - return 0, nil - } - - l := uint16(0) - err = binary.Read(b, binary.BigEndian, &l) - log.PanicIf(err) - - if l < 2 { - log.Panicf("length of size read for non-special marker (%02x) is unexpectedly less than two.", markerId) - } - - // (l includes the bytes of the length itself.) - payloadLength = int(l) - 2 - jpegLogger.Debugf(nil, "DataLength (dynamically-sized segment): (%d)", payloadLength) - - i += 2 - } else if sizeLen > 0 { - - // Accommodates the non-zero markers in our marker index, which only - // represent J2C extensions. - // - // The length is an unsigned 32-bit network/big-endian. - - // TODO(dustin): !! This needs to be tested, but we need an image. - - if sizeLen != 4 { - log.Panicf("known non-zero marker is not four bytes, which is not currently handled: M=(%x)", markerId) - } - - if i+4 >= chunkLength { - jpegLogger.Debugf(nil, "Not enough (5)") - return 0, nil - } - - l := uint32(0) - err = binary.Read(b, binary.BigEndian, &l) - log.PanicIf(err) - - payloadLength = int(l) - 4 - jpegLogger.Debugf(nil, "DataLength (four-byte-length segment): (%u)", l) - - i += 4 - } - - jpegLogger.Debugf(nil, "PAYLOAD-LENGTH: %d", payloadLength) - - payload := data[i:] - - if payloadLength < 0 { - log.Panicf("payload length less than zero: (%d)", payloadLength) - } - - i += int(payloadLength) - - if i > chunkLength { - jpegLogger.Debugf(nil, "Not enough (6)") - return 0, nil - } - - jpegLogger.Debugf(nil, "Found whole segment.") - - js.lastMarkerId = markerId - - payloadWindow := payload[:payloadLength] - err = js.handleSegment(markerId, js.lastMarkerName, headerSize, payloadWindow) - log.PanicIf(err) - - js.counter++ - - jpegLogger.Debugf(nil, "Returning advance of (%d)", i) - - return i, nil -} - -func (js *JpegSplitter) scanDataIsNext() bool { - return js.lastMarkerId == MARKER_SOS -} - -// Split is the base splitting function that 
satisfies `bufio.SplitFunc`. -func (js *JpegSplitter) Split(data []byte, atEOF bool) (advance int, token []byte, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - for len(data) > 0 { - currentAdvance, err := js.readSegment(data) - if err != nil { - if err == io.EOF { - // We've encountered an EOI marker. - return 0, nil, err - } - - log.Panic(err) - } - - if currentAdvance == 0 { - if len(data) > 0 && atEOF == true { - // Provide a little context in the error message. - - if js.scanDataIsNext() == true { - // Yes, we've ran into this. - - log.Panicf("scan-data is unbounded; EOI not encountered before EOF") - } else { - log.Panicf("partial segment data encountered before scan-data") - } - } - - // We don't have enough data for another segment. - break - } - - data = data[currentAdvance:] - advance += currentAdvance - } - - return advance, nil, nil -} - -func (js *JpegSplitter) parseSof(data []byte) (sof *SofSegment, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - stream := bytes.NewBuffer(data) - buffer := bufio.NewReader(stream) - - bitsPerSample, err := buffer.ReadByte() - log.PanicIf(err) - - height := uint16(0) - err = binary.Read(buffer, binary.BigEndian, &height) - log.PanicIf(err) - - width := uint16(0) - err = binary.Read(buffer, binary.BigEndian, &width) - log.PanicIf(err) - - componentCount, err := buffer.ReadByte() - log.PanicIf(err) - - sof = &SofSegment{ - BitsPerSample: bitsPerSample, - Width: width, - Height: height, - ComponentCount: componentCount, - } - - return sof, nil -} - -func (js *JpegSplitter) parseAppData(markerId byte, data []byte) (err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - return nil -} - -func (js *JpegSplitter) handleSegment(markerId byte, markerName string, headerSize int, payload []byte) (err error) { - defer func() { - if state 
:= recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - cloned := make([]byte, len(payload)) - copy(cloned, payload) - - s := &Segment{ - MarkerId: markerId, - MarkerName: markerName, - Offset: js.currentOffset, - Data: cloned, - } - - jpegLogger.Debugf(nil, "Encountered marker (0x%02x) [%s] at offset (%d)", markerId, markerName, js.currentOffset) - - js.currentOffset += headerSize + len(payload) - - js.segments.Add(s) - - sv, ok := js.visitor.(SegmentVisitor) - if ok == true { - err = sv.HandleSegment(js.lastMarkerId, js.lastMarkerName, js.counter, js.lastIsScanData) - log.PanicIf(err) - } - - if markerId >= MARKER_SOF0 && markerId <= MARKER_SOF15 { - ssv, ok := js.visitor.(SofSegmentVisitor) - if ok == true { - sof, err := js.parseSof(payload) - log.PanicIf(err) - - err = ssv.HandleSof(sof) - log.PanicIf(err) - } - } else if markerId >= MARKER_APP0 && markerId <= MARKER_APP15 { - err := js.parseAppData(markerId, payload) - log.PanicIf(err) - } - - return nil -} diff --git a/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/testing_common.go b/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/testing_common.go deleted file mode 100644 index e7169c2f0..000000000 --- a/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/testing_common.go +++ /dev/null @@ -1,73 +0,0 @@ -package jpegstructure - -import ( - "os" - "path" - - "github.com/dsoprea/go-logging" -) - -var ( - testImageRelFilepath = "NDM_8901.jpg" -) - -var ( - moduleRootPath = "" - assetsPath = "" -) - -// GetModuleRootPath returns the root-path of the module. 
-func GetModuleRootPath() string { - if moduleRootPath == "" { - moduleRootPath = os.Getenv("JPEG_MODULE_ROOT_PATH") - if moduleRootPath != "" { - return moduleRootPath - } - - currentWd, err := os.Getwd() - log.PanicIf(err) - - currentPath := currentWd - visited := make([]string, 0) - - for { - tryStampFilepath := path.Join(currentPath, ".MODULE_ROOT") - - _, err := os.Stat(tryStampFilepath) - if err != nil && os.IsNotExist(err) != true { - log.Panic(err) - } else if err == nil { - break - } - - visited = append(visited, tryStampFilepath) - - currentPath = path.Dir(currentPath) - if currentPath == "/" { - log.Panicf("could not find module-root: %v", visited) - } - } - - moduleRootPath = currentPath - } - - return moduleRootPath -} - -// GetTestAssetsPath returns the path of the test-assets. -func GetTestAssetsPath() string { - if assetsPath == "" { - moduleRootPath := GetModuleRootPath() - assetsPath = path.Join(moduleRootPath, "assets") - } - - return assetsPath -} - -// GetTestImageFilepath returns the file-path of the common test-image. -func GetTestImageFilepath() string { - assetsPath := GetTestAssetsPath() - filepath := path.Join(assetsPath, testImageRelFilepath) - - return filepath -} diff --git a/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/utility.go b/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/utility.go deleted file mode 100644 index 1c618ba6d..000000000 --- a/vendor/github.com/superseriousbusiness/go-jpeg-image-structure/v2/utility.go +++ /dev/null @@ -1,110 +0,0 @@ -package jpegstructure - -import ( - "bytes" - "fmt" - "sort" - "strings" - - "github.com/dsoprea/go-logging" - "github.com/go-xmlfmt/xmlfmt" -) - -// DumpBytes prints the hex for a given byte-slice. -func DumpBytes(data []byte) { - fmt.Printf("DUMP: ") - for _, x := range data { - fmt.Printf("%02x ", x) - } - - fmt.Printf("\n") -} - -// DumpBytesClause prints a Go-formatted byte-slice expression. 
-func DumpBytesClause(data []byte) { - fmt.Printf("DUMP: ") - - fmt.Printf("[]byte { ") - - for i, x := range data { - fmt.Printf("0x%02x", x) - - if i < len(data)-1 { - fmt.Printf(", ") - } - } - - fmt.Printf(" }\n") -} - -// DumpBytesToString returns a string of hex-encoded bytes. -func DumpBytesToString(data []byte) string { - b := new(bytes.Buffer) - - for i, x := range data { - _, err := b.WriteString(fmt.Sprintf("%02x", x)) - log.PanicIf(err) - - if i < len(data)-1 { - _, err := b.WriteRune(' ') - log.PanicIf(err) - } - } - - return b.String() -} - -// DumpBytesClauseToString returns a string of Go-formatted byte values. -func DumpBytesClauseToString(data []byte) string { - b := new(bytes.Buffer) - - for i, x := range data { - _, err := b.WriteString(fmt.Sprintf("0x%02x", x)) - log.PanicIf(err) - - if i < len(data)-1 { - _, err := b.WriteString(", ") - log.PanicIf(err) - } - } - - return b.String() -} - -// FormatXml prettifies XML data. -func FormatXml(raw string) (formatted string, err error) { - defer func() { - if state := recover(); state != nil { - err = log.Wrap(state.(error)) - } - }() - - formatted = xmlfmt.FormatXML(raw, " ", " ") - formatted = strings.TrimSpace(formatted) - - return formatted, nil -} - -// SortStringStringMap sorts a string-string dictionary and returns it as a list -// of 2-tuples. -func SortStringStringMap(data map[string]string) (sorted [][2]string) { - // Sort keys. - - sortedKeys := make([]string, len(data)) - i := 0 - for key := range data { - sortedKeys[i] = key - i++ - } - - sort.Strings(sortedKeys) - - // Build result. 
- - sorted = make([][2]string, len(sortedKeys)) - for i, key := range sortedKeys { - sorted[i] = [2]string{key, data[key]} - } - - return sorted -} diff --git a/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/.MODULE_ROOT b/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/.MODULE_ROOT deleted file mode 100644 index e69de29bb..000000000 diff --git a/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/LICENSE b/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/LICENSE deleted file mode 100644 index 163291ed6..000000000 --- a/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/LICENSE +++ /dev/null @@ -1,9 +0,0 @@ -MIT LICENSE - -Copyright 2020 Dustin Oprea - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/chunk_decoder.go b/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/chunk_decoder.go deleted file mode 100644 index 518bc91ad..000000000 --- a/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/chunk_decoder.go +++ /dev/null @@ -1,81 +0,0 @@ -package pngstructure - -import ( - "bytes" - "fmt" - - "encoding/binary" -) - -type ChunkDecoder struct { -} - -func NewChunkDecoder() *ChunkDecoder { - return new(ChunkDecoder) -} - -func (cd *ChunkDecoder) Decode(c *Chunk) (decoded interface{}, err error) { - switch c.Type { - case "IHDR": - return cd.decodeIHDR(c) - } - - // We don't decode this type. - return nil, nil -} - -type ChunkIHDR struct { - Width uint32 - Height uint32 - BitDepth uint8 - ColorType uint8 - CompressionMethod uint8 - FilterMethod uint8 - InterlaceMethod uint8 -} - -func (ihdr *ChunkIHDR) String() string { - return fmt.Sprintf("IHDR", - ihdr.Width, ihdr.Height, ihdr.BitDepth, ihdr.ColorType, ihdr.CompressionMethod, ihdr.FilterMethod, ihdr.InterlaceMethod, - ) -} - -func (cd *ChunkDecoder) decodeIHDR(c *Chunk) (*ChunkIHDR, error) { - var ( - b = bytes.NewBuffer(c.Data) - ihdr = new(ChunkIHDR) - readf = func(data interface{}) error { - return binary.Read(b, binary.BigEndian, data) - } - ) - - if err := readf(&ihdr.Width); err != nil { - return nil, err - } - - if err := readf(&ihdr.Height); err != nil { - return nil, err - } - - if err := readf(&ihdr.BitDepth); err != nil { - return nil, err - } - - if err := readf(&ihdr.ColorType); err != nil { - return nil, err - } - - if err := readf(&ihdr.CompressionMethod); err != nil { - return nil, err - } - - if err := readf(&ihdr.FilterMethod); err != nil { - return nil, err - } - - if err := readf(&ihdr.InterlaceMethod); err != nil { - return nil, err - } - - return ihdr, nil -} diff --git a/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/media_parser.go 
b/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/media_parser.go deleted file mode 100644 index 4c8421905..000000000 --- a/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/media_parser.go +++ /dev/null @@ -1,85 +0,0 @@ -package pngstructure - -import ( - "bufio" - "bytes" - "image" - "io" - "os" - - "image/png" - - riimage "github.com/dsoprea/go-utility/v2/image" -) - -// PngMediaParser knows how to parse a PNG stream. -type PngMediaParser struct { -} - -// NewPngMediaParser returns a new `PngMediaParser`. -func NewPngMediaParser() riimage.MediaParser { - return new(PngMediaParser) -} - -// Parse parses a PNG stream given a `io.ReadSeeker`. -func (pmp *PngMediaParser) Parse( - rs io.ReadSeeker, - size int, -) (riimage.MediaContext, error) { - ps := NewPngSplitter() - if err := ps.readHeader(rs); err != nil { - return nil, err - } - - s := bufio.NewScanner(rs) - - // Since each segment can be any - // size, our buffer must be allowed - // to grow as large as the file. - buffer := []byte{} - s.Buffer(buffer, size) - s.Split(ps.Split) - - for s.Scan() { - } - - if err := s.Err(); err != nil { - return nil, err - } - - return ps.Chunks() -} - -// ParseFile parses a PNG stream given a file-path. -func (pmp *PngMediaParser) ParseFile(filepath string) (riimage.MediaContext, error) { - f, err := os.Open(filepath) - if err != nil { - return nil, err - } - defer f.Close() - - stat, err := f.Stat() - if err != nil { - return nil, err - } - - size := stat.Size() - return pmp.Parse(f, int(size)) -} - -// ParseBytes parses a PNG stream given a byte-slice. -func (pmp *PngMediaParser) ParseBytes(data []byte) (riimage.MediaContext, error) { - br := bytes.NewReader(data) - return pmp.Parse(br, len(data)) -} - -// LooksLikeFormat returns a boolean indicating -// whether the stream looks like a PNG image. 
-func (pmp *PngMediaParser) LooksLikeFormat(data []byte) bool { - return bytes.Equal(data[:len(PngSignature)], PngSignature[:]) -} - -// GetImage returns an image.Image-compatible struct. -func (pmp *PngMediaParser) GetImage(r io.Reader) (img image.Image, err error) { - return png.Decode(r) -} diff --git a/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/png.go b/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/png.go deleted file mode 100644 index dfe773b71..000000000 --- a/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/png.go +++ /dev/null @@ -1,386 +0,0 @@ -package pngstructure - -import ( - "bytes" - "errors" - "fmt" - "io" - - "encoding/binary" - "hash/crc32" - - "github.com/dsoprea/go-exif/v3" - exifcommon "github.com/dsoprea/go-exif/v3/common" - riimage "github.com/dsoprea/go-utility/v2/image" -) - -var ( - PngSignature = [8]byte{137, 'P', 'N', 'G', '\r', '\n', 26, '\n'} - EXifChunkType = "eXIf" - IHDRChunkType = "IHDR" -) - -var ( - ErrNotPng = errors.New("not png data") - ErrCrcFailure = errors.New("crc failure") -) - -// ChunkSlice encapsulates a slice of chunks. -type ChunkSlice struct { - chunks []*Chunk -} - -func NewChunkSlice(chunks []*Chunk) (*ChunkSlice, error) { - if len(chunks) == 0 { - err := errors.New("ChunkSlice must be initialized with at least one chunk (IHDR)") - return nil, err - } else if chunks[0].Type != IHDRChunkType { - err := errors.New("first chunk in any ChunkSlice must be an IHDR") - return nil, err - } - - return &ChunkSlice{chunks}, nil -} - -func NewPngChunkSlice() (*ChunkSlice, error) { - ihdrChunk := &Chunk{ - Type: IHDRChunkType, - } - - ihdrChunk.UpdateCrc32() - - return NewChunkSlice([]*Chunk{ihdrChunk}) -} - -func (cs *ChunkSlice) String() string { - return fmt.Sprintf("ChunkSlize", len(cs.chunks)) -} - -// Chunks exposes the actual slice. -func (cs *ChunkSlice) Chunks() []*Chunk { - return cs.chunks -} - -// Write encodes and writes all chunks. 
-func (cs *ChunkSlice) WriteTo(w io.Writer) error { - if _, err := w.Write(PngSignature[:]); err != nil { - return err - } - - // TODO(dustin): !! This should respect - // the safe-to-copy characteristic. - for _, c := range cs.chunks { - if _, err := c.WriteTo(w); err != nil { - return err - } - } - - return nil -} - -// Index returns a map of chunk types to chunk slices, grouping all like chunks. -func (cs *ChunkSlice) Index() (index map[string][]*Chunk) { - index = make(map[string][]*Chunk) - for _, c := range cs.chunks { - if grouped, found := index[c.Type]; found { - index[c.Type] = append(grouped, c) - } else { - index[c.Type] = []*Chunk{c} - } - } - - return index -} - -// FindExif returns the the segment that hosts the EXIF data. -func (cs *ChunkSlice) FindExif() (chunk *Chunk, err error) { - index := cs.Index() - if chunks, found := index[EXifChunkType]; found { - return chunks[0], nil - } - - return nil, exif.ErrNoExif -} - -// Exif returns an `exif.Ifd` instance with the existing tags. -func (cs *ChunkSlice) Exif() (*exif.Ifd, []byte, error) { - chunk, err := cs.FindExif() - if err != nil { - return nil, nil, err - } - - im, err := exifcommon.NewIfdMappingWithStandard() - if err != nil { - return nil, nil, err - } - - ti := exif.NewTagIndex() - - _, index, err := exif.Collect(im, ti, chunk.Data) - if err != nil { - return nil, nil, err - } - - return index.RootIfd, chunk.Data, nil -} - -// ConstructExifBuilder returns an `exif.IfdBuilder` instance -// (needed for modifying) preloaded with all existing tags. -func (cs *ChunkSlice) ConstructExifBuilder() (*exif.IfdBuilder, error) { - rootIfd, _, err := cs.Exif() - if err != nil { - return nil, err - } - - return exif.NewIfdBuilderFromExistingChain(rootIfd), nil -} - -// SetExif encodes and sets EXIF data into this segment. -func (cs *ChunkSlice) SetExif(ib *exif.IfdBuilder) error { - // Encode. 
- - ibe := exif.NewIfdByteEncoder() - - exifData, err := ibe.EncodeToExif(ib) - if err != nil { - return err - } - - // Set. - - exifChunk, err := cs.FindExif() - - switch { - case err == nil: - // EXIF chunk already exists. - exifChunk.Data = exifData - exifChunk.Length = uint32(len(exifData)) - - case errors.Is(err, exif.ErrNoExif): - // Add a EXIF chunk for the first time. - exifChunk = &Chunk{ - Type: EXifChunkType, - Data: exifData, - Length: uint32(len(exifData)), - } - - // Insert exif after the IHDR chunk; it's - // a reliably appropriate place to put it. - cs.chunks = append( - cs.chunks[:1], - append( - []*Chunk{exifChunk}, - cs.chunks[1:]..., - )..., - ) - - default: - return err - } - - exifChunk.UpdateCrc32() - return nil -} - -// PngSplitter hosts the princpal `Split()` -// method uses by `bufio.Scanner`. -type PngSplitter struct { - chunks []*Chunk - currentOffset int - - doCheckCrc bool - crcErrors []string -} - -func (ps *PngSplitter) Chunks() (*ChunkSlice, error) { - return NewChunkSlice(ps.chunks) -} - -func (ps *PngSplitter) DoCheckCrc(doCheck bool) { - ps.doCheckCrc = doCheck -} - -func (ps *PngSplitter) CrcErrors() []string { - return ps.crcErrors -} - -func NewPngSplitter() *PngSplitter { - return &PngSplitter{ - chunks: make([]*Chunk, 0), - doCheckCrc: true, - crcErrors: make([]string, 0), - } -} - -// Chunk describes a single chunk. -type Chunk struct { - Offset int - Length uint32 - Type string - Data []byte - Crc uint32 -} - -func (c *Chunk) String() string { - return fmt.Sprintf("Chunk", c.Offset, c.Length, c.Type, c.Crc) -} - -func calculateCrc32(chunk *Chunk) uint32 { - c := crc32.NewIEEE() - - c.Write([]byte(chunk.Type)) - c.Write(chunk.Data) - - return c.Sum32() -} - -func (c *Chunk) UpdateCrc32() { - c.Crc = calculateCrc32(c) -} - -func (c *Chunk) CheckCrc32() bool { - expected := calculateCrc32(c) - return c.Crc == expected -} - -// Bytes encodes and returns the bytes for this chunk. 
-func (c *Chunk) Bytes() ([]byte, error) { - if len(c.Data) != int(c.Length) { - return nil, errors.New("length of data not correct") - } - b := make([]byte, 0, 4+4+c.Length+4) - b = binary.BigEndian.AppendUint32(b, c.Length) - b = append(b, c.Type...) - b = append(b, c.Data...) - b = binary.BigEndian.AppendUint32(b, c.Crc) - return b, nil -} - -// Write encodes and writes the bytes for this chunk. -func (c *Chunk) WriteTo(w io.Writer) (int, error) { - if len(c.Data) != int(c.Length) { - return 0, errors.New("length of data not correct") - } - - var n int - - b := make([]byte, 4) // uint32 buf - - binary.BigEndian.PutUint32(b, c.Length) - if nn, err := w.Write(b); err != nil { - return n + nn, err - } - - n += len(b) - - if nn, err := io.WriteString(w, c.Type); err != nil { - return n + nn, err - } - - n += len(c.Type) - - if nn, err := w.Write(c.Data); err != nil { - return n + nn, err - } - - n += len(c.Data) - - binary.BigEndian.PutUint32(b, c.Crc) - if nn, err := w.Write(b); err != nil { - return n + nn, err - } - - n += len(b) - - return n, nil -} - -// readHeader verifies that the PNG header bytes appear next. -func (ps *PngSplitter) readHeader(r io.Reader) error { - var ( - sigLen = len(PngSignature) - header = make([]byte, sigLen) - ) - - if _, err := r.Read(header); err != nil { - return err - } - - ps.currentOffset += sigLen - if !bytes.Equal(header, PngSignature[:]) { - return ErrNotPng - } - - return nil -} - -// Split fulfills the `bufio.SplitFunc` -// function definition for `bufio.Scanner`. -func (ps *PngSplitter) Split( - data []byte, - atEOF bool, -) ( - advance int, - token []byte, - err error, -) { - // We might have more than one chunk's worth, and, - // if `atEOF` is true, we won't be called again. - // We'll repeatedly try to read additional chunks, - // but, when we run out of the data we were given - // then we'll return the number of bytes for the - // chunks we've already completely read. 
Then, we'll - // be called again from the end ofthose bytes, at - // which point we'll indicate that we don't yet have - // enough for another chunk, and we should be then - // called with more. - for { - len_ := len(data) - if len_ < 8 { - return advance, nil, nil - } - - length := binary.BigEndian.Uint32(data[:4]) - type_ := string(data[4:8]) - chunkSize := (8 + int(length) + 4) - - if len_ < chunkSize { - return advance, nil, nil - } - - crcIndex := 8 + length - crc := binary.BigEndian.Uint32(data[crcIndex : crcIndex+4]) - - content := make([]byte, length) - copy(content, data[8:8+length]) - - c := &Chunk{ - Length: length, - Type: type_, - Data: content, - Crc: crc, - Offset: ps.currentOffset, - } - - ps.chunks = append(ps.chunks, c) - - if !c.CheckCrc32() { - ps.crcErrors = append(ps.crcErrors, type_) - - if ps.doCheckCrc { - err = ErrCrcFailure - return - } - } - - advance += chunkSize - ps.currentOffset += chunkSize - - data = data[chunkSize:] - } -} - -var ( - // Enforce interface conformance. 
- _ riimage.MediaContext = new(ChunkSlice) -) diff --git a/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/testing_common.go b/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/testing_common.go deleted file mode 100644 index 42f28d282..000000000 --- a/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/testing_common.go +++ /dev/null @@ -1,77 +0,0 @@ -package pngstructure - -import ( - "fmt" - "os" - "path" -) - -var ( - assetsPath = "assets" -) - -func getModuleRootPath() (string, error) { - moduleRootPath := os.Getenv("PNG_MODULE_ROOT_PATH") - if moduleRootPath != "" { - return moduleRootPath, nil - } - - currentWd, err := os.Getwd() - if err != nil { - return "", err - } - - currentPath := currentWd - visited := make([]string, 0) - - for { - tryStampFilepath := path.Join(currentPath, ".MODULE_ROOT") - - _, err := os.Stat(tryStampFilepath) - if err != nil && !os.IsNotExist(err) { - return "", err - } else if err == nil { - break - } - - visited = append(visited, tryStampFilepath) - - currentPath = path.Dir(currentPath) - if currentPath == "/" { - return "", fmt.Errorf("could not find module-root: %v", visited) - } - } - - return currentPath, nil -} - -func getTestAssetsPath() (string, error) { - if assetsPath == "" { - moduleRootPath, err := getModuleRootPath() - if err != nil { - return "", err - } - - assetsPath = path.Join(moduleRootPath, "assets") - } - - return assetsPath, nil -} - -func getTestBasicImageFilepath() (string, error) { - assetsPath, err := getTestAssetsPath() - if err != nil { - return "", err - } - - return path.Join(assetsPath, "libpng.png"), nil -} - -func getTestExifImageFilepath() (string, error) { - assetsPath, err := getTestAssetsPath() - if err != nil { - return "", err - } - - return path.Join(assetsPath, "exif.png"), nil -} diff --git a/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/utility.go b/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/utility.go 
deleted file mode 100644 index cac6020f2..000000000 --- a/vendor/github.com/superseriousbusiness/go-png-image-structure/v2/utility.go +++ /dev/null @@ -1,67 +0,0 @@ -package pngstructure - -import ( - "bytes" - "fmt" -) - -func DumpBytes(data []byte) { - fmt.Printf("DUMP: ") - for _, x := range data { - fmt.Printf("%02x ", x) - } - - fmt.Printf("\n") -} - -func DumpBytesClause(data []byte) { - fmt.Printf("DUMP: ") - - fmt.Printf("[]byte { ") - - for i, x := range data { - fmt.Printf("0x%02x", x) - - if i < len(data)-1 { - fmt.Printf(", ") - } - } - - fmt.Printf(" }\n") -} - -func DumpBytesToString(data []byte) (string, error) { - b := new(bytes.Buffer) - - for i, x := range data { - if _, err := b.WriteString(fmt.Sprintf("%02x", x)); err != nil { - return "", err - } - - if i < len(data)-1 { - if _, err := b.WriteRune(' '); err != nil { - return "", err - } - } - } - - return b.String(), nil -} - -func DumpBytesClauseToString(data []byte) (string, error) { - b := new(bytes.Buffer) - - for i, x := range data { - if _, err := b.WriteString(fmt.Sprintf("0x%02x", x)); err != nil { - return "", err - } - - if i < len(data)-1 { - if _, err := b.WriteString(", "); err != nil { - return "", err - } - } - } - - return b.String(), nil -} diff --git a/vendor/github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1/args.go b/vendor/github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1/args.go new file mode 100644 index 000000000..4c82e95e2 --- /dev/null +++ b/vendor/github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1/args.go @@ -0,0 +1,97 @@ +package wasi_snapshot_preview1 + +import ( + "context" + + "github.com/tetratelabs/wazero/api" + "github.com/tetratelabs/wazero/experimental/sys" + "github.com/tetratelabs/wazero/internal/wasip1" + "github.com/tetratelabs/wazero/internal/wasm" +) + +// argsGet is the WASI function named ArgsGetName that reads command-line +// argument data. 
+// +// # Parameters +// +// - argv: offset to begin writing argument offsets in uint32 little-endian +// encoding to api.Memory +// - argsSizesGet result argc * 4 bytes are written to this offset +// - argvBuf: offset to write the null terminated arguments to api.Memory +// - argsSizesGet result argv_len bytes are written to this offset +// +// Result (Errno) +// +// The return value is ErrnoSuccess except the following error conditions: +// - sys.EFAULT: there is not enough memory to write results +// +// For example, if argsSizesGet wrote argc=2 and argvLen=5 for arguments: +// "a" and "bc" parameters argv=7 and argvBuf=1, this function writes the below +// to api.Memory: +// +// argvLen uint32le uint32le +// +----------------+ +--------+ +--------+ +// | | | | | | +// []byte{?, 'a', 0, 'b', 'c', 0, ?, 1, 0, 0, 0, 3, 0, 0, 0, ?} +// argvBuf --^ ^ ^ +// argv --| | +// offset that begins "a" --+ | +// offset that begins "bc" --+ +// +// See argsSizesGet +// See https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#args_get +// See https://en.wikipedia.org/wiki/Null-terminated_string +var argsGet = newHostFunc(wasip1.ArgsGetName, argsGetFn, []api.ValueType{i32, i32}, "argv", "argv_buf") + +func argsGetFn(_ context.Context, mod api.Module, params []uint64) sys.Errno { + sysCtx := mod.(*wasm.ModuleInstance).Sys + argv, argvBuf := uint32(params[0]), uint32(params[1]) + return writeOffsetsAndNullTerminatedValues(mod.Memory(), sysCtx.Args(), argv, argvBuf, sysCtx.ArgsSize()) +} + +// argsSizesGet is the WASI function named ArgsSizesGetName that reads +// command-line argument sizes. 
+// +// # Parameters +// +// - resultArgc: offset to write the argument count to api.Memory +// - resultArgvLen: offset to write the null-terminated argument length to +// api.Memory +// +// Result (Errno) +// +// The return value is ErrnoSuccess except the following error conditions: +// - sys.EFAULT: there is not enough memory to write results +// +// For example, if args are "a", "bc" and parameters resultArgc=1 and +// resultArgvLen=6, this function writes the below to api.Memory: +// +// uint32le uint32le +// +--------+ +--------+ +// | | | | +// []byte{?, 2, 0, 0, 0, ?, 5, 0, 0, 0, ?} +// resultArgc --^ ^ +// 2 args --+ | +// resultArgvLen --| +// len([]byte{'a',0,'b',c',0}) --+ +// +// See argsGet +// See https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#args_sizes_get +// See https://en.wikipedia.org/wiki/Null-terminated_string +var argsSizesGet = newHostFunc(wasip1.ArgsSizesGetName, argsSizesGetFn, []api.ValueType{i32, i32}, "result.argc", "result.argv_len") + +func argsSizesGetFn(_ context.Context, mod api.Module, params []uint64) sys.Errno { + sysCtx := mod.(*wasm.ModuleInstance).Sys + mem := mod.Memory() + resultArgc, resultArgvLen := uint32(params[0]), uint32(params[1]) + + // argc and argv_len offsets are not necessarily sequential, so we have to + // write them independently. 
+ if !mem.WriteUint32Le(resultArgc, uint32(len(sysCtx.Args()))) { + return sys.EFAULT + } + if !mem.WriteUint32Le(resultArgvLen, sysCtx.ArgsSize()) { + return sys.EFAULT + } + return 0 +} diff --git a/vendor/github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1/clock.go b/vendor/github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1/clock.go new file mode 100644 index 000000000..31af91071 --- /dev/null +++ b/vendor/github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1/clock.go @@ -0,0 +1,116 @@ +package wasi_snapshot_preview1 + +import ( + "context" + + "github.com/tetratelabs/wazero/api" + "github.com/tetratelabs/wazero/experimental/sys" + "github.com/tetratelabs/wazero/internal/wasip1" + "github.com/tetratelabs/wazero/internal/wasm" +) + +// clockResGet is the WASI function named ClockResGetName that returns the +// resolution of time values returned by clockTimeGet. +// +// # Parameters +// +// - id: clock ID to use +// - resultResolution: offset to write the resolution to api.Memory +// - the resolution is an uint64 little-endian encoding +// +// Result (Errno) +// +// The return value is 0 except the following error conditions: +// - sys.ENOTSUP: the clock ID is not supported. +// - sys.EINVAL: the clock ID is invalid. +// - sys.EFAULT: there is not enough memory to write results +// +// For example, if the resolution is 100ns, this function writes the below to +// api.Memory: +// +// uint64le +// +-------------------------------------+ +// | | +// []byte{?, 0x64, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, ?} +// resultResolution --^ +// +// Note: This is similar to `clock_getres` in POSIX. 
+// See https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#-clock_res_getid-clockid---errno-timestamp +// See https://linux.die.net/man/3/clock_getres +var clockResGet = newHostFunc(wasip1.ClockResGetName, clockResGetFn, []api.ValueType{i32, i32}, "id", "result.resolution") + +func clockResGetFn(_ context.Context, mod api.Module, params []uint64) sys.Errno { + sysCtx := mod.(*wasm.ModuleInstance).Sys + id, resultResolution := uint32(params[0]), uint32(params[1]) + + var resolution uint64 // ns + switch id { + case wasip1.ClockIDRealtime: + resolution = uint64(sysCtx.WalltimeResolution()) + case wasip1.ClockIDMonotonic: + resolution = uint64(sysCtx.NanotimeResolution()) + default: + return sys.EINVAL + } + + if !mod.Memory().WriteUint64Le(resultResolution, resolution) { + return sys.EFAULT + } + return 0 +} + +// clockTimeGet is the WASI function named ClockTimeGetName that returns +// the time value of a name (time.Now). +// +// # Parameters +// +// - id: clock ID to use +// - precision: maximum lag (exclusive) that the returned time value may have, +// compared to its actual value +// - resultTimestamp: offset to write the timestamp to api.Memory +// - the timestamp is epoch nanos encoded as a little-endian uint64 +// +// Result (Errno) +// +// The return value is 0 except the following error conditions: +// - sys.ENOTSUP: the clock ID is not supported. +// - sys.EINVAL: the clock ID is invalid. +// - sys.EFAULT: there is not enough memory to write results +// +// For example, if time.Now returned exactly midnight UTC 2022-01-01 +// (1640995200000000000), and parameters resultTimestamp=1, this function +// writes the below to api.Memory: +// +// uint64le +// +------------------------------------------+ +// | | +// []byte{?, 0x0, 0x0, 0x1f, 0xa6, 0x70, 0xfc, 0xc5, 0x16, ?} +// resultTimestamp --^ +// +// Note: This is similar to `clock_gettime` in POSIX. 
+// See https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#-clock_time_getid-clockid-precision-timestamp---errno-timestamp +// See https://linux.die.net/man/3/clock_gettime +var clockTimeGet = newHostFunc(wasip1.ClockTimeGetName, clockTimeGetFn, []api.ValueType{i32, i64, i32}, "id", "precision", "result.timestamp") + +func clockTimeGetFn(_ context.Context, mod api.Module, params []uint64) sys.Errno { + sysCtx := mod.(*wasm.ModuleInstance).Sys + id := uint32(params[0]) + // TODO: precision is currently ignored. + // precision = params[1] + resultTimestamp := uint32(params[2]) + + var val int64 + switch id { + case wasip1.ClockIDRealtime: + val = sysCtx.WalltimeNanos() + case wasip1.ClockIDMonotonic: + val = sysCtx.Nanotime() + default: + return sys.EINVAL + } + + if !mod.Memory().WriteUint64Le(resultTimestamp, uint64(val)) { + return sys.EFAULT + } + return 0 +} diff --git a/vendor/github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1/environ.go b/vendor/github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1/environ.go new file mode 100644 index 000000000..ec8df708a --- /dev/null +++ b/vendor/github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1/environ.go @@ -0,0 +1,100 @@ +package wasi_snapshot_preview1 + +import ( + "context" + + "github.com/tetratelabs/wazero/api" + "github.com/tetratelabs/wazero/experimental/sys" + "github.com/tetratelabs/wazero/internal/wasip1" + "github.com/tetratelabs/wazero/internal/wasm" +) + +// environGet is the WASI function named EnvironGetName that reads +// environment variables. 
+// +// # Parameters +// +// - environ: offset to begin writing environment offsets in uint32 +// little-endian encoding to api.Memory +// - environSizesGet result environc * 4 bytes are written to this offset +// - environBuf: offset to write the null-terminated variables to api.Memory +// - the format is like os.Environ: null-terminated "key=val" entries +// - environSizesGet result environLen bytes are written to this offset +// +// Result (Errno) +// +// The return value is 0 except the following error conditions: +// - sys.EFAULT: there is not enough memory to write results +// +// For example, if environSizesGet wrote environc=2 and environLen=9 for +// environment variables: "a=b", "b=cd" and parameters environ=11 and +// environBuf=1, this function writes the below to api.Memory: +// +// environLen uint32le uint32le +// +------------------------------------+ +--------+ +--------+ +// | | | | | | +// []byte{?, 'a', '=', 'b', 0, 'b', '=', 'c', 'd', 0, ?, 1, 0, 0, 0, 5, 0, 0, 0, ?} +// environBuf --^ ^ ^ +// environ offset for "a=b" --+ | +// environ offset for "b=cd" --+ +// +// See environSizesGet +// See https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#environ_get +// See https://en.wikipedia.org/wiki/Null-terminated_string +var environGet = newHostFunc(wasip1.EnvironGetName, environGetFn, []api.ValueType{i32, i32}, "environ", "environ_buf") + +func environGetFn(_ context.Context, mod api.Module, params []uint64) sys.Errno { + sysCtx := mod.(*wasm.ModuleInstance).Sys + environ, environBuf := uint32(params[0]), uint32(params[1]) + + return writeOffsetsAndNullTerminatedValues(mod.Memory(), sysCtx.Environ(), environ, environBuf, sysCtx.EnvironSize()) +} + +// environSizesGet is the WASI function named EnvironSizesGetName that +// reads environment variable sizes. 
+// +// # Parameters +// +// - resultEnvironc: offset to write the count of environment variables to +// api.Memory +// - resultEnvironvLen: offset to write the null-terminated environment +// variable length to api.Memory +// +// Result (Errno) +// +// The return value is 0 except the following error conditions: +// - sys.EFAULT: there is not enough memory to write results +// +// For example, if environ are "a=b","b=cd" and parameters resultEnvironc=1 and +// resultEnvironvLen=6, this function writes the below to api.Memory: +// +// uint32le uint32le +// +--------+ +--------+ +// | | | | +// []byte{?, 2, 0, 0, 0, ?, 9, 0, 0, 0, ?} +// resultEnvironc --^ ^ +// 2 variables --+ | +// resultEnvironvLen --| +// len([]byte{'a','=','b',0, | +// 'b','=','c','d',0}) --+ +// +// See environGet +// https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#environ_sizes_get +// and https://en.wikipedia.org/wiki/Null-terminated_string +var environSizesGet = newHostFunc(wasip1.EnvironSizesGetName, environSizesGetFn, []api.ValueType{i32, i32}, "result.environc", "result.environv_len") + +func environSizesGetFn(_ context.Context, mod api.Module, params []uint64) sys.Errno { + sysCtx := mod.(*wasm.ModuleInstance).Sys + mem := mod.Memory() + resultEnvironc, resultEnvironvLen := uint32(params[0]), uint32(params[1]) + + // environc and environv_len offsets are not necessarily sequential, so we + // have to write them independently. 
+ if !mem.WriteUint32Le(resultEnvironc, uint32(len(sysCtx.Environ()))) { + return sys.EFAULT + } + if !mem.WriteUint32Le(resultEnvironvLen, sysCtx.EnvironSize()) { + return sys.EFAULT + } + return 0 +} diff --git a/vendor/github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1/fs.go b/vendor/github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1/fs.go new file mode 100644 index 000000000..1ec0d81b3 --- /dev/null +++ b/vendor/github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1/fs.go @@ -0,0 +1,2016 @@ +package wasi_snapshot_preview1 + +import ( + "context" + "io" + "io/fs" + "math" + "path" + "strings" + "unsafe" + + "github.com/tetratelabs/wazero/api" + experimentalsys "github.com/tetratelabs/wazero/experimental/sys" + socketapi "github.com/tetratelabs/wazero/internal/sock" + "github.com/tetratelabs/wazero/internal/sys" + "github.com/tetratelabs/wazero/internal/wasip1" + "github.com/tetratelabs/wazero/internal/wasm" + sysapi "github.com/tetratelabs/wazero/sys" +) + +// fdAdvise is the WASI function named FdAdviseName which provides file +// advisory information on a file descriptor. 
+// +// See https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#-fd_advisefd-fd-offset-filesize-len-filesize-advice-advice---errno +var fdAdvise = newHostFunc( + wasip1.FdAdviseName, fdAdviseFn, + []wasm.ValueType{i32, i64, i64, i32}, + "fd", "offset", "len", "advice", +) + +func fdAdviseFn(_ context.Context, mod api.Module, params []uint64) experimentalsys.Errno { + fd := int32(params[0]) + _ = params[1] + _ = params[2] + advice := byte(params[3]) + fsc := mod.(*wasm.ModuleInstance).Sys.FS() + + _, ok := fsc.LookupFile(fd) + if !ok { + return experimentalsys.EBADF + } + + switch advice { + case wasip1.FdAdviceNormal, + wasip1.FdAdviceSequential, + wasip1.FdAdviceRandom, + wasip1.FdAdviceWillNeed, + wasip1.FdAdviceDontNeed, + wasip1.FdAdviceNoReuse: + default: + return experimentalsys.EINVAL + } + + // FdAdvice corresponds to posix_fadvise, but it can only be supported on linux. + // However, the purpose of the call is just to do best-effort optimization on OS kernels, + // so just making this noop rather than returning NoSup error makes sense and doesn't affect + // the semantics of Wasm applications. + // TODO: invoke posix_fadvise on linux, and partially on darwin. + // - https://gitlab.com/cznic/fileutil/-/blob/v1.1.2/fileutil_linux.go#L87-95 + // - https://github.com/bytecodealliance/system-interface/blob/62b97f9776b86235f318c3a6e308395a1187439b/src/fs/file_io_ext.rs#L430-L442 + return 0 +} + +// fdAllocate is the WASI function named FdAllocateName which forces the +// allocation of space in a file. 
+// +// See https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#-fd_allocatefd-fd-offset-filesize-len-filesize---errno +var fdAllocate = newHostFunc( + wasip1.FdAllocateName, fdAllocateFn, + []wasm.ValueType{i32, i64, i64}, + "fd", "offset", "len", +) + +func fdAllocateFn(_ context.Context, mod api.Module, params []uint64) experimentalsys.Errno { + fd := int32(params[0]) + offset := params[1] + length := params[2] + + fsc := mod.(*wasm.ModuleInstance).Sys.FS() + f, ok := fsc.LookupFile(fd) + if !ok { + return experimentalsys.EBADF + } + + tail := int64(offset + length) + if tail < 0 { + return experimentalsys.EINVAL + } + + st, errno := f.File.Stat() + if errno != 0 { + return errno + } + + if st.Size >= tail { + return 0 // We already have enough space. + } + + return f.File.Truncate(tail) +} + +// fdClose is the WASI function named FdCloseName which closes a file +// descriptor. +// +// # Parameters +// +// - fd: file descriptor to close +// +// Result (Errno) +// +// The return value is 0 except the following error conditions: +// - sys.EBADF: the fd was not open. +// - sys.ENOTSUP: the fs was a pre-open +// +// Note: This is similar to `close` in POSIX. +// See https://github.com/WebAssembly/WASI/blob/main/phases/snapshot/docs.md#fd_close +// and https://linux.die.net/man/3/close +var fdClose = newHostFunc(wasip1.FdCloseName, fdCloseFn, []api.ValueType{i32}, "fd") + +func fdCloseFn(_ context.Context, mod api.Module, params []uint64) experimentalsys.Errno { + fsc := mod.(*wasm.ModuleInstance).Sys.FS() + fd := int32(params[0]) + + return fsc.CloseFile(fd) +} + +// fdDatasync is the WASI function named FdDatasyncName which synchronizes +// the data of a file to disk. 
+// +// See https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#-fd_datasyncfd-fd---errno +var fdDatasync = newHostFunc(wasip1.FdDatasyncName, fdDatasyncFn, []api.ValueType{i32}, "fd") + +func fdDatasyncFn(_ context.Context, mod api.Module, params []uint64) experimentalsys.Errno { + fsc := mod.(*wasm.ModuleInstance).Sys.FS() + fd := int32(params[0]) + + // Check to see if the file descriptor is available + if f, ok := fsc.LookupFile(fd); !ok { + return experimentalsys.EBADF + } else { + return f.File.Datasync() + } +} + +// fdFdstatGet is the WASI function named FdFdstatGetName which returns the +// attributes of a file descriptor. +// +// # Parameters +// +// - fd: file descriptor to get the fdstat attributes data +// - resultFdstat: offset to write the result fdstat data +// +// Result (Errno) +// +// The return value is 0 except the following error conditions: +// - sys.EBADF: `fd` is invalid +// - sys.EFAULT: `resultFdstat` points to an offset out of memory +// +// fdstat byte layout is 24-byte size, with the following fields: +// - fs_filetype 1 byte: the file type +// - fs_flags 2 bytes: the file descriptor flag +// - 5 pad bytes +// - fs_right_base 8 bytes: ignored as rights were removed from WASI. +// - fs_right_inheriting 8 bytes: ignored as rights were removed from WASI. +// +// For example, with a file corresponding with `fd` was a directory (=3) opened +// with `fd_read` right (=1) and no fs_flags (=0), parameter resultFdstat=1, +// this function writes the below to api.Memory: +// +// uint16le padding uint64le uint64le +// uint8 --+ +--+ +-----------+ +--------------------+ +--------------------+ +// | | | | | | | | | +// []byte{?, 3, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0} +// resultFdstat --^ ^-- fs_flags ^-- fs_right_base ^-- fs_right_inheriting +// | +// +-- fs_filetype +// +// Note: fdFdstatGet returns similar flags to `fsync(fd, F_GETFL)` in POSIX, as +// well as additional fields. 
+// See https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#fdstat +// and https://linux.die.net/man/3/fsync +var fdFdstatGet = newHostFunc(wasip1.FdFdstatGetName, fdFdstatGetFn, []api.ValueType{i32, i32}, "fd", "result.stat") + +// fdFdstatGetFn cannot currently use proxyResultParams because fdstat is larger +// than api.ValueTypeI64 (i64 == 8 bytes, but fdstat is 24). +func fdFdstatGetFn(_ context.Context, mod api.Module, params []uint64) experimentalsys.Errno { + fsc := mod.(*wasm.ModuleInstance).Sys.FS() + + fd, resultFdstat := int32(params[0]), uint32(params[1]) + + // Ensure we can write the fdstat + buf, ok := mod.Memory().Read(resultFdstat, 24) + if !ok { + return experimentalsys.EFAULT + } + + var fdflags uint16 + var st sysapi.Stat_t + var errno experimentalsys.Errno + f, ok := fsc.LookupFile(fd) + if !ok { + return experimentalsys.EBADF + } else if st, errno = f.File.Stat(); errno != 0 { + return errno + } else if f.File.IsAppend() { + fdflags |= wasip1.FD_APPEND + } + + if f.File.IsNonblock() { + fdflags |= wasip1.FD_NONBLOCK + } + + var fsRightsBase uint32 + var fsRightsInheriting uint32 + fileType := getExtendedWasiFiletype(f.File, st.Mode) + + switch fileType { + case wasip1.FILETYPE_DIRECTORY: + // To satisfy wasi-testsuite, we must advertise that directories cannot + // be given seek permission (RIGHT_FD_SEEK). + fsRightsBase = dirRightsBase + fsRightsInheriting = fileRightsBase | dirRightsBase + case wasip1.FILETYPE_CHARACTER_DEVICE: + // According to wasi-libc, + // > A tty is a character device that we can't seek or tell on. 
+ // See https://github.com/WebAssembly/wasi-libc/blob/a6f871343313220b76009827ed0153586361c0d5/libc-bottom-half/sources/isatty.c#L13-L18 + fsRightsBase = fileRightsBase &^ wasip1.RIGHT_FD_SEEK &^ wasip1.RIGHT_FD_TELL + default: + fsRightsBase = fileRightsBase + } + + writeFdstat(buf, fileType, fdflags, fsRightsBase, fsRightsInheriting) + return 0 +} + +// isPreopenedStdio returns true if the FD is sys.FdStdin, sys.FdStdout or +// sys.FdStderr and pre-opened. This double check is needed in case the guest +// closes stdin and re-opens it with a random alternative file. +// +// Currently, we only support non-blocking mode for standard I/O streams. +// Non-blocking mode is rarely supported for regular files, and we don't +// yet have support for sockets, so we make a special case. +// +// Note: this to get or set FD_NONBLOCK, but skip FD_APPEND. Our current +// implementation can't set FD_APPEND, without re-opening files. As stdio are +// pre-opened, we don't know how to re-open them, neither should we close the +// underlying file. Later, we could add support for setting FD_APPEND, similar +// to SetNonblock. 
+func isPreopenedStdio(fd int32, f *sys.FileEntry) bool { + return fd <= sys.FdStderr && f.IsPreopen +} + +const fileRightsBase = wasip1.RIGHT_FD_DATASYNC | + wasip1.RIGHT_FD_READ | + wasip1.RIGHT_FD_SEEK | + wasip1.RIGHT_FDSTAT_SET_FLAGS | + wasip1.RIGHT_FD_SYNC | + wasip1.RIGHT_FD_TELL | + wasip1.RIGHT_FD_WRITE | + wasip1.RIGHT_FD_ADVISE | + wasip1.RIGHT_FD_ALLOCATE | + wasip1.RIGHT_FD_FILESTAT_GET | + wasip1.RIGHT_FD_FILESTAT_SET_SIZE | + wasip1.RIGHT_FD_FILESTAT_SET_TIMES | + wasip1.RIGHT_POLL_FD_READWRITE + +const dirRightsBase = wasip1.RIGHT_FD_DATASYNC | + wasip1.RIGHT_FDSTAT_SET_FLAGS | + wasip1.RIGHT_FD_SYNC | + wasip1.RIGHT_PATH_CREATE_DIRECTORY | + wasip1.RIGHT_PATH_CREATE_FILE | + wasip1.RIGHT_PATH_LINK_SOURCE | + wasip1.RIGHT_PATH_LINK_TARGET | + wasip1.RIGHT_PATH_OPEN | + wasip1.RIGHT_FD_READDIR | + wasip1.RIGHT_PATH_READLINK | + wasip1.RIGHT_PATH_RENAME_SOURCE | + wasip1.RIGHT_PATH_RENAME_TARGET | + wasip1.RIGHT_PATH_FILESTAT_GET | + wasip1.RIGHT_PATH_FILESTAT_SET_SIZE | + wasip1.RIGHT_PATH_FILESTAT_SET_TIMES | + wasip1.RIGHT_FD_FILESTAT_GET | + wasip1.RIGHT_FD_FILESTAT_SET_TIMES | + wasip1.RIGHT_PATH_SYMLINK | + wasip1.RIGHT_PATH_REMOVE_DIRECTORY | + wasip1.RIGHT_PATH_UNLINK_FILE + +func writeFdstat(buf []byte, fileType uint8, fdflags uint16, fsRightsBase, fsRightsInheriting uint32) { + b := (*[24]byte)(buf) + le.PutUint16(b[0:], uint16(fileType)) + le.PutUint16(b[2:], fdflags) + le.PutUint32(b[4:], 0) + le.PutUint64(b[8:], uint64(fsRightsBase)) + le.PutUint64(b[16:], uint64(fsRightsInheriting)) +} + +// fdFdstatSetFlags is the WASI function named FdFdstatSetFlagsName which +// adjusts the flags associated with a file descriptor. 
+var fdFdstatSetFlags = newHostFunc(wasip1.FdFdstatSetFlagsName, fdFdstatSetFlagsFn, []wasm.ValueType{i32, i32}, "fd", "flags") + +func fdFdstatSetFlagsFn(_ context.Context, mod api.Module, params []uint64) experimentalsys.Errno { + fd, wasiFlag := int32(params[0]), uint16(params[1]) + fsc := mod.(*wasm.ModuleInstance).Sys.FS() + + // Currently we only support APPEND and NONBLOCK. + if wasip1.FD_DSYNC&wasiFlag != 0 || wasip1.FD_RSYNC&wasiFlag != 0 || wasip1.FD_SYNC&wasiFlag != 0 { + return experimentalsys.EINVAL + } + + if f, ok := fsc.LookupFile(fd); !ok { + return experimentalsys.EBADF + } else { + nonblock := wasip1.FD_NONBLOCK&wasiFlag != 0 + errno := f.File.SetNonblock(nonblock) + if errno != 0 { + return errno + } + if stat, err := f.File.Stat(); err == 0 && stat.Mode.IsRegular() { + // For normal files, proceed to apply an append flag. + append := wasip1.FD_APPEND&wasiFlag != 0 + return f.File.SetAppend(append) + } + } + + return 0 +} + +// fdFdstatSetRights will not be implemented as rights were removed from WASI. +// +// See https://github.com/bytecodealliance/wasmtime/pull/4666 +var fdFdstatSetRights = stubFunction( + wasip1.FdFdstatSetRightsName, + []wasm.ValueType{i32, i64, i64}, + "fd", "fs_rights_base", "fs_rights_inheriting", +) + +// fdFilestatGet is the WASI function named FdFilestatGetName which returns +// the stat attributes of an open file. 
+// +// # Parameters +// +// - fd: file descriptor to get the filestat attributes data for +// - resultFilestat: offset to write the result filestat data +// +// Result (Errno) +// +// The return value is 0 except the following error conditions: +// - sys.EBADF: `fd` is invalid +// - sys.EIO: could not stat `fd` on filesystem +// - sys.EFAULT: `resultFilestat` points to an offset out of memory +// +// filestat byte layout is 64-byte size, with the following fields: +// - dev 8 bytes: the device ID of device containing the file +// - ino 8 bytes: the file serial number +// - filetype 1 byte: the type of the file +// - 7 pad bytes +// - nlink 8 bytes: number of hard links to the file +// - size 8 bytes: for regular files, the file size in bytes. For symbolic links, the length in bytes of the pathname contained in the symbolic link +// - atim 8 bytes: ast data access timestamp +// - mtim 8 bytes: last data modification timestamp +// - ctim 8 bytes: ast file status change timestamp +// +// For example, with a regular file this function writes the below to api.Memory: +// +// uint8 --+ +// uint64le uint64le | padding uint64le uint64le uint64le uint64le uint64le +// +--------------------+ +--------------------+ | +-----------------+ +--------------------+ +-----------------------+ +----------------------------------+ +----------------------------------+ +----------------------------------+ +// | | | | | | | | | | | | | | | | | +// []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 117, 80, 0, 0, 0, 0, 0, 0, 160, 153, 212, 128, 110, 221, 35, 23, 160, 153, 212, 128, 110, 221, 35, 23, 160, 153, 212, 128, 110, 221, 35, 23} +// resultFilestat ^-- dev ^-- ino ^ ^-- nlink ^-- size ^-- atim ^-- mtim ^-- ctim +// | +// +-- filetype +// +// The following properties of filestat are not implemented: +// - dev: not supported by Golang FS +// - ino: not supported by Golang FS +// - nlink: not supported by Golang FS, we use 1 +// - 
atime: not supported by Golang FS, we use mtim for this +// - ctim: not supported by Golang FS, we use mtim for this +// +// Note: This is similar to `fstat` in POSIX. +// See https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#-fd_filestat_getfd-fd---errno-filestat +// and https://linux.die.net/man/3/fstat +var fdFilestatGet = newHostFunc(wasip1.FdFilestatGetName, fdFilestatGetFn, []api.ValueType{i32, i32}, "fd", "result.filestat") + +// fdFilestatGetFn cannot currently use proxyResultParams because filestat is +// larger than api.ValueTypeI64 (i64 == 8 bytes, but filestat is 64). +func fdFilestatGetFn(_ context.Context, mod api.Module, params []uint64) experimentalsys.Errno { + return fdFilestatGetFunc(mod, int32(params[0]), uint32(params[1])) +} + +func fdFilestatGetFunc(mod api.Module, fd int32, resultBuf uint32) experimentalsys.Errno { + fsc := mod.(*wasm.ModuleInstance).Sys.FS() + + // Ensure we can write the filestat + buf, ok := mod.Memory().Read(resultBuf, 64) + if !ok { + return experimentalsys.EFAULT + } + + f, ok := fsc.LookupFile(fd) + if !ok { + return experimentalsys.EBADF + } + + st, errno := f.File.Stat() + if errno != 0 { + return errno + } + + filetype := getExtendedWasiFiletype(f.File, st.Mode) + return writeFilestat(buf, &st, filetype) +} + +func getExtendedWasiFiletype(file experimentalsys.File, fm fs.FileMode) (ftype uint8) { + ftype = getWasiFiletype(fm) + if ftype == wasip1.FILETYPE_UNKNOWN { + if _, ok := file.(socketapi.TCPSock); ok { + ftype = wasip1.FILETYPE_SOCKET_STREAM + } else if _, ok = file.(socketapi.TCPConn); ok { + ftype = wasip1.FILETYPE_SOCKET_STREAM + } + } + return +} + +func getWasiFiletype(fm fs.FileMode) uint8 { + switch { + case fm.IsRegular(): + return wasip1.FILETYPE_REGULAR_FILE + case fm.IsDir(): + return wasip1.FILETYPE_DIRECTORY + case fm&fs.ModeSymlink != 0: + return wasip1.FILETYPE_SYMBOLIC_LINK + case fm&fs.ModeDevice != 0: + // Unlike ModeDevice and ModeCharDevice, 
FILETYPE_CHARACTER_DEVICE and + // FILETYPE_BLOCK_DEVICE are set mutually exclusively. + if fm&fs.ModeCharDevice != 0 { + return wasip1.FILETYPE_CHARACTER_DEVICE + } + return wasip1.FILETYPE_BLOCK_DEVICE + default: // unknown + return wasip1.FILETYPE_UNKNOWN + } +} + +func writeFilestat(buf []byte, st *sysapi.Stat_t, ftype uint8) (errno experimentalsys.Errno) { + le.PutUint64(buf, st.Dev) + le.PutUint64(buf[8:], st.Ino) + le.PutUint64(buf[16:], uint64(ftype)) + le.PutUint64(buf[24:], st.Nlink) + le.PutUint64(buf[32:], uint64(st.Size)) + le.PutUint64(buf[40:], uint64(st.Atim)) + le.PutUint64(buf[48:], uint64(st.Mtim)) + le.PutUint64(buf[56:], uint64(st.Ctim)) + return +} + +// fdFilestatSetSize is the WASI function named FdFilestatSetSizeName which +// adjusts the size of an open file. +// +// See https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#-fd_filestat_set_sizefd-fd-size-filesize---errno +var fdFilestatSetSize = newHostFunc(wasip1.FdFilestatSetSizeName, fdFilestatSetSizeFn, []wasm.ValueType{i32, i64}, "fd", "size") + +func fdFilestatSetSizeFn(_ context.Context, mod api.Module, params []uint64) experimentalsys.Errno { + fd := int32(params[0]) + size := int64(params[1]) + + fsc := mod.(*wasm.ModuleInstance).Sys.FS() + + // Check to see if the file descriptor is available + if f, ok := fsc.LookupFile(fd); !ok { + return experimentalsys.EBADF + } else { + return f.File.Truncate(size) + } +} + +// fdFilestatSetTimes is the WASI function named functionFdFilestatSetTimes +// which adjusts the times of an open file. 
+// +// See https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#-fd_filestat_set_timesfd-fd-atim-timestamp-mtim-timestamp-fst_flags-fstflags---errno +var fdFilestatSetTimes = newHostFunc( + wasip1.FdFilestatSetTimesName, fdFilestatSetTimesFn, + []wasm.ValueType{i32, i64, i64, i32}, + "fd", "atim", "mtim", "fst_flags", +) + +func fdFilestatSetTimesFn(_ context.Context, mod api.Module, params []uint64) experimentalsys.Errno { + fd := int32(params[0]) + atim := int64(params[1]) + mtim := int64(params[2]) + fstFlags := uint16(params[3]) + + sys := mod.(*wasm.ModuleInstance).Sys + fsc := sys.FS() + + f, ok := fsc.LookupFile(fd) + if !ok { + return experimentalsys.EBADF + } + + atim, mtim, errno := toTimes(sys.WalltimeNanos, atim, mtim, fstFlags) + if errno != 0 { + return errno + } + + // Try to update the file timestamps by file-descriptor. + errno = f.File.Utimens(atim, mtim) + + // Fall back to path based, despite it being less precise. + switch errno { + case experimentalsys.EPERM, experimentalsys.ENOSYS: + errno = f.FS.Utimens(f.Name, atim, mtim) + } + + return errno +} + +func toTimes(walltime func() int64, atim, mtim int64, fstFlags uint16) (int64, int64, experimentalsys.Errno) { + // times[0] == atim, times[1] == mtim + + var nowTim int64 + + // coerce atim into a timespec + if set, now := fstFlags&wasip1.FstflagsAtim != 0, fstFlags&wasip1.FstflagsAtimNow != 0; set && now { + return 0, 0, experimentalsys.EINVAL + } else if set { + // atim is already correct + } else if now { + nowTim = walltime() + atim = nowTim + } else { + atim = experimentalsys.UTIME_OMIT + } + + // coerce mtim into a timespec + if set, now := fstFlags&wasip1.FstflagsMtim != 0, fstFlags&wasip1.FstflagsMtimNow != 0; set && now { + return 0, 0, experimentalsys.EINVAL + } else if set { + // mtim is already correct + } else if now { + if nowTim != 0 { + mtim = nowTim + } else { + mtim = walltime() + } + } else { + mtim = experimentalsys.UTIME_OMIT + } + return atim, mtim, 
0 +} + +// fdPread is the WASI function named FdPreadName which reads from a file +// descriptor, without using and updating the file descriptor's offset. +// +// Except for handling offset, this implementation is identical to fdRead. +// +// See https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#-fd_preadfd-fd-iovs-iovec_array-offset-filesize---errno-size +var fdPread = newHostFunc( + wasip1.FdPreadName, fdPreadFn, + []api.ValueType{i32, i32, i32, i64, i32}, + "fd", "iovs", "iovs_len", "offset", "result.nread", +) + +func fdPreadFn(_ context.Context, mod api.Module, params []uint64) experimentalsys.Errno { + return fdReadOrPread(mod, params, true) +} + +// fdPrestatGet is the WASI function named FdPrestatGetName which returns +// the prestat data of a file descriptor. +// +// # Parameters +// +// - fd: file descriptor to get the prestat +// - resultPrestat: offset to write the result prestat data +// +// Result (Errno) +// +// The return value is 0 except the following error conditions: +// - sys.EBADF: `fd` is invalid or the `fd` is not a pre-opened directory +// - sys.EFAULT: `resultPrestat` points to an offset out of memory +// +// prestat byte layout is 8 bytes, beginning with an 8-bit tag and 3 pad bytes. +// The only valid tag is `prestat_dir`, which is tag zero. This simplifies the +// byte layout to 4 empty bytes followed by the uint32le encoded path length. 
+// +// For example, the directory name corresponding with `fd` was "/tmp" and +// parameter resultPrestat=1, this function writes the below to api.Memory: +// +// padding uint32le +// uint8 --+ +-----+ +--------+ +// | | | | | +// []byte{?, 0, 0, 0, 0, 4, 0, 0, 0, ?} +// resultPrestat --^ ^ +// tag --+ | +// +-- size in bytes of the string "/tmp" +// +// See fdPrestatDirName and +// https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#prestat +var fdPrestatGet = newHostFunc(wasip1.FdPrestatGetName, fdPrestatGetFn, []api.ValueType{i32, i32}, "fd", "result.prestat") + +func fdPrestatGetFn(_ context.Context, mod api.Module, params []uint64) experimentalsys.Errno { + fsc := mod.(*wasm.ModuleInstance).Sys.FS() + fd, resultPrestat := int32(params[0]), uint32(params[1]) + + name, errno := preopenPath(fsc, fd) + if errno != 0 { + return errno + } + + // Upper 32-bits are zero because... + // * Zero-value 8-bit tag, and 3-byte zero-value padding + prestat := uint64(len(name) << 32) + if !mod.Memory().WriteUint64Le(resultPrestat, prestat) { + return experimentalsys.EFAULT + } + return 0 +} + +// fdPrestatDirName is the WASI function named FdPrestatDirNameName which +// returns the path of the pre-opened directory of a file descriptor. 
+// +// # Parameters +// +// - fd: file descriptor to get the path of the pre-opened directory +// - path: offset in api.Memory to write the result path +// - pathLen: count of bytes to write to `path` +// - This should match the uint32le fdPrestatGet writes to offset +// `resultPrestat`+4 +// +// Result (Errno) +// +// The return value is 0 except the following error conditions: +// - sys.EBADF: `fd` is invalid +// - sys.EFAULT: `path` points to an offset out of memory +// - sys.ENAMETOOLONG: `pathLen` is longer than the actual length of the result +// +// For example, the directory name corresponding with `fd` was "/tmp" and +// # Parameters path=1 pathLen=4 (correct), this function will write the below to +// api.Memory: +// +// pathLen +// +--------------+ +// | | +// []byte{?, '/', 't', 'm', 'p', ?} +// path --^ +// +// See fdPrestatGet +// See https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#fd_prestat_dir_name +var fdPrestatDirName = newHostFunc( + wasip1.FdPrestatDirNameName, fdPrestatDirNameFn, + []api.ValueType{i32, i32, i32}, + "fd", "result.path", "result.path_len", +) + +func fdPrestatDirNameFn(_ context.Context, mod api.Module, params []uint64) experimentalsys.Errno { + fsc := mod.(*wasm.ModuleInstance).Sys.FS() + fd, path, pathLen := int32(params[0]), uint32(params[1]), uint32(params[2]) + + name, errno := preopenPath(fsc, fd) + if errno != 0 { + return errno + } + + // Some runtimes may have another semantics. See /RATIONALE.md + if uint32(len(name)) < pathLen { + return experimentalsys.ENAMETOOLONG + } + + if !mod.Memory().Write(path, []byte(name)[:pathLen]) { + return experimentalsys.EFAULT + } + return 0 +} + +// fdPwrite is the WASI function named FdPwriteName which writes to a file +// descriptor, without using and updating the file descriptor's offset. +// +// Except for handling offset, this implementation is identical to fdWrite. 
+// +// See https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#-fd_pwritefd-fd-iovs-ciovec_array-offset-filesize---errno-size +var fdPwrite = newHostFunc( + wasip1.FdPwriteName, fdPwriteFn, + []api.ValueType{i32, i32, i32, i64, i32}, + "fd", "iovs", "iovs_len", "offset", "result.nwritten", +) + +func fdPwriteFn(_ context.Context, mod api.Module, params []uint64) experimentalsys.Errno { + return fdWriteOrPwrite(mod, params, true) +} + +// fdRead is the WASI function named FdReadName which reads from a file +// descriptor. +// +// # Parameters +// +// - fd: an opened file descriptor to read data from +// - iovs: offset in api.Memory to read offset, size pairs representing where +// to write file data +// - Both offset and length are encoded as uint32le +// - iovsCount: count of memory offset, size pairs to read sequentially +// starting at iovs +// - resultNread: offset in api.Memory to write the number of bytes read +// +// Result (Errno) +// +// The return value is 0 except the following error conditions: +// - sys.EBADF: `fd` is invalid +// - sys.EFAULT: `iovs` or `resultNread` point to an offset out of memory +// - sys.EIO: a file system error +// +// For example, this function needs to first read `iovs` to determine where +// to write contents. If parameters iovs=1 iovsCount=2, this function reads two +// offset/length pairs from api.Memory: +// +// iovs[0] iovs[1] +// +---------------------+ +--------------------+ +// | uint32le uint32le| |uint32le uint32le| +// +---------+ +--------+ +--------+ +--------+ +// | | | | | | | | +// []byte{?, 18, 0, 0, 0, 4, 0, 0, 0, 23, 0, 0, 0, 2, 0, 0, 0, ?... 
} +// iovs --^ ^ ^ ^ +// | | | | +// offset --+ length --+ offset --+ length --+ +// +// If the contents of the `fd` parameter was "wazero" (6 bytes) and parameter +// resultNread=26, this function writes the below to api.Memory: +// +// iovs[0].length iovs[1].length +// +--------------+ +----+ uint32le +// | | | | +--------+ +// []byte{ 0..16, ?, 'w', 'a', 'z', 'e', ?, 'r', 'o', ?, 6, 0, 0, 0 } +// iovs[0].offset --^ ^ ^ +// iovs[1].offset --+ | +// resultNread --+ +// +// Note: This is similar to `readv` in POSIX. https://linux.die.net/man/3/readv +// +// See fdWrite +// and https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#fd_read +var fdRead = newHostFunc( + wasip1.FdReadName, fdReadFn, + []api.ValueType{i32, i32, i32, i32}, + "fd", "iovs", "iovs_len", "result.nread", +) + +// preader tracks an offset across multiple reads. +type preader struct { + f experimentalsys.File + offset int64 +} + +// Read implements the same function as documented on sys.File. +func (w *preader) Read(buf []byte) (n int, errno experimentalsys.Errno) { + if len(buf) == 0 { + return 0, 0 // less overhead on zero-length reads. 
+ } + + n, err := w.f.Pread(buf, w.offset) + w.offset += int64(n) + return n, err +} + +func fdReadFn(_ context.Context, mod api.Module, params []uint64) experimentalsys.Errno { + return fdReadOrPread(mod, params, false) +} + +func fdReadOrPread(mod api.Module, params []uint64, isPread bool) experimentalsys.Errno { + mem := mod.Memory() + fsc := mod.(*wasm.ModuleInstance).Sys.FS() + + fd := int32(params[0]) + iovs := uint32(params[1]) + iovsCount := uint32(params[2]) + + var resultNread uint32 + var reader func(buf []byte) (n int, errno experimentalsys.Errno) + if f, ok := fsc.LookupFile(fd); !ok { + return experimentalsys.EBADF + } else if isPread { + offset := int64(params[3]) + reader = (&preader{f: f.File, offset: offset}).Read + resultNread = uint32(params[4]) + } else { + reader = f.File.Read + resultNread = uint32(params[3]) + } + + nread, errno := readv(mem, iovs, iovsCount, reader) + if errno != 0 { + return errno + } + if !mem.WriteUint32Le(resultNread, nread) { + return experimentalsys.EFAULT + } else { + return 0 + } +} + +func readv(mem api.Memory, iovs uint32, iovsCount uint32, reader func(buf []byte) (nread int, errno experimentalsys.Errno)) (uint32, experimentalsys.Errno) { + var nread uint32 + iovsStop := iovsCount << 3 // iovsCount * 8 + iovsBuf, ok := mem.Read(iovs, iovsStop) + if !ok { + return 0, experimentalsys.EFAULT + } + + for iovsPos := uint32(0); iovsPos < iovsStop; iovsPos += 8 { + offset := le.Uint32(iovsBuf[iovsPos:]) + l := le.Uint32(iovsBuf[iovsPos+4:]) + + if l == 0 { // A zero length iovec could be ahead of another. + continue + } + + b, ok := mem.Read(offset, l) + if !ok { + return 0, experimentalsys.EFAULT + } + + n, errno := reader(b) + nread += uint32(n) + + if errno == experimentalsys.ENOSYS { + return 0, experimentalsys.EBADF // e.g. unimplemented for read + } else if errno != 0 { + return 0, errno + } else if n < int(l) { + break // stop when we read less than capacity. 
+ } + } + return nread, 0 +} + +// fdReaddir is the WASI function named wasip1.FdReaddirName which reads +// directory entries from a directory. Special behaviors required by this +// function are implemented in sys.DirentCache. +// +// See https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#-fd_readdirfd-fd-buf-pointeru8-buf_len-size-cookie-dircookie---errno-size +// +// # Result (Errno) +// +// The return value is 0 except the following known error conditions: +// - sys.ENOSYS: the implementation does not support this function. +// - sys.EBADF: the file was closed or not a directory. +// - sys.EFAULT: `buf` or `buf_len` point to an offset out of memory. +// - sys.ENOENT: `cookie` was invalid. +// - sys.EINVAL: `buf_len` was not large enough to write a dirent header. +// +// # End of Directory (EOF) +// +// More entries are available when `result.bufused` == `buf_len`. See +// https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#fd_readdir +// https://github.com/WebAssembly/wasi-libc/blob/659ff414560721b1660a19685110e484a081c3d4/libc-bottom-half/cloudlibc/src/libc/dirent/readdir.c#L44 +var fdReaddir = newHostFunc( + wasip1.FdReaddirName, fdReaddirFn, + []wasm.ValueType{i32, i32, i32, i64, i32}, + "fd", "buf", "buf_len", "cookie", "result.bufused", +) + +func fdReaddirFn(_ context.Context, mod api.Module, params []uint64) experimentalsys.Errno { + mem := mod.Memory() + fsc := mod.(*wasm.ModuleInstance).Sys.FS() + + fd := int32(params[0]) + buf := uint32(params[1]) + bufLen := uint32(params[2]) + cookie := params[3] + resultBufused := uint32(params[4]) + + // The bufLen must be enough to write a dirent header. + if bufLen < wasip1.DirentSize { + // This is a bug in the caller, as unless `buf_len` is large enough to + // write a dirent, it can't read the `d_namlen` from it. + return experimentalsys.EINVAL + } + + // Get or open a dirent cache for this file descriptor. 
+ dir, errno := direntCache(fsc, fd) + if errno != 0 { + return errno + } + + // First, determine the maximum directory entries that can be encoded as + // dirents. The total size is DirentSize(24) + nameSize, for each file. + // Since a zero-length file name is invalid, the minimum size entry is + // 25 (DirentSize + 1 character). + maxDirEntries := bufLen/wasip1.DirentSize + 1 + + // While unlikely maxDirEntries will fit into bufLen, add one more just in + // case, as we need to know if we hit the end of the directory or not to + // write the correct bufused (e.g. == bufLen unless EOF). + // >> If less than the size of the read buffer, the end of the + // >> directory has been reached. + maxDirEntries += 1 + + // Read up to max entries. The underlying implementation will cache these, + // starting at the current location, so that they can be re-read. This is + // important because even the first could end up larger than bufLen due to + // the size of its name. + dirents, errno := dir.Read(cookie, maxDirEntries) + if errno != 0 { + return errno + } + + // Determine how many dirents we can write, including a potentially + // truncated last entry. + bufToWrite, direntCount, truncatedLen := maxDirents(dirents, bufLen) + + // Now, write entries to the underlying buffer. + if bufToWrite > 0 { + + // d_next is the index of the next file in the list, so it should + // always be one higher than the requested cookie. + d_next := cookie + 1 + // ^^ yes this can overflow to negative, which means our implementation + // doesn't support writing greater than max int64 entries. + + buf, ok := mem.Read(buf, bufToWrite) + if !ok { + return experimentalsys.EFAULT + } + + writeDirents(buf, dirents, d_next, direntCount, truncatedLen) + } + + // bufused == bufLen means more dirents exist, which is the case when one + // is truncated. 
+ bufused := bufToWrite + if truncatedLen > 0 { + bufused = bufLen + } + + if !mem.WriteUint32Le(resultBufused, bufused) { + return experimentalsys.EFAULT + } + return 0 +} + +const largestDirent = int64(math.MaxUint32 - wasip1.DirentSize) + +// maxDirents returns the dirents to write. +// +// `bufToWrite` is the amount of memory needed to write direntCount, which +// includes up to wasip1.DirentSize of a last truncated entry. +func maxDirents(dirents []experimentalsys.Dirent, bufLen uint32) (bufToWrite uint32, direntCount int, truncatedLen uint32) { + lenRemaining := bufLen + for i := range dirents { + if lenRemaining == 0 { + break + } + d := dirents[i] + direntCount++ + + // use int64 to guard against huge filenames + nameLen := int64(len(d.Name)) + var entryLen uint32 + + // Check to see if DirentSize + nameLen overflows, or if it would be + // larger than possible to encode. + if el := int64(wasip1.DirentSize) + nameLen; el < 0 || el > largestDirent { + // panic, as testing is difficult. ex we would have to extract a + // function to get size of a string or allocate a 2^32 size one! + panic("invalid filename: too large") + } else { // we know this can fit into a uint32 + entryLen = uint32(el) + } + + if entryLen > lenRemaining { + // We haven't room to write the entry, and docs say to write the + // header. This helps especially when there is an entry with a very + // long filename. Ex if bufLen is 4096 and the filename is 4096, + // we need to write DirentSize(24) + 4096 bytes to write the entry. + // In this case, we only write up to DirentSize(24) to allow the + // caller to resize. + if lenRemaining >= wasip1.DirentSize { + truncatedLen = wasip1.DirentSize + } else { + truncatedLen = lenRemaining + } + bufToWrite += truncatedLen + break + } + + // This won't go negative because we checked entryLen <= lenRemaining. 
+ lenRemaining -= entryLen + bufToWrite += entryLen + } + return +} + +// writeDirents writes the directory entries to the buffer, which is pre-sized +// based on maxDirents. truncatedEntryLen means the last is written without its +// name. +func writeDirents(buf []byte, dirents []experimentalsys.Dirent, d_next uint64, direntCount int, truncatedLen uint32) { + pos := uint32(0) + skipNameI := -1 + + // If the last entry was truncated, we either skip it or write it without + // its name, depending on the length. + if truncatedLen > 0 { + if truncatedLen < wasip1.DirentSize { + direntCount-- // skip as too small to write the header. + } else { + skipNameI = direntCount - 1 // write the header, but not the name. + } + } + + for i := 0; i < direntCount; i++ { + e := dirents[i] + nameLen := uint32(len(e.Name)) + writeDirent(buf[pos:], d_next, e.Ino, nameLen, e.Type) + d_next++ + pos += wasip1.DirentSize + + if i != skipNameI { + copy(buf[pos:], e.Name) + pos += nameLen + } + } +} + +// writeDirent writes DirentSize bytes +func writeDirent(buf []byte, dNext uint64, ino sysapi.Inode, dNamlen uint32, dType fs.FileMode) { + le.PutUint64(buf, dNext) // d_next + le.PutUint64(buf[8:], ino) // d_ino + le.PutUint32(buf[16:], dNamlen) // d_namlen + filetype := getWasiFiletype(dType) + le.PutUint32(buf[20:], uint32(filetype)) // d_type +} + +// direntCache lazy opens a sys.DirentCache for this directory or returns an +// error. +func direntCache(fsc *sys.FSContext, fd int32) (*sys.DirentCache, experimentalsys.Errno) { + if f, ok := fsc.LookupFile(fd); !ok { + return nil, experimentalsys.EBADF + } else if dir, errno := f.DirentCache(); errno == 0 { + return dir, 0 + } else if errno == experimentalsys.ENOTDIR { + // fd_readdir docs don't indicate whether to return sys.ENOTDIR or + // sys.EBADF. It has been noticed that rust will crash on sys.ENOTDIR, + // and POSIX C ref seems to not return this, so we don't either. 
+ // + // See https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#fd_readdir + // and https://en.wikibooks.org/wiki/C_Programming/POSIX_Reference/dirent.h + return nil, experimentalsys.EBADF + } else { + return nil, errno + } +} + +// fdRenumber is the WASI function named FdRenumberName which atomically +// replaces a file descriptor by renumbering another file descriptor. +// +// See https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#-fd_renumberfd-fd-to-fd---errno +var fdRenumber = newHostFunc(wasip1.FdRenumberName, fdRenumberFn, []wasm.ValueType{i32, i32}, "fd", "to") + +func fdRenumberFn(_ context.Context, mod api.Module, params []uint64) experimentalsys.Errno { + fsc := mod.(*wasm.ModuleInstance).Sys.FS() + + from := int32(params[0]) + to := int32(params[1]) + + if errno := fsc.Renumber(from, to); errno != 0 { + return errno + } + return 0 +} + +// fdSeek is the WASI function named FdSeekName which moves the offset of a +// file descriptor. +// +// # Parameters +// +// - fd: file descriptor to move the offset of +// - offset: signed int64, which is encoded as uint64, input argument to +// `whence`, which results in a new offset +// - whence: operator that creates the new offset, given `offset` bytes +// - If io.SeekStart, new offset == `offset`. +// - If io.SeekCurrent, new offset == existing offset + `offset`. +// - If io.SeekEnd, new offset == file size of `fd` + `offset`. +// - resultNewoffset: offset in api.Memory to write the new offset to, +// relative to start of the file +// +// Result (Errno) +// +// The return value is 0 except the following error conditions: +// - sys.EBADF: `fd` is invalid +// - sys.EFAULT: `resultNewoffset` points to an offset out of memory +// - sys.EINVAL: `whence` is an invalid value +// - sys.EIO: a file system error +// - sys.EISDIR: the file was a directory. 
+// +// For example, if fd 3 is a file with offset 0, and parameters fd=3, offset=4, +// whence=0 (=io.SeekStart), resultNewOffset=1, this function writes the below +// to api.Memory: +// +// uint64le +// +--------------------+ +// | | +// []byte{?, 4, 0, 0, 0, 0, 0, 0, 0, ? } +// resultNewoffset --^ +// +// Note: This is similar to `lseek` in POSIX. https://linux.die.net/man/3/lseek +// +// See io.Seeker +// and https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#fd_seek +var fdSeek = newHostFunc( + wasip1.FdSeekName, fdSeekFn, + []api.ValueType{i32, i64, i32, i32}, + "fd", "offset", "whence", "result.newoffset", +) + +func fdSeekFn(_ context.Context, mod api.Module, params []uint64) experimentalsys.Errno { + fsc := mod.(*wasm.ModuleInstance).Sys.FS() + fd := int32(params[0]) + offset := params[1] + whence := uint32(params[2]) + resultNewoffset := uint32(params[3]) + + if f, ok := fsc.LookupFile(fd); !ok { + return experimentalsys.EBADF + } else if isDir, _ := f.File.IsDir(); isDir { + return experimentalsys.EISDIR // POSIX doesn't forbid seeking a directory, but wasi-testsuite does. + } else if newOffset, errno := f.File.Seek(int64(offset), int(whence)); errno != 0 { + return errno + } else if !mod.Memory().WriteUint64Le(resultNewoffset, uint64(newOffset)) { + return experimentalsys.EFAULT + } + return 0 +} + +// fdSync is the WASI function named FdSyncName which synchronizes the data +// and metadata of a file to disk. 
+// +// See https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#-fd_syncfd-fd---errno +var fdSync = newHostFunc(wasip1.FdSyncName, fdSyncFn, []api.ValueType{i32}, "fd") + +func fdSyncFn(_ context.Context, mod api.Module, params []uint64) experimentalsys.Errno { + fsc := mod.(*wasm.ModuleInstance).Sys.FS() + fd := int32(params[0]) + + // Check to see if the file descriptor is available + if f, ok := fsc.LookupFile(fd); !ok { + return experimentalsys.EBADF + } else { + return f.File.Sync() + } +} + +// fdTell is the WASI function named FdTellName which returns the current +// offset of a file descriptor. +// +// See https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#-fd_tellfd-fd---errno-filesize +var fdTell = newHostFunc(wasip1.FdTellName, fdTellFn, []api.ValueType{i32, i32}, "fd", "result.offset") + +func fdTellFn(ctx context.Context, mod api.Module, params []uint64) experimentalsys.Errno { + fd := params[0] + offset := uint64(0) + whence := uint64(io.SeekCurrent) + resultNewoffset := params[1] + + fdSeekParams := []uint64{fd, offset, whence, resultNewoffset} + return fdSeekFn(ctx, mod, fdSeekParams) +} + +// fdWrite is the WASI function named FdWriteName which writes to a file +// descriptor. +// +// # Parameters +// +// - fd: an opened file descriptor to write data to +// - iovs: offset in api.Memory to read offset, size pairs representing the +// data to write to `fd` +// - Both offset and length are encoded as uint32le. 
+// - iovsCount: count of memory offset, size pairs to read sequentially +// starting at iovs +// - resultNwritten: offset in api.Memory to write the number of bytes +// written +// +// Result (Errno) +// +// The return value is 0 except the following error conditions: +// - sys.EBADF: `fd` is invalid +// - sys.EFAULT: `iovs` or `resultNwritten` point to an offset out of memory +// - sys.EIO: a file system error +// +// For example, this function needs to first read `iovs` to determine what to +// write to `fd`. If parameters iovs=1 iovsCount=2, this function reads two +// offset/length pairs from api.Memory: +// +// iovs[0] iovs[1] +// +---------------------+ +--------------------+ +// | uint32le uint32le| |uint32le uint32le| +// +---------+ +--------+ +--------+ +--------+ +// | | | | | | | | +// []byte{?, 18, 0, 0, 0, 4, 0, 0, 0, 23, 0, 0, 0, 2, 0, 0, 0, ?... } +// iovs --^ ^ ^ ^ +// | | | | +// offset --+ length --+ offset --+ length --+ +// +// This function reads those chunks api.Memory into the `fd` sequentially. +// +// iovs[0].length iovs[1].length +// +--------------+ +----+ +// | | | | +// []byte{ 0..16, ?, 'w', 'a', 'z', 'e', ?, 'r', 'o', ? } +// iovs[0].offset --^ ^ +// iovs[1].offset --+ +// +// Since "wazero" was written, if parameter resultNwritten=26, this function +// writes the below to api.Memory: +// +// uint32le +// +--------+ +// | | +// []byte{ 0..24, ?, 6, 0, 0, 0', ? } +// resultNwritten --^ +// +// Note: This is similar to `writev` in POSIX. 
https://linux.die.net/man/3/writev +// +// See fdRead +// https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#ciovec +// and https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#fd_write +var fdWrite = newHostFunc( + wasip1.FdWriteName, fdWriteFn, + []api.ValueType{i32, i32, i32, i32}, + "fd", "iovs", "iovs_len", "result.nwritten", +) + +func fdWriteFn(_ context.Context, mod api.Module, params []uint64) experimentalsys.Errno { + return fdWriteOrPwrite(mod, params, false) +} + +// pwriter tracks an offset across multiple writes. +type pwriter struct { + f experimentalsys.File + offset int64 +} + +// Write implements the same function as documented on sys.File. +func (w *pwriter) Write(buf []byte) (n int, errno experimentalsys.Errno) { + if len(buf) == 0 { + return 0, 0 // less overhead on zero-length writes. + } + + n, err := w.f.Pwrite(buf, w.offset) + w.offset += int64(n) + return n, err +} + +func fdWriteOrPwrite(mod api.Module, params []uint64, isPwrite bool) experimentalsys.Errno { + mem := mod.Memory() + fsc := mod.(*wasm.ModuleInstance).Sys.FS() + + fd := int32(params[0]) + iovs := uint32(params[1]) + iovsCount := uint32(params[2]) + + var resultNwritten uint32 + var writer func(buf []byte) (n int, errno experimentalsys.Errno) + if f, ok := fsc.LookupFile(fd); !ok { + return experimentalsys.EBADF + } else if isPwrite { + offset := int64(params[3]) + writer = (&pwriter{f: f.File, offset: offset}).Write + resultNwritten = uint32(params[4]) + } else { + writer = f.File.Write + resultNwritten = uint32(params[3]) + } + + nwritten, errno := writev(mem, iovs, iovsCount, writer) + if errno != 0 { + return errno + } + + if !mod.Memory().WriteUint32Le(resultNwritten, nwritten) { + return experimentalsys.EFAULT + } + return 0 +} + +func writev(mem api.Memory, iovs uint32, iovsCount uint32, writer func(buf []byte) (n int, errno experimentalsys.Errno)) (uint32, experimentalsys.Errno) { + var nwritten uint32 + iovsStop := 
iovsCount << 3 // iovsCount * 8 + iovsBuf, ok := mem.Read(iovs, iovsStop) + if !ok { + return 0, experimentalsys.EFAULT + } + + for iovsPos := uint32(0); iovsPos < iovsStop; iovsPos += 8 { + offset := le.Uint32(iovsBuf[iovsPos:]) + l := le.Uint32(iovsBuf[iovsPos+4:]) + + b, ok := mem.Read(offset, l) + if !ok { + return 0, experimentalsys.EFAULT + } + n, errno := writer(b) + nwritten += uint32(n) + if errno == experimentalsys.ENOSYS { + return 0, experimentalsys.EBADF // e.g. unimplemented for write + } else if errno != 0 { + return 0, errno + } + } + return nwritten, 0 +} + +// pathCreateDirectory is the WASI function named PathCreateDirectoryName which +// creates a directory. +// +// # Parameters +// +// - fd: file descriptor of a directory that `path` is relative to +// - path: offset in api.Memory to read the path string from +// - pathLen: length of `path` +// +// # Result (Errno) +// +// The return value is 0 except the following error conditions: +// - sys.EBADF: `fd` is invalid +// - sys.ENOENT: `path` does not exist. +// - sys.ENOTDIR: `path` is a file +// +// # Notes +// - This is similar to mkdirat in POSIX. 
+// See https://linux.die.net/man/2/mkdirat +// +// See https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#-path_create_directoryfd-fd-path-string---errno +var pathCreateDirectory = newHostFunc( + wasip1.PathCreateDirectoryName, pathCreateDirectoryFn, + []wasm.ValueType{i32, i32, i32}, + "fd", "path", "path_len", +) + +func pathCreateDirectoryFn(_ context.Context, mod api.Module, params []uint64) experimentalsys.Errno { + fsc := mod.(*wasm.ModuleInstance).Sys.FS() + + fd := int32(params[0]) + path := uint32(params[1]) + pathLen := uint32(params[2]) + + preopen, pathName, errno := atPath(fsc, mod.Memory(), fd, path, pathLen) + if errno != 0 { + return errno + } + + if errno = preopen.Mkdir(pathName, 0o700); errno != 0 { + return errno + } + + return 0 +} + +// pathFilestatGet is the WASI function named PathFilestatGetName which +// returns the stat attributes of a file or directory. +// +// # Parameters +// +// - fd: file descriptor of the folder to look in for the path +// - flags: flags determining the method of how paths are resolved +// - path: path under fd to get the filestat attributes data for +// - path_len: length of the path that was given +// - resultFilestat: offset to write the result filestat data +// +// Result (Errno) +// +// The return value is 0 except the following error conditions: +// - sys.EBADF: `fd` is invalid +// - sys.ENOTDIR: `fd` points to a file not a directory +// - sys.EIO: could not stat `fd` on filesystem +// - sys.EINVAL: the path contained "../" +// - sys.ENAMETOOLONG: `path` + `path_len` is out of memory +// - sys.EFAULT: `resultFilestat` points to an offset out of memory +// - sys.ENOENT: could not find the path +// +// The rest of this implementation matches that of fdFilestatGet, so is not +// repeated here. +// +// Note: This is similar to `fstatat` in POSIX. 
+// See https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#-path_filestat_getfd-fd-flags-lookupflags-path-string---errno-filestat +// and https://linux.die.net/man/2/fstatat +var pathFilestatGet = newHostFunc( + wasip1.PathFilestatGetName, pathFilestatGetFn, + []api.ValueType{i32, i32, i32, i32, i32}, + "fd", "flags", "path", "path_len", "result.filestat", +) + +func pathFilestatGetFn(_ context.Context, mod api.Module, params []uint64) experimentalsys.Errno { + fsc := mod.(*wasm.ModuleInstance).Sys.FS() + + fd := int32(params[0]) + flags := uint16(params[1]) + path := uint32(params[2]) + pathLen := uint32(params[3]) + + preopen, pathName, errno := atPath(fsc, mod.Memory(), fd, path, pathLen) + if errno != 0 { + return errno + } + + // Stat the file without allocating a file descriptor. + var st sysapi.Stat_t + + if (flags & wasip1.LOOKUP_SYMLINK_FOLLOW) == 0 { + st, errno = preopen.Lstat(pathName) + } else { + st, errno = preopen.Stat(pathName) + } + if errno != 0 { + return errno + } + + // Write the stat result to memory + resultBuf := uint32(params[4]) + buf, ok := mod.Memory().Read(resultBuf, 64) + if !ok { + return experimentalsys.EFAULT + } + + filetype := getWasiFiletype(st.Mode) + return writeFilestat(buf, &st, filetype) +} + +// pathFilestatSetTimes is the WASI function named PathFilestatSetTimesName +// which adjusts the timestamps of a file or directory. 
+// +// See https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#-path_filestat_set_timesfd-fd-flags-lookupflags-path-string-atim-timestamp-mtim-timestamp-fst_flags-fstflags---errno +var pathFilestatSetTimes = newHostFunc( + wasip1.PathFilestatSetTimesName, pathFilestatSetTimesFn, + []wasm.ValueType{i32, i32, i32, i32, i64, i64, i32}, + "fd", "flags", "path", "path_len", "atim", "mtim", "fst_flags", +) + +func pathFilestatSetTimesFn(_ context.Context, mod api.Module, params []uint64) experimentalsys.Errno { + fd := int32(params[0]) + flags := uint16(params[1]) + path := uint32(params[2]) + pathLen := uint32(params[3]) + atim := int64(params[4]) + mtim := int64(params[5]) + fstFlags := uint16(params[6]) + + sys := mod.(*wasm.ModuleInstance).Sys + fsc := sys.FS() + + atim, mtim, errno := toTimes(sys.WalltimeNanos, atim, mtim, fstFlags) + if errno != 0 { + return errno + } + + preopen, pathName, errno := atPath(fsc, mod.Memory(), fd, path, pathLen) + if errno != 0 { + return errno + } + + symlinkFollow := flags&wasip1.LOOKUP_SYMLINK_FOLLOW != 0 + if symlinkFollow { + return preopen.Utimens(pathName, atim, mtim) + } + // Otherwise, we need to emulate don't follow by opening the file by path. + if f, errno := preopen.OpenFile(pathName, experimentalsys.O_WRONLY, 0); errno != 0 { + return errno + } else { + defer f.Close() + return f.Utimens(atim, mtim) + } +} + +// pathLink is the WASI function named PathLinkName which adjusts the +// timestamps of a file or directory. 
+// +// See https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#path_link +var pathLink = newHostFunc( + wasip1.PathLinkName, pathLinkFn, + []wasm.ValueType{i32, i32, i32, i32, i32, i32, i32}, + "old_fd", "old_flags", "old_path", "old_path_len", "new_fd", "new_path", "new_path_len", +) + +func pathLinkFn(_ context.Context, mod api.Module, params []uint64) experimentalsys.Errno { + mem := mod.Memory() + fsc := mod.(*wasm.ModuleInstance).Sys.FS() + + oldFD := int32(params[0]) + // TODO: use old_flags? + _ = uint32(params[1]) + oldPath := uint32(params[2]) + oldPathLen := uint32(params[3]) + + oldFS, oldName, errno := atPath(fsc, mem, oldFD, oldPath, oldPathLen) + if errno != 0 { + return errno + } + + newFD := int32(params[4]) + newPath := uint32(params[5]) + newPathLen := uint32(params[6]) + + newFS, newName, errno := atPath(fsc, mem, newFD, newPath, newPathLen) + if errno != 0 { + return errno + } + + if oldFS != newFS { // TODO: handle link across filesystems + return experimentalsys.ENOSYS + } + + return oldFS.Link(oldName, newName) +} + +// pathOpen is the WASI function named PathOpenName which opens a file or +// directory. This returns sys.EBADF if the fd is invalid. +// +// # Parameters +// +// - fd: file descriptor of a directory that `path` is relative to +// - dirflags: flags to indicate how to resolve `path` +// - path: offset in api.Memory to read the path string from +// - pathLen: length of `path` +// - oFlags: open flags to indicate the method by which to open the file +// - fsRightsBase: interpret RIGHT_FD_WRITE to set O_RDWR +// - fsRightsInheriting: ignored as rights were removed from WASI. +// created file descriptor for `path` +// - fdFlags: file descriptor flags +// - resultOpenedFD: offset in api.Memory to write the newly created file +// descriptor to. 
+// - The result FD value is guaranteed to be less than 2**31 +// +// Result (Errno) +// +// The return value is 0 except the following error conditions: +// - sys.EBADF: `fd` is invalid +// - sys.EFAULT: `resultOpenedFD` points to an offset out of memory +// - sys.ENOENT: `path` does not exist. +// - sys.EEXIST: `path` exists, while `oFlags` requires that it must not. +// - sys.ENOTDIR: `path` is not a directory, while `oFlags` requires it. +// - sys.EIO: a file system error +// +// For example, this function needs to first read `path` to determine the file +// to open. If parameters `path` = 1, `pathLen` = 6, and the path is "wazero", +// pathOpen reads the path from api.Memory: +// +// pathLen +// +------------------------+ +// | | +// []byte{ ?, 'w', 'a', 'z', 'e', 'r', 'o', ?... } +// path --^ +// +// Then, if parameters resultOpenedFD = 8, and this function opened a new file +// descriptor 5 with the given flags, this function writes the below to +// api.Memory: +// +// uint32le +// +--------+ +// | | +// []byte{ 0..6, ?, 5, 0, 0, 0, ?} +// resultOpenedFD --^ +// +// # Notes +// - This is similar to `openat` in POSIX. 
https://linux.die.net/man/3/openat +// - The returned file descriptor is not guaranteed to be the lowest-number +// +// See https://github.com/WebAssembly/WASI/blob/main/phases/snapshot/docs.md#path_open +var pathOpen = newHostFunc( + wasip1.PathOpenName, pathOpenFn, + []api.ValueType{i32, i32, i32, i32, i32, i64, i64, i32, i32}, + "fd", "dirflags", "path", "path_len", "oflags", "fs_rights_base", "fs_rights_inheriting", "fdflags", "result.opened_fd", +) + +func pathOpenFn(_ context.Context, mod api.Module, params []uint64) experimentalsys.Errno { + fsc := mod.(*wasm.ModuleInstance).Sys.FS() + + preopenFD := int32(params[0]) + + // TODO: dirflags is a lookupflags, and it only has one bit: symlink_follow + // https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#lookupflags + dirflags := uint16(params[1]) + + path := uint32(params[2]) + pathLen := uint32(params[3]) + + oflags := uint16(params[4]) + + rights := uint32(params[5]) + // inherited rights aren't used + _ = params[6] + + fdflags := uint16(params[7]) + resultOpenedFD := uint32(params[8]) + + preopen, pathName, errno := atPath(fsc, mod.Memory(), preopenFD, path, pathLen) + if errno != 0 { + return errno + } + + if pathLen == 0 { + return experimentalsys.EINVAL + } + + fileOpenFlags := openFlags(dirflags, oflags, fdflags, rights) + isDir := fileOpenFlags&experimentalsys.O_DIRECTORY != 0 + + if isDir && oflags&wasip1.O_CREAT != 0 { + return experimentalsys.EINVAL // use pathCreateDirectory! + } + + newFD, errno := fsc.OpenFile(preopen, pathName, fileOpenFlags, 0o600) + if errno != 0 { + return errno + } + + // Check any flags that require the file to evaluate. 
+ if isDir { + if f, ok := fsc.LookupFile(newFD); !ok { + return experimentalsys.EBADF // unexpected + } else if isDir, errno := f.File.IsDir(); errno != 0 { + _ = fsc.CloseFile(newFD) + return errno + } else if !isDir { + _ = fsc.CloseFile(newFD) + return experimentalsys.ENOTDIR + } + } + + if !mod.Memory().WriteUint32Le(resultOpenedFD, uint32(newFD)) { + _ = fsc.CloseFile(newFD) + return experimentalsys.EFAULT + } + return 0 +} + +// atPath returns the pre-open specific path after verifying it is a directory. +// +// # Notes +// +// Languages including Zig and Rust use only pre-opens for the FD because +// wasi-libc `__wasilibc_find_relpath` will only return a preopen. That said, +// our wasi.c example shows other languages act differently and can use a non +// pre-opened file descriptor. +// +// We don't handle `AT_FDCWD`, as that's resolved in the compiler. There's no +// working directory function in WASI, so most assume CWD is "/". Notably, Zig +// has different behavior which assumes it is whatever the first pre-open name +// is. +// +// See https://github.com/WebAssembly/wasi-libc/blob/659ff414560721b1660a19685110e484a081c3d4/libc-bottom-half/sources/at_fdcwd.c +// See https://linux.die.net/man/2/openat +func atPath(fsc *sys.FSContext, mem api.Memory, fd int32, p, pathLen uint32) (experimentalsys.FS, string, experimentalsys.Errno) { + b, ok := mem.Read(p, pathLen) + if !ok { + return nil, "", experimentalsys.EFAULT + } + pathName := string(b) + + // interesting_paths wants us to break on trailing slash if the input ends + // up a file, not a directory! + hasTrailingSlash := strings.HasSuffix(pathName, "/") + + // interesting_paths includes paths that include relative links but end up + // not escaping + pathName = path.Clean(pathName) + + // interesting_paths wants to break on root paths or anything that escapes. 
+ // This part is the same as fs.FS.Open() + if !fs.ValidPath(pathName) { + return nil, "", experimentalsys.EPERM + } + + // add the trailing slash back + if hasTrailingSlash { + pathName = pathName + "/" + } + + if f, ok := fsc.LookupFile(fd); !ok { + return nil, "", experimentalsys.EBADF // closed or invalid + } else if isDir, errno := f.File.IsDir(); errno != 0 { + return nil, "", errno + } else if !isDir { + return nil, "", experimentalsys.ENOTDIR + } else if f.IsPreopen { // don't append the pre-open name + return f.FS, pathName, 0 + } else { + // Join via concat to avoid name conflict on path.Join + return f.FS, f.Name + "/" + pathName, 0 + } +} + +func preopenPath(fsc *sys.FSContext, fd int32) (string, experimentalsys.Errno) { + if f, ok := fsc.LookupFile(fd); !ok { + return "", experimentalsys.EBADF // closed + } else if !f.IsPreopen { + return "", experimentalsys.EBADF + } else if isDir, errno := f.File.IsDir(); errno != 0 || !isDir { + // In wasip1, only directories can be returned by fd_prestat_get as + // there are no prestat types defined for files or sockets. + return "", errno + } else { + return f.Name, 0 + } +} + +func openFlags(dirflags, oflags, fdflags uint16, rights uint32) (openFlags experimentalsys.Oflag) { + if dirflags&wasip1.LOOKUP_SYMLINK_FOLLOW == 0 { + openFlags |= experimentalsys.O_NOFOLLOW + } + if oflags&wasip1.O_DIRECTORY != 0 { + openFlags |= experimentalsys.O_DIRECTORY + } else if oflags&wasip1.O_EXCL != 0 { + openFlags |= experimentalsys.O_EXCL + } + // Because we don't implement rights, we partially rely on the open flags + // to determine the mode in which the file will be opened. 
This will create + // divergent behavior compared to WASI runtimes which have a more strict + // interpretation of the WASI capabilities model; for example, a program + // which sets O_CREAT but does not give read or write permissions will + // successfully create a file when running with wazero, but might get a + // permission denied error on other runtimes. + defaultMode := experimentalsys.O_RDONLY + if oflags&wasip1.O_TRUNC != 0 { + openFlags |= experimentalsys.O_TRUNC + defaultMode = experimentalsys.O_RDWR + } + if oflags&wasip1.O_CREAT != 0 { + openFlags |= experimentalsys.O_CREAT + defaultMode = experimentalsys.O_RDWR + } + if fdflags&wasip1.FD_NONBLOCK != 0 { + openFlags |= experimentalsys.O_NONBLOCK + } + if fdflags&wasip1.FD_APPEND != 0 { + openFlags |= experimentalsys.O_APPEND + defaultMode = experimentalsys.O_RDWR + } + if fdflags&wasip1.FD_DSYNC != 0 { + openFlags |= experimentalsys.O_DSYNC + } + if fdflags&wasip1.FD_RSYNC != 0 { + openFlags |= experimentalsys.O_RSYNC + } + if fdflags&wasip1.FD_SYNC != 0 { + openFlags |= experimentalsys.O_SYNC + } + + // Since rights were discontinued in wasi, we only interpret RIGHT_FD_WRITE + // because it is the only way to know that we need to set write permissions + // on a file if the application did not pass any of O_CREAT, O_APPEND, nor + // O_TRUNC. + const r = wasip1.RIGHT_FD_READ + const w = wasip1.RIGHT_FD_WRITE + const rw = r | w + switch { + case (rights & rw) == rw: + openFlags |= experimentalsys.O_RDWR + case (rights & w) == w: + openFlags |= experimentalsys.O_WRONLY + case (rights & r) == r: + openFlags |= experimentalsys.O_RDONLY + default: + openFlags |= defaultMode + } + return +} + +// pathReadlink is the WASI function named PathReadlinkName that reads the +// contents of a symbolic link. 
+// +// See: https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#-path_readlinkfd-fd-path-string-buf-pointeru8-buf_len-size---errno-size +var pathReadlink = newHostFunc( + wasip1.PathReadlinkName, pathReadlinkFn, + []wasm.ValueType{i32, i32, i32, i32, i32, i32}, + "fd", "path", "path_len", "buf", "buf_len", "result.bufused", +) + +func pathReadlinkFn(_ context.Context, mod api.Module, params []uint64) experimentalsys.Errno { + fsc := mod.(*wasm.ModuleInstance).Sys.FS() + + fd := int32(params[0]) + path := uint32(params[1]) + pathLen := uint32(params[2]) + buf := uint32(params[3]) + bufLen := uint32(params[4]) + resultBufused := uint32(params[5]) + + if pathLen == 0 || bufLen == 0 { + return experimentalsys.EINVAL + } + + mem := mod.Memory() + preopen, p, errno := atPath(fsc, mem, fd, path, pathLen) + if errno != 0 { + return errno + } + + dst, errno := preopen.Readlink(p) + if errno != 0 { + return errno + } + + if len(dst) > int(bufLen) { + return experimentalsys.ERANGE + } + + if ok := mem.WriteString(buf, dst); !ok { + return experimentalsys.EFAULT + } + + if !mem.WriteUint32Le(resultBufused, uint32(len(dst))) { + return experimentalsys.EFAULT + } + return 0 +} + +// pathRemoveDirectory is the WASI function named PathRemoveDirectoryName which +// removes a directory. +// +// # Parameters +// +// - fd: file descriptor of a directory that `path` is relative to +// - path: offset in api.Memory to read the path string from +// - pathLen: length of `path` +// +// # Result (Errno) +// +// The return value is 0 except the following error conditions: +// - sys.EBADF: `fd` is invalid +// - sys.ENOENT: `path` does not exist. +// - sys.ENOTEMPTY: `path` is not empty +// - sys.ENOTDIR: `path` is a file +// +// # Notes +// - This is similar to unlinkat with AT_REMOVEDIR in POSIX. 
+// See https://linux.die.net/man/2/unlinkat +// +// See https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#-path_remove_directoryfd-fd-path-string---errno +var pathRemoveDirectory = newHostFunc( + wasip1.PathRemoveDirectoryName, pathRemoveDirectoryFn, + []wasm.ValueType{i32, i32, i32}, + "fd", "path", "path_len", +) + +func pathRemoveDirectoryFn(_ context.Context, mod api.Module, params []uint64) experimentalsys.Errno { + fsc := mod.(*wasm.ModuleInstance).Sys.FS() + + fd := int32(params[0]) + path := uint32(params[1]) + pathLen := uint32(params[2]) + + preopen, pathName, errno := atPath(fsc, mod.Memory(), fd, path, pathLen) + if errno != 0 { + return errno + } + + return preopen.Rmdir(pathName) +} + +// pathRename is the WASI function named PathRenameName which renames a file or +// directory. +// +// # Parameters +// +// - fd: file descriptor of a directory that `old_path` is relative to +// - old_path: offset in api.Memory to read the old path string from +// - old_path_len: length of `old_path` +// - new_fd: file descriptor of a directory that `new_path` is relative to +// - new_path: offset in api.Memory to read the new path string from +// - new_path_len: length of `new_path` +// +// # Result (Errno) +// +// The return value is 0 except the following error conditions: +// - sys.EBADF: `fd` or `new_fd` are invalid +// - sys.ENOENT: `old_path` does not exist. +// - sys.ENOTDIR: `old` is a directory and `new` exists, but is a file. +// - sys.EISDIR: `old` is a file and `new` exists, but is a directory. +// +// # Notes +// - This is similar to unlinkat in POSIX. 
+// See https://linux.die.net/man/2/renameat +// +// See https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#-path_renamefd-fd-old_path-string-new_fd-fd-new_path-string---errno +var pathRename = newHostFunc( + wasip1.PathRenameName, pathRenameFn, + []wasm.ValueType{i32, i32, i32, i32, i32, i32}, + "fd", "old_path", "old_path_len", "new_fd", "new_path", "new_path_len", +) + +func pathRenameFn(_ context.Context, mod api.Module, params []uint64) experimentalsys.Errno { + fsc := mod.(*wasm.ModuleInstance).Sys.FS() + + fd := int32(params[0]) + oldPath := uint32(params[1]) + oldPathLen := uint32(params[2]) + + newFD := int32(params[3]) + newPath := uint32(params[4]) + newPathLen := uint32(params[5]) + + oldFS, oldPathName, errno := atPath(fsc, mod.Memory(), fd, oldPath, oldPathLen) + if errno != 0 { + return errno + } + + newFS, newPathName, errno := atPath(fsc, mod.Memory(), newFD, newPath, newPathLen) + if errno != 0 { + return errno + } + + if oldFS != newFS { // TODO: handle renames across filesystems + return experimentalsys.ENOSYS + } + + return oldFS.Rename(oldPathName, newPathName) +} + +// pathSymlink is the WASI function named PathSymlinkName which creates a +// symbolic link. 
+// +// See https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#path_symlink +var pathSymlink = newHostFunc( + wasip1.PathSymlinkName, pathSymlinkFn, + []wasm.ValueType{i32, i32, i32, i32, i32}, + "old_path", "old_path_len", "fd", "new_path", "new_path_len", +) + +func pathSymlinkFn(_ context.Context, mod api.Module, params []uint64) experimentalsys.Errno { + fsc := mod.(*wasm.ModuleInstance).Sys.FS() + + oldPath := uint32(params[0]) + oldPathLen := uint32(params[1]) + fd := int32(params[2]) + newPath := uint32(params[3]) + newPathLen := uint32(params[4]) + + mem := mod.Memory() + + dir, ok := fsc.LookupFile(fd) + if !ok { + return experimentalsys.EBADF // closed + } else if isDir, errno := dir.File.IsDir(); errno != 0 { + return errno + } else if !isDir { + return experimentalsys.ENOTDIR + } + + if oldPathLen == 0 || newPathLen == 0 { + return experimentalsys.EINVAL + } + + oldPathBuf, ok := mem.Read(oldPath, oldPathLen) + if !ok { + return experimentalsys.EFAULT + } + + _, newPathName, errno := atPath(fsc, mod.Memory(), fd, newPath, newPathLen) + if errno != 0 { + return errno + } + + return dir.FS.Symlink( + // Do not join old path since it's only resolved when dereference the link created here. + // And the dereference result depends on the opening directory's file descriptor at that point. + bufToStr(oldPathBuf), + newPathName, + ) +} + +// bufToStr converts the given byte slice as string unsafely. +func bufToStr(buf []byte) string { + // TODO: use unsafe.String after flooring Go 1.20. + return *(*string)(unsafe.Pointer(&buf)) +} + +// pathUnlinkFile is the WASI function named PathUnlinkFileName which unlinks a +// file. 
+// +// # Parameters +// +// - fd: file descriptor of a directory that `path` is relative to +// - path: offset in api.Memory to read the path string from +// - pathLen: length of `path` +// +// # Result (Errno) +// +// The return value is 0 except the following error conditions: +// - sys.EBADF: `fd` is invalid +// - sys.ENOENT: `path` does not exist. +// - sys.EISDIR: `path` is a directory +// +// # Notes +// - This is similar to unlinkat without AT_REMOVEDIR in POSIX. +// See https://linux.die.net/man/2/unlinkat +// +// See https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#-path_unlink_filefd-fd-path-string---errno +var pathUnlinkFile = newHostFunc( + wasip1.PathUnlinkFileName, pathUnlinkFileFn, + []wasm.ValueType{i32, i32, i32}, + "fd", "path", "path_len", +) + +func pathUnlinkFileFn(_ context.Context, mod api.Module, params []uint64) experimentalsys.Errno { + fsc := mod.(*wasm.ModuleInstance).Sys.FS() + + fd := int32(params[0]) + path := uint32(params[1]) + pathLen := uint32(params[2]) + + preopen, pathName, errno := atPath(fsc, mod.Memory(), fd, path, pathLen) + if errno != 0 { + return errno + } + + return preopen.Unlink(pathName) +} diff --git a/vendor/github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1/poll.go b/vendor/github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1/poll.go new file mode 100644 index 000000000..d09f30245 --- /dev/null +++ b/vendor/github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1/poll.go @@ -0,0 +1,239 @@ +package wasi_snapshot_preview1 + +import ( + "context" + "time" + + "github.com/tetratelabs/wazero/api" + "github.com/tetratelabs/wazero/experimental/sys" + "github.com/tetratelabs/wazero/internal/fsapi" + internalsys "github.com/tetratelabs/wazero/internal/sys" + "github.com/tetratelabs/wazero/internal/wasip1" + "github.com/tetratelabs/wazero/internal/wasm" +) + +// pollOneoff is the WASI function named PollOneoffName that concurrently +// polls for the occurrence of a set of 
events. +// +// # Parameters +// +// - in: pointer to the subscriptions (48 bytes each) +// - out: pointer to the resulting events (32 bytes each) +// - nsubscriptions: count of subscriptions, zero returns sys.EINVAL. +// - resultNevents: count of events. +// +// Result (Errno) +// +// The return value is 0 except the following error conditions: +// - sys.EINVAL: the parameters are invalid +// - sys.ENOTSUP: a parameters is valid, but not yet supported. +// - sys.EFAULT: there is not enough memory to read the subscriptions or +// write results. +// +// # Notes +// +// - Since the `out` pointer nests Errno, the result is always 0. +// - This is similar to `poll` in POSIX. +// +// See https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#poll_oneoff +// See https://linux.die.net/man/3/poll +var pollOneoff = newHostFunc( + wasip1.PollOneoffName, pollOneoffFn, + []api.ValueType{i32, i32, i32, i32}, + "in", "out", "nsubscriptions", "result.nevents", +) + +type event struct { + eventType byte + userData []byte + errno wasip1.Errno +} + +func pollOneoffFn(_ context.Context, mod api.Module, params []uint64) sys.Errno { + in := uint32(params[0]) + out := uint32(params[1]) + nsubscriptions := uint32(params[2]) + resultNevents := uint32(params[3]) + + if nsubscriptions == 0 { + return sys.EINVAL + } + + mem := mod.Memory() + + // Ensure capacity prior to the read loop to reduce error handling. + inBuf, ok := mem.Read(in, nsubscriptions*48) + if !ok { + return sys.EFAULT + } + outBuf, ok := mem.Read(out, nsubscriptions*32) + // zero-out all buffer before writing + for i := range outBuf { + outBuf[i] = 0 + } + + if !ok { + return sys.EFAULT + } + + // Eagerly write the number of events which will equal subscriptions unless + // there's a fault in parsing (not processing). + if !mod.Memory().WriteUint32Le(resultNevents, nsubscriptions) { + return sys.EFAULT + } + + // Loop through all subscriptions and write their output. 
+ + // Extract FS context, used in the body of the for loop for FS access. + fsc := mod.(*wasm.ModuleInstance).Sys.FS() + // Slice of events that are processed out of the loop (blocking stdin subscribers). + var blockingStdinSubs []*event + // The timeout is initialized at max Duration, the loop will find the minimum. + var timeout time.Duration = 1<<63 - 1 + // Count of all the subscriptions that have been already written back to outBuf. + // nevents*32 returns at all times the offset where the next event should be written: + // this way we ensure that there are no gaps between records. + nevents := uint32(0) + + // Layout is subscription_u: Union + // https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#subscription_u + for i := uint32(0); i < nsubscriptions; i++ { + inOffset := i * 48 + outOffset := nevents * 32 + + eventType := inBuf[inOffset+8] // +8 past userdata + // +8 past userdata +8 contents_offset + argBuf := inBuf[inOffset+8+8:] + userData := inBuf[inOffset : inOffset+8] + + evt := &event{ + eventType: eventType, + userData: userData, + errno: wasip1.ErrnoSuccess, + } + + switch eventType { + case wasip1.EventTypeClock: // handle later + newTimeout, err := processClockEvent(argBuf) + if err != 0 { + return err + } + // Min timeout. + if newTimeout < timeout { + timeout = newTimeout + } + // Ack the clock event to the outBuf. + writeEvent(outBuf[outOffset:], evt) + nevents++ + case wasip1.EventTypeFdRead: + fd := int32(le.Uint32(argBuf)) + if fd < 0 { + return sys.EBADF + } + if file, ok := fsc.LookupFile(fd); !ok { + evt.errno = wasip1.ErrnoBadf + writeEvent(outBuf[outOffset:], evt) + nevents++ + } else if fd != internalsys.FdStdin && file.File.IsNonblock() { + writeEvent(outBuf[outOffset:], evt) + nevents++ + } else { + // if the fd is Stdin, and it is in blocking mode, + // do not ack yet, append to a slice for delayed evaluation. 
+ blockingStdinSubs = append(blockingStdinSubs, evt) + } + case wasip1.EventTypeFdWrite: + fd := int32(le.Uint32(argBuf)) + if fd < 0 { + return sys.EBADF + } + if _, ok := fsc.LookupFile(fd); ok { + evt.errno = wasip1.ErrnoNotsup + } else { + evt.errno = wasip1.ErrnoBadf + } + nevents++ + writeEvent(outBuf[outOffset:], evt) + default: + return sys.EINVAL + } + } + + sysCtx := mod.(*wasm.ModuleInstance).Sys + if nevents == nsubscriptions { + // We already wrote back all the results. We already wrote this number + // earlier to offset `resultNevents`. + // We only need to observe the timeout (nonzero if there are clock subscriptions) + // and return. + if timeout > 0 { + sysCtx.Nanosleep(int64(timeout)) + } + return 0 + } + + // If there are blocking stdin subscribers, check for data with given timeout. + stdin, ok := fsc.LookupFile(internalsys.FdStdin) + if !ok { + return sys.EBADF + } + // Wait for the timeout to expire, or for some data to become available on Stdin. + + if stdinReady, errno := stdin.File.Poll(fsapi.POLLIN, int32(timeout.Milliseconds())); errno != 0 { + return errno + } else if stdinReady { + // stdin has data ready to for reading, write back all the events + for i := range blockingStdinSubs { + evt := blockingStdinSubs[i] + evt.errno = 0 + writeEvent(outBuf[nevents*32:], evt) + nevents++ + } + } + + if nevents != nsubscriptions { + if !mod.Memory().WriteUint32Le(resultNevents, nevents) { + return sys.EFAULT + } + } + + return 0 +} + +// processClockEvent supports only relative name events, as that's what's used +// to implement sleep in various compilers including Rust, Zig and TinyGo. 
+func processClockEvent(inBuf []byte) (time.Duration, sys.Errno) { + _ /* ID */ = le.Uint32(inBuf[0:8]) // See below + timeout := le.Uint64(inBuf[8:16]) // nanos if relative + _ /* precision */ = le.Uint64(inBuf[16:24]) // Unused + flags := le.Uint16(inBuf[24:32]) + + var err sys.Errno + // subclockflags has only one flag defined: subscription_clock_abstime + switch flags { + case 0: // relative time + case 1: // subscription_clock_abstime + err = sys.ENOTSUP + default: // subclockflags has only one flag defined. + err = sys.EINVAL + } + + if err != 0 { + return 0, err + } else { + // https://linux.die.net/man/3/clock_settime says relative timers are + // unaffected. Since this function only supports relative timeout, we can + // skip name ID validation and use a single sleep function. + + return time.Duration(timeout), 0 + } +} + +// writeEvent writes the event corresponding to the processed subscription. +// https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#-event-struct +func writeEvent(outBuf []byte, evt *event) { + copy(outBuf, evt.userData) // userdata + outBuf[8] = byte(evt.errno) // uint16, but safe as < 255 + outBuf[9] = 0 + le.PutUint32(outBuf[10:], uint32(evt.eventType)) + // TODO: When FD events are supported, write outOffset+16 +} diff --git a/vendor/github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1/proc.go b/vendor/github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1/proc.go new file mode 100644 index 000000000..cb0ab487c --- /dev/null +++ b/vendor/github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1/proc.go @@ -0,0 +1,44 @@ +package wasi_snapshot_preview1 + +import ( + "context" + + "github.com/tetratelabs/wazero/api" + "github.com/tetratelabs/wazero/internal/wasip1" + "github.com/tetratelabs/wazero/internal/wasm" + "github.com/tetratelabs/wazero/sys" +) + +// procExit is the WASI function named ProcExitName that terminates the +// execution of the module with an exit code. 
The only successful exit code is +// zero. +// +// # Parameters +// +// - exitCode: exit code. +// +// See https://github.com/WebAssembly/WASI/blob/main/phases/snapshot/docs.md#proc_exit +var procExit = &wasm.HostFunc{ + ExportName: wasip1.ProcExitName, + Name: wasip1.ProcExitName, + ParamTypes: []api.ValueType{i32}, + ParamNames: []string{"rval"}, + Code: wasm.Code{GoFunc: api.GoModuleFunc(procExitFn)}, +} + +func procExitFn(ctx context.Context, mod api.Module, params []uint64) { + exitCode := uint32(params[0]) + + // Ensure other callers see the exit code. + _ = mod.CloseWithExitCode(ctx, exitCode) + + // Prevent any code from executing after this function. For example, LLVM + // inserts unreachable instructions after calls to exit. + // See: https://github.com/emscripten-core/emscripten/issues/12322 + panic(sys.NewExitError(exitCode)) +} + +// procRaise is stubbed and will never be supported, as it was removed. +// +// See https://github.com/WebAssembly/WASI/pull/136 +var procRaise = stubFunction(wasip1.ProcRaiseName, []api.ValueType{i32}, "sig") diff --git a/vendor/github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1/random.go b/vendor/github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1/random.go new file mode 100644 index 000000000..e4d7ccee1 --- /dev/null +++ b/vendor/github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1/random.go @@ -0,0 +1,55 @@ +package wasi_snapshot_preview1 + +import ( + "context" + "io" + + "github.com/tetratelabs/wazero/api" + "github.com/tetratelabs/wazero/experimental/sys" + "github.com/tetratelabs/wazero/internal/wasip1" + "github.com/tetratelabs/wazero/internal/wasm" +) + +// randomGet is the WASI function named RandomGetName which writes random +// data to a buffer. 
+// +// # Parameters +// +// - buf: api.Memory offset to write random values +// - bufLen: size of random data in bytes +// +// Result (Errno) +// +// The return value is ErrnoSuccess except the following error conditions: +// - sys.EFAULT: `buf` or `bufLen` point to an offset out of memory +// - sys.EIO: a file system error +// +// For example, if underlying random source was seeded like +// `rand.NewSource(42)`, we expect api.Memory to contain: +// +// bufLen (5) +// +--------------------------+ +// | | +// []byte{?, 0x53, 0x8c, 0x7f, 0x96, 0xb1, ?} +// buf --^ +// +// See https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#-random_getbuf-pointeru8-bufLen-size---errno +var randomGet = newHostFunc(wasip1.RandomGetName, randomGetFn, []api.ValueType{i32, i32}, "buf", "buf_len") + +func randomGetFn(_ context.Context, mod api.Module, params []uint64) sys.Errno { + sysCtx := mod.(*wasm.ModuleInstance).Sys + randSource := sysCtx.RandSource() + buf, bufLen := uint32(params[0]), uint32(params[1]) + + randomBytes, ok := mod.Memory().Read(buf, bufLen) + if !ok { // out-of-range + return sys.EFAULT + } + + // We can ignore the returned n as it only != byteCount on error + if _, err := io.ReadAtLeast(randSource, randomBytes, int(bufLen)); err != nil { + return sys.EIO + } + + return 0 +} diff --git a/vendor/github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1/sched.go b/vendor/github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1/sched.go new file mode 100644 index 000000000..86748e6d6 --- /dev/null +++ b/vendor/github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1/sched.go @@ -0,0 +1,22 @@ +package wasi_snapshot_preview1 + +import ( + "context" + + "github.com/tetratelabs/wazero/api" + "github.com/tetratelabs/wazero/experimental/sys" + "github.com/tetratelabs/wazero/internal/wasip1" + "github.com/tetratelabs/wazero/internal/wasm" +) + +// schedYield is the WASI function named SchedYieldName which temporarily +// yields execution 
of the calling thread. +// +// See https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#-sched_yield---errno +var schedYield = newHostFunc(wasip1.SchedYieldName, schedYieldFn, nil) + +func schedYieldFn(_ context.Context, mod api.Module, _ []uint64) sys.Errno { + sysCtx := mod.(*wasm.ModuleInstance).Sys + sysCtx.Osyield() + return 0 +} diff --git a/vendor/github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1/sock.go b/vendor/github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1/sock.go new file mode 100644 index 000000000..756c0d391 --- /dev/null +++ b/vendor/github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1/sock.go @@ -0,0 +1,188 @@ +package wasi_snapshot_preview1 + +import ( + "context" + + "github.com/tetratelabs/wazero/api" + "github.com/tetratelabs/wazero/experimental/sys" + socketapi "github.com/tetratelabs/wazero/internal/sock" + "github.com/tetratelabs/wazero/internal/sysfs" + "github.com/tetratelabs/wazero/internal/wasip1" + "github.com/tetratelabs/wazero/internal/wasm" +) + +// sockAccept is the WASI function named SockAcceptName which accepts a new +// incoming connection. 
+// +// See: https://github.com/WebAssembly/WASI/blob/0ba0c5e2e37625ca5a6d3e4255a998dfaa3efc52/phases/snapshot/docs.md#sock_accept +// and https://github.com/WebAssembly/WASI/pull/458 +var sockAccept = newHostFunc( + wasip1.SockAcceptName, + sockAcceptFn, + []wasm.ValueType{i32, i32, i32}, + "fd", "flags", "result.fd", +) + +func sockAcceptFn(_ context.Context, mod api.Module, params []uint64) (errno sys.Errno) { + mem := mod.Memory() + fsc := mod.(*wasm.ModuleInstance).Sys.FS() + + fd := int32(params[0]) + flags := uint32(params[1]) + resultFd := uint32(params[2]) + nonblock := flags&uint32(wasip1.FD_NONBLOCK) != 0 + + var connFD int32 + if connFD, errno = fsc.SockAccept(fd, nonblock); errno == 0 { + mem.WriteUint32Le(resultFd, uint32(connFD)) + } + return +} + +// sockRecv is the WASI function named SockRecvName which receives a +// message from a socket. +// +// See: https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#-sock_recvfd-fd-ri_data-iovec_array-ri_flags-riflags---errno-size-roflags +var sockRecv = newHostFunc( + wasip1.SockRecvName, + sockRecvFn, + []wasm.ValueType{i32, i32, i32, i32, i32, i32}, + "fd", "ri_data", "ri_data_len", "ri_flags", "result.ro_datalen", "result.ro_flags", +) + +func sockRecvFn(_ context.Context, mod api.Module, params []uint64) sys.Errno { + mem := mod.Memory() + fsc := mod.(*wasm.ModuleInstance).Sys.FS() + + fd := int32(params[0]) + riData := uint32(params[1]) + riDataCount := uint32(params[2]) + riFlags := uint8(params[3]) + resultRoDatalen := uint32(params[4]) + resultRoFlags := uint32(params[5]) + + var conn socketapi.TCPConn + if e, ok := fsc.LookupFile(fd); !ok { + return sys.EBADF // Not open + } else if conn, ok = e.File.(socketapi.TCPConn); !ok { + return sys.EBADF // Not a conn + } + + if riFlags & ^(wasip1.RI_RECV_PEEK|wasip1.RI_RECV_WAITALL) != 0 { + return sys.ENOTSUP + } + + if riFlags&wasip1.RI_RECV_PEEK != 0 { + // Each record in riData is of the form: + // type iovec struct { buf *uint8; 
bufLen uint32 } + // This means that the first `uint32` is a `buf *uint8`. + firstIovecBufAddr, ok := mem.ReadUint32Le(riData) + if !ok { + return sys.EINVAL + } + // Read bufLen + firstIovecBufLen, ok := mem.ReadUint32Le(riData + 4) + if !ok { + return sys.EINVAL + } + firstIovecBuf, ok := mem.Read(firstIovecBufAddr, firstIovecBufLen) + if !ok { + return sys.EINVAL + } + n, err := conn.Recvfrom(firstIovecBuf, sysfs.MSG_PEEK) + if err != 0 { + return err + } + mem.WriteUint32Le(resultRoDatalen, uint32(n)) + mem.WriteUint16Le(resultRoFlags, 0) + return 0 + } + + // If riFlags&wasip1.RECV_WAITALL != 0 then we should + // do a blocking operation until all data has been retrieved; + // otherwise we are able to return earlier. + // For simplicity, we currently wait all regardless the flag. + bufSize, errno := readv(mem, riData, riDataCount, conn.Read) + if errno != 0 { + return errno + } + mem.WriteUint32Le(resultRoDatalen, bufSize) + mem.WriteUint16Le(resultRoFlags, 0) + return 0 +} + +// sockSend is the WASI function named SockSendName which sends a message +// on a socket. 
+//
+// See: https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#-sock_sendfd-fd-si_data-ciovec_array-si_flags-siflags---errno-size
+var sockSend = newHostFunc(
+	wasip1.SockSendName,
+	sockSendFn,
+	[]wasm.ValueType{i32, i32, i32, i32, i32},
+	"fd", "si_data", "si_data_len", "si_flags", "result.so_datalen",
+)
+
+func sockSendFn(_ context.Context, mod api.Module, params []uint64) sys.Errno {
+	mem := mod.Memory()
+	fsc := mod.(*wasm.ModuleInstance).Sys.FS()
+
+	fd := int32(params[0])
+	siData := uint32(params[1])
+	siDataCount := uint32(params[2])
+	siFlags := uint32(params[3])
+	resultSoDatalen := uint32(params[4])
+
+	// No si_flags are currently defined by WASI; reject anything non-zero.
+	if siFlags != 0 {
+		return sys.ENOTSUP
+	}
+
+	var conn socketapi.TCPConn
+	if e, ok := fsc.LookupFile(fd); !ok {
+		return sys.EBADF // Not open
+	} else if conn, ok = e.File.(socketapi.TCPConn); !ok {
+		return sys.EBADF // Not a conn
+	}
+
+	bufSize, errno := writev(mem, siData, siDataCount, conn.Write)
+	if errno != 0 {
+		return errno
+	}
+	mem.WriteUint32Le(resultSoDatalen, bufSize)
+	return 0
+}
+
+// sockShutdown is the WASI function named SockShutdownName which shuts
+// down socket send and receive channels.
+//
+// See: https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#-sock_shutdownfd-fd-how-sdflags---errno
+var sockShutdown = newHostFunc(wasip1.SockShutdownName, sockShutdownFn, []wasm.ValueType{i32, i32}, "fd", "how")
+
+func sockShutdownFn(_ context.Context, mod api.Module, params []uint64) sys.Errno {
+	fsc := mod.(*wasm.ModuleInstance).Sys.FS()
+
+	fd := int32(params[0])
+	how := uint8(params[1])
+
+	var conn socketapi.TCPConn
+	if e, ok := fsc.LookupFile(fd); !ok {
+		return sys.EBADF // Not open
+	} else if conn, ok = e.File.(socketapi.TCPConn); !ok {
+		return sys.EBADF // Not a conn
+	}
+
+	sysHow := 0
+
+	// Translate WASI sdflags to the platform shutdown constants.
+	switch how {
+	case wasip1.SD_RD | wasip1.SD_WR:
+		sysHow = socketapi.SHUT_RD | socketapi.SHUT_WR
+	case wasip1.SD_RD:
+		sysHow = socketapi.SHUT_RD
+	case wasip1.SD_WR:
+		sysHow = socketapi.SHUT_WR
+	default:
+		return sys.EINVAL
+	}
+
+	// TODO: Map this instead of relying on syscall symbols.
+	return conn.Shutdown(sysHow)
+}
diff --git a/vendor/github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1/wasi.go b/vendor/github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1/wasi.go
new file mode 100644
index 000000000..4ef41d501
--- /dev/null
+++ b/vendor/github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1/wasi.go
@@ -0,0 +1,314 @@
+// Package wasi_snapshot_preview1 contains Go-defined functions to access
+// system calls, such as opening a file, similar to Go's x/sys package. These
+// are accessible from WebAssembly-defined functions via importing ModuleName.
+// All WASI functions return a single Errno result: ErrnoSuccess on success.
+//
+// e.g. Call Instantiate before instantiating any wasm binary that imports
+// "wasi_snapshot_preview1". Otherwise, it will error due to missing imports.
+//
+//	ctx := context.Background()
+//	r := wazero.NewRuntime(ctx)
+//	defer r.Close(ctx) // This closes everything this Runtime created.
+//
+//	wasi_snapshot_preview1.MustInstantiate(ctx, r)
+//	mod, _ := r.Instantiate(ctx, wasm)
+//
+// See https://github.com/WebAssembly/WASI
package wasi_snapshot_preview1
+
+import (
+	"context"
+	"encoding/binary"
+
+	"github.com/tetratelabs/wazero"
+	"github.com/tetratelabs/wazero/api"
+	"github.com/tetratelabs/wazero/experimental/sys"
+	"github.com/tetratelabs/wazero/internal/wasip1"
+	"github.com/tetratelabs/wazero/internal/wasm"
+)
+
+// ModuleName is the module name WASI functions are exported into.
+//
+// See https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md
+const ModuleName = wasip1.InternalModuleName
+
+const i32, i64 = wasm.ValueTypeI32, wasm.ValueTypeI64
+
+var le = binary.LittleEndian
+
+// MustInstantiate calls Instantiate or panics on error.
+//
+// This is a simpler function for those who know the module ModuleName is not
+// already instantiated, and don't need to unload it.
+func MustInstantiate(ctx context.Context, r wazero.Runtime) {
+	if _, err := Instantiate(ctx, r); err != nil {
+		panic(err)
+	}
+}
+
+// Instantiate instantiates the ModuleName module into the runtime.
+//
+// # Notes
+//
+//   - Failure cases are documented on wazero.Runtime InstantiateModule.
+//   - Closing the wazero.Runtime has the same effect as closing the result.
+func Instantiate(ctx context.Context, r wazero.Runtime) (api.Closer, error) {
+	return NewBuilder(r).Instantiate(ctx)
+}
+
+// Builder configures the ModuleName module for later use via Compile or Instantiate.
+//
+// # Notes
+//
+//   - This is an interface for decoupling, not third-party implementations.
+//     All implementations are in wazero.
+type Builder interface {
+	// Compile compiles the ModuleName module. Call this before Instantiate.
+	//
+	// Note: This has the same effect as the same function on wazero.HostModuleBuilder.
+	Compile(context.Context) (wazero.CompiledModule, error)
+
+	// Instantiate instantiates the ModuleName module and returns a function to close it.
+	//
+	// Note: This has the same effect as the same function on wazero.HostModuleBuilder.
+	Instantiate(context.Context) (api.Closer, error)
+}
+
+// NewBuilder returns a new Builder.
+func NewBuilder(r wazero.Runtime) Builder {
+	return &builder{r}
+}
+
+type builder struct{ r wazero.Runtime }
+
+// hostModuleBuilder returns a new wazero.HostModuleBuilder for ModuleName.
+func (b *builder) hostModuleBuilder() wazero.HostModuleBuilder {
+	ret := b.r.NewHostModuleBuilder(ModuleName)
+	exportFunctions(ret)
+	return ret
+}
+
+// Compile implements Builder.Compile
+func (b *builder) Compile(ctx context.Context) (wazero.CompiledModule, error) {
+	return b.hostModuleBuilder().Compile(ctx)
+}
+
+// Instantiate implements Builder.Instantiate
+func (b *builder) Instantiate(ctx context.Context) (api.Closer, error) {
+	return b.hostModuleBuilder().Instantiate(ctx)
+}
+
+// FunctionExporter exports functions into a wazero.HostModuleBuilder.
+//
+// # Notes
+//
+//   - This is an interface for decoupling, not third-party implementations.
+//     All implementations are in wazero.
+type FunctionExporter interface {
+	ExportFunctions(wazero.HostModuleBuilder)
+}
+
+// NewFunctionExporter returns a new FunctionExporter. This is used for the
+// following two use cases:
+//   - Overriding a builtin function with an alternate implementation.
+//   - Exporting functions to the module "wasi_unstable" for legacy code.
+//
+// # Example of overriding default behavior
+//
+//	// Export the default WASI functions.
+//	wasiBuilder := r.NewHostModuleBuilder(ModuleName)
+//	wasi_snapshot_preview1.NewFunctionExporter().ExportFunctions(wasiBuilder)
+//
+//	// Subsequent calls to NewFunctionBuilder override built-in exports.
+//	wasiBuilder.NewFunctionBuilder().
+//		WithFunc(func(ctx context.Context, mod api.Module, exitCode uint32) {
+//			// your custom logic
+//		}).Export("proc_exit")
+//
+// # Example of using the old module name for WASI
+//
+//	// Instantiate the current WASI functions under the wasi_unstable
+//	// instead of wasi_snapshot_preview1.
+//	wasiBuilder := r.NewHostModuleBuilder("wasi_unstable")
+//	wasi_snapshot_preview1.NewFunctionExporter().ExportFunctions(wasiBuilder)
+//	_, err := wasiBuilder.Instantiate(testCtx, r)
+func NewFunctionExporter() FunctionExporter {
+	return &functionExporter{}
+}
+
+type functionExporter struct{}
+
+// ExportFunctions implements FunctionExporter.ExportFunctions
+func (functionExporter) ExportFunctions(builder wazero.HostModuleBuilder) {
+	exportFunctions(builder)
+}
+
+// ## Translation notes
+// ### String
+// WebAssembly 1.0 has no string type, so any string input parameter expands to two uint32 parameters: offset
+// and length.
+//
+// ### iovec_array
+// `iovec_array` is encoded as two uint32le values (i32): offset and count.
+//
+// ### Result
+// Each result besides Errno is always an uint32 parameter. WebAssembly 1.0 can have up to one result,
+// which is already used by Errno. This forces other results to be parameters. A result parameter is a memory
+// offset to write the result to. As memory offsets are uint32, each parameter representing a result is uint32.
+//
+// ### Errno
+// The WASI specification is sometimes ambiguous resulting in some runtimes interpreting the same function in different ways.
+// Errno mappings are not defined in WASI, yet, so these mappings are best efforts by maintainers.
+// When in doubt about portability, first look at /RATIONALE.md and if needed
+// an issue on https://github.com/WebAssembly/WASI/issues
+//
+// ## Memory
+// In WebAssembly 1.0 (20191205), there may be up to one Memory per store, which means api.Memory is always the
+// wasm.Store Memories index zero: `store.Memories[0].Buffer`
+//
+// See https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md
+// See https://github.com/WebAssembly/WASI/issues/215
+// See https://www.w3.org/TR/2019/REC-wasm-core-1-20191205/#memory-instances%E2%91%A0.

+// exportFunctions adds all go functions that implement wasi.
+// These should be exported in the module named ModuleName.
+func exportFunctions(builder wazero.HostModuleBuilder) {
+	exporter := builder.(wasm.HostFuncExporter)
+
+	// Note: these are ordered per spec for consistency even if the resulting
+	// map can't guarantee that.
+	// See https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#functions
+	exporter.ExportHostFunc(argsGet)
+	exporter.ExportHostFunc(argsSizesGet)
+	exporter.ExportHostFunc(environGet)
+	exporter.ExportHostFunc(environSizesGet)
+	exporter.ExportHostFunc(clockResGet)
+	exporter.ExportHostFunc(clockTimeGet)
+	exporter.ExportHostFunc(fdAdvise)
+	exporter.ExportHostFunc(fdAllocate)
+	exporter.ExportHostFunc(fdClose)
+	exporter.ExportHostFunc(fdDatasync)
+	exporter.ExportHostFunc(fdFdstatGet)
+	exporter.ExportHostFunc(fdFdstatSetFlags)
+	exporter.ExportHostFunc(fdFdstatSetRights)
+	exporter.ExportHostFunc(fdFilestatGet)
+	exporter.ExportHostFunc(fdFilestatSetSize)
+	exporter.ExportHostFunc(fdFilestatSetTimes)
+	exporter.ExportHostFunc(fdPread)
+	exporter.ExportHostFunc(fdPrestatGet)
+	exporter.ExportHostFunc(fdPrestatDirName)
+	exporter.ExportHostFunc(fdPwrite)
+	exporter.ExportHostFunc(fdRead)
+	exporter.ExportHostFunc(fdReaddir)
+	exporter.ExportHostFunc(fdRenumber)
+	exporter.ExportHostFunc(fdSeek)
+	exporter.ExportHostFunc(fdSync)
+	exporter.ExportHostFunc(fdTell)
+	exporter.ExportHostFunc(fdWrite)
+	exporter.ExportHostFunc(pathCreateDirectory)
+	exporter.ExportHostFunc(pathFilestatGet)
+	exporter.ExportHostFunc(pathFilestatSetTimes)
+	exporter.ExportHostFunc(pathLink)
+	exporter.ExportHostFunc(pathOpen)
+	exporter.ExportHostFunc(pathReadlink)
+	exporter.ExportHostFunc(pathRemoveDirectory)
+	exporter.ExportHostFunc(pathRename)
+	exporter.ExportHostFunc(pathSymlink)
+	exporter.ExportHostFunc(pathUnlinkFile)
+	exporter.ExportHostFunc(pollOneoff)
+	exporter.ExportHostFunc(procExit)
+	exporter.ExportHostFunc(procRaise)
+	exporter.ExportHostFunc(schedYield)
+	exporter.ExportHostFunc(randomGet)
+	exporter.ExportHostFunc(sockAccept)
+	exporter.ExportHostFunc(sockRecv)
+	exporter.ExportHostFunc(sockSend)
+	exporter.ExportHostFunc(sockShutdown)
+}
+
+// writeOffsetsAndNullTerminatedValues is used to write NUL-terminated values
+// for args or environ, given a pre-defined bytesLen (which includes NUL
+// terminators).
+func writeOffsetsAndNullTerminatedValues(mem api.Memory, values [][]byte, offsets, bytes, bytesLen uint32) sys.Errno {
+	// The caller may not place bytes directly after offsets, so we have to
+	// read them independently.
+	valuesLen := len(values)
+	offsetsLen := uint32(valuesLen * 4) // uint32Le
+	offsetsBuf, ok := mem.Read(offsets, offsetsLen)
+	if !ok {
+		return sys.EFAULT
+	}
+	bytesBuf, ok := mem.Read(bytes, bytesLen)
+	if !ok {
+		return sys.EFAULT
+	}
+
+	// Loop through the values, first writing the location of its data to
+	// offsetsBuf[oI], then its NUL-terminated data at bytesBuf[bI]
+	var oI, bI uint32
+	for _, value := range values {
+		// Go can't guarantee inlining as there's no //go:inline directive.
+		// This inlines uint32 little-endian encoding instead.
+		bytesOffset := bytes + bI
+		offsetsBuf[oI] = byte(bytesOffset)
+		offsetsBuf[oI+1] = byte(bytesOffset >> 8)
+		offsetsBuf[oI+2] = byte(bytesOffset >> 16)
+		offsetsBuf[oI+3] = byte(bytesOffset >> 24)
+		oI += 4 // size of uint32 we just wrote
+
+		// Write the next value to memory with a NUL terminator
+		copy(bytesBuf[bI:], value)
+		bI += uint32(len(value))
+		bytesBuf[bI] = 0 // NUL terminator
+		bI++
+	}
+
+	return 0
+}
+
+func newHostFunc(
+	name string,
+	goFunc wasiFunc,
+	paramTypes []api.ValueType,
+	paramNames ...string,
+) *wasm.HostFunc {
+	return &wasm.HostFunc{
+		ExportName:  name,
+		Name:        name,
+		ParamTypes:  paramTypes,
+		ParamNames:  paramNames,
+		ResultTypes: []api.ValueType{i32},
+		ResultNames: []string{"errno"},
+		Code:        wasm.Code{GoFunc: goFunc},
+	}
+}
+
+// wasiFunc special cases that all WASI functions return a single Errno
+// result. The returned value will be written back to the stack at index zero.
+type wasiFunc func(ctx context.Context, mod api.Module, params []uint64) sys.Errno
+
+// Call implements the same method as documented on api.GoModuleFunction.
+func (f wasiFunc) Call(ctx context.Context, mod api.Module, stack []uint64) {
+	// Write the result back onto the stack
+	errno := f(ctx, mod, stack)
+	if errno != 0 {
+		stack[0] = uint64(wasip1.ToErrno(errno))
+	} else { // special case as ErrnoSuccess is zero
+		stack[0] = 0
+	}
+}
+
+// stubFunction stubs for GrainLang per #271.
+func stubFunction(name string, paramTypes []wasm.ValueType, paramNames ...string) *wasm.HostFunc {
+	return &wasm.HostFunc{
+		ExportName:  name,
+		Name:        name,
+		ParamTypes:  paramTypes,
+		ParamNames:  paramNames,
+		ResultTypes: []api.ValueType{i32},
+		ResultNames: []string{"errno"},
+		Code: wasm.Code{
+			GoFunc: api.GoModuleFunc(func(_ context.Context, _ api.Module, stack []uint64) { stack[0] = uint64(wasip1.ErrnoNosys) }),
+		},
+	}
+}
diff --git a/vendor/github.com/tetratelabs/wazero/internal/wasip1/args.go b/vendor/github.com/tetratelabs/wazero/internal/wasip1/args.go
new file mode 100644
index 000000000..212d3b2de
--- /dev/null
+++ b/vendor/github.com/tetratelabs/wazero/internal/wasip1/args.go
@@ -0,0 +1,6 @@
+package wasip1
+
+const (
+	ArgsGetName      = "args_get"
+	ArgsSizesGetName = "args_sizes_get"
+)
diff --git a/vendor/github.com/tetratelabs/wazero/internal/wasip1/clock.go b/vendor/github.com/tetratelabs/wazero/internal/wasip1/clock.go
new file mode 100644
index 000000000..1d1b8c12d
--- /dev/null
+++ b/vendor/github.com/tetratelabs/wazero/internal/wasip1/clock.go
@@ -0,0 +1,16 @@
+package wasip1
+
+const (
+	ClockResGetName  = "clock_res_get"
+	ClockTimeGetName = "clock_time_get"
+)
+
+// https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#-clockid-enumu32
+const (
+	// ClockIDRealtime is the clock ID named "realtime" like sys.Walltime
+	ClockIDRealtime = iota
+	// ClockIDMonotonic is the clock ID named "monotonic" like sys.Nanotime
+	ClockIDMonotonic
+	// Note: clockIDProcessCputime and clockIDThreadCputime were removed by
+	// WASI maintainers: https://github.com/WebAssembly/wasi-libc/pull/294
+)
diff --git a/vendor/github.com/tetratelabs/wazero/internal/wasip1/environ.go b/vendor/github.com/tetratelabs/wazero/internal/wasip1/environ.go
new file mode 100644
index 000000000..2b0d59828
--- /dev/null
+++ b/vendor/github.com/tetratelabs/wazero/internal/wasip1/environ.go
@@ -0,0 +1,6 @@
+package wasip1
+
+const (
+	EnvironGetName      = "environ_get"
+	EnvironSizesGetName = "environ_sizes_get"
+)
diff --git a/vendor/github.com/tetratelabs/wazero/internal/wasip1/errno.go b/vendor/github.com/tetratelabs/wazero/internal/wasip1/errno.go
new file mode 100644
index 000000000..028573d2f
--- /dev/null
+++ b/vendor/github.com/tetratelabs/wazero/internal/wasip1/errno.go
@@ -0,0 +1,314 @@
+package wasip1
+
+import (
+	"fmt"
+
+	"github.com/tetratelabs/wazero/experimental/sys"
+)
+
+// Errno is neither uint16 nor an alias for parity with wasm.ValueType.
+type Errno = uint32
+
+// ErrnoName returns the POSIX error code name, except ErrnoSuccess, which is
+// not an error. e.g. Errno2big -> "E2BIG"
+func ErrnoName(errno uint32) string {
+	if int(errno) < len(errnoToString) {
+		return errnoToString[errno]
+	}
+	return fmt.Sprintf("errno(%d)", errno)
+}
+
+// Note: Below prefers POSIX symbol names over WASI ones, even if the docs are from WASI.
+// See https://linux.die.net/man/3/errno
+// See https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#variants-1
+const (
+	// ErrnoSuccess No error occurred. System call completed successfully.
+	ErrnoSuccess Errno = iota
+	// Errno2big Argument list too long.
+	Errno2big
+	// ErrnoAcces Permission denied.
+	ErrnoAcces
+	// ErrnoAddrinuse Address in use.
+	ErrnoAddrinuse
+	// ErrnoAddrnotavail Address not available.
+	ErrnoAddrnotavail
+	// ErrnoAfnosupport Address family not supported.
+	ErrnoAfnosupport
+	// ErrnoAgain Resource unavailable, or operation would block.
+	ErrnoAgain
+	// ErrnoAlready Connection already in progress.
+	ErrnoAlready
+	// ErrnoBadf Bad file descriptor.
+	ErrnoBadf
+	// ErrnoBadmsg Bad message.
+	ErrnoBadmsg
+	// ErrnoBusy Device or resource busy.
+	ErrnoBusy
+	// ErrnoCanceled Operation canceled.
+	ErrnoCanceled
+	// ErrnoChild No child processes.
+	ErrnoChild
+	// ErrnoConnaborted Connection aborted.
+	ErrnoConnaborted
+	// ErrnoConnrefused Connection refused.
+	ErrnoConnrefused
+	// ErrnoConnreset Connection reset.
+	ErrnoConnreset
+	// ErrnoDeadlk Resource deadlock would occur.
+	ErrnoDeadlk
+	// ErrnoDestaddrreq Destination address required.
+	ErrnoDestaddrreq
+	// ErrnoDom Mathematics argument out of domain of function.
+	ErrnoDom
+	// ErrnoDquot Reserved.
+	ErrnoDquot
+	// ErrnoExist File exists.
+	ErrnoExist
+	// ErrnoFault Bad address.
+	ErrnoFault
+	// ErrnoFbig File too large.
+	ErrnoFbig
+	// ErrnoHostunreach Host is unreachable.
+	ErrnoHostunreach
+	// ErrnoIdrm Identifier removed.
+	ErrnoIdrm
+	// ErrnoIlseq Illegal byte sequence.
+	ErrnoIlseq
+	// ErrnoInprogress Operation in progress.
+	ErrnoInprogress
+	// ErrnoIntr Interrupted function.
+	ErrnoIntr
+	// ErrnoInval Invalid argument.
+	ErrnoInval
+	// ErrnoIo I/O error.
+	ErrnoIo
+	// ErrnoIsconn Socket is connected.
+	ErrnoIsconn
+	// ErrnoIsdir Is a directory.
+	ErrnoIsdir
+	// ErrnoLoop Too many levels of symbolic links.
+	ErrnoLoop
+	// ErrnoMfile File descriptor value too large.
+	ErrnoMfile
+	// ErrnoMlink Too many links.
+	ErrnoMlink
+	// ErrnoMsgsize Message too large.
+	ErrnoMsgsize
+	// ErrnoMultihop Reserved.
+	ErrnoMultihop
+	// ErrnoNametoolong Filename too long.
+	ErrnoNametoolong
+	// ErrnoNetdown Network is down.
+	ErrnoNetdown
+	// ErrnoNetreset Connection aborted by network.
+	ErrnoNetreset
+	// ErrnoNetunreach Network unreachable.
+	ErrnoNetunreach
+	// ErrnoNfile Too many files open in system.
+	ErrnoNfile
+	// ErrnoNobufs No buffer space available.
+	ErrnoNobufs
+	// ErrnoNodev No such device.
+	ErrnoNodev
+	// ErrnoNoent No such file or directory.
+	ErrnoNoent
+	// ErrnoNoexec Executable file format error.
+	ErrnoNoexec
+	// ErrnoNolck No locks available.
+	ErrnoNolck
+	// ErrnoNolink Reserved.
+	ErrnoNolink
+	// ErrnoNomem Not enough space.
+	ErrnoNomem
+	// ErrnoNomsg No message of the desired type.
+	ErrnoNomsg
+	// ErrnoNoprotoopt Protocol not available.
+	ErrnoNoprotoopt
+	// ErrnoNospc No space left on device.
+	ErrnoNospc
+	// ErrnoNosys Function not supported.
+	ErrnoNosys
+	// ErrnoNotconn The socket is not connected.
+	ErrnoNotconn
+	// ErrnoNotdir Not a directory or a symbolic link to a directory.
+	ErrnoNotdir
+	// ErrnoNotempty Directory not empty.
+	ErrnoNotempty
+	// ErrnoNotrecoverable State not recoverable.
+	ErrnoNotrecoverable
+	// ErrnoNotsock Not a socket.
+	ErrnoNotsock
+	// ErrnoNotsup Not supported, or operation not supported on socket.
+	ErrnoNotsup
+	// ErrnoNotty Inappropriate I/O control operation.
+	ErrnoNotty
+	// ErrnoNxio No such device or address.
+	ErrnoNxio
+	// ErrnoOverflow Value too large to be stored in data type.
+	ErrnoOverflow
+	// ErrnoOwnerdead Previous owner died.
+	ErrnoOwnerdead
+	// ErrnoPerm Operation not permitted.
+	ErrnoPerm
+	// ErrnoPipe Broken pipe.
+	ErrnoPipe
+	// ErrnoProto Protocol error.
+	ErrnoProto
+	// ErrnoProtonosupport Protocol not supported.
+	ErrnoProtonosupport
+	// ErrnoPrototype Protocol wrong type for socket.
+	ErrnoPrototype
+	// ErrnoRange Result too large.
+	ErrnoRange
+	// ErrnoRofs Read-only file system.
+	ErrnoRofs
+	// ErrnoSpipe Invalid seek.
+	ErrnoSpipe
+	// ErrnoSrch No such process.
+	ErrnoSrch
+	// ErrnoStale Reserved.
+	ErrnoStale
+	// ErrnoTimedout Connection timed out.
+	ErrnoTimedout
+	// ErrnoTxtbsy Text file busy.
+	ErrnoTxtbsy
+	// ErrnoXdev Cross-device link.
+	ErrnoXdev
+
+	// Note: ErrnoNotcapable was removed by WASI maintainers.
+	// See https://github.com/WebAssembly/wasi-libc/pull/294
+)
+
+var errnoToString = [...]string{
+	"ESUCCESS",
+	"E2BIG",
+	"EACCES",
+	"EADDRINUSE",
+	"EADDRNOTAVAIL",
+	"EAFNOSUPPORT",
+	"EAGAIN",
+	"EALREADY",
+	"EBADF",
+	"EBADMSG",
+	"EBUSY",
+	"ECANCELED",
+	"ECHILD",
+	"ECONNABORTED",
+	"ECONNREFUSED",
+	"ECONNRESET",
+	"EDEADLK",
+	"EDESTADDRREQ",
+	"EDOM",
+	"EDQUOT",
+	"EEXIST",
+	"EFAULT",
+	"EFBIG",
+	"EHOSTUNREACH",
+	"EIDRM",
+	"EILSEQ",
+	"EINPROGRESS",
+	"EINTR",
+	"EINVAL",
+	"EIO",
+	"EISCONN",
+	"EISDIR",
+	"ELOOP",
+	"EMFILE",
+	"EMLINK",
+	"EMSGSIZE",
+	"EMULTIHOP",
+	"ENAMETOOLONG",
+	"ENETDOWN",
+	"ENETRESET",
+	"ENETUNREACH",
+	"ENFILE",
+	"ENOBUFS",
+	"ENODEV",
+	"ENOENT",
+	"ENOEXEC",
+	"ENOLCK",
+	"ENOLINK",
+	"ENOMEM",
+	"ENOMSG",
+	"ENOPROTOOPT",
+	"ENOSPC",
+	"ENOSYS",
+	"ENOTCONN",
+	"ENOTDIR",
+	"ENOTEMPTY",
+	"ENOTRECOVERABLE",
+	"ENOTSOCK",
+	"ENOTSUP",
+	"ENOTTY",
+	"ENXIO",
+	"EOVERFLOW",
+	"EOWNERDEAD",
+	"EPERM",
+	"EPIPE",
+	"EPROTO",
+	"EPROTONOSUPPORT",
+	"EPROTOTYPE",
+	"ERANGE",
+	"EROFS",
+	"ESPIPE",
+	"ESRCH",
+	"ESTALE",
+	"ETIMEDOUT",
+	"ETXTBSY",
+	"EXDEV",
+	"ENOTCAPABLE",
+}
+
+// ToErrno coerces the error to a WASI Errno.
+//
+// Note: Coercion isn't centralized in sys.FSContext because ABIs use different
+// error codes. For example, wasi-filesystem doesn't map to these
+// Errno.
+func ToErrno(errno sys.Errno) Errno {
+	switch errno {
+	case 0:
+		return ErrnoSuccess
+	case sys.EACCES:
+		return ErrnoAcces
+	case sys.EAGAIN:
+		return ErrnoAgain
+	case sys.EBADF:
+		return ErrnoBadf
+	case sys.EEXIST:
+		return ErrnoExist
+	case sys.EFAULT:
+		return ErrnoFault
+	case sys.EINTR:
+		return ErrnoIntr
+	case sys.EINVAL:
+		return ErrnoInval
+	case sys.EIO:
+		return ErrnoIo
+	case sys.EISDIR:
+		return ErrnoIsdir
+	case sys.ELOOP:
+		return ErrnoLoop
+	case sys.ENAMETOOLONG:
+		return ErrnoNametoolong
+	case sys.ENOENT:
+		return ErrnoNoent
+	case sys.ENOSYS:
+		return ErrnoNosys
+	case sys.ENOTDIR:
+		return ErrnoNotdir
+	case sys.ERANGE:
+		return ErrnoRange
+	case sys.ENOTEMPTY:
+		return ErrnoNotempty
+	case sys.ENOTSOCK:
+		return ErrnoNotsock
+	case sys.ENOTSUP:
+		return ErrnoNotsup
+	case sys.EPERM:
+		return ErrnoPerm
+	case sys.EROFS:
+		return ErrnoRofs
+	default:
+		// Unmapped errors coerce to EIO as a generic failure.
+		return ErrnoIo
+	}
+}
diff --git a/vendor/github.com/tetratelabs/wazero/internal/wasip1/fs.go b/vendor/github.com/tetratelabs/wazero/internal/wasip1/fs.go
new file mode 100644
index 000000000..ed8df1edc
--- /dev/null
+++ b/vendor/github.com/tetratelabs/wazero/internal/wasip1/fs.go
@@ -0,0 +1,164 @@
+package wasip1
+
+import (
+	"fmt"
+)
+
+const (
+	FdAdviseName           = "fd_advise"
+	FdAllocateName         = "fd_allocate"
+	FdCloseName            = "fd_close"
+	FdDatasyncName         = "fd_datasync"
+	FdFdstatGetName        = "fd_fdstat_get"
+	FdFdstatSetFlagsName   = "fd_fdstat_set_flags"
+	FdFdstatSetRightsName  = "fd_fdstat_set_rights"
+	FdFilestatGetName      = "fd_filestat_get"
+	FdFilestatSetSizeName  = "fd_filestat_set_size"
+	FdFilestatSetTimesName = "fd_filestat_set_times"
+	FdPreadName            = "fd_pread"
+	FdPrestatGetName       = "fd_prestat_get"
+	FdPrestatDirNameName   = "fd_prestat_dir_name"
+	FdPwriteName           = "fd_pwrite"
+	FdReadName             = "fd_read"
+	FdReaddirName          = "fd_readdir"
+	FdRenumberName         = "fd_renumber"
+	FdSeekName             = "fd_seek"
+	FdSyncName             = "fd_sync"
+	FdTellName             = "fd_tell"
+	FdWriteName            = "fd_write"
+
+	PathCreateDirectoryName  = "path_create_directory"
+	PathFilestatGetName      = "path_filestat_get"
+	PathFilestatSetTimesName = "path_filestat_set_times"
+	PathLinkName             = "path_link"
+	PathOpenName             = "path_open"
+	PathReadlinkName         = "path_readlink"
+	PathRemoveDirectoryName  = "path_remove_directory"
+	PathRenameName           = "path_rename"
+	PathSymlinkName          = "path_symlink"
+	PathUnlinkFileName       = "path_unlink_file"
+)
+
+// oflags are open flags used by path_open
+// See https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#-oflags-flagsu16
+const (
+	// O_CREAT creates a file if it does not exist.
+	O_CREAT uint16 = 1 << iota //nolint
+	// O_DIRECTORY fails if not a directory.
+	O_DIRECTORY
+	// O_EXCL fails if file already exists.
+	O_EXCL //nolint
+	// O_TRUNC truncates the file to size 0.
+	O_TRUNC //nolint
+)
+
+func OflagsString(oflags int) string {
+	return flagsString(oflagNames[:], oflags)
+}
+
+var oflagNames = [...]string{
+	"CREAT",
+	"DIRECTORY",
+	"EXCL",
+	"TRUNC",
+}
+
+// file descriptor flags
+// See https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#fdflags
+const (
+	FD_APPEND uint16 = 1 << iota //nolint
+	FD_DSYNC
+	FD_NONBLOCK
+	FD_RSYNC
+	FD_SYNC
+)
+
+func FdFlagsString(fdflags int) string {
+	return flagsString(fdflagNames[:], fdflags)
+}
+
+var fdflagNames = [...]string{
+	"APPEND",
+	"DSYNC",
+	"NONBLOCK",
+	"RSYNC",
+	"SYNC",
+}
+
+// See https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#lookupflags
+const (
+	// LOOKUP_SYMLINK_FOLLOW expands a path if it resolves into a symbolic
+	// link.
+	LOOKUP_SYMLINK_FOLLOW uint16 = 1 << iota //nolint
+)
+
+var lookupflagNames = [...]string{
+	"SYMLINK_FOLLOW",
+}
+
+func LookupflagsString(lookupflags int) string {
+	return flagsString(lookupflagNames[:], lookupflags)
+}
+
+// DirentSize is the size of the dirent struct, which should be followed by the
+// length of a file name.
+const DirentSize = uint32(24)
+
+const (
+	FILETYPE_UNKNOWN uint8 = iota
+	FILETYPE_BLOCK_DEVICE
+	FILETYPE_CHARACTER_DEVICE
+	FILETYPE_DIRECTORY
+	FILETYPE_REGULAR_FILE
+	FILETYPE_SOCKET_DGRAM
+	FILETYPE_SOCKET_STREAM
+	FILETYPE_SYMBOLIC_LINK
+)
+
+// FiletypeName returns string name of the file type.
+func FiletypeName(filetype uint8) string {
+	if int(filetype) < len(filetypeToString) {
+		return filetypeToString[filetype]
+	}
+	return fmt.Sprintf("filetype(%d)", filetype)
+}
+
+var filetypeToString = [...]string{
+	"UNKNOWN",
+	"BLOCK_DEVICE",
+	"CHARACTER_DEVICE",
+	"DIRECTORY",
+	"REGULAR_FILE",
+	"SOCKET_DGRAM",
+	"SOCKET_STREAM",
+	"SYMBOLIC_LINK",
+}
+
+// https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#fstflags
+const (
+	FstflagsAtim uint16 = 1 << iota
+	FstflagsAtimNow
+	FstflagsMtim
+	FstflagsMtimNow
+)
+
+var fstflagNames = [...]string{
+	"ATIM",
+	"ATIM_NOW",
+	"MTIM",
+	"MTIM_NOW",
+}
+
+func FstflagsString(fdflags int) string {
+	return flagsString(fstflagNames[:], fdflags)
+}
+
+// https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#-advice-enumu8
+const (
+	FdAdviceNormal byte = iota
+	FdAdviceSequential
+	FdAdviceRandom
+	FdAdviceWillNeed
+	FdAdviceDontNeed
+	FdAdviceNoReuse
+)
diff --git a/vendor/github.com/tetratelabs/wazero/internal/wasip1/poll.go b/vendor/github.com/tetratelabs/wazero/internal/wasip1/poll.go
new file mode 100644
index 000000000..9bde768f2
--- /dev/null
+++ b/vendor/github.com/tetratelabs/wazero/internal/wasip1/poll.go
@@ -0,0 +1,15 @@
+package wasip1
+
+// https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#-eventtype-enumu8
+const (
+	// EventTypeClock is the timeout event named "clock".
+	EventTypeClock = iota
+	// EventTypeFdRead is the data available event named "fd_read".
+	EventTypeFdRead
+	// EventTypeFdWrite is the capacity available event named "fd_write".
+	EventTypeFdWrite
+)
+
+const (
+	PollOneoffName = "poll_oneoff"
+)
diff --git a/vendor/github.com/tetratelabs/wazero/internal/wasip1/proc.go b/vendor/github.com/tetratelabs/wazero/internal/wasip1/proc.go
new file mode 100644
index 000000000..50b040c98
--- /dev/null
+++ b/vendor/github.com/tetratelabs/wazero/internal/wasip1/proc.go
@@ -0,0 +1,6 @@
+package wasip1
+
+const (
+	ProcExitName  = "proc_exit"
+	ProcRaiseName = "proc_raise"
+)
diff --git a/vendor/github.com/tetratelabs/wazero/internal/wasip1/random.go b/vendor/github.com/tetratelabs/wazero/internal/wasip1/random.go
new file mode 100644
index 000000000..236453374
--- /dev/null
+++ b/vendor/github.com/tetratelabs/wazero/internal/wasip1/random.go
@@ -0,0 +1,3 @@
+package wasip1
+
+const RandomGetName = "random_get"
diff --git a/vendor/github.com/tetratelabs/wazero/internal/wasip1/rights.go b/vendor/github.com/tetratelabs/wazero/internal/wasip1/rights.go
new file mode 100644
index 000000000..2ab56c604
--- /dev/null
+++ b/vendor/github.com/tetratelabs/wazero/internal/wasip1/rights.go
@@ -0,0 +1,148 @@
+package wasip1
+
+// See https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#-rights-flagsu64
+const (
+	// RIGHT_FD_DATASYNC is the right to invoke fd_datasync. If RIGHT_PATH_OPEN
+	// is set, includes the right to invoke path_open with FD_DSYNC.
+	RIGHT_FD_DATASYNC uint32 = 1 << iota //nolint
+
+	// RIGHT_FD_READ is the right to invoke fd_read and sock_recv. If
+	// RIGHT_FD_SYNC is set, includes the right to invoke fd_pread.
+	RIGHT_FD_READ
+
+	// RIGHT_FD_SEEK is the right to invoke fd_seek. This flag implies
+	// RIGHT_FD_TELL.
+	RIGHT_FD_SEEK
+
+	// RIGHT_FDSTAT_SET_FLAGS is the right to invoke fd_fdstat_set_flags.
+	RIGHT_FDSTAT_SET_FLAGS
+
+	// RIGHT_FD_SYNC The right to invoke fd_sync. If path_open is set, includes
+	// the right to invoke path_open with FD_RSYNC and FD_DSYNC.
+	RIGHT_FD_SYNC
+
+	// RIGHT_FD_TELL is the right to invoke fd_seek in such a way that the file
+	// offset remains unaltered (i.e., whence::cur with offset zero), or to
+	// invoke fd_tell.
+	RIGHT_FD_TELL
+
+	// RIGHT_FD_WRITE is the right to invoke fd_write and sock_send. If
+	// RIGHT_FD_SEEK is set, includes the right to invoke fd_pwrite.
+	RIGHT_FD_WRITE
+
+	// RIGHT_FD_ADVISE is the right to invoke fd_advise.
+	RIGHT_FD_ADVISE
+
+	// RIGHT_FD_ALLOCATE is the right to invoke fd_allocate.
+	RIGHT_FD_ALLOCATE
+
+	// RIGHT_PATH_CREATE_DIRECTORY is the right to invoke
+	// path_create_directory.
+	RIGHT_PATH_CREATE_DIRECTORY
+
+	// RIGHT_PATH_CREATE_FILE when RIGHT_PATH_OPEN is set, the right to invoke
+	// path_open with O_CREAT.
+	RIGHT_PATH_CREATE_FILE
+
+	// RIGHT_PATH_LINK_SOURCE is the right to invoke path_link with the file
+	// descriptor as the source directory.
+	RIGHT_PATH_LINK_SOURCE
+
+	// RIGHT_PATH_LINK_TARGET is the right to invoke path_link with the file
+	// descriptor as the target directory.
+	RIGHT_PATH_LINK_TARGET
+
+	// RIGHT_PATH_OPEN is the right to invoke path_open.
+	RIGHT_PATH_OPEN
+
+	// RIGHT_FD_READDIR is the right to invoke fd_readdir.
+	RIGHT_FD_READDIR
+
+	// RIGHT_PATH_READLINK is the right to invoke path_readlink.
+	RIGHT_PATH_READLINK
+
+	// RIGHT_PATH_RENAME_SOURCE is the right to invoke path_rename with the
+	// file descriptor as the source directory.
+	RIGHT_PATH_RENAME_SOURCE
+
+	// RIGHT_PATH_RENAME_TARGET is the right to invoke path_rename with the
+	// file descriptor as the target directory.
+	RIGHT_PATH_RENAME_TARGET
+
+	// RIGHT_PATH_FILESTAT_GET is the right to invoke path_filestat_get.
+	RIGHT_PATH_FILESTAT_GET
+
+	// RIGHT_PATH_FILESTAT_SET_SIZE is the right to change a file's size (there
+	// is no path_filestat_set_size). If RIGHT_PATH_OPEN is set, includes the
+	// right to invoke path_open with O_TRUNC.
+	RIGHT_PATH_FILESTAT_SET_SIZE
+
+	// RIGHT_PATH_FILESTAT_SET_TIMES is the right to invoke
+	// path_filestat_set_times.
+	RIGHT_PATH_FILESTAT_SET_TIMES
+
+	// RIGHT_FD_FILESTAT_GET is the right to invoke fd_filestat_get.
+	RIGHT_FD_FILESTAT_GET
+
+	// RIGHT_FD_FILESTAT_SET_SIZE is the right to invoke fd_filestat_set_size.
+	RIGHT_FD_FILESTAT_SET_SIZE
+
+	// RIGHT_FD_FILESTAT_SET_TIMES is the right to invoke
+	// fd_filestat_set_times.
+	RIGHT_FD_FILESTAT_SET_TIMES
+
+	// RIGHT_PATH_SYMLINK is the right to invoke path_symlink.
+	RIGHT_PATH_SYMLINK
+
+	// RIGHT_PATH_REMOVE_DIRECTORY is the right to invoke
+	// path_remove_directory.
+	RIGHT_PATH_REMOVE_DIRECTORY
+
+	// RIGHT_PATH_UNLINK_FILE is the right to invoke path_unlink_file.
+	RIGHT_PATH_UNLINK_FILE
+
+	// RIGHT_POLL_FD_READWRITE when RIGHT_FD_READ is set, includes the right to
+	// invoke poll_oneoff to subscribe to eventtype::fd_read. If RIGHT_FD_WRITE
+	// is set, includes the right to invoke poll_oneoff to subscribe to
+	// eventtype::fd_write.
+	RIGHT_POLL_FD_READWRITE
+
+	// RIGHT_SOCK_SHUTDOWN is the right to invoke sock_shutdown.
+	RIGHT_SOCK_SHUTDOWN
+)
+
+func RightsString(rights int) string {
+	return flagsString(rightNames[:], rights)
+}
+
+var rightNames = [...]string{
+	"FD_DATASYNC",
+	"FD_READ",
+	"FD_SEEK",
+	"FDSTAT_SET_FLAGS",
+	"FD_SYNC",
+	"FD_TELL",
+	"FD_WRITE",
+	"FD_ADVISE",
+	"FD_ALLOCATE",
+	"PATH_CREATE_DIRECTORY",
+	"PATH_CREATE_FILE",
+	"PATH_LINK_SOURCE",
+	"PATH_LINK_TARGET",
+	"PATH_OPEN",
+	"FD_READDIR",
+	"PATH_READLINK",
+	"PATH_RENAME_SOURCE",
+	"PATH_RENAME_TARGET",
+	"PATH_FILESTAT_GET",
+	"PATH_FILESTAT_SET_SIZE",
+	"PATH_FILESTAT_SET_TIMES",
+	"FD_FILESTAT_GET",
+	"FD_FILESTAT_SET_SIZE",
+	"FD_FILESTAT_SET_TIMES",
+	"PATH_SYMLINK",
+	"PATH_REMOVE_DIRECTORY",
+	"PATH_UNLINK_FILE",
+	"POLL_FD_READWRITE",
+	"SOCK_SHUTDOWN",
+}
diff --git a/vendor/github.com/tetratelabs/wazero/internal/wasip1/sched.go b/vendor/github.com/tetratelabs/wazero/internal/wasip1/sched.go
new file mode 100644
index 000000000..bc6e39385
--- /dev/null
+++ b/vendor/github.com/tetratelabs/wazero/internal/wasip1/sched.go
@@ -0,0 +1,3 @@
+package wasip1
+
+const SchedYieldName = "sched_yield"
diff --git a/vendor/github.com/tetratelabs/wazero/internal/wasip1/sock.go b/vendor/github.com/tetratelabs/wazero/internal/wasip1/sock.go
new file mode 100644
index 000000000..90d33ece8
--- /dev/null
+++ b/vendor/github.com/tetratelabs/wazero/internal/wasip1/sock.go
@@ -0,0 +1,71 @@
+package wasip1
+
+import "strconv"
+
+const (
+	SockAcceptName   = "sock_accept"
+	SockRecvName     = "sock_recv"
+	SockSendName     = "sock_send"
+	SockShutdownName = "sock_shutdown"
+)
+
+// SD Flags indicate which channels on a socket to shut down.
+// https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#-sdflags-flagsu8
+const (
+	// SD_RD disables further receive operations.
+	SD_RD uint8 = 1 << iota //nolint
+	// SD_WR disables further send operations.
+	SD_WR
+)
+
+func SdFlagsString(sdflags int) string {
+	return flagsString(sdflagNames[:], sdflags)
+}
+
+var sdflagNames = [...]string{
+	"RD",
+	"WR",
+}
+
+// SI Flags are flags provided to sock_send. As there are currently no flags defined, it must be set to zero.
+// https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#-siflags-u16
+
+func SiFlagsString(siflags int) string {
+	if siflags == 0 {
+		return ""
+	}
+	return strconv.Itoa(siflags)
+}
+
+// RI Flags are flags provided to sock_recv.
+// https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#-riflags-flagsu16
+const (
+	// RI_RECV_PEEK returns the message without removing it from the socket's receive queue
+	RI_RECV_PEEK uint8 = 1 << iota //nolint
+	// RI_RECV_WAITALL on byte-stream sockets, block until the full amount of data can be returned.
+	RI_RECV_WAITALL
+)
+
+func RiFlagsString(riflags int) string {
+	return flagsString(riflagNames[:], riflags)
+}
+
+var riflagNames = [...]string{
+	"RECV_PEEK",
+	"RECV_WAITALL",
+}
+
+// RO Flags are flags returned by sock_recv.
+// https://github.com/WebAssembly/WASI/blob/snapshot-01/phases/snapshot/docs.md#-roflags-flagsu16
+const (
+	// RO_RECV_DATA_TRUNCATED is returned by sock_recv when message data has been truncated.
+	RO_RECV_DATA_TRUNCATED uint8 = 1 << iota //nolint
+)
+
+func RoFlagsString(roflags int) string {
+	return flagsString(roflagNames[:], roflags)
+}
+
+var roflagNames = [...]string{
+	"RECV_DATA_TRUNCATED",
+}
diff --git a/vendor/github.com/tetratelabs/wazero/internal/wasip1/wasi.go b/vendor/github.com/tetratelabs/wazero/internal/wasip1/wasi.go
new file mode 100644
index 000000000..299feea2f
--- /dev/null
+++ b/vendor/github.com/tetratelabs/wazero/internal/wasip1/wasi.go
@@ -0,0 +1,26 @@
+// Package wasip1 is a helper to remove package cycles re-using constants.
+package wasip1
+
+import (
+	"strings"
+)
+
+// InternalModuleName is not named ModuleName, to avoid a clash on dot imports.
+const InternalModuleName = "wasi_snapshot_preview1" + +func flagsString(names []string, f int) string { + var builder strings.Builder + first := true + for i, sf := range names { + target := 1 << i + if target&f != 0 { + if !first { + builder.WriteByte('|') + } else { + first = false + } + builder.WriteString(sf) + } + } + return builder.String() +} diff --git a/vendor/golang.org/x/image/riff/riff.go b/vendor/golang.org/x/image/riff/riff.go deleted file mode 100644 index 38dc0e568..000000000 --- a/vendor/golang.org/x/image/riff/riff.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package riff implements the Resource Interchange File Format, used by media -// formats such as AVI, WAVE and WEBP. -// -// A RIFF stream contains a sequence of chunks. Each chunk consists of an 8-byte -// header (containing a 4-byte chunk type and a 4-byte chunk length), the chunk -// data (presented as an io.Reader), and some padding bytes. -// -// A detailed description of the format is at -// http://www.tactilemedia.com/info/MCI_Control_Info.html -package riff // import "golang.org/x/image/riff" - -import ( - "errors" - "io" - "io/ioutil" - "math" -) - -var ( - errMissingPaddingByte = errors.New("riff: missing padding byte") - errMissingRIFFChunkHeader = errors.New("riff: missing RIFF chunk header") - errListSubchunkTooLong = errors.New("riff: list subchunk too long") - errShortChunkData = errors.New("riff: short chunk data") - errShortChunkHeader = errors.New("riff: short chunk header") - errStaleReader = errors.New("riff: stale reader") -) - -// u32 decodes the first four bytes of b as a little-endian integer. -func u32(b []byte) uint32 { - return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 -} - -const chunkHeaderSize = 8 - -// FourCC is a four character code. 
-type FourCC [4]byte - -// LIST is the "LIST" FourCC. -var LIST = FourCC{'L', 'I', 'S', 'T'} - -// NewReader returns the RIFF stream's form type, such as "AVI " or "WAVE", and -// its chunks as a *Reader. -func NewReader(r io.Reader) (formType FourCC, data *Reader, err error) { - var buf [chunkHeaderSize]byte - if _, err := io.ReadFull(r, buf[:]); err != nil { - if err == io.EOF || err == io.ErrUnexpectedEOF { - err = errMissingRIFFChunkHeader - } - return FourCC{}, nil, err - } - if buf[0] != 'R' || buf[1] != 'I' || buf[2] != 'F' || buf[3] != 'F' { - return FourCC{}, nil, errMissingRIFFChunkHeader - } - return NewListReader(u32(buf[4:]), r) -} - -// NewListReader returns a LIST chunk's list type, such as "movi" or "wavl", -// and its chunks as a *Reader. -func NewListReader(chunkLen uint32, chunkData io.Reader) (listType FourCC, data *Reader, err error) { - if chunkLen < 4 { - return FourCC{}, nil, errShortChunkData - } - z := &Reader{r: chunkData} - if _, err := io.ReadFull(chunkData, z.buf[:4]); err != nil { - if err == io.EOF || err == io.ErrUnexpectedEOF { - err = errShortChunkData - } - return FourCC{}, nil, err - } - z.totalLen = chunkLen - 4 - return FourCC{z.buf[0], z.buf[1], z.buf[2], z.buf[3]}, z, nil -} - -// Reader reads chunks from an underlying io.Reader. -type Reader struct { - r io.Reader - err error - - totalLen uint32 - chunkLen uint32 - - chunkReader *chunkReader - buf [chunkHeaderSize]byte - padded bool -} - -// Next returns the next chunk's ID, length and data. It returns io.EOF if there -// are no more chunks. The io.Reader returned becomes stale after the next Next -// call, and should no longer be used. -// -// It is valid to call Next even if all of the previous chunk's data has not -// been read. -func (z *Reader) Next() (chunkID FourCC, chunkLen uint32, chunkData io.Reader, err error) { - if z.err != nil { - return FourCC{}, 0, nil, z.err - } - - // Drain the rest of the previous chunk. 
- if z.chunkLen != 0 { - want := z.chunkLen - var got int64 - got, z.err = io.Copy(ioutil.Discard, z.chunkReader) - if z.err == nil && uint32(got) != want { - z.err = errShortChunkData - } - if z.err != nil { - return FourCC{}, 0, nil, z.err - } - } - z.chunkReader = nil - if z.padded { - if z.totalLen == 0 { - z.err = errListSubchunkTooLong - return FourCC{}, 0, nil, z.err - } - z.totalLen-- - _, z.err = io.ReadFull(z.r, z.buf[:1]) - if z.err != nil { - if z.err == io.EOF { - z.err = errMissingPaddingByte - } - return FourCC{}, 0, nil, z.err - } - } - - // We are done if we have no more data. - if z.totalLen == 0 { - z.err = io.EOF - return FourCC{}, 0, nil, z.err - } - - // Read the next chunk header. - if z.totalLen < chunkHeaderSize { - z.err = errShortChunkHeader - return FourCC{}, 0, nil, z.err - } - z.totalLen -= chunkHeaderSize - if _, z.err = io.ReadFull(z.r, z.buf[:chunkHeaderSize]); z.err != nil { - if z.err == io.EOF || z.err == io.ErrUnexpectedEOF { - z.err = errShortChunkHeader - } - return FourCC{}, 0, nil, z.err - } - chunkID = FourCC{z.buf[0], z.buf[1], z.buf[2], z.buf[3]} - z.chunkLen = u32(z.buf[4:]) - if z.chunkLen > z.totalLen { - z.err = errListSubchunkTooLong - return FourCC{}, 0, nil, z.err - } - z.padded = z.chunkLen&1 == 1 - z.chunkReader = &chunkReader{z} - return chunkID, z.chunkLen, z.chunkReader, nil -} - -type chunkReader struct { - z *Reader -} - -func (c *chunkReader) Read(p []byte) (int, error) { - if c != c.z.chunkReader { - return 0, errStaleReader - } - z := c.z - if z.err != nil { - if z.err == io.EOF { - return 0, errStaleReader - } - return 0, z.err - } - - n := int(z.chunkLen) - if n == 0 { - return 0, io.EOF - } - if n < 0 { - // Converting uint32 to int overflowed. 
- n = math.MaxInt32 - } - if n > len(p) { - n = len(p) - } - n, err := z.r.Read(p[:n]) - z.totalLen -= uint32(n) - z.chunkLen -= uint32(n) - if err != io.EOF { - z.err = err - } - return n, err -} diff --git a/vendor/golang.org/x/image/vp8/decode.go b/vendor/golang.org/x/image/vp8/decode.go deleted file mode 100644 index 2aa9fee03..000000000 --- a/vendor/golang.org/x/image/vp8/decode.go +++ /dev/null @@ -1,403 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package vp8 implements a decoder for the VP8 lossy image format. -// -// The VP8 specification is RFC 6386. -package vp8 // import "golang.org/x/image/vp8" - -// This file implements the top-level decoding algorithm. - -import ( - "errors" - "image" - "io" -) - -// limitReader wraps an io.Reader to read at most n bytes from it. -type limitReader struct { - r io.Reader - n int -} - -// ReadFull reads exactly len(p) bytes into p. -func (r *limitReader) ReadFull(p []byte) error { - if len(p) > r.n { - return io.ErrUnexpectedEOF - } - n, err := io.ReadFull(r.r, p) - r.n -= n - return err -} - -// FrameHeader is a frame header, as specified in section 9.1. -type FrameHeader struct { - KeyFrame bool - VersionNumber uint8 - ShowFrame bool - FirstPartitionLen uint32 - Width int - Height int - XScale uint8 - YScale uint8 -} - -const ( - nSegment = 4 - nSegmentProb = 3 -) - -// segmentHeader holds segment-related header information. -type segmentHeader struct { - useSegment bool - updateMap bool - relativeDelta bool - quantizer [nSegment]int8 - filterStrength [nSegment]int8 - prob [nSegmentProb]uint8 -} - -const ( - nRefLFDelta = 4 - nModeLFDelta = 4 -) - -// filterHeader holds filter-related header information. 
-type filterHeader struct { - simple bool - level int8 - sharpness uint8 - useLFDelta bool - refLFDelta [nRefLFDelta]int8 - modeLFDelta [nModeLFDelta]int8 - perSegmentLevel [nSegment]int8 -} - -// mb is the per-macroblock decode state. A decoder maintains mbw+1 of these -// as it is decoding macroblocks left-to-right and top-to-bottom: mbw for the -// macroblocks in the row above, and one for the macroblock to the left. -type mb struct { - // pred is the predictor mode for the 4 bottom or right 4x4 luma regions. - pred [4]uint8 - // nzMask is a mask of 8 bits: 4 for the bottom or right 4x4 luma regions, - // and 2 + 2 for the bottom or right 4x4 chroma regions. A 1 bit indicates - // that region has non-zero coefficients. - nzMask uint8 - // nzY16 is a 0/1 value that is 1 if the macroblock used Y16 prediction and - // had non-zero coefficients. - nzY16 uint8 -} - -// Decoder decodes VP8 bitstreams into frames. Decoding one frame consists of -// calling Init, DecodeFrameHeader and then DecodeFrame in that order. -// A Decoder can be re-used to decode multiple frames. -type Decoder struct { - // r is the input bitsream. - r limitReader - // scratch is a scratch buffer. - scratch [8]byte - // img is the YCbCr image to decode into. - img *image.YCbCr - // mbw and mbh are the number of 16x16 macroblocks wide and high the image is. - mbw, mbh int - // frameHeader is the frame header. When decoding multiple frames, - // frames that aren't key frames will inherit the Width, Height, - // XScale and YScale of the most recent key frame. - frameHeader FrameHeader - // Other headers. - segmentHeader segmentHeader - filterHeader filterHeader - // The image data is divided into a number of independent partitions. - // There is 1 "first partition" and between 1 and 8 "other partitions" - // for coefficient data. - fp partition - op [8]partition - nOP int - // Quantization factors. - quant [nSegment]quant - // DCT/WHT coefficient decoding probabilities. 
- tokenProb [nPlane][nBand][nContext][nProb]uint8 - useSkipProb bool - skipProb uint8 - // Loop filter parameters. - filterParams [nSegment][2]filterParam - perMBFilterParams []filterParam - - // The eight fields below relate to the current macroblock being decoded. - // - // Segment-based adjustments. - segment int - // Per-macroblock state for the macroblock immediately left of and those - // macroblocks immediately above the current macroblock. - leftMB mb - upMB []mb - // Bitmasks for which 4x4 regions of coeff contain non-zero coefficients. - nzDCMask, nzACMask uint32 - // Predictor modes. - usePredY16 bool // The libwebp C code calls this !is_i4x4_. - predY16 uint8 - predC8 uint8 - predY4 [4][4]uint8 - - // The two fields below form a workspace for reconstructing a macroblock. - // Their specific sizes are documented in reconstruct.go. - coeff [1*16*16 + 2*8*8 + 1*4*4]int16 - ybr [1 + 16 + 1 + 8][32]uint8 -} - -// NewDecoder returns a new Decoder. -func NewDecoder() *Decoder { - return &Decoder{} -} - -// Init initializes the decoder to read at most n bytes from r. -func (d *Decoder) Init(r io.Reader, n int) { - d.r = limitReader{r, n} -} - -// DecodeFrameHeader decodes the frame header. -func (d *Decoder) DecodeFrameHeader() (fh FrameHeader, err error) { - // All frame headers are at least 3 bytes long. - b := d.scratch[:3] - if err = d.r.ReadFull(b); err != nil { - return - } - d.frameHeader.KeyFrame = (b[0] & 1) == 0 - d.frameHeader.VersionNumber = (b[0] >> 1) & 7 - d.frameHeader.ShowFrame = (b[0]>>4)&1 == 1 - d.frameHeader.FirstPartitionLen = uint32(b[0])>>5 | uint32(b[1])<<3 | uint32(b[2])<<11 - if !d.frameHeader.KeyFrame { - return d.frameHeader, nil - } - // Frame headers for key frames are an additional 7 bytes long. - b = d.scratch[:7] - if err = d.r.ReadFull(b); err != nil { - return - } - // Check the magic sync code. 
- if b[0] != 0x9d || b[1] != 0x01 || b[2] != 0x2a { - err = errors.New("vp8: invalid format") - return - } - d.frameHeader.Width = int(b[4]&0x3f)<<8 | int(b[3]) - d.frameHeader.Height = int(b[6]&0x3f)<<8 | int(b[5]) - d.frameHeader.XScale = b[4] >> 6 - d.frameHeader.YScale = b[6] >> 6 - d.mbw = (d.frameHeader.Width + 0x0f) >> 4 - d.mbh = (d.frameHeader.Height + 0x0f) >> 4 - d.segmentHeader = segmentHeader{ - prob: [3]uint8{0xff, 0xff, 0xff}, - } - d.tokenProb = defaultTokenProb - d.segment = 0 - return d.frameHeader, nil -} - -// ensureImg ensures that d.img is large enough to hold the decoded frame. -func (d *Decoder) ensureImg() { - if d.img != nil { - p0, p1 := d.img.Rect.Min, d.img.Rect.Max - if p0.X == 0 && p0.Y == 0 && p1.X >= 16*d.mbw && p1.Y >= 16*d.mbh { - return - } - } - m := image.NewYCbCr(image.Rect(0, 0, 16*d.mbw, 16*d.mbh), image.YCbCrSubsampleRatio420) - d.img = m.SubImage(image.Rect(0, 0, d.frameHeader.Width, d.frameHeader.Height)).(*image.YCbCr) - d.perMBFilterParams = make([]filterParam, d.mbw*d.mbh) - d.upMB = make([]mb, d.mbw) -} - -// parseSegmentHeader parses the segment header, as specified in section 9.3. 
-func (d *Decoder) parseSegmentHeader() { - d.segmentHeader.useSegment = d.fp.readBit(uniformProb) - if !d.segmentHeader.useSegment { - d.segmentHeader.updateMap = false - return - } - d.segmentHeader.updateMap = d.fp.readBit(uniformProb) - if d.fp.readBit(uniformProb) { - d.segmentHeader.relativeDelta = !d.fp.readBit(uniformProb) - for i := range d.segmentHeader.quantizer { - d.segmentHeader.quantizer[i] = int8(d.fp.readOptionalInt(uniformProb, 7)) - } - for i := range d.segmentHeader.filterStrength { - d.segmentHeader.filterStrength[i] = int8(d.fp.readOptionalInt(uniformProb, 6)) - } - } - if !d.segmentHeader.updateMap { - return - } - for i := range d.segmentHeader.prob { - if d.fp.readBit(uniformProb) { - d.segmentHeader.prob[i] = uint8(d.fp.readUint(uniformProb, 8)) - } else { - d.segmentHeader.prob[i] = 0xff - } - } -} - -// parseFilterHeader parses the filter header, as specified in section 9.4. -func (d *Decoder) parseFilterHeader() { - d.filterHeader.simple = d.fp.readBit(uniformProb) - d.filterHeader.level = int8(d.fp.readUint(uniformProb, 6)) - d.filterHeader.sharpness = uint8(d.fp.readUint(uniformProb, 3)) - d.filterHeader.useLFDelta = d.fp.readBit(uniformProb) - if d.filterHeader.useLFDelta && d.fp.readBit(uniformProb) { - for i := range d.filterHeader.refLFDelta { - d.filterHeader.refLFDelta[i] = int8(d.fp.readOptionalInt(uniformProb, 6)) - } - for i := range d.filterHeader.modeLFDelta { - d.filterHeader.modeLFDelta[i] = int8(d.fp.readOptionalInt(uniformProb, 6)) - } - } - if d.filterHeader.level == 0 { - return - } - if d.segmentHeader.useSegment { - for i := range d.filterHeader.perSegmentLevel { - strength := d.segmentHeader.filterStrength[i] - if d.segmentHeader.relativeDelta { - strength += d.filterHeader.level - } - d.filterHeader.perSegmentLevel[i] = strength - } - } else { - d.filterHeader.perSegmentLevel[0] = d.filterHeader.level - } - d.computeFilterParams() -} - -// parseOtherPartitions parses the other partitions, as specified in section 
9.5. -func (d *Decoder) parseOtherPartitions() error { - const maxNOP = 1 << 3 - var partLens [maxNOP]int - d.nOP = 1 << d.fp.readUint(uniformProb, 2) - - // The final partition length is implied by the remaining chunk data - // (d.r.n) and the other d.nOP-1 partition lengths. Those d.nOP-1 partition - // lengths are stored as 24-bit uints, i.e. up to 16 MiB per partition. - n := 3 * (d.nOP - 1) - partLens[d.nOP-1] = d.r.n - n - if partLens[d.nOP-1] < 0 { - return io.ErrUnexpectedEOF - } - if n > 0 { - buf := make([]byte, n) - if err := d.r.ReadFull(buf); err != nil { - return err - } - for i := 0; i < d.nOP-1; i++ { - pl := int(buf[3*i+0]) | int(buf[3*i+1])<<8 | int(buf[3*i+2])<<16 - if pl > partLens[d.nOP-1] { - return io.ErrUnexpectedEOF - } - partLens[i] = pl - partLens[d.nOP-1] -= pl - } - } - - // We check if the final partition length can also fit into a 24-bit uint. - // Strictly speaking, this isn't part of the spec, but it guards against a - // malicious WEBP image that is too large to ReadFull the encoded DCT - // coefficients into memory, whether that's because the actual WEBP file is - // too large, or whether its RIFF metadata lists too large a chunk. - if 1<<24 <= partLens[d.nOP-1] { - return errors.New("vp8: too much data to decode") - } - - buf := make([]byte, d.r.n) - if err := d.r.ReadFull(buf); err != nil { - return err - } - for i, pl := range partLens { - if i == d.nOP { - break - } - d.op[i].init(buf[:pl]) - buf = buf[pl:] - } - return nil -} - -// parseOtherHeaders parses header information other than the frame header. -func (d *Decoder) parseOtherHeaders() error { - // Initialize and parse the first partition. - firstPartition := make([]byte, d.frameHeader.FirstPartitionLen) - if err := d.r.ReadFull(firstPartition); err != nil { - return err - } - d.fp.init(firstPartition) - if d.frameHeader.KeyFrame { - // Read and ignore the color space and pixel clamp values. They are - // specified in section 9.2, but are unimplemented. 
- d.fp.readBit(uniformProb) - d.fp.readBit(uniformProb) - } - d.parseSegmentHeader() - d.parseFilterHeader() - if err := d.parseOtherPartitions(); err != nil { - return err - } - d.parseQuant() - if !d.frameHeader.KeyFrame { - // Golden and AltRef frames are specified in section 9.7. - // TODO(nigeltao): implement. Note that they are only used for video, not still images. - return errors.New("vp8: Golden / AltRef frames are not implemented") - } - // Read and ignore the refreshLastFrameBuffer bit, specified in section 9.8. - // It applies only to video, and not still images. - d.fp.readBit(uniformProb) - d.parseTokenProb() - d.useSkipProb = d.fp.readBit(uniformProb) - if d.useSkipProb { - d.skipProb = uint8(d.fp.readUint(uniformProb, 8)) - } - if d.fp.unexpectedEOF { - return io.ErrUnexpectedEOF - } - return nil -} - -// DecodeFrame decodes the frame and returns it as an YCbCr image. -// The image's contents are valid up until the next call to Decoder.Init. -func (d *Decoder) DecodeFrame() (*image.YCbCr, error) { - d.ensureImg() - if err := d.parseOtherHeaders(); err != nil { - return nil, err - } - // Reconstruct the rows. - for mbx := 0; mbx < d.mbw; mbx++ { - d.upMB[mbx] = mb{} - } - for mby := 0; mby < d.mbh; mby++ { - d.leftMB = mb{} - for mbx := 0; mbx < d.mbw; mbx++ { - skip := d.reconstruct(mbx, mby) - fs := d.filterParams[d.segment][btou(!d.usePredY16)] - fs.inner = fs.inner || !skip - d.perMBFilterParams[d.mbw*mby+mbx] = fs - } - } - if d.fp.unexpectedEOF { - return nil, io.ErrUnexpectedEOF - } - for i := 0; i < d.nOP; i++ { - if d.op[i].unexpectedEOF { - return nil, io.ErrUnexpectedEOF - } - } - // Apply the loop filter. - // - // Even if we are using per-segment levels, section 15 says that "loop - // filtering must be skipped entirely if loop_filter_level at either the - // frame header level or macroblock override level is 0". 
- if d.filterHeader.level != 0 { - if d.filterHeader.simple { - d.simpleFilter() - } else { - d.normalFilter() - } - } - return d.img, nil -} diff --git a/vendor/golang.org/x/image/vp8/filter.go b/vendor/golang.org/x/image/vp8/filter.go deleted file mode 100644 index e34a811b1..000000000 --- a/vendor/golang.org/x/image/vp8/filter.go +++ /dev/null @@ -1,273 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package vp8 - -// filter2 modifies a 2-pixel wide or 2-pixel high band along an edge. -func filter2(pix []byte, level, index, iStep, jStep int) { - for n := 16; n > 0; n, index = n-1, index+iStep { - p1 := int(pix[index-2*jStep]) - p0 := int(pix[index-1*jStep]) - q0 := int(pix[index+0*jStep]) - q1 := int(pix[index+1*jStep]) - if abs(p0-q0)<<1+abs(p1-q1)>>1 > level { - continue - } - a := 3*(q0-p0) + clamp127(p1-q1) - a1 := clamp15((a + 4) >> 3) - a2 := clamp15((a + 3) >> 3) - pix[index-1*jStep] = clamp255(p0 + a2) - pix[index+0*jStep] = clamp255(q0 - a1) - } -} - -// filter246 modifies a 2-, 4- or 6-pixel wide or high band along an edge. -func filter246(pix []byte, n, level, ilevel, hlevel, index, iStep, jStep int, fourNotSix bool) { - for ; n > 0; n, index = n-1, index+iStep { - p3 := int(pix[index-4*jStep]) - p2 := int(pix[index-3*jStep]) - p1 := int(pix[index-2*jStep]) - p0 := int(pix[index-1*jStep]) - q0 := int(pix[index+0*jStep]) - q1 := int(pix[index+1*jStep]) - q2 := int(pix[index+2*jStep]) - q3 := int(pix[index+3*jStep]) - if abs(p0-q0)<<1+abs(p1-q1)>>1 > level { - continue - } - if abs(p3-p2) > ilevel || - abs(p2-p1) > ilevel || - abs(p1-p0) > ilevel || - abs(q1-q0) > ilevel || - abs(q2-q1) > ilevel || - abs(q3-q2) > ilevel { - continue - } - if abs(p1-p0) > hlevel || abs(q1-q0) > hlevel { - // Filter 2 pixels. 
- a := 3*(q0-p0) + clamp127(p1-q1) - a1 := clamp15((a + 4) >> 3) - a2 := clamp15((a + 3) >> 3) - pix[index-1*jStep] = clamp255(p0 + a2) - pix[index+0*jStep] = clamp255(q0 - a1) - } else if fourNotSix { - // Filter 4 pixels. - a := 3 * (q0 - p0) - a1 := clamp15((a + 4) >> 3) - a2 := clamp15((a + 3) >> 3) - a3 := (a1 + 1) >> 1 - pix[index-2*jStep] = clamp255(p1 + a3) - pix[index-1*jStep] = clamp255(p0 + a2) - pix[index+0*jStep] = clamp255(q0 - a1) - pix[index+1*jStep] = clamp255(q1 - a3) - } else { - // Filter 6 pixels. - a := clamp127(3*(q0-p0) + clamp127(p1-q1)) - a1 := (27*a + 63) >> 7 - a2 := (18*a + 63) >> 7 - a3 := (9*a + 63) >> 7 - pix[index-3*jStep] = clamp255(p2 + a3) - pix[index-2*jStep] = clamp255(p1 + a2) - pix[index-1*jStep] = clamp255(p0 + a1) - pix[index+0*jStep] = clamp255(q0 - a1) - pix[index+1*jStep] = clamp255(q1 - a2) - pix[index+2*jStep] = clamp255(q2 - a3) - } - } -} - -// simpleFilter implements the simple filter, as specified in section 15.2. -func (d *Decoder) simpleFilter() { - for mby := 0; mby < d.mbh; mby++ { - for mbx := 0; mbx < d.mbw; mbx++ { - f := d.perMBFilterParams[d.mbw*mby+mbx] - if f.level == 0 { - continue - } - l := int(f.level) - yIndex := (mby*d.img.YStride + mbx) * 16 - if mbx > 0 { - filter2(d.img.Y, l+4, yIndex, d.img.YStride, 1) - } - if f.inner { - filter2(d.img.Y, l, yIndex+0x4, d.img.YStride, 1) - filter2(d.img.Y, l, yIndex+0x8, d.img.YStride, 1) - filter2(d.img.Y, l, yIndex+0xc, d.img.YStride, 1) - } - if mby > 0 { - filter2(d.img.Y, l+4, yIndex, 1, d.img.YStride) - } - if f.inner { - filter2(d.img.Y, l, yIndex+d.img.YStride*0x4, 1, d.img.YStride) - filter2(d.img.Y, l, yIndex+d.img.YStride*0x8, 1, d.img.YStride) - filter2(d.img.Y, l, yIndex+d.img.YStride*0xc, 1, d.img.YStride) - } - } - } -} - -// normalFilter implements the normal filter, as specified in section 15.3. 
-func (d *Decoder) normalFilter() { - for mby := 0; mby < d.mbh; mby++ { - for mbx := 0; mbx < d.mbw; mbx++ { - f := d.perMBFilterParams[d.mbw*mby+mbx] - if f.level == 0 { - continue - } - l, il, hl := int(f.level), int(f.ilevel), int(f.hlevel) - yIndex := (mby*d.img.YStride + mbx) * 16 - cIndex := (mby*d.img.CStride + mbx) * 8 - if mbx > 0 { - filter246(d.img.Y, 16, l+4, il, hl, yIndex, d.img.YStride, 1, false) - filter246(d.img.Cb, 8, l+4, il, hl, cIndex, d.img.CStride, 1, false) - filter246(d.img.Cr, 8, l+4, il, hl, cIndex, d.img.CStride, 1, false) - } - if f.inner { - filter246(d.img.Y, 16, l, il, hl, yIndex+0x4, d.img.YStride, 1, true) - filter246(d.img.Y, 16, l, il, hl, yIndex+0x8, d.img.YStride, 1, true) - filter246(d.img.Y, 16, l, il, hl, yIndex+0xc, d.img.YStride, 1, true) - filter246(d.img.Cb, 8, l, il, hl, cIndex+0x4, d.img.CStride, 1, true) - filter246(d.img.Cr, 8, l, il, hl, cIndex+0x4, d.img.CStride, 1, true) - } - if mby > 0 { - filter246(d.img.Y, 16, l+4, il, hl, yIndex, 1, d.img.YStride, false) - filter246(d.img.Cb, 8, l+4, il, hl, cIndex, 1, d.img.CStride, false) - filter246(d.img.Cr, 8, l+4, il, hl, cIndex, 1, d.img.CStride, false) - } - if f.inner { - filter246(d.img.Y, 16, l, il, hl, yIndex+d.img.YStride*0x4, 1, d.img.YStride, true) - filter246(d.img.Y, 16, l, il, hl, yIndex+d.img.YStride*0x8, 1, d.img.YStride, true) - filter246(d.img.Y, 16, l, il, hl, yIndex+d.img.YStride*0xc, 1, d.img.YStride, true) - filter246(d.img.Cb, 8, l, il, hl, cIndex+d.img.CStride*0x4, 1, d.img.CStride, true) - filter246(d.img.Cr, 8, l, il, hl, cIndex+d.img.CStride*0x4, 1, d.img.CStride, true) - } - } - } -} - -// filterParam holds the loop filter parameters for a macroblock. -type filterParam struct { - // The first three fields are thresholds used by the loop filter to smooth - // over the edges and interior of a macroblock. level is used by both the - // simple and normal filters. The inner level and high edge variance level - // are only used by the normal filter. 
- level, ilevel, hlevel uint8 - // inner is whether the inner loop filter cannot be optimized out as a - // no-op for this particular macroblock. - inner bool -} - -// computeFilterParams computes the loop filter parameters, as specified in -// section 15.4. -func (d *Decoder) computeFilterParams() { - for i := range d.filterParams { - baseLevel := d.filterHeader.level - if d.segmentHeader.useSegment { - baseLevel = d.segmentHeader.filterStrength[i] - if d.segmentHeader.relativeDelta { - baseLevel += d.filterHeader.level - } - } - - for j := range d.filterParams[i] { - p := &d.filterParams[i][j] - p.inner = j != 0 - level := baseLevel - if d.filterHeader.useLFDelta { - // The libwebp C code has a "TODO: only CURRENT is handled for now." - level += d.filterHeader.refLFDelta[0] - if j != 0 { - level += d.filterHeader.modeLFDelta[0] - } - } - if level <= 0 { - p.level = 0 - continue - } - if level > 63 { - level = 63 - } - ilevel := level - if d.filterHeader.sharpness > 0 { - if d.filterHeader.sharpness > 4 { - ilevel >>= 2 - } else { - ilevel >>= 1 - } - if x := int8(9 - d.filterHeader.sharpness); ilevel > x { - ilevel = x - } - } - if ilevel < 1 { - ilevel = 1 - } - p.ilevel = uint8(ilevel) - p.level = uint8(2*level + ilevel) - if d.frameHeader.KeyFrame { - if level < 15 { - p.hlevel = 0 - } else if level < 40 { - p.hlevel = 1 - } else { - p.hlevel = 2 - } - } else { - if level < 15 { - p.hlevel = 0 - } else if level < 20 { - p.hlevel = 1 - } else if level < 40 { - p.hlevel = 2 - } else { - p.hlevel = 3 - } - } - } - } -} - -// intSize is either 32 or 64. -const intSize = 32 << (^uint(0) >> 63) - -func abs(x int) int { - // m := -1 if x < 0. m := 0 otherwise. - m := x >> (intSize - 1) - - // In two's complement representation, the negative number - // of any number (except the smallest one) can be computed - // by flipping all the bits and add 1. This is faster than - // code with a branch. - // See Hacker's Delight, section 2-4. 
- return (x ^ m) - m -} - -func clamp15(x int) int { - if x < -16 { - return -16 - } - if x > 15 { - return 15 - } - return x -} - -func clamp127(x int) int { - if x < -128 { - return -128 - } - if x > 127 { - return 127 - } - return x -} - -func clamp255(x int) uint8 { - if x < 0 { - return 0 - } - if x > 255 { - return 255 - } - return uint8(x) -} diff --git a/vendor/golang.org/x/image/vp8/idct.go b/vendor/golang.org/x/image/vp8/idct.go deleted file mode 100644 index 929af2cc9..000000000 --- a/vendor/golang.org/x/image/vp8/idct.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package vp8 - -// This file implements the inverse Discrete Cosine Transform and the inverse -// Walsh Hadamard Transform (WHT), as specified in sections 14.3 and 14.4. - -func clip8(i int32) uint8 { - if i < 0 { - return 0 - } - if i > 255 { - return 255 - } - return uint8(i) -} - -func (z *Decoder) inverseDCT4(y, x, coeffBase int) { - const ( - c1 = 85627 // 65536 * cos(pi/8) * sqrt(2). - c2 = 35468 // 65536 * sin(pi/8) * sqrt(2). 
- ) - var m [4][4]int32 - for i := 0; i < 4; i++ { - a := int32(z.coeff[coeffBase+0]) + int32(z.coeff[coeffBase+8]) - b := int32(z.coeff[coeffBase+0]) - int32(z.coeff[coeffBase+8]) - c := (int32(z.coeff[coeffBase+4])*c2)>>16 - (int32(z.coeff[coeffBase+12])*c1)>>16 - d := (int32(z.coeff[coeffBase+4])*c1)>>16 + (int32(z.coeff[coeffBase+12])*c2)>>16 - m[i][0] = a + d - m[i][1] = b + c - m[i][2] = b - c - m[i][3] = a - d - coeffBase++ - } - for j := 0; j < 4; j++ { - dc := m[0][j] + 4 - a := dc + m[2][j] - b := dc - m[2][j] - c := (m[1][j]*c2)>>16 - (m[3][j]*c1)>>16 - d := (m[1][j]*c1)>>16 + (m[3][j]*c2)>>16 - z.ybr[y+j][x+0] = clip8(int32(z.ybr[y+j][x+0]) + (a+d)>>3) - z.ybr[y+j][x+1] = clip8(int32(z.ybr[y+j][x+1]) + (b+c)>>3) - z.ybr[y+j][x+2] = clip8(int32(z.ybr[y+j][x+2]) + (b-c)>>3) - z.ybr[y+j][x+3] = clip8(int32(z.ybr[y+j][x+3]) + (a-d)>>3) - } -} - -func (z *Decoder) inverseDCT4DCOnly(y, x, coeffBase int) { - dc := (int32(z.coeff[coeffBase+0]) + 4) >> 3 - for j := 0; j < 4; j++ { - for i := 0; i < 4; i++ { - z.ybr[y+j][x+i] = clip8(int32(z.ybr[y+j][x+i]) + dc) - } - } -} - -func (z *Decoder) inverseDCT8(y, x, coeffBase int) { - z.inverseDCT4(y+0, x+0, coeffBase+0*16) - z.inverseDCT4(y+0, x+4, coeffBase+1*16) - z.inverseDCT4(y+4, x+0, coeffBase+2*16) - z.inverseDCT4(y+4, x+4, coeffBase+3*16) -} - -func (z *Decoder) inverseDCT8DCOnly(y, x, coeffBase int) { - z.inverseDCT4DCOnly(y+0, x+0, coeffBase+0*16) - z.inverseDCT4DCOnly(y+0, x+4, coeffBase+1*16) - z.inverseDCT4DCOnly(y+4, x+0, coeffBase+2*16) - z.inverseDCT4DCOnly(y+4, x+4, coeffBase+3*16) -} - -func (d *Decoder) inverseWHT16() { - var m [16]int32 - for i := 0; i < 4; i++ { - a0 := int32(d.coeff[384+0+i]) + int32(d.coeff[384+12+i]) - a1 := int32(d.coeff[384+4+i]) + int32(d.coeff[384+8+i]) - a2 := int32(d.coeff[384+4+i]) - int32(d.coeff[384+8+i]) - a3 := int32(d.coeff[384+0+i]) - int32(d.coeff[384+12+i]) - m[0+i] = a0 + a1 - m[8+i] = a0 - a1 - m[4+i] = a3 + a2 - m[12+i] = a3 - a2 - } - out := 0 - for i := 0; 
i < 4; i++ { - dc := m[0+i*4] + 3 - a0 := dc + m[3+i*4] - a1 := m[1+i*4] + m[2+i*4] - a2 := m[1+i*4] - m[2+i*4] - a3 := dc - m[3+i*4] - d.coeff[out+0] = int16((a0 + a1) >> 3) - d.coeff[out+16] = int16((a3 + a2) >> 3) - d.coeff[out+32] = int16((a0 - a1) >> 3) - d.coeff[out+48] = int16((a3 - a2) >> 3) - out += 64 - } -} diff --git a/vendor/golang.org/x/image/vp8/partition.go b/vendor/golang.org/x/image/vp8/partition.go deleted file mode 100644 index 72288bdeb..000000000 --- a/vendor/golang.org/x/image/vp8/partition.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package vp8 - -// Each VP8 frame consists of between 2 and 9 bitstream partitions. -// Each partition is byte-aligned and is independently arithmetic-encoded. -// -// This file implements decoding a partition's bitstream, as specified in -// chapter 7. The implementation follows libwebp's approach instead of the -// specification's reference C implementation. For example, we use a look-up -// table instead of a for loop to recalibrate the encoded range. 
- -var ( - lutShift = [127]uint8{ - 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - } - lutRangeM1 = [127]uint8{ - 127, - 127, 191, - 127, 159, 191, 223, - 127, 143, 159, 175, 191, 207, 223, 239, - 127, 135, 143, 151, 159, 167, 175, 183, 191, 199, 207, 215, 223, 231, 239, 247, - 127, 131, 135, 139, 143, 147, 151, 155, 159, 163, 167, 171, 175, 179, 183, 187, - 191, 195, 199, 203, 207, 211, 215, 219, 223, 227, 231, 235, 239, 243, 247, 251, - 127, 129, 131, 133, 135, 137, 139, 141, 143, 145, 147, 149, 151, 153, 155, 157, - 159, 161, 163, 165, 167, 169, 171, 173, 175, 177, 179, 181, 183, 185, 187, 189, - 191, 193, 195, 197, 199, 201, 203, 205, 207, 209, 211, 213, 215, 217, 219, 221, - 223, 225, 227, 229, 231, 233, 235, 237, 239, 241, 243, 245, 247, 249, 251, 253, - } -) - -// uniformProb represents a 50% probability that the next bit is 0. -const uniformProb = 128 - -// partition holds arithmetic-coded bits. -type partition struct { - // buf is the input bytes. - buf []byte - // r is how many of buf's bytes have been consumed. - r int - // rangeM1 is range minus 1, where range is in the arithmetic coding sense, - // not the Go language sense. - rangeM1 uint32 - // bits and nBits hold those bits shifted out of buf but not yet consumed. - bits uint32 - nBits uint8 - // unexpectedEOF tells whether we tried to read past buf. - unexpectedEOF bool -} - -// init initializes the partition. -func (p *partition) init(buf []byte) { - p.buf = buf - p.r = 0 - p.rangeM1 = 254 - p.bits = 0 - p.nBits = 0 - p.unexpectedEOF = false -} - -// readBit returns the next bit. 
-func (p *partition) readBit(prob uint8) bool { - if p.nBits < 8 { - if p.r >= len(p.buf) { - p.unexpectedEOF = true - return false - } - // Expression split for 386 compiler. - x := uint32(p.buf[p.r]) - p.bits |= x << (8 - p.nBits) - p.r++ - p.nBits += 8 - } - split := (p.rangeM1*uint32(prob))>>8 + 1 - bit := p.bits >= split<<8 - if bit { - p.rangeM1 -= split - p.bits -= split << 8 - } else { - p.rangeM1 = split - 1 - } - if p.rangeM1 < 127 { - shift := lutShift[p.rangeM1] - p.rangeM1 = uint32(lutRangeM1[p.rangeM1]) - p.bits <<= shift - p.nBits -= shift - } - return bit -} - -// readUint returns the next n-bit unsigned integer. -func (p *partition) readUint(prob, n uint8) uint32 { - var u uint32 - for n > 0 { - n-- - if p.readBit(prob) { - u |= 1 << n - } - } - return u -} - -// readInt returns the next n-bit signed integer. -func (p *partition) readInt(prob, n uint8) int32 { - u := p.readUint(prob, n) - b := p.readBit(prob) - if b { - return -int32(u) - } - return int32(u) -} - -// readOptionalInt returns the next n-bit signed integer in an encoding -// where the likely result is zero. -func (p *partition) readOptionalInt(prob, n uint8) int32 { - if !p.readBit(prob) { - return 0 - } - return p.readInt(prob, n) -} diff --git a/vendor/golang.org/x/image/vp8/pred.go b/vendor/golang.org/x/image/vp8/pred.go deleted file mode 100644 index 58c2689ea..000000000 --- a/vendor/golang.org/x/image/vp8/pred.go +++ /dev/null @@ -1,201 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package vp8 - -// This file implements parsing the predictor modes, as specified in chapter -// 11. 
- -func (d *Decoder) parsePredModeY16(mbx int) { - var p uint8 - if !d.fp.readBit(156) { - if !d.fp.readBit(163) { - p = predDC - } else { - p = predVE - } - } else if !d.fp.readBit(128) { - p = predHE - } else { - p = predTM - } - for i := 0; i < 4; i++ { - d.upMB[mbx].pred[i] = p - d.leftMB.pred[i] = p - } - d.predY16 = p -} - -func (d *Decoder) parsePredModeC8() { - if !d.fp.readBit(142) { - d.predC8 = predDC - } else if !d.fp.readBit(114) { - d.predC8 = predVE - } else if !d.fp.readBit(183) { - d.predC8 = predHE - } else { - d.predC8 = predTM - } -} - -func (d *Decoder) parsePredModeY4(mbx int) { - for j := 0; j < 4; j++ { - p := d.leftMB.pred[j] - for i := 0; i < 4; i++ { - prob := &predProb[d.upMB[mbx].pred[i]][p] - if !d.fp.readBit(prob[0]) { - p = predDC - } else if !d.fp.readBit(prob[1]) { - p = predTM - } else if !d.fp.readBit(prob[2]) { - p = predVE - } else if !d.fp.readBit(prob[3]) { - if !d.fp.readBit(prob[4]) { - p = predHE - } else if !d.fp.readBit(prob[5]) { - p = predRD - } else { - p = predVR - } - } else if !d.fp.readBit(prob[6]) { - p = predLD - } else if !d.fp.readBit(prob[7]) { - p = predVL - } else if !d.fp.readBit(prob[8]) { - p = predHD - } else { - p = predHU - } - d.predY4[j][i] = p - d.upMB[mbx].pred[i] = p - } - d.leftMB.pred[j] = p - } -} - -// predProb are the probabilities to decode a 4x4 region's predictor mode given -// the predictor modes of the regions above and left of it. -// These values are specified in section 11.5. 
-var predProb = [nPred][nPred][9]uint8{ - { - {231, 120, 48, 89, 115, 113, 120, 152, 112}, - {152, 179, 64, 126, 170, 118, 46, 70, 95}, - {175, 69, 143, 80, 85, 82, 72, 155, 103}, - {56, 58, 10, 171, 218, 189, 17, 13, 152}, - {114, 26, 17, 163, 44, 195, 21, 10, 173}, - {121, 24, 80, 195, 26, 62, 44, 64, 85}, - {144, 71, 10, 38, 171, 213, 144, 34, 26}, - {170, 46, 55, 19, 136, 160, 33, 206, 71}, - {63, 20, 8, 114, 114, 208, 12, 9, 226}, - {81, 40, 11, 96, 182, 84, 29, 16, 36}, - }, - { - {134, 183, 89, 137, 98, 101, 106, 165, 148}, - {72, 187, 100, 130, 157, 111, 32, 75, 80}, - {66, 102, 167, 99, 74, 62, 40, 234, 128}, - {41, 53, 9, 178, 241, 141, 26, 8, 107}, - {74, 43, 26, 146, 73, 166, 49, 23, 157}, - {65, 38, 105, 160, 51, 52, 31, 115, 128}, - {104, 79, 12, 27, 217, 255, 87, 17, 7}, - {87, 68, 71, 44, 114, 51, 15, 186, 23}, - {47, 41, 14, 110, 182, 183, 21, 17, 194}, - {66, 45, 25, 102, 197, 189, 23, 18, 22}, - }, - { - {88, 88, 147, 150, 42, 46, 45, 196, 205}, - {43, 97, 183, 117, 85, 38, 35, 179, 61}, - {39, 53, 200, 87, 26, 21, 43, 232, 171}, - {56, 34, 51, 104, 114, 102, 29, 93, 77}, - {39, 28, 85, 171, 58, 165, 90, 98, 64}, - {34, 22, 116, 206, 23, 34, 43, 166, 73}, - {107, 54, 32, 26, 51, 1, 81, 43, 31}, - {68, 25, 106, 22, 64, 171, 36, 225, 114}, - {34, 19, 21, 102, 132, 188, 16, 76, 124}, - {62, 18, 78, 95, 85, 57, 50, 48, 51}, - }, - { - {193, 101, 35, 159, 215, 111, 89, 46, 111}, - {60, 148, 31, 172, 219, 228, 21, 18, 111}, - {112, 113, 77, 85, 179, 255, 38, 120, 114}, - {40, 42, 1, 196, 245, 209, 10, 25, 109}, - {88, 43, 29, 140, 166, 213, 37, 43, 154}, - {61, 63, 30, 155, 67, 45, 68, 1, 209}, - {100, 80, 8, 43, 154, 1, 51, 26, 71}, - {142, 78, 78, 16, 255, 128, 34, 197, 171}, - {41, 40, 5, 102, 211, 183, 4, 1, 221}, - {51, 50, 17, 168, 209, 192, 23, 25, 82}, - }, - { - {138, 31, 36, 171, 27, 166, 38, 44, 229}, - {67, 87, 58, 169, 82, 115, 26, 59, 179}, - {63, 59, 90, 180, 59, 166, 93, 73, 154}, - {40, 40, 21, 116, 143, 209, 34, 39, 175}, - {47, 15, 
16, 183, 34, 223, 49, 45, 183}, - {46, 17, 33, 183, 6, 98, 15, 32, 183}, - {57, 46, 22, 24, 128, 1, 54, 17, 37}, - {65, 32, 73, 115, 28, 128, 23, 128, 205}, - {40, 3, 9, 115, 51, 192, 18, 6, 223}, - {87, 37, 9, 115, 59, 77, 64, 21, 47}, - }, - { - {104, 55, 44, 218, 9, 54, 53, 130, 226}, - {64, 90, 70, 205, 40, 41, 23, 26, 57}, - {54, 57, 112, 184, 5, 41, 38, 166, 213}, - {30, 34, 26, 133, 152, 116, 10, 32, 134}, - {39, 19, 53, 221, 26, 114, 32, 73, 255}, - {31, 9, 65, 234, 2, 15, 1, 118, 73}, - {75, 32, 12, 51, 192, 255, 160, 43, 51}, - {88, 31, 35, 67, 102, 85, 55, 186, 85}, - {56, 21, 23, 111, 59, 205, 45, 37, 192}, - {55, 38, 70, 124, 73, 102, 1, 34, 98}, - }, - { - {125, 98, 42, 88, 104, 85, 117, 175, 82}, - {95, 84, 53, 89, 128, 100, 113, 101, 45}, - {75, 79, 123, 47, 51, 128, 81, 171, 1}, - {57, 17, 5, 71, 102, 57, 53, 41, 49}, - {38, 33, 13, 121, 57, 73, 26, 1, 85}, - {41, 10, 67, 138, 77, 110, 90, 47, 114}, - {115, 21, 2, 10, 102, 255, 166, 23, 6}, - {101, 29, 16, 10, 85, 128, 101, 196, 26}, - {57, 18, 10, 102, 102, 213, 34, 20, 43}, - {117, 20, 15, 36, 163, 128, 68, 1, 26}, - }, - { - {102, 61, 71, 37, 34, 53, 31, 243, 192}, - {69, 60, 71, 38, 73, 119, 28, 222, 37}, - {68, 45, 128, 34, 1, 47, 11, 245, 171}, - {62, 17, 19, 70, 146, 85, 55, 62, 70}, - {37, 43, 37, 154, 100, 163, 85, 160, 1}, - {63, 9, 92, 136, 28, 64, 32, 201, 85}, - {75, 15, 9, 9, 64, 255, 184, 119, 16}, - {86, 6, 28, 5, 64, 255, 25, 248, 1}, - {56, 8, 17, 132, 137, 255, 55, 116, 128}, - {58, 15, 20, 82, 135, 57, 26, 121, 40}, - }, - { - {164, 50, 31, 137, 154, 133, 25, 35, 218}, - {51, 103, 44, 131, 131, 123, 31, 6, 158}, - {86, 40, 64, 135, 148, 224, 45, 183, 128}, - {22, 26, 17, 131, 240, 154, 14, 1, 209}, - {45, 16, 21, 91, 64, 222, 7, 1, 197}, - {56, 21, 39, 155, 60, 138, 23, 102, 213}, - {83, 12, 13, 54, 192, 255, 68, 47, 28}, - {85, 26, 85, 85, 128, 128, 32, 146, 171}, - {18, 11, 7, 63, 144, 171, 4, 4, 246}, - {35, 27, 10, 146, 174, 171, 12, 26, 128}, - }, - { - {190, 80, 35, 99, 
180, 80, 126, 54, 45}, - {85, 126, 47, 87, 176, 51, 41, 20, 32}, - {101, 75, 128, 139, 118, 146, 116, 128, 85}, - {56, 41, 15, 176, 236, 85, 37, 9, 62}, - {71, 30, 17, 119, 118, 255, 17, 18, 138}, - {101, 38, 60, 138, 55, 70, 43, 26, 142}, - {146, 36, 19, 30, 171, 255, 97, 27, 20}, - {138, 45, 61, 62, 219, 1, 81, 188, 64}, - {32, 41, 20, 117, 151, 142, 20, 21, 163}, - {112, 19, 12, 61, 195, 128, 48, 4, 24}, - }, -} diff --git a/vendor/golang.org/x/image/vp8/predfunc.go b/vendor/golang.org/x/image/vp8/predfunc.go deleted file mode 100644 index f8999582b..000000000 --- a/vendor/golang.org/x/image/vp8/predfunc.go +++ /dev/null @@ -1,553 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package vp8 - -// This file implements the predicition functions, as specified in chapter 12. -// -// For each macroblock (of 1x16x16 luma and 2x8x8 chroma coefficients), the -// luma values are either predicted as one large 16x16 region or 16 separate -// 4x4 regions. The chroma values are always predicted as one 8x8 region. -// -// For 4x4 regions, the target block's predicted values (Xs) are a function of -// its previously-decoded top and left border values, as well as a number of -// pixels from the top-right: -// -// a b c d e f g h -// p X X X X -// q X X X X -// r X X X X -// s X X X X -// -// The predictor modes are: -// - DC: all Xs = (b + c + d + e + p + q + r + s + 4) / 8. -// - TM: the first X = (b + p - a), the second X = (c + p - a), and so on. -// - VE: each X = the weighted average of its column's top value and that -// value's neighbors, i.e. averages of abc, bcd, cde or def. -// - HE: similar to VE except rows instead of columns, and the final row is -// an average of r, s and s. -// - RD, VR, LD, VL, HD, HU: these diagonal modes ("Right Down", "Vertical -// Right", etc) are more complicated and are described in section 12.3. 
-// All Xs are clipped to the range [0, 255]. -// -// For 8x8 and 16x16 regions, the target block's predicted values are a -// function of the top and left border values without the top-right overhang, -// i.e. without the 8x8 or 16x16 equivalent of f, g and h. Furthermore: -// - There are no diagonal predictor modes, only DC, TM, VE and HE. -// - The DC mode has variants for macroblocks in the top row and/or left -// column, i.e. for macroblocks with mby == 0 || mbx == 0. -// - The VE and HE modes take only the column top or row left values; they do -// not smooth that top/left value with its neighbors. - -// nPred is the number of predictor modes, not including the Top/Left versions -// of the DC predictor mode. -const nPred = 10 - -const ( - predDC = iota - predTM - predVE - predHE - predRD - predVR - predLD - predVL - predHD - predHU - predDCTop - predDCLeft - predDCTopLeft -) - -func checkTopLeftPred(mbx, mby int, p uint8) uint8 { - if p != predDC { - return p - } - if mbx == 0 { - if mby == 0 { - return predDCTopLeft - } - return predDCLeft - } - if mby == 0 { - return predDCTop - } - return predDC -} - -var predFunc4 = [...]func(*Decoder, int, int){ - predFunc4DC, - predFunc4TM, - predFunc4VE, - predFunc4HE, - predFunc4RD, - predFunc4VR, - predFunc4LD, - predFunc4VL, - predFunc4HD, - predFunc4HU, - nil, - nil, - nil, -} - -var predFunc8 = [...]func(*Decoder, int, int){ - predFunc8DC, - predFunc8TM, - predFunc8VE, - predFunc8HE, - nil, - nil, - nil, - nil, - nil, - nil, - predFunc8DCTop, - predFunc8DCLeft, - predFunc8DCTopLeft, -} - -var predFunc16 = [...]func(*Decoder, int, int){ - predFunc16DC, - predFunc16TM, - predFunc16VE, - predFunc16HE, - nil, - nil, - nil, - nil, - nil, - nil, - predFunc16DCTop, - predFunc16DCLeft, - predFunc16DCTopLeft, -} - -func predFunc4DC(z *Decoder, y, x int) { - sum := uint32(4) - for i := 0; i < 4; i++ { - sum += uint32(z.ybr[y-1][x+i]) - } - for j := 0; j < 4; j++ { - sum += uint32(z.ybr[y+j][x-1]) - } - avg := uint8(sum / 8) 
- for j := 0; j < 4; j++ { - for i := 0; i < 4; i++ { - z.ybr[y+j][x+i] = avg - } - } -} - -func predFunc4TM(z *Decoder, y, x int) { - delta0 := -int32(z.ybr[y-1][x-1]) - for j := 0; j < 4; j++ { - delta1 := delta0 + int32(z.ybr[y+j][x-1]) - for i := 0; i < 4; i++ { - delta2 := delta1 + int32(z.ybr[y-1][x+i]) - z.ybr[y+j][x+i] = uint8(clip(delta2, 0, 255)) - } - } -} - -func predFunc4VE(z *Decoder, y, x int) { - a := int32(z.ybr[y-1][x-1]) - b := int32(z.ybr[y-1][x+0]) - c := int32(z.ybr[y-1][x+1]) - d := int32(z.ybr[y-1][x+2]) - e := int32(z.ybr[y-1][x+3]) - f := int32(z.ybr[y-1][x+4]) - abc := uint8((a + 2*b + c + 2) / 4) - bcd := uint8((b + 2*c + d + 2) / 4) - cde := uint8((c + 2*d + e + 2) / 4) - def := uint8((d + 2*e + f + 2) / 4) - for j := 0; j < 4; j++ { - z.ybr[y+j][x+0] = abc - z.ybr[y+j][x+1] = bcd - z.ybr[y+j][x+2] = cde - z.ybr[y+j][x+3] = def - } -} - -func predFunc4HE(z *Decoder, y, x int) { - s := int32(z.ybr[y+3][x-1]) - r := int32(z.ybr[y+2][x-1]) - q := int32(z.ybr[y+1][x-1]) - p := int32(z.ybr[y+0][x-1]) - a := int32(z.ybr[y-1][x-1]) - ssr := uint8((s + 2*s + r + 2) / 4) - srq := uint8((s + 2*r + q + 2) / 4) - rqp := uint8((r + 2*q + p + 2) / 4) - apq := uint8((a + 2*p + q + 2) / 4) - for i := 0; i < 4; i++ { - z.ybr[y+0][x+i] = apq - z.ybr[y+1][x+i] = rqp - z.ybr[y+2][x+i] = srq - z.ybr[y+3][x+i] = ssr - } -} - -func predFunc4RD(z *Decoder, y, x int) { - s := int32(z.ybr[y+3][x-1]) - r := int32(z.ybr[y+2][x-1]) - q := int32(z.ybr[y+1][x-1]) - p := int32(z.ybr[y+0][x-1]) - a := int32(z.ybr[y-1][x-1]) - b := int32(z.ybr[y-1][x+0]) - c := int32(z.ybr[y-1][x+1]) - d := int32(z.ybr[y-1][x+2]) - e := int32(z.ybr[y-1][x+3]) - srq := uint8((s + 2*r + q + 2) / 4) - rqp := uint8((r + 2*q + p + 2) / 4) - qpa := uint8((q + 2*p + a + 2) / 4) - pab := uint8((p + 2*a + b + 2) / 4) - abc := uint8((a + 2*b + c + 2) / 4) - bcd := uint8((b + 2*c + d + 2) / 4) - cde := uint8((c + 2*d + e + 2) / 4) - z.ybr[y+0][x+0] = pab - z.ybr[y+0][x+1] = abc - z.ybr[y+0][x+2] = 
bcd - z.ybr[y+0][x+3] = cde - z.ybr[y+1][x+0] = qpa - z.ybr[y+1][x+1] = pab - z.ybr[y+1][x+2] = abc - z.ybr[y+1][x+3] = bcd - z.ybr[y+2][x+0] = rqp - z.ybr[y+2][x+1] = qpa - z.ybr[y+2][x+2] = pab - z.ybr[y+2][x+3] = abc - z.ybr[y+3][x+0] = srq - z.ybr[y+3][x+1] = rqp - z.ybr[y+3][x+2] = qpa - z.ybr[y+3][x+3] = pab -} - -func predFunc4VR(z *Decoder, y, x int) { - r := int32(z.ybr[y+2][x-1]) - q := int32(z.ybr[y+1][x-1]) - p := int32(z.ybr[y+0][x-1]) - a := int32(z.ybr[y-1][x-1]) - b := int32(z.ybr[y-1][x+0]) - c := int32(z.ybr[y-1][x+1]) - d := int32(z.ybr[y-1][x+2]) - e := int32(z.ybr[y-1][x+3]) - ab := uint8((a + b + 1) / 2) - bc := uint8((b + c + 1) / 2) - cd := uint8((c + d + 1) / 2) - de := uint8((d + e + 1) / 2) - rqp := uint8((r + 2*q + p + 2) / 4) - qpa := uint8((q + 2*p + a + 2) / 4) - pab := uint8((p + 2*a + b + 2) / 4) - abc := uint8((a + 2*b + c + 2) / 4) - bcd := uint8((b + 2*c + d + 2) / 4) - cde := uint8((c + 2*d + e + 2) / 4) - z.ybr[y+0][x+0] = ab - z.ybr[y+0][x+1] = bc - z.ybr[y+0][x+2] = cd - z.ybr[y+0][x+3] = de - z.ybr[y+1][x+0] = pab - z.ybr[y+1][x+1] = abc - z.ybr[y+1][x+2] = bcd - z.ybr[y+1][x+3] = cde - z.ybr[y+2][x+0] = qpa - z.ybr[y+2][x+1] = ab - z.ybr[y+2][x+2] = bc - z.ybr[y+2][x+3] = cd - z.ybr[y+3][x+0] = rqp - z.ybr[y+3][x+1] = pab - z.ybr[y+3][x+2] = abc - z.ybr[y+3][x+3] = bcd -} - -func predFunc4LD(z *Decoder, y, x int) { - a := int32(z.ybr[y-1][x+0]) - b := int32(z.ybr[y-1][x+1]) - c := int32(z.ybr[y-1][x+2]) - d := int32(z.ybr[y-1][x+3]) - e := int32(z.ybr[y-1][x+4]) - f := int32(z.ybr[y-1][x+5]) - g := int32(z.ybr[y-1][x+6]) - h := int32(z.ybr[y-1][x+7]) - abc := uint8((a + 2*b + c + 2) / 4) - bcd := uint8((b + 2*c + d + 2) / 4) - cde := uint8((c + 2*d + e + 2) / 4) - def := uint8((d + 2*e + f + 2) / 4) - efg := uint8((e + 2*f + g + 2) / 4) - fgh := uint8((f + 2*g + h + 2) / 4) - ghh := uint8((g + 2*h + h + 2) / 4) - z.ybr[y+0][x+0] = abc - z.ybr[y+0][x+1] = bcd - z.ybr[y+0][x+2] = cde - z.ybr[y+0][x+3] = def - z.ybr[y+1][x+0] 
= bcd - z.ybr[y+1][x+1] = cde - z.ybr[y+1][x+2] = def - z.ybr[y+1][x+3] = efg - z.ybr[y+2][x+0] = cde - z.ybr[y+2][x+1] = def - z.ybr[y+2][x+2] = efg - z.ybr[y+2][x+3] = fgh - z.ybr[y+3][x+0] = def - z.ybr[y+3][x+1] = efg - z.ybr[y+3][x+2] = fgh - z.ybr[y+3][x+3] = ghh -} - -func predFunc4VL(z *Decoder, y, x int) { - a := int32(z.ybr[y-1][x+0]) - b := int32(z.ybr[y-1][x+1]) - c := int32(z.ybr[y-1][x+2]) - d := int32(z.ybr[y-1][x+3]) - e := int32(z.ybr[y-1][x+4]) - f := int32(z.ybr[y-1][x+5]) - g := int32(z.ybr[y-1][x+6]) - h := int32(z.ybr[y-1][x+7]) - ab := uint8((a + b + 1) / 2) - bc := uint8((b + c + 1) / 2) - cd := uint8((c + d + 1) / 2) - de := uint8((d + e + 1) / 2) - abc := uint8((a + 2*b + c + 2) / 4) - bcd := uint8((b + 2*c + d + 2) / 4) - cde := uint8((c + 2*d + e + 2) / 4) - def := uint8((d + 2*e + f + 2) / 4) - efg := uint8((e + 2*f + g + 2) / 4) - fgh := uint8((f + 2*g + h + 2) / 4) - z.ybr[y+0][x+0] = ab - z.ybr[y+0][x+1] = bc - z.ybr[y+0][x+2] = cd - z.ybr[y+0][x+3] = de - z.ybr[y+1][x+0] = abc - z.ybr[y+1][x+1] = bcd - z.ybr[y+1][x+2] = cde - z.ybr[y+1][x+3] = def - z.ybr[y+2][x+0] = bc - z.ybr[y+2][x+1] = cd - z.ybr[y+2][x+2] = de - z.ybr[y+2][x+3] = efg - z.ybr[y+3][x+0] = bcd - z.ybr[y+3][x+1] = cde - z.ybr[y+3][x+2] = def - z.ybr[y+3][x+3] = fgh -} - -func predFunc4HD(z *Decoder, y, x int) { - s := int32(z.ybr[y+3][x-1]) - r := int32(z.ybr[y+2][x-1]) - q := int32(z.ybr[y+1][x-1]) - p := int32(z.ybr[y+0][x-1]) - a := int32(z.ybr[y-1][x-1]) - b := int32(z.ybr[y-1][x+0]) - c := int32(z.ybr[y-1][x+1]) - d := int32(z.ybr[y-1][x+2]) - sr := uint8((s + r + 1) / 2) - rq := uint8((r + q + 1) / 2) - qp := uint8((q + p + 1) / 2) - pa := uint8((p + a + 1) / 2) - srq := uint8((s + 2*r + q + 2) / 4) - rqp := uint8((r + 2*q + p + 2) / 4) - qpa := uint8((q + 2*p + a + 2) / 4) - pab := uint8((p + 2*a + b + 2) / 4) - abc := uint8((a + 2*b + c + 2) / 4) - bcd := uint8((b + 2*c + d + 2) / 4) - z.ybr[y+0][x+0] = pa - z.ybr[y+0][x+1] = pab - z.ybr[y+0][x+2] = abc - 
z.ybr[y+0][x+3] = bcd - z.ybr[y+1][x+0] = qp - z.ybr[y+1][x+1] = qpa - z.ybr[y+1][x+2] = pa - z.ybr[y+1][x+3] = pab - z.ybr[y+2][x+0] = rq - z.ybr[y+2][x+1] = rqp - z.ybr[y+2][x+2] = qp - z.ybr[y+2][x+3] = qpa - z.ybr[y+3][x+0] = sr - z.ybr[y+3][x+1] = srq - z.ybr[y+3][x+2] = rq - z.ybr[y+3][x+3] = rqp -} - -func predFunc4HU(z *Decoder, y, x int) { - s := int32(z.ybr[y+3][x-1]) - r := int32(z.ybr[y+2][x-1]) - q := int32(z.ybr[y+1][x-1]) - p := int32(z.ybr[y+0][x-1]) - pq := uint8((p + q + 1) / 2) - qr := uint8((q + r + 1) / 2) - rs := uint8((r + s + 1) / 2) - pqr := uint8((p + 2*q + r + 2) / 4) - qrs := uint8((q + 2*r + s + 2) / 4) - rss := uint8((r + 2*s + s + 2) / 4) - sss := uint8(s) - z.ybr[y+0][x+0] = pq - z.ybr[y+0][x+1] = pqr - z.ybr[y+0][x+2] = qr - z.ybr[y+0][x+3] = qrs - z.ybr[y+1][x+0] = qr - z.ybr[y+1][x+1] = qrs - z.ybr[y+1][x+2] = rs - z.ybr[y+1][x+3] = rss - z.ybr[y+2][x+0] = rs - z.ybr[y+2][x+1] = rss - z.ybr[y+2][x+2] = sss - z.ybr[y+2][x+3] = sss - z.ybr[y+3][x+0] = sss - z.ybr[y+3][x+1] = sss - z.ybr[y+3][x+2] = sss - z.ybr[y+3][x+3] = sss -} - -func predFunc8DC(z *Decoder, y, x int) { - sum := uint32(8) - for i := 0; i < 8; i++ { - sum += uint32(z.ybr[y-1][x+i]) - } - for j := 0; j < 8; j++ { - sum += uint32(z.ybr[y+j][x-1]) - } - avg := uint8(sum / 16) - for j := 0; j < 8; j++ { - for i := 0; i < 8; i++ { - z.ybr[y+j][x+i] = avg - } - } -} - -func predFunc8TM(z *Decoder, y, x int) { - delta0 := -int32(z.ybr[y-1][x-1]) - for j := 0; j < 8; j++ { - delta1 := delta0 + int32(z.ybr[y+j][x-1]) - for i := 0; i < 8; i++ { - delta2 := delta1 + int32(z.ybr[y-1][x+i]) - z.ybr[y+j][x+i] = uint8(clip(delta2, 0, 255)) - } - } -} - -func predFunc8VE(z *Decoder, y, x int) { - for j := 0; j < 8; j++ { - for i := 0; i < 8; i++ { - z.ybr[y+j][x+i] = z.ybr[y-1][x+i] - } - } -} - -func predFunc8HE(z *Decoder, y, x int) { - for j := 0; j < 8; j++ { - for i := 0; i < 8; i++ { - z.ybr[y+j][x+i] = z.ybr[y+j][x-1] - } - } -} - -func predFunc8DCTop(z *Decoder, y, x int) 
{ - sum := uint32(4) - for j := 0; j < 8; j++ { - sum += uint32(z.ybr[y+j][x-1]) - } - avg := uint8(sum / 8) - for j := 0; j < 8; j++ { - for i := 0; i < 8; i++ { - z.ybr[y+j][x+i] = avg - } - } -} - -func predFunc8DCLeft(z *Decoder, y, x int) { - sum := uint32(4) - for i := 0; i < 8; i++ { - sum += uint32(z.ybr[y-1][x+i]) - } - avg := uint8(sum / 8) - for j := 0; j < 8; j++ { - for i := 0; i < 8; i++ { - z.ybr[y+j][x+i] = avg - } - } -} - -func predFunc8DCTopLeft(z *Decoder, y, x int) { - for j := 0; j < 8; j++ { - for i := 0; i < 8; i++ { - z.ybr[y+j][x+i] = 0x80 - } - } -} - -func predFunc16DC(z *Decoder, y, x int) { - sum := uint32(16) - for i := 0; i < 16; i++ { - sum += uint32(z.ybr[y-1][x+i]) - } - for j := 0; j < 16; j++ { - sum += uint32(z.ybr[y+j][x-1]) - } - avg := uint8(sum / 32) - for j := 0; j < 16; j++ { - for i := 0; i < 16; i++ { - z.ybr[y+j][x+i] = avg - } - } -} - -func predFunc16TM(z *Decoder, y, x int) { - delta0 := -int32(z.ybr[y-1][x-1]) - for j := 0; j < 16; j++ { - delta1 := delta0 + int32(z.ybr[y+j][x-1]) - for i := 0; i < 16; i++ { - delta2 := delta1 + int32(z.ybr[y-1][x+i]) - z.ybr[y+j][x+i] = uint8(clip(delta2, 0, 255)) - } - } -} - -func predFunc16VE(z *Decoder, y, x int) { - for j := 0; j < 16; j++ { - for i := 0; i < 16; i++ { - z.ybr[y+j][x+i] = z.ybr[y-1][x+i] - } - } -} - -func predFunc16HE(z *Decoder, y, x int) { - for j := 0; j < 16; j++ { - for i := 0; i < 16; i++ { - z.ybr[y+j][x+i] = z.ybr[y+j][x-1] - } - } -} - -func predFunc16DCTop(z *Decoder, y, x int) { - sum := uint32(8) - for j := 0; j < 16; j++ { - sum += uint32(z.ybr[y+j][x-1]) - } - avg := uint8(sum / 16) - for j := 0; j < 16; j++ { - for i := 0; i < 16; i++ { - z.ybr[y+j][x+i] = avg - } - } -} - -func predFunc16DCLeft(z *Decoder, y, x int) { - sum := uint32(8) - for i := 0; i < 16; i++ { - sum += uint32(z.ybr[y-1][x+i]) - } - avg := uint8(sum / 16) - for j := 0; j < 16; j++ { - for i := 0; i < 16; i++ { - z.ybr[y+j][x+i] = avg - } - } -} - -func 
predFunc16DCTopLeft(z *Decoder, y, x int) { - for j := 0; j < 16; j++ { - for i := 0; i < 16; i++ { - z.ybr[y+j][x+i] = 0x80 - } - } -} diff --git a/vendor/golang.org/x/image/vp8/quant.go b/vendor/golang.org/x/image/vp8/quant.go deleted file mode 100644 index da4361604..000000000 --- a/vendor/golang.org/x/image/vp8/quant.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package vp8 - -// This file implements parsing the quantization factors. - -// quant are DC/AC quantization factors. -type quant struct { - y1 [2]uint16 - y2 [2]uint16 - uv [2]uint16 -} - -// clip clips x to the range [min, max] inclusive. -func clip(x, min, max int32) int32 { - if x < min { - return min - } - if x > max { - return max - } - return x -} - -// parseQuant parses the quantization factors, as specified in section 9.6. -func (d *Decoder) parseQuant() { - baseQ0 := d.fp.readUint(uniformProb, 7) - dqy1DC := d.fp.readOptionalInt(uniformProb, 4) - const dqy1AC = 0 - dqy2DC := d.fp.readOptionalInt(uniformProb, 4) - dqy2AC := d.fp.readOptionalInt(uniformProb, 4) - dquvDC := d.fp.readOptionalInt(uniformProb, 4) - dquvAC := d.fp.readOptionalInt(uniformProb, 4) - for i := 0; i < nSegment; i++ { - q := int32(baseQ0) - if d.segmentHeader.useSegment { - if d.segmentHeader.relativeDelta { - q += int32(d.segmentHeader.quantizer[i]) - } else { - q = int32(d.segmentHeader.quantizer[i]) - } - } - d.quant[i].y1[0] = dequantTableDC[clip(q+dqy1DC, 0, 127)] - d.quant[i].y1[1] = dequantTableAC[clip(q+dqy1AC, 0, 127)] - d.quant[i].y2[0] = dequantTableDC[clip(q+dqy2DC, 0, 127)] * 2 - d.quant[i].y2[1] = dequantTableAC[clip(q+dqy2AC, 0, 127)] * 155 / 100 - if d.quant[i].y2[1] < 8 { - d.quant[i].y2[1] = 8 - } - // The 117 is not a typo. 
The dequant_init function in the spec's Reference - // Decoder Source Code (http://tools.ietf.org/html/rfc6386#section-9.6 Page 145) - // says to clamp the LHS value at 132, which is equal to dequantTableDC[117]. - d.quant[i].uv[0] = dequantTableDC[clip(q+dquvDC, 0, 117)] - d.quant[i].uv[1] = dequantTableAC[clip(q+dquvAC, 0, 127)] - } -} - -// The dequantization tables are specified in section 14.1. -var ( - dequantTableDC = [128]uint16{ - 4, 5, 6, 7, 8, 9, 10, 10, - 11, 12, 13, 14, 15, 16, 17, 17, - 18, 19, 20, 20, 21, 21, 22, 22, - 23, 23, 24, 25, 25, 26, 27, 28, - 29, 30, 31, 32, 33, 34, 35, 36, - 37, 37, 38, 39, 40, 41, 42, 43, - 44, 45, 46, 46, 47, 48, 49, 50, - 51, 52, 53, 54, 55, 56, 57, 58, - 59, 60, 61, 62, 63, 64, 65, 66, - 67, 68, 69, 70, 71, 72, 73, 74, - 75, 76, 76, 77, 78, 79, 80, 81, - 82, 83, 84, 85, 86, 87, 88, 89, - 91, 93, 95, 96, 98, 100, 101, 102, - 104, 106, 108, 110, 112, 114, 116, 118, - 122, 124, 126, 128, 130, 132, 134, 136, - 138, 140, 143, 145, 148, 151, 154, 157, - } - dequantTableAC = [128]uint16{ - 4, 5, 6, 7, 8, 9, 10, 11, - 12, 13, 14, 15, 16, 17, 18, 19, - 20, 21, 22, 23, 24, 25, 26, 27, - 28, 29, 30, 31, 32, 33, 34, 35, - 36, 37, 38, 39, 40, 41, 42, 43, - 44, 45, 46, 47, 48, 49, 50, 51, - 52, 53, 54, 55, 56, 57, 58, 60, - 62, 64, 66, 68, 70, 72, 74, 76, - 78, 80, 82, 84, 86, 88, 90, 92, - 94, 96, 98, 100, 102, 104, 106, 108, - 110, 112, 114, 116, 119, 122, 125, 128, - 131, 134, 137, 140, 143, 146, 149, 152, - 155, 158, 161, 164, 167, 170, 173, 177, - 181, 185, 189, 193, 197, 201, 205, 209, - 213, 217, 221, 225, 229, 234, 239, 245, - 249, 254, 259, 264, 269, 274, 279, 284, - } -) diff --git a/vendor/golang.org/x/image/vp8/reconstruct.go b/vendor/golang.org/x/image/vp8/reconstruct.go deleted file mode 100644 index c1cc4b532..000000000 --- a/vendor/golang.org/x/image/vp8/reconstruct.go +++ /dev/null @@ -1,442 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package vp8 - -// This file implements decoding DCT/WHT residual coefficients and -// reconstructing YCbCr data equal to predicted values plus residuals. -// -// There are 1*16*16 + 2*8*8 + 1*4*4 coefficients per macroblock: -// - 1*16*16 luma DCT coefficients, -// - 2*8*8 chroma DCT coefficients, and -// - 1*4*4 luma WHT coefficients. -// Coefficients are read in lots of 16, and the later coefficients in each lot -// are often zero. -// -// The YCbCr data consists of 1*16*16 luma values and 2*8*8 chroma values, -// plus previously decoded values along the top and left borders. The combined -// values are laid out as a [1+16+1+8][32]uint8 so that vertically adjacent -// samples are 32 bytes apart. In detail, the layout is: -// -// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 -// . . . . . . . a b b b b b b b b b b b b b b b b c c c c . . . . 0 -// . . . . . . . d Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y . . . . . . . . 1 -// . . . . . . . d Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y . . . . . . . . 2 -// . . . . . . . d Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y . . . . . . . . 3 -// . . . . . . . d Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y c c c c . . . . 4 -// . . . . . . . d Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y . . . . . . . . 5 -// . . . . . . . d Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y . . . . . . . . 6 -// . . . . . . . d Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y . . . . . . . . 7 -// . . . . . . . d Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y c c c c . . . . 8 -// . . . . . . . d Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y . . . . . . . . 9 -// . . . . . . . d Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y . . . . . . . . 10 -// . . . . . . . d Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y . . . . . . . . 11 -// . . . . . . . d Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y c c c c . . . . 12 -// . . . . . . . d Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y . . . . . . . . 13 -// . . . . . . . d Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y . . . . . . . . 14 -// . . . . . . . 
d Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y . . . . . . . . 15 -// . . . . . . . d Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y . . . . . . . . 16 -// . . . . . . . e f f f f f f f f . . . . . . . g h h h h h h h h 17 -// . . . . . . . i B B B B B B B B . . . . . . . j R R R R R R R R 18 -// . . . . . . . i B B B B B B B B . . . . . . . j R R R R R R R R 19 -// . . . . . . . i B B B B B B B B . . . . . . . j R R R R R R R R 20 -// . . . . . . . i B B B B B B B B . . . . . . . j R R R R R R R R 21 -// . . . . . . . i B B B B B B B B . . . . . . . j R R R R R R R R 22 -// . . . . . . . i B B B B B B B B . . . . . . . j R R R R R R R R 23 -// . . . . . . . i B B B B B B B B . . . . . . . j R R R R R R R R 24 -// . . . . . . . i B B B B B B B B . . . . . . . j R R R R R R R R 25 -// -// Y, B and R are the reconstructed luma (Y) and chroma (B, R) values. -// The Y values are predicted (either as one 16x16 region or 16 4x4 regions) -// based on the row above's Y values (some combination of {abc} or {dYC}) and -// the column left's Y values (either {ad} or {bY}). Similarly, B and R values -// are predicted on the row above and column left of their respective 8x8 -// region: {efi} for B, {ghj} for R. -// -// For uppermost macroblocks (i.e. those with mby == 0), the {abcefgh} values -// are initialized to 0x81. Otherwise, they are copied from the bottom row of -// the macroblock above. The {c} values are then duplicated from row 0 to rows -// 4, 8 and 12 of the ybr workspace. -// Similarly, for leftmost macroblocks (i.e. those with mbx == 0), the {adeigj} -// values are initialized to 0x7f. Otherwise, they are copied from the right -// column of the macroblock to the left. -// For the top-left macroblock (with mby == 0 && mbx == 0), {aeg} is 0x81. -// -// When moving from one macroblock to the next horizontally, the {adeigj} -// values can simply be copied from the workspace to itself, shifted by 8 or -// 16 columns. 
When moving from one macroblock to the next vertically, -// filtering can occur and hence the row values have to be copied from the -// post-filtered image instead of the pre-filtered workspace. - -const ( - bCoeffBase = 1*16*16 + 0*8*8 - rCoeffBase = 1*16*16 + 1*8*8 - whtCoeffBase = 1*16*16 + 2*8*8 -) - -const ( - ybrYX = 8 - ybrYY = 1 - ybrBX = 8 - ybrBY = 18 - ybrRX = 24 - ybrRY = 18 -) - -// prepareYBR prepares the {abcdefghij} elements of ybr. -func (d *Decoder) prepareYBR(mbx, mby int) { - if mbx == 0 { - for y := 0; y < 17; y++ { - d.ybr[y][7] = 0x81 - } - for y := 17; y < 26; y++ { - d.ybr[y][7] = 0x81 - d.ybr[y][23] = 0x81 - } - } else { - for y := 0; y < 17; y++ { - d.ybr[y][7] = d.ybr[y][7+16] - } - for y := 17; y < 26; y++ { - d.ybr[y][7] = d.ybr[y][15] - d.ybr[y][23] = d.ybr[y][31] - } - } - if mby == 0 { - for x := 7; x < 28; x++ { - d.ybr[0][x] = 0x7f - } - for x := 7; x < 16; x++ { - d.ybr[17][x] = 0x7f - } - for x := 23; x < 32; x++ { - d.ybr[17][x] = 0x7f - } - } else { - for i := 0; i < 16; i++ { - d.ybr[0][8+i] = d.img.Y[(16*mby-1)*d.img.YStride+16*mbx+i] - } - for i := 0; i < 8; i++ { - d.ybr[17][8+i] = d.img.Cb[(8*mby-1)*d.img.CStride+8*mbx+i] - } - for i := 0; i < 8; i++ { - d.ybr[17][24+i] = d.img.Cr[(8*mby-1)*d.img.CStride+8*mbx+i] - } - if mbx == d.mbw-1 { - for i := 16; i < 20; i++ { - d.ybr[0][8+i] = d.img.Y[(16*mby-1)*d.img.YStride+16*mbx+15] - } - } else { - for i := 16; i < 20; i++ { - d.ybr[0][8+i] = d.img.Y[(16*mby-1)*d.img.YStride+16*mbx+i] - } - } - } - for y := 4; y < 16; y += 4 { - d.ybr[y][24] = d.ybr[0][24] - d.ybr[y][25] = d.ybr[0][25] - d.ybr[y][26] = d.ybr[0][26] - d.ybr[y][27] = d.ybr[0][27] - } -} - -// btou converts a bool to a 0/1 value. -func btou(b bool) uint8 { - if b { - return 1 - } - return 0 -} - -// pack packs four 0/1 values into four bits of a uint32. 
-func pack(x [4]uint8, shift int) uint32 { - u := uint32(x[0])<<0 | uint32(x[1])<<1 | uint32(x[2])<<2 | uint32(x[3])<<3 - return u << uint(shift) -} - -// unpack unpacks four 0/1 values from a four-bit value. -var unpack = [16][4]uint8{ - {0, 0, 0, 0}, - {1, 0, 0, 0}, - {0, 1, 0, 0}, - {1, 1, 0, 0}, - {0, 0, 1, 0}, - {1, 0, 1, 0}, - {0, 1, 1, 0}, - {1, 1, 1, 0}, - {0, 0, 0, 1}, - {1, 0, 0, 1}, - {0, 1, 0, 1}, - {1, 1, 0, 1}, - {0, 0, 1, 1}, - {1, 0, 1, 1}, - {0, 1, 1, 1}, - {1, 1, 1, 1}, -} - -var ( - // The mapping from 4x4 region position to band is specified in section 13.3. - bands = [17]uint8{0, 1, 2, 3, 6, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6, 7, 0} - // Category probabilties are specified in section 13.2. - // Decoding categories 1 and 2 are done inline. - cat3456 = [4][12]uint8{ - {173, 148, 140, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - {176, 155, 140, 135, 0, 0, 0, 0, 0, 0, 0, 0}, - {180, 157, 141, 134, 130, 0, 0, 0, 0, 0, 0, 0}, - {254, 254, 243, 230, 196, 177, 153, 140, 133, 130, 129, 0}, - } - // The zigzag order is: - // 0 1 5 6 - // 2 4 7 12 - // 3 8 11 13 - // 9 10 14 15 - zigzag = [16]uint8{0, 1, 4, 8, 5, 2, 3, 6, 9, 12, 13, 10, 7, 11, 14, 15} -) - -// parseResiduals4 parses a 4x4 region of residual coefficients, as specified -// in section 13.3, and returns a 0/1 value indicating whether there was at -// least one non-zero coefficient. -// r is the partition to read bits from. -// plane and context describe which token probability table to use. context is -// either 0, 1 or 2, and equals how many of the macroblock left and macroblock -// above have non-zero coefficients. -// quant are the DC/AC quantization factors. -// skipFirstCoeff is whether the DC coefficient has already been parsed. -// coeffBase is the base index of d.coeff to write to. 
-func (d *Decoder) parseResiduals4(r *partition, plane int, context uint8, quant [2]uint16, skipFirstCoeff bool, coeffBase int) uint8 { - prob, n := &d.tokenProb[plane], 0 - if skipFirstCoeff { - n = 1 - } - p := prob[bands[n]][context] - if !r.readBit(p[0]) { - return 0 - } - for n != 16 { - n++ - if !r.readBit(p[1]) { - p = prob[bands[n]][0] - continue - } - var v uint32 - if !r.readBit(p[2]) { - v = 1 - p = prob[bands[n]][1] - } else { - if !r.readBit(p[3]) { - if !r.readBit(p[4]) { - v = 2 - } else { - v = 3 + r.readUint(p[5], 1) - } - } else if !r.readBit(p[6]) { - if !r.readBit(p[7]) { - // Category 1. - v = 5 + r.readUint(159, 1) - } else { - // Category 2. - v = 7 + 2*r.readUint(165, 1) + r.readUint(145, 1) - } - } else { - // Categories 3, 4, 5 or 6. - b1 := r.readUint(p[8], 1) - b0 := r.readUint(p[9+b1], 1) - cat := 2*b1 + b0 - tab := &cat3456[cat] - v = 0 - for i := 0; tab[i] != 0; i++ { - v *= 2 - v += r.readUint(tab[i], 1) - } - v += 3 + (8 << cat) - } - p = prob[bands[n]][2] - } - z := zigzag[n-1] - c := int32(v) * int32(quant[btou(z > 0)]) - if r.readBit(uniformProb) { - c = -c - } - d.coeff[coeffBase+int(z)] = int16(c) - if n == 16 || !r.readBit(p[0]) { - return 1 - } - } - return 1 -} - -// parseResiduals parses the residuals and returns whether inner loop filtering -// should be skipped for this macroblock. -func (d *Decoder) parseResiduals(mbx, mby int) (skip bool) { - partition := &d.op[mby&(d.nOP-1)] - plane := planeY1SansY2 - quant := &d.quant[d.segment] - - // Parse the DC coefficient of each 4x4 luma region. - if d.usePredY16 { - nz := d.parseResiduals4(partition, planeY2, d.leftMB.nzY16+d.upMB[mbx].nzY16, quant.y2, false, whtCoeffBase) - d.leftMB.nzY16 = nz - d.upMB[mbx].nzY16 = nz - d.inverseWHT16() - plane = planeY1WithY2 - } - - var ( - nzDC, nzAC [4]uint8 - nzDCMask, nzACMask uint32 - coeffBase int - ) - - // Parse the luma coefficients. 
- lnz := unpack[d.leftMB.nzMask&0x0f] - unz := unpack[d.upMB[mbx].nzMask&0x0f] - for y := 0; y < 4; y++ { - nz := lnz[y] - for x := 0; x < 4; x++ { - nz = d.parseResiduals4(partition, plane, nz+unz[x], quant.y1, d.usePredY16, coeffBase) - unz[x] = nz - nzAC[x] = nz - nzDC[x] = btou(d.coeff[coeffBase] != 0) - coeffBase += 16 - } - lnz[y] = nz - nzDCMask |= pack(nzDC, y*4) - nzACMask |= pack(nzAC, y*4) - } - lnzMask := pack(lnz, 0) - unzMask := pack(unz, 0) - - // Parse the chroma coefficients. - lnz = unpack[d.leftMB.nzMask>>4] - unz = unpack[d.upMB[mbx].nzMask>>4] - for c := 0; c < 4; c += 2 { - for y := 0; y < 2; y++ { - nz := lnz[y+c] - for x := 0; x < 2; x++ { - nz = d.parseResiduals4(partition, planeUV, nz+unz[x+c], quant.uv, false, coeffBase) - unz[x+c] = nz - nzAC[y*2+x] = nz - nzDC[y*2+x] = btou(d.coeff[coeffBase] != 0) - coeffBase += 16 - } - lnz[y+c] = nz - } - nzDCMask |= pack(nzDC, 16+c*2) - nzACMask |= pack(nzAC, 16+c*2) - } - lnzMask |= pack(lnz, 4) - unzMask |= pack(unz, 4) - - // Save decoder state. - d.leftMB.nzMask = uint8(lnzMask) - d.upMB[mbx].nzMask = uint8(unzMask) - d.nzDCMask = nzDCMask - d.nzACMask = nzACMask - - // Section 15.1 of the spec says that "Steps 2 and 4 [of the loop filter] - // are skipped... [if] there is no DCT coefficient coded for the whole - // macroblock." - return nzDCMask == 0 && nzACMask == 0 -} - -// reconstructMacroblock applies the predictor functions and adds the inverse- -// DCT transformed residuals to recover the YCbCr data. 
-func (d *Decoder) reconstructMacroblock(mbx, mby int) { - if d.usePredY16 { - p := checkTopLeftPred(mbx, mby, d.predY16) - predFunc16[p](d, 1, 8) - for j := 0; j < 4; j++ { - for i := 0; i < 4; i++ { - n := 4*j + i - y := 4*j + 1 - x := 4*i + 8 - mask := uint32(1) << uint(n) - if d.nzACMask&mask != 0 { - d.inverseDCT4(y, x, 16*n) - } else if d.nzDCMask&mask != 0 { - d.inverseDCT4DCOnly(y, x, 16*n) - } - } - } - } else { - for j := 0; j < 4; j++ { - for i := 0; i < 4; i++ { - n := 4*j + i - y := 4*j + 1 - x := 4*i + 8 - predFunc4[d.predY4[j][i]](d, y, x) - mask := uint32(1) << uint(n) - if d.nzACMask&mask != 0 { - d.inverseDCT4(y, x, 16*n) - } else if d.nzDCMask&mask != 0 { - d.inverseDCT4DCOnly(y, x, 16*n) - } - } - } - } - p := checkTopLeftPred(mbx, mby, d.predC8) - predFunc8[p](d, ybrBY, ybrBX) - if d.nzACMask&0x0f0000 != 0 { - d.inverseDCT8(ybrBY, ybrBX, bCoeffBase) - } else if d.nzDCMask&0x0f0000 != 0 { - d.inverseDCT8DCOnly(ybrBY, ybrBX, bCoeffBase) - } - predFunc8[p](d, ybrRY, ybrRX) - if d.nzACMask&0xf00000 != 0 { - d.inverseDCT8(ybrRY, ybrRX, rCoeffBase) - } else if d.nzDCMask&0xf00000 != 0 { - d.inverseDCT8DCOnly(ybrRY, ybrRX, rCoeffBase) - } -} - -// reconstruct reconstructs one macroblock and returns whether inner loop -// filtering should be skipped for it. -func (d *Decoder) reconstruct(mbx, mby int) (skip bool) { - if d.segmentHeader.updateMap { - if !d.fp.readBit(d.segmentHeader.prob[0]) { - d.segment = int(d.fp.readUint(d.segmentHeader.prob[1], 1)) - } else { - d.segment = int(d.fp.readUint(d.segmentHeader.prob[2], 1)) + 2 - } - } - if d.useSkipProb { - skip = d.fp.readBit(d.skipProb) - } - // Prepare the workspace. - for i := range d.coeff { - d.coeff[i] = 0 - } - d.prepareYBR(mbx, mby) - // Parse the predictor modes. - d.usePredY16 = d.fp.readBit(145) - if d.usePredY16 { - d.parsePredModeY16(mbx) - } else { - d.parsePredModeY4(mbx) - } - d.parsePredModeC8() - // Parse the residuals. 
- if !skip { - skip = d.parseResiduals(mbx, mby) - } else { - if d.usePredY16 { - d.leftMB.nzY16 = 0 - d.upMB[mbx].nzY16 = 0 - } - d.leftMB.nzMask = 0 - d.upMB[mbx].nzMask = 0 - d.nzDCMask = 0 - d.nzACMask = 0 - } - // Reconstruct the YCbCr data and copy it to the image. - d.reconstructMacroblock(mbx, mby) - for i, y := (mby*d.img.YStride+mbx)*16, 0; y < 16; i, y = i+d.img.YStride, y+1 { - copy(d.img.Y[i:i+16], d.ybr[ybrYY+y][ybrYX:ybrYX+16]) - } - for i, y := (mby*d.img.CStride+mbx)*8, 0; y < 8; i, y = i+d.img.CStride, y+1 { - copy(d.img.Cb[i:i+8], d.ybr[ybrBY+y][ybrBX:ybrBX+8]) - copy(d.img.Cr[i:i+8], d.ybr[ybrRY+y][ybrRX:ybrRX+8]) - } - return skip -} diff --git a/vendor/golang.org/x/image/vp8/token.go b/vendor/golang.org/x/image/vp8/token.go deleted file mode 100644 index da99cf0f9..000000000 --- a/vendor/golang.org/x/image/vp8/token.go +++ /dev/null @@ -1,381 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package vp8 - -// This file contains token probabilities for decoding DCT/WHT coefficients, as -// specified in chapter 13. - -func (d *Decoder) parseTokenProb() { - for i := range d.tokenProb { - for j := range d.tokenProb[i] { - for k := range d.tokenProb[i][j] { - for l := range d.tokenProb[i][j][k] { - if d.fp.readBit(tokenProbUpdateProb[i][j][k][l]) { - d.tokenProb[i][j][k][l] = uint8(d.fp.readUint(uniformProb, 8)) - } - } - } - } - } -} - -// The plane enumeration is specified in section 13.3. -const ( - planeY1WithY2 = iota - planeY2 - planeUV - planeY1SansY2 - nPlane -) - -const ( - nBand = 8 - nContext = 3 - nProb = 11 -) - -// Token probability update probabilities are specified in section 13.4. 
-var tokenProbUpdateProb = [nPlane][nBand][nContext][nProb]uint8{ - { - { - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - }, - { - {176, 246, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - {223, 241, 252, 255, 255, 255, 255, 255, 255, 255, 255}, - {249, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255}, - }, - { - {255, 244, 252, 255, 255, 255, 255, 255, 255, 255, 255}, - {234, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255}, - {253, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - }, - { - {255, 246, 254, 255, 255, 255, 255, 255, 255, 255, 255}, - {239, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255}, - {254, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255}, - }, - { - {255, 248, 254, 255, 255, 255, 255, 255, 255, 255, 255}, - {251, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255}, - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - }, - { - {255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255}, - {251, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255}, - {254, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255}, - }, - { - {255, 254, 253, 255, 254, 255, 255, 255, 255, 255, 255}, - {250, 255, 254, 255, 254, 255, 255, 255, 255, 255, 255}, - {254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - }, - { - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - }, - }, - { - { - {217, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - {225, 252, 241, 253, 255, 255, 254, 255, 255, 255, 255}, - {234, 250, 241, 250, 253, 255, 253, 254, 255, 255, 255}, - }, - { - {255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - {223, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255}, - {238, 253, 254, 254, 255, 255, 255, 255, 255, 255, 255}, - }, - { - {255, 248, 254, 255, 255, 255, 255, 255, 255, 255, 
255}, - {249, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - }, - { - {255, 253, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - {247, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - }, - { - {255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255}, - {252, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - }, - { - {255, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255}, - {253, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - }, - { - {255, 254, 253, 255, 255, 255, 255, 255, 255, 255, 255}, - {250, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - {254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - }, - { - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - }, - }, - { - { - {186, 251, 250, 255, 255, 255, 255, 255, 255, 255, 255}, - {234, 251, 244, 254, 255, 255, 255, 255, 255, 255, 255}, - {251, 251, 243, 253, 254, 255, 254, 255, 255, 255, 255}, - }, - { - {255, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255}, - {236, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255}, - {251, 253, 253, 254, 254, 255, 255, 255, 255, 255, 255}, - }, - { - {255, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255}, - {254, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255}, - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - }, - { - {255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - {254, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - {254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - }, - { - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - {254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - }, - 
{ - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - }, - { - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - }, - { - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - }, - }, - { - { - {248, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - {250, 254, 252, 254, 255, 255, 255, 255, 255, 255, 255}, - {248, 254, 249, 253, 255, 255, 255, 255, 255, 255, 255}, - }, - { - {255, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255}, - {246, 253, 253, 255, 255, 255, 255, 255, 255, 255, 255}, - {252, 254, 251, 254, 254, 255, 255, 255, 255, 255, 255}, - }, - { - {255, 254, 252, 255, 255, 255, 255, 255, 255, 255, 255}, - {248, 254, 253, 255, 255, 255, 255, 255, 255, 255, 255}, - {253, 255, 254, 254, 255, 255, 255, 255, 255, 255, 255}, - }, - { - {255, 251, 254, 255, 255, 255, 255, 255, 255, 255, 255}, - {245, 251, 254, 255, 255, 255, 255, 255, 255, 255, 255}, - {253, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255}, - }, - { - {255, 251, 253, 255, 255, 255, 255, 255, 255, 255, 255}, - {252, 253, 254, 255, 255, 255, 255, 255, 255, 255, 255}, - {255, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - }, - { - {255, 252, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - {249, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255}, - {255, 255, 254, 255, 255, 255, 255, 255, 255, 255, 255}, - }, - { - {255, 255, 253, 255, 255, 255, 255, 255, 255, 255, 255}, - {250, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - }, - { - {255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - {254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - {255, 
255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, - }, - }, -} - -// Default token probabilities are specified in section 13.5. -var defaultTokenProb = [nPlane][nBand][nContext][nProb]uint8{ - { - { - {128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128}, - {128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128}, - {128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128}, - }, - { - {253, 136, 254, 255, 228, 219, 128, 128, 128, 128, 128}, - {189, 129, 242, 255, 227, 213, 255, 219, 128, 128, 128}, - {106, 126, 227, 252, 214, 209, 255, 255, 128, 128, 128}, - }, - { - {1, 98, 248, 255, 236, 226, 255, 255, 128, 128, 128}, - {181, 133, 238, 254, 221, 234, 255, 154, 128, 128, 128}, - {78, 134, 202, 247, 198, 180, 255, 219, 128, 128, 128}, - }, - { - {1, 185, 249, 255, 243, 255, 128, 128, 128, 128, 128}, - {184, 150, 247, 255, 236, 224, 128, 128, 128, 128, 128}, - {77, 110, 216, 255, 236, 230, 128, 128, 128, 128, 128}, - }, - { - {1, 101, 251, 255, 241, 255, 128, 128, 128, 128, 128}, - {170, 139, 241, 252, 236, 209, 255, 255, 128, 128, 128}, - {37, 116, 196, 243, 228, 255, 255, 255, 128, 128, 128}, - }, - { - {1, 204, 254, 255, 245, 255, 128, 128, 128, 128, 128}, - {207, 160, 250, 255, 238, 128, 128, 128, 128, 128, 128}, - {102, 103, 231, 255, 211, 171, 128, 128, 128, 128, 128}, - }, - { - {1, 152, 252, 255, 240, 255, 128, 128, 128, 128, 128}, - {177, 135, 243, 255, 234, 225, 128, 128, 128, 128, 128}, - {80, 129, 211, 255, 194, 224, 128, 128, 128, 128, 128}, - }, - { - {1, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128}, - {246, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128}, - {255, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128}, - }, - }, - { - { - {198, 35, 237, 223, 193, 187, 162, 160, 145, 155, 62}, - {131, 45, 198, 221, 172, 176, 220, 157, 252, 221, 1}, - {68, 47, 146, 208, 149, 167, 221, 162, 255, 223, 128}, - }, - { - {1, 149, 241, 255, 221, 224, 255, 255, 128, 128, 128}, - {184, 141, 234, 253, 222, 220, 255, 199, 128, 128, 128}, - {81, 99, 181, 242, 176, 
190, 249, 202, 255, 255, 128}, - }, - { - {1, 129, 232, 253, 214, 197, 242, 196, 255, 255, 128}, - {99, 121, 210, 250, 201, 198, 255, 202, 128, 128, 128}, - {23, 91, 163, 242, 170, 187, 247, 210, 255, 255, 128}, - }, - { - {1, 200, 246, 255, 234, 255, 128, 128, 128, 128, 128}, - {109, 178, 241, 255, 231, 245, 255, 255, 128, 128, 128}, - {44, 130, 201, 253, 205, 192, 255, 255, 128, 128, 128}, - }, - { - {1, 132, 239, 251, 219, 209, 255, 165, 128, 128, 128}, - {94, 136, 225, 251, 218, 190, 255, 255, 128, 128, 128}, - {22, 100, 174, 245, 186, 161, 255, 199, 128, 128, 128}, - }, - { - {1, 182, 249, 255, 232, 235, 128, 128, 128, 128, 128}, - {124, 143, 241, 255, 227, 234, 128, 128, 128, 128, 128}, - {35, 77, 181, 251, 193, 211, 255, 205, 128, 128, 128}, - }, - { - {1, 157, 247, 255, 236, 231, 255, 255, 128, 128, 128}, - {121, 141, 235, 255, 225, 227, 255, 255, 128, 128, 128}, - {45, 99, 188, 251, 195, 217, 255, 224, 128, 128, 128}, - }, - { - {1, 1, 251, 255, 213, 255, 128, 128, 128, 128, 128}, - {203, 1, 248, 255, 255, 128, 128, 128, 128, 128, 128}, - {137, 1, 177, 255, 224, 255, 128, 128, 128, 128, 128}, - }, - }, - { - { - {253, 9, 248, 251, 207, 208, 255, 192, 128, 128, 128}, - {175, 13, 224, 243, 193, 185, 249, 198, 255, 255, 128}, - {73, 17, 171, 221, 161, 179, 236, 167, 255, 234, 128}, - }, - { - {1, 95, 247, 253, 212, 183, 255, 255, 128, 128, 128}, - {239, 90, 244, 250, 211, 209, 255, 255, 128, 128, 128}, - {155, 77, 195, 248, 188, 195, 255, 255, 128, 128, 128}, - }, - { - {1, 24, 239, 251, 218, 219, 255, 205, 128, 128, 128}, - {201, 51, 219, 255, 196, 186, 128, 128, 128, 128, 128}, - {69, 46, 190, 239, 201, 218, 255, 228, 128, 128, 128}, - }, - { - {1, 191, 251, 255, 255, 128, 128, 128, 128, 128, 128}, - {223, 165, 249, 255, 213, 255, 128, 128, 128, 128, 128}, - {141, 124, 248, 255, 255, 128, 128, 128, 128, 128, 128}, - }, - { - {1, 16, 248, 255, 255, 128, 128, 128, 128, 128, 128}, - {190, 36, 230, 255, 236, 255, 128, 128, 128, 128, 128}, - {149, 1, 255, 128, 
128, 128, 128, 128, 128, 128, 128}, - }, - { - {1, 226, 255, 128, 128, 128, 128, 128, 128, 128, 128}, - {247, 192, 255, 128, 128, 128, 128, 128, 128, 128, 128}, - {240, 128, 255, 128, 128, 128, 128, 128, 128, 128, 128}, - }, - { - {1, 134, 252, 255, 255, 128, 128, 128, 128, 128, 128}, - {213, 62, 250, 255, 255, 128, 128, 128, 128, 128, 128}, - {55, 93, 255, 128, 128, 128, 128, 128, 128, 128, 128}, - }, - { - {128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128}, - {128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128}, - {128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128}, - }, - }, - { - { - {202, 24, 213, 235, 186, 191, 220, 160, 240, 175, 255}, - {126, 38, 182, 232, 169, 184, 228, 174, 255, 187, 128}, - {61, 46, 138, 219, 151, 178, 240, 170, 255, 216, 128}, - }, - { - {1, 112, 230, 250, 199, 191, 247, 159, 255, 255, 128}, - {166, 109, 228, 252, 211, 215, 255, 174, 128, 128, 128}, - {39, 77, 162, 232, 172, 180, 245, 178, 255, 255, 128}, - }, - { - {1, 52, 220, 246, 198, 199, 249, 220, 255, 255, 128}, - {124, 74, 191, 243, 183, 193, 250, 221, 255, 255, 128}, - {24, 71, 130, 219, 154, 170, 243, 182, 255, 255, 128}, - }, - { - {1, 182, 225, 249, 219, 240, 255, 224, 128, 128, 128}, - {149, 150, 226, 252, 216, 205, 255, 171, 128, 128, 128}, - {28, 108, 170, 242, 183, 194, 254, 223, 255, 255, 128}, - }, - { - {1, 81, 230, 252, 204, 203, 255, 192, 128, 128, 128}, - {123, 102, 209, 247, 188, 196, 255, 233, 128, 128, 128}, - {20, 95, 153, 243, 164, 173, 255, 203, 128, 128, 128}, - }, - { - {1, 222, 248, 255, 216, 213, 128, 128, 128, 128, 128}, - {168, 175, 246, 252, 235, 205, 255, 255, 128, 128, 128}, - {47, 116, 215, 255, 211, 212, 255, 255, 128, 128, 128}, - }, - { - {1, 121, 236, 253, 212, 214, 255, 255, 128, 128, 128}, - {141, 84, 213, 252, 201, 202, 255, 219, 128, 128, 128}, - {42, 80, 160, 240, 162, 185, 255, 205, 128, 128, 128}, - }, - { - {1, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128}, - {244, 1, 255, 128, 128, 128, 128, 128, 128, 128, 128}, - {238, 1, 
255, 128, 128, 128, 128, 128, 128, 128, 128}, - }, - }, -} diff --git a/vendor/golang.org/x/image/vp8l/decode.go b/vendor/golang.org/x/image/vp8l/decode.go deleted file mode 100644 index 431948701..000000000 --- a/vendor/golang.org/x/image/vp8l/decode.go +++ /dev/null @@ -1,603 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package vp8l implements a decoder for the VP8L lossless image format. -// -// The VP8L specification is at: -// https://developers.google.com/speed/webp/docs/riff_container -package vp8l // import "golang.org/x/image/vp8l" - -import ( - "bufio" - "errors" - "image" - "image/color" - "io" -) - -var ( - errInvalidCodeLengths = errors.New("vp8l: invalid code lengths") - errInvalidHuffmanTree = errors.New("vp8l: invalid Huffman tree") -) - -// colorCacheMultiplier is the multiplier used for the color cache hash -// function, specified in section 4.2.3. -const colorCacheMultiplier = 0x1e35a7bd - -// distanceMapTable is the look-up table for distanceMap. 
-var distanceMapTable = [120]uint8{ - 0x18, 0x07, 0x17, 0x19, 0x28, 0x06, 0x27, 0x29, 0x16, 0x1a, - 0x26, 0x2a, 0x38, 0x05, 0x37, 0x39, 0x15, 0x1b, 0x36, 0x3a, - 0x25, 0x2b, 0x48, 0x04, 0x47, 0x49, 0x14, 0x1c, 0x35, 0x3b, - 0x46, 0x4a, 0x24, 0x2c, 0x58, 0x45, 0x4b, 0x34, 0x3c, 0x03, - 0x57, 0x59, 0x13, 0x1d, 0x56, 0x5a, 0x23, 0x2d, 0x44, 0x4c, - 0x55, 0x5b, 0x33, 0x3d, 0x68, 0x02, 0x67, 0x69, 0x12, 0x1e, - 0x66, 0x6a, 0x22, 0x2e, 0x54, 0x5c, 0x43, 0x4d, 0x65, 0x6b, - 0x32, 0x3e, 0x78, 0x01, 0x77, 0x79, 0x53, 0x5d, 0x11, 0x1f, - 0x64, 0x6c, 0x42, 0x4e, 0x76, 0x7a, 0x21, 0x2f, 0x75, 0x7b, - 0x31, 0x3f, 0x63, 0x6d, 0x52, 0x5e, 0x00, 0x74, 0x7c, 0x41, - 0x4f, 0x10, 0x20, 0x62, 0x6e, 0x30, 0x73, 0x7d, 0x51, 0x5f, - 0x40, 0x72, 0x7e, 0x61, 0x6f, 0x50, 0x71, 0x7f, 0x60, 0x70, -} - -// distanceMap maps a LZ77 backwards reference distance to a two-dimensional -// pixel offset, specified in section 4.2.2. -func distanceMap(w int32, code uint32) int32 { - if int32(code) > int32(len(distanceMapTable)) { - return int32(code) - int32(len(distanceMapTable)) - } - distCode := int32(distanceMapTable[code-1]) - yOffset := distCode >> 4 - xOffset := 8 - distCode&0xf - if d := yOffset*w + xOffset; d >= 1 { - return d - } - return 1 -} - -// decoder holds the bit-stream for a VP8L image. -type decoder struct { - r io.ByteReader - bits uint32 - nBits uint32 -} - -// read reads the next n bits from the decoder's bit-stream. -func (d *decoder) read(n uint32) (uint32, error) { - for d.nBits < n { - c, err := d.r.ReadByte() - if err != nil { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return 0, err - } - d.bits |= uint32(c) << d.nBits - d.nBits += 8 - } - u := d.bits & (1<>= n - d.nBits -= n - return u, nil -} - -// decodeTransform decodes the next transform and the width of the image after -// transformation (or equivalently, before inverse transformation), specified -// in section 3. 
-func (d *decoder) decodeTransform(w int32, h int32) (t transform, newWidth int32, err error) { - t.oldWidth = w - t.transformType, err = d.read(2) - if err != nil { - return transform{}, 0, err - } - switch t.transformType { - case transformTypePredictor, transformTypeCrossColor: - t.bits, err = d.read(3) - if err != nil { - return transform{}, 0, err - } - t.bits += 2 - t.pix, err = d.decodePix(nTiles(w, t.bits), nTiles(h, t.bits), 0, false) - if err != nil { - return transform{}, 0, err - } - case transformTypeSubtractGreen: - // No-op. - case transformTypeColorIndexing: - nColors, err := d.read(8) - if err != nil { - return transform{}, 0, err - } - nColors++ - t.bits = 0 - switch { - case nColors <= 2: - t.bits = 3 - case nColors <= 4: - t.bits = 2 - case nColors <= 16: - t.bits = 1 - } - w = nTiles(w, t.bits) - pix, err := d.decodePix(int32(nColors), 1, 4*256, false) - if err != nil { - return transform{}, 0, err - } - for p := 4; p < len(pix); p += 4 { - pix[p+0] += pix[p-4] - pix[p+1] += pix[p-3] - pix[p+2] += pix[p-2] - pix[p+3] += pix[p-1] - } - // The spec says that "if the index is equal or larger than color_table_size, - // the argb color value should be set to 0x00000000 (transparent black)." - // We re-slice up to 256 4-byte pixels. - t.pix = pix[:4*256] - } - return t, w, nil -} - -// repeatsCodeLength is the minimum code length for repeated codes. -const repeatsCodeLength = 16 - -// These magic numbers are specified at the end of section 5.2.2. -// The 3-length arrays apply to code lengths >= repeatsCodeLength. -var ( - codeLengthCodeOrder = [19]uint8{ - 17, 18, 0, 1, 2, 3, 4, 5, 16, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - } - repeatBits = [3]uint8{2, 3, 7} - repeatOffsets = [3]uint8{3, 3, 11} -) - -// decodeCodeLengths decodes a Huffman tree's code lengths which are themselves -// encoded via a Huffman tree, specified in section 5.2.2. 
-func (d *decoder) decodeCodeLengths(dst []uint32, codeLengthCodeLengths []uint32) error { - h := hTree{} - if err := h.build(codeLengthCodeLengths); err != nil { - return err - } - - maxSymbol := len(dst) - useLength, err := d.read(1) - if err != nil { - return err - } - if useLength != 0 { - n, err := d.read(3) - if err != nil { - return err - } - n = 2 + 2*n - ms, err := d.read(n) - if err != nil { - return err - } - maxSymbol = int(ms) + 2 - if maxSymbol > len(dst) { - return errInvalidCodeLengths - } - } - - // The spec says that "if code 16 [meaning repeat] is used before - // a non-zero value has been emitted, a value of 8 is repeated." - prevCodeLength := uint32(8) - - for symbol := 0; symbol < len(dst); { - if maxSymbol == 0 { - break - } - maxSymbol-- - codeLength, err := h.next(d) - if err != nil { - return err - } - if codeLength < repeatsCodeLength { - dst[symbol] = codeLength - symbol++ - if codeLength != 0 { - prevCodeLength = codeLength - } - continue - } - - repeat, err := d.read(uint32(repeatBits[codeLength-repeatsCodeLength])) - if err != nil { - return err - } - repeat += uint32(repeatOffsets[codeLength-repeatsCodeLength]) - if symbol+int(repeat) > len(dst) { - return errInvalidCodeLengths - } - // A code length of 16 repeats the previous non-zero code. - // A code length of 17 or 18 repeats zeroes. - cl := uint32(0) - if codeLength == 16 { - cl = prevCodeLength - } - for ; repeat > 0; repeat-- { - dst[symbol] = cl - symbol++ - } - } - return nil -} - -// decodeHuffmanTree decodes a Huffman tree into h. 
-func (d *decoder) decodeHuffmanTree(h *hTree, alphabetSize uint32) error { - useSimple, err := d.read(1) - if err != nil { - return err - } - if useSimple != 0 { - nSymbols, err := d.read(1) - if err != nil { - return err - } - nSymbols++ - firstSymbolLengthCode, err := d.read(1) - if err != nil { - return err - } - firstSymbolLengthCode = 7*firstSymbolLengthCode + 1 - var symbols [2]uint32 - symbols[0], err = d.read(firstSymbolLengthCode) - if err != nil { - return err - } - if nSymbols == 2 { - symbols[1], err = d.read(8) - if err != nil { - return err - } - } - return h.buildSimple(nSymbols, symbols, alphabetSize) - } - - nCodes, err := d.read(4) - if err != nil { - return err - } - nCodes += 4 - if int(nCodes) > len(codeLengthCodeOrder) { - return errInvalidHuffmanTree - } - codeLengthCodeLengths := [len(codeLengthCodeOrder)]uint32{} - for i := uint32(0); i < nCodes; i++ { - codeLengthCodeLengths[codeLengthCodeOrder[i]], err = d.read(3) - if err != nil { - return err - } - } - codeLengths := make([]uint32, alphabetSize) - if err = d.decodeCodeLengths(codeLengths, codeLengthCodeLengths[:]); err != nil { - return err - } - return h.build(codeLengths) -} - -const ( - huffGreen = 0 - huffRed = 1 - huffBlue = 2 - huffAlpha = 3 - huffDistance = 4 - nHuff = 5 -) - -// hGroup is an array of 5 Huffman trees. -type hGroup [nHuff]hTree - -// decodeHuffmanGroups decodes the one or more hGroups used to decode the pixel -// data. If one hGroup is used for the entire image, then hPix and hBits will -// be zero. If more than one hGroup is used, then hPix contains the meta-image -// that maps tiles to hGroup index, and hBits contains the log-2 tile size. 
-func (d *decoder) decodeHuffmanGroups(w int32, h int32, topLevel bool, ccBits uint32) ( - hGroups []hGroup, hPix []byte, hBits uint32, err error) { - - maxHGroupIndex := 0 - if topLevel { - useMeta, err := d.read(1) - if err != nil { - return nil, nil, 0, err - } - if useMeta != 0 { - hBits, err = d.read(3) - if err != nil { - return nil, nil, 0, err - } - hBits += 2 - hPix, err = d.decodePix(nTiles(w, hBits), nTiles(h, hBits), 0, false) - if err != nil { - return nil, nil, 0, err - } - for p := 0; p < len(hPix); p += 4 { - i := int(hPix[p])<<8 | int(hPix[p+1]) - if maxHGroupIndex < i { - maxHGroupIndex = i - } - } - } - } - hGroups = make([]hGroup, maxHGroupIndex+1) - for i := range hGroups { - for j, alphabetSize := range alphabetSizes { - if j == 0 && ccBits > 0 { - alphabetSize += 1 << ccBits - } - if err := d.decodeHuffmanTree(&hGroups[i][j], alphabetSize); err != nil { - return nil, nil, 0, err - } - } - } - return hGroups, hPix, hBits, nil -} - -const ( - nLiteralCodes = 256 - nLengthCodes = 24 - nDistanceCodes = 40 -) - -var alphabetSizes = [nHuff]uint32{ - nLiteralCodes + nLengthCodes, - nLiteralCodes, - nLiteralCodes, - nLiteralCodes, - nDistanceCodes, -} - -// decodePix decodes pixel data, specified in section 5.2.2. -func (d *decoder) decodePix(w int32, h int32, minCap int32, topLevel bool) ([]byte, error) { - // Decode the color cache parameters. 
- ccBits, ccShift, ccEntries := uint32(0), uint32(0), ([]uint32)(nil) - useColorCache, err := d.read(1) - if err != nil { - return nil, err - } - if useColorCache != 0 { - ccBits, err = d.read(4) - if err != nil { - return nil, err - } - if ccBits < 1 || 11 < ccBits { - return nil, errors.New("vp8l: invalid color cache parameters") - } - ccShift = 32 - ccBits - ccEntries = make([]uint32, 1<>hBits) + (x >> hBits)) - hg = &hGroups[uint32(hPix[i])<<8|uint32(hPix[i+1])] - } - - green, err := hg[huffGreen].next(d) - if err != nil { - return nil, err - } - switch { - case green < nLiteralCodes: - // We have a literal pixel. - red, err := hg[huffRed].next(d) - if err != nil { - return nil, err - } - blue, err := hg[huffBlue].next(d) - if err != nil { - return nil, err - } - alpha, err := hg[huffAlpha].next(d) - if err != nil { - return nil, err - } - pix[p+0] = uint8(red) - pix[p+1] = uint8(green) - pix[p+2] = uint8(blue) - pix[p+3] = uint8(alpha) - p += 4 - - x++ - if x == w { - x, y = 0, y+1 - } - lookupHG = hMask != 0 && x&hMask == 0 - - case green < nLiteralCodes+nLengthCodes: - // We have a LZ77 backwards reference. - length, err := d.lz77Param(green - nLiteralCodes) - if err != nil { - return nil, err - } - distSym, err := hg[huffDistance].next(d) - if err != nil { - return nil, err - } - distCode, err := d.lz77Param(distSym) - if err != nil { - return nil, err - } - dist := distanceMap(w, distCode) - pEnd := p + 4*int(length) - q := p - 4*int(dist) - qEnd := pEnd - 4*int(dist) - if p < 0 || len(pix) < pEnd || q < 0 || len(pix) < qEnd { - return nil, errors.New("vp8l: invalid LZ77 parameters") - } - for ; p < pEnd; p, q = p+1, q+1 { - pix[p] = pix[q] - } - - x += int32(length) - for x >= w { - x, y = x-w, y+1 - } - lookupHG = hMask != 0 - - default: - // We have a color cache lookup. First, insert previous pixels - // into the cache. Note that VP8L assumes ARGB order, but the - // Go image.RGBA type is in RGBA order. 
- for ; cachedP < p; cachedP += 4 { - argb := uint32(pix[cachedP+0])<<16 | - uint32(pix[cachedP+1])<<8 | - uint32(pix[cachedP+2])<<0 | - uint32(pix[cachedP+3])<<24 - ccEntries[(argb*colorCacheMultiplier)>>ccShift] = argb - } - green -= nLiteralCodes + nLengthCodes - if int(green) >= len(ccEntries) { - return nil, errors.New("vp8l: invalid color cache index") - } - argb := ccEntries[green] - pix[p+0] = uint8(argb >> 16) - pix[p+1] = uint8(argb >> 8) - pix[p+2] = uint8(argb >> 0) - pix[p+3] = uint8(argb >> 24) - p += 4 - - x++ - if x == w { - x, y = 0, y+1 - } - lookupHG = hMask != 0 && x&hMask == 0 - } - } - return pix, nil -} - -// lz77Param returns the next LZ77 parameter: a length or a distance, specified -// in section 4.2.2. -func (d *decoder) lz77Param(symbol uint32) (uint32, error) { - if symbol < 4 { - return symbol + 1, nil - } - extraBits := (symbol - 2) >> 1 - offset := (2 + symbol&1) << extraBits - n, err := d.read(extraBits) - if err != nil { - return 0, err - } - return offset + n + 1, nil -} - -// decodeHeader decodes the VP8L header from r. -func decodeHeader(r io.Reader) (d *decoder, w int32, h int32, err error) { - rr, ok := r.(io.ByteReader) - if !ok { - rr = bufio.NewReader(r) - } - d = &decoder{r: rr} - magic, err := d.read(8) - if err != nil { - return nil, 0, 0, err - } - if magic != 0x2f { - return nil, 0, 0, errors.New("vp8l: invalid header") - } - width, err := d.read(14) - if err != nil { - return nil, 0, 0, err - } - width++ - height, err := d.read(14) - if err != nil { - return nil, 0, 0, err - } - height++ - _, err = d.read(1) // Read and ignore the hasAlpha hint. - if err != nil { - return nil, 0, 0, err - } - version, err := d.read(3) - if err != nil { - return nil, 0, 0, err - } - if version != 0 { - return nil, 0, 0, errors.New("vp8l: invalid version") - } - return d, int32(width), int32(height), nil -} - -// DecodeConfig decodes the color model and dimensions of a VP8L image from r. 
-func DecodeConfig(r io.Reader) (image.Config, error) { - _, w, h, err := decodeHeader(r) - if err != nil { - return image.Config{}, err - } - return image.Config{ - ColorModel: color.NRGBAModel, - Width: int(w), - Height: int(h), - }, nil -} - -// Decode decodes a VP8L image from r. -func Decode(r io.Reader) (image.Image, error) { - d, w, h, err := decodeHeader(r) - if err != nil { - return nil, err - } - // Decode the transforms. - var ( - nTransforms int - transforms [nTransformTypes]transform - transformsSeen [nTransformTypes]bool - originalW = w - ) - for { - more, err := d.read(1) - if err != nil { - return nil, err - } - if more == 0 { - break - } - var t transform - t, w, err = d.decodeTransform(w, h) - if err != nil { - return nil, err - } - if transformsSeen[t.transformType] { - return nil, errors.New("vp8l: repeated transform") - } - transformsSeen[t.transformType] = true - transforms[nTransforms] = t - nTransforms++ - } - // Decode the transformed pixels. - pix, err := d.decodePix(w, h, 0, true) - if err != nil { - return nil, err - } - // Apply the inverse transformations. - for i := nTransforms - 1; i >= 0; i-- { - t := &transforms[i] - pix = inverseTransforms[t.transformType](t, pix, h) - } - return &image.NRGBA{ - Pix: pix, - Stride: 4 * int(originalW), - Rect: image.Rect(0, 0, int(originalW), int(h)), - }, nil -} diff --git a/vendor/golang.org/x/image/vp8l/huffman.go b/vendor/golang.org/x/image/vp8l/huffman.go deleted file mode 100644 index 36368a872..000000000 --- a/vendor/golang.org/x/image/vp8l/huffman.go +++ /dev/null @@ -1,245 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package vp8l - -import ( - "io" -) - -// reverseBits reverses the bits in a byte. 
-var reverseBits = [256]uint8{ - 0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0, - 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8, 0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8, - 0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4, 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4, - 0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec, 0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc, - 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2, 0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2, - 0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea, 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa, - 0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6, 0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6, - 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee, 0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe, - 0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1, 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1, - 0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9, 0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9, - 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5, 0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5, - 0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed, 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd, - 0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3, 0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3, - 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb, 0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb, - 0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7, 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7, - 0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef, 0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff, -} - -// hNode is a node in a Huffman tree. -type hNode struct { - // symbol is the symbol held by this node. - symbol uint32 - // children, if positive, is the hTree.nodes index of the first of - // this node's two children. Zero means an uninitialized node, - // and -1 means a leaf node. - children int32 -} - -const leafNode = -1 - -// lutSize is the log-2 size of an hTree's look-up table. 
-const lutSize, lutMask = 7, 1<<7 - 1 - -// hTree is a Huffman tree. -type hTree struct { - // nodes are the nodes of the Huffman tree. During construction, - // len(nodes) grows from 1 up to cap(nodes) by steps of two. - // After construction, len(nodes) == cap(nodes), and both equal - // 2*theNumberOfSymbols - 1. - nodes []hNode - // lut is a look-up table for walking the nodes. The x in lut[x] is - // the next lutSize bits in the bit-stream. The low 8 bits of lut[x] - // equals 1 plus the number of bits in the next code, or 0 if the - // next code requires more than lutSize bits. The high 24 bits are: - // - the symbol, if the code requires lutSize or fewer bits, or - // - the hTree.nodes index to start the tree traversal from, if - // the next code requires more than lutSize bits. - lut [1 << lutSize]uint32 -} - -// insert inserts into the hTree a symbol whose encoding is the least -// significant codeLength bits of code. -func (h *hTree) insert(symbol uint32, code uint32, codeLength uint32) error { - if symbol > 0xffff || codeLength > 0xfe { - return errInvalidHuffmanTree - } - baseCode := uint32(0) - if codeLength > lutSize { - baseCode = uint32(reverseBits[(code>>(codeLength-lutSize))&0xff]) >> (8 - lutSize) - } else { - baseCode = uint32(reverseBits[code&0xff]) >> (8 - codeLength) - for i := 0; i < 1<<(lutSize-codeLength); i++ { - h.lut[baseCode|uint32(i)< 0; { - codeLength-- - if int(n) > len(h.nodes) { - return errInvalidHuffmanTree - } - switch h.nodes[n].children { - case leafNode: - return errInvalidHuffmanTree - case 0: - if len(h.nodes) == cap(h.nodes) { - return errInvalidHuffmanTree - } - // Create two empty child nodes. - h.nodes[n].children = int32(len(h.nodes)) - h.nodes = h.nodes[:len(h.nodes)+2] - } - n = uint32(h.nodes[n].children) + 1&(code>>codeLength) - jump-- - if jump == 0 && h.lut[baseCode] == 0 { - h.lut[baseCode] = n << 8 - } - } - - switch h.nodes[n].children { - case leafNode: - // No-op. 
- case 0: - // Turn the uninitialized node into a leaf. - h.nodes[n].children = leafNode - default: - return errInvalidHuffmanTree - } - h.nodes[n].symbol = symbol - return nil -} - -// codeLengthsToCodes returns the canonical Huffman codes implied by the -// sequence of code lengths. -func codeLengthsToCodes(codeLengths []uint32) ([]uint32, error) { - maxCodeLength := uint32(0) - for _, cl := range codeLengths { - if maxCodeLength < cl { - maxCodeLength = cl - } - } - const maxAllowedCodeLength = 15 - if len(codeLengths) == 0 || maxCodeLength > maxAllowedCodeLength { - return nil, errInvalidHuffmanTree - } - histogram := [maxAllowedCodeLength + 1]uint32{} - for _, cl := range codeLengths { - histogram[cl]++ - } - currCode, nextCodes := uint32(0), [maxAllowedCodeLength + 1]uint32{} - for cl := 1; cl < len(nextCodes); cl++ { - currCode = (currCode + histogram[cl-1]) << 1 - nextCodes[cl] = currCode - } - codes := make([]uint32, len(codeLengths)) - for symbol, cl := range codeLengths { - if cl > 0 { - codes[symbol] = nextCodes[cl] - nextCodes[cl]++ - } - } - return codes, nil -} - -// build builds a canonical Huffman tree from the given code lengths. -func (h *hTree) build(codeLengths []uint32) error { - // Calculate the number of symbols. - var nSymbols, lastSymbol uint32 - for symbol, cl := range codeLengths { - if cl != 0 { - nSymbols++ - lastSymbol = uint32(symbol) - } - } - if nSymbols == 0 { - return errInvalidHuffmanTree - } - h.nodes = make([]hNode, 1, 2*nSymbols-1) - // Handle the trivial case. - if nSymbols == 1 { - if len(codeLengths) <= int(lastSymbol) { - return errInvalidHuffmanTree - } - return h.insert(lastSymbol, 0, 0) - } - // Handle the non-trivial case. 
- codes, err := codeLengthsToCodes(codeLengths) - if err != nil { - return err - } - for symbol, cl := range codeLengths { - if cl > 0 { - if err := h.insert(uint32(symbol), codes[symbol], cl); err != nil { - return err - } - } - } - return nil -} - -// buildSimple builds a Huffman tree with 1 or 2 symbols. -func (h *hTree) buildSimple(nSymbols uint32, symbols [2]uint32, alphabetSize uint32) error { - h.nodes = make([]hNode, 1, 2*nSymbols-1) - for i := uint32(0); i < nSymbols; i++ { - if symbols[i] >= alphabetSize { - return errInvalidHuffmanTree - } - if err := h.insert(symbols[i], i, nSymbols-1); err != nil { - return err - } - } - return nil -} - -// next returns the next Huffman-encoded symbol from the bit-stream d. -func (h *hTree) next(d *decoder) (uint32, error) { - var n uint32 - // Read enough bits so that we can use the look-up table. - if d.nBits < lutSize { - c, err := d.r.ReadByte() - if err != nil { - if err == io.EOF { - // There are no more bytes of data, but we may still be able - // to read the next symbol out of the previously read bits. - goto slowPath - } - return 0, err - } - d.bits |= uint32(c) << d.nBits - d.nBits += 8 - } - // Use the look-up table. - n = h.lut[d.bits&lutMask] - if b := n & 0xff; b != 0 { - b-- - d.bits >>= b - d.nBits -= b - return n >> 8, nil - } - n >>= 8 - d.bits >>= lutSize - d.nBits -= lutSize - -slowPath: - for h.nodes[n].children != leafNode { - if d.nBits == 0 { - c, err := d.r.ReadByte() - if err != nil { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return 0, err - } - d.bits = uint32(c) - d.nBits = 8 - } - n = uint32(h.nodes[n].children) + 1&d.bits - d.bits >>= 1 - d.nBits-- - } - return h.nodes[n].symbol, nil -} diff --git a/vendor/golang.org/x/image/vp8l/transform.go b/vendor/golang.org/x/image/vp8l/transform.go deleted file mode 100644 index 06543dacb..000000000 --- a/vendor/golang.org/x/image/vp8l/transform.go +++ /dev/null @@ -1,299 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package vp8l - -// This file deals with image transforms, specified in section 3. - -// nTiles returns the number of tiles needed to cover size pixels, where each -// tile's side is 1<> bits -} - -const ( - transformTypePredictor = 0 - transformTypeCrossColor = 1 - transformTypeSubtractGreen = 2 - transformTypeColorIndexing = 3 - nTransformTypes = 4 -) - -// transform holds the parameters for an invertible transform. -type transform struct { - // transformType is the type of the transform. - transformType uint32 - // oldWidth is the width of the image before transformation (or - // equivalently, after inverse transformation). The color-indexing - // transform can reduce the width. For example, a 50-pixel-wide - // image that only needs 4 bits (half a byte) per color index can - // be transformed into a 25-pixel-wide image. - oldWidth int32 - // bits is the log-2 size of the transform's tiles, for the predictor - // and cross-color transforms. 8>>bits is the number of bits per - // color index, for the color-index transform. - bits uint32 - // pix is the tile values, for the predictor and cross-color - // transforms, and the color palette, for the color-index transform. - pix []byte -} - -var inverseTransforms = [nTransformTypes]func(*transform, []byte, int32) []byte{ - transformTypePredictor: inversePredictor, - transformTypeCrossColor: inverseCrossColor, - transformTypeSubtractGreen: inverseSubtractGreen, - transformTypeColorIndexing: inverseColorIndexing, -} - -func inversePredictor(t *transform, pix []byte, h int32) []byte { - if t.oldWidth == 0 || h == 0 { - return pix - } - // The first pixel's predictor is mode 0 (opaque black). 
- pix[3] += 0xff - p, mask := int32(4), int32(1)<> t.bits) * tilesPerRow - predictorMode := t.pix[q+1] & 0x0f - q += 4 - for x := int32(1); x < t.oldWidth; x++ { - if x&mask == 0 { - predictorMode = t.pix[q+1] & 0x0f - q += 4 - } - switch predictorMode { - case 0: // Opaque black. - pix[p+3] += 0xff - - case 1: // L. - pix[p+0] += pix[p-4] - pix[p+1] += pix[p-3] - pix[p+2] += pix[p-2] - pix[p+3] += pix[p-1] - - case 2: // T. - pix[p+0] += pix[top+0] - pix[p+1] += pix[top+1] - pix[p+2] += pix[top+2] - pix[p+3] += pix[top+3] - - case 3: // TR. - pix[p+0] += pix[top+4] - pix[p+1] += pix[top+5] - pix[p+2] += pix[top+6] - pix[p+3] += pix[top+7] - - case 4: // TL. - pix[p+0] += pix[top-4] - pix[p+1] += pix[top-3] - pix[p+2] += pix[top-2] - pix[p+3] += pix[top-1] - - case 5: // Average2(Average2(L, TR), T). - pix[p+0] += avg2(avg2(pix[p-4], pix[top+4]), pix[top+0]) - pix[p+1] += avg2(avg2(pix[p-3], pix[top+5]), pix[top+1]) - pix[p+2] += avg2(avg2(pix[p-2], pix[top+6]), pix[top+2]) - pix[p+3] += avg2(avg2(pix[p-1], pix[top+7]), pix[top+3]) - - case 6: // Average2(L, TL). - pix[p+0] += avg2(pix[p-4], pix[top-4]) - pix[p+1] += avg2(pix[p-3], pix[top-3]) - pix[p+2] += avg2(pix[p-2], pix[top-2]) - pix[p+3] += avg2(pix[p-1], pix[top-1]) - - case 7: // Average2(L, T). - pix[p+0] += avg2(pix[p-4], pix[top+0]) - pix[p+1] += avg2(pix[p-3], pix[top+1]) - pix[p+2] += avg2(pix[p-2], pix[top+2]) - pix[p+3] += avg2(pix[p-1], pix[top+3]) - - case 8: // Average2(TL, T). - pix[p+0] += avg2(pix[top-4], pix[top+0]) - pix[p+1] += avg2(pix[top-3], pix[top+1]) - pix[p+2] += avg2(pix[top-2], pix[top+2]) - pix[p+3] += avg2(pix[top-1], pix[top+3]) - - case 9: // Average2(T, TR). - pix[p+0] += avg2(pix[top+0], pix[top+4]) - pix[p+1] += avg2(pix[top+1], pix[top+5]) - pix[p+2] += avg2(pix[top+2], pix[top+6]) - pix[p+3] += avg2(pix[top+3], pix[top+7]) - - case 10: // Average2(Average2(L, TL), Average2(T, TR)). 
- pix[p+0] += avg2(avg2(pix[p-4], pix[top-4]), avg2(pix[top+0], pix[top+4])) - pix[p+1] += avg2(avg2(pix[p-3], pix[top-3]), avg2(pix[top+1], pix[top+5])) - pix[p+2] += avg2(avg2(pix[p-2], pix[top-2]), avg2(pix[top+2], pix[top+6])) - pix[p+3] += avg2(avg2(pix[p-1], pix[top-1]), avg2(pix[top+3], pix[top+7])) - - case 11: // Select(L, T, TL). - l0 := int32(pix[p-4]) - l1 := int32(pix[p-3]) - l2 := int32(pix[p-2]) - l3 := int32(pix[p-1]) - c0 := int32(pix[top-4]) - c1 := int32(pix[top-3]) - c2 := int32(pix[top-2]) - c3 := int32(pix[top-1]) - t0 := int32(pix[top+0]) - t1 := int32(pix[top+1]) - t2 := int32(pix[top+2]) - t3 := int32(pix[top+3]) - l := abs(c0-t0) + abs(c1-t1) + abs(c2-t2) + abs(c3-t3) - t := abs(c0-l0) + abs(c1-l1) + abs(c2-l2) + abs(c3-l3) - if l < t { - pix[p+0] += uint8(l0) - pix[p+1] += uint8(l1) - pix[p+2] += uint8(l2) - pix[p+3] += uint8(l3) - } else { - pix[p+0] += uint8(t0) - pix[p+1] += uint8(t1) - pix[p+2] += uint8(t2) - pix[p+3] += uint8(t3) - } - - case 12: // ClampAddSubtractFull(L, T, TL). - pix[p+0] += clampAddSubtractFull(pix[p-4], pix[top+0], pix[top-4]) - pix[p+1] += clampAddSubtractFull(pix[p-3], pix[top+1], pix[top-3]) - pix[p+2] += clampAddSubtractFull(pix[p-2], pix[top+2], pix[top-2]) - pix[p+3] += clampAddSubtractFull(pix[p-1], pix[top+3], pix[top-1]) - - case 13: // ClampAddSubtractHalf(Average2(L, T), TL). 
- pix[p+0] += clampAddSubtractHalf(avg2(pix[p-4], pix[top+0]), pix[top-4]) - pix[p+1] += clampAddSubtractHalf(avg2(pix[p-3], pix[top+1]), pix[top-3]) - pix[p+2] += clampAddSubtractHalf(avg2(pix[p-2], pix[top+2]), pix[top-2]) - pix[p+3] += clampAddSubtractHalf(avg2(pix[p-1], pix[top+3]), pix[top-1]) - } - p, top = p+4, top+4 - } - } - return pix -} - -func inverseCrossColor(t *transform, pix []byte, h int32) []byte { - var greenToRed, greenToBlue, redToBlue int32 - p, mask, tilesPerRow := int32(0), int32(1)<> t.bits) * tilesPerRow - for x := int32(0); x < t.oldWidth; x++ { - if x&mask == 0 { - redToBlue = int32(int8(t.pix[q+0])) - greenToBlue = int32(int8(t.pix[q+1])) - greenToRed = int32(int8(t.pix[q+2])) - q += 4 - } - red := pix[p+0] - green := pix[p+1] - blue := pix[p+2] - red += uint8(uint32(greenToRed*int32(int8(green))) >> 5) - blue += uint8(uint32(greenToBlue*int32(int8(green))) >> 5) - blue += uint8(uint32(redToBlue*int32(int8(red))) >> 5) - pix[p+0] = red - pix[p+2] = blue - p += 4 - } - } - return pix -} - -func inverseSubtractGreen(t *transform, pix []byte, h int32) []byte { - for p := 0; p < len(pix); p += 4 { - green := pix[p+1] - pix[p+0] += green - pix[p+2] += green - } - return pix -} - -func inverseColorIndexing(t *transform, pix []byte, h int32) []byte { - if t.bits == 0 { - for p := 0; p < len(pix); p += 4 { - i := 4 * uint32(pix[p+1]) - pix[p+0] = t.pix[i+0] - pix[p+1] = t.pix[i+1] - pix[p+2] = t.pix[i+2] - pix[p+3] = t.pix[i+3] - } - return pix - } - - vMask, xMask, bitsPerPixel := uint32(0), int32(0), uint32(8>>t.bits) - switch t.bits { - case 1: - vMask, xMask = 0x0f, 0x01 - case 2: - vMask, xMask = 0x03, 0x03 - case 3: - vMask, xMask = 0x01, 0x07 - } - - d, p, v, dst := 0, 0, uint32(0), make([]byte, 4*t.oldWidth*h) - for y := int32(0); y < h; y++ { - for x := int32(0); x < t.oldWidth; x++ { - if x&xMask == 0 { - v = uint32(pix[p+1]) - p += 4 - } - - i := 4 * (v & vMask) - dst[d+0] = t.pix[i+0] - dst[d+1] = t.pix[i+1] - dst[d+2] = t.pix[i+2] 
- dst[d+3] = t.pix[i+3] - d += 4 - - v >>= bitsPerPixel - } - } - return dst -} - -func abs(x int32) int32 { - if x < 0 { - return -x - } - return x -} - -func avg2(a, b uint8) uint8 { - return uint8((int32(a) + int32(b)) / 2) -} - -func clampAddSubtractFull(a, b, c uint8) uint8 { - x := int32(a) + int32(b) - int32(c) - if x < 0 { - return 0 - } - if x > 255 { - return 255 - } - return uint8(x) -} - -func clampAddSubtractHalf(a, b uint8) uint8 { - x := int32(a) + (int32(a)-int32(b))/2 - if x < 0 { - return 0 - } - if x > 255 { - return 255 - } - return uint8(x) -} diff --git a/vendor/golang.org/x/image/webp/decode.go b/vendor/golang.org/x/image/webp/decode.go deleted file mode 100644 index e211c7d57..000000000 --- a/vendor/golang.org/x/image/webp/decode.go +++ /dev/null @@ -1,276 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package webp - -import ( - "bytes" - "errors" - "image" - "image/color" - "io" - - "golang.org/x/image/riff" - "golang.org/x/image/vp8" - "golang.org/x/image/vp8l" -) - -var errInvalidFormat = errors.New("webp: invalid format") - -var ( - fccALPH = riff.FourCC{'A', 'L', 'P', 'H'} - fccVP8 = riff.FourCC{'V', 'P', '8', ' '} - fccVP8L = riff.FourCC{'V', 'P', '8', 'L'} - fccVP8X = riff.FourCC{'V', 'P', '8', 'X'} - fccWEBP = riff.FourCC{'W', 'E', 'B', 'P'} -) - -func decode(r io.Reader, configOnly bool) (image.Image, image.Config, error) { - formType, riffReader, err := riff.NewReader(r) - if err != nil { - return nil, image.Config{}, err - } - if formType != fccWEBP { - return nil, image.Config{}, errInvalidFormat - } - - var ( - alpha []byte - alphaStride int - wantAlpha bool - seenVP8X bool - widthMinusOne uint32 - heightMinusOne uint32 - buf [10]byte - ) - for { - chunkID, chunkLen, chunkData, err := riffReader.Next() - if err == io.EOF { - err = errInvalidFormat - } - if err != nil { - return nil, image.Config{}, err - 
} - - switch chunkID { - case fccALPH: - if !wantAlpha { - return nil, image.Config{}, errInvalidFormat - } - wantAlpha = false - // Read the Pre-processing | Filter | Compression byte. - if _, err := io.ReadFull(chunkData, buf[:1]); err != nil { - if err == io.EOF { - err = errInvalidFormat - } - return nil, image.Config{}, err - } - alpha, alphaStride, err = readAlpha(chunkData, widthMinusOne, heightMinusOne, buf[0]&0x03) - if err != nil { - return nil, image.Config{}, err - } - unfilterAlpha(alpha, alphaStride, (buf[0]>>2)&0x03) - - case fccVP8: - if wantAlpha || int32(chunkLen) < 0 { - return nil, image.Config{}, errInvalidFormat - } - d := vp8.NewDecoder() - d.Init(chunkData, int(chunkLen)) - fh, err := d.DecodeFrameHeader() - if err != nil { - return nil, image.Config{}, err - } - if configOnly { - return nil, image.Config{ - ColorModel: color.YCbCrModel, - Width: fh.Width, - Height: fh.Height, - }, nil - } - m, err := d.DecodeFrame() - if err != nil { - return nil, image.Config{}, err - } - if alpha != nil { - return &image.NYCbCrA{ - YCbCr: *m, - A: alpha, - AStride: alphaStride, - }, image.Config{}, nil - } - return m, image.Config{}, nil - - case fccVP8L: - if wantAlpha || alpha != nil { - return nil, image.Config{}, errInvalidFormat - } - if configOnly { - c, err := vp8l.DecodeConfig(chunkData) - return nil, c, err - } - m, err := vp8l.Decode(chunkData) - return m, image.Config{}, err - - case fccVP8X: - if seenVP8X { - return nil, image.Config{}, errInvalidFormat - } - seenVP8X = true - if chunkLen != 10 { - return nil, image.Config{}, errInvalidFormat - } - if _, err := io.ReadFull(chunkData, buf[:10]); err != nil { - return nil, image.Config{}, err - } - const ( - animationBit = 1 << 1 - xmpMetadataBit = 1 << 2 - exifMetadataBit = 1 << 3 - alphaBit = 1 << 4 - iccProfileBit = 1 << 5 - ) - wantAlpha = (buf[0] & alphaBit) != 0 - widthMinusOne = uint32(buf[4]) | uint32(buf[5])<<8 | uint32(buf[6])<<16 - heightMinusOne = uint32(buf[7]) | uint32(buf[8])<<8 | 
uint32(buf[9])<<16 - if configOnly { - if wantAlpha { - return nil, image.Config{ - ColorModel: color.NYCbCrAModel, - Width: int(widthMinusOne) + 1, - Height: int(heightMinusOne) + 1, - }, nil - } - return nil, image.Config{ - ColorModel: color.YCbCrModel, - Width: int(widthMinusOne) + 1, - Height: int(heightMinusOne) + 1, - }, nil - } - } - } -} - -func readAlpha(chunkData io.Reader, widthMinusOne, heightMinusOne uint32, compression byte) ( - alpha []byte, alphaStride int, err error) { - - switch compression { - case 0: - w := int(widthMinusOne) + 1 - h := int(heightMinusOne) + 1 - alpha = make([]byte, w*h) - if _, err := io.ReadFull(chunkData, alpha); err != nil { - return nil, 0, err - } - return alpha, w, nil - - case 1: - // Read the VP8L-compressed alpha values. First, synthesize a 5-byte VP8L header: - // a 1-byte magic number, a 14-bit widthMinusOne, a 14-bit heightMinusOne, - // a 1-bit (ignored, zero) alphaIsUsed and a 3-bit (zero) version. - // TODO(nigeltao): be more efficient than decoding an *image.NRGBA just to - // extract the green values to a separately allocated []byte. Fixing this - // will require changes to the vp8l package's API. - if widthMinusOne > 0x3fff || heightMinusOne > 0x3fff { - return nil, 0, errors.New("webp: invalid format") - } - alphaImage, err := vp8l.Decode(io.MultiReader( - bytes.NewReader([]byte{ - 0x2f, // VP8L magic number. - uint8(widthMinusOne), - uint8(widthMinusOne>>8) | uint8(heightMinusOne<<6), - uint8(heightMinusOne >> 2), - uint8(heightMinusOne >> 10), - }), - chunkData, - )) - if err != nil { - return nil, 0, err - } - // The green values of the inner NRGBA image are the alpha values of the - // outer NYCbCrA image. 
- pix := alphaImage.(*image.NRGBA).Pix - alpha = make([]byte, len(pix)/4) - for i := range alpha { - alpha[i] = pix[4*i+1] - } - return alpha, int(widthMinusOne) + 1, nil - } - return nil, 0, errInvalidFormat -} - -func unfilterAlpha(alpha []byte, alphaStride int, filter byte) { - if len(alpha) == 0 || alphaStride == 0 { - return - } - switch filter { - case 1: // Horizontal filter. - for i := 1; i < alphaStride; i++ { - alpha[i] += alpha[i-1] - } - for i := alphaStride; i < len(alpha); i += alphaStride { - // The first column is equivalent to the vertical filter. - alpha[i] += alpha[i-alphaStride] - - for j := 1; j < alphaStride; j++ { - alpha[i+j] += alpha[i+j-1] - } - } - - case 2: // Vertical filter. - // The first row is equivalent to the horizontal filter. - for i := 1; i < alphaStride; i++ { - alpha[i] += alpha[i-1] - } - - for i := alphaStride; i < len(alpha); i++ { - alpha[i] += alpha[i-alphaStride] - } - - case 3: // Gradient filter. - // The first row is equivalent to the horizontal filter. - for i := 1; i < alphaStride; i++ { - alpha[i] += alpha[i-1] - } - - for i := alphaStride; i < len(alpha); i += alphaStride { - // The first column is equivalent to the vertical filter. - alpha[i] += alpha[i-alphaStride] - - // The interior is predicted on the three top/left pixels. - for j := 1; j < alphaStride; j++ { - c := int(alpha[i+j-alphaStride-1]) - b := int(alpha[i+j-alphaStride]) - a := int(alpha[i+j-1]) - x := a + b - c - if x < 0 { - x = 0 - } else if x > 255 { - x = 255 - } - alpha[i+j] += uint8(x) - } - } - } -} - -// Decode reads a WEBP image from r and returns it as an image.Image. -func Decode(r io.Reader) (image.Image, error) { - m, _, err := decode(r, false) - if err != nil { - return nil, err - } - return m, err -} - -// DecodeConfig returns the color model and dimensions of a WEBP image without -// decoding the entire image. 
-func DecodeConfig(r io.Reader) (image.Config, error) { - _, c, err := decode(r, true) - return c, err -} - -func init() { - image.RegisterFormat("webp", "RIFF????WEBPVP8", Decode, DecodeConfig) -} diff --git a/vendor/golang.org/x/image/webp/doc.go b/vendor/golang.org/x/image/webp/doc.go deleted file mode 100644 index e321c8542..000000000 --- a/vendor/golang.org/x/image/webp/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package webp implements a decoder for WEBP images. -// -// WEBP is defined at: -// https://developers.google.com/speed/webp/docs/riff_container -package webp // import "golang.org/x/image/webp" diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go deleted file mode 100644 index cf66309c4..000000000 --- a/vendor/golang.org/x/net/context/context.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package context defines the Context type, which carries deadlines, -// cancelation signals, and other request-scoped values across API boundaries -// and between processes. -// As of Go 1.7 this package is available in the standard library under the -// name context. https://golang.org/pkg/context. -// -// Incoming requests to a server should create a Context, and outgoing calls to -// servers should accept a Context. The chain of function calls between must -// propagate the Context, optionally replacing it with a modified copy created -// using WithDeadline, WithTimeout, WithCancel, or WithValue. 
-// -// Programs that use Contexts should follow these rules to keep interfaces -// consistent across packages and enable static analysis tools to check context -// propagation: -// -// Do not store Contexts inside a struct type; instead, pass a Context -// explicitly to each function that needs it. The Context should be the first -// parameter, typically named ctx: -// -// func DoSomething(ctx context.Context, arg Arg) error { -// // ... use ctx ... -// } -// -// Do not pass a nil Context, even if a function permits it. Pass context.TODO -// if you are unsure about which Context to use. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -// -// The same Context may be passed to functions running in different goroutines; -// Contexts are safe for simultaneous use by multiple goroutines. -// -// See http://blog.golang.org/context for example code for a server that uses -// Contexts. -package context // import "golang.org/x/net/context" - -// Background returns a non-nil, empty Context. It is never canceled, has no -// values, and has no deadline. It is typically used by the main function, -// initialization, and tests, and as the top-level Context for incoming -// requests. -func Background() Context { - return background -} - -// TODO returns a non-nil, empty Context. Code should use context.TODO when -// it's unclear which Context to use or it is not yet available (because the -// surrounding function has not yet been extended to accept a Context -// parameter). TODO is recognized by static analysis tools that determine -// whether Contexts are propagated correctly in a program. -func TODO() Context { - return todo -} diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go deleted file mode 100644 index 0c1b86793..000000000 --- a/vendor/golang.org/x/net/context/go17.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2016 The Go Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.7 - -package context - -import ( - "context" // standard library's context, as of Go 1.7 - "time" -) - -var ( - todo = context.TODO() - background = context.Background() -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = context.Canceled - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = context.DeadlineExceeded - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - ctx, f := context.WithCancel(parent) - return ctx, f -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - ctx, f := context.WithDeadline(parent, deadline) - return ctx, f -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). 
-// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return context.WithValue(parent, key, val) -} diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go deleted file mode 100644 index e31e35a90..000000000 --- a/vendor/golang.org/x/net/context/go19.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.9 - -package context - -import "context" // standard library's context, as of Go 1.7 - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context = context.Context - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. 
-type CancelFunc = context.CancelFunc diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go deleted file mode 100644 index 065ff3dfa..000000000 --- a/vendor/golang.org/x/net/context/pre_go17.go +++ /dev/null @@ -1,300 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.7 - -package context - -import ( - "errors" - "fmt" - "sync" - "time" -) - -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses. -type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case background: - return "context.Background" - case todo: - return "context.TODO" - } - return "unknown empty Context" -} - -var ( - background = new(emptyCtx) - todo = new(emptyCtx) -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = errors.New("context canceled") - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = errors.New("context deadline exceeded") - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. 
-func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - c := newCancelCtx(parent) - propagateCancel(parent, c) - return c, func() { c.cancel(true, Canceled) } -} - -// newCancelCtx returns an initialized cancelCtx. -func newCancelCtx(parent Context) *cancelCtx { - return &cancelCtx{ - Context: parent, - done: make(chan struct{}), - } -} - -// propagateCancel arranges for child to be canceled when parent is. -func propagateCancel(parent Context, child canceler) { - if parent.Done() == nil { - return // parent is never canceled - } - if p, ok := parentCancelCtx(parent); ok { - p.mu.Lock() - if p.err != nil { - // parent has already been canceled - child.cancel(false, p.err) - } else { - if p.children == nil { - p.children = make(map[canceler]bool) - } - p.children[child] = true - } - p.mu.Unlock() - } else { - go func() { - select { - case <-parent.Done(): - child.cancel(false, parent.Err()) - case <-child.Done(): - } - }() - } -} - -// parentCancelCtx follows a chain of parent references until it finds a -// *cancelCtx. This function understands how each of the concrete types in this -// package represents its parent. -func parentCancelCtx(parent Context) (*cancelCtx, bool) { - for { - switch c := parent.(type) { - case *cancelCtx: - return c, true - case *timerCtx: - return c.cancelCtx, true - case *valueCtx: - parent = c.Context - default: - return nil, false - } - } -} - -// removeChild removes a context from its parent. -func removeChild(parent Context, child canceler) { - p, ok := parentCancelCtx(parent) - if !ok { - return - } - p.mu.Lock() - if p.children != nil { - delete(p.children, child) - } - p.mu.Unlock() -} - -// A canceler is a context type that can be canceled directly. The -// implementations are *cancelCtx and *timerCtx. -type canceler interface { - cancel(removeFromParent bool, err error) - Done() <-chan struct{} -} - -// A cancelCtx can be canceled. When canceled, it also cancels any children -// that implement canceler. 
-type cancelCtx struct { - Context - - done chan struct{} // closed by the first cancel call. - - mu sync.Mutex - children map[canceler]bool // set to nil by the first cancel call - err error // set to non-nil by the first cancel call -} - -func (c *cancelCtx) Done() <-chan struct{} { - return c.done -} - -func (c *cancelCtx) Err() error { - c.mu.Lock() - defer c.mu.Unlock() - return c.err -} - -func (c *cancelCtx) String() string { - return fmt.Sprintf("%v.WithCancel", c.Context) -} - -// cancel closes c.done, cancels each of c's children, and, if -// removeFromParent is true, removes c from its parent's children. -func (c *cancelCtx) cancel(removeFromParent bool, err error) { - if err == nil { - panic("context: internal error: missing cancel error") - } - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return // already canceled - } - c.err = err - close(c.done) - for child := range c.children { - // NOTE: acquiring the child's lock while holding parent's lock. - child.cancel(false, err) - } - c.children = nil - c.mu.Unlock() - - if removeFromParent { - removeChild(c.Context, c) - } -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { - // The current deadline is already sooner than the new one. 
- return WithCancel(parent) - } - c := &timerCtx{ - cancelCtx: newCancelCtx(parent), - deadline: deadline, - } - propagateCancel(parent, c) - d := deadline.Sub(time.Now()) - if d <= 0 { - c.cancel(true, DeadlineExceeded) // deadline has already passed - return c, func() { c.cancel(true, Canceled) } - } - c.mu.Lock() - defer c.mu.Unlock() - if c.err == nil { - c.timer = time.AfterFunc(d, func() { - c.cancel(true, DeadlineExceeded) - }) - } - return c, func() { c.cancel(true, Canceled) } -} - -// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to -// implement Done and Err. It implements cancel by stopping its timer then -// delegating to cancelCtx.cancel. -type timerCtx struct { - *cancelCtx - timer *time.Timer // Under cancelCtx.mu. - - deadline time.Time -} - -func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { - return c.deadline, true -} - -func (c *timerCtx) String() string { - return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) -} - -func (c *timerCtx) cancel(removeFromParent bool, err error) { - c.cancelCtx.cancel(false, err) - if removeFromParent { - // Remove this timerCtx from its parent cancelCtx's children. - removeChild(c.cancelCtx.Context, c) - } - c.mu.Lock() - if c.timer != nil { - c.timer.Stop() - c.timer = nil - } - c.mu.Unlock() -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). 
-// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return &valueCtx{parent, key, val} -} - -// A valueCtx carries a key-value pair. It implements Value for that key and -// delegates all other calls to the embedded Context. -type valueCtx struct { - Context - key, val interface{} -} - -func (c *valueCtx) String() string { - return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) -} - -func (c *valueCtx) Value(key interface{}) interface{} { - if c.key == key { - return c.val - } - return c.Context.Value(key) -} diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go deleted file mode 100644 index ec5a63803..000000000 --- a/vendor/golang.org/x/net/context/pre_go19.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.9 - -package context - -import "time" - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. 
-type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. - // - // WithCancel arranges for Done to be closed when cancel is called; - // WithDeadline arranges for Done to be closed when the deadline - // expires; WithTimeout arranges for Done to be closed when the timeout - // elapses. - // - // Done is provided for use in select statements: - // - // // Stream generates values with DoSomething and sends them to out - // // until DoSomething returns an error or ctx.Done is closed. - // func Stream(ctx context.Context, out chan<- Value) error { - // for { - // v, err := DoSomething(ctx) - // if err != nil { - // return err - // } - // select { - // case <-ctx.Done(): - // return ctx.Err() - // case out <- v: - // } - // } - // } - // - // See http://blog.golang.org/pipelines for more examples of how to use - // a Done channel for cancelation. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. - // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key returns the same result. - // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. 
- // - // A key identifies a specific value in a Context. Functions that wish - // to store values in Context typically allocate a key in a global - // variable then use that key as the argument to context.WithValue and - // Context.Value. A key can be any type that supports equality; - // packages should define keys as an unexported type to avoid - // collisions. - // - // Packages that define a Context key should provide type-safe accessors - // for the values stores using that key: - // - // // Package user defines a User type that's stored in Contexts. - // package user - // - // import "golang.org/x/net/context" - // - // // User is the type of value stored in the Contexts. - // type User struct {...} - // - // // key is an unexported type for keys defined in this package. - // // This prevents collisions with keys defined in other packages. - // type key int - // - // // userKey is the key for user.User values in Contexts. It is - // // unexported; clients use user.NewContext and user.FromContext - // // instead of using this key directly. - // var userKey key = 0 - // - // // NewContext returns a new Context that carries value u. - // func NewContext(ctx context.Context, u *User) context.Context { - // return context.WithValue(ctx, userKey, u) - // } - // - // // FromContext returns the User value stored in ctx, if any. - // func FromContext(ctx context.Context) (*User, bool) { - // u, ok := ctx.Value(userKey).(*User) - // return u, ok - // } - Value(key interface{}) interface{} -} - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. 
-type CancelFunc func() diff --git a/vendor/modules.txt b/vendor/modules.txt index 7ad1940b4..d09321953 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -30,8 +30,17 @@ codeberg.org/gruf/go-fastcopy # codeberg.org/gruf/go-fastpath/v2 v2.0.0 ## explicit; go 1.14 codeberg.org/gruf/go-fastpath/v2 -# codeberg.org/gruf/go-iotools v0.0.0-20230811115124-5d4223615a7f -## explicit; go 1.19 +# codeberg.org/gruf/go-ffmpreg v0.2.2 +## explicit; go 1.22.0 +codeberg.org/gruf/go-ffmpreg/embed/ffmpeg +codeberg.org/gruf/go-ffmpreg/embed/ffprobe +codeberg.org/gruf/go-ffmpreg/ffmpeg +codeberg.org/gruf/go-ffmpreg/ffprobe +codeberg.org/gruf/go-ffmpreg/internal +codeberg.org/gruf/go-ffmpreg/util +codeberg.org/gruf/go-ffmpreg/wasm +# codeberg.org/gruf/go-iotools v0.0.0-20240710125620-934ae9c654cf +## explicit; go 1.21 codeberg.org/gruf/go-iotools # codeberg.org/gruf/go-kv v1.6.4 ## explicit; go 1.19 @@ -52,6 +61,9 @@ codeberg.org/gruf/go-maps # codeberg.org/gruf/go-mempool v0.0.0-20240507125005-cef10d64a760 ## explicit; go 1.22.2 codeberg.org/gruf/go-mempool +# codeberg.org/gruf/go-mimetypes v1.2.0 +## explicit; go 1.17 +codeberg.org/gruf/go-mimetypes # codeberg.org/gruf/go-mutexes v1.5.1 ## explicit; go 1.22.2 codeberg.org/gruf/go-mutexes @@ -61,7 +73,7 @@ codeberg.org/gruf/go-runners # codeberg.org/gruf/go-sched v1.2.3 ## explicit; go 1.19 codeberg.org/gruf/go-sched -# codeberg.org/gruf/go-storage v0.1.1 +# codeberg.org/gruf/go-storage v0.1.2 ## explicit; go 1.22 codeberg.org/gruf/go-storage codeberg.org/gruf/go-storage/disk @@ -71,9 +83,6 @@ codeberg.org/gruf/go-storage/s3 # codeberg.org/gruf/go-structr v0.8.7 ## explicit; go 1.21 codeberg.org/gruf/go-structr -# codeberg.org/superseriousbusiness/exif-terminator v0.7.0 -## explicit; go 1.21 -codeberg.org/superseriousbusiness/exif-terminator # github.com/DmitriyVTitov/size v1.5.0 ## explicit; go 1.14 github.com/DmitriyVTitov/size @@ -89,11 +98,6 @@ github.com/Masterminds/semver/v3 # github.com/Masterminds/sprig/v3 v3.2.3 ## 
explicit; go 1.13 github.com/Masterminds/sprig/v3 -# github.com/abema/go-mp4 v1.2.0 -## explicit; go 1.14 -github.com/abema/go-mp4 -github.com/abema/go-mp4/internal/bitio -github.com/abema/go-mp4/internal/util # github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 ## explicit; go 1.13 github.com/asaskevich/govalidator @@ -179,24 +183,6 @@ github.com/disintegration/imaging # github.com/docker/go-units v0.5.0 ## explicit github.com/docker/go-units -# github.com/dsoprea/go-exif/v3 v3.0.0-20210625224831-a6301f85c82b -## explicit; go 1.12 -github.com/dsoprea/go-exif/v3 -github.com/dsoprea/go-exif/v3/common -github.com/dsoprea/go-exif/v3/undefined -# github.com/dsoprea/go-iptc v0.0.0-20200610044640-bc9ca208b413 -## explicit; go 1.13 -github.com/dsoprea/go-iptc -# github.com/dsoprea/go-logging v0.0.0-20200710184922-b02d349568dd -## explicit; go 1.13 -github.com/dsoprea/go-logging -# github.com/dsoprea/go-photoshop-info-format v0.0.0-20200610045659-121dd752914d -## explicit; go 1.13 -github.com/dsoprea/go-photoshop-info-format -# github.com/dsoprea/go-utility/v2 v2.0.0-20200717064901-2fccff4aa15e -## explicit; go 1.12 -github.com/dsoprea/go-utility/v2/filesystem -github.com/dsoprea/go-utility/v2/image # github.com/dustin/go-humanize v1.0.1 ## explicit; go 1.16 github.com/dustin/go-humanize @@ -232,9 +218,6 @@ github.com/gin-gonic/gin/binding github.com/gin-gonic/gin/internal/bytesconv github.com/gin-gonic/gin/internal/json github.com/gin-gonic/gin/render -# github.com/go-errors/errors v1.4.1 -## explicit; go 1.14 -github.com/go-errors/errors # github.com/go-fed/httpsig v1.1.0 ## explicit; go 1.13 github.com/go-fed/httpsig @@ -322,9 +305,6 @@ github.com/go-swagger/go-swagger/cmd/swagger/commands/generate github.com/go-swagger/go-swagger/cmd/swagger/commands/initcmd github.com/go-swagger/go-swagger/codescan github.com/go-swagger/go-swagger/generator -# github.com/go-xmlfmt/xmlfmt v0.0.0-20211206191508-7fd73a941850 -## explicit -github.com/go-xmlfmt/xmlfmt # 
github.com/goccy/go-json v0.10.3 ## explicit; go 1.19 github.com/goccy/go-json @@ -342,13 +322,6 @@ github.com/godbus/dbus/v5 # github.com/golang-jwt/jwt v3.2.2+incompatible ## explicit github.com/golang-jwt/jwt -# github.com/golang/geo v0.0.0-20210211234256-740aa86cb551 -## explicit; go 1.12 -github.com/golang/geo/r1 -github.com/golang/geo/r2 -github.com/golang/geo/r3 -github.com/golang/geo/s1 -github.com/golang/geo/s2 # github.com/google/go-cmp v0.6.0 ## explicit; go 1.13 github.com/google/go-cmp/cmp @@ -385,12 +358,6 @@ github.com/gorilla/websocket github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule github.com/grpc-ecosystem/grpc-gateway/v2/runtime github.com/grpc-ecosystem/grpc-gateway/v2/utilities -# github.com/h2non/filetype v1.1.3 -## explicit; go 1.13 -github.com/h2non/filetype -github.com/h2non/filetype/matchers -github.com/h2non/filetype/matchers/isobmff -github.com/h2non/filetype/types # github.com/hashicorp/golang-lru/v2 v2.0.7 ## explicit; go 1.18 github.com/hashicorp/golang-lru/v2 @@ -815,12 +782,6 @@ github.com/superseriousbusiness/activity/streams/values/rfc2045 github.com/superseriousbusiness/activity/streams/values/rfc5988 github.com/superseriousbusiness/activity/streams/values/string github.com/superseriousbusiness/activity/streams/vocab -# github.com/superseriousbusiness/go-jpeg-image-structure/v2 v2.0.0-20220321154430-d89a106fdabe -## explicit; go 1.17 -github.com/superseriousbusiness/go-jpeg-image-structure/v2 -# github.com/superseriousbusiness/go-png-image-structure/v2 v2.0.1-SSB -## explicit; go 1.12 -github.com/superseriousbusiness/go-png-image-structure/v2 # github.com/superseriousbusiness/httpsig v1.2.0-SSB ## explicit; go 1.21 github.com/superseriousbusiness/httpsig @@ -851,6 +812,7 @@ github.com/tetratelabs/wazero github.com/tetratelabs/wazero/api github.com/tetratelabs/wazero/experimental github.com/tetratelabs/wazero/experimental/sys +github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1 
github.com/tetratelabs/wazero/internal/descriptor github.com/tetratelabs/wazero/internal/engine/interpreter github.com/tetratelabs/wazero/internal/engine/wazevo @@ -875,6 +837,7 @@ github.com/tetratelabs/wazero/internal/sysfs github.com/tetratelabs/wazero/internal/u32 github.com/tetratelabs/wazero/internal/u64 github.com/tetratelabs/wazero/internal/version +github.com/tetratelabs/wazero/internal/wasip1 github.com/tetratelabs/wazero/internal/wasm github.com/tetratelabs/wazero/internal/wasm/binary github.com/tetratelabs/wazero/internal/wasmdebug @@ -1086,12 +1049,8 @@ golang.org/x/exp/slog/internal/buffer ## explicit; go 1.18 golang.org/x/image/bmp golang.org/x/image/ccitt -golang.org/x/image/riff golang.org/x/image/tiff golang.org/x/image/tiff/lzw -golang.org/x/image/vp8 -golang.org/x/image/vp8l -golang.org/x/image/webp # golang.org/x/mod v0.18.0 ## explicit; go 1.18 golang.org/x/mod/internal/lazyregexp @@ -1100,7 +1059,6 @@ golang.org/x/mod/semver # golang.org/x/net v0.27.0 ## explicit; go 1.18 golang.org/x/net/bpf -golang.org/x/net/context golang.org/x/net/html golang.org/x/net/html/atom golang.org/x/net/http/httpguts