[chore] add back exif-terminator and use only for jpeg,png,webp (#3161)

* add back exif-terminator and use only for jpeg,png,webp

* fix arguments passed to terminateExif()

* pull in latest exif-terminator

* fix test

* update processed img

---------

Co-authored-by: tobi <tobi.smethurst@protonmail.com>
kim
2024-08-02 11:46:41 +00:00
committed by GitHub
parent 7b5917d6ae
commit 94e87610c4
191 changed files with 38772 additions and 58 deletions

0
vendor/github.com/dsoprea/go-exif/v3/.MODULE_ROOT generated vendored Normal file

9
vendor/github.com/dsoprea/go-exif/v3/LICENSE generated vendored Normal file

@@ -0,0 +1,9 @@
MIT LICENSE
Copyright 2019 Dustin Oprea
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

651
vendor/github.com/dsoprea/go-exif/v3/common/ifd.go generated vendored Normal file

@@ -0,0 +1,651 @@
package exifcommon
import (
"errors"
"fmt"
"strings"
"github.com/dsoprea/go-logging"
)
var (
ifdLogger = log.NewLogger("exifcommon.ifd")
)
var (
ErrChildIfdNotMapped = errors.New("no child-IFD for that tag-ID under parent")
)
// MappedIfd is one node in the IFD-mapping.
type MappedIfd struct {
ParentTagId uint16
Placement []uint16
Path []string
Name string
TagId uint16
Children map[uint16]*MappedIfd
}
// String returns a descriptive string.
func (mi *MappedIfd) String() string {
pathPhrase := mi.PathPhrase()
return fmt.Sprintf("MappedIfd<(0x%04X) [%s] PATH=[%s]>", mi.TagId, mi.Name, pathPhrase)
}
// PathPhrase returns a non-fully-qualified IFD path.
func (mi *MappedIfd) PathPhrase() string {
return strings.Join(mi.Path, "/")
}
// TODO(dustin): Refactor this to use IfdIdentity structs.
// IfdMapping describes all of the IFDs that we currently recognize.
type IfdMapping struct {
rootNode *MappedIfd
}
// NewIfdMapping returns a new IfdMapping struct.
func NewIfdMapping() (ifdMapping *IfdMapping) {
rootNode := &MappedIfd{
Path: make([]string, 0),
Children: make(map[uint16]*MappedIfd),
}
return &IfdMapping{
rootNode: rootNode,
}
}
// NewIfdMappingWithStandard returns a new IfdMapping struct preloaded with the
// standard IFDs.
func NewIfdMappingWithStandard() (ifdMapping *IfdMapping, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
im := NewIfdMapping()
err = LoadStandardIfds(im)
log.PanicIf(err)
return im, nil
}
// Get returns the node given the path slice.
func (im *IfdMapping) Get(parentPlacement []uint16) (childIfd *MappedIfd, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
ptr := im.rootNode
for _, tagId := range parentPlacement {
if descendantPtr, found := ptr.Children[tagId]; found == false {
log.Panicf("ifd child with tag-ID (%04x) not registered: [%s]", tagId, ptr.PathPhrase())
} else {
ptr = descendantPtr
}
}
return ptr, nil
}
// GetWithPath returns the node given the path string.
func (im *IfdMapping) GetWithPath(pathPhrase string) (mi *MappedIfd, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
if pathPhrase == "" {
log.Panicf("path-phrase is empty")
}
path := strings.Split(pathPhrase, "/")
ptr := im.rootNode
for _, name := range path {
var hit *MappedIfd
for _, mi := range ptr.Children {
if mi.Name == name {
hit = mi
break
}
}
if hit == nil {
log.Panicf("ifd child with name [%s] not registered: [%s]", name, ptr.PathPhrase())
}
ptr = hit
}
return ptr, nil
}
// GetChild is a convenience function to get the child path for a given parent
// placement and child tag-ID.
func (im *IfdMapping) GetChild(parentPathPhrase string, tagId uint16) (mi *MappedIfd, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
mi, err = im.GetWithPath(parentPathPhrase)
log.PanicIf(err)
for _, childMi := range mi.Children {
if childMi.TagId == tagId {
return childMi, nil
}
}
// Even if such an IFD appears in the data, it is not registered here and
// would therefore be unknown.
log.Panic(ErrChildIfdNotMapped)
return nil, nil
}
// IfdTagIdAndIndex represents a specific part of the IFD path.
//
// This is a legacy type.
type IfdTagIdAndIndex struct {
Name string
TagId uint16
Index int
}
// String returns a descriptive string.
func (itii IfdTagIdAndIndex) String() string {
return fmt.Sprintf("IfdTagIdAndIndex<NAME=[%s] ID=(%04x) INDEX=(%d)>", itii.Name, itii.TagId, itii.Index)
}
// ResolvePath takes a list of names, which can also be suffixed with indices
// (to identify the second, third, etc.. sibling IFD) and returns a list of
// tag-IDs and those indices.
//
// Example:
//
// - IFD/Exif/Iop
// - IFD0/Exif/Iop
//
// This is the only call that supports adding the numeric indices.
func (im *IfdMapping) ResolvePath(pathPhrase string) (lineage []IfdTagIdAndIndex, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
pathPhrase = strings.TrimSpace(pathPhrase)
if pathPhrase == "" {
log.Panicf("can not resolve empty path-phrase")
}
path := strings.Split(pathPhrase, "/")
lineage = make([]IfdTagIdAndIndex, len(path))
ptr := im.rootNode
empty := IfdTagIdAndIndex{}
for i, name := range path {
indexByte := name[len(name)-1]
index := 0
if indexByte >= '0' && indexByte <= '9' {
index = int(indexByte - '0')
name = name[:len(name)-1]
}
itii := IfdTagIdAndIndex{}
for _, mi := range ptr.Children {
if mi.Name != name {
continue
}
itii.Name = name
itii.TagId = mi.TagId
itii.Index = index
ptr = mi
break
}
if itii == empty {
log.Panicf("ifd child with name [%s] not registered: [%s]", name, pathPhrase)
}
lineage[i] = itii
}
return lineage, nil
}
// FqPathPhraseFromLineage returns the fully-qualified IFD path from the slice.
func (im *IfdMapping) FqPathPhraseFromLineage(lineage []IfdTagIdAndIndex) (fqPathPhrase string) {
fqPathParts := make([]string, len(lineage))
for i, itii := range lineage {
if itii.Index > 0 {
fqPathParts[i] = fmt.Sprintf("%s%d", itii.Name, itii.Index)
} else {
fqPathParts[i] = itii.Name
}
}
return strings.Join(fqPathParts, "/")
}
// PathPhraseFromLineage returns the non-fully-qualified IFD path from the
// slice.
func (im *IfdMapping) PathPhraseFromLineage(lineage []IfdTagIdAndIndex) (pathPhrase string) {
pathParts := make([]string, len(lineage))
for i, itii := range lineage {
pathParts[i] = itii.Name
}
return strings.Join(pathParts, "/")
}
// StripPathPhraseIndices returns a non-fully-qualified path-phrase (no
// indices).
func (im *IfdMapping) StripPathPhraseIndices(pathPhrase string) (strippedPathPhrase string, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
lineage, err := im.ResolvePath(pathPhrase)
log.PanicIf(err)
strippedPathPhrase = im.PathPhraseFromLineage(lineage)
return strippedPathPhrase, nil
}
// Add puts the given IFD at the given position of the tree. The position of the
// tree is referred to as the placement and is represented by a set of tag-IDs,
// where the leftmost is the root tag and the tags going to the right are
// progressive descendants.
func (im *IfdMapping) Add(parentPlacement []uint16, tagId uint16, name string) (err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
// TODO(dustin): !! It would be nicer to provide a list of names in the placement rather than tag-IDs.
ptr, err := im.Get(parentPlacement)
log.PanicIf(err)
path := make([]string, len(parentPlacement)+1)
if len(parentPlacement) > 0 {
copy(path, ptr.Path)
}
path[len(path)-1] = name
placement := make([]uint16, len(parentPlacement)+1)
if len(placement) > 0 {
copy(placement, ptr.Placement)
}
placement[len(placement)-1] = tagId
childIfd := &MappedIfd{
ParentTagId: ptr.TagId,
Path: path,
Placement: placement,
Name: name,
TagId: tagId,
Children: make(map[uint16]*MappedIfd),
}
if _, found := ptr.Children[tagId]; found == true {
log.Panicf("child IFD with tag-ID (%04x) already registered under IFD [%s] with tag-ID (%04x)", tagId, ptr.Name, ptr.TagId)
}
ptr.Children[tagId] = childIfd
return nil
}
func (im *IfdMapping) dumpLineages(stack []*MappedIfd, input []string) (output []string, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
currentIfd := stack[len(stack)-1]
output = input
for _, childIfd := range currentIfd.Children {
stackCopy := make([]*MappedIfd, len(stack)+1)
copy(stackCopy, stack)
stackCopy[len(stack)] = childIfd
// Add to output, but don't include the obligatory root node.
parts := make([]string, len(stackCopy)-1)
for i, mi := range stackCopy[1:] {
parts[i] = mi.Name
}
output = append(output, strings.Join(parts, "/"))
output, err = im.dumpLineages(stackCopy, output)
log.PanicIf(err)
}
return output, nil
}
// DumpLineages returns a slice of strings representing all mappings.
func (im *IfdMapping) DumpLineages() (output []string, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
stack := []*MappedIfd{im.rootNode}
output = make([]string, 0)
output, err = im.dumpLineages(stack, output)
log.PanicIf(err)
return output, nil
}
// LoadStandardIfds loads the standard IFDs into the mapping.
func LoadStandardIfds(im *IfdMapping) (err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
err = im.Add(
[]uint16{},
IfdStandardIfdIdentity.TagId(), IfdStandardIfdIdentity.Name())
log.PanicIf(err)
err = im.Add(
[]uint16{IfdStandardIfdIdentity.TagId()},
IfdExifStandardIfdIdentity.TagId(), IfdExifStandardIfdIdentity.Name())
log.PanicIf(err)
err = im.Add(
[]uint16{IfdStandardIfdIdentity.TagId(), IfdExifStandardIfdIdentity.TagId()},
IfdExifIopStandardIfdIdentity.TagId(), IfdExifIopStandardIfdIdentity.Name())
log.PanicIf(err)
err = im.Add(
[]uint16{IfdStandardIfdIdentity.TagId()},
IfdGpsInfoStandardIfdIdentity.TagId(), IfdGpsInfoStandardIfdIdentity.Name())
log.PanicIf(err)
return nil
}
// IfdTag describes a single IFD tag and its parent (if any).
type IfdTag struct {
parentIfdTag *IfdTag
tagId uint16
name string
}
func NewIfdTag(parentIfdTag *IfdTag, tagId uint16, name string) IfdTag {
return IfdTag{
parentIfdTag: parentIfdTag,
tagId: tagId,
name: name,
}
}
// ParentIfd returns the IfdTag of this IFD's parent.
func (it IfdTag) ParentIfd() *IfdTag {
return it.parentIfdTag
}
// TagId returns the tag-ID of this IFD.
func (it IfdTag) TagId() uint16 {
return it.tagId
}
// Name returns the simple name of this IFD.
func (it IfdTag) Name() string {
return it.name
}
// String returns a descriptive string.
func (it IfdTag) String() string {
parentIfdPhrase := ""
if it.parentIfdTag != nil {
parentIfdPhrase = fmt.Sprintf(" PARENT=(0x%04x)[%s]", it.parentIfdTag.tagId, it.parentIfdTag.name)
}
return fmt.Sprintf("IfdTag<TAG-ID=(0x%04x) NAME=[%s]%s>", it.tagId, it.name, parentIfdPhrase)
}
var (
// rootStandardIfd is the standard root IFD.
rootStandardIfd = NewIfdTag(nil, 0x0000, "IFD") // IFD
// exifStandardIfd is the standard "Exif" IFD.
exifStandardIfd = NewIfdTag(&rootStandardIfd, 0x8769, "Exif") // IFD/Exif
// iopStandardIfd is the standard "Iop" IFD.
iopStandardIfd = NewIfdTag(&exifStandardIfd, 0xA005, "Iop") // IFD/Exif/Iop
// gpsInfoStandardIfd is the standard "GPS" IFD.
gpsInfoStandardIfd = NewIfdTag(&rootStandardIfd, 0x8825, "GPSInfo") // IFD/GPSInfo
)
// IfdIdentityPart represents one component in an IFD path.
type IfdIdentityPart struct {
Name string
Index int
}
// String returns a fully-qualified IFD path.
func (iip IfdIdentityPart) String() string {
if iip.Index > 0 {
return fmt.Sprintf("%s%d", iip.Name, iip.Index)
} else {
return iip.Name
}
}
// UnindexedString returns a non-fully-qualified IFD path.
func (iip IfdIdentityPart) UnindexedString() string {
return iip.Name
}
// IfdIdentity represents a single IFD path and provides access to various
// information and representations.
//
// Only global instances can be used for equality checks.
type IfdIdentity struct {
ifdTag IfdTag
parts []IfdIdentityPart
ifdPath string
fqIfdPath string
}
// NewIfdIdentity returns a new IfdIdentity struct.
func NewIfdIdentity(ifdTag IfdTag, parts ...IfdIdentityPart) (ii *IfdIdentity) {
ii = &IfdIdentity{
ifdTag: ifdTag,
parts: parts,
}
ii.ifdPath = ii.getIfdPath()
ii.fqIfdPath = ii.getFqIfdPath()
return ii
}
// NewIfdIdentityFromString parses a string like "IFD/Exif" or "IFD1" or
// something more exotic with custom IFDs ("SomeIFD4/SomeChildIFD6"). Note that
// this will validate the unindexed IFD structure (because the standard tags
// from the specification are unindexed), but not, obviously, any indices (e.g.
// the numbers in "IFD0", "IFD1", "SomeIFD4/SomeChildIFD6"). It is the
// caller's responsibility to check whether these specific instances
// were actually parsed out of the stream.
func NewIfdIdentityFromString(im *IfdMapping, fqIfdPath string) (ii *IfdIdentity, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
lineage, err := im.ResolvePath(fqIfdPath)
log.PanicIf(err)
var lastIt *IfdTag
identityParts := make([]IfdIdentityPart, len(lineage))
for i, itii := range lineage {
// Build out the tag that will eventually point to the IFD represented
// by the right-most part in the IFD path.
it := &IfdTag{
parentIfdTag: lastIt,
tagId: itii.TagId,
name: itii.Name,
}
lastIt = it
// Create the next IfdIdentity part.
iip := IfdIdentityPart{
Name: itii.Name,
Index: itii.Index,
}
identityParts[i] = iip
}
ii = NewIfdIdentity(*lastIt, identityParts...)
return ii, nil
}
func (ii *IfdIdentity) getFqIfdPath() string {
partPhrases := make([]string, len(ii.parts))
for i, iip := range ii.parts {
partPhrases[i] = iip.String()
}
return strings.Join(partPhrases, "/")
}
func (ii *IfdIdentity) getIfdPath() string {
partPhrases := make([]string, len(ii.parts))
for i, iip := range ii.parts {
partPhrases[i] = iip.UnindexedString()
}
return strings.Join(partPhrases, "/")
}
// String returns a fully-qualified IFD path.
func (ii *IfdIdentity) String() string {
return ii.fqIfdPath
}
// UnindexedString returns a non-fully-qualified IFD path.
func (ii *IfdIdentity) UnindexedString() string {
return ii.ifdPath
}
// IfdTag returns the tag struct behind this IFD.
func (ii *IfdIdentity) IfdTag() IfdTag {
return ii.ifdTag
}
// TagId returns the tag-ID of the IFD.
func (ii *IfdIdentity) TagId() uint16 {
return ii.ifdTag.TagId()
}
// LeafPathPart returns the last right-most path-part, which represents the
// current IFD.
func (ii *IfdIdentity) LeafPathPart() IfdIdentityPart {
return ii.parts[len(ii.parts)-1]
}
// Name returns the simple name of this IFD.
func (ii *IfdIdentity) Name() string {
return ii.LeafPathPart().Name
}
// Index returns the index of this IFD (more than one IFD under a parent IFD
// will be numbered [0..n]).
func (ii *IfdIdentity) Index() int {
return ii.LeafPathPart().Index
}
// Equals returns true if the two IfdIdentity instances are effectively
// identical.
//
// Since there's no way to get a specific fully-qualified IFD path without a
// certain slice of parts, and all other fields are also derived from this,
// checking that the fully-qualified IFD paths are equal is sufficient.
func (ii *IfdIdentity) Equals(ii2 *IfdIdentity) bool {
return ii.String() == ii2.String()
}
// NewChild creates an IfdIdentity for an IFD that is a child of the current
// IFD.
func (ii *IfdIdentity) NewChild(childIfdTag IfdTag, index int) (iiChild *IfdIdentity) {
if *childIfdTag.parentIfdTag != ii.ifdTag {
log.Panicf("can not add child; we are not the parent:\nUS=%v\nCHILD=%v", ii.ifdTag, childIfdTag)
}
childPart := IfdIdentityPart{childIfdTag.name, index}
childParts := append(ii.parts, childPart)
iiChild = NewIfdIdentity(childIfdTag, childParts...)
return iiChild
}
// NewSibling creates an IfdIdentity for an IFD that is a sibling to the current
// one.
func (ii *IfdIdentity) NewSibling(index int) (iiSibling *IfdIdentity) {
parts := make([]IfdIdentityPart, len(ii.parts))
copy(parts, ii.parts)
parts[len(parts)-1].Index = index
iiSibling = NewIfdIdentity(ii.ifdTag, parts...)
return iiSibling
}
var (
// IfdStandardIfdIdentity represents the IFD path for IFD0.
IfdStandardIfdIdentity = NewIfdIdentity(rootStandardIfd, IfdIdentityPart{"IFD", 0})
// IfdExifStandardIfdIdentity represents the IFD path for IFD0/Exif0.
IfdExifStandardIfdIdentity = IfdStandardIfdIdentity.NewChild(exifStandardIfd, 0)
// IfdExifIopStandardIfdIdentity represents the IFD path for IFD0/Exif0/Iop0.
IfdExifIopStandardIfdIdentity = IfdExifStandardIfdIdentity.NewChild(iopStandardIfd, 0)
// IfdGPSInfoStandardIfdIdentity represents the IFD path for IFD0/GPSInfo0.
IfdGpsInfoStandardIfdIdentity = IfdStandardIfdIdentity.NewChild(gpsInfoStandardIfd, 0)
// Ifd1StandardIfdIdentity represents the IFD path for IFD1.
Ifd1StandardIfdIdentity = NewIfdIdentity(rootStandardIfd, IfdIdentityPart{"IFD", 1})
)
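
A minimal usage sketch of the mapping API above (an editor's illustration, not part of the diff; it assumes the vendored package is imported by its upstream path):

package main

import (
    "fmt"

    exifcommon "github.com/dsoprea/go-exif/v3/common"
)

func main() {
    // Build a mapping preloaded with the standard IFDs (IFD, Exif, Iop, GPSInfo).
    im, err := exifcommon.NewIfdMappingWithStandard()
    if err != nil {
        panic(err)
    }

    // Resolve a path phrase into (tag-ID, index) pairs.
    lineage, err := im.ResolvePath("IFD/Exif/Iop")
    if err != nil {
        panic(err)
    }

    fmt.Println(im.FqPathPhraseFromLineage(lineage)) // IFD/Exif/Iop
}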

280
vendor/github.com/dsoprea/go-exif/v3/common/parser.go generated vendored Normal file

@@ -0,0 +1,280 @@
package exifcommon
import (
"bytes"
"errors"
"math"
"encoding/binary"
"github.com/dsoprea/go-logging"
)
var (
parserLogger = log.NewLogger("exifcommon.parser")
)
var (
ErrParseFail = errors.New("parse failure")
)
// Parser knows how to parse all well-defined, encoded EXIF types.
type Parser struct {
}
// ParseBytes knows how to parse a byte-type value.
func (p *Parser) ParseBytes(data []byte, unitCount uint32) (value []uint8, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
// TODO(dustin): Add test
count := int(unitCount)
if len(data) < (TypeByte.Size() * count) {
log.Panic(ErrNotEnoughData)
}
value = []uint8(data[:count])
return value, nil
}
// ParseAscii returns a string and auto-strips the trailing NUL character that
// should be at the end of the encoding.
func (p *Parser) ParseAscii(data []byte, unitCount uint32) (value string, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
// TODO(dustin): Add test
count := int(unitCount)
if len(data) < (TypeAscii.Size() * count) {
log.Panic(ErrNotEnoughData)
}
if len(data) == 0 || data[count-1] != 0 {
s := string(data[:count])
parserLogger.Warningf(nil, "ASCII not terminated with NUL as expected: [%v]", s)
for i, c := range s {
if c > 127 {
// Binary
t := s[:i]
parserLogger.Warningf(nil, "ASCII also had binary characters. Truncating: [%v]->[%s]", s, t)
return t, nil
}
}
return s, nil
}
// Auto-strip the NUL from the end. It serves no purpose outside of
// encoding semantics.
return string(data[:count-1]), nil
}
// ParseAsciiNoNul returns a string without any consideration for a trailing NUL
// character.
func (p *Parser) ParseAsciiNoNul(data []byte, unitCount uint32) (value string, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
// TODO(dustin): Add test
count := int(unitCount)
if len(data) < (TypeAscii.Size() * count) {
log.Panic(ErrNotEnoughData)
}
return string(data[:count]), nil
}
// ParseShorts knows how to parse an encoded list of shorts.
func (p *Parser) ParseShorts(data []byte, unitCount uint32, byteOrder binary.ByteOrder) (value []uint16, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
// TODO(dustin): Add test
count := int(unitCount)
if len(data) < (TypeShort.Size() * count) {
log.Panic(ErrNotEnoughData)
}
value = make([]uint16, count)
for i := 0; i < count; i++ {
value[i] = byteOrder.Uint16(data[i*2:])
}
return value, nil
}
// ParseLongs knows how to parse an encoded list of unsigned longs.
func (p *Parser) ParseLongs(data []byte, unitCount uint32, byteOrder binary.ByteOrder) (value []uint32, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
// TODO(dustin): Add test
count := int(unitCount)
if len(data) < (TypeLong.Size() * count) {
log.Panic(ErrNotEnoughData)
}
value = make([]uint32, count)
for i := 0; i < count; i++ {
value[i] = byteOrder.Uint32(data[i*4:])
}
return value, nil
}
// ParseFloats knows how to parse an encoded list of floats.
func (p *Parser) ParseFloats(data []byte, unitCount uint32, byteOrder binary.ByteOrder) (value []float32, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
count := int(unitCount)
if len(data) != (TypeFloat.Size() * count) {
log.Panic(ErrNotEnoughData)
}
value = make([]float32, count)
for i := 0; i < count; i++ {
value[i] = math.Float32frombits(byteOrder.Uint32(data[i*4 : (i+1)*4]))
}
return value, nil
}
// ParseDoubles knows how to parse an encoded list of doubles.
func (p *Parser) ParseDoubles(data []byte, unitCount uint32, byteOrder binary.ByteOrder) (value []float64, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
count := int(unitCount)
if len(data) != (TypeDouble.Size() * count) {
log.Panic(ErrNotEnoughData)
}
value = make([]float64, count)
for i := 0; i < count; i++ {
value[i] = math.Float64frombits(byteOrder.Uint64(data[i*8 : (i+1)*8]))
}
return value, nil
}
// ParseRationals knows how to parse an encoded list of unsigned rationals.
func (p *Parser) ParseRationals(data []byte, unitCount uint32, byteOrder binary.ByteOrder) (value []Rational, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
// TODO(dustin): Add test
count := int(unitCount)
if len(data) < (TypeRational.Size() * count) {
log.Panic(ErrNotEnoughData)
}
value = make([]Rational, count)
for i := 0; i < count; i++ {
value[i].Numerator = byteOrder.Uint32(data[i*8:])
value[i].Denominator = byteOrder.Uint32(data[i*8+4:])
}
return value, nil
}
// ParseSignedLongs knows how to parse an encoded list of signed longs.
func (p *Parser) ParseSignedLongs(data []byte, unitCount uint32, byteOrder binary.ByteOrder) (value []int32, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
// TODO(dustin): Add test
count := int(unitCount)
if len(data) < (TypeSignedLong.Size() * count) {
log.Panic(ErrNotEnoughData)
}
b := bytes.NewBuffer(data)
value = make([]int32, count)
for i := 0; i < count; i++ {
err := binary.Read(b, byteOrder, &value[i])
log.PanicIf(err)
}
return value, nil
}
// ParseSignedRationals knows how to parse an encoded list of signed
// rationals.
func (p *Parser) ParseSignedRationals(data []byte, unitCount uint32, byteOrder binary.ByteOrder) (value []SignedRational, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
// TODO(dustin): Add test
count := int(unitCount)
if len(data) < (TypeSignedRational.Size() * count) {
log.Panic(ErrNotEnoughData)
}
b := bytes.NewBuffer(data)
value = make([]SignedRational, count)
for i := 0; i < count; i++ {
err = binary.Read(b, byteOrder, &value[i].Numerator)
log.PanicIf(err)
err = binary.Read(b, byteOrder, &value[i].Denominator)
log.PanicIf(err)
}
return value, nil
}
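
A hedged sketch of the Parser API above (not part of the diff): decoding two big-endian SHORT values from raw bytes.

package main

import (
    "encoding/binary"
    "fmt"

    exifcommon "github.com/dsoprea/go-exif/v3/common"
)

func main() {
    p := new(exifcommon.Parser)

    // Two big-endian SHORTs: 0x0102 and 0x0304.
    raw := []byte{0x01, 0x02, 0x03, 0x04}

    values, err := p.ParseShorts(raw, 2, binary.BigEndian)
    if err != nil {
        panic(err)
    }

    fmt.Println(values) // [258 772]
}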

88
vendor/github.com/dsoprea/go-exif/v3/common/testing_common.go generated vendored Normal file

@@ -0,0 +1,88 @@
package exifcommon
import (
"os"
"path"
"encoding/binary"
"io/ioutil"
"github.com/dsoprea/go-logging"
)
var (
moduleRootPath = ""
testExifData []byte = nil
// EncodeDefaultByteOrder is the default byte-order for encoding operations.
EncodeDefaultByteOrder = binary.BigEndian
// Default byte order for tests.
TestDefaultByteOrder = binary.BigEndian
)
// GetModuleRootPath returns the module root, taken from the
// EXIF_MODULE_ROOT_PATH environment variable if set, else found by walking up
// from the working directory until a .MODULE_ROOT stamp file is encountered.
func GetModuleRootPath() string {
if moduleRootPath == "" {
moduleRootPath = os.Getenv("EXIF_MODULE_ROOT_PATH")
if moduleRootPath != "" {
return moduleRootPath
}
currentWd, err := os.Getwd()
log.PanicIf(err)
currentPath := currentWd
visited := make([]string, 0)
for {
tryStampFilepath := path.Join(currentPath, ".MODULE_ROOT")
_, err := os.Stat(tryStampFilepath)
if err != nil && os.IsNotExist(err) != true {
log.Panic(err)
} else if err == nil {
break
}
visited = append(visited, tryStampFilepath)
currentPath = path.Dir(currentPath)
if currentPath == "/" {
log.Panicf("could not find module-root: %v", visited)
}
}
moduleRootPath = currentPath
}
return moduleRootPath
}
// GetTestAssetsPath returns the path of the module's test-assets directory.
func GetTestAssetsPath() string {
moduleRootPath := GetModuleRootPath()
assetsPath := path.Join(moduleRootPath, "assets")
return assetsPath
}
func getTestImageFilepath() string {
assetsPath := GetTestAssetsPath()
testImageFilepath := path.Join(assetsPath, "NDM_8901.jpg")
return testImageFilepath
}
func getTestExifData() []byte {
if testExifData == nil {
assetsPath := GetTestAssetsPath()
filepath := path.Join(assetsPath, "NDM_8901.jpg.exif")
var err error
testExifData, err = ioutil.ReadFile(filepath)
log.PanicIf(err)
}
return testExifData
}
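
A small sketch of the test helpers above (editor's illustration; the module path below is hypothetical): the root lookup honours EXIF_MODULE_ROOT_PATH before falling back to walking the directory tree for a .MODULE_ROOT stamp file.

package main

import (
    "fmt"
    "os"

    exifcommon "github.com/dsoprea/go-exif/v3/common"
)

func main() {
    // Hypothetical module location; without this, the lookup walks up from
    // the working directory until it finds a .MODULE_ROOT stamp file.
    os.Setenv("EXIF_MODULE_ROOT_PATH", "/tmp/go-exif")

    fmt.Println(exifcommon.GetTestAssetsPath()) // /tmp/go-exif/assets
}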

482
vendor/github.com/dsoprea/go-exif/v3/common/type.go generated vendored Normal file

@@ -0,0 +1,482 @@
package exifcommon
import (
"errors"
"fmt"
"reflect"
"strconv"
"strings"
"unicode"
"encoding/binary"
"github.com/dsoprea/go-logging"
)
var (
typeLogger = log.NewLogger("exif.type")
)
var (
// ErrNotEnoughData is used when there isn't enough data to accommodate what
// we're trying to parse (sizeof(type) * unit_count).
ErrNotEnoughData = errors.New("not enough data for type")
// ErrWrongType is used when we try to parse anything other than the
// current type.
ErrWrongType = errors.New("wrong type, can not parse")
// ErrUnhandledUndefinedTypedTag is used when we try to parse a tag that's
// recorded as an "unknown" type but not a documented tag (therefore
// leaving us not knowing how to read it).
ErrUnhandledUndefinedTypedTag = errors.New("not a standard unknown-typed tag")
)
// TagTypePrimitive is a type-alias that lets us easily look up type properties.
type TagTypePrimitive uint16
const (
// TypeByte describes an encoded list of bytes.
TypeByte TagTypePrimitive = 1
// TypeAscii describes an encoded list of characters that is terminated
// with a NUL in its encoded form.
TypeAscii TagTypePrimitive = 2
// TypeShort describes an encoded list of shorts.
TypeShort TagTypePrimitive = 3
// TypeLong describes an encoded list of longs.
TypeLong TagTypePrimitive = 4
// TypeRational describes an encoded list of rationals.
TypeRational TagTypePrimitive = 5
// TypeUndefined describes an encoded value that has a complex/non-clearcut
// interpretation.
TypeUndefined TagTypePrimitive = 7
// We've seen type-8, but have no documentation on it.
// TypeSignedLong describes an encoded list of signed longs.
TypeSignedLong TagTypePrimitive = 9
// TypeSignedRational describes an encoded list of signed rationals.
TypeSignedRational TagTypePrimitive = 10
// TypeFloat describes an encoded list of floats.
TypeFloat TagTypePrimitive = 11
// TypeDouble describes an encoded list of doubles.
TypeDouble TagTypePrimitive = 12
// TypeAsciiNoNul is just a pseudo-type, for our own purposes.
TypeAsciiNoNul TagTypePrimitive = 0xf0
)
// String returns the name of the type.
func (typeType TagTypePrimitive) String() string {
return TypeNames[typeType]
}
// Size returns the size of one atomic unit of the type.
func (tagType TagTypePrimitive) Size() int {
switch tagType {
case TypeByte, TypeAscii, TypeAsciiNoNul:
return 1
case TypeShort:
return 2
case TypeLong, TypeSignedLong, TypeFloat:
return 4
case TypeRational, TypeSignedRational, TypeDouble:
return 8
default:
log.Panicf("can not determine tag-value size for type (%d): [%s]",
tagType,
TypeNames[tagType])
// Never called.
return 0
}
}
// IsValid returns true if tagType is a valid type.
func (tagType TagTypePrimitive) IsValid() bool {
// TODO(dustin): Add test
return tagType == TypeByte ||
tagType == TypeAscii ||
tagType == TypeAsciiNoNul ||
tagType == TypeShort ||
tagType == TypeLong ||
tagType == TypeRational ||
tagType == TypeSignedLong ||
tagType == TypeSignedRational ||
tagType == TypeFloat ||
tagType == TypeDouble ||
tagType == TypeUndefined
}
var (
// TODO(dustin): Rename TypeNames() to typeNames() and add getter.
TypeNames = map[TagTypePrimitive]string{
TypeByte: "BYTE",
TypeAscii: "ASCII",
TypeShort: "SHORT",
TypeLong: "LONG",
TypeRational: "RATIONAL",
TypeUndefined: "UNDEFINED",
TypeSignedLong: "SLONG",
TypeSignedRational: "SRATIONAL",
TypeFloat: "FLOAT",
TypeDouble: "DOUBLE",
TypeAsciiNoNul: "_ASCII_NO_NUL",
}
typeNamesR = map[string]TagTypePrimitive{}
)
// Rational describes an unsigned rational value.
type Rational struct {
// Numerator is the numerator of the rational value.
Numerator uint32
// Denominator is the denominator of the rational value.
Denominator uint32
}
// SignedRational describes a signed rational value.
type SignedRational struct {
// Numerator is the numerator of the rational value.
Numerator int32
// Denominator is the denominator of the rational value.
Denominator int32
}
func isPrintableText(s string) bool {
for _, c := range s {
// unicode.IsPrint() returns false for newline characters.
if c == 0x0d || c == 0x0a {
continue
} else if unicode.IsPrint(rune(c)) == false {
return false
}
}
return true
}
// FormatFromType returns a stringified representation of the given (already
// parsed) value. This function also supports undefined-type values (the ones
// that we support, anyway) by way of the String() method that they all
// require. We can't be more specific because we're a base package and we
// can't refer to it.
func FormatFromType(value interface{}, justFirst bool) (phrase string, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
// TODO(dustin): !! Add test
switch t := value.(type) {
case []byte:
return DumpBytesToString(t), nil
case string:
for i, c := range t {
if c == 0 {
t = t[:i]
break
}
}
if isPrintableText(t) == false {
phrase = fmt.Sprintf("string with binary data (%d bytes)", len(t))
return phrase, nil
}
return t, nil
case []uint16, []uint32, []int32, []float64, []float32:
val := reflect.ValueOf(t)
if val.Len() == 0 {
return "", nil
}
if justFirst == true {
var valueSuffix string
if val.Len() > 1 {
valueSuffix = "..."
}
return fmt.Sprintf("%v%s", val.Index(0), valueSuffix), nil
}
return fmt.Sprintf("%v", val), nil
case []Rational:
if len(t) == 0 {
return "", nil
}
parts := make([]string, len(t))
for i, r := range t {
parts[i] = fmt.Sprintf("%d/%d", r.Numerator, r.Denominator)
if justFirst == true {
break
}
}
if justFirst == true {
var valueSuffix string
if len(t) > 1 {
valueSuffix = "..."
}
return fmt.Sprintf("%v%s", parts[0], valueSuffix), nil
}
return fmt.Sprintf("%v", parts), nil
case []SignedRational:
if len(t) == 0 {
return "", nil
}
parts := make([]string, len(t))
for i, r := range t {
parts[i] = fmt.Sprintf("%d/%d", r.Numerator, r.Denominator)
if justFirst == true {
break
}
}
if justFirst == true {
var valueSuffix string
if len(t) > 1 {
valueSuffix = "..."
}
return fmt.Sprintf("%v%s", parts[0], valueSuffix), nil
}
return fmt.Sprintf("%v", parts), nil
case fmt.Stringer:
s := t.String()
if isPrintableText(s) == false {
phrase = fmt.Sprintf("stringable with binary data (%d bytes)", len(s))
return phrase, nil
}
// An undefined value that is documented (or that we otherwise support).
return s, nil
default:
// Affects only "unknown" values, in general.
log.Panicf("type can not be formatted into string: %v", reflect.TypeOf(value).Name())
// Never called.
return "", nil
}
}
// FormatFromBytes returns a stringified value for the given encoded bytes. It
// automatically parses them and calculates the unit-count from the type size.
func FormatFromBytes(rawBytes []byte, tagType TagTypePrimitive, justFirst bool, byteOrder binary.ByteOrder) (phrase string, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
// TODO(dustin): !! Add test
typeSize := tagType.Size()
if len(rawBytes)%typeSize != 0 {
log.Panicf("byte-count (%d) does not align for [%s] type with a size of (%d) bytes", len(rawBytes), TypeNames[tagType], typeSize)
}
// unitCount is the calculated unit-count. This should equal the original
// value from the tag (pre-resolution).
unitCount := uint32(len(rawBytes) / typeSize)
// Truncate the items if it's not bytes or a string and we just want the first.
var value interface{}
switch tagType {
case TypeByte:
var err error
value, err = parser.ParseBytes(rawBytes, unitCount)
log.PanicIf(err)
case TypeAscii:
var err error
value, err = parser.ParseAscii(rawBytes, unitCount)
log.PanicIf(err)
case TypeAsciiNoNul:
var err error
value, err = parser.ParseAsciiNoNul(rawBytes, unitCount)
log.PanicIf(err)
case TypeShort:
var err error
value, err = parser.ParseShorts(rawBytes, unitCount, byteOrder)
log.PanicIf(err)
case TypeLong:
var err error
value, err = parser.ParseLongs(rawBytes, unitCount, byteOrder)
log.PanicIf(err)
case TypeFloat:
var err error
value, err = parser.ParseFloats(rawBytes, unitCount, byteOrder)
log.PanicIf(err)
case TypeDouble:
var err error
value, err = parser.ParseDoubles(rawBytes, unitCount, byteOrder)
log.PanicIf(err)
case TypeRational:
var err error
value, err = parser.ParseRationals(rawBytes, unitCount, byteOrder)
log.PanicIf(err)
case TypeSignedLong:
var err error
value, err = parser.ParseSignedLongs(rawBytes, unitCount, byteOrder)
log.PanicIf(err)
case TypeSignedRational:
var err error
value, err = parser.ParseSignedRationals(rawBytes, unitCount, byteOrder)
log.PanicIf(err)
default:
// Affects only "unknown" values, in general.
log.Panicf("value of type [%s] can not be formatted into string", tagType.String())
// Never called.
return "", nil
}
phrase, err = FormatFromType(value, justFirst)
log.PanicIf(err)
return phrase, nil
}
// TranslateStringToType converts user-provided strings to properly-typed
// values. If a string, returns a string. Else, assumes that it's a single
// number. If a list needs to be processed, it is the caller's responsibility to
// split it (according to whichever convention has been established).
func TranslateStringToType(tagType TagTypePrimitive, valueString string) (value interface{}, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
if tagType == TypeUndefined {
// The caller should just call String() on the decoded type.
log.Panicf("undefined-type values are not supported")
}
if tagType == TypeByte {
wide, err := strconv.ParseInt(valueString, 16, 8)
log.PanicIf(err)
return byte(wide), nil
} else if tagType == TypeAscii || tagType == TypeAsciiNoNul {
// Whether or not we're putting a NUL on the end is only relevant for
// byte-level encoding. This function really just supports a user
// interface.
return valueString, nil
} else if tagType == TypeShort {
n, err := strconv.ParseUint(valueString, 10, 16)
log.PanicIf(err)
return uint16(n), nil
} else if tagType == TypeLong {
n, err := strconv.ParseUint(valueString, 10, 32)
log.PanicIf(err)
return uint32(n), nil
} else if tagType == TypeRational {
parts := strings.SplitN(valueString, "/", 2)
numerator, err := strconv.ParseUint(parts[0], 10, 32)
log.PanicIf(err)
denominator, err := strconv.ParseUint(parts[1], 10, 32)
log.PanicIf(err)
return Rational{
Numerator: uint32(numerator),
Denominator: uint32(denominator),
}, nil
} else if tagType == TypeSignedLong {
n, err := strconv.ParseInt(valueString, 10, 32)
log.PanicIf(err)
return int32(n), nil
} else if tagType == TypeFloat {
n, err := strconv.ParseFloat(valueString, 32)
log.PanicIf(err)
return float32(n), nil
} else if tagType == TypeDouble {
n, err := strconv.ParseFloat(valueString, 64)
log.PanicIf(err)
return float64(n), nil
} else if tagType == TypeSignedRational {
parts := strings.SplitN(valueString, "/", 2)
numerator, err := strconv.ParseInt(parts[0], 10, 32)
log.PanicIf(err)
denominator, err := strconv.ParseInt(parts[1], 10, 32)
log.PanicIf(err)
return SignedRational{
Numerator: int32(numerator),
Denominator: int32(denominator),
}, nil
}
log.Panicf("from-string encoding for type not supported; this shouldn't happen: [%s]", tagType.String())
return nil, nil
}
// GetTypeByName returns the `TagTypePrimitive` for the given type name.
// Returns (0) if not valid.
func GetTypeByName(typeName string) (tagType TagTypePrimitive, found bool) {
tagType, found = typeNamesR[typeName]
return tagType, found
}
// BasicTag describes a single tag for any purpose.
type BasicTag struct {
// FqIfdPath is the fully-qualified IFD-path.
FqIfdPath string
// IfdPath is the unindexed IFD-path.
IfdPath string
// TagId is the tag-ID.
TagId uint16
}
func init() {
for typeId, typeName := range TypeNames {
typeNamesR[typeName] = typeId
}
}
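
A hedged sketch of the type helpers above (not part of the diff): round-tripping a RATIONAL between its string form and its typed form.

package main

import (
    "fmt"

    exifcommon "github.com/dsoprea/go-exif/v3/common"
)

func main() {
    // Translate a user-provided string into a typed value.
    v, err := exifcommon.TranslateStringToType(exifcommon.TypeRational, "72/1")
    if err != nil {
        panic(err)
    }
    fmt.Printf("%#v\n", v) // exifcommon.Rational{Numerator:0x48, Denominator:0x1}

    // Stringify a typed slice, keeping just the first item.
    values := []exifcommon.Rational{
        {Numerator: 72, Denominator: 1},
        {Numerator: 96, Denominator: 1},
    }
    phrase, err := exifcommon.FormatFromType(values, true)
    if err != nil {
        panic(err)
    }
    fmt.Println(phrase) // 72/1...
}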

148
vendor/github.com/dsoprea/go-exif/v3/common/utility.go generated vendored Normal file

@@ -0,0 +1,148 @@
package exifcommon
import (
"bytes"
"fmt"
"reflect"
"strconv"
"strings"
"time"
"github.com/dsoprea/go-logging"
)
var (
timeType = reflect.TypeOf(time.Time{})
)
// DumpBytes prints a list of hex-encoded bytes.
func DumpBytes(data []byte) {
fmt.Printf("DUMP: ")
for _, x := range data {
fmt.Printf("%02x ", x)
}
fmt.Printf("\n")
}
// DumpBytesClause prints a list like DumpBytes(), but encapsulated in
// "[]byte { ... }".
func DumpBytesClause(data []byte) {
fmt.Printf("DUMP: ")
fmt.Printf("[]byte { ")
for i, x := range data {
fmt.Printf("0x%02x", x)
if i < len(data)-1 {
fmt.Printf(", ")
}
}
fmt.Printf(" }\n")
}
// DumpBytesToString returns a stringified list of hex-encoded bytes.
func DumpBytesToString(data []byte) string {
b := new(bytes.Buffer)
for i, x := range data {
_, err := b.WriteString(fmt.Sprintf("%02x", x))
log.PanicIf(err)
if i < len(data)-1 {
_, err := b.WriteRune(' ')
log.PanicIf(err)
}
}
return b.String()
}
// DumpBytesClauseToString returns a comma-separated list of hex-encoded bytes.
func DumpBytesClauseToString(data []byte) string {
b := new(bytes.Buffer)
for i, x := range data {
_, err := b.WriteString(fmt.Sprintf("0x%02x", x))
log.PanicIf(err)
if i < len(data)-1 {
_, err := b.WriteString(", ")
log.PanicIf(err)
}
}
return b.String()
}
// ExifFullTimestampString produces a string like "2018:11:30 13:01:49" from a
// `time.Time` struct. It will attempt to convert to UTC first.
func ExifFullTimestampString(t time.Time) (fullTimestampPhrase string) {
t = t.UTC()
return fmt.Sprintf("%04d:%02d:%02d %02d:%02d:%02d", t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second())
}
// ParseExifFullTimestamp parses dates like "2018:11:30 13:01:49" into a UTC
// `time.Time` struct.
func ParseExifFullTimestamp(fullTimestampPhrase string) (timestamp time.Time, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
parts := strings.Split(fullTimestampPhrase, " ")
datestampValue, timestampValue := parts[0], parts[1]
// Normalize the separators.
datestampValue = strings.ReplaceAll(datestampValue, "-", ":")
timestampValue = strings.ReplaceAll(timestampValue, "-", ":")
dateParts := strings.Split(datestampValue, ":")
year, err := strconv.ParseUint(dateParts[0], 10, 16)
if err != nil {
log.Panicf("could not parse year")
}
month, err := strconv.ParseUint(dateParts[1], 10, 8)
if err != nil {
log.Panicf("could not parse month")
}
day, err := strconv.ParseUint(dateParts[2], 10, 8)
if err != nil {
log.Panicf("could not parse day")
}
timeParts := strings.Split(timestampValue, ":")
hour, err := strconv.ParseUint(timeParts[0], 10, 8)
if err != nil {
log.Panicf("could not parse hour")
}
minute, err := strconv.ParseUint(timeParts[1], 10, 8)
if err != nil {
log.Panicf("could not parse minute")
}
second, err := strconv.ParseUint(timeParts[2], 10, 8)
if err != nil {
log.Panicf("could not parse second")
}
timestamp = time.Date(int(year), time.Month(month), int(day), int(hour), int(minute), int(second), 0, time.UTC)
return timestamp, nil
}
// IsTime returns true if the value is a `time.Time`.
func IsTime(v interface{}) bool {
// TODO(dustin): Add test
return reflect.TypeOf(v) == timeType
}
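
A short sketch of the timestamp helpers above (not part of the diff):

package main

import (
    "fmt"
    "time"

    exifcommon "github.com/dsoprea/go-exif/v3/common"
)

func main() {
    t := time.Date(2018, 11, 30, 13, 1, 49, 0, time.UTC)

    // Encode to the EXIF timestamp layout.
    phrase := exifcommon.ExifFullTimestampString(t)
    fmt.Println(phrase) // 2018:11:30 13:01:49

    // And parse it back into a UTC time.Time.
    parsed, err := exifcommon.ParseExifFullTimestamp(phrase)
    if err != nil {
        panic(err)
    }
    fmt.Println(parsed.Equal(t)) // true
}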

464
vendor/github.com/dsoprea/go-exif/v3/common/value_context.go generated vendored Normal file

@@ -0,0 +1,464 @@
package exifcommon
import (
"errors"
"io"
"encoding/binary"
"github.com/dsoprea/go-logging"
)
var (
parser *Parser
)
var (
// ErrNotFarValue indicates that an offset-based lookup was attempted for a
// non-offset-based (embedded) value.
ErrNotFarValue = errors.New("not a far value")
)
// ValueContext embeds all of the parameters required to find and extract the
// actual tag value.
type ValueContext struct {
unitCount uint32
valueOffset uint32
rawValueOffset []byte
rs io.ReadSeeker
tagType TagTypePrimitive
byteOrder binary.ByteOrder
// undefinedValueTagType is the effective type to use if this is an
// "undefined" value.
undefinedValueTagType TagTypePrimitive
ifdPath string
tagId uint16
}
// TODO(dustin): We can update newValueContext() to derive `valueOffset` itself (from `rawValueOffset`).
// NewValueContext returns a new ValueContext struct.
func NewValueContext(ifdPath string, tagId uint16, unitCount, valueOffset uint32, rawValueOffset []byte, rs io.ReadSeeker, tagType TagTypePrimitive, byteOrder binary.ByteOrder) *ValueContext {
return &ValueContext{
unitCount: unitCount,
valueOffset: valueOffset,
rawValueOffset: rawValueOffset,
rs: rs,
tagType: tagType,
byteOrder: byteOrder,
ifdPath: ifdPath,
tagId: tagId,
}
}
// SetUndefinedValueType sets the effective type if this is an unknown-type tag.
func (vc *ValueContext) SetUndefinedValueType(tagType TagTypePrimitive) {
if vc.tagType != TypeUndefined {
log.Panicf("can not set effective type for unknown-type tag because this is *not* an unknown-type tag")
}
vc.undefinedValueTagType = tagType
}
// UnitCount returns the embedded unit-count.
func (vc *ValueContext) UnitCount() uint32 {
return vc.unitCount
}
// ValueOffset returns the value-offset decoded as a `uint32`.
func (vc *ValueContext) ValueOffset() uint32 {
return vc.valueOffset
}
// RawValueOffset returns the uninterpreted value-offset. This is used for
// embedded values (values small enough to fit within the offset bytes rather
// than needing to be stored elsewhere and referred to by an actual offset).
func (vc *ValueContext) RawValueOffset() []byte {
return vc.rawValueOffset
}
// AddressableData returns the block of data that we can dereference into.
func (vc *ValueContext) AddressableData() io.ReadSeeker {
// RELEASE(dustin): Rename from AddressableData() to ReadSeeker()
return vc.rs
}
// ByteOrder returns the byte-order of numbers.
func (vc *ValueContext) ByteOrder() binary.ByteOrder {
return vc.byteOrder
}
// IfdPath returns the path of the IFD containing this tag.
func (vc *ValueContext) IfdPath() string {
return vc.ifdPath
}
// TagId returns the ID of the tag that we represent.
func (vc *ValueContext) TagId() uint16 {
return vc.tagId
}
// isEmbedded returns whether the value is embedded or a reference. This can't
// be precalculated since the size is not defined for all types (namely the
// "undefined" types).
func (vc *ValueContext) isEmbedded() bool {
tagType := vc.effectiveValueType()
return (tagType.Size() * int(vc.unitCount)) <= 4
}
// SizeInBytes returns the number of bytes that this value requires. The
// underlying call will panic if the type is UNDEFINED. It is the
// responsibility of the caller to preemptively check that.
func (vc *ValueContext) SizeInBytes() int {
tagType := vc.effectiveValueType()
return tagType.Size() * int(vc.unitCount)
}
// effectiveValueType returns the effective type of the unknown-type tag or, if
// not unknown, the actual type.
func (vc *ValueContext) effectiveValueType() (tagType TagTypePrimitive) {
if vc.tagType == TypeUndefined {
tagType = vc.undefinedValueTagType
if tagType == 0 {
log.Panicf("undefined-value type not set")
}
} else {
tagType = vc.tagType
}
return tagType
}
// readRawEncoded returns the encoded bytes for the value that we represent.
func (vc *ValueContext) readRawEncoded() (rawBytes []byte, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
tagType := vc.effectiveValueType()
unitSizeRaw := uint32(tagType.Size())
if vc.isEmbedded() == true {
byteLength := unitSizeRaw * vc.unitCount
return vc.rawValueOffset[:byteLength], nil
}
_, err = vc.rs.Seek(int64(vc.valueOffset), io.SeekStart)
log.PanicIf(err)
rawBytes = make([]byte, vc.unitCount*unitSizeRaw)
_, err = io.ReadFull(vc.rs, rawBytes)
log.PanicIf(err)
return rawBytes, nil
}
// GetFarOffset returns the offset if the value is not embedded [within the
// pointer itself] or an error if an embedded value.
func (vc *ValueContext) GetFarOffset() (offset uint32, err error) {
if vc.isEmbedded() == true {
return 0, ErrNotFarValue
}
return vc.valueOffset, nil
}
// ReadRawEncoded returns the encoded bytes for the value that we represent.
func (vc *ValueContext) ReadRawEncoded() (rawBytes []byte, err error) {
// TODO(dustin): Remove this method and rename readRawEncoded in its place.
return vc.readRawEncoded()
}
// Format returns a string representation for the value.
//
// Where the type is not ASCII, `justFirst` indicates whether to just stringify
// the first item in the slice (or return an empty string if the slice is
// empty).
//
// Since this method lacks the information to process undefined-type tags (e.g.
// byte-order, tag-ID, IFD type), it will return an error if attempted. See
// `Undefined()`.
func (vc *ValueContext) Format() (value string, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
rawBytes, err := vc.readRawEncoded()
log.PanicIf(err)
phrase, err := FormatFromBytes(rawBytes, vc.effectiveValueType(), false, vc.byteOrder)
log.PanicIf(err)
return phrase, nil
}
// FormatFirst is similar to `Format` but only gets and stringifies the first
// item.
func (vc *ValueContext) FormatFirst() (value string, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
rawBytes, err := vc.readRawEncoded()
log.PanicIf(err)
phrase, err := FormatFromBytes(rawBytes, vc.tagType, true, vc.byteOrder)
log.PanicIf(err)
return phrase, nil
}
// ReadBytes parses the encoded byte-array from the value-context.
func (vc *ValueContext) ReadBytes() (value []byte, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
rawValue, err := vc.readRawEncoded()
log.PanicIf(err)
value, err = parser.ParseBytes(rawValue, vc.unitCount)
log.PanicIf(err)
return value, nil
}
// ReadAscii parses the encoded NUL-terminated ASCII string from the value-
// context.
func (vc *ValueContext) ReadAscii() (value string, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
rawValue, err := vc.readRawEncoded()
log.PanicIf(err)
value, err = parser.ParseAscii(rawValue, vc.unitCount)
log.PanicIf(err)
return value, nil
}
// ReadAsciiNoNul parses the non-NUL-terminated encoded ASCII string from the
// value-context.
func (vc *ValueContext) ReadAsciiNoNul() (value string, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
rawValue, err := vc.readRawEncoded()
log.PanicIf(err)
value, err = parser.ParseAsciiNoNul(rawValue, vc.unitCount)
log.PanicIf(err)
return value, nil
}
// ReadShorts parses the list of encoded shorts from the value-context.
func (vc *ValueContext) ReadShorts() (value []uint16, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
rawValue, err := vc.readRawEncoded()
log.PanicIf(err)
value, err = parser.ParseShorts(rawValue, vc.unitCount, vc.byteOrder)
log.PanicIf(err)
return value, nil
}
// ReadLongs parses the list of encoded, unsigned longs from the value-context.
func (vc *ValueContext) ReadLongs() (value []uint32, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
rawValue, err := vc.readRawEncoded()
log.PanicIf(err)
value, err = parser.ParseLongs(rawValue, vc.unitCount, vc.byteOrder)
log.PanicIf(err)
return value, nil
}
// ReadFloats parses the list of encoded floats from the value-context.
func (vc *ValueContext) ReadFloats() (value []float32, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
rawValue, err := vc.readRawEncoded()
log.PanicIf(err)
value, err = parser.ParseFloats(rawValue, vc.unitCount, vc.byteOrder)
log.PanicIf(err)
return value, nil
}
// ReadDoubles parses the list of encoded doubles from the value-context.
func (vc *ValueContext) ReadDoubles() (value []float64, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
rawValue, err := vc.readRawEncoded()
log.PanicIf(err)
value, err = parser.ParseDoubles(rawValue, vc.unitCount, vc.byteOrder)
log.PanicIf(err)
return value, nil
}
// ReadRationals parses the list of encoded, unsigned rationals from the value-
// context.
func (vc *ValueContext) ReadRationals() (value []Rational, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
rawValue, err := vc.readRawEncoded()
log.PanicIf(err)
value, err = parser.ParseRationals(rawValue, vc.unitCount, vc.byteOrder)
log.PanicIf(err)
return value, nil
}
// ReadSignedLongs parses the list of encoded, signed longs from the value-context.
func (vc *ValueContext) ReadSignedLongs() (value []int32, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
rawValue, err := vc.readRawEncoded()
log.PanicIf(err)
value, err = parser.ParseSignedLongs(rawValue, vc.unitCount, vc.byteOrder)
log.PanicIf(err)
return value, nil
}
// ReadSignedRationals parses the list of encoded, signed rationals from the
// value-context.
func (vc *ValueContext) ReadSignedRationals() (value []SignedRational, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
rawValue, err := vc.readRawEncoded()
log.PanicIf(err)
value, err = parser.ParseSignedRationals(rawValue, vc.unitCount, vc.byteOrder)
log.PanicIf(err)
return value, nil
}
// Values knows how to resolve the given value. This value is always a list
// (undefined-values aside), so we're named accordingly.
//
// Since this method lacks the information to process unknown-type tags (e.g.
// byte-order, tag-ID, IFD type), it will return an error if attempted. See
// `Undefined()`.
func (vc *ValueContext) Values() (values interface{}, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
if vc.tagType == TypeByte {
values, err = vc.ReadBytes()
log.PanicIf(err)
} else if vc.tagType == TypeAscii {
values, err = vc.ReadAscii()
log.PanicIf(err)
} else if vc.tagType == TypeAsciiNoNul {
values, err = vc.ReadAsciiNoNul()
log.PanicIf(err)
} else if vc.tagType == TypeShort {
values, err = vc.ReadShorts()
log.PanicIf(err)
} else if vc.tagType == TypeLong {
values, err = vc.ReadLongs()
log.PanicIf(err)
} else if vc.tagType == TypeRational {
values, err = vc.ReadRationals()
log.PanicIf(err)
} else if vc.tagType == TypeSignedLong {
values, err = vc.ReadSignedLongs()
log.PanicIf(err)
} else if vc.tagType == TypeSignedRational {
values, err = vc.ReadSignedRationals()
log.PanicIf(err)
} else if vc.tagType == TypeFloat {
values, err = vc.ReadFloats()
log.PanicIf(err)
} else if vc.tagType == TypeDouble {
values, err = vc.ReadDoubles()
log.PanicIf(err)
} else if vc.tagType == TypeUndefined {
log.Panicf("will not parse undefined-type value")
// Never called.
return nil, nil
} else {
log.Panicf("value of type [%s] is unparseable", vc.tagType)
// Never called.
return nil, nil
}
return values, nil
}
func init() {
parser = new(Parser)
}
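
A hedged sketch of the ValueContext API above (not part of the diff): small values are embedded directly in the four offset bytes, so no backing-stream read is needed here.

package main

import (
    "bytes"
    "encoding/binary"
    "fmt"

    exifcommon "github.com/dsoprea/go-exif/v3/common"
)

func main() {
    // Two big-endian SHORTs (42 and 43) embedded in the raw offset bytes.
    rawValueOffset := []byte{0x00, 0x2a, 0x00, 0x2b}

    vc := exifcommon.NewValueContext(
        "IFD", 0x0100, 2, 0, rawValueOffset, bytes.NewReader(nil),
        exifcommon.TypeShort, binary.BigEndian)

    values, err := vc.Values()
    if err != nil {
        panic(err)
    }

    fmt.Println(values) // [42 43]
}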

273
vendor/github.com/dsoprea/go-exif/v3/common/value_encoder.go generated vendored Normal file

@@ -0,0 +1,273 @@
package exifcommon
import (
"bytes"
"math"
"reflect"
"time"
"encoding/binary"
"github.com/dsoprea/go-logging"
)
var (
typeEncodeLogger = log.NewLogger("exif.type_encode")
)
// EncodedData encapsulates the compound output of an encoding operation.
type EncodedData struct {
Type TagTypePrimitive
Encoded []byte
// TODO(dustin): Is this really necessary? We might have this just to correlate to the incoming stream format (raw bytes and a unit-count both for incoming and outgoing).
UnitCount uint32
}
// ValueEncoder knows how to encode values of every type to bytes.
type ValueEncoder struct {
byteOrder binary.ByteOrder
}
// NewValueEncoder returns a new ValueEncoder.
func NewValueEncoder(byteOrder binary.ByteOrder) *ValueEncoder {
return &ValueEncoder{
byteOrder: byteOrder,
}
}
func (ve *ValueEncoder) encodeBytes(value []uint8) (ed EncodedData, err error) {
ed.Type = TypeByte
ed.Encoded = []byte(value)
ed.UnitCount = uint32(len(value))
return ed, nil
}
func (ve *ValueEncoder) encodeAscii(value string) (ed EncodedData, err error) {
ed.Type = TypeAscii
ed.Encoded = []byte(value)
ed.Encoded = append(ed.Encoded, 0)
ed.UnitCount = uint32(len(ed.Encoded))
return ed, nil
}
// encodeAsciiNoNul returns a string encoded as a byte-string without a trailing
// NUL byte.
//
// Note that:
//
// 1. This type can not be automatically encoded using `Encode()`. The default
// mode is to encode *with* a trailing NUL byte using `encodeAscii`. Only
// certain undefined-type tags use an unterminated ASCII string, and these
// are exceptional in nature.
//
// 2. The presence of this method allows us to completely test the complementary
// no-nul parser.
//
func (ve *ValueEncoder) encodeAsciiNoNul(value string) (ed EncodedData, err error) {
ed.Type = TypeAsciiNoNul
ed.Encoded = []byte(value)
ed.UnitCount = uint32(len(ed.Encoded))
return ed, nil
}
func (ve *ValueEncoder) encodeShorts(value []uint16) (ed EncodedData, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
ed.UnitCount = uint32(len(value))
ed.Encoded = make([]byte, ed.UnitCount*2)
for i := uint32(0); i < ed.UnitCount; i++ {
ve.byteOrder.PutUint16(ed.Encoded[i*2:(i+1)*2], value[i])
}
ed.Type = TypeShort
return ed, nil
}
func (ve *ValueEncoder) encodeLongs(value []uint32) (ed EncodedData, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
ed.UnitCount = uint32(len(value))
ed.Encoded = make([]byte, ed.UnitCount*4)
for i := uint32(0); i < ed.UnitCount; i++ {
ve.byteOrder.PutUint32(ed.Encoded[i*4:(i+1)*4], value[i])
}
ed.Type = TypeLong
return ed, nil
}
func (ve *ValueEncoder) encodeFloats(value []float32) (ed EncodedData, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
ed.UnitCount = uint32(len(value))
ed.Encoded = make([]byte, ed.UnitCount*4)
for i := uint32(0); i < ed.UnitCount; i++ {
ve.byteOrder.PutUint32(ed.Encoded[i*4:(i+1)*4], math.Float32bits(value[i]))
}
ed.Type = TypeFloat
return ed, nil
}
func (ve *ValueEncoder) encodeDoubles(value []float64) (ed EncodedData, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
ed.UnitCount = uint32(len(value))
ed.Encoded = make([]byte, ed.UnitCount*8)
for i := uint32(0); i < ed.UnitCount; i++ {
ve.byteOrder.PutUint64(ed.Encoded[i*8:(i+1)*8], math.Float64bits(value[i]))
}
ed.Type = TypeDouble
return ed, nil
}
func (ve *ValueEncoder) encodeRationals(value []Rational) (ed EncodedData, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
ed.UnitCount = uint32(len(value))
ed.Encoded = make([]byte, ed.UnitCount*8)
for i := uint32(0); i < ed.UnitCount; i++ {
ve.byteOrder.PutUint32(ed.Encoded[i*8+0:i*8+4], value[i].Numerator)
ve.byteOrder.PutUint32(ed.Encoded[i*8+4:i*8+8], value[i].Denominator)
}
ed.Type = TypeRational
return ed, nil
}
func (ve *ValueEncoder) encodeSignedLongs(value []int32) (ed EncodedData, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
ed.UnitCount = uint32(len(value))
b := bytes.NewBuffer(make([]byte, 0, 8*ed.UnitCount))
for i := uint32(0); i < ed.UnitCount; i++ {
err := binary.Write(b, ve.byteOrder, value[i])
log.PanicIf(err)
}
ed.Type = TypeSignedLong
ed.Encoded = b.Bytes()
return ed, nil
}
func (ve *ValueEncoder) encodeSignedRationals(value []SignedRational) (ed EncodedData, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
ed.UnitCount = uint32(len(value))
b := bytes.NewBuffer(make([]byte, 0, 8*ed.UnitCount))
for i := uint32(0); i < ed.UnitCount; i++ {
err := binary.Write(b, ve.byteOrder, value[i].Numerator)
log.PanicIf(err)
err = binary.Write(b, ve.byteOrder, value[i].Denominator)
log.PanicIf(err)
}
ed.Type = TypeSignedRational
ed.Encoded = b.Bytes()
return ed, nil
}
// Encode returns bytes for the given value, inferring type from the actual
// value. This does not support `TypeAsciiNoNul` (all strings are encoded as
// `TypeAscii`).
func (ve *ValueEncoder) Encode(value interface{}) (ed EncodedData, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
switch t := value.(type) {
case []byte:
ed, err = ve.encodeBytes(t)
log.PanicIf(err)
case string:
ed, err = ve.encodeAscii(t)
log.PanicIf(err)
case []uint16:
ed, err = ve.encodeShorts(t)
log.PanicIf(err)
case []uint32:
ed, err = ve.encodeLongs(t)
log.PanicIf(err)
case []float32:
ed, err = ve.encodeFloats(t)
log.PanicIf(err)
case []float64:
ed, err = ve.encodeDoubles(t)
log.PanicIf(err)
case []Rational:
ed, err = ve.encodeRationals(t)
log.PanicIf(err)
case []int32:
ed, err = ve.encodeSignedLongs(t)
log.PanicIf(err)
case []SignedRational:
ed, err = ve.encodeSignedRationals(t)
log.PanicIf(err)
case time.Time:
// For convenience, if the user doesn't want to deal with translation
// semantics with timestamps.
s := ExifFullTimestampString(t)
ed, err = ve.encodeAscii(s)
log.PanicIf(err)
default:
log.Panicf("value not encodable: [%s] [%v]", reflect.TypeOf(value), value)
}
return ed, nil
}
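Not part of the vendored file, but a minimal sketch of how the type inference above plays out, assuming the exported `NewValueEncoder` constructor from this package:

package main

import (
	"encoding/binary"
	"fmt"

	"github.com/dsoprea/go-exif/v3/common"
)

func main() {
	ve := exifcommon.NewValueEncoder(binary.BigEndian)

	// A []uint16 is routed to encodeShorts: two bytes per unit, big-endian.
	ed, err := ve.Encode([]uint16{1, 2})
	if err != nil {
		panic(err)
	}

	fmt.Printf("%v (%d units): % x\n", ed.Type, ed.UnitCount, ed.Encoded)
	// SHORT (2 units): 00 01 00 02
}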

50
vendor/github.com/dsoprea/go-exif/v3/data_layer.go generated vendored Normal file
View File

@ -0,0 +1,50 @@
package exif
import (
"io"
"github.com/dsoprea/go-logging"
"github.com/dsoprea/go-utility/v2/filesystem"
)
// ExifBlobSeeker knows how to produce a new ReadSeeker into the EXIF data,
// positioned at the given offset.
type ExifBlobSeeker interface {
GetReadSeeker(initialOffset int64) (rs io.ReadSeeker, err error)
}
// ExifReadSeeker knows how to retrieve data from the EXIF blob relative to the
// beginning of the blob (so, absolute position (0) is the first byte of the
// EXIF data).
type ExifReadSeeker struct {
rs io.ReadSeeker
}
// NewExifReadSeeker returns a new ExifReadSeeker wrapping the given stream.
func NewExifReadSeeker(rs io.ReadSeeker) *ExifReadSeeker {
return &ExifReadSeeker{
rs: rs,
}
}
// NewExifReadSeekerWithBytes returns a new ExifReadSeeker backed by an
// in-memory seekable buffer over the given EXIF data.
func NewExifReadSeekerWithBytes(exifData []byte) *ExifReadSeeker {
sb := rifs.NewSeekableBufferWithBytes(exifData)
edbs := NewExifReadSeeker(sb)
return edbs
}
// GetReadSeeker forks a new ReadSeeker that wraps a BouncebackReader so that
// it maintains its own position in the stream.
func (edbs *ExifReadSeeker) GetReadSeeker(initialOffset int64) (rs io.ReadSeeker, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
br, err := rifs.NewBouncebackReader(edbs.rs)
log.PanicIf(err)
_, err = br.Seek(initialOffset, io.SeekStart)
log.PanicIf(err)
return br, nil
}
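Again not part of the vendored file: a small sketch showing that each GetReadSeeker() fork maintains its own position over the shared buffer (the data here is arbitrary filler, not real EXIF):

package main

import (
	"fmt"
	"io"

	exif "github.com/dsoprea/go-exif/v3"
)

func main() {
	ers := exif.NewExifReadSeekerWithBytes([]byte("0123456789"))

	// Two independent forks over the same underlying buffer.
	a, _ := ers.GetReadSeeker(0)
	b, _ := ers.GetReadSeeker(6)

	bufA := make([]byte, 4)
	bufB := make([]byte, 4)

	io.ReadFull(a, bufA)
	io.ReadFull(b, bufB)

	fmt.Printf("%s %s\n", bufA, bufB)
	// 0123 6789
}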

14
vendor/github.com/dsoprea/go-exif/v3/error.go generated vendored Normal file
View File

@ -0,0 +1,14 @@
package exif
import (
"errors"
)
var (
// ErrTagNotFound indicates that the tag was not found.
ErrTagNotFound = errors.New("tag not found")
// ErrTagNotKnown indicates that the tag is not registered with us as a
// known tag.
ErrTagNotKnown = errors.New("tag is not known")
)

333
vendor/github.com/dsoprea/go-exif/v3/exif.go generated vendored Normal file
View File

@ -0,0 +1,333 @@
package exif
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"os"
"encoding/binary"
"io/ioutil"
"github.com/dsoprea/go-logging"
"github.com/dsoprea/go-exif/v3/common"
)
const (
// ExifAddressableAreaStart is the absolute offset in the file that all
// offsets are relative to.
ExifAddressableAreaStart = uint32(0x0)
// ExifDefaultFirstIfdOffset is essentially the number of bytes in addition
// to `ExifAddressableAreaStart` that you have to move in order to escape
// the rest of the header and get to the earliest point where we can put
// stuff (which has to be the first IFD). This is the size of the header
// sequence containing the two-character byte-order, two-character fixed-
// bytes, and the four bytes describing the first-IFD offset.
ExifDefaultFirstIfdOffset = uint32(2 + 2 + 4)
)
const (
// ExifSignatureLength is the number of bytes in the EXIF signature (which
// customarily includes the first IFD offset).
ExifSignatureLength = 8
)
var (
exifLogger = log.NewLogger("exif.exif")
ExifBigEndianSignature = [4]byte{'M', 'M', 0x00, 0x2a}
ExifLittleEndianSignature = [4]byte{'I', 'I', 0x2a, 0x00}
)
var (
ErrNoExif = errors.New("no exif data")
ErrExifHeaderError = errors.New("exif header error")
)
// SearchAndExtractExif searches for an EXIF blob in the byte-slice.
func SearchAndExtractExif(data []byte) (rawExif []byte, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
b := bytes.NewBuffer(data)
rawExif, err = SearchAndExtractExifWithReader(b)
if err != nil {
if err == ErrNoExif {
return nil, err
}
log.Panic(err)
}
return rawExif, nil
}
// SearchAndExtractExifN searches for an EXIF blob in the byte-slice, but skips
// the given number of EXIF blocks first. This is a forensics tool that helps
// identify multiple EXIF blocks in a file.
func SearchAndExtractExifN(data []byte, n int) (rawExif []byte, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
skips := 0
totalDiscarded := 0
for {
b := bytes.NewBuffer(data)
var discarded int
rawExif, discarded, err = searchAndExtractExifWithReaderWithDiscarded(b)
if err != nil {
if err == ErrNoExif {
return nil, err
}
log.Panic(err)
}
exifLogger.Debugf(nil, "Read EXIF block (%d).", skips)
totalDiscarded += discarded
if skips >= n {
exifLogger.Debugf(nil, "Reached requested EXIF block (%d).", n)
break
}
nextOffset := discarded + 1
exifLogger.Debugf(nil, "Skipping EXIF block (%d) by seeking to position (%d).", skips, nextOffset)
data = data[nextOffset:]
skips++
}
exifLogger.Debugf(nil, "Found EXIF blob (%d) bytes from initial position.", totalDiscarded)
return rawExif, nil
}
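// As a usage sketch (hedged; `data` is the raw contents of the file being
// inspected): the second EXIF block, if any, would be retrieved with
//
//	rawExif, err := SearchAndExtractExifN(data, 1)
//
// and an `n` of (0) behaves like SearchAndExtractExif().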
// searchAndExtractExifWithReaderWithDiscarded searches for an EXIF blob using
// an `io.Reader`. We can't know how long the EXIF data is without parsing it,
// so this will likely grab up a lot of the image-data, too.
//
// This function also returns the count of preceding bytes that were discarded
// before the EXIF signature was found.
func searchAndExtractExifWithReaderWithDiscarded(r io.Reader) (rawExif []byte, discarded int, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
// Search for the beginning of the EXIF information. The EXIF is near the
// beginning of most JPEGs, so this likely doesn't have a high cost (at
// least, again, with JPEGs).
br := bufio.NewReader(r)
for {
window, err := br.Peek(ExifSignatureLength)
if err != nil {
if err == io.EOF {
return nil, 0, ErrNoExif
}
log.Panic(err)
}
_, err = ParseExifHeader(window)
if err != nil {
if log.Is(err, ErrNoExif) == true {
// No EXIF. Move forward by one byte.
_, err := br.Discard(1)
log.PanicIf(err)
discarded++
continue
}
// Some other error.
log.Panic(err)
}
break
}
exifLogger.Debugf(nil, "Found EXIF blob (%d) bytes from initial position.", discarded)
rawExif, err = ioutil.ReadAll(br)
log.PanicIf(err)
return rawExif, discarded, nil
}
// RELEASE(dustin): We should replace the implementation of SearchAndExtractExifWithReader with searchAndExtractExifWithReaderWithDiscarded and drop the latter.
// SearchAndExtractExifWithReader searches for an EXIF blob using an
// `io.Reader`. We can't know how long the EXIF data is without parsing it, so
// this will likely grab up a lot of the image-data, too.
func SearchAndExtractExifWithReader(r io.Reader) (rawExif []byte, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
rawExif, _, err = searchAndExtractExifWithReaderWithDiscarded(r)
if err != nil {
if err == ErrNoExif {
return nil, err
}
log.Panic(err)
}
return rawExif, nil
}
// SearchFileAndExtractExif returns a slice from the beginning of the EXIF data
// to the end of the file (it's not practical to try and calculate where the
// data actually ends).
func SearchFileAndExtractExif(filepath string) (rawExif []byte, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
// Open the file.
f, err := os.Open(filepath)
log.PanicIf(err)
defer f.Close()
rawExif, err = SearchAndExtractExifWithReader(f)
log.PanicIf(err)
return rawExif, nil
}
type ExifHeader struct {
ByteOrder binary.ByteOrder
FirstIfdOffset uint32
}
func (eh ExifHeader) String() string {
return fmt.Sprintf("ExifHeader<BYTE-ORDER=[%v] FIRST-IFD-OFFSET=(0x%02x)>", eh.ByteOrder, eh.FirstIfdOffset)
}
// ParseExifHeader parses the bytes at the very top of the header.
//
// This will return ErrNoExif on any data errors so that we can double as an
// EXIF-detection routine.
func ParseExifHeader(data []byte) (eh ExifHeader, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
// Good reference:
//
// CIPA DC-008-2016; JEITA CP-3451D
// -> http://www.cipa.jp/std/documents/e/DC-008-Translation-2016-E.pdf
if len(data) < ExifSignatureLength {
exifLogger.Warningf(nil, "Not enough data for EXIF header: (%d)", len(data))
return eh, ErrNoExif
}
if bytes.Equal(data[:4], ExifBigEndianSignature[:]) == true {
exifLogger.Debugf(nil, "Byte-order is big-endian.")
eh.ByteOrder = binary.BigEndian
} else if bytes.Equal(data[:4], ExifLittleEndianSignature[:]) == true {
eh.ByteOrder = binary.LittleEndian
exifLogger.Debugf(nil, "Byte-order is little-endian.")
} else {
return eh, ErrNoExif
}
eh.FirstIfdOffset = eh.ByteOrder.Uint32(data[4:8])
return eh, nil
}
// Visit recursively invokes a callback for every tag.
func Visit(rootIfdIdentity *exifcommon.IfdIdentity, ifdMapping *exifcommon.IfdMapping, tagIndex *TagIndex, exifData []byte, visitor TagVisitorFn, so *ScanOptions) (eh ExifHeader, furthestOffset uint32, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
eh, err = ParseExifHeader(exifData)
log.PanicIf(err)
ebs := NewExifReadSeekerWithBytes(exifData)
ie := NewIfdEnumerate(ifdMapping, tagIndex, ebs, eh.ByteOrder)
_, err = ie.Scan(rootIfdIdentity, eh.FirstIfdOffset, visitor, so)
log.PanicIf(err)
furthestOffset = ie.FurthestOffset()
return eh, furthestOffset, nil
}
// Collect recursively builds a static structure of all IFDs and tags.
func Collect(ifdMapping *exifcommon.IfdMapping, tagIndex *TagIndex, exifData []byte) (eh ExifHeader, index IfdIndex, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
eh, err = ParseExifHeader(exifData)
log.PanicIf(err)
ebs := NewExifReadSeekerWithBytes(exifData)
ie := NewIfdEnumerate(ifdMapping, tagIndex, ebs, eh.ByteOrder)
index, err = ie.Collect(eh.FirstIfdOffset)
log.PanicIf(err)
return eh, index, nil
}
// BuildExifHeader constructs the bytes that go at the front of the stream.
func BuildExifHeader(byteOrder binary.ByteOrder, firstIfdOffset uint32) (headerBytes []byte, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
b := new(bytes.Buffer)
var signatureBytes []byte
if byteOrder == binary.BigEndian {
signatureBytes = ExifBigEndianSignature[:]
} else {
signatureBytes = ExifLittleEndianSignature[:]
}
_, err = b.Write(signatureBytes)
log.PanicIf(err)
err = binary.Write(b, byteOrder, firstIfdOffset)
log.PanicIf(err)
return b.Bytes(), nil
}
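A minimal round-trip sketch of the header helpers above (not part of the vendored file; error handling is abbreviated):

package main

import (
	"encoding/binary"
	"fmt"

	exif "github.com/dsoprea/go-exif/v3"
)

func main() {
	// "MM", the 0x002a fixed-bytes, and then the first-IFD offset (8).
	headerBytes, err := exif.BuildExifHeader(binary.BigEndian, exif.ExifDefaultFirstIfdOffset)
	if err != nil {
		panic(err)
	}

	// Parsing recovers the byte order and the offset.
	eh, err := exif.ParseExifHeader(headerBytes)
	if err != nil {
		panic(err)
	}

	fmt.Println(eh)
	// ExifHeader<BYTE-ORDER=[BigEndian] FIRST-IFD-OFFSET=(0x08)>
}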

117
vendor/github.com/dsoprea/go-exif/v3/gps.go generated vendored Normal file
View File

@ -0,0 +1,117 @@
package exif
import (
"errors"
"fmt"
"time"
"github.com/dsoprea/go-logging"
"github.com/golang/geo/s2"
"github.com/dsoprea/go-exif/v3/common"
)
var (
// ErrGpsCoordinatesNotValid means that some part of the geographic data was
// unparseable.
ErrGpsCoordinatesNotValid = errors.New("GPS coordinates not valid")
)
// GpsDegrees is a high-level struct representing geographic data.
type GpsDegrees struct {
// Orientation describes the N/E/S/W direction that this position is
// relative to.
Orientation byte
// Degrees is a simple float representing the underlying rational degrees
// amount.
Degrees float64
// Minutes is a simple float representing the underlying rational minutes
// amount.
Minutes float64
// Seconds is a simple float representing the underlying rational seconds
// amount.
Seconds float64
}
// NewGpsDegreesFromRationals returns a GpsDegrees struct given the EXIF-encoded
// information. The refValue is the N/E/S/W direction that this position is
// relative to.
func NewGpsDegreesFromRationals(refValue string, rawCoordinate []exifcommon.Rational) (gd GpsDegrees, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
if len(rawCoordinate) != 3 {
log.Panicf("new GpsDegrees struct requires a raw-coordinate with exactly three rationals")
}
gd = GpsDegrees{
Orientation: refValue[0],
Degrees: float64(rawCoordinate[0].Numerator) / float64(rawCoordinate[0].Denominator),
Minutes: float64(rawCoordinate[1].Numerator) / float64(rawCoordinate[1].Denominator),
Seconds: float64(rawCoordinate[2].Numerator) / float64(rawCoordinate[2].Denominator),
}
return gd, nil
}
// String returns a descriptive string.
func (d GpsDegrees) String() string {
return fmt.Sprintf("Degrees<O=[%s] D=(%g) M=(%g) S=(%g)>", string([]byte{d.Orientation}), d.Degrees, d.Minutes, d.Seconds)
}
// Decimal calculates and returns the simplified float representation of the
// component degrees.
func (d GpsDegrees) Decimal() float64 {
decimal := float64(d.Degrees) + float64(d.Minutes)/60.0 + float64(d.Seconds)/3600.0
if d.Orientation == 'S' || d.Orientation == 'W' {
return -decimal
}
return decimal
}
// Raw returns a Rational slice that can be used to *write* coordinates. In
// practice, the denominators are typically (1) in the original EXIF data, and,
// that being the case, this will best preserve precision.
func (d GpsDegrees) Raw() []exifcommon.Rational {
return []exifcommon.Rational{
{Numerator: uint32(d.Degrees), Denominator: 1},
{Numerator: uint32(d.Minutes), Denominator: 1},
{Numerator: uint32(d.Seconds), Denominator: 1},
}
}
// GpsInfo encapsulates all of the geographic information in one place.
type GpsInfo struct {
Latitude, Longitude GpsDegrees
Altitude int
Timestamp time.Time
}
// String returns a descriptive string.
func (gi *GpsInfo) String() string {
return fmt.Sprintf("GpsInfo<LAT=(%.05f) LON=(%.05f) ALT=(%d) TIME=[%s]>",
gi.Latitude.Decimal(), gi.Longitude.Decimal(), gi.Altitude, gi.Timestamp)
}
// S2CellId returns the cell-ID of the geographic location on the earth.
func (gi *GpsInfo) S2CellId() s2.CellID {
latitude := gi.Latitude.Decimal()
longitude := gi.Longitude.Decimal()
ll := s2.LatLngFromDegrees(latitude, longitude)
cellId := s2.CellIDFromLatLng(ll)
if cellId.IsValid() == false {
panic(ErrGpsCoordinatesNotValid)
}
return cellId
}
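A worked example of the Decimal() arithmetic (a sketch; the rationals mimic typical camera output, where denominators are (1)):

package main

import (
	"fmt"

	exif "github.com/dsoprea/go-exif/v3"
	"github.com/dsoprea/go-exif/v3/common"
)

func main() {
	// 26 degrees, 35 minutes, 12 seconds, south of the equator.
	gd, err := exif.NewGpsDegreesFromRationals("S", []exifcommon.Rational{
		{Numerator: 26, Denominator: 1},
		{Numerator: 35, Denominator: 1},
		{Numerator: 12, Denominator: 1},
	})
	if err != nil {
		panic(err)
	}

	// 26 + 35/60 + 12/3600 = 26.586667, negated for a southern latitude.
	fmt.Printf("%.6f\n", gd.Decimal())
	// -26.586667
}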

1199
vendor/github.com/dsoprea/go-exif/v3/ifd_builder.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

532
vendor/github.com/dsoprea/go-exif/v3/ifd_builder_encode.go generated vendored Normal file
View File

@ -0,0 +1,532 @@
package exif
import (
"bytes"
"fmt"
"strings"
"encoding/binary"
"github.com/dsoprea/go-logging"
"github.com/dsoprea/go-exif/v3/common"
)
const (
// Tag-ID + Tag-Type + Unit-Count + Value/Offset.
IfdTagEntrySize = uint32(2 + 2 + 4 + 4)
)
// ByteWriter serializes primitive values to a buffer in a fixed byte order.
type ByteWriter struct {
b *bytes.Buffer
byteOrder binary.ByteOrder
}
func NewByteWriter(b *bytes.Buffer, byteOrder binary.ByteOrder) (bw *ByteWriter) {
return &ByteWriter{
b: b,
byteOrder: byteOrder,
}
}
func (bw ByteWriter) writeAsBytes(value interface{}) (err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
err = binary.Write(bw.b, bw.byteOrder, value)
log.PanicIf(err)
return nil
}
func (bw ByteWriter) WriteUint32(value uint32) (err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
err = bw.writeAsBytes(value)
log.PanicIf(err)
return nil
}
func (bw ByteWriter) WriteUint16(value uint16) (err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
err = bw.writeAsBytes(value)
log.PanicIf(err)
return nil
}
func (bw ByteWriter) WriteFourBytes(value []byte) (err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
len_ := len(value)
if len_ != 4 {
log.Panicf("value is not four-bytes: (%d)", len_)
}
_, err = bw.b.Write(value)
log.PanicIf(err)
return nil
}
// ifdDataAllocator keeps track of where the next data block should be written
// by noting where the offsets start, collecting the data that has been added,
// and bumping the offset *when* the data is added.
type ifdDataAllocator struct {
offset uint32
b bytes.Buffer
}
func newIfdDataAllocator(ifdDataAddressableOffset uint32) *ifdDataAllocator {
return &ifdDataAllocator{
offset: ifdDataAddressableOffset,
}
}
func (ida *ifdDataAllocator) Allocate(value []byte) (offset uint32, err error) {
_, err = ida.b.Write(value)
log.PanicIf(err)
offset = ida.offset
ida.offset += uint32(len(value))
return offset, nil
}
func (ida *ifdDataAllocator) NextOffset() uint32 {
return ida.offset
}
func (ida *ifdDataAllocator) Bytes() []byte {
return ida.b.Bytes()
}
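// For example (a sketch): with an addressable offset of (1000), allocating a
// six-byte value returns offset (1000) and advances NextOffset() to (1006);
// allocating four more bytes then returns (1006) and advances it to (1010).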
// IfdByteEncoder converts an IB to raw bytes (for writing) while also figuring
// out all of the allocations and indirection that is required for extended
// data.
type IfdByteEncoder struct {
// journal holds a list of actions taken while encoding.
journal [][3]string
}
func NewIfdByteEncoder() (ibe *IfdByteEncoder) {
return &IfdByteEncoder{
journal: make([][3]string, 0),
}
}
func (ibe *IfdByteEncoder) Journal() [][3]string {
return ibe.journal
}
func (ibe *IfdByteEncoder) TableSize(entryCount int) uint32 {
// Tag-Count + (Entry-Size * Entry-Count) + Next-IFD-Offset.
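// For example, a ten-entry IFD occupies 2 + (12 * 10) + 4 = 126 bytes.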
return uint32(2) + (IfdTagEntrySize * uint32(entryCount)) + uint32(4)
}
func (ibe *IfdByteEncoder) pushToJournal(where, direction, format string, args ...interface{}) {
event := [3]string{
direction,
where,
fmt.Sprintf(format, args...),
}
ibe.journal = append(ibe.journal, event)
}
// PrintJournal prints a hierarchical representation of the steps taken during
// encoding.
func (ibe *IfdByteEncoder) PrintJournal() {
maxWhereLength := 0
for _, event := range ibe.journal {
where := event[1]
len_ := len(where)
if len_ > maxWhereLength {
maxWhereLength = len_
}
}
level := 0
for i, event := range ibe.journal {
direction := event[0]
where := event[1]
message := event[2]
if direction != ">" && direction != "<" && direction != "-" {
log.Panicf("journal operation not valid: [%s]", direction)
}
if direction == "<" {
if level <= 0 {
log.Panicf("journal operations unbalanced (too many closes)")
}
level--
}
indent := strings.Repeat(" ", level)
fmt.Printf("%3d %s%s %s: %s\n", i, indent, direction, where, message)
if direction == ">" {
level++
}
}
if level != 0 {
log.Panicf("journal operations unbalanced (too many opens)")
}
}
// encodeTagToBytes encodes the given tag to a byte stream. If
// `nextIfdOffsetToWrite` is more than (0), recurse into child IFDs
// (`nextIfdOffsetToWrite` is required so that a child IFD knows where its IFD
// data will be written and, therefore, the offset at which its allocated-data
// block will start, which follows right behind).
func (ibe *IfdByteEncoder) encodeTagToBytes(ib *IfdBuilder, bt *BuilderTag, bw *ByteWriter, ida *ifdDataAllocator, nextIfdOffsetToWrite uint32) (childIfdBlock []byte, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
// Write tag-ID.
err = bw.WriteUint16(bt.tagId)
log.PanicIf(err)
// Works for both values and child IFDs (which have an official size of
// LONG).
err = bw.WriteUint16(uint16(bt.typeId))
log.PanicIf(err)
// Write unit-count.
if bt.value.IsBytes() == true {
effectiveType := bt.typeId
if bt.typeId == exifcommon.TypeUndefined {
effectiveType = exifcommon.TypeByte
}
// It's a non-unknown value. Calculate the count of values of the type that
// we're writing and the raw bytes for the whole list.
typeSize := uint32(effectiveType.Size())
valueBytes := bt.value.Bytes()
len_ := len(valueBytes)
unitCount := uint32(len_) / typeSize
if _, found := tagsWithoutAlignment[bt.tagId]; found == false {
remainder := uint32(len_) % typeSize
if remainder > 0 {
log.Panicf("tag (0x%04x) value of (%d) bytes not evenly divisible by type-size (%d)", bt.tagId, len_, typeSize)
}
}
err = bw.WriteUint32(unitCount)
log.PanicIf(err)
// Write four-byte value/offset.
if len_ > 4 {
offset, err := ida.Allocate(valueBytes)
log.PanicIf(err)
err = bw.WriteUint32(offset)
log.PanicIf(err)
} else {
fourBytes := make([]byte, 4)
copy(fourBytes, valueBytes)
err = bw.WriteFourBytes(fourBytes)
log.PanicIf(err)
}
} else {
if bt.value.IsIb() == false {
log.Panicf("tag value is not a byte-slice but also not a child IB: %v", bt)
}
// Write unit-count (one LONG representing one offset).
err = bw.WriteUint32(1)
log.PanicIf(err)
if nextIfdOffsetToWrite > 0 {
var err error
ibe.pushToJournal("encodeTagToBytes", ">", "[%s]->[%s]", ib.IfdIdentity().UnindexedString(), bt.value.Ib().IfdIdentity().UnindexedString())
// Create the block of IFD data and everything it requires.
childIfdBlock, err = ibe.encodeAndAttachIfd(bt.value.Ib(), nextIfdOffsetToWrite)
log.PanicIf(err)
ibe.pushToJournal("encodeTagToBytes", "<", "[%s]->[%s]", bt.value.Ib().IfdIdentity().UnindexedString(), ib.IfdIdentity().UnindexedString())
// Use the next-IFD offset for it. The IFD will actually get
// attached after we return.
err = bw.WriteUint32(nextIfdOffsetToWrite)
log.PanicIf(err)
} else {
// No child-IFDs are to be allocated. Finish the entry with a NULL
// pointer.
ibe.pushToJournal("encodeTagToBytes", "-", "*Not* descending to child: [%s]", bt.value.Ib().IfdIdentity().UnindexedString())
err = bw.WriteUint32(0)
log.PanicIf(err)
}
}
return childIfdBlock, nil
}
// encodeIfdToBytes encodes the given IB to a byte-slice. We are given the
// offset at which this IFD will be written. This method is called both to
// pre-determine how big the table is going to be (so that we can calculate the
// address to allocate data at) as well as to write the final table.
//
// It is necessary to fully realize the table in order to predetermine its size
// because it is not enough to know the size of the table: If there are child
// IFDs, we will not be able to allocate them without first knowing how much
// data we need to allocate for the current IFD.
func (ibe *IfdByteEncoder) encodeIfdToBytes(ib *IfdBuilder, ifdAddressableOffset uint32, nextIfdOffsetToWrite uint32, setNextIb bool) (data []byte, tableSize uint32, dataSize uint32, childIfdSizes []uint32, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
ibe.pushToJournal("encodeIfdToBytes", ">", "%s", ib)
tableSize = ibe.TableSize(len(ib.tags))
b := new(bytes.Buffer)
bw := NewByteWriter(b, ib.byteOrder)
// Write tag count.
err = bw.WriteUint16(uint16(len(ib.tags)))
log.PanicIf(err)
ida := newIfdDataAllocator(ifdAddressableOffset)
childIfdBlocks := make([][]byte, 0)
// Write raw bytes for each tag entry. Allocate larger data to be referred
// to in the follow-up data-block as required. Any "unknown"-byte tags that
// we can't parse will not be present here (using AddTagsFromExisting(), at
// least).
for _, bt := range ib.tags {
childIfdBlock, err := ibe.encodeTagToBytes(ib, bt, bw, ida, nextIfdOffsetToWrite)
log.PanicIf(err)
if childIfdBlock != nil {
// We aren't allowed to have non-nil child IFDs if we're just
// sizing things up.
if nextIfdOffsetToWrite == 0 {
log.Panicf("no IFD offset provided for child-IFDs; no new child-IFDs permitted")
}
nextIfdOffsetToWrite += uint32(len(childIfdBlock))
childIfdBlocks = append(childIfdBlocks, childIfdBlock)
}
}
dataBytes := ida.Bytes()
dataSize = uint32(len(dataBytes))
childIfdSizes = make([]uint32, len(childIfdBlocks))
childIfdsTotalSize := uint32(0)
for i, childIfdBlock := range childIfdBlocks {
len_ := uint32(len(childIfdBlock))
childIfdSizes[i] = len_
childIfdsTotalSize += len_
}
// Set the link from this IFD to the next IFD that will be written in the
// next cycle.
if setNextIb == true {
// Write address of next IFD in chain. This will be the original
// allocation offset plus the size of everything we have allocated for
// this IFD and its child-IFDs.
//
// It is critical that this number is stepped properly. We experienced
// an issue whereby it first looked like we were duplicating the IFD and
// then that we were duplicating the tags in the wrong IFD, and then
// finally we determined that the next-IFD offset for the first IFD was
// accidentally pointing back to the EXIF IFD, so we were visiting it
// twice when visiting through the tags after decoding. It was an
// expensive bug to find.
ibe.pushToJournal("encodeIfdToBytes", "-", "Setting 'next' IFD to (0x%08x).", nextIfdOffsetToWrite)
err := bw.WriteUint32(nextIfdOffsetToWrite)
log.PanicIf(err)
} else {
err := bw.WriteUint32(0)
log.PanicIf(err)
}
_, err = b.Write(dataBytes)
log.PanicIf(err)
// Append any child IFD blocks after our table and data blocks. These IFDs
// were equipped with the appropriate offset information so it's expected
// that all offsets referred to by these will be correct.
//
// Note that child-IFDs are appended after the current IFD and before the
// next IFD, as opposed to the root IFDs, which are chained together but
// will be interrupted by these child-IFDs (which is expected, per the
// standard).
for _, childIfdBlock := range childIfdBlocks {
_, err = b.Write(childIfdBlock)
log.PanicIf(err)
}
ibe.pushToJournal("encodeIfdToBytes", "<", "%s", ib)
return b.Bytes(), tableSize, dataSize, childIfdSizes, nil
}
// encodeAndAttachIfd is a recursive function that processes the IFD chain.
func (ibe *IfdByteEncoder) encodeAndAttachIfd(ib *IfdBuilder, ifdAddressableOffset uint32) (data []byte, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
ibe.pushToJournal("encodeAndAttachIfd", ">", "%s", ib)
b := new(bytes.Buffer)
i := 0
for thisIb := ib; thisIb != nil; thisIb = thisIb.nextIb {
// Do a dry-run in order to pre-determine its size requirement.
ibe.pushToJournal("encodeAndAttachIfd", ">", "Beginning encoding process: (%d) [%s]", i, thisIb.IfdIdentity().UnindexedString())
ibe.pushToJournal("encodeAndAttachIfd", ">", "Calculating size: (%d) [%s]", i, thisIb.IfdIdentity().UnindexedString())
_, tableSize, allocatedDataSize, _, err := ibe.encodeIfdToBytes(thisIb, ifdAddressableOffset, 0, false)
log.PanicIf(err)
ibe.pushToJournal("encodeAndAttachIfd", "<", "Finished calculating size: (%d) [%s]", i, thisIb.IfdIdentity().UnindexedString())
ifdAddressableOffset += tableSize
nextIfdOffsetToWrite := ifdAddressableOffset + allocatedDataSize
ibe.pushToJournal("encodeAndAttachIfd", ">", "Next IFD will be written at offset (0x%08x)", nextIfdOffsetToWrite)
// Write our IFD as well as any child-IFDs (now that we know the offset
// where new IFDs and their data will be allocated).
setNextIb := thisIb.nextIb != nil
ibe.pushToJournal("encodeAndAttachIfd", ">", "Encoding starting: (%d) [%s] NEXT-IFD-OFFSET-TO-WRITE=(0x%08x)", i, thisIb.IfdIdentity().UnindexedString(), nextIfdOffsetToWrite)
tableAndAllocated, effectiveTableSize, effectiveAllocatedDataSize, childIfdSizes, err :=
ibe.encodeIfdToBytes(thisIb, ifdAddressableOffset, nextIfdOffsetToWrite, setNextIb)
log.PanicIf(err)
if effectiveTableSize != tableSize {
log.Panicf("written table size does not match the pre-calculated table size: (%d) != (%d) %s", effectiveTableSize, tableSize, ib)
} else if effectiveAllocatedDataSize != allocatedDataSize {
log.Panicf("written allocated-data size does not match the pre-calculated allocated-data size: (%d) != (%d) %s", effectiveAllocatedDataSize, allocatedDataSize, ib)
}
ibe.pushToJournal("encodeAndAttachIfd", "<", "Encoding done: (%d) [%s]", i, thisIb.IfdIdentity().UnindexedString())
totalChildIfdSize := uint32(0)
for _, childIfdSize := range childIfdSizes {
totalChildIfdSize += childIfdSize
}
if len(tableAndAllocated) != int(tableSize+allocatedDataSize+totalChildIfdSize) {
log.Panicf("IFD table and data is not a consistent size: (%d) != (%d)", len(tableAndAllocated), tableSize+allocatedDataSize+totalChildIfdSize)
}
// TODO(dustin): We might want to verify the original tableAndAllocated length, too.
_, err = b.Write(tableAndAllocated)
log.PanicIf(err)
// Advance past what we've allocated, thus far.
ifdAddressableOffset += allocatedDataSize + totalChildIfdSize
ibe.pushToJournal("encodeAndAttachIfd", "<", "Finishing encoding process: (%d) [%s] [FINAL:] NEXT-IFD-OFFSET-TO-WRITE=(0x%08x)", i, ib.IfdIdentity().UnindexedString(), nextIfdOffsetToWrite)
i++
}
ibe.pushToJournal("encodeAndAttachIfd", "<", "%s", ib)
return b.Bytes(), nil
}
// EncodeToExifPayload is the base encoding step that transcribes the entire IB
// structure to its on-disk layout.
func (ibe *IfdByteEncoder) EncodeToExifPayload(ib *IfdBuilder) (data []byte, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
data, err = ibe.encodeAndAttachIfd(ib, ExifDefaultFirstIfdOffset)
log.PanicIf(err)
return data, nil
}
// EncodeToExif calls EncodeToExifPayload and then packages the result into a
// complete EXIF block.
func (ibe *IfdByteEncoder) EncodeToExif(ib *IfdBuilder) (data []byte, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
encodedIfds, err := ibe.EncodeToExifPayload(ib)
log.PanicIf(err)
// Wrap the IFD in a formal EXIF block.
b := new(bytes.Buffer)
headerBytes, err := BuildExifHeader(ib.byteOrder, ExifDefaultFirstIfdOffset)
log.PanicIf(err)
_, err = b.Write(headerBytes)
log.PanicIf(err)
_, err = b.Write(encodedIfds)
log.PanicIf(err)
return b.Bytes(), nil
}

1672
vendor/github.com/dsoprea/go-exif/v3/ifd_enumerate.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

298
vendor/github.com/dsoprea/go-exif/v3/ifd_tag_entry.go generated vendored Normal file
View File

@ -0,0 +1,298 @@
package exif
import (
"fmt"
"io"
"encoding/binary"
"github.com/dsoprea/go-logging"
"github.com/dsoprea/go-exif/v3/common"
"github.com/dsoprea/go-exif/v3/undefined"
)
var (
iteLogger = log.NewLogger("exif.ifd_tag_entry")
)
// IfdTagEntry refers to a tag in the loaded EXIF block.
type IfdTagEntry struct {
tagId uint16
tagIndex int
tagType exifcommon.TagTypePrimitive
unitCount uint32
valueOffset uint32
rawValueOffset []byte
// childIfdName is the right most atom in the IFD-path. We need this to
// construct the fully-qualified IFD-path.
childIfdName string
// childIfdPath is the IFD-path of the child if this tag represents a child
// IFD.
childIfdPath string
// childFqIfdPath is the IFD-path of the child if this tag represents a
// child IFD. Includes indices.
childFqIfdPath string
// TODO(dustin): !! IB's host the child-IBs directly in the tag, but that's not the case here. Refactor to accommodate it for a consistent experience.
ifdIdentity *exifcommon.IfdIdentity
isUnhandledUnknown bool
rs io.ReadSeeker
byteOrder binary.ByteOrder
tagName string
}
func newIfdTagEntry(ii *exifcommon.IfdIdentity, tagId uint16, tagIndex int, tagType exifcommon.TagTypePrimitive, unitCount uint32, valueOffset uint32, rawValueOffset []byte, rs io.ReadSeeker, byteOrder binary.ByteOrder) *IfdTagEntry {
return &IfdTagEntry{
ifdIdentity: ii,
tagId: tagId,
tagIndex: tagIndex,
tagType: tagType,
unitCount: unitCount,
valueOffset: valueOffset,
rawValueOffset: rawValueOffset,
rs: rs,
byteOrder: byteOrder,
}
}
// String returns a stringified representation of the struct.
func (ite *IfdTagEntry) String() string {
return fmt.Sprintf("IfdTagEntry<TAG-IFD-PATH=[%s] TAG-ID=(0x%04x) TAG-TYPE=[%s] UNIT-COUNT=(%d)>", ite.ifdIdentity.String(), ite.tagId, ite.tagType.String(), ite.unitCount)
}
// TagName returns the name of the tag. This is determined elsewhere and set
// after the parse (since it's not actually stored in the stream). If it's
// empty, it is because it is an unknown tag (nonstandard or otherwise
// unavailable in the tag-index).
func (ite *IfdTagEntry) TagName() string {
return ite.tagName
}
// setTagName sets the tag-name. This provides the name for convenience and
// efficiency by determining it when most efficient while we're parsing rather
// than delegating it to the caller (or, worse, the user).
func (ite *IfdTagEntry) setTagName(tagName string) {
ite.tagName = tagName
}
// IfdPath returns the fully-qualified path of the IFD that owns this tag.
func (ite *IfdTagEntry) IfdPath() string {
return ite.ifdIdentity.String()
}
// TagId returns the ID of the tag that we represent. The combination of
// (IfdPath(), TagId()) is unique.
func (ite *IfdTagEntry) TagId() uint16 {
return ite.tagId
}
// IsThumbnailOffset returns true if the tag has the IFD and tag-ID of a
// thumbnail offset.
func (ite *IfdTagEntry) IsThumbnailOffset() bool {
return ite.tagId == ThumbnailOffsetTagId && ite.ifdIdentity.String() == ThumbnailFqIfdPath
}
// IsThumbnailSize returns true if the tag has the IFD and tag-ID of a thumbnail
// size.
func (ite *IfdTagEntry) IsThumbnailSize() bool {
return ite.tagId == ThumbnailSizeTagId && ite.ifdIdentity.String() == ThumbnailFqIfdPath
}
// TagType is the type of value for this tag.
func (ite *IfdTagEntry) TagType() exifcommon.TagTypePrimitive {
return ite.tagType
}
// updateTagType sets an alternatively interpreted tag-type.
func (ite *IfdTagEntry) updateTagType(tagType exifcommon.TagTypePrimitive) {
ite.tagType = tagType
}
// UnitCount returns the unit-count of the tag's value.
func (ite *IfdTagEntry) UnitCount() uint32 {
return ite.unitCount
}
// updateUnitCount sets an alternatively interpreted unit-count.
func (ite *IfdTagEntry) updateUnitCount(unitCount uint32) {
ite.unitCount = unitCount
}
// getValueOffset is the four-byte offset converted to an integer to point to
// the location of its value in the EXIF block. The "get" prefix is used in
// order to differentiate the naming of the method from the field.
func (ite *IfdTagEntry) getValueOffset() uint32 {
return ite.valueOffset
}
// GetRawBytes renders a specific list of bytes from the value in this tag.
func (ite *IfdTagEntry) GetRawBytes() (rawBytes []byte, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
valueContext := ite.getValueContext()
if ite.tagType == exifcommon.TypeUndefined {
value, err := exifundefined.Decode(valueContext)
if err != nil {
if err == exifcommon.ErrUnhandledUndefinedTypedTag {
ite.setIsUnhandledUnknown(true)
return nil, exifundefined.ErrUnparseableValue
} else if err == exifundefined.ErrUnparseableValue {
return nil, err
} else {
log.Panic(err)
}
}
// Encode it back, in order to get the raw bytes. This is the best,
// general way to do it with an undefined tag.
rawBytes, _, err := exifundefined.Encode(value, ite.byteOrder)
log.PanicIf(err)
return rawBytes, nil
}
rawBytes, err = valueContext.ReadRawEncoded()
log.PanicIf(err)
return rawBytes, nil
}
// Value returns the specific, parsed, typed value from the tag.
func (ite *IfdTagEntry) Value() (value interface{}, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
valueContext := ite.getValueContext()
if ite.tagType == exifcommon.TypeUndefined {
var err error
value, err = exifundefined.Decode(valueContext)
if err != nil {
if err == exifcommon.ErrUnhandledUndefinedTypedTag || err == exifundefined.ErrUnparseableValue {
return nil, err
}
log.Panic(err)
}
} else {
var err error
value, err = valueContext.Values()
log.PanicIf(err)
}
return value, nil
}
// Format returns the tag's value as a string.
func (ite *IfdTagEntry) Format() (phrase string, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
value, err := ite.Value()
if err != nil {
if err == exifcommon.ErrUnhandledUndefinedTypedTag {
return exifundefined.UnparseableUnknownTagValuePlaceholder, nil
} else if err == exifundefined.ErrUnparseableValue {
return exifundefined.UnparseableHandledTagValuePlaceholder, nil
}
log.Panic(err)
}
phrase, err = exifcommon.FormatFromType(value, false)
log.PanicIf(err)
return phrase, nil
}
// FormatFirst returns the same as Format() but only the first item.
func (ite *IfdTagEntry) FormatFirst() (phrase string, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
// TODO(dustin): We should add a convenience type "timestamp", to simplify translating to and from the physical ASCII and provide validation.
value, err := ite.Value()
if err != nil {
if err == exifcommon.ErrUnhandledUndefinedTypedTag {
return exifundefined.UnparseableUnknownTagValuePlaceholder, nil
}
log.Panic(err)
}
phrase, err = exifcommon.FormatFromType(value, true)
log.PanicIf(err)
return phrase, nil
}
func (ite *IfdTagEntry) setIsUnhandledUnknown(isUnhandledUnknown bool) {
ite.isUnhandledUnknown = isUnhandledUnknown
}
// SetChildIfd sets child-IFD information (if we represent a child IFD).
func (ite *IfdTagEntry) SetChildIfd(ii *exifcommon.IfdIdentity) {
ite.childFqIfdPath = ii.String()
ite.childIfdPath = ii.UnindexedString()
ite.childIfdName = ii.Name()
}
// ChildIfdName returns the name of the child IFD.
func (ite *IfdTagEntry) ChildIfdName() string {
return ite.childIfdName
}
// ChildIfdPath returns the path of the child IFD.
func (ite *IfdTagEntry) ChildIfdPath() string {
return ite.childIfdPath
}
// ChildFqIfdPath returns the complete path of the child IFD along with the
// numeric suffixes differentiating sibling occurrences of the same type. "0"
// indices are omitted.
func (ite *IfdTagEntry) ChildFqIfdPath() string {
return ite.childFqIfdPath
}
// IfdIdentity returns the IfdIdentity associated with this tag.
func (ite *IfdTagEntry) IfdIdentity() *exifcommon.IfdIdentity {
return ite.ifdIdentity
}
func (ite *IfdTagEntry) getValueContext() *exifcommon.ValueContext {
return exifcommon.NewValueContext(
ite.ifdIdentity.String(),
ite.tagId,
ite.unitCount,
ite.valueOffset,
ite.rawValueOffset,
ite.rs,
ite.tagType,
ite.byteOrder)
}
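A hedged sketch of how these accessors are usually reached, assuming the v3 TagVisitorFn signature (which receives just the tag entry) and a `rawExif` blob extracted elsewhere:

im, err := exifcommon.NewIfdMappingWithStandard()
if err != nil {
	panic(err)
}

ti := exif.NewTagIndex()

visitor := func(ite *exif.IfdTagEntry) error {
	// Format() substitutes placeholder strings for unparseable values.
	phrase, err := ite.Format()
	if err != nil {
		return err
	}

	fmt.Printf("[%s] (0x%04x) [%s] = [%s]\n", ite.IfdPath(), ite.TagId(), ite.TagName(), phrase)
	return nil
}

_, _, err = exif.Visit(exifcommon.IfdStandardIfdIdentity, im, ti, rawExif, visitor, nil)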

8
vendor/github.com/dsoprea/go-exif/v3/package.go generated vendored Normal file
View File

@ -0,0 +1,8 @@
// Package exif parses raw EXIF information given a block of raw EXIF data. It
// can also construct new EXIF information, and provides tools for doing so.
// This package is not involved with the parsing of particular file-formats.
//
// The EXIF data must first be extracted and then provided to us. Conversely,
// when constructing new EXIF data, the caller is responsible for packaging
// this in whichever format they require.
package exif

475
vendor/github.com/dsoprea/go-exif/v3/tags.go generated vendored Normal file
View File

@ -0,0 +1,475 @@
package exif
import (
"fmt"
"sync"
"github.com/dsoprea/go-logging"
"gopkg.in/yaml.v2"
"github.com/dsoprea/go-exif/v3/common"
)
const (
// IFD1
// ThumbnailFqIfdPath is the fully-qualified IFD path that the thumbnail
// must be found in.
ThumbnailFqIfdPath = "IFD1"
// ThumbnailOffsetTagId is the tag-ID of the thumbnail offset.
ThumbnailOffsetTagId = 0x0201
// ThumbnailSizeTagId is the tag-ID of the thumbnail size.
ThumbnailSizeTagId = 0x0202
)
const (
// GPS
// TagGpsVersionId is the ID of the GPS version tag.
TagGpsVersionId = 0x0000
// TagLatitudeId is the ID of the GPS latitude tag.
TagLatitudeId = 0x0002
// TagLatitudeRefId is the ID of the GPS latitude orientation tag.
TagLatitudeRefId = 0x0001
// TagLongitudeId is the ID of the GPS longitude tag.
TagLongitudeId = 0x0004
// TagLongitudeRefId is the ID of the GPS longitude-orientation tag.
TagLongitudeRefId = 0x0003
// TagTimestampId is the ID of the GPS time tag.
TagTimestampId = 0x0007
// TagDatestampId is the ID of the GPS date tag.
TagDatestampId = 0x001d
// TagAltitudeId is the ID of the GPS altitude tag.
TagAltitudeId = 0x0006
// TagAltitudeRefId is the ID of the GPS altitude-orientation tag.
TagAltitudeRefId = 0x0005
)
var (
// tagsWithoutAlignment is a tag-lookup for tags whose value size won't
// necessarily be a multiple of its tag-type.
tagsWithoutAlignment = map[uint16]struct{}{
// The thumbnail offset is stored as a long, but its data is a binary
// blob (not a slice of longs).
ThumbnailOffsetTagId: {},
}
)
var (
tagsLogger = log.NewLogger("exif.tags")
)
// File structures.
type encodedTag struct {
// id is signed, here, because YAML doesn't have enough information to
// support unsigned.
Id int `yaml:"id"`
Name string `yaml:"name"`
TypeName string `yaml:"type_name"`
TypeNames []string `yaml:"type_names"`
}
// Indexing structures.
// IndexedTag describes one index lookup result.
type IndexedTag struct {
// Id is the tag-ID.
Id uint16
// Name is the tag name.
Name string
// IfdPath is the proper IFD path of this tag. This is not fully-qualified.
IfdPath string
// SupportedTypes is an unsorted list of allowed tag-types.
SupportedTypes []exifcommon.TagTypePrimitive
}
// String returns a descriptive string.
func (it *IndexedTag) String() string {
return fmt.Sprintf("TAG<ID=(0x%04x) NAME=[%s] IFD=[%s]>", it.Id, it.Name, it.IfdPath)
}
// IsName returns true if this tag matches the given tag name.
func (it *IndexedTag) IsName(ifdPath, name string) bool {
return it.Name == name && it.IfdPath == ifdPath
}
// Is returns true if this tag matches the given tag ID.
func (it *IndexedTag) Is(ifdPath string, id uint16) bool {
return it.Id == id && it.IfdPath == ifdPath
}
// GetEncodingType returns the largest type that this tag's value can occupy.
func (it *IndexedTag) GetEncodingType(value interface{}) exifcommon.TagTypePrimitive {
// For convenience, we handle encoding a `time.Time` directly.
if exifcommon.IsTime(value) == true {
// Timestamps are encoded as ASCII.
value = ""
}
if len(it.SupportedTypes) == 0 {
log.Panicf("IndexedTag [%s] (%d) has no supported types.", it.IfdPath, it.Id)
} else if len(it.SupportedTypes) == 1 {
return it.SupportedTypes[0]
}
supportsLong := false
supportsShort := false
supportsRational := false
supportsSignedRational := false
for _, supportedType := range it.SupportedTypes {
if supportedType == exifcommon.TypeLong {
supportsLong = true
} else if supportedType == exifcommon.TypeShort {
supportsShort = true
} else if supportedType == exifcommon.TypeRational {
supportsRational = true
} else if supportedType == exifcommon.TypeSignedRational {
supportsSignedRational = true
}
}
// We specifically check for the cases that we know to expect.
if supportsLong == true && supportsShort == true {
return exifcommon.TypeLong
} else if supportsRational == true && supportsSignedRational == true {
if value == nil {
log.Panicf("GetEncodingType: require value to be given")
}
if _, ok := value.(exifcommon.SignedRational); ok == true {
return exifcommon.TypeSignedRational
}
return exifcommon.TypeRational
}
log.Panicf("WidestSupportedType() case is not handled for tag [%s] (0x%04x): %v", it.IfdPath, it.Id, it.SupportedTypes)
return 0
}
// DoesSupportType returns true if this tag can be found/decoded with this type.
func (it *IndexedTag) DoesSupportType(tagType exifcommon.TagTypePrimitive) bool {
// This is always a very small collection. So, we keep it unsorted.
for _, thisTagType := range it.SupportedTypes {
if thisTagType == tagType {
return true
}
}
return false
}
// TagIndex is a tag-lookup facility.
type TagIndex struct {
tagsByIfd map[string]map[uint16]*IndexedTag
tagsByIfdR map[string]map[string]*IndexedTag
mutex sync.Mutex
doUniversalSearch bool
}
// NewTagIndex returns a new TagIndex struct.
func NewTagIndex() *TagIndex {
ti := new(TagIndex)
ti.tagsByIfd = make(map[string]map[uint16]*IndexedTag)
ti.tagsByIfdR = make(map[string]map[string]*IndexedTag)
return ti
}
// SetUniversalSearch enables a fallback to matching tags under *any* IFD.
func (ti *TagIndex) SetUniversalSearch(flag bool) {
ti.doUniversalSearch = flag
}
// UniversalSearch returns whether the fallback to matching tags under *any*
// IFD is enabled.
return ti.doUniversalSearch
}
// Add registers a new tag to be recognized during the parse.
func (ti *TagIndex) Add(it *IndexedTag) (err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
ti.mutex.Lock()
defer ti.mutex.Unlock()
// Store by ID.
family, found := ti.tagsByIfd[it.IfdPath]
if found == false {
family = make(map[uint16]*IndexedTag)
ti.tagsByIfd[it.IfdPath] = family
}
if _, found := family[it.Id]; found == true {
log.Panicf("tag-ID defined more than once for IFD [%s]: (%02x)", it.IfdPath, it.Id)
}
family[it.Id] = it
// Store by name.
familyR, found := ti.tagsByIfdR[it.IfdPath]
if found == false {
familyR = make(map[string]*IndexedTag)
ti.tagsByIfdR[it.IfdPath] = familyR
}
if _, found := familyR[it.Name]; found == true {
log.Panicf("tag-name defined more than once for IFD [%s]: (%s)", it.IfdPath, it.Name)
}
familyR[it.Name] = it
return nil
}
func (ti *TagIndex) getOne(ifdPath string, id uint16) (it *IndexedTag, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
if len(ti.tagsByIfd) == 0 {
err := LoadStandardTags(ti)
log.PanicIf(err)
}
ti.mutex.Lock()
defer ti.mutex.Unlock()
family, found := ti.tagsByIfd[ifdPath]
if found == false {
return nil, ErrTagNotFound
}
it, found = family[id]
if found == false {
return nil, ErrTagNotFound
}
return it, nil
}
// Get returns information about the non-IFD tag given a tag ID. `ifdPath` must
// not be fully-qualified.
func (ti *TagIndex) Get(ii *exifcommon.IfdIdentity, id uint16) (it *IndexedTag, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
ifdPath := ii.UnindexedString()
it, err = ti.getOne(ifdPath, id)
if err == nil {
return it, nil
} else if err != ErrTagNotFound {
log.Panic(err)
}
if ti.doUniversalSearch == false {
return nil, ErrTagNotFound
}
// We've been told to fallback to look for the tag in other IFDs.
skipIfdPath := ii.UnindexedString()
for currentIfdPath := range ti.tagsByIfd {
if currentIfdPath == skipIfdPath {
// Skip the primary IFD, which has already been checked.
continue
}
it, err = ti.getOne(currentIfdPath, id)
if err == nil {
tagsLogger.Warningf(nil,
"Found tag (0x%02x) in the wrong IFD: [%s] != [%s]",
id, currentIfdPath, ifdPath)
return it, nil
} else if err != ErrTagNotFound {
log.Panic(err)
}
}
return nil, ErrTagNotFound
}
var (
// tagGuessDefaultIfdIdentities describes which IFDs we'll look for a given
// tag-ID in, if it's not found where it's supposed to be. We suppose that
// Exif-IFD tags might be found in IFD0 or IFD1, or IFD0/IFD1 tags might be
// found in the Exif IFD. This is the only thing we've seen so far. So, this
// is the limit of our guessing.
tagGuessDefaultIfdIdentities = []*exifcommon.IfdIdentity{
exifcommon.IfdExifStandardIfdIdentity,
exifcommon.IfdStandardIfdIdentity,
}
)
// FindFirst looks for the given tag-ID in each of the given IFDs in the given
// order. If `ifdIdentities` is `nil` then a default search order is used. This
// defies the standard, which requires each tag to exist in certain IFDs. This
// is a contingency to accommodate malformed data.
//
// Things *can* end badly here, in that the same tag-ID in different IFDs might
// describe different data and different data-types, and our decode might then
// produce binary and non-printable data.
func (ti *TagIndex) FindFirst(id uint16, typeId exifcommon.TagTypePrimitive, ifdIdentities []*exifcommon.IfdIdentity) (it *IndexedTag, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
if ifdIdentities == nil {
ifdIdentities = tagGuessDefaultIfdIdentities
}
for _, ii := range ifdIdentities {
it, err := ti.Get(ii, id)
if err != nil {
if err == ErrTagNotFound {
continue
}
log.Panic(err)
}
// Even though the tag might be mislocated, the type should still be the
// same. Check this so we don't accidentally end-up on a completely
// irrelevant tag with a totally different data type. This attempts to
// mitigate producing garbage.
for _, supportedType := range it.SupportedTypes {
if supportedType == typeId {
return it, nil
}
}
}
return nil, ErrTagNotFound
}
// GetWithName returns information about the non-IFD tag given a tag name.
func (ti *TagIndex) GetWithName(ii *exifcommon.IfdIdentity, name string) (it *IndexedTag, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
if len(ti.tagsByIfdR) == 0 {
err := LoadStandardTags(ti)
log.PanicIf(err)
}
ifdPath := ii.UnindexedString()
it, found := ti.tagsByIfdR[ifdPath][name]
if found != true {
log.Panic(ErrTagNotFound)
}
return it, nil
}
// LoadStandardTags registers the tags that all devices/applications should
// support.
func LoadStandardTags(ti *TagIndex) (err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
// Read static data.
encodedIfds := make(map[string][]encodedTag)
err = yaml.Unmarshal([]byte(tagsYaml), encodedIfds)
log.PanicIf(err)
// Load structure.
count := 0
for ifdPath, tags := range encodedIfds {
for _, tagInfo := range tags {
tagId := uint16(tagInfo.Id)
tagName := tagInfo.Name
tagTypeName := tagInfo.TypeName
tagTypeNames := tagInfo.TypeNames
if tagTypeNames == nil {
if tagTypeName == "" {
log.Panicf("no tag-types were given when registering standard tag [%s] (0x%04x) [%s]", ifdPath, tagId, tagName)
}
tagTypeNames = []string{
tagTypeName,
}
} else if tagTypeName != "" {
log.Panicf("both 'type_names' and 'type_name' were given when registering standard tag [%s] (0x%04x) [%s]", ifdPath, tagId, tagName)
}
tagTypes := make([]exifcommon.TagTypePrimitive, 0)
for _, tagTypeName := range tagTypeNames {
// TODO(dustin): Discard unsupported types. This helps us with non-standard types that have actually been found in real data, that we ignore for right now. e.g. SSHORT, FLOAT, DOUBLE
tagTypeId, found := exifcommon.GetTypeByName(tagTypeName)
if found == false {
tagsLogger.Warningf(nil, "Type [%s] for tag [%s] being loaded is not valid and is being ignored.", tagTypeName, tagName)
continue
}
tagTypes = append(tagTypes, tagTypeId)
}
if len(tagTypes) == 0 {
tagsLogger.Warningf(nil, "Tag [%s] (0x%04x) [%s] being loaded does not have any supported types and will not be registered.", ifdPath, tagId, tagName)
continue
}
it := &IndexedTag{
IfdPath: ifdPath,
Id: tagId,
Name: tagName,
SupportedTypes: tagTypes,
}
err = ti.Add(it)
log.PanicIf(err)
count++
}
}
tagsLogger.Debugf(nil, "(%d) tags loaded.", count)
return nil
}
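A small sketch of the lookup API (the choice of 0x010f, "Make" under the root IFD, is only illustrative):

package main

import (
	"fmt"

	exif "github.com/dsoprea/go-exif/v3"
	"github.com/dsoprea/go-exif/v3/common"
)

func main() {
	ti := exif.NewTagIndex()

	// The standard catalog is loaded lazily on the first lookup.
	it, err := ti.Get(exifcommon.IfdStandardIfdIdentity, 0x010f)
	if err != nil {
		panic(err)
	}

	fmt.Println(it)
	// TAG<ID=(0x010f) NAME=[Make] IFD=[IFD]>
}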

968
vendor/github.com/dsoprea/go-exif/v3/tags_data.go generated vendored Normal file
View File

@ -0,0 +1,968 @@
package exif
var (
// From assets/tags.yaml . Needs to be here so it's embedded in the binary.
tagsYaml = `
# Notes:
#
# This file was produced from http://www.exiv2.org/tags.html, using the included
# tool, though that document appears to have some duplicates when all IDs are
# supposed to be unique (EXIF information only has IDs, not IFDs; IFDs are
# determined by our pre-existing knowledge of those tags).
#
# The webpage that we've produced this file from appears to indicate that
# ImageWidth is represented by both 0x0100 and 0x0001 depending on whether the
# encoding is RGB or YCbCr.
IFD/Exif:
- id: 0x829a
name: ExposureTime
type_name: RATIONAL
- id: 0x829d
name: FNumber
type_name: RATIONAL
- id: 0x8822
name: ExposureProgram
type_name: SHORT
- id: 0x8824
name: SpectralSensitivity
type_name: ASCII
- id: 0x8827
name: ISOSpeedRatings
type_name: SHORT
- id: 0x8828
name: OECF
type_name: UNDEFINED
- id: 0x8830
name: SensitivityType
type_name: SHORT
- id: 0x8831
name: StandardOutputSensitivity
type_name: LONG
- id: 0x8832
name: RecommendedExposureIndex
type_name: LONG
- id: 0x8833
name: ISOSpeed
type_name: LONG
- id: 0x8834
name: ISOSpeedLatitudeyyy
type_name: LONG
- id: 0x8835
name: ISOSpeedLatitudezzz
type_name: LONG
- id: 0x9000
name: ExifVersion
type_name: UNDEFINED
- id: 0x9003
name: DateTimeOriginal
type_name: ASCII
- id: 0x9004
name: DateTimeDigitized
type_name: ASCII
- id: 0x9010
name: OffsetTime
type_name: ASCII
- id: 0x9011
name: OffsetTimeOriginal
type_name: ASCII
- id: 0x9012
name: OffsetTimeDigitized
type_name: ASCII
- id: 0x9101
name: ComponentsConfiguration
type_name: UNDEFINED
- id: 0x9102
name: CompressedBitsPerPixel
type_name: RATIONAL
- id: 0x9201
name: ShutterSpeedValue
type_name: SRATIONAL
- id: 0x9202
name: ApertureValue
type_name: RATIONAL
- id: 0x9203
name: BrightnessValue
type_name: SRATIONAL
- id: 0x9204
name: ExposureBiasValue
type_name: SRATIONAL
- id: 0x9205
name: MaxApertureValue
type_name: RATIONAL
- id: 0x9206
name: SubjectDistance
type_name: RATIONAL
- id: 0x9207
name: MeteringMode
type_name: SHORT
- id: 0x9208
name: LightSource
type_name: SHORT
- id: 0x9209
name: Flash
type_name: SHORT
- id: 0x920a
name: FocalLength
type_name: RATIONAL
- id: 0x9214
name: SubjectArea
type_name: SHORT
- id: 0x927c
name: MakerNote
type_name: UNDEFINED
- id: 0x9286
name: UserComment
type_name: UNDEFINED
- id: 0x9290
name: SubSecTime
type_name: ASCII
- id: 0x9291
name: SubSecTimeOriginal
type_name: ASCII
- id: 0x9292
name: SubSecTimeDigitized
type_name: ASCII
- id: 0xa000
name: FlashpixVersion
type_name: UNDEFINED
- id: 0xa001
name: ColorSpace
type_name: SHORT
- id: 0xa002
name: PixelXDimension
type_names: [LONG, SHORT]
- id: 0xa003
name: PixelYDimension
type_names: [LONG, SHORT]
- id: 0xa004
name: RelatedSoundFile
type_name: ASCII
- id: 0xa005
name: InteroperabilityTag
type_name: LONG
- id: 0xa20b
name: FlashEnergy
type_name: RATIONAL
- id: 0xa20c
name: SpatialFrequencyResponse
type_name: UNDEFINED
- id: 0xa20e
name: FocalPlaneXResolution
type_name: RATIONAL
- id: 0xa20f
name: FocalPlaneYResolution
type_name: RATIONAL
- id: 0xa210
name: FocalPlaneResolutionUnit
type_name: SHORT
- id: 0xa214
name: SubjectLocation
type_name: SHORT
- id: 0xa215
name: ExposureIndex
type_name: RATIONAL
- id: 0xa217
name: SensingMethod
type_name: SHORT
- id: 0xa300
name: FileSource
type_name: UNDEFINED
- id: 0xa301
name: SceneType
type_name: UNDEFINED
- id: 0xa302
name: CFAPattern
type_name: UNDEFINED
- id: 0xa401
name: CustomRendered
type_name: SHORT
- id: 0xa402
name: ExposureMode
type_name: SHORT
- id: 0xa403
name: WhiteBalance
type_name: SHORT
- id: 0xa404
name: DigitalZoomRatio
type_name: RATIONAL
- id: 0xa405
name: FocalLengthIn35mmFilm
type_name: SHORT
- id: 0xa406
name: SceneCaptureType
type_name: SHORT
- id: 0xa407
name: GainControl
type_name: SHORT
- id: 0xa408
name: Contrast
type_name: SHORT
- id: 0xa409
name: Saturation
type_name: SHORT
- id: 0xa40a
name: Sharpness
type_name: SHORT
- id: 0xa40b
name: DeviceSettingDescription
type_name: UNDEFINED
- id: 0xa40c
name: SubjectDistanceRange
type_name: SHORT
- id: 0xa420
name: ImageUniqueID
type_name: ASCII
- id: 0xa430
name: CameraOwnerName
type_name: ASCII
- id: 0xa431
name: BodySerialNumber
type_name: ASCII
- id: 0xa432
name: LensSpecification
type_name: RATIONAL
- id: 0xa433
name: LensMake
type_name: ASCII
- id: 0xa434
name: LensModel
type_name: ASCII
- id: 0xa435
name: LensSerialNumber
type_name: ASCII
IFD/GPSInfo:
- id: 0x0000
name: GPSVersionID
type_name: BYTE
- id: 0x0001
name: GPSLatitudeRef
type_name: ASCII
- id: 0x0002
name: GPSLatitude
type_name: RATIONAL
- id: 0x0003
name: GPSLongitudeRef
type_name: ASCII
- id: 0x0004
name: GPSLongitude
type_name: RATIONAL
- id: 0x0005
name: GPSAltitudeRef
type_name: BYTE
- id: 0x0006
name: GPSAltitude
type_name: RATIONAL
- id: 0x0007
name: GPSTimeStamp
type_name: RATIONAL
- id: 0x0008
name: GPSSatellites
type_name: ASCII
- id: 0x0009
name: GPSStatus
type_name: ASCII
- id: 0x000a
name: GPSMeasureMode
type_name: ASCII
- id: 0x000b
name: GPSDOP
type_name: RATIONAL
- id: 0x000c
name: GPSSpeedRef
type_name: ASCII
- id: 0x000d
name: GPSSpeed
type_name: RATIONAL
- id: 0x000e
name: GPSTrackRef
type_name: ASCII
- id: 0x000f
name: GPSTrack
type_name: RATIONAL
- id: 0x0010
name: GPSImgDirectionRef
type_name: ASCII
- id: 0x0011
name: GPSImgDirection
type_name: RATIONAL
- id: 0x0012
name: GPSMapDatum
type_name: ASCII
- id: 0x0013
name: GPSDestLatitudeRef
type_name: ASCII
- id: 0x0014
name: GPSDestLatitude
type_name: RATIONAL
- id: 0x0015
name: GPSDestLongitudeRef
type_name: ASCII
- id: 0x0016
name: GPSDestLongitude
type_name: RATIONAL
- id: 0x0017
name: GPSDestBearingRef
type_name: ASCII
- id: 0x0018
name: GPSDestBearing
type_name: RATIONAL
- id: 0x0019
name: GPSDestDistanceRef
type_name: ASCII
- id: 0x001a
name: GPSDestDistance
type_name: RATIONAL
- id: 0x001b
name: GPSProcessingMethod
type_name: UNDEFINED
- id: 0x001c
name: GPSAreaInformation
type_name: UNDEFINED
- id: 0x001d
name: GPSDateStamp
type_name: ASCII
- id: 0x001e
name: GPSDifferential
type_name: SHORT
IFD:
- id: 0x000b
name: ProcessingSoftware
type_name: ASCII
- id: 0x00fe
name: NewSubfileType
type_name: LONG
- id: 0x00ff
name: SubfileType
type_name: SHORT
- id: 0x0100
name: ImageWidth
type_names: [LONG, SHORT]
- id: 0x0101
name: ImageLength
type_names: [LONG, SHORT]
- id: 0x0102
name: BitsPerSample
type_name: SHORT
- id: 0x0103
name: Compression
type_name: SHORT
- id: 0x0106
name: PhotometricInterpretation
type_name: SHORT
- id: 0x0107
name: Thresholding
type_name: SHORT
- id: 0x0108
name: CellWidth
type_name: SHORT
- id: 0x0109
name: CellLength
type_name: SHORT
- id: 0x010a
name: FillOrder
type_name: SHORT
- id: 0x010d
name: DocumentName
type_name: ASCII
- id: 0x010e
name: ImageDescription
type_name: ASCII
- id: 0x010f
name: Make
type_name: ASCII
- id: 0x0110
name: Model
type_name: ASCII
- id: 0x0111
name: StripOffsets
type_names: [LONG, SHORT]
- id: 0x0112
name: Orientation
type_name: SHORT
- id: 0x0115
name: SamplesPerPixel
type_name: SHORT
- id: 0x0116
name: RowsPerStrip
type_names: [LONG, SHORT]
- id: 0x0117
name: StripByteCounts
type_names: [LONG, SHORT]
- id: 0x011a
name: XResolution
type_name: RATIONAL
- id: 0x011b
name: YResolution
type_name: RATIONAL
- id: 0x011c
name: PlanarConfiguration
type_name: SHORT
- id: 0x0122
name: GrayResponseUnit
type_name: SHORT
- id: 0x0123
name: GrayResponseCurve
type_name: SHORT
- id: 0x0124
name: T4Options
type_name: LONG
- id: 0x0125
name: T6Options
type_name: LONG
- id: 0x0128
name: ResolutionUnit
type_name: SHORT
- id: 0x0129
name: PageNumber
type_name: SHORT
- id: 0x012d
name: TransferFunction
type_name: SHORT
- id: 0x0131
name: Software
type_name: ASCII
- id: 0x0132
name: DateTime
type_name: ASCII
- id: 0x013b
name: Artist
type_name: ASCII
- id: 0x013c
name: HostComputer
type_name: ASCII
- id: 0x013d
name: Predictor
type_name: SHORT
- id: 0x013e
name: WhitePoint
type_name: RATIONAL
- id: 0x013f
name: PrimaryChromaticities
type_name: RATIONAL
- id: 0x0140
name: ColorMap
type_name: SHORT
- id: 0x0141
name: HalftoneHints
type_name: SHORT
- id: 0x0142
name: TileWidth
type_name: SHORT
- id: 0x0143
name: TileLength
type_name: SHORT
- id: 0x0144
name: TileOffsets
type_name: SHORT
- id: 0x0145
name: TileByteCounts
type_name: SHORT
- id: 0x014a
name: SubIFDs
type_name: LONG
- id: 0x014c
name: InkSet
type_name: SHORT
- id: 0x014d
name: InkNames
type_name: ASCII
- id: 0x014e
name: NumberOfInks
type_name: SHORT
- id: 0x0150
name: DotRange
type_name: BYTE
- id: 0x0151
name: TargetPrinter
type_name: ASCII
- id: 0x0152
name: ExtraSamples
type_name: SHORT
- id: 0x0153
name: SampleFormat
type_name: SHORT
- id: 0x0154
name: SMinSampleValue
type_name: SHORT
- id: 0x0155
name: SMaxSampleValue
type_name: SHORT
- id: 0x0156
name: TransferRange
type_name: SHORT
- id: 0x0157
name: ClipPath
type_name: BYTE
- id: 0x015a
name: Indexed
type_name: SHORT
- id: 0x015b
name: JPEGTables
type_name: UNDEFINED
- id: 0x015f
name: OPIProxy
type_name: SHORT
- id: 0x0200
name: JPEGProc
type_name: LONG
- id: 0x0201
name: JPEGInterchangeFormat
type_name: LONG
- id: 0x0202
name: JPEGInterchangeFormatLength
type_name: LONG
- id: 0x0203
name: JPEGRestartInterval
type_name: SHORT
- id: 0x0205
name: JPEGLosslessPredictors
type_name: SHORT
- id: 0x0206
name: JPEGPointTransforms
type_name: SHORT
- id: 0x0207
name: JPEGQTables
type_name: LONG
- id: 0x0208
name: JPEGDCTables
type_name: LONG
- id: 0x0209
name: JPEGACTables
type_name: LONG
- id: 0x0211
name: YCbCrCoefficients
type_name: RATIONAL
- id: 0x0212
name: YCbCrSubSampling
type_name: SHORT
- id: 0x0213
name: YCbCrPositioning
type_name: SHORT
- id: 0x0214
name: ReferenceBlackWhite
type_name: RATIONAL
- id: 0x02bc
name: XMLPacket
type_name: BYTE
- id: 0x4746
name: Rating
type_name: SHORT
- id: 0x4749
name: RatingPercent
type_name: SHORT
- id: 0x800d
name: ImageID
type_name: ASCII
- id: 0x828d
name: CFARepeatPatternDim
type_name: SHORT
- id: 0x828e
name: CFAPattern
type_name: BYTE
- id: 0x828f
name: BatteryLevel
type_name: RATIONAL
- id: 0x8298
name: Copyright
type_name: ASCII
- id: 0x829a
name: ExposureTime
# NOTE(dustin): SRATIONAL isn't mentioned in the standard, but we have seen it in real data.
type_names: [RATIONAL, SRATIONAL]
- id: 0x829d
name: FNumber
# NOTE(dustin): SRATIONAL isn't mentioned in the standard, but we have seen it in real data.
type_names: [RATIONAL, SRATIONAL]
- id: 0x83bb
name: IPTCNAA
type_name: LONG
- id: 0x8649
name: ImageResources
type_name: BYTE
- id: 0x8769
name: ExifTag
type_name: LONG
- id: 0x8773
name: InterColorProfile
type_name: UNDEFINED
- id: 0x8822
name: ExposureProgram
type_name: SHORT
- id: 0x8824
name: SpectralSensitivity
type_name: ASCII
- id: 0x8825
name: GPSTag
type_name: LONG
- id: 0x8827
name: ISOSpeedRatings
type_name: SHORT
- id: 0x8828
name: OECF
type_name: UNDEFINED
- id: 0x8829
name: Interlace
type_name: SHORT
- id: 0x882b
name: SelfTimerMode
type_name: SHORT
- id: 0x9003
name: DateTimeOriginal
type_name: ASCII
- id: 0x9102
name: CompressedBitsPerPixel
type_name: RATIONAL
- id: 0x9201
name: ShutterSpeedValue
type_name: SRATIONAL
- id: 0x9202
name: ApertureValue
type_name: RATIONAL
- id: 0x9203
name: BrightnessValue
type_name: SRATIONAL
- id: 0x9204
name: ExposureBiasValue
type_name: SRATIONAL
- id: 0x9205
name: MaxApertureValue
type_name: RATIONAL
- id: 0x9206
name: SubjectDistance
type_name: SRATIONAL
- id: 0x9207
name: MeteringMode
type_name: SHORT
- id: 0x9208
name: LightSource
type_name: SHORT
- id: 0x9209
name: Flash
type_name: SHORT
- id: 0x920a
name: FocalLength
type_name: RATIONAL
- id: 0x920b
name: FlashEnergy
type_name: RATIONAL
- id: 0x920c
name: SpatialFrequencyResponse
type_name: UNDEFINED
- id: 0x920d
name: Noise
type_name: UNDEFINED
- id: 0x920e
name: FocalPlaneXResolution
type_name: RATIONAL
- id: 0x920f
name: FocalPlaneYResolution
type_name: RATIONAL
- id: 0x9210
name: FocalPlaneResolutionUnit
type_name: SHORT
- id: 0x9211
name: ImageNumber
type_name: LONG
- id: 0x9212
name: SecurityClassification
type_name: ASCII
- id: 0x9213
name: ImageHistory
type_name: ASCII
- id: 0x9214
name: SubjectLocation
type_name: SHORT
- id: 0x9215
name: ExposureIndex
type_name: RATIONAL
- id: 0x9216
name: TIFFEPStandardID
type_name: BYTE
- id: 0x9217
name: SensingMethod
type_name: SHORT
- id: 0x9c9b
name: XPTitle
type_name: BYTE
- id: 0x9c9c
name: XPComment
type_name: BYTE
- id: 0x9c9d
name: XPAuthor
type_name: BYTE
- id: 0x9c9e
name: XPKeywords
type_name: BYTE
- id: 0x9c9f
name: XPSubject
type_name: BYTE
- id: 0xc4a5
name: PrintImageMatching
type_name: UNDEFINED
- id: 0xc612
name: DNGVersion
type_name: BYTE
- id: 0xc613
name: DNGBackwardVersion
type_name: BYTE
- id: 0xc614
name: UniqueCameraModel
type_name: ASCII
- id: 0xc615
name: LocalizedCameraModel
type_name: BYTE
- id: 0xc616
name: CFAPlaneColor
type_name: BYTE
- id: 0xc617
name: CFALayout
type_name: SHORT
- id: 0xc618
name: LinearizationTable
type_name: SHORT
- id: 0xc619
name: BlackLevelRepeatDim
type_name: SHORT
- id: 0xc61a
name: BlackLevel
type_name: RATIONAL
- id: 0xc61b
name: BlackLevelDeltaH
type_name: SRATIONAL
- id: 0xc61c
name: BlackLevelDeltaV
type_name: SRATIONAL
- id: 0xc61d
name: WhiteLevel
type_name: SHORT
- id: 0xc61e
name: DefaultScale
type_name: RATIONAL
- id: 0xc61f
name: DefaultCropOrigin
type_name: SHORT
- id: 0xc620
name: DefaultCropSize
type_name: SHORT
- id: 0xc621
name: ColorMatrix1
type_name: SRATIONAL
- id: 0xc622
name: ColorMatrix2
type_name: SRATIONAL
- id: 0xc623
name: CameraCalibration1
type_name: SRATIONAL
- id: 0xc624
name: CameraCalibration2
type_name: SRATIONAL
- id: 0xc625
name: ReductionMatrix1
type_name: SRATIONAL
- id: 0xc626
name: ReductionMatrix2
type_name: SRATIONAL
- id: 0xc627
name: AnalogBalance
type_name: RATIONAL
- id: 0xc628
name: AsShotNeutral
type_name: SHORT
- id: 0xc629
name: AsShotWhiteXY
type_name: RATIONAL
- id: 0xc62a
name: BaselineExposure
type_name: SRATIONAL
- id: 0xc62b
name: BaselineNoise
type_name: RATIONAL
- id: 0xc62c
name: BaselineSharpness
type_name: RATIONAL
- id: 0xc62d
name: BayerGreenSplit
type_name: LONG
- id: 0xc62e
name: LinearResponseLimit
type_name: RATIONAL
- id: 0xc62f
name: CameraSerialNumber
type_name: ASCII
- id: 0xc630
name: LensInfo
type_name: RATIONAL
- id: 0xc631
name: ChromaBlurRadius
type_name: RATIONAL
- id: 0xc632
name: AntiAliasStrength
type_name: RATIONAL
- id: 0xc633
name: ShadowScale
type_name: SRATIONAL
- id: 0xc634
name: DNGPrivateData
type_name: BYTE
- id: 0xc635
name: MakerNoteSafety
type_name: SHORT
- id: 0xc65a
name: CalibrationIlluminant1
type_name: SHORT
- id: 0xc65b
name: CalibrationIlluminant2
type_name: SHORT
- id: 0xc65c
name: BestQualityScale
type_name: RATIONAL
- id: 0xc65d
name: RawDataUniqueID
type_name: BYTE
- id: 0xc68b
name: OriginalRawFileName
type_name: BYTE
- id: 0xc68c
name: OriginalRawFileData
type_name: UNDEFINED
- id: 0xc68d
name: ActiveArea
type_name: SHORT
- id: 0xc68e
name: MaskedAreas
type_name: SHORT
- id: 0xc68f
name: AsShotICCProfile
type_name: UNDEFINED
- id: 0xc690
name: AsShotPreProfileMatrix
type_name: SRATIONAL
- id: 0xc691
name: CurrentICCProfile
type_name: UNDEFINED
- id: 0xc692
name: CurrentPreProfileMatrix
type_name: SRATIONAL
- id: 0xc6bf
name: ColorimetricReference
type_name: SHORT
- id: 0xc6f3
name: CameraCalibrationSignature
type_name: BYTE
- id: 0xc6f4
name: ProfileCalibrationSignature
type_name: BYTE
- id: 0xc6f6
name: AsShotProfileName
type_name: BYTE
- id: 0xc6f7
name: NoiseReductionApplied
type_name: RATIONAL
- id: 0xc6f8
name: ProfileName
type_name: BYTE
- id: 0xc6f9
name: ProfileHueSatMapDims
type_name: LONG
- id: 0xc6fd
name: ProfileEmbedPolicy
type_name: LONG
- id: 0xc6fe
name: ProfileCopyright
type_name: BYTE
- id: 0xc714
name: ForwardMatrix1
type_name: SRATIONAL
- id: 0xc715
name: ForwardMatrix2
type_name: SRATIONAL
- id: 0xc716
name: PreviewApplicationName
type_name: BYTE
- id: 0xc717
name: PreviewApplicationVersion
type_name: BYTE
- id: 0xc718
name: PreviewSettingsName
type_name: BYTE
- id: 0xc719
name: PreviewSettingsDigest
type_name: BYTE
- id: 0xc71a
name: PreviewColorSpace
type_name: LONG
- id: 0xc71b
name: PreviewDateTime
type_name: ASCII
- id: 0xc71c
name: RawImageDigest
type_name: UNDEFINED
- id: 0xc71d
name: OriginalRawFileDigest
type_name: UNDEFINED
- id: 0xc71e
name: SubTileBlockSize
type_name: LONG
- id: 0xc71f
name: RowInterleaveFactor
type_name: LONG
- id: 0xc725
name: ProfileLookTableDims
type_name: LONG
- id: 0xc740
name: OpcodeList1
type_name: UNDEFINED
- id: 0xc741
name: OpcodeList2
type_name: UNDEFINED
- id: 0xc74e
name: OpcodeList3
type_name: UNDEFINED
# This tag may be used to specify the size of raster pixel spacing in the
# model space units, when the raster space can be embedded in the model space
# coordinate system without rotation, and consists of the following 3 values:
# ModelPixelScaleTag = (ScaleX, ScaleY, ScaleZ)
# where ScaleX and ScaleY give the horizontal and vertical spacing of raster
# pixels. The ScaleZ is primarily used to map the pixel value of a digital
# elevation model into the correct Z-scale, and so for most other purposes
# this value should be zero (since most model spaces are 2-D, with Z=0).
# Source: http://geotiff.maptools.org/spec/geotiff2.6.html#2.6.1
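# Illustrative example (ours, not from the spec): a flat 2-D raster with
# 30m square pixels would store ModelPixelScaleTag = (30.0, 30.0, 0.0),
# ScaleZ being zero because the model space has no vertical component.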
- id: 0x830e
name: ModelPixelScaleTag
type_name: DOUBLE
# This tag stores raster->model tiepoint pairs in the order
# ModelTiepointTag = (...,I,J,K, X,Y,Z...),
# where (I,J,K) is the point at location (I,J) in raster space with
# pixel-value K, and (X,Y,Z) is a vector in model space. In most cases the
# model space is only two-dimensional, in which case both K and Z should be
# set to zero; this third dimension is provided in anticipation of future
# support for 3D digital elevation models and vertical coordinate systems.
# Source: http://geotiff.maptools.org/spec/geotiff2.6.html#2.6.1
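# Illustrative example (ours, not from the spec): for a 2-D model space,
# ModelTiepointTag = (0, 0, 0, 350000.0, 5316000.0, 0.0) pins raster pixel
# (0, 0) to model coordinates (350000, 5316000), with K and Z both zero.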
- id: 0x8482
name: ModelTiepointTag
type_name: DOUBLE
# This tag may be used to specify the transformation matrix between the
# raster space (and its dependent pixel-value space) and the (possibly 3D)
# model space.
# Source: http://geotiff.maptools.org/spec/geotiff2.6.html#2.6.1
- id: 0x85d8
name: ModelTransformationTag
type_name: DOUBLE
IFD/Exif/Iop:
- id: 0x0001
name: InteroperabilityIndex
type_name: ASCII
- id: 0x0002
name: InteroperabilityVersion
type_name: UNDEFINED
- id: 0x1000
name: RelatedImageFileFormat
type_name: ASCII
- id: 0x1001
name: RelatedImageWidth
type_name: LONG
- id: 0x1002
name: RelatedImageLength
type_name: LONG
`
)

188
vendor/github.com/dsoprea/go-exif/v3/testing_common.go generated vendored Normal file

@ -0,0 +1,188 @@
package exif
import (
"path"
"reflect"
"testing"
"io/ioutil"
"github.com/dsoprea/go-logging"
"github.com/dsoprea/go-exif/v3/common"
)
var (
testExifData []byte
)
func getExifSimpleTestIb() *IfdBuilder {
defer func() {
if state := recover(); state != nil {
err := log.Wrap(state.(error))
log.Panic(err)
}
}()
im := exifcommon.NewIfdMapping()
err := exifcommon.LoadStandardIfds(im)
log.PanicIf(err)
ti := NewTagIndex()
ib := NewIfdBuilder(im, ti, exifcommon.IfdStandardIfdIdentity, exifcommon.TestDefaultByteOrder)
err = ib.AddStandard(0x000b, "asciivalue")
log.PanicIf(err)
err = ib.AddStandard(0x00ff, []uint16{0x1122})
log.PanicIf(err)
err = ib.AddStandard(0x0100, []uint32{0x33445566})
log.PanicIf(err)
err = ib.AddStandard(0x013e, []exifcommon.Rational{{Numerator: 0x11112222, Denominator: 0x33334444}})
log.PanicIf(err)
return ib
}
func getExifSimpleTestIbBytes() []byte {
defer func() {
if state := recover(); state != nil {
err := log.Wrap(state.(error))
log.Panic(err)
}
}()
im := exifcommon.NewIfdMapping()
err := exifcommon.LoadStandardIfds(im)
log.PanicIf(err)
ti := NewTagIndex()
ib := NewIfdBuilder(im, ti, exifcommon.IfdStandardIfdIdentity, exifcommon.TestDefaultByteOrder)
err = ib.AddStandard(0x000b, "asciivalue")
log.PanicIf(err)
err = ib.AddStandard(0x00ff, []uint16{0x1122})
log.PanicIf(err)
err = ib.AddStandard(0x0100, []uint32{0x33445566})
log.PanicIf(err)
err = ib.AddStandard(0x013e, []exifcommon.Rational{{Numerator: 0x11112222, Denominator: 0x33334444}})
log.PanicIf(err)
ibe := NewIfdByteEncoder()
exifData, err := ibe.EncodeToExif(ib)
log.PanicIf(err)
return exifData
}
func validateExifSimpleTestIb(exifData []byte, t *testing.T) {
defer func() {
if state := recover(); state != nil {
err := log.Wrap(state.(error))
log.Panic(err)
}
}()
im := exifcommon.NewIfdMapping()
err := exifcommon.LoadStandardIfds(im)
log.PanicIf(err)
ti := NewTagIndex()
eh, index, err := Collect(im, ti, exifData)
log.PanicIf(err)
if eh.ByteOrder != exifcommon.TestDefaultByteOrder {
t.Fatalf("EXIF byte-order is not correct: %v", eh.ByteOrder)
} else if eh.FirstIfdOffset != ExifDefaultFirstIfdOffset {
t.Fatalf("EXIF first IFD-offset not correct: (0x%02x)", eh.FirstIfdOffset)
}
if len(index.Ifds) != 1 {
t.Fatalf("There wasn't exactly one IFD decoded: (%d)", len(index.Ifds))
}
ifd := index.RootIfd
if ifd.ByteOrder() != exifcommon.TestDefaultByteOrder {
t.Fatalf("IFD byte-order not correct.")
} else if ifd.ifdIdentity.UnindexedString() != exifcommon.IfdStandardIfdIdentity.UnindexedString() {
t.Fatalf("IFD name not correct.")
} else if ifd.ifdIdentity.Index() != 0 {
t.Fatalf("IFD index not zero: (%d)", ifd.ifdIdentity.Index())
} else if ifd.Offset() != uint32(0x0008) {
t.Fatalf("IFD offset not correct.")
} else if len(ifd.Entries()) != 4 {
t.Fatalf("IFD number of entries not correct: (%d)", len(ifd.Entries()))
} else if ifd.nextIfdOffset != uint32(0) {
t.Fatalf("Next-IFD offset is non-zero.")
} else if ifd.nextIfd != nil {
t.Fatalf("Next-IFD pointer is non-nil.")
}
// Verify the values by using the actual, original types (this is awesome).
expected := []struct {
tagId uint16
value interface{}
}{
{tagId: 0x000b, value: "asciivalue"},
{tagId: 0x00ff, value: []uint16{0x1122}},
{tagId: 0x0100, value: []uint32{0x33445566}},
{tagId: 0x013e, value: []exifcommon.Rational{{Numerator: 0x11112222, Denominator: 0x33334444}}},
}
for i, ite := range ifd.Entries() {
if ite.TagId() != expected[i].tagId {
t.Fatalf("Tag-ID for entry (%d) not correct: (0x%02x) != (0x%02x)", i, ite.TagId(), expected[i].tagId)
}
value, err := ite.Value()
log.PanicIf(err)
if reflect.DeepEqual(value, expected[i].value) != true {
t.Fatalf("Value for entry (%d) not correct: [%v] != [%v]", i, value, expected[i].value)
}
}
}
func getTestImageFilepath() string {
assetsPath := exifcommon.GetTestAssetsPath()
testImageFilepath := path.Join(assetsPath, "NDM_8901.jpg")
return testImageFilepath
}
func getTestExifData() []byte {
if testExifData == nil {
assetsPath := exifcommon.GetTestAssetsPath()
filepath := path.Join(assetsPath, "NDM_8901.jpg.exif")
var err error
testExifData, err = ioutil.ReadFile(filepath)
log.PanicIf(err)
}
return testExifData
}
func getTestGpsImageFilepath() string {
assetsPath := exifcommon.GetTestAssetsPath()
testGpsImageFilepath := path.Join(assetsPath, "gps.jpg")
return testGpsImageFilepath
}
func getTestGeotiffFilepath() string {
assetsPath := exifcommon.GetTestAssetsPath()
testGeotiffFilepath := path.Join(assetsPath, "geotiff_example.tif")
return testGeotiffFilepath
}


@ -0,0 +1,4 @@
## 0xa40b
The specification is not specific or clear enough for this tag to be handled. Without a working example, we're deferring until some point in the future when either we or someone else has a better understanding.


@ -0,0 +1,62 @@
package exifundefined
import (
"encoding/binary"
"github.com/dsoprea/go-logging"
"github.com/dsoprea/go-exif/v3/common"
)
// Encode encodes the given encodeable undefined value to bytes.
func Encode(value EncodeableValue, byteOrder binary.ByteOrder) (encoded []byte, unitCount uint32, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
encoderName := value.EncoderName()
encoder, found := encoders[encoderName]
if found == false {
log.Panicf("no encoder registered for type [%s]", encoderName)
}
encoded, unitCount, err = encoder.Encode(value, byteOrder)
log.PanicIf(err)
return encoded, unitCount, nil
}
// Decode constructs a value from raw encoded bytes
func Decode(valueContext *exifcommon.ValueContext) (value EncodeableValue, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
uth := UndefinedTagHandle{
IfdPath: valueContext.IfdPath(),
TagId: valueContext.TagId(),
}
decoder, found := decoders[uth]
if found == false {
// We have no choice but to return the error. We have no way of knowing how
// much data there is without already knowing what data-type this tag is.
return nil, exifcommon.ErrUnhandledUndefinedTypedTag
}
value, err = decoder.Decode(valueContext)
if err != nil {
if err == ErrUnparseableValue {
return nil, err
}
log.Panic(err)
}
return value, nil
}
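
The generic Encode above dispatches on the value's EncoderName(). As a usage sketch (our own wrapper, not part of this commit; Tag9000ExifVersion is one of the registered types added further down):

```go
package main

import (
	"encoding/binary"
	"fmt"

	exifundefined "github.com/dsoprea/go-exif/v3/undefined"
)

func main() {
	ev := exifundefined.Tag9000ExifVersion{ExifVersion: "0230"}

	// Encode looks up the codec registered under ev.EncoderName()
	// ("Codec9000ExifVersion") and delegates to it.
	encoded, unitCount, err := exifundefined.Encode(ev, binary.BigEndian)
	if err != nil {
		panic(err)
	}

	fmt.Println(unitCount, string(encoded)) // 4 "0230"
}
```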


@ -0,0 +1,148 @@
package exifundefined
import (
"bytes"
"fmt"
"encoding/binary"
"github.com/dsoprea/go-logging"
"github.com/dsoprea/go-exif/v3/common"
)
type Tag8828Oecf struct {
Columns uint16
Rows uint16
ColumnNames []string
Values []exifcommon.SignedRational
}
func (oecf Tag8828Oecf) String() string {
return fmt.Sprintf("Tag8828Oecf<COLUMNS=(%d) ROWS=(%d)>", oecf.Columns, oecf.Rows)
}
func (oecf Tag8828Oecf) EncoderName() string {
return "Codec8828Oecf"
}
type Codec8828Oecf struct {
}
func (Codec8828Oecf) Encode(value interface{}, byteOrder binary.ByteOrder) (encoded []byte, unitCount uint32, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
// TODO(dustin): Add test
oecf, ok := value.(Tag8828Oecf)
if ok == false {
log.Panicf("can only encode a Tag8828Oecf")
}
b := new(bytes.Buffer)
err = binary.Write(b, byteOrder, oecf.Columns)
log.PanicIf(err)
err = binary.Write(b, byteOrder, oecf.Rows)
log.PanicIf(err)
for _, name := range oecf.ColumnNames {
_, err := b.Write([]byte(name))
log.PanicIf(err)
_, err = b.Write([]byte{0})
log.PanicIf(err)
}
ve := exifcommon.NewValueEncoder(byteOrder)
ed, err := ve.Encode(oecf.Values)
log.PanicIf(err)
_, err = b.Write(ed.Encoded)
log.PanicIf(err)
return b.Bytes(), uint32(b.Len()), nil
}
func (Codec8828Oecf) Decode(valueContext *exifcommon.ValueContext) (value EncodeableValue, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
// TODO(dustin): Add test using known good data.
valueContext.SetUndefinedValueType(exifcommon.TypeByte)
valueBytes, err := valueContext.ReadBytes()
log.PanicIf(err)
oecf := Tag8828Oecf{}
oecf.Columns = valueContext.ByteOrder().Uint16(valueBytes[0:2])
oecf.Rows = valueContext.ByteOrder().Uint16(valueBytes[2:4])
columnNames := make([]string, oecf.Columns)
// startAt is where the current column name starts.
startAt := 4
// offset is our current position.
offset := startAt
currentColumnNumber := uint16(0)
for currentColumnNumber < oecf.Columns {
if valueBytes[offset] == 0 {
columnName := string(valueBytes[startAt:offset])
if len(columnName) == 0 {
log.Panicf("SFR column (%d) has zero length", currentColumnNumber)
}
columnNames[currentColumnNumber] = columnName
currentColumnNumber++
offset++
startAt = offset
continue
}
offset++
}
oecf.ColumnNames = columnNames
rawRationalBytes := valueBytes[offset:]
rationalSize := exifcommon.TypeSignedRational.Size()
if len(rawRationalBytes)%rationalSize > 0 {
log.Panicf("OECF signed-rationals not aligned: (%d) %% (%d) > 0", len(rawRationalBytes), rationalSize)
}
rationalCount := len(rawRationalBytes) / rationalSize
parser := new(exifcommon.Parser)
byteOrder := valueContext.ByteOrder()
items, err := parser.ParseSignedRationals(rawRationalBytes, uint32(rationalCount), byteOrder)
log.PanicIf(err)
oecf.Values = items
return oecf, nil
}
func init() {
registerDecoder(
exifcommon.IfdExifStandardIfdIdentity.UnindexedString(),
0x8828,
Codec8828Oecf{})
}


@ -0,0 +1,69 @@
package exifundefined
import (
"encoding/binary"
"github.com/dsoprea/go-logging"
"github.com/dsoprea/go-exif/v3/common"
)
type Tag9000ExifVersion struct {
ExifVersion string
}
func (Tag9000ExifVersion) EncoderName() string {
return "Codec9000ExifVersion"
}
func (ev Tag9000ExifVersion) String() string {
return ev.ExifVersion
}
type Codec9000ExifVersion struct {
}
func (Codec9000ExifVersion) Encode(value interface{}, byteOrder binary.ByteOrder) (encoded []byte, unitCount uint32, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
s, ok := value.(Tag9000ExifVersion)
if ok == false {
log.Panicf("can only encode a Tag9000ExifVersion")
}
return []byte(s.ExifVersion), uint32(len(s.ExifVersion)), nil
}
func (Codec9000ExifVersion) Decode(valueContext *exifcommon.ValueContext) (value EncodeableValue, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
valueContext.SetUndefinedValueType(exifcommon.TypeAsciiNoNul)
valueString, err := valueContext.ReadAsciiNoNul()
log.PanicIf(err)
ev := Tag9000ExifVersion{
ExifVersion: valueString,
}
return ev, nil
}
func init() {
registerEncoder(
Tag9000ExifVersion{},
Codec9000ExifVersion{})
registerDecoder(
exifcommon.IfdExifStandardIfdIdentity.UnindexedString(),
0x9000,
Codec9000ExifVersion{})
}


@ -0,0 +1,124 @@
package exifundefined
import (
"bytes"
"fmt"
"encoding/binary"
"github.com/dsoprea/go-logging"
"github.com/dsoprea/go-exif/v3/common"
)
const (
TagUndefinedType_9101_ComponentsConfiguration_Channel_Y = 0x1
TagUndefinedType_9101_ComponentsConfiguration_Channel_Cb = 0x2
TagUndefinedType_9101_ComponentsConfiguration_Channel_Cr = 0x3
TagUndefinedType_9101_ComponentsConfiguration_Channel_R = 0x4
TagUndefinedType_9101_ComponentsConfiguration_Channel_G = 0x5
TagUndefinedType_9101_ComponentsConfiguration_Channel_B = 0x6
)
const (
TagUndefinedType_9101_ComponentsConfiguration_OTHER = iota
TagUndefinedType_9101_ComponentsConfiguration_RGB = iota
TagUndefinedType_9101_ComponentsConfiguration_YCBCR = iota
)
var (
TagUndefinedType_9101_ComponentsConfiguration_Names = map[int]string{
TagUndefinedType_9101_ComponentsConfiguration_OTHER: "OTHER",
TagUndefinedType_9101_ComponentsConfiguration_RGB: "RGB",
TagUndefinedType_9101_ComponentsConfiguration_YCBCR: "YCBCR",
}
TagUndefinedType_9101_ComponentsConfiguration_Configurations = map[int][]byte{
TagUndefinedType_9101_ComponentsConfiguration_RGB: {
TagUndefinedType_9101_ComponentsConfiguration_Channel_R,
TagUndefinedType_9101_ComponentsConfiguration_Channel_G,
TagUndefinedType_9101_ComponentsConfiguration_Channel_B,
0,
},
TagUndefinedType_9101_ComponentsConfiguration_YCBCR: {
TagUndefinedType_9101_ComponentsConfiguration_Channel_Y,
TagUndefinedType_9101_ComponentsConfiguration_Channel_Cb,
TagUndefinedType_9101_ComponentsConfiguration_Channel_Cr,
0,
},
}
)
type TagExif9101ComponentsConfiguration struct {
ConfigurationId int
ConfigurationBytes []byte
}
func (TagExif9101ComponentsConfiguration) EncoderName() string {
return "CodecExif9101ComponentsConfiguration"
}
func (cc TagExif9101ComponentsConfiguration) String() string {
return fmt.Sprintf("Exif9101ComponentsConfiguration<ID=[%s] BYTES=%v>", TagUndefinedType_9101_ComponentsConfiguration_Names[cc.ConfigurationId], cc.ConfigurationBytes)
}
type CodecExif9101ComponentsConfiguration struct {
}
func (CodecExif9101ComponentsConfiguration) Encode(value interface{}, byteOrder binary.ByteOrder) (encoded []byte, unitCount uint32, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
cc, ok := value.(TagExif9101ComponentsConfiguration)
if ok == false {
log.Panicf("can only encode a TagExif9101ComponentsConfiguration")
}
return cc.ConfigurationBytes, uint32(len(cc.ConfigurationBytes)), nil
}
func (CodecExif9101ComponentsConfiguration) Decode(valueContext *exifcommon.ValueContext) (value EncodeableValue, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
valueContext.SetUndefinedValueType(exifcommon.TypeByte)
valueBytes, err := valueContext.ReadBytes()
log.PanicIf(err)
for configurationId, configurationBytes := range TagUndefinedType_9101_ComponentsConfiguration_Configurations {
if bytes.Equal(configurationBytes, valueBytes) == true {
cc := TagExif9101ComponentsConfiguration{
ConfigurationId: configurationId,
ConfigurationBytes: valueBytes,
}
return cc, nil
}
}
cc := TagExif9101ComponentsConfiguration{
ConfigurationId: TagUndefinedType_9101_ComponentsConfiguration_OTHER,
ConfigurationBytes: valueBytes,
}
return cc, nil
}
func init() {
registerEncoder(
TagExif9101ComponentsConfiguration{},
CodecExif9101ComponentsConfiguration{})
registerDecoder(
exifcommon.IfdExifStandardIfdIdentity.UnindexedString(),
0x9101,
CodecExif9101ComponentsConfiguration{})
}


@ -0,0 +1,114 @@
package exifundefined
import (
"fmt"
"strings"
"crypto/sha1"
"encoding/binary"
"github.com/dsoprea/go-logging"
"github.com/dsoprea/go-exif/v3/common"
)
type Tag927CMakerNote struct {
MakerNoteType []byte
MakerNoteBytes []byte
}
func (Tag927CMakerNote) EncoderName() string {
return "Codec927CMakerNote"
}
func (mn Tag927CMakerNote) String() string {
parts := make([]string, len(mn.MakerNoteType))
for i, c := range mn.MakerNoteType {
parts[i] = fmt.Sprintf("%02x", c)
}
h := sha1.New()
_, err := h.Write(mn.MakerNoteBytes)
log.PanicIf(err)
digest := h.Sum(nil)
return fmt.Sprintf("MakerNote<TYPE-ID=[%s] LEN=(%d) SHA1=[%020x]>", strings.Join(parts, " "), len(mn.MakerNoteBytes), digest)
}
type Codec927CMakerNote struct {
}
func (Codec927CMakerNote) Encode(value interface{}, byteOrder binary.ByteOrder) (encoded []byte, unitCount uint32, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
mn, ok := value.(Tag927CMakerNote)
if ok == false {
log.Panicf("can only encode a Tag927CMakerNote")
}
// TODO(dustin): Confirm this size against the specification.
return mn.MakerNoteBytes, uint32(len(mn.MakerNoteBytes)), nil
}
func (Codec927CMakerNote) Decode(valueContext *exifcommon.ValueContext) (value EncodeableValue, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
// MakerNote
// TODO(dustin): !! This is the Wild Wild West. This very well might be a child IFD, but each OEM defines its own format. If we're going to be writing changes and this is complete EXIF (which may not have the first eight bytes), it might be fine. However, if these are just IFDs they'll be relative to the main EXIF, and rewriting will invalidate the MakerNote data for IFDs and any other implementations that use offsets, unless we can interpret them all. It would be best to return to this later and just exclude this from being written for now, though it means losing a wealth of image metadata.
// -> We could also blindly try to interpret it as an IFD and validate that it looks good (maybe it will even have a 'next IFD' pointer that we can validate is 0x0).
valueContext.SetUndefinedValueType(exifcommon.TypeByte)
valueBytes, err := valueContext.ReadBytes()
log.PanicIf(err)
// TODO(dustin): Doesn't work, but here as an example.
// ie := NewIfdEnumerate(valueBytes, byteOrder)
// // TODO(dustin): !! Validate types (might have proprietary types, but it might be worth splitting the list between valid and not valid; maybe fail if a certain proportion are invalid, or if the invalid count isn't less than a certain small integer)?
// ii, err := ie.Collect(0x0)
// for _, entry := range ii.RootIfd.Entries {
// fmt.Printf("ENTRY: 0x%02x %d\n", entry.TagId, entry.TagType)
// }
var makerNoteType []byte
if len(valueBytes) >= 20 {
makerNoteType = valueBytes[:20]
} else {
makerNoteType = valueBytes
}
mn := Tag927CMakerNote{
MakerNoteType: makerNoteType,
// MakerNoteBytes holds the whole payload. There's always
// the chance that the first 20 bytes include actual data.
MakerNoteBytes: valueBytes,
}
return mn, nil
}
func init() {
registerEncoder(
Tag927CMakerNote{},
Codec927CMakerNote{})
registerDecoder(
exifcommon.IfdExifStandardIfdIdentity.UnindexedString(),
0x927c,
Codec927CMakerNote{})
}


@ -0,0 +1,142 @@
package exifundefined
import (
"bytes"
"fmt"
"encoding/binary"
"github.com/dsoprea/go-logging"
"github.com/dsoprea/go-exif/v3/common"
)
var (
exif9286Logger = log.NewLogger("exifundefined.exif_9286_user_comment")
)
const (
TagUndefinedType_9286_UserComment_Encoding_ASCII = iota
TagUndefinedType_9286_UserComment_Encoding_JIS = iota
TagUndefinedType_9286_UserComment_Encoding_UNICODE = iota
TagUndefinedType_9286_UserComment_Encoding_UNDEFINED = iota
)
var (
TagUndefinedType_9286_UserComment_Encoding_Names = map[int]string{
TagUndefinedType_9286_UserComment_Encoding_ASCII: "ASCII",
TagUndefinedType_9286_UserComment_Encoding_JIS: "JIS",
TagUndefinedType_9286_UserComment_Encoding_UNICODE: "UNICODE",
TagUndefinedType_9286_UserComment_Encoding_UNDEFINED: "UNDEFINED",
}
TagUndefinedType_9286_UserComment_Encodings = map[int][]byte{
TagUndefinedType_9286_UserComment_Encoding_ASCII: {'A', 'S', 'C', 'I', 'I', 0, 0, 0},
TagUndefinedType_9286_UserComment_Encoding_JIS: {'J', 'I', 'S', 0, 0, 0, 0, 0},
TagUndefinedType_9286_UserComment_Encoding_UNICODE: {'U', 'n', 'i', 'c', 'o', 'd', 'e', 0},
TagUndefinedType_9286_UserComment_Encoding_UNDEFINED: {0, 0, 0, 0, 0, 0, 0, 0},
}
)
type Tag9286UserComment struct {
EncodingType int
EncodingBytes []byte
}
func (Tag9286UserComment) EncoderName() string {
return "Codec9286UserComment"
}
func (uc Tag9286UserComment) String() string {
var valuePhrase string
if uc.EncodingType == TagUndefinedType_9286_UserComment_Encoding_ASCII {
return fmt.Sprintf("[ASCII] %s", string(uc.EncodingBytes))
} else {
if len(uc.EncodingBytes) <= 8 {
valuePhrase = fmt.Sprintf("%v", uc.EncodingBytes)
} else {
valuePhrase = fmt.Sprintf("%v...", uc.EncodingBytes[:8])
}
}
return fmt.Sprintf("UserComment<SIZE=(%d) ENCODING=[%s] V=%v LEN=(%d)>", len(uc.EncodingBytes), TagUndefinedType_9286_UserComment_Encoding_Names[uc.EncodingType], valuePhrase, len(uc.EncodingBytes))
}
type Codec9286UserComment struct {
}
func (Codec9286UserComment) Encode(value interface{}, byteOrder binary.ByteOrder) (encoded []byte, unitCount uint32, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
uc, ok := value.(Tag9286UserComment)
if ok == false {
log.Panicf("can only encode a Tag9286UserComment")
}
encodingTypeBytes, found := TagUndefinedType_9286_UserComment_Encodings[uc.EncodingType]
if found == false {
log.Panicf("encoding-type not valid for unknown-type tag 9286 (UserComment): (%d)", uc.EncodingType)
}
encoded = make([]byte, len(uc.EncodingBytes)+8)
copy(encoded[:8], encodingTypeBytes)
copy(encoded[8:], uc.EncodingBytes)
// TODO(dustin): Confirm this size against the specification.
return encoded, uint32(len(encoded)), nil
}
func (Codec9286UserComment) Decode(valueContext *exifcommon.ValueContext) (value EncodeableValue, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
valueContext.SetUndefinedValueType(exifcommon.TypeByte)
valueBytes, err := valueContext.ReadBytes()
log.PanicIf(err)
if len(valueBytes) < 8 {
return nil, ErrUnparseableValue
}
unknownUc := Tag9286UserComment{
EncodingType: TagUndefinedType_9286_UserComment_Encoding_UNDEFINED,
EncodingBytes: []byte{},
}
encoding := valueBytes[:8]
for encodingIndex, encodingBytes := range TagUndefinedType_9286_UserComment_Encodings {
if bytes.Equal(encoding, encodingBytes) == true {
uc := Tag9286UserComment{
EncodingType: encodingIndex,
EncodingBytes: valueBytes[8:],
}
return uc, nil
}
}
exif9286Logger.Warningf(nil, "User-comment encoding not valid. Returning 'unknown' type (the default).")
return unknownUc, nil
}
func init() {
registerEncoder(
Tag9286UserComment{},
Codec9286UserComment{})
registerDecoder(
exifcommon.IfdExifStandardIfdIdentity.UnindexedString(),
0x9286,
Codec9286UserComment{})
}
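
A minimal round-trip sketch for this codec (our own wrapper; only the exported names above are from the package). Encode prepends the eight-byte character-code header, so the comment text starts at offset 8:

```go
package main

import (
	"encoding/binary"
	"fmt"

	exifundefined "github.com/dsoprea/go-exif/v3/undefined"
)

func main() {
	uc := exifundefined.Tag9286UserComment{
		EncodingType:  exifundefined.TagUndefinedType_9286_UserComment_Encoding_ASCII,
		EncodingBytes: []byte("hello, world"),
	}

	// The byte order is irrelevant for this codec, but is required by the
	// UndefinedValueEncoder interface.
	encoded, unitCount, err := exifundefined.Codec9286UserComment{}.Encode(uc, binary.BigEndian)
	if err != nil {
		panic(err)
	}

	fmt.Println(unitCount, string(encoded[8:])) // 20 "hello, world"
}
```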


@ -0,0 +1,69 @@
package exifundefined
import (
"encoding/binary"
"github.com/dsoprea/go-logging"
"github.com/dsoprea/go-exif/v3/common"
)
type TagA000FlashpixVersion struct {
FlashpixVersion string
}
func (TagA000FlashpixVersion) EncoderName() string {
return "CodecA000FlashpixVersion"
}
func (fv TagA000FlashpixVersion) String() string {
return fv.FlashpixVersion
}
type CodecA000FlashpixVersion struct {
}
func (CodecA000FlashpixVersion) Encode(value interface{}, byteOrder binary.ByteOrder) (encoded []byte, unitCount uint32, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
s, ok := value.(TagA000FlashpixVersion)
if ok == false {
log.Panicf("can only encode a TagA000FlashpixVersion")
}
return []byte(s.FlashpixVersion), uint32(len(s.FlashpixVersion)), nil
}
func (CodecA000FlashpixVersion) Decode(valueContext *exifcommon.ValueContext) (value EncodeableValue, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
valueContext.SetUndefinedValueType(exifcommon.TypeAsciiNoNul)
valueString, err := valueContext.ReadAsciiNoNul()
log.PanicIf(err)
fv := TagA000FlashpixVersion{
FlashpixVersion: valueString,
}
return fv, nil
}
func init() {
registerEncoder(
TagA000FlashpixVersion{},
CodecA000FlashpixVersion{})
registerDecoder(
exifcommon.IfdExifStandardIfdIdentity.UnindexedString(),
0xa000,
CodecA000FlashpixVersion{})
}


@ -0,0 +1,160 @@
package exifundefined
import (
"bytes"
"fmt"
"encoding/binary"
"github.com/dsoprea/go-logging"
"github.com/dsoprea/go-exif/v3/common"
)
type TagA20CSpatialFrequencyResponse struct {
Columns uint16
Rows uint16
ColumnNames []string
Values []exifcommon.Rational
}
func (TagA20CSpatialFrequencyResponse) EncoderName() string {
return "CodecA20CSpatialFrequencyResponse"
}
func (sfr TagA20CSpatialFrequencyResponse) String() string {
return fmt.Sprintf("CodecA20CSpatialFrequencyResponse<COLUMNS=(%d) ROWS=(%d)>", sfr.Columns, sfr.Rows)
}
type CodecA20CSpatialFrequencyResponse struct {
}
func (CodecA20CSpatialFrequencyResponse) Encode(value interface{}, byteOrder binary.ByteOrder) (encoded []byte, unitCount uint32, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
// TODO(dustin): Add test.
sfr, ok := value.(TagA20CSpatialFrequencyResponse)
if ok == false {
log.Panicf("can only encode a TagA20CSpatialFrequencyResponse")
}
b := new(bytes.Buffer)
err = binary.Write(b, byteOrder, sfr.Columns)
log.PanicIf(err)
err = binary.Write(b, byteOrder, sfr.Rows)
log.PanicIf(err)
// Write columns.
for _, name := range sfr.ColumnNames {
_, err := b.WriteString(name)
log.PanicIf(err)
err = b.WriteByte(0)
log.PanicIf(err)
}
// Write values.
ve := exifcommon.NewValueEncoder(byteOrder)
ed, err := ve.Encode(sfr.Values)
log.PanicIf(err)
_, err = b.Write(ed.Encoded)
log.PanicIf(err)
encoded = b.Bytes()
// TODO(dustin): Confirm this size against the specification.
return encoded, uint32(len(encoded)), nil
}
func (CodecA20CSpatialFrequencyResponse) Decode(valueContext *exifcommon.ValueContext) (value EncodeableValue, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
// TODO(dustin): Add test using known good data.
byteOrder := valueContext.ByteOrder()
valueContext.SetUndefinedValueType(exifcommon.TypeByte)
valueBytes, err := valueContext.ReadBytes()
log.PanicIf(err)
sfr := TagA20CSpatialFrequencyResponse{}
sfr.Columns = byteOrder.Uint16(valueBytes[0:2])
sfr.Rows = byteOrder.Uint16(valueBytes[2:4])
columnNames := make([]string, sfr.Columns)
// startAt is where the current column name starts.
startAt := 4
// offset is our current position.
offset := 4
currentColumnNumber := uint16(0)
for currentColumnNumber < sfr.Columns {
if valueBytes[offset] == 0 {
columnName := string(valueBytes[startAt:offset])
if len(columnName) == 0 {
log.Panicf("SFR column (%d) has zero length", currentColumnNumber)
}
columnNames[currentColumnNumber] = columnName
currentColumnNumber++
offset++
startAt = offset
continue
}
offset++
}
sfr.ColumnNames = columnNames
rawRationalBytes := valueBytes[offset:]
rationalSize := exifcommon.TypeRational.Size()
if len(rawRationalBytes)%rationalSize > 0 {
log.Panicf("SFR rationals not aligned: (%d) %% (%d) > 0", len(rawRationalBytes), rationalSize)
}
rationalCount := len(rawRationalBytes) / rationalSize
parser := new(exifcommon.Parser)
items, err := parser.ParseRationals(rawRationalBytes, uint32(rationalCount), byteOrder)
log.PanicIf(err)
sfr.Values = items
return sfr, nil
}
func init() {
registerEncoder(
TagA20CSpatialFrequencyResponse{},
CodecA20CSpatialFrequencyResponse{})
registerDecoder(
exifcommon.IfdExifStandardIfdIdentity.UnindexedString(),
0xa20c,
CodecA20CSpatialFrequencyResponse{})
}


@ -0,0 +1,79 @@
package exifundefined
import (
"fmt"
"encoding/binary"
"github.com/dsoprea/go-logging"
"github.com/dsoprea/go-exif/v3/common"
)
type TagExifA300FileSource uint32
func (TagExifA300FileSource) EncoderName() string {
return "CodecExifA300FileSource"
}
func (af TagExifA300FileSource) String() string {
return fmt.Sprintf("0x%08x", uint32(af))
}
const (
TagUndefinedType_A300_SceneType_Others TagExifA300FileSource = 0
TagUndefinedType_A300_SceneType_ScannerOfTransparentType TagExifA300FileSource = 1
TagUndefinedType_A300_SceneType_ScannerOfReflexType TagExifA300FileSource = 2
TagUndefinedType_A300_SceneType_Dsc TagExifA300FileSource = 3
)
type CodecExifA300FileSource struct {
}
func (CodecExifA300FileSource) Encode(value interface{}, byteOrder binary.ByteOrder) (encoded []byte, unitCount uint32, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
st, ok := value.(TagExifA300FileSource)
if ok == false {
log.Panicf("can only encode a TagExifA300FileSource")
}
ve := exifcommon.NewValueEncoder(byteOrder)
ed, err := ve.Encode([]uint32{uint32(st)})
log.PanicIf(err)
// TODO(dustin): Confirm this size against the specification. It's non-specific about what type it is, but it looks to be no more than a single integer scalar. So, we're assuming it's a LONG.
return ed.Encoded, 1, nil
}
func (CodecExifA300FileSource) Decode(valueContext *exifcommon.ValueContext) (value EncodeableValue, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
valueContext.SetUndefinedValueType(exifcommon.TypeLong)
valueLongs, err := valueContext.ReadLongs()
log.PanicIf(err)
return TagExifA300FileSource(valueLongs[0]), nil
}
func init() {
registerEncoder(
TagExifA300FileSource(0),
CodecExifA300FileSource{})
registerDecoder(
exifcommon.IfdExifStandardIfdIdentity.UnindexedString(),
0xa300,
CodecExifA300FileSource{})
}


@ -0,0 +1,76 @@
package exifundefined
import (
"fmt"
"encoding/binary"
"github.com/dsoprea/go-logging"
"github.com/dsoprea/go-exif/v3/common"
)
type TagExifA301SceneType uint32
func (TagExifA301SceneType) EncoderName() string {
return "CodecExifA301SceneType"
}
func (st TagExifA301SceneType) String() string {
return fmt.Sprintf("0x%08x", uint32(st))
}
const (
TagUndefinedType_A301_SceneType_DirectlyPhotographedImage TagExifA301SceneType = 1
)
type CodecExifA301SceneType struct {
}
func (CodecExifA301SceneType) Encode(value interface{}, byteOrder binary.ByteOrder) (encoded []byte, unitCount uint32, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
st, ok := value.(TagExifA301SceneType)
if ok == false {
log.Panicf("can only encode a TagExif9101ComponentsConfiguration")
}
ve := exifcommon.NewValueEncoder(byteOrder)
ed, err := ve.Encode([]uint32{uint32(st)})
log.PanicIf(err)
// TODO(dustin): Confirm this size against the specification. It's non-specific about what type it is, but it looks to be no more than a single integer scalar. So, we're assuming it's a LONG.
return ed.Encoded, ed.UnitCount, nil
}
func (CodecExifA301SceneType) Decode(valueContext *exifcommon.ValueContext) (value EncodeableValue, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
valueContext.SetUndefinedValueType(exifcommon.TypeLong)
valueLongs, err := valueContext.ReadLongs()
log.PanicIf(err)
return TagExifA301SceneType(valueLongs[0]), nil
}
func init() {
registerEncoder(
TagExifA301SceneType(0),
CodecExifA301SceneType{})
registerDecoder(
exifcommon.IfdExifStandardIfdIdentity.UnindexedString(),
0xa301,
CodecExifA301SceneType{})
}


@ -0,0 +1,97 @@
package exifundefined
import (
"bytes"
"fmt"
"encoding/binary"
"github.com/dsoprea/go-logging"
"github.com/dsoprea/go-exif/v3/common"
)
type TagA302CfaPattern struct {
HorizontalRepeat uint16
VerticalRepeat uint16
CfaValue []byte
}
func (TagA302CfaPattern) EncoderName() string {
return "CodecA302CfaPattern"
}
func (cp TagA302CfaPattern) String() string {
return fmt.Sprintf("TagA302CfaPattern<HORZ-REPEAT=(%d) VERT-REPEAT=(%d) CFA-VALUE=(%d)>", cp.HorizontalRepeat, cp.VerticalRepeat, len(cp.CfaValue))
}
type CodecA302CfaPattern struct {
}
func (CodecA302CfaPattern) Encode(value interface{}, byteOrder binary.ByteOrder) (encoded []byte, unitCount uint32, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
// TODO(dustin): Add test.
cp, ok := value.(TagA302CfaPattern)
if ok == false {
log.Panicf("can only encode a TagA302CfaPattern")
}
b := new(bytes.Buffer)
err = binary.Write(b, byteOrder, cp.HorizontalRepeat)
log.PanicIf(err)
err = binary.Write(b, byteOrder, cp.VerticalRepeat)
log.PanicIf(err)
_, err = b.Write(cp.CfaValue)
log.PanicIf(err)
encoded = b.Bytes()
// TODO(dustin): Confirm this size against the specification.
return encoded, uint32(len(encoded)), nil
}
func (CodecA302CfaPattern) Decode(valueContext *exifcommon.ValueContext) (value EncodeableValue, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
// TODO(dustin): Add test using known good data.
valueContext.SetUndefinedValueType(exifcommon.TypeByte)
valueBytes, err := valueContext.ReadBytes()
log.PanicIf(err)
cp := TagA302CfaPattern{}
cp.HorizontalRepeat = valueContext.ByteOrder().Uint16(valueBytes[0:2])
cp.VerticalRepeat = valueContext.ByteOrder().Uint16(valueBytes[2:4])
expectedLength := int(cp.HorizontalRepeat * cp.VerticalRepeat)
cp.CfaValue = valueBytes[4 : 4+expectedLength]
return cp, nil
}
func init() {
registerEncoder(
TagA302CfaPattern{},
CodecA302CfaPattern{})
registerDecoder(
exifcommon.IfdExifStandardIfdIdentity.UnindexedString(),
0xa302,
CodecA302CfaPattern{})
}


@ -0,0 +1,69 @@
package exifundefined
import (
"encoding/binary"
"github.com/dsoprea/go-logging"
"github.com/dsoprea/go-exif/v3/common"
)
type Tag0002InteropVersion struct {
InteropVersion string
}
func (Tag0002InteropVersion) EncoderName() string {
return "Codec0002InteropVersion"
}
func (iv Tag0002InteropVersion) String() string {
return iv.InteropVersion
}
type Codec0002InteropVersion struct {
}
func (Codec0002InteropVersion) Encode(value interface{}, byteOrder binary.ByteOrder) (encoded []byte, unitCount uint32, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
s, ok := value.(Tag0002InteropVersion)
if ok == false {
log.Panicf("can only encode a Tag0002InteropVersion")
}
return []byte(s.InteropVersion), uint32(len(s.InteropVersion)), nil
}
func (Codec0002InteropVersion) Decode(valueContext *exifcommon.ValueContext) (value EncodeableValue, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
valueContext.SetUndefinedValueType(exifcommon.TypeAsciiNoNul)
valueString, err := valueContext.ReadAsciiNoNul()
log.PanicIf(err)
iv := Tag0002InteropVersion{
InteropVersion: valueString,
}
return iv, nil
}
func init() {
registerEncoder(
Tag0002InteropVersion{},
Codec0002InteropVersion{})
registerDecoder(
exifcommon.IfdExifIopStandardIfdIdentity.UnindexedString(),
0x0002,
Codec0002InteropVersion{})
}


@ -0,0 +1,65 @@
package exifundefined
import (
"encoding/binary"
"github.com/dsoprea/go-logging"
"github.com/dsoprea/go-exif/v3/common"
)
type Tag001BGPSProcessingMethod struct {
string
}
func (Tag001BGPSProcessingMethod) EncoderName() string {
return "Codec001BGPSProcessingMethod"
}
func (gpm Tag001BGPSProcessingMethod) String() string {
return gpm.string
}
type Codec001BGPSProcessingMethod struct {
}
func (Codec001BGPSProcessingMethod) Encode(value interface{}, byteOrder binary.ByteOrder) (encoded []byte, unitCount uint32, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
s, ok := value.(Tag001BGPSProcessingMethod)
if ok == false {
log.Panicf("can only encode a Tag001BGPSProcessingMethod")
}
return []byte(s.string), uint32(len(s.string)), nil
}
func (Codec001BGPSProcessingMethod) Decode(valueContext *exifcommon.ValueContext) (value EncodeableValue, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
valueContext.SetUndefinedValueType(exifcommon.TypeAsciiNoNul)
valueString, err := valueContext.ReadAsciiNoNul()
log.PanicIf(err)
return Tag001BGPSProcessingMethod{valueString}, nil
}
func init() {
registerEncoder(
Tag001BGPSProcessingMethod{},
Codec001BGPSProcessingMethod{})
registerDecoder(
exifcommon.IfdGpsInfoStandardIfdIdentity.UnindexedString(),
0x001b,
Codec001BGPSProcessingMethod{})
}


@ -0,0 +1,65 @@
package exifundefined
import (
"encoding/binary"
"github.com/dsoprea/go-logging"
"github.com/dsoprea/go-exif/v3/common"
)
type Tag001CGPSAreaInformation struct {
string
}
func (Tag001CGPSAreaInformation) EncoderName() string {
return "Codec001CGPSAreaInformation"
}
func (gai Tag001CGPSAreaInformation) String() string {
return gai.string
}
type Codec001CGPSAreaInformation struct {
}
func (Codec001CGPSAreaInformation) Encode(value interface{}, byteOrder binary.ByteOrder) (encoded []byte, unitCount uint32, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
s, ok := value.(Tag001CGPSAreaInformation)
if ok == false {
log.Panicf("can only encode a Tag001CGPSAreaInformation")
}
return []byte(s.string), uint32(len(s.string)), nil
}
func (Codec001CGPSAreaInformation) Decode(valueContext *exifcommon.ValueContext) (value EncodeableValue, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
valueContext.SetUndefinedValueType(exifcommon.TypeAsciiNoNul)
valueString, err := valueContext.ReadAsciiNoNul()
log.PanicIf(err)
return Tag001CGPSAreaInformation{valueString}, nil
}
func init() {
registerEncoder(
Tag001CGPSAreaInformation{},
Codec001CGPSAreaInformation{})
registerDecoder(
exifcommon.IfdGpsInfoStandardIfdIdentity.UnindexedString(),
0x001c,
Codec001CGPSAreaInformation{})
}


@ -0,0 +1,42 @@
package exifundefined
import (
"github.com/dsoprea/go-logging"
)
// UndefinedTagHandle defines one undefined-type tag with a corresponding
// decoder.
type UndefinedTagHandle struct {
IfdPath string
TagId uint16
}
func registerEncoder(entity EncodeableValue, encoder UndefinedValueEncoder) {
typeName := entity.EncoderName()
_, found := encoders[typeName]
if found == true {
log.Panicf("encoder already registered: %v", typeName)
}
encoders[typeName] = encoder
}
func registerDecoder(ifdPath string, tagId uint16, decoder UndefinedValueDecoder) {
uth := UndefinedTagHandle{
IfdPath: ifdPath,
TagId: tagId,
}
_, found := decoders[uth]
if found == true {
log.Panicf("decoder already registered: %v", uth)
}
decoders[uth] = decoder
}
var (
encoders = make(map[string]UndefinedValueEncoder)
decoders = make(map[UndefinedTagHandle]UndefinedValueDecoder)
)

44
vendor/github.com/dsoprea/go-exif/v3/undefined/type.go generated vendored Normal file

@ -0,0 +1,44 @@
package exifundefined
import (
"errors"
"encoding/binary"
"github.com/dsoprea/go-exif/v3/common"
)
const (
// UnparseableUnknownTagValuePlaceholder is the string to use for an unknown
// undefined tag.
UnparseableUnknownTagValuePlaceholder = "!UNKNOWN"
// UnparseableHandledTagValuePlaceholder is the string to use for a known
// value that is not parseable.
UnparseableHandledTagValuePlaceholder = "!MALFORMED"
)
var (
// ErrUnparseableValue is the error for a value that we should have been
// able to parse but were not able to.
ErrUnparseableValue = errors.New("unparseable undefined tag")
)
// UndefinedValueEncoder knows how to encode an undefined-type tag's value to
// bytes.
type UndefinedValueEncoder interface {
Encode(value interface{}, byteOrder binary.ByteOrder) (encoded []byte, unitCount uint32, err error)
}
// EncodeableValue wraps a value with the information that will be needed to re-
// encode it later.
type EncodeableValue interface {
EncoderName() string
String() string
}
// UndefinedValueDecoder knows how to decode an undefined-type tag's value from
// bytes.
type UndefinedValueDecoder interface {
Decode(valueContext *exifcommon.ValueContext) (value EncodeableValue, err error)
}
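
To illustrate the contract these interfaces define, here is a sketch of a codec for a hypothetical undefined-type tag 0xf123 (the type and tag ID are invented for illustration; it is not a real EXIF tag). Because registerEncoder and registerDecoder are unexported, a codec like this would have to live inside the exifundefined package:

```go
package exifundefined

import (
	"encoding/binary"
	"fmt"

	"github.com/dsoprea/go-exif/v3/common"
)

// TagF123Example is a hypothetical undefined-type value.
type TagF123Example struct {
	Raw []byte
}

func (TagF123Example) EncoderName() string {
	return "CodecF123Example"
}

func (t TagF123Example) String() string {
	return fmt.Sprintf("TagF123Example<LEN=(%d)>", len(t.Raw))
}

type CodecF123Example struct{}

func (CodecF123Example) Encode(value interface{}, byteOrder binary.ByteOrder) (encoded []byte, unitCount uint32, err error) {
	t, ok := value.(TagF123Example)
	if !ok {
		return nil, 0, fmt.Errorf("can only encode a TagF123Example")
	}

	return t.Raw, uint32(len(t.Raw)), nil
}

func (CodecF123Example) Decode(valueContext *exifcommon.ValueContext) (EncodeableValue, error) {
	// Undefined values carry no type on the wire; read them as raw bytes.
	valueContext.SetUndefinedValueType(exifcommon.TypeByte)

	raw, err := valueContext.ReadBytes()
	if err != nil {
		return nil, err
	}

	return TagF123Example{Raw: raw}, nil
}

func init() {
	registerEncoder(TagF123Example{}, CodecF123Example{})

	registerDecoder(
		exifcommon.IfdExifStandardIfdIdentity.UnindexedString(),
		0xf123,
		CodecF123Example{})
}
```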

237
vendor/github.com/dsoprea/go-exif/v3/utility.go generated vendored Normal file

@ -0,0 +1,237 @@
package exif
import (
"fmt"
"io"
"math"
"github.com/dsoprea/go-logging"
"github.com/dsoprea/go-utility/v2/filesystem"
"github.com/dsoprea/go-exif/v3/common"
"github.com/dsoprea/go-exif/v3/undefined"
)
var (
utilityLogger = log.NewLogger("exif.utility")
)
// ExifTag is one simple representation of a tag in a flat list of all of them.
type ExifTag struct {
// IfdPath is the fully-qualified IFD path (even though it is not named as
// such).
IfdPath string `json:"ifd_path"`
// TagId is the tag-ID.
TagId uint16 `json:"id"`
// TagName is the tag-name. This is never empty.
TagName string `json:"name"`
// UnitCount is the recorded number of units constituting the value.
UnitCount uint32 `json:"unit_count"`
// TagTypeId is the type-ID.
TagTypeId exifcommon.TagTypePrimitive `json:"type_id"`
// TagTypeName is the type name.
TagTypeName string `json:"type_name"`
// Value is the decoded value.
Value interface{} `json:"value"`
// ValueBytes is the raw, encoded value.
ValueBytes []byte `json:"value_bytes"`
// FormattedFirst is the human representation of the first value (tag values
// are always an array).
FormattedFirst string `json:"formatted_first"`
// Formatted is the human representation of the complete value.
Formatted string `json:"formatted"`
// ChildIfdPath is the name of the child IFD this tag represents (if it
// represents any). Otherwise, this is empty.
ChildIfdPath string `json:"child_ifd_path"`
}
// String returns a string representation.
func (et ExifTag) String() string {
return fmt.Sprintf(
"ExifTag<"+
"IFD-PATH=[%s] "+
"TAG-ID=(0x%02x) "+
"TAG-NAME=[%s] "+
"TAG-TYPE=[%s] "+
"VALUE=[%v] "+
"VALUE-BYTES=(%d) "+
"CHILD-IFD-PATH=[%s]",
et.IfdPath, et.TagId, et.TagName, et.TagTypeName, et.FormattedFirst,
len(et.ValueBytes), et.ChildIfdPath)
}
// GetFlatExifData returns a simple, flat representation of all tags.
func GetFlatExifData(exifData []byte, so *ScanOptions) (exifTags []ExifTag, med *MiscellaneousExifData, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
sb := rifs.NewSeekableBufferWithBytes(exifData)
exifTags, med, err = getFlatExifDataUniversalSearchWithReadSeeker(sb, so, false)
log.PanicIf(err)
return exifTags, med, nil
}
// RELEASE(dustin): GetFlatExifDataUniversalSearch is a kludge to allow universal tag searching in a backwards-compatible manner. For the next release, undo this and simply add the flag to GetFlatExifData.
// GetFlatExifDataUniversalSearch returns a simple, flat representation of all
// tags.
func GetFlatExifDataUniversalSearch(exifData []byte, so *ScanOptions, doUniversalSearch bool) (exifTags []ExifTag, med *MiscellaneousExifData, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
sb := rifs.NewSeekableBufferWithBytes(exifData)
exifTags, med, err = getFlatExifDataUniversalSearchWithReadSeeker(sb, so, doUniversalSearch)
log.PanicIf(err)
return exifTags, med, nil
}
// RELEASE(dustin): GetFlatExifDataUniversalSearchWithReadSeeker is a kludge to allow using a ReadSeeker in a backwards-compatible manner. For the next release, drop this and refactor GetFlatExifDataUniversalSearch to take a ReadSeeker.
// GetFlatExifDataUniversalSearchWithReadSeeker returns a simple, flat
// representation of all tags given a ReadSeeker.
func GetFlatExifDataUniversalSearchWithReadSeeker(rs io.ReadSeeker, so *ScanOptions, doUniversalSearch bool) (exifTags []ExifTag, med *MiscellaneousExifData, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
exifTags, med, err = getFlatExifDataUniversalSearchWithReadSeeker(rs, so, doUniversalSearch)
log.PanicIf(err)
return exifTags, med, nil
}
// getFlatExifDataUniversalSearchWithReadSeeker returns a simple, flat
// representation of all tags given a ReadSeeker.
func getFlatExifDataUniversalSearchWithReadSeeker(rs io.ReadSeeker, so *ScanOptions, doUniversalSearch bool) (exifTags []ExifTag, med *MiscellaneousExifData, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
headerData := make([]byte, ExifSignatureLength)
if _, err = io.ReadFull(rs, headerData); err != nil {
if err == io.EOF {
return nil, nil, err
}
log.Panic(err)
}
eh, err := ParseExifHeader(headerData)
log.PanicIf(err)
im, err := exifcommon.NewIfdMappingWithStandard()
log.PanicIf(err)
ti := NewTagIndex()
if doUniversalSearch == true {
ti.SetUniversalSearch(true)
}
ebs := NewExifReadSeeker(rs)
ie := NewIfdEnumerate(im, ti, ebs, eh.ByteOrder)
exifTags = make([]ExifTag, 0)
visitor := func(ite *IfdTagEntry) (err error) {
// This encodes down to base64. Since this is an example tool and we do not
// expect to ever decode the output, we are not worried about
// specifically base64-encoding it in order to have a measure of
// control.
valueBytes, err := ite.GetRawBytes()
if err != nil {
if err == exifundefined.ErrUnparseableValue {
return nil
}
log.Panic(err)
}
value, err := ite.Value()
if err != nil {
if err == exifcommon.ErrUnhandledUndefinedTypedTag {
value = exifundefined.UnparseableUnknownTagValuePlaceholder
} else if log.Is(err, exifcommon.ErrParseFail) == true {
utilityLogger.Warningf(nil,
"Could not parse value for tag [%s] (%04x) [%s].",
ite.IfdPath(), ite.TagId(), ite.TagName())
return nil
} else {
log.Panic(err)
}
}
et := ExifTag{
IfdPath: ite.IfdPath(),
TagId: ite.TagId(),
TagName: ite.TagName(),
UnitCount: ite.UnitCount(),
TagTypeId: ite.TagType(),
TagTypeName: ite.TagType().String(),
Value: value,
ValueBytes: valueBytes,
ChildIfdPath: ite.ChildIfdPath(),
}
et.Formatted, err = ite.Format()
log.PanicIf(err)
et.FormattedFirst, err = ite.FormatFirst()
log.PanicIf(err)
exifTags = append(exifTags, et)
return nil
}
med, err = ie.Scan(exifcommon.IfdStandardIfdIdentity, eh.FirstIfdOffset, visitor, nil)
log.PanicIf(err)
return exifTags, med, nil
}
// GpsDegreesEquals returns true if the two `GpsDegrees` are identical.
func GpsDegreesEquals(gi1, gi2 GpsDegrees) bool {
if gi2.Orientation != gi1.Orientation {
return false
}
// Treat the next representable float above each component as an exclusive
// right bound, so only effectively identical values compare equal.
degreesRightBound := math.Nextafter(gi1.Degrees, gi1.Degrees+1)
minutesRightBound := math.Nextafter(gi1.Minutes, gi1.Minutes+1)
secondsRightBound := math.Nextafter(gi1.Seconds, gi1.Seconds+1)
if gi2.Degrees < gi1.Degrees || gi2.Degrees >= degreesRightBound {
return false
} else if gi2.Minutes < gi1.Minutes || gi2.Minutes >= minutesRightBound {
return false
} else if gi2.Seconds < gi1.Seconds || gi2.Seconds >= secondsRightBound {
return false
}
return true
}
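
As a usage sketch for GetFlatExifData (our own wrapper, not part of this commit): SearchAndExtractExif is the package's helper for locating the EXIF signature inside a larger image; the image.jpg input and the nil ScanOptions are assumptions for illustration:

```go
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/dsoprea/go-exif/v3"
)

func main() {
	imageData, err := ioutil.ReadFile("image.jpg") // hypothetical input
	if err != nil {
		panic(err)
	}

	// Find the raw EXIF blob that GetFlatExifData expects.
	rawExif, err := exif.SearchAndExtractExif(imageData)
	if err != nil {
		panic(err)
	}

	entries, _, err := exif.GetFlatExifData(rawExif, nil)
	if err != nil {
		panic(err)
	}

	for _, et := range entries {
		fmt.Printf("%s 0x%04x %s = %s\n", et.IfdPath, et.TagId, et.TagName, et.Formatted)
	}
}
```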

0
vendor/github.com/dsoprea/go-iptc/.MODULE_ROOT generated vendored Normal file

21
vendor/github.com/dsoprea/go-iptc/LICENSE generated vendored Normal file

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2020 Dustin Oprea
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

3
vendor/github.com/dsoprea/go-iptc/README.md generated vendored Normal file
View File

@ -0,0 +1,3 @@
# Overview
This project provides functionality to parse a series of IPTC records/datasets. It also provides name resolution, but other constraints/validation is not yet implemented (though there is structure present that can accommodate this when desired/required).
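A minimal usage sketch (the `raw` slice is a hypothetical placeholder for an IPTC blob already extracted from an image, e.g. a JPEG APP13 segment):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/dsoprea/go-iptc"
)

func main() {
	var raw []byte // hypothetical: IPTC data extracted elsewhere

	tags, err := iptc.ParseStream(bytes.NewReader(raw))
	if err != nil {
		panic(err)
	}

	// Only printable, standard tags; first value per repeatable tag.
	for name, value := range iptc.GetSimpleDictionaryFromParsedTags(tags) {
		fmt.Printf("%s: %s\n", name, value)
	}
}
```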

99
vendor/github.com/dsoprea/go-iptc/standard.go generated vendored Normal file
View File

@ -0,0 +1,99 @@
package iptc
import (
"errors"
)
type StreamTagInfo struct {
Description string
}
var (
standardTags = map[StreamTagKey]StreamTagInfo{
StreamTagKey{1, 120}: StreamTagInfo{"ARM Identifier"},
StreamTagKey{1, 122}: StreamTagInfo{"ARM Version"},
StreamTagKey{2, 0}: StreamTagInfo{"Record Version"},
StreamTagKey{2, 3}: StreamTagInfo{"Object Type Reference"},
StreamTagKey{2, 4}: StreamTagInfo{"Object Attribute Reference"},
StreamTagKey{2, 5}: StreamTagInfo{"Object Name"},
StreamTagKey{2, 7}: StreamTagInfo{"Edit Status"},
StreamTagKey{2, 8}: StreamTagInfo{"Editorial Update"},
StreamTagKey{2, 10}: StreamTagInfo{"Urgency"},
StreamTagKey{2, 12}: StreamTagInfo{"Subject Reference"},
StreamTagKey{2, 15}: StreamTagInfo{"Category"},
StreamTagKey{2, 20}: StreamTagInfo{"Supplemental Category"},
StreamTagKey{2, 22}: StreamTagInfo{"Fixture Identifier"},
StreamTagKey{2, 25}: StreamTagInfo{"Keywords"},
StreamTagKey{2, 26}: StreamTagInfo{"Content Location Code"},
StreamTagKey{2, 27}: StreamTagInfo{"Content Location Name"},
StreamTagKey{2, 30}: StreamTagInfo{"Release Date"},
StreamTagKey{2, 35}: StreamTagInfo{"Release Time"},
StreamTagKey{2, 37}: StreamTagInfo{"Expiration Date"},
StreamTagKey{2, 38}: StreamTagInfo{"Expiration Time"},
StreamTagKey{2, 40}: StreamTagInfo{"Special Instructions"},
StreamTagKey{2, 42}: StreamTagInfo{"Action Advised"},
StreamTagKey{2, 45}: StreamTagInfo{"Reference Service"},
StreamTagKey{2, 47}: StreamTagInfo{"Reference Date"},
StreamTagKey{2, 50}: StreamTagInfo{"Reference Number"},
StreamTagKey{2, 55}: StreamTagInfo{"Date Created"},
StreamTagKey{2, 60}: StreamTagInfo{"Time Created"},
StreamTagKey{2, 62}: StreamTagInfo{"Digital Creation Date"},
StreamTagKey{2, 63}: StreamTagInfo{"Digital Creation Time"},
StreamTagKey{2, 65}: StreamTagInfo{"Originating Program"},
StreamTagKey{2, 70}: StreamTagInfo{"Program Version"},
StreamTagKey{2, 75}: StreamTagInfo{"Object Cycle"},
StreamTagKey{2, 80}: StreamTagInfo{"By-line"},
StreamTagKey{2, 85}: StreamTagInfo{"By-line Title"},
StreamTagKey{2, 90}: StreamTagInfo{"City"},
StreamTagKey{2, 92}: StreamTagInfo{"Sublocation"},
StreamTagKey{2, 95}: StreamTagInfo{"Province/State"},
StreamTagKey{2, 100}: StreamTagInfo{"Country/Primary Location Code"},
StreamTagKey{2, 101}: StreamTagInfo{"Country/Primary Location Name"},
StreamTagKey{2, 103}: StreamTagInfo{"Original Transmission Reference"},
StreamTagKey{2, 105}: StreamTagInfo{"Headline"},
StreamTagKey{2, 110}: StreamTagInfo{"Credit"},
StreamTagKey{2, 115}: StreamTagInfo{"Source"},
StreamTagKey{2, 116}: StreamTagInfo{"Copyright Notice"},
StreamTagKey{2, 118}: StreamTagInfo{"Contact"},
StreamTagKey{2, 120}: StreamTagInfo{"Caption/Abstract"},
StreamTagKey{2, 122}: StreamTagInfo{"Writer/Editor"},
StreamTagKey{2, 125}: StreamTagInfo{"Rasterized Caption"},
StreamTagKey{2, 130}: StreamTagInfo{"Image Type"},
StreamTagKey{2, 131}: StreamTagInfo{"Image Orientation"},
StreamTagKey{2, 135}: StreamTagInfo{"Language Identifier"},
StreamTagKey{2, 150}: StreamTagInfo{"Audio Type"},
StreamTagKey{2, 151}: StreamTagInfo{"Audio Sampling Rate"},
StreamTagKey{2, 152}: StreamTagInfo{"Audio Sampling Resolution"},
StreamTagKey{2, 153}: StreamTagInfo{"Audio Duration"},
StreamTagKey{2, 154}: StreamTagInfo{"Audio Outcue"},
StreamTagKey{2, 200}: StreamTagInfo{"ObjectData Preview File Format"},
StreamTagKey{2, 201}: StreamTagInfo{"ObjectData Preview File Format Version"},
StreamTagKey{2, 202}: StreamTagInfo{"ObjectData Preview Data"},
StreamTagKey{7, 10}: StreamTagInfo{"Size Mode"},
StreamTagKey{7, 20}: StreamTagInfo{"Max Subfile Size"},
StreamTagKey{7, 90}: StreamTagInfo{"ObjectData Size Announced"},
StreamTagKey{7, 95}: StreamTagInfo{"Maximum ObjectData Size"},
StreamTagKey{8, 10}: StreamTagInfo{"Subfile"},
StreamTagKey{9, 10}: StreamTagInfo{"Confirmed ObjectData Size"},
}
)
var (
// ErrTagNotStandard indicates that the given tag is not known among the
// documented standard set.
ErrTagNotStandard = errors.New("not a standard tag")
)
// GetTagInfo returns the info for the given tag. Returns ErrTagNotStandard if
// not known.
func GetTagInfo(recordNumber, datasetNumber int) (sti StreamTagInfo, err error) {
stk := StreamTagKey{uint8(recordNumber), uint8(datasetNumber)}
sti, found := standardTags[stk]
if found == false {
return sti, ErrTagNotStandard
}
return sti, nil
}

277
vendor/github.com/dsoprea/go-iptc/tag.go generated vendored Normal file
View File

@ -0,0 +1,277 @@
package iptc
import (
"errors"
"fmt"
"io"
"strings"
"unicode"
"encoding/binary"
"github.com/dsoprea/go-logging"
)
var (
// TODO(dustin): We're still not sure if this is the right endianness. Neither the IPTC nor the IIM documentation seems to state one or the other.
// defaultEncoding is the standard encoding for the IPTC format.
defaultEncoding = binary.BigEndian
)
var (
// ErrInvalidTagMarker indicates that the tag can not be parsed because the
// tag boundary marker is not the expected value.
ErrInvalidTagMarker = errors.New("invalid tag marker")
)
// Tag describes one tag read from the stream.
type Tag struct {
recordNumber uint8
datasetNumber uint8
dataSize uint64
}
// String expresses state as a string.
func (tag *Tag) String() string {
return fmt.Sprintf(
"Tag<DATASET=(%d:%d) DATA-SIZE=(%d)>",
tag.recordNumber, tag.datasetNumber, tag.dataSize)
}
// DecodeTag parses one tag from the stream.
func DecodeTag(r io.Reader) (tag Tag, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
tagMarker := uint8(0)
err = binary.Read(r, defaultEncoding, &tagMarker)
if err != nil {
if err == io.EOF {
return tag, err
}
log.Panic(err)
}
if tagMarker != 0x1c {
return tag, ErrInvalidTagMarker
}
recordNumber := uint8(0)
err = binary.Read(r, defaultEncoding, &recordNumber)
log.PanicIf(err)
datasetNumber := uint8(0)
err = binary.Read(r, defaultEncoding, &datasetNumber)
log.PanicIf(err)
dataSize16Raw := uint16(0)
err = binary.Read(r, defaultEncoding, &dataSize16Raw)
log.PanicIf(err)
var dataSize uint64
if dataSize16Raw < 32768 {
// The MSB is (0): the size fits in the sixteen bits we already read.
dataSize = uint64(dataSize16Raw)
} else {
// This field is just the length of the length (has the MSB set to (1)).
// Clear the MSB.
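// For example (hypothetical value): 0x8004 has the MSB set and a low word of
// (4), so the actual size follows in the next four bytes.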
lengthLength := dataSize16Raw & 32767
if lengthLength == 4 {
dataSize32Raw := uint32(0)
err := binary.Read(r, defaultEncoding, &dataSize32Raw)
log.PanicIf(err)
dataSize = uint64(dataSize32Raw)
} else if lengthLength == 8 {
err := binary.Read(r, defaultEncoding, &dataSize)
log.PanicIf(err)
} else {
// No specific sizes or limits are specified in the specification
// so we need to impose our own limits in order to implement.
log.Panicf("extended data-set tag size is not supported: (%d)", lengthLength)
}
}
tag = Tag{
recordNumber: recordNumber,
datasetNumber: datasetNumber,
dataSize: dataSize,
}
return tag, nil
}
// StreamTagKey is a convenience type that lets us key our index with a high-
// level type.
type StreamTagKey struct {
// RecordNumber is the major classification of the dataset.
RecordNumber uint8
// DatasetNumber is the minor classification of the dataset.
DatasetNumber uint8
}
// String returns a descriptive string.
func (stk StreamTagKey) String() string {
return fmt.Sprintf("%d:%d", stk.RecordNumber, stk.DatasetNumber)
}
// TagData is a convenience wrapper around a byte-slice.
type TagData []byte
// IsPrintable returns true if all characters are printable.
func (tg TagData) IsPrintable() bool {
for _, b := range tg {
r := rune(b)
// Newline characters aren't considered printable by unicode, but we accept them here.
if r == 0x0d || r == 0x0a {
continue
}
if unicode.IsGraphic(r) == false || unicode.IsPrint(r) == false {
return false
}
}
return true
}
// String returns a descriptive string. If the data doesn't include any non-
// printable characters, it will include the value itself.
func (tg TagData) String() string {
if tg.IsPrintable() == true {
return string(tg)
} else {
return fmt.Sprintf("BINARY<(%d) bytes>", len(tg))
}
}
// ParsedTags is the complete, unordered set of tags parsed from the stream.
type ParsedTags map[StreamTagKey][]TagData
// ParseStream parses a serial sequence of tags and tag data out of the stream.
func ParseStream(r io.Reader) (tags map[StreamTagKey][]TagData, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
tags = make(ParsedTags)
for {
tag, err := DecodeTag(r)
if err != nil {
if err == io.EOF {
break
}
log.Panic(err)
}
raw := make([]byte, tag.dataSize)
_, err = io.ReadFull(r, raw)
log.PanicIf(err)
data := TagData(raw)
stk := StreamTagKey{
RecordNumber: tag.recordNumber,
DatasetNumber: tag.datasetNumber,
}
if existing, found := tags[stk]; found == true {
tags[stk] = append(existing, data)
} else {
tags[stk] = []TagData{data}
}
}
return tags, nil
}
// GetSimpleDictionaryFromParsedTags returns a dictionary of tag names to tag
// values, where all values are strings and any tag that had a non-printable
// value is omitted. We will also only return the first value, therefore
// dropping any follow-up values for repeatable tags. Non-standard tags are
// ignored, and CR/LF characters are trimmed from the ends of strings.
//
// This is a convenience function for quickly displaying only the summary IPTC
// metadata that a user might actually be interested in at first glance.
func GetSimpleDictionaryFromParsedTags(pt ParsedTags) (distilled map[string]string) {
distilled = make(map[string]string)
for stk, dataSlice := range pt {
sti, err := GetTagInfo(int(stk.RecordNumber), int(stk.DatasetNumber))
if err != nil {
if err == ErrTagNotStandard {
continue
} else {
log.Panic(err)
}
}
data := dataSlice[0]
if data.IsPrintable() == false {
continue
}
// TODO(dustin): Trim leading whitespace, too.
distilled[sti.Description] = strings.Trim(string(data), "\r\n")
}
return distilled
}
// GetDictionaryFromParsedTags returns all tags. It keeps non-printable
// values, printing a hex placeholder rather than the raw bytes. It keeps
// non-standard tags (printing the fully-qualified dataset ID rather than a
// name) and repeated values (with a counter appended to the key).
func GetDictionaryFromParsedTags(pt ParsedTags) (distilled map[string]string) {
distilled = make(map[string]string)
for stk, dataSlice := range pt {
var keyPhrase string
sti, err := GetTagInfo(int(stk.RecordNumber), int(stk.DatasetNumber))
if err != nil {
if err == ErrTagNotStandard {
keyPhrase = fmt.Sprintf("%s (not a standard tag)", stk.String())
} else {
log.Panic(err)
}
} else {
keyPhrase = sti.Description
}
for i, data := range dataSlice {
currentKeyPhrase := keyPhrase
if len(dataSlice) > 1 {
currentKeyPhrase = fmt.Sprintf("%s (%d)", currentKeyPhrase, i+1)
}
var presentable string
if data.IsPrintable() == false {
presentable = fmt.Sprintf("[BINARY] %s", DumpBytesToString(data))
} else {
presentable = string(data)
}
distilled[currentKeyPhrase] = presentable
}
}
return distilled
}

70
vendor/github.com/dsoprea/go-iptc/testing_common.go generated vendored Normal file
View File

@ -0,0 +1,70 @@
package iptc
import (
"os"
"path"
"github.com/dsoprea/go-logging"
)
var (
testDataRelFilepath = "iptc.data"
)
var (
moduleRootPath = ""
assetsPath = ""
)
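// GetModuleRootPath returns the root path of this module. It honors the
// IPTC_MODULE_ROOT_PATH environment variable if set; otherwise it walks up
// from the current directory until it finds a .MODULE_ROOT stamp file.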
func GetModuleRootPath() string {
if moduleRootPath == "" {
moduleRootPath = os.Getenv("IPTC_MODULE_ROOT_PATH")
if moduleRootPath != "" {
return moduleRootPath
}
currentWd, err := os.Getwd()
log.PanicIf(err)
currentPath := currentWd
visited := make([]string, 0)
for {
tryStampFilepath := path.Join(currentPath, ".MODULE_ROOT")
_, err := os.Stat(tryStampFilepath)
if err != nil && os.IsNotExist(err) != true {
log.Panic(err)
} else if err == nil {
break
}
visited = append(visited, tryStampFilepath)
currentPath = path.Dir(currentPath)
if currentPath == "/" {
log.Panicf("could not find module-root: %v", visited)
}
}
moduleRootPath = currentPath
}
return moduleRootPath
}
func GetTestAssetsPath() string {
if assetsPath == "" {
moduleRootPath := GetModuleRootPath()
assetsPath = path.Join(moduleRootPath, "assets")
}
return assetsPath
}
func GetTestDataFilepath() string {
assetsPath := GetTestAssetsPath()
filepath := path.Join(assetsPath, testDataRelFilepath)
return filepath
}

25
vendor/github.com/dsoprea/go-iptc/utility.go generated vendored Normal file
View File

@ -0,0 +1,25 @@
package iptc
import (
"bytes"
"fmt"
"github.com/dsoprea/go-logging"
)
// DumpBytesToString returns a stringified list of hex-encoded bytes.
func DumpBytesToString(data []byte) string {
b := new(bytes.Buffer)
for i, x := range data {
_, err := b.WriteString(fmt.Sprintf("%02x", x))
log.PanicIf(err)
if i < len(data)-1 {
_, err := b.WriteRune(' ')
log.PanicIf(err)
}
}
return b.String()
}

12
vendor/github.com/dsoprea/go-logging/.travis.yml generated vendored Normal file
View File

@ -0,0 +1,12 @@
language: go
go:
- tip
install:
- go get -t ./...
- go get github.com/mattn/goveralls
script:
# v1
- go test -v .
# v2
- cd v2
- goveralls -v -service=travis-ci

9
vendor/github.com/dsoprea/go-logging/LICENSE generated vendored Normal file
View File

@ -0,0 +1,9 @@
MIT LICENSE
Copyright 2020 Dustin Oprea
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

223
vendor/github.com/dsoprea/go-logging/README.md generated vendored Normal file
View File

@ -0,0 +1,223 @@
[![Build Status](https://travis-ci.org/dsoprea/go-logging.svg?branch=master)](https://travis-ci.org/dsoprea/go-logging)
[![Coverage Status](https://coveralls.io/repos/github/dsoprea/go-logging/badge.svg?branch=master)](https://coveralls.io/github/dsoprea/go-logging?branch=master)
[![Go Report Card](https://goreportcard.com/badge/github.com/dsoprea/go-logging/v2)](https://goreportcard.com/report/github.com/dsoprea/go-logging/v2)
[![GoDoc](https://godoc.org/github.com/dsoprea/go-logging/v2?status.svg)](https://godoc.org/github.com/dsoprea/go-logging/v2)
## Introduction
This project bridges several gaps that are present in the standard logging support in Go:
- Equips errors with stacktraces and provides a facility for printing them
- Inherently supports the ability for each Go file to print its messages with a prefix representing that file/package
- Adds some functions to specifically log messages of different levels (e.g. debug, error)
- Adds a `PanicIf()` function that can be used to conditionally manage errors depending on whether an error variable is `nil` or actually has an error
- Adds support for pluggable logging adapters (so the output can be sent somewhere other than the console)
- Adds configuration (such as the logging level or adapter) that can be driven from the environment
- Supports filtering to show/hide the logging of certain places of the application
- The loggers can be defined at the package level, so you can determine which Go file any log message came from.
When used with the Panic-Defer-Recover pattern in Go, even panics rising from the Go runtime will be caught and wrapped with a stacktrace. This compartmentalizes which function they could have originated from, which is, otherwise, potentially non-trivial to figure out.
## AppEngine
Go under AppEngine is very stripped down: there is no logging type (such as `Logger` in native Go) and no support for prefixing. As each logging call from this project takes a `Context`, this works cooperatively to bridge the additional gaps in AppEngine's logging support.
With standard console logging outside of this context, that parameter takes a `nil`.
## Getting Started
The simplest, possible example:
```go
package thispackage
import (
"context"
"errors"
"github.com/dsoprea/go-logging/v2"
)
var (
thisfileLog = log.NewLogger("thispackage.thisfile")
)
func a_cry_for_help(ctx context.Context) {
err := errors.New("a big error")
thisfileLog.Errorf(ctx, err, "How big is my problem: %s", "pretty big")
}
func init() {
cla := log.NewConsoleLogAdapter()
log.AddAdapter("console", cla)
}
```
Notice two things:
1. We register the "console" adapter at the bottom. The first adapter registered will be used by default.
2. We pass in a prefix (what we refer to as a "noun") to `log.NewLogger()`. This is a simple, descriptive name that represents the subject of the file. By convention, we construct this by dot-separating the current package and the name of the file. We recommend that you define a different log for every file at the package level, but it is your choice whether you want to do this or share the same logger over the entire package, define one in each struct, etc.
### Example Output
Example output from a real application (not from the above):
```
2016/09/09 12:57:44 DEBUG: user: User revisiting: [test@example.com]
2016/09/09 12:57:44 DEBUG: context: Session already inited: [DCRBDGRY6RMWANCSJXVLD7GULDH4NZEB6SBAQ3KSFIGA2LP45IIQ]
2016/09/09 12:57:44 DEBUG: session_data: Session save not necessary: [DCRBDGRY6RMWANCSJXVLD7GULDH4NZEB6SBAQ3KSFIGA2LP45IIQ]
2016/09/09 12:57:44 DEBUG: context: Got session: [DCRBDGRY6RMWANCSJXVLD7GULDH4NZEB6SBAQ3KSFIGA2LP45IIQ]
2016/09/09 12:57:44 DEBUG: session_data: Found user in session.
2016/09/09 12:57:44 DEBUG: cache: Cache miss: [geo.geocode.reverse:dhxp15x]
```
## Adapters
This project provides one built-in logging adapter, "console", which prints to the screen. To register it:
```go
cla := log.NewConsoleLogAdapter()
log.AddAdapter("console", cla)
```
### Custom Adapters
If you would like to implement your own logger, just create a struct type that satisfies the LogAdapter interface.
```go
type LogAdapter interface {
Debugf(lc *LogContext, message *string) error
Infof(lc *LogContext, message *string) error
Warningf(lc *LogContext, message *string) error
Errorf(lc *LogContext, message *string) error
}
```
The *LogContext* struct passed in provides additional information that you may need in order to do what you need to do:
```go
type LogContext struct {
Logger *Logger
Ctx context.Context
}
```
`Logger` represents your Logger instance.
Adapter example:
```go
type DummyLogAdapter struct {
}
func (dla *DummyLogAdapter) Debugf(lc *LogContext, message *string) error {
}
func (dla *DummyLogAdapter) Infof(lc *LogContext, message *string) error {
}
func (dla *DummyLogAdapter) Warningf(lc *LogContext, message *string) error {
}
func (dla *DummyLogAdapter) Errorf(lc *LogContext, message *string) error {
}
```
Then, register it:
```go
func init() {
log.AddAdapter("dummy", new(DummyLogAdapter))
}
```
If this is a task-specific implementation, just register it from the `init()` of the file that defines it.
If this is the first adapter you've registered, it will be the default one used. Otherwise, you'll have to deliberately specify it when you are creating a logger: Instead of calling `log.NewLogger(noun string)`, call `log.NewLoggerWithAdapterName(noun string, adapterName string)`.
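For example, a sketch that deliberately binds a logger to a specific, already-registered adapter:

```go
// "dummy" must already have been registered via log.AddAdapter("dummy", ...).
thisfileLog := log.NewLoggerWithAdapterName("thispackage.thisfile", "dummy")
```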
We discuss how to configure the adapter from configuration in the "Configuration" section below.
### Adapter Notes
- The `Logger` instance exports `Noun()` in the event you want to discriminate where your log entries go in your adapter. It also exports `Adapter()` for if you need to access the adapter instance from your application.
- If no adapter is registered (specifically, the default adapter-name remains empty), logging calls will be a no-op. This allows libraries to implement *go-logging* where the larger application doesn't.
## Filters
We support the ability to exclusively log for a specific set of nouns (we'll exclude any not specified):
```go
log.AddIncludeFilter("nountoshow1")
log.AddIncludeFilter("nountoshow2")
```
Depending on your needs, you might just want to exclude a couple and include the rest:
```go
log.AddExcludeFilter("nountohide1")
log.AddExcludeFilter("nountohide2")
```
We'll first hit the include-filters. If it's in there, we'll forward the log item to the adapter. If not, and there is at least one include filter in the list, we won't do anything. If the list of include filters is empty but the noun appears in the exclude list, we won't do anything.
It is a good convention to exclude the nouns of any library you are writing whose logging you do not want to generally be aware of unless you are debugging. You might call `AddExcludeFilter()` from the `init()` function at the bottom of those files unless there is some configuration variable, such as "(LibraryNameHere)DoShowLogging", that has been defined and set to TRUE.
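As a sketch, a library might hide its own logging by default (the noun here is hypothetical):

```go
func init() {
	// Matches the noun passed to log.NewLogger() in this file.
	log.AddExcludeFilter("mylibrary.mylogic")
}
```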
## Configuration
The following configuration items are available:
- *Format*: The default format used to build the message that gets sent to the adapter. It is assumed that the adapter already prefixes the message with time and log-level (since the default AppEngine logger does). The default value is: `{{.Noun}}: [{{.Level}}] {{if eq .ExcludeBypass true}} [BYPASS]{{end}} {{.Message}}`. The available tokens are "Level", "Noun", "ExcludeBypass", and "Message".
- *DefaultAdapterName*: The default name of the adapter to use when NewLogger() is called (if this isn't defined then the name of the first registered adapter will be used).
- *LevelName*: The priority-level of messages permitted to be logged (all others will be discarded). By default, it is "info". Other levels are: "debug", "warning", "error", "critical"
- *IncludeNouns*: Comma-separated list of nouns to log for. All others will be ignored.
- *ExcludeNouns*: Comma-separated list of nouns to exclude from logging.
- *ExcludeBypassLevelName*: The log-level at which we will show logging for nouns that have been excluded. Allows you to hide excessive, unimportant logging for nouns but to still see their warnings, errors, etc...
### Configuration Providers
You provide the configuration by setting a configuration-provider. Configuration providers must satisfy the `ConfigurationProvider` interface. The following are provided with the project:
- `EnvironmentConfigurationProvider`: Read values from the environment.
- `StaticConfigurationProvider`: Set values directly on the struct.
**The configuration provider must be applied before doing any logging (otherwise it will have no effect).**
Environments such as AppEngine work best with `EnvironmentConfigurationProvider` as this is generally how configuration is exposed *by* AppEngine *to* the application. You can define this configuration directly in *that* configuration.
By default, no configuration-provider is applied, the level defaults to INFO, and the format defaults to "{{.Noun}}:{{if eq .ExcludeBypass true}} [BYPASS]{{end}} {{.Message}}".
Again, if a configuration-provider does not provide a log-level or format, they will be defaulted (or left alone, if already set). If it does not provide an adapter-name, the adapter-name of the first registered adapter will be used.
Usage instructions of both follow.
### Environment-Based Configuration
```go
ecp := log.NewEnvironmentConfigurationProvider()
log.LoadConfiguration(ecp)
```
Each of the items listed at the top of the "Configuration" section can be specified in the environment using a prefix of "Log" (e.g. LogDefaultAdapterName).
### Static Configuration
```go
scp := log.NewStaticConfigurationProvider()
scp.SetLevelName(log.LevelNameWarning)
log.LoadConfiguration(scp)
```

246
vendor/github.com/dsoprea/go-logging/config.go generated vendored Normal file
View File

@ -0,0 +1,246 @@
package log
import (
"fmt"
"os"
)
// Config keys.
const (
ckFormat = "LogFormat"
ckDefaultAdapterName = "LogDefaultAdapterName"
ckLevelName = "LogLevelName"
ckIncludeNouns = "LogIncludeNouns"
ckExcludeNouns = "LogExcludeNouns"
ckExcludeBypassLevelName = "LogExcludeBypassLevelName"
)
// Other constants
const (
defaultFormat = "{{.Noun}}: [{{.Level}}] {{if eq .ExcludeBypass true}} [BYPASS]{{end}} {{.Message}}"
defaultLevelName = LevelNameInfo
)
// Config
var (
// Alternative format.
format = defaultFormat
// Alternative adapter.
defaultAdapterName = ""
// Alternative level at which to display log-items
levelName = defaultLevelName
// Configuration-driven comma-separated list of nouns to include.
includeNouns = ""
// Configuration-driven comma-separated list of nouns to exclude.
excludeNouns = ""
// Level at which to disregard exclusion (if the severity of a message
// meets or exceeds this, always display).
excludeBypassLevelName = ""
)
// Other
var (
configurationLoaded = false
)
// Return the current default adapter name.
func GetDefaultAdapterName() string {
return defaultAdapterName
}
// The adapter will automatically be the first one registered. This overrides
// that.
func SetDefaultAdapterName(name string) {
defaultAdapterName = name
}
func LoadConfiguration(cp ConfigurationProvider) {
configuredDefaultAdapterName := cp.DefaultAdapterName()
if configuredDefaultAdapterName != "" {
defaultAdapterName = configuredDefaultAdapterName
}
includeNouns = cp.IncludeNouns()
excludeNouns = cp.ExcludeNouns()
excludeBypassLevelName = cp.ExcludeBypassLevelName()
f := cp.Format()
if f != "" {
format = f
}
ln := cp.LevelName()
if ln != "" {
levelName = ln
}
configurationLoaded = true
}
func getConfigState() map[string]interface{} {
return map[string]interface{}{
"format": format,
"defaultAdapterName": defaultAdapterName,
"levelName": levelName,
"includeNouns": includeNouns,
"excludeNouns": excludeNouns,
"excludeBypassLevelName": excludeBypassLevelName,
}
}
func setConfigState(config map[string]interface{}) {
format = config["format"].(string)
defaultAdapterName = config["defaultAdapterName"].(string)
levelName = config["levelName"].(string)
includeNouns = config["includeNouns"].(string)
excludeNouns = config["excludeNouns"].(string)
excludeBypassLevelName = config["excludeBypassLevelName"].(string)
}
func getConfigDump() string {
return fmt.Sprintf(
"Current configuration:\n"+
" FORMAT=[%s]\n"+
" DEFAULT-ADAPTER-NAME=[%s]\n"+
" LEVEL-NAME=[%s]\n"+
" INCLUDE-NOUNS=[%s]\n"+
" EXCLUDE-NOUNS=[%s]\n"+
" EXCLUDE-BYPASS-LEVEL-NAME=[%s]",
format, defaultAdapterName, levelName, includeNouns, excludeNouns, excludeBypassLevelName)
}
func IsConfigurationLoaded() bool {
return configurationLoaded
}
type ConfigurationProvider interface {
// Alternative format (defaults to the built-in default format).
Format() string
// Alternative adapter (defaults to the first registered adapter).
DefaultAdapterName() string
// Alternative level at which to display log-items (defaults to
// "info").
LevelName() string
// Configuration-driven comma-separated list of nouns to include. Defaults
// to empty.
IncludeNouns() string
// Configuration-driven comma-separated list of nouns to exclude. Defaults
// to empty.
ExcludeNouns() string
// Level at which to disregard exclusion (if the severity of a message
// meets or exceeds this, always display). Defaults to empty.
ExcludeBypassLevelName() string
}
// Environment configuration-provider.
type EnvironmentConfigurationProvider struct {
}
func NewEnvironmentConfigurationProvider() *EnvironmentConfigurationProvider {
return new(EnvironmentConfigurationProvider)
}
func (ecp *EnvironmentConfigurationProvider) Format() string {
return os.Getenv(ckFormat)
}
func (ecp *EnvironmentConfigurationProvider) DefaultAdapterName() string {
return os.Getenv(ckDefaultAdapterName)
}
func (ecp *EnvironmentConfigurationProvider) LevelName() string {
return os.Getenv(ckLevelName)
}
func (ecp *EnvironmentConfigurationProvider) IncludeNouns() string {
return os.Getenv(ckIncludeNouns)
}
func (ecp *EnvironmentConfigurationProvider) ExcludeNouns() string {
return os.Getenv(ckExcludeNouns)
}
func (ecp *EnvironmentConfigurationProvider) ExcludeBypassLevelName() string {
return os.Getenv(ckExcludeBypassLevelName)
}
// Static configuration-provider.
type StaticConfigurationProvider struct {
format string
defaultAdapterName string
levelName string
includeNouns string
excludeNouns string
excludeBypassLevelName string
}
func NewStaticConfigurationProvider() *StaticConfigurationProvider {
return new(StaticConfigurationProvider)
}
func (scp *StaticConfigurationProvider) SetFormat(format string) {
scp.format = format
}
func (scp *StaticConfigurationProvider) SetDefaultAdapterName(adapterName string) {
scp.defaultAdapterName = adapterName
}
func (scp *StaticConfigurationProvider) SetLevelName(levelName string) {
scp.levelName = levelName
}
func (scp *StaticConfigurationProvider) SetIncludeNouns(includeNouns string) {
scp.includeNouns = includeNouns
}
func (scp *StaticConfigurationProvider) SetExcludeNouns(excludeNouns string) {
scp.excludeNouns = excludeNouns
}
func (scp *StaticConfigurationProvider) SetExcludeBypassLevelName(excludeBypassLevelName string) {
scp.excludeBypassLevelName = excludeBypassLevelName
}
func (scp *StaticConfigurationProvider) Format() string {
return scp.format
}
func (scp *StaticConfigurationProvider) DefaultAdapterName() string {
return scp.defaultAdapterName
}
func (scp *StaticConfigurationProvider) LevelName() string {
return scp.levelName
}
func (scp *StaticConfigurationProvider) IncludeNouns() string {
return scp.includeNouns
}
func (scp *StaticConfigurationProvider) ExcludeNouns() string {
return scp.excludeNouns
}
func (scp *StaticConfigurationProvider) ExcludeBypassLevelName() string {
return scp.excludeBypassLevelName
}
func init() {
// Do the initial configuration-load from the environment. We have to seed
// it with something for simplicity's sake.
ecp := NewEnvironmentConfigurationProvider()
LoadConfiguration(ecp)
}

View File

@ -0,0 +1,36 @@
package log
import (
golog "log"
)
type ConsoleLogAdapter struct {
}
func NewConsoleLogAdapter() LogAdapter {
return new(ConsoleLogAdapter)
}
func (cla *ConsoleLogAdapter) Debugf(lc *LogContext, message *string) error {
golog.Println(*message)
return nil
}
func (cla *ConsoleLogAdapter) Infof(lc *LogContext, message *string) error {
golog.Println(*message)
return nil
}
func (cla *ConsoleLogAdapter) Warningf(lc *LogContext, message *string) error {
golog.Println(*message)
return nil
}
func (cla *ConsoleLogAdapter) Errorf(lc *LogContext, message *string) error {
golog.Println(*message)
return nil
}

537
vendor/github.com/dsoprea/go-logging/log.go generated vendored Normal file
View File

@ -0,0 +1,537 @@
package log
import (
"bytes"
e "errors"
"fmt"
"strings"
"sync"
"text/template"
"github.com/go-errors/errors"
"golang.org/x/net/context"
)
// TODO(dustin): Finish symbol documentation
// Config severity integers.
const (
LevelDebug = iota
LevelInfo = iota
LevelWarning = iota
LevelError = iota
)
// Config severity names.
const (
LevelNameDebug = "debug"
LevelNameInfo = "info"
LevelNameWarning = "warning"
LevelNameError = "error"
)
// Severity name->integer map.
var (
LevelNameMap = map[string]int{
LevelNameDebug: LevelDebug,
LevelNameInfo: LevelInfo,
LevelNameWarning: LevelWarning,
LevelNameError: LevelError,
}
LevelNameMapR = map[int]string{
LevelDebug: LevelNameDebug,
LevelInfo: LevelNameInfo,
LevelWarning: LevelNameWarning,
LevelError: LevelNameError,
}
)
// Errors
var (
ErrAdapterAlreadyRegistered = e.New("adapter already registered")
ErrFormatEmpty = e.New("format is empty")
ErrExcludeLevelNameInvalid = e.New("exclude bypass-level is invalid")
ErrNoAdapterConfigured = e.New("no default adapter configured")
ErrAdapterIsNil = e.New("adapter is nil")
ErrConfigurationNotLoaded = e.New("can not configure because configuration is not loaded")
)
// Other
var (
includeFilters = make(map[string]bool)
useIncludeFilters = false
excludeFilters = make(map[string]bool)
useExcludeFilters = false
adapters = make(map[string]LogAdapter)
// TODO(dustin): !! Finish implementing this.
excludeBypassLevel = -1
)
// Add global include filter.
func AddIncludeFilter(noun string) {
includeFilters[noun] = true
useIncludeFilters = true
}
// Remove global include filter.
func RemoveIncludeFilter(noun string) {
delete(includeFilters, noun)
if len(includeFilters) == 0 {
useIncludeFilters = false
}
}
// Add global exclude filter.
func AddExcludeFilter(noun string) {
excludeFilters[noun] = true
useExcludeFilters = true
}
// Remove global exclude filter.
func RemoveExcludeFilter(noun string) {
delete(excludeFilters, noun)
if len(excludeFilters) == 0 {
useExcludeFilters = false
}
}
func AddAdapter(name string, la LogAdapter) {
if _, found := adapters[name]; found == true {
Panic(ErrAdapterAlreadyRegistered)
}
if la == nil {
Panic(ErrAdapterIsNil)
}
adapters[name] = la
if GetDefaultAdapterName() == "" {
SetDefaultAdapterName(name)
}
}
func ClearAdapters() {
adapters = make(map[string]LogAdapter)
SetDefaultAdapterName("")
}
type LogAdapter interface {
Debugf(lc *LogContext, message *string) error
Infof(lc *LogContext, message *string) error
Warningf(lc *LogContext, message *string) error
Errorf(lc *LogContext, message *string) error
}
// TODO(dustin): !! Also populate whether we've bypassed an exclusion so that
// we can add a template macro to prefix an exclamation of
// some sort.
type MessageContext struct {
Level *string
Noun *string
Message *string
ExcludeBypass bool
}
type LogContext struct {
Logger *Logger
Ctx context.Context
}
type Logger struct {
isConfigured bool
an string
la LogAdapter
t *template.Template
systemLevel int
noun string
}
func NewLoggerWithAdapterName(noun string, adapterName string) (l *Logger) {
l = &Logger{
noun: noun,
an: adapterName,
}
return l
}
func NewLogger(noun string) (l *Logger) {
l = NewLoggerWithAdapterName(noun, "")
return l
}
func (l *Logger) Noun() string {
return l.noun
}
func (l *Logger) Adapter() LogAdapter {
return l.la
}
var (
configureMutex sync.Mutex
)
func (l *Logger) doConfigure(force bool) {
configureMutex.Lock()
defer configureMutex.Unlock()
if l.isConfigured == true && force == false {
return
}
if IsConfigurationLoaded() == false {
Panic(ErrConfigurationNotLoaded)
}
if l.an == "" {
l.an = GetDefaultAdapterName()
}
// If this is empty, then no specific adapter was given or no system
// default was configured (which implies that no adapters were registered).
// All of our logging will be skipped.
if l.an != "" {
la, found := adapters[l.an]
if found == false {
Panic(fmt.Errorf("adapter is not valid: %s", l.an))
}
l.la = la
}
// Set the level.
systemLevel, found := LevelNameMap[levelName]
if found == false {
Panic(fmt.Errorf("log-level not valid: [%s]", levelName))
}
l.systemLevel = systemLevel
// Set the format.
if format == "" {
Panic(ErrFormatEmpty)
}
if t, err := template.New("logItem").Parse(format); err != nil {
Panic(err)
} else {
l.t = t
}
l.isConfigured = true
}
func (l *Logger) flattenMessage(lc *MessageContext, format *string, args []interface{}) (string, error) {
m := fmt.Sprintf(*format, args...)
lc.Message = &m
var b bytes.Buffer
if err := l.t.Execute(&b, *lc); err != nil {
return "", err
}
return b.String(), nil
}
func (l *Logger) allowMessage(noun string, level int) bool {
if _, found := includeFilters[noun]; found == true {
return true
}
// If we didn't hit an include filter and we *had* include filters, filter
// it out.
if useIncludeFilters == true {
return false
}
if _, found := excludeFilters[noun]; found == true {
return false
}
return true
}
func (l *Logger) makeLogContext(ctx context.Context) *LogContext {
return &LogContext{
Ctx: ctx,
Logger: l,
}
}
type LogMethod func(lc *LogContext, message *string) error
func (l *Logger) log(ctx context.Context, level int, lm LogMethod, format string, args []interface{}) error {
if l.systemLevel > level {
return nil
}
// Preempt the normal filter checks if we can unconditionally allow at a
// certain level and we've hit that level.
//
// Notice that this is only relevant if the system log-level is already
// letting messages through at the level we came in with.
canExcludeBypass := level >= excludeBypassLevel && excludeBypassLevel != -1
didExcludeBypass := false
n := l.Noun()
if l.allowMessage(n, level) == false {
if canExcludeBypass == false {
return nil
} else {
didExcludeBypass = true
}
}
levelName, found := LevelNameMapR[level]
if found == false {
Panic(fmt.Errorf("level not valid: (%d)", level))
}
levelName = strings.ToUpper(levelName)
lc := &MessageContext{
Level: &levelName,
Noun: &n,
ExcludeBypass: didExcludeBypass,
}
if s, err := l.flattenMessage(lc, &format, args); err != nil {
return err
} else {
lc := l.makeLogContext(ctx)
if err := lm(lc, &s); err != nil {
panic(err)
}
return e.New(s)
}
}
func (l *Logger) Debugf(ctx context.Context, format string, args ...interface{}) {
l.doConfigure(false)
if l.la != nil {
l.log(ctx, LevelDebug, l.la.Debugf, format, args)
}
}
func (l *Logger) Infof(ctx context.Context, format string, args ...interface{}) {
l.doConfigure(false)
if l.la != nil {
l.log(ctx, LevelInfo, l.la.Infof, format, args)
}
}
func (l *Logger) Warningf(ctx context.Context, format string, args ...interface{}) {
l.doConfigure(false)
if l.la != nil {
l.log(ctx, LevelWarning, l.la.Warningf, format, args)
}
}
func (l *Logger) mergeStack(err interface{}, format string, args []interface{}) (string, []interface{}) {
if format != "" {
format += "\n%s"
} else {
format = "%s"
}
var stackified *errors.Error
stackified, ok := err.(*errors.Error)
if ok == false {
stackified = errors.Wrap(err, 2)
}
args = append(args, stackified.ErrorStack())
return format, args
}
func (l *Logger) Errorf(ctx context.Context, errRaw interface{}, format string, args ...interface{}) {
l.doConfigure(false)
var err interface{}
if errRaw != nil {
_, ok := errRaw.(*errors.Error)
if ok == true {
err = errRaw
} else {
err = errors.Wrap(errRaw, 1)
}
}
if l.la != nil {
if errRaw != nil {
format, args = l.mergeStack(err, format, args)
}
l.log(ctx, LevelError, l.la.Errorf, format, args)
}
}
func (l *Logger) ErrorIff(ctx context.Context, errRaw interface{}, format string, args ...interface{}) {
if errRaw == nil {
return
}
var err interface{}
_, ok := errRaw.(*errors.Error)
if ok == true {
err = errRaw
} else {
err = errors.Wrap(errRaw, 1)
}
l.Errorf(ctx, err, format, args...)
}
func (l *Logger) Panicf(ctx context.Context, errRaw interface{}, format string, args ...interface{}) {
l.doConfigure(false)
var err interface{}
_, ok := errRaw.(*errors.Error)
if ok == true {
err = errRaw
} else {
err = errors.Wrap(errRaw, 1)
}
if l.la != nil {
format, args = l.mergeStack(err, format, args)
err = l.log(ctx, LevelError, l.la.Errorf, format, args)
}
Panic(err.(error))
}
func (l *Logger) PanicIff(ctx context.Context, errRaw interface{}, format string, args ...interface{}) {
if errRaw == nil {
return
}
var err interface{}
_, ok := errRaw.(*errors.Error)
if ok == true {
err = errRaw
} else {
err = errors.Wrap(errRaw, 1)
}
l.Panicf(ctx, err.(error), format, args...)
}
func Wrap(err interface{}) *errors.Error {
es, ok := err.(*errors.Error)
if ok == true {
return es
} else {
return errors.Wrap(err, 1)
}
}
func Errorf(message string, args ...interface{}) *errors.Error {
err := fmt.Errorf(message, args...)
return errors.Wrap(err, 1)
}
func Panic(err interface{}) {
_, ok := err.(*errors.Error)
if ok == true {
panic(err)
} else {
panic(errors.Wrap(err, 1))
}
}
func Panicf(message string, args ...interface{}) {
err := Errorf(message, args...)
Panic(err)
}
func PanicIf(err interface{}) {
if err == nil {
return
}
_, ok := err.(*errors.Error)
if ok == true {
panic(err)
} else {
panic(errors.Wrap(err, 1))
}
}
// Is checks if the left ("actual") error equals the right ("against") error.
// The right must be an unwrapped error (the kind that you'd initialize as a
// global variable). The left can be a wrapped or unwrapped error.
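//
// For example (sketch):
//
//	if log.Is(err, io.EOF) { ... }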
func Is(actual, against error) bool {
// If it's an unwrapped error.
if _, ok := actual.(*errors.Error); ok == false {
return actual == against
}
return errors.Is(actual, against)
}
// PrintError is a utility function to prevent the caller from having to
// import the third-party library.
func PrintError(err error) {
wrapped := Wrap(err)
fmt.Printf("Stack:\n\n%s\n", wrapped.ErrorStack())
}
// PrintErrorf is a utility function to prevent the caller from having to
// import the third-party library.
func PrintErrorf(err error, format string, args ...interface{}) {
wrapped := Wrap(err)
fmt.Printf(format, args...)
fmt.Printf("\n")
fmt.Printf("Stack:\n\n%s\n", wrapped.ErrorStack())
}
func init() {
if format == "" {
format = defaultFormat
}
if levelName == "" {
levelName = defaultLevelName
}
if includeNouns != "" {
for _, noun := range strings.Split(includeNouns, ",") {
AddIncludeFilter(noun)
}
}
if excludeNouns != "" {
for _, noun := range strings.Split(excludeNouns, ",") {
AddExcludeFilter(noun)
}
}
if excludeBypassLevelName != "" {
var found bool
if excludeBypassLevel, found = LevelNameMap[excludeBypassLevelName]; found == false {
panic(ErrExcludeLevelNameInvalid)
}
}
}

View File

View File

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2020 Dustin Oprea
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -0,0 +1,3 @@
# Overview
This is a minimal Photoshop format implementation to allow IPTC data to be extracted from a JPEG image. This project primarily services [go-jpeg-image-structure](https://github.com/dsoprea/go-jpeg-image-structure).
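A minimal usage sketch (the import alias and the data file are assumptions; the reader is expected to deliver a raw Photoshop 3.0 info block, e.g. from a JPEG APP13 segment):

```go
package main

import (
	"fmt"
	"os"

	photoshopinfo "github.com/dsoprea/go-photoshop-info-format"
)

func main() {
	f, err := os.Open("photoshop.data") // hypothetical input file
	if err != nil {
		panic(err)
	}
	defer f.Close()

	index, err := photoshopinfo.ReadPhotoshop30Info(f)
	if err != nil {
		panic(err)
	}

	for id, record := range index {
		fmt.Printf("0x%04x: %s\n", id, record)
	}
}
```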

View File

@ -0,0 +1,119 @@
package photoshopinfo
import (
"fmt"
"io"
"encoding/binary"
"github.com/dsoprea/go-logging"
)
var (
defaultByteOrder = binary.BigEndian
)
// Photoshop30InfoRecord is the data for one parsed Photoshop-info record.
type Photoshop30InfoRecord struct {
// RecordType is the record-type.
RecordType string
// ImageResourceId is the image resource-ID.
ImageResourceId uint16
// Name is the name of the record. It is optional and will be an empty-
// string if not present.
Name string
// Data is the raw record data.
Data []byte
}
// String returns a descriptive string.
func (pir Photoshop30InfoRecord) String() string {
return fmt.Sprintf("RECORD-TYPE=[%s] IMAGE-RESOURCE-ID=[0x%04x] NAME=[%s] DATA-SIZE=(%d)", pir.RecordType, pir.ImageResourceId, pir.Name, len(pir.Data))
}
// ReadPhotoshop30InfoRecord parses a single photoshop-info record.
func ReadPhotoshop30InfoRecord(r io.Reader) (pir Photoshop30InfoRecord, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
recordType := make([]byte, 4)
_, err = io.ReadFull(r, recordType)
if err != nil {
if err == io.EOF {
return pir, err
}
log.Panic(err)
}
// TODO(dustin): Move BigEndian to constant/config.
irId := uint16(0)
err = binary.Read(r, defaultByteOrder, &irId)
log.PanicIf(err)
nameSize := uint8(0)
err = binary.Read(r, defaultByteOrder, &nameSize)
log.PanicIf(err)
// Add a padding byte if the length byte plus the name is an odd number of
// bytes, so that the total bytes read stays even.
doAddPadding := (1+nameSize)%2 == 1
if doAddPadding == true {
nameSize++
}
name := make([]byte, nameSize)
_, err = io.ReadFull(r, name)
log.PanicIf(err)
// If the last byte is padding, truncate it.
if doAddPadding == true {
name = name[:nameSize-1]
}
dataSize := uint32(0)
err = binary.Read(r, defaultByteOrder, &dataSize)
log.PanicIf(err)
data := make([]byte, dataSize+dataSize%2)
_, err = io.ReadFull(r, data)
log.PanicIf(err)
data = data[:dataSize]
pir = Photoshop30InfoRecord{
RecordType: string(recordType),
ImageResourceId: irId,
Name: string(name),
Data: data,
}
return pir, nil
}
// ReadPhotoshop30Info parses a sequence of photoshop-info records from the stream.
func ReadPhotoshop30Info(r io.Reader) (pirIndex map[uint16]Photoshop30InfoRecord, err error) {
pirIndex = make(map[uint16]Photoshop30InfoRecord)
for {
pir, err := ReadPhotoshop30InfoRecord(r)
if err != nil {
if err == io.EOF {
break
}
log.Panic(err)
}
pirIndex[pir.ImageResourceId] = pir
}
return pirIndex, nil
}

View File

@ -0,0 +1,70 @@
package photoshopinfo
import (
"os"
"path"
"github.com/dsoprea/go-logging"
)
var (
testDataRelFilepath = "photoshop.data"
)
var (
moduleRootPath = ""
assetsPath = ""
)
func GetModuleRootPath() string {
if moduleRootPath == "" {
moduleRootPath = os.Getenv("PHOTOSHOPINFO_MODULE_ROOT_PATH")
if moduleRootPath != "" {
return moduleRootPath
}
currentWd, err := os.Getwd()
log.PanicIf(err)
currentPath := currentWd
visited := make([]string, 0)
for {
tryStampFilepath := path.Join(currentPath, ".MODULE_ROOT")
_, err := os.Stat(tryStampFilepath)
if err != nil && os.IsNotExist(err) != true {
log.Panic(err)
} else if err == nil {
break
}
visited = append(visited, tryStampFilepath)
currentPath = path.Dir(currentPath)
if currentPath == "/" {
log.Panicf("could not find module-root: %v", visited)
}
}
moduleRootPath = currentPath
}
return moduleRootPath
}
func GetTestAssetsPath() string {
if assetsPath == "" {
moduleRootPath := GetModuleRootPath()
assetsPath = path.Join(moduleRootPath, "assets")
}
return assetsPath
}
func GetTestDataFilepath() string {
assetsPath := GetTestAssetsPath()
filepath := path.Join(assetsPath, testDataRelFilepath)
return filepath
}

7
vendor/github.com/dsoprea/go-utility/v2/LICENSE generated vendored Normal file
View File

@ -0,0 +1,7 @@
Copyright 2019 Random Ingenuity InformationWorks
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@ -0,0 +1,64 @@
[![GoDoc](https://godoc.org/github.com/dsoprea/go-utility/filesystem?status.svg)](https://godoc.org/github.com/dsoprea/go-utility/filesystem)
[![Build Status](https://travis-ci.org/dsoprea/go-utility.svg?branch=master)](https://travis-ci.org/dsoprea/go-utility)
[![Coverage Status](https://coveralls.io/repos/github/dsoprea/go-utility/badge.svg?branch=master)](https://coveralls.io/github/dsoprea/go-utility?branch=master)
[![Go Report Card](https://goreportcard.com/badge/github.com/dsoprea/go-utility)](https://goreportcard.com/report/github.com/dsoprea/go-utility)
# bounceback
An `io.ReadSeeker` and `io.WriteSeeker` that returns to the right place before
reading or writing. Useful when the same file resource is being reused for reads
or writes throughout that file.
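A hedged sketch (assuming the package is imported as `rifs` from this repository's `filesystem` package):

```go
br, err := rifs.NewBouncebackReader(f) // f is an *os.File shared with other seekers
if err != nil {
	panic(err)
}

buffer := make([]byte, 128)

// Read seeks back to br's own tracked position before reading.
_, err = br.Read(buffer)
```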
# list_files
A recursive path walker that supports filters.
# seekable_buffer
A memory structure that satisfies `io.ReadWriteSeeker`.
# copy_bytes_between_positions
Given an `io.ReadWriteSeeker`, copy N bytes from one position to an earlier
position.
# read_counter, write_counter
Wrap `io.Reader` and `io.Writer` structs in order to report how many bytes were
transferred.
# readseekwritecloser
Provides the ReadWriteSeekCloser interface that combines a RWS and a Closer.
Also provides a no-op wrapper to augment a plain RWS with a closer.
# boundedreadwriteseek
Wraps a ReadWriteSeeker such that no seeks can be at an offset less than a
specific-offset.
# calculateseek
Provides a reusable function with which to calculate seek offsets.
# progress_wrapper
Provides `io.Reader` and `io.Writer` wrappers that also trigger callbacks after
each call. The reader wrapper also invokes the callback upon EOF.
# does_exist
Check whether a file/directory exists using a file-path.
# graceful_copy
Do a copy but correctly handle short-writes and reads that might return a non-
zero read count *and* EOF.
# readseeker_to_readerat
A wrapper that allows an `io.ReadSeeker` to be used as an `io.ReaderAt`.
# simplefileinfo
An implementation of `os.FileInfo` to support testing.

View File

@ -0,0 +1,273 @@
package rifs
import (
"fmt"
"io"
"github.com/dsoprea/go-logging"
)
// BouncebackStats describes operation counts.
type BouncebackStats struct {
reads int
writes int
seeks int
syncs int
}
func (bbs BouncebackStats) String() string {
return fmt.Sprintf(
"BouncebackStats<READS=(%d) WRITES=(%d) SEEKS=(%d) SYNCS=(%d)>",
bbs.reads, bbs.writes, bbs.seeks, bbs.syncs)
}
type bouncebackBase struct {
currentPosition int64
stats BouncebackStats
}
// Position returns the position that we're supposed to be at.
func (bb *bouncebackBase) Position() int64 {
// TODO(dustin): Add test
return bb.currentPosition
}
// StatsReads returns the number of reads that have been attempted.
func (bb *bouncebackBase) StatsReads() int {
// TODO(dustin): Add test
return bb.stats.reads
}
// StatsWrites returns the number of write operations.
func (bb *bouncebackBase) StatsWrites() int {
// TODO(dustin): Add test
return bb.stats.writes
}
// StatsSeeks returns the number of seeks.
func (bb *bouncebackBase) StatsSeeks() int {
// TODO(dustin): Add test
return bb.stats.seeks
}
// StatsSyncs returns the number of corrective seeks ("bounce-backs").
func (bb *bouncebackBase) StatsSyncs() int {
// TODO(dustin): Add test
return bb.stats.syncs
}
// Seek does a seek to an arbitrary place in the `io.ReadSeeker`.
func (bb *bouncebackBase) seek(s io.Seeker, offset int64, whence int) (newPosition int64, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
// If the seek is relative, make sure we're where we're supposed to be *first*.
if whence != io.SeekStart {
err = bb.checkPosition(s)
log.PanicIf(err)
}
bb.stats.seeks++
newPosition, err = s.Seek(offset, whence)
log.PanicIf(err)
// Update our internal tracking.
bb.currentPosition = newPosition
return newPosition, nil
}
func (bb *bouncebackBase) checkPosition(s io.Seeker) (err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
// Make sure we're where we're supposed to be.
// This should have no overhead, and enables us to collect stats.
realCurrentPosition, err := s.Seek(0, io.SeekCurrent)
log.PanicIf(err)
if realCurrentPosition != bb.currentPosition {
bb.stats.syncs++
_, err = s.Seek(bb.currentPosition, io.SeekStart)
log.PanicIf(err)
}
return nil
}
// BouncebackReader wraps a ReadSeeker, keeps track of our position, and
// seeks back to it before reading. This allows an underlying ReadWriteSeeker
// with an unstable position to still be used for a prolonged series of reads.
type BouncebackReader struct {
rs io.ReadSeeker
bouncebackBase
}
// NewBouncebackReader returns a `*BouncebackReader` struct.
func NewBouncebackReader(rs io.ReadSeeker) (br *BouncebackReader, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
initialPosition, err := rs.Seek(0, io.SeekCurrent)
log.PanicIf(err)
bb := bouncebackBase{
currentPosition: initialPosition,
}
br = &BouncebackReader{
rs: rs,
bouncebackBase: bb,
}
return br, nil
}
// Seek does a seek to an arbitrary place in the `io.ReadSeeker`.
func (br *BouncebackReader) Seek(offset int64, whence int) (newPosition int64, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
newPosition, err = br.bouncebackBase.seek(br.rs, offset, whence)
log.PanicIf(err)
return newPosition, nil
}
// Read does a standard read.
func (br *BouncebackReader) Read(p []byte) (n int, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
br.bouncebackBase.stats.reads++
err = br.bouncebackBase.checkPosition(br.rs)
log.PanicIf(err)
// Do read.
n, err = br.rs.Read(p)
if err != nil {
if err == io.EOF {
return 0, io.EOF
}
log.Panic(err)
}
// Update our internal tracking.
br.bouncebackBase.currentPosition += int64(n)
return n, nil
}
// BouncebackWriter wraps a WriteSeeker, keeps track of our position, and
// seeks back to it before writing. This allows an underlying ReadWriteSeeker
// with an unstable position to still be used for a prolonged series of writes.
type BouncebackWriter struct {
ws io.WriteSeeker
bouncebackBase
}
// NewBouncebackWriter returns a new `BouncebackWriter` struct.
func NewBouncebackWriter(ws io.WriteSeeker) (bw *BouncebackWriter, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
initialPosition, err := ws.Seek(0, io.SeekCurrent)
log.PanicIf(err)
bb := bouncebackBase{
currentPosition: initialPosition,
}
bw = &BouncebackWriter{
ws: ws,
bouncebackBase: bb,
}
return bw, nil
}
// Seek puts us at a specific position in the internal writer for the next
// write/seek.
func (bw *BouncebackWriter) Seek(offset int64, whence int) (newPosition int64, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
newPosition, err = bw.bouncebackBase.seek(bw.ws, offset, whence)
log.PanicIf(err)
return newPosition, nil
}
// Write performs a write against the internal `WriteSeeker` starting at the
// position that we're supposed to be at.
func (bw *BouncebackWriter) Write(p []byte) (n int, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
bw.bouncebackBase.stats.writes++
// Make sure we're where we're supposed to be.
realCurrentPosition, err := bw.ws.Seek(0, io.SeekCurrent)
log.PanicIf(err)
if realCurrentPosition != bw.bouncebackBase.currentPosition {
bw.bouncebackBase.stats.seeks++
_, err = bw.ws.Seek(bw.bouncebackBase.currentPosition, io.SeekStart)
log.PanicIf(err)
}
// Do write.
n, err = bw.ws.Write(p)
log.PanicIf(err)
// Update our internal tracking.
bw.bouncebackBase.currentPosition += int64(n)
return n, nil
}

View File

@ -0,0 +1,95 @@
package rifs
import (
"io"
"github.com/dsoprea/go-logging"
)
// BoundedReadWriteSeekCloser wraps an RWS that is also a Closer, applying
// offset boundaries. It proxies the RWS methods to the inner
// BoundedReadWriteSeeker.
type BoundedReadWriteSeekCloser struct {
io.Closer
*BoundedReadWriteSeeker
}
// NewBoundedReadWriteSeekCloser returns a new BoundedReadWriteSeekCloser.
func NewBoundedReadWriteSeekCloser(rwsc ReadWriteSeekCloser, minimumOffset int64, staticFileSize int64) (brwsc *BoundedReadWriteSeekCloser, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
bs, err := NewBoundedReadWriteSeeker(rwsc, minimumOffset, staticFileSize)
log.PanicIf(err)
brwsc = &BoundedReadWriteSeekCloser{
Closer: rwsc,
BoundedReadWriteSeeker: bs,
}
return brwsc, nil
}
// Seek forwards calls to the inner RWS.
func (rwsc *BoundedReadWriteSeekCloser) Seek(offset int64, whence int) (newOffset int64, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
newOffset, err = rwsc.BoundedReadWriteSeeker.Seek(offset, whence)
log.PanicIf(err)
return newOffset, nil
}
// Read forwards calls to the inner RWS.
func (rwsc *BoundedReadWriteSeekCloser) Read(buffer []byte) (readCount int, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
readCount, err = rwsc.BoundedReadWriteSeeker.Read(buffer)
if err != nil {
if err == io.EOF {
return 0, err
}
log.Panic(err)
}
return readCount, nil
}
// Write forwards calls to the inner RWS.
func (rwsc *BoundedReadWriteSeekCloser) Write(buffer []byte) (writtenCount int, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
writtenCount, err = rwsc.BoundedReadWriteSeeker.Write(buffer)
log.PanicIf(err)
return writtenCount, nil
}
// Close forwards calls to the inner RWS.
func (rwsc *BoundedReadWriteSeekCloser) Close() (err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
err = rwsc.Closer.Close()
log.PanicIf(err)
return nil
}

View File

@ -0,0 +1,156 @@
package rifs
import (
"errors"
"io"
"os"
"github.com/dsoprea/go-logging"
)
var (
// ErrSeekBeyondBound is returned when a seek is requested beyond the
// statically-given file-size. No writes or seeks beyond boundaries are
// supported with a statically-given file size.
ErrSeekBeyondBound = errors.New("seek beyond boundary")
)
// BoundedReadWriteSeeker is a thin filter that ensures that no seeks can be
// done to offsets smaller than the one we were given. This supports libraries
// that expect to read from the front of a stream while actually operating on
// data that sits in the middle of a larger stream.
type BoundedReadWriteSeeker struct {
io.ReadWriteSeeker
currentOffset int64
minimumOffset int64
staticFileSize int64
}
// NewBoundedReadWriteSeeker returns a new BoundedReadWriteSeeker instance.
func NewBoundedReadWriteSeeker(rws io.ReadWriteSeeker, minimumOffset int64, staticFileSize int64) (brws *BoundedReadWriteSeeker, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
if minimumOffset < 0 {
log.Panicf("BoundedReadWriteSeeker minimum offset must be zero or larger: (%d)", minimumOffset)
}
// We'll always start at a relative offset of zero.
_, err = rws.Seek(minimumOffset, os.SEEK_SET)
log.PanicIf(err)
brws = &BoundedReadWriteSeeker{
ReadWriteSeeker: rws,
currentOffset: 0,
minimumOffset: minimumOffset,
staticFileSize: staticFileSize,
}
return brws, nil
}
// Seek moves the offset to the given offset. Prevents offset from ever being
// moved left of `brws.minimumOffset`.
func (brws *BoundedReadWriteSeeker) Seek(offset int64, whence int) (updatedOffset int64, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
fileSize := brws.staticFileSize
// If we weren't given a static file-size, look it up whenever it is needed.
if whence == os.SEEK_END && fileSize == 0 {
realFileSizeRaw, err := brws.ReadWriteSeeker.Seek(0, os.SEEK_END)
log.PanicIf(err)
fileSize = realFileSizeRaw - brws.minimumOffset
}
updatedOffset, err = CalculateSeek(brws.currentOffset, offset, whence, fileSize)
log.PanicIf(err)
if brws.staticFileSize != 0 && updatedOffset > brws.staticFileSize {
//updatedOffset = int64(brws.staticFileSize)
// NOTE(dustin): Presumably, this will only be disruptive to writes that are beyond the boundaries, which, if we're being used at all, should already account for the boundary and prevent this error from ever happening. So, time will tell how disruptive this is.
return 0, ErrSeekBeyondBound
}
if updatedOffset != brws.currentOffset {
updatedRealOffset := updatedOffset + brws.minimumOffset
_, err = brws.ReadWriteSeeker.Seek(updatedRealOffset, os.SEEK_SET)
log.PanicIf(err)
brws.currentOffset = updatedOffset
}
return updatedOffset, nil
}
// Read forwards reads to the inner RWS.
func (brws *BoundedReadWriteSeeker) Read(buffer []byte) (readCount int, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
if brws.staticFileSize != 0 {
availableCount := brws.staticFileSize - brws.currentOffset
if availableCount == 0 {
return 0, io.EOF
}
if int64(len(buffer)) > availableCount {
buffer = buffer[:availableCount]
}
}
readCount, err = brws.ReadWriteSeeker.Read(buffer)
brws.currentOffset += int64(readCount)
if err != nil {
if err == io.EOF {
return 0, err
}
log.Panic(err)
}
return readCount, nil
}
// Write forwards writes to the inner RWS.
func (brws *BoundedReadWriteSeeker) Write(buffer []byte) (writtenCount int, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
if brws.staticFileSize != 0 {
log.Panicf("writes can not be performed if a static file-size was given")
}
writtenCount, err = brws.ReadWriteSeeker.Write(buffer)
brws.currentOffset += int64(writtenCount)
log.PanicIf(err)
return writtenCount, nil
}
// MinimumOffset returns the configured minimum-offset.
func (brws *BoundedReadWriteSeeker) MinimumOffset() int64 {
return brws.minimumOffset
}
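A minimal sketch of hiding a fixed-size header, again assuming the `rifs` import; a static file-size of zero means the real size is looked up on demand and writes remain allowed:

```go
package main

import (
	"fmt"
	"io"

	rifs "github.com/dsoprea/go-utility/filesystem"
)

func main() {
	sb := rifs.NewSeekableBufferWithBytes([]byte("HEADERpayload"))

	// Expose only the bytes after the six-byte header.
	brws, err := rifs.NewBoundedReadWriteSeeker(sb, 6, 0)
	if err != nil {
		panic(err)
	}

	data, err := io.ReadAll(brws)
	if err != nil {
		panic(err)
	}

	fmt.Printf("%s\n", data)
	// Output: payload
}
```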

View File

@ -0,0 +1,52 @@
package rifs
import (
"io"
"os"
"github.com/dsoprea/go-logging"
)
// SeekType is a convenience type to associate the different seek-types with
// printable descriptions.
type SeekType int
// String returns a descriptive string.
func (n SeekType) String() string {
if n == io.SeekCurrent {
return "SEEK-CURRENT"
} else if n == io.SeekEnd {
return "SEEK-END"
} else if n == io.SeekStart {
return "SEEK-START"
}
log.Panicf("unknown seek-type: (%d)", n)
return ""
}
// CalculateSeek calculates an offset in a file-stream given the parameters.
func CalculateSeek(currentOffset int64, delta int64, whence int, fileSize int64) (finalOffset int64, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
finalOffset = 0
}
}()
if whence == os.SEEK_SET {
finalOffset = delta
} else if whence == os.SEEK_CUR {
finalOffset = currentOffset + delta
} else if whence == os.SEEK_END {
finalOffset = fileSize + delta
} else {
log.Panicf("whence not valid: (%d)", whence)
}
if finalOffset < 0 {
finalOffset = 0
}
return finalOffset, nil
}
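The arithmetic can be checked in isolation; the `io.Seek*` constants carry the same values as the legacy `os.SEEK_*` ones used above:

```go
package main

import (
	"fmt"
	"io"

	rifs "github.com/dsoprea/go-utility/filesystem"
)

func main() {
	// From offset 10, move -4 relative to the current position.
	offset, err := rifs.CalculateSeek(10, -4, io.SeekCurrent, 100)
	fmt.Println(offset, err) // 6 <nil>

	// Negative results are clamped to zero rather than returned as errors.
	offset, err = rifs.CalculateSeek(2, -10, io.SeekCurrent, 100)
	fmt.Println(offset, err) // 0 <nil>

	// Seeks from the end are resolved against the given file-size.
	offset, err = rifs.CalculateSeek(0, -1, io.SeekEnd, 100)
	fmt.Println(offset, err) // 99 <nil>
}
```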

View File

@ -0,0 +1,15 @@
package rifs
import (
"os"
"path"
)
var (
appPath string
)
func init() {
goPath := os.Getenv("GOPATH")
appPath = path.Join(goPath, "src", "github.com", "dsoprea", "go-utility", "filesystem")
}

View File

@ -0,0 +1,40 @@
package rifs
import (
"io"
"os"
"github.com/dsoprea/go-logging"
)
// CopyBytesBetweenPositions will copy bytes from one position in the given RWS
// to an earlier position in the same RWS.
func CopyBytesBetweenPositions(rws io.ReadWriteSeeker, fromPosition, toPosition int64, count int) (n int, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
if fromPosition <= toPosition {
log.Panicf("from position (%d) must be larger than to position (%d)", fromPosition, toPosition)
}
br, err := NewBouncebackReader(rws)
log.PanicIf(err)
_, err = br.Seek(fromPosition, os.SEEK_SET)
log.PanicIf(err)
bw, err := NewBouncebackWriter(rws)
log.PanicIf(err)
_, err = bw.Seek(toPosition, os.SEEK_SET)
log.PanicIf(err)
written, err := io.CopyN(bw, br, int64(count))
log.PanicIf(err)
n = int(written)
return n, nil
}
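For example (a sketch assuming the `rifs` import), copying three bytes from offset 7 back over offset 1 of an in-memory stream:

```go
package main

import (
	"fmt"

	rifs "github.com/dsoprea/go-utility/filesystem"
)

func main() {
	sb := rifs.NewSeekableBufferWithBytes([]byte("0123456789"))

	// Copy "789" (at offset 7) over "123" (at offset 1).
	n, err := rifs.CopyBytesBetweenPositions(sb, 7, 1, 3)
	if err != nil {
		panic(err)
	}

	fmt.Println(n, string(sb.Bytes()))
	// Output: 3 0789456789
}
```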

View File

@ -0,0 +1,19 @@
package rifs
import (
"os"
)
// DoesExist returns true if we can open the given file/path without error. We
// can't simply use `os.IsNotExist()` because we'll get a different error when
// the parent directory doesn't exist, and really the only important thing is if
// it exists *and* it's readable.
func DoesExist(filepath string) bool {
f, err := os.Open(filepath)
if err != nil {
return false
}
f.Close()
return true
}

View File

@ -0,0 +1,54 @@
package rifs
import (
"fmt"
"io"
)
const (
defaultCopyBufferSize = 1024 * 1024
)
// GracefulCopy will copy while tolerating lesser, recoverable issues.
//
// - We'll ignore EOF if the read byte-count is more than zero. Only an EOF when
// zero bytes were read will terminate the loop.
//
// - Ignore short-writes. If fewer bytes were written than were given, we'll
// keep trying until done.
func GracefulCopy(w io.Writer, r io.Reader, buffer []byte) (copyCount int, err error) {
if buffer == nil {
buffer = make([]byte, defaultCopyBufferSize)
}
for {
readCount, err := r.Read(buffer)
if err != nil {
if err != io.EOF {
err = fmt.Errorf("read error: %s", err.Error())
return 0, err
}
// Only break on EOF if no bytes were actually read.
if readCount == 0 {
break
}
}
writeBuffer := buffer[:readCount]
for len(writeBuffer) > 0 {
writtenCount, err := w.Write(writeBuffer)
if err != nil {
err = fmt.Errorf("write error: %s", err.Error())
return 0, err
}
writeBuffer = writeBuffer[writtenCount:]
}
copyCount += readCount
}
return copyCount, nil
}
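A short sketch; any `io.Writer`/`io.Reader` pair works, and passing `nil` for the buffer uses the one-megabyte default:

```go
package main

import (
	"bytes"
	"fmt"
	"strings"

	rifs "github.com/dsoprea/go-utility/filesystem"
)

func main() {
	r := strings.NewReader("some payload")

	var w bytes.Buffer
	n, err := rifs.GracefulCopy(&w, r, nil)
	if err != nil {
		panic(err)
	}

	fmt.Println(n, w.String())
	// Output: 12 some payload
}
```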

View File

@ -0,0 +1,143 @@
package rifs
import (
"io"
"os"
"path"
"github.com/dsoprea/go-logging"
)
// FileListFilterPredicate is the callback predicate used for filtering.
type FileListFilterPredicate func(parent string, child os.FileInfo) (hit bool, err error)
// VisitedFile is one visited file.
type VisitedFile struct {
Filepath string
Info os.FileInfo
Index int
}
// ListFiles feeds a continuous list of files from a recursive folder scan. An
// optional predicate can be provided in order to filter. When done, the
// `filesC` channel is closed. If there's an error, the `errC` channel will
// receive it.
func ListFiles(rootPath string, cb FileListFilterPredicate) (filesC chan VisitedFile, count int, errC chan error) {
defer func() {
if state := recover(); state != nil {
err := log.Wrap(state.(error))
log.Panic(err)
}
}()
// Make sure the path exists.
f, err := os.Open(rootPath)
log.PanicIf(err)
f.Close()
// Do our thing.
filesC = make(chan VisitedFile, 100)
errC = make(chan error, 1)
index := 0
go func() {
defer func() {
if state := recover(); state != nil {
err := log.Wrap(state.(error))
errC <- err
}
}()
queue := []string{rootPath}
for len(queue) > 0 {
// Pop the next folder to process off the queue.
var thisPath string
thisPath, queue = queue[0], queue[1:]
// Skip path if a symlink.
fi, err := os.Lstat(thisPath)
log.PanicIf(err)
if (fi.Mode() & os.ModeSymlink) > 0 {
continue
}
// Read information.
folderF, err := os.Open(thisPath)
if err != nil {
errC <- log.Wrap(err)
return
}
// Iterate through children.
for {
children, err := folderF.Readdir(1000)
if err == io.EOF {
break
} else if err != nil {
errC <- log.Wrap(err)
return
}
for _, child := range children {
filepath := path.Join(thisPath, child.Name())
// Skip if a file symlink.
fi, err := os.Lstat(filepath)
log.PanicIf(err)
if (fi.Mode() & os.ModeSymlink) > 0 {
continue
}
// If a predicate was given, determine if this child will be
// left behind.
if cb != nil {
hit, err := cb(thisPath, child)
if err != nil {
errC <- log.Wrap(err)
return
}
if hit == false {
continue
}
}
index++
// Push file to channel.
vf := VisitedFile{
Filepath: filepath,
Info: child,
Index: index,
}
filesC <- vf
// If a folder, queue for later processing.
if child.IsDir() == true {
queue = append(queue, filepath)
}
}
}
folderF.Close()
}
close(filesC)
close(errC)
}()
return filesC, index, errC
}
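A consumption sketch: because an error aborts the walk without closing `filesC`, draining both channels with a `select` avoids blocking on the failure path. The predicate here is hypothetical; note that it keeps directories so the scan still recurses into them:

```go
package main

import (
	"fmt"
	"os"
	"strings"

	rifs "github.com/dsoprea/go-utility/filesystem"
)

func main() {
	// Hypothetical predicate: match JPEGs, but keep directories so the
	// walk still descends into them.
	cb := func(parent string, child os.FileInfo) (bool, error) {
		return child.IsDir() || strings.HasSuffix(child.Name(), ".jpg"), nil
	}

	filesC, _, errC := rifs.ListFiles("/tmp", cb)

	for filesC != nil {
		select {
		case vf, ok := <-filesC:
			if !ok {
				// Clean finish.
				filesC = nil
				break
			}

			fmt.Println(vf.Index, vf.Filepath)
		case err := <-errC:
			if err != nil {
				panic(err)
			}
		}
	}
}
```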

View File

@ -0,0 +1,93 @@
package rifs
import (
"io"
"time"
"github.com/dsoprea/go-logging"
)
// ProgressFunc receives progress updates.
type ProgressFunc func(n int, duration time.Duration, isEof bool) error
// WriteProgressWrapper wraps a writer and calls a callback after each write
// with count and duration info.
type WriteProgressWrapper struct {
w io.Writer
progressCb ProgressFunc
}
// NewWriteProgressWrapper returns a new WPW instance.
func NewWriteProgressWrapper(w io.Writer, progressCb ProgressFunc) io.Writer {
return &WriteProgressWrapper{
w: w,
progressCb: progressCb,
}
}
// Write does a write and calls the callback.
func (wpw *WriteProgressWrapper) Write(buffer []byte) (n int, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
startAt := time.Now()
n, err = wpw.w.Write(buffer)
log.PanicIf(err)
duration := time.Since(startAt)
err = wpw.progressCb(n, duration, false)
log.PanicIf(err)
return n, nil
}
// ReadProgressWrapper wraps a reader and calls a callback after each read with
// count and duration info.
type ReadProgressWrapper struct {
r io.Reader
progressCb ProgressFunc
}
// NewReadProgressWrapper returns a new RPW instance.
func NewReadProgressWrapper(r io.Reader, progressCb ProgressFunc) io.Reader {
return &ReadProgressWrapper{
r: r,
progressCb: progressCb,
}
}
// Read reads data and calls the callback.
func (rpw *ReadProgressWrapper) Read(buffer []byte) (n int, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
startAt := time.Now()
n, err = rpw.r.Read(buffer)
duration := time.Since(startAt)
if err != nil {
if err == io.EOF {
errInner := rpw.progressCb(n, duration, true)
log.PanicIf(errInner)
return n, err
}
log.Panic(err)
}
err = rpw.progressCb(n, duration, false)
log.PanicIf(err)
return n, nil
}
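A sketch wiring a `ProgressFunc` into the read wrapper (the write wrapper is used the same way):

```go
package main

import (
	"fmt"
	"io"
	"strings"
	"time"

	rifs "github.com/dsoprea/go-utility/filesystem"
)

func main() {
	progressCb := func(n int, duration time.Duration, isEof bool) error {
		fmt.Printf("read %d bytes in %s (eof=%v)\n", n, duration, isEof)

		// Returning a non-nil error here would abort the transfer.
		return nil
	}

	r := rifs.NewReadProgressWrapper(strings.NewReader("example data"), progressCb)

	if _, err := io.Copy(io.Discard, r); err != nil {
		panic(err)
	}
}
```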

View File

@ -0,0 +1,36 @@
package rifs
import (
"io"
)
// ReadCounter proxies read requests and maintains a counter of bytes read.
type ReadCounter struct {
r io.Reader
counter int
}
// NewReadCounter returns a new `ReadCounter` struct wrapping a `Reader`.
func NewReadCounter(r io.Reader) *ReadCounter {
return &ReadCounter{
r: r,
}
}
// Count returns the total number of bytes read.
func (rc *ReadCounter) Count() int {
return rc.counter
}
// Reset resets the counter to zero.
func (rc *ReadCounter) Reset() {
rc.counter = 0
}
// Read forwards a read to the underlying `Reader` while bumping the counter.
func (rc *ReadCounter) Read(b []byte) (n int, err error) {
n, err = rc.r.Read(b)
rc.counter += n
return n, err
}
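For example, counting the bytes that pass through an arbitrary reader; `WriteCounter`, defined later in this package, mirrors this for writes:

```go
package main

import (
	"fmt"
	"io"
	"strings"

	rifs "github.com/dsoprea/go-utility/filesystem"
)

func main() {
	rc := rifs.NewReadCounter(strings.NewReader("12345"))

	if _, err := io.Copy(io.Discard, rc); err != nil {
		panic(err)
	}

	fmt.Println(rc.Count())
	// Output: 5
}
```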

View File

@ -0,0 +1,63 @@
package rifs
import (
"io"
"github.com/dsoprea/go-logging"
)
// ReadSeekerToReaderAt is a wrapper that allows a ReadSeeker to masquerade as a
// ReaderAt.
type ReadSeekerToReaderAt struct {
rs io.ReadSeeker
}
// NewReadSeekerToReaderAt returns a new ReadSeekerToReaderAt instance.
func NewReadSeekerToReaderAt(rs io.ReadSeeker) *ReadSeekerToReaderAt {
return &ReadSeekerToReaderAt{
rs: rs,
}
}
// ReadAt is a wrapper that satisfies the ReaderAt interface.
//
// Note that a requirement of ReadAt is that it must not affect the offset in
// the underlying resource, and that concurrent calls can be made to it. Since
// we're capturing the current offset in the underlying resource and then
// seeking back to it before returning, it is the responsibility of the caller
// to serialize (i.e. use a mutex with) these requests in order to eliminate
// race-conditions in the parallel-usage scenario.
//
// Note also that, since ReadAt() is going to be called on a particular
// instance, that instance is going to internalize a file resource, that file-
// resource is provided by the OS, and [most] OSs only support one
// file-position per resource, so locking is already going to be a necessary
// internal semantic of a ReaderAt implementation.
func (rstra *ReadSeekerToReaderAt) ReadAt(p []byte, offset int64) (n int, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
originalOffset, err := rstra.rs.Seek(0, io.SeekCurrent)
log.PanicIf(err)
defer func() {
_, err := rstra.rs.Seek(originalOffset, io.SeekStart)
log.PanicIf(err)
}()
_, err = rstra.rs.Seek(offset, io.SeekStart)
log.PanicIf(err)
// Note that all errors will be wrapped, here. The usage of this method is
// such that typically no specific errors would be expected as part of
// normal operation (in which case we'd check for those first and return
// them directly).
n, err = io.ReadFull(rstra.rs, p)
log.PanicIf(err)
return n, nil
}
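A single-goroutine usage sketch; note that the underlying offset is restored afterwards (`GetOffset` is defined later in this package):

```go
package main

import (
	"fmt"
	"strings"

	rifs "github.com/dsoprea/go-utility/filesystem"
)

func main() {
	rs := strings.NewReader("abcdefgh")
	ra := rifs.NewReadSeekerToReaderAt(rs)

	buf := make([]byte, 3)
	if _, err := ra.ReadAt(buf, 2); err != nil {
		panic(err)
	}

	fmt.Printf("%s\n", buf)
	// Output: cde

	// The wrapper seeks back, so the ReadSeeker is still at offset 0.
	fmt.Println(rifs.GetOffset(rs))
	// Output: 0
}
```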

View File

@ -0,0 +1,29 @@
package rifs
import (
"io"
)
// ReadWriteSeekCloser satisfies `io.ReadWriteSeeker` and `io.Closer`
// interfaces.
type ReadWriteSeekCloser interface {
io.ReadWriteSeeker
io.Closer
}
type readWriteSeekNoopCloser struct {
io.ReadWriteSeeker
}
// ReadWriteSeekNoopCloser wraps a `io.ReadWriteSeeker` with a no-op Close()
// call.
func ReadWriteSeekNoopCloser(rws io.ReadWriteSeeker) ReadWriteSeekCloser {
return readWriteSeekNoopCloser{
ReadWriteSeeker: rws,
}
}
// Close does nothing but allows the RWS to satisfy `io.Closer`.
func (readWriteSeekNoopCloser) Close() (err error) {
return nil
}

View File

@ -0,0 +1,146 @@
package rifs
import (
"io"
"os"
"github.com/dsoprea/go-logging"
)
// SeekableBuffer is a simple memory structure that satisfies
// `io.ReadWriteSeeker`.
type SeekableBuffer struct {
data []byte
position int64
}
// NewSeekableBuffer is a factory that returns a `*SeekableBuffer`.
func NewSeekableBuffer() *SeekableBuffer {
data := make([]byte, 0)
return &SeekableBuffer{
data: data,
}
}
// NewSeekableBufferWithBytes is a factory that returns a `*SeekableBuffer`.
func NewSeekableBufferWithBytes(originalData []byte) *SeekableBuffer {
data := make([]byte, len(originalData))
copy(data, originalData)
return &SeekableBuffer{
data: data,
}
}
func len64(data []byte) int64 {
return int64(len(data))
}
// Bytes returns the underlying slice.
func (sb *SeekableBuffer) Bytes() []byte {
return sb.data
}
// Len returns the number of bytes currently stored.
func (sb *SeekableBuffer) Len() int {
return len(sb.data)
}
// Write does a standard write to the internal slice.
func (sb *SeekableBuffer) Write(p []byte) (n int, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
// The current position we're already at is past the end of the data we
// actually have. Extend our buffer up to our current position.
if sb.position > len64(sb.data) {
extra := make([]byte, sb.position-len64(sb.data))
sb.data = append(sb.data, extra...)
}
positionFromEnd := len64(sb.data) - sb.position
tailCount := positionFromEnd - len64(p)
var tailBytes []byte
if tailCount > 0 {
tailBytes = sb.data[len64(sb.data)-tailCount:]
sb.data = append(sb.data[:sb.position], p...)
} else {
sb.data = append(sb.data[:sb.position], p...)
}
if tailBytes != nil {
sb.data = append(sb.data, tailBytes...)
}
dataSize := len64(p)
sb.position += dataSize
return int(dataSize), nil
}
// Read does a standard read against the internal slice.
func (sb *SeekableBuffer) Read(p []byte) (n int, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
if sb.position >= len64(sb.data) {
return 0, io.EOF
}
n = copy(p, sb.data[sb.position:])
sb.position += int64(n)
return n, nil
}
// Truncate either chops or extends the internal buffer.
func (sb *SeekableBuffer) Truncate(size int64) (err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
sizeInt := int(size)
if sizeInt < len(sb.data) {
// Chop.
sb.data = sb.data[:sizeInt]
} else {
// Extend with zero-fill.
extra := make([]byte, sizeInt-len(sb.data))
sb.data = append(sb.data, extra...)
}
return nil
}
// Seek does a standard seek on the internal slice.
func (sb *SeekableBuffer) Seek(offset int64, whence int) (n int64, err error) {
defer func() {
if state := recover(); state != nil {
err = log.Wrap(state.(error))
}
}()
if whence == os.SEEK_SET {
sb.position = offset
} else if whence == os.SEEK_END {
sb.position = len64(sb.data) + offset
} else if whence == os.SEEK_CUR {
sb.position += offset
} else {
log.Panicf("seek whence is not valid: (%d)", whence)
}
if sb.position < 0 {
sb.position = 0
}
return sb.position, nil
}
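A sketch of the buffer's file-like semantics, including the zero-fill that happens when writing past the end:

```go
package main

import (
	"fmt"
	"io"

	rifs "github.com/dsoprea/go-utility/filesystem"
)

func main() {
	sb := rifs.NewSeekableBuffer()

	if _, err := sb.Write([]byte("abc")); err != nil {
		panic(err)
	}

	// Seeking past the end and writing zero-fills the gap, like a
	// sparse file.
	if _, err := sb.Seek(5, io.SeekStart); err != nil {
		panic(err)
	}

	if _, err := sb.Write([]byte("Z")); err != nil {
		panic(err)
	}

	fmt.Printf("%d %q\n", sb.Len(), sb.Bytes())
	// Output: 6 "abc\x00\x00Z"
}
```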

View File

@ -0,0 +1,69 @@
package rifs
import (
"os"
"time"
)
// SimpleFileInfo is a simple `os.FileInfo` implementation useful for testing
// with the bare minimum.
type SimpleFileInfo struct {
filename string
isDir bool
size int64
mode os.FileMode
modTime time.Time
}
// NewSimpleFileInfoWithFile returns a new file-specific SimpleFileInfo.
func NewSimpleFileInfoWithFile(filename string, size int64, mode os.FileMode, modTime time.Time) *SimpleFileInfo {
return &SimpleFileInfo{
filename: filename,
isDir: false,
size: size,
mode: mode,
modTime: modTime,
}
}
// NewSimpleFileInfoWithDirectory returns a new directory-specific
// SimpleFileInfo.
func NewSimpleFileInfoWithDirectory(filename string, modTime time.Time) *SimpleFileInfo {
return &SimpleFileInfo{
filename: filename,
isDir: true,
mode: os.ModeDir,
modTime: modTime,
}
}
// Name returns the base name of the file.
func (sfi *SimpleFileInfo) Name() string {
return sfi.filename
}
// Size returns the length in bytes for regular files; system-dependent for
// others.
func (sfi *SimpleFileInfo) Size() int64 {
return sfi.size
}
// Mode returns the file mode bits.
func (sfi *SimpleFileInfo) Mode() os.FileMode {
return sfi.mode
}
// ModTime returns the modification time.
func (sfi *SimpleFileInfo) ModTime() time.Time {
return sfi.modTime
}
// IsDir returns true if a directory.
func (sfi *SimpleFileInfo) IsDir() bool {
return sfi.isDir
}
// Sys returns internal state.
func (sfi *SimpleFileInfo) Sys() interface{} {
return nil
}

View File

@ -0,0 +1,17 @@
package rifs
import (
"io"
"os"
"github.com/dsoprea/go-logging"
)
// GetOffset returns the current offset of the Seeker and just panics if unable
// to find it.
func GetOffset(s io.Seeker) int64 {
offsetRaw, err := s.Seek(0, os.SEEK_CUR)
log.PanicIf(err)
return offsetRaw
}

View File

@ -0,0 +1,36 @@
package rifs
import (
"io"
)
// WriteCounter proxies write requests and maintains a counter of bytes written.
type WriteCounter struct {
w io.Writer
counter int
}
// NewWriteCounter returns a new `WriteCounter` struct wrapping a `Writer`.
func NewWriteCounter(w io.Writer) *WriteCounter {
return &WriteCounter{
w: w,
}
}
// Count returns the total number of bytes written.
func (wc *WriteCounter) Count() int {
return wc.counter
}
// Reset resets the counter to zero.
func (wc *WriteCounter) Reset() {
wc.counter = 0
}
// Write forwards a write to the underlying `Writer` while bumping the counter.
func (wc *WriteCounter) Write(b []byte) (n int, err error) {
n, err = wc.w.Write(b)
wc.counter += n
return n, err
}

View File

@ -0,0 +1,9 @@
[![GoDoc](https://godoc.org/github.com/dsoprea/go-utility/image?status.svg)](https://godoc.org/github.com/dsoprea/go-utility/image)
[![Build Status](https://travis-ci.org/dsoprea/go-utility.svg?branch=master)](https://travis-ci.org/dsoprea/go-utility)
[![Coverage Status](https://coveralls.io/repos/github/dsoprea/go-utility/badge.svg?branch=master)](https://coveralls.io/github/dsoprea/go-utility?branch=master)
[![Go Report Card](https://goreportcard.com/badge/github.com/dsoprea/go-utility)](https://goreportcard.com/report/github.com/dsoprea/go-utility)
# media_parser_type
Common image-parsing interfaces, used by the JPEG, PNG, and HEIC parsers that
go-exif-knife consumes.

View File

@ -0,0 +1,34 @@
package riimage
import (
"io"
"github.com/dsoprea/go-exif/v3"
)
// MediaContext is an accessor that knows how to extract specific metadata from
// the media.
type MediaContext interface {
// Exif returns the EXIF's root IFD.
Exif() (rootIfd *exif.Ifd, data []byte, err error)
}
// MediaParser prescribes a specific structure for the parser types that are
// imported from other projects. We don't use it directly, but we use this to
// impose structure.
type MediaParser interface {
// Parse parses a stream using an `io.ReadSeeker`. `mc` should *actually*
// be an `ExifContext`.
Parse(r io.ReadSeeker, size int) (mc MediaContext, err error)
// ParseFile parses a stream using a file. `mc` should *actually* be an
// `ExifContext`.
ParseFile(filepath string) (mc MediaContext, err error)
// ParseBytes parses a stream directly from bytes. `mc` should *actually* be
// an `ExifContext`.
ParseBytes(data []byte) (mc MediaContext, err error)
// LooksLikeFormat parses the data to determine if it's a compatible format.
LooksLikeFormat(data []byte) bool
}
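A hypothetical minimal parser satisfying this interface, assuming the package is imported as `riimage` from `github.com/dsoprea/go-utility/image`. The `pngParser`/`pngContext` names and the stubbed-out EXIF extraction are illustrative only; `exif.ErrNoExif` is the sentinel go-exif returns when no EXIF data is present:

```go
package main

import (
	"bytes"
	"errors"
	"io"
	"os"

	"github.com/dsoprea/go-exif/v3"
	riimage "github.com/dsoprea/go-utility/image"
)

var pngMagic = []byte{0x89, 'P', 'N', 'G', '\r', '\n', 0x1a, '\n'}

// pngContext is a stub MediaContext; a real parser would locate and parse
// the EXIF chunk here.
type pngContext struct{}

func (pc *pngContext) Exif() (rootIfd *exif.Ifd, data []byte, err error) {
	return nil, nil, exif.ErrNoExif
}

type pngParser struct{}

func (pp *pngParser) Parse(r io.ReadSeeker, size int) (riimage.MediaContext, error) {
	data := make([]byte, size)
	if _, err := io.ReadFull(r, data); err != nil {
		return nil, err
	}

	return pp.ParseBytes(data)
}

func (pp *pngParser) ParseFile(filepath string) (riimage.MediaContext, error) {
	data, err := os.ReadFile(filepath)
	if err != nil {
		return nil, err
	}

	return pp.ParseBytes(data)
}

func (pp *pngParser) ParseBytes(data []byte) (riimage.MediaContext, error) {
	if !pp.LooksLikeFormat(data) {
		return nil, errors.New("not a PNG")
	}

	return &pngContext{}, nil
}

func (pp *pngParser) LooksLikeFormat(data []byte) bool {
	return len(data) >= len(pngMagic) && bytes.Equal(data[:len(pngMagic)], pngMagic)
}

// Compile-time check that the sketch satisfies the interface.
var _ riimage.MediaParser = (*pngParser)(nil)

func main() {}
```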

7
vendor/github.com/go-errors/errors/.travis.yml generated vendored Normal file
View File

@ -0,0 +1,7 @@
language: go
go:
- "1.8.x"
- "1.10.x"
- "1.13.x"
- "1.14.x"

7
vendor/github.com/go-errors/errors/LICENSE.MIT generated vendored Normal file
View File

@ -0,0 +1,7 @@
Copyright (c) 2015 Conrad Irwin <conrad@bugsnag.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

69
vendor/github.com/go-errors/errors/README.md generated vendored Normal file
View File

@ -0,0 +1,69 @@
go-errors/errors
================
[![Build Status](https://travis-ci.org/go-errors/errors.svg?branch=master)](https://travis-ci.org/go-errors/errors)
Package errors adds stacktrace support to errors in go.
This is particularly useful when you want to understand the state of execution
when an error was returned unexpectedly.
It provides the type \*Error which implements the standard golang error
interface, so you can use this library interchangeably with code that is
expecting a normal error return.
Usage
-----
Full documentation is available on
[godoc](https://godoc.org/github.com/go-errors/errors), but here's a simple
example:
```go
package crashy
import "github.com/go-errors/errors"
var Crashed = errors.Errorf("oh dear")
func Crash() error {
return errors.New(Crashed)
}
```
This can be called as follows:
```go
package main
import (
"crashy"
"fmt"
"github.com/go-errors/errors"
)
func main() {
err := crashy.Crash()
if err != nil {
if errors.Is(err, crashy.Crashed) {
fmt.Println(err.(*errors.Error).ErrorStack())
} else {
panic(err)
}
}
}
```
Meta-fu
-------
This package was originally written to allow reporting to
[Bugsnag](https://bugsnag.com/) from
[bugsnag-go](https://github.com/bugsnag/bugsnag-go), but after I found similar
packages by Facebook and Dropbox, it was moved to one canonical location so
everyone can benefit.
This package is licensed under the MIT license, see LICENSE.MIT for details.
## Changelog
* v1.1.0 updated to use go1.13's standard-library errors.Is method instead of == in errors.Is

89
vendor/github.com/go-errors/errors/cover.out generated vendored Normal file
View File

@ -0,0 +1,89 @@
mode: set
github.com/go-errors/errors/stackframe.go:27.51,30.25 2 1
github.com/go-errors/errors/stackframe.go:33.2,38.8 3 1
github.com/go-errors/errors/stackframe.go:30.25,32.3 1 0
github.com/go-errors/errors/stackframe.go:43.47,44.31 1 1
github.com/go-errors/errors/stackframe.go:47.2,47.48 1 1
github.com/go-errors/errors/stackframe.go:44.31,46.3 1 1
github.com/go-errors/errors/stackframe.go:52.42,56.16 3 1
github.com/go-errors/errors/stackframe.go:60.2,60.60 1 1
github.com/go-errors/errors/stackframe.go:56.16,58.3 1 0
github.com/go-errors/errors/stackframe.go:64.55,67.16 2 1
github.com/go-errors/errors/stackframe.go:71.2,72.61 2 1
github.com/go-errors/errors/stackframe.go:76.2,76.66 1 1
github.com/go-errors/errors/stackframe.go:67.16,69.3 1 0
github.com/go-errors/errors/stackframe.go:72.61,74.3 1 0
github.com/go-errors/errors/stackframe.go:79.56,91.63 3 1
github.com/go-errors/errors/stackframe.go:95.2,95.53 1 1
github.com/go-errors/errors/stackframe.go:100.2,101.18 2 1
github.com/go-errors/errors/stackframe.go:91.63,94.3 2 1
github.com/go-errors/errors/stackframe.go:95.53,98.3 2 1
github.com/go-errors/errors/error.go:70.32,73.23 2 1
github.com/go-errors/errors/error.go:80.2,85.3 3 1
github.com/go-errors/errors/error.go:74.2,75.10 1 1
github.com/go-errors/errors/error.go:76.2,77.28 1 1
github.com/go-errors/errors/error.go:92.43,95.23 2 1
github.com/go-errors/errors/error.go:104.2,109.3 3 1
github.com/go-errors/errors/error.go:96.2,97.11 1 1
github.com/go-errors/errors/error.go:98.2,99.10 1 1
github.com/go-errors/errors/error.go:100.2,101.28 1 1
github.com/go-errors/errors/error.go:115.39,117.19 1 1
github.com/go-errors/errors/error.go:121.2,121.29 1 1
github.com/go-errors/errors/error.go:125.2,125.43 1 1
github.com/go-errors/errors/error.go:129.2,129.14 1 1
github.com/go-errors/errors/error.go:117.19,119.3 1 1
github.com/go-errors/errors/error.go:121.29,123.3 1 1
github.com/go-errors/errors/error.go:125.43,127.3 1 1
github.com/go-errors/errors/error.go:135.53,137.2 1 1
github.com/go-errors/errors/error.go:140.34,142.2 1 1
github.com/go-errors/errors/error.go:146.34,149.42 2 1
github.com/go-errors/errors/error.go:153.2,153.20 1 1
github.com/go-errors/errors/error.go:149.42,151.3 1 1
github.com/go-errors/errors/error.go:158.39,160.2 1 1
github.com/go-errors/errors/error.go:164.46,165.23 1 1
github.com/go-errors/errors/error.go:173.2,173.19 1 1
github.com/go-errors/errors/error.go:165.23,168.32 2 1
github.com/go-errors/errors/error.go:168.32,170.4 1 1
github.com/go-errors/errors/error.go:177.37,178.42 1 1
github.com/go-errors/errors/error.go:181.2,181.41 1 1
github.com/go-errors/errors/error.go:178.42,180.3 1 1
github.com/go-errors/errors/parse_panic.go:10.39,12.2 1 1
github.com/go-errors/errors/parse_panic.go:16.46,24.34 5 1
github.com/go-errors/errors/parse_panic.go:70.2,70.43 1 1
github.com/go-errors/errors/parse_panic.go:73.2,73.55 1 0
github.com/go-errors/errors/parse_panic.go:24.34,27.23 2 1
github.com/go-errors/errors/parse_panic.go:27.23,28.42 1 1
github.com/go-errors/errors/parse_panic.go:28.42,31.5 2 1
github.com/go-errors/errors/parse_panic.go:31.6,33.5 1 0
github.com/go-errors/errors/parse_panic.go:35.5,35.29 1 1
github.com/go-errors/errors/parse_panic.go:35.29,36.86 1 1
github.com/go-errors/errors/parse_panic.go:36.86,38.5 1 1
github.com/go-errors/errors/parse_panic.go:40.5,40.32 1 1
github.com/go-errors/errors/parse_panic.go:40.32,41.18 1 1
github.com/go-errors/errors/parse_panic.go:45.4,46.46 2 1
github.com/go-errors/errors/parse_panic.go:51.4,53.23 2 1
github.com/go-errors/errors/parse_panic.go:57.4,58.18 2 1
github.com/go-errors/errors/parse_panic.go:62.4,63.17 2 1
github.com/go-errors/errors/parse_panic.go:41.18,43.10 2 1
github.com/go-errors/errors/parse_panic.go:46.46,49.5 2 1
github.com/go-errors/errors/parse_panic.go:53.23,55.5 1 0
github.com/go-errors/errors/parse_panic.go:58.18,60.5 1 0
github.com/go-errors/errors/parse_panic.go:63.17,65.10 2 1
github.com/go-errors/errors/parse_panic.go:70.43,72.3 1 1
github.com/go-errors/errors/parse_panic.go:80.85,82.29 2 1
github.com/go-errors/errors/parse_panic.go:85.2,85.15 1 1
github.com/go-errors/errors/parse_panic.go:88.2,90.63 2 1
github.com/go-errors/errors/parse_panic.go:94.2,94.53 1 1
github.com/go-errors/errors/parse_panic.go:99.2,101.36 2 1
github.com/go-errors/errors/parse_panic.go:105.2,106.15 2 1
github.com/go-errors/errors/parse_panic.go:109.2,112.49 3 1
github.com/go-errors/errors/parse_panic.go:116.2,117.16 2 1
github.com/go-errors/errors/parse_panic.go:121.2,126.8 1 1
github.com/go-errors/errors/parse_panic.go:82.29,84.3 1 0
github.com/go-errors/errors/parse_panic.go:85.15,87.3 1 1
github.com/go-errors/errors/parse_panic.go:90.63,93.3 2 1
github.com/go-errors/errors/parse_panic.go:94.53,97.3 2 1
github.com/go-errors/errors/parse_panic.go:101.36,103.3 1 0
github.com/go-errors/errors/parse_panic.go:106.15,108.3 1 0
github.com/go-errors/errors/parse_panic.go:112.49,114.3 1 1
github.com/go-errors/errors/parse_panic.go:117.16,119.3 1 0

205
vendor/github.com/go-errors/errors/error.go generated vendored Normal file
View File

@ -0,0 +1,205 @@
// Package errors provides errors that have stack-traces.
//
// This is particularly useful when you want to understand the
// state of execution when an error was returned unexpectedly.
//
// It provides the type *Error which implements the standard
// golang error interface, so you can use this library interchangeably
// with code that is expecting a normal error return.
//
// For example:
//
// package crashy
//
// import "github.com/go-errors/errors"
//
// var Crashed = errors.Errorf("oh dear")
//
// func Crash() error {
// return errors.New(Crashed)
// }
//
// This can be called as follows:
//
// package main
//
// import (
// "crashy"
// "fmt"
// "github.com/go-errors/errors"
// )
//
// func main() {
// err := crashy.Crash()
// if err != nil {
// if errors.Is(err, crashy.Crashed) {
// fmt.Println(err.(*errors.Error).ErrorStack())
// } else {
// panic(err)
// }
// }
// }
//
// This package was originally written to allow reporting to Bugsnag,
// but after I found similar packages by Facebook and Dropbox, it
// was moved to one canonical location so everyone can benefit.
package errors
import (
"bytes"
"fmt"
"reflect"
"runtime"
)
// MaxStackDepth is the maximum number of stackframes on any error.
var MaxStackDepth = 50
// Error is an error with an attached stacktrace. It can be used
// wherever the builtin error interface is expected.
type Error struct {
Err error
stack []uintptr
frames []StackFrame
prefix string
}
// New makes an Error from the given value. If that value is already an
// error then it will be used directly, if not, it will be passed to
// fmt.Errorf("%v"). The stacktrace will point to the line of code that
// called New.
func New(e interface{}) *Error {
var err error
switch e := e.(type) {
case error:
err = e
default:
err = fmt.Errorf("%v", e)
}
stack := make([]uintptr, MaxStackDepth)
length := runtime.Callers(2, stack[:])
return &Error{
Err: err,
stack: stack[:length],
}
}
// Wrap makes an Error from the given value. If that value is already an
// error then it will be used directly, if not, it will be passed to
// fmt.Errorf("%v"). The skip parameter indicates how far up the stack
// to start the stacktrace. 0 is from the current call, 1 from its caller, etc.
func Wrap(e interface{}, skip int) *Error {
if e == nil {
return nil
}
var err error
switch e := e.(type) {
case *Error:
return e
case error:
err = e
default:
err = fmt.Errorf("%v", e)
}
stack := make([]uintptr, MaxStackDepth)
length := runtime.Callers(2+skip, stack[:])
return &Error{
Err: err,
stack: stack[:length],
}
}
// WrapPrefix makes an Error from the given value. If that value is already an
// error then it will be used directly, if not, it will be passed to
// fmt.Errorf("%v"). The prefix parameter is used to add a prefix to the
// error message when calling Error(). The skip parameter indicates how far
// up the stack to start the stacktrace. 0 is from the current call,
// 1 from its caller, etc.
func WrapPrefix(e interface{}, prefix string, skip int) *Error {
if e == nil {
return nil
}
err := Wrap(e, 1+skip)
if err.prefix != "" {
prefix = fmt.Sprintf("%s: %s", prefix, err.prefix)
}
return &Error{
Err: err.Err,
stack: err.stack,
prefix: prefix,
}
}
// Errorf creates a new error with the given message. You can use it
// as a drop-in replacement for fmt.Errorf() to provide descriptive
// errors in return values.
func Errorf(format string, a ...interface{}) *Error {
return Wrap(fmt.Errorf(format, a...), 1)
}
// Error returns the underlying error's message.
func (err *Error) Error() string {
msg := err.Err.Error()
if err.prefix != "" {
msg = fmt.Sprintf("%s: %s", err.prefix, msg)
}
return msg
}
// Stack returns the callstack formatted the same way that go does
// in runtime/debug.Stack()
func (err *Error) Stack() []byte {
buf := bytes.Buffer{}
for _, frame := range err.StackFrames() {
buf.WriteString(frame.String())
}
return buf.Bytes()
}
// Callers satisfies the bugsnag ErrorWithCallerS() interface
// so that the stack can be read out.
func (err *Error) Callers() []uintptr {
return err.stack
}
// ErrorStack returns a string that contains both the
// error message and the callstack.
func (err *Error) ErrorStack() string {
return err.TypeName() + " " + err.Error() + "\n" + string(err.Stack())
}
// StackFrames returns an array of frames containing information about the
// stack.
func (err *Error) StackFrames() []StackFrame {
if err.frames == nil {
err.frames = make([]StackFrame, len(err.stack))
for i, pc := range err.stack {
err.frames[i] = NewStackFrame(pc)
}
}
return err.frames
}
// TypeName returns the type of this error. e.g. *errors.errorString.
func (err *Error) TypeName() string {
if _, ok := err.Err.(uncaughtPanic); ok {
return "panic"
}
return reflect.TypeOf(err.Err).String()
}
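A short sketch of `Wrap`/`WrapPrefix` in action; the function names here are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/go-errors/errors"
)

func readConfig() error {
	return fmt.Errorf("no such file")
}

func loadApp() error {
	if err := readConfig(); err != nil {
		// Attach a stacktrace at this point and prefix the message.
		return errors.WrapPrefix(err, "loading app", 0)
	}

	return nil
}

func main() {
	err := loadApp()

	fmt.Println(err)
	// Output: loading app: no such file

	fmt.Println(err.(*errors.Error).ErrorStack())
}
```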

26
vendor/github.com/go-errors/errors/error_1_13.go generated vendored Normal file
View File

@ -0,0 +1,26 @@
// +build go1.13
package errors
import (
baseErrors "errors"
)
// Is detects whether the error is equal to a given error. Errors
// are considered equal by this function if they are matched by errors.Is
// or if their contained errors are matched through errors.Is
func Is(e error, original error) bool {
if baseErrors.Is(e, original) {
return true
}
if e, ok := e.(*Error); ok {
return Is(e.Err, original)
}
if original, ok := original.(*Error); ok {
return Is(e, original.Err)
}
return false
}

22
vendor/github.com/go-errors/errors/error_backward.go generated vendored Normal file
View File

@ -0,0 +1,22 @@
// +build !go1.13
package errors
// Is detects whether the error is equal to a given error. Errors
// are considered equal by this function if they are the same object,
// or if they both contain the same error inside an errors.Error.
func Is(e error, original error) bool {
if e == original {
return true
}
if e, ok := e.(*Error); ok {
return Is(e.Err, original)
}
if original, ok := original.(*Error); ok {
return Is(e, original.Err)
}
return false
}

127
vendor/github.com/go-errors/errors/parse_panic.go generated vendored Normal file
View File

@ -0,0 +1,127 @@
package errors
import (
"strconv"
"strings"
)
type uncaughtPanic struct{ message string }
func (p uncaughtPanic) Error() string {
return p.message
}
// ParsePanic allows you to get an error object from the output of a go program
// that panicked. This is particularly useful with https://github.com/mitchellh/panicwrap.
func ParsePanic(text string) (*Error, error) {
lines := strings.Split(text, "\n")
state := "start"
var message string
var stack []StackFrame
for i := 0; i < len(lines); i++ {
line := lines[i]
if state == "start" {
if strings.HasPrefix(line, "panic: ") {
message = strings.TrimPrefix(line, "panic: ")
state = "seek"
} else {
return nil, Errorf("bugsnag.panicParser: Invalid line (no prefix): %s", line)
}
} else if state == "seek" {
if strings.HasPrefix(line, "goroutine ") && strings.HasSuffix(line, "[running]:") {
state = "parsing"
}
} else if state == "parsing" {
if line == "" {
state = "done"
break
}
createdBy := false
if strings.HasPrefix(line, "created by ") {
line = strings.TrimPrefix(line, "created by ")
createdBy = true
}
i++
if i >= len(lines) {
return nil, Errorf("bugsnag.panicParser: Invalid line (unpaired): %s", line)
}
frame, err := parsePanicFrame(line, lines[i], createdBy)
if err != nil {
return nil, err
}
stack = append(stack, *frame)
if createdBy {
state = "done"
break
}
}
}
if state == "done" || state == "parsing" {
return &Error{Err: uncaughtPanic{message}, frames: stack}, nil
}
return nil, Errorf("could not parse panic: %v", text)
}
// The lines we're passing look like this:
//
// main.(*foo).destruct(0xc208067e98)
// /0/go/src/github.com/bugsnag/bugsnag-go/pan/main.go:22 +0x151
func parsePanicFrame(name string, line string, createdBy bool) (*StackFrame, error) {
idx := strings.LastIndex(name, "(")
if idx == -1 && !createdBy {
return nil, Errorf("bugsnag.panicParser: Invalid line (no call): %s", name)
}
if idx != -1 {
name = name[:idx]
}
pkg := ""
if lastslash := strings.LastIndex(name, "/"); lastslash >= 0 {
pkg += name[:lastslash] + "/"
name = name[lastslash+1:]
}
if period := strings.Index(name, "."); period >= 0 {
pkg += name[:period]
name = name[period+1:]
}
name = strings.Replace(name, "·", ".", -1)
if !strings.HasPrefix(line, "\t") {
return nil, Errorf("bugsnag.panicParser: Invalid line (no tab): %s", line)
}
idx = strings.LastIndex(line, ":")
if idx == -1 {
return nil, Errorf("bugsnag.panicParser: Invalid line (no line number): %s", line)
}
file := line[1:idx]
number := line[idx+1:]
if idx = strings.Index(number, " +"); idx > -1 {
number = number[:idx]
}
lno, err := strconv.ParseInt(number, 10, 32)
if err != nil {
return nil, Errorf("bugsnag.panicParser: Invalid line (bad line number): %s", line)
}
return &StackFrame{
File: file,
LineNumber: int(lno),
Package: pkg,
Name: name,
}, nil
}
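A sketch feeding captured panic output through the parser; the text is a hand-written example of the format described above:

```go
package main

import (
	"fmt"

	"github.com/go-errors/errors"
)

func main() {
	text := "panic: runtime error: index out of range\n" +
		"\n" +
		"goroutine 1 [running]:\n" +
		"main.main()\n" +
		"\t/tmp/main.go:5 +0x11\n"

	parsed, err := errors.ParsePanic(text)
	if err != nil {
		panic(err)
	}

	fmt.Println(parsed.Error())
	// Output: runtime error: index out of range

	fmt.Println(parsed.TypeName())
	// Output: panic
}
```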

114
vendor/github.com/go-errors/errors/stackframe.go generated vendored Normal file
View File

@ -0,0 +1,114 @@
package errors
import (
"bufio"
"bytes"
"fmt"
"os"
"runtime"
"strings"
)
// A StackFrame contains all necessary information to generate a line
// in a callstack.
type StackFrame struct {
// The path to the file containing this ProgramCounter
File string
// The LineNumber in that file
LineNumber int
// The Name of the function that contains this ProgramCounter
Name string
// The Package that contains this function
Package string
// The underlying ProgramCounter
ProgramCounter uintptr
}
// NewStackFrame populates a stack frame object from the program counter.
func NewStackFrame(pc uintptr) (frame StackFrame) {
frame = StackFrame{ProgramCounter: pc}
if frame.Func() == nil {
return
}
frame.Package, frame.Name = packageAndName(frame.Func())
// pc -1 because the program counters we use are usually return addresses,
// and we want to show the line that corresponds to the function call
frame.File, frame.LineNumber = frame.Func().FileLine(pc - 1)
return
}
// Func returns the function that contained this frame.
func (frame *StackFrame) Func() *runtime.Func {
if frame.ProgramCounter == 0 {
return nil
}
return runtime.FuncForPC(frame.ProgramCounter)
}
// String returns the stackframe formatted in the same way as go does
// in runtime/debug.Stack()
func (frame *StackFrame) String() string {
str := fmt.Sprintf("%s:%d (0x%x)\n", frame.File, frame.LineNumber, frame.ProgramCounter)
source, err := frame.SourceLine()
if err != nil {
return str
}
return str + fmt.Sprintf("\t%s: %s\n", frame.Name, source)
}
// SourceLine gets the line of code (from File and Line) of the original source if possible.
func (frame *StackFrame) SourceLine() (string, error) {
if frame.LineNumber <= 0 {
return "???", nil
}
file, err := os.Open(frame.File)
if err != nil {
return "", New(err)
}
defer file.Close()
scanner := bufio.NewScanner(file)
currentLine := 1
for scanner.Scan() {
if currentLine == frame.LineNumber {
return string(bytes.Trim(scanner.Bytes(), " \t")), nil
}
currentLine++
}
if err := scanner.Err(); err != nil {
return "", New(err)
}
return "???", nil
}
func packageAndName(fn *runtime.Func) (string, string) {
name := fn.Name()
pkg := ""
// The name includes the path name to the package, which is unnecessary
// since the file name is already included. Plus, it has center dots.
// That is, we see
// runtime/debug.*T·ptrmethod
// and want
// *T.ptrmethod
// Since the package path might contain dots (e.g. code.google.com/...),
// we first remove the path prefix if there is one.
if lastslash := strings.LastIndex(name, "/"); lastslash >= 0 {
pkg += name[:lastslash] + "/"
name = name[lastslash+1:]
}
if period := strings.Index(name, "."); period >= 0 {
pkg += name[:period]
name = name[period+1:]
}
name = strings.Replace(name, "·", ".", -1)
return pkg, name
}

21
vendor/github.com/go-xmlfmt/xmlfmt/LICENSE generated vendored Normal file
View File

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2016 go-xmlfmt
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

178
vendor/github.com/go-xmlfmt/xmlfmt/README.md generated vendored Normal file
View File

@ -0,0 +1,178 @@
# Go XML Formatter
[![MIT License](http://img.shields.io/badge/License-MIT-blue.svg)](LICENSE)
[![Go Doc](https://img.shields.io/badge/godoc-reference-4b68a3.svg)](https://godoc.org/github.com/go-xmlfmt/xmlfmt)
[![Go Report Card](https://goreportcard.com/badge/github.com/go-xmlfmt/xmlfmt)](https://goreportcard.com/report/github.com/go-xmlfmt/xmlfmt)
[![Codeship Status](https://codeship.com/projects/c49f02b0-a384-0134-fb20-2e0351080565/status?branch=master)](https://codeship.com/projects/190297)
## Synopsis
The Go XML Formatter, xmlfmt, will format the XML string in a readable way.
```go
package main
import "github.com/go-xmlfmt/xmlfmt"
func main() {
xml1 := `<root><this><is>a</is><test /><message><org><cn>Some org-or-other</cn><ph>Wouldnt you like to know</ph></org><contact><fn>Pat</fn><ln>Califia</ln></contact></message></this></root>`
x := xmlfmt.FormatXML(xml1, "\t", " ")
print(x)
}
```
Output:
```xml
<root>
<this>
<is>a
</is>
<test />
<message>
<!-- with comment -->
<org>
<cn>Some org-or-other
</cn>
<ph>Wouldnt you like to know
</ph>
</org>
<contact>
<fn>Pat
</fn>
<ln>Califia
</ln>
</contact>
</message>
</this>
</root>
```
There is no XML decoding and encoding involved, only pure regular expression matching and replacing. So it is much faster than going through decoding and encoding procedures. Moreover, the exact XML source string is preserved, instead of being changed by the encoder. This is why this package exists in the first place.
## Command
To use it on command line, check out [xmlfmt](https://github.com/AntonioSun/xmlfmt):
```
$ xmlfmt
XML Formatter
built on 2019-12-08
The xmlfmt will format the XML string without rewriting the document
Options:
-h, --help display help information
-f, --file *The xml file to read from (or stdin)
-p, --prefix each element begins on a new line and this prefix
-i, --indent[= ] indent string for nested elements
```
## Justification
### The format
The Go XML Formatter is not called an XML Beautifier because the result is not *exactly* what people would expect -- some, but not all, closing tags stay on the same line, just as shown above. Having looked at the result and thought it over, I now think this is actually a better way to present it, as those closing tags are better left on the same line in my opinion. I.e.,
When it comes to very big XML strings, which is what I'm dealing with every day, saving space by not letting those closing tags take extra lines is a plus rather than a negative to me.
### The alternative
Formatting it “properly”, i.e., as what people would normally see, is very hard using pure regular expressions. In fact, according to Sam Whited from the go-nuts mailing list,
> Regular expression is, well, regular. This means that they can parse regular grammars, but can't parse context free grammars (like XML). It is actually impossible to use a regex to do this task; it will always be fragile, unfortunately.
So if the output format is so important to you, then unfortunately you have to go through decoding and encoding procedures. But there are some drawbacks as well, as put by James McGill in http://stackoverflow.com/questions/21117161, besides such a method being slow:
> I like this solution, but am still in search of a Golang XML formatter/prettyprinter that doesn't rewrite the document (other than formatting whitespace). Marshalling or using the Encoder will change namespace declarations.
>
> For example an element like "< ns1:Element />" will be translated to something like '< Element xmlns="http://bla...bla/ns1" >< /Element >' which seems harmless enough except when the intent is to not alter the xml other than formatting. -- James McGill Nov 12 '15
Using Sam's code as an example,
https://play.golang.org/p/JUqQY3WpW5
The above code formats the following XML
```xml
<soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/"
xmlns:ns="http://example.com/ns">
<soapenv:Header/>
<soapenv:Body>
<ns:request>
<ns:customer>
<ns:id>123</ns:id>
<ns:name type="NCHZ">John Brown</ns:name>
</ns:customer>
</ns:request>
</soapenv:Body>
</soapenv:Envelope>
```
into this:
```xml
<Envelope xmlns="http://schemas.xmlsoap.org/soap/envelope/" xmlns:_xmlns="xmlns" _xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" _xmlns:ns="http://example.com/ns">
<Header xmlns="http://schemas.xmlsoap.org/soap/envelope/"></Header>
<Body xmlns="http://schemas.xmlsoap.org/soap/envelope/">
<request xmlns="http://example.com/ns">
<customer xmlns="http://example.com/ns">
<id xmlns="http://example.com/ns">123</id>
<name xmlns="http://example.com/ns" type="NCHZ">John Brown</name>
</customer>
</request>
</Body>
</Envelope>
```
I know they are syntactically the same, however the problem is that they *look* totally different.
That's why there is this package, an XML Beautifier that doesn't rewrite the document.
## Credit
The credit goes to **diotalevi** from his post at http://www.perlmonks.org/?node_id=261292.
However, it does not work for all cases. For example,
```sh
$ echo '<Envelope xmlns=http://schemas.xmlsoap.org/soap/envelope/ xmlns:_xmlns=xmlns _xmlns:soapenv=http://schemas.xmlsoap.org/soap/envelope/ _xmlns:ns=http://example.com/ns><Header xmlns=http://schemas.xmlsoap.org/soap/envelope/></Header><Body xmlns=http://schemas.xmlsoap.org/soap/envelope/><request xmlns=http://example.com/ns><customer xmlns=http://example.com/ns><id xmlns=http://example.com/ns>123</id><name xmlns=http://example.com/ns type=NCHZ>John Brown</name></customer></request></Body></Envelope>' | perl -pe 's/(?<=>)\s+(?=<)//g; s(<(/?)([^/>]+)(/?)>\s*(?=(</?))?)($indent+=$3?0:$1?-1:1;"<$1$2$3>".($1&&($4 eq"</")?"\n".(" "x$indent):$4?"\n".(" "x$indent):""))ge'
```
```xml
<Envelope xmlns=http://schemas.xmlsoap.org/soap/envelope/ xmlns:_xmlns=xmlns _xmlns:soapenv=http://schemas.xmlsoap.org/soap/envelope/ _xmlns:ns=http://example.com/ns><Header xmlns=http://schemas.xmlsoap.org/soap/envelope/></Header>
<Body xmlns=http://schemas.xmlsoap.org/soap/envelope/><request xmlns=http://example.com/ns><customer xmlns=http://example.com/ns><id xmlns=http://example.com/ns>123</id>
<name xmlns=http://example.com/ns type=NCHZ>John Brown</name>
</customer>
</request>
</Body>
</Envelope>
```
I simplified the algorithm, and now it should work for all cases:
```sh
echo '<Envelope xmlns="http://schemas.xmlsoap.org/soap/envelope/" xmlns:_xmlns="xmlns" _xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" _xmlns:ns="http://example.com/ns"><Header xmlns="http://schemas.xmlsoap.org/soap/envelope/"></Header><Body xmlns="http://schemas.xmlsoap.org/soap/envelope/"><request xmlns="http://example.com/ns"><customer xmlns="http://example.com/ns"><id xmlns="http://example.com/ns">123</id><name xmlns="http://example.com/ns" type="NCHZ">John Brown</name></customer></request></Body></Envelope>' | perl -pe 's/(?<=>)\s+(?=<)//g; s(<(/?)([^>]+)(/?)>)($indent+=$3?0:$1?-1:1;"<$1$2$3>"."\n".(" "x$indent))ge'
```
```xml
<Envelope xmlns="http://schemas.xmlsoap.org/soap/envelope/" xmlns:_xmlns="xmlns" _xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" _xmlns:ns="http://example.com/ns">
<Header xmlns="http://schemas.xmlsoap.org/soap/envelope/">
</Header>
<Body xmlns="http://schemas.xmlsoap.org/soap/envelope/">
<request xmlns="http://example.com/ns">
<customer xmlns="http://example.com/ns">
<id xmlns="http://example.com/ns">
123</id>
<name xmlns="http://example.com/ns" type="NCHZ">
John Brown</name>
</customer>
</request>
</Body>
</Envelope>
```
This package is a direct translation of the above Perl code into Go,
then further enhanced by @ruandao.

56
vendor/github.com/go-xmlfmt/xmlfmt/xmlfmt.go generated vendored Normal file
View File

@ -0,0 +1,56 @@
////////////////////////////////////////////////////////////////////////////
// Program: xmlfmt.go
// Purpose: Go XML Beautify from XML string using pure string manipulation
// Authors: Antonio Sun (c) 2016-2019, All rights reserved
////////////////////////////////////////////////////////////////////////////
package xmlfmt
import (
"regexp"
"strings"
)
var (
reg = regexp.MustCompile(`<([/!]?)([^>]+?)(/?)>`)
// NL is the newline string used in XML output, defined for DOS convenience.
NL = "\r\n"
)
// FormatXML will (purely) reformat the XML string in a readable way, without rewriting or altering the structure.
func FormatXML(xmls, prefix, indent string) string {
src := regexp.MustCompile(`(?s)>\s+<`).ReplaceAllString(xmls, "><")
rf := replaceTag(prefix, indent)
return (prefix + reg.ReplaceAllStringFunc(src, rf))
}
// replaceTag returns a closure function to do 's/(?<=>)\s+(?=<)//g; s(<(/?)([^>]+?)(/?)>)($indent+=$3?0:$1?-1:1;"<$1$2$3>"."\n".(" "x$indent))ge' as in Perl
// and deal with comments as well
func replaceTag(prefix, indent string) func(string) string {
indentLevel := 0
return func(m string) string {
// head elem
if strings.HasPrefix(m, "<?xml") {
return NL + prefix + strings.Repeat(indent, indentLevel) + m
}
// empty elem
if strings.HasSuffix(m, "/>") {
return NL + prefix + strings.Repeat(indent, indentLevel) + m
}
// comment elem
if strings.HasPrefix(m, "<!") {
return NL + prefix + strings.Repeat(indent, indentLevel) + m
}
// end elem
if strings.HasPrefix(m, "</") {
indentLevel--
return NL + prefix + strings.Repeat(indent, indentLevel) + m
}
defer func() {
indentLevel++
}()
return NL + prefix + strings.Repeat(indent, indentLevel) + m
}
}

202
vendor/github.com/golang/geo/LICENSE generated vendored Normal file
View File

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

20
vendor/github.com/golang/geo/r1/doc.go generated vendored Normal file
View File

@ -0,0 +1,20 @@
// Copyright 2014 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package r1 implements types and functions for working with geometry in ℝ¹.
See ../s2 for a more detailed overview.
*/
package r1

177
vendor/github.com/golang/geo/r1/interval.go generated vendored Normal file
View File

@ -0,0 +1,177 @@
// Copyright 2014 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package r1
import (
"fmt"
"math"
)
// Interval represents a closed interval on ℝ.
// Zero-length intervals (where Lo == Hi) represent single points.
// If Lo > Hi then the interval is empty.
type Interval struct {
Lo, Hi float64
}
// EmptyInterval returns an empty interval.
func EmptyInterval() Interval { return Interval{1, 0} }
// IntervalFromPoint returns an interval representing a single point.
func IntervalFromPoint(p float64) Interval { return Interval{p, p} }
// IsEmpty reports whether the interval is empty.
func (i Interval) IsEmpty() bool { return i.Lo > i.Hi }
// Equal returns true iff the interval contains the same points as oi.
func (i Interval) Equal(oi Interval) bool {
return i == oi || i.IsEmpty() && oi.IsEmpty()
}
// Center returns the midpoint of the interval.
// It is undefined for empty intervals.
func (i Interval) Center() float64 { return 0.5 * (i.Lo + i.Hi) }
// Length returns the length of the interval.
// The length of an empty interval is negative.
func (i Interval) Length() float64 { return i.Hi - i.Lo }
// Contains returns true iff the interval contains p.
func (i Interval) Contains(p float64) bool { return i.Lo <= p && p <= i.Hi }
// ContainsInterval returns true iff the interval contains oi.
func (i Interval) ContainsInterval(oi Interval) bool {
if oi.IsEmpty() {
return true
}
return i.Lo <= oi.Lo && oi.Hi <= i.Hi
}
// InteriorContains returns true iff the interval strictly contains p.
func (i Interval) InteriorContains(p float64) bool {
return i.Lo < p && p < i.Hi
}
// InteriorContainsInterval returns true iff the interval strictly contains oi.
func (i Interval) InteriorContainsInterval(oi Interval) bool {
if oi.IsEmpty() {
return true
}
return i.Lo < oi.Lo && oi.Hi < i.Hi
}
// Intersects returns true iff the interval contains any points in common with oi.
func (i Interval) Intersects(oi Interval) bool {
if i.Lo <= oi.Lo {
return oi.Lo <= i.Hi && oi.Lo <= oi.Hi // oi.Lo ∈ i and oi is not empty
}
return i.Lo <= oi.Hi && i.Lo <= i.Hi // i.Lo ∈ oi and i is not empty
}
// InteriorIntersects returns true iff the interior of the interval contains any points in common with oi, including the latter's boundary.
func (i Interval) InteriorIntersects(oi Interval) bool {
return oi.Lo < i.Hi && i.Lo < oi.Hi && i.Lo < i.Hi && oi.Lo <= oi.Hi
}
// Intersection returns the interval containing all points common to i and j.
func (i Interval) Intersection(j Interval) Interval {
// Empty intervals do not need to be special-cased.
return Interval{
Lo: math.Max(i.Lo, j.Lo),
Hi: math.Min(i.Hi, j.Hi),
}
}
// AddPoint returns the interval expanded so that it contains the given point.
func (i Interval) AddPoint(p float64) Interval {
if i.IsEmpty() {
return Interval{p, p}
}
if p < i.Lo {
return Interval{p, i.Hi}
}
if p > i.Hi {
return Interval{i.Lo, p}
}
return i
}
// ClampPoint returns the closest point in the interval to the given point "p".
// The interval must be non-empty.
func (i Interval) ClampPoint(p float64) float64 {
return math.Max(i.Lo, math.Min(i.Hi, p))
}
// Expanded returns an interval that has been expanded on each side by margin.
// If margin is negative, then the function shrinks the interval on
// each side by margin instead. The resulting interval may be empty. Any
// expansion of an empty interval remains empty.
func (i Interval) Expanded(margin float64) Interval {
if i.IsEmpty() {
return i
}
return Interval{i.Lo - margin, i.Hi + margin}
}
// Union returns the smallest interval that contains this interval and the given interval.
func (i Interval) Union(other Interval) Interval {
if i.IsEmpty() {
return other
}
if other.IsEmpty() {
return i
}
return Interval{math.Min(i.Lo, other.Lo), math.Max(i.Hi, other.Hi)}
}
func (i Interval) String() string { return fmt.Sprintf("[%.7f, %.7f]", i.Lo, i.Hi) }
const (
// epsilon is a small number that represents a reasonable level of noise between two
// values that can be considered to be equal.
epsilon = 1e-15
// dblEpsilon is a smaller number for values that require more precision.
// This is the C++ DBL_EPSILON equivalent.
dblEpsilon = 2.220446049250313e-16
)
// ApproxEqual reports whether the interval can be transformed into the
// given interval by moving each endpoint a small distance.
// The empty interval is considered to be positioned arbitrarily on the
// real line, so any interval with a small enough length will match
// the empty interval.
func (i Interval) ApproxEqual(other Interval) bool {
if i.IsEmpty() {
return other.Length() <= 2*epsilon
}
if other.IsEmpty() {
return i.Length() <= 2*epsilon
}
return math.Abs(other.Lo-i.Lo) <= epsilon &&
math.Abs(other.Hi-i.Hi) <= epsilon
}
// DirectedHausdorffDistance returns the Hausdorff distance to the given interval. For two
// intervals x and y, this distance is defined as
// h(x, y) = max_{p in x} min_{q in y} d(p, q).
func (i Interval) DirectedHausdorffDistance(other Interval) float64 {
if i.IsEmpty() {
return 0
}
if other.IsEmpty() {
return math.Inf(1)
}
return math.Max(0, math.Max(i.Hi-other.Hi, other.Lo-i.Lo))
}
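A short sketch of the interval semantics above, with arbitrary values: an empty interval has Lo > Hi, AddPoint grows the interval minimally, and Intersection/Union behave as expected on the real line.

```go
package main

import (
	"fmt"

	"github.com/golang/geo/r1"
)

func main() {
	i := r1.EmptyInterval() // Lo > Hi, so IsEmpty reports true.
	i = i.AddPoint(2)       // {2, 2}
	i = i.AddPoint(5)       // {2, 5}
	j := r1.Interval{Lo: 4, Hi: 9}
	fmt.Println(i.Intersects(j))   // true
	fmt.Println(i.Intersection(j)) // [4.0000000, 5.0000000]
	fmt.Println(i.Union(j))        // [2.0000000, 9.0000000]
}
```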

20
vendor/github.com/golang/geo/r2/doc.go generated vendored Normal file
View File

@ -0,0 +1,20 @@
// Copyright 2014 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package r2 implements types and functions for working with geometry in ℝ².
See package s2 for a more detailed overview.
*/
package r2

255
vendor/github.com/golang/geo/r2/rect.go generated vendored Normal file
View File

@ -0,0 +1,255 @@
// Copyright 2014 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package r2
import (
"fmt"
"math"
"github.com/golang/geo/r1"
)
// Point represents a point in ℝ².
type Point struct {
X, Y float64
}
// Add returns the sum of p and op.
func (p Point) Add(op Point) Point { return Point{p.X + op.X, p.Y + op.Y} }
// Sub returns the difference of p and op.
func (p Point) Sub(op Point) Point { return Point{p.X - op.X, p.Y - op.Y} }
// Mul returns the scalar product of p and m.
func (p Point) Mul(m float64) Point { return Point{m * p.X, m * p.Y} }
// Ortho returns a counterclockwise orthogonal point with the same norm.
func (p Point) Ortho() Point { return Point{-p.Y, p.X} }
// Dot returns the dot product between p and op.
func (p Point) Dot(op Point) float64 { return p.X*op.X + p.Y*op.Y }
// Cross returns the cross product of p and op.
func (p Point) Cross(op Point) float64 { return p.X*op.Y - p.Y*op.X }
// Norm returns the vector's norm.
func (p Point) Norm() float64 { return math.Hypot(p.X, p.Y) }
// Normalize returns a unit point in the same direction as p.
func (p Point) Normalize() Point {
if p.X == 0 && p.Y == 0 {
return p
}
return p.Mul(1 / p.Norm())
}
func (p Point) String() string { return fmt.Sprintf("(%.12f, %.12f)", p.X, p.Y) }
// Rect represents a closed axis-aligned rectangle in the (x,y) plane.
type Rect struct {
X, Y r1.Interval
}
// RectFromPoints constructs a rect that contains the given points.
func RectFromPoints(pts ...Point) Rect {
// Because the zero value of an interval is {0, 0}, we must seed the
// starting interval from the first point passed in. Otherwise, passing
// in Point{0.2, 0.3} would yield the starting Rect {0, 0.2}, {0, 0.3}
// instead of the correct Rect {0.2, 0.2}, {0.3, 0.3}.
if len(pts) == 0 {
return Rect{}
}
r := Rect{
X: r1.Interval{Lo: pts[0].X, Hi: pts[0].X},
Y: r1.Interval{Lo: pts[0].Y, Hi: pts[0].Y},
}
for _, p := range pts[1:] {
r = r.AddPoint(p)
}
return r
}
// RectFromCenterSize constructs a rectangle with the given center and size.
// Both dimensions of size must be non-negative.
func RectFromCenterSize(center, size Point) Rect {
return Rect{
r1.Interval{Lo: center.X - size.X/2, Hi: center.X + size.X/2},
r1.Interval{Lo: center.Y - size.Y/2, Hi: center.Y + size.Y/2},
}
}
// EmptyRect constructs the canonical empty rectangle. Use IsEmpty() to test
// for empty rectangles, since they have more than one representation. A Rect{}
// is not the same as the EmptyRect.
func EmptyRect() Rect {
return Rect{r1.EmptyInterval(), r1.EmptyInterval()}
}
// IsValid reports whether the rectangle is valid.
// This requires the width to be empty iff the height is empty.
func (r Rect) IsValid() bool {
return r.X.IsEmpty() == r.Y.IsEmpty()
}
// IsEmpty reports whether the rectangle is empty.
func (r Rect) IsEmpty() bool {
return r.X.IsEmpty()
}
// Vertices returns all four vertices of the rectangle. Vertices are returned in
// CCW direction starting with the lower left corner.
func (r Rect) Vertices() [4]Point {
return [4]Point{
{r.X.Lo, r.Y.Lo},
{r.X.Hi, r.Y.Lo},
{r.X.Hi, r.Y.Hi},
{r.X.Lo, r.Y.Hi},
}
}
// VertexIJ returns the vertex in direction i along the X-axis (0=left, 1=right) and
// direction j along the Y-axis (0=down, 1=up).
func (r Rect) VertexIJ(i, j int) Point {
x := r.X.Lo
if i == 1 {
x = r.X.Hi
}
y := r.Y.Lo
if j == 1 {
y = r.Y.Hi
}
return Point{x, y}
}
// Lo returns the low corner of the rect.
func (r Rect) Lo() Point {
return Point{r.X.Lo, r.Y.Lo}
}
// Hi returns the high corner of the rect.
func (r Rect) Hi() Point {
return Point{r.X.Hi, r.Y.Hi}
}
// Center returns the center of the rectangle in (x,y)-space.
func (r Rect) Center() Point {
return Point{r.X.Center(), r.Y.Center()}
}
// Size returns the width and height of this rectangle in (x,y)-space. Empty
// rectangles have a negative width and height.
func (r Rect) Size() Point {
return Point{r.X.Length(), r.Y.Length()}
}
// ContainsPoint reports whether the rectangle contains the given point.
// Rectangles are closed regions, i.e. they contain their boundary.
func (r Rect) ContainsPoint(p Point) bool {
return r.X.Contains(p.X) && r.Y.Contains(p.Y)
}
// InteriorContainsPoint returns true iff the given point is contained in the interior
// of the region (i.e. the region excluding its boundary).
func (r Rect) InteriorContainsPoint(p Point) bool {
return r.X.InteriorContains(p.X) && r.Y.InteriorContains(p.Y)
}
// Contains reports whether the rectangle contains the given rectangle.
func (r Rect) Contains(other Rect) bool {
return r.X.ContainsInterval(other.X) && r.Y.ContainsInterval(other.Y)
}
// InteriorContains reports whether the interior of this rectangle contains all of the
// points of the given other rectangle (including its boundary).
func (r Rect) InteriorContains(other Rect) bool {
return r.X.InteriorContainsInterval(other.X) && r.Y.InteriorContainsInterval(other.Y)
}
// Intersects reports whether this rectangle and the other rectangle have any points in common.
func (r Rect) Intersects(other Rect) bool {
return r.X.Intersects(other.X) && r.Y.Intersects(other.Y)
}
// InteriorIntersects reports whether the interior of this rectangle intersects
// any point (including the boundary) of the given other rectangle.
func (r Rect) InteriorIntersects(other Rect) bool {
return r.X.InteriorIntersects(other.X) && r.Y.InteriorIntersects(other.Y)
}
// AddPoint expands the rectangle to include the given point. The rectangle is
// expanded by the minimum amount possible.
func (r Rect) AddPoint(p Point) Rect {
return Rect{r.X.AddPoint(p.X), r.Y.AddPoint(p.Y)}
}
// AddRect expands the rectangle to include the given rectangle. This is the
// same as replacing the rectangle by the union of the two rectangles, but
// is more efficient.
func (r Rect) AddRect(other Rect) Rect {
return Rect{r.X.Union(other.X), r.Y.Union(other.Y)}
}
// ClampPoint returns the closest point in the rectangle to the given point.
// The rectangle must be non-empty.
func (r Rect) ClampPoint(p Point) Point {
return Point{r.X.ClampPoint(p.X), r.Y.ClampPoint(p.Y)}
}
// Expanded returns a rectangle that has been expanded in the x-direction
// by margin.X, and in the y-direction by margin.Y. If either margin is
// negative, the interval is shrunk on the corresponding sides instead. The
// resulting rectangle may be empty. Any expansion of an empty rectangle
// remains empty.
func (r Rect) Expanded(margin Point) Rect {
xx := r.X.Expanded(margin.X)
yy := r.Y.Expanded(margin.Y)
if xx.IsEmpty() || yy.IsEmpty() {
return EmptyRect()
}
return Rect{xx, yy}
}
// ExpandedByMargin returns a Rect that has been expanded by the amount on all sides.
func (r Rect) ExpandedByMargin(margin float64) Rect {
return r.Expanded(Point{margin, margin})
}
// Union returns the smallest rectangle containing the union of this rectangle and
// the given rectangle.
func (r Rect) Union(other Rect) Rect {
return Rect{r.X.Union(other.X), r.Y.Union(other.Y)}
}
// Intersection returns the smallest rectangle containing the intersection of this
// rectangle and the given rectangle.
func (r Rect) Intersection(other Rect) Rect {
xx := r.X.Intersection(other.X)
yy := r.Y.Intersection(other.Y)
if xx.IsEmpty() || yy.IsEmpty() {
return EmptyRect()
}
return Rect{xx, yy}
}
// ApproxEqual returns true if the x- and y-intervals of the two rectangles are
// the same up to the given tolerance.
func (r Rect) ApproxEqual(r2 Rect) bool {
return r.X.ApproxEqual(r2.X) && r.Y.ApproxEqual(r2.Y)
}
func (r Rect) String() string { return fmt.Sprintf("[Lo%s, Hi%s]", r.Lo(), r.Hi()) }
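An illustrative sketch of the Rect API, with arbitrary coordinates; note how RectFromPoints seeds the rect from the first point, as the constructor's comment explains.

```go
package main

import (
	"fmt"

	"github.com/golang/geo/r2"
)

func main() {
	r := r2.RectFromPoints(r2.Point{X: 0.2, Y: 0.3}, r2.Point{X: 1, Y: 0.5})
	fmt.Println(r.ContainsPoint(r2.Point{X: 0.5, Y: 0.4})) // true
	fmt.Println(r.Center())                                // (0.6, 0.4)
	// Expanding by {0.1, 0.1} widens both intervals on each side.
	fmt.Println(r.Expanded(r2.Point{X: 0.1, Y: 0.1}).Size())
}
```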

20
vendor/github.com/golang/geo/r3/doc.go generated vendored Normal file
View File

@ -0,0 +1,20 @@
// Copyright 2014 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package r3 implements types and functions for working with geometry in ℝ³.
See ../s2 for a more detailed overview.
*/
package r3

198
vendor/github.com/golang/geo/r3/precisevector.go generated vendored Normal file
View File

@ -0,0 +1,198 @@
// Copyright 2016 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package r3
import (
"fmt"
"math/big"
)
const (
// prec is the number of bits of precision to use for the Float values.
// To keep things simple, we use the maximum allowable precision on big
// values. This allows us to handle all values we expect in the s2 library.
prec = big.MaxPrec
)
// define some commonly referenced values.
var (
precise0 = precInt(0)
precise1 = precInt(1)
)
// precStr wraps the conversion from a string into a big.Float. For results that
// actually can be represented exactly, this should only be used on values that
// are integer multiples of integer powers of 2.
func precStr(s string) *big.Float {
// Explicitly ignoring the bool return for this usage.
f, _ := new(big.Float).SetPrec(prec).SetString(s)
return f
}
func precInt(i int64) *big.Float {
return new(big.Float).SetPrec(prec).SetInt64(i)
}
func precFloat(f float64) *big.Float {
return new(big.Float).SetPrec(prec).SetFloat64(f)
}
func precAdd(a, b *big.Float) *big.Float {
return new(big.Float).SetPrec(prec).Add(a, b)
}
func precSub(a, b *big.Float) *big.Float {
return new(big.Float).SetPrec(prec).Sub(a, b)
}
func precMul(a, b *big.Float) *big.Float {
return new(big.Float).SetPrec(prec).Mul(a, b)
}
// PreciseVector represents a point in ℝ³ using high-precision values.
// Note that this is NOT a complete implementation because there are some
// operations that Vector supports that are not feasible with arbitrary precision
// math. (e.g., methods that need division like Normalize, or methods needing a
// square root operation such as Norm)
type PreciseVector struct {
X, Y, Z *big.Float
}
// PreciseVectorFromVector creates a high precision vector from the given Vector.
func PreciseVectorFromVector(v Vector) PreciseVector {
return NewPreciseVector(v.X, v.Y, v.Z)
}
// NewPreciseVector creates a high precision vector from the given floating point values.
func NewPreciseVector(x, y, z float64) PreciseVector {
return PreciseVector{
X: precFloat(x),
Y: precFloat(y),
Z: precFloat(z),
}
}
// Vector returns this precise vector converted to a Vector.
func (v PreciseVector) Vector() Vector {
// The accuracy flag is ignored on these conversions back to float64.
x, _ := v.X.Float64()
y, _ := v.Y.Float64()
z, _ := v.Z.Float64()
return Vector{x, y, z}.Normalize()
}
// Equal reports whether v and ov are equal.
func (v PreciseVector) Equal(ov PreciseVector) bool {
return v.X.Cmp(ov.X) == 0 && v.Y.Cmp(ov.Y) == 0 && v.Z.Cmp(ov.Z) == 0
}
func (v PreciseVector) String() string {
return fmt.Sprintf("(%10g, %10g, %10g)", v.X, v.Y, v.Z)
}
// Norm2 returns the square of the norm.
func (v PreciseVector) Norm2() *big.Float { return v.Dot(v) }
// IsUnit reports whether this vector is of unit length.
func (v PreciseVector) IsUnit() bool {
return v.Norm2().Cmp(precise1) == 0
}
// Abs returns the vector with nonnegative components.
func (v PreciseVector) Abs() PreciseVector {
return PreciseVector{
X: new(big.Float).Abs(v.X),
Y: new(big.Float).Abs(v.Y),
Z: new(big.Float).Abs(v.Z),
}
}
// Add returns the standard vector sum of v and ov.
func (v PreciseVector) Add(ov PreciseVector) PreciseVector {
return PreciseVector{
X: precAdd(v.X, ov.X),
Y: precAdd(v.Y, ov.Y),
Z: precAdd(v.Z, ov.Z),
}
}
// Sub returns the standard vector difference of v and ov.
func (v PreciseVector) Sub(ov PreciseVector) PreciseVector {
return PreciseVector{
X: precSub(v.X, ov.X),
Y: precSub(v.Y, ov.Y),
Z: precSub(v.Z, ov.Z),
}
}
// Mul returns the standard scalar product of v and f.
func (v PreciseVector) Mul(f *big.Float) PreciseVector {
return PreciseVector{
X: precMul(v.X, f),
Y: precMul(v.Y, f),
Z: precMul(v.Z, f),
}
}
// MulByFloat64 returns the standard scalar product of v and f.
func (v PreciseVector) MulByFloat64(f float64) PreciseVector {
return v.Mul(precFloat(f))
}
// Dot returns the standard dot product of v and ov.
func (v PreciseVector) Dot(ov PreciseVector) *big.Float {
return precAdd(precMul(v.X, ov.X), precAdd(precMul(v.Y, ov.Y), precMul(v.Z, ov.Z)))
}
// Cross returns the standard cross product of v and ov.
func (v PreciseVector) Cross(ov PreciseVector) PreciseVector {
return PreciseVector{
X: precSub(precMul(v.Y, ov.Z), precMul(v.Z, ov.Y)),
Y: precSub(precMul(v.Z, ov.X), precMul(v.X, ov.Z)),
Z: precSub(precMul(v.X, ov.Y), precMul(v.Y, ov.X)),
}
}
// LargestComponent returns the axis that represents the largest component in this vector.
func (v PreciseVector) LargestComponent() Axis {
t := v.Abs()
if t.X.Cmp(t.Y) > 0 {
if t.X.Cmp(t.Z) > 0 {
return XAxis
}
return ZAxis
}
if t.Y.Cmp(t.Z) > 0 {
return YAxis
}
return ZAxis
}
// SmallestComponent returns the axis that represents the smallest component in this vector.
func (v PreciseVector) SmallestComponent() Axis {
t := v.Abs()
if t.X.Cmp(t.Y) < 0 {
if t.X.Cmp(t.Z) < 0 {
return XAxis
}
return ZAxis
}
if t.Y.Cmp(t.Z) < 0 {
return YAxis
}
return ZAxis
}
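A brief sketch of the high-precision arithmetic above, with arbitrary values; per the Vector doc comment, converting back to float64 also normalizes the result.

```go
package main

import (
	"fmt"

	"github.com/golang/geo/r3"
)

func main() {
	a := r3.NewPreciseVector(1, 2, 3)
	b := r3.NewPreciseVector(4, 5, 6)
	fmt.Println(a.Dot(b))   // 32, accumulated in big.Float with no intermediate rounding
	c := a.Cross(b)         // (-3, 6, -3)
	fmt.Println(c.Vector()) // converted back to float64 and normalized
}
```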

183
vendor/github.com/golang/geo/r3/vector.go generated vendored Normal file
View File

@ -0,0 +1,183 @@
// Copyright 2014 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package r3
import (
"fmt"
"math"
"github.com/golang/geo/s1"
)
// Vector represents a point in ℝ³.
type Vector struct {
X, Y, Z float64
}
// ApproxEqual reports whether v and ov are equal within a small epsilon.
func (v Vector) ApproxEqual(ov Vector) bool {
const epsilon = 1e-16
return math.Abs(v.X-ov.X) < epsilon && math.Abs(v.Y-ov.Y) < epsilon && math.Abs(v.Z-ov.Z) < epsilon
}
func (v Vector) String() string { return fmt.Sprintf("(%0.24f, %0.24f, %0.24f)", v.X, v.Y, v.Z) }
// Norm returns the vector's norm.
func (v Vector) Norm() float64 { return math.Sqrt(v.Dot(v)) }
// Norm2 returns the square of the norm.
func (v Vector) Norm2() float64 { return v.Dot(v) }
// Normalize returns a unit vector in the same direction as v.
func (v Vector) Normalize() Vector {
n2 := v.Norm2()
if n2 == 0 {
return Vector{0, 0, 0}
}
return v.Mul(1 / math.Sqrt(n2))
}
// IsUnit returns whether this vector is of approximately unit length.
func (v Vector) IsUnit() bool {
const epsilon = 5e-14
return math.Abs(v.Norm2()-1) <= epsilon
}
// Abs returns the vector with nonnegative components.
func (v Vector) Abs() Vector { return Vector{math.Abs(v.X), math.Abs(v.Y), math.Abs(v.Z)} }
// Add returns the standard vector sum of v and ov.
func (v Vector) Add(ov Vector) Vector { return Vector{v.X + ov.X, v.Y + ov.Y, v.Z + ov.Z} }
// Sub returns the standard vector difference of v and ov.
func (v Vector) Sub(ov Vector) Vector { return Vector{v.X - ov.X, v.Y - ov.Y, v.Z - ov.Z} }
// Mul returns the standard scalar product of v and m.
func (v Vector) Mul(m float64) Vector { return Vector{m * v.X, m * v.Y, m * v.Z} }
// Dot returns the standard dot product of v and ov.
func (v Vector) Dot(ov Vector) float64 { return v.X*ov.X + v.Y*ov.Y + v.Z*ov.Z }
// Cross returns the standard cross product of v and ov.
func (v Vector) Cross(ov Vector) Vector {
return Vector{
v.Y*ov.Z - v.Z*ov.Y,
v.Z*ov.X - v.X*ov.Z,
v.X*ov.Y - v.Y*ov.X,
}
}
// Distance returns the Euclidean distance between v and ov.
func (v Vector) Distance(ov Vector) float64 { return v.Sub(ov).Norm() }
// Angle returns the angle between v and ov.
func (v Vector) Angle(ov Vector) s1.Angle {
return s1.Angle(math.Atan2(v.Cross(ov).Norm(), v.Dot(ov))) * s1.Radian
}
// Axis enumerates the 3 axes of ℝ³.
type Axis int
// The three axes of ℝ³.
const (
XAxis Axis = iota
YAxis
ZAxis
)
// Ortho returns a unit vector that is orthogonal to v.
// Ortho(-v) = -Ortho(v) for all v.
func (v Vector) Ortho() Vector {
ov := Vector{0.012, 0.0053, 0.00457}
switch v.LargestComponent() {
case XAxis:
ov.Z = 1
case YAxis:
ov.X = 1
default:
ov.Y = 1
}
return v.Cross(ov).Normalize()
}
// LargestComponent returns the axis that represents the largest component in this vector.
func (v Vector) LargestComponent() Axis {
t := v.Abs()
if t.X > t.Y {
if t.X > t.Z {
return XAxis
}
return ZAxis
}
if t.Y > t.Z {
return YAxis
}
return ZAxis
}
// SmallestComponent returns the axis that represents the smallest component in this vector.
func (v Vector) SmallestComponent() Axis {
t := v.Abs()
if t.X < t.Y {
if t.X < t.Z {
return XAxis
}
return ZAxis
}
if t.Y < t.Z {
return YAxis
}
return ZAxis
}
// Cmp compares v and ov lexicographically and returns:
//
// -1 if v < ov
// 0 if v == ov
// +1 if v > ov
//
// This method is based on C++'s std::lexicographical_compare. Two entities
// are compared element by element with the given operator. The first mismatch
// defines which is less (or greater) than the other. If both have equivalent
// values they are lexicographically equal.
func (v Vector) Cmp(ov Vector) int {
if v.X < ov.X {
return -1
}
if v.X > ov.X {
return 1
}
// First elements were the same, try the next.
if v.Y < ov.Y {
return -1
}
if v.Y > ov.Y {
return 1
}
// Second elements were the same return the final compare.
if v.Z < ov.Z {
return -1
}
if v.Z > ov.Z {
return 1
}
// Both are equal
return 0
}
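A small sketch of the Vector operations and the lexicographic Cmp described above; values are arbitrary.

```go
package main

import (
	"fmt"

	"github.com/golang/geo/r3"
)

func main() {
	v := r3.Vector{X: 1, Y: 0, Z: 0}
	w := r3.Vector{X: 0, Y: 1, Z: 0}
	fmt.Println(v.Cross(w))           // (0, 0, 1)
	fmt.Println(v.Angle(w).Degrees()) // 90
	fmt.Println(v.Cmp(w))             // 1, since v.X > w.X decides the comparison
}
```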

120
vendor/github.com/golang/geo/s1/angle.go generated vendored Normal file
View File

@ -0,0 +1,120 @@
// Copyright 2014 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package s1
import (
"math"
"strconv"
)
// Angle represents a 1D angle. The internal representation is a double precision
// value in radians, so conversion to and from radians is exact.
// Conversions between E5, E6, E7, and Degrees are not always
// exact. For example, Degrees(3.1) is different from E6(3100000) or E7(31000000).
//
// The following conversions between degrees and radians are exact:
//
// Degree*180 == Radian*math.Pi
// Degree*(180/n) == Radian*(math.Pi/n) for n == 0..8
//
// These identities hold when the arguments are scaled up or down by any power
// of 2. Some similar identities are also true, for example,
//
// Degree*60 == Radian*(math.Pi/3)
//
// But be aware that this type of identity does not hold in general. For example,
//
// Degree*3 != Radian*(math.Pi/60)
//
// Similarly, the conversion to radians means that (Angle(x)*Degree).Degrees()
// does not always equal x. For example,
//
// (Angle(45*n)*Degree).Degrees() == 45*n for n == 0..8
//
// but
//
// (60*Degree).Degrees() != 60
//
// When testing for equality, you should allow for numerical errors (ApproxEqual)
// or convert to discrete E5/E6/E7 values first.
type Angle float64
// Angle units.
const (
Radian Angle = 1
Degree = (math.Pi / 180) * Radian
E5 = 1e-5 * Degree
E6 = 1e-6 * Degree
E7 = 1e-7 * Degree
)
// Radians returns the angle in radians.
func (a Angle) Radians() float64 { return float64(a) }
// Degrees returns the angle in degrees.
func (a Angle) Degrees() float64 { return float64(a / Degree) }
// round returns the value rounded to nearest as an int32.
// This does not match C++ exactly for the case of x.5.
func round(val float64) int32 {
if val < 0 {
return int32(val - 0.5)
}
return int32(val + 0.5)
}
// InfAngle returns an angle larger than any finite angle.
func InfAngle() Angle {
return Angle(math.Inf(1))
}
// isInf reports whether this Angle is infinite.
func (a Angle) isInf() bool {
return math.IsInf(float64(a), 0)
}
// E5 returns the angle in hundred thousandths of degrees.
func (a Angle) E5() int32 { return round(a.Degrees() * 1e5) }
// E6 returns the angle in millionths of degrees.
func (a Angle) E6() int32 { return round(a.Degrees() * 1e6) }
// E7 returns the angle in ten millionths of degrees.
func (a Angle) E7() int32 { return round(a.Degrees() * 1e7) }
// Abs returns the absolute value of the angle.
func (a Angle) Abs() Angle { return Angle(math.Abs(float64(a))) }
// Normalized returns an equivalent angle in (-π, π].
func (a Angle) Normalized() Angle {
rad := math.Remainder(float64(a), 2*math.Pi)
if rad <= -math.Pi {
rad = math.Pi
}
return Angle(rad)
}
func (a Angle) String() string {
return strconv.FormatFloat(a.Degrees(), 'f', 7, 64) // like "%.7f"
}
// ApproxEqual reports whether the two angles are the same up to a small tolerance.
func (a Angle) ApproxEqual(other Angle) bool {
return math.Abs(float64(a)-float64(other)) <= epsilon
}
// BUG(dsymonds): The major differences from the C++ version are:
// - no unsigned E5/E6/E7 methods
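A short sketch exercising the exact degree-to-radian conversion and Normalized documented above; printed values hold up to float64 rounding.

```go
package main

import (
	"fmt"

	"github.com/golang/geo/s1"
)

func main() {
	a := 180 * s1.Degree
	fmt.Println(a.Radians()) // 3.141592653589793, since Degree*180 == Radian*pi exactly
	fmt.Println(a.E6())      // 180000000
	b := 370 * s1.Degree
	fmt.Println(b.Normalized().Degrees()) // ≈ 10, remapped into (-180, 180]
}
```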

250
vendor/github.com/golang/geo/s1/chordangle.go generated vendored Normal file
View File

@ -0,0 +1,250 @@
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package s1
import (
"math"
)
// ChordAngle represents the angle subtended by a chord (i.e., the straight
// line segment connecting two points on the sphere). Its representation
// makes it very efficient for computing and comparing distances, but unlike
// Angle it is only capable of representing angles between 0 and π radians.
// Generally, ChordAngle should only be used in loops where many angles need
// to be calculated and compared. Otherwise it is simpler to use Angle.
//
// ChordAngle loses some accuracy as the angle approaches π radians.
// Specifically, the representation of (π - x) radians has an error of about
// (1e-15 / x), with a maximum error of about 2e-8 radians (about 13cm on the
// Earth's surface). For comparison, for angles up to π/2 radians (10000km)
// the worst-case representation error is about 2e-16 radians (1 nanometer),
// which is about the same as Angle.
//
// ChordAngles are represented by the squared chord length, which can
// range from 0 to 4. Positive infinity represents an infinite squared length.
type ChordAngle float64
const (
// NegativeChordAngle represents a chord angle smaller than the zero angle.
// The only valid operations on a NegativeChordAngle are comparisons,
// Angle conversions, and Successor/Predecessor.
NegativeChordAngle = ChordAngle(-1)
// RightChordAngle represents a chord angle of 90 degrees (a "right angle").
RightChordAngle = ChordAngle(2)
// StraightChordAngle represents a chord angle of 180 degrees (a "straight angle").
// This is the maximum finite chord angle.
StraightChordAngle = ChordAngle(4)
// maxLength2 is the square of the maximum length allowed in a ChordAngle.
maxLength2 = 4.0
)
// ChordAngleFromAngle returns a ChordAngle from the given Angle.
func ChordAngleFromAngle(a Angle) ChordAngle {
if a < 0 {
return NegativeChordAngle
}
if a.isInf() {
return InfChordAngle()
}
l := 2 * math.Sin(0.5*math.Min(math.Pi, a.Radians()))
return ChordAngle(l * l)
}
// ChordAngleFromSquaredLength returns a ChordAngle from the squared chord length.
// Note that the argument is automatically clamped to a maximum of 4 to
// handle possible roundoff errors. The argument must be non-negative.
func ChordAngleFromSquaredLength(length2 float64) ChordAngle {
if length2 > maxLength2 {
return StraightChordAngle
}
return ChordAngle(length2)
}
// Expanded returns a new ChordAngle that has been adjusted by the given error
// bound (which can be positive or negative). Error should be the value
// returned by either MaxPointError or MaxAngleError. For example:
// a := ChordAngleFromPoints(x, y)
// a1 := a.Expanded(a.MaxPointError())
func (c ChordAngle) Expanded(e float64) ChordAngle {
// If the angle is special, don't change it. Otherwise clamp it to the valid range.
if c.isSpecial() {
return c
}
return ChordAngle(math.Max(0.0, math.Min(maxLength2, float64(c)+e)))
}
// Angle converts this ChordAngle to an Angle.
func (c ChordAngle) Angle() Angle {
if c < 0 {
return -1 * Radian
}
if c.isInf() {
return InfAngle()
}
return Angle(2 * math.Asin(0.5*math.Sqrt(float64(c))))
}
// InfChordAngle returns a chord angle larger than any finite chord angle.
// The only valid operations on an InfChordAngle are comparisons, Angle
// conversions, and Successor/Predecessor.
func InfChordAngle() ChordAngle {
return ChordAngle(math.Inf(1))
}
// isInf reports whether this ChordAngle is infinite.
func (c ChordAngle) isInf() bool {
return math.IsInf(float64(c), 1)
}
// isSpecial reports whether this ChordAngle is one of the special cases.
func (c ChordAngle) isSpecial() bool {
return c < 0 || c.isInf()
}
// isValid reports whether this ChordAngle is valid or not.
func (c ChordAngle) isValid() bool {
return (c >= 0 && c <= maxLength2) || c.isSpecial()
}
// Successor returns the smallest representable ChordAngle larger than this one.
// This can be used to convert a "<" comparison to a "<=" comparison.
//
// Note the following special cases:
// NegativeChordAngle.Successor == 0
// StraightChordAngle.Successor == InfChordAngle
// InfChordAngle.Successor == InfChordAngle
func (c ChordAngle) Successor() ChordAngle {
if c >= maxLength2 {
return InfChordAngle()
}
if c < 0 {
return 0
}
return ChordAngle(math.Nextafter(float64(c), 10.0))
}
// Predecessor returns the largest representable ChordAngle less than this one.
//
// Note the following special cases:
// InfChordAngle.Predecessor == StraightChordAngle
// ChordAngle(0).Predecessor == NegativeChordAngle
// NegativeChordAngle.Predecessor == NegativeChordAngle
func (c ChordAngle) Predecessor() ChordAngle {
if c <= 0 {
return NegativeChordAngle
}
if c > maxLength2 {
return StraightChordAngle
}
return ChordAngle(math.Nextafter(float64(c), -10.0))
}
// MaxPointError returns the maximum error size for a ChordAngle constructed
// from 2 Points x and y, assuming that x and y are normalized to within the
// bounds guaranteed by s2.Point.Normalize. The error is defined with respect to
// the true distance after the points are projected to lie exactly on the sphere.
func (c ChordAngle) MaxPointError() float64 {
// There is a relative error of (2.5*dblEpsilon) when computing the squared
// distance, plus a relative error of 2 * dblEpsilon, plus an absolute error
// of (16 * dblEpsilon**2) because the lengths of the input points may differ
// from 1 by up to (2*dblEpsilon) each. (This is the maximum error in Normalize).
return 4.5*dblEpsilon*float64(c) + 16*dblEpsilon*dblEpsilon
}
// MaxAngleError returns the maximum error for a ChordAngle constructed
// as an Angle distance.
func (c ChordAngle) MaxAngleError() float64 {
return dblEpsilon * float64(c)
}
// Add adds the other ChordAngle to this one and returns the resulting value.
// This method assumes the ChordAngles are not special.
func (c ChordAngle) Add(other ChordAngle) ChordAngle {
// Note that this method (and Sub) is much more efficient than converting
// the ChordAngle to an Angle and adding those and converting back. It
// requires only one square root plus a few additions and multiplications.
// Optimization for the common case where "other" is an error tolerance
// parameter that happens to be set to zero.
if other == 0 {
return c
}
// Clamp the angle sum to at most 180 degrees.
if c+other >= maxLength2 {
return StraightChordAngle
}
// Let a and b be the (non-squared) chord lengths, and let c = a+b.
// Let A, B, and C be the corresponding half-angles (a = 2*sin(A), etc).
// Then the formula below can be derived from c = 2 * sin(A+B) and the
// relationships sin(A+B) = sin(A)*cos(B) + sin(B)*cos(A)
// cos(X) = sqrt(1 - sin^2(X))
x := float64(c * (1 - 0.25*other))
y := float64(other * (1 - 0.25*c))
return ChordAngle(math.Min(maxLength2, x+y+2*math.Sqrt(x*y)))
}
// Sub subtracts the other ChordAngle from this one and returns the resulting
// value. This method assumes the ChordAngles are not special.
func (c ChordAngle) Sub(other ChordAngle) ChordAngle {
if other == 0 {
return c
}
if c <= other {
return 0
}
x := float64(c * (1 - 0.25*other))
y := float64(other * (1 - 0.25*c))
return ChordAngle(math.Max(0.0, x+y-2*math.Sqrt(x*y)))
}
// Sin returns the sine of this chord angle. This method is more efficient
// than converting to Angle and performing the computation.
func (c ChordAngle) Sin() float64 {
return math.Sqrt(c.Sin2())
}
// Sin2 returns the square of the sine of this chord angle.
// It is more efficient than Sin.
func (c ChordAngle) Sin2() float64 {
// Let a be the (non-squared) chord length, and let A be the corresponding
// half-angle (a = 2*sin(A)). The formula below can be derived from:
// sin(2*A) = 2 * sin(A) * cos(A)
// cos^2(A) = 1 - sin^2(A)
// This is much faster than converting to an angle and computing its sine.
return float64(c * (1 - 0.25*c))
}
// Cos returns the cosine of this chord angle. This method is more efficient
// than converting to Angle and performing the computation.
func (c ChordAngle) Cos() float64 {
// cos(2*A) = cos^2(A) - sin^2(A) = 1 - 2*sin^2(A)
return float64(1 - 0.5*c)
}
// Tan returns the tangent of this chord angle.
func (c ChordAngle) Tan() float64 {
return c.Sin() / c.Cos()
}
// TODO(roberts): Differences from C++:
// Helpers to/from E5/E6/E7
// Helpers to/from degrees and radians directly.
// FastUpperBoundFrom(angle Angle)
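A sketch of the squared-chord-length representation described above: ordering ChordAngles is a plain float64 comparison, and only the conversion back to Angle pays for an Asin. Printed values hold up to rounding.

```go
package main

import (
	"fmt"

	"github.com/golang/geo/s1"
)

func main() {
	c := s1.ChordAngleFromAngle(60 * s1.Degree)
	fmt.Println(float64(c))             // ≈ 1: the chord is 2*sin(30°) == 1, squared
	fmt.Println(c < s1.RightChordAngle) // true: 60° sorts before 90°
	fmt.Println(c.Angle().Degrees())    // ≈ 60
}
```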

Some files were not shown because too many files have changed in this diff.