Update BurntSushi/toml

This commit is contained in:
Frank Denis 2022-01-13 09:22:22 +01:00
parent 351bced7c5
commit 4fd26029c7
14 changed files with 775 additions and 458 deletions

4
go.mod
View File

@ -3,7 +3,7 @@ module github.com/dnscrypt/dnscrypt-proxy
go 1.17
require (
github.com/BurntSushi/toml v0.4.1
github.com/BurntSushi/toml v1.0.0
github.com/VividCortex/ewma v1.2.0
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf
github.com/dchest/safefile v0.0.0-20151022103144-855e8d98f185
@ -20,7 +20,7 @@ require (
github.com/kardianos/service v1.2.1
github.com/miekg/dns v1.1.45
github.com/powerman/check v1.6.0
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3
golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce
golang.org/x/net v0.0.0-20220111093109-d55c255bac03
golang.org/x/sys v0.0.0-20220111092808-5a964db01320
gopkg.in/natefinch/lumberjack.v2 v2.0.0

8
go.sum
View File

@ -37,8 +37,8 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX
contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw=
github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/toml v1.0.0 h1:dtDWrepsVPfW9H/4y7dDgFc2MBUSeJhlaDtK13CxFlU=
github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM=
github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs=
@ -723,8 +723,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3 h1:0es+/5331RGQPcXlMfP+WrnIIS6dNnNRe0WB02W0F4M=
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce h1:Roh6XWxHFKrPgC/EQhVubSAGQ6Ozk6IdxHSzt1mR0EI=
golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=

vendor/github.com/BurntSushi/toml/README.md
View File

@ -1,10 +1,6 @@
## TOML parser and encoder for Go with reflection
TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
reflection interface similar to Go's standard library `json` and `xml`
packages. This package also supports the `encoding.TextUnmarshaler` and
`encoding.TextMarshaler` interfaces so that you can define custom data
representations. (There is an example of this below.)
packages.
Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
@ -16,26 +12,25 @@ v0.4.0`).
This library requires Go 1.13 or newer; install it with:
$ go get github.com/BurntSushi/toml
% go get github.com/BurntSushi/toml@latest
It also comes with a TOML validator CLI tool:
$ go get github.com/BurntSushi/toml/cmd/tomlv
$ tomlv some-toml-file.toml
% go install github.com/BurntSushi/toml/cmd/tomlv@latest
% tomlv some-toml-file.toml
### Testing
This package passes all tests in [toml-test] for both the decoder and the
encoder.
This package passes all tests in
[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder
and the encoder.
[toml-test]: https://github.com/BurntSushi/toml-test
### Examples
This package works similar to how the Go standard library handles XML and JSON.
Namely, data is loaded into Go values via reflection.
This package works similarly to how the Go standard library handles XML and
JSON. Namely, data is loaded into Go values via reflection.
For the simplest example, consider some TOML file as just a list of keys
and values:
For the simplest example, consider some TOML file as just a list of keys and
values:
```toml
Age = 25
@ -61,9 +56,8 @@ And then decoded with:
```go
var conf Config
if _, err := toml.Decode(tomlData, &conf); err != nil {
// handle error
}
_, err := toml.Decode(tomlData, &conf)
// handle error
```
You can also use struct tags if your struct field name doesn't map to a TOML
@ -75,15 +69,14 @@ some_key_NAME = "wat"
```go
type TOML struct {
ObscureKey string `toml:"some_key_NAME"`
ObscureKey string `toml:"some_key_NAME"`
}
```
Beware that, like most other decoders, **only exported fields** are
considered when encoding and decoding; private fields are silently ignored.
### Using the `encoding.TextUnmarshaler` interface
### Using the `Marshaler` and `encoding.TextUnmarshaler` interfaces
Here's an example that automatically parses duration strings into
`time.Duration` values:
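The example itself falls outside this hunk; as a sketch of the kind of wrapper type the README describes (the `duration` type, the `timeout` key, and the surrounding program are illustrative assumptions, not part of this diff):

```go
package main

import (
	"fmt"
	"time"

	"github.com/BurntSushi/toml"
)

// duration wraps time.Duration so it can implement encoding.TextUnmarshaler.
type duration struct {
	time.Duration
}

// UnmarshalText lets the decoder turn a TOML string such as "1m30s" into a
// time.Duration.
func (d *duration) UnmarshalText(text []byte) error {
	var err error
	d.Duration, err = time.ParseDuration(string(text))
	return err
}

func main() {
	var cfg struct {
		Timeout duration `toml:"timeout"`
	}
	if _, err := toml.Decode(`timeout = "1m30s"`, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Timeout.Duration) // 1m30s
}
```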
@ -136,7 +129,6 @@ To target TOML specifically you can implement `UnmarshalTOML` TOML interface in
a similar way.
### More complex usage
Here's an example of how to load the example from the official spec page:
```toml
@ -216,5 +208,4 @@ type clients struct {
Note that a case insensitive match will be tried if an exact match can't be
found.
A working example of the above can be found in `_examples/example.{go,toml}`.
A working example of the above can be found in `_example/example.{go,toml}`.

vendor/github.com/BurntSushi/toml/decode.go
View File

@ -9,7 +9,6 @@ import (
"os"
"reflect"
"strings"
"time"
)
// Unmarshaler is the interface implemented by objects that can unmarshal a
@ -40,6 +39,13 @@ type Primitive struct {
context Key
}
// The significand precision for float32 and float64 is 24 and 53 bits; this is
// the range a natural number can be stored in a float without loss of data.
const (
maxSafeFloat32Int = 16777215 // 2^24-1
maxSafeFloat64Int = 9007199254740991 // 2^53-1
)
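As a quick illustration of why these bounds are the cutoff (a standalone sketch, not part of the library), integers above 2^53-1 no longer round-trip through float64:

```go
package main

import "fmt"

func main() {
	var n int64 = 9007199254740991 // 2^53 - 1, i.e. maxSafeFloat64Int above
	fmt.Println(int64(float64(n)) == n) // true: still exactly representable

	n += 2 // 2^53 + 1
	fmt.Println(int64(float64(n)) == n) // false: rounds to 2^53, precision is lost
}
```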
// PrimitiveDecode is just like the other `Decode*` functions, except it
// decodes a TOML value that has already been parsed. Valid primitive values
// can *only* be obtained from values filled by the decoder functions,
@ -100,18 +106,38 @@ func NewDecoder(r io.Reader) *Decoder {
return &Decoder{r: r}
}
var (
unmarshalToml = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
unmarshalText = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
)
// Decode TOML data in to the pointer `v`.
func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
rv := reflect.ValueOf(v)
if rv.Kind() != reflect.Ptr {
return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v))
s := "%q"
if reflect.TypeOf(v) == nil {
s = "%v"
}
return MetaData{}, e("cannot decode to non-pointer "+s, reflect.TypeOf(v))
}
if rv.IsNil() {
return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v))
return MetaData{}, e("cannot decode to nil value of %q", reflect.TypeOf(v))
}
// TODO: have parser should read from io.Reader? Or at the very least, make
// it read from []byte rather than string
// Check if this is a supported type: struct, map, interface{}, or something
// that implements UnmarshalTOML or UnmarshalText.
rv = indirect(rv)
rt := rv.Type()
if rv.Kind() != reflect.Struct && rv.Kind() != reflect.Map &&
!(rv.Kind() == reflect.Interface && rv.NumMethod() == 0) &&
!rt.Implements(unmarshalToml) && !rt.Implements(unmarshalText) {
return MetaData{}, e("cannot decode to type %s", rt)
}
// TODO: parser should read from io.Reader? Or at the very least, make it
// read from []byte rather than string
data, err := ioutil.ReadAll(dec.r)
if err != nil {
return MetaData{}, err
@ -121,11 +147,15 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
if err != nil {
return MetaData{}, err
}
md := MetaData{
p.mapping, p.types, p.ordered,
make(map[string]bool, len(p.ordered)), nil,
mapping: p.mapping,
types: p.types,
keys: p.ordered,
decoded: make(map[string]struct{}, len(p.ordered)),
context: nil,
}
return md, md.unify(p.mapping, indirect(rv))
return md, md.unify(p.mapping, rv)
}
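A sketch of what the new destination-type check accepts and rejects (error text paraphrased, not verbatim):

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	// Accepted: pointer to a struct; maps, interface{}, and types implementing
	// UnmarshalTOML or UnmarshalText are accepted as well.
	var conf struct{ Age int }
	_, err := toml.Decode(`Age = 25`, &conf)
	fmt.Println(conf.Age, err) // 25 <nil>

	// Rejected by the check above: pointer to a plain int.
	var n int
	_, err = toml.Decode(`Age = 25`, &n)
	fmt.Println(err) // something along the lines of: toml: cannot decode to type int
}
```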
// Decode the TOML data in to the pointer v.
@ -218,9 +248,7 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
return e("unsupported type %s", rv.Type())
}
return md.unifyAnything(data, rv)
case reflect.Float32:
fallthrough
case reflect.Float64:
case reflect.Float32, reflect.Float64:
return md.unifyFloat64(data, rv)
}
return e("unsupported type %s", rv.Kind())
@ -254,17 +282,17 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
for _, i := range f.index {
subv = indirect(subv.Field(i))
}
if isUnifiable(subv) {
md.decoded[md.context.add(key).String()] = true
md.decoded[md.context.add(key).String()] = struct{}{}
md.context = append(md.context, key)
if err := md.unify(datum, subv); err != nil {
err := md.unify(datum, subv)
if err != nil {
return err
}
md.context = md.context[0 : len(md.context)-1]
} else if f.name != "" {
// Bad user! No soup for you!
return e("cannot write unexported field %s.%s",
rv.Type().String(), f.name)
return e("cannot write unexported field %s.%s", rv.Type().String(), f.name)
}
}
}
@ -283,22 +311,22 @@ func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
if tmap == nil {
return nil
}
return badtype("map", mapping)
return md.badtype("map", mapping)
}
if rv.IsNil() {
rv.Set(reflect.MakeMap(rv.Type()))
}
for k, v := range tmap {
md.decoded[md.context.add(k).String()] = true
md.decoded[md.context.add(k).String()] = struct{}{}
md.context = append(md.context, k)
rvkey := indirect(reflect.New(rv.Type().Key()))
rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
if err := md.unify(v, rvval); err != nil {
return err
}
md.context = md.context[0 : len(md.context)-1]
rvkey := indirect(reflect.New(rv.Type().Key()))
rvkey.SetString(k)
rv.SetMapIndex(rvkey, rvval)
}
@ -311,7 +339,7 @@ func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
if !datav.IsValid() {
return nil
}
return badtype("slice", data)
return md.badtype("slice", data)
}
if l := datav.Len(); l != rv.Len() {
return e("expected array length %d; got TOML array of length %d", rv.Len(), l)
@ -325,7 +353,7 @@ func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
if !datav.IsValid() {
return nil
}
return badtype("slice", data)
return md.badtype("slice", data)
}
n := datav.Len()
if rv.IsNil() || rv.Cap() < n {
@ -346,26 +374,21 @@ func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
return nil
}
func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error {
if _, ok := data.(time.Time); ok {
rv.Set(reflect.ValueOf(data))
return nil
}
return badtype("time.Time", data)
}
func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
if s, ok := data.(string); ok {
rv.SetString(s)
return nil
}
return badtype("string", data)
return md.badtype("string", data)
}
func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
if num, ok := data.(float64); ok {
switch rv.Kind() {
case reflect.Float32:
if num < -math.MaxFloat32 || num > math.MaxFloat32 {
return e("value %f is out of range for float32", num)
}
fallthrough
case reflect.Float64:
rv.SetFloat(num)
@ -374,7 +397,26 @@ func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
}
return nil
}
return badtype("float", data)
if num, ok := data.(int64); ok {
switch rv.Kind() {
case reflect.Float32:
if num < -maxSafeFloat32Int || num > maxSafeFloat32Int {
return e("value %d is out of range for float32", num)
}
fallthrough
case reflect.Float64:
if num < -maxSafeFloat64Int || num > maxSafeFloat64Int {
return e("value %d is out of range for float64", num)
}
rv.SetFloat(float64(num))
default:
panic("bug")
}
return nil
}
return md.badtype("float", data)
}
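In practice this means a TOML integer can now decode into a Go float field, subject to the safe-range checks above; a small sketch (the `ratio` key and struct are illustrative):

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var cfg struct {
		Ratio float64 `toml:"ratio"`
	}
	// "ratio" is a TOML integer, but the destination field is float64; the
	// int64 branch of unifyFloat64 converts it as long as it fits in 2^53-1.
	if _, err := toml.Decode(`ratio = 3`, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Ratio) // 3
}
```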
func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
@ -421,7 +463,7 @@ func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
}
return nil
}
return badtype("integer", data)
return md.badtype("integer", data)
}
func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
@ -429,7 +471,7 @@ func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
rv.SetBool(b)
return nil
}
return badtype("boolean", data)
return md.badtype("boolean", data)
}
func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
@ -440,6 +482,12 @@ func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) error {
var s string
switch sdata := data.(type) {
case Marshaler:
text, err := sdata.MarshalTOML()
if err != nil {
return err
}
s = string(text)
case TextMarshaler:
text, err := sdata.MarshalText()
if err != nil {
@ -457,7 +505,7 @@ func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) erro
case float64:
s = fmt.Sprintf("%f", sdata)
default:
return badtype("primitive (string-like)", data)
return md.badtype("primitive (string-like)", data)
}
if err := v.UnmarshalText([]byte(s)); err != nil {
return err
@ -465,17 +513,22 @@ func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) erro
return nil
}
func (md *MetaData) badtype(dst string, data interface{}) error {
return e("incompatible types: TOML key %q has type %T; destination has type %s", md.context, data, dst)
}
// rvalue returns a reflect.Value of `v`. All pointers are resolved.
func rvalue(v interface{}) reflect.Value {
return indirect(reflect.ValueOf(v))
}
// indirect returns the value pointed to by a pointer.
// Pointers are followed until the value is not a pointer.
// New values are allocated for each nil pointer.
//
// An exception to this rule is if the value satisfies an interface of
// interest to us (like encoding.TextUnmarshaler).
// Pointers are followed until the value is not a pointer. New values are
// allocated for each nil pointer.
//
// An exception to this rule is if the value satisfies an interface of interest
// to us (like encoding.TextUnmarshaler).
func indirect(v reflect.Value) reflect.Value {
if v.Kind() != reflect.Ptr {
if v.CanSet() {
@ -505,7 +558,3 @@ func isUnifiable(rv reflect.Value) bool {
func e(format string, args ...interface{}) error {
return fmt.Errorf("toml: "+format, args...)
}
func badtype(expected string, data interface{}) error {
return e("cannot load TOML value of type %T into a Go %s", data, expected)
}

vendor/github.com/BurntSushi/toml/decode_go116.go
View File

@ -1,3 +1,4 @@
//go:build go1.16
// +build go1.16
package toml

vendor/github.com/BurntSushi/toml/deprecated.go
View File

@ -5,29 +5,17 @@ import (
"io"
)
// DEPRECATED!
//
// Use the identical encoding.TextMarshaler instead. It is defined here to
// support Go 1.1 and older.
// Deprecated: use encoding.TextMarshaler
type TextMarshaler encoding.TextMarshaler
// DEPRECATED!
//
// Use the identical encoding.TextUnmarshaler instead. It is defined here to
// support Go 1.1 and older.
// Deprecated: use encoding.TextUnmarshaler
type TextUnmarshaler encoding.TextUnmarshaler
// DEPRECATED!
//
// Use MetaData.PrimitiveDecode instead.
// Deprecated: use MetaData.PrimitiveDecode.
func PrimitiveDecode(primValue Primitive, v interface{}) error {
md := MetaData{decoded: make(map[string]bool)}
md := MetaData{decoded: make(map[string]struct{})}
return md.unify(primValue.undecoded, rvalue(v))
}
// DEPRECATED!
//
// Use NewDecoder(reader).Decode(&v) instead.
func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
return NewDecoder(r).Decode(v)
}
// Deprecated: use NewDecoder(reader).Decode(&value).
func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { return NewDecoder(r).Decode(v) }

vendor/github.com/BurntSushi/toml/encode.go
View File

@ -21,12 +21,11 @@ type tomlEncodeError struct{ error }
var (
errArrayNilElement = errors.New("toml: cannot encode array with nil element")
errNonString = errors.New("toml: cannot encode a map with non-string key type")
errAnonNonStruct = errors.New("toml: cannot encode an anonymous field that is not a struct")
errNoKey = errors.New("toml: top-level values must be Go maps or structs")
errAnything = errors.New("") // used in testing
)
var quotedReplacer = strings.NewReplacer(
var dblQuotedReplacer = strings.NewReplacer(
"\"", "\\\"",
"\\", "\\\\",
"\x00", `\u0000`,
@ -64,13 +63,22 @@ var quotedReplacer = strings.NewReplacer(
"\x7f", `\u007f`,
)
// Marshaler is the interface implemented by types that can marshal themselves
// into valid TOML.
type Marshaler interface {
MarshalTOML() ([]byte, error)
}
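A minimal sketch of a type using the new interface (the `Color` type, its hex formatting, and the `background` key are illustrative assumptions; note that in this version the encoder writes the MarshalTOML output as a quoted string):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/BurntSushi/toml"
)

// Color encodes itself as an "#rrggbb" value via MarshalTOML.
type Color struct{ R, G, B uint8 }

func (c Color) MarshalTOML() ([]byte, error) {
	return []byte(fmt.Sprintf("#%02x%02x%02x", c.R, c.G, c.B)), nil
}

func main() {
	var buf bytes.Buffer
	err := toml.NewEncoder(&buf).Encode(struct {
		Background Color `toml:"background"`
	}{Color{R: 255, G: 128, B: 0}})
	if err != nil {
		panic(err)
	}
	fmt.Print(buf.String()) // background = "#ff8000"
}
```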
// Encoder encodes a Go value to a TOML document.
//
// The mapping between Go values and TOML values should be precisely the same as
// for the Decode* functions. Similarly, the TextMarshaler interface is
// supported by encoding the resulting bytes as strings. If you want to write
// arbitrary binary data then you will need to use something like base64 since
// TOML does not have any binary types.
// for the Decode* functions.
//
// The toml.Marshaler and encoding.TextMarshaler interfaces are supported for
// encoding the value as custom TOML.
//
// If you want to write arbitrary binary data then you will need to use
// something like base64 since TOML does not have any binary types.
//
// When encoding TOML hashes (Go maps or structs), keys without any sub-hashes
// are encoded first.
@ -83,16 +91,14 @@ var quotedReplacer = strings.NewReplacer(
// structs. (e.g. [][]map[string]string is not allowed but []map[string]string
// is okay, as is []map[string][]string).
//
// NOTE: Only exported keys are encoded due to the use of reflection. Unexported
// NOTE: only exported keys are encoded due to the use of reflection. Unexported
// keys are silently discarded.
type Encoder struct {
// The string to use for a single indentation level. The default is two
// spaces.
// String to use for a single indentation level; default is two spaces.
Indent string
// hasWritten is whether we have written any output to w yet.
hasWritten bool
w *bufio.Writer
hasWritten bool // written any output to w yet?
}
// NewEncoder creates a new Encoder.
@ -130,12 +136,13 @@ func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
}
func (enc *Encoder) encode(key Key, rv reflect.Value) {
// Special case. Time needs to be in ISO8601 format.
// Special case. If we can marshal the type to text, then we used that.
// Basically, this prevents the encoder for handling these types as
// generic structs (or whatever the underlying type of a TextMarshaler is).
// Special case: time needs to be in ISO8601 format.
//
// Special case: if we can marshal the type to text, then we use that. This
// prevents the encoder from handling these types as generic structs (or
// whatever the underlying type of a TextMarshaler is).
switch t := rv.Interface().(type) {
case time.Time, encoding.TextMarshaler:
case time.Time, encoding.TextMarshaler, Marshaler:
enc.writeKeyValue(key, rv, false)
return
// TODO: #76 would make this superfluous after implemented.
@ -200,13 +207,19 @@ func (enc *Encoder) eElement(rv reflect.Value) {
enc.wf(v.In(time.UTC).Format(format))
}
return
case encoding.TextMarshaler:
// Use text marshaler if it's available for this value.
if s, err := v.MarshalText(); err != nil {
case Marshaler:
s, err := v.MarshalTOML()
if err != nil {
encPanic(err)
} else {
enc.writeQuoted(string(s))
}
enc.writeQuoted(string(s))
return
case encoding.TextMarshaler:
s, err := v.MarshalText()
if err != nil {
encPanic(err)
}
enc.writeQuoted(string(s))
return
}
@ -260,7 +273,7 @@ func floatAddDecimal(fstr string) string {
}
func (enc *Encoder) writeQuoted(s string) {
enc.wf("\"%s\"", quotedReplacer.Replace(s))
enc.wf("\"%s\"", dblQuotedReplacer.Replace(s))
}
func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
@ -286,7 +299,7 @@ func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
continue
}
enc.newline()
enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll())
enc.wf("%s[[%s]]", enc.indentStr(key), key)
enc.newline()
enc.eMapOrStruct(key, trv, false)
}
@ -299,7 +312,7 @@ func (enc *Encoder) eTable(key Key, rv reflect.Value) {
enc.newline()
}
if len(key) > 0 {
enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll())
enc.wf("%s[%s]", enc.indentStr(key), key)
enc.newline()
}
enc.eMapOrStruct(key, rv, false)
@ -328,7 +341,7 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
var mapKeysDirect, mapKeysSub []string
for _, mapKey := range rv.MapKeys() {
k := mapKey.String()
if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) {
if typeIsTable(tomlTypeOfGo(rv.MapIndex(mapKey))) {
mapKeysSub = append(mapKeysSub, k)
} else {
mapKeysDirect = append(mapKeysDirect, k)
@ -364,6 +377,8 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
}
}
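// is32Bit is true on 32-bit platforms: there ^uint(0)>>63 is 0 (so 32<<0 == 32),
// while on 64-bit platforms it is 1 (so 32<<1 == 64 and the comparison is false).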
const is32Bit = (32 << (^uint(0) >> 63)) == 32
func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
// Write keys for fields directly under this key first, because if we write
// a field that creates a new table then all keys under it will be in that
@ -408,10 +423,20 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
}
}
if typeIsHash(tomlTypeOfGo(frv)) {
if typeIsTable(tomlTypeOfGo(frv)) {
fieldsSub = append(fieldsSub, append(start, f.Index...))
} else {
fieldsDirect = append(fieldsDirect, append(start, f.Index...))
// Copy so it works correctly on 32-bit archs; not clear why this
// is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4
// This also works fine on 64bit, but 32bit archs are somewhat
// rare and this is a wee bit faster.
if is32Bit {
copyStart := make([]int, len(start))
copy(copyStart, start)
fieldsDirect = append(fieldsDirect, append(copyStart, f.Index...))
} else {
fieldsDirect = append(fieldsDirect, append(start, f.Index...))
}
}
}
}
@ -462,13 +487,13 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
}
}
// tomlTypeName returns the TOML type name of the Go value's type. It is
// used to determine whether the types of array elements are mixed (which is
// forbidden). If the Go value is nil, then it is illegal for it to be an array
// element, and valueIsNil is returned as true.
// Returns the TOML type of a Go value. The type may be `nil`, which means
// no concrete TOML type could be found.
// tomlTypeOfGo returns the TOML type name of the Go value's type.
//
// It is used to determine whether the types of array elements are mixed (which
// is forbidden). If the Go value is nil, then it is illegal for it to be an
// array element, and valueIsNil is returned as true.
//
// The type may be `nil`, which means no concrete TOML type could be found.
func tomlTypeOfGo(rv reflect.Value) tomlType {
if isNil(rv) || !rv.IsValid() {
return nil
@ -495,32 +520,43 @@ func tomlTypeOfGo(rv reflect.Value) tomlType {
case reflect.Map:
return tomlHash
case reflect.Struct:
switch rv.Interface().(type) {
case time.Time:
if _, ok := rv.Interface().(time.Time); ok {
return tomlDatetime
case encoding.TextMarshaler:
return tomlString
default:
// Someone used a pointer receiver: we can make it work for pointer
// values.
if rv.CanAddr() {
_, ok := rv.Addr().Interface().(encoding.TextMarshaler)
if ok {
return tomlString
}
}
return tomlHash
}
if isMarshaler(rv) {
return tomlString
}
return tomlHash
default:
_, ok := rv.Interface().(encoding.TextMarshaler)
if ok {
if isMarshaler(rv) {
return tomlString
}
encPanic(errors.New("unsupported type: " + rv.Kind().String()))
panic("") // Need *some* return value
panic("unreachable")
}
}
func isMarshaler(rv reflect.Value) bool {
switch rv.Interface().(type) {
case encoding.TextMarshaler:
return true
case Marshaler:
return true
}
// Someone used a pointer receiver: we can make it work for pointer values.
if rv.CanAddr() {
if _, ok := rv.Addr().Interface().(encoding.TextMarshaler); ok {
return true
}
if _, ok := rv.Addr().Interface().(Marshaler); ok {
return true
}
}
return false
}
// tomlArrayType returns the element type of a TOML array. The type returned
// may be nil if it cannot be determined (e.g., a nil slice or a zero length
// slice). This function may also panic if it finds a type that cannot be
@ -604,7 +640,14 @@ func (enc *Encoder) newline() {
//
// key = <any value>
//
// If inline is true it won't add a newline at the end.
// This is also used for "k = v" in inline tables; so something like this will
// be written in three calls:
//
// ┌────────────────────┐
// │ ┌───┐ ┌─────┐│
// v v v v vv
// key = {k = v, k2 = v2}
//
func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) {
if len(key) == 0 {
encPanic(errNoKey)
@ -617,7 +660,8 @@ func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) {
}
func (enc *Encoder) wf(format string, v ...interface{}) {
if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
_, err := fmt.Fprintf(enc.w, format, v...)
if err != nil {
encPanic(err)
}
enc.hasWritten = true

229
vendor/github.com/BurntSushi/toml/error.go generated vendored Normal file
View File

@ -0,0 +1,229 @@
package toml
import (
"fmt"
"strings"
)
// ParseError is returned when there is an error parsing the TOML syntax.
//
// For example invalid syntax, duplicate keys, etc.
//
// In addition to the error message itself, you can also print detailed location
// information with context by using ErrorWithPosition():
//
// toml: error: Key 'fruit' was already created and cannot be used as an array.
//
// At line 4, column 2-7:
//
// 2 | fruit = []
// 3 |
// 4 | [[fruit]] # Not allowed
// ^^^^^
//
// Furthermore, ErrorWithUsage() can be used to print the above with some
// more detailed usage guidance:
//
// toml: error: newlines not allowed within inline tables
//
// At line 1, column 18:
//
// 1 | x = [{ key = 42 #
// ^
//
// Error help:
//
// Inline tables must always be on a single line:
//
// table = {key = 42, second = 43}
//
// It is invalid to split them over multiple lines like so:
//
// # INVALID
// table = {
// key = 42,
// second = 43
// }
//
// Use regular tables for this:
//
// [table]
// key = 42
// second = 43
type ParseError struct {
Message string // Short technical message.
Usage string // Longer message with usage guidance; may be blank.
Position Position // Position of the error
LastKey string // Last parsed key, may be blank.
Line int // Line the error occurred. Deprecated: use Position.
err error
input string
}
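A sketch of how calling code might surface the richer error (the invalid TOML input here is only an assumption for illustration):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var v map[string]interface{}
	_, err := toml.Decode("x = [{ key = 42\n", &v) // newline inside an inline table

	var perr toml.ParseError
	if errors.As(err, &perr) {
		// Multi-line message with the line/column context shown above.
		fmt.Println(perr.ErrorWithPosition())
	} else if err != nil {
		fmt.Println(err)
	}
}
```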
// Position of an error.
type Position struct {
Line int // Line number, starting at 1.
Start int // Start of error, as byte offset starting at 0.
Len int // Length in bytes.
}
func (pe ParseError) Error() string {
msg := pe.Message
if msg == "" { // Error from errorf()
msg = pe.err.Error()
}
if pe.LastKey == "" {
return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, msg)
}
return fmt.Sprintf("toml: line %d (last key %q): %s",
pe.Position.Line, pe.LastKey, msg)
}
// ErrorWithPosition() returns the error with detailed location context.
//
// See the documentation on ParseError.
func (pe ParseError) ErrorWithPosition() string {
if pe.input == "" { // Should never happen, but just in case.
return pe.Error()
}
var (
lines = strings.Split(pe.input, "\n")
col = pe.column(lines)
b = new(strings.Builder)
)
msg := pe.Message
if msg == "" {
msg = pe.err.Error()
}
// TODO: don't show control characters as literals? This may not show up
// well everywhere.
if pe.Position.Len == 1 {
fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d:\n\n",
msg, pe.Position.Line, col+1)
} else {
fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d-%d:\n\n",
msg, pe.Position.Line, col, col+pe.Position.Len)
}
if pe.Position.Line > 2 {
fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, lines[pe.Position.Line-3])
}
if pe.Position.Line > 1 {
fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-1, lines[pe.Position.Line-2])
}
fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, lines[pe.Position.Line-1])
fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col), strings.Repeat("^", pe.Position.Len))
return b.String()
}
// ErrorWithUsage() returns the error with detailed location context and usage
// guidance.
//
// See the documentation on ParseError.
func (pe ParseError) ErrorWithUsage() string {
m := pe.ErrorWithPosition()
if u, ok := pe.err.(interface{ Usage() string }); ok && u.Usage() != "" {
return m + "Error help:\n\n " +
strings.ReplaceAll(strings.TrimSpace(u.Usage()), "\n", "\n ") +
"\n"
}
return m
}
func (pe ParseError) column(lines []string) int {
var pos, col int
for i := range lines {
ll := len(lines[i]) + 1 // +1 for the removed newline
if pos+ll >= pe.Position.Start {
col = pe.Position.Start - pos
if col < 0 { // Should never happen, but just in case.
col = 0
}
break
}
pos += ll
}
return col
}
type (
errLexControl struct{ r rune }
errLexEscape struct{ r rune }
errLexUTF8 struct{ b byte }
errLexInvalidNum struct{ v string }
errLexInvalidDate struct{ v string }
errLexInlineTableNL struct{}
errLexStringNL struct{}
)
func (e errLexControl) Error() string {
return fmt.Sprintf("TOML files cannot contain control characters: '0x%02x'", e.r)
}
func (e errLexControl) Usage() string { return "" }
func (e errLexEscape) Error() string { return fmt.Sprintf(`invalid escape in string '\%c'`, e.r) }
func (e errLexEscape) Usage() string { return usageEscape }
func (e errLexUTF8) Error() string { return fmt.Sprintf("invalid UTF-8 byte: 0x%02x", e.b) }
func (e errLexUTF8) Usage() string { return "" }
func (e errLexInvalidNum) Error() string { return fmt.Sprintf("invalid number: %q", e.v) }
func (e errLexInvalidNum) Usage() string { return "" }
func (e errLexInvalidDate) Error() string { return fmt.Sprintf("invalid date: %q", e.v) }
func (e errLexInvalidDate) Usage() string { return "" }
func (e errLexInlineTableNL) Error() string { return "newlines not allowed within inline tables" }
func (e errLexInlineTableNL) Usage() string { return usageInlineNewline }
func (e errLexStringNL) Error() string { return "strings cannot contain newlines" }
func (e errLexStringNL) Usage() string { return usageStringNewline }
const usageEscape = `
A '\' inside a "-delimited string is interpreted as an escape character.
The following escape sequences are supported:
\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX
To prevent a '\' from being recognized as an escape character, use either:
- a ' or '''-delimited string; escape characters aren't processed in them; or
- write two backslashes to get a single backslash: '\\'.
If you're trying to add a Windows path (e.g. "C:\Users\martin") then using '/'
instead of '\' will usually also work: "C:/Users/martin".
`
const usageInlineNewline = `
Inline tables must always be on a single line:
table = {key = 42, second = 43}
It is invalid to split them over multiple lines like so:
# INVALID
table = {
key = 42,
second = 43
}
Use regular tables for this:
[table]
key = 42
second = 43
`
const usageStringNewline = `
Strings must always be on a single line, and cannot span more than one line:
# INVALID
string = "Hello,
world!"
Instead use """ or ''' to split strings over multiple lines:
string = """Hello,
world!"""
`

vendor/github.com/BurntSushi/toml/lex.go
View File

@ -37,28 +37,14 @@ const (
itemInlineTableEnd
)
const (
eof = 0
comma = ','
tableStart = '['
tableEnd = ']'
arrayTableStart = '['
arrayTableEnd = ']'
tableSep = '.'
keySep = '='
arrayStart = '['
arrayEnd = ']'
commentStart = '#'
stringStart = '"'
stringEnd = '"'
rawStringStart = '\''
rawStringEnd = '\''
inlineTableStart = '{'
inlineTableEnd = '}'
)
const eof = 0
type stateFn func(lx *lexer) stateFn
func (p Position) String() string {
return fmt.Sprintf("at line %d; start %d; length %d", p.Line, p.Start, p.Len)
}
type lexer struct {
input string
start int
@ -67,26 +53,26 @@ type lexer struct {
state stateFn
items chan item
// Allow for backing up up to four runes.
// This is necessary because TOML contains 3-rune tokens (""" and ''').
// Allow for backing up up to 4 runes. This is necessary because TOML
// contains 3-rune tokens (""" and ''').
prevWidths [4]int
nprev int // how many of prevWidths are in use
// If we emit an eof, we can still back up, but it is not OK to call
// next again.
atEOF bool
nprev int // how many of prevWidths are in use
atEOF bool // If we emit an eof, we can still back up, but it is not OK to call next again.
// A stack of state functions used to maintain context.
// The idea is to reuse parts of the state machine in various places.
// For example, values can appear at the top level or within arbitrarily
// nested arrays. The last state on the stack is used after a value has
// been lexed. Similarly for comments.
//
// The idea is to reuse parts of the state machine in various places. For
// example, values can appear at the top level or within arbitrarily nested
// arrays. The last state on the stack is used after a value has been lexed.
// Similarly for comments.
stack []stateFn
}
type item struct {
typ itemType
val string
line int
typ itemType
val string
err error
pos Position
}
func (lx *lexer) nextItem() item {
@ -96,7 +82,7 @@ func (lx *lexer) nextItem() item {
return item
default:
lx.state = lx.state(lx)
//fmt.Printf(" STATE %-24s current: %-10q stack: %s\n", lx.state, lx.current(), lx.stack)
//fmt.Printf(" STATE %-24s current: %-10q stack: %s\n", lx.state, lx.current(), lx.stack)
}
}
}
@ -105,9 +91,9 @@ func lex(input string) *lexer {
lx := &lexer{
input: input,
state: lexTop,
line: 1,
items: make(chan item, 10),
stack: make([]stateFn, 0, 10),
line: 1,
}
return lx
}
@ -129,13 +115,25 @@ func (lx *lexer) current() string {
return lx.input[lx.start:lx.pos]
}
func (lx lexer) getPos() Position {
p := Position{
Line: lx.line,
Start: lx.start,
Len: lx.pos - lx.start,
}
if p.Len <= 0 {
p.Len = 1
}
return p
}
func (lx *lexer) emit(typ itemType) {
lx.items <- item{typ, lx.current(), lx.line}
lx.items <- item{typ: typ, pos: lx.getPos(), val: lx.current()}
lx.start = lx.pos
}
func (lx *lexer) emitTrim(typ itemType) {
lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line}
lx.items <- item{typ: typ, pos: lx.getPos(), val: strings.TrimSpace(lx.current())}
lx.start = lx.pos
}
@ -160,7 +158,13 @@ func (lx *lexer) next() (r rune) {
r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
if r == utf8.RuneError {
lx.errorf("invalid UTF-8 byte at position %d (line %d): 0x%02x", lx.pos, lx.line, lx.input[lx.pos])
lx.error(errLexUTF8{lx.input[lx.pos]})
return utf8.RuneError
}
// Note: don't use peek() here, as this calls next().
if isControl(r) || (r == '\r' && (len(lx.input)-1 == lx.pos || lx.input[lx.pos+1] != '\n')) {
lx.errorControlChar(r)
return utf8.RuneError
}
@ -188,6 +192,7 @@ func (lx *lexer) backup() {
lx.prevWidths[1] = lx.prevWidths[2]
lx.prevWidths[2] = lx.prevWidths[3]
lx.nprev--
lx.pos -= w
if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
lx.line--
@ -223,18 +228,58 @@ func (lx *lexer) skip(pred func(rune) bool) {
}
}
// errorf stops all lexing by emitting an error and returning `nil`.
// error stops all lexing by emitting an error and returning `nil`.
//
// Note that any value that is a character is escaped if it's a special
// character (newlines, tabs, etc.).
func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
lx.items <- item{
itemError,
fmt.Sprintf(format, values...),
lx.line,
func (lx *lexer) error(err error) stateFn {
if lx.atEOF {
return lx.errorPrevLine(err)
}
lx.items <- item{typ: itemError, pos: lx.getPos(), err: err}
return nil
}
// errorPrevLine is like error(), but sets the position to the last column of
// the previous line.
//
// This is so that unexpected EOF or NL errors don't show on a new blank line.
func (lx *lexer) errorPrevLine(err error) stateFn {
pos := lx.getPos()
pos.Line--
pos.Len = 1
pos.Start = lx.pos - 1
lx.items <- item{typ: itemError, pos: pos, err: err}
return nil
}
// errorPos is like error(), but allows explicitly setting the position.
func (lx *lexer) errorPos(start, length int, err error) stateFn {
pos := lx.getPos()
pos.Start = start
pos.Len = length
lx.items <- item{typ: itemError, pos: pos, err: err}
return nil
}
// errorf is like error, and creates a new error.
func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
if lx.atEOF {
pos := lx.getPos()
pos.Line--
pos.Len = 1
pos.Start = lx.pos - 1
lx.items <- item{typ: itemError, pos: pos, err: fmt.Errorf(format, values...)}
return nil
}
lx.items <- item{typ: itemError, pos: lx.getPos(), err: fmt.Errorf(format, values...)}
return nil
}
func (lx *lexer) errorControlChar(cc rune) stateFn {
return lx.errorPos(lx.pos-1, 1, errLexControl{cc})
}
// lexTop consumes elements at the top level of TOML data.
func lexTop(lx *lexer) stateFn {
r := lx.next()
@ -242,10 +287,10 @@ func lexTop(lx *lexer) stateFn {
return lexSkip(lx, lexTop)
}
switch r {
case commentStart:
case '#':
lx.push(lexTop)
return lexCommentStart
case tableStart:
case '[':
return lexTableStart
case eof:
if lx.pos > lx.start {
@ -268,7 +313,7 @@ func lexTop(lx *lexer) stateFn {
func lexTopEnd(lx *lexer) stateFn {
r := lx.next()
switch {
case r == commentStart:
case r == '#':
// a comment will read to a newline for us.
lx.push(lexTop)
return lexCommentStart
@ -292,7 +337,7 @@ func lexTopEnd(lx *lexer) stateFn {
// It also handles the case that this is an item in an array of tables.
// e.g., '[[name]]'.
func lexTableStart(lx *lexer) stateFn {
if lx.peek() == arrayTableStart {
if lx.peek() == '[' {
lx.next()
lx.emit(itemArrayTableStart)
lx.push(lexArrayTableEnd)
@ -309,10 +354,8 @@ func lexTableEnd(lx *lexer) stateFn {
}
func lexArrayTableEnd(lx *lexer) stateFn {
if r := lx.next(); r != arrayTableEnd {
return lx.errorf(
"expected end of table array name delimiter %q, but got %q instead",
arrayTableEnd, r)
if r := lx.next(); r != ']' {
return lx.errorf("expected end of table array name delimiter ']', but got %q instead", r)
}
lx.emit(itemArrayTableEnd)
return lexTopEnd
@ -321,11 +364,11 @@ func lexArrayTableEnd(lx *lexer) stateFn {
func lexTableNameStart(lx *lexer) stateFn {
lx.skip(isWhitespace)
switch r := lx.peek(); {
case r == tableEnd || r == eof:
case r == ']' || r == eof:
return lx.errorf("unexpected end of table name (table names cannot be empty)")
case r == tableSep:
case r == '.':
return lx.errorf("unexpected table separator (table names cannot be empty)")
case r == stringStart || r == rawStringStart:
case r == '"' || r == '\'':
lx.ignore()
lx.push(lexTableNameEnd)
return lexQuotedName
@ -342,10 +385,10 @@ func lexTableNameEnd(lx *lexer) stateFn {
switch r := lx.next(); {
case isWhitespace(r):
return lexTableNameEnd
case r == tableSep:
case r == '.':
lx.ignore()
return lexTableNameStart
case r == tableEnd:
case r == ']':
return lx.pop()
default:
return lx.errorf("expected '.' or ']' to end table name, but got %q instead", r)
@ -379,10 +422,10 @@ func lexQuotedName(lx *lexer) stateFn {
switch {
case isWhitespace(r):
return lexSkip(lx, lexValue)
case r == stringStart:
case r == '"':
lx.ignore() // ignore the '"'
return lexString
case r == rawStringStart:
case r == '\'':
lx.ignore() // ignore the "'"
return lexRawString
case r == eof:
@ -400,7 +443,7 @@ func lexKeyStart(lx *lexer) stateFn {
return lx.errorf("unexpected '=': key name appears blank")
case r == '.':
return lx.errorf("unexpected '.': keys cannot start with a '.'")
case r == stringStart || r == rawStringStart:
case r == '"' || r == '\'':
lx.ignore()
fallthrough
default: // Bare key
@ -416,7 +459,7 @@ func lexKeyNameStart(lx *lexer) stateFn {
return lx.errorf("unexpected '='")
case r == '.':
return lx.errorf("unexpected '.'")
case r == stringStart || r == rawStringStart:
case r == '"' || r == '\'':
lx.ignore()
lx.push(lexKeyEnd)
return lexQuotedName
@ -434,7 +477,7 @@ func lexKeyEnd(lx *lexer) stateFn {
case isWhitespace(r):
return lexSkip(lx, lexKeyEnd)
case r == eof:
return lx.errorf("unexpected EOF; expected key separator %q", keySep)
return lx.errorf("unexpected EOF; expected key separator '='")
case r == '.':
lx.ignore()
return lexKeyNameStart
@ -461,17 +504,17 @@ func lexValue(lx *lexer) stateFn {
return lexNumberOrDateStart
}
switch r {
case arrayStart:
case '[':
lx.ignore()
lx.emit(itemArray)
return lexArrayValue
case inlineTableStart:
case '{':
lx.ignore()
lx.emit(itemInlineTableStart)
return lexInlineTableValue
case stringStart:
if lx.accept(stringStart) {
if lx.accept(stringStart) {
case '"':
if lx.accept('"') {
if lx.accept('"') {
lx.ignore() // Ignore """
return lexMultilineString
}
@ -479,9 +522,9 @@ func lexValue(lx *lexer) stateFn {
}
lx.ignore() // ignore the '"'
return lexString
case rawStringStart:
if lx.accept(rawStringStart) {
if lx.accept(rawStringStart) {
case '\'':
if lx.accept('\'') {
if lx.accept('\'') {
lx.ignore() // Ignore """
return lexMultilineRawString
}
@ -520,14 +563,12 @@ func lexArrayValue(lx *lexer) stateFn {
switch {
case isWhitespace(r) || isNL(r):
return lexSkip(lx, lexArrayValue)
case r == commentStart:
case r == '#':
lx.push(lexArrayValue)
return lexCommentStart
case r == comma:
case r == ',':
return lx.errorf("unexpected comma")
case r == arrayEnd:
// NOTE(caleb): The spec isn't clear about whether you can have
// a trailing comma or not, so we'll allow it.
case r == ']':
return lexArrayEnd
}
@ -540,22 +581,20 @@ func lexArrayValue(lx *lexer) stateFn {
// the next value (or the end of the array): it ignores whitespace and newlines
// and expects either a ',' or a ']'.
func lexArrayValueEnd(lx *lexer) stateFn {
r := lx.next()
switch {
switch r := lx.next(); {
case isWhitespace(r) || isNL(r):
return lexSkip(lx, lexArrayValueEnd)
case r == commentStart:
case r == '#':
lx.push(lexArrayValueEnd)
return lexCommentStart
case r == comma:
case r == ',':
lx.ignore()
return lexArrayValue // move on to the next value
case r == arrayEnd:
case r == ']':
return lexArrayEnd
default:
return lx.errorf("expected a comma (',') or array terminator (']'), but got %s", runeOrEOF(r))
}
return lx.errorf(
"expected a comma or array terminator %q, but got %s instead",
arrayEnd, runeOrEOF(r))
}
// lexArrayEnd finishes the lexing of an array.
@ -574,13 +613,13 @@ func lexInlineTableValue(lx *lexer) stateFn {
case isWhitespace(r):
return lexSkip(lx, lexInlineTableValue)
case isNL(r):
return lx.errorf("newlines not allowed within inline tables")
case r == commentStart:
return lx.errorPrevLine(errLexInlineTableNL{})
case r == '#':
lx.push(lexInlineTableValue)
return lexCommentStart
case r == comma:
case r == ',':
return lx.errorf("unexpected comma")
case r == inlineTableEnd:
case r == '}':
return lexInlineTableEnd
}
lx.backup()
@ -596,23 +635,21 @@ func lexInlineTableValueEnd(lx *lexer) stateFn {
case isWhitespace(r):
return lexSkip(lx, lexInlineTableValueEnd)
case isNL(r):
return lx.errorf("newlines not allowed within inline tables")
case r == commentStart:
return lx.errorPrevLine(errLexInlineTableNL{})
case r == '#':
lx.push(lexInlineTableValueEnd)
return lexCommentStart
case r == comma:
case r == ',':
lx.ignore()
lx.skip(isWhitespace)
if lx.peek() == '}' {
return lx.errorf("trailing comma not allowed in inline tables")
}
return lexInlineTableValue
case r == inlineTableEnd:
case r == '}':
return lexInlineTableEnd
default:
return lx.errorf(
"expected a comma or an inline table terminator %q, but got %s instead",
inlineTableEnd, runeOrEOF(r))
return lx.errorf("expected a comma or an inline table terminator '}', but got %s instead", runeOrEOF(r))
}
}
@ -638,14 +675,12 @@ func lexString(lx *lexer) stateFn {
switch {
case r == eof:
return lx.errorf(`unexpected EOF; expected '"'`)
case isControl(r) || r == '\r':
return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
case isNL(r):
return lx.errorf("strings cannot contain newlines")
return lx.errorPrevLine(errLexStringNL{})
case r == '\\':
lx.push(lexString)
return lexStringEscape
case r == stringEnd:
case r == '"':
lx.backup()
lx.emit(itemString)
lx.next()
@ -660,23 +695,20 @@ func lexString(lx *lexer) stateFn {
func lexMultilineString(lx *lexer) stateFn {
r := lx.next()
switch r {
default:
return lexMultilineString
case eof:
return lx.errorf(`unexpected EOF; expected '"""'`)
case '\r':
if lx.peek() != '\n' {
return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
}
return lexMultilineString
case '\\':
return lexMultilineStringEscape
case stringEnd:
case '"':
/// Found " → try to read two more "".
if lx.accept(stringEnd) {
if lx.accept(stringEnd) {
if lx.accept('"') {
if lx.accept('"') {
/// Peek ahead: the string can contain " and "", including at the
/// end: """str"""""
/// 6 or more at the end, however, is an error.
if lx.peek() == stringEnd {
if lx.peek() == '"' {
/// Check if we already lexed 5 's; if so we have 6 now, and
/// that's just too many man!
if strings.HasSuffix(lx.current(), `"""""`) {
@ -699,12 +731,8 @@ func lexMultilineString(lx *lexer) stateFn {
}
lx.backup()
}
return lexMultilineString
}
if isControl(r) {
return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
}
return lexMultilineString
}
// lexRawString consumes a raw string. Nothing can be escaped in such a string.
@ -712,20 +740,19 @@ func lexMultilineString(lx *lexer) stateFn {
func lexRawString(lx *lexer) stateFn {
r := lx.next()
switch {
default:
return lexRawString
case r == eof:
return lx.errorf(`unexpected EOF; expected "'"`)
case isControl(r) || r == '\r':
return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
case isNL(r):
return lx.errorf("strings cannot contain newlines")
case r == rawStringEnd:
return lx.errorPrevLine(errLexStringNL{})
case r == '\'':
lx.backup()
lx.emit(itemRawString)
lx.next()
lx.ignore()
return lx.pop()
}
return lexRawString
}
// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
@ -734,21 +761,18 @@ func lexRawString(lx *lexer) stateFn {
func lexMultilineRawString(lx *lexer) stateFn {
r := lx.next()
switch r {
default:
return lexMultilineRawString
case eof:
return lx.errorf(`unexpected EOF; expected "'''"`)
case '\r':
if lx.peek() != '\n' {
return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
}
return lexMultilineRawString
case rawStringEnd:
case '\'':
/// Found ' → try to read two more ''.
if lx.accept(rawStringEnd) {
if lx.accept(rawStringEnd) {
if lx.accept('\'') {
if lx.accept('\'') {
/// Peek ahead: the string can contain ' and '', including at the
/// end: '''str'''''
/// 6 or more at the end, however, is an error.
if lx.peek() == rawStringEnd {
if lx.peek() == '\'' {
/// Check if we already lexed 5 's; if so we have 6 now, and
/// that's just too many man!
if strings.HasSuffix(lx.current(), "'''''") {
@ -771,12 +795,8 @@ func lexMultilineRawString(lx *lexer) stateFn {
}
lx.backup()
}
return lexMultilineRawString
}
if isControl(r) {
return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
}
return lexMultilineRawString
}
// lexMultilineStringEscape consumes an escaped character. It assumes that the
@ -817,8 +837,7 @@ func lexStringEscape(lx *lexer) stateFn {
case 'U':
return lexLongUnicodeEscape
}
return lx.errorf("invalid escape character %q; only the following escape characters are allowed: "+
`\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r)
return lx.error(errLexEscape{r})
}
func lexShortUnicodeEscape(lx *lexer) stateFn {
@ -1108,8 +1127,6 @@ func lexComment(lx *lexer) stateFn {
lx.backup()
lx.emit(itemText)
return lx.pop()
case isControl(r):
return lx.errorf("control characters are not allowed inside comments: '0x%02x'", r)
default:
return lexComment
}
@ -1121,52 +1138,6 @@ func lexSkip(lx *lexer, nextState stateFn) stateFn {
return nextState
}
// isWhitespace returns true if `r` is a whitespace character according
// to the spec.
func isWhitespace(r rune) bool {
return r == '\t' || r == ' '
}
func isNL(r rune) bool {
return r == '\n' || r == '\r'
}
// Control characters except \n, \t
func isControl(r rune) bool {
switch r {
case '\t', '\r', '\n':
return false
default:
return (r >= 0x00 && r <= 0x1f) || r == 0x7f
}
}
func isDigit(r rune) bool {
return r >= '0' && r <= '9'
}
func isHexadecimal(r rune) bool {
return (r >= '0' && r <= '9') ||
(r >= 'a' && r <= 'f') ||
(r >= 'A' && r <= 'F')
}
func isOctal(r rune) bool {
return r >= '0' && r <= '7'
}
func isBinary(r rune) bool {
return r == '0' || r == '1'
}
func isBareKeyChar(r rune) bool {
return (r >= 'A' && r <= 'Z') ||
(r >= 'a' && r <= 'z') ||
(r >= '0' && r <= '9') ||
r == '_' ||
r == '-'
}
func (s stateFn) String() string {
name := runtime.FuncForPC(reflect.ValueOf(s).Pointer()).Name()
if i := strings.LastIndexByte(name, '.'); i > -1 {
@ -1223,3 +1194,26 @@ func (itype itemType) String() string {
func (item item) String() string {
return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
}
func isWhitespace(r rune) bool { return r == '\t' || r == ' ' }
func isNL(r rune) bool { return r == '\n' || r == '\r' }
func isControl(r rune) bool { // Control characters except \t, \r, \n
switch r {
case '\t', '\r', '\n':
return false
default:
return (r >= 0x00 && r <= 0x1f) || r == 0x7f
}
}
func isDigit(r rune) bool { return r >= '0' && r <= '9' }
func isBinary(r rune) bool { return r == '0' || r == '1' }
func isOctal(r rune) bool { return r >= '0' && r <= '7' }
func isHexadecimal(r rune) bool {
return (r >= '0' && r <= '9') || (r >= 'a' && r <= 'f') || (r >= 'A' && r <= 'F')
}
func isBareKeyChar(r rune) bool {
return (r >= 'A' && r <= 'Z') ||
(r >= 'a' && r <= 'z') ||
(r >= '0' && r <= '9') ||
r == '_' || r == '-'
}

vendor/github.com/BurntSushi/toml/meta.go
View File

@ -1,34 +1,39 @@
package toml
import "strings"
import (
"strings"
)
// MetaData allows access to meta information about TOML data that may not be
// inferable via reflection. In particular, whether a key has been defined and
// the TOML type of a key.
// MetaData allows access to meta information about TOML data that's not
// accessible otherwise.
//
// It allows checking if a key is defined in the TOML data, whether any keys
// were undecoded, and the TOML type of a key.
type MetaData struct {
context Key // Used only during decoding.
mapping map[string]interface{}
types map[string]tomlType
keys []Key
decoded map[string]bool
context Key // Used only during decoding.
decoded map[string]struct{}
}
// IsDefined reports if the key exists in the TOML data.
//
// The key should be specified hierarchically, for example to access the TOML
// key "a.b.c" you would use:
// key "a.b.c" you would use IsDefined("a", "b", "c"). Keys are case sensitive.
//
// IsDefined("a", "b", "c")
//
// IsDefined will return false if an empty key given. Keys are case sensitive.
// Returns false for an empty key.
func (md *MetaData) IsDefined(key ...string) bool {
if len(key) == 0 {
return false
}
var hash map[string]interface{}
var ok bool
var hashOrVal interface{} = md.mapping
var (
hash map[string]interface{}
ok bool
hashOrVal interface{} = md.mapping
)
for _, k := range key {
if hash, ok = hashOrVal.(map[string]interface{}); !ok {
return false
@ -45,51 +50,12 @@ func (md *MetaData) IsDefined(key ...string) bool {
// Type will return the empty string if given an empty key or a key that does
// not exist. Keys are case sensitive.
func (md *MetaData) Type(key ...string) string {
fullkey := strings.Join(key, ".")
if typ, ok := md.types[fullkey]; ok {
if typ, ok := md.types[Key(key).String()]; ok {
return typ.typeString()
}
return ""
}
// Key represents any TOML key, including key groups. Use (MetaData).Keys to get
// values of this type.
type Key []string
func (k Key) String() string { return strings.Join(k, ".") }
func (k Key) maybeQuotedAll() string {
var ss []string
for i := range k {
ss = append(ss, k.maybeQuoted(i))
}
return strings.Join(ss, ".")
}
func (k Key) maybeQuoted(i int) string {
if k[i] == "" {
return `""`
}
quote := false
for _, c := range k[i] {
if !isBareKeyChar(c) {
quote = true
break
}
}
if quote {
return `"` + quotedReplacer.Replace(k[i]) + `"`
}
return k[i]
}
func (k Key) add(piece string) Key {
newKey := make(Key, len(k)+1)
copy(newKey, k)
newKey[len(k)] = piece
return newKey
}
// Keys returns a slice of every key in the TOML data, including key groups.
//
// Each key is itself a slice, where the first element is the top of the
@ -115,9 +81,40 @@ func (md *MetaData) Keys() []Key {
func (md *MetaData) Undecoded() []Key {
undecoded := make([]Key, 0, len(md.keys))
for _, key := range md.keys {
if !md.decoded[key.String()] {
if _, ok := md.decoded[key.String()]; !ok {
undecoded = append(undecoded, key)
}
}
return undecoded
}
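A short sketch of using this metadata to warn about unknown keys (the struct and key names are illustrative):

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var cfg struct {
		Port int `toml:"port"`
	}
	md, err := toml.Decode("port = 53\ntypo_key = true\n", &cfg)
	if err != nil {
		panic(err)
	}
	fmt.Println(md.IsDefined("port")) // true
	fmt.Println(md.Undecoded())       // [typo_key]
}
```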
// Key represents any TOML key, including key groups. Use (MetaData).Keys to get
// values of this type.
type Key []string
func (k Key) String() string {
ss := make([]string, len(k))
for i := range k {
ss[i] = k.maybeQuoted(i)
}
return strings.Join(ss, ".")
}
func (k Key) maybeQuoted(i int) string {
if k[i] == "" {
return `""`
}
for _, c := range k[i] {
if !isBareKeyChar(c) {
return `"` + dblQuotedReplacer.Replace(k[i]) + `"`
}
}
return k[i]
}
func (k Key) add(piece string) Key {
newKey := make(Key, len(k)+1)
copy(newKey, k)
newKey[len(k)] = piece
return newKey
}

vendor/github.com/BurntSushi/toml/parse.go
View File

@ -1,7 +1,6 @@
package toml
import (
"errors"
"fmt"
"strconv"
"strings"
@ -12,35 +11,23 @@ import (
)
type parser struct {
mapping map[string]interface{}
types map[string]tomlType
lx *lexer
lx *lexer
context Key // Full key for the current hash in scope.
currentKey string // Base key name for everything except hashes.
pos Position // Current position in the TOML file.
ordered []Key // List of keys in the order that they appear in the TOML data.
context Key // Full key for the current hash in scope.
currentKey string // Base key name for everything except hashes.
approxLine int // Rough approximation of line number
implicits map[string]bool // Record implied keys (e.g. 'key.group.names').
}
// ParseError is used when a file can't be parsed: for example invalid integer
// literals, duplicate keys, etc.
type ParseError struct {
Message string
Line int
LastKey string
}
func (pe ParseError) Error() string {
return fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
pe.Line, pe.LastKey, pe.Message)
ordered []Key // List of keys in the order that they appear in the TOML data.
mapping map[string]interface{} // Map keyname → key value.
types map[string]tomlType // Map keyname → TOML type.
implicits map[string]struct{} // Record implicit keys (e.g. "key.group.names").
}
func parse(data string) (p *parser, err error) {
defer func() {
if r := recover(); r != nil {
var ok bool
if err, ok = r.(ParseError); ok {
if pErr, ok := r.(ParseError); ok {
pErr.input = data
err = pErr
return
}
panic(r)
@ -60,8 +47,13 @@ func parse(data string) (p *parser, err error) {
if len(data) < 6 {
ex = len(data)
}
if strings.ContainsRune(data[:ex], 0) {
return nil, errors.New("files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8")
if i := strings.IndexRune(data[:ex], 0); i > -1 {
return nil, ParseError{
Message: "files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8",
Position: Position{Line: 1, Start: i, Len: 1},
Line: 1,
input: data,
}
}
p = &parser{
@ -69,7 +61,7 @@ func parse(data string) (p *parser, err error) {
types: make(map[string]tomlType),
lx: lex(data),
ordered: make([]Key, 0),
implicits: make(map[string]bool),
implicits: make(map[string]struct{}),
}
for {
item := p.next()
@ -82,12 +74,21 @@ func parse(data string) (p *parser, err error) {
return p, nil
}
func (p *parser) panicf(format string, v ...interface{}) {
msg := fmt.Sprintf(format, v...)
func (p *parser) panicItemf(it item, format string, v ...interface{}) {
panic(ParseError{
Message: msg,
Line: p.approxLine,
LastKey: p.current(),
Message: fmt.Sprintf(format, v...),
Position: it.pos,
Line: it.pos.Len,
LastKey: p.current(),
})
}
func (p *parser) panicf(format string, v ...interface{}) {
panic(ParseError{
Message: fmt.Sprintf(format, v...),
Position: p.pos,
Line: p.pos.Line,
LastKey: p.current(),
})
}
@ -95,11 +96,26 @@ func (p *parser) next() item {
it := p.lx.nextItem()
//fmt.Printf("ITEM %-18s line %-3d │ %q\n", it.typ, it.line, it.val)
if it.typ == itemError {
p.panicf("%s", it.val)
if it.err != nil {
panic(ParseError{
Position: it.pos,
Line: it.pos.Line,
LastKey: p.current(),
err: it.err,
})
}
p.panicItemf(it, "%s", it.val)
}
return it
}
func (p *parser) nextPos() item {
it := p.next()
p.pos = it.pos
return it
}
func (p *parser) bug(format string, v ...interface{}) {
panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
}
@ -119,11 +135,9 @@ func (p *parser) assertEqual(expected, got itemType) {
func (p *parser) topLevel(item item) {
switch item.typ {
case itemCommentStart: // # ..
p.approxLine = item.line
p.expect(itemText)
case itemTableStart: // [ .. ]
name := p.next()
p.approxLine = name.line
name := p.nextPos()
var key Key
for ; name.typ != itemTableEnd && name.typ != itemEOF; name = p.next() {
@ -135,8 +149,7 @@ func (p *parser) topLevel(item item) {
p.setType("", tomlHash)
p.ordered = append(p.ordered, key)
case itemArrayTableStart: // [[ .. ]]
name := p.next()
p.approxLine = name.line
name := p.nextPos()
var key Key
for ; name.typ != itemArrayTableEnd && name.typ != itemEOF; name = p.next() {
@ -150,8 +163,7 @@ func (p *parser) topLevel(item item) {
case itemKeyStart: // key = ..
outerContext := p.context
/// Read all the key parts (e.g. 'a' and 'b' in 'a.b')
k := p.next()
p.approxLine = k.line
k := p.nextPos()
var key Key
for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() {
key = append(key, p.keyString(k))
@ -206,9 +218,9 @@ var datetimeRepl = strings.NewReplacer(
func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) {
switch it.typ {
case itemString:
return p.replaceEscapes(it.val), p.typeOfPrimitive(it)
return p.replaceEscapes(it, it.val), p.typeOfPrimitive(it)
case itemMultilineString:
return p.replaceEscapes(stripFirstNewline(stripEscapedNewlines(it.val))), p.typeOfPrimitive(it)
return p.replaceEscapes(it, stripFirstNewline(stripEscapedNewlines(it.val))), p.typeOfPrimitive(it)
case itemRawString:
return it.val, p.typeOfPrimitive(it)
case itemRawMultilineString:
@ -240,10 +252,10 @@ func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) {
func (p *parser) valueInteger(it item) (interface{}, tomlType) {
if !numUnderscoresOK(it.val) {
p.panicf("Invalid integer %q: underscores must be surrounded by digits", it.val)
p.panicItemf(it, "Invalid integer %q: underscores must be surrounded by digits", it.val)
}
if numHasLeadingZero(it.val) {
p.panicf("Invalid integer %q: cannot have leading zeroes", it.val)
p.panicItemf(it, "Invalid integer %q: cannot have leading zeroes", it.val)
}
num, err := strconv.ParseInt(it.val, 0, 64)
@ -254,7 +266,7 @@ func (p *parser) valueInteger(it item) (interface{}, tomlType) {
// So mark the former as a bug but the latter as a legitimate user
// error.
if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange {
p.panicf("Integer '%s' is out of the range of 64-bit signed integers.", it.val)
p.panicItemf(it, "Integer '%s' is out of the range of 64-bit signed integers.", it.val)
} else {
p.bug("Expected integer value, but got '%s'.", it.val)
}
@ -272,18 +284,18 @@ func (p *parser) valueFloat(it item) (interface{}, tomlType) {
})
for _, part := range parts {
if !numUnderscoresOK(part) {
p.panicf("Invalid float %q: underscores must be surrounded by digits", it.val)
p.panicItemf(it, "Invalid float %q: underscores must be surrounded by digits", it.val)
}
}
if len(parts) > 0 && numHasLeadingZero(parts[0]) {
p.panicf("Invalid float %q: cannot have leading zeroes", it.val)
p.panicItemf(it, "Invalid float %q: cannot have leading zeroes", it.val)
}
if !numPeriodsOK(it.val) {
// As a special case, numbers like '123.' or '1.e2',
// which are valid as far as Go/strconv are concerned,
// must be rejected because TOML says that a fractional
// part consists of '.' followed by 1+ digits.
p.panicf("Invalid float %q: '.' must be followed by one or more digits", it.val)
p.panicItemf(it, "Invalid float %q: '.' must be followed by one or more digits", it.val)
}
val := strings.Replace(it.val, "_", "", -1)
if val == "+nan" || val == "-nan" { // Go doesn't support this, but TOML spec does.
@ -292,9 +304,9 @@ func (p *parser) valueFloat(it item) (interface{}, tomlType) {
num, err := strconv.ParseFloat(val, 64)
if err != nil {
if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange {
p.panicf("Float '%s' is out of the range of 64-bit IEEE-754 floating-point numbers.", it.val)
p.panicItemf(it, "Float '%s' is out of the range of 64-bit IEEE-754 floating-point numbers.", it.val)
} else {
p.panicf("Invalid float value: %q", it.val)
p.panicItemf(it, "Invalid float value: %q", it.val)
}
}
return num, p.typeOfPrimitive(it)
@ -325,7 +337,7 @@ func (p *parser) valueDatetime(it item) (interface{}, tomlType) {
}
}
if !ok {
p.panicf("Invalid TOML Datetime: %q.", it.val)
p.panicItemf(it, "Invalid TOML Datetime: %q.", it.val)
}
return t, p.typeOfPrimitive(it)
}
@ -335,8 +347,12 @@ func (p *parser) valueArray(it item) (interface{}, tomlType) {
// p.setType(p.currentKey, typ)
var (
array []interface{}
types []tomlType
// Initialize to a non-nil empty slice. This makes it consistent with
// how S = [] decodes into a non-nil slice inside something like struct
// { S []string }. See #338
array = []interface{}{}
)
for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
if it.typ == itemCommentStart {
@ -347,6 +363,12 @@ func (p *parser) valueArray(it item) (interface{}, tomlType) {
val, typ := p.value(it, true)
array = append(array, val)
types = append(types, typ)
// XXX: types isn't used here; we need it to record the accurate type
// information.
//
// Not entirely sure how to best store this; could use "key[0]",
// "key[1]" notation, or maybe store it on the Array type?
}
return array, tomlArray
}
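If the comment above holds, an empty TOML array now decodes into an empty but non-nil slice. A small sketch under that assumption; the struct and key names are made up:

```go
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

func main() {
	var cfg struct{ S []string }
	if _, err := toml.Decode("S = []", &cfg); err != nil {
		log.Fatal(err)
	}
	// Expected with the change above: empty but non-nil (see #338).
	fmt.Println(cfg.S != nil, len(cfg.S)) // true 0
}
```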
@ -373,8 +395,7 @@ func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tom
}
/// Read all key parts.
k := p.next()
p.approxLine = k.line
k := p.nextPos()
var key Key
for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() {
key = append(key, p.keyString(k))
@ -408,7 +429,7 @@ func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tom
// numHasLeadingZero checks if this number has leading zeroes, allowing for '0',
// +/- signs, and base prefixes.
func numHasLeadingZero(s string) bool {
if len(s) > 1 && s[0] == '0' && isDigit(rune(s[1])) { // >1 to allow "0" and isDigit to allow 0x
if len(s) > 1 && s[0] == '0' && !(s[1] == 'b' || s[1] == 'o' || s[1] == 'x') { // Allow 0b, 0o, 0x
return true
}
if len(s) > 2 && (s[0] == '-' || s[0] == '+') && s[1] == '0' {
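The reworked numHasLeadingZero exempts the 0b/0o/0x base prefixes from the leading-zero rule while still rejecting plain decimals such as 0755. A rough illustration; the key and field names are invented:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var v struct {
		A int64
		B int64
		C int64
	}

	// Base-prefixed literals are fine: 0o755 == 493, 0b1011 == 11, 0x1A == 26.
	if _, err := toml.Decode("a = 0o755\nb = 0b1011\nc = 0x1A", &v); err != nil {
		fmt.Println("unexpected error:", err)
		return
	}
	fmt.Println(v.A, v.B, v.C) // 493 11 26

	// A plain decimal with a leading zero should still be rejected
	// ("cannot have leading zeroes").
	_, err := toml.Decode("d = 0755", &struct{ D int }{})
	fmt.Println(err != nil) // true
}
```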
@ -503,7 +524,7 @@ func (p *parser) addContext(key Key, array bool) {
if hash, ok := hashContext[k].([]map[string]interface{}); ok {
hashContext[k] = append(hash, make(map[string]interface{}))
} else {
p.panicf("Key '%s' was already created and cannot be used as an array.", keyContext)
p.panicf("Key '%s' was already created and cannot be used as an array.", key)
}
} else {
p.setValue(key[len(key)-1], make(map[string]interface{}))
@ -513,8 +534,8 @@ func (p *parser) addContext(key Key, array bool) {
// set calls setValue and setType.
func (p *parser) set(key string, val interface{}, typ tomlType) {
p.setValue(p.currentKey, val)
p.setType(p.currentKey, typ)
p.setValue(key, val)
p.setType(key, typ)
}
// setValue sets the given key to the given value in the current context.
@ -573,27 +594,31 @@ func (p *parser) setValue(key string, value interface{}) {
hash[key] = value
}
// setType sets the type of a particular value at a given key.
// It should be called immediately AFTER setValue.
// setType sets the type of a particular value at a given key. It should be
// called immediately AFTER setValue.
//
// Note that if `key` is empty, then the type given will be applied to the
// current context (which is either a table or an array of tables).
func (p *parser) setType(key string, typ tomlType) {
keyContext := make(Key, 0, len(p.context)+1)
for _, k := range p.context {
keyContext = append(keyContext, k)
}
keyContext = append(keyContext, p.context...)
if len(key) > 0 { // allow type setting for hashes
keyContext = append(keyContext, key)
}
// Special case to make empty keys ("" = 1) work.
// Without it, it will set "" rather than `""`.
// TODO: why is this needed? And why is this only needed here?
if len(keyContext) == 0 {
keyContext = Key{""}
}
p.types[keyContext.String()] = typ
}
// Implicit keys need to be created when tables are implied in "a.b.c.d = 1" and
// "[a.b.c]" (the "a", "b", and "c" hashes are never created explicitly).
func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = true }
func (p *parser) removeImplicit(key Key) { p.implicits[key.String()] = false }
func (p *parser) isImplicit(key Key) bool { return p.implicits[key.String()] }
func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = struct{}{} }
func (p *parser) removeImplicit(key Key) { delete(p.implicits, key.String()) }
func (p *parser) isImplicit(key Key) bool { _, ok := p.implicits[key.String()]; return ok }
func (p *parser) isArray(key Key) bool { return p.types[key.String()] == tomlArray }
func (p *parser) addImplicitContext(key Key) {
p.addImplicit(key)
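The implicits set above (now a map[string]struct{}) tracks tables that are only implied by a dotted header or key. Roughly what that means at the decoding level, assuming a plain map target; the table names are arbitrary:

```go
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

func main() {
	var m map[string]interface{}
	if _, err := toml.Decode("[a.b.c]\nd = 1", &m); err != nil {
		log.Fatal(err)
	}

	// Only [a.b.c] is written in the document, but the implicit tables
	// "a" and "a.b" are still materialized as nested maps.
	a := m["a"].(map[string]interface{})
	b := a["b"].(map[string]interface{})
	fmt.Println(b["c"]) // map[d:1]
}
```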
@ -662,8 +687,8 @@ func stripEscapedNewlines(s string) string {
return strings.Join(split, "")
}
func (p *parser) replaceEscapes(str string) string {
var replaced []rune
func (p *parser) replaceEscapes(it item, str string) string {
replaced := make([]rune, 0, len(str))
s := []byte(str)
r := 0
for r < len(s) {
@ -683,7 +708,7 @@ func (p *parser) replaceEscapes(str string) string {
p.bug("Expected valid escape code after \\, but got %q.", s[r])
return ""
case ' ', '\t':
p.panicf("invalid escape: '\\%c'", s[r])
p.panicItemf(it, "invalid escape: '\\%c'", s[r])
return ""
case 'b':
replaced = append(replaced, rune(0x0008))
@ -710,14 +735,14 @@ func (p *parser) replaceEscapes(str string) string {
// At this point, we know we have a Unicode escape of the form
// `uXXXX` at [r, r+5). (Because the lexer guarantees this
// for us.)
escaped := p.asciiEscapeToUnicode(s[r+1 : r+5])
escaped := p.asciiEscapeToUnicode(it, s[r+1:r+5])
replaced = append(replaced, escaped)
r += 5
case 'U':
// At this point, we know we have a Unicode escape of the form
// `UXXXXXXXX` at [r, r+9). (Because the lexer guarantees this
// for us.)
escaped := p.asciiEscapeToUnicode(s[r+1 : r+9])
escaped := p.asciiEscapeToUnicode(it, s[r+1:r+9])
replaced = append(replaced, escaped)
r += 9
}
@ -725,15 +750,14 @@ func (p *parser) replaceEscapes(str string) string {
return string(replaced)
}
func (p *parser) asciiEscapeToUnicode(bs []byte) rune {
func (p *parser) asciiEscapeToUnicode(it item, bs []byte) rune {
s := string(bs)
hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
if err != nil {
p.bug("Could not parse '%s' as a hexadecimal number, but the "+
"lexer claims it's OK: %s", s, err)
p.bug("Could not parse '%s' as a hexadecimal number, but the lexer claims it's OK: %s", s, err)
}
if !utf8.ValidRune(rune(hex)) {
p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s)
p.panicItemf(it, "Escaped character '\\u%s' is not valid UTF-8.", s)
}
return rune(hex)
}
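With the change above, an escape that passes the lexer's hex check but does not form a valid rune (a lone UTF-16 surrogate, for instance) is reported as a positioned ParseError rather than via the old line approximation. A quick sketch, with an invented key name:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var v struct{ S string }

	// \uD800 is a lone surrogate: four valid hex digits, so the lexer accepts
	// it, but utf8.ValidRune rejects it in asciiEscapeToUnicode above.
	_, err := toml.Decode("s = \"\\uD800\"", &v)
	fmt.Println(err) // ... Escaped character '\uD800' is not valid UTF-8 ...
}
```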

View File

@ -70,8 +70,8 @@ func typeFields(t reflect.Type) []field {
next := []field{{typ: t}}
// Count of queued names for current level and the next.
count := map[reflect.Type]int{}
nextCount := map[reflect.Type]int{}
var count map[reflect.Type]int
var nextCount map[reflect.Type]int
// Types already visited at an earlier level.
visited := map[reflect.Type]bool{}
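The switch from eagerly allocated maps to plain `var` declarations above presumably relies on the fact that a nil Go map can be read from (and have its length taken) freely; only writes need an allocated map, which the rest of typeFields must perform before inserting. A generic reminder of that semantics, not tied to this package:

```go
package main

import "fmt"

func main() {
	var count map[string]int // nil until something is actually counted

	// Reads and len() on a nil map are safe and return zero values.
	fmt.Println(len(count), count["missing"]) // 0 0

	// Writes require an allocated map; doing this on a nil map would panic:
	// count["x"]++        // panic: assignment to entry in nil map
	count = make(map[string]int)
	count["x"]++
	fmt.Println(count["x"]) // 1
}
```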

View File

@ -16,7 +16,7 @@ func typeEqual(t1, t2 tomlType) bool {
return t1.typeString() == t2.typeString()
}
func typeIsHash(t tomlType) bool {
func typeIsTable(t tomlType) bool {
return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
}

4
vendor/modules.txt vendored
View File

@ -1,7 +1,7 @@
# 4d63.com/gochecknoglobals v0.0.0-20201008074935-acfc0b28355a
## explicit; go 1.15
4d63.com/gochecknoglobals/checknoglobals
# github.com/BurntSushi/toml v0.4.1
# github.com/BurntSushi/toml v1.0.0
## explicit; go 1.16
github.com/BurntSushi/toml
github.com/BurntSushi/toml/internal
@ -524,7 +524,7 @@ github.com/uudashr/gocognit
# github.com/yeya24/promlinter v0.1.0
## explicit; go 1.14
github.com/yeya24/promlinter
# golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3
# golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce
## explicit; go 1.17
golang.org/x/crypto/blake2b
golang.org/x/crypto/chacha20