path: root/vendor/github.com/BurntSushi/toml
author    Gibheer <gibheer+git@zero-knowledge.org>  2024-09-05 19:38:25 +0200
committer Gibheer <gibheer+git@zero-knowledge.org>  2024-09-05 19:38:25 +0200
commit    6ea4d2c82de80efc87708e5e182034b7c6c2019e (patch)
tree      35c0856a929040216c82153ca62d43b27530a887  /vendor/github.com/BurntSushi/toml
parent    6f64eeace1b66639b9380b44e88a8d54850a4306 (diff)
switch from github.com/lib/pq to github.com/jackc/pgx/v5 (HEAD, 20240905, master)
lib/pq has been unmaintained for some time now, so switch to the newer, more active library. pgx looks like it has finally stabilized after a long time.
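For context on the switch itself (the hunks below only cover the re-vendored BurntSushi/toml dependency), the application-side change boils down to swapping the database/sql driver. This is a minimal sketch under the assumption that the application goes through database/sql; the openDB helper and dsn parameter are illustrative, not part of this commit:

package main

import (
	"database/sql"

	_ "github.com/jackc/pgx/v5/stdlib" // was: _ "github.com/lib/pq"
)

// openDB opens a database/sql handle. pgx's stdlib wrapper registers its
// driver under the name "pgx", whereas lib/pq registered itself as "postgres".
func openDB(dsn string) (*sql.DB, error) {
	return sql.Open("pgx", dsn)
}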
Diffstat (limited to 'vendor/github.com/BurntSushi/toml')
 -rw-r--r--  vendor/github.com/BurntSushi/toml/README.md       |   2
 -rw-r--r--  vendor/github.com/BurntSushi/toml/decode.go       |  97
 -rw-r--r--  vendor/github.com/BurntSushi/toml/decode_go116.go |  19
 -rw-r--r--  vendor/github.com/BurntSushi/toml/deprecated.go   |  16
 -rw-r--r--  vendor/github.com/BurntSushi/toml/doc.go          |   3
 -rw-r--r--  vendor/github.com/BurntSushi/toml/encode.go       |  98
 -rw-r--r--  vendor/github.com/BurntSushi/toml/error.go        | 115
 -rw-r--r--  vendor/github.com/BurntSushi/toml/lex.go          | 122
 -rw-r--r--  vendor/github.com/BurntSushi/toml/meta.go         |  49
 -rw-r--r--  vendor/github.com/BurntSushi/toml/parse.go        | 383
 -rw-r--r--  vendor/github.com/BurntSushi/toml/type_fields.go  |   8
 -rw-r--r--  vendor/github.com/BurntSushi/toml/type_toml.go    |  11
12 files changed, 577 insertions, 346 deletions
diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md
index 3651cfa..639e6c3 100644
--- a/vendor/github.com/BurntSushi/toml/README.md
+++ b/vendor/github.com/BurntSushi/toml/README.md
@@ -9,7 +9,7 @@ See the [releases page](https://github.com/BurntSushi/toml/releases) for a
changelog; this information is also in the git tag annotations (e.g. `git show
v0.4.0`).
-This library requires Go 1.13 or newer; add it to your go.mod with:
+This library requires Go 1.18 or newer; add it to your go.mod with:
% go get github.com/BurntSushi/toml@latest
diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go
index 0ca1dc4..7aaf462 100644
--- a/vendor/github.com/BurntSushi/toml/decode.go
+++ b/vendor/github.com/BurntSushi/toml/decode.go
@@ -6,7 +6,7 @@ import (
"encoding/json"
"fmt"
"io"
- "io/ioutil"
+ "io/fs"
"math"
"os"
"reflect"
@@ -18,13 +18,13 @@ import (
// Unmarshaler is the interface implemented by objects that can unmarshal a
// TOML description of themselves.
type Unmarshaler interface {
- UnmarshalTOML(interface{}) error
+ UnmarshalTOML(any) error
}
// Unmarshal decodes the contents of data in TOML format into a pointer v.
//
// See [Decoder] for a description of the decoding process.
-func Unmarshal(data []byte, v interface{}) error {
+func Unmarshal(data []byte, v any) error {
_, err := NewDecoder(bytes.NewReader(data)).Decode(v)
return err
}
@@ -32,12 +32,12 @@ func Unmarshal(data []byte, v interface{}) error {
// Decode the TOML data in to the pointer v.
//
// See [Decoder] for a description of the decoding process.
-func Decode(data string, v interface{}) (MetaData, error) {
+func Decode(data string, v any) (MetaData, error) {
return NewDecoder(strings.NewReader(data)).Decode(v)
}
// DecodeFile reads the contents of a file and decodes it with [Decode].
-func DecodeFile(path string, v interface{}) (MetaData, error) {
+func DecodeFile(path string, v any) (MetaData, error) {
fp, err := os.Open(path)
if err != nil {
return MetaData{}, err
@@ -46,6 +46,17 @@ func DecodeFile(path string, v interface{}) (MetaData, error) {
return NewDecoder(fp).Decode(v)
}
+// DecodeFS reads the contents of a file from [fs.FS] and decodes it with
+// [Decode].
+func DecodeFS(fsys fs.FS, path string, v any) (MetaData, error) {
+ fp, err := fsys.Open(path)
+ if err != nil {
+ return MetaData{}, err
+ }
+ defer fp.Close()
+ return NewDecoder(fp).Decode(v)
+}
+
// Primitive is a TOML value that hasn't been decoded into a Go value.
//
// This type can be used for any value, which will cause decoding to be delayed.
@@ -58,7 +69,7 @@ func DecodeFile(path string, v interface{}) (MetaData, error) {
// overhead of reflection. They can be useful when you don't know the exact type
// of TOML data until runtime.
type Primitive struct {
- undecoded interface{}
+ undecoded any
context Key
}
@@ -91,7 +102,7 @@ const (
// UnmarshalText method. See the Unmarshaler example for a demonstration with
// email addresses.
//
-// ### Key mapping
+// # Key mapping
//
// TOML keys can map to either keys in a Go map or field names in a Go struct.
// The special `toml` struct tag can be used to map TOML keys to struct fields
@@ -122,7 +133,7 @@ var (
)
// Decode TOML data in to the pointer `v`.
-func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
+func (dec *Decoder) Decode(v any) (MetaData, error) {
rv := reflect.ValueOf(v)
if rv.Kind() != reflect.Ptr {
s := "%q"
@@ -136,8 +147,8 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
return MetaData{}, fmt.Errorf("toml: cannot decode to nil value of %q", reflect.TypeOf(v))
}
- // Check if this is a supported type: struct, map, interface{}, or something
- // that implements UnmarshalTOML or UnmarshalText.
+ // Check if this is a supported type: struct, map, any, or something that
+ // implements UnmarshalTOML or UnmarshalText.
rv = indirect(rv)
rt := rv.Type()
if rv.Kind() != reflect.Struct && rv.Kind() != reflect.Map &&
@@ -148,7 +159,7 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
// TODO: parser should read from io.Reader? Or at the very least, make it
// read from []byte rather than string
- data, err := ioutil.ReadAll(dec.r)
+ data, err := io.ReadAll(dec.r)
if err != nil {
return MetaData{}, err
}
@@ -179,7 +190,7 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
// will only reflect keys that were decoded. Namely, any keys hidden behind a
// Primitive will be considered undecoded. Executing this method will update the
// undecoded keys in the meta data. (See the example.)
-func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
+func (md *MetaData) PrimitiveDecode(primValue Primitive, v any) error {
md.context = primValue.context
defer func() { md.context = nil }()
return md.unify(primValue.undecoded, rvalue(v))
@@ -190,7 +201,7 @@ func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
//
// Any type mismatch produces an error. Finding a type that we don't know
// how to handle produces an unsupported type error.
-func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
+func (md *MetaData) unify(data any, rv reflect.Value) error {
// Special case. Look for a `Primitive` value.
// TODO: #76 would make this superfluous after implemented.
if rv.Type() == primitiveType {
@@ -207,7 +218,11 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
rvi := rv.Interface()
if v, ok := rvi.(Unmarshaler); ok {
- return v.UnmarshalTOML(data)
+ err := v.UnmarshalTOML(data)
+ if err != nil {
+ return md.parseErr(err)
+ }
+ return nil
}
if v, ok := rvi.(encoding.TextUnmarshaler); ok {
return md.unifyText(data, v)
@@ -227,14 +242,6 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
return md.unifyInt(data, rv)
}
switch k {
- case reflect.Ptr:
- elem := reflect.New(rv.Type().Elem())
- err := md.unify(data, reflect.Indirect(elem))
- if err != nil {
- return err
- }
- rv.Set(elem)
- return nil
case reflect.Struct:
return md.unifyStruct(data, rv)
case reflect.Map:
@@ -248,7 +255,7 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
case reflect.Bool:
return md.unifyBool(data, rv)
case reflect.Interface:
- if rv.NumMethod() > 0 { // Only support empty interfaces are supported.
+ if rv.NumMethod() > 0 { /// Only empty interfaces are supported.
return md.e("unsupported type %s", rv.Type())
}
return md.unifyAnything(data, rv)
@@ -258,14 +265,13 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
return md.e("unsupported type %s", rv.Kind())
}
-func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
- tmap, ok := mapping.(map[string]interface{})
+func (md *MetaData) unifyStruct(mapping any, rv reflect.Value) error {
+ tmap, ok := mapping.(map[string]any)
if !ok {
if mapping == nil {
return nil
}
- return md.e("type mismatch for %s: expected table but found %T",
- rv.Type().String(), mapping)
+ return md.e("type mismatch for %s: expected table but found %s", rv.Type().String(), fmtType(mapping))
}
for key, datum := range tmap {
@@ -304,14 +310,14 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
return nil
}
-func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
+func (md *MetaData) unifyMap(mapping any, rv reflect.Value) error {
keyType := rv.Type().Key().Kind()
if keyType != reflect.String && keyType != reflect.Interface {
return fmt.Errorf("toml: cannot decode to a map with non-string key type (%s in %q)",
keyType, rv.Type())
}
- tmap, ok := mapping.(map[string]interface{})
+ tmap, ok := mapping.(map[string]any)
if !ok {
if tmap == nil {
return nil
@@ -347,7 +353,7 @@ func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
return nil
}
-func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
+func (md *MetaData) unifyArray(data any, rv reflect.Value) error {
datav := reflect.ValueOf(data)
if datav.Kind() != reflect.Slice {
if !datav.IsValid() {
@@ -361,7 +367,7 @@ func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
return md.unifySliceArray(datav, rv)
}
-func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
+func (md *MetaData) unifySlice(data any, rv reflect.Value) error {
datav := reflect.ValueOf(data)
if datav.Kind() != reflect.Slice {
if !datav.IsValid() {
@@ -388,7 +394,7 @@ func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
return nil
}
-func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
+func (md *MetaData) unifyString(data any, rv reflect.Value) error {
_, ok := rv.Interface().(json.Number)
if ok {
if i, ok := data.(int64); ok {
@@ -408,7 +414,7 @@ func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
return md.badtype("string", data)
}
-func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
+func (md *MetaData) unifyFloat64(data any, rv reflect.Value) error {
rvk := rv.Kind()
if num, ok := data.(float64); ok {
@@ -429,7 +435,7 @@ func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
if num, ok := data.(int64); ok {
if (rvk == reflect.Float32 && (num < -maxSafeFloat32Int || num > maxSafeFloat32Int)) ||
(rvk == reflect.Float64 && (num < -maxSafeFloat64Int || num > maxSafeFloat64Int)) {
- return md.parseErr(errParseRange{i: num, size: rvk.String()})
+ return md.parseErr(errUnsafeFloat{i: num, size: rvk.String()})
}
rv.SetFloat(float64(num))
return nil
@@ -438,7 +444,7 @@ func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
return md.badtype("float", data)
}
-func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
+func (md *MetaData) unifyInt(data any, rv reflect.Value) error {
_, ok := rv.Interface().(time.Duration)
if ok {
// Parse as string duration, and fall back to regular integer parsing
@@ -481,7 +487,7 @@ func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
return nil
}
-func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
+func (md *MetaData) unifyBool(data any, rv reflect.Value) error {
if b, ok := data.(bool); ok {
rv.SetBool(b)
return nil
@@ -489,12 +495,12 @@ func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
return md.badtype("boolean", data)
}
-func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
+func (md *MetaData) unifyAnything(data any, rv reflect.Value) error {
rv.Set(reflect.ValueOf(data))
return nil
}
-func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) error {
+func (md *MetaData) unifyText(data any, v encoding.TextUnmarshaler) error {
var s string
switch sdata := data.(type) {
case Marshaler:
@@ -523,13 +529,13 @@ func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) erro
return md.badtype("primitive (string-like)", data)
}
if err := v.UnmarshalText([]byte(s)); err != nil {
- return err
+ return md.parseErr(err)
}
return nil
}
-func (md *MetaData) badtype(dst string, data interface{}) error {
- return md.e("incompatible types: TOML value has type %T; destination has type %s", data, dst)
+func (md *MetaData) badtype(dst string, data any) error {
+ return md.e("incompatible types: TOML value has type %s; destination has type %s", fmtType(data), dst)
}
func (md *MetaData) parseErr(err error) error {
@@ -543,7 +549,7 @@ func (md *MetaData) parseErr(err error) error {
}
}
-func (md *MetaData) e(format string, args ...interface{}) error {
+func (md *MetaData) e(format string, args ...any) error {
f := "toml: "
if len(md.context) > 0 {
f = fmt.Sprintf("toml: (last key %q): ", md.context)
@@ -556,7 +562,7 @@ func (md *MetaData) e(format string, args ...interface{}) error {
}
// rvalue returns a reflect.Value of `v`. All pointers are resolved.
-func rvalue(v interface{}) reflect.Value {
+func rvalue(v any) reflect.Value {
return indirect(reflect.ValueOf(v))
}
@@ -600,3 +606,8 @@ func isUnifiable(rv reflect.Value) bool {
}
return false
}
+
+// fmt %T with "interface {}" replaced with "any", which is far more readable.
+func fmtType(t any) string {
+ return strings.ReplaceAll(fmt.Sprintf("%T", t), "interface {}", "any")
+}
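The DecodeFS helper added above (no longer gated behind the go1.16 build tag; see the deleted decode_go116.go below) works with any fs.FS. A brief usage sketch; the embedded config.toml file name and the Config struct are assumptions for illustration:

package main

import (
	"embed"
	"fmt"

	"github.com/BurntSushi/toml"
)

//go:embed config.toml
var configFS embed.FS

type Config struct {
	Listen string `toml:"listen"`
}

func main() {
	var cfg Config
	// DecodeFS opens config.toml from the embedded filesystem and decodes it.
	if _, err := toml.DecodeFS(configFS, "config.toml", &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Listen)
}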
diff --git a/vendor/github.com/BurntSushi/toml/decode_go116.go b/vendor/github.com/BurntSushi/toml/decode_go116.go
deleted file mode 100644
index 086d0b6..0000000
--- a/vendor/github.com/BurntSushi/toml/decode_go116.go
+++ /dev/null
@@ -1,19 +0,0 @@
-//go:build go1.16
-// +build go1.16
-
-package toml
-
-import (
- "io/fs"
-)
-
-// DecodeFS reads the contents of a file from [fs.FS] and decodes it with
-// [Decode].
-func DecodeFS(fsys fs.FS, path string, v interface{}) (MetaData, error) {
- fp, err := fsys.Open(path)
- if err != nil {
- return MetaData{}, err
- }
- defer fp.Close()
- return NewDecoder(fp).Decode(v)
-}
diff --git a/vendor/github.com/BurntSushi/toml/deprecated.go b/vendor/github.com/BurntSushi/toml/deprecated.go
index c6af3f2..155709a 100644
--- a/vendor/github.com/BurntSushi/toml/deprecated.go
+++ b/vendor/github.com/BurntSushi/toml/deprecated.go
@@ -5,17 +5,25 @@ import (
"io"
)
+// TextMarshaler is an alias for encoding.TextMarshaler.
+//
// Deprecated: use encoding.TextMarshaler
type TextMarshaler encoding.TextMarshaler
+// TextUnmarshaler is an alias for encoding.TextUnmarshaler.
+//
// Deprecated: use encoding.TextUnmarshaler
type TextUnmarshaler encoding.TextUnmarshaler
+// DecodeReader is an alias for NewDecoder(r).Decode(v).
+//
+// Deprecated: use NewDecoder(reader).Decode(&value).
+func DecodeReader(r io.Reader, v any) (MetaData, error) { return NewDecoder(r).Decode(v) }
+
+// PrimitiveDecode is an alias for MetaData.PrimitiveDecode().
+//
// Deprecated: use MetaData.PrimitiveDecode.
-func PrimitiveDecode(primValue Primitive, v interface{}) error {
+func PrimitiveDecode(primValue Primitive, v any) error {
md := MetaData{decoded: make(map[string]struct{})}
return md.unify(primValue.undecoded, rvalue(v))
}
-
-// Deprecated: use NewDecoder(reader).Decode(&value).
-func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { return NewDecoder(r).Decode(v) }
diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go
index 81a7c0f..82c90a9 100644
--- a/vendor/github.com/BurntSushi/toml/doc.go
+++ b/vendor/github.com/BurntSushi/toml/doc.go
@@ -2,9 +2,6 @@
//
// This package supports TOML v1.0.0, as specified at https://toml.io
//
-// There is also support for delaying decoding with the Primitive type, and
-// querying the set of keys in a TOML document with the MetaData type.
-//
// The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator,
// and can be used to verify if TOML document is valid. It can also be used to
// print the type of each key.
diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go
index 930e1d5..73366c0 100644
--- a/vendor/github.com/BurntSushi/toml/encode.go
+++ b/vendor/github.com/BurntSushi/toml/encode.go
@@ -2,6 +2,7 @@ package toml
import (
"bufio"
+ "bytes"
"encoding"
"encoding/json"
"errors"
@@ -76,6 +77,17 @@ type Marshaler interface {
MarshalTOML() ([]byte, error)
}
+// Marshal returns a TOML representation of the Go value.
+//
+// See [Encoder] for a description of the encoding process.
+func Marshal(v any) ([]byte, error) {
+ buff := new(bytes.Buffer)
+ if err := NewEncoder(buff).Encode(v); err != nil {
+ return nil, err
+ }
+ return buff.Bytes(), nil
+}
+
// Encoder encodes a Go to a TOML document.
//
// The mapping between Go values and TOML values should be precisely the same as
@@ -115,28 +127,24 @@ type Marshaler interface {
// NOTE: only exported keys are encoded due to the use of reflection. Unexported
// keys are silently discarded.
type Encoder struct {
- // String to use for a single indentation level; default is two spaces.
- Indent string
-
+ Indent string // string for a single indentation level; default is two spaces.
+ hasWritten bool // written any output to w yet?
w *bufio.Writer
- hasWritten bool // written any output to w yet?
}
// NewEncoder create a new Encoder.
func NewEncoder(w io.Writer) *Encoder {
- return &Encoder{
- w: bufio.NewWriter(w),
- Indent: " ",
- }
+ return &Encoder{w: bufio.NewWriter(w), Indent: " "}
}
// Encode writes a TOML representation of the Go value to the [Encoder]'s writer.
//
// An error is returned if the value given cannot be encoded to a valid TOML
// document.
-func (enc *Encoder) Encode(v interface{}) error {
+func (enc *Encoder) Encode(v any) error {
rv := eindirect(reflect.ValueOf(v))
- if err := enc.safeEncode(Key([]string{}), rv); err != nil {
+ err := enc.safeEncode(Key([]string{}), rv)
+ if err != nil {
return err
}
return enc.w.Flush()
@@ -279,18 +287,30 @@ func (enc *Encoder) eElement(rv reflect.Value) {
case reflect.Float32:
f := rv.Float()
if math.IsNaN(f) {
+ if math.Signbit(f) {
+ enc.wf("-")
+ }
enc.wf("nan")
} else if math.IsInf(f, 0) {
- enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)])
+ if math.Signbit(f) {
+ enc.wf("-")
+ }
+ enc.wf("inf")
} else {
enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 32)))
}
case reflect.Float64:
f := rv.Float()
if math.IsNaN(f) {
+ if math.Signbit(f) {
+ enc.wf("-")
+ }
enc.wf("nan")
} else if math.IsInf(f, 0) {
- enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)])
+ if math.Signbit(f) {
+ enc.wf("-")
+ }
+ enc.wf("inf")
} else {
enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 64)))
}
@@ -303,7 +323,7 @@ func (enc *Encoder) eElement(rv reflect.Value) {
case reflect.Interface:
enc.eElement(rv.Elem())
default:
- encPanic(fmt.Errorf("unexpected type: %T", rv.Interface()))
+ encPanic(fmt.Errorf("unexpected type: %s", fmtType(rv.Interface())))
}
}
@@ -457,6 +477,16 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
frv := eindirect(rv.Field(i))
+ if is32Bit {
+ // Copy so it works correct on 32bit archs; not clear why this
+ // is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4
+ // This also works fine on 64bit, but 32bit archs are somewhat
+ // rare and this is a wee bit faster.
+ copyStart := make([]int, len(start))
+ copy(copyStart, start)
+ start = copyStart
+ }
+
// Treat anonymous struct fields with tag names as though they are
// not anonymous, like encoding/json does.
//
@@ -471,17 +501,7 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
if typeIsTable(tomlTypeOfGo(frv)) {
fieldsSub = append(fieldsSub, append(start, f.Index...))
} else {
- // Copy so it works correct on 32bit archs; not clear why this
- // is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4
- // This also works fine on 64bit, but 32bit archs are somewhat
- // rare and this is a wee bit faster.
- if is32Bit {
- copyStart := make([]int, len(start))
- copy(copyStart, start)
- fieldsDirect = append(fieldsDirect, append(copyStart, f.Index...))
- } else {
- fieldsDirect = append(fieldsDirect, append(start, f.Index...))
- }
+ fieldsDirect = append(fieldsDirect, append(start, f.Index...))
}
}
}
@@ -490,24 +510,27 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
writeFields := func(fields [][]int) {
for _, fieldIndex := range fields {
fieldType := rt.FieldByIndex(fieldIndex)
- fieldVal := eindirect(rv.FieldByIndex(fieldIndex))
+ fieldVal := rv.FieldByIndex(fieldIndex)
- if isNil(fieldVal) { /// Don't write anything for nil fields.
+ opts := getOptions(fieldType.Tag)
+ if opts.skip {
+ continue
+ }
+ if opts.omitempty && isEmpty(fieldVal) {
continue
}
- opts := getOptions(fieldType.Tag)
- if opts.skip {
+ fieldVal = eindirect(fieldVal)
+
+ if isNil(fieldVal) { /// Don't write anything for nil fields.
continue
}
+
keyName := fieldType.Name
if opts.name != "" {
keyName = opts.name
}
- if opts.omitempty && enc.isEmpty(fieldVal) {
- continue
- }
if opts.omitzero && isZero(fieldVal) {
continue
}
@@ -649,7 +672,7 @@ func isZero(rv reflect.Value) bool {
return false
}
-func (enc *Encoder) isEmpty(rv reflect.Value) bool {
+func isEmpty(rv reflect.Value) bool {
switch rv.Kind() {
case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
return rv.Len() == 0
@@ -664,13 +687,15 @@ func (enc *Encoder) isEmpty(rv reflect.Value) bool {
// type b struct{ s []string }
// s := a{field: b{s: []string{"AAA"}}}
for i := 0; i < rv.NumField(); i++ {
- if !enc.isEmpty(rv.Field(i)) {
+ if !isEmpty(rv.Field(i)) {
return false
}
}
return true
case reflect.Bool:
return !rv.Bool()
+ case reflect.Ptr:
+ return rv.IsNil()
}
return false
}
@@ -693,8 +718,11 @@ func (enc *Encoder) newline() {
// v v v v vv
// key = {k = 1, k2 = 2}
func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) {
+ /// Marshaler used on top-level document; call eElement() to just call
+ /// Marshal{TOML,Text}.
if len(key) == 0 {
- encPanic(errNoKey)
+ enc.eElement(val)
+ return
}
enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
enc.eElement(val)
@@ -703,7 +731,7 @@ func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) {
}
}
-func (enc *Encoder) wf(format string, v ...interface{}) {
+func (enc *Encoder) wf(format string, v ...any) {
_, err := fmt.Fprintf(enc.w, format, v...)
if err != nil {
encPanic(err)
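The new package-level Marshal added above mirrors Unmarshal by buffering the Encoder output. A quick sketch of how it is used; the Server struct is an assumption for illustration:

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

type Server struct {
	Host string `toml:"host"`
	Port int    `toml:"port"`
}

func main() {
	// Marshal encodes into an in-memory buffer and returns the bytes.
	out, err := toml.Marshal(Server{Host: "localhost", Port: 8080})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// host = "localhost"
	// port = 8080
}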
diff --git a/vendor/github.com/BurntSushi/toml/error.go b/vendor/github.com/BurntSushi/toml/error.go
index f4f390e..b45a3f4 100644
--- a/vendor/github.com/BurntSushi/toml/error.go
+++ b/vendor/github.com/BurntSushi/toml/error.go
@@ -84,7 +84,7 @@ func (pe ParseError) Error() string {
pe.Position.Line, pe.LastKey, msg)
}
-// ErrorWithUsage() returns the error with detailed location context.
+// ErrorWithPosition returns the error with detailed location context.
//
// See the documentation on [ParseError].
func (pe ParseError) ErrorWithPosition() string {
@@ -114,17 +114,26 @@ func (pe ParseError) ErrorWithPosition() string {
msg, pe.Position.Line, col, col+pe.Position.Len)
}
if pe.Position.Line > 2 {
- fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, lines[pe.Position.Line-3])
+ fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, expandTab(lines[pe.Position.Line-3]))
}
if pe.Position.Line > 1 {
- fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-1, lines[pe.Position.Line-2])
+ fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-1, expandTab(lines[pe.Position.Line-2]))
}
- fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, lines[pe.Position.Line-1])
- fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col), strings.Repeat("^", pe.Position.Len))
+
+ /// Expand tabs, so that the ^^^s are at the correct position, but leave
+ /// "column 10-13" intact. Adjusting this to the visual column would be
+ /// better, but we don't know the tabsize of the user in their editor, which
+ /// can be 8, 4, 2, or something else. We can't know. So leaving it as the
+ /// character index is probably the "most correct".
+ expanded := expandTab(lines[pe.Position.Line-1])
+ diff := len(expanded) - len(lines[pe.Position.Line-1])
+
+ fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, expanded)
+ fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col+diff), strings.Repeat("^", pe.Position.Len))
return b.String()
}
-// ErrorWithUsage() returns the error with detailed location context and usage
+// ErrorWithUsage returns the error with detailed location context and usage
// guidance.
//
// See the documentation on [ParseError].
@@ -159,17 +168,47 @@ func (pe ParseError) column(lines []string) int {
return col
}
+func expandTab(s string) string {
+ var (
+ b strings.Builder
+ l int
+ fill = func(n int) string {
+ b := make([]byte, n)
+ for i := range b {
+ b[i] = ' '
+ }
+ return string(b)
+ }
+ )
+ b.Grow(len(s))
+ for _, r := range s {
+ switch r {
+ case '\t':
+ tw := 8 - l%8
+ b.WriteString(fill(tw))
+ l += tw
+ default:
+ b.WriteRune(r)
+ l += 1
+ }
+ }
+ return b.String()
+}
+
type (
errLexControl struct{ r rune }
errLexEscape struct{ r rune }
errLexUTF8 struct{ b byte }
- errLexInvalidNum struct{ v string }
- errLexInvalidDate struct{ v string }
+ errParseDate struct{ v string }
errLexInlineTableNL struct{}
errLexStringNL struct{}
errParseRange struct {
- i interface{} // int or float
- size string // "int64", "uint16", etc.
+ i any // int or float
+ size string // "int64", "uint16", etc.
+ }
+ errUnsafeFloat struct {
+ i interface{} // float32 or float64
+ size string // "float32" or "float64"
}
errParseDuration struct{ d string }
)
@@ -183,18 +222,20 @@ func (e errLexEscape) Error() string { return fmt.Sprintf(`invalid escape
func (e errLexEscape) Usage() string { return usageEscape }
func (e errLexUTF8) Error() string { return fmt.Sprintf("invalid UTF-8 byte: 0x%02x", e.b) }
func (e errLexUTF8) Usage() string { return "" }
-func (e errLexInvalidNum) Error() string { return fmt.Sprintf("invalid number: %q", e.v) }
-func (e errLexInvalidNum) Usage() string { return "" }
-func (e errLexInvalidDate) Error() string { return fmt.Sprintf("invalid date: %q", e.v) }
-func (e errLexInvalidDate) Usage() string { return "" }
+func (e errParseDate) Error() string { return fmt.Sprintf("invalid datetime: %q", e.v) }
+func (e errParseDate) Usage() string { return usageDate }
func (e errLexInlineTableNL) Error() string { return "newlines not allowed within inline tables" }
func (e errLexInlineTableNL) Usage() string { return usageInlineNewline }
func (e errLexStringNL) Error() string { return "strings cannot contain newlines" }
func (e errLexStringNL) Usage() string { return usageStringNewline }
func (e errParseRange) Error() string { return fmt.Sprintf("%v is out of range for %s", e.i, e.size) }
func (e errParseRange) Usage() string { return usageIntOverflow }
-func (e errParseDuration) Error() string { return fmt.Sprintf("invalid duration: %q", e.d) }
-func (e errParseDuration) Usage() string { return usageDuration }
+func (e errUnsafeFloat) Error() string {
+ return fmt.Sprintf("%v is out of the safe %s range", e.i, e.size)
+}
+func (e errUnsafeFloat) Usage() string { return usageUnsafeFloat }
+func (e errParseDuration) Error() string { return fmt.Sprintf("invalid duration: %q", e.d) }
+func (e errParseDuration) Usage() string { return usageDuration }
const usageEscape = `
A '\' inside a "-delimited string is interpreted as an escape character.
@@ -251,19 +292,35 @@ bug in the program that uses too small of an integer.
The maximum and minimum values are:
size │ lowest │ highest
- ───────┼────────────────┼──────────
+ ───────┼────────────────┼──────────────
int8 │ -128 │ 127
int16 │ -32,768 │ 32,767
int32 │ -2,147,483,648 │ 2,147,483,647
int64 │ -9.2 × 10¹⁷ │ 9.2 × 10¹⁷
uint8 │ 0 │ 255
- uint16 │ 0 │ 65535
- uint32 │ 0 │ 4294967295
+ uint16 │ 0 │ 65,535
+ uint32 │ 0 │ 4,294,967,295
uint64 │ 0 │ 1.8 × 10¹⁸
int refers to int32 on 32-bit systems and int64 on 64-bit systems.
`
+const usageUnsafeFloat = `
+This number is outside of the "safe" range for floating point numbers; whole
+(non-fractional) numbers outside the below range can not always be represented
+accurately in a float, leading to some loss of accuracy.
+
+Explicitly mark a number as a fractional unit by adding ".0", which will incur
+some loss of accuracy; for example:
+
+ f = 2_000_000_000.0
+
+Accuracy ranges:
+
+ float32 = 16,777,215
+ float64 = 9,007,199,254,740,991
+`
+
const usageDuration = `
A duration must be as "number<unit>", without any spaces. Valid units are:
@@ -277,3 +334,23 @@ A duration must be as "number<unit>", without any spaces. Valid units are:
You can combine multiple units; for example "5m10s" for 5 minutes and 10
seconds.
`
+
+const usageDate = `
+A TOML datetime must be in one of the following formats:
+
+ 2006-01-02T15:04:05Z07:00 Date and time, with timezone.
+ 2006-01-02T15:04:05 Date and time, but without timezone.
+ 2006-01-02 Date without a time or timezone.
+ 15:04:05 Just a time, without any timezone.
+
+Seconds may optionally have a fraction, up to nanosecond precision:
+
+ 15:04:05.123
+ 15:04:05.856018510
+`
+
+// TOML 1.1:
+// The seconds part in times is optional, and may be omitted:
+// 2006-01-02T15:04Z07:00
+// 2006-01-02T15:04
+// 15:04
diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go
index d4d7087..a1016d9 100644
--- a/vendor/github.com/BurntSushi/toml/lex.go
+++ b/vendor/github.com/BurntSushi/toml/lex.go
@@ -17,6 +17,7 @@ const (
itemEOF
itemText
itemString
+ itemStringEsc
itemRawString
itemMultilineString
itemRawMultilineString
@@ -46,12 +47,14 @@ func (p Position) String() string {
}
type lexer struct {
- input string
- start int
- pos int
- line int
- state stateFn
- items chan item
+ input string
+ start int
+ pos int
+ line int
+ state stateFn
+ items chan item
+ tomlNext bool
+ esc bool
// Allow for backing up up to 4 runes. This is necessary because TOML
// contains 3-rune tokens (""" and ''').
@@ -87,13 +90,14 @@ func (lx *lexer) nextItem() item {
}
}
-func lex(input string) *lexer {
+func lex(input string, tomlNext bool) *lexer {
lx := &lexer{
- input: input,
- state: lexTop,
- items: make(chan item, 10),
- stack: make([]stateFn, 0, 10),
- line: 1,
+ input: input,
+ state: lexTop,
+ items: make(chan item, 10),
+ stack: make([]stateFn, 0, 10),
+ line: 1,
+ tomlNext: tomlNext,
}
return lx
}
@@ -162,7 +166,7 @@ func (lx *lexer) next() (r rune) {
}
r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
- if r == utf8.RuneError {
+ if r == utf8.RuneError && w == 1 {
lx.error(errLexUTF8{lx.input[lx.pos]})
return utf8.RuneError
}
@@ -268,7 +272,7 @@ func (lx *lexer) errorPos(start, length int, err error) stateFn {
}
// errorf is like error, and creates a new error.
-func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
+func (lx *lexer) errorf(format string, values ...any) stateFn {
if lx.atEOF {
pos := lx.getPos()
pos.Line--
@@ -331,9 +335,7 @@ func lexTopEnd(lx *lexer) stateFn {
lx.emit(itemEOF)
return nil
}
- return lx.errorf(
- "expected a top-level item to end with a newline, comment, or EOF, but got %q instead",
- r)
+ return lx.errorf("expected a top-level item to end with a newline, comment, or EOF, but got %q instead", r)
}
// lexTable lexes the beginning of a table. Namely, it makes sure that
@@ -408,7 +410,7 @@ func lexTableNameEnd(lx *lexer) stateFn {
// Lexes only one part, e.g. only 'a' inside 'a.b'.
func lexBareName(lx *lexer) stateFn {
r := lx.next()
- if isBareKeyChar(r) {
+ if isBareKeyChar(r, lx.tomlNext) {
return lexBareName
}
lx.backup()
@@ -618,6 +620,9 @@ func lexInlineTableValue(lx *lexer) stateFn {
case isWhitespace(r):
return lexSkip(lx, lexInlineTableValue)
case isNL(r):
+ if lx.tomlNext {
+ return lexSkip(lx, lexInlineTableValue)
+ }
return lx.errorPrevLine(errLexInlineTableNL{})
case r == '#':
lx.push(lexInlineTableValue)
@@ -640,6 +645,9 @@ func lexInlineTableValueEnd(lx *lexer) stateFn {
case isWhitespace(r):
return lexSkip(lx, lexInlineTableValueEnd)
case isNL(r):
+ if lx.tomlNext {
+ return lexSkip(lx, lexInlineTableValueEnd)
+ }
return lx.errorPrevLine(errLexInlineTableNL{})
case r == '#':
lx.push(lexInlineTableValueEnd)
@@ -648,6 +656,9 @@ func lexInlineTableValueEnd(lx *lexer) stateFn {
lx.ignore()
lx.skip(isWhitespace)
if lx.peek() == '}' {
+ if lx.tomlNext {
+ return lexInlineTableValueEnd
+ }
return lx.errorf("trailing comma not allowed in inline tables")
}
return lexInlineTableValue
@@ -687,7 +698,12 @@ func lexString(lx *lexer) stateFn {
return lexStringEscape
case r == '"':
lx.backup()
- lx.emit(itemString)
+ if lx.esc {
+ lx.esc = false
+ lx.emit(itemStringEsc)
+ } else {
+ lx.emit(itemString)
+ }
lx.next()
lx.ignore()
return lx.pop()
@@ -737,6 +753,7 @@ func lexMultilineString(lx *lexer) stateFn {
lx.backup() /// backup: don't include the """ in the item.
lx.backup()
lx.backup()
+ lx.esc = false
lx.emit(itemMultilineString)
lx.next() /// Read over ''' again and discard it.
lx.next()
@@ -770,8 +787,8 @@ func lexRawString(lx *lexer) stateFn {
}
}
-// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
-// a string. It assumes that the beginning ''' has already been consumed and
+// lexMultilineRawString consumes a raw string. Nothing can be escaped in such a
+// string. It assumes that the beginning triple-' has already been consumed and
// ignored.
func lexMultilineRawString(lx *lexer) stateFn {
r := lx.next()
@@ -826,8 +843,14 @@ func lexMultilineStringEscape(lx *lexer) stateFn {
}
func lexStringEscape(lx *lexer) stateFn {
+ lx.esc = true
r := lx.next()
switch r {
+ case 'e':
+ if !lx.tomlNext {
+ return lx.error(errLexEscape{r})
+ }
+ fallthrough
case 'b':
fallthrough
case 't':
@@ -846,6 +869,11 @@ func lexStringEscape(lx *lexer) stateFn {
fallthrough
case '\\':
return lx.pop()
+ case 'x':
+ if !lx.tomlNext {
+ return lx.error(errLexEscape{r})
+ }
+ return lexHexEscape
case 'u':
return lexShortUnicodeEscape
case 'U':
@@ -854,14 +882,23 @@ func lexStringEscape(lx *lexer) stateFn {
return lx.error(errLexEscape{r})
}
+func lexHexEscape(lx *lexer) stateFn {
+ var r rune
+ for i := 0; i < 2; i++ {
+ r = lx.next()
+ if !isHex(r) {
+ return lx.errorf(`expected two hexadecimal digits after '\x', but got %q instead`, lx.current())
+ }
+ }
+ return lx.pop()
+}
+
func lexShortUnicodeEscape(lx *lexer) stateFn {
var r rune
for i := 0; i < 4; i++ {
r = lx.next()
- if !isHexadecimal(r) {
- return lx.errorf(
- `expected four hexadecimal digits after '\u', but got %q instead`,
- lx.current())
+ if !isHex(r) {
+ return lx.errorf(`expected four hexadecimal digits after '\u', but got %q instead`, lx.current())
}
}
return lx.pop()
@@ -871,10 +908,8 @@ func lexLongUnicodeEscape(lx *lexer) stateFn {
var r rune
for i := 0; i < 8; i++ {
r = lx.next()
- if !isHexadecimal(r) {
- return lx.errorf(
- `expected eight hexadecimal digits after '\U', but got %q instead`,
- lx.current())
+ if !isHex(r) {
+ return lx.errorf(`expected eight hexadecimal digits after '\U', but got %q instead`, lx.current())
}
}
return lx.pop()
@@ -941,7 +976,7 @@ func lexDatetime(lx *lexer) stateFn {
// lexHexInteger consumes a hexadecimal integer after seeing the '0x' prefix.
func lexHexInteger(lx *lexer) stateFn {
r := lx.next()
- if isHexadecimal(r) {
+ if isHex(r) {
return lexHexInteger
}
switch r {
@@ -1075,7 +1110,7 @@ func lexBaseNumberOrDate(lx *lexer) stateFn {
return lexOctalInteger
case 'x':
r = lx.peek()
- if !isHexadecimal(r) {
+ if !isHex(r) {
lx.errorf("not a hexidecimal number: '%s%c'", lx.current(), r)
}
return lexHexInteger
@@ -1173,7 +1208,7 @@ func (itype itemType) String() string {
return "EOF"
case itemText:
return "Text"
- case itemString, itemRawString, itemMultilineString, itemRawMultilineString:
+ case itemString, itemStringEsc, itemRawString, itemMultilineString, itemRawMultilineString:
return "String"
case itemBool:
return "Bool"
@@ -1206,7 +1241,7 @@ func (itype itemType) String() string {
}
func (item item) String() string {
- return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
+ return fmt.Sprintf("(%s, %s)", item.typ, item.val)
}
func isWhitespace(r rune) bool { return r == '\t' || r == ' ' }
@@ -1222,10 +1257,23 @@ func isControl(r rune) bool { // Control characters except \t, \r, \n
func isDigit(r rune) bool { return r >= '0' && r <= '9' }
func isBinary(r rune) bool { return r == '0' || r == '1' }
func isOctal(r rune) bool { return r >= '0' && r <= '7' }
-func isHexadecimal(r rune) bool {
- return (r >= '0' && r <= '9') || (r >= 'a' && r <= 'f') || (r >= 'A' && r <= 'F')
-}
-func isBareKeyChar(r rune) bool {
+func isHex(r rune) bool { return (r >= '0' && r <= '9') || (r|0x20 >= 'a' && r|0x20 <= 'f') }
+func isBareKeyChar(r rune, tomlNext bool) bool {
+ if tomlNext {
+ return (r >= 'A' && r <= 'Z') ||
+ (r >= 'a' && r <= 'z') ||
+ (r >= '0' && r <= '9') ||
+ r == '_' || r == '-' ||
+ r == 0xb2 || r == 0xb3 || r == 0xb9 || (r >= 0xbc && r <= 0xbe) ||
+ (r >= 0xc0 && r <= 0xd6) || (r >= 0xd8 && r <= 0xf6) || (r >= 0xf8 && r <= 0x037d) ||
+ (r >= 0x037f && r <= 0x1fff) ||
+ (r >= 0x200c && r <= 0x200d) || (r >= 0x203f && r <= 0x2040) ||
+ (r >= 0x2070 && r <= 0x218f) || (r >= 0x2460 && r <= 0x24ff) ||
+ (r >= 0x2c00 && r <= 0x2fef) || (r >= 0x3001 && r <= 0xd7ff) ||
+ (r >= 0xf900 && r <= 0xfdcf) || (r >= 0xfdf0 && r <= 0xfffd) ||
+ (r >= 0x10000 && r <= 0xeffff)
+ }
+
return (r >= 'A' && r <= 'Z') ||
(r >= 'a' && r <= 'z') ||
(r >= '0' && r <= '9') ||
diff --git a/vendor/github.com/BurntSushi/toml/meta.go b/vendor/github.com/BurntSushi/toml/meta.go
index 71847a0..e614537 100644
--- a/vendor/github.com/BurntSushi/toml/meta.go
+++ b/vendor/github.com/BurntSushi/toml/meta.go
@@ -13,7 +13,7 @@ type MetaData struct {
context Key // Used only during decoding.
keyInfo map[string]keyInfo
- mapping map[string]interface{}
+ mapping map[string]any
keys []Key
decoded map[string]struct{}
data []byte // Input file; for errors.
@@ -31,12 +31,12 @@ func (md *MetaData) IsDefined(key ...string) bool {
}
var (
- hash map[string]interface{}
+ hash map[string]any
ok bool
- hashOrVal interface{} = md.mapping
+ hashOrVal any = md.mapping
)
for _, k := range key {
- if hash, ok = hashOrVal.(map[string]interface{}); !ok {
+ if hash, ok = hashOrVal.(map[string]any); !ok {
return false
}
if hashOrVal, ok = hash[k]; !ok {
@@ -94,28 +94,55 @@ func (md *MetaData) Undecoded() []Key {
type Key []string
func (k Key) String() string {
- ss := make([]string, len(k))
- for i := range k {
- ss[i] = k.maybeQuoted(i)
+ // This is called quite often, so it's a bit funky to make it faster.
+ var b strings.Builder
+ b.Grow(len(k) * 25)
+outer:
+ for i, kk := range k {
+ if i > 0 {
+ b.WriteByte('.')
+ }
+ if kk == "" {
+ b.WriteString(`""`)
+ } else {
+ for _, r := range kk {
+ // "Inline" isBareKeyChar
+ if !((r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '_' || r == '-') {
+ b.WriteByte('"')
+ b.WriteString(dblQuotedReplacer.Replace(kk))
+ b.WriteByte('"')
+ continue outer
+ }
+ }
+ b.WriteString(kk)
+ }
}
- return strings.Join(ss, ".")
+ return b.String()
}
func (k Key) maybeQuoted(i int) string {
if k[i] == "" {
return `""`
}
- for _, c := range k[i] {
- if !isBareKeyChar(c) {
- return `"` + dblQuotedReplacer.Replace(k[i]) + `"`
+ for _, r := range k[i] {
+ if (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '_' || r == '-' {
+ continue
}
+ return `"` + dblQuotedReplacer.Replace(k[i]) + `"`
}
return k[i]
}
+// Like append(), but only increase the cap by 1.
func (k Key) add(piece string) Key {
+ if cap(k) > len(k) {
+ return append(k, piece)
+ }
newKey := make(Key, len(k)+1)
copy(newKey, k)
newKey[len(k)] = piece
return newKey
}
+
+func (k Key) parent() Key { return k[:len(k)-1] } // all except the last piece.
+func (k Key) last() string { return k[len(k)-1] } // last piece of this key.
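MetaData, whose Key formatting is optimised above, is what reports defined and undecoded keys after a decode. A short sketch; the TOML snippet and the struct are illustrative assumptions:

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var conf struct {
		Name string `toml:"name"`
	}
	md, err := toml.Decode("name = \"x\"\nextra = 1\n", &conf)
	if err != nil {
		panic(err)
	}
	fmt.Println(md.IsDefined("name")) // true
	fmt.Println(md.Undecoded())       // [extra]
}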
diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go
index d2542d6..11ac310 100644
--- a/vendor/github.com/BurntSushi/toml/parse.go
+++ b/vendor/github.com/BurntSushi/toml/parse.go
@@ -2,6 +2,8 @@ package toml
import (
"fmt"
+ "math"
+ "os"
"strconv"
"strings"
"time"
@@ -15,12 +17,13 @@ type parser struct {
context Key // Full key for the current hash in scope.
currentKey string // Base key name for everything except hashes.
pos Position // Current position in the TOML file.
+ tomlNext bool
ordered []Key // List of keys in the order that they appear in the TOML data.
- keyInfo map[string]keyInfo // Map keyname → info about the TOML key.
- mapping map[string]interface{} // Map keyname → key value.
- implicits map[string]struct{} // Record implicit keys (e.g. "key.group.names").
+ keyInfo map[string]keyInfo // Map keyname → info about the TOML key.
+ mapping map[string]any // Map keyname → key value.
+ implicits map[string]struct{} // Record implicit keys (e.g. "key.group.names").
}
type keyInfo struct {
@@ -29,6 +32,8 @@ type keyInfo struct {
}
func parse(data string) (p *parser, err error) {
+ _, tomlNext := os.LookupEnv("BURNTSUSHI_TOML_110")
+
defer func() {
if r := recover(); r != nil {
if pErr, ok := r.(ParseError); ok {
@@ -41,9 +46,13 @@ func parse(data string) (p *parser, err error) {
}()
// Read over BOM; do this here as the lexer calls utf8.DecodeRuneInString()
- // which mangles stuff.
- if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") {
+ // which mangles stuff. UTF-16 BOM isn't strictly valid, but some tools add
+ // it anyway.
+ if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { // UTF-16
data = data[2:]
+ //lint:ignore S1017 https://github.com/dominikh/go-tools/issues/1447
+ } else if strings.HasPrefix(data, "\xef\xbb\xbf") { // UTF-8
+ data = data[3:]
}
// Examine first few bytes for NULL bytes; this probably means it's a UTF-16
@@ -64,10 +73,11 @@ func parse(data string) (p *parser, err error) {
p = &parser{
keyInfo: make(map[string]keyInfo),
- mapping: make(map[string]interface{}),
- lx: lex(data),
+ mapping: make(map[string]any),
+ lx: lex(data, tomlNext),
ordered: make([]Key, 0),
implicits: make(map[string]struct{}),
+ tomlNext: tomlNext,
}
for {
item := p.next()
@@ -89,7 +99,7 @@ func (p *parser) panicErr(it item, err error) {
})
}
-func (p *parser) panicItemf(it item, format string, v ...interface{}) {
+func (p *parser) panicItemf(it item, format string, v ...any) {
panic(ParseError{
Message: fmt.Sprintf(format, v...),
Position: it.pos,
@@ -98,7 +108,7 @@ func (p *parser) panicItemf(it item, format string, v ...interface{}) {
})
}
-func (p *parser) panicf(format string, v ...interface{}) {
+func (p *parser) panicf(format string, v ...any) {
panic(ParseError{
Message: fmt.Sprintf(format, v...),
Position: p.pos,
@@ -131,7 +141,7 @@ func (p *parser) nextPos() item {
return it
}
-func (p *parser) bug(format string, v ...interface{}) {
+func (p *parser) bug(format string, v ...any) {
panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
}
@@ -186,20 +196,21 @@ func (p *parser) topLevel(item item) {
p.assertEqual(itemKeyEnd, k.typ)
/// The current key is the last part.
- p.currentKey = key[len(key)-1]
+ p.currentKey = key.last()
/// All the other parts (if any) are the context; need to set each part
/// as implicit.
- context := key[:len(key)-1]
+ context := key.parent()
for i := range context {
p.addImplicitContext(append(p.context, context[i:i+1]...))
}
+ p.ordered = append(p.ordered, p.context.add(p.currentKey))
/// Set value.
vItem := p.next()
val, typ := p.value(vItem, false)
- p.set(p.currentKey, val, typ, vItem.pos)
- p.ordered = append(p.ordered, p.context.add(p.currentKey))
+ p.setValue(p.currentKey, val)
+ p.setType(p.currentKey, typ, vItem.pos)
/// Remove the context we added (preserving any context from [tbl] lines).
p.context = outerContext
@@ -214,7 +225,7 @@ func (p *parser) keyString(it item) string {
switch it.typ {
case itemText:
return it.val
- case itemString, itemMultilineString,
+ case itemString, itemStringEsc, itemMultilineString,
itemRawString, itemRawMultilineString:
s, _ := p.value(it, false)
return s.(string)
@@ -231,12 +242,14 @@ var datetimeRepl = strings.NewReplacer(
// value translates an expected value from the lexer into a Go value wrapped
// as an empty interface.
-func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) {
+func (p *parser) value(it item, parentIsArray bool) (any, tomlType) {
switch it.typ {
case itemString:
+ return it.val, p.typeOfPrimitive(it)
+ case itemStringEsc:
return p.replaceEscapes(it, it.val), p.typeOfPrimitive(it)
case itemMultilineString:
- return p.replaceEscapes(it, stripFirstNewline(p.stripEscapedNewlines(it.val))), p.typeOfPrimitive(it)
+ return p.replaceEscapes(it, p.stripEscapedNewlines(stripFirstNewline(it.val))), p.typeOfPrimitive(it)
case itemRawString:
return it.val, p.typeOfPrimitive(it)
case itemRawMultilineString:
@@ -266,7 +279,7 @@ func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) {
panic("unreachable")
}
-func (p *parser) valueInteger(it item) (interface{}, tomlType) {
+func (p *parser) valueInteger(it item) (any, tomlType) {
if !numUnderscoresOK(it.val) {
p.panicItemf(it, "Invalid integer %q: underscores must be surrounded by digits", it.val)
}
@@ -290,7 +303,7 @@ func (p *parser) valueInteger(it item) (interface{}, tomlType) {
return num, p.typeOfPrimitive(it)
}
-func (p *parser) valueFloat(it item) (interface{}, tomlType) {
+func (p *parser) valueFloat(it item) (any, tomlType) {
parts := strings.FieldsFunc(it.val, func(r rune) bool {
switch r {
case '.', 'e', 'E':
@@ -314,7 +327,9 @@ func (p *parser) valueFloat(it item) (interface{}, tomlType) {
p.panicItemf(it, "Invalid float %q: '.' must be followed by one or more digits", it.val)
}
val := strings.Replace(it.val, "_", "", -1)
- if val == "+nan" || val == "-nan" { // Go doesn't support this, but TOML spec does.
+ signbit := false
+ if val == "+nan" || val == "-nan" {
+ signbit = val == "-nan"
val = "nan"
}
num, err := strconv.ParseFloat(val, 64)
@@ -325,20 +340,29 @@ func (p *parser) valueFloat(it item) (interface{}, tomlType) {
p.panicItemf(it, "Invalid float value: %q", it.val)
}
}
+ if signbit {
+ num = math.Copysign(num, -1)
+ }
return num, p.typeOfPrimitive(it)
}
var dtTypes = []struct {
fmt string
zone *time.Location
+ next bool
}{
- {time.RFC3339Nano, time.Local},
- {"2006-01-02T15:04:05.999999999", internal.LocalDatetime},
- {"2006-01-02", internal.LocalDate},
- {"15:04:05.999999999", internal.LocalTime},
+ {time.RFC3339Nano, time.Local, false},
+ {"2006-01-02T15:04:05.999999999", internal.LocalDatetime, false},
+ {"2006-01-02", internal.LocalDate, false},
+ {"15:04:05.999999999", internal.LocalTime, false},
+
+ // tomlNext
+ {"2006-01-02T15:04Z07:00", time.Local, true},
+ {"2006-01-02T15:04", internal.LocalDatetime, true},
+ {"15:04", internal.LocalTime, true},
}
-func (p *parser) valueDatetime(it item) (interface{}, tomlType) {
+func (p *parser) valueDatetime(it item) (any, tomlType) {
it.val = datetimeRepl.Replace(it.val)
var (
t time.Time
@@ -346,28 +370,49 @@ func (p *parser) valueDatetime(it item) (interface{}, tomlType) {
err error
)
for _, dt := range dtTypes {
+ if dt.next && !p.tomlNext {
+ continue
+ }
t, err = time.ParseInLocation(dt.fmt, it.val, dt.zone)
if err == nil {
+ if missingLeadingZero(it.val, dt.fmt) {
+ p.panicErr(it, errParseDate{it.val})
+ }
ok = true
break
}
}
if !ok {
- p.panicItemf(it, "Invalid TOML Datetime: %q.", it.val)
+ p.panicErr(it, errParseDate{it.val})
}
return t, p.typeOfPrimitive(it)
}
-func (p *parser) valueArray(it item) (interface{}, tomlType) {
+// Go's time.Parse() will accept numbers without a leading zero; there isn't any
+// way to require it. https://github.com/golang/go/issues/29911
+//
+// Depend on the fact that the separators (- and :) should always be at the same
+// location.
+func missingLeadingZero(d, l string) bool {
+ for i, c := range []byte(l) {
+ if c == '.' || c == 'Z' {
+ return false
+ }
+ if (c < '0' || c > '9') && d[i] != c {
+ return true
+ }
+ }
+ return false
+}
+
+func (p *parser) valueArray(it item) (any, tomlType) {
p.setType(p.currentKey, tomlArray, it.pos)
var (
- types []tomlType
-
- // Initialize to a non-nil empty slice. This makes it consistent with
- // how S = [] decodes into a non-nil slice inside something like struct
- // { S []string }. See #338
- array = []interface{}{}
+ // Initialize to a non-nil slice to make it consistent with how S = []
+ // decodes into a non-nil slice inside something like struct { S
+ // []string }. See #338
+ array = make([]any, 0, 2)
)
for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
if it.typ == itemCommentStart {
@@ -377,20 +422,20 @@ func (p *parser) valueArray(it item) (interface{}, tomlType) {
val, typ := p.value(it, true)
array = append(array, val)
- types = append(types, typ)
- // XXX: types isn't used here, we need it to record the accurate type
+ // XXX: type isn't used here, we need it to record the accurate type
// information.
//
// Not entirely sure how to best store this; could use "key[0]",
// "key[1]" notation, or maybe store it on the Array type?
+ _ = typ
}
return array, tomlArray
}
-func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tomlType) {
+func (p *parser) valueInlineTable(it item, parentIsArray bool) (any, tomlType) {
var (
- hash = make(map[string]interface{})
+ topHash = make(map[string]any)
outerContext = p.context
outerKey = p.currentKey
)
@@ -418,19 +463,33 @@ func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tom
p.assertEqual(itemKeyEnd, k.typ)
/// The current key is the last part.
- p.currentKey = key[len(key)-1]
+ p.currentKey = key.last()
/// All the other parts (if any) are the context; need to set each part
/// as implicit.
- context := key[:len(key)-1]
+ context := key.parent()
for i := range context {
p.addImplicitContext(append(p.context, context[i:i+1]...))
}
+ p.ordered = append(p.ordered, p.context.add(p.currentKey))
/// Set the value.
val, typ := p.value(p.next(), false)
- p.set(p.currentKey, val, typ, it.pos)
- p.ordered = append(p.ordered, p.context.add(p.currentKey))
+ p.setValue(p.currentKey, val)
+ p.setType(p.currentKey, typ, it.pos)
+
+ hash := topHash
+ for _, c := range context {
+ h, ok := hash[c]
+ if !ok {
+ h = make(map[string]any)
+ hash[c] = h
+ }
+ hash, ok = h.(map[string]any)
+ if !ok {
+ p.panicf("%q is not a table", p.context)
+ }
+ }
hash[p.currentKey] = val
/// Restore context.
@@ -438,7 +497,7 @@ func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tom
}
p.context = outerContext
p.currentKey = outerKey
- return hash, tomlHash
+ return topHash, tomlHash
}
// numHasLeadingZero checks if this number has leading zeroes, allowing for '0',
@@ -468,9 +527,9 @@ func numUnderscoresOK(s string) bool {
}
}
- // isHexadecimal is a superset of all the permissable characters
- // surrounding an underscore.
- accept = isHexadecimal(r)
+ // isHex is a superset of all the permissable characters surrounding an
+ // underscore.
+ accept = isHex(r)
}
return accept
}
@@ -493,21 +552,19 @@ func numPeriodsOK(s string) bool {
// Establishing the context also makes sure that the key isn't a duplicate, and
// will create implicit hashes automatically.
func (p *parser) addContext(key Key, array bool) {
- var ok bool
-
- // Always start at the top level and drill down for our context.
+ /// Always start at the top level and drill down for our context.
hashContext := p.mapping
- keyContext := make(Key, 0)
+ keyContext := make(Key, 0, len(key)-1)
- // We only need implicit hashes for key[0:-1]
- for _, k := range key[0 : len(key)-1] {
- _, ok = hashContext[k]
+ /// We only need implicit hashes for the parents.
+ for _, k := range key.parent() {
+ _, ok := hashContext[k]
keyContext = append(keyContext, k)
// No key? Make an implicit hash and move on.
if !ok {
p.addImplicit(keyContext)
- hashContext[k] = make(map[string]interface{})
+ hashContext[k] = make(map[string]any)
}
// If the hash context is actually an array of tables, then set
@@ -516,9 +573,9 @@ func (p *parser) addContext(key Key, array bool) {
// Otherwise, it better be a table, since this MUST be a key group (by
// virtue of it not being the last element in a key).
switch t := hashContext[k].(type) {
- case []map[string]interface{}:
+ case []map[string]any:
hashContext = t[len(t)-1]
- case map[string]interface{}:
+ case map[string]any:
hashContext = t
default:
p.panicf("Key '%s' was already created as a hash.", keyContext)
@@ -529,40 +586,33 @@ func (p *parser) addContext(key Key, array bool) {
if array {
// If this is the first element for this array, then allocate a new
// list of tables for it.
- k := key[len(key)-1]
+ k := key.last()
if _, ok := hashContext[k]; !ok {
- hashContext[k] = make([]map[string]interface{}, 0, 4)
+ hashContext[k] = make([]map[string]any, 0, 4)
}
// Add a new table. But make sure the key hasn't already been used
// for something else.
- if hash, ok := hashContext[k].([]map[string]interface{}); ok {
- hashContext[k] = append(hash, make(map[string]interface{}))
+ if hash, ok := hashContext[k].([]map[string]any); ok {
+ hashContext[k] = append(hash, make(map[string]any))
} else {
p.panicf("Key '%s' was already created and cannot be used as an array.", key)
}
} else {
- p.setValue(key[len(key)-1], make(map[string]interface{}))
+ p.setValue(key.last(), make(map[string]any))
}
- p.context = append(p.context, key[len(key)-1])
-}
-
-// set calls setValue and setType.
-func (p *parser) set(key string, val interface{}, typ tomlType, pos Position) {
- p.setValue(key, val)
- p.setType(key, typ, pos)
-
+ p.context = append(p.context, key.last())
}
// setValue sets the given key to the given value in the current context.
// It will make sure that the key hasn't already been defined, account for
// implicit key groups.
-func (p *parser) setValue(key string, value interface{}) {
+func (p *parser) setValue(key string, value any) {
var (
- tmpHash interface{}
+ tmpHash any
ok bool
hash = p.mapping
- keyContext Key
+ keyContext = make(Key, 0, len(p.context)+1)
)
for _, k := range p.context {
keyContext = append(keyContext, k)
@@ -570,11 +620,11 @@ func (p *parser) setValue(key string, value interface{}) {
p.bug("Context for key '%s' has not been established.", keyContext)
}
switch t := tmpHash.(type) {
- case []map[string]interface{}:
+ case []map[string]any:
// The context is a table of hashes. Pick the most recent table
// defined as the current hash.
hash = t[len(t)-1]
- case map[string]interface{}:
+ case map[string]any:
hash = t
default:
p.panicf("Key '%s' has already been defined.", keyContext)
@@ -601,9 +651,8 @@ func (p *parser) setValue(key string, value interface{}) {
p.removeImplicit(keyContext)
return
}
-
- // Otherwise, we have a concrete key trying to override a previous
- // key, which is *always* wrong.
+ // Otherwise, we have a concrete key trying to override a previous key,
+ // which is *always* wrong.
p.panicf("Key '%s' has already been defined.", keyContext)
}
@@ -632,14 +681,11 @@ func (p *parser) setType(key string, typ tomlType, pos Position) {
// Implicit keys need to be created when tables are implied in "a.b.c.d = 1" and
// "[a.b.c]" (the "a", "b", and "c" hashes are never created explicitly).
-func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = struct{}{} }
-func (p *parser) removeImplicit(key Key) { delete(p.implicits, key.String()) }
-func (p *parser) isImplicit(key Key) bool { _, ok := p.implicits[key.String()]; return ok }
-func (p *parser) isArray(key Key) bool { return p.keyInfo[key.String()].tomlType == tomlArray }
-func (p *parser) addImplicitContext(key Key) {
- p.addImplicit(key)
- p.addContext(key, false)
-}
+func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = struct{}{} }
+func (p *parser) removeImplicit(key Key) { delete(p.implicits, key.String()) }
+func (p *parser) isImplicit(key Key) bool { _, ok := p.implicits[key.String()]; return ok }
+func (p *parser) isArray(key Key) bool { return p.keyInfo[key.String()].tomlType == tomlArray }
+func (p *parser) addImplicitContext(key Key) { p.addImplicit(key); p.addContext(key, false) }
// current returns the full key name of the current context.
func (p *parser) current() string {
@@ -662,114 +708,131 @@ func stripFirstNewline(s string) string {
return s
}
-// Remove newlines inside triple-quoted strings if a line ends with "\".
+// stripEscapedNewlines removes whitespace after line-ending backslashes in
+// multiline strings.
+//
+// A line-ending backslash is an unescaped \ followed only by whitespace until
+// the next newline. After a line-ending backslash, all whitespace is removed
+// until the next non-whitespace character.
func (p *parser) stripEscapedNewlines(s string) string {
- split := strings.Split(s, "\n")
- if len(split) < 1 {
- return s
- }
-
- escNL := false // Keep track of the last non-blank line was escaped.
- for i, line := range split {
- line = strings.TrimRight(line, " \t\r")
-
- if len(line) == 0 || line[len(line)-1] != '\\' {
- split[i] = strings.TrimRight(split[i], "\r")
- if !escNL && i != len(split)-1 {
- split[i] += "\n"
- }
- continue
+ var (
+ b strings.Builder
+ i int
+ )
+ b.Grow(len(s))
+ for {
+ ix := strings.Index(s[i:], `\`)
+ if ix < 0 {
+ b.WriteString(s)
+ return b.String()
}
+ i += ix
- escBS := true
- for j := len(line) - 1; j >= 0 && line[j] == '\\'; j-- {
- escBS = !escBS
+ if len(s) > i+1 && s[i+1] == '\\' {
+ // Escaped backslash.
+ i += 2
+ continue
}
- if escNL {
- line = strings.TrimLeft(line, " \t\r")
+ // Scan until the next non-whitespace.
+ j := i + 1
+ whitespaceLoop:
+ for ; j < len(s); j++ {
+ switch s[j] {
+ case ' ', '\t', '\r', '\n':
+ default:
+ break whitespaceLoop
+ }
}
- escNL = !escBS
-
- if escBS {
- split[i] += "\n"
+ if j == i+1 {
+ // Not a whitespace escape.
+ i++
continue
}
-
- if i == len(split)-1 {
- p.panicf("invalid escape: '\\ '")
- }
-
- split[i] = line[:len(line)-1] // Remove \
- if len(split)-1 > i {
- split[i+1] = strings.TrimLeft(split[i+1], " \t\r")
+ if !strings.Contains(s[i:j], "\n") {
+ // This is not a line-ending backslash. (It's a bad escape sequence,
+ // but we can let replaceEscapes catch it.)
+ i++
+ continue
}
+ b.WriteString(s[:i])
+ s = s[j:]
+ i = 0
}
- return strings.Join(split, "")
}
func (p *parser) replaceEscapes(it item, str string) string {
- replaced := make([]rune, 0, len(str))
- s := []byte(str)
- r := 0
- for r < len(s) {
- if s[r] != '\\' {
- c, size := utf8.DecodeRune(s[r:])
- r += size
- replaced = append(replaced, c)
+ var (
+ b strings.Builder
+ skip = 0
+ )
+ b.Grow(len(str))
+ for i, c := range str {
+ if skip > 0 {
+ skip--
+ continue
+ }
+ if c != '\\' {
+ b.WriteRune(c)
continue
}
- r += 1
- if r >= len(s) {
+
+ if i >= len(str) {
p.bug("Escape sequence at end of string.")
return ""
}
- switch s[r] {
+ switch str[i+1] {
default:
- p.bug("Expected valid escape code after \\, but got %q.", s[r])
+ p.bug("Expected valid escape code after \\, but got %q.", str[i+1])
case ' ', '\t':
- p.panicItemf(it, "invalid escape: '\\%c'", s[r])
+ p.panicItemf(it, "invalid escape: '\\%c'", str[i+1])
case 'b':
- replaced = append(replaced, rune(0x0008))
- r += 1
+ b.WriteByte(0x08)
+ skip = 1
case 't':
- replaced = append(replaced, rune(0x0009))
- r += 1
+ b.WriteByte(0x09)
+ skip = 1
case 'n':
- replaced = append(replaced, rune(0x000A))
- r += 1
+ b.WriteByte(0x0a)
+ skip = 1
case 'f':
- replaced = append(replaced, rune(0x000C))
- r += 1
+ b.WriteByte(0x0c)
+ skip = 1
case 'r':
- replaced = append(replaced, rune(0x000D))
- r += 1
+ b.WriteByte(0x0d)
+ skip = 1
+ case 'e':
+ if p.tomlNext {
+ b.WriteByte(0x1b)
+ skip = 1
+ }
case '"':
- replaced = append(replaced, rune(0x0022))
- r += 1
+ b.WriteByte(0x22)
+ skip = 1
case '\\':
- replaced = append(replaced, rune(0x005C))
- r += 1
+ b.WriteByte(0x5c)
+ skip = 1
+ // The lexer guarantees the correct number of characters are present;
+ // don't need to check here.
+ case 'x':
+ if p.tomlNext {
+ escaped := p.asciiEscapeToUnicode(it, str[i+2:i+4])
+ b.WriteRune(escaped)
+ skip = 3
+ }
case 'u':
- // At this point, we know we have a Unicode escape of the form
- // `uXXXX` at [r, r+5). (Because the lexer guarantees this
- // for us.)
- escaped := p.asciiEscapeToUnicode(it, s[r+1:r+5])
- replaced = append(replaced, escaped)
- r += 5
+ escaped := p.asciiEscapeToUnicode(it, str[i+2:i+6])
+ b.WriteRune(escaped)
+ skip = 5
case 'U':
- // At this point, we know we have a Unicode escape of the form
- // `uXXXX` at [r, r+9). (Because the lexer guarantees this
- // for us.)
- escaped := p.asciiEscapeToUnicode(it, s[r+1:r+9])
- replaced = append(replaced, escaped)
- r += 9
+ escaped := p.asciiEscapeToUnicode(it, str[i+2:i+10])
+ b.WriteRune(escaped)
+ skip = 9
}
}
- return string(replaced)
+ return b.String()
}
-func (p *parser) asciiEscapeToUnicode(it item, bs []byte) rune {
- s := string(bs)
+func (p *parser) asciiEscapeToUnicode(it item, s string) rune {
hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
if err != nil {
p.bug("Could not parse '%s' as a hexadecimal number, but the lexer claims it's OK: %s", s, err)
diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go
index 254ca82..10c51f7 100644
--- a/vendor/github.com/BurntSushi/toml/type_fields.go
+++ b/vendor/github.com/BurntSushi/toml/type_fields.go
@@ -25,10 +25,8 @@ type field struct {
// breaking ties with index sequence.
type byName []field
-func (x byName) Len() int { return len(x) }
-
+func (x byName) Len() int { return len(x) }
func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-
func (x byName) Less(i, j int) bool {
if x[i].name != x[j].name {
return x[i].name < x[j].name
@@ -45,10 +43,8 @@ func (x byName) Less(i, j int) bool {
// byIndex sorts field by index sequence.
type byIndex []field
-func (x byIndex) Len() int { return len(x) }
-
+func (x byIndex) Len() int { return len(x) }
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-
func (x byIndex) Less(i, j int) bool {
for k, xik := range x[i].index {
if k >= len(x[j].index) {
diff --git a/vendor/github.com/BurntSushi/toml/type_toml.go b/vendor/github.com/BurntSushi/toml/type_toml.go
index 4e90d77..1c090d3 100644
--- a/vendor/github.com/BurntSushi/toml/type_toml.go
+++ b/vendor/github.com/BurntSushi/toml/type_toml.go
@@ -22,13 +22,8 @@ func typeIsTable(t tomlType) bool {
type tomlBaseType string
-func (btype tomlBaseType) typeString() string {
- return string(btype)
-}
-
-func (btype tomlBaseType) String() string {
- return btype.typeString()
-}
+func (btype tomlBaseType) typeString() string { return string(btype) }
+func (btype tomlBaseType) String() string { return btype.typeString() }
var (
tomlInteger tomlBaseType = "Integer"
@@ -54,7 +49,7 @@ func (p *parser) typeOfPrimitive(lexItem item) tomlType {
return tomlFloat
case itemDatetime:
return tomlDatetime
- case itemString:
+ case itemString, itemStringEsc:
return tomlString
case itemMultilineString:
return tomlString