// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
2// Use of this source code is governed by a MIT license found in the LICENSE file.
3
4package codec
5
6// Contains code shared by both encode and decode.
7
8// Some shared ideas around encoding/decoding
9// ------------------------------------------
10//
11// If an interface{} is passed, we first do a type assertion to see if it is
12// a primitive type or a map/slice of primitive types, and use a fastpath to handle it.
13//
14// If we start with a reflect.Value, we are already in reflect.Value land and
15// will try to grab the function for the underlying Type and directly call that function.
16// This is more performant than calling reflect.Value.Interface().
17//
18// This still helps us bypass many layers of reflection, and give best performance.
19//
20// Containers
21// ------------
22// Containers in the stream are either associative arrays (key-value pairs) or
23// regular arrays (indexed by incrementing integers).
24//
25// Some streams support indefinite-length containers, and use a breaking
26// byte-sequence to denote that the container has come to an end.
27//
28// Some streams also are text-based, and use explicit separators to denote the
29// end/beginning of different values.
30//
31// During encode, we use a high-level condition to determine how to iterate through
32// the container. That decision is based on whether the container is text-based (with
33// separators) or binary (without separators). If binary, we do not even call the
34// encoding of separators.
35//
36// During decode, we use a different high-level condition to determine how to iterate
37// through the containers. That decision is based on whether the stream contained
38// a length prefix, or if it used explicit breaks. If length-prefixed, we assume that
39// it has to be binary, and we do not even try to read separators.
40//
41// Philosophy
42// ------------
43// On decode, this codec will update containers appropriately:
44// - If struct, update fields from stream into fields of struct.
45// If field in stream not found in struct, handle appropriately (based on option).
46// If a struct field has no corresponding value in the stream, leave it AS IS.
47// If nil in stream, set value to nil/zero value.
48// - If map, update map from stream.
49// If the stream value is NIL, set the map to nil.
50// - if slice, try to update up to length of array in stream.
51// if container len is less than stream array length,
52// and container cannot be expanded, handled (based on option).
53// This means you can decode 4-element stream array into 1-element array.
54//
55// ------------------------------------
56// On encode, user can specify omitEmpty. This means that the value will be omitted
57// if the zero value. The problem may occur during decode, where omitted values do not affect
58// the value being decoded into. This means that if decoding into a struct with an
59// int field with current value=5, and the field is omitted in the stream, then after
60// decoding, the value will still be 5 (not 0).
61// omitEmpty only works if you guarantee that you always decode into zero-values.
62//
63// ------------------------------------
64// We could have truncated a map to remove keys not available in the stream,
65// or set values in the struct which are not in the stream to their zero values.
66// We decided against it because there is no efficient way to do it.
67// We may introduce it as an option later.
68// However, that will require enabling it for both runtime and code generation modes.
69//
70// To support truncate, we need to do 2 passes over the container:
71// map
72// - first collect all keys (e.g. in k1)
73// - for each key in stream, mark k1 that the key should not be removed
74// - after updating map, do second pass and call delete for all keys in k1 which are not marked
75// struct:
76// - for each field, track the *typeInfo s1
77// - iterate through all s1, and for each one not marked, set value to zero
78// - this involves checking the possible anonymous fields which are nil ptrs.
79// too much work.
80//
81// ------------------------------------------
82// Error Handling is done within the library using panic.
83//
84// This way, the code doesn't have to keep checking if an error has happened,
85// and we don't have to keep sending the error value along with each call
86// or storing it in the En|Decoder and checking it constantly along the way.
87//
88// The disadvantage is that small functions which use panics cannot be inlined.
89// The code accounts for that by only using panics behind an interface;
90// since interface calls cannot be inlined, this is irrelevant.
91//
92// We considered storing the error is En|Decoder.
93// - once it has its err field set, it cannot be used again.
94// - panicing will be optional, controlled by const flag.
95// - code should always check error first and return early.
96// We eventually decided against it as it makes the code clumsier to always
97// check for these error conditions.
98
99import (
100 "bytes"
101 "encoding"
102 "encoding/binary"
103 "errors"
104 "fmt"
105 "io"
106 "math"
107 "reflect"
108 "sort"
109 "strconv"
110 "strings"
111 "sync"
112 "time"
113)
114
115const (
116 scratchByteArrayLen = 32
117 // initCollectionCap = 16 // 32 is defensive. 16 is preferred.
118
119 // Support encoding.(Binary|Text)(Unm|M)arshaler.
120 // This constant flag will enable or disable it.
121 supportMarshalInterfaces = true
122
123 // for debugging, set this to false, to catch panic traces.
124 // Note that this will always cause rpc tests to fail, since they need io.EOF sent via panic.
125 recoverPanicToErr = true
126
127 // arrayCacheLen is the length of the cache used in encoder or decoder for
128 // allowing zero-alloc initialization.
129 arrayCacheLen = 8
130
131 // size of the cacheline: defaulting to value for archs: amd64, arm64, 386
132 // should use "runtime/internal/sys".CacheLineSize, but that is not exposed.
133 cacheLineSize = 64
134
135 wordSizeBits = 32 << (^uint(0) >> 63) // strconv.IntSize
136 wordSize = wordSizeBits / 8
137
138 maxLevelsEmbedding = 15 // use this, so structFieldInfo fits into 8 bytes
139)
140
141var (
142 oneByteArr = [1]byte{0}
143 zeroByteSlice = oneByteArr[:0:0]
144)
145
146var refBitset bitset32
147var pool pooler
148var panicv panicHdl
149
150func init() {
151 pool.init()
152
153 refBitset.set(byte(reflect.Map))
154 refBitset.set(byte(reflect.Ptr))
155 refBitset.set(byte(reflect.Func))
156 refBitset.set(byte(reflect.Chan))
157}
158
// charEncoding denotes the character encoding of text in the stream.
type charEncoding uint8

const (
	cRAW charEncoding = iota // raw, un-interpreted bytes
	cUTF8
	cUTF16LE
	cUTF16BE
	cUTF32LE
	cUTF32BE
)
169
// valueType is the stream type
type valueType uint8

const (
	valueTypeUnset valueType = iota
	valueTypeNil
	valueTypeInt
	valueTypeUint
	valueTypeFloat
	valueTypeBool
	valueTypeString
	valueTypeSymbol
	valueTypeBytes
	valueTypeMap
	valueTypeArray
	valueTypeTime
	valueTypeExt

	// valueTypeInvalid = 0xff
)

// valueTypeStrings holds the display name for each valueType, indexed by value.
var valueTypeStrings = [...]string{
	"Unset",
	"Nil",
	"Int",
	"Uint",
	"Float",
	"Bool",
	"String",
	"Symbol",
	"Bytes",
	"Map",
	"Array",
	"Timestamp",
	"Ext",
}

// String returns the display name of x, falling back to its
// decimal value for out-of-range stream types.
func (x valueType) String() string {
	if n := int(x); n < len(valueTypeStrings) {
		return valueTypeStrings[n]
	}
	return strconv.FormatInt(int64(x), 10)
}
213
// seqType classifies the kind of Go sequence being handled:
// fixed-size array, slice, or channel.
type seqType uint8

const (
	_ seqType = iota
	seqTypeArray
	seqTypeSlice
	seqTypeChan
)
222
// containerState tracks position within a map or array container
// while encoding/decoding.
//
// note that containerMapStart and containerArrayStart are not sent.
// This is because the ReadXXXStart and EncodeXXXStart already does these.
type containerState uint8

const (
	_ containerState = iota

	containerMapStart // slot left open, since Driver method already covers it
	containerMapKey
	containerMapValue
	containerMapEnd
	containerArrayStart // slot left open, since Driver methods already cover it
	containerArrayElem
	containerArrayEnd
)
238
239// // sfiIdx used for tracking where a (field/enc)Name is seen in a []*structFieldInfo
240// type sfiIdx struct {
241// name string
242// index int
243// }
244
245// do not recurse if a containing type refers to an embedded type
246// which refers back to its containing type (via a pointer).
247// The second time this back-reference happens, break out,
248// so as not to cause an infinite loop.
249const rgetMaxRecursion = 2
250
251// Anecdotally, we believe most types have <= 12 fields.
252// - even Java's PMD rules set TooManyFields threshold to 15.
253// However, go has embedded fields, which should be regarded as
254// top level, allowing structs to possibly double or triple.
255// In addition, we don't want to keep creating transient arrays,
256// especially for the sfi index tracking, and the evtypes tracking.
257//
258// So - try to keep typeInfoLoadArray within 2K bytes
259const (
260 typeInfoLoadArraySfisLen = 16
261 typeInfoLoadArraySfiidxLen = 8 * 112
262 typeInfoLoadArrayEtypesLen = 12
263 typeInfoLoadArrayBLen = 8 * 4
264)
265
// typeInfoLoad is scratch state used while building a typeInfo for a type.
type typeInfoLoad struct {
	// fNames   []string
	// encNames []string
	etypes []uintptr         // ids of embedded types seen (cycle/dup detection scratch)
	sfis   []structFieldInfo // field infos collected so far
}
272
// typeInfoLoadArray provides stack-allocated backing arrays for typeInfoLoad,
// sized per the typeInfoLoadArray*Len constants to stay within ~2K bytes.
type typeInfoLoadArray struct {
	// fNames   [typeInfoLoadArrayLen]string
	// encNames [typeInfoLoadArrayLen]string
	sfis   [typeInfoLoadArraySfisLen]structFieldInfo
	sfiidx [typeInfoLoadArraySfiidxLen]byte
	etypes [typeInfoLoadArrayEtypesLen]uintptr
	b      [typeInfoLoadArrayBLen]byte // scratch - used for struct field names
}
281
// mirror json.Marshaler and json.Unmarshaler here,
// so we don't import the encoding/json package

type jsonMarshaler interface {
	MarshalJSON() ([]byte, error)
}
type jsonUnmarshaler interface {
	UnmarshalJSON([]byte) error
}

// isZeroer is implemented by types that can report their own zero-ness
// (consulted for omitempty support).
type isZeroer interface {
	IsZero() bool
}
295
296// type byteAccepter func(byte) bool
297
298var (
299 bigen = binary.BigEndian
300 structInfoFieldName = "_struct"
301
302 mapStrIntfTyp = reflect.TypeOf(map[string]interface{}(nil))
303 mapIntfIntfTyp = reflect.TypeOf(map[interface{}]interface{}(nil))
304 intfSliceTyp = reflect.TypeOf([]interface{}(nil))
305 intfTyp = intfSliceTyp.Elem()
306
307 reflectValTyp = reflect.TypeOf((*reflect.Value)(nil)).Elem()
308
309 stringTyp = reflect.TypeOf("")
310 timeTyp = reflect.TypeOf(time.Time{})
311 rawExtTyp = reflect.TypeOf(RawExt{})
312 rawTyp = reflect.TypeOf(Raw{})
313 uintptrTyp = reflect.TypeOf(uintptr(0))
314 uint8Typ = reflect.TypeOf(uint8(0))
315 uint8SliceTyp = reflect.TypeOf([]uint8(nil))
316 uintTyp = reflect.TypeOf(uint(0))
317 intTyp = reflect.TypeOf(int(0))
318
319 mapBySliceTyp = reflect.TypeOf((*MapBySlice)(nil)).Elem()
320
321 binaryMarshalerTyp = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem()
322 binaryUnmarshalerTyp = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem()
323
324 textMarshalerTyp = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
325 textUnmarshalerTyp = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
326
327 jsonMarshalerTyp = reflect.TypeOf((*jsonMarshaler)(nil)).Elem()
328 jsonUnmarshalerTyp = reflect.TypeOf((*jsonUnmarshaler)(nil)).Elem()
329
330 selferTyp = reflect.TypeOf((*Selfer)(nil)).Elem()
331 iszeroTyp = reflect.TypeOf((*isZeroer)(nil)).Elem()
332
333 uint8TypId = rt2id(uint8Typ)
334 uint8SliceTypId = rt2id(uint8SliceTyp)
335 rawExtTypId = rt2id(rawExtTyp)
336 rawTypId = rt2id(rawTyp)
337 intfTypId = rt2id(intfTyp)
338 timeTypId = rt2id(timeTyp)
339 stringTypId = rt2id(stringTyp)
340
341 mapStrIntfTypId = rt2id(mapStrIntfTyp)
342 mapIntfIntfTypId = rt2id(mapIntfIntfTyp)
343 intfSliceTypId = rt2id(intfSliceTyp)
344 // mapBySliceTypId = rt2id(mapBySliceTyp)
345
346 intBitsize = uint8(intTyp.Bits())
347 uintBitsize = uint8(uintTyp.Bits())
348
349 bsAll0x00 = []byte{0, 0, 0, 0, 0, 0, 0, 0}
350 bsAll0xff = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
351
352 chkOvf checkOverflow
353
354 errNoFieldNameToStructFieldInfo = errors.New("no field name passed to parseStructFieldInfo")
355)
356
357var defTypeInfos = NewTypeInfos([]string{"codec", "json"})
358
// immutableKindsSet reports, indexed by reflect.Kind, which kinds
// this package treats as immutable values.
var immutableKindsSet = [32]bool{
	// reflect.Invalid: ,
	reflect.Bool:       true,
	reflect.Int:        true,
	reflect.Int8:       true,
	reflect.Int16:      true,
	reflect.Int32:      true,
	reflect.Int64:      true,
	reflect.Uint:       true,
	reflect.Uint8:      true,
	reflect.Uint16:     true,
	reflect.Uint32:     true,
	reflect.Uint64:     true,
	reflect.Uintptr:    true,
	reflect.Float32:    true,
	reflect.Float64:    true,
	reflect.Complex64:  true,
	reflect.Complex128: true,
	// reflect.Array
	// reflect.Chan
	// reflect.Func: true,
	// reflect.Interface
	// reflect.Map
	// reflect.Ptr
	// reflect.Slice
	reflect.String: true,
	// reflect.Struct
	// reflect.UnsafePointer
}
388
// Selfer defines methods by which a value can encode or decode itself.
//
// Any type which implements Selfer will be able to encode or decode itself.
// Consequently, during (en|de)code, this takes precedence over
// (text|binary)(M|Unm)arshal or extension support.
//
// Note: *the first set of bytes of any value MUST NOT represent nil in the format*.
// This is because, during each decode, we first check if the next set of bytes
// represent nil, and if so, we just set the value to nil.
type Selfer interface {
	CodecEncodeSelf(*Encoder)
	CodecDecodeSelf(*Decoder)
}
402
// MapBySlice is a tag interface that denotes wrapped slice should encode as a map in the stream.
// The slice contains a sequence of key-value pairs.
// This affords storing a map in a specific sequence in the stream.
//
// Example usage:
//    type T1 []string         // or []int or []Point or any other "slice" type
//    func (_ T1) MapBySlice() {} // T1 now implements MapBySlice, and will be encoded as a map
//    type T2 struct { KeyValues T1 }
//
//    var kvs = []string{"one", "1", "two", "2", "three", "3"}
//    var v2 = T2{ KeyValues: T1(kvs) }
//    // v2 will be encoded like the map: {"KeyValues": {"one": "1", "two": "2", "three": "3"} }
//
// The support of MapBySlice affords the following:
//   - A slice type which implements MapBySlice will be encoded as a map
//   - A slice can be decoded from a map in the stream
//   - It MUST be a slice type (not a pointer receiver) that implements MapBySlice
type MapBySlice interface {
	MapBySlice()
}
423
// BasicHandle encapsulates the common options and extension functions.
//
// Deprecated: DO NOT USE DIRECTLY. EXPORTED FOR GODOC BENEFIT. WILL BE REMOVED.
type BasicHandle struct {
	// BasicHandle is always a part of a different type.
	// It doesn't have to fit into its own cache lines.

	// TypeInfos is used to get the type info for any type.
	//
	// If not configured, the default TypeInfos is used, which uses struct tag keys: codec, json
	TypeInfos *TypeInfos

	// Note: BasicHandle is not comparable, due to these slices here (extHandle, intf2impls).
	// If *[]T is used instead, this becomes comparable, at the cost of extra indirection.
	// These slices are used all the time, so keep as slices (not pointers).

	extHandle

	intf2impls

	RPCOptions

	// ---- cache line

	DecodeOptions

	// ---- cache line

	EncodeOptions

	// noBuiltInTypeChecker
}
456
// getBasicHandle returns x itself, satisfying the Handle interface.
func (x *BasicHandle) getBasicHandle() *BasicHandle {
	return x
}
460
461func (x *BasicHandle) getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
462 if x.TypeInfos == nil {
463 return defTypeInfos.get(rtid, rt)
464 }
465 return x.TypeInfos.get(rtid, rt)
466}
467
// Handle is the interface for a specific encoding format.
//
// Typically, a Handle is pre-configured before first time use,
// and not modified while in use. Such a pre-configured Handle
// is safe for concurrent access.
type Handle interface {
	// Name returns the name of the format handled.
	Name() string
	getBasicHandle() *BasicHandle
	recreateEncDriver(encDriver) bool
	newEncDriver(w *Encoder) encDriver
	newDecDriver(r *Decoder) decDriver
	// isBinary reports whether the format is binary (true) or text-based (false).
	isBinary() bool
	hasElemSeparators() bool
	// IsBuiltinType(rtid uintptr) bool
}
483
// Raw represents raw formatted bytes.
// We "blindly" store it during encode and retrieve the raw bytes during decode.
// Note: it is dangerous during encode, so we may gate the behaviour
// behind an Encode flag which must be explicitly set.
type Raw []byte
489
// RawExt represents raw unprocessed extension data.
// Some codecs will decode extension data as a *RawExt
// if there is no registered extension for the tag.
//
// Only one of Data or Value is nil.
// If Data is nil, then the content of the RawExt is in the Value.
type RawExt struct {
	// Tag is the extension tag associated with this data.
	Tag uint64
	// Data is the []byte which represents the raw ext. If nil, ext is exposed in Value.
	// Data is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of types
	Data []byte
	// Value represents the extension, if Data is nil.
	// Value is used by codecs (e.g. cbor, json) which leverage the format to do
	// custom serialization of the types.
	Value interface{}
}
506
// BytesExt handles custom (de)serialization of types to/from []byte.
// It is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types.
type BytesExt interface {
	// WriteExt converts a value to a []byte.
	//
	// Note: v is a pointer iff the registered extension type is a struct or array kind.
	WriteExt(v interface{}) []byte

	// ReadExt updates a value from a []byte.
	//
	// Note: dst is always a pointer kind to the registered extension type.
	ReadExt(dst interface{}, src []byte)
}
520
// InterfaceExt handles custom (de)serialization of types to/from another interface{} value.
// The Encoder or Decoder will then handle the further (de)serialization of that known type.
//
// It is used by codecs (e.g. cbor, json) which use the format to do custom serialization of types.
type InterfaceExt interface {
	// ConvertExt converts a value into a simpler interface for easy encoding
	// e.g. convert time.Time to int64.
	//
	// Note: v is a pointer iff the registered extension type is a struct or array kind.
	ConvertExt(v interface{}) interface{}

	// UpdateExt updates a value from a simpler interface for easy decoding
	// e.g. convert int64 to time.Time.
	//
	// Note: dst is always a pointer kind to the registered extension type.
	UpdateExt(dst interface{}, src interface{})
}
538
// Ext handles custom (de)serialization of custom types / extensions.
// It combines both the []byte-oriented (BytesExt) and
// interface-oriented (InterfaceExt) extension mechanisms.
type Ext interface {
	BytesExt
	InterfaceExt
}
544
545// addExtWrapper is a wrapper implementation to support former AddExt exported method.
546type addExtWrapper struct {
547 encFn func(reflect.Value) ([]byte, error)
548 decFn func(reflect.Value, []byte) error
549}
550
551func (x addExtWrapper) WriteExt(v interface{}) []byte {
552 bs, err := x.encFn(reflect.ValueOf(v))
553 if err != nil {
554 panic(err)
555 }
556 return bs
557}
558
559func (x addExtWrapper) ReadExt(v interface{}, bs []byte) {
560 if err := x.decFn(reflect.ValueOf(v), bs); err != nil {
561 panic(err)
562 }
563}
564
565func (x addExtWrapper) ConvertExt(v interface{}) interface{} {
566 return x.WriteExt(v)
567}
568
569func (x addExtWrapper) UpdateExt(dest interface{}, v interface{}) {
570 x.ReadExt(dest, v.([]byte))
571}
572
// extWrapper pairs an independent BytesExt and InterfaceExt
// into a single value satisfying the full Ext interface.
type extWrapper struct {
	BytesExt
	InterfaceExt
}
577
// bytesExtFailer is a BytesExt whose methods always fail (via panicv.errorstr).
type bytesExtFailer struct{}

func (bytesExtFailer) WriteExt(v interface{}) []byte {
	panicv.errorstr("BytesExt.WriteExt is not supported")
	return nil
}
func (bytesExtFailer) ReadExt(v interface{}, bs []byte) {
	panicv.errorstr("BytesExt.ReadExt is not supported")
}
587
// interfaceExtFailer is an InterfaceExt whose methods always fail (via panicv.errorstr).
type interfaceExtFailer struct{}

func (interfaceExtFailer) ConvertExt(v interface{}) interface{} {
	panicv.errorstr("InterfaceExt.ConvertExt is not supported")
	return nil
}
func (interfaceExtFailer) UpdateExt(dest interface{}, v interface{}) {
	panicv.errorstr("InterfaceExt.UpdateExt is not supported")
}
597
// binaryEncodingType provides an isBinary method reporting true.
type binaryEncodingType struct{}

func (binaryEncodingType) isBinary() bool { return true }

// textEncodingType provides an isBinary method reporting false.
type textEncodingType struct{}

func (textEncodingType) isBinary() bool { return false }
605
// noBuiltInTypes is embedded into many types which do not support builtins
// e.g. msgpack, simple, cbor.

// type noBuiltInTypeChecker struct{}
// func (noBuiltInTypeChecker) IsBuiltinType(rt uintptr) bool { return false }
// type noBuiltInTypes struct{ noBuiltInTypeChecker }

type noBuiltInTypes struct{}

// EncodeBuiltin and DecodeBuiltin are no-ops for formats without builtin types.
func (noBuiltInTypes) EncodeBuiltin(rt uintptr, v interface{}) {}
func (noBuiltInTypes) DecodeBuiltin(rt uintptr, v interface{}) {}
617
// type noStreamingCodec struct{}
// func (noStreamingCodec) CheckBreak() bool { return false }
// func (noStreamingCodec) hasElemSeparators() bool { return false }

// noElemSeparators is embedded by formats with no element separators;
// both methods return the zero value (false).
type noElemSeparators struct{}

func (noElemSeparators) hasElemSeparators() (v bool)            { return }
func (noElemSeparators) recreateEncDriver(e encDriver) (v bool) { return }
626
// bigenHelper writes big-endian unsigned integers into a pre-sliced
// scratch buffer and flushes them to the writer.
// Users must already slice the x completely, because we will not reslice.
type bigenHelper struct {
	x []byte // must be correctly sliced to appropriate len. slicing is a cost.
	w encWriter
}

// writeUint16 writes v big-endian into z.x, then writes z.x to the writer.
func (z bigenHelper) writeUint16(v uint16) {
	bigen.PutUint16(z.x, v)
	z.w.writeb(z.x)
}

// writeUint32 writes v big-endian into z.x, then writes z.x to the writer.
func (z bigenHelper) writeUint32(v uint32) {
	bigen.PutUint32(z.x, v)
	z.w.writeb(z.x)
}

// writeUint64 writes v big-endian into z.x, then writes z.x to the writer.
func (z bigenHelper) writeUint64(v uint64) {
	bigen.PutUint64(z.x, v)
	z.w.writeb(z.x)
}
648
// extTypeTagFn associates a reflect.Type (via the ids of both T and *T)
// with an extension tag and its Ext implementation.
type extTypeTagFn struct {
	rtid    uintptr // id of the base type
	rtidptr uintptr // id of the pointer-to-base type
	rt      reflect.Type
	tag     uint64
	ext     Ext
	_       [1]uint64 // padding
}

// extHandle is the list of registered extensions.
type extHandle []extTypeTagFn
659
660// AddExt registes an encode and decode function for a reflect.Type.
661// To deregister an Ext, call AddExt with nil encfn and/or nil decfn.
662//
663// Deprecated: Use SetBytesExt or SetInterfaceExt on the Handle instead.
664func (o *extHandle) AddExt(rt reflect.Type, tag byte,
665 encfn func(reflect.Value) ([]byte, error),
666 decfn func(reflect.Value, []byte) error) (err error) {
667 if encfn == nil || decfn == nil {
668 return o.SetExt(rt, uint64(tag), nil)
669 }
670 return o.SetExt(rt, uint64(tag), addExtWrapper{encfn, decfn})
671}
672
673// SetExt will set the extension for a tag and reflect.Type.
674// Note that the type must be a named type, and specifically not a pointer or Interface.
675// An error is returned if that is not honored.
676// To Deregister an ext, call SetExt with nil Ext.
677//
678// Deprecated: Use SetBytesExt or SetInterfaceExt on the Handle instead.
679func (o *extHandle) SetExt(rt reflect.Type, tag uint64, ext Ext) (err error) {
680 // o is a pointer, because we may need to initialize it
681 rk := rt.Kind()
682 for rk == reflect.Ptr {
683 rt = rt.Elem()
684 rk = rt.Kind()
685 }
686
687 if rt.PkgPath() == "" || rk == reflect.Interface { // || rk == reflect.Ptr {
688 return fmt.Errorf("codec.Handle.SetExt: Takes named type, not a pointer or interface: %v", rt)
689 }
690
691 rtid := rt2id(rt)
692 switch rtid {
693 case timeTypId, rawTypId, rawExtTypId:
694 // all natively supported type, so cannot have an extension
695 return // TODO: should we silently ignore, or return an error???
696 }
697 // if o == nil {
698 // return errors.New("codec.Handle.SetExt: extHandle not initialized")
699 // }
700 o2 := *o
701 // if o2 == nil {
702 // return errors.New("codec.Handle.SetExt: extHandle not initialized")
703 // }
704 for i := range o2 {
705 v := &o2[i]
706 if v.rtid == rtid {
707 v.tag, v.ext = tag, ext
708 return
709 }
710 }
711 rtidptr := rt2id(reflect.PtrTo(rt))
712 *o = append(o2, extTypeTagFn{rtid, rtidptr, rt, tag, ext, [1]uint64{}})
713 return
714}
715
716func (o extHandle) getExt(rtid uintptr) (v *extTypeTagFn) {
717 for i := range o {
718 v = &o[i]
719 if v.rtid == rtid || v.rtidptr == rtid {
720 return
721 }
722 }
723 return nil
724}
725
726func (o extHandle) getExtForTag(tag uint64) (v *extTypeTagFn) {
727 for i := range o {
728 v = &o[i]
729 if v.tag == tag {
730 return
731 }
732 }
733 return nil
734}
735
// intf2impl maps an interface type (by id) to a concrete implementing type.
type intf2impl struct {
	rtid uintptr // for intf
	impl reflect.Type
	// _ [1]uint64 // padding // not-needed, as *intf2impl is never returned.
}

// intf2impls is the list of registered interface-to-implementation mappings.
type intf2impls []intf2impl
743
744// Intf2Impl maps an interface to an implementing type.
745// This allows us support infering the concrete type
746// and populating it when passed an interface.
747// e.g. var v io.Reader can be decoded as a bytes.Buffer, etc.
748//
749// Passing a nil impl will clear the mapping.
750func (o *intf2impls) Intf2Impl(intf, impl reflect.Type) (err error) {
751 if impl != nil && !impl.Implements(intf) {
752 return fmt.Errorf("Intf2Impl: %v does not implement %v", impl, intf)
753 }
754 rtid := rt2id(intf)
755 o2 := *o
756 for i := range o2 {
757 v := &o2[i]
758 if v.rtid == rtid {
759 v.impl = impl
760 return
761 }
762 }
763 *o = append(o2, intf2impl{rtid, impl})
764 return
765}
766
767func (o intf2impls) intf2impl(rtid uintptr) (rv reflect.Value) {
768 for i := range o {
769 v := &o[i]
770 if v.rtid == rtid {
771 if v.impl == nil {
772 return
773 }
774 if v.impl.Kind() == reflect.Ptr {
775 return reflect.New(v.impl.Elem())
776 }
777 return reflect.New(v.impl).Elem()
778 }
779 }
780 return
781}
782
// structFieldInfoFlag packs per-field boolean properties
// (ready, omitempty) into a single byte.
type structFieldInfoFlag uint8

const (
	_ structFieldInfoFlag = 1 << iota
	structFieldInfoFlagReady
	structFieldInfoFlagOmitEmpty
)

// flagSet turns on the bits of f.
func (x *structFieldInfoFlag) flagSet(f structFieldInfoFlag) {
	*x |= f
}

// flagClr turns off the bits of f.
func (x *structFieldInfoFlag) flagClr(f structFieldInfoFlag) {
	*x &^= f
}

// flagGet reports whether any bit of f is set.
func (x structFieldInfoFlag) flagGet(f structFieldInfoFlag) bool {
	return x&f != 0
}

// omitEmpty reports whether the omitempty flag is set.
func (x structFieldInfoFlag) omitEmpty() bool {
	return x.flagGet(structFieldInfoFlagOmitEmpty)
}

// ready reports whether the ready flag is set.
func (x structFieldInfoFlag) ready() bool {
	return x.flagGet(structFieldInfoFlagReady)
}
810
// structFieldInfo describes a single (possibly embedded) struct field
// for encode/decode purposes.
type structFieldInfo struct {
	encName   string // encode name
	fieldName string // field name

	is  [maxLevelsEmbedding]uint16 // (recursive/embedded) field index in struct
	nis uint8                      // num levels of embedding. if 1, then it's not embedded.
	structFieldInfoFlag
}
819
// setToZeroValue sets the field described by si (within struct v) to its
// zero value, if the field can be resolved without allocation (update=false).
func (si *structFieldInfo) setToZeroValue(v reflect.Value) {
	if v, valid := si.field(v, false); valid {
		v.Set(reflect.Zero(v.Type()))
	}
}
825
// field returns the field of the struct v described by si, walking the
// embedded-field index path si.is[:si.nis] (replicating reflect's FieldByIndex).
// If update is true, nil intermediate pointers are allocated along the way;
// otherwise a nil intermediate pointer makes it return valid=false.
func (si *structFieldInfo) field(v reflect.Value, update bool) (rv2 reflect.Value, valid bool) {
	// replicate FieldByIndex
	for i, x := range si.is {
		if uint8(i) == si.nis {
			break // only the first nis entries of si.is are meaningful
		}
		if v, valid = baseStructRv(v, update); !valid {
			return
		}
		v = v.Field(int(x))
	}

	return v, true
}
842
843// func (si *structFieldInfo) fieldval(v reflect.Value, update bool) reflect.Value {
844// v, _ = si.field(v, update)
845// return v
846// }
847
848func parseStructInfo(stag string) (toArray, omitEmpty bool, keytype valueType) {
849 keytype = valueTypeString // default
850 if stag == "" {
851 return
852 }
853 for i, s := range strings.Split(stag, ",") {
854 if i == 0 {
855 } else {
856 switch s {
857 case "omitempty":
858 omitEmpty = true
859 case "toarray":
860 toArray = true
861 case "int":
862 keytype = valueTypeInt
863 case "uint":
864 keytype = valueTypeUint
865 case "float":
866 keytype = valueTypeFloat
867 // case "bool":
868 // keytype = valueTypeBool
869 case "string":
870 keytype = valueTypeString
871 }
872 }
873 }
874 return
875}
876
877func (si *structFieldInfo) parseTag(stag string) {
878 // if fname == "" {
879 // panic(errNoFieldNameToStructFieldInfo)
880 // }
881
882 if stag == "" {
883 return
884 }
885 for i, s := range strings.Split(stag, ",") {
886 if i == 0 {
887 if s != "" {
888 si.encName = s
889 }
890 } else {
891 switch s {
892 case "omitempty":
893 si.flagSet(structFieldInfoFlagOmitEmpty)
894 // si.omitEmpty = true
895 // case "toarray":
896 // si.toArray = true
897 }
898 }
899 }
900}
901
// sfiSortedByEncName implements sort.Interface over []*structFieldInfo,
// ordering fields by their encoded name.
type sfiSortedByEncName []*structFieldInfo

func (p sfiSortedByEncName) Len() int {
	return len(p)
}

func (p sfiSortedByEncName) Less(i, j int) bool {
	return p[i].encName < p[j].encName
}

func (p sfiSortedByEncName) Swap(i, j int) {
	p[i], p[j] = p[j], p[i]
}
915
916const structFieldNodeNumToCache = 4
917
918type structFieldNodeCache struct {
919 rv [structFieldNodeNumToCache]reflect.Value
920 idx [structFieldNodeNumToCache]uint32
921 num uint8
922}
923
924func (x *structFieldNodeCache) get(key uint32) (fv reflect.Value, valid bool) {
925 for i, k := range &x.idx {
926 if uint8(i) == x.num {
927 return // break
928 }
929 if key == k {
930 return x.rv[i], true
931 }
932 }
933 return
934}
935
936func (x *structFieldNodeCache) tryAdd(fv reflect.Value, key uint32) {
937 if x.num < structFieldNodeNumToCache {
938 x.rv[x.num] = fv
939 x.idx[x.num] = key
940 x.num++
941 return
942 }
943}
944
// structFieldNode wraps a struct value plus small caches for resolving
// its (embedded) fields repeatedly.
type structFieldNode struct {
	v      reflect.Value        // the struct value whose fields are resolved
	cache2 structFieldNodeCache // cache for fields at embedding depth 2
	cache3 structFieldNodeCache // cache for fields at embedding depth 3
	update bool                 // whether nil intermediate pointers may be allocated
}
951
// field resolves si against x.v, caching intermediate embedded-struct values
// for fields at embedding depth 2 or 3, and falling back to the generic
// si.field walk for deeper paths.
func (x *structFieldNode) field(si *structFieldInfo) (fv reflect.Value) {
	// return si.fieldval(x.v, x.update)
	// Note: we only cache if nis=2 or nis=3 i.e. up to 2 levels of embedding
	// This mostly saves us time on the repeated calls to v.Elem, v.Field, etc.
	var valid bool
	switch si.nis {
	case 1:
		// direct field: no dereferencing or caching needed
		fv = x.v.Field(int(si.is[0]))
	case 2:
		// one level of embedding: cache the dereferenced embedded struct
		if fv, valid = x.cache2.get(uint32(si.is[0])); valid {
			fv = fv.Field(int(si.is[1]))
			return
		}
		fv = x.v.Field(int(si.is[0]))
		if fv, valid = baseStructRv(fv, x.update); !valid {
			return
		}
		x.cache2.tryAdd(fv, uint32(si.is[0]))
		fv = fv.Field(int(si.is[1]))
	case 3:
		// two levels of embedding: key combines both intermediate indices
		var key uint32 = uint32(si.is[0])<<16 | uint32(si.is[1])
		if fv, valid = x.cache3.get(key); valid {
			fv = fv.Field(int(si.is[2]))
			return
		}
		fv = x.v.Field(int(si.is[0]))
		if fv, valid = baseStructRv(fv, x.update); !valid {
			return
		}
		fv = fv.Field(int(si.is[1]))
		if fv, valid = baseStructRv(fv, x.update); !valid {
			return
		}
		x.cache3.tryAdd(fv, key)
		fv = fv.Field(int(si.is[2]))
	default:
		// deeper embedding: use the uncached generic walk
		fv, _ = si.field(x.v, x.update)
	}
	return
}
992
993func baseStructRv(v reflect.Value, update bool) (v2 reflect.Value, valid bool) {
994 for v.Kind() == reflect.Ptr {
995 if v.IsNil() {
996 if !update {
997 return
998 }
999 v.Set(reflect.New(v.Type().Elem()))
1000 }
1001 v = v.Elem()
1002 }
1003 return v, true
1004}
1005
// typeInfoFlag packs boolean properties of a typeInfo into a single byte.
type typeInfoFlag uint8

const (
	typeInfoFlagComparable = 1 << iota // type supports ==
	typeInfoFlagIsZeroer               // T implements isZeroer
	typeInfoFlagIsZeroerPtr            // *T implements isZeroer
)
1013
1014// typeInfo keeps information about each (non-ptr) type referenced in the encode/decode sequence.
1015//
1016// During an encode/decode sequence, we work as below:
1017// - If base is a built in type, en/decode base value
1018// - If base is registered as an extension, en/decode base value
1019// - If type is binary(M/Unm)arshaler, call Binary(M/Unm)arshal method
1020// - If type is text(M/Unm)arshaler, call Text(M/Unm)arshal method
1021// - Else decode appropriately based on the reflect.Kind
1022type typeInfo struct {
1023 rt reflect.Type
1024 elem reflect.Type
1025 pkgpath string
1026
1027 rtid uintptr
1028 // rv0 reflect.Value // saved zero value, used if immutableKind
1029
1030 numMeth uint16 // number of methods
1031 kind uint8
1032 chandir uint8
1033
1034 anyOmitEmpty bool // true if a struct, and any of the fields are tagged "omitempty"
1035 toArray bool // whether this (struct) type should be encoded as an array
1036 keyType valueType // if struct, how is the field name stored in a stream? default is string
1037 mbs bool // base type (T or *T) is a MapBySlice
1038
1039 // ---- cpu cache line boundary?
1040 sfiSort []*structFieldInfo // sorted. Used when enc/dec struct to map.
1041 sfiSrc []*structFieldInfo // unsorted. Used when enc/dec struct to array.
1042
1043 key reflect.Type
1044
1045 // ---- cpu cache line boundary?
1046 // sfis []structFieldInfo // all sfi, in src order, as created.
1047 sfiNamesSort []byte // all names, with indexes into the sfiSort
1048
1049 // format of marshal type fields below: [btj][mu]p? OR csp?
1050
1051 bm bool // T is a binaryMarshaler
1052 bmp bool // *T is a binaryMarshaler
1053 bu bool // T is a binaryUnmarshaler
1054 bup bool // *T is a binaryUnmarshaler
1055 tm bool // T is a textMarshaler
1056 tmp bool // *T is a textMarshaler
1057 tu bool // T is a textUnmarshaler
1058 tup bool // *T is a textUnmarshaler
1059
1060 jm bool // T is a jsonMarshaler
1061 jmp bool // *T is a jsonMarshaler
1062 ju bool // T is a jsonUnmarshaler
1063 jup bool // *T is a jsonUnmarshaler
1064 cs bool // T is a Selfer
1065 csp bool // *T is a Selfer
1066
1067 // other flags, with individual bits representing if set.
1068 flags typeInfoFlag
1069
1070 // _ [2]byte // padding
1071 _ [3]uint64 // padding
1072}
1073
1074func (ti *typeInfo) isFlag(f typeInfoFlag) bool {
1075 return ti.flags&f != 0
1076}
1077
// indexForEncName returns the index (into sfiSort) of the field with the
// given encoded name, or -1 if not found.
//
// It searches ti.sfiNamesSort, which packs each entry as:
//
//	sep-byte | name-bytes | 0xff | index-hi-byte | index-lo-byte
//
// The custom separator (tiSep2) reduces false prefix matches during bytes.Index.
func (ti *typeInfo) indexForEncName(name []byte) (index int16) {
	var sn []byte
	// small names use a stack buffer to avoid a heap allocation
	if len(name)+2 <= 32 {
		var buf [32]byte // should not escape
		sn = buf[:len(name)+2]
	} else {
		sn = make([]byte, len(name)+2)
	}
	copy(sn[1:], name)
	sn[0], sn[len(sn)-1] = tiSep2(name), 0xff
	j := bytes.Index(ti.sfiNamesSort, sn)
	if j < 0 {
		return -1
	}
	// the two bytes after the matched token hold the big-endian uint16 index
	index = int16(uint16(ti.sfiNamesSort[j+len(sn)+1]) | uint16(ti.sfiNamesSort[j+len(sn)])<<8)
	return
}
1095
// rtid2ti is one entry in the sorted slice that maps a type id (rtid)
// to its computed typeInfo.
type rtid2ti struct {
	rtid uintptr
	ti   *typeInfo
}
1100
// TypeInfos caches typeInfo for each type on first inspection.
//
// It is configured with a set of tag keys, which are used to get
// configuration for the type.
type TypeInfos struct {
	// infos: formerly map[uintptr]*typeInfo, now *[]rtid2ti, 2 words expected
	infos atomicTypeInfoSlice
	mu    sync.Mutex // serializes writers to infos (reads are lock-free)
	tags  []string   // struct tag keys consulted in priority order (e.g. codec, json)
	_     [2]uint64  // padding
}
1112
1113// NewTypeInfos creates a TypeInfos given a set of struct tags keys.
1114//
1115// This allows users customize the struct tag keys which contain configuration
1116// of their types.
1117func NewTypeInfos(tags []string) *TypeInfos {
1118 return &TypeInfos{tags: tags}
1119}
1120
1121func (x *TypeInfos) structTag(t reflect.StructTag) (s string) {
1122 // check for tags: codec, json, in that order.
1123 // this allows seamless support for many configured structs.
1124 for _, x := range x.tags {
1125 s = t.Get(x)
1126 if s != "" {
1127 return s
1128 }
1129 }
1130 return
1131}
1132
1133func (x *TypeInfos) find(s []rtid2ti, rtid uintptr) (idx int, ti *typeInfo) {
1134 // binary search. adapted from sort/search.go.
1135 // if sp == nil {
1136 // return -1, nil
1137 // }
1138 // s := *sp
1139 h, i, j := 0, 0, len(s)
1140 for i < j {
1141 h = i + (j-i)/2
1142 if s[h].rtid < rtid {
1143 i = h + 1
1144 } else {
1145 j = h
1146 }
1147 }
1148 if i < len(s) && s[i].rtid == rtid {
1149 return i, s[i].ti
1150 }
1151 return i, nil
1152}
1153
// get returns the typeInfo for rt, computing and caching it on first use.
// rtid must be rt's unique id (rt2id(rt)); rt must not be a pointer kind.
func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
	// fast path: lock-free lookup in the sorted cache slice
	sp := x.infos.load()
	var idx int
	if sp != nil {
		idx, pti = x.find(sp, rtid)
		if pti != nil {
			return
		}
	}

	rk := rt.Kind()

	if rk == reflect.Ptr { // || (rk == reflect.Interface && rtid != intfTypId) {
		panicv.errorf("invalid kind passed to TypeInfos.get: %v - %v", rk, rt)
	}

	// do not hold lock while computing this.
	// it may lead to duplication, but that's ok.
	ti := typeInfo{rt: rt, rtid: rtid, kind: uint8(rk), pkgpath: rt.PkgPath()}
	// ti.rv0 = reflect.Zero(rt)

	// ti.comparable = rt.Comparable()
	ti.numMeth = uint16(rt.NumMethod())

	// record which marshaling interfaces T and *T implement
	ti.bm, ti.bmp = implIntf(rt, binaryMarshalerTyp)
	ti.bu, ti.bup = implIntf(rt, binaryUnmarshalerTyp)
	ti.tm, ti.tmp = implIntf(rt, textMarshalerTyp)
	ti.tu, ti.tup = implIntf(rt, textUnmarshalerTyp)
	ti.jm, ti.jmp = implIntf(rt, jsonMarshalerTyp)
	ti.ju, ti.jup = implIntf(rt, jsonUnmarshalerTyp)
	ti.cs, ti.csp = implIntf(rt, selferTyp)

	b1, b2 := implIntf(rt, iszeroTyp)
	if b1 {
		ti.flags |= typeInfoFlagIsZeroer
	}
	if b2 {
		ti.flags |= typeInfoFlagIsZeroerPtr
	}
	if rt.Comparable() {
		ti.flags |= typeInfoFlagComparable
	}

	switch rk {
	case reflect.Struct:
		var omitEmpty bool
		if f, ok := rt.FieldByName(structInfoFieldName); ok {
			ti.toArray, omitEmpty, ti.keyType = parseStructInfo(x.structTag(f.Tag))
		} else {
			ti.keyType = valueTypeString
		}
		// use a pooled scratch area while walking the struct fields
		pp, pi := pool.tiLoad()
		pv := pi.(*typeInfoLoadArray)
		pv.etypes[0] = ti.rtid
		// vv := typeInfoLoad{pv.fNames[:0], pv.encNames[:0], pv.etypes[:1], pv.sfis[:0]}
		vv := typeInfoLoad{pv.etypes[:1], pv.sfis[:0]}
		x.rget(rt, rtid, omitEmpty, nil, &vv)
		// ti.sfis = vv.sfis
		ti.sfiSrc, ti.sfiSort, ti.sfiNamesSort, ti.anyOmitEmpty = rgetResolveSFI(rt, vv.sfis, pv)
		pp.Put(pi)
	case reflect.Map:
		ti.elem = rt.Elem()
		ti.key = rt.Key()
	case reflect.Slice:
		ti.mbs, _ = implIntf(rt, mapBySliceTyp)
		ti.elem = rt.Elem()
	case reflect.Chan:
		ti.elem = rt.Elem()
		ti.chandir = uint8(rt.ChanDir())
	case reflect.Array, reflect.Ptr:
		ti.elem = rt.Elem()
	}
	// sfi = sfiSrc

	// insert into the sorted cache under lock, re-checking in case another
	// goroutine inserted the same rtid while we were computing ti.
	x.mu.Lock()
	sp = x.infos.load()
	if sp == nil {
		pti = &ti
		vs := []rtid2ti{{rtid, pti}}
		x.infos.store(vs)
	} else {
		idx, pti = x.find(sp, rtid)
		if pti == nil {
			pti = &ti
			// copy-on-write: build a new slice with the entry spliced in at idx
			vs := make([]rtid2ti, len(sp)+1)
			copy(vs, sp[:idx])
			copy(vs[idx+1:], sp[idx:])
			vs[idx] = rtid2ti{rtid, pti}
			x.infos.store(vs)
		}
	}
	x.mu.Unlock()
	return
}
1248
// rget walks the fields of struct type rt (recursing into inlined anonymous
// structs), appending a structFieldInfo for each encodable field to pv.sfis.
// indexstack holds the field-index path from the root struct down to rt.
func (x *TypeInfos) rget(rt reflect.Type, rtid uintptr, omitEmpty bool,
	indexstack []uint16, pv *typeInfoLoad) {
	// Read up fields and store how to access the value.
	//
	// It uses go's rules for message selectors,
	// which say that the field with the shallowest depth is selected.
	//
	// Note: we consciously use slices, not a map, to simulate a set.
	//   Typically, types have < 16 fields,
	//   and iteration using equals is faster than maps there
	flen := rt.NumField()
	if flen > (1<<maxLevelsEmbedding - 1) {
		panicv.errorf("codec: types with > %v fields are not supported - has %v fields",
			(1<<maxLevelsEmbedding - 1), flen)
	}
	// pv.sfis = make([]structFieldInfo, flen)
LOOP:
	for j, jlen := uint16(0), uint16(flen); j < jlen; j++ {
		f := rt.Field(int(j))
		fkind := f.Type.Kind()
		// skip if a func type, or is unexported, or structTag value == "-"
		switch fkind {
		case reflect.Func, reflect.Complex64, reflect.Complex128, reflect.UnsafePointer:
			continue LOOP
		}

		isUnexported := f.PkgPath != ""
		if isUnexported && !f.Anonymous {
			continue
		}
		stag := x.structTag(f.Tag)
		if stag == "-" {
			continue
		}
		var si structFieldInfo
		var parsed bool
		// if anonymous and no struct tag (or it's blank),
		// and a struct (or pointer to struct), inline it.
		if f.Anonymous && fkind != reflect.Interface {
			// ^^ redundant but ok: per go spec, an embedded pointer type cannot be to an interface
			ft := f.Type
			isPtr := ft.Kind() == reflect.Ptr
			for ft.Kind() == reflect.Ptr {
				ft = ft.Elem()
			}
			isStruct := ft.Kind() == reflect.Struct

			// Ignore embedded fields of unexported non-struct types.
			// Also, from go1.10, ignore pointers to unexported struct types
			// because unmarshal cannot assign a new struct to an unexported field.
			// See https://golang.org/issue/21357
			if (isUnexported && !isStruct) || (!allowSetUnexportedEmbeddedPtr && isUnexported && isPtr) {
				continue
			}
			doInline := stag == ""
			if !doInline {
				si.parseTag(stag)
				parsed = true
				doInline = si.encName == ""
				// doInline = si.isZero()
			}
			if doInline && isStruct {
				// if etypes contains this, don't call rget again (as fields are already seen here)
				ftid := rt2id(ft)
				// We cannot recurse forever, but we need to track other field depths.
				// So - we break if we see a type twice (not the first time).
				// This should be sufficient to handle an embedded type that refers to its
				// owning type, which then refers to its embedded type.
				processIt := true
				numk := 0
				for _, k := range pv.etypes {
					if k == ftid {
						numk++
						if numk == rgetMaxRecursion {
							processIt = false
							break
						}
					}
				}
				if processIt {
					pv.etypes = append(pv.etypes, ftid)
					indexstack2 := make([]uint16, len(indexstack)+1)
					copy(indexstack2, indexstack)
					indexstack2[len(indexstack)] = j
					// indexstack2 := append(append(make([]int, 0, len(indexstack)+4), indexstack...), j)
					x.rget(ft, ftid, omitEmpty, indexstack2, pv)
				}
				continue
			}
		}

		// after the anonymous dance: if an unexported field, skip
		if isUnexported {
			continue
		}

		if f.Name == "" {
			panic(errNoFieldNameToStructFieldInfo)
		}

		// pv.fNames = append(pv.fNames, f.Name)
		// if si.encName == "" {

		if !parsed {
			si.encName = f.Name
			si.parseTag(stag)
			parsed = true
		} else if si.encName == "" {
			si.encName = f.Name
		}
		si.fieldName = f.Name
		si.flagSet(structFieldInfoFlagReady)

		// pv.encNames = append(pv.encNames, si.encName)

		// si.ikind = int(f.Type.Kind())
		if len(indexstack) > maxLevelsEmbedding-1 {
			panicv.errorf("codec: only supports up to %v depth of embedding - type has %v depth",
				maxLevelsEmbedding-1, len(indexstack))
		}
		// record the full index path (root -> this field) for later access
		si.nis = uint8(len(indexstack)) + 1
		copy(si.is[:], indexstack)
		si.is[len(indexstack)] = j

		if omitEmpty {
			si.flagSet(structFieldInfoFlagOmitEmpty)
		}
		pv.sfis = append(pv.sfis, si)
	}
}
1379
// tiSep computes a separator byte for a field name, used when packing
// name->index entries for sfiNamesSort. Mixing the first character and the
// (masked) length into the byte keeps it outside the ascii range (192-255)
// and makes false matches during substring search less likely.
func tiSep(name string) uint8 {
	c := name[0] & 63
	l := uint8(len(name) & 63)
	return 0xfe - c - l
}
1388
// tiSep2 is the []byte analog of tiSep: the same separator computation for a
// field name supplied as a byte slice.
func tiSep2(name []byte) uint8 {
	c := name[0] & 63
	l := uint8(len(name) & 63)
	return 0xfe - c - l
}
1392
// rgetResolveSFI resolves the struct field info got from a call to rget.
// Returns a trimmed, unsorted (y) and sorted-by-encName (z) []*structFieldInfo,
// plus the packed name->index search table (ss) and whether any field is
// tagged omitempty (anyOmitEmpty).
//
// Duplicate encoded names are resolved by go's shallowest-depth rule: the
// field with the smaller nis (depth) wins; the loser is marked not-ready.
func rgetResolveSFI(rt reflect.Type, x []structFieldInfo, pv *typeInfoLoadArray) (
	y, z []*structFieldInfo, ss []byte, anyOmitEmpty bool) {
	sa := pv.sfiidx[:0]
	sn := pv.b[:]
	n := len(x)

	var xn string
	var ui uint16
	var sep byte

	for i := range x {
		ui = uint16(i)
		xn = x[i].encName // fieldName or encName? use encName for now.
		if len(xn)+2 > cap(pv.b) {
			sn = make([]byte, len(xn)+2)
		} else {
			sn = sn[:len(xn)+2]
		}
		// use a custom sep, so that misses are less frequent,
		// since the sep (first char in search) is as unique as first char in field name.
		sep = tiSep(xn)
		sn[0], sn[len(sn)-1] = sep, 0xff
		copy(sn[1:], xn)
		j := bytes.Index(sa, sn)
		if j == -1 {
			// first time we see this name: record sep|name|0xff|index(hi,lo)
			sa = append(sa, sep)
			sa = append(sa, xn...)
			sa = append(sa, 0xff, byte(ui>>8), byte(ui))
		} else {
			// duplicate name: compare depths and keep the shallower field
			index := uint16(sa[j+len(sn)+1]) | uint16(sa[j+len(sn)])<<8
			// one of them must be reset to nil,
			// and the index updated appropriately to the other one
			if x[i].nis == x[index].nis {
			} else if x[i].nis < x[index].nis {
				sa[j+len(sn)], sa[j+len(sn)+1] = byte(ui>>8), byte(ui)
				if x[index].ready() {
					x[index].flagClr(structFieldInfoFlagReady)
					n--
				}
			} else {
				if x[i].ready() {
					x[i].flagClr(structFieldInfoFlagReady)
					n--
				}
			}
		}

	}
	var w []structFieldInfo
	sharingArray := len(x) <= typeInfoLoadArraySfisLen // sharing array with typeInfoLoadArray
	if sharingArray {
		// x aliases pooled storage: copy winners out so they outlive the pool entry
		w = make([]structFieldInfo, n)
	}

	// remove all the nils (non-ready)
	y = make([]*structFieldInfo, n)
	n = 0
	var sslen int
	for i := range x {
		if !x[i].ready() {
			continue
		}
		if !anyOmitEmpty && x[i].omitEmpty() {
			anyOmitEmpty = true
		}
		if sharingArray {
			w[n] = x[i]
			y[n] = &w[n]
		} else {
			y[n] = &x[i]
		}
		sslen = sslen + len(x[i].encName) + 4
		n++
	}
	if n != len(y) {
		panicv.errorf("failure reading struct %v - expecting %d of %d valid fields, got %d",
			rt, len(y), len(x), n)
	}

	z = make([]*structFieldInfo, len(y))
	copy(z, y)
	sort.Sort(sfiSortedByEncName(z))

	sharingArray = len(sa) <= typeInfoLoadArraySfiidxLen
	if sharingArray {
		ss = make([]byte, 0, sslen)
	} else {
		ss = sa[:0] // reuse the newly made sa array if necessary
	}
	// rebuild the packed search table with indexes into the sorted slice z
	for i := range z {
		xn = z[i].encName
		sep = tiSep(xn)
		ui = uint16(i)
		ss = append(ss, sep)
		ss = append(ss, xn...)
		ss = append(ss, 0xff, byte(ui>>8), byte(ui))
	}
	return
}
1494
1495func implIntf(rt, iTyp reflect.Type) (base bool, indir bool) {
1496 return rt.Implements(iTyp), reflect.PtrTo(rt).Implements(iTyp)
1497}
1498
// isEmptyStruct is only called from isEmptyValue, and checks if a struct is empty:
//   - does it implement IsZero() bool
//   - is it comparable, and can i compare directly using ==
//   - if checkStruct, then walk through the encodable fields
//     and check if they are empty or not.
func isEmptyStruct(v reflect.Value, tinfos *TypeInfos, deref, checkStruct bool) bool {
	// v is a struct kind - no need to check again.
	// We only check isZero on a struct kind, to reduce the amount of times
	// that we lookup the rtid and typeInfo for each type as we walk the tree.

	vt := v.Type()
	rtid := rt2id(vt)
	if tinfos == nil {
		tinfos = defTypeInfos
	}
	ti := tinfos.get(rtid, vt)
	if ti.rtid == timeTypId {
		// time.Time knows its own zero value
		return rv2i(v).(time.Time).IsZero()
	}
	// prefer the pointer-receiver IsZero when the value is addressable
	if ti.isFlag(typeInfoFlagIsZeroerPtr) && v.CanAddr() {
		return rv2i(v.Addr()).(isZeroer).IsZero()
	}
	if ti.isFlag(typeInfoFlagIsZeroer) {
		return rv2i(v).(isZeroer).IsZero()
	}
	if ti.isFlag(typeInfoFlagComparable) {
		// comparable types: compare directly against the zero value
		return rv2i(v) == rv2i(reflect.Zero(vt))
	}
	if !checkStruct {
		return false
	}
	// We only care about what we can encode/decode,
	// so that is what we use to check omitEmpty.
	for _, si := range ti.sfiSrc {
		sfv, valid := si.field(v, false)
		if valid && !isEmptyValue(sfv, tinfos, deref, checkStruct) {
			return false
		}
	}
	return true
}
1540
1541// func roundFloat(x float64) float64 {
1542// t := math.Trunc(x)
1543// if math.Abs(x-t) >= 0.5 {
1544// return t + math.Copysign(1, x)
1545// }
1546// return t
1547// }
1548
// panicToErr recovers from a panic (when recoverPanicToErr is enabled) and
// converts the recovered value into *err via panicValToErr.
// Note: This method MUST be called directly from defer i.e. defer panicToErr ...
// else it seems the recover is not fully handled
func panicToErr(h errstrDecorator, err *error) {
	if recoverPanicToErr {
		if x := recover(); x != nil {
			// fmt.Printf("panic'ing with: %v\n", x)
			// debug.PrintStack()
			panicValToErr(h, x, err)
		}
	}
}
1560
// panicValToErr converts a recovered panic value v into *err.
// Well-known sentinel errors (EOF, unexpected-EOF, not-initialized) are
// bubbled up unchanged; anything else is stringified and wrapped via
// h.wrapErrstr. Note: case order matters — error, then string, then Stringer.
func panicValToErr(h errstrDecorator, v interface{}, err *error) {
	switch xerr := v.(type) {
	case nil:
	case error:
		switch xerr {
		case nil:
		case io.EOF, io.ErrUnexpectedEOF, errEncoderNotInitialized, errDecoderNotInitialized:
			// treat as special (bubble up)
			*err = xerr
		default:
			h.wrapErrstr(xerr.Error(), err)
		}
	case string:
		if xerr != "" {
			h.wrapErrstr(xerr, err)
		}
	case fmt.Stringer:
		if xerr != nil {
			h.wrapErrstr(xerr.String(), err)
		}
	default:
		h.wrapErrstr(v, err)
	}
}
1585
// isImmutableKind reports whether kind k is listed in the package-level
// immutableKindsSet table.
func isImmutableKind(k reflect.Kind) (v bool) {
	return immutableKindsSet[k]
}
1589
1590// ----
1591
// codecFnInfo carries the pre-computed per-type metadata that the encode and
// decode functions of a codecFn need at run time.
type codecFnInfo struct {
	ti    *typeInfo
	xfFn  Ext     // extension handler, if the type was registered via an extension
	xfTag uint64  // tag of the extension handler above
	seq   seqType // for chan/slice/array kinds: which sequence flavor applies
	addrD bool
	addrF bool // if addrD, this says whether decode function can take a value or a ptr
	addrE bool
	ready bool // ready to use
}
1602
// codecFn encapsulates the captured variables and the encode function.
// This way, we only do some calculations one times, and pass to the
// code block that should be called (encapsulated in a function)
// instead of executing the checks every time.
type codecFn struct {
	i  codecFnInfo
	fe func(*Encoder, *codecFnInfo, reflect.Value) // encode function
	fd func(*Decoder, *codecFnInfo, reflect.Value) // decode function
	_  [1]uint64 // padding
}
1613
// codecRtidFn maps a type id (rtid) to its cached codecFn.
type codecRtidFn struct {
	rtid uintptr
	fn   *codecFn
}
1618
// codecFner resolves and caches the encode/decode function pair (codecFn)
// for each type seen by an Encoder/Decoder.
type codecFner struct {
	// hh Handle
	h  *BasicHandle
	s  []codecRtidFn // small linear cache of resolved functions, keyed by rtid
	be bool          // handle is binary (hh.isBinary())
	js bool          // handle is a *JsonHandle
	_  [6]byte       // padding
	_  [3]uint64     // padding
}
1628
1629func (c *codecFner) reset(hh Handle) {
1630 bh := hh.getBasicHandle()
1631 // only reset iff extensions changed or *TypeInfos changed
1632 var hhSame = true &&
1633 c.h == bh && c.h.TypeInfos == bh.TypeInfos &&
1634 len(c.h.extHandle) == len(bh.extHandle) &&
1635 (len(c.h.extHandle) == 0 || &c.h.extHandle[0] == &bh.extHandle[0])
1636 if !hhSame {
1637 // c.hh = hh
1638 c.h, bh = bh, c.h // swap both
1639 _, c.js = hh.(*JsonHandle)
1640 c.be = hh.isBinary()
1641 for i := range c.s {
1642 c.s[i].fn.i.ready = false
1643 }
1644 }
1645}
1646
// get returns the codecFn for rt, computing and caching it on first use.
// checkFastpath enables the generated fastpath functions for (convertible)
// map/slice types; checkCodecSelfer enables dispatch to a type's Selfer.
//
// Dispatch precedence: Selfer, time/Raw/RawExt, registered extension,
// marshaler interfaces (binary / json / text), fastpath, then reflect.Kind.
func (c *codecFner) get(rt reflect.Type, checkFastpath, checkCodecSelfer bool) (fn *codecFn) {
	rtid := rt2id(rt)

	// linear scan of the small cache slice
	for _, x := range c.s {
		if x.rtid == rtid {
			// if rtid exists, then there's a *codenFn attached (non-nil)
			fn = x.fn
			if fn.i.ready {
				return
			}
			break
		}
	}
	var ti *typeInfo
	if fn == nil {
		fn = new(codecFn)
		if c.s == nil {
			c.s = make([]codecRtidFn, 0, 8)
		}
		c.s = append(c.s, codecRtidFn{rtid, fn})
	} else {
		// entry exists but was invalidated by reset: rebuild in place, keeping ti
		ti = fn.i.ti
		*fn = codecFn{}
		fn.i.ti = ti
		// fn.fe, fn.fd = nil, nil
	}
	fi := &(fn.i)
	fi.ready = true
	if ti == nil {
		ti = c.h.getTypeInfo(rtid, rt)
		fi.ti = ti
	}

	rk := reflect.Kind(ti.kind)

	if checkCodecSelfer && (ti.cs || ti.csp) {
		fn.fe = (*Encoder).selferMarshal
		fn.fd = (*Decoder).selferUnmarshal
		fi.addrF = true
		fi.addrD = ti.csp
		fi.addrE = ti.csp
	} else if rtid == timeTypId {
		fn.fe = (*Encoder).kTime
		fn.fd = (*Decoder).kTime
	} else if rtid == rawTypId {
		fn.fe = (*Encoder).raw
		fn.fd = (*Decoder).raw
	} else if rtid == rawExtTypId {
		fn.fe = (*Encoder).rawExt
		fn.fd = (*Decoder).rawExt
		fi.addrF = true
		fi.addrD = true
		fi.addrE = true
	} else if xfFn := c.h.getExt(rtid); xfFn != nil {
		fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext
		fn.fe = (*Encoder).ext
		fn.fd = (*Decoder).ext
		fi.addrF = true
		fi.addrD = true
		if rk == reflect.Struct || rk == reflect.Array {
			fi.addrE = true
		}
	} else if supportMarshalInterfaces && c.be && (ti.bm || ti.bmp) && (ti.bu || ti.bup) {
		fn.fe = (*Encoder).binaryMarshal
		fn.fd = (*Decoder).binaryUnmarshal
		fi.addrF = true
		fi.addrD = ti.bup
		fi.addrE = ti.bmp
	} else if supportMarshalInterfaces && !c.be && c.js && (ti.jm || ti.jmp) && (ti.ju || ti.jup) {
		//If JSON, we should check JSONMarshal before textMarshal
		fn.fe = (*Encoder).jsonMarshal
		fn.fd = (*Decoder).jsonUnmarshal
		fi.addrF = true
		fi.addrD = ti.jup
		fi.addrE = ti.jmp
	} else if supportMarshalInterfaces && !c.be && (ti.tm || ti.tmp) && (ti.tu || ti.tup) {
		fn.fe = (*Encoder).textMarshal
		fn.fd = (*Decoder).textUnmarshal
		fi.addrF = true
		fi.addrD = ti.tup
		fi.addrE = ti.tmp
	} else {
		if fastpathEnabled && checkFastpath && (rk == reflect.Map || rk == reflect.Slice) {
			if ti.pkgpath == "" { // un-named slice or map
				if idx := fastpathAV.index(rtid); idx != -1 {
					fn.fe = fastpathAV[idx].encfn
					fn.fd = fastpathAV[idx].decfn
					fi.addrD = true
					fi.addrF = false
				}
			} else {
				// use mapping for underlying type if there
				var rtu reflect.Type
				if rk == reflect.Map {
					rtu = reflect.MapOf(ti.key, ti.elem)
				} else {
					rtu = reflect.SliceOf(ti.elem)
				}
				rtuid := rt2id(rtu)
				if idx := fastpathAV.index(rtuid); idx != -1 {
					// wrap the fastpath fns with a Convert to the underlying type
					xfnf := fastpathAV[idx].encfn
					xrt := fastpathAV[idx].rt
					fn.fe = func(e *Encoder, xf *codecFnInfo, xrv reflect.Value) {
						xfnf(e, xf, xrv.Convert(xrt))
					}
					fi.addrD = true
					fi.addrF = false // meaning it can be an address(ptr) or a value
					xfnf2 := fastpathAV[idx].decfn
					fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) {
						if xrv.Kind() == reflect.Ptr {
							xfnf2(d, xf, xrv.Convert(reflect.PtrTo(xrt)))
						} else {
							xfnf2(d, xf, xrv.Convert(xrt))
						}
					}
				}
			}
		}
		if fn.fe == nil && fn.fd == nil {
			// fall back to the generic per-kind handlers
			switch rk {
			case reflect.Bool:
				fn.fe = (*Encoder).kBool
				fn.fd = (*Decoder).kBool
			case reflect.String:
				fn.fe = (*Encoder).kString
				fn.fd = (*Decoder).kString
			case reflect.Int:
				fn.fd = (*Decoder).kInt
				fn.fe = (*Encoder).kInt
			case reflect.Int8:
				fn.fe = (*Encoder).kInt8
				fn.fd = (*Decoder).kInt8
			case reflect.Int16:
				fn.fe = (*Encoder).kInt16
				fn.fd = (*Decoder).kInt16
			case reflect.Int32:
				fn.fe = (*Encoder).kInt32
				fn.fd = (*Decoder).kInt32
			case reflect.Int64:
				fn.fe = (*Encoder).kInt64
				fn.fd = (*Decoder).kInt64
			case reflect.Uint:
				fn.fd = (*Decoder).kUint
				fn.fe = (*Encoder).kUint
			case reflect.Uint8:
				fn.fe = (*Encoder).kUint8
				fn.fd = (*Decoder).kUint8
			case reflect.Uint16:
				fn.fe = (*Encoder).kUint16
				fn.fd = (*Decoder).kUint16
			case reflect.Uint32:
				fn.fe = (*Encoder).kUint32
				fn.fd = (*Decoder).kUint32
			case reflect.Uint64:
				fn.fe = (*Encoder).kUint64
				fn.fd = (*Decoder).kUint64
			case reflect.Uintptr:
				fn.fe = (*Encoder).kUintptr
				fn.fd = (*Decoder).kUintptr
			case reflect.Float32:
				fn.fe = (*Encoder).kFloat32
				fn.fd = (*Decoder).kFloat32
			case reflect.Float64:
				fn.fe = (*Encoder).kFloat64
				fn.fd = (*Decoder).kFloat64
			case reflect.Invalid:
				fn.fe = (*Encoder).kInvalid
				fn.fd = (*Decoder).kErr
			case reflect.Chan:
				fi.seq = seqTypeChan
				fn.fe = (*Encoder).kSlice
				fn.fd = (*Decoder).kSlice
			case reflect.Slice:
				fi.seq = seqTypeSlice
				fn.fe = (*Encoder).kSlice
				fn.fd = (*Decoder).kSlice
			case reflect.Array:
				fi.seq = seqTypeArray
				fn.fe = (*Encoder).kSlice
				fi.addrF = false
				fi.addrD = false
				rt2 := reflect.SliceOf(ti.elem)
				fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) {
					// decode into the array by viewing it as a slice
					d.cfer().get(rt2, true, false).fd(d, xf, xrv.Slice(0, xrv.Len()))
				}
				// fn.fd = (*Decoder).kArray
			case reflect.Struct:
				if ti.anyOmitEmpty {
					fn.fe = (*Encoder).kStruct
				} else {
					fn.fe = (*Encoder).kStructNoOmitempty
				}
				fn.fd = (*Decoder).kStruct
			case reflect.Map:
				fn.fe = (*Encoder).kMap
				fn.fd = (*Decoder).kMap
			case reflect.Interface:
				// encode: reflect.Interface are handled already by preEncodeValue
				fn.fd = (*Decoder).kInterface
				fn.fe = (*Encoder).kErr
			default:
				// reflect.Ptr and reflect.Interface are handled already by preEncodeValue
				fn.fe = (*Encoder).kErr
				fn.fd = (*Decoder).kErr
			}
		}
	}
	return
}
1856
// codecFnPooler lazily borrows a codecFner from the package pool (cfer) and
// returns it when done (alwaysAtEnd).
type codecFnPooler struct {
	cf  *codecFner
	cfp *sync.Pool // pool the codecFner was taken from
	hh  Handle     // handle used to reset the borrowed codecFner
}
1862
1863func (d *codecFnPooler) cfer() *codecFner {
1864 if d.cf == nil {
1865 var v interface{}
1866 d.cfp, v = pool.codecFner()
1867 d.cf = v.(*codecFner)
1868 d.cf.reset(d.hh)
1869 }
1870 return d.cf
1871}
1872
1873func (d *codecFnPooler) alwaysAtEnd() {
1874 if d.cf != nil {
1875 d.cfp.Put(d.cf)
1876 d.cf, d.cfp = nil, nil
1877 }
1878}
1879
1880// ----
1881
1882// these "checkOverflow" functions must be inlinable, and not call anybody.
1883// Overflow means that the value cannot be represented without wrapping/overflow.
1884// Overflow=false does not mean that the value can be represented without losing precision
1885// (especially for floating point).
1886
// checkOverflow groups the numeric overflow checks below as methods on an
// empty (zero-cost) struct.
type checkOverflow struct{}
1888
1889// func (checkOverflow) Float16(f float64) (overflow bool) {
1890// panicv.errorf("unimplemented")
1891// if f < 0 {
1892// f = -f
1893// }
1894// return math.MaxFloat32 < f && f <= math.MaxFloat64
1895// }
1896
1897func (checkOverflow) Float32(v float64) (overflow bool) {
1898 if v < 0 {
1899 v = -v
1900 }
1901 return math.MaxFloat32 < v && v <= math.MaxFloat64
1902}
1903func (checkOverflow) Uint(v uint64, bitsize uint8) (overflow bool) {
1904 if bitsize == 0 || bitsize >= 64 || v == 0 {
1905 return
1906 }
1907 if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc {
1908 overflow = true
1909 }
1910 return
1911}
1912func (checkOverflow) Int(v int64, bitsize uint8) (overflow bool) {
1913 if bitsize == 0 || bitsize >= 64 || v == 0 {
1914 return
1915 }
1916 if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc {
1917 overflow = true
1918 }
1919 return
1920}
1921func (checkOverflow) SignedInt(v uint64) (overflow bool) {
1922 //e.g. -127 to 128 for int8
1923 pos := (v >> 63) == 0
1924 ui2 := v & 0x7fffffffffffffff
1925 if pos {
1926 if ui2 > math.MaxInt64 {
1927 overflow = true
1928 }
1929 } else {
1930 if ui2 > math.MaxInt64-1 {
1931 overflow = true
1932 }
1933 }
1934 return
1935}
1936
// Float32V returns v unchanged, panicking (via panicv.errorf) if it
// overflows a float32.
func (x checkOverflow) Float32V(v float64) float64 {
	if x.Float32(v) {
		panicv.errorf("float32 overflow: %v", v)
	}
	return v
}
// UintV returns v unchanged, panicking (via panicv.errorf) if it overflows
// an unsigned integer of the given bitsize.
func (x checkOverflow) UintV(v uint64, bitsize uint8) uint64 {
	if x.Uint(v, bitsize) {
		panicv.errorf("uint64 overflow: %v", v)
	}
	return v
}
// IntV returns v unchanged, panicking (via panicv.errorf) if it overflows
// a signed integer of the given bitsize.
func (x checkOverflow) IntV(v int64, bitsize uint8) int64 {
	if x.Int(v, bitsize) {
		panicv.errorf("int64 overflow: %v", v)
	}
	return v
}
// SignedIntV converts v to int64, panicking (via panicv.errorf) if the
// conversion would overflow per SignedInt.
func (x checkOverflow) SignedIntV(v uint64) int64 {
	if x.SignedInt(v) {
		panicv.errorf("uint64 to int64 overflow: %v", v)
	}
	return int64(v)
}
1961
1962// ------------------ SORT -----------------
1963
// isNaN reports whether f is an IEEE 754 NaN value.
func isNaN(f float64) bool { return math.IsNaN(f) }
1965
1966// -----------------------
1967
// ioFlusher matches writers (e.g. bufio.Writer) that can flush buffered output.
type ioFlusher interface {
	Flush() error
}

// ioPeeker matches readers (e.g. bufio.Reader) that can look ahead without consuming.
type ioPeeker interface {
	Peek(int) ([]byte, error)
}

// ioBuffered matches readers that report how many bytes are currently buffered.
type ioBuffered interface {
	Buffered() int
}
1979
1980// -----------------------
1981
// sort.Interface implementations over slices of primitive values.
type intSlice []int64
type uintSlice []uint64

// type uintptrSlice []uintptr
type floatSlice []float64
type boolSlice []bool
type stringSlice []string

// type bytesSlice [][]byte

func (p intSlice) Len() int           { return len(p) }
func (p intSlice) Less(i, j int) bool { return p[i] < p[j] }
func (p intSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }

func (p uintSlice) Len() int           { return len(p) }
func (p uintSlice) Less(i, j int) bool { return p[i] < p[j] }
func (p uintSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }

// func (p uintptrSlice) Len() int           { return len(p) }
// func (p uintptrSlice) Less(i, j int) bool { return p[i] < p[j] }
// func (p uintptrSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }

func (p floatSlice) Len() int { return len(p) }
func (p floatSlice) Less(i, j int) bool {
	// NaN sorts before any non-NaN value
	return p[i] < p[j] || isNaN(p[i]) && !isNaN(p[j])
}
func (p floatSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }

func (p stringSlice) Len() int           { return len(p) }
func (p stringSlice) Less(i, j int) bool { return p[i] < p[j] }
func (p stringSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }

// func (p bytesSlice) Len() int           { return len(p) }
// func (p bytesSlice) Less(i, j int) bool { return bytes.Compare(p[i], p[j]) == -1 }
// func (p bytesSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }

func (p boolSlice) Len() int { return len(p) }
// false sorts before true
func (p boolSlice) Less(i, j int) bool { return !p[i] && p[j] }
func (p boolSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
2021
2022// ---------------------
2023
// Each xxxRv pairs a primitive value v with the reflect.Value r it came
// from, so a collection can be sorted by v while carrying r along.
// The xxxRvSlice types implement sort.Interface over those pairs.
type intRv struct {
	v int64
	r reflect.Value
}
type intRvSlice []intRv
type uintRv struct {
	v uint64
	r reflect.Value
}
type uintRvSlice []uintRv
type floatRv struct {
	v float64
	r reflect.Value
}
type floatRvSlice []floatRv
type boolRv struct {
	v bool
	r reflect.Value
}
type boolRvSlice []boolRv
type stringRv struct {
	v string
	r reflect.Value
}
type stringRvSlice []stringRv
type bytesRv struct {
	v []byte
	r reflect.Value
}
type bytesRvSlice []bytesRv
type timeRv struct {
	v time.Time
	r reflect.Value
}
type timeRvSlice []timeRv

func (p intRvSlice) Len() int           { return len(p) }
func (p intRvSlice) Less(i, j int) bool { return p[i].v < p[j].v }
func (p intRvSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }

func (p uintRvSlice) Len() int           { return len(p) }
func (p uintRvSlice) Less(i, j int) bool { return p[i].v < p[j].v }
func (p uintRvSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }

func (p floatRvSlice) Len() int { return len(p) }
func (p floatRvSlice) Less(i, j int) bool {
	// NaN sorts before any non-NaN value
	return p[i].v < p[j].v || isNaN(p[i].v) && !isNaN(p[j].v)
}
func (p floatRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }

func (p stringRvSlice) Len() int           { return len(p) }
func (p stringRvSlice) Less(i, j int) bool { return p[i].v < p[j].v }
func (p stringRvSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }

func (p bytesRvSlice) Len() int           { return len(p) }
func (p bytesRvSlice) Less(i, j int) bool { return bytes.Compare(p[i].v, p[j].v) == -1 }
func (p bytesRvSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }

func (p boolRvSlice) Len() int           { return len(p) }
func (p boolRvSlice) Less(i, j int) bool { return !p[i].v && p[j].v }
func (p boolRvSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }

func (p timeRvSlice) Len() int           { return len(p) }
func (p timeRvSlice) Less(i, j int) bool { return p[i].v.Before(p[j].v) }
func (p timeRvSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
2089
2090// -----------------
2091
// bytesI pairs a []byte key v with an associated interface{} value i;
// bytesISlice sorts those pairs by the byte-slice key.
type bytesI struct {
	v []byte
	i interface{}
}

type bytesISlice []bytesI

func (p bytesISlice) Len() int           { return len(p) }
func (p bytesISlice) Less(i, j int) bool { return bytes.Compare(p[i].v, p[j].v) == -1 }
func (p bytesISlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
2102
2103// -----------------
2104
// set is a tiny slice-backed collection of uintptr values. Deleted slots are
// marked with 0, so 0 is not a usable member value.
type set []uintptr

// add inserts v, returning exists=true when v was already present.
// The backing slice is lazily allocated; zeroed (deleted) slots are reused
// before growing.
func (s *set) add(v uintptr) (exists bool) {
	// *s is always nil, or has len >= 1
	x := *s
	if x == nil {
		*s = append(make([]uintptr, 0, 8), v)
		return
	}
	// typically, length will be 1. make this perform.
	if len(x) == 1 {
		switch j := x[0]; {
		case j == 0: // reuse the deleted slot
			x[0] = v
		case j == v:
			exists = true
		default:
			*s = append(x, v)
		}
		return
	}
	// check if it exists
	for _, j := range x {
		if j == v {
			exists = true
			return
		}
	}
	// try to replace a "deleted" slot
	for i, j := range x {
		if j == 0 {
			x[i] = v
			return
		}
	}
	// if unable to replace deleted slot, just append it.
	*s = append(x, v)
	return
}
2147
2148func (s *set) remove(v uintptr) (exists bool) {
2149 x := *s
2150 if len(x) == 0 {
2151 return
2152 }
2153 if len(x) == 1 {
2154 if x[0] == v {
2155 x[0] = 0
2156 }
2157 return
2158 }
2159 for i, j := range x {
2160 if j == v {
2161 exists = true
2162 x[i] = 0 // set it to 0, as way to delete it.
2163 // copy(x[i:], x[i+1:])
2164 // x = x[:len(x)-1]
2165 return
2166 }
2167 }
2168 return
2169}
2170
2171// ------
2172
2173// bitset types are better than [256]bool, because they permit the whole
2174// bitset array being on a single cache line and use less memory.
2175
2176// given x > 0 and n > 0 and x is exactly 2^n, then pos/x === pos>>n AND pos%x === pos&(x-1).
2177// consequently, pos/32 === pos>>5, pos/16 === pos>>4, pos/8 === pos>>3, pos%8 == pos&7
2178
// bitset256 is a fixed 256-bit set indexed by a byte value,
// packed into 32 bytes so it fits in a single cache line.
type bitset256 [32]byte

// isset reports whether bit pos is set.
func (x *bitset256) isset(pos byte) bool {
	return x.issetv(pos) != 0
}

// issetv returns the masked byte for bit pos: non-zero iff the bit is set.
func (x *bitset256) issetv(pos byte) byte {
	mask := byte(1) << (pos & 7)
	return x[pos>>3] & mask
}

// set turns on bit pos.
func (x *bitset256) set(pos byte) {
	mask := byte(1) << (pos & 7)
	x[pos>>3] |= mask
}
2190
2191// func (x *bitset256) unset(pos byte) {
2192// x[pos>>3] &^= (1 << (pos & 7))
2193// }
2194
// bitset128 is a fixed 128-bit set indexed by a byte value in [0,128).
type bitset128 [16]byte

// isset reports whether bit pos is set.
func (x *bitset128) isset(pos byte) bool {
	mask := byte(1) << (pos & 7)
	return x[pos>>3]&mask != 0
}

// set turns on bit pos.
func (x *bitset128) set(pos byte) {
	mask := byte(1) << (pos & 7)
	x[pos>>3] |= mask
}
2203
2204// func (x *bitset128) unset(pos byte) {
2205// x[pos>>3] &^= (1 << (pos & 7))
2206// }
2207
// bitset32 is a fixed 32-bit set indexed by a byte value in [0,32).
type bitset32 [4]byte

// isset reports whether bit pos is set.
func (x *bitset32) isset(pos byte) bool {
	mask := byte(1) << (pos & 7)
	return x[pos>>3]&mask != 0
}

// set turns on bit pos.
func (x *bitset32) set(pos byte) {
	mask := byte(1) << (pos & 7)
	x[pos>>3] |= mask
}
2216
2217// func (x *bitset32) unset(pos byte) {
2218// x[pos>>3] &^= (1 << (pos & 7))
2219// }
2220
2221// type bit2set256 [64]byte
2222
2223// func (x *bit2set256) set(pos byte, v1, v2 bool) {
2224// var pos2 uint8 = (pos & 3) << 1 // returning 0, 2, 4 or 6
2225// if v1 {
2226// x[pos>>2] |= 1 << (pos2 + 1)
2227// }
2228// if v2 {
2229// x[pos>>2] |= 1 << pos2
2230// }
2231// }
2232// func (x *bit2set256) get(pos byte) uint8 {
2233// var pos2 uint8 = (pos & 3) << 1 // returning 0, 2, 4 or 6
2234// return x[pos>>2] << (6 - pos2) >> 6 // 11000000 -> 00000011
2235// }
2236
2237// ------------
2238
// pooler centralizes the sync.Pool instances this package uses to reuse
// short-lived scratch objects during encode/decode (see init for the
// constructor installed on each pool).
type pooler struct {
	dn sync.Pool // for decNaked
	cfn sync.Pool // for codecFner
	tiload sync.Pool // for typeInfoLoadArray
	strRv8, strRv16, strRv32, strRv64, strRv128 sync.Pool // for stringRV
}
2245
2246func (p *pooler) init() {
2247 p.strRv8.New = func() interface{} { return new([8]stringRv) }
2248 p.strRv16.New = func() interface{} { return new([16]stringRv) }
2249 p.strRv32.New = func() interface{} { return new([32]stringRv) }
2250 p.strRv64.New = func() interface{} { return new([64]stringRv) }
2251 p.strRv128.New = func() interface{} { return new([128]stringRv) }
2252 p.dn.New = func() interface{} { x := new(decNaked); x.init(); return x }
2253 p.tiload.New = func() interface{} { return new(typeInfoLoadArray) }
2254 p.cfn.New = func() interface{} { return new(codecFner) }
2255}
2256
2257func (p *pooler) stringRv8() (sp *sync.Pool, v interface{}) {
2258 return &p.strRv8, p.strRv8.Get()
2259}
2260func (p *pooler) stringRv16() (sp *sync.Pool, v interface{}) {
2261 return &p.strRv16, p.strRv16.Get()
2262}
2263func (p *pooler) stringRv32() (sp *sync.Pool, v interface{}) {
2264 return &p.strRv32, p.strRv32.Get()
2265}
2266func (p *pooler) stringRv64() (sp *sync.Pool, v interface{}) {
2267 return &p.strRv64, p.strRv64.Get()
2268}
2269func (p *pooler) stringRv128() (sp *sync.Pool, v interface{}) {
2270 return &p.strRv128, p.strRv128.Get()
2271}
2272func (p *pooler) decNaked() (sp *sync.Pool, v interface{}) {
2273 return &p.dn, p.dn.Get()
2274}
2275func (p *pooler) codecFner() (sp *sync.Pool, v interface{}) {
2276 return &p.cfn, p.cfn.Get()
2277}
2278func (p *pooler) tiLoad() (sp *sync.Pool, v interface{}) {
2279 return &p.tiload, p.tiload.Get()
2280}
2281
2282// func (p *pooler) decNaked() (v *decNaked, f func(*decNaked) ) {
2283// sp := &(p.dn)
2284// vv := sp.Get()
2285// return vv.(*decNaked), func(x *decNaked) { sp.Put(vv) }
2286// }
2287// func (p *pooler) decNakedGet() (v interface{}) {
2288// return p.dn.Get()
2289// }
2290// func (p *pooler) codecFnerGet() (v interface{}) {
2291// return p.cfn.Get()
2292// }
2293// func (p *pooler) tiLoadGet() (v interface{}) {
2294// return p.tiload.Get()
2295// }
2296// func (p *pooler) decNakedPut(v interface{}) {
2297// p.dn.Put(v)
2298// }
2299// func (p *pooler) codecFnerPut(v interface{}) {
2300// p.cfn.Put(v)
2301// }
2302// func (p *pooler) tiLoadPut(v interface{}) {
2303// p.tiload.Put(v)
2304// }
2305
2306type panicHdl struct{}
2307
2308func (panicHdl) errorv(err error) {
2309 if err != nil {
2310 panic(err)
2311 }
2312}
2313
2314func (panicHdl) errorstr(message string) {
2315 if message != "" {
2316 panic(message)
2317 }
2318}
2319
2320func (panicHdl) errorf(format string, params ...interface{}) {
2321 if format != "" {
2322 if len(params) == 0 {
2323 panic(format)
2324 } else {
2325 panic(fmt.Sprintf(format, params...))
2326 }
2327 }
2328}
2329
// errstrDecorator converts an arbitrary value (e.g. one recovered from a
// panic) into an error, storing the result via the *error out-parameter.
// See errstrDecoratorDef for the default implementation.
type errstrDecorator interface {
	wrapErrstr(interface{}, *error)
}
2333
// errstrDecoratorDef is the default errstrDecorator: it stringifies v
// with the %v verb and stores the resulting error in *e.
type errstrDecoratorDef struct{}

func (errstrDecoratorDef) wrapErrstr(v interface{}, e *error) {
	*e = fmt.Errorf("%v", v)
}
2337
2338type must struct{}
2339
2340func (must) String(s string, err error) string {
2341 if err != nil {
2342 panicv.errorv(err)
2343 }
2344 return s
2345}
2346func (must) Int(s int64, err error) int64 {
2347 if err != nil {
2348 panicv.errorv(err)
2349 }
2350 return s
2351}
2352func (must) Uint(s uint64, err error) uint64 {
2353 if err != nil {
2354 panicv.errorv(err)
2355 }
2356 return s
2357}
2358func (must) Float(s float64, err error) float64 {
2359 if err != nil {
2360 panicv.errorv(err)
2361 }
2362 return s
2363}
2364
// xdebugf prints the message in red on the terminal.
// Use it in place of fmt.Printf (which it calls internally).
func xdebugf(pattern string, args ...interface{}) {
	suffix := ""
	if n := len(pattern); n > 0 && pattern[n-1] != '\n' {
		suffix = "\n" // keep the color-reset escape on its own line
	}
	fmt.Printf("\033[1;31m"+pattern+suffix+"\033[0m", args...)
}
2374
2375// func isImmutableKind(k reflect.Kind) (v bool) {
2376// return false ||
2377// k == reflect.Int ||
2378// k == reflect.Int8 ||
2379// k == reflect.Int16 ||
2380// k == reflect.Int32 ||
2381// k == reflect.Int64 ||
2382// k == reflect.Uint ||
2383// k == reflect.Uint8 ||
2384// k == reflect.Uint16 ||
2385// k == reflect.Uint32 ||
2386// k == reflect.Uint64 ||
2387// k == reflect.Uintptr ||
2388// k == reflect.Float32 ||
2389// k == reflect.Float64 ||
2390// k == reflect.Bool ||
2391// k == reflect.String
2392// }
2393
2394// func timeLocUTCName(tzint int16) string {
2395// if tzint == 0 {
2396// return "UTC"
2397// }
2398// var tzname = []byte("UTC+00:00")
2399// //tzname := fmt.Sprintf("UTC%s%02d:%02d", tzsign, tz/60, tz%60) //perf issue using Sprintf. inline below.
2400// //tzhr, tzmin := tz/60, tz%60 //faster if u convert to int first
2401// var tzhr, tzmin int16
2402// if tzint < 0 {
2403// tzname[3] = '-' // (TODO: verify. this works here)
2404// tzhr, tzmin = -tzint/60, (-tzint)%60
2405// } else {
2406// tzhr, tzmin = tzint/60, tzint%60
2407// }
2408// tzname[4] = timeDigits[tzhr/10]
2409// tzname[5] = timeDigits[tzhr%10]
2410// tzname[7] = timeDigits[tzmin/10]
2411// tzname[8] = timeDigits[tzmin%10]
2412// return string(tzname)
2413// //return time.FixedZone(string(tzname), int(tzint)*60)
2414// }