path: root/libgo/go/reflect/type.go
Diffstat (limited to 'libgo/go/reflect/type.go')
-rw-r--r--  libgo/go/reflect/type.go  429
1 file changed, 200 insertions, 229 deletions
diff --git a/libgo/go/reflect/type.go b/libgo/go/reflect/type.go
index 97b986a7bba..664d9717a06 100644
--- a/libgo/go/reflect/type.go
+++ b/libgo/go/reflect/type.go
@@ -18,6 +18,8 @@ package reflect
import (
"strconv"
"sync"
+ "unicode"
+ "unicode/utf8"
"unsafe"
)
@@ -258,6 +260,8 @@ const (
// It is embedded in other, public struct types, but always
// with a unique tag like `reflect:"array"` or `reflect:"ptr"`
// so that code cannot convert from, say, *arrayType to *ptrType.
+//
+// rtype must be kept in sync with ../runtime/type.go:/^type._type.
type rtype struct {
size uintptr
ptrdata uintptr // size of memory prefix holding all pointers
@@ -516,79 +520,52 @@ func (t *rtype) pointers() bool { return t.kind&kindNoPointers == 0 }
func (t *rtype) common() *rtype { return t }
-func (t *uncommonType) Method(i int) (m Method) {
- if t == nil || i < 0 || i >= len(t.methods) {
- panic("reflect: Method index out of range")
- }
- found := false
- for mi := range t.methods {
- if t.methods[mi].pkgPath == nil {
- if i == 0 {
- i = mi
- found = true
- break
- }
- i--
- }
- }
- if !found {
- panic("reflect: Method index out of range")
- }
+var methodCache sync.Map // map[*rtype][]method
- p := &t.methods[i]
- if p.name != nil {
- m.Name = *p.name
- }
- fl := flag(Func)
- if p.pkgPath != nil {
- m.PkgPath = *p.pkgPath
- fl |= flagStickyRO
+func (t *rtype) exportedMethods() []method {
+ methodsi, found := methodCache.Load(t)
+ if found {
+ return methodsi.([]method)
}
- mt := p.typ
- m.Type = toType(mt)
- x := new(unsafe.Pointer)
- *x = unsafe.Pointer(&p.tfn)
- m.Func = Value{mt, unsafe.Pointer(x), fl | flagIndir | flagMethodFn}
- m.Index = i
- return
-}
-func (t *uncommonType) NumMethod() int {
- if t == nil {
- return 0
+ ut := t.uncommon()
+ if ut == nil {
+ return nil
}
- c := 0
- for i := range t.methods {
- if t.methods[i].pkgPath == nil {
- c++
+ allm := ut.methods
+ allExported := true
+ for _, m := range allm {
+ if m.pkgPath != nil {
+ allExported = false
+ break
}
}
- return c
-}
-
-func (t *uncommonType) MethodByName(name string) (m Method, ok bool) {
- if t == nil {
- return
- }
- var p *method
- for i := range t.methods {
- p = &t.methods[i]
- if p.pkgPath == nil && p.name != nil && *p.name == name {
- return t.Method(i), true
+ var methods []method
+ if allExported {
+ methods = allm
+ } else {
+ methods = make([]method, 0, len(allm))
+ for _, m := range allm {
+ if m.pkgPath == nil {
+ methods = append(methods, m)
+ }
}
+ methods = methods[:len(methods):len(methods)]
}
- return
+
+ methodsi, _ = methodCache.LoadOrStore(t, methods)
+ return methodsi.([]method)
}
-// TODO(rsc): gc supplies these, but they are not
-// as efficient as they could be: they have commonType
-// as the receiver instead of *rtype.
func (t *rtype) NumMethod() int {
if t.Kind() == Interface {
tt := (*interfaceType)(unsafe.Pointer(t))
return tt.NumMethod()
}
- return t.uncommonType.NumMethod()
+ if t.uncommonType == nil {
+ return 0 // avoid methodCache synchronization
+ }
+ return len(t.exportedMethods())
}
func (t *rtype) Method(i int) (m Method) {
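
The exportedMethods change above swaps per-call filtering for a compute-once cache: the unexported methods are filtered out a single time and the result is published through sync.Map, so later callers take a lock-free fast path. A minimal standalone sketch of that pattern, using a hypothetical filter over strings in place of reflect's internal method slice:

    package main

    import (
        "fmt"
        "sync"
    )

    var exportedCache sync.Map // map[string][]string

    // exportedOf keeps only names starting with an upper-case ASCII letter,
    // caching the result per key. Racing goroutines may each compute the
    // slice, but LoadOrStore guarantees one canonical value is kept.
    func exportedOf(key string, names []string) []string {
        if v, ok := exportedCache.Load(key); ok {
            return v.([]string)
        }
        out := make([]string, 0, len(names))
        for _, n := range names {
            if n != "" && n[0] >= 'A' && n[0] <= 'Z' {
                out = append(out, n)
            }
        }
        out = out[:len(out):len(out)] // drop spare capacity, as the diff does
        v, _ := exportedCache.LoadOrStore(key, out)
        return v.([]string)
    }

    func main() {
        fmt.Println(exportedOf("T", []string{"Foo", "bar", "Baz"})) // [Foo Baz]
    }
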
@@ -596,7 +573,22 @@ func (t *rtype) Method(i int) (m Method) {
tt := (*interfaceType)(unsafe.Pointer(t))
return tt.Method(i)
}
- return t.uncommonType.Method(i)
+ methods := t.exportedMethods()
+ if i < 0 || i >= len(methods) {
+ panic("reflect: Method index out of range")
+ }
+ p := methods[i]
+ if p.name != nil {
+ m.Name = *p.name
+ }
+ fl := flag(Func)
+ mt := p.typ
+ m.Type = toType(mt)
+ x := new(unsafe.Pointer)
+ *x = unsafe.Pointer(&p.tfn)
+ m.Func = Value{mt, unsafe.Pointer(x), fl | flagIndir | flagMethodFn}
+ m.Index = i
+ return m
}
func (t *rtype) MethodByName(name string) (m Method, ok bool) {
@@ -604,7 +596,17 @@ func (t *rtype) MethodByName(name string) (m Method, ok bool) {
tt := (*interfaceType)(unsafe.Pointer(t))
return tt.MethodByName(name)
}
- return t.uncommonType.MethodByName(name)
+ ut := t.uncommon()
+ if ut == nil {
+ return Method{}, false
+ }
+ for i := range ut.methods {
+ p := &ut.methods[i]
+ if p.pkgPath == nil && p.name != nil && *p.name == name {
+ return t.Method(i), true
+ }
+ }
+ return Method{}, false
}
func (t *rtype) PkgPath() string {
@@ -983,12 +985,11 @@ func (t *structType) FieldByNameFunc(match func(string) bool) (result StructFiel
visited[t] = true
for i := range t.fields {
f := &t.fields[i]
- // Find name and type for field f.
+ // Find name and (for anonymous field) type for field f.
fname := *f.name
var ntyp *rtype
if f.anon() {
// Anonymous field of type T or *T.
- // Name taken from type.
ntyp = f.typ
if ntyp.Kind() == Ptr {
ntyp = ntyp.Elem().common()
@@ -1072,10 +1073,7 @@ func TypeOf(i interface{}) Type {
}
// ptrMap is the cache for PtrTo.
-var ptrMap struct {
- sync.RWMutex
- m map[*rtype]*ptrType
-}
+var ptrMap sync.Map // map[*rtype]*ptrType
// PtrTo returns the pointer type with element t.
// For example, if t represents type Foo, PtrTo(t) represents *Foo.
@@ -1089,24 +1087,8 @@ func (t *rtype) ptrTo() *rtype {
}
// Check the cache.
- ptrMap.RLock()
- if m := ptrMap.m; m != nil {
- if p := m[t]; p != nil {
- ptrMap.RUnlock()
- return &p.rtype
- }
- }
- ptrMap.RUnlock()
-
- ptrMap.Lock()
- if ptrMap.m == nil {
- ptrMap.m = make(map[*rtype]*ptrType)
- }
- p := ptrMap.m[t]
- if p != nil {
- // some other goroutine won the race and created it
- ptrMap.Unlock()
- return &p.rtype
+ if pi, ok := ptrMap.Load(t); ok {
+ return &pi.(*ptrType).rtype
}
s := "*" + *t.string
@@ -1115,9 +1097,9 @@ func (t *rtype) ptrTo() *rtype {
r, ok := canonicalType[s]
canonicalTypeLock.RUnlock()
if ok {
- ptrMap.m[t] = (*ptrType)(unsafe.Pointer(r.(*rtype)))
- ptrMap.Unlock()
- return r.(*rtype)
+ p := (*ptrType)(unsafe.Pointer(r.(*rtype)))
+ pi, _ := ptrMap.LoadOrStore(t, p)
+ return &pi.(*ptrType).rtype
}
// Create a new ptrType starting with the description
@@ -1143,11 +1125,10 @@ func (t *rtype) ptrTo() *rtype {
pp.elem = t
q := canonicalize(&pp.rtype)
- p = (*ptrType)(unsafe.Pointer(q.(*rtype)))
+ p := (*ptrType)(unsafe.Pointer(q.(*rtype)))
- ptrMap.m[t] = p
- ptrMap.Unlock()
- return &p.rtype
+ pi, _ := ptrMap.LoadOrStore(t, p)
+ return &pi.(*ptrType).rtype
}
// fnv1 incorporates the list of bytes into the hash x using the FNV-1 hash function.
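
Both cache paths in ptrTo now end in LoadOrStore rather than Store: under a race, several goroutines may each build a candidate *ptrType, but only the first published value wins and the losers adopt it, preserving pointer identity for cached types. A self-contained sketch of that canonicalization guarantee, with a hypothetical box type standing in for *ptrType:

    package main

    import (
        "fmt"
        "sync"
    )

    type box struct{ v int }

    var ptrCache sync.Map // map[int]*box

    func canonical(k int) *box {
        if bi, ok := ptrCache.Load(k); ok {
            return bi.(*box)
        }
        b := &box{v: k} // candidate; several goroutines may build one
        bi, _ := ptrCache.LoadOrStore(k, b)
        return bi.(*box) // always the single published value
    }

    func main() {
        var wg sync.WaitGroup
        for i := 0; i < 8; i++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                if canonical(42) != canonical(42) {
                    panic("two identities for one key")
                }
            }()
        }
        wg.Wait()
        fmt.Println("one canonical value per key")
    }
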
@@ -1396,11 +1377,8 @@ func haveIdenticalUnderlyingType(T, V *rtype, cmpTags bool) bool {
return false
}
-// The lookupCache caches ChanOf, MapOf, and SliceOf lookups.
-var lookupCache struct {
- sync.RWMutex
- m map[cacheKey]*rtype
-}
+// The lookupCache caches ArrayOf, ChanOf, MapOf, and SliceOf lookups.
+var lookupCache sync.Map // map[cacheKey]*rtype
// A cacheKey is the key for use in the lookupCache.
// Four values describe any of the types we are looking for:
@@ -1412,48 +1390,15 @@ type cacheKey struct {
extra uintptr
}
-// cacheGet looks for a type under the key k in the lookupCache.
-// If it finds one, it returns that type.
-// If not, it returns nil with the cache locked.
-// The caller is expected to use cachePut to unlock the cache.
-func cacheGet(k cacheKey) Type {
- lookupCache.RLock()
- t := lookupCache.m[k]
- lookupCache.RUnlock()
- if t != nil {
- return t
- }
-
- lookupCache.Lock()
- t = lookupCache.m[k]
- if t != nil {
- lookupCache.Unlock()
- return t
- }
-
- if lookupCache.m == nil {
- lookupCache.m = make(map[cacheKey]*rtype)
- }
-
- return nil
-}
-
-// cachePut stores the given type in the cache, unlocks the cache,
-// and returns the type. It is expected that the cache is locked
-// because cacheGet returned nil.
-func cachePut(k cacheKey, t *rtype) Type {
- t = toType(t).common()
- lookupCache.m[k] = t
- lookupCache.Unlock()
- return t
-}
-
// The funcLookupCache caches FuncOf lookups.
// FuncOf does not share the common lookupCache since cacheKey is not
// sufficient to represent functions unambiguously.
var funcLookupCache struct {
- sync.RWMutex
- m map[uint32][]*rtype // keyed by hash calculated in FuncOf
+ sync.Mutex // Guards stores (but not loads) on m.
+
+ // m is a map[uint32][]*rtype keyed by the hash calculated in FuncOf.
+ // Elements of m are append-only and thus safe for concurrent reading.
+ m sync.Map
}
// ChanOf returns the channel type with the given direction and element type.
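
The funcLookupCache comment above describes a pattern worth spelling out: loads go straight to the sync.Map with no lock, the Mutex serializes writers only, and the cached slices are append-only so a reader can never observe a half-updated entry. A standalone sketch of the same discipline, using strings keyed by a uint32 hash as stand-ins for *rtype:

    package main

    import (
        "fmt"
        "sync"
    )

    var cache struct {
        sync.Mutex          // guards stores (but not loads) on m
        m          sync.Map // map[uint32][]string; slices are append-only
    }

    func lookupOrAdd(hash uint32, want string) string {
        // Fast path: lock-free read, as in FuncOf's first cache probe.
        if vi, ok := cache.m.Load(hash); ok {
            for _, v := range vi.([]string) {
                if v == want {
                    return v
                }
            }
        }
        // Slow path: re-check under the lock, then publish a longer slice.
        cache.Lock()
        defer cache.Unlock()
        var vs []string
        if vi, ok := cache.m.Load(hash); ok {
            vs = vi.([]string)
            for _, v := range vs {
                if v == want {
                    return v
                }
            }
        }
        cache.m.Store(hash, append(vs, want))
        return want
    }

    func main() {
        fmt.Println(lookupOrAdd(1, "func(int) bool"))
        fmt.Println(lookupOrAdd(1, "func(int) bool")) // hits the fast path
    }
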
@@ -1466,13 +1411,12 @@ func ChanOf(dir ChanDir, t Type) Type {
// Look in cache.
ckey := cacheKey{Chan, typ, nil, uintptr(dir)}
- if ch := cacheGet(ckey); ch != nil {
- return ch
+ if ch, ok := lookupCache.Load(ckey); ok {
+ return ch.(*rtype)
}
// This restriction is imposed by the gc compiler and the runtime.
if typ.size >= 1<<16 {
- lookupCache.Unlock()
panic("reflect.ChanOf: element size too large")
}
@@ -1481,7 +1425,6 @@ func ChanOf(dir ChanDir, t Type) Type {
var s string
switch dir {
default:
- lookupCache.Unlock()
panic("reflect.ChanOf: invalid dir")
case SendDir:
s = "chan<- " + *typ.string
@@ -1515,7 +1458,8 @@ func ChanOf(dir ChanDir, t Type) Type {
ch.uncommonType = nil
ch.ptrToThis = nil
- return cachePut(ckey, &ch.rtype)
+ ti, _ := lookupCache.LoadOrStore(ckey, &ch.rtype)
+ return ti.(Type)
}
func ismapkey(*rtype) bool // implemented in runtime
@@ -1536,8 +1480,8 @@ func MapOf(key, elem Type) Type {
// Look in cache.
ckey := cacheKey{Map, ktyp, etyp, 0}
- if mt := cacheGet(ckey); mt != nil {
- return mt
+ if mt, ok := lookupCache.Load(ckey); ok {
+ return mt.(Type)
}
// Look in known types.
@@ -1576,7 +1520,8 @@ func MapOf(key, elem Type) Type {
mt.reflexivekey = isReflexive(ktyp)
mt.needkeyupdate = needKeyUpdate(ktyp)
- return cachePut(ckey, &mt.rtype)
+ ti, _ := lookupCache.LoadOrStore(ckey, &mt.rtype)
+ return ti.(Type)
}
// FuncOf returns the function type with the given argument and result types.
@@ -1625,25 +1570,32 @@ func FuncOf(in, out []Type, variadic bool) Type {
ft.dotdotdot = variadic
// Look in cache.
- funcLookupCache.RLock()
- for _, t := range funcLookupCache.m[hash] {
- if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
- funcLookupCache.RUnlock()
- return t
+ if ts, ok := funcLookupCache.m.Load(hash); ok {
+ for _, t := range ts.([]*rtype) {
+ if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
+ return t
+ }
}
}
- funcLookupCache.RUnlock()
// Not in cache, lock and retry.
funcLookupCache.Lock()
defer funcLookupCache.Unlock()
- if funcLookupCache.m == nil {
- funcLookupCache.m = make(map[uint32][]*rtype)
+ if ts, ok := funcLookupCache.m.Load(hash); ok {
+ for _, t := range ts.([]*rtype) {
+ if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
+ return t
+ }
+ }
}
- for _, t := range funcLookupCache.m[hash] {
- if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
- return t
+
+ addToCache := func(tt *rtype) Type {
+ var rts []*rtype
+ if rti, ok := funcLookupCache.m.Load(hash); ok {
+ rts = rti.([]*rtype)
}
+ funcLookupCache.m.Store(hash, append(rts, tt))
+ return tt
}
str := funcStr(ft)
@@ -1652,10 +1604,7 @@ func FuncOf(in, out []Type, variadic bool) Type {
ft.string = &str
ft.uncommonType = nil
ft.ptrToThis = nil
-
- funcLookupCache.m[hash] = append(funcLookupCache.m[hash], &ft.rtype)
-
- return toType(&ft.rtype)
+ return addToCache(&ft.rtype)
}
// funcStr builds a string representation of a funcType.
@@ -1771,9 +1720,6 @@ func bucketOf(ktyp, etyp *rtype) *rtype {
// Prepare GC data if any.
// A bucket is at most bucketSize*(1+maxKeySize+maxValSize)+2*ptrSize bytes,
// or 2072 bytes, or 259 pointer-size words, or 33 bytes of pointer bitmap.
- // Normally the enforced limit on pointer maps is 16 bytes,
- // but larger ones are acceptable, 33 bytes isn't too big,
- // and it's easier to generate a pointer bitmap than a GC program.
// Note that since the key and value are known to be <= 128 bytes,
// they're guaranteed to have bitmaps instead of GC programs.
var gcdata *byte
@@ -1812,7 +1758,7 @@ func bucketOf(ktyp, etyp *rtype) *rtype {
panic("reflect: unexpected GC program in MapOf")
}
kmask := (*[16]byte)(unsafe.Pointer(ktyp.gcdata))
- for i := uintptr(0); i < ktyp.size/ptrSize; i++ {
+ for i := uintptr(0); i < ktyp.ptrdata/ptrSize; i++ {
if (kmask[i/8]>>(i%8))&1 != 0 {
for j := uintptr(0); j < bucketSize; j++ {
word := base + j*ktyp.size/ptrSize + i
@@ -1830,7 +1776,7 @@ func bucketOf(ktyp, etyp *rtype) *rtype {
panic("reflect: unexpected GC program in MapOf")
}
emask := (*[16]byte)(unsafe.Pointer(etyp.gcdata))
- for i := uintptr(0); i < etyp.size/ptrSize; i++ {
+ for i := uintptr(0); i < etyp.ptrdata/ptrSize; i++ {
if (emask[i/8]>>(i%8))&1 != 0 {
for j := uintptr(0); j < bucketSize; j++ {
word := base + j*etyp.size/ptrSize + i
@@ -1871,8 +1817,8 @@ func SliceOf(t Type) Type {
// Look in cache.
ckey := cacheKey{Slice, typ, nil, 0}
- if slice := cacheGet(ckey); slice != nil {
- return slice
+ if slice, ok := lookupCache.Load(ckey); ok {
+ return slice.(Type)
}
// Look in known types.
@@ -1892,17 +1838,44 @@ func SliceOf(t Type) Type {
slice.uncommonType = nil
slice.ptrToThis = nil
- return cachePut(ckey, &slice.rtype)
+ ti, _ := lookupCache.LoadOrStore(ckey, &slice.rtype)
+ return ti.(Type)
}
// The structLookupCache caches StructOf lookups.
// StructOf does not share the common lookupCache since we need to pin
// the memory associated with *structTypeFixedN.
var structLookupCache struct {
- sync.RWMutex
- m map[uint32][]interface {
- common() *rtype
- } // keyed by hash calculated in StructOf
+ sync.Mutex // Guards stores (but not loads) on m.
+
+ // m is a map[uint32][]Type keyed by the hash calculated in StructOf.
+ // Elements in m are append-only and thus safe for concurrent reading.
+ m sync.Map
+}
+
+// isLetter returns true if a given 'rune' is classified as a Letter.
+func isLetter(ch rune) bool {
+ return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= utf8.RuneSelf && unicode.IsLetter(ch)
+}
+
+// isValidFieldName checks if a string is a valid (struct) field name or not.
+//
+// According to the language spec, a field name should be an identifier.
+//
+// identifier = letter { letter | unicode_digit } .
+// letter = unicode_letter | "_" .
+func isValidFieldName(fieldName string) bool {
+ for i, c := range fieldName {
+ if i == 0 && !isLetter(c) {
+ return false
+ }
+
+ if !(isLetter(c) || unicode.IsDigit(c)) {
+ return false
+ }
+ }
+
+ return len(fieldName) > 0
}
// StructOf returns the struct type containing fields.
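
For illustration, a few sample verdicts from the new validator; isLetter and isValidFieldName are copied verbatim from the diff above so the sketch runs standalone:

    package main

    import (
        "fmt"
        "unicode"
        "unicode/utf8"
    )

    // Copied from the diff above for a self-contained example.
    func isLetter(ch rune) bool {
        return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= utf8.RuneSelf && unicode.IsLetter(ch)
    }

    func isValidFieldName(fieldName string) bool {
        for i, c := range fieldName {
            if i == 0 && !isLetter(c) {
                return false
            }
            if !(isLetter(c) || unicode.IsDigit(c)) {
                return false
            }
        }
        return len(fieldName) > 0
    }

    func main() {
        for _, name := range []string{"X", "_x1", "Ünicode", "1x", "", "a-b"} {
            fmt.Printf("%q -> %v\n", name, isValidFieldName(name))
        }
        // "X", "_x1", and "Ünicode" are identifiers; "1x" starts with a
        // digit, "" fails the length check, and '-' is not a letter/digit.
    }
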
@@ -1930,6 +1903,12 @@ func StructOf(fields []StructField) Type {
lastzero := uintptr(0)
repr = append(repr, "struct {"...)
for i, field := range fields {
+ if field.Name == "" {
+ panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no name")
+ }
+ if !isValidFieldName(field.Name) {
+ panic("reflect.StructOf: field " + strconv.Itoa(i) + " has invalid name")
+ }
if field.Type == nil {
panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no type")
}
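
A short usage sketch of the new up-front checks: StructOf now panics on missing or invalid field names with the messages added above. This uses only the public reflect API:

    package main

    import (
        "fmt"
        "reflect"
    )

    func main() {
        // Valid: an exported identifier with a concrete type.
        t := reflect.StructOf([]reflect.StructField{
            {Name: "A", Type: reflect.TypeOf(int(0))},
        })
        fmt.Println(t) // struct { A int }

        // Invalid: no name at all; recover the documented panic.
        defer func() { fmt.Println("recovered:", recover()) }()
        reflect.StructOf([]reflect.StructField{{Type: reflect.TypeOf(0)}})
        // recovered: reflect.StructOf: field 0 has no name
    }
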
@@ -1960,30 +1939,29 @@ func StructOf(fields []StructField) Type {
} else {
name = ft.String()
}
- // TODO(sbinet) check for syntactically impossible type names?
switch f.typ.Kind() {
case Interface:
ift := (*interfaceType)(unsafe.Pointer(ft))
if len(ift.methods) > 0 {
- panic("reflect.StructOf: embedded field with methods not supported")
+ panic("reflect.StructOf: embedded field with methods not implemented")
}
case Ptr:
ptr := (*ptrType)(unsafe.Pointer(ft))
if unt := ptr.uncommon(); unt != nil {
if len(unt.methods) > 0 {
- panic("reflect.StructOf: embedded field with methods not supported")
+ panic("reflect.StructOf: embedded field with methods not implemented")
}
}
if unt := ptr.elem.uncommon(); unt != nil {
if len(unt.methods) > 0 {
- panic("reflect.StructOf: embedded field with methods not supported")
+ panic("reflect.StructOf: embedded field with methods not implemented")
}
}
default:
if unt := ft.uncommon(); unt != nil {
if len(unt.methods) > 0 {
- panic("reflect.StructOf: embedded field with methods not supported")
+ panic("reflect.StructOf: embedded field with methods not implemented")
}
}
}
@@ -2044,30 +2022,35 @@ func StructOf(fields []StructField) Type {
*typ = *prototype
typ.fields = fs
- // Look in cache
- structLookupCache.RLock()
- for _, st := range structLookupCache.m[hash] {
- t := st.common()
- if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
- structLookupCache.RUnlock()
- return t
+ // Look in cache.
+ if ts, ok := structLookupCache.m.Load(hash); ok {
+ for _, st := range ts.([]Type) {
+ t := st.common()
+ if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
+ return t
+ }
}
}
- structLookupCache.RUnlock()
- // not in cache, lock and retry
+ // Not in cache, lock and retry.
structLookupCache.Lock()
defer structLookupCache.Unlock()
- if structLookupCache.m == nil {
- structLookupCache.m = make(map[uint32][]interface {
- common() *rtype
- })
+ if ts, ok := structLookupCache.m.Load(hash); ok {
+ for _, st := range ts.([]Type) {
+ t := st.common()
+ if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
+ return t
+ }
+ }
}
- for _, st := range structLookupCache.m[hash] {
- t := st.common()
- if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
- return t
+
+ addToCache := func(t Type) Type {
+ var ts []Type
+ if ti, ok := structLookupCache.m.Load(hash); ok {
+ ts = ti.([]Type)
}
+ structLookupCache.m.Store(hash, append(ts, t))
+ return t
}
typ.string = &str
@@ -2172,24 +2155,19 @@ func StructOf(fields []StructField) Type {
typ.uncommonType = nil
typ.ptrToThis = nil
- structLookupCache.m[hash] = append(structLookupCache.m[hash], typ)
- return &typ.rtype
+ return addToCache(&typ.rtype)
}
func runtimeStructField(field StructField) structField {
- var name *string
- if field.Name == "" {
- t := field.Type.(*rtype)
- if t.Kind() == Ptr {
- t = t.Elem().(*rtype)
- }
- } else if field.PkgPath == "" {
- s := field.Name
- name = &s
- b0 := s[0]
- if ('a' <= b0 && b0 <= 'z') || b0 == '_' {
- panic("reflect.StructOf: field \"" + field.Name + "\" is unexported but has no PkgPath")
- }
+ if field.PkgPath != "" {
+ panic("reflect.StructOf: StructOf does not allow unexported fields")
+ }
+
+ // Best-effort check for misuse.
+ // Since PkgPath is empty, not much harm done if Unicode lowercase slips through.
+ c := field.Name[0]
+ if 'a' <= c && c <= 'z' || c == '_' {
+ panic("reflect.StructOf: field \"" + field.Name + "\" is unexported but missing PkgPath")
}
offsetAnon := uintptr(0)
@@ -2197,24 +2175,18 @@ func runtimeStructField(field StructField) structField {
offsetAnon |= 1
}
- var pkgPath *string
- if field.PkgPath != "" {
- s := field.PkgPath
- pkgPath = &s
- // This could work with gccgo but we panic to be
- // compatible with gc.
- panic("reflect: creating a name with a package path is not supported")
- }
+ s := field.Name
+ name := &s
var tag *string
if field.Tag != "" {
- s := string(field.Tag)
- tag = &s
+ st := string(field.Tag)
+ tag = &st
}
return structField{
name: name,
- pkgPath: pkgPath,
+ pkgPath: nil,
typ: field.Type.common(),
tag: tag,
offsetAnon: offsetAnon,
@@ -2257,15 +2229,11 @@ const maxPtrmaskBytes = 2048
// ArrayOf panics.
func ArrayOf(count int, elem Type) Type {
typ := elem.(*rtype)
- // call SliceOf here as it calls cacheGet/cachePut.
- // ArrayOf also calls cacheGet/cachePut and thus may modify the state of
- // the lookupCache mutex.
- slice := SliceOf(elem)
// Look in cache.
ckey := cacheKey{Array, typ, nil, uintptr(count)}
- if array := cacheGet(ckey); array != nil {
- return array
+ if array, ok := lookupCache.Load(ckey); ok {
+ return array.(Type)
}
// Look in known types.
@@ -2287,9 +2255,11 @@ func ArrayOf(count int, elem Type) Type {
array.elem = typ
array.ptrToThis = nil
- max := ^uintptr(0) / typ.size
- if uintptr(count) > max {
- panic("reflect.ArrayOf: array size would exceed virtual address space")
+ if typ.size > 0 {
+ max := ^uintptr(0) / typ.size
+ if uintptr(count) > max {
+ panic("reflect.ArrayOf: array size would exceed virtual address space")
+ }
}
array.size = typ.size * uintptr(count)
if count > 0 && typ.ptrdata != 0 {
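
The guard added above first special-cases size zero, since the old unconditional division ^uintptr(0) / typ.size panics for zero-sized element types such as struct{}, and then rejects counts whose product with the element size would wrap the address space. The same arithmetic in isolation, as a hedged helper:

    package main

    import "fmt"

    // arraySizeOK reports whether count elements of the given size fit in
    // a uintptr without overflow; a sketch of the guard used by ArrayOf.
    func arraySizeOK(size, count uintptr) bool {
        if size == 0 {
            return true // zero-sized elements: any count fits
        }
        return count <= ^uintptr(0)/size
    }

    func main() {
        fmt.Println(arraySizeOK(0, 1<<62)) // true: struct{}-like elements
        fmt.Println(arraySizeOK(8, 1<<61)) // false on 64-bit: 8*2^61 wraps
    }
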
@@ -2299,7 +2269,7 @@ func ArrayOf(count int, elem Type) Type {
array.fieldAlign = typ.fieldAlign
array.uncommonType = nil
array.len = uintptr(count)
- array.slice = slice.(*rtype)
+ array.slice = SliceOf(elem).(*rtype)
array.kind &^= kindNoPointers
switch {
@@ -2413,7 +2383,8 @@ func ArrayOf(count int, elem Type) Type {
}
}
- return cachePut(ckey, &array.rtype)
+ ti, _ := lookupCache.LoadOrStore(ckey, &array.rtype)
+ return ti.(Type)
}
func appendVarint(x []byte, v uintptr) []byte {
@@ -2466,7 +2437,7 @@ func ifaceIndir(t *rtype) bool {
return t.kind&kindDirectIface == 0
}
-// Layout matches runtime.BitVector (well enough).
+// Layout matches runtime.gobitvector (well enough).
type bitVector struct {
n uint32 // number of bits
data []byte