1 // Copyright 2014 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
7 // This file contains the implementation of Go's map type.
9 // A map is just a hash table. The data is arranged
10 // into an array of buckets. Each bucket contains up to
11 // 8 key/elem pairs. The low-order bits of the hash are
12 // used to select a bucket. Each bucket contains a few
13 // high-order bits of each hash to distinguish the entries
14 // within a single bucket.
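//
// For illustration only (a sketch of the arithmetic, not code from this file):
// with 2^B buckets, the bucket index and the per-entry tophash byte both come
// straight from the hash:
//
//	bucket := hash & (uintptr(1)<<B - 1)         // low-order bits
//	top := uint8(hash >> (goarch.PtrSize*8 - 8)) // high-order byte
//	if top < minTopHash {
//		top += minTopHash // small values are reserved for markers below
//	}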
16 // If more than 8 keys hash to a bucket, we chain on overflow buckets.
19 // When the hashtable grows, we allocate a new array
20 // of buckets twice as big. Buckets are incrementally
21 // copied from the old bucket array to the new bucket array.
23 // Map iterators walk through the array of buckets and
24 // return the keys in walk order (bucket #, then overflow
25 // chain order, then bucket index). To maintain iteration
26 // semantics, we never move keys within their bucket (if
27 // we did, keys might be returned 0 or 2 times). When
28 // growing the table, iterators remain iterating through the
29 // old table and must check the new table if the bucket
30 // they are iterating through has been moved ("evacuated") to the new table.
33 // Picking loadFactor: too large and we have lots of overflow
34 // buckets, too small and we waste a lot of space. I wrote
35 // a simple program to check some stats for different loads:
36 // (64-bit, 8 byte keys and elems)
37 // loadFactor  %overflow  bytes/entry  hitprobe  missprobe
38 //       4.00       2.13        20.77      3.00       4.00
39 //       4.50       4.05        17.30      3.25       4.50
40 //       5.00       6.85        14.77      3.50       5.00
41 //       5.50      10.55        12.94      3.75       5.50
42 //       6.00      15.27        11.67      4.00       6.00
43 //       6.50      20.90        10.79      4.25       6.50
44 //       7.00      27.14        10.15      4.50       7.00
45 //       7.50      34.03         9.73      4.75       7.50
46 //       8.00      41.10         9.40      5.00       8.00
48 // %overflow = percentage of buckets which have an overflow bucket
49 // bytes/entry = overhead bytes used per key/elem pair
50 // hitprobe = # of entries to check when looking up a present key
51 // missprobe = # of entries to check when looking up an absent key
53 // Keep in mind this data is for maximally loaded tables, i.e. just
54 // before the table grows. Typical tables will be somewhat less loaded.
59 "runtime/internal/atomic"
60 "runtime/internal/math"
65 // Maximum number of key/elem pairs a bucket can hold.
66 bucketCntBits = abi.MapBucketCountBits
67 bucketCnt = abi.MapBucketCount
69 // Maximum average load of a bucket that triggers growth is bucketCnt*13/16 (about 80% full)
70 // Because of minimum alignment rules, bucketCnt is known to be at least 8.
71 // Represent as loadFactorNum/loadFactorDen, to allow integer math.
73 loadFactorNum = loadFactorDen * bucketCnt * 13 / 16
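// Illustrative arithmetic: loadFactorNum/loadFactorDen == bucketCnt*13/16,
// which is 6.5 for bucketCnt == 8, so overLoadFactor (below) triggers growth
// once count exceeds about 6.5 entries per bucket -- the ~80% figure above.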
75 // Maximum key or elem size to keep inline (instead of mallocing per element).
76 // Must fit in a uint8.
77 // Fast versions cannot handle big elems - the cutoff size for
78 // fast versions in cmd/compile/internal/gc/walk.go must be at most this elem size.
79 maxKeySize = abi.MapMaxKeyBytes
80 maxElemSize = abi.MapMaxElemBytes
82 // data offset should be the size of the bmap struct, but needs to be
83 // aligned correctly. For amd64p32 this means 64-bit alignment
84 // even though pointers are 32 bit.
85 dataOffset = unsafe.Offsetof(struct {
90 // Possible tophash values. We reserve a few possibilities for special marks.
91 // Each bucket (including its overflow buckets, if any) will have either all or none of its
92 // entries in the evacuated* states (except during the evacuate() method, which only happens
93 // during map writes and thus no one else can observe the map during that time).
94 emptyRest = 0 // this cell is empty, and there are no more non-empty cells at higher indexes or overflows.
95 emptyOne = 1 // this cell is empty
96 evacuatedX = 2 // key/elem is valid. Entry has been evacuated to first half of larger table.
97 evacuatedY = 3 // same as above, but evacuated to second half of larger table.
98 evacuatedEmpty = 4 // cell is empty, bucket is evacuated.
99 minTopHash = 5 // minimum tophash for a normal filled cell.
102 iterator = 1 // there may be an iterator using buckets
103 oldIterator = 2 // there may be an iterator using oldbuckets
104 hashWriting = 4 // a goroutine is writing to the map
105 sameSizeGrow = 8 // the current map growth is to a new map of the same size
107 // sentinel bucket ID for iterator checks
108 noCheck = 1<<(8*goarch.PtrSize) - 1
111 // isEmpty reports whether the given tophash array entry represents an empty bucket entry.
112 func isEmpty(x uint8) bool {
116 // A header for a Go map.
118 // Note: the format of the hmap is also encoded in cmd/compile/internal/reflectdata/reflect.go.
119 // Make sure this stays in sync with the compiler's definition.
120 count int // # live cells == size of map. Must be first (used by len() builtin)
122 B uint8 // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
123 noverflow uint16 // approximate number of overflow buckets; see incrnoverflow for details
124 hash0 uint32 // hash seed
126 buckets unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
127 oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
128 nevacuate uintptr // progress counter for evacuation (buckets less than this have been evacuated)
130 extra *mapextra // optional fields
133 // mapextra holds fields that are not present on all maps.
134 type mapextra struct {
135 // If both key and elem do not contain pointers and are inline, then we mark bucket
136 // type as containing no pointers. This avoids scanning such maps.
137 // However, bmap.overflow is a pointer. In order to keep overflow buckets
138 // alive, we store pointers to all overflow buckets in hmap.extra.overflow and hmap.extra.oldoverflow.
139 // overflow and oldoverflow are only used if key and elem do not contain pointers.
140 // overflow contains overflow buckets for hmap.buckets.
141 // oldoverflow contains overflow buckets for hmap.oldbuckets.
142 // The indirection allows storing a pointer to the slice in hiter.
146 // nextOverflow holds a pointer to a free overflow bucket.
150 // A bucket for a Go map.
152 // tophash generally contains the top byte of the hash value
153 // for each key in this bucket. If tophash[0] < minTopHash,
154 // tophash[0] is a bucket evacuation state instead.
155 tophash [bucketCnt]uint8
156 // Followed by bucketCnt keys and then bucketCnt elems.
157 // NOTE: packing all the keys together and then all the elems together makes the
158 // code a bit more complicated than alternating key/elem/key/elem/... but it allows
159 // us to eliminate padding which would be needed for, e.g., map[int64]int8.
160 // Followed by an overflow pointer.
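// Layout sketch (illustrative; this is exactly how the accessors below
// compute their pointers): for entry i,
//
//	key  i: add(b, dataOffset + i*keysize)
//	elem i: add(b, dataOffset + bucketCnt*keysize + i*elemsize)
//	overflow pointer: add(b, bucketsize - ptrSize)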
163 // A hash iteration structure.
164 // If you modify hiter, also change cmd/compile/internal/reflectdata/reflect.go
165 // and reflect/value.go to match the layout of this structure.
167 key unsafe.Pointer // Must be in first position. Write nil to indicate iteration end (see cmd/compile/internal/walk/range.go).
168 elem unsafe.Pointer // Must be in second position (see cmd/compile/internal/walk/range.go).
171 buckets unsafe.Pointer // bucket ptr at hash_iter initialization time
172 bptr *bmap // current bucket
173 overflow *[]*bmap // keeps overflow buckets of hmap.buckets alive
174 oldoverflow *[]*bmap // keeps overflow buckets of hmap.oldbuckets alive
175 startBucket uintptr // bucket iteration started at
176 offset uint8 // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1)
177 wrapped bool // already wrapped around from end of bucket array to beginning
184 // bucketShift returns 1<<b, optimized for code generation.
185 func bucketShift(b uint8) uintptr {
186 // Masking the shift amount allows overflow checks to be elided.
187 return uintptr(1) << (b & (goarch.PtrSize*8 - 1))
190 // bucketMask returns 1<<b - 1, optimized for code generation.
191 func bucketMask(b uint8) uintptr {
192 return bucketShift(b) - 1
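// For example (illustrative), bucketShift(5) == 32 and bucketMask(5) == 31,
// the mask applied to a hash to pick one of the 2^5 buckets.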
195 // tophash calculates the tophash value for hash.
196 func tophash(hash uintptr) uint8 {
197 top := uint8(hash >> (goarch.PtrSize*8 - 8))
198 if top < minTopHash {
204 func evacuated(b *bmap) bool {
206 return h > emptyOne && h < minTopHash
209 func (b *bmap) overflow(t *maptype) *bmap {
210 return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize))
213 func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
214 *(**bmap)(add(unsafe.Pointer(b), uintptr(t.BucketSize)-goarch.PtrSize)) = ovf
217 func (b *bmap) keys() unsafe.Pointer {
218 return add(unsafe.Pointer(b), dataOffset)
221 // incrnoverflow increments h.noverflow.
222 // noverflow counts the number of overflow buckets.
223 // This is used to trigger same-size map growth.
224 // See also tooManyOverflowBuckets.
225 // To keep hmap small, noverflow is a uint16.
226 // When there are few buckets, noverflow is an exact count.
227 // When there are many buckets, noverflow is an approximate count.
228 func (h *hmap) incrnoverflow() {
229 // We trigger same-size map growth if there are
230 // as many overflow buckets as buckets.
231 // We need to be able to count to 1<<h.B.
236 // Increment with probability 1/(1<<(h.B-15)).
237 // When we reach 1<<15 - 1, we will have approximately
238 // as many overflow buckets as buckets.
239 mask := uint32(1)<<(h.B-15) - 1
240 // Example: if h.B == 18, then mask == 7,
241 // and fastrand & 7 == 0 with probability 1/8.
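// Worked example (illustrative): at h.B == 18 noverflow therefore advances
// on average once per 8 real overflow buckets. tooManyOverflowBuckets
// compares it against 1<<15, so the expected trigger point is
// 8 * (1<<15) == 1<<18 overflow buckets -- about one per regular bucket.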
242 if fastrand()&mask == 0 {
247 func (h *hmap) newoverflow(t *maptype, b *bmap) *bmap {
249 if h.extra != nil && h.extra.nextOverflow != nil {
250 // We have preallocated overflow buckets available.
251 // See makeBucketArray for more details.
252 ovf = h.extra.nextOverflow
253 if ovf.overflow(t) == nil {
254 // We're not at the end of the preallocated overflow buckets. Bump the pointer.
255 h.extra.nextOverflow = (*bmap)(add(unsafe.Pointer(ovf), uintptr(t.BucketSize)))
257 // This is the last preallocated overflow bucket.
258 // Reset the overflow pointer on this bucket,
259 // which was set to a non-nil sentinel value.
260 ovf.setoverflow(t, nil)
261 h.extra.nextOverflow = nil
264 ovf = (*bmap)(newobject(t.Bucket))
267 if t.Bucket.PtrBytes == 0 {
269 *h.extra.overflow = append(*h.extra.overflow, ovf)
271 b.setoverflow(t, ovf)
275 func (h *hmap) createOverflow() {
277 h.extra = new(mapextra)
279 if h.extra.overflow == nil {
280 h.extra.overflow = new([]*bmap)
284 func makemap64(t *maptype, hint int64, h *hmap) *hmap {
285 if int64(int(hint)) != hint {
288 return makemap(t, int(hint), h)
291 // makemap_small implements Go map creation for make(map[k]v) and
292 // make(map[k]v, hint) when hint is known to be at most bucketCnt
293 // at compile time and the map needs to be allocated on the heap.
294 func makemap_small() *hmap {
300 // makemap implements Go map creation for make(map[k]v, hint).
301 // If the compiler has determined that the map or the first bucket
302 // can be created on the stack, h and/or bucket may be non-nil.
303 // If h != nil, the map can be created directly in h.
304 // If h.buckets != nil, bucket pointed to can be used as the first bucket.
305 func makemap(t *maptype, hint int, h *hmap) *hmap {
306 mem, overflow := math.MulUintptr(uintptr(hint), t.Bucket.Size_)
307 if overflow || mem > maxAlloc {
317 // Find the size parameter B which will hold the requested # of elements.
318 // For hint < 0 overLoadFactor returns false since hint < bucketCnt.
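// Worked example (illustrative): for hint == 100, overLoadFactor(100, B)
// stays true through B == 3 (100 > 6.5*8 == 52) and turns false at
// B == 4 (100 <= 6.5*16 == 104), so the map starts with 16 buckets.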
320 for overLoadFactor(hint, B) {
325 // allocate initial hash table
326 // if B == 0, the buckets field is allocated lazily later (in mapassign)
327 // If hint is large zeroing this memory could take a while.
329 var nextOverflow *bmap
330 h.buckets, nextOverflow = makeBucketArray(t, h.B, nil)
331 if nextOverflow != nil {
332 h.extra = new(mapextra)
333 h.extra.nextOverflow = nextOverflow
340 // makeBucketArray initializes a backing array for map buckets.
341 // 1<<b is the minimum number of buckets to allocate.
342 // dirtyalloc should either be nil or a bucket array previously
343 // allocated by makeBucketArray with the same t and b parameters.
344 // If dirtyalloc is nil a new backing array will be alloced and
345 // otherwise dirtyalloc will be cleared and reused as backing array.
346 func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets unsafe.Pointer, nextOverflow *bmap) {
347 base := bucketShift(b)
349 // For small b, overflow buckets are unlikely.
350 // Avoid the overhead of the calculation.
352 // Add on the estimated number of overflow buckets
353 // required to insert the median number of elements
354 // used with this value of b.
355 nbuckets += bucketShift(b - 4)
356 sz := t.Bucket.Size_ * nbuckets
357 up := roundupsize(sz, t.Bucket.PtrBytes == 0)
359 nbuckets = up / t.Bucket.Size_
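// Worked example (illustrative): for b == 6, base == 64 and we ask for
// 64 + 1<<(6-4) == 68 buckets; roundupsize may then raise nbuckets a bit
// further so the allocation exactly fills its size class.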
363 if dirtyalloc == nil {
364 buckets = newarray(t.Bucket, int(nbuckets))
366 // dirtyalloc was previously generated by
367 // the above newarray(t.Bucket, int(nbuckets))
368 // but may not be empty.
370 size := t.Bucket.Size_ * nbuckets
371 if t.Bucket.PtrBytes != 0 {
372 memclrHasPointers(buckets, size)
374 memclrNoHeapPointers(buckets, size)
378 if base != nbuckets {
379 // We preallocated some overflow buckets.
380 // To keep the overhead of tracking these overflow buckets to a minimum,
381 // we use the convention that if a preallocated overflow bucket's overflow
382 // pointer is nil, then there are more available by bumping the pointer.
383 // We need a safe non-nil pointer for the last overflow bucket; just use buckets.
384 nextOverflow = (*bmap)(add(buckets, base*uintptr(t.BucketSize)))
385 last := (*bmap)(add(buckets, (nbuckets-1)*uintptr(t.BucketSize)))
386 last.setoverflow(t, (*bmap)(buckets))
388 return buckets, nextOverflow
391 // mapaccess1 returns a pointer to h[key]. Never returns nil, instead
392 // it will return a reference to the zero object for the elem type if
393 // the key is not in the map.
394 // NOTE: The returned pointer may keep the whole map live, so don't
395 // hold onto it for very long.
396 func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
397 if raceenabled && h != nil {
398 callerpc := getcallerpc()
399 pc := abi.FuncPCABIInternal(mapaccess1)
400 racereadpc(unsafe.Pointer(h), callerpc, pc)
401 raceReadObjectPC(t.Key, key, callerpc, pc)
403 if msanenabled && h != nil {
404 msanread(key, t.Key.Size_)
406 if asanenabled && h != nil {
407 asanread(key, t.Key.Size_)
409 if h == nil || h.count == 0 {
410 if err := mapKeyError(t, key); err != nil {
411 panic(err) // see issue 23734
413 return unsafe.Pointer(&zeroVal[0])
415 if h.flags&hashWriting != 0 {
416 fatal("concurrent map read and map write")
418 hash := t.Hasher(key, uintptr(h.hash0))
420 b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
421 if c := h.oldbuckets; c != nil {
422 if !h.sameSizeGrow() {
423 // There used to be half as many buckets; mask down one more power of two.
426 oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
427 if !evacuated(oldb) {
433 for ; b != nil; b = b.overflow(t) {
434 for i := uintptr(0); i < bucketCnt; i++ {
435 if b.tophash[i] != top {
436 if b.tophash[i] == emptyRest {
441 k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
443 k = *((*unsafe.Pointer)(k))
445 if t.Key.Equal(key, k) {
446 e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
447 if t.IndirectElem() {
448 e = *((*unsafe.Pointer)(e))
454 return unsafe.Pointer(&zeroVal[0])
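// The probe pattern above, reduced to a minimal sketch with hypothetical
// fixed-size types (exampleBucket and exampleLookup are illustrations, not
// part of the runtime): reject on the cached top byte first, and only
// compare full keys on a tophash match. Here 0 marks an empty cell, playing
// the role of emptyRest/emptyOne.
type exampleBucket struct {
	tophash  [8]uint8
	keys     [8]uint64
	elems    [8]uint64
	overflow *exampleBucket
}

func exampleLookup(b *exampleBucket, key uint64, top uint8) (uint64, bool) {
	for ; b != nil; b = b.overflow {
		for i := range b.tophash {
			if b.tophash[i] != top {
				continue // cheap filter: no key comparison needed
			}
			if b.keys[i] == key {
				return b.elems[i], true
			}
		}
	}
	return 0, false // caller would return the zero value, like mapaccess1
}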
457 func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
458 if raceenabled && h != nil {
459 callerpc := getcallerpc()
460 pc := abi.FuncPCABIInternal(mapaccess2)
461 racereadpc(unsafe.Pointer(h), callerpc, pc)
462 raceReadObjectPC(t.Key, key, callerpc, pc)
464 if msanenabled && h != nil {
465 msanread(key, t.Key.Size_)
467 if asanenabled && h != nil {
468 asanread(key, t.Key.Size_)
470 if h == nil || h.count == 0 {
471 if err := mapKeyError(t, key); err != nil {
472 panic(err) // see issue 23734
474 return unsafe.Pointer(&zeroVal[0]), false
476 if h.flags&hashWriting != 0 {
477 fatal("concurrent map read and map write")
479 hash := t.Hasher(key, uintptr(h.hash0))
481 b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
482 if c := h.oldbuckets; c != nil {
483 if !h.sameSizeGrow() {
484 // There used to be half as many buckets; mask down one more power of two.
487 oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
488 if !evacuated(oldb) {
494 for ; b != nil; b = b.overflow(t) {
495 for i := uintptr(0); i < bucketCnt; i++ {
496 if b.tophash[i] != top {
497 if b.tophash[i] == emptyRest {
502 k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
504 k = *((*unsafe.Pointer)(k))
506 if t.Key.Equal(key, k) {
507 e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
508 if t.IndirectElem() {
509 e = *((*unsafe.Pointer)(e))
515 return unsafe.Pointer(&zeroVal[0]), false
518 // mapaccessK returns both key and elem. Used by the map iterator.
519 func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
520 if h == nil || h.count == 0 {
523 hash := t.Hasher(key, uintptr(h.hash0))
525 b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
526 if c := h.oldbuckets; c != nil {
527 if !h.sameSizeGrow() {
528 // There used to be half as many buckets; mask down one more power of two.
531 oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
532 if !evacuated(oldb) {
538 for ; b != nil; b = b.overflow(t) {
539 for i := uintptr(0); i < bucketCnt; i++ {
540 if b.tophash[i] != top {
541 if b.tophash[i] == emptyRest {
546 k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
548 k = *((*unsafe.Pointer)(k))
550 if t.Key.Equal(key, k) {
551 e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
552 if t.IndirectElem() {
553 e = *((*unsafe.Pointer)(e))
562 func mapaccess1_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) unsafe.Pointer {
563 e := mapaccess1(t, h, key)
564 if e == unsafe.Pointer(&zeroVal[0]) {
570 func mapaccess2_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) (unsafe.Pointer, bool) {
571 e := mapaccess1(t, h, key)
572 if e == unsafe.Pointer(&zeroVal[0]) {
578 // Like mapaccess, but allocates a slot for the key if it is not present in the map.
579 func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
581 panic(plainError("assignment to entry in nil map"))
584 callerpc := getcallerpc()
585 pc := abi.FuncPCABIInternal(mapassign)
586 racewritepc(unsafe.Pointer(h), callerpc, pc)
587 raceReadObjectPC(t.Key, key, callerpc, pc)
590 msanread(key, t.Key.Size_)
593 asanread(key, t.Key.Size_)
595 if h.flags&hashWriting != 0 {
596 fatal("concurrent map writes")
598 hash := t.Hasher(key, uintptr(h.hash0))
600 // Set hashWriting after calling t.hasher, since t.hasher may panic,
601 // in which case we have not actually done a write.
602 h.flags ^= hashWriting
604 if h.buckets == nil {
605 h.buckets = newobject(t.Bucket) // newarray(t.Bucket, 1)
609 bucket := hash & bucketMask(h.B)
611 growWork(t, h, bucket)
613 b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
617 var insertk unsafe.Pointer
618 var elem unsafe.Pointer
621 for i := uintptr(0); i < bucketCnt; i++ {
622 if b.tophash[i] != top {
623 if isEmpty(b.tophash[i]) && inserti == nil {
624 inserti = &b.tophash[i]
625 insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
626 elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
628 if b.tophash[i] == emptyRest {
633 k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
635 k = *((*unsafe.Pointer)(k))
637 if !t.Key.Equal(key, k) {
640 // already have a mapping for key. Update it.
641 if t.NeedKeyUpdate() {
642 typedmemmove(t.Key, k, key)
644 elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
654 // Did not find mapping for key. Allocate new cell & add entry.
656 // If we hit the max load factor or we have too many overflow buckets,
657 // and we're not already in the middle of growing, start growing.
658 if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
660 goto again // Growing the table invalidates everything, so try again
664 // The current bucket and all the overflow buckets connected to it are full, allocate a new one.
665 newb := h.newoverflow(t, b)
666 inserti = &newb.tophash[0]
667 insertk = add(unsafe.Pointer(newb), dataOffset)
668 elem = add(insertk, bucketCnt*uintptr(t.KeySize))
671 // store new key/elem at insert position
673 kmem := newobject(t.Key)
674 *(*unsafe.Pointer)(insertk) = kmem
677 if t.IndirectElem() {
678 vmem := newobject(t.Elem)
679 *(*unsafe.Pointer)(elem) = vmem
681 typedmemmove(t.Key, insertk, key)
686 if h.flags&hashWriting == 0 {
687 fatal("concurrent map writes")
689 h.flags &^= hashWriting
690 if t.IndirectElem() {
691 elem = *((*unsafe.Pointer)(elem))
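// A matching sketch of the assignment path (illustrative, reusing the
// hypothetical exampleBucket above; assumes top != 0 and covers a single
// bucket -- the real code also grows the table and chains overflow buckets):
// remember the first free cell while scanning, update in place if the key
// already exists, otherwise fill the remembered cell.
func exampleInsert(b *exampleBucket, key, elem uint64, top uint8) bool {
	insert := -1
	for i := range b.tophash {
		if b.tophash[i] != top {
			if b.tophash[i] == 0 && insert < 0 {
				insert = i // first empty cell; keep scanning for the key
			}
			continue
		}
		if b.keys[i] == key {
			b.elems[i] = elem // existing mapping: update the elem only
			return true
		}
	}
	if insert < 0 {
		return false // bucket full; the runtime would chain an overflow bucket
	}
	b.tophash[insert], b.keys[insert], b.elems[insert] = top, key, elem
	return true
}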
696 func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
697 if raceenabled && h != nil {
698 callerpc := getcallerpc()
699 pc := abi.FuncPCABIInternal(mapdelete)
700 racewritepc(unsafe.Pointer(h), callerpc, pc)
701 raceReadObjectPC(t.Key, key, callerpc, pc)
703 if msanenabled && h != nil {
704 msanread(key, t.Key.Size_)
706 if asanenabled && h != nil {
707 asanread(key, t.Key.Size_)
709 if h == nil || h.count == 0 {
710 if err := mapKeyError(t, key); err != nil {
711 panic(err) // see issue 23734
715 if h.flags&hashWriting != 0 {
716 fatal("concurrent map writes")
719 hash := t.Hasher(key, uintptr(h.hash0))
721 // Set hashWriting after calling t.hasher, since t.hasher may panic,
722 // in which case we have not actually done a write (delete).
723 h.flags ^= hashWriting
725 bucket := hash & bucketMask(h.B)
727 growWork(t, h, bucket)
729 b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
733 for ; b != nil; b = b.overflow(t) {
734 for i := uintptr(0); i < bucketCnt; i++ {
735 if b.tophash[i] != top {
736 if b.tophash[i] == emptyRest {
741 k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize))
744 k2 = *((*unsafe.Pointer)(k2))
746 if !t.Key.Equal(key, k2) {
749 // Only clear key if there are pointers in it.
751 *(*unsafe.Pointer)(k) = nil
752 } else if t.Key.PtrBytes != 0 {
753 memclrHasPointers(k, t.Key.Size_)
755 e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
756 if t.IndirectElem() {
757 *(*unsafe.Pointer)(e) = nil
758 } else if t.Elem.PtrBytes != 0 {
759 memclrHasPointers(e, t.Elem.Size_)
761 memclrNoHeapPointers(e, t.Elem.Size_)
763 b.tophash[i] = emptyOne
764 // If the bucket now ends in a bunch of emptyOne states,
765 // change those to emptyRest states.
766 // It would be nice to make this a separate function, but
767 // for loops are not currently inlineable.
768 if i == bucketCnt-1 {
769 if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
773 if b.tophash[i+1] != emptyRest {
778 b.tophash[i] = emptyRest
781 break // beginning of initial bucket, we're done.
783 // Find previous bucket, continue at its last entry.
785 for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
791 if b.tophash[i] != emptyOne {
797 // Reset the hash seed to make it more difficult for attackers to
798 // repeatedly trigger hash collisions. See issue 25237.
806 if h.flags&hashWriting == 0 {
807 fatal("concurrent map writes")
809 h.flags &^= hashWriting
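// Sketch of the emptyOne -> emptyRest cleanup performed above, simplified to
// a single bucket (exampleMarkEmpty is illustrative and unused by the
// runtime; the real code also walks back across overflow buckets): after
// marking cell i empty, a trailing run of emptyOne cells is downgraded to
// emptyRest so later lookups can stop early.
func exampleMarkEmpty(tophash *[bucketCnt]uint8, i int) {
	tophash[i] = emptyOne
	if i == len(tophash)-1 || tophash[i+1] == emptyRest {
		for i >= 0 && tophash[i] == emptyOne {
			tophash[i] = emptyRest
			i--
		}
	}
}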
812 // mapiterinit initializes the hiter struct used for ranging over maps.
813 // The hiter struct pointed to by 'it' is allocated on the stack
814 // by the compiler's order pass or on the heap by reflect_mapiterinit.
815 // Both need to have zeroed hiter since the struct contains pointers.
816 func mapiterinit(t *maptype, h *hmap, it *hiter) {
817 if raceenabled && h != nil {
818 callerpc := getcallerpc()
819 racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiterinit))
823 if h == nil || h.count == 0 {
827 if unsafe.Sizeof(hiter{})/goarch.PtrSize != 12 {
828 throw("hash_iter size incorrect") // see cmd/compile/internal/reflectdata/reflect.go
832 // grab snapshot of bucket state
834 it.buckets = h.buckets
835 if t.Bucket.PtrBytes == 0 {
836 // Allocate the current slice and remember pointers to both current and old.
837 // This preserves all relevant overflow buckets alive even if
838 // the table grows and/or overflow buckets are added to the table
839 // while we are iterating.
841 it.overflow = h.extra.overflow
842 it.oldoverflow = h.extra.oldoverflow
845 // decide where to start
847 if h.B > 31-bucketCntBits {
848 r = uintptr(fastrand64())
850 r = uintptr(fastrand())
852 it.startBucket = r & bucketMask(h.B)
853 it.offset = uint8(r >> h.B & (bucketCnt - 1))
856 it.bucket = it.startBucket
858 // Remember we have an iterator.
859 // Can run concurrently with another mapiterinit().
860 if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
861 atomic.Or8(&h.flags, iterator|oldIterator)
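// Visible consequence (illustrative): because startBucket and offset are
// chosen at random, ranging over the same map twice may yield different
// orders:
//
//	m := map[string]int{"a": 1, "b": 2, "c": 3}
//	for k := range m {
//		println(k) // order varies from one range loop to the next
//	}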
867 func mapiternext(it *hiter) {
870 callerpc := getcallerpc()
871 racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapiternext))
873 if h.flags&hashWriting != 0 {
874 fatal("concurrent map iteration and map write")
880 checkBucket := it.checkBucket
884 if bucket == it.startBucket && it.wrapped {
890 if h.growing() && it.B == h.B {
891 // Iterator was started in the middle of a grow, and the grow isn't done yet.
892 // If the bucket we're looking at hasn't been filled in yet (i.e. the old
893 // bucket hasn't been evacuated) then we need to iterate through the old
894 // bucket and only return the ones that will be migrated to this bucket.
895 oldbucket := bucket & it.h.oldbucketmask()
896 b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
900 b = (*bmap)(add(it.buckets, bucket*uintptr(t.BucketSize)))
901 checkBucket = noCheck
904 b = (*bmap)(add(it.buckets, bucket*uintptr(t.BucketSize)))
905 checkBucket = noCheck
908 if bucket == bucketShift(it.B) {
914 for ; i < bucketCnt; i++ {
915 offi := (i + it.offset) & (bucketCnt - 1)
916 if isEmpty(b.tophash[offi]) || b.tophash[offi] == evacuatedEmpty {
917 // TODO: emptyRest is hard to use here, as we start iterating
918 // in the middle of a bucket. It's feasible, just tricky.
921 k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.KeySize))
923 k = *((*unsafe.Pointer)(k))
925 e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+uintptr(offi)*uintptr(t.ValueSize))
926 if checkBucket != noCheck && !h.sameSizeGrow() {
927 // Special case: iterator was started during a grow to a larger size
928 // and the grow is not done yet. We're working on a bucket whose
929 // oldbucket has not been evacuated yet. Or at least, it wasn't
930 // evacuated when we started the bucket. So we're iterating
931 // through the oldbucket, skipping any keys that will go
932 // to the other new bucket (each oldbucket expands to two
933 // buckets during a grow).
934 if t.ReflexiveKey() || t.Key.Equal(k, k) {
935 // If the item in the oldbucket is not destined for
936 // the current new bucket in the iteration, skip it.
937 hash := t.Hasher(k, uintptr(h.hash0))
938 if hash&bucketMask(it.B) != checkBucket {
942 // Hash isn't repeatable if k != k (NaNs). We need a
943 // repeatable and randomish choice of which direction
944 // to send NaNs during evacuation. We'll use the low
945 // bit of tophash to decide which way NaNs go.
946 // NOTE: this case is why we need two evacuate tophash
947 // values, evacuatedX and evacuatedY, that differ in
949 if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
954 if (b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) ||
955 !(t.ReflexiveKey() || t.Key.Equal(k, k)) {
956 // This is the golden data, we can return it.
958 // key!=key, so the entry can't be deleted or updated, so we can just return it.
959 // That's lucky for us because when key!=key we can't look it up successfully.
961 if t.IndirectElem() {
962 e = *((*unsafe.Pointer)(e))
966 // The hash table has grown since the iterator was started.
967 // The golden data for this key is now somewhere else.
968 // Check the current hash table for the data.
969 // This code handles the case where the key
970 // has been deleted, updated, or deleted and reinserted.
971 // NOTE: we need to regrab the key as it has potentially been
972 // updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
973 rk, re := mapaccessK(t, h, k)
975 continue // key has been deleted
981 if it.bptr != b { // avoid unnecessary write barrier; see issue 14921
985 it.checkBucket = checkBucket
993 // mapclear deletes all keys from a map.
994 func mapclear(t *maptype, h *hmap) {
995 if raceenabled && h != nil {
996 callerpc := getcallerpc()
997 pc := abi.FuncPCABIInternal(mapclear)
998 racewritepc(unsafe.Pointer(h), callerpc, pc)
1001 if h == nil || h.count == 0 {
1005 if h.flags&hashWriting != 0 {
1006 fatal("concurrent map writes")
1009 h.flags ^= hashWriting
1011 // Mark buckets empty, so existing iterators can be terminated, see issue #59411.
1012 markBucketsEmpty := func(bucket unsafe.Pointer, mask uintptr) {
1013 for i := uintptr(0); i <= mask; i++ {
1014 b := (*bmap)(add(bucket, i*uintptr(t.BucketSize)))
1015 for ; b != nil; b = b.overflow(t) {
1016 for i := uintptr(0); i < bucketCnt; i++ {
1017 b.tophash[i] = emptyRest
1022 markBucketsEmpty(h.buckets, bucketMask(h.B))
1023 if oldBuckets := h.oldbuckets; oldBuckets != nil {
1024 markBucketsEmpty(oldBuckets, h.oldbucketmask())
1027 h.flags &^= sameSizeGrow
1033 // Reset the hash seed to make it more difficult for attackers to
1034 // repeatedly trigger hash collisions. See issue 25237.
1035 h.hash0 = fastrand()
1037 // Keep the mapextra allocation but clear any extra information.
1039 *h.extra = mapextra{}
1042 // makeBucketArray clears the memory pointed to by h.buckets
1043 // and recovers any overflow buckets by generating them
1044 // as if h.buckets was newly alloced.
1045 _, nextOverflow := makeBucketArray(t, h.B, h.buckets)
1046 if nextOverflow != nil {
1047 // If overflow buckets are created then h.extra
1048 // will have been allocated during initial bucket creation.
1049 h.extra.nextOverflow = nextOverflow
1052 if h.flags&hashWriting == 0 {
1053 fatal("concurrent map writes")
1055 h.flags &^= hashWriting
1058 func hashGrow(t *maptype, h *hmap) {
1059 // If we've hit the load factor, get bigger.
1060 // Otherwise, there are too many overflow buckets,
1061 // so keep the same number of buckets and "grow" laterally.
1063 if !overLoadFactor(h.count+1, h.B) {
1065 h.flags |= sameSizeGrow
1067 oldbuckets := h.buckets
1068 newbuckets, nextOverflow := makeBucketArray(t, h.B+bigger, nil)
1070 flags := h.flags &^ (iterator | oldIterator)
1071 if h.flags&iterator != 0 {
1072 flags |= oldIterator
1074 // commit the grow (atomic wrt gc)
1077 h.oldbuckets = oldbuckets
1078 h.buckets = newbuckets
1082 if h.extra != nil && h.extra.overflow != nil {
1083 // Promote current overflow buckets to the old generation.
1084 if h.extra.oldoverflow != nil {
1085 throw("oldoverflow is not nil")
1087 h.extra.oldoverflow = h.extra.overflow
1088 h.extra.overflow = nil
1090 if nextOverflow != nil {
1092 h.extra = new(mapextra)
1094 h.extra.nextOverflow = nextOverflow
1097 // the actual copying of the hash table data is done incrementally
1098 // by growWork() and evacuate().
1101 // overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.
1102 func overLoadFactor(count int, B uint8) bool {
1103 return count > bucketCnt && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
1106 // tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1<<B buckets.
1107 // Note that most of these overflow buckets must be in sparse use;
1108 // if use was dense, then we'd have already triggered regular map growth.
1109 func tooManyOverflowBuckets(noverflow uint16, B uint8) bool {
1110 // If the threshold is too low, we do extraneous work.
1111 // If the threshold is too high, maps that grow and shrink can hold on to lots of unused memory.
1112 // "too many" means (approximately) as many overflow buckets as regular buckets.
1113 // See incrnoverflow for more details.
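// Worked example (illustrative): with B == 5 this reports true once noverflow
// reaches 32, i.e. roughly one overflow bucket per regular bucket; for large B
// the threshold saturates at 1<<15, matching the approximate counting in
// incrnoverflow.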
1117 // The compiler doesn't see here that B < 16; mask B to generate shorter shift code.
1118 return noverflow >= uint16(1)<<(B&15)
1121 // growing reports whether h is growing. The growth may be to the same size or bigger.
1122 func (h *hmap) growing() bool {
1123 return h.oldbuckets != nil
1126 // sameSizeGrow reports whether the current growth is to a map of the same size.
1127 func (h *hmap) sameSizeGrow() bool {
1128 return h.flags&sameSizeGrow != 0
1131 // noldbuckets calculates the number of buckets prior to the current map growth.
1132 func (h *hmap) noldbuckets() uintptr {
1134 if !h.sameSizeGrow() {
1137 return bucketShift(oldB)
1140 // oldbucketmask provides a mask that can be applied to calculate n % noldbuckets().
1141 func (h *hmap) oldbucketmask() uintptr {
1142 return h.noldbuckets() - 1
1145 func growWork(t *maptype, h *hmap, bucket uintptr) {
1146 // make sure we evacuate the oldbucket corresponding
1147 // to the bucket we're about to use
1148 evacuate(t, h, bucket&h.oldbucketmask())
1150 // evacuate one more oldbucket to make progress on growing
1152 evacuate(t, h, h.nevacuate)
1156 func bucketEvacuated(t *maptype, h *hmap, bucket uintptr) bool {
1157 b := (*bmap)(add(h.oldbuckets, bucket*uintptr(t.BucketSize)))
1161 // evacDst is an evacuation destination.
1162 type evacDst struct {
1163 b *bmap // current destination bucket
1164 i int // key/elem index into b
1165 k unsafe.Pointer // pointer to current key storage
1166 e unsafe.Pointer // pointer to current elem storage
1169 func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
1170 b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
1171 newbit := h.noldbuckets()
1173 // TODO: reuse overflow buckets instead of using new ones, if there
1174 // is no iterator using the old buckets. (If !oldIterator.)
1176 // xy contains the x and y (low and high) evacuation destinations.
1179 x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
1180 x.k = add(unsafe.Pointer(x.b), dataOffset)
1181 x.e = add(x.k, bucketCnt*uintptr(t.KeySize))
1183 if !h.sameSizeGrow() {
1184 // Only calculate y pointers if we're growing bigger.
1185 // Otherwise GC can see bad pointers.
1187 y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
1188 y.k = add(unsafe.Pointer(y.b), dataOffset)
1189 y.e = add(y.k, bucketCnt*uintptr(t.KeySize))
1192 for ; b != nil; b = b.overflow(t) {
1193 k := add(unsafe.Pointer(b), dataOffset)
1194 e := add(k, bucketCnt*uintptr(t.KeySize))
1195 for i := 0; i < bucketCnt; i, k, e = i+1, add(k, uintptr(t.KeySize)), add(e, uintptr(t.ValueSize)) {
1198 b.tophash[i] = evacuatedEmpty
1201 if top < minTopHash {
1202 throw("bad map state")
1205 if t.IndirectKey() {
1206 k2 = *((*unsafe.Pointer)(k2))
1209 if !h.sameSizeGrow() {
1210 // Compute hash to make our evacuation decision (whether we need
1211 // to send this key/elem to bucket x or bucket y).
1212 hash := t.Hasher(k2, uintptr(h.hash0))
1213 if h.flags&iterator != 0 && !t.ReflexiveKey() && !t.Key.Equal(k2, k2) {
1214 // If key != key (NaNs), then the hash could be (and probably
1215 // will be) entirely different from the old hash. Moreover,
1216 // it isn't reproducible. Reproducibility is required in the
1217 // presence of iterators, as our evacuation decision must
1218 // match whatever decision the iterator made.
1219 // Fortunately, we have the freedom to send these keys either
1220 // way. Also, tophash is meaningless for these kinds of keys.
1221 // We let the low bit of tophash drive the evacuation decision.
1222 // We recompute a new random tophash for the next level so
1223 // these keys will get evenly distributed across all buckets
1224 // after multiple grows.
1228 if hash&newbit != 0 {
1234 if evacuatedX+1 != evacuatedY || evacuatedX^1 != evacuatedY {
1235 throw("bad evacuatedN")
1238 b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY
1239 dst := &xy[useY] // evacuation destination
1241 if dst.i == bucketCnt {
1242 dst.b = h.newoverflow(t, dst.b)
1244 dst.k = add(unsafe.Pointer(dst.b), dataOffset)
1245 dst.e = add(dst.k, bucketCnt*uintptr(t.KeySize))
1247 dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
1248 if t.IndirectKey() {
1249 *(*unsafe.Pointer)(dst.k) = k2 // copy pointer
1251 typedmemmove(t.Key, dst.k, k) // copy elem
1253 if t.IndirectElem() {
1254 *(*unsafe.Pointer)(dst.e) = *(*unsafe.Pointer)(e)
1256 typedmemmove(t.Elem, dst.e, e)
1259 // These updates might push these pointers past the end of the
1260 // key or elem arrays. That's ok, as we have the overflow pointer
1261 // at the end of the bucket to protect against pointing past the
1262 // end of the bucket.
1263 dst.k = add(dst.k, uintptr(t.KeySize))
1264 dst.e = add(dst.e, uintptr(t.ValueSize))
1267 // Unlink the overflow buckets & clear key/elem to help GC.
1268 if h.flags&oldIterator == 0 && t.Bucket.PtrBytes != 0 {
1269 b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
1270 // Preserve b.tophash because the evacuation
1271 // state is maintained there.
1272 ptr := add(b, dataOffset)
1273 n := uintptr(t.BucketSize) - dataOffset
1274 memclrHasPointers(ptr, n)
1278 if oldbucket == h.nevacuate {
1279 advanceEvacuationMark(h, t, newbit)
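// The X/Y split above, as a tiny sketch (exampleEvacDest is illustrative, not
// part of the runtime): when the table doubles, an entry of old bucket i moves
// either to new bucket i ("X") or to new bucket i+newbit ("Y"), chosen by the
// hash bit that becomes significant in the larger mask.
func exampleEvacDest(hash, oldbucket, newbit uintptr) uintptr {
	if hash&newbit != 0 {
		return oldbucket + newbit // Y: second half of the doubled table
	}
	return oldbucket // X: same index as before
}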
1283 func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) {
1285 // Experiments suggest that 1024 is overkill by at least an order of magnitude.
1286 // Put it in there as a safeguard anyway, to ensure O(1) behavior.
1287 stop := h.nevacuate + 1024
1291 for h.nevacuate != stop && bucketEvacuated(t, h, h.nevacuate) {
1294 if h.nevacuate == newbit { // newbit == # of oldbuckets
1295 // Growing is all done. Free old main bucket array.
1297 // Can discard old overflow buckets as well.
1298 // If they are still referenced by an iterator,
1299 // then the iterator holds a pointer to the slice.
1301 h.extra.oldoverflow = nil
1303 h.flags &^= sameSizeGrow
1307 // Reflect stubs. Called from ../reflect/asm_*.s
1309 //go:linkname reflect_makemap reflect.makemap
1310 func reflect_makemap(t *maptype, cap int) *hmap {
1311 // Check invariants and reflect's math.
1312 if t.Key.Equal == nil {
1313 throw("runtime.reflect_makemap: unsupported map key type")
1315 if t.Key.Size_ > maxKeySize && (!t.IndirectKey() || t.KeySize != uint8(goarch.PtrSize)) ||
1316 t.Key.Size_ <= maxKeySize && (t.IndirectKey() || t.KeySize != uint8(t.Key.Size_)) {
1317 throw("key size wrong")
1319 if t.Elem.Size_ > maxElemSize && (!t.IndirectElem() || t.ValueSize != uint8(goarch.PtrSize)) ||
1320 t.Elem.Size_ <= maxElemSize && (t.IndirectElem() || t.ValueSize != uint8(t.Elem.Size_)) {
1321 throw("elem size wrong")
1323 if t.Key.Align_ > bucketCnt {
1324 throw("key align too big")
1326 if t.Elem.Align_ > bucketCnt {
1327 throw("elem align too big")
1329 if t.Key.Size_%uintptr(t.Key.Align_) != 0 {
1330 throw("key size not a multiple of key align")
1332 if t.Elem.Size_%uintptr(t.Elem.Align_) != 0 {
1333 throw("elem size not a multiple of elem align")
1336 throw("bucketsize too small for proper alignment")
1338 if dataOffset%uintptr(t.Key.Align_) != 0 {
1339 throw("need padding in bucket (key)")
1341 if dataOffset%uintptr(t.Elem.Align_) != 0 {
1342 throw("need padding in bucket (elem)")
1345 return makemap(t, cap, nil)
1348 //go:linkname reflect_mapaccess reflect.mapaccess
1349 func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
1350 elem, ok := mapaccess2(t, h, key)
1352 // reflect wants nil for a missing element
1358 //go:linkname reflect_mapaccess_faststr reflect.mapaccess_faststr
1359 func reflect_mapaccess_faststr(t *maptype, h *hmap, key string) unsafe.Pointer {
1360 elem, ok := mapaccess2_faststr(t, h, key)
1362 // reflect wants nil for a missing element
1368 //go:linkname reflect_mapassign reflect.mapassign0
1369 func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, elem unsafe.Pointer) {
1370 p := mapassign(t, h, key)
1371 typedmemmove(t.Elem, p, elem)
1374 //go:linkname reflect_mapassign_faststr reflect.mapassign_faststr0
1375 func reflect_mapassign_faststr(t *maptype, h *hmap, key string, elem unsafe.Pointer) {
1376 p := mapassign_faststr(t, h, key)
1377 typedmemmove(t.Elem, p, elem)
1380 //go:linkname reflect_mapdelete reflect.mapdelete
1381 func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
1382 mapdelete(t, h, key)
1385 //go:linkname reflect_mapdelete_faststr reflect.mapdelete_faststr
1386 func reflect_mapdelete_faststr(t *maptype, h *hmap, key string) {
1387 mapdelete_faststr(t, h, key)
1390 //go:linkname reflect_mapiterinit reflect.mapiterinit
1391 func reflect_mapiterinit(t *maptype, h *hmap, it *hiter) {
1392 mapiterinit(t, h, it)
1395 //go:linkname reflect_mapiternext reflect.mapiternext
1396 func reflect_mapiternext(it *hiter) {
1400 //go:linkname reflect_mapiterkey reflect.mapiterkey
1401 func reflect_mapiterkey(it *hiter) unsafe.Pointer {
1405 //go:linkname reflect_mapiterelem reflect.mapiterelem
1406 func reflect_mapiterelem(it *hiter) unsafe.Pointer {
1410 //go:linkname reflect_maplen reflect.maplen
1411 func reflect_maplen(h *hmap) int {
1416 callerpc := getcallerpc()
1417 racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
1422 //go:linkname reflect_mapclear reflect.mapclear
1423 func reflect_mapclear(t *maptype, h *hmap) {
1427 //go:linkname reflectlite_maplen internal/reflectlite.maplen
1428 func reflectlite_maplen(h *hmap) int {
1433 callerpc := getcallerpc()
1434 racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(reflect_maplen))
1439 const maxZero = 1024 // must match value in reflect/value.go:maxZero and cmd/compile/internal/gc/walk.go:zeroValSize
1440 var zeroVal [maxZero]byte
1442 // mapinitnoop is a no-op function known to the Go linker; if a given global
1443 // map (of the right size) is determined to be dead, the linker will
1444 // rewrite the relocation (from the package init func) from the outlined
1445 // map init function to this symbol. Defined in assembly so as to avoid
1446 // complications with instrumentation (coverage, etc).
1449 // mapclone for implementing maps.Clone
1451 //go:linkname mapclone maps.clone
1452 func mapclone(m any) any {
1454 e.data = unsafe.Pointer(mapclone2((*maptype)(unsafe.Pointer(e._type)), (*hmap)(e.data)))
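// Usage sketch (the standard library's maps.Clone reaches this code via the
// linkname above):
//
//	src := map[string]int{"a": 1}
//	dst := maps.Clone(src) // shallow copy; writes to dst do not affect src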
1458 // moveToBmap moves a bucket's entries from src to dst. It returns the destination bucket (or a new destination bucket if dst overflows)
1459 // and the position at which the next key/value will be written; pos == bucketCnt means the next write must go into an overflow bucket.
1460 func moveToBmap(t *maptype, h *hmap, dst *bmap, pos int, src *bmap) (*bmap, int) {
1461 for i := 0; i < bucketCnt; i++ {
1462 if isEmpty(src.tophash[i]) {
1466 for ; pos < bucketCnt; pos++ {
1467 if isEmpty(dst.tophash[pos]) {
1472 if pos == bucketCnt {
1473 dst = h.newoverflow(t, dst)
1477 srcK := add(unsafe.Pointer(src), dataOffset+uintptr(i)*uintptr(t.KeySize))
1478 srcEle := add(unsafe.Pointer(src), dataOffset+bucketCnt*uintptr(t.KeySize)+uintptr(i)*uintptr(t.ValueSize))
1479 dstK := add(unsafe.Pointer(dst), dataOffset+uintptr(pos)*uintptr(t.KeySize))
1480 dstEle := add(unsafe.Pointer(dst), dataOffset+bucketCnt*uintptr(t.KeySize)+uintptr(pos)*uintptr(t.ValueSize))
1482 dst.tophash[pos] = src.tophash[i]
1483 if t.IndirectKey() {
1484 *(*unsafe.Pointer)(dstK) = *(*unsafe.Pointer)(srcK)
1486 typedmemmove(t.Key, dstK, srcK)
1488 if t.IndirectElem() {
1489 *(*unsafe.Pointer)(dstEle) = *(*unsafe.Pointer)(srcEle)
1491 typedmemmove(t.Elem, dstEle, srcEle)
1499 func mapclone2(t *maptype, src *hmap) *hmap {
1500 dst := makemap(t, src.count, nil)
1501 dst.hash0 = src.hash0
1503 // Flags do not need to be copied here, just as a new map has no flags.
1509 if src.flags&hashWriting != 0 {
1510 fatal("concurrent map clone and map write")
1514 dst.buckets = newobject(t.Bucket)
1515 dst.count = src.count
1516 typedmemmove(t.Bucket, dst.buckets, src.buckets)
1522 dst.buckets = newobject(t.Bucket)
1524 dstArraySize := int(bucketShift(dst.B))
1525 srcArraySize := int(bucketShift(src.B))
1526 for i := 0; i < dstArraySize; i++ {
1527 dstBmap := (*bmap)(add(dst.buckets, uintptr(i*int(t.BucketSize))))
1529 for j := 0; j < srcArraySize; j += dstArraySize {
1530 srcBmap := (*bmap)(add(src.buckets, uintptr((i+j)*int(t.BucketSize))))
1531 for srcBmap != nil {
1532 dstBmap, pos = moveToBmap(t, dst, dstBmap, pos, srcBmap)
1533 srcBmap = srcBmap.overflow(t)
1538 if src.oldbuckets == nil {
1543 srcOldbuckets := src.oldbuckets
1544 if !src.sameSizeGrow() {
1547 oldSrcArraySize := int(bucketShift(oldB))
1549 for i := 0; i < oldSrcArraySize; i++ {
1550 srcBmap := (*bmap)(add(srcOldbuckets, uintptr(i*int(t.BucketSize))))
1551 if evacuated(srcBmap) {
1555 if oldB >= dst.B { // dst has no more bucket bits than src's old buckets
1556 dstBmap := (*bmap)(add(dst.buckets, (uintptr(i)&bucketMask(dst.B))*uintptr(t.BucketSize)))
1557 for dstBmap.overflow(t) != nil {
1558 dstBmap = dstBmap.overflow(t)
1561 for srcBmap != nil {
1562 dstBmap, pos = moveToBmap(t, dst, dstBmap, pos, srcBmap)
1563 srcBmap = srcBmap.overflow(t)
1568 for srcBmap != nil {
1569 // move from old bucket to new bucket
1570 for i := uintptr(0); i < bucketCnt; i++ {
1571 if isEmpty(srcBmap.tophash[i]) {
1575 if src.flags&hashWriting != 0 {
1576 fatal("concurrent map clone and map write")
1579 srcK := add(unsafe.Pointer(srcBmap), dataOffset+i*uintptr(t.KeySize))
1580 if t.IndirectKey() {
1581 srcK = *((*unsafe.Pointer)(srcK))
1584 srcEle := add(unsafe.Pointer(srcBmap), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize))
1585 if t.IndirectElem() {
1586 srcEle = *((*unsafe.Pointer)(srcEle))
1588 dstEle := mapassign(t, dst, srcK)
1589 typedmemmove(t.Elem, dstEle, srcEle)
1591 srcBmap = srcBmap.overflow(t)
1597 // keys for implementing maps.keys
1599 //go:linkname keys maps.keys
1600 func keys(m any, p unsafe.Pointer) {
1602 t := (*maptype)(unsafe.Pointer(e._type))
1603 h := (*hmap)(e.data)
1605 if h == nil || h.count == 0 {
1609 r := int(fastrand())
1610 offset := uint8(r >> h.B & (bucketCnt - 1))
1612 copyKeys(t, h, (*bmap)(h.buckets), s, offset)
1615 arraySize := int(bucketShift(h.B))
1616 buckets := h.buckets
1617 for i := 0; i < arraySize; i++ {
1618 bucket := (i + r) & (arraySize - 1)
1619 b := (*bmap)(add(buckets, uintptr(bucket)*uintptr(t.BucketSize)))
1620 copyKeys(t, h, b, s, offset)
1624 oldArraySize := int(h.noldbuckets())
1625 for i := 0; i < oldArraySize; i++ {
1626 bucket := (i + r) & (oldArraySize - 1)
1627 b := (*bmap)(add(h.oldbuckets, uintptr(bucket)*uintptr(t.BucketSize)))
1631 copyKeys(t, h, b, s, offset)
1637 func copyKeys(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) {
1639 for i := uintptr(0); i < bucketCnt; i++ {
1640 offi := (i + uintptr(offset)) & (bucketCnt - 1)
1641 if isEmpty(b.tophash[offi]) {
1644 if h.flags&hashWriting != 0 {
1645 fatal("concurrent map read and map write")
1647 k := add(unsafe.Pointer(b), dataOffset+offi*uintptr(t.KeySize))
1648 if t.IndirectKey() {
1649 k = *((*unsafe.Pointer)(k))
1652 fatal("concurrent map read and map write")
1654 typedmemmove(t.Key, add(s.array, uintptr(s.len)*uintptr(t.Key.Size())), k)
1661 // values for implementing maps.values
1663 //go:linkname values maps.values
1664 func values(m any, p unsafe.Pointer) {
1666 t := (*maptype)(unsafe.Pointer(e._type))
1667 h := (*hmap)(e.data)
1668 if h == nil || h.count == 0 {
1672 r := int(fastrand())
1673 offset := uint8(r >> h.B & (bucketCnt - 1))
1675 copyValues(t, h, (*bmap)(h.buckets), s, offset)
1678 arraySize := int(bucketShift(h.B))
1679 buckets := h.buckets
1680 for i := 0; i < arraySize; i++ {
1681 bucket := (i + r) & (arraySize - 1)
1682 b := (*bmap)(add(buckets, uintptr(bucket)*uintptr(t.BucketSize)))
1683 copyValues(t, h, b, s, offset)
1687 oldArraySize := int(h.noldbuckets())
1688 for i := 0; i < oldArraySize; i++ {
1689 bucket := (i + r) & (oldArraySize - 1)
1690 b := (*bmap)(add(h.oldbuckets, uintptr(bucket)*uintptr(t.BucketSize)))
1694 copyValues(t, h, b, s, offset)
1700 func copyValues(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) {
1702 for i := uintptr(0); i < bucketCnt; i++ {
1703 offi := (i + uintptr(offset)) & (bucketCnt - 1)
1704 if isEmpty(b.tophash[offi]) {
1708 if h.flags&hashWriting != 0 {
1709 fatal("concurrent map read and map write")
1712 ele := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+offi*uintptr(t.ValueSize))
1713 if t.IndirectElem() {
1714 ele = *((*unsafe.Pointer)(ele))
1717 fatal("concurrent map read and map write")
1719 typedmemmove(t.Elem, add(s.array, uintptr(s.len)*uintptr(t.Elem.Size())), ele)