// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"unsafe"
)
func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
			if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.ValueSize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}
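// A note on the offset arithmetic above: a fast32 bucket starts with the
// tophash header (dataOffset bytes), then bucketCnt (8) uint32 keys packed
// together, then bucketCnt elements, then the overflow pointer. So slot i's
// element starts at dataOffset + 8*4 + i*ValueSize. Rough worked example
// (illustrative values only): for a map[uint32]uint64, ValueSize is 8, so
// slot 3's element sits 32 + 24 = 56 bytes past dataOffset.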
func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
			if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.ValueSize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}
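// Illustrative sketch (user code, not part of the runtime): the compiler
// routes lookups on maps with 4-byte keys, such as map[uint32]V or
// map[int32]V, to these fast32 helpers. Roughly:
//
//	m := map[uint32]string{1: "one"}
//	v := m[1]      // lowered to a mapaccess1_fast32 call
//	v, ok := m[1]  // lowered to a mapaccess2_fast32 call
//
// Both helpers return a pointer to the element (mapaccess2 also reports
// whether the key was found); the compiled code loads the value through it.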
func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
		panic(plainError("assignment to entry in nil map"))
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast32))
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
	// Set hashWriting after calling t.hasher for consistency with mapassign.
	h.flags ^= hashWriting
	if h.buckets == nil {
		h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
	bucket := hash & bucketMask(h.B)
		growWork_fast32(t, h, bucket)
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
	var insertk unsafe.Pointer
		for i := uintptr(0); i < bucketCnt; i++ {
			if isEmpty(b.tophash[i]) {
				if b.tophash[i] == emptyRest {
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
	// Did not find mapping for key. Allocate new cell & add entry.
	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		goto again // Growing the table invalidates everything, so try again
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks
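	// (inserti is always < bucketCnt here, so the &(bucketCnt-1) mask leaves it
	// unchanged; it exists only so the compiler can prove the index is in range
	// for the fixed-size tophash array and drop the bounds check.)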
	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
	// store new key at insert position
	*(*uint32)(insertk) = key
	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.ValueSize))
	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	h.flags &^= hashWriting
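// Illustrative sketch (user code, not part of the runtime): an assignment like
//
//	m := make(map[uint32]int)
//	m[42] = 1
//
// compiles to a mapassign_fast32 call that returns a pointer to the element
// slot; the compiled code then stores 1 through that pointer, so the runtime
// never sees the assigned value itself. The mapassign_fast32ptr variant below
// handles maps whose keys are pointer types that are only 4 bytes wide (i.e.
// on 32-bit platforms), where the key must be written as a pointer, with a
// write barrier, so the garbage collector can see it.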
func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
		panic(plainError("assignment to entry in nil map"))
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast32))
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
	// Set hashWriting after calling t.hasher for consistency with mapassign.
	h.flags ^= hashWriting
	if h.buckets == nil {
		h.buckets = newobject(t.Bucket) // newarray(t.bucket, 1)
	bucket := hash & bucketMask(h.B)
		growWork_fast32(t, h, bucket)
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
	var insertk unsafe.Pointer
		for i := uintptr(0); i < bucketCnt; i++ {
			if isEmpty(b.tophash[i]) {
				if b.tophash[i] == emptyRest {
			k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*4)))
	// Did not find mapping for key. Allocate new cell & add entry.
	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		goto again // Growing the table invalidates everything, so try again
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks
	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
	// store new key at insert position
	*(*unsafe.Pointer)(insertk) = key
	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.ValueSize))
	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	h.flags &^= hashWriting
func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_fast32))
	if h == nil || h.count == 0 {
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
	// Set hashWriting after calling t.hasher for consistency with mapdelete
	h.flags ^= hashWriting
	bucket := hash & bucketMask(h.B)
		growWork_fast32(t, h, bucket)
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
			if key != *(*uint32)(k) || isEmpty(b.tophash[i]) {
			// Only clear key if there are pointers in it.
			// This can only happen if pointers are 32 bit
			// wide as 64 bit pointers do not fit into a 32 bit key.
			if goarch.PtrSize == 4 && t.Key.PtrBytes != 0 {
				// The key must be a pointer as we checked pointers are
				// 32 bits wide and the key is 32 bits wide also.
				*(*unsafe.Pointer)(k) = nil
			e := add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.ValueSize))
			if t.Elem.PtrBytes != 0 {
				memclrHasPointers(e, t.Elem.Size_)
				memclrNoHeapPointers(e, t.Elem.Size_)
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
			if i == bucketCnt-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
				if b.tophash[i+1] != emptyRest {
				b.tophash[i] = emptyRest
						break // beginning of initial bucket, we're done.
					// Find previous bucket, continue at its last entry.
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
				if b.tophash[i] != emptyOne {
			// Reset the hash seed to make it more difficult for attackers to
			// repeatedly trigger hash collisions. See issue 25237.
	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	h.flags &^= hashWriting
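// Rough worked example for the emptyOne/emptyRest cleanup above (illustrative
// only): suppose the deleted entry occupies the last still-live slot of its
// bucket chain, with every later slot already emptyRest. The slot is first
// marked emptyOne; the code then sees that everything after it is emptyRest
// and walks backwards, turning this slot, and any run of emptyOne slots
// immediately before it, into emptyRest, so future lookups can stop scanning
// as soon as they hit an emptyRest mark.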
func growWork_fast32(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_fast32(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_fast32(t, h, h.nevacuate)
	}
}
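// Growth is incremental: no single operation rehashes the whole table. Each
// assisting write or delete evacuates the old bucket it is about to touch
// and, via the h.growing() branch above, at most one additional old bucket
// (h.nevacuate), so a grow finishes after roughly one pass of writes over the
// old buckets.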
func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets. (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, bucketCnt*4)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, bucketCnt*4)
		}
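		// (Each evacDst tracks a destination: dst.b is the current destination
		// bucket, dst.i the next free slot in it, and dst.k/dst.e the addresses
		// where the next key and element will be written.)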
		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, bucketCnt*4)
			for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 4), add(e, uintptr(t.ValueSize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.Hasher(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, bucketCnt*4)
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				if goarch.PtrSize == 4 && t.Key.PtrBytes != 0 && writeBarrier.enabled {
					// Write with a write barrier.
					*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
				} else {
					*(*uint32)(dst.k) = *(*uint32)(k)
				}

				typedmemmove(t.Elem, dst.e, e)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays. That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 4)
				dst.e = add(dst.e, uintptr(t.ValueSize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.Bucket.PtrBytes != 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.BucketSize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}
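// Rough worked example for the x/y split (illustrative numbers only): when a
// table grows from 8 to 16 buckets, newbit is 8. An entry that lived in old
// bucket 3 is rehashed here and sent to new bucket 3 (the x destination) if
// hash&8 == 0, or to new bucket 3+8 = 11 (the y destination) if hash&8 != 0.
// For a same-size grow every entry stays in x, which is why the y pointers are
// never set up in that case.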