// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"unsafe"
)

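// map_fast32.go holds the map fast paths specialized for 4-byte keys.
// The compiler is expected to lower operations on maps whose key is a
// 32-bit scalar (for example map[uint32]V or map[int32]V) to the helpers
// below instead of the generic routines in map.go. As an informal,
// illustrative sketch only (the statement-to-call mapping is not a spec):
//
//	m := make(map[uint32]string)
//	m[7] = "x"    // mapassign_fast32
//	v := m[7]     // mapaccess1_fast32
//	v, ok := m[7] // mapaccess2_fast32
//	delete(m, 7)  // mapdelete_fast32
//	_, _ = v, ok
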
func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess1_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
			if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.ValueSize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

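// mapaccess2_fast32 is the lookup used for the comma-ok form, v, ok := m[k]:
// it performs the same bucket scan as mapaccess1_fast32 but also reports
// whether the key was present.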
func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapaccess2_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.BucketSize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.BucketSize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
			if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.ValueSize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}

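// mapassign_fast32 returns a pointer to the element slot for key, inserting
// the key if it is not already present. It scans the bucket chain while
// remembering the first empty slot seen; if the key is not found and the map
// is over the load factor (or has too many overflow buckets), it grows the
// table and retries before inserting into the remembered slot.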
func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast32))
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}
	hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.Hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.Bucket) // newarray(t.Bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast32(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					inserti = i
					insertb = b
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			inserti = i
			insertb = b
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
	// store new key at insert position
	*(*uint32)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.ValueSize))
	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}

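// mapassign_fast32ptr mirrors mapassign_fast32 for maps whose 4-byte key is
// itself a pointer, which can only arise when pointers are 32 bits wide.
// The key is compared and stored as an unsafe.Pointer rather than a uint32,
// so the key write is a pointer store as far as the garbage collector is
// concerned.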
func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapassign_fast32))
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}
	hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.Hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.Bucket) // newarray(t.Bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast32(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))

	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if isEmpty(b.tophash[i]) {
				if insertb == nil {
					inserti = i
					insertb = b
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			inserti = i
			insertb = b
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
	// store new key at insert position
	*(*unsafe.Pointer)(insertk) = key

	h.count++

done:
	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.ValueSize))
	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}

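// mapdelete_fast32 removes key from the map, if present. After clearing the
// entry it marks the slot emptyOne and, when the slot ends a run of trailing
// empty slots, walks backwards converting emptyOne marks to emptyRest so
// later scans of the bucket chain can stop early.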
func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, abi.FuncPCABIInternal(mapdelete_fast32))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		fatal("concurrent map writes")
	}

	hash := t.Hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling t.Hasher for consistency with mapdelete.
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast32(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.BucketSize)))
	bOrig := b
search:
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
			if key != *(*uint32)(k) || isEmpty(b.tophash[i]) {
				continue
			}
			// Only clear the key if there are pointers in it.
			// This can only happen when pointers are 32 bits wide,
			// since a 64-bit pointer does not fit into a 32-bit key.
			if goarch.PtrSize == 4 && t.Key.PtrBytes != 0 {
				// The key must be a pointer as we checked pointers are
				// 32 bits wide and the key is 32 bits wide also.
				*(*unsafe.Pointer)(k) = nil
			}
			e := add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.ValueSize))
			if t.Elem.PtrBytes != 0 {
				memclrHasPointers(e, t.Elem.Size_)
			} else {
				memclrNoHeapPointers(e, t.Elem.Size_)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
			if i == bucketCnt-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = bucketCnt - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			// Reset the hash seed to make it more difficult for attackers to
			// repeatedly trigger hash collisions. See issue 25237.
			if h.count == 0 {
				h.hash0 = fastrand()
			}
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		fatal("concurrent map writes")
	}
	h.flags &^= hashWriting
}

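// growWork_fast32 does a bounded amount of evacuation work while a grow is
// in progress: it evacuates the old bucket that the current operation maps
// to, plus at most one more, so growth completes incrementally.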
func growWork_fast32(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_fast32(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_fast32(t, h, h.nevacuate)
	}
}

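// evacuate_fast32 copies old bucket oldbucket (and its overflow chain) into
// the new bucket array. When the table is doubling, each entry is rehashed
// and routed to either the x destination (same bucket index) or the y
// destination (index + newbit), depending on the newly significant hash bit.
// For example, if the table grows from 4 to 8 buckets then newbit == 4, and
// an entry in old bucket 1 goes to new bucket 1 when hash&4 == 0 and to new
// bucket 5 otherwise.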
func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets.  (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, bucketCnt*4)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, bucketCnt*4)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, bucketCnt*4)
			for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 4), add(e, uintptr(t.ValueSize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.Hasher(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, bucketCnt*4)
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				if goarch.PtrSize == 4 && t.Key.PtrBytes != 0 && writeBarrier.enabled {
					// Write with a write barrier.
					*(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k)
				} else {
					*(*uint32)(dst.k) = *(*uint32)(k)
				}

				typedmemmove(t.Elem, dst.e, e)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays.  That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 4)
				dst.e = add(dst.e, uintptr(t.ValueSize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.Bucket.PtrBytes != 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.BucketSize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}
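
// The hashWriting checks in the functions above are what surface as fatal
// runtime errors in user programs. A minimal racy sketch (illustrative only,
// and not guaranteed to trip the check on any given run) that can produce
// "concurrent map read and map write" or "concurrent map writes":
//
//	m := make(map[uint32]int)
//	go func() {
//		for i := uint32(0); ; i++ {
//			m[i] = int(i) // mapassign_fast32 sets hashWriting
//		}
//	}()
//	for {
//		_ = m[0] // mapaccess1_fast32 may observe hashWriting and call fatal
//	}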