[dev.garbage] all: merge dev.cc (493ad916c3b1) into dev.garbage
diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index d73d1ba6a613865bed394273f401071d26836bd8..e9fec7bb142ae2a9a569ec00eecf9424a7f818c9 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -140,14 +140,14 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
                        // Allocate a new maxTinySize block.
                        s = c.alloc[tinySizeClass]
                        v := s.freelist
-                       if v == nil {
+                       if v.ptr() == nil {
                                systemstack(func() {
                                        mCache_Refill(c, tinySizeClass)
                                })
                                s = c.alloc[tinySizeClass]
                                v = s.freelist
                        }
-                       s.freelist = v.next
+                       s.freelist = v.ptr().next
                        s.ref++
                        //TODO: prefetch v.next
                        x = unsafe.Pointer(v)
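
Editor's note: the freelist now holds a gclinkptr, an integer-typed address rather than a raw pointer, which is why nil checks and next-links go through ptr(). A minimal sketch of the types this implies (names follow the runtime's mcache code of this era; simplified here):

package sketch // illustration only, not the runtime source

import "unsafe"

// gclinkptr is the address of a free-list block, typed as an integer
// so the garbage collector does not treat it as a live pointer.
type gclinkptr uintptr

// gclink is the header of a block on a free list; next threads the list.
type gclink struct {
	next gclinkptr
}

// ptr converts the address back to a usable *gclink.
// A zero gclinkptr converts to nil, hence the v.ptr() == nil test above.
func (p gclinkptr) ptr() *gclink {
	return (*gclink)(unsafe.Pointer(p))
}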
@@ -170,19 +170,19 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
                        size = uintptr(class_to_size[sizeclass])
                        s = c.alloc[sizeclass]
                        v := s.freelist
-                       if v == nil {
+                       if v.ptr() == nil {
                                systemstack(func() {
                                        mCache_Refill(c, int32(sizeclass))
                                })
                                s = c.alloc[sizeclass]
                                v = s.freelist
                        }
-                       s.freelist = v.next
+                       s.freelist = v.ptr().next
                        s.ref++
                        //TODO: prefetch
                        x = unsafe.Pointer(v)
                        if flags&flagNoZero == 0 {
-                               v.next = nil
+                               v.ptr().next = 0
                                if size > 2*ptrSize && ((*[2]uintptr)(x))[1] != 0 {
                                        memclr(unsafe.Pointer(v), size)
                                }
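
Editor's note: the zeroing path above relies on a convention: a free block keeps its list link in word 0 and is otherwise expected to be zero, so after clearing the link, a nonzero word 1 is a cheap signal that the block still holds stale data. A hypothetical helper restating that check (not runtime API):

// needsFullClear reports whether a reused block must be memclr'd:
// word 0 (the old freelist link) is cleared unconditionally, and a
// dirty word 1 means the rest of the block cannot be trusted either.
func needsFullClear(block []uintptr) bool {
	block[0] = 0
	return len(block) > 2 && block[1] != 0
}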
@@ -241,6 +241,8 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
                        masksize = masksize * pointersPerByte / 8 // 4 bits per word
                        masksize++                                // unroll flag in the beginning
                        if masksize > maxGCMask && typ.gc[1] != 0 {
+                               // write barriers have not been updated to deal with this case yet.
+                               gothrow("maxGCMask too small for now")
                                // If the mask is too large, unroll the program directly
                                // into the GC bitmap. It's 7 times slower than copying
                                // from the pre-unrolled mask, but saves 1/16 of type size
@@ -295,6 +297,17 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
                }
        }
 marked:
+
+       // During mark termination, allocate black: all of the new
+       // object's slots hold nil, so no scanning is needed, but this
+       // may race with the concurrent marker, so set the mark bit
+       // atomically if such a race is possible.
+       if gcphase == _GCmarktermination {
+               systemstack(func() {
+                       gcmarknewobject_m(uintptr(x))
+               })
+       }
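
Editor's note: allocating black during _GCmarktermination means marking the object at birth, so the finishing collector treats it as live without scanning it. A hypothetical stand-in for gcmarknewobject_m, which this diff does not show:

import "sync/atomic"

// markNewObject sets the object's mark bit with a CAS loop so a
// racing marker updating the same bitmap word cannot lose the bit.
func markNewObject(word *uint32, bit uint32) {
	for {
		old := atomic.LoadUint32(word)
		if old&bit != 0 ||
			atomic.CompareAndSwapUint32(word, old, old|bit) {
			return
		}
	}
}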
+
        if raceenabled {
                racemalloc(x, size)
        }
@@ -328,13 +341,43 @@ marked:
                }
        }
 
-       if memstats.heap_alloc >= memstats.next_gc {
+       if memstats.heap_alloc >= memstats.next_gc/2 {
                gogc(0)
        }
 
        return x
 }
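
Editor's note: lowering the trigger from next_gc to next_gc/2 starts a cycle with headroom: with a 4 MB goal, for example, collection now begins once heap_alloc crosses 2 MB, leaving the other half of the budget for allocation that happens while the concurrent scan and mark phases run.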
 
+func loadPtrMask(typ *_type) []uint8 {
+       var ptrmask *uint8
+       nptr := (uintptr(typ.size) + ptrSize - 1) / ptrSize
+       if typ.kind&kindGCProg != 0 {
+               masksize := nptr
+               if masksize%2 != 0 {
+                       masksize *= 2 // repeated
+               }
+               masksize = masksize * pointersPerByte / 8 // 4 bits per word
+               masksize++                                // unroll flag in the beginning
+               if masksize > maxGCMask && typ.gc[1] != 0 {
+                       // write barriers have not been updated to deal with this case yet.
+                       gothrow("maxGCMask too small for now")
+               }
+               ptrmask = (*uint8)(unsafe.Pointer(uintptr(typ.gc[0])))
+               // Check whether the program is already unrolled
+               // by checking if the unroll flag byte is set
+               maskword := uintptr(atomicloadp(unsafe.Pointer(ptrmask)))
+               if *(*uint8)(unsafe.Pointer(&maskword)) == 0 {
+                       systemstack(func() {
+                               unrollgcprog_m(typ)
+                       })
+               }
+               ptrmask = (*uint8)(add(unsafe.Pointer(ptrmask), 1)) // skip the unroll flag byte
+       } else {
+               ptrmask = (*uint8)(unsafe.Pointer(typ.gc[0])) // pointer to unrolled mask
+       }
+       return (*[1 << 30]byte)(unsafe.Pointer(ptrmask))[:(nptr+1)/2]
+}
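
Editor's note: loadPtrMask returns the unrolled pointer mask as packed nibbles, 4 bits per word and two words per byte, hence the (nptr+1)/2 length. A hypothetical reader for the returned slice (illustration only):

// ptrMaskBits extracts the 4-bit entry for the given word index:
// even words sit in the low nibble of each byte, odd words in the high.
func ptrMaskBits(mask []uint8, word uintptr) uint8 {
	b := mask[word/2]
	if word%2 != 0 {
		b >>= 4
	}
	return b & 0xf
}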
+
 // implementation of new builtin
 func newobject(typ *_type) unsafe.Pointer {
        flags := uint32(0)
@@ -429,7 +472,21 @@ func gogc(force int32) {
        mp = acquirem()
        mp.gcing = 1
        releasem(mp)
+
        systemstack(stoptheworld)
+       systemstack(finishsweep_m) // finish sweep before we start concurrent scan.
+       if true {                  // To turn on concurrent scan and mark, set to true...
+               systemstack(starttheworld)
+               // Do a concurrent heap scan before we stop the world.
+               systemstack(gcscan_m)
+               systemstack(stoptheworld)
+               systemstack(gcinstallmarkwb_m)
+               systemstack(starttheworld)
+               systemstack(gcmark_m)
+               systemstack(stoptheworld)
+               systemstack(gcinstalloffwb_m)
+       }
+
        if mp != acquirem() {
                gothrow("gogc: rescheduled")
        }
@@ -445,17 +502,21 @@ func gogc(force int32) {
        if debug.gctrace > 1 {
                n = 2
        }
+       eagersweep := force >= 2
        for i := 0; i < n; i++ {
                if i > 0 {
                        startTime = nanotime()
                }
                // switch to g0, call gc, then switch back
-               eagersweep := force >= 2
                systemstack(func() {
                        gc_m(startTime, eagersweep)
                })
        }
 
+       systemstack(func() {
+               gccheckmark_m(startTime, eagersweep)
+       })
+
        // all done
        mp.gcing = 0
        semrelease(&worldsema)
@@ -470,6 +531,14 @@ func gogc(force int32) {
        }
 }
 
+func GCcheckmarkenable() {
+       systemstack(gccheckmarkenable_m)
+}
+
+func GCcheckmarkdisable() {
+       systemstack(gccheckmarkdisable_m)
+}
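
Editor's note: GCcheckmarkenable and GCcheckmarkdisable expose the new checkmark verification mode, which re-runs marking to cross-check the concurrent mark's results. A sketch of how a test might use them, assuming this runtime build:

runtime.GCcheckmarkenable()
runtime.GC() // full collection, verified by the checkmark pass
runtime.GCcheckmarkdisable()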
+
 // GC runs a garbage collection.
 func GC() {
        gogc(2)