[dev.typeparams] runtime: simplify defer record allocation
author    Cherry Mui <cherryyz@google.com>
Tue, 8 Jun 2021 22:45:18 +0000 (18:45 -0400)
committer Cherry Mui <cherryyz@google.com>
Fri, 11 Jun 2021 18:33:07 +0000 (18:33 +0000)
Now that deferred functions are always argumentless and defer
records no longer carry arguments, the defer record can be a
fixed size (just the _defer struct). This lets us simplify the
allocation of defer records: specifically, we can remove the
defer size classes and the pools of different-sized defers.
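
As a rough illustration of the scheme after this change, here is a
standalone sketch of a single fixed-size defer pool. The _defer,
deferpool, newdefer, and freedefer names echo the runtime, but this
is not the runtime code: the real versions (in the diff below) also
manage a central pool under sched.deferlock, bound the per-P pool's
capacity, and run the slow paths on the system stack.

package main

import "fmt"

// _defer is a fixed-size record; with argumentless deferred
// functions there is no trailing argument block to size for.
type _defer struct {
	heap bool
	link *_defer // used by the central pool's linked list (unused here)
	fn   func()
}

// p stands in for the runtime's per-P state: one pool, no size classes.
type p struct {
	deferpool []*_defer
}

// newdefer pops a cached record, or allocates a new fixed-size one.
func (pp *p) newdefer() *_defer {
	var d *_defer
	if n := len(pp.deferpool); n > 0 {
		d = pp.deferpool[n-1]
		pp.deferpool[n-1] = nil
		pp.deferpool = pp.deferpool[:n-1]
	}
	if d == nil {
		d = new(_defer) // just the header; no arg block, no size rounding
	}
	d.heap = true
	return d
}

// freedefer zeroes the record and returns it to the per-P pool.
func (pp *p) freedefer(d *_defer) {
	*d = _defer{}
	pp.deferpool = append(pp.deferpool, d)
}

func main() {
	pp := &p{}
	d := pp.newdefer()
	d.fn = func() { fmt.Println("deferred") }
	d.fn()
	pp.freedefer(d)
	fmt.Println("pool size after free:", len(pp.deferpool))
}

Because every record is now the same size, the deferclass and
totaldefersize helpers and the testdefersizes consistency check
become unnecessary, which is exactly what the diff below deletes.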

Change-Id: Icc4b16afc23b38262ca9dd1f7369ad40874cf701
Reviewed-on: https://go-review.googlesource.com/c/go/+/326062
Trust: Cherry Mui <cherryyz@google.com>
Run-TryBot: Cherry Mui <cherryyz@google.com>
Reviewed-by: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Go Bot <gobot@golang.org>

src/cmd/compile/internal/test/inl_test.go
src/runtime/malloc.go
src/runtime/mgc.go
src/runtime/panic.go
src/runtime/proc.go
src/runtime/runtime2.go

src/cmd/compile/internal/test/inl_test.go
index 5b0db8330102305044cd3b37732bcad52733ffe2..bbdbe0c37ca6269d1e0efe117fddcf9a9cda922b 100644 (file)
@@ -42,7 +42,6 @@ func TestIntendedInlining(t *testing.T) {
                        "bucketMask",
                        "bucketShift",
                        "chanbuf",
-                       "deferclass",
                        "evacuated",
                        "fastlog2",
                        "fastrand",
@@ -63,7 +62,6 @@ func TestIntendedInlining(t *testing.T) {
                        "subtract1",
                        "subtractb",
                        "tophash",
-                       "totaldefersize",
                        "(*bmap).keys",
                        "(*bmap).overflow",
                        "(*waitq).enqueue",
src/runtime/malloc.go
index 2759bbdaf90f5539a06197728d7fa8476bf89e64..c5f62483ffca45b00ef836035e5df3e3b0e522b5 100644 (file)
@@ -420,8 +420,6 @@ func mallocinit() {
                throw("bad TinySizeClass")
        }
 
-       testdefersizes()
-
        if heapArenaBitmapBytes&(heapArenaBitmapBytes-1) != 0 {
                // heapBits expects modular arithmetic on bitmap
                // addresses to work.
@@ -1088,15 +1086,6 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
 
        var scanSize uintptr
        if !noscan {
-               // If allocating a defer+arg block, now that we've picked a malloc size
-               // large enough to hold everything, cut the "asked for" size down to
-               // just the defer header, so that the GC bitmap will record the arg block
-               // as containing nothing at all (as if it were unused space at the end of
-               // a malloc block caused by size rounding).
-               // The defer arg areas are scanned as part of scanstack.
-               if typ == deferType {
-                       dataSize = unsafe.Sizeof(_defer{})
-               }
                heapBitsSetType(uintptr(x), size, dataSize, typ)
                if dataSize > typ.size {
                        // Array allocation. If there are any
src/runtime/mgc.go
index c239fa0f636509bc32bb86e1c66665d78048be05..34b5b482a355b634d2f2b9b85d120ca4981afd9e 100644 (file)
@@ -1558,19 +1558,17 @@ func clearpools() {
        sched.sudogcache = nil
        unlock(&sched.sudoglock)
 
-       // Clear central defer pools.
+       // Clear central defer pool.
        // Leave per-P pools alone, they have strictly bounded size.
        lock(&sched.deferlock)
-       for i := range sched.deferpool {
-               // disconnect cached list before dropping it on the floor,
-               // so that a dangling ref to one entry does not pin all of them.
-               var d, dlink *_defer
-               for d = sched.deferpool[i]; d != nil; d = dlink {
-                       dlink = d.link
-                       d.link = nil
-               }
-               sched.deferpool[i] = nil
+       // disconnect cached list before dropping it on the floor,
+       // so that a dangling ref to one entry does not pin all of them.
+       var d, dlink *_defer
+       for d = sched.deferpool; d != nil; d = dlink {
+               dlink = d.link
+               d.link = nil
        }
+       sched.deferpool = nil
        unlock(&sched.deferlock)
 }
 
src/runtime/panic.go
index 39013163b6ec0ed8806213eea8d2d8ce91946ff0..86d41c4e1c525c237494e34f936e559c84617c15 100644 (file)
@@ -236,7 +236,7 @@ func deferproc(fn func()) {
        sp := getcallersp()
        callerpc := getcallerpc()
 
-       d := newdefer(0)
+       d := newdefer()
        if d._panic != nil {
                throw("deferproc: d.panic != nil after newdefer")
        }
@@ -302,107 +302,38 @@ func deferprocStack(d *_defer) {
        // been set and must not be clobbered.
 }
 
-// Small malloc size classes >= 16 are the multiples of 16: 16, 32, 48, 64, 80, 96, 112, 128, 144, ...
-// Each P holds a pool for defers with small arg sizes.
-// Assign defer allocations to pools by rounding to 16, to match malloc size classes.
-
-const (
-       deferHeaderSize = unsafe.Sizeof(_defer{})
-       minDeferAlloc   = (deferHeaderSize + 15) &^ 15
-       minDeferArgs    = minDeferAlloc - deferHeaderSize
-)
-
-// defer size class for arg size sz
-//go:nosplit
-func deferclass(siz uintptr) uintptr {
-       if siz <= minDeferArgs {
-               return 0
-       }
-       return (siz - minDeferArgs + 15) / 16
-}
-
-// total size of memory block for defer with arg size sz
-func totaldefersize(siz uintptr) uintptr {
-       if siz <= minDeferArgs {
-               return minDeferAlloc
-       }
-       return deferHeaderSize + siz
-}
-
-// Ensure that defer arg sizes that map to the same defer size class
-// also map to the same malloc size class.
-func testdefersizes() {
-       var m [len(p{}.deferpool)]int32
-
-       for i := range m {
-               m[i] = -1
-       }
-       for i := uintptr(0); ; i++ {
-               defersc := deferclass(i)
-               if defersc >= uintptr(len(m)) {
-                       break
-               }
-               siz := roundupsize(totaldefersize(i))
-               if m[defersc] < 0 {
-                       m[defersc] = int32(siz)
-                       continue
-               }
-               if m[defersc] != int32(siz) {
-                       print("bad defer size class: i=", i, " siz=", siz, " defersc=", defersc, "\n")
-                       throw("bad defer size class")
-               }
-       }
-}
-
-var deferType *_type // type of _defer struct
-
-func init() {
-       var x interface{}
-       x = (*_defer)(nil)
-       deferType = (*(**ptrtype)(unsafe.Pointer(&x))).elem
-}
+// Each P holds a pool for defers.
 
 // Allocate a Defer, usually using per-P pool.
 // Each defer must be released with freedefer.  The defer is not
 // added to any defer chain yet.
-//
-// This must not grow the stack because there may be a frame without
-// stack map information when this is called.
-//
-//go:nosplit
-func newdefer(siz int32) *_defer {
+func newdefer() *_defer {
        var d *_defer
-       sc := deferclass(uintptr(siz))
        gp := getg()
-       if sc < uintptr(len(p{}.deferpool)) {
-               pp := gp.m.p.ptr()
-               if len(pp.deferpool[sc]) == 0 && sched.deferpool[sc] != nil {
-                       // Take the slow path on the system stack so
-                       // we don't grow newdefer's stack.
-                       systemstack(func() {
-                               lock(&sched.deferlock)
-                               for len(pp.deferpool[sc]) < cap(pp.deferpool[sc])/2 && sched.deferpool[sc] != nil {
-                                       d := sched.deferpool[sc]
-                                       sched.deferpool[sc] = d.link
-                                       d.link = nil
-                                       pp.deferpool[sc] = append(pp.deferpool[sc], d)
-                               }
-                               unlock(&sched.deferlock)
-                       })
-               }
-               if n := len(pp.deferpool[sc]); n > 0 {
-                       d = pp.deferpool[sc][n-1]
-                       pp.deferpool[sc][n-1] = nil
-                       pp.deferpool[sc] = pp.deferpool[sc][:n-1]
-               }
-       }
-       if d == nil {
-               // Allocate new defer+args.
+       pp := gp.m.p.ptr()
+       if len(pp.deferpool) == 0 && sched.deferpool != nil {
+               // Take the slow path on the system stack so
+               // we don't grow newdefer's stack.
                systemstack(func() {
-                       total := roundupsize(totaldefersize(uintptr(siz)))
-                       d = (*_defer)(mallocgc(total, deferType, true))
+                       lock(&sched.deferlock)
+                       for len(pp.deferpool) < cap(pp.deferpool)/2 && sched.deferpool != nil {
+                               d := sched.deferpool
+                               sched.deferpool = d.link
+                               d.link = nil
+                               pp.deferpool = append(pp.deferpool, d)
+                       }
+                       unlock(&sched.deferlock)
                })
        }
+       if n := len(pp.deferpool); n > 0 {
+               d = pp.deferpool[n-1]
+               pp.deferpool[n-1] = nil
+               pp.deferpool = pp.deferpool[:n-1]
+       }
+       if d == nil {
+               // Allocate new defer.
+               d = new(_defer)
+       }
        d.heap = true
        return d
 }
@@ -424,23 +355,19 @@ func freedefer(d *_defer) {
        if !d.heap {
                return
        }
-       sc := deferclass(0)
-       if sc >= uintptr(len(p{}.deferpool)) {
-               return
-       }
        pp := getg().m.p.ptr()
-       if len(pp.deferpool[sc]) == cap(pp.deferpool[sc]) {
+       if len(pp.deferpool) == cap(pp.deferpool) {
                // Transfer half of local cache to the central cache.
                //
                // Take this slow path on the system stack so
                // we don't grow freedefer's stack.
                systemstack(func() {
                        var first, last *_defer
-                       for len(pp.deferpool[sc]) > cap(pp.deferpool[sc])/2 {
-                               n := len(pp.deferpool[sc])
-                               d := pp.deferpool[sc][n-1]
-                               pp.deferpool[sc][n-1] = nil
-                               pp.deferpool[sc] = pp.deferpool[sc][:n-1]
+                       for len(pp.deferpool) > cap(pp.deferpool)/2 {
+                               n := len(pp.deferpool)
+                               d := pp.deferpool[n-1]
+                               pp.deferpool[n-1] = nil
+                               pp.deferpool = pp.deferpool[:n-1]
                                if first == nil {
                                        first = d
                                } else {
@@ -449,8 +376,8 @@ func freedefer(d *_defer) {
                                last = d
                        }
                        lock(&sched.deferlock)
-                       last.link = sched.deferpool[sc]
-                       sched.deferpool[sc] = first
+                       last.link = sched.deferpool
+                       sched.deferpool = first
                        unlock(&sched.deferlock)
                })
        }
@@ -469,7 +396,7 @@ func freedefer(d *_defer) {
        // both of which throw.
        d.link = nil
 
-       pp.deferpool[sc] = append(pp.deferpool[sc], d)
+       pp.deferpool = append(pp.deferpool, d)
 }
 
 // Separate function so that it can split stack.
@@ -720,7 +647,7 @@ func addOneOpenDeferFrame(gp *g, pc uintptr, sp unsafe.Pointer) {
                                        throw("missing deferreturn")
                                }
 
-                               d1 := newdefer(0)
+                               d1 := newdefer()
                                d1.openDefer = true
                                d1._panic = nil
                                // These are the pc/sp to set after we've
src/runtime/proc.go
index d6f3af690ba1379c47c75948d1edee9d471af18a..4a116130a5404d9be2c78503eed856fcd6f37e0c 100644 (file)
@@ -4784,9 +4784,7 @@ func (pp *p) init(id int32) {
        pp.id = id
        pp.status = _Pgcstop
        pp.sudogcache = pp.sudogbuf[:0]
-       for i := range pp.deferpool {
-               pp.deferpool[i] = pp.deferpoolbuf[i][:0]
-       }
+       pp.deferpool = pp.deferpoolbuf[:0]
        pp.wbBuf.reset()
        if pp.mcache == nil {
                if id == 0 {
@@ -4864,12 +4862,10 @@ func (pp *p) destroy() {
                pp.sudogbuf[i] = nil
        }
        pp.sudogcache = pp.sudogbuf[:0]
-       for i := range pp.deferpool {
-               for j := range pp.deferpoolbuf[i] {
-                       pp.deferpoolbuf[i][j] = nil
-               }
-               pp.deferpool[i] = pp.deferpoolbuf[i][:0]
+       for j := range pp.deferpoolbuf {
+               pp.deferpoolbuf[j] = nil
        }
+       pp.deferpool = pp.deferpoolbuf[:0]
        systemstack(func() {
                for i := 0; i < pp.mspancache.len; i++ {
                        // Safe to call since the world is stopped.
src/runtime/runtime2.go
index cf4b0bff430b0d27c2ac2551f87d3ddbf6fd527a..75c4818599071dbcbbc0c384af40dbeea854b2cb 100644 (file)
@@ -613,8 +613,8 @@ type p struct {
        pcache      pageCache
        raceprocctx uintptr
 
-       deferpool    [5][]*_defer // pool of available defer structs of different sizes (see panic.go)
-       deferpoolbuf [5][32]*_defer
+       deferpool    []*_defer // pool of available defer structs (see panic.go)
+       deferpoolbuf [32]*_defer
 
        // Cache of goroutine ids, amortizes accesses to runtime·sched.goidgen.
        goidcache    uint64
@@ -801,9 +801,9 @@ type schedt struct {
        sudoglock  mutex
        sudogcache *sudog
 
-       // Central pool of available defer structs of different sizes.
+       // Central pool of available defer structs.
        deferlock mutex
-       deferpool [5]*_defer
+       deferpool *_defer
 
        // freem is the list of m's waiting to be freed when their
        // m.exited is set. Linked through m.freelink.