Cypherpunks.ru repositories - gostls13.git/commitdiff
[dev.garbage] runtime: use s.base() everywhere it makes sense
author Austin Clements <austin@google.com>
Thu, 28 Apr 2016 14:59:00 +0000 (10:59 -0400)
committer Austin Clements <austin@google.com>
Fri, 29 Apr 2016 03:53:14 +0000 (03:53 +0000)
Currently we have lots of (s.start << _PageShift) and variants. We now
have an s.base() function that returns this. It's faster and more
readable, so use it.

Change-Id: I888060a9dae15ea75ca8cc1c2b31c905e71b452b
Reviewed-on: https://go-review.googlesource.com/22559
Reviewed-by: Rick Hudson <rlh@golang.org>
Run-TryBot: Austin Clements <austin@google.com>

src/runtime/heapdump.go
src/runtime/mfinal.go
src/runtime/mgcmark.go
src/runtime/mheap.go
src/runtime/stack.go

index 6085c6866cf8c6526f267ad44f2835d324236cf9..4afe663418e5b9c81737b647d7b4523877ba05b0 100644 (file)
@@ -447,7 +447,7 @@ func dumproots() {
                                        continue
                                }
                                spf := (*specialfinalizer)(unsafe.Pointer(sp))
-                               p := unsafe.Pointer((uintptr(s.start) << _PageShift) + uintptr(spf.special.offset))
+                               p := unsafe.Pointer(s.base() + uintptr(spf.special.offset))
                                dumpfinalizer(p, spf.fn, spf.fint, spf.ot)
                        }
                }
@@ -467,7 +467,7 @@ func dumpobjs() {
                if s.state != _MSpanInUse {
                        continue
                }
-               p := uintptr(s.start << _PageShift)
+               p := s.base()
                size := s.elemsize
                n := (s.npages << _PageShift) / size
                if n > uintptr(len(freemark)) {
@@ -619,7 +619,7 @@ func dumpmemprof() {
                                continue
                        }
                        spp := (*specialprofile)(unsafe.Pointer(sp))
-                       p := uintptr(s.start<<_PageShift) + uintptr(spp.special.offset)
+                       p := s.base() + uintptr(spp.special.offset)
                        dumpint(tagAllocSample)
                        dumpint(uint64(p))
                        dumpint(uint64(uintptr(unsafe.Pointer(spp.b))))
index e81650d842d22c9ae4e3b1c44e18973d88a38ded..6dce6d75011ec9d5ca8fa40bc158051f0563c025 100644 (file)
@@ -402,7 +402,7 @@ func findObject(v unsafe.Pointer) (s *mspan, x unsafe.Pointer, n uintptr) {
        if s == nil {
                return
        }
-       x = unsafe.Pointer(uintptr(s.start) << pageShift)
+       x = unsafe.Pointer(s.base())
 
        if uintptr(v) < uintptr(x) || uintptr(v) >= uintptr(unsafe.Pointer(s.limit)) || s.state != mSpanInUse {
                s = nil
index 18f930f89a72d46fdf746616fb8d2fb549c12274..14449c3d4b401a05ada77a2229783676d9972552 100644 (file)
@@ -1247,7 +1247,7 @@ func gcDumpObject(label string, obj, off uintptr) {
                print(" s=nil\n")
                return
        }
-       print(" s.start*_PageSize=", hex(s.start*_PageSize), " s.limit=", hex(s.limit), " s.sizeclass=", s.sizeclass, " s.elemsize=", s.elemsize, "\n")
+       print(" s.base()=", hex(s.base()), " s.limit=", hex(s.limit), " s.sizeclass=", s.sizeclass, " s.elemsize=", s.elemsize, "\n")
        skipped := false
        for i := uintptr(0); i < s.elemsize; i += sys.PtrSize {
                // For big objects, just print the beginning (because
index e4946ff8e951b05b99f8fa76c6c38c9a7336db51..40ed466038a4d999758e273de61401edae792f6d 100644 (file)
@@ -808,7 +808,7 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i
                }
        case _MSpanInUse:
                if s.allocCount != 0 || s.sweepgen != h.sweepgen {
-                       print("MHeap_FreeSpanLocked - span ", s, " ptr ", hex(s.start<<_PageShift), " allocCount ", s.allocCount, " sweepgen ", s.sweepgen, "/", h.sweepgen, "\n")
+                       print("MHeap_FreeSpanLocked - span ", s, " ptr ", hex(s.base()), " allocCount ", s.allocCount, " sweepgen ", s.sweepgen, "/", h.sweepgen, "\n")
                        throw("MHeap_FreeSpanLocked - invalid free")
                }
                h.pagesInUse -= uint64(s.npages)
@@ -892,7 +892,7 @@ func scavengelist(list *mSpanList, now, limit uint64) uintptr {
        var sumreleased uintptr
        for s := list.first; s != nil; s = s.next {
                if (now-uint64(s.unusedsince)) > limit && s.npreleased != s.npages {
-                       start := uintptr(s.start) << _PageShift
+                       start := s.base()
                        end := start + s.npages<<_PageShift
                        if sys.PhysPageSize > _PageSize {
                                // We can only release pages in
@@ -1062,7 +1062,7 @@ func addspecial(p unsafe.Pointer, s *special) bool {
        mp := acquirem()
        span.ensureSwept()
 
-       offset := uintptr(p) - uintptr(span.start<<_PageShift)
+       offset := uintptr(p) - span.base()
        kind := s.kind
 
        lock(&span.speciallock)
@@ -1110,7 +1110,7 @@ func removespecial(p unsafe.Pointer, kind uint8) *special {
        mp := acquirem()
        span.ensureSwept()
 
-       offset := uintptr(p) - uintptr(span.start<<_PageShift)
+       offset := uintptr(p) - span.base()
 
        lock(&span.speciallock)
        t := &span.specials
index ac4efc114be89c992c678c57b8e4acceaad1d9a7..f68c513fd67e39ce00b40f765b0519f0a86121ed 100644 (file)
@@ -198,7 +198,7 @@ func stackpoolalloc(order uint8) gclinkptr {
                        throw("bad stackfreelist")
                }
                for i := uintptr(0); i < _StackCacheSize; i += _FixedStack << order {
-                       x := gclinkptr(uintptr(s.start)<<_PageShift + i)
+                       x := gclinkptr(s.base() + i)
                        x.ptr().next = s.stackfreelist
                        s.stackfreelist = x
                }
@@ -391,7 +391,7 @@ func stackalloc(n uint32) (stack, []stkbar) {
                                throw("out of memory")
                        }
                }
-               v = unsafe.Pointer(s.start << _PageShift)
+               v = unsafe.Pointer(s.base())
        }
 
        if raceenabled {
@@ -456,7 +456,7 @@ func stackfree(stk stack, n uintptr) {
        } else {
                s := mheap_.lookup(v)
                if s.state != _MSpanStack {
-                       println(hex(s.start<<_PageShift), v)
+                       println(hex(s.base()), v)
                        throw("bad span state")
                }
                if gcphase == _GCoff {