// Check that transition is valid.
switch oldval {
+ default:
+ print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
+ dumpgstatus(gp)
+ gothrow("casfrom_Gscanstatus:top gp->status is not in scan state")
case _Gscanrunnable,
_Gscanwaiting,
_Gscanrunning,
// loop if gp->atomicstatus is in a scan state giving
// GC time to finish and change the state to oldval.
for !cas(&gp.atomicstatus, oldval, newval) {
- if gp.preemptscan && !gp.gcworkdone && (oldval == _Grunning || oldval == _Gsyscall) {
- gp.preemptscan = false
- systemstack(func() {
- gcphasework(gp)
- })
- }
+ if oldval == _Gwaiting && gp.atomicstatus == _Grunnable {
+ systemstack(func() {
+ gothrow("casgstatus: waiting for Gwaiting but is Grunnable")
+ })
+ }
+ // Help GC if needed.
++ // if gp.preemptscan && !gp.gcworkdone && (oldval == _Grunning || oldval == _Gsyscall) {
++ // gp.preemptscan = false
++ // systemstack(func() {
++ // gcphasework(gp)
++ // })
++ // }
+ }
+ }
+
+ // casgstatus(gp, oldstatus, Gcopystack), assuming oldstatus is Gwaiting or Grunnable.
+ // Returns old status. Cannot call casgstatus directly, because we are racing with an
+ // async wakeup that might come in from netpoll. If we see Gwaiting from the readgstatus,
+ // it might have become Grunnable by the time we get to the cas. If we called casgstatus,
+ // it would loop waiting for the status to go back to Gwaiting, which it never will.
+ //go:nosplit
+ func casgcopystack(gp *g) uint32 {
+ for {
+ oldstatus := readgstatus(gp) &^ _Gscan
+ if oldstatus != _Gwaiting && oldstatus != _Grunnable {
+ gothrow("copystack: bad status, not Gwaiting or Grunnable")
+ }
+ if cas(&gp.atomicstatus, oldstatus, _Gcopystack) {
+ return oldstatus
+ }
}
}
// Runs on g0 and does the actual work after putting the g back on the run queue.
func mquiesce(gpmaster *g) {
- activeglen := len(allgs)
// enqueue the calling goroutine.
restartg(gpmaster)
+
+ activeglen := len(allgs)
for i := 0; i < activeglen; i++ {
gp := allgs[i]
if readgstatus(gp) == _Gdead {
_g_.sched.lr = 0
_g_.sched.ret = 0
_g_.sched.ctxt = nil
- _g_.sched.g = _g_
+ // write as uintptr to avoid write barrier, which will smash _g_.sched.
+ *(*uintptr)(unsafe.Pointer(&_g_.sched.g)) = uintptr(unsafe.Pointer(_g_))
}
// The goroutine g is about to enter a system call.
_g_.syscallpc = pc
casgstatus(_g_, _Grunning, _Gsyscall)
if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
- systemstack(entersyscall_bad)
+ systemstack(func() {
+ print("entersyscall inconsistent ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
+ gothrow("entersyscall")
+ })
}
if atomicload(&sched.sysmonwait) != 0 { // TODO: fast atomic
reentersyscall(getcallerpc(unsafe.Pointer(&dummy)), getcallersp(unsafe.Pointer(&dummy)))
}
-func entersyscall_bad() {
- var gp *g
- gp = getg().m.curg
- print("entersyscall inconsistent ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
- gothrow("entersyscall")
-}
-
func entersyscall_sysmon() {
lock(&sched.lock)
if atomicload(&sched.sysmonwait) != 0 {
_g_.stackguard0 = stackPreempt // see comment in entersyscall
// Leave SP around for GC and traceback.
- save(getcallerpc(unsafe.Pointer(&dummy)), getcallersp(unsafe.Pointer(&dummy)))
+ pc := getcallerpc(unsafe.Pointer(&dummy))
+ sp := getcallersp(unsafe.Pointer(&dummy))
+ save(pc, sp)
_g_.syscallsp = _g_.sched.sp
_g_.syscallpc = _g_.sched.pc
+ if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
+ sp1 := sp
+ sp2 := _g_.sched.sp
+ sp3 := _g_.syscallsp
+ systemstack(func() {
+ print("entersyscallblock inconsistent ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
+ gothrow("entersyscallblock")
+ })
+ }
casgstatus(_g_, _Grunning, _Gsyscall)
if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
- systemstack(entersyscall_bad)
+ systemstack(func() {
+ print("entersyscallblock inconsistent ", hex(sp), " ", hex(_g_.sched.sp), " ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
+ gothrow("entersyscallblock")
+ })
}
systemstack(entersyscallblock_handoff)
// Freezetheworld sets stopwait but does not retake P's.
if sched.stopwait != 0 {
+ _g_.m.mcache = nil
_g_.m.p = nil
return false
}
}
// Try to get any other idle P.
+ _g_.m.mcache = nil
_g_.m.p = nil
if sched.pidle != nil {
var ok bool
}
// Change number of processors. The world is stopped, sched is locked.
+// gcworkbufs are not being modified by either the GC or
+// the write barrier code.
func procresize(new int32) {
old := gomaxprocs
if old < 0 || old > _MaxGomaxprocs || new <= 0 || new > _MaxGomaxprocs {
// Allocates a stack from the free pool. Must be called with
// stackpoolmu held.
-func stackpoolalloc(order uint8) *mlink {
+func stackpoolalloc(order uint8) gclinkptr {
list := &stackpool[order]
s := list.next
if s == list {
if s.ref != 0 {
gothrow("bad ref")
}
- if s.freelist != nil {
+ if s.freelist.ptr() != nil {
gothrow("bad freelist")
}
for i := uintptr(0); i < _StackCacheSize; i += _FixedStack << order {
- x := (*mlink)(unsafe.Pointer(uintptr(s.start)<<_PageShift + i))
- x.next = s.freelist
+ x := gclinkptr(uintptr(s.start)<<_PageShift + i)
+ x.ptr().next = s.freelist
s.freelist = x
}
mSpanList_Insert(list, s)
}
x := s.freelist
- if x == nil {
+ if x.ptr() == nil {
gothrow("span has no free stacks")
}
- s.freelist = x.next
+ s.freelist = x.ptr().next
s.ref++
- if s.freelist == nil {
+ if s.freelist.ptr() == nil {
// all stacks in s are allocated.
mSpanList_Remove(s)
}
}
// Adds stack x to the free pool. Must be called with stackpoolmu held.
-func stackpoolfree(x *mlink, order uint8) {
+func stackpoolfree(x gclinkptr, order uint8) {
+// NOTE(review): x is now a gclinkptr rather than *mlink — presumably so the
+// freelist writes below are not pointer stores visible to the write barrier;
+// confirm against gclinkptr's definition (not in view here).
s := mHeap_Lookup(&mheap_, (unsafe.Pointer)(x))
if s.state != _MSpanStack {
gothrow("freeing stack not in a stack span")
}
- if s.freelist == nil {
+ if s.freelist.ptr() == nil {
// s will now have a free stack
mSpanList_Insert(&stackpool[order], s)
}
+// Push x onto the span's freelist.
- x.next = s.freelist
+ x.ptr().next = s.freelist
s.freelist = x
s.ref--
if s.ref == 0 {
// span is completely free - return to heap
mSpanList_Remove(s)
- s.freelist = nil
+ s.freelist = 0
+// gclinkptr's zero value is 0, not nil — clear before returning the span.
mHeap_FreeStack(&mheap_, s)
}
}
// Grab some stacks from the global cache.
// Grab half of the allowed capacity (to prevent thrashing).
- var list *mlink
+ var list gclinkptr
var size uintptr
lock(&stackpoolmu)
for size < _StackCacheSize/2 {
x := stackpoolalloc(order)
- x.next = list
+ x.ptr().next = list
list = x
size += _FixedStack << order
}
size := c.stackcache[order].size
lock(&stackpoolmu)
for size > _StackCacheSize/2 {
- y := x.next
+ y := x.ptr().next
stackpoolfree(x, order)
x = y
size -= _FixedStack << order
lock(&stackpoolmu)
for order := uint8(0); order < _NumStackOrders; order++ {
x := c.stackcache[order].list
- for x != nil {
- y := x.next
+ for x.ptr() != nil {
+ y := x.ptr().next
stackpoolfree(x, order)
x = y
}
- c.stackcache[order].list = nil
+ c.stackcache[order].list = 0
c.stackcache[order].size = 0
}
unlock(&stackpoolmu)
order++
n2 >>= 1
}
- var x *mlink
+ var x gclinkptr
c := thisg.m.mcache
if c == nil || thisg.m.gcing != 0 || thisg.m.helpgc != 0 {
// c == nil can happen in the guts of exitsyscall or
unlock(&stackpoolmu)
} else {
x = c.stackcache[order].list
- if x == nil {
+ if x.ptr() == nil {
stackcacherefill(c, order)
x = c.stackcache[order].list
}
- c.stackcache[order].list = x.next
+ c.stackcache[order].list = x.ptr().next
c.stackcache[order].size -= uintptr(n)
}
v = (unsafe.Pointer)(x)
order++
n2 >>= 1
}
- x := (*mlink)(v)
+ x := gclinkptr(v)
c := gp.m.mcache
if c == nil || gp.m.gcing != 0 || gp.m.helpgc != 0 {
lock(&stackpoolmu)
if c.stackcache[order].size >= _StackCacheSize {
stackcacherelease(c, order)
}
- x.next = c.stackcache[order].list
+ x.ptr().next = c.stackcache[order].list
c.stackcache[order].list = x
c.stackcache[order].size += n
}
}
// Copies gp's stack to a new stack of a different size.
+// Caller must have changed gp status to Gcopystack.
func copystack(gp *g, newsize uintptr) {
if gp.syscallsp != 0 {
gothrow("stack growth not allowed in system call")
}
memmove(unsafe.Pointer(new.hi-used), unsafe.Pointer(old.hi-used), used)
- oldstatus := casgcopystack(gp) // cas from Gwaiting or Grunnable to Gcopystack, return old status
-
// Swap out old stack for new one
gp.stack = new
gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
gp.sched.sp = new.hi - used
- casgstatus(gp, _Gcopystack, oldstatus) // oldstatus is Gwaiting or Grunnable
-
// free old stack
if stackPoisonCopy != 0 {
fillstack(old, 0xfc)
gothrow("runtime: split stack overflow")
}
+ if gp.sched.ctxt != nil {
+ // morestack wrote sched.ctxt on its way in here,
+ // without a write barrier. Run the write barrier now.
+ // It is not possible to be preempted between then
+ // and now, so it's okay.
+ writebarrierptr_nostore((*uintptr)(unsafe.Pointer(&gp.sched.ctxt)), uintptr(gp.sched.ctxt))
+ }
+
if gp.stackguard0 == stackPreempt {
if gp == thisg.m.g0 {
gothrow("runtime: preempt g0")
gothrow("runtime: g is running but p is not")
}
if gp.preemptscan {
+ for !castogscanstatus(gp, _Gwaiting, _Gscanwaiting) {
+ // Likely to be racing with the GC as it sees a _Gwaiting and does the stack scan.
+ // If so this stack will be scanned twice which does not change correctness.
+ }
gcphasework(gp)
+ casfrom_Gscanstatus(gp, _Gscanwaiting, _Gwaiting)
casgstatus(gp, _Gwaiting, _Grunning)
gp.stackguard0 = gp.stack.lo + _StackGuard
gp.preempt = false
gothrow("stack overflow")
}
- oldstatus := readgstatus(gp)
- oldstatus &^= _Gscan
- casgstatus(gp, oldstatus, _Gcopystack) // oldstatus is Gwaiting or Grunnable
- // Note that the concurrent GC might be scanning the stack as we try to replace it.
- // copystack takes care of the appropriate coordination with the stack scanner.
++ casgstatus(gp, _Gwaiting, _Gcopystack)
+
+ // The concurrent GC will not scan the stack while we are doing the copy since
+ // the gp is in a Gcopystack status.
copystack(gp, uintptr(newsize))
if stackDebug >= 1 {
print("stack grow done\n")
}
- casgstatus(gp, _Gwaiting, _Grunning)
+ casgstatus(gp, _Gcopystack, _Grunning)
gogo(&gp.sched)
}
if gp.syscallsp != 0 {
return
}
-
- /* TODO
- if goos_windows && gp.m != nil && gp.m.libcallsp != 0 {
+ if goos_windows != 0 && gp.m != nil && gp.m.libcallsp != 0 {
return
}
- */
if stackDebug > 0 {
print("shrinking stack ", oldsize, "->", newsize, "\n")
}
- // This is being done in a Gscan state and was initiated by the GC so no need to move to
- // the Gcopystate.
- // The world is stopped, so the goroutine must be Gwaiting or Grunnable,
- // and what it is is not changing underfoot.
- oldstatus := readgstatus(gp) &^ _Gscan
- if oldstatus != _Gwaiting && oldstatus != _Grunnable {
- gothrow("status is not Gwaiting or Grunnable")
- }
- casgstatus(gp, oldstatus, _Gcopystack)
+
++ oldstatus := casgcopystack(gp)
copystack(gp, newsize)
+ casgstatus(gp, _Gcopystack, oldstatus)
}
// Do any delayed stack freeing that was queued up during GC.