}
func dumproots() {
+ // To protect mheap_.allspans.
+ assertWorldStopped()
+
// TODO(mwhudson): dump datamask etc from all objects
// data segment
dumpint(tagData)
var freemark [_PageSize / 8]bool
func dumpobjs() {
+ // To protect mheap_.allspans.
+ assertWorldStopped()
+
for _, s := range mheap_.allspans {
if s.state.get() != mSpanInUse {
continue
//go:systemstack
func dumpmemstats(m *MemStats) {
+ assertWorldStopped()
+
// These ints should be identical to the exported
// MemStats structure and should be ordered the same
// way too.
}
func dumpmemprof() {
+ // To protect mheap_.allspans.
+ assertWorldStopped()
+
iterate_memprof(dumpmemprof_callback)
for _, s := range mheap_.allspans {
if s.state.get() != mSpanInUse {
var dumphdr = []byte("go1.7 heap dump\n")
func mdump(m *MemStats) {
+ assertWorldStopped()
+
// make sure we're done sweeping
for _, s := range mheap_.allspans {
if s.state.get() == mSpanInUse {
}
func writeheapdump_m(fd uintptr, m *MemStats) {
+ assertWorldStopped()
+
_g_ := getg()
casgstatus(_g_.m.curg, _Grunning, _Gwaiting)
_g_.waitreason = waitReasonDumpingHeap
//go:nosplit
func assertRankHeld(r lockRank) {
}
+
+// worldStopped is a no-op in this build.
+//
+// NOTE(review): these look like the stub variants compiled in when the
+// tracking machinery is disabled (cf. the empty assertRankHeld stub just
+// above); the real implementations live alongside the lock-rank-enabled
+// code below — confirm against the build tags of the enclosing files.
+//go:nosplit
+func worldStopped() {
+}
+
+// worldStarted is a no-op in this build.
+//go:nosplit
+func worldStarted() {
+}
+
+// assertWorldStopped is a no-op in this build; it never throws.
+//go:nosplit
+func assertWorldStopped() {
+}
+
+// assertWorldStoppedOrLockHeld is a no-op in this build; it never throws.
+//go:nosplit
+func assertWorldStoppedOrLockHeld(l *mutex) {
+}
package runtime
import (
+ "runtime/internal/atomic"
"unsafe"
)
+// worldIsStopped is accessed atomically to track world-stops. 1 == world
+// stopped.
+//
+// Incremented by worldStopped and decremented by worldStarted; any value
+// other than 0 or 1 indicates a bookkeeping bug and is thrown on by
+// worldStopped, worldStarted, or checkWorldStopped.
+var worldIsStopped uint32
+
// lockRankStruct is embedded in mutex
type lockRankStruct struct {
// static lock ranking of the lock
throw("not holding required lock!")
})
}
+
+// worldStopped notes that the world is stopped.
+//
+// Caller must hold worldsema.
+//
+// nosplit to ensure it can be called in as many contexts as possible.
+//go:nosplit
+func worldStopped() {
+	// Xadd returns the post-increment value, so anything other than 1
+	// means a stop was already recorded (or the count was corrupt).
+	if stopped := atomic.Xadd(&worldIsStopped, 1); stopped != 1 {
+		print("world stop count=", stopped, "\n")
+		throw("recursive world stop")
+	}
+}
+
+// worldStarted notes that the world is starting, undoing the effect of a
+// prior worldStopped.
+//
+// Caller must hold worldsema.
+//
+// nosplit to ensure it can be called in as many contexts as possible.
+//go:nosplit
+func worldStarted() {
+	// Xadd returns the post-decrement value; anything other than 0 means
+	// there was no matching worldStopped (or the count was corrupt).
+	if stopped := atomic.Xadd(&worldIsStopped, -1); stopped != 0 {
+		print("world stop count=", stopped, "\n")
+		throw("released non-stopped world stop")
+	}
+}
+
+// checkWorldStopped reports whether the world is currently stopped. It
+// throws if the stop count is corrupt (greater than 1, which worldStopped
+// never permits).
+//
+// nosplit to ensure it can be called in as many contexts as possible.
+//go:nosplit
+func checkWorldStopped() bool {
+	stopped := atomic.Load(&worldIsStopped)
+	if stopped > 1 {
+		print("inconsistent world stop count=", stopped, "\n")
+		throw("inconsistent world stop count")
+	}
+
+	return stopped == 1
+}
+
+// assertWorldStopped throws if the world is not stopped. It does not check
+// which M stopped the world.
+//
+// Callers use it to enforce that data protected only by a world stop (e.g.
+// mheap_.allspans in the dump routines above) is accessed safely.
+//
+// nosplit to ensure it can be called in as many contexts as possible.
+//go:nosplit
+func assertWorldStopped() {
+	if checkWorldStopped() {
+		return
+	}
+
+	throw("world not stopped")
+}
+
+// assertWorldStoppedOrLockHeld throws if the world is not stopped and the
+// passed lock is not held.
+//
+// nosplit to ensure it can be called in as many contexts as possible.
+//go:nosplit
+func assertWorldStoppedOrLockHeld(l *mutex) {
+	// Fast path: a world stop alone satisfies the requirement.
+	if checkWorldStopped() {
+		return
+	}
+
+	// Slow path: verify the lock is held. NOTE(review): this runs on the
+	// system stack — presumably so the check/print machinery is safe from
+	// this nosplit context; confirm against checkLockHeld's requirements.
+	gp := getg()
+	systemstack(func() {
+		held := checkLockHeld(gp, l)
+		if !held {
+			printlock()
+			print("caller requires world stop or lock ", l, " (rank ", l.rank.String(), "), holding:\n")
+			println("<no world stop>")
+			printHeldLocks(gp)
+			throw("no world stop or required lock!")
+		}
+	})
+}
//
// The world must be stopped.
func startCheckmarks() {
+ assertWorldStopped()
+
// Clear all checkmarks.
for _, ai := range mheap_.allArenas {
arena := mheap_.arenas[ai.l1()][ai.l2()]
//
//go:systemstack
func gcSweep(mode gcMode) {
+ assertWorldStopped()
+
if gcphase != _GCoff {
throw("gcSweep being done but phase is not GCoff")
}
//
// The world must be stopped.
func gcMarkRootPrepare() {
+ assertWorldStopped()
+
work.nFlushCacheRoots = 0
// Compute how many data and BSS root blocks there are.
//
// The world must be stopped.
func gcMarkTinyAllocs() {
+ assertWorldStopped()
+
for _, p := range allp {
c := p.mcache
if c == nil || c.tiny == 0 {
//
//go:nowritebarrier
func finishsweep_m() {
+ assertWorldStopped()
+
// Sweeping must be complete before marking commences, so
// sweep any unswept spans. If this is a concurrent GC, there
// shouldn't be any spans left to sweep, so this should finish
//
//go:nowritebarrier
func updatememstats() {
+ assertWorldStopped()
+
// Flush mcaches to mcentral before doing anything else.
//
// Flushing to the mcentral may in general cause stats to
//
//go:nowritebarrier
func flushmcache(i int) {
+ assertWorldStopped()
+
p := allp[i]
c := p.mcache
if c == nil {
//
//go:nowritebarrier
func flushallmcaches() {
+ assertWorldStopped()
+
for i := 0; i < int(gomaxprocs); i++ {
flushmcache(i)
}
// unsafeRead aggregates the delta for this shard into out.
//
// Unsafe because it does so without any synchronization. The
-// only safe time to call this is if the world is stopped or
-// we're freezing the world or going down anyway (and we just
-// want _some_ estimate).
+// world must be stopped.
func (m *consistentHeapStats) unsafeRead(out *heapStatsDelta) {
+ assertWorldStopped()
+
for i := range m.stats {
out.merge(&m.stats[i])
}
// Unsafe because the world must be stopped and values should
// be donated elsewhere before clearing.
func (m *consistentHeapStats) unsafeClear() {
+ assertWorldStopped()
+
for i := range m.stats {
m.stats[i] = heapStatsDelta{}
}
sched.maxmcount = 10000
+ // The world starts stopped.
+ worldStopped()
+
moduledataverify()
stackinit()
mallocinit()
}
unlock(&sched.lock)
+ // World is effectively started now, as P's can run.
+ worldStarted()
+
// For cgocheck > 1, we turn on the write barrier at all times
// and check all pointer writes. We can't do this until after
// procresize because the write barrier needs a P.
if bad != "" {
throw(bad)
}
+
+ worldStopped()
}
func startTheWorldWithSema(emitTraceEvent bool) int64 {
+ assertWorldStopped()
+
mp := acquirem() // disable preemption because it can be holding p in a local var
if netpollinited() {
list := netpoll(0) // non-blocking
}
unlock(&sched.lock)
+ worldStarted()
+
for p1 != nil {
p := p1
p1 = p1.link.ptr()
// sched.lock must be held and the world must be stopped.
func (pp *p) destroy() {
assertLockHeld(&sched.lock)
+ assertWorldStopped()
// Move all runnable goroutines to the global queue
for pp.runqhead != pp.runqtail {
// Returns list of Ps with local work, they need to be scheduled by the caller.
func procresize(nprocs int32) *p {
assertLockHeld(&sched.lock)
+ assertWorldStopped()
old := gomaxprocs
if old < 0 || nprocs <= 0 {