package runtime

import (
+	"internal/goexperiment"
	"runtime/internal/atomic"
	"unsafe"
)
type sweepdata struct {
	// g is the background sweeper goroutine; parked reports
	// whether it is currently parked.
	g      *g
	parked bool

-	nbgsweep    uint32
-	npausesweep uint32
-
	// active tracks outstanding sweepers and the sweep
	// termination condition.
	active activeSweep
	// instantly. If GC was forced before the concurrent sweep
	// finished, there may be spans to sweep.
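	// sweepone sweeps one span and returns ^uintptr(0) when there is
	// nothing left to sweep.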
	for sweepone() != ^uintptr(0) {
-		sweep.npausesweep++
	}
	// Make sure there aren't any outstanding sweepers left.

	const sweepBatchSize = 10
	nSwept := 0
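	// Sweep spans in batches of sweepBatchSize, yielding between
	// batches so the background sweeper behaves like a low-priority
	// goroutine and lets other runnable work in first.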
	for sweepone() != ^uintptr(0) {
-		sweep.nbgsweep++
		nSwept++
		if nSwept%sweepBatchSize == 0 {
			goschedIfBusy()
		}
	}

		throw("mspan.sweep: bad span state")
	}
-	if traceEnabled() {
-		traceGCSweepSpan(s.npages * _PageSize)
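+	// Acquire the tracer; trace.ok() reports whether tracing is active.
+	// The event must be written between traceAcquire and traceRelease.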
+	trace := traceAcquire()
+	if trace.ok() {
+		trace.GCSweepSpan(s.npages * _PageSize)
+		traceRelease(trace)
	}

	mheap_.pagesSwept.Add(int64(s.npages))

			} else {
				mheap_.freeSpan(s)
			}
+			if goexperiment.AllocHeaders && s.largeType != nil && s.largeType.Kind_&kindGCProg != 0 {
+				// In the allocheaders experiment, the unrolled GCProg bitmap is allocated separately.
+				// Free the space for the unrolled bitmap.
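+				// freeManual acquires the heap lock, so it must run on the
+				// system stack.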
+				systemstack(func() {
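+					// This s shadows the outer s: it is the manually-managed
+					// span backing the unrolled bitmap, not the span being
+					// swept.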
+					s := spanOf(uintptr(unsafe.Pointer(s.largeType)))
+					mheap_.freeManual(s, spanAllocPtrScalarBits)
+				})
+				// Zero this pointer without putting the old value in a write
+				// buffer: the old value points into the manually-managed span
+				// freed above and may no longer be a valid pointer.
+				*(*uintptr)(unsafe.Pointer(&s.largeType)) = 0
+			}
			// Count the free in the consistent, external stats.
			stats := memstats.heapStats.acquire()

		return
	}
-	if traceEnabled() {
-		traceGCSweepStart()
+	trace := traceAcquire()
+	if trace.ok() {
+		trace.GCSweepStart()
+		traceRelease(trace)
	}
	// Fix debt if necessary.

		}
	}
-	if traceEnabled() {
-		traceGCSweepDone()
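+	// trace is reused from the sweep-start event earlier in this
+	// function, hence plain assignment rather than :=.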
+	trace = traceAcquire()
+	if trace.ok() {
+		trace.GCSweepDone()
+		traceRelease(trace)
	}
}