const (
debugMalloc = false
- flagNoScan = _FlagNoScan
- flagNoZero = _FlagNoZero
-
maxTinySize = _TinySize
tinySizeClass = _TinySizeClass
maxSmallSize = _MaxSmallSize
// base address for all 0-byte allocations
var zerobase uintptr
-const (
- // flags to malloc
- _FlagNoScan = 1 << 0 // GC doesn't have to scan object
- _FlagNoZero = 1 << 1 // don't zero memory
-)
-
// nextFreeFast returns the next free object if one is quickly available.
// Otherwise it returns 0.
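// It consults only the cached allocation state and never refills the
// cache; on failure the caller falls back to the slower nextFree.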
func (c *mcache) nextFreeFast(sizeclass int8) gclinkptr {
// Allocate an object of size bytes.
// Small objects are allocated from the per-P cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
-func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
+func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
if gcphase == _GCmarktermination {
throw("mallocgc called with gcphase == _GCmarktermination")
}
if size == 0 {
return unsafe.Pointer(&zerobase)
}
- if flags&flagNoScan == 0 && typ == nil {
- throw("malloc missing type")
- }
-
if debug.sbrk != 0 {
align := uintptr(16)
if typ != nil {
dataSize := size
c := gomcache()
var x unsafe.Pointer
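+ // noscan objects contain no pointers: either the caller asked for
+ // raw memory (typ == nil) or the type is marked kindNoPointers.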
+ noscan := typ == nil || typ.kind&kindNoPointers != 0
if size <= maxSmallSize {
- if flags&flagNoScan != 0 && size < maxTinySize {
+ if noscan && size < maxTinySize {
// Tiny allocator.
//
// Tiny allocator combines several tiny allocation requests
// into a single memory block. The resulting memory block
// is freed when all subobjects are unreachable. The subobjects
- // must be FlagNoScan (don't have pointers), this ensures that
+ // must be noscan (don't have pointers); this ensures that
// the amount of potentially wasted memory is bounded.
//
// Size of the memory block used for combining (maxTinySize) is tunable.
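//
// A sketch of the elided fast path (alignTiny stands in for the
// size&7/size&3/size&1 rounding done inline in the real code):
//
//	off := alignTiny(c.tinyoffset, size)
//	if off+size <= maxTinySize && c.tiny != 0 {
//		// The object fits into the existing tiny block.
//		x = unsafe.Pointer(c.tiny + off)
//		c.tinyoffset = off + size
//		...
//	}
//	// Otherwise allocate a fresh maxTinySize block from the
//	// tiny size class and carve the object from its start.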
v, shouldhelpgc = c.nextFree(sizeclass)
}
x = unsafe.Pointer(v)
- if flags&flagNoZero == 0 {
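+ // needzero is false only when the caller guarantees it will
+ // initialize every byte itself (e.g. growslice copies the old
+ // data and clears the tail), so the clear can be skipped.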
+ if needzero {
memclr(unsafe.Pointer(v), size)
// TODO:(rlh) Only clear if object is not known to be zeroed.
}
var s *mspan
shouldhelpgc = true
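// mheap.alloc locks the heap, so the large-object path must run
// on the system stack.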
systemstack(func() {
- s = largeAlloc(size, flags)
+ s = largeAlloc(size, needzero)
})
s.freeindex = 1
x = unsafe.Pointer(s.base())
size = s.elemsize
}
- if flags&flagNoScan != 0 {
+ var scanSize uintptr
+ if noscan {
heapBitsSetTypeNoScan(uintptr(x), size)
} else {
// If allocating a defer+arg block, now that we've picked a malloc size
// large enough to hold everything, cut the "asked for" size down to
// just the defer header, so that the GC bitmap will record the arg block
// as containing nothing at all (as if it were unused space at the end of
// a malloc block caused by size rounding).
// The defer arg areas are scanned as part of scanstack.
if typ == deferType {
dataSize = unsafe.Sizeof(_defer{})
}
heapBitsSetType(uintptr(x), size, dataSize, typ)
if dataSize > typ.size {
// Array allocation. If there are any
// pointers, GC has to scan to the last
// element.
if typ.ptrdata != 0 {
- c.local_scan += dataSize - typ.size + typ.ptrdata
+ scanSize = dataSize - typ.size + typ.ptrdata
}
} else {
- c.local_scan += typ.ptrdata
+ scanSize = typ.ptrdata
}
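+ // local_scan counts bytes of scannable heap allocated on this P;
+ // it is flushed into the global heap_scan estimate used by the GC pacer.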
+ c.local_scan += scanSize
// Ensure that the stores above that initialize x to
// type-safe memory and set the heap bits occur before
// the caller can make x observable to the garbage collector.
publicationBarrier()
}
- // GCmarkterminate allocates black
+ // Allocate black during GC.
// All slots hold nil so no scanning is needed.
// This may be racing with GC so do it atomically if there can be
// a race marking the bit.
- if gcphase == _GCmarktermination || gcBlackenPromptly {
- systemstack(func() {
- gcmarknewobject_m(uintptr(x), size)
- })
+ if gcphase != _GCoff {
+ gcmarknewobject(uintptr(x), size, scanSize)
}
// The object x is about to be reused but tracefree and msanfree
return x
}
-func largeAlloc(size uintptr, flag uint32) *mspan {
+func largeAlloc(size uintptr, needzero bool) *mspan {
// print("largeAlloc size=", size, "\n")
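// The unsigned sum wraps if size is within _PageSize of the top of
// the address space; this catches overflow before the size is
// rounded up to a whole number of pages.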
if size+_PageSize < size {
// Deduct credit for this span allocation and sweep if
// necessary. mHeap_Alloc will also sweep npages, so this only
// pays the debt down to npage pages.
deductSweepCredit(npages*_PageSize, npages)
- s := mheap_.alloc(npages, 0, true, flag&_FlagNoZero == 0)
+ s := mheap_.alloc(npages, 0, true, needzero)
if s == nil {
throw("out of memory")
}
// implementation of new builtin
func newobject(typ *_type) unsafe.Pointer {
- flags := uint32(0)
- if typ.kind&kindNoPointers != 0 {
- flags |= flagNoScan
- }
- return mallocgc(typ.size, typ, flags)
+ return mallocgc(typ.size, typ, true)
}
//go:linkname reflect_unsafe_New reflect.unsafe_New
return newobject(typ)
}
-// implementation of make builtin for slices
-func newarray(typ *_type, n uintptr) unsafe.Pointer {
- flags := uint32(0)
- if typ.kind&kindNoPointers != 0 {
- flags |= flagNoScan
+// newarray allocates an array of n elements of type typ.
+func newarray(typ *_type, n int) unsafe.Pointer {
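+ // maxSliceCap returns the largest n for which typ.size*uintptr(n)
+ // neither overflows nor exceeds the maximum allocation size, so the
+ // multiplication below cannot wrap.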
+ if n < 0 || uintptr(n) > maxSliceCap(typ.size) {
+ panic(plainError("runtime: allocation size out of range"))
}
- if int(n) < 0 || (typ.size > 0 && n > _MaxMem/typ.size) {
- panic("runtime: allocation size out of range")
- }
- return mallocgc(typ.size*n, typ, flags)
+ return mallocgc(typ.size*uintptr(n), typ, true)
}
//go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
-func reflect_unsafe_NewArray(typ *_type, n uintptr) unsafe.Pointer {
+func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer {
return newarray(typ, n)
}
-// rawmem returns a chunk of pointerless memory. It is
-// not zeroed.
-func rawmem(size uintptr) unsafe.Pointer {
- return mallocgc(size, nil, flagNoScan|flagNoZero)
-}
-
func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
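// Pick the distance to the next heap-profile sample before recording
// this one, so sampling stays independent of the current allocation.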
mp.mcache.next_sample = nextSample()
mProf_Malloc(x, size)
}