varDesc[i] = v[1]
}
- // Check all of our alignemnts. This is the actual core of the test.
+ // Check all of our alignments. This is the actual core of the test.
for i, d := range runtime.AtomicFields {
if d%8 != 0 {
t.Errorf("field alignment of %s failed: offset is %d", fieldDesc[i], d)
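The loop above walks a generated list of field offsets and requires each to be 8-byte aligned for 64-bit atomic access. A standalone sketch of the same kind of check, using unsafe.Offsetof on a hypothetical struct rather than the runtime's generated runtime.AtomicFields list:

package main

import (
	"fmt"
	"unsafe"
)

// stats is a hypothetical stand-in for a runtime type whose count
// field is updated with 64-bit atomics.
type stats struct {
	flag  uint32
	count uint64 // needs an 8-byte-aligned offset for atomic use.
}

func main() {
	off := unsafe.Offsetof(stats{}.count)
	if off%8 != 0 {
		fmt.Printf("field alignment of stats.count failed: offset is %d\n", off)
	}
}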
// The implementation works in layers. At the bottom, arenas are managed in chunks.
// Each chunk must be a multiple of the heap arena size, or the heap arena size must
-// be divisible by the arena chunks. The address space for each chunk, and each
-// corresponding heapArena for that addres space, are eternelly reserved for use as
+// be divisible by the arena chunk size. The address space for each chunk, and each
+// corresponding heapArena for that address space, are eternally reserved for use as
// arena chunks. That is, they can never be used for the general heap. Each chunk
// is also represented by a single mspan, and is modeled as a single large heap
// allocation. It must be, because each chunk contains ordinary Go values that may
func init() {
if userArenaChunkPages*pageSize != userArenaChunkBytes {
- throw("user arena chunk size is not a mutliple of the page size")
+ throw("user arena chunk size is not a multiple of the page size")
}
if userArenaChunkBytes%physPageSize != 0 {
- throw("user arena chunk size is not a mutliple of the physical page size")
+ throw("user arena chunk size is not a multiple of the physical page size")
}
if userArenaChunkBytes < heapArenaBytes {
if heapArenaBytes%userArenaChunkBytes != 0 {
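For readers without the surrounding file, a self-contained sketch of the invariants these checks enforce, with hypothetical constants (the real values are platform-dependent):

package main

const (
	pageSize            = 8 << 10 // hypothetical 8 KiB runtime page.
	physPageSize        = 4 << 10 // hypothetical OS page size.
	heapArenaBytes      = 4 << 20 // hypothetical 4 MiB heap arena.
	userArenaChunkPages = 1024
	userArenaChunkBytes = userArenaChunkPages * pageSize // 8 MiB here.
)

func main() {
	if userArenaChunkPages*pageSize != userArenaChunkBytes {
		panic("user arena chunk size is not a multiple of the page size")
	}
	if userArenaChunkBytes%physPageSize != 0 {
		panic("user arena chunk size is not a multiple of the physical page size")
	}
	// The divisibility must hold in one direction or the other, so that
	// chunks and heap arenas tile each other exactly.
	if userArenaChunkBytes < heapArenaBytes {
		if heapArenaBytes%userArenaChunkBytes != 0 {
			panic("heap arena size is not a multiple of the user arena chunk size")
		}
	} else if userArenaChunkBytes%heapArenaBytes != 0 {
		panic("user arena chunk size is not a multiple of the heap arena size")
	}
}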
#define RARG3 CX
#endif
-// Called from intrumented code.
+// Called from instrumented code.
// func runtime·doasanread(addr unsafe.Pointer, sz, sp, pc uintptr)
TEXT runtime·doasanread(SB), NOSPLIT, $0-32
MOVQ addr+0(FP), RARG0
CLD // convention is D is always left cleared
- // Check GOAMD64 reqirements
+ // Check GOAMD64 requirements
// We need to do this after setting up TLS, so that
// we can report an error if there is a failure. See issue 49586.
#ifdef NEED_FEATURES_CX
PUSHQ BX
// Load the starting address of the return sled into BX.
MOVQ $ret_sled<>(SB), BX
- // Load the address of the i'th return instruction fron the return sled.
+ // Load the address of the i'th return instruction from the return sled.
// The index is given in the fakePC argument.
ADDQ R8, BX
PUSHQ BX
// Function arguments arg0 and arg1 are passed in the registers specified
// by the x64 calling convention.
JMP AX
-// This code will not be executed and is only there to statisfy assembler
+// This code will not be executed and is only there to satisfy assembler
// check of a balanced stack.
not_reachable:
POPQ BX
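The sled arithmetic above is byte-level: on amd64 each RET is a single byte, so adding the fakePC index (presumably already reduced modulo the sled size by the caller) to the sled base yields the i'th distinct return address, which is what lets the fuzzer attribute the hook to distinct call sites. A hedged restatement in Go (names are illustrative):

// sledEntry returns the address of the i'th one-byte RET in a
// hypothetical return sled starting at sledBase.
func sledEntry(sledBase uintptr, fakePC uintptr) uintptr {
	return sledBase + fakePC
}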
}
}
}
- // There are a few very limited cirumstances where we won't have a P here.
+ // There are a few very limited circumstances where we won't have a P here.
// It's OK to simply skip scavenging in these cases. Something else will notice
// and pick up the tab.
var now int64
}
// mheap_.lock. This level of indirection makes it possible
- // to test pageAlloc indepedently of the runtime allocator.
+ // to test pageAlloc independently of the runtime allocator.
mheapLock *mutex
// sysStat is the runtime memstat to update when new system
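The indirection the comment describes is plain dependency injection of the lock. A minimal sketch of the same shape using sync.Mutex (the names are illustrative, not the runtime's):

package main

import "sync"

var heapLock sync.Mutex // stand-in for the global mheap_.lock.

// pageAllocLike holds a pointer to its lock instead of naming the
// global directly, so tests can substitute a private one.
type pageAllocLike struct {
	mheapLock *sync.Mutex
}

func newForRuntime() *pageAllocLike { return &pageAllocLike{mheapLock: &heapLock} }
func newForTest() *pageAllocLike    { return &pageAllocLike{mheapLock: new(sync.Mutex)} }

func main() {
	p := newForTest()
	p.mheapLock.Lock() // no interaction with heapLock.
	p.mheapLock.Unlock()
}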
}
}
- // addrRangeToSumAddrRange is a convienience function that converts
+ // addrRangeToSumAddrRange is a convenience function that converts
// an address range r to the address range of the given summary level
// that stores the summaries for r.
addrRangeToSumAddrRange := func(level int, r addrRange) addrRange {
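The closure's body is elided above; a hedged sketch of what such a conversion does, namely aligning both bounds of r out to the summary granularity of the given level (the levelShift values below are illustrative, not the runtime's exact table):

// Each summary at level l covers 1<<levelShift[l] bytes of address space.
var levelShift = [5]uint{34, 31, 28, 25, 22} // illustrative values.

type addrRange struct{ base, limit uintptr } // the range [base, limit).

func addrRangeToSumAddrRange(level int, r addrRange) addrRange {
	shift := levelShift[level]
	lo := r.base >> shift << shift                   // round down to a summary boundary.
	hi := (r.limit + 1<<shift - 1) >> shift << shift // round up to a summary boundary.
	return addrRange{base: lo, limit: hi}
}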
}
// Strategy: shrink all runs of zeros by max. If any runs of zero
- // remain, then we've identified a larger maxiumum zero run.
+ // remain, then we've identified a larger maximum zero run.
p := max // number of zeros we still need to shrink by.
k := uint(1) // current minimum length of runs of ones in x.
for {
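The loop body is elided here, but the trick is worth spelling out: a run of zeros in x is a run of ones in ^x, and y &= y>>1 drops the topmost bit of every run of ones, shrinking each run by exactly one. The p/k bookkeeping above exists because the real loop shrinks by doubling shift amounts; a linear-time sketch of the same idea:

// hasZeroRun reports whether x contains a run of at least n
// consecutive zero bits, for 1 <= n <= 64.
func hasZeroRun(x uint64, n uint) bool {
	y := ^x // runs of zeros in x are runs of ones in y.
	for i := uint(1); i < n && y != 0; i++ {
		y &= y >> 1 // shrink every run of ones by one bit.
	}
	return y != 0
}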
// TODO: we set HasFunctions if all symbols from samples were symbolized (hasFuncs).
// Decide what to do about HasInlineFrames and HasLineNumbers.
// Also, another approach to handle the mapping entry with
- // incomplete symbolization results is to dupliace the mapping
+ // incomplete symbolization results is to duplicate the mapping
-	// entry (but with different Has* fields values) and use
+	// entry (but with different Has* field values) and use
// different entries for symbolized locations and unsymbolized locations.
if hasFuncs {
}
// fmtJSON returns a pretty-printed JSON form for x.
-// It works reasonbly well for printing protocol-buffer
+// It works reasonably well for printing protocol-buffer
// data structures like profile.Profile.
func fmtJSON(x any) string {
js, _ := json.MarshalIndent(x, "", "\t")
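A quick usage sketch with a hypothetical type, to show the shape of the output:

type sample struct {
	Name  string
	Value int64
}

// fmtJSON(sample{Name: "alloc_space", Value: 42}) returns:
// {
// 	"Name": "alloc_space",
// 	"Value": 42
// }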
hi = 1
}
// Same behavior as for 1.17.
- // TODO: Simplify ths.
+ // TODO: Simplify this.
if goarch.BigEndian {
mp.fastrand = uint64(lo)<<32 | uint64(hi)
} else {
}
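The endianness-conditional packing above presumably preserves the in-memory byte layout of the pre-1.18 two-word random state ("same behavior as for 1.17"): on little-endian hardware, packing hi into the top 32 bits leaves lo in the first four bytes, where the first element of a [2]uint32 would sit. A small demonstration of that layout fact:

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	lo, hi := uint32(0x11111111), uint32(0x22222222)
	packed := uint64(hi)<<32 | uint64(lo) // the little-endian arm above.
	first := *(*uint32)(unsafe.Pointer(&packed))
	fmt.Println(first == lo) // true on little-endian machines.
}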
// saveAncestors copies previous ancestors of the given caller g and
-// includes infor for the current caller into a new set of tracebacks for
+// includes info for the current caller into a new set of tracebacks for
// a g being created.
func saveAncestors(callergp *g) *[]ancestorInfo {
// Copy all prior info, except for the root goroutine (goid 0).
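A hedged sketch of the copy-and-extend shape such a function has, with a simplified ancestorInfo (the real one also records PCs and goroutine creation details) and assuming maxDepth >= 1:

type ancestorInfo struct {
	goid uint64 // simplified stand-in for the real struct.
}

// saveAncestorsSketch prepends the caller's own info to its inherited
// ancestry, newest first, capping the history at maxDepth entries.
func saveAncestorsSketch(caller ancestorInfo, inherited []ancestorInfo, maxDepth int) []ancestorInfo {
	n := len(inherited) + 1
	if n > maxDepth {
		n = maxDepth
	}
	out := make([]ancestorInfo, 0, n)
	out = append(out, caller)
	out = append(out, inherited[:n-1]...)
	return out
}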
-// The starting function must return in the loader to
-// initialise some librairies, especially libthread which
+// The starting function must return to the loader to
+// initialise some libraries, especially libthread which
// creates the main thread and adds the TLS in R13
// R19 contains a function descriptor to the loader function
// which needs to be called.
//
// When loading via glibc, the first doubleword on the stack points
	- // to NULL a value. (that is *(uintptr)(R1) == 0). This is used to
	- // differentiate static vs dynamicly linked binaries.
	+ // to a NULL value (that is, *(uintptr)(R1) == 0). This is used to
	+ // differentiate static vs dynamically linked binaries.
//
// If loading with the musl loader, it doesn't follow the ELFv2 ABI. It
// passes argc/argv similar to the linux kernel, R13 (TLS) is
#include "textflag.h"
// If !iscgo, this is a no-op.
-// NOTE: gogo asumes load_g only clobers g (R30) and REGTMP (R23)
+// NOTE: gogo assumes load_g only clobbers g (R30) and REGTMP (R23)
TEXT runtime·save_g(SB),NOSPLIT|NOFRAME,$0-0
MOVB runtime·iscgo(SB), R23
BEQ R23, nocgo
// of CPU samples, so we'll call that success.
overflowed := totalTraceSamples >= 1900
if traceSamples < pprofSamples {
- t.Logf("exectution trace did not include all CPU profile samples; %d in profile, %d in trace", pprofSamples, traceSamples)
+ t.Logf("execution trace did not include all CPU profile samples; %d in profile, %d in trace", pprofSamples, traceSamples)
if !overflowed {
t.Fail()
}