}
// parsePAX parses PAX headers.
-// If an extended header (type 'x') is invalid, ErrHeader is returned
+// If an extended header (type 'x') is invalid, ErrHeader is returned.
func parsePAX(r io.Reader) (map[string]string, error) {
buf, err := readSpecialFile(r)
if err != nil {
const aMCR = arm.ALAST + 1
// IsARMMRC reports whether the op (as defined by an arm.A* constant) is
-// MRC or MCR
+// MRC or MCR.
func IsARMMRC(op obj.As) bool {
switch op {
case arm.AMRC, aMCR: // Note: aMCR is defined in this package.
}
}
-// base strips away qualifiers and typedefs to get the underlying type
+// base strips away qualifiers and typedefs to get the underlying type.
func base(dt dwarf.Type) dwarf.Type {
for {
if d, ok := dt.(*dwarf.QualType); ok {
spillOffset int64 // current spill offset
}
-// align returns a rounded up to t's alignment
+// align returns a rounded up to t's alignment.
func align(a int64, t *types.Type) int64 {
return alignTo(a, int(uint8(t.Alignment())))
}
panic("bad store type")
}
-// shift type is used as Offset in obj.TYPE_SHIFT operands to encode shifted register operands
+// shift type is used as Offset in obj.TYPE_SHIFT operands to encode shifted register operands.
type shift int64
// copied from ../../../internal/obj/util.go:/TYPE_SHIFT
}
}
-// makeshift encodes a register shifted by a constant
+// makeshift encodes a register shifted by a constant.
func makeshift(v *ssa.Value, reg int16, typ int64, s int64) shift {
if s < 0 || s >= 32 {
v.Fatalf("shift out of range: %d", s)
return shift(int64(reg&0xf) | typ | (s&31)<<7)
}
-// genshift generates a Prog for r = r0 op (r1 shifted by n)
+// genshift generates a Prog for r = r0 op (r1 shifted by n).
func genshift(s *ssagen.State, v *ssa.Value, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
p := s.Prog(as)
p.From.Type = obj.TYPE_SHIFT
return p
}
-// makeregshift encodes a register shifted by a register
+// makeregshift encodes a register shifted by a register.
func makeregshift(r1 int16, typ int64, r2 int16) shift {
return shift(int64(r1&0xf) | typ | int64(r2&0xf)<<8 | 1<<4)
}
-// genregshift generates a Prog for r = r0 op (r1 shifted by r2)
+// genregshift generates a Prog for r = r0 op (r1 shifted by r2).
func genregshift(s *ssagen.State, as obj.As, r0, r1, r2, r int16, typ int64) *obj.Prog {
p := s.Prog(as)
p.From.Type = obj.TYPE_SHIFT
ssa.BlockARMGEnoov: {arm.ABPL, arm.ABMI},
}
-// To model a 'LEnoov' ('<=' without overflow checking) branching
+// To model a 'LEnoov' ('<=' without overflow checking) branching.
var leJumps = [2][2]ssagen.IndexJump{
{{Jump: arm.ABEQ, Index: 0}, {Jump: arm.ABPL, Index: 1}}, // next == b.Succs[0]
{{Jump: arm.ABMI, Index: 0}, {Jump: arm.ABEQ, Index: 0}}, // next == b.Succs[1]
}
-// To model a 'GTnoov' ('>' without overflow checking) branching
+// To model a 'GTnoov' ('>' without overflow checking) branching.
var gtJumps = [2][2]ssagen.IndexJump{
{{Jump: arm.ABMI, Index: 1}, {Jump: arm.ABEQ, Index: 1}}, // next == b.Succs[0]
{{Jump: arm.ABEQ, Index: 1}, {Jump: arm.ABPL, Index: 0}}, // next == b.Succs[1]
panic("bad store type")
}
-// makeshift encodes a register shifted by a constant, used as an Offset in Prog
+// makeshift encodes a register shifted by a constant, used as an Offset in Prog.
func makeshift(v *ssa.Value, reg int16, typ int64, s int64) int64 {
if s < 0 || s >= 64 {
v.Fatalf("shift out of range: %d", s)
return int64(reg&31)<<16 | typ | (s&63)<<10
}
-// genshift generates a Prog for r = r0 op (r1 shifted by n)
+// genshift generates a Prog for r = r0 op (r1 shifted by n).
func genshift(s *ssagen.State, v *ssa.Value, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
p := s.Prog(as)
p.From.Type = obj.TYPE_SHIFT
ssa.BlockARM64GEnoov: {arm64.ABPL, arm64.ABMI},
}
-// To model a 'LEnoov' ('<=' without overflow checking) branching
+// To model a 'LEnoov' ('<=' without overflow checking) branching.
var leJumps = [2][2]ssagen.IndexJump{
{{Jump: arm64.ABEQ, Index: 0}, {Jump: arm64.ABPL, Index: 1}}, // next == b.Succs[0]
{{Jump: arm64.ABMI, Index: 0}, {Jump: arm64.ABEQ, Index: 0}}, // next == b.Succs[1]
}
-// To model a 'GTnoov' ('>' without overflow checking) branching
+// To model a 'GTnoov' ('>' without overflow checking) branching.
var gtJumps = [2][2]ssagen.IndexJump{
{{Jump: arm64.ABMI, Index: 1}, {Jump: arm64.ABEQ, Index: 1}}, // next == b.Succs[0]
{{Jump: arm64.ABEQ, Index: 1}, {Jump: arm64.ABPL, Index: 0}}, // next == b.Succs[1]
return numErrors
}
-// SyntaxErrors returns the number of syntax errors reported
+// SyntaxErrors returns the number of syntax errors reported.
func SyntaxErrors() int {
return numSyntaxErrors
}
return s
}
-// parseLeaks parses a binary string representing a leaks
+// parseLeaks parses a binary string representing a leaks.
func parseLeaks(s string) leaks {
var l leaks
if !strings.HasPrefix(s, "esc:") {
}
// ClosureDebugRuntimeCheck applies boilerplate checks for debug flags
-// and compiling runtime
+// and compiling runtime.
func ClosureDebugRuntimeCheck(clo *ClosureExpr) {
if base.Debug.Closure > 0 {
if clo.Esc() == EscHeap {
return -1, 0
}
-// affectedVar returns the *ir.Name node affected by v
+// affectedVar returns the *ir.Name node affected by v.
func affectedVar(v *ssa.Value) (*ir.Name, ssa.SymEffect) {
// Special cases.
switch v.Op {
// checkLogPath does superficial early checking of the string specifying
// the directory to which optimizer logging is directed, and if
-// it passes the test, stores the string in LO_dir
+// it passes the test, stores the string in LO_dir.
func checkLogPath(destination string) string {
path, complaint := parseLogPath(destination)
if complaint != "" {
// LogOpt logs information about a (usually missed) optimization performed by the compiler.
// Pos is the source position (including inlining), what is the message, pass is which pass created the message,
-// funcName is the name of the function
+// funcName is the name of the function.
func LogOpt(pos src.XPos, what, pass, funcName string, args ...interface{}) {
if Format == None {
return
End: Position{p.Line(), p.Col()}}
}
-// newLocation returns the Location for the compiler source location p
+// newLocation returns the Location for the compiler source location p.
func newLocation(p src.Pos) Location {
loc := Location{URI: uriIfy(uprootedPath(p.Filename())), Range: newPointRange(p)}
return loc
}
-// appendInlinedPos extracts inlining information from posTmp and append it to diagnostic
+// appendInlinedPos extracts inlining information from posTmp and appends it to diagnostic.
func appendInlinedPos(posTmp []src.Pos, diagnostic *Diagnostic) {
for i := 1; i < len(posTmp); i++ {
p := posTmp[i]
"cmd/internal/obj/loong64"
)
-// isFPreg reports whether r is an FP register
+// isFPreg reports whether r is an FP register.
func isFPreg(r int16) bool {
return loong64.REG_F0 <= r && r <= loong64.REG_F31
}
"cmd/internal/obj/mips"
)
-// isFPreg reports whether r is an FP register
+// isFPreg reports whether r is an FP register.
func isFPreg(r int16) bool {
return mips.REG_F0 <= r && r <= mips.REG_F31
}
-// isHILO reports whether r is HI or LO register
+// isHILO reports whether r is HI or LO register.
func isHILO(r int16) bool {
return r == mips.REG_HI || r == mips.REG_LO
}
"cmd/internal/obj/mips"
)
-// isFPreg reports whether r is an FP register
+// isFPreg reports whether r is an FP register.
func isFPreg(r int16) bool {
return mips.REG_F0 <= r && r <= mips.REG_F31
}
-// isHILO reports whether r is HI or LO register
+// isHILO reports whether r is HI or LO register.
func isHILO(r int16) bool {
return r == mips.REG_HI || r == mips.REG_LO
}
return n
}
-// compressOrig is the "simple" compress function from LT paper
+// compressOrig is the "simple" compress function from LT paper.
func compressOrig(v ID, ancestor, semi, label []ID) {
if ancestor[ancestor[v]] != 0 {
compressOrig(ancestor[v], ancestor, semi, label)
}
}
-// evalOrig is the "simple" eval function from LT paper
+// evalOrig is the "simple" eval function from LT paper.
func evalOrig(v ID, ancestor, semi, label []ID) ID {
if ancestor[v] == 0 {
return v
}
}
-// findLastMems maps block ids to last memory-output op in a block, if any
+// findLastMems maps block ids to last memory-output op in a block, if any.
func findLastMems(f *Func) []*Value {
var stores []*Value
package ssa
-// convert to machine-dependent ops
+// lower converts to machine-dependent ops.
func lower(f *Func) {
// repeat rewrites until we find no more rewrites
applyRewrite(f, f.Config.lowerBlock, f.Config.lowerValue, removeDeadValues)
return a.TypeOfArg(which).Size()
}
-// NResults returns the number of results
+// NResults returns the number of results.
func (a *AuxCall) NResults() int64 {
return int64(len(a.abiInfo.OutParams()))
}
func (*AuxCall) CanBeAnSSAAux() {}
-// OwnAuxCall returns a function's own AuxCall
+// OwnAuxCall returns a function's own AuxCall.
func OwnAuxCall(fn *obj.LSym, paramResultInfo *abi.ABIParamResultInfo) *AuxCall {
// TODO if this remains identical to ClosureAuxCall above after new ABI is done, should deduplicate.
var reg *regInfo
package ssa
-// machine-independent optimization
+// opt performs machine-independent optimization.
func opt(f *Func) {
applyRewrite(f, rewriteBlockgeneric, rewriteValuegeneric, removeDeadValues)
}
}
// isCleanExt reports whether v is the result of a value-preserving
-// sign or zero extension
+// sign or zero extension.
func isCleanExt(v *Value) bool {
switch v.Op {
case OpSignExt8to16, OpSignExt8to32, OpSignExt8to64,
return true
}
-// isSameCall reports whether sym is the same as the given named symbol
+// isSameCall reports whether sym is the same as the given named symbol.
func isSameCall(sym interface{}, name string) bool {
fn := sym.(*AuxCall).Fn
return fn != nil && fn.String() == name
}
-// canLoadUnaligned reports if the architecture supports unaligned load operations
+// canLoadUnaligned reports if the architecture supports unaligned load operations.
func canLoadUnaligned(c *Config) bool {
return c.ctxt.Arch.Alignment == 1
}
}
// de-virtualize an InterLECall
-// 'sym' is the symbol for the itab
+// 'sym' is the symbol for the itab.
func devirtLESym(v *Value, aux Aux, sym Sym, offset int64) *obj.LSym {
n, ok := sym.(*obj.LSym)
if !ok {
return false
}
-// moveSize returns the number of bytes an aligned MOV instruction moves
+// moveSize returns the number of bytes an aligned MOV instruction moves.
func moveSize(align int64, c *Config) int64 {
switch {
case align%8 == 0 && c.PtrSize == 8:
return true
}
-// for a pseudo-op like (LessThan x), extract x
+// flagArg extracts x from a pseudo-op like (LessThan x).
func flagArg(v *Value) *Value {
if len(v.Args) != 1 || !v.Args[0].Type.IsFlags() {
return nil
}
}
-// check if an immediate can be directly encoded into an ARM's instruction
+// isARMImmRot reports whether an immediate can be directly encoded into an ARM instruction.
func isARMImmRot(v uint32) bool {
for i := 0; i < 16; i++ {
if v&^0xff == 0 {
return false
}
-// zeroUpper48Bits is similar to zeroUpper32Bits, but for upper 48 bits
+// zeroUpper48Bits is similar to zeroUpper32Bits, but for upper 48 bits.
func zeroUpper48Bits(x *Value, depth int) bool {
switch x.Op {
case OpAMD64MOVWQZX, OpAMD64MOVWload, OpAMD64MOVWloadidx1, OpAMD64MOVWloadidx2:
return false
}
-// zeroUpper56Bits is similar to zeroUpper32Bits, but for upper 56 bits
+// zeroUpper56Bits is similar to zeroUpper32Bits, but for upper 56 bits.
func zeroUpper56Bits(x *Value, depth int) bool {
switch x.Op {
case OpAMD64MOVBQZX, OpAMD64MOVBload, OpAMD64MOVBloadidx1:
return int64(me) | int64(mb<<8) | int64(rotate<<16) | int64(nbits<<24)
}
-// The inverse operation of encodePPC64RotateMask. The values returned as
+// DecodePPC64RotateMask is the inverse operation of encodePPC64RotateMask. The values returned as
// mb and me satisfy the POWER ISA definition of MASK(x,y) where MASK(mb,me) = mask.
func DecodePPC64RotateMask(sauxint int64) (rotate, mb, me int64, mask uint64) {
auxint := uint64(sauxint)
return shiftedMask != 0 && isPowerOfTwo64(shiftedMask+1) && nto(shiftedMask)+lsb < 64
}
-// returns the bitfield width of mask >> rshift for arm64 bitfield ops
+// arm64BFWidth returns the bitfield width of mask >> rshift for arm64 bitfield ops.
func arm64BFWidth(mask, rshift int64) int64 {
shiftedMask := int64(uint64(mask) >> uint64(rshift))
if shiftedMask == 0 {
}
// newSparseSet returns a sparseSet that can represent
-// integers between 0 and n-1
+// integers between 0 and n-1.
func newSparseSet(n int) *sparseSet {
return &sparseSet{dense: nil, sparse: make([]int32, n)}
}
// such as whether one block dominates another.
type SparseTree []SparseTreeNode
-// newSparseTree creates a SparseTree from a block-to-parent map (array indexed by Block.ID)
+// newSparseTree creates a SparseTree from a block-to-parent map (array indexed by Block.ID).
func newSparseTree(f *Func, parentOf []*Block) SparseTree {
t := make(SparseTree, f.NumBlocks())
for _, b := range f.Blocks {
return t[x.ID].parent
}
-// isAncestorEq reports whether x is an ancestor of or equal to y.
+// IsAncestorEq reports whether x is an ancestor of or equal to y.
func (t SparseTree) IsAncestorEq(x, y *Block) bool {
if x == y {
return true
}
// emptyBlock reports whether the block does not contain actual
-// instructions
+// instructions.
func emptyBlock(b *Block) bool {
for _, v := range b.Values {
if v.Op != OpPhi {
// trimmableBlock reports whether the block can be trimmed from the CFG,
// subject to the following criteria:
-// - it should not be the first block
-// - it should be BlockPlain
-// - it should not loop back to itself
+// - it should not be the first block.
+// - it should be BlockPlain.
+// - it should not loop back to itself.
// - it either is the single predecessor of the successor block or
-// contains no actual instructions
+// contains no actual instructions.
func trimmableBlock(b *Block) bool {
if b.Kind != BlockPlain || b == b.Func.Entry {
return false
return b.NewValue1I(pos, OpSelectN, types.TypeMem, 0, call)
}
-// round to a multiple of r, r is a power of 2
+// round rounds o up to a multiple of r; r must be a power of 2.
func round(o int64, r int64) int64 {
return (o + r - 1) &^ (r - 1)
}
}
// newNowritebarrierrecChecker creates a nowritebarrierrecChecker. It
-// must be called before walk
+// must be called before walk.
func newNowritebarrierrecChecker() *nowritebarrierrecChecker {
c := &nowritebarrierrecChecker{
extraCalls: make(map[*ir.Func][]nowritebarrierrecCall),
}
// newSparseSet returns a sparseSet that can represent
-// integers between 0 and n-1
+// integers between 0 and n-1.
func newSparseSet(n int) *sparseSet {
return &sparseSet{dense: nil, sparse: make([]int32, n)}
}
return a
}
-// dvarint writes a varint v to the funcdata in symbol x and returns the new offset
+// dvarint writes a varint v to the funcdata in symbol x and returns the new offset.
func dvarint(x *obj.LSym, off int, v int64) int {
if v < 0 || v > 1e9 {
panic(fmt.Sprintf("dvarint: bad offset for funcdata - %v", v))
return TempAt(base.Pos, ir.CurFunc, t)
}
-// make a new Node off the books
+// TempAt makes a new Node off the books.
func TempAt(pos src.XPos, curfn *ir.Func, t *types.Type) *ir.Name {
if curfn == nil {
base.Fatalf("no curfn for TempAt")
)
// importalias declares symbol s as an imported type alias with type t.
-// ipkg is the package being imported
+// ipkg is the package being imported.
func importalias(pos src.XPos, s *types.Sym, t *types.Type) *ir.Name {
return importobj(pos, s, ir.OTYPE, ir.PEXTERN, t)
}
// importconst declares symbol s as an imported constant with type t and value val.
-// ipkg is the package being imported
+// ipkg is the package being imported.
func importconst(pos src.XPos, s *types.Sym, t *types.Type, val constant.Value) *ir.Name {
n := importobj(pos, s, ir.OLITERAL, ir.PEXTERN, t)
n.SetVal(val)
}
// importfunc declares symbol s as an imported function with type t.
-// ipkg is the package being imported
+// ipkg is the package being imported.
func importfunc(pos src.XPos, s *types.Sym, t *types.Type) *ir.Name {
n := importobj(pos, s, ir.ONAME, ir.PFUNC, t)
n.Func = ir.NewFunc(pos)
}
// importobj declares symbol s as an imported object representable by op.
-// ipkg is the package being imported
+// ipkg is the package being imported.
func importobj(pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Class, t *types.Type) *ir.Name {
n := importsym(pos, s, op, ctxt)
n.SetType(t)
// importtype returns the named type declared by symbol s.
// If no such type has been declared yet, a forward declaration is returned.
-// ipkg is the package being imported
+// ipkg is the package being imported.
func importtype(pos src.XPos, s *types.Sym) *ir.Name {
n := importsym(pos, s, ir.OTYPE, ir.PEXTERN)
n.SetType(types.NewNamed(n))
}
// importvar declares symbol s as an imported variable with type t.
-// ipkg is the package being imported
+// ipkg is the package being imported.
func importvar(pos src.XPos, s *types.Sym, t *types.Type) *ir.Name {
return importobj(pos, s, ir.ONAME, ir.PEXTERN, t)
}
return fmt.Sprintf("(%s)", strings.Join(typeStrings, ", "))
}
-// type check composite
+// fielddup reports an error for a duplicate field name in a struct literal, tracking seen names in hash.
func fielddup(name string, hash map[string]bool) {
if hash[name] {
base.Errorf("duplicate field name in struct literal: %s", name)
return n
}
-// generate code for print
+// walkPrint generates code for print.
func walkPrint(nn *ir.CallExpr, init *ir.Nodes) ir.Node {
// Hoist all the argument evaluation up before the lock.
walkExprListCheap(nn.Args, init)
// expression or simple statement.
// the types expressions are calculated.
// compile-time constants are evaluated.
-// complex side effects like statements are appended to init
+// complex side effects like statements are appended to init.
func walkExprList(s []ir.Node, init *ir.Nodes) {
for i := range s {
s[i] = walkExpr(s[i], init)
return n
}
-// return 1 if integer n must be in range [0, max), 0 otherwise
+// bounded reports whether integer n must be in range [0, max).
func bounded(n ir.Node, max int64) bool {
if n.Type() == nil || !n.Type().IsInteger() {
return false
fmt.Fprintf(os.Stderr, format, args...)
}
-// xsamefile reports whether f1 and f2 are the same file (or dir)
+// xsamefile reports whether f1 and f2 are the same file (or dir).
func xsamefile(f1, f2 string) bool {
fi1, err1 := os.Stat(f1)
fi2, err2 := os.Stat(f2)
}
// Reset resets globals in the modfetch package, so previous loads don't affect
-// contents of go.sum files
+// contents of go.sum files.
func Reset() {
GoSumFile = ""
WorkspaceGoSumFiles = nil
return !live
}
-// Slot -1: return top-level inlines
-// Slot >= 0: return children of that slot
+// Slot -1: return top-level inlines.
+// Slot >= 0: return children of that slot.
func inlChildren(slot int, calls *InlCalls) []int {
var kids []int
if slot != -1 {
return sequenceOfOnes(x) || sequenceOfOnes(^x)
}
-// sequenceOfOnes tests whether a constant is a sequence of ones in binary, with leading and trailing zeros
+// sequenceOfOnes tests whether a constant is a sequence of ones in binary, with leading and trailing zeros.
func sequenceOfOnes(x uint64) bool {
y := x & -x // lowest set bit of x. x is good iff x+y is a power of 2
y += x
/*
* if v contains a single 16-bit value aligned
* on a 16-bit field, and thus suitable for movk/movn,
- * return the field index 0 to 3; otherwise return -1
+ * return the field index 0 to 3; otherwise return -1.
*/
func movcon(v int64) int {
for s := 0; s < 64; s += 16 {
return op | (i&0xFFFFF)<<5 | (r2&0x1F)<<0 // ui20, rd5
}
-// Encoding for the 'b' or 'bl' instruction
+// Encoding for the 'b' or 'bl' instruction.
func OP_B_BL(op uint32, i uint32) uint32 {
return op | ((i & 0xFFFF) << 10) | ((i >> 16) & 0x3FF)
}
var xcmp [C_NCLASS][C_NCLASS]bool
-// padding bytes to add to align code as requested
+// addpad returns the number of padding bytes to add to align code as requested.
func addpad(pc, a int64, ctxt *obj.Link, cursym *obj.LSym) int {
// For 16 and 32 byte alignment, there is a tradeoff
// between aligning the code and adding too many NOPs.
return o<<26 | xo<<2 | rc&1
}
-/* the order is dest, a/s, b/imm for both arithmetic and logical operations */
+/* the order is dest, a/s, b/imm for both arithmetic and logical operations. */
func AOP_RRR(op uint32, d uint32, a uint32, b uint32) uint32 {
return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11
}
return int32(c.vregoff(a))
}
-// find if the displacement is within 12 bit
+// find if the displacement is within 12 bit.
func isU12(displacement int32) bool {
return displacement >= 0 && displacement < DISP12
}
-// zopload12 returns the RX op with 12 bit displacement for the given load
+// zopload12 returns the RX op with 12 bit displacement for the given load.
func (c *ctxtz) zopload12(a obj.As) (uint32, bool) {
switch a {
case AFMOVD:
return 0, false
}
-// zopload returns the RXY op for the given load
+// zopload returns the RXY op for the given load.
func (c *ctxtz) zopload(a obj.As) uint32 {
switch a {
// fixed point load
return 0
}
-// zopstore12 returns the RX op with 12 bit displacement for the given store
+// zopstore12 returns the RX op with 12 bit displacement for the given store.
func (c *ctxtz) zopstore12(a obj.As) (uint32, bool) {
switch a {
case AFMOVD:
return 0, false
}
-// zopstore returns the RXY op for the given store
+// zopstore returns the RXY op for the given store.
func (c *ctxtz) zopstore(a obj.As) uint32 {
switch a {
// fixed point store
return 0
}
-// zoprre returns the RRE op for the given a
+// zoprre returns the RRE op for the given a.
func (c *ctxtz) zoprre(a obj.As) uint32 {
switch a {
case ACMP:
return 0
}
-// zoprr returns the RR op for the given a
+// zoprr returns the RR op for the given a.
func (c *ctxtz) zoprr(a obj.As) uint32 {
switch a {
case ACMPW:
return 0
}
-// zopril returns the RIL op for the given a
+// zopril returns the RIL op for the given a.
func (c *ctxtz) zopril(a obj.As) uint32 {
switch a {
case ACMP:
}
// Preserve highest 8 bits of a, and do addition to lower 24-bit
-// of a and b; used to adjust ARM branch instruction's target
+// of a and b; used to adjust ARM branch instruction's target.
func braddoff(a int32, b int32) int32 {
return int32((uint32(a))&0xff000000 | 0x00ffffff&uint32(a+b))
}
return true
}
-// sign extend a 24-bit integer
+// sign extend a 24-bit integer.
func signext24(x int64) int32 {
return (int32(x) << 8) >> 8
}
return 0
}
-// Convert the direct jump relocation r to refer to a trampoline if the target is too far
+// Convert the direct jump relocation r to refer to a trampoline if the target is too far.
func trampoline(ctxt *ld.Link, ldr *loader.Loader, ri int, rs, s loader.Sym) {
relocs := ldr.Relocs(s)
r := relocs.At(ri)
}
}
-// generate a trampoline to target+offset
+// generate a trampoline to target+offset.
func gentramp(arch *sys.Arch, linkmode ld.LinkMode, ldr *loader.Loader, tramp *loader.SymbolBuilder, target loader.Sym, offset int64) {
tramp.SetSize(12) // 3 instructions
P := make([]byte, tramp.Size())
}
}
-// generate a trampoline to target+offset in position independent code
+// generate a trampoline to target+offset in position independent code.
func gentramppic(arch *sys.Arch, tramp *loader.SymbolBuilder, target loader.Sym, offset int64) {
tramp.SetSize(16) // 4 instructions
P := make([]byte, tramp.Size())
r.SetAdd(offset + 4)
}
-// generate a trampoline to target+offset in dynlink mode (using GOT)
+// generate a trampoline to target+offset in dynlink mode (using GOT).
func gentrampdyn(arch *sys.Arch, tramp *loader.SymbolBuilder, target loader.Sym, offset int64) {
tramp.SetSize(20) // 5 instructions
o1 := uint32(0xe5900000 | 12<<12 | 15<<16 | 8) // MOVW 8(R15), R12 // R15 is actual pc + 8
return fmt.Sprintf("%s+%d", ldr.SymExtname(s), off)
}
-// Convert the direct jump relocation r to refer to a trampoline if the target is too far
+// Convert the direct jump relocation r to refer to a trampoline if the target is too far.
func trampoline(ctxt *ld.Link, ldr *loader.Loader, ri int, rs, s loader.Sym) {
relocs := ldr.Relocs(s)
r := relocs.At(ri)
"sync/atomic"
)
-// isRuntimeDepPkg reports whether pkg is the runtime package or its dependency
+// isRuntimeDepPkg reports whether pkg is the runtime package or its dependency.
func isRuntimeDepPkg(pkg string) bool {
switch pkg {
case "runtime",
}
}
-// assigns address for a text symbol, returns (possibly new) section, its number, and the address
+// assignAddress assigns an address to a text symbol and returns its (possibly new) section, its number, and the address.
func assignAddress(ctxt *Link, sect *sym.Section, n int, s loader.Sym, va uint64, isTramp, big bool) (*sym.Section, int, uint64) {
ldr := ctxt.loader
if thearch.AssignAddress != nil {
return elf32writehdr(out)
}
-/* Taken directly from the definition document for ELF64 */
+/* Taken directly from the definition document for ELF64. */
func elfhash(name string) uint32 {
var h uint32
for i := 0; i < len(name); i++ {
}
// Create an ElfShdr for the section with name.
-// Create a duplicate if one already exists with that name
+// Create a duplicate if one already exists with that name.
func elfshnamedup(name string) *ElfShdr {
for i := 0; i < nelfstr; i++ {
if name == elfstr[i].s {
}
// Xcoffinit initialised some internal value and setups
-// already known header information
+// already known header information.
func Xcoffinit(ctxt *Link) {
xfile.dynLibraries = make(map[string]int)
return syms
}
-// put function used by genasmsym to write symbol table
+// put function used by genasmsym to write symbol table.
func putaixsym(ctxt *Link, x loader.Sym, t SymbolType) {
// All XCOFF symbols generated by this GO symbols
// Can be a symbol entry or a auxiliary entry
// Currently, this section is created from scratch when assembling the XCOFF file
// according to information retrieved in xfile object.
-// Create loader section and returns its size
+// Create loader section and returns its size.
func Loaderblk(ctxt *Link, off uint64) {
xfile.writeLdrScn(ctxt, off)
}
}
}
-// Generate XCOFF assembly file
+// Generate XCOFF assembly file.
func asmbXcoff(ctxt *Link) {
ctxt.Out.SeekSet(0)
fileoff := int64(Segdwarf.Fileoff + Segdwarf.Filelen)
return stub.Sym(), firstUse
}
-// Scan relocs and generate PLT stubs and generate/fixup ABI defined functions created by the linker
+// Scan relocs and generate PLT stubs and generate/fixup ABI defined functions created by the linker.
func genstubs(ctxt *ld.Link, ldr *loader.Loader) {
var stubs []loader.Sym
var abifuncs []loader.Sym
return packInstPair(target, o1, o2)
}
-// Determine if the code was compiled so that the TOC register R2 is initialized and maintained
+// Determine if the code was compiled so that the TOC register R2 is initialized and maintained.
func r2Valid(ctxt *ld.Link) bool {
switch ctxt.BuildMode {
case ld.BuildModeCArchive, ld.BuildModeCShared, ld.BuildModePIE, ld.BuildModeShared, ld.BuildModePlugin:
return ctxt.IsSharedGoLink()
}
-// resolve direct jump relocation r in s, and add trampoline if necessary
+// resolve direct jump relocation r in s, and add trampoline if necessary.
func trampoline(ctxt *ld.Link, ldr *loader.Loader, ri int, rs, s loader.Sym) {
// Trampolines are created if the branch offset is too large and the linker cannot insert a call stub to handle it.
}
}
-// Generate the glink resolver stub if necessary and return the .glink section
+// Generate the glink resolver stub if necessary and return the .glink section.
func ensureglinkresolver(ctxt *ld.Link, ldr *loader.Loader) *loader.SymbolBuilder {
glink := ldr.CreateSymForUpdate(".glink", 0)
if glink.Size() != 0 {
// cpuProfileHandler is the Go pprof CPU profile handler URL.
const cpuProfileHandler = "/debug/pprof/profile"
-// adjustURL applies the duration/timeout values and Go specific defaults
+// adjustURL applies the duration/timeout values and Go specific defaults.
func adjustURL(source string, duration, timeout time.Duration) (string, time.Duration) {
u, err := url.Parse(source)
if err != nil || (u.Host == "" && u.Scheme != "" && u.Scheme != "file") {
}
// bulkHash4 will compute hashes using the same
-// algorithm as hash4
+// algorithm as hash4.
func bulkHash4(b []byte, dst []uint32) {
if len(b) < minMatchLength {
return
return &huffmanEncoder{codes: make([]hcode, size)}
}
-// Generates a HuffmanCode corresponding to the fixed literal table
+// generateFixedLiteralEncoding generates a huffmanEncoder corresponding to the fixed literal table.
func generateFixedLiteralEncoding() *huffmanEncoder {
h := newHuffmanEncoder(maxNumLit)
codes := h.codes
return token(matchType + xlength<<lengthShift + xoffset)
}
+// literal returns the literal of a literal token.
+// Returns the literal of a literal token.
func (t token) literal() uint32 { return uint32(t - literalType) }
-// Returns the extra offset of a match token
+// Returns the extra offset of a match token.
func (t token) offset() uint32 { return uint32(t) & offsetMask }
func (t token) length() uint32 { return uint32((t - matchType) >> lengthShift) }
func lengthCode(len uint32) uint32 { return lengthCodes[len] }
-// Returns the offset code corresponding to a specific offset
+// offsetCode returns the offset code corresponding to a specific offset.
func offsetCode(off uint32) uint32 {
if off < uint32(len(offsetCodes)) {
return offsetCodes[off]
var feistelBoxOnce sync.Once
-// general purpose function to perform DES block permutations
+// permuteBlock is a general purpose function to perform DES block permutations.
func permuteBlock(src uint64, permutation []uint8) (block uint64) {
for position, n := range permutation {
bit := (src >> n) & 1
}
// creates 16 28-bit blocks rotated according
-// to the rotation schedule
+// to the rotation schedule.
func ksRotate(in uint32) (out []uint32) {
out = make([]uint32, 16)
last := in
return
}
-// creates 16 56-bit subkeys from the original key
+// creates 16 56-bit subkeys from the original key.
func (c *desCipher) generateSubkeys(keyBytes []byte) {
feistelBoxOnce.Do(initFeistelBox)
panic("boringcrypto: invalid code execution")
}
-// provided by runtime to avoid os import
+// provided by runtime to avoid os import.
func runtime_arg0() string
func hasSuffix(s, t string) bool {
return string(b)
}
-// TimeToCFDateRef converts a time.Time into an apple CFDateRef
+// TimeToCFDateRef converts a time.Time into an apple CFDateRef.
func TimeToCFDateRef(t time.Time) CFRef {
secs := t.Sub(time.Date(2001, 1, 1, 0, 0, 0, 0, time.UTC)).Seconds()
ref := CFDateCreate(secs)
return scanError
}
-// quoteChar formats c as a quoted character literal
+// quoteChar formats c as a quoted character literal.
func quoteChar(c byte) string {
// special cases - different from quoted strings
if c == '\'' {
var outputPrefix = lazyregexp.New(`(?i)^[[:space:]]*(unordered )?output:`)
-// Extracts the expected output and whether there was a valid output comment
+// exampleOutput extracts the expected output and whether there was a valid output comment.
func exampleOutput(b *ast.BlockStmt, comments []*ast.CommentGroup) (output string, unordered, ok bool) {
if _, last := lastComment(b, comments); last != nil {
// test that it begins with the correct prefix
func (d *data) Swap(i, j int) { d.swap(i, j) }
func (d *data) Less(i, j int) bool { return d.less(i, j) }
-// sortBy is a helper function for sorting
+// sortBy is a helper function for sorting.
func sortBy(less func(i, j int) bool, swap func(i, j int), n int) {
sort.Sort(&data{n, swap, less})
}
errorCount++
}
-// parse may be called concurrently
+// parse may be called concurrently.
func parse(filename string, src any) (*ast.File, error) {
if *verbose {
fmt.Println(filename)
//go:noescape
func ppc64SlicingUpdateBy8(crc uint32, table8 *slicing8Table, p []byte) uint32
-// this function requires the buffer to be 16 byte aligned and > 16 bytes long
+// vectorCrc32 requires the buffer to be 16 byte aligned and > 16 bytes long.
//
//go:noescape
func vectorCrc32(crc uint32, poly uint32, p []byte) uint32
// indirectToStringerOrError returns the value, after dereferencing as many times
// as necessary to reach the base type (or nil) or an implementation of fmt.Stringer
-// or error,
+// or error.
func indirectToStringerOrError(a any) any {
if a == nil {
return nil
}
// isCoverageSubset returns true if all the base coverage bits are set in
-// snapshot
+// snapshot.
func isCoverageSubset(base, snapshot []byte) bool {
for i, v := range base {
if v&snapshot[i] != v {
}
// focusedSample checks a sample against focus and ignore regexps.
-// Returns whether the focus/ignore regexps match any tags
+// Returns whether the focus/ignore regexps match any tags.
func focusedSample(s *Sample, focus, ignore TagMatch) (fm, im bool) {
fm = focus == nil
for key, vals := range s.Label {
// where the signs of u0, u1, v0, v1 are given by even
// For even == true: u0, v1 >= 0 && u1, v0 <= 0
// For even == false: u0, v1 <= 0 && u1, v0 >= 0
-// q, r, s, t are temporary variables to avoid allocations in the multiplication
+// q, r, s, t are temporary variables to avoid allocations in the multiplication.
func lehmerUpdate(A, B, q, r, s, t *Int, u0, u1, v0, v1 Word, even bool) {
t.abs = t.abs.setWord(u0)
}
// euclidUpdate performs a single step of the Euclidean GCD algorithm
-// if extended is true, it also updates the cosequence Ua, Ub
+// if extended is true, it also updates the cosequence Ua, Ub.
func euclidUpdate(A, B, Ua, Ub, q, r, s, t *Int, extended bool) {
q, r = q.QuoRem(A, B, r)
return x.Text(10)
}
-// write count copies of text to s
+// write count copies of text to s.
func writeMultiple(s fmt.State, text string, count int) {
if len(text) > 0 {
b := []byte(text)
return z.expNN(nat(nil).setWord(x), nat(nil).setWord(y), nil, false)
}
-// construct table of powers of bb*leafSize to use in subdivisions
+// construct table of powers of bb*leafSize to use in subdivisions.
func divisors(m int, b Word, ndigits int, bb Word) []divisor {
// only compute table when recursive conversion is enabled and x is large
if leafSize == 0 || m <= leafSize {
return complex(c*ch, s*sh)
}
-// calculate sinh and cosh
+// calculate sinh and cosh.
func sinhcosh(x float64) (sh, ch float64) {
if math.Abs(x) <= 0.5 {
return math.Sinh(x), math.Cosh(x)
}
// redirectBehavior describes what should happen when the
-// client encounters a 3xx status code from the server
+// client encounters a 3xx status code from the server.
func redirectBehavior(reqMethod string, resp *Response, ireq *Request) (redirectMethod string, shouldRedirect, includeBody bool) {
switch resp.StatusCode {
case 301, 302, 303:
// readCookies parses all "Cookie" values from the header h and
// returns the successfully parsed Cookies.
//
-// if filter isn't empty, only cookies of that name are returned
+// if filter isn't empty, only cookies of that name are returned.
func readCookies(h Header, filter string) []*Cookie {
lines := h["Cookie"]
if len(lines) == 0 {
return nil
}
-// Checks whether chunked is part of the encodings stack
+// Checks whether chunked is part of the encodings stack.
// chunked reports whether the transfer-encoding stack te starts with
// the "chunked" encoding.
func chunked(te []string) bool {
	if len(te) == 0 {
		return false
	}
	return te[0] == "chunked"
}
// Checks whether the encoding is explicitly "identity".
// Determine whether to hang up after sending a request and body, or
// receiving a response and body
-// 'header' is the request headers
+// 'header' is the request headers.
func shouldClose(major, minor int, header Header, removeCloseHeader bool) bool {
if major < 1 {
return true
return hasClose
}
-// Parse the trailer header
+// Parse the trailer header.
func fixTrailer(header Header, chunked bool) (Header, error) {
vv, ok := header["Trailer"]
if !ok {
}{}))
// unwrapNopCloser return the underlying reader and true if r is a NopCloser
-// else it return false
+// else it return false.
func unwrapNopCloser(r io.Reader) (underlyingReader io.Reader, isNopCloser bool) {
switch reflect.TypeOf(r) {
case nopCloserType, nopCloserWriterToType:
"socks5": "1080",
}
-// canonicalAddr returns url.Host but always with a ":port" suffix
+// canonicalAddr returns url.Host but always with a ":port" suffix.
func canonicalAddr(url *url.URL) string {
addr := url.Hostname()
if v, err := idnaASCII(addr); err == nil {
io.WriteString(w, fmt.Sprintf("channel send #%d\n", <-ch))
}
-// exec a program, redirecting output
+// exec a program, redirecting output.
func DateServer(rw http.ResponseWriter, req *http.Request) {
rw.Header().Set("Content-Type", "text/plain; charset=utf-8")
const dnsSectionMask = 0x0003
-// returns only results applicable to name and resolves CNAME entries
+// returns only results applicable to name and resolves CNAME entries.
func validRecs(r *syscall.DNSRecord, dnstype uint16, name string) []*syscall.DNSRecord {
cname := syscall.StringToUTF16Ptr(name)
if dnstype != syscall.DNS_TYPE_CNAME {
return rec
}
-// returns the last CNAME in chain
+// returns the last CNAME in chain.
func resolveCNAME(name *uint16, r *syscall.DNSRecord) *uint16 {
// limit cname resolving to 10 in case of an infinite CNAME loop
Cname:
}
// isMultibyte reports whether r is a multi-byte UTF-8 character
-// as supported by RFC 6532
+// as supported by RFC 6532.
// isMultibyte reports whether r needs more than one byte when encoded
// as UTF-8, i.e. it is outside the single-byte ASCII range.
func isMultibyte(r rune) bool {
	return utf8.RuneSelf <= r
}
return c.Text.Close()
}
-// validateLine checks to see if a line has CR or LF as per RFC 5321
+// validateLine checks to see if a line has CR or LF as per RFC 5321.
func validateLine(line string) error {
if strings.ContainsAny(line, "\n\r") {
return errors.New("smtp: A line must not contain CR or LF")
return false
}
-// isAlphaNum reports whether the byte is an ASCII letter, number, or underscore
+// isAlphaNum reports whether the byte is an ASCII letter, number, or underscore.
// isAlphaNum reports whether c is an ASCII letter, a decimal digit, or
// an underscore.
func isAlphaNum(c uint8) bool {
	switch {
	case c == '_':
		return true
	case '0' <= c && c <= '9':
		return true
	case 'a' <= c && c <= 'z', 'A' <= c && c <= 'Z':
		return true
	}
	return false
}
plugins map[string]*Plugin
)
-// lastmoduleinit is defined in package runtime
+// lastmoduleinit is defined in package runtime.
func lastmoduleinit() (pluginpath string, syms map[string]any, errstr string)
-// doInit is defined in package runtime
+// doInit is defined in package runtime.
//
//go:linkname doInit runtime.doInit
func doInit(t unsafe.Pointer) // t should be a *runtime.initTask
// regexp must start with. Complete is true if the prefix
// is the entire match. Pc is the index of the last rune instruction
// in the string. The onePassPrefix skips over the mandatory
-// EmptyBeginText
+// EmptyBeginText.
func onePassPrefix(p *syntax.Prog) (prefix string, complete bool, pc uint32) {
i := &p.Inst[p.Start]
if i.Op != syntax.InstEmptyWidth || (syntax.EmptyOp(i.Arg))&syntax.EmptyBeginText == 0 {
}
}
-// onePassCopy creates a copy of the original Prog, as we'll be modifying it
+// onePassCopy creates a copy of the original Prog, as we'll be modifying it.
func onePassCopy(prog *syntax.Prog) *onePassProg {
p := &onePassProg{
Start: prog.Start,
matchPool[re.mpool].Put(m)
}
-// minInputLen walks the regexp to find the minimum length of any matchable input
+// minInputLen walks the regexp to find the minimum length of any matchable input.
func minInputLen(re *syntax.Regexp) int {
switch re.Op {
default:
}
}
-// called from assembly
+// called from assembly.
func badcgocallback() {
	// The cgocallback assembly path requires a correctly aligned stack;
	// reaching this function means that invariant was violated, so abort.
	throw("misaligned stack in cgocallback")
}
-// called from (incomplete) assembly
+// called from (incomplete) assembly.
func cgounimpl() {
	// Reached from assembly stubs on configurations where the cgo
	// support routines are not implemented; always fatal.
	throw("cgo not implemented")
}
return c.qcount == c.dataqsiz
}
-// entry point for c <- x from compiled code
+// entry point for c <- x from compiled code.
//
//go:nosplit
func chansend1(c *hchan, elem unsafe.Pointer) {
return atomic.Loaduint(&c.qcount) == 0
}
-// entry points for <- c from compiled code
+// entry points for <- c from compiled code.
//
//go:nosplit
func chanrecv1(c *hchan, elem unsafe.Pointer) {
"strings"
)
-// exported from runtime
+// exported from runtime.
func modinfo() string
// ReadBuildInfo returns the build information embedded
var typecache [typeCacheBuckets]typeCacheBucket
-// dump a uint64 in a varint format parseable by encoding/binary
+// dump a uint64 in a varint format parseable by encoding/binary.
func dumpint(v uint64) {
var buf [10]byte
var n int
}
}
-// dump varint uint64 length followed by memory contents
+// dump varint uint64 length followed by memory contents.
func dumpmemrange(data unsafe.Pointer, len uintptr) {
dumpint(uint64(len))
dwrite(data, len)
dumpmemrange(unsafe.Pointer(unsafe.StringData(s)), uintptr(len(s)))
}
-// dump information for a type
+// dump information for a type.
func dumptype(t *_type) {
if t == nil {
return
dumpbool(t.kind&kindDirectIface == 0 || t.ptrdata != 0)
}
-// dump an object
+// dump an object.
func dumpobj(obj unsafe.Pointer, size uintptr, bv bitvector) {
dumpint(tagObject)
dumpint(uint64(uintptr(obj)))
depth uintptr // depth in call stack (0 == most recent)
}
-// dump kinds & offsets of interesting fields in bv
+// dump kinds & offsets of interesting fields in bv.
func dumpbv(cbv *bitvector, offset uintptr) {
for i := uintptr(0); i < uintptr(cbv.n); i++ {
if cbv.ptrbit(i) == 1 {
}
// same as runtimeĀ·notetsleep, but called on user g (not g0)
-// calls only nosplit functions between entersyscallblock/exitsyscall
+// calls only nosplit functions between entersyscallblock/exitsyscall.
func notetsleepg(n *note, ns int64) bool {
gp := getg()
if gp == gp.m.g0 {
}
// same as runtimeĀ·notetsleep, but called on user g (not g0)
-// calls only nosplit functions between entersyscallblock/exitsyscall
+// calls only nosplit functions between entersyscallblock/exitsyscall.
func notetsleepg(n *note, ns int64) bool {
gp := getg()
if gp == gp.m.g0 {
// implementation of new builtin
// compiler (both frontend and SSA backend) knows the signature
-// of this function
+// of this function.
func newobject(typ *_type) unsafe.Pointer {
	// The final argument presumably requests a zeroed allocation
	// (new must return zeroed memory) — confirm against mallocgc's signature.
	return mallocgc(typ.size, typ, true)
}
return unsafe.Pointer(&zeroVal[0]), false
}
-// returns both key and elem. Used by map iterator
+// returns both key and elem. Used by map iterator.
func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
if h == nil || h.count == 0 {
return nil, nil
return true
}
-// This is the goroutine that runs all of the finalizers
+// This is the goroutine that runs all of the finalizers.
func runfinq() {
var (
frame unsafe.Pointer
}
// trygetfull tries to get a full or partially empty workbuffer.
-// If one is not immediately available return nil
+// If one is not immediately available return nil.
//
//go:nowritebarrier
func trygetfull() *workbuf {
}
// errno address must be retrieved by calling _Errno libc function.
-// This will return a pointer to errno
+// This will return a pointer to errno.
func miniterrno() {
mp := getg().m
r, _ := syscall0(&libc__Errno)
//go:noescape
func open(name *byte, mode, perm int32) int32
-// return value is only set on linux to be used in osinit()
+// return value is only set on linux to be used in osinit().
func madvise(addr unsafe.Pointer, n uintptr, flags int32) int32
// exitThread terminates the current thread, writing *wait = freeMStack when
return writeProfileInternal(w, debug, "mutex", runtime.MutexProfile)
}
-// writeProfileInternal writes the current blocking or mutex profile depending on the passed parameters
+// writeProfileInternal writes the current blocking or mutex profile depending on the passed parameters.
func writeProfileInternal(w io.Writer, debug int, name string, runtimeProfile func([]runtime.BlockProfileRecord) (int, bool)) error {
var p []runtime.BlockProfileRecord
n, ok := runtimeProfile(nil)
releasem(mp)
}
-// called from assembly
+// called from assembly.
func badmcall(fn func(*g)) {
	// mcall may only be invoked from a user goroutine; being called while
	// already on the m->g0 scheduling stack is a fatal runtime bug.
	throw("runtime: mcall called on m->g0 stack")
}
goschedImpl(gp)
}
-// goschedguarded is a forbidden-states-avoided version of gosched_m
+// goschedguarded is a forbidden-states-avoided version of gosched_m.
func goschedguarded_m(gp *g) {
if !canPreemptM(gp.m) {
// with up to 4 uintptr arguments.
func racecall(fn *byte, arg0, arg1, arg2, arg3 uintptr)
-// checks if the address has shadow (i.e. heap or data/bss)
+// checks if the address has shadow (i.e. heap or data/bss).
//
//go:nosplit
func isvalidaddr(addr unsafe.Pointer) bool {
argv **byte
)
-// nosplit for use in linux startup sysargs
+// nosplit for use in linux startup sysargs.
//
//go:nosplit
func argv_index(argv **byte, i int32) *byte {
//go:noescape
func open(name *byte, mode, perm int32) int32
-// return value is only set on linux to be used in osinit()
+// return value is only set on linux to be used in osinit().
func madvise(addr unsafe.Pointer, n uintptr, flags int32) int32
// exitThread terminates the current thread, writing *wait = freeMStack when
package runtime
-// This is needed for vet
+// This is needed for vet.
//
//go:noescape
func callCgoSigaction(sig uintptr, new, old *sigactiont) int32
}
}
-// mapJSError maps an error given by Node.js to the appropriate Go error
+// mapJSError maps an error given by Node.js to the appropriate Go error.
func mapJSError(jsErr js.Value) error {
errno, ok := errnoByCode[jsErr.Get("code").String()]
if !ok {
// parseSignedOffset parses a signed timezone offset (e.g. "+03" or "-04").
// The function checks for a signed number in the range -23 through +23 excluding zero.
-// Returns length of the found offset string or 0 otherwise
+// Returns length of the found offset string or 0 otherwise.
func parseSignedOffset(value string) int {
sign := value[0]
if sign != '-' && sign != '+' {