3. What did you do?
-(Use play.golang.org to provide a runnable example, if possible.)
+If possible, provide a recipe for reproducing the error.
+A complete runnable program is good.
+A link on play.golang.org is best.
4. What did you expect to see?
before sending patches.
**We do not accept GitHub pull requests**
-(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review).
+(we use [an instance](https://go-review.googlesource.com/) of the
+[Gerrit](https://www.gerritcodereview.com/) code review system instead).
Unless otherwise noted, the Go source files are distributed under
the BSD-style license found in the LICENSE file.
<p>A <a href="/doc/devel/release.html">summary</a> of the changes between Go releases. Notes for the major releases:</p>
<ul>
+ <li><a href="/doc/go1.6">Go 1.6</a> <small>(February 2016)</small></li>
<li><a href="/doc/go1.5">Go 1.5</a> <small>(August 2015)</small></li>
<li><a href="/doc/go1.4">Go 1.4</a> <small>(December 2014)</small></li>
<li><a href="/doc/go1.3">Go 1.3</a> <small>(June 2014)</small></li>
A custom git command called <code>git-codereview</code>,
discussed below, helps manage the code review process through a Google-hosted
<a href="https://go-review.googlesource.com/">instance</a> of the code review
-system called <a href="https://code.google.com/p/gerrit/">Gerrit</a>.
+system called <a href="https://www.gerritcodereview.com/">Gerrit</a>.
</p>
<h3 id="auth">Set up authentication for code review</h3>
<p>
go1.5.3 (released 2016/01/13) includes a security fix to the <code>math/big</code> package
affecting the <code>crypto/tls</code> package.
-See the <a href="https://github.com/golang/go/issues?q=milestone%3AGo1.5.3">Go 1.5.3 milestone on our issue tracker</a>
-and the <a href="https://golang.org/s/go153announce">release announcement</a> for details.
+See the <a href="https://golang.org/s/go153announce">release announcement</a> for details.
</p>
<h2 id="go1.4">go1.4 (released 2014/12/10)</h2>
<li>If it is determined, in consultation with the submitter, that a CVE-ID is
required, the primary handler obtains one via email to
<a href="http://oss-security.openwall.org/wiki/mailing-lists/distros">oss-distros</a>.</li>
-<li>Fixes are prepared for the current stable release and the head/master
+<li>Fixes are prepared for the two most recent major releases and the head/master
revision. These fixes are not yet committed to the public repository.</li>
<li>A notification is sent to the
<a href="https://groups.google.com/group/golang-announce">golang-announce</a>
long long
mysleep(int seconds) {
long long st = GetTickCount();
- sleep(seconds);
+ Sleep(1000 * seconds);
return st;
}
#else
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cgotest
-
-/*
-// mingw32 on windows/386 provides usleep() but not sleep(),
-// as we don't want to require all other OSes to provide usleep,
-// we emulate sleep(int s) using win32 API Sleep(int ms).
-
-#include <windows.h>
-
-unsigned int sleep(unsigned int seconds) {
- Sleep(1000 * seconds);
- return 0;
-}
-
-*/
-import "C"
gofmt_test.go
testdata
+
- newlink
- testdata
- +
archive
tar
testdata
if n < 0 {
return nil, ErrNegativeCount
}
- if n > len(b.buf) {
- return nil, ErrBufferFull
- }
- // 0 <= n <= len(b.buf)
- for b.w-b.r < n && b.err == nil {
+
+ for b.w-b.r < n && b.w-b.r < len(b.buf) && b.err == nil {
b.fill() // b.w-b.r < len(b.buf) => buffer is not full
}
+ if n > len(b.buf) {
+ return b.buf[b.r:b.w], ErrBufferFull
+ }
+
+ // 0 <= n <= len(b.buf)
var err error
if avail := b.w - b.r; avail < n {
// not enough data in buffer
if _, err := buf.Peek(-1); err != ErrNegativeCount {
t.Fatalf("want ErrNegativeCount got %v", err)
}
- if _, err := buf.Peek(32); err != ErrBufferFull {
- t.Fatalf("want ErrBufFull got %v", err)
+ if s, err := buf.Peek(32); string(s) != "abcdefghijklmnop" || err != ErrBufferFull {
+ t.Fatalf("want %q, ErrBufFull got %q, err=%v", "abcdefghijklmnop", string(s), err)
}
if _, err := buf.Read(p[0:3]); string(p[0:3]) != "abc" || err != nil {
t.Fatalf("want %q got %q, err=%v", "abc", string(p[0:3]), err)
type Arch struct {
*obj.LinkArch
// Map of instruction names to enumeration.
- Instructions map[string]int
+ Instructions map[string]obj.As
// Map of register names to enumeration.
Register map[string]int16
// Table of register prefix names. These are things like R for R(0) and SPR for SPR(268).
return 0, false
}
-var Pseudos = map[string]int{
+var Pseudos = map[string]obj.As{
"DATA": obj.ADATA,
"FUNCDATA": obj.AFUNCDATA,
"GLOBL": obj.AGLOBL,
register["PC"] = RPC
// Register prefix not used on this architecture.
- instructions := make(map[string]int)
+ instructions := make(map[string]obj.As)
for i, s := range obj.Anames {
- instructions[s] = i
+ instructions[s] = obj.As(i)
}
for i, s := range x86.Anames {
- if i >= obj.A_ARCHSPECIFIC {
- instructions[s] = i + obj.ABaseAMD64
+ if obj.As(i) >= obj.A_ARCHSPECIFIC {
+ instructions[s] = obj.As(i) + obj.ABaseAMD64
}
}
// Annoying aliases.
"R": true,
}
- instructions := make(map[string]int)
+ instructions := make(map[string]obj.As)
for i, s := range obj.Anames {
- instructions[s] = i
+ instructions[s] = obj.As(i)
}
for i, s := range arm.Anames {
- if i >= obj.A_ARCHSPECIFIC {
- instructions[s] = i + obj.ABaseARM
+ if obj.As(i) >= obj.A_ARCHSPECIFIC {
+ instructions[s] = obj.As(i) + obj.ABaseARM
}
}
// Annoying aliases.
"V": true,
}
- instructions := make(map[string]int)
+ instructions := make(map[string]obj.As)
for i, s := range obj.Anames {
- instructions[s] = i
+ instructions[s] = obj.As(i)
}
for i, s := range arm64.Anames {
- if i >= obj.A_ARCHSPECIFIC {
- instructions[s] = i + obj.ABaseARM64
+ if obj.As(i) >= obj.A_ARCHSPECIFIC {
+ instructions[s] = obj.As(i) + obj.ABaseARM64
}
}
// Annoying aliases.
"SPR": true,
}
- instructions := make(map[string]int)
+ instructions := make(map[string]obj.As)
for i, s := range obj.Anames {
- instructions[s] = i
+ instructions[s] = obj.As(i)
}
for i, s := range ppc64.Anames {
- if i >= obj.A_ARCHSPECIFIC {
- instructions[s] = i + obj.ABasePPC64
+ if obj.As(i) >= obj.A_ARCHSPECIFIC {
+ instructions[s] = obj.As(i) + obj.ABasePPC64
}
}
// Annoying aliases.
"R": true,
}
- instructions := make(map[string]int)
+ instructions := make(map[string]obj.As)
for i, s := range obj.Anames {
- instructions[s] = i
+ instructions[s] = obj.As(i)
}
for i, s := range mips.Anames {
- if i >= obj.A_ARCHSPECIFIC {
- instructions[s] = i + obj.ABaseMIPS64
+ if obj.As(i) >= obj.A_ARCHSPECIFIC {
+ instructions[s] = obj.As(i) + obj.ABaseMIPS64
}
}
// Annoying alias.
// IsARMCMP reports whether the op (as defined by an arm.A* constant) is
// one of the comparison instructions that require special handling.
-func IsARMCMP(op int) bool {
+func IsARMCMP(op obj.As) bool {
switch op {
case arm.ACMN, arm.ACMP, arm.ATEQ, arm.ATST:
return true
// IsARMSTREX reports whether the op (as defined by an arm.A* constant) is
// one of the STREX-like instructions that require special handling.
-func IsARMSTREX(op int) bool {
+func IsARMSTREX(op obj.As) bool {
switch op {
case arm.ASTREX, arm.ASTREXD, arm.ASWPW, arm.ASWPBU:
return true
// IsARMMRC reports whether the op (as defined by an arm.A* constant) is
// MRC or MCR
-func IsARMMRC(op int) bool {
+func IsARMMRC(op obj.As) bool {
switch op {
case arm.AMRC, aMCR: // Note: aMCR is defined in this package.
return true
}
// IsARMFloatCmp reports whether the op is a floating comparison instruction.
-func IsARMFloatCmp(op int) bool {
+func IsARMFloatCmp(op obj.As) bool {
switch op {
case arm.ACMPF, arm.ACMPD:
return true
// The difference between MRC and MCR is represented by a bit high in the word, not
// in the usual way by the opcode itself. Asm must use AMRC for both instructions, so
// we return the opcode for MRC so that asm doesn't need to import obj/arm.
-func ARMMRCOffset(op int, cond string, x0, x1, x2, x3, x4, x5 int64) (offset int64, op0 int16, ok bool) {
+func ARMMRCOffset(op obj.As, cond string, x0, x1, x2, x3, x4, x5 int64) (offset int64, op0 obj.As, ok bool) {
op1 := int64(0)
if op == arm.AMRC {
op1 = 1
// IsARMMULA reports whether the op (as defined by an arm.A* constant) is
// MULA, MULAWT or MULAWB, the 4-operand instructions.
-func IsARMMULA(op int) bool {
+func IsARMMULA(op obj.As) bool {
switch op {
case arm.AMULA, arm.AMULAWB, arm.AMULAWT:
return true
return false
}
-var bcode = []int{
+var bcode = []obj.As{
arm.ABEQ,
arm.ABNE,
arm.ABCS,
}
/* hack to make B.NE etc. work: turn it into the corresponding conditional */
if prog.As == arm.AB {
- prog.As = int16(bcode[(bits^arm.C_SCOND_XOR)&0xf])
+ prog.As = bcode[(bits^arm.C_SCOND_XOR)&0xf]
bits = (bits &^ 0xf) | arm.C_SCOND_NONE
}
prog.Scond = bits
// IsARM64CMP reports whether the op (as defined by an arm.A* constant) is
// one of the comparison instructions that require special handling.
-func IsARM64CMP(op int) bool {
+func IsARM64CMP(op obj.As) bool {
switch op {
case arm64.ACMN, arm64.ACMP, arm64.ATST,
arm64.ACMNW, arm64.ACMPW, arm64.ATSTW:
// IsARM64STLXR reports whether the op (as defined by an arm64.A*
// constant) is one of the STLXR-like instructions that require special
// handling.
-func IsARM64STLXR(op int) bool {
+func IsARM64STLXR(op obj.As) bool {
switch op {
case arm64.ASTLXRB, arm64.ASTLXRH, arm64.ASTLXRW, arm64.ASTLXR:
return true
package arch
-import "cmd/internal/obj/mips"
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/mips"
+)
func jumpMIPS64(word string) bool {
switch word {
// IsMIPS64CMP reports whether the op (as defined by an mips.A* constant) is
// one of the CMP instructions that require special handling.
-func IsMIPS64CMP(op int) bool {
+func IsMIPS64CMP(op obj.As) bool {
switch op {
case mips.ACMPEQF, mips.ACMPEQD, mips.ACMPGEF, mips.ACMPGED,
mips.ACMPGTF, mips.ACMPGTD:
// IsMIPS64MUL reports whether the op (as defined by an mips.A* constant) is
// one of the MUL/DIV/REM instructions that require special handling.
-func IsMIPS64MUL(op int) bool {
+func IsMIPS64MUL(op obj.As) bool {
switch op {
case mips.AMUL, mips.AMULU, mips.AMULV, mips.AMULVU,
mips.ADIV, mips.ADIVU, mips.ADIVV, mips.ADIVVU,
package arch
-import "cmd/internal/obj/ppc64"
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/ppc64"
+)
func jumpPPC64(word string) bool {
switch word {
// IsPPC64RLD reports whether the op (as defined by an ppc64.A* constant) is
// one of the RLD-like instructions that require special handling.
// The FMADD-like instructions behave similarly.
-func IsPPC64RLD(op int) bool {
+func IsPPC64RLD(op obj.As) bool {
switch op {
case ppc64.ARLDC, ppc64.ARLDCCC, ppc64.ARLDCL, ppc64.ARLDCLCC,
ppc64.ARLDCR, ppc64.ARLDCRCC, ppc64.ARLDMI, ppc64.ARLDMICC,
// IsPPC64CMP reports whether the op (as defined by an ppc64.A* constant) is
// one of the CMP instructions that require special handling.
-func IsPPC64CMP(op int) bool {
+func IsPPC64CMP(op obj.As) bool {
switch op {
case ppc64.ACMP, ppc64.ACMPU, ppc64.ACMPW, ppc64.ACMPWU:
return true
// IsPPC64NEG reports whether the op (as defined by an ppc64.A* constant) is
// one of the NEG-like instructions that require special handling.
-func IsPPC64NEG(op int) bool {
+func IsPPC64NEG(op obj.As) bool {
switch op {
case ppc64.AADDMECC, ppc64.AADDMEVCC, ppc64.AADDMEV, ppc64.AADDME,
ppc64.AADDZECC, ppc64.AADDZEVCC, ppc64.AADDZEV, ppc64.AADDZE,
// JMP R1
// JMP exit
// JMP 3(PC)
-func (p *Parser) asmJump(op int, cond string, a []obj.Addr) {
+func (p *Parser) asmJump(op obj.As, cond string, a []obj.Addr) {
var target *obj.Addr
prog := &obj.Prog{
Ctxt: p.ctxt,
Lineno: p.histLineNum,
- As: int16(op),
+ As: op,
}
switch len(a) {
case 1:
// asmInstruction assembles an instruction.
// MOVW R9, (R10)
-func (p *Parser) asmInstruction(op int, cond string, a []obj.Addr) {
+func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) {
// fmt.Printf("%s %+v\n", obj.Aconv(op), a)
prog := &obj.Prog{
Ctxt: p.ctxt,
Lineno: p.histLineNum,
- As: int16(op),
+ As: op,
}
switch len(a) {
case 0:
}
// getConstant checks that addr represents a plain constant and returns its value.
-func (p *Parser) getConstant(prog *obj.Prog, op int, addr *obj.Addr) int64 {
+func (p *Parser) getConstant(prog *obj.Prog, op obj.As, addr *obj.Addr) int64 {
if addr.Type != obj.TYPE_MEM || addr.Name != 0 || addr.Reg != 0 || addr.Index != 0 {
p.errorf("%s: expected integer constant; found %s", obj.Aconv(op), obj.Dconv(prog, addr))
}
}
// getImmediate checks that addr represents an immediate constant and returns its value.
-func (p *Parser) getImmediate(prog *obj.Prog, op int, addr *obj.Addr) int64 {
+func (p *Parser) getImmediate(prog *obj.Prog, op obj.As, addr *obj.Addr) int64 {
if addr.Type != obj.TYPE_CONST || addr.Name != 0 || addr.Reg != 0 || addr.Index != 0 {
p.errorf("%s: expected immediate constant; found %s", obj.Aconv(op), obj.Dconv(prog, addr))
}
}
// getRegister checks that addr represents a register and returns its value.
-func (p *Parser) getRegister(prog *obj.Prog, op int, addr *obj.Addr) int16 {
+func (p *Parser) getRegister(prog *obj.Prog, op obj.As, addr *obj.Addr) int16 {
if addr.Type != obj.TYPE_REG || addr.Offset != 0 || addr.Name != 0 || addr.Index != 0 {
p.errorf("%s: expected register; found %s", obj.Aconv(op), obj.Dconv(prog, addr))
}
return true
}
-func (p *Parser) instruction(op int, word, cond string, operands [][]lex.Token) {
+func (p *Parser) instruction(op obj.As, word, cond string, operands [][]lex.Token) {
p.addr = p.addr[0:0]
p.isJump = p.arch.IsJump(word)
for _, op := range operands {
p.asmInstruction(op, cond, p.addr)
}
-func (p *Parser) pseudo(op int, word string, operands [][]lex.Token) {
+func (p *Parser) pseudo(op obj.As, word string, operands [][]lex.Token) {
switch op {
case obj.ATEXT:
p.asmText(word, operands)
var MAXWIDTH int64 = 1 << 50
var (
- addptr int = x86.AADDQ
- movptr int = x86.AMOVQ
- leaptr int = x86.ALEAQ
- cmpptr int = x86.ACMPQ
+ addptr = x86.AADDQ
+ movptr = x86.AMOVQ
+ leaptr = x86.ALEAQ
+ cmpptr = x86.ACMPQ
)
-/*
- * go declares several platform-specific type aliases:
- * int, uint, and uintptr
- */
-var typedefs = []gc.Typedef{
- {"int", gc.TINT, gc.TINT64},
- {"uint", gc.TUINT, gc.TUINT64},
- {"uintptr", gc.TUINTPTR, gc.TUINT64},
-}
-
func betypeinit() {
gc.Widthptr = 8
gc.Widthint = 8
movptr = x86.AMOVL
leaptr = x86.ALEAL
cmpptr = x86.ACMPL
- typedefs[0].Sameas = gc.TINT32
- typedefs[1].Sameas = gc.TUINT32
- typedefs[2].Sameas = gc.TUINT32
}
if gc.Ctxt.Flag_dynlink {
gc.Thearch.Thechar = thechar
gc.Thearch.Thestring = thestring
gc.Thearch.Thelinkarch = thelinkarch
- gc.Thearch.Typedefs = typedefs
gc.Thearch.REGSP = x86.REGSP
gc.Thearch.REGCTXT = x86.REGCTXT
gc.Thearch.REGCALLX = x86.REG_BX
return p
}
-func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int64, ttype int, treg int, toffset int64) *obj.Prog {
+func appendpp(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int, foffset int64, ttype obj.AddrType, treg int, toffset int64) *obj.Prog {
q := gc.Ctxt.NewProg()
gc.Clearp(q)
- q.As = int16(as)
+ q.As = as
q.Lineno = p.Lineno
- q.From.Type = int16(ftype)
+ q.From.Type = ftype
q.From.Reg = int16(freg)
q.From.Offset = foffset
- q.To.Type = int16(ttype)
+ q.To.Type = ttype
q.To.Reg = int16(treg)
q.To.Offset = toffset
q.Link = p.Link
continue
}
if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
- gc.Warnl(int(p.Lineno), "generated nil check")
+ gc.Warnl(p.Lineno, "generated nil check")
}
// check is
p2.Lineno = p.Lineno
p1.Pc = 9999
p2.Pc = 9999
- p.As = int16(cmpptr)
+ p.As = cmpptr
p.To.Type = obj.TYPE_CONST
p.To.Offset = 0
p1.As = x86.AJNE
* generate
* as $c, reg
*/
-func gconreg(as int, c int64, reg int) {
+func gconreg(as obj.As, c int64, reg int) {
var nr gc.Node
switch as {
* generate
* as $c, n
*/
-func ginscon(as int, c int64, n2 *gc.Node) {
+func ginscon(as obj.As, c int64, n2 *gc.Node) {
var n1 gc.Node
switch as {
return gc.Gbranch(optoas(op, t), nil, likely)
}
-func ginsboolval(a int, n *gc.Node) {
+func ginsboolval(a obj.As, n *gc.Node) {
gins(jmptoset(a), nil, n)
}
}
// cannot have two memory operands
- var a int
+ var a obj.As
if gc.Ismem(f) && gc.Ismem(t) {
goto hard
}
* generate one instruction:
* as f, t
*/
-func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
+func gins(as obj.As, f *gc.Node, t *gc.Node) *obj.Prog {
// Node nod;
// if(f != N && f->op == OINDEX) {
/*
* return Axxx for Oxxx on type t.
*/
-func optoas(op gc.Op, t *gc.Type) int {
+func optoas(op gc.Op, t *gc.Type) obj.As {
if t == nil {
gc.Fatalf("optoas: t is nil")
}
a := obj.AXXX
switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
default:
- gc.Fatalf("optoas: no entry %v-%v", gc.Oconv(int(op), 0), t)
+ gc.Fatalf("optoas: no entry %v-%v", gc.Oconv(op, 0), t)
case OADDR_ | gc.TPTR32:
a = x86.ALEAL
}
// jmptoset returns ASETxx for AJxx.
-func jmptoset(jmp int) int {
+func jmptoset(jmp obj.As) obj.As {
switch jmp {
case x86.AJEQ:
return x86.ASETEQ
case x86.AJPS:
return x86.ASETPS
}
- gc.Fatalf("jmptoset: no entry for %v", gc.Oconv(jmp, 0))
+ gc.Fatalf("jmptoset: no entry for %v", jmp)
panic("unreachable")
}
* after successful sudoaddable,
* to release the register used for a.
*/
-func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
+func sudoaddable(as obj.As, n *gc.Node, a *obj.Addr) bool {
if n.Type == nil {
return false
}
}
a.Type = obj.TYPE_NONE
- a.Index = obj.TYPE_NONE
+ a.Index = x86.REG_NONE
gc.Fixlargeoffset(&n1)
gc.Naddr(a, &n1)
return true
}
func peep(firstp *obj.Prog) {
- g := (*gc.Graph)(gc.Flowstart(firstp, nil))
+ g := gc.Flowstart(firstp, nil)
if g == nil {
return
}
// another MOV $con,R without
// setting R in the interim
var p *obj.Prog
- for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
+ for r := g.Start; r != nil; r = r.Link {
p = r.Prog
switch p.As {
case x86.ALEAL,
// can be replaced by MOVAPD, which moves the pair of float64s
// instead of just the lower one. We only use the lower one, but
// the processor can do better if we do moves using both.
- for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
+ for r := g.Start; r != nil; r = r.Link {
p = r.Prog
if p.As == x86.AMOVLQZX {
if regtyp(&p.From) {
// load pipelining
// push any load from memory as early as possible
// to give it time to complete before use.
- for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
+ for r := g.Start; r != nil; r = r.Link {
p = r.Prog
switch p.As {
case x86.AMOVB,
var p *obj.Prog
var b *gc.Flow
- p0 := (*obj.Prog)(r0.Prog)
+ p0 := r0.Prog
for r = gc.Uniqp(r0); r != nil && gc.Uniqs(r) != nil; r = gc.Uniqp(r) {
p = r.Prog
if p.As != obj.ANOP {
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("pushback\n")
- for r := (*gc.Flow)(b); ; r = r.Link {
+ for r := b; ; r = r.Link {
fmt.Printf("\t%v\n", r.Prog)
if r == r0 {
break
}
}
- t := obj.Prog(*r0.Prog)
+ t := *r0.Prog
for r = gc.Uniqp(r0); ; r = gc.Uniqp(r) {
p0 = r.Link.Prog
p = r.Prog
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("\tafter\n")
- for r := (*gc.Flow)(b); ; r = r.Link {
+ for r := b; ; r = r.Link {
fmt.Printf("\t%v\n", r.Prog)
if r == r0 {
break
}
func excise(r *gc.Flow) {
- p := (*obj.Prog)(r.Prog)
+ p := r.Prog
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("%v ===delete===\n", p)
}
func elimshortmov(g *gc.Graph) {
var p *obj.Prog
- for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
+ for r := g.Start; r != nil; r = r.Link {
p = r.Prog
if regtyp(&p.To) {
switch p.As {
// is reg guaranteed to be truncated by a previous L instruction?
func prevl(r0 *gc.Flow, reg int) bool {
- for r := (*gc.Flow)(gc.Uniqp(r0)); r != nil; r = gc.Uniqp(r) {
+ for r := gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
p := r.Prog
if p.To.Type == obj.TYPE_REG && int(p.To.Reg) == reg {
flags := progflags(p)
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("subprop %v\n", r0.Prog)
}
- p := (*obj.Prog)(r0.Prog)
- v1 := (*obj.Addr)(&p.From)
+ p := r0.Prog
+ v1 := &p.From
if !regtyp(v1) {
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("\tnot regtype %v; return 0\n", gc.Ctxt.Dconv(v1))
return false
}
- v2 := (*obj.Addr)(&p.To)
+ v2 := &p.To
if !regtyp(v2) {
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("\tnot regtype %v; return 0\n", gc.Ctxt.Dconv(v2))
}
}
- t := int(int(v1.Reg))
- v1.Reg = v2.Reg
- v2.Reg = int16(t)
+ v1.Reg, v2.Reg = v2.Reg, v1.Reg
if gc.Debug['P'] != 0 {
fmt.Printf("%v last\n", r.Prog)
}
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("copyprop %v\n", r0.Prog)
}
- p := (*obj.Prog)(r0.Prog)
- v1 := (*obj.Addr)(&p.From)
- v2 := (*obj.Addr)(&p.To)
+ p := r0.Prog
+ v1 := &p.From
+ v2 := &p.To
if copyas(v1, v2) {
return true
}
*/
func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f int) int {
if copyas(a, v) {
- reg := int(int(s.Reg))
- if reg >= x86.REG_AX && reg <= x86.REG_R15 || reg >= x86.REG_X0 && reg <= x86.REG_X0+15 {
+ if s.Reg >= x86.REG_AX && s.Reg <= x86.REG_R15 || s.Reg >= x86.REG_X0 && s.Reg <= x86.REG_X0+15 {
if f != 0 {
- a.Reg = int16(reg)
+ a.Reg = s.Reg
}
}
-
return 0
}
if regtyp(v) {
- reg := int(int(v.Reg))
- if a.Type == obj.TYPE_MEM && int(a.Reg) == reg {
+ if a.Type == obj.TYPE_MEM && a.Reg == v.Reg {
if (s.Reg == x86.REG_BP || s.Reg == x86.REG_R13) && a.Index != x86.REG_NONE {
return 1 /* can't use BP-base with index */
}
a.Reg = s.Reg
}
}
-
- // return 0;
- if int(a.Index) == reg {
+ if a.Index == v.Reg {
if f != 0 {
a.Index = s.Reg
}
return 0
}
-
return 0
}
-
return 0
}
var p *obj.Prog
var t int
- p0 := (*obj.Prog)(r0.Prog)
- v0 := (*obj.Addr)(&p0.To)
- r := (*gc.Flow)(r0)
+ p0 := r0.Prog
+ v0 := &p0.To
+ r := r0
loop:
r = gc.Uniqs(r)
// for example moving [4]byte must use 4 MOVB not 1 MOVW.
align := int(n.Type.Align)
- var op int
+ var op obj.As
switch align {
default:
gc.Fatalf("sgen: invalid alignment %d for %v", align, n.Type)
// if we are copying forward on the stack and
// the src and dst overlap, then reverse direction
dir := align
- if osrc < odst && int64(odst) < int64(osrc)+w {
+ if osrc < odst && odst < osrc+w {
dir = -dir
}
if res.Op != gc.OINDREG && res.Op != gc.ONAME {
gc.Dump("n", n)
gc.Dump("res", res)
- gc.Fatalf("cgen64 %v of %v", gc.Oconv(int(n.Op), 0), gc.Oconv(int(res.Op), 0))
+ gc.Fatalf("cgen64 %v of %v", gc.Oconv(n.Op, 0), gc.Oconv(res.Op, 0))
}
l := n.Left
split64(l, &lo1, &hi1)
switch n.Op {
default:
- gc.Fatalf("cgen64 %v", gc.Oconv(int(n.Op), 0))
+ gc.Fatalf("cgen64 %v", gc.Oconv(n.Op, 0))
case gc.OMINUS:
var lo2 gc.Node
var br *obj.Prog
switch op {
default:
- gc.Fatalf("cmp64 %v %v", gc.Oconv(int(op), 0), t)
+ gc.Fatalf("cmp64 %v %v", gc.Oconv(op, 0), t)
// cmp hi
// bne L
var MAXWIDTH int64 = (1 << 32) - 1
-/*
- * go declares several platform-specific type aliases:
- * int, uint, and uintptr
- */
-var typedefs = []gc.Typedef{
- {"int", gc.TINT, gc.TINT32},
- {"uint", gc.TUINT, gc.TUINT32},
- {"uintptr", gc.TUINTPTR, gc.TUINT32},
-}
-
func betypeinit() {
gc.Widthptr = 4
gc.Widthint = 4
gc.Thearch.Thechar = thechar
gc.Thearch.Thestring = thestring
gc.Thearch.Thelinkarch = thelinkarch
- gc.Thearch.Typedefs = typedefs
gc.Thearch.REGSP = arm.REGSP
gc.Thearch.REGCTXT = arm.REGCTXT
gc.Thearch.REGCALLX = arm.REG_R1
return p
}
-func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int32, ttype int, treg int, toffset int32) *obj.Prog {
+func appendpp(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int, foffset int32, ttype obj.AddrType, treg int, toffset int32) *obj.Prog {
q := gc.Ctxt.NewProg()
gc.Clearp(q)
- q.As = int16(as)
+ q.As = as
q.Lineno = p.Lineno
- q.From.Type = int16(ftype)
+ q.From.Type = ftype
q.From.Reg = int16(freg)
q.From.Offset = int64(foffset)
- q.To.Type = int16(ttype)
+ q.To.Type = ttype
q.To.Reg = int16(treg)
q.To.Offset = int64(toffset)
q.Link = p.Link
}
t := nl.Type
- w := int(t.Width * 8)
+ w := t.Width * 8
var n1 gc.Node
gc.Regalloc(&n1, t, res)
gc.Cgen(nl, &n1)
continue
}
if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
- gc.Warnl(int(p.Lineno), "generated nil check")
+ gc.Warnl(p.Lineno, "generated nil check")
}
if p.From.Type != obj.TYPE_REG {
gc.Fatalf("invalid nil check %v", p)
* generate
* as $c, n
*/
-func ginscon(as int, c int64, n *gc.Node) {
+func ginscon(as obj.As, c int64, n *gc.Node) {
var n1 gc.Node
gc.Nodconst(&n1, gc.Types[gc.TINT32], c)
var n2 gc.Node
// cannot have two memory operands;
// except 64-bit, which always copies via registers anyway.
- var a int
+ var a obj.As
var r1 gc.Node
if !gc.Is64(f.Type) && !gc.Is64(t.Type) && gc.Ismem(f) && gc.Ismem(t) {
goto hard
* generate one instruction:
* as f, t
*/
-func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
+func gins(as obj.As, f *gc.Node, t *gc.Node) *obj.Prog {
// Node nod;
// int32 v;
gc.Naddr(&a, n)
if a.Type != obj.TYPE_REG {
if n != nil {
- gc.Fatalf("bad in raddr: %v", gc.Oconv(int(n.Op), 0))
+ gc.Fatalf("bad in raddr: %v", gc.Oconv(n.Op, 0))
} else {
gc.Fatalf("bad in raddr: <null>")
}
/* generate a constant shift
* arm encodes a shift by 32 as 0, thus asking for 0 shift is illegal.
*/
-func gshift(as int, lhs *gc.Node, stype int32, sval int32, rhs *gc.Node) *obj.Prog {
+func gshift(as obj.As, lhs *gc.Node, stype int32, sval int32, rhs *gc.Node) *obj.Prog {
if sval <= 0 || sval > 32 {
gc.Fatalf("bad shift value: %d", sval)
}
/* generate a register shift
*/
-func gregshift(as int, lhs *gc.Node, stype int32, reg *gc.Node, rhs *gc.Node) *obj.Prog {
+func gregshift(as obj.As, lhs *gc.Node, stype int32, reg *gc.Node, rhs *gc.Node) *obj.Prog {
p := gins(as, nil, rhs)
p.From.Type = obj.TYPE_SHIFT
p.From.Offset = int64(stype) | (int64(reg.Reg)&15)<<8 | 1<<4 | int64(lhs.Reg)&15
/*
* return Axxx for Oxxx on type t.
*/
-func optoas(op gc.Op, t *gc.Type) int {
+func optoas(op gc.Op, t *gc.Type) obj.As {
if t == nil {
gc.Fatalf("optoas: t is nil")
}
a := obj.AXXX
switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
default:
- gc.Fatalf("optoas: no entry %v-%v etype %v simtype %v", gc.Oconv(int(op), 0), t, gc.Types[t.Etype], gc.Types[gc.Simtype[t.Etype]])
+ gc.Fatalf("optoas: no entry %v-%v etype %v simtype %v", gc.Oconv(op, 0), t, gc.Types[t.Etype], gc.Types[gc.Simtype[t.Etype]])
/* case CASE(OADDR, TPTR32):
a = ALEAL;
* after successful sudoaddable,
* to release the register used for a.
*/
-func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
+func sudoaddable(as obj.As, n *gc.Node, a *obj.Addr) bool {
if n.Type == nil {
return false
}
// UNUSED
func peep(firstp *obj.Prog) {
- g := (*gc.Graph)(gc.Flowstart(firstp, nil))
+ g := gc.Flowstart(firstp, nil)
if g == nil {
return
}
goto loop1
}
- for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
+ for r := g.Start; r != nil; r = r.Link {
p = r.Prog
switch p.As {
/*
}
}
- for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
+ for r := g.Start; r != nil; r = r.Link {
p = r.Prog
switch p.As {
case arm.AMOVW,
* will be eliminated by copy propagation.
*/
func subprop(r0 *gc.Flow) bool {
- p := (*obj.Prog)(r0.Prog)
- v1 := (*obj.Addr)(&p.From)
+ p := r0.Prog
+ v1 := &p.From
if !regtyp(v1) {
return false
}
- v2 := (*obj.Addr)(&p.To)
+ v2 := &p.To
if !regtyp(v2) {
return false
}
}
}
- t := int(int(v1.Reg))
- v1.Reg = v2.Reg
- v2.Reg = int16(t)
+ v1.Reg, v2.Reg = v2.Reg, v1.Reg
if gc.Debug['P'] != 0 {
fmt.Printf("%v last\n", r.Prog)
}
* set v2 return success
*/
func copyprop(g *gc.Graph, r0 *gc.Flow) bool {
- p := (*obj.Prog)(r0.Prog)
- v1 := (*obj.Addr)(&p.From)
- v2 := (*obj.Addr)(&p.To)
+ p := r0.Prog
+ v1 := &p.From
+ v2 := &p.To
if copyas(v1, v2) {
return true
}
* MOVBS above can be a MOVBS, MOVBU, MOVHS or MOVHU.
*/
func shortprop(r *gc.Flow) bool {
- p := (*obj.Prog)(r.Prog)
- r1 := (*gc.Flow)(findpre(r, &p.From))
+ p := r.Prog
+ r1 := findpre(r, &p.From)
if r1 == nil {
return false
}
- p1 := (*obj.Prog)(r1.Prog)
+ p1 := r1.Prog
if p1.As == p.As {
// Two consecutive extensions.
goto gotit
}
if gc.Debug['P'] != 0 {
- fmt.Printf(" => %v\n", obj.Aconv(int(p.As)))
+ fmt.Printf(" => %v\n", obj.Aconv(p.As))
}
return true
}
* ..
*/
func shiftprop(r *gc.Flow) bool {
- p := (*obj.Prog)(r.Prog)
+ p := r.Prog
if p.To.Type != obj.TYPE_REG {
if gc.Debug['P'] != 0 {
fmt.Printf("\tBOTCH: result not reg; FAILURE\n")
return false
}
- n := int(int(p.To.Reg))
- a := obj.Addr(obj.Addr{})
+ n := p.To.Reg
+ var a obj.Addr
if p.Reg != 0 && p.Reg != p.To.Reg {
a.Type = obj.TYPE_REG
a.Reg = p.Reg
if gc.Debug['P'] != 0 {
fmt.Printf("shiftprop\n%v", p)
}
- r1 := (*gc.Flow)(r)
+ r1 := r
var p1 *obj.Prog
for {
/* find first use of shift result; abort if shift operands or result are changed */
arm.ASBC,
arm.ARSB,
arm.ARSC:
- if int(p1.Reg) == n || (p1.Reg == 0 && p1.To.Type == obj.TYPE_REG && int(p1.To.Reg) == n) {
+ if p1.Reg == n || (p1.Reg == 0 && p1.To.Type == obj.TYPE_REG && p1.To.Reg == n) {
if p1.From.Type != obj.TYPE_REG {
if gc.Debug['P'] != 0 {
fmt.Printf("\tcan't swap; FAILURE\n")
}
p1.Reg = p1.From.Reg
- p1.From.Reg = int16(n)
+ p1.From.Reg = n
switch p1.As {
case arm.ASUB:
p1.As = arm.ARSB
arm.ATST,
arm.ACMP,
arm.ACMN:
- if int(p1.Reg) == n {
+ if p1.Reg == n {
if gc.Debug['P'] != 0 {
fmt.Printf("\tcan't swap; FAILURE\n")
}
return false
}
- if p1.Reg == 0 && int(p1.To.Reg) == n {
+ if p1.Reg == 0 && p1.To.Reg == n {
if gc.Debug['P'] != 0 {
fmt.Printf("\tshift result used twice; FAILURE\n")
}
return false
}
- if p1.From.Type != obj.TYPE_REG || int(p1.From.Reg) != n {
+ if p1.From.Type != obj.TYPE_REG || p1.From.Reg != n {
if gc.Debug['P'] != 0 {
fmt.Printf("\tBOTCH: where is it used?; FAILURE\n")
}
}
/* check whether shift result is used subsequently */
- p2 := (*obj.Prog)(p1)
+ p2 := p1
- if int(p1.To.Reg) != n {
+ if p1.To.Reg != n {
var p1 *obj.Prog
for {
r1 = gc.Uniqs(r1)
/* make the substitution */
p2.From.Reg = 0
-
- o := int(int(p.Reg))
+ o := p.Reg
if o == 0 {
- o = int(p.To.Reg)
+ o = p.To.Reg
}
o &= 15
switch p.From.Type {
case obj.TYPE_CONST:
- o |= int((p.From.Offset & 0x1f) << 7)
+ o |= int16(p.From.Offset&0x1f) << 7
case obj.TYPE_REG:
- o |= 1<<4 | (int(p.From.Reg)&15)<<8
+ o |= 1<<4 | (p.From.Reg&15)<<8
}
switch p.As {
}
func finduse(g *gc.Graph, r *gc.Flow, v *obj.Addr) bool {
- for r1 := (*gc.Flow)(g.Start); r1 != nil; r1 = r1.Link {
+ for r1 := g.Start; r1 != nil; r1 = r1.Link {
r1.Active = 0
}
return findu1(r, v)
* MOVBU R0<<0(R1),R0
*/
func xtramodes(g *gc.Graph, r *gc.Flow, a *obj.Addr) bool {
- p := (*obj.Prog)(r.Prog)
- v := obj.Addr(*a)
+ p := r.Prog
+ v := *a
v.Type = obj.TYPE_REG
- r1 := (*gc.Flow)(findpre(r, &v))
+ r1 := findpre(r, &v)
if r1 != nil {
p1 := r1.Prog
if p1.To.Type == obj.TYPE_REG && p1.To.Reg == v.Reg {
case arm.AMOVW:
if p1.From.Type == obj.TYPE_REG {
- r2 := (*gc.Flow)(findinc(r1, r, &p1.From))
+ r2 := findinc(r1, r, &p1.From)
if r2 != nil {
var r3 *gc.Flow
for r3 = gc.Uniqs(r2); r3.Prog.As == obj.ANOP; r3 = gc.Uniqs(r3) {
}
if a != &p.From || a.Reg != p.To.Reg {
- r1 := (*gc.Flow)(findinc(r, nil, &v))
+ r1 := findinc(r, nil, &v)
if r1 != nil {
/* post-indexing */
p1 := r1.Prog
func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
switch p.As {
default:
- fmt.Printf("copyu: can't find %v\n", obj.Aconv(int(p.As)))
+ fmt.Printf("copyu: can't find %v\n", obj.Aconv(p.As))
return 2
case arm.AMOVM:
}
var predinfo = []struct {
- opcode int
- notopcode int
+ opcode obj.As
+ notopcode obj.As
scond int
notscond int
}{
pred = predinfo[rstart.Prog.As-arm.ABEQ].notscond
}
- for r := (*gc.Flow)(j.start); ; r = successor(r) {
+ for r := j.start; ; r = successor(r) {
if r.Prog.As == arm.AB {
if r != j.last || branch == Delbranch {
excise(r)
} else {
if cond == Truecond {
- r.Prog.As = int16(predinfo[rstart.Prog.As-arm.ABEQ].opcode)
+ r.Prog.As = predinfo[rstart.Prog.As-arm.ABEQ].opcode
} else {
- r.Prog.As = int16(predinfo[rstart.Prog.As-arm.ABEQ].notopcode)
+ r.Prog.As = predinfo[rstart.Prog.As-arm.ABEQ].notopcode
}
}
} else if predicable(r.Prog) {
var j1 Joininfo
var j2 Joininfo
- for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
+ for r := g.Start; r != nil; r = r.Link {
if isbranch(r.Prog) {
t1 = joinsplit(r.S1, &j1)
t2 = joinsplit(r.S2, &j2)
}
func excise(r *gc.Flow) {
- p := (*obj.Prog)(r.Prog)
+ p := r.Prog
obj.Nopout(p)
}
// for example moving [4]byte must use 4 MOVB not 1 MOVW.
align := int(n.Type.Align)
- var op int
+ var op obj.As
switch align {
default:
gc.Fatalf("sgen: invalid alignment %d for %v", align, n.Type)
// the src and dst overlap, then reverse direction
dir := align
- if osrc < odst && int64(odst) < int64(osrc)+w {
+ if osrc < odst && odst < osrc+w {
dir = -dir
}
var MAXWIDTH int64 = 1 << 50
-/*
- * go declares several platform-specific type aliases:
- * int, uint, and uintptr
- */
-var typedefs = []gc.Typedef{
- {"int", gc.TINT, gc.TINT64},
- {"uint", gc.TUINT, gc.TUINT64},
- {"uintptr", gc.TUINTPTR, gc.TUINT64},
-}
-
func betypeinit() {
gc.Widthptr = 8
gc.Widthint = 8
gc.Thearch.Thechar = thechar
gc.Thearch.Thestring = thestring
gc.Thearch.Thelinkarch = thelinkarch
- gc.Thearch.Typedefs = typedefs
gc.Thearch.REGSP = arm64.REGSP
gc.Thearch.REGCTXT = arm64.REGCTXT
gc.Thearch.REGCALLX = arm64.REGRT1
return p
}
-func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int64, ttype int, treg int, toffset int64) *obj.Prog {
+func appendpp(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int, foffset int64, ttype obj.AddrType, treg int, toffset int64) *obj.Prog {
q := gc.Ctxt.NewProg()
gc.Clearp(q)
- q.As = int16(as)
+ q.As = as
q.Lineno = p.Lineno
- q.From.Type = int16(ftype)
+ q.From.Type = ftype
q.From.Reg = int16(freg)
q.From.Offset = foffset
- q.To.Type = int16(ttype)
+ q.To.Type = ttype
q.To.Reg = int16(treg)
q.To.Offset = toffset
q.Link = p.Link
nl, nr = nr, nl
}
- t := (*gc.Type)(nl.Type)
- w := int(int(t.Width * 8))
+ t := nl.Type
+ w := t.Width * 8
var n1 gc.Node
gc.Cgenr(nl, &n1, res)
var n2 gc.Node
gc.TINT16,
gc.TINT32:
gins(optoas(gc.OMUL, t), &n2, &n1)
- p := (*obj.Prog)(gins(arm64.AASR, nil, &n1))
+ p := gins(arm64.AASR, nil, &n1)
p.From.Type = obj.TYPE_CONST
- p.From.Offset = int64(w)
+ p.From.Offset = w
case gc.TUINT8,
gc.TUINT16,
gc.TUINT32:
gins(optoas(gc.OMUL, t), &n2, &n1)
- p := (*obj.Prog)(gins(arm64.ALSR, nil, &n1))
+ p := gins(arm64.ALSR, nil, &n1)
p.From.Type = obj.TYPE_CONST
- p.From.Offset = int64(w)
+ p.From.Offset = w
case gc.TINT64,
gc.TUINT64:
* res = nl >> nr
*/
func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
- a := int(optoas(op, nl.Type))
+ a := optoas(op, nl.Type)
if nr.Op == gc.OLITERAL {
var n1 gc.Node
gc.Regalloc(&n1, nl.Type, res)
gc.Cgen(nl, &n1)
sc := uint64(nr.Int())
- if sc >= uint64(nl.Type.Width*8) {
+ if sc >= uint64(nl.Type.Width)*8 {
// large shift gets 2 shifts by width-1
var n3 gc.Node
gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
if !bounded {
gc.Nodconst(&n3, tcount, nl.Type.Width*8)
gcmp(optoas(gc.OCMP, tcount), &n1, &n3)
- p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, tcount), nil, +1))
+ p1 := gc.Gbranch(optoas(gc.OLT, tcount), nil, +1)
if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
gins(a, &n3, &n2)
fmt.Printf("clearfat %v (%v, size: %d)\n", nl, nl.Type, nl.Type.Width)
}
- w := uint64(uint64(nl.Type.Width))
+ w := uint64(nl.Type.Width)
// Avoid taking the address for simple enough types.
if gc.Componentgen(nil, nl) {
return
}
- c := uint64(w % 8) // bytes
- q := uint64(w / 8) // dwords
+ c := w % 8 // bytes
+ q := w / 8 // dwords
var r0 gc.Node
gc.Nodreg(&r0, gc.Types[gc.TUINT64], arm64.REGZERO)
p.To.Type = obj.TYPE_MEM
p.To.Offset = 8
p.Scond = arm64.C_XPRE
- pl := (*obj.Prog)(p)
+ pl := p
p = gcmp(arm64.ACMP, &dst, &end)
gc.Patch(gc.Gbranch(arm64.ABNE, nil, 0), pl)
p := gins(arm64.ASUB, nil, &dst)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 8
- f := (*gc.Node)(gc.Sysfunc("duffzero"))
+ f := gc.Sysfunc("duffzero")
p = gins(obj.ADUFFZERO, nil, f)
gc.Afunclit(&p.To, f)
func expandchecks(firstp *obj.Prog) {
var p1 *obj.Prog
- for p := (*obj.Prog)(firstp); p != nil; p = p.Link {
+ for p := firstp; p != nil; p = p.Link {
if gc.Debug_checknil != 0 && gc.Ctxt.Debugvlog != 0 {
fmt.Printf("expandchecks: %v\n", p)
}
continue
}
if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
- gc.Warnl(int(p.Lineno), "generated nil check")
+ gc.Warnl(p.Lineno, "generated nil check")
}
if p.From.Type != obj.TYPE_REG {
gc.Fatalf("invalid nil check %v\n", p)
* generate
* as $c, n
*/
-func ginscon(as int, c int64, n2 *gc.Node) {
+func ginscon(as obj.As, c int64, n2 *gc.Node) {
var n1 gc.Node
gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
* generate
* as n, $c (CMP)
*/
-func ginscon2(as int, n2 *gc.Node, c int64) {
+func ginscon2(as obj.As, n2 *gc.Node, c int64) {
var n1 gc.Node
gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
ft := int(gc.Simsimtype(f.Type))
tt := int(gc.Simsimtype(t.Type))
- cvt := (*gc.Type)(t.Type)
+ cvt := t.Type
if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
gc.Complexmove(f, t)
// cannot have two memory operands
var r1 gc.Node
- var a int
+ var a obj.As
if gc.Ismem(f) && gc.Ismem(t) {
goto hard
}
// gins is called by the front end.
// It synthesizes some multiple-instruction sequences
// so the front end can stay simpler.
-func gins(as int, f, t *gc.Node) *obj.Prog {
+func gins(as obj.As, f, t *gc.Node) *obj.Prog {
if as >= obj.A_ARCHSPECIFIC {
if x, ok := f.IntLiteral(); ok {
ginscon(as, x, t)
* generate one instruction:
* as f, t
*/
-func rawgins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
+func rawgins(as obj.As, f *gc.Node, t *gc.Node) *obj.Prog {
// TODO(austin): Add self-move test like in 6g (but be careful
// of truncation moves)
gc.Naddr(&a, n)
if a.Type != obj.TYPE_REG {
if n != nil {
- gc.Fatalf("bad in raddr: %v", gc.Oconv(int(n.Op), 0))
+ gc.Fatalf("bad in raddr: %v", gc.Oconv(n.Op, 0))
} else {
gc.Fatalf("bad in raddr: <null>")
}
}
}
-func gcmp(as int, lhs *gc.Node, rhs *gc.Node) *obj.Prog {
+func gcmp(as obj.As, lhs *gc.Node, rhs *gc.Node) *obj.Prog {
if lhs.Op != gc.OREGISTER {
- gc.Fatalf("bad operands to gcmp: %v %v", gc.Oconv(int(lhs.Op), 0), gc.Oconv(int(rhs.Op), 0))
+ gc.Fatalf("bad operands to gcmp: %v %v", gc.Oconv(lhs.Op, 0), gc.Oconv(rhs.Op, 0))
}
p := rawgins(as, rhs, nil)
/*
* return Axxx for Oxxx on type t.
*/
-func optoas(op gc.Op, t *gc.Type) int {
+func optoas(op gc.Op, t *gc.Type) obj.As {
if t == nil {
gc.Fatalf("optoas: t is nil")
}
OSQRT_ = uint32(gc.OSQRT) << 16
)
- a := int(obj.AXXX)
+ a := obj.AXXX
switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
default:
- gc.Fatalf("optoas: no entry for op=%v type=%v", gc.Oconv(int(op), 0), t)
+ gc.Fatalf("optoas: no entry for op=%v type=%v", gc.Oconv(op, 0), t)
case OEQ_ | gc.TBOOL,
OEQ_ | gc.TINT8,
* after successful sudoaddable,
* to release the register used for a.
*/
-func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
+func sudoaddable(as obj.As, n *gc.Node, a *obj.Addr) bool {
// TODO(minux)
*a = obj.Addr{}
var gactive uint32
func peep(firstp *obj.Prog) {
- g := (*gc.Graph)(gc.Flowstart(firstp, nil))
+ g := gc.Flowstart(firstp, nil)
if g == nil {
return
}
*/
var p1 *obj.Prog
var r1 *gc.Flow
- for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
+ for r := g.Start; r != nil; r = r.Link {
p = r.Prog
switch p.As {
default:
}
// MOVD $c, R'; ADD R', R (R' unused) -> ADD $c, R
- for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
+ for r := g.Start; r != nil; r = r.Link {
p = r.Prog
switch p.As {
default:
continue
}
if gc.Debug['P'] != 0 {
- fmt.Printf("encoding $%d directly into %v in:\n%v\n%v\n", p.From.Offset, obj.Aconv(int(p1.As)), p, p1)
+ fmt.Printf("encoding $%d directly into %v in:\n%v\n%v\n", p.From.Offset, obj.Aconv(p1.As), p, p1)
}
p1.From.Type = obj.TYPE_CONST
p1.From = p.From
}
func excise(r *gc.Flow) {
- p := (*obj.Prog)(r.Prog)
+ p := r.Prog
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("%v ===delete===\n", p)
}
* above sequences. This returns 1 if it modified any instructions.
*/
func subprop(r0 *gc.Flow) bool {
- p := (*obj.Prog)(r0.Prog)
- v1 := (*obj.Addr)(&p.From)
+ p := r0.Prog
+ v1 := &p.From
if !regtyp(v1) {
return false
}
- v2 := (*obj.Addr)(&p.To)
+ v2 := &p.To
if !regtyp(v2) {
return false
}
}
}
- t := int(int(v1.Reg))
- v1.Reg = v2.Reg
- v2.Reg = int16(t)
+ v1.Reg, v2.Reg = v2.Reg, v1.Reg
if gc.Debug['P'] != 0 {
fmt.Printf("%v last\n", r.Prog)
}
* set v2 return success (caller can remove v1->v2 move)
*/
func copyprop(r0 *gc.Flow) bool {
- p := (*obj.Prog)(r0.Prog)
- v1 := (*obj.Addr)(&p.From)
- v2 := (*obj.Addr)(&p.To)
+ p := r0.Prog
+ v1 := &p.From
+ v2 := &p.To
if copyas(v1, v2) {
if gc.Debug['P'] != 0 {
fmt.Printf("eliminating self-move: %v\n", r0.Prog)
switch p.As {
default:
- fmt.Printf("copyu: can't find %v\n", obj.Aconv(int(p.As)))
+ fmt.Printf("copyu: can't find %v\n", obj.Aconv(p.As))
return 2
case obj.ANOP, /* read p->from, write p->to */
func excludedregs() uint64 {
// Exclude registers with fixed functions
- regbits := uint64(RtoB(arm64.REGRT1) | RtoB(arm64.REGRT2) | RtoB(arm64.REGPR))
+ regbits := RtoB(arm64.REGRT1) | RtoB(arm64.REGRT2) | RtoB(arm64.REGPR)
// Exclude R26 - R31.
for r := arm64.REGMAX + 1; r <= arm64.REGZERO; r++ {
emax = bias // 127 largest unbiased exponent (normal)
)
- // Float mantissa m is 0.5 <= m < 1.0; compute exponent for floatxx mantissa.
+ // Float mantissa m is 0.5 <= m < 1.0; compute exponent for float32 mantissa.
e := x.exp - 1 // exponent for mantissa m with 1.0 <= m < 2.0
p := mbits + 1 // precision of normal float
// If the exponent is too small, we may have a denormal number
- // in which case we have fewer mantissa bits available: reduce
- // precision accordingly.
+ // in which case we have fewer mantissa bits available: recompute
+ // precision.
if e < emin {
- p -= emin - int(e)
+ p = mbits + 1 - emin + int(e)
// Make sure we have at least 1 bit so that we don't
// lose numbers rounded up to the smallest denormal.
if p < 1 {
return 0.0, Below
}
// bexp = 0
- mant = msb32(r.mant) >> (fbits - r.prec)
+ // recompute precision
+ p = mbits + 1 - emin + int(e)
+ mant = msb32(r.mant) >> uint(fbits-p)
} else {
// normal number: emin <= e <= emax
bexp = uint32(e+bias) << mbits
emax = bias // 1023 largest unbiased exponent (normal)
)
- // Float mantissa m is 0.5 <= m < 1.0; compute exponent for floatxx mantissa.
+ // Float mantissa m is 0.5 <= m < 1.0; compute exponent for float64 mantissa.
e := x.exp - 1 // exponent for mantissa m with 1.0 <= m < 2.0
p := mbits + 1 // precision of normal float
// If the exponent is too small, we may have a denormal number
- // in which case we have fewer mantissa bits available: reduce
- // precision accordingly.
+ // in which case we have fewer mantissa bits available: recompute
+ // precision.
if e < emin {
- p -= emin - int(e)
+ p = mbits + 1 - emin + int(e)
// Make sure we have at least 1 bit so that we don't
// lose numbers rounded up to the smallest denormal.
if p < 1 {
return 0.0, Below
}
// bexp = 0
- mant = msb64(r.mant) >> (fbits - r.prec)
+ // recompute precision
+ p = mbits + 1 - emin + int(e)
+ mant = msb64(r.mant) >> uint(fbits-p)
} else {
// normal number: emin <= e <= emax
bexp = uint64(e+bias) << mbits
}
if x.form == finite && y.form == finite {
- // x + y (commom case)
+ // x + y (common case)
z.neg = x.neg
if x.neg == y.neg {
// x + y == x + y
{"1p-149", math.SmallestNonzeroFloat32, Exact},
{"0x.fffffep-126", math.Float32frombits(0x7fffff), Exact}, // largest denormal
+ // special cases (see issue 14553)
+ {"0x0.bp-149", math.Float32frombits(0x000000000), Below}, // ToNearestEven rounds down (to even)
+ {"0x0.cp-149", math.Float32frombits(0x000000001), Above},
+
+ {"0x1.0p-149", math.Float32frombits(0x000000001), Exact},
+ {"0x1.7p-149", math.Float32frombits(0x000000001), Below},
+ {"0x1.8p-149", math.Float32frombits(0x000000002), Above},
+ {"0x1.9p-149", math.Float32frombits(0x000000002), Above},
+
+ {"0x2.0p-149", math.Float32frombits(0x000000002), Exact},
+ {"0x2.8p-149", math.Float32frombits(0x000000002), Below}, // ToNearestEven rounds down (to even)
+ {"0x2.9p-149", math.Float32frombits(0x000000003), Above},
+
+ {"0x3.0p-149", math.Float32frombits(0x000000003), Exact},
+ {"0x3.7p-149", math.Float32frombits(0x000000003), Below},
+ {"0x3.8p-149", math.Float32frombits(0x000000004), Above}, // ToNearestEven rounds up (to even)
+
+ {"0x4.0p-149", math.Float32frombits(0x000000004), Exact},
+ {"0x4.8p-149", math.Float32frombits(0x000000004), Below}, // ToNearestEven rounds down (to even)
+ {"0x4.9p-149", math.Float32frombits(0x000000005), Above},
+
+ // specific case from issue 14553
+ {"0x7.7p-149", math.Float32frombits(0x000000007), Below},
+ {"0x7.8p-149", math.Float32frombits(0x000000008), Above},
+ {"0x7.9p-149", math.Float32frombits(0x000000008), Above},
+
// normals
{"0x.ffffffp-126", math.Float32frombits(0x00800000), Above}, // rounded up to smallest normal
{"1p-126", math.Float32frombits(0x00800000), Exact}, // smallest normal
{"1p-1074", math.SmallestNonzeroFloat64, Exact},
{"0x.fffffffffffffp-1022", math.Float64frombits(0x000fffffffffffff), Exact}, // largest denormal
+ // special cases (see issue 14553)
+ {"0x0.bp-1074", math.Float64frombits(0x00000000000000000), Below}, // ToNearestEven rounds down (to even)
+ {"0x0.cp-1074", math.Float64frombits(0x00000000000000001), Above},
+
+ {"0x1.0p-1074", math.Float64frombits(0x00000000000000001), Exact},
+ {"0x1.7p-1074", math.Float64frombits(0x00000000000000001), Below},
+ {"0x1.8p-1074", math.Float64frombits(0x00000000000000002), Above},
+ {"0x1.9p-1074", math.Float64frombits(0x00000000000000002), Above},
+
+ {"0x2.0p-1074", math.Float64frombits(0x00000000000000002), Exact},
+ {"0x2.8p-1074", math.Float64frombits(0x00000000000000002), Below}, // ToNearestEven rounds down (to even)
+ {"0x2.9p-1074", math.Float64frombits(0x00000000000000003), Above},
+
+ {"0x3.0p-1074", math.Float64frombits(0x00000000000000003), Exact},
+ {"0x3.7p-1074", math.Float64frombits(0x00000000000000003), Below},
+ {"0x3.8p-1074", math.Float64frombits(0x00000000000000004), Above}, // ToNearestEven rounds up (to even)
+
+ {"0x4.0p-1074", math.Float64frombits(0x00000000000000004), Exact},
+ {"0x4.8p-1074", math.Float64frombits(0x00000000000000004), Below}, // ToNearestEven rounds down (to even)
+ {"0x4.9p-1074", math.Float64frombits(0x00000000000000005), Above},
+
// normals
{"0x.fffffffffffff8p-1022", math.Float64frombits(0x0010000000000000), Above}, // rounded up to smallest normal
{"1p-1022", math.Float64frombits(0x0010000000000000), Exact}, // smallest normal
if fcount < 0 {
// The mantissa has a "decimal" point ddd.dddd; and
// -fcount is the number of digits to the right of '.'.
- // Adjust relevant exponent accodingly.
+ // Adjust relevant exponent accordingly.
d := int64(fcount)
switch b {
case 10:
}
switch t.Etype {
- // will be defined later.
case TANY, TFORW:
+ // will be defined later.
*bad = t
-
return -1
- case TINT8,
- TUINT8,
- TINT16,
- TUINT16,
- TINT32,
- TUINT32,
- TINT64,
- TUINT64,
- TINT,
- TUINT,
- TUINTPTR,
- TBOOL,
- TPTR32,
- TPTR64,
- TCHAN,
- TUNSAFEPTR:
+ case TINT8, TUINT8, TINT16, TUINT16,
+ TINT32, TUINT32, TINT64, TUINT64,
+ TINT, TUINT, TUINTPTR,
+ TBOOL, TPTR32, TPTR64,
+ TCHAN, TUNSAFEPTR:
return AMEM
case TFUNC, TMAP:
}
a := algtype1(t.Type, bad)
- if a == ANOEQ || a == AMEM {
- if a == ANOEQ && bad != nil {
+ switch a {
+ case AMEM:
+ return AMEM
+ case ANOEQ:
+ if bad != nil {
*bad = t
}
- return a
+ return ANOEQ
}
switch t.Bound {
}
ret := AMEM
- var a int
- for t1 := t.Type; t1 != nil; t1 = t1.Down {
+ for f := t.Type; f != nil; f = f.Down {
// All fields must be comparable.
- a = algtype1(t1.Type, bad)
-
+ a := algtype1(f.Type, bad)
if a == ANOEQ {
return ANOEQ
}
// Blank fields, padded fields, fields with non-memory
// equality need special compare.
- if a != AMEM || isblanksym(t1.Sym) || ispaddedfield(t1, t.Width) {
+ if a != AMEM || isblanksym(f.Sym) || ispaddedfield(t, f) {
ret = -1
- continue
}
}
fn.Func.Nname.Name.Param.Ntype = tfn
n := Nod(ODCLFIELD, newname(Lookup("p")), typenod(Ptrto(t)))
- tfn.List = list(tfn.List, n)
+ tfn.List.Append(n)
np := n.Left
n = Nod(ODCLFIELD, newname(Lookup("h")), typenod(Types[TUINTPTR]))
- tfn.List = list(tfn.List, n)
+ tfn.List.Append(n)
nh := n.Left
n = Nod(ODCLFIELD, nil, typenod(Types[TUINTPTR])) // return value
- tfn.Rlist = list(tfn.Rlist, n)
+ tfn.Rlist.Append(n)
funchdr(fn)
typecheck(&fn.Func.Nname.Name.Param.Ntype, Etype)
n := Nod(ORANGE, nil, Nod(OIND, np, nil))
ni := newname(Lookup("i"))
ni.Type = Types[TINT]
- n.List = list1(ni)
+ n.List.Set([]*Node{ni})
n.Colas = true
colasdefn(n.List, n)
- ni = n.List.N
+ ni = n.List.First()
// h = hashel(&p[i], h)
call := Nod(OCALL, hashel, nil)
nx.Bounded = true
na := Nod(OADDR, nx, nil)
na.Etype = 1 // no escape to heap
- call.List = list(call.List, na)
- call.List = list(call.List, nh)
+ call.List.Append(na)
+ call.List.Append(nh)
n.Nbody.Append(Nod(OAS, nh, call))
fn.Nbody.Append(n)
- // Walk the struct using memhash for runs of AMEM
- // and calling specific hash functions for the others.
case TSTRUCT:
- var call *Node
- var nx *Node
- var na *Node
- var hashel *Node
-
- t1 := t.Type
- for {
- first, size, next := memrun(t, t1)
- t1 = next
-
- // Run memhash for fields up to this one.
- if first != nil {
- hashel = hashmem(first.Type)
-
- // h = hashel(&p.first, size, h)
- call = Nod(OCALL, hashel, nil)
+ // Walk the struct using memhash for runs of AMEM
+ // and calling specific hash functions for the others.
+ for f := t.Type; f != nil; {
+ // Skip blank fields.
+ if isblanksym(f.Sym) {
+ f = f.Down
+ continue
+ }
- nx = Nod(OXDOT, np, newname(first.Sym)) // TODO: fields from other packages?
- na = Nod(OADDR, nx, nil)
+ // Hash non-memory fields with appropriate hash function.
+ if algtype1(f.Type, nil) != AMEM {
+ hashel := hashfor(f.Type)
+ call := Nod(OCALL, hashel, nil)
+ nx := Nod(OXDOT, np, newname(f.Sym)) // TODO: fields from other packages?
+ na := Nod(OADDR, nx, nil)
na.Etype = 1 // no escape to heap
- call.List = list(call.List, na)
- call.List = list(call.List, nh)
- call.List = list(call.List, Nodintconst(size))
+ call.List.Append(na)
+ call.List.Append(nh)
fn.Nbody.Append(Nod(OAS, nh, call))
- }
-
- if t1 == nil {
- break
- }
- if isblanksym(t1.Sym) {
- t1 = t1.Down
- continue
- }
- if algtype1(t1.Type, nil) == AMEM {
- // Our memory run might have been stopped by padding or a blank field.
- // If the next field is memory-ish, it could be the start of a new run.
+ f = f.Down
continue
}
- hashel = hashfor(t1.Type)
- call = Nod(OCALL, hashel, nil)
- nx = Nod(OXDOT, np, newname(t1.Sym)) // TODO: fields from other packages?
- na = Nod(OADDR, nx, nil)
+ // Otherwise, hash a maximal length run of raw memory.
+ size, next := memrun(t, f)
+
+ // h = hashel(&p.first, size, h)
+ hashel := hashmem(f.Type)
+ call := Nod(OCALL, hashel, nil)
+ nx := Nod(OXDOT, np, newname(f.Sym)) // TODO: fields from other packages?
+ na := Nod(OADDR, nx, nil)
na.Etype = 1 // no escape to heap
- call.List = list(call.List, na)
- call.List = list(call.List, nh)
+ call.List.Append(na)
+ call.List.Append(nh)
+ call.List.Append(Nodintconst(size))
fn.Nbody.Append(Nod(OAS, nh, call))
- t1 = t1.Down
+ f = next
}
}
r := Nod(ORETURN, nil, nil)
- r.List = list(r.List, nh)
+ r.List.Append(nh)
fn.Nbody.Append(r)
if Debug['r'] != 0 {
- dumpslice("genhash body", fn.Nbody.Slice())
+ dumplist("genhash body", fn.Nbody)
}
funcbody(fn)
Curfn = fn
fn.Func.Dupok = true
typecheck(&fn, Etop)
- typecheckslice(fn.Nbody.Slice(), Etop)
+ typechecklist(fn.Nbody.Slice(), Etop)
Curfn = nil
// Disable safemode while compiling this code: the code we
func hashfor(t *Type) *Node {
var sym *Sym
- a := algtype1(t, nil)
- switch a {
+ switch algtype1(t, nil) {
case AMEM:
Fatalf("hashfor with AMEM type")
-
case AINTER:
sym = Pkglookup("interhash", Runtimepkg)
-
case ANILINTER:
sym = Pkglookup("nilinterhash", Runtimepkg)
-
case ASTRING:
sym = Pkglookup("strhash", Runtimepkg)
-
case AFLOAT32:
sym = Pkglookup("f32hash", Runtimepkg)
-
case AFLOAT64:
sym = Pkglookup("f64hash", Runtimepkg)
-
case ACPLX64:
sym = Pkglookup("c64hash", Runtimepkg)
-
case ACPLX128:
sym = Pkglookup("c128hash", Runtimepkg)
-
default:
sym = typesymprefix(".hash", t)
}
n := newname(sym)
n.Class = PFUNC
tfn := Nod(OTFUNC, nil, nil)
- tfn.List = list(tfn.List, Nod(ODCLFIELD, nil, typenod(Ptrto(t))))
- tfn.List = list(tfn.List, Nod(ODCLFIELD, nil, typenod(Types[TUINTPTR])))
- tfn.Rlist = list(tfn.Rlist, Nod(ODCLFIELD, nil, typenod(Types[TUINTPTR])))
+ tfn.List.Append(Nod(ODCLFIELD, nil, typenod(Ptrto(t))))
+ tfn.List.Append(Nod(ODCLFIELD, nil, typenod(Types[TUINTPTR])))
+ tfn.Rlist.Append(Nod(ODCLFIELD, nil, typenod(Types[TUINTPTR])))
typecheck(&tfn, Etype)
n.Type = tfn.Type
return n
fn.Func.Nname.Name.Param.Ntype = tfn
n := Nod(ODCLFIELD, newname(Lookup("p")), typenod(Ptrto(t)))
- tfn.List = list(tfn.List, n)
+ tfn.List.Append(n)
np := n.Left
n = Nod(ODCLFIELD, newname(Lookup("q")), typenod(Ptrto(t)))
- tfn.List = list(tfn.List, n)
+ tfn.List.Append(n)
nq := n.Left
n = Nod(ODCLFIELD, nil, typenod(Types[TBOOL]))
- tfn.Rlist = list(tfn.Rlist, n)
+ tfn.Rlist.Append(n)
funchdr(fn)
ni := newname(Lookup("i"))
ni.Type = Types[TINT]
- nrange.List = list1(ni)
+ nrange.List.Set([]*Node{ni})
nrange.Colas = true
colasdefn(nrange.List, nrange)
- ni = nrange.List.N
+ ni = nrange.List.First()
// if p[i] != q[i] { return false }
nx := Nod(OINDEX, np, ni)
nif := Nod(OIF, nil, nil)
nif.Left = Nod(ONE, nx, ny)
r := Nod(ORETURN, nil, nil)
- r.List = list(r.List, Nodbool(false))
+ r.List.Append(Nodbool(false))
nif.Nbody.Append(r)
nrange.Nbody.Append(nif)
fn.Nbody.Append(nrange)
// return true
ret := Nod(ORETURN, nil, nil)
- ret.List = list(ret.List, Nodbool(true))
+ ret.List.Append(Nodbool(true))
fn.Nbody.Append(ret)
- // Walk the struct using memequal for runs of AMEM
- // and calling specific equality tests for the others.
- // Skip blank-named fields.
case TSTRUCT:
var conjuncts []*Node
- t1 := t.Type
- for {
- first, size, next := memrun(t, t1)
- t1 = next
-
- // Run memequal for fields up to this one.
- // TODO(rsc): All the calls to newname are wrong for
- // cross-package unexported fields.
- if first != nil {
- if first.Down == t1 {
- conjuncts = append(conjuncts, eqfield(np, nq, newname(first.Sym)))
- } else if first.Down.Down == t1 {
- conjuncts = append(conjuncts, eqfield(np, nq, newname(first.Sym)))
- first = first.Down
- if !isblanksym(first.Sym) {
- conjuncts = append(conjuncts, eqfield(np, nq, newname(first.Sym)))
- }
- } else {
- // More than two fields: use memequal.
- conjuncts = append(conjuncts, eqmem(np, nq, newname(first.Sym), size))
- }
- }
-
- if t1 == nil {
- break
- }
- if isblanksym(t1.Sym) {
- t1 = t1.Down
+ // Walk the struct using memequal for runs of AMEM
+ // and calling specific equality tests for the others.
+ for f := t.Type; f != nil; {
+ // Skip blank-named fields.
+ if isblanksym(f.Sym) {
+ f = f.Down
continue
}
- if algtype1(t1.Type, nil) == AMEM {
- // Our memory run might have been stopped by padding or a blank field.
- // If the next field is memory-ish, it could be the start of a new run.
+
+ // Compare non-memory fields with field equality.
+ if algtype1(f.Type, nil) != AMEM {
+ conjuncts = append(conjuncts, eqfield(np, nq, newname(f.Sym)))
+ f = f.Down
continue
}
- // Check this field, which is not just memory.
- conjuncts = append(conjuncts, eqfield(np, nq, newname(t1.Sym)))
- t1 = t1.Down
+ // Find maximal length run of memory-only fields.
+ size, next := memrun(t, f)
+
+ // Run memequal on fields from f to next.
+ // TODO(rsc): All the calls to newname are wrong for
+ // cross-package unexported fields.
+ if f.Down == next {
+ conjuncts = append(conjuncts, eqfield(np, nq, newname(f.Sym)))
+ } else if f.Down.Down == next {
+ conjuncts = append(conjuncts, eqfield(np, nq, newname(f.Sym)))
+ conjuncts = append(conjuncts, eqfield(np, nq, newname(f.Down.Sym)))
+ } else {
+ // More than two fields: use memequal.
+ conjuncts = append(conjuncts, eqmem(np, nq, newname(f.Sym), size))
+ }
+ f = next
}
var and *Node
}
ret := Nod(ORETURN, nil, nil)
- ret.List = list(ret.List, and)
+ ret.List.Append(and)
fn.Nbody.Append(ret)
}
if Debug['r'] != 0 {
- dumpslice("geneq body", fn.Nbody.Slice())
+ dumplist("geneq body", fn.Nbody)
}
funcbody(fn)
Curfn = fn
fn.Func.Dupok = true
typecheck(&fn, Etop)
- typecheckslice(fn.Nbody.Slice(), Etop)
+ typechecklist(fn.Nbody.Slice(), Etop)
Curfn = nil
// Disable safemode while compiling this code: the code we
// eqmem returns the node
// memequal(&p.field, &q.field [, size])
func eqmem(p *Node, q *Node, field *Node, size int64) *Node {
- var needsize int
-
nx := Nod(OADDR, Nod(OXDOT, p, field), nil)
nx.Etype = 1 // does not escape
ny := Nod(OADDR, Nod(OXDOT, q, field), nil)
typecheck(&nx, Erv)
typecheck(&ny, Erv)
- call := Nod(OCALL, eqmemfunc(size, nx.Type.Type, &needsize), nil)
- call.List = list(call.List, nx)
- call.List = list(call.List, ny)
- if needsize != 0 {
- call.List = list(call.List, Nodintconst(size))
+ fn, needsize := eqmemfunc(size, nx.Type.Type)
+ call := Nod(OCALL, fn, nil)
+ call.List.Append(nx)
+ call.List.Append(ny)
+ if needsize {
+ call.List.Append(Nodintconst(size))
}
return call
}
-func eqmemfunc(size int64, type_ *Type, needsize *int) *Node {
- var fn *Node
-
+func eqmemfunc(size int64, t *Type) (fn *Node, needsize bool) {
switch size {
default:
- fn = syslook("memequal", 1)
- *needsize = 1
-
+ fn = syslook("memequal")
+ needsize = true
case 1, 2, 4, 8, 16:
buf := fmt.Sprintf("memequal%d", int(size)*8)
- fn = syslook(buf, 1)
- *needsize = 0
+ fn = syslook(buf)
}
- substArgTypes(fn, type_, type_)
- return fn
+ substArgTypes(&fn, t, t)
+ return fn, needsize
}
// memrun finds runs of struct fields for which memory-only algs are appropriate.
-// t is the parent struct type, and field is the field at which to start.
-// first is the first field in the memory run.
+// t is the parent struct type, and start is the field that starts the run.
// size is the length in bytes of the memory included in the run.
// next is the next field after the memory run.
-func memrun(t *Type, field *Type) (first *Type, size int64, next *Type) {
- var offend int64
+func memrun(t *Type, start *Type) (size int64, next *Type) {
+ var last *Type
+ next = start
for {
- if field == nil || algtype1(field.Type, nil) != AMEM || isblanksym(field.Sym) {
+ last, next = next, next.Down
+ if next == nil {
break
}
- offend = field.Width + field.Type.Width
- if first == nil {
- first = field
+ // Stop run after a padded field.
+ if ispaddedfield(t, last) {
+ break
}
-
- // If it's a memory field but it's padded, stop here.
- if ispaddedfield(field, t.Width) {
- field = field.Down
+ // Also, stop before a blank or non-memory field.
+ if isblanksym(next.Sym) || algtype1(next.Type, nil) != AMEM {
break
}
- field = field.Down
}
- if first != nil {
- size = offend - first.Width // first.Width is offset
- }
- return first, size, field
+ end := last.Width + last.Type.Width
+ return end - start.Width, next
}
-// ispaddedfield reports whether the given field
-// is followed by padding. For the case where t is
-// the last field, total gives the size of the enclosing struct.
-func ispaddedfield(t *Type, total int64) bool {
- if t.Etype != TFIELD {
- Fatalf("ispaddedfield called non-field %v", t)
+// ispaddedfield reports whether the given field f, assumed to be
+// a field in struct t, is followed by padding.
+func ispaddedfield(t *Type, f *Type) bool {
+ if t.Etype != TSTRUCT {
+ Fatalf("ispaddedfield called non-struct %v", t)
+ }
+ if f.Etype != TFIELD {
+ Fatalf("ispaddedfield called non-field %v", f)
}
- if t.Down == nil {
- return t.Width+t.Type.Width != total
+ end := t.Width
+ if f.Down != nil {
+ end = f.Down.Width
}
- return t.Width+t.Type.Width != t.Down.Width
+ return f.Width+f.Type.Width != end
}
}
if t.Width == -2 {
- lno := int(lineno)
- lineno = int32(t.Lineno)
if !t.Broke {
t.Broke = true
- Yyerror("invalid recursive type %v", t)
+ yyerrorl(t.Lineno, "invalid recursive type %v", t)
}
t.Width = 0
- lineno = int32(lno)
return
}
// defer checkwidth calls until after we're done
defercalc++
- lno := int(lineno)
- lineno = int32(t.Lineno)
+ lno := lineno
+ lineno = t.Lineno
t.Width = -2
t.Align = 0
case TFUNCARGS:
t1 := t.Type
- w = widstruct(t.Type, *getthis(t1), 0, 0)
- w = widstruct(t.Type, *getinarg(t1), w, Widthreg)
- w = widstruct(t.Type, *Getoutarg(t1), w, Widthreg)
+ w = widstruct(t.Type, t1.Recv(), 0, 0)
+ w = widstruct(t.Type, t1.Params(), w, Widthreg)
+ w = widstruct(t.Type, t1.Results(), w, Widthreg)
t1.Argwid = w
if w%int64(Widthreg) != 0 {
Warn("bad type %v %d\n", t1, w)
t.Align = uint8(w)
}
- lineno = int32(lno)
+ lineno = lno
if defercalc == 1 {
resumecheckwidth()
// dowidth should only be called when the type's size
// is needed immediately. checkwidth makes sure the
// size is evaluated eventually.
-type TypeList struct {
- t *Type
- next *TypeList
-}
-
-var tlfree *TypeList
-var tlq *TypeList
+var deferredTypeStack []*Type
func checkwidth(t *Type) {
if t == nil {
}
t.Deferwidth = true
- l := tlfree
- if l != nil {
- tlfree = l.next
- } else {
- l = new(TypeList)
- }
-
- l.t = t
- l.next = tlq
- tlq = l
+ deferredTypeStack = append(deferredTypeStack, t)
}
func defercheckwidth() {
if defercalc == 0 {
Fatalf("resumecheckwidth")
}
- for l := tlq; l != nil; l = tlq {
- l.t.Deferwidth = false
- tlq = l.next
- dowidth(l.t)
- l.next = tlfree
- tlfree = l
+ for len(deferredTypeStack) > 0 {
+ t := deferredTypeStack[len(deferredTypeStack)-1]
+ deferredTypeStack = deferredTypeStack[:len(deferredTypeStack)-1]
+ t.Deferwidth = false
+ dowidth(t)
}
defercalc = 0
Simtype[TFUNC] = Tptr
Simtype[TUNSAFEPTR] = Tptr
- // pick up the backend thearch.typedefs
- for i = range Thearch.Typedefs {
- s := Lookup(Thearch.Typedefs[i].Name)
- s1 := Pkglookup(Thearch.Typedefs[i].Name, builtinpkg)
-
- etype := Thearch.Typedefs[i].Etype
- if int(etype) >= len(Types) {
- Fatalf("typeinit: %s bad etype", s.Name)
- }
- sameas := Thearch.Typedefs[i].Sameas
- if int(sameas) >= len(Types) {
- Fatalf("typeinit: %s bad sameas", s.Name)
- }
- Simtype[etype] = sameas
- minfltval[etype] = minfltval[sameas]
- maxfltval[etype] = maxfltval[sameas]
- Minintval[etype] = Minintval[sameas]
- Maxintval[etype] = Maxintval[sameas]
-
- t = Types[etype]
- if t != nil {
- Fatalf("typeinit: %s already defined", s.Name)
- }
-
- t = typ(etype)
- t.Sym = s1
-
- dowidth(t)
- Types[etype] = t
- s1.Def = typenod(t)
- s1.Def.Name = new(Name)
- }
-
Array_array = int(Rnd(0, int64(Widthptr)))
Array_nel = int(Rnd(int64(Array_array)+int64(Widthptr), int64(Widthint)))
Array_cap = int(Rnd(int64(Array_nel)+int64(Widthint), int64(Widthint)))
// compute total size of f's in/out arguments.
func Argsize(t *Type) int {
- var save Iter
- var x int64
-
- w := int64(0)
-
- fp := Structfirst(&save, Getoutarg(t))
- for fp != nil {
- x = fp.Width + fp.Type.Width
- if x > w {
- w = x
- }
- fp = structnext(&save)
- }
+ var w int64
- fp = funcfirst(&save, t)
- for fp != nil {
- x = fp.Width + fp.Type.Width
- if x > w {
- w = x
+ for _, p := range recvParamsResults {
+ for f, it := IterFields(p(t)); f != nil; f = it.Next() {
+ if x := f.Width + f.Type.Width; x > w {
+ w = x
+ }
}
- fp = funcnext(&save)
}
w = (w + int64(Widthptr) - 1) &^ (int64(Widthptr) - 1)
// Binary package export.
// Based loosely on x/tools/go/importer.
-// (see fmt.go, go.y as "documentation" for how to use/setup data structures)
+// (see fmt.go, parser.go as "documentation" for how to use/setup data structures)
//
// Use "-newexport" flag to enable.
-// TODO(gri):
-// - inlined functions
-
/*
Export data encoding:
encoded as either an integer or string value.
Only packages and types may be referred to more than once. When getting
-to a package or type that was not serialized before, a number (index) is
-assigned to it, starting at 0. In this case, the encoding starts with an
-integer tag with a value < 0. The tag value indicates the kind of object
+to a package or type that was not serialized before, an integer _index_
+is assigned to it, starting at 0. In this case, the encoding starts
+with an integer _tag_ < 0. The tag value indicates the kind of object
(package or type) that follows and that this is the first time that we
see this object. If the package or tag was already serialized, the encoding
starts with the respective package or type index >= 0. An importer can
trivially determine if a package or type needs to be read in for the first
time (tag < 0) and entered into the respective package or type table, or
if the package or type was seen already (index >= 0), in which case the
-index is the table index where the respective object can be found.
+index is used to look up the object in a table.
Before exporting or importing, the type tables are populated with the
predeclared types (int, string, error, unsafe.Pointer, etc.). This way
(i.e., one pointer) for each named type (and read but discard the current
type encoding). Unnamed types simply encode their respective fields.
-In the encoding, all lists (of objects, struct fields, methods, parameter
-names, but also the bytes of a string, etc.) start with an integer which
-is the length of the list. This permits an importer to allocate the right
-amount of space to hold the list without the need to grow it later.
+In the encoding, any list (of objects, struct fields, methods, parameter
+names, but also the bytes of a string, etc.) starts with the list length.
+This permits an importer to allocate the right amount of memory for the
+list upfront, without the need to grow it later.
-All integer values use a variable-length encoding for compact representation.
+All integer values use variable-length encoding for compact representation.
If debugFormat is set, each integer and string value is preceded by a marker
and position information in the encoding. This mechanism permits an importer
to recognize immediately when it is out of sync. The importer recognizes this
mode automatically (i.e., it can import export data produced with debugging
-support even if debugFormat is not set at the time of import). Using this mode
-will massively increase the size of the export data (by a factor of 2 to 3)
-and is only recommended for debugging.
+support even if debugFormat is not set at the time of import). This mode will
+lead to massively larger export data (by a factor of 2 to 3) and should only
+be enabled during development and debugging.
The exporter and importer are completely symmetric in implementation: For
-each encoding routine there is the matching and symmetric decoding routine.
+each encoding routine there is a matching and symmetric decoding routine.
This symmetry makes it very easy to change or extend the format: If a new
field needs to be encoded, a symmetric change can be made to exporter and
importer.
debugFormat = false // use debugging format for export data (emits a lot of additional data)
)
-const exportVersion = "v0"
-
-// Set forceNewExport to force the use of the new export format - for testing on the build dashboard.
// TODO(gri) remove eventually
-const forceNewExport = false
+const forceNewExport = false // force new export format - do not submit with this flag set
+
+const exportVersion = "v0"
// Export writes the export data for localpkg to out and returns the number of bytes written.
func Export(out *obj.Biobuf, trace bool) int {
// --- generic export data ---
if p.trace {
- p.tracef("\n--- generic export data ---\n")
+ p.tracef("\n--- package ---\n")
if p.indent != 0 {
- Fatalf("incorrect indentation %d", p.indent)
+ Fatalf("exporter: incorrect indentation %d", p.indent)
}
}
+ if p.trace {
+ p.tracef("version = ")
+ }
p.string(exportVersion)
if p.trace {
p.tracef("\n")
p.typIndex[typ] = index
}
if len(p.typIndex) != len(predecl) {
- Fatalf("duplicate entries in type map?")
+ Fatalf("exporter: duplicate entries in type map?")
}
// write package data
if localpkg.Path != "" {
- Fatalf("local package path not empty: %q", localpkg.Path)
+ Fatalf("exporter: local package path not empty: %q", localpkg.Path)
}
p.pkg(localpkg)
// write compiler-specific flags
- // go.y:import_safety
{
var flags string
if safemode != 0 {
}
p.string(flags)
}
-
if p.trace {
p.tracef("\n")
}
// We may not need this eventually. See also comment
// on sym.Flags&SymExported test above.
if strings.Contains(sym.Name, ".") {
- Fatalf("unexpected export symbol: %v", sym)
+ Fatalf("exporter: unexpected symbol: %v", sym)
}
if sym.Flags&SymExport != 0 {
if sym.Def == nil {
- Fatalf("unknown export symbol: %v", sym)
+ Fatalf("exporter: unknown export symbol: %v", sym)
}
switch n := sym.Def; n.Op {
case OLITERAL:
// constant
typecheck(&n, Erv)
if n == nil || n.Op != OLITERAL {
- Fatalf("dumpexportconst: oconst nil: %v", sym)
+ Fatalf("exporter: dumpexportconst: oconst nil: %v", sym)
}
consts = append(consts, sym)
// variable or function
typecheck(&n, Erv|Ecall)
if n == nil || n.Type == nil {
- Fatalf("variable/function exported but not defined: %v", sym)
+ Fatalf("exporter: variable/function exported but not defined: %v", sym)
}
if n.Type.Etype == TFUNC && n.Class == PFUNC {
funcs = append(funcs, sym)
// named type
t := n.Type
if t.Etype == TFORW {
- Fatalf("export of incomplete type %v", sym)
+ Fatalf("exporter: export of incomplete type %v", sym)
}
types = append(types, t)
default:
- Fatalf("unexpected export symbol: %v %v", Oconv(int(n.Op), 0), sym)
+ Fatalf("exporter: unexpected export symbol: %v %v", Oconv(n.Op, 0), sym)
}
}
}
// sort types later when we have fewer types left
// write consts
+ if p.trace {
+ p.tracef("\n--- consts ---\n[ ")
+ }
p.int(len(consts))
+ if p.trace {
+ p.tracef("]\n")
+ }
for _, sym := range consts {
- n := sym.Def
- typ := n.Type // may or may not be specified
- // Untyped (ideal) constants get their own type. This decouples
- // the constant type from the encoding of the constant value.
- if typ == nil || isideal(typ) {
- typ = untype(n.Val().Ctype())
- }
-
p.string(sym.Name)
- p.typ(typ)
+ n := sym.Def
+ p.typ(unidealType(n.Type, n.Val()))
p.value(n.Val())
+ if p.trace {
+ p.tracef("\n")
+ }
}
// write vars
+ if p.trace {
+ p.tracef("\n--- vars ---\n[ ")
+ }
p.int(len(vars))
+ if p.trace {
+ p.tracef("]\n")
+ }
for _, sym := range vars {
p.string(sym.Name)
p.typ(sym.Def.Type)
+ if p.trace {
+ p.tracef("\n")
+ }
}
// write funcs
+ if p.trace {
+ p.tracef("\n--- funcs ---\n[ ")
+ }
p.int(len(funcs))
+ if p.trace {
+ p.tracef("]\n")
+ }
for _, sym := range funcs {
p.string(sym.Name)
// The type can only be a signature for functions. However, by always
// we keep the option open of sharing common signatures across multiple
// functions as a means to further compress the export data.
p.typ(sym.Def.Type)
- p.int(p.collectInlined(sym.Def))
+ p.inlinedBody(sym.Def)
+ if p.trace {
+ p.tracef("\n")
+ }
}
// determine which types are still left to write and sort them
sort.Sort(typByName(types))
// write types
+ if p.trace {
+ p.tracef("\n--- types ---\n[ ")
+ }
p.int(len(types))
+ if p.trace {
+ p.tracef("]\n")
+ }
for _, t := range types {
// Writing a type may further reduce the number of types
// that are left to be written, but at this point we don't
// care.
p.typ(t)
- }
-
- if p.trace {
- p.tracef("\n")
+ if p.trace {
+ p.tracef("\n")
+ }
}
// --- compiler-specific export data ---
if p.trace {
- p.tracef("\n--- compiler specific export data ---\n")
+ p.tracef("\n--- inlined function bodies ---\n[ ")
if p.indent != 0 {
- Fatalf("incorrect indentation")
+ Fatalf("exporter: incorrect indentation")
}
}
// write inlined function bodies
p.int(len(p.inlined))
- for i, f := range p.inlined {
- p.body(i, f)
+ if p.trace {
+ p.tracef("]\n")
+ }
+ for _, f := range p.inlined {
+ if p.trace {
+ p.tracef("{ %s }\n", Hconv(f.Inl, obj.FmtSharp))
+ }
+ p.nodeList(f.Inl)
+ if p.trace {
+ p.tracef("\n")
+ }
}
if p.trace {
- p.tracef("\n")
+ p.tracef("\n--- end ---\n")
}
// --- end of export data ---
return p.written
}
+func unidealType(typ *Type, val Val) *Type {
+ // Untyped (ideal) constants get their own type. This decouples
+ // the constant type from the encoding of the constant value.
+ if typ == nil || isideal(typ) {
+ typ = untype(val.Ctype())
+ }
+ return typ
+}
+
type symByName []*Sym
func (a symByName) Len() int { return len(a) }
func (p *exporter) pkg(pkg *Pkg) {
if pkg == nil {
- Fatalf("unexpected nil pkg")
+ Fatalf("exporter: unexpected nil pkg")
}
// if we saw the package before, write its index (>= 0)
func (p *exporter) typ(t *Type) {
if t == nil {
- Fatalf("nil type")
+ Fatalf("exporter: nil type")
}
// Possible optimization: Anonymous pointer types *T where
if sym := t.Sym; sym != nil {
// Fields should be exported by p.field().
if t.Etype == TFIELD {
- Fatalf("printing a field/parameter with wrong function")
+ Fatalf("exporter: printing a field/parameter with wrong function")
}
// Predeclared types should have been found in the type map.
if t.Orig == t {
- Fatalf("predeclared type missing from type map?")
+ Fatalf("exporter: predeclared type missing from type map?")
}
// TODO(gri) The assertion below seems incorrect (crashes during all.bash).
// Investigate.
/*
// we expect the respective definition to point to us
if sym.Def.Type != t {
- Fatalf("type definition doesn't point to us?")
+ Fatalf("exporter: type definition doesn't point to us?")
}
*/
for _, m := range methods {
p.string(m.Sym.Name)
- p.paramList(getthisx(m.Type))
- p.paramList(getinargx(m.Type))
- p.paramList(getoutargx(m.Type))
- p.int(p.collectInlined(m.Type.Nname))
+ p.paramList(m.Type.Recv())
+ p.paramList(m.Type.Params())
+ p.paramList(m.Type.Results())
+ p.inlinedBody(m.Type.Nname)
if p.trace && m.Down != nil {
p.tracef("\n")
case TFUNC:
p.tag(signatureTag)
- p.paramList(getinargx(t))
- p.paramList(getoutargx(t))
+ p.paramList(t.Params())
+ p.paramList(t.Results())
case TINTER:
p.tag(interfaceTag)
p.typ(t.Type)
default:
- Fatalf("unexpected type: %s (Etype = %d)", Tconv(t, 0), t.Etype)
+ Fatalf("exporter: unexpected type: %s (Etype = %d)", Tconv(t, 0), t.Etype)
}
}
func (p *exporter) field(f *Type) {
if f.Etype != TFIELD {
- Fatalf("field expected")
+ Fatalf("exporter: field expected")
}
p.fieldName(f)
func (p *exporter) method(m *Type) {
if m.Etype != TFIELD {
- Fatalf("method expected")
+ Fatalf("exporter: method expected")
}
p.fieldName(m)
// TODO(gri) For functions signatures, we use p.typ() to export
// so we could share the same type with multiple functions. Do
// the same here, or never try to do this for functions.
- p.paramList(getinargx(m.Type))
- p.paramList(getoutargx(m.Type))
+ p.paramList(m.Type.Params())
+ p.paramList(m.Type.Results())
}
// fieldName is like qualifiedName but it doesn't record the package
func (p *exporter) paramList(params *Type) {
if params.Etype != TSTRUCT || !params.Funarg {
- Fatalf("parameter list expected")
+ Fatalf("exporter: parameter list expected")
}
// use negative length to indicate unnamed parameters
func (p *exporter) param(q *Type, n int) {
if q.Etype != TFIELD {
- Fatalf("parameter expected")
+ Fatalf("exporter: parameter expected")
}
t := q.Type
if q.Isddd {
// TODO(gri) This is compiler-specific (escape info).
// Move into compiler-specific section eventually?
// (Not having escape info causes tests to fail, e.g. runtime GCInfoTest)
+ //
+	// TODO(gri) The q.Note is much more verbose than necessary and
+ // adds significantly to export data size. FIX THIS.
p.note(q.Note)
}
case 'r':
return ""
default:
- Fatalf("unexpected parameter name: %s", name)
+ Fatalf("exporter: unexpected parameter name: %s", name)
}
}
// undo gc-internal name specialization
}
// uncommon case: large x - use float encoding
// (powers of 2 will be encoded efficiently with exponent)
- p.tag(floatTag)
f := newMpflt()
Mpmovefixflt(f, x)
+ p.tag(floatTag)
p.float(f)
case *Mpflt:
p.tag(stringTag)
p.string(x)
+ case *NilVal:
+ // not a constant but used in exported function bodies
+ p.tag(nilTag)
+
default:
- Fatalf("unexpected value %v (%T)", x, x)
+ Fatalf("exporter: unexpected value %v (%T)", x, x)
}
}
m.SetMantExp(&m, int(m.MinPrec()))
mant, acc := m.Int(nil)
if acc != big.Exact {
- Fatalf("internal error")
+ Fatalf("exporter: internal error")
}
p.int(sign)
// ----------------------------------------------------------------------------
// Inlined function bodies
-// TODO(gri) This section is incomplete. At the moment nothing meaningful
-// is written out for exported functions with inlined function bodies.
-
-func (p *exporter) collectInlined(n *Node) int {
+func (p *exporter) inlinedBody(n *Node) {
+ index := -1 // index < 0 => not inlined
if n != nil && n.Func != nil && len(n.Func.Inl.Slice()) != 0 {
// when lazily typechecking inlined bodies, some re-exported ones may not have been typechecked yet.
// currently that can leave unresolved ONONAMEs in import-dot-ed packages in the wrong package
if Debug['l'] < 2 {
typecheckinl(n)
}
+ index = len(p.inlined) // index >= 0 => inlined
p.inlined = append(p.inlined, n.Func)
- return len(p.inlined) - 1 // index >= 0 => inlined
}
- return -1 // index < 0 => not inlined
+ p.int(index)
}
-func (p *exporter) body(i int, f *Func) {
- p.int(i)
- p.block(f.Inl.Slice())
+func (p *exporter) nodeList(list Nodes) {
+ it := nodeSeqIterate(list)
+ if p.trace {
+ p.tracef("[ ")
+ }
+ p.int(it.Len())
+ if p.trace {
+ if it.Len() <= 1 {
+ p.tracef("] {}")
+ } else {
+ p.tracef("] {>")
+ defer p.tracef("<\n}")
+ }
+ }
+ for ; !it.Done(); it.Next() {
+ if p.trace {
+ p.tracef("\n")
+ }
+ p.node(it.N())
+ }
+}
+
+func (p *exporter) node(n *Node) {
+ p.op(n.Op)
+
+ switch n.Op {
+ // names
+ case ONAME, OPACK, ONONAME:
+ p.sym(n.Sym)
+
+ case OTYPE:
+ if p.bool(n.Type == nil) {
+ p.sym(n.Sym)
+ } else {
+ p.typ(n.Type)
+ }
+
+ case OLITERAL:
+ p.typ(unidealType(n.Type, n.Val()))
+ p.value(n.Val())
+
+ // expressions
+ case OMAKEMAP, OMAKECHAN, OMAKESLICE:
+ if p.bool(n.List.Len() != 0) {
+ p.nodeList(n.List) // TODO(gri) do we still need to export this?
+ }
+ p.nodesOrNil(n.Left, n.Right)
+ p.typ(n.Type)
+
+ case OPLUS, OMINUS, OADDR, OCOM, OIND, ONOT, ORECV:
+ p.node(n.Left)
+
+ case OADD, OAND, OANDAND, OANDNOT, ODIV, OEQ, OGE, OGT, OLE, OLT,
+ OLSH, OMOD, OMUL, ONE, OOR, OOROR, ORSH, OSEND,
+ OSUB, OXOR:
+ p.node(n.Left)
+ p.node(n.Right)
+
+ case OADDSTR:
+ p.nodeList(n.List)
+
+ case OPTRLIT:
+ p.node(n.Left)
+
+ case OSTRUCTLIT:
+ p.typ(n.Type)
+ p.nodeList(n.List)
+ p.bool(n.Implicit)
+
+ case OARRAYLIT, OMAPLIT:
+ p.typ(n.Type)
+ p.nodeList(n.List)
+ p.bool(n.Implicit)
+
+ case OKEY:
+ p.nodesOrNil(n.Left, n.Right)
+
+ case OCOPY, OCOMPLEX:
+ p.node(n.Left)
+ p.node(n.Right)
+
+ case OCONV, OCONVIFACE, OCONVNOP, OARRAYBYTESTR, OARRAYRUNESTR, OSTRARRAYBYTE, OSTRARRAYRUNE, ORUNESTR:
+ p.typ(n.Type)
+ if p.bool(n.Left != nil) {
+ p.node(n.Left)
+ } else {
+ p.nodeList(n.List)
+ }
+
+ case ODOT, ODOTPTR, ODOTMETH, ODOTINTER, OXDOT:
+ p.node(n.Left)
+ p.sym(n.Right.Sym)
+
+ case ODOTTYPE, ODOTTYPE2:
+ p.node(n.Left)
+ if p.bool(n.Right != nil) {
+ p.node(n.Right)
+ } else {
+ p.typ(n.Type)
+ }
+
+ case OINDEX, OINDEXMAP, OSLICE, OSLICESTR, OSLICEARR, OSLICE3, OSLICE3ARR:
+ p.node(n.Left)
+ p.node(n.Right)
+
+ case OREAL, OIMAG, OAPPEND, OCAP, OCLOSE, ODELETE, OLEN, OMAKE, ONEW, OPANIC,
+ ORECOVER, OPRINT, OPRINTN:
+ p.nodesOrNil(n.Left, nil)
+ p.nodeList(n.List)
+ p.bool(n.Isddd)
+
+ case OCALL, OCALLFUNC, OCALLMETH, OCALLINTER, OGETG:
+ p.node(n.Left)
+ p.nodeList(n.List)
+ p.bool(n.Isddd)
+
+ case OCMPSTR, OCMPIFACE:
+ p.node(n.Left)
+ p.node(n.Right)
+ p.int(int(n.Etype))
+
+ case OPAREN:
+ p.node(n.Left)
+
+ // statements
+ case ODCL:
+ p.node(n.Left) // TODO(gri) compare with fmt code
+ p.typ(n.Left.Type)
+
+ case OAS, OASWB:
+ p.nodesOrNil(n.Left, n.Right) // n.Right might be nil
+ p.bool(n.Colas)
+
+ case OASOP:
+ p.node(n.Left)
+ // n.Implicit indicates ++ or --, n.Right is 1 in those cases
+ p.node(n.Right)
+ p.int(int(n.Etype))
+
+ case OAS2:
+ p.nodeList(n.List)
+ p.nodeList(n.Rlist)
+
+ case OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
+ p.nodeList(n.List)
+ p.nodeList(n.Rlist)
+
+ case ORETURN:
+ p.nodeList(n.List)
+
+ case OPROC, ODEFER:
+ p.node(n.Left)
+
+ case OIF:
+ p.nodeList(n.Ninit)
+ p.node(n.Left)
+ p.nodeList(n.Nbody)
+ p.nodeList(n.Rlist)
+
+ case OFOR:
+ p.nodeList(n.Ninit)
+ p.nodesOrNil(n.Left, n.Right)
+ p.nodeList(n.Nbody)
+
+ case ORANGE:
+ if p.bool(n.List.Len() != 0) {
+ p.nodeList(n.List)
+ }
+ p.node(n.Right)
+ p.nodeList(n.Nbody)
+
+ case OSELECT, OSWITCH:
+ p.nodeList(n.Ninit)
+ p.nodesOrNil(n.Left, nil)
+ p.nodeList(n.List)
+
+ case OCASE, OXCASE:
+ if p.bool(n.List.Len() != 0) {
+ p.nodeList(n.List)
+ }
+ p.nodeList(n.Nbody)
+
+ case OBREAK, OCONTINUE, OGOTO, OFALL, OXFALL:
+ p.nodesOrNil(n.Left, nil)
+
+ case OEMPTY:
+ // nothing to do
+
+ case OLABEL:
+ p.node(n.Left)
+
+ default:
+ Fatalf("exporter: CANNOT EXPORT: %s\nPlease notify gri@\n", opnames[n.Op])
+ }
+}
+
+func (p *exporter) nodesOrNil(a, b *Node) {
+ ab := 0
+ if a != nil {
+ ab |= 1
+ }
+ if b != nil {
+ ab |= 2
+ }
+ p.int(ab)
+ if ab&1 != 0 {
+ p.node(a)
+ }
+ if ab&2 != 0 {
+ p.node(b)
+ }
+}
+
+func (p *exporter) sym(s *Sym) {
+ name := s.Name
+ p.string(name)
+ if name == "?" || name != "_" && name != "" && !exportname(name) {
+ p.pkg(s.Pkg)
+ }
}
-func (p *exporter) block(list []*Node) {
- p.int(len(list))
- for _, n := range list {
- p.stmt(n)
+func (p *exporter) bool(b bool) bool {
+ x := 0
+ if b {
+ x = 1
}
+ p.int(x)
+ return b
}
-func (p *exporter) stmt(n *Node) {
- // TODO(gri) do something sensible here
- p.string("body")
+func (p *exporter) op(op Op) {
+ p.int(int(op))
+ if p.trace {
+ p.tracef("%s ", opnames[op])
+ }
}
// ----------------------------------------------------------------------------
func (p *exporter) index(marker byte, index int) {
if index < 0 {
- Fatalf("invalid index < 0")
+ Fatalf("exporter: invalid index < 0")
}
if debugFormat {
p.marker('t')
func (p *exporter) tag(tag int) {
if tag >= 0 {
- Fatalf("invalid tag >= 0")
+ Fatalf("exporter: invalid tag >= 0")
}
if debugFormat {
p.marker('t')
fractionTag // not used by gc
complexTag
stringTag
+ nilTag
)
// Debugging support.
case CTNIL:
return Types[TNIL]
}
- Fatalf("unknown Ctype")
+ Fatalf("exporter: unknown Ctype")
return nil
}
-var (
- idealint = typ(TIDEAL)
- idealrune = typ(TIDEAL)
- idealfloat = typ(TIDEAL)
- idealcomplex = typ(TIDEAL)
-)
-
var predecl []*Type // initialized lazily
func predeclared() []*Type {
"cmd/compile/internal/big"
"cmd/internal/obj"
"encoding/binary"
+ "fmt"
)
// The overall structure of Import is symmetric to Export: For each
case 'd':
p.debugFormat = true
default:
- Fatalf("invalid encoding format in export data: got %q; want 'c' or 'd'", format)
+ Fatalf("importer: invalid encoding format in export data: got %q; want 'c' or 'd'", format)
}
// --- generic export data ---
if v := p.string(); v != exportVersion {
- Fatalf("unknown export data version: %s", v)
+ Fatalf("importer: unknown export data version: %s", v)
}
// populate typList with predeclared "known" types
// read package data
p.pkg()
if p.pkgList[0] != importpkg {
- Fatalf("imported package not found in pkgList[0]")
+ Fatalf("importer: imported package not found in pkgList[0]")
}
// read compiler-specific flags
importpkg.Safe = p.string() == "safe"
// defer some type-checking until all types are read in completely
- // (go.y:import_there)
+ // (parser.go:import_package)
tcok := typecheckok
typecheckok = true
defercheckwidth()
sym := p.localname()
typ := p.typ()
val := p.value(typ)
- if isideal(typ) {
- // canonicalize ideal types
- typ = Types[TIDEAL]
- }
- importconst(sym, typ, nodlit(val))
+ importconst(sym, idealType(typ), nodlit(val))
}
// read vars
// read funcs
for i := p.int(); i > 0; i-- {
- // go.y:hidden_fndcl
+ // parser.go:hidden_fndcl
sym := p.localname()
typ := p.typ()
- // TODO(gri) fix this
- p.int() // read and discard index of inlined function body for now
+ inl := p.int()
importsym(sym, ONAME)
if sym.Def != nil && sym.Def.Op == ONAME && !Eqtype(typ, sym.Def.Type) {
- Fatalf("inconsistent definition for func %v during import\n\t%v\n\t%v", sym, sym.Def.Type, typ)
+ Fatalf("importer: inconsistent definition for func %v during import\n\t%v\n\t%v", sym, sym.Def.Type, typ)
}
n := newfuncname(sym)
declare(n, PFUNC)
funchdr(n)
- // go.y:hidden_import
+ // parser.go:hidden_import
n.Func.Inl.Set(nil)
+ if inl >= 0 {
+ if inl != len(p.inlined) {
+ panic("inlined body list inconsistent")
+ }
+ p.inlined = append(p.inlined, n.Func)
+ }
funcbody(n)
importlist = append(importlist, n) // TODO(gri) do this only if body is inlineable?
}
// --- compiler-specific export data ---
- for i := p.int(); i > 0; i-- {
- p.body()
+	// read inlined function bodies
+ n := p.int()
+ for i := 0; i < n; i++ {
+ body := p.nodeList()
+ const hookup = false // TODO(gri) enable and remove this condition
+ if hookup {
+ p.inlined[i].Inl.Set(body)
+ }
}
// --- end of export data ---
testdclstack() // debugging only
}
+func idealType(typ *Type) *Type {
+ if isideal(typ) {
+ // canonicalize ideal types
+ typ = Types[TIDEAL]
+ }
+ return typ
+}
+
type importer struct {
in *obj.Biobuf
buf []byte // for reading strings
bufarray [64]byte // initial underlying array for buf, large enough to avoid allocation when compiling std lib
pkgList []*Pkg
typList []*Type
+ inlined []*Func
debugFormat bool
read int // bytes read
// otherwise, i is the package tag (< 0)
if i != packageTag {
- Fatalf("expected package tag, found tag = %d", i)
+ Fatalf("importer: expected package tag, found tag = %d", i)
}
// read package data
// we should never see an empty package name
if name == "" {
- Fatalf("empty package name in import")
+ Fatalf("importer: empty package name in import")
}
// we should never see a bad import path
if isbadimport(path) {
- Fatalf("bad path in import: %q", path)
+ Fatalf("importer: bad path in import: %q", path)
}
// an empty path denotes the package we are currently importing
if pkg.Name == "" {
pkg.Name = name
} else if pkg.Name != name {
- Fatalf("inconsistent package names: got %s; want %s (path = %s)", pkg.Name, name, path)
+ Fatalf("importer: inconsistent package names: got %s; want %s (path = %s)", pkg.Name, name, path)
}
p.pkgList = append(p.pkgList, pkg)
}
func (p *importer) localname() *Sym {
- // go.y:hidden_importsym
+ // parser.go:hidden_importsym
name := p.string()
if name == "" {
- Fatalf("unexpected anonymous name")
+ Fatalf("importer: unexpected anonymous name")
}
- structpkg = importpkg // go.y:hidden_pkg_importsym
+ structpkg = importpkg // parser.go:hidden_pkg_importsym
return importpkg.Lookup(name)
}
var t *Type
switch i {
case namedTag:
- // go.y:hidden_importsym
+ // parser.go:hidden_importsym
tsym := p.qualifiedName()
- // go.y:hidden_pkgtype
+ // parser.go:hidden_pkgtype
t = pkgtype(tsym)
importsym(tsym, OTYPE)
p.typList = append(p.typList, t)
// read underlying type
- // go.y:hidden_type
+ // parser.go:hidden_type
t0 := p.typ()
- importtype(t, t0) // go.y:hidden_import
+ importtype(t, t0) // parser.go:hidden_import
// interfaces don't have associated methods
if t0.Etype == TINTER {
// read associated methods
for i := p.int(); i > 0; i-- {
- // go.y:hidden_fndcl
+ // parser.go:hidden_fndcl
name := p.string()
recv := p.paramList() // TODO(gri) do we need a full param list for the receiver?
params := p.paramList()
result := p.paramList()
- // TODO(gri) fix this
- p.int() // read and discard index of inlined function body for now
+ inl := p.int()
pkg := localpkg
if !exportname(name) {
}
sym := pkg.Lookup(name)
- n := methodname1(newname(sym), recv.N.Right)
- n.Type = functype(recv.N, params, result)
+ n := methodname1(newname(sym), recv[0].Right)
+ n.Type = functype(recv[0], params, result)
checkwidth(n.Type)
// addmethod uses the global variable structpkg to verify consistency
{
}
funchdr(n)
- // (comment from go.y)
+ // (comment from parser.go)
// inl.C's inlnode in on a dotmeth node expects to find the inlineable body as
// (dotmeth's type).Nname.Inl, and dotmeth's type has been pulled
// out by typecheck's lookdot as this $$.ttype. So by providing
// this back link here we avoid special casing there.
n.Type.Nname = n
- // go.y:hidden_import
+ // parser.go:hidden_import
n.Func.Inl.Set(nil)
+ if inl >= 0 {
+ if inl != len(p.inlined) {
+ panic("inlined body list inconsistent")
+ }
+ p.inlined = append(p.inlined, n.Func)
+ }
funcbody(n)
importlist = append(importlist, n) // TODO(gri) do this only if body is inlineable?
}
case interfaceTag:
t = p.newtyp(TINTER)
if p.int() != 0 {
- Fatalf("unexpected embedded interface")
+ Fatalf("importer: unexpected embedded interface")
}
tointerface0(t, p.methodList())
t.Type = p.typ()
default:
- Fatalf("unexpected type (tag = %d)", i)
+ Fatalf("importer: unexpected type (tag = %d)", i)
}
if t == nil {
- Fatalf("nil type (type tag = %d)", i)
+ Fatalf("importer: nil type (type tag = %d)", i)
}
return t
return pkg.Lookup(name)
}
-// go.y:hidden_structdcl_list
-func (p *importer) fieldList() *NodeList {
+// parser.go:hidden_structdcl_list
+func (p *importer) fieldList() []*Node {
i := p.int()
if i == 0 {
return nil
}
- n := list1(p.field())
- for i--; i > 0; i-- {
- n = list(n, p.field())
+ n := make([]*Node, i)
+ for i := range n {
+ n[i] = p.field()
}
return n
}
-// go.y:hidden_structdcl
+// parser.go:hidden_structdcl
func (p *importer) field() *Node {
sym := p.fieldName()
typ := p.typ()
return
}
-// go.y:hidden_interfacedcl_list
-func (p *importer) methodList() *NodeList {
+// parser.go:hidden_interfacedcl_list
+func (p *importer) methodList() []*Node {
i := p.int()
if i == 0 {
return nil
}
- n := list1(p.method())
- for i--; i > 0; i-- {
- n = list(n, p.method())
+ n := make([]*Node, i)
+ for i := range n {
+ n[i] = p.method()
}
return n
}
-// go.y:hidden_interfacedcl
+// parser.go:hidden_interfacedcl
func (p *importer) method() *Node {
sym := p.fieldName()
params := p.paramList()
return Nod(ODCLFIELD, newname(sym), typenod(functype(fakethis(), params, result)))
}
-// go.y:sym,hidden_importsym
+// parser.go:sym,hidden_importsym
func (p *importer) fieldName() *Sym {
name := p.string()
pkg := localpkg
if name == "_" {
// During imports, unqualified non-exported identifiers are from builtinpkg
- // (see go.y:sym). The binary exporter only exports blank as a non-exported
+ // (see parser.go:sym). The binary exporter only exports blank as a non-exported
// identifier without qualification.
pkg = builtinpkg
} else if name == "?" || name != "" && !exportname(name) {
return pkg.Lookup(name)
}
-// go.y:ohidden_funarg_list
-func (p *importer) paramList() *NodeList {
+// parser.go:ohidden_funarg_list
+func (p *importer) paramList() []*Node {
i := p.int()
if i == 0 {
return nil
named = false
}
// i > 0
- n := list1(p.param(named))
- i--
- for ; i > 0; i-- {
- n = list(n, p.param(named))
+ n := make([]*Node, i)
+ for i := range n {
+ n[i] = p.param(named)
}
return n
}
-// go.y:hidden_funarg
+// parser.go:hidden_funarg
func (p *importer) param(named bool) *Node {
typ := p.typ()
if named {
name := p.string()
if name == "" {
- Fatalf("expected named parameter")
+ Fatalf("importer: expected named parameter")
}
// The parameter package doesn't matter; it's never consulted.
- // We use the builtinpkg per go.y:sym (line 1181).
+ // We use the builtinpkg per parser.go:sym (line 1181).
n.Left = newname(builtinpkg.Lookup(name))
}
switch tag := p.tagOrIndex(); tag {
case falseTag:
x.U = false
+
case trueTag:
x.U = true
+
case int64Tag:
u := new(Mpint)
Mpmovecfix(u, p.int64())
u.Rune = typ == idealrune
x.U = u
+
case floatTag:
f := newMpflt()
p.float(f)
break
}
x.U = f
+
case complexTag:
u := new(Mpcplx)
p.float(&u.Real)
p.float(&u.Imag)
x.U = u
+
case stringTag:
x.U = p.string()
+
+ case nilTag:
+ x.U = new(NilVal)
+
default:
- Fatalf("unexpected value tag %d", tag)
+ Fatalf("importer: unexpected value tag %d", tag)
}
// verify ideal type
if isideal(typ) && untype(x.Ctype()) != typ {
- Fatalf("value %v and type %v don't match", x, typ)
+ Fatalf("importer: value %v and type %v don't match", x, typ)
}
return
// ----------------------------------------------------------------------------
// Inlined function bodies
-func (p *importer) body() {
- p.int()
- p.block()
+// parser.go:stmt_list
+func (p *importer) nodeList() []*Node {
+ c := p.int()
+ s := make([]*Node, c)
+ for i := range s {
+ s[i] = p.node()
+ }
+ return s
}
-func (p *importer) block() {
- for i := p.int(); i > 0; i-- {
- p.stmt()
+func (p *importer) node() *Node {
+ // TODO(gri) eventually we may need to allocate in each branch
+ n := Nod(p.op(), nil, nil)
+
+ switch n.Op {
+ // names
+ case ONAME, OPACK, ONONAME:
+ name := mkname(p.sym())
+ // TODO(gri) decide what to do here (this code throws away n)
+ /*
+ if name.Op != n.Op {
+ Fatalf("importer: got node op = %s; want %s", opnames[name.Op], opnames[n.Op])
+ }
+ */
+ n = name
+
+ case OTYPE:
+ if p.bool() {
+ n.Sym = p.sym()
+ } else {
+ n.Type = p.typ()
+ }
+
+ case OLITERAL:
+ typ := p.typ()
+ n.Type = idealType(typ)
+ n.SetVal(p.value(typ))
+
+ // expressions
+ case OMAKEMAP, OMAKECHAN, OMAKESLICE:
+ if p.bool() {
+ n.List.Set(p.nodeList())
+ }
+ n.Left, n.Right = p.nodesOrNil()
+ n.Type = p.typ()
+
+ case OPLUS, OMINUS, OADDR, OCOM, OIND, ONOT, ORECV:
+ n.Left = p.node()
+
+ case OADD, OAND, OANDAND, OANDNOT, ODIV, OEQ, OGE, OGT, OLE, OLT,
+ OLSH, OMOD, OMUL, ONE, OOR, OOROR, ORSH, OSEND,
+ OSUB, OXOR:
+ n.Left = p.node()
+ n.Right = p.node()
+
+ case OADDSTR:
+ n.List.Set(p.nodeList())
+
+ case OPTRLIT:
+ n.Left = p.node()
+
+ case OSTRUCTLIT:
+ n.Type = p.typ()
+ n.List.Set(p.nodeList())
+ n.Implicit = p.bool()
+
+ case OARRAYLIT, OMAPLIT:
+ n.Type = p.typ()
+ n.List.Set(p.nodeList())
+ n.Implicit = p.bool()
+
+ case OKEY:
+ n.Left, n.Right = p.nodesOrNil()
+
+ case OCOPY, OCOMPLEX:
+ n.Left = p.node()
+ n.Right = p.node()
+
+ case OCONV, OCONVIFACE, OCONVNOP, OARRAYBYTESTR, OARRAYRUNESTR, OSTRARRAYBYTE, OSTRARRAYRUNE, ORUNESTR:
+ // n.Type = p.typ()
+ // if p.bool() {
+ // n.Left = p.node()
+ // } else {
+ // setNodeSeq(&n.List, p.nodeList())
+ // }
+ x := Nod(OCALL, p.typ().Nod, nil)
+ if p.bool() {
+ x.List.Set([]*Node{p.node()})
+ } else {
+ x.List.Set(p.nodeList())
+ }
+ return x
+
+ case ODOT, ODOTPTR, ODOTMETH, ODOTINTER, OXDOT:
+ // see parser.new_dotname
+ obj := p.node()
+ sel := p.sym()
+ if obj.Op == OPACK {
+ s := restrictlookup(sel.Name, obj.Name.Pkg)
+ obj.Used = true
+ return oldname(s)
+ }
+ return Nod(OXDOT, obj, newname(sel))
+
+ case ODOTTYPE, ODOTTYPE2:
+ n.Left = p.node()
+ if p.bool() {
+ n.Right = p.node()
+ } else {
+ n.Type = p.typ()
+ }
+
+ case OINDEX, OINDEXMAP, OSLICE, OSLICESTR, OSLICEARR, OSLICE3, OSLICE3ARR:
+ n.Left = p.node()
+ n.Right = p.node()
+
+ case OREAL, OIMAG, OAPPEND, OCAP, OCLOSE, ODELETE, OLEN, OMAKE, ONEW, OPANIC,
+ ORECOVER, OPRINT, OPRINTN:
+ n.Left, _ = p.nodesOrNil()
+ n.List.Set(p.nodeList())
+ n.Isddd = p.bool()
+
+ case OCALL, OCALLFUNC, OCALLMETH, OCALLINTER, OGETG:
+ n.Left = p.node()
+ n.List.Set(p.nodeList())
+ n.Isddd = p.bool()
+
+ case OCMPSTR, OCMPIFACE:
+ n.Left = p.node()
+ n.Right = p.node()
+ n.Etype = EType(p.int())
+
+ case OPAREN:
+ n.Left = p.node()
+
+ // statements
+ case ODCL:
+ n.Left = p.node() // TODO(gri) compare with fmt code
+ n.Left.Type = p.typ()
+
+ case OAS:
+ n.Left, n.Right = p.nodesOrNil()
+ n.Colas = p.bool() // TODO(gri) what about complexinit?
+
+ case OASOP:
+ n.Left = p.node()
+ n.Right = p.node()
+ n.Etype = EType(p.int())
+
+ case OAS2, OASWB:
+ n.List.Set(p.nodeList())
+ n.Rlist.Set(p.nodeList())
+
+ case OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
+ n.List.Set(p.nodeList())
+ n.Rlist.Set(p.nodeList())
+
+ case ORETURN:
+ n.List.Set(p.nodeList())
+
+ case OPROC, ODEFER:
+ n.Left = p.node()
+
+ case OIF:
+ n.Ninit.Set(p.nodeList())
+ n.Left = p.node()
+ n.Nbody.Set(p.nodeList())
+ n.Rlist.Set(p.nodeList())
+
+ case OFOR:
+ n.Ninit.Set(p.nodeList())
+ n.Left, n.Right = p.nodesOrNil()
+ n.Nbody.Set(p.nodeList())
+
+ case ORANGE:
+ if p.bool() {
+ n.List.Set(p.nodeList())
+ }
+ n.Right = p.node()
+ n.Nbody.Set(p.nodeList())
+
+ case OSELECT, OSWITCH:
+ n.Ninit.Set(p.nodeList())
+ n.Left, _ = p.nodesOrNil()
+ n.List.Set(p.nodeList())
+
+ case OCASE, OXCASE:
+ if p.bool() {
+ n.List.Set(p.nodeList())
+ }
+ n.Nbody.Set(p.nodeList())
+
+ case OBREAK, OCONTINUE, OGOTO, OFALL, OXFALL:
+ n.Left, _ = p.nodesOrNil()
+
+ case OEMPTY:
+ // nothing to do
+
+ case OLABEL:
+ n.Left = p.node()
+
+ default:
+ panic(fmt.Sprintf("importer: %s (%d) node not yet supported", opnames[n.Op], n.Op))
+ }
+
+ return n
+}
+
+func (p *importer) nodesOrNil() (a, b *Node) {
+ ab := p.int()
+ if ab&1 != 0 {
+ a = p.node()
}
+ if ab&2 != 0 {
+ b = p.node()
+ }
+ return
+}
+
+func (p *importer) sym() *Sym {
+ return p.fieldName()
+}
+
+func (p *importer) bool() bool {
+ return p.int() != 0
}
-func (p *importer) stmt() {
- // TODO(gri) do something sensible here
- p.string()
+func (p *importer) op() Op {
+ return Op(p.int())
}
// ----------------------------------------------------------------------------
+// int reads an int64 from the import data and checks that it fits in
+// the host's int type; an out-of-range value is a fatal error.
func (p *importer) int() int {
x := p.int64()
if int64(int(x)) != x {
- Fatalf("exported integer too large")
+ Fatalf("importer: exported integer too large")
}
return int(x)
}
} else {
p.buf = p.buf[:n]
}
- for i := 0; i < n; i++ {
+ for i := range p.buf {
p.buf[i] = p.byte()
}
return string(p.buf)
+// marker consumes one synchronization byte and verifies it is want,
+// then reads the position recorded in the export data and compares it
+// against the number of bytes read so far, catching any
+// desynchronization between exporter and importer early.
func (p *importer) marker(want byte) {
if got := p.byte(); got != want {
- Fatalf("incorrect marker: got %c; want %c (pos = %d)", got, want, p.read)
+ Fatalf("importer: incorrect marker: got %c; want %c (pos = %d)", got, want, p.read)
}
pos := p.read
if n := int(p.rawInt64()); n != pos {
- Fatalf("incorrect position: got %d; want %d", n, pos)
+ Fatalf("importer: incorrect position: got %d; want %d", n, pos)
}
}
+// rawInt64 reads an uninterpreted varint (via binary.ReadVarint)
+// directly from the input; any read error is fatal.
func (p *importer) rawInt64() int64 {
i, err := binary.ReadVarint(p)
if err != nil {
- Fatalf("read error: %v", err)
+ Fatalf("importer: read error: %v", err)
}
return i
}
c := obj.Bgetc(p.in)
p.read++
if c < 0 {
- Fatalf("read error")
+ Fatalf("importer: read error")
}
if c == '|' {
c = obj.Bgetc(p.in)
p.read++
if c < 0 {
- Fatalf("read error")
+ Fatalf("importer: read error")
}
switch c {
case 'S':
case '|':
// nothing to do
default:
- Fatalf("unexpected escape sequence in export data")
+ Fatalf("importer: unexpected escape sequence in export data")
}
}
return byte(c)
}
}
- var a int
+ var a obj.As
switch n.Op {
default:
Dump("cgen", n)
Cgenr(n, &src, nil)
}
- wbVar := syslook("writeBarrier", 0)
+ wbVar := syslook("writeBarrier")
wbEnabled := Nod(ODOT, wbVar, newname(wbVar.Type.Type.Sym))
wbEnabled = typecheck(&wbEnabled, Erv)
pbr := Thearch.Ginscmp(ONE, Types[TUINT8], wbEnabled, Nodintconst(0), -1)
cgen_callinter(n, nil, 0)
}
- var flist Iter
- fp := Structfirst(&flist, Getoutarg(n.Left.Type))
+ fp, _ := IterFields(n.Left.Type.Results())
*a = Node{}
a.Op = OINDREG
a.Reg = int16(Thearch.REGSP)
func bvgenjump(n, res *Node, wantTrue, geninit bool) {
init := n.Ninit
if !geninit {
- n.Ninit = nil
+ n.Ninit.Set(nil)
}
p1 := Gbranch(obj.AJMP, nil, 0)
p2 := Pc
Bgen(n, wantTrue, 0, p2)
Thearch.Gmove(Nodbool(false), res)
Patch(p3, Pc)
- n.Ninit = init
+ n.Ninit.Set(init.Slice())
}
// bgenx is the backend for Bgen and Bvgen.
}
if n.Type.Etype != TBOOL {
- Fatalf("bgen: bad type %v for %v", n.Type, Oconv(int(n.Op), 0))
+ Fatalf("bgen: bad type %v for %v", n.Type, Oconv(n.Op, 0))
}
for n.Op == OCONVNOP {
if Isfloat[nr.Type.Etype] {
// Brcom is not valid on floats when NaN is involved.
ll := n.Ninit // avoid re-genning Ninit
- n.Ninit = nil
+ n.Ninit.Set(nil)
if genval {
bgenx(n, res, true, likely, to)
Thearch.Gins(Thearch.Optoas(OXOR, Types[TUINT8]), Nodintconst(1), res) // res = !res
- n.Ninit = ll
+ n.Ninit.Set(ll.Slice())
return
}
p1 := Gbranch(obj.AJMP, nil, 0)
bgenx(n, res, true, -likely, p2)
Patch(Gbranch(obj.AJMP, nil, 0), to)
Patch(p2, Pc)
- n.Ninit = ll
+ n.Ninit.Set(ll.Slice())
return
}
t = t.Type
}
- var flist Iter
- t = Structfirst(&flist, Getoutarg(t))
+ t, _ = IterFields(t.Results())
if t != nil {
return t.Width + Ctxt.FixedFrameSize()
}
func cgen_callinter(n *Node, res *Node, proc int) {
i := n.Left
if i.Op != ODOTINTER {
- Fatalf("cgen_callinter: not ODOTINTER %v", Oconv(int(i.Op), 0))
+ Fatalf("cgen_callinter: not ODOTINTER %v", Oconv(i.Op, 0))
}
f := i.Right // field
if f.Op != ONAME {
- Fatalf("cgen_callinter: not ONAME %v", Oconv(int(f.Op), 0))
+ Fatalf("cgen_callinter: not ONAME %v", Oconv(f.Op, 0))
}
i = i.Left // interface
t = t.Type
}
- var flist Iter
- fp := Structfirst(&flist, Getoutarg(t))
+ fp, _ := IterFields(t.Results())
if fp == nil {
Fatalf("cgen_callret: nil")
}
t = t.Type
}
- var flist Iter
- fp := Structfirst(&flist, Getoutarg(t))
+ fp, _ := IterFields(t.Results())
if fp == nil {
Fatalf("cgen_aret: nil")
}
if hasdefer {
Ginscall(Deferreturn, 0)
}
- Genslice(Curfn.Func.Exit.Slice())
+ Genlist(Curfn.Func.Exit)
p := Thearch.Gins(obj.ARET, nil, nil)
if n != nil && n.Op == ORETJMP {
p.To.Type = obj.TYPE_MEM
Dump("cgen_append-n", n)
Dump("cgen_append-res", res)
}
- if res.Op != ONAME && !samesafeexpr(res, n.List.N) {
+ if res.Op != ONAME && !samesafeexpr(res, n.List.First()) {
Dump("cgen_append-n", n)
Dump("cgen_append-res", res)
Fatalf("append not lowered")
}
- for l := n.List; l != nil; l = l.Next {
- if l.N.Ullman >= UINF {
+ for _, n1 := range n.List.Slice() {
+ if n1.Ullman >= UINF {
Fatalf("append with function call arguments")
}
}
//
// If res and src are the same, we can avoid writing to base and cap
// unless we grow the underlying array.
- needFullUpdate := !samesafeexpr(res, n.List.N)
+ needFullUpdate := !samesafeexpr(res, n.List.First())
// Copy src triple into base, len, cap.
base := temp(Types[Tptr])
cap := temp(Types[TUINT])
var src Node
- Igen(n.List.N, &src, nil)
+ Igen(n.List.First(), &src, nil)
src.Type = Types[Tptr]
Thearch.Gmove(&src, base)
src.Type = Types[TUINT]
var rlen Node
Regalloc(&rlen, Types[TUINT], nil)
Thearch.Gmove(len, &rlen)
- Thearch.Ginscon(Thearch.Optoas(OADD, Types[TUINT]), int64(count(n.List)-1), &rlen)
+ Thearch.Ginscon(Thearch.Optoas(OADD, Types[TUINT]), int64(n.List.Len()-1), &rlen)
p := Thearch.Ginscmp(OLE, Types[TUINT], &rlen, cap, +1)
// Note: rlen and src are Regrealloc'ed below at the target of the
// branch we just emitted; do not reuse these Go variables for
arg.Xoffset += int64(Widthptr)
Regfree(&rlen)
- fn := syslook("growslice", 1)
- substArgTypes(fn, res.Type.Type, res.Type.Type)
+ fn := syslook("growslice")
+ substArgTypes(&fn, res.Type.Type, res.Type.Type)
Ginscall(fn, 0)
if Widthptr == 4 && Widthreg == 8 {
dst.Xoffset += int64(Widthptr)
Regalloc(&r1, Types[TUINT], nil)
Thearch.Gmove(len, &r1)
- Thearch.Ginscon(Thearch.Optoas(OADD, Types[TUINT]), int64(count(n.List)-1), &r1)
+ Thearch.Ginscon(Thearch.Optoas(OADD, Types[TUINT]), int64(n.List.Len()-1), &r1)
Thearch.Gmove(&r1, &dst)
Regfree(&r1)
dst.Xoffset += int64(Widthptr)
// is not going to use a write barrier.
i := 0
var r2 Node
- for l := n.List.Next; l != nil; l = l.Next {
+ it := nodeSeqIterate(n.List)
+ it.Next()
+ for ; !it.Done(); it.Next() {
Regalloc(&r1, Types[Tptr], nil)
Thearch.Gmove(base, &r1)
Regalloc(&r2, Types[TUINT], nil)
} else if w == 1 {
Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), &r2, &r1)
} else {
- Thearch.Ginscon(Thearch.Optoas(OMUL, Types[TUINT]), int64(w), &r2)
+ Thearch.Ginscon(Thearch.Optoas(OMUL, Types[TUINT]), w, &r2)
Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), &r2, &r1)
}
Regfree(&r2)
r1.Op = OINDREG
r1.Type = res.Type.Type
- cgen_wb(l.N, &r1, needwritebarrier(&r1, l.N))
+ cgen_wb(it.N(), &r1, needwritebarrier(&r1, it.N()))
Regfree(&r1)
i++
}
regalloc = func(n *Node, t *Type, reuse *Node) {
Tempname(n, t)
}
- ginscon = func(as int, c int64, n *Node) {
+ ginscon = func(as obj.As, c int64, n *Node) {
var n1 Node
Regalloc(&n1, n.Type, n)
Thearch.Gmove(n, &n1)
Thearch.Gmove(&n1, n)
Regfree(&n1)
}
- gins = func(as int, f, t *Node) *obj.Prog {
+ gins = func(as obj.As, f, t *Node) *obj.Prog {
var n1 Node
Regalloc(&n1, t.Type, t)
Thearch.Gmove(t, &n1)
// references to these variables need to
// refer to the variables in the external
// function declared below; see walkclosure.
- n.List = ntype.List
+ n.List.Set(ntype.List.Slice())
- n.Rlist = ntype.Rlist
- ntype.List = nil
- ntype.Rlist = nil
- for l := n.List; l != nil; l = l.Next {
- name = l.N.Left
+ n.Rlist.Set(ntype.Rlist.Slice())
+ ntype.List.Set(nil)
+ ntype.Rlist.Set(nil)
+ for _, n1 := range n.List.Slice() {
+ name = n1.Left
if name != nil {
name = newname(name.Sym)
}
- a = Nod(ODCLFIELD, name, l.N.Right)
- a.Isddd = l.N.Isddd
+ a = Nod(ODCLFIELD, name, n1.Right)
+ a.Isddd = n1.Isddd
if name != nil {
name.Isddd = a.Isddd
}
- ntype.List = list(ntype.List, a)
+ ntype.List.Append(a)
}
-
- for l := n.Rlist; l != nil; l = l.Next {
- name = l.N.Left
+ for _, n2 := range n.Rlist.Slice() {
+ name = n2.Left
if name != nil {
name = newname(name.Sym)
}
- ntype.Rlist = list(ntype.Rlist, Nod(ODCLFIELD, name, l.N.Right))
+ ntype.Rlist.Append(Nod(ODCLFIELD, name, n2.Right))
}
}
Curfn = func_
olddd := decldepth
decldepth = 1
- typecheckslice(func_.Nbody.Slice(), Etop)
+ typechecklist(func_.Nbody.Slice(), Etop)
decldepth = olddd
Curfn = oldfn
}
// that begins by reading closure parameters.
xtype := Nod(OTFUNC, nil, nil)
- xtype.List = func_.List
- xtype.Rlist = func_.Rlist
+ xtype.List.Set(func_.List.Slice())
+ xtype.Rlist.Set(func_.Rlist.Slice())
// create the function
xfunc := Nod(ODCLFUNC, nil, nil)
func_.Func.Closure = xfunc
func_.Nbody.Set(nil)
- func_.List = nil
- func_.Rlist = nil
+ func_.List.Set(nil)
+ func_.Rlist.Set(nil)
return xfunc
}
func capturevars(xfunc *Node) {
var outer *Node
- lno := int(lineno)
+ lno := lineno
lineno = xfunc.Lineno
func_ := xfunc.Func.Closure
if v.Name.Byval {
how = "value"
}
- Warnl(int(v.Lineno), "%v capturing by %s: %v (addr=%v assign=%v width=%d)", name, how, v.Sym, v.Name.Param.Closure.Addrtaken, v.Name.Param.Closure.Assigned, int32(v.Type.Width))
+ Warnl(v.Lineno, "%v capturing by %s: %v (addr=%v assign=%v width=%d)", name, how, v.Sym, v.Name.Param.Closure.Addrtaken, v.Name.Param.Closure.Assigned, int32(v.Type.Width))
}
typecheck(&outer, Erv)
func_.Func.Enter.Append(outer)
}
- lineno = int32(lno)
+ lineno = lno
}
// transformclosure is called in a separate phase after escape analysis.
// It transform closure bodies to properly reference captured variables.
func transformclosure(xfunc *Node) {
- lno := int(lineno)
+ lno := lineno
lineno = xfunc.Lineno
func_ := xfunc.Func.Closure
// Get pointer to input arguments.
// We are going to insert captured variables before input args.
- param := &getinargx(f.Type).Type
+ param := &f.Type.Params().Type
original_args := *param // old input args
original_dcl := xfunc.Func.Dcl
xfunc.Func.Dcl = nil
if len(body) > 0 {
typecheckslice(body, Etop)
- walkstmtslice(body)
+ walkstmtlist(body)
xfunc.Func.Enter.Set(body)
xfunc.Func.Needctxt = true
}
}
- lineno = int32(lno)
+ lineno = lno
}
-func walkclosure(func_ *Node, init **NodeList) *Node {
+func walkclosure(func_ *Node, init *Nodes) *Node {
// If no closure vars, don't bother wrapping.
if len(func_.Func.Cvars.Slice()) == 0 {
return func_.Func.Closure.Func.Nname
typ := Nod(OTSTRUCT, nil, nil)
- typ.List = list1(Nod(ODCLFIELD, newname(Lookup(".F")), typenod(Types[TUINTPTR])))
+ typ.List.Set([]*Node{Nod(ODCLFIELD, newname(Lookup(".F")), typenod(Types[TUINTPTR]))})
var typ1 *Node
for _, v := range func_.Func.Cvars.Slice() {
if v.Op == OXXX {
if !v.Name.Byval {
typ1 = Nod(OIND, typ1, nil)
}
- typ.List = list(typ.List, Nod(ODCLFIELD, newname(v.Sym), typ1))
+ typ.List.Append(Nod(ODCLFIELD, newname(v.Sym), typ1))
}
clos := Nod(OCOMPLIT, nil, Nod(OIND, typ, nil))
clos.Esc = func_.Esc
clos.Right.Implicit = true
- clos.List = concat(list1(Nod(OCFUNC, func_.Func.Closure.Func.Nname, nil)), func_.Func.Enter.NodeList())
+ clos.List.Set(append([]*Node{Nod(OCFUNC, func_.Func.Closure.Func.Nname, nil)}, func_.Func.Enter.Slice()...))
// Force type conversion from *struct to the func type.
clos = Nod(OCONVNOP, clos, nil)
xtype := Nod(OTFUNC, nil, nil)
i := 0
- var l *NodeList
- var callargs *NodeList
+ var l []*Node
+ var callargs []*Node
ddd := false
xfunc := Nod(ODCLFUNC, nil, nil)
Curfn = xfunc
var fld *Node
var n *Node
- for t := getinargx(t0).Type; t != nil; t = t.Down {
+ for t, it := IterFields(t0.Params()); t != nil; t = it.Next() {
n = newname(Lookupf("a%d", i))
i++
n.Class = PPARAM
xfunc.Func.Dcl = append(xfunc.Func.Dcl, n)
- callargs = list(callargs, n)
+ callargs = append(callargs, n)
fld = Nod(ODCLFIELD, n, typenod(t.Type))
if t.Isddd {
fld.Isddd = true
ddd = true
}
- l = list(l, fld)
+ l = append(l, fld)
}
- xtype.List = l
+ xtype.List.Set(l)
i = 0
l = nil
- var retargs *NodeList
- for t := getoutargx(t0).Type; t != nil; t = t.Down {
+ var retargs []*Node
+ for t, it := IterFields(t0.Results()); t != nil; t = it.Next() {
n = newname(Lookupf("r%d", i))
i++
n.Class = PPARAMOUT
xfunc.Func.Dcl = append(xfunc.Func.Dcl, n)
- retargs = list(retargs, n)
- l = list(l, Nod(ODCLFIELD, n, typenod(t.Type)))
+ retargs = append(retargs, n)
+ l = append(l, Nod(ODCLFIELD, n, typenod(t.Type)))
}
- xtype.Rlist = l
+ xtype.Rlist.Set(l)
xfunc.Func.Dupok = true
xfunc.Func.Nname = newfuncname(sym)
}
call := Nod(OCALL, Nod(OXDOT, ptr, meth), nil)
- call.List = callargs
+ call.List.Set(callargs)
call.Isddd = ddd
if t0.Outtuple == 0 {
body = append(body, call)
} else {
n := Nod(OAS2, nil, nil)
- n.List = retargs
- n.Rlist = list1(call)
+ n.List.Set(retargs)
+ n.Rlist.Set([]*Node{call})
body = append(body, n)
n = Nod(ORETURN, nil, nil)
body = append(body, n)
return xfunc
}
-func walkpartialcall(n *Node, init **NodeList) *Node {
+func walkpartialcall(n *Node, init *Nodes) *Node {
// Create closure in the form of a composite literal.
// For x.M with receiver (x) type T, the generated code looks like:
//
}
typ := Nod(OTSTRUCT, nil, nil)
- typ.List = list1(Nod(ODCLFIELD, newname(Lookup("F")), typenod(Types[TUINTPTR])))
- typ.List = list(typ.List, Nod(ODCLFIELD, newname(Lookup("R")), typenod(n.Left.Type)))
+ typ.List.Set([]*Node{Nod(ODCLFIELD, newname(Lookup("F")), typenod(Types[TUINTPTR]))})
+ typ.List.Append(Nod(ODCLFIELD, newname(Lookup("R")), typenod(n.Left.Type)))
clos := Nod(OCOMPLIT, nil, Nod(OIND, typ, nil))
clos.Esc = n.Esc
clos.Right.Implicit = true
- clos.List = list1(Nod(OCFUNC, n.Func.Nname, nil))
- clos.List = list(clos.List, n.Left)
+ clos.List.Set([]*Node{Nod(OCFUNC, n.Func.Nname, nil)})
+ clos.List.Append(n.Left)
// Force type conversion from *struct to the func type.
clos = Nod(OCONVNOP, clos, nil)
// merge adjacent constants in the argument list.
case OADDSTR:
- var nr *Node
- var nl *Node
- var l2 *NodeList
- for l1 := n.List; l1 != nil; l1 = l1.Next {
- if Isconst(l1.N, CTSTR) && l1.Next != nil && Isconst(l1.Next.N, CTSTR) {
- // merge from l1 up to but not including l2
+ s := n.List.Slice()
+ for i1 := 0; i1 < len(s); i1++ {
+ if Isconst(s[i1], CTSTR) && i1+1 < len(s) && Isconst(s[i1+1], CTSTR) {
+ // merge from i1 up to but not including i2
var strs []string
- l2 = l1
- for l2 != nil && Isconst(l2.N, CTSTR) {
- nr = l2.N
- strs = append(strs, nr.Val().U.(string))
- l2 = l2.Next
+ i2 := i1
+ for i2 < len(s) && Isconst(s[i2], CTSTR) {
+ strs = append(strs, s[i2].Val().U.(string))
+ i2++
}
- nl = Nod(OXXX, nil, nil)
- *nl = *l1.N
+ nl := Nod(OXXX, nil, nil)
+ *nl = *s[i1]
nl.Orig = nl
nl.SetVal(Val{strings.Join(strs, "")})
- l1.N = nl
- l1.Next = l2
+ s[i1] = nl
+ s = append(s[:i1+1], s[i2:]...)
}
}
- // fix list end pointer.
- for l2 := n.List; l2 != nil; l2 = l2.Next {
- n.List.End = l2
- }
-
- // collapse single-constant list to single constant.
- if count(n.List) == 1 && Isconst(n.List.N, CTSTR) {
+ if len(s) == 1 && Isconst(s[0], CTSTR) {
n.Op = OLITERAL
- n.SetVal(n.List.N.Val())
+ n.SetVal(s[0].Val())
+ } else {
+ n.List.Set(s)
}
return
nr := n.Right
var rv Val
- var lno int
+ var lno int32
var wr EType
var v Val
var norig *Node
switch uint32(n.Op)<<16 | uint32(v.Ctype()) {
default:
if n.Diag == 0 {
- Yyerror("illegal constant expression %v %v", Oconv(int(n.Op), 0), nl.Type)
+ Yyerror("illegal constant expression %v %v", Oconv(n.Op, 0), nl.Type)
n.Diag = 1
}
return
n.SetVal(v)
// check range.
- lno = int(setlineno(n))
-
+ lno = setlineno(n)
overflow(v, n.Type)
- lineno = int32(lno)
+ lineno = lno
// truncate precision for non-ideal float.
if v.Ctype() == CTFLT && n.Type.Etype != TIDEAL {
illegal:
if n.Diag == 0 {
- Yyerror("illegal constant expression: %v %v %v", nl.Type, Oconv(int(n.Op), 0), nr.Type)
+ Yyerror("illegal constant expression: %v %v %v", nl.Type, Oconv(n.Op, 0), nr.Type)
n.Diag = 1
}
}
*np = n
}
- lno := int(setlineno(n))
+ lno := setlineno(n)
ctype := idealkind(n)
var t1 *Type
switch ctype {
}
if n.Val().Ctype() == CTNIL {
- lineno = int32(lno)
+ lineno = lno
if n.Diag == 0 {
Yyerror("use of untyped nil")
n.Diag = 1
goto num
}
- lineno = int32(lno)
+ lineno = lno
return
num:
overflow(n.Val(), t1)
}
Convlit(np, t1)
- lineno = int32(lno)
+ lineno = lno
return
}
if hascallchan(n.Left) || hascallchan(n.Right) {
return true
}
-
- for l := n.List; l != nil; l = l.Next {
- if hascallchan(l.N) {
+ for _, n1 := range n.List.Slice() {
+ if hascallchan(n1) {
return true
}
}
- for l := n.Rlist; l != nil; l = l.Next {
- if hascallchan(l.N) {
+ for _, n2 := range n.Rlist.Slice() {
+ if hascallchan(n2) {
return true
}
}
switch n.Op {
default:
Dump("complexgen: unknown op", n)
- Fatalf("complexgen: unknown op %v", Oconv(int(n.Op), 0))
+ Fatalf("complexgen: unknown op %v", Oconv(n.Op, 0))
case ODOT,
ODOTPTR,
switch n.Op {
default:
- Fatalf("complexgen: unknown op %v", Oconv(int(n.Op), 0))
+ Fatalf("complexgen: unknown op %v", Oconv(n.Op, 0))
case OCONV:
Complexmove(nl, res)
d := push()
dcopy(d, s)
if dflag() {
- fmt.Printf("\t%v push %v %p\n", Ctxt.Line(int(lineno)), s, s.Def)
+ fmt.Printf("\t%v push %v %p\n", linestr(lineno), s, s.Def)
}
return d
}
+// popdcl unwinds the declaration stack down to the most recent mark
+// (an entry with an empty name), restoring each shadowed symbol's
+// previous definition via dcopy while remembering where it was last
+// declared. It is a fatal error if no mark is on the stack.
func popdcl() {
- var d *Sym
- var s *Sym
- var lno int
-
- // if(dflag())
- // print("revert\n");
-
- for d = dclstack; d != nil; d = d.Link {
- if d.Name == "" {
- break
- }
- s = Pkglookup(d.Name, d.Pkg)
- lno = int(s.Lastlineno)
+ d := dclstack
+ for ; d != nil && d.Name != ""; d = d.Link {
+ s := Pkglookup(d.Name, d.Pkg)
+ lno := s.Lastlineno
dcopy(s, d)
- d.Lastlineno = int32(lno)
+ d.Lastlineno = lno
if dflag() {
- fmt.Printf("\t%v pop %v %p\n", Ctxt.Line(int(lineno)), s, s.Def)
+ fmt.Printf("\t%v pop %v %p\n", linestr(lineno), s, s.Def)
}
}
if d == nil {
Fatalf("popdcl: no mark")
}
- dclstack = d.Link
+
+ dclstack = d.Link // pop mark
block = d.Block
}
block = blockgen
}
-// if(dflag())
-// print("markdcl\n");
func dumpdcl(st string) {
- var s *Sym
-
i := 0
for d := dclstack; d != nil; d = d.Link {
i++
}
fmt.Printf(" '%s'", d.Name)
- s = Pkglookup(d.Name, d.Pkg)
- fmt.Printf(" %v\n", s)
+ fmt.Printf(" %v\n", Pkglookup(d.Name, d.Pkg))
}
}
errorexit()
}
Yyerror("mark left on the stack")
- continue
}
}
}
pkgstr := tmp
Yyerror("%v redeclared %s\n"+"\tprevious declaration during import %q", s, where, pkgstr)
} else {
- line1 := parserline()
- line2 := int(s.Lastlineno)
+ line1 := lineno
+ line2 := s.Lastlineno
// When an import and a declaration collide in separate files,
// present the import as the "redeclared", because the declaration
// See issue 4510.
if s.Def == nil {
line2 = line1
- line1 = int(s.Lastlineno)
+ line1 = s.Lastlineno
}
- yyerrorl(int(line1), "%v redeclared %s\n"+"\tprevious declaration at %v", s, where, Ctxt.Line(line2))
+ yyerrorl(line1, "%v redeclared %s\n"+"\tprevious declaration at %v", s, where, linestr(line2))
}
}
// named OLITERAL needs Name; most OLITERALs don't.
n.Name = new(Name)
}
- n.Lineno = int32(parserline())
+ n.Lineno = lineno
s := n.Sym
// kludgy: typecheckok means we're past parsing. Eg genwrapper may declare out of package names later.
if ctxt == PEXTERN {
externdcl = append(externdcl, n)
if dflag() {
- fmt.Printf("\t%v global decl %v %p\n", Ctxt.Line(int(lineno)), s, n)
+ fmt.Printf("\t%v global decl %v %p\n", linestr(lineno), s, n)
}
} else {
if Curfn == nil && ctxt == PAUTO {
}
s.Block = block
- s.Lastlineno = int32(parserline())
+ s.Lastlineno = lineno
s.Def = n
n.Name.Vargen = int32(gen)
n.Name.Funcdepth = Funcdepth
if count(el) == 1 && count(vl) > 1 {
e := el.N
as2 := Nod(OAS2, nil, nil)
- as2.List = vl
- as2.Rlist = list1(e)
+ setNodeSeq(&as2.List, vl)
+ as2.Rlist.Set([]*Node{e})
var v *Node
for ; vl != nil; vl = vl.Next {
v = vl.N
lastconst = cl
lasttype = t
}
- cl = listtreecopy(cl, lno)
+ clcopy := listtreecopy(nodeSeqSlice(cl), lno)
var v *Node
var c *Node
var vv *NodeList
for ; vl != nil; vl = vl.Next {
- if cl == nil {
+ if len(clcopy) == 0 {
Yyerror("missing value in const declaration")
break
}
- c = cl.N
- cl = cl.Next
+ c = clcopy[0]
+ clcopy = clcopy[1:]
v = vl.N
v.Op = OLITERAL
vv = list(vv, Nod(ODCLCONST, v, nil))
}
- if cl != nil {
+ if len(clcopy) != 0 {
Yyerror("extra expression in const declaration")
}
iota_ += 1
n = newname(s)
n.Op = ONONAME
n.Name.Iota = iota_ // save current iota value in const declarations
+ return n
}
if Curfn != nil && n.Op == ONAME && n.Name.Funcdepth > 0 && n.Name.Funcdepth != Funcdepth {
return false
}
-func colasdefn(left *NodeList, defn *Node) {
- for l := left; l != nil; l = l.Next {
- if l.N.Sym != nil {
- l.N.Sym.Flags |= SymUniq
+func colasdefn(left Nodes, defn *Node) {
+ for _, n1 := range left.Slice() {
+ if n1.Sym != nil {
+ n1.Sym.Flags |= SymUniq
}
}
nnew := 0
nerr := 0
var n *Node
- for l := left; l != nil; l = l.Next {
- n = l.N
+ for i2, n2 := range left.Slice() {
+ n = n2
if isblank(n) {
continue
}
if !colasname(n) {
- yyerrorl(int(defn.Lineno), "non-name %v on left side of :=", n)
+ yyerrorl(defn.Lineno, "non-name %v on left side of :=", n)
nerr++
continue
}
if n.Sym.Flags&SymUniq == 0 {
- yyerrorl(int(defn.Lineno), "%v repeated on left side of :=", n.Sym)
+ yyerrorl(defn.Lineno, "%v repeated on left side of :=", n.Sym)
n.Diag++
nerr++
continue
n = newname(n.Sym)
declare(n, dclcontext)
n.Name.Defn = defn
- defn.Ninit = list(defn.Ninit, Nod(ODCL, n, nil))
- l.N = n
+ defn.Ninit.Append(Nod(ODCL, n, nil))
+ left.Slice()[i2] = n
}
if nnew == 0 && nerr == 0 {
- yyerrorl(int(defn.Lineno), "no new variables on left side of :=")
+ yyerrorl(defn.Lineno, "no new variables on left side of :=")
}
}
func colas(left *NodeList, right *NodeList, lno int32) *Node {
as := Nod(OAS2, nil, nil)
- as.List = left
- as.Rlist = right
+ setNodeSeq(&as.List, left)
+ setNodeSeq(&as.Rlist, right)
as.Colas = true
as.Lineno = lno
- colasdefn(left, as)
+ colasdefn(as.List, as)
// make the tree prettier; not necessary
- if count(left) == 1 && count(right) == 1 {
- as.Left = as.List.N
- as.Right = as.Rlist.N
- as.List = nil
- as.Rlist = nil
+ if as.List.Len() == 1 && as.Rlist.Len() == 1 {
+ as.Left = as.List.First()
+ as.Right = as.Rlist.First()
+ as.List.Set(nil)
+ as.Rlist.Set(nil)
as.Op = OAS
}
func funcargs(nt *Node) {
if nt.Op != OTFUNC {
- Fatalf("funcargs %v", Oconv(int(nt.Op), 0))
+ Fatalf("funcargs %v", Oconv(nt.Op, 0))
}
// re-start the variable generation number
// we want to use small numbers for the return variables,
// so let them have the chunk starting at 1.
- vargen = count(nt.Rlist)
+ vargen = nt.Rlist.Len()
// declare the receiver and in arguments.
// no n->defn because type checking of func header
if nt.Left != nil {
n := nt.Left
if n.Op != ODCLFIELD {
- Fatalf("funcargs receiver %v", Oconv(int(n.Op), 0))
+ Fatalf("funcargs receiver %v", Oconv(n.Op, 0))
}
if n.Left != nil {
n.Left.Op = ONAME
}
var n *Node
- for l := nt.List; l != nil; l = l.Next {
- n = l.N
+ for _, n = range nt.List.Slice() {
if n.Op != ODCLFIELD {
- Fatalf("funcargs in %v", Oconv(int(n.Op), 0))
+ Fatalf("funcargs in %v", Oconv(n.Op, 0))
}
if n.Left != nil {
n.Left.Op = ONAME
}
// declare the out arguments.
- gen := count(nt.List)
+ gen := nt.List.Len()
var i int = 0
var nn *Node
- for l := nt.Rlist; l != nil; l = l.Next {
- n = l.N
-
+ for _, n = range nt.Rlist.Slice() {
if n.Op != ODCLFIELD {
- Fatalf("funcargs out %v", Oconv(int(n.Op), 0))
+ Fatalf("funcargs out %v", Oconv(n.Op, 0))
}
if n.Left == nil {
}
if t.Thistuple != 0 {
- var n *Node
- for ft := getthisx(t).Type; ft != nil; ft = ft.Down {
+ for ft, it := IterFields(t.Recv()); ft != nil; ft = it.Next() {
if ft.Nname == nil || ft.Nname.Sym == nil {
continue
}
- n = ft.Nname // no need for newname(ft->nname->sym)
+ n := ft.Nname // no need for newname(ft->nname->sym)
n.Type = ft.Type
declare(n, PPARAM)
}
}
if t.Intuple != 0 {
- var n *Node
- for ft := getinargx(t).Type; ft != nil; ft = ft.Down {
+ for ft, it := IterFields(t.Params()); ft != nil; ft = it.Next() {
if ft.Nname == nil || ft.Nname.Sym == nil {
continue
}
- n = ft.Nname
+ n := ft.Nname
n.Type = ft.Type
declare(n, PPARAM)
}
}
if t.Outtuple != 0 {
- var n *Node
- for ft := getoutargx(t).Type; ft != nil; ft = ft.Down {
+ for ft, it := IterFields(t.Results()); ft != nil; ft = it.Next() {
if ft.Nname == nil || ft.Nname.Sym == nil {
continue
}
- n = ft.Nname
+ n := ft.Nname
n.Type = ft.Type
declare(n, PPARAMOUT)
}
}
func structfield(n *Node) *Type {
- lno := int(lineno)
+ lno := lineno
lineno = n.Lineno
if n.Op != ODCLFIELD {
f.Sym = f.Nname.Sym
}
- lineno = int32(lno)
+ lineno = lno
return f
}
var uniqgen uint32
func checkdupfields(t *Type, what string) {
- lno := int(lineno)
+ lno := lineno
for ; t != nil; t = t.Down {
if t.Sym != nil && t.Nname != nil && !isblank(t.Nname) {
}
}
- lineno = int32(lno)
+ lineno = lno
}
// convert a parsed id/type list into
// a type for struct/interface/arglist
-func tostruct(l *NodeList) *Type {
+// tostruct builds a fresh TSTRUCT type from the parsed field
+// declarations in l.
+func tostruct(l []*Node) *Type {
t := typ(TSTRUCT)
tostruct0(t, l)
return t
}
-func tostruct0(t *Type, l *NodeList) {
+func tostruct0(t *Type, l []*Node) {
if t == nil || t.Etype != TSTRUCT {
Fatalf("struct expected")
}
- for tp := &t.Type; l != nil; l = l.Next {
- f := structfield(l.N)
+ for tp, it := &t.Type, nodeSeqIterate(l); !it.Done(); it.Next() {
+ f := structfield(it.N())
*tp = f
tp = &f.Down
}
}
-func tofunargs(l *NodeList) *Type {
+func tofunargs(l []*Node) *Type {
var f *Type
t := typ(TSTRUCT)
t.Funarg = true
- for tp := &t.Type; l != nil; l = l.Next {
- f = structfield(l.N)
+ for tp, it := &t.Type, nodeSeqIterate(l); !it.Done(); it.Next() {
+ f = structfield(it.N())
f.Funarg = true
// esc.go needs to find f given a PPARAM to add the tag.
- if l.N.Left != nil && l.N.Left.Class == PPARAM {
- l.N.Left.Name.Param.Field = f
+ if it.N().Left != nil && it.N().Left.Class == PPARAM {
+ it.N().Left.Name.Param.Field = f
}
*tp = f
}
func interfacefield(n *Node) *Type {
- lno := int(lineno)
+ lno := lineno
lineno = n.Lineno
if n.Op != ODCLFIELD {
f.Broke = true
}
- lineno = int32(lno)
+ lineno = lno
return f
}
-func tointerface(l *NodeList) *Type {
+// tointerface builds a fresh TINTER type from the parsed method
+// declarations in l.
+func tointerface(l []*Node) *Type {
t := typ(TINTER)
tointerface0(t, l)
return t
}
-func tointerface0(t *Type, l *NodeList) *Type {
+func tointerface0(t *Type, l []*Node) *Type {
if t == nil || t.Etype != TINTER {
Fatalf("interface expected")
}
tp := &t.Type
- for ; l != nil; l = l.Next {
- f := interfacefield(l.N)
+ for _, n := range l {
+ f := interfacefield(n)
- if l.N.Left == nil && f.Type.Etype == TINTER {
+ if n.Left == nil && f.Type.Etype == TINTER {
// embedded interface, inline methods
for t1 := f.Type.Type; t1 != nil; t1 = t1.Down {
f = typ(TFIELD)
// Those methods have an anonymous *struct{} as the receiver.
// (See fakethis above.)
func isifacemethod(f *Type) bool {
- rcvr := getthisx(f).Type
+ rcvr := f.Recv().Type
if rcvr.Sym != nil {
return false
}
}
// turn a parsed function declaration into a type
-func functype(this *Node, in *NodeList, out *NodeList) *Type {
+// functype builds a fresh TFUNC type from an optional receiver (this)
+// and the parsed input and output parameter lists.
+func functype(this *Node, in, out []*Node) *Type {
t := typ(TFUNC)
functype0(t, this, in, out)
return t
}
-func functype0(t *Type, this *Node, in *NodeList, out *NodeList) {
+func functype0(t *Type, this *Node, in, out []*Node) {
if t == nil || t.Etype != TFUNC {
Fatalf("function type expected")
}
- var rcvr *NodeList
+ var rcvr []*Node
if this != nil {
- rcvr = list1(this)
+ rcvr = []*Node{this}
}
- t.Type = tofunargs(rcvr)
- t.Type.Down = tofunargs(out)
- t.Type.Down.Down = tofunargs(in)
+ *t.RecvP() = tofunargs(rcvr)
+ *t.ResultsP() = tofunargs(out)
+ *t.ParamsP() = tofunargs(in)
uniqgen++
- checkdupfields(t.Type.Type, "argument")
- checkdupfields(t.Type.Down.Type, "argument")
- checkdupfields(t.Type.Down.Down.Type, "argument")
+ checkdupfields(t.Recv().Type, "argument")
+ checkdupfields(t.Results().Type, "argument")
+ checkdupfields(t.Params().Type, "argument")
- if t.Type.Broke || t.Type.Down.Broke || t.Type.Down.Down.Broke {
+ if t.Recv().Broke || t.Results().Broke || t.Params().Broke {
t.Broke = true
}
if this != nil {
t.Thistuple = 1
}
- t.Outtuple = count(out)
- t.Intuple = count(in)
+ t.Outtuple = nodeSeqLen(out)
+ t.Intuple = nodeSeqLen(in)
t.Outnamed = false
- if t.Outtuple > 0 && out.N.Left != nil && out.N.Left.Orig != nil {
- s := out.N.Left.Orig.Sym
+ if t.Outtuple > 0 && nodeSeqFirst(out).Left != nil && nodeSeqFirst(out).Left.Orig != nil {
+ s := nodeSeqFirst(out).Left.Orig.Sym
if s != nil && (s.Name[0] != '~' || s.Name[1] != 'r') { // ~r%d is the name invented for an unnamed result
t.Outnamed = true
}
}
// get parent type sym
- pa := getthisx(t).Type // ptr to this structure
+ pa := t.Recv().Type // ptr to this structure
if pa == nil {
Yyerror("missing receiver")
return
for _, n := range list {
if n.Func.WBLineno == 0 {
c.curfn = n
- c.visitcodeslice(n.Nbody.Slice())
+ c.visitcodelist(n.Nbody)
}
}
if c.stable {
// Build the error message in reverse.
err := ""
for call.target != nil {
- err = fmt.Sprintf("\n\t%v: called by %v%s", Ctxt.Line(int(call.lineno)), n.Func.Nname, err)
+ err = fmt.Sprintf("\n\t%v: called by %v%s", linestr(call.lineno), n.Func.Nname, err)
n = call.target
call = c.best[n]
}
err = fmt.Sprintf("write barrier prohibited by caller; %v%s", n.Func.Nname, err)
- yyerrorl(int(n.Func.WBLineno), err)
+ yyerrorl(n.Func.WBLineno, err)
}
})
}
-func (c *nowritebarrierrecChecker) visitcodelist(l *NodeList) {
- for ; l != nil; l = l.Next {
- c.visitcode(l.N)
- }
-}
-
-func (c *nowritebarrierrecChecker) visitcodeslice(l []*Node) {
- for _, n := range l {
+// visitcodelist visits every node in l; it merges the former
+// NodeList and slice variants now that lists are stored as Nodes.
+func (c *nowritebarrierrecChecker) visitcodelist(l Nodes) {
+ for _, n := range l.Slice() {
c.visitcode(n)
}
}
c.visitcode(n.Left)
c.visitcode(n.Right)
c.visitcodelist(n.List)
- c.visitcodeslice(n.Nbody.Slice())
+ c.visitcodelist(n.Nbody)
c.visitcodelist(n.Rlist)
}
min := v.visitgen
v.stack = append(v.stack, n)
- min = v.visitcodeslice(n.Nbody.Slice(), min)
+ min = v.visitcodelist(n.Nbody, min)
if (min == id || min == id+1) && n.Func.FCurfn == nil {
// This node is the root of a strongly connected component.
return min
}
-func (v *bottomUpVisitor) visitcodelist(l *NodeList, min uint32) uint32 {
- for ; l != nil; l = l.Next {
- min = v.visitcode(l.N, min)
- }
- return min
-}
-
-func (v *bottomUpVisitor) visitcodeslice(l []*Node, min uint32) uint32 {
- for _, n := range l {
+func (v *bottomUpVisitor) visitcodelist(l Nodes, min uint32) uint32 {
+ for _, n := range l.Slice() {
min = v.visitcode(n, min)
}
return min
min = v.visitcode(n.Left, min)
min = v.visitcode(n.Right, min)
min = v.visitcodelist(n.List, min)
- min = v.visitcodeslice(n.Nbody.Slice(), min)
+ min = v.visitcodelist(n.Nbody, min)
min = v.visitcodelist(n.Rlist, min)
if n.Op == OCALLFUNC || n.Op == OCALLMETH {
type NodeEscState struct {
Curfn *Node
- Escflowsrc *NodeList // flow(this, src)
- Escretval *NodeList // on OCALLxxx, list of dummy return values
- Escloopdepth int32 // -1: global, 0: return variables, 1:function top level, increased inside function for every loop or label to mark scopes
+ Escflowsrc []*Node // flow(this, src)
+ Escretval Nodes // on OCALLxxx, list of dummy return values
+ Escloopdepth int32 // -1: global, 0: return variables, 1:function top level, increased inside function for every loop or label to mark scopes
Esclevel Level
Walkgen uint32
Maxextraloopdepth int32
n.Esc = EscNone // until proven otherwise
nE := e.nodeEscState(n)
nE.Escloopdepth = e.loopdepth
- e.noesc = list(e.noesc, n)
+ e.noesc = append(e.noesc, n)
}
// Escape constants are numbered in order of increasing "escapiness"
// flow to.
theSink Node
- dsts *NodeList // all dst nodes
- loopdepth int32 // for detecting nested loop scopes
- pdepth int // for debug printing in recursions.
- dstcount int // diagnostic
- edgecount int // diagnostic
- noesc *NodeList // list of possible non-escaping nodes, for printing
- recursive bool // recursive function or group of mutually recursive functions.
- opts []*Node // nodes with .Opt initialized
+ dsts []*Node // all dst nodes
+ loopdepth int32 // for detecting nested loop scopes
+ pdepth int // for debug printing in recursions.
+ dstcount int // diagnostic
+ edgecount int // diagnostic
+ noesc []*Node // list of possible non-escaping nodes, for printing
+ recursive bool // recursive function or group of mutually recursive functions.
+ opts []*Node // nodes with .Opt initialized
walkgen uint32
}
// visit the upstream of each dst, mark address nodes with
// addrescapes, mark parameters unsafe
- for l := e.dsts; l != nil; l = l.Next {
- escflood(e, l.N)
+ for _, n := range e.dsts {
+ escflood(e, n)
}
// for all top level functions, tag the typenodes corresponding to the param nodes
}
if Debug['m'] != 0 {
- for l := e.noesc; l != nil; l = l.Next {
- if l.N.Esc == EscNone {
- Warnl(int(l.N.Lineno), "%v %v does not escape", e.curfnSym(l.N), Nconv(l.N, obj.FmtShort))
+ for _, n := range e.noesc {
+ if n.Esc == EscNone {
+ Warnl(n.Lineno, "%v %v does not escape", e.curfnSym(n), Nconv(n, obj.FmtShort))
}
}
}
} else {
ln.Esc = EscNone // prime for escflood later
}
- e.noesc = list(e.noesc, ln)
+ e.noesc = append(e.noesc, ln)
}
}
}
}
- escloopdepthslice(e, Curfn.Nbody.Slice())
- escslice(e, Curfn.Nbody.Slice(), Curfn)
+ escloopdepthlist(e, Curfn.Nbody)
+ esclist(e, Curfn.Nbody, Curfn)
Curfn = savefn
e.loopdepth = saveld
}
var nonlooping Label
-func escloopdepthlist(e *EscState, l *NodeList) {
- for ; l != nil; l = l.Next {
- escloopdepth(e, l.N)
- }
-}
-
-func escloopdepthslice(e *EscState, l []*Node) {
- for _, n := range l {
+func escloopdepthlist(e *EscState, l Nodes) {
+ for _, n := range l.Slice() {
escloopdepth(e, n)
}
}
escloopdepth(e, n.Left)
escloopdepth(e, n.Right)
escloopdepthlist(e, n.List)
- escloopdepthslice(e, n.Nbody.Slice())
+ escloopdepthlist(e, n.Nbody)
escloopdepthlist(e, n.Rlist)
}
-func esclist(e *EscState, l *NodeList, up *Node) {
- for ; l != nil; l = l.Next {
- esc(e, l.N, up)
- }
-}
-
-func escslice(e *EscState, l []*Node, up *Node) {
- for _, n := range l {
+func esclist(e *EscState, l Nodes, up *Node) {
+ for _, n := range l.Slice() {
esc(e, n, up)
}
}
return
}
- lno := int(setlineno(n))
+ lno := setlineno(n)
// ninit logically runs at a different loopdepth than the rest of the for loop.
esclist(e, n.Ninit, n)
// must happen before processing of switch body,
// so before recursion.
if n.Op == OSWITCH && n.Left != nil && n.Left.Op == OTYPESW {
- for ll := n.List; ll != nil; ll = ll.Next { // cases
-
- // ll.N.Rlist is the variable per case
- if ll.N.Rlist != nil {
- e.nodeEscState(ll.N.Rlist.N).Escloopdepth = e.loopdepth
+ for _, n1 := range n.List.Slice() { // cases
+ // n1.Rlist is the variable per case
+ if n1.Rlist.Len() != 0 {
+ e.nodeEscState(n1.Rlist.First()).Escloopdepth = e.loopdepth
}
}
}
n.Op == ONEW && n.Type.Type.Width >= 1<<16 ||
n.Op == OMAKESLICE && !isSmallMakeSlice(n)) {
if Debug['m'] > 1 {
- Warnl(int(n.Lineno), "%v is too large for stack", n)
+ Warnl(n.Lineno, "%v is too large for stack", n)
}
n.Esc = EscHeap
addrescapes(n)
esc(e, n.Left, n)
esc(e, n.Right, n)
- escslice(e, n.Nbody.Slice(), n)
+ esclist(e, n.Nbody, n)
esclist(e, n.List, n)
esclist(e, n.Rlist, n)
}
if Debug['m'] > 1 {
- fmt.Printf("%v:[%d] %v esc: %v\n", Ctxt.Line(int(lineno)), e.loopdepth, funcSym(Curfn), n)
+ fmt.Printf("%v:[%d] %v esc: %v\n", linestr(lineno), e.loopdepth, funcSym(Curfn), n)
}
switch n.Op {
case OLABEL:
if n.Left.Sym.Label == &nonlooping {
if Debug['m'] > 1 {
- fmt.Printf("%v:%v non-looping label\n", Ctxt.Line(int(lineno)), n)
+ fmt.Printf("%v:%v non-looping label\n", linestr(lineno), n)
}
} else if n.Left.Sym.Label == &looping {
if Debug['m'] > 1 {
- fmt.Printf("%v: %v looping label\n", Ctxt.Line(int(lineno)), n)
+ fmt.Printf("%v: %v looping label\n", linestr(lineno), n)
}
e.loopdepth++
}
n.Left.Sym.Label = nil
case ORANGE:
- if n.List != nil && n.List.Next != nil {
+ if n.List.Len() >= 2 {
// Everything but fixed array is a dereference.
// If fixed array is really the address of fixed array,
// dereferenced (see #12588)
if Isfixedarray(n.Type) &&
!(Isptr[n.Right.Type.Etype] && Eqtype(n.Right.Type.Type, n.Type)) {
- escassign(e, n.List.Next.N, n.Right)
+ escassign(e, n.List.Second(), n.Right)
} else {
- escassignDereference(e, n.List.Next.N, n.Right)
+ escassignDereference(e, n.List.Second(), n.Right)
}
}
case OSWITCH:
if n.Left != nil && n.Left.Op == OTYPESW {
- for ll := n.List; ll != nil; ll = ll.Next {
+ for _, n2 := range n.List.Slice() {
// cases
// n.Left.Right is the argument of the .(type),
- // ll.N.Rlist is the variable per case
- if ll.N.Rlist != nil {
- escassign(e, ll.N.Rlist.N, n.Left.Right)
+ // n2.Rlist is the variable per case
+ if n2.Rlist.Len() != 0 {
+ escassign(e, n2.Rlist.First(), n.Left.Right)
}
}
}
// b escapes as well. If we ignore such OSLICEARR, we will conclude
// that b does not escape when b contents do.
if Debug['m'] != 0 {
- Warnl(int(n.Lineno), "%v ignoring self-assignment to %v", e.curfnSym(n), Nconv(n.Left, obj.FmtShort))
+ Warnl(n.Lineno, "%v ignoring self-assignment to %v", e.curfnSym(n), Nconv(n.Left, obj.FmtShort))
}
break
escassign(e, n.Left, n.Right)
case OAS2: // x,y = a,b
- if count(n.List) == count(n.Rlist) {
- ll := n.List
- lr := n.Rlist
- for ; ll != nil; ll, lr = ll.Next, lr.Next {
- escassign(e, ll.N, lr.N)
+ if n.List.Len() == n.Rlist.Len() {
+ lrit := nodeSeqIterate(n.Rlist)
+ for _, n3 := range n.List.Slice() {
+ escassign(e, n3, lrit.N())
+ lrit.Next()
}
}
case OAS2RECV, // v, ok = <-ch
OAS2MAPR, // v, ok = m[k]
OAS2DOTTYPE: // v, ok = x.(type)
- escassign(e, n.List.N, n.Rlist.N)
+ escassign(e, n.List.First(), n.Rlist.First())
case OSEND: // ch <- x
escassign(e, &e.theSink, n.Right)
escassign(e, &e.theSink, n.Left.Left)
escassign(e, &e.theSink, n.Left.Right) // ODDDARG for call
- for ll := n.Left.List; ll != nil; ll = ll.Next {
- escassign(e, &e.theSink, ll.N)
+ for _, n4 := range n.Left.List.Slice() {
+ escassign(e, &e.theSink, n4)
}
case OCALLMETH, OCALLFUNC, OCALLINTER:
// esccall already done on n->rlist->n. tie it's escretval to n->list
case OAS2FUNC: // x,y = f()
- lr := e.nodeEscState(n.Rlist.N).Escretval
+ lrit := nodeSeqIterate(e.nodeEscState(n.Rlist.First()).Escretval)
- var ll *NodeList
- for ll = n.List; lr != nil && ll != nil; lr, ll = lr.Next, ll.Next {
- escassign(e, ll.N, lr.N)
+ var llit nodeSeqIterator
+ for llit = nodeSeqIterate(n.List); !lrit.Done() && !llit.Done(); llit.Next() {
+ escassign(e, llit.N(), lrit.N())
+ lrit.Next()
}
- if lr != nil || ll != nil {
+ if !llit.Done() || !lrit.Done() {
Fatalf("esc oas2func")
}
case ORETURN:
ll := n.List
- if count(n.List) == 1 && Curfn.Type.Outtuple > 1 {
+ if n.List.Len() == 1 && Curfn.Type.Outtuple > 1 {
// OAS2FUNC in disguise
// esccall already done on n->list->n
// tie n->list->n->escretval to curfn->dcl PPARAMOUT's
- ll = e.nodeEscState(n.List.N).Escretval
+ ll = e.nodeEscState(n.List.First()).Escretval
}
+ llit := nodeSeqIterate(ll)
for _, lrn := range Curfn.Func.Dcl {
- if ll == nil {
+ if llit.Done() {
break
}
if lrn.Op != ONAME || lrn.Class != PPARAMOUT {
continue
}
- escassign(e, lrn, ll.N)
- ll = ll.Next
+ escassign(e, lrn, llit.N())
+ llit.Next()
}
- if ll != nil {
+ if !llit.Done() {
Fatalf("esc return list")
}
case OAPPEND:
if !n.Isddd {
- for ll := n.List.Next; ll != nil; ll = ll.Next {
- escassign(e, &e.theSink, ll.N) // lose track of assign to dereference
+ llit := nodeSeqIterate(n.List)
+ llit.Next()
+ for ; !llit.Done(); llit.Next() {
+ escassign(e, &e.theSink, llit.N()) // lose track of assign to dereference
}
} else {
// append(slice1, slice2...) -- slice2 itself does not escape, but contents do.
- slice2 := n.List.Next.N
+ slice2 := n.List.Second()
escassignDereference(e, &e.theSink, slice2) // lose track of assign of dereference
if Debug['m'] > 2 {
- Warnl(int(n.Lineno), "%v special treatment of append(slice1, slice2...) %v", e.curfnSym(n), Nconv(n, obj.FmtShort))
+ Warnl(n.Lineno, "%v special treatment of append(slice1, slice2...) %v", e.curfnSym(n), Nconv(n, obj.FmtShort))
}
}
- escassignDereference(e, &e.theSink, n.List.N) // The original elements are now leaked, too
+ escassignDereference(e, &e.theSink, n.List.First()) // The original elements are now leaked, too
case OCOPY:
escassignDereference(e, &e.theSink, n.Right) // lose track of assign of dereference
// Slice itself is not leaked until proven otherwise
e.track(n)
}
-
// Link values to array/slice
- for ll := n.List; ll != nil; ll = ll.Next {
- escassign(e, n, ll.N.Right)
+ for _, n5 := range n.List.Slice() {
+ escassign(e, n, n5.Right)
}
// Link values to struct.
case OSTRUCTLIT:
- for ll := n.List; ll != nil; ll = ll.Next {
- escassign(e, n, ll.N.Right)
+ for _, n6 := range n.List.Slice() {
+ escassign(e, n, n6.Right)
}
case OPTRLIT:
case OMAPLIT:
e.track(n)
-
// Keys and values make it to memory, lose track.
- for ll := n.List; ll != nil; ll = ll.Next {
- escassign(e, &e.theSink, ll.N.Left)
- escassign(e, &e.theSink, ll.N.Right)
+ for _, n7 := range n.List.Slice() {
+ escassign(e, &e.theSink, n7.Left)
+ escassign(e, &e.theSink, n7.Right)
}
// Link addresses of captured variables to closure.
}
}
- lineno = int32(lno)
+ lineno = lno
}
// Assert that expr somehow gets assigned to dst, if non nil. for
if Debug['m'] > 1 {
fmt.Printf("%v:[%d] %v escassign: %v(%v)[%v] = %v(%v)[%v]\n",
- Ctxt.Line(int(lineno)), e.loopdepth, funcSym(Curfn),
- Nconv(dst, obj.FmtShort), Jconv(dst, obj.FmtShort), Oconv(int(dst.Op), 0),
- Nconv(src, obj.FmtShort), Jconv(src, obj.FmtShort), Oconv(int(src.Op), 0))
+ linestr(lineno), e.loopdepth, funcSym(Curfn),
+ Nconv(dst, obj.FmtShort), Jconv(dst, obj.FmtShort), Oconv(dst.Op, 0),
+ Nconv(src, obj.FmtShort), Jconv(src, obj.FmtShort), Oconv(src.Op, 0))
}
setlineno(dst)
dst = &e.theSink
}
- lno := int(setlineno(src))
+ lno := setlineno(src)
e.pdepth++
switch src.Op {
// Flowing multiple returns to a single dst happens when
// analyzing "go f(g())": here g() flows to sink (issue 4529).
case OCALLMETH, OCALLFUNC, OCALLINTER:
- for ll := e.nodeEscState(src).Escretval; ll != nil; ll = ll.Next {
- escflows(e, dst, ll.N)
+ for _, n := range e.nodeEscState(src).Escretval.Slice() {
+ escflows(e, dst, n)
}
// A non-pointer escaping from a struct does not concern us.
case OAPPEND:
// Append returns first argument.
// Subsequent arguments are already leaked because they are operands to append.
- escassign(e, dst, src.List.N)
+ escassign(e, dst, src.List.First())
case OINDEX:
// Index of array preserves input value.
}
e.pdepth--
- lineno = int32(lno)
+ lineno = lno
}
// Common case for escapes is 16 bits 000000000xxxEEEE
// escassignfromtag models the input-to-output assignment flow of one of a function
// calls arguments, where the flow is encoded in "note".
-func escassignfromtag(e *EscState, note *string, dsts *NodeList, src *Node) uint16 {
+func escassignfromtag(e *EscState, note *string, dsts Nodes, src *Node) uint16 {
em := parsetag(note)
if src.Op == OLITERAL {
return em
if Debug['m'] > 2 {
fmt.Printf("%v::assignfromtag:: src=%v, em=%s\n",
- Ctxt.Line(int(lineno)), Nconv(src, obj.FmtShort), describeEscape(em))
+ linestr(lineno), Nconv(src, obj.FmtShort), describeEscape(em))
}
if em == EscUnknown {
}
em0 := em
- for em >>= EscReturnBits; em != 0 && dsts != nil; em, dsts = em>>bitsPerOutputInTag, dsts.Next {
+ it := nodeSeqIterate(dsts)
+ for em >>= EscReturnBits; em != 0 && !it.Done(); em = em >> bitsPerOutputInTag {
// Prefer the lowest-level path to the reference (for escape purposes).
// Two-bit encoding (for example. 1, 3, and 4 bits are other options)
// 01 = 0-level
for i := uint16(0); i < embits-1; i++ {
n = e.addDereference(n) // encode level>0 as indirections
}
- escassign(e, dsts.N, n)
+ escassign(e, it.N(), n)
}
+ it.Next()
}
// If there are too many outputs to fit in the tag,
// that is handled at the encoding end as EscHeap,
// so there is no need to check here.
- if em != 0 && dsts == nil {
+ if em != 0 && it.Done() {
Fatalf("corrupt esc tag %q or messed up escretval list\n", note)
}
return em0
func initEscretval(e *EscState, n *Node, fntype *Type) {
i := 0
nE := e.nodeEscState(n)
- nE.Escretval = nil // Suspect this is not nil for indirect calls.
- for t := getoutargx(fntype).Type; t != nil; t = t.Down {
+ nE.Escretval.Set(nil) // Suspect this is not nil for indirect calls.
+ for t, it := IterFields(fntype.Results()); t != nil; t = it.Next() {
src := Nod(ONAME, nil, nil)
buf := fmt.Sprintf(".out%d", i)
i++
e.nodeEscState(src).Escloopdepth = e.loopdepth
src.Used = true
src.Lineno = n.Lineno
- nE.Escretval = list(nE.Escretval, src)
+ nE.Escretval.Append(src)
}
}
}
ll := n.List
- if n.List != nil && n.List.Next == nil {
- a := n.List.N
+ if n.List.Len() == 1 {
+ a := n.List.First()
if a.Type.Etype == TSTRUCT && a.Type.Funarg { // f(g()).
ll = e.nodeEscState(a).Escretval
}
if indirect {
// We know nothing!
// Leak all the parameters
- for ; ll != nil; ll = ll.Next {
- escassign(e, &e.theSink, ll.N)
+ for _, n1 := range ll.Slice() {
+ escassign(e, &e.theSink, n1)
if Debug['m'] > 2 {
- fmt.Printf("%v::esccall:: indirect call <- %v, untracked\n", Ctxt.Line(int(lineno)), Nconv(ll.N, obj.FmtShort))
+ fmt.Printf("%v::esccall:: indirect call <- %v, untracked\n", linestr(lineno), Nconv(n1, obj.FmtShort))
}
}
// Set up bogus outputs
initEscretval(e, n, fntype)
// If there is a receiver, it also leaks to heap.
if n.Op != OCALLFUNC {
- t := getthisx(fntype).Type
+ t := fntype.Recv().Type
src := n.Left.Left
if haspointers(t.Type) {
escassign(e, &e.theSink, src)
if fn != nil && fn.Op == ONAME && fn.Class == PFUNC &&
fn.Name.Defn != nil && len(fn.Name.Defn.Nbody.Slice()) != 0 && fn.Name.Param.Ntype != nil && fn.Name.Defn.Esc < EscFuncTagged {
if Debug['m'] > 2 {
- fmt.Printf("%v::esccall:: %v in recursive group\n", Ctxt.Line(int(lineno)), Nconv(n, obj.FmtShort))
+ fmt.Printf("%v::esccall:: %v in recursive group\n", linestr(lineno), Nconv(n, obj.FmtShort))
}
// function in same mutually recursive group. Incorporate into flow graph.
// print("esc local fn: %N\n", fn->ntype);
- if fn.Name.Defn.Esc == EscFuncUnknown || nE.Escretval != nil {
+ if fn.Name.Defn.Esc == EscFuncUnknown || nE.Escretval.Len() != 0 {
Fatalf("graph inconsistency")
}
-
// set up out list on this call node
- for lr := fn.Name.Param.Ntype.Rlist; lr != nil; lr = lr.Next {
- nE.Escretval = list(nE.Escretval, lr.N.Left) // type.rlist -> dclfield -> ONAME (PPARAMOUT)
+ for _, n2 := range fn.Name.Param.Ntype.Rlist.Slice() {
+ nE.Escretval.Append(n2.Left) // type.rlist -> dclfield -> ONAME (PPARAMOUT)
}
// Receiver.
}
var src *Node
- for lr := fn.Name.Param.Ntype.List; ll != nil && lr != nil; ll, lr = ll.Next, lr.Next {
- src = ll.N
- if lr.N.Isddd && !n.Isddd {
+ llit := nodeSeqIterate(ll)
+ for lrit := nodeSeqIterate(fn.Name.Param.Ntype.List); !llit.Done() && !lrit.Done(); llit.Next() {
+ src = llit.N()
+ if lrit.N().Isddd && !n.Isddd {
// Introduce ODDDARG node to represent ... allocation.
src = Nod(ODDDARG, nil, nil)
src.Type = typ(TARRAY)
- src.Type.Type = lr.N.Type.Type
- src.Type.Bound = int64(count(ll))
+ src.Type.Type = lrit.N().Type.Type
+ src.Type.Bound = int64(llit.Len())
src.Type = Ptrto(src.Type) // make pointer so it will be tracked
src.Lineno = n.Lineno
e.track(src)
n.Right = src
}
- if lr.N.Left != nil {
- escassign(e, lr.N.Left, src)
+ if lrit.N().Left != nil {
+ escassign(e, lrit.N().Left, src)
}
- if src != ll.N {
+ if src != llit.N() {
break
}
+ lrit.Next()
}
// "..." arguments are untracked
- for ; ll != nil; ll = ll.Next {
+ for ; !llit.Done(); llit.Next() {
if Debug['m'] > 2 {
- fmt.Printf("%v::esccall:: ... <- %v, untracked\n", Ctxt.Line(int(lineno)), Nconv(ll.N, obj.FmtShort))
+ fmt.Printf("%v::esccall:: ... <- %v, untracked\n", linestr(lineno), Nconv(llit.N(), obj.FmtShort))
}
- escassign(e, &e.theSink, ll.N)
+ escassign(e, &e.theSink, llit.N())
}
return
}
// Imported or completely analyzed function. Use the escape tags.
- if nE.Escretval != nil {
+ if nE.Escretval.Len() != 0 {
Fatalf("esc already decorated call %v\n", Nconv(n, obj.FmtSign))
}
if Debug['m'] > 2 {
- fmt.Printf("%v::esccall:: %v not recursive\n", Ctxt.Line(int(lineno)), Nconv(n, obj.FmtShort))
+ fmt.Printf("%v::esccall:: %v not recursive\n", linestr(lineno), Nconv(n, obj.FmtShort))
}
// set up out list on this call node with dummy auto ONAMES in the current (calling) function.
// Receiver.
if n.Op != OCALLFUNC {
- t := getthisx(fntype).Type
+ t := fntype.Recv().Type
src := n.Left.Left
if haspointers(t.Type) {
escassignfromtag(e, t.Note, nE.Escretval, src)
}
var src *Node
- for t := getinargx(fntype).Type; ll != nil; ll = ll.Next {
- src = ll.N
+ it := nodeSeqIterate(ll)
+ for t := fntype.Params().Type; !it.Done(); it.Next() {
+ src = it.N()
if t.Isddd && !n.Isddd {
// Introduce ODDDARG node to represent ... allocation.
src = Nod(ODDDARG, nil, nil)
src.Lineno = n.Lineno
src.Type = typ(TARRAY)
src.Type.Type = t.Type.Type
- src.Type.Bound = int64(count(ll))
+ src.Type.Bound = int64(it.Len())
src.Type = Ptrto(src.Type) // make pointer so it will be tracked
e.track(src)
n.Right = src
}
}
- if src != ll.N {
+ if src != it.N() {
// This occurs when function parameter type Isddd and n not Isddd
break
}
t = t.Down
}
- for ; ll != nil; ll = ll.Next {
+ for ; !it.Done(); it.Next() {
if Debug['m'] > 2 {
- fmt.Printf("%v::esccall:: ... <- %v\n", Ctxt.Line(int(lineno)), Nconv(ll.N, obj.FmtShort))
+ fmt.Printf("%v::esccall:: ... <- %v\n", linestr(lineno), Nconv(it.N(), obj.FmtShort))
}
- escassign(e, src, ll.N) // args to slice
+ escassign(e, src, it.N()) // args to slice
}
}
}
if Debug['m'] > 2 {
- fmt.Printf("%v::flows:: %v <- %v\n", Ctxt.Line(int(lineno)), Nconv(dst, obj.FmtShort), Nconv(src, obj.FmtShort))
+ fmt.Printf("%v::flows:: %v <- %v\n", linestr(lineno), Nconv(dst, obj.FmtShort), Nconv(src, obj.FmtShort))
}
dstE := e.nodeEscState(dst)
- if dstE.Escflowsrc == nil {
- e.dsts = list(e.dsts, dst)
+ if len(dstE.Escflowsrc) == 0 {
+ e.dsts = append(e.dsts, dst)
e.dstcount++
}
e.edgecount++
- dstE.Escflowsrc = list(dstE.Escflowsrc, src)
+ dstE.Escflowsrc = append(dstE.Escflowsrc, src)
}
// Whenever we hit a reference node, the level goes up by one, and whenever
fmt.Printf("\nescflood:%d: dst %v scope:%v[%d]\n", e.walkgen, Nconv(dst, obj.FmtShort), e.curfnSym(dst), dstE.Escloopdepth)
}
- for l := dstE.Escflowsrc; l != nil; l = l.Next {
+ for _, n := range dstE.Escflowsrc {
e.walkgen++
- escwalk(e, levelFrom(0), dst, l.N)
+ escwalk(e, levelFrom(0), dst, n)
}
}
if Debug['m'] > 1 {
fmt.Printf("escwalk: level:%d depth:%d %.*s op=%v %v(%v) scope:%v[%d] extraloopdepth=%v\n",
- level, e.pdepth, e.pdepth, "\t\t\t\t\t\t\t\t\t\t", Oconv(int(src.Op), 0), Nconv(src, obj.FmtShort), Jconv(src, obj.FmtShort), e.curfnSym(src), srcE.Escloopdepth, extraloopdepth)
+ level, e.pdepth, e.pdepth, "\t\t\t\t\t\t\t\t\t\t", Oconv(src.Op, 0), Nconv(src, obj.FmtShort), Jconv(src, obj.FmtShort), e.curfnSym(src), srcE.Escloopdepth, extraloopdepth)
}
e.pdepth++
// 4. return *in
if Debug['m'] != 0 {
if Debug['m'] == 1 {
- Warnl(int(src.Lineno), "leaking param: %v to result %v level=%v", Nconv(src, obj.FmtShort), dst.Sym, level.int())
+ Warnl(src.Lineno, "leaking param: %v to result %v level=%v", Nconv(src, obj.FmtShort), dst.Sym, level.int())
} else {
- Warnl(int(src.Lineno), "leaking param: %v to result %v level=%v", Nconv(src, obj.FmtShort), dst.Sym, level)
+ Warnl(src.Lineno, "leaking param: %v to result %v level=%v", Nconv(src, obj.FmtShort), dst.Sym, level)
}
}
if src.Esc&EscMask != EscReturn {
level.int() > 0 {
src.Esc = escMax(EscContentEscapes|src.Esc, EscNone)
if Debug['m'] != 0 {
- Warnl(int(src.Lineno), "mark escaped content: %v", Nconv(src, obj.FmtShort))
+ Warnl(src.Lineno, "mark escaped content: %v", Nconv(src, obj.FmtShort))
}
}
src.Esc = escMax(EscContentEscapes|src.Esc, EscNone)
if Debug['m'] != 0 {
if Debug['m'] == 1 {
- Warnl(int(src.Lineno), "leaking param content: %v", Nconv(src, obj.FmtShort))
+ Warnl(src.Lineno, "leaking param content: %v", Nconv(src, obj.FmtShort))
} else {
- Warnl(int(src.Lineno), "leaking param content: %v level=%v dst.eld=%v src.eld=%v dst=%v",
+ Warnl(src.Lineno, "leaking param content: %v level=%v dst.eld=%v src.eld=%v dst=%v",
Nconv(src, obj.FmtShort), level, dstE.Escloopdepth, modSrcLoopdepth, Nconv(dst, obj.FmtShort))
}
}
src.Esc = EscScope
if Debug['m'] != 0 {
if Debug['m'] == 1 {
- Warnl(int(src.Lineno), "leaking param: %v", Nconv(src, obj.FmtShort))
+ Warnl(src.Lineno, "leaking param: %v", Nconv(src, obj.FmtShort))
} else {
- Warnl(int(src.Lineno), "leaking param: %v level=%v dst.eld=%v src.eld=%v dst=%v",
+ Warnl(src.Lineno, "leaking param: %v level=%v dst.eld=%v src.eld=%v dst=%v",
Nconv(src, obj.FmtShort), level, dstE.Escloopdepth, modSrcLoopdepth, Nconv(dst, obj.FmtShort))
}
}
// original variable.
if src.Class == PPARAMREF {
if leaks && Debug['m'] != 0 {
- Warnl(int(src.Lineno), "leaking closure reference %v", Nconv(src, obj.FmtShort))
+ Warnl(src.Lineno, "leaking closure reference %v", Nconv(src, obj.FmtShort))
}
escwalk(e, level, dst, src.Name.Param.Closure)
}
p = p.Left // merely to satisfy error messages in tests
}
if Debug['m'] > 1 {
- Warnl(int(src.Lineno), "%v escapes to heap, level=%v, dst.eld=%v, src.eld=%v",
+ Warnl(src.Lineno, "%v escapes to heap, level=%v, dst.eld=%v, src.eld=%v",
Nconv(p, obj.FmtShort), level, dstE.Escloopdepth, modSrcLoopdepth)
} else {
- Warnl(int(src.Lineno), "%v escapes to heap", Nconv(p, obj.FmtShort))
+ Warnl(src.Lineno, "%v escapes to heap", Nconv(p, obj.FmtShort))
}
}
escwalkBody(e, level.dec(), dst, src.Left, modSrcLoopdepth)
}
case OAPPEND:
- escwalk(e, level, dst, src.List.N)
+ escwalk(e, level, dst, src.List.First())
case ODDDARG:
if leaks {
src.Esc = EscHeap
if Debug['m'] != 0 {
- Warnl(int(src.Lineno), "%v escapes to heap", Nconv(src, obj.FmtShort))
+ Warnl(src.Lineno, "%v escapes to heap", Nconv(src, obj.FmtShort))
}
extraloopdepth = modSrcLoopdepth
}
if Isfixedarray(src.Type) {
break
}
- for ll := src.List; ll != nil; ll = ll.Next {
- escwalk(e, level.dec(), dst, ll.N.Right)
+ for _, n1 := range src.List.Slice() {
+ escwalk(e, level.dec(), dst, n1.Right)
}
fallthrough
if leaks {
src.Esc = EscHeap
if Debug['m'] != 0 {
- Warnl(int(src.Lineno), "%v escapes to heap", Nconv(src, obj.FmtShort))
+ Warnl(src.Lineno, "%v escapes to heap", Nconv(src, obj.FmtShort))
}
extraloopdepth = modSrcLoopdepth
}
// See e.g. #10466
// This can only happen with functions returning a single result.
case OCALLMETH, OCALLFUNC, OCALLINTER:
- if srcE.Escretval != nil {
+ if srcE.Escretval.Len() != 0 {
if Debug['m'] > 1 {
fmt.Printf("%v:[%d] dst %v escwalk replace src: %v with %v\n",
- Ctxt.Line(int(lineno)), e.loopdepth,
- Nconv(dst, obj.FmtShort), Nconv(src, obj.FmtShort), Nconv(srcE.Escretval.N, obj.FmtShort))
+ linestr(lineno), e.loopdepth,
+ Nconv(dst, obj.FmtShort), Nconv(src, obj.FmtShort), Nconv(srcE.Escretval.First(), obj.FmtShort))
}
- src = srcE.Escretval.N
+ src = srcE.Escretval.First()
srcE = e.nodeEscState(src)
}
}
recurse:
level = level.copy()
- for ll := srcE.Escflowsrc; ll != nil; ll = ll.Next {
- escwalkBody(e, level, dst, ll.N, extraloopdepth)
+ for _, n := range srcE.Escflowsrc {
+ escwalkBody(e, level, dst, n, extraloopdepth)
}
e.pdepth--
// unless //go:noescape is given before the declaration.
if len(func_.Nbody.Slice()) == 0 {
if func_.Noescape {
- for t := getinargx(func_.Type).Type; t != nil; t = t.Down {
+ for t, it := IterFields(func_.Type.Params()); t != nil; t = it.Next() {
if haspointers(t.Type) {
t.Note = mktag(EscNone)
}
// but we are reusing the ability to annotate an individual function
// argument and pass those annotations along to importing code.
narg := 0
- for t := getinargx(func_.Type).Type; t != nil; t = t.Down {
+ for t, it := IterFields(func_.Type.Params()); t != nil; t = it.Next() {
narg++
if t.Type.Etype == TUINTPTR {
if Debug['m'] != 0 {
} else {
name = fmt.Sprintf("arg#%d", narg)
}
- Warnl(int(func_.Lineno), "%v assuming %v is unsafe uintptr", funcSym(func_), name)
+ Warnl(func_.Lineno, "%v assuming %v is unsafe uintptr", funcSym(func_), name)
}
t.Note = &unsafeUintptrTag
}
}
}
-var asmlist *NodeList
+var asmlist []*Node
// Mark n's symbol as exported
func exportsym(n *Node) {
}
if asmhdr != "" && n.Sym.Pkg == localpkg && n.Sym.Flags&SymAsm == 0 {
n.Sym.Flags |= SymAsm
- asmlist = list(asmlist, n)
+ asmlist = append(asmlist, n)
}
}
}
// Look for anything we need for the inline body
-func reexportdeplist(ll *NodeList) {
- for ; ll != nil; ll = ll.Next {
- reexportdep(ll.N)
- }
-}
-
-func reexportdepslice(ll []*Node) {
- for _, n := range ll {
+func reexportdeplist(ll Nodes) {
+ for _, n := range ll.Slice() {
reexportdep(n)
}
}
reexportdeplist(n.List)
reexportdeplist(n.Rlist)
reexportdeplist(n.Ninit)
- reexportdepslice(n.Nbody.Slice())
+ reexportdeplist(n.Nbody)
}
func dumpexportconst(s *Sym) {
}
// NOTE: The space after %#S here is necessary for ld's export data parser.
- exportf("\tfunc %v %v { %v }\n", Sconv(s, obj.FmtSharp), Tconv(t, obj.FmtShort|obj.FmtSharp), Hconvslice(n.Func.Inl.Slice(), obj.FmtSharp|obj.FmtBody))
+ exportf("\tfunc %v %v { %v }\n", Sconv(s, obj.FmtSharp), Tconv(t, obj.FmtShort|obj.FmtSharp), Hconv(n.Func.Inl, obj.FmtSharp|obj.FmtBody))
- reexportdepslice(n.Func.Inl.Slice())
+ reexportdeplist(n.Func.Inl)
} else {
exportf("\tfunc %v %v\n", Sconv(s, obj.FmtSharp), Tconv(t, obj.FmtShort|obj.FmtSharp))
}
if Debug['l'] < 2 {
typecheckinl(f.Type.Nname)
}
- exportf("\tfunc (%v) %v %v { %v }\n", Tconv(getthisx(f.Type).Type, obj.FmtSharp), Sconv(f.Sym, obj.FmtShort|obj.FmtByte|obj.FmtSharp), Tconv(f.Type, obj.FmtShort|obj.FmtSharp), Hconvslice(f.Type.Nname.Func.Inl.Slice(), obj.FmtSharp))
- reexportdepslice(f.Type.Nname.Func.Inl.Slice())
+ exportf("\tfunc (%v) %v %v { %v }\n", Tconv(f.Type.Recv().Type, obj.FmtSharp), Sconv(f.Sym, obj.FmtShort|obj.FmtByte|obj.FmtSharp), Tconv(f.Type, obj.FmtShort|obj.FmtSharp), Hconv(f.Type.Nname.Func.Inl, obj.FmtSharp))
+ reexportdeplist(f.Type.Nname.Func.Inl)
} else {
- exportf("\tfunc (%v) %v %v\n", Tconv(getthisx(f.Type).Type, obj.FmtSharp), Sconv(f.Sym, obj.FmtShort|obj.FmtByte|obj.FmtSharp), Tconv(f.Type, obj.FmtShort|obj.FmtSharp))
+ exportf("\tfunc (%v) %v %v\n", Tconv(f.Type.Recv().Type, obj.FmtSharp), Sconv(f.Sym, obj.FmtShort|obj.FmtByte|obj.FmtSharp), Tconv(f.Type, obj.FmtShort|obj.FmtSharp))
}
}
}
switch s.Def.Op {
default:
- Yyerror("unexpected export symbol: %v %v", Oconv(int(s.Def.Op), 0), s)
+ Yyerror("unexpected export symbol: %v %v", Oconv(s.Def.Op, 0), s)
case OLITERAL:
dumpexportconst(s)
copytype(pt.Nod, t)
pt.Nod = n // unzero nod
pt.Sym.Importdef = importpkg
- pt.Sym.Lastlineno = int32(parserline())
+ pt.Sym.Lastlineno = lineno
declare(n, PEXTERN)
checkwidth(pt)
} else if !Eqtype(pt.Orig, t) {
}
func dumpasmhdr() {
- var b *obj.Biobuf
-
b, err := obj.Bopenw(asmhdr)
if err != nil {
Fatalf("%v", err)
}
- fmt.Fprintf(b, "// generated by %cg -asmhdr from package %s\n\n", Thearch.Thechar, localpkg.Name)
- var n *Node
- var t *Type
- for l := asmlist; l != nil; l = l.Next {
- n = l.N
+ fmt.Fprintf(b, "// generated by compile -asmhdr from package %s\n\n", localpkg.Name)
+ for _, n := range asmlist {
if isblanksym(n.Sym) {
continue
}
fmt.Fprintf(b, "#define const_%s %v\n", n.Sym.Name, Vconv(n.Val(), obj.FmtSharp))
case OTYPE:
- t = n.Type
+ t := n.Type
if t.Etype != TSTRUCT || t.Map != nil || t.Funarg {
break
}
}
// Fmt "%O": Node opcodes
-func Oconv(o int, flag int) string {
+func Oconv(o Op, flag int) string {
if (flag&obj.FmtSharp != 0) || fmtmode != FDbg {
- if o >= 0 && o < len(goopnames) && goopnames[o] != "" {
+ if o >= 0 && int(o) < len(goopnames) && goopnames[o] != "" {
return goopnames[o]
}
}
- if o >= 0 && o < len(opnames) && opnames[o] != "" {
+ if o >= 0 && int(o) < len(opnames) && opnames[o] != "" {
return opnames[o]
}
} else {
if t.Thistuple != 0 {
buf.WriteString("method")
- buf.WriteString(Tconv(getthisx(t), 0))
+ buf.WriteString(Tconv(t.Recv(), 0))
buf.WriteString(" ")
}
buf.WriteString("func")
}
- buf.WriteString(Tconv(getinargx(t), 0))
+ buf.WriteString(Tconv(t.Params(), 0))
switch t.Outtuple {
case 0:
case 1:
if fmtmode != FExp {
buf.WriteString(" ")
- buf.WriteString(Tconv(getoutargx(t).Type.Type, 0)) // struct->field->field's type
+ buf.WriteString(Tconv(t.Results().Type.Type, 0)) // struct->field->field's type
break
}
fallthrough
default:
buf.WriteString(" ")
- buf.WriteString(Tconv(getoutargx(t), 0))
+ buf.WriteString(Tconv(t.Results(), 0))
}
return buf.String()
// block starting with the init statements.
// if we can just say "for" n->ninit; ... then do so
- simpleinit := n.Ninit != nil && n.Ninit.Next == nil && n.Ninit.N.Ninit == nil && stmtwithinit(n.Op)
+ simpleinit := n.Ninit.Len() == 1 && n.Ninit.First().Ninit.Len() == 0 && stmtwithinit(n.Op)
// otherwise, print the inits as separate statements
- complexinit := n.Ninit != nil && !simpleinit && (fmtmode != FErr)
+ complexinit := n.Ninit.Len() != 0 && !simpleinit && (fmtmode != FErr)
// but if it was for if/for/switch, put in an extra surrounding block to limit the scope
extrablock := complexinit && stmtwithinit(n.Op)
break
}
- f += fmt.Sprintf("%v %v= %v", n.Left, Oconv(int(n.Etype), obj.FmtSharp), n.Right)
+ f += fmt.Sprintf("%v %v= %v", n.Left, Oconv(Op(n.Etype), obj.FmtSharp), n.Right)
case OAS2:
if n.Colas && !complexinit {
case OIF:
if simpleinit {
- f += fmt.Sprintf("if %v; %v { %v }", n.Ninit.N, n.Left, n.Nbody)
+ f += fmt.Sprintf("if %v; %v { %v }", n.Ninit.First(), n.Left, n.Nbody)
} else {
f += fmt.Sprintf("if %v { %v }", n.Left, n.Nbody)
}
- if n.Rlist != nil {
+ if n.Rlist.Len() != 0 {
f += fmt.Sprintf(" else { %v }", n.Rlist)
}
f += "for"
if simpleinit {
- f += fmt.Sprintf(" %v;", n.Ninit.N)
+ f += fmt.Sprintf(" %v;", n.Ninit.First())
} else if n.Right != nil {
f += " ;"
}
break
}
- if n.List == nil {
+ if n.List.Len() == 0 {
f += fmt.Sprintf("for range %v { %v }", n.Right, n.Nbody)
break
}
case OSELECT, OSWITCH:
if fmtmode == FErr {
- f += fmt.Sprintf("%v statement", Oconv(int(n.Op), 0))
+ f += fmt.Sprintf("%v statement", Oconv(n.Op, 0))
break
}
- f += Oconv(int(n.Op), obj.FmtSharp)
+ f += Oconv(n.Op, obj.FmtSharp)
if simpleinit {
- f += fmt.Sprintf(" %v;", n.Ninit.N)
+ f += fmt.Sprintf(" %v;", n.Ninit.First())
}
if n.Left != nil {
f += Nconv(n.Left, 0)
f += fmt.Sprintf(" { %v }", n.List)
case OCASE, OXCASE:
- if n.List != nil {
+ if n.List.Len() != 0 {
f += fmt.Sprintf("case %v: %v", Hconv(n.List, obj.FmtComma), n.Nbody)
} else {
f += fmt.Sprintf("default: %v", n.Nbody)
OFALL,
OXFALL:
if n.Left != nil {
- f += fmt.Sprintf("%v %v", Oconv(int(n.Op), obj.FmtSharp), n.Left)
+ f += fmt.Sprintf("%v %v", Oconv(n.Op, obj.FmtSharp), n.Left)
} else {
- f += Oconv(int(n.Op), obj.FmtSharp)
+ f += Oconv(n.Op, obj.FmtSharp)
}
case OEMPTY:
} else {
f += fmt.Sprintf("(%v{", n.Type)
}
- for l := n.List; l != nil; l = l.Next {
- f += fmt.Sprintf(" %v:%v", Sconv(l.N.Left.Sym, obj.FmtShort|obj.FmtByte), l.N.Right)
+ for it := nodeSeqIterate(n.List); !it.Done(); it.Next() {
+ f += fmt.Sprintf(" %v:%v", Sconv(it.N().Left.Sym, obj.FmtShort|obj.FmtByte), it.N().Right)
- if l.Next != nil {
+ if it.Len() > 1 {
f += ","
} else {
f += " "
return f
case OCOPY, OCOMPLEX:
- return fmt.Sprintf("%v(%v, %v)", Oconv(int(n.Op), obj.FmtSharp), n.Left, n.Right)
+ return fmt.Sprintf("%v(%v, %v)", Oconv(n.Op, obj.FmtSharp), n.Left, n.Right)
case OCONV,
OCONVIFACE,
OPRINT,
OPRINTN:
if n.Left != nil {
- return fmt.Sprintf("%v(%v)", Oconv(int(n.Op), obj.FmtSharp), n.Left)
+ return fmt.Sprintf("%v(%v)", Oconv(n.Op, obj.FmtSharp), n.Left)
}
if n.Isddd {
- return fmt.Sprintf("%v(%v...)", Oconv(int(n.Op), obj.FmtSharp), Hconv(n.List, obj.FmtComma))
+ return fmt.Sprintf("%v(%v...)", Oconv(n.Op, obj.FmtSharp), Hconv(n.List, obj.FmtComma))
}
- return fmt.Sprintf("%v(%v)", Oconv(int(n.Op), obj.FmtSharp), Hconv(n.List, obj.FmtComma))
+ return fmt.Sprintf("%v(%v)", Oconv(n.Op, obj.FmtSharp), Hconv(n.List, obj.FmtComma))
case OCALL, OCALLFUNC, OCALLINTER, OCALLMETH, OGETG:
var f string
return f
case OMAKEMAP, OMAKECHAN, OMAKESLICE:
- if n.List != nil { // pre-typecheck
+ if n.List.Len() != 0 { // pre-typecheck
return fmt.Sprintf("make(%v, %v)", n.Type, Hconv(n.List, obj.FmtComma))
}
if n.Right != nil {
ORECV:
var f string
if n.Left.Op == n.Op {
- f += fmt.Sprintf("%v ", Oconv(int(n.Op), obj.FmtSharp))
+ f += fmt.Sprintf("%v ", Oconv(n.Op, obj.FmtSharp))
} else {
- f += Oconv(int(n.Op), obj.FmtSharp)
+ f += Oconv(n.Op, obj.FmtSharp)
}
f += exprfmt(n.Left, nprec+1)
return f
var f string
f += exprfmt(n.Left, nprec)
- f += fmt.Sprintf(" %v ", Oconv(int(n.Op), obj.FmtSharp))
+ f += fmt.Sprintf(" %v ", Oconv(n.Op, obj.FmtSharp))
f += exprfmt(n.Right, nprec+1)
return f
case OADDSTR:
var f string
- for l := n.List; l != nil; l = l.Next {
- if l != n.List {
+ i := 0
+ for _, n1 := range n.List.Slice() {
+ if i != 0 {
f += " + "
}
- f += exprfmt(l.N, nprec)
+ f += exprfmt(n1, nprec)
+ i++
}
return f
var f string
f += exprfmt(n.Left, nprec)
// TODO(marvin): Fix Node.EType type union.
- f += fmt.Sprintf(" %v ", Oconv(int(n.Etype), obj.FmtSharp))
+ f += fmt.Sprintf(" %v ", Oconv(Op(n.Etype), obj.FmtSharp))
f += exprfmt(n.Right, nprec+1)
return f
}
- return fmt.Sprintf("<node %v>", Oconv(int(n.Op), 0))
+ return fmt.Sprintf("<node %v>", Oconv(n.Op, 0))
}
func nodefmt(n *Node, flag int) string {
return buf.String()
}
- if n.Ninit != nil {
- fmt.Fprintf(&buf, "%v-init%v", Oconv(int(n.Op), 0), n.Ninit)
+ if n.Ninit.Len() != 0 {
+ fmt.Fprintf(&buf, "%v-init%v", Oconv(n.Op, 0), n.Ninit)
indent(&buf)
}
}
switch n.Op {
default:
- fmt.Fprintf(&buf, "%v%v", Oconv(int(n.Op), 0), Jconv(n, 0))
+ fmt.Fprintf(&buf, "%v%v", Oconv(n.Op, 0), Jconv(n, 0))
case OREGISTER, OINDREG:
- fmt.Fprintf(&buf, "%v-%v%v", Oconv(int(n.Op), 0), obj.Rconv(int(n.Reg)), Jconv(n, 0))
+ fmt.Fprintf(&buf, "%v-%v%v", Oconv(n.Op, 0), obj.Rconv(int(n.Reg)), Jconv(n, 0))
case OLITERAL:
- fmt.Fprintf(&buf, "%v-%v%v", Oconv(int(n.Op), 0), Vconv(n.Val(), 0), Jconv(n, 0))
+ fmt.Fprintf(&buf, "%v-%v%v", Oconv(n.Op, 0), Vconv(n.Val(), 0), Jconv(n, 0))
case ONAME, ONONAME:
if n.Sym != nil {
- fmt.Fprintf(&buf, "%v-%v%v", Oconv(int(n.Op), 0), n.Sym, Jconv(n, 0))
+ fmt.Fprintf(&buf, "%v-%v%v", Oconv(n.Op, 0), n.Sym, Jconv(n, 0))
} else {
- fmt.Fprintf(&buf, "%v%v", Oconv(int(n.Op), 0), Jconv(n, 0))
+ fmt.Fprintf(&buf, "%v%v", Oconv(n.Op, 0), Jconv(n, 0))
}
if recur && n.Type == nil && n.Name != nil && n.Name.Param != nil && n.Name.Param.Ntype != nil {
indent(&buf)
- fmt.Fprintf(&buf, "%v-ntype%v", Oconv(int(n.Op), 0), n.Name.Param.Ntype)
+ fmt.Fprintf(&buf, "%v-ntype%v", Oconv(n.Op, 0), n.Name.Param.Ntype)
}
case OASOP:
- fmt.Fprintf(&buf, "%v-%v%v", Oconv(int(n.Op), 0), Oconv(int(n.Etype), 0), Jconv(n, 0))
+ fmt.Fprintf(&buf, "%v-%v%v", Oconv(n.Op, 0), Oconv(Op(n.Etype), 0), Jconv(n, 0))
case OTYPE:
- fmt.Fprintf(&buf, "%v %v%v type=%v", Oconv(int(n.Op), 0), n.Sym, Jconv(n, 0), n.Type)
+ fmt.Fprintf(&buf, "%v %v%v type=%v", Oconv(n.Op, 0), n.Sym, Jconv(n, 0), n.Type)
if recur && n.Type == nil && n.Name.Param.Ntype != nil {
indent(&buf)
- fmt.Fprintf(&buf, "%v-ntype%v", Oconv(int(n.Op), 0), n.Name.Param.Ntype)
+ fmt.Fprintf(&buf, "%v-ntype%v", Oconv(n.Op, 0), n.Name.Param.Ntype)
}
}
if n.Right != nil {
buf.WriteString(Nconv(n.Right, 0))
}
- if n.List != nil {
+ if n.List.Len() != 0 {
indent(&buf)
- fmt.Fprintf(&buf, "%v-list%v", Oconv(int(n.Op), 0), n.List)
+ fmt.Fprintf(&buf, "%v-list%v", Oconv(n.Op, 0), n.List)
}
- if n.Rlist != nil {
+ if n.Rlist.Len() != 0 {
indent(&buf)
- fmt.Fprintf(&buf, "%v-rlist%v", Oconv(int(n.Op), 0), n.Rlist)
+ fmt.Fprintf(&buf, "%v-rlist%v", Oconv(n.Op, 0), n.Rlist)
}
if len(n.Nbody.Slice()) != 0 {
indent(&buf)
- fmt.Fprintf(&buf, "%v-body%v", Oconv(int(n.Op), 0), n.Nbody)
+ fmt.Fprintf(&buf, "%v-body%v", Oconv(n.Op, 0), n.Nbody)
}
}
}
func (l *NodeList) String() string {
- return Hconv(l, 0)
+ var n Nodes
+ n.Set(nodeSeqSlice(l))
+ return Hconv(n, 0)
}
func (n Nodes) String() string {
- return Hconvslice(n.Slice(), 0)
+ return Hconv(n, 0)
}
// Fmt '%H': NodeList.
// Flags: all those of %N plus ',': separate with comma's instead of semicolons.
-func Hconv(l *NodeList, flag int) string {
- if l == nil && fmtmode == FDbg {
+func Hconv(l Nodes, flag int) string {
+ if l.Len() == 0 && fmtmode == FDbg {
return "<nil>"
}
}
var buf bytes.Buffer
- for ; l != nil; l = l.Next {
- buf.WriteString(Nconv(l.N, 0))
- if l.Next != nil {
+ for it := nodeSeqIterate(l); !it.Done(); it.Next() {
+ buf.WriteString(Nconv(it.N(), 0))
+ if it.Len() > 1 {
buf.WriteString(sep)
}
}
return buf.String()
}
-func Hconvslice(l []*Node, flag int) string {
- if len(l) == 0 && fmtmode == FDbg {
- return "<nil>"
- }
-
- sf := flag
- sm, sb := setfmode(&flag)
- sep := "; "
- if fmtmode == FDbg {
- sep = "\n"
- } else if flag&obj.FmtComma != 0 {
- sep = ", "
- }
-
- var buf bytes.Buffer
- for i, n := range l {
- buf.WriteString(Nconv(n, 0))
- if i+1 < len(l) {
- buf.WriteString(sep)
- }
- }
-
- flag = sf
- fmtbody = sb
- fmtmode = sm
- return buf.String()
-}
-
-func dumplist(s string, l *NodeList) {
+func dumplist(s string, l Nodes) {
fmt.Printf("%s%v\n", s, Hconv(l, obj.FmtSign))
}
-func dumpslice(s string, l []*Node) {
- fmt.Printf("%s%v\n", s, Hconvslice(l, obj.FmtSign))
-}
-
func Dump(s string, n *Node) {
fmt.Printf("%s [%p]%v\n", s, n, Nconv(n, obj.FmtSign))
}
fs = fs.Link
}
if fs != to.Sym {
- lno := int(lineno)
+ lno := lineno
setlineno(from)
// decide what to complain about.
}
if block != nil {
- Yyerror("goto %v jumps into block starting at %v", from.Left.Sym, Ctxt.Line(int(block.Lastlineno)))
+ Yyerror("goto %v jumps into block starting at %v", from.Left.Sym, linestr(block.Lastlineno))
} else {
- Yyerror("goto %v jumps over declaration of %v at %v", from.Left.Sym, dcl, Ctxt.Line(int(dcl.Lastlineno)))
+ Yyerror("goto %v jumps over declaration of %v at %v", from.Left.Sym, dcl, linestr(dcl.Lastlineno))
}
- lineno = int32(lno)
+ lineno = lno
}
}
}
// compile statements
-func Genlist(l *NodeList) {
- for ; l != nil; l = l.Next {
- gen(l.N)
- }
-}
-
-func Genslice(l []*Node) {
- for _, n := range l {
+func Genlist(l Nodes) {
+ for _, n := range l.Slice() {
gen(n)
}
}
func cgen_proc(n *Node, proc int) {
switch n.Left.Op {
default:
- Fatalf("cgen_proc: unknown call %v", Oconv(int(n.Left.Op), 0))
+ Fatalf("cgen_proc: unknown call %v", Oconv(n.Left.Op, 0))
case OCALLMETH:
cgen_callmeth(n.Left, proc)
q := Gbranch(obj.AJMP, nil, 0)
Patch(p, Pc)
Regrealloc(&r2) // reclaim from above, for this failure path
- fn := syslook("panicdottype", 0)
+ fn := syslook("panicdottype")
dowidth(fn.Type)
call := Nod(OCALLFUNC, fn, nil)
r1.Type = byteptr
r2.Type = byteptr
- call.List = list(list(list1(&r1), &r2), typename(n.Left.Type))
- call.List = ascompatte(OCALLFUNC, call, false, getinarg(fn.Type), call.List, 0, nil)
+ setNodeSeq(&call.List, list(list(list1(&r1), &r2), typename(n.Left.Type)))
+ call.List.Set(ascompatte(OCALLFUNC, call, false, fn.Type.ParamsP(), call.List.Slice(), 0, nil))
gen(call)
Regfree(&r1)
Regfree(&r2)
q := Gbranch(obj.AJMP, nil, 0)
Patch(p, Pc)
- fn := syslook("panicdottype", 0)
+ fn := syslook("panicdottype")
dowidth(fn.Type)
call := Nod(OCALLFUNC, fn, nil)
- call.List = list(list(list1(&r1), &r2), typename(n.Left.Type))
- call.List = ascompatte(OCALLFUNC, call, false, getinarg(fn.Type), call.List, 0, nil)
+ setNodeSeq(&call.List, list(list(list1(&r1), &r2), typename(n.Left.Type)))
+ call.List.Set(ascompatte(OCALLFUNC, call, false, fn.Type.ParamsP(), call.List.Slice(), 0, nil))
gen(call)
Regfree(&r1)
Regfree(&r2)
goto ret
}
- if n.Ninit != nil {
+ if n.Ninit.Len() > 0 {
Genlist(n.Ninit)
}
gen(n.Right) // contin: incr
Patch(p1, Pc) // test:
Bgen(n.Left, false, -1, breakpc) // if(!test) goto break
- Genslice(n.Nbody.Slice()) // body
+ Genlist(n.Nbody) // body
gjmp(continpc)
Patch(breakpc, Pc) // done:
continpc = scontin
p2 := gjmp(nil) // p2: goto else
Patch(p1, Pc) // test:
Bgen(n.Left, false, int(-n.Likely), p2) // if(!test) goto p2
- Genslice(n.Nbody.Slice()) // then
+ Genlist(n.Nbody) // then
p3 := gjmp(nil) // goto done
Patch(p2, Pc) // else:
Genlist(n.Rlist) // else
lab.Breakpc = breakpc
}
- Patch(p1, Pc) // test:
- Genslice(n.Nbody.Slice()) // switch(test) body
- Patch(breakpc, Pc) // done:
+ Patch(p1, Pc) // test:
+ Genlist(n.Nbody) // switch(test) body
+ Patch(breakpc, Pc) // done:
breakpc = sbreak
if lab != nil {
lab.Breakpc = nil
lab.Breakpc = breakpc
}
- Patch(p1, Pc) // test:
- Genslice(n.Nbody.Slice()) // select() body
- Patch(breakpc, Pc) // done:
+ Patch(p1, Pc) // test:
+ Genlist(n.Nbody) // select() body
+ Patch(breakpc, Pc) // done:
breakpc = sbreak
if lab != nil {
lab.Breakpc = nil
Cgen_as_wb(n.Left, n.Right, true)
case OAS2DOTTYPE:
- cgen_dottype(n.Rlist.N, n.List.N, n.List.Next.N, needwritebarrier(n.List.N, n.Rlist.N))
+ cgen_dottype(n.Rlist.First(), n.List.First(), n.List.Second(), needwritebarrier(n.List.First(), n.Rlist.First()))
case OCALLMETH:
cgen_callmeth(n, 0)
for lab := labellist; lab != nil; lab = lab.Link {
if lab.Def == nil {
for _, n := range lab.Use {
- yyerrorl(int(n.Lineno), "label %v not defined", lab.Sym)
+ yyerrorl(n.Lineno, "label %v not defined", lab.Sym)
}
continue
}
if lab.Use == nil && !lab.Used {
- yyerrorl(int(lab.Def.Lineno), "label %v defined and not used", lab.Sym)
+ yyerrorl(lab.Def.Lineno, "label %v defined and not used", lab.Sym)
continue
}
Fsym *Sym // funcsym
}
-type Type struct {
- Etype EType
- Nointerface bool
- Noalg bool
- Chan uint8
- Trecur uint8 // to detect loops
- Printed bool
- Embedded uint8 // TFIELD embedded type
- Funarg bool // on TSTRUCT and TFIELD
- Copyany bool
- Local bool // created in this file
- Deferwidth bool
- Broke bool // broken type definition.
- Isddd bool // TFIELD is ... argument
- Align uint8
- Haspointers uint8 // 0 unknown, 1 no, 2 yes
-
- Nod *Node // canonical OTYPE node
- Orig *Type // original type (type literal or predefined type)
- Lineno int
-
- // TFUNC
- Thistuple int
- Outtuple int
- Intuple int
- Outnamed bool
-
- Method *Type
- Xmethod *Type
-
- Sym *Sym
- Vargen int32 // unique name for OTYPE/ONAME
-
- Nname *Node
- Argwid int64
-
- // most nodes
- Type *Type // actual type for TFIELD, element type for TARRAY, TCHAN, TMAP, TPTRxx
- Width int64 // offset in TFIELD, width in all others
-
- // TFIELD
- Down *Type // next struct field, also key type in TMAP
- Outer *Type // outer struct
- Note *string // literal string annotation
-
- // TARRAY
- Bound int64 // negative is slice
-
- // TMAP
- Bucket *Type // internal type representing a hash bucket
- Hmap *Type // internal type representing a Hmap (map header object)
- Hiter *Type // internal type representing hash iterator state
- Map *Type // link from the above 3 internal types back to the map type.
-
- Maplineno int32 // first use of TFORW as map key
- Embedlineno int32 // first use of TFORW as embedded type
-
- // for TFORW, where to copy the eventual value to
- Copyto []*Node
-
- Lastfn *Node // for usefield
-}
-
type Label struct {
Sym *Sym
Def *Node
var dclstack *Sym
-type Iter struct {
- Done int
- Tfunc *Type
- T *Type
-}
-
-type EType uint8
-
-const (
- Txxx = iota
-
- TINT8
- TUINT8
- TINT16
- TUINT16
- TINT32
- TUINT32
- TINT64
- TUINT64
- TINT
- TUINT
- TUINTPTR
-
- TCOMPLEX64
- TCOMPLEX128
-
- TFLOAT32
- TFLOAT64
-
- TBOOL
-
- TPTR32
- TPTR64
-
- TFUNC
- TARRAY
- T_old_DARRAY // Doesn't seem to be used in existing code. Used now for Isddd export (see bexport.go). TODO(gri) rename.
- TSTRUCT
- TCHAN
- TMAP
- TINTER
- TFORW
- TFIELD
- TANY
- TSTRING
- TUNSAFEPTR
-
- // pseudo-types for literals
- TIDEAL
- TNIL
- TBLANK
-
- // pseudo-type for frame layout
- TFUNCARGS
- TCHANARGS
- TINTERMETH
-
- NTYPE
-)
-
// Ctype describes the constant kind of an "ideal" (untyped) constant.
type Ctype int8
Ecomplit = 1 << 11 // type in composite literal
)
-type Typedef struct {
- Name string
- Etype EType
- Sameas EType
-}
-
type Sig struct {
name string
pkg *Pkg
var typelinkpkg *Pkg // fake package for runtime type info (data)
-var weaktypepkg *Pkg // weak references to runtime type info
-
var unsafepkg *Pkg // package unsafe
var trackpkg *Pkg // fake package for field tracking
var asmhdr string
-var Types [NTYPE]*Type
-
-var idealstring *Type
-
-var idealbool *Type
-
-var bytetype *Type
-
-var runetype *Type
-
-var errortype *Type
-
var Simtype [NTYPE]EType
var (
Thechar int
Thestring string
Thelinkarch *obj.LinkArch
- Typedefs []Typedef
REGSP int
REGCTXT int
REGCALLX int // BX
Excise func(*Flow)
Expandchecks func(*obj.Prog)
Getg func(*Node)
- Gins func(int, *Node, *Node) *obj.Prog
+ Gins func(obj.As, *Node, *Node) *obj.Prog
// Ginscmp generates code comparing n1 to n2 and jumping away if op is satisfied.
// The returned prog should be Patch'ed with the jump target.
// corresponding to the desired value.
// The second argument is the destination.
// If not present, Ginsboolval will be emulated with jumps.
- Ginsboolval func(int, *Node)
+ Ginsboolval func(obj.As, *Node)
- Ginscon func(int, int64, *Node)
+ Ginscon func(obj.As, int64, *Node)
Ginsnop func()
Gmove func(*Node, *Node)
Igenindex func(*Node, *Node, bool) *obj.Prog
Smallindir func(*obj.Addr, *obj.Addr) bool
Stackaddr func(*obj.Addr) bool
Blockcopy func(*Node, *Node, int64, int64, int64)
- Sudoaddable func(int, *Node, *obj.Addr) bool
+ Sudoaddable func(obj.As, *Node, *obj.Addr) bool
Sudoclean func()
Excludedregs func() uint64
RtoB func(int) uint64
FtoB func(int) uint64
BtoR func(uint64) int
BtoF func(uint64) int
- Optoas func(Op, *Type) int
+ Optoas func(Op, *Type) obj.As
Doregbits func(int) uint64
Regnames func(*int) []string
Use387 bool // should 8g use 387 FP instructions instead of sse2.
return true
}
-func Gbranch(as int, t *Type, likely int) *obj.Prog {
+func Gbranch(as obj.As, t *Type, likely int) *obj.Prog {
p := Prog(as)
p.To.Type = obj.TYPE_BRANCH
p.To.Val = nil
return p
}
-func Prog(as int) *obj.Prog {
+func Prog(as obj.As) *obj.Prog {
var p *obj.Prog
if as == obj.ADATA || as == obj.AGLOBL {
}
}
- p.As = int16(as)
+ p.As = as
p.Lineno = lineno
return p
}
a := a // copy to let escape into Ctxt.Dconv
Debug['h'] = 1
Dump("naddr", n)
- Fatalf("naddr: bad %v %v", Oconv(int(n.Op), 0), Ctxt.Dconv(a))
+ Fatalf("naddr: bad %v %v", Oconv(n.Op, 0), Ctxt.Dconv(a))
case OREGISTER:
a.Type = obj.TYPE_REG
if n.Left.Type.Etype != TSTRUCT || n.Left.Type.Type.Sym != n.Right.Sym {
Debug['h'] = 1
Dump("naddr", n)
- Fatalf("naddr: bad %v %v", Oconv(int(n.Op), 0), Ctxt.Dconv(a))
+ Fatalf("naddr: bad %v %v", Oconv(n.Op, 0), Ctxt.Dconv(a))
}
Naddr(a, n.Left)
}
if a.Type != obj.TYPE_MEM {
a := a // copy to let escape into Ctxt.Dconv
- Fatalf("naddr: OADDR %v (from %v)", Ctxt.Dconv(a), Oconv(int(n.Left.Op), 0))
+ Fatalf("naddr: OADDR %v (from %v)", Ctxt.Dconv(a), Oconv(n.Left.Op, 0))
}
a.Type = obj.TYPE_ADDR
n = Nod(ONAME, nil, nil)
n.Sym = Lookup(".args")
n.Type = t
- var savet Iter
- first := Structfirst(&savet, &t)
+ first, _ := IterFields(t)
if first == nil {
Fatalf("nodarg: bad struct")
}
return
}
- nf := initfix(n)
+ nf := initfix(nodeSeqSlice(n))
if !anyinit(nf) {
return
}
b.Likely = 1
r = append(r, b)
// (4a)
- b.Nbody.Set([]*Node{Nod(OCALL, syslook("throwinit", 0), nil)})
+ b.Nbody.Set([]*Node{Nod(OCALL, syslook("throwinit"), nil)})
// (6)
a = Nod(OAS, gatevar, Nodintconst(1))
func fnpkg(fn *Node) *Pkg {
if fn.Type.Thistuple != 0 {
// method
- rcvr := getthisx(fn.Type).Type.Type
+ rcvr := fn.Type.Recv().Type.Type
if Isptr[rcvr.Etype] {
rcvr = rcvr.Type
// Lazy typechecking of imported bodies. For local functions, caninl will set ->typecheck
// because they're a copy of an already checked body.
func typecheckinl(fn *Node) {
- lno := int(setlineno(fn))
+ lno := setlineno(fn)
// typecheckinl is only for imported functions;
// their bodies may refer to unsafe as long as the package
}
if Debug['m'] > 2 {
- fmt.Printf("typecheck import [%v] %v { %v }\n", fn.Sym, Nconv(fn, obj.FmtLong), Hconvslice(fn.Func.Inl.Slice(), obj.FmtSharp))
+ fmt.Printf("typecheck import [%v] %v { %v }\n", fn.Sym, Nconv(fn, obj.FmtLong), Hconv(fn.Func.Inl, obj.FmtSharp))
}
save_safemode := safemode
savefn := Curfn
Curfn = fn
- typecheckslice(fn.Func.Inl.Slice(), Etop)
+ typechecklist(fn.Func.Inl.Slice(), Etop)
Curfn = savefn
safemode = save_safemode
- lineno = int32(lno)
+ lineno = lno
}
// Caninl determines whether fn is inlineable.
// can't handle ... args yet
if Debug['l'] < 3 {
- for t := fn.Type.Type.Down.Down.Type; t != nil; t = t.Down {
+ for t, it := IterFields(fn.Type.Params()); t != nil; t = it.Next() {
if t.Isddd {
return
}
const maxBudget = 80
budget := maxBudget // allowed hairyness
- if ishairyslice(fn.Nbody.Slice(), &budget) || budget < 0 {
+ if ishairylist(fn.Nbody, &budget) || budget < 0 {
return
}
Curfn = fn
fn.Func.Nname.Func.Inl.Set(fn.Nbody.Slice())
- fn.Nbody.Set(inlcopyslice(fn.Func.Nname.Func.Inl.Slice()))
- inldcl := inlcopyslice(fn.Func.Nname.Name.Defn.Func.Dcl)
+ fn.Nbody.Set(inlcopylist(fn.Func.Nname.Func.Inl.Slice()))
+ inldcl := inlcopylist(fn.Func.Nname.Name.Defn.Func.Dcl)
if len(inldcl) > 0 {
fn.Func.Nname.Func.Inldcl = &inldcl
}
fn.Type.Nname = fn.Func.Nname
if Debug['m'] > 1 {
- fmt.Printf("%v: can inline %v as: %v { %v }\n", fn.Line(), Nconv(fn.Func.Nname, obj.FmtSharp), Tconv(fn.Type, obj.FmtSharp), Hconvslice(fn.Func.Nname.Func.Inl.Slice(), obj.FmtSharp))
+ fmt.Printf("%v: can inline %v as: %v { %v }\n", fn.Line(), Nconv(fn.Func.Nname, obj.FmtSharp), Tconv(fn.Type, obj.FmtSharp), Hconv(fn.Func.Nname.Func.Inl, obj.FmtSharp))
} else if Debug['m'] != 0 {
fmt.Printf("%v: can inline %v\n", fn.Line(), fn.Func.Nname)
}
}
// Look for anything we want to punt on.
-func ishairylist(ll *NodeList, budget *int) bool {
- for ; ll != nil; ll = ll.Next {
- if ishairy(ll.N, budget) {
- return true
- }
- }
- return false
-}
-
-func ishairyslice(ll []*Node, budget *int) bool {
- for _, n := range ll {
+func ishairylist(ll Nodes, budget *int) bool {
+ for _, n := range ll.Slice() {
if ishairy(n, budget) {
return true
}
(*budget)--
- return *budget < 0 || ishairy(n.Left, budget) || ishairy(n.Right, budget) || ishairylist(n.List, budget) || ishairylist(n.Rlist, budget) || ishairylist(n.Ninit, budget) || ishairyslice(n.Nbody.Slice(), budget)
+ return *budget < 0 || ishairy(n.Left, budget) || ishairy(n.Right, budget) || ishairylist(n.List, budget) || ishairylist(n.Rlist, budget) || ishairylist(n.Ninit, budget) || ishairylist(n.Nbody, budget)
}
// Inlcopy and inlcopylist recursively copy the body of a function.
// Any name-like node of non-local class is marked for re-export by adding it to
// the exportlist.
-func inlcopylist(ll *NodeList) *NodeList {
- var l *NodeList
- for ; ll != nil; ll = ll.Next {
- l = list(l, inlcopy(ll.N))
+func inlcopylist(ll []*Node) []*Node {
+ s := make([]*Node, 0, nodeSeqLen(ll))
+ for _, n := range ll {
+ s = append(s, inlcopy(n))
}
- return l
+ return s
}
func inlcopy(n *Node) *Node {
}
m.Left = inlcopy(n.Left)
m.Right = inlcopy(n.Right)
- m.List = inlcopylist(n.List)
- m.Rlist = inlcopylist(n.Rlist)
- m.Ninit = inlcopylist(n.Ninit)
- m.Nbody.Set(inlcopyslice(n.Nbody.Slice()))
+ m.List.Set(inlcopylist(n.List.Slice()))
+ m.Rlist.Set(inlcopylist(n.Rlist.Slice()))
+ m.Ninit.Set(inlcopylist(n.Ninit.Slice()))
+ m.Nbody.Set(inlcopylist(n.Nbody.Slice()))
return m
}
-// Inlcopyslice is like inlcopylist, but for a slice.
-func inlcopyslice(ll []*Node) []*Node {
- r := make([]*Node, 0, len(ll))
- for _, ln := range ll {
- c := inlcopy(ln)
- if c != nil {
- r = append(r, c)
- }
- }
- return r
-}
-
// Inlcalls/nodelist/node walks fn's statements and expressions and substitutes any
// calls made to inlineable functions. This is the external entry point.
func inlcalls(fn *Node) {
n.Op = OBLOCK
// n->ninit stays
- n.List = n.Nbody.NodeList()
+ n.List.Set(n.Nbody.Slice())
n.Nbody.Set(nil)
- n.Rlist = nil
+ n.Rlist.Set(nil)
}
// Turn an OINLCALL into a single valued expression.
func inlconv2expr(np **Node) {
n := *np
- r := n.Rlist.N
- addinit(&r, concat(n.Ninit, n.Nbody.NodeList()))
+ r := n.Rlist.First()
+ addinit(&r, append(n.Ninit.Slice(), n.Nbody.Slice()...))
*np = r
}
// containing the inlined statements on the first list element so
// order will be preserved Used in return, oas2func and call
// statements.
-func inlconv2list(n *Node) *NodeList {
- if n.Op != OINLCALL || n.Rlist == nil {
+func inlconv2list(n *Node) []*Node {
+ if n.Op != OINLCALL || n.Rlist.Len() == 0 {
Fatalf("inlconv2list %v\n", Nconv(n, obj.FmtSign))
}
- l := n.Rlist
- addinit(&l.N, concat(n.Ninit, n.Nbody.NodeList()))
- return l
+ s := n.Rlist.Slice()
+ addinit(&s[0], append(n.Ninit.Slice(), n.Nbody.Slice()...))
+ return s
}
-func inlnodelist(l *NodeList) {
- for ; l != nil; l = l.Next {
- inlnode(&l.N)
- }
-}
-
-func inlnodeslice(l []*Node) {
- for i := range l {
- inlnode(&l[i])
+func inlnodelist(l Nodes) {
+ for i := range l.Slice() {
+ inlnode(&l.Slice()[i])
}
}
return
}
- lno := int(setlineno(n))
+ lno := setlineno(n)
inlnodelist(n.Ninit)
- for l := n.Ninit; l != nil; l = l.Next {
- if l.N.Op == OINLCALL {
- inlconv2stmt(l.N)
+ for _, n1 := range n.Ninit.Slice() {
+ if n1.Op == OINLCALL {
+ inlconv2stmt(n1)
}
}
inlnodelist(n.List)
switch n.Op {
case OBLOCK:
- for l := n.List; l != nil; l = l.Next {
- if l.N.Op == OINLCALL {
- inlconv2stmt(l.N)
+ for _, n2 := range n.List.Slice() {
+ if n2.Op == OINLCALL {
+ inlconv2stmt(n2)
}
}
OCALLINTER,
OAPPEND,
OCOMPLEX:
- if count(n.List) == 1 && n.List.N.Op == OINLCALL && count(n.List.N.Rlist) > 1 {
- n.List = inlconv2list(n.List.N)
+ if n.List.Len() == 1 && n.List.First().Op == OINLCALL && n.List.First().Rlist.Len() > 1 {
+ n.List.Set(inlconv2list(n.List.First()))
break
}
fallthrough
default:
- for l := n.List; l != nil; l = l.Next {
- if l.N.Op == OINLCALL {
- inlconv2expr(&l.N)
+ for i3, n3 := range n.List.Slice() {
+ if n3.Op == OINLCALL {
+ inlconv2expr(&n.List.Slice()[i3])
}
}
}
inlnodelist(n.Rlist)
switch n.Op {
case OAS2FUNC:
- if n.Rlist.N.Op == OINLCALL {
- n.Rlist = inlconv2list(n.Rlist.N)
+ if n.Rlist.First().Op == OINLCALL {
+ n.Rlist.Set(inlconv2list(n.Rlist.First()))
n.Op = OAS2
n.Typecheck = 0
typecheck(np, Etop)
fallthrough
default:
- for l := n.Rlist; l != nil; l = l.Next {
- if l.N.Op == OINLCALL {
+ for i4, n4 := range n.Rlist.Slice() {
+ if n4.Op == OINLCALL {
if n.Op == OIF {
- inlconv2stmt(l.N)
+ inlconv2stmt(n4)
} else {
- inlconv2expr(&l.N)
+ inlconv2expr(&n.Rlist.Slice()[i4])
}
}
}
}
- inlnodeslice(n.Nbody.Slice())
+ inlnodelist(n.Nbody)
for _, n := range n.Nbody.Slice() {
if n.Op == OINLCALL {
inlconv2stmt(n)
mkinlcall(np, n.Left.Type.Nname, n.Isddd)
}
- lineno = int32(lno)
+ lineno = lno
}
func mkinlcall(np **Node, fn *Node, isddd bool) {
// Bingo, we have a function node, and it has an inlineable body
if Debug['m'] > 1 {
- fmt.Printf("%v: inlining call to %v %v { %v }\n", n.Line(), fn.Sym, Tconv(fn.Type, obj.FmtSharp), Hconvslice(fn.Func.Inl.Slice(), obj.FmtSharp))
+ fmt.Printf("%v: inlining call to %v %v { %v }\n", n.Line(), fn.Sym, Tconv(fn.Type, obj.FmtSharp), Hconv(fn.Func.Inl, obj.FmtSharp))
} else if Debug['m'] != 0 {
fmt.Printf("%v: inlining call to %v\n", n.Line(), fn)
}
typecheck(&ln.Name.Inlvar, Erv)
if ln.Class&^PHEAP != PAUTO {
- ninit = list(ninit, Nod(ODCL, ln.Name.Inlvar, nil)) // otherwise gen won't emit the allocations for heapallocs
+ ninit.Append(Nod(ODCL, ln.Name.Inlvar, nil)) // otherwise gen won't emit the allocations for heapallocs
}
}
}
// temporaries for return values.
var m *Node
- for t := getoutargx(fn.Type).Type; t != nil; t = t.Down {
+ for t, it := IterFields(fn.Type.Results()); t != nil; t = it.Next() {
if t != nil && t.Nname != nil && !isblank(t.Nname) {
m = inlvar(t.Nname)
typecheck(&m, Erv)
i++
}
- ninit = list(ninit, Nod(ODCL, m, nil))
+ ninit.Append(Nod(ODCL, m, nil))
inlretvars = list(inlretvars, m)
}
var as *Node
if fn.Type.Thistuple != 0 && n.Left.Op == ODOTMETH {
// method call with a receiver.
- t := getthisx(fn.Type).Type
+ t := fn.Type.Recv().Type
if t != nil && t.Nname != nil && !isblank(t.Nname) && t.Nname.Name.Inlvar == nil {
Fatalf("missing inlvar for %v\n", t.Nname)
as = Nod(OAS, tinlvar(t), n.Left.Left)
if as != nil {
typecheck(&as, Etop)
- ninit = list(ninit, as)
+ ninit.Append(as)
}
}
var varargtype *Type
varargcount := 0
- for t := fn.Type.Type.Down.Down.Type; t != nil; t = t.Down {
+ for t, it := IterFields(fn.Type.Params()); t != nil; t = it.Next() {
if t.Isddd {
variadic = true
varargtype = t.Type
// check if argument is actually a returned tuple from call.
multiret := 0
- if n.List != nil && n.List.Next == nil {
- switch n.List.N.Op {
+ if n.List.Len() == 1 {
+ switch n.List.First().Op {
case OCALL, OCALLFUNC, OCALLINTER, OCALLMETH:
- if n.List.N.Left.Type.Outtuple > 1 {
- multiret = n.List.N.Left.Type.Outtuple - 1
+ if n.List.First().Left.Type.Outtuple > 1 {
+ multiret = n.List.First().Left.Type.Outtuple - 1
}
}
}
if variadic {
- varargcount = count(n.List) + multiret
+ varargcount = n.List.Len() + multiret
if n.Left.Op != ODOTMETH {
varargcount -= fn.Type.Thistuple
}
// assign arguments to the parameters' temp names
as = Nod(OAS2, nil, nil)
- as.Rlist = n.List
- ll := n.List
+ as.Rlist.Set(n.List.Slice())
+ it := nodeSeqIterate(n.List)
// TODO: if len(nlist) == 1 but multiple args, check that n->list->n is a call?
if fn.Type.Thistuple != 0 && n.Left.Op != ODOTMETH {
// non-method call to method
- if n.List == nil {
+ if n.List.Len() == 0 {
Fatalf("non-method call to method without first arg: %v", Nconv(n, obj.FmtSign))
}
// append receiver inlvar to LHS.
- t := getthisx(fn.Type).Type
+ t := fn.Type.Recv().Type
if t != nil && t.Nname != nil && !isblank(t.Nname) && t.Nname.Name.Inlvar == nil {
Fatalf("missing inlvar for %v\n", t.Nname)
if t == nil {
Fatalf("method call unknown receiver type: %v", Nconv(n, obj.FmtSign))
}
- as.List = list(as.List, tinlvar(t))
- ll = ll.Next // track argument count.
+ as.List.Append(tinlvar(t))
+ it.Next() // track argument count.
}
// append ordinary arguments to LHS.
- chkargcount := n.List != nil && n.List.Next != nil
+ chkargcount := n.List.Len() > 1
- var vararg *Node // the slice argument to a variadic call
- var varargs *NodeList // the list of LHS names to put in vararg.
+ var vararg *Node // the slice argument to a variadic call
+ var varargs []*Node // the list of LHS names to put in vararg.
if !chkargcount {
// 0 or 1 expression on RHS.
var i int
- for t := getinargx(fn.Type).Type; t != nil; t = t.Down {
+ for t, it2 := IterFields(fn.Type.Params()); t != nil; t = it2.Next() {
if variadic && t.Isddd {
vararg = tinlvar(t)
- for i = 0; i < varargcount && ll != nil; i++ {
+ for i = 0; i < varargcount && it.Len() != 0; i++ {
m = argvar(varargtype, i)
- varargs = list(varargs, m)
- as.List = list(as.List, m)
+ varargs = append(varargs, m)
+ as.List.Append(m)
}
break
}
- as.List = list(as.List, tinlvar(t))
+ as.List.Append(tinlvar(t))
}
} else {
// match arguments except final variadic (unless the call is dotted itself)
var t *Type
- for t = getinargx(fn.Type).Type; t != nil; {
- if ll == nil {
+ for t = fn.Type.Params().Type; t != nil; {
+ if it.Done() {
break
}
if variadic && t.Isddd {
break
}
- as.List = list(as.List, tinlvar(t))
+ as.List.Append(tinlvar(t))
t = t.Down
- ll = ll.Next
+ it.Next()
}
// match varargcount arguments with variadic parameters.
if variadic && t != nil && t.Isddd {
vararg = tinlvar(t)
var i int
- for i = 0; i < varargcount && ll != nil; i++ {
+ for i = 0; i < varargcount && !it.Done(); i++ {
m = argvar(varargtype, i)
- varargs = list(varargs, m)
- as.List = list(as.List, m)
- ll = ll.Next
+ varargs = append(varargs, m)
+ as.List.Append(m)
+ it.Next()
}
if i == varargcount {
}
}
- if ll != nil || t != nil {
- Fatalf("arg count mismatch: %v vs %v\n", Tconv(getinargx(fn.Type), obj.FmtSharp), Hconv(n.List, obj.FmtComma))
+ if !it.Done() || t != nil {
+ Fatalf("arg count mismatch: %v vs %v\n", Tconv(fn.Type.Params(), obj.FmtSharp), Hconv(n.List, obj.FmtComma))
}
}
- if as.Rlist != nil {
+ if as.Rlist.Len() != 0 {
typecheck(&as, Etop)
- ninit = list(ninit, as)
+ ninit.Append(as)
}
// turn the variadic args into a slice.
vararrtype.Bound = int64(varargcount)
as.Right = Nod(OCOMPLIT, nil, typenod(varargtype))
- as.Right.List = varargs
+ as.Right.List.Set(varargs)
as.Right = Nod(OSLICE, as.Right, Nod(OKEY, nil, nil))
}
typecheck(&as, Etop)
- ninit = list(ninit, as)
+ ninit.Append(as)
}
// zero the outparams
for ll := inlretvars; ll != nil; ll = ll.Next {
as = Nod(OAS, ll.N, nil)
typecheck(&as, Etop)
- ninit = list(ninit, as)
+ ninit.Append(as)
}
inlretlabel = newlabel_inl()
inlgen++
- body := inlsubstslice(fn.Func.Inl.Slice())
+ body := inlsubstlist(fn.Func.Inl)
body = append(body, Nod(OGOTO, inlretlabel, nil)) // avoid 'not used' when function doesn't have return
body = append(body, Nod(OLABEL, inlretlabel, nil))
call := Nod(OINLCALL, nil, nil)
- call.Ninit = ninit
+ call.Ninit.Set(ninit.Slice())
call.Nbody.Set(body)
- call.Rlist = inlretvars
+ setNodeSeq(&call.Rlist, inlretvars)
call.Type = n.Type
call.Typecheck = 1
// Hide the args from setlno -- the parameters to the inlined
// call already have good line numbers that should be preserved.
args := as.Rlist
- as.Rlist = nil
+ as.Rlist.Set(nil)
- setlno(call, int(n.Lineno))
+ setlno(call, n.Lineno)
- as.Rlist = args
+ as.Rlist.Set(args.Slice())
//dumplist("call body", body);
// luckily these are small.
body = fn.Func.Inl.Slice()
fn.Func.Inl.Set(nil) // prevent infinite recursion (shouldn't happen anyway)
- inlnodeslice(call.Nbody.Slice())
+ inlnodelist(call.Nbody)
for _, n := range call.Nbody.Slice() {
if n.Op == OINLCALL {
inlconv2stmt(n)
return n
}
-// inlsubst, inlsubstlist, and inlsubstslice recursively copy the body of the
-// saved pristine ->inl body of the function while substituting references
+// inlsubst and inlsubstlist recursively copy the body of the saved
+// pristine ->inl body of the function while substituting references
// to input/output parameters with ones to the tmpnames, and
// substituting returns with assignments to the output.
-func inlsubstlist(ll *NodeList) *NodeList {
- var l *NodeList
- for ; ll != nil; ll = ll.Next {
- l = list(l, inlsubst(ll.N))
+func inlsubstlist(ll Nodes) []*Node {
+ s := make([]*Node, 0, ll.Len())
+ for _, n := range ll.Slice() {
+ s = append(s, inlsubst(n))
}
- return l
-}
-
-func inlsubstslice(ll []*Node) []*Node {
- l := make([]*Node, 0, len(ll))
- for _, n := range ll {
- l = append(l, inlsubst(n))
- }
- return l
+ return s
}
func inlsubst(n *Node) *Node {
case ORETURN:
m := Nod(OGOTO, inlretlabel, nil)
- m.Ninit = inlsubstlist(n.Ninit)
+ m.Ninit.Set(inlsubstlist(n.Ninit))
- if inlretvars != nil && n.List != nil {
+ if inlretvars != nil && n.List.Len() != 0 {
as := Nod(OAS2, nil, nil)
// shallow copy or OINLCALL->rlist will be the same list, and later walk and typecheck may clobber that.
for ll := inlretvars; ll != nil; ll = ll.Next {
- as.List = list(as.List, ll.N)
+ as.List.Append(ll.N)
}
- as.Rlist = inlsubstlist(n.List)
+ as.Rlist.Set(inlsubstlist(n.List))
typecheck(&as, Etop)
- m.Ninit = list(m.Ninit, as)
+ m.Ninit.Append(as)
}
- typechecklist(m.Ninit, Etop)
+ typechecklist(m.Ninit.Slice(), Etop)
typecheck(&m, Etop)
// dump("Return after substitution", m);
case OGOTO, OLABEL:
m := Nod(OXXX, nil, nil)
*m = *n
- m.Ninit = nil
+ m.Ninit.Set(nil)
p := fmt.Sprintf("%s·%d", n.Left.Sym.Name, inlgen)
m.Left = newname(Lookup(p))
m := Nod(OXXX, nil, nil)
*m = *n
- m.Ninit = nil
+ m.Ninit.Set(nil)
if n.Op == OCLOSURE {
Fatalf("cannot inline function containing closure: %v", Nconv(n, obj.FmtSign))
m.Left = inlsubst(n.Left)
m.Right = inlsubst(n.Right)
- m.List = inlsubstlist(n.List)
- m.Rlist = inlsubstlist(n.Rlist)
- m.Ninit = concat(m.Ninit, inlsubstlist(n.Ninit))
- m.Nbody.Set(inlsubstslice(n.Nbody.Slice()))
+ m.List.Set(inlsubstlist(n.List))
+ m.Rlist.Set(inlsubstlist(n.Rlist))
+ m.Ninit.Set(append(m.Ninit.Slice(), inlsubstlist(n.Ninit)...))
+ m.Nbody.Set(inlsubstlist(n.Nbody))
return m
}
// Plaster over linenumbers
-func setlnolist(ll *NodeList, lno int) {
- for ; ll != nil; ll = ll.Next {
- setlno(ll.N, lno)
- }
-}
-
-func setlnoslice(ll []*Node, lno int) {
- for _, n := range ll {
+func setlnolist(ll Nodes, lno int32) {
+ for _, n := range ll.Slice() {
setlno(n, lno)
}
}
-func setlno(n *Node, lno int) {
+func setlno(n *Node, lno int32) {
if n == nil {
return
}
// don't clobber names, unless they're freshly synthesized
if n.Op != ONAME || n.Lineno == 0 {
- n.Lineno = int32(lno)
+ n.Lineno = lno
}
setlno(n.Left, lno)
setlnolist(n.List, lno)
setlnolist(n.Rlist, lno)
setlnolist(n.Ninit, lno)
- setlnoslice(n.Nbody.Slice(), lno)
+ setlnolist(n.Nbody, lno)
}
itabpkg.Name = "go.itab"
itabpkg.Prefix = "go.itab" // not go%2eitab
- weaktypepkg = mkpkg("go.weak.type")
-
- weaktypepkg.Name = "go.weak.type"
- weaktypepkg.Prefix = "go.weak.type" // not go%2eweak%2etype
-
typelinkpkg = mkpkg("go.typelink")
typelinkpkg.Name = "go.typelink"
typelinkpkg.Prefix = "go.typelink" // not go%2etypelink
obj.Flagstr("cpuprofile", "write cpu profile to `file`", &cpuprofile)
obj.Flagstr("memprofile", "write memory profile to `file`", &memprofile)
obj.Flagint64("memprofilerate", "set runtime.MemProfileRate to `rate`", &memprofilerate)
+ flag.BoolVar(&ssaEnabled, "ssa", true, "use SSA backend to generate code")
obj.Flagparse(usage)
if flag_dynlink {
Curfn = l.N
decldepth = 1
saveerrors()
- typecheckslice(l.N.Nbody.Slice(), Etop)
+ typechecklist(l.N.Nbody.Slice(), Etop)
checkreturn(l.N)
if nerrors != 0 {
l.N.Nbody.Set(nil) // type errors; do not compile
lx:
if Debug['x'] != 0 {
if c >= utf8.RuneSelf {
- fmt.Printf("%v lex: TOKEN %s\n", Ctxt.Line(int(lineno)), lexname(c))
+ fmt.Printf("%v lex: TOKEN %s\n", linestr(lineno), lexname(c))
} else {
- fmt.Printf("%v lex: TOKEN '%c'\n", Ctxt.Line(int(lineno)), c)
+ fmt.Printf("%v lex: TOKEN '%c'\n", linestr(lineno), c)
}
}
c := obj.Bgetc(l.bin)
if c < utf8.RuneSelf {
if c == 0 {
- yyerrorl(int(lexlineno), "illegal NUL byte")
+ yyerrorl(lexlineno, "illegal NUL byte")
return 0
}
if c == '\n' && importpkg == nil {
// The string conversion here makes a copy for passing
// to fmt.Printf, so that buf itself does not escape and
// can be allocated on the stack.
- yyerrorl(int(lexlineno), "illegal UTF-8 sequence % x", string(buf[:i]))
+ yyerrorl(lexlineno, "illegal UTF-8 sequence % x", string(buf[:i]))
}
if r == BOM {
- yyerrorl(int(lexlineno), "Unicode (UTF-8) BOM in middle of file")
+ yyerrorl(lexlineno, "Unicode (UTF-8) BOM in middle of file")
goto redo
}
{"any", TANY},
}
+var typedefs = [...]struct {
+ name string
+ etype EType
+ width *int
+ sameas32 EType
+ sameas64 EType
+}{
+ {"int", TINT, &Widthint, TINT32, TINT64},
+ {"uint", TUINT, &Widthint, TUINT32, TUINT64},
+ {"uintptr", TUINTPTR, &Widthptr, TUINT32, TUINT64},
+}
+
var builtinFuncs = [...]struct {
name string
op Op
s2.Def.Etype = EType(s.op)
}
- // logically, the type of a string literal.
- // types[TSTRING] is the named type string
- // (the type of x in var x string or var x = "hello").
- // this is the ideal form
- // (the type of x in const x = "hello").
idealstring = typ(TSTRING)
-
idealbool = typ(TBOOL)
s := Pkglookup("true", builtinpkg)
out.Type.Type = Types[TSTRING]
out.Funarg = true
f := typ(TFUNC)
- *getthis(f) = rcvr
- *Getoutarg(f) = out
- *getinarg(f) = in
+ *f.RecvP() = rcvr
+ *f.ResultsP() = out
+ *f.ParamsP() = in
f.Thistuple = 1
f.Intuple = 0
f.Outnamed = false
s.Def = typenod(runetype)
s.Def.Name = new(Name)
- // backend-specific builtin types (e.g. int).
- for i := range Thearch.Typedefs {
- s := Pkglookup(Thearch.Typedefs[i].Name, builtinpkg)
- s.Def = typenod(Types[Thearch.Typedefs[i].Etype])
- s.Def.Name = new(Name)
- s.Origpkg = builtinpkg
+ // backend-dependent builtin types (e.g. int).
+ for _, s := range typedefs {
+ s1 := Pkglookup(s.name, builtinpkg)
+
+ sameas := s.sameas32
+ if *s.width == 8 {
+ sameas = s.sameas64
+ }
+
+ Simtype[s.etype] = sameas
+ minfltval[s.etype] = minfltval[sameas]
+ maxfltval[s.etype] = maxfltval[sameas]
+ Minintval[s.etype] = Minintval[sameas]
+ Maxintval[s.etype] = Maxintval[sameas]
+
+ t := typ(s.etype)
+ t.Sym = s1
+ Types[s.etype] = t
+ s1.Def = typenod(t)
+ s1.Def.Name = new(Name)
+ s1.Origpkg = builtinpkg
+
+ dowidth(t)
}
}
func lexfini() {
for _, s := range builtinpkg.Syms {
- if s.Def == nil {
+ if s.Def == nil || (s.Name == "any" && Debug['A'] == 0) {
continue
}
s1 := Lookup(s.Name)
return fmt.Sprintf("LEX-%d", lex)
}
-func pkgnotused(lineno int, path string, name string) {
+func pkgnotused(lineno int32, path string, name string) {
// If the package was imported with a name other than the final
// import path element, show it explicitly in the error message.
// Note that this handles both renamed imports and imports of
elem = elem[i+1:]
}
if name == "" || elem == name {
- yyerrorl(int(lineno), "imported and not used: %q", path)
+ yyerrorl(lineno, "imported and not used: %q", path)
} else {
- yyerrorl(int(lineno), "imported and not used: %q as %s", path, name)
+ yyerrorl(lineno, "imported and not used: %q as %s", path, name)
}
}
// errors if a conflicting top-level name is
// introduced by a different file.
if !s.Def.Used && nsyntaxerrors == 0 {
- pkgnotused(int(s.Def.Lineno), s.Def.Name.Pkg.Path, s.Name)
+ pkgnotused(s.Def.Lineno, s.Def.Name.Pkg.Path, s.Name)
}
s.Def = nil
continue
// throw away top-level name left over
// from previous import . "x"
if s.Def.Name != nil && s.Def.Name.Pack != nil && !s.Def.Name.Pack.Used && nsyntaxerrors == 0 {
- pkgnotused(int(s.Def.Name.Pack.Lineno), s.Def.Name.Pack.Name.Pkg.Path, "")
+ pkgnotused(s.Def.Name.Pack.Lineno, s.Def.Name.Pack.Name.Pkg.Path, "")
s.Def.Name.Pack.Used = true
}
log.Fatal(err)
}
- // Look for $$ that introduces imports.
- i := bytes.Index(b, []byte("\n$$\n"))
+ // Look for $$B that introduces binary export data.
+ textual := false // TODO(gri) remove once we switched to binary export format
+ i := bytes.Index(b, []byte("\n$$B\n"))
if i < 0 {
- log.Fatal("did not find beginning of imports")
+ // Look for $$ that introduces textual export data.
+ i = bytes.Index(b, []byte("\n$$\n"))
+ if i < 0 {
+ log.Fatal("did not find beginning of export data")
+ }
+ textual = true
+ i-- // textual data doesn't have B
}
- i += 4
+ b = b[i+5:]
- // Look for $$ that closes imports.
- j := bytes.Index(b[i:], []byte("\n$$\n"))
- if j < 0 {
- log.Fatal("did not find end of imports")
+ // Look for $$ that closes export data.
+ i = bytes.Index(b, []byte("\n$$\n"))
+ if i < 0 {
+ log.Fatal("did not find end of export data")
}
- j += i + 4
+ b = b[:i+4]
- // Process and reformat imports.
+ // Process and reformat export data.
fmt.Fprintf(w, "\nconst %simport = \"\"", name)
- for _, p := range bytes.SplitAfter(b[i:j], []byte("\n")) {
- // Chop leading white space.
- p = bytes.TrimLeft(p, " \t")
- if len(p) == 0 {
- continue
- }
+ if textual {
+ for _, p := range bytes.SplitAfter(b, []byte("\n")) {
+ // Chop leading white space.
+ p = bytes.TrimLeft(p, " \t")
+ if len(p) == 0 {
+ continue
+ }
- fmt.Fprintf(w, " +\n\t%q", p)
+ fmt.Fprintf(w, " +\n\t%q", p)
+ }
+ } else {
+ const n = 40 // number of bytes per line
+ for len(b) > 0 {
+ i := len(b)
+ if i > n {
+ i = n
+ }
+ fmt.Fprintf(w, " +\n\t%q", b[:i])
+ b = b[i:]
+ }
}
fmt.Fprintf(w, "\n")
}
off = dsname(symdata, off, s[n:n+m])
}
- off = duint8(symdata, off, 0) // terminating NUL for runtime
ggloblsym(symdata, int32(off), obj.DUPOK|obj.RODATA|obj.LOCAL)
return symhdr, symdata
func order(fn *Node) {
if Debug['W'] > 1 {
s := fmt.Sprintf("\nbefore order %v", fn.Func.Nname.Sym)
- dumpslice(s, fn.Nbody.Slice())
+ dumplist(s, fn.Nbody)
}
orderblockNodes(&fn.Nbody)
return a
}
- Fatalf("ordersafeexpr %v", Oconv(int(n.Op), 0))
+ Fatalf("ordersafeexpr %v", Oconv(n.Op, 0))
return nil // not reached
}
}
// Orderstmtlist orders each of the statements in the list.
-func orderstmtlist(l *NodeList, order *Order) {
- for ; l != nil; l = l.Next {
- orderstmt(l.N, order)
- }
-}
-
-// Orderstmtslice orders each of the statements in the slice.
-func orderstmtslice(l []*Node, order *Order) {
- for _, n := range l {
+func orderstmtlist(l Nodes, order *Order) {
+ for _, n := range l.Slice() {
orderstmt(n, order)
}
}
-// Orderblock orders the block of statements *l onto a new list,
-// and then replaces *l with that list.
-func orderblock(l **NodeList) {
+// Orderblock orders the block of statements l onto a new list,
+// and returns the ordered list.
+func orderblock(l Nodes) []*Node {
var order Order
mark := marktemp(&order)
- orderstmtlist(*l, &order)
+ orderstmtlist(l, &order)
cleantemp(mark, &order)
- var ll *NodeList
- for _, n := range order.out {
- ll = list(ll, n)
- }
- *l = ll
+ return order.out
}
// OrderblockNodes orders the block of statements in n into a new slice,
func orderblockNodes(n *Nodes) {
var order Order
mark := marktemp(&order)
- orderstmtslice(n.Slice(), &order)
+ orderstmtlist(*n, &order)
cleantemp(mark, &order)
n.Set(order.out)
}
n := *np
var order Order
orderexpr(&n, &order, nil)
- addinitslice(&n, order.out)
+ addinit(&n, order.out)
// insert new temporaries from order
// at head of outer list.
mark := marktemp(&order)
orderstmt(n, &order)
cleantemp(mark, &order)
- *np = liststmtslice(order.out)
+ *np = liststmt(order.out)
}
// Orderinit moves n's init list to order->out.
func orderinit(n *Node, order *Order) {
orderstmtlist(n.Ninit, order)
- n.Ninit = nil
+ n.Ninit.Set(nil)
}
// Ismulticall reports whether the list l is f() for a multi-value function.
// Such an f() could appear as the lone argument to a multi-arg function.
-func ismulticall(l *NodeList) bool {
+func ismulticall(l Nodes) bool {
// one arg only
- if l == nil || l.Next != nil {
+ if l.Len() != 1 {
return false
}
- n := l.N
+ n := l.First()
// must be call
switch n.Op {
// Copyret emits t1, t2, ... = n, where n is a function call,
// and then returns the list t1, t2, ....
-func copyret(n *Node, order *Order) *NodeList {
+func copyret(n *Node, order *Order) Nodes {
if n.Type.Etype != TSTRUCT || !n.Type.Funarg {
Fatalf("copyret %v %d", n.Type, n.Left.Type.Outtuple)
}
- var l1 *NodeList
- var l2 *NodeList
- var tl Iter
- var tmp *Node
- for t := Structfirst(&tl, &n.Type); t != nil; t = structnext(&tl) {
- tmp = temp(t.Type)
- l1 = list(l1, tmp)
- l2 = list(l2, tmp)
+ var l1 []*Node
+ var l2 []*Node
+ for t, it := IterFields(n.Type); t != nil; t = it.Next() {
+ tmp := temp(t.Type)
+ l1 = append(l1, tmp)
+ l2 = append(l2, tmp)
}
as := Nod(OAS2, nil, nil)
- as.List = l1
- as.Rlist = list1(n)
+ as.List.Set(l1)
+ as.Rlist.Set([]*Node{n})
typecheck(&as, Etop)
orderstmt(as, order)
- return l2
+ var r Nodes
+ r.Set(l2)
+ return r
}
-// Ordercallargs orders the list of call arguments *l.
-func ordercallargs(l **NodeList, order *Order) {
- if ismulticall(*l) {
+// Ordercallargs orders the list of call arguments l and returns the
+// ordered list.
+func ordercallargs(l Nodes, order *Order) Nodes {
+ if ismulticall(l) {
// return f() where f() is multiple values.
- *l = copyret((*l).N, order)
+ return copyret(l.First(), order)
} else {
- orderexprlist(*l, order)
+ orderexprlist(l, order)
+ return l
}
}
func ordercall(n *Node, order *Order) {
orderexpr(&n.Left, order, nil)
orderexpr(&n.Right, order, nil) // ODDDARG temp
- ordercallargs(&n.List, order)
+ setNodeSeq(&n.List, ordercallargs(n.List, order))
if n.Op == OCALLFUNC {
- for l, t := n.List, getinargx(n.Left.Type).Type; l != nil && t != nil; l, t = l.Next, t.Down {
+ t := n.Left.Type.Params().Type
+ for it := nodeSeqIterate(n.List); !it.Done() && t != nil; it.Next() {
// Check for "unsafe-uintptr" tag provided by escape analysis.
// If present and the argument is really a pointer being converted
// to uintptr, arrange for the pointer to be kept alive until the call
// returns, by copying it into a temp and marking that temp
// still alive when we pop the temp stack.
if t.Note != nil && *t.Note == unsafeUintptrTag {
- xp := &l.N
+ xp := it.P()
for (*xp).Op == OCONVNOP && !Isptr[(*xp).Type.Etype] {
xp = &(*xp).Left
}
*xp = x
}
}
+ t = t.Down
}
}
}
func ordermapassign(n *Node, order *Order) {
switch n.Op {
default:
- Fatalf("ordermapassign %v", Oconv(int(n.Op), 0))
+ Fatalf("ordermapassign %v", Oconv(n.Op, 0))
case OAS:
order.out = append(order.out, n)
var post []*Node
var m *Node
var a *Node
- for l := n.List; l != nil; l = l.Next {
- if l.N.Op == OINDEXMAP {
- m = l.N
+ for i1, n1 := range n.List.Slice() {
+ if n1.Op == OINDEXMAP {
+ m = n1
if !istemp(m.Left) {
m.Left = ordercopyexpr(m.Left, m.Left.Type, order, 0)
}
if !istemp(m.Right) {
m.Right = ordercopyexpr(m.Right, m.Right.Type, order, 0)
}
- l.N = ordertemp(m.Type, order, false)
- a = Nod(OAS, m, l.N)
+ n.List.Slice()[i1] = ordertemp(m.Type, order, false)
+ a = Nod(OAS, m, n.List.Slice()[i1])
typecheck(&a, Etop)
post = append(post, a)
- } else if instrumenting && n.Op == OAS2FUNC && !isblank(l.N) {
- m = l.N
- l.N = ordertemp(m.Type, order, false)
- a = Nod(OAS, m, l.N)
+ } else if instrumenting && n.Op == OAS2FUNC && !isblank(n.List.Slice()[i1]) {
+ m = n.List.Slice()[i1]
+ n.List.Slice()[i1] = ordertemp(m.Type, order, false)
+ a = Nod(OAS, m, n.List.Slice()[i1])
typecheck(&a, Etop)
post = append(post, a)
}
return
}
- lno := int(setlineno(n))
+ lno := setlineno(n)
orderinit(n, order)
switch n.Op {
default:
- Fatalf("orderstmt %v", Oconv(int(n.Op), 0))
+ Fatalf("orderstmt %v", Oconv(n.Op, 0))
case OVARKILL, OVARLIVE:
order.out = append(order.out, n)
t := marktemp(order)
orderexprlist(n.List, order)
- r := n.Rlist.N
+ r := n.Rlist.First()
orderexpr(&r.Left, order, nil)
orderexpr(&r.Right, order, nil)
t := marktemp(order)
orderexprlist(n.List, order)
- ordercall(n.Rlist.N, order)
+ ordercall(n.Rlist.First(), order)
ordermapassign(n, order)
cleantemp(t, order)
t := marktemp(order)
orderexprlist(n.List, order)
- orderexpr(&n.Rlist.N.Left, order, nil) // i in i.(T)
- if isblank(n.List.N) {
+ orderexpr(&n.Rlist.First().Left, order, nil) // i in i.(T)
+ if isblank(n.List.First()) {
order.out = append(order.out, n)
} else {
- typ := n.Rlist.N.Type
+ typ := n.Rlist.First().Type
tmp1 := ordertemp(typ, order, haspointers(typ))
order.out = append(order.out, n)
- r := Nod(OAS, n.List.N, tmp1)
+ r := Nod(OAS, n.List.First(), tmp1)
typecheck(&r, Etop)
ordermapassign(r, order)
- n.List = list(list1(tmp1), n.List.Next.N)
+ n.List.Set([]*Node{tmp1, n.List.Second()})
}
cleantemp(t, order)
t := marktemp(order)
orderexprlist(n.List, order)
- orderexpr(&n.Rlist.N.Left, order, nil) // arg to recv
- ch := n.Rlist.N.Left.Type
+ orderexpr(&n.Rlist.First().Left, order, nil) // arg to recv
+ ch := n.Rlist.First().Left.Type
tmp1 := ordertemp(ch.Type, order, haspointers(ch.Type))
var tmp2 *Node
- if !isblank(n.List.Next.N) {
- tmp2 = ordertemp(n.List.Next.N.Type, order, false)
+ if !isblank(n.List.Second()) {
+ tmp2 = ordertemp(n.List.Second().Type, order, false)
} else {
tmp2 = ordertemp(Types[TBOOL], order, false)
}
order.out = append(order.out, n)
- r := Nod(OAS, n.List.N, tmp1)
+ r := Nod(OAS, n.List.First(), tmp1)
typecheck(&r, Etop)
ordermapassign(r, order)
- r = Nod(OAS, n.List.Next.N, tmp2)
+ r = Nod(OAS, n.List.Second(), tmp2)
typecheck(&r, Etop)
ordermapassign(r, order)
- n.List = list(list1(tmp1), tmp2)
+ n.List.Set([]*Node{tmp1, tmp2})
cleantemp(t, order)
// Special: does not save n onto out.
orderexprlist(n.Left.List, order)
t1 := marktemp(order)
- np := &n.Left.List.Next.N // map key
+ it := nodeSeqIterate(n.Left.List)
+ it.Next()
+ np := it.P() // map key
*np = ordercopyexpr(*np, (*np).Type, order, 0)
poptemp(t1, order)
case ODELETE:
t := marktemp(order)
- orderexpr(&n.List.N, order, nil)
- orderexpr(&n.List.Next.N, order, nil)
- orderaddrtemp(&n.List.Next.N, order) // map key
+ it := nodeSeqIterate(n.List)
+ orderexpr(it.P(), order, nil)
+ it.Next()
+ orderexpr(it.P(), order, nil)
+ orderaddrtemp(it.P(), order) // map key
order.out = append(order.out, n)
cleantemp(t, order)
n.Nbody.Set(append(l, n.Nbody.Slice()...))
l = nil
cleantempnopop(t, order, &l)
- var ll *NodeList
- for _, n := range l {
- ll = list(ll, n)
- }
- n.Rlist = concat(ll, n.Rlist)
+ n.Rlist.Set(append(l, n.Rlist.Slice()...))
poptemp(t, order)
orderblockNodes(&n.Nbody)
- orderblock(&n.Rlist)
+ n.Rlist.Set(orderblock(n.Rlist))
order.out = append(order.out, n)
// Special: argument will be converted to interface using convT2E
if n.Right.Op == OSTRARRAYBYTE {
n.Right.Op = OSTRARRAYBYTETMP
}
- if count(n.List) < 2 || isblank(n.List.Next.N) {
+ if n.List.Len() < 2 || isblank(n.List.Second()) {
// for i := range x will only use x once, to compute len(x).
// No need to copy it.
break
// n->alloc is the temp for the iterator.
prealloc[n] = ordertemp(Types[TUINT8], order, true)
}
-
- for l := n.List; l != nil; l = l.Next {
- orderexprinplace(&l.N, order)
+ for i1 := range n.List.Slice() {
+ orderexprinplace(&n.List.Slice()[i1], order)
}
orderblockNodes(&n.Nbody)
order.out = append(order.out, n)
cleantemp(t, order)
case ORETURN:
- ordercallargs(&n.List, order)
+ setNodeSeq(&n.List, ordercallargs(n.List, order))
order.out = append(order.out, n)
// Special: clean case temporaries in each block entry.
var tmp1 *Node
var tmp2 *Node
var r *Node
- for l := n.List; l != nil; l = l.Next {
- if l.N.Op != OXCASE {
- Fatalf("order select case %v", Oconv(int(l.N.Op), 0))
+ for _, n2 := range n.List.Slice() {
+ if n2.Op != OXCASE {
+ Fatalf("order select case %v", Oconv(n2.Op, 0))
}
- r = l.N.Left
- setlineno(l.N)
+ r = n2.Left
+ setlineno(n2)
// Append any new body prologue to ninit.
// The next loop will insert ninit into nbody.
- if l.N.Ninit != nil {
+ if n2.Ninit.Len() != 0 {
Fatalf("order select ninit")
}
if r != nil {
switch r.Op {
default:
- Yyerror("unknown op in select %v", Oconv(int(r.Op), 0))
+ Yyerror("unknown op in select %v", Oconv(r.Op, 0))
Dump("select case", r)
// If this is case x := <-ch or case x, y := <-ch, the case has
// Delete the ODCL nodes here and recreate them inside the body below.
case OSELRECV, OSELRECV2:
if r.Colas {
- init := r.Ninit
- if init != nil && init.N.Op == ODCL && init.N.Left == r.Left {
- init = init.Next
+ itinit := nodeSeqIterate(r.Ninit)
+ if itinit.Len() != 0 && itinit.N().Op == ODCL && itinit.N().Left == r.Left {
+ itinit.Next()
}
- if init != nil && init.N.Op == ODCL && r.List != nil && init.N.Left == r.List.N {
- init = init.Next
+ if itinit.Len() != 0 && itinit.N().Op == ODCL && r.List.Len() != 0 && itinit.N().Left == r.List.First() {
+ itinit.Next()
}
- if init == nil {
- r.Ninit = nil
+ if itinit.Done() {
+ r.Ninit.Set(nil)
}
}
- if r.Ninit != nil {
+ if r.Ninit.Len() != 0 {
Yyerror("ninit on select recv")
dumplist("ninit", r.Ninit)
}
if r.Colas {
tmp2 = Nod(ODCL, tmp1, nil)
typecheck(&tmp2, Etop)
- l.N.Ninit = list(l.N.Ninit, tmp2)
+ n2.Ninit.Append(tmp2)
}
r.Left = ordertemp(r.Right.Left.Type.Type, order, haspointers(r.Right.Left.Type.Type))
tmp2 = Nod(OAS, tmp1, r.Left)
typecheck(&tmp2, Etop)
- l.N.Ninit = list(l.N.Ninit, tmp2)
+ n2.Ninit.Append(tmp2)
}
- if r.List != nil && isblank(r.List.N) {
- r.List = nil
+ if r.List.Len() != 0 && isblank(r.List.First()) {
+ r.List.Set(nil)
}
- if r.List != nil {
- tmp1 = r.List.N
+ if r.List.Len() != 0 {
+ tmp1 = r.List.First()
if r.Colas {
tmp2 = Nod(ODCL, tmp1, nil)
typecheck(&tmp2, Etop)
- l.N.Ninit = list(l.N.Ninit, tmp2)
+ n2.Ninit.Append(tmp2)
}
- r.List = list1(ordertemp(tmp1.Type, order, false))
- tmp2 = Nod(OAS, tmp1, r.List.N)
+ setNodeSeq(&r.List, list1(ordertemp(tmp1.Type, order, false)))
+ tmp2 = Nod(OAS, tmp1, r.List.First())
typecheck(&tmp2, Etop)
- l.N.Ninit = list(l.N.Ninit, tmp2)
+ n2.Ninit.Append(tmp2)
}
-
- orderblock(&l.N.Ninit)
+ n2.Ninit.Set(orderblock(n2.Ninit))
case OSEND:
- if r.Ninit != nil {
+ if r.Ninit.Len() != 0 {
Yyerror("ninit on select send")
dumplist("ninit", r.Ninit)
}
}
}
- orderblockNodes(&l.N.Nbody)
+ orderblockNodes(&n2.Nbody)
}
-
// Now that we have accumulated all the temporaries, clean them.
// Also insert any ninit queued during the previous loop.
// (The temporary cleaning must follow that ninit work.)
- for l := n.List; l != nil; l = l.Next {
- s := make([]*Node, 0, count(l.N.Ninit))
- for ll := l.N.Ninit; ll != nil; ll = ll.Next {
- s = append(s, ll.N)
- }
+ for _, n3 := range n.List.Slice() {
+ s := n3.Ninit.Slice()
cleantempnopop(t, order, &s)
- l.N.Nbody.Set(append(s, l.N.Nbody.Slice()...))
- l.N.Ninit = nil
+ n3.Nbody.Set(append(s, n3.Nbody.Slice()...))
+ n3.Ninit.Set(nil)
}
order.out = append(order.out, n)
t := marktemp(order)
orderexpr(&n.Left, order, nil)
- for l := n.List; l != nil; l = l.Next {
- if l.N.Op != OXCASE {
- Fatalf("order switch case %v", Oconv(int(l.N.Op), 0))
+ for _, n4 := range n.List.Slice() {
+ if n4.Op != OXCASE {
+ Fatalf("order switch case %v", Oconv(n4.Op, 0))
}
- orderexprlistinplace(l.N.List, order)
- orderblockNodes(&l.N.Nbody)
+ orderexprlistinplace(n4.List, order)
+ orderblockNodes(&n4.Nbody)
}
order.out = append(order.out, n)
cleantemp(t, order)
}
- lineno = int32(lno)
+ lineno = lno
}
// Orderexprlist orders the expression list l into order.
-func orderexprlist(l *NodeList, order *Order) {
- for ; l != nil; l = l.Next {
- orderexpr(&l.N, order, nil)
+func orderexprlist(l Nodes, order *Order) {
+ for i := range l.Slice() {
+ orderexpr(&l.Slice()[i], order, nil)
}
}
// Orderexprlist orders the expression list l but saves
// the side effects on the individual expression ninit lists.
-func orderexprlistinplace(l *NodeList, order *Order) {
- for ; l != nil; l = l.Next {
- orderexprinplace(&l.N, order)
+func orderexprlistinplace(l Nodes, order *Order) {
+ for i := range l.Slice() {
+ orderexprinplace(&l.Slice()[i], order)
}
}
return
}
- lno := int(setlineno(n))
+ lno := setlineno(n)
orderinit(n, order)
switch n.Op {
case OADDSTR:
orderexprlist(n.List, order)
- if count(n.List) > 5 {
+ if n.List.Len() > 5 {
t := typ(TARRAY)
- t.Bound = int64(count(n.List))
+ t.Bound = int64(n.List.Len())
t.Type = Types[TSTRING]
prealloc[n] = ordertemp(t, order, false)
}
hasbyte := false
haslit := false
- for l := n.List; l != nil; l = l.Next {
- hasbyte = hasbyte || l.N.Op == OARRAYBYTESTR
- haslit = haslit || l.N.Op == OLITERAL && len(l.N.Val().U.(string)) != 0
+ for _, n1 := range n.List.Slice() {
+ hasbyte = hasbyte || n1.Op == OARRAYBYTESTR
+ haslit = haslit || n1.Op == OLITERAL && len(n1.Val().U.(string)) != 0
}
if haslit && hasbyte {
- for l := n.List; l != nil; l = l.Next {
- if l.N.Op == OARRAYBYTESTR {
- l.N.Op = OARRAYBYTESTRTMP
+ for _, n2 := range n.List.Slice() {
+ if n2.Op == OARRAYBYTESTR {
+ n2.Op = OARRAYBYTESTRTMP
}
}
}
var s []*Node
cleantempnopop(mark, order, &s)
- var l *NodeList
- for _, n := range s {
- l = list(l, n)
- }
- n.Right.Ninit = concat(l, n.Right.Ninit)
+ n.Right.Ninit.Set(append(s, n.Right.Ninit.Slice()...))
orderexprinplace(&n.Right, order)
case OCALLFUNC,
}
case OAPPEND:
- ordercallargs(&n.List, order)
- if lhs == nil || lhs.Op != ONAME && !samesafeexpr(lhs, n.List.N) {
+ setNodeSeq(&n.List, ordercallargs(n.List, order))
+ if lhs == nil || lhs.Op != ONAME && !samesafeexpr(lhs, n.List.First()) {
n = ordercopyexpr(n, n.Type, order, 0)
}
}
}
- lineno = int32(lno)
+ lineno = lno
*np = n
}
return
}
- line := int32(parserline())
+ line := lineno
// We need to clear importpkg before calling p.next(),
// otherwise it will affect lexlineno.
if rangeOk && p.got(LRANGE) {
// expr_list '=' LRANGE expr
r := Nod(ORANGE, nil, p.expr())
- r.List = lhs
+ setNodeSeq(&r.List, lhs)
r.Etype = 0 // := flag
return r
}
}
// multiple
stmt := Nod(OAS2, nil, nil)
- stmt.List = lhs
- stmt.Rlist = rhs
+ setNodeSeq(&stmt.List, lhs)
+ setNodeSeq(&stmt.Rlist, rhs)
return stmt
case LCOLAS:
if rangeOk && p.got(LRANGE) {
// expr_list LCOLAS LRANGE expr
r := Nod(ORANGE, nil, p.expr())
- r.List = lhs
+ setNodeSeq(&r.List, lhs)
r.Colas = true
- colasdefn(lhs, r)
+ colasdefn(r.List, r)
return r
}
} // it's a colas, so must not re-use an oldname
return ts
}
- return colas(lhs, rhs, int32(lno))
+ return colas(lhs, rhs, lno)
default:
p.syntax_error("expecting := or = or comma")
}
label.Name.Defn = ls
- l := list1(label)
+ l := []*Node{label}
if ls != nil {
- l = list(l, ls)
+ if ls.Op == OBLOCK && ls.Ninit.Len() == 0 {
+ l = append(l, ls.List.Slice()...)
+ } else {
+ l = append(l, ls)
+ }
}
return liststmt(l)
}
// done in casebody()
markdcl() // matching popdcl in caseblock
stmt := Nod(OXCASE, nil, nil)
- stmt.List = cases
+ setNodeSeq(&stmt.List, cases)
if tswitch != nil {
if n := tswitch.Left; n != nil {
// type switch - declare variable
nn := newname(n.Sym)
declare(nn, dclcontext)
- stmt.Rlist = list1(nn)
+ stmt.Rlist.Set([]*Node{nn})
// keep track of the instances for reporting unused
nn.Name.Defn = tswitch
n = Nod(OAS, cases.N, rhs)
} else {
n = Nod(OAS2, nil, nil)
- n.List = cases
- n.Rlist = list1(rhs)
+ setNodeSeq(&n.List, cases)
+ n.Rlist.Set([]*Node{rhs})
}
- stmt.List = list1(n)
+ stmt.List.Set([]*Node{n})
p.want(':') // consume ':' after declaring select cases for correct lineno
return stmt
// done in casebody()
markdcl() // matching popdcl in caseblock
stmt := Nod(OXCASE, nil, nil)
- stmt.List = list1(colas(cases, list1(rhs), int32(lno)))
+ stmt.List.Set([]*Node{colas(cases, list1(rhs), lno)})
p.want(':') // consume ':' after declaring select cases for correct lineno
return stmt
// type switch - declare variable
nn := newname(n.Sym)
declare(nn, dclcontext)
- stmt.Rlist = list1(nn)
+ stmt.Rlist.Set([]*Node{nn})
// keep track of the instances for reporting unused
nn.Name.Defn = tswitch
if l == nil {
stmt = Nod(OEMPTY, nil, nil)
} else {
- stmt = liststmt(l)
+ stmt = liststmt(nodeSeqSlice(l))
}
popdcl()
}
h := Nod(OFOR, nil, nil)
if init != nil {
- h.Ninit = list1(init)
+ h.Ninit.Set([]*Node{init})
}
h.Left = cond
h.Right = post
init, cond, _ := p.header(false)
h := Nod(OIF, nil, nil)
- h.Ninit = list1(init)
+ if init != nil {
+ h.Ninit.Set([]*Node{init})
+ }
h.Left = cond
return h
}
if p.got(LELSE) {
if p.tok == LIF {
- stmt.Rlist = list1(p.if_stmt())
+ stmt.Rlist.Set([]*Node{p.if_stmt()})
} else {
- stmt.Rlist = list1(p.compound_stmt(true))
+ cs := p.compound_stmt(true)
+ if cs.Op == OBLOCK && cs.Ninit.Len() == 0 {
+ stmt.Rlist.Set(cs.List.Slice())
+ } else {
+ stmt.Rlist.Set([]*Node{cs})
+ }
}
}
tswitch = nil
}
- hdr.List = p.caseblock_list(tswitch)
+ setNodeSeq(&hdr.List, p.caseblock_list(tswitch))
popdcl()
return hdr
p.want(LSELECT)
hdr := Nod(OSELECT, nil, nil)
- hdr.List = p.caseblock_list(nil)
+ setNodeSeq(&hdr.List, p.caseblock_list(nil))
return hdr
}
// call or conversion
x = Nod(OCALL, x, nil)
- x.List = args
+ setNodeSeq(&x.List, args)
x.Isddd = ddd
case '{':
p.want('{')
p.xnest++
- var l *NodeList
+ var l []*Node
for p.tok != EOF && p.tok != '}' {
- l = list(l, p.keyval())
+ l = append(l, p.keyval())
if !p.ocomma('}') {
break
}
p.xnest--
p.want('}')
- n.List = l
+ n.List.Set(l)
return n
}
symlineno := lineno
sym := p.sym()
if sym == nil {
- yyerrorl(int(symlineno), "invalid declaration")
+ yyerrorl(symlineno, "invalid declaration")
return nil
}
return dclname(sym)
result := p.fnres()
params = checkarglist(params, 1)
t := Nod(OTFUNC, nil, nil)
- t.List = params
- t.Rlist = result
+ setNodeSeq(&t.List, params)
+ setNodeSeq(&t.Rlist, result)
return t
case '[':
return nil
}
-func (p *parser) new_dotname(pkg *Node) *Node {
+func (p *parser) new_dotname(obj *Node) *Node {
if trace && Debug['x'] != 0 {
defer p.trace("new_dotname")()
}
sel := p.sym()
- if pkg.Op == OPACK {
- s := restrictlookup(sel.Name, pkg.Name.Pkg)
- pkg.Used = true
+ if obj.Op == OPACK {
+ s := restrictlookup(sel.Name, obj.Name.Pkg)
+ obj.Used = true
return oldname(s)
}
- return Nod(OXDOT, pkg, newname(sel))
-
+ return Nod(OXDOT, obj, newname(sel))
}
func (p *parser) dotname() *Node {
p.want('}')
t := Nod(OTSTRUCT, nil, nil)
- t.List = l
+ setNodeSeq(&t.List, l)
return t
}
p.want(LINTERFACE)
p.want('{')
- var l *NodeList
+ var l []*Node
for p.tok != EOF && p.tok != '}' {
- l = list(l, p.interfacedcl())
+ l = append(l, p.interfacedcl())
if !p.osemi('}') {
break
}
p.want('}')
t := Nod(OTINTER, nil, nil)
- t.List = l
+ t.List.Set(l)
return t
}
}
t := Nod(OTFUNC, nil, nil)
- t.List = params
- t.Rlist = result
+ setNodeSeq(&t.List, params)
+ setNodeSeq(&t.Rlist, result)
f := Nod(ODCLFUNC, nil, nil)
f.Func.Nname = newfuncname(name)
}
t := Nod(OTFUNC, rcvr, nil)
- t.List = params
- t.Rlist = result
+ setNodeSeq(&t.List, params)
+ setNodeSeq(&t.Rlist, result)
f := Nod(ODCLFUNC, nil, nil)
f.Func.Shortname = newfuncname(name)
s5 := p.ohidden_funres()
s := s1
- t := functype(nil, s3, s5)
+ t := functype(nil, nodeSeqSlice(s3), nodeSeqSlice(s5))
importsym(s, ONAME)
if s.Def != nil && s.Def.Op == ONAME {
s8 := p.ohidden_funres()
ss := methodname1(newname(s4), s2.N.Right)
- ss.Type = functype(s2.N, s6, s8)
+ ss.Type = functype(s2.N, nodeSeqSlice(s6), nodeSeqSlice(s8))
checkwidth(ss.Type)
addmethod(s4, ss.Type, false, false)
// without func keyword
params = checkarglist(params, 1)
t := Nod(OTFUNC, fakethis(), nil)
- t.List = params
- t.Rlist = result
+ setNodeSeq(&t.List, params)
+ setNodeSeq(&t.Rlist, result)
return t
}
return p.compound_stmt(false)
case LVAR, LCONST, LTYPE:
- return liststmt(p.common_dcl())
+ return liststmt(nodeSeqSlice(p.common_dcl()))
case LNAME, '@', '?', LLITERAL, LFUNC, '(', // operands
'[', LSTRUCT, LMAP, LCHAN, LINTERFACE, // composite types
}
stmt := Nod(ORETURN, nil, nil)
- stmt.List = results
- if stmt.List == nil && Curfn != nil {
+ setNodeSeq(&stmt.List, results)
+ if stmt.List.Len() == 0 && Curfn != nil {
for _, ln := range Curfn.Func.Dcl {
if ln.Class == PPARAM {
continue
if s == missing_stmt {
break
}
- l = list(l, s)
+ if s != nil && s.Op == OBLOCK && s.Ninit.Len() == 0 {
+ appendNodeSeq(&l, s.List)
+ } else {
+ appendNodeSeqNode(&l, s)
+ }
// customized version of osemi:
// ';' is optional before a closing ')' or '}'
if p.tok == ')' || p.tok == '}' {
s3 := p.ohidden_structdcl_list()
p.want('}')
- return tostruct(s3)
+ return tostruct(nodeSeqSlice(s3))
case LINTERFACE:
// LINTERFACE '{' ohidden_interfacedcl_list '}'
s3 := p.ohidden_interfacedcl_list()
p.want('}')
- return tointerface(s3)
+ return tointerface(nodeSeqSlice(s3))
case '*':
// '*' hidden_type
p.want(')')
s5 := p.ohidden_funres()
- return functype(nil, s3, s5)
+ return functype(nil, nodeSeqSlice(s3), nodeSeqSlice(s5))
}
func (p *parser) hidden_funarg() *Node {
p.want(')')
s5 := p.ohidden_funres()
- return Nod(ODCLFIELD, newname(s1), typenod(functype(fakethis(), s3, s5)))
+ return Nod(ODCLFIELD, newname(s1), typenod(functype(fakethis(), nodeSeqSlice(s3), nodeSeqSlice(s5))))
}
func (p *parser) ohidden_funres() *NodeList {
// that its argument is certainly dead, for use when the liveness analysis
// would not otherwise be able to deduce that fact.
-func gvardefx(n *Node, as int) {
+func gvardefx(n *Node, as obj.As) {
if n == nil {
Fatalf("gvardef nil")
}
if n.Op != ONAME {
- Yyerror("gvardef %v; %v", Oconv(int(n.Op), obj.FmtSharp), n)
+ Yyerror("gvardef %v; %v", Oconv(n.Op, obj.FmtSharp), n)
return
}
var xoffset int64
if Curfn.Type.Thistuple > 0 {
xoffset = 0
- onebitwalktype1(getthisx(Curfn.Type), &xoffset, bv)
+ onebitwalktype1(Curfn.Type.Recv(), &xoffset, bv)
}
if Curfn.Type.Intuple > 0 {
xoffset = 0
- onebitwalktype1(getinargx(Curfn.Type), &xoffset, bv)
+ onebitwalktype1(Curfn.Type.Params(), &xoffset, bv)
}
for j := 0; int32(j) < bv.n; j += 32 {
}
if Curfn.Type.Outtuple > 0 {
xoffset = 0
- onebitwalktype1(getoutargx(Curfn.Type), &xoffset, bv)
+ onebitwalktype1(Curfn.Type.Results(), &xoffset, bv)
for j := 0; int32(j) < bv.n; j += 32 {
off = duint32(sym, off, bv.b[j/32])
}
panicdottype = Sysfunc("panicdottype")
}
- lno := setlineno(fn)
+ defer func(lno int32) {
+ lineno = lno
+ }(setlineno(fn))
Curfn = fn
dowidth(Curfn.Type)
- var oldstksize int64
- var nod1 Node
- var ptxt *obj.Prog
- var pl *obj.Plist
- var p *obj.Prog
- var n *Node
- var nam *Node
- var gcargs *Sym
- var gclocals *Sym
- var ssafn *ssa.Func
if len(fn.Nbody.Slice()) == 0 {
if pure_go != 0 || strings.HasPrefix(fn.Func.Nname.Sym.Name, "init.") {
Yyerror("missing function body for %q", fn.Func.Nname.Sym.Name)
- goto ret
+ return
}
if Debug['A'] != 0 {
- goto ret
+ return
}
emitptrargsmap()
- goto ret
+ return
}
saveerrors()
if Curfn.Type.Outnamed {
// add clearing of the output parameters
- var save Iter
- t := Structfirst(&save, Getoutarg(Curfn.Type))
-
- for t != nil {
+ for t, it := IterFields(Curfn.Type.Results()); t != nil; t = it.Next() {
if t.Nname != nil {
- n = Nod(OAS, t.Nname, nil)
+ n := Nod(OAS, t.Nname, nil)
typecheck(&n, Etop)
Curfn.Nbody.Set(append([]*Node{n}, Curfn.Nbody.Slice()...))
}
-
- t = structnext(&save)
}
}
order(Curfn)
if nerrors != 0 {
- goto ret
+ return
}
hasdefer = false
walk(Curfn)
if nerrors != 0 {
- goto ret
+ return
}
if instrumenting {
instrument(Curfn)
}
if nerrors != 0 {
- goto ret
+ return
}
// Build an SSA backend function.
+ var ssafn *ssa.Func
if shouldssa(Curfn) {
ssafn = buildssa(Curfn)
}
continpc = nil
breakpc = nil
- pl = newplist()
+ pl := newplist()
pl.Name = Linksym(Curfn.Func.Nname.Sym)
setlineno(Curfn)
+ var nod1 Node
Nodconst(&nod1, Types[TINT32], 0)
- nam = Curfn.Func.Nname
+ nam := Curfn.Func.Nname
if isblank(nam) {
nam = nil
}
- ptxt = Thearch.Gins(obj.ATEXT, nam, &nod1)
+ ptxt := Thearch.Gins(obj.ATEXT, nam, &nod1)
Afunclit(&ptxt.From, Curfn.Func.Nname)
ptxt.From3 = new(obj.Addr)
if fn.Func.Dupok {
// Clumsy but important.
// See test/recover.go for test cases and src/reflect/value.go
// for the actual functions being considered.
- if myimportpath != "" && myimportpath == "reflect" {
+ if myimportpath == "reflect" {
if Curfn.Func.Nname.Sym.Name == "callReflect" || Curfn.Func.Nname.Sym.Name == "callMethod" {
ptxt.From3.Offset |= obj.WRAPPER
}
ginit()
- gcargs = makefuncdatasym("gcargs·%d", obj.FUNCDATA_ArgsPointerMaps)
- gclocals = makefuncdatasym("gclocals·%d", obj.FUNCDATA_LocalsPointerMaps)
+ gcargs := makefuncdatasym("gcargs·%d", obj.FUNCDATA_ArgsPointerMaps)
+ gclocals := makefuncdatasym("gclocals·%d", obj.FUNCDATA_LocalsPointerMaps)
for _, t := range Curfn.Func.Fieldtrack {
gtrack(tracksym(t))
switch n.Class {
case PAUTO, PPARAM, PPARAMOUT:
Nodconst(&nod1, Types[TUINTPTR], n.Type.Width)
- p = Thearch.Gins(obj.ATYPE, n, &nod1)
+ p := Thearch.Gins(obj.ATYPE, n, &nod1)
p.From.Gotype = Linksym(ngotype(n))
}
}
if ssafn != nil {
genssa(ssafn, ptxt, gcargs, gclocals)
- if Curfn.Func.Endlineno != 0 {
- lineno = Curfn.Func.Endlineno
- }
ssafn.Free()
- return
+ } else {
+ genlegacy(ptxt, gcargs, gclocals)
}
- Genslice(Curfn.Func.Enter.Slice())
- Genslice(Curfn.Nbody.Slice())
+}
+
+// genlegacy compiles Curfn using the legacy non-SSA code generator.
+func genlegacy(ptxt *obj.Prog, gcargs, gclocals *Sym) {
+ Genlist(Curfn.Func.Enter)
+ Genlist(Curfn.Nbody)
gclean()
checklabels()
if nerrors != 0 {
- goto ret
+ return
}
if Curfn.Func.Endlineno != 0 {
lineno = Curfn.Func.Endlineno
gclean()
if nerrors != 0 {
- goto ret
+ return
}
Pc.As = obj.ARET // overwrite AEND
Thearch.Expandchecks(ptxt)
- oldstksize = Stksize
allocauto(ptxt)
- if false {
- fmt.Printf("allocauto: %d to %d\n", oldstksize, int64(Stksize))
- }
-
setlineno(Curfn)
- if int64(Stksize)+Maxarg > 1<<31 {
+ if Stksize+Maxarg > 1<<31 {
Yyerror("stack frame too large (>2GB)")
- goto ret
+ return
}
// Emit garbage collection symbols.
// Remove leftover instrumentation from the instruction stream.
removevardef(ptxt)
-
-ret:
- lineno = lno
}
import (
"reflect"
+ "sort"
"testing"
)
return s
}
-func TestListsort(t *testing.T) {
+func TestStackvarSort(t *testing.T) {
inp := []*Node{
{Class: PFUNC, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
{Class: PAUTO, Type: &Type{}, Name: &Name{}, Sym: &Sym{}},
haspointers(inp[i].Type)
}
- nl := slice2nodelist(inp)
- listsort(&nl, cmpstackvarlt)
- got := nodelist2slice(nl)
- if !reflect.DeepEqual(want, got) {
- t.Error("listsort failed")
- for i := range got {
- g := got[i]
+ sort.Sort(byStackVar(inp))
+ if !reflect.DeepEqual(want, inp) {
+ t.Error("sort failed")
+ for i := range inp {
+ g := inp[i]
w := want[i]
eq := reflect.DeepEqual(w, g)
if !eq {
// Add missing successor edges to the selectgo blocks.
if len(selectgo) != 0 {
- fixselectgo([]*BasicBlock(selectgo))
+ fixselectgo(selectgo)
}
// Find a depth-first order and assign a depth-first number to
}
fmt.Printf("\n")
- printvars("\tuevar", bb.uevar, []*Node(lv.vars))
- printvars("\tvarkill", bb.varkill, []*Node(lv.vars))
- printvars("\tlivein", bb.livein, []*Node(lv.vars))
- printvars("\tliveout", bb.liveout, []*Node(lv.vars))
- printvars("\tavarinit", bb.avarinit, []*Node(lv.vars))
- printvars("\tavarinitany", bb.avarinitany, []*Node(lv.vars))
- printvars("\tavarinitall", bb.avarinitall, []*Node(lv.vars))
+ printvars("\tuevar", bb.uevar, lv.vars)
+ printvars("\tvarkill", bb.varkill, lv.vars)
+ printvars("\tlivein", bb.livein, lv.vars)
+ printvars("\tliveout", bb.liveout, lv.vars)
+ printvars("\tavarinit", bb.avarinit, lv.vars)
+ printvars("\tavarinitany", bb.avarinitany, lv.vars)
+ printvars("\tavarinitall", bb.avarinitall, lv.vars)
fmt.Printf("\tprog:\n")
for prog := bb.first; ; prog = prog.Link {
// If the receiver or arguments are unnamed, they will be omitted
// from the list above. Preserve those values - even though they are unused -
// in order to keep their addresses live for use in stack traces.
- thisargtype := getthisx(lv.fn.Type)
+ thisargtype := lv.fn.Type.Recv()
if thisargtype != nil {
xoffset = 0
onebitwalktype1(thisargtype, &xoffset, args)
}
- inargtype := getinargx(lv.fn.Type)
+ inargtype := lv.fn.Type.Params()
if inargtype != nil {
xoffset = 0
onebitwalktype1(inargtype, &xoffset, args)
}
// Construct a disembodied instruction.
-func unlinkedprog(as int) *obj.Prog {
+func unlinkedprog(as obj.As) *obj.Prog {
p := Ctxt.NewProg()
Clearp(p)
- p.As = int16(as)
+ p.As = as
return p
}
// Walk the block instructions backward and update the block
// effects with the each prog effects.
for p := bb.last; p != nil; p = p.Opt.(*obj.Prog) {
- progeffects(p, []*Node(lv.vars), uevar, varkill, avarinit)
+ progeffects(p, lv.vars, uevar, varkill, avarinit)
if debuglive >= 3 {
printeffects(p, uevar, varkill, avarinit)
}
bvresetall(varkill)
for p := bb.first; ; p = p.Link {
- progeffects(p, []*Node(lv.vars), uevar, varkill, avarinit)
+ progeffects(p, lv.vars, uevar, varkill, avarinit)
if debuglive >= 3 {
printeffects(p, uevar, varkill, avarinit)
}
// allocate liveness maps for those instructions that need them.
// Seed the maps with information about the addrtaken variables.
for p = bb.first; ; p = p.Link {
- progeffects(p, []*Node(lv.vars), uevar, varkill, avarinit)
+ progeffects(p, lv.vars, uevar, varkill, avarinit)
bvandnot(any, any, varkill)
bvandnot(all, all, varkill)
bvor(any, any, avarinit)
if !n.Name.Needzero {
n.Name.Needzero = true
if debuglive >= 1 {
- Warnl(int(p.Lineno), "%v: %v is ambiguously live", Curfn.Func.Nname, Nconv(n, obj.FmtLong))
+ Warnl(p.Lineno, "%v: %v is ambiguously live", Curfn.Func.Nname, Nconv(n, obj.FmtLong))
}
// Record in 'ambiguous' bitmap.
}
n = lv.vars[j]
if n.Class != PPARAM {
- yyerrorl(int(p.Lineno), "internal error: %v %v recorded as live on entry, p.Pc=%v", Curfn.Func.Nname, Nconv(n, obj.FmtLong), p.Pc)
+ yyerrorl(p.Lineno, "internal error: %v %v recorded as live on entry, p.Pc=%v", Curfn.Func.Nname, Nconv(n, obj.FmtLong), p.Pc)
}
}
}
cfg := newcfg(firstp)
if debuglive >= 3 {
- printcfg([]*BasicBlock(cfg))
+ printcfg(cfg)
}
vars := getvariables(fn)
lv := newliveness(fn, firstp, cfg, vars)
}
freeliveness(lv)
- freecfg([]*BasicBlock(cfg))
+ freecfg(cfg)
debuglive -= debugdelta
}
ncheck++
if Thearch.Stackaddr(&p.From) {
if Debug_checknil != 0 && p.Lineno > 1 {
- Warnl(int(p.Lineno), "removed nil check of SP address")
+ Warnl(p.Lineno, "removed nil check of SP address")
}
f.Data = &killed
continue
nilwalkfwd(f)
if f.Data != nil {
if Debug_checknil != 0 && p.Lineno > 1 {
- Warnl(int(p.Lineno), "removed nil check before indirect")
+ Warnl(p.Lineno, "removed nil check before indirect")
}
continue
}
nilwalkback(f)
if f.Data != nil {
if Debug_checknil != 0 && p.Lineno > 1 {
- Warnl(int(p.Lineno), "removed repeated nil check")
+ Warnl(p.Lineno, "removed repeated nil check")
}
continue
}
}
if flag_race == 0 || !ispkgin(norace_inst_pkgs) {
- instrumentslice(fn.Nbody.Slice(), nil)
+ instrumentlist(fn.Nbody, nil)
// nothing interesting for race detector in fn->enter
- instrumentslice(fn.Func.Exit.Slice(), nil)
+ instrumentlist(fn.Func.Exit, nil)
}
if flag_race != 0 {
if Debug['W'] != 0 {
s := fmt.Sprintf("after instrument %v", fn.Func.Nname.Sym)
- dumpslice(s, fn.Nbody.Slice())
+ dumplist(s, fn.Nbody)
s = fmt.Sprintf("enter %v", fn.Func.Nname.Sym)
- dumpslice(s, fn.Func.Enter.Slice())
+ dumplist(s, fn.Func.Enter)
s = fmt.Sprintf("exit %v", fn.Func.Nname.Sym)
- dumpslice(s, fn.Func.Exit.Slice())
+ dumplist(s, fn.Func.Exit)
}
}
-func instrumentlist(l *NodeList, init **NodeList) {
- var instr *NodeList
-
- for ; l != nil; l = l.Next {
- instr = nil
- instrumentnode(&l.N, &instr, 0, 0)
+func instrumentlist(l Nodes, init *Nodes) {
+ for i := range l.Slice() {
+ var instr Nodes
+ instrumentnode(&l.Slice()[i], &instr, 0, 0)
if init == nil {
- l.N.Ninit = concat(l.N.Ninit, instr)
+ l.Slice()[i].Ninit.AppendNodes(&instr)
} else {
- *init = concat(*init, instr)
- }
- }
-}
-
-func instrumentslice(l []*Node, init **NodeList) {
- for i := range l {
- var instr *NodeList
- instrumentnode(&l[i], &instr, 0, 0)
- if init == nil {
- l[i].Ninit = concat(l[i].Ninit, instr)
- } else {
- *init = concat(*init, instr)
+ init.AppendNodes(&instr)
}
}
}
// walkexpr and walkstmt combined
// walks the tree and adds calls to the
// instrumentation code to top-level (statement) nodes' init
-func instrumentnode(np **Node, init **NodeList, wr int, skip int) {
+func instrumentnode(np **Node, init *Nodes, wr int, skip int) {
n := *np
if n == nil {
// nil it out and handle it separately before putting it back.
l := n.Ninit
- n.Ninit = nil
+ n.Ninit.Set(nil)
instrumentlist(l, nil)
instrumentnode(&n, &l, wr, skip) // recurse with nil n->ninit
appendinit(&n, l)
switch n.Op {
default:
- Fatalf("instrument: unknown node type %v", Oconv(int(n.Op), 0))
+ Fatalf("instrument: unknown node type %v", Oconv(n.Op, 0))
case OAS, OASWB, OAS2FUNC:
instrumentnode(&n.Left, init, 1, 0)
goto ret
case OBLOCK:
- var out *NodeList
- for l := n.List; l != nil; l = l.Next {
- switch l.N.Op {
+ var out []*Node
+ for it := nodeSeqIterate(n.List); !it.Done(); it.Next() {
+ switch it.N().Op {
case OCALLFUNC, OCALLMETH, OCALLINTER:
- instrumentnode(&l.N, &l.N.Ninit, 0, 0)
- out = list(out, l.N)
+ instrumentnode(it.P(), &it.N().Ninit, 0, 0)
+ out = append(out, it.N())
// Scan past OAS nodes copying results off stack.
// Those must not be instrumented, because the
// instrumentation calls will smash the results.
// The assignments are to temporaries, so they cannot
// be involved in races and need not be instrumented.
- for l.Next != nil && l.Next.N.Op == OAS && iscallret(l.Next.N.Right) {
- l = l.Next
- out = list(out, l.N)
+ for it.Len() > 1 && nodeSeqSecond(it.Seq()).Op == OAS && iscallret(nodeSeqSecond(it.Seq()).Right) {
+ it.Next()
+ out = append(out, it.N())
}
default:
- instrumentnode(&l.N, &out, 0, 0)
- out = list(out, l.N)
+ var outn Nodes
+ outn.Set(out)
+ instrumentnode(it.P(), &outn, 0, 0)
+ out = append(outn.Slice(), it.N())
}
}
- n.List = out
+ n.List.Set(out)
goto ret
case ODEFER:
OAS2RECV,
OAS2MAPR,
OASOP:
- Yyerror("instrument: %v must be lowered by now", Oconv(int(n.Op), 0))
+ Yyerror("instrument: %v must be lowered by now", Oconv(n.Op, 0))
goto ret
// impossible nodes: only appear in backend.
case ORROTC, OEXTEND:
- Yyerror("instrument: %v cannot exist now", Oconv(int(n.Op), 0))
+ Yyerror("instrument: %v cannot exist now", Oconv(n.Op, 0))
goto ret
case OGETG:
if n.Op != OBLOCK { // OBLOCK is handled above in a special way.
instrumentlist(n.List, init)
}
- instrumentslice(n.Nbody.Slice(), nil)
+ instrumentlist(n.Nbody, nil)
instrumentlist(n.Rlist, nil)
*np = n
}
return false
}
-func callinstr(np **Node, init **NodeList, wr int, skip int) bool {
+func callinstr(np **Node, init *Nodes, wr int, skip int) bool {
n := *np
//print("callinstr for %+N [ %O ] etype=%E class=%d\n",
f = mkcall(name, nil, init, uintptraddr(n))
}
- *init = list(*init, f)
+ init.Append(f)
return true
}
return r
}
-func detachexpr(n *Node, init **NodeList) *Node {
+func detachexpr(n *Node, init *Nodes) *Node {
addr := Nod(OADDR, n, nil)
l := temp(Ptrto(n.Type))
as := Nod(OAS, l, addr)
typecheck(&as, Etop)
walkexpr(&as, init)
- *init = list(*init, as)
+ init.Append(as)
ind := Nod(OIND, l, nil)
typecheck(&ind, Erv)
walkexpr(&ind, init)
}
}
-func foreachlist(l *NodeList, f func(*Node, interface{}), c interface{}) {
- for ; l != nil; l = l.Next {
- foreachnode(l.N, f, c)
- }
-}
-
-func foreachslice(l []*Node, f func(*Node, interface{}), c interface{}) {
- for _, n := range l {
+func foreachlist(l Nodes, f func(*Node, interface{}), c interface{}) {
+ for _, n := range l.Slice() {
foreachnode(n, f, c)
}
}
foreachnode(n.Left, f, c)
foreachnode(n.Right, f, c)
foreachlist(n.List, f, c)
- foreachslice(n.Nbody.Slice(), f, c)
+ foreachlist(n.Nbody, f, c)
foreachlist(n.Rlist, f, c)
}
// appendinit is like addinit in subr.go
// but appends rather than prepends.
-func appendinit(np **Node, init *NodeList) {
- if init == nil {
+func appendinit(np **Node, init Nodes) {
+ if init.Len() == 0 {
return
}
*np = n
}
- n.Ninit = concat(n.Ninit, init)
+ n.Ninit.AppendNodes(&init)
n.Ullman = UINF
}
if t == nil {
goto out
}
-
// delicate little dance. see typecheckas2
- for ll := n.List; ll != nil; ll = ll.Next {
- if ll.N.Name == nil || ll.N.Name.Defn != n {
- typecheck(&ll.N, Erv|Easgn)
+ for i1, n1 := range n.List.Slice() {
+ if n1.Name == nil || n1.Name.Defn != n {
+ typecheck(&n.List.Slice()[i1], Erv|Easgn)
}
}
t1 = t.Type
t2 = nil
- if count(n.List) == 2 {
+ if n.List.Len() == 2 {
toomany = 1
}
t2 = runetype
}
- if count(n.List) > 2 || toomany != 0 {
+ if n.List.Len() > 2 || toomany != 0 {
Yyerror("too many variables in range")
}
v1 = nil
- if n.List != nil {
- v1 = n.List.N
+ if n.List.Len() != 0 {
+ v1 = n.List.First()
}
v2 = nil
- if n.List != nil && n.List.Next != nil {
- v2 = n.List.Next.N
+ if n.List.Len() > 1 {
+ v2 = n.List.Second()
}
// this is not only a optimization but also a requirement in the spec.
// present."
if isblank(v2) {
if v1 != nil {
- n.List = list1(v1)
+ n.List.Set([]*Node{v1})
}
v2 = nil
}
// second half of dance
out:
n.Typecheck = 1
-
- for ll := n.List; ll != nil; ll = ll.Next {
- if ll.N.Typecheck == 0 {
- typecheck(&ll.N, Erv|Easgn)
+ for i2, n2 := range n.List.Slice() {
+ if n2.Typecheck == 0 {
+ typecheck(&n.List.Slice()[i2], Erv|Easgn)
}
}
decldepth++
- typecheckslice(n.Nbody.Slice(), Etop)
+ typechecklist(n.Nbody.Slice(), Etop)
decldepth--
}
t := n.Type
a := n.Right
- lno := int(setlineno(a))
+ lno := setlineno(a)
n.Right = nil
var v1 *Node
- if n.List != nil {
- v1 = n.List.N
+ if n.List.Len() != 0 {
+ v1 = n.List.First()
}
var v2 *Node
- if n.List != nil && n.List.Next != nil && !isblank(n.List.Next.N) {
- v2 = n.List.Next.N
+ if n.List.Len() > 1 && !isblank(n.List.Second()) {
+ v2 = n.List.Second()
}
// n->list has no meaning anymore, clear it
// to avoid erroneous processing by racewalk.
- n.List = nil
+ n.List.Set(nil)
var body []*Node
- var init *NodeList
+ var init []*Node
switch t.Etype {
default:
Fatalf("walkrange")
case TARRAY:
if memclrrange(n, v1, v2, a) {
- lineno = int32(lno)
+ lineno = lno
return
}
hn := temp(Types[TINT])
var hp *Node
- init = list(init, Nod(OAS, hv1, nil))
- init = list(init, Nod(OAS, hn, Nod(OLEN, ha, nil)))
+ init = append(init, Nod(OAS, hv1, nil))
+ init = append(init, Nod(OAS, hn, Nod(OLEN, ha, nil)))
if v2 != nil {
hp = temp(Ptrto(n.Type.Type))
tmp := Nod(OINDEX, ha, Nodintconst(0))
tmp.Bounded = true
- init = list(init, Nod(OAS, hp, Nod(OADDR, tmp, nil)))
+ init = append(init, Nod(OAS, hp, Nod(OADDR, tmp, nil)))
}
n.Left = Nod(OLT, hv1, hn)
body = []*Node{Nod(OAS, v1, hv1)}
} else {
a := Nod(OAS2, nil, nil)
- a.List = list(list1(v1), v2)
- a.Rlist = list(list1(hv1), Nod(OIND, hp, nil))
+ a.List.Set([]*Node{v1, v2})
+ a.Rlist.Set([]*Node{hv1, Nod(OIND, hp, nil)})
body = []*Node{a}
// Advance pointer as part of increment.
tmp.Right.Typecheck = 1
a = Nod(OAS, hp, tmp)
typecheck(&a, Etop)
- n.Right.Ninit = list1(a)
+ n.Right.Ninit.Set([]*Node{a})
}
// orderstmt allocated the iterator for us.
keyname := newname(th.Type.Sym) // depends on layout of iterator struct. See reflect.go:hiter
valname := newname(th.Type.Down.Sym) // ditto
- fn := syslook("mapiterinit", 1)
+ fn := syslook("mapiterinit")
- substArgTypes(fn, t.Down, t.Type, th)
- init = list(init, mkcall1(fn, nil, nil, typename(t), ha, Nod(OADDR, hit, nil)))
+ substArgTypes(&fn, t.Down, t.Type, th)
+ init = append(init, mkcall1(fn, nil, nil, typename(t), ha, Nod(OADDR, hit, nil)))
n.Left = Nod(ONE, Nod(ODOT, hit, keyname), nodnil())
- fn = syslook("mapiternext", 1)
- substArgTypes(fn, th)
+ fn = syslook("mapiternext")
+ substArgTypes(&fn, th)
n.Right = mkcall1(fn, nil, nil, Nod(OADDR, hit, nil))
key := Nod(ODOT, hit, keyname)
val := Nod(ODOT, hit, valname)
val = Nod(OIND, val, nil)
a := Nod(OAS2, nil, nil)
- a.List = list(list1(v1), v2)
- a.Rlist = list(list1(key), val)
+ a.List.Set([]*Node{v1, v2})
+ a.Rlist.Set([]*Node{key, val})
body = []*Node{a}
}
hv1 := temp(t.Type)
hv1.Typecheck = 1
if haspointers(t.Type) {
- init = list(init, Nod(OAS, hv1, nil))
+ init = append(init, Nod(OAS, hv1, nil))
}
hb := temp(Types[TBOOL])
n.Left = Nod(ONE, hb, Nodbool(false))
a := Nod(OAS2RECV, nil, nil)
a.Typecheck = 1
- a.List = list(list1(hv1), hb)
- a.Rlist = list1(Nod(ORECV, ha, nil))
- n.Left.Ninit = list1(a)
+ a.List.Set([]*Node{hv1, hb})
+ a.Rlist.Set([]*Node{Nod(ORECV, ha, nil)})
+ n.Left.Ninit.Set([]*Node{a})
if v1 == nil {
body = nil
} else {
ohv1 := temp(Types[TINT])
hv1 := temp(Types[TINT])
- init = list(init, Nod(OAS, hv1, nil))
+ init = append(init, Nod(OAS, hv1, nil))
var a *Node
var hv2 *Node
} else {
hv2 = temp(runetype)
a = Nod(OAS2, nil, nil)
- a.List = list(list1(hv1), hv2)
- fn := syslook("stringiter2", 0)
- a.Rlist = list1(mkcall1(fn, getoutargx(fn.Type), nil, ha, hv1))
+ a.List.Set([]*Node{hv1, hv2})
+ fn := syslook("stringiter2")
+ a.Rlist.Set([]*Node{mkcall1(fn, fn.Type.Results(), nil, ha, hv1)})
}
n.Left = Nod(ONE, hv1, Nodintconst(0))
- n.Left.Ninit = list(list1(Nod(OAS, ohv1, hv1)), a)
+ n.Left.Ninit.Set([]*Node{Nod(OAS, ohv1, hv1), a})
body = nil
if v1 != nil {
n.Op = OFOR
typechecklist(init, Etop)
- n.Ninit = concat(n.Ninit, init)
- typechecklist(n.Left.Ninit, Etop)
+ n.Ninit.Append(init...)
+ typechecklist(n.Left.Ninit.Slice(), Etop)
typecheck(&n.Left, Erv)
typecheck(&n.Right, Etop)
typecheckslice(body, Etop)
n.Nbody.Set(append(body, n.Nbody.Slice()...))
walkstmt(&n)
- lineno = int32(lno)
+ lineno = lno
}
// Lower n into runtime·memclr if possible, for
n.Nbody.Append(v1)
typecheck(&n.Left, Erv)
- typecheckslice(n.Nbody.Slice(), Etop)
+ typechecklist(n.Nbody.Slice(), Etop)
walkstmt(&n)
return true
}
"fmt"
"os"
"sort"
+ "strings"
)
// runtime interface and reflection data structures
MAXVALSIZE = 128
)
+func structfieldSize() int { return 5 * Widthptr } // Sizeof(runtime.structfield{})
+func imethodSize() int { return 3 * Widthptr } // Sizeof(runtime.imethod{})
+func uncommonSize(t *Type) int { // Sizeof(runtime.uncommontype{})
+ if t.Sym == nil && len(methods(t)) == 0 {
+ return 0
+ }
+ return 2*Widthptr + 2*Widthint
+}
+
func makefield(name string, t *Type) *Type {
f := typ(TFIELD)
f.Type = t
// f is method type, with receiver.
// return function type, receiver as first argument (or not).
func methodfunc(f *Type, receiver *Type) *Type {
- var in *NodeList
+ var in []*Node
if receiver != nil {
d := Nod(ODCLFIELD, nil, nil)
d.Type = receiver
- in = list(in, d)
+ in = append(in, d)
}
var d *Node
- for t := getinargx(f).Type; t != nil; t = t.Down {
+ for t, it := IterFields(f.Params()); t != nil; t = it.Next() {
d = Nod(ODCLFIELD, nil, nil)
d.Type = t.Type
d.Isddd = t.Isddd
- in = list(in, d)
+ in = append(in, d)
}
- var out *NodeList
- for t := getoutargx(f).Type; t != nil; t = t.Down {
+ var out []*Node
+ for t, it := IterFields(f.Results()); t != nil; t = it.Next() {
d = Nod(ODCLFIELD, nil, nil)
d.Type = t.Type
- out = list(out, d)
+ out = append(out, d)
}
t := functype(nil, in, out)
if f.Type.Etype != TFUNC || f.Type.Thistuple == 0 {
Fatalf("non-method on %v method %v %v\n", mt, f.Sym, f)
}
- if getthisx(f.Type).Type == nil {
+ if f.Type.Recv().Type == nil {
Fatalf("receiver with no type on %v method %v %v\n", mt, f.Sym, f)
}
if f.Nointerface {
// if pointer receiver but non-pointer t and
// this is not an embedded pointer inside a struct,
// method does not apply.
- this := getthisx(f.Type).Type.Type
+ this := f.Type.Recv().Type.Type
if Isptr[this.Etype] && this.Type == t {
continue
// imethods returns the methods of the interface type t, sorted by name.
func imethods(t *Type) []*Sig {
var methods []*Sig
- for f := t.Type; f != nil; f = f.Down {
+ for f, it := IterFields(t); f != nil; f = it.Next() {
if f.Etype != TFIELD {
Fatalf("imethods: not field")
}
return dsymptr(s, ot, pkg.Pathsym, 0)
}
-// uncommonType
-// ../../../../runtime/type.go:/uncommonType
-func dextratype(sym *Sym, off int, t *Type, ptroff int) int {
+// dextratype dumps the fields of a runtime.uncommontype.
+// dataAdd is the offset in bytes after the header where the
+// backing array of the []method field is written (by dextratypeData).
+func dextratype(sym *Sym, off int, t *Type, dataAdd int) int {
m := methods(t)
if t.Sym == nil && len(m) == 0 {
return off
}
-
- // fill in *extraType pointer in header
- off = int(Rnd(int64(off), int64(Widthptr)))
-
- dsymptr(sym, ptroff, sym, off)
+ noff := int(Rnd(int64(off), int64(Widthptr)))
+ if noff != off {
+ panic("dextratype rounding does something. :-(")
+ }
+ off = noff
for _, a := range m {
dtypesym(a.type_)
}
// slice header
- ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint)
+ ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint+dataAdd)
n := len(m)
ot = duintxx(s, ot, uint64(n), Widthint)
ot = duintxx(s, ot, uint64(n), Widthint)
- // methods
- for _, a := range m {
+ return ot
+}
+
+// dextratypeData dumps the backing array for the []method field of
+// runtime.uncommontype.
+func dextratypeData(s *Sym, ot int, t *Type) int {
+ for _, a := range methods(t) {
// method
// ../../../../runtime/type.go:/method
ot = dgostringptr(s, ot, a.name)
ot = duintptr(s, ot, 0)
}
}
-
return ot
}
case TSTRUCT:
ret = false
- for t1 := t.Type; t1 != nil; t1 = t1.Down {
+ for t1, it := IterFields(t); t1 != nil; t1 = it.Next() {
if haspointers(t1.Type) {
ret = true
break
case TSTRUCT:
// Find the last field that has pointers.
var lastPtrField *Type
- for t1 := t.Type; t1 != nil; t1 = t1.Down {
+ for t1, it := IterFields(t); t1 != nil; t1 = it.Next() {
if haspointers(t1.Type) {
lastPtrField = t1
}
}
}
+// tflag is documented in ../../../../reflect/type.go.
+const tflagUncommon = 1
+
// commonType
-// ../../runtime/type.go:/commonType
+// ../../../../runtime/type.go:/commonType
var dcommontype_algarray *Sym
// size uintptr
// ptrdata uintptr
// hash uint32
- // _ uint8
+ // tflag tflag
// align uint8
// fieldAlign uint8
// kind uint8
// alg *typeAlg
// gcdata *byte
// string *string
- // *uncommonType
// }
ot = duintptr(s, ot, uint64(t.Width))
ot = duintptr(s, ot, uint64(ptrdata))
ot = duint32(s, ot, typehash(t))
- ot = duint8(s, ot, 0) // unused
+
+ var tflag uint8
+ if uncommonSize(t) != 0 {
+ tflag |= tflagUncommon
+ }
+ ot = duint8(s, ot, tflag)
// runtime (and common sense) expects alignment to be a power of two.
i := int(t.Align)
p := Tconv(t, obj.FmtLeft|obj.FmtUnsigned)
+ // If we're writing out type T,
+ // we are very likely to write out type *T as well.
+ // Use the string "*T"[1:] for "T", so that the two
+ // share storage. This is a cheap way to reduce the
+ // amount of space taken up by reflect strings.
+ prefix := 0
+ if !strings.HasPrefix(p, "*") {
+ p = "*" + p
+ prefix = 1
+ }
_, symdata := stringsym(p) // string
- ot = dsymptr(s, ot, symdata, 0)
- ot = duintxx(s, ot, uint64(len(p)), Widthint)
- //fmt.Printf("dcommontype: %s\n", p)
-
- // skip pointer to extraType,
- // which follows the rest of this type structure.
- // caller will fill in if needed.
- // otherwise linker will assume 0.
- ot += Widthptr
+ ot = dsymptr(s, ot, symdata, prefix)
+ ot = duintxx(s, ot, uint64(len(p)-prefix), Widthint)
return ot
}
return n
}
-func weaktypesym(t *Type) *Sym {
- p := Tconv(t, obj.FmtLeft)
- s := Pkglookup(p, weaktypepkg)
-
- //print("weaktypesym: %s -> %+S\n", p, s);
-
- return s
-}
-
// isreflexive reports whether t has a reflexive equality operator.
// That is, if x==x for all x of type t.
func isreflexive(t *Type) bool {
return isreflexive(t.Type)
case TSTRUCT:
- for t1 := t.Type; t1 != nil; t1 = t1.Down {
+ for t1, it := IterFields(t); t1 != nil; t1 = it.Next() {
if !isreflexive(t1.Type) {
return false
}
return needkeyupdate(t.Type)
case TSTRUCT:
- for t1 := t.Type; t1 != nil; t1 = t1.Down {
+ for t1, it := IterFields(t); t1 != nil; t1 = it.Next() {
if needkeyupdate(t1.Type) {
return true
}
ok:
ot := 0
- xt := 0
switch t.Etype {
default:
ot = dcommontype(s, ot, t)
- xt = ot - 1*Widthptr
+ ot = dextratype(s, ot, t, 0)
case TARRAY:
if t.Bound >= 0 {
t2.Bound = -1 // slice
s2 := dtypesym(t2)
ot = dcommontype(s, ot, t)
- xt = ot - 1*Widthptr
ot = dsymptr(s, ot, s1, 0)
ot = dsymptr(s, ot, s2, 0)
ot = duintptr(s, ot, uint64(t.Bound))
s1 := dtypesym(t.Type)
ot = dcommontype(s, ot, t)
- xt = ot - 1*Widthptr
ot = dsymptr(s, ot, s1, 0)
}
+ ot = dextratype(s, ot, t, 0)
// ../../../../runtime/type.go:/chanType
case TCHAN:
s1 := dtypesym(t.Type)
ot = dcommontype(s, ot, t)
- xt = ot - 1*Widthptr
ot = dsymptr(s, ot, s1, 0)
ot = duintptr(s, ot, uint64(t.Chan))
+ ot = dextratype(s, ot, t, 0)
case TFUNC:
- for t1 := getthisx(t).Type; t1 != nil; t1 = t1.Down {
+ for t1, it := IterFields(t.Recv()); t1 != nil; t1 = it.Next() {
dtypesym(t1.Type)
}
isddd := false
- for t1 := getinargx(t).Type; t1 != nil; t1 = t1.Down {
+ for t1, it := IterFields(t.Params()); t1 != nil; t1 = it.Next() {
isddd = t1.Isddd
dtypesym(t1.Type)
}
-
- for t1 := getoutargx(t).Type; t1 != nil; t1 = t1.Down {
+ for t1, it := IterFields(t.Results()); t1 != nil; t1 = it.Next() {
dtypesym(t1.Type)
}
ot = dcommontype(s, ot, t)
- xt = ot - 1*Widthptr
- ot = duint8(s, ot, uint8(obj.Bool2int(isddd)))
+ inCount := t.Thistuple + t.Intuple
+ outCount := t.Outtuple
+ if isddd {
+ outCount |= 1 << 15
+ }
+ ot = duint16(s, ot, uint16(inCount))
+ ot = duint16(s, ot, uint16(outCount))
+ if Widthptr == 8 {
+ ot += 4 // align for *rtype
+ }
- // two slice headers: in and out.
- ot = int(Rnd(int64(ot), int64(Widthptr)))
+ dataAdd := (inCount + outCount) * Widthptr
+ ot = dextratype(s, ot, t, dataAdd)
- ot = dsymptr(s, ot, s, ot+2*(Widthptr+2*Widthint))
- n := t.Thistuple + t.Intuple
- ot = duintxx(s, ot, uint64(n), Widthint)
- ot = duintxx(s, ot, uint64(n), Widthint)
- ot = dsymptr(s, ot, s, ot+1*(Widthptr+2*Widthint)+n*Widthptr)
- ot = duintxx(s, ot, uint64(t.Outtuple), Widthint)
- ot = duintxx(s, ot, uint64(t.Outtuple), Widthint)
-
- // slice data
- for t1 := getthisx(t).Type; t1 != nil; t1 = t1.Down {
+ // Array of rtype pointers follows funcType.
+ for t1, it := IterFields(t.Recv()); t1 != nil; t1 = it.Next() {
ot = dsymptr(s, ot, dtypesym(t1.Type), 0)
- n++
}
- for t1 := getinargx(t).Type; t1 != nil; t1 = t1.Down {
+ for t1, it := IterFields(t.Params()); t1 != nil; t1 = it.Next() {
ot = dsymptr(s, ot, dtypesym(t1.Type), 0)
- n++
}
- for t1 := getoutargx(t).Type; t1 != nil; t1 = t1.Down {
+ for t1, it := IterFields(t.Results()); t1 != nil; t1 = it.Next() {
ot = dsymptr(s, ot, dtypesym(t1.Type), 0)
- n++
}
case TINTER:
// ../../../../runtime/type.go:/interfaceType
ot = dcommontype(s, ot, t)
- xt = ot - 1*Widthptr
- ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint)
+ ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint+uncommonSize(t))
ot = duintxx(s, ot, uint64(n), Widthint)
ot = duintxx(s, ot, uint64(n), Widthint)
+ dataAdd := imethodSize() * n
+ ot = dextratype(s, ot, t, dataAdd)
+
for _, a := range m {
// ../../../../runtime/type.go:/imethod
ot = dgostringptr(s, ot, a.name)
-
ot = dgopkgpath(s, ot, a.pkg)
ot = dsymptr(s, ot, dtypesym(a.type_), 0)
}
s3 := dtypesym(mapbucket(t))
s4 := dtypesym(hmap(t))
ot = dcommontype(s, ot, t)
- xt = ot - 1*Widthptr
ot = dsymptr(s, ot, s1, 0)
ot = dsymptr(s, ot, s2, 0)
ot = dsymptr(s, ot, s3, 0)
ot = duint16(s, ot, uint16(mapbucket(t).Width))
ot = duint8(s, ot, uint8(obj.Bool2int(isreflexive(t.Down))))
ot = duint8(s, ot, uint8(obj.Bool2int(needkeyupdate(t.Down))))
+ ot = dextratype(s, ot, t, 0)
case TPTR32, TPTR64:
if t.Type.Etype == TANY {
// ../../../../runtime/type.go:/UnsafePointerType
ot = dcommontype(s, ot, t)
+ ot = dextratype(s, ot, t, 0)
break
}
s1 := dtypesym(t.Type)
ot = dcommontype(s, ot, t)
- xt = ot - 1*Widthptr
ot = dsymptr(s, ot, s1, 0)
+ ot = dextratype(s, ot, t, 0)
// ../../../../runtime/type.go:/structType
// for security, only the exported fields.
case TSTRUCT:
n := 0
- for t1 := t.Type; t1 != nil; t1 = t1.Down {
+ for t1, it := IterFields(t); t1 != nil; t1 = it.Next() {
dtypesym(t1.Type)
n++
}
ot = dcommontype(s, ot, t)
- xt = ot - 1*Widthptr
- ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint)
+ ot = dsymptr(s, ot, s, ot+Widthptr+2*Widthint+uncommonSize(t))
ot = duintxx(s, ot, uint64(n), Widthint)
ot = duintxx(s, ot, uint64(n), Widthint)
- for t1 := t.Type; t1 != nil; t1 = t1.Down {
+
+ dataAdd := n * structfieldSize()
+ ot = dextratype(s, ot, t, dataAdd)
+
+ for t1, it := IterFields(t); t1 != nil; t1 = it.Next() {
// ../../../../runtime/type.go:/structField
if t1.Sym != nil && t1.Embedded == 0 {
ot = dgostringptr(s, ot, t1.Sym.Name)
}
}
- ot = dextratype(s, ot, t, xt)
+ ot = dextratypeData(s, ot, t)
ggloblsym(s, int32(ot), int16(dupok|obj.RODATA))
// generate typelink.foo pointing at s = type.foo.
// The latter is the type of an auto-generated wrapper.
dtypesym(Ptrto(errortype))
- dtypesym(functype(nil, list1(Nod(ODCLFIELD, nil, typenod(errortype))), list1(Nod(ODCLFIELD, nil, typenod(Types[TSTRING])))))
+ dtypesym(functype(nil, []*Node{Nod(ODCLFIELD, nil, typenod(errortype))}, []*Node{Nod(ODCLFIELD, nil, typenod(Types[TSTRING]))}))
// add paths for runtime and main, which 6l imports implicitly.
dimportpath(Runtimepkg)
p.w.Repeat(elem.Width/int64(Widthptr), count-1)
case TSTRUCT:
- for t1 := t.Type; t1 != nil; t1 = t1.Down {
+ for t1, it := IterFields(t); t1 != nil; t1 = it.Next() {
p.emit(t1.Type, offset+t1.Width)
}
}
else if(a->sym == nil)
a->type = TYPE_CONST;
*/
- p1.As = int16(Thearch.Optoas(OAS, Types[uint8(v.etype)]))
+ p1.As = Thearch.Optoas(OAS, Types[uint8(v.etype)])
// TODO(rsc): Remove special case here.
if (Thearch.Thechar == '0' || Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9') && v.etype == TBOOL {
- p1.As = int16(Thearch.Optoas(OAS, Types[TUINT8]))
+ p1.As = Thearch.Optoas(OAS, Types[TUINT8])
}
p1.From.Type = obj.TYPE_REG
p1.From.Reg = int16(rn)
} else {
fmt.Fprintf(&buf, "%s(%d)", v.node.Sym.Name, i)
if v.offset != 0 {
- fmt.Fprintf(&buf, "%+d", int64(v.offset))
+ fmt.Fprintf(&buf, "%+d", v.offset)
}
}
biclr(&bits, uint(i))
var n *Node
var def *Node
- lno := int(setlineno(sel))
+ lno := setlineno(sel)
count := 0
- typechecklist(sel.Ninit, Etop)
- for l := sel.List; l != nil; l = l.Next {
+ typechecklist(sel.Ninit.Slice(), Etop)
+ for _, n1 := range sel.List.Slice() {
count++
- ncase = l.N
+ ncase = n1
setlineno(ncase)
if ncase.Op != OXCASE {
- Fatalf("typecheckselect %v", Oconv(int(ncase.Op), 0))
+ Fatalf("typecheckselect %v", Oconv(ncase.Op, 0))
}
- if ncase.List == nil {
+ if ncase.List.Len() == 0 {
// default
if def != nil {
Yyerror("multiple defaults in select (first at %v)", def.Line())
} else {
def = ncase
}
- } else if ncase.List.Next != nil {
+ } else if ncase.List.Len() > 1 {
Yyerror("select cases cannot be lists")
} else {
- n = typecheck(&ncase.List.N, Etop)
+ it2 := nodeSeqIterate(ncase.List)
+ n = typecheck(it2.P(), Etop)
ncase.Left = n
- ncase.List = nil
+ ncase.List.Set(nil)
setlineno(n)
switch n.Op {
default:
// convert x, ok = <-c into OSELRECV2(x, <-c) with ntest=ok
case OAS2RECV:
- if n.Rlist.N.Op != ORECV {
+ if n.Rlist.First().Op != ORECV {
Yyerror("select assignment must have receive on right hand side")
break
}
n.Op = OSELRECV2
- n.Left = n.List.N
- n.List = list1(n.List.Next.N)
- n.Right = n.Rlist.N
- n.Rlist = nil
+ n.Left = n.List.First()
+ n.List.Set([]*Node{n.List.Second()})
+ n.Right = n.Rlist.First()
+ n.Rlist.Set(nil)
// convert <-c into OSELRECV(N, <-c)
case ORECV:
}
}
- typecheckslice(ncase.Nbody.Slice(), Etop)
+ typechecklist(ncase.Nbody.Slice(), Etop)
}
sel.Xoffset = int64(count)
- lineno = int32(lno)
+ lineno = lno
}
func walkselect(sel *Node) {
- if sel.List == nil && sel.Xoffset != 0 {
+ if sel.List.Len() == 0 && sel.Xoffset != 0 {
Fatalf("double walkselect") // already rewrote
}
- lno := int(setlineno(sel))
- i := count(sel.List)
+ lno := setlineno(sel)
+ i := sel.List.Len()
// optimization: zero-case select
var init []*Node
// TODO(rsc): Reenable optimization once order.go can handle it.
// golang.org/issue/7672.
if i == 1 {
- cas := sel.List.N
+ cas := sel.List.First()
setlineno(cas)
- l := cas.Ninit
+ l := cas.Ninit.Slice()
if cas.Left != nil { // not default:
n := cas.Left
- l = concat(l, n.Ninit)
- n.Ninit = nil
+ l = append(l, n.Ninit.Slice()...)
+ n.Ninit.Set(nil)
var ch *Node
switch n.Op {
default:
- Fatalf("select %v", Oconv(int(n.Op), 0))
+ Fatalf("select %v", Oconv(n.Op, 0))
// ok already
case OSEND:
case OSELRECV, OSELRECV2:
ch = n.Right.Left
- if n.Op == OSELRECV || n.List == nil {
+ if n.Op == OSELRECV || n.List.Len() == 0 {
if n.Left == nil {
n = n.Right
} else {
}
n.Op = OAS2
- n.List = concat(list1(n.Left), n.List)
- n.Rlist = list1(n.Right)
+ n.List.Set(append([]*Node{n.Left}, n.List.Slice()...))
+ n.Rlist.Set([]*Node{n.Right})
n.Right = nil
n.Left = nil
n.Typecheck = 0
a := Nod(OIF, nil, nil)
a.Left = Nod(OEQ, ch, nodnil())
- a.Nbody.Set([]*Node{mkcall("block", nil, &l)})
+ var ln Nodes
+ ln.Set(l)
+ a.Nbody.Set([]*Node{mkcall("block", nil, &ln)})
+ l = ln.Slice()
typecheck(&a, Etop)
- l = list(l, a)
- l = list(l, n)
+ l = append(l, a)
+ l = append(l, n)
}
- s := make([]*Node, 0, count(l))
- for ll := l; ll != nil; ll = ll.Next {
- s = append(s, ll.N)
- }
- s = append(s, cas.Nbody.Slice()...)
- sel.Nbody.Set(s)
+ l = append(l, cas.Nbody.Slice()...)
+ sel.Nbody.Set(l)
goto out
}
// convert case value arguments to addresses.
// this rewrite is used by both the general code and the next optimization.
- for l := sel.List; l != nil; l = l.Next {
- cas = l.N
+ for it := nodeSeqIterate(sel.List); !it.Done(); it.Next() {
+ cas = it.N()
setlineno(cas)
n = cas.Left
if n == nil {
typecheck(&n.Right, Erv)
case OSELRECV, OSELRECV2:
- if n.Op == OSELRECV2 && n.List == nil {
+ if n.Op == OSELRECV2 && n.List.Len() == 0 {
n.Op = OSELRECV
}
if n.Op == OSELRECV2 {
- n.List.N = Nod(OADDR, n.List.N, nil)
- typecheck(&n.List.N, Erv)
+ it := nodeSeqIterate(n.List)
+ *it.P() = Nod(OADDR, it.N(), nil)
+ typecheck(it.P(), Erv)
}
if n.Left == nil {
}
// optimization: two-case select but one is default: single non-blocking op.
- if i == 2 && (sel.List.N.Left == nil || sel.List.Next.N.Left == nil) {
+ if i == 2 && (sel.List.First().Left == nil || sel.List.Second().Left == nil) {
var cas *Node
var dflt *Node
- if sel.List.N.Left == nil {
- cas = sel.List.Next.N
- dflt = sel.List.N
+ if sel.List.First().Left == nil {
+ cas = sel.List.Second()
+ dflt = sel.List.First()
} else {
- dflt = sel.List.Next.N
- cas = sel.List.N
+ dflt = sel.List.Second()
+ cas = sel.List.First()
}
n := cas.Left
setlineno(n)
r := Nod(OIF, nil, nil)
- r.Ninit = cas.Ninit
+ r.Ninit.Set(cas.Ninit.Slice())
switch n.Op {
default:
- Fatalf("select %v", Oconv(int(n.Op), 0))
+ Fatalf("select %v", Oconv(n.Op, 0))
// if selectnbsend(c, v) { body } else { default body }
case OSEND:
case OSELRECV:
r = Nod(OIF, nil, nil)
- r.Ninit = cas.Ninit
+ r.Ninit.Set(cas.Ninit.Slice())
ch := n.Right.Left
r.Left = mkcall1(chanfn("selectnbrecv", 2, ch.Type), Types[TBOOL], &r.Ninit, typename(ch.Type), n.Left, ch)
case OSELRECV2:
r = Nod(OIF, nil, nil)
- r.Ninit = cas.Ninit
+ r.Ninit.Set(cas.Ninit.Slice())
ch := n.Right.Left
- r.Left = mkcall1(chanfn("selectnbrecv2", 2, ch.Type), Types[TBOOL], &r.Ninit, typename(ch.Type), n.Left, n.List.N, ch)
+ r.Left = mkcall1(chanfn("selectnbrecv2", 2, ch.Type), Types[TBOOL], &r.Ninit, typename(ch.Type), n.Left, n.List.First(), ch)
}
typecheck(&r.Left, Erv)
r.Nbody.Set(cas.Nbody.Slice())
- r.Rlist = concat(dflt.Ninit, dflt.Nbody.NodeList())
+ r.Rlist.Set(append(dflt.Ninit.Slice(), dflt.Nbody.Slice()...))
sel.Nbody.Set([]*Node{r})
goto out
}
- init = make([]*Node, 0, count(sel.Ninit))
- for ll := sel.Ninit; ll != nil; ll = ll.Next {
- init = append(init, ll.N)
- }
- sel.Ninit = nil
+ init = sel.Ninit.Slice()
+ sel.Ninit.Set(nil)
// generate sel-struct
setlineno(sel)
r = mkcall("newselect", nil, nil, var_, Nodintconst(selv.Type.Width), Nodintconst(sel.Xoffset))
typecheck(&r, Etop)
init = append(init, r)
-
// register cases
- for l := sel.List; l != nil; l = l.Next {
- cas = l.N
+ for _, cas = range sel.List.Slice() {
setlineno(cas)
n = cas.Left
r = Nod(OIF, nil, nil)
- r.Ninit = cas.Ninit
- cas.Ninit = nil
+ r.Ninit.Set(cas.Ninit.Slice())
+ cas.Ninit.Set(nil)
if n != nil {
- r.Ninit = concat(r.Ninit, n.Ninit)
- n.Ninit = nil
+ r.Ninit.AppendNodes(&n.Ninit)
+ n.Ninit.Set(nil)
}
if n == nil {
} else {
switch n.Op {
default:
- Fatalf("select %v", Oconv(int(n.Op), 0))
+ Fatalf("select %v", Oconv(n.Op, 0))
// selectsend(sel *byte, hchan *chan any, elem *any) (selected bool);
case OSEND:
// selectrecv2(sel *byte, hchan *chan any, elem *any, received *bool) (selected bool);
case OSELRECV2:
- r.Left = mkcall1(chanfn("selectrecv2", 2, n.Right.Left.Type), Types[TBOOL], &r.Ninit, var_, n.Right.Left, n.Left, n.List.N)
+ r.Left = mkcall1(chanfn("selectrecv2", 2, n.Right.Left.Type), Types[TBOOL], &r.Ninit, var_, n.Right.Left, n.Left, n.List.First())
}
}
// selv is no longer alive after use.
r.Nbody.Append(Nod(OVARKILL, selv, nil))
- r.Nbody.Append(cas.Nbody.Slice()...)
+ r.Nbody.AppendNodes(&cas.Nbody)
r.Nbody.Append(Nod(OBREAK, nil, nil))
init = append(init, r)
}
sel.Nbody.Set(init)
out:
- sel.List = nil
- walkstmtslice(sel.Nbody.Slice())
- lineno = int32(lno)
+ sel.List.Set(nil)
+ walkstmtlist(sel.Nbody.Slice())
+ lineno = lno
}
// Keep in sync with src/runtime/runtime2.go and src/runtime/select.go.
// and then cache; and also cache Select per size.
sudog := Nod(OTSTRUCT, nil, nil)
- sudog.List = list(sudog.List, Nod(ODCLFIELD, newname(Lookup("g")), typenod(Ptrto(Types[TUINT8]))))
- sudog.List = list(sudog.List, Nod(ODCLFIELD, newname(Lookup("selectdone")), typenod(Ptrto(Types[TUINT8]))))
- sudog.List = list(sudog.List, Nod(ODCLFIELD, newname(Lookup("next")), typenod(Ptrto(Types[TUINT8]))))
- sudog.List = list(sudog.List, Nod(ODCLFIELD, newname(Lookup("prev")), typenod(Ptrto(Types[TUINT8]))))
- sudog.List = list(sudog.List, Nod(ODCLFIELD, newname(Lookup("elem")), typenod(Ptrto(Types[TUINT8]))))
- sudog.List = list(sudog.List, Nod(ODCLFIELD, newname(Lookup("releasetime")), typenod(Types[TUINT64])))
- sudog.List = list(sudog.List, Nod(ODCLFIELD, newname(Lookup("nrelease")), typenod(Types[TINT32])))
- sudog.List = list(sudog.List, Nod(ODCLFIELD, newname(Lookup("waitlink")), typenod(Ptrto(Types[TUINT8]))))
+ sudog.List.Append(Nod(ODCLFIELD, newname(Lookup("g")), typenod(Ptrto(Types[TUINT8]))))
+ sudog.List.Append(Nod(ODCLFIELD, newname(Lookup("selectdone")), typenod(Ptrto(Types[TUINT8]))))
+ sudog.List.Append(Nod(ODCLFIELD, newname(Lookup("next")), typenod(Ptrto(Types[TUINT8]))))
+ sudog.List.Append(Nod(ODCLFIELD, newname(Lookup("prev")), typenod(Ptrto(Types[TUINT8]))))
+ sudog.List.Append(Nod(ODCLFIELD, newname(Lookup("elem")), typenod(Ptrto(Types[TUINT8]))))
+ sudog.List.Append(Nod(ODCLFIELD, newname(Lookup("releasetime")), typenod(Types[TUINT64])))
+ sudog.List.Append(Nod(ODCLFIELD, newname(Lookup("nrelease")), typenod(Types[TINT32])))
+ sudog.List.Append(Nod(ODCLFIELD, newname(Lookup("waitlink")), typenod(Ptrto(Types[TUINT8]))))
typecheck(&sudog, Etype)
sudog.Type.Noalg = true
sudog.Type.Local = true
scase := Nod(OTSTRUCT, nil, nil)
- scase.List = list(scase.List, Nod(ODCLFIELD, newname(Lookup("elem")), typenod(Ptrto(Types[TUINT8]))))
- scase.List = list(scase.List, Nod(ODCLFIELD, newname(Lookup("chan")), typenod(Ptrto(Types[TUINT8]))))
- scase.List = list(scase.List, Nod(ODCLFIELD, newname(Lookup("pc")), typenod(Types[TUINTPTR])))
- scase.List = list(scase.List, Nod(ODCLFIELD, newname(Lookup("kind")), typenod(Types[TUINT16])))
- scase.List = list(scase.List, Nod(ODCLFIELD, newname(Lookup("so")), typenod(Types[TUINT16])))
- scase.List = list(scase.List, Nod(ODCLFIELD, newname(Lookup("receivedp")), typenod(Ptrto(Types[TUINT8]))))
- scase.List = list(scase.List, Nod(ODCLFIELD, newname(Lookup("releasetime")), typenod(Types[TUINT64])))
+ scase.List.Append(Nod(ODCLFIELD, newname(Lookup("elem")), typenod(Ptrto(Types[TUINT8]))))
+ scase.List.Append(Nod(ODCLFIELD, newname(Lookup("chan")), typenod(Ptrto(Types[TUINT8]))))
+ scase.List.Append(Nod(ODCLFIELD, newname(Lookup("pc")), typenod(Types[TUINTPTR])))
+ scase.List.Append(Nod(ODCLFIELD, newname(Lookup("kind")), typenod(Types[TUINT16])))
+ scase.List.Append(Nod(ODCLFIELD, newname(Lookup("so")), typenod(Types[TUINT16])))
+ scase.List.Append(Nod(ODCLFIELD, newname(Lookup("receivedp")), typenod(Ptrto(Types[TUINT8]))))
+ scase.List.Append(Nod(ODCLFIELD, newname(Lookup("releasetime")), typenod(Types[TUINT64])))
typecheck(&scase, Etype)
scase.Type.Noalg = true
scase.Type.Local = true
sel := Nod(OTSTRUCT, nil, nil)
- sel.List = list(sel.List, Nod(ODCLFIELD, newname(Lookup("tcase")), typenod(Types[TUINT16])))
- sel.List = list(sel.List, Nod(ODCLFIELD, newname(Lookup("ncase")), typenod(Types[TUINT16])))
- sel.List = list(sel.List, Nod(ODCLFIELD, newname(Lookup("pollorder")), typenod(Ptrto(Types[TUINT8]))))
- sel.List = list(sel.List, Nod(ODCLFIELD, newname(Lookup("lockorder")), typenod(Ptrto(Types[TUINT8]))))
+ sel.List.Append(Nod(ODCLFIELD, newname(Lookup("tcase")), typenod(Types[TUINT16])))
+ sel.List.Append(Nod(ODCLFIELD, newname(Lookup("ncase")), typenod(Types[TUINT16])))
+ sel.List.Append(Nod(ODCLFIELD, newname(Lookup("pollorder")), typenod(Ptrto(Types[TUINT8]))))
+ sel.List.Append(Nod(ODCLFIELD, newname(Lookup("lockorder")), typenod(Ptrto(Types[TUINT8]))))
arr := Nod(OTARRAY, Nodintconst(int64(size)), scase)
- sel.List = list(sel.List, Nod(ODCLFIELD, newname(Lookup("scase")), arr))
+ sel.List.Append(Nod(ODCLFIELD, newname(Lookup("scase")), arr))
arr = Nod(OTARRAY, Nodintconst(int64(size)), typenod(Ptrto(Types[TUINT8])))
- sel.List = list(sel.List, Nod(ODCLFIELD, newname(Lookup("lockorderarr")), arr))
+ sel.List.Append(Nod(ODCLFIELD, newname(Lookup("lockorderarr")), arr))
arr = Nod(OTARRAY, Nodintconst(int64(size)), typenod(Types[TUINT16]))
- sel.List = list(sel.List, Nod(ODCLFIELD, newname(Lookup("pollorderarr")), arr))
+ sel.List.Append(Nod(ODCLFIELD, newname(Lookup("pollorderarr")), arr))
typecheck(&sel, Etype)
sel.Type.Noalg = true
sel.Type.Local = true
}
init1(n.Left, out)
init1(n.Right, out)
- for l := n.List; l != nil; l = l.Next {
- init1(l.N, out)
+ for _, n1 := range n.List.Slice() {
+ init1(n1, out)
}
if n.Left != nil && n.Type != nil && n.Left.Op == OTYPE && n.Class == PFUNC {
Fatalf("init1: bad defn")
case ODCLFUNC:
- init2slice(defn.Nbody.Slice(), out)
+ init2list(defn.Nbody, out)
case OAS:
if defn.Left != n {
break
}
defn.Initorder = InitPending
- for l := defn.Rlist; l != nil; l = l.Next {
- init1(l.N, out)
+ for _, n2 := range defn.Rlist.Slice() {
+ init1(n2, out)
}
if Debug['%'] != 0 {
Dump("nonstatic", defn)
return
}
- if n.Op == ONAME && n.Ninit != nil {
+ if n.Op == ONAME && n.Ninit.Len() != 0 {
Fatalf("name %v with ninit: %v\n", n.Sym, Nconv(n, obj.FmtSign))
}
init2list(n.Ninit, out)
init2list(n.List, out)
init2list(n.Rlist, out)
- init2slice(n.Nbody.Slice(), out)
+ init2list(n.Nbody, out)
if n.Op == OCLOSURE {
- init2slice(n.Func.Closure.Nbody.Slice(), out)
+ init2list(n.Func.Closure.Nbody, out)
}
if n.Op == ODOTMETH || n.Op == OCALLPART {
init2(n.Type.Nname, out)
}
}
-func init2list(l *NodeList, out *[]*Node) {
- for ; l != nil; l = l.Next {
- init2(l.N, out)
- }
-}
-
-func init2slice(l []*Node, out *[]*Node) {
- for _, n := range l {
+func init2list(l Nodes, out *[]*Node) {
+ for _, n := range l.Slice() {
init2(n, out)
}
}
-func initreorder(l *NodeList, out *[]*Node) {
+func initreorder(l []*Node, out *[]*Node) {
var n *Node
-
- for ; l != nil; l = l.Next {
- n = l.N
+ for _, n = range l {
switch n.Op {
case ODCLFUNC, ODCLCONST, ODCLTYPE:
continue
}
- initreorder(n.Ninit, out)
- n.Ninit = nil
+ initreorder(n.Ninit.Slice(), out)
+ n.Ninit.Set(nil)
init1(n, out)
}
}
// initfix computes initialization order for a list l of top-level
// declarations and outputs the corresponding list of statements
// to include in the init() function body.
-func initfix(l *NodeList) []*Node {
+func initfix(l []*Node) []*Node {
var lout []*Node
initplans = make(map[*Node]*InitPlan)
- lno := int(lineno)
+ lno := lineno
initreorder(l, &lout)
- lineno = int32(lno)
+ lineno = lno
initplans = nil
return lout
}
return true
}
-func litas(l *Node, r *Node, init **NodeList) {
+func litas(l *Node, r *Node, init *Nodes) {
a := Nod(OAS, l, r)
typecheck(&a, Etop)
walkexpr(&a, init)
- *init = list(*init, a)
+ init.Append(a)
}
const (
case OSTRUCTLIT:
break
}
-
- for nl := n.List; nl != nil; nl = nl.Next {
- value := nl.N.Right
+ for _, n1 := range n.List.Slice() {
+ value := n1.Right
mode |= getdyn(value, 0)
if mode == MODEDYNAM|MODECONST {
break
return mode
}
-func structlit(ctxt int, pass int, n *Node, var_ *Node, init **NodeList) {
- for nl := n.List; nl != nil; nl = nl.Next {
- r := nl.N
+func structlit(ctxt int, pass int, n *Node, var_ *Node, init *Nodes) {
+ for _, r := range n.List.Slice() {
if r.Op != OKEY {
Fatalf("structlit: rhs not OKEY: %v", r)
}
walkstmt(&a)
}
- *init = list(*init, a)
+ init.Append(a)
}
}
-func arraylit(ctxt int, pass int, n *Node, var_ *Node, init **NodeList) {
- for l := n.List; l != nil; l = l.Next {
- r := l.N
+func arraylit(ctxt int, pass int, n *Node, var_ *Node, init *Nodes) {
+ for _, r := range n.List.Slice() {
if r.Op != OKEY {
Fatalf("arraylit: rhs not OKEY: %v", r)
}
walkstmt(&a)
}
- *init = list(*init, a)
+ init.Append(a)
}
}
-func slicelit(ctxt int, n *Node, var_ *Node, init **NodeList) {
+func slicelit(ctxt int, n *Node, var_ *Node, init *Nodes) {
// make an array type
t := shallow(n.Type)
a = Nod(OAS, var_, a)
typecheck(&a, Etop)
a.Dodata = 2
- *init = list(*init, a)
+ init.Append(a)
return
}
if vstat == nil {
a = Nod(OAS, x, nil)
typecheck(&a, Etop)
- *init = list(*init, a) // zero new temp
+ init.Append(a) // zero new temp
}
a = Nod(OADDR, x, nil)
if vstat == nil {
a = Nod(OAS, temp(t), nil)
typecheck(&a, Etop)
- *init = list(*init, a) // zero new temp
+ init.Append(a) // zero new temp
a = a.Left
}
a = Nod(OADDR, a, nil)
} else {
a = Nod(ONEW, nil, nil)
- a.List = list1(typenod(t))
+ a.List.Set([]*Node{typenod(t)})
}
a = Nod(OAS, vauto, a)
typecheck(&a, Etop)
walkexpr(&a, init)
- *init = list(*init, a)
+ init.Append(a)
if vstat != nil {
// copy static to heap (4)
a = Nod(OAS, a, vstat)
typecheck(&a, Etop)
walkexpr(&a, init)
- *init = list(*init, a)
+ init.Append(a)
}
// make slice out of heap (5)
typecheck(&a, Etop)
orderstmtinplace(&a)
walkstmt(&a)
- *init = list(*init, a)
-
+ init.Append(a)
// put dynamics into slice (6)
- for l := n.List; l != nil; l = l.Next {
- r := l.N
+ for _, r := range n.List.Slice() {
if r.Op != OKEY {
Fatalf("slicelit: rhs not OKEY: %v", r)
}
typecheck(&a, Etop)
orderstmtinplace(&a)
walkstmt(&a)
- *init = list(*init, a)
+ init.Append(a)
}
}
-func maplit(ctxt int, n *Node, var_ *Node, init **NodeList) {
+func maplit(ctxt int, n *Node, var_ *Node, init *Nodes) {
ctxt = 0
// make the map var
nerr := nerrors
a := Nod(OMAKE, nil, nil)
- a.List = list1(typenod(n.Type))
+ a.List.Set([]*Node{typenod(n.Type)})
litas(var_, a, init)
// count the initializers
b := 0
- for l := n.List; l != nil; l = l.Next {
- r := l.N
+ for _, r := range n.List.Slice() {
if r.Op != OKEY {
Fatalf("maplit: rhs not OKEY: %v", r)
}
vstat := staticname(tarr, ctxt)
b := int64(0)
- for l := n.List; l != nil; l = l.Next {
- r := l.N
-
+ for _, r := range n.List.Slice() {
if r.Op != OKEY {
Fatalf("maplit: rhs not OKEY: %v", r)
}
typecheck(&a, Etop)
walkexpr(&a, init)
a.Dodata = 2
- *init = list(*init, a)
+ init.Append(a)
// build vstat[b].b = value;
setlineno(value)
typecheck(&a, Etop)
walkexpr(&a, init)
a.Dodata = 2
- *init = list(*init, a)
+ init.Append(a)
b++
}
a = Nod(OFOR, nil, nil)
a.Nbody.Set([]*Node{r})
- a.Ninit = list1(Nod(OAS, index, Nodintconst(0)))
+ a.Ninit.Set([]*Node{Nod(OAS, index, Nodintconst(0))})
a.Left = Nod(OLT, index, Nodintconst(tarr.Bound))
a.Right = Nod(OAS, index, Nod(OADD, index, Nodintconst(1)))
typecheck(&a, Etop)
walkstmt(&a)
- *init = list(*init, a)
+ init.Append(a)
}
// put in dynamic entries one-at-a-time
var key, val *Node
- for l := n.List; l != nil; l = l.Next {
- r := l.N
-
+ for _, r := range n.List.Slice() {
if r.Op != OKEY {
Fatalf("maplit: rhs not OKEY: %v", r)
}
a = Nod(OAS, key, r.Left)
typecheck(&a, Etop)
walkstmt(&a)
- *init = list(*init, a)
+ init.Append(a)
setlineno(r.Right)
a = Nod(OAS, val, r.Right)
typecheck(&a, Etop)
walkstmt(&a)
- *init = list(*init, a)
+ init.Append(a)
setlineno(val)
a = Nod(OAS, Nod(OINDEX, var_, key), val)
typecheck(&a, Etop)
walkstmt(&a)
- *init = list(*init, a)
+ init.Append(a)
if nerr != nerrors {
break
if key != nil {
a = Nod(OVARKILL, key, nil)
typecheck(&a, Etop)
- *init = list(*init, a)
+ init.Append(a)
a = Nod(OVARKILL, val, nil)
typecheck(&a, Etop)
- *init = list(*init, a)
+ init.Append(a)
}
}
-func anylit(ctxt int, n *Node, var_ *Node, init **NodeList) {
+func anylit(ctxt int, n *Node, var_ *Node, init *Nodes) {
t := n.Type
switch n.Op {
default:
a := Nod(OAS, var_, r)
typecheck(&a, Etop)
- *init = list(*init, a)
+ init.Append(a)
var_ = Nod(OIND, var_, nil)
typecheck(&var_, Erv|Easgn)
Fatalf("anylit: not struct")
}
- if simplename(var_) && count(n.List) > 4 {
+ if simplename(var_) && n.List.Len() > 4 {
if ctxt == 0 {
// lay out static data
vstat := staticname(t, ctxt)
typecheck(&a, Etop)
walkexpr(&a, init)
- *init = list(*init, a)
+ init.Append(a)
// add expressions to automatic
structlit(ctxt, 2, n, var_, init)
}
// initialize of not completely specified
- if simplename(var_) || count(n.List) < structcount(t) {
+ if simplename(var_) || n.List.Len() < structcount(t) {
a := Nod(OAS, var_, nil)
typecheck(&a, Etop)
walkexpr(&a, init)
- *init = list(*init, a)
+ init.Append(a)
}
structlit(ctxt, 3, n, var_, init)
break
}
- if simplename(var_) && count(n.List) > 4 {
+ if simplename(var_) && n.List.Len() > 4 {
if ctxt == 0 {
// lay out static data
vstat := staticname(t, ctxt)
typecheck(&a, Etop)
walkexpr(&a, init)
- *init = list(*init, a)
+ init.Append(a)
// add expressions to automatic
arraylit(ctxt, 2, n, var_, init)
}
// initialize of not completely specified
- if simplename(var_) || int64(count(n.List)) < t.Bound {
+ if simplename(var_) || int64(n.List.Len()) < t.Bound {
a := Nod(OAS, var_, nil)
typecheck(&a, Etop)
walkexpr(&a, init)
- *init = list(*init, a)
+ init.Append(a)
}
arraylit(ctxt, 3, n, var_, init)
}
}
-func oaslit(n *Node, init **NodeList) bool {
+func oaslit(n *Node, init *Nodes) bool {
if n.Left == nil || n.Right == nil {
// not a special composit literal assignment
return false
Fatalf("initplan")
case OARRAYLIT:
- for l := n.List; l != nil; l = l.Next {
- a := l.N
+ for _, a := range n.List.Slice() {
if a.Op != OKEY || !Smallintconst(a.Left) {
Fatalf("initplan arraylit")
}
}
case OSTRUCTLIT:
- for l := n.List; l != nil; l = l.Next {
- a := l.N
+ for _, a := range n.List.Slice() {
if a.Op != OKEY || a.Left.Type == nil {
Fatalf("initplan structlit")
}
}
case OMAPLIT:
- for l := n.List; l != nil; l = l.Next {
- a := l.N
+ for _, a := range n.List.Slice() {
if a.Op != OKEY {
Fatalf("initplan maplit")
}
// fall through
case OSTRUCTLIT:
- for l := n.List; l != nil; l = l.Next {
- if !iszero(l.N.Right) {
+ for _, n1 := range n.List.Slice() {
+ if !iszero(n1.Right) {
return false
}
}
--- /dev/null
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !nacl
+
+package gc
+
+import (
+ "reflect"
+ "testing"
+ "unsafe"
+)
+
+// Assert that the sizes of important structures do not change unexpectedly.
+
+func TestSizeof(t *testing.T) {
+ const _64bit = unsafe.Sizeof(uintptr(0)) == 8
+
+ var tests = []struct {
+ val interface{} // type as a value
+ _32bit uintptr // size on 32bit platforms
+ _64bit uintptr // size on 64bit platforms
+ }{
+ {Flow{}, 52, 88},
+ {Func{}, 104, 184},
+ {Name{}, 52, 80},
+ {Node{}, 92, 144},
+ {Sym{}, 64, 112},
+ {Type{}, 144, 240},
+ }
+
+ for _, tt := range tests {
+ want := tt._32bit
+ if _64bit {
+ want = tt._64bit
+ }
+ got := reflect.TypeOf(tt.val).Size()
+ if want != got {
+ t.Errorf("unsafe.Sizeof(%T) = %d, want %d", tt.val, got, want)
+ }
+ }
+}
"cmd/internal/obj/x86"
)
+var ssaEnabled = true
+
// Smallest possible faulting page at address zero.
const minZeroPage = 4096
if Thearch.Thestring != "amd64" {
return false
}
+ if !ssaEnabled {
+ return false
+ }
// Environment variable control of SSA CG
// 1. IF GOSSAFUNC == current function name THEN
printssa := name == os.Getenv("GOSSAFUNC")
if printssa {
fmt.Println("generating SSA for", name)
- dumpslice("buildssa-enter", fn.Func.Enter.Slice())
- dumpslice("buildssa-body", fn.Nbody.Slice())
- dumpslice("buildssa-exit", fn.Func.Exit.Slice())
+ dumplist("buildssa-enter", fn.Func.Enter)
+ dumplist("buildssa-body", fn.Nbody)
+ dumplist("buildssa-exit", fn.Func.Exit)
}
var s state
s.stmts(s.exitCode)
m := s.mem()
b := s.endBlock()
+ b.Line = fn.Func.Endlineno
b.Kind = ssa.BlockRet
b.Control = m
}
// Check that we used all labels
for name, lab := range s.labels {
if !lab.used() && !lab.reported {
- yyerrorl(int(lab.defNode.Lineno), "label %v defined and not used", name)
+ yyerrorl(lab.defNode.Lineno, "label %v defined and not used", name)
lab.reported = true
}
if lab.used() && !lab.defined() && !lab.reported {
- yyerrorl(int(lab.useNode.Lineno), "label %v not defined", name)
+ yyerrorl(lab.useNode.Lineno, "label %v not defined", name)
lab.reported = true
}
}
}
func (s *state) Error(msg string, args ...interface{}) {
- yyerrorl(int(s.peekLine()), msg, args...)
+ yyerrorl(s.peekLine(), msg, args...)
}
// newValue0 adds a new value with no arguments to the current block.
}
// const* routines add a new const value to the entry block.
+func (s *state) constSlice(t ssa.Type) *ssa.Value { return s.f.ConstSlice(s.peekLine(), t) }
+func (s *state) constInterface(t ssa.Type) *ssa.Value { return s.f.ConstInterface(s.peekLine(), t) }
+func (s *state) constNil(t ssa.Type) *ssa.Value { return s.f.ConstNil(s.peekLine(), t) }
+func (s *state) constEmptyString(t ssa.Type) *ssa.Value { return s.f.ConstEmptyString(s.peekLine(), t) }
func (s *state) constBool(c bool) *ssa.Value {
return s.f.ConstBool(s.peekLine(), Types[TBOOL], c)
}
}
// ssaStmtList converts the statement n to SSA and adds it to s.
-func (s *state) stmtList(l *NodeList) {
- for ; l != nil; l = l.Next {
- s.stmt(l.N)
+func (s *state) stmtList(l Nodes) {
+ for _, n := range l.Slice() {
+ s.stmt(n)
}
}
s.call(n.Left, callGo)
case OAS2DOTTYPE:
- res, resok := s.dottype(n.Rlist.N, true)
- s.assign(n.List.N, res, needwritebarrier(n.List.N, n.Rlist.N), false, n.Lineno)
- s.assign(n.List.Next.N, resok, false, false, n.Lineno)
+ res, resok := s.dottype(n.Rlist.First(), true)
+ s.assign(n.List.First(), res, needwritebarrier(n.List.First(), n.Rlist.First()), false, n.Lineno)
+ s.assign(n.List.Second(), resok, false, false, n.Lineno)
return
case ODCL:
if !lab.defined() {
lab.defNode = n
} else {
- s.Error("label %v already defined at %v", sym, Ctxt.Line(int(lab.defNode.Lineno)))
+ s.Error("label %v already defined at %v", sym, linestr(lab.defNode.Lineno))
lab.reported = true
}
// The label might already have a target block via a goto.
bThen := s.f.NewBlock(ssa.BlockPlain)
bEnd := s.f.NewBlock(ssa.BlockPlain)
var bElse *ssa.Block
- if n.Rlist != nil {
+ if n.Rlist.Len() != 0 {
bElse = s.f.NewBlock(ssa.BlockPlain)
s.condBranch(n.Left, bThen, bElse, n.Likely)
} else {
b.AddEdgeTo(bEnd)
}
- if n.Rlist != nil {
+ if n.Rlist.Len() != 0 {
s.startBlock(bElse)
s.stmtList(n.Rlist)
if b := s.endBlock(); b != nil {
return nil
}
case CTSTR:
+ if n.Val().U == "" {
+ return s.constEmptyString(n.Type)
+ }
return s.entryNewValue0A(ssa.OpConstString, n.Type, n.Val().U)
case CTBOOL:
v := s.constBool(n.Val().U.(bool))
t := n.Type
switch {
case t.IsSlice():
- return s.entryNewValue0(ssa.OpConstSlice, t)
+ return s.constSlice(t)
case t.IsInterface():
- return s.entryNewValue0(ssa.OpConstInterface, t)
+ return s.constInterface(t)
default:
- return s.entryNewValue0(ssa.OpConstNil, t)
+ return s.constNil(t)
}
case CTFLT:
f := n.Val().U.(*Mpflt)
case ODOTPTR:
p := s.expr(n.Left)
s.nilCheck(p)
- p = s.newValue2(ssa.OpAddPtr, p.Type, p, s.constInt(Types[TINT], n.Xoffset))
+ p = s.newValue1I(ssa.OpOffPtr, p.Type, n.Xoffset, p)
return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())
case OINDEX:
}
ptrtyp := Ptrto(Types[TUINT8])
ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a)
- ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i)
+ if Isconst(n.Right, CTINT) {
+ ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, n.Right.Int(), ptr)
+ } else {
+ ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i)
+ }
return s.newValue2(ssa.OpLoad, Types[TUINT8], ptr, s.mem())
case n.Left.Type.IsSlice():
p := s.addr(n, false)
pt := Ptrto(et)
// Evaluate slice
- slice := s.expr(n.List.N)
+ slice := s.expr(n.List.First())
// Allocate new blocks
grow := s.f.NewBlock(ssa.BlockPlain)
assign := s.f.NewBlock(ssa.BlockPlain)
// Decide if we need to grow
- nargs := int64(count(n.List) - 1)
+ nargs := int64(n.List.Len() - 1)
p := s.newValue1(ssa.OpSlicePtr, pt, slice)
l := s.newValue1(ssa.OpSliceLen, Types[TINT], slice)
c := s.newValue1(ssa.OpSliceCap, Types[TINT], slice)
// Evaluate args
args := make([]*ssa.Value, 0, nargs)
store := make([]bool, 0, nargs)
- for l := n.List.Next; l != nil; l = l.Next {
- if canSSAType(l.N.Type) {
- args = append(args, s.expr(l.N))
+ it := nodeSeqIterate(n.List)
+ it.Next()
+ for ; !it.Done(); it.Next() {
+ if canSSAType(it.N().Type) {
+ args = append(args, s.expr(it.N()))
store = append(store, true)
} else {
- args = append(args, s.addr(l.N, false))
+ args = append(args, s.addr(it.N(), false))
store = append(store, false)
}
}
}
case t.IsString():
- return s.entryNewValue0A(ssa.OpConstString, t, "")
+ return s.constEmptyString(t)
case t.IsPtr():
- return s.entryNewValue0(ssa.OpConstNil, t)
+ return s.constNil(t)
case t.IsBoolean():
return s.constBool(false)
case t.IsInterface():
- return s.entryNewValue0(ssa.OpConstInterface, t)
+ return s.constInterface(t)
case t.IsSlice():
- return s.entryNewValue0(ssa.OpConstSlice, t)
+ return s.constSlice(t)
case t.IsStruct():
n := t.NumFields()
v := s.entryNewValue0(ssa.StructMakeOp(t.NumFields()), t)
// want to set it here.
case OCALLINTER:
if fn.Op != ODOTINTER {
- Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", Oconv(int(fn.Op), 0))
+ Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", Oconv(fn.Op, 0))
}
i := s.expr(fn.Left)
itab := s.newValue1(ssa.OpITab, Types[TUINTPTR], i)
// Start exit block, find address of result.
s.startBlock(bNext)
- var titer Iter
- fp := Structfirst(&titer, Getoutarg(n.Left.Type))
+ fp, _ := IterFields(n.Left.Type.Results())
if fp == nil || k != callNormal {
// call has no return value. Continue with the next statement.
return nil
return p
case ODOT:
p := s.addr(n.Left, bounded)
- return s.newValue2(ssa.OpAddPtr, t, p, s.constInt(Types[TINT], n.Xoffset))
+ return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p)
case ODOTPTR:
p := s.expr(n.Left)
if !bounded {
s.nilCheck(p)
}
- return s.newValue2(ssa.OpAddPtr, t, p, s.constInt(Types[TINT], n.Xoffset))
+ return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p)
case OCLOSUREVAR:
- return s.newValue2(ssa.OpAddPtr, t,
- s.entryNewValue0(ssa.OpGetClosurePtr, Ptrto(Types[TUINT8])),
- s.constInt(Types[TINT], n.Xoffset))
+ return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset,
+ s.entryNewValue0(ssa.OpGetClosurePtr, Ptrto(Types[TUINT8])))
case OPARAM:
p := n.Left
if p.Op != ONAME || !(p.Class == PPARAM|PHEAP || p.Class == PPARAMOUT|PHEAP) {
return s.call(n, callNormal)
default:
- s.Unimplementedf("unhandled addr %v", Oconv(int(n.Op), 0))
+ s.Unimplementedf("unhandled addr %v", Oconv(n.Op, 0))
return nil
}
}
bElse := s.f.NewBlock(ssa.BlockPlain)
bEnd := s.f.NewBlock(ssa.BlockPlain)
- aux := &ssa.ExternSymbol{Types[TBOOL], syslook("writeBarrier", 0).Sym}
+ aux := &ssa.ExternSymbol{Types[TBOOL], syslook("writeBarrier").Sym}
flagaddr := s.newValue1A(ssa.OpAddr, Ptrto(Types[TUINT32]), aux, s.sb)
// TODO: select the .enabled field. It is currently first, so not needed for now.
// Load word, test byte, avoiding partial register write from load byte.
s.startBlock(bEnd)
if Debug_wb > 0 {
- Warnl(int(line), "write barrier")
+ Warnl(line, "write barrier")
}
}
bElse := s.f.NewBlock(ssa.BlockPlain)
bEnd := s.f.NewBlock(ssa.BlockPlain)
- aux := &ssa.ExternSymbol{Types[TBOOL], syslook("writeBarrier", 0).Sym}
+ aux := &ssa.ExternSymbol{Types[TBOOL], syslook("writeBarrier").Sym}
flagaddr := s.newValue1A(ssa.OpAddr, Ptrto(Types[TUINT32]), aux, s.sb)
// TODO: select the .enabled field. It is currently first, so not needed for now.
// Load word, test byte, avoiding partial register write from load byte.
s.startBlock(bEnd)
if Debug_wb > 0 {
- Warnl(int(line), "write barrier")
+ Warnl(line, "write barrier")
}
}
// return *(((*int)n)+1)
// }
lenType := n.Type
- nilValue := s.newValue0(ssa.OpConstNil, Types[TUINTPTR])
+ nilValue := s.constNil(Types[TUINTPTR])
cmp := s.newValue2(ssa.OpEqPtr, Types[TBOOL], x, nilValue)
b := s.endBlock()
b.Kind = ssa.BlockIf
tab := s.newValue1(ssa.OpITab, byteptr, v)
s.vars[&typVar] = tab
- isnonnil := s.newValue2(ssa.OpNeqPtr, Types[TBOOL], tab, s.entryNewValue0(ssa.OpConstNil, byteptr))
+ isnonnil := s.newValue2(ssa.OpNeqPtr, Types[TBOOL], tab, s.constNil(byteptr))
b := s.endBlock()
b.Kind = ssa.BlockIf
b.Control = isnonnil
}
if Debug_typeassert > 0 {
- Warnl(int(n.Lineno), "type assertion inlined")
+ Warnl(n.Lineno, "type assertion inlined")
}
// TODO: If we have a nonempty interface and its itab field is nil,
// type assertion failed
s.startBlock(bFail)
- s.vars[&idataVar] = s.entryNewValue0(ssa.OpConstNil, byteptr)
+ s.vars[&idataVar] = s.constNil(byteptr)
s.vars[&okVar] = s.constBool(false)
s.endBlock()
bFail.AddEdgeTo(bEnd)
fs = fs.Link
}
- lno := int(from.Left.Lineno)
+ lno := from.Left.Lineno
if block != nil {
- yyerrorl(lno, "goto %v jumps into block starting at %v", from.Left.Sym, Ctxt.Line(int(block.Lastlineno)))
+ yyerrorl(lno, "goto %v jumps into block starting at %v", from.Left.Sym, linestr(block.Lastlineno))
} else {
- yyerrorl(lno, "goto %v jumps over declaration of %v at %v", from.Left.Sym, dcl, Ctxt.Line(int(dcl.Lastlineno)))
+ yyerrorl(lno, "goto %v jumps over declaration of %v at %v", from.Left.Sym, dcl, linestr(dcl.Lastlineno))
}
}
}
// dest := dest(To) op src(From)
// and also returns the created obj.Prog so it
// may be further adjusted (offset, scale, etc).
-func opregreg(op int, dest, src int16) *obj.Prog {
+func opregreg(op obj.As, dest, src int16) *obj.Prog {
p := Prog(op)
p.From.Type = obj.TYPE_REG
p.To.Type = obj.TYPE_REG
p.To.Type = obj.TYPE_REG
p.To.Reg = r
default:
- var asm int
+ var asm obj.As
switch v.Op {
case ssa.OpAMD64ADDQ:
asm = x86.ALEAQ
a := regnum(v.Args[0])
if r == a {
if v.AuxInt2Int64() == 1 {
- var asm int
+ var asm obj.As
switch v.Op {
// Software optimization manual recommends add $1,reg.
// But inc/dec is 1 byte smaller. ICC always uses inc
p.To.Reg = r
return
} else if v.AuxInt2Int64() == -1 {
- var asm int
+ var asm obj.As
switch v.Op {
case ssa.OpAMD64ADDQconst:
asm = x86.ADECQ
return
}
}
- var asm int
+ var asm obj.As
switch v.Op {
case ssa.OpAMD64ADDQconst:
asm = x86.ALEAQ
p.To.Type = obj.TYPE_REG
p.To.Reg = r
} else if x == r && v.AuxInt2Int64() == -1 {
- var asm int
+ var asm obj.As
// x = x - (-1) is the same as x++
// See OpAMD64ADDQconst comments about inc vs add $1,reg
switch v.Op {
p.To.Type = obj.TYPE_REG
p.To.Reg = r
} else if x == r && v.AuxInt2Int64() == 1 {
- var asm int
+ var asm obj.As
switch v.Op {
case ssa.OpAMD64SUBQconst:
asm = x86.ADECQ
p.To.Type = obj.TYPE_REG
p.To.Reg = r
} else {
- var asm int
+ var asm obj.As
switch v.Op {
case ssa.OpAMD64SUBQconst:
asm = x86.ALEAQ
ssa.OpAMD64MOVSSstore, ssa.OpAMD64MOVSDstore, ssa.OpAMD64MOVOstore:
if w.Args[0] == v.Args[0] && w.Aux == nil && w.AuxInt >= 0 && w.AuxInt < minZeroPage {
if Debug_checknil != 0 && int(v.Line) > 1 {
- Warnl(int(v.Line), "removed nil check")
+ Warnl(v.Line, "removed nil check")
}
return
}
off := ssa.ValAndOff(v.AuxInt).Off()
if w.Args[0] == v.Args[0] && w.Aux == nil && off >= 0 && off < minZeroPage {
if Debug_checknil != 0 && int(v.Line) > 1 {
- Warnl(int(v.Line), "removed nil check")
+ Warnl(v.Line, "removed nil check")
}
return
}
p.To.Reg = regnum(v.Args[0])
addAux(&p.To, v)
if Debug_checknil != 0 && v.Line > 1 { // v.Line==1 in generated wrappers
- Warnl(int(v.Line), "generated nil check")
+ Warnl(v.Line, "generated nil check")
}
default:
v.Unimplementedf("genValue not implemented: %s", v.LongString())
}
// movZero generates a register indirect move with a 0 immediate and keeps track of bytes left and next offset
-func movZero(as int, width int64, nbytes int64, offset int64, regnum int16) (nleft int64, noff int64) {
+func movZero(as obj.As, width int64, nbytes int64, offset int64, regnum int16) (nleft int64, noff int64) {
p := Prog(as)
// TODO: use zero register on archs that support it.
p.From.Type = obj.TYPE_CONST
}
var blockJump = [...]struct {
- asm, invasm int
+ asm, invasm obj.As
}{
ssa.BlockAMD64EQ: {x86.AJEQ, x86.AJNE},
ssa.BlockAMD64NE: {x86.AJNE, x86.AJEQ},
}
type floatingEQNEJump struct {
- jump, index int
+ jump obj.As
+ index int
}
var eqfJumps = [2][2]floatingEQNEJump{
}
// loadByType returns the load instruction of the given type.
-func loadByType(t ssa.Type) int {
+func loadByType(t ssa.Type) obj.As {
// Avoid partial register write
if !t.IsFloat() && t.Size() <= 2 {
if t.Size() == 1 {
}
// storeByType returns the store instruction of the given type.
-func storeByType(t ssa.Type) int {
+func storeByType(t ssa.Type) obj.As {
width := t.Size()
if t.IsFloat() {
switch width {
}
// moveByType returns the reg->reg move instruction of the given type.
-func moveByType(t ssa.Type) int {
+func moveByType(t ssa.Type) obj.As {
if t.IsFloat() {
// Moving the whole sse2 register is faster
// than moving just the correct low portion of it.
return x86.AMOVL
case 8:
return x86.AMOVQ
+ case 16:
+ return x86.AMOVUPS // int128s are in SSE registers
default:
- panic("bad int register width")
+ panic(fmt.Sprintf("bad int register width %d:%s", t.Size(), t))
}
}
panic("bad register type")
}
func (e *ssaExport) Line(line int32) string {
- return Ctxt.Line(int(line))
+ return linestr(line)
}
// Log logs a message from the compiler.
// Warnl reports a "warning", which is usually flag-triggered
// logging output for the benefit of tests.
func (e *ssaExport) Warnl(line int, fmt_ string, args ...interface{}) {
- Warnl(line, fmt_, args...)
+ Warnl(int32(line), fmt_, args...)
}
func (e *ssaExport) Debug_checknil() bool {
)
type Error struct {
- lineno int
+ lineno int32
msg string
}
os.Exit(2)
}
-func parserline() int {
- return int(lineno)
-}
-
func adderrorname(n *Node) {
if n.Op != ODOT {
return
}
old := fmt.Sprintf("%v: undefined: %v\n", n.Line(), n.Left)
- if len(errors) > 0 && int32(errors[len(errors)-1].lineno) == n.Lineno && errors[len(errors)-1].msg == old {
+ if len(errors) > 0 && errors[len(errors)-1].lineno == n.Lineno && errors[len(errors)-1].msg == old {
errors[len(errors)-1].msg = fmt.Sprintf("%v: undefined: %v in %v\n", n.Line(), n.Left, n)
}
}
-func adderr(line int, format string, args ...interface{}) {
+func adderr(line int32, format string, args ...interface{}) {
errors = append(errors, Error{
lineno: line,
- msg: fmt.Sprintf("%v: %s\n", Ctxt.Line(line), fmt.Sprintf(format, args...)),
+ msg: fmt.Sprintf("%v: %s\n", linestr(line), fmt.Sprintf(format, args...)),
})
}
}
}
-func yyerrorl(line int, format string, args ...interface{}) {
+func linestr(line int32) string {
+ return Ctxt.Line(int(line))
+}
+
+func yyerrorl(line int32, format string, args ...interface{}) {
adderr(line, format, args...)
hcrash()
nerrors++
if nsavederrors+nerrors >= 10 && Debug['e'] == 0 {
Flusherrors()
- fmt.Printf("%v: too many errors\n", Ctxt.Line(line))
+ fmt.Printf("%v: too many errors\n", linestr(line))
errorexit()
}
}
}
yyerror_lastsyntax = lineno
- yyerrorl(int(lineno), "%s", msg)
+ yyerrorl(lineno, "%s", msg)
return
}
- adderr(parserline(), "%s", msg)
+ adderr(lineno, "%s", msg)
hcrash()
nerrors++
if nsavederrors+nerrors >= 10 && Debug['e'] == 0 {
Flusherrors()
- fmt.Printf("%v: too many errors\n", Ctxt.Line(parserline()))
+ fmt.Printf("%v: too many errors\n", linestr(lineno))
errorexit()
}
}
func Warn(fmt_ string, args ...interface{}) {
- adderr(parserline(), fmt_, args...)
+ adderr(lineno, fmt_, args...)
hcrash()
}
-func Warnl(line int, fmt_ string, args ...interface{}) {
+func Warnl(line int32, fmt_ string, args ...interface{}) {
adderr(line, fmt_, args...)
if Debug['m'] != 0 {
Flusherrors()
func Fatalf(fmt_ string, args ...interface{}) {
Flusherrors()
- fmt.Printf("%v: internal compiler error: ", Ctxt.Line(int(lineno)))
+ fmt.Printf("%v: internal compiler error: ", linestr(lineno))
fmt.Printf(fmt_, args...)
fmt.Printf("\n")
func linehistpragma(file string) {
if Debug['i'] != 0 {
- fmt.Printf("pragma %s at line %v\n", file, Ctxt.Line(int(lexlineno)))
+ fmt.Printf("pragma %s at line %v\n", file, linestr(lexlineno))
}
Ctxt.AddImport(file)
}
func linehistpush(file string) {
if Debug['i'] != 0 {
- fmt.Printf("import %s at line %v\n", file, Ctxt.Line(int(lexlineno)))
+ fmt.Printf("import %s at line %v\n", file, linestr(lexlineno))
}
Ctxt.LineHist.Push(int(lexlineno), file)
}
func linehistpop() {
if Debug['i'] != 0 {
- fmt.Printf("end of import at line %v\n", Ctxt.Line(int(lexlineno)))
+ fmt.Printf("end of import at line %v\n", linestr(lexlineno))
}
Ctxt.LineHist.Pop(int(lexlineno))
}
func linehistupdate(file string, off int) {
if Debug['i'] != 0 {
- fmt.Printf("line %s at line %v\n", file, Ctxt.Line(int(lexlineno)))
+ fmt.Printf("line %s at line %v\n", file, linestr(lexlineno))
}
Ctxt.LineHist.Update(int(lexlineno), file, off)
}
if n == 0 {
// can't possibly be used - there were no symbols
- yyerrorl(int(pack.Lineno), "imported and not used: %q", opkg.Path)
+ yyerrorl(pack.Lineno, "imported and not used: %q", opkg.Path)
}
}
n.Op = op
n.Left = nleft
n.Right = nright
- n.Lineno = int32(parserline())
+ n.Lineno = lineno
n.Xoffset = BADWIDTH
n.Orig = n
switch op {
t := new(Type)
t.Etype = et
t.Width = BADWIDTH
- t.Lineno = int(lineno)
+ t.Lineno = lineno
t.Orig = t
return t
}
}
var a []*Type
- for f := t.Type; f != nil; f = f.Down {
+ for f, it := IterFields(t); f != nil; f = it.Next() {
a = append(a, f)
}
sort.Sort(methcmp(a))
m.Orig = m
m.Left = treecopy(n.Left, lineno)
m.Right = treecopy(n.Right, lineno)
- m.List = listtreecopy(n.List, lineno)
+ m.List.Set(listtreecopy(n.List.Slice(), lineno))
if lineno != 0 {
m.Lineno = lineno
}
return a == b || a != nil && b != nil && *a == *b
}
-type TypePairList struct {
- t1 *Type
- t2 *Type
- next *TypePairList
-}
-
-func onlist(l *TypePairList, t1 *Type, t2 *Type) bool {
- for ; l != nil; l = l.next {
- if (l.t1 == t1 && l.t2 == t2) || (l.t1 == t2 && l.t2 == t1) {
- return true
- }
- }
- return false
-}
-
-// Return 1 if t1 and t2 are identical, following the spec rules.
+// Eqtype reports whether t1 and t2 are identical, following the spec rules.
//
// Any cyclic type must go through a named type, and if one is
// named, it is only identical to the other if they are the same
// pointer (t1 == t2), so there's no chance of chasing cycles
// ad infinitum, so no need for a depth counter.
-func Eqtype(t1 *Type, t2 *Type) bool {
+func Eqtype(t1, t2 *Type) bool {
return eqtype1(t1, t2, nil)
}
-func eqtype1(t1 *Type, t2 *Type, assumed_equal *TypePairList) bool {
+type typePair struct {
+ t1 *Type
+ t2 *Type
+}
+
+func eqtype1(t1, t2 *Type, assumedEqual map[typePair]struct{}) bool {
if t1 == t2 {
return true
}
return false
}
if t1.Sym != nil || t2.Sym != nil {
- // Special case: we keep byte and uint8 separate
- // for error messages. Treat them as equal.
+ // Special case: we keep byte/uint8 and rune/int32
+ // separate for error messages. Treat them as equal.
switch t1.Etype {
case TUINT8:
- if (t1 == Types[TUINT8] || t1 == bytetype) && (t2 == Types[TUINT8] || t2 == bytetype) {
- return true
- }
-
- case TINT, TINT32:
- if (t1 == Types[runetype.Etype] || t1 == runetype) && (t2 == Types[runetype.Etype] || t2 == runetype) {
- return true
- }
+ return (t1 == Types[TUINT8] || t1 == bytetype) && (t2 == Types[TUINT8] || t2 == bytetype)
+ case TINT32:
+ return (t1 == Types[TINT32] || t1 == runetype) && (t2 == Types[TINT32] || t2 == runetype)
+ default:
+ return false
}
-
- return false
}
- if onlist(assumed_equal, t1, t2) {
+ if assumedEqual == nil {
+ assumedEqual = make(map[typePair]struct{})
+ } else if _, ok := assumedEqual[typePair{t1, t2}]; ok {
return true
}
- var l TypePairList
- l.next = assumed_equal
- l.t1 = t1
- l.t2 = t2
+ assumedEqual[typePair{t1, t2}] = struct{}{}
switch t1.Etype {
case TINTER, TSTRUCT:
if t1.Etype != TFIELD || t2.Etype != TFIELD {
Fatalf("struct/interface missing field: %v %v", t1, t2)
}
- if t1.Sym != t2.Sym || t1.Embedded != t2.Embedded || !eqtype1(t1.Type, t2.Type, &l) || !eqnote(t1.Note, t2.Note) {
+ if t1.Sym != t2.Sym || t1.Embedded != t2.Embedded || !eqtype1(t1.Type, t2.Type, assumedEqual) || !eqnote(t1.Note, t2.Note) {
return false
}
}
// Loop over structs: receiver, in, out.
case TFUNC:
- t1 = t1.Type
- t2 = t2.Type
- for ; t1 != nil && t2 != nil; t1, t2 = t1.Down, t2.Down {
- if t1.Etype != TSTRUCT || t2.Etype != TSTRUCT {
- Fatalf("func missing struct: %v %v", t1, t2)
- }
-
+ for _, f := range recvParamsResults {
// Loop over fields in structs, ignoring argument names.
- ta := t1.Type
- tb := t2.Type
- for ; ta != nil && tb != nil; ta, tb = ta.Down, tb.Down {
- if ta.Etype != TFIELD || tb.Etype != TFIELD {
- Fatalf("func struct missing field: %v %v", ta, tb)
- }
- if ta.Isddd != tb.Isddd || !eqtype1(ta.Type, tb.Type, &l) {
+ ta, ia := IterFields(f(t1))
+ tb, ib := IterFields(f(t2))
+ for ; ta != nil && tb != nil; ta, tb = ia.Next(), ib.Next() {
+ if ta.Isddd != tb.Isddd || !eqtype1(ta.Type, tb.Type, assumedEqual) {
return false
}
}
-
if ta != nil || tb != nil {
return false
}
}
-
- if t1 == nil && t2 == nil {
- return true
- }
- return false
+ return true
case TARRAY:
if t1.Bound != t2.Bound {
}
}
- if eqtype1(t1.Down, t2.Down, &l) && eqtype1(t1.Type, t2.Type, &l) {
- return true
- }
- return false
+ return eqtype1(t1.Down, t2.Down, assumedEqual) && eqtype1(t1.Type, t2.Type, assumedEqual)
}
// Are t1 and t2 equal struct types when field names are ignored?
// substArgTypes substitutes the given list of types for
// successive occurrences of the "any" placeholder in the
// type syntax expression n.Type.
-func substArgTypes(n *Node, types ...*Type) {
+func substArgTypes(np **Node, types ...*Type) {
+ n := Nod(0, nil, nil)
+ *n = **np
+ *np = n
+
for _, t := range types {
dowidth(t)
}
+ n.Type = deep(n.Type)
substAny(&n.Type, &types)
if len(types) > 0 {
Fatalf("substArgTypes: too many argument types")
continue
case TFUNC:
- substAny(&t.Type, types)
- substAny(&t.Type.Down.Down, types)
- substAny(&t.Type.Down, types)
+ substAny(t.RecvP(), types)
+ substAny(t.ParamsP(), types)
+ substAny(t.ResultsP(), types)
case TSTRUCT:
for t = t.Type; t != nil; t = t.Down {
case TFUNC:
nt = shallow(t)
- nt.Type = deep(t.Type)
- nt.Type.Down = deep(t.Type.Down)
- nt.Type.Down.Down = deep(t.Type.Down.Down)
+ *nt.RecvP() = deep(t.Recv())
+ *nt.ResultsP() = deep(t.Results())
+ *nt.ParamsP() = deep(t.Params())
case TSTRUCT:
nt = shallow(t)
return nt
}
-func syslook(name string, copy int) *Node {
+func syslook(name string) *Node {
s := Pkglookup(name, Runtimepkg)
if s == nil || s.Def == nil {
Fatalf("syslook: can't find runtime.%s", name)
}
-
- if copy == 0 {
- return s.Def
- }
-
- n := Nod(0, nil, nil)
- *n = *s.Def
- n.Type = deep(s.Def.Type)
-
- return n
+ return s.Def
}
// compute a hash value for type t.
}
switch n.Op {
case ONAME:
- fmt.Printf("%v %v G%d %v width=%d\n", Oconv(int(n.Op), 0), n.Sym, n.Name.Vargen, n.Type, w)
+ fmt.Printf("%v %v G%d %v width=%d\n", Oconv(n.Op, 0), n.Sym, n.Name.Vargen, n.Type, w)
case OTYPE:
- fmt.Printf("%v %v width=%d\n", Oconv(int(n.Op), 0), n.Type, w)
+ fmt.Printf("%v %v width=%d\n", Oconv(n.Op, 0), n.Type, w)
}
}
var ul int
var ur int
- if n.Ninit != nil {
+ if n.Ninit.Len() != 0 {
ul = UINF
goto out
}
}
s := fmt_
- Yyerror("illegal types for operand: %v%s", Oconv(int(op), 0), s)
-}
-
-// iterator to walk a structure declaration
-func Structfirst(s *Iter, nn **Type) *Type {
- var t *Type
-
- n := *nn
- if n == nil {
- goto bad
- }
-
- switch n.Etype {
- default:
- goto bad
-
- case TSTRUCT, TINTER, TFUNC:
- break
- }
-
- t = n.Type
- if t == nil {
- return nil
- }
-
- if t.Etype != TFIELD {
- Fatalf("structfirst: not field %v", t)
- }
-
- s.T = t
- return t
-
-bad:
- Fatalf("structfirst: not struct %v", n)
-
- return nil
-}
-
-func structnext(s *Iter) *Type {
- n := s.T
- t := n.Down
- if t == nil {
- return nil
- }
-
- if t.Etype != TFIELD {
- Fatalf("structnext: not struct %v", n)
-
- return nil
- }
-
- s.T = t
- return t
-}
-
-// iterator to this and inargs in a function
-func funcfirst(s *Iter, t *Type) *Type {
- var fp *Type
-
- if t == nil {
- goto bad
- }
-
- if t.Etype != TFUNC {
- goto bad
- }
-
- s.Tfunc = t
- s.Done = 0
- fp = Structfirst(s, getthis(t))
- if fp == nil {
- s.Done = 1
- fp = Structfirst(s, getinarg(t))
- }
-
- return fp
-
-bad:
- Fatalf("funcfirst: not func %v", t)
- return nil
-}
-
-func funcnext(s *Iter) *Type {
- fp := structnext(s)
- if fp == nil && s.Done == 0 {
- s.Done = 1
- fp = Structfirst(s, getinarg(s.Tfunc))
- }
-
- return fp
-}
-
-func getthis(t *Type) **Type {
- if t.Etype != TFUNC {
- Fatalf("getthis: not a func %v", t)
- }
- return &t.Type
-}
-
-func Getoutarg(t *Type) **Type {
- if t.Etype != TFUNC {
- Fatalf("getoutarg: not a func %v", t)
- }
- return &t.Type.Down
-}
-
-func getinarg(t *Type) **Type {
- if t.Etype != TFUNC {
- Fatalf("getinarg: not a func %v", t)
- }
- return &t.Type.Down.Down
-}
-
-func getthisx(t *Type) *Type {
- return *getthis(t)
-}
-
-func getoutargx(t *Type) *Type {
- return *Getoutarg(t)
-}
-
-func getinargx(t *Type) *Type {
- return *getinarg(t)
+ Yyerror("illegal types for operand: %v%s", Oconv(op, 0), s)
}
// Brcom returns !(op).
case OGE:
return OLT
}
- Fatalf("brcom: no com for %v\n", Oconv(int(op), 0))
+ Fatalf("brcom: no com for %v\n", Oconv(op, 0))
return op
}
case OGE:
return OLE
}
- Fatalf("brrev: no rev for %v\n", Oconv(int(op), 0))
+ Fatalf("brrev: no rev for %v\n", Oconv(op, 0))
return op
}
// return side effect-free n, appending side effects to init.
// result is assignable if n is.
-func safeexpr(n *Node, init **NodeList) *Node {
+func safeexpr(n *Node, init *Nodes) *Node {
if n == nil {
return nil
}
- if n.Ninit != nil {
- walkstmtlist(n.Ninit)
- *init = concat(*init, n.Ninit)
- n.Ninit = nil
+ if n.Ninit.Len() != 0 {
+ walkstmtlist(n.Ninit.Slice())
+ init.AppendNodes(&n.Ninit)
}
switch n.Op {
return cheapexpr(n, init)
}
-func copyexpr(n *Node, t *Type, init **NodeList) *Node {
+func copyexpr(n *Node, t *Type, init *Nodes) *Node {
l := temp(t)
a := Nod(OAS, l, n)
typecheck(&a, Etop)
walkexpr(&a, init)
- *init = list(*init, a)
+ init.Append(a)
return l
}
// return side-effect free and cheap n, appending side effects to init.
// result may not be assignable.
-func cheapexpr(n *Node, init **NodeList) *Node {
+func cheapexpr(n *Node, init *Nodes) *Node {
switch n.Op {
case ONAME, OLITERAL:
return n
}
// Given funarg struct list, return list of ODCLFIELD Node fn args.
-func structargs(tl **Type, mustname int) *NodeList {
- var savet Iter
- var a *Node
- var n *Node
- var buf string
-
- var args *NodeList
+func structargs(tl *Type, mustname bool) []*Node {
+ var args []*Node
gen := 0
- for t := Structfirst(&savet, tl); t != nil; t = structnext(&savet) {
- n = nil
- if mustname != 0 && (t.Sym == nil || t.Sym.Name == "_") {
+ for t, it := IterFields(tl); t != nil; t = it.Next() {
+ var n *Node
+ if mustname && (t.Sym == nil || t.Sym.Name == "_") {
// invent a name so that we can refer to it in the trampoline
- buf = fmt.Sprintf(".anon%d", gen)
+ buf := fmt.Sprintf(".anon%d", gen)
gen++
-
n = newname(Lookup(buf))
} else if t.Sym != nil {
n = newname(t.Sym)
}
- a = Nod(ODCLFIELD, n, typenod(t.Type))
+ a := Nod(ODCLFIELD, n, typenod(t.Type))
a.Isddd = t.Isddd
if n != nil {
n.Isddd = t.Isddd
}
- args = list(args, a)
+ args = append(args, a)
}
return args
this := Nod(ODCLFIELD, newname(Lookup(".this")), typenod(rcvr))
this.Left.Name.Param.Ntype = this.Right
- in := structargs(getinarg(method.Type), 1)
- out := structargs(Getoutarg(method.Type), 0)
+ in := structargs(method.Type.Params(), true)
+ out := structargs(method.Type.Results(), false)
t := Nod(OTFUNC, nil, nil)
- l := list1(this)
+ l := []*Node{this}
if iface != 0 && rcvr.Width < Types[Tptr].Width {
// Building method for interface table and receiver
// is smaller than the single pointer-sized word
tpad.Type = Types[TUINT8]
tpad.Bound = Types[Tptr].Width - rcvr.Width
pad := Nod(ODCLFIELD, newname(Lookup(".pad")), typenod(tpad))
- l = list(l, pad)
+ l = append(l, pad)
}
- t.List = concat(l, in)
- t.Rlist = out
+ t.List.Set(append(l, in...))
+ t.Rlist.Set(out)
fn := Nod(ODCLFUNC, nil, nil)
fn.Func.Nname = newname(newnam)
funchdr(fn)
// arg list
- var args *NodeList
+ var args []*Node
isddd := false
- for l := in; l != nil; l = l.Next {
- args = list(args, l.N.Left)
- isddd = l.N.Left.Isddd
+ for _, n := range in {
+ args = append(args, n.Left)
+ isddd = n.Left.Isddd
}
- methodrcvr := getthisx(method.Type).Type.Type
+ methodrcvr := method.Type.Recv().Type.Type
// generate nil pointer check for better error
if Isptr[rcvr.Etype] && rcvr.Type == methodrcvr {
// these strings are already in the reflect tables,
// so no space cost to use them here.
- var l *NodeList
+ var l []*Node
var v Val
v.U = rcvr.Type.Sym.Pkg.Name // package name
- l = list(l, nodlit(v))
+ l = append(l, nodlit(v))
v.U = rcvr.Type.Sym.Name // type name
- l = list(l, nodlit(v))
+ l = append(l, nodlit(v))
v.U = method.Sym.Name
- l = list(l, nodlit(v)) // method name
- call := Nod(OCALL, syslook("panicwrap", 0), nil)
- call.List = l
+ l = append(l, nodlit(v)) // method name
+ call := Nod(OCALL, syslook("panicwrap"), nil)
+ call.List.Set(l)
n.Nbody.Set([]*Node{call})
fn.Nbody.Append(n)
}
} else {
fn.Func.Wrapper = true // ignore frame for panic+recover matching
call := Nod(OCALL, dot, nil)
- call.List = args
+ call.List.Set(args)
call.Isddd = isddd
if method.Type.Outtuple > 0 {
n := Nod(ORETURN, nil, nil)
- n.List = list1(call)
+ n.List.Set([]*Node{call})
call = n
}
}
if false && Debug['r'] != 0 {
- dumpslice("genwrapper body", fn.Nbody.Slice())
+ dumplist("genwrapper body", fn.Nbody)
}
funcbody(fn)
fn.Func.Dupok = true
}
typecheck(&fn, Etop)
- typecheckslice(fn.Nbody.Slice(), Etop)
+ typechecklist(fn.Nbody.Slice(), Etop)
inlcalls(fn)
escAnalyze([]*Node{fn}, false)
n := newname(sym)
n.Class = PFUNC
tfn := Nod(OTFUNC, nil, nil)
- tfn.List = list(tfn.List, Nod(ODCLFIELD, nil, typenod(Ptrto(t))))
- tfn.List = list(tfn.List, Nod(ODCLFIELD, nil, typenod(Types[TUINTPTR])))
- tfn.List = list(tfn.List, Nod(ODCLFIELD, nil, typenod(Types[TUINTPTR])))
- tfn.Rlist = list(tfn.Rlist, Nod(ODCLFIELD, nil, typenod(Types[TUINTPTR])))
+ tfn.List.Append(Nod(ODCLFIELD, nil, typenod(Ptrto(t))))
+ tfn.List.Append(Nod(ODCLFIELD, nil, typenod(Types[TUINTPTR])))
+ tfn.List.Append(Nod(ODCLFIELD, nil, typenod(Types[TUINTPTR])))
+ tfn.Rlist.Append(Nod(ODCLFIELD, nil, typenod(Types[TUINTPTR])))
typecheck(&tfn, Etype)
n.Type = tfn.Type
return n
// if pointer receiver in method,
// the method does not exist for value types.
- rcvr = getthisx(tm.Type).Type.Type
+ rcvr = tm.Type.Recv().Type.Type
if Isptr[rcvr.Etype] && !Isptr[t0.Etype] && !followptr && !isifacemethod(tm.Type) {
if false && Debug['r'] != 0 {
return et
}
-func listtreecopy(l *NodeList, lineno int32) *NodeList {
- var out *NodeList
- for ; l != nil; l = l.Next {
- out = list(out, treecopy(l.N, lineno))
+func listtreecopy(l []*Node, lineno int32) []*Node {
+ var out []*Node
+ for _, n := range l {
+ out = append(out, treecopy(n, lineno))
}
return out
}
-func liststmt(l *NodeList) *Node {
+func liststmt(l []*Node) *Node {
n := Nod(OBLOCK, nil, nil)
- n.List = l
- if l != nil {
- n.Lineno = l.N.Lineno
+ n.List.Set(l)
+ if nodeSeqLen(l) != 0 {
+ n.Lineno = nodeSeqFirst(l).Lineno
}
return n
}
-func liststmtslice(l []*Node) *Node {
- var ll *NodeList
- for _, n := range l {
- ll = list(ll, n)
- }
- return liststmt(ll)
-}
-
// return nelem of list
func structcount(t *Type) int {
- var s Iter
-
v := 0
- for t = Structfirst(&s, &t); t != nil; t = structnext(&s) {
+ for t, it := IterFields(t); t != nil; t = it.Next() {
v++
}
return v
return p
}
-func addinit(np **Node, init *NodeList) {
- if init == nil {
+func addinit(np **Node, init []*Node) {
+ if nodeSeqLen(init) == 0 {
return
}
*np = n
}
- n.Ninit = concat(init, n.Ninit)
+ n.Ninit.Set(append(nodeSeqSlice(init), n.Ninit.Slice()...))
n.Ullman = UINF
}
-func addinitslice(np **Node, init []*Node) {
- var l *NodeList
- for _, n := range init {
- l = list(l, n)
- }
- addinit(np, l)
-}
-
var reservedimports = []string{
"go",
"type",
return true
}
- if unicode.IsSpace(rune(r)) {
+ if unicode.IsSpace(r) {
Yyerror("import path contains space character: %q", path)
return true
}
return false
}
-func checknil(x *Node, init **NodeList) {
+func checknil(x *Node, init *Nodes) {
if Isinter(x.Type) {
x = Nod(OITAB, x, nil)
typecheck(&x, Erv)
n := Nod(OCHECKNIL, x, nil)
n.Typecheck = 1
- *init = list(*init, n)
+ init.Append(n)
}
// Can this type be stored directly in an interface word?
return false
}
-// type2IET returns "T" if t is a concrete type,
-// "I" if t is an interface type, and "E" if t is an empty interface type.
+// iet returns 'T' if t is a concrete type,
+// 'I' if t is an interface type, and 'E' if t is an empty interface type.
// It is used to build calls to the conv* and assert* runtime routines.
-func type2IET(t *Type) string {
+func (t *Type) iet() byte {
if isnilinter(t) {
- return "E"
+ return 'E'
}
if Isinter(t) {
- return "I"
+ return 'I'
}
- return "T"
+ return 'T'
}
// typecheckswitch typechecks a switch statement.
func typecheckswitch(n *Node) {
- lno := int(lineno)
- typechecklist(n.Ninit, Etop)
+ lno := lineno
+ typechecklist(n.Ninit.Slice(), Etop)
var nilonly string
var top int
n.Type = t
var def *Node
- var ll *NodeList
- for l := n.List; l != nil; l = l.Next {
- ncase := l.N
+ for _, ncase := range n.List.Slice() {
setlineno(n)
- if ncase.List == nil {
+ if ncase.List.Len() == 0 {
// default
if def != nil {
Yyerror("multiple defaults in switch (first at %v)", def.Line())
def = ncase
}
} else {
- for ll = ncase.List; ll != nil; ll = ll.Next {
- setlineno(ll.N)
- typecheck(&ll.N, Erv|Etype)
- if ll.N.Type == nil || t == nil {
+ for i1, n1 := range ncase.List.Slice() {
+ setlineno(n1)
+ typecheck(&ncase.List.Slice()[i1], Erv|Etype)
+ if ncase.List.Slice()[i1].Type == nil || t == nil {
continue
}
setlineno(ncase)
switch top {
// expression switch
case Erv:
- defaultlit(&ll.N, t)
+ defaultlit(&ncase.List.Slice()[i1], t)
switch {
- case ll.N.Op == OTYPE:
- Yyerror("type %v is not an expression", ll.N.Type)
- case ll.N.Type != nil && assignop(ll.N.Type, t, nil) == 0 && assignop(t, ll.N.Type, nil) == 0:
+ case ncase.List.Slice()[i1].Op == OTYPE:
+ Yyerror("type %v is not an expression", ncase.List.Slice()[i1].Type)
+ case ncase.List.Slice()[i1].Type != nil && assignop(ncase.List.Slice()[i1].Type, t, nil) == 0 && assignop(t, ncase.List.Slice()[i1].Type, nil) == 0:
if n.Left != nil {
- Yyerror("invalid case %v in switch on %v (mismatched types %v and %v)", ll.N, n.Left, ll.N.Type, t)
+ Yyerror("invalid case %v in switch on %v (mismatched types %v and %v)", ncase.List.Slice()[i1], n.Left, ncase.List.Slice()[i1].Type, t)
} else {
- Yyerror("invalid case %v in switch (mismatched types %v and bool)", ll.N, ll.N.Type)
+ Yyerror("invalid case %v in switch (mismatched types %v and bool)", ncase.List.Slice()[i1], ncase.List.Slice()[i1].Type)
}
- case nilonly != "" && !isnil(ll.N):
- Yyerror("invalid case %v in switch (can only compare %s %v to nil)", ll.N, nilonly, n.Left)
- case Isinter(t) && !Isinter(ll.N.Type) && algtype1(ll.N.Type, nil) == ANOEQ:
- Yyerror("invalid case %v in switch (incomparable type)", Nconv(ll.N, obj.FmtLong))
+ case nilonly != "" && !isnil(ncase.List.Slice()[i1]):
+ Yyerror("invalid case %v in switch (can only compare %s %v to nil)", ncase.List.Slice()[i1], nilonly, n.Left)
+ case Isinter(t) && !Isinter(ncase.List.Slice()[i1].Type) && algtype1(ncase.List.Slice()[i1].Type, nil) == ANOEQ:
+ Yyerror("invalid case %v in switch (incomparable type)", Nconv(ncase.List.Slice()[i1], obj.FmtLong))
}
// type switch
var missing, have *Type
var ptr int
switch {
- case ll.N.Op == OLITERAL && Istype(ll.N.Type, TNIL):
- case ll.N.Op != OTYPE && ll.N.Type != nil: // should this be ||?
- Yyerror("%v is not a type", Nconv(ll.N, obj.FmtLong))
+ case ncase.List.Slice()[i1].Op == OLITERAL && Istype(ncase.List.Slice()[i1].Type, TNIL):
+ case ncase.List.Slice()[i1].Op != OTYPE && ncase.List.Slice()[i1].Type != nil: // should this be ||?
+ Yyerror("%v is not a type", Nconv(ncase.List.Slice()[i1], obj.FmtLong))
// reset to original type
- ll.N = n.Left.Right
- case ll.N.Type.Etype != TINTER && t.Etype == TINTER && !implements(ll.N.Type, t, &missing, &have, &ptr):
+ ncase.List.Slice()[i1] = n.Left.Right
+ case ncase.List.Slice()[i1].Type.Etype != TINTER && t.Etype == TINTER && !implements(ncase.List.Slice()[i1].Type, t, &missing, &have, &ptr):
if have != nil && !missing.Broke && !have.Broke {
- Yyerror("impossible type switch case: %v cannot have dynamic type %v"+" (wrong type for %v method)\n\thave %v%v\n\twant %v%v", Nconv(n.Left.Right, obj.FmtLong), ll.N.Type, missing.Sym, have.Sym, Tconv(have.Type, obj.FmtShort), missing.Sym, Tconv(missing.Type, obj.FmtShort))
+ Yyerror("impossible type switch case: %v cannot have dynamic type %v"+" (wrong type for %v method)\n\thave %v%v\n\twant %v%v", Nconv(n.Left.Right, obj.FmtLong), ncase.List.Slice()[i1].Type, missing.Sym, have.Sym, Tconv(have.Type, obj.FmtShort), missing.Sym, Tconv(missing.Type, obj.FmtShort))
} else if !missing.Broke {
- Yyerror("impossible type switch case: %v cannot have dynamic type %v"+" (missing %v method)", Nconv(n.Left.Right, obj.FmtLong), ll.N.Type, missing.Sym)
+ Yyerror("impossible type switch case: %v cannot have dynamic type %v"+" (missing %v method)", Nconv(n.Left.Right, obj.FmtLong), ncase.List.Slice()[i1].Type, missing.Sym)
}
}
}
}
if top == Etype && n.Type != nil {
- ll = ncase.List
- if ncase.Rlist != nil {
- nvar := ncase.Rlist.N
- if ll != nil && ll.Next == nil && ll.N.Type != nil && !Istype(ll.N.Type, TNIL) {
+ ll := ncase.List
+ if ncase.Rlist.Len() != 0 {
+ nvar := ncase.Rlist.First()
+ if ll.Len() == 1 && ll.First().Type != nil && !Istype(ll.First().Type, TNIL) {
// single entry type switch
- nvar.Name.Param.Ntype = typenod(ll.N.Type)
+ nvar.Name.Param.Ntype = typenod(ll.First().Type)
} else {
// multiple entry type switch or default
nvar.Name.Param.Ntype = typenod(n.Type)
}
typecheck(&nvar, Erv|Easgn)
- ncase.Rlist.N = nvar
+ rit := nodeSeqIterate(ncase.Rlist)
+ *rit.P() = nvar
}
}
- typecheckslice(ncase.Nbody.Slice(), Etop)
+ typechecklist(ncase.Nbody.Slice(), Etop)
}
- lineno = int32(lno)
+ lineno = lno
}
// walkswitch walks a switch statement.
// enumerate the cases, and lop off the default case
cc := caseClauses(sw, s.kind)
- sw.List = nil
+ sw.List.Set(nil)
var def *Node
if len(cc) > 0 && cc[0].typ == caseKindDefault {
def = cc[0].node.Right
if nerrors == 0 {
cas = append(cas, def)
sw.Nbody.Set(append(cas, sw.Nbody.Slice()...))
- walkstmtslice(sw.Nbody.Slice())
+ walkstmtlist(sw.Nbody.Slice())
}
}
func (s *exprSwitch) walkCases(cc []*caseClause) *Node {
if len(cc) < binarySearchMin {
// linear search
- var cas *NodeList
+ var cas []*Node
for _, c := range cc {
n := c.node
- lno := int(setlineno(n))
+ lno := setlineno(n)
a := Nod(OIF, nil, nil)
if (s.kind != switchKindTrue && s.kind != switchKindFalse) || assignop(n.Left.Type, s.exprname.Type, nil) == OCONVIFACE || assignop(s.exprname.Type, n.Left.Type, nil) == OCONVIFACE {
}
a.Nbody.Set([]*Node{n.Right}) // goto l
- cas = list(cas, a)
- lineno = int32(lno)
+ cas = append(cas, a)
+ lineno = lno
}
return liststmt(cas)
}
}
typecheck(&a.Left, Erv)
a.Nbody.Set([]*Node{s.walkCases(cc[:half])})
- a.Rlist = list1(s.walkCases(cc[half:]))
+ a.Rlist.Set([]*Node{s.walkCases(cc[half:])})
return a
}
// It makes labels between cases and statements
// and deals with fallthrough, break, and unreachable statements.
func casebody(sw *Node, typeswvar *Node) {
- if sw.List == nil {
+ if sw.List.Len() == 0 {
return
}
lno := setlineno(sw)
- var cas *NodeList // cases
- var stat []*Node // statements
- var def *Node // defaults
+ var cas []*Node // cases
+ var stat []*Node // statements
+ var def *Node // defaults
br := Nod(OBREAK, nil, nil)
- for l := sw.List; l != nil; l = l.Next {
- n := l.N
+ for it := nodeSeqIterate(sw.List); !it.Done(); it.Next() {
+ n := it.N()
setlineno(n)
if n.Op != OXCASE {
- Fatalf("casebody %v", Oconv(int(n.Op), 0))
+ Fatalf("casebody %v", Oconv(n.Op, 0))
}
n.Op = OCASE
- needvar := count(n.List) != 1 || n.List.N.Op == OLITERAL
+ needvar := n.List.Len() != 1 || n.List.First().Op == OLITERAL
jmp := Nod(OGOTO, newCaseLabel(), nil)
- if n.List == nil {
+ if n.List.Len() == 0 {
if def != nil {
Yyerror("more than one default case")
}
def = n
}
- if n.List != nil && n.List.Next == nil {
+ if n.List.Len() == 1 {
// one case -- reuse OCASE node
- n.Left = n.List.N
+ n.Left = n.List.First()
n.Right = jmp
- n.List = nil
- cas = list(cas, n)
+ n.List.Set(nil)
+ cas = append(cas, n)
} else {
// expand multi-valued cases
- for lc := n.List; lc != nil; lc = lc.Next {
- cas = list(cas, Nod(OCASE, lc.N, jmp))
+ for _, n1 := range n.List.Slice() {
+ cas = append(cas, Nod(OCASE, n1, jmp))
}
}
stat = append(stat, Nod(OLABEL, jmp.Left, nil))
- if typeswvar != nil && needvar && n.Rlist != nil {
+ if typeswvar != nil && needvar && n.Rlist.Len() != 0 {
l := []*Node{
- Nod(ODCL, n.Rlist.N, nil),
- Nod(OAS, n.Rlist.N, typeswvar),
+ Nod(ODCL, n.Rlist.First(), nil),
+ Nod(OAS, n.Rlist.First(), typeswvar),
}
typecheckslice(l, Etop)
stat = append(stat, l...)
Yyerror("cannot fallthrough in type switch")
}
- if l.Next == nil {
+ if it.Len() <= 1 {
setlineno(last)
Yyerror("cannot fallthrough final case in switch")
}
stat = append(stat, br)
if def != nil {
- cas = list(cas, def)
+ cas = append(cas, def)
}
- sw.List = cas
+ sw.List.Set(cas)
sw.Nbody.Set(stat)
lineno = lno
}
// Kind is the kind of switch statement.
func caseClauses(sw *Node, kind int) []*caseClause {
var cc []*caseClause
- for l := sw.List; l != nil; l = l.Next {
- n := l.N
+ for _, n := range sw.List.Slice() {
c := new(caseClause)
cc = append(cc, c)
c.ordinal = len(cc)
break
}
if Eqtype(c1.node.Left.Type, c2.node.Left.Type) {
- yyerrorl(int(c2.node.Lineno), "duplicate case %v in type switch\n\tprevious case at %v", c2.node.Left.Type, c1.node.Line())
+ yyerrorl(c2.node.Lineno, "duplicate case %v in type switch\n\tprevious case at %v", c2.node.Left.Type, c1.node.Line())
}
}
}
sw.Left = nil
if cond == nil {
- sw.List = nil
+ sw.List.Set(nil)
return
}
if cond.Right == nil {
casebody(sw, s.facename)
cc := caseClauses(sw, switchKindType)
- sw.List = nil
+ sw.List.Set(nil)
var def *Node
if len(cc) > 0 && cc[0].typ == caseKindDefault {
def = cc[0].node.Right
i.Nbody.Set([]*Node{Nod(OGOTO, lbl, nil)})
// Wrap default case with label.
blk := Nod(OBLOCK, nil, nil)
- blk.List = list(list1(Nod(OLABEL, lbl, nil)), def)
+ blk.List.Set([]*Node{Nod(OLABEL, lbl, nil), def})
def = blk
}
typecheck(&i.Left, Erv)
ncase := 0
for i := 0; i < run; i++ {
ncase++
- hash := list1(cc[i].node.Right)
+ hash := []*Node{cc[i].node.Right}
for j := i + 1; j < run && cc[i].hash == cc[j].hash; j++ {
- hash = list(hash, cc[j].node.Right)
+ hash = append(hash, cc[j].node.Right)
}
cc[i].node.Right = liststmt(hash)
}
if nerrors == 0 {
cas = append(cas, def)
sw.Nbody.Set(append(cas, sw.Nbody.Slice()...))
- sw.List = nil
- walkstmtslice(sw.Nbody.Slice())
+ sw.List.Set(nil)
+ walkstmtlist(sw.Nbody.Slice())
}
}
// case body if the variable is of type t.
func (s *typeSwitch) typeone(t *Node) *Node {
var name *Node
- var init *NodeList
- if t.Rlist == nil {
+ var init []*Node
+ if t.Rlist.Len() == 0 {
name = nblank
typecheck(&nblank, Erv|Easgn)
} else {
- name = t.Rlist.N
- init = list1(Nod(ODCL, name, nil))
+ name = t.Rlist.First()
+ init = []*Node{Nod(ODCL, name, nil)}
a := Nod(OAS, name, nil)
typecheck(&a, Etop)
- init = list(init, a)
+ init = append(init, a)
}
a := Nod(OAS2, nil, nil)
- a.List = list(list1(name), s.okname) // name, ok =
+ a.List.Set([]*Node{name, s.okname}) // name, ok =
b := Nod(ODOTTYPE, s.facename, nil)
b.Type = t.Left.Type // interface.(type)
- a.Rlist = list1(b)
+ a.Rlist.Set([]*Node{b})
typecheck(&a, Etop)
- init = list(init, a)
+ init = append(init, a)
c := Nod(OIF, nil, nil)
c.Left = s.okname
c.Nbody.Set([]*Node{t.Right}) // if ok { goto l }
- return liststmt(list(init, c))
+ return liststmt(append(init, c))
}
// walkCases generates an AST implementing the cases in cc.
func (s *typeSwitch) walkCases(cc []*caseClause) *Node {
if len(cc) < binarySearchMin {
- var cas *NodeList
+ var cas []*Node
for _, c := range cc {
n := c.node
if c.typ != caseKindTypeConst {
a.Left = Nod(OEQ, s.hashname, Nodintconst(int64(c.hash)))
typecheck(&a.Left, Erv)
a.Nbody.Set([]*Node{n.Right})
- cas = list(cas, a)
+ cas = append(cas, a)
}
return liststmt(cas)
}
a.Left = Nod(OLE, s.hashname, Nodintconst(int64(cc[half-1].hash)))
typecheck(&a.Left, Erv)
a.Nbody.Set([]*Node{s.walkCases(cc[:half])})
- a.Rlist = list1(s.walkCases(cc[half:]))
+ a.Rlist.Set([]*Node{s.walkCases(cc[half:])})
return a
}
// Generic recursive walks should follow these fields.
Left *Node
Right *Node
- Ninit *NodeList
+ Ninit Nodes
Nbody Nodes
- List *NodeList
- Rlist *NodeList
+ List Nodes
+ Rlist Nodes
// most nodes
Type *Type
OSUB // Left - Right
OOR // Left | Right
OXOR // Left ^ Right
- OADDSTR // Left + Right (string addition)
+ OADDSTR // +{List} (string addition, list elements are strings)
OADDR // &Left
OANDAND // Left && Right
OAPPEND // append(List)
if n == nil {
return nil
}
- if n.Op == OBLOCK && n.Ninit == nil {
- // Flatten list and steal storage.
- // Poison pointer to catch errant uses.
- l := n.List
-
- n.List = nil
- return l
- }
-
l := new(NodeList)
l.N = n
l.End = l
return concat(l, list1(n))
}
-// listsort sorts *l in place according to the comparison function lt.
-// The algorithm expects lt(a, b) to be equivalent to a < b.
-// The algorithm is mergesort, so it is guaranteed to be O(n log n).
-func listsort(l **NodeList, lt func(*Node, *Node) bool) {
- if *l == nil || (*l).Next == nil {
- return
- }
-
- l1 := *l
- l2 := *l
- for {
- l2 = l2.Next
- if l2 == nil {
- break
- }
- l2 = l2.Next
- if l2 == nil {
- break
- }
- l1 = l1.Next
- }
-
- l2 = l1.Next
- l1.Next = nil
- l2.End = (*l).End
- (*l).End = l1
-
- l1 = *l
- listsort(&l1, lt)
- listsort(&l2, lt)
-
- if lt(l1.N, l2.N) {
- *l = l1
- } else {
- *l = l2
- l2 = l1
- l1 = *l
- }
-
- // now l1 == *l; and l1 < l2
-
- var le *NodeList
- for (l1 != nil) && (l2 != nil) {
- for (l1.Next != nil) && lt(l1.Next.N, l2.N) {
- l1 = l1.Next
- }
-
- // l1 is last one from l1 that is < l2
- le = l1.Next // le is the rest of l1, first one that is >= l2
- if le != nil {
- le.End = (*l).End
- }
-
- (*l).End = l1 // cut *l at l1
- *l = concat(*l, l2) // glue l2 to *l's tail
-
- l1 = l2 // l1 is the first element of *l that is < the new l2
- l2 = le // ... because l2 now is the old tail of l1
- }
-
- *l = concat(*l, l2) // any remainder
-}
-
// count returns the length of the list l.
func count(l *NodeList) int {
n := int64(0)
return *n.slice
}
+// Len returns the number of entries in Nodes.
+func (n *Nodes) Len() int {
+ if n.slice == nil {
+ return 0
+ }
+ return len(*n.slice)
+}
+
+// First returns the first element of Nodes.
+// It panics if Nodes has no elements.
+func (n *Nodes) First() *Node {
+ return (*n.slice)[0]
+}
+
+// Second returns the second element of Nodes.
+// It panics if Nodes has fewer than two elements.
+func (n *Nodes) Second() *Node {
+ return (*n.slice)[1]
+}
+
// NodeList returns the entries in Nodes as a NodeList.
// Changes to the NodeList entries (as in l.N = n) will *not* be
// reflected in the Nodes.
}
}
+// AppendNodes appends the contents of *n2 to n, then clears n2.
+func (n *Nodes) AppendNodes(n2 *Nodes) {
+ switch {
+ case n2.slice == nil:
+ case n.slice == nil:
+ n.slice = n2.slice
+ default:
+ *n.slice = append(*n.slice, *n2.slice...)
+ }
+ n2.slice = nil
+}
+
// SetToNodeList sets Nodes to the contents of a NodeList.
func (n *Nodes) SetToNodeList(l *NodeList) {
s := make([]*Node, 0, count(l))
}
}
}
+
+// nodesOrNodeList must be either type Nodes or type *NodeList, or, in
+// some cases, []*Node. It exists during the transition from NodeList
+// to Nodes only and then should be deleted. See nodeSeqIterate to
+// return an iterator from a nodesOrNodeList.
+type nodesOrNodeList interface{}
+
+// nodesOrNodeListPtr must be type *Nodes or type **NodeList, or, in
+// some cases, *[]*Node. It exists during the transition from NodeList
+// to Nodes only, and then should be deleted. See setNodeSeq to assign
+// to a generic value.
+type nodesOrNodeListPtr interface{}
+
+// nodeSeqIterator is an interface used to iterate over a sequence of nodes.
+// TODO(iant): Remove after conversion from NodeList to Nodes is complete.
+type nodeSeqIterator interface {
+ // Return whether iteration is complete.
+ Done() bool
+ // Advance to the next node.
+ Next()
+ // Return the current node.
+ N() *Node
+ // Return the address of the current node.
+ P() **Node
+ // Return the number of items remaining in the iteration.
+ Len() int
+ // Return the remaining items as a sequence.
+ // This will have the same type as that passed to nodeSeqIterate.
+ Seq() nodesOrNodeList
+}
+
+// nodeListIterator is a type that implements nodeSeqIterator using a
+// *NodeList.
+type nodeListIterator struct {
+ l *NodeList
+}
+
+func (nli *nodeListIterator) Done() bool {
+ return nli.l == nil
+}
+
+func (nli *nodeListIterator) Next() {
+ nli.l = nli.l.Next
+}
+
+func (nli *nodeListIterator) N() *Node {
+ return nli.l.N
+}
+
+func (nli *nodeListIterator) P() **Node {
+ return &nli.l.N
+}
+
+func (nli *nodeListIterator) Len() int {
+ return count(nli.l)
+}
+
+func (nli *nodeListIterator) Seq() nodesOrNodeList {
+ return nli.l
+}
+
+// nodesIterator implements nodeSeqIterator using a Nodes.
+type nodesIterator struct {
+ n Nodes
+ i int
+}
+
+func (ni *nodesIterator) Done() bool {
+ return ni.i >= len(ni.n.Slice())
+}
+
+func (ni *nodesIterator) Next() {
+ ni.i++
+}
+
+func (ni *nodesIterator) N() *Node {
+ return ni.n.Slice()[ni.i]
+}
+
+func (ni *nodesIterator) P() **Node {
+ return &ni.n.Slice()[ni.i]
+}
+
+func (ni *nodesIterator) Len() int {
+ return len(ni.n.Slice()[ni.i:])
+}
+
+func (ni *nodesIterator) Seq() nodesOrNodeList {
+ var r Nodes
+ r.Set(ni.n.Slice()[ni.i:])
+ return r
+}
+
+// nodeSeqIterate returns an iterator over a *NodeList, a Nodes,
+// a []*Node, or nil.
+func nodeSeqIterate(ns nodesOrNodeList) nodeSeqIterator {
+ switch ns := ns.(type) {
+ case *NodeList:
+ return &nodeListIterator{ns}
+ case Nodes:
+ return &nodesIterator{ns, 0}
+ case []*Node:
+ var r Nodes
+ r.Set(ns)
+ return &nodesIterator{r, 0}
+ case nil:
+ var r Nodes
+ return &nodesIterator{r, 0}
+ default:
+ panic("can't happen")
+ }
+}
+
+// nodeSeqLen returns the length of a *NodeList, a Nodes, a []*Node, or nil.
+func nodeSeqLen(ns nodesOrNodeList) int {
+ switch ns := ns.(type) {
+ case *NodeList:
+ return count(ns)
+ case Nodes:
+ return len(ns.Slice())
+ case []*Node:
+ return len(ns)
+ case nil:
+ return 0
+ default:
+ panic("can't happen")
+ }
+}
+
+// nodeSeqFirst returns the first element of a *NodeList, a Nodes,
+// or a []*Node. It panics if the sequence is empty.
+func nodeSeqFirst(ns nodesOrNodeList) *Node {
+ switch ns := ns.(type) {
+ case *NodeList:
+ return ns.N
+ case Nodes:
+ return ns.Slice()[0]
+ case []*Node:
+ return ns[0]
+ default:
+ panic("can't happen")
+ }
+}
+
+// nodeSeqSecond returns the second element of a *NodeList, a Nodes,
+// or a []*Node. It panics if the sequence has fewer than two elements.
+func nodeSeqSecond(ns nodesOrNodeList) *Node {
+ switch ns := ns.(type) {
+ case *NodeList:
+ return ns.Next.N
+ case Nodes:
+ return ns.Slice()[1]
+ case []*Node:
+ return ns[1]
+ default:
+ panic("can't happen")
+ }
+}
+
+// nodeSeqSlice returns a []*Node containing the contents of a
+// *NodeList, a Nodes, or a []*Node.
+// This is an interim function during the transition from NodeList to Nodes.
+// TODO(iant): Remove when transition is complete.
+func nodeSeqSlice(ns nodesOrNodeList) []*Node {
+ switch ns := ns.(type) {
+ case *NodeList:
+ var s []*Node
+ for l := ns; l != nil; l = l.Next {
+ s = append(s, l.N)
+ }
+ return s
+ case Nodes:
+ return ns.Slice()
+ case []*Node:
+ return ns
+ default:
+ panic("can't happen")
+ }
+}
+
+// setNodeSeq implements *a = b.
+// a must have type **NodeList, *Nodes, or *[]*Node.
+// b must have type *NodeList, Nodes, []*Node, or nil.
+// This is an interim function during the transition from NodeList to Nodes.
+// TODO(iant): Remove when transition is complete.
+func setNodeSeq(a nodesOrNodeListPtr, b nodesOrNodeList) {
+ if b == nil {
+ switch a := a.(type) {
+ case **NodeList:
+ *a = nil
+ case *Nodes:
+ a.Set(nil)
+ case *[]*Node:
+ *a = nil
+ default:
+ panic("can't happen")
+ }
+ return
+ }
+
+ // Simplify b to either *NodeList or []*Node.
+ if n, ok := b.(Nodes); ok {
+ b = n.Slice()
+ }
+
+ if l, ok := a.(**NodeList); ok {
+ switch b := b.(type) {
+ case *NodeList:
+ *l = b
+ case []*Node:
+ var ll *NodeList
+ for _, n := range b {
+ ll = list(ll, n)
+ }
+ *l = ll
+ default:
+ panic("can't happen")
+ }
+ } else {
+ var s []*Node
+ switch b := b.(type) {
+ case *NodeList:
+ for l := b; l != nil; l = l.Next {
+ s = append(s, l.N)
+ }
+ case []*Node:
+ s = b
+ default:
+ panic("can't happen")
+ }
+
+ switch a := a.(type) {
+ case *Nodes:
+ a.Set(s)
+ case *[]*Node:
+ *a = s
+ default:
+ panic("can't happen")
+ }
+ }
+}
+
+// setNodeSeqNode sets the node sequence a to the node n.
+// a must have type **NodeList, *Nodes, or *[]*Node.
+// This is an interim function during the transition from NodeList to Nodes.
+// TODO(iant): Remove when transition is complete.
+func setNodeSeqNode(a nodesOrNodeListPtr, n *Node) {
+ switch a := a.(type) {
+ case **NodeList:
+ *a = list1(n)
+ case *Nodes:
+ a.Set([]*Node{n})
+ case *[]*Node:
+ *a = []*Node{n}
+ default:
+ panic("can't happen")
+ }
+}
+
+// appendNodeSeq appends the node sequence b to the node sequence a.
+// a must have type **NodeList, *Nodes, or *[]*Node.
+// b must have type *NodeList, Nodes, or []*Node.
+// This is an interim function during the transition from NodeList to Nodes.
+// TODO(iant): Remove when transition is complete.
+func appendNodeSeq(a nodesOrNodeListPtr, b nodesOrNodeList) {
+ // Simplify b to either *NodeList or []*Node.
+ if n, ok := b.(Nodes); ok {
+ b = n.Slice()
+ }
+
+ if l, ok := a.(**NodeList); ok {
+ switch b := b.(type) {
+ case *NodeList:
+ *l = concat(*l, b)
+ case []*Node:
+ for _, n := range b {
+ *l = list(*l, n)
+ }
+ default:
+ panic("can't happen")
+ }
+ } else {
+ var s []*Node
+ switch a := a.(type) {
+ case *Nodes:
+ s = a.Slice()
+ case *[]*Node:
+ s = *a
+ default:
+ panic("can't happen")
+ }
+
+ switch b := b.(type) {
+ case *NodeList:
+ for l := b; l != nil; l = l.Next {
+ s = append(s, l.N)
+ }
+ case []*Node:
+ s = append(s, b...)
+ default:
+ panic("can't happen")
+ }
+
+ switch a := a.(type) {
+ case *Nodes:
+ a.Set(s)
+ case *[]*Node:
+ *a = s
+ default:
+ panic("can't happen")
+ }
+ }
+}
+
+// appendNodeSeqNode appends n to the node sequence a.
+// a must have type **NodeList, *Nodes, or *[]*Node.
+// This is an interim function during the transition from NodeList to Nodes.
+// TODO(iant): Remove when transition is complete.
+func appendNodeSeqNode(a nodesOrNodeListPtr, n *Node) {
+ switch a := a.(type) {
+ case **NodeList:
+ *a = list(*a, n)
+ case *Nodes:
+ a.Append(n)
+ case *[]*Node:
+ *a = append(*a, n)
+ default:
+ panic("can't happen")
+ }
+}
y = 0x0fffFFFF
)
+//go:noinline
+func parseLE64(b []byte) uint64 {
+ // skip the first two bytes, and parse the remaining 8 as a uint64
+ return uint64(b[2]) | uint64(b[3])<<8 | uint64(b[4])<<16 | uint64(b[5])<<24 |
+ uint64(b[6])<<32 | uint64(b[7])<<40 | uint64(b[8])<<48 | uint64(b[9])<<56
+}
+
+//go:noinline
+func parseLE32(b []byte) uint32 {
+ return uint32(b[2]) | uint32(b[3])<<8 | uint32(b[4])<<16 | uint32(b[5])<<24
+}
+
+//go:noinline
+func parseLE16(b []byte) uint16 {
+ return uint16(b[2]) | uint16(b[3])<<8
+}
+
+// testLoadCombine tests for issue #14694 where load combining didn't respect the pointer offset.
+func testLoadCombine() {
+ testData := []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09}
+ if want, got := uint64(0x0908070605040302), parseLE64(testData); want != got {
+ println("testLoadCombine failed, wanted", want, "got", got)
+ failed = true
+ }
+ if want, got := uint32(0x05040302), parseLE32(testData); want != got {
+ println("testLoadCombine failed, wanted", want, "got", got)
+ failed = true
+ }
+ if want, got := uint16(0x0302), parseLE16(testData); want != got {
+ println("testLoadCombine failed, wanted", want, "got", got)
+ failed = true
+ }
+}
+
//go:noinline
func invalidAdd_ssa(x uint32) uint32 {
return x + y + y + y + y + y + y + y + y + y + y + y + y + y + y + y + y + y
testArithConstShift()
testArithRshConst()
testLargeConst()
+ testLoadCombine()
if failed {
panic("failed")
"fmt"
)
+// EType describes a kind of type.
+type EType uint8
+
+const (
+ Txxx = iota
+
+ TINT8
+ TUINT8
+ TINT16
+ TUINT16
+ TINT32
+ TUINT32
+ TINT64
+ TUINT64
+ TINT
+ TUINT
+ TUINTPTR
+
+ TCOMPLEX64
+ TCOMPLEX128
+
+ TFLOAT32
+ TFLOAT64
+
+ TBOOL
+
+ TPTR32
+ TPTR64
+
+ TFUNC
+ TARRAY
+ T_old_DARRAY // Doesn't seem to be used in existing code. Used now for Isddd export (see bexport.go). TODO(gri) rename.
+ TSTRUCT
+ TCHAN
+ TMAP
+ TINTER
+ TFORW
+ TFIELD
+ TANY
+ TSTRING
+ TUNSAFEPTR
+
+ // pseudo-types for literals
+ TIDEAL
+ TNIL
+ TBLANK
+
+ // pseudo-types for frame layout
+ TFUNCARGS
+ TCHANARGS
+ TINTERMETH
+
+ NTYPE
+)
+
+// Types stores pointers to predeclared named types.
+//
+// It also stores pointers to several special types:
+// - Types[TANY] is the placeholder "any" type recognized by substArgTypes.
+// - Types[TBLANK] represents the blank variable's type.
+// - Types[TIDEAL] represents untyped numeric constants.
+// - Types[TNIL] represents the predeclared "nil" value's type.
+// - Types[TUNSAFEPTR] is package unsafe's Pointer type.
+var Types [NTYPE]*Type
+
+var (
+ // Predeclared alias types. Kept separate for better error messages.
+ bytetype *Type
+ runetype *Type
+
+ // Predeclared error interface type.
+ errortype *Type
+
+ // Types to represent untyped string and boolean constants.
+ idealstring *Type
+ idealbool *Type
+
+ // Types to represent untyped numeric constants.
+ // Note: Currently these are only used within the binary export
+ // data format. The rest of the compiler only uses Types[TIDEAL].
+ idealint = typ(TIDEAL)
+ idealrune = typ(TIDEAL)
+ idealfloat = typ(TIDEAL)
+ idealcomplex = typ(TIDEAL)
+)
+
+// A Type represents a Go type.
+type Type struct {
+ Etype EType
+ Nointerface bool
+ Noalg bool
+ Chan uint8
+ Trecur uint8 // to detect loops
+ Printed bool
+ Embedded uint8 // TFIELD embedded type
+ Funarg bool // on TSTRUCT and TFIELD
+ Copyany bool
+ Local bool // created in this file
+ Deferwidth bool
+ Broke bool // broken type definition.
+ Isddd bool // TFIELD is ... argument
+ Align uint8
+ Haspointers uint8 // 0 unknown, 1 no, 2 yes
+
+ Nod *Node // canonical OTYPE node
+ Orig *Type // original type (type literal or predefined type)
+ Lineno int32
+
+ // TFUNC
+ Thistuple int
+ Outtuple int
+ Intuple int
+ Outnamed bool
+
+ Method *Type
+ Xmethod *Type
+
+ Sym *Sym
+ Vargen int32 // unique name for OTYPE/ONAME
+
+ Nname *Node
+ Argwid int64
+
+ // most nodes
+ Type *Type // actual type for TFIELD, element type for TARRAY, TCHAN, TMAP, TPTRxx
+ Width int64 // offset in TFIELD, width in all others
+
+ // TFIELD
+ Down *Type // next struct field, also key type in TMAP
+ Outer *Type // outer struct
+ Note *string // literal string annotation
+
+ // TARRAY
+ Bound int64 // negative is slice
+
+ // TMAP
+ Bucket *Type // internal type representing a hash bucket
+ Hmap *Type // internal type representing a Hmap (map header object)
+ Hiter *Type // internal type representing hash iterator state
+ Map *Type // link from the above 3 internal types back to the map type.
+
+ Maplineno int32 // first use of TFORW as map key
+ Embedlineno int32 // first use of TFORW as embedded type
+
+ // for TFORW, where to copy the eventual value to
+ Copyto []*Node
+
+ Lastfn *Node // for usefield
+}
+
+// Iter provides an abstraction for iterating across struct fields and
+// interface methods.
+type Iter struct {
+ x *Type
+}
+
+// IterFields returns the first field or method in struct or interface type t
+// and an Iter value to continue iterating across the rest.
+func IterFields(t *Type) (*Type, Iter) {
+ if t.Etype != TSTRUCT && t.Etype != TINTER {
+ Fatalf("IterFields: type %v does not have fields", t)
+ }
+ i := Iter{x: t.Type}
+ f := i.Next()
+ return f, i
+}
+
+// Next returns the next field or method, if any.
+func (i *Iter) Next() *Type {
+ if i.x == nil {
+ return nil
+ }
+ t := i.x
+ if t.Etype != TFIELD {
+ Fatalf("Iter.Next: type %v is not a field", t)
+ }
+ i.x = t.Down
+ return t
+}
+
+func (t *Type) wantEtype(et EType) {
+ if t.Etype != et {
+ Fatalf("want %v, but have %v", et, t)
+ }
+}
+
+func (t *Type) RecvP() **Type {
+ t.wantEtype(TFUNC)
+ return &t.Type
+}
+
+func (t *Type) ParamsP() **Type {
+ t.wantEtype(TFUNC)
+ return &t.Type.Down.Down
+}
+
+func (t *Type) ResultsP() **Type {
+ t.wantEtype(TFUNC)
+ return &t.Type.Down
+}
+
+func (t *Type) Recv() *Type { return *t.RecvP() }
+func (t *Type) Params() *Type { return *t.ParamsP() }
+func (t *Type) Results() *Type { return *t.ResultsP() }
+
+// recvParamsResults stores the accessor functions for a function Type's
+// receiver, parameters, and result parameters, in that order.
+// It can be used to iterate over all of a function's parameter lists.
+var recvParamsResults = [3]func(*Type) *Type{
+ (*Type).Recv, (*Type).Params, (*Type).Results,
+}
+
func (t *Type) Size() int64 {
dowidth(t)
return t.Width
fallthrough
case TINTER:
- t1 := t.Type
- x1 := x.Type
- for ; t1 != nil && x1 != nil; t1, x1 = t1.Down, x1.Down {
+ t1, ti := IterFields(t)
+ x1, xi := IterFields(x)
+ for ; t1 != nil && x1 != nil; t1, x1 = ti.Next(), xi.Next() {
if t1.Embedded != x1.Embedded {
- if t1.Embedded < x1.Embedded {
- return ssa.CMPlt
- }
- return ssa.CMPgt
+ return cmpForNe(t1.Embedded < x1.Embedded)
}
if t1.Note != x1.Note {
if t1.Note == nil {
return ssa.CMPgt
}
if *t1.Note != *x1.Note {
- if *t1.Note < *x1.Note {
- return ssa.CMPlt
- }
- return ssa.CMPgt
+ return cmpForNe(*t1.Note < *x1.Note)
}
}
- c := t1.Sym.cmpsym(x1.Sym)
- if c != ssa.CMPeq {
+ if c := t1.Sym.cmpsym(x1.Sym); c != ssa.CMPeq {
return c
}
- c = t1.Type.cmp(x1.Type)
- if c != ssa.CMPeq {
+ if c := t1.Type.cmp(x1.Type); c != ssa.CMPeq {
return c
}
}
- if t1 == x1 {
- return ssa.CMPeq
- }
- if t1 == nil {
- return ssa.CMPlt
+ if t1 != x1 {
+ return cmpForNe(t1 == nil)
}
- return ssa.CMPgt
+ return ssa.CMPeq
case TFUNC:
- t1 := t.Type
- t2 := x.Type
- for ; t1 != nil && t2 != nil; t1, t2 = t1.Down, t2.Down {
+ for _, f := range recvParamsResults {
// Loop over fields in structs, ignoring argument names.
- ta := t1.Type
- tb := t2.Type
- for ; ta != nil && tb != nil; ta, tb = ta.Down, tb.Down {
+ ta, ia := IterFields(f(t))
+ tb, ib := IterFields(f(x))
+ for ; ta != nil && tb != nil; ta, tb = ia.Next(), ib.Next() {
if ta.Isddd != tb.Isddd {
- if ta.Isddd {
- return ssa.CMPgt
- }
- return ssa.CMPlt
+ return cmpForNe(!ta.Isddd)
}
- c := ta.Type.cmp(tb.Type)
- if c != ssa.CMPeq {
+ if c := ta.Type.cmp(tb.Type); c != ssa.CMPeq {
return c
}
}
-
if ta != tb {
- if t1 == nil {
- return ssa.CMPlt
- }
- return ssa.CMPgt
- }
- }
- if t1 != t2 {
- if t1 == nil {
- return ssa.CMPlt
+ return cmpForNe(ta == nil)
}
- return ssa.CMPgt
}
return ssa.CMPeq
if t.Etype != TARRAY {
panic("NumElem on non-TARRAY")
}
- return int64(t.Bound)
+ return t.Bound
}
func (t *Type) IsMemory() bool { return false }
return n
}
-func typechecklist(l *NodeList, top int) {
- for ; l != nil; l = l.Next {
- typecheck(&l.N, top)
+func typechecklist(l []*Node, top int) {
+ for i := range l {
+ typecheck(&l[i], top)
}
}
return nil
}
- lno := int(setlineno(n))
+ lno := setlineno(n)
// Skip over parens.
for n.Op == OPAREN {
break
default:
- lineno = int32(lno)
+ lineno = lno
return n
}
}
break
}
sprint_depchain(&fmt_, typecheck_tcstack, n, n)
- yyerrorl(int(n.Lineno), "constant definition loop%s", fmt_)
+ yyerrorl(n.Lineno, "constant definition loop%s", fmt_)
}
if nsavederrors+nerrors == 0 {
Yyerror("typechecking loop involving %v%s", n, fmt_)
}
- lineno = int32(lno)
+ lineno = lno
return n
}
typecheck_tcstack[last] = nil
typecheck_tcstack = typecheck_tcstack[:last]
- lineno = int32(lno)
+ lineno = lno
return n
}
return true
}
- return callrecv(n.Left) || callrecv(n.Right) || callrecvlist(n.Ninit) || callrecvslice(n.Nbody.Slice()) || callrecvlist(n.List) || callrecvlist(n.Rlist)
+ return callrecv(n.Left) || callrecv(n.Right) || callrecvlist(n.Ninit) || callrecvlist(n.Nbody) || callrecvlist(n.List) || callrecvlist(n.Rlist)
}
-func callrecvlist(l *NodeList) bool {
- for ; l != nil; l = l.Next {
- if callrecv(l.N) {
- return true
- }
- }
- return false
-}
-
-func callrecvslice(l []*Node) bool {
- for _, n := range l {
+func callrecvlist(l Nodes) bool {
+ for _, n := range l.Slice() {
if callrecv(n) {
return true
}
default:
Dump("typecheck", n)
- Fatalf("typecheck %v", Oconv(int(n.Op), 0))
+ Fatalf("typecheck %v", Oconv(n.Op, 0))
// names
case OLITERAL:
case OTSTRUCT:
ok |= Etype
n.Op = OTYPE
- n.Type = tostruct(n.List)
+ n.Type = tostruct(n.List.Slice())
if n.Type == nil || n.Type.Broke {
n.Type = nil
return
}
- n.List = nil
+ n.List.Set(nil)
case OTINTER:
ok |= Etype
n.Op = OTYPE
- n.Type = tointerface(n.List)
+ n.Type = tointerface(n.List.Slice())
if n.Type == nil {
n.Type = nil
return
case OTFUNC:
ok |= Etype
n.Op = OTYPE
- n.Type = functype(n.Left, n.List, n.Rlist)
+ n.Type = functype(n.Left, n.List.Slice(), n.Rlist.Slice())
if n.Type == nil {
n.Type = nil
return
n.Type = nil
return
}
+ if n.Implicit && !okforarith[l.Type.Etype] {
+ Yyerror("invalid operation: %v (non-numeric type %v)", n, l.Type)
+ n.Type = nil
+ return
+ }
// TODO(marvin): Fix Node.EType type union.
op = Op(n.Etype)
} else {
aop = assignop(l.Type, r.Type, nil)
if aop != 0 {
if Isinter(r.Type) && !Isinter(l.Type) && algtype1(l.Type, nil) == ANOEQ {
- Yyerror("invalid operation: %v (operator %v not defined on %s)", n, Oconv(int(op), 0), typekind(l.Type))
+ Yyerror("invalid operation: %v (operator %v not defined on %s)", n, Oconv(op, 0), typekind(l.Type))
n.Type = nil
return
}
aop = assignop(r.Type, l.Type, nil)
if aop != 0 {
if Isinter(l.Type) && !Isinter(r.Type) && algtype1(r.Type, nil) == ANOEQ {
- Yyerror("invalid operation: %v (operator %v not defined on %s)", n, Oconv(int(op), 0), typekind(r.Type))
+ Yyerror("invalid operation: %v (operator %v not defined on %s)", n, Oconv(op, 0), typekind(r.Type))
n.Type = nil
return
}
if t.Etype != TIDEAL && !Eqtype(l.Type, r.Type) {
defaultlit2(&l, &r, true)
- if n.Op == OASOP && n.Implicit {
- Yyerror("invalid operation: %v (non-numeric type %v)", n, l.Type)
- n.Type = nil
- return
- }
-
if Isinter(r.Type) == Isinter(l.Type) || aop == 0 {
Yyerror("invalid operation: %v (mismatched types %v and %v)", n, l.Type, r.Type)
n.Type = nil
}
if !okfor[op][et] {
- Yyerror("invalid operation: %v (operator %v not defined on %s)", n, Oconv(int(op), 0), typekind(t))
+ Yyerror("invalid operation: %v (operator %v not defined on %s)", n, Oconv(op, 0), typekind(t))
n.Type = nil
return
}
n.Op = OADDSTR
if l.Op == OADDSTR {
- n.List = l.List
+ n.List.Set(l.List.Slice())
} else {
- n.List = list1(l)
+ n.List.Set([]*Node{l})
}
if r.Op == OADDSTR {
- n.List = concat(n.List, r.List)
+ n.List.AppendNodes(&r.List)
} else {
- n.List = list(n.List, r)
+ n.List.Append(r)
}
n.Left = nil
n.Right = nil
return
}
if !okfor[n.Op][t.Etype] {
- Yyerror("invalid operation: %v %v", Oconv(int(n.Op), 0), t)
+ Yyerror("invalid operation: %v %v", Oconv(n.Op, 0), t)
n.Type = nil
return
}
return
}
- if count(n.List) == 1 && !n.Isddd {
- typecheck(&n.List.N, Erv|Efnstruct)
+ if n.List.Len() == 1 && !n.Isddd {
+ it := nodeSeqIterate(n.List)
+ typecheck(it.P(), Erv|Efnstruct)
} else {
- typechecklist(n.List, Erv)
+ typechecklist(n.List.Slice(), Erv)
}
t := l.Type
if t == nil {
// information further down the call chain to know if we
// were testing a method receiver for unexported fields.
// It isn't necessary, so just do a sanity check.
- tp := getthisx(t).Type.Type
+ tp := t.Recv().Type.Type
if l.Left == nil || !Eqtype(l.Left.Type, tp) {
Fatalf("method receiver")
}
}
- typecheckaste(OCALL, n.Left, n.Isddd, getinargx(t), n.List, func() string { return fmt.Sprintf("argument to %v", n.Left) })
+ typecheckaste(OCALL, n.Left, n.Isddd, t.Params(), n.List, func() string { return fmt.Sprintf("argument to %v", n.Left) })
ok |= Etop
if t.Outtuple == 0 {
break OpSwitch
}
ok |= Erv
if t.Outtuple == 1 {
- t := getoutargx(l.Type).Type
+ t := l.Type.Results().Type
if t == nil {
n.Type = nil
return
break OpSwitch
}
- n.Type = getoutargx(l.Type)
+ n.Type = l.Type.Results()
break OpSwitch
case OCAP, OLEN, OREAL, OIMAG:
ok |= Erv
- if !onearg(n, "%v", Oconv(int(n.Op), 0)) {
+ if !onearg(n, "%v", Oconv(n.Op, 0)) {
n.Type = nil
return
}
break OpSwitch
badcall1:
- Yyerror("invalid argument %v for %v", Nconv(n.Left, obj.FmtLong), Oconv(int(n.Op), 0))
+ Yyerror("invalid argument %v for %v", Nconv(n.Left, obj.FmtLong), Oconv(n.Op, 0))
n.Type = nil
return
ok |= Erv
var r *Node
var l *Node
- if count(n.List) == 1 {
- typechecklist(n.List, Efnstruct)
- if n.List.N.Op != OCALLFUNC && n.List.N.Op != OCALLMETH {
+ if n.List.Len() == 1 {
+ typechecklist(n.List.Slice(), Efnstruct)
+ if n.List.First().Op != OCALLFUNC && n.List.First().Op != OCALLMETH {
Yyerror("invalid operation: complex expects two arguments")
n.Type = nil
return
}
- t := n.List.N.Left.Type
+ t := n.List.First().Left.Type
if t.Outtuple != 2 {
- Yyerror("invalid operation: complex expects two arguments, %v returns %d results", n.List.N, t.Outtuple)
+ Yyerror("invalid operation: complex expects two arguments, %v returns %d results", n.List.First(), t.Outtuple)
n.Type = nil
return
}
- t = n.List.N.Type.Type
+ t = n.List.First().Type.Type
l = t.Nname
r = t.Down.Nname
} else {
break OpSwitch
case OCLOSE:
- if !onearg(n, "%v", Oconv(int(n.Op), 0)) {
+ if !onearg(n, "%v", Oconv(n.Op, 0)) {
n.Type = nil
return
}
case ODELETE:
args := n.List
- if args == nil {
+ if args.Len() == 0 {
Yyerror("missing arguments to delete")
n.Type = nil
return
}
- if args.Next == nil {
+ if args.Len() == 1 {
Yyerror("missing second (key) argument to delete")
n.Type = nil
return
}
- if args.Next.Next != nil {
+ if args.Len() != 2 {
Yyerror("too many arguments to delete")
n.Type = nil
return
}
ok |= Etop
- typechecklist(args, Erv)
- l := args.N
- r := args.Next.N
+ typechecklist(args.Slice(), Erv)
+ l := args.First()
+ r := args.Second()
if l.Type != nil && l.Type.Etype != TMAP {
Yyerror("first argument to delete must be map; have %v", Tconv(l.Type, obj.FmtLong))
n.Type = nil
return
}
- args.Next.N = assignconv(r, l.Type.Down, "delete")
+ it := nodeSeqIterate(args)
+ it.Next()
+ *it.P() = assignconv(r, l.Type.Down, "delete")
break OpSwitch
case OAPPEND:
ok |= Erv
args := n.List
- if args == nil {
+ if args.Len() == 0 {
Yyerror("missing arguments to append")
n.Type = nil
return
}
- if count(args) == 1 && !n.Isddd {
- typecheck(&args.N, Erv|Efnstruct)
+ if args.Len() == 1 && !n.Isddd {
+ it := nodeSeqIterate(args)
+ typecheck(it.P(), Erv|Efnstruct)
} else {
- typechecklist(args, Erv)
+ typechecklist(args.Slice(), Erv)
}
- t := args.N.Type
+ t := args.First().Type
if t == nil {
n.Type = nil
return
n.Type = t
if !Isslice(t) {
- if Isconst(args.N, CTNIL) {
+ if Isconst(args.First(), CTNIL) {
Yyerror("first argument to append must be typed slice; have untyped nil")
n.Type = nil
return
}
if n.Isddd {
- if args.Next == nil {
+ if args.Len() == 1 {
Yyerror("cannot use ... on first argument to append")
n.Type = nil
return
}
- if args.Next.Next != nil {
+ if args.Len() != 2 {
Yyerror("too many arguments to append")
n.Type = nil
return
}
- if Istype(t.Type, TUINT8) && Istype(args.Next.N.Type, TSTRING) {
- defaultlit(&args.Next.N, Types[TSTRING])
+ if Istype(t.Type, TUINT8) && Istype(args.Second().Type, TSTRING) {
+ it := nodeSeqIterate(args)
+ it.Next()
+ defaultlit(it.P(), Types[TSTRING])
break OpSwitch
}
- args.Next.N = assignconv(args.Next.N, t.Orig, "append")
+ it := nodeSeqIterate(args)
+ it.Next()
+ *it.P() = assignconv(args.Second(), t.Orig, "append")
break OpSwitch
}
}
}
} else {
- for args = args.Next; args != nil; args = args.Next {
- if args.N.Type == nil {
+ it := nodeSeqIterate(args)
+ it.Next()
+ for ; !it.Done(); it.Next() {
+ if it.N().Type == nil {
continue
}
- args.N = assignconv(args.N, t.Type, "append")
+ *it.P() = assignconv(it.N(), t.Type, "append")
}
}
case OCOPY:
ok |= Etop | Erv
args := n.List
- if args == nil || args.Next == nil {
+ if args.Len() < 2 {
Yyerror("missing arguments to copy")
n.Type = nil
return
}
- if args.Next.Next != nil {
+ if args.Len() > 2 {
Yyerror("too many arguments to copy")
n.Type = nil
return
}
- n.Left = args.N
- n.Right = args.Next.N
- n.List = nil
+ n.Left = args.First()
+ n.Right = args.Second()
+ n.List.Set(nil)
n.Type = Types[TINT]
typecheck(&n.Left, Erv)
typecheck(&n.Right, Erv)
case OMAKE:
ok |= Erv
- args := n.List
- if args == nil {
+ args := nodeSeqIterate(n.List)
+ if args.Len() == 0 {
Yyerror("missing argument to make")
n.Type = nil
return
}
- n.List = nil
- l := args.N
- args = args.Next
+ n.List.Set(nil)
+ l := args.N()
+ args.Next()
typecheck(&l, Etype)
t := l.Type
if t == nil {
return
}
- if args == nil {
+ if args.Done() {
Yyerror("missing len argument to make(%v)", t)
n.Type = nil
return
}
- l = args.N
- args = args.Next
+ l = args.N()
+ args.Next()
typecheck(&l, Erv)
var r *Node
- if args != nil {
- r = args.N
- args = args.Next
+ if !args.Done() {
+ r = args.N()
+ args.Next()
typecheck(&r, Erv)
}
n.Op = OMAKESLICE
case TMAP:
- if args != nil {
- l = args.N
- args = args.Next
+ if !args.Done() {
+ l = args.N()
+ args.Next()
typecheck(&l, Erv)
defaultlit(&l, Types[TINT])
if l.Type == nil {
case TCHAN:
l = nil
- if args != nil {
- l = args.N
- args = args.Next
+ if !args.Done() {
+ l = args.N()
+ args.Next()
typecheck(&l, Erv)
defaultlit(&l, Types[TINT])
if l.Type == nil {
n.Op = OMAKECHAN
}
- if args != nil {
+ if !args.Done() {
Yyerror("too many arguments to make(%v)", t)
n.Op = OMAKE
n.Type = nil
case ONEW:
ok |= Erv
args := n.List
- if args == nil {
+ if args.Len() == 0 {
Yyerror("missing argument to new")
n.Type = nil
return
}
- l := args.N
+ l := args.First()
typecheck(&l, Etype)
t := l.Type
if t == nil {
n.Type = nil
return
}
- if args.Next != nil {
+ if args.Len() > 1 {
Yyerror("too many arguments to new(%v)", t)
n.Type = nil
return
case OPRINT, OPRINTN:
ok |= Etop
- typechecklist(n.List, Erv|Eindir) // Eindir: address does not escape
- for args := n.List; args != nil; args = args.Next {
+ typechecklist(n.List.Slice(), Erv|Eindir) // Eindir: address does not escape
+ for i1, n1 := range n.List.Slice() {
// Special case for print: int constant is int64, not int.
- if Isconst(args.N, CTINT) {
- defaultlit(&args.N, Types[TINT64])
+ if Isconst(n1, CTINT) {
+ defaultlit(&n.List.Slice()[i1], Types[TINT64])
} else {
- defaultlit(&args.N, nil)
+ defaultlit(&n.List.Slice()[i1], nil)
}
}
case ORECOVER:
ok |= Erv | Etop
- if n.List != nil {
+ if n.List.Len() != 0 {
Yyerror("too many arguments to recover")
n.Type = nil
return
case OFOR:
ok |= Etop
- typechecklist(n.Ninit, Etop)
+ typechecklist(n.Ninit.Slice(), Etop)
decldepth++
typecheck(&n.Left, Erv)
if n.Left != nil {
}
}
typecheck(&n.Right, Etop)
- typecheckslice(n.Nbody.Slice(), Etop)
+ typechecklist(n.Nbody.Slice(), Etop)
decldepth--
break OpSwitch
case OIF:
ok |= Etop
- typechecklist(n.Ninit, Etop)
+ typechecklist(n.Ninit.Slice(), Etop)
typecheck(&n.Left, Erv)
if n.Left != nil {
t := n.Left.Type
Yyerror("non-bool %v used as if condition", Nconv(n.Left, obj.FmtLong))
}
}
- typecheckslice(n.Nbody.Slice(), Etop)
- typechecklist(n.Rlist, Etop)
+ typechecklist(n.Nbody.Slice(), Etop)
+ typechecklist(n.Rlist.Slice(), Etop)
break OpSwitch
case ORETURN:
ok |= Etop
- if count(n.List) == 1 {
- typechecklist(n.List, Erv|Efnstruct)
+ if n.List.Len() == 1 {
+ typechecklist(n.List.Slice(), Erv|Efnstruct)
} else {
- typechecklist(n.List, Erv)
+ typechecklist(n.List.Slice(), Erv)
}
if Curfn == nil {
Yyerror("return outside function")
return
}
- if Curfn.Type.Outnamed && n.List == nil {
+ if Curfn.Type.Outnamed && n.List.Len() == 0 {
break OpSwitch
}
- typecheckaste(ORETURN, nil, false, getoutargx(Curfn.Type), n.List, func() string { return "return argument" })
+ typecheckaste(ORETURN, nil, false, Curfn.Type.Results(), n.List, func() string { return "return argument" })
break OpSwitch
case ORETJMP:
case OXCASE:
ok |= Etop
- typechecklist(n.List, Erv)
- typecheckslice(n.Nbody.Slice(), Etop)
+ typechecklist(n.List.Slice(), Erv)
+ typechecklist(n.Nbody.Slice(), Etop)
break OpSwitch
case ODCLFUNC:
if n.Left != nil {
return true
}
- if n.List == nil {
+ if n.List.Len() == 0 {
p := fmt.Sprintf(f, args...)
Yyerror("missing argument to %s: %v", p, n)
return false
}
- if n.List.Next != nil {
+ if n.List.Len() > 1 {
p := fmt.Sprintf(f, args...)
Yyerror("too many arguments to %s: %v", p, n)
- n.Left = n.List.N
- n.List = nil
+ n.Left = n.List.First()
+ n.List.Set(nil)
return false
}
- n.Left = n.List.N
- n.List = nil
+ n.Left = n.List.First()
+ n.List.Set(nil)
return true
}
if n.Left != nil {
return true
}
- if n.List == nil {
- Yyerror("missing argument to %v - %v", Oconv(int(n.Op), 0), n)
+ if n.List.Len() == 0 {
+ Yyerror("missing argument to %v - %v", Oconv(n.Op, 0), n)
return false
}
- n.Left = n.List.N
- if n.List.Next == nil {
- Yyerror("missing argument to %v - %v", Oconv(int(n.Op), 0), n)
- n.List = nil
+ n.Left = n.List.First()
+ if n.List.Len() == 1 {
+ Yyerror("missing argument to %v - %v", Oconv(n.Op, 0), n)
+ n.List.Set(nil)
return false
}
- if n.List.Next.Next != nil {
- Yyerror("too many arguments to %v - %v", Oconv(int(n.Op), 0), n)
- n.List = nil
+ if n.List.Len() > 2 {
+ Yyerror("too many arguments to %v - %v", Oconv(n.Op, 0), n)
+ n.List.Set(nil)
return false
}
- n.Right = n.List.Next.N
- n.List = nil
+ n.Right = n.List.Second()
+ n.List.Set(nil)
return true
}
}
// disallow T.m if m requires *T receiver
- if Isptr[getthisx(f2.Type).Type.Type.Etype] && !Isptr[t.Etype] && f2.Embedded != 2 && !isifacemethod(f2.Type) {
+ if Isptr[f2.Type.Recv().Type.Type.Etype] && !Isptr[t.Etype] && f2.Embedded != 2 && !isifacemethod(f2.Type) {
Yyerror("invalid method expression %v (needs pointer receiver: (*%v).%v)", n, t, Sconv(f2.Sym, obj.FmtShort))
return false
}
}
tt := n.Left.Type
dowidth(tt)
- rcvr := getthisx(f2.Type).Type.Type
+ rcvr := f2.Type.Recv().Type.Type
if !Eqtype(rcvr, tt) {
if rcvr.Etype == Tptr && Eqtype(rcvr.Type, tt) {
checklvalue(n.Left, "call pointer method on")
return nil
}
-func nokeys(l *NodeList) bool {
- for ; l != nil; l = l.Next {
- if l.N.Op == OKEY {
+func nokeys(l Nodes) bool {
+ for _, n := range l.Slice() {
+ if n.Op == OKEY {
return false
}
}
}
// typecheck assignment: type list = expression list
-func typecheckaste(op Op, call *Node, isddd bool, tstruct *Type, nl *NodeList, desc func() string) {
+func typecheckaste(op Op, call *Node, isddd bool, tstruct *Type, nl Nodes, desc func() string) {
var t *Type
var n *Node
var n1 int
var n2 int
+ var it nodeSeqIterator
- lno := int(lineno)
+ lno := lineno
if tstruct.Broke {
goto out
}
n = nil
- if nl != nil && nl.Next == nil {
- n = nl.N
+ if nl.Len() == 1 {
+ n = nl.First()
if n.Type != nil {
if n.Type.Etype == TSTRUCT && n.Type.Funarg {
if !hasddd(tstruct) {
}
n1 = downcount(tstruct)
- n2 = count(nl)
+ n2 = nl.Len()
if !hasddd(tstruct) {
if n2 > n1 {
goto toomany
}
}
+ it = nodeSeqIterate(nl)
for tl := tstruct.Type; tl != nil; tl = tl.Down {
t = tl.Type
if tl.Isddd {
if isddd {
- if nl == nil {
+ if it.Done() {
goto notenough
}
- if nl.Next != nil {
+ if it.Len() > 1 {
goto toomany
}
- n = nl.N
+ n = it.N()
setlineno(n)
if n.Type != nil {
- nl.N = assignconvfn(n, t, desc)
+ *it.P() = assignconvfn(n, t, desc)
}
goto out
}
- for ; nl != nil; nl = nl.Next {
- n = nl.N
- setlineno(nl.N)
+ for ; !it.Done(); it.Next() {
+ n = it.N()
+ setlineno(it.N())
if n.Type != nil {
- nl.N = assignconvfn(n, t.Type, desc)
+ *it.P() = assignconvfn(n, t.Type, desc)
}
}
goto out
}
- if nl == nil {
+ if it.Done() {
goto notenough
}
- n = nl.N
+ n = it.N()
setlineno(n)
if n.Type != nil {
- nl.N = assignconvfn(n, t, desc)
+ *it.P() = assignconvfn(n, t, desc)
}
- nl = nl.Next
+ it.Next()
}
- if nl != nil {
+ if !it.Done() {
goto toomany
}
if isddd {
if call != nil {
Yyerror("invalid use of ... in call to %v", call)
} else {
- Yyerror("invalid use of ... in %v", Oconv(int(op), 0))
+ Yyerror("invalid use of ... in %v", Oconv(op, 0))
}
}
out:
- lineno = int32(lno)
+ lineno = lno
return
notenough:
Yyerror("not enough arguments in call to %v", call)
}
} else {
- Yyerror("not enough arguments to %v", Oconv(int(op), 0))
+ Yyerror("not enough arguments to %v", Oconv(op, 0))
}
if n != nil {
n.Diag = 1
if call != nil {
Yyerror("too many arguments in call to %v", call)
} else {
- Yyerror("too many arguments to %v", Oconv(int(op), 0))
+ Yyerror("too many arguments to %v", Oconv(op, 0))
}
goto out
}
}()
if n.Right == nil {
- if n.List != nil {
- setlineno(n.List.N)
+ if n.List.Len() != 0 {
+ setlineno(n.List.First())
}
Yyerror("missing type in composite literal")
n.Type = nil
case TARRAY:
// Only allocate hash if there are some key/value pairs.
var hash map[int64]*Node
- for ll := n.List; ll != nil; ll = ll.Next {
- if ll.N.Op == OKEY {
+ for _, n1 := range n.List.Slice() {
+ if n1.Op == OKEY {
hash = make(map[int64]*Node)
break
}
}
length := int64(0)
i := 0
- for ll := n.List; ll != nil; ll = ll.Next {
- l := ll.N
+ for i2, n2 := range n.List.Slice() {
+ l := n2
setlineno(l)
if l.Op != OKEY {
l = Nod(OKEY, Nodintconst(int64(i)), l)
l.Left.Type = Types[TINT]
l.Left.Typecheck = 1
- ll.N = l
+ n.List.Slice()[i2] = l
}
typecheck(&l.Left, Erv)
case TMAP:
hash := make(map[uint32][]*Node)
var l *Node
- for ll := n.List; ll != nil; ll = ll.Next {
- l = ll.N
+ for i3, n3 := range n.List.Slice() {
+ l = n3
setlineno(l)
if l.Op != OKEY {
- typecheck(&ll.N, Erv)
+ typecheck(&n.List.Slice()[i3], Erv)
Yyerror("missing key in map literal")
continue
}
case TSTRUCT:
bad := 0
- if n.List != nil && nokeys(n.List) {
+ if n.List.Len() != 0 && nokeys(n.List) {
// simple list of variables
f := t.Type
var s *Sym
- for ll := n.List; ll != nil; ll = ll.Next {
- setlineno(ll.N)
- typecheck(&ll.N, Erv)
+ for i4, n4 := range n.List.Slice() {
+ setlineno(n4)
+ typecheck(&n.List.Slice()[i4], Erv)
if f == nil {
if bad == 0 {
Yyerror("too many values in struct initializer")
if s != nil && !exportname(s.Name) && s.Pkg != localpkg {
Yyerror("implicit assignment of unexported field '%s' in %v literal", s.Name, t)
}
-
// No pushtype allowed here. Must name fields for that.
- ll.N = assignconv(ll.N, f.Type, "field value")
-
- ll.N = Nod(OKEY, newname(f.Sym), ll.N)
- ll.N.Left.Type = f
- ll.N.Left.Typecheck = 1
+ n.List.Slice()[i4] = assignconv(n.List.Slice()[i4], f.Type, "field value")
+ n.List.Slice()[i4] = Nod(OKEY, newname(f.Sym), n.List.Slice()[i4])
+ n.List.Slice()[i4].Left.Type = f
+ n.List.Slice()[i4].Left.Typecheck = 1
f = f.Down
}
var f *Type
var l *Node
var s1 *Sym
- for ll := n.List; ll != nil; ll = ll.Next {
- l = ll.N
+ for i5, n5 := range n.List.Slice() {
+ l = n5
setlineno(l)
if l.Op != OKEY {
if bad == 0 {
Yyerror("mixture of field:value and value initializers")
}
bad++
- typecheck(&ll.N, Erv)
+ typecheck(&n.List.Slice()[i5], Erv)
continue
}
Yyerror("cannot assign to %v", n)
}
-func checkassignlist(stmt *Node, l *NodeList) {
- for ; l != nil; l = l.Next {
- checkassign(stmt, l.N)
+func checkassignlist(stmt *Node, l Nodes) {
+ for _, n := range l.Slice() {
+ checkassign(stmt, n)
}
}
}
func typecheckas2(n *Node) {
- for ll := n.List; ll != nil; ll = ll.Next {
+ for i1 := range n.List.Slice() {
// delicate little dance.
- ll.N = resolve(ll.N)
+ n.List.Slice()[i1] = resolve(n.List.Slice()[i1])
- if ll.N.Name == nil || ll.N.Name.Defn != n || ll.N.Name.Param.Ntype != nil {
- typecheck(&ll.N, Erv|Easgn)
+ if n.List.Slice()[i1].Name == nil || n.List.Slice()[i1].Name.Defn != n || n.List.Slice()[i1].Name.Param.Ntype != nil {
+ typecheck(&n.List.Slice()[i1], Erv|Easgn)
}
}
- cl := count(n.List)
- cr := count(n.Rlist)
+ cl := n.List.Len()
+ cr := n.Rlist.Len()
if cl > 1 && cr == 1 {
- typecheck(&n.Rlist.N, Erv|Efnstruct)
+ it := nodeSeqIterate(n.Rlist)
+ typecheck(it.P(), Erv|Efnstruct)
} else {
- typechecklist(n.Rlist, Erv)
+ typechecklist(n.Rlist.Slice(), Erv)
}
checkassignlist(n, n.List)
var r *Node
if cl == cr {
// easy
- ll := n.List
- lr := n.Rlist
- for ; ll != nil; ll, lr = ll.Next, lr.Next {
- if ll.N.Type != nil && lr.N.Type != nil {
- lr.N = assignconv(lr.N, ll.N.Type, "assignment")
+ lrit := nodeSeqIterate(n.Rlist)
+ for _, n2 := range n.List.Slice() {
+ if n2.Type != nil && lrit.N().Type != nil {
+ *lrit.P() = assignconv(lrit.N(), n2.Type, "assignment")
}
- if ll.N.Name != nil && ll.N.Name.Defn == n && ll.N.Name.Param.Ntype == nil {
- defaultlit(&lr.N, nil)
- ll.N.Type = lr.N.Type
+ if n2.Name != nil && n2.Name.Defn == n && n2.Name.Param.Ntype == nil {
+ defaultlit(lrit.P(), nil)
+ n2.Type = lrit.N().Type
}
+ lrit.Next()
}
goto out
}
- l = n.List.N
- r = n.Rlist.N
+ l = n.List.First()
+ r = n.Rlist.First()
// x,y,z = f()
if cr == 1 {
goto mismatch
}
n.Op = OAS2FUNC
- var s Iter
- t := Structfirst(&s, &r.Type)
- for ll := n.List; ll != nil; ll = ll.Next {
- if t.Type != nil && ll.N.Type != nil {
- checkassignto(t.Type, ll.N)
+ t, s := IterFields(r.Type)
+ for _, n3 := range n.List.Slice() {
+ if t.Type != nil && n3.Type != nil {
+ checkassignto(t.Type, n3)
}
- if ll.N.Name != nil && ll.N.Name.Defn == n && ll.N.Name.Param.Ntype == nil {
- ll.N.Type = t.Type
+ if n3.Name != nil && n3.Name.Defn == n && n3.Name.Param.Ntype == nil {
+ n3.Type = t.Type
}
- t = structnext(&s)
+ t = s.Next()
}
goto out
if l.Name != nil && l.Name.Defn == n {
l.Type = r.Type
}
- l := n.List.Next.N
+ l := n.List.Second()
if l.Type != nil && l.Type.Etype != TBOOL {
checkassignto(Types[TBOOL], l)
}
// second half of dance
out:
n.Typecheck = 1
-
- for ll := n.List; ll != nil; ll = ll.Next {
- if ll.N.Typecheck == 0 {
- typecheck(&ll.N, Erv|Easgn)
+ for i4, n4 := range n.List.Slice() {
+ if n4.Typecheck == 0 {
+ typecheck(&n.List.Slice()[i4], Erv|Easgn)
}
}
}
}
n.Type = t
t.Nname = n.Func.Nname
- rcvr := getthisx(t).Type
+ rcvr := t.Recv().Type
if rcvr != nil && n.Func.Shortname != nil {
addmethod(n.Func.Shortname.Sym, t, true, n.Func.Nname.Nointerface)
}
}
s := n.Left.Val().U.(string)
- var l *NodeList
+ var l []*Node
if n.Type.Type.Etype == TUINT8 {
// []byte
for i := 0; i < len(s); i++ {
- l = list(l, Nod(OKEY, Nodintconst(int64(i)), Nodintconst(int64(s[0]))))
+ l = append(l, Nod(OKEY, Nodintconst(int64(i)), Nodintconst(int64(s[0]))))
}
} else {
// []rune
i := 0
for _, r := range s {
- l = list(l, Nod(OKEY, Nodintconst(int64(i)), Nodintconst(int64(r))))
+ l = append(l, Nod(OKEY, Nodintconst(int64(i)), Nodintconst(int64(r))))
i++
}
}
nn := Nod(OCOMPLIT, nil, typenod(n.Type))
- nn.List = l
+ nn.List.Set(l)
typecheck(&nn, Erv)
*np = nn
}
// value of its argument, a specific implementation of I may
// care. The _ would suppress the assignment to that argument
// while generating a call, so remove it.
- for t := getinargx(nt.Type).Type; t != nil; t = t.Down {
+ for t, it := IterFields(nt.Type.Params()); t != nil; t = it.Next() {
if t.Sym != nil && t.Sym.Name == "_" {
t.Sym = nil
}
}
// Double-check use of type as embedded type.
- lno := int(lineno)
+ lno := lineno
if embedlineno != 0 {
lineno = int32(embedlineno)
}
}
- lineno = int32(lno)
+ lineno = lno
// Queue check for map until all the types are done settling.
if maplineno != 0 {
func typecheckdeftype(n *Node) {
ntypecheckdeftype++
- lno := int(lineno)
+ lno := lineno
setlineno(n)
n.Type.Sym = n.Sym
n.Typecheck = 1
copytype(n, t)
ret:
- lineno = int32(lno)
+ lineno = lno
// if there are no type definitions going on, it's safe to
// try to resolve the method types for the interfaces
maptype(l.N.Type, Types[TBOOL])
}
- lineno = int32(lno)
+ lineno = lno
}
ntypecheckdeftype--
}
func typecheckdef(n *Node) *Node {
- lno := int(lineno)
+ lno := lineno
setlineno(n)
if n.Op == ONONAME {
switch n.Op {
default:
- Fatalf("typecheckdef %v", Oconv(int(n.Op), 0))
+ Fatalf("typecheckdef %v", Oconv(n.Op, 0))
// not really syms
case OGOTO, OLABEL:
typecheckdefstack[last] = nil
typecheckdefstack = typecheckdefstack[:last]
- lineno = int32(lno)
+ lineno = lno
n.Walkdef = 1
return n
}
markbreak(n.Right, implicit)
markbreaklist(n.Ninit, implicit)
- markbreakslice(n.Nbody.Slice(), implicit)
+ markbreaklist(n.Nbody, implicit)
markbreaklist(n.List, implicit)
markbreaklist(n.Rlist, implicit)
}
}
-func markbreaklist(l *NodeList, implicit *Node) {
- var n *Node
- var lab *Label
-
- for ; l != nil; l = l.Next {
- n = l.N
- if n.Op == OLABEL && l.Next != nil && n.Name.Defn == l.Next.N {
- switch n.Name.Defn.Op {
- case OFOR,
- OSWITCH,
- OTYPESW,
- OSELECT,
- ORANGE:
- lab = new(Label)
- lab.Def = n.Name.Defn
- n.Left.Sym.Label = lab
- markbreak(n.Name.Defn, n.Name.Defn)
- n.Left.Sym.Label = nil
- l = l.Next
- continue
- }
- }
-
- markbreak(n, implicit)
- }
-}
-
-func markbreakslice(l []*Node, implicit *Node) {
- for i := 0; i < len(l); i++ {
- n := l[i]
- if n.Op == OLABEL && i+1 < len(l) && n.Name.Defn == l[i+1] {
+func markbreaklist(l Nodes, implicit *Node) {
+ for it := nodeSeqIterate(l); !it.Done(); it.Next() {
+ n := it.N()
+ if n.Op == OLABEL && it.Len() > 1 && n.Name.Defn == nodeSeqSlice(it.Seq())[1] {
switch n.Name.Defn.Op {
case OFOR, OSWITCH, OTYPESW, OSELECT, ORANGE:
lab := new(Label)
n.Left.Sym.Label = lab
markbreak(n.Name.Defn, n.Name.Defn)
n.Left.Sym.Label = nil
- i++
+ it.Next()
continue
}
}
return false
}
def := 0
- for l := n.List; l != nil; l = l.Next {
- if !l.N.Nbody.isterminating() {
+ for _, n1 := range n.List.Slice() {
+ if !n1.Nbody.isterminating() {
return false
}
- if l.N.List == nil { // default
+ if n1.List.Len() == 0 { // default
def = 1
}
}
func checkreturn(fn *Node) {
if fn.Type.Outtuple != 0 && len(fn.Nbody.Slice()) != 0 {
- markbreakslice(fn.Nbody.Slice(), nil)
+ markbreaklist(fn.Nbody, nil)
if !fn.Nbody.isterminating() {
- yyerrorl(int(fn.Func.Endlineno), "missing return at end of function")
+ yyerrorl(fn.Func.Endlineno, "missing return at end of function")
}
}
}
return nil
}
- if args == nil {
+ if args.Len() == 0 {
Yyerror("missing argument for %v", s)
return nil
}
- r := args.N
+ r := args.First()
var v int64
if s.Name == "Sizeof" {
default:
Dump("unsafenmagic", r)
- Fatalf("impossible %v node after dot insertion", Oconv(int(r1.Op), obj.FmtSharp))
+ Fatalf("impossible %v node after dot insertion", Oconv(r1.Op, obj.FmtSharp))
goto bad
}
}
goto ret
yes:
- if args.Next != nil {
+ if args.Len() > 1 {
Yyerror("extra arguments for %v", s)
}
if Debug['W'] != 0 {
s := fmt.Sprintf("\nbefore %v", Curfn.Func.Nname.Sym)
- dumpslice(s, Curfn.Nbody.Slice())
+ dumplist(s, Curfn.Nbody)
}
- lno := int(lineno)
+ lno := lineno
// Final typecheck for any unused variables.
// It's hard to be on the heap when not-used, but best to be consistent about &~PHEAP here and below.
}
}
- lineno = int32(lno)
+ lineno = lno
if nerrors != 0 {
return
}
- walkstmtslice(Curfn.Nbody.Slice())
+ walkstmtlist(Curfn.Nbody.Slice())
if Debug['W'] != 0 {
s := fmt.Sprintf("after walk %v", Curfn.Func.Nname.Sym)
- dumpslice(s, Curfn.Nbody.Slice())
+ dumplist(s, Curfn.Nbody)
}
heapmoves()
if Debug['W'] != 0 && len(Curfn.Func.Enter.Slice()) > 0 {
s := fmt.Sprintf("enter %v", Curfn.Func.Nname.Sym)
- dumpslice(s, Curfn.Func.Enter.Slice())
+ dumplist(s, Curfn.Func.Enter)
}
}
-func walkstmtlist(l *NodeList) {
- for ; l != nil; l = l.Next {
- walkstmt(&l.N)
+func walkstmtlist(s []*Node) {
+ for i := range s {
+ walkstmt(&s[i])
}
}
-func walkstmtslice(l []*Node) {
- for i := range l {
- walkstmt(&l[i])
+func samelist(a, b []*Node) bool {
+ if len(a) != len(b) {
+ return false
}
-}
-
-func samelist(a *NodeList, b *NodeList) bool {
- for ; a != nil && b != nil; a, b = a.Next, b.Next {
- if a.N != b.N {
+ for i, n := range a {
+ if n != b[i] {
return false
}
}
- return a == b
+ return true
}
func paramoutheap(fn *Node) bool {
var lhs *Node
callfunc := n.Left
- for args := callfunc.List; args != nil; args = args.Next {
- arg = args.N
+ for _, arg = range callfunc.List.Slice() {
if arg.Op != OAS {
Yyerror("call arg not assignment")
}
setlineno(n)
- walkstmtlist(n.Ninit)
+ walkstmtlist(n.Ninit.Slice())
switch n.Op {
default:
if n.Op == ONAME {
Yyerror("%v is not a top level statement", n.Sym)
} else {
- Yyerror("%v is not a top level statement", Oconv(int(n.Op), 0))
+ Yyerror("%v is not a top level statement", Oconv(n.Op, 0))
}
Dump("nottop", n)
Fatalf("missing typecheck: %v", Nconv(n, obj.FmtSign))
}
init := n.Ninit
- n.Ninit = nil
+ n.Ninit.Set(nil)
walkexpr(&n, &init)
- addinit(&n, init)
+ addinit(&n, init.Slice())
if (*np).Op == OCOPY && n.Op == OCONVNOP {
n.Op = OEMPTY // don't leave plain values as statements.
}
Fatalf("missing typecheck: %v", Nconv(n, obj.FmtSign))
}
init := n.Ninit
- n.Ninit = nil
+ n.Ninit.Set(nil)
walkexpr(&n.Left, &init)
n = mkcall1(chanfn("chanrecv1", 2, n.Left.Type), nil, &init, typename(n.Left.Type), n.Left, nodnil())
walkexpr(&n, &init)
- addinit(&n, init)
+ addinit(&n, init.Slice())
case OBREAK,
ODCL,
break
case OBLOCK:
- walkstmtlist(n.List)
+ walkstmtlist(n.List.Slice())
case OXCASE:
Yyerror("case statement out of place")
case OFOR:
if n.Left != nil {
- walkstmtlist(n.Left.Ninit)
+ walkstmtlist(n.Left.Ninit.Slice())
init := n.Left.Ninit
- n.Left.Ninit = nil
+ n.Left.Ninit.Set(nil)
walkexpr(&n.Left, &init)
- addinit(&n.Left, init)
+ addinit(&n.Left, init.Slice())
}
walkstmt(&n.Right)
- walkstmtslice(n.Nbody.Slice())
+ walkstmtlist(n.Nbody.Slice())
case OIF:
walkexpr(&n.Left, &n.Ninit)
- walkstmtslice(n.Nbody.Slice())
- walkstmtlist(n.Rlist)
+ walkstmtlist(n.Nbody.Slice())
+ walkstmtlist(n.Rlist.Slice())
case OPROC:
switch n.Left.Op {
adjustargs(n, 2*Widthptr)
case ORETURN:
- walkexprlist(n.List, &n.Ninit)
- if n.List == nil {
+ walkexprlist(n.List.Slice(), &n.Ninit)
+ if n.List.Len() == 0 {
break
}
- if (Curfn.Type.Outnamed && count(n.List) > 1) || paramoutheap(Curfn) {
+ if (Curfn.Type.Outnamed && n.List.Len() > 1) || paramoutheap(Curfn) {
// assign to the function out parameters,
// so that reorder3 can fix up conflicts
- var rl *NodeList
+ var rl []*Node
var cl Class
for _, ln := range Curfn.Func.Dcl {
break
}
if cl == PPARAMOUT {
- rl = list(rl, ln)
+ rl = append(rl, ln)
}
}
- if got, want := count(n.List), count(rl); got != want {
+ if got, want := n.List.Len(), len(rl); got != want {
// order should have rewritten multi-value function calls
// with explicit OAS2FUNC nodes.
Fatalf("expected %v return arguments, have %v", want, got)
}
- if samelist(rl, n.List) {
+ if samelist(rl, n.List.Slice()) {
// special return in disguise
- n.List = nil
+ n.List.Set(nil)
break
}
// move function calls out, to make reorder3's job easier.
- walkexprlistsafe(n.List, &n.Ninit)
+ walkexprlistsafe(n.List.Slice(), &n.Ninit)
- ll := ascompatee(n.Op, rl, n.List, &n.Ninit)
- n.List = reorder3(ll)
- for lr := n.List; lr != nil; lr = lr.Next {
- lr.N = applywritebarrier(lr.N)
+ ll := ascompatee(n.Op, rl, n.List.Slice(), &n.Ninit)
+ n.List.Set(reorder3(ll))
+ for i1 := range n.List.Slice() {
+ n.List.Slice()[i1] = applywritebarrier(n.List.Slice()[i1])
}
break
}
- ll := ascompatte(n.Op, nil, false, Getoutarg(Curfn.Type), n.List, 1, &n.Ninit)
- n.List = ll
+ ll := ascompatte(n.Op, nil, false, Curfn.Type.ResultsP(), n.List.Slice(), 1, &n.Ninit)
+ n.List.Set(ll)
case ORETJMP:
break
// the types expressions are calculated.
// compile-time constants are evaluated.
// complex side effects like statements are appended to init
-func walkexprlist(l *NodeList, init **NodeList) {
- for ; l != nil; l = l.Next {
- walkexpr(&l.N, init)
+func walkexprlist(s []*Node, init *Nodes) {
+ for i := range s {
+ walkexpr(&s[i], init)
+ }
+}
+
+func walkexprlistsafe(s []*Node, init *Nodes) {
+ for i, n := range s {
+ s[i] = safeexpr(n, init)
+ walkexpr(&s[i], init)
+ }
+}
+
+func walkexprlistcheap(s []*Node, init *Nodes) {
+ for i, n := range s {
+ s[i] = cheapexpr(n, init)
+ walkexpr(&s[i], init)
}
}
-func walkexprlistsafe(l *NodeList, init **NodeList) {
- for ; l != nil; l = l.Next {
- l.N = safeexpr(l.N, init)
- walkexpr(&l.N, init)
+// Build name of function: convI2E etc.
+// Not all names are possible
+// (e.g., we'll never generate convE2E or convE2I).
+func convFuncName(from, to *Type) string {
+ tkind := to.iet()
+ switch from.iet() {
+ case 'I':
+ switch tkind {
+ case 'E':
+ return "convI2E"
+ case 'I':
+ return "convI2I"
+ }
+ case 'T':
+ switch tkind {
+ case 'E':
+ return "convT2E"
+ case 'I':
+ return "convT2I"
+ }
}
+ Fatalf("unknown conv func %c2%c", from.iet(), to.iet())
+ panic("unreachable")
}
-func walkexprlistcheap(l *NodeList, init **NodeList) {
- for ; l != nil; l = l.Next {
- l.N = cheapexpr(l.N, init)
- walkexpr(&l.N, init)
+// Build name of function: assertI2E etc.
+// If with2suffix is true, the form ending in "2" is returned.
+func assertFuncName(from, to *Type, with2suffix bool) string {
+ l := len("assertX2X2")
+ if !with2suffix {
+ l--
+ }
+ tkind := to.iet()
+ switch from.iet() {
+ case 'E':
+ switch tkind {
+ case 'I':
+ return "assertE2I2"[:l]
+ case 'E':
+ return "assertE2E2"[:l]
+ case 'T':
+ return "assertE2T2"[:l]
+ }
+ case 'I':
+ switch tkind {
+ case 'I':
+ return "assertI2I2"[:l]
+ case 'E':
+ return "assertI2E2"[:l]
+ case 'T':
+ return "assertI2T2"[:l]
+ }
}
+ Fatalf("unknown assert func %c2%c", from.iet(), to.iet())
+ panic("unreachable")
}
-func walkexpr(np **Node, init **NodeList) {
+func walkexpr(np **Node, init *Nodes) {
n := *np
if n == nil {
Fatalf("walkexpr init == &n->ninit")
}
- if n.Ninit != nil {
- walkstmtlist(n.Ninit)
- *init = concat(*init, n.Ninit)
- n.Ninit = nil
+ if n.Ninit.Len() != 0 {
+ walkstmtlist(n.Ninit.Slice())
+ init.AppendNodes(&n.Ninit)
}
// annoying case - not typechecked
OCOMPLEX,
OLROT:
if n.Op == OCOMPLEX && n.Left == nil && n.Right == nil {
- n.Left = n.List.N
- n.Right = n.List.Next.N
+ n.Left = n.List.First()
+ n.Right = n.List.Second()
}
walkexpr(&n.Left, init)
// cannot put side effects from n.Right on init,
// because they cannot run before n.Left is checked.
// save elsewhere and store on the eventual n.Right.
- var ll *NodeList
+ var ll Nodes
walkexpr(&n.Right, &ll)
- addinit(&n.Right, ll)
+ addinit(&n.Right, ll.Slice())
case OPRINT, OPRINTN:
- walkexprlist(n.List, init)
+ walkexprlist(n.List.Slice(), init)
n = walkprint(n, init)
case OPANIC:
case OCALLINTER:
t := n.Left.Type
- if n.List != nil && n.List.N.Op == OAS {
+ if n.List.Len() != 0 && n.List.First().Op == OAS {
break
}
walkexpr(&n.Left, init)
- walkexprlist(n.List, init)
- ll := ascompatte(n.Op, n, n.Isddd, getinarg(t), n.List, 0, init)
- n.List = reorder1(ll)
+ walkexprlist(n.List.Slice(), init)
+ ll := ascompatte(n.Op, n, n.Isddd, t.ParamsP(), n.List.Slice(), 0, init)
+ n.List.Set(reorder1(ll))
case OCALLFUNC:
if n.Left.Op == OCLOSURE {
// transformclosure already did all preparation work.
// Prepend captured variables to argument list.
- n.List = concat(n.Left.Func.Enter.NodeList(), n.List)
+ n.List.Set(append(n.Left.Func.Enter.Slice(), n.List.Slice()...))
n.Left.Func.Enter.Set(nil)
// Update type of OCALLFUNC node.
// Output arguments had not changed, but their offsets could.
if n.Left.Type.Outtuple == 1 {
- t := getoutargx(n.Left.Type).Type
+ t := n.Left.Type.Results().Type
if t.Etype == TFIELD {
t = t.Type
}
n.Type = t
} else {
- n.Type = getoutargx(n.Left.Type)
+ n.Type = n.Left.Type.Results()
}
}
t := n.Left.Type
- if n.List != nil && n.List.N.Op == OAS {
+ if n.List.Len() != 0 && n.List.First().Op == OAS {
break
}
walkexpr(&n.Left, init)
- walkexprlist(n.List, init)
+ walkexprlist(n.List.Slice(), init)
if n.Left.Op == ONAME && n.Left.Sym.Name == "Sqrt" && n.Left.Sym.Pkg.Path == "math" {
switch Thearch.Thechar {
case '5', '6', '7':
n.Op = OSQRT
- n.Left = n.List.N
- n.List = nil
+ n.Left = n.List.First()
+ n.List.Set(nil)
break opswitch
}
}
- ll := ascompatte(n.Op, n, n.Isddd, getinarg(t), n.List, 0, init)
- n.List = reorder1(ll)
+ ll := ascompatte(n.Op, n, n.Isddd, t.ParamsP(), n.List.Slice(), 0, init)
+ n.List.Set(reorder1(ll))
case OCALLMETH:
t := n.Left.Type
- if n.List != nil && n.List.N.Op == OAS {
+ if n.List.Len() != 0 && n.List.First().Op == OAS {
break
}
walkexpr(&n.Left, init)
- walkexprlist(n.List, init)
- ll := ascompatte(n.Op, n, false, getthis(t), list1(n.Left.Left), 0, init)
- lr := ascompatte(n.Op, n, n.Isddd, getinarg(t), n.List, 0, init)
- ll = concat(ll, lr)
+ walkexprlist(n.List.Slice(), init)
+ ll := ascompatte(n.Op, n, false, t.RecvP(), []*Node{n.Left.Left}, 0, init)
+ lr := ascompatte(n.Op, n, n.Isddd, t.ParamsP(), n.List.Slice(), 0, init)
+ ll = append(ll, lr...)
n.Left.Left = nil
ullmancalc(n.Left)
- n.List = reorder1(ll)
+ n.List.Set(reorder1(ll))
case OAS:
- *init = concat(*init, n.Ninit)
- n.Ninit = nil
+ init.AppendNodes(&n.Ninit)
walkexpr(&n.Left, init)
n.Left = safeexpr(n.Left, init)
Warn("type assertion not inlined")
}
- buf := "assert" + type2IET(r.Left.Type) + "2" + type2IET(r.Type)
- fn := syslook(buf, 1)
- substArgTypes(fn, r.Left.Type, r.Type)
+ fn := syslook(assertFuncName(r.Left.Type, r.Type, false))
+ substArgTypes(&fn, r.Left.Type, r.Type)
n = mkcall1(fn, nil, init, typename(r.Type), r.Left, n1)
walkexpr(&n, init)
}
case OAS2:
- *init = concat(*init, n.Ninit)
- n.Ninit = nil
- walkexprlistsafe(n.List, init)
- walkexprlistsafe(n.Rlist, init)
- ll := ascompatee(OAS, n.List, n.Rlist, init)
+ init.AppendNodes(&n.Ninit)
+ walkexprlistsafe(n.List.Slice(), init)
+ walkexprlistsafe(n.Rlist.Slice(), init)
+ ll := ascompatee(OAS, n.List.Slice(), n.Rlist.Slice(), init)
ll = reorder3(ll)
- for lr := ll; lr != nil; lr = lr.Next {
- lr.N = applywritebarrier(lr.N)
+ for i, n := range ll {
+ ll[i] = applywritebarrier(n)
}
n = liststmt(ll)
// a,b,... = fn()
case OAS2FUNC:
- *init = concat(*init, n.Ninit)
+ init.AppendNodes(&n.Ninit)
- n.Ninit = nil
- r := n.Rlist.N
- walkexprlistsafe(n.List, init)
+ r := n.Rlist.First()
+ walkexprlistsafe(n.List.Slice(), init)
walkexpr(&r, init)
ll := ascompatet(n.Op, n.List, &r.Type, 0, init)
- for lr := ll; lr != nil; lr = lr.Next {
- lr.N = applywritebarrier(lr.N)
+ for i, n := range ll {
+ ll[i] = applywritebarrier(n)
}
- n = liststmt(concat(list1(r), ll))
+ n = liststmt(append([]*Node{r}, ll...))
// x, y = <-c
// orderstmt made sure x is addressable.
case OAS2RECV:
- *init = concat(*init, n.Ninit)
+ init.AppendNodes(&n.Ninit)
- n.Ninit = nil
- r := n.Rlist.N
- walkexprlistsafe(n.List, init)
+ r := n.Rlist.First()
+ walkexprlistsafe(n.List.Slice(), init)
walkexpr(&r.Left, init)
var n1 *Node
- if isblank(n.List.N) {
+ if isblank(n.List.First()) {
n1 = nodnil()
} else {
- n1 = Nod(OADDR, n.List.N, nil)
+ n1 = Nod(OADDR, n.List.First(), nil)
}
n1.Etype = 1 // addr does not escape
fn := chanfn("chanrecv2", 2, r.Left.Type)
- r = mkcall1(fn, n.List.Next.N.Type, init, typename(r.Left.Type), r.Left, n1)
- n = Nod(OAS, n.List.Next.N, r)
+ r = mkcall1(fn, n.List.Second().Type, init, typename(r.Left.Type), r.Left, n1)
+ n = Nod(OAS, n.List.Second(), r)
typecheck(&n, Etop)
// a,b = m[i];
case OAS2MAPR:
- *init = concat(*init, n.Ninit)
+ init.AppendNodes(&n.Ninit)
- n.Ninit = nil
- r := n.Rlist.N
- walkexprlistsafe(n.List, init)
+ r := n.Rlist.First()
+ walkexprlistsafe(n.List.Slice(), init)
walkexpr(&r.Left, init)
walkexpr(&r.Right, init)
t := r.Left.Type
// to:
// var,b = mapaccess2*(t, m, i)
// a = *var
- a := n.List.N
+ a := n.List.First()
fn := mapfn(p, t)
- r = mkcall1(fn, getoutargx(fn.Type), init, typename(t), r.Left, key)
+ r = mkcall1(fn, fn.Type.Results(), init, typename(t), r.Left, key)
// mapaccess2* returns a typed bool, but due to spec changes,
// the boolean result of i.(T) is now untyped so we make it the
// same type as the variable on the lhs.
- if !isblank(n.List.Next.N) {
- r.Type.Type.Down.Type = n.List.Next.N.Type
+ if !isblank(n.List.Second()) {
+ r.Type.Type.Down.Type = n.List.Second().Type
}
- n.Rlist = list1(r)
+ setNodeSeq(&n.Rlist, list1(r))
n.Op = OAS2FUNC
// don't generate a = *var if a is _
if !isblank(a) {
var_ := temp(Ptrto(t.Type))
var_.Typecheck = 1
- n.List.N = var_
+ it := nodeSeqIterate(n.List)
+ *it.P() = var_
walkexpr(&n, init)
- *init = list(*init, n)
+ init.Append(n)
n = Nod(OAS, a, Nod(OIND, var_, nil))
}
// TODO: ptr is always non-nil, so disable nil check for this OIND op.
case ODELETE:
- *init = concat(*init, n.Ninit)
- n.Ninit = nil
- map_ := n.List.N
- key := n.List.Next.N
+ init.AppendNodes(&n.Ninit)
+ map_ := n.List.First()
+ key := n.List.Second()
walkexpr(&map_, init)
walkexpr(&key, init)
n = mkcall1(mapfndel("mapdelete", t), nil, init, typename(t), map_, key)
case OAS2DOTTYPE:
- e := n.Rlist.N // i.(T)
+ e := n.Rlist.First() // i.(T)
// TODO(rsc): The Isfat is for consistency with componentgen and orderexpr.
// It needs to be removed in all three places.
// That would allow inlining x.(struct{*int}) the same as x.(*int).
if isdirectiface(e.Type) && !Isfat(e.Type) && !instrumenting {
// handled directly during gen.
- walkexprlistsafe(n.List, init)
+ walkexprlistsafe(n.List.Slice(), init)
walkexpr(&e.Left, init)
break
}
// res, ok = i.(T)
// orderstmt made sure a is addressable.
- *init = concat(*init, n.Ninit)
- n.Ninit = nil
+ init.AppendNodes(&n.Ninit)
- walkexprlistsafe(n.List, init)
+ walkexprlistsafe(n.List.Slice(), init)
walkexpr(&e.Left, init)
t := e.Type // T
from := e.Left // i
oktype := Types[TBOOL]
- ok := n.List.Next.N
+ ok := n.List.Second()
if !isblank(ok) {
oktype = ok.Type
}
- fromKind := type2IET(from.Type)
- toKind := type2IET(t)
+ fromKind := from.Type.iet()
+ toKind := t.iet()
// Avoid runtime calls in a few cases of the form _, ok := i.(T).
// This is faster and shorter and allows the corresponding assertX2X2
// routines to skip nil checks on their last argument.
- if isblank(n.List.N) {
+ if isblank(n.List.First()) {
var fast *Node
switch {
- case fromKind == "E" && toKind == "T":
+ case fromKind == 'E' && toKind == 'T':
tab := Nod(OITAB, from, nil) // type:eface::tab:iface
typ := Nod(OCONVNOP, typename(t), nil)
typ.Type = Ptrto(Types[TUINTPTR])
fast = Nod(OEQ, tab, typ)
- case fromKind == "I" && toKind == "E",
- fromKind == "E" && toKind == "E":
+ case fromKind == 'I' && toKind == 'E',
+ fromKind == 'E' && toKind == 'E':
tab := Nod(OITAB, from, nil)
fast = Nod(ONE, nodnil(), tab)
}
}
var resptr *Node // &res
- if isblank(n.List.N) {
+ if isblank(n.List.First()) {
resptr = nodnil()
} else {
- resptr = Nod(OADDR, n.List.N, nil)
+ resptr = Nod(OADDR, n.List.First(), nil)
}
resptr.Etype = 1 // addr does not escape
if Debug_typeassert > 0 {
Warn("type assertion not inlined")
}
- buf := "assert" + fromKind + "2" + toKind + "2"
- fn := syslook(buf, 1)
- substArgTypes(fn, from.Type, t)
+ fn := syslook(assertFuncName(from.Type, t, true))
+ substArgTypes(&fn, from.Type, t)
call := mkcall1(fn, oktype, init, typename(t), from, resptr)
n = Nod(OAS, ok, call)
typecheck(&n, Etop)
break
}
- // Build name of function: convI2E etc.
- // Not all names are possible
- // (e.g., we'll never generate convE2E or convE2I).
- buf := "conv" + type2IET(n.Left.Type) + "2" + type2IET(n.Type)
- fn := syslook(buf, 1)
var ll *NodeList
if !Isinter(n.Left.Type) {
ll = list(ll, typename(n.Left.Type))
n1 := Nod(OAS, l, sym.Def)
typecheck(&n1, Etop)
- *init = list(*init, n1)
+ init.Append(n1)
- fn := syslook("typ2Itab", 1)
+ fn := syslook("typ2Itab")
n1 = Nod(OCALL, fn, nil)
- n1.List = ll
+ setNodeSeq(&n1.List, ll)
typecheck(&n1, Erv)
walkexpr(&n1, init)
n2.Nbody.Set([]*Node{Nod(OAS, l, n1)})
n2.Likely = -1
typecheck(&n2, Etop)
- *init = list(*init, n2)
+ init.Append(n2)
l = Nod(OEFACE, l, n.Left)
l.Typecheck = n.Typecheck
r = temp(n.Left.Type)
r = Nod(OAS, r, nil) // zero temp
typecheck(&r, Etop)
- *init = list(*init, r)
+ init.Append(r)
r = Nod(OADDR, r.Left, nil)
typecheck(&r, Erv)
}
ll = list(ll, r)
}
+ fn := syslook(convFuncName(n.Left.Type, n.Type))
if !Isinter(n.Left.Type) {
- substArgTypes(fn, n.Left.Type, n.Left.Type, n.Type)
+ substArgTypes(&fn, n.Left.Type, n.Left.Type, n.Type)
} else {
- substArgTypes(fn, n.Left.Type, n.Type)
+ substArgTypes(&fn, n.Left.Type, n.Type)
}
dowidth(fn.Type)
n = Nod(OCALL, fn, nil)
- n.List = ll
+ setNodeSeq(&n.List, ll)
typecheck(&n, Erv)
walkexpr(&n, init)
r := temp(n.Type.Type)
r = Nod(OAS, r, nil) // zero temp
typecheck(&r, Etop)
- *init = list(*init, r)
+ init.Append(r)
r = Nod(OADDR, r.Left, nil)
typecheck(&r, Erv)
n = r
}
// s + "badgerbadgerbadger" == "badgerbadgerbadger"
- if (Op(n.Etype) == OEQ || Op(n.Etype) == ONE) && Isconst(n.Right, CTSTR) && n.Left.Op == OADDSTR && count(n.Left.List) == 2 && Isconst(n.Left.List.Next.N, CTSTR) && strlit(n.Right) == strlit(n.Left.List.Next.N) {
+ if (Op(n.Etype) == OEQ || Op(n.Etype) == ONE) && Isconst(n.Right, CTSTR) && n.Left.Op == OADDSTR && n.Left.List.Len() == 2 && Isconst(n.Left.List.Second(), CTSTR) && strlit(n.Right) == strlit(n.Left.List.Second()) {
// TODO(marvin): Fix Node.EType type union.
- r := Nod(Op(n.Etype), Nod(OLEN, n.Left.List.N, nil), Nodintconst(0))
+ r := Nod(Op(n.Etype), Nod(OLEN, n.Left.List.First(), nil), Nodintconst(0))
typecheck(&r, Erv)
walkexpr(&r, init)
r.Type = n.Type
// cannot use chanfn - closechan takes any, not chan any
case OCLOSE:
- fn := syslook("closechan", 1)
+ fn := syslook("closechan")
- substArgTypes(fn, n.Left.Type)
+ substArgTypes(&fn, n.Left.Type)
n = mkcall1(fn, nil, init, n.Left)
case OMAKECHAN:
case OMAKEMAP:
t := n.Type
- fn := syslook("makemap", 1)
-
a := nodnil() // hmap buffer
r := nodnil() // bucket buffer
if n.Esc == EscNone {
a = Nod(OAS, var_, nil) // zero temp
typecheck(&a, Etop)
- *init = list(*init, a)
+ init.Append(a)
a = Nod(OADDR, var_, nil)
// Allocate one bucket on stack.
r = Nod(OAS, var_, nil) // zero temp
typecheck(&r, Etop)
- *init = list(*init, r)
+ init.Append(r)
r = Nod(OADDR, var_, nil)
}
- substArgTypes(fn, hmap(t), mapbucket(t), t.Down, t.Type)
+ fn := syslook("makemap")
+ substArgTypes(&fn, hmap(t), mapbucket(t), t.Down, t.Type)
n = mkcall1(fn, n.Type, init, typename(n.Type), conv(n.Left, Types[TINT64]), a, r)
case OMAKESLICE:
var_ := temp(t)
a := Nod(OAS, var_, nil) // zero temp
typecheck(&a, Etop)
- *init = list(*init, a)
+ init.Append(a)
r := Nod(OSLICE, var_, Nod(OKEY, nil, l)) // arr[:l]
r = conv(r, n.Type) // in case n.Type is named.
typecheck(&r, Erv)
n = r
} else {
// makeslice(t *Type, nel int64, max int64) (ary []any)
- fn := syslook("makeslice", 1)
+ fn := syslook("makeslice")
- substArgTypes(fn, t.Type) // any-1
+ substArgTypes(&fn, t.Type) // any-1
n = mkcall1(fn, n.Type, init, typename(n.Type), conv(l, Types[TINT64]), conv(r, Types[TINT64]))
}
// ifaceeq(i1 any-1, i2 any-2) (ret bool);
case OCMPIFACE:
if !Eqtype(n.Left.Type, n.Right.Type) {
- Fatalf("ifaceeq %v %v %v", Oconv(int(n.Op), 0), n.Left.Type, n.Right.Type)
+ Fatalf("ifaceeq %v %v %v", Oconv(n.Op, 0), n.Left.Type, n.Right.Type)
}
var fn *Node
if isnilinter(n.Left.Type) {
- fn = syslook("efaceeq", 1)
+ fn = syslook("efaceeq")
} else {
- fn = syslook("ifaceeq", 1)
+ fn = syslook("ifaceeq")
}
n.Right = cheapexpr(n.Right, init)
n.Left = cheapexpr(n.Left, init)
- substArgTypes(fn, n.Right.Type, n.Left.Type)
+ substArgTypes(&fn, n.Right.Type, n.Left.Type)
r := mkcall1(fn, n.Type, init, n.Left, n.Right)
// TODO(marvin): Fix Node.EType type union.
if Op(n.Etype) == ONE {
return n
}
-func ascompatee1(op Op, l *Node, r *Node, init **NodeList) *Node {
+func ascompatee1(op Op, l *Node, r *Node, init *Nodes) *Node {
// convas will turn map assigns into function calls,
// making it impossible for reorder3 to work.
n := Nod(OAS, l, r)
return convas(n, init)
}
-func ascompatee(op Op, nl *NodeList, nr *NodeList, init **NodeList) *NodeList {
+func ascompatee(op Op, nl, nr []*Node, init *Nodes) []*Node {
// check assign expression list to
// a expression list. called in
// expr-list = expr-list
// ensure order of evaluation for function calls
- for ll := nl; ll != nil; ll = ll.Next {
- ll.N = safeexpr(ll.N, init)
+ for i := range nl {
+ nl[i] = safeexpr(nl[i], init)
}
- for lr := nr; lr != nil; lr = lr.Next {
- lr.N = safeexpr(lr.N, init)
+ for i1 := range nr {
+ nr[i1] = safeexpr(nr[i1], init)
}
- var nn *NodeList
- ll := nl
- lr := nr
- for ; ll != nil && lr != nil; ll, lr = ll.Next, lr.Next {
+ var nn []*Node
+ nlit := nodeSeqIterate(nl)
+ nrit := nodeSeqIterate(nr)
+ for ; !nlit.Done() && !nrit.Done(); nlit.Next() {
// Do not generate 'x = x' during return. See issue 4014.
- if op == ORETURN && ll.N == lr.N {
+ if op == ORETURN && nlit.N() == nrit.N() {
+ nrit.Next()
continue
}
- nn = list(nn, ascompatee1(op, ll.N, lr.N, init))
+ nn = append(nn, ascompatee1(op, nlit.N(), nrit.N(), init))
+ nrit.Next()
}
// cannot happen: caller checked that lists had same length
- if ll != nil || lr != nil {
- Yyerror("error in shape across %v %v %v / %d %d [%s]", Hconv(nl, obj.FmtSign), Oconv(int(op), 0), Hconv(nr, obj.FmtSign), count(nl), count(nr), Curfn.Func.Nname.Sym.Name)
+ if !nlit.Done() || !nrit.Done() {
+ var nln, nrn Nodes
+ nln.Set(nl)
+ nrn.Set(nr)
+ Yyerror("error in shape across %v %v %v / %d %d [%s]", Hconv(nln, obj.FmtSign), Oconv(op, 0), Hconv(nrn, obj.FmtSign), nodeSeqLen(nl), nodeSeqLen(nr), Curfn.Func.Nname.Sym.Name)
}
return nn
}
return true
}
-func ascompatet(op Op, nl *NodeList, nr **Type, fp int, init **NodeList) *NodeList {
+func ascompatet(op Op, nl Nodes, nr **Type, fp int, init *Nodes) []*Node {
var l *Node
var tmp *Node
var a *Node
- var ll *NodeList
- var saver Iter
// check assign type list to
// a expression list. called in
// expr-list = func()
- r := Structfirst(&saver, nr)
+ r, saver := IterFields(*nr)
- var nn *NodeList
- var mm *NodeList
+ var nn []*Node
+ var mm []*Node
ucount := 0
- for ll = nl; ll != nil; ll = ll.Next {
+ it := nodeSeqIterate(nl)
+ for ; !it.Done(); it.Next() {
if r == nil {
break
}
- l = ll.N
+ l = it.N()
if isblank(l) {
- r = structnext(&saver)
+ r = saver.Next()
continue
}
typecheck(&tmp, Erv)
a = Nod(OAS, l, tmp)
a = convas(a, init)
- mm = list(mm, a)
+ mm = append(mm, a)
l = tmp
}
ucount++
}
- nn = list(nn, a)
- r = structnext(&saver)
+ nn = append(nn, a)
+ r = saver.Next()
}
- if ll != nil || r != nil {
- Yyerror("ascompatet: assignment count mismatch: %d = %d", count(nl), structcount(*nr))
+ if !it.Done() || r != nil {
+ Yyerror("ascompatet: assignment count mismatch: %d = %d", nl.Len(), structcount(*nr))
}
if ucount != 0 {
Fatalf("ascompatet: too many function calls evaluating parameters")
}
- return concat(nn, mm)
+ return append(nn, mm...)
}
// package all the arguments that match a ... T parameter into a []T.
-func mkdotargslice(lr0 *NodeList, nn *NodeList, l *Type, fp int, init **NodeList, ddd *Node) *NodeList {
+func mkdotargslice(lr0, nn []*Node, l *Type, fp int, init *Nodes, ddd *Node) []*Node {
esc := uint16(EscUnknown)
if ddd != nil {
esc = ddd.Esc
tslice.Bound = -1
var n *Node
- if count(lr0) == 0 {
+ if nodeSeqLen(lr0) == 0 {
n = nodnil()
n.Type = tslice
} else {
if ddd != nil && prealloc[ddd] != nil {
prealloc[n] = prealloc[ddd] // temporary to use
}
- n.List = lr0
+ n.List.Set(lr0)
n.Esc = esc
typecheck(&n, Erv)
if n.Type == nil {
}
a := Nod(OAS, nodarg(l, fp), n)
- nn = list(nn, convas(a, init))
+ nn = append(nn, convas(a, init))
return nn
}
// helpers for shape errors
func dumptypes(nl **Type, what string) string {
- var savel Iter
-
fmt_ := ""
fmt_ += "\t"
first := 1
- for l := Structfirst(&savel, nl); l != nil; l = structnext(&savel) {
+ for l, it := IterFields(*nl); l != nil; l = it.Next() {
if first != 0 {
first = 0
} else {
return fmt_
}
-func dumpnodetypes(l *NodeList, what string) string {
+func dumpnodetypes(l []*Node, what string) string {
var r *Node
fmt_ := ""
fmt_ += "\t"
first := 1
- for ; l != nil; l = l.Next {
- r = l.N
+ for _, r = range l {
if first != 0 {
first = 0
} else {
// a type list. called in
// return expr-list
// func(expr-list)
-func ascompatte(op Op, call *Node, isddd bool, nl **Type, lr *NodeList, fp int, init **NodeList) *NodeList {
- var savel Iter
-
+func ascompatte(op Op, call *Node, isddd bool, nl **Type, lr []*Node, fp int, init *Nodes) []*Node {
lr0 := lr
- l := Structfirst(&savel, nl)
+ l, savel := IterFields(*nl)
var r *Node
- if lr != nil {
- r = lr.N
+ if nodeSeqLen(lr) > 0 {
+ r = nodeSeqFirst(lr)
}
- var nn *NodeList
+ var nn []*Node
// f(g()) where g has multiple return values
var a *Node
var l2 string
var ll *Type
var l1 string
- if r != nil && lr.Next == nil && r.Type.Etype == TSTRUCT && r.Type.Funarg {
+ if r != nil && nodeSeqLen(lr) <= 1 && r.Type.Etype == TSTRUCT && r.Type.Funarg {
// optimization - can do block copy
if eqtypenoname(r.Type, *nl) {
a := nodarg(*nl, fp)
r = Nod(OCONVNOP, r, nil)
r.Type = a.Type
- nn = list1(convas(Nod(OAS, a, r), init))
+ nn = []*Node{convas(Nod(OAS, a, r), init)}
goto ret
}
// conversions involved.
// copy into temporaries.
- var alist *NodeList
+ var alist []*Node
- for l := Structfirst(&savel, &r.Type); l != nil; l = structnext(&savel) {
+ for l, it := IterFields(r.Type); l != nil; l = it.Next() {
a = temp(l.Type)
- alist = list(alist, a)
+ alist = append(alist, a)
}
a = Nod(OAS2, nil, nil)
- a.List = alist
- a.Rlist = lr
+ a.List.Set(alist)
+ a.Rlist.Set(lr)
typecheck(&a, Etop)
walkstmt(&a)
- *init = list(*init, a)
+ init.Append(a)
lr = alist
- r = lr.N
- l = Structfirst(&savel, nl)
+ r = nodeSeqFirst(lr)
+ l, savel = IterFields(*nl)
}
loop:
if l != nil && l.Isddd {
// the ddd parameter must be last
- ll = structnext(&savel)
+ ll = savel.Next()
if ll != nil {
Yyerror("... must be last argument")
// only if we are assigning a single ddd
// argument to a ddd parameter then it is
// passed thru unencapsulated
- if r != nil && lr.Next == nil && isddd && Eqtype(l.Type, r.Type) {
+ if r != nil && len(lr) <= 1 && isddd && Eqtype(l.Type, r.Type) {
a = Nod(OAS, nodarg(l, fp), r)
a = convas(a, init)
- nn = list(nn, a)
+ nn = append(nn, a)
goto ret
}
l1 = dumptypes(nl, "expected")
l2 = dumpnodetypes(lr0, "given")
if l != nil {
- Yyerror("not enough arguments to %v\n%s\n%s", Oconv(int(op), 0), l1, l2)
+ Yyerror("not enough arguments to %v\n%s\n%s", Oconv(op, 0), l1, l2)
} else {
- Yyerror("too many arguments to %v\n%s\n%s", Oconv(int(op), 0), l1, l2)
+ Yyerror("too many arguments to %v\n%s\n%s", Oconv(op, 0), l1, l2)
}
}
a = Nod(OAS, nodarg(l, fp), r)
a = convas(a, init)
- nn = list(nn, a)
+ nn = append(nn, a)
- l = structnext(&savel)
+ l = savel.Next()
r = nil
- lr = lr.Next
- if lr != nil {
- r = lr.N
+ lr = lr[1:]
+ if len(lr) > 0 {
+ r = lr[0]
}
goto loop
ret:
- for lr = nn; lr != nil; lr = lr.Next {
- lr.N.Typecheck = 1
+ for _, n := range nn {
+ n.Typecheck = 1
}
return nn
}
// generate code for print
-func walkprint(nn *Node, init **NodeList) *Node {
+func walkprint(nn *Node, init *Nodes) *Node {
var r *Node
var n *Node
var on *Node
op := nn.Op
all := nn.List
- var calls *NodeList
+ var calls []*Node
notfirst := false
// Hoist all the argument evaluation up before the lock.
- walkexprlistcheap(all, init)
+ walkexprlistcheap(all.Slice(), init)
- calls = list(calls, mkcall("printlock", nil, init))
-
- for l := all; l != nil; l = l.Next {
+ calls = append(calls, mkcall("printlock", nil, init))
+ for i1, n1 := range all.Slice() {
if notfirst {
- calls = list(calls, mkcall("printsp", nil, init))
+ calls = append(calls, mkcall("printsp", nil, init))
}
notfirst = op == OPRINTN
- n = l.N
+ n = n1
if n.Op == OLITERAL {
switch n.Val().Ctype() {
case CTRUNE:
defaultlit(&n, Types[TINT64])
}
defaultlit(&n, nil)
- l.N = n
+ all.Slice()[i1] = n
if n.Type == nil || n.Type.Etype == TFORW {
continue
}
et = n.Type.Etype
if Isinter(n.Type) {
if isnilinter(n.Type) {
- on = syslook("printeface", 1)
+ on = syslook("printeface")
} else {
- on = syslook("printiface", 1)
+ on = syslook("printiface")
}
- substArgTypes(on, n.Type) // any-1
+ substArgTypes(&on, n.Type) // any-1
} else if Isptr[et] || et == TCHAN || et == TMAP || et == TFUNC || et == TUNSAFEPTR {
- on = syslook("printpointer", 1)
- substArgTypes(on, n.Type) // any-1
+ on = syslook("printpointer")
+ substArgTypes(&on, n.Type) // any-1
} else if Isslice(n.Type) {
- on = syslook("printslice", 1)
- substArgTypes(on, n.Type) // any-1
+ on = syslook("printslice")
+ substArgTypes(&on, n.Type) // any-1
} else if Isint[et] {
if et == TUINT64 {
if (t.Sym.Pkg == Runtimepkg || compiling_runtime != 0) && t.Sym.Name == "hex" {
- on = syslook("printhex", 0)
+ on = syslook("printhex")
} else {
- on = syslook("printuint", 0)
+ on = syslook("printuint")
}
} else {
- on = syslook("printint", 0)
+ on = syslook("printint")
}
} else if Isfloat[et] {
- on = syslook("printfloat", 0)
+ on = syslook("printfloat")
} else if Iscomplex[et] {
- on = syslook("printcomplex", 0)
+ on = syslook("printcomplex")
} else if et == TBOOL {
- on = syslook("printbool", 0)
+ on = syslook("printbool")
} else if et == TSTRING {
- on = syslook("printstring", 0)
+ on = syslook("printstring")
} else {
badtype(OPRINT, n.Type, nil)
continue
}
- t = *getinarg(on.Type)
+ t = on.Type.Params()
if t != nil {
t = t.Type
}
}
r = Nod(OCALL, on, nil)
- r.List = list1(n)
- calls = list(calls, r)
+ r.List.Append(n)
+ calls = append(calls, r)
}
if op == OPRINTN {
- calls = list(calls, mkcall("printnl", nil, nil))
+ calls = append(calls, mkcall("printnl", nil, nil))
}
- calls = list(calls, mkcall("printunlock", nil, init))
+ calls = append(calls, mkcall("printunlock", nil, init))
typechecklist(calls, Etop)
walkexprlist(calls, init)
r = Nod(OEMPTY, nil, nil)
typecheck(&r, Etop)
walkexpr(&r, init)
- r.Ninit = calls
+ r.Ninit.Set(calls)
return r
}
func callnew(t *Type) *Node {
dowidth(t)
- fn := syslook("newobject", 1)
- substArgTypes(fn, t)
+ fn := syslook("newobject")
+ substArgTypes(&fn, t)
return mkcall1(fn, Ptrto(t), nil, typename(t))
}
func applywritebarrier(n *Node) *Node {
if n.Left != nil && n.Right != nil && needwritebarrier(n.Left, n.Right) {
if Debug_wb > 1 {
- Warnl(int(n.Lineno), "marking %v for barrier", Nconv(n.Left, 0))
+ Warnl(n.Lineno, "marking %v for barrier", Nconv(n.Left, 0))
}
n.Op = OASWB
return n
return n
}
-func convas(n *Node, init **NodeList) *Node {
+func convas(n *Node, init *Nodes) *Node {
if n.Op != OAS {
- Fatalf("convas: not OAS %v", Oconv(int(n.Op), 0))
+ Fatalf("convas: not OAS %v", Oconv(n.Op, 0))
}
n.Typecheck = 1
// if there is exactly one function expr,
// then it is done first. otherwise must
// make temp variables
-func reorder1(all *NodeList) *NodeList {
- var n *Node
-
+func reorder1(all []*Node) []*Node {
c := 0 // function calls
t := 0 // total parameters
- for l := all; l != nil; l = l.Next {
- n = l.N
+ for _, n := range all {
t++
ullmancalc(n)
if n.Ullman >= UINF {
return all
}
- var g *NodeList // fncalls assigned to tempnames
- var f *Node // last fncall assigned to stack
- var r *NodeList // non fncalls and tempnames assigned to stack
+ var g []*Node // fncalls assigned to tempnames
+ var f *Node // last fncall assigned to stack
+ var r []*Node // non fncalls and tempnames assigned to stack
d := 0
var a *Node
- for l := all; l != nil; l = l.Next {
- n = l.N
+ for _, n := range all {
if n.Ullman < UINF {
- r = list(r, n)
+ r = append(r, n)
continue
}
a = temp(n.Right.Type)
a = Nod(OAS, a, n.Right)
- g = list(g, a)
+ g = append(g, a)
// put normal arg assignment on list
// with fncall replaced by tempname
n.Right = a.Left
- r = list(r, n)
+ r = append(r, n)
}
if f != nil {
- g = list(g, f)
+ g = append(g, f)
}
- return concat(g, r)
+ return append(g, r...)
}
// from ascompat[ee]
// be later use of an earlier lvalue.
//
// function calls have been removed.
-func reorder3(all *NodeList) *NodeList {
+func reorder3(all []*Node) []*Node {
var l *Node
// If a needed expression may be affected by an
// earlier assignment, make an early copy of that
// expression and use the copy instead.
- var early *NodeList
+ var early []*Node
- var mapinit *NodeList
- for list := all; list != nil; list = list.Next {
- l = list.N.Left
+ var mapinit Nodes
+ for i, n := range all {
+ l = n.Left
// Save subexpressions needed on left side.
// Drill through non-dereferences.
}
if l.Op == OINDEX && Isfixedarray(l.Left.Type) {
- reorder3save(&l.Right, all, list, &early)
+ reorder3save(&l.Right, all, i, &early)
l = l.Left
continue
}
switch l.Op {
default:
- Fatalf("reorder3 unexpected lvalue %v", Oconv(int(l.Op), obj.FmtSharp))
+ Fatalf("reorder3 unexpected lvalue %v", Oconv(l.Op, obj.FmtSharp))
case ONAME:
break
case OINDEX, OINDEXMAP:
- reorder3save(&l.Left, all, list, &early)
- reorder3save(&l.Right, all, list, &early)
+ reorder3save(&l.Left, all, i, &early)
+ reorder3save(&l.Right, all, i, &early)
if l.Op == OINDEXMAP {
- list.N = convas(list.N, &mapinit)
+ all[i] = convas(all[i], &mapinit)
}
case OIND, ODOTPTR:
- reorder3save(&l.Left, all, list, &early)
+ reorder3save(&l.Left, all, i, &early)
}
// Save expression on right side.
- reorder3save(&list.N.Right, all, list, &early)
+ reorder3save(&all[i].Right, all, i, &early)
}
- early = concat(mapinit, early)
- return concat(early, all)
+ early = append(mapinit.Slice(), early...)
+ return append(early, all...)
}
// if the evaluation of *np would be affected by the
-// assignments in all up to but not including stop,
+// assignments in all up to but not including the ith assignment,
// copy into a temporary during *early and
// replace *np with that temp.
-func reorder3save(np **Node, all *NodeList, stop *NodeList, early **NodeList) {
+func reorder3save(np **Node, all []*Node, i int, early *[]*Node) {
n := *np
- if !aliased(n, all, stop) {
+ if !aliased(n, all, i) {
return
}
q := temp(n.Type)
q = Nod(OAS, q, n)
typecheck(&q, Etop)
- *early = list(*early, q)
+ *early = append(*early, q)
*np = q.Left
}
}
// Is it possible that the computation of n might be
-// affected by writes in as up to but not including stop?
-func aliased(n *Node, all *NodeList, stop *NodeList) bool {
+// affected by writes in as up to but not including the ith element?
+func aliased(n *Node, all []*Node, i int) bool {
if n == nil {
return false
}
varwrite := 0
var a *Node
- for l := all; l != stop; l = l.Next {
- a = outervalue(l.N.Left)
+ for _, an := range all[:i] {
+ a = outervalue(an.Left)
if a.Op != ONAME {
memwrite = 1
continue
if vmatch2(l, r.Right) {
return true
}
- for ll := r.List; ll != nil; ll = ll.Next {
- if vmatch2(l, ll.N) {
+ for _, n := range r.List.Slice() {
+ if vmatch2(l, n) {
return true
}
}
if vmatch1(l.Right, r) {
return true
}
- for ll := l.List; ll != nil; ll = ll.Next {
- if vmatch1(ll.N, r) {
+ for _, n := range l.List.Slice() {
+ if vmatch1(n, r) {
return true
}
}
// generate and return code to allocate
// copies of escaped parameters to the heap.
func paramstoheap(argin **Type, out int) []*Node {
- var savet Iter
var v *Node
var as *Node
var nn []*Node
- for t := Structfirst(&savet, argin); t != nil; t = structnext(&savet) {
+ for t, it := IterFields(*argin); t != nil; t = it.Next() {
v = t.Nname
if v != nil && v.Sym != nil && v.Sym.Name[0] == '~' && v.Sym.Name[1] == 'r' { // unnamed result
v = nil
// walk through argout parameters copying back to stack
func returnsfromheap(argin **Type) []*Node {
- var savet Iter
var v *Node
var nn []*Node
- for t := Structfirst(&savet, argin); t != nil; t = structnext(&savet) {
+ for t, it := IterFields(*argin); t != nil; t = it.Next() {
v = t.Nname
if v == nil || v.Class != PHEAP|PPARAMOUT {
continue
func heapmoves() {
lno := lineno
lineno = Curfn.Lineno
- nn := paramstoheap(getthis(Curfn.Type), 0)
- nn = append(nn, paramstoheap(getinarg(Curfn.Type), 0)...)
- nn = append(nn, paramstoheap(Getoutarg(Curfn.Type), 1)...)
+ nn := paramstoheap(Curfn.Type.RecvP(), 0)
+ nn = append(nn, paramstoheap(Curfn.Type.ParamsP(), 0)...)
+ nn = append(nn, paramstoheap(Curfn.Type.ResultsP(), 1)...)
Curfn.Func.Enter.Append(nn...)
lineno = Curfn.Func.Endlineno
- Curfn.Func.Exit.Append(returnsfromheap(Getoutarg(Curfn.Type))...)
+ Curfn.Func.Exit.Append(returnsfromheap(Curfn.Type.ResultsP())...)
lineno = lno
}
-func vmkcall(fn *Node, t *Type, init **NodeList, va []*Node) *Node {
+func vmkcall(fn *Node, t *Type, init *Nodes, va []*Node) *Node {
if fn.Type == nil || fn.Type.Etype != TFUNC {
Fatalf("mkcall %v %v", fn, fn.Type)
}
- var args *NodeList
n := fn.Type.Intuple
- for i := 0; i < n; i++ {
- args = list(args, va[i])
- }
r := Nod(OCALL, fn, nil)
- r.List = args
+ r.List.Set(va[:n])
if fn.Type.Outtuple > 0 {
typecheck(&r, Erv|Efnstruct)
} else {
return r
}
-func mkcall(name string, t *Type, init **NodeList, args ...*Node) *Node {
- return vmkcall(syslook(name, 0), t, init, args)
+func mkcall(name string, t *Type, init *Nodes, args ...*Node) *Node {
+ return vmkcall(syslook(name), t, init, args)
}
-func mkcall1(fn *Node, t *Type, init **NodeList, args ...*Node) *Node {
+func mkcall1(fn *Node, t *Type, init *Nodes, args ...*Node) *Node {
return vmkcall(fn, t, init, args)
}
if t.Etype != TCHAN {
Fatalf("chanfn %v", t)
}
- fn := syslook(name, 1)
+ fn := syslook(name)
switch n {
default:
Fatalf("chanfn %d", n)
case 1:
- substArgTypes(fn, t.Type)
+ substArgTypes(&fn, t.Type)
case 2:
- substArgTypes(fn, t.Type, t.Type)
+ substArgTypes(&fn, t.Type, t.Type)
}
return fn
}
if t.Etype != TMAP {
Fatalf("mapfn %v", t)
}
- fn := syslook(name, 1)
- substArgTypes(fn, t.Down, t.Type, t.Down, t.Type)
+ fn := syslook(name)
+ substArgTypes(&fn, t.Down, t.Type, t.Down, t.Type)
return fn
}
if t.Etype != TMAP {
Fatalf("mapfn %v", t)
}
- fn := syslook(name, 1)
- substArgTypes(fn, t.Down, t.Type, t.Down)
+ fn := syslook(name)
+ substArgTypes(&fn, t.Down, t.Type, t.Down)
return fn
}
func writebarrierfn(name string, l *Type, r *Type) *Node {
- fn := syslook(name, 1)
- substArgTypes(fn, l, r)
+ fn := syslook(name)
+ substArgTypes(&fn, l, r)
return fn
}
-func addstr(n *Node, init **NodeList) *Node {
+func addstr(n *Node, init *Nodes) *Node {
// orderexpr rewrote OADDSTR to have a list of strings.
- c := count(n.List)
+ c := n.List.Len()
if c < 2 {
Yyerror("addstr count %d too small", c)
buf := nodnil()
if n.Esc == EscNone {
sz := int64(0)
- for l := n.List; l != nil; l = l.Next {
- if l.N.Op == OLITERAL {
- sz += int64(len(l.N.Val().U.(string)))
+ for _, n1 := range n.List.Slice() {
+ if n1.Op == OLITERAL {
+ sz += int64(len(n1.Val().U.(string)))
}
}
}
// build list of string arguments
- args := list1(buf)
-
- for l := n.List; l != nil; l = l.Next {
- args = list(args, conv(l.N, Types[TSTRING]))
+ args := []*Node{buf}
+ for _, n2 := range n.List.Slice() {
+ args = append(args, conv(n2, Types[TSTRING]))
}
var fn string
if prealloc[n] != nil {
prealloc[slice] = prealloc[n]
}
- slice.List = args.Next // skip buf arg
- args = list1(buf)
- args = list(args, slice)
+ slice.List.Set(args[1:]) // skip buf arg
+ args = []*Node{buf}
+ args = append(args, slice)
slice.Esc = EscNone
}
- cat := syslook(fn, 1)
+ cat := syslook(fn)
r := Nod(OCALL, cat, nil)
- r.List = args
+ r.List.Set(args)
typecheck(&r, Erv)
walkexpr(&r, init)
r.Type = n.Type
// s
//
// l2 is allowed to be a string.
-func appendslice(n *Node, init **NodeList) *Node {
- walkexprlistsafe(n.List, init)
+func appendslice(n *Node, init *Nodes) *Node {
+ walkexprlistsafe(n.List.Slice(), init)
+ for i1 := range
// walkexprlistsafe will leave OINDEX (s[n]) alone if both s
// and n are name or literal, but those may index the slice we're
// modifying here. Fix explicitly.
- for l := n.List; l != nil; l = l.Next {
- l.N = cheapexpr(l.N, init)
+ n.List.Slice() {
+ n.List.Slice()[i1] = cheapexpr(n.List.Slice()[i1],
+ init)
}
- l1 := n.List.N
- l2 := n.List.Next.N
+ l1 := n.List.First()
+ l2 := n.List.Second()
s := temp(l1.Type) // var s []T
- var l *NodeList
- l = list(l, Nod(OAS, s, l1)) // s = l1
+ var l []*Node
+ l = append(l, Nod(OAS, s, l1)) // s = l1
nt := temp(Types[TINT])
nif := Nod(OIF, nil, nil)
// n := len(s) + len(l2) - cap(s)
- nif.Ninit = list1(Nod(OAS, nt, Nod(OSUB, Nod(OADD, Nod(OLEN, s, nil), Nod(OLEN, l2, nil)), Nod(OCAP, s, nil))))
+ setNodeSeq(&nif.Ninit, list1(Nod(OAS, nt, Nod(OSUB, Nod(OADD, Nod(OLEN, s, nil), Nod(OLEN, l2, nil)), Nod(OCAP, s, nil)))))
nif.Left = Nod(OGT, nt, Nodintconst(0))
// instantiate growslice_n(Type*, []any, int) []any
- fn := syslook("growslice_n", 1) // growslice_n(<type>, old []T, n int64) (ret []T)
- substArgTypes(fn, s.Type.Type, s.Type.Type)
+ fn := syslook("growslice_n") // growslice_n(<type>, old []T, n int64) (ret []T)
+ substArgTypes(&fn, s.Type.Type, s.Type.Type)
// s = growslice_n(T, s, n)
nif.Nbody.Set([]*Node{Nod(OAS, s, mkcall1(fn, s.Type, &nif.Ninit, typename(s.Type), s, nt))})
- l = list(l, nif)
+ l = append(l, nif)
if haspointers(l1.Type.Type) {
// copy(s[len(l1):len(l1)+len(l2)], l2)
nptr1.Etype = 1
nptr2 := l2
- fn := syslook("typedslicecopy", 1)
- substArgTypes(fn, l1.Type, l2.Type)
- nt := mkcall1(fn, Types[TINT], &l, typename(l1.Type.Type), nptr1, nptr2)
- l = list(l, nt)
+ fn := syslook("typedslicecopy")
+ substArgTypes(&fn, l1.Type, l2.Type)
+ var ln Nodes
+ ln.Set(l)
+ nt := mkcall1(fn, Types[TINT], &ln, typename(l1.Type.Type), nptr1, nptr2)
+ l = append(ln.Slice(), nt)
} else if instrumenting {
// rely on runtime to instrument copy.
// copy(s[len(l1):len(l1)+len(l2)], l2)
nptr2 := l2
var fn *Node
if l2.Type.Etype == TSTRING {
- fn = syslook("slicestringcopy", 1)
+ fn = syslook("slicestringcopy")
} else {
- fn = syslook("slicecopy", 1)
+ fn = syslook("slicecopy")
}
- substArgTypes(fn, l1.Type, l2.Type)
- nt := mkcall1(fn, Types[TINT], &l, nptr1, nptr2, Nodintconst(s.Type.Type.Width))
- l = list(l, nt)
+ substArgTypes(&fn, l1.Type, l2.Type)
+ var ln Nodes
+ ln.Set(l)
+ nt := mkcall1(fn, Types[TINT], &ln, nptr1, nptr2, Nodintconst(s.Type.Type.Width))
+ l = append(ln.Slice(), nt)
} else {
// memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T))
nptr1 := Nod(OINDEX, s, Nod(OLEN, l1, nil))
nptr2 := Nod(OSPTR, l2, nil)
- fn := syslook("memmove", 1)
- substArgTypes(fn, s.Type.Type, s.Type.Type)
+ fn := syslook("memmove")
+ substArgTypes(&fn, s.Type.Type, s.Type.Type)
- nwid := cheapexpr(conv(Nod(OLEN, l2, nil), Types[TUINTPTR]), &l)
+ var ln Nodes
+ ln.Set(l)
+ nwid := cheapexpr(conv(Nod(OLEN, l2, nil), Types[TUINTPTR]), &ln)
nwid = Nod(OMUL, nwid, Nodintconst(s.Type.Type.Width))
- nt := mkcall1(fn, nil, &l, nptr1, nptr2, nwid)
- l = list(l, nt)
+ nt := mkcall1(fn, nil, &ln, nptr1, nptr2, nwid)
+ l = append(ln.Slice(), nt)
}
// s = s[:len(l1)+len(l2)]
nt = Nod(OSLICE, s, Nod(OKEY, nil, nt))
nt.Etype = 1
- l = list(l, Nod(OAS, s, nt))
+ l = append(l, Nod(OAS, s, nt))
typechecklist(l, Etop)
walkstmtlist(l)
- *init = concat(*init, l)
+ init.Append(l...)
return s
}
// ...
// }
// s
-func walkappend(n *Node, init **NodeList, dst *Node) *Node {
- if !samesafeexpr(dst, n.List.N) {
- l := n.List
- l.N = safeexpr(l.N, init)
- walkexpr(&l.N, init)
+func walkappend(n *Node, init *Nodes, dst *Node) *Node {
+ if !samesafeexpr(dst, n.List.First()) {
+ it := nodeSeqIterate(n.List)
+ *it.P() = safeexpr(it.N(), init)
+ walkexpr(it.P(), init)
}
- walkexprlistsafe(n.List.Next, init)
+ walkexprlistsafe(n.List.Slice()[1:], init)
// walkexprlistsafe will leave OINDEX (s[n]) alone if both s
// and n are name or literal, but those may index the slice we're
// Using cheapexpr also makes sure that the evaluation
// of all arguments (and especially any panics) happen
// before we begin to modify the slice in a visible way.
- for l := n.List.Next; l != nil; l = l.Next {
- l.N = cheapexpr(l.N, init)
+ it := nodeSeqIterate(n.List)
+ it.Next()
+ for ; !it.Done(); it.Next() {
+ *it.P() = cheapexpr(it.N(), init)
}
- nsrc := n.List.N
+ nsrc := n.List.First()
// Resolve slice type of multi-valued return.
if Istype(nsrc.Type, TSTRUCT) {
nsrc.Type = nsrc.Type.Type.Type
}
- argc := count(n.List) - 1
+ argc := n.List.Len() - 1
if argc < 1 {
return nsrc
}
return n
}
- var l *NodeList
+ var l []*Node
ns := temp(nsrc.Type)
- l = list(l, Nod(OAS, ns, nsrc)) // s = src
+ l = append(l, Nod(OAS, ns, nsrc)) // s = src
na := Nodintconst(int64(argc)) // const argc
nx := Nod(OIF, nil, nil) // if cap(s) - len(s) < argc
nx.Left = Nod(OLT, Nod(OSUB, Nod(OCAP, ns, nil), Nod(OLEN, ns, nil)), na)
- fn := syslook("growslice", 1) // growslice(<type>, old []T, mincap int) (ret []T)
- substArgTypes(fn, ns.Type.Type, ns.Type.Type)
+ fn := syslook("growslice") // growslice(<type>, old []T, mincap int) (ret []T)
+ substArgTypes(&fn, ns.Type.Type, ns.Type.Type)
nx.Nbody.Set([]*Node{Nod(OAS, ns, mkcall1(fn, ns.Type, &nx.Ninit, typename(ns.Type), ns, Nod(OADD, Nod(OLEN, ns, nil), na)))})
- l = list(l, nx)
+ l = append(l, nx)
nn := temp(Types[TINT])
- l = list(l, Nod(OAS, nn, Nod(OLEN, ns, nil))) // n = len(s)
+ l = append(l, Nod(OAS, nn, Nod(OLEN, ns, nil))) // n = len(s)
nx = Nod(OSLICE, ns, Nod(OKEY, nil, Nod(OADD, nn, na))) // ...s[:n+argc]
nx.Etype = 1
- l = list(l, Nod(OAS, ns, nx)) // s = s[:n+argc]
+ l = append(l, Nod(OAS, ns, nx)) // s = s[:n+argc]
- for a := n.List.Next; a != nil; a = a.Next {
+ it = nodeSeqIterate(n.List)
+ it.Next()
+ for ; !it.Done(); it.Next() {
nx = Nod(OINDEX, ns, nn) // s[n] ...
nx.Bounded = true
- l = list(l, Nod(OAS, nx, a.N)) // s[n] = arg
- if a.Next != nil {
- l = list(l, Nod(OAS, nn, Nod(OADD, nn, Nodintconst(1)))) // n = n + 1
+ l = append(l, Nod(OAS, nx, it.N())) // s[n] = arg
+ if it.Len() > 1 {
+ l = append(l, Nod(OAS, nn, Nod(OADD, nn, Nodintconst(1)))) // n = n + 1
}
}
typechecklist(l, Etop)
walkstmtlist(l)
- *init = concat(*init, l)
+ init.Append(l...)
return ns
}
//
// Also works if b is a string.
//
-func copyany(n *Node, init **NodeList, runtimecall bool) *Node {
+func copyany(n *Node, init *Nodes, runtimecall bool) *Node {
if haspointers(n.Left.Type.Type) {
fn := writebarrierfn("typedslicecopy", n.Left.Type, n.Right.Type)
return mkcall1(fn, n.Type, init, typename(n.Left.Type.Type), n.Left, n.Right)
if runtimecall {
var fn *Node
if n.Right.Type.Etype == TSTRING {
- fn = syslook("slicestringcopy", 1)
+ fn = syslook("slicestringcopy")
} else {
- fn = syslook("slicecopy", 1)
+ fn = syslook("slicecopy")
}
- substArgTypes(fn, n.Left.Type, n.Right.Type)
+ substArgTypes(&fn, n.Left.Type, n.Right.Type)
return mkcall1(fn, n.Type, init, n.Left, n.Right, Nodintconst(n.Left.Type.Type.Width))
}
walkexpr(&n.Right, init)
nl := temp(n.Left.Type)
nr := temp(n.Right.Type)
- var l *NodeList
- l = list(l, Nod(OAS, nl, n.Left))
- l = list(l, Nod(OAS, nr, n.Right))
+ var l []*Node
+ l = append(l, Nod(OAS, nl, n.Left))
+ l = append(l, Nod(OAS, nr, n.Right))
nfrm := Nod(OSPTR, nr, nil)
nto := Nod(OSPTR, nl, nil)
nlen := temp(Types[TINT])
// n = len(to)
- l = list(l, Nod(OAS, nlen, Nod(OLEN, nl, nil)))
+ l = append(l, Nod(OAS, nlen, Nod(OLEN, nl, nil)))
// if n > len(frm) { n = len(frm) }
nif := Nod(OIF, nil, nil)
nif.Left = Nod(OGT, nlen, Nod(OLEN, nr, nil))
nif.Nbody.Append(Nod(OAS, nlen, Nod(OLEN, nr, nil)))
- l = list(l, nif)
+ l = append(l, nif)
// Call memmove.
- fn := syslook("memmove", 1)
+ fn := syslook("memmove")
- substArgTypes(fn, nl.Type.Type, nl.Type.Type)
+ substArgTypes(&fn, nl.Type.Type, nl.Type.Type)
nwid := temp(Types[TUINTPTR])
- l = list(l, Nod(OAS, nwid, conv(nlen, Types[TUINTPTR])))
+ l = append(l, Nod(OAS, nwid, conv(nlen, Types[TUINTPTR])))
nwid = Nod(OMUL, nwid, Nodintconst(nl.Type.Type.Width))
- l = list(l, mkcall1(fn, nil, init, nto, nfrm, nwid))
+ l = append(l, mkcall1(fn, nil, init, nto, nfrm, nwid))
typechecklist(l, Etop)
walkstmtlist(l)
- *init = concat(*init, l)
+ init.Append(l...)
return nlen
}
}
if a == AMEM {
- n := syslook("memequal", 1)
- substArgTypes(n, t, t)
+ n := syslook("memequal")
+ substArgTypes(&n, t, t)
*needsize = 1
return n
}
n := newname(sym)
n.Class = PFUNC
ntype := Nod(OTFUNC, nil, nil)
- ntype.List = list(ntype.List, Nod(ODCLFIELD, nil, typenod(Ptrto(t))))
- ntype.List = list(ntype.List, Nod(ODCLFIELD, nil, typenod(Ptrto(t))))
- ntype.Rlist = list(ntype.Rlist, Nod(ODCLFIELD, nil, typenod(Types[TBOOL])))
+ ntype.List.Append(Nod(ODCLFIELD, nil, typenod(Ptrto(t))))
+ ntype.List.Append(Nod(ODCLFIELD, nil, typenod(Ptrto(t))))
+ ntype.Rlist.Append(Nod(ODCLFIELD, nil, typenod(Types[TBOOL])))
typecheck(&ntype, Etype)
n.Type = ntype.Type
*needsize = 0
return n
}
-func walkcompare(np **Node, init **NodeList) {
+func walkcompare(np **Node, init *Nodes) {
n := *np
// Given interface value l and concrete value r, rewrite
if haspointers(r.Type) {
a := Nod(OAS, x, nil)
typecheck(&a, Etop)
- *init = list(*init, a)
+ init.Append(a)
}
ok := temp(Types[TBOOL])
// x, ok := l.(type(r))
expr := Nod(OAS2, nil, nil)
- expr.List = list1(x)
- expr.List = list(expr.List, ok)
- expr.Rlist = list1(a)
+ expr.List.Append(x)
+ expr.List.Append(ok)
+ expr.Rlist.Append(a)
typecheck(&expr, Etop)
walkexpr(&expr, init)
} else {
r = Nod(OOROR, Nod(ONOT, ok, nil), Nod(ONE, x, r))
}
- *init = list(*init, expr)
+ init.Append(expr)
finishcompare(np, n, r, init)
return
}
a := Nod(OAS, l, Nod(OADDR, cmpl, nil))
a.Right.Etype = 1 // addr does not escape
typecheck(&a, Etop)
- *init = list(*init, a)
+ init.Append(a)
r = temp(Ptrto(t))
a = Nod(OAS, r, Nod(OADDR, cmpr, nil))
a.Right.Etype = 1 // addr does not escape
typecheck(&a, Etop)
- *init = list(*init, a)
+ init.Append(a)
var andor Op = OANDAND
if n.Op == ONE {
var needsize int
call := Nod(OCALL, eqfor(t, &needsize), nil)
- call.List = list(call.List, l)
- call.List = list(call.List, r)
+ call.List.Append(l)
+ call.List.Append(r)
if needsize != 0 {
- call.List = list(call.List, Nodintconst(t.Width))
+ call.List.Append(Nodintconst(t.Width))
}
r = call
if n.Op != OEQ {
return
}
-func finishcompare(np **Node, n, r *Node, init **NodeList) {
+func finishcompare(np **Node, n, r *Node, init *Nodes) {
// Using np here to avoid passing &r to typecheck.
*np = r
typecheck(np, Erv)
}
// walkmul rewrites integer multiplication by powers of two as shifts.
-func walkmul(np **Node, init **NodeList) {
+func walkmul(np **Node, init *Nodes) {
n := *np
if !Isint[n.Type.Etype] {
return
// walkdiv rewrites division by a constant as less expensive
// operations.
-func walkdiv(np **Node, init **NodeList) {
+func walkdiv(np **Node, init *Nodes) {
// if >= 0, nr is 1<<pow // 1 if nr is negative.
// TODO(minux)
switch n.Op {
default:
- Fatalf("usefield %v", Oconv(int(n.Op), 0))
+ Fatalf("usefield %v", Oconv(n.Op, 0))
case ODOT, ODOTPTR:
break
}
+ if n.Right == nil {
+ // No field name. This DOTPTR was built by the compiler for access
+ // to runtime data structures. Ignore.
+ return
+ }
t := n.Left.Type
if Isptr[t.Etype] {
Curfn.Func.Fieldtrack = append(Curfn.Func.Fieldtrack, field)
}
-func candiscardlist(l *NodeList) bool {
- for ; l != nil; l = l.Next {
- if !candiscard(l.N) {
- return false
- }
- }
- return true
-}
-
-func candiscardslice(l []*Node) bool {
- for _, n := range l {
+func candiscardlist(l Nodes) bool {
+ for _, n := range l.Slice() {
if !candiscard(n) {
return false
}
return false
}
- if !candiscard(n.Left) || !candiscard(n.Right) || !candiscardlist(n.Ninit) || !candiscardslice(n.Nbody.Slice()) || !candiscardlist(n.List) || !candiscardlist(n.Rlist) {
+ if !candiscard(n.Left) || !candiscard(n.Right) || !candiscardlist(n.Ninit) || !candiscardlist(n.Nbody) || !candiscardlist(n.List) || !candiscardlist(n.Rlist) {
return false
}
var walkprintfunc_prgen int
-func walkprintfunc(np **Node, init **NodeList) {
+func walkprintfunc(np **Node, init *Nodes) {
n := *np
- if n.Ninit != nil {
- walkstmtlist(n.Ninit)
- *init = concat(*init, n.Ninit)
- n.Ninit = nil
+ if n.Ninit.Len() != 0 {
+ walkstmtlist(n.Ninit.Slice())
+ init.AppendNodes(&n.Ninit)
}
t := Nod(OTFUNC, nil, nil)
num := 0
- var printargs *NodeList
+ var printargs []*Node
var a *Node
var buf string
- for l := n.List; l != nil; l = l.Next {
+ for _, n1 := range n.List.Slice() {
buf = fmt.Sprintf("a%d", num)
num++
- a = Nod(ODCLFIELD, newname(Lookup(buf)), typenod(l.N.Type))
- t.List = list(t.List, a)
- printargs = list(printargs, a.Left)
+ a = Nod(ODCLFIELD, newname(Lookup(buf)), typenod(n1.Type))
+ t.List.Append(a)
+ printargs = append(printargs, a.Left)
}
fn := Nod(ODCLFUNC, nil, nil)
funchdr(fn)
a = Nod(n.Op, nil, nil)
- a.List = printargs
+ a.List.Set(printargs)
typecheck(&a, Etop)
walkstmt(&a)
funcbody(fn)
typecheck(&fn, Etop)
- typecheckslice(fn.Nbody.Slice(), Etop)
+ typechecklist(fn.Nbody.Slice(), Etop)
xtop = list(xtop, fn)
Curfn = oldfn
a = Nod(OCALL, nil, nil)
a.Left = fn.Func.Nname
- a.List = n.List
+ a.List.Set(n.List.Slice())
typecheck(&a, Etop)
walkexpr(&a, init)
*np = a
// for example moving [4]byte must use 4 MOVB not 1 MOVW.
align := int(n.Type.Align)
- var op int
+ var op obj.As
switch align {
default:
gc.Fatalf("sgen: invalid alignment %d for %v", align, n.Type)
// the src and dst overlap, then reverse direction
dir := align
- if osrc < odst && int64(odst) < int64(osrc)+w {
+ if osrc < odst && odst < osrc+w {
dir = -dir
}
var MAXWIDTH int64 = 1 << 50
-/*
- * go declares several platform-specific type aliases:
- * int, uint, and uintptr
- */
-var typedefs = []gc.Typedef{
- {"int", gc.TINT, gc.TINT64},
- {"uint", gc.TUINT, gc.TUINT64},
- {"uintptr", gc.TUINTPTR, gc.TUINT64},
-}
-
func betypeinit() {
gc.Widthptr = 8
gc.Widthint = 8
gc.Thearch.Thechar = thechar
gc.Thearch.Thestring = thestring
gc.Thearch.Thelinkarch = thelinkarch
- gc.Thearch.Typedefs = typedefs
gc.Thearch.REGSP = mips.REGSP
gc.Thearch.REGCTXT = mips.REGCTXT
gc.Thearch.REGCALLX = mips.REG_R1
return p
}
-func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int64, ttype int, treg int, toffset int64) *obj.Prog {
+func appendpp(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int, foffset int64, ttype obj.AddrType, treg int, toffset int64) *obj.Prog {
q := gc.Ctxt.NewProg()
gc.Clearp(q)
- q.As = int16(as)
+ q.As = as
q.Lineno = p.Lineno
- q.From.Type = int16(ftype)
+ q.From.Type = ftype
q.From.Reg = int16(freg)
q.From.Offset = foffset
- q.To.Type = int16(ttype)
+ q.To.Type = ttype
q.To.Reg = int16(treg)
q.To.Offset = toffset
q.Link = p.Link
nl, nr = nr, nl
}
- t := (*gc.Type)(nl.Type)
- w := int(int(t.Width * 8))
+ t := nl.Type
+ w := t.Width * 8
var n1 gc.Node
gc.Cgenr(nl, &n1, res)
var n2 gc.Node
var lo gc.Node
gc.Nodreg(&lo, gc.Types[gc.TUINT64], mips.REG_LO)
gins(mips.AMOVV, &lo, &n1)
- p := (*obj.Prog)(gins(mips.ASRAV, nil, &n1))
+ p := gins(mips.ASRAV, nil, &n1)
p.From.Type = obj.TYPE_CONST
- p.From.Offset = int64(w)
+ p.From.Offset = w
case gc.TUINT8,
gc.TUINT16,
var lo gc.Node
gc.Nodreg(&lo, gc.Types[gc.TUINT64], mips.REG_LO)
gins(mips.AMOVV, &lo, &n1)
- p := (*obj.Prog)(gins(mips.ASRLV, nil, &n1))
+ p := gins(mips.ASRLV, nil, &n1)
p.From.Type = obj.TYPE_CONST
- p.From.Offset = int64(w)
+ p.From.Offset = w
case gc.TINT64,
gc.TUINT64:
* res = nl >> nr
*/
func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
- a := int(optoas(op, nl.Type))
+ a := optoas(op, nl.Type)
if nr.Op == gc.OLITERAL {
var n1 gc.Node
fmt.Printf("clearfat %v (%v, size: %d)\n", nl, nl.Type, nl.Type.Width)
}
- w := uint64(uint64(nl.Type.Width))
+ w := uint64(nl.Type.Width)
// Avoid taking the address for simple enough types.
if gc.Componentgen(nil, nl) {
return
}
- c := uint64(w % 8) // bytes
- q := uint64(w / 8) // dwords
+ c := w % 8 // bytes
+ q := w / 8 // dwords
if gc.Reginuse(mips.REGRT1) {
gc.Fatalf("%v in use during clearfat", obj.Rconv(mips.REGRT1))
p = gins(mips.AMOVV, &r0, &dst)
p.To.Type = obj.TYPE_MEM
p.To.Offset = 8
- pl := (*obj.Prog)(p)
+ pl := p
p = gins(mips.AADDV, nil, &dst)
p.From.Type = obj.TYPE_CONST
p := gins(mips.ASUBV, nil, &dst)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 8
- f := (*gc.Node)(gc.Sysfunc("duffzero"))
+ f := gc.Sysfunc("duffzero")
p = gins(obj.ADUFFZERO, nil, f)
gc.Afunclit(&p.To, f)
func expandchecks(firstp *obj.Prog) {
var p1 *obj.Prog
- for p := (*obj.Prog)(firstp); p != nil; p = p.Link {
+ for p := firstp; p != nil; p = p.Link {
if gc.Debug_checknil != 0 && gc.Ctxt.Debugvlog != 0 {
fmt.Printf("expandchecks: %v\n", p)
}
continue
}
if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
- gc.Warnl(int(p.Lineno), "generated nil check")
+ gc.Warnl(p.Lineno, "generated nil check")
}
if p.From.Type != obj.TYPE_REG {
gc.Fatalf("invalid nil check %v\n", p)
* generate
* as $c, n
*/
-func ginscon(as int, c int64, n2 *gc.Node) {
+func ginscon(as obj.As, c int64, n2 *gc.Node) {
var n1 gc.Node
gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
// generate branch
// n1, n2 are registers
-func ginsbranch(as int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
+func ginsbranch(as obj.As, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
p := gc.Gbranch(as, t, likely)
gc.Naddr(&p.From, n1)
if n2 != nil {
case gc.TFLOAT32:
switch op {
default:
- gc.Fatalf("ginscmp: no entry for op=%v type=%v", gc.Oconv(int(op), 0), t)
+ gc.Fatalf("ginscmp: no entry for op=%v type=%v", gc.Oconv(op, 0), t)
case gc.OEQ,
gc.ONE:
case gc.TFLOAT64:
switch op {
default:
- gc.Fatalf("ginscmp: no entry for op=%v type=%v", gc.Oconv(int(op), 0), t)
+ gc.Fatalf("ginscmp: no entry for op=%v type=%v", gc.Oconv(op, 0), t)
case gc.OEQ,
gc.ONE:
ft := int(gc.Simsimtype(f.Type))
tt := int(gc.Simsimtype(t.Type))
- cvt := (*gc.Type)(t.Type)
+ cvt := t.Type
if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
gc.Complexmove(f, t)
// cannot have two memory operands
var r2 gc.Node
var r1 gc.Node
- var a int
+ var a obj.As
if gc.Ismem(f) && gc.Ismem(t) {
goto hard
}
// gins is called by the front end.
// It synthesizes some multiple-instruction sequences
// so the front end can stay simpler.
-func gins(as int, f, t *gc.Node) *obj.Prog {
+func gins(as obj.As, f, t *gc.Node) *obj.Prog {
if as >= obj.A_ARCHSPECIFIC {
if x, ok := f.IntLiteral(); ok {
ginscon(as, x, t)
* as f, r, t
* r must be register, if not nil
*/
-func gins3(as int, f, r, t *gc.Node) *obj.Prog {
+func gins3(as obj.As, f, r, t *gc.Node) *obj.Prog {
p := rawgins(as, f, t)
if r != nil {
p.Reg = r.Reg
* generate one instruction:
* as f, t
*/
-func rawgins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
+func rawgins(as obj.As, f *gc.Node, t *gc.Node) *obj.Prog {
// TODO(austin): Add self-move test like in 6g (but be careful
// of truncation moves)
/*
* return Axxx for Oxxx on type t.
*/
-func optoas(op gc.Op, t *gc.Type) int {
+func optoas(op gc.Op, t *gc.Type) obj.As {
if t == nil {
gc.Fatalf("optoas: t is nil")
}
OHMUL_ = uint32(gc.OHMUL) << 16
)
- a := int(obj.AXXX)
+ a := obj.AXXX
switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
default:
- gc.Fatalf("optoas: no entry for op=%v type=%v", gc.Oconv(int(op), 0), t)
+ gc.Fatalf("optoas: no entry for op=%v type=%v", gc.Oconv(op, 0), t)
case OEQ_ | gc.TBOOL,
OEQ_ | gc.TINT8,
* after successful sudoaddable,
* to release the register used for a.
*/
-func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
+func sudoaddable(as obj.As, n *gc.Node, a *obj.Addr) bool {
// TODO(minux)
*a = obj.Addr{}
var gactive uint32
func peep(firstp *obj.Prog) {
- g := (*gc.Graph)(gc.Flowstart(firstp, nil))
+ g := gc.Flowstart(firstp, nil)
if g == nil {
return
}
*/
var p1 *obj.Prog
var r1 *gc.Flow
- for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
+ for r := g.Start; r != nil; r = r.Link {
p = r.Prog
switch p.As {
default:
}
func excise(r *gc.Flow) {
- p := (*obj.Prog)(r.Prog)
+ p := r.Prog
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("%v ===delete===\n", p)
}
* above sequences. This returns 1 if it modified any instructions.
*/
func subprop(r0 *gc.Flow) bool {
- p := (*obj.Prog)(r0.Prog)
- v1 := (*obj.Addr)(&p.From)
+ p := r0.Prog
+ v1 := &p.From
if !regtyp(v1) {
return false
}
- v2 := (*obj.Addr)(&p.To)
+ v2 := &p.To
if !regtyp(v2) {
return false
}
}
}
- t := int(int(v1.Reg))
- v1.Reg = v2.Reg
- v2.Reg = int16(t)
+ v1.Reg, v2.Reg = v2.Reg, v1.Reg
if gc.Debug['P'] != 0 {
fmt.Printf("%v last\n", r.Prog)
}
* set v2 return success (caller can remove v1->v2 move)
*/
func copyprop(r0 *gc.Flow) bool {
- p := (*obj.Prog)(r0.Prog)
- v1 := (*obj.Addr)(&p.From)
- v2 := (*obj.Addr)(&p.To)
+ p := r0.Prog
+ v1 := &p.From
+ v2 := &p.To
if copyas(v1, v2) {
if gc.Debug['P'] != 0 {
fmt.Printf("eliminating self-move: %v\n", r0.Prog)
switch p.As {
default:
- fmt.Printf("copyu: can't find %v\n", obj.Aconv(int(p.As)))
+ fmt.Printf("copyu: can't find %v\n", obj.Aconv(p.As))
return 2
case obj.ANOP, /* read p->from, write p->to */
func excludedregs() uint64 {
// Exclude registers with fixed functions
- regbits := uint64(1<<0 | RtoB(mips.REGSP) | RtoB(mips.REGG) | RtoB(mips.REGTMP) | RtoB(mips.REGLINK) | RtoB(mips.REG_R26) | RtoB(mips.REG_R27))
+ regbits := 1<<0 | RtoB(mips.REGSP) | RtoB(mips.REGG) | RtoB(mips.REGTMP) | RtoB(mips.REGLINK) | RtoB(mips.REG_R26) | RtoB(mips.REG_R27)
// Also exclude floating point registers with fixed constants
regbits |= RtoB(mips.FREGZERO) | RtoB(mips.FREGHALF) | RtoB(mips.FREGONE) | RtoB(mips.FREGTWO)
// for example moving [4]byte must use 4 MOVB not 1 MOVW.
align := int(n.Type.Align)
- var op int
+ var op obj.As
switch align {
default:
gc.Fatalf("sgen: invalid alignment %d for %v", align, n.Type)
// the src and dst overlap, then reverse direction
dir := align
- if osrc < odst && int64(odst) < int64(osrc)+w {
+ if osrc < odst && odst < osrc+w {
dir = -dir
}
var MAXWIDTH int64 = 1 << 50
-/*
- * go declares several platform-specific type aliases:
- * int, uint, and uintptr
- */
-var typedefs = []gc.Typedef{
- {"int", gc.TINT, gc.TINT64},
- {"uint", gc.TUINT, gc.TUINT64},
- {"uintptr", gc.TUINTPTR, gc.TUINT64},
-}
-
func betypeinit() {
gc.Widthptr = 8
gc.Widthint = 8
gc.Thearch.Thechar = thechar
gc.Thearch.Thestring = thestring
gc.Thearch.Thelinkarch = thelinkarch
- gc.Thearch.Typedefs = typedefs
gc.Thearch.REGSP = ppc64.REGSP
gc.Thearch.REGCTXT = ppc64.REGCTXT
gc.Thearch.REGCALLX = ppc64.REG_R3
gc.Thearch.Doregbits = doregbits
gc.Thearch.Regnames = regnames
+ initvariants()
+ initproginfo()
+
gc.Main()
gc.Exit(0)
}
return p
}
-func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int64, ttype int, treg int, toffset int64) *obj.Prog {
+func appendpp(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int, foffset int64, ttype obj.AddrType, treg int, toffset int64) *obj.Prog {
q := gc.Ctxt.NewProg()
gc.Clearp(q)
- q.As = int16(as)
+ q.As = as
q.Lineno = p.Lineno
- q.From.Type = int16(ftype)
+ q.From.Type = ftype
q.From.Reg = int16(freg)
q.From.Offset = foffset
- q.To.Type = int16(ttype)
+ q.To.Type = ttype
q.To.Reg = int16(treg)
q.To.Offset = toffset
q.Link = p.Link
nl, nr = nr, nl
}
- t := (*gc.Type)(nl.Type)
- w := int(int(t.Width * 8))
+ t := nl.Type
+ w := t.Width * 8
var n1 gc.Node
gc.Cgenr(nl, &n1, res)
var n2 gc.Node
gc.TINT16,
gc.TINT32:
gins(optoas(gc.OMUL, t), &n2, &n1)
- p := (*obj.Prog)(gins(ppc64.ASRAD, nil, &n1))
+ p := gins(ppc64.ASRAD, nil, &n1)
p.From.Type = obj.TYPE_CONST
- p.From.Offset = int64(w)
+ p.From.Offset = w
case gc.TUINT8,
gc.TUINT16,
gc.TUINT32:
gins(optoas(gc.OMUL, t), &n2, &n1)
- p := (*obj.Prog)(gins(ppc64.ASRD, nil, &n1))
+ p := gins(ppc64.ASRD, nil, &n1)
p.From.Type = obj.TYPE_CONST
- p.From.Offset = int64(w)
+ p.From.Offset = w
case gc.TINT64,
gc.TUINT64:
* res = nl >> nr
*/
func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
- a := int(optoas(op, nl.Type))
+ a := optoas(op, nl.Type)
if nr.Op == gc.OLITERAL {
var n1 gc.Node
if !bounded {
gc.Nodconst(&n3, tcount, nl.Type.Width*8)
gins(optoas(gc.OCMP, tcount), &n1, &n3)
- p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, tcount), nil, +1))
+ p1 := gc.Gbranch(optoas(gc.OLT, tcount), nil, +1)
if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
gins(a, &n3, &n2)
fmt.Printf("clearfat %v (%v, size: %d)\n", nl, nl.Type, nl.Type.Width)
}
- w := uint64(uint64(nl.Type.Width))
+ w := uint64(nl.Type.Width)
// Avoid taking the address for simple enough types.
if gc.Componentgen(nil, nl) {
return
}
- c := uint64(w % 8) // bytes
- q := uint64(w / 8) // dwords
+ c := w % 8 // bytes
+ q := w / 8 // dwords
if gc.Reginuse(ppc64.REGRT1) {
gc.Fatalf("%v in use during clearfat", obj.Rconv(ppc64.REGRT1))
p = gins(ppc64.AMOVDU, &r0, &dst)
p.To.Type = obj.TYPE_MEM
p.To.Offset = 8
- pl := (*obj.Prog)(p)
+ pl := p
p = gins(ppc64.ACMP, &dst, &end)
gc.Patch(gc.Gbranch(ppc64.ABNE, nil, 0), pl)
p := gins(ppc64.ASUB, nil, &dst)
p.From.Type = obj.TYPE_CONST
p.From.Offset = 8
- f := (*gc.Node)(gc.Sysfunc("duffzero"))
+ f := gc.Sysfunc("duffzero")
p = gins(obj.ADUFFZERO, nil, f)
gc.Afunclit(&p.To, f)
var p1 *obj.Prog
var p2 *obj.Prog
- for p := (*obj.Prog)(firstp); p != nil; p = p.Link {
+ for p := firstp; p != nil; p = p.Link {
if gc.Debug_checknil != 0 && gc.Ctxt.Debugvlog != 0 {
fmt.Printf("expandchecks: %v\n", p)
}
continue
}
if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
- gc.Warnl(int(p.Lineno), "generated nil check")
+ gc.Warnl(p.Lineno, "generated nil check")
}
if p.From.Type != obj.TYPE_REG {
gc.Fatalf("invalid nil check %v\n", p)
* generate
* as $c, n
*/
-func ginscon(as int, c int64, n2 *gc.Node) {
+func ginscon(as obj.As, c int64, n2 *gc.Node) {
var n1 gc.Node
gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
* generate
* as n, $c (CMP/CMPU)
*/
-func ginscon2(as int, n2 *gc.Node, c int64) {
+func ginscon2(as obj.As, n2 *gc.Node, c int64) {
var n1 gc.Node
gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
ft := int(gc.Simsimtype(f.Type))
tt := int(gc.Simsimtype(t.Type))
- cvt := (*gc.Type)(t.Type)
+ cvt := t.Type
if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
gc.Complexmove(f, t)
// cannot have two memory operands
var r2 gc.Node
var r1 gc.Node
- var a int
+ var a obj.As
if gc.Ismem(f) && gc.Ismem(t) {
goto hard
}
gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], nil)
gmove(&bigf, &r2)
gins(ppc64.AFCMPU, &r1, &r2)
- p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1))
+ p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1)
gins(ppc64.AFSUB, &r2, &r1)
gc.Patch(p1, gc.Pc)
gc.Regfree(&r2)
var r3 gc.Node
gc.Regalloc(&r3, gc.Types[gc.TINT64], t)
gins(ppc64.AFCTIDZ, &r1, &r2)
- p1 := (*obj.Prog)(gins(ppc64.AFMOVD, &r2, nil))
+ p1 := gins(ppc64.AFMOVD, &r2, nil)
p1.To.Type = obj.TYPE_MEM
p1.To.Reg = ppc64.REGSP
p1.To.Offset = -8
gc.Regfree(&r2)
gc.Regfree(&r1)
if tt == gc.TUINT64 {
- p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1)) // use CR0 here again
+ p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TFLOAT64]), nil, +1) // use CR0 here again
gc.Nodreg(&r1, gc.Types[gc.TINT64], ppc64.REGTMP)
gins(ppc64.AMOVD, &bigi, &r1)
gins(ppc64.AADD, &r1, &r3)
gc.Nodreg(&r2, gc.Types[gc.TUINT64], ppc64.REGTMP)
gmove(&bigi, &r2)
gins(ppc64.ACMPU, &r1, &r2)
- p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1))
- p2 := (*obj.Prog)(gins(ppc64.ASRD, nil, &r1))
+ p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1)
+ p2 := gins(ppc64.ASRD, nil, &r1)
p2.From.Type = obj.TYPE_CONST
p2.From.Offset = 1
gc.Patch(p1, gc.Pc)
}
gc.Regalloc(&r2, gc.Types[gc.TFLOAT64], t)
- p1 := (*obj.Prog)(gins(ppc64.AMOVD, &r1, nil))
+ p1 := gins(ppc64.AMOVD, &r1, nil)
p1.To.Type = obj.TYPE_MEM
p1.To.Reg = ppc64.REGSP
p1.To.Offset = -8
gins(ppc64.AFCFID, &r2, &r2)
gc.Regfree(&r1)
if ft == gc.TUINT64 {
- p1 := (*obj.Prog)(gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1)) // use CR0 here again
+ p1 := gc.Gbranch(optoas(gc.OLT, gc.Types[gc.TUINT64]), nil, +1) // use CR0 here again
gc.Nodreg(&r1, gc.Types[gc.TFLOAT64], ppc64.FREGTWO)
gins(ppc64.AFMUL, &r1, &r2)
gc.Patch(p1, gc.Pc)
// gins is called by the front end.
// It synthesizes some multiple-instruction sequences
// so the front end can stay simpler.
-func gins(as int, f, t *gc.Node) *obj.Prog {
+func gins(as obj.As, f, t *gc.Node) *obj.Prog {
if as >= obj.A_ARCHSPECIFIC {
if x, ok := f.IntLiteral(); ok {
ginscon(as, x, t)
* generate one instruction:
* as f, t
*/
-func rawgins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
+func rawgins(as obj.As, f *gc.Node, t *gc.Node) *obj.Prog {
// TODO(austin): Add self-move test like in 6g (but be careful
// of truncation moves)
/*
* return Axxx for Oxxx on type t.
*/
-func optoas(op gc.Op, t *gc.Type) int {
+func optoas(op gc.Op, t *gc.Type) obj.As {
if t == nil {
gc.Fatalf("optoas: t is nil")
}
OHMUL_ = uint32(gc.OHMUL) << 16
)
- a := int(obj.AXXX)
+ a := obj.AXXX
switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
default:
- gc.Fatalf("optoas: no entry for op=%v type=%v", gc.Oconv(int(op), 0), t)
+ gc.Fatalf("optoas: no entry for op=%v type=%v", gc.Oconv(op, 0), t)
case OEQ_ | gc.TBOOL,
OEQ_ | gc.TINT8,
* after successful sudoaddable,
* to release the register used for a.
*/
-func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
+func sudoaddable(as obj.As, n *gc.Node, a *obj.Addr) bool {
// TODO(minux)
*a = obj.Addr{}
var gactive uint32
func peep(firstp *obj.Prog) {
- g := (*gc.Graph)(gc.Flowstart(firstp, nil))
+ g := gc.Flowstart(firstp, nil)
if g == nil {
return
}
var p *obj.Prog
var r *gc.Flow
- var t int
+ var t obj.As
loop1:
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
gc.Dumpit("loop1", g.Start, 0)
*/
var p1 *obj.Prog
var r1 *gc.Flow
- for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
+ for r := g.Start; r != nil; r = r.Link {
p = r.Prog
switch p.As {
default:
* look for OP x,y,R; CMP R, $0 -> OPCC x,y,R
* when OP can set condition codes correctly
*/
- for r := (*gc.Flow)(g.Start); r != nil; r = r.Link {
+ for r := g.Start; r != nil; r = r.Link {
p = r.Prog
switch p.As {
case ppc64.ACMP,
ppc64.ASUBZE,
ppc64.ASUBZEV,
ppc64.AXOR:
- t = variant2as(int(p1.As), as2variant(int(p1.As))|V_CC)
+ t = variant2as(p1.As, as2variant(p1.As)|V_CC)
}
if gc.Debug['D'] != 0 {
fmt.Printf("cmp %v; %v -> ", p1, p)
}
- p1.As = int16(t)
+ p1.As = t
if gc.Debug['D'] != 0 {
fmt.Printf("%v\n", p1)
}
}
func excise(r *gc.Flow) {
- p := (*obj.Prog)(r.Prog)
+ p := r.Prog
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("%v ===delete===\n", p)
}
* above sequences. This returns 1 if it modified any instructions.
*/
func subprop(r0 *gc.Flow) bool {
- p := (*obj.Prog)(r0.Prog)
- v1 := (*obj.Addr)(&p.From)
+ p := r0.Prog
+ v1 := &p.From
if !regtyp(v1) {
return false
}
- v2 := (*obj.Addr)(&p.To)
+ v2 := &p.To
if !regtyp(v2) {
return false
}
}
}
- t := int(int(v1.Reg))
- v1.Reg = v2.Reg
- v2.Reg = int16(t)
+ v1.Reg, v2.Reg = v2.Reg, v1.Reg
if gc.Debug['P'] != 0 {
fmt.Printf("%v last\n", r.Prog)
}
* set v2 return success (caller can remove v1->v2 move)
*/
func copyprop(r0 *gc.Flow) bool {
- p := (*obj.Prog)(r0.Prog)
- v1 := (*obj.Addr)(&p.From)
- v2 := (*obj.Addr)(&p.To)
+ p := r0.Prog
+ v1 := &p.From
+ v2 := &p.To
if copyas(v1, v2) {
if gc.Debug['P'] != 0 {
fmt.Printf("eliminating self-move: %v\n", r0.Prog)
switch p.As {
default:
- fmt.Printf("copyu: can't find %v\n", obj.Aconv(int(p.As)))
+ fmt.Printf("copyu: can't find %v\n", obj.Aconv(p.As))
return 2
case obj.ANOP, /* read p->from, write p->to */
obj.ADUFFCOPY: {Flags: gc.Call},
}
-var initproginfo_initialized int
-
func initproginfo() {
var addvariant = []int{V_CC, V_V, V_CC | V_V}
- if initproginfo_initialized != 0 {
- return
- }
- initproginfo_initialized = 1
-
// Perform one-time expansion of instructions in progtable to
// their CC, V, and VCC variants
- var as2 int
- var i int
- var variant int
- for as := int(0); as < len(progtable); as++ {
+ for i := range progtable {
+ as := obj.As(i)
if progtable[as].Flags == 0 {
continue
}
- variant = as2variant(as)
- for i = 0; i < len(addvariant); i++ {
- as2 = variant2as(as, variant|addvariant[i])
+ variant := as2variant(as)
+ for i := range addvariant {
+ as2 := variant2as(as, variant|addvariant[i])
if as2 != 0 && progtable[as2].Flags == 0 {
progtable[as2] = progtable[as]
}
}
func proginfo(p *obj.Prog) {
- initproginfo()
-
info := &p.Info
*info = progtable[p.As]
if info.Flags == 0 {
}
// Instruction variants table. Initially this contains entries only
-// for the "base" form of each instruction. On the first call to
-// as2variant or variant2as, we'll add the variants to the table.
-var varianttable = [ppc64.ALAST][4]int{
+// for the "base" form of each instruction.
+// This table is completed by calling initvariants in Main.
+var varianttable = [ppc64.ALAST][4]obj.As{
ppc64.AADD: {ppc64.AADD, ppc64.AADDCC, ppc64.AADDV, ppc64.AADDVCC},
ppc64.AADDC: {ppc64.AADDC, ppc64.AADDCCC, ppc64.AADDCV, ppc64.AADDCVCC},
ppc64.AADDE: {ppc64.AADDE, ppc64.AADDECC, ppc64.AADDEV, ppc64.AADDEVCC},
ppc64.AXOR: {ppc64.AXOR, ppc64.AXORCC, 0, 0},
}
-var initvariants_initialized int
-
func initvariants() {
- if initvariants_initialized != 0 {
- return
- }
- initvariants_initialized = 1
-
- var j int
- for i := int(0); i < len(varianttable); i++ {
+ for i := range varianttable {
if varianttable[i][0] == 0 {
// Instruction has no variants
- varianttable[i][0] = i
-
+ varianttable[i][0] = obj.As(i)
continue
}
// Copy base form to other variants
- if varianttable[i][0] == i {
- for j = 0; j < len(varianttable[i]); j++ {
+ if varianttable[i][0] == obj.As(i) {
+ for j := range varianttable[i] {
varianttable[varianttable[i][j]] = varianttable[i]
}
}
}
// as2variant returns the variant (V_*) flags of instruction as.
-func as2variant(as int) int {
- initvariants()
- for i := int(0); i < len(varianttable[as]); i++ {
+func as2variant(as obj.As) int {
+ for i := range varianttable[as] {
if varianttable[as][i] == as {
return i
}
// variant2as returns the instruction as with the given variant (V_*) flags.
// If no such variant exists, this returns 0.
-func variant2as(as int, flags int) int {
- initvariants()
+func variant2as(as obj.As, flags int) obj.As {
return varianttable[as][flags]
}
func excludedregs() uint64 {
// Exclude registers with fixed functions
- regbits := uint64(1<<0 | RtoB(ppc64.REGSP) | RtoB(ppc64.REGG) | RtoB(ppc64.REGTLS) | RtoB(ppc64.REGTMP))
+ regbits := 1<<0 | RtoB(ppc64.REGSP) | RtoB(ppc64.REGG) | RtoB(ppc64.REGTLS) | RtoB(ppc64.REGTMP)
if gc.Ctxt.Flag_shared != 0 {
// When compiling Go into PIC, R2 is reserved to be the TOC pointer
Optimizations (better compiler)
-------------------------------
-- OpStore uses 3 args. Increase the size of Value.argstorage to 3?
-- Use a constant cache for OpConstNil, OpConstInterface, OpConstSlice, maybe OpConstString
- Handle signed division overflow and sign extension earlier
Regalloc
// Run all the passes
printFunc(f)
f.Config.HTML.WriteFunc("start", f)
- checkFunc(f)
+ if checkEnabled {
+ checkFunc(f)
+ }
const logMemStats = false
for _, p := range passes {
if !f.Config.optimize && !p.required {
f.logStat("TIME(ns):BYTES:ALLOCS", time, nBytes, nAllocs)
}
}
- checkFunc(f)
+ if checkEnabled {
+ checkFunc(f)
+ }
}
// Squash error printing defer
test int // pass-specific ad-hoc option, perhaps useful in development
}
+// Run consistency checker between each phase
+var checkEnabled = true
+
// PhaseOption sets the specified flag in the specified ssa phase,
// returning empty string if this was successful or a string explaining
// the error if it was not. A version of the phase name with "_"
// GO_GCFLAGS=-d=ssa/generic_cse/time,ssa/generic_cse/stats,ssa/generic_cse/debug=3 ./make.bash ...
//
func PhaseOption(phase, flag string, val int) string {
+ if phase == "check" && flag == "on" {
+ checkEnabled = val != 0
+ return ""
+ }
+ if phase == "check" && flag == "off" {
+ checkEnabled = val == 0
+ return ""
+ }
underphase := strings.Replace(phase, "_", " ", -1)
for i, p := range passes {
if p.name == phase || p.name == underphase {
{name: "phiopt", fn: phiopt},
{name: "nilcheckelim", fn: nilcheckelim},
{name: "prove", fn: prove},
+ {name: "dec", fn: dec, required: true},
+ {name: "late opt", fn: opt}, // TODO: split required rules and optimizing rules
{name: "generic deadcode", fn: deadcode},
{name: "fuse", fn: fuse},
{name: "dse", fn: dse},
)
type Config struct {
- arch string // "amd64", etc.
- IntSize int64 // 4 or 8
- PtrSize int64 // 4 or 8
- lowerBlock func(*Block) bool // lowering function
- lowerValue func(*Value, *Config) bool // lowering function
- fe Frontend // callbacks into compiler frontend
- HTML *HTMLWriter // html writer, for debugging
- ctxt *obj.Link // Generic arch information
- optimize bool // Do optimization
- curFunc *Func
+ arch string // "amd64", etc.
+ IntSize int64 // 4 or 8
+ PtrSize int64 // 4 or 8
+ lowerBlock func(*Block) bool // lowering function
+ lowerValue func(*Value, *Config) bool // lowering function
+ fe Frontend // callbacks into compiler frontend
+ HTML *HTMLWriter // html writer, for debugging
+ ctxt *obj.Link // Generic arch information
+ optimize bool // Do optimization
+ noDuffDevice bool // Don't use Duff's device
+ curFunc *Func
// TODO: more stuff. Compiler flags of interest, ...
c.ctxt = ctxt
c.optimize = optimize
+ // Don't use Duff's device on Plan 9, because floating
+ // point operations are not allowed in note handler.
+ if obj.Getgoos() == "plan9" {
+ c.noDuffDevice = true
+ }
+
// Assign IDs to preallocated values/blocks.
for i := range c.values {
c.values[i].ID = ID(i)
}
// constVal returns a constant value for c.
-func (f *Func) constVal(line int32, op Op, t Type, c int64) *Value {
+func (f *Func) constVal(line int32, op Op, t Type, c int64, setAux bool) *Value {
if f.constants == nil {
f.constants = make(map[int64][]*Value)
}
return v
}
}
- v := f.Entry.NewValue0I(line, op, t, c)
+ var v *Value
+ if setAux {
+ v = f.Entry.NewValue0I(line, op, t, c)
+ } else {
+ v = f.Entry.NewValue0(line, op, t)
+ }
f.constants[c] = append(vv, v)
return v
}
+// These magic auxint values let us easily cache non-numeric constants
+// using the same constants map while making collisions unlikely.
+// These values are unlikely to occur in regular code and
+// are easy to grep for in case of bugs.
+const (
+ constSliceMagic = 1122334455
+ constInterfaceMagic = 2233445566
+ constNilMagic = 3344556677
+ constEmptyStringMagic = 4455667788
+)
+
// ConstInt returns an int constant representing its argument.
func (f *Func) ConstBool(line int32, t Type, c bool) *Value {
i := int64(0)
if c {
i = 1
}
- return f.constVal(line, OpConstBool, t, i)
+ return f.constVal(line, OpConstBool, t, i, true)
}
func (f *Func) ConstInt8(line int32, t Type, c int8) *Value {
- return f.constVal(line, OpConst8, t, int64(c))
+ return f.constVal(line, OpConst8, t, int64(c), true)
}
func (f *Func) ConstInt16(line int32, t Type, c int16) *Value {
- return f.constVal(line, OpConst16, t, int64(c))
+ return f.constVal(line, OpConst16, t, int64(c), true)
}
func (f *Func) ConstInt32(line int32, t Type, c int32) *Value {
- return f.constVal(line, OpConst32, t, int64(c))
+ return f.constVal(line, OpConst32, t, int64(c), true)
}
func (f *Func) ConstInt64(line int32, t Type, c int64) *Value {
- return f.constVal(line, OpConst64, t, c)
+ return f.constVal(line, OpConst64, t, c, true)
}
func (f *Func) ConstFloat32(line int32, t Type, c float64) *Value {
- return f.constVal(line, OpConst32F, t, int64(math.Float64bits(c)))
+ return f.constVal(line, OpConst32F, t, int64(math.Float64bits(c)), true)
}
func (f *Func) ConstFloat64(line int32, t Type, c float64) *Value {
- return f.constVal(line, OpConst64F, t, int64(math.Float64bits(c)))
+ return f.constVal(line, OpConst64F, t, int64(math.Float64bits(c)), true)
+}
+
+func (f *Func) ConstSlice(line int32, t Type) *Value {
+ return f.constVal(line, OpConstSlice, t, constSliceMagic, false)
+}
+func (f *Func) ConstInterface(line int32, t Type) *Value {
+ return f.constVal(line, OpConstInterface, t, constInterfaceMagic, false)
+}
+func (f *Func) ConstNil(line int32, t Type) *Value {
+ return f.constVal(line, OpConstNil, t, constNilMagic, false)
+}
+func (f *Func) ConstEmptyString(line int32, t Type) *Value {
+ v := f.constVal(line, OpConstString, t, constEmptyStringMagic, false)
+ v.Aux = ""
+ return v
}
func (f *Func) Logf(msg string, args ...interface{}) { f.Config.Logf(msg, args...) }
(MOVOstore dst (MOVOload src mem) mem))
// Medium copying uses a duff device.
-(Move [size] dst src mem) && size >= 32 && size <= 16*64 && size%16 == 0 ->
+(Move [size] dst src mem) && size >= 32 && size <= 16*64 && size%16 == 0 && !config.noDuffDevice ->
(DUFFCOPY [14*(64-size/16)] dst src mem)
// 14 and 64 are magic constants. 14 is the number of bytes to encode:
// MOVUPS (SI), X0
// and 64 is the number of such blocks. See src/runtime/duff_amd64.s:duffcopy.
// Large copying uses REP MOVSQ.
-(Move [size] dst src mem) && size > 16*64 && size%8 == 0 ->
+(Move [size] dst src mem) && (size > 16*64 || config.noDuffDevice) && size%8 == 0 ->
(REPMOVSQ dst src (MOVQconst [size/8]) mem)
(Not x) -> (XORBconst [1] x)
(MOVQstoreconst [0] destptr mem))))
// Medium zeroing uses a duff device.
-(Zero [size] destptr mem) && size <= 1024 && size%8 == 0 && size%16 != 0 ->
+(Zero [size] destptr mem) && size <= 1024 && size%8 == 0 && size%16 != 0 && !config.noDuffDevice ->
(Zero [size-8] (ADDQconst [8] destptr) (MOVQstore destptr (MOVQconst [0]) mem))
-(Zero [size] destptr mem) && size <= 1024 && size%16 == 0 ->
+(Zero [size] destptr mem) && size <= 1024 && size%16 == 0 && !config.noDuffDevice ->
(DUFFZERO [duffStart(size)] (ADDQconst [duffAdj(size)] destptr) (MOVOconst [0]) mem)
// Large zeroing uses REP STOSQ.
-(Zero [size] destptr mem) && size > 1024 && size%8 == 0 ->
+(Zero [size] destptr mem) && (size > 1024 || (config.noDuffDevice && size > 32)) && size%8 == 0 ->
(REPSTOSQ destptr (MOVQconst [size/8]) (MOVQconst [0]) mem)
// Absorb InvertFlags into branches.
(CMPLconst (ANDLconst [c] x) [0]) -> (TESTLconst [c] x)
(CMPWconst (ANDWconst [c] x) [0]) -> (TESTWconst [c] x)
(CMPBconst (ANDBconst [c] x) [0]) -> (TESTBconst [c] x)
+
+// Combining byte loads into larger (unaligned) loads.
+// There are many ways these combinations could occur. This is
+// designed to match the way encoding/binary.LittleEndian does it.
+(ORW (MOVBQZXload [i] {s} p mem)
+ (SHLWconst [8] (MOVBQZXload [i+1] {s} p mem))) -> (MOVWload (ADDQconst [i] p) mem)
+
+(ORL (ORL (ORL
+ (MOVBQZXload [i] {s} p mem)
+ (SHLLconst [8] (MOVBQZXload [i+1] {s} p mem)))
+ (SHLLconst [16] (MOVBQZXload [i+2] {s} p mem)))
+ (SHLLconst [24] (MOVBQZXload [i+3] {s} p mem))) -> (MOVLload (ADDQconst [i] p) mem)
+
+(ORQ (ORQ (ORQ (ORQ (ORQ (ORQ (ORQ
+ (MOVBQZXload [i] {s} p mem)
+ (SHLQconst [8] (MOVBQZXload [i+1] {s} p mem)))
+ (SHLQconst [16] (MOVBQZXload [i+2] {s} p mem)))
+ (SHLQconst [24] (MOVBQZXload [i+3] {s} p mem)))
+ (SHLQconst [32] (MOVBQZXload [i+4] {s} p mem)))
+ (SHLQconst [40] (MOVBQZXload [i+5] {s} p mem)))
+ (SHLQconst [48] (MOVBQZXload [i+6] {s} p mem)))
+ (SHLQconst [56] (MOVBQZXload [i+7] {s} p mem))) -> (MOVQload (ADDQconst [i] p) mem)
// auxint+aux == add auxint and the offset of the symbol in aux (if any) to the effective address
{name: "MOVBload", argLength: 2, reg: gpload, asm: "MOVBLZX", aux: "SymOff", typ: "UInt8"}, // load byte from arg0+auxint+aux. arg1=mem
{name: "MOVBQSXload", argLength: 2, reg: gpload, asm: "MOVBQSX", aux: "SymOff"}, // ditto, extend to int64
- {name: "MOVBQZXload", argLength: 2, reg: gpload, asm: "MOVBQZX", aux: "SymOff"}, // ditto, extend to uint64
+ {name: "MOVBQZXload", argLength: 2, reg: gpload, asm: "MOVBLZX", aux: "SymOff"}, // ditto, extend to uint64
{name: "MOVWload", argLength: 2, reg: gpload, asm: "MOVWLZX", aux: "SymOff", typ: "UInt16"}, // load 2 bytes from arg0+auxint+aux. arg1=mem
{name: "MOVWQSXload", argLength: 2, reg: gpload, asm: "MOVWQSX", aux: "SymOff"}, // ditto, extend to int64
- {name: "MOVWQZXload", argLength: 2, reg: gpload, asm: "MOVWQZX", aux: "SymOff"}, // ditto, extend to uint64
+ {name: "MOVWQZXload", argLength: 2, reg: gpload, asm: "MOVWLZX", aux: "SymOff"}, // ditto, extend to uint64
{name: "MOVLload", argLength: 2, reg: gpload, asm: "MOVL", aux: "SymOff", typ: "UInt32"}, // load 4 bytes from arg0+auxint+aux. arg1=mem
{name: "MOVLQSXload", argLength: 2, reg: gpload, asm: "MOVLQSX", aux: "SymOff"}, // ditto, extend to int64
- {name: "MOVLQZXload", argLength: 2, reg: gpload, asm: "MOVLQZX", aux: "SymOff"}, // ditto, extend to uint64
+ {name: "MOVLQZXload", argLength: 2, reg: gpload, asm: "MOVL", aux: "SymOff"}, // ditto, extend to uint64
{name: "MOVQload", argLength: 2, reg: gpload, asm: "MOVQ", aux: "SymOff", typ: "UInt64"}, // load 8 bytes from arg0+auxint+aux. arg1=mem
{name: "MOVBstore", argLength: 3, reg: gpstore, asm: "MOVB", aux: "SymOff", typ: "Mem"}, // store byte in arg1 to arg0+auxint+aux. arg2=mem
{name: "MOVWstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem"}, // store 2 bytes in arg1 to arg0+auxint+aux. arg2=mem
--- /dev/null
+(StringPtr (StringMake ptr _)) -> ptr
+(StringLen (StringMake _ len)) -> len
+
+(SlicePtr (SliceMake ptr _ _ )) -> ptr
+(SliceLen (SliceMake _ len _)) -> len
+(SliceCap (SliceMake _ _ cap)) -> cap
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+var decOps = []opData{}
+
+var decBlocks = []blockData{}
+
+func init() {
+ archs = append(archs, arch{"dec", decOps, decBlocks, nil})
+}
// succ* fields must be variables
// For now, the generated successors must be a permutation of the matched successors.
+// Simplify nil checks.
+// These are inserted by range loops, e.g. for _, e := range a {}.
+(NilCheck (Phi x (Add64 (Const64 [c]) y)) mem) && c > 0 && v.Args[0] == y -> (NilCheck x mem)
+
// constant folding
(Trunc16to8 (Const16 [c])) -> (Const8 [int64(int8(c))])
(Trunc32to8 (Const32 [c])) -> (Const8 [int64(int8(c))])
(Rsh8x64 (Const8 [0]) _) -> (Const8 [0])
(Rsh8Ux64 (Const8 [0]) _) -> (Const8 [0])
+(IsInBounds (And32 (Const32 [c]) _) (Const32 [d])) && inBounds32(c, d) -> (ConstBool [1])
+(IsInBounds (And64 (Const64 [c]) _) (Const64 [d])) && inBounds64(c, d) -> (ConstBool [1])
(IsInBounds (Const32 [c]) (Const32 [d])) -> (ConstBool [b2i(inBounds32(c,d))])
(IsInBounds (Const64 [c]) (Const64 [d])) -> (ConstBool [b2i(inBounds64(c,d))])
+(IsSliceInBounds (And32 (Const32 [c]) _) (Const32 [d])) && sliceInBounds32(c, d) -> (ConstBool [1])
+(IsSliceInBounds (And64 (Const64 [c]) _) (Const64 [d])) && sliceInBounds64(c, d) -> (ConstBool [1])
(IsSliceInBounds (Const32 [c]) (Const32 [d])) -> (ConstBool [b2i(sliceInBounds32(c,d))])
(IsSliceInBounds (Const64 [c]) (Const64 [d])) -> (ConstBool [b2i(sliceInBounds64(c,d))])
(Neg32 (Sub32 x y)) -> (Sub32 y x)
(Neg64 (Sub64 x y)) -> (Sub64 y x)
-// Rewrite AND of consts as shifts if possible, slightly faster for 32/64 bit operands
+(Trunc64to8 (And64 (Const64 [y]) x)) && y&0xFF == 0xFF -> (Trunc64to8 x)
+(Trunc64to16 (And64 (Const64 [y]) x)) && y&0xFFFF == 0xFFFF -> (Trunc64to16 x)
+(Trunc64to32 (And64 (Const64 [y]) x)) && y&0xFFFFFFFF == 0xFFFFFFFF -> (Trunc64to32 x)
+(Trunc32to8 (And32 (Const32 [y]) x)) && y&0xFF == 0xFF -> (Trunc32to8 x)
+(Trunc32to16 (And32 (Const32 [y]) x)) && y&0xFFFF == 0xFFFF -> (Trunc32to16 x)
+(Trunc16to8 (And16 (Const16 [y]) x)) && y&0xFF == 0xFF -> (Trunc16to8 x)
+
+// Rewrite AND of consts as shifts if possible, slightly faster for 64 bit operands
// leading zeros can be shifted left, then right
-(And64 <t> (Const64 [y]) x) && nlz(y) + nto(y) == 64 -> (Rsh64Ux64 (Lsh64x64 <t> x (Const64 <t> [nlz(y)])) (Const64 <t> [nlz(y)]))
-(And32 <t> (Const32 [y]) x) && nlz(int64(int32(y))) + nto(int64(int32(y))) == 64 -> (Rsh32Ux32 (Lsh32x32 <t> x (Const32 <t> [nlz(int64(int32(y)))-32])) (Const32 <t> [nlz(int64(int32(y)))-32]))
+(And64 <t> (Const64 [y]) x) && nlz(y) + nto(y) == 64 && nto(y) >= 32 -> (Rsh64Ux64 (Lsh64x64 <t> x (Const64 <t> [nlz(y)])) (Const64 <t> [nlz(y)]))
// trailing zeros can be shifted right, then left
-(And64 <t> (Const64 [y]) x) && nlo(y) + ntz(y) == 64 -> (Lsh64x64 (Rsh64Ux64 <t> x (Const64 <t> [ntz(y)])) (Const64 <t> [ntz(y)]))
-(And32 <t> (Const32 [y]) x) && nlo(int64(int32(y))) + ntz(int64(int32(y))) == 64 -> (Lsh32x32 (Rsh32Ux32 <t> x (Const32 <t> [ntz(int64(int32(y)))])) (Const32 <t> [ntz(int64(int32(y)))]))
+(And64 <t> (Const64 [y]) x) && nlo(y) + ntz(y) == 64 && ntz(y) >= 32 -> (Lsh64x64 (Rsh64Ux64 <t> x (Const64 <t> [ntz(y)])) (Const64 <t> [ntz(y)]))
// simplifications often used for lengths. e.g. len(s[i:i+5])==5
(Sub64 (Add64 x y) x) -> y
// Load of store of same address, with compatibly typed value and same size
(Load <t1> p1 (Store [w] p2 x _)) && isSamePtr(p1,p2) && t1.Compare(x.Type)==CMPeq && w == t1.Size() -> x
+// Collapse OffPtr
+(OffPtr (OffPtr p [b]) [a]) -> (OffPtr p [a+b])
+
// indexing operations
// Note: bounds check has already been done
(Store [8] dst real mem))
// string ops
-(StringPtr (StringMake ptr _)) -> ptr
-(StringLen (StringMake _ len)) -> len
+(StringPtr (StringMake (Const64 <t> [c]) _)) -> (Const64 <t> [c])
+(StringLen (StringMake _ (Const64 <t> [c]))) -> (Const64 <t> [c])
(ConstString {s}) && config.PtrSize == 4 && s.(string) == "" ->
(StringMake (ConstNil) (Const32 <config.fe.TypeInt()> [0]))
(ConstString {s}) && config.PtrSize == 8 && s.(string) == "" ->
(Store [config.PtrSize] dst ptr mem))
// slice ops
-(SlicePtr (SliceMake ptr _ _ )) -> ptr
-(SliceLen (SliceMake _ len _)) -> len
-(SliceCap (SliceMake _ _ cap)) -> cap
+(SlicePtr (SliceMake (Const64 <t> [c]) _ _)) -> (Const64 <t> [c])
+(SliceLen (SliceMake _ (Const64 <t> [c]) _)) -> (Const64 <t> [c])
+(SliceCap (SliceMake _ _ (Const64 <t> [c]))) -> (Const64 <t> [c])
(ConstSlice) && config.PtrSize == 4 ->
(SliceMake
(ConstNil <config.fe.TypeBytePtr()>)
//(Mod64 n (Const64 [1])) -> (Const64 [0])
//(Mod64u n (Const64 [1])) -> (Const64 [0])
-// Unsigned divide by power of 2. Currently handled by frontend.
-//(Div64u <t> n (Const64 [c])) && isPowerOfTwo(c) -> (Rsh64Ux64 n (Const64 <t> [log2(c)]))
-//(Mod64u <t> n (Const64 [c])) && isPowerOfTwo(c) -> (And64 n (Const64 <t> [c-1]))
+// Unsigned divide by power of 2.
+(Div64u <t> n (Const64 [c])) && isPowerOfTwo(c) -> (Rsh64Ux64 n (Const64 <t> [log2(c)]))
+(Mod64u <t> n (Const64 [c])) && isPowerOfTwo(c) -> (And64 n (Const64 <t> [c-1]))
// Signed divide by power of 2. Currently handled by frontend.
// n / c = n >> log(c) if n >= 0
fmt.Fprintln(w)
fmt.Fprintln(w, "package ssa")
- fmt.Fprintln(w, "import \"cmd/internal/obj/x86\"")
+ fmt.Fprintln(w, "import (")
+ fmt.Fprintln(w, "\"cmd/internal/obj\"")
+ fmt.Fprintln(w, "\"cmd/internal/obj/x86\"")
+ fmt.Fprintln(w, ")")
// generate Block* declarations
fmt.Fprintln(w, "const (")
}
fmt.Fprintln(w, "}")
- fmt.Fprintln(w, "func (o Op) Asm() int {return opcodeTable[o].asm}")
+ fmt.Fprintln(w, "func (o Op) Asm() obj.As {return opcodeTable[o].asm}")
// generate op string method
fmt.Fprintln(w, "func (o Op) String() string {return opcodeTable[o].name }")
fmt.Fprintf(w, "if %s.Type != %s {\nbreak\n}\n", v, t)
} else {
// variable
- if u, ok := m[t]; ok {
+ if _, ok := m[t]; ok {
// must match previous variable
- fmt.Fprintf(w, "if %s.Type != %s {\nbreak\n}\n", v, u)
+ fmt.Fprintf(w, "if %s.Type != %s {\nbreak\n}\n", v, t)
} else {
m[t] = v + ".Type"
fmt.Fprintf(w, "%s := %s.Type\n", t, v)
fmt.Fprintf(w, "if %s.AuxInt != %s {\nbreak\n}\n", v, x)
} else {
// variable
- if y, ok := m[x]; ok {
- fmt.Fprintf(w, "if %s.AuxInt != %s {\nbreak\n}\n", v, y)
+ if _, ok := m[x]; ok {
+ fmt.Fprintf(w, "if %s.AuxInt != %s {\nbreak\n}\n", v, x)
} else {
m[x] = v + ".AuxInt"
fmt.Fprintf(w, "%s := %s.AuxInt\n", x, v)
fmt.Fprintf(w, "if %s.Aux != %s {\nbreak\n}\n", v, x)
} else {
// variable
- if y, ok := m[x]; ok {
- fmt.Fprintf(w, "if %s.Aux != %s {\nbreak\n}\n", v, y)
+ if _, ok := m[x]; ok {
+ fmt.Fprintf(w, "if %s.Aux != %s {\nbreak\n}\n", v, x)
} else {
m[x] = v + ".Aux"
fmt.Fprintf(w, "%s := %s.Aux\n", x, v)
package ssa
-import "fmt"
+import (
+ "cmd/internal/obj"
+ "fmt"
+)
// An Op encodes the specific operation that a Value performs.
// Opcodes' semantics can be modified by the type and aux fields of the Value.
type opInfo struct {
name string
- asm int
reg regInfo
auxType auxType
argLen int32 // the number of arguments, -1 if variable length
- generic bool // this is a generic (arch-independent) opcode
- rematerializeable bool // this op is rematerializeable
- commutative bool // this operation is commutative (e.g. addition)
+ asm obj.As
+ generic bool // this is a generic (arch-independent) opcode
+ rematerializeable bool // this op is rematerializeable
+ commutative bool // this operation is commutative (e.g. addition)
}
type inputInfo struct {
package ssa
-import "cmd/internal/obj/x86"
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/x86"
+)
const (
BlockInvalid BlockKind = iota
name: "MOVBQZXload",
auxType: auxSymOff,
argLen: 2,
- asm: x86.AMOVBQZX,
+ asm: x86.AMOVBLZX,
reg: regInfo{
inputs: []inputInfo{
{0, 4295032831}, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
name: "MOVWQZXload",
auxType: auxSymOff,
argLen: 2,
- asm: x86.AMOVWQZX,
+ asm: x86.AMOVWLZX,
reg: regInfo{
inputs: []inputInfo{
{0, 4295032831}, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
name: "MOVLQZXload",
auxType: auxSymOff,
argLen: 2,
- asm: x86.AMOVLQZX,
+ asm: x86.AMOVL,
reg: regInfo{
inputs: []inputInfo{
{0, 4295032831}, // .AX .CX .DX .BX .SP .BP .SI .DI .R8 .R9 .R10 .R11 .R12 .R13 .R14 .R15 .SB
},
}
-func (o Op) Asm() int { return opcodeTable[o].asm }
+func (o Op) Asm() obj.As { return opcodeTable[o].asm }
func (o Op) String() string { return opcodeTable[o].name }
func opt(f *Func) {
applyRewrite(f, rewriteBlockgeneric, rewriteValuegeneric)
}
+
+func dec(f *Func) {
+ applyRewrite(f, rewriteBlockdec, rewriteValuedec)
+}
p.endBlock(b)
}
- for name, vals := range f.NamedValues {
- p.named(name, vals)
+ for _, name := range f.Names {
+ p.named(name, f.NamedValues[name])
}
}
const regDebug = false // TODO: compiler flag
const logSpills = false
+// distance is a measure of how far into the future values are used.
+// distance is measured in units of instructions.
+const (
+ likelyDistance = 1
+ normalDistance = 10
+ unlikelyDistance = 100
+)
+
// regalloc performs register allocation on f. It sets f.RegAlloc
// to the resulting allocation.
func regalloc(f *Func) {
// compatRegs returns the set of registers which can store a type t.
func (s *regAllocState) compatRegs(t Type) regMask {
var m regMask
- if t.IsFloat() {
+ if t.IsFloat() || t == TypeInt128 {
m = 0xffff << 16 // X0-X15
} else {
m = 0xffef << 0 // AX-R15, except SP
// Initialize liveSet and uses fields for this block.
// Walk backwards through the block doing liveness analysis.
liveSet.clear()
+ d := int32(len(b.Values))
+ if b.Kind == BlockCall {
+ d += unlikelyDistance
+ }
for _, e := range s.live[b.ID] {
- s.addUse(e.ID, int32(len(b.Values))+e.dist) // pseudo-uses from beyond end of block
+ s.addUse(e.ID, d+e.dist) // pseudo-uses from beyond end of block
liveSet.add(e.ID)
}
if v := b.Control; v != nil && s.values[v.ID].needReg {
}
}
+ // Load control value into reg.
if v := b.Control; v != nil && s.values[v.ID].needReg {
if regDebug {
fmt.Printf(" processing control %s\n", v.LongString())
}
- // Load control value into reg.
// TODO: regspec for block control values, instead of using
// register set from the control op's output.
s.allocValToReg(v, opcodeTable[v.Op].reg.outputs[0], false, b.Line)
s.freeUseRecords = u
}
+ // If we are approaching a merge point and we are the primary
+ // predecessor of it, find live values that we use soon after
+ // the merge point and promote them to registers now.
+ if len(b.Succs) == 1 && len(b.Succs[0].Preds) > 1 && b.Succs[0].Preds[s.primary[b.Succs[0].ID]] == b {
+ // For this to be worthwhile, the loop must have no calls in it.
+ // Use a very simple loop detector. TODO: incorporate David's loop stuff
+ // once it is in.
+ top := b.Succs[0]
+ for _, p := range top.Preds {
+ if p == b {
+ continue
+ }
+ for {
+ if p.Kind == BlockCall {
+ goto badloop
+ }
+ if p == top {
+ break
+ }
+ if len(p.Preds) != 1 {
+ goto badloop
+ }
+ p = p.Preds[0]
+ }
+ }
+
+ // TODO: sort by distance, pick the closest ones?
+ for _, live := range s.live[b.ID] {
+ if live.dist >= unlikelyDistance {
+ // Don't preload anything live after the loop.
+ continue
+ }
+ vid := live.ID
+ vi := &s.values[vid]
+ if vi.regs != 0 {
+ continue
+ }
+ v := s.orig[vid]
+ m := s.compatRegs(v.Type) &^ s.used
+ if m != 0 {
+ s.allocValToReg(v, m, false, b.Line)
+ }
+ }
+ }
+ badloop:
+ ;
+
// Save end-of-block register state.
// First count how many, this cuts allocations in half.
k := 0
p, b *Block // edge goes from p->b.
// for each pre-regalloc value, a list of equivalent cached values
- cache map[ID][]*Value
+ cache map[ID][]*Value
+ cachedVals []ID // (superset of) keys of the above map, for deterministic iteration
// map from location to the value it contains
contents map[Location]contentRecord
}
// Clear state.
- for k := range e.cache {
- delete(e.cache, k)
+ for _, vid := range e.cachedVals {
+ delete(e.cache, vid)
}
+ e.cachedVals = e.cachedVals[:0]
for k := range e.contents {
delete(e.contents, k)
}
e.destinations = dsts
if regDebug {
- for vid, a := range e.cache {
+ for _, vid := range e.cachedVals {
+ a := e.cache[vid]
for _, c := range a {
fmt.Printf("src %s: v%d cache=%s\n", e.s.f.getHome(c.ID).Name(), vid, c)
}
e.erase(loc)
e.contents[loc] = contentRecord{vid, c, final}
a := e.cache[vid]
+ if len(a) == 0 {
+ e.cachedVals = append(e.cachedVals, vid)
+ }
a = append(a, c)
e.cache[vid] = a
if r, ok := loc.(*Register); ok {
// TODO: reuse these slots.
// Pick a register to spill.
- for vid, a := range e.cache {
+ for _, vid := range e.cachedVals {
+ a := e.cache[vid]
for _, c := range a {
if r, ok := e.s.f.getHome(c.ID).(*Register); ok && m>>uint(r.Num)&1 != 0 {
x := e.p.NewValue1(c.Line, OpStoreReg, c.Type, c)
}
fmt.Printf("m:%d unique:%d final:%d\n", m, e.uniqueRegs, e.finalRegs)
- for vid, a := range e.cache {
+ for _, vid := range e.cachedVals {
+ a := e.cache[vid]
for _, c := range a {
fmt.Printf("v%d: %s %s\n", vid, c, e.s.f.getHome(c.ID).Name())
}
// Add len(b.Values) to adjust from end-of-block distance
// to beginning-of-block distance.
live.clear()
+ d := int32(len(b.Values))
+ if b.Kind == BlockCall {
+ // Because we keep no values in registers across a call,
+ // make every use past a call very far away.
+ d += unlikelyDistance
+ }
for _, e := range s.live[b.ID] {
- live.set(e.ID, e.dist+int32(len(b.Values)))
+ live.set(e.ID, e.dist+d)
}
// Mark control value as live
// invariant: live contains the values live at the start of b (excluding phi inputs)
for i, p := range b.Preds {
// Compute additional distance for the edge.
- const normalEdge = 10
- const likelyEdge = 1
- const unlikelyEdge = 100
// Note: delta must be at least 1 to distinguish the control
// value use from the first user in a successor block.
- delta := int32(normalEdge)
+ delta := int32(normalDistance)
if len(p.Succs) == 2 {
if p.Succs[0] == b && p.Likely == BranchLikely ||
p.Succs[1] == b && p.Likely == BranchUnlikely {
- delta = likelyEdge
+ delta = likelyDistance
}
if p.Succs[0] == b && p.Likely == BranchUnlikely ||
p.Succs[1] == b && p.Likely == BranchLikely {
- delta = unlikelyEdge
+ delta = unlikelyDistance
}
}
if p1 == p2 {
return true
}
- // Aux isn't used in OffPtr, and AuxInt isn't currently used in
- // Addr, but this still works as the values will be null/0
- return (p1.Op == OpOffPtr || p1.Op == OpAddr) && p1.Op == p2.Op &&
- p1.Aux == p2.Aux && p1.AuxInt == p2.AuxInt &&
- p1.Args[0] == p2.Args[0]
+ if p1.Op != p2.Op {
+ return false
+ }
+ switch p1.Op {
+ case OpOffPtr:
+ return p1.AuxInt == p2.AuxInt && isSamePtr(p1.Args[0], p2.Args[0])
+ case OpAddr:
+ // OpAddr's 0th arg is either OpSP or OpSB, which means that it is uniquely identified by its Op.
+ // Checking for value equality only works after [z]cse has run.
+ return p1.Aux == p2.Aux && p1.Args[0].Op == p2.Args[0].Op
+ case OpAddPtr:
+ return p1.Args[1] == p2.Args[1] && isSamePtr(p1.Args[0], p2.Args[0])
+ }
+ return false
}
// DUFFZERO consists of repeated blocks of 4 MOVUPSs + ADD,
return true
}
// match: (Move [size] dst src mem)
- // cond: size >= 32 && size <= 16*64 && size%16 == 0
+ // cond: size >= 32 && size <= 16*64 && size%16 == 0 && !config.noDuffDevice
// result: (DUFFCOPY [14*(64-size/16)] dst src mem)
for {
size := v.AuxInt
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(size >= 32 && size <= 16*64 && size%16 == 0) {
+ if !(size >= 32 && size <= 16*64 && size%16 == 0 && !config.noDuffDevice) {
break
}
v.reset(OpAMD64DUFFCOPY)
return true
}
// match: (Move [size] dst src mem)
- // cond: size > 16*64 && size%8 == 0
+ // cond: (size > 16*64 || config.noDuffDevice) && size%8 == 0
// result: (REPMOVSQ dst src (MOVQconst [size/8]) mem)
for {
size := v.AuxInt
dst := v.Args[0]
src := v.Args[1]
mem := v.Args[2]
- if !(size > 16*64 && size%8 == 0) {
+ if !((size > 16*64 || config.noDuffDevice) && size%8 == 0) {
break
}
v.reset(OpAMD64REPMOVSQ)
v.AddArg(x)
return true
}
+ // match: (ORL (ORL (ORL (MOVBQZXload [i] {s} p mem) (SHLLconst [8] (MOVBQZXload [i+1] {s} p mem))) (SHLLconst [16] (MOVBQZXload [i+2] {s} p mem))) (SHLLconst [24] (MOVBQZXload [i+3] {s} p mem)))
+ // cond:
+ // result: (MOVLload (ADDQconst [i] p) mem)
+ for {
+ if v.Args[0].Op != OpAMD64ORL {
+ break
+ }
+ if v.Args[0].Args[0].Op != OpAMD64ORL {
+ break
+ }
+ if v.Args[0].Args[0].Args[0].Op != OpAMD64MOVBQZXload {
+ break
+ }
+ i := v.Args[0].Args[0].Args[0].AuxInt
+ s := v.Args[0].Args[0].Args[0].Aux
+ p := v.Args[0].Args[0].Args[0].Args[0]
+ mem := v.Args[0].Args[0].Args[0].Args[1]
+ if v.Args[0].Args[0].Args[1].Op != OpAMD64SHLLconst {
+ break
+ }
+ if v.Args[0].Args[0].Args[1].AuxInt != 8 {
+ break
+ }
+ if v.Args[0].Args[0].Args[1].Args[0].Op != OpAMD64MOVBQZXload {
+ break
+ }
+ if v.Args[0].Args[0].Args[1].Args[0].AuxInt != i+1 {
+ break
+ }
+ if v.Args[0].Args[0].Args[1].Args[0].Aux != s {
+ break
+ }
+ if v.Args[0].Args[0].Args[1].Args[0].Args[0] != p {
+ break
+ }
+ if v.Args[0].Args[0].Args[1].Args[0].Args[1] != mem {
+ break
+ }
+ if v.Args[0].Args[1].Op != OpAMD64SHLLconst {
+ break
+ }
+ if v.Args[0].Args[1].AuxInt != 16 {
+ break
+ }
+ if v.Args[0].Args[1].Args[0].Op != OpAMD64MOVBQZXload {
+ break
+ }
+ if v.Args[0].Args[1].Args[0].AuxInt != i+2 {
+ break
+ }
+ if v.Args[0].Args[1].Args[0].Aux != s {
+ break
+ }
+ if v.Args[0].Args[1].Args[0].Args[0] != p {
+ break
+ }
+ if v.Args[0].Args[1].Args[0].Args[1] != mem {
+ break
+ }
+ if v.Args[1].Op != OpAMD64SHLLconst {
+ break
+ }
+ if v.Args[1].AuxInt != 24 {
+ break
+ }
+ if v.Args[1].Args[0].Op != OpAMD64MOVBQZXload {
+ break
+ }
+ if v.Args[1].Args[0].AuxInt != i+3 {
+ break
+ }
+ if v.Args[1].Args[0].Aux != s {
+ break
+ }
+ if v.Args[1].Args[0].Args[0] != p {
+ break
+ }
+ if v.Args[1].Args[0].Args[1] != mem {
+ break
+ }
+ v.reset(OpAMD64MOVLload)
+ v0 := b.NewValue0(v.Line, OpAMD64ADDQconst, config.fe.TypeUInt64())
+ v0.AuxInt = i
+ v0.AddArg(p)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64ORLconst(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
+ // match: (ORQ (ORQ (ORQ (ORQ (ORQ (ORQ (ORQ (MOVBQZXload [i] {s} p mem) (SHLQconst [8] (MOVBQZXload [i+1] {s} p mem))) (SHLQconst [16] (MOVBQZXload [i+2] {s} p mem))) (SHLQconst [24] (MOVBQZXload [i+3] {s} p mem))) (SHLQconst [32] (MOVBQZXload [i+4] {s} p mem))) (SHLQconst [40] (MOVBQZXload [i+5] {s} p mem))) (SHLQconst [48] (MOVBQZXload [i+6] {s} p mem))) (SHLQconst [56] (MOVBQZXload [i+7] {s} p mem)))
+ // cond:
+ // result: (MOVQload (ADDQconst [i] p) mem)
+ for {
+ if v.Args[0].Op != OpAMD64ORQ {
+ break
+ }
+ if v.Args[0].Args[0].Op != OpAMD64ORQ {
+ break
+ }
+ if v.Args[0].Args[0].Args[0].Op != OpAMD64ORQ {
+ break
+ }
+ if v.Args[0].Args[0].Args[0].Args[0].Op != OpAMD64ORQ {
+ break
+ }
+ if v.Args[0].Args[0].Args[0].Args[0].Args[0].Op != OpAMD64ORQ {
+ break
+ }
+ if v.Args[0].Args[0].Args[0].Args[0].Args[0].Args[0].Op != OpAMD64ORQ {
+ break
+ }
+ if v.Args[0].Args[0].Args[0].Args[0].Args[0].Args[0].Args[0].Op != OpAMD64MOVBQZXload {
+ break
+ }
+ i := v.Args[0].Args[0].Args[0].Args[0].Args[0].Args[0].Args[0].AuxInt
+ s := v.Args[0].Args[0].Args[0].Args[0].Args[0].Args[0].Args[0].Aux
+ p := v.Args[0].Args[0].Args[0].Args[0].Args[0].Args[0].Args[0].Args[0]
+ mem := v.Args[0].Args[0].Args[0].Args[0].Args[0].Args[0].Args[0].Args[1]
+ if v.Args[0].Args[0].Args[0].Args[0].Args[0].Args[0].Args[1].Op != OpAMD64SHLQconst {
+ break
+ }
+ if v.Args[0].Args[0].Args[0].Args[0].Args[0].Args[0].Args[1].AuxInt != 8 {
+ break
+ }
+ if v.Args[0].Args[0].Args[0].Args[0].Args[0].Args[0].Args[1].Args[0].Op != OpAMD64MOVBQZXload {
+ break
+ }
+ if v.Args[0].Args[0].Args[0].Args[0].Args[0].Args[0].Args[1].Args[0].AuxInt != i+1 {
+ break
+ }
+ if v.Args[0].Args[0].Args[0].Args[0].Args[0].Args[0].Args[1].Args[0].Aux != s {
+ break
+ }
+ if v.Args[0].Args[0].Args[0].Args[0].Args[0].Args[0].Args[1].Args[0].Args[0] != p {
+ break
+ }
+ if v.Args[0].Args[0].Args[0].Args[0].Args[0].Args[0].Args[1].Args[0].Args[1] != mem {
+ break
+ }
+ if v.Args[0].Args[0].Args[0].Args[0].Args[0].Args[1].Op != OpAMD64SHLQconst {
+ break
+ }
+ if v.Args[0].Args[0].Args[0].Args[0].Args[0].Args[1].AuxInt != 16 {
+ break
+ }
+ if v.Args[0].Args[0].Args[0].Args[0].Args[0].Args[1].Args[0].Op != OpAMD64MOVBQZXload {
+ break
+ }
+ if v.Args[0].Args[0].Args[0].Args[0].Args[0].Args[1].Args[0].AuxInt != i+2 {
+ break
+ }
+ if v.Args[0].Args[0].Args[0].Args[0].Args[0].Args[1].Args[0].Aux != s {
+ break
+ }
+ if v.Args[0].Args[0].Args[0].Args[0].Args[0].Args[1].Args[0].Args[0] != p {
+ break
+ }
+ if v.Args[0].Args[0].Args[0].Args[0].Args[0].Args[1].Args[0].Args[1] != mem {
+ break
+ }
+ if v.Args[0].Args[0].Args[0].Args[0].Args[1].Op != OpAMD64SHLQconst {
+ break
+ }
+ if v.Args[0].Args[0].Args[0].Args[0].Args[1].AuxInt != 24 {
+ break
+ }
+ if v.Args[0].Args[0].Args[0].Args[0].Args[1].Args[0].Op != OpAMD64MOVBQZXload {
+ break
+ }
+ if v.Args[0].Args[0].Args[0].Args[0].Args[1].Args[0].AuxInt != i+3 {
+ break
+ }
+ if v.Args[0].Args[0].Args[0].Args[0].Args[1].Args[0].Aux != s {
+ break
+ }
+ if v.Args[0].Args[0].Args[0].Args[0].Args[1].Args[0].Args[0] != p {
+ break
+ }
+ if v.Args[0].Args[0].Args[0].Args[0].Args[1].Args[0].Args[1] != mem {
+ break
+ }
+ if v.Args[0].Args[0].Args[0].Args[1].Op != OpAMD64SHLQconst {
+ break
+ }
+ if v.Args[0].Args[0].Args[0].Args[1].AuxInt != 32 {
+ break
+ }
+ if v.Args[0].Args[0].Args[0].Args[1].Args[0].Op != OpAMD64MOVBQZXload {
+ break
+ }
+ if v.Args[0].Args[0].Args[0].Args[1].Args[0].AuxInt != i+4 {
+ break
+ }
+ if v.Args[0].Args[0].Args[0].Args[1].Args[0].Aux != s {
+ break
+ }
+ if v.Args[0].Args[0].Args[0].Args[1].Args[0].Args[0] != p {
+ break
+ }
+ if v.Args[0].Args[0].Args[0].Args[1].Args[0].Args[1] != mem {
+ break
+ }
+ if v.Args[0].Args[0].Args[1].Op != OpAMD64SHLQconst {
+ break
+ }
+ if v.Args[0].Args[0].Args[1].AuxInt != 40 {
+ break
+ }
+ if v.Args[0].Args[0].Args[1].Args[0].Op != OpAMD64MOVBQZXload {
+ break
+ }
+ if v.Args[0].Args[0].Args[1].Args[0].AuxInt != i+5 {
+ break
+ }
+ if v.Args[0].Args[0].Args[1].Args[0].Aux != s {
+ break
+ }
+ if v.Args[0].Args[0].Args[1].Args[0].Args[0] != p {
+ break
+ }
+ if v.Args[0].Args[0].Args[1].Args[0].Args[1] != mem {
+ break
+ }
+ if v.Args[0].Args[1].Op != OpAMD64SHLQconst {
+ break
+ }
+ if v.Args[0].Args[1].AuxInt != 48 {
+ break
+ }
+ if v.Args[0].Args[1].Args[0].Op != OpAMD64MOVBQZXload {
+ break
+ }
+ if v.Args[0].Args[1].Args[0].AuxInt != i+6 {
+ break
+ }
+ if v.Args[0].Args[1].Args[0].Aux != s {
+ break
+ }
+ if v.Args[0].Args[1].Args[0].Args[0] != p {
+ break
+ }
+ if v.Args[0].Args[1].Args[0].Args[1] != mem {
+ break
+ }
+ if v.Args[1].Op != OpAMD64SHLQconst {
+ break
+ }
+ if v.Args[1].AuxInt != 56 {
+ break
+ }
+ if v.Args[1].Args[0].Op != OpAMD64MOVBQZXload {
+ break
+ }
+ if v.Args[1].Args[0].AuxInt != i+7 {
+ break
+ }
+ if v.Args[1].Args[0].Aux != s {
+ break
+ }
+ if v.Args[1].Args[0].Args[0] != p {
+ break
+ }
+ if v.Args[1].Args[0].Args[1] != mem {
+ break
+ }
+ v.reset(OpAMD64MOVQload)
+ v0 := b.NewValue0(v.Line, OpAMD64ADDQconst, config.fe.TypeUInt64())
+ v0.AuxInt = i
+ v0.AddArg(p)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64ORQconst(v *Value, config *Config) bool {
v.AddArg(x)
return true
}
+ // match: (ORW (MOVBQZXload [i] {s} p mem) (SHLWconst [8] (MOVBQZXload [i+1] {s} p mem)))
+ // cond:
+ // result: (MOVWload (ADDQconst [i] p) mem)
+ for {
+ if v.Args[0].Op != OpAMD64MOVBQZXload {
+ break
+ }
+ i := v.Args[0].AuxInt
+ s := v.Args[0].Aux
+ p := v.Args[0].Args[0]
+ mem := v.Args[0].Args[1]
+ if v.Args[1].Op != OpAMD64SHLWconst {
+ break
+ }
+ if v.Args[1].AuxInt != 8 {
+ break
+ }
+ if v.Args[1].Args[0].Op != OpAMD64MOVBQZXload {
+ break
+ }
+ if v.Args[1].Args[0].AuxInt != i+1 {
+ break
+ }
+ if v.Args[1].Args[0].Aux != s {
+ break
+ }
+ if v.Args[1].Args[0].Args[0] != p {
+ break
+ }
+ if v.Args[1].Args[0].Args[1] != mem {
+ break
+ }
+ v.reset(OpAMD64MOVWload)
+ v0 := b.NewValue0(v.Line, OpAMD64ADDQconst, config.fe.TypeUInt64())
+ v0.AuxInt = i
+ v0.AddArg(p)
+ v.AddArg(v0)
+ v.AddArg(mem)
+ return true
+ }
return false
}
func rewriteValueAMD64_OpAMD64ORWconst(v *Value, config *Config) bool {
return true
}
// match: (Zero [size] destptr mem)
- // cond: size <= 1024 && size%8 == 0 && size%16 != 0
+ // cond: size <= 1024 && size%8 == 0 && size%16 != 0 && !config.noDuffDevice
// result: (Zero [size-8] (ADDQconst [8] destptr) (MOVQstore destptr (MOVQconst [0]) mem))
for {
size := v.AuxInt
destptr := v.Args[0]
mem := v.Args[1]
- if !(size <= 1024 && size%8 == 0 && size%16 != 0) {
+ if !(size <= 1024 && size%8 == 0 && size%16 != 0 && !config.noDuffDevice) {
break
}
v.reset(OpZero)
return true
}
// match: (Zero [size] destptr mem)
- // cond: size <= 1024 && size%16 == 0
+ // cond: size <= 1024 && size%16 == 0 && !config.noDuffDevice
// result: (DUFFZERO [duffStart(size)] (ADDQconst [duffAdj(size)] destptr) (MOVOconst [0]) mem)
for {
size := v.AuxInt
destptr := v.Args[0]
mem := v.Args[1]
- if !(size <= 1024 && size%16 == 0) {
+ if !(size <= 1024 && size%16 == 0 && !config.noDuffDevice) {
break
}
v.reset(OpAMD64DUFFZERO)
return true
}
// match: (Zero [size] destptr mem)
- // cond: size > 1024 && size%8 == 0
+ // cond: (size > 1024 || (config.noDuffDevice && size > 32)) && size%8 == 0
// result: (REPSTOSQ destptr (MOVQconst [size/8]) (MOVQconst [0]) mem)
for {
size := v.AuxInt
destptr := v.Args[0]
mem := v.Args[1]
- if !(size > 1024 && size%8 == 0) {
+ if !((size > 1024 || (config.noDuffDevice && size > 32)) && size%8 == 0) {
break
}
v.reset(OpAMD64REPSTOSQ)
--- /dev/null
+// autogenerated from gen/dec.rules: do not edit!
+// generated with: cd gen; go run *.go
+
+package ssa
+
+import "math"
+
+var _ = math.MinInt8 // in case not otherwise used
+func rewriteValuedec(v *Value, config *Config) bool {
+ switch v.Op {
+ case OpSliceCap:
+ return rewriteValuedec_OpSliceCap(v, config)
+ case OpSliceLen:
+ return rewriteValuedec_OpSliceLen(v, config)
+ case OpSlicePtr:
+ return rewriteValuedec_OpSlicePtr(v, config)
+ case OpStringLen:
+ return rewriteValuedec_OpStringLen(v, config)
+ case OpStringPtr:
+ return rewriteValuedec_OpStringPtr(v, config)
+ }
+ return false
+}
+func rewriteValuedec_OpSliceCap(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SliceCap (SliceMake _ _ cap))
+ // cond:
+ // result: cap
+ for {
+ if v.Args[0].Op != OpSliceMake {
+ break
+ }
+ cap := v.Args[0].Args[2]
+ v.reset(OpCopy)
+ v.Type = cap.Type
+ v.AddArg(cap)
+ return true
+ }
+ return false
+}
+func rewriteValuedec_OpSliceLen(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SliceLen (SliceMake _ len _))
+ // cond:
+ // result: len
+ for {
+ if v.Args[0].Op != OpSliceMake {
+ break
+ }
+ len := v.Args[0].Args[1]
+ v.reset(OpCopy)
+ v.Type = len.Type
+ v.AddArg(len)
+ return true
+ }
+ return false
+}
+func rewriteValuedec_OpSlicePtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (SlicePtr (SliceMake ptr _ _ ))
+ // cond:
+ // result: ptr
+ for {
+ if v.Args[0].Op != OpSliceMake {
+ break
+ }
+ ptr := v.Args[0].Args[0]
+ v.reset(OpCopy)
+ v.Type = ptr.Type
+ v.AddArg(ptr)
+ return true
+ }
+ return false
+}
+func rewriteValuedec_OpStringLen(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (StringLen (StringMake _ len))
+ // cond:
+ // result: len
+ for {
+ if v.Args[0].Op != OpStringMake {
+ break
+ }
+ len := v.Args[0].Args[1]
+ v.reset(OpCopy)
+ v.Type = len.Type
+ v.AddArg(len)
+ return true
+ }
+ return false
+}
+func rewriteValuedec_OpStringPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (StringPtr (StringMake ptr _))
+ // cond:
+ // result: ptr
+ for {
+ if v.Args[0].Op != OpStringMake {
+ break
+ }
+ ptr := v.Args[0].Args[0]
+ v.reset(OpCopy)
+ v.Type = ptr.Type
+ v.AddArg(ptr)
+ return true
+ }
+ return false
+}
+func rewriteBlockdec(b *Block) bool {
+ switch b.Kind {
+ }
+ return false
+}
return rewriteValuegeneric_OpNeqPtr(v, config)
case OpNeqSlice:
return rewriteValuegeneric_OpNeqSlice(v, config)
+ case OpNilCheck:
+ return rewriteValuegeneric_OpNilCheck(v, config)
+ case OpOffPtr:
+ return rewriteValuegeneric_OpOffPtr(v, config)
case OpOr16:
return rewriteValuegeneric_OpOr16(v, config)
case OpOr32:
v.AuxInt = 0
return true
}
- // match: (And32 <t> (Const32 [y]) x)
- // cond: nlz(int64(int32(y))) + nto(int64(int32(y))) == 64
- // result: (Rsh32Ux32 (Lsh32x32 <t> x (Const32 <t> [nlz(int64(int32(y)))-32])) (Const32 <t> [nlz(int64(int32(y)))-32]))
- for {
- t := v.Type
- if v.Args[0].Op != OpConst32 {
- break
- }
- y := v.Args[0].AuxInt
- x := v.Args[1]
- if !(nlz(int64(int32(y)))+nto(int64(int32(y))) == 64) {
- break
- }
- v.reset(OpRsh32Ux32)
- v0 := b.NewValue0(v.Line, OpLsh32x32, t)
- v0.AddArg(x)
- v1 := b.NewValue0(v.Line, OpConst32, t)
- v1.AuxInt = nlz(int64(int32(y))) - 32
- v0.AddArg(v1)
- v.AddArg(v0)
- v2 := b.NewValue0(v.Line, OpConst32, t)
- v2.AuxInt = nlz(int64(int32(y))) - 32
- v.AddArg(v2)
- return true
- }
- // match: (And32 <t> (Const32 [y]) x)
- // cond: nlo(int64(int32(y))) + ntz(int64(int32(y))) == 64
- // result: (Lsh32x32 (Rsh32Ux32 <t> x (Const32 <t> [ntz(int64(int32(y)))])) (Const32 <t> [ntz(int64(int32(y)))]))
- for {
- t := v.Type
- if v.Args[0].Op != OpConst32 {
- break
- }
- y := v.Args[0].AuxInt
- x := v.Args[1]
- if !(nlo(int64(int32(y)))+ntz(int64(int32(y))) == 64) {
- break
- }
- v.reset(OpLsh32x32)
- v0 := b.NewValue0(v.Line, OpRsh32Ux32, t)
- v0.AddArg(x)
- v1 := b.NewValue0(v.Line, OpConst32, t)
- v1.AuxInt = ntz(int64(int32(y)))
- v0.AddArg(v1)
- v.AddArg(v0)
- v2 := b.NewValue0(v.Line, OpConst32, t)
- v2.AuxInt = ntz(int64(int32(y)))
- v.AddArg(v2)
- return true
- }
return false
}
func rewriteValuegeneric_OpAnd64(v *Value, config *Config) bool {
return true
}
// match: (And64 <t> (Const64 [y]) x)
- // cond: nlz(y) + nto(y) == 64
+ // cond: nlz(y) + nto(y) == 64 && nto(y) >= 32
// result: (Rsh64Ux64 (Lsh64x64 <t> x (Const64 <t> [nlz(y)])) (Const64 <t> [nlz(y)]))
for {
t := v.Type
}
y := v.Args[0].AuxInt
x := v.Args[1]
- if !(nlz(y)+nto(y) == 64) {
+ if !(nlz(y)+nto(y) == 64 && nto(y) >= 32) {
break
}
v.reset(OpRsh64Ux64)
return true
}
// match: (And64 <t> (Const64 [y]) x)
- // cond: nlo(y) + ntz(y) == 64
+ // cond: nlo(y) + ntz(y) == 64 && ntz(y) >= 32
// result: (Lsh64x64 (Rsh64Ux64 <t> x (Const64 <t> [ntz(y)])) (Const64 <t> [ntz(y)]))
for {
t := v.Type
}
y := v.Args[0].AuxInt
x := v.Args[1]
- if !(nlo(y)+ntz(y) == 64) {
+ if !(nlo(y)+ntz(y) == 64 && ntz(y) >= 32) {
break
}
v.reset(OpLsh64x64)
func rewriteValuegeneric_OpDiv64u(v *Value, config *Config) bool {
b := v.Block
_ = b
+ // match: (Div64u <t> n (Const64 [c]))
+ // cond: isPowerOfTwo(c)
+ // result: (Rsh64Ux64 n (Const64 <t> [log2(c)]))
+ for {
+ t := v.Type
+ n := v.Args[0]
+ if v.Args[1].Op != OpConst64 {
+ break
+ }
+ c := v.Args[1].AuxInt
+ if !(isPowerOfTwo(c)) {
+ break
+ }
+ v.reset(OpRsh64Ux64)
+ v.AddArg(n)
+ v0 := b.NewValue0(v.Line, OpConst64, t)
+ v0.AuxInt = log2(c)
+ v.AddArg(v0)
+ return true
+ }
// match: (Div64u <t> x (Const64 [c]))
// cond: umagic64ok(c) && !umagic64a(c)
// result: (Rsh64Ux64 (Hmul64u <t> (Const64 <t> [umagic64m(c)]) x) (Const64 <t> [umagic64s(c)]))
if v.Args[1].Args[0].Op != OpConst16 {
break
}
- if v.Args[1].Args[0].Type != v.Args[0].Type {
+ if v.Args[1].Args[0].Type != t {
break
}
d := v.Args[1].Args[0].AuxInt
if v.Args[1].Args[0].Op != OpConst32 {
break
}
- if v.Args[1].Args[0].Type != v.Args[0].Type {
+ if v.Args[1].Args[0].Type != t {
break
}
d := v.Args[1].Args[0].AuxInt
if v.Args[1].Args[0].Op != OpConst64 {
break
}
- if v.Args[1].Args[0].Type != v.Args[0].Type {
+ if v.Args[1].Args[0].Type != t {
break
}
d := v.Args[1].Args[0].AuxInt
if v.Args[1].Args[0].Op != OpConst8 {
break
}
- if v.Args[1].Args[0].Type != v.Args[0].Type {
+ if v.Args[1].Args[0].Type != t {
break
}
d := v.Args[1].Args[0].AuxInt
func rewriteValuegeneric_OpIsInBounds(v *Value, config *Config) bool {
b := v.Block
_ = b
+ // match: (IsInBounds (And32 (Const32 [c]) _) (Const32 [d]))
+ // cond: inBounds32(c, d)
+ // result: (ConstBool [1])
+ for {
+ if v.Args[0].Op != OpAnd32 {
+ break
+ }
+ if v.Args[0].Args[0].Op != OpConst32 {
+ break
+ }
+ c := v.Args[0].Args[0].AuxInt
+ if v.Args[1].Op != OpConst32 {
+ break
+ }
+ d := v.Args[1].AuxInt
+ if !(inBounds32(c, d)) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (IsInBounds (And64 (Const64 [c]) _) (Const64 [d]))
+ // cond: inBounds64(c, d)
+ // result: (ConstBool [1])
+ for {
+ if v.Args[0].Op != OpAnd64 {
+ break
+ }
+ if v.Args[0].Args[0].Op != OpConst64 {
+ break
+ }
+ c := v.Args[0].Args[0].AuxInt
+ if v.Args[1].Op != OpConst64 {
+ break
+ }
+ d := v.Args[1].AuxInt
+ if !(inBounds64(c, d)) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = 1
+ return true
+ }
// match: (IsInBounds (Const32 [c]) (Const32 [d]))
// cond:
// result: (ConstBool [b2i(inBounds32(c,d))])
func rewriteValuegeneric_OpIsSliceInBounds(v *Value, config *Config) bool {
b := v.Block
_ = b
+ // match: (IsSliceInBounds (And32 (Const32 [c]) _) (Const32 [d]))
+ // cond: sliceInBounds32(c, d)
+ // result: (ConstBool [1])
+ for {
+ if v.Args[0].Op != OpAnd32 {
+ break
+ }
+ if v.Args[0].Args[0].Op != OpConst32 {
+ break
+ }
+ c := v.Args[0].Args[0].AuxInt
+ if v.Args[1].Op != OpConst32 {
+ break
+ }
+ d := v.Args[1].AuxInt
+ if !(sliceInBounds32(c, d)) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = 1
+ return true
+ }
+ // match: (IsSliceInBounds (And64 (Const64 [c]) _) (Const64 [d]))
+ // cond: sliceInBounds64(c, d)
+ // result: (ConstBool [1])
+ for {
+ if v.Args[0].Op != OpAnd64 {
+ break
+ }
+ if v.Args[0].Args[0].Op != OpConst64 {
+ break
+ }
+ c := v.Args[0].Args[0].AuxInt
+ if v.Args[1].Op != OpConst64 {
+ break
+ }
+ d := v.Args[1].AuxInt
+ if !(sliceInBounds64(c, d)) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = 1
+ return true
+ }
// match: (IsSliceInBounds (Const32 [c]) (Const32 [d]))
// cond:
// result: (ConstBool [b2i(sliceInBounds32(c,d))])
func rewriteValuegeneric_OpMod64u(v *Value, config *Config) bool {
b := v.Block
_ = b
+ // match: (Mod64u <t> n (Const64 [c]))
+ // cond: isPowerOfTwo(c)
+ // result: (And64 n (Const64 <t> [c-1]))
+ for {
+ t := v.Type
+ n := v.Args[0]
+ if v.Args[1].Op != OpConst64 {
+ break
+ }
+ c := v.Args[1].AuxInt
+ if !(isPowerOfTwo(c)) {
+ break
+ }
+ v.reset(OpAnd64)
+ v.AddArg(n)
+ v0 := b.NewValue0(v.Line, OpConst64, t)
+ v0.AuxInt = c - 1
+ v.AddArg(v0)
+ return true
+ }
// match: (Mod64u <t> x (Const64 [c]))
// cond: umagic64ok(c)
// result: (Sub64 x (Mul64 <t> (Div64u <t> x (Const64 <t> [c])) (Const64 <t> [c])))
if v.Args[1].Op != OpAdd32 {
break
}
- if v.Args[1].Type != v.Args[0].Type {
+ if v.Args[1].Type != t {
break
}
if v.Args[1].Args[0].Op != OpConst32 {
break
}
- if v.Args[1].Args[0].Type != v.Args[0].Type {
+ if v.Args[1].Args[0].Type != t {
break
}
d := v.Args[1].Args[0].AuxInt
if v.Args[1].Op != OpAdd64 {
break
}
- if v.Args[1].Type != v.Args[0].Type {
+ if v.Args[1].Type != t {
break
}
if v.Args[1].Args[0].Op != OpConst64 {
break
}
- if v.Args[1].Args[0].Type != v.Args[0].Type {
+ if v.Args[1].Args[0].Type != t {
break
}
d := v.Args[1].Args[0].AuxInt
if v.Args[1].Args[0].Op != OpConst16 {
break
}
- if v.Args[1].Args[0].Type != v.Args[0].Type {
+ if v.Args[1].Args[0].Type != t {
break
}
d := v.Args[1].Args[0].AuxInt
if v.Args[1].Args[0].Op != OpConst32 {
break
}
- if v.Args[1].Args[0].Type != v.Args[0].Type {
+ if v.Args[1].Args[0].Type != t {
break
}
d := v.Args[1].Args[0].AuxInt
if v.Args[1].Args[0].Op != OpConst64 {
break
}
- if v.Args[1].Args[0].Type != v.Args[0].Type {
+ if v.Args[1].Args[0].Type != t {
break
}
d := v.Args[1].Args[0].AuxInt
if v.Args[1].Args[0].Op != OpConst8 {
break
}
- if v.Args[1].Args[0].Type != v.Args[0].Type {
+ if v.Args[1].Args[0].Type != t {
break
}
d := v.Args[1].Args[0].AuxInt
}
return false
}
+func rewriteValuegeneric_OpNilCheck(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (NilCheck (Phi x (Add64 (Const64 [c]) y)) mem)
+ // cond: c > 0 && v.Args[0] == y
+ // result: (NilCheck x mem)
+ for {
+ if v.Args[0].Op != OpPhi {
+ break
+ }
+ x := v.Args[0].Args[0]
+ if v.Args[0].Args[1].Op != OpAdd64 {
+ break
+ }
+ if v.Args[0].Args[1].Args[0].Op != OpConst64 {
+ break
+ }
+ c := v.Args[0].Args[1].Args[0].AuxInt
+ y := v.Args[0].Args[1].Args[1]
+ if len(v.Args[0].Args) != 2 {
+ break
+ }
+ mem := v.Args[1]
+ if !(c > 0 && v.Args[0] == y) {
+ break
+ }
+ v.reset(OpNilCheck)
+ v.AddArg(x)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpOffPtr(v *Value, config *Config) bool {
+ b := v.Block
+ _ = b
+ // match: (OffPtr (OffPtr p [b]) [a])
+ // cond:
+ // result: (OffPtr p [a+b])
+ for {
+ if v.Args[0].Op != OpOffPtr {
+ break
+ }
+ p := v.Args[0].Args[0]
+ b := v.Args[0].AuxInt
+ a := v.AuxInt
+ v.reset(OpOffPtr)
+ v.AddArg(p)
+ v.AuxInt = a + b
+ return true
+ }
+ return false
+}
func rewriteValuegeneric_OpOr16(v *Value, config *Config) bool {
b := v.Block
_ = b
if v.Args[1].Op != OpConst64 {
break
}
- if v.Args[1].AuxInt != v.Args[0].AuxInt {
+ if v.Args[1].AuxInt != c {
break
}
if len(v.Args) != 2 {
func rewriteValuegeneric_OpSliceCap(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (SliceCap (SliceMake _ _ cap))
+ // match: (SliceCap (SliceMake _ _ (Const64 <t> [c])))
// cond:
- // result: cap
+ // result: (Const64 <t> [c])
for {
if v.Args[0].Op != OpSliceMake {
break
}
- cap := v.Args[0].Args[2]
- v.reset(OpCopy)
- v.Type = cap.Type
- v.AddArg(cap)
+ if v.Args[0].Args[2].Op != OpConst64 {
+ break
+ }
+ t := v.Args[0].Args[2].Type
+ c := v.Args[0].Args[2].AuxInt
+ v.reset(OpConst64)
+ v.Type = t
+ v.AuxInt = c
return true
}
return false
func rewriteValuegeneric_OpSliceLen(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (SliceLen (SliceMake _ len _))
+ // match: (SliceLen (SliceMake _ (Const64 <t> [c]) _))
// cond:
- // result: len
+ // result: (Const64 <t> [c])
for {
if v.Args[0].Op != OpSliceMake {
break
}
- len := v.Args[0].Args[1]
- v.reset(OpCopy)
- v.Type = len.Type
- v.AddArg(len)
+ if v.Args[0].Args[1].Op != OpConst64 {
+ break
+ }
+ t := v.Args[0].Args[1].Type
+ c := v.Args[0].Args[1].AuxInt
+ v.reset(OpConst64)
+ v.Type = t
+ v.AuxInt = c
return true
}
return false
func rewriteValuegeneric_OpSlicePtr(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (SlicePtr (SliceMake ptr _ _ ))
+ // match: (SlicePtr (SliceMake (Const64 <t> [c]) _ _))
// cond:
- // result: ptr
+ // result: (Const64 <t> [c])
for {
if v.Args[0].Op != OpSliceMake {
break
}
- ptr := v.Args[0].Args[0]
- v.reset(OpCopy)
- v.Type = ptr.Type
- v.AddArg(ptr)
+ if v.Args[0].Args[0].Op != OpConst64 {
+ break
+ }
+ t := v.Args[0].Args[0].Type
+ c := v.Args[0].Args[0].AuxInt
+ v.reset(OpConst64)
+ v.Type = t
+ v.AuxInt = c
return true
}
return false
func rewriteValuegeneric_OpStringLen(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (StringLen (StringMake _ len))
+ // match: (StringLen (StringMake _ (Const64 <t> [c])))
// cond:
- // result: len
+ // result: (Const64 <t> [c])
for {
if v.Args[0].Op != OpStringMake {
break
}
- len := v.Args[0].Args[1]
- v.reset(OpCopy)
- v.Type = len.Type
- v.AddArg(len)
+ if v.Args[0].Args[1].Op != OpConst64 {
+ break
+ }
+ t := v.Args[0].Args[1].Type
+ c := v.Args[0].Args[1].AuxInt
+ v.reset(OpConst64)
+ v.Type = t
+ v.AuxInt = c
return true
}
return false
func rewriteValuegeneric_OpStringPtr(v *Value, config *Config) bool {
b := v.Block
_ = b
- // match: (StringPtr (StringMake ptr _))
+ // match: (StringPtr (StringMake (Const64 <t> [c]) _))
// cond:
- // result: ptr
+ // result: (Const64 <t> [c])
for {
if v.Args[0].Op != OpStringMake {
break
}
- ptr := v.Args[0].Args[0]
- v.reset(OpCopy)
- v.Type = ptr.Type
- v.AddArg(ptr)
+ if v.Args[0].Args[0].Op != OpConst64 {
+ break
+ }
+ t := v.Args[0].Args[0].Type
+ c := v.Args[0].Args[0].AuxInt
+ v.reset(OpConst64)
+ v.Type = t
+ v.AuxInt = c
return true
}
return false
v.AuxInt = int64(int8(c))
return true
}
+ // match: (Trunc16to8 (And16 (Const16 [y]) x))
+ // cond: y&0xFF == 0xFF
+ // result: (Trunc16to8 x)
+ for {
+ if v.Args[0].Op != OpAnd16 {
+ break
+ }
+ if v.Args[0].Args[0].Op != OpConst16 {
+ break
+ }
+ y := v.Args[0].Args[0].AuxInt
+ x := v.Args[0].Args[1]
+ if !(y&0xFF == 0xFF) {
+ break
+ }
+ v.reset(OpTrunc16to8)
+ v.AddArg(x)
+ return true
+ }
return false
}
func rewriteValuegeneric_OpTrunc32to16(v *Value, config *Config) bool {
v.AuxInt = int64(int16(c))
return true
}
+ // match: (Trunc32to16 (And32 (Const32 [y]) x))
+ // cond: y&0xFFFF == 0xFFFF
+ // result: (Trunc32to16 x)
+ for {
+ if v.Args[0].Op != OpAnd32 {
+ break
+ }
+ if v.Args[0].Args[0].Op != OpConst32 {
+ break
+ }
+ y := v.Args[0].Args[0].AuxInt
+ x := v.Args[0].Args[1]
+ if !(y&0xFFFF == 0xFFFF) {
+ break
+ }
+ v.reset(OpTrunc32to16)
+ v.AddArg(x)
+ return true
+ }
return false
}
func rewriteValuegeneric_OpTrunc32to8(v *Value, config *Config) bool {
v.AuxInt = int64(int8(c))
return true
}
+ // match: (Trunc32to8 (And32 (Const32 [y]) x))
+ // cond: y&0xFF == 0xFF
+ // result: (Trunc32to8 x)
+ for {
+ if v.Args[0].Op != OpAnd32 {
+ break
+ }
+ if v.Args[0].Args[0].Op != OpConst32 {
+ break
+ }
+ y := v.Args[0].Args[0].AuxInt
+ x := v.Args[0].Args[1]
+ if !(y&0xFF == 0xFF) {
+ break
+ }
+ v.reset(OpTrunc32to8)
+ v.AddArg(x)
+ return true
+ }
return false
}
func rewriteValuegeneric_OpTrunc64to16(v *Value, config *Config) bool {
v.AuxInt = int64(int16(c))
return true
}
+ // match: (Trunc64to16 (And64 (Const64 [y]) x))
+ // cond: y&0xFFFF == 0xFFFF
+ // result: (Trunc64to16 x)
+ for {
+ if v.Args[0].Op != OpAnd64 {
+ break
+ }
+ if v.Args[0].Args[0].Op != OpConst64 {
+ break
+ }
+ y := v.Args[0].Args[0].AuxInt
+ x := v.Args[0].Args[1]
+ if !(y&0xFFFF == 0xFFFF) {
+ break
+ }
+ v.reset(OpTrunc64to16)
+ v.AddArg(x)
+ return true
+ }
return false
}
func rewriteValuegeneric_OpTrunc64to32(v *Value, config *Config) bool {
v.AuxInt = int64(int32(c))
return true
}
+ // match: (Trunc64to32 (And64 (Const64 [y]) x))
+ // cond: y&0xFFFFFFFF == 0xFFFFFFFF
+ // result: (Trunc64to32 x)
+ for {
+ if v.Args[0].Op != OpAnd64 {
+ break
+ }
+ if v.Args[0].Args[0].Op != OpConst64 {
+ break
+ }
+ y := v.Args[0].Args[0].AuxInt
+ x := v.Args[0].Args[1]
+ if !(y&0xFFFFFFFF == 0xFFFFFFFF) {
+ break
+ }
+ v.reset(OpTrunc64to32)
+ v.AddArg(x)
+ return true
+ }
return false
}
func rewriteValuegeneric_OpTrunc64to8(v *Value, config *Config) bool {
v.AuxInt = int64(int8(c))
return true
}
+ // match: (Trunc64to8 (And64 (Const64 [y]) x))
+ // cond: y&0xFF == 0xFF
+ // result: (Trunc64to8 x)
+ for {
+ if v.Args[0].Op != OpAnd64 {
+ break
+ }
+ if v.Args[0].Args[0].Op != OpConst64 {
+ break
+ }
+ y := v.Args[0].Args[0].AuxInt
+ x := v.Args[0].Args[1]
+ if !(y&0xFF == 0xFF) {
+ break
+ }
+ v.reset(OpTrunc64to8)
+ v.AddArg(x)
+ return true
+ }
return false
}
func rewriteValuegeneric_OpXor16(v *Value, config *Config) bool {
package ssa
+import "container/heap"
+
const (
ScorePhi = iota // towards top of block
ScoreVarDef
ScoreDefault
ScoreFlags
ScoreControl // towards bottom of block
-
- ScoreCount // not a real score
)
+type ValHeap struct {
+ a []*Value
+ less func(a, b *Value) bool
+}
+
+func (h ValHeap) Len() int { return len(h.a) }
+func (h ValHeap) Swap(i, j int) { a := h.a; a[i], a[j] = a[j], a[i] }
+
+func (h *ValHeap) Push(x interface{}) {
+ // Push and Pop use pointer receivers because they modify the slice's length,
+ // not just its contents.
+ v := x.(*Value)
+ h.a = append(h.a, v)
+}
+func (h *ValHeap) Pop() interface{} {
+ old := h.a
+ n := len(old)
+ x := old[n-1]
+ h.a = old[0 : n-1]
+ return x
+}
+func (h ValHeap) Less(i, j int) bool { return h.less(h.a[i], h.a[j]) }
+
// Schedule the Values in each Block. After this phase returns, the
// order of b.Values matters and is the order in which those values
// will appear in the assembly output. For now it generates a
func schedule(f *Func) {
// For each value, the number of times it is used in the block
// by values that have not been scheduled yet.
- uses := make([]int, f.NumValues())
+ uses := make([]int32, f.NumValues())
// "priority" for a value
- score := make([]uint8, f.NumValues())
+ score := make([]int8, f.NumValues())
// scheduling order. We queue values in this list in reverse order.
var order []*Value
- // priority queue of legally schedulable (0 unscheduled uses) values
- var priq [ScoreCount][]*Value
-
// maps mem values to the next live memory value
nextMem := make([]*Value, f.NumValues())
// additional pretend arguments for each Value. Used to enforce load/store ordering.
additionalArgs := make([][]*Value, f.NumValues())
+ for _, b := range f.Blocks {
+ // Compute score. Larger numbers are scheduled closer to the end of the block.
+ for _, v := range b.Values {
+ switch {
+ case v.Op == OpAMD64LoweredGetClosurePtr:
+			// We also score LoweredGetClosurePtr as early as possible to ensure that the
+			// context register is not stomped. LoweredGetClosurePtr should only appear
+ // in the entry block where there are no phi functions, so there is no
+ // conflict or ambiguity here.
+ if b != f.Entry {
+ f.Fatalf("LoweredGetClosurePtr appeared outside of entry block, b=%s", b.String())
+ }
+ score[v.ID] = ScorePhi
+ case v.Op == OpPhi:
+ // We want all the phis first.
+ score[v.ID] = ScorePhi
+ case v.Op == OpVarDef:
+ // We want all the vardefs next.
+ score[v.ID] = ScoreVarDef
+ case v.Type.IsMemory():
+ // Schedule stores as early as possible. This tends to
+ // reduce register pressure. It also helps make sure
+ // VARDEF ops are scheduled before the corresponding LEA.
+ score[v.ID] = ScoreMemory
+ case v.Type.IsFlags():
+ // Schedule flag register generation as late as possible.
+ // This makes sure that we only have one live flags
+ // value at a time.
+ score[v.ID] = ScoreFlags
+ default:
+ score[v.ID] = ScoreDefault
+ }
+ }
+ }
+
for _, b := range f.Blocks {
// Find store chain for block.
// Store chains for different blocks overwrite each other, so
uses[v.ID]++
}
}
- // Compute score. Larger numbers are scheduled closer to the end of the block.
- for _, v := range b.Values {
- switch {
- case v.Op == OpAMD64LoweredGetClosurePtr:
- // We also score GetLoweredClosurePtr as early as possible to ensure that the
- // context register is not stomped. GetLoweredClosurePtr should only appear
- // in the entry block where there are no phi functions, so there is no
- // conflict or ambiguity here.
- if b != f.Entry {
- f.Fatalf("LoweredGetClosurePtr appeared outside of entry block, b=%s", b.String())
- }
- score[v.ID] = ScorePhi
- case v.Op == OpPhi:
- // We want all the phis first.
- score[v.ID] = ScorePhi
- case v.Op == OpVarDef:
- // We want all the vardefs next.
- score[v.ID] = ScoreVarDef
- case v.Type.IsMemory():
- // Schedule stores as early as possible. This tends to
- // reduce register pressure. It also helps make sure
- // VARDEF ops are scheduled before the corresponding LEA.
- score[v.ID] = ScoreMemory
- case v.Type.IsFlags():
- // Schedule flag register generation as late as possible.
- // This makes sure that we only have one live flags
- // value at a time.
- score[v.ID] = ScoreFlags
- default:
- score[v.ID] = ScoreDefault
- }
- }
+
if b.Control != nil && b.Control.Op != OpPhi {
// Force the control value to be scheduled at the end,
// unless it is a phi value (which must be first).
}
}
- // Initialize priority queue with schedulable values.
- for i := range priq {
- priq[i] = priq[i][:0]
+	// To put things into a priority queue,
+	// the values that should come last are least.
+ priq := &ValHeap{
+ a: make([]*Value, 0, 8), // TODO allocate once and reuse.
+ less: func(x, y *Value) bool {
+ sx := score[x.ID]
+ sy := score[y.ID]
+ if c := sx - sy; c != 0 {
+ return c > 0 // higher score comes later.
+ }
+ if x.Line != y.Line { // Favor in-order line stepping
+ return x.Line > y.Line
+ }
+ if x.Op != OpPhi {
+ if c := len(x.Args) - len(y.Args); c != 0 {
+				return c < 0 // fewer args come later
+ }
+ }
+ return x.ID > y.ID
+ },
}
+
+ // Initialize priority queue with schedulable values.
for _, v := range b.Values {
if uses[v.ID] == 0 {
- s := score[v.ID]
- priq[s] = append(priq[s], v)
+ heap.Push(priq, v)
}
}
order = order[:0]
for {
// Find highest priority schedulable value.
- var v *Value
- for i := len(priq) - 1; i >= 0; i-- {
- n := len(priq[i])
- if n == 0 {
- continue
- }
- v = priq[i][n-1]
- priq[i] = priq[i][:n-1]
- break
- }
- if v == nil {
+ // Note that schedule is assembled backwards.
+
+ if priq.Len() == 0 {
break
}
+ v := heap.Pop(priq).(*Value)
+
// Add it to the schedule.
order = append(order, v)
uses[w.ID]--
if uses[w.ID] == 0 {
// All uses scheduled, w is now schedulable.
- s := score[w.ID]
- priq[s] = append(priq[s], w)
+ heap.Push(priq, w)
}
}
for _, w := range additionalArgs[v.ID] {
uses[w.ID]--
if uses[w.ID] == 0 {
// All uses scheduled, w is now schedulable.
- s := score[w.ID]
- priq[s] = append(priq[s], w)
+ heap.Push(priq, w)
}
}
}
--- /dev/null
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !nacl
+
+package ssa
+
+import (
+ "reflect"
+ "testing"
+ "unsafe"
+)
+
+// Assert that the sizes of important structures do not change unexpectedly.
+
+func TestSizeof(t *testing.T) {
+ const _64bit = unsafe.Sizeof(uintptr(0)) == 8
+
+ var tests = []struct {
+ val interface{} // type as a value
+ _32bit uintptr // size on 32bit platforms
+ _64bit uintptr // size on 64bit platforms
+ }{
+ {Value{}, 64, 112},
+ {Block{}, 124, 232},
+ }
+
+ for _, tt := range tests {
+ want := tt._32bit
+ if _64bit {
+ want = tt._64bit
+ }
+ got := reflect.TypeOf(tt.val).Size()
+ if want != got {
+ t.Errorf("unsafe.Sizeof(%T) = %d, want %d", tt.val, got, want)
+ }
+ }
+}
// Special compiler-only types.
type CompilerType struct {
Name string
+ size int64
Memory bool
Flags bool
Void bool
Int128 bool
}
-func (t *CompilerType) Size() int64 { return 0 } // Size in bytes
+func (t *CompilerType) Size() int64 { return t.size } // Size in bytes
func (t *CompilerType) Alignment() int64 { return 0 }
func (t *CompilerType) IsBoolean() bool { return false }
func (t *CompilerType) IsInteger() bool { return false }
TypeMem = &CompilerType{Name: "mem", Memory: true}
TypeFlags = &CompilerType{Name: "flags", Flags: true}
TypeVoid = &CompilerType{Name: "void", Void: true}
- TypeInt128 = &CompilerType{Name: "int128", Int128: true}
+ TypeInt128 = &CompilerType{Name: "int128", size: 16, Int128: true}
)
// Source line number
Line int32
- // Storage for the first two args
- argstorage [2]*Value
+ // Storage for the first three args
+ argstorage [3]*Value
}
// Examples:
// if we are copying forward on the stack and
// the src and dst overlap, then reverse direction
- if osrc < odst && int64(odst) < int64(osrc)+w {
+ if osrc < odst && odst < osrc+w {
// reverse direction
gins(x86.ASTD, nil, nil) // set direction flag
if c > 0 {
if res.Op != gc.OINDREG && res.Op != gc.ONAME {
gc.Dump("n", n)
gc.Dump("res", res)
- gc.Fatalf("cgen64 %v of %v", gc.Oconv(int(n.Op), 0), gc.Oconv(int(res.Op), 0))
+ gc.Fatalf("cgen64 %v of %v", gc.Oconv(n.Op, 0), gc.Oconv(res.Op, 0))
}
switch n.Op {
default:
- gc.Fatalf("cgen64 %v", gc.Oconv(int(n.Op), 0))
+ gc.Fatalf("cgen64 %v", gc.Oconv(n.Op, 0))
case gc.OMINUS:
gc.Cgen(n.Left, res)
var br *obj.Prog
switch op {
default:
- gc.Fatalf("cmp64 %v %v", gc.Oconv(int(op), 0), t)
+ gc.Fatalf("cmp64 %v %v", gc.Oconv(op, 0), t)
// cmp hi
// jne L
var MAXWIDTH int64 = (1 << 32) - 1
-/*
- * go declares several platform-specific type aliases:
- * int, uint, and uintptr
- */
-var typedefs = []gc.Typedef{
- {"int", gc.TINT, gc.TINT32},
- {"uint", gc.TUINT, gc.TUINT32},
- {"uintptr", gc.TUINTPTR, gc.TUINT32},
-}
-
func betypeinit() {
gc.Widthptr = 4
gc.Widthint = 4
gc.Thearch.Thechar = thechar
gc.Thearch.Thestring = thestring
gc.Thearch.Thelinkarch = thelinkarch
- gc.Thearch.Typedefs = typedefs
gc.Thearch.REGSP = x86.REGSP
gc.Thearch.REGCTXT = x86.REGCTXT
gc.Thearch.REGCALLX = x86.REG_BX
return p
}
-func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int64, ttype int, treg int, toffset int64) *obj.Prog {
+func appendpp(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int, foffset int64, ttype obj.AddrType, treg int, toffset int64) *obj.Prog {
q := gc.Ctxt.NewProg()
gc.Clearp(q)
- q.As = int16(as)
+ q.As = as
q.Lineno = p.Lineno
- q.From.Type = int16(ftype)
+ q.From.Type = ftype
q.From.Reg = int16(freg)
q.From.Offset = foffset
- q.To.Type = int16(ttype)
+ q.To.Type = ttype
q.To.Reg = int16(treg)
q.To.Offset = toffset
q.Link = p.Link
}
func cgen_floatsse(n *gc.Node, res *gc.Node) {
- var a int
+ var a obj.As
nl := n.Left
nr := n.Right
switch n.Op {
default:
gc.Dump("cgen_floatsse", n)
- gc.Fatalf("cgen_floatsse %v", gc.Oconv(int(n.Op), 0))
+ gc.Fatalf("cgen_floatsse %v", gc.Oconv(n.Op, 0))
return
case gc.OMINUS,
continue
}
if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
- gc.Warnl(int(p.Lineno), "generated nil check")
+ gc.Warnl(p.Lineno, "generated nil check")
}
// check is
/*
* return Axxx for Oxxx on type t.
*/
-func optoas(op gc.Op, t *gc.Type) int {
+func optoas(op gc.Op, t *gc.Type) obj.As {
if t == nil {
gc.Fatalf("optoas: t is nil")
}
a := obj.AXXX
switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
default:
- gc.Fatalf("optoas: no entry %v-%v", gc.Oconv(int(op), 0), t)
+ gc.Fatalf("optoas: no entry %v-%v", gc.Oconv(op, 0), t)
case OADDR_ | gc.TPTR32:
a = x86.ALEAL
return a
}
-func foptoas(op gc.Op, t *gc.Type, flg int) int {
+func foptoas(op gc.Op, t *gc.Type, flg int) obj.As {
a := obj.AXXX
et := gc.Simtype[t.Etype]
if !gc.Thearch.Use387 {
switch uint32(op)<<16 | uint32(et) {
default:
- gc.Fatalf("foptoas-sse: no entry %v-%v", gc.Oconv(int(op), 0), t)
+ gc.Fatalf("foptoas-sse: no entry %v-%v", gc.Oconv(op, 0), t)
case OCMP_ | gc.TFLOAT32:
a = x86.AUCOMISS
return x86.AFCHS
}
- gc.Fatalf("foptoas %v %v %#x", gc.Oconv(int(op), 0), t, flg)
+ gc.Fatalf("foptoas %v %v %#x", gc.Oconv(op, 0), t, flg)
return 0
}
* generate
* as $c, reg
*/
-func gconreg(as int, c int64, reg int) {
+func gconreg(as obj.As, c int64, reg int) {
var n1 gc.Node
var n2 gc.Node
* generate
* as $c, n
*/
-func ginscon(as int, c int64, n2 *gc.Node) {
+func ginscon(as obj.As, c int64, n2 *gc.Node) {
var n1 gc.Node
gc.Nodconst(&n1, gc.Types[gc.TINT32], c)
gins(as, &n1, n2)
// cannot have two integer memory operands;
// except 64-bit, which always copies via registers anyway.
var r1 gc.Node
- var a int
+ var a obj.As
if gc.Isint[ft] && gc.Isint[tt] && !gc.Is64(f.Type) && !gc.Is64(t.Type) && gc.Ismem(f) && gc.Ismem(t) {
goto hard
}
func floatmove_387(f *gc.Node, t *gc.Node) {
var r1 gc.Node
- var a int
+ var a obj.As
ft := gc.Simsimtype(f.Type)
tt := gc.Simsimtype(t.Type)
func floatmove_sse(f *gc.Node, t *gc.Node) {
var r1 gc.Node
var cvt *gc.Type
- var a int
+ var a obj.As
ft := gc.Simsimtype(f.Type)
tt := gc.Simsimtype(t.Type)
* generate one instruction:
* as f, t
*/
-func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
+func gins(as obj.As, f *gc.Node, t *gc.Node) *obj.Prog {
if as == x86.AFMOVF && f != nil && f.Op == gc.OREGISTER && t != nil && t.Op == gc.OREGISTER {
gc.Fatalf("gins MOVF reg, reg")
}
func sudoclean() {
}
-func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
+func sudoaddable(as obj.As, n *gc.Node, a *obj.Addr) bool {
*a = obj.Addr{}
return false
}
if regtyp(v) {
reg := int(v.Reg)
if (a.Type == obj.TYPE_MEM || a.Type == obj.TYPE_ADDR) && int(a.Reg) == reg {
- if (s.Reg == x86.REG_BP) && a.Index != obj.TYPE_NONE {
+ if (s.Reg == x86.REG_BP) && a.Index != x86.REG_NONE {
return 1 /* can't use BP-base with index */
}
if f != 0 {
An example function is similar to a test function but, instead of using
*testing.T to report success or failure, prints output to os.Stdout.
-That output is compared against the function's "Output:" comment, which
-must be the last comment in the function body (see example below). An
-example with no such comment, or with no text after "Output:" is compiled
-but not executed.
+If the last comment in the function starts with "Output:" then the output
+is compared exactly against the comment (see examples below). If the last
+comment begins with "Unordered output:" then the output is compared to the
+comment; however, the order of the lines is ignored. An example with no such
+comment, or with no text after "Output:" is compiled but not executed.
Godoc displays the body of ExampleXXX to demonstrate the use
of the function, constant, or variable XXX. An example of a method M with
// this example.
}
+Here is another example where the ordering of the output is ignored:
+
+ func ExamplePerm() {
+ for _, value := range Perm(4) {
+ fmt.Println(value)
+ }
+ // Unordered output: 4
+ // 2
+ // 1
+ // 3
+ // 0
+ }
+
The entire test file is presented as the example when it contains a single
example function, at least one other function, type, variable, or constant
declaration, and no test or benchmark functions.
// cgo and non-cgo worlds, so it necessarily has files in both.
// In that case gcc only gets the gcc_* files.
var gccfiles []string
+ gccfiles = append(gccfiles, cfiles...)
+ cfiles = nil
if a.p.Standard && a.p.ImportPath == "runtime/cgo" {
filter := func(files, nongcc, gcc []string) ([]string, []string) {
for _, f := range files {
}
return nongcc, gcc
}
- cfiles, gccfiles = filter(cfiles, cfiles[:0], gccfiles)
sfiles, gccfiles = filter(sfiles, sfiles[:0], gccfiles)
} else {
- gccfiles = append(cfiles, sfiles...)
- cfiles = nil
+ gccfiles = append(gccfiles, sfiles...)
sfiles = nil
}
GOARCH: c.GOARCH,
GOOS: c.GOOS,
GOROOT: c.GOROOT,
+ GOPATH: c.GOPATH,
CgoEnabled: c.CgoEnabled,
UseAllFiles: c.UseAllFiles,
Compiler: c.Compiler,
tg.grepStdout(`\* another-branch`, "not on correct default branch")
}
-func TestDisallowedCSourceFiles(t *testing.T) {
- tg := testgo(t)
- defer tg.cleanup()
- tg.setenv("GOPATH", filepath.Join(tg.pwd(), "testdata"))
- tg.runFail("build", "badc")
- tg.grepStderr("C source files not allowed", "go test did not say C source files not allowed")
-}
-
func TestErrorMessageForSyntaxErrorInTestGoFileSaysFAIL(t *testing.T) {
tg := testgo(t)
defer tg.cleanup()
tg.grepStderr(`gccgo.*\-L alibpath \-lalib`, `no Go-inline "#cgo LDFLAGS:" ("-L alibpath -lalib") passed to gccgo linking stage`)
}
-func TestListTemplateCanUseContextFunction(t *testing.T) {
- tg := testgo(t)
- defer tg.cleanup()
- tg.run("list", "-f", "GOARCH: {{context.GOARCH}}")
+func TestListTemplateContextFunction(t *testing.T) {
+ tg := testgo(t)
+ defer tg.cleanup()
+ for _, tt := range []struct {
+ v string
+ want string
+ }{
+ {"GOARCH", runtime.GOARCH},
+ {"GOOS", runtime.GOOS},
+ {"GOROOT", filepath.Clean(runtime.GOROOT())},
+ {"GOPATH", os.Getenv("GOPATH")},
+ {"CgoEnabled", ""},
+ {"UseAllFiles", ""},
+ {"Compiler", ""},
+ {"BuildTags", ""},
+ {"ReleaseTags", ""},
+ {"InstallSuffix", ""},
+ } {
+ tmpl := "{{context." + tt.v + "}}"
+ tg.run("list", "-f", tmpl)
+ if tt.want == "" {
+ continue
+ }
+ if got := strings.TrimSpace(tg.getStdout()); got != tt.want {
+ t.Errorf("go list -f %q: got %q; want %q", tmpl, got, tt.want)
+ }
+ }
}
// cmd/go: "go test" should fail if package does not build
}
p.Target = p.target
+ // If cgo is not enabled, ignore cgo supporting sources
+ // just as we ignore go files containing import "C".
+ if !buildContext.CgoEnabled {
+ p.CFiles = nil
+ p.CXXFiles = nil
+ p.MFiles = nil
+ p.SwigFiles = nil
+ p.SwigCXXFiles = nil
+ p.SysoFiles = nil
+ // Note that SFiles are okay (they go to the Go assembler)
+ // and HFiles are okay (they might be used by the SFiles).
+ }
+
// The gc toolchain only permits C source files with cgo.
if len(p.CFiles) > 0 && !p.usesCgo() && !p.usesSwig() && buildContext.Compiler == "gc" {
p.Error = &PackageError{
An example function is similar to a test function but, instead of using
*testing.T to report success or failure, prints output to os.Stdout.
-That output is compared against the function's "Output:" comment, which
-must be the last comment in the function body (see example below). An
-example with no such comment, or with no text after "Output:" is compiled
-but not executed.
+If the last comment in the function starts with "Output:" then the output
+is compared exactly against the comment (see examples below). If the last
+comment begins with "Unordered output:" then the output is compared to the
+comment, however the order of the lines is ignored. An example with no such
+comment, or with no text after "Output:" is compiled but not executed.
Godoc displays the body of ExampleXXX to demonstrate the use
of the function, constant, or variable XXX. An example of a method M with
// this example.
}
+Here is another example where the ordering of the output is ignored:
+
+ func ExamplePerm() {
+ for _, value := range Perm(4) {
+ fmt.Println(value)
+ }
+
+ // Unordered output: 4
+ // 2
+ // 1
+ // 3
+ // 0
+ }
+
The entire test file is presented as the example when it contains a single
example function, at least one other function, type, variable, or constant
declaration, and no test or benchmark functions.
}
type testFunc struct {
- Package string // imported package name (_test or _xtest)
- Name string // function name
- Output string // output, for examples
+ Package string // imported package name (_test or _xtest)
+ Name string // function name
+ Output string // output, for examples
+ Unordered bool // output is allowed to be unordered.
}
var testFileSet = token.NewFileSet()
if t.TestMain != nil {
return errors.New("multiple definitions of TestMain")
}
- t.TestMain = &testFunc{pkg, name, ""}
+ t.TestMain = &testFunc{pkg, name, "", false}
*doImport, *seen = true, true
case isTest(name, "Test"):
err := checkTestFunc(n, "T")
if err != nil {
return err
}
- t.Tests = append(t.Tests, testFunc{pkg, name, ""})
+ t.Tests = append(t.Tests, testFunc{pkg, name, "", false})
*doImport, *seen = true, true
case isTest(name, "Benchmark"):
err := checkTestFunc(n, "B")
if err != nil {
return err
}
- t.Benchmarks = append(t.Benchmarks, testFunc{pkg, name, ""})
+ t.Benchmarks = append(t.Benchmarks, testFunc{pkg, name, "", false})
*doImport, *seen = true, true
}
}
// Don't run examples with no output.
continue
}
- t.Examples = append(t.Examples, testFunc{pkg, "Example" + e.Name, e.Output})
+ t.Examples = append(t.Examples, testFunc{pkg, "Example" + e.Name, e.Output, e.Unordered})
*seen = true
}
return nil
var examples = []testing.InternalExample{
{{range .Examples}}
- {"{{.Name}}", {{.Package}}.{{.Name}}, {{.Output | printf "%q"}}},
+ {"{{.Name}}", {{.Package}}.{{.Name}}, {{.Output | printf "%q"}}, {{.Unordered}}},
{{end}}
}
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package obj
-
-type ar_hdr struct {
- name string
- date string
- uid string
- gid string
- mode string
- size string
- fmag string
-}
)
type Optab struct {
- as uint16
+ as obj.As
a1 uint8
a2 int8
a3 uint8
pcrelsiz uint8
}
-type Oprang struct {
- start []Optab
- stop []Optab
-}
-
type Opcross [32][2][32]uint8
const (
extra uint32
}
-var oprange [ALAST & obj.AMask]Oprang
+var oprange [ALAST & obj.AMask][]Optab
-var xcmp [C_GOK + 1][C_GOK + 1]uint8
+var xcmp [C_GOK + 1][C_GOK + 1]bool
var deferreturn *obj.LSym
return
}
- if oprange[AAND&obj.AMask].start == nil {
+ if oprange[AAND&obj.AMask] == nil {
buildop(ctxt)
}
return C_LAUTO
- case obj.TYPE_NONE:
+ case obj.NAME_NONE:
ctxt.Instoffset = a.Offset
t := int(immaddr(int32(ctxt.Instoffset)))
if t != 0 {
case obj.TYPE_CONST,
obj.TYPE_ADDR:
switch a.Name {
- case obj.TYPE_NONE:
+ case obj.NAME_NONE:
ctxt.Instoffset = a.Offset
if a.Reg != 0 {
return aconsize(ctxt)
func oplook(ctxt *obj.Link, p *obj.Prog) *Optab {
a1 := int(p.Optab)
if a1 != 0 {
- return &optab[a1-1:][0]
+ return &optab[a1-1]
}
a1 = int(p.From.Class)
if a1 == 0 {
if p.Reg != 0 {
a2 = C_REG
}
- r := p.As & obj.AMask
- o := oprange[r].start
- if o == nil {
- o = oprange[r].stop /* just generate an error */
- }
if false { /*debug['O']*/
- fmt.Printf("oplook %v %v %v %v\n", obj.Aconv(int(p.As)), DRconv(a1), DRconv(a2), DRconv(a3))
+ fmt.Printf("oplook %v %v %v %v\n", obj.Aconv(p.As), DRconv(a1), DRconv(a2), DRconv(a3))
fmt.Printf("\t\t%d %d\n", p.From.Type, p.To.Type)
}
- e := oprange[r].stop
- c1 := xcmp[a1][:]
- c3 := xcmp[a3][:]
- for ; -cap(o) < -cap(e); o = o[1:] {
- if int(o[0].a2) == a2 {
- if c1[o[0].a1] != 0 {
- if c3[o[0].a3] != 0 {
- p.Optab = uint16((-cap(o) + cap(optab)) + 1)
- return &o[0]
- }
- }
+ ops := oprange[p.As&obj.AMask]
+ c1 := &xcmp[a1]
+ c3 := &xcmp[a3]
+ for i := range ops {
+ op := &ops[i]
+ if int(op.a2) == a2 && c1[op.a1] && c3[op.a3] {
+ p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
+ return op
}
}
ctxt.Diag("illegal combination %v; %v %v %v, %d %d", p, DRconv(a1), DRconv(a2), DRconv(a3), p.From.Type, p.To.Type)
ctxt.Diag("from %d %d to %d %d\n", p.From.Type, p.From.Name, p.To.Type, p.To.Name)
prasm(p)
- if o == nil {
- o = optab
+ if ops == nil {
+ ops = optab
}
- return &o[0]
+ return &ops[0]
}
func cmp(a int, b int) bool {
return false
}
-func opset(a, b0 uint16) {
+func opset(a, b0 obj.As) {
oprange[a&obj.AMask] = oprange[b0]
}
for i := 0; i < C_GOK; i++ {
for n = 0; n < C_GOK; n++ {
if cmp(n, i) {
- xcmp[i][n] = 1
+ xcmp[i][n] = true
}
}
}
for i := 0; i < n; i++ {
r := optab[i].as
r0 := r & obj.AMask
- oprange[r0].start = optab[i:]
+ start := i
for optab[i].as == r {
i++
}
- oprange[r0].stop = optab[i:]
+ oprange[r0] = optab[start:i]
i--
switch r {
default:
- ctxt.Diag("unknown op in build: %v", obj.Aconv(int(r)))
+ ctxt.Diag("unknown op in build: %v", obj.Aconv(r))
log.Fatalf("bad code")
case AADD:
}
case 1: /* op R,[R],R */
- o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+ o1 = oprrr(ctxt, p.As, int(p.Scond))
rf := int(p.From.Reg)
rt := int(p.To.Reg)
case 2: /* movbu $I,[R],R */
aclass(ctxt, &p.From)
- o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+ o1 = oprrr(ctxt, p.As, int(p.Scond))
o1 |= uint32(immrot(uint32(ctxt.Instoffset)))
rt := int(p.To.Reg)
r := int(p.Reg)
o1 |= (uint32(p.To.Reg) & 15) << 12
case 5: /* bra s */
- o1 = opbra(ctxt, p, int(p.As), int(p.Scond))
+ o1 = opbra(ctxt, p, p.As, int(p.Scond))
v := int32(-8)
if p.To.Sym != nil {
case 8: /* sll $c,[R],R -> mov (R<<$c),R */
aclass(ctxt, &p.From)
- o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+ o1 = oprrr(ctxt, p.As, int(p.Scond))
r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
o1 |= (uint32(p.To.Reg) & 15) << 12
case 9: /* sll R,[R],R -> mov (R<<R),R */
- o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+ o1 = oprrr(ctxt, p.As, int(p.Scond))
r := int(p.Reg)
if r == 0 {
o1 |= (uint32(p.To.Reg) & 15) << 12
case 10: /* swi [$con] */
- o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+ o1 = oprrr(ctxt, p.As, int(p.Scond))
if p.To.Type != obj.TYPE_NONE {
aclass(ctxt, &p.To)
if o1 == 0 {
break
}
- o2 = oprrr(ctxt, int(p.As), int(p.Scond))
+ o2 = oprrr(ctxt, p.As, int(p.Scond))
o2 |= REGTMP & 15
r := int(p.Reg)
if p.As == AMOVW || p.As == AMVN {
}
case 15: /* mul r,[r,]r */
- o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+ o1 = oprrr(ctxt, p.As, int(p.Scond))
rf := int(p.From.Reg)
rt := int(p.To.Reg)
o2 = 0
case 17:
- o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+ o1 = oprrr(ctxt, p.As, int(p.Scond))
rf := int(p.From.Reg)
rt := int(p.To.Reg)
rt2 := int(p.To.Offset)
if r == 0 {
r = int(o.param)
}
- o1 = osr(ctxt, int(p.As), int(p.From.Reg), int32(ctxt.Instoffset), r, int(p.Scond))
+ o1 = osr(ctxt, p.As, int(p.From.Reg), int32(ctxt.Instoffset), r, int(p.Scond))
case 21: /* mov/movbu O(R),R -> lr */
aclass(ctxt, &p.From)
if r == 0 {
r = int(o.param)
}
- o1 = ofsr(ctxt, int(p.As), int(p.From.Reg), v, r, int(p.Scond), p)
+ o1 = ofsr(ctxt, p.As, int(p.From.Reg), v, r, int(p.Scond), p)
case 51: /* floating point load */
v := regoff(ctxt, &p.From)
if r == 0 {
r = int(o.param)
}
- o1 = ofsr(ctxt, int(p.As), int(p.To.Reg), v, r, int(p.Scond), p) | 1<<20
+ o1 = ofsr(ctxt, p.As, int(p.To.Reg), v, r, int(p.Scond), p) | 1<<20
case 52: /* floating point store, int32 offset UGLY */
o1 = omvl(ctxt, p, &p.To, REGTMP)
r = int(o.param)
}
o2 = oprrr(ctxt, AADD, int(p.Scond)) | (REGTMP&15)<<12 | (REGTMP&15)<<16 | (uint32(r)&15)<<0
- o3 = ofsr(ctxt, int(p.As), int(p.From.Reg), 0, REGTMP, int(p.Scond), p)
+ o3 = ofsr(ctxt, p.As, int(p.From.Reg), 0, REGTMP, int(p.Scond), p)
case 53: /* floating point load, int32 offset UGLY */
o1 = omvl(ctxt, p, &p.From, REGTMP)
r = int(o.param)
}
o2 = oprrr(ctxt, AADD, int(p.Scond)) | (REGTMP&15)<<12 | (REGTMP&15)<<16 | (uint32(r)&15)<<0
- o3 = ofsr(ctxt, int(p.As), int(p.To.Reg), 0, (REGTMP&15), int(p.Scond), p) | 1<<20
+ o3 = ofsr(ctxt, p.As, int(p.To.Reg), 0, (REGTMP&15), int(p.Scond), p) | 1<<20
case 54: /* floating point arith */
- o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+ o1 = oprrr(ctxt, p.As, int(p.Scond))
rf := int(p.From.Reg)
rt := int(p.To.Reg)
if o1 == 0 {
break
}
- o2 = osr(ctxt, int(p.As), int(p.From.Reg), 0, REGTMP, int(p.Scond))
+ o2 = osr(ctxt, p.As, int(p.From.Reg), 0, REGTMP, int(p.Scond))
if o.flag&LPCREL != 0 {
o3 = o2
o2 = oprrr(ctxt, AADD, int(p.Scond)) | REGTMP&15 | (REGPC&15)<<16 | (REGTMP&15)<<12
if o1 == 0 {
break
}
- o2 = ofsr(ctxt, int(p.As), int(p.From.Reg), 0, REGTMP, int(p.Scond), p)
+ o2 = ofsr(ctxt, p.As, int(p.From.Reg), 0, REGTMP, int(p.Scond), p)
if o.flag&LPCREL != 0 {
o3 = o2
o2 = oprrr(ctxt, AADD, int(p.Scond)) | REGTMP&15 | (REGPC&15)<<16 | (REGTMP&15)<<12
if o1 == 0 {
break
}
- o2 = ofsr(ctxt, int(p.As), int(p.To.Reg), 0, (REGTMP&15), int(p.Scond), p) | 1<<20
+ o2 = ofsr(ctxt, p.As, int(p.To.Reg), 0, (REGTMP&15), int(p.Scond), p) | 1<<20
if o.flag&LPCREL != 0 {
o3 = o2
o2 = oprrr(ctxt, AADD, int(p.Scond)) | REGTMP&15 | (REGPC&15)<<16 | (REGTMP&15)<<12
o1 |= (uint32(v) & 0xf0) << 12
case 82: /* fcmp freg,freg, */
- o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+ o1 = oprrr(ctxt, p.As, int(p.Scond))
o1 |= (uint32(p.Reg)&15)<<12 | (uint32(p.From.Reg)&15)<<0
o2 = 0x0ef1fa10 // VMRS R15
o2 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28
case 83: /* fcmp freg,, */
- o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+ o1 = oprrr(ctxt, p.As, int(p.Scond))
o1 |= (uint32(p.From.Reg)&15)<<12 | 1<<16
o2 = 0x0ef1fa10 // VMRS R15
o2 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28
case 84: /* movfw freg,freg - truncate float-to-fix */
- o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+ o1 = oprrr(ctxt, p.As, int(p.Scond))
o1 |= (uint32(p.From.Reg) & 15) << 0
o1 |= (uint32(p.To.Reg) & 15) << 12
case 85: /* movwf freg,freg - fix-to-float */
- o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+ o1 = oprrr(ctxt, p.As, int(p.Scond))
o1 |= (uint32(p.From.Reg) & 15) << 0
o1 |= (uint32(p.To.Reg) & 15) << 12
// macro for movfw freg,FTMP; movw FTMP,reg
case 86: /* movfw freg,reg - truncate float-to-fix */
- o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+ o1 = oprrr(ctxt, p.As, int(p.Scond))
o1 |= (uint32(p.From.Reg) & 15) << 0
o1 |= (FREGTMP & 15) << 12
- o2 = oprrr(ctxt, AMOVFW+ALAST, int(p.Scond))
+ o2 = oprrr(ctxt, -AMOVFW, int(p.Scond))
o2 |= (FREGTMP & 15) << 16
o2 |= (uint32(p.To.Reg) & 15) << 12
// macro for movw reg,FTMP; movwf FTMP,freg
case 87: /* movwf reg,freg - fix-to-float */
- o1 = oprrr(ctxt, AMOVWF+ALAST, int(p.Scond))
+ o1 = oprrr(ctxt, -AMOVWF, int(p.Scond))
o1 |= (uint32(p.From.Reg) & 15) << 12
o1 |= (FREGTMP & 15) << 16
- o2 = oprrr(ctxt, int(p.As), int(p.Scond))
+ o2 = oprrr(ctxt, p.As, int(p.Scond))
o2 |= (FREGTMP & 15) << 0
o2 |= (uint32(p.To.Reg) & 15) << 12
case 88: /* movw reg,freg */
- o1 = oprrr(ctxt, AMOVWF+ALAST, int(p.Scond))
+ o1 = oprrr(ctxt, -AMOVWF, int(p.Scond))
o1 |= (uint32(p.From.Reg) & 15) << 12
o1 |= (uint32(p.To.Reg) & 15) << 16
case 89: /* movw freg,reg */
- o1 = oprrr(ctxt, AMOVFW+ALAST, int(p.Scond))
+ o1 = oprrr(ctxt, -AMOVFW, int(p.Scond))
o1 |= (uint32(p.From.Reg) & 15) << 16
o1 |= (uint32(p.To.Reg) & 15) << 12
case 90: /* tst reg */
- o1 = oprrr(ctxt, ACMP+ALAST, int(p.Scond))
+ o1 = oprrr(ctxt, -ACMP, int(p.Scond))
o1 |= (uint32(p.From.Reg) & 15) << 16
o1 = 0xf7fabcfd
case 97: /* CLZ Rm, Rd */
- o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+ o1 = oprrr(ctxt, p.As, int(p.Scond))
o1 |= (uint32(p.To.Reg) & 15) << 12
o1 |= (uint32(p.From.Reg) & 15) << 0
case 98: /* MULW{T,B} Rs, Rm, Rd */
- o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+ o1 = oprrr(ctxt, p.As, int(p.Scond))
o1 |= (uint32(p.To.Reg) & 15) << 16
o1 |= (uint32(p.From.Reg) & 15) << 8
o1 |= (uint32(p.Reg) & 15) << 0
case 99: /* MULAW{T,B} Rs, Rm, Rn, Rd */
- o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+ o1 = oprrr(ctxt, p.As, int(p.Scond))
o1 |= (uint32(p.To.Reg) & 15) << 12
o1 |= (uint32(p.From.Reg) & 15) << 8
func mov(ctxt *obj.Link, p *obj.Prog) uint32 {
aclass(ctxt, &p.From)
- o1 := oprrr(ctxt, int(p.As), int(p.Scond))
+ o1 := oprrr(ctxt, p.As, int(p.Scond))
o1 |= uint32(p.From.Offset)
rt := int(p.To.Reg)
if p.To.Type == obj.TYPE_NONE {
return o1
}
-func oprrr(ctxt *obj.Link, a int, sc int) uint32 {
+func oprrr(ctxt *obj.Link, a obj.As, sc int) uint32 {
o := ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28
if sc&C_SBIT != 0 {
o |= 1 << 20
}
return o | 0xe<<24 | 0xb<<20 | 8<<16 | 0xa<<8 | 4<<4 | 1<<18 | 1<<8 | 1<<7 // toint, double, trunc
- case AMOVWF + ALAST: // copy WtoF
+ case -AMOVWF: // copy WtoF
return o | 0xe<<24 | 0x0<<20 | 0xb<<8 | 1<<4
- case AMOVFW + ALAST: // copy FtoW
+ case -AMOVFW: // copy FtoW
return o | 0xe<<24 | 0x1<<20 | 0xb<<8 | 1<<4
- case ACMP + ALAST: // cmp imm
+ case -ACMP: // cmp imm
return o | 0x3<<24 | 0x5<<20
// CLZ doesn't support .nil
return 0
}
-func opbra(ctxt *obj.Link, p *obj.Prog, a int, sc int) uint32 {
+func opbra(ctxt *obj.Link, p *obj.Prog, a obj.As, sc int) uint32 {
if sc&(C_SBIT|C_PBIT|C_WBIT) != 0 {
ctxt.Diag("%v: .nil/.nil/.W on bra instruction", p)
}
return o
}
-func osr(ctxt *obj.Link, a int, r int, v int32, b int, sc int) uint32 {
+func osr(ctxt *obj.Link, a obj.As, r int, v int32, b int, sc int) uint32 {
o := olr(ctxt, v, b, r, sc) ^ (1 << 20)
if a != AMOVW {
o |= 1 << 22
return olhr(ctxt, int32(i), b, r, sc) ^ (1 << 22)
}
-func ofsr(ctxt *obj.Link, a int, r int, v int32, b int, sc int, p *obj.Prog) uint32 {
+func ofsr(ctxt *obj.Link, a obj.As, r int, v int32, b int, sc int, p *obj.Prog) uint32 {
if sc&C_SBIT != 0 {
ctxt.Diag(".nil on FLDR/FSTR instruction: %v", p)
}
q = p
}
- var o int
var p1 *obj.Prog
var p2 *obj.Prog
var q2 *obj.Prog
for p := cursym.Text; p != nil; p = p.Link {
- o = int(p.As)
+ o := p.As
switch o {
case obj.ATEXT:
autosize = int32(p.To.Offset + 4)
s.Text = firstp.Link
}
-func relinv(a int) int {
+func relinv(a obj.As) obj.As {
switch a {
case ABEQ:
return ABNE
func xfol(ctxt *obj.Link, p *obj.Prog, last **obj.Prog) {
var q *obj.Prog
var r *obj.Prog
- var a int
var i int
loop:
if p == nil {
return
}
- a = int(p.As)
+ a := p.As
if a == AB {
q = p.Pcond
if q != nil && q.As != obj.ATEXT {
if q == *last || q == nil {
break
}
- a = int(q.As)
+ a = q.As
if a == obj.ANOP {
i--
continue
a = AB
q = ctxt.NewProg()
- q.As = int16(a)
+ q.As = a
q.Lineno = p.Lineno
q.To.Type = obj.TYPE_BRANCH
q.To.Offset = p.Pc
q = obj.Brchain(ctxt, p.Link)
if a != obj.ATEXT {
if q != nil && (q.Mark&FOLL != 0) {
- p.As = int16(relinv(a))
+ p.As = relinv(a)
p.Link = p.Pcond
p.Pcond = q
}
goto loop
}
-var unaryDst = map[int]bool{
+var unaryDst = map[obj.As]bool{
ASWI: true,
AWORD: true,
}
)
type Optab struct {
- as uint16
+ as obj.As
a1 uint8
a2 uint8
a3 uint8
scond uint16
}
-type Oprange struct {
- start []Optab
- stop []Optab
-}
-
-var oprange [ALAST]Oprange
+var oprange [ALAST][]Optab
-var xcmp [C_NCLASS][C_NCLASS]uint8
+var xcmp [C_NCLASS][C_NCLASS]bool
const (
S32 = 0 << 31
ctxt.Cursym = cursym
ctxt.Autosize = int32(p.To.Offset&0xffffffff) + 8
- if oprange[AAND].start == nil {
+ if oprange[AAND] == nil {
buildop(ctxt)
}
case obj.TYPE_MEM:
switch a.Name {
- case obj.NAME_EXTERN,
- obj.NAME_STATIC:
+ case obj.NAME_EXTERN, obj.NAME_STATIC:
if a.Sym == nil {
break
}
ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset + 8
return autoclass(ctxt.Instoffset)
- case obj.TYPE_NONE:
+ case obj.NAME_NONE:
ctxt.Instoffset = a.Offset
return oregclass(ctxt.Instoffset)
}
case obj.TYPE_TEXTSIZE:
return C_TEXTSIZE
- case obj.TYPE_CONST,
- obj.TYPE_ADDR:
+ case obj.TYPE_CONST, obj.TYPE_ADDR:
switch a.Name {
- case obj.TYPE_NONE:
+ case obj.NAME_NONE:
ctxt.Instoffset = a.Offset
if a.Reg != 0 && a.Reg != REGZERO {
goto aconsize
}
return C_VCON
- case obj.NAME_EXTERN,
- obj.NAME_STATIC:
+ case obj.NAME_EXTERN, obj.NAME_STATIC:
if a.Sym == nil {
break
}
if p.Reg != 0 {
a2 = rclass(p.Reg)
}
- r := int(p.As)
- o := oprange[r].start
- if o == nil {
- o = oprange[r].stop /* just generate an error */
- }
if false {
- fmt.Printf("oplook %v %d %d %d\n", obj.Aconv(int(p.As)), a1, a2, a3)
+ fmt.Printf("oplook %v %d %d %d\n", obj.Aconv(p.As), a1, a2, a3)
fmt.Printf("\t\t%d %d\n", p.From.Type, p.To.Type)
}
- e := oprange[r].stop
- c1 := xcmp[a1][:]
- c2 := xcmp[a2][:]
- c3 := xcmp[a3][:]
- c4 := xcmp[p.Scond>>5][:]
- for ; -cap(o) < -cap(e); o = o[1:] {
- if int(o[0].a2) == a2 || c2[o[0].a2] != 0 {
- if c4[o[0].scond>>5] != 0 {
- if c1[o[0].a1] != 0 {
- if c3[o[0].a3] != 0 {
- p.Optab = uint16((-cap(o) + cap(optab)) + 1)
- return &o[0]
- }
- }
- }
+ ops := oprange[p.As]
+ c1 := &xcmp[a1]
+ c2 := &xcmp[a2]
+ c3 := &xcmp[a3]
+ c4 := &xcmp[p.Scond>>5]
+ for i := range ops {
+ op := &ops[i]
+ if (int(op.a2) == a2 || c2[op.a2]) && c4[op.scond>>5] && c1[op.a1] && c3[op.a3] {
+ p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
+ return op
}
}
ctxt.Diag("illegal combination %v %v %v %v, %d %d", p, DRconv(a1), DRconv(a2), DRconv(a3), p.From.Type, p.To.Type)
prasm(p)
- if o == nil {
- o = optab
+ if ops == nil {
+ ops = optab
}
- return &o[0]
+ return &ops[0]
}
func cmp(a int, b int) bool {
func (x ocmp) Less(i, j int) bool {
p1 := &x[i]
p2 := &x[j]
- n := int(p1.as) - int(p2.as)
- if n != 0 {
- return n < 0
+ if p1.as != p2.as {
+ return p1.as < p2.as
}
- n = int(p1.a1) - int(p2.a1)
- if n != 0 {
- return n < 0
+ if p1.a1 != p2.a1 {
+ return p1.a1 < p2.a1
}
- n = int(p1.a2) - int(p2.a2)
- if n != 0 {
- return n < 0
+ if p1.a2 != p2.a2 {
+ return p1.a2 < p2.a2
}
- n = int(p1.a3) - int(p2.a3)
- if n != 0 {
- return n < 0
+ if p1.a3 != p2.a3 {
+ return p1.a3 < p2.a3
}
- n = int(p1.scond) - int(p2.scond)
- if n != 0 {
- return n < 0
+ if p1.scond != p2.scond {
+ return p1.scond < p2.scond
}
return false
}
for i := 0; i < C_GOK; i++ {
for n = 0; n < C_GOK; n++ {
if cmp(n, i) {
- xcmp[i][n] = 1
+ xcmp[i][n] = true
}
}
}
for n = 0; optab[n].as != obj.AXXX; n++ {
}
sort.Sort(ocmp(optab[:n]))
- var r int
- var t Oprange
for i := 0; i < n; i++ {
- r = int(optab[i].as)
- oprange[r].start = optab[i:]
- for int(optab[i].as) == r {
+ r := optab[i].as
+ start := i
+ for optab[i].as == r {
i++
}
- oprange[r].stop = optab[i:]
+ t := optab[start:i]
i--
- t = oprange[r]
+ oprange[r] = t
switch r {
default:
ctxt.Diag("unknown op in build: %v", obj.Aconv(r))
break
case 1: /* op Rm,[Rn],Rd; default Rn=Rd -> op Rm<<0,[Rn,]Rd (shifted register) */
- o1 = oprrr(ctxt, int(p.As))
+ o1 = oprrr(ctxt, p.As)
rf := int(p.From.Reg)
rt := int(p.To.Reg)
o1 |= (uint32(rf&31) << 16) | (uint32(r&31) << 5) | uint32(rt&31)
case 2: /* add/sub $(uimm12|uimm24)[,R],R; cmp $(uimm12|uimm24),R */
- o1 = opirr(ctxt, int(p.As))
+ o1 = opirr(ctxt, p.As)
rt := int(p.To.Reg)
if p.To.Type == obj.TYPE_NONE {
o1 = oaddi(ctxt, int32(o1), v, r, rt)
case 3: /* op R<<n[,R],R (shifted register) */
- o1 = oprrr(ctxt, int(p.As))
+ o1 = oprrr(ctxt, p.As)
o1 |= uint32(p.From.Offset) /* includes reg, op, etc */
rt := int(p.To.Reg)
o1 |= (uint32(r&31) << 5) | uint32(rt&31)
case 4: /* mov $addcon, R; mov $recon, R; mov $racon, R */
- o1 = opirr(ctxt, int(p.As))
+ o1 = opirr(ctxt, p.As)
rt := int(p.To.Reg)
r := int(o.param)
o1 |= ((uint32(v) & 0xFFF) << 10) | (uint32(r&31) << 5) | uint32(rt&31)
case 5: /* b s; bl s */
- o1 = opbra(ctxt, int(p.As))
+ o1 = opbra(ctxt, p.As)
if p.To.Sym == nil {
o1 |= uint32(brdist(ctxt, p, 0, 26, 2))
rel.Type = obj.R_CALLARM64
case 6: /* b ,O(R); bl ,O(R) */
- o1 = opbrr(ctxt, int(p.As))
+ o1 = opbrr(ctxt, p.As)
o1 |= uint32(p.To.Reg&31) << 5
rel := obj.Addrel(ctxt.Cursym)
rel.Type = obj.R_CALLIND
case 7: /* beq s */
- o1 = opbra(ctxt, int(p.As))
+ o1 = opbra(ctxt, p.As)
o1 |= uint32(brdist(ctxt, p, 0, 19, 2) << 5)
}
case 9: /* lsl Rm,[Rn],Rd -> lslv Rm, Rn, Rd */
- o1 = oprrr(ctxt, int(p.As))
+ o1 = oprrr(ctxt, p.As)
r := int(p.Reg)
if r == 0 {
o1 |= (uint32(p.From.Reg&31) << 16) | (uint32(r&31) << 5) | uint32(p.To.Reg&31)
case 10: /* brk/hvc/.../svc [$con] */
- o1 = opimm(ctxt, int(p.As))
+ o1 = opimm(ctxt, p.As)
if p.To.Type != obj.TYPE_NONE {
o1 |= uint32((p.To.Offset & 0xffff) << 5)
}
case 12: /* movT $vcon, reg */
- o1 = omovlit(ctxt, int(p.As), p, &p.From, int(p.To.Reg))
+ o1 = omovlit(ctxt, p.As, p, &p.From, int(p.To.Reg))
case 13: /* addop $vcon, [R], R (64 bit literal); cmp $lcon,R -> addop $lcon,R, ZR */
o1 = omovlit(ctxt, AMOVD, p, &p.From, REGTMP)
r = rt
}
if p.To.Type != obj.TYPE_NONE && (p.To.Reg == REGSP || r == REGSP) {
- o2 = opxrrr(ctxt, int(p.As))
+ o2 = opxrrr(ctxt, p.As)
o2 |= REGTMP & 31 << 16
o2 |= LSL0_64
} else {
- o2 = oprrr(ctxt, int(p.As))
+ o2 = oprrr(ctxt, p.As)
o2 |= REGTMP & 31 << 16 /* shift is 0 */
}
}
case 15: /* mul/mneg/umulh/umull r,[r,]r; madd/msub Rm,Rn,Ra,Rd */
- o1 = oprrr(ctxt, int(p.As))
+ o1 = oprrr(ctxt, p.As)
rf := int(p.From.Reg)
rt := int(p.To.Reg)
o1 |= (uint32(rf&31) << 16) | (uint32(ra&31) << 10) | (uint32(r&31) << 5) | uint32(rt&31)
case 16: /* XremY R[,R],R -> XdivY; XmsubY */
- o1 = oprrr(ctxt, int(p.As))
+ o1 = oprrr(ctxt, p.As)
rf := int(p.From.Reg)
rt := int(p.To.Reg)
o2 |= (uint32(rf&31) << 16) | (uint32(r&31) << 10) | (REGTMP & 31 << 5) | uint32(rt&31)
case 17: /* op Rm,[Rn],Rd; default Rn=ZR */
- o1 = oprrr(ctxt, int(p.As))
+ o1 = oprrr(ctxt, p.As)
rf := int(p.From.Reg)
rt := int(p.To.Reg)
o1 |= (uint32(rf&31) << 16) | (uint32(r&31) << 5) | uint32(rt&31)
case 18: /* csel cond,Rn,Rm,Rd; cinc/cinv/cneg cond,Rn,Rd; cset cond,Rd */
- o1 = oprrr(ctxt, int(p.As))
+ o1 = oprrr(ctxt, p.As)
cond := int(p.From.Reg)
r := int(p.Reg)
cond := int(p.From.Reg)
var rf int
if p.From3.Type == obj.TYPE_REG {
- o1 = oprrr(ctxt, int(p.As))
+ o1 = oprrr(ctxt, p.As)
rf = int(p.From3.Reg) /* Rm */
} else {
- o1 = opirr(ctxt, int(p.As))
+ o1 = opirr(ctxt, p.As)
rf = int(p.From3.Offset & 0x1F)
}
r = int(o.param)
}
if v < 0 { /* unscaled 9-bit signed */
- o1 = olsr9s(ctxt, int32(opstr9(ctxt, int(p.As))), v, r, int(p.From.Reg))
+ o1 = olsr9s(ctxt, int32(opstr9(ctxt, p.As)), v, r, int(p.From.Reg))
} else {
v = int32(offsetshift(ctxt, int64(v), int(o.a3)))
- o1 = olsr12u(ctxt, int32(opstr12(ctxt, int(p.As))), v, r, int(p.From.Reg))
+ o1 = olsr12u(ctxt, int32(opstr12(ctxt, p.As)), v, r, int(p.From.Reg))
}
case 21: /* movT O(R),R -> ldrT */
r = int(o.param)
}
if v < 0 { /* unscaled 9-bit signed */
- o1 = olsr9s(ctxt, int32(opldr9(ctxt, int(p.As))), v, r, int(p.To.Reg))
+ o1 = olsr9s(ctxt, int32(opldr9(ctxt, p.As)), v, r, int(p.To.Reg))
} else {
v = int32(offsetshift(ctxt, int64(v), int(o.a1)))
//print("offset=%lld v=%ld a1=%d\n", instoffset, v, o->a1);
- o1 = olsr12u(ctxt, int32(opldr12(ctxt, int(p.As))), v, r, int(p.To.Reg))
+ o1 = olsr12u(ctxt, int32(opldr12(ctxt, p.As)), v, r, int(p.To.Reg))
}
case 22: /* movT (R)O!,R; movT O(R)!, R -> ldrT */
if v < -256 || v > 255 {
ctxt.Diag("offset out of range\n%v", p)
}
- o1 = opldrpp(ctxt, int(p.As))
+ o1 = opldrpp(ctxt, p.As)
if o.scond == C_XPOST {
o1 |= 1 << 10
} else {
if v < -256 || v > 255 {
ctxt.Diag("offset out of range\n%v", p)
}
- o1 = LD2STR(opldrpp(ctxt, int(p.As)))
+ o1 = LD2STR(opldrpp(ctxt, p.As))
if o.scond == C_XPOST {
o1 |= 1 << 10
} else {
if s {
ctxt.Diag("illegal SP reference\n%v", p)
}
- o1 = oprrr(ctxt, int(p.As))
+ o1 = oprrr(ctxt, p.As)
o1 |= (uint32(rf&31) << 16) | (REGZERO & 31 << 5) | uint32(rt&31)
} else if s {
- o1 = opirr(ctxt, int(p.As))
+ o1 = opirr(ctxt, p.As)
o1 |= (uint32(rf&31) << 5) | uint32(rt&31)
} else {
- o1 = oprrr(ctxt, int(p.As))
+ o1 = oprrr(ctxt, p.As)
o1 |= (uint32(rf&31) << 16) | (REGZERO & 31 << 5) | uint32(rt&31)
}
case 25: /* negX Rs, Rd -> subX Rs<<0, ZR, Rd */
- o1 = oprrr(ctxt, int(p.As))
+ o1 = oprrr(ctxt, p.As)
rf := int(p.From.Reg)
rt := int(p.To.Reg)
o1 |= (uint32(rf&31) << 16) | (REGZERO & 31 << 5) | uint32(rt&31)
case 26: /* negX Rm<<s, Rd -> subX Rm<<s, ZR, Rd */
- o1 = oprrr(ctxt, int(p.As))
+ o1 = oprrr(ctxt, p.As)
o1 |= uint32(p.From.Offset) /* includes reg, op, etc */
rt := int(p.To.Reg)
o1 |= (REGZERO & 31 << 5) | uint32(rt&31)
case 27: /* op Rm<<n[,Rn],Rd (extended register) */
- o1 = opxrrr(ctxt, int(p.As))
+ o1 = opxrrr(ctxt, p.As)
		if (p.From.Reg-obj.RBaseARM64)&REG_EXT != 0 {
ctxt.Diag("extended register not implemented\n%v", p)
if r == 0 {
r = int(p.To.Reg)
}
- o2 = oprrr(ctxt, int(p.As))
+ o2 = oprrr(ctxt, p.As)
o2 |= REGTMP & 31 << 16 /* shift is 0 */
o2 |= uint32(r&31) << 5
o2 |= uint32(p.To.Reg & 31)
case 29: /* op Rn, Rd */
- o1 = oprrr(ctxt, int(p.As))
+ o1 = oprrr(ctxt, p.As)
o1 |= uint32(p.From.Reg&31)<<5 | uint32(p.To.Reg&31)
case 30: /* movT R,L(R) -> strT */
- s := movesize(int(o.as))
+ s := movesize(o.as)
if s < 0 {
- ctxt.Diag("unexpected long move, op %v tab %v\n%v", obj.Aconv(int(p.As)), obj.Aconv(int(o.as)), p)
+ ctxt.Diag("unexpected long move, op %v tab %v\n%v", obj.Aconv(p.As), obj.Aconv(o.as), p)
}
v := int32(regoff(ctxt, &p.To))
if v < 0 {
r = int(o.param)
}
o1 = oaddi(ctxt, int32(opirr(ctxt, AADD)), hi, r, REGTMP)
- o2 = olsr12u(ctxt, int32(opstr12(ctxt, int(p.As))), ((v-hi)>>uint(s))&0xFFF, REGTMP, int(p.From.Reg))
+ o2 = olsr12u(ctxt, int32(opstr12(ctxt, p.As)), ((v-hi)>>uint(s))&0xFFF, REGTMP, int(p.From.Reg))
case 31: /* movT L(R), R -> ldrT */
- s := movesize(int(o.as))
+ s := movesize(o.as)
if s < 0 {
- ctxt.Diag("unexpected long move, op %v tab %v\n%v", obj.Aconv(int(p.As)), obj.Aconv(int(o.as)), p)
+ ctxt.Diag("unexpected long move, op %v tab %v\n%v", obj.Aconv(p.As), obj.Aconv(o.as), p)
}
v := int32(regoff(ctxt, &p.From))
if v < 0 {
r = int(o.param)
}
o1 = oaddi(ctxt, int32(opirr(ctxt, AADD)), hi, r, REGTMP)
- o2 = olsr12u(ctxt, int32(opldr12(ctxt, int(p.As))), ((v-hi)>>uint(s))&0xFFF, REGTMP, int(p.To.Reg))
+ o2 = olsr12u(ctxt, int32(opldr12(ctxt, p.As)), ((v-hi)>>uint(s))&0xFFF, REGTMP, int(p.To.Reg))
case 32: /* mov $con, R -> movz/movn */
r := 32
o1 |= uint32((((d >> uint(s*16)) & 0xFFFF) << 5) | int64((uint32(s)&3)<<21) | int64(rt&31))
case 33: /* movk $uimm16 << pos */
- o1 = opirr(ctxt, int(p.As))
+ o1 = opirr(ctxt, p.As)
d := p.From.Offset
if (d >> 16) != 0 {
o1 |= uint32(v)
case 38: /* clrex [$imm] */
- o1 = opimm(ctxt, int(p.As))
+ o1 = opimm(ctxt, p.As)
if p.To.Type == obj.TYPE_NONE {
o1 |= 0xF << 8
}
case 39: /* cbz R, rel */
- o1 = opirr(ctxt, int(p.As))
+ o1 = opirr(ctxt, p.As)
o1 |= uint32(p.From.Reg & 31)
o1 |= uint32(brdist(ctxt, p, 0, 19, 2) << 5)
case 40: /* tbz */
- o1 = opirr(ctxt, int(p.As))
+ o1 = opirr(ctxt, p.As)
v := int32(p.From.Offset)
if v < 0 || v > 63 {
o1 |= uint32(p.Reg)
case 41: /* eret, nop, others with no operands */
- o1 = op0(ctxt, int(p.As))
+ o1 = op0(ctxt, p.As)
case 42: /* bfm R,r,s,R */
- o1 = opbfm(ctxt, int(p.As), int(p.From.Offset), int(p.From3.Offset), int(p.Reg), int(p.To.Reg))
+ o1 = opbfm(ctxt, p.As, int(p.From.Offset), int(p.From3.Offset), int(p.Reg), int(p.To.Reg))
case 43: /* bfm aliases */
r := int(p.From.Offset)
}
case 44: /* extr $b, Rn, Rm, Rd */
- o1 = opextr(ctxt, int(p.As), int32(p.From.Offset), int(p.From3.Reg), int(p.Reg), int(p.To.Reg))
+ o1 = opextr(ctxt, p.As, int32(p.From.Offset), int(p.From3.Reg), int(p.Reg), int(p.To.Reg))
case 45: /* sxt/uxt[bhw] R,R; movT R,R -> sxtT R,R */
rf := int(p.From.Reg)
rt := int(p.To.Reg)
- as := int(p.As)
+ as := p.As
if rf == REGZERO {
as = AMOVWU /* clearer in disassembly */
}
}
case 46: /* cls */
- o1 = opbit(ctxt, int(p.As))
+ o1 = opbit(ctxt, p.As)
o1 |= uint32(p.From.Reg&31) << 5
o1 |= uint32(p.To.Reg & 31)
if r == 0 {
r = int(o.param)
}
- o2 = olsxrr(ctxt, int(p.As), REGTMP, r, int(p.From.Reg))
+ o2 = olsxrr(ctxt, p.As, REGTMP, r, int(p.From.Reg))
case 48: /* movT V(R), R -> ldrT (huge offset) */
o1 = omovlit(ctxt, AMOVW, p, &p.From, REGTMP)
if r == 0 {
r = int(o.param)
}
- o2 = olsxrr(ctxt, int(p.As), REGTMP, r, int(p.To.Reg))
+ o2 = olsxrr(ctxt, p.As, REGTMP, r, int(p.To.Reg))
case 50: /* sys/sysl */
- o1 = opirr(ctxt, int(p.As))
+ o1 = opirr(ctxt, p.As)
if (p.From.Offset &^ int64(SYSARG4(0x7, 0xF, 0xF, 0x7))) != 0 {
ctxt.Diag("illegal SYS argument\n%v", p)
}
case 51: /* dmb */
- o1 = opirr(ctxt, int(p.As))
+ o1 = opirr(ctxt, p.As)
if p.From.Type == obj.TYPE_CONST {
o1 |= uint32((p.From.Offset & 0xF) << 8)
}
case 52: /* hint */
- o1 = opirr(ctxt, int(p.As))
+ o1 = opirr(ctxt, p.As)
o1 |= uint32((p.From.Offset & 0x7F) << 5)
ctxt.Diag("bitmask immediate not implemented\n%v", p)
case 54: /* floating point arith */
- o1 = oprrr(ctxt, int(p.As))
+ o1 = oprrr(ctxt, p.As)
var rf int
if p.From.Type == obj.TYPE_CONST {
o1 |= (uint32(rf&31) << 16) | (uint32(r&31) << 5) | uint32(rt&31)
case 56: /* floating point compare */
- o1 = oprrr(ctxt, int(p.As))
+ o1 = oprrr(ctxt, p.As)
var rf int
if p.From.Type == obj.TYPE_CONST {
o1 |= uint32(rf&31)<<16 | uint32(rt&31)<<5
case 57: /* floating point conditional compare */
- o1 = oprrr(ctxt, int(p.As))
+ o1 = oprrr(ctxt, p.As)
cond := int(p.From.Reg)
nzcv := int(p.To.Offset)
o1 |= uint32(rf&31)<<16 | uint32(cond)<<12 | uint32(rt&31)<<5 | uint32(nzcv)
case 58: /* ldar/ldxr/ldaxr */
- o1 = opload(ctxt, int(p.As))
+ o1 = opload(ctxt, p.As)
o1 |= 0x1F << 16
o1 |= uint32(p.From.Reg) << 5
o1 |= uint32(p.To.Reg & 31)
case 59: /* stxr/stlxr */
- o1 = opstore(ctxt, int(p.As))
+ o1 = opstore(ctxt, p.As)
if p.RegTo2 != obj.REG_NONE {
o1 |= uint32(p.RegTo2&31) << 16
rel.Sym = p.To.Sym
rel.Add = p.To.Offset
rel.Type = obj.R_ADDRARM64
- o3 = olsr12u(ctxt, int32(opstr12(ctxt, int(p.As))), 0, REGTMP, int(p.From.Reg))
+ o3 = olsr12u(ctxt, int32(opstr12(ctxt, p.As)), 0, REGTMP, int(p.From.Reg))
case 65: /* movT addr,R -> adrp + add + movT (REGTMP), R */
o1 = ADR(1, 0, REGTMP)
rel.Sym = p.From.Sym
rel.Add = p.From.Offset
rel.Type = obj.R_ADDRARM64
- o3 = olsr12u(ctxt, int32(opldr12(ctxt, int(p.As))), 0, REGTMP, int(p.To.Reg))
+ o3 = olsr12u(ctxt, int32(opldr12(ctxt, p.As)), 0, REGTMP, int(p.To.Reg))
case 66: /* ldp O(R)!, (r1, r2); ldp (R)O!, (r1, r2) */
v := int32(p.From.Offset)
* also op Rn -> Rt
* also Rm*Rn op Ra -> Rd
*/
-func oprrr(ctxt *obj.Link, a int) uint32 {
+func oprrr(ctxt *obj.Link, a obj.As) uint32 {
switch a {
case AADC:
return S64 | 0<<30 | 0<<29 | 0xd0<<21 | 0<<10
* imm -> Rd
* imm op Rn -> Rd
*/
-func opirr(ctxt *obj.Link, a int) uint32 {
+func opirr(ctxt *obj.Link, a obj.As) uint32 {
switch a {
/* op $addcon, Rn, Rd */
case AMOVD, AADD:
return 0
}
-func opbit(ctxt *obj.Link, a int) uint32 {
+func opbit(ctxt *obj.Link, a obj.As) uint32 {
switch a {
case ACLS:
return S64 | OPBIT(5)
/*
* add/subtract extended register
*/
-func opxrrr(ctxt *obj.Link, a int) uint32 {
+func opxrrr(ctxt *obj.Link, a obj.As) uint32 {
switch a {
case AADD:
return S64 | 0<<30 | 0<<29 | 0x0b<<24 | 0<<22 | 1<<21 | LSL0_64
return 0
}
-func opimm(ctxt *obj.Link, a int) uint32 {
+func opimm(ctxt *obj.Link, a obj.As) uint32 {
switch a {
case ASVC:
return 0xD4<<24 | 0<<21 | 1 /* imm16<<5 */
/*
* pc-relative branches
*/
-func opbra(ctxt *obj.Link, a int) uint32 {
+func opbra(ctxt *obj.Link, a obj.As) uint32 {
switch a {
case ABEQ:
return OPBcc(0x0)
return 0
}
-func opbrr(ctxt *obj.Link, a int) uint32 {
+func opbrr(ctxt *obj.Link, a obj.As) uint32 {
switch a {
case ABL:
return OPBLR(1) /* BLR */
return 0
}
-func op0(ctxt *obj.Link, a int) uint32 {
+func op0(ctxt *obj.Link, a obj.As) uint32 {
switch a {
case ADRPS:
return 0x6B<<25 | 5<<21 | 0x1F<<16 | 0x1F<<5
/*
* register offset
*/
-func opload(ctxt *obj.Link, a int) uint32 {
+func opload(ctxt *obj.Link, a obj.As) uint32 {
switch a {
case ALDAR:
return LDSTX(3, 1, 1, 0, 1) | 0x1F<<10
return 0
}
-func opstore(ctxt *obj.Link, a int) uint32 {
+func opstore(ctxt *obj.Link, a obj.As) uint32 {
switch a {
case ASTLR:
return LDSTX(3, 1, 0, 0, 1) | 0x1F<<10
return uint32(o)
}
-func opldr12(ctxt *obj.Link, a int) uint32 {
+func opldr12(ctxt *obj.Link, a obj.As) uint32 {
switch a {
case AMOVD:
return LDSTR12U(3, 0, 1) /* imm12<<10 | Rn<<5 | Rt */
return 0
}
-func opstr12(ctxt *obj.Link, a int) uint32 {
+func opstr12(ctxt *obj.Link, a obj.As) uint32 {
return LD2STR(opldr12(ctxt, a))
}
return uint32(o)
}
-func opldr9(ctxt *obj.Link, a int) uint32 {
+func opldr9(ctxt *obj.Link, a obj.As) uint32 {
switch a {
case AMOVD:
return LDSTR9S(3, 0, 1) /* simm9<<12 | Rn<<5 | Rt */
return 0
}
-func opstr9(ctxt *obj.Link, a int) uint32 {
+func opstr9(ctxt *obj.Link, a obj.As) uint32 {
return LD2STR(opldr9(ctxt, a))
}
-func opldrpp(ctxt *obj.Link, a int) uint32 {
+func opldrpp(ctxt *obj.Link, a obj.As) uint32 {
switch a {
case AMOVD:
return 3<<30 | 7<<27 | 0<<26 | 0<<24 | 1<<22 /* simm9<<12 | Rn<<5 | Rt */
/*
* load/store register (extended register)
*/
-func olsxrr(ctxt *obj.Link, as int, rt int, r1 int, r2 int) uint32 {
+func olsxrr(ctxt *obj.Link, as obj.As, rt int, r1 int, r2 int) uint32 {
ctxt.Diag("need load/store extended register\n%v", ctxt.Curp)
return 0xffffffff
}
/*
* load a a literal value into dr
*/
-func omovlit(ctxt *obj.Link, as int, p *obj.Prog, a *obj.Addr, dr int) uint32 {
+func omovlit(ctxt *obj.Link, as obj.As, p *obj.Prog, a *obj.Addr, dr int) uint32 {
var o1 int32
if p.Pcond == nil { /* not in literal pool */
aclass(ctxt, a)
return uint32(o1)
}
-func opbfm(ctxt *obj.Link, a int, r int, s int, rf int, rt int) uint32 {
+func opbfm(ctxt *obj.Link, a obj.As, r int, s int, rf int, rt int) uint32 {
var c uint32
o := opirr(ctxt, a)
if (o & (1 << 31)) == 0 {
return o
}
-func opextr(ctxt *obj.Link, a int, v int32, rn int, rm int, rt int) uint32 {
+func opextr(ctxt *obj.Link, a obj.As, v int32, rn int, rm int, rt int) uint32 {
var c uint32
o := opirr(ctxt, a)
if (o & (1 << 31)) != 0 {
/*
* size in log2(bytes)
*/
-func movesize(a int) int {
+func movesize(a obj.As) int {
switch a {
case AMOVD:
return 3
"math"
)
-var complements = []int16{
+var complements = []obj.As{
AADD: ASUB,
AADDW: ASUBW,
ASUB: AADD,
s.Text = firstp.Link
}
-func relinv(a int) int {
+func relinv(a obj.As) obj.As {
switch a {
case ABEQ:
return ABNE
func xfol(ctxt *obj.Link, p *obj.Prog, last **obj.Prog) {
var q *obj.Prog
var r *obj.Prog
- var a int
var i int
loop:
if p == nil {
return
}
- a = int(p.As)
+ a := p.As
if a == AB {
q = p.Pcond
if q != nil {
if q == *last || q == nil {
break
}
- a = int(q.As)
+ a = q.As
if a == obj.ANOP {
i--
continue
a = AB
q = ctxt.NewProg()
- q.As = int16(a)
+ q.As = a
q.Lineno = p.Lineno
q.To.Type = obj.TYPE_BRANCH
q.To.Offset = p.Pc
q = obj.Brchain(ctxt, p.Link)
if a != obj.ATEXT {
if q != nil && (q.Mark&FOLL != 0) {
- p.As = int16(relinv(a))
+ p.As = relinv(a)
p.Link = p.Pcond
p.Pcond = q
}
q = p
}
- var o int
var q2 *obj.Prog
var retjmp *obj.LSym
for p := cursym.Text; p != nil; p = p.Link {
- o = int(p.As)
+ o := p.As
switch o {
case obj.ATEXT:
cursym.Text = p
p.To.Class = 0
}
-var unaryDst = map[int]bool{
+var unaryDst = map[obj.As]bool{
AWORD: true,
ADWORD: true,
ABL: true,
}
Symgrow(ctxt, s, int64(off+siz))
- switch int(p.To.Type) {
+ switch p.To.Type {
default:
ctxt.Diag("bad data: %v", p)
copy(s.P[off:off+siz], p.To.Val.(string))
case TYPE_CONST, TYPE_ADDR:
- if p.To.Sym != nil || int(p.To.Type) == TYPE_ADDR {
+ if p.To.Sym != nil || p.To.Type == TYPE_ADDR {
r := Addrel(s)
r.Off = off
r.Siz = uint8(siz)
// scale = 1
//
type Addr struct {
- Type int16
Reg int16
Index int16
Scale int16 // Sometimes holds a register.
+ Type AddrType
Name int8
Class int8
Etype uint8
Node interface{} // for use by compiler
}
+type AddrType uint8
+
const (
NAME_NONE = 0 + iota
NAME_EXTERN
)
const (
- TYPE_NONE = 0
-)
+ TYPE_NONE AddrType = 0
-const (
- TYPE_BRANCH = 5 + iota
+ TYPE_BRANCH AddrType = 5 + iota
TYPE_TEXTSIZE
TYPE_MEM
TYPE_CONST
Pc int64
Lineno int32
Spadj int32
- As int16
+ As As // Assembler opcode.
Reg int16
RegTo2 int16 // 2nd register output operand
Mark uint16 // bitmask of arch-specific items
}
// From3Type returns From3.Type, or TYPE_NONE when From3 is nil.
-func (p *Prog) From3Type() int16 {
+func (p *Prog) From3Type() AddrType {
if p.From3 == nil {
return TYPE_NONE
}
Regindex uint64 // registers used by addressing mode
}
-// Prog.as opcodes.
-// These are the portable opcodes, common to all architectures.
-// Each architecture defines many more arch-specific opcodes,
-// with values starting at A_ARCHSPECIFIC.
-// Each architecture adds an offset to this so each machine has
-// distinct space for its instructions. The offset is a power of
-// two so it can be masked to return to origin zero.
-// See the definitions of ABase386 etc.
+// An As denotes an assembler opcode.
+// There are some portable opcodes, declared here in package obj,
+// that are common to all architectures.
+// However, the majority of opcodes are arch-specific
+// and are declared in their respective architecture's subpackage.
+type As int16
+
+// These are the portable opcodes.
const (
- AXXX = 0 + iota
+ AXXX As = iota
ACALL
ACHECKNIL
ADATA
A_ARCHSPECIFIC
)
+// Each architecture is allotted a distinct subspace of opcode values
+// for declaring its arch-specific opcodes.
+// Within this subspace, the first arch-specific opcode should be
+// at offset A_ARCHSPECIFIC.
+//
+// Subspaces are aligned to a power of two so opcodes can be masked
+// with AMask and used as compact array indices.
+const (
+ ABase386 = (1 + iota) << 12
+ ABaseARM
+ ABaseAMD64
+ ABasePPC64
+ ABaseARM64
+ ABaseMIPS64
+
+ AMask = 1<<12 - 1 // AND with this to use the opcode as an array index.
+)
+
// An LSym is the sort of symbol that is written to an object file.
type LSym struct {
Name string
STYPE
SSTRING
SGOSTRING
+ SGOSTRINGHDR
SGOFUNC
SGCBITS
SRODATA
STYPERELRO
SSTRINGRELRO
SGOSTRINGRELRO
+ SGOSTRINGHDRRELRO
SGOFUNCRELRO
SGCBITSRELRO
SRODATARELRO
Assemble func(*Link, *LSym)
Follow func(*Link, *LSym)
Progedit func(*Link, *Prog)
- UnaryDst map[int]bool // Instruction takes one operand, a destination.
+ UnaryDst map[As]bool // Instruction takes one operand, a destination.
Minlc int
Ptrsize int
Regsize int
ADIVU
ADIVW
AGOK
+ ALUI
AMOVB
AMOVBU
AMOVD
"DIVU",
"DIVW",
"GOK",
+ "LUI",
"MOVB",
"MOVBU",
"MOVD",
)
type Optab struct {
- as int16
+ as obj.As
a1 uint8
a2 uint8
a3 uint8
{obj.AXXX, C_NONE, C_NONE, C_NONE, 0, 4, 0},
}
-type Oprang struct {
- start []Optab
- stop []Optab
-}
-
-var oprange [ALAST & obj.AMask]Oprang
+var oprange [ALAST & obj.AMask][]Optab
-var xcmp [C_NCLASS][C_NCLASS]uint8
+var xcmp [C_NCLASS][C_NCLASS]bool
func span0(ctxt *obj.Link, cursym *obj.LSym) {
p := cursym.Text
ctxt.Cursym = cursym
ctxt.Autosize = int32(p.To.Offset + 8)
- if oprange[AOR&obj.AMask].start == nil {
+ if oprange[AOR&obj.AMask] == nil {
buildop(ctxt)
}
}
func oplook(ctxt *obj.Link, p *obj.Prog) *Optab {
- if oprange[AOR&obj.AMask].start == nil {
+ if oprange[AOR&obj.AMask] == nil {
buildop(ctxt)
}
}
//print("oplook %P %d %d %d\n", p, a1, a2, a3);
- r0 := p.As & obj.AMask
- o := oprange[r0].start
- if o == nil {
- o = oprange[r0].stop /* just generate an error */
- }
- e := oprange[r0].stop
- c1 := xcmp[a1][:]
- c3 := xcmp[a3][:]
- for ; -cap(o) < -cap(e); o = o[1:] {
- if int(o[0].a2) == a2 {
- if c1[o[0].a1] != 0 {
- if c3[o[0].a3] != 0 {
- p.Optab = uint16((-cap(o) + cap(optab)) + 1)
- return &o[0]
- }
- }
+ ops := oprange[p.As&obj.AMask]
+ c1 := &xcmp[a1]
+ c3 := &xcmp[a3]
+ for i := range ops {
+ op := &ops[i]
+ if int(op.a2) == a2 && c1[op.a1] && c3[op.a3] {
+ p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
+ return op
}
}
- ctxt.Diag("illegal combination %v %v %v %v", obj.Aconv(int(p.As)), DRconv(a1), DRconv(a2), DRconv(a3))
+ ctxt.Diag("illegal combination %v %v %v %v", obj.Aconv(p.As), DRconv(a1), DRconv(a2), DRconv(a3))
prasm(p)
- if o == nil {
- o = optab
+ if ops == nil {
+ ops = optab
}
- return &o[0]
+ return &ops[0]
}
func cmp(a int, b int) bool {
}
return false
}
-func opset(a, b0 int16) {
+func opset(a, b0 obj.As) {
	oprange[a&obj.AMask] = oprange[b0&obj.AMask]
}
for i := 0; i < C_NCLASS; i++ {
for n = 0; n < C_NCLASS; n++ {
if cmp(n, i) {
- xcmp[i][n] = 1
+ xcmp[i][n] = true
}
}
}
for i := 0; i < n; i++ {
r := optab[i].as
r0 := r & obj.AMask
- oprange[r0].start = optab[i:]
+ start := i
for optab[i].as == r {
i++
}
- oprange[r0].stop = optab[i:]
+ oprange[r0] = optab[start:i]
i--
switch r {
default:
- ctxt.Diag("unknown op in build: %v", obj.Aconv(int(r)))
+ ctxt.Diag("unknown op in build: %v", obj.Aconv(r))
log.Fatalf("bad code")
case AABSF:
if r == 0 {
r = int(p.To.Reg)
}
- o1 = OP_RRR(oprrr(ctxt, int(p.As)), uint32(p.From.Reg), uint32(r), uint32(p.To.Reg))
+ o1 = OP_RRR(oprrr(ctxt, p.As), uint32(p.From.Reg), uint32(r), uint32(p.To.Reg))
case 3: /* mov $soreg, r ==> or/add $i,o,r */
v := regoff(ctxt, &p.From)
r = int(p.To.Reg)
}
- o1 = OP_IRR(opirr(ctxt, int(p.As)), uint32(v), uint32(r), uint32(p.To.Reg))
+ o1 = OP_IRR(opirr(ctxt, p.As), uint32(v), uint32(r), uint32(p.To.Reg))
case 5: /* syscall */
- o1 = uint32(oprrr(ctxt, int(p.As)))
+ o1 = uint32(oprrr(ctxt, p.As))
case 6: /* beq r1,[r2],sbra */
v := int32(0)
if (v<<16)>>16 != v {
ctxt.Diag("short branch too far\n%v", p)
}
- o1 = OP_IRR(opirr(ctxt, int(p.As)), uint32(v), uint32(p.From.Reg), uint32(p.Reg))
+ o1 = OP_IRR(opirr(ctxt, p.As), uint32(v), uint32(p.From.Reg), uint32(p.Reg))
// for ABFPT and ABFPF only: always fill delay slot with 0
// see comments in func preprocess for details.
o2 = 0
r = int(o.param)
}
v := regoff(ctxt, &p.To)
- o1 = OP_IRR(opirr(ctxt, int(p.As)), uint32(v), uint32(r), uint32(p.From.Reg))
+ o1 = OP_IRR(opirr(ctxt, p.As), uint32(v), uint32(r), uint32(p.From.Reg))
case 8: /* mov soreg, r ==> lw o(r) */
r := int(p.From.Reg)
r = int(o.param)
}
v := regoff(ctxt, &p.From)
- o1 = OP_IRR(opirr(ctxt, int(p.As)+ALAST), uint32(v), uint32(r), uint32(p.To.Reg))
+ o1 = OP_IRR(opirr(ctxt, -p.As), uint32(v), uint32(r), uint32(p.To.Reg))
case 9: /* sll r1,[r2],r3 */
r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
- o1 = OP_RRR(oprrr(ctxt, int(p.As)), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
+ o1 = OP_RRR(oprrr(ctxt, p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
case 10: /* add $con,[r1],r2 ==> mov $con, t; add t,[r1],r2 */
v := regoff(ctxt, &p.From)
if r == 0 {
r = int(p.To.Reg)
}
- o2 = OP_RRR(oprrr(ctxt, int(p.As)), uint32(REGTMP), uint32(r), uint32(p.To.Reg))
+ o2 = OP_RRR(oprrr(ctxt, p.As), uint32(REGTMP), uint32(r), uint32(p.To.Reg))
case 11: /* jmp lbra */
v := int32(0)
} else {
v = int32(p.Pcond.Pc) >> 2
}
- o1 = OP_JMP(opirr(ctxt, int(p.As)), uint32(v))
+ o1 = OP_JMP(opirr(ctxt, p.As), uint32(v))
if p.To.Sym == nil {
p.To.Sym = ctxt.Cursym.Text.From.Sym
p.To.Offset = p.Pcond.Pc
}
case 14: /* movwu r,r */
- o1 = OP_SRR(opirr(ctxt, ASLLV+ALAST), uint32(0), uint32(p.From.Reg), uint32(p.To.Reg))
+ o1 = OP_SRR(opirr(ctxt, -ASLLV), uint32(0), uint32(p.From.Reg), uint32(p.To.Reg))
if p.As == AMOVWU {
- o2 = OP_SRR(opirr(ctxt, ASRLV+ALAST), uint32(0), uint32(p.To.Reg), uint32(p.To.Reg))
+ o2 = OP_SRR(opirr(ctxt, -ASRLV), uint32(0), uint32(p.To.Reg), uint32(p.To.Reg))
} else {
- o2 = OP_SRR(opirr(ctxt, ASRAV+ALAST), uint32(0), uint32(p.To.Reg), uint32(p.To.Reg))
+ o2 = OP_SRR(opirr(ctxt, -ASRAV), uint32(0), uint32(p.To.Reg), uint32(p.To.Reg))
}
case 16: /* sll $c,[r1],r2 */
/* OP_SRR will use only the low 5 bits of the shift value */
if v >= 32 && vshift(p.As) {
- o1 = OP_SRR(opirr(ctxt, int(p.As)+ALAST), uint32(v-32), uint32(r), uint32(p.To.Reg))
+ o1 = OP_SRR(opirr(ctxt, -p.As), uint32(v-32), uint32(r), uint32(p.To.Reg))
} else {
- o1 = OP_SRR(opirr(ctxt, int(p.As)), uint32(v), uint32(r), uint32(p.To.Reg))
+ o1 = OP_SRR(opirr(ctxt, p.As), uint32(v), uint32(r), uint32(p.To.Reg))
}
case 18: /* jmp [r1],0(r2) */
if r == 0 {
r = int(o.param)
}
- o1 = OP_RRR(oprrr(ctxt, int(p.As)), uint32(0), uint32(p.To.Reg), uint32(r))
+ o1 = OP_RRR(oprrr(ctxt, p.As), uint32(0), uint32(p.To.Reg), uint32(r))
rel := obj.Addrel(ctxt.Cursym)
rel.Off = int32(ctxt.Pc)
rel.Siz = 0
case 19: /* mov $lcon,r ==> lu+or */
v := regoff(ctxt, &p.From)
- o1 = OP_IRR(opirr(ctxt, ALAST), uint32(v>>16), uint32(REGZERO), uint32(p.To.Reg))
+ o1 = OP_IRR(opirr(ctxt, ALUI), uint32(v>>16), uint32(REGZERO), uint32(p.To.Reg))
o2 = OP_IRR(opirr(ctxt, AOR), uint32(v), uint32(p.To.Reg), uint32(p.To.Reg))
if p.From.Sym != nil {
rel := obj.Addrel(ctxt.Cursym)
o1 = OP_RRR(a, uint32(REGZERO), uint32(p.From.Reg), uint32(REGZERO))
case 22: /* mul r1,r2 */
- o1 = OP_RRR(oprrr(ctxt, int(p.As)), uint32(p.From.Reg), uint32(p.Reg), uint32(REGZERO))
+ o1 = OP_RRR(oprrr(ctxt, p.As), uint32(p.From.Reg), uint32(p.Reg), uint32(REGZERO))
case 23: /* add $lcon,r1,r2 ==> lu+or+add */
v := regoff(ctxt, &p.From)
- o1 = OP_IRR(opirr(ctxt, ALAST), uint32(v>>16), uint32(REGZERO), uint32(REGTMP))
+ o1 = OP_IRR(opirr(ctxt, ALUI), uint32(v>>16), uint32(REGZERO), uint32(REGTMP))
o2 = OP_IRR(opirr(ctxt, AOR), uint32(v), uint32(REGTMP), uint32(REGTMP))
r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
- o3 = OP_RRR(oprrr(ctxt, int(p.As)), uint32(REGTMP), uint32(r), uint32(p.To.Reg))
+ o3 = OP_RRR(oprrr(ctxt, p.As), uint32(REGTMP), uint32(r), uint32(p.To.Reg))
case 24: /* mov $ucon,r ==> lu r */
v := regoff(ctxt, &p.From)
- o1 = OP_IRR(opirr(ctxt, ALAST), uint32(v>>16), uint32(REGZERO), uint32(p.To.Reg))
+ o1 = OP_IRR(opirr(ctxt, ALUI), uint32(v>>16), uint32(REGZERO), uint32(p.To.Reg))
case 25: /* add/and $ucon,[r1],r2 ==> lu $con,t; add t,[r1],r2 */
v := regoff(ctxt, &p.From)
- o1 = OP_IRR(opirr(ctxt, ALAST), uint32(v>>16), uint32(REGZERO), uint32(REGTMP))
+ o1 = OP_IRR(opirr(ctxt, ALUI), uint32(v>>16), uint32(REGZERO), uint32(REGTMP))
r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
- o2 = OP_RRR(oprrr(ctxt, int(p.As)), uint32(REGTMP), uint32(r), uint32(p.To.Reg))
+ o2 = OP_RRR(oprrr(ctxt, p.As), uint32(REGTMP), uint32(r), uint32(p.To.Reg))
case 26: /* mov $lsext/auto/oreg,r ==> lu+or+add */
v := regoff(ctxt, &p.From)
- o1 = OP_IRR(opirr(ctxt, ALAST), uint32(v>>16), uint32(REGZERO), uint32(REGTMP))
+ o1 = OP_IRR(opirr(ctxt, ALUI), uint32(v>>16), uint32(REGZERO), uint32(REGTMP))
o2 = OP_IRR(opirr(ctxt, AOR), uint32(v), uint32(REGTMP), uint32(REGTMP))
r := int(p.From.Reg)
if r == 0 {
if r == 0 {
r = int(o.param)
}
- a := AMOVF + ALAST
+ a := -AMOVF
if p.As == AMOVD {
- a = AMOVD + ALAST
+ a = -AMOVD
}
switch o.size {
case 16:
- o1 = OP_IRR(opirr(ctxt, ALAST), uint32(v>>16), uint32(REGZERO), uint32(REGTMP))
+ o1 = OP_IRR(opirr(ctxt, ALUI), uint32(v>>16), uint32(REGZERO), uint32(REGTMP))
o2 = OP_IRR(opirr(ctxt, AOR), uint32(v), uint32(REGTMP), uint32(REGTMP))
o3 = OP_RRR(opirr(ctxt, AADDVU), uint32(r), uint32(REGTMP), uint32(REGTMP))
o4 = OP_IRR(opirr(ctxt, a), uint32(0), uint32(r), uint32(p.To.Reg))
}
switch o.size {
case 16:
- o1 = OP_IRR(opirr(ctxt, ALAST), uint32(v>>16), uint32(REGZERO), uint32(REGTMP))
+ o1 = OP_IRR(opirr(ctxt, ALUI), uint32(v>>16), uint32(REGZERO), uint32(REGTMP))
o2 = OP_IRR(opirr(ctxt, AOR), uint32(v), uint32(REGTMP), uint32(REGTMP))
o3 = OP_RRR(opirr(ctxt, AADDVU), uint32(r), uint32(REGTMP), uint32(REGTMP))
o4 = OP_IRR(opirr(ctxt, a), uint32(0), uint32(REGTMP), uint32(p.From.Reg))
if r == 0 {
r = int(p.To.Reg)
}
- o1 = OP_FRRR(oprrr(ctxt, int(p.As)), uint32(p.From.Reg), uint32(r), uint32(p.To.Reg))
+ o1 = OP_FRRR(oprrr(ctxt, p.As), uint32(p.From.Reg), uint32(r), uint32(p.To.Reg))
case 33: /* fabs fr1, fr3 */
- o1 = OP_FRRR(oprrr(ctxt, int(p.As)), uint32(0), uint32(p.From.Reg), uint32(p.To.Reg))
+ o1 = OP_FRRR(oprrr(ctxt, p.As), uint32(0), uint32(p.From.Reg), uint32(p.To.Reg))
case 34: /* mov $con,fr ==> or/add $i,t; mov t,fr */
v := regoff(ctxt, &p.From)
if r == 0 {
r = int(o.param)
}
- o1 = OP_IRR(opirr(ctxt, ALAST), uint32(v>>16), uint32(REGZERO), uint32(REGTMP))
+ o1 = OP_IRR(opirr(ctxt, ALUI), uint32(v>>16), uint32(REGZERO), uint32(REGTMP))
o2 = OP_IRR(opirr(ctxt, AOR), uint32(v), uint32(REGTMP), uint32(REGTMP))
o3 = OP_RRR(oprrr(ctxt, AADDVU), uint32(r), uint32(REGTMP), uint32(REGTMP))
- o4 = OP_IRR(opirr(ctxt, int(p.As)), uint32(0), uint32(REGTMP), uint32(p.From.Reg))
+ o4 = OP_IRR(opirr(ctxt, p.As), uint32(0), uint32(REGTMP), uint32(p.From.Reg))
case 36: /* mov lext/auto/oreg,r ==> lw o(r30) */
v := regoff(ctxt, &p.From)
if r == 0 {
r = int(o.param)
}
- o1 = OP_IRR(opirr(ctxt, ALAST), uint32(v>>16), uint32(REGZERO), uint32(REGTMP))
+ o1 = OP_IRR(opirr(ctxt, ALUI), uint32(v>>16), uint32(REGZERO), uint32(REGTMP))
o2 = OP_IRR(opirr(ctxt, AOR), uint32(v), uint32(REGTMP), uint32(REGTMP))
o3 = OP_RRR(oprrr(ctxt, AADDVU), uint32(r), uint32(REGTMP), uint32(REGTMP))
- o4 = OP_IRR(opirr(ctxt, int(p.As)+ALAST), uint32(0), uint32(REGTMP), uint32(p.To.Reg))
+ o4 = OP_IRR(opirr(ctxt, -p.As), uint32(0), uint32(REGTMP), uint32(p.To.Reg))
case 37: /* movw r,mr */
a := SP(2, 0) | (4 << 21) /* mtc0 */
/* relocation operations */
case 50: /* mov r,addr ==> lu + or + sw (REGTMP) */
- o1 = OP_IRR(opirr(ctxt, ALAST), uint32(0), uint32(REGZERO), uint32(REGTMP))
+ o1 = OP_IRR(opirr(ctxt, ALUI), uint32(0), uint32(REGZERO), uint32(REGTMP))
o2 = OP_IRR(opirr(ctxt, AOR), uint32(0), uint32(REGTMP), uint32(REGTMP))
rel := obj.Addrel(ctxt.Cursym)
rel.Off = int32(ctxt.Pc)
rel.Sym = p.To.Sym
rel.Add = p.To.Offset
rel.Type = obj.R_ADDRMIPS
- o3 = OP_IRR(opirr(ctxt, int(p.As)), uint32(0), uint32(REGTMP), uint32(p.From.Reg))
+ o3 = OP_IRR(opirr(ctxt, p.As), uint32(0), uint32(REGTMP), uint32(p.From.Reg))
case 51: /* mov addr,r ==> lu + or + lw (REGTMP) */
- o1 = OP_IRR(opirr(ctxt, ALAST), uint32(0), uint32(REGZERO), uint32(REGTMP))
+ o1 = OP_IRR(opirr(ctxt, ALUI), uint32(0), uint32(REGZERO), uint32(REGTMP))
o2 = OP_IRR(opirr(ctxt, AOR), uint32(0), uint32(REGTMP), uint32(REGTMP))
rel := obj.Addrel(ctxt.Cursym)
rel.Off = int32(ctxt.Pc)
rel.Sym = p.From.Sym
rel.Add = p.From.Offset
rel.Type = obj.R_ADDRMIPS
- o3 = OP_IRR(opirr(ctxt, int(p.As)+ALAST), uint32(0), uint32(REGTMP), uint32(p.To.Reg))
+ o3 = OP_IRR(opirr(ctxt, -p.As), uint32(0), uint32(REGTMP), uint32(p.To.Reg))
}
out[0] = o1
return int32(vregoff(ctxt, a))
}
-func oprrr(ctxt *obj.Link, a int) uint32 {
+func oprrr(ctxt *obj.Link, a obj.As) uint32 {
switch a {
case AADD:
return OP(4, 0)
return FPD(7, 6)
}
- if a >= ALAST {
- ctxt.Diag("bad rrr opcode %v+ALAST", obj.Aconv(a-ALAST))
+ if a < 0 {
+ ctxt.Diag("bad rrr opcode -%v", obj.Aconv(-a))
} else {
ctxt.Diag("bad rrr opcode %v", obj.Aconv(a))
}
return 0
}
-func opirr(ctxt *obj.Link, a int) uint32 {
+func opirr(ctxt *obj.Link, a obj.As) uint32 {
switch a {
case AADD:
return SP(1, 0)
return SP(1, 5)
case AXOR:
return SP(1, 6)
- case ALAST:
- return SP(1, 7) /* lui */
+ case ALUI:
+ return SP(1, 7)
case ASLL:
return OP(0, 0)
case ASRL:
return SP(0, 3)
case ABEQ:
return SP(0, 4)
- case ABEQ + ALAST:
+ case -ABEQ:
return SP(2, 4) /* likely */
case ABNE:
return SP(0, 5)
- case ABNE + ALAST:
+ case -ABNE:
return SP(2, 5) /* likely */
case ABGEZ:
return SP(0, 1) | BCOND(0, 1)
- case ABGEZ + ALAST:
+ case -ABGEZ:
return SP(0, 1) | BCOND(0, 3) /* likely */
case ABGEZAL:
return SP(0, 1) | BCOND(2, 1)
- case ABGEZAL + ALAST:
+ case -ABGEZAL:
return SP(0, 1) | BCOND(2, 3) /* likely */
case ABGTZ:
return SP(0, 7)
- case ABGTZ + ALAST:
+ case -ABGTZ:
return SP(2, 7) /* likely */
case ABLEZ:
return SP(0, 6)
- case ABLEZ + ALAST:
+ case -ABLEZ:
return SP(2, 6) /* likely */
case ABLTZ:
return SP(0, 1) | BCOND(0, 0)
- case ABLTZ + ALAST:
+ case -ABLTZ:
return SP(0, 1) | BCOND(0, 2) /* likely */
case ABLTZAL:
return SP(0, 1) | BCOND(2, 0)
- case ABLTZAL + ALAST:
+ case -ABLTZAL:
return SP(0, 1) | BCOND(2, 2) /* likely */
case ABFPT:
return SP(2, 1) | (257 << 16)
- case ABFPT + ALAST:
+ case -ABFPT:
return SP(2, 1) | (259 << 16) /* likely */
case ABFPF:
return SP(2, 1) | (256 << 16)
- case ABFPF + ALAST:
+ case -ABFPF:
return SP(2, 1) | (258 << 16) /* likely */
case AMOVB,
case ABREAK:
return SP(5, 7)
- case AMOVWL + ALAST:
+ case -AMOVWL:
return SP(4, 2)
- case AMOVWR + ALAST:
+ case -AMOVWR:
return SP(4, 6)
- case AMOVVL + ALAST:
+ case -AMOVVL:
return SP(3, 2)
- case AMOVVR + ALAST:
+ case -AMOVVR:
return SP(3, 3)
- case AMOVB + ALAST:
+ case -AMOVB:
return SP(4, 0)
- case AMOVBU + ALAST:
+ case -AMOVBU:
return SP(4, 4)
- case AMOVH + ALAST:
+ case -AMOVH:
return SP(4, 1)
- case AMOVHU + ALAST:
+ case -AMOVHU:
return SP(4, 5)
- case AMOVW + ALAST:
+ case -AMOVW:
return SP(4, 3)
- case AMOVWU + ALAST:
+ case -AMOVWU:
return SP(4, 7)
- case AMOVV + ALAST:
+ case -AMOVV:
return SP(6, 7)
- case AMOVF + ALAST:
+ case -AMOVF:
return SP(6, 1)
- case AMOVD + ALAST:
+ case -AMOVD:
return SP(6, 5)
case ASLLV:
return OP(7, 2)
case ASRAV:
return OP(7, 3)
- case ASLLV + ALAST:
+ case -ASLLV:
return OP(7, 4)
- case ASRLV + ALAST:
+ case -ASRLV:
return OP(7, 6)
- case ASRAV + ALAST:
+ case -ASRAV:
return OP(7, 7)
}
- if a >= ALAST {
- ctxt.Diag("bad irr opcode %v+ALAST", obj.Aconv(a-ALAST))
+ if a < 0 {
+ ctxt.Diag("bad irr opcode -%v", obj.Aconv(-a))
} else {
ctxt.Diag("bad irr opcode %v", obj.Aconv(a))
}
return 0
}
-func vshift(a int16) bool {
+func vshift(a obj.As) bool {
switch a {
case ASLLV,
ASRLV,
}
autosize := int32(0)
- var o int
var p1 *obj.Prog
var p2 *obj.Prog
for p := cursym.Text; p != nil; p = p.Link {
- o = int(p.As)
+ o := p.As
switch o {
case obj.ATEXT:
autosize = int32(textstksiz + 8)
// instruction scheduling
q = nil // p - 1
q1 = cursym.Text // top of block
- o = 0 // count of instructions
+ o := 0 // count of instructions
for p = cursym.Text; p != nil; p = p1 {
p1 = p.Link
o++
func xfol(ctxt *obj.Link, p *obj.Prog, last **obj.Prog) {
var q *obj.Prog
var r *obj.Prog
- var a int
var i int
loop:
if p == nil {
return
}
- a = int(p.As)
+ a := p.As
if a == AJMP {
q = p.Pcond
if (p.Mark&NOSCHED != 0) || q != nil && (q.Mark&NOSCHED != 0) {
if q == *last || (q.Mark&NOSCHED != 0) {
break
}
- a = int(q.As)
+ a = q.As
if a == obj.ANOP {
i--
continue
a = AJMP
q = ctxt.NewProg()
- q.As = int16(a)
+ q.As = a
q.Lineno = p.Lineno
q.To.Type = obj.TYPE_BRANCH
q.To.Offset = p.Pc
)
type Optab struct {
- as int16
+ as obj.As
a1 uint8
a2 uint8
a3 uint8
{obj.AXXX, C_NONE, C_NONE, C_NONE, C_NONE, 0, 4, 0},
}
-type Oprang struct {
- start []Optab
- stop []Optab
-}
-
-var oprange [ALAST & obj.AMask]Oprang
+var oprange [ALAST & obj.AMask][]Optab
-var xcmp [C_NCLASS][C_NCLASS]uint8
+var xcmp [C_NCLASS][C_NCLASS]bool
func span9(ctxt *obj.Link, cursym *obj.LSym) {
p := cursym.Text
ctxt.Cursym = cursym
ctxt.Autosize = int32(p.To.Offset)
- if oprange[AANDN&obj.AMask].start == nil {
+ if oprange[AANDN&obj.AMask] == nil {
buildop(ctxt)
}
case obj.TYPE_CONST,
obj.TYPE_ADDR:
switch a.Name {
- case obj.TYPE_NONE:
+ case obj.NAME_NONE:
ctxt.Instoffset = a.Offset
if a.Reg != 0 {
if -BIG <= ctxt.Instoffset && ctxt.Instoffset <= BIG {
func oplook(ctxt *obj.Link, p *obj.Prog) *Optab {
a1 := int(p.Optab)
if a1 != 0 {
- return &optab[a1-1:][0]
+ return &optab[a1-1]
}
a1 = int(p.From.Class)
if a1 == 0 {
}
//print("oplook %v %d %d %d %d\n", p, a1, a2, a3, a4);
- r0 := p.As & obj.AMask
-
- o := oprange[r0].start
- if o == nil {
- o = oprange[r0].stop /* just generate an error */
- }
- e := oprange[r0].stop
- c1 := xcmp[a1][:]
- c3 := xcmp[a3][:]
- c4 := xcmp[a4][:]
- for ; -cap(o) < -cap(e); o = o[1:] {
- if int(o[0].a2) == a2 {
- if c1[o[0].a1] != 0 {
- if c3[o[0].a3] != 0 {
- if c4[o[0].a4] != 0 {
- p.Optab = uint16((-cap(o) + cap(optab)) + 1)
- return &o[0]
- }
- }
- }
+ ops := oprange[p.As&obj.AMask]
+ c1 := &xcmp[a1]
+ c3 := &xcmp[a3]
+ c4 := &xcmp[a4]
+ for i := range ops {
+ op := &ops[i]
+ if int(op.a2) == a2 && c1[op.a1] && c3[op.a3] && c4[op.a4] {
+ p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
+ return op
}
}
- ctxt.Diag("illegal combination %v %v %v %v %v", obj.Aconv(int(p.As)), DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4))
+ ctxt.Diag("illegal combination %v %v %v %v %v", obj.Aconv(p.As), DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4))
prasm(p)
- if o == nil {
- o = optab
+ if ops == nil {
+ ops = optab
}
- return &o[0]
+ return &ops[0]
}
func cmp(a int, b int) bool {
}
return false
}
-func opset(a, b0 int16) {
+func opset(a, b0 obj.As) {
	oprange[a&obj.AMask] = oprange[b0&obj.AMask]
}
for i := 0; i < C_NCLASS; i++ {
for n = 0; n < C_NCLASS; n++ {
if cmp(n, i) {
- xcmp[i][n] = 1
+ xcmp[i][n] = true
}
}
}
for i := 0; i < n; i++ {
r := optab[i].as
r0 := r & obj.AMask
- oprange[r0].start = optab[i:]
+ start := i
for optab[i].as == r {
i++
}
- oprange[r0].stop = optab[i:]
+ oprange[r0] = optab[start:i]
i--
switch r {
default:
- ctxt.Diag("unknown op in build: %v", obj.Aconv(int(r)))
+ ctxt.Diag("unknown op in build: %v", obj.Aconv(r))
log.Fatalf("bad code")
case ADCBF: /* unary indexed: op (b+a); op (b) */
// opform returns the form (D_FORM or DS_FORM) of an instruction. Used to decide on
// which relocation to use with a load or store and only supports the needed
// instructions.
-func opform(ctxt *obj.Link, insn int32) int {
+func opform(ctxt *obj.Link, insn uint32) int {
	switch insn {
default:
ctxt.Diag("bad insn in loadform: %x", insn)
// Encode instructions and create relocation for accessing s+d according to the
// instruction op with source or destination (as appropriate) register reg.
-func symbolAccess(ctxt *obj.Link, s *obj.LSym, d int64, reg int16, op int32) (o1, o2 uint32) {
+func symbolAccess(ctxt *obj.Link, s *obj.LSym, d int64, reg int16, op uint32) (o1, o2 uint32) {
var base uint32
form := opform(ctxt, op)
if ctxt.Flag_shared != 0 {
base = REG_R0
}
o1 = AOP_IRR(OP_ADDIS, REGTMP, base, 0)
- o2 = AOP_IRR(uint32(op), uint32(reg), REGTMP, 0)
+ o2 = AOP_IRR(op, uint32(reg), REGTMP, 0)
rel := obj.Addrel(ctxt.Cursym)
rel.Off = int32(ctxt.Pc)
rel.Siz = 8
if r == 0 {
r = int(p.To.Reg)
}
- o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
+ o1 = AOP_RRR(oprrr(ctxt, p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
case 3: /* mov $soreg/addcon/ucon, r ==> addis/addi $i,reg',r */
d := vregoff(ctxt, &p.From)
if int32(int16(v)) != v {
log.Fatalf("mishandled instruction %v", p)
}
- o1 = AOP_IRR(uint32(opirr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), uint32(v))
+ o1 = AOP_IRR(opirr(ctxt, p.As), uint32(p.To.Reg), uint32(r), uint32(v))
case 5: /* syscall */
- o1 = uint32(oprrr(ctxt, int(p.As)))
+ o1 = oprrr(ctxt, p.As)
case 6: /* logical op Rb,[Rs,]Ra; no literal */
r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
- o1 = LOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
+ o1 = LOP_RRR(oprrr(ctxt, p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
case 7: /* mov r, soreg ==> stw o(r) */
r := int(p.To.Reg)
rel.Sym = obj.Linklookup(ctxt, "runtime.tls_g", 0)
rel.Type = obj.R_POWER_TLS
}
- o1 = AOP_RRR(uint32(opstorex(ctxt, int(p.As))), uint32(p.From.Reg), uint32(p.To.Index), uint32(r))
+ o1 = AOP_RRR(opstorex(ctxt, p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(r))
} else {
if int32(int16(v)) != v {
log.Fatalf("mishandled instruction %v", p)
}
- o1 = AOP_IRR(uint32(opstore(ctxt, int(p.As))), uint32(p.From.Reg), uint32(r), uint32(v))
+ o1 = AOP_IRR(opstore(ctxt, p.As), uint32(p.From.Reg), uint32(r), uint32(v))
}
case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r) */
rel.Sym = obj.Linklookup(ctxt, "runtime.tls_g", 0)
rel.Type = obj.R_POWER_TLS
}
- o1 = AOP_RRR(uint32(oploadx(ctxt, int(p.As))), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
+ o1 = AOP_RRR(oploadx(ctxt, p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
} else {
if int32(int16(v)) != v {
log.Fatalf("mishandled instruction %v", p)
}
- o1 = AOP_IRR(uint32(opload(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), uint32(v))
+ o1 = AOP_IRR(opload(ctxt, p.As), uint32(p.To.Reg), uint32(r), uint32(v))
}
case 9: /* movb soreg, r ==> lbz o(r),r2; extsb r2,r2 */
if v != 0 {
ctxt.Diag("illegal indexed instruction\n%v", p)
}
- o1 = AOP_RRR(uint32(oploadx(ctxt, int(p.As))), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
+ o1 = AOP_RRR(oploadx(ctxt, p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
} else {
- o1 = AOP_IRR(uint32(opload(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), uint32(v))
+ o1 = AOP_IRR(opload(ctxt, p.As), uint32(p.To.Reg), uint32(r), uint32(v))
}
o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
if r == 0 {
r = int(p.To.Reg)
}
- o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r))
+ o1 = AOP_RRR(oprrr(ctxt, p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r))
case 11: /* br/bl lbra */
v := int32(0)
}
}
- o1 = OP_BR(uint32(opirr(ctxt, int(p.As))), uint32(v), 0)
+ o1 = OP_BR(opirr(ctxt, p.As), uint32(v), 0)
if p.To.Sym != nil {
rel := obj.Addrel(ctxt.Cursym)
rel.Off = int32(ctxt.Pc)
a = 0
}
- o1 = LOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
+ o1 = LOP_RRR(oprrr(ctxt, p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
o1 |= (uint32(a) & 31) << 6
if a&0x20 != 0 {
o1 |= 1 << 5 /* mb[5] is top bit */
if v < -(1<<16) || v >= 1<<15 {
ctxt.Diag("branch too far\n%v", p)
}
- o1 = OP_BC(uint32(opirr(ctxt, int(p.As))), uint32(a), uint32(r), uint32(v), 0)
+ o1 = OP_BC(opirr(ctxt, p.As), uint32(a), uint32(r), uint32(v), 0)
case 15: /* br/bl (r) => mov r,lr; br/bl (lr) */
var v int32
if p.As == AADD && (r0iszero == 0 /*TypeKind(100016)*/ && p.Reg == 0 || r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0) {
ctxt.Diag("literal operation on R0\n%v", p)
}
- o1 = AOP_IRR(uint32(opirr(ctxt, int(p.As)+ALAST)), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
+ o1 = AOP_IRR(opirr(ctxt, -p.As), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
case 22: /* add $lcon,r1,r2 ==> cau+or+add */ /* could do add/sub more efficiently */
if p.To.Reg == REGTMP || p.Reg == REGTMP {
if r == 0 {
r = int(p.To.Reg)
}
- o3 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), REGTMP, uint32(r))
+ o3 = AOP_RRR(oprrr(ctxt, p.As), uint32(p.To.Reg), REGTMP, uint32(r))
if p.From.Sym != nil {
ctxt.Diag("%v is not supported", p)
}
if r == 0 {
r = int(p.To.Reg)
}
- o3 = LOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), REGTMP, uint32(r))
+ o3 = LOP_RRR(oprrr(ctxt, p.As), uint32(p.To.Reg), REGTMP, uint32(r))
if p.From.Sym != nil {
ctxt.Diag("%v is not supported", p)
}
v := regoff(ctxt, p.From3)
r := int(p.From.Reg)
- o1 = AOP_IRR(uint32(opirr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), uint32(v))
+ o1 = AOP_IRR(opirr(ctxt, p.As), uint32(p.To.Reg), uint32(r), uint32(v))
case 28: /* subc r1,$lcon,r2 ==> cau+or+subfc */
if p.To.Reg == REGTMP || p.From.Reg == REGTMP {
v := regoff(ctxt, p.From3)
o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16)
o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(v))
- o3 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP)
+ o3 = AOP_RRR(oprrr(ctxt, p.As), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP)
if p.From.Sym != nil {
ctxt.Diag("%v is not supported", p)
}
a = 0
}
- o1 = AOP_RRR(uint32(opirr(ctxt, int(p.As))), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
+ o1 = AOP_RRR(opirr(ctxt, p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
o1 |= (uint32(a) & 31) << 6
if v&0x20 != 0 {
o1 |= 1 << 1
if int32(mask[1]) != (63 - v) {
ctxt.Diag("invalid mask for shift: %x (shift %d)\n%v", uint64(d), v, p)
}
- o1 = AOP_RRR(uint32(opirr(ctxt, int(p.As))), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
+ o1 = AOP_RRR(opirr(ctxt, p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
o1 |= (uint32(mask[0]) & 31) << 6
if v&0x20 != 0 {
o1 |= 1 << 1
if r == 0 {
r = int(p.To.Reg)
}
- o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6
+ o1 = AOP_RRR(oprrr(ctxt, p.As), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6
case 33: /* fabs [frb,]frd; fmr. frb,frd */
r := int(p.From.Reg)
if oclass(&p.From) == C_NONE {
r = int(p.To.Reg)
}
- o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), 0, uint32(r))
+ o1 = AOP_RRR(oprrr(ctxt, p.As), uint32(p.To.Reg), 0, uint32(r))
case 34: /* FMADDx fra,frb,frc,frd (d=a*b+c); FSELx a<0? (d=b): (d=c) */
- o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.From3.Reg)&31)<<6
+ o1 = AOP_RRR(oprrr(ctxt, p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.From3.Reg)&31)<<6
case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */
v := regoff(ctxt, &p.To)
r = int(o.param)
}
o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
- o2 = AOP_IRR(uint32(opstore(ctxt, int(p.As))), uint32(p.From.Reg), REGTMP, uint32(v))
+ o2 = AOP_IRR(opstore(ctxt, p.As), uint32(p.From.Reg), REGTMP, uint32(v))
case 36: /* mov bz/h/hz lext/lauto/lreg,r ==> lbz/lha/lhz etc */
v := regoff(ctxt, &p.From)
r = int(o.param)
}
o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
- o2 = AOP_IRR(uint32(opload(ctxt, int(p.As))), uint32(p.To.Reg), REGTMP, uint32(v))
+ o2 = AOP_IRR(opload(ctxt, p.As), uint32(p.To.Reg), REGTMP, uint32(v))
case 37: /* movb lext/lauto/lreg,r ==> lbz o(reg),r; extsb r */
v := regoff(ctxt, &p.From)
r = int(o.param)
}
o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
- o2 = AOP_IRR(uint32(opload(ctxt, int(p.As))), uint32(p.To.Reg), REGTMP, uint32(v))
+ o2 = AOP_IRR(opload(ctxt, p.As), uint32(p.To.Reg), REGTMP, uint32(v))
o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
case 40: /* word */
o1 = uint32(regoff(ctxt, &p.From))
case 41: /* stswi */
- o1 = AOP_RRR(uint32(opirr(ctxt, int(p.As))), uint32(p.From.Reg), uint32(p.To.Reg), 0) | (uint32(regoff(ctxt, p.From3))&0x7F)<<11
+ o1 = AOP_RRR(opirr(ctxt, p.As), uint32(p.From.Reg), uint32(p.To.Reg), 0) | (uint32(regoff(ctxt, p.From3))&0x7F)<<11
case 42: /* lswi */
- o1 = AOP_RRR(uint32(opirr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(regoff(ctxt, p.From3))&0x7F)<<11
+ o1 = AOP_RRR(opirr(ctxt, p.As), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(regoff(ctxt, p.From3))&0x7F)<<11
case 43: /* unary indexed source: dcbf (b); dcbf (a+b) */
- o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), 0, uint32(p.From.Index), uint32(p.From.Reg))
+ o1 = AOP_RRR(oprrr(ctxt, p.As), 0, uint32(p.From.Index), uint32(p.From.Reg))
case 44: /* indexed store */
- o1 = AOP_RRR(uint32(opstorex(ctxt, int(p.As))), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
+ o1 = AOP_RRR(opstorex(ctxt, p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
case 45: /* indexed load */
- o1 = AOP_RRR(uint32(oploadx(ctxt, int(p.As))), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
+ o1 = AOP_RRR(oploadx(ctxt, p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
case 46: /* plain op */
- o1 = uint32(oprrr(ctxt, int(p.As)))
+ o1 = oprrr(ctxt, p.As)
case 47: /* op Ra, Rd; also op [Ra,] Rd */
r := int(p.From.Reg)
if r == 0 {
r = int(p.To.Reg)
}
- o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), 0)
+ o1 = AOP_RRR(oprrr(ctxt, p.As), uint32(p.To.Reg), uint32(r), 0)
case 48: /* op Rs, Ra */
r := int(p.From.Reg)
if r == 0 {
r = int(p.To.Reg)
}
- o1 = LOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), 0)
+ o1 = LOP_RRR(oprrr(ctxt, p.As), uint32(p.To.Reg), uint32(r), 0)
case 49: /* op Rb; op $n, Rb */
if p.From.Type != obj.TYPE_REG { /* tlbie $L, rB */
v := regoff(ctxt, &p.From) & 1
- o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21
+ o1 = AOP_RRR(oprrr(ctxt, p.As), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21
} else {
- o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), 0, 0, uint32(p.From.Reg))
+ o1 = AOP_RRR(oprrr(ctxt, p.As), 0, 0, uint32(p.From.Reg))
}
case 50: /* rem[u] r1[,r2],r3 */
if r == 0 {
r = int(p.To.Reg)
}
- v := oprrr(ctxt, int(p.As))
+ v := oprrr(ctxt, p.As)
t := v & (1<<10 | 1) /* OE|Rc */
o1 = AOP_RRR(uint32(v)&^uint32(t), REGTMP, uint32(r), uint32(p.From.Reg))
o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg))
if r == 0 {
r = int(p.To.Reg)
}
- v := oprrr(ctxt, int(p.As))
+ v := oprrr(ctxt, p.As)
t := v & (1<<10 | 1) /* OE|Rc */
o1 = AOP_RRR(uint32(v)&^uint32(t), REGTMP, uint32(r), uint32(p.From.Reg))
o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg))
case 52: /* mtfsbNx cr(n) */
v := regoff(ctxt, &p.From) & 31
- o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(v), 0, 0)
+ o1 = AOP_RRR(oprrr(ctxt, p.As), uint32(v), 0, 0)
case 53: /* mffsX ,fr1 */
o1 = AOP_RRR(OP_MFFS, uint32(p.To.Reg), 0, 0)
}
case 55: /* op Rb, Rd */
- o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), 0, uint32(p.From.Reg))
+ o1 = AOP_RRR(oprrr(ctxt, p.As), uint32(p.To.Reg), 0, uint32(p.From.Reg))
case 56: /* sra $sh,[s,]a; srd $sh,[s,]a */
v := regoff(ctxt, &p.From)
if r == 0 {
r = int(p.To.Reg)
}
- o1 = AOP_RRR(uint32(opirr(ctxt, int(p.As))), uint32(r), uint32(p.To.Reg), uint32(v)&31)
+ o1 = AOP_RRR(opirr(ctxt, p.As), uint32(r), uint32(p.To.Reg), uint32(v)&31)
if (p.As == ASRAD || p.As == ASRADCC) && (v&0x20 != 0) {
o1 |= 1 << 1 /* mb[5] */
}
if r == 0 {
r = int(p.To.Reg)
}
- o1 = LOP_IRR(uint32(opirr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), uint32(v))
+ o1 = LOP_IRR(opirr(ctxt, p.As), uint32(p.To.Reg), uint32(r), uint32(v))
case 59: /* or/and $ucon,,r */
v := regoff(ctxt, &p.From)
if r == 0 {
r = int(p.To.Reg)
}
- o1 = LOP_IRR(uint32(opirr(ctxt, int(p.As)+ALAST)), uint32(p.To.Reg), uint32(r), uint32(v)>>16) /* oris, xoris, andis */
+ o1 = LOP_IRR(opirr(ctxt, -p.As), uint32(p.To.Reg), uint32(r), uint32(v)>>16) /* oris, xoris, andis */
case 60: /* tw to,a,b */
r := int(regoff(ctxt, &p.From) & 31)
- o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(r), uint32(p.Reg), uint32(p.To.Reg))
+ o1 = AOP_RRR(oprrr(ctxt, p.As), uint32(r), uint32(p.Reg), uint32(p.To.Reg))
case 61: /* tw to,a,$simm */
r := int(regoff(ctxt, &p.From) & 31)
v := regoff(ctxt, &p.To)
- o1 = AOP_IRR(uint32(opirr(ctxt, int(p.As))), uint32(r), uint32(p.Reg), uint32(v))
+ o1 = AOP_IRR(opirr(ctxt, p.As), uint32(r), uint32(p.Reg), uint32(v))
case 62: /* rlwmi $sh,s,$mask,a */
v := regoff(ctxt, &p.From)
var mask [2]uint8
maskgen(ctxt, p, mask[:], uint32(regoff(ctxt, p.From3)))
- o1 = AOP_RRR(uint32(opirr(ctxt, int(p.As))), uint32(p.Reg), uint32(p.To.Reg), uint32(v))
+ o1 = AOP_RRR(opirr(ctxt, p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(v))
o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
case 63: /* rlwmi b,s,$mask,a */
var mask [2]uint8
maskgen(ctxt, p, mask[:], uint32(regoff(ctxt, p.From3)))
- o1 = AOP_RRR(uint32(opirr(ctxt, int(p.As))), uint32(p.Reg), uint32(p.To.Reg), uint32(p.From.Reg))
+ o1 = AOP_RRR(opirr(ctxt, p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(p.From.Reg))
o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
case 64: /* mtfsf fr[, $m] {,fpcsr} */
} else {
r = (int(p.Reg) & 7) << 2
}
- o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
+ o1 = AOP_RRR(oprrr(ctxt, p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
case 71: /* cmp[l] r,i,cr*/
var r int
} else {
r = (int(p.Reg) & 7) << 2
}
- o1 = AOP_RRR(uint32(opirr(ctxt, int(p.As))), uint32(r), uint32(p.From.Reg), 0) | uint32(regoff(ctxt, &p.To))&0xffff
+ o1 = AOP_RRR(opirr(ctxt, p.As), uint32(r), uint32(p.From.Reg), 0) | uint32(regoff(ctxt, &p.To))&0xffff
case 72: /* slbmte (Rb+Rs -> slb[Rb]) -> Rs, Rb */
- o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.From.Reg), 0, uint32(p.To.Reg))
+ o1 = AOP_RRR(oprrr(ctxt, p.As), uint32(p.From.Reg), 0, uint32(p.To.Reg))
case 73: /* mcrfs crfD,crfS */
if p.From.Type != obj.TYPE_REG || p.From.Reg != REG_FPSCR || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
o1 = 0x7fe00008 // trap always
}
- o2 = uint32(oprrr(ctxt, int(p.As)))
- o3 = AOP_RRR(uint32(oprrr(ctxt, AXOR)), REGZERO, REGZERO, REGZERO) // XOR R0, R0
+ o2 = oprrr(ctxt, p.As)
+ o3 = AOP_RRR(oprrr(ctxt, AXOR), REGZERO, REGZERO, REGZERO) // XOR R0, R0
case 78: /* undef */
o1 = 0 /* "An instruction consisting entirely of binary 0s is guaranteed
/* relocation operations */
case 74:
v := vregoff(ctxt, &p.To)
- o1, o2 = symbolAccess(ctxt, p.To.Sym, v, p.From.Reg, opstore(ctxt, int(p.As)))
+ o1, o2 = symbolAccess(ctxt, p.To.Sym, v, p.From.Reg, opstore(ctxt, p.As))
//if(dlm) reloc(&p->to, p->pc, 1);
case 75:
v := vregoff(ctxt, &p.From)
- o1, o2 = symbolAccess(ctxt, p.From.Sym, v, p.To.Reg, opload(ctxt, int(p.As)))
+ o1, o2 = symbolAccess(ctxt, p.From.Sym, v, p.To.Reg, opload(ctxt, p.As))
//if(dlm) reloc(&p->from, p->pc, 1);
case 76:
v := vregoff(ctxt, &p.From)
- o1, o2 = symbolAccess(ctxt, p.From.Sym, v, p.To.Reg, opload(ctxt, int(p.As)))
+ o1, o2 = symbolAccess(ctxt, p.From.Sym, v, p.To.Reg, opload(ctxt, p.As))
o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
//if(dlm) reloc(&p->from, p->pc, 1);
return int32(vregoff(ctxt, a))
}
-func oprrr(ctxt *obj.Link, a int) int32 {
+func oprrr(ctxt *obj.Link, a obj.As) uint32 {
switch a {
case AADD:
- return int32(OPVCC(31, 266, 0, 0))
+ return OPVCC(31, 266, 0, 0)
case AADDCC:
- return int32(OPVCC(31, 266, 0, 1))
+ return OPVCC(31, 266, 0, 1)
case AADDV:
- return int32(OPVCC(31, 266, 1, 0))
+ return OPVCC(31, 266, 1, 0)
case AADDVCC:
- return int32(OPVCC(31, 266, 1, 1))
+ return OPVCC(31, 266, 1, 1)
case AADDC:
- return int32(OPVCC(31, 10, 0, 0))
+ return OPVCC(31, 10, 0, 0)
case AADDCCC:
- return int32(OPVCC(31, 10, 0, 1))
+ return OPVCC(31, 10, 0, 1)
case AADDCV:
- return int32(OPVCC(31, 10, 1, 0))
+ return OPVCC(31, 10, 1, 0)
case AADDCVCC:
- return int32(OPVCC(31, 10, 1, 1))
+ return OPVCC(31, 10, 1, 1)
case AADDE:
- return int32(OPVCC(31, 138, 0, 0))
+ return OPVCC(31, 138, 0, 0)
case AADDECC:
- return int32(OPVCC(31, 138, 0, 1))
+ return OPVCC(31, 138, 0, 1)
case AADDEV:
- return int32(OPVCC(31, 138, 1, 0))
+ return OPVCC(31, 138, 1, 0)
case AADDEVCC:
- return int32(OPVCC(31, 138, 1, 1))
+ return OPVCC(31, 138, 1, 1)
case AADDME:
- return int32(OPVCC(31, 234, 0, 0))
+ return OPVCC(31, 234, 0, 0)
case AADDMECC:
- return int32(OPVCC(31, 234, 0, 1))
+ return OPVCC(31, 234, 0, 1)
case AADDMEV:
- return int32(OPVCC(31, 234, 1, 0))
+ return OPVCC(31, 234, 1, 0)
case AADDMEVCC:
- return int32(OPVCC(31, 234, 1, 1))
+ return OPVCC(31, 234, 1, 1)
case AADDZE:
- return int32(OPVCC(31, 202, 0, 0))
+ return OPVCC(31, 202, 0, 0)
case AADDZECC:
- return int32(OPVCC(31, 202, 0, 1))
+ return OPVCC(31, 202, 0, 1)
case AADDZEV:
- return int32(OPVCC(31, 202, 1, 0))
+ return OPVCC(31, 202, 1, 0)
case AADDZEVCC:
- return int32(OPVCC(31, 202, 1, 1))
+ return OPVCC(31, 202, 1, 1)
case AAND:
- return int32(OPVCC(31, 28, 0, 0))
+ return OPVCC(31, 28, 0, 0)
case AANDCC:
- return int32(OPVCC(31, 28, 0, 1))
+ return OPVCC(31, 28, 0, 1)
case AANDN:
- return int32(OPVCC(31, 60, 0, 0))
+ return OPVCC(31, 60, 0, 0)
case AANDNCC:
- return int32(OPVCC(31, 60, 0, 1))
+ return OPVCC(31, 60, 0, 1)
case ACMP:
- return int32(OPVCC(31, 0, 0, 0) | 1<<21) /* L=1 */
+ return OPVCC(31, 0, 0, 0) | 1<<21 /* L=1 */
case ACMPU:
- return int32(OPVCC(31, 32, 0, 0) | 1<<21)
+ return OPVCC(31, 32, 0, 0) | 1<<21
case ACMPW:
- return int32(OPVCC(31, 0, 0, 0)) /* L=0 */
+ return OPVCC(31, 0, 0, 0) /* L=0 */
case ACMPWU:
- return int32(OPVCC(31, 32, 0, 0))
+ return OPVCC(31, 32, 0, 0)
case ACNTLZW:
- return int32(OPVCC(31, 26, 0, 0))
+ return OPVCC(31, 26, 0, 0)
case ACNTLZWCC:
- return int32(OPVCC(31, 26, 0, 1))
+ return OPVCC(31, 26, 0, 1)
case ACNTLZD:
- return int32(OPVCC(31, 58, 0, 0))
+ return OPVCC(31, 58, 0, 0)
case ACNTLZDCC:
- return int32(OPVCC(31, 58, 0, 1))
+ return OPVCC(31, 58, 0, 1)
case ACRAND:
- return int32(OPVCC(19, 257, 0, 0))
+ return OPVCC(19, 257, 0, 0)
case ACRANDN:
- return int32(OPVCC(19, 129, 0, 0))
+ return OPVCC(19, 129, 0, 0)
case ACREQV:
- return int32(OPVCC(19, 289, 0, 0))
+ return OPVCC(19, 289, 0, 0)
case ACRNAND:
- return int32(OPVCC(19, 225, 0, 0))
+ return OPVCC(19, 225, 0, 0)
case ACRNOR:
- return int32(OPVCC(19, 33, 0, 0))
+ return OPVCC(19, 33, 0, 0)
case ACROR:
- return int32(OPVCC(19, 449, 0, 0))
+ return OPVCC(19, 449, 0, 0)
case ACRORN:
- return int32(OPVCC(19, 417, 0, 0))
+ return OPVCC(19, 417, 0, 0)
case ACRXOR:
- return int32(OPVCC(19, 193, 0, 0))
+ return OPVCC(19, 193, 0, 0)
case ADCBF:
- return int32(OPVCC(31, 86, 0, 0))
+ return OPVCC(31, 86, 0, 0)
case ADCBI:
- return int32(OPVCC(31, 470, 0, 0))
+ return OPVCC(31, 470, 0, 0)
case ADCBST:
- return int32(OPVCC(31, 54, 0, 0))
+ return OPVCC(31, 54, 0, 0)
case ADCBT:
- return int32(OPVCC(31, 278, 0, 0))
+ return OPVCC(31, 278, 0, 0)
case ADCBTST:
- return int32(OPVCC(31, 246, 0, 0))
+ return OPVCC(31, 246, 0, 0)
case ADCBZ:
- return int32(OPVCC(31, 1014, 0, 0))
+ return OPVCC(31, 1014, 0, 0)
case AREM, ADIVW:
- return int32(OPVCC(31, 491, 0, 0))
+ return OPVCC(31, 491, 0, 0)
case AREMCC, ADIVWCC:
- return int32(OPVCC(31, 491, 0, 1))
+ return OPVCC(31, 491, 0, 1)
case AREMV, ADIVWV:
- return int32(OPVCC(31, 491, 1, 0))
+ return OPVCC(31, 491, 1, 0)
case AREMVCC, ADIVWVCC:
- return int32(OPVCC(31, 491, 1, 1))
+ return OPVCC(31, 491, 1, 1)
case AREMU, ADIVWU:
- return int32(OPVCC(31, 459, 0, 0))
+ return OPVCC(31, 459, 0, 0)
case AREMUCC, ADIVWUCC:
- return int32(OPVCC(31, 459, 0, 1))
+ return OPVCC(31, 459, 0, 1)
case AREMUV, ADIVWUV:
- return int32(OPVCC(31, 459, 1, 0))
+ return OPVCC(31, 459, 1, 0)
case AREMUVCC, ADIVWUVCC:
- return int32(OPVCC(31, 459, 1, 1))
+ return OPVCC(31, 459, 1, 1)
case AREMD, ADIVD:
- return int32(OPVCC(31, 489, 0, 0))
+ return OPVCC(31, 489, 0, 0)
case AREMDCC, ADIVDCC:
- return int32(OPVCC(31, 489, 0, 1))
+ return OPVCC(31, 489, 0, 1)
case AREMDV, ADIVDV:
- return int32(OPVCC(31, 489, 1, 0))
+ return OPVCC(31, 489, 1, 0)
case AREMDVCC, ADIVDVCC:
- return int32(OPVCC(31, 489, 1, 1))
+ return OPVCC(31, 489, 1, 1)
case AREMDU, ADIVDU:
- return int32(OPVCC(31, 457, 0, 0))
+ return OPVCC(31, 457, 0, 0)
case AREMDUCC, ADIVDUCC:
- return int32(OPVCC(31, 457, 0, 1))
+ return OPVCC(31, 457, 0, 1)
case AREMDUV, ADIVDUV:
- return int32(OPVCC(31, 457, 1, 0))
+ return OPVCC(31, 457, 1, 0)
case AREMDUVCC, ADIVDUVCC:
- return int32(OPVCC(31, 457, 1, 1))
+ return OPVCC(31, 457, 1, 1)
case AEIEIO:
- return int32(OPVCC(31, 854, 0, 0))
+ return OPVCC(31, 854, 0, 0)
case AEQV:
- return int32(OPVCC(31, 284, 0, 0))
+ return OPVCC(31, 284, 0, 0)
case AEQVCC:
- return int32(OPVCC(31, 284, 0, 1))
+ return OPVCC(31, 284, 0, 1)
case AEXTSB:
- return int32(OPVCC(31, 954, 0, 0))
+ return OPVCC(31, 954, 0, 0)
case AEXTSBCC:
- return int32(OPVCC(31, 954, 0, 1))
+ return OPVCC(31, 954, 0, 1)
case AEXTSH:
- return int32(OPVCC(31, 922, 0, 0))
+ return OPVCC(31, 922, 0, 0)
case AEXTSHCC:
- return int32(OPVCC(31, 922, 0, 1))
+ return OPVCC(31, 922, 0, 1)
case AEXTSW:
- return int32(OPVCC(31, 986, 0, 0))
+ return OPVCC(31, 986, 0, 0)
case AEXTSWCC:
- return int32(OPVCC(31, 986, 0, 1))
+ return OPVCC(31, 986, 0, 1)
case AFABS:
- return int32(OPVCC(63, 264, 0, 0))
+ return OPVCC(63, 264, 0, 0)
case AFABSCC:
- return int32(OPVCC(63, 264, 0, 1))
+ return OPVCC(63, 264, 0, 1)
case AFADD:
- return int32(OPVCC(63, 21, 0, 0))
+ return OPVCC(63, 21, 0, 0)
case AFADDCC:
- return int32(OPVCC(63, 21, 0, 1))
+ return OPVCC(63, 21, 0, 1)
case AFADDS:
- return int32(OPVCC(59, 21, 0, 0))
+ return OPVCC(59, 21, 0, 0)
case AFADDSCC:
- return int32(OPVCC(59, 21, 0, 1))
+ return OPVCC(59, 21, 0, 1)
case AFCMPO:
- return int32(OPVCC(63, 32, 0, 0))
+ return OPVCC(63, 32, 0, 0)
case AFCMPU:
- return int32(OPVCC(63, 0, 0, 0))
+ return OPVCC(63, 0, 0, 0)
case AFCFID:
- return int32(OPVCC(63, 846, 0, 0))
+ return OPVCC(63, 846, 0, 0)
case AFCFIDCC:
- return int32(OPVCC(63, 846, 0, 1))
+ return OPVCC(63, 846, 0, 1)
case AFCTIW:
- return int32(OPVCC(63, 14, 0, 0))
+ return OPVCC(63, 14, 0, 0)
case AFCTIWCC:
- return int32(OPVCC(63, 14, 0, 1))
+ return OPVCC(63, 14, 0, 1)
case AFCTIWZ:
- return int32(OPVCC(63, 15, 0, 0))
+ return OPVCC(63, 15, 0, 0)
case AFCTIWZCC:
- return int32(OPVCC(63, 15, 0, 1))
+ return OPVCC(63, 15, 0, 1)
case AFCTID:
- return int32(OPVCC(63, 814, 0, 0))
+ return OPVCC(63, 814, 0, 0)
case AFCTIDCC:
- return int32(OPVCC(63, 814, 0, 1))
+ return OPVCC(63, 814, 0, 1)
case AFCTIDZ:
- return int32(OPVCC(63, 815, 0, 0))
+ return OPVCC(63, 815, 0, 0)
case AFCTIDZCC:
- return int32(OPVCC(63, 815, 0, 1))
+ return OPVCC(63, 815, 0, 1)
case AFDIV:
- return int32(OPVCC(63, 18, 0, 0))
+ return OPVCC(63, 18, 0, 0)
case AFDIVCC:
- return int32(OPVCC(63, 18, 0, 1))
+ return OPVCC(63, 18, 0, 1)
case AFDIVS:
- return int32(OPVCC(59, 18, 0, 0))
+ return OPVCC(59, 18, 0, 0)
case AFDIVSCC:
- return int32(OPVCC(59, 18, 0, 1))
+ return OPVCC(59, 18, 0, 1)
case AFMADD:
- return int32(OPVCC(63, 29, 0, 0))
+ return OPVCC(63, 29, 0, 0)
case AFMADDCC:
- return int32(OPVCC(63, 29, 0, 1))
+ return OPVCC(63, 29, 0, 1)
case AFMADDS:
- return int32(OPVCC(59, 29, 0, 0))
+ return OPVCC(59, 29, 0, 0)
case AFMADDSCC:
- return int32(OPVCC(59, 29, 0, 1))
+ return OPVCC(59, 29, 0, 1)
case AFMOVS, AFMOVD:
- return int32(OPVCC(63, 72, 0, 0)) /* load */
+ return OPVCC(63, 72, 0, 0) /* load */
case AFMOVDCC:
- return int32(OPVCC(63, 72, 0, 1))
+ return OPVCC(63, 72, 0, 1)
case AFMSUB:
- return int32(OPVCC(63, 28, 0, 0))
+ return OPVCC(63, 28, 0, 0)
case AFMSUBCC:
- return int32(OPVCC(63, 28, 0, 1))
+ return OPVCC(63, 28, 0, 1)
case AFMSUBS:
- return int32(OPVCC(59, 28, 0, 0))
+ return OPVCC(59, 28, 0, 0)
case AFMSUBSCC:
- return int32(OPVCC(59, 28, 0, 1))
+ return OPVCC(59, 28, 0, 1)
case AFMUL:
- return int32(OPVCC(63, 25, 0, 0))
+ return OPVCC(63, 25, 0, 0)
case AFMULCC:
- return int32(OPVCC(63, 25, 0, 1))
+ return OPVCC(63, 25, 0, 1)
case AFMULS:
- return int32(OPVCC(59, 25, 0, 0))
+ return OPVCC(59, 25, 0, 0)
case AFMULSCC:
- return int32(OPVCC(59, 25, 0, 1))
+ return OPVCC(59, 25, 0, 1)
case AFNABS:
- return int32(OPVCC(63, 136, 0, 0))
+ return OPVCC(63, 136, 0, 0)
case AFNABSCC:
- return int32(OPVCC(63, 136, 0, 1))
+ return OPVCC(63, 136, 0, 1)
case AFNEG:
- return int32(OPVCC(63, 40, 0, 0))
+ return OPVCC(63, 40, 0, 0)
case AFNEGCC:
- return int32(OPVCC(63, 40, 0, 1))
+ return OPVCC(63, 40, 0, 1)
case AFNMADD:
- return int32(OPVCC(63, 31, 0, 0))
+ return OPVCC(63, 31, 0, 0)
case AFNMADDCC:
- return int32(OPVCC(63, 31, 0, 1))
+ return OPVCC(63, 31, 0, 1)
case AFNMADDS:
- return int32(OPVCC(59, 31, 0, 0))
+ return OPVCC(59, 31, 0, 0)
case AFNMADDSCC:
- return int32(OPVCC(59, 31, 0, 1))
+ return OPVCC(59, 31, 0, 1)
case AFNMSUB:
- return int32(OPVCC(63, 30, 0, 0))
+ return OPVCC(63, 30, 0, 0)
case AFNMSUBCC:
- return int32(OPVCC(63, 30, 0, 1))
+ return OPVCC(63, 30, 0, 1)
case AFNMSUBS:
- return int32(OPVCC(59, 30, 0, 0))
+ return OPVCC(59, 30, 0, 0)
case AFNMSUBSCC:
- return int32(OPVCC(59, 30, 0, 1))
+ return OPVCC(59, 30, 0, 1)
case AFRES:
- return int32(OPVCC(59, 24, 0, 0))
+ return OPVCC(59, 24, 0, 0)
case AFRESCC:
- return int32(OPVCC(59, 24, 0, 1))
+ return OPVCC(59, 24, 0, 1)
case AFRSP:
- return int32(OPVCC(63, 12, 0, 0))
+ return OPVCC(63, 12, 0, 0)
case AFRSPCC:
- return int32(OPVCC(63, 12, 0, 1))
+ return OPVCC(63, 12, 0, 1)
case AFRSQRTE:
- return int32(OPVCC(63, 26, 0, 0))
+ return OPVCC(63, 26, 0, 0)
case AFRSQRTECC:
- return int32(OPVCC(63, 26, 0, 1))
+ return OPVCC(63, 26, 0, 1)
case AFSEL:
- return int32(OPVCC(63, 23, 0, 0))
+ return OPVCC(63, 23, 0, 0)
case AFSELCC:
- return int32(OPVCC(63, 23, 0, 1))
+ return OPVCC(63, 23, 0, 1)
case AFSQRT:
- return int32(OPVCC(63, 22, 0, 0))
+ return OPVCC(63, 22, 0, 0)
case AFSQRTCC:
- return int32(OPVCC(63, 22, 0, 1))
+ return OPVCC(63, 22, 0, 1)
case AFSQRTS:
- return int32(OPVCC(59, 22, 0, 0))
+ return OPVCC(59, 22, 0, 0)
case AFSQRTSCC:
- return int32(OPVCC(59, 22, 0, 1))
+ return OPVCC(59, 22, 0, 1)
case AFSUB:
- return int32(OPVCC(63, 20, 0, 0))
+ return OPVCC(63, 20, 0, 0)
case AFSUBCC:
- return int32(OPVCC(63, 20, 0, 1))
+ return OPVCC(63, 20, 0, 1)
case AFSUBS:
- return int32(OPVCC(59, 20, 0, 0))
+ return OPVCC(59, 20, 0, 0)
case AFSUBSCC:
- return int32(OPVCC(59, 20, 0, 1))
+ return OPVCC(59, 20, 0, 1)
case AICBI:
- return int32(OPVCC(31, 982, 0, 0))
+ return OPVCC(31, 982, 0, 0)
case AISYNC:
- return int32(OPVCC(19, 150, 0, 0))
+ return OPVCC(19, 150, 0, 0)
case AMTFSB0:
- return int32(OPVCC(63, 70, 0, 0))
+ return OPVCC(63, 70, 0, 0)
case AMTFSB0CC:
- return int32(OPVCC(63, 70, 0, 1))
+ return OPVCC(63, 70, 0, 1)
case AMTFSB1:
- return int32(OPVCC(63, 38, 0, 0))
+ return OPVCC(63, 38, 0, 0)
case AMTFSB1CC:
- return int32(OPVCC(63, 38, 0, 1))
+ return OPVCC(63, 38, 0, 1)
case AMULHW:
- return int32(OPVCC(31, 75, 0, 0))
+ return OPVCC(31, 75, 0, 0)
case AMULHWCC:
- return int32(OPVCC(31, 75, 0, 1))
+ return OPVCC(31, 75, 0, 1)
case AMULHWU:
- return int32(OPVCC(31, 11, 0, 0))
+ return OPVCC(31, 11, 0, 0)
case AMULHWUCC:
- return int32(OPVCC(31, 11, 0, 1))
+ return OPVCC(31, 11, 0, 1)
case AMULLW:
- return int32(OPVCC(31, 235, 0, 0))
+ return OPVCC(31, 235, 0, 0)
case AMULLWCC:
- return int32(OPVCC(31, 235, 0, 1))
+ return OPVCC(31, 235, 0, 1)
case AMULLWV:
- return int32(OPVCC(31, 235, 1, 0))
+ return OPVCC(31, 235, 1, 0)
case AMULLWVCC:
- return int32(OPVCC(31, 235, 1, 1))
+ return OPVCC(31, 235, 1, 1)
case AMULHD:
- return int32(OPVCC(31, 73, 0, 0))
+ return OPVCC(31, 73, 0, 0)
case AMULHDCC:
- return int32(OPVCC(31, 73, 0, 1))
+ return OPVCC(31, 73, 0, 1)
case AMULHDU:
- return int32(OPVCC(31, 9, 0, 0))
+ return OPVCC(31, 9, 0, 0)
case AMULHDUCC:
- return int32(OPVCC(31, 9, 0, 1))
+ return OPVCC(31, 9, 0, 1)
case AMULLD:
- return int32(OPVCC(31, 233, 0, 0))
+ return OPVCC(31, 233, 0, 0)
case AMULLDCC:
- return int32(OPVCC(31, 233, 0, 1))
+ return OPVCC(31, 233, 0, 1)
case AMULLDV:
- return int32(OPVCC(31, 233, 1, 0))
+ return OPVCC(31, 233, 1, 0)
case AMULLDVCC:
- return int32(OPVCC(31, 233, 1, 1))
+ return OPVCC(31, 233, 1, 1)
case ANAND:
- return int32(OPVCC(31, 476, 0, 0))
+ return OPVCC(31, 476, 0, 0)
case ANANDCC:
- return int32(OPVCC(31, 476, 0, 1))
+ return OPVCC(31, 476, 0, 1)
case ANEG:
- return int32(OPVCC(31, 104, 0, 0))
+ return OPVCC(31, 104, 0, 0)
case ANEGCC:
- return int32(OPVCC(31, 104, 0, 1))
+ return OPVCC(31, 104, 0, 1)
case ANEGV:
- return int32(OPVCC(31, 104, 1, 0))
+ return OPVCC(31, 104, 1, 0)
case ANEGVCC:
- return int32(OPVCC(31, 104, 1, 1))
+ return OPVCC(31, 104, 1, 1)
case ANOR:
- return int32(OPVCC(31, 124, 0, 0))
+ return OPVCC(31, 124, 0, 0)
case ANORCC:
- return int32(OPVCC(31, 124, 0, 1))
+ return OPVCC(31, 124, 0, 1)
case AOR:
- return int32(OPVCC(31, 444, 0, 0))
+ return OPVCC(31, 444, 0, 0)
case AORCC:
- return int32(OPVCC(31, 444, 0, 1))
+ return OPVCC(31, 444, 0, 1)
case AORN:
- return int32(OPVCC(31, 412, 0, 0))
+ return OPVCC(31, 412, 0, 0)
case AORNCC:
- return int32(OPVCC(31, 412, 0, 1))
+ return OPVCC(31, 412, 0, 1)
case ARFI:
- return int32(OPVCC(19, 50, 0, 0))
+ return OPVCC(19, 50, 0, 0)
case ARFCI:
- return int32(OPVCC(19, 51, 0, 0))
+ return OPVCC(19, 51, 0, 0)
case ARFID:
- return int32(OPVCC(19, 18, 0, 0))
+ return OPVCC(19, 18, 0, 0)
case AHRFID:
- return int32(OPVCC(19, 274, 0, 0))
+ return OPVCC(19, 274, 0, 0)
case ARLWMI:
- return int32(OPVCC(20, 0, 0, 0))
+ return OPVCC(20, 0, 0, 0)
case ARLWMICC:
- return int32(OPVCC(20, 0, 0, 1))
+ return OPVCC(20, 0, 0, 1)
case ARLWNM:
- return int32(OPVCC(23, 0, 0, 0))
+ return OPVCC(23, 0, 0, 0)
case ARLWNMCC:
- return int32(OPVCC(23, 0, 0, 1))
+ return OPVCC(23, 0, 0, 1)
case ARLDCL:
- return int32(OPVCC(30, 8, 0, 0))
+ return OPVCC(30, 8, 0, 0)
case ARLDCR:
- return int32(OPVCC(30, 9, 0, 0))
+ return OPVCC(30, 9, 0, 0)
case ASYSCALL:
- return int32(OPVCC(17, 1, 0, 0))
+ return OPVCC(17, 1, 0, 0)
case ASLW:
- return int32(OPVCC(31, 24, 0, 0))
+ return OPVCC(31, 24, 0, 0)
case ASLWCC:
- return int32(OPVCC(31, 24, 0, 1))
+ return OPVCC(31, 24, 0, 1)
case ASLD:
- return int32(OPVCC(31, 27, 0, 0))
+ return OPVCC(31, 27, 0, 0)
case ASLDCC:
- return int32(OPVCC(31, 27, 0, 1))
+ return OPVCC(31, 27, 0, 1)
case ASRAW:
- return int32(OPVCC(31, 792, 0, 0))
+ return OPVCC(31, 792, 0, 0)
case ASRAWCC:
- return int32(OPVCC(31, 792, 0, 1))
+ return OPVCC(31, 792, 0, 1)
case ASRAD:
- return int32(OPVCC(31, 794, 0, 0))
+ return OPVCC(31, 794, 0, 0)
case ASRADCC:
- return int32(OPVCC(31, 794, 0, 1))
+ return OPVCC(31, 794, 0, 1)
case ASRW:
- return int32(OPVCC(31, 536, 0, 0))
+ return OPVCC(31, 536, 0, 0)
case ASRWCC:
- return int32(OPVCC(31, 536, 0, 1))
+ return OPVCC(31, 536, 0, 1)
case ASRD:
- return int32(OPVCC(31, 539, 0, 0))
+ return OPVCC(31, 539, 0, 0)
case ASRDCC:
- return int32(OPVCC(31, 539, 0, 1))
+ return OPVCC(31, 539, 0, 1)
case ASUB:
- return int32(OPVCC(31, 40, 0, 0))
+ return OPVCC(31, 40, 0, 0)
case ASUBCC:
- return int32(OPVCC(31, 40, 0, 1))
+ return OPVCC(31, 40, 0, 1)
case ASUBV:
- return int32(OPVCC(31, 40, 1, 0))
+ return OPVCC(31, 40, 1, 0)
case ASUBVCC:
- return int32(OPVCC(31, 40, 1, 1))
+ return OPVCC(31, 40, 1, 1)
case ASUBC:
- return int32(OPVCC(31, 8, 0, 0))
+ return OPVCC(31, 8, 0, 0)
case ASUBCCC:
- return int32(OPVCC(31, 8, 0, 1))
+ return OPVCC(31, 8, 0, 1)
case ASUBCV:
- return int32(OPVCC(31, 8, 1, 0))
+ return OPVCC(31, 8, 1, 0)
case ASUBCVCC:
- return int32(OPVCC(31, 8, 1, 1))
+ return OPVCC(31, 8, 1, 1)
case ASUBE:
- return int32(OPVCC(31, 136, 0, 0))
+ return OPVCC(31, 136, 0, 0)
case ASUBECC:
- return int32(OPVCC(31, 136, 0, 1))
+ return OPVCC(31, 136, 0, 1)
case ASUBEV:
- return int32(OPVCC(31, 136, 1, 0))
+ return OPVCC(31, 136, 1, 0)
case ASUBEVCC:
- return int32(OPVCC(31, 136, 1, 1))
+ return OPVCC(31, 136, 1, 1)
case ASUBME:
- return int32(OPVCC(31, 232, 0, 0))
+ return OPVCC(31, 232, 0, 0)
case ASUBMECC:
- return int32(OPVCC(31, 232, 0, 1))
+ return OPVCC(31, 232, 0, 1)
case ASUBMEV:
- return int32(OPVCC(31, 232, 1, 0))
+ return OPVCC(31, 232, 1, 0)
case ASUBMEVCC:
- return int32(OPVCC(31, 232, 1, 1))
+ return OPVCC(31, 232, 1, 1)
case ASUBZE:
- return int32(OPVCC(31, 200, 0, 0))
+ return OPVCC(31, 200, 0, 0)
case ASUBZECC:
- return int32(OPVCC(31, 200, 0, 1))
+ return OPVCC(31, 200, 0, 1)
case ASUBZEV:
- return int32(OPVCC(31, 200, 1, 0))
+ return OPVCC(31, 200, 1, 0)
case ASUBZEVCC:
- return int32(OPVCC(31, 200, 1, 1))
+ return OPVCC(31, 200, 1, 1)
case ASYNC:
- return int32(OPVCC(31, 598, 0, 0))
+ return OPVCC(31, 598, 0, 0)
case APTESYNC:
- return int32(OPVCC(31, 598, 0, 0) | 2<<21)
+ return OPVCC(31, 598, 0, 0) | 2<<21
case ATLBIE:
- return int32(OPVCC(31, 306, 0, 0))
+ return OPVCC(31, 306, 0, 0)
case ATLBIEL:
- return int32(OPVCC(31, 274, 0, 0))
+ return OPVCC(31, 274, 0, 0)
case ATLBSYNC:
- return int32(OPVCC(31, 566, 0, 0))
+ return OPVCC(31, 566, 0, 0)
case ASLBIA:
- return int32(OPVCC(31, 498, 0, 0))
+ return OPVCC(31, 498, 0, 0)
case ASLBIE:
- return int32(OPVCC(31, 434, 0, 0))
+ return OPVCC(31, 434, 0, 0)
case ASLBMFEE:
- return int32(OPVCC(31, 915, 0, 0))
+ return OPVCC(31, 915, 0, 0)
case ASLBMFEV:
- return int32(OPVCC(31, 851, 0, 0))
+ return OPVCC(31, 851, 0, 0)
case ASLBMTE:
- return int32(OPVCC(31, 402, 0, 0))
+ return OPVCC(31, 402, 0, 0)
case ATW:
- return int32(OPVCC(31, 4, 0, 0))
+ return OPVCC(31, 4, 0, 0)
case ATD:
- return int32(OPVCC(31, 68, 0, 0))
+ return OPVCC(31, 68, 0, 0)
case AXOR:
- return int32(OPVCC(31, 316, 0, 0))
+ return OPVCC(31, 316, 0, 0)
case AXORCC:
- return int32(OPVCC(31, 316, 0, 1))
+ return OPVCC(31, 316, 0, 1)
}
ctxt.Diag("bad r/r opcode %v", obj.Aconv(a))
return 0
}
-func opirr(ctxt *obj.Link, a int) int32 {
+func opirr(ctxt *obj.Link, a obj.As) uint32 {
switch a {
case AADD:
- return int32(OPVCC(14, 0, 0, 0))
+ return OPVCC(14, 0, 0, 0)
case AADDC:
- return int32(OPVCC(12, 0, 0, 0))
+ return OPVCC(12, 0, 0, 0)
case AADDCCC:
- return int32(OPVCC(13, 0, 0, 0))
- case AADD + ALAST:
- return int32(OPVCC(15, 0, 0, 0)) /* ADDIS/CAU */
+ return OPVCC(13, 0, 0, 0)
+ case -AADD:
+ return OPVCC(15, 0, 0, 0) /* ADDIS/CAU */
case AANDCC:
- return int32(OPVCC(28, 0, 0, 0))
- case AANDCC + ALAST:
- return int32(OPVCC(29, 0, 0, 0)) /* ANDIS./ANDIU. */
+ return OPVCC(28, 0, 0, 0)
+ case -AANDCC:
+ return OPVCC(29, 0, 0, 0) /* ANDIS./ANDIU. */
case ABR:
- return int32(OPVCC(18, 0, 0, 0))
+ return OPVCC(18, 0, 0, 0)
case ABL:
- return int32(OPVCC(18, 0, 0, 0) | 1)
+ return OPVCC(18, 0, 0, 0) | 1
case obj.ADUFFZERO:
- return int32(OPVCC(18, 0, 0, 0) | 1)
+ return OPVCC(18, 0, 0, 0) | 1
case obj.ADUFFCOPY:
- return int32(OPVCC(18, 0, 0, 0) | 1)
+ return OPVCC(18, 0, 0, 0) | 1
case ABC:
- return int32(OPVCC(16, 0, 0, 0))
+ return OPVCC(16, 0, 0, 0)
case ABCL:
- return int32(OPVCC(16, 0, 0, 0) | 1)
+ return OPVCC(16, 0, 0, 0) | 1
case ABEQ:
- return int32(AOP_RRR(16<<26, 12, 2, 0))
+ return AOP_RRR(16<<26, 12, 2, 0)
case ABGE:
- return int32(AOP_RRR(16<<26, 4, 0, 0))
+ return AOP_RRR(16<<26, 4, 0, 0)
case ABGT:
- return int32(AOP_RRR(16<<26, 12, 1, 0))
+ return AOP_RRR(16<<26, 12, 1, 0)
case ABLE:
- return int32(AOP_RRR(16<<26, 4, 1, 0))
+ return AOP_RRR(16<<26, 4, 1, 0)
case ABLT:
- return int32(AOP_RRR(16<<26, 12, 0, 0))
+ return AOP_RRR(16<<26, 12, 0, 0)
case ABNE:
- return int32(AOP_RRR(16<<26, 4, 2, 0))
+ return AOP_RRR(16<<26, 4, 2, 0)
case ABVC:
- return int32(AOP_RRR(16<<26, 4, 3, 0))
+ return AOP_RRR(16<<26, 4, 3, 0)
case ABVS:
- return int32(AOP_RRR(16<<26, 12, 3, 0))
+ return AOP_RRR(16<<26, 12, 3, 0)
case ACMP:
- return int32(OPVCC(11, 0, 0, 0) | 1<<21) /* L=1 */
+ return OPVCC(11, 0, 0, 0) | 1<<21 /* L=1 */
case ACMPU:
- return int32(OPVCC(10, 0, 0, 0) | 1<<21)
+ return OPVCC(10, 0, 0, 0) | 1<<21
case ACMPW:
- return int32(OPVCC(11, 0, 0, 0)) /* L=0 */
+ return OPVCC(11, 0, 0, 0) /* L=0 */
case ACMPWU:
- return int32(OPVCC(10, 0, 0, 0))
+ return OPVCC(10, 0, 0, 0)
case ALSW:
- return int32(OPVCC(31, 597, 0, 0))
+ return OPVCC(31, 597, 0, 0)
case AMULLW:
- return int32(OPVCC(7, 0, 0, 0))
+ return OPVCC(7, 0, 0, 0)
case AOR:
- return int32(OPVCC(24, 0, 0, 0))
- case AOR + ALAST:
- return int32(OPVCC(25, 0, 0, 0)) /* ORIS/ORIU */
+ return OPVCC(24, 0, 0, 0)
+ case -AOR:
+ return OPVCC(25, 0, 0, 0) /* ORIS/ORIU */
case ARLWMI:
- return int32(OPVCC(20, 0, 0, 0)) /* rlwimi */
+ return OPVCC(20, 0, 0, 0) /* rlwimi */
case ARLWMICC:
- return int32(OPVCC(20, 0, 0, 1))
+ return OPVCC(20, 0, 0, 1)
case ARLDMI:
- return int32(OPVCC(30, 0, 0, 0) | 3<<2) /* rldimi */
+ return OPVCC(30, 0, 0, 0) | 3<<2 /* rldimi */
case ARLDMICC:
- return int32(OPVCC(30, 0, 0, 1) | 3<<2)
+ return OPVCC(30, 0, 0, 1) | 3<<2
case ARLWNM:
- return int32(OPVCC(21, 0, 0, 0)) /* rlwinm */
+ return OPVCC(21, 0, 0, 0) /* rlwinm */
case ARLWNMCC:
- return int32(OPVCC(21, 0, 0, 1))
+ return OPVCC(21, 0, 0, 1)
case ARLDCL:
- return int32(OPVCC(30, 0, 0, 0)) /* rldicl */
+ return OPVCC(30, 0, 0, 0) /* rldicl */
case ARLDCLCC:
- return int32(OPVCC(30, 0, 0, 1))
+ return OPVCC(30, 0, 0, 1)
case ARLDCR:
- return int32(OPVCC(30, 1, 0, 0)) /* rldicr */
+ return OPVCC(30, 1, 0, 0) /* rldicr */
case ARLDCRCC:
- return int32(OPVCC(30, 1, 0, 1))
+ return OPVCC(30, 1, 0, 1)
case ARLDC:
- return int32(OPVCC(30, 0, 0, 0) | 2<<2)
+ return OPVCC(30, 0, 0, 0) | 2<<2
case ARLDCCC:
- return int32(OPVCC(30, 0, 0, 1) | 2<<2)
+ return OPVCC(30, 0, 0, 1) | 2<<2
case ASRAW:
- return int32(OPVCC(31, 824, 0, 0))
+ return OPVCC(31, 824, 0, 0)
case ASRAWCC:
- return int32(OPVCC(31, 824, 0, 1))
+ return OPVCC(31, 824, 0, 1)
case ASRAD:
- return int32(OPVCC(31, (413 << 1), 0, 0))
+ return OPVCC(31, (413 << 1), 0, 0)
case ASRADCC:
- return int32(OPVCC(31, (413 << 1), 0, 1))
+ return OPVCC(31, (413 << 1), 0, 1)
case ASTSW:
- return int32(OPVCC(31, 725, 0, 0))
+ return OPVCC(31, 725, 0, 0)
case ASUBC:
- return int32(OPVCC(8, 0, 0, 0))
+ return OPVCC(8, 0, 0, 0)
case ATW:
- return int32(OPVCC(3, 0, 0, 0))
+ return OPVCC(3, 0, 0, 0)
case ATD:
- return int32(OPVCC(2, 0, 0, 0))
+ return OPVCC(2, 0, 0, 0)
case AXOR:
- return int32(OPVCC(26, 0, 0, 0)) /* XORIL */
- case AXOR + ALAST:
- return int32(OPVCC(27, 0, 0, 0)) /* XORIU */
+ return OPVCC(26, 0, 0, 0) /* XORIL */
+ case -AXOR:
+ return OPVCC(27, 0, 0, 0) /* XORIU */
}
ctxt.Diag("bad opcode i/r %v", obj.Aconv(a))
/*
* load o(a),d
*/
-func opload(ctxt *obj.Link, a int) int32 {
+func opload(ctxt *obj.Link, a obj.As) uint32 {
switch a {
case AMOVD:
- return int32(OPVCC(58, 0, 0, 0)) /* ld */
+ return OPVCC(58, 0, 0, 0) /* ld */
case AMOVDU:
- return int32(OPVCC(58, 0, 0, 1)) /* ldu */
+ return OPVCC(58, 0, 0, 1) /* ldu */
case AMOVWZ:
- return int32(OPVCC(32, 0, 0, 0)) /* lwz */
+ return OPVCC(32, 0, 0, 0) /* lwz */
case AMOVWZU:
- return int32(OPVCC(33, 0, 0, 0)) /* lwzu */
+ return OPVCC(33, 0, 0, 0) /* lwzu */
case AMOVW:
- return int32(OPVCC(58, 0, 0, 0) | 1<<1) /* lwa */
+ return OPVCC(58, 0, 0, 0) | 1<<1 /* lwa */
/* no AMOVWU */
case AMOVB, AMOVBZ:
- return int32(OPVCC(34, 0, 0, 0))
+ return OPVCC(34, 0, 0, 0)
/* load */
case AMOVBU, AMOVBZU:
- return int32(OPVCC(35, 0, 0, 0))
+ return OPVCC(35, 0, 0, 0)
case AFMOVD:
- return int32(OPVCC(50, 0, 0, 0))
+ return OPVCC(50, 0, 0, 0)
case AFMOVDU:
- return int32(OPVCC(51, 0, 0, 0))
+ return OPVCC(51, 0, 0, 0)
case AFMOVS:
- return int32(OPVCC(48, 0, 0, 0))
+ return OPVCC(48, 0, 0, 0)
case AFMOVSU:
- return int32(OPVCC(49, 0, 0, 0))
+ return OPVCC(49, 0, 0, 0)
case AMOVH:
- return int32(OPVCC(42, 0, 0, 0))
+ return OPVCC(42, 0, 0, 0)
case AMOVHU:
- return int32(OPVCC(43, 0, 0, 0))
+ return OPVCC(43, 0, 0, 0)
case AMOVHZ:
- return int32(OPVCC(40, 0, 0, 0))
+ return OPVCC(40, 0, 0, 0)
case AMOVHZU:
- return int32(OPVCC(41, 0, 0, 0))
+ return OPVCC(41, 0, 0, 0)
case AMOVMW:
- return int32(OPVCC(46, 0, 0, 0)) /* lmw */
+ return OPVCC(46, 0, 0, 0) /* lmw */
}
ctxt.Diag("bad load opcode %v", obj.Aconv(a))
/*
* indexed load a(b),d
*/
-func oploadx(ctxt *obj.Link, a int) int32 {
+func oploadx(ctxt *obj.Link, a obj.As) uint32 {
switch a {
case AMOVWZ:
- return int32(OPVCC(31, 23, 0, 0)) /* lwzx */
+ return OPVCC(31, 23, 0, 0) /* lwzx */
case AMOVWZU:
- return int32(OPVCC(31, 55, 0, 0)) /* lwzux */
+ return OPVCC(31, 55, 0, 0) /* lwzux */
case AMOVW:
- return int32(OPVCC(31, 341, 0, 0)) /* lwax */
+ return OPVCC(31, 341, 0, 0) /* lwax */
case AMOVWU:
- return int32(OPVCC(31, 373, 0, 0)) /* lwaux */
+ return OPVCC(31, 373, 0, 0) /* lwaux */
case AMOVB, AMOVBZ:
- return int32(OPVCC(31, 87, 0, 0)) /* lbzx */
+ return OPVCC(31, 87, 0, 0) /* lbzx */
case AMOVBU, AMOVBZU:
- return int32(OPVCC(31, 119, 0, 0)) /* lbzux */
+ return OPVCC(31, 119, 0, 0) /* lbzux */
case AFMOVD:
- return int32(OPVCC(31, 599, 0, 0)) /* lfdx */
+ return OPVCC(31, 599, 0, 0) /* lfdx */
case AFMOVDU:
- return int32(OPVCC(31, 631, 0, 0)) /* lfdux */
+ return OPVCC(31, 631, 0, 0) /* lfdux */
case AFMOVS:
- return int32(OPVCC(31, 535, 0, 0)) /* lfsx */
+ return OPVCC(31, 535, 0, 0) /* lfsx */
case AFMOVSU:
- return int32(OPVCC(31, 567, 0, 0)) /* lfsux */
+ return OPVCC(31, 567, 0, 0) /* lfsux */
case AMOVH:
- return int32(OPVCC(31, 343, 0, 0)) /* lhax */
+ return OPVCC(31, 343, 0, 0) /* lhax */
case AMOVHU:
- return int32(OPVCC(31, 375, 0, 0)) /* lhaux */
+ return OPVCC(31, 375, 0, 0) /* lhaux */
case AMOVHBR:
- return int32(OPVCC(31, 790, 0, 0)) /* lhbrx */
+ return OPVCC(31, 790, 0, 0) /* lhbrx */
case AMOVWBR:
- return int32(OPVCC(31, 534, 0, 0)) /* lwbrx */
+ return OPVCC(31, 534, 0, 0) /* lwbrx */
case AMOVHZ:
- return int32(OPVCC(31, 279, 0, 0)) /* lhzx */
+ return OPVCC(31, 279, 0, 0) /* lhzx */
case AMOVHZU:
- return int32(OPVCC(31, 311, 0, 0)) /* lhzux */
+ return OPVCC(31, 311, 0, 0) /* lhzux */
case AECIWX:
- return int32(OPVCC(31, 310, 0, 0)) /* eciwx */
+ return OPVCC(31, 310, 0, 0) /* eciwx */
case ALWAR:
- return int32(OPVCC(31, 20, 0, 0)) /* lwarx */
+ return OPVCC(31, 20, 0, 0) /* lwarx */
case ALDAR:
- return int32(OPVCC(31, 84, 0, 0))
+ return OPVCC(31, 84, 0, 0)
case ALSW:
- return int32(OPVCC(31, 533, 0, 0)) /* lswx */
+ return OPVCC(31, 533, 0, 0) /* lswx */
case AMOVD:
- return int32(OPVCC(31, 21, 0, 0)) /* ldx */
+ return OPVCC(31, 21, 0, 0) /* ldx */
case AMOVDU:
- return int32(OPVCC(31, 53, 0, 0)) /* ldux */
+ return OPVCC(31, 53, 0, 0) /* ldux */
}
ctxt.Diag("bad loadx opcode %v", obj.Aconv(a))
/*
* store s,o(d)
*/
-func opstore(ctxt *obj.Link, a int) int32 {
+func opstore(ctxt *obj.Link, a obj.As) uint32 {
switch a {
case AMOVB, AMOVBZ:
- return int32(OPVCC(38, 0, 0, 0)) /* stb */
+ return OPVCC(38, 0, 0, 0) /* stb */
case AMOVBU, AMOVBZU:
- return int32(OPVCC(39, 0, 0, 0)) /* stbu */
+ return OPVCC(39, 0, 0, 0) /* stbu */
case AFMOVD:
- return int32(OPVCC(54, 0, 0, 0)) /* stfd */
+ return OPVCC(54, 0, 0, 0) /* stfd */
case AFMOVDU:
- return int32(OPVCC(55, 0, 0, 0)) /* stfdu */
+ return OPVCC(55, 0, 0, 0) /* stfdu */
case AFMOVS:
- return int32(OPVCC(52, 0, 0, 0)) /* stfs */
+ return OPVCC(52, 0, 0, 0) /* stfs */
case AFMOVSU:
- return int32(OPVCC(53, 0, 0, 0)) /* stfsu */
+ return OPVCC(53, 0, 0, 0) /* stfsu */
case AMOVHZ, AMOVH:
- return int32(OPVCC(44, 0, 0, 0)) /* sth */
+ return OPVCC(44, 0, 0, 0) /* sth */
case AMOVHZU, AMOVHU:
- return int32(OPVCC(45, 0, 0, 0)) /* sthu */
+ return OPVCC(45, 0, 0, 0) /* sthu */
case AMOVMW:
- return int32(OPVCC(47, 0, 0, 0)) /* stmw */
+ return OPVCC(47, 0, 0, 0) /* stmw */
case ASTSW:
- return int32(OPVCC(31, 725, 0, 0)) /* stswi */
+ return OPVCC(31, 725, 0, 0) /* stswi */
case AMOVWZ, AMOVW:
- return int32(OPVCC(36, 0, 0, 0)) /* stw */
+ return OPVCC(36, 0, 0, 0) /* stw */
case AMOVWZU, AMOVWU:
- return int32(OPVCC(37, 0, 0, 0)) /* stwu */
+ return OPVCC(37, 0, 0, 0) /* stwu */
case AMOVD:
- return int32(OPVCC(62, 0, 0, 0)) /* std */
+ return OPVCC(62, 0, 0, 0) /* std */
case AMOVDU:
- return int32(OPVCC(62, 0, 0, 1)) /* stdu */
+ return OPVCC(62, 0, 0, 1) /* stdu */
}
ctxt.Diag("unknown store opcode %v", obj.Aconv(a))
/*
* indexed store s,a(b)
*/
-func opstorex(ctxt *obj.Link, a int) int32 {
+func opstorex(ctxt *obj.Link, a obj.As) uint32 {
switch a {
case AMOVB, AMOVBZ:
- return int32(OPVCC(31, 215, 0, 0)) /* stbx */
+ return OPVCC(31, 215, 0, 0) /* stbx */
case AMOVBU, AMOVBZU:
- return int32(OPVCC(31, 247, 0, 0)) /* stbux */
+ return OPVCC(31, 247, 0, 0) /* stbux */
case AFMOVD:
- return int32(OPVCC(31, 727, 0, 0)) /* stfdx */
+ return OPVCC(31, 727, 0, 0) /* stfdx */
case AFMOVDU:
- return int32(OPVCC(31, 759, 0, 0)) /* stfdux */
+ return OPVCC(31, 759, 0, 0) /* stfdux */
case AFMOVS:
- return int32(OPVCC(31, 663, 0, 0)) /* stfsx */
+ return OPVCC(31, 663, 0, 0) /* stfsx */
case AFMOVSU:
- return int32(OPVCC(31, 695, 0, 0)) /* stfsux */
+ return OPVCC(31, 695, 0, 0) /* stfsux */
case AMOVHZ, AMOVH:
- return int32(OPVCC(31, 407, 0, 0)) /* sthx */
+ return OPVCC(31, 407, 0, 0) /* sthx */
case AMOVHBR:
- return int32(OPVCC(31, 918, 0, 0)) /* sthbrx */
+ return OPVCC(31, 918, 0, 0) /* sthbrx */
case AMOVHZU, AMOVHU:
- return int32(OPVCC(31, 439, 0, 0)) /* sthux */
+ return OPVCC(31, 439, 0, 0) /* sthux */
case AMOVWZ, AMOVW:
- return int32(OPVCC(31, 151, 0, 0)) /* stwx */
+ return OPVCC(31, 151, 0, 0) /* stwx */
case AMOVWZU, AMOVWU:
- return int32(OPVCC(31, 183, 0, 0)) /* stwux */
+ return OPVCC(31, 183, 0, 0) /* stwux */
case ASTSW:
- return int32(OPVCC(31, 661, 0, 0)) /* stswx */
+ return OPVCC(31, 661, 0, 0) /* stswx */
case AMOVWBR:
- return int32(OPVCC(31, 662, 0, 0)) /* stwbrx */
+ return OPVCC(31, 662, 0, 0) /* stwbrx */
case ASTWCCC:
- return int32(OPVCC(31, 150, 0, 1)) /* stwcx. */
+ return OPVCC(31, 150, 0, 1) /* stwcx. */
case ASTDCCC:
- return int32(OPVCC(31, 214, 0, 1)) /* stdcx. */
+ return OPVCC(31, 214, 0, 1) /* stwdx. */
case AECOWX:
- return int32(OPVCC(31, 438, 0, 0)) /* ecowx */
+ return OPVCC(31, 438, 0, 0) /* ecowx */
case AMOVD:
- return int32(OPVCC(31, 149, 0, 0)) /* stdx */
+ return OPVCC(31, 149, 0, 0) /* stdx */
case AMOVDU:
- return int32(OPVCC(31, 181, 0, 0)) /* stdux */
+ return OPVCC(31, 181, 0, 0) /* stdux */
}
ctxt.Diag("unknown storex opcode %v", obj.Aconv(a))
autosize := int32(0)
var aoffset int
- var mov int
- var o int
+ var mov obj.As
var p1 *obj.Prog
var p2 *obj.Prog
for p := cursym.Text; p != nil; p = p.Link {
- o = int(p.As)
+ o := p.As
switch o {
case obj.ATEXT:
mov = AMOVD
q.To.Reg = REGTMP
q = obj.Appendp(ctxt, q)
- q.As = int16(mov)
+ q.As = mov
q.Lineno = p.Lineno
q.From.Type = obj.TYPE_REG
q.From.Reg = REGTMP
s.Text = firstp.Link
}
-func relinv(a int) int {
+func relinv(a obj.As) obj.As {
switch a {
case ABEQ:
return ABNE
func xfol(ctxt *obj.Link, p *obj.Prog, last **obj.Prog) {
var q *obj.Prog
var r *obj.Prog
- var a int
- var b int
+ var b obj.As
var i int
loop:
if p == nil {
return
}
- a = int(p.As)
+ a := p.As
if a == ABR {
q = p.Pcond
if (p.Mark&NOSCHED != 0) || q != nil && (q.Mark&NOSCHED != 0) {
break
}
b = 0 /* set */
- a = int(q.As)
+ a = q.As
if a == obj.ANOP {
i--
continue
if a == ABR || a == obj.ARET || a == ARFI || a == ARFCI || a == ARFID || a == AHRFID {
return
}
- r.As = int16(b)
+ r.As = b
r.Pcond = p.Link
r.Link = p.Pcond
if r.Link.Mark&FOLL == 0 {
a = ABR
q = ctxt.NewProg()
- q.As = int16(a)
+ q.As = a
q.Lineno = p.Lineno
q.To.Type = obj.TYPE_BRANCH
q.To.Offset = p.Pc
--- /dev/null
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !nacl
+
+package obj
+
+import (
+ "reflect"
+ "testing"
+ "unsafe"
+)
+
+// Assert that the size of important structures do not change unexpectedly.
+
+func TestSizeof(t *testing.T) {
+ const _64bit = unsafe.Sizeof(uintptr(0)) == 8
+
+ var tests = []struct {
+ val interface{} // type as a value
+ _32bit uintptr // size on 32bit platforms
+ _64bit uintptr // size on 64bit platforms
+ }{
+ {Addr{}, 52, 80},
+ {LSym{}, 92, 152},
+ {Prog{}, 196, 288},
+ }
+
+ for _, tt := range tests {
+ want := tt._32bit
+ if _64bit {
+ want = tt._64bit
+ }
+ got := reflect.TypeOf(tt.val).Size()
+ if want != got {
+ t.Errorf("unsafe.Sizeof(%T) = %d, want %d", tt.val, got, want)
+ }
+ }
+}
var buf bytes.Buffer
- fmt.Fprintf(&buf, "%.5d (%v)\t%v%s", p.Pc, p.Line(), Aconv(int(p.As)), sc)
+ fmt.Fprintf(&buf, "%.5d (%v)\t%v%s", p.Pc, p.Line(), Aconv(p.As), sc)
sep := "\t"
if p.From.Type != TYPE_NONE {
fmt.Fprintf(&buf, "%s%v", sep, Dconv(p, &p.From))
}
str = Rconv(int(a.Reg))
- if a.Name != TYPE_NONE || a.Sym != nil {
+ if a.Name != NAME_NONE || a.Sym != nil {
str = fmt.Sprintf("%v(%v)(REG)", Mconv(a), Rconv(int(a.Reg)))
}
return str
}
-/*
- Each architecture defines an instruction (A*) space as a unique
- integer range.
- Global opcodes like CALL start at 0; the architecture-specific ones
- start at a distinct, big-maskable offsets.
- Here is the list of architectures and the base of their opcode spaces.
-*/
-
-const (
- ABase386 = (1 + iota) << 12
- ABaseARM
- ABaseAMD64
- ABasePPC64
- ABaseARM64
- ABaseMIPS64
- AMask = 1<<12 - 1 // AND with this to use the opcode as an array index.
-)
-
type opSet struct {
- lo int
+ lo As
names []string
}
// RegisterOpcode binds a list of instruction names
// to a given instruction number range.
-func RegisterOpcode(lo int, Anames []string) {
+func RegisterOpcode(lo As, Anames []string) {
aSpace = append(aSpace, opSet{lo, Anames})
}
-func Aconv(a int) string {
- if 0 <= a && a < len(Anames) {
+func Aconv(a As) string {
+ if 0 <= a && int(a) < len(Anames) {
return Anames[a]
}
for i := range aSpace {
as := &aSpace[i]
- if as.lo <= a && a < as.lo+len(as.names) {
+ if as.lo <= a && int(a-as.lo) < len(as.names) {
return as.names[a-as.lo]
}
}
)
type Optab struct {
- as int16
+ as obj.As
ytab []ytab
prefix uint8
op [23]uint8
}
type Movtab struct {
- as int16
+ as obj.As
ft uint8
f3t uint8
tt uint8
return c + pad
}
-func spadjop(ctxt *obj.Link, p *obj.Prog, l int, q int) int {
+func spadjop(ctxt *obj.Link, p *obj.Prog, l, q obj.As) obj.As {
if p.Mode != 64 || ctxt.Arch.Ptrsize == 4 {
return l
}
p.To.Reg = REG_SP
v = int32(-p.From.Offset)
p.From.Offset = int64(v)
- p.As = int16(spadjop(ctxt, p, AADDL, AADDQ))
+ p.As = spadjop(ctxt, p, AADDL, AADDQ)
if v < 0 {
- p.As = int16(spadjop(ctxt, p, ASUBL, ASUBQ))
+ p.As = spadjop(ctxt, p, ASUBL, ASUBQ)
v = -v
p.From.Offset = int64(v)
}
p.To.Reg = REG_SP
v = int32(-p.From.Offset)
p.From.Offset = int64(v)
- p.As = int16(spadjop(ctxt, p, AADDL, AADDQ))
+ p.As = spadjop(ctxt, p, AADDL, AADDQ)
if v < 0 {
- p.As = int16(spadjop(ctxt, p, ASUBL, ASUBQ))
+ p.As = spadjop(ctxt, p, ASUBL, ASUBQ)
v = -v
p.From.Offset = int64(v)
}
}
func instinit() {
- var c int
-
for i := 1; optab[i].as != 0; i++ {
- c = int(optab[i].as)
+ c := optab[i].as
if opindex[c&obj.AMask] != nil {
log.Fatalf("phase error in optab: %d (%v)", i, obj.Aconv(c))
}
}
if p.As != ALEAQ && p.As != ALEAL {
- if p.From.Index != obj.TYPE_NONE && p.From.Scale > 0 {
+ if p.From.Index != REG_NONE && p.From.Scale > 0 {
nacltrunc(ctxt, int(p.From.Index))
}
- if p.To.Index != obj.TYPE_NONE && p.To.Scale > 0 {
+ if p.To.Index != REG_NONE && p.To.Scale > 0 {
nacltrunc(ctxt, int(p.To.Index))
}
}
// Rewrite p, if necessary, to access global data via the global offset table.
func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog) {
- var add, lea, mov, reg int16
+ var add, lea, mov obj.As
+ var reg int16
if p.Mode == 64 {
add = AADDQ
lea = ALEAQ
// CMPQ SP, stackguard
p = obj.Appendp(ctxt, p)
- p.As = int16(cmp)
+ p.As = cmp
p.From.Type = obj.TYPE_REG
p.From.Reg = REG_SP
indir_cx(ctxt, p, &p.To)
// CMPQ AX, stackguard
p = obj.Appendp(ctxt, p)
- p.As = int16(lea)
+ p.As = lea
p.From.Type = obj.TYPE_MEM
p.From.Reg = REG_SP
p.From.Offset = -(int64(framesize) - obj.StackSmall)
p.To.Reg = REG_AX
p = obj.Appendp(ctxt, p)
- p.As = int16(cmp)
+ p.As = cmp
p.From.Type = obj.TYPE_REG
p.From.Reg = REG_AX
indir_cx(ctxt, p, &p.To)
p = obj.Appendp(ctxt, p)
- p.As = int16(mov)
+ p.As = mov
indir_cx(ctxt, p, &p.From)
p.From.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
if ctxt.Cursym.Cfunc != 0 {
p.To.Reg = REG_SI
p = obj.Appendp(ctxt, p)
- p.As = int16(cmp)
+ p.As = cmp
p.From.Type = obj.TYPE_REG
p.From.Reg = REG_SI
p.To.Type = obj.TYPE_CONST
q1 = p
p = obj.Appendp(ctxt, p)
- p.As = int16(lea)
+ p.As = lea
p.From.Type = obj.TYPE_MEM
p.From.Reg = REG_SP
p.From.Offset = obj.StackGuard
p.To.Reg = REG_AX
p = obj.Appendp(ctxt, p)
- p.As = int16(sub)
+ p.As = sub
p.From.Type = obj.TYPE_REG
p.From.Reg = REG_SI
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_AX
p = obj.Appendp(ctxt, p)
- p.As = int16(cmp)
+ p.As = cmp
p.From.Type = obj.TYPE_REG
p.From.Reg = REG_AX
p.To.Type = obj.TYPE_CONST
s.Text = firstp.Link
}
-func nofollow(a int) bool {
+func nofollow(a obj.As) bool {
switch a {
case obj.AJMP,
obj.ARET,
return false
}
-func pushpop(a int) bool {
+func pushpop(a obj.As) bool {
switch a {
case APUSHL,
APUSHFL,
return false
}
-func relinv(a int16) int16 {
+func relinv(a obj.As) obj.As {
switch a {
case AJEQ:
return AJNE
return AJOS
}
- log.Fatalf("unknown relation: %s", obj.Aconv(int(a)))
+ log.Fatalf("unknown relation: %s", obj.Aconv(a))
return 0
}
func xfol(ctxt *obj.Link, p *obj.Prog, last **obj.Prog) {
var q *obj.Prog
var i int
- var a int
+ var a obj.As
loop:
if p == nil {
if q == *last {
break
}
- a = int(q.As)
+ a = q.As
if a == obj.ANOP {
i--
continue
q.Mark |= DONE
(*last).Link = q
*last = q
- if int(q.As) != a || q.Pcond == nil || q.Pcond.Mark&DONE != 0 {
+ if q.As != a || q.Pcond == nil || q.Pcond.Mark&DONE != 0 {
continue
}
(*last).Link = p
*last = p
- a = int(p.As)
+ a = p.As
/* continue loop with what comes after p */
if nofollow(a) {
* expect conditional jump to be taken.
* rewrite so that's the fall-through case.
*/
- p.As = relinv(int16(a))
+ p.As = relinv(a)
q = p.Link
p.Link = p.Pcond
q = p.Link
if q.Mark&DONE != 0 {
if a != ALOOP {
- p.As = relinv(int16(a))
+ p.As = relinv(a)
p.Link = p.Pcond
p.Pcond = q
}
goto loop
}
-var unaryDst = map[int]bool{
+var unaryDst = map[obj.As]bool{
ABSWAPL: true,
ABSWAPQ: true,
ACMPXCHG8B: true,
}
func Addcall(ctxt *ld.Link, s *ld.LSym, t *ld.LSym) int64 {
- s.Reachable = true
+ s.Attr |= ld.AttrReachable
i := s.Size
s.Size += 4
ld.Symgrow(ctxt, s, s.Size)
// an init function
return
}
- addmoduledata.Reachable = true
+ addmoduledata.Attr |= ld.AttrReachable
initfunc := ld.Linklookup(ld.Ctxt, "go.link.addmoduledata", 0)
initfunc.Type = obj.STEXT
- initfunc.Local = true
- initfunc.Reachable = true
+ initfunc.Attr |= ld.AttrLocal
+ initfunc.Attr |= ld.AttrReachable
o := func(op ...uint8) {
for _, op1 := range op {
ld.Adduint8(ld.Ctxt, initfunc, op1)
}
ld.Ctxt.Etextp = initfunc
initarray_entry := ld.Linklookup(ld.Ctxt, "go.link.addmoduledatainit", 0)
- initarray_entry.Reachable = true
- initarray_entry.Local = true
+ initarray_entry.Attr |= ld.AttrReachable
+ initarray_entry.Attr |= ld.AttrLocal
initarray_entry.Type = obj.SINITARR
ld.Addaddr(ld.Ctxt, initarray_entry, initfunc)
}
const (
thechar = '6'
MaxAlign = 32 // max data alignment
+ MinAlign = 1 // min data alignment
FuncAlign = 16
)
ld.Thearch.Regsize = ld.Thelinkarch.Regsize
ld.Thearch.Funcalign = FuncAlign
ld.Thearch.Maxalign = MaxAlign
+ ld.Thearch.Minalign = MinAlign
ld.Thearch.Minlc = MINLC
ld.Thearch.Dwarfregsp = DWARFREGSP
ld.Thearch.Dwarfreglr = DWARFREGLR
// an init function
return
}
- addmoduledata.Reachable = true
+ addmoduledata.Attr |= ld.AttrReachable
initfunc := ld.Linklookup(ld.Ctxt, "go.link.addmoduledata", 0)
initfunc.Type = obj.STEXT
- initfunc.Local = true
- initfunc.Reachable = true
+ initfunc.Attr |= ld.AttrLocal
+ initfunc.Attr |= ld.AttrReachable
o := func(op uint32) {
ld.Adduint32(ld.Ctxt, initfunc, op)
}
}
ld.Ctxt.Etextp = initfunc
initarray_entry := ld.Linklookup(ld.Ctxt, "go.link.addmoduledatainit", 0)
- initarray_entry.Reachable = true
- initarray_entry.Local = true
+ initarray_entry.Attr |= ld.AttrReachable
+ initarray_entry.Attr |= ld.AttrLocal
initarray_entry.Type = obj.SINITARR
ld.Addaddr(ld.Ctxt, initarray_entry, initfunc)
}
r.Type = int32(typ)
r.Add = int64(sym.Got) - 8
- plt.Reachable = true
+ plt.Attr |= ld.AttrReachable
plt.Size += 4
ld.Symgrow(ctxt, plt, plt.Size)
const (
thechar = '5'
MaxAlign = 8 // max data alignment
+ MinAlign = 1 // min data alignment
FuncAlign = 4 // single-instruction alignment
MINLC = 4
)
ld.Thearch.Regsize = ld.Thelinkarch.Regsize
ld.Thearch.Funcalign = FuncAlign
ld.Thearch.Maxalign = MaxAlign
+ ld.Thearch.Minalign = MinAlign
ld.Thearch.Minlc = MINLC
ld.Thearch.Dwarfregsp = DWARFREGSP
ld.Thearch.Dwarfreglr = DWARFREGLR
// an init function
return
}
- addmoduledata.Reachable = true
+ addmoduledata.Attr |= ld.AttrReachable
initfunc := ld.Linklookup(ld.Ctxt, "go.link.addmoduledata", 0)
initfunc.Type = obj.STEXT
- initfunc.Local = true
- initfunc.Reachable = true
+ initfunc.Attr |= ld.AttrLocal
+ initfunc.Attr |= ld.AttrReachable
o := func(op uint32) {
ld.Adduint32(ld.Ctxt, initfunc, op)
}
}
ld.Ctxt.Etextp = initfunc
initarray_entry := ld.Linklookup(ld.Ctxt, "go.link.addmoduledatainit", 0)
- initarray_entry.Reachable = true
- initarray_entry.Local = true
+ initarray_entry.Attr |= ld.AttrReachable
+ initarray_entry.Attr |= ld.AttrLocal
initarray_entry.Type = obj.SINITARR
ld.Addaddr(ld.Ctxt, initarray_entry, initfunc)
}
// (https://sourceware.org/bugzilla/show_bug.cgi?id=18270). So
// we convert the adrp; ld64 + R_ARM64_GOTPCREL into adrp;
// add + R_ADDRARM64.
- if !(r.Sym.Version != 0 || (r.Sym.Type&obj.SHIDDEN != 0) || r.Sym.Local) && r.Sym.Type == obj.STEXT && ld.DynlinkingGo() {
+ if !(r.Sym.Version != 0 || (r.Sym.Type&obj.SHIDDEN != 0) || r.Sym.Attr.Local()) && r.Sym.Type == obj.STEXT && ld.DynlinkingGo() {
if o2&0xffc00000 != 0xf9400000 {
ld.Ctxt.Diag("R_ARM64_GOTPCREL against unexpected instruction %x", o2)
}
const (
thechar = '7'
MaxAlign = 32 // max data alignment
+ MinAlign = 1 // min data alignment
FuncAlign = 8
MINLC = 4
)
ld.Thearch.Regsize = ld.Thelinkarch.Regsize
ld.Thearch.Funcalign = FuncAlign
ld.Thearch.Maxalign = MaxAlign
+ ld.Thearch.Minalign = MinAlign
ld.Thearch.Minlc = MINLC
ld.Thearch.Dwarfregsp = DWARFREGSP
ld.Thearch.Dwarfreglr = DWARFREGLR
any := true
for any {
var load []uint64
- for s := Ctxt.Allsym; s != nil; s = s.Allsym {
+ for _, s := range Ctxt.Allsym {
for _, r := range s.R {
if r.Sym != nil && r.Sym.Type&obj.SMASK == obj.SXREF {
if off := armap[r.Sym.Name]; off != 0 && !loaded[off] {
if s.Type == 0 {
s.Type = obj.SDATA
}
- s.Reachable = true
+ s.Attr |= AttrReachable
if s.Size < off+wid {
s.Size = off + wid
Symgrow(ctxt, s, s.Size)
if s.Type == 0 {
s.Type = obj.SDATA
}
- s.Reachable = true
+ s.Attr |= AttrReachable
i := s.Size
s.Size += int64(ctxt.Arch.Ptrsize)
Symgrow(ctxt, s, s.Size)
if s.Type == 0 {
s.Type = obj.SDATA
}
- s.Reachable = true
+ s.Attr |= AttrReachable
i := s.Size
s.Size += 4
Symgrow(ctxt, s, s.Size)
if s.Type == 0 {
s.Type = obj.SDATA
}
- s.Reachable = true
+ s.Attr |= AttrReachable
if off+int64(ctxt.Arch.Ptrsize) > s.Size {
s.Size = off + int64(ctxt.Arch.Ptrsize)
Symgrow(ctxt, s, s.Size)
if s.Type == 0 {
s.Type = obj.SDATA
}
- s.Reachable = true
+ s.Attr |= AttrReachable
i := s.Size
s.Size += int64(ctxt.Arch.Ptrsize)
Symgrow(ctxt, s, s.Size)
if s.Type == 0 {
s.Type = obj.SDATA
}
- s.Reachable = true
+ s.Attr |= AttrReachable
i := s.Size
s.Size += 4
Symgrow(ctxt, s, s.Size)
Diag("unhandled relocation for %s (type %d rtype %d)", r.Sym.Name, r.Sym.Type, r.Type)
}
}
- if r.Sym != nil && r.Sym.Type != obj.STLSBSS && !r.Sym.Reachable {
+ if r.Sym != nil && r.Sym.Type != obj.STLSBSS && !r.Sym.Attr.Reachable() {
Diag("unreachable sym in relocation: %s %s", s.Name, r.Sym.Name)
}
if targ == nil {
continue
}
- if !targ.Reachable {
+ if !targ.Attr.Reachable() {
Diag("internal inconsistency: dynamic symbol %s is not reachable.", targ.Name)
}
if r.Sym.Plt == -2 && r.Sym.Got != -2 { // make dynimport JMP table for PE object files.
for ri := 0; ri < len(s.R); ri++ {
r = &s.R[ri]
if r.Sym != nil && r.Sym.Type == obj.SDYNIMPORT || r.Type >= 256 {
- if r.Sym != nil && !r.Sym.Reachable {
+ if r.Sym != nil && !r.Sym.Attr.Reachable() {
Diag("internal inconsistency: dynamic symbol %s is not reachable.", r.Sym.Name)
}
Thearch.Adddynrel(s, r)
var sym *LSym
for sym = Ctxt.Textp; sym != nil; sym = sym.Next {
- if !sym.Reachable {
+ if !sym.Attr.Reachable() {
continue
}
if sym.Value >= addr {
eaddr := addr + size
var q []byte
for ; sym != nil; sym = sym.Next {
- if !sym.Reachable {
+ if !sym.Attr.Reachable() {
continue
}
if sym.Value >= eaddr {
s := Linklookup(Ctxt, name, 0)
s.Size = 0
- s.Dupok = 1
- reachable := s.Reachable
+ s.Attr |= AttrDuplicateOK
+ reachable := s.Attr.Reachable()
Addaddr(Ctxt, s, sp)
adduintxx(Ctxt, s, uint64(len(value)), Thearch.Ptrsize)
// addstring, addaddr, etc., mark the symbols as reachable.
// In this case that is not necessarily true, so stick to what
// we know before entering this function.
- s.Reachable = reachable
+ s.Attr.Set(AttrReachable, reachable)
strdata = append(strdata, s)
- sp.Reachable = reachable
+ sp.Attr.Set(AttrReachable, reachable)
}
func checkstrdata() {
if s.Type == 0 {
s.Type = obj.SNOPTRDATA
}
- s.Reachable = true
+ s.Attr |= AttrReachable
r := int32(s.Size)
n := len(str) + 1
if s.Name == ".shstrtab" {
if sym.Type != obj.Sxxx {
Diag("duplicate symname in addgostring: %s", symname)
}
- sym.Reachable = true
- sym.Local = true
+ sym.Attr |= AttrReachable
+ sym.Attr |= AttrLocal
sym.Type = obj.SRODATA
sym.Size = int64(len(str))
sym.P = []byte(str)
sp := Linklookup(Ctxt, p, 0)
sp.Type = obj.SINITARR
sp.Size = 0
- sp.Dupok = 1
+ sp.Attr |= AttrDuplicateOK
Addaddr(Ctxt, sp, s)
}
func dosymtype() {
- for s := Ctxt.Allsym; s != nil; s = s.Allsym {
+ for _, s := range Ctxt.Allsym {
if len(s.P) > 0 {
if s.Type == obj.SBSS {
s.Type = obj.SDATA
}
}
+// symalign returns the required alignment for the given symbol s.
func symalign(s *LSym) int32 {
- if s.Align != 0 {
+ min := int32(Thearch.Minalign)
+ if s.Align >= min {
return s.Align
+ } else if s.Align != 0 {
+ return min
+ }
+ if strings.HasPrefix(s.Name, "go.string.") && !strings.HasPrefix(s.Name, "go.string.hdr.") {
+ // String data is just bytes.
+ // If we align it, we waste a lot of space to padding.
+ return 1
}
-
align := int32(Thearch.Maxalign)
- for int64(align) > s.Size && align > 1 {
+ for int64(align) > s.Size && align > min {
align >>= 1
}
- if align < s.Align {
- align = s.Align
- }
return align
}
var last *LSym
datap = nil
- for s := Ctxt.Allsym; s != nil; s = s.Allsym {
- if !s.Reachable || s.Special != 0 {
+ for _, s := range Ctxt.Allsym {
+ if !s.Attr.Reachable() || s.Attr.Special() {
continue
}
if obj.STEXT < s.Type && s.Type < obj.SXREF {
- if s.Onlist != 0 {
+ if s.Attr.OnList() {
log.Fatalf("symbol %s listed multiple times", s.Name)
}
- s.Onlist = 1
+ s.Attr |= AttrOnList
if last == nil {
datap = s
} else {
// when building a shared library. We do this by boosting objects of
// type SXXX with relocations to type SXXXRELRO.
for s := datap; s != nil; s = s.Next {
- if (s.Type >= obj.STYPE && s.Type <= obj.SFUNCTAB && len(s.R) > 0) || s.Type == obj.SGOSTRING {
+ if (s.Type >= obj.STYPE && s.Type <= obj.SFUNCTAB && len(s.R) > 0) || s.Type == obj.SGOSTRINGHDR {
s.Type += (obj.STYPERELRO - obj.STYPE)
if s.Outer != nil {
s.Outer.Type = s.Type
}
sym := Linklookup(Ctxt, "go.buildid", 0)
- sym.Reachable = true
+ sym.Attr |= AttrReachable
// The \xff is invalid UTF-8, meant to make it less likely
// to find one of these accidentally.
data := "\xff Go build ID: " + strconv.Quote(buildid) + "\n \xff"
xdefine("runtime.etypelink", obj.SRODATA, int64(typelink.Vaddr+typelink.Length))
sym := Linklookup(Ctxt, "runtime.gcdata", 0)
- sym.Local = true
+ sym.Attr |= AttrLocal
xdefine("runtime.egcdata", obj.SRODATA, Symaddr(sym)+sym.Size)
Linklookup(Ctxt, "runtime.egcdata", 0).Sect = sym.Sect
sym = Linklookup(Ctxt, "runtime.gcbss", 0)
- sym.Local = true
+ sym.Attr |= AttrLocal
xdefine("runtime.egcbss", obj.SRODATA, Symaddr(sym)+sym.Size)
Linklookup(Ctxt, "runtime.egcbss", 0).Sect = sym.Sect
}
}
-// commonsize returns the size of the common prefix for all type
-// structures (runtime._type).
-func commonsize() int {
- return 7*Thearch.Ptrsize + 8
-}
+func commonsize() int { return 6*Thearch.Ptrsize + 8 } // runtime._type
+func structfieldSize() int { return 5 * Thearch.Ptrsize } // runtime.structfield
+func uncommonSize() int { return 2*Thearch.Ptrsize + 2*Thearch.Intsize } // runtime.uncommontype
// Type.commonType.kind
func decodetype_kind(s *LSym) uint8 {
return int64(decode_inuxi(s.P[Thearch.Ptrsize:], Thearch.Ptrsize)) // 0x8 / 0x10
}
+// Type.commonType.tflag
+func decodetype_hasUncommon(s *LSym) bool {
+ const tflagUncommon = 1 // see ../../../../reflect/type.go:/^type.tflag
+ return s.P[2*Thearch.Ptrsize+4]&tflagUncommon != 0
+}
+
// Find the elf.Section of a given shared library that contains a given address.
func findShlibSection(path string, addr uint64) *elf.Section {
for _, shlib := range Ctxt.Shlibs {
}
// Type.FuncType.dotdotdot
-func decodetype_funcdotdotdot(s *LSym) int {
- return int(s.P[commonsize()])
+func decodetype_funcdotdotdot(s *LSym) bool {
+ return uint16(decode_inuxi(s.P[commonsize()+2:], 2))&(1<<15) != 0
}
-// Type.FuncType.in.length
+// Type.FuncType.inCount
func decodetype_funcincount(s *LSym) int {
- return int(decode_inuxi(s.P[commonsize()+2*Thearch.Ptrsize:], Thearch.Intsize))
+ return int(decode_inuxi(s.P[commonsize():], 2))
}
func decodetype_funcoutcount(s *LSym) int {
- return int(decode_inuxi(s.P[commonsize()+3*Thearch.Ptrsize+2*Thearch.Intsize:], Thearch.Intsize))
+ return int(uint16(decode_inuxi(s.P[commonsize()+2:], 2)) & (1<<15 - 1))
}
func decodetype_funcintype(s *LSym, i int) *LSym {
- r := decode_reloc(s, int32(commonsize())+int32(Thearch.Ptrsize))
- if r == nil {
- return nil
+ uadd := commonsize() + 4
+ if Thearch.Ptrsize == 8 {
+ uadd += 4
+ }
+ if decodetype_hasUncommon(s) {
+ uadd += uncommonSize()
}
- return decode_reloc_sym(r.Sym, int32(r.Add+int64(int32(i)*int32(Thearch.Ptrsize))))
+ return decode_reloc_sym(s, int32(uadd+i*Thearch.Ptrsize))
}
func decodetype_funcouttype(s *LSym, i int) *LSym {
- r := decode_reloc(s, int32(commonsize())+2*int32(Thearch.Ptrsize)+2*int32(Thearch.Intsize))
- if r == nil {
- return nil
- }
- return decode_reloc_sym(r.Sym, int32(r.Add+int64(int32(i)*int32(Thearch.Ptrsize))))
+ return decodetype_funcintype(s, i+decodetype_funcincount(s))
}
// Type.StructType.fields.Slice::length
return int(decode_inuxi(s.P[commonsize()+Thearch.Ptrsize:], Thearch.Intsize))
}
-func structfieldsize() int {
- return 5 * Thearch.Ptrsize
+func decodetype_structfieldarrayoff(s *LSym, i int) int {
+ off := commonsize() + Thearch.Ptrsize + 2*Thearch.Intsize
+ if decodetype_hasUncommon(s) {
+ off += uncommonSize()
+ }
+ off += i * structfieldSize()
+ return off
}
-// Type.StructType.fields[]-> name, typ and offset.
func decodetype_structfieldname(s *LSym, i int) string {
- // go.string."foo" 0x28 / 0x40
- s = decode_reloc_sym(s, int32(commonsize())+int32(Thearch.Ptrsize)+2*int32(Thearch.Intsize)+int32(i)*int32(structfieldsize()))
-
+ off := decodetype_structfieldarrayoff(s, i)
+ s = decode_reloc_sym(s, int32(off))
if s == nil { // embedded structs have a nil name.
return ""
}
if r == nil { // shouldn't happen.
return ""
}
- return cstring(r.Sym.P[r.Add:])
+ strlen := int64(decode_inuxi(s.P[Thearch.Ptrsize:], Thearch.Intsize))
+ return string(r.Sym.P[r.Add : r.Add+strlen])
}
func decodetype_structfieldtype(s *LSym, i int) *LSym {
- return decode_reloc_sym(s, int32(commonsize())+int32(Thearch.Ptrsize)+2*int32(Thearch.Intsize)+int32(i)*int32(structfieldsize())+2*int32(Thearch.Ptrsize))
+ off := decodetype_structfieldarrayoff(s, i)
+ return decode_reloc_sym(s, int32(off+2*Thearch.Ptrsize))
}
func decodetype_structfieldoffs(s *LSym, i int) int64 {
- return int64(decode_inuxi(s.P[commonsize()+Thearch.Ptrsize+2*Thearch.Intsize+i*structfieldsize()+4*Thearch.Ptrsize:], Thearch.Intsize))
+ off := decodetype_structfieldarrayoff(s, i)
+ return int64(decode_inuxi(s.P[off+4*Thearch.Ptrsize:], Thearch.Intsize))
}
// InterfaceType.methods.length
/*
* Debugging Information Entries and their attributes.
*/
-const (
- HASHSIZE = 107
-)
-
-func dwarfhashstr(s string) uint32 {
- h := uint32(0)
- for s != "" {
- h = h + h + h + uint32(s[0])
- s = s[1:]
- }
- return h % HASHSIZE
-}
// For DW_CLS_string and _block, value should contain the length, and
// data the data, for _reference, value is 0 and data is a DWDie* to
attr *DWAttr
// offset into .debug_info section, i.e relative to
// infoo. only valid after call to putdie()
- offs int64
- hash []*DWDie // optional index of children by name, enabled by mkindex()
- hlink *DWDie // bucket chain in parent's index
+ offs int64
+ hash map[string]*DWDie // optional index of child DWDie by name, enabled by mkindex()
}
/*
newattr(die, DW_AT_name, DW_CLS_STRING, int64(len(name)), name)
if parent.hash != nil {
- h := int(dwarfhashstr(name))
- die.hlink = parent.hash[h]
- parent.hash[h] = die
+ parent.hash[name] = die
}
return die
}
func mkindex(die *DWDie) {
- die.hash = make([]*DWDie, HASHSIZE)
+ die.hash = make(map[string]*DWDie)
}
func walktypedef(die *DWDie) *DWDie {
func find(die *DWDie, name string) *DWDie {
var prev *DWDie
for ; die != prev; prev, die = die, walktypedef(die) {
-
if die.hash == nil {
for a := die.child; a != nil; a = a.link {
if name == getattr(a, DW_AT_name).data {
}
continue
}
-
- h := int(dwarfhashstr(name))
- a := die.hash[h]
-
- if a == nil {
- continue
- }
-
- if name == getattr(a, DW_AT_name).data {
+ if a := die.hash[name]; a != nil {
return a
}
-
- // Move found ones to head of the list.
- for b := a.hlink; b != nil; b = b.hlink {
- if name == getattr(b, DW_AT_name).data {
- a.hlink = b.hlink
- b.hlink = die.hash[h]
- die.hash[h] = b
- return b
- }
- a = b
- }
}
return nil
}
newrefattr(fld, DW_AT_type, defgotype(s))
}
- if decodetype_funcdotdotdot(gotype) != 0 {
+ if decodetype_funcdotdotdot(gotype) {
newdie(die, DW_ABRV_DOTDOTDOT, "...")
}
nfields = decodetype_funcoutcount(gotype)
}
var (
- dt int
- offs int64
- varhash [HASHSIZE]*DWDie
+ dt, da int
+ offs int64
)
- da := 0
- dwfunc.hash = varhash[:] // enable indexing of children by name
- for a := s.Autom; a != nil; a = a.Link {
+ for _, a := range s.Autom {
switch a.Name {
case obj.A_AUTO:
dt = DW_ABRV_AUTO
da++
}
-
- dwfunc.hash = nil
}
flushunit(dwinfo, epc, epcs, unitstart, int32(headerend-unitstart-10))
sect = addmachodwarfsect(sect, ".debug_info")
infosym = Linklookup(Ctxt, ".debug_info", 0)
- infosym.Hidden = true
+ infosym.Attr |= AttrHidden
abbrevsym = Linklookup(Ctxt, ".debug_abbrev", 0)
- abbrevsym.Hidden = true
+ abbrevsym.Attr |= AttrHidden
linesym = Linklookup(Ctxt, ".debug_line", 0)
- linesym.Hidden = true
+ linesym.Attr |= AttrHidden
framesym = Linklookup(Ctxt, ".debug_frame", 0)
- framesym.Hidden = true
+ framesym.Attr |= AttrHidden
}
}
}
infosym = Linklookup(Ctxt, ".debug_info", 0)
- infosym.Hidden = true
+ infosym.Attr |= AttrHidden
abbrevsym = Linklookup(Ctxt, ".debug_abbrev", 0)
- abbrevsym.Hidden = true
+ abbrevsym.Attr |= AttrHidden
linesym = Linklookup(Ctxt, ".debug_line", 0)
- linesym.Hidden = true
+ linesym.Attr |= AttrHidden
framesym = Linklookup(Ctxt, ".debug_frame", 0)
- framesym.Hidden = true
+ framesym.Attr |= AttrHidden
}
}
"cmd/internal/obj"
"crypto/sha1"
"encoding/binary"
+ "encoding/hex"
"fmt"
"path/filepath"
"sort"
}
/* Taken directly from the definition document for ELF64 */
-func elfhash(name []byte) uint32 {
- var h uint32 = 0
- var g uint32
- for len(name) != 0 {
- h = (h << 4) + uint32(name[0])
- name = name[1:]
- g = h & 0xf0000000
- if g != 0 {
+func elfhash(name string) uint32 {
+ var h uint32
+ for i := 0; i < len(name); i++ {
+ h = (h << 4) + uint32(name[i])
+ if g := h & 0xf0000000; g != 0 {
h ^= g >> 24
}
h &= 0x0fffffff
}
-
return h
}
}
func addbuildinfo(val string) {
- var j int
-
- if val[0] != '0' || val[1] != 'x' {
+ if !strings.HasPrefix(val, "0x") {
Exitf("-B argument must start with 0x: %s", val)
}
ov := val
val = val[2:]
- i := 0
- var b int
- for val != "" {
- if len(val) == 1 {
- Exitf("-B argument must have even number of digits: %s", ov)
- }
- b = 0
- for j = 0; j < 2; j, val = j+1, val[1:] {
- b *= 16
- if val[0] >= '0' && val[0] <= '9' {
- b += int(val[0]) - '0'
- } else if val[0] >= 'a' && val[0] <= 'f' {
- b += int(val[0]) - 'a' + 10
- } else if val[0] >= 'A' && val[0] <= 'F' {
- b += int(val[0]) - 'A' + 10
- } else {
- Exitf("-B argument contains invalid hex digit %c: %s", val[0], ov)
- }
- }
+ const maxLen = 32
+ if hex.DecodedLen(len(val)) > maxLen {
+ Exitf("-B option too long (max %d digits): %s", maxLen, ov)
+ }
- const maxLen = 32
- if i >= maxLen {
- Exitf("-B option too long (max %d digits): %s", maxLen, ov)
+ b, err := hex.DecodeString(val)
+ if err != nil {
+ if err == hex.ErrLength {
+ Exitf("-B argument must have even number of digits: %s", ov)
}
-
- buildinfo = append(buildinfo, uint8(b))
- i++
+ if inv, ok := err.(hex.InvalidByteError); ok {
+ Exitf("-B argument contains invalid hex digit %c: %s", byte(inv), ov)
+ }
+ Exitf("-B argument contains invalid hex: %s", ov)
}
- buildinfo = buildinfo[:i]
+ buildinfo = b
}
// Build info note
nsym := Nelfsym
s := Linklookup(Ctxt, ".hash", 0)
s.Type = obj.SELFROSECT
- s.Reachable = true
+ s.Attr |= AttrReachable
i := nsym
nbucket := 1
buckets := make([]uint32, nbucket)
var b int
- var hc uint32
- var name string
- for sy := Ctxt.Allsym; sy != nil; sy = sy.Allsym {
+ for _, sy := range Ctxt.Allsym {
if sy.Dynid <= 0 {
continue
}
need[sy.Dynid] = addelflib(&needlib, sy.Dynimplib, sy.Dynimpvers)
}
- name = sy.Extname
- hc = elfhash([]byte(name))
+ name := sy.Extname
+ hc := elfhash(name)
b = int(hc % uint32(nbucket))
chain[sy.Dynid] = buckets[b]
i++
// aux struct
- Adduint32(Ctxt, s, elfhash([]byte(x.vers))) // hash
+ Adduint32(Ctxt, s, elfhash(x.vers)) // hash
Adduint16(Ctxt, s, 0) // flags
Adduint16(Ctxt, s, uint16(x.num)) // other - index we refer to this by
Adduint32(Ctxt, s, uint32(Addstring(dynstr, x.vers))) // version string offset
sect.Reloff = uint64(Cpos())
var sym *LSym
for sym = first; sym != nil; sym = sym.Next {
- if !sym.Reachable {
+ if !sym.Attr.Reachable() {
continue
}
if uint64(sym.Value) >= sect.Vaddr {
var r *Reloc
var ri int
for ; sym != nil; sym = sym.Next {
- if !sym.Reachable {
+ if !sym.Attr.Reachable() {
continue
}
if sym.Value >= int64(eaddr) {
func addgonote(sectionName string, tag uint32, desc []byte) {
s := Linklookup(Ctxt, sectionName, 0)
- s.Reachable = true
+ s.Attr |= AttrReachable
s.Type = obj.SELFROSECT
// namesz
Adduint32(Ctxt, s, uint32(len(ELF_NOTE_GO_NAME)))
shstrtab := Linklookup(Ctxt, ".shstrtab", 0)
shstrtab.Type = obj.SELFROSECT
- shstrtab.Reachable = true
+ shstrtab.Attr |= AttrReachable
Addstring(shstrtab, "")
Addstring(shstrtab, ".text")
s := Linklookup(Ctxt, ".dynsym", 0)
s.Type = obj.SELFROSECT
- s.Reachable = true
+ s.Attr |= AttrReachable
switch Thearch.Thechar {
case '0', '6', '7', '9':
s.Size += ELF64SYMSIZE
s = Linklookup(Ctxt, ".dynstr", 0)
s.Type = obj.SELFROSECT
- s.Reachable = true
+ s.Attr |= AttrReachable
if s.Size == 0 {
Addstring(s, "")
}
default:
s = Linklookup(Ctxt, ".rel", 0)
}
- s.Reachable = true
+ s.Attr |= AttrReachable
s.Type = obj.SELFROSECT
/* global offset table */
s = Linklookup(Ctxt, ".got", 0)
- s.Reachable = true
+ s.Attr |= AttrReachable
s.Type = obj.SELFGOT // writable
/* ppc64 glink resolver */
if Thearch.Thechar == '9' {
s := Linklookup(Ctxt, ".glink", 0)
- s.Reachable = true
+ s.Attr |= AttrReachable
s.Type = obj.SELFRXSECT
}
/* hash */
s = Linklookup(Ctxt, ".hash", 0)
- s.Reachable = true
+ s.Attr |= AttrReachable
s.Type = obj.SELFROSECT
s = Linklookup(Ctxt, ".got.plt", 0)
- s.Reachable = true
+ s.Attr |= AttrReachable
s.Type = obj.SELFSECT // writable
s = Linklookup(Ctxt, ".plt", 0)
- s.Reachable = true
+ s.Attr |= AttrReachable
if Thearch.Thechar == '9' {
// In the ppc64 ABI, .plt is a data section
// written by the dynamic linker.
default:
s = Linklookup(Ctxt, ".rel.plt", 0)
}
- s.Reachable = true
+ s.Attr |= AttrReachable
s.Type = obj.SELFROSECT
s = Linklookup(Ctxt, ".gnu.version", 0)
- s.Reachable = true
+ s.Attr |= AttrReachable
s.Type = obj.SELFROSECT
s = Linklookup(Ctxt, ".gnu.version_r", 0)
- s.Reachable = true
+ s.Attr |= AttrReachable
s.Type = obj.SELFROSECT
/* define dynamic elf table */
s = Linklookup(Ctxt, ".dynamic", 0)
- s.Reachable = true
+ s.Attr |= AttrReachable
s.Type = obj.SELFSECT // writable
/*
// The go.link.abihashbytes symbol will be pointed at the appropriate
// part of the .note.go.abihash section in data.go:func address().
s := Linklookup(Ctxt, "go.link.abihashbytes", 0)
- s.Local = true
+ s.Attr |= AttrLocal
s.Type = obj.SRODATA
- s.Special = 1
- s.Reachable = true
+ s.Attr |= AttrSpecial
+ s.Attr |= AttrReachable
s.Size = int64(sha1.Size)
sort.Sort(byPkg(Ctxt.Library))
/* type */
t := STB_GLOBAL << 4
- if s.Cgoexport != 0 && s.Type&obj.SMASK == obj.STEXT {
+ if s.Attr.CgoExport() && s.Type&obj.SMASK == obj.STEXT {
t |= STT_FUNC
} else {
t |= STT_OBJECT
/* size of object */
Adduint64(ctxt, d, uint64(s.Size))
- if Thearch.Thechar == '6' && s.Cgoexport&CgoExportDynamic == 0 && s.Dynimplib != "" && !seenlib[s.Dynimplib] {
+ if Thearch.Thechar == '6' && !s.Attr.CgoExportDynamic() && s.Dynimplib != "" && !seenlib[s.Dynimplib] {
Elfwritedynent(Linklookup(ctxt, ".dynamic", 0), DT_NEEDED, uint64(Addstring(Linklookup(ctxt, ".dynstr", 0), s.Dynimplib)))
}
} else {
t := STB_GLOBAL << 4
// TODO(mwhudson): presumably the behaviour should actually be the same on both arm and 386.
- if Thearch.Thechar == '8' && s.Cgoexport != 0 && s.Type&obj.SMASK == obj.STEXT {
+ if Thearch.Thechar == '8' && s.Attr.CgoExport() && s.Type&obj.SMASK == obj.STEXT {
t |= STT_FUNC
- } else if Thearch.Thechar == '5' && s.Cgoexport&CgoExportDynamic != 0 && s.Type&obj.SMASK == obj.STEXT {
+ } else if Thearch.Thechar == '5' && s.Attr.CgoExportDynamic() && s.Type&obj.SMASK == obj.STEXT {
t |= STT_FUNC
} else {
t |= STT_OBJECT
s.Type = 0
}
- if s.Cgoexport == 0 {
+ if !s.Attr.CgoExport() {
s.Extname = remote
dynexp = append(dynexp, s)
} else if s.Extname != remote {
}
if f[0] == "cgo_export_static" {
- s.Cgoexport |= CgoExportStatic
+ s.Attr |= AttrCgoExportStatic
} else {
- s.Cgoexport |= CgoExportDynamic
+ s.Attr |= AttrCgoExportDynamic
}
if local != f[1] {
}
}
}
-var markq *LSym
-
-var emarkq *LSym
+var markQueue []*LSym
func mark1(s *LSym, parent *LSym) {
- if s == nil || s.Reachable {
- return
- }
- if strings.HasPrefix(s.Name, "go.weak.") {
+ if s == nil || s.Attr.Reachable() {
return
}
- s.Reachable = true
+ s.Attr |= AttrReachable
s.Reachparent = parent
- if markq == nil {
- markq = s
- } else {
- emarkq.Queue = s
- }
- emarkq = s
+ markQueue = append(markQueue, s)
}
func mark(s *LSym) {
mark1(s, nil)
}
+// markflood makes the dependencies of any reachable symbol also reachable.
func markflood() {
- var a *Auto
- var i int
-
- for s := markq; s != nil; s = s.Queue {
+ for len(markQueue) > 0 {
+ s := markQueue[0]
+ markQueue = markQueue[1:]
if s.Type == obj.STEXT {
if Debug['v'] > 1 {
fmt.Fprintf(&Bso, "marktext %s\n", s.Name)
}
- for a = s.Autom; a != nil; a = a.Link {
+ for _, a := range s.Autom {
mark1(a.Gotype, s)
}
}
-
- for i = 0; i < len(s.R); i++ {
+ for i := 0; i < len(s.R); i++ {
mark1(s.R[i].Sym, s)
}
if s.Pcln != nil {
- for i = 0; i < s.Pcln.Nfuncdata; i++ {
+ for i := 0; i < s.Pcln.Nfuncdata; i++ {
mark1(s.Pcln.Funcdata[i], s)
}
}
-
mark1(s.Gotype, s)
mark1(s.Sub, s)
mark1(s.Outer, s)
if Buildmode == BuildmodeShared {
// Mark all symbols defined in this library as reachable when
// building a shared library.
- for s := Ctxt.Allsym; s != nil; s = s.Allsym {
+ for _, s := range Ctxt.Allsym {
if s.Type != 0 && s.Type != obj.SDYNIMPORT {
mark(s)
}
markflood()
// keep each beginning with 'typelink.' if the symbol it points at is being kept.
- for s := Ctxt.Allsym; s != nil; s = s.Allsym {
+ for _, s := range Ctxt.Allsym {
if strings.HasPrefix(s.Name, "go.typelink.") {
- s.Reachable = len(s.R) == 1 && s.R[0].Sym.Reachable
+ s.Attr.Set(AttrReachable, len(s.R) == 1 && s.R[0].Sym.Attr.Reachable())
}
}
var last *LSym
for s := Ctxt.Textp; s != nil; s = s.Next {
- if !s.Reachable {
+ if !s.Attr.Reachable() {
continue
}
}
}
- for s := Ctxt.Allsym; s != nil; s = s.Allsym {
- if strings.HasPrefix(s.Name, "go.weak.") {
- s.Special = 1 // do not lay out in data segment
- s.Reachable = true
- s.Hidden = true
- }
- }
-
// record field tracking references
var buf bytes.Buffer
- var p *LSym
- for s := Ctxt.Allsym; s != nil; s = s.Allsym {
+ for _, s := range Ctxt.Allsym {
if strings.HasPrefix(s.Name, "go.track.") {
- s.Special = 1 // do not lay out in data segment
- s.Hidden = true
- if s.Reachable {
+ s.Attr |= AttrSpecial // do not lay out in data segment
+ s.Attr |= AttrHidden
+ if s.Attr.Reachable() {
buf.WriteString(s.Name[9:])
- for p = s.Reachparent; p != nil; p = p.Reachparent {
+ for p := s.Reachparent; p != nil; p = p.Reachparent {
buf.WriteString("\t")
buf.WriteString(p.Name)
}
return
}
s := Linklookup(Ctxt, tracksym, 0)
- if !s.Reachable {
+ if !s.Attr.Reachable() {
return
}
addstrdata(tracksym, buf.String())
}
-func doweak() {
- var t *LSym
-
- // resolve weak references only if
- // target symbol will be in binary anyway.
- for s := Ctxt.Allsym; s != nil; s = s.Allsym {
- if strings.HasPrefix(s.Name, "go.weak.") {
- t = Linkrlookup(Ctxt, s.Name[8:], int(s.Version))
- if t != nil && t.Type != 0 && t.Reachable {
- s.Value = t.Value
- s.Type = t.Type
- s.Outer = t
- } else {
- s.Type = obj.SCONST
- s.Value = 0
- }
-
- continue
- }
- }
-}
-
func addexport() {
if HEADTYPE == obj.Hdarwin {
return
s = sym.sym
if s.Outer != nil {
- if s.Dupok != 0 {
+ if s.Attr.DuplicateOK() {
continue
}
Exitf("%s: duplicate symbol reference: %s in both %s and %s", pn, s.Name, s.Outer.Name, sect.sym.Name)
s.Sub = sect.sym.Sub
sect.sym.Sub = s
s.Type = sect.sym.Type | s.Type&^obj.SMASK | obj.SSUB
- if s.Cgoexport&CgoExportDynamic == 0 {
+ if !s.Attr.CgoExportDynamic() {
s.Dynimplib = "" // satisfy dynimport
}
s.Value = int64(sym.value)
s.Size = int64(sym.size)
s.Outer = sect.sym
if sect.sym.Type == obj.STEXT {
- if s.External != 0 && s.Dupok == 0 {
+ if s.Attr.External() && !s.Attr.DuplicateOK() {
Diag("%s: duplicate definition of %s", pn, s.Name)
}
- s.External = 1
+ s.Attr |= AttrExternal
}
if elfobj.machine == ElfMachPower64 {
s.Sub = listsort(s.Sub, valuecmp, listsubp)
}
if s.Type == obj.STEXT {
- if s.Onlist != 0 {
+ if s.Attr.OnList() {
log.Fatalf("symbol %s listed multiple times", s.Name)
}
- s.Onlist = 1
+ s.Attr |= AttrOnList
if Ctxt.Etextp != nil {
Ctxt.Etextp.Next = s
} else {
}
Ctxt.Etextp = s
for s = s.Sub; s != nil; s = s.Sub {
- if s.Onlist != 0 {
+ if s.Attr.OnList() {
log.Fatalf("symbol %s listed multiple times", s.Name)
}
- s.Onlist = 1
+ s.Attr |= AttrOnList
Ctxt.Etextp.Next = s
Ctxt.Etextp = s
}
// comment #5 for details.
if s != nil && sym.other == 2 {
s.Type |= obj.SHIDDEN
- s.Dupok = 1
+ s.Attr |= AttrDuplicateOK
}
}
}
s = Linklookup(Ctxt, name, v)
if sym.type_&N_EXT == 0 {
- s.Dupok = 1
+ s.Attr |= AttrDuplicateOK
}
sym.sym = s
if sym.sectnum == 0 { // undefined
}
if s.Outer != nil {
- if s.Dupok != 0 {
+ if s.Attr.DuplicateOK() {
continue
}
Exitf("%s: duplicate symbol reference: %s in both %s and %s", pn, s.Name, s.Outer.Name, sect.sym.Name)
outer.Sub = s
s.Outer = outer
s.Value = int64(sym.value - sect.addr)
- if s.Cgoexport&CgoExportDynamic == 0 {
+ if !s.Attr.CgoExportDynamic() {
s.Dynimplib = "" // satisfy dynimport
}
if outer.Type == obj.STEXT {
- if s.External != 0 && s.Dupok == 0 {
+ if s.Attr.External() && !s.Attr.DuplicateOK() {
Diag("%s: duplicate definition of %s", pn, s.Name)
}
- s.External = 1
+ s.Attr |= AttrExternal
}
sym.sym = s
}
if s.Type == obj.STEXT {
- if s.Onlist != 0 {
+ if s.Attr.OnList() {
log.Fatalf("symbol %s listed multiple times", s.Name)
}
- s.Onlist = 1
+ s.Attr |= AttrOnList
if Ctxt.Etextp != nil {
Ctxt.Etextp.Next = s
} else {
}
Ctxt.Etextp = s
for s1 = s.Sub; s1 != nil; s1 = s1.Sub {
- if s1.Onlist != 0 {
+ if s1.Attr.OnList() {
log.Fatalf("symbol %s listed multiple times", s1.Name)
}
- s1.Onlist = 1
+ s1.Attr |= AttrOnList
Ctxt.Etextp.Next = s1
Ctxt.Etextp = s1
}
}
if s.Outer != nil {
- if s.Dupok != 0 {
+ if s.Attr.DuplicateOK() {
continue
}
Exitf("%s: duplicate symbol reference: %s in both %s and %s", pn, s.Name, s.Outer.Name, sect.sym.Name)
s.Size = 4
s.Outer = sect.sym
if sect.sym.Type == obj.STEXT {
- if s.External != 0 && s.Dupok == 0 {
+ if s.Attr.External() && !s.Attr.DuplicateOK() {
Diag("%s: duplicate definition of %s", pn, s.Name)
}
- s.External = 1
+ s.Attr |= AttrExternal
}
}
s.Sub = listsort(s.Sub, valuecmp, listsubp)
}
if s.Type == obj.STEXT {
- if s.Onlist != 0 {
+ if s.Attr.OnList() {
log.Fatalf("symbol %s listed multiple times", s.Name)
}
- s.Onlist = 1
+ s.Attr |= AttrOnList
if Ctxt.Etextp != nil {
Ctxt.Etextp.Next = s
} else {
}
Ctxt.Etextp = s
for s = s.Sub; s != nil; s = s.Sub {
- if s.Onlist != 0 {
+ if s.Attr.OnList() {
log.Fatalf("symbol %s listed multiple times", s.Name)
}
- s.Onlist = 1
+ s.Attr |= AttrOnList
Ctxt.Etextp.Next = s
Ctxt.Etextp = s
}
case IMAGE_SYM_CLASS_NULL, IMAGE_SYM_CLASS_STATIC, IMAGE_SYM_CLASS_LABEL:
s = Linklookup(Ctxt, name, Ctxt.Version)
- s.Dupok = 1
+ s.Attr |= AttrDuplicateOK
default:
err = fmt.Errorf("%s: invalid symbol binding %d", sym.name, sym.sclass)
Regsize int
Funcalign int
Maxalign int
+ Minalign int
Minlc int
Dwarfregsp int
Dwarfreglr int
liveness int64
)
-// for dynexport field of LSym
-const (
- CgoExportDynamic = 1 << 0
- CgoExportStatic = 1 << 1
-)
-
var (
Segtext Segment
Segrodata Segment
switch Buildmode {
case BuildmodeCShared:
s := Linklookup(Ctxt, "runtime.islibrary", 0)
- s.Dupok = 1
+ s.Attr |= AttrDuplicateOK
Adduint8(Ctxt, s, 1)
case BuildmodeCArchive:
s := Linklookup(Ctxt, "runtime.isarchive", 0)
- s.Dupok = 1
+ s.Attr |= AttrDuplicateOK
Adduint8(Ctxt, s, 1)
}
if Linkmode == LinkInternal {
// Drop all the cgo_import_static declarations.
// Turns out we won't be needing them.
- for s := Ctxt.Allsym; s != nil; s = s.Allsym {
+ for _, s := range Ctxt.Allsym {
if s.Type == obj.SHOSTOBJ {
// If a symbol was marked both
// cgo_import_static and cgo_import_dynamic,
// then we want to make it cgo_import_dynamic
// now.
- if s.Extname != "" && s.Dynimplib != "" && s.Cgoexport == 0 {
+ if s.Extname != "" && s.Dynimplib != "" && !s.Attr.CgoExport() {
s.Type = obj.SDYNIMPORT
} else {
s.Type = 0
} else if tlsg.Type != obj.SDYNIMPORT {
Diag("internal error: runtime declared tlsg variable %d", tlsg.Type)
}
- tlsg.Reachable = true
+ tlsg.Attr |= AttrReachable
Ctxt.Tlsg = tlsg
moduledata := Linklookup(Ctxt, "runtime.firstmoduledata", 0)
// If OTOH the module does not contain the runtime package,
// create a local symbol for the moduledata.
moduledata = Linklookup(Ctxt, "local.moduledata", 0)
- moduledata.Local = true
+ moduledata.Attr |= AttrLocal
}
// In all cases way we mark the moduledata as noptrdata to hide it from
// the GC.
moduledata.Type = obj.SNOPTRDATA
- moduledata.Reachable = true
+ moduledata.Attr |= AttrReachable
Ctxt.Moduledata = moduledata
// Now that we know the link mode, trim the dynexp list.
- x := CgoExportDynamic
+ x := AttrCgoExportDynamic
if Linkmode == LinkExternal {
- x = CgoExportStatic
+ x = AttrCgoExportStatic
}
w := 0
for i := 0; i < len(dynexp); i++ {
- if int(dynexp[i].Cgoexport)&x != 0 {
+ if dynexp[i].Attr&x != 0 {
dynexp[w] = dynexp[i]
w++
}
// If we have any undefined symbols in external
// objects, try to read them from the libgcc file.
any := false
- for s := Ctxt.Allsym; s != nil; s = s.Allsym {
+ for _, s := range Ctxt.Allsym {
for _, r := range s.R {
if r.Sym != nil && r.Sym.Type&obj.SMASK == obj.SXREF && r.Sym.Name != ".got" {
any = true
continue
}
- if s.Nosplit != 0 {
+ if s.Attr.NoSplit() {
Ctxt.Cursym = s
ch.sym = s
stkcheck(&ch, 0)
}
for s := Ctxt.Textp; s != nil; s = s.Next {
- if s.Nosplit == 0 {
+ if !s.Attr.NoSplit() {
Ctxt.Cursym = s
ch.sym = s
stkcheck(&ch, 0)
// function at top of safe zone once.
top := limit == obj.StackLimit-callsize()
if top {
- if s.Stkcheck != 0 {
+ if s.Attr.StackCheck() {
return 0
}
- s.Stkcheck = 1
+ s.Attr |= AttrStackCheck
}
if depth > 100 {
return -1
}
- if s.External != 0 || s.Pcln == nil {
+ if s.Attr.External() || s.Pcln == nil {
// external function.
// should never be called directly.
// only diagnose the direct caller.
var ch Chain
ch.up = up
- if s.Nosplit == 0 {
+ if !s.Attr.NoSplit() {
// Ensure we have enough stack to call morestack.
ch.limit = limit - callsize()
ch.sym = morestack
if ch.sym != nil {
name = ch.sym.Name
- if ch.sym.Nosplit != 0 {
+ if ch.sym.Attr.NoSplit() {
name += " (nosplit)"
}
} else {
if ch.up == nil {
// top of chain. ch->sym != nil.
- if ch.sym.Nosplit != 0 {
+ if ch.sym.Attr.NoSplit() {
fmt.Printf("\t%d\tassumed on entry to %s\n", ch.limit, name)
} else {
fmt.Printf("\t%d\tguaranteed after split check in %s\n", ch.limit, name)
put(s, s.Name, 'T', s.Value, s.Size, int(s.Version), nil)
}
- for s := Ctxt.Allsym; s != nil; s = s.Allsym {
- if s.Hidden || ((s.Name == "" || s.Name[0] == '.') && s.Version == 0 && s.Name != ".rathole" && s.Name != ".TOC.") {
+ for _, s := range Ctxt.Allsym {
+ if s.Attr.Hidden() {
+ continue
+ }
+ if (s.Name == "" || s.Name[0] == '.') && s.Version == 0 && s.Name != ".rathole" && s.Name != ".TOC." {
continue
}
switch s.Type & obj.SMASK {
obj.STYPE,
obj.SSTRING,
obj.SGOSTRING,
+ obj.SGOSTRINGHDR,
obj.SGOFUNC,
obj.SGCBITS,
obj.STYPERELRO,
obj.SSTRINGRELRO,
obj.SGOSTRINGRELRO,
+ obj.SGOSTRINGHDRRELRO,
obj.SGOFUNCRELRO,
obj.SGCBITSRELRO,
obj.SRODATARELRO,
obj.STYPELINK,
obj.SWINDOWS:
- if !s.Reachable {
+ if !s.Attr.Reachable() {
continue
}
put(s, s.Name, 'D', Symaddr(s), s.Size, int(s.Version), s.Gotype)
case obj.SBSS, obj.SNOPTRBSS:
- if !s.Reachable {
+ if !s.Attr.Reachable() {
continue
}
if len(s.P) > 0 {
- Diag("%s should not be bss (size=%d type=%d special=%d)", s.Name, int(len(s.P)), s.Type, s.Special)
+ Diag("%s should not be bss (size=%d type=%d special=%v)", s.Name, int(len(s.P)), s.Type, s.Attr.Special())
}
put(s, s.Name, 'B', Symaddr(s), s.Size, int(s.Version), s.Gotype)
}
case obj.SDYNIMPORT:
- if !s.Reachable {
+ if !s.Attr.Reachable() {
continue
}
put(s, s.Extname, 'U', 0, 0, int(s.Version), nil)
}
}
- var a *Auto
var off int32
for s := Ctxt.Textp; s != nil; s = s.Next {
put(s, s.Name, 'T', s.Value, s.Size, int(s.Version), s.Gotype)
// NOTE(ality): acid can't produce a stack trace without .frame symbols
put(nil, ".frame", 'm', int64(s.Locals)+int64(Thearch.Ptrsize), 0, 0, nil)
- for a = s.Autom; a != nil; a = a.Link {
+ for _, a := range s.Autom {
// Emit a or p according to actual offset, even if label is wrong.
// This avoids negative offsets, which cannot be encoded.
if a.Name != obj.A_AUTO && a.Name != obj.A_PARAM {
}
func Symaddr(s *LSym) int64 {
- if !s.Reachable {
+ if !s.Attr.Reachable() {
Diag("unreachable symbol in symaddr - %s", s.Name)
}
return s.Value
s := Linklookup(Ctxt, p, 0)
s.Type = int16(t)
s.Value = v
- s.Reachable = true
- s.Special = 1
- s.Local = true
+ s.Attr |= AttrReachable
+ s.Attr |= AttrSpecial
+ s.Attr |= AttrLocal
}
func datoff(addr int64) int64 {
if r.Sym.Type == obj.Sxxx || r.Sym.Type == obj.SXREF {
Diag("undefined: %s", r.Sym.Name)
}
- if !r.Sym.Reachable {
+ if !r.Sym.Attr.Reachable() {
Diag("use of unreachable symbol: %s", r.Sym.Name)
}
}
}
}
-func checkgo() {
- if Debug['C'] == 0 {
- return
- }
-
- // TODO(rsc,khr): Eventually we want to get to no Go-called C functions at all,
- // which would simplify this logic quite a bit.
-
- // Mark every Go-called C function with cfunc=2, recursively.
- var changed int
- var i int
- var r *Reloc
- var s *LSym
- for {
- changed = 0
- for s = Ctxt.Textp; s != nil; s = s.Next {
- if s.Cfunc == 0 || (s.Cfunc == 2 && s.Nosplit != 0) {
- for i = 0; i < len(s.R); i++ {
- r = &s.R[i]
- if r.Sym == nil {
- continue
- }
- if (r.Type == obj.R_CALL || r.Type == obj.R_CALLARM) && r.Sym.Type == obj.STEXT {
- if r.Sym.Cfunc == 1 {
- changed = 1
- r.Sym.Cfunc = 2
- }
- }
- }
- }
- }
- if changed == 0 {
- break
- }
- }
-
- // Complain about Go-called C functions that can split the stack
- // (that can be preempted for garbage collection or trigger a stack copy).
- for s := Ctxt.Textp; s != nil; s = s.Next {
- if s.Cfunc == 0 || (s.Cfunc == 2 && s.Nosplit != 0) {
- for i = 0; i < len(s.R); i++ {
- r = &s.R[i]
- if r.Sym == nil {
- continue
- }
- if (r.Type == obj.R_CALL || r.Type == obj.R_CALLARM) && r.Sym.Type == obj.STEXT {
- if s.Cfunc == 0 && r.Sym.Cfunc == 2 && r.Sym.Nosplit == 0 {
- fmt.Printf("Go %s calls C %s\n", s.Name, r.Sym.Name)
- } else if s.Cfunc == 2 && s.Nosplit != 0 && r.Sym.Nosplit == 0 {
- fmt.Printf("Go calls C %s calls %s\n", s.Name, r.Sym.Name)
- }
- }
- }
- }
- }
-}
-
func Rnd(v int64, r int64) int64 {
if r <= 0 {
return v
)
type LSym struct {
- Name string
- Extname string
- Type int16
- Version int16
- Dupok uint8
- Cfunc uint8
- External uint8
- Nosplit uint8
- Reachable bool
- Cgoexport uint8
- Special uint8
- Stkcheck uint8
- Hidden bool
- Leaf uint8
- Localentry uint8
- Onlist uint8
- // ElfType is set for symbols read from shared libraries by ldshlibsyms. It
- // is not set for symbols defined by the packages being linked or by symbols
- // read by ldelf (and so is left as elf.STT_NOTYPE).
- ElfType elf.SymType
+ Name string
+ Extname string
+ Type int16
+ Version int16
+ Attr Attribute
+ Localentry uint8
Dynid int32
Plt int32
Got int32
Locals int32
Value int64
Size int64
- Allsym *LSym
+ // ElfType is set for symbols read from shared libraries by ldshlibsyms. It
+ // is not set for symbols defined by the packages being linked or by symbols
+ // read by ldelf (and so is left as elf.STT_NOTYPE).
+ ElfType elf.SymType
Next *LSym
Sub *LSym
Outer *LSym
Gotype *LSym
Reachparent *LSym
- Queue *LSym
File string
Dynimplib string
Dynimpvers string
Sect *Section
- Autom *Auto
+ Autom []Auto
Pcln *Pcln
P []byte
R []Reloc
- Local bool
}
func (s *LSym) String() string {
}
}
+// Attribute is a set of common symbol attributes.
+type Attribute int16
+
+const (
+ AttrDuplicateOK Attribute = 1 << iota
+ AttrExternal
+ AttrNoSplit
+ AttrReachable
+ AttrCgoExportDynamic
+ AttrCgoExportStatic
+ AttrSpecial
+ AttrStackCheck
+ AttrHidden
+ AttrOnList
+ AttrLocal
+)
+
+func (a Attribute) DuplicateOK() bool { return a&AttrDuplicateOK != 0 }
+func (a Attribute) External() bool { return a&AttrExternal != 0 }
+func (a Attribute) NoSplit() bool { return a&AttrNoSplit != 0 }
+func (a Attribute) Reachable() bool { return a&AttrReachable != 0 }
+func (a Attribute) CgoExportDynamic() bool { return a&AttrCgoExportDynamic != 0 }
+func (a Attribute) CgoExportStatic() bool { return a&AttrCgoExportStatic != 0 }
+func (a Attribute) Special() bool { return a&AttrSpecial != 0 }
+func (a Attribute) StackCheck() bool { return a&AttrStackCheck != 0 }
+func (a Attribute) Hidden() bool { return a&AttrHidden != 0 }
+func (a Attribute) OnList() bool { return a&AttrOnList != 0 }
+func (a Attribute) Local() bool { return a&AttrLocal != 0 }
+
+func (a Attribute) CgoExport() bool {
+ return a.CgoExportDynamic() || a.CgoExportStatic()
+}
+
+func (a *Attribute) Set(flag Attribute, value bool) {
+ if value {
+ *a |= flag
+ } else {
+ *a &= ^flag
+ }
+}
+
type Reloc struct {
Off int32
Siz uint8
type Auto struct {
Asym *LSym
- Link *Auto
+ Gotype *LSym
Aoffset int32
Name int16
- Gotype *LSym
}
type Shlib struct {
Windows int32
Goroot string
Hash map[symVer]*LSym
- Allsym *LSym
+ Allsym []*LSym
Nsymbol int32
Tlsg *LSym
Libdir []string
s := Linklookup(Ctxt, ".machosymstr", 0)
s.Type = obj.SMACHOSYMSTR
- s.Reachable = true
+ s.Attr |= AttrReachable
Adduint8(Ctxt, s, ' ')
Adduint8(Ctxt, s, '\x00')
s = Linklookup(Ctxt, ".machosymtab", 0)
s.Type = obj.SMACHOSYMTAB
- s.Reachable = true
+ s.Attr |= AttrReachable
if Linkmode != LinkExternal {
s := Linklookup(Ctxt, ".plt", 0) // will be __symbol_stub
s.Type = obj.SMACHOPLT
- s.Reachable = true
+ s.Attr |= AttrReachable
s = Linklookup(Ctxt, ".got", 0) // will be __nl_symbol_ptr
s.Type = obj.SMACHOGOT
- s.Reachable = true
+ s.Attr |= AttrReachable
s.Align = 4
s = Linklookup(Ctxt, ".linkedit.plt", 0) // indirect table for .plt
s.Type = obj.SMACHOINDIRECTPLT
- s.Reachable = true
+ s.Attr |= AttrReachable
s = Linklookup(Ctxt, ".linkedit.got", 0) // indirect table for .got
s.Type = obj.SMACHOINDIRECTGOT
- s.Reachable = true
+ s.Attr |= AttrReachable
}
}
buf := "__" + strings.Replace(sect.Name[1:], ".", "_", -1)
var msect *MachoSect
- if sect.Rwx&1 == 0 && (Thearch.Thechar == '7' || (Thearch.Thechar == '6' && Buildmode == BuildmodeCShared)) {
- // Darwin external linker on arm64 and on amd64 in c-shared buildmode
+ if sect.Rwx&1 == 0 && (Thearch.Thechar == '7' || // arm64
+ (Thearch.Thechar == '6' && (Buildmode == BuildmodeCShared || Buildmode == BuildmodeCArchive))) { // amd64
+ // Darwin external linker on arm64 and on amd64 in c-shared/c-archive buildmode
// complains about absolute relocs in __TEXT, so if the section is not
// executable, put it in __DATA segment.
msect = newMachoSect(mseg, buf, "__DATA")
if s.Type == obj.SDYNIMPORT {
return SymKindUndef
}
- if s.Cgoexport != 0 {
+ if s.Attr.CgoExport() {
return SymKindExtdef
}
return SymKindLocal
func machogenasmsym(put func(*LSym, string, int, int64, int64, int, *LSym)) {
genasmsym(put)
- for s := Ctxt.Allsym; s != nil; s = s.Allsym {
+ for _, s := range Ctxt.Allsym {
if s.Type == obj.SDYNIMPORT || s.Type == obj.SHOSTOBJ {
- if s.Reachable {
+ if s.Attr.Reachable() {
put(s, "", 'D', 0, 0, 0, nil)
}
}
// So we sort them here and pre-allocate dynid for them
// See https://golang.org/issue/4029
for i := 0; i < len(dynexp); i++ {
- dynexp[i].Reachable = true
+ dynexp[i].Attr |= AttrReachable
}
machogenasmsym(addsym)
sortsym = make([]*LSym, nsortsym)
Adduint16(Ctxt, symtab, 0) // desc
adduintxx(Ctxt, symtab, 0, Thearch.Ptrsize) // no value
} else {
- if s.Cgoexport != 0 {
+ if s.Attr.CgoExport() {
Adduint8(Ctxt, symtab, 0x0f)
} else {
Adduint8(Ctxt, symtab, 0x0e)
sect.Reloff = uint64(Cpos())
var sym *LSym
for sym = first; sym != nil; sym = sym.Next {
- if !sym.Reachable {
+ if !sym.Attr.Reachable() {
continue
}
if uint64(sym.Value) >= sect.Vaddr {
var r *Reloc
var ri int
for ; sym != nil; sym = sym.Next {
- if !sym.Reachable {
+ if !sym.Attr.Reachable() {
continue
}
if sym.Value >= int64(eaddr) {
log.Fatalf("invalid symbol version %d", v)
}
flags := rdint(f)
- dupok := flags & 1
- local := false
- if flags&2 != 0 {
- local = true
- }
+ dupok := flags&1 != 0
+ local := flags&2 != 0
size := rdint(f)
typ := rdsym(ctxt, f, pkg)
data := rddata(f)
if (s.Type == obj.SDATA || s.Type == obj.SBSS || s.Type == obj.SNOPTRBSS) && len(s.P) == 0 && len(s.R) == 0 {
goto overwrite
}
- if s.Type != obj.SBSS && s.Type != obj.SNOPTRBSS && dupok == 0 && s.Dupok == 0 {
+ if s.Type != obj.SBSS && s.Type != obj.SNOPTRBSS && !dupok && !s.Attr.DuplicateOK() {
log.Fatalf("duplicate symbol %s (types %d and %d) in %s and %s", s.Name, s.Type, t, s.File, pn)
}
if len(s.P) > 0 {
overwrite:
s.File = pkg
- s.Dupok = uint8(dupok)
+ if dupok {
+ s.Attr |= AttrDuplicateOK
+ }
if t == obj.SXREF {
log.Fatalf("bad sxref")
}
if s.Size < int64(size) {
s.Size = int64(size)
}
- s.Local = local
+ s.Attr.Set(AttrLocal, local)
if typ != nil { // if bss sym defined multiple times, take type from any one def
s.Gotype = typ
}
if s.Type == obj.STEXT {
s.Args = rdint32(f)
s.Locals = rdint32(f)
- s.Nosplit = rduint8(f)
- v := rdint(f)
- s.Leaf = uint8(v & 1)
- s.Cfunc = uint8(v & 2)
+ if rduint8(f) != 0 {
+ s.Attr |= AttrNoSplit
+ }
+ rdint(f) // v&1 is Leaf, currently unused
n := rdint(f)
- var a *Auto
+ s.Autom = make([]Auto, n)
for i := 0; i < n; i++ {
- a = new(Auto)
- a.Asym = rdsym(ctxt, f, pkg)
- a.Aoffset = rdint32(f)
- a.Name = rdint16(f)
- a.Gotype = rdsym(ctxt, f, pkg)
- a.Link = s.Autom
- s.Autom = a
+ s.Autom[i] = Auto{
+ Asym: rdsym(ctxt, f, pkg),
+ Aoffset: rdint32(f),
+ Name: rdint16(f),
+ Gotype: rdsym(ctxt, f, pkg),
+ }
}
s.Pcln = new(Pcln)
}
if dup == nil {
- if s.Onlist != 0 {
+ if s.Attr.OnList() {
log.Fatalf("symbol %s listed multiple times", s.Name)
}
- s.Onlist = 1
+ s.Attr |= AttrOnList
if ctxt.Etextp != nil {
ctxt.Etextp.Next = s
} else {
if s.Type != 0 {
fmt.Fprintf(ctxt.Bso, "t=%d ", s.Type)
}
- if s.Dupok != 0 {
+ if s.Attr.DuplicateOK() {
fmt.Fprintf(ctxt.Bso, "dupok ")
}
- if s.Cfunc != 0 {
- fmt.Fprintf(ctxt.Bso, "cfunc ")
- }
- if s.Nosplit != 0 {
+ if s.Attr.NoSplit() {
fmt.Fprintf(ctxt.Bso, "nosplit ")
}
fmt.Fprintf(ctxt.Bso, "size=%d value=%d", int64(s.Size), int64(s.Value))
x, _ := strconv.ParseUint(s.Name[5:], 16, 32)
i32 := int32(x)
s.Type = obj.SRODATA
- s.Local = true
+ s.Attr |= AttrLocal
Adduint32(ctxt, s, uint32(i32))
- s.Reachable = false
+ s.Attr.Set(AttrReachable, false)
} else if strings.HasPrefix(s.Name, "$f64.") || strings.HasPrefix(s.Name, "$i64.") {
x, _ := strconv.ParseUint(s.Name[5:], 16, 64)
i64 := int64(x)
s.Type = obj.SRODATA
- s.Local = true
+ s.Attr |= AttrLocal
Adduint64(ctxt, s, uint64(i64))
- s.Reachable = false
+ s.Attr.Set(AttrReachable, false)
}
}
if v == 0 && strings.HasPrefix(s.Name, "runtime.gcbits.") {
- s.Local = true
+ s.Attr |= AttrLocal
}
return s
}
funcdata_bytes := int64(0)
ftab := Linklookup(Ctxt, "runtime.pclntab", 0)
ftab.Type = obj.SPCLNTAB
- ftab.Reachable = true
+ ftab.Attr |= AttrReachable
// See golang.org/s/go12symtab for the format. Briefly:
// 8-byte header
func findfunctab() {
t := Linklookup(Ctxt, "runtime.findfunctab", 0)
t.Type = obj.SRODATA
- t.Reachable = true
- t.Local = true
+ t.Attr |= AttrReachable
+ t.Attr |= AttrLocal
// find min and max address
min := Ctxt.Textp.Value
dr = nil
var m *Imp
- for s := Ctxt.Allsym; s != nil; s = s.Allsym {
- if !s.Reachable || s.Type != obj.SDYNIMPORT {
+ for _, s := range Ctxt.Allsym {
+ if !s.Attr.Reachable() || s.Type != obj.SDYNIMPORT {
continue
}
for d = dr; d != nil; d = d.next {
dynName += fmt.Sprintf("@%d", m.argsize)
}
dynSym := Linklookup(Ctxt, dynName, 0)
- dynSym.Reachable = true
+ dynSym.Attr |= AttrReachable
dynSym.Type = obj.SHOSTOBJ
r := Addrel(m.s)
r.Sym = dynSym
}
} else {
dynamic := Linklookup(Ctxt, ".windynamic", 0)
- dynamic.Reachable = true
+ dynamic.Attr |= AttrReachable
dynamic.Type = obj.SWINDOWS
for d := dr; d != nil; d = d.next {
for m = d.ms; m != nil; m = m.next {
func initdynexport() {
nexport = 0
- for s := Ctxt.Allsym; s != nil; s = s.Allsym {
- if !s.Reachable || s.Cgoexport&CgoExportDynamic == 0 {
+ for _, s := range Ctxt.Allsym {
+ if !s.Attr.Reachable() || !s.Attr.CgoExportDynamic() {
continue
}
if nexport+1 > len(dexport) {
sect.Reloff = uint64(Cpos())
var sym *LSym
for sym = first; sym != nil; sym = sym.Next {
- if !sym.Reachable {
+ if !sym.Attr.Reachable() {
continue
}
if uint64(sym.Value) >= sect.Vaddr {
var r *Reloc
var ri int
for ; sym != nil; sym = sym.Next {
- if !sym.Reachable {
+ if !sym.Attr.Reachable() {
continue
}
if sym.Value >= int64(eaddr) {
/* relocation table */
rel := Linklookup(Ctxt, ".rel", 0)
- rel.Reachable = true
+ rel.Attr |= AttrReachable
rel.Type = obj.SELFROSECT
initdynimport()
if coffsym != nil {
// only windows/386 requires underscore prefix on external symbols
- if Thearch.Thechar == '8' && Linkmode == LinkExternal && (s.Type == obj.SHOSTOBJ || s.Cgoexport != 0) && s.Name == s.Extname {
+ if Thearch.Thechar == '8' && Linkmode == LinkExternal && (s.Type == obj.SHOSTOBJ || s.Attr.CgoExport()) && s.Name == s.Extname {
s.Name = "_" + s.Name
}
cs := &coffsym[ncoffsym]
mark(Linklookup(Ctxt, "runtime.read_tls_fallback", 0))
}
- checkgo()
checkstrdata()
deadcode()
callgraph()
symtab()
dodata()
address()
- doweak()
reloc()
Thearch.Asmb()
undef()
}
func linknew(arch *LinkArch) *Link {
- ctxt := new(Link)
- // Preallocate about 2mb for hash
- ctxt.Hash = make(map[symVer]*LSym, 100000)
- ctxt.Arch = arch
- ctxt.Version = obj.HistVersion
- ctxt.Goroot = obj.Getgoroot()
+ ctxt := &Link{
+ Hash: make(map[symVer]*LSym, 100000), // preallocate about 2mb for hash
+ Allsym: make([]*LSym, 0, 100000),
+ Arch: arch,
+ Version: obj.HistVersion,
+ Goroot: obj.Getgoroot(),
+ }
p := obj.Getgoarch()
if p != arch.Name {
s.Plt = -1
s.Got = -1
s.Name = symb
- s.Type = 0
s.Version = int16(v)
- s.Value = 0
- s.Size = 0
ctxt.Nsymbol++
- s.Allsym = ctxt.Allsym
- ctxt.Allsym = s
-
+ ctxt.Allsym = append(ctxt.Allsym, s)
return s
}
// maybe one day STB_WEAK.
bind := STB_GLOBAL
- if ver != 0 || (x.Type&obj.SHIDDEN != 0) || x.Local {
+ if ver != 0 || (x.Type&obj.SHIDDEN != 0) || x.Attr.Local() {
bind = STB_LOCAL
}
// To avoid filling the dynamic table with lots of unnecessary symbols,
// mark all Go symbols local (not global) in the final executable.
// But when we're dynamically linking, we need all those global symbols.
- if !DynlinkingGo() && Linkmode == LinkExternal && x.Cgoexport&CgoExportStatic == 0 && elfshnum != SHN_UNDEF {
+ if !DynlinkingGo() && Linkmode == LinkExternal && !x.Attr.CgoExportStatic() && elfshnum != SHN_UNDEF {
bind = STB_LOCAL
}
s.Type = obj.SRODATA
s.Size = 0
- s.Reachable = true
+ s.Attr |= AttrReachable
xdefine("runtime.egcdata", obj.SRODATA, 0)
s = Linklookup(Ctxt, "runtime.gcbss", 0)
s.Type = obj.SRODATA
s.Size = 0
- s.Reachable = true
+ s.Attr |= AttrReachable
xdefine("runtime.egcbss", obj.SRODATA, 0)
// pseudo-symbols to mark locations of type, string, and go string data.
s.Type = obj.STYPE
s.Size = 0
- s.Reachable = true
+ s.Attr |= AttrReachable
symtype = s
s = Linklookup(Ctxt, "typerel.*", 0)
s.Type = obj.STYPERELRO
s.Size = 0
- s.Reachable = true
+ s.Attr |= AttrReachable
symtyperel = s
} else if !DynlinkingGo() {
s = Linklookup(Ctxt, "type.*", 0)
s.Type = obj.STYPE
s.Size = 0
- s.Reachable = true
+ s.Attr |= AttrReachable
symtype = s
symtyperel = s
}
s = Linklookup(Ctxt, "go.string.*", 0)
s.Type = obj.SGOSTRING
- s.Local = true
+ s.Attr |= AttrLocal
s.Size = 0
- s.Reachable = true
+ s.Attr |= AttrReachable
symgostring := s
+ s = Linklookup(Ctxt, "go.string.hdr.*", 0)
+ s.Type = obj.SGOSTRINGHDR
+ s.Attr |= AttrLocal
+ s.Size = 0
+ s.Attr |= AttrReachable
+ symgostringhdr := s
+
s = Linklookup(Ctxt, "go.func.*", 0)
s.Type = obj.SGOFUNC
- s.Local = true
+ s.Attr |= AttrLocal
s.Size = 0
- s.Reachable = true
+ s.Attr |= AttrReachable
symgofunc := s
s = Linklookup(Ctxt, "runtime.gcbits.*", 0)
s.Type = obj.SGCBITS
- s.Local = true
+ s.Attr |= AttrLocal
s.Size = 0
- s.Reachable = true
+ s.Attr |= AttrReachable
symgcbits := s
symtypelink := Linklookup(Ctxt, "runtime.typelink", 0)
symtypelink.Type = obj.STYPELINK
symt = Linklookup(Ctxt, "runtime.symtab", 0)
- symt.Local = true
+ symt.Attr |= AttrLocal
symt.Type = obj.SSYMTAB
symt.Size = 0
- symt.Reachable = true
+ symt.Attr |= AttrReachable
ntypelinks := 0
// within a type they sort by size, so the .* symbols
// just defined above will be first.
// hide the specific symbols.
- for s := Ctxt.Allsym; s != nil; s = s.Allsym {
- if !s.Reachable || s.Special != 0 || s.Type != obj.SRODATA {
+ for _, s := range Ctxt.Allsym {
+ if !s.Attr.Reachable() || s.Attr.Special() || s.Type != obj.SRODATA {
continue
}
if strings.HasPrefix(s.Name, "type.") && !DynlinkingGo() {
- s.Hidden = true
+ s.Attr |= AttrHidden
if UseRelro() && len(s.R) > 0 {
s.Type = obj.STYPERELRO
s.Outer = symtyperel
if strings.HasPrefix(s.Name, "go.typelink.") {
ntypelinks++
s.Type = obj.STYPELINK
- s.Hidden = true
+ s.Attr |= AttrHidden
s.Outer = symtypelink
}
if strings.HasPrefix(s.Name, "go.string.") {
s.Type = obj.SGOSTRING
- s.Hidden = true
+ s.Attr |= AttrHidden
s.Outer = symgostring
+ if strings.HasPrefix(s.Name, "go.string.hdr.") {
+ s.Type = obj.SGOSTRINGHDR
+ s.Outer = symgostringhdr
+ }
}
if strings.HasPrefix(s.Name, "runtime.gcbits.") {
s.Type = obj.SGCBITS
- s.Hidden = true
+ s.Attr |= AttrHidden
s.Outer = symgcbits
}
if strings.HasPrefix(s.Name, "go.func.") {
s.Type = obj.SGOFUNC
- s.Hidden = true
+ s.Attr |= AttrHidden
s.Outer = symgofunc
}
if strings.HasPrefix(s.Name, "gcargs.") || strings.HasPrefix(s.Name, "gclocals.") || strings.HasPrefix(s.Name, "gclocals·") {
s.Type = obj.SGOFUNC
- s.Hidden = true
+ s.Attr |= AttrHidden
s.Outer = symgofunc
s.Align = 4
liveness += (s.Size + int64(s.Align) - 1) &^ (int64(s.Align) - 1)
if Buildmode == BuildmodeShared {
abihashgostr := Linklookup(Ctxt, "go.link.abihash."+filepath.Base(outfile), 0)
- abihashgostr.Reachable = true
+ abihashgostr.Attr |= AttrReachable
abihashgostr.Type = obj.SRODATA
hashsym := Linklookup(Ctxt, "go.link.abihashbytes", 0)
Addaddr(Ctxt, abihashgostr, hashsym)
addgostring(moduledata, "go.link.thismodulename", thismodulename)
modulehashes := Linklookup(Ctxt, "go.link.abihashes", 0)
- modulehashes.Reachable = true
- modulehashes.Local = true
+ modulehashes.Attr |= AttrReachable
+ modulehashes.Attr |= AttrLocal
modulehashes.Type = obj.SRODATA
for i, shlib := range Ctxt.Shlibs {
// modulehashes[i].runtimehash
abihash := Linklookup(Ctxt, "go.link.abihash."+modulename, 0)
- abihash.Reachable = true
+ abihash.Attr |= AttrReachable
Addaddr(Ctxt, modulehashes, abihash)
}
const (
thechar = '0'
MaxAlign = 32 // max data alignment
+ MinAlign = 1 // min data alignment
FuncAlign = 8
MINLC = 4
)
ld.Thearch.Regsize = ld.Thelinkarch.Regsize
ld.Thearch.Funcalign = FuncAlign
ld.Thearch.Maxalign = MaxAlign
+ ld.Thearch.Minalign = MinAlign
ld.Thearch.Minlc = MINLC
ld.Thearch.Dwarfregsp = DWARFREGSP
ld.Thearch.Dwarfreglr = DWARFREGLR
n = fmt.Sprintf("%s.%s", s.Name, r.Sym.Name)
stub = ld.Linklookup(ld.Ctxt, n, 0)
- stub.Reachable = stub.Reachable || s.Reachable
+ if s.Attr.Reachable() {
+ stub.Attr |= ld.AttrReachable
+ }
if stub.Size == 0 {
// Need outer to resolve .TOC.
stub.Outer = s
if addmoduledata.Type == obj.STEXT {
return
}
- addmoduledata.Reachable = true
+ addmoduledata.Attr |= ld.AttrReachable
initfunc := ld.Linklookup(ld.Ctxt, "go.link.addmoduledata", 0)
initfunc.Type = obj.STEXT
- initfunc.Local = true
- initfunc.Reachable = true
+ initfunc.Attr |= ld.AttrLocal
+ initfunc.Attr |= ld.AttrReachable
o := func(op uint32) {
ld.Adduint32(ld.Ctxt, initfunc, op)
}
ld.Ctxt.Etextp = initfunc
initarray_entry := ld.Linklookup(ld.Ctxt, "go.link.addmoduledatainit", 0)
- initarray_entry.Reachable = true
- initarray_entry.Local = true
+ initarray_entry.Attr |= ld.AttrReachable
+ initarray_entry.Attr |= ld.AttrLocal
initarray_entry.Type = obj.SINITARR
ld.Addaddr(ld.Ctxt, initarray_entry, initfunc)
}
const (
thechar = '9'
MaxAlign = 32 // max data alignment
+ MinAlign = 1 // min data alignment
FuncAlign = 8
MINLC = 4
)
ld.Thearch.Regsize = ld.Thelinkarch.Regsize
ld.Thearch.Funcalign = FuncAlign
ld.Thearch.Maxalign = MaxAlign
+ ld.Thearch.Minalign = MinAlign
ld.Thearch.Minlc = MINLC
ld.Thearch.Dwarfregsp = DWARFREGSP
ld.Thearch.Dwarfreglr = DWARFREGLR
// Append 4 bytes to s and create a R_CALL relocation targeting t to fill them in.
func addcall(ctxt *ld.Link, s *ld.LSym, t *ld.LSym) {
- s.Reachable = true
+ s.Attr |= ld.AttrReachable
i := s.Size
s.Size += 4
ld.Symgrow(ctxt, s, s.Size)
thunkfunc := ld.Linklookup(ld.Ctxt, "__x86.get_pc_thunk.cx", 0)
thunkfunc.Type = obj.STEXT
- thunkfunc.Local = true
- thunkfunc.Reachable = true
+ thunkfunc.Attr |= ld.AttrLocal
+ thunkfunc.Attr |= ld.AttrReachable
o := func(op ...uint8) {
for _, op1 := range op {
ld.Adduint8(ld.Ctxt, thunkfunc, op1)
return
}
- addmoduledata.Reachable = true
+ addmoduledata.Attr |= ld.AttrReachable
initfunc := ld.Linklookup(ld.Ctxt, "go.link.addmoduledata", 0)
initfunc.Type = obj.STEXT
- initfunc.Local = true
- initfunc.Reachable = true
+ initfunc.Attr |= ld.AttrLocal
+ initfunc.Attr |= ld.AttrReachable
o = func(op ...uint8) {
for _, op1 := range op {
ld.Adduint8(ld.Ctxt, initfunc, op1)
ld.Ctxt.Etextp.Next = initfunc
ld.Ctxt.Etextp = initfunc
initarray_entry := ld.Linklookup(ld.Ctxt, "go.link.addmoduledatainit", 0)
- initarray_entry.Reachable = true
- initarray_entry.Local = true
+ initarray_entry.Attr |= ld.AttrReachable
+ initarray_entry.Attr |= ld.AttrLocal
initarray_entry.Type = obj.SINITARR
ld.Addaddr(ld.Ctxt, initarray_entry, initfunc)
}
thechar = '8'
PtrSize = 4
MaxAlign = 32 // max data alignment
+ MinAlign = 1 // min data alignment
FuncAlign = 16
MINLC = 1
)
ld.Thearch.Regsize = ld.Thelinkarch.Regsize
ld.Thearch.Funcalign = FuncAlign
ld.Thearch.Maxalign = MaxAlign
+ ld.Thearch.Minalign = MinAlign
ld.Thearch.Minlc = MINLC
ld.Thearch.Dwarfregsp = DWARFREGSP
ld.Thearch.Dwarfreglr = DWARFREGLR
ld.Linkmode = ld.LinkExternal
got := ld.Linklookup(ld.Ctxt, "_GLOBAL_OFFSET_TABLE_", 0)
got.Type = obj.SDYNIMPORT
- got.Reachable = true
+ got.Attr |= ld.AttrReachable
}
switch ld.HEADTYPE {
+++ /dev/null
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Automatic symbol generation.
-
-// TODO(rsc): Handle go.typelink, go.track symbols.
-// TODO(rsc): Do not handle $f64. and $f32. symbols. Instead, generate those
-// from the compiler and assemblers as dupok data, and then remove autoData below.
-package main
-
-import (
- "cmd/internal/goobj"
- "strconv"
- "strings"
-)
-
-// linkerDefined lists the symbols supplied by other parts of the linker
-// (runtime.go and layout.go).
-var linkerDefined = map[string]bool{
- "runtime.bss": true,
- "runtime.data": true,
- "runtime.ebss": true,
- "runtime.edata": true,
- "runtime.efunctab": true,
- "runtime.end": true,
- "runtime.enoptrbss": true,
- "runtime.enoptrdata": true,
- "runtime.erodata": true,
- "runtime.etext": true,
- "runtime.etypelink": true,
- "runtime.functab": true,
- "runtime.gcbss": true,
- "runtime.gcdata": true,
- "runtime.noptrbss": true,
- "runtime.noptrdata": true,
- "runtime.pclntab": true,
- "runtime.rodata": true,
- "runtime.text": true,
- "runtime.typelink": true,
-}
-
-// isAuto reports whether sym is an automatically-generated data or constant symbol.
-func (p *Prog) isAuto(sym goobj.SymID) bool {
- return strings.HasPrefix(sym.Name, "go.weak.") ||
- strings.HasPrefix(sym.Name, "$f64.") ||
- strings.HasPrefix(sym.Name, "$f32.") ||
- linkerDefined[sym.Name]
-}
-
-// autoData defines the automatically generated data symbols needed by p.
-func (p *Prog) autoData() {
- for sym := range p.Missing {
- switch {
- // Floating-point constants that need to be loaded from memory are
- // written as $f64.{16 hex digits} or $f32.{8 hex digits}; the hex digits
- // give the IEEE bit pattern of the constant. As far as the layout into
- // memory is concerned, we interpret these as uint64 or uint32 constants.
- case strings.HasPrefix(sym.Name, "$f64."), strings.HasPrefix(sym.Name, "$f32."):
- size := 64
- if sym.Name[2:4] == "32" {
- size = 32
- }
- delete(p.Missing, sym)
- fbits, err := strconv.ParseUint(sym.Name[len("$f64."):], 16, size)
- if err != nil {
- p.errorf("unexpected floating point symbol %s", sym)
- continue
- }
- data := make([]byte, size/8)
- if size == 64 {
- p.byteorder.PutUint64(data, fbits)
- } else {
- p.byteorder.PutUint32(data, uint32(fbits))
- }
- p.addSym(&Sym{
- Sym: &goobj.Sym{
- SymID: sym,
- Kind: goobj.SRODATA,
- Size: size / 8,
- },
- Bytes: data,
- })
- }
- }
-}
-
-// autoConst defines the automatically generated constant symbols needed by p.
-func (p *Prog) autoConst() {
- for sym := range p.Missing {
- switch {
- case strings.HasPrefix(sym.Name, "go.weak."):
- // weak symbol resolves to actual symbol if present, or else nil.
- delete(p.Missing, sym)
- targ := sym
- targ.Name = sym.Name[len("go.weak."):]
- var addr Addr
- if s := p.Syms[targ]; s != nil {
- addr = s.Addr
- }
- p.defineConst(sym.Name, addr)
- }
- }
-}
-
-// defineConst defines a new symbol with the given name and constant address.
-func (p *Prog) defineConst(name string, addr Addr) {
- sym := goobj.SymID{Name: name}
- p.addSym(&Sym{
- Sym: &goobj.Sym{
- SymID: sym,
- Kind: goobj.SCONST,
- },
- Package: nil,
- Addr: addr,
- })
-}
+++ /dev/null
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Test for auto-generated symbols.
-
-// There is no test for $f64. and $f32. symbols, because those are
-// not possible to write in the assembler syntax. Instead of changing
-// the assembler to allow that, we plan to change the compilers
-// not to generate such symbols (plain dupok data is sufficient).
-
-package main
-
-import (
- "bytes"
- "cmd/internal/goobj"
- "testing"
-)
-
-// Each test case is an object file, generated from a corresponding .s file.
-// The image of the autotab symbol should be a sequence of pairs of
-// identical 8-byte sequences.
-var autoTests = []string{
- "testdata/autosection.6",
- "testdata/autoweak.6",
-}
-
-func TestAuto(t *testing.T) {
- for _, obj := range autoTests {
- p := Prog{GOOS: "darwin", GOARCH: "amd64", StartSym: "start"}
- p.omitRuntime = true
- p.Error = func(s string) { t.Error(s) }
- var buf bytes.Buffer
- p.link(&buf, obj)
- if p.NumError > 0 {
- continue // already reported
- }
-
- const name = "autotab"
- sym := p.Syms[goobj.SymID{Name: name}]
- if sym == nil {
- t.Errorf("%s is missing %s symbol", obj, name)
- return
- }
- if sym.Size == 0 {
- return
- }
-
- seg := sym.Section.Segment
- off := sym.Addr - seg.VirtAddr
- data := seg.Data[off : off+Addr(sym.Size)]
- if len(data)%16 != 0 {
- t.Errorf("%s: %s.Size = %d, want multiple of 16", obj, name, len(data))
- return
- }
- Data:
- for i := 0; i < len(data); i += 16 {
- have := p.byteorder.Uint64(data[i : i+8])
- want := p.byteorder.Uint64(data[i+8 : i+16])
- if have != want {
- // Look for relocation so we can explain what went wrong.
- for _, r := range sym.Reloc {
- if r.Offset == i {
- t.Errorf("%s: %s+%#x: %s: have %#x want %#x", obj, name, i, r.Sym, have, want)
- continue Data
- }
- }
- t.Errorf("%s: %s+%#x: have %#x want %#x", obj, name, i, have, want)
- }
- }
- }
-}
+++ /dev/null
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Removal of dead code and data.
-
-package main
-
-import "cmd/internal/goobj"
-
-// dead removes unreachable code and data from the program.
-// It is basically a mark-sweep garbage collection: traverse all the
-// symbols reachable from the entry (startSymID) and then delete
-// the rest.
-func (p *Prog) dead() {
- p.Dead = make(map[goobj.SymID]bool)
- reachable := make(map[goobj.SymID]bool)
- p.walkDead(p.startSym, reachable)
-
- for sym := range p.Syms {
- if !reachable[sym] {
- delete(p.Syms, sym)
- p.Dead[sym] = true
- }
- }
-
- for sym := range p.Missing {
- if !reachable[sym] {
- delete(p.Missing, sym)
- p.Dead[sym] = true
- }
- }
-
- p.SymOrder = removeDead(p.SymOrder, reachable)
-
- for _, pkg := range p.Packages {
- pkg.Syms = removeDead(pkg.Syms, reachable)
- }
-}
-
-// walkDead traverses the symbols reachable from sym, adding them to reachable.
-// The caller has verified that reachable[sym] = false.
-func (p *Prog) walkDead(sym goobj.SymID, reachable map[goobj.SymID]bool) {
- reachable[sym] = true
- s := p.Syms[sym]
- if s == nil {
- return
- }
- for i := range s.Reloc {
- r := &s.Reloc[i]
- if !reachable[r.Sym] {
- p.walkDead(r.Sym, reachable)
- }
- }
- if s.Func != nil {
- for _, fdata := range s.Func.FuncData {
- if fdata.Sym.Name != "" && !reachable[fdata.Sym] {
- p.walkDead(fdata.Sym, reachable)
- }
- }
- }
-}
-
-// removeDead removes unreachable (dead) symbols from syms,
-// returning a shortened slice using the same underlying array.
-func removeDead(syms []*Sym, reachable map[goobj.SymID]bool) []*Sym {
- keep := syms[:0]
- for _, sym := range syms {
- if reachable[sym.SymID] {
- keep = append(keep, sym)
- }
- }
- return keep
-}
+++ /dev/null
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package main
-
-import (
- "cmd/internal/goobj"
- "reflect"
- "strings"
- "testing"
-)
-
-// Each test case is an object file, generated from a corresponding .s file.
-// The symbols in the object file with a dead_ prefix are the ones that
-// should be removed from the program.
-var deadTests = []string{
- "testdata/dead.6",
-}
-
-func TestDead(t *testing.T) {
- for _, obj := range deadTests {
- p := Prog{GOOS: "darwin", GOARCH: "amd64", StartSym: "start"}
- p.omitRuntime = true
- p.Error = func(s string) { t.Error(s) }
- p.init()
- p.scan(obj)
- if p.NumError > 0 {
- continue // already reported
- }
- origSyms := copyMap(p.Syms)
- origMissing := copyMap(p.Missing)
- origSymOrder := copySlice(p.SymOrder)
- origPkgSyms := copySlice(p.Packages["main"].Syms)
- p.dead()
- checkDeadMap(t, obj, "p.Syms", origSyms, p.Syms)
- checkDeadMap(t, obj, "p.Missing", origMissing, p.Missing)
- checkDeadSlice(t, obj, "p.SymOrder", origSymOrder, p.SymOrder)
- checkDeadSlice(t, obj, `p.Packages["main"].Syms`, origPkgSyms, p.Packages["main"].Syms)
- }
-}
-
-func copyMap(m interface{}) interface{} {
- v := reflect.ValueOf(m)
- out := reflect.MakeMap(v.Type())
- for _, key := range v.MapKeys() {
- out.SetMapIndex(key, v.MapIndex(key))
- }
- return out.Interface()
-}
-
-func checkDeadMap(t *testing.T, obj, name string, old, new interface{}) {
- vold := reflect.ValueOf(old)
- vnew := reflect.ValueOf(new)
- for _, vid := range vold.MapKeys() {
- id := vid.Interface().(goobj.SymID)
- if strings.HasPrefix(id.Name, "dead_") {
- if vnew.MapIndex(vid).IsValid() {
- t.Errorf("%s: %s contains unnecessary symbol %s", obj, name, id)
- }
- } else {
- if !vnew.MapIndex(vid).IsValid() {
- t.Errorf("%s: %s is missing symbol %s", obj, name, id)
- }
- }
- }
- for _, vid := range vnew.MapKeys() {
- id := vid.Interface().(goobj.SymID)
- if !vold.MapIndex(vid).IsValid() {
- t.Errorf("%s: %s contains unexpected symbol %s", obj, name, id)
- }
- }
-}
-
-func copySlice(x []*Sym) (out []*Sym) {
- return append(out, x...)
-}
-
-func checkDeadSlice(t *testing.T, obj, name string, old, new []*Sym) {
- for i, s := range old {
- if strings.HasPrefix(s.Name, "dead_") {
- continue
- }
- if len(new) == 0 {
- t.Errorf("%s: %s is missing symbol %s\nhave%v\nwant%v", obj, name, s, new, old[i:])
- return
- }
- if new[0].SymID != s.SymID {
- t.Errorf("%s: %s is incorrect: have %s, want %s\nhave%v\nwant%v", obj, name, new[0].SymID, s.SymID, new, old[i:])
- return
- }
- new = new[1:]
- }
- if len(new) > 0 {
- t.Errorf("%s: %s has unexpected symbols: %v", obj, name, new)
- }
-}
+++ /dev/null
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Generation of debug data structures (in the executable but not mapped at run time).
-// See also runtime.go.
-
-package main
-
-func (p *Prog) debug() {
-}
+++ /dev/null
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package main
-
-import (
- "encoding/hex"
- "fmt"
- "io/ioutil"
- "regexp"
- "strconv"
- "strings"
- "testing"
-)
-
-// mustParseHexdumpFile returns a block of data generated by
-// parsing the hex dump in the named file.
-// If the file cannot be read or does not contain a valid hex dump,
-// mustParseHexdumpFile calls t.Fatal.
-func mustParseHexdumpFile(t *testing.T, file string) []byte {
- hex, err := ioutil.ReadFile(file)
- if err != nil {
- t.Fatal(err)
- }
- data, err := parseHexdump(string(hex))
- if err != nil {
- t.Fatal(err)
- }
- return data
-}
-
-// parseHexdump parses the hex dump in text, which should be the
-// output of "hexdump -C" or Plan 9's "xd -b",
-// and returns the original data used to produce the dump.
-// It is meant to enable storing golden binary files as text, so that
-// changes to the golden files can be seen during code reviews.
-func parseHexdump(text string) ([]byte, error) {
- var out []byte
- for _, line := range strings.Split(text, "\n") {
- if i := strings.Index(line, "|"); i >= 0 { // remove text dump
- line = line[:i]
- }
- f := strings.Fields(line)
- if len(f) > 1+16 {
- return nil, fmt.Errorf("parsing hex dump: too many fields on line %q", line)
- }
- if len(f) == 0 || len(f) == 1 && f[0] == "*" { // all zeros block omitted
- continue
- }
- addr64, err := strconv.ParseUint(f[0], 16, 0)
- if err != nil {
- return nil, fmt.Errorf("parsing hex dump: invalid address %q", f[0])
- }
- addr := int(addr64)
- if len(out) < addr {
- out = append(out, make([]byte, addr-len(out))...)
- }
- for _, x := range f[1:] {
- val, err := strconv.ParseUint(x, 16, 8)
- if err != nil {
- return nil, fmt.Errorf("parsing hexdump: invalid hex byte %q", x)
- }
- out = append(out, byte(val))
- }
- }
- return out, nil
-}
-
-func hexdump(data []byte) string {
- text := hex.Dump(data) + fmt.Sprintf("%08x\n", len(data))
- text = regexp.MustCompile(`\n([0-9a-f]+(\s+00){16}.*\n)+`).ReplaceAllString(text, "\n*\n")
- return text
-}
+++ /dev/null
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Executable image layout - address assignment.
-
-package main
-
-import (
- "cmd/internal/goobj"
-)
-
-// A layoutSection describes a single section to add to the
-// final executable. Go binaries only have a fixed set of possible
-// sections, and the symbol kind determines the section.
-type layoutSection struct {
- Segment string
- Section string
- Kind goobj.SymKind
- Index int
-}
-
-// layout defines the layout of the generated Go executable.
-// The order of entries here is the order in the executable.
-// Entries with the same Segment name must be contiguous.
-var layout = []layoutSection{
- {Segment: "text", Section: "text", Kind: goobj.STEXT},
- {Segment: "rodata", Section: "rodata", Kind: goobj.SRODATA},
- {Segment: "rodata", Section: "functab", Kind: goobj.SPCLNTAB},
- {Segment: "rodata", Section: "typelink", Kind: goobj.STYPELINK},
- {Segment: "data", Section: "noptrdata", Kind: goobj.SNOPTRDATA},
- {Segment: "data", Section: "data", Kind: goobj.SDATA},
- {Segment: "data", Section: "bss", Kind: goobj.SBSS},
- {Segment: "data", Section: "noptrbss", Kind: goobj.SNOPTRBSS},
-
- // Later:
- // {"rodata", "type", goobj.STYPE},
- // {"rodata", "string", goobj.SSTRING},
- // {"rodata", "gostring", goobj.SGOSTRING},
- // {"rodata", "gofunc", goobj.SGOFUNC},
-}
-
-// layoutByKind maps from SymKind to an entry in layout.
-var layoutByKind []*layoutSection
-
-func init() {
- // Build index from symbol type to layout entry.
- max := 0
- for _, sect := range layout {
- if max <= int(sect.Kind) {
- max = int(sect.Kind) + 1
- }
- }
- layoutByKind = make([]*layoutSection, max)
- for i := range layout {
- sect := &layout[i]
- layoutByKind[sect.Kind] = sect
- sect.Index = i
- }
-}
-
-// layout arranges symbols into sections and sections into segments,
-// and then it assigns addresses to segments, sections, and symbols.
-func (p *Prog) layout() {
- sections := make([]*Section, len(layout))
-
- // Assign symbols to sections using index, creating sections as needed.
- // Could keep sections separated by type during input instead.
- for _, sym := range p.SymOrder {
- kind := sym.Kind
- if kind < 0 || int(kind) >= len(layoutByKind) || layoutByKind[kind] == nil {
- p.errorf("%s: unexpected symbol kind %v", sym.SymID, kind)
- continue
- }
- lsect := layoutByKind[kind]
- sect := sections[lsect.Index]
- if sect == nil {
- sect = &Section{
- Name: lsect.Section,
- Align: 1,
- }
- sections[lsect.Index] = sect
- }
- if sym.Data.Size > 0 || len(sym.Bytes) > 0 {
- sect.InFile = true
- }
- sym.Section = sect
- sect.Syms = append(sect.Syms, sym)
-
- // TODO(rsc): Incorporate alignment information.
- // First that information needs to be added to the object files.
- //
- // if sect.Align < Addr(sym.Align) {
- // sect.Align = Addr(sym.Align)
- // }
- }
-
- // Assign sections to segments, creating segments as needed.
- var seg *Segment
- for i, sect := range sections {
- if sect == nil {
- continue
- }
- segName := layout[i].Segment
-
- // Special case: Mach-O does not support "rodata" segment,
- // so store read-only data in text segment.
- if p.GOOS == "darwin" && segName == "rodata" {
- segName = "text"
- }
-
- if seg == nil || seg.Name != segName {
- seg = &Segment{
- Name: segName,
- }
- p.Segments = append(p.Segments, seg)
- }
- sect.Segment = seg
- seg.Sections = append(seg.Sections, sect)
- }
-
- // Assign addresses.
-
- // TODO(rsc): This choice needs to be informed by both
- // the formatter and the target architecture.
- // And maybe eventually a command line flag (sigh).
- const segAlign = 4096
-
- // TODO(rsc): Use a larger amount on most systems, which will let the
- // compiler eliminate more nil checks.
- if p.UnmappedSize == 0 {
- p.UnmappedSize = segAlign
- }
-
- // TODO(rsc): addr := Addr(0) when generating a shared library or PIE.
- addr := p.UnmappedSize
-
- // Account for initial file header.
- hdrVirt, hdrFile := p.formatter.headerSize(p)
- addr += hdrVirt
-
- // Assign addresses to segments, sections, symbols.
- // Assign sizes to segments, sections.
- startVirt := addr
- startFile := hdrFile
- for _, seg := range p.Segments {
- addr = round(addr, segAlign)
- seg.VirtAddr = addr
- seg.FileOffset = startFile + seg.VirtAddr - startVirt
- for _, sect := range seg.Sections {
- addr = round(addr, sect.Align)
- sect.VirtAddr = addr
- for _, sym := range sect.Syms {
- // TODO(rsc): Respect alignment once we have that information.
- sym.Addr = addr
- addr += Addr(sym.Size)
- }
- sect.Size = addr - sect.VirtAddr
- if sect.InFile {
- seg.FileSize = addr - seg.VirtAddr
- }
- }
- seg.VirtSize = addr - seg.VirtAddr
- }
-
- // Define symbols for section names.
- var progEnd Addr
- for i, sect := range sections {
- name := layout[i].Section
- var start, end Addr
- if sect != nil {
- start = sect.VirtAddr
- end = sect.VirtAddr + sect.Size
- }
- p.defineConst("runtime."+name, start)
- p.defineConst("runtime.e"+name, end)
- progEnd = end
- }
- p.defineConst("runtime.end", progEnd)
-}
+++ /dev/null
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package main
-
-import (
- "bytes"
- "strings"
- "testing"
-)
-
-func TestLayout(t *testing.T) {
- p := Prog{GOOS: "darwin", GOARCH: "amd64", StartSym: "text_start"}
- p.omitRuntime = true
- p.Error = func(s string) { t.Error(s) }
- var buf bytes.Buffer
- const obj = "testdata/layout.6"
- p.link(&buf, obj)
- if p.NumError > 0 {
- return // already reported
- }
- if len(p.Dead) > 0 {
- t.Errorf("%s: unexpected dead symbols %v", obj, p.Dead)
- return
- }
-
- for _, sym := range p.SymOrder {
- if p.isAuto(sym.SymID) {
- continue
- }
- if sym.Section == nil {
- t.Errorf("%s: symbol %s is missing section", obj, sym)
- continue
- }
- i := strings.Index(sym.Name, "_")
- if i < 0 {
- t.Errorf("%s: unexpected symbol %s", obj, sym)
- continue
- }
- if sym.Section.Name != sym.Name[:i] {
- t.Errorf("%s: symbol %s in section %s, want %s", obj, sym, sym.Section.Name, sym.Name[:i])
- }
- }
-}
+++ /dev/null
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package main
-
-import (
- "bytes"
- "cmd/internal/goobj"
- "io/ioutil"
- "testing"
-)
-
-func TestLinkHello(t *testing.T) {
- p := &Prog{
- GOOS: "darwin",
- GOARCH: "amd64",
- Error: func(s string) { t.Error(s) },
- StartSym: "_rt0_go",
- }
- var buf bytes.Buffer
- p.link(&buf, "testdata/hello.6")
- if p.NumError > 0 {
- return
- }
- if p.Syms[goobj.SymID{"_rt0_go", 0}] == nil || p.Syms[goobj.SymID{"hello", 1}] == nil {
- t.Errorf("Syms = %v, want at least [_rt0_go hello<1>]", p.Syms)
- }
-
- // uncomment to leave file behind for execution:
- if false {
- ioutil.WriteFile("a.out", buf.Bytes(), 0777)
- }
- checkGolden(t, buf.Bytes(), "testdata/link.hello.darwin.amd64")
-}
+++ /dev/null
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Loading of code and data fragments from package files into final image.
-
-package main
-
-import (
- "cmd/internal/obj"
- "os"
-)
-
-// load allocates segment images, populates them with data
-// read from package files, and applies relocations to the data.
-func (p *Prog) load() {
- // TODO(rsc): mmap the output file and store the data directly.
- // That will make writing the output file more efficient.
- for _, seg := range p.Segments {
- seg.Data = make([]byte, seg.FileSize)
- }
- for _, pkg := range p.Packages {
- p.loadPackage(pkg)
- }
-}
-
-// loadPackage loads and relocates data for all the
-// symbols needed in the given package.
-func (p *Prog) loadPackage(pkg *Package) {
- if pkg.File == "" {
- // This "package" contains internally generated symbols only.
- // All such symbols have a sym.Bytes field holding the actual data
- // (if any), plus relocations.
- for _, sym := range pkg.Syms {
- if sym.Bytes == nil {
- continue
- }
- seg := sym.Section.Segment
- off := sym.Addr - seg.VirtAddr
- data := seg.Data[off : off+Addr(sym.Size)]
- copy(data, sym.Bytes)
- p.relocateSym(sym, data)
- }
- return
- }
-
- // Package stored in file.
- f, err := os.Open(pkg.File)
- if err != nil {
- p.errorf("%v", err)
- return
- }
- defer f.Close()
-
- // TODO(rsc): Mmap file into memory.
-
- for _, sym := range pkg.Syms {
- if sym.Data.Size == 0 {
- continue
- }
- // TODO(rsc): If not using mmap, at least coalesce nearby reads.
- if sym.Section == nil {
- p.errorf("internal error: missing section for %s", sym.Name)
- }
- seg := sym.Section.Segment
- off := sym.Addr - seg.VirtAddr
- if off >= Addr(len(seg.Data)) || off+Addr(sym.Data.Size) > Addr(len(seg.Data)) {
- p.errorf("internal error: allocated space for %s too small: %d bytes for %d+%d (%d)", sym, len(seg.Data), off, sym.Data.Size, sym.Size)
- }
- data := seg.Data[off : off+Addr(sym.Data.Size)]
- _, err := f.ReadAt(data, sym.Data.Offset)
- if err != nil {
- p.errorf("reading %v: %v", sym.SymID, err)
- }
- p.relocateSym(sym, data)
- }
-}
-
-// relocateSym applies relocations to sym's data.
-func (p *Prog) relocateSym(sym *Sym, data []byte) {
- for i := range sym.Reloc {
- r := &sym.Reloc[i]
- targ := p.Syms[r.Sym]
- if targ == nil {
- p.errorf("%v: reference to undefined symbol %v", sym, r.Sym)
- continue
- }
- val := targ.Addr + Addr(r.Add)
- switch r.Type {
- default:
- p.errorf("%v: unknown relocation type %d", sym, r.Type)
- case obj.R_ADDR, obj.R_CALLIND:
- // ok
- case obj.R_PCREL, obj.R_CALL:
- val -= sym.Addr + Addr(r.Offset+r.Size)
- }
- frag := data[r.Offset : r.Offset+r.Size]
- switch r.Size {
- default:
- p.errorf("%v: unknown relocation size %d", sym, r.Size)
- case 4:
- // TODO(rsc): Check for overflow?
- p.byteorder.PutUint32(frag, uint32(val))
- case 8:
- p.byteorder.PutUint64(frag, uint64(val))
- }
- }
-}
+++ /dev/null
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Mach-O (Darwin) object file writing.
-
-package main
-
-import (
- "debug/macho"
- "encoding/binary"
- "io"
- "strings"
-)
-
-// machoFormat is the implementation of formatter.
-type machoFormat struct{}
-
-// machoHeader and friends are data structures
-// corresponding to the Mach-O file header
-// to be written to disk.
-
-const (
- macho64Bit = 1 << 24
- machoSubCPU386 = 3
-)
-
-// machoArch describes a Mach-O target architecture.
-type machoArch struct {
- CPU uint32
- SubCPU uint32
-}
-
-// machoHeader is the Mach-O file header.
-type machoHeader struct {
- machoArch
- FileType uint32
- Loads []*machoLoad
- Segments []*machoSegment
- p *Prog // for reporting errors
-}
-
-// machoLoad is a Mach-O load command.
-type machoLoad struct {
- Type uint32
- Data []uint32
-}
-
-// machoSegment is a Mach-O segment.
-type machoSegment struct {
- Name string
- VirtAddr Addr
- VirtSize Addr
- FileOffset Addr
- FileSize Addr
- Prot1 uint32
- Prot2 uint32
- Flags uint32
- Sections []*machoSection
-}
-
-// machoSection is a Mach-O section, inside a segment.
-type machoSection struct {
- Name string
- Segment string
- Addr Addr
- Size Addr
- Offset uint32
- Align uint32
- Reloc uint32
- Nreloc uint32
- Flags uint32
- Res1 uint32
- Res2 uint32
-}
-
-// layout positions the segments and sections in p
-// to make room for the Mach-O file header.
-// That is, it edits their VirtAddr fields to adjust for the presence
-// of the Mach-O header at the beginning of the address space.
-func (machoFormat) headerSize(p *Prog) (virt, file Addr) {
- var h machoHeader
- h.init(p)
- size := Addr(h.size())
- size = round(size, 4096)
- p.HeaderSize = size
- return size, size
-}
-
-// write writes p to w as a Mach-O executable.
-// layout(p) must have already been called,
-// and the number, sizes, and addresses of the segments
-// and sections must not have been modified since the call.
-func (machoFormat) write(w io.Writer, p *Prog) {
- var h machoHeader
- h.init(p)
- off := Addr(0)
- enc := h.encode()
- w.Write(enc)
- off += Addr(len(enc))
- for _, seg := range p.Segments {
- if seg.FileOffset < off {
- h.p.errorf("mach-o error: invalid file offset")
- }
- w.Write(make([]byte, int(seg.FileOffset-off)))
- if seg.FileSize != Addr(len(seg.Data)) {
- h.p.errorf("mach-o error: invalid file size")
- }
- w.Write(seg.Data)
- off = seg.FileOffset + Addr(len(seg.Data))
- }
-}
-
-// Conversion of Prog to macho data structures.
-
-// machoArches maps from GOARCH to machoArch.
-var machoArches = map[string]machoArch{
- "amd64": {
- CPU: uint32(macho.CpuAmd64),
- SubCPU: uint32(machoSubCPU386),
- },
-}
-
-// init initializes the header h to describe p.
-func (h *machoHeader) init(p *Prog) {
- h.p = p
- h.Segments = nil
- h.Loads = nil
- var ok bool
- h.machoArch, ok = machoArches[p.GOARCH]
- if !ok {
- p.errorf("mach-o: unknown target GOARCH %q", p.GOARCH)
- return
- }
- h.FileType = uint32(macho.TypeExec)
-
- mseg := h.addSegment(p, "__PAGEZERO", nil)
- mseg.VirtSize = p.UnmappedSize
-
- for _, seg := range p.Segments {
- h.addSegment(p, "__"+strings.ToUpper(seg.Name), seg)
- }
-
- var data []uint32
- switch h.CPU {
- default:
- p.errorf("mach-o: unknown cpu %#x for GOARCH %q", h.CPU, p.GOARCH)
- case uint32(macho.CpuAmd64):
- data = make([]uint32, 2+42)
- data[0] = 4 // thread type
- data[1] = 42 // word count
- data[2+32] = uint32(p.Entry) // RIP register, in two parts
- data[2+32+1] = uint32(p.Entry >> 32)
- }
-
- h.Loads = append(h.Loads, &machoLoad{
- Type: uint32(macho.LoadCmdUnixThread),
- Data: data,
- })
-}
-
-// addSegment adds to h a Mach-O segment like seg with the given name.
-func (h *machoHeader) addSegment(p *Prog, name string, seg *Segment) *machoSegment {
- mseg := &machoSegment{
- Name: name,
- }
- h.Segments = append(h.Segments, mseg)
- if seg == nil {
- return mseg
- }
-
- mseg.VirtAddr = seg.VirtAddr
- mseg.VirtSize = seg.VirtSize
- mseg.FileOffset = round(seg.FileOffset, 4096)
- mseg.FileSize = seg.FileSize
-
- if name == "__TEXT" {
- // Initially RWX, then just RX
- mseg.Prot1 = 7
- mseg.Prot2 = 5
-
- // Text segment maps Mach-O header, needed by dynamic linker.
- mseg.VirtAddr -= p.HeaderSize
- mseg.VirtSize += p.HeaderSize
- mseg.FileOffset -= p.HeaderSize
- mseg.FileSize += p.HeaderSize
- } else {
- // RW
- mseg.Prot1 = 3
- mseg.Prot2 = 3
- }
-
- for _, sect := range seg.Sections {
- h.addSection(mseg, seg, sect)
- }
- return mseg
-}
-
-// addSection adds to mseg a Mach-O section like sect, inside seg, with the given name.
-func (h *machoHeader) addSection(mseg *machoSegment, seg *Segment, sect *Section) {
- msect := &machoSection{
- Name: "__" + sect.Name,
- Segment: mseg.Name,
- // Reloc: sect.RelocOffset,
- // NumReloc: sect.RelocLen / 8,
- Addr: sect.VirtAddr,
- Size: sect.Size,
- }
- mseg.Sections = append(mseg.Sections, msect)
-
- for 1<<msect.Align < sect.Align {
- msect.Align++
- }
-
- if off := sect.VirtAddr - seg.VirtAddr; off < seg.FileSize {
- // Data in file.
- if sect.Size > seg.FileSize-off {
- h.p.errorf("mach-o error: section crosses file boundary")
- }
- msect.Offset = uint32(seg.FileOffset + off)
- } else {
- // Zero filled.
- msect.Flags |= 1
- }
-
- if sect.Name == "text" {
- msect.Flags |= 0x400 // contains executable instructions
- }
-}
-
-// A machoWriter helps write Mach-O headers.
-// It is basically a buffer with some helper routines for writing integers.
-type machoWriter struct {
- dst []byte
- tmp [8]byte
- order binary.ByteOrder
- is64 bool
- p *Prog
-}
-
-// if64 returns x if w is writing a 64-bit object file; otherwise it returns y.
-func (w *machoWriter) if64(x, y interface{}) interface{} {
- if w.is64 {
- return x
- }
- return y
-}
-
-// encode encodes each of the given arguments into the writer.
-// It encodes uint32, []uint32, uint64, and []uint64 by writing each value
-// in turn in the correct byte order for the output file.
-// It encodes an Addr as a uint64 if writing a 64-bit output file, or else as a uint32.
-// It encodes []byte and string by writing the raw bytes (no length prefix).
-// It skips nil values in the args list.
-func (w *machoWriter) encode(args ...interface{}) {
- for _, arg := range args {
- switch arg := arg.(type) {
- default:
- w.p.errorf("mach-o error: cannot encode %T", arg)
- case nil:
- // skip
- case []byte:
- w.dst = append(w.dst, arg...)
- case string:
- w.dst = append(w.dst, arg...)
- case uint32:
- w.order.PutUint32(w.tmp[:], arg)
- w.dst = append(w.dst, w.tmp[:4]...)
- case []uint32:
- for _, x := range arg {
- w.order.PutUint32(w.tmp[:], x)
- w.dst = append(w.dst, w.tmp[:4]...)
- }
- case uint64:
- w.order.PutUint64(w.tmp[:], arg)
- w.dst = append(w.dst, w.tmp[:8]...)
- case Addr:
- if w.is64 {
- w.order.PutUint64(w.tmp[:], uint64(arg))
- w.dst = append(w.dst, w.tmp[:8]...)
- } else {
- if Addr(uint32(arg)) != arg {
- w.p.errorf("mach-o error: truncating address %#x to uint32", arg)
- }
- w.order.PutUint32(w.tmp[:], uint32(arg))
- w.dst = append(w.dst, w.tmp[:4]...)
- }
- }
- }
-}
-
-// segmentSize returns the size of the encoding of seg in bytes.
-func (w *machoWriter) segmentSize(seg *machoSegment) int {
- if w.is64 {
- return 18*4 + 20*4*len(seg.Sections)
- }
- return 14*4 + 22*4*len(seg.Sections)
-}
-
-// zeroPad returns the string s truncated or padded with NULs to n bytes.
-func zeroPad(s string, n int) string {
- if len(s) >= n {
- return s[:n]
- }
- return s + strings.Repeat("\x00", n-len(s))
-}
-
-// size returns the encoded size of the header.
-func (h *machoHeader) size() int {
- // Could write separate code, but encoding is cheap; encode and throw it away.
- return len(h.encode())
-}
-
-// encode returns the Mach-O encoding of the header.
-func (h *machoHeader) encode() []byte {
- w := &machoWriter{p: h.p}
- w.is64 = h.CPU&macho64Bit != 0
- w.order = w.p.byteorder
-
- loadSize := 0
- for _, seg := range h.Segments {
- loadSize += w.segmentSize(seg)
- }
- for _, l := range h.Loads {
- loadSize += 4 * (2 + len(l.Data))
- }
-
- w.encode(
- w.if64(macho.Magic64, macho.Magic32),
- uint32(h.CPU),
- uint32(h.SubCPU),
- uint32(h.FileType),
- uint32(len(h.Loads)+len(h.Segments)),
- uint32(loadSize),
- uint32(1),
- w.if64(uint32(0), nil),
- )
-
- for _, seg := range h.Segments {
- w.encode(
- w.if64(uint32(macho.LoadCmdSegment64), uint32(macho.LoadCmdSegment)),
- uint32(w.segmentSize(seg)),
- zeroPad(seg.Name, 16),
- seg.VirtAddr,
- seg.VirtSize,
- seg.FileOffset,
- seg.FileSize,
- seg.Prot1,
- seg.Prot2,
- uint32(len(seg.Sections)),
- seg.Flags,
- )
- for _, sect := range seg.Sections {
- w.encode(
- zeroPad(sect.Name, 16),
- zeroPad(seg.Name, 16),
- sect.Addr,
- sect.Size,
- sect.Offset,
- sect.Align,
- sect.Reloc,
- sect.Nreloc,
- sect.Flags,
- sect.Res1,
- sect.Res2,
- w.if64(uint32(0), nil),
- )
- }
- }
-
- for _, load := range h.Loads {
- w.encode(
- load.Type,
- uint32(4*(2+len(load.Data))),
- load.Data,
- )
- }
-
- return w.dst
-}
+++ /dev/null
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package main
-
-import (
- "bytes"
- "debug/macho"
- "encoding/binary"
- "fmt"
- "io/ioutil"
- "strings"
- "testing"
-)
-
-// Test macho writing by checking that each generated prog can be written
-// and then read back using debug/macho to get the same prog.
-// Also check against golden testdata file.
-var machoWriteTests = []struct {
- name string
- golden bool
- prog *Prog
-}{
- // amd64 exit 9
- {
- name: "exit9",
- golden: true,
- prog: &Prog{
- GOARCH: "amd64",
- GOOS: "darwin",
- UnmappedSize: 0x1000,
- Entry: 0x1000,
- Segments: []*Segment{
- {
- Name: "text",
- VirtAddr: 0x1000,
- VirtSize: 13,
- FileOffset: 0,
- FileSize: 13,
- Data: []byte{
- 0xb8, 0x01, 0x00, 0x00, 0x02, // MOVL $0x2000001, AX
- 0xbf, 0x09, 0x00, 0x00, 0x00, // MOVL $9, DI
- 0x0f, 0x05, // SYSCALL
- 0xf4, // HLT
- },
- Sections: []*Section{
- {
- Name: "text",
- VirtAddr: 0x1000,
- Size: 13,
- Align: 64,
- },
- },
- },
- },
- },
- },
-
- // amd64 write hello world & exit 9
- {
- name: "hello",
- golden: true,
- prog: &Prog{
- GOARCH: "amd64",
- GOOS: "darwin",
- UnmappedSize: 0x1000,
- Entry: 0x1000,
- Segments: []*Segment{
- {
- Name: "text",
- VirtAddr: 0x1000,
- VirtSize: 35,
- FileOffset: 0,
- FileSize: 35,
- Data: []byte{
- 0xb8, 0x04, 0x00, 0x00, 0x02, // MOVL $0x2000001, AX
- 0xbf, 0x01, 0x00, 0x00, 0x00, // MOVL $1, DI
- 0xbe, 0x00, 0x30, 0x00, 0x00, // MOVL $0x3000, SI
- 0xba, 0x0c, 0x00, 0x00, 0x00, // MOVL $12, DX
- 0x0f, 0x05, // SYSCALL
- 0xb8, 0x01, 0x00, 0x00, 0x02, // MOVL $0x2000001, AX
- 0xbf, 0x09, 0x00, 0x00, 0x00, // MOVL $9, DI
- 0x0f, 0x05, // SYSCALL
- 0xf4, // HLT
- },
- Sections: []*Section{
- {
- Name: "text",
- VirtAddr: 0x1000,
- Size: 35,
- Align: 64,
- },
- },
- },
- {
- Name: "data",
- VirtAddr: 0x2000,
- VirtSize: 12,
- FileOffset: 0x1000,
- FileSize: 12,
- Data: []byte("hello world\n"),
- Sections: []*Section{
- {
- Name: "data",
- VirtAddr: 0x2000,
- Size: 12,
- Align: 64,
- },
- },
- },
- },
- },
- },
-
- // amd64 write hello world from rodata & exit 0
- {
- name: "helloro",
- golden: true,
- prog: &Prog{
- GOARCH: "amd64",
- GOOS: "darwin",
- UnmappedSize: 0x1000,
- Entry: 0x1000,
- Segments: []*Segment{
- {
- Name: "text",
- VirtAddr: 0x1000,
- VirtSize: 0x100c,
- FileOffset: 0,
- FileSize: 0x100c,
- Data: concat(
- []byte{
- 0xb8, 0x04, 0x00, 0x00, 0x02, // MOVL $0x2000001, AX
- 0xbf, 0x01, 0x00, 0x00, 0x00, // MOVL $1, DI
- 0xbe, 0x00, 0x30, 0x00, 0x00, // MOVL $0x3000, SI
- 0xba, 0x0c, 0x00, 0x00, 0x00, // MOVL $12, DX
- 0x0f, 0x05, // SYSCALL
- 0xb8, 0x01, 0x00, 0x00, 0x02, // MOVL $0x2000001, AX
- 0xbf, 0x00, 0x00, 0x00, 0x00, // MOVL $0, DI
- 0x0f, 0x05, // SYSCALL
- 0xf4, // HLT
- },
- make([]byte, 0x1000-35),
- []byte("hello world\n"),
- ),
- Sections: []*Section{
- {
- Name: "text",
- VirtAddr: 0x1000,
- Size: 35,
- Align: 64,
- },
- {
- Name: "rodata",
- VirtAddr: 0x2000,
- Size: 12,
- Align: 64,
- },
- },
- },
- },
- },
- },
-}
-
-func concat(xs ...[]byte) []byte {
- var out []byte
- for _, x := range xs {
- out = append(out, x...)
- }
- return out
-}
-
-func TestMachoWrite(t *testing.T) {
- for _, tt := range machoWriteTests {
- name := tt.prog.GOARCH + "." + tt.name
- prog := cloneProg(tt.prog)
- prog.init()
- var f machoFormat
- vsize, fsize := f.headerSize(prog)
- shiftProg(prog, vsize, fsize)
- var buf bytes.Buffer
- f.write(&buf, prog)
- if false { // enable to debug
- ioutil.WriteFile("a.out", buf.Bytes(), 0777)
- }
- read, err := machoRead(machoArches[tt.prog.GOARCH], buf.Bytes())
- if err != nil {
- t.Errorf("%s: reading mach-o output:\n\t%v", name, err)
- continue
- }
- diffs := diffProg(read, prog)
- if diffs != nil {
- t.Errorf("%s: mismatched prog:\n\t%s", name, strings.Join(diffs, "\n\t"))
- continue
- }
- if !tt.golden {
- continue
- }
- checkGolden(t, buf.Bytes(), "testdata/macho."+name)
- }
-}
-
-// machoRead reads the mach-o file in data and returns a corresponding prog.
-func machoRead(arch machoArch, data []byte) (*Prog, error) {
- f, err := macho.NewFile(bytes.NewReader(data))
- if err != nil {
- return nil, err
- }
-
- var errors []string
- errorf := func(format string, args ...interface{}) {
- errors = append(errors, fmt.Sprintf(format, args...))
- }
-
- magic := uint32(0xFEEDFACE)
- if arch.CPU&macho64Bit != 0 {
- magic |= 1
- }
- if f.Magic != magic {
- errorf("header: Magic = %#x, want %#x", f.Magic, magic)
- }
- if f.Cpu != macho.CpuAmd64 {
- errorf("header: CPU = %#x, want %#x", f.Cpu, macho.CpuAmd64)
- }
- if f.SubCpu != 3 {
- errorf("header: SubCPU = %#x, want %#x", f.SubCpu, 3)
- }
- if f.Type != 2 {
- errorf("header: FileType = %d, want %d", f.Type, 2)
- }
- if f.Flags != 1 {
- errorf("header: Flags = %d, want %d", f.Flags, 1)
- }
-
- msects := f.Sections
- var limit uint64
- prog := new(Prog)
- for _, load := range f.Loads {
- switch load := load.(type) {
- default:
- errorf("unexpected macho load %T %x", load, load.Raw())
-
- case macho.LoadBytes:
- if len(load) < 8 || len(load)%4 != 0 {
- errorf("unexpected load length %d", len(load))
- continue
- }
- cmd := f.ByteOrder.Uint32(load)
- switch macho.LoadCmd(cmd) {
- default:
- errorf("unexpected macho load cmd %s", macho.LoadCmd(cmd))
- case macho.LoadCmdUnixThread:
- data := make([]uint32, len(load[8:])/4)
- binary.Read(bytes.NewReader(load[8:]), f.ByteOrder, data)
- if len(data) != 44 {
- errorf("macho thread len(data) = %d, want 42", len(data))
- continue
- }
- if data[0] != 4 {
- errorf("macho thread type = %d, want 4", data[0])
- }
- if data[1] != uint32(len(data))-2 {
- errorf("macho thread desc len = %d, want %d", data[1], uint32(len(data))-2)
- continue
- }
- for i, val := range data[2:] {
- switch i {
- default:
- if val != 0 {
- errorf("macho thread data[%d] = %#x, want 0", i, val)
- }
- case 32:
- prog.Entry = Addr(val)
- case 33:
- prog.Entry |= Addr(val) << 32
- }
- }
- }
-
- case *macho.Segment:
- if load.Addr < limit {
- errorf("segments out of order: %q at %#x after %#x", load.Name, load.Addr, limit)
- }
- limit = load.Addr + load.Memsz
- if load.Name == "__PAGEZERO" || load.Addr == 0 && load.Filesz == 0 {
- if load.Name != "__PAGEZERO" {
- errorf("segment with Addr=0, Filesz=0 is named %q, want %q", load.Name, "__PAGEZERO")
- } else if load.Addr != 0 || load.Filesz != 0 {
- errorf("segment %q has Addr=%#x, Filesz=%d, want Addr=%#x, Filesz=%d", load.Name, load.Addr, load.Filesz, 0, 0)
- }
- prog.UnmappedSize = Addr(load.Memsz)
- continue
- }
-
- if !strings.HasPrefix(load.Name, "__") {
- errorf("segment name %q does not begin with %q", load.Name, "__")
- }
- if strings.ToUpper(load.Name) != load.Name {
- errorf("segment name %q is not all upper case", load.Name)
- }
-
- seg := &Segment{
- Name: strings.ToLower(strings.TrimPrefix(load.Name, "__")),
- VirtAddr: Addr(load.Addr),
- VirtSize: Addr(load.Memsz),
- FileOffset: Addr(load.Offset),
- FileSize: Addr(load.Filesz),
- }
- prog.Segments = append(prog.Segments, seg)
-
- data, err := load.Data()
- if err != nil {
- errorf("loading data from %q: %v", load.Name, err)
- }
- seg.Data = data
-
- var maxprot, prot uint32
- if load.Name == "__TEXT" {
- maxprot, prot = 7, 5
- } else {
- maxprot, prot = 3, 3
- }
- if load.Maxprot != maxprot || load.Prot != prot {
- errorf("segment %q protection is %d, %d, want %d, %d",
- load.Name, load.Maxprot, load.Prot, maxprot, prot)
- }
-
- for len(msects) > 0 && msects[0].Addr < load.Addr+load.Memsz {
- msect := msects[0]
- msects = msects[1:]
-
- if msect.Offset > 0 && prog.HeaderSize == 0 {
- prog.HeaderSize = Addr(msect.Offset)
- if seg.FileOffset != 0 {
- errorf("initial segment %q does not map header", load.Name)
- }
- seg.VirtAddr += prog.HeaderSize
- seg.VirtSize -= prog.HeaderSize
- seg.FileOffset += prog.HeaderSize
- seg.FileSize -= prog.HeaderSize
- seg.Data = seg.Data[prog.HeaderSize:]
- }
-
- if msect.Addr < load.Addr {
- errorf("section %q at address %#x is missing segment", msect.Name, msect.Addr)
- continue
- }
-
- if !strings.HasPrefix(msect.Name, "__") {
- errorf("section name %q does not begin with %q", msect.Name, "__")
- }
- if strings.ToLower(msect.Name) != msect.Name {
- errorf("section name %q is not all lower case", msect.Name)
- }
- if msect.Seg != load.Name {
- errorf("section %q is lists segment name %q, want %q",
- msect.Name, msect.Seg, load.Name)
- }
- if uint64(msect.Offset) != uint64(load.Offset)+msect.Addr-load.Addr {
- errorf("section %q file offset is %#x, want %#x",
- msect.Name, msect.Offset, load.Offset+msect.Addr-load.Addr)
- }
- if msect.Reloff != 0 || msect.Nreloc != 0 {
- errorf("section %q has reloff %d,%d, want %d,%d",
- msect.Name, msect.Reloff, msect.Nreloc, 0, 0)
- }
- flags := uint32(0)
- if msect.Name == "__text" {
- flags = 0x400
- }
- if msect.Offset == 0 {
- flags = 1
- }
- if msect.Flags != flags {
- errorf("section %q flags = %#x, want %#x", msect.Name, msect.Flags, flags)
- }
- sect := &Section{
- Name: strings.ToLower(strings.TrimPrefix(msect.Name, "__")),
- VirtAddr: Addr(msect.Addr),
- Size: Addr(msect.Size),
- Align: 1 << msect.Align,
- }
- seg.Sections = append(seg.Sections, sect)
- }
- }
- }
-
- for _, msect := range msects {
- errorf("section %q has no segment", msect.Name)
- }
-
- limit = 0
- for _, msect := range f.Sections {
- if msect.Addr < limit {
- errorf("sections out of order: %q at %#x after %#x", msect.Name, msect.Addr, limit)
- }
- limit = msect.Addr + msect.Size
- }
-
- err = nil
- if errors != nil {
- err = fmt.Errorf("%s", strings.Join(errors, "\n\t"))
- }
- return prog, err
-}
+++ /dev/null
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Placeholder to keep build building.
-
-package main
-
-func main() {}
+++ /dev/null
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Generation of runtime function information (pclntab).
-
-package main
-
-import (
- "cmd/internal/goobj"
- "cmd/internal/obj"
- "encoding/binary"
- "os"
- "sort"
-)
-
-var zerofunc goobj.Func
-
-// pclntab collects the runtime function data for each function that will
-// be listed in the binary and builds a single table describing all functions.
-// This table is used at run time for stack traces and to look up PC-specific
-// information during garbage collection. The symbol created is named
-// "pclntab" for historical reasons; the scope of the table has grown to
-// include more than just PC/line number correspondences.
-// The table format is documented at https://golang.org/s/go12symtab.
-func (p *Prog) pclntab() {
- // Count number of functions going into the binary,
- // so that we can size the initial index correctly.
- nfunc := 0
- for _, sym := range p.SymOrder {
- if sym.Kind != goobj.STEXT {
- continue
- }
- nfunc++
- }
-
- // Table header.
- buf := new(SymBuffer)
- buf.Init(p)
- buf.SetSize(8 + p.ptrsize)
- off := 0
- off = buf.Uint32(off, 0xfffffffb)
- off = buf.Uint8(off, 0)
- off = buf.Uint8(off, 0)
- off = buf.Uint8(off, uint8(p.pcquantum))
- off = buf.Uint8(off, uint8(p.ptrsize))
- off = buf.Uint(off, uint64(nfunc), p.ptrsize)
- indexOff := off
- off += (nfunc*2 + 1) * p.ptrsize // function index, to be filled in
- off += 4 // file table start offset, to be filled in
- buf.SetSize(off)
-
- // One-file cache for reading PCData tables from package files.
- // TODO(rsc): Better I/O strategy.
- var (
- file *os.File
- fname string
- )
-
- // Files gives the file numbering for source file names recorded
- // in the binary.
- files := make(map[string]int)
-
- // Build the table, build the index, and build the file name numbering.
- // The loop here must visit functions in the same order that they will
- // be stored in the binary, or else binary search over the index will fail.
- // The runtime checks that the index is sorted properly at program start time.
- var lastSym *Sym
- for _, sym := range p.SymOrder {
- if sym.Kind != goobj.STEXT {
- continue
- }
- lastSym = sym
-
- // Treat no recorded function information same as all zeros.
- f := sym.Func
- if f == nil {
- f = &zerofunc
- }
-
- // Open package file if needed, for reading PC data.
- if fname != sym.Package.File {
- if file != nil {
- file.Close()
- }
- var err error
- file, err = os.Open(sym.Package.File)
- if err != nil {
- p.errorf("%v: %v", sym, err)
- return
- }
- fname = sym.Package.File
- }
-
- // off is the offset of the table entry where we're going to write
- // the encoded form of Func.
- // indexOff is the current position in the table index;
- // we add an entry in the index pointing at off.
- off = (buf.Size() + p.ptrsize - 1) &^ (p.ptrsize - 1)
- indexOff = buf.Addr(indexOff, sym.SymID, 0)
- indexOff = buf.Uint(indexOff, uint64(off), p.ptrsize)
-
- // The Func encoding starts with a header giving offsets
- // to data blobs, and then the data blobs themselves.
- // end gives the current write position for the data blobs.
- end := off + p.ptrsize + 3*4 + 5*4 + len(f.PCData)*4 + len(f.FuncData)*p.ptrsize
- if len(f.FuncData) > 0 {
- end += -end & (p.ptrsize - 1)
- }
- buf.SetSize(end)
-
- // entry uintptr
- // name int32
- // args int32
- // frame int32
- //
- // The frame recorded in the object file is
- // the frame size used in an assembly listing, which does
- // not include the caller PC on the stack.
- // The frame size we want to list here is the delta from
- // this function's SP to its caller's SP, which does include
- // the caller PC. Add p.ptrsize to f.Frame to adjust.
- // TODO(rsc): Record the same frame size in the object file.
- off = buf.Addr(off, sym.SymID, 0)
- off = buf.Uint32(off, uint32(addString(buf, sym.Name)))
- off = buf.Uint32(off, uint32(f.Args))
- off = buf.Uint32(off, uint32(f.Frame+p.ptrsize))
-
- // pcdata
- off = buf.Uint32(off, uint32(addPCTable(p, buf, file, f.PCSP)))
- off = buf.Uint32(off, uint32(addPCFileTable(p, buf, file, f.PCFile, sym, files)))
- off = buf.Uint32(off, uint32(addPCTable(p, buf, file, f.PCLine)))
- off = buf.Uint32(off, uint32(len(f.PCData)))
- off = buf.Uint32(off, uint32(len(f.FuncData)))
- for _, pcdata := range f.PCData {
- off = buf.Uint32(off, uint32(addPCTable(p, buf, file, pcdata)))
- }
-
- // funcdata
- if len(f.FuncData) > 0 {
- off += -off & (p.ptrsize - 1) // must be pointer-aligned
- for _, funcdata := range f.FuncData {
- if funcdata.Sym.Name == "" {
- off = buf.Uint(off, uint64(funcdata.Offset), p.ptrsize)
- } else {
- off = buf.Addr(off, funcdata.Sym, funcdata.Offset)
- }
- }
- }
-
- if off != end {
- p.errorf("internal error: invalid math in pclntab: off=%#x end=%#x", off, end)
- break
- }
- }
- if file != nil {
- file.Close()
- }
-
- // Final entry of index is end PC of last function.
- indexOff = buf.Addr(indexOff, lastSym.SymID, int64(lastSym.Size))
-
- // Start file table.
- // Function index is immediately followed by offset to file table.
- off = (buf.Size() + p.ptrsize - 1) &^ (p.ptrsize - 1)
- buf.Uint32(indexOff, uint32(off))
-
- // File table is an array of uint32s.
- // The first entry gives 1+n, the size of the array.
- // The following n entries hold offsets to string data.
- // File number n uses the string pointed at by entry n.
- // File number 0 is invalid.
- buf.SetSize(off + (1+len(files))*4)
- buf.Uint32(off, uint32(1+len(files)))
- var filestr []string
- for file := range files {
- filestr = append(filestr, file)
- }
- sort.Strings(filestr)
- for _, file := range filestr {
- id := files[file]
- buf.Uint32(off+4*id, uint32(addString(buf, file)))
- }
-
- pclntab := &Sym{
- Sym: &goobj.Sym{
- SymID: goobj.SymID{Name: "runtime.pclntab"},
- Kind: goobj.SPCLNTAB,
- Size: buf.Size(),
- Reloc: buf.Reloc(),
- },
- Bytes: buf.Bytes(),
- }
- p.addSym(pclntab)
-}
-
-// addString appends the string s to the buffer b.
-// It returns the offset of the beginning of the string in the buffer.
-func addString(b *SymBuffer, s string) int {
- off := b.Size()
- b.SetSize(off + len(s) + 1)
- copy(b.data[off:], s)
- return off
-}
-
-// addPCTable appends the PC-data table stored in the file f at the location loc
-// to the symbol buffer b. It returns the offset of the beginning of the table
-// in the buffer.
-func addPCTable(p *Prog, b *SymBuffer, f *os.File, loc goobj.Data) int {
- if loc.Size == 0 {
- return 0
- }
- off := b.Size()
- b.SetSize(off + int(loc.Size))
- _, err := f.ReadAt(b.data[off:off+int(loc.Size)], loc.Offset)
- if err != nil {
- p.errorf("%v", err)
- }
- return off
-}
-
-// addPCFileTable is like addPCTable, but it renumbers the file names referred to by the table
-// to use the global numbering maintained in the files map. It adds new files to the
-// map as necessary.
-func addPCFileTable(p *Prog, b *SymBuffer, f *os.File, loc goobj.Data, sym *Sym, files map[string]int) int {
- if loc.Size == 0 {
- return 0
- }
- off := b.Size()
-
- src := make([]byte, loc.Size)
- _, err := f.ReadAt(src, loc.Offset)
- if err != nil {
- p.errorf("%v", err)
- return 0
- }
-
- filenum := make([]int, len(sym.Func.File))
- for i, name := range sym.Func.File {
- num := files[name]
- if num == 0 {
- num = len(files) + 1
- files[name] = num
- }
- filenum[i] = num
- }
-
- var dst []byte
- newval := int32(-1)
- var it PCIter
- for it.Init(p, src); !it.Done; it.Next() {
- // value delta
- oldval := it.Value
- val := oldval
- if oldval != -1 {
- if oldval < 0 || int(oldval) >= len(filenum) {
- p.errorf("%s: corrupt pc-file table", sym)
- break
- }
- val = int32(filenum[oldval])
- }
- dv := val - newval
- newval = val
- uv := uint32(dv<<1) ^ uint32(dv>>31)
- dst = appendVarint(dst, uv)
-
- // pc delta
- dst = appendVarint(dst, it.NextPC-it.PC)
- }
- if it.Corrupt {
- p.errorf("%s: corrupt pc-file table", sym)
- }
-
- // terminating value delta
- dst = appendVarint(dst, 0)
-
- b.SetSize(off + len(dst))
- copy(b.data[off:], dst)
- return off
-}
-
-// A SymBuffer is a buffer for preparing the data image of a
-// linker-generated symbol.
-type SymBuffer struct {
- data []byte
- reloc []goobj.Reloc
- order binary.ByteOrder
- ptrsize int
-}
-
-// Init initializes the buffer for writing.
-func (b *SymBuffer) Init(p *Prog) {
- b.data = nil
- b.reloc = nil
- b.order = p.byteorder
- b.ptrsize = p.ptrsize
-}
-
-// Bytes returns the buffer data.
-func (b *SymBuffer) Bytes() []byte {
- return b.data
-}
-
-// SetSize sets the buffer's data size to n bytes.
-func (b *SymBuffer) SetSize(n int) {
- for cap(b.data) < n {
- b.data = append(b.data[:cap(b.data)], 0)
- }
- b.data = b.data[:n]
-}
-
-// Size returns the buffer's data size.
-func (b *SymBuffer) Size() int {
- return len(b.data)
-}
-
-// Reloc returns the buffered relocations.
-func (b *SymBuffer) Reloc() []goobj.Reloc {
- return b.reloc
-}
-
-// Uint8 sets the uint8 at offset off to v.
-// It returns the offset just beyond v.
-func (b *SymBuffer) Uint8(off int, v uint8) int {
- b.data[off] = v
- return off + 1
-}
-
-// Uint16 sets the uint16 at offset off to v.
-// It returns the offset just beyond v.
-func (b *SymBuffer) Uint16(off int, v uint16) int {
- b.order.PutUint16(b.data[off:], v)
- return off + 2
-}
-
-// Uint32 sets the uint32 at offset off to v.
-// It returns the offset just beyond v.
-func (b *SymBuffer) Uint32(off int, v uint32) int {
- b.order.PutUint32(b.data[off:], v)
- return off + 4
-}
-
-// Uint64 sets the uint64 at offset off to v.
-// It returns the offset just beyond v.
-func (b *SymBuffer) Uint64(off int, v uint64) int {
- b.order.PutUint64(b.data[off:], v)
- return off + 8
-}
-
-// Uint sets the size-byte unsigned integer at offset off to v.
-// It returns the offset just beyond v.
-func (b *SymBuffer) Uint(off int, v uint64, size int) int {
- switch size {
- case 1:
- return b.Uint8(off, uint8(v))
- case 2:
- return b.Uint16(off, uint16(v))
- case 4:
- return b.Uint32(off, uint32(v))
- case 8:
- return b.Uint64(off, v)
- }
- panic("invalid use of SymBuffer.SetUint")
-}
-
-// Addr sets the pointer-sized address at offset off to refer
-// to symoff bytes past the start of sym. It returns the offset
-// just beyond the address.
-func (b *SymBuffer) Addr(off int, sym goobj.SymID, symoff int64) int {
- b.reloc = append(b.reloc, goobj.Reloc{
- Offset: off,
- Size: b.ptrsize,
- Sym: sym,
- Add: int(symoff),
- Type: obj.R_ADDR,
- })
- return off + b.ptrsize
-}
-
-// A PCIter implements iteration over PC-data tables.
-//
-// var it PCIter
-// for it.Init(p, data); !it.Done; it.Next() {
-// it.Value holds from it.PC up to (but not including) it.NextPC
-// }
-// if it.Corrupt {
-// data was malformed
-// }
-//
-type PCIter struct {
- PC uint32
- NextPC uint32
- Value int32
- Done bool
- Corrupt bool
- p []byte
- start bool
- pcquantum uint32
-}
-
-// Init initializes the iteration.
-// On return, if it.Done is true, the iteration is over.
-// Otherwise it.Value applies in the pc range [it.PC, it.NextPC).
-func (it *PCIter) Init(p *Prog, buf []byte) {
- it.p = buf
- it.PC = 0
- it.NextPC = 0
- it.Value = -1
- it.start = true
- it.pcquantum = uint32(p.pcquantum)
- it.Done = false
- it.Next()
-}
-
-// Next steps forward one entry in the table.
-// On return, if it.Done is true, the iteration is over.
-// Otherwise it.Value applies in the pc range [it.PC, it.NextPC).
-func (it *PCIter) Next() {
- it.PC = it.NextPC
- if it.Done {
- return
- }
- if len(it.p) == 0 {
- it.Done = true
- return
- }
-
- // value delta
- uv, p, ok := decodeVarint(it.p)
- if !ok {
- it.Done = true
- it.Corrupt = true
- return
- }
- it.p = p
- if uv == 0 && !it.start {
- it.Done = true
- return
- }
- it.start = false
- sv := int32(uv>>1) ^ int32(uv<<31)>>31
- it.Value += sv
-
- // pc delta
- uv, it.p, ok = decodeVarint(it.p)
- if !ok {
- it.Done = true
- it.Corrupt = true
- return
- }
- it.NextPC = it.PC + uv*it.pcquantum
-}
-
-// decodeVarint decodes an unsigned varint from p,
-// reporting the value, the remainder of the data, and
-// whether the decoding was successful.
-func decodeVarint(p []byte) (v uint32, rest []byte, ok bool) {
- for shift := uint(0); ; shift += 7 {
- if len(p) == 0 {
- return
- }
- c := uint32(p[0])
- p = p[1:]
- v |= (c & 0x7F) << shift
- if c&0x80 == 0 {
- break
- }
- }
- return v, p, true
-}
-
-// appendVarint appends an unsigned varint encoding of v to p
-// and returns the resulting slice.
-func appendVarint(p []byte, v uint32) []byte {
- for ; v >= 0x80; v >>= 7 {
- p = append(p, byte(v)|0x80)
- }
- p = append(p, byte(v))
- return p
-}
+++ /dev/null
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package main
-
-import (
- "bytes"
- "cmd/internal/goobj"
- "fmt"
- "math/rand"
- "sort"
- "strings"
- "testing"
-)
-
-// Test of pcln table encoding.
-// testdata/genpcln.go generates an assembly file with
-// pseudorandom values for the data that pclntab stores.
-// This test recomputes the same pseudorandom stream
-// and checks that the final linked binary uses those values
-// as well.
-func TestPclntab(t *testing.T) {
- p := &Prog{
- GOOS: "darwin",
- GOARCH: "amd64",
- Error: func(s string) { t.Error(s) },
- StartSym: "start",
- omitRuntime: true,
- }
- var buf bytes.Buffer
- p.link(&buf, "testdata/pclntab.6")
- if p.NumError > 0 {
- return
- }
-
- // The algorithm for computing values here must match
- // the one in testdata/genpcln.go.
- for f := 0; f < 3; f++ {
- file := "input"
- line := 1
- rnd := rand.New(rand.NewSource(int64(f)))
- args := rnd.Intn(100) * 8
- frame := 32 + rnd.Intn(32)/8*8
- size := 200 + rnd.Intn(100)*8
-
- name := fmt.Sprintf("func%d", f)
- r, off, fargs, fframe, ok := findFunc(t, p, name)
- if !ok {
- continue // error already printed
- }
- if fargs != args {
- t.Errorf("%s: args=%d, want %d", name, fargs, args)
- }
- if fframe != frame+8 {
- t.Errorf("%s: frame=%d, want %d", name, fframe, frame+8)
- }
-
- // Check FUNCDATA 1.
- fdata, ok := loadFuncdata(t, r, name, off, 1)
- if ok {
- fsym := p.Syms[goobj.SymID{Name: fmt.Sprintf("funcdata%d", f)}]
- if fsym == nil {
- t.Errorf("funcdata%d is missing in binary", f)
- } else if fdata != fsym.Addr {
- t.Errorf("%s: funcdata 1 = %#x, want %#x", name, fdata, fsym.Addr)
- }
- }
-
- // Walk code checking pcdata values.
- spadj := 0
- pcdata1 := -1
- pcdata2 := -1
-
- checkPCSP(t, r, name, off, 0, 0)
- checkPCData(t, r, name, off, 0, 0, -1)
- checkPCData(t, r, name, off, 0, 1, -1)
- checkPCData(t, r, name, off, 0, 2, -1)
-
- firstpc := 4
- for i := 0; i < size; i++ {
- pc := firstpc + i // skip SP adjustment to allocate frame
- if i >= 0x100 && t.Failed() {
- break
- }
- // Possible SP adjustment.
- checkPCSP(t, r, name, off, pc, frame+spadj)
- if rnd.Intn(100) == 0 {
- checkPCFileLine(t, r, name, off, pc, file, line)
- checkPCData(t, r, name, off, pc, 1, pcdata1)
- checkPCData(t, r, name, off, pc, 2, pcdata2)
- i += 1
- pc = firstpc + i
- checkPCFileLine(t, r, name, off, pc-1, file, line)
- checkPCData(t, r, name, off, pc-1, 1, pcdata1)
- checkPCData(t, r, name, off, pc-1, 2, pcdata2)
- checkPCSP(t, r, name, off, pc-1, frame+spadj)
-
- if spadj <= -32 || spadj < 32 && rnd.Intn(2) == 0 {
- spadj += 8
- } else {
- spadj -= 8
- }
- checkPCSP(t, r, name, off, pc, frame+spadj)
- }
-
- // Possible PCFile change.
- if rnd.Intn(100) == 0 {
- file = fmt.Sprintf("file%d.s", rnd.Intn(10))
- line = rnd.Intn(100) + 1
- }
-
- // Possible PCLine change.
- if rnd.Intn(10) == 0 {
- line = rnd.Intn(1000) + 1
- }
-
- // Possible PCData $1 change.
- if rnd.Intn(100) == 0 {
- pcdata1 = rnd.Intn(1000)
- }
-
- // Possible PCData $2 change.
- if rnd.Intn(100) == 0 {
- pcdata2 = rnd.Intn(1000)
- }
-
- if i == 0 {
- checkPCFileLine(t, r, name, off, 0, file, line)
- checkPCFileLine(t, r, name, off, pc-1, file, line)
- }
- checkPCFileLine(t, r, name, off, pc, file, line)
- checkPCData(t, r, name, off, pc, 1, pcdata1)
- checkPCData(t, r, name, off, pc, 2, pcdata2)
- }
- }
-}
-
-// findFunc finds the function information in the pclntab of p
-// for the function with the given name.
-// It returns a symbol reader for pclntab, the offset of the function information
-// within that symbol, and the args and frame values read out of the information.
-func findFunc(t *testing.T, p *Prog, name string) (r *SymReader, off, args, frame int, ok bool) {
- tabsym := p.Syms[goobj.SymID{Name: "runtime.pclntab"}]
- if tabsym == nil {
- t.Errorf("pclntab is missing in binary")
- return
- }
-
- r = new(SymReader)
- r.Init(p, tabsym)
-
- // pclntab must with 8-byte header
- if r.Uint32(0) != 0xfffffffb || r.Uint8(4) != 0 || r.Uint8(5) != 0 || r.Uint8(6) != uint8(p.pcquantum) || r.Uint8(7) != uint8(p.ptrsize) {
- t.Errorf("pclntab has incorrect header %.8x", r.data[:8])
- return
- }
-
- sym := p.Syms[goobj.SymID{Name: name}]
- if sym == nil {
- t.Errorf("%s is missing in the binary", name)
- return
- }
-
- // index is nfunc addr0 off0 addr1 off1 ... addr_nfunc (sentinel)
- nfunc := int(r.Addr(8))
- i := sort.Search(nfunc, func(i int) bool {
- return r.Addr(8+p.ptrsize*(1+2*i)) >= sym.Addr
- })
- if entry := r.Addr(8 + p.ptrsize*(1+2*i)); entry != sym.Addr {
- indexTab := make([]Addr, 2*nfunc+1)
- for j := range indexTab {
- indexTab[j] = r.Addr(8 + p.ptrsize*(1+j))
- }
- t.Errorf("pclntab is missing entry for %s (%#x): %#x", name, sym.Addr, indexTab)
- return
- }
-
- off = int(r.Addr(8 + p.ptrsize*(1+2*i+1)))
-
- // func description at off is
- // entry addr
- // nameoff uint32
- // args uint32
- // frame uint32
- // pcspoff uint32
- // pcfileoff uint32
- // pclineoff uint32
- // npcdata uint32
- // nfuncdata uint32
- // pcdata npcdata*uint32
- // funcdata nfuncdata*addr
- //
- if entry := r.Addr(off); entry != sym.Addr {
- t.Errorf("pclntab inconsistent: entry for %s addr=%#x has entry=%#x", name, sym.Addr, entry)
- return
- }
- nameoff := int(r.Uint32(off + p.ptrsize))
- args = int(r.Uint32(off + p.ptrsize + 1*4))
- frame = int(r.Uint32(off + p.ptrsize + 2*4))
-
- fname := r.String(nameoff)
- if fname != name {
- t.Errorf("pclntab inconsistent: entry for %s addr=%#x has name %q", name, sym.Addr, fname)
- }
-
- ok = true // off, args, frame are usable
- return
-}
-
-// loadFuncdata returns the funcdata #fnum value
-// loaded from the function information for name.
-func loadFuncdata(t *testing.T, r *SymReader, name string, off int, fnum int) (Addr, bool) {
- npcdata := int(r.Uint32(off + r.p.ptrsize + 6*4))
- nfuncdata := int(r.Uint32(off + r.p.ptrsize + 7*4))
- if fnum >= nfuncdata {
- t.Errorf("pclntab(%s): no funcdata %d (only < %d)", name, fnum, nfuncdata)
- return 0, false
- }
- fdataoff := off + r.p.ptrsize + (8+npcdata)*4 + fnum*r.p.ptrsize
- fdataoff += fdataoff & 4
- return r.Addr(fdataoff), true
-}
-
-// checkPCSP checks that the PCSP table in the function information at off
-// lists spadj as the sp delta for pc.
-func checkPCSP(t *testing.T, r *SymReader, name string, off, pc, spadj int) {
- pcoff := r.Uint32(off + r.p.ptrsize + 3*4)
- pcval, ok := readPCData(t, r, name, "PCSP", pcoff, pc)
- if !ok {
- return
- }
- if pcval != spadj {
- t.Errorf("pclntab(%s): at pc=+%#x, pcsp=%d, want %d", name, pc, pcval, spadj)
- }
-}
-
-// checkPCSP checks that the PCFile and PCLine tables in the function information at off
-// list file, line as the file name and line number for pc.
-func checkPCFileLine(t *testing.T, r *SymReader, name string, off, pc int, file string, line int) {
- pcfileoff := r.Uint32(off + r.p.ptrsize + 4*4)
- pclineoff := r.Uint32(off + r.p.ptrsize + 5*4)
- pcfilenum, ok1 := readPCData(t, r, name, "PCFile", pcfileoff, pc)
- pcline, ok2 := readPCData(t, r, name, "PCLine", pclineoff, pc)
- if !ok1 || !ok2 {
- return
- }
- nfunc := int(r.Addr(8))
- filetaboff := r.Uint32(8 + r.p.ptrsize*2*(nfunc+1))
- nfile := int(r.Uint32(int(filetaboff)))
- if pcfilenum <= 0 || pcfilenum >= nfile {
- t.Errorf("pclntab(%s): at pc=+%#x, filenum=%d (invalid; nfile=%d)", name, pc, pcfilenum, nfile)
- }
- pcfile := r.String(int(r.Uint32(int(filetaboff) + pcfilenum*4)))
- if !strings.HasSuffix(pcfile, file) {
- t.Errorf("pclntab(%s): at pc=+%#x, file=%q, want %q", name, pc, pcfile, file)
- }
- if pcline != line {
- t.Errorf("pclntab(%s): at pc=+%#x, line=%d, want %d", name, pc, pcline, line)
- }
-}
-
-// checkPCData checks that the PCData#pnum table in the function information at off
-// list val as the value for pc.
-func checkPCData(t *testing.T, r *SymReader, name string, off, pc, pnum, val int) {
- pcoff := r.Uint32(off + r.p.ptrsize + (8+pnum)*4)
- pcval, ok := readPCData(t, r, name, fmt.Sprintf("PCData#%d", pnum), pcoff, pc)
- if !ok {
- return
- }
- if pcval != val {
- t.Errorf("pclntab(%s): at pc=+%#x, pcdata#%d=%d, want %d", name, pc, pnum, pcval, val)
- }
-}
-
-// readPCData reads the PCData table offset off
-// to obtain and return the value associated with pc.
-func readPCData(t *testing.T, r *SymReader, name, pcdataname string, pcoff uint32, pc int) (int, bool) {
- // "If pcsp, pcfile, pcln, or any of the pcdata offsets is zero,
- // that table is considered missing, and all PCs take value -1."
- if pcoff == 0 {
- return -1, true
- }
-
- var it PCIter
- for it.Init(r.p, r.data[pcoff:]); !it.Done; it.Next() {
- if it.PC <= uint32(pc) && uint32(pc) < it.NextPC {
- return int(it.Value), true
- }
- }
- if it.Corrupt {
- t.Errorf("pclntab(%s): %s: corrupt pcdata table", name, pcdataname)
- }
- return 0, false
-}
-
-// A SymReader provides typed access to the data for a symbol.
-type SymReader struct {
- p *Prog
- data []byte
-}
-
-func (r *SymReader) Init(p *Prog, sym *Sym) {
- seg := sym.Section.Segment
- off := sym.Addr - seg.VirtAddr
- data := seg.Data[off : off+Addr(sym.Size)]
- r.p = p
- r.data = data
-}
-
-func (r *SymReader) Uint8(off int) uint8 {
- return r.data[off]
-}
-
-func (r *SymReader) Uint16(off int) uint16 {
- return r.p.byteorder.Uint16(r.data[off:])
-}
-
-func (r *SymReader) Uint32(off int) uint32 {
- return r.p.byteorder.Uint32(r.data[off:])
-}
-
-func (r *SymReader) Uint64(off int) uint64 {
- return r.p.byteorder.Uint64(r.data[off:])
-}
-
-func (r *SymReader) Addr(off int) Addr {
- if r.p.ptrsize == 4 {
- return Addr(r.Uint32(off))
- }
- return Addr(r.Uint64(off))
-}
-
-func (r *SymReader) String(off int) string {
- end := off
- for r.data[end] != '\x00' {
- end++
- }
- return string(r.data[off:end])
-}
+++ /dev/null
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package main
-
-import (
- "cmd/internal/goobj"
- "encoding/binary"
- "fmt"
- "go/build"
- "io"
- "os"
- "runtime"
-)
-
-// A Prog holds state for constructing an executable (program) image.
-//
-// The usual sequence of operations on a Prog is:
-//
-// p.init()
-// p.scan(file)
-// p.dead()
-// p.runtime()
-// p.layout()
-// p.load()
-// p.debug()
-// p.write(w)
-//
-// p.init is in this file. The rest of the methods are in files
-// named for the method. The convenience method p.link runs
-// this sequence.
-//
-type Prog struct {
- // Context
- GOOS string // target operating system
- GOARCH string // target architecture
- Format string // desired file format ("elf", "macho", ...)
- Error func(string) // called to report an error (if set)
- NumError int // number of errors printed
- StartSym string
-
- // Derived context
- arch
- formatter formatter
- startSym goobj.SymID
- pkgdir string
- omitRuntime bool // do not load runtime package
-
- // Input
- Packages map[string]*Package // loaded packages, by import path
- Syms map[goobj.SymID]*Sym // defined symbols, by symbol ID
- Missing map[goobj.SymID]bool // missing symbols
- Dead map[goobj.SymID]bool // symbols removed as dead
- SymOrder []*Sym // order syms were scanned
- MaxVersion int // max SymID.Version, for generating fresh symbol IDs
-
- // Output
- UnmappedSize Addr // size of unmapped region at address 0
- HeaderSize Addr // size of object file header
- Entry Addr // virtual address where execution begins
- Segments []*Segment // loaded memory segments
-}
-
-// An arch describes architecture-dependent settings.
-type arch struct {
- byteorder binary.ByteOrder
- ptrsize int
- pcquantum int
-}
-
-// A formatter takes care of the details of generating a particular
-// kind of executable file.
-type formatter interface {
- // headerSize returns the footprint of the header for p
- // in both virtual address space and file bytes.
- // The footprint does not include any bytes stored at the
- // end of the file.
- headerSize(p *Prog) (virt, file Addr)
-
- // write writes the executable file for p to w.
- write(w io.Writer, p *Prog)
-}
-
-// An Addr represents a virtual memory address, a file address, or a size.
-// It must be a uint64, not a uintptr, so that a 32-bit linker can still generate a 64-bit binary.
-// It must be unsigned in order to link programs placed at very large start addresses.
-// Math involving Addrs must be checked carefully not to require negative numbers.
-type Addr uint64
-
-// A Package is a Go package loaded from a file.
-type Package struct {
- *goobj.Package // table of contents
- File string // file name for reopening
- Syms []*Sym // symbols defined by this package
-}
-
-// A Sym is a symbol defined in a loaded package.
-type Sym struct {
- *goobj.Sym // symbol metadata from package file
- Package *Package // package defining symbol
- Section *Section // section where symbol is placed in output program
- Addr Addr // virtual address of symbol in output program
- Bytes []byte // symbol data, for internally defined symbols
-}
-
-// A Segment is a loaded memory segment.
-// A Prog is expected to have segments named "text" and optionally "data",
-// in that order, before any other segments.
-type Segment struct {
- Name string // name of segment: "text", "data", ...
- VirtAddr Addr // virtual memory address of segment base
- VirtSize Addr // size of segment in memory
- FileOffset Addr // file offset of segment base
- FileSize Addr // size of segment in file; can be less than VirtSize
- Sections []*Section // sections inside segment
- Data []byte // raw data of segment image
-}
-
-// A Section is part of a loaded memory segment.
-type Section struct {
- Name string // name of section: "text", "rodata", "noptrbss", and so on
- VirtAddr Addr // virtual memory address of section base
- Size Addr // size of section in memory
- Align Addr // required alignment
- InFile bool // section has image data in file (like data, unlike bss)
- Syms []*Sym // symbols stored in section
- Segment *Segment // segment containing section
-}
-
-func (p *Prog) errorf(format string, args ...interface{}) {
- if p.Error != nil {
- p.Error(fmt.Sprintf(format, args...))
- } else {
- fmt.Fprintf(os.Stderr, format+"\n", args...)
- }
- p.NumError++
-}
-
-// link is the one-stop convenience method for running a link.
-// It writes to w the object file generated from using mainFile as the main package.
-func (p *Prog) link(w io.Writer, mainFile string) {
- p.init()
- p.scan(mainFile)
- if p.NumError > 0 {
- return
- }
- p.dead()
- p.runtime()
- p.autoData()
- p.layout()
- p.autoConst()
- if p.NumError > 0 {
- return
- }
- p.load()
- if p.NumError > 0 {
- return
- }
- p.debug()
- if p.NumError > 0 {
- return
- }
- p.write(w)
-}
-
-// init initializes p for use by the other methods.
-func (p *Prog) init() {
- // Set default context if not overridden.
- if p.GOOS == "" {
- p.GOOS = build.Default.GOOS
- }
- if p.GOARCH == "" {
- p.GOARCH = build.Default.GOARCH
- }
- if p.Format == "" {
- p.Format = goosFormat[p.GOOS]
- if p.Format == "" {
- p.errorf("no default file format for GOOS %q", p.GOOS)
- return
- }
- }
- if p.StartSym == "" {
- p.StartSym = fmt.Sprintf("_rt0_%s_%s", p.GOARCH, p.GOOS)
- }
-
- // Derive internal context.
- p.formatter = formatters[p.Format]
- if p.formatter == nil {
- p.errorf("unknown output file format %q", p.Format)
- return
- }
- p.startSym = goobj.SymID{Name: p.StartSym}
- arch, ok := arches[p.GOARCH]
- if !ok {
- p.errorf("unknown GOOS %q", p.GOOS)
- return
- }
- p.arch = arch
-
- p.pkgdir = fmt.Sprintf("%s/pkg/%s_%s", runtime.GOROOT(), p.GOOS, p.GOARCH)
-}
-
-// goosFormat records the default format for each known GOOS value.
-var goosFormat = map[string]string{
- "darwin": "darwin",
-}
-
-// formatters records the format implementation for each known format value.
-var formatters = map[string]formatter{
- "darwin": machoFormat{},
-}
-
-var arches = map[string]arch{
- "amd64": {
- byteorder: binary.LittleEndian,
- ptrsize: 8,
- pcquantum: 1,
- },
-}
+++ /dev/null
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package main
-
-import (
- "bytes"
- "fmt"
- "io/ioutil"
- "testing"
-)
-
-// shiftProg adjusts the addresses in p.
-// It adds vdelta to all virtual addresses and fdelta to all file offsets.
-func shiftProg(p *Prog, vdelta Addr, fdelta Addr) {
- p.Entry += vdelta
- for _, seg := range p.Segments {
- seg.FileOffset += fdelta
- seg.VirtAddr += vdelta
- for _, sect := range seg.Sections {
- sect.VirtAddr += vdelta
- for _, sym := range sect.Syms {
- sym.Addr += vdelta
- }
- }
- }
-}
-
-// diffProg returns a list of differences between p and q,
-// assuming p is being checked and q is the correct answer.
-func diffProg(p, q *Prog) []string {
- var errors []string
- if p.UnmappedSize != q.UnmappedSize {
- errors = append(errors, fmt.Sprintf("p.UnmappedSize = %#x, want %#x", p.UnmappedSize, q.UnmappedSize))
- }
- if p.HeaderSize != q.HeaderSize {
- errors = append(errors, fmt.Sprintf("p.HeaderSize = %#x, want %#x", p.HeaderSize, q.HeaderSize))
- }
- if p.Entry != q.Entry {
- errors = append(errors, fmt.Sprintf("p.Entry = %#x, want %#x", p.Entry, q.Entry))
- }
- for i := 0; i < len(p.Segments) || i < len(q.Segments); i++ {
- if i >= len(p.Segments) {
- errors = append(errors, fmt.Sprintf("p missing segment %q", q.Segments[i].Name))
- continue
- }
- if i >= len(q.Segments) {
- errors = append(errors, fmt.Sprintf("p has extra segment %q", p.Segments[i].Name))
- continue
- }
- pseg := p.Segments[i]
- qseg := q.Segments[i]
- if pseg.Name != qseg.Name {
- errors = append(errors, fmt.Sprintf("segment %d Name = %q, want %q", i, pseg.Name, qseg.Name))
- continue // probably out of sync
- }
- if pseg.VirtAddr != qseg.VirtAddr {
- errors = append(errors, fmt.Sprintf("segment %q VirtAddr = %#x, want %#x", pseg.Name, pseg.VirtAddr, qseg.VirtAddr))
- }
- if pseg.VirtSize != qseg.VirtSize {
- errors = append(errors, fmt.Sprintf("segment %q VirtSize = %#x, want %#x", pseg.Name, pseg.VirtSize, qseg.VirtSize))
- }
- if pseg.FileOffset != qseg.FileOffset {
- errors = append(errors, fmt.Sprintf("segment %q FileOffset = %#x, want %#x", pseg.Name, pseg.FileOffset, qseg.FileOffset))
- }
- if pseg.FileSize != qseg.FileSize {
- errors = append(errors, fmt.Sprintf("segment %q FileSize = %#x, want %#x", pseg.Name, pseg.FileSize, qseg.FileSize))
- }
- if len(pseg.Data) != len(qseg.Data) {
- errors = append(errors, fmt.Sprintf("segment %q len(Data) = %d, want %d", pseg.Name, len(pseg.Data), len(qseg.Data)))
- } else if !bytes.Equal(pseg.Data, qseg.Data) {
- errors = append(errors, fmt.Sprintf("segment %q Data mismatch:\n\thave %x\n\twant %x", pseg.Name, pseg.Data, qseg.Data))
- }
-
- for j := 0; j < len(pseg.Sections) || j < len(qseg.Sections); j++ {
- if j >= len(pseg.Sections) {
- errors = append(errors, fmt.Sprintf("segment %q missing section %q", pseg.Name, qseg.Sections[i].Name))
- continue
- }
- if j >= len(qseg.Sections) {
- errors = append(errors, fmt.Sprintf("segment %q has extra section %q", pseg.Name, pseg.Sections[i].Name))
- continue
- }
- psect := pseg.Sections[j]
- qsect := qseg.Sections[j]
- if psect.Name != qsect.Name {
- errors = append(errors, fmt.Sprintf("segment %q, section %d Name = %q, want %q", pseg.Name, j, psect.Name, qsect.Name))
- continue // probably out of sync
- }
-
- if psect.VirtAddr != qsect.VirtAddr {
- errors = append(errors, fmt.Sprintf("segment %q section %q VirtAddr = %#x, want %#x", pseg.Name, psect.Name, psect.VirtAddr, qsect.VirtAddr))
- }
- if psect.Size != qsect.Size {
- errors = append(errors, fmt.Sprintf("segment %q section %q Size = %#x, want %#x", pseg.Name, psect.Name, psect.Size, qsect.Size))
- }
- if psect.Align != qsect.Align {
- errors = append(errors, fmt.Sprintf("segment %q section %q Align = %#x, want %#x", pseg.Name, psect.Name, psect.Align, qsect.Align))
- }
- }
- }
-
- return errors
-}
-
-// cloneProg returns a deep copy of p.
-func cloneProg(p *Prog) *Prog {
- q := new(Prog)
- *q = *p
- q.Segments = make([]*Segment, len(p.Segments))
- for i, seg := range p.Segments {
- q.Segments[i] = cloneSegment(seg)
- }
- return q
-}
-
-// cloneSegment returns a deep copy of seg.
-func cloneSegment(seg *Segment) *Segment {
- t := new(Segment)
- *t = *seg
- t.Sections = make([]*Section, len(seg.Sections))
- for i, sect := range seg.Sections {
- t.Sections[i] = cloneSection(sect)
- }
- t.Data = make([]byte, len(seg.Data))
- copy(t.Data, seg.Data)
- return t
-}
-
-// cloneSection returns a deep copy of section.
-func cloneSection(sect *Section) *Section {
- // At the moment, there's nothing we need to make a deep copy of.
- t := new(Section)
- *t = *sect
- return t
-}
-
-const saveMismatch = true
-
-// checkGolden checks that data matches the named file.
-// If not, it reports the error to the test.
-func checkGolden(t *testing.T, data []byte, name string) {
- golden := mustParseHexdumpFile(t, name)
- if !bytes.Equal(data, golden) {
- if saveMismatch {
- ioutil.WriteFile(name+".raw", data, 0666)
- ioutil.WriteFile(name+".hex", []byte(hexdump(data)), 0666)
- }
- // TODO(rsc): A better diff would be nice, as needed.
- i := 0
- for i < len(data) && i < len(golden) && data[i] == golden[i] {
- i++
- }
- if i >= len(data) {
- t.Errorf("%s: output file shorter than expected: have %d bytes, want %d", name, len(data), len(golden))
- } else if i >= len(golden) {
- t.Errorf("%s: output file larger than expected: have %d bytes, want %d", name, len(data), len(golden))
- } else {
- t.Errorf("%s: output file differs at byte %d: have %#02x, want %#02x", name, i, data[i], golden[i])
- }
- }
-}
+++ /dev/null
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Generation of runtime-accessible data structures.
-// See also debug.go.
-
-package main
-
-import "cmd/internal/goobj"
-
-func (p *Prog) runtime() {
- p.pclntab()
-
- // TODO: Implement garbage collection data.
- p.addSym(&Sym{
- Sym: &goobj.Sym{
- SymID: goobj.SymID{Name: "runtime.gcdata"},
- Kind: goobj.SRODATA,
- },
- })
- p.addSym(&Sym{
- Sym: &goobj.Sym{
- SymID: goobj.SymID{Name: "runtime.gcbss"},
- Kind: goobj.SRODATA,
- },
- })
-}
+++ /dev/null
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Initial scan of packages making up a program.
-
-// TODO(rsc): Rename goobj.SymID.Version to StaticID to avoid confusion with the ELF meaning of version.
-// TODO(rsc): Fix file format so that SBSS/SNOPTRBSS with data is listed as SDATA/SNOPTRDATA.
-// TODO(rsc): Parallelize scan to overlap file i/o where possible.
-
-package main
-
-import (
- "cmd/internal/goobj"
- "os"
- "sort"
- "strings"
-)
-
-// scan scans all packages making up the program, starting with package main defined in mainfile.
-func (p *Prog) scan(mainfile string) {
- p.initScan()
- p.scanFile("main", mainfile)
- if len(p.Missing) > 0 && !p.omitRuntime {
- p.scanImport("runtime")
- }
-
- var missing []string
- for sym := range p.Missing {
- if !p.isAuto(sym) {
- missing = append(missing, sym.String())
- }
- }
-
- if missing != nil {
- sort.Strings(missing)
- for _, sym := range missing {
- p.errorf("undefined: %s", sym)
- }
- }
-
- // TODO(rsc): Walk import graph to diagnose cycles.
-}
-
-// initScan initializes the Prog fields needed by scan.
-func (p *Prog) initScan() {
- p.Packages = make(map[string]*Package)
- p.Syms = make(map[goobj.SymID]*Sym)
- p.Missing = make(map[goobj.SymID]bool)
- p.Missing[p.startSym] = true
-}
-
-// scanFile reads file to learn about the package with the given import path.
-func (p *Prog) scanFile(pkgpath string, file string) {
- pkg := &Package{
- File: file,
- }
- p.Packages[pkgpath] = pkg
-
- f, err := os.Open(file)
- if err != nil {
- p.errorf("%v", err)
- return
- }
- gp, err := goobj.Parse(f, pkgpath)
- f.Close()
- if err != nil {
- p.errorf("reading %s: %v", file, err)
- return
- }
-
- // TODO(rsc): Change cmd/internal/goobj to record package name as gp.Name.
- // TODO(rsc): If pkgpath == "main", check that gp.Name == "main".
-
- pkg.Package = gp
-
- for _, gs := range gp.Syms {
- // TODO(rsc): Fix file format instead of this workaround.
- if gs.Data.Size > 0 {
- switch gs.Kind {
- case goobj.SBSS:
- gs.Kind = goobj.SDATA
- case goobj.SNOPTRBSS:
- gs.Kind = goobj.SNOPTRDATA
- }
- }
-
- if gs.Version != 0 {
- gs.Version += p.MaxVersion
- }
- for i := range gs.Reloc {
- r := &gs.Reloc[i]
- if r.Sym.Version != 0 {
- r.Sym.Version += p.MaxVersion
- }
- if p.Syms[r.Sym] == nil {
- p.Missing[r.Sym] = true
- }
- }
- if gs.Func != nil {
- for i := range gs.Func.FuncData {
- fdata := &gs.Func.FuncData[i]
- if fdata.Sym.Name != "" {
- if fdata.Sym.Version != 0 {
- fdata.Sym.Version += p.MaxVersion
- }
- if p.Syms[fdata.Sym] == nil {
- p.Missing[fdata.Sym] = true
- }
- }
- }
- }
- if old := p.Syms[gs.SymID]; old != nil {
- // Duplicate definition of symbol. Is it okay?
- // TODO(rsc): Write test for this code.
- switch {
- // If both symbols are BSS (no data), take max of sizes
- // but otherwise ignore second symbol.
- case old.Data.Size == 0 && gs.Data.Size == 0:
- if old.Size < gs.Size {
- old.Size = gs.Size
- }
- continue
-
- // If one is in BSS and one is not, use the one that is not.
- case old.Data.Size > 0 && gs.Data.Size == 0:
- continue
- case gs.Data.Size > 0 && old.Data.Size == 0:
- break // install gs as new symbol below
-
- // If either is marked as DupOK, we can keep either one.
- // Keep the one that we saw first.
- case old.DupOK || gs.DupOK:
- continue
-
- // Otherwise, there's an actual conflict:
- default:
- p.errorf("symbol %s defined in both %s and %s %v %v", gs.SymID, old.Package.File, file, old.Data, gs.Data)
- continue
- }
- }
- s := &Sym{
- Sym: gs,
- Package: pkg,
- }
- p.addSym(s)
- delete(p.Missing, gs.SymID)
-
- if s.Data.Size > int64(s.Size) {
- p.errorf("%s: initialized data larger than symbol (%d > %d)", s, s.Data.Size, s.Size)
- }
- }
- p.MaxVersion += pkg.MaxVersion
-
- for i, pkgpath := range pkg.Imports {
- // TODO(rsc): Fix file format to drop .a from recorded import path.
- pkgpath = strings.TrimSuffix(pkgpath, ".a")
- pkg.Imports[i] = pkgpath
-
- p.scanImport(pkgpath)
- }
-}
-
-func (p *Prog) addSym(s *Sym) {
- pkg := s.Package
- if pkg == nil {
- pkg = p.Packages[""]
- if pkg == nil {
- pkg = &Package{}
- p.Packages[""] = pkg
- }
- s.Package = pkg
- }
- pkg.Syms = append(pkg.Syms, s)
- p.Syms[s.SymID] = s
- p.SymOrder = append(p.SymOrder, s)
-}
-
-// scanImport finds the object file for the given import path and then scans it.
-func (p *Prog) scanImport(pkgpath string) {
- if p.Packages[pkgpath] != nil {
- return // already loaded
- }
-
- // TODO(rsc): Implement correct search to find file.
- p.scanFile(pkgpath, p.pkgdir+"/"+pkgpath+".a")
-}
+++ /dev/null
-ALL=\
- autosection.6\
- autoweak.6\
- dead.6\
- hello.6\
- layout.6\
- pclntab.6\
-
-all: $(ALL)
-
-%.6: %.s
- GOARCH=amd64 GOOS=darwin go tool asm -o $*.6 -I $(shell go env GOROOT)/pkg/include -trimpath=$(shell pwd) $*.s
-
-pclntab.s: genpcln.go
- go run genpcln.go >pclntab.s
+++ /dev/null
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Test of section-named symbols.
-
-#include "textflag.h"
-
-TEXT start(SB),7,$0
- MOVQ $autotab(SB),AX
- MOVQ $autoptr(SB),AX
- RET
-
-GLOBL zero(SB), $8
-
-GLOBL zeronoptr(SB), NOPTR, $16
-
-// text
-DATA autotab+0x00(SB)/8, $runtime·text(SB)
-DATA autotab+0x08(SB)/8, $start(SB)
-DATA autotab+0x10(SB)/8, $runtime·etext(SB)
-DATA autotab+0x18(SB)/8, $start+16(SB)
-
-// data
-DATA autotab+0x20(SB)/8, $runtime·data(SB)
-DATA autotab+0x28(SB)/8, $autotab(SB)
-DATA autotab+0x30(SB)/8, $runtime·edata(SB)
-DATA autotab+0x38(SB)/8, $nonzero+4(SB)
-
-// bss
-DATA autotab+0x40(SB)/8, $runtime·bss(SB)
-DATA autotab+0x48(SB)/8, $zero(SB)
-DATA autotab+0x50(SB)/8, $runtime·ebss(SB)
-DATA autotab+0x58(SB)/8, $zero+8(SB)
-
-// noptrdata
-DATA autotab+0x60(SB)/8, $runtime·noptrdata(SB)
-DATA autotab+0x68(SB)/8, $nonzeronoptr(SB)
-DATA autotab+0x70(SB)/8, $runtime·enoptrdata(SB)
-DATA autotab+0x78(SB)/8, $nonzeronoptr+8(SB)
-
-// noptrbss
-DATA autotab+0x80(SB)/8, $runtime·noptrbss(SB)
-DATA autotab+0x88(SB)/8, $zeronoptr(SB)
-DATA autotab+0x90(SB)/8, $runtime·enoptrbss(SB)
-DATA autotab+0x98(SB)/8, $zeronoptr+16(SB)
-
-// end
-DATA autotab+0xa0(SB)/8, $runtime·end(SB)
-DATA autotab+0xa8(SB)/8, $zeronoptr+16(SB)
-
-GLOBL autotab(SB), $0xb0
-
-DATA nonzero(SB)/4, $1
-GLOBL nonzero(SB), $4
-
-DATA nonzeronoptr(SB)/8, $2
-GLOBL nonzeronoptr(SB), NOPTR, $8
-
-GLOBL autoptr(SB), $0
+++ /dev/null
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Test of go.weak symbols.
-
-TEXT start(SB),7,$0
- MOVQ $autotab(SB),AX
- MOVQ $autoptr(SB),AX
- RET
-
-// go.weak.sym should resolve to sym, because sym is in the binary.
-DATA autotab+0(SB)/8, $go·weak·sym(SB)
-DATA autotab+8(SB)/8, $sym(SB)
-
-// go.weak.missingsym should resolve to 0, because missingsym is not in the binary.
-DATA autotab+16(SB)/8, $go·weak·missingsym(SB)
-DATA autotab+24(SB)/8, $0
-
-// go.weak.deadsym should resolve to 0, because deadsym is discarded during dead code removal
-DATA autotab+32(SB)/8, $go·weak·deadsym(SB)
-DATA autotab+40(SB)/8, $0
-
-GLOBL autotab(SB), $48
-
-GLOBL sym(SB), $1
-
-GLOBL deadsym(SB), $1
-
-GLOBL autoptr(SB), $0
+++ /dev/null
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Test of dead code removal.
-// Symbols with names beginning with dead_ should be discarded.
-// Others should be kept.
-
-TEXT start(SB),7,$0 // start symbol
- MOVQ $data1<>(SB), AX
- CALL text1(SB)
- MOVQ $text2(SB), BX
- RET
-
-TEXT text1(SB),7,$0
- FUNCDATA $1, funcdata+4(SB)
- RET
-
-TEXT text2(SB),7,$0
- MOVQ $runtime·edata(SB),BX
- RET
-
-DATA data1<>+0(SB)/8, $data2(SB)
-DATA data1<>+8(SB)/8, $data3(SB)
-GLOBL data1<>(SB), $16
-GLOBL data2(SB), $1
-GLOBL data3(SB), $1
-GLOBL funcdata(SB), $8
-
-TEXT dead_start(SB),7,$0
- MOVQ $dead_data1(SB), AX
- CALL dead_text1(SB)
- MOVQ $dead_text2(SB), BX
- RET
-
-TEXT dead_text1(SB),7,$0
- FUNCDATA $1, dead_funcdata+4(SB)
- RET
-
-TEXT dead_text2(SB),7,$0
- RET
-
-DATA dead_data1+0(SB)/8, $dead_data2(SB)
-DATA dead_data1+8(SB)/8, $dead_data3(SB)
-GLOBL dead_data1(SB), $16
-GLOBL dead_data2(SB), $1
-GLOBL dead_data3(SB), $1
-GLOBL dead_funcdata(SB), $8
+++ /dev/null
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This program generates a .s file using a pseudorandom
-// value stream for the runtime function data.
-// The pclntab test checks that the linked copy
-// still has the same pseudorandom value stream.
-
-package main
-
-import (
- "fmt"
- "math/rand"
-)
-
-func main() {
- fmt.Printf("// generated by genpcln.go; do not edit\n\n")
- for f := 0; f < 3; f++ {
- r := rand.New(rand.NewSource(int64(f)))
- file := "input"
- line := 1
- args := r.Intn(100) * 8
- frame := 32 + r.Intn(32)/8*8
- fmt.Printf("#line %d %q\n", line, file)
- fmt.Printf("TEXT func%d(SB),7,$%d-%d\n", f, frame, args)
- fmt.Printf("\tFUNCDATA $1, funcdata%d(SB)\n", f)
- fmt.Printf("#line %d %q\n", line, file)
- size := 200 + r.Intn(100)*8
- spadj := 0
- flushed := 0
- firstpc := 4
- flush := func(i int) {
- for i-flushed >= 10 {
- fmt.Printf("#line %d %q\n", line, file)
- fmt.Printf("/*%#04x*/\tMOVQ $0x123456789, AX\n", firstpc+flushed)
- flushed += 10
- }
- for i-flushed >= 5 {
- fmt.Printf("#line %d %q\n", line, file)
- fmt.Printf("/*%#04x*/\tMOVL $0x1234567, AX\n", firstpc+flushed)
- flushed += 5
- }
- for i-flushed > 0 {
- fmt.Printf("#line %d %q\n", line, file)
- fmt.Printf("/*%#04x*/\tBYTE $0\n", firstpc+flushed)
- flushed++
- }
- }
- for i := 0; i < size; i++ {
- // Possible SP adjustment.
- if r.Intn(100) == 0 {
- flush(i)
- fmt.Printf("#line %d %q\n", line, file)
- if spadj <= -32 || spadj < 32 && r.Intn(2) == 0 {
- spadj += 8
- fmt.Printf("/*%#04x*/\tPUSHQ AX\n", firstpc+i)
- } else {
- spadj -= 8
- fmt.Printf("/*%#04x*/\tPOPQ AX\n", firstpc+i)
- }
- i += 1
- flushed = i
- }
-
- // Possible PCFile change.
- if r.Intn(100) == 0 {
- flush(i)
- file = fmt.Sprintf("file%d.s", r.Intn(10))
- line = r.Intn(100) + 1
- }
-
- // Possible PCLine change.
- if r.Intn(10) == 0 {
- flush(i)
- line = r.Intn(1000) + 1
- }
-
- // Possible PCData $1 change.
- if r.Intn(100) == 0 {
- flush(i)
- fmt.Printf("/*%6s*/\tPCDATA $1, $%d\n", "", r.Intn(1000))
- }
-
- // Possible PCData $2 change.
- if r.Intn(100) == 0 {
- flush(i)
- fmt.Printf("/*%6s*/\tPCDATA $2, $%d\n", "", r.Intn(1000))
- }
- }
- flush(size)
- for spadj < 0 {
- fmt.Printf("\tPUSHQ AX\n")
- spadj += 8
- }
- for spadj > 0 {
- fmt.Printf("\tPOPQ AX\n")
- spadj -= 8
- }
- fmt.Printf("\tRET\n")
-
- fmt.Printf("\n")
- fmt.Printf("GLOBL funcdata%d(SB), $16\n", f)
- }
-
- fmt.Printf("\nTEXT start(SB),7,$0\n")
- for f := 0; f < 3; f++ {
- fmt.Printf("\tCALL func%d(SB)\n", f)
- }
- fmt.Printf("\tMOVQ $runtime·pclntab(SB), AX\n")
- fmt.Printf("\n\tRET\n")
-}
+++ /dev/null
-TEXT _rt0_go(SB),7,$0
- MOVL $1, DI
- MOVL $hello<>(SB), SI
- MOVL $12, DX
- MOVL $0x2000004, AX
- SYSCALL
- MOVL $0, DI
- MOVL $0x2000001, AX
- SYSCALL
- RET
-
-DATA hello<>+0(SB)/4, $"hell"
-DATA hello<>+4(SB)/4, $"o wo"
-DATA hello<>+8(SB)/4, $"rld\n"
-GLOBL hello<>(SB), $12
+++ /dev/null
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Test of section assignment in layout.go.
-// Each symbol should end up in the section named by the symbol name prefix (up to the underscore).
-
-#include "textflag.h"
-
-TEXT text_start(SB),7,$0
- MOVQ $rodata_sym(SB), AX
- MOVQ $noptrdata_sym(SB), AX
- MOVQ $data_sym(SB), AX
- MOVQ $bss_sym(SB), AX
- MOVQ $noptrbss_sym(SB), AX
- RET
-
-DATA rodata_sym(SB)/4, $1
-GLOBL rodata_sym(SB), RODATA, $4
-
-DATA noptrdata_sym(SB)/4, $1
-GLOBL noptrdata_sym(SB), NOPTR, $4
-
-DATA data_sym(SB)/4, $1
-GLOBL data_sym(SB), $4
-
-GLOBL bss_sym(SB), $4
-
-GLOBL noptrbss_sym(SB), NOPTR, $4
+++ /dev/null
-00000000 cf fa ed fe 07 00 00 01 03 00 00 00 02 00 00 00 |................|
-00000010 04 00 00 00 d0 02 00 00 01 00 00 00 00 00 00 00 |................|
-00000020 19 00 00 00 48 00 00 00 5f 5f 50 41 47 45 5a 45 |....H...__PAGEZE|
-00000030 52 4f 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |RO..............|
-00000040 00 10 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
-*
-00000060 00 00 00 00 00 00 00 00 19 00 00 00 38 01 00 00 |............8...|
-00000070 5f 5f 54 45 58 54 00 00 00 00 00 00 00 00 00 00 |__TEXT..........|
-00000080 00 10 00 00 00 00 00 00 c0 10 00 00 00 00 00 00 |................|
-00000090 00 00 00 00 00 00 00 00 c0 10 00 00 00 00 00 00 |................|
-000000a0 07 00 00 00 05 00 00 00 03 00 00 00 00 00 00 00 |................|
-000000b0 5f 5f 74 65 78 74 00 00 00 00 00 00 00 00 00 00 |__text..........|
-000000c0 5f 5f 54 45 58 54 00 00 00 00 00 00 00 00 00 00 |__TEXT..........|
-000000d0 00 20 00 00 00 00 00 00 30 00 00 00 00 00 00 00 |. ......0.......|
-000000e0 00 10 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
-000000f0 00 04 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
-00000100 5f 5f 72 6f 64 61 74 61 00 00 00 00 00 00 00 00 |__rodata........|
-00000110 5f 5f 54 45 58 54 00 00 00 00 00 00 00 00 00 00 |__TEXT..........|
-00000120 30 20 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |0 ..............|
-00000130 30 10 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |0...............|
-*
-00000150 5f 5f 66 75 6e 63 74 61 62 00 00 00 00 00 00 00 |__functab.......|
-00000160 5f 5f 54 45 58 54 00 00 00 00 00 00 00 00 00 00 |__TEXT..........|
-00000170 30 20 00 00 00 00 00 00 90 00 00 00 00 00 00 00 |0 ..............|
-00000180 30 10 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |0...............|
-*
-000001a0 19 00 00 00 98 00 00 00 5f 5f 44 41 54 41 00 00 |........__DATA..|
-000001b0 00 00 00 00 00 00 00 00 00 30 00 00 00 00 00 00 |.........0......|
-000001c0 0c 00 00 00 00 00 00 00 00 20 00 00 00 00 00 00 |......... ......|
-000001d0 0c 00 00 00 00 00 00 00 03 00 00 00 03 00 00 00 |................|
-000001e0 01 00 00 00 00 00 00 00 5f 5f 64 61 74 61 00 00 |........__data..|
-000001f0 00 00 00 00 00 00 00 00 5f 5f 44 41 54 41 00 00 |........__DATA..|
-00000200 00 00 00 00 00 00 00 00 00 30 00 00 00 00 00 00 |.........0......|
-00000210 0c 00 00 00 00 00 00 00 00 20 00 00 00 00 00 00 |......... ......|
-*
-00000230 00 00 00 00 00 00 00 00 05 00 00 00 b8 00 00 00 |................|
-00000240 04 00 00 00 2a 00 00 00 00 00 00 00 00 00 00 00 |....*...........|
-*
-000002c0 00 00 00 00 00 00 00 00 00 20 00 00 00 00 00 00 |......... ......|
-*
-00001000 bf 01 00 00 00 8d 35 f5 0f 00 00 ba 0c 00 00 00 |......5.........|
-00001010 b8 04 00 00 02 0f 05 31 ff b8 01 00 00 02 0f 05 |.......1........|
-00001020 c3 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
-00001030 fb ff ff ff 00 00 01 08 01 00 00 00 00 00 00 00 |................|
-00001040 00 20 00 00 00 00 00 00 30 00 00 00 00 00 00 00 |. ......0.......|
-00001050 30 20 00 00 00 00 00 00 80 00 00 00 00 00 00 00 |0 ..............|
-00001060 00 20 00 00 00 00 00 00 58 00 00 00 00 00 00 80 |. ......X.......|
-00001070 08 00 00 00 60 00 00 00 63 00 00 00 66 00 00 00 |....`...c...f...|
-00001080 00 00 00 00 00 00 00 00 5f 72 74 30 5f 67 6f 00 |........_rt0_go.|
-00001090 02 30 00 04 30 00 06 05 02 06 02 05 02 05 02 02 |.0..0...........|
-000010a0 02 02 02 05 02 02 02 10 00 00 00 00 00 00 00 00 |................|
-000010b0 02 00 00 00 88 00 00 00 68 65 6c 6c 6f 2e 73 00 |........hello.s.|
-*
-00002000 68 65 6c 6c 6f 20 77 6f 72 6c 64 0a |hello world.|
-0000200c
+++ /dev/null
-00000000 cf fa ed fe 07 00 00 01 03 00 00 00 02 00 00 00 |................|
-00000010 03 00 00 00 98 01 00 00 01 00 00 00 00 00 00 00 |................|
-00000020 19 00 00 00 48 00 00 00 5f 5f 50 41 47 45 5a 45 |....H...__PAGEZE|
-00000030 52 4f 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |RO..............|
-00000040 00 10 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
-00000050 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
-00000060 00 00 00 00 00 00 00 00 19 00 00 00 98 00 00 00 |................|
-00000070 5f 5f 54 45 58 54 00 00 00 00 00 00 00 00 00 00 |__TEXT..........|
-00000080 00 10 00 00 00 00 00 00 0d 10 00 00 00 00 00 00 |................|
-00000090 00 00 00 00 00 00 00 00 0d 10 00 00 00 00 00 00 |................|
-000000a0 07 00 00 00 05 00 00 00 01 00 00 00 00 00 00 00 |................|
-000000b0 5f 5f 74 65 78 74 00 00 00 00 00 00 00 00 00 00 |__text..........|
-000000c0 5f 5f 54 45 58 54 00 00 00 00 00 00 00 00 00 00 |__TEXT..........|
-000000d0 00 20 00 00 00 00 00 00 0d 00 00 00 00 00 00 00 |. ..............|
-000000e0 00 10 00 00 06 00 00 00 00 00 00 00 00 00 00 00 |................|
-000000f0 00 04 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
-00000100 05 00 00 00 b8 00 00 00 04 00 00 00 2a 00 00 00 |............*...|
-00000110 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
-*
-00000190 00 20 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |. ..............|
-000001a0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
-*
-00001000 b8 01 00 00 02 bf 09 00 00 00 0f 05 f4 |.............|
-0000100d
+++ /dev/null
-00000000 cf fa ed fe 07 00 00 01 03 00 00 00 02 00 00 00 |................|
-00000010 04 00 00 00 30 02 00 00 01 00 00 00 00 00 00 00 |....0...........|
-00000020 19 00 00 00 48 00 00 00 5f 5f 50 41 47 45 5a 45 |....H...__PAGEZE|
-00000030 52 4f 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |RO..............|
-00000040 00 10 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
-00000050 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
-00000060 00 00 00 00 00 00 00 00 19 00 00 00 98 00 00 00 |................|
-00000070 5f 5f 54 45 58 54 00 00 00 00 00 00 00 00 00 00 |__TEXT..........|
-00000080 00 10 00 00 00 00 00 00 23 10 00 00 00 00 00 00 |........#.......|
-00000090 00 00 00 00 00 00 00 00 23 10 00 00 00 00 00 00 |........#.......|
-000000a0 07 00 00 00 05 00 00 00 01 00 00 00 00 00 00 00 |................|
-000000b0 5f 5f 74 65 78 74 00 00 00 00 00 00 00 00 00 00 |__text..........|
-000000c0 5f 5f 54 45 58 54 00 00 00 00 00 00 00 00 00 00 |__TEXT..........|
-000000d0 00 20 00 00 00 00 00 00 23 00 00 00 00 00 00 00 |. ......#.......|
-000000e0 00 10 00 00 06 00 00 00 00 00 00 00 00 00 00 00 |................|
-000000f0 00 04 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
-00000100 19 00 00 00 98 00 00 00 5f 5f 44 41 54 41 00 00 |........__DATA..|
-00000110 00 00 00 00 00 00 00 00 00 30 00 00 00 00 00 00 |.........0......|
-00000120 0c 00 00 00 00 00 00 00 00 20 00 00 00 00 00 00 |......... ......|
-00000130 0c 00 00 00 00 00 00 00 03 00 00 00 03 00 00 00 |................|
-00000140 01 00 00 00 00 00 00 00 5f 5f 64 61 74 61 00 00 |........__data..|
-00000150 00 00 00 00 00 00 00 00 5f 5f 44 41 54 41 00 00 |........__DATA..|
-00000160 00 00 00 00 00 00 00 00 00 30 00 00 00 00 00 00 |.........0......|
-00000170 0c 00 00 00 00 00 00 00 00 20 00 00 06 00 00 00 |......... ......|
-00000180 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
-00000190 00 00 00 00 00 00 00 00 05 00 00 00 b8 00 00 00 |................|
-000001a0 04 00 00 00 2a 00 00 00 00 00 00 00 00 00 00 00 |....*...........|
-000001b0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
-*
-00000220 00 00 00 00 00 00 00 00 00 20 00 00 00 00 00 00 |......... ......|
-00000230 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
-*
-00001000 b8 04 00 00 02 bf 01 00 00 00 be 00 30 00 00 ba |............0...|
-00001010 0c 00 00 00 0f 05 b8 01 00 00 02 bf 09 00 00 00 |................|
-00001020 0f 05 f4 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
-00001030 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
-*
-00002000 68 65 6c 6c 6f 20 77 6f 72 6c 64 0a |hello world.|
-0000200c
+++ /dev/null
-00000000 cf fa ed fe 07 00 00 01 03 00 00 00 02 00 00 00 |................|
-00000010 03 00 00 00 e8 01 00 00 01 00 00 00 00 00 00 00 |................|
-00000020 19 00 00 00 48 00 00 00 5f 5f 50 41 47 45 5a 45 |....H...__PAGEZE|
-00000030 52 4f 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |RO..............|
-00000040 00 10 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
-00000050 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
-00000060 00 00 00 00 00 00 00 00 19 00 00 00 e8 00 00 00 |................|
-00000070 5f 5f 54 45 58 54 00 00 00 00 00 00 00 00 00 00 |__TEXT..........|
-00000080 00 10 00 00 00 00 00 00 0c 20 00 00 00 00 00 00 |......... ......|
-00000090 00 00 00 00 00 00 00 00 0c 20 00 00 00 00 00 00 |......... ......|
-000000a0 07 00 00 00 05 00 00 00 02 00 00 00 00 00 00 00 |................|
-000000b0 5f 5f 74 65 78 74 00 00 00 00 00 00 00 00 00 00 |__text..........|
-000000c0 5f 5f 54 45 58 54 00 00 00 00 00 00 00 00 00 00 |__TEXT..........|
-000000d0 00 20 00 00 00 00 00 00 23 00 00 00 00 00 00 00 |. ......#.......|
-000000e0 00 10 00 00 06 00 00 00 00 00 00 00 00 00 00 00 |................|
-000000f0 00 04 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
-00000100 5f 5f 72 6f 64 61 74 61 00 00 00 00 00 00 00 00 |__rodata........|
-00000110 5f 5f 54 45 58 54 00 00 00 00 00 00 00 00 00 00 |__TEXT..........|
-00000120 00 30 00 00 00 00 00 00 0c 00 00 00 00 00 00 00 |.0..............|
-00000130 00 20 00 00 06 00 00 00 00 00 00 00 00 00 00 00 |. ..............|
-00000140 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
-00000150 05 00 00 00 b8 00 00 00 04 00 00 00 2a 00 00 00 |............*...|
-00000160 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
-*
-000001e0 00 20 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |. ..............|
-000001f0 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
-*
-00001000 b8 04 00 00 02 bf 01 00 00 00 be 00 30 00 00 ba |............0...|
-00001010 0c 00 00 00 0f 05 b8 01 00 00 02 bf 00 00 00 00 |................|
-00001020 0f 05 f4 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
-00001030 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 |................|
-*
-00002000 68 65 6c 6c 6f 20 77 6f 72 6c 64 0a |hello world.|
-0000200c
+++ /dev/null
-// generated by genpcln.go; do not edit
-
-#line 1 "input"
-TEXT func0(SB),7,$48-592
- FUNCDATA $1, funcdata0(SB)
-#line 1 "input"
-#line 1 "input"
-/*0x0004*/ MOVL $0x1234567, AX
-#line 1 "input"
-/*0x0009*/ BYTE $0
-/* */ PCDATA $1, $110
-#line 1 "input"
-/*0x000a*/ BYTE $0
-#line 1 "input"
-/*0x000b*/ BYTE $0
-/* */ PCDATA $1, $422
-#line 1 "input"
-/*0x000c*/ BYTE $0
-#line 1 "input"
-/*0x000d*/ BYTE $0
-#line 1 "input"
-/*0x000e*/ POPQ AX
-#line 1 "input"
-/*0x000f*/ MOVL $0x1234567, AX
-#line 1 "input"
-/*0x0014*/ BYTE $0
-#line 1 "input"
-/*0x0015*/ BYTE $0
-#line 1 "input"
-/*0x0016*/ BYTE $0
-#line 542 "input"
-/*0x0017*/ BYTE $0
-#line 960 "input"
-/*0x0018*/ MOVL $0x1234567, AX
-#line 960 "input"
-/*0x001d*/ BYTE $0
-#line 960 "input"
-/*0x001e*/ PUSHQ AX
-#line 960 "input"
-/*0x001f*/ BYTE $0
-#line 960 "input"
-/*0x0020*/ BYTE $0
-#line 594 "input"
-/*0x0021*/ BYTE $0
-#line 671 "input"
-/*0x0022*/ MOVL $0x1234567, AX
-#line 671 "input"
-/*0x0027*/ BYTE $0
-#line 671 "input"
-/*0x0028*/ BYTE $0
-#line 230 "input"
-/*0x0029*/ BYTE $0
-#line 230 "input"
-/*0x002a*/ BYTE $0
-#line 413 "input"
-/*0x002b*/ BYTE $0
-#line 413 "input"
-/*0x002c*/ BYTE $0
-#line 413 "input"
-/*0x002d*/ BYTE $0
-#line 729 "input"
-/*0x002e*/ BYTE $0
-#line 729 "input"
-/*0x002f*/ BYTE $0
-#line 729 "input"
-/*0x0030*/ BYTE $0
-#line 948 "input"
-/*0x0031*/ BYTE $0
-#line 11 "input"
-/*0x0032*/ MOVQ $0x123456789, AX
-#line 11 "input"
-/*0x003c*/ MOVL $0x1234567, AX
-#line 11 "input"
-/*0x0041*/ BYTE $0
-#line 11 "input"
-/*0x0042*/ BYTE $0
-#line 11 "input"
-/*0x0043*/ POPQ AX
-/* */ PCDATA $2, $342
-#line 11 "input"
-/*0x0044*/ MOVQ $0x123456789, AX
-#line 11 "input"
-/*0x004e*/ MOVQ $0x123456789, AX
-#line 11 "input"
-/*0x0058*/ MOVL $0x1234567, AX
-#line 11 "input"
-/*0x005d*/ BYTE $0
-#line 11 "input"
-/*0x005e*/ BYTE $0
-#line 70 "input"
-/*0x005f*/ BYTE $0
-#line 70 "input"
-/*0x0060*/ BYTE $0
-#line 70 "input"
-/*0x0061*/ BYTE $0
-#line 18 "input"
-/*0x0062*/ MOVQ $0x123456789, AX
-#line 18 "input"
-/*0x006c*/ MOVL $0x1234567, AX
-#line 18 "input"
-/*0x0071*/ BYTE $0
-#line 814 "input"
-/*0x0072*/ BYTE $0
-#line 814 "input"
-/*0x0073*/ PUSHQ AX
-#line 763 "input"
-/*0x0074*/ BYTE $0
-#line 763 "input"
-/*0x0075*/ BYTE $0
-#line 763 "input"
-/*0x0076*/ BYTE $0
-#line 530 "input"
-/*0x0077*/ BYTE $0
-#line 530 "input"
-/*0x0078*/ BYTE $0
-#line 530 "input"
-/*0x0079*/ BYTE $0
-#line 530 "input"
-/*0x007a*/ BYTE $0
-#line 985 "input"
-/*0x007b*/ BYTE $0
-#line 985 "input"
-/*0x007c*/ BYTE $0
-#line 985 "input"
-/*0x007d*/ BYTE $0
-#line 985 "input"
-/*0x007e*/ PUSHQ AX
-#line 985 "input"
-/*0x007f*/ MOVL $0x1234567, AX
-#line 958 "input"
-/*0x0084*/ BYTE $0
-#line 368 "input"
-/*0x0085*/ MOVQ $0x123456789, AX
-#line 368 "input"
-/*0x008f*/ MOVL $0x1234567, AX
-#line 368 "input"
-/*0x0094*/ BYTE $0
-#line 368 "input"
-/*0x0095*/ BYTE $0
-#line 368 "input"
-/*0x0096*/ BYTE $0
-#line 368 "input"
-/*0x0097*/ BYTE $0
-#line 75 "file0.s"
-/*0x0098*/ BYTE $0
-#line 75 "file0.s"
-/*0x0099*/ BYTE $0
-#line 75 "file0.s"
-/*0x009a*/ BYTE $0
-#line 75 "file0.s"
-/*0x009b*/ BYTE $0
-#line 588 "file0.s"
-/*0x009c*/ MOVQ $0x123456789, AX
-#line 187 "file0.s"
-/*0x00a6*/ MOVQ $0x123456789, AX
-#line 187 "file0.s"
-/*0x00b0*/ BYTE $0
-#line 202 "file0.s"
-/*0x00b1*/ MOVL $0x1234567, AX
-#line 202 "file0.s"
-/*0x00b6*/ BYTE $0
-#line 887 "file0.s"
-/*0x00b7*/ MOVL $0x1234567, AX
-#line 887 "file0.s"
-/*0x00bc*/ BYTE $0
-#line 887 "file0.s"
-/*0x00bd*/ BYTE $0
-#line 480 "file0.s"
-/*0x00be*/ MOVL $0x1234567, AX
-#line 480 "file0.s"
-/*0x00c3*/ BYTE $0
-#line 40 "file8.s"
-/*0x00c4*/ BYTE $0
-#line 17 "file0.s"
-/*0x00c5*/ MOVQ $0x123456789, AX
-#line 17 "file0.s"
-/*0x00cf*/ BYTE $0
-#line 17 "file0.s"
-/*0x00d0*/ BYTE $0
-#line 17 "file0.s"
-/*0x00d1*/ BYTE $0
-#line 17 "file0.s"
-/*0x00d2*/ BYTE $0
-#line 436 "file0.s"
-/*0x00d3*/ MOVL $0x1234567, AX
-#line 436 "file0.s"
-/*0x00d8*/ BYTE $0
-#line 436 "file0.s"
-/*0x00d9*/ BYTE $0
-#line 346 "file0.s"
-/*0x00da*/ BYTE $0
-#line 346 "file0.s"
-/*0x00db*/ BYTE $0
-#line 346 "file0.s"
-/*0x00dc*/ BYTE $0
-#line 812 "file0.s"
-/*0x00dd*/ BYTE $0
-#line 812 "file0.s"
-/*0x00de*/ BYTE $0
-#line 812 "file0.s"
-/*0x00df*/ BYTE $0
-#line 812 "file0.s"
-/*0x00e0*/ BYTE $0
-#line 94 "file1.s"
-/*0x00e1*/ BYTE $0
-#line 94 "file1.s"
-/*0x00e2*/ BYTE $0
-#line 165 "file1.s"
-/*0x00e3*/ MOVL $0x1234567, AX
-#line 165 "file1.s"
-/*0x00e8*/ BYTE $0
-#line 456 "file1.s"
-/*0x00e9*/ BYTE $0
-#line 810 "file1.s"
-/*0x00ea*/ BYTE $0
-#line 722 "file1.s"
-/*0x00eb*/ BYTE $0
-#line 722 "file1.s"
-/*0x00ec*/ BYTE $0
-#line 722 "file1.s"
-/*0x00ed*/ BYTE $0
-#line 722 "file1.s"
-/*0x00ee*/ BYTE $0
-#line 722 "file1.s"
-/*0x00ef*/ PUSHQ AX
-#line 722 "file1.s"
-/*0x00f0*/ BYTE $0
-#line 722 "file1.s"
-/*0x00f1*/ BYTE $0
-#line 722 "file1.s"
-/*0x00f2*/ BYTE $0
-#line 722 "file1.s"
-/*0x00f3*/ BYTE $0
-/* */ PCDATA $2, $246
-#line 722 "file1.s"
-/*0x00f4*/ MOVQ $0x123456789, AX
-#line 722 "file1.s"
-/*0x00fe*/ MOVQ $0x123456789, AX
-#line 722 "file1.s"
-/*0x0108*/ MOVL $0x1234567, AX
-#line 722 "file1.s"
-/*0x010d*/ BYTE $0
-#line 722 "file1.s"
-/*0x010e*/ BYTE $0
-#line 497 "file1.s"
-/*0x010f*/ MOVQ $0x123456789, AX
-#line 497 "file1.s"
-/*0x0119*/ MOVQ $0x123456789, AX
-#line 497 "file1.s"
-/*0x0123*/ MOVQ $0x123456789, AX
-#line 497 "file1.s"
-/*0x012d*/ MOVL $0x1234567, AX
-#line 497 "file1.s"
-/*0x0132*/ BYTE $0
-#line 686 "file1.s"
-/*0x0133*/ BYTE $0
-#line 686 "file1.s"
-/*0x0134*/ BYTE $0
-#line 248 "file1.s"
-/*0x0135*/ BYTE $0
-#line 248 "file1.s"
-/*0x0136*/ BYTE $0
-#line 248 "file1.s"
-/*0x0137*/ BYTE $0
-#line 248 "file1.s"
-/*0x0138*/ BYTE $0
-#line 307 "file1.s"
-/*0x0139*/ BYTE $0
-#line 220 "file1.s"
-/*0x013a*/ MOVL $0x1234567, AX
-#line 220 "file1.s"
-/*0x013f*/ BYTE $0
-#line 220 "file1.s"
-/*0x0140*/ BYTE $0
-#line 467 "file1.s"
-/*0x0141*/ MOVQ $0x123456789, AX
-#line 467 "file1.s"
-/*0x014b*/ BYTE $0
-#line 467 "file1.s"
-/*0x014c*/ BYTE $0
-#line 467 "file1.s"
-/*0x014d*/ BYTE $0
-#line 467 "file1.s"
-/*0x014e*/ BYTE $0
-#line 786 "file1.s"
-/*0x014f*/ MOVL $0x1234567, AX
-#line 251 "file1.s"
-/*0x0154*/ BYTE $0
-/* */ PCDATA $2, $64
-#line 251 "file1.s"
-/*0x0155*/ BYTE $0
-#line 251 "file1.s"
-/*0x0156*/ BYTE $0
-#line 251 "file1.s"
-/*0x0157*/ BYTE $0
-#line 618 "file1.s"
-/*0x0158*/ MOVQ $0x123456789, AX
-/* */ PCDATA $1, $686
-#line 618 "file1.s"
-/*0x0162*/ BYTE $0
-#line 618 "file1.s"
-/*0x0163*/ BYTE $0
-#line 618 "file1.s"
-/*0x0164*/ BYTE $0
-#line 618 "file1.s"
-/*0x0165*/ PUSHQ AX
-/* */ PCDATA $2, $915
-#line 618 "file1.s"
-/*0x0166*/ BYTE $0
-#line 618 "file1.s"
-/*0x0167*/ BYTE $0
-#line 618 "file1.s"
-/*0x0168*/ BYTE $0
-#line 618 "file1.s"
-/*0x0169*/ BYTE $0
-#line 230 "file1.s"
-/*0x016a*/ BYTE $0
-#line 823 "file1.s"
-/*0x016b*/ BYTE $0
-#line 145 "file1.s"
-/*0x016c*/ MOVQ $0x123456789, AX
-#line 145 "file1.s"
-/*0x0176*/ BYTE $0
-#line 145 "file1.s"
-/*0x0177*/ BYTE $0
-#line 675 "file1.s"
-/*0x0178*/ BYTE $0
-#line 62 "file9.s"
-/*0x0179*/ BYTE $0
-/* */ PCDATA $2, $768
-#line 62 "file9.s"
-/*0x017a*/ BYTE $0
-#line 62 "file9.s"
-/*0x017b*/ BYTE $0
-#line 29 "file9.s"
-/*0x017c*/ BYTE $0
-#line 29 "file9.s"
-/*0x017d*/ BYTE $0
-#line 29 "file9.s"
-/*0x017e*/ BYTE $0
-#line 29 "file9.s"
-/*0x017f*/ BYTE $0
-#line 65 "file4.s"
-/*0x0180*/ BYTE $0
-#line 77 "file3.s"
-/*0x0181*/ MOVL $0x1234567, AX
-#line 77 "file3.s"
-/*0x0186*/ BYTE $0
-#line 77 "file3.s"
-/*0x0187*/ BYTE $0
-#line 77 "file3.s"
-/*0x0188*/ BYTE $0
-#line 307 "file3.s"
-/*0x0189*/ MOVQ $0x123456789, AX
-#line 307 "file3.s"
-/*0x0193*/ BYTE $0
-#line 654 "file3.s"
-/*0x0194*/ BYTE $0
-#line 654 "file3.s"
-/*0x0195*/ BYTE $0
-#line 115 "file3.s"
-/*0x0196*/ MOVL $0x1234567, AX
-#line 115 "file3.s"
-/*0x019b*/ BYTE $0
-#line 115 "file3.s"
-/*0x019c*/ BYTE $0
-#line 115 "file3.s"
-/*0x019d*/ BYTE $0
-#line 115 "file3.s"
-/*0x019e*/ BYTE $0
-#line 154 "file3.s"
-/*0x019f*/ MOVQ $0x123456789, AX
-#line 166 "file3.s"
-/*0x01a9*/ BYTE $0
-#line 166 "file3.s"
-/*0x01aa*/ BYTE $0
-#line 166 "file3.s"
-/*0x01ab*/ BYTE $0
-/* */ PCDATA $1, $523
-#line 166 "file3.s"
-/*0x01ac*/ MOVL $0x1234567, AX
-#line 166 "file3.s"
-/*0x01b1*/ BYTE $0
-#line 779 "file3.s"
-/*0x01b2*/ BYTE $0
-#line 779 "file3.s"
-/*0x01b3*/ BYTE $0
-#line 515 "file3.s"
-/*0x01b4*/ BYTE $0
-#line 515 "file3.s"
-/*0x01b5*/ BYTE $0
-#line 369 "file3.s"
-/*0x01b6*/ MOVL $0x1234567, AX
-#line 369 "file3.s"
-/*0x01bb*/ BYTE $0
-#line 369 "file3.s"
-/*0x01bc*/ BYTE $0
-#line 369 "file3.s"
-/*0x01bd*/ BYTE $0
-#line 680 "file3.s"
-/*0x01be*/ BYTE $0
-#line 680 "file3.s"
-/*0x01bf*/ BYTE $0
-#line 680 "file3.s"
-/*0x01c0*/ BYTE $0
-#line 680 "file3.s"
-/*0x01c1*/ BYTE $0
-#line 131 "file3.s"
-/*0x01c2*/ MOVQ $0x123456789, AX
-#line 131 "file3.s"
-/*0x01cc*/ BYTE $0
-#line 131 "file3.s"
-/*0x01cd*/ BYTE $0
-#line 131 "file3.s"
-/*0x01ce*/ BYTE $0
-#line 131 "file3.s"
-/*0x01cf*/ BYTE $0
-#line 848 "file3.s"
-/*0x01d0*/ BYTE $0
-#line 848 "file3.s"
-/*0x01d1*/ BYTE $0
-#line 848 "file3.s"
-/*0x01d2*/ POPQ AX
-#line 848 "file3.s"
-/*0x01d3*/ BYTE $0
-#line 848 "file3.s"
-/*0x01d4*/ BYTE $0
-#line 848 "file3.s"
-/*0x01d5*/ BYTE $0
-/* */ PCDATA $1, $86
-#line 848 "file3.s"
-/*0x01d6*/ MOVL $0x1234567, AX
-#line 438 "file3.s"
-/*0x01db*/ MOVQ $0x123456789, AX
-#line 438 "file3.s"
-/*0x01e5*/ MOVL $0x1234567, AX
-#line 5 "file3.s"
-/*0x01ea*/ BYTE $0
-#line 5 "file3.s"
-/*0x01eb*/ BYTE $0
-#line 531 "file3.s"
-/*0x01ec*/ MOVQ $0x123456789, AX
-#line 531 "file3.s"
-/*0x01f6*/ MOVQ $0x123456789, AX
-#line 531 "file3.s"
-/*0x0200*/ MOVQ $0x123456789, AX
-#line 531 "file3.s"
-/*0x020a*/ MOVL $0x1234567, AX
-#line 863 "file3.s"
-/*0x020f*/ BYTE $0
-#line 733 "file3.s"
-/*0x0210*/ MOVQ $0x123456789, AX
-#line 166 "file3.s"
-/*0x021a*/ MOVQ $0x123456789, AX
-#line 166 "file3.s"
-/*0x0224*/ BYTE $0
-#line 166 "file3.s"
-/*0x0225*/ BYTE $0
-#line 166 "file3.s"
-/*0x0226*/ BYTE $0
-#line 166 "file3.s"
-/*0x0227*/ BYTE $0
-#line 54 "file3.s"
-/*0x0228*/ MOVQ $0x123456789, AX
-#line 54 "file3.s"
-/*0x0232*/ BYTE $0
-#line 54 "file3.s"
-/*0x0233*/ BYTE $0
-#line 54 "file3.s"
-/*0x0234*/ BYTE $0
-#line 20 "file4.s"
-/*0x0235*/ BYTE $0
-#line 20 "file4.s"
-/*0x0236*/ BYTE $0
-#line 546 "file4.s"
-/*0x0237*/ BYTE $0
-#line 546 "file4.s"
-/*0x0238*/ BYTE $0
-#line 74 "file4.s"
-/*0x0239*/ BYTE $0
-#line 31 "file4.s"
-/*0x023a*/ BYTE $0
-#line 31 "file4.s"
-/*0x023b*/ BYTE $0
-#line 31 "file4.s"
-/*0x023c*/ BYTE $0
-#line 31 "file4.s"
-/*0x023d*/ BYTE $0
-#line 834 "file4.s"
-/*0x023e*/ BYTE $0
-#line 834 "file4.s"
-/*0x023f*/ BYTE $0
-#line 519 "file4.s"
-/*0x0240*/ MOVL $0x1234567, AX
-#line 342 "file4.s"
-/*0x0245*/ BYTE $0
-#line 342 "file4.s"
-/*0x0246*/ BYTE $0
-#line 342 "file4.s"
-/*0x0247*/ BYTE $0
-#line 458 "file4.s"
-/*0x0248*/ BYTE $0
-#line 458 "file4.s"
-/*0x0249*/ BYTE $0
-#line 458 "file4.s"
-/*0x024a*/ BYTE $0
-#line 458 "file4.s"
-/*0x024b*/ BYTE $0
-#line 13 "file9.s"
-/*0x024c*/ BYTE $0
-#line 13 "file9.s"
-/*0x024d*/ BYTE $0
-#line 13 "file9.s"
-/*0x024e*/ BYTE $0
-#line 365 "file9.s"
-/*0x024f*/ BYTE $0
-#line 749 "file9.s"
-/*0x0250*/ MOVL $0x1234567, AX
-#line 749 "file9.s"
-/*0x0255*/ BYTE $0
-#line 749 "file9.s"
-/*0x0256*/ BYTE $0
-#line 41 "file0.s"
-/*0x0257*/ MOVL $0x1234567, AX
-#line 41 "file0.s"
-/*0x025c*/ BYTE $0
-#line 41 "file0.s"
-/*0x025d*/ BYTE $0
-#line 41 "file0.s"
-/*0x025e*/ BYTE $0
-#line 869 "file0.s"
-/*0x025f*/ BYTE $0
-#line 881 "file0.s"
-/*0x0260*/ MOVQ $0x123456789, AX
-#line 881 "file0.s"
-/*0x026a*/ MOVQ $0x123456789, AX
- POPQ AX
- POPQ AX
- RET
-
-GLOBL funcdata0(SB), $16
-#line 1 "input"
-TEXT func1(SB),7,$40-648
- FUNCDATA $1, funcdata1(SB)
-#line 1 "input"
-#line 1 "input"
-/*0x0004*/ BYTE $0
-#line 12 "file4.s"
-/*0x0005*/ MOVL $0x1234567, AX
-#line 12 "file4.s"
-/*0x000a*/ BYTE $0
-#line 12 "file4.s"
-/*0x000b*/ BYTE $0
-#line 12 "file4.s"
-/*0x000c*/ BYTE $0
-/* */ PCDATA $2, $705
-#line 12 "file4.s"
-/*0x000d*/ MOVQ $0x123456789, AX
-#line 12 "file4.s"
-/*0x0017*/ BYTE $0
-#line 633 "file4.s"
-/*0x0018*/ MOVQ $0x123456789, AX
-#line 633 "file4.s"
-/*0x0022*/ MOVL $0x1234567, AX
-#line 633 "file4.s"
-/*0x0027*/ POPQ AX
-#line 633 "file4.s"
-/*0x0028*/ BYTE $0
-#line 633 "file4.s"
-/*0x0029*/ BYTE $0
-#line 633 "file4.s"
-/*0x002a*/ BYTE $0
-#line 633 "file4.s"
-/*0x002b*/ PUSHQ AX
-#line 633 "file4.s"
-/*0x002c*/ MOVL $0x1234567, AX
-#line 997 "file4.s"
-/*0x0031*/ BYTE $0
-#line 997 "file4.s"
-/*0x0032*/ BYTE $0
-#line 997 "file4.s"
-/*0x0033*/ BYTE $0
-#line 997 "file4.s"
-/*0x0034*/ BYTE $0
-#line 997 "file4.s"
-/*0x0035*/ POPQ AX
-#line 997 "file4.s"
-/*0x0036*/ BYTE $0
-#line 997 "file4.s"
-/*0x0037*/ BYTE $0
-#line 1 "file4.s"
-/*0x0038*/ MOVQ $0x123456789, AX
-#line 1 "file4.s"
-/*0x0042*/ MOVQ $0x123456789, AX
-#line 1 "file4.s"
-/*0x004c*/ MOVQ $0x123456789, AX
-#line 1 "file4.s"
-/*0x0056*/ MOVQ $0x123456789, AX
-#line 1 "file4.s"
-/*0x0060*/ BYTE $0
-#line 922 "file4.s"
-/*0x0061*/ BYTE $0
-#line 375 "file4.s"
-/*0x0062*/ MOVL $0x1234567, AX
-/* */ PCDATA $1, $51
-#line 31 "file4.s"
-/*0x0067*/ MOVQ $0x123456789, AX
-#line 31 "file4.s"
-/*0x0071*/ BYTE $0
-#line 620 "file4.s"
-/*0x0072*/ MOVL $0x1234567, AX
-#line 620 "file4.s"
-/*0x0077*/ BYTE $0
-#line 695 "file4.s"
-/*0x0078*/ MOVL $0x1234567, AX
-#line 799 "file4.s"
-/*0x007d*/ MOVL $0x1234567, AX
-#line 41 "file4.s"
-/*0x0082*/ BYTE $0
-#line 795 "file4.s"
-/*0x0083*/ MOVL $0x1234567, AX
-#line 908 "file4.s"
-/*0x0088*/ BYTE $0
-#line 905 "file4.s"
-/*0x0089*/ BYTE $0
-#line 905 "file4.s"
-/*0x008a*/ BYTE $0
-#line 905 "file4.s"
-/*0x008b*/ BYTE $0
-#line 905 "file4.s"
-/*0x008c*/ BYTE $0
-#line 861 "file4.s"
-/*0x008d*/ MOVL $0x1234567, AX
-#line 861 "file4.s"
-/*0x0092*/ BYTE $0
-#line 861 "file4.s"
-/*0x0093*/ BYTE $0
-#line 861 "file4.s"
-/*0x0094*/ BYTE $0
-#line 861 "file4.s"
-/*0x0095*/ BYTE $0
-/* */ PCDATA $1, $192
-#line 861 "file4.s"
-/*0x0096*/ MOVQ $0x123456789, AX
-/* */ PCDATA $1, $60
-#line 861 "file4.s"
-/*0x00a0*/ MOVL $0x1234567, AX
-#line 861 "file4.s"
-/*0x00a5*/ BYTE $0
-#line 861 "file4.s"
-/*0x00a6*/ BYTE $0
-#line 56 "file6.s"
-/*0x00a7*/ MOVQ $0x123456789, AX
-#line 56 "file6.s"
-/*0x00b1*/ BYTE $0
-#line 56 "file6.s"
-/*0x00b2*/ BYTE $0
-#line 56 "file6.s"
-/*0x00b3*/ BYTE $0
-#line 204 "file6.s"
-/*0x00b4*/ BYTE $0
-#line 204 "file6.s"
-/*0x00b5*/ BYTE $0
-#line 204 "file6.s"
-/*0x00b6*/ BYTE $0
-#line 204 "file6.s"
-/*0x00b7*/ BYTE $0
-#line 979 "file6.s"
-/*0x00b8*/ MOVL $0x1234567, AX
-#line 979 "file6.s"
-/*0x00bd*/ BYTE $0
-#line 979 "file6.s"
-/*0x00be*/ BYTE $0
-#line 979 "file6.s"
-/*0x00bf*/ BYTE $0
-#line 58 "file6.s"
-/*0x00c0*/ MOVL $0x1234567, AX
-#line 238 "file6.s"
-/*0x00c5*/ MOVL $0x1234567, AX
-#line 238 "file6.s"
-/*0x00ca*/ BYTE $0
-#line 238 "file6.s"
-/*0x00cb*/ BYTE $0
-#line 699 "file2.s"
-/*0x00cc*/ MOVQ $0x123456789, AX
-#line 699 "file2.s"
-/*0x00d6*/ BYTE $0
-#line 699 "file2.s"
-/*0x00d7*/ BYTE $0
-#line 514 "file2.s"
-/*0x00d8*/ MOVL $0x1234567, AX
-#line 514 "file2.s"
-/*0x00dd*/ BYTE $0
-#line 514 "file2.s"
-/*0x00de*/ BYTE $0
-#line 108 "file2.s"
-/*0x00df*/ MOVQ $0x123456789, AX
-#line 108 "file2.s"
-/*0x00e9*/ MOVQ $0x123456789, AX
-#line 108 "file2.s"
-/*0x00f3*/ MOVL $0x1234567, AX
-#line 108 "file2.s"
-/*0x00f8*/ BYTE $0
-#line 108 "file2.s"
-/*0x00f9*/ BYTE $0
-#line 918 "file2.s"
-/*0x00fa*/ BYTE $0
-#line 918 "file2.s"
-/*0x00fb*/ BYTE $0
-#line 785 "file2.s"
-/*0x00fc*/ BYTE $0
-#line 3 "file5.s"
-/*0x00fd*/ BYTE $0
-#line 918 "file5.s"
-/*0x00fe*/ BYTE $0
-#line 918 "file5.s"
-/*0x00ff*/ BYTE $0
-#line 670 "file5.s"
-/*0x0100*/ MOVL $0x1234567, AX
-#line 670 "file5.s"
-/*0x0105*/ BYTE $0
-#line 720 "file5.s"
-/*0x0106*/ BYTE $0
-#line 384 "file5.s"
-/*0x0107*/ MOVQ $0x123456789, AX
-#line 384 "file5.s"
-/*0x0111*/ MOVQ $0x123456789, AX
-#line 384 "file5.s"
-/*0x011b*/ MOVQ $0x123456789, AX
-#line 384 "file5.s"
-/*0x0125*/ BYTE $0
-#line 384 "file5.s"
-/*0x0126*/ BYTE $0
-#line 263 "file5.s"
-/*0x0127*/ BYTE $0
-#line 750 "file5.s"
-/*0x0128*/ MOVL $0x1234567, AX
-#line 750 "file5.s"
-/*0x012d*/ BYTE $0
-#line 679 "file5.s"
-/*0x012e*/ MOVL $0x1234567, AX
-#line 679 "file5.s"
-/*0x0133*/ BYTE $0
-#line 679 "file5.s"
-/*0x0134*/ BYTE $0
-#line 679 "file5.s"
-/*0x0135*/ BYTE $0
-#line 679 "file5.s"
-/*0x0136*/ POPQ AX
-#line 171 "file5.s"
-/*0x0137*/ MOVL $0x1234567, AX
-#line 171 "file5.s"
-/*0x013c*/ BYTE $0
-#line 34 "file2.s"
-/*0x013d*/ BYTE $0
-#line 34 "file2.s"
-/*0x013e*/ BYTE $0
-#line 34 "file2.s"
-/*0x013f*/ BYTE $0
-#line 732 "file2.s"
-/*0x0140*/ BYTE $0
-#line 732 "file2.s"
-/*0x0141*/ PUSHQ AX
-#line 485 "file2.s"
-/*0x0142*/ BYTE $0
-#line 485 "file2.s"
-/*0x0143*/ BYTE $0
-#line 485 "file2.s"
-/*0x0144*/ BYTE $0
-#line 222 "file2.s"
-/*0x0145*/ BYTE $0
-#line 222 "file2.s"
-/*0x0146*/ BYTE $0
-/* */ PCDATA $1, $462
-#line 222 "file2.s"
-/*0x0147*/ MOVQ $0x123456789, AX
-#line 222 "file2.s"
-/*0x0151*/ MOVL $0x1234567, AX
-#line 222 "file2.s"
-/*0x0156*/ BYTE $0
-#line 677 "file2.s"
-/*0x0157*/ BYTE $0
-#line 117 "file2.s"
-/*0x0158*/ MOVL $0x1234567, AX
-#line 117 "file2.s"
-/*0x015d*/ BYTE $0
-#line 117 "file2.s"
-/*0x015e*/ BYTE $0
-#line 361 "file2.s"
-/*0x015f*/ MOVL $0x1234567, AX
-#line 590 "file2.s"
-/*0x0164*/ BYTE $0
-#line 590 "file2.s"
-/*0x0165*/ BYTE $0
-#line 58 "file2.s"
-/*0x0166*/ MOVL $0x1234567, AX
-#line 58 "file2.s"
-/*0x016b*/ BYTE $0
-#line 58 "file2.s"
-/*0x016c*/ BYTE $0
-#line 58 "file2.s"
-/*0x016d*/ BYTE $0
-#line 58 "file2.s"
-/*0x016e*/ BYTE $0
-#line 983 "file2.s"
-/*0x016f*/ BYTE $0
-#line 983 "file2.s"
-/*0x0170*/ BYTE $0
-#line 983 "file2.s"
-/*0x0171*/ BYTE $0
-#line 983 "file2.s"
-/*0x0172*/ BYTE $0
-#line 727 "file2.s"
-/*0x0173*/ MOVL $0x1234567, AX
-#line 450 "file2.s"
-/*0x0178*/ BYTE $0
-#line 450 "file2.s"
-/*0x0179*/ BYTE $0
-#line 450 "file2.s"
-/*0x017a*/ BYTE $0
-#line 450 "file2.s"
-/*0x017b*/ BYTE $0
-#line 334 "file2.s"
-/*0x017c*/ BYTE $0
-#line 334 "file2.s"
-/*0x017d*/ BYTE $0
-#line 334 "file2.s"
-/*0x017e*/ BYTE $0
-#line 334 "file2.s"
-/*0x017f*/ BYTE $0
-#line 465 "file2.s"
-/*0x0180*/ MOVL $0x1234567, AX
-/* */ PCDATA $1, $518
-#line 465 "file2.s"
-/*0x0185*/ MOVL $0x1234567, AX
-#line 465 "file2.s"
-/*0x018a*/ BYTE $0
-#line 465 "file2.s"
-/*0x018b*/ BYTE $0
-#line 465 "file2.s"
-/*0x018c*/ BYTE $0
-#line 465 "file2.s"
-/*0x018d*/ BYTE $0
-#line 682 "file2.s"
-/*0x018e*/ MOVL $0x1234567, AX
-#line 682 "file2.s"
-/*0x0193*/ BYTE $0
-#line 682 "file2.s"
-/*0x0194*/ BYTE $0
-#line 846 "file2.s"
-/*0x0195*/ MOVQ $0x123456789, AX
-#line 846 "file2.s"
-/*0x019f*/ BYTE $0
-#line 846 "file2.s"
-/*0x01a0*/ BYTE $0
-#line 846 "file2.s"
-/*0x01a1*/ POPQ AX
-#line 846 "file2.s"
-/*0x01a2*/ BYTE $0
-#line 846 "file2.s"
-/*0x01a3*/ BYTE $0
-#line 49 "file2.s"
-/*0x01a4*/ BYTE $0
-#line 49 "file2.s"
-/*0x01a5*/ BYTE $0
-#line 49 "file2.s"
-/*0x01a6*/ BYTE $0
-#line 726 "file2.s"
-/*0x01a7*/ MOVQ $0x123456789, AX
-#line 726 "file2.s"
-/*0x01b1*/ MOVL $0x1234567, AX
-#line 726 "file2.s"
-/*0x01b6*/ BYTE $0
-#line 726 "file2.s"
-/*0x01b7*/ BYTE $0
-#line 726 "file2.s"
-/*0x01b8*/ BYTE $0
-#line 726 "file2.s"
-/*0x01b9*/ PUSHQ AX
-#line 726 "file2.s"
-/*0x01ba*/ BYTE $0
-#line 726 "file2.s"
-/*0x01bb*/ BYTE $0
-#line 726 "file2.s"
-/*0x01bc*/ BYTE $0
-#line 726 "file2.s"
-/*0x01bd*/ BYTE $0
-#line 13 "file1.s"
-/*0x01be*/ BYTE $0
-#line 13 "file1.s"
-/*0x01bf*/ BYTE $0
-#line 13 "file1.s"
-/*0x01c0*/ BYTE $0
-#line 827 "file1.s"
-/*0x01c1*/ MOVL $0x1234567, AX
-#line 827 "file1.s"
-/*0x01c6*/ BYTE $0
-#line 827 "file1.s"
-/*0x01c7*/ BYTE $0
-#line 827 "file1.s"
-/*0x01c8*/ BYTE $0
-#line 827 "file1.s"
-/*0x01c9*/ BYTE $0
-#line 783 "file1.s"
-/*0x01ca*/ BYTE $0
-#line 783 "file1.s"
-/*0x01cb*/ BYTE $0
-#line 783 "file1.s"
-/*0x01cc*/ BYTE $0
-#line 783 "file1.s"
-/*0x01cd*/ BYTE $0
-#line 367 "file1.s"
-/*0x01ce*/ MOVQ $0x123456789, AX
-#line 367 "file1.s"
-/*0x01d8*/ MOVL $0x1234567, AX
-#line 367 "file1.s"
-/*0x01dd*/ BYTE $0
-#line 367 "file1.s"
-/*0x01de*/ BYTE $0
-#line 367 "file1.s"
-/*0x01df*/ BYTE $0
-#line 367 "file1.s"
-/*0x01e0*/ BYTE $0
-#line 581 "file1.s"
-/*0x01e1*/ BYTE $0
-#line 581 "file1.s"
-/*0x01e2*/ BYTE $0
-#line 581 "file1.s"
-/*0x01e3*/ BYTE $0
-#line 638 "file1.s"
-/*0x01e4*/ MOVQ $0x123456789, AX
-#line 638 "file1.s"
-/*0x01ee*/ MOVL $0x1234567, AX
-#line 638 "file1.s"
-/*0x01f3*/ BYTE $0
-#line 638 "file1.s"
-/*0x01f4*/ BYTE $0
-#line 638 "file1.s"
-/*0x01f5*/ BYTE $0
-#line 359 "file1.s"
-/*0x01f6*/ BYTE $0
-#line 449 "file1.s"
-/*0x01f7*/ BYTE $0
-#line 449 "file1.s"
-/*0x01f8*/ BYTE $0
-#line 449 "file1.s"
-/*0x01f9*/ BYTE $0
-#line 298 "file1.s"
-/*0x01fa*/ BYTE $0
-#line 298 "file1.s"
-/*0x01fb*/ BYTE $0
-#line 257 "file1.s"
-/*0x01fc*/ BYTE $0
-#line 257 "file1.s"
-/*0x01fd*/ BYTE $0
-#line 257 "file1.s"
-/*0x01fe*/ BYTE $0
-#line 512 "file1.s"
-/*0x01ff*/ MOVL $0x1234567, AX
-#line 512 "file1.s"
-/*0x0204*/ BYTE $0
-#line 512 "file1.s"
-/*0x0205*/ BYTE $0
-#line 617 "file1.s"
-/*0x0206*/ MOVL $0x1234567, AX
-#line 478 "file1.s"
-/*0x020b*/ BYTE $0
-/* */ PCDATA $2, $351
-#line 478 "file1.s"
-/*0x020c*/ BYTE $0
-#line 958 "file1.s"
-/*0x020d*/ BYTE $0
-#line 958 "file1.s"
-/*0x020e*/ BYTE $0
-#line 958 "file1.s"
-/*0x020f*/ BYTE $0
-#line 958 "file1.s"
-/*0x0210*/ BYTE $0
-#line 371 "file1.s"
-/*0x0211*/ MOVQ $0x123456789, AX
-#line 371 "file1.s"
-/*0x021b*/ BYTE $0
-#line 371 "file1.s"
-/*0x021c*/ BYTE $0
-#line 56 "file3.s"
-/*0x021d*/ MOVL $0x1234567, AX
-#line 56 "file3.s"
-/*0x0222*/ BYTE $0
-#line 56 "file3.s"
-/*0x0223*/ BYTE $0
-#line 56 "file9.s"
-/*0x0224*/ MOVQ $0x123456789, AX
-#line 56 "file9.s"
-/*0x022e*/ BYTE $0
-#line 56 "file9.s"
-/*0x022f*/ BYTE $0
-#line 56 "file9.s"
-/*0x0230*/ BYTE $0
-#line 56 "file9.s"
-/*0x0231*/ BYTE $0
-#line 684 "file9.s"
-/*0x0232*/ MOVQ $0x123456789, AX
-#line 684 "file9.s"
-/*0x023c*/ BYTE $0
-#line 684 "file9.s"
-/*0x023d*/ POPQ AX
-#line 407 "file9.s"
-/*0x023e*/ MOVL $0x1234567, AX
-#line 407 "file9.s"
-/*0x0243*/ BYTE $0
- PUSHQ AX
- PUSHQ AX
- RET
-
-GLOBL funcdata1(SB), $16
-#line 1 "input"
-TEXT func2(SB),7,$40-688
- FUNCDATA $1, funcdata2(SB)
-#line 1 "input"
-#line 1 "input"
-/*0x0004*/ BYTE $0
-#line 860 "input"
-/*0x0005*/ BYTE $0
-#line 860 "input"
-/*0x0006*/ BYTE $0
-#line 860 "input"
-/*0x0007*/ BYTE $0
-#line 860 "input"
-/*0x0008*/ BYTE $0
-#line 85 "input"
-/*0x0009*/ BYTE $0
-#line 85 "input"
-/*0x000a*/ BYTE $0
-#line 355 "input"
-/*0x000b*/ MOVQ $0x123456789, AX
-#line 355 "input"
-/*0x0015*/ MOVL $0x1234567, AX
-#line 355 "input"
-/*0x001a*/ BYTE $0
-#line 355 "input"
-/*0x001b*/ BYTE $0
-#line 355 "input"
-/*0x001c*/ BYTE $0
-#line 840 "input"
-/*0x001d*/ MOVL $0x1234567, AX
-#line 840 "input"
-/*0x0022*/ BYTE $0
-#line 294 "input"
-/*0x0023*/ MOVQ $0x123456789, AX
-#line 294 "input"
-/*0x002d*/ MOVQ $0x123456789, AX
-#line 294 "input"
-/*0x0037*/ MOVQ $0x123456789, AX
-#line 294 "input"
-/*0x0041*/ BYTE $0
-#line 294 "input"
-/*0x0042*/ BYTE $0
-#line 294 "input"
-/*0x0043*/ BYTE $0
-#line 294 "input"
-/*0x0044*/ BYTE $0
-/* */ PCDATA $1, $385
-#line 294 "input"
-/*0x0045*/ BYTE $0
-#line 294 "input"
-/*0x0046*/ BYTE $0
-#line 294 "input"
-/*0x0047*/ BYTE $0
-#line 81 "file9.s"
-/*0x0048*/ MOVL $0x1234567, AX
-#line 81 "file9.s"
-/*0x004d*/ BYTE $0
-#line 81 "file9.s"
-/*0x004e*/ BYTE $0
-#line 81 "file9.s"
-/*0x004f*/ POPQ AX
-#line 81 "file9.s"
-/*0x0050*/ MOVL $0x1234567, AX
-#line 81 "file9.s"
-/*0x0055*/ BYTE $0
-/* */ PCDATA $1, $701
-#line 81 "file9.s"
-/*0x0056*/ MOVL $0x1234567, AX
-#line 81 "file9.s"
-/*0x005b*/ BYTE $0
-#line 81 "file9.s"
-/*0x005c*/ BYTE $0
-#line 81 "file9.s"
-/*0x005d*/ BYTE $0
-#line 81 "file9.s"
-/*0x005e*/ BYTE $0
-#line 290 "file9.s"
-/*0x005f*/ BYTE $0
-#line 290 "file9.s"
-/*0x0060*/ BYTE $0
-#line 290 "file9.s"
-/*0x0061*/ BYTE $0
-#line 197 "file9.s"
-/*0x0062*/ MOVL $0x1234567, AX
-#line 197 "file9.s"
-/*0x0067*/ BYTE $0
-#line 608 "file9.s"
-/*0x0068*/ MOVQ $0x123456789, AX
-#line 608 "file9.s"
-/*0x0072*/ MOVQ $0x123456789, AX
-#line 608 "file9.s"
-/*0x007c*/ BYTE $0
-/* */ PCDATA $1, $562
-#line 608 "file9.s"
-/*0x007d*/ BYTE $0
-#line 608 "file9.s"
-/*0x007e*/ BYTE $0
-#line 189 "file9.s"
-/*0x007f*/ MOVL $0x1234567, AX
-#line 189 "file9.s"
-/*0x0084*/ BYTE $0
-#line 189 "file9.s"
-/*0x0085*/ BYTE $0
-#line 189 "file9.s"
-/*0x0086*/ BYTE $0
-#line 189 "file9.s"
-/*0x0087*/ BYTE $0
-#line 472 "file9.s"
-/*0x0088*/ MOVL $0x1234567, AX
-#line 472 "file9.s"
-/*0x008d*/ BYTE $0
-#line 472 "file9.s"
-/*0x008e*/ BYTE $0
-#line 472 "file9.s"
-/*0x008f*/ PUSHQ AX
-#line 472 "file9.s"
-/*0x0090*/ MOVQ $0x123456789, AX
-#line 472 "file9.s"
-/*0x009a*/ MOVL $0x1234567, AX
-#line 472 "file9.s"
-/*0x009f*/ BYTE $0
-#line 472 "file9.s"
-/*0x00a0*/ BYTE $0
-#line 472 "file9.s"
-/*0x00a1*/ BYTE $0
-#line 472 "file9.s"
-/*0x00a2*/ BYTE $0
-#line 148 "file9.s"
-/*0x00a3*/ MOVQ $0x123456789, AX
-#line 148 "file9.s"
-/*0x00ad*/ BYTE $0
-#line 148 "file9.s"
-/*0x00ae*/ BYTE $0
-#line 148 "file9.s"
-/*0x00af*/ BYTE $0
-#line 148 "file9.s"
-/*0x00b0*/ BYTE $0
-#line 562 "file9.s"
-/*0x00b1*/ MOVL $0x1234567, AX
-#line 562 "file9.s"
-/*0x00b6*/ BYTE $0
-#line 562 "file9.s"
-/*0x00b7*/ PUSHQ AX
-#line 562 "file9.s"
-/*0x00b8*/ BYTE $0
-#line 532 "file9.s"
-/*0x00b9*/ MOVQ $0x123456789, AX
-#line 532 "file9.s"
-/*0x00c3*/ MOVQ $0x123456789, AX
-#line 282 "file9.s"
-/*0x00cd*/ BYTE $0
-#line 282 "file9.s"
-/*0x00ce*/ BYTE $0
-#line 282 "file9.s"
-/*0x00cf*/ BYTE $0
-/* */ PCDATA $2, $861
-#line 282 "file9.s"
-/*0x00d0*/ BYTE $0
-#line 282 "file9.s"
-/*0x00d1*/ BYTE $0
-/* */ PCDATA $2, $310
-#line 282 "file9.s"
-/*0x00d2*/ BYTE $0
-#line 416 "file9.s"
-/*0x00d3*/ MOVQ $0x123456789, AX
-#line 416 "file9.s"
-/*0x00dd*/ MOVL $0x1234567, AX
-#line 780 "file9.s"
-/*0x00e2*/ BYTE $0
-#line 780 "file9.s"
-/*0x00e3*/ BYTE $0
-#line 765 "file9.s"
-/*0x00e4*/ MOVL $0x1234567, AX
-#line 523 "file9.s"
-/*0x00e9*/ BYTE $0
-#line 523 "file9.s"
-/*0x00ea*/ BYTE $0
-#line 523 "file9.s"
-/*0x00eb*/ BYTE $0
-#line 733 "file9.s"
-/*0x00ec*/ MOVQ $0x123456789, AX
-#line 378 "file9.s"
-/*0x00f6*/ BYTE $0
-#line 378 "file9.s"
-/*0x00f7*/ BYTE $0
-#line 378 "file9.s"
-/*0x00f8*/ BYTE $0
-#line 378 "file9.s"
-/*0x00f9*/ BYTE $0
-#line 540 "file9.s"
-/*0x00fa*/ BYTE $0
-#line 540 "file9.s"
-/*0x00fb*/ BYTE $0
-#line 57 "file9.s"
-/*0x00fc*/ BYTE $0
-#line 57 "file9.s"
-/*0x00fd*/ BYTE $0
-#line 57 "file9.s"
-/*0x00fe*/ BYTE $0
-#line 417 "file9.s"
-/*0x00ff*/ BYTE $0
-/* */ PCDATA $1, $932
-#line 417 "file9.s"
-/*0x0100*/ BYTE $0
-#line 417 "file9.s"
-/*0x0101*/ BYTE $0
-#line 417 "file9.s"
-/*0x0102*/ BYTE $0
-#line 417 "file9.s"
-/*0x0103*/ BYTE $0
-#line 713 "file9.s"
-/*0x0104*/ MOVL $0x1234567, AX
-#line 610 "file0.s"
-/*0x0109*/ MOVQ $0x123456789, AX
-#line 610 "file0.s"
-/*0x0113*/ MOVL $0x1234567, AX
-#line 610 "file0.s"
-/*0x0118*/ BYTE $0
-#line 787 "file0.s"
-/*0x0119*/ MOVQ $0x123456789, AX
-#line 829 "file0.s"
-/*0x0123*/ BYTE $0
-#line 829 "file0.s"
-/*0x0124*/ BYTE $0
-#line 829 "file0.s"
-/*0x0125*/ BYTE $0
-#line 849 "file0.s"
-/*0x0126*/ BYTE $0
-#line 849 "file0.s"
-/*0x0127*/ BYTE $0
-#line 849 "file0.s"
-/*0x0128*/ BYTE $0
-/* */ PCDATA $2, $356
-#line 849 "file0.s"
-/*0x0129*/ BYTE $0
-#line 849 "file0.s"
-/*0x012a*/ BYTE $0
-#line 88 "file0.s"
-/*0x012b*/ MOVL $0x1234567, AX
-#line 88 "file0.s"
-/*0x0130*/ BYTE $0
-#line 88 "file0.s"
-/*0x0131*/ BYTE $0
-#line 88 "file0.s"
-/*0x0132*/ BYTE $0
-#line 684 "file0.s"
-/*0x0133*/ BYTE $0
-#line 684 "file0.s"
-/*0x0134*/ BYTE $0
-#line 684 "file0.s"
-/*0x0135*/ BYTE $0
-#line 684 "file0.s"
-/*0x0136*/ BYTE $0
-#line 238 "file0.s"
-/*0x0137*/ BYTE $0
-#line 238 "file0.s"
-/*0x0138*/ BYTE $0
-#line 238 "file0.s"
-/*0x0139*/ PUSHQ AX
-#line 238 "file0.s"
-/*0x013a*/ BYTE $0
-#line 238 "file0.s"
-/*0x013b*/ BYTE $0
-#line 603 "file0.s"
-/*0x013c*/ BYTE $0
-#line 981 "file0.s"
-/*0x013d*/ BYTE $0
-#line 981 "file0.s"
-/*0x013e*/ POPQ AX
-#line 616 "file0.s"
-/*0x013f*/ BYTE $0
-#line 616 "file0.s"
-/*0x0140*/ BYTE $0
-#line 616 "file0.s"
-/*0x0141*/ BYTE $0
-#line 616 "file0.s"
-/*0x0142*/ BYTE $0
-#line 716 "file0.s"
-/*0x0143*/ MOVL $0x1234567, AX
-#line 716 "file0.s"
-/*0x0148*/ BYTE $0
-#line 716 "file0.s"
-/*0x0149*/ BYTE $0
-#line 34 "file0.s"
-/*0x014a*/ BYTE $0
-#line 34 "file0.s"
-/*0x014b*/ BYTE $0
-#line 34 "file0.s"
-/*0x014c*/ BYTE $0
-#line 90 "file0.s"
-/*0x014d*/ MOVL $0x1234567, AX
-#line 316 "file0.s"
-/*0x0152*/ MOVQ $0x123456789, AX
-#line 230 "file0.s"
-/*0x015c*/ MOVQ $0x123456789, AX
-#line 799 "file0.s"
-/*0x0166*/ MOVQ $0x123456789, AX
-#line 799 "file0.s"
-/*0x0170*/ BYTE $0
-#line 799 "file0.s"
-/*0x0171*/ BYTE $0
-/* */ PCDATA $1, $247
-#line 799 "file0.s"
-/*0x0172*/ BYTE $0
-#line 799 "file0.s"
-/*0x0173*/ BYTE $0
-#line 799 "file0.s"
-/*0x0174*/ BYTE $0
-#line 655 "file0.s"
-/*0x0175*/ MOVL $0x1234567, AX
-#line 655 "file0.s"
-/*0x017a*/ BYTE $0
-#line 551 "file0.s"
-/*0x017b*/ BYTE $0
-#line 551 "file0.s"
-/*0x017c*/ BYTE $0
-#line 271 "file0.s"
-/*0x017d*/ MOVQ $0x123456789, AX
-#line 271 "file0.s"
-/*0x0187*/ MOVQ $0x123456789, AX
-#line 271 "file0.s"
-/*0x0191*/ MOVL $0x1234567, AX
-#line 271 "file0.s"
-/*0x0196*/ BYTE $0
-#line 271 "file0.s"
-/*0x0197*/ BYTE $0
-#line 271 "file0.s"
-/*0x0198*/ BYTE $0
-#line 233 "file0.s"
-/*0x0199*/ MOVL $0x1234567, AX
-#line 233 "file0.s"
-/*0x019e*/ BYTE $0
-#line 233 "file0.s"
-/*0x019f*/ BYTE $0
-#line 233 "file0.s"
-/*0x01a0*/ BYTE $0
-#line 233 "file0.s"
-/*0x01a1*/ BYTE $0
-#line 738 "file0.s"
-/*0x01a2*/ MOVL $0x1234567, AX
-#line 738 "file0.s"
-/*0x01a7*/ BYTE $0
-#line 800 "file0.s"
-/*0x01a8*/ BYTE $0
-#line 800 "file0.s"
-/*0x01a9*/ BYTE $0
-#line 646 "file0.s"
-/*0x01aa*/ MOVQ $0x123456789, AX
-#line 646 "file0.s"
-/*0x01b4*/ BYTE $0
-#line 646 "file0.s"
-/*0x01b5*/ BYTE $0
-#line 646 "file0.s"
-/*0x01b6*/ BYTE $0
-#line 709 "file0.s"
-/*0x01b7*/ BYTE $0
-#line 709 "file0.s"
-/*0x01b8*/ BYTE $0
-#line 709 "file0.s"
-/*0x01b9*/ BYTE $0
-#line 709 "file0.s"
-/*0x01ba*/ BYTE $0
-#line 296 "file0.s"
-/*0x01bb*/ BYTE $0
-#line 296 "file0.s"
-/*0x01bc*/ BYTE $0
-#line 296 "file0.s"
-/*0x01bd*/ BYTE $0
-#line 71 "file0.s"
-/*0x01be*/ BYTE $0
-#line 71 "file0.s"
-/*0x01bf*/ BYTE $0
-#line 71 "file0.s"
-/*0x01c0*/ BYTE $0
-#line 7 "file2.s"
-/*0x01c1*/ BYTE $0
-#line 747 "file2.s"
-/*0x01c2*/ BYTE $0
-#line 177 "file2.s"
-/*0x01c3*/ MOVQ $0x123456789, AX
-#line 177 "file2.s"
-/*0x01cd*/ MOVQ $0x123456789, AX
-#line 177 "file2.s"
-/*0x01d7*/ MOVL $0x1234567, AX
-#line 177 "file2.s"
-/*0x01dc*/ BYTE $0
-#line 177 "file2.s"
-/*0x01dd*/ BYTE $0
-#line 177 "file2.s"
-/*0x01de*/ BYTE $0
-#line 72 "file2.s"
-/*0x01df*/ BYTE $0
-#line 215 "file2.s"
-/*0x01e0*/ MOVL $0x1234567, AX
-#line 215 "file2.s"
-/*0x01e5*/ BYTE $0
-#line 215 "file2.s"
-/*0x01e6*/ BYTE $0
-#line 215 "file2.s"
-/*0x01e7*/ BYTE $0
-#line 946 "file2.s"
-/*0x01e8*/ BYTE $0
-#line 946 "file2.s"
-/*0x01e9*/ BYTE $0
-#line 946 "file2.s"
-/*0x01ea*/ BYTE $0
-#line 946 "file2.s"
-/*0x01eb*/ BYTE $0
-#line 263 "file2.s"
-/*0x01ec*/ MOVL $0x1234567, AX
-#line 263 "file2.s"
-/*0x01f1*/ BYTE $0
-#line 897 "file2.s"
-/*0x01f2*/ MOVQ $0x123456789, AX
-#line 897 "file2.s"
-/*0x01fc*/ MOVQ $0x123456789, AX
-#line 897 "file2.s"
-/*0x0206*/ BYTE $0
-#line 897 "file2.s"
-/*0x0207*/ BYTE $0
-#line 897 "file2.s"
-/*0x0208*/ BYTE $0
-#line 229 "file2.s"
-/*0x0209*/ BYTE $0
-#line 229 "file2.s"
-/*0x020a*/ BYTE $0
-#line 229 "file2.s"
-/*0x020b*/ BYTE $0
-#line 229 "file2.s"
-/*0x020c*/ BYTE $0
-/* */ PCDATA $1, $7
-#line 229 "file2.s"
-/*0x020d*/ MOVL $0x1234567, AX
-#line 229 "file2.s"
-/*0x0212*/ BYTE $0
-#line 353 "file2.s"
-/*0x0213*/ BYTE $0
-#line 353 "file2.s"
-/*0x0214*/ BYTE $0
-#line 353 "file2.s"
-/*0x0215*/ BYTE $0
-#line 353 "file2.s"
-/*0x0216*/ BYTE $0
-#line 852 "file2.s"
-/*0x0217*/ BYTE $0
-#line 852 "file2.s"
-/*0x0218*/ BYTE $0
-#line 852 "file2.s"
-/*0x0219*/ BYTE $0
-#line 852 "file2.s"
-/*0x021a*/ BYTE $0
-#line 852 "file2.s"
-/*0x021b*/ PUSHQ AX
-#line 852 "file2.s"
-/*0x021c*/ BYTE $0
-#line 852 "file2.s"
-/*0x021d*/ BYTE $0
-#line 852 "file2.s"
-/*0x021e*/ BYTE $0
-#line 904 "file2.s"
-/*0x021f*/ MOVQ $0x123456789, AX
-#line 904 "file2.s"
-/*0x0229*/ BYTE $0
-#line 904 "file2.s"
-/*0x022a*/ BYTE $0
-#line 882 "file2.s"
-/*0x022b*/ BYTE $0
-#line 905 "file2.s"
-/*0x022c*/ BYTE $0
-#line 410 "file2.s"
-/*0x022d*/ MOVQ $0x123456789, AX
-#line 410 "file2.s"
-/*0x0237*/ BYTE $0
-#line 410 "file2.s"
-/*0x0238*/ BYTE $0
-#line 410 "file2.s"
-/*0x0239*/ POPQ AX
-#line 410 "file2.s"
-/*0x023a*/ BYTE $0
-#line 410 "file2.s"
-/*0x023b*/ BYTE $0
-#line 410 "file2.s"
-/*0x023c*/ BYTE $0
-/* */ PCDATA $2, $173
-#line 410 "file2.s"
-/*0x023d*/ MOVL $0x1234567, AX
-#line 410 "file2.s"
-/*0x0242*/ BYTE $0
-/* */ PCDATA $1, $396
-#line 410 "file2.s"
-/*0x0243*/ BYTE $0
-#line 410 "file2.s"
-/*0x0244*/ BYTE $0
-#line 666 "file2.s"
-/*0x0245*/ BYTE $0
-#line 129 "file2.s"
-/*0x0246*/ MOVQ $0x123456789, AX
-#line 129 "file2.s"
-/*0x0250*/ BYTE $0
-#line 391 "file2.s"
-/*0x0251*/ BYTE $0
-#line 696 "file2.s"
-/*0x0252*/ MOVL $0x1234567, AX
-#line 940 "file2.s"
-/*0x0257*/ BYTE $0
-#line 940 "file2.s"
-/*0x0258*/ BYTE $0
-#line 606 "file2.s"
-/*0x0259*/ MOVL $0x1234567, AX
-#line 606 "file2.s"
-/*0x025e*/ BYTE $0
-#line 648 "file2.s"
-/*0x025f*/ MOVQ $0x123456789, AX
-#line 648 "file2.s"
-/*0x0269*/ BYTE $0
-#line 648 "file2.s"
-/*0x026a*/ BYTE $0
-/* */ PCDATA $2, $84
-#line 648 "file2.s"
-/*0x026b*/ MOVQ $0x123456789, AX
-#line 648 "file2.s"
-/*0x0275*/ MOVQ $0x123456789, AX
-#line 648 "file2.s"
-/*0x027f*/ MOVQ $0x123456789, AX
-#line 648 "file2.s"
-/*0x0289*/ MOVQ $0x123456789, AX
-#line 648 "file2.s"
-/*0x0293*/ MOVQ $0x123456789, AX
-#line 648 "file2.s"
-/*0x029d*/ MOVL $0x1234567, AX
-#line 648 "file2.s"
-/*0x02a2*/ PUSHQ AX
-#line 648 "file2.s"
-/*0x02a3*/ MOVL $0x1234567, AX
-#line 648 "file2.s"
-/*0x02a8*/ BYTE $0
-#line 648 "file2.s"
-/*0x02a9*/ BYTE $0
-#line 648 "file2.s"
-/*0x02aa*/ BYTE $0
-#line 648 "file2.s"
-/*0x02ab*/ BYTE $0
-#line 449 "file2.s"
-/*0x02ac*/ MOVQ $0x123456789, AX
-#line 449 "file2.s"
-/*0x02b6*/ MOVL $0x1234567, AX
-#line 284 "file2.s"
-/*0x02bb*/ BYTE $0
-#line 284 "file2.s"
-/*0x02bc*/ BYTE $0
-#line 284 "file2.s"
-/*0x02bd*/ BYTE $0
-#line 284 "file2.s"
-/*0x02be*/ BYTE $0
-#line 26 "file2.s"
-/*0x02bf*/ MOVQ $0x123456789, AX
-#line 26 "file2.s"
-/*0x02c9*/ MOVL $0x1234567, AX
-#line 26 "file2.s"
-/*0x02ce*/ BYTE $0
-#line 26 "file2.s"
-/*0x02cf*/ BYTE $0
-#line 26 "file2.s"
-/*0x02d0*/ BYTE $0
-#line 605 "file2.s"
-/*0x02d1*/ MOVL $0x1234567, AX
-#line 605 "file2.s"
-/*0x02d6*/ BYTE $0
-#line 605 "file2.s"
-/*0x02d7*/ BYTE $0
-#line 605 "file2.s"
-/*0x02d8*/ BYTE $0
-#line 593 "file2.s"
-/*0x02d9*/ MOVL $0x1234567, AX
-#line 541 "file2.s"
-/*0x02de*/ MOVL $0x1234567, AX
-#line 541 "file2.s"
-/*0x02e3*/ BYTE $0
-#line 541 "file2.s"
-/*0x02e4*/ BYTE $0
-#line 181 "file2.s"
-/*0x02e5*/ MOVQ $0x123456789, AX
-#line 181 "file2.s"
-/*0x02ef*/ BYTE $0
-#line 907 "file2.s"
-/*0x02f0*/ MOVQ $0x123456789, AX
-#line 704 "file2.s"
-/*0x02fa*/ MOVQ $0x123456789, AX
-#line 704 "file2.s"
-/*0x0304*/ MOVQ $0x123456789, AX
-#line 704 "file2.s"
-/*0x030e*/ MOVL $0x1234567, AX
-#line 704 "file2.s"
-/*0x0313*/ BYTE $0
-#line 704 "file2.s"
-/*0x0314*/ BYTE $0
-#line 704 "file2.s"
-/*0x0315*/ BYTE $0
-#line 704 "file2.s"
-/*0x0316*/ BYTE $0
-#line 859 "file2.s"
-/*0x0317*/ MOVL $0x1234567, AX
-#line 407 "file2.s"
-/*0x031c*/ BYTE $0
-#line 407 "file2.s"
-/*0x031d*/ BYTE $0
-/* */ PCDATA $2, $569
-#line 407 "file2.s"
-/*0x031e*/ MOVL $0x1234567, AX
-#line 407 "file2.s"
-/*0x0323*/ BYTE $0
-#line 407 "file2.s"
-/*0x0324*/ BYTE $0
-#line 407 "file2.s"
-/*0x0325*/ BYTE $0
-/* */ PCDATA $1, $937
-#line 407 "file2.s"
-/*0x0326*/ MOVL $0x1234567, AX
-#line 407 "file2.s"
-/*0x032b*/ BYTE $0
-#line 774 "file2.s"
-/*0x032c*/ MOVQ $0x123456789, AX
-#line 774 "file2.s"
-/*0x0336*/ BYTE $0
-#line 547 "file2.s"
-/*0x0337*/ BYTE $0
-#line 547 "file2.s"
-/*0x0338*/ BYTE $0
-#line 547 "file2.s"
-/*0x0339*/ BYTE $0
-#line 547 "file2.s"
-/*0x033a*/ PUSHQ AX
-#line 547 "file2.s"
-/*0x033b*/ MOVL $0x1234567, AX
-#line 427 "file2.s"
-/*0x0340*/ MOVL $0x1234567, AX
-/* */ PCDATA $1, $462
-#line 427 "file2.s"
-/*0x0345*/ MOVQ $0x123456789, AX
-#line 427 "file2.s"
-/*0x034f*/ MOVL $0x1234567, AX
-#line 427 "file2.s"
-/*0x0354*/ BYTE $0
-#line 427 "file2.s"
-/*0x0355*/ BYTE $0
-#line 427 "file2.s"
-/*0x0356*/ BYTE $0
-#line 427 "file2.s"
-/*0x0357*/ BYTE $0
-/* */ PCDATA $2, $303
-#line 427 "file2.s"
-/*0x0358*/ MOVQ $0x123456789, AX
-#line 427 "file2.s"
-/*0x0362*/ BYTE $0
-#line 427 "file2.s"
-/*0x0363*/ BYTE $0
-#line 708 "file2.s"
-/*0x0364*/ BYTE $0
-#line 708 "file2.s"
-/*0x0365*/ BYTE $0
-#line 708 "file2.s"
-/*0x0366*/ BYTE $0
-#line 708 "file2.s"
-/*0x0367*/ BYTE $0
-#line 218 "file2.s"
-/*0x0368*/ MOVL $0x1234567, AX
-#line 44 "file2.s"
-/*0x036d*/ BYTE $0
-#line 915 "file2.s"
-/*0x036e*/ BYTE $0
-#line 915 "file2.s"
-/*0x036f*/ BYTE $0
-#line 915 "file2.s"
-/*0x0370*/ BYTE $0
-#line 915 "file2.s"
-/*0x0371*/ BYTE $0
-#line 122 "file2.s"
-/*0x0372*/ MOVQ $0x123456789, AX
-#line 122 "file2.s"
-/*0x037c*/ MOVL $0x1234567, AX
-#line 122 "file2.s"
-/*0x0381*/ BYTE $0
-#line 122 "file2.s"
-/*0x0382*/ BYTE $0
-#line 266 "file2.s"
-/*0x0383*/ BYTE $0
-#line 266 "file2.s"
-/*0x0384*/ BYTE $0
-#line 368 "file2.s"
-/*0x0385*/ BYTE $0
-#line 368 "file2.s"
-/*0x0386*/ BYTE $0
-#line 368 "file2.s"
-/*0x0387*/ BYTE $0
-#line 368 "file2.s"
-/*0x0388*/ BYTE $0
-#line 775 "file2.s"
-/*0x0389*/ BYTE $0
-#line 10 "file8.s"
-/*0x038a*/ BYTE $0
-#line 10 "file8.s"
-/*0x038b*/ BYTE $0
-#line 422 "file8.s"
-/*0x038c*/ MOVL $0x1234567, AX
-#line 422 "file8.s"
-/*0x0391*/ BYTE $0
-#line 613 "file8.s"
-/*0x0392*/ BYTE $0
-#line 613 "file8.s"
-/*0x0393*/ BYTE $0
-#line 613 "file8.s"
-/*0x0394*/ BYTE $0
-#line 697 "file8.s"
-/*0x0395*/ MOVQ $0x123456789, AX
-#line 697 "file8.s"
-/*0x039f*/ MOVQ $0x123456789, AX
-#line 697 "file8.s"
-/*0x03a9*/ BYTE $0
-#line 697 "file8.s"
-/*0x03aa*/ BYTE $0
-#line 697 "file8.s"
-/*0x03ab*/ BYTE $0
- POPQ AX
- POPQ AX
- POPQ AX
- RET
-
-GLOBL funcdata2(SB), $16
-
-TEXT start(SB),7,$0
- CALL func0(SB)
- CALL func1(SB)
- CALL func2(SB)
- MOVQ $runtime·pclntab(SB), AX
-
- RET
+++ /dev/null
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package main
-
-// round returns size rounded up to the next multiple of align;
-// align must be a power of two.
-func round(size, align Addr) Addr {
- return (size + align - 1) &^ (align - 1)
-}
+++ /dev/null
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Writing of executable and (for hostlink mode) object files.
-
-package main
-
-import "io"
-
-func (p *Prog) write(w io.Writer) {
- p.Entry = p.Syms[p.startSym].Addr
- p.formatter.write(w, p)
-}
For more information, see the discussion of the -printf flag.
-shadowstrict
Whether to be strict about shadowing; can be noisy.
- -test
- For testing only: sets -all and -shadow.
*/
package main // import "golang.org/x/tools/cmd/vet"
)
var (
- verbose = flag.Bool("v", false, "verbose")
- testFlag = flag.Bool("test", false, "for testing only: sets -all and -shadow")
- tags = flag.String("tags", "", "comma-separated list of build tags to apply when parsing")
- tagList = []string{} // exploded version of tags flag; set in main
+ verbose = flag.Bool("v", false, "verbose")
+ tags = flag.String("tags", "", "comma-separated list of build tags to apply when parsing")
+ tagList = []string{} // exploded version of tags flag; set in main
)
var exitCode = 0
// vet tells whether to report errors for the named check, a flag name.
func vet(name string) bool {
- if *testFlag {
- return true
- }
return report[name].isTrue()
}
}
name = strings.ToLower(name)
if name[len(name)-1] == 'f' {
- printfList[name] = skip
+ isFormattedPrint[name] = true
} else {
printList[name] = skip
}
}
}
-// printfList records the formatted-print functions. The value is the location
-// of the format parameter. Names are lower-cased so the lookup is
-// case insensitive.
-var printfList = map[string]int{
- "errorf": 0,
- "fatalf": 0,
- "fprintf": 1,
- "logf": 0,
- "panicf": 0,
- "printf": 0,
- "sprintf": 0,
+// isFormattedPrint records the formatted-print functions. Names are
+// lower-cased so the lookup is case insensitive.
+var isFormattedPrint = map[string]bool{
+ "errorf": true,
+ "fatalf": true,
+ "fprintf": true,
+ "logf": true,
+ "panicf": true,
+ "printf": true,
+ "sprintf": true,
}
// printList records the unformatted-print functions. The value is the location
"sprint": 0, "sprintln": 0,
}
+// signature returns the types.Signature of a call. If it is unable to
+// identify the call's signature, it can return nil.
+func signature(f *File, call *ast.CallExpr) *types.Signature {
+ typ := f.pkg.types[call.Fun].Type
+ if typ == nil {
+ return nil
+ }
+ sig, _ := typ.(*types.Signature)
+ return sig
+}
+
+// formatIndex returns the index of the format string parameter within
+// a signature. If it cannot find any format string parameter, it
+// returns -1.
+func formatIndex(sig *types.Signature) int {
+ if sig == nil {
+ return -1
+ }
+ idx := -1
+ for i := 0; i < sig.Params().Len(); i++ {
+ p := sig.Params().At(i)
+ if typ, ok := p.Type().(*types.Basic); ok && typ.Kind() == types.String {
+ idx = i
+ }
+ }
+ return idx
+}
+
// checkCall triggers the print-specific checks if the call invokes a print function.
func checkFmtPrintfCall(f *File, node ast.Node) {
if d, ok := node.(*ast.FuncDecl); ok && isStringer(f, d) {
}
name := strings.ToLower(Name)
- if skip, ok := printfList[name]; ok {
- f.checkPrintf(call, Name, skip)
+ if _, ok := isFormattedPrint[name]; ok {
+ f.checkPrintf(call, Name)
return
}
if skip, ok := printList[name]; ok {
// checkPrintf checks a call to a formatted print routine such as Printf.
// call.Args[formatIndex] is (well, should be) the format argument.
-func (f *File) checkPrintf(call *ast.CallExpr, name string, formatIndex int) {
- if formatIndex >= len(call.Args) {
+func (f *File) checkPrintf(call *ast.CallExpr, name string) {
+ idx := formatIndex(signature(f, call))
+
+ if idx < 0 {
+ f.Badf(call.Pos(), "no formatting directive in %s call", name)
+ return
+ }
+
+ if idx >= len(call.Args) {
f.Bad(call.Pos(), "too few arguments in call to", name)
return
}
- lit := f.pkg.types[call.Args[formatIndex]].Value
+
+ lit := f.pkg.types[call.Args[idx]].Value
if lit == nil {
if *verbose {
f.Warn(call.Pos(), "can't check non-constant format in call to", name)
return
}
format := constant.StringVal(lit)
- firstArg := formatIndex + 1 // Arguments are immediately after format string.
+ firstArg := idx + 1 // Arguments are immediately after format string.
if !strings.Contains(format, "%") {
if len(call.Args) > firstArg {
f.Badf(call.Pos(), "no formatting directive in %s call", name)
Log(3) // OK
Log("%d", 3) // ERROR "possible formatting directive in Log call"
Logf("%d", 3)
- Logf("%d", "hi") // ERROR "arg .hi. for printf verb %d of wrong type: untyped string"
+ Logf("%d", "hi") // ERROR "arg .hi. for printf verb %d of wrong type: string"
+ Errorf(1, "%d", 3) // OK
+ Errorf(1, "%d", "hi") // ERROR "arg .hi. for printf verb %d of wrong type: string"
}
// A function we use as a function value; it has no other purpose.
panic("don't call - testing only")
}
+// Logf is used by the test so we must declare it.
+func Logf(format string, args ...interface{}) {
+ panic("don't call - testing only")
+}
+
// printf is used by the test so we must declare it.
func printf(format string, args ...interface{}) {
panic("don't call - testing only")
}
+// Errorf is used by the test for a case in which the first parameter
+// is not a format string.
+func Errorf(i int, format string, args ...interface{}) {
+ panic("don't call - testing only")
+}
+
// multi is used by the test.
func multi() []interface{} {
panic("don't call - testing only")
flags := []string{
"./" + binary,
"-printfuncs=Warn:1,Warnf:1",
- "-test", // TODO: Delete once -shadow is part of -all.
+ "-all",
+ "-shadow",
}
cmd := exec.Command(errchk, append(flags, files...)...)
if !run(cmd, t) {
which in turn was largely transliterated from the Plan 9 version
written in C and documented at
- http://plan9.bell-labs.com/magic/man2html/1/yacc
+ https://9p.io/magic/man2html/1/yacc
Adepts of the original yacc will have no trouble adapting to this
form of the tool.
length := br.ReadBits(5)
for j := range lengths {
for {
+ if length < 1 || length > 20 {
+ return StructuralError("Huffman length out of range")
+ }
if !br.ReadBit() {
break
}
length++
}
}
- if length < 0 || length > 20 {
- return StructuralError("Huffman length out of range")
- }
lengths[j] = uint8(length)
}
huffmanTrees[i], err = newHuffmanTree(lengths)
import (
"bytes"
- "encoding/base64"
"encoding/hex"
+ "fmt"
"io"
"io/ioutil"
"testing"
)
-func TestBitReader(t *testing.T) {
- buf := bytes.NewReader([]byte{0xaa})
- br := newBitReader(buf)
- if n := br.ReadBits(1); n != 1 {
- t.Errorf("read 1 wrong")
- }
- if n := br.ReadBits(1); n != 0 {
- t.Errorf("read 2 wrong")
- }
- if n := br.ReadBits(1); n != 1 {
- t.Errorf("read 3 wrong")
- }
- if n := br.ReadBits(1); n != 0 {
- t.Errorf("read 4 wrong")
- }
-}
-
-func TestBitReaderLarge(t *testing.T) {
- buf := bytes.NewReader([]byte{0x12, 0x34, 0x56, 0x78})
- br := newBitReader(buf)
- if n := br.ReadBits(32); n != 0x12345678 {
- t.Errorf("got: %x want: %x", n, 0x12345678)
- }
-}
-
-func readerFromHex(s string) io.Reader {
- data, err := hex.DecodeString(s)
+func mustDecodeHex(s string) []byte {
+ b, err := hex.DecodeString(s)
if err != nil {
- panic("readerFromHex: bad input")
- }
- return bytes.NewReader(data)
-}
-
-func decompressHex(s string) (out []byte, err error) {
- r := NewReader(readerFromHex(s))
- return ioutil.ReadAll(r)
-}
-
-func TestHelloWorldBZ2(t *testing.T) {
- out, err := decompressHex(helloWorldBZ2Hex)
- if err != nil {
- t.Errorf("error from Read: %s", err)
- return
- }
-
- if !bytes.Equal(helloWorld, out) {
- t.Errorf("got %x, want %x", out, helloWorld)
- }
-}
-
-func TestConcat(t *testing.T) {
- out, err := decompressHex(helloWorldBZ2Hex + helloWorldBZ2Hex)
- if err != nil {
- t.Errorf("error from Read: %s", err)
- return
- }
-
- hello2 := bytes.Repeat(helloWorld, 2)
- if !bytes.Equal(hello2, out) {
- t.Errorf("got %x, want %x", out, hello2)
+ panic(err)
}
+ return b
}
-func testZeros(t *testing.T, inHex string, n int) {
- out, err := decompressHex(inHex)
+func mustLoadFile(f string) []byte {
+ b, err := ioutil.ReadFile(f)
if err != nil {
- t.Errorf("error from Read: %s", err)
- return
- }
-
- expected := make([]byte, n)
-
- if !bytes.Equal(expected, out) {
- allZeros := true
- for _, b := range out {
- if b != 0 {
- allZeros = false
- break
+ panic(err)
+ }
+ return b
+}
+
+func trim(b []byte) string {
+ const limit = 1024
+ if len(b) < limit {
+ return fmt.Sprintf("%q", b)
+ }
+ return fmt.Sprintf("%q...", b[:limit])
+}
+
+func TestReader(t *testing.T) {
+ var vectors = []struct {
+ desc string
+ input []byte
+ output []byte
+ fail bool
+ }{{
+ desc: "hello world",
+ input: mustDecodeHex("" +
+ "425a68393141592653594eece83600000251800010400006449080200031064c" +
+ "4101a7a9a580bb9431f8bb9229c28482776741b0",
+ ),
+ output: []byte("hello world\n"),
+ }, {
+ desc: "concatenated files",
+ input: mustDecodeHex("" +
+ "425a68393141592653594eece83600000251800010400006449080200031064c" +
+ "4101a7a9a580bb9431f8bb9229c28482776741b0425a68393141592653594eec" +
+ "e83600000251800010400006449080200031064c4101a7a9a580bb9431f8bb92" +
+ "29c28482776741b0",
+ ),
+ output: []byte("hello world\nhello world\n"),
+ }, {
+ desc: "32B zeros",
+ input: mustDecodeHex("" +
+ "425a6839314159265359b5aa5098000000600040000004200021008283177245" +
+ "385090b5aa5098",
+ ),
+ output: make([]byte, 32),
+ }, {
+ desc: "1MiB zeros",
+ input: mustDecodeHex("" +
+ "425a683931415926535938571ce50008084000c0040008200030cc0529a60806" +
+ "c4201e2ee48a70a12070ae39ca",
+ ),
+ output: make([]byte, 1<<20),
+ }, {
+ desc: "random data",
+ input: mustLoadFile("testdata/pass-random1.bz2"),
+ output: mustLoadFile("testdata/pass-random1.bin"),
+ }, {
+ desc: "random data - full symbol range",
+ input: mustLoadFile("testdata/pass-random2.bz2"),
+ output: mustLoadFile("testdata/pass-random2.bin"),
+ }, {
+ desc: "random data - uses RLE1 stage",
+ input: mustDecodeHex("" +
+ "425a6839314159265359d992d0f60000137dfe84020310091c1e280e100e0428" +
+ "01099210094806c0110002e70806402000546034000034000000f28300000320" +
+ "00d3403264049270eb7a9280d308ca06ad28f6981bee1bf8160727c7364510d7" +
+ "3a1e123083421b63f031f63993a0f40051fbf177245385090d992d0f60",
+ ),
+ output: mustDecodeHex("" +
+ "92d5652616ac444a4a04af1a8a3964aca0450d43d6cf233bd03233f4ba92f871" +
+ "9e6c2a2bd4f5f88db07ecd0da3a33b263483db9b2c158786ad6363be35d17335" +
+ "ba",
+ ),
+ }, {
+ desc: "1MiB sawtooth",
+ input: mustLoadFile("testdata/pass-sawtooth.bz2"),
+ output: func() []byte {
+ b := make([]byte, 1<<20)
+ for i := range b {
+ b[i] = byte(i)
+ }
+ return b
+ }(),
+ }, {
+ desc: "RLE2 buffer overrun - issue 5747",
+ input: mustLoadFile("testdata/fail-issue5747.bz2"),
+ fail: true,
+ }, {
+ desc: "out-of-range selector - issue 8363",
+ input: mustDecodeHex("" +
+ "425a68393141592653594eece83600000251800010400006449080200031064c" +
+ "4101a7a9a580bb943117724538509000000000",
+ ),
+ fail: true,
+ }, {
+ desc: "bad block size - issue 13941",
+ input: mustDecodeHex("" +
+ "425a683131415926535936dc55330063ffc0006000200020a40830008b0008b8" +
+ "bb9229c28481b6e2a998",
+ ),
+ fail: true,
+ }, {
+ desc: "bad huffman delta",
+ input: mustDecodeHex("" +
+ "425a6836314159265359b1f7404b000000400040002000217d184682ee48a70a" +
+ "12163ee80960",
+ ),
+ fail: true,
+ }}
+
+ for i, v := range vectors {
+ rd := NewReader(bytes.NewReader(v.input))
+ buf, err := ioutil.ReadAll(rd)
+
+ if fail := bool(err != nil); fail != v.fail {
+ if fail {
+ t.Errorf("test %d (%s), unexpected failure: %v", i, v.desc, err)
+ } else {
+ t.Errorf("test %d (%s), unexpected success", i, v.desc)
}
}
- t.Errorf("incorrect result, got %d bytes (allZeros: %t)", len(out), allZeros)
+ if !v.fail && !bytes.Equal(buf, v.output) {
+ t.Errorf("test %d (%s), output mismatch:\ngot %s\nwant %s", i, v.desc, trim(buf), trim(v.output))
+ }
}
}
-func Test32Zeros(t *testing.T) {
- testZeros(t, thirtyTwoZerosBZ2Hex, 32)
-}
-
-func Test1MBZeros(t *testing.T) {
- testZeros(t, oneMBZerosBZ2Hex, 1024*1024)
-}
-
-func testRandomData(t *testing.T, compressedHex, uncompressedHex string) {
- out, err := decompressHex(compressedHex)
- if err != nil {
- t.Errorf("error from Read: %s", err)
- return
- }
-
- expected, _ := hex.DecodeString(uncompressedHex)
-
- if !bytes.Equal(out, expected) {
- t.Errorf("incorrect result\ngot: %x\nwant: %x", out, expected)
+func TestBitReader(t *testing.T) {
+ var vectors = []struct {
+ nbits uint // Number of bits to read
+ value int // Expected output value (0 for error)
+ fail bool // Expected operation failure?
+ }{
+ {nbits: 1, value: 1},
+ {nbits: 1, value: 0},
+ {nbits: 1, value: 1},
+ {nbits: 5, value: 11},
+ {nbits: 32, value: 0x12345678},
+ {nbits: 15, value: 14495},
+ {nbits: 3, value: 6},
+ {nbits: 6, value: 13},
+ {nbits: 1, fail: true},
+ }
+
+ rd := bytes.NewReader([]byte{0xab, 0x12, 0x34, 0x56, 0x78, 0x71, 0x3f, 0x8d})
+ br := newBitReader(rd)
+ for i, v := range vectors {
+ val := br.ReadBits(v.nbits)
+ if fail := bool(br.err != nil); fail != v.fail {
+ if fail {
+ t.Errorf("test %d, unexpected failure: ReadBits(%d) = %v", i, v.nbits, br.err)
+ } else {
+ t.Errorf("test %d, unexpected success: ReadBits(%d) = nil", i, v.nbits)
+ }
+ }
+ if !v.fail && val != v.value {
+ t.Errorf("test %d, mismatching value: ReadBits(%d) = %d, want %d", i, v.nbits, val, v.value)
+ }
}
}
-func TestRandomData1(t *testing.T) {
- testRandomData(t, randBZ2Hex, randHex)
-}
-
-func TestRandomData2(t *testing.T) {
- // This test involves several repeated bytes in the output, but they
- // should trigger RLE decoding.
- testRandomData(t, rand2BZ2Hex, rand2Hex)
-}
-
-func TestRandomData3(t *testing.T) {
- // This test uses the full range of symbols.
- testRandomData(t, rand3BZ2Hex, rand3Hex)
-}
-
-func Test1MBSawtooth(t *testing.T) {
- out, err := decompressHex(oneMBSawtoothBZ2Hex)
- if err != nil {
- t.Errorf("error from Read: %s", err)
- return
- }
-
- expected := make([]byte, 1024*1024)
-
- for i := range expected {
- expected[i] = byte(i)
+func TestMTF(t *testing.T) {
+ var vectors = []struct {
+ idx int // Input index
+ sym uint8 // Expected output symbol
+ }{
+ {idx: 1, sym: 1}, // [1 0 2 3 4]
+ {idx: 0, sym: 1}, // [1 0 2 3 4]
+ {idx: 1, sym: 0}, // [0 1 2 3 4]
+ {idx: 4, sym: 4}, // [4 0 1 2 3]
+ {idx: 1, sym: 0}, // [0 4 1 2 3]
}
- if !bytes.Equal(out, expected) {
- t.Error("incorrect result")
+ mtf := newMTFDecoderWithRange(5)
+ for i, v := range vectors {
+ sym := mtf.Decode(v.idx)
+ t.Log(mtf)
+ if sym != v.sym {
+ t.Errorf("test %d, symbol mismatch: Decode(%d) = %d, want %d", i, v.idx, sym, v.sym)
+ }
}
}
-const helloWorldBZ2Hex = "425a68393141592653594eece83600000251800010400006449080200031064c4101a7a9a580bb9431f8bb9229c28482776741b0"
-
-var helloWorld = []byte("hello world\n")
-
-const thirtyTwoZerosBZ2Hex = "425a6839314159265359b5aa5098000000600040000004200021008283177245385090b5aa5098"
-const oneMBZerosBZ2Hex = "425a683931415926535938571ce50008084000c0040008200030cc0529a60806c4201e2ee48a70a12070ae39ca"
-
-const randBZ2Hex = "425a6839314159265359905d990d0001957fffffffffffafffffffffffffffffbfff6fffdfffffffffffffffffffffffffffffc002b6dd75676ed5b77720098320d11a64626981323d4da47a83131a13d09e8040f534cd4f4d27a464d193008cd09804601347a980026350c9886234d36864193d1351b44c136919e90340d26127a4cd264c32023009898981310c0344c340027a8303427a99a04c00003534c230d034f5006468d268cf54d36a3009a69a62626261311b40026013d34201a6934c9a604c98ca6c8460989fa9346234d30d3469a2604fd4131a7aa6d0046043d4c62098479269e89e835190d018d4c046001a11e801a0264792321932308c43a130688c260d46686804cd01a9e80981193684c6a68c00000004c4c20c04627a4c0000260003400d04c0681a01334026009a6f48041466132581ec5212b081d96b0effc16543e2228b052fcd30f2567ee8d970e0f10aabca68dd8270591c376cfc1baae0dba00aaff2d6caf6b211322c997cc18eaee5927f75185336bf907021324c71626c1dd20e22b9b0977f05d0f901eaa51db9fbaf7c603b4c87bc82890e6dd7e61d0079e27ec050dd788fd958152061cd01e222f9547cb9efc465d775b6fc98bac7d387bffd151ae09dadf19494f7a638e2eae58e550faba5fe6820ea520eb986096de4e527d80def3ba625e71fbefdcf7e7844e0a25d29b52dcd1344fca083737d42692aab38d230485f3c8ed54c2ed31f15cf0270c8143765b10b92157233fa1dfe0d7ce8ffe70b8b8f7250071701dfe9f1c94de362c9031455951c93eb098a6b50ee45c6131fefc3b6f9643e21f4adc59497138e246f5c57d834aa67c4f10d8bd8b3908d8130dd7388409c299a268eab3664fa4907c5c31574874bd8d388a4ab22b339660804e53e1b8d05867d40e3082560608d35d5d2c6054e8bab23da28f61f83efd41d25529ad6ea15fb50505cacfabb0902166427354ca3830a2c8415f21b19e592690fbe447020d685a4bcd16ecc4ff1a1c0e572627d0ef6265c008a43fc243240541061ed7840606be466d1c0dac2c53250ed567507d926c844154560d631960c65e15157829b2c7f16859f111a3a8cb72bf24ffa57a680c3be67b1be67c8dd8aea73ac2437a78df5b686d427080ebc01bd30b71a49f6ea31dc0f08e4849e38face96717690239538bc08b6cc5aa8d467cb9c36aa83d40ac7e58bddbfa185b22065e89a86c0145569d9e23726651aec49e31588d70f40fe9a4449dcf4f89eac220171e9c938e803dc195679651004b79ad33cc0c13aeeba5941b33ffeeb8fbe16e76c7811445c67b4269c90479433ddf9e8ed1d00c166b6c17217fb22c3ef1b0c1c7e28e185446a111c37f1ea6c07a59fbcc6546ecc696
8d36ba58bc5489a5640647e426b0c39350cb6f07d5dc7a717648c4ec7f841467597ae1f65f408fd2d9940a4b1b860b3c9ae351dcae0b4425f7e8538710f2e40b7f70d13b51ac05ccc6ecda8264a88cad2d721d18132a9b9110a9e759c2483c77dcefc7e464ec88588174cb0c9abff93230ea0bed8decdd8ed8bfe2b5df0a253803678df04fab44c03b9ab7cc97d6e6d6fd0c4c840ce0efc498436f453bbb181603459471f2b588724592b222ec990614db530e10cadd84705621cfdd9261fa44a5f5806a2d74b575056b3c915255c65678f9c16e6dc00a99180fef1a840aff0e842ac02731080cc92782538360a60a727991013984da4fad95f79d5030677b7528d076b2483685fca4429edf804682fdc110dfc2f7c30e23e20a72e039108a0ad6fdee2f76985a4b4be4f5afc6101bf9d5042b657a05dc914e1424241766434"
-const randHex = "c95138082bdf2b9bfa5b1072b23f729735d42c785eeb94320fb14c265b9c2ca421d01a3db986df1ac2acde5a0e6bf955d6f95e61261540905928e195f1a66644cc7f37281744fff4dc6df35566a494c41a8167151950eb74f5fc45f85ad0e5ed28b49adfe218aa7ec1707e8e1d55825f61f72beda3b4c006b8c9188d7336a5d875329b1b58c27cc4e89ecbae02c7712400c39dd131d2c6de82e2863da51d472bdfb21ecce62cc9cf769ed28aedc7583d755da45a0d90874bda269dd53283a9bdfd05f95fc8e9a304bb338ea1a2111894678c18134f17d31a15d9bfc1237894650f3e715e2548639ecbddb845cfe4a46a7b3a3c540f48629488e8c869f1e9f3f4c552243a8105b20eb8e264994214349dae83b165fd6c2a5b8e83fce09fc0a80d3281c8d53a9a08095bd19cbc1388df23975646ed259e003d39261ee68cbece8bcf32971f7fe7e588e8ba8f5e8597909abaea693836a79a1964050ed910a45a0f13a58cd2d3ae18992c5b23082407fd920d0bf01e33118a017bb5e39f44931346845af52128f7965206759433a346034ea481671f501280067567619f5ecef6cded077f92ed7f3b3ce8e308c80f34ba06939e9303f91b4318c8c1dd4cc223c1f057ac0c91211c629cd30e46ee9ec1d9fd493086b7bc2bc83e33f08749a5d430b0ed4f79d70f481940c9b0930b16321886a0df4fa5a1465d5208c7d3494a7987d9a5e42aa256f0c9523947f8318d0ef0af3d59a45cfc2418d0785c9a548b32b81e7de18be7d55a69a4c156bbb3d7579c0ac8e9c72b24646e54b0d0e8725f8f49fb44ae3c6b9d0287be118586255a90a4a83483ed0328518037e52aa959c5748ed83e13023e532306be98b8288da306bbb040bcf5d92176f84a9306dc6b274b040370b61d71fde58dd6d20e6fee348eae0c54bd0a5a487b2d005f329794f2a902c296af0a4c1f638f63292a1fa18e006c1b1838636f4de71c73635b25660d32e88a0917e1a5677f6a02ca65585b82cbd99fb4badbfa97a585da1e6cadf6737b4ec6ca33f245d66ee6a9fae6785d69b003c17b9fc6ec34fe5824ab8caae5e8e14dc6f9e116e7bf4a60c04388783c8ae929e1b46b3ef3bbe81b38f2fa6da771bf39dfba2374d3d2ed356b8e2c42081d885a91a3afb2f31986d2f9873354c48cf5448492c32e62385af423aa4f83db6d1b2669650379a1134b0a04cbca0862d6f9743c791cbb527d36cd5d1f0fc7f503831c8bd1b7a0ef8ae1a5ed1155dfdd9e32b6bb33138112d3d476b802179cb85a2a6c354ccfed2f31604fbd8d6ec4baf9f1c8454f72c6588c06a7df3178c43a6970bfa02dd6f74cb5ec3b63f9eddaa17db5cbf27fac6de8e57c384afd0954179f7b5690c3bee42abc4fa79b4b12101a9cf5f0
b9aecdda945def0bd04163237247d3539850e123fe18139f316fa0256d5bd2faa8"
-
-const oneMBSawtoothBZ2Hex = "425a683931415926535971931ea00006ddffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe007de00000000000000024c00130001300000000000000000000000000000000000000000000000000000000126000980009800000000000000000000000000000000000000000000000000000000930004c0004c000000000000000000000000000000000000000000000000000000004980026000260000000000000000000000000000000000000000000000000000000009aaaaa0000000000000000000000000000000000000000000000000000000000000000498002600026000000000000000000000000000000000000000000000000000000007fc42271980d044c0a822607411304a08982d044c1a82260f411308a08984d044c2a82261741130ca08986d044c3a82261f411310a08988d044c4a822627411314a0898ad044c5a82262f411318a0898cd044c6a82263741131ca0898ed044c7a82263f411320a08990d044c8a822647411324a08992d044c9a82264f411328a08994d044caa82265741132ca08996d044cba82265f411330a08998d044cca822667411334a0899ad044cda82266f411338a0899cd044cea82267741133ca0899ed044cfa82267f411340a089a0d044d0a822687411344a089a2d044d1a82268f411348a089a4d044d2a82269741134ca089a6d044d3a82269f411350a089a8d044d4a8226a7411354a089aad044d5a8226af411358a089acd044d6a8226b741135ca089aed044d7a8226bf411360a089b0d044d8a8226c7411364a089b2d044d9a8226cf411368a089b4d044daa8226d741136ca089b6d044dba8226df411370a089b8d044dca8226e7411374a089bad044dda8226ef411378a089bcd044dea8226f741137ca089bed044dfa8226ff411380a089c0d044e0a822707411384a089c2d044e1a82270f411388a089c4d044e2a82271741138ca089c59089c69089c71089c79089c81089c89089c91089c99089ca1089ca9089cb1089cb9089cc1089cc9089cd1089cd9089ce1089ce9089cf1089cf9089d01089d09089d11089d19089d21089d29089d31089d39089d41089d49089d51089d59089d61089d69089d71089d79089d81089d89089d91089d99089da1089da9089db1089db9089dc1089dc9089dd1089dd9089de1089de9089df1089df9089e01089e09089e11089e19089e21089e29089e31089e39089e41089e49089e51089e59089e61089e69089e71089e79089e81089e89089e91089e99089ea1089ea9089eb1089eb9089ec1089ec9089ed1089ed9089ee1089ee9089ef1089ef9089f01089f09089f11089f19089f21089f29089f31089f3908
9f41089f49089f51089f59089f61089f69089f71089f79089f81089f89089f91089f99089fa1089fa9089fb1089fb9089fc1089fc9089fd1089fd9089fe1089fe9089ff1089ff98a0ac9329acf23ba884804fdd3ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0034f800000000000024c00130001300000000000000000000000000000000000000000000000000000000126000980009800000000000000000000000000000000000000000000000000000000930004c0004c000000000000000000000000000000000000000000000000000000004980026000260000000000000000000000000000000000000000000000000000000024c0013000130000000000000000000000000000000000000000000000000000000002955540000000000000000000000000000000000000000000000000000000000000001ff108c00846024230221181908c108460a4230621183908c20846124230a21185908c308461a4230e21187908c40846224231221189908c508462a423162118b908c60846324231a2118d908c708463a4231e2118f908c80846424232221191908c908464a4232621193908ca0846524232a21195908cb08465a4232e21197908cc0846624233221199908cd08466a423362119b908ce0846724233a2119d908cf08467a4233e2119f908d008468242342211a1908d108468a42346211a3908d20846924234a211a5908d308469a4234e211a7908d40846a242352211a9908d50846aa42356211ab908d60846b24235a211ad908d70846ba4235e211af908d80846c242362211b1908d90846ca42366211b3908da0846d24236a211b5908db0846da4236e211b7908dc0846e242372211b9908dd0846ea42376211bb908de0846f24237a211bd908df0846fa4237e211bf908e008470242382211c1908e108470a42386211c3908e20847124238a211c5908e2f8c211c6c8471d211c7c84721211c8c84725211c9c84729211cac8472d211cbc84731211ccc84735211cdc84739211cec8473d211cfc84741211d0c84745211d1c84749211d2c8474d211d3c84751211d4c84755211d5c84759211d6c8475d211d7c84761211d8c84765211d9c84769211dac8476d211dbc84771211dcc84775211ddc84779211dec8477d211dfc84781211e0c84785211e1c84789211e2c8478d211e3c84791211e4c84795211e5c84799211e6c8479d211e7c847a1211e8c847a5211e9c847a9211eac847ad211ebc847b1211ecc847b5211edc847b9211eec847bd211efc847c1211f0c847c5211f1c847c9211f2c847cd211f3c847d1211f4c847d5211f5c847d9211f6c847dd211f7c847e1211f8c847e5211f9c847e9211fac847ed211fbc
847f1211fcc847f5211fdc847f9211fec847fd211ff8bb9229c284803a8b6248"
-
-const rand2BZ2Hex = "425a6839314159265359d992d0f60000137dfe84020310091c1e280e100e042801099210094806c0110002e70806402000546034000034000000f2830000032000d3403264049270eb7a9280d308ca06ad28f6981bee1bf8160727c7364510d73a1e123083421b63f031f63993a0f40051fbf177245385090d992d0f60"
-const rand2Hex = "92d5652616ac444a4a04af1a8a3964aca0450d43d6cf233bd03233f4ba92f8719e6c2a2bd4f5f88db07ecd0da3a33b263483db9b2c158786ad6363be35d17335ba"
-
-const rand3BZ2Hex = "425a68393141592653593be669d00000327ffffffffffffffffffffffffffffffffffff7ffffffffffffffffffffffffffffffc002b3b2b1b6e2bae400004c00132300004c0d268c004c08c0130026001a008683234c0684c34008c230261a04c0260064d07a8d00034000d27a1268c9931a8d327a3427a41faa69ea0da264c1a34219326869b51b49a6469a3268c689fa53269a62794687a9a68f5189994c9e487a8f534fd49a3d34043629e8c93d04da4f4648d30d4f44d3234c4d3023d0840680984d309934c234d3131a000640984f536a6132601300130130c8d00d04d1841ea7a8d31a02609b40023460010c01a34d4c1a0d04d3069306810034d0d0d4c0046130d034d0131a9a64d321804c68003400098344c13000991808c0001a00000000098004d3d4da4604c47a13012140aadf8d673c922c607ef6212a8c0403adea4b28aee578900e653b9cdeb8d11e6b838815f3ebaad5a01c5408d84a332170aff8734d4e06612d3c2889f31925fb89e33561f5100ae89b1f7047102e729373d3667e58d73aaa80fa7be368a1cc2dadd81d81ec8e1b504bd772ca31d03649269b01ceddaca07bf3d4eba24de141be3f86f93601e03714c0f64654671684f9f9528626fd4e1b76753dc0c54b842486b8d59d8ab314e86ca818e7a1f079463cbbd70d9b79b283c7edc419406311022e4be98c2c1374df9cdde2d008ce1d00e5f06ad1024baf555631f70831fc1023034e62be7c4bcb648caf276963ffa20e96bb50377fe1c113da0db4625b50741c35a058edb009c6ee5dbf93b8a6b060eec568180e8db791b82aab96cbf4326ca98361461379425ba8dcc347be670bdba7641883e5526ae3d833f6e9cb9bac9557747c79e206151072f7f0071dff3880411846f66bf4075c7462f302b53cb3400a74cf35652ad5641ed33572fd54e7ed7f85f58a0acba89327e7c6be5c58cb71528b99df2431f1d0358f8d28d81d95292da631fb06701decabb205fac59ff0fb1df536afc681eece6ea658c4d9eaa45f1342aa1ff70bdaff2ddaf25ec88c22f12829a0553db1ec2505554cb17d7b282e213a5a2aa30431ded2bce665bb199d023840832fedb2c0c350a27291407ff77440792872137df281592e82076a05c64c345ffb058c64f7f7c207ef78420b7010520610f17e302cc4dfcfaef72a0ed091aab4b541eb0531bbe941ca2f792bf7b31ca6162882b68054a8470115bc2c19f2df2023f7800432b39b04d3a304e8085ba3f1f0ca5b1ba4d38d339e6084de979cdea6d0e244c6c9fa0366bd890621e3d30846f5e8497e21597b8f29bbf52c961a485dfbea647600da0fc1f25ce4d203a8352ece310c39073525044e7ac46acf2ed9120bae1b4f6f02364abfe343f80b
290983160c103557af1c68416480d024cc31b6c06cfec011456f1e95c420a12b48b1c3fe220c2879a982fb099948ac440db844b9a112a5188c7783fd3b19593290785f908d95c9db4b280bafe89c1313aeec24772046d9bc089645f0d182a21184e143823c5f52de50e5d7e98d3d7ab56f5413bbccd1415c9bcff707def475b643fb7f29842582104d4cc1dbaaca8f10a2f44273c339e0984f2b1e06ab2f0771db01fafa8142298345f3196f23e5847bda024034b6f59b11c29e981c881456e40d211929fd4f766200258aad8212016322bd5c605790dcfdf1bd2a93d99c9b8f498722d311d7eae7ff420496a31804c55f4759a7b13aaaf5f7ce006c3a8a998897d5e0a504398c2b627852545baf440798bcc5cc049357cf3f17d9771e4528a1af3d77dc794a11346e1bdf5efe37a405b127b4c43b616d61fbc5dc914e14240ef99a7400"
-const rand3Hex = "1744b384d68c042371244e13500d4bfb98c6244e3d71a5b700224420b59c593553f33bd786e3d0ce31626f511bc985f59d1a88aa38ba8ad6218d306abee60dd9172540232b95be1af146c69e72e5fde667a090dc3f93bdc5c5af0ab80acdbaa7a505f628c59dc0247b31a439cacf5010a94376d71521df08c178b02fb96fdb1809144ea38c68536187c53201fea8631fb0a880b4451ccdca7cc61f6aafca21cc7449d920599db61789ac3b1e164b3390124f95022aeea39ccca3ec1053f4fa10de2978e2861ea58e477085c2220021a0927aa94c5d0006b5055abba340e4f9eba22e969978dfd18e278a8b89d877328ae34268bc0174cfe211954c0036f078025217d1269fac1932a03b05a0b616012271bbe1fb554171c7a59b196d8a4479f45a77931b5d97aaf6c0c673cbe597b79b96e2a0c1eae2e66e46ccc8c85798e23ffe972ebdaa3f6caea243c004e60321eb47cd79137d78fd0613be606feacc5b3637bdc96a89c13746db8cad886f3ccf912b2178c823bcac395f06d28080269bdca2debf3419c66c690fd1adcfbd53e32e79443d7a42511a84cb22ca94fffad9149275a075b2f8ae0b021dcde9bf62b102db920733b897560518b06e1ad7f4b03458493ddaa7f4fa2c1609f7a1735aeeb1b3e2cea3ab45fc376323cc91873b7e9c90d07c192e38d3f5dfc9bfab1fd821c854da9e607ea596c391c7ec4161c6c4493929a8176badaa5a5af7211c623f29643a937677d3df0da9266181b7c4da5dd40376db677fe8f4a1dc456adf6f33c1e37cec471dd318c2647644fe52f93707a77da7d1702380a80e14cc0fdce7bf2eed48a529090bae0388ee277ce6c7018c5fb00b88362554362205c641f0d0fab94fd5b8357b5ff08b207fee023709bc126ec90cfb17c006754638f8186aaeb1265e80be0c1189ec07d01d5f6f96cb9ce82744147d18490de7dc72862f42f024a16968891a356f5e7e0e695d8c933ba5b5e43ad4c4ade5399bc2cae9bb6189b7870d7f22956194d277f28b10e01c10c6ffe3e065f7e2d6d056aa790db5649ca84dc64c35566c0af1b68c32b5b7874aaa66467afa44f40e9a0846a07ae75360a641dd2acc69d93219b2891f190621511e62a27f5e4fbe641ece1fa234fc7e9a74f48d2a760d82160d9540f649256b169d1fed6fbefdc491126530f3cbad7913e19fbd7aa53b1e243fbf28d5f38c10ebd77c8b986775975cc1d619efb27cdcd733fa1ca36cffe9c0a33cc9f02463c91a886601fd349efee85ef1462065ef9bd2c8f533220ad93138b8382d5938103ab25b2d9af8ae106e1211eb9b18793fba033900c809c02cd6d17e2f3e6fc84dae873411f8e87c3f0a8f1765b7825d185ce3730f299c3028d4a62da9ee95c2b870fb70c
79370d485f9d5d9acb78926d20444033d960524d2776dc31988ec7c0dbf23b9905d"
-
-const badBlockSize = "425a683131415926535936dc55330063ffc0006000200020a40830008b0008b8bb9229c28481b6e2a998"
-
-const (
- digits = iota
- twain
- random
+var (
+ digits = mustLoadFile("testdata/e.txt.bz2")
+ twain = mustLoadFile("testdata/Mark.Twain-Tom.Sawyer.txt.bz2")
+ random = mustLoadFile("testdata/random.data.bz2")
)
-var testfiles = []string{
- // Digits is the digits of the irrational number e. Its decimal representation
- // does not repeat, but there are only 10 possible digits, so it should be
- // reasonably compressible.
- digits: "testdata/e.txt.bz2",
- // Twain is Mark Twain's classic English novel.
- twain: "testdata/Mark.Twain-Tom.Sawyer.txt.bz2",
- // 16KB of random data from /dev/urandom
- random: "testdata/random.data.bz2",
-}
-
-func benchmarkDecode(b *testing.B, testfile int) {
- compressed, err := ioutil.ReadFile(testfiles[testfile])
- if err != nil {
- b.Fatal(err)
- }
-
+func benchmarkDecode(b *testing.B, compressed []byte) {
// Determine the uncompressed size of testfile.
uncompressedSize, err := io.Copy(ioutil.Discard, NewReader(bytes.NewReader(compressed)))
if err != nil {
func BenchmarkDecodeDigits(b *testing.B) { benchmarkDecode(b, digits) }
func BenchmarkDecodeTwain(b *testing.B) { benchmarkDecode(b, twain) }
func BenchmarkDecodeRand(b *testing.B) { benchmarkDecode(b, random) }
-
-func TestBufferOverrun(t *testing.T) {
- // Tests https://golang.org/issue/5747.
- buffer := bytes.NewReader([]byte(bufferOverrunBase64))
- decoder := base64.NewDecoder(base64.StdEncoding, buffer)
- decompressor := NewReader(decoder)
- // This shouldn't panic.
- ioutil.ReadAll(decompressor)
-}
-
-func TestOutOfRangeSelector(t *testing.T) {
- // Tests https://golang.org/issue/8363.
- buffer := bytes.NewReader(outOfRangeSelector)
- decompressor := NewReader(buffer)
- // This shouldn't panic.
- ioutil.ReadAll(decompressor)
-}
-
-func TestMTF(t *testing.T) {
- mtf := newMTFDecoderWithRange(5)
-
- // 0 1 2 3 4
- expect := byte(1)
- x := mtf.Decode(1)
- if x != expect {
- t.Errorf("expected %v, got %v", expect, x)
- }
-
- // 1 0 2 3 4
- x = mtf.Decode(0)
- if x != expect {
- t.Errorf("expected %v, got %v", expect, x)
- }
-
- // 1 0 2 3 4
- expect = byte(0)
- x = mtf.Decode(1)
- if x != expect {
- t.Errorf("expected %v, got %v", expect, x)
- }
-
- // 0 1 2 3 4
- expect = byte(4)
- x = mtf.Decode(4)
- if x != expect {
- t.Errorf("expected %v, got %v", expect, x)
- }
-
- // 4 0 1 2 3
- expect = byte(0)
- x = mtf.Decode(1)
- if x != expect {
- t.Errorf("expected %v, got %v", expect, x)
- }
-}
-
-func TestBadBlockSize(t *testing.T) {
- // Tests https://golang.org/issue/13941.
- _, err := decompressHex(badBlockSize)
- if err == nil {
- t.Errorf("unexpected success")
- }
-}
-
-var bufferOverrunBase64 string = `
-QlpoNTFBWSZTWTzyiGcACMP/////////////////////////////////3/7f3///
-////4N/fCZODak2Xo44GIHZgkGzDRbFAuwAAKoFV7T6AO6qwA6APb6s2rOoAkAAD
-oACUoDtndh0iQAPkAAAAaPWihQoCgr5t97Obju21ChQB0NBm3RbA7apXrRoBooAA
-AhA+IAHWl2Us3O7t9yieb3udvd76+4+fd33nd3HO1bVvfcGRne6+3vfPvfc++995
-w7k973eJhasLVec970tzDNXdX28LoPXZ3H3K9z0s5ufWAfes49d5594c3dUYtI+2
-+h1dvtpRa+uvrVEAG9bl893RVEN7cWvroSqWjPMGgAQi7Gq8TJSgKKdjKFBIB9Ae
-LqWxleu715eXe7ml9e5098Z6G1vr7t1QZ6ot76YzPd3j7333t2ql2Chm7XrA9ICQ
-VF77z3rVBWqkSXtlfb099hyezAr6USbGpICTSCFAaqHrKo+tUnm32rpE4Ue+t2mj
-bKUeipEqwc93EdhhTwmQpOhhesC9iqDSPNTWYNSnUtBdm1nsA0nqqNd7OWwDXtFL
-ONmmA6Ubke26I9UblvWIPR5VOWOnctai443URunnDy77uVC59OfRvezlDu33Z7Ly
-3NNuuHW63088xu3t3NHZhkZbG7tXRlj00qOtbaXTJUUdspTbABR9R6EUwQAEAAAA
-EMEwRpoAAAABMmhoAAjBNNAaCMhponpoGpgJpk9TEyp6niGKZkAaAEfqMQ09U80p
-+pMGSCKngIAAAAgAAg0AAJhGgABGCEaaTyTKeNI1PE0wkj01GajMSNPSZGnqbU9T
-anlPUNAHqGQ0DQAMg9TamgAAYRU/IAAICAmjQJgjQBMEwp5DTSaaYmhTeqfplPID
-U1T9TynoU82pT1NPU/VP0j1NHqRpk9TTR7SnqaNNGmmQAaAD1Aeo0PSAAAAaaBiK
-eBAQBGgIABGQA0AmBNNBoaAgaJmpglPEyYap6npiTT0agGjJjUaaDTQAAAAAAM1A
-9QAaAAAADU8iEAQAEyAJk0NNNJgIZTJ5E00YSemiaZNGm1MpGNJ+lPU9qm9U2RDM
-oY0EzJB6h6nqDID1NMBDDRpo1AGNAjCMmhkMgaYSJIgAAAQyAAEyBoATECCNhTT0
-U/IZAmCM1DSTxkzUE8p6NDaGiZGJqntTFHvUyU9qPQp7Kn5GgKNPU9QAGg9QAAA3
-wz0Pk/g/m/m9P9H4vxv2+dH3gCS8nhbbbbbYxtgNsBsG0m2MbG0NNtsbYNsaY0wb
-bBibGmm22mxptNpsaGNDTY02JsG0MY0xg2MaYNNDbGwG0L5vsK/F9DO+EAA447Kq
-p7Wdf6Y+5c20T7DfHyMXIzRKrZexw72uiQI+y55vOe52xpqbCLC2uR20JdER7Zvr
-7ufuKb6zhiBxLuj0eA27v8RpMLucw9Ohwcizi2wrpt+yU1FdpM7ZYPcwS3XTef+A
-Wzjxwhdrgw3aH1LeC1eZW900x8V9Nv4hTPXp4l067P/4ANVZFF/imOe/d5bdueam
-/DFFokQWnFaU+ZqLBCM+d0PialJQWnLqRQZk/KhfbbYc2pCUTgffcSYbrCM1N+8l
-HU6gSz+h2GJXs+tbrNviL83M97X0vcTn/F82P8wen8/3/h3sHY+sf9CSej9ThYTV
-3lQ+FUHpfpGD4kv7dYMV995dpDX/y3xR8FoXx1bjUxBTNxuutwQ/h/Eedn9wpn6w
-E3+ND8YhN1HSriIxRE/6uFyMv6/oC6Elarw3aHMMqHJkGiiz6tejmvnYLQa+Qm6G
-deZ7jXTZV6NlpocgDnRdimS06bTYSkvPAL/xoWNLkX6N6VljU0dfKSBmm2uZE/xu
-sutQ1EdP7GdjhglIq4xlOFUFEQpmX+xx7R8y6c0GSAaqusOjNZwxZRudOvmXm1tZ
-T+YnbeB2ir9eiHNrtJNSLD/J/WDyuQpwBUtLKo0krccY/wIILP7f86teb9Z/9oyz
-OX05qEWbObfhpRw+9+rCvp/35ML8KX3aHaI0n+tudbFRsV5FLW+Oa8ruLN4peyVL
-DWjTHrXNthq/s7zAJYMeFJZkZt5mT9rfpH+5g3nc+piOSZ+J5nHtOnKI7Ff8Xl+j
-0t76XTNucCHQ6whav1OHdF53TY5wuv5OzvrdnxoId8fTyUvERr0ERINu/8XxZZ5f
-B5/kTZ8bBO0wv54Jp+ED/GQI8lZHzIQCP3vfQhwnCTj9TvITic7P4mYLDbH3fyzR
-i+6EajCcpXLWSGf+ZXkOrWspDWDhXtEKas0v3UqWksqgY1rTj45krX4KihN+daXs
-pZl5WPlta5p06CX6Xm2SfzqkMw12/3ix1bpnnZ+kFeBNX7A+E9zzG6OZaN78GOpl
-9Ht/eZn9PqWdav852zr0zqkDK2H5IjdvNah+b1YVGdQGzwR4Nw+f13yEKnV+y66W
-djfq7zWp7m5w+hzfv+Ly8O7oet5Vvd8/wQvO7qzOZ2vjf9X8Tj8PnMb/nc/nKqRR
-+ml4UEhOOwfCeJEEI109CMYSh91iAJqPjMyH6KjrPD7W25llZVcREYNCTg6htbQt
-M38wYoquCWP6tdKYlVIv14xTNUeUf4El/FunCf6csZkmv+9tfWx7t59wuKIa3saU
-tZs9M+3HFOZtz3OLg/Unoaj9BYazYqA78xBU9tZzrtmF/rQL9CGJt90o/oYnSfcS
-SL3haaw351LXWQ1XOsv1SmH3v6ymuxEpPPnEDmBELaTYsvvMIWJsmPZFFww++Kd7
-s/Jo0JFeUU7uNtI+gVosAIpVVuWfI/9tOIycz7I5Z7zjV+NR2OuZbYtW5F08KX4o
-2k/xuJIchcNFPtxPfw9dkDgscRbMckyFMrzuZ3IvrcGzk0J6iI5ytrv37bGpAXMz
-WK9mMMPebepNevmLjjo/QWoM968Sjv7ldlPS5AinHcXwsFv6dmmh8lJt7UOJWoKu
-lMD1cB2ksIGpMdv8iuqR42Rn/kn+17BhhUZcwDBaUXVdX6bKW7fxlUYbq+mlqIcf
-a9v8HF87M9ANbi9bq9onf9TD7nQ6Xf6vZci8TBPX+/GI0He6j31fTVQYW+NsQxvO
-J8xrx+e58CCLQNjxeIyPt+F+qk/QMiXw+LyxGVkV/XcGQT9X03jSDP6beJ5QG1JW
-9Q3qLv/YixWI7gPV9Mrhf2oRYTc/9KLFRhkE3SjKOTKuSSBKQ24fI+hEznamH71D
-66Hwez8/0et7AtTv9zvamv2OD5He6fMV4k+ePl6+qPfO5CdHtK+eCDZL5+4f5yrl
-gTcRFiq8fXbc5IaI5fbbc1KMM/2T0Mr7+Hwaco6FtXm0fmhCgTZRqY4pKiEIfmaz
-QwHNOOCrtMJ2VwsyMumt7xsOolGnizRev6lILH43qPcczQM7Gc5zRin80YvFt1Qm
-h/57Z0auR2h0fuX50MBO4XQ+26y5l6v4j902R66c0j3z2KHstKQ04J/h6LbuNQE4
-D6cu/lyfK69DxxX8wb8XaQkMUcJdo1LzqUGDAb3Kfn/A3P/JYc99MO9qv67+SxWb
-wYTyqKdWTd+1KbR/Rcn0Io5zI/QquX7FA1bxfMytjQ/X+l0fh0Pf+Hx97meH4fQL
-7/T8/sdTm9Tn8nELvedyhydLlPPTScINdXyLIq9wgIJr4fWPbp9ZhFh/56fdSgOG
-HDXg+gkXsN2Rddr4HQ5P3u+RhLzmSjhzoqY5EsPC4QvRlX9JXjB84rPV5USR66qa
-/kjw4156GJnzoXtydKJE53t6PHfZWO+3ujsfI6iAdshc7OFzGXiZB9PtItKodhYq
-nABkTKdcpu4+TOpf9h5piX5slsaBjkeTnj/Ba02ilboQfcDVigxrYn/iTH5ySWUW
-/lHtg78s5UZM8sErwhNe3N3w+6ZOMnU+5i86/xFNtqZfDdXTGy1H3PzGbdtZXYT+
-Ixx2vpwBYzbPVYHxKosM5rPiVmcTllI9nuoSfeh9ib4foFWauOpvdmhBDqpTpKTX
-u8EO2l2Z195G2RIV7TlKSxGWjR5sl/nALu1uzBeLd9zpSujzMTd1uTX9Qk/Q1S+r
-vaW6bm8qqPO4jb6Wx6XIkm321nrIF6Ae25d1+Dpv/P5G4NoLd2j6/EtENC3FeR5z
-oo7bA+tI8yEQRhiF0z1FlJXLD5ZbhNNWQm/j/IbzRfh8JtOFZU7ruShLvHXysW9S
-9V909tr9jn8/E/Hb5N/1NVNHnZu2HIUvJvHJiHd2ucmeI9PWUMnppmE65GQ5E9xV
-ZRlGEH0X85EvmHyEupkMrCC0oMv9RCq+/H8gcfpe00Hs/S+regT5p58cyYomh93v
-qvuw/A06BE/wzJESuYbN9pqYpoXqXFemW1NksHEJ2w+PYMJ27WJyD5FpaXB85VaW
-qMOhDfO8E3QdH8ybyKt/UgI8/tDGpFbyOlaVdIv1FXJhoLp8soAA4Djg6/KZ066N
-ZFYuS8WdjpSZGP4/Lw+1yaXlzNznc/k2uHe2uXP3uFuPcHx+Dm44utxldoO1uBPy
-+jzOs14+MIgOjOHMVNqAbMd8fUedLlhJMCfMtm4uz01enLNKcMrtLlPIR37Yukh1
-YEMXYpm7eU4XU+j+Jj3pDyaXtXs+p1fWfTN/cy9/Oxs4umUXQ4uHh1kObtayDJ56
-/QMxiHobjHNKuKfMxsrYEwN+QVIyVjAwMDYuMjQ1AAA9IwJniiBLRkZDAAAXt0Ja
-aDQxQVkmU1lZtwytAACLf///////////////////+//////v//////////bv78//
-/+AXO133uwO2xB2UxIvbKXrCqCoURUBL2ytFI82AFdcOwMhVTHtk5rD3szEVNYD4
-aIQINCaMRoTaSn7SbSMJiYmEwieTEp+psqbMCp+VNPaFNpqbBNR7UmanlPUeKfqm
-j1PU0/VPU08o9Q9EeKHlPJtKbYqeTCYhN6U9T1NH6mp+lPyoGNTI/Knkyg1MggAg
-CaMEyQnqZoaaRtRtJpppppoDaTR6hpphGh6mmgHpMQBpkGTTEAAaAAAA00AZDag0
-ADIBkGgABqemiRNTI0k8aU0PRGRoAZlP0UAAAGgAAAyAADQaAAAaAAAAAAAAAAAA
-AaAAAAM0kgRBJ5MlPFP1Gj0jTTTUaekxNAbUGjTQMgaZANNAAAAaAADTQAAAAAAA
-ANAA0AAANADQ0QAAAAAAAAAaGgAAAAAAABoA0AAA0AAAAAAAAAAAAANAAAAAkSEI
-aTRpomp5DUxNNDTJPTKaep6T09Kemmo2JG0aTQ9ENogaaGhkABo0NHqaBoDTI0DC
-Gj0gNAMhoDQ9QMQNAGQAaDDwyMPIMlbG1vhRBTFo6JksSupgpAjPbY0ec02IGXjb
-eS+FBsh01+O4ZOaD+srUZCFaT4DRjVDLx7uKIsFtESIDUg1ZkhyCSYov05C00MtR
-BdNNa/AYPGOQZWcs+VegXOPrkushFbZ3mBoRD6WamClkpBaHZrUhUl02bIfRXX4w
-b3/9cW9nHDVxh2qFBxqgRKfmq7/Jc/tdJk05nVrGbckGVy2PnIy30CDhpWmqrSot
-K2bOnX0NbP1iy2cd0Na0ZmbRstm4MzMzbbMySTd35F7f+zPP8DC+NJLYcakkkkRd
-NZlupJt3OMFoDAD2g+N3FAMCydhIpoRHRQAdFI5nNg4ugEXHCYxkMyGCwtaJmial
-y0IMlpSYYM/weXNJAhFqS0GNmvaPEtYGjbvaucMdklOTmBX1vfVAkTYB1uXCSK64
-UNIixOqRKLuRCFtqIQtgwqaFrCkIYbbewErWABa+VGADWsJXJjfx5SJViLuwiGXq
-Ru6vCuwmU5CJiJz3UiBpmLv0r2wskxUhY4tzPVGQ9RMXJl65eLSNwZVwaSyGZ9Cm
-A3jztQUUpFeUryBTskW95iVwRMFrhBCwZBAFJBZvhMEMNoDJJlUoIhQkAkjbExp2
-YZio+ZYeAZUwmH1qUbdQixmxf0+61+aVgJ1hwxsO1yG3hFx4pfjc09ITVht0pG8u
-FtVFhPa1KE0gTRUSVXywkITucqk0Waz5Fs6qJpVHYdNrbYRFxnFsQGY1qmsTLjK6
-4QX5Rddo6krM/Bx9CqIAKq4CzVQYHrmIAd2EBhYmwVYwLvhzKIUrc2EirnGIvyuD
-O4YZDSwsVTA0BpVvUOjDErkCraBoSutcKwUSSLGhVvNYHLz3klgZD++wWsa/swLw
-gvNDY2De+sncOv8X2lq4HD95ZdwPuTIMXCwSbg4RrIqv+L0y6F17pqDecyQYPEj3
-iN/0BBeWZlJAyBMi5U3Q1zAlsK8IlDhaXGmvZrgISq5CfNjmUgxDeMggOKqxu4sI
-OrilS49Lkl1J3u3GjXTuH+rX+4ccyFAQnizCpPClcY77F59j63S6fr5vr+y99tuO
-7Ox7Wg/ljwhdyaK4xMmXczeJbx7x07htJNtC4xcQfAtvzeznLrN6MN/ILIBOI65I
-qIA2D5fHHj1XN4aN6TvOjWDaSbSWqxCSCvXUpzkNJAkWXAuTwF8k5uSJvQj/rVo0
-hAhEMEIYkCRGx9AX+byIuXWlLMbbVeliHNUL5AQYmNwLFu4SkmGD+UWtBMyVHQOQ
-ss0ggoVKSKOBUgnVS6ljt7WE1qXqJJ4QA1pEwYNLEaguEE1LtPNoVr5WzjbSbWPk
-V9OW3y9IneUDLoIV5pAkEFTEFGFVjeTFxtpzBBfGgycBxVCdz8eESBIzsamRchAa
-TQunQH8DHnpfod9QuAuRvc7JBlKUCYmCjMvynLcxIFohxCaYrDvGw4QbXZB7oWQ7
-hpoGlz23ayDfB8NrRRzdilsEQyQniu9ASLQg7RrGZnoTr1ai12IbCEUCGdFq03P5
-nBnRFAGmisQGcyykV9gKtcVMWLhCuVmXg86dndn7slUpRNSSEAU20oaWIm1maFTu
-E0DT4gTbg0nuhjtz3kNOz+i7sBm0bkXjxQWuLqlZEmp60ZTyRZJDUqKSEKg6hqcy
-ERxdU22CSNOO10RYUUiDVpKhPNdKTOIE1thp02sBNoNTFSht8WJtaBQ09qN3jd5r
-dOLX4IA5fevRyCCzDgRXfV4wzik4KROjmxmTMglBySlIMEzcXehnDXCRiZSlvwA2
-0YsIOROcm4UrIRFxJHctJH7OdN5u1aHVHb5UaLHpv48NgmFRE56KTSoaWunqm2st
-S0mrAdOiqcR12PWVbdVRJKcQ0DQuhwlAPcRtpxN3D4kbXJjToSYJIFw406G2CSaK
-jQMIJPZGlQmgyFhoCSzeGS1VSq5SKKQQxs5RqKUcVUNY57YUETb4mXzV84SPngKi
-nsce0mXByZq5BKUA9puHZWLNwQIYuDaJUNgG+E01E3pDYVNLKYQ0hsVesgV5gZY0
-htDsRdGtm0+iGnkN6+Ea9YJtUZNAkx2GgSoix12nTW0avTUfxR3oYcpvZ7IdtABE
-UhBcjG4qZtDZsS1JQHys243vhLaDTSvvTeBiJA2tmokqECTBcSOCAGkAxMKlVAva
-4IsLRaBBqhxDbcGtgdw03mFcLUaFuhtKuuEIEkUleJQwby/zwu9uvvZK4xTV+ECM
-a8lmzxKmqkBggYK1+xPdbmJclm6tSZhE/OSJtCEjs+unJIQkT9hCWgBJqGMS07Eh
-AJNmBiuVEVdTyjkIJkavuZmx2sJF13htgEZUCC23lZFOE6gWbM9WyYNJTM8yCQrb
-0Sx3OQvBML5cRATAQkSQkAJOAhoxpQkNi4ZiEVDbdtJAME0RXNDXGHA3M3Q0mm1o
-IEwbWpaM1DQCSMbGRCAu3iRIQiT6RlBpT1n3tfwvUXz3gIVlx3mEximY/kZW1kNG
-sgEJIrBisaEoGYPJ+1CQUYFBw+eGEHJQBpNHjErXUJY2iWHQ30hXwFBuMSxQ2lB5
-bg+/LX3euG6HsHUB1lFvBvaiaBrITVwkCTa1d0s9CHZCiDZjbWReKyrpPE2oSa7o
-LPrR4BJvys9ttjUpzETSSMxh8vsr9dXTwKBtK+1xCTGDQmNIaE29HmHdS5GSxpya
-MismcAUSEgSxHBrKtgsZzduG7vHZn16l3kFkVITtENIzS2JsiBwFTDlhgexsjBHv
-5HXOYxHBzoSDCcPZ0ctvkY9aS5XpoQuFYkGJgCsqjJZeUMNUEpDSbKcnUc1PifIA
-CbR2UoXawBlspkEBr9HBfvUi/MUakZVOf1WKYrqSaIXce62JOyhJLq3qJBloTA0F
-VbILEtM+heFmNRCFt70GJrExVJri0ArYbCRbADSGDBpBXxxb/6fo+s3C7uaL7RjM
-LV2IQBNrAJrKFeJwTsPnxbAsemirUx2lk1kaxschzdK4TQNJN5wQnolIFg401OZ4
-2na11LnT3lR+1k1TMJhiAjXMk0F1ooHnYlt9LKfJ3ZIOmeY+2l9bUQHWFNGyEyfj
-EAcu3kpGLq0Ez7XOS+EpAASRQTAYMATfVQibHLTT30zG732+pNe9za1JNt8sNJYn
-RjWuJ6jL5ILV0rcd9vT7X9fObvcXitpvJ2XBJE+PhX2HaTkyWeF9pwnlQNrTe9hV
-tzhA+ihZrDrHNmLcQjZbnv/IMubqq8egxY80t5n6vZ6U5TR6U9uZJvai1xtqAyCR
-NWkW52m00rDTEuO6BA4q2RHDWwbETF55rRsWLIgNW9qJCyMHPbTM/dMBmWMQSMxz
-4M2pRzt47SICxA327UqSCEERqMFybmYi3nUxePtLgHYplqRiw4ynMbXd/kiQ0LE0
-PKJSSCXA42ymziCpAxNWflzpzQdJZusahRFr6t6m+4p273/Taj7k+hZyNgBAgXAY
-8F7pTts6orLb8IA6o4TOwkwQYmKvKu9VwMrE7+GUhVIAgY9a8DyQMiDBkEAwh7S1
-KgCBfao8DK1CwSS8Z3WjL5MEgt93z2koUQCD/YxMBppiCMp7SDVSmkkIHptfGpeh
-t+M13Ccv1tavIASFiaQl6rBz3K4N3DSGwNkCibrvEAC0fQirOWnc4NVbcLKpFG1l
-NQXF/eqdT79wq1Mvlap3QSCLhcD2D3fCkKVWid4aSjtp9FOX1Uaf7P9eT93zd9Sv
-mj2yNLRUGzyI/0oONNSzmmkvJ5Cq2X2CdldIWMGZO57RJ8oyATAWTQmRmNkfh0Sx
-uuR/J9oUsomVy1AEntc0dlPivkqBkBqrxU3j5PnWkaI3ZRGc0gg9spCQEISh4xEU
-pMhVrnmDQLfLP8Ouqpx917MAw7hkjQk6BJFTAbXDsz3LSHIxo/gB8qrA1vbvdZZh
-LtR0frJdfdppX8nAQX/TAxOQ8+H6yw8a9i7/zJEfSYIhop59N/fhcWW2F14cj2Xc
-fyHaZ04lTO4uPnly91jwuFPaREuZVp8AxImIhlkxkAN61tWdWG7tEbaCgszh6VIz
-ThFnHo2Vi8SQXPrXCN7J9Tc9ZYiAYqoThV/u6SYsea5aZL8deOvKBQCgZZuIxX1z
-4EnfcqG176vY4VqMBIC4pMJz0WcHJYqN+j7BiwGoMBwExrIdTB7q4XIFLotcIpS0
-1MqyVsesvoQq7WObmGQXdMliMirSLcDuSx8Qy+4pIBgGDIyMp1qbonnGdcHYvU8S
-O0A8s/iua5oFdNZTWvbVI4FUH9sKcLiB3/fIAF+sB4n8q6L+UCfmbPcAo/crQ6b3
-HqhDBMY9J0q/jdz9GNYZ/1fbXdkUqAQKFePhtzJDRBZba27+LPQNMCcrHMq06F1T
-4QmLmkHt7LxB2pAczUO+T2O9bHEw/HWw+dYf2MoRDUw=
-`
-
-var outOfRangeSelector = []byte{
- 0x42, 0x5a, 0x68, 0x39, 0x31, 0x41, 0x59, 0x26,
- 0x53, 0x59, 0x4e, 0xec, 0xe8, 0x36, 0x00, 0x00,
- 0x02, 0x51, 0x80, 0x00, 0x10, 0x40, 0x00, 0x06,
- 0x44, 0x90, 0x80, 0x20, 0x00, 0x31, 0x06, 0x4c,
- 0x41, 0x01, 0xa7, 0xa9, 0xa5, 0x80, 0xbb, 0x94,
- 0x31, 0x17, 0x72, 0x45, 0x38, 0x50, 0x90, 0x00,
- 0x00, 0x00, 0x00,
-}
--- /dev/null
+\92Õe&\16¬DJJ\ 4¯\1a\8a9d¬ E\rCÖÏ#;Ð23ôº\92øq\9el*+Ôõø\8d°~Í\r££;&4\83Û\9b,\15\87\86cc¾5Ñs5º
\ No newline at end of file
// Input source.
r Reader
roffset int64
- woffset int64
// Input bits, in top of b.
b uint32
return 0, f.err
}
f.step(f)
- f.woffset += int64(len(f.toRead))
}
}
copy(buf1[i:], buf0)
}
buf0 = nil
+ w, err := NewWriter(ioutil.Discard, level)
+ if err != nil {
+ b.Fatal(err)
+ }
runtime.GC()
b.StartTimer()
for i := 0; i < b.N; i++ {
- w, err := NewWriter(ioutil.Discard, level)
- if err != nil {
- b.Fatal(err)
- }
+ w.Reset(ioutil.Discard)
w.Write(buf1)
w.Close()
}
return z, nil
}
-func (z *reader) Read(p []byte) (n int, err error) {
+func (z *reader) Read(p []byte) (int, error) {
if z.err != nil {
return 0, z.err
}
- if len(p) == 0 {
- return 0, nil
- }
- n, err = z.decompressor.Read(p)
+ var n int
+ n, z.err = z.decompressor.Read(p)
z.digest.Write(p[0:n])
- if n != 0 || err != io.EOF {
- z.err = err
- return
+ if z.err != io.EOF {
+ // In the normal case we return here.
+ return n, z.err
}
// Finished file; check checksum.
err = io.ErrUnexpectedEOF
}
z.err = err
- return 0, err
+ return n, z.err
}
// ZLIB (RFC 1950) is big-endian, unlike GZIP (RFC 1952).
checksum := uint32(z.scratch[0])<<24 | uint32(z.scratch[1])<<16 | uint32(z.scratch[2])<<8 | uint32(z.scratch[3])
if checksum != z.digest.Sum32() {
z.err = ErrChecksum
- return 0, z.err
+ return n, z.err
}
- return
+ return n, io.EOF
}
// Calling Close does not close the wrapped io.Reader originally passed to NewReader.
func (z *reader) Close() error {
- if z.err != nil {
+ if z.err != nil && z.err != io.EOF {
return z.err
}
z.err = z.decompressor.Close()
}
func (z *reader) Reset(r io.Reader, dict []byte) error {
+ *z = reader{decompressor: z.decompressor}
if fr, ok := r.(flate.Reader); ok {
z.r = fr
} else {
z.r = bufio.NewReader(r)
}
- _, err := io.ReadFull(z.r, z.scratch[0:2])
- if err != nil {
- if err == io.EOF {
- err = io.ErrUnexpectedEOF
+
+ // Read the header (RFC 1950 section 2.2.).
+ _, z.err = io.ReadFull(z.r, z.scratch[0:2])
+ if z.err != nil {
+ if z.err == io.EOF {
+ z.err = io.ErrUnexpectedEOF
}
- return err
+ return z.err
}
h := uint(z.scratch[0])<<8 | uint(z.scratch[1])
if (z.scratch[0]&0x0f != zlibDeflate) || (h%31 != 0) {
- return ErrHeader
+ z.err = ErrHeader
+ return z.err
}
haveDict := z.scratch[1]&0x20 != 0
if haveDict {
- _, err = io.ReadFull(z.r, z.scratch[0:4])
- if err != nil {
- if err == io.EOF {
- err = io.ErrUnexpectedEOF
+ _, z.err = io.ReadFull(z.r, z.scratch[0:4])
+ if z.err != nil {
+ if z.err == io.EOF {
+ z.err = io.ErrUnexpectedEOF
}
- return err
+ return z.err
}
checksum := uint32(z.scratch[0])<<24 | uint32(z.scratch[1])<<16 | uint32(z.scratch[2])<<8 | uint32(z.scratch[3])
if checksum != adler32.Checksum(dict) {
- return ErrDictionary
+ z.err = ErrDictionary
+ return z.err
}
}
+
if z.decompressor == nil {
if haveDict {
z.decompressor = flate.NewReaderDict(z.r, dict)
b := new(bytes.Buffer)
for _, tt := range zlibTests {
in := bytes.NewReader(tt.compressed)
- zlib, err := NewReaderDict(in, tt.dict)
+ zr, err := NewReaderDict(in, tt.dict)
if err != nil {
if err != tt.err {
t.Errorf("%s: NewReader: %s", tt.desc, err)
}
continue
}
- defer zlib.Close()
+ defer zr.Close()
+
+ // Read and verify correctness of data.
b.Reset()
- n, err := io.Copy(b, zlib)
+ n, err := io.Copy(b, zr)
if err != nil {
if err != tt.err {
t.Errorf("%s: io.Copy: %v want %v", tt.desc, err, tt.err)
if s != tt.raw {
t.Errorf("%s: got %d-byte %q want %d-byte %q", tt.desc, n, s, len(tt.raw), tt.raw)
}
+
+ // Check for sticky errors.
+ if n, err := zr.Read([]byte{0}); n != 0 || err != io.EOF {
+ t.Errorf("%s: Read() = (%d, %v), want (0, io.EOF)", tt.desc, n, err)
+ }
+ if err := zr.Close(); err != nil {
+ t.Errorf("%s: Close() = %v, want nil", tt.desc, err)
+ }
}
}
)
const wordSize = int(unsafe.Sizeof(uintptr(0)))
-const supportsUnaligned = runtime.GOARCH == "386" || runtime.GOARCH == "amd64"
+const supportsUnaligned = runtime.GOARCH == "386" || runtime.GOARCH == "amd64" || runtime.GOARCH == "ppc64" || runtime.GOARCH == "ppc64le"
// fastXORBytes xors in bulk. It only works on architectures that
// support unaligned read/writes.
c.tmp[0] = alertLevelError
}
c.tmp[1] = byte(err)
- c.writeRecord(recordTypeAlert, c.tmp[0:2])
- // closeNotify is a special case in that it isn't an error:
- if err != alertCloseNotify {
- return c.out.setErrorLocked(&net.OpError{Op: "local error", Err: err})
+
+ _, writeErr := c.writeRecord(recordTypeAlert, c.tmp[0:2])
+ if err == alertCloseNotify {
+ // closeNotify is a special case in that it isn't an error.
+ return writeErr
}
- return nil
+
+ return c.out.setErrorLocked(&net.OpError{Op: "local error", Err: err})
}
// sendAlert sends a TLS alert message.
// writeRecord writes a TLS record with the given type and payload
// to the connection and updates the record layer state.
// c.out.Mutex <= L.
-func (c *Conn) writeRecord(typ recordType, data []byte) (n int, err error) {
+func (c *Conn) writeRecord(typ recordType, data []byte) (int, error) {
b := c.out.newBlock()
+ defer c.out.freeBlock(b)
+
+ var n int
for len(data) > 0 {
m := len(data)
if m > maxPlaintext {
if explicitIVIsSeq {
copy(explicitIV, c.out.seq[:])
} else {
- if _, err = io.ReadFull(c.config.rand(), explicitIV); err != nil {
- break
+ if _, err := io.ReadFull(c.config.rand(), explicitIV); err != nil {
+ return n, err
}
}
}
copy(b.data[recordHeaderLen+explicitIVLen:], data)
c.out.encrypt(b, explicitIVLen)
- _, err = c.conn.Write(b.data)
- if err != nil {
- break
+ if _, err := c.conn.Write(b.data); err != nil {
+ return n, err
}
n += m
data = data[m:]
}
- c.out.freeBlock(b)
if typ == recordTypeChangeCipherSpec {
- err = c.out.changeCipherSpec()
- if err != nil {
- // Cannot call sendAlert directly,
- // because we already hold c.out.Mutex.
- c.tmp[0] = alertLevelError
- c.tmp[1] = byte(err.(alert))
- c.writeRecord(recordTypeAlert, c.tmp[0:2])
- return n, c.out.setErrorLocked(&net.OpError{Op: "local error", Err: err})
+ if err := c.out.changeCipherSpec(); err != nil {
+ return n, c.sendAlertLocked(err.(alert))
}
}
- return
+
+ return n, nil
}
// readHandshake reads the next handshake message from
}
}
- c.writeRecord(recordTypeHandshake, hello.marshal())
+ if _, err := c.writeRecord(recordTypeHandshake, hello.marshal()); err != nil {
+ return err
+ }
msg, err := c.readHandshake()
if err != nil {
certMsg.certificates = chainToSend.Certificate
}
hs.finishedHash.Write(certMsg.marshal())
- c.writeRecord(recordTypeHandshake, certMsg.marshal())
+ if _, err := c.writeRecord(recordTypeHandshake, certMsg.marshal()); err != nil {
+ return err
+ }
}
preMasterSecret, ckx, err := keyAgreement.generateClientKeyExchange(c.config, hs.hello, certs[0])
}
if ckx != nil {
hs.finishedHash.Write(ckx.marshal())
- c.writeRecord(recordTypeHandshake, ckx.marshal())
+ if _, err := c.writeRecord(recordTypeHandshake, ckx.marshal()); err != nil {
+ return err
+ }
}
if chainToSend != nil {
}
hs.finishedHash.Write(certVerify.marshal())
- c.writeRecord(recordTypeHandshake, certVerify.marshal())
+ if _, err := c.writeRecord(recordTypeHandshake, certVerify.marshal()); err != nil {
+ return err
+ }
}
hs.masterSecret = masterFromPreMasterSecret(c.vers, hs.suite, preMasterSecret, hs.hello.random, hs.serverHello.random)
func (hs *clientHandshakeState) sendFinished(out []byte) error {
c := hs.c
- c.writeRecord(recordTypeChangeCipherSpec, []byte{1})
+ if _, err := c.writeRecord(recordTypeChangeCipherSpec, []byte{1}); err != nil {
+ return err
+ }
if hs.serverHello.nextProtoNeg {
nextProto := new(nextProtoMsg)
proto, fallback := mutualProtocol(c.config.NextProtos, hs.serverHello.nextProtos)
c.clientProtocolFallback = fallback
hs.finishedHash.Write(nextProto.marshal())
- c.writeRecord(recordTypeHandshake, nextProto.marshal())
+ if _, err := c.writeRecord(recordTypeHandshake, nextProto.marshal()); err != nil {
+ return err
+ }
}
finished := new(finishedMsg)
finished.verifyData = hs.finishedHash.clientSum(hs.masterSecret)
hs.finishedHash.Write(finished.marshal())
- c.writeRecord(recordTypeHandshake, finished.marshal())
+ if _, err := c.writeRecord(recordTypeHandshake, finished.marshal()); err != nil {
+ return err
+ }
copy(out, finished.verifyData)
return nil
}
"encoding/base64"
"encoding/binary"
"encoding/pem"
+ "errors"
"fmt"
"io"
"net"
t.Fatalf("Expected error about unconfigured cipher suite but got %q", err)
}
}
+
+// brokenConn wraps a net.Conn and causes all Writes after a certain number to
+// fail with brokenConnErr.
+type brokenConn struct {
+ net.Conn
+
+ // breakAfter is the number of successful writes that will be allowed
+ // before all subsequent writes fail.
+ breakAfter int
+
+ // numWrites is the number of writes that have been done.
+ numWrites int
+}
+
+// brokenConnErr is the error that brokenConn returns once exhausted.
+var brokenConnErr = errors.New("too many writes to brokenConn")
+
+func (b *brokenConn) Write(data []byte) (int, error) {
+ if b.numWrites >= b.breakAfter {
+ return 0, brokenConnErr
+ }
+
+ b.numWrites++
+ return b.Conn.Write(data)
+}
+
+func TestFailedWrite(t *testing.T) {
+ // Test that a write error during the handshake is returned.
+ for _, breakAfter := range []int{0, 1, 2, 3} {
+ c, s := net.Pipe()
+ done := make(chan bool)
+
+ go func() {
+ Server(s, testConfig).Handshake()
+ s.Close()
+ done <- true
+ }()
+
+ brokenC := &brokenConn{Conn: c, breakAfter: breakAfter}
+ err := Client(brokenC, testConfig).Handshake()
+ if err != brokenConnErr {
+ t.Errorf("#%d: expected error from brokenConn but got %q", breakAfter, err)
+ }
+ brokenC.Close()
+
+ <-done
+ }
+}
hs.finishedHash.discardHandshakeBuffer()
hs.finishedHash.Write(hs.clientHello.marshal())
hs.finishedHash.Write(hs.hello.marshal())
- c.writeRecord(recordTypeHandshake, hs.hello.marshal())
+ if _, err := c.writeRecord(recordTypeHandshake, hs.hello.marshal()); err != nil {
+ return err
+ }
if len(hs.sessionState.certificates) > 0 {
if _, err := hs.processCertsFromClient(hs.sessionState.certificates); err != nil {
}
hs.finishedHash.Write(hs.clientHello.marshal())
hs.finishedHash.Write(hs.hello.marshal())
- c.writeRecord(recordTypeHandshake, hs.hello.marshal())
+ if _, err := c.writeRecord(recordTypeHandshake, hs.hello.marshal()); err != nil {
+ return err
+ }
certMsg := new(certificateMsg)
certMsg.certificates = hs.cert.Certificate
hs.finishedHash.Write(certMsg.marshal())
- c.writeRecord(recordTypeHandshake, certMsg.marshal())
+ if _, err := c.writeRecord(recordTypeHandshake, certMsg.marshal()); err != nil {
+ return err
+ }
if hs.hello.ocspStapling {
certStatus := new(certificateStatusMsg)
certStatus.statusType = statusTypeOCSP
certStatus.response = hs.cert.OCSPStaple
hs.finishedHash.Write(certStatus.marshal())
- c.writeRecord(recordTypeHandshake, certStatus.marshal())
+ if _, err := c.writeRecord(recordTypeHandshake, certStatus.marshal()); err != nil {
+ return err
+ }
}
keyAgreement := hs.suite.ka(c.vers)
}
if skx != nil {
hs.finishedHash.Write(skx.marshal())
- c.writeRecord(recordTypeHandshake, skx.marshal())
+ if _, err := c.writeRecord(recordTypeHandshake, skx.marshal()); err != nil {
+ return err
+ }
}
if config.ClientAuth >= RequestClientCert {
certReq.certificateAuthorities = config.ClientCAs.Subjects()
}
hs.finishedHash.Write(certReq.marshal())
- c.writeRecord(recordTypeHandshake, certReq.marshal())
+ if _, err := c.writeRecord(recordTypeHandshake, certReq.marshal()); err != nil {
+ return err
+ }
}
helloDone := new(serverHelloDoneMsg)
hs.finishedHash.Write(helloDone.marshal())
- c.writeRecord(recordTypeHandshake, helloDone.marshal())
+ if _, err := c.writeRecord(recordTypeHandshake, helloDone.marshal()); err != nil {
+ return err
+ }
var pub crypto.PublicKey // public key for client auth, if any
}
hs.finishedHash.Write(m.marshal())
- c.writeRecord(recordTypeHandshake, m.marshal())
+ if _, err := c.writeRecord(recordTypeHandshake, m.marshal()); err != nil {
+ return err
+ }
return nil
}
func (hs *serverHandshakeState) sendFinished(out []byte) error {
c := hs.c
- c.writeRecord(recordTypeChangeCipherSpec, []byte{1})
+ if _, err := c.writeRecord(recordTypeChangeCipherSpec, []byte{1}); err != nil {
+ return err
+ }
finished := new(finishedMsg)
finished.verifyData = hs.finishedHash.serverSum(hs.masterSecret)
hs.finishedHash.Write(finished.marshal())
- c.writeRecord(recordTypeHandshake, finished.marshal())
+ if _, err := c.writeRecord(recordTypeHandshake, finished.marshal()); err != nil {
+ return err
+ }
c.cipherSuite = hs.suite.id
copy(out, finished.verifyData)
cli.writeRecord(recordTypeHandshake, m.marshal())
c.Close()
}()
- err := Server(s, serverConfig).Handshake()
+ hs := serverHandshakeState{
+ c: Server(s, serverConfig),
+ }
+ _, err := hs.readClientHello()
s.Close()
if len(expectedSubStr) == 0 {
if err != nil && err != io.EOF {
t.Fatal("DialWithTimeout completed successfully")
}
- if !strings.Contains(err.Error(), "timed out") {
- t.Errorf("resulting error not a timeout: %s", err)
+ if !isTimeoutError(err) {
+ t.Errorf("resulting error not a timeout: %v\nType %T: %#v", err, err, err)
}
}
+func isTimeoutError(err error) bool {
+ if ne, ok := err.(net.Error); ok {
+ return ne.Timeout()
+ }
+ return false
+}
+
// tests that Conn.Read returns (non-zero, io.EOF) instead of
// (non-zero, nil) when a Close (alertCloseNotify) is sitting right
// behind the application data in the buffer.
package gosym
// The table format is a variant of the format used in Plan 9's a.out
-// format, documented at http://plan9.bell-labs.com/magic/man2html/6/a.out.
+// format, documented at https://9p.io/magic/man2html/6/a.out.
// The best reference for the differences between the Plan 9 format
// and the Go format is the runtime source, specifically ../../runtime/symtab.c.
}
}
+var (
+ NaN = math.NaN()
+ posInf = math.Inf(1)
+ negInf = math.Inf(-1)
+)
+
const b32 uint32 = 1<<32 - 1
const b64 uint64 = 1<<64 - 1
{"%x", "abc", "616263"},
{"%x", "\xff\xf0\x0f\xff", "fff00fff"},
{"%X", "\xff\xf0\x0f\xff", "FFF00FFF"},
+ {"%x", "", ""},
+ {"% x", "", ""},
+ {"%#x", "", ""},
+ {"%# x", "", ""},
{"%x", "xyz", "78797a"},
{"%X", "xyz", "78797A"},
{"% x", "xyz", "78 79 7a"},
{"%x", []byte("abc"), "616263"},
{"%x", []byte("\xff\xf0\x0f\xff"), "fff00fff"},
{"%X", []byte("\xff\xf0\x0f\xff"), "FFF00FFF"},
+ {"%x", []byte(""), ""},
+ {"% x", []byte(""), ""},
+ {"%#x", []byte(""), ""},
+ {"%# x", []byte(""), ""},
{"%x", []byte("xyz"), "78797a"},
{"%X", []byte("xyz"), "78797A"},
{"% x", []byte("xyz"), "78 79 7a"},
{"%# X", []byte("xyz"), "0X78 0X79 0X7A"},
// escaped strings
- {"%#q", `abc`, "`abc`"},
- {"%#q", `"`, "`\"`"},
- {"1 %#q", `\n`, "1 `\\n`"},
- {"2 %#q", "\n", `2 "\n"`},
- {"%q", `"`, `"\""`},
- {"%q", "\a\b\f\r\n\t\v", `"\a\b\f\r\n\t\v"`},
+ {"%q", "", `""`},
+ {"%#q", "", "``"},
+ {"%q", "\"", `"\""`},
+ {"%#q", "\"", "`\"`"},
+ {"%q", "`", `"` + "`" + `"`},
+ {"%#q", "`", `"` + "`" + `"`},
+ {"%q", "\n", `"\n"`},
+ {"%#q", "\n", `"\n"`},
+ {"%q", `\n`, `"\\n"`},
+ {"%#q", `\n`, "`\\n`"},
+ {"%q", "abc", `"abc"`},
+ {"%#q", "abc", "`abc`"},
+ {"%q", "日本語", `"日本語"`},
+ {"%+q", "日本語", `"\u65e5\u672c\u8a9e"`},
+ {"%#q", "日本語", "`日本語`"},
+ {"%#+q", "日本語", "`日本語`"},
+ {"%q", "\a\b\f\n\r\t\v\"\\", `"\a\b\f\n\r\t\v\"\\"`},
+ {"%+q", "\a\b\f\n\r\t\v\"\\", `"\a\b\f\n\r\t\v\"\\"`},
+ {"%#q", "\a\b\f\n\r\t\v\"\\", `"\a\b\f\n\r\t\v\"\\"`},
+ {"%#+q", "\a\b\f\n\r\t\v\"\\", `"\a\b\f\n\r\t\v\"\\"`},
+ {"%q", "☺", `"☺"`},
+ {"% q", "☺", `"☺"`}, // The space modifier should have no effect.
+ {"%+q", "☺", `"\u263a"`},
+ {"%#q", "☺", "`☺`"},
+ {"%#+q", "☺", "`☺`"},
+ {"%10q", "⌘", ` "⌘"`},
+ {"%+10q", "⌘", ` "\u2318"`},
+ {"%-10q", "⌘", `"⌘" `},
+ {"%+-10q", "⌘", `"\u2318" `},
+ {"%010q", "⌘", `0000000"⌘"`},
+ {"%+010q", "⌘", `00"\u2318"`},
+ {"%-010q", "⌘", `"⌘" `}, // 0 has no effect when - is present.
+ {"%+-010q", "⌘", `"\u2318" `},
+ {"%#8q", "\n", ` "\n"`},
+ {"%#+8q", "\r", ` "\r"`},
+ {"%#-8q", "\t", "` ` "},
+ {"%#+-8q", "\b", `"\b" `},
{"%q", "abc\xffdef", `"abc\xffdef"`},
- {"%q", "\u263a", `"☺"`},
- {"%+q", "\u263a", `"\u263a"`},
- {"%q", "\U0010ffff", `"\U0010ffff"`},
+ {"%+q", "abc\xffdef", `"abc\xffdef"`},
+ {"%#q", "abc\xffdef", `"abc\xffdef"`},
+ {"%#+q", "abc\xffdef", `"abc\xffdef"`},
+ {"%q", "\U0010ffff", `"\U0010ffff"`}, // Rune is not printable.
+ {"%+q", "\U0010ffff", `"\U0010ffff"`},
+ {"%#q", "\U0010ffff", "``"},
+ {"%#+q", "\U0010ffff", "``"},
+ {"%q", string(0x110000), `"�"`}, // Rune is not valid.
+ {"%+q", string(0x110000), `"\ufffd"`},
+ {"%#q", string(0x110000), "`�`"},
+ {"%#+q", string(0x110000), "`�`"},
// escaped characters
- {"%q", 'x', `'x'`},
{"%q", 0, `'\x00'`},
+ {"%+q", 0, `'\x00'`},
+ {"%q", '"', `'"'`},
+ {"%+q", '"', `'"'`},
+ {"%q", '\'', `'\''`},
+ {"%+q", '\'', `'\''`},
+ {"%q", '`', "'`'"},
+ {"%+q", '`', "'`'"},
+ {"%q", 'x', `'x'`},
+ {"%+q", 'x', `'x'`},
+ {"%q", 'ÿ', `'ÿ'`},
+ {"%+q", 'ÿ', `'\u00ff'`},
{"%q", '\n', `'\n'`},
- {"%q", '\u0e00', `'\u0e00'`}, // not a printable rune.
- {"%q", '\U000c2345', `'\U000c2345'`}, // not a printable rune.
+ {"%+q", '\n', `'\n'`},
+ {"%q", '☺', `'☺'`},
+ {"% q", '☺', `'☺'`}, // The space modifier should have no effect.
+ {"%+q", '☺', `'\u263a'`},
+ {"%10q", '⌘', ` '⌘'`},
+ {"%+10q", '⌘', ` '\u2318'`},
+ {"%-10q", '⌘', `'⌘' `},
+ {"%+-10q", '⌘', `'\u2318' `},
+ {"%010q", '⌘', `0000000'⌘'`},
+ {"%+010q", '⌘', `00'\u2318'`},
+ {"%-010q", '⌘', `'⌘' `}, // 0 has no effect when - is present.
+ {"%+-010q", '⌘', `'\u2318' `},
+ {"%q", '\U00000e00', `'\u0e00'`}, // Rune is not printable.
+ {"%q", '\U000c2345', `'\U000c2345'`}, // Rune is not printable.
+ {"%q", '\U0010ffff', `'\U0010ffff'`}, // Rune is not printable.
+ {"%q", rune(0x110000), `%!q(int32=1114112)`}, // Rune is not valid.
{"%q", int64(0x7FFFFFFF), `%!q(int64=2147483647)`},
{"%q", uint64(0xFFFFFFFF), `%!q(uint64=4294967295)`},
- {"%q", '"', `'"'`},
- {"%q", '\'', `'\''`},
- {"%q", "\u263a", `"☺"`},
- {"%+q", "\u263a", `"\u263a"`},
// width
{"%5s", "abc", " abc"},
{"%08q", "abc", `000"abc"`},
{"%5s", "abcdefghijklmnopqrstuvwxyz", "abcdefghijklmnopqrstuvwxyz"},
{"%.5s", "abcdefghijklmnopqrstuvwxyz", "abcde"},
+ {"%.0s", "日本語日本語", ""},
{"%.5s", "日本語日本語", "日本語日本"},
+ {"%.10s", "日本語日本語", "日本語日本語"},
{"%.5s", []byte("日本語日本語"), "日本語日本"},
{"%.5q", "abcdefghijklmnopqrstuvwxyz", `"abcde"`},
- {"%.5x", "abcdefghijklmnopqrstuvwxyz", `6162636465`},
+ {"%.5x", "abcdefghijklmnopqrstuvwxyz", "6162636465"},
{"%.5q", []byte("abcdefghijklmnopqrstuvwxyz"), `"abcde"`},
- {"%.5x", []byte("abcdefghijklmnopqrstuvwxyz"), `6162636465`},
+ {"%.5x", []byte("abcdefghijklmnopqrstuvwxyz"), "6162636465"},
{"%.3q", "日本語日本語", `"日本語"`},
{"%.3q", []byte("日本語日本語"), `"日本語"`},
{"%.1q", "日本語", `"日"`},
{"%.1q", []byte("日本語"), `"日"`},
- {"%.1x", "日本語", `e6`},
- {"%.1X", []byte("日本語"), `E6`},
+ {"%.1x", "日本語", "e6"},
+ {"%.1X", []byte("日本語"), "E6"},
{"%10.1q", "日本語日本語", ` "日"`},
{"%3c", '⌘', " ⌘"},
{"%5q", '\u2026', ` '…'`},
{"% .3g", 1.0, " 1"},
{"%b", float32(1.0), "8388608p-23"},
{"%b", 1.0, "4503599627370496p-52"},
+ // Precision has no effect for binary float format.
+ {"%.4b", float32(1.0), "8388608p-23"},
+ {"%.4b", -1.0, "-4503599627370496p-52"},
+ // float infinites and NaNs
+ {"%f", posInf, "+Inf"},
+ {"%.1f", negInf, "-Inf"},
+ {"% f", NaN, " NaN"},
+ {"%20f", posInf, " +Inf"},
+ {"% 20F", posInf, " Inf"},
+ {"% 20e", negInf, " -Inf"},
+ {"%+20E", negInf, " -Inf"},
+ {"% +20g", negInf, " -Inf"},
+ {"%+-20G", posInf, "+Inf "},
+ {"%20e", NaN, " NaN"},
+ {"% +20E", NaN, " +NaN"},
+ {"% -20g", NaN, " NaN "},
+ {"%+-20G", NaN, "+NaN "},
+ // Zero padding does not apply to infinities and NaN.
+ {"%+020e", posInf, " +Inf"},
+ {"%-020f", negInf, "-Inf "},
+ {"%-020E", NaN, "NaN "},
// complex values
{"%.f", 0i, "(0+0i)"},
+ {"% .f", 0i, "( 0+0i)"},
{"%+.f", 0i, "(+0+0i)"},
{"% +.f", 0i, "(+0+0i)"},
{"%+.3e", 0i, "(+0.000e+00+0.000e+00i)"},
{"%.3f", -1 - 2i, "(-1.000-2.000i)"},
{"%.3g", -1 - 2i, "(-1-2i)"},
{"% .3E", -1 - 2i, "(-1.000E+00-2.000E+00i)"},
+ {"%+.3g", 1 + 2i, "(+1+2i)"},
{"%+.3g", complex64(1 + 2i), "(+1+2i)"},
- {"%+.3g", complex128(1 + 2i), "(+1+2i)"},
- {"%b", complex64(1 + 2i), "(8388608p-23+8388608p-22i)"},
{"%b", 1 + 2i, "(4503599627370496p-52+4503599627370496p-51i)"},
+ {"%b", complex64(1 + 2i), "(8388608p-23+8388608p-22i)"},
+ // Precision has no effect for binary complex format.
+ {"%.4b", 1 + 2i, "(4503599627370496p-52+4503599627370496p-51i)"},
+ {"%.4b", complex64(1 + 2i), "(8388608p-23+8388608p-22i)"},
+ // complex infinites and NaNs
+ {"%f", complex(posInf, posInf), "(+Inf+Infi)"},
+ {"%f", complex(negInf, negInf), "(-Inf-Infi)"},
+ {"%f", complex(NaN, NaN), "(NaN+NaNi)"},
+ {"%.1f", complex(posInf, posInf), "(+Inf+Infi)"},
+ {"% f", complex(posInf, posInf), "( Inf+Infi)"},
+ {"% f", complex(negInf, negInf), "(-Inf-Infi)"},
+ {"% f", complex(NaN, NaN), "( NaN+NaNi)"},
+ {"%8e", complex(posInf, posInf), "( +Inf +Infi)"},
+ {"% 8E", complex(posInf, posInf), "( Inf +Infi)"},
+ {"%+8f", complex(negInf, negInf), "( -Inf -Infi)"},
+ {"% +8g", complex(negInf, negInf), "( -Inf -Infi)"},
+ {"% -8G", complex(NaN, NaN), "( NaN +NaN i)"},
+ {"%+-8b", complex(NaN, NaN), "(+NaN +NaN i)"},
+ // Zero padding does not apply to infinities and NaN.
+ {"%08f", complex(posInf, posInf), "( +Inf +Infi)"},
+ {"%-08g", complex(negInf, negInf), "(-Inf -Inf i)"},
+ {"%-08G", complex(NaN, NaN), "(NaN +NaN i)"},
// erroneous formats
{"", 2, "%!(EXTRA int=2)"},
{"%g", 1.23456789e3, "1234.56789"},
{"%g", 1.23456789e-3, "0.00123456789"},
{"%g", 1.23456789e20, "1.23456789e+20"},
- {"%20e", math.Inf(1), " +Inf"},
- {"% 20f", math.Inf(1), " Inf"},
- {"%+20f", math.Inf(1), " +Inf"},
- {"% +20f", math.Inf(1), " +Inf"},
- {"%-20f", math.Inf(-1), "-Inf "},
- {"%20g", math.NaN(), " NaN"},
- {"%+20f", math.NaN(), " +NaN"},
- {"% +20f", math.NaN(), " +NaN"},
- {"% -20f", math.NaN(), " NaN "},
- {"%+-20f", math.NaN(), "+NaN "},
// arrays
{"%v", array, "[1 2 3 4 5]"},
{"%v", &slice, "&[1 2 3 4 5]"},
{"%v", &islice, "&[1 hello 2.5 <nil>]"},
{"%v", &bslice, "&[1 2 3 4 5]"},
- {"%v", []byte{1}, "[1]"},
+
+ // byte slices and arrays with %d and %v variants
+ {"%d", [0]byte{}, "[]"},
+ {"%d", [1]byte{123}, "[123]"},
+ {"%012d", []byte{}, "[]"},
+ {"%d", [3]byte{1, 11, 111}, "[1 11 111]"},
+ {"%d", [3]uint8{1, 11, 111}, "[1 11 111]"},
+ {"%06d", [3]byte{1, 11, 111}, "[000001 000011 000111]"},
+ {"%-6d", [3]byte{1, 11, 111}, "[1 11 111 ]"},
+ {"%-06d", [3]byte{1, 11, 111}, "[1 11 111 ]"}, // 0 has no effect when - is present.
{"%v", []byte{}, "[]"},
+ {"%012v", []byte{}, "[]"},
+ {"%#v", []byte{}, "[]byte{}"},
+ {"%#v", []uint8{}, "[]byte{}"},
+ {"%#012v", []byte{}, "[]byte{}"},
+ {"%v", []byte{123}, "[123]"},
+ {"%v", []byte{1, 11, 111}, "[1 11 111]"},
+ {"%6v", []byte{1, 11, 111}, "[ 1 11 111]"},
+ {"%06v", []byte{1, 11, 111}, "[000001 000011 000111]"},
+ {"%-6v", []byte{1, 11, 111}, "[1 11 111 ]"},
+ {"%-06v", []byte{1, 11, 111}, "[1 11 111 ]"},
+ {"%#v", []byte{1, 11, 111}, "[]byte{0x1, 0xb, 0x6f}"},
+ {"%#6v", []byte{1, 11, 111}, "[]byte{ 0x1, 0xb, 0x6f}"},
+ {"%#06v", []byte{1, 11, 111}, "[]byte{0x000001, 0x00000b, 0x00006f}"},
+ {"%#-6v", []byte{1, 11, 111}, "[]byte{0x1 , 0xb , 0x6f }"},
+ {"%#-06v", []byte{1, 11, 111}, "[]byte{0x1 , 0xb , 0x6f }"},
+ {"%v", [0]byte{}, "[]"},
+ {"%-12v", [0]byte{}, "[]"},
+ {"%#v", [0]byte{}, "[0]uint8{}"},
+ {"%#v", [0]uint8{}, "[0]uint8{}"},
+ {"%#-12v", [0]byte{}, "[0]uint8{}"},
+ {"%v", [1]byte{123}, "[123]"},
+ {"%v", [3]byte{1, 11, 111}, "[1 11 111]"},
+ {"%06v", [3]byte{1, 11, 111}, "[000001 000011 000111]"},
+ {"%-6v", [3]byte{1, 11, 111}, "[1 11 111 ]"},
+ {"%-06v", [3]byte{1, 11, 111}, "[1 11 111 ]"},
+ {"%#v", [3]byte{1, 11, 111}, "[3]uint8{0x1, 0xb, 0x6f}"},
+ {"%#6v", [3]byte{1, 11, 111}, "[3]uint8{ 0x1, 0xb, 0x6f}"},
+ {"%#06v", [3]byte{1, 11, 111}, "[3]uint8{0x000001, 0x00000b, 0x00006f}"},
+ {"%#-6v", [3]byte{1, 11, 111}, "[3]uint8{0x1 , 0xb , 0x6f }"},
+ {"%#-06v", [3]byte{1, 11, 111}, "[3]uint8{0x1 , 0xb , 0x6f }"},
+ // f.space should and f.plus should not have an effect with %v.
+ {"% v", []byte{1, 11, 111}, "[ 1 11 111]"},
+ {"%+v", [3]byte{1, 11, 111}, "[1 11 111]"},
+ {"%# -6v", []byte{1, 11, 111}, "[]byte{ 0x1 , 0xb , 0x6f }"},
+ {"%#+-6v", [3]byte{1, 11, 111}, "[3]uint8{0x1 , 0xb , 0x6f }"},
+ // f.space and f.plus should have an effect with %d.
+ {"% d", []byte{1, 11, 111}, "[ 1 11 111]"},
+ {"%+d", [3]byte{1, 11, 111}, "[+1 +11 +111]"},
+ {"%# -6d", []byte{1, 11, 111}, "[ 1 11 111 ]"},
+ {"%#+-6d", [3]byte{1, 11, 111}, "[+1 +11 +111 ]"},
+
+ // floates with %v
+ {"%v", 1.2345678, "1.2345678"},
+ {"%v", float32(1.2345678), "1.2345678"},
// complexes with %v
{"%v", 1 + 2i, "(1+2i)"},
{"%v", complex64(1 + 2i), "(1+2i)"},
- {"%v", complex128(1 + 2i), "(1+2i)"},
// structs
{"%v", A{1, 2, "a", []int{1, 2}}, `{1 2 a [1 2]}`},
{"%#v", bslice, `[]fmt_test.renamedUint8{0x1, 0x2, 0x3, 0x4, 0x5}`},
{"%#v", []byte(nil), "[]byte(nil)"},
{"%#v", []int32(nil), "[]int32(nil)"},
+ {"%#v", 1.2345678, "1.2345678"},
+ {"%#v", float32(1.2345678), "1.2345678"},
// slices with other formats
{"%#x", []int{1, 2, 15}, `[0x1 0x2 0xf]`},
{"%q", []string{"a", "b"}, `["a" "b"]`},
{"% 02x", []byte{1}, "01"},
{"% 02x", []byte{1, 2, 3}, "01 02 03"},
+
// Padding with byte slices.
- {"%x", []byte{}, ""},
- {"%02x", []byte{}, "00"},
+ {"%2x", []byte{}, " "},
+ {"%#2x", []byte{}, " "},
{"% 02x", []byte{}, "00"},
- {"%08x", []byte{0xab}, "000000ab"},
- {"% 08x", []byte{0xab}, "000000ab"},
- {"%08x", []byte{0xab, 0xcd}, "0000abcd"},
- {"% 08x", []byte{0xab, 0xcd}, "000ab cd"},
+ {"%# 02x", []byte{}, "00"},
+ {"%-2x", []byte{}, " "},
+ {"%-02x", []byte{}, " "},
{"%8x", []byte{0xab}, " ab"},
{"% 8x", []byte{0xab}, " ab"},
- {"%8x", []byte{0xab, 0xcd}, " abcd"},
- {"% 8x", []byte{0xab, 0xcd}, " ab cd"},
+ {"%#8x", []byte{0xab}, " 0xab"},
+ {"%# 8x", []byte{0xab}, " 0xab"},
+ {"%08x", []byte{0xab}, "000000ab"},
+ {"% 08x", []byte{0xab}, "000000ab"},
+ {"%#08x", []byte{0xab}, "00000xab"},
+ {"%# 08x", []byte{0xab}, "00000xab"},
+ {"%10x", []byte{0xab, 0xcd}, " abcd"},
+ {"% 10x", []byte{0xab, 0xcd}, " ab cd"},
+ {"%#10x", []byte{0xab, 0xcd}, " 0xabcd"},
+ {"%# 10x", []byte{0xab, 0xcd}, " 0xab 0xcd"},
+ {"%010x", []byte{0xab, 0xcd}, "000000abcd"},
+ {"% 010x", []byte{0xab, 0xcd}, "00000ab cd"},
+ {"%#010x", []byte{0xab, 0xcd}, "00000xabcd"},
+ {"%# 010x", []byte{0xab, 0xcd}, "00xab 0xcd"},
+ {"%-10X", []byte{0xab}, "AB "},
+ {"% -010X", []byte{0xab}, "AB "},
+ {"%#-10X", []byte{0xab, 0xcd}, "0XABCD "},
+ {"%# -010X", []byte{0xab, 0xcd}, "0XAB 0XCD "},
// Same for strings
- {"%x", "", ""},
- {"%02x", "", "00"},
+ {"%2x", "", " "},
+ {"%#2x", "", " "},
{"% 02x", "", "00"},
- {"%08x", "\xab", "000000ab"},
- {"% 08x", "\xab", "000000ab"},
- {"%08x", "\xab\xcd", "0000abcd"},
- {"% 08x", "\xab\xcd", "000ab cd"},
+ {"%# 02x", "", "00"},
+ {"%-2x", "", " "},
+ {"%-02x", "", " "},
{"%8x", "\xab", " ab"},
{"% 8x", "\xab", " ab"},
- {"%8x", "\xab\xcd", " abcd"},
- {"% 8x", "\xab\xcd", " ab cd"},
+ {"%#8x", "\xab", " 0xab"},
+ {"%# 8x", "\xab", " 0xab"},
+ {"%08x", "\xab", "000000ab"},
+ {"% 08x", "\xab", "000000ab"},
+ {"%#08x", "\xab", "00000xab"},
+ {"%# 08x", "\xab", "00000xab"},
+ {"%10x", "\xab\xcd", " abcd"},
+ {"% 10x", "\xab\xcd", " ab cd"},
+ {"%#10x", "\xab\xcd", " 0xabcd"},
+ {"%# 10x", "\xab\xcd", " 0xab 0xcd"},
+ {"%010x", "\xab\xcd", "000000abcd"},
+ {"% 010x", "\xab\xcd", "00000ab cd"},
+ {"%#010x", "\xab\xcd", "00000xabcd"},
+ {"%# 010x", "\xab\xcd", "00xab 0xcd"},
+ {"%-10X", "\xab", "AB "},
+ {"% -010X", "\xab", "AB "},
+ {"%#-10X", "\xab\xcd", "0XABCD "},
+ {"%# -010X", "\xab\xcd", "0XAB 0XCD "},
// renamings
{"%v", renamedBool(true), "true"},
// be fetched directly, the lookup fails and returns a
// zero reflect.Value, which formats as <nil>.
// This test is just to check that it shows the two NaNs at all.
- {"%v", map[float64]int{math.NaN(): 1, math.NaN(): 2}, "map[NaN:<nil> NaN:<nil>]"},
+ {"%v", map[float64]int{NaN: 1, NaN: 2}, "map[NaN:<nil> NaN:<nil>]"},
// Used to crash because nByte didn't allow for a sign.
{"%b", int64(-1 << 63), zeroFill("-1", 63, "")},
// Complex numbers: exhaustively tested in TestComplexFormatting.
{"%7.2f", 1 + 2i, "( 1.00 +2.00i)"},
{"%+07.2f", -1 - 2i, "(-001.00-002.00i)"},
- // Zero padding does not apply to infinities and NaN.
- {"%020f", math.Inf(-1), " -Inf"},
- {"%020f", math.Inf(+1), " +Inf"},
- {"%020f", math.NaN(), " NaN"},
- {"% 020f", math.Inf(-1), " -Inf"},
- {"% 020f", math.Inf(+1), " Inf"},
- {"% 020f", math.NaN(), " NaN"},
- {"%+020f", math.Inf(-1), " -Inf"},
- {"%+020f", math.Inf(+1), " +Inf"},
- {"%+020f", math.NaN(), " +NaN"},
- {"%-020f", math.Inf(-1), "-Inf "},
- {"%-020f", math.Inf(+1), "+Inf "},
- {"%-020f", math.NaN(), "NaN "},
+
{"%20f", -1.0, " -1.000000"},
// Make sure we can handle very large widths.
{"%0100f", -1.0, zeroFill("-", 99, "1.000000")},
{"%0-5s", "abc", "abc "},
{"%-05.1f", 1.0, "1.0 "},
+ // float and complex formatting should not change the padding width
+ // for other elements. See issue 14642.
+ {"%06v", []interface{}{+10.0, 10}, "[000010 000010]"},
+ {"%06v", []interface{}{-10.0, 10}, "[-00010 000010]"},
+ {"%06v", []interface{}{+10.0 + 10i, 10}, "[(000010+00010i) 000010]"},
+ {"%06v", []interface{}{-10.0 + 10i, 10}, "[(-00010+00010i) 000010]"},
+
// Complex fmt used to leave the plus flag set for future entries in the array
// causing +2+0i and +3+0i instead of 2+0i and 3+0i.
{"%v", []complex64{1, 2, 3}, "[(1+0i) (2+0i) (3+0i)]"},
// invalid reflect.Value doesn't crash.
{"%v", reflect.Value{}, "<invalid reflect.Value>"},
+
+ // Tests to check that not supported verbs generate an error string.
+ {"%☠", nil, "%!☠(<nil>)"},
+ {"%☠", interface{}(nil), "%!☠(<nil>)"},
+ {"%☠", []byte{0}, "%!☠([]uint8=[0])"},
+ {"%☠", []uint8{0}, "%!☠([]uint8=[0])"},
+ {"%☠", [1]byte{0}, "%!☠([1]uint8=[0])"},
+ {"%☠", [1]uint8{0}, "%!☠([1]uint8=[0])"},
+ {"%☠", 1.2345678, "%!☠(float64=1.2345678)"},
+ {"%☠", float32(1.2345678), "%!☠(float32=1.2345678)"},
+ {"%☠", 1.2345678 + 1.2345678i, "%!☠(complex128=(1.2345678+1.2345678i))"},
+ {"%☠", complex64(1.2345678 + 1.2345678i), "%!☠(complex64=(1.2345678+1.2345678i))"},
}
// zeroFill generates zero-filled strings of the specified width. The length
// thing as if done by hand with two singleton prints.
func TestComplexFormatting(t *testing.T) {
var yesNo = []bool{true, false}
- var values = []float64{1, 0, -1, math.Inf(1), math.Inf(-1), math.NaN()}
+ var values = []float64{1, 0, -1, posInf, negInf, NaN}
for _, plus := range yesNo {
for _, zero := range yesNo {
for _, space := range yesNo {
})
}
+func BenchmarkSprintfTruncateString(b *testing.B) {
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ Sprintf("%.3s", "日本語日本語日本語")
+ }
+ })
+}
+
+func BenchmarkSprintfQuoteString(b *testing.B) {
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ Sprintf("%q", "日本語日本語日本語")
+ }
+ })
+}
+
func BenchmarkSprintfInt(b *testing.B) {
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
}
})
}
+
+func BenchmarkSprintfComplex(b *testing.B) {
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ Sprintf("%f", 5.23184+5.23184i)
+ }
+ })
+}
+
func BenchmarkSprintfBoolean(b *testing.B) {
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
})
}
+func BenchmarkSprintfHexString(b *testing.B) {
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ Sprintf("% #x", "0123456789abcdef")
+ }
+ })
+}
+
+func BenchmarkSprintfHexBytes(b *testing.B) {
+ data := []byte("0123456789abcdef")
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ Sprintf("% #x", data)
+ }
+ })
+}
+
+func BenchmarkSprintfBytes(b *testing.B) {
+ data := []byte("0123456789abcdef")
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ Sprintf("%v", data)
+ }
+ })
+}
+
func BenchmarkManyArgs(b *testing.B) {
b.RunParallel(func(pb *testing.PB) {
var buf bytes.Buffer
// Hex can add 0x and we handle it specially.
nByte = 65
- ldigits = "0123456789abcdef"
- udigits = "0123456789ABCDEF"
+ ldigits = "0123456789abcdefx"
+ udigits = "0123456789ABCDEFX"
)
const (
buf[i] = '0'
}
case 16:
+ // Add a leading 0x or 0X.
i--
- buf[i] = 'x' + digits[10] - 'a'
+ buf[i] = digits[16]
i--
buf[i] = '0'
}
// truncate truncates the string to the specified precision, if present.
func (f *fmt) truncate(s string) string {
- if f.precPresent && f.prec < utf8.RuneCountInString(s) {
+ if f.precPresent {
n := f.prec
for i := range s {
- if n == 0 {
- s = s[:i]
- break
- }
n--
+ if n < 0 {
+ return s[:i]
+ }
}
}
return s
// fmt_sbx formats a string or byte slice as a hexadecimal encoding of its bytes.
func (f *fmt) fmt_sbx(s string, b []byte, digits string) {
- n := len(b)
+ length := len(b)
if b == nil {
- n = len(s)
+ // No byte slice present. Assume string s should be encoded.
+ length = len(s)
+ }
+ // Set length to not process more bytes than the precision demands.
+ if f.precPresent && f.prec < length {
+ length = f.prec
+ }
+ // Compute width of the encoding taking into account the f.sharp and f.space flag.
+ width := 2 * length
+ if width > 0 {
+ if f.space {
+ // Each element encoded by two hexadecimals will get a leading 0x or 0X.
+ if f.sharp {
+ width *= 2
+ }
+ // Elements will be separated by a space.
+ width += length - 1
+ } else if f.sharp {
+ // Only a leading 0x or 0X will be added for the whole string.
+ width += 2
+ }
+ } else { // The byte slice or string that should be encoded is empty.
+ if f.widPresent {
+ f.writePadding(f.wid)
+ }
+ return
+ }
+ // Handle padding to the left.
+ if f.widPresent && f.wid > width && !f.minus {
+ f.writePadding(f.wid - width)
+ }
+ // Write the encoding directly into the output buffer.
+ buf := *f.buf
+ if f.sharp {
+ // Add leading 0x or 0X.
+ buf = append(buf, '0', digits[16])
}
- x := digits[10] - 'a' + 'x'
- // TODO: Avoid buffer by pre-padding.
- var buf []byte
- for i := 0; i < n; i++ {
- if i > 0 && f.space {
+ var c byte
+ for i := 0; i < length; i++ {
+ if f.space && i > 0 {
+ // Separate elements with a space.
buf = append(buf, ' ')
+ if f.sharp {
+ // Add leading 0x or 0X for each element.
+ buf = append(buf, '0', digits[16])
+ }
}
- if f.sharp && (f.space || i == 0) {
- buf = append(buf, '0', x)
- }
- var c byte
- if b == nil {
- c = s[i]
+ if b != nil {
+ c = b[i] // Take a byte from the input byte slice.
} else {
- c = b[i]
+ c = s[i] // Take a byte from the input string.
}
+ // Encode each byte as two hexadecimal digits.
buf = append(buf, digits[c>>4], digits[c&0xF])
}
- f.pad(buf)
+ *f.buf = buf
+ // Handle padding to the right.
+ if f.widPresent && f.wid > width && f.minus {
+ f.writePadding(f.wid - width)
+ }
}
// fmt_sx formats a string as a hexadecimal encoding of its bytes.
func (f *fmt) fmt_sx(s, digits string) {
- if f.precPresent && f.prec < len(s) {
- s = s[:f.prec]
- }
f.fmt_sbx(s, nil, digits)
}
// fmt_bx formats a byte slice as a hexadecimal encoding of its bytes.
func (f *fmt) fmt_bx(b []byte, digits string) {
- if f.precPresent && f.prec < len(b) {
- b = b[:f.prec]
- }
f.fmt_sbx("", b, digits)
}
// fmt_q formats a string as a double-quoted, escaped Go string constant.
+// If f.sharp is set, a raw (backquoted) string may be returned instead,
+// provided the string does not contain any control characters other than tab.
func (f *fmt) fmt_q(s string) {
s = f.truncate(s)
- var quoted string
if f.sharp && strconv.CanBackquote(s) {
- quoted = "`" + s + "`"
+ f.padString("`" + s + "`")
+ return
+ }
+ buf := f.intbuf[:0]
+ if f.plus {
+ f.pad(strconv.AppendQuoteToASCII(buf, s))
} else {
- if f.plus {
- quoted = strconv.QuoteToASCII(s)
- } else {
- quoted = strconv.Quote(s)
- }
+ f.pad(strconv.AppendQuote(buf, s))
}
- f.padString(quoted)
}
// fmt_qc formats the integer as a single-quoted, escaped Go character constant.
// If the character is not valid Unicode, it will print '\ufffd'.
func (f *fmt) fmt_qc(c int64) {
- var quoted []byte
+ buf := f.intbuf[:0]
if f.plus {
- quoted = strconv.AppendQuoteRuneToASCII(f.intbuf[0:0], rune(c))
+ f.pad(strconv.AppendQuoteRuneToASCII(buf, rune(c)))
} else {
- quoted = strconv.AppendQuoteRune(f.intbuf[0:0], rune(c))
+ f.pad(strconv.AppendQuoteRune(buf, rune(c)))
}
- f.pad(quoted)
}
-// floating-point
-
-func doPrec(f *fmt, def int) int {
+// fmt_float formats a float64. It assumes that verb is a valid format specifier
+// for strconv.AppendFloat and therefore fits into a byte.
+func (f *fmt) fmt_float(v float64, size int, verb rune, prec int) {
+ // Explicit precision in format specifier overrules default precision.
if f.precPresent {
- return f.prec
+ prec = f.prec
}
- return def
-}
-
-// formatFloat formats a float64; it is an efficient equivalent to f.pad(strconv.FormatFloat()...).
-func (f *fmt) formatFloat(v float64, verb byte, prec, n int) {
// Format number, reserving space for leading + sign if needed.
- num := strconv.AppendFloat(f.intbuf[:1], v, verb, prec, n)
+ num := strconv.AppendFloat(f.intbuf[:1], v, byte(verb), prec, size)
if num[1] == '-' || num[1] == '+' {
num = num[1:]
} else {
if f.zero && f.widPresent && f.wid > len(num) {
f.buf.WriteByte(num[0])
f.wid--
- num = num[1:]
+ f.pad(num[1:])
+ f.wid++
+ return
}
f.pad(num)
return
// No sign to show and the number is positive; just print the unsigned number.
f.pad(num[1:])
}
-
-// fmt_e64 formats a float64 in the form -1.23e+12.
-func (f *fmt) fmt_e64(v float64) { f.formatFloat(v, 'e', doPrec(f, 6), 64) }
-
-// fmt_E64 formats a float64 in the form -1.23E+12.
-func (f *fmt) fmt_E64(v float64) { f.formatFloat(v, 'E', doPrec(f, 6), 64) }
-
-// fmt_f64 formats a float64 in the form -1.23.
-func (f *fmt) fmt_f64(v float64) { f.formatFloat(v, 'f', doPrec(f, 6), 64) }
-
-// fmt_g64 formats a float64 in the 'f' or 'e' form according to size.
-func (f *fmt) fmt_g64(v float64) { f.formatFloat(v, 'g', doPrec(f, -1), 64) }
-
-// fmt_G64 formats a float64 in the 'f' or 'E' form according to size.
-func (f *fmt) fmt_G64(v float64) { f.formatFloat(v, 'G', doPrec(f, -1), 64) }
-
-// fmt_fb64 formats a float64 in the form -123p3 (exponent is power of 2).
-func (f *fmt) fmt_fb64(v float64) { f.formatFloat(v, 'b', 0, 64) }
-
-// float32
-// cannot defer to float64 versions
-// because it will get rounding wrong in corner cases.
-
-// fmt_e32 formats a float32 in the form -1.23e+12.
-func (f *fmt) fmt_e32(v float32) { f.formatFloat(float64(v), 'e', doPrec(f, 6), 32) }
-
-// fmt_E32 formats a float32 in the form -1.23E+12.
-func (f *fmt) fmt_E32(v float32) { f.formatFloat(float64(v), 'E', doPrec(f, 6), 32) }
-
-// fmt_f32 formats a float32 in the form -1.23.
-func (f *fmt) fmt_f32(v float32) { f.formatFloat(float64(v), 'f', doPrec(f, 6), 32) }
-
-// fmt_g32 formats a float32 in the 'f' or 'e' form according to size.
-func (f *fmt) fmt_g32(v float32) { f.formatFloat(float64(v), 'g', doPrec(f, -1), 32) }
-
-// fmt_G32 formats a float32 in the 'f' or 'E' form according to size.
-func (f *fmt) fmt_G32(v float32) { f.formatFloat(float64(v), 'G', doPrec(f, -1), 32) }
-
-// fmt_fb32 formats a float32 in the form -123p3 (exponent is power of 2).
-func (f *fmt) fmt_fb32(v float32) { f.formatFloat(float64(v), 'b', 0, 32) }
-
-// fmt_c64 formats a complex64 according to the verb.
-func (f *fmt) fmt_c64(v complex64, verb rune) {
- f.fmt_complex(float64(real(v)), float64(imag(v)), 32, verb)
-}
-
-// fmt_c128 formats a complex128 according to the verb.
-func (f *fmt) fmt_c128(v complex128, verb rune) {
- f.fmt_complex(real(v), imag(v), 64, verb)
-}
-
-// fmt_complex formats a complex number as (r+ji).
-func (f *fmt) fmt_complex(r, j float64, size int, verb rune) {
- f.buf.WriteByte('(')
- oldPlus := f.plus
- oldSpace := f.space
- oldWid := f.wid
- for i := 0; ; i++ {
- switch verb {
- case 'b':
- f.formatFloat(r, 'b', 0, size)
- case 'e':
- f.formatFloat(r, 'e', doPrec(f, 6), size)
- case 'E':
- f.formatFloat(r, 'E', doPrec(f, 6), size)
- case 'f', 'F':
- f.formatFloat(r, 'f', doPrec(f, 6), size)
- case 'g':
- f.formatFloat(r, 'g', doPrec(f, -1), size)
- case 'G':
- f.formatFloat(r, 'G', doPrec(f, -1), size)
- }
- if i != 0 {
- break
- }
- // Imaginary part always has a sign.
- f.plus = true
- f.space = false
- f.wid = oldWid
- r = j
- }
- f.space = oldSpace
- f.plus = oldPlus
- f.wid = oldWid
- f.buf.WriteString("i)")
-}
badIndexString = "(BADINDEX)"
panicString = "(PANIC="
extraString = "%!(EXTRA "
- bytesString = "[]byte{"
+ bytesString = "[]byte"
badWidthString = "%!(BADWIDTH)"
badPrecString = "%!(BADPREC)"
noVerbString = "%!(NOVERB)"
}
}
-func (p *pp) fmtFloat32(v float32, verb rune) {
+// fmtFloat formats a float. The default precision for each verb
+// is specified as last argument in the call to fmt_float.
+func (p *pp) fmtFloat(v float64, size int, verb rune) {
switch verb {
- case 'b':
- p.fmt.fmt_fb32(v)
- case 'e':
- p.fmt.fmt_e32(v)
- case 'E':
- p.fmt.fmt_E32(v)
- case 'f', 'F':
- p.fmt.fmt_f32(v)
- case 'g', 'v':
- p.fmt.fmt_g32(v)
- case 'G':
- p.fmt.fmt_G32(v)
- default:
- p.badVerb(verb)
- }
-}
-
-func (p *pp) fmtFloat64(v float64, verb rune) {
- switch verb {
- case 'b':
- p.fmt.fmt_fb64(v)
- case 'e':
- p.fmt.fmt_e64(v)
- case 'E':
- p.fmt.fmt_E64(v)
- case 'f', 'F':
- p.fmt.fmt_f64(v)
- case 'g', 'v':
- p.fmt.fmt_g64(v)
- case 'G':
- p.fmt.fmt_G64(v)
- default:
- p.badVerb(verb)
- }
-}
-
-func (p *pp) fmtComplex64(v complex64, verb rune) {
- switch verb {
- case 'b', 'e', 'E', 'f', 'F', 'g', 'G':
- p.fmt.fmt_c64(v, verb)
case 'v':
- p.fmt.fmt_c64(v, 'g')
+ p.fmt.fmt_float(v, size, 'g', -1)
+ case 'b', 'g', 'G':
+ p.fmt.fmt_float(v, size, verb, -1)
+ case 'f', 'e', 'E':
+ p.fmt.fmt_float(v, size, verb, 6)
+ case 'F':
+ p.fmt.fmt_float(v, size, 'f', 6)
default:
p.badVerb(verb)
}
}
-func (p *pp) fmtComplex128(v complex128, verb rune) {
+// fmtComplex formats a complex number v with
+// r = real(v) and j = imag(v) as (r+ji) using
+// fmtFloat for r and j formatting.
+func (p *pp) fmtComplex(v complex128, size int, verb rune) {
+ // Make sure any unsupported verbs are found before the
+ // calls to fmtFloat to not generate an incorrect error string.
switch verb {
- case 'b', 'e', 'E', 'f', 'F', 'g', 'G':
- p.fmt.fmt_c128(v, verb)
- case 'v':
- p.fmt.fmt_c128(v, 'g')
+ case 'v', 'b', 'g', 'G', 'f', 'F', 'e', 'E':
+ oldPlus := p.fmt.plus
+ p.buf.WriteByte('(')
+ p.fmtFloat(real(v), size/2, verb)
+ // Imaginary part always has a sign.
+ p.fmt.plus = true
+ p.fmtFloat(imag(v), size/2, verb)
+ p.buf.WriteString("i)")
+ p.fmt.plus = oldPlus
default:
p.badVerb(verb)
}
}
}
-func (p *pp) fmtBytes(v []byte, verb rune, typ reflect.Type, depth int) {
- if verb == 'v' || verb == 'd' {
+func (p *pp) fmtBytes(v []byte, verb rune, typeString string) {
+ switch verb {
+ case 'v', 'd':
if p.fmt.sharpV {
+ p.buf.WriteString(typeString)
if v == nil {
- if typ == nil {
- p.buf.WriteString("[]byte(nil)")
- } else {
- p.buf.WriteString(typ.String())
- p.buf.WriteString(nilParenString)
- }
+ p.buf.WriteString(nilParenString)
return
}
- if typ == nil {
- p.buf.WriteString(bytesString)
- } else {
- p.buf.WriteString(typ.String())
- p.buf.WriteByte('{')
+ p.buf.WriteByte('{')
+ for i, c := range v {
+ if i > 0 {
+ p.buf.WriteString(commaSpaceString)
+ }
+ p.fmt0x64(uint64(c), true)
}
+ p.buf.WriteByte('}')
} else {
p.buf.WriteByte('[')
- }
- for i, c := range v {
- if i > 0 {
- if p.fmt.sharpV {
- p.buf.WriteString(commaSpaceString)
- } else {
+ for i, c := range v {
+ if i > 0 {
p.buf.WriteByte(' ')
}
+ p.fmt.integer(int64(c), 10, unsigned, ldigits)
}
- p.printArg(c, 'v', depth+1)
- }
- if p.fmt.sharpV {
- p.buf.WriteByte('}')
- } else {
p.buf.WriteByte(']')
}
- return
- }
- switch verb {
case 's':
p.fmt.fmt_s(string(v))
case 'x':
case bool:
p.fmtBool(f, verb)
case float32:
- p.fmtFloat32(f, verb)
+ p.fmtFloat(float64(f), 32, verb)
case float64:
- p.fmtFloat64(f, verb)
+ p.fmtFloat(f, 64, verb)
case complex64:
- p.fmtComplex64(f, verb)
+ p.fmtComplex(complex128(f), 64, verb)
case complex128:
- p.fmtComplex128(f, verb)
+ p.fmtComplex(f, 128, verb)
case int:
p.fmtInt64(int64(f), verb)
case int8:
case string:
p.fmtString(f, verb)
case []byte:
- p.fmtBytes(f, verb, nil, depth)
+ p.fmtBytes(f, verb, bytesString)
case reflect.Value:
p.printReflectValue(f, verb, depth)
return
p.fmtInt64(f.Int(), verb)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
p.fmtUint64(f.Uint(), verb)
- case reflect.Float32, reflect.Float64:
- if f.Type().Size() == 4 {
- p.fmtFloat32(float32(f.Float()), verb)
- } else {
- p.fmtFloat64(f.Float(), verb)
- }
- case reflect.Complex64, reflect.Complex128:
- if f.Type().Size() == 8 {
- p.fmtComplex64(complex64(f.Complex()), verb)
- } else {
- p.fmtComplex128(f.Complex(), verb)
- }
+ case reflect.Float32:
+ p.fmtFloat(f.Float(), 32, verb)
+ case reflect.Float64:
+ p.fmtFloat(f.Float(), 64, verb)
+ case reflect.Complex64:
+ p.fmtComplex(f.Complex(), 64, verb)
+ case reflect.Complex128:
+ p.fmtComplex(f.Complex(), 128, verb)
case reflect.String:
p.fmtString(f.String(), verb)
case reflect.Map:
bytes[i] = byte(f.Index(i).Uint())
}
}
- p.fmtBytes(bytes, verb, typ, depth)
+ p.fmtBytes(bytes, verb, typ.String())
break
}
if p.fmt.sharpV {
p.buf.WriteString(value.Type().String())
if f.Kind() == reflect.Slice && f.IsNil() {
- p.buf.WriteString("(nil)")
+ p.buf.WriteString(nilParenString)
break
}
p.buf.WriteByte('{')
}
// Comments returns the list of comment groups in the comment map.
-// The result is sorted is source order.
+// The result is sorted in source order.
//
func (cmap CommentMap) Comments() []*CommentGroup {
list := make([]*CommentGroup, 0, len(cmap))
// in all releases >= Go 1.x. Code that requires Go 1.x or later should
// say "+build go1.x", and code that should only be built before Go 1.x
// (perhaps it is the stub to use in that case) should say "+build !go1.x".
- c.ReleaseTags = []string{"go1.1", "go1.2", "go1.3", "go1.4", "go1.5", "go1.6"}
+ c.ReleaseTags = []string{"go1.1", "go1.2", "go1.3", "go1.4", "go1.5", "go1.6", "go1.7"}
switch os.Getenv("CGO_ENABLED") {
case "1":
// - "go1.4", from Go version 1.4 onward
// - "go1.5", from Go version 1.5 onward
// - "go1.6", from Go version 1.6 onward
+// - "go1.7", from Go version 1.7 onward
// - any additional words listed in ctxt.BuildTags
//
// If a file's name, after stripping the extension and a possible _test suffix,
Play *ast.File // a whole program version of the example
Comments []*ast.CommentGroup
Output string // expected output
- EmptyOutput bool // expect empty output
- Order int // original source code order
+ Unordered bool
+ EmptyOutput bool // expect empty output
+ Order int // original source code order
}
// Examples returns the examples found in the files, sorted by Name field.
if f.Doc != nil {
doc = f.Doc.Text()
}
- output, hasOutput := exampleOutput(f.Body, file.Comments)
+ output, unordered, hasOutput := exampleOutput(f.Body, file.Comments)
flist = append(flist, &Example{
Name: name[len("Example"):],
Doc: doc,
Play: playExample(file, f.Body),
Comments: file.Comments,
Output: output,
+ Unordered: unordered,
EmptyOutput: output == "" && hasOutput,
Order: len(flist),
})
return list
}
-var outputPrefix = regexp.MustCompile(`(?i)^[[:space:]]*output:`)
+var outputPrefix = regexp.MustCompile(`(?i)^[[:space:]]*(unordered )?output:`)
// Extracts the expected output and whether there was a valid output comment
-func exampleOutput(b *ast.BlockStmt, comments []*ast.CommentGroup) (output string, ok bool) {
+func exampleOutput(b *ast.BlockStmt, comments []*ast.CommentGroup) (output string, unordered, ok bool) {
if _, last := lastComment(b, comments); last != nil {
// test that it begins with the correct prefix
text := last.Text()
- if loc := outputPrefix.FindStringIndex(text); loc != nil {
+ if loc := outputPrefix.FindStringSubmatchIndex(text); loc != nil {
+ if loc[2] != -1 {
+ unordered = true
+ }
text = text[loc[1]:]
// Strip zero or more spaces followed by \n or a single space.
text = strings.TrimLeft(text, " ")
if len(text) > 0 && text[0] == '\n' {
text = text[1:]
}
- return text, true
+ return text, unordered, true
}
}
- return "", false // no suitable comment found
+ return "", false, false // no suitable comment found
}
// isTest tells whether name looks like a test, example, or benchmark.
}
}
- // Strip "Output:" comment and adjust body end position.
+ // Strip the "Output:" or "Unordered output:" comment and adjust body
+ // end position.
body, comments = stripOutputComment(body, comments)
// Synthesize import declaration.
return &f
}
-// stripOutputComment finds and removes an "Output:" comment from body
-// and comments, and adjusts the body block's end position.
+// stripOutputComment finds and removes the "Output:" or "Unordered output:"
+// comment from body and comments, and adjusts the body block's end position.
func stripOutputComment(body *ast.BlockStmt, comments []*ast.CommentGroup) (*ast.BlockStmt, []*ast.CommentGroup) {
- // Do nothing if no "Output:" comment found.
+ // Do nothing if there is no "Output:" or "Unordered output:" comment.
i, last := lastComment(body, comments)
if last == nil || !outputPrefix.MatchString(last.Text()) {
return body, comments
check.invalidAST(s.TokPos, "unknown inc/dec operation %s", s.Tok)
return
}
+
var x operand
+ check.expr(&x, s.X)
+ if x.mode == invalid {
+ return
+ }
+ if !isNumeric(x.typ) {
+ check.invalidOp(s.X.Pos(), "%s%s (non-numeric type %s)", s.X, s.Tok, x.typ)
+ return
+ }
+
Y := &ast.BasicLit{ValuePos: s.X.Pos(), Kind: token.INT, Value: "1"} // use x's position
check.binary(&x, nil, s.X, Y, op)
if x.mode == invalid {
const c = 3.14
c /* ERROR "cannot assign" */ ++
s := "foo"
- s /* ERROR "cannot convert" */ --
+ s /* ERROR "invalid operation" */ --
3.14 /* ERROR "cannot assign" */ ++
var (
x int
// The size of a CRC-32 checksum in bytes.
const Size = 4
+// Use "slice by 8" when payload >= this value.
+const sliceBy8Cutoff = 16
+
// Predefined polynomials.
const (
// IEEE is by far and away the most common CRC-32 polynomial.
// Castagnoli table so we can compare against it to find when the caller is
// using this polynomial.
var castagnoliTable *Table
+var castagnoliTable8 *slicing8Table
var castagnoliOnce sync.Once
func castagnoliInit() {
castagnoliTable = makeTable(Castagnoli)
+ castagnoliTable8 = makeTable8(Castagnoli)
}
// IEEETable is the table for the IEEE polynomial.
p = p[8:]
}
crc = ^crc
+ if len(p) == 0 {
+ return crc
+ }
return update(crc, &tab[0], p)
}
// ChecksumIEEE returns the CRC-32 checksum of data
// using the IEEE polynomial.
-func ChecksumIEEE(data []byte) uint32 { return Update(0, IEEETable, data) }
+func ChecksumIEEE(data []byte) uint32 { return updateIEEE(0, data) }
if sse42 {
return castagnoliSSE42(crc, p)
}
+ // Use slicing-by-8 on larger inputs.
+ if len(p) >= sliceBy8Cutoff {
+ return updateSlicingBy8(crc, castagnoliTable8, p)
+ }
return update(crc, castagnoliTable, p)
}
return crc
}
- // only use slicing-by-8 when input is >= 4KB
- if len(p) >= 4096 {
+ // Use slicing-by-8 on larger inputs.
+ if len(p) >= sliceBy8Cutoff {
ieeeTable8Once.Do(func() {
ieeeTable8 = makeTable8(IEEE)
})
if sse42 {
return castagnoliSSE42(crc, p)
}
+ // Use slicing-by-8 on larger inputs.
+ if len(p) >= sliceBy8Cutoff {
+ return updateSlicingBy8(crc, castagnoliTable8, p)
+ }
return update(crc, castagnoliTable, p)
}
func updateIEEE(crc uint32, p []byte) uint32 {
- // only use slicing-by-8 when input is >= 4KB
- if len(p) >= 4096 {
+ // Use slicing-by-8 on larger inputs.
+ if len(p) >= sliceBy8Cutoff {
ieeeTable8Once.Do(func() {
ieeeTable8 = makeTable8(IEEE)
})
package crc32
-// The file contains the generic version of updateCastagnoli which just calls
-// the software implementation.
+// This file contains the generic version of updateCastagnoli which does
+// slicing-by-8, or uses the fallback for very small sizes.
func updateCastagnoli(crc uint32, p []byte) uint32 {
+ // Use slicing-by-8 on larger inputs.
+ if len(p) >= sliceBy8Cutoff {
+ return updateSlicingBy8(crc, castagnoliTable8, p)
+ }
return update(crc, castagnoliTable, p)
}
func updateIEEE(crc uint32, p []byte) uint32 {
- // only use slicing-by-8 when input is >= 4KB
- if len(p) >= 4096 {
+ // Use slicing-by-8 on larger inputs.
+ if len(p) >= sliceBy8Cutoff {
ieeeTable8Once.Do(func() {
ieeeTable8 = makeTable8(IEEE)
})
package crc32
import (
+ "hash"
"io"
"testing"
)
}
}
-func BenchmarkIEEECrc1KB(b *testing.B) {
- b.SetBytes(1024)
- data := make([]byte, 1024)
- for i := range data {
- data[i] = byte(i)
- }
- h := NewIEEE()
- in := make([]byte, 0, h.Size())
+func BenchmarkIEEECrc40B(b *testing.B) {
+ benchmark(b, NewIEEE(), 40)
+}
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- h.Reset()
- h.Write(data)
- h.Sum(in)
- }
+func BenchmarkIEEECrc1KB(b *testing.B) {
+ benchmark(b, NewIEEE(), 1<<10)
}
func BenchmarkIEEECrc4KB(b *testing.B) {
- b.SetBytes(4096)
- data := make([]byte, 4096)
- for i := range data {
- data[i] = byte(i)
- }
- h := NewIEEE()
- in := make([]byte, 0, h.Size())
+ benchmark(b, NewIEEE(), 4<<10)
+}
- b.ResetTimer()
- for i := 0; i < b.N; i++ {
- h.Reset()
- h.Write(data)
- h.Sum(in)
- }
+func BenchmarkIEEECrc32KB(b *testing.B) {
+ benchmark(b, NewIEEE(), 32<<10)
+}
+
+func BenchmarkCastagnoliCrc40B(b *testing.B) {
+ benchmark(b, New(MakeTable(Castagnoli)), 40)
}
func BenchmarkCastagnoliCrc1KB(b *testing.B) {
- b.SetBytes(1024)
- data := make([]byte, 1024)
+ benchmark(b, New(MakeTable(Castagnoli)), 1<<10)
+}
+
+func BenchmarkCastagnoliCrc4KB(b *testing.B) {
+ benchmark(b, New(MakeTable(Castagnoli)), 4<<10)
+}
+
+func BenchmarkCastagnoliCrc32KB(b *testing.B) {
+ benchmark(b, New(MakeTable(Castagnoli)), 32<<10)
+}
+
+func benchmark(b *testing.B, h hash.Hash32, n int64) {
+ b.SetBytes(n)
+ data := make([]byte, n)
for i := range data {
data[i] = byte(i)
}
- h := New(MakeTable(Castagnoli))
in := make([]byte, 0, h.Size())
+ // Warm up
+ h.Reset()
+ h.Write(data)
+ h.Sum(in)
+
b.ResetTimer()
for i := 0; i < b.N; i++ {
h.Reset()
fmt.Fprintln(w, "// of continuous tones.")
fmt.Fprintln(w, "//")
fmt.Fprintln(w, "// This palette was used in the Plan 9 Operating System, described at")
- fmt.Fprintln(w, "// http://plan9.bell-labs.com/magic/man2html/6/color")
+ fmt.Fprintln(w, "// https://9p.io/magic/man2html/6/color")
fmt.Fprintln(w, "var Plan9 = []color.Color{")
for _, line := range lines {
fmt.Fprintln(w, line)
// of continuous tones.
//
// This palette was used in the Plan 9 Operating System, described at
-// http://plan9.bell-labs.com/magic/man2html/6/color
+// https://9p.io/magic/man2html/6/color
var Plan9 = []color.Color{
color.RGBA{0x00, 0x00, 0x00, 0xff},
color.RGBA{0x00, 0x00, 0x44, 0xff},
emax = bias // 127 largest unbiased exponent (normal)
)
- // Float mantissa m is 0.5 <= m < 1.0; compute exponent for floatxx mantissa.
+ // Float mantissa m is 0.5 <= m < 1.0; compute exponent for float32 mantissa.
e := x.exp - 1 // exponent for mantissa m with 1.0 <= m < 2.0
p := mbits + 1 // precision of normal float
// If the exponent is too small, we may have a denormal number
- // in which case we have fewer mantissa bits available: reduce
- // precision accordingly.
+ // in which case we have fewer mantissa bits available: recompute
+ // precision.
if e < emin {
- p -= emin - int(e)
+ p = mbits + 1 - emin + int(e)
// Make sure we have at least 1 bit so that we don't
// lose numbers rounded up to the smallest denormal.
if p < 1 {
return 0.0, Below
}
// bexp = 0
- mant = msb32(r.mant) >> (fbits - r.prec)
+ // recompute precision
+ p = mbits + 1 - emin + int(e)
+ mant = msb32(r.mant) >> uint(fbits-p)
} else {
// normal number: emin <= e <= emax
bexp = uint32(e+bias) << mbits
emax = bias // 1023 largest unbiased exponent (normal)
)
- // Float mantissa m is 0.5 <= m < 1.0; compute exponent for floatxx mantissa.
+ // Float mantissa m is 0.5 <= m < 1.0; compute exponent for float64 mantissa.
e := x.exp - 1 // exponent for mantissa m with 1.0 <= m < 2.0
p := mbits + 1 // precision of normal float
// If the exponent is too small, we may have a denormal number
- // in which case we have fewer mantissa bits available: reduce
- // precision accordingly.
+ // in which case we have fewer mantissa bits available: recompute
+ // precision.
if e < emin {
- p -= emin - int(e)
+ p = mbits + 1 - emin + int(e)
// Make sure we have at least 1 bit so that we don't
// lose numbers rounded up to the smallest denormal.
if p < 1 {
return 0.0, Below
}
// bexp = 0
- mant = msb64(r.mant) >> (fbits - r.prec)
+ // recompute precision
+ p = mbits + 1 - emin + int(e)
+ mant = msb64(r.mant) >> uint(fbits-p)
} else {
// normal number: emin <= e <= emax
bexp = uint64(e+bias) << mbits
{"1p-149", math.SmallestNonzeroFloat32, Exact},
{"0x.fffffep-126", math.Float32frombits(0x7fffff), Exact}, // largest denormal
+ // special cases (see issue 14553)
+ {"0x0.bp-149", math.Float32frombits(0x000000000), Below}, // ToNearestEven rounds down (to even)
+ {"0x0.cp-149", math.Float32frombits(0x000000001), Above},
+
+ {"0x1.0p-149", math.Float32frombits(0x000000001), Exact},
+ {"0x1.7p-149", math.Float32frombits(0x000000001), Below},
+ {"0x1.8p-149", math.Float32frombits(0x000000002), Above},
+ {"0x1.9p-149", math.Float32frombits(0x000000002), Above},
+
+ {"0x2.0p-149", math.Float32frombits(0x000000002), Exact},
+ {"0x2.8p-149", math.Float32frombits(0x000000002), Below}, // ToNearestEven rounds down (to even)
+ {"0x2.9p-149", math.Float32frombits(0x000000003), Above},
+
+ {"0x3.0p-149", math.Float32frombits(0x000000003), Exact},
+ {"0x3.7p-149", math.Float32frombits(0x000000003), Below},
+ {"0x3.8p-149", math.Float32frombits(0x000000004), Above}, // ToNearestEven rounds up (to even)
+
+ {"0x4.0p-149", math.Float32frombits(0x000000004), Exact},
+ {"0x4.8p-149", math.Float32frombits(0x000000004), Below}, // ToNearestEven rounds down (to even)
+ {"0x4.9p-149", math.Float32frombits(0x000000005), Above},
+
+ // specific case from issue 14553
+ {"0x7.7p-149", math.Float32frombits(0x000000007), Below},
+ {"0x7.8p-149", math.Float32frombits(0x000000008), Above},
+ {"0x7.9p-149", math.Float32frombits(0x000000008), Above},
+
// normals
{"0x.ffffffp-126", math.Float32frombits(0x00800000), Above}, // rounded up to smallest normal
{"1p-126", math.Float32frombits(0x00800000), Exact}, // smallest normal
{"1p-1074", math.SmallestNonzeroFloat64, Exact},
{"0x.fffffffffffffp-1022", math.Float64frombits(0x000fffffffffffff), Exact}, // largest denormal
+ // special cases (see issue 14553)
+ {"0x0.bp-1074", math.Float64frombits(0x00000000000000000), Below}, // ToNearestEven rounds down (to even)
+ {"0x0.cp-1074", math.Float64frombits(0x00000000000000001), Above},
+
+ {"0x1.0p-1074", math.Float64frombits(0x00000000000000001), Exact},
+ {"0x1.7p-1074", math.Float64frombits(0x00000000000000001), Below},
+ {"0x1.8p-1074", math.Float64frombits(0x00000000000000002), Above},
+ {"0x1.9p-1074", math.Float64frombits(0x00000000000000002), Above},
+
+ {"0x2.0p-1074", math.Float64frombits(0x00000000000000002), Exact},
+ {"0x2.8p-1074", math.Float64frombits(0x00000000000000002), Below}, // ToNearestEven rounds down (to even)
+ {"0x2.9p-1074", math.Float64frombits(0x00000000000000003), Above},
+
+ {"0x3.0p-1074", math.Float64frombits(0x00000000000000003), Exact},
+ {"0x3.7p-1074", math.Float64frombits(0x00000000000000003), Below},
+ {"0x3.8p-1074", math.Float64frombits(0x00000000000000004), Above}, // ToNearestEven rounds up (to even)
+
+ {"0x4.0p-1074", math.Float64frombits(0x00000000000000004), Exact},
+ {"0x4.8p-1074", math.Float64frombits(0x00000000000000004), Below}, // ToNearestEven rounds down (to even)
+ {"0x4.9p-1074", math.Float64frombits(0x00000000000000005), Above},
+
// normals
{"0x.fffffffffffff8p-1022", math.Float64frombits(0x0010000000000000), Above}, // rounded up to smallest normal
{"1p-1022", math.Float64frombits(0x0010000000000000), Exact}, // smallest normal
// Int63n(10) 7 6 3
// Perm [1 4 2 3 0] [4 2 1 3 0] [1 2 4 0 3]
}
+
+func ExamplePerm() {
+ for _, value := range rand.Perm(3) {
+ fmt.Println(value)
+ }
+
+ // Unordered output: 1
+ // 2
+ // 0
+}
ix = q>>1 + uint64(exp-1+bias)<<shift // significand + biased exponent
return Float64frombits(ix)
}
-
-func sqrtC(f float64, r *float64) {
- *r = sqrt(f)
-}
` + textbValue + `
--MyBoundary--
`
+
+func TestReadForm_NoReadAfterEOF(t *testing.T) {
+ maxMemory := int64(32) << 20
+ boundary := `---------------------------8d345eef0d38dc9`
+ body := `
+-----------------------------8d345eef0d38dc9
+Content-Disposition: form-data; name="version"
+
+171
+-----------------------------8d345eef0d38dc9--`
+
+ mr := NewReader(&failOnReadAfterErrorReader{t: t, r: strings.NewReader(body)}, boundary)
+
+ f, err := mr.ReadForm(maxMemory)
+ if err != nil {
+ t.Fatal(err)
+ }
+ t.Logf("Got: %#v", f)
+}
+
+// failOnReadAfterErrorReader is an io.Reader wrapping r.
+// It fails t if any Read is called after a failing Read.
+type failOnReadAfterErrorReader struct {
+ t *testing.T
+ r io.Reader
+ sawErr error
+}
+
+func (r *failOnReadAfterErrorReader) Read(p []byte) (n int, err error) {
+ if r.sawErr != nil {
+ r.t.Fatalf("unexpected Read on Reader after previous read saw error %v", r.sawErr)
+ }
+ n, err = r.r.Read(p)
+ r.sawErr = err
+ return
+}
func NewReader(r io.Reader, boundary string) *Reader {
b := []byte("\r\n--" + boundary + "--")
return &Reader{
- bufReader: bufio.NewReaderSize(r, peekBufferSize),
+ bufReader: bufio.NewReaderSize(&stickyErrorReader{r: r}, peekBufferSize),
nl: b[:2],
nlDashBoundary: b[:len(b)-2],
dashBoundaryDash: b[2:],
}
}
+// stickyErrorReader is an io.Reader which never calls Read on its
+// underlying Reader once an error has been seen. (The io.Reader
+// interface's contract promises nothing about the return values of
+// Read calls after an error, yet this package does perform multiple
+// Reads after an error.)
+type stickyErrorReader struct {
+ r io.Reader
+ err error
+}
+
+func (r *stickyErrorReader) Read(p []byte) (n int, _ error) {
+ if r.err != nil {
+ return 0, r.err
+ }
+ n, r.err = r.r.Read(p)
+ return n, r.err
+}
+
func newPart(mr *Reader) (*Part, error) {
bp := &Part{
Header: make(map[string][]string),
return nil, &OpError{Op: "dial", Net: ctx.network, Source: la, Addr: ra, Err: &AddrError{Err: "unexpected address type", Addr: ctx.address}}
}
if err != nil {
- return nil, err // c is non-nil interface containing nil pointer
+ return nil, &OpError{Op: "dial", Net: ctx.network, Source: la, Addr: ra, Err: err} // c is non-nil interface containing nil pointer
}
return c, nil
}
t.Fatal("unpacking failed")
}
if n != len(buf) {
- t.Error("unpacked different amount than packed: got n = %d, want = %d", n, len(buf))
+ t.Errorf("unpacked different amount than packed: got n = %d, want = %d", n, len(buf))
}
if !reflect.DeepEqual(got, want) {
t.Errorf("got = %+v, want = %+v", got, want)
continue
}
if n != len(buf) {
- t.Error(
+ t.Errorf(
"unpacked different amount than packed for %s: got n = %d, want = %d",
test.in,
n,
case *os.SyscallError:
nestedErr = err.Err
goto third
+ case *os.PathError: // for Plan 9
+ nestedErr = err.Err
+ goto third
}
switch nestedErr {
case errCanceled, errClosing, errMissingAddress:
case *os.SyscallError:
nestedErr = err.Err
goto third
+ case *os.PathError: // for Plan 9
+ nestedErr = err.Err
+ goto third
}
switch nestedErr {
case errClosing, errTimeout:
func NewTestTimeoutHandler(handler Handler, ch <-chan time.Time) Handler {
return &timeoutHandler{
- handler: handler,
- timeout: func() <-chan time.Time { return ch },
- // (no body and nil cancelTimer)
+ handler: handler,
+ testTimeout: ch,
+ // (no body)
}
}
Body *bytes.Buffer // if non-nil, the bytes.Buffer to append written data to
Flushed bool
+ stagingMap http.Header // map that handlers manipulate to set headers
+ trailerMap http.Header // lazily filled when Trailers() is called
+
wroteHeader bool
}
// Header returns the response headers.
func (rw *ResponseRecorder) Header() http.Header {
- m := rw.HeaderMap
+ m := rw.stagingMap
if m == nil {
m = make(http.Header)
- rw.HeaderMap = m
+ rw.stagingMap = m
}
return m
}
str = str[:512]
}
- _, hasType := rw.HeaderMap["Content-Type"]
- hasTE := rw.HeaderMap.Get("Transfer-Encoding") != ""
+ m := rw.Header()
+
+ _, hasType := m["Content-Type"]
+ hasTE := m.Get("Transfer-Encoding") != ""
if !hasType && !hasTE {
if b == nil {
b = []byte(str)
}
- if rw.HeaderMap == nil {
- rw.HeaderMap = make(http.Header)
- }
- rw.HeaderMap.Set("Content-Type", http.DetectContentType(b))
+ m.Set("Content-Type", http.DetectContentType(b))
}
rw.WriteHeader(200)
return len(str), nil
}
-// WriteHeader sets rw.Code.
+// WriteHeader sets rw.Code. After it is called, changing rw.Header
+// will not affect rw.HeaderMap.
func (rw *ResponseRecorder) WriteHeader(code int) {
- if !rw.wroteHeader {
- rw.Code = code
- rw.wroteHeader = true
+ if rw.wroteHeader {
+ return
+ }
+ rw.Code = code
+ rw.wroteHeader = true
+ if rw.HeaderMap == nil {
+ rw.HeaderMap = make(http.Header)
+ }
+ for k, vv := range rw.stagingMap {
+ vv2 := make([]string, len(vv))
+ copy(vv2, vv)
+ rw.HeaderMap[k] = vv2
}
}
}
rw.Flushed = true
}
+
+// Trailers returns any trailers set by the handler. It must be called
+// after the handler has finished running.
+func (rw *ResponseRecorder) Trailers() http.Header {
+ if rw.trailerMap != nil {
+ return rw.trailerMap
+ }
+ trailers, ok := rw.HeaderMap["Trailer"]
+ if !ok {
+ rw.trailerMap = make(http.Header)
+ return rw.trailerMap
+ }
+ rw.trailerMap = make(http.Header, len(trailers))
+ for _, k := range trailers {
+ switch k {
+ case "Transfer-Encoding", "Content-Length", "Trailer":
+ // Ignore since forbidden by RFC 2616 14.40.
+ continue
+ }
+ k = http.CanonicalHeaderKey(k)
+ vv, ok := rw.stagingMap[k]
+ if !ok {
+ continue
+ }
+ vv2 := make([]string, len(vv))
+ copy(vv2, vv)
+ rw.trailerMap[k] = vv2
+ }
+ return rw.trailerMap
+}
return nil
}
}
+ hasNotHeaders := func(keys ...string) checkFunc {
+ return func(rec *ResponseRecorder) error {
+ for _, k := range keys {
+ _, ok := rec.HeaderMap[http.CanonicalHeaderKey(k)]
+ if ok {
+ return fmt.Errorf("unexpected header %s", k)
+ }
+ }
+ return nil
+ }
+ }
+ hasTrailer := func(key, want string) checkFunc {
+ return func(rec *ResponseRecorder) error {
+ if got := rec.Trailers().Get(key); got != want {
+ return fmt.Errorf("trailer %s = %q; want %q", key, got, want)
+ }
+ return nil
+ }
+ }
+ hasNotTrailers := func(keys ...string) checkFunc {
+ return func(rec *ResponseRecorder) error {
+ trailers := rec.Trailers()
+ for _, k := range keys {
+ _, ok := trailers[http.CanonicalHeaderKey(k)]
+ if ok {
+ return fmt.Errorf("unexpected trailer %s", k)
+ }
+ }
+ return nil
+ }
+ }
tests := []struct {
name string
},
check(hasHeader("Content-Type", "text/html; charset=utf-8")),
},
+ {
+ "Header is not changed after write",
+ func(w http.ResponseWriter, r *http.Request) {
+ hdr := w.Header()
+ hdr.Set("Key", "correct")
+ w.WriteHeader(200)
+ hdr.Set("Key", "incorrect")
+ },
+ check(hasHeader("Key", "correct")),
+ },
+ {
+ "Trailer headers are correctly recorded",
+ func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Non-Trailer", "correct")
+ w.Header().Set("Trailer", "Trailer-A")
+ w.Header().Add("Trailer", "Trailer-B")
+ w.Header().Add("Trailer", "Trailer-C")
+ io.WriteString(w, "<html>")
+ w.Header().Set("Non-Trailer", "incorrect")
+ w.Header().Set("Trailer-A", "valuea")
+ w.Header().Set("Trailer-C", "valuec")
+ w.Header().Set("Trailer-NotDeclared", "should be omitted")
+ },
+ check(
+ hasStatus(200),
+ hasHeader("Content-Type", "text/html; charset=utf-8"),
+ hasHeader("Non-Trailer", "correct"),
+ hasNotHeaders("Trailer-A", "Trailer-B", "Trailer-C", "Trailer-NotDeclared"),
+ hasTrailer("Trailer-A", "valuea"),
+ hasTrailer("Trailer-C", "valuec"),
+ hasNotTrailers("Non-Trailer", "Trailer-B", "Trailer-NotDeclared"),
+ ),
+ },
}
r, _ := http.NewRequest("GET", "http://foo.com/", nil)
for _, tt := range tests {
res, err := http.Get(s.URL)
if err == nil {
res.Body.Close()
- t.Fatal("Unexpected response: %#v", res)
+ t.Fatalf("Unexpected response: %#v", res)
}
}
}
}
+// Issue 14568.
+func TestTimeoutHandlerStartTimerWhenServing(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping sleeping test in -short mode")
+ }
+ defer afterTest(t)
+ var handler HandlerFunc = func(w ResponseWriter, _ *Request) {
+ w.WriteHeader(StatusNoContent)
+ }
+ timeout := 300 * time.Millisecond
+ ts := httptest.NewServer(TimeoutHandler(handler, timeout, ""))
+ defer ts.Close()
+	// The issue was caused by the timeout handler starting the timer when
+	// it was created, not when the request was served. So wait for longer
+	// than the timeout to ensure that's not the case.
+ time.Sleep(2 * timeout)
+ res, err := Get(ts.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer res.Body.Close()
+ if res.StatusCode != StatusNoContent {
+ t.Errorf("got res.StatusCode %d, want %v", res.StatusCode, StatusNoContent)
+ }
+}
+
// Verifies we don't path.Clean() on the wrong parts in redirects.
func TestRedirectMunging(t *testing.T) {
req, _ := NewRequest("GET", "http://example.com/", nil)
// TimeoutHandler buffers all Handler writes to memory and does not
// support the Hijacker or Flusher interfaces.
func TimeoutHandler(h Handler, dt time.Duration, msg string) Handler {
- t := time.NewTimer(dt)
return &timeoutHandler{
handler: h,
body: msg,
-
- // Effectively storing a *time.Timer, but decomposed
- // for testing:
- timeout: func() <-chan time.Time { return t.C },
- cancelTimer: t.Stop,
+ dt: dt,
}
}
type timeoutHandler struct {
handler Handler
body string
+ dt time.Duration
- // timeout returns the channel of a *time.Timer and
- // cancelTimer cancels it. They're stored separately for
- // testing purposes.
- timeout func() <-chan time.Time // returns channel producing a timeout
- cancelTimer func() bool // optional
+ // When set, no timer will be created and this channel will
+ // be used instead.
+ testTimeout <-chan time.Time
}
func (h *timeoutHandler) errorBody() string {
}
func (h *timeoutHandler) ServeHTTP(w ResponseWriter, r *Request) {
+ var t *time.Timer
+ timeout := h.testTimeout
+ if timeout == nil {
+ t = time.NewTimer(h.dt)
+ timeout = t.C
+ }
done := make(chan struct{})
tw := &timeoutWriter{
w: w,
}
w.WriteHeader(tw.code)
w.Write(tw.wbuf.Bytes())
- if h.cancelTimer != nil {
- h.cancelTimer()
+ if t != nil {
+ t.Stop()
}
- case <-h.timeout():
+ case <-timeout:
tw.mu.Lock()
defer tw.mu.Unlock()
w.WriteHeader(StatusServiceUnavailable)
// netProto, which must be "ip", "ip4", or "ip6" followed by a colon
// and a protocol number or name.
func DialIP(netProto string, laddr, raddr *IPAddr) (*IPConn, error) {
- return dialIP(netProto, laddr, raddr, noDeadline)
+ c, err := dialIP(netProto, laddr, raddr, noDeadline)
+ if err != nil {
+ return nil, &OpError{Op: "dial", Net: netProto, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: err}
+ }
+ return c, nil
}
func dialIP(netProto string, laddr, raddr *IPAddr, deadline time.Time) (*IPConn, error) {
net, proto, err := parseNetwork(netProto)
if err != nil {
- return nil, &OpError{Op: "dial", Net: netProto, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: err}
+ return nil, err
}
switch net {
case "ip", "ip4", "ip6":
default:
- return nil, &OpError{Op: "dial", Net: netProto, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: UnknownNetworkError(netProto)}
+ return nil, UnknownNetworkError(netProto)
}
if raddr == nil {
- return nil, &OpError{Op: "dial", Net: netProto, Source: laddr.opAddr(), Addr: nil, Err: errMissingAddress}
+ return nil, errMissingAddress
}
fd, err := internetSocket(net, laddr, raddr, deadline, syscall.SOCK_RAW, proto, "dial", noCancel)
if err != nil {
- return nil, &OpError{Op: "dial", Net: netProto, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: err}
+ return nil, err
}
return newIPConn(fd), nil
}
defer func() { netErr(err) }()
f, dest, proto, name, err := startPlan9(net, raddr)
if err != nil {
- return nil, &OpError{Op: "dial", Net: net, Source: laddr, Addr: raddr, Err: err}
+ return nil, err
}
_, err = f.WriteString("connect " + dest)
if err != nil {
f.Close()
- return nil, &OpError{Op: "dial", Net: f.Name(), Source: laddr, Addr: raddr, Err: err}
+ return nil, err
}
data, err := os.OpenFile(netdir+"/"+proto+"/"+name+"/data", os.O_RDWR, 0)
if err != nil {
f.Close()
- return nil, &OpError{Op: "dial", Net: net, Source: laddr, Addr: raddr, Err: err}
+ return nil, err
}
laddr, err = readPlan9Addr(proto, netdir+"/"+proto+"/"+name+"/local")
if err != nil {
data.Close()
f.Close()
- return nil, &OpError{Op: "dial", Net: proto, Source: laddr, Addr: raddr, Err: err}
+ return nil, err
}
return newFD(proto, name, f, data, laddr, raddr)
}
defer func() { netErr(err) }()
f, dest, proto, name, err := startPlan9(net, laddr)
if err != nil {
- return nil, &OpError{Op: "listen", Net: net, Source: nil, Addr: laddr, Err: err}
+ return nil, err
}
_, err = f.WriteString("announce " + dest)
if err != nil {
f.Close()
- return nil, &OpError{Op: "announce", Net: proto, Source: nil, Addr: laddr, Err: err}
+ return nil, err
}
laddr, err = readPlan9Addr(proto, netdir+"/"+proto+"/"+name+"/local")
if err != nil {
f.Close()
- return nil, &OpError{Op: "listen", Net: net, Source: nil, Addr: laddr, Err: err}
+ return nil, err
}
return newFD(proto, name, f, nil, laddr, nil)
}
defer fd.readUnlock()
f, err := os.Open(fd.dir + "/listen")
if err != nil {
- return nil, &OpError{Op: "accept", Net: fd.dir + "/listen", Source: nil, Addr: fd.laddr, Err: err}
+ return nil, err
}
var buf [16]byte
n, err := f.Read(buf[:])
if err != nil {
f.Close()
- return nil, &OpError{Op: "accept", Net: fd.dir + "/listen", Source: nil, Addr: fd.laddr, Err: err}
+ return nil, err
}
name := string(buf[:n])
data, err := os.OpenFile(netdir+"/"+fd.net+"/"+name+"/data", os.O_RDWR, 0)
if err != nil {
f.Close()
- return nil, &OpError{Op: "accept", Net: fd.net, Source: nil, Addr: fd.laddr, Err: err}
+ return nil, err
}
raddr, err := readPlan9Addr(fd.net, netdir+"/"+fd.net+"/"+name+"/remote")
if err != nil {
data.Close()
f.Close()
- return nil, &OpError{Op: "accept", Net: fd.net, Source: nil, Addr: fd.laddr, Err: err}
+ return nil, err
}
return newFD(fd.net, name, f, data, fd.laddr, raddr)
}
switch runtime.GOOS {
case "nacl":
t.Skipf("not supported on %s", runtime.GOOS)
+ case "android":
+ if netGo {
+ t.Skipf("not supported on %s without cgo; see golang.org/issues/14576", runtime.GOOS)
+ }
}
for _, tt := range lookupPortTests {
if bytes.Contains(line, []byte("Connection Name:")) {
f := bytes.Split(line, []byte{':'})
if len(f) != 2 {
- t.Fatal("unexpected \"Connection Name\" line: %q", line)
+ t.Fatalf("unexpected \"Connection Name\" line: %q", line)
}
name = string(bytes.TrimSpace(f[1]))
if name == "" {
- t.Fatal("empty name on \"Connection Name\" line: %q", line)
+ t.Fatalf("empty name on \"Connection Name\" line: %q", line)
}
}
if bytes.Contains(line, []byte("Physical Address:")) {
if name == "" {
- t.Fatal("no matching name found: %q", string(out))
+ t.Fatalf("no matching name found: %q", string(out))
}
f := bytes.Split(line, []byte{':'})
if len(f) != 2 {
- t.Fatal("unexpected \"Physical Address\" line: %q", line)
+ t.Fatalf("unexpected \"Physical Address\" line: %q", line)
}
addr := string(bytes.ToLower(bytes.TrimSpace(f[1])))
if addr == "" {
- t.Fatal("empty address on \"Physical Address\" line: %q", line)
+ t.Fatalf("empty address on \"Physical Address\" line: %q", line)
}
if addr == "disabled" || addr == "n/a" {
continue
package net
+import "syscall"
+
func setKeepAlive(fd *netFD, keepalive bool) error {
if keepalive {
_, e := fd.ctl.WriteAt([]byte("keepalive"), 0)
}
return nil
}
+
+// setLinger would set the SO_LINGER-equivalent behavior for fd, but
+// Plan 9 provides no such control, so it always returns syscall.EPLAN9.
+func setLinger(fd *netFD, sec int) error {
+	return syscall.EPLAN9
+}
package net
+import (
+ "io"
+ "os"
+ "syscall"
+ "time"
+)
+
// TCPAddr represents the address of a TCP end point.
type TCPAddr struct {
IP IP
}
return addrs.first(isIPv4).(*TCPAddr), nil
}
+
+// TCPConn is an implementation of the Conn interface for TCP network
+// connections.
+type TCPConn struct {
+ conn
+}
+
+// ReadFrom implements the io.ReaderFrom ReadFrom method.
+func (c *TCPConn) ReadFrom(r io.Reader) (int64, error) {
+ if !c.ok() {
+ return 0, syscall.EINVAL
+ }
+ n, err := c.readFrom(r)
+ if err != nil && err != io.EOF {
+ err = &OpError{Op: "read", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err}
+ }
+ return n, err
+}
+
+// CloseRead shuts down the reading side of the TCP connection.
+// Most callers should just use Close.
+func (c *TCPConn) CloseRead() error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ if err := c.fd.closeRead(); err != nil {
+ return &OpError{Op: "close", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err}
+ }
+ return nil
+}
+
+// CloseWrite shuts down the writing side of the TCP connection.
+// Most callers should just use Close.
+func (c *TCPConn) CloseWrite() error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ if err := c.fd.closeWrite(); err != nil {
+ return &OpError{Op: "close", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err}
+ }
+ return nil
+}
+
+// SetLinger sets the behavior of Close on a connection which still
+// has data waiting to be sent or to be acknowledged.
+//
+// If sec < 0 (the default), the operating system finishes sending the
+// data in the background.
+//
+// If sec == 0, the operating system discards any unsent or
+// unacknowledged data.
+//
+// If sec > 0, the data is sent in the background as with sec < 0. On
+// some operating systems after sec seconds have elapsed any remaining
+// unsent data may be discarded.
+func (c *TCPConn) SetLinger(sec int) error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ if err := setLinger(c.fd, sec); err != nil {
+ return &OpError{Op: "set", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err}
+ }
+ return nil
+}
+
+// SetKeepAlive sets whether the operating system should send
+// keepalive messages on the connection.
+func (c *TCPConn) SetKeepAlive(keepalive bool) error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ if err := setKeepAlive(c.fd, keepalive); err != nil {
+ return &OpError{Op: "set", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err}
+ }
+ return nil
+}
+
+// SetKeepAlivePeriod sets period between keep alives.
+func (c *TCPConn) SetKeepAlivePeriod(d time.Duration) error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ if err := setKeepAlivePeriod(c.fd, d); err != nil {
+ return &OpError{Op: "set", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err}
+ }
+ return nil
+}
+
+// SetNoDelay controls whether the operating system should delay
+// packet transmission in hopes of sending fewer packets (Nagle's
+// algorithm). The default is true (no delay), meaning that data is
+// sent as soon as possible after a Write.
+func (c *TCPConn) SetNoDelay(noDelay bool) error {
+ if !c.ok() {
+ return syscall.EINVAL
+ }
+ if err := setNoDelay(c.fd, noDelay); err != nil {
+ return &OpError{Op: "set", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err}
+ }
+ return nil
+}
+
+// newTCPConn wraps fd in a TCPConn and enables no-delay (Nagle off)
+// by default; any error from setNoDelay is deliberately ignored.
+func newTCPConn(fd *netFD) *TCPConn {
+	c := &TCPConn{conn{fd}}
+	setNoDelay(c.fd, true)
+	return c
+}
+
+// DialTCP connects to the remote address raddr on the network net,
+// which must be "tcp", "tcp4", or "tcp6". If laddr is not nil, it is
+// used as the local address for the connection.
+func DialTCP(net string, laddr, raddr *TCPAddr) (*TCPConn, error) {
+ switch net {
+ case "tcp", "tcp4", "tcp6":
+ default:
+ return nil, &OpError{Op: "dial", Net: net, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: UnknownNetworkError(net)}
+ }
+ if raddr == nil {
+ return nil, &OpError{Op: "dial", Net: net, Source: laddr.opAddr(), Addr: nil, Err: errMissingAddress}
+ }
+ c, err := dialTCP(net, laddr, raddr, noDeadline, noCancel)
+ if err != nil {
+ return nil, &OpError{Op: "dial", Net: net, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: err}
+ }
+ return c, nil
+}
+
+// TCPListener is a TCP network listener. Clients should typically
+// use variables of type Listener instead of assuming TCP.
+type TCPListener struct {
+ fd *netFD
+}
+
+// AcceptTCP accepts the next incoming call and returns the new
+// connection.
+func (l *TCPListener) AcceptTCP() (*TCPConn, error) {
+ if !l.ok() {
+ return nil, syscall.EINVAL
+ }
+ c, err := l.accept()
+ if err != nil {
+ return nil, &OpError{Op: "accept", Net: l.fd.net, Source: nil, Addr: l.fd.laddr, Err: err}
+ }
+ return c, nil
+}
+
+// Accept implements the Accept method in the Listener interface; it
+// waits for the next call and returns a generic Conn.
+func (l *TCPListener) Accept() (Conn, error) {
+ if !l.ok() {
+ return nil, syscall.EINVAL
+ }
+ c, err := l.accept()
+ if err != nil {
+ return nil, &OpError{Op: "accept", Net: l.fd.net, Source: nil, Addr: l.fd.laddr, Err: err}
+ }
+ return c, nil
+}
+
+// Close stops listening on the TCP address.
+// Already Accepted connections are not closed.
+func (l *TCPListener) Close() error {
+ if !l.ok() {
+ return syscall.EINVAL
+ }
+ if err := l.close(); err != nil {
+ return &OpError{Op: "close", Net: l.fd.net, Source: nil, Addr: l.fd.laddr, Err: err}
+ }
+ return nil
+}
+
+// Addr returns the listener's network address, a *TCPAddr.
+// The Addr returned is shared by all invocations of Addr, so
+// do not modify it.
+func (l *TCPListener) Addr() Addr { return l.fd.laddr }
+
+// SetDeadline sets the deadline associated with the listener.
+// A zero time value disables the deadline.
+func (l *TCPListener) SetDeadline(t time.Time) error {
+ if !l.ok() {
+ return syscall.EINVAL
+ }
+ if err := l.fd.setDeadline(t); err != nil {
+ return &OpError{Op: "set", Net: l.fd.net, Source: nil, Addr: l.fd.laddr, Err: err}
+ }
+ return nil
+}
+
+// File returns a copy of the underlying os.File, set to blocking
+// mode. It is the caller's responsibility to close f when finished.
+// Closing l does not affect f, and closing f does not affect l.
+//
+// The returned os.File's file descriptor is different from the
+// connection's. Attempting to change properties of the original
+// using this duplicate may or may not have the desired effect.
+func (l *TCPListener) File() (f *os.File, err error) {
+ if !l.ok() {
+ return nil, syscall.EINVAL
+ }
+ f, err = l.file()
+ if err != nil {
+ return nil, &OpError{Op: "file", Net: l.fd.net, Source: nil, Addr: l.fd.laddr, Err: err}
+ }
+ return
+}
+
+// ListenTCP announces on the TCP address laddr and returns a TCP
+// listener. Net must be "tcp", "tcp4", or "tcp6". If laddr has a
+// port of 0, ListenTCP will choose an available port. The caller can
+// use the Addr method of TCPListener to retrieve the chosen address.
+func ListenTCP(net string, laddr *TCPAddr) (*TCPListener, error) {
+ switch net {
+ case "tcp", "tcp4", "tcp6":
+ default:
+ return nil, &OpError{Op: "listen", Net: net, Source: nil, Addr: laddr.opAddr(), Err: UnknownNetworkError(net)}
+ }
+ if laddr == nil {
+ laddr = &TCPAddr{}
+ }
+ ln, err := listenTCP(net, laddr)
+ if err != nil {
+ return nil, &OpError{Op: "listen", Net: net, Source: nil, Addr: laddr.opAddr(), Err: err}
+ }
+ return ln, nil
+}
import (
"io"
"os"
- "syscall"
"time"
)
-// TCPConn is an implementation of the Conn interface for TCP network
-// connections.
-type TCPConn struct {
- conn
-}
-
-func newTCPConn(fd *netFD) *TCPConn {
- return &TCPConn{conn{fd}}
-}
-
-// ReadFrom implements the io.ReaderFrom ReadFrom method.
-func (c *TCPConn) ReadFrom(r io.Reader) (int64, error) {
- n, err := genericReadFrom(c, r)
- if err != nil && err != io.EOF {
- err = &OpError{Op: "read", Net: c.fd.dir, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err}
- }
- return n, err
-}
-
-// CloseRead shuts down the reading side of the TCP connection.
-// Most callers should just use Close.
-func (c *TCPConn) CloseRead() error {
- if !c.ok() {
- return syscall.EINVAL
- }
- err := c.fd.closeRead()
- if err != nil {
- err = &OpError{Op: "close", Net: c.fd.dir, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err}
- }
- return err
-}
-
-// CloseWrite shuts down the writing side of the TCP connection.
-// Most callers should just use Close.
-func (c *TCPConn) CloseWrite() error {
- if !c.ok() {
- return syscall.EINVAL
- }
- err := c.fd.closeWrite()
- if err != nil {
- err = &OpError{Op: "close", Net: c.fd.dir, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err}
- }
- return err
-}
-
-// SetLinger sets the behavior of Close on a connection which still
-// has data waiting to be sent or to be acknowledged.
-//
-// If sec < 0 (the default), the operating system finishes sending the
-// data in the background.
-//
-// If sec == 0, the operating system discards any unsent or
-// unacknowledged data.
-//
-// If sec > 0, the data is sent in the background as with sec < 0. On
-// some operating systems after sec seconds have elapsed any remaining
-// unsent data may be discarded.
-func (c *TCPConn) SetLinger(sec int) error {
- return &OpError{Op: "set", Net: c.fd.dir, Source: c.fd.laddr, Addr: c.fd.raddr, Err: syscall.EPLAN9}
-}
-
-// SetKeepAlive sets whether the operating system should send
-// keepalive messages on the connection.
-func (c *TCPConn) SetKeepAlive(keepalive bool) error {
- if !c.ok() {
- return syscall.EPLAN9
- }
- if err := setKeepAlive(c.fd, keepalive); err != nil {
- return &OpError{Op: "set", Net: c.fd.dir, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err}
- }
- return nil
-}
-
-// SetKeepAlivePeriod sets period between keep alives.
-func (c *TCPConn) SetKeepAlivePeriod(d time.Duration) error {
- if !c.ok() {
- return syscall.EPLAN9
- }
- if err := setKeepAlivePeriod(c.fd, d); err != nil {
- return &OpError{Op: "set", Net: c.fd.dir, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err}
- }
- return nil
-}
-
-// SetNoDelay controls whether the operating system should delay
-// packet transmission in hopes of sending fewer packets (Nagle's
-// algorithm). The default is true (no delay), meaning that data is
-// sent as soon as possible after a Write.
-func (c *TCPConn) SetNoDelay(noDelay bool) error {
- return &OpError{Op: "set", Net: c.fd.dir, Source: c.fd.laddr, Addr: c.fd.raddr, Err: syscall.EPLAN9}
-}
-
-// DialTCP connects to the remote address raddr on the network net,
-// which must be "tcp", "tcp4", or "tcp6". If laddr is not nil, it is
-// used as the local address for the connection.
-func DialTCP(net string, laddr, raddr *TCPAddr) (*TCPConn, error) {
- return dialTCP(net, laddr, raddr, noDeadline, noCancel)
+func (c *TCPConn) readFrom(r io.Reader) (int64, error) {
+ return genericReadFrom(c, r)
}
func dialTCP(net string, laddr, raddr *TCPAddr, deadline time.Time, cancel <-chan struct{}) (*TCPConn, error) {
switch net {
case "tcp", "tcp4", "tcp6":
default:
- return nil, &OpError{Op: "dial", Net: net, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: UnknownNetworkError(net)}
+ return nil, UnknownNetworkError(net)
}
if raddr == nil {
- return nil, &OpError{Op: "dial", Net: net, Source: laddr.opAddr(), Addr: nil, Err: errMissingAddress}
+ return nil, errMissingAddress
}
fd, err := dialPlan9(net, laddr, raddr)
if err != nil {
return newTCPConn(fd), nil
}
-// TCPListener is a TCP network listener. Clients should typically
-// use variables of type Listener instead of assuming TCP.
-type TCPListener struct {
- fd *netFD
-}
+func (ln *TCPListener) ok() bool { return ln != nil && ln.fd != nil && ln.fd.ctl != nil }
-// AcceptTCP accepts the next incoming call and returns the new
-// connection.
-func (l *TCPListener) AcceptTCP() (*TCPConn, error) {
- if l == nil || l.fd == nil || l.fd.ctl == nil {
- return nil, syscall.EINVAL
- }
- fd, err := l.fd.acceptPlan9()
+func (ln *TCPListener) accept() (*TCPConn, error) {
+ fd, err := ln.fd.acceptPlan9()
if err != nil {
return nil, err
}
return newTCPConn(fd), nil
}
-// Accept implements the Accept method in the Listener interface; it
-// waits for the next call and returns a generic Conn.
-func (l *TCPListener) Accept() (Conn, error) {
- if l == nil || l.fd == nil || l.fd.ctl == nil {
- return nil, syscall.EINVAL
- }
- c, err := l.AcceptTCP()
- if err != nil {
- return nil, err
- }
- return c, nil
-}
-
-// Close stops listening on the TCP address.
-// Already Accepted connections are not closed.
-func (l *TCPListener) Close() error {
- if l == nil || l.fd == nil || l.fd.ctl == nil {
- return syscall.EINVAL
+func (ln *TCPListener) close() error {
+ if _, err := ln.fd.ctl.WriteString("hangup"); err != nil {
+ ln.fd.ctl.Close()
+ return err
}
- if _, err := l.fd.ctl.WriteString("hangup"); err != nil {
- l.fd.ctl.Close()
- return &OpError{Op: "close", Net: l.fd.dir, Source: nil, Addr: l.fd.laddr, Err: err}
- }
- err := l.fd.ctl.Close()
- if err != nil {
- err = &OpError{Op: "close", Net: l.fd.dir, Source: nil, Addr: l.fd.laddr, Err: err}
- }
- return err
-}
-
-// Addr returns the listener's network address, a *TCPAddr.
-// The Addr returned is shared by all invocations of Addr, so
-// do not modify it.
-func (l *TCPListener) Addr() Addr { return l.fd.laddr }
-
-// SetDeadline sets the deadline associated with the listener.
-// A zero time value disables the deadline.
-func (l *TCPListener) SetDeadline(t time.Time) error {
- if l == nil || l.fd == nil || l.fd.ctl == nil {
- return syscall.EINVAL
- }
- if err := l.fd.setDeadline(t); err != nil {
- return &OpError{Op: "set", Net: l.fd.dir, Source: nil, Addr: l.fd.laddr, Err: err}
+ if err := ln.fd.ctl.Close(); err != nil {
+ return err
}
return nil
}
-// File returns a copy of the underlying os.File, set to blocking
-// mode. It is the caller's responsibility to close f when finished.
-// Closing l does not affect f, and closing f does not affect l.
-//
-// The returned os.File's file descriptor is different from the
-// connection's. Attempting to change properties of the original
-// using this duplicate may or may not have the desired effect.
-func (l *TCPListener) File() (f *os.File, err error) {
- f, err = l.dup()
+func (ln *TCPListener) file() (*os.File, error) {
+ f, err := ln.dup()
if err != nil {
- err = &OpError{Op: "file", Net: l.fd.dir, Source: nil, Addr: l.fd.laddr, Err: err}
+ return nil, err
}
- return
+ return f, nil
}
-// ListenTCP announces on the TCP address laddr and returns a TCP
-// listener. Net must be "tcp", "tcp4", or "tcp6". If laddr has a
-// port of 0, ListenTCP will choose an available port. The caller can
-// use the Addr method of TCPListener to retrieve the chosen address.
-func ListenTCP(net string, laddr *TCPAddr) (*TCPListener, error) {
- switch net {
- case "tcp", "tcp4", "tcp6":
- default:
- return nil, &OpError{Op: "listen", Net: net, Source: nil, Addr: laddr.opAddr(), Err: UnknownNetworkError(net)}
- }
- if laddr == nil {
- laddr = &TCPAddr{}
- }
- fd, err := listenPlan9(net, laddr)
+func listenTCP(network string, laddr *TCPAddr) (*TCPListener, error) {
+ fd, err := listenPlan9(network, laddr)
if err != nil {
return nil, err
}
return ipToSockaddr(family, a.IP, a.Port, a.Zone)
}
-// TCPConn is an implementation of the Conn interface for TCP network
-// connections.
-type TCPConn struct {
- conn
-}
-
-func newTCPConn(fd *netFD) *TCPConn {
- c := &TCPConn{conn{fd}}
- setNoDelay(c.fd, true)
- return c
-}
-
-// ReadFrom implements the io.ReaderFrom ReadFrom method.
-func (c *TCPConn) ReadFrom(r io.Reader) (int64, error) {
+func (c *TCPConn) readFrom(r io.Reader) (int64, error) {
if n, err, handled := sendFile(c.fd, r); handled {
- if err != nil && err != io.EOF {
- err = &OpError{Op: "read", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err}
- }
return n, err
}
- n, err := genericReadFrom(c, r)
- if err != nil && err != io.EOF {
- err = &OpError{Op: "read", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err}
- }
- return n, err
-}
-
-// CloseRead shuts down the reading side of the TCP connection.
-// Most callers should just use Close.
-func (c *TCPConn) CloseRead() error {
- if !c.ok() {
- return syscall.EINVAL
- }
- err := c.fd.closeRead()
- if err != nil {
- err = &OpError{Op: "close", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err}
- }
- return err
-}
-
-// CloseWrite shuts down the writing side of the TCP connection.
-// Most callers should just use Close.
-func (c *TCPConn) CloseWrite() error {
- if !c.ok() {
- return syscall.EINVAL
- }
- err := c.fd.closeWrite()
- if err != nil {
- err = &OpError{Op: "close", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err}
- }
- return err
-}
-
-// SetLinger sets the behavior of Close on a connection which still
-// has data waiting to be sent or to be acknowledged.
-//
-// If sec < 0 (the default), the operating system finishes sending the
-// data in the background.
-//
-// If sec == 0, the operating system discards any unsent or
-// unacknowledged data.
-//
-// If sec > 0, the data is sent in the background as with sec < 0. On
-// some operating systems after sec seconds have elapsed any remaining
-// unsent data may be discarded.
-func (c *TCPConn) SetLinger(sec int) error {
- if !c.ok() {
- return syscall.EINVAL
- }
- if err := setLinger(c.fd, sec); err != nil {
- return &OpError{Op: "set", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err}
- }
- return nil
-}
-
-// SetKeepAlive sets whether the operating system should send
-// keepalive messages on the connection.
-func (c *TCPConn) SetKeepAlive(keepalive bool) error {
- if !c.ok() {
- return syscall.EINVAL
- }
- if err := setKeepAlive(c.fd, keepalive); err != nil {
- return &OpError{Op: "set", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err}
- }
- return nil
-}
-
-// SetKeepAlivePeriod sets period between keep alives.
-func (c *TCPConn) SetKeepAlivePeriod(d time.Duration) error {
- if !c.ok() {
- return syscall.EINVAL
- }
- if err := setKeepAlivePeriod(c.fd, d); err != nil {
- return &OpError{Op: "set", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err}
- }
- return nil
-}
-
-// SetNoDelay controls whether the operating system should delay
-// packet transmission in hopes of sending fewer packets (Nagle's
-// algorithm). The default is true (no delay), meaning that data is
-// sent as soon as possible after a Write.
-func (c *TCPConn) SetNoDelay(noDelay bool) error {
- if !c.ok() {
- return syscall.EINVAL
- }
- if err := setNoDelay(c.fd, noDelay); err != nil {
- return &OpError{Op: "set", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err}
- }
- return nil
-}
-
-// DialTCP connects to the remote address raddr on the network net,
-// which must be "tcp", "tcp4", or "tcp6". If laddr is not nil, it is
-// used as the local address for the connection.
-func DialTCP(net string, laddr, raddr *TCPAddr) (*TCPConn, error) {
- switch net {
- case "tcp", "tcp4", "tcp6":
- default:
- return nil, &OpError{Op: "dial", Net: net, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: UnknownNetworkError(net)}
- }
- if raddr == nil {
- return nil, &OpError{Op: "dial", Net: net, Source: laddr.opAddr(), Addr: nil, Err: errMissingAddress}
- }
- return dialTCP(net, laddr, raddr, noDeadline, noCancel)
+ return genericReadFrom(c, r)
}
func dialTCP(net string, laddr, raddr *TCPAddr, deadline time.Time, cancel <-chan struct{}) (*TCPConn, error) {
}
if err != nil {
- return nil, &OpError{Op: "dial", Net: net, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: err}
+ return nil, err
}
return newTCPConn(fd), nil
}
return err == syscall.EADDRNOTAVAIL
}
-// TCPListener is a TCP network listener. Clients should typically
-// use variables of type Listener instead of assuming TCP.
-type TCPListener struct {
- fd *netFD
-}
+func (ln *TCPListener) ok() bool { return ln != nil && ln.fd != nil }
-// AcceptTCP accepts the next incoming call and returns the new
-// connection.
-func (l *TCPListener) AcceptTCP() (*TCPConn, error) {
- if l == nil || l.fd == nil {
- return nil, syscall.EINVAL
- }
- fd, err := l.fd.accept()
- if err != nil {
- return nil, &OpError{Op: "accept", Net: l.fd.net, Source: nil, Addr: l.fd.laddr, Err: err}
- }
- return newTCPConn(fd), nil
-}
-
-// Accept implements the Accept method in the Listener interface; it
-// waits for the next call and returns a generic Conn.
-func (l *TCPListener) Accept() (Conn, error) {
- c, err := l.AcceptTCP()
+func (ln *TCPListener) accept() (*TCPConn, error) {
+ fd, err := ln.fd.accept()
if err != nil {
return nil, err
}
- return c, nil
+ return newTCPConn(fd), nil
}
-// Close stops listening on the TCP address.
-// Already Accepted connections are not closed.
-func (l *TCPListener) Close() error {
- if l == nil || l.fd == nil {
- return syscall.EINVAL
- }
- err := l.fd.Close()
- if err != nil {
- err = &OpError{Op: "close", Net: l.fd.net, Source: nil, Addr: l.fd.laddr, Err: err}
- }
- return err
+func (ln *TCPListener) close() error {
+ return ln.fd.Close()
}
-// Addr returns the listener's network address, a *TCPAddr.
-// The Addr returned is shared by all invocations of Addr, so
-// do not modify it.
-func (l *TCPListener) Addr() Addr { return l.fd.laddr }
-
-// SetDeadline sets the deadline associated with the listener.
-// A zero time value disables the deadline.
-func (l *TCPListener) SetDeadline(t time.Time) error {
- if l == nil || l.fd == nil {
- return syscall.EINVAL
- }
- if err := l.fd.setDeadline(t); err != nil {
- return &OpError{Op: "set", Net: l.fd.net, Source: nil, Addr: l.fd.laddr, Err: err}
- }
- return nil
-}
-
-// File returns a copy of the underlying os.File, set to blocking
-// mode. It is the caller's responsibility to close f when finished.
-// Closing l does not affect f, and closing f does not affect l.
-//
-// The returned os.File's file descriptor is different from the
-// connection's. Attempting to change properties of the original
-// using this duplicate may or may not have the desired effect.
-func (l *TCPListener) File() (f *os.File, err error) {
- f, err = l.fd.dup()
+func (ln *TCPListener) file() (*os.File, error) {
+ f, err := ln.fd.dup()
if err != nil {
- err = &OpError{Op: "file", Net: l.fd.net, Source: nil, Addr: l.fd.laddr, Err: err}
+ return nil, err
}
- return
+ return f, nil
}
-// ListenTCP announces on the TCP address laddr and returns a TCP
-// listener. Net must be "tcp", "tcp4", or "tcp6". If laddr has a
-// port of 0, ListenTCP will choose an available port. The caller can
-// use the Addr method of TCPListener to retrieve the chosen address.
-func ListenTCP(net string, laddr *TCPAddr) (*TCPListener, error) {
- switch net {
- case "tcp", "tcp4", "tcp6":
- default:
- return nil, &OpError{Op: "listen", Net: net, Source: nil, Addr: laddr.opAddr(), Err: UnknownNetworkError(net)}
- }
- if laddr == nil {
- laddr = &TCPAddr{}
- }
- fd, err := internetSocket(net, laddr, nil, noDeadline, syscall.SOCK_STREAM, 0, "listen", noCancel)
+func listenTCP(network string, laddr *TCPAddr) (*TCPListener, error) {
+ fd, err := internetSocket(network, laddr, nil, noDeadline, syscall.SOCK_STREAM, 0, "listen", noCancel)
if err != nil {
- return nil, &OpError{Op: "listen", Net: net, Source: nil, Addr: laddr, Err: err}
+ return nil, err
}
return &TCPListener{fd}, nil
}
package net
import (
+ "syscall"
"time"
)
+// setNoDelay reports that toggling Nagle's algorithm is not supported
+// on Plan 9 by always returning syscall.EPLAN9.
+func setNoDelay(fd *netFD, noDelay bool) error {
+	return syscall.EPLAN9
+}
+
// Set keep alive period.
func setKeepAlivePeriod(fd *netFD, d time.Duration) error {
cmd := "keepalive " + itoa(int(d/time.Millisecond))
// which must be "udp", "udp4", or "udp6". If laddr is not nil, it is
// used as the local address for the connection.
func DialUDP(net string, laddr, raddr *UDPAddr) (*UDPConn, error) {
- return dialUDP(net, laddr, raddr, noDeadline)
+ c, err := dialUDP(net, laddr, raddr, noDeadline)
+ if err != nil {
+ return nil, &OpError{Op: "dial", Net: net, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: err}
+ }
+ return c, nil
}
func dialUDP(net string, laddr, raddr *UDPAddr, deadline time.Time) (*UDPConn, error) {
switch net {
case "udp", "udp4", "udp6":
default:
- return nil, &OpError{Op: "dial", Net: net, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: UnknownNetworkError(net)}
+ return nil, UnknownNetworkError(net)
}
if raddr == nil {
- return nil, &OpError{Op: "dial", Net: net, Source: laddr.opAddr(), Addr: nil, Err: errMissingAddress}
+ return nil, errMissingAddress
}
fd, err := dialPlan9(net, laddr, raddr)
if err != nil {
if raddr == nil {
return nil, &OpError{Op: "dial", Net: net, Source: laddr.opAddr(), Addr: nil, Err: errMissingAddress}
}
- return dialUDP(net, laddr, raddr, noDeadline)
+ c, err := dialUDP(net, laddr, raddr, noDeadline)
+ if err != nil {
+ return nil, &OpError{Op: "dial", Net: net, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: err}
+ }
+ return c, nil
}
func dialUDP(net string, laddr, raddr *UDPAddr, deadline time.Time) (*UDPConn, error) {
fd, err := internetSocket(net, laddr, raddr, deadline, syscall.SOCK_DGRAM, 0, "dial", noCancel)
if err != nil {
- return nil, &OpError{Op: "dial", Net: net, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: err}
+ return nil, err
}
return newUDPConn(fd), nil
}
default:
return nil, &OpError{Op: "dial", Net: net, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: UnknownNetworkError(net)}
}
- return dialUnix(net, laddr, raddr, noDeadline)
+ c, err := dialUnix(net, laddr, raddr, noDeadline)
+ if err != nil {
+ return nil, &OpError{Op: "dial", Net: net, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: err}
+ }
+ return c, nil
}
func dialUnix(net string, laddr, raddr *UnixAddr, deadline time.Time) (*UnixConn, error) {
fd, err := unixSocket(net, laddr, raddr, "dial", deadline)
if err != nil {
- return nil, &OpError{Op: "dial", Net: net, Source: laddr.opAddr(), Addr: raddr.opAddr(), Err: err}
+ return nil, err
}
return newUnixConn(fd), nil
}
}
}
-func run(t *testing.T, cmd []string) string {
+func runBinHostname(t *testing.T) string {
// Run /bin/hostname and collect output.
r, w, err := Pipe()
if err != nil {
t.Fatal(err)
}
defer r.Close()
- p, err := StartProcess("/bin/hostname", []string{"hostname"}, &ProcAttr{Files: []*File{nil, w, Stderr}})
+ const path = "/bin/hostname"
+ p, err := StartProcess(path, []string{"hostname"}, &ProcAttr{Files: []*File{nil, w, Stderr}})
if err != nil {
+ if _, err := Stat(path); IsNotExist(err) {
+ t.Skipf("skipping test; test requires %s but it does not exist", path)
+ }
t.Fatal(err)
}
w.Close()
output = output[0 : n-1]
}
if output == "" {
- t.Fatalf("%v produced no output", cmd)
+ t.Fatalf("/bin/hostname produced no output")
}
return output
if err != nil {
t.Fatalf("%v", err)
}
- want := run(t, []string{"/bin/hostname"})
+ want := runBinHostname(t)
if hostname != want {
i := strings.Index(hostname, ".")
if i < 0 || hostname[0:i] != want {
--- /dev/null
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build cgo

#include <unistd.h>
#include <sys/types.h>
#include <stdlib.h>

// mygetgrouplist wraps getgrouplist for platforms where it takes an
// int* buffer rather than gid_t*: it stages the results in a
// temporary int buffer and copies them back into the caller's gid_t
// array. On success (rv == 0) *ngroups holds the number of groups
// written; on failure getgrouplist's own return value is passed
// through.
int mygetgrouplist(const char* user, gid_t group, gid_t* groups, int* ngroups) {
	int* buf = malloc(*ngroups * sizeof(int));
	if (buf == NULL) {
		// Allocation failed; report failure without calling
		// getgrouplist on a NULL buffer.
		return -1;
	}
	int rv = getgrouplist(user, (int) group, buf, ngroups);
	int i;
	if (rv == 0) {
		for (i = 0; i < *ngroups; i++) {
			groups[i] = (gid_t) buf[i];
		}
	}
	free(buf);
	return rv;
}
--- /dev/null
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build cgo
// +build dragonfly freebsd !android,linux netbsd openbsd

#include <unistd.h>
#include <sys/types.h>
#include <grp.h>

// mygetgrouplist is a thin wrapper over the C library's getgrouplist
// for systems where it already takes gid_t* directly, so no buffer
// conversion is needed (contrast with the int-buffer variant used on
// other platforms).
int mygetgrouplist(const char* user, gid_t group, gid_t* groups, int* ngroups) {
	return getgrouplist(user, group, groups, ngroups);
}
--- /dev/null
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build cgo
+
+// Even though this file requires no C, it is used to provide a
+// listGroup stub because all the other Solaris calls work. Otherwise,
+// this stub will conflict with the lookup_stubs.go fallback.
+
+package user
+
+import "fmt"
+
+func listGroups(u *User) ([]string, error) {
+ return nil, fmt.Errorf("user: list groups for %s: not supported on Solaris", u.Username)
+}
--- /dev/null
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build dragonfly darwin freebsd !android,linux netbsd openbsd

package user

import (
	"fmt"
	"strconv"
	"unsafe"
)

/*
#include <unistd.h>
#include <sys/types.h>
#include <stdlib.h>

extern int mygetgrouplist(const char* user, gid_t group, gid_t* groups, int* ngroups);
*/
import "C"

// listGroups returns the group IDs (as decimal strings) that u is a
// member of, by calling the mygetgrouplist C wrapper declared in a
// sibling cgo file. It retries once with a larger buffer if the
// initial 256-entry guess is too small.
func listGroups(u *User) ([]string, error) {
	ug, err := strconv.Atoi(u.Gid)
	if err != nil {
		return nil, fmt.Errorf("user: list groups for %s: invalid gid %q", u.Username, u.Gid)
	}
	userGID := C.gid_t(ug)
	nameC := C.CString(u.Username)
	defer C.free(unsafe.Pointer(nameC))

	// n is both the input capacity and, on failure, the required size.
	n := C.int(256)
	gidsC := make([]C.gid_t, n)
	rv := C.mygetgrouplist(nameC, userGID, &gidsC[0], &n)
	if rv == -1 {
		// More than initial buffer, but now n contains the correct size.
		const maxGroups = 2048
		if n > maxGroups {
			return nil, fmt.Errorf("user: list groups for %s: member of more than %d groups", u.Username, maxGroups)
		}
		gidsC = make([]C.gid_t, n)
		rv := C.mygetgrouplist(nameC, userGID, &gidsC[0], &n)
		if rv == -1 {
			// A second failure suggests the membership changed
			// between the two calls.
			return nil, fmt.Errorf("user: list groups for %s failed (changed groups?)", u.Username)
		}
	}
	// Trim to the count actually filled in and convert to strings.
	gidsC = gidsC[:n]
	gids := make([]string, 0, n)
	for _, g := range gidsC[:n] {
		gids = append(gids, strconv.Itoa(int(g)))
	}
	return gids, nil
}
// Lookup looks up a user by username. If the user cannot be found, the
// returned error is of type UnknownUserError.
func Lookup(username string) (*User, error) {
- return lookup(username)
+ return lookupUser(username)
}
// LookupId looks up a user by userid. If the user cannot be found, the
// returned error is of type UnknownUserIdError.
func LookupId(uid string) (*User, error) {
- return lookupId(uid)
+ return lookupUserId(uid)
+}
+
+// LookupGroup looks up a group by name. If the group cannot be found, the
+// returned error is of type UnknownGroupError.
+func LookupGroup(name string) (*Group, error) {
+ return lookupGroup(name)
+}
+
+// LookupGroupId looks up a group by groupid. If the group cannot be found, the
+// returned error is of type UnknownGroupIdError.
+func LookupGroupId(gid string) (*Group, error) {
+ return lookupGroupId(gid)
+}
+
+// GroupIds returns the list of group IDs that the user is a member of.
+func (u *User) GroupIds() ([]string, error) {
+ return listGroups(u)
}
--- /dev/null
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build android

package user

import "errors"

// Every lookup below is a stub on android; the flags set here let the
// package's tests skip the unimplemented code paths.
func init() {
	userImplemented = false
	groupImplemented = false
}

// current is not implemented on android.
func current() (*User, error) {
	return nil, errors.New("user: Current not implemented on android")
}

// lookupUser is not implemented on android.
func lookupUser(string) (*User, error) {
	return nil, errors.New("user: Lookup not implemented on android")
}

// lookupUserId is not implemented on android.
func lookupUserId(string) (*User, error) {
	return nil, errors.New("user: LookupId not implemented on android")
}

// lookupGroup is not implemented on android.
func lookupGroup(string) (*Group, error) {
	return nil, errors.New("user: LookupGroup not implemented on android")
}

// lookupGroupId is not implemented on android.
func lookupGroupId(string) (*Group, error) {
	return nil, errors.New("user: LookupGroupId not implemented on android")
}

// listGroups is not implemented on android.
func listGroups(*User) ([]string, error) {
	return nil, errors.New("user: GroupIds not implemented on android")
}
userFile = "/dev/user"
)
+func init() {
+ groupImplemented = false
+}
+
func current() (*User, error) {
ubytes, err := ioutil.ReadFile(userFile)
if err != nil {
return u, nil
}
-func lookup(username string) (*User, error) {
+func lookupUser(username string) (*User, error) {
+ return nil, syscall.EPLAN9
+}
+
+func lookupUserId(uid string) (*User, error) {
+ return nil, syscall.EPLAN9
+}
+
+func lookupGroup(groupname string) (*Group, error) {
+ return nil, syscall.EPLAN9
+}
+
+func lookupGroupId(string) (*Group, error) {
return nil, syscall.EPLAN9
}
-func lookupId(uid string) (*User, error) {
+func listGroups(*User) ([]string, error) {
return nil, syscall.EPLAN9
}
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build !cgo,!windows,!plan9 android
+// +build !cgo,!windows,!plan9,!android
package user
import (
+ "errors"
"fmt"
+ "os"
"runtime"
+ "strconv"
)
func init() {
- implemented = false
+ userImplemented = false
+ groupImplemented = false
}
// current builds the current user from the environment, as a
// best-effort fallback for builds where cgo (and thus the C library's
// passwd lookup) is unavailable.
func current() (*User, error) {
	u := &User{
		Uid:      currentUID(),
		Gid:      currentGID(),
		Username: os.Getenv("USER"),
		Name:     "", // ignored
		HomeDir:  os.Getenv("HOME"),
	}
	// On nacl the environment is typically empty; substitute fixed
	// placeholder values so Current can still succeed there.
	if runtime.GOOS == "nacl" {
		if u.Uid == "" {
			u.Uid = "1"
		}
		if u.Username == "" {
			u.Username = "nacl"
		}
		if u.HomeDir == "" {
			u.HomeDir = "/home/nacl"
		}
	}
	// cgo isn't available, but if we found the minimum information
	// without it, use it:
	if u.Uid != "" && u.Username != "" && u.HomeDir != "" {
		return u, nil
	}
	// The partially-filled u is returned alongside the error so
	// callers can still inspect whatever was found.
	return u, fmt.Errorf("user: Current not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
}
-func lookup(username string) (*User, error) {
- return nil, fmt.Errorf("user: Lookup not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
+func lookupUser(username string) (*User, error) {
+ return nil, errors.New("user: Lookup requires cgo")
}
-func lookupId(uid string) (*User, error) {
- return nil, fmt.Errorf("user: LookupId not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
+func lookupUserId(uid string) (*User, error) {
+ return nil, errors.New("user: LookupId requires cgo")
+}
+
+func lookupGroup(groupname string) (*Group, error) {
+ return nil, errors.New("user: LookupGroup requires cgo")
+}
+
+func lookupGroupId(string) (*Group, error) {
+ return nil, errors.New("user: LookupGroupId requires cgo")
+}
+
+func listGroups(*User) ([]string, error) {
+ return nil, errors.New("user: GroupIds requires cgo")
+}
+
// currentUID returns the current user's uid as a decimal string, or
// "" if the uid is unavailable (negative).
func currentUID() string {
	id := os.Getuid()
	if id < 0 {
		// Note: Windows returns -1, but this file isn't used on
		// Windows anyway, so this empty return path shouldn't be
		// used.
		return ""
	}
	return strconv.Itoa(id)
}

// currentGID returns the current user's primary gid as a decimal
// string, or "" if unavailable.
func currentGID() string {
	id := os.Getgid()
	if id < 0 {
		return ""
	}
	return strconv.Itoa(id)
}
#include <unistd.h>
#include <sys/types.h>
#include <pwd.h>
+#include <grp.h>
#include <stdlib.h>
static int mygetpwuid_r(int uid, struct passwd *pwd,
char *buf, size_t buflen, struct passwd **result) {
return getpwnam_r(name, pwd, buf, buflen, result);
}
+
+static int mygetgrgid_r(int gid, struct group *grp,
+ char *buf, size_t buflen, struct group **result) {
+ return getgrgid_r(gid, grp, buf, buflen, result);
+}
*/
import "C"
func current() (*User, error) {
- return lookupUnix(syscall.Getuid(), "", false)
+ return lookupUnixUid(syscall.Getuid())
}
-func lookup(username string) (*User, error) {
- return lookupUnix(-1, username, true)
+func lookupUser(username string) (*User, error) {
+ var pwd C.struct_passwd
+ var result *C.struct_passwd
+ nameC := C.CString(username)
+ defer C.free(unsafe.Pointer(nameC))
+
+ buf := alloc(userBuffer)
+ defer buf.free()
+
+ err := retryWithBuffer(buf, func() syscall.Errno {
+ // mygetpwnam_r is a wrapper around getpwnam_r to avoid
+ // passing a size_t to getpwnam_r, because for unknown
+ // reasons passing a size_t to getpwnam_r doesn't work on
+ // Solaris.
+ return syscall.Errno(C.mygetpwnam_r(nameC,
+ &pwd,
+ (*C.char)(buf.ptr),
+ C.size_t(buf.size),
+ &result))
+ })
+ if err != nil {
+ return nil, fmt.Errorf("user: lookup username %s: %v", username, err)
+ }
+ if result == nil {
+ return nil, UnknownUserError(username)
+ }
+ return buildUser(&pwd), err
}
-func lookupId(uid string) (*User, error) {
+func lookupUserId(uid string) (*User, error) {
i, e := strconv.Atoi(uid)
if e != nil {
return nil, e
}
- return lookupUnix(i, "", false)
+ return lookupUnixUid(i)
}
-func lookupUnix(uid int, username string, lookupByName bool) (*User, error) {
+func lookupUnixUid(uid int) (*User, error) {
var pwd C.struct_passwd
var result *C.struct_passwd
- bufSize := C.sysconf(C._SC_GETPW_R_SIZE_MAX)
- if bufSize == -1 {
- // DragonFly and FreeBSD do not have _SC_GETPW_R_SIZE_MAX.
- // Additionally, not all Linux systems have it, either. For
- // example, the musl libc returns -1.
- bufSize = 1024
- }
- if bufSize <= 0 || bufSize > 1<<20 {
- return nil, fmt.Errorf("user: unreasonable _SC_GETPW_R_SIZE_MAX of %d", bufSize)
- }
- buf := C.malloc(C.size_t(bufSize))
- defer C.free(buf)
- var rv C.int
- if lookupByName {
- nameC := C.CString(username)
- defer C.free(unsafe.Pointer(nameC))
- // mygetpwnam_r is a wrapper around getpwnam_r to avoid
- // passing a size_t to getpwnam_r, because for unknown
- // reasons passing a size_t to getpwnam_r doesn't work on
- // Solaris.
- rv = C.mygetpwnam_r(nameC,
- &pwd,
- (*C.char)(buf),
- C.size_t(bufSize),
- &result)
- if rv != 0 {
- return nil, fmt.Errorf("user: lookup username %s: %s", username, syscall.Errno(rv))
- }
- if result == nil {
- return nil, UnknownUserError(username)
- }
- } else {
+ buf := alloc(userBuffer)
+ defer buf.free()
+
+ err := retryWithBuffer(buf, func() syscall.Errno {
// mygetpwuid_r is a wrapper around getpwuid_r to
// to avoid using uid_t because C.uid_t(uid) for
// unknown reasons doesn't work on linux.
- rv = C.mygetpwuid_r(C.int(uid),
+ return syscall.Errno(C.mygetpwuid_r(C.int(uid),
&pwd,
- (*C.char)(buf),
- C.size_t(bufSize),
- &result)
- if rv != 0 {
- return nil, fmt.Errorf("user: lookup userid %d: %s", uid, syscall.Errno(rv))
- }
- if result == nil {
- return nil, UnknownUserIdError(uid)
- }
+ (*C.char)(buf.ptr),
+ C.size_t(buf.size),
+ &result))
+ })
+ if err != nil {
+ return nil, fmt.Errorf("user: lookup userid %d: %v", uid, err)
+ }
+ if result == nil {
+ return nil, UnknownUserIdError(uid)
}
+ return buildUser(&pwd), nil
+}
+
+func buildUser(pwd *C.struct_passwd) *User {
u := &User{
Uid: strconv.Itoa(int(pwd.pw_uid)),
Gid: strconv.Itoa(int(pwd.pw_gid)),
if i := strings.Index(u.Name, ","); i >= 0 {
u.Name = u.Name[:i]
}
- return u, nil
+ return u
+}
+
+func currentGroup() (*Group, error) {
+ return lookupUnixGid(syscall.Getgid())
+}
+
// lookupGroup looks up a group by name via the C library's
// getgrnam_r, retrying with a progressively larger buffer on ERANGE
// (see retryWithBuffer).
func lookupGroup(groupname string) (*Group, error) {
	var grp C.struct_group
	var result *C.struct_group

	buf := alloc(groupBuffer)
	defer buf.free()
	cname := C.CString(groupname)
	defer C.free(unsafe.Pointer(cname))

	err := retryWithBuffer(buf, func() syscall.Errno {
		return syscall.Errno(C.getgrnam_r(cname,
			&grp,
			(*C.char)(buf.ptr),
			C.size_t(buf.size),
			&result))
	})
	if err != nil {
		return nil, fmt.Errorf("user: lookup groupname %s: %v", groupname, err)
	}
	// A nil result with no error means the group does not exist.
	if result == nil {
		return nil, UnknownGroupError(groupname)
	}
	return buildGroup(&grp), nil
}
+
+func lookupGroupId(gid string) (*Group, error) {
+ i, e := strconv.Atoi(gid)
+ if e != nil {
+ return nil, e
+ }
+ return lookupUnixGid(i)
+}
+
+func lookupUnixGid(gid int) (*Group, error) {
+ var grp C.struct_group
+ var result *C.struct_group
+
+ buf := alloc(groupBuffer)
+ defer buf.free()
+
+ err := retryWithBuffer(buf, func() syscall.Errno {
+ // mygetgrgid_r is a wrapper around getgrgid_r to
+ // to avoid using gid_t because C.gid_t(gid) for
+ // unknown reasons doesn't work on linux.
+ return syscall.Errno(C.mygetgrgid_r(C.int(gid),
+ &grp,
+ (*C.char)(buf.ptr),
+ C.size_t(buf.size),
+ &result))
+ })
+ if err != nil {
+ return nil, fmt.Errorf("user: lookup groupid %d: %v", gid, err)
+ }
+ if result == nil {
+ return nil, UnknownGroupIdError(gid)
+ }
+ return buildGroup(&grp), nil
+}
+
+func buildGroup(grp *C.struct_group) *Group {
+ g := &Group{
+ Gid: strconv.Itoa(int(grp.gr_gid)),
+ Name: C.GoString(grp.gr_name),
+ }
+ return g
+}
+
+type bufferKind C.int
+
+const (
+ userBuffer = bufferKind(C._SC_GETPW_R_SIZE_MAX)
+ groupBuffer = bufferKind(C._SC_GETGR_R_SIZE_MAX)
+)
+
+func (k bufferKind) initialSize() C.size_t {
+ sz := C.sysconf(C.int(k))
+ if sz == -1 {
+ // DragonFly and FreeBSD do not have _SC_GETPW_R_SIZE_MAX.
+ // Additionally, not all Linux systems have it, either. For
+ // example, the musl libc returns -1.
+ return 1024
+ }
+ if !isSizeReasonable(int64(sz)) {
+ // Truncate. If this truly isn't enough, retryWithBuffer will error on the first run.
+ return maxBufferSize
+ }
+ return C.size_t(sz)
+}
+
+type memBuffer struct {
+ ptr unsafe.Pointer
+ size C.size_t
+}
+
+func alloc(kind bufferKind) *memBuffer {
+ sz := kind.initialSize()
+ return &memBuffer{
+ ptr: C.malloc(sz),
+ size: sz,
+ }
+}
+
+func (mb *memBuffer) resize(newSize C.size_t) {
+ mb.ptr = C.realloc(mb.ptr, newSize)
+ mb.size = newSize
+}
+
+func (mb *memBuffer) free() {
+ C.free(mb.ptr)
+}
+
+// retryWithBuffer repeatedly calls f(), increasing the size of the
+// buffer each time, until f succeeds, fails with a non-ERANGE error,
+// or the buffer exceeds a reasonable limit.
+func retryWithBuffer(buf *memBuffer, f func() syscall.Errno) error {
+ for {
+ errno := f()
+ if errno == 0 {
+ return nil
+ } else if errno != syscall.ERANGE {
+ return errno
+ }
+ newSize := buf.size * 2
+ if !isSizeReasonable(int64(newSize)) {
+ return fmt.Errorf("internal buffer exceeds %d bytes", maxBufferSize)
+ }
+ buf.resize(newSize)
+ }
+}
+
// maxBufferSize caps how large a lookup scratch buffer may grow.
const maxBufferSize = 1 << 20

// isSizeReasonable reports whether sz is a positive size that does
// not exceed maxBufferSize.
func isSizeReasonable(sz int64) bool {
	if sz <= 0 {
		return false
	}
	return sz <= maxBufferSize
}
package user
import (
+ "errors"
"fmt"
"syscall"
"unsafe"
)
+func init() {
+ groupImplemented = false
+}
+
func isDomainJoined() (bool, error) {
var domain *uint16
var status uint32
return newUser(usid, gid, dir)
}
-func lookup(username string) (*User, error) {
+func lookupUser(username string) (*User, error) {
sid, _, t, e := syscall.LookupSID("", username)
if e != nil {
return nil, e
return newUserFromSid(sid)
}
-func lookupId(uid string) (*User, error) {
+func lookupUserId(uid string) (*User, error) {
sid, e := syscall.StringToSid(uid)
if e != nil {
return nil, e
}
return newUserFromSid(sid)
}
+
+func lookupGroup(groupname string) (*Group, error) {
+ return nil, errors.New("user: LookupGroup not implemented on windows")
+}
+
+func lookupGroupId(string) (*Group, error) {
+ return nil, errors.New("user: LookupGroupId not implemented on windows")
+}
+
+func listGroups(*User) ([]string, error) {
+ return nil, errors.New("user: GroupIds not implemented on windows")
+}
"strconv"
)
-var implemented = true // set to false by lookup_stubs.go's init
+var (
+ userImplemented = true // set to false by lookup_stubs.go's init
+ groupImplemented = true // set to false by lookup_stubs.go's init
+)
// User represents a user account.
//
-// On posix systems Uid and Gid contain a decimal number
+// On POSIX systems Uid and Gid contain a decimal number
// representing uid and gid. On windows Uid and Gid
// contain security identifier (SID) in a string format.
// On Plan 9, Uid, Gid, Username, and Name will be the
// contents of /dev/user.
type User struct {
- Uid string // user id
- Gid string // primary group id
+ Uid string // user ID
+ Gid string // primary group ID
Username string
Name string
HomeDir string
}
+// Group represents a grouping of users.
+//
+// On POSIX systems Gid contains a decimal number
+// representing the group ID.
+type Group struct {
+ Gid string // group ID
+ Name string // group name
+}
+
// UnknownUserIdError is returned by LookupId when
// a user cannot be found.
type UnknownUserIdError int
func (e UnknownUserError) Error() string {
return "user: unknown user " + string(e)
}
+
// UnknownGroupIdError is returned by LookupGroupId when
// a group cannot be found.
type UnknownGroupIdError string

// Error implements the error interface.
func (e UnknownGroupIdError) Error() string {
	const prefix = "group: unknown groupid "
	return prefix + string(e)
}

// UnknownGroupError is returned by LookupGroup when
// a group cannot be found.
type UnknownGroupError string

// Error implements the error interface.
func (e UnknownGroupError) Error() string {
	const prefix = "group: unknown group "
	return prefix + string(e)
}
"testing"
)
-func check(t *testing.T) {
- if !implemented {
+func checkUser(t *testing.T) {
+ if !userImplemented {
t.Skip("user: not implemented; skipping tests")
}
}
func TestCurrent(t *testing.T) {
- check(t)
-
+ if runtime.GOOS == "android" {
+ t.Skipf("skipping on %s", runtime.GOOS)
+ }
u, err := Current()
if err != nil {
- t.Fatalf("Current: %v", err)
+ t.Fatalf("Current: %v (got %#v)", err, u)
}
if u.HomeDir == "" {
t.Errorf("didn't get a HomeDir")
}
func TestLookup(t *testing.T) {
- check(t)
+ checkUser(t)
if runtime.GOOS == "plan9" {
t.Skipf("Lookup not implemented on %q", runtime.GOOS)
}
func TestLookupId(t *testing.T) {
- check(t)
+ checkUser(t)
if runtime.GOOS == "plan9" {
t.Skipf("LookupId not implemented on %q", runtime.GOOS)
}
compare(t, want, got)
}
+
+func checkGroup(t *testing.T) {
+ if !groupImplemented {
+ t.Skip("user: group not implemented; skipping test")
+ }
+}
+
+func TestLookupGroup(t *testing.T) {
+ checkGroup(t)
+ user, err := Current()
+ if err != nil {
+ t.Fatalf("Current(): %v", err)
+ }
+
+ g1, err := LookupGroupId(user.Gid)
+ if err != nil {
+ t.Fatalf("LookupGroupId(%q): %v", user.Gid, err)
+ }
+ if g1.Gid != user.Gid {
+ t.Errorf("LookupGroupId(%q).Gid = %s; want %s", user.Gid, g1.Gid, user.Gid)
+ }
+
+ g2, err := LookupGroup(g1.Name)
+ if err != nil {
+ t.Fatalf("LookupGroup(%q): %v", g1.Name, err)
+ }
+ if g1.Gid != g2.Gid || g1.Name != g2.Name {
+ t.Errorf("LookupGroup(%q) = %+v; want %+v", g1.Name, g2, g1)
+ }
+}
+
+func TestGroupIds(t *testing.T) {
+ checkGroup(t)
+ if runtime.GOOS == "solaris" {
+ t.Skip("skipping GroupIds, see golang.org/issue/14709")
+ }
+ user, err := Current()
+ if err != nil {
+ t.Fatalf("Current(): %v", err)
+ }
+ gids, err := user.GroupIds()
+ if err != nil {
+ t.Fatalf("%+v.GroupIds(): %v", user, err)
+ }
+ if !containsID(gids, user.Gid) {
+ t.Errorf("%+v.GroupIds() = %v; does not contain user GID %s", user, gids, user.Gid)
+ }
+}
+
// containsID reports whether the group ID id appears in ids.
func containsID(ids []string, id string) bool {
	for i := range ids {
		if ids[i] == id {
			return true
		}
	}
	return false
}
//
// See also Rob Pike, ``Lexical File Names in Plan 9 or
// Getting Dot-Dot Right,''
-// http://plan9.bell-labs.com/sys/doc/lexnames.html
+// https://9p.io/sys/doc/lexnames.html
func Clean(path string) string {
originalPath := path
volLen := volumeNameLen(path)
//
// See also Rob Pike, ``Lexical File Names in Plan 9 or
// Getting Dot-Dot Right,''
-// http://plan9.bell-labs.com/sys/doc/lexnames.html
+// https://9p.io/sys/doc/lexnames.html
func Clean(path string) string {
if path == "" {
return "."
{[]D1{}, ""},
{(chan D1)(nil), ""},
{(func() D1)(nil), ""},
+ {(<-chan D1)(nil), ""},
+ {(chan<- D1)(nil), ""},
}
func TestNames(t *testing.T) {
{new(*int), new(IntPtr), true},
{new(IntPtr), new(*int), true},
{new(IntPtr), new(IntPtr1), false},
+ {new(Ch), new(<-chan interface{}), true},
// test runs implementsTests too
}
type IntPtr *int
type IntPtr1 *int
+type Ch <-chan interface{}
func TestAssignableTo(t *testing.T) {
for _, tt := range append(assignableTests, implementsTests...) {
UnsafePointer
)
+// tflag is used by an rtype to signal what extra type information is
+// available in the memory directly following the rtype value.
+type tflag uint8
+
+const (
+ // tflagUncommon means that there is a pointer, *uncommonType,
+ // just beyond the outer type structure.
+ //
+ // For example, if t.Kind() == Struct and t.tflag&tflagUncommon != 0,
+ // then t has uncommonType data and it can be accessed as:
+ //
+ // type tUncommon struct {
+ // structType
+ // u uncommonType
+ // }
+ // u := &(*tUncommon)(unsafe.Pointer(t)).u
+ tflagUncommon tflag = 1
+)
+
// rtype is the common implementation of most values.
// It is embedded in other, public struct types, but always
// with a unique tag like `reflect:"array"` or `reflect:"ptr"`
// so that code cannot convert from, say, *arrayType to *ptrType.
type rtype struct {
	size       uintptr
	ptrdata    uintptr  // presumably the byte span that can contain pointers — TODO confirm against runtime
	hash       uint32   // hash of type; avoids computation in hash tables
	tflag      tflag    // extra type information flags (see tflagUncommon)
	align      uint8    // alignment of variable with this type
	fieldAlign uint8    // alignment of struct field with this type
	kind       uint8    // enumeration for C
	alg        *typeAlg // algorithm table
	gcdata     *byte    // garbage collection data
	string     string   // string form; unnecessary but undeniably useful
}
// a copy of runtime.typeAlg
}
// funcType represents a function type.
+//
+// A *rtype for each in and out parameter is stored in an array that
+// directly follows the funcType (and possibly its uncommonType). So
+// a function type with one method, one input, and one output is:
+//
+// struct {
+// funcType
+// uncommonType
+// [2]*rtype // [0] is in, [1] is out
+// }
type funcType struct {
- rtype `reflect:"func"`
- dotdotdot bool // last input parameter is ...
- in []*rtype // input parameter types
- out []*rtype // output parameter types
+ rtype `reflect:"func"`
+ inCount uint16
+ outCount uint16 // top bit is set if last input parameter is ...
}
// imethod represents a method on an interface type
UnsafePointer: "unsafe.Pointer",
}
-func (t *uncommonType) uncommon() *uncommonType {
- return t
-}
-
func (t *uncommonType) PkgPath() string {
if t == nil || t.pkgPath == nil {
return ""
return *t.pkgPath
}
// uncommon returns the *uncommonType stored directly after t's
// kind-specific type structure, or nil if t has none (tflagUncommon
// unset). Because the uncommonType follows the full concrete layout,
// each kind needs its own wrapper struct so the offset is computed
// from the right outer type.
func (t *rtype) uncommon() *uncommonType {
	if t.tflag&tflagUncommon == 0 {
		return nil
	}
	switch t.Kind() {
	case Struct:
		type u struct {
			structType
			u uncommonType
		}
		return &(*u)(unsafe.Pointer(t)).u
	case Ptr:
		type u struct {
			ptrType
			u uncommonType
		}
		return &(*u)(unsafe.Pointer(t)).u
	case Func:
		type u struct {
			funcType
			u uncommonType
		}
		return &(*u)(unsafe.Pointer(t)).u
	case Slice:
		type u struct {
			sliceType
			u uncommonType
		}
		return &(*u)(unsafe.Pointer(t)).u
	case Array:
		type u struct {
			arrayType
			u uncommonType
		}
		return &(*u)(unsafe.Pointer(t)).u
	case Chan:
		type u struct {
			chanType
			u uncommonType
		}
		return &(*u)(unsafe.Pointer(t)).u
	case Map:
		type u struct {
			mapType
			u uncommonType
		}
		return &(*u)(unsafe.Pointer(t)).u
	case Interface:
		type u struct {
			interfaceType
			u uncommonType
		}
		return &(*u)(unsafe.Pointer(t)).u
	default:
		// Kinds with no extra payload: the uncommonType follows the
		// bare rtype.
		type u struct {
			rtype
			u uncommonType
		}
		return &(*u)(unsafe.Pointer(t)).u
	}
}
+
func (t *rtype) String() string { return t.string }
func (t *rtype) Size() uintptr { return t.size }
tt := (*interfaceType)(unsafe.Pointer(t))
return tt.NumMethod()
}
- return t.uncommonType.NumMethod()
+ return t.uncommon().NumMethod()
}
func (t *rtype) Method(i int) (m Method) {
tt := (*interfaceType)(unsafe.Pointer(t))
return tt.Method(i)
}
- return t.uncommonType.Method(i)
+ return t.uncommon().Method(i)
}
func (t *rtype) MethodByName(name string) (m Method, ok bool) {
tt := (*interfaceType)(unsafe.Pointer(t))
return tt.MethodByName(name)
}
- return t.uncommonType.MethodByName(name)
+ return t.uncommon().MethodByName(name)
}
func (t *rtype) PkgPath() string {
- return t.uncommonType.PkgPath()
+ return t.uncommon().PkgPath()
}
func hasPrefix(s, prefix string) bool {
if hasPrefix(t.string, "chan ") {
return ""
}
+ if hasPrefix(t.string, "chan<-") {
+ return ""
+ }
if hasPrefix(t.string, "func(") {
return ""
}
- if t.string[0] == '[' || t.string[0] == '*' {
+ switch t.string[0] {
+ case '[', '*', '<':
return ""
}
i := len(t.string) - 1
panic("reflect: IsVariadic of non-func type")
}
tt := (*funcType)(unsafe.Pointer(t))
- return tt.dotdotdot
+ return tt.outCount&(1<<15) != 0
}
func (t *rtype) Elem() Type {
panic("reflect: In of non-func type")
}
tt := (*funcType)(unsafe.Pointer(t))
- return toType(tt.in[i])
+ return toType(tt.in()[i])
}
func (t *rtype) Key() Type {
panic("reflect: NumIn of non-func type")
}
tt := (*funcType)(unsafe.Pointer(t))
- return len(tt.in)
+ return int(tt.inCount)
}
func (t *rtype) NumOut() int {
panic("reflect: NumOut of non-func type")
}
tt := (*funcType)(unsafe.Pointer(t))
- return len(tt.out)
+ return len(tt.out())
}
func (t *rtype) Out(i int) Type {
panic("reflect: Out of non-func type")
}
tt := (*funcType)(unsafe.Pointer(t))
- return toType(tt.out[i])
+ return toType(tt.out()[i])
+}
+
+func (t *funcType) in() []*rtype {
+ uadd := uintptr(unsafe.Sizeof(*t))
+ if t.tflag&tflagUncommon != 0 {
+ uadd += unsafe.Sizeof(uncommonType{})
+ }
+ return (*[1 << 20]*rtype)(add(unsafe.Pointer(t), uadd))[:t.inCount]
+}
+
+func (t *funcType) out() []*rtype {
+ uadd := uintptr(unsafe.Sizeof(*t))
+ if t.tflag&tflagUncommon != 0 {
+ uadd += unsafe.Sizeof(uncommonType{})
+ }
+ outCount := t.outCount & (1<<15 - 1)
+ return (*[1 << 20]*rtype)(add(unsafe.Pointer(t), uadd))[t.inCount : t.inCount+outCount]
+}
+
// add returns an unsafe.Pointer advanced x bytes past p. Callers must
// ensure the result still points within the same allocation, per the
// unsafe.Pointer conversion rules.
func add(p unsafe.Pointer, x uintptr) unsafe.Pointer {
	return unsafe.Pointer(uintptr(p) + x)
}
func (d ChanDir) String() string {
// old hash and the new "*".
p.hash = fnv1(t.hash, '*')
- p.uncommonType = nil
p.elem = t
ptrMap.m[t] = p
case Func:
t := (*funcType)(unsafe.Pointer(T))
v := (*funcType)(unsafe.Pointer(V))
- if t.dotdotdot != v.dotdotdot || len(t.in) != len(v.in) || len(t.out) != len(v.out) {
+ if t.outCount != v.outCount || t.inCount != v.inCount {
return false
}
- for i, typ := range t.in {
- if typ != v.in[i] {
+ for i := 0; i < t.NumIn(); i++ {
+ if t.In(i) != v.In(i) {
return false
}
}
- for i, typ := range t.out {
- if typ != v.out[i] {
+ for i := 0; i < t.NumOut(); i++ {
+ if t.Out(i) != v.Out(i) {
return false
}
}
ch.string = s
ch.hash = fnv1(typ.hash, 'c', byte(dir))
ch.elem = typ
- ch.uncommonType = nil
return cachePut(ckey, &ch.rtype)
}
mt.bucketsize = uint16(mt.bucket.size)
mt.reflexivekey = isReflexive(ktyp)
mt.needkeyupdate = needKeyUpdate(ktyp)
- mt.uncommonType = nil
return cachePut(ckey, &mt.rtype)
}
+type funcTypeFixed4 struct {
+ funcType
+ args [4]*rtype
+}
+type funcTypeFixed8 struct {
+ funcType
+ args [8]*rtype
+}
+type funcTypeFixed16 struct {
+ funcType
+ args [16]*rtype
+}
+type funcTypeFixed32 struct {
+ funcType
+ args [32]*rtype
+}
+type funcTypeFixed64 struct {
+ funcType
+ args [64]*rtype
+}
+type funcTypeFixed128 struct {
+ funcType
+ args [128]*rtype
+}
+
// FuncOf returns the function type with the given argument and result types.
// For example if k represents int and e represents string,
// FuncOf([]Type{k}, []Type{e}, false) represents func(int) string.
// Make a func type.
var ifunc interface{} = (func())(nil)
prototype := *(**funcType)(unsafe.Pointer(&ifunc))
- ft := new(funcType)
+ n := len(in) + len(out)
+
+ var ft *funcType
+ var args []*rtype
+ switch {
+ case n <= 4:
+ fixed := new(funcTypeFixed4)
+ args = fixed.args[:0:len(fixed.args)]
+ ft = &fixed.funcType
+ case n <= 8:
+ fixed := new(funcTypeFixed8)
+ args = fixed.args[:0:len(fixed.args)]
+ ft = &fixed.funcType
+ case n <= 16:
+ fixed := new(funcTypeFixed16)
+ args = fixed.args[:0:len(fixed.args)]
+ ft = &fixed.funcType
+ case n <= 32:
+ fixed := new(funcTypeFixed32)
+ args = fixed.args[:0:len(fixed.args)]
+ ft = &fixed.funcType
+ case n <= 64:
+ fixed := new(funcTypeFixed64)
+ args = fixed.args[:0:len(fixed.args)]
+ ft = &fixed.funcType
+ case n <= 128:
+ fixed := new(funcTypeFixed128)
+ args = fixed.args[:0:len(fixed.args)]
+ ft = &fixed.funcType
+ default:
+ panic("reflect.FuncOf: too many arguments")
+ }
*ft = *prototype
// Build a hash and minimally populate ft.
var hash uint32
- var fin, fout []*rtype
for _, in := range in {
t := in.(*rtype)
- fin = append(fin, t)
+ args = append(args, t)
hash = fnv1(hash, byte(t.hash>>24), byte(t.hash>>16), byte(t.hash>>8), byte(t.hash))
}
if variadic {
hash = fnv1(hash, '.')
for _, out := range out {
t := out.(*rtype)
- fout = append(fout, t)
+ args = append(args, t)
hash = fnv1(hash, byte(t.hash>>24), byte(t.hash>>16), byte(t.hash>>8), byte(t.hash))
}
+ if len(args) > 50 {
+ panic("reflect.FuncOf does not support more than 50 arguments")
+ }
ft.hash = hash
- ft.in = fin
- ft.out = fout
- ft.dotdotdot = variadic
+ ft.inCount = uint16(len(in))
+ ft.outCount = uint16(len(out))
+ if variadic {
+ ft.outCount |= 1 << 15
+ }
// Look in cache.
funcLookupCache.RLock()
// Populate the remaining fields of ft and store in cache.
ft.string = str
- ft.uncommonType = nil
funcLookupCache.m[hash] = append(funcLookupCache.m[hash], &ft.rtype)
return &ft.rtype
func funcStr(ft *funcType) string {
repr := make([]byte, 0, 64)
repr = append(repr, "func("...)
- for i, t := range ft.in {
+ for i, t := range ft.in() {
if i > 0 {
repr = append(repr, ", "...)
}
- if ft.dotdotdot && i == len(ft.in)-1 {
+ if ft.IsVariadic() && i == int(ft.inCount)-1 {
repr = append(repr, "..."...)
repr = append(repr, (*sliceType)(unsafe.Pointer(t)).elem.string...)
} else {
}
}
repr = append(repr, ')')
- if l := len(ft.out); l == 1 {
+ out := ft.out()
+ if len(out) == 1 {
repr = append(repr, ' ')
- } else if l > 1 {
+ } else if len(out) > 1 {
repr = append(repr, " ("...)
}
- for i, t := range ft.out {
+ for i, t := range out {
if i > 0 {
repr = append(repr, ", "...)
}
repr = append(repr, t.string...)
}
- if len(ft.out) > 1 {
+ if len(out) > 1 {
repr = append(repr, ')')
}
return string(repr)
slice.string = s
slice.hash = fnv1(typ.hash, '[')
slice.elem = typ
- slice.uncommonType = nil
return cachePut(ckey, &slice.rtype)
}
}
array.align = typ.align
array.fieldAlign = typ.fieldAlign
- array.uncommonType = nil
array.len = uintptr(count)
array.slice = slice.(*rtype)
}
offset += ptrSize
}
- for _, arg := range tt.in {
+ for _, arg := range tt.in() {
offset += -offset & uintptr(arg.align-1)
addTypeBits(ptrmap, offset, arg)
offset += arg.size
}
offset += -offset & (ptrSize - 1)
retOffset = offset
- for _, res := range tt.out {
+ for _, res := range tt.out() {
offset += -offset & uintptr(res.align-1)
addTypeBits(ptrmap, offset, res)
offset += res.size
// Copy argument frame into Values.
ptr := frame
off := uintptr(0)
- in := make([]Value, 0, len(ftyp.in))
- for _, arg := range ftyp.in {
- typ := arg
+ in := make([]Value, 0, int(ftyp.inCount))
+ for _, typ := range ftyp.in() {
off += -off & uintptr(typ.align-1)
addr := unsafe.Pointer(uintptr(ptr) + off)
v := Value{typ, nil, flag(typ.Kind())}
// Call underlying function.
out := f(in)
- if len(out) != len(ftyp.out) {
+ numOut := ftyp.NumOut()
+ if len(out) != numOut {
panic("reflect: wrong return count from function created by MakeFunc")
}
// Copy results back into argument frame.
- if len(ftyp.out) > 0 {
+ if numOut > 0 {
off += -off & (ptrSize - 1)
if runtime.GOARCH == "amd64p32" {
off = align(off, 8)
}
- for i, arg := range ftyp.out {
- typ := arg
+ for i, typ := range ftyp.out() {
v := out[i]
if v.typ != typ {
panic("reflect: function created by MakeFunc using " + funcName(f) +
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build cgo
-
#include <stdarg.h>
#include <android/log.h>
#include "libcgo.h"
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build cgo
-
#include <string.h> /* for strerror */
#include <pthread.h>
#include <signal.h>
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build cgo
-
#include <string.h> /* for strerror */
#include <pthread.h>
#include <signal.h>
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build cgo
-
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build cgo
-
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build cgo
-
#include <string.h> /* for strerror */
#include <pthread.h>
#include <signal.h>
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build cgo
-
#include <string.h> /* for strerror */
#include <pthread.h>
#include <signal.h>
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build cgo
-
#include <limits.h>
#include <pthread.h>
#include <signal.h>
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build cgo
-
#include <limits.h>
#include <pthread.h>
#include <signal.h>
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build cgo
-
#include <sys/types.h>
#include <sys/signalvar.h>
#include <pthread.h>
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build cgo
-
// +build !android,linux
#include <stdarg.h>
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build cgo
-
#include <sys/types.h>
#include <sys/signalvar.h>
#include <pthread.h>
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build cgo
-
#include <sys/types.h>
#include <sys/signalvar.h>
#include <pthread.h>
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build cgo
-
#include <sys/types.h>
#include <machine/sysarch.h>
#include <sys/signalvar.h>
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build cgo
-
// TODO: see issue #10410
// +build linux
// +build ppc64 ppc64le
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build cgo
-
#include <stdio.h>
#include <stdlib.h>
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build cgo
-
#include <stdio.h>
#include <stdlib.h>
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build cgo
-
#include <pthread.h>
#include <string.h>
#include <signal.h>
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build cgo
-
#include <pthread.h>
#include <errno.h>
#include <string.h> // strerror
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build cgo
-
#include <pthread.h>
#include <string.h>
#include <signal.h>
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build cgo
-
#include <pthread.h>
#include <string.h>
#include <signal.h>
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build cgo
-
// +build ppc64 ppc64le
#include <pthread.h>
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build cgo
-
// +build linux,amd64
#include <errno.h>
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build cgo
-
#include <sys/types.h>
#include <pthread.h>
#include <signal.h>
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build cgo
-
#include <sys/types.h>
#include <pthread.h>
#include <signal.h>
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build cgo
-
#include <sys/types.h>
#include <pthread.h>
#include <signal.h>
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build cgo
-
#include <sys/types.h>
#include <dlfcn.h>
#include <errno.h>
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build cgo
-
#include <sys/types.h>
#include <dlfcn.h>
#include <errno.h>
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build cgo
-
// Emulation of the Unix signal SIGSEGV.
//
// On iOS, Go tests and apps under development are run by lldb.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build cgo
-
// +build !lldb
// +build darwin
// +build arm arm64
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build cgo
-
#include <pthread.h>
#include <string.h>
#include <signal.h>
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build cgo
-
#include "libcgo.h"
/* Stub for calling malloc from Go */
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build cgo
-
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include <process.h>
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build cgo
-
#define WIN64_LEAN_AND_MEAN
#include <windows.h>
#include <process.h>
// For cgo, cb.arg points into a C stack frame and therefore doesn't
// hold any pointers that the GC can find anyway - the write barrier
// would be a no-op.
- reflectcall(nil, unsafe.Pointer(cb.fn), unsafe.Pointer(cb.arg), uint32(cb.argsize), 0)
+ reflectcall(nil, unsafe.Pointer(cb.fn), cb.arg, uint32(cb.argsize), 0)
if raceenabled {
racereleasemerge(unsafe.Pointer(&racecgosync))
if hchanSize%maxAlign != 0 || elem.align > maxAlign {
throw("makechan: bad alignment")
}
- if size < 0 || int64(uintptr(size)) != size || (elem.size > 0 && uintptr(size) > (_MaxMem-hchanSize)/uintptr(elem.size)) {
+ if size < 0 || int64(uintptr(size)) != size || (elem.size > 0 && uintptr(size) > (_MaxMem-hchanSize)/elem.size) {
panic("makechan: size out of range")
}
// buf points into the same allocation, elemtype is persistent.
// SudoG's are referenced from their owning thread so they can't be collected.
// TODO(dvyukov,rlh): Rethink when collector can move allocated objects.
- c = (*hchan)(mallocgc(hchanSize+uintptr(size)*uintptr(elem.size), nil, flagNoScan))
+ c = (*hchan)(mallocgc(hchanSize+uintptr(size)*elem.size, nil, flagNoScan))
if size > 0 && elem.size != 0 {
c.buf = add(unsafe.Pointer(c), hchanSize)
} else {
}
gp.param = nil
if mysg.releasetime > 0 {
- blockevent(int64(mysg.releasetime)-t0, 2)
+ blockevent(mysg.releasetime-t0, 2)
}
releaseSudog(mysg)
return true
t.Fatalf("output does not start with %q:\n%s", want, got)
}
}
+
+func TestCgoCCodeSIGPROF(t *testing.T) {
+ got := runTestProg(t, "testprogcgo", "CgoCCodeSIGPROF")
+ want := "OK\n"
+ if got != want {
+ t.Errorf("expected %q got %v", want, got)
+ }
+}
func (c *sigctxt) pc() uintptr { return uintptr(c.u.pc) }
func (c *sigctxt) sp() uintptr { return uintptr(c.u.sp) }
+func (c *sigctxt) lr() uintptr { return uintptr(0) }
func (c *sigctxt) setpc(x uintptr) { c.u.pc = uint32(x) }
func (c *sigctxt) setsp(x uintptr) { c.u.sp = uint32(x) }
+func (c *sigctxt) setlr(x uintptr) {}
+
+func (c *sigctxt) savelr(x uintptr) {}
func dumpregs(u *ureg) {
print("ax ", hex(u.ax), "\n")
print("fs ", hex(u.fs), "\n")
print("gs ", hex(u.gs), "\n")
}
+
+func sigpanictramp() {}
func (c *sigctxt) pc() uintptr { return uintptr(c.u.ip) }
func (c *sigctxt) sp() uintptr { return uintptr(c.u.sp) }
+func (c *sigctxt) lr() uintptr { return uintptr(0) }
func (c *sigctxt) setpc(x uintptr) { c.u.ip = uint64(x) }
func (c *sigctxt) setsp(x uintptr) { c.u.sp = uint64(x) }
+func (c *sigctxt) setlr(x uintptr) {}
+
+func (c *sigctxt) savelr(x uintptr) {}
func dumpregs(u *ureg) {
print("ax ", hex(u.ax), "\n")
print("fs ", hex(u.fs), "\n")
print("gs ", hex(u.gs), "\n")
}
+
+func sigpanictramp() {}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+const _PAGESIZE = 0x1000
+
+type ureg struct {
+ r0 uint32 /* general registers */
+ r1 uint32 /* ... */
+ r2 uint32 /* ... */
+ r3 uint32 /* ... */
+ r4 uint32 /* ... */
+ r5 uint32 /* ... */
+ r6 uint32 /* ... */
+ r7 uint32 /* ... */
+ r8 uint32 /* ... */
+ r9 uint32 /* ... */
+ r10 uint32 /* ... */
+ r11 uint32 /* ... */
+ r12 uint32 /* ... */
+ sp uint32
+ link uint32 /* ... */
+ trap uint32 /* trap type */
+ psr uint32
+ pc uint32 /* interrupted addr */
+}
+
+type sigctxt struct {
+ u *ureg
+}
+
+func (c *sigctxt) pc() uintptr { return uintptr(c.u.pc) }
+func (c *sigctxt) sp() uintptr { return uintptr(c.u.sp) }
+func (c *sigctxt) lr() uintptr { return uintptr(c.u.link) }
+
+func (c *sigctxt) setpc(x uintptr) { c.u.pc = uint32(x) }
+func (c *sigctxt) setsp(x uintptr) { c.u.sp = uint32(x) }
+func (c *sigctxt) setlr(x uintptr) { c.u.link = uint32(x) }
+func (c *sigctxt) savelr(x uintptr) { c.u.r0 = uint32(x) }
+
+func dumpregs(u *ureg) {
+ print("r0 ", hex(u.r0), "\n")
+ print("r1 ", hex(u.r1), "\n")
+ print("r2 ", hex(u.r2), "\n")
+ print("r3 ", hex(u.r3), "\n")
+ print("r4 ", hex(u.r4), "\n")
+ print("r5 ", hex(u.r5), "\n")
+ print("r6 ", hex(u.r6), "\n")
+ print("r7 ", hex(u.r7), "\n")
+ print("r8 ", hex(u.r8), "\n")
+ print("r9 ", hex(u.r9), "\n")
+ print("r10 ", hex(u.r10), "\n")
+ print("r11 ", hex(u.r11), "\n")
+ print("r12 ", hex(u.r12), "\n")
+ print("sp ", hex(u.sp), "\n")
+ print("link ", hex(u.link), "\n")
+ print("pc ", hex(u.pc), "\n")
+ print("psr ", hex(u.psr), "\n")
+}
+
+func sigpanictramp()
return
}
arg := [2]unsafe.Pointer{cstring(k), cstring(v)}
- asmcgocall(unsafe.Pointer(_cgo_setenv), unsafe.Pointer(&arg))
+ asmcgocall(_cgo_setenv, unsafe.Pointer(&arg))
}
// Update the C environment if cgo is loaded.
return
}
arg := [1]unsafe.Pointer{cstring(k)}
- asmcgocall(unsafe.Pointer(_cgo_unsetenv), unsafe.Pointer(&arg))
+ asmcgocall(_cgo_unsetenv, unsafe.Pointer(&arg))
}
func cstring(s string) unsafe.Pointer {
// If h != nil, the map can be created directly in h.
// If bucket != nil, bucket can be used as the first bucket.
func makemap(t *maptype, hint int64, h *hmap, bucket unsafe.Pointer) *hmap {
- if sz := unsafe.Sizeof(hmap{}); sz > 48 || sz != uintptr(t.hmap.size) {
+ if sz := unsafe.Sizeof(hmap{}); sz > 48 || sz != t.hmap.size {
println("runtime: sizeof(hmap) =", sz, ", t.hmap.size =", t.hmap.size)
throw("bad hmap size")
}
if t.elem.align > bucketCnt {
throw("value align too big")
}
- if uintptr(t.key.size)%uintptr(t.key.align) != 0 {
+ if t.key.size%uintptr(t.key.align) != 0 {
throw("key size not a multiple of key align")
}
- if uintptr(t.elem.size)%uintptr(t.elem.align) != 0 {
+ if t.elem.size%uintptr(t.elem.align) != 0 {
throw("value size not a multiple of value align")
}
if bucketCnt < 8 {
dumpint(tagType)
dumpint(uint64(uintptr(unsafe.Pointer(t))))
dumpint(uint64(t.size))
- if t.x == nil || t.x.pkgpath == nil {
+ if x := t.uncommon(); x == nil || x.pkgpath == nil {
dumpstr(t._string)
} else {
- pkgpath := stringStructOf(t.x.pkgpath)
+ pkgpath := stringStructOf(x.pkgpath)
namestr := t.name()
name := stringStructOf(&namestr)
dumpint(uint64(uintptr(pkgpath.len) + 1 + uintptr(name.len)))
// dump kinds & offsets of interesting fields in bv
func dumpbv(cbv *bitvector, offset uintptr) {
bv := gobv(*cbv)
- for i := uintptr(0); i < uintptr(bv.n); i++ {
+ for i := uintptr(0); i < bv.n; i++ {
if bv.bytedata[i/8]>>(i%8)&1 == 1 {
dumpint(fieldKindPtr)
dumpint(uint64(offset + i*sys.PtrSize))
}
// easy case
- x := typ.x
+ x := typ.uncommon()
if x == nil {
if canfail {
return nil
itype := i._type
for ; j < nt; j++ {
t := &x.mhdr[j]
+ if t.name == nil {
+ throw("itab t.name is nil")
+ }
if t.mtyp == itype && (t.name == iname || *t.name == *iname) && t.pkgpath == ipkgpath {
if m != nil {
*(*unsafe.Pointer)(add(unsafe.Pointer(&m.fun[0]), uintptr(k)*sys.PtrSize)) = t.ifn
tab := i.tab
if tab == nil || tab._type != t {
if r != nil {
- memclr(r, uintptr(t.size))
+ memclr(r, t.size)
}
return false
}
GC()
}
if e._type != t {
- memclr(r, uintptr(t.size))
+ memclr(r, t.size)
return false
}
if isDirectIface(t) {
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+TEXT runtime∕internal∕atomic·Cas(SB),NOSPLIT,$0
+ B runtime∕internal∕atomic·armcas(SB)
+
+TEXT runtime∕internal∕atomic·Casp1(SB),NOSPLIT,$0
+ B runtime∕internal∕atomic·Cas(SB)
// Keep everything page-aligned.
// Our pages are bigger than hardware pages.
h.arena_end = p + p_size
- used := p + (-uintptr(p) & (_PageSize - 1))
+ used := p + (-p & (_PageSize - 1))
h.mapBits(used)
h.mapSpans(used)
h.arena_used = used
racemapshadow(unsafe.Pointer(p), n)
}
- if uintptr(p)&(_PageSize-1) != 0 {
+ if p&(_PageSize-1) != 0 {
throw("misrounded allocation in MHeap_SysAlloc")
}
return unsafe.Pointer(p)
return nil
}
- if p < h.arena_start || uintptr(p)+p_size-h.arena_start >= _MaxArena32 {
+ if p < h.arena_start || p+p_size-h.arena_start >= _MaxArena32 {
top := ^uintptr(0)
if top-h.arena_start > _MaxArena32 {
top = h.arena_start + _MaxArena32
p_end := p + p_size
p += -p & (_PageSize - 1)
- if uintptr(p)+n > h.arena_used {
+ if p+n > h.arena_used {
h.mapBits(p + n)
h.mapSpans(p + n)
h.arena_used = p + n
}
}
- if uintptr(p)&(_PageSize-1) != 0 {
+ if p&(_PageSize-1) != 0 {
throw("misrounded allocation in MHeap_SysAlloc")
}
return unsafe.Pointer(p)
var s *mspan
shouldhelpgc = true
systemstack(func() {
- s = largeAlloc(size, uint32(flags))
+ s = largeAlloc(size, flags)
})
x = unsafe.Pointer(uintptr(s.start << pageShift))
- size = uintptr(s.elemsize)
+ size = s.elemsize
}
if flags&flagNoScan != 0 {
if typ.kind&kindNoPointers != 0 {
flags |= flagNoScan
}
- return mallocgc(uintptr(typ.size), typ, flags)
+ return mallocgc(typ.size, typ, flags)
}
//go:linkname reflect_unsafe_New reflect.unsafe_New
if typ.kind&kindNoPointers != 0 {
flags |= flagNoScan
}
- if int(n) < 0 || (typ.size > 0 && n > _MaxMem/uintptr(typ.size)) {
+ if int(n) < 0 || (typ.size > 0 && n > _MaxMem/typ.size) {
panic("runtime: allocation size out of range")
}
- return mallocgc(uintptr(typ.size)*n, typ, flags)
+ return mallocgc(typ.size*n, typ, flags)
}
//go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
// x = -log_e(q) * period
// x = log_2(q) * (-log_e(2)) * period ; Using log_2 for efficiency
const randomBitCount = 26
- q := uint32(fastrand1())%(1<<randomBitCount) + 1
+ q := fastrand1()%(1<<randomBitCount) + 1
qlog := fastlog2(float64(q)) - randomBitCount
if qlog > 0 {
qlog = 0
if n == 0 {
return 0
}
- dstp := unsafe.Pointer(dst.array)
- srcp := unsafe.Pointer(src.array)
+ dstp := dst.array
+ srcp := src.array
if raceenabled {
callerpc := getcallerpc(unsafe.Pointer(&typ))
}
}
})
- return int(n)
+ return n
}
//go:linkname reflect_typedslicecopy reflect.typedslicecopy
// which prevents us from allocating more stack.
//go:nosplit
func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer {
- v := unsafe.Pointer(mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0))
+ v := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
if uintptr(v) < 4096 {
return nil
}
return v
}
- p := unsafe.Pointer(mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE, -1, 0))
+ p := mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
if uintptr(p) < 4096 {
return nil
}
// which prevents us from allocating more stack.
//go:nosplit
func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer {
- v := unsafe.Pointer(mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0))
+ v := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
if uintptr(v) < 4096 {
return nil
}
func sysReserve(v unsafe.Pointer, n uintptr, reserved *bool) unsafe.Pointer {
*reserved = true
- p := unsafe.Pointer(mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE, -1, 0))
+ p := mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
if uintptr(p) < 4096 {
return nil
}
func sysMap(v unsafe.Pointer, n uintptr, reserved bool, sysStat *uint64) {
mSysStatInc(sysStat, n)
- p := unsafe.Pointer(mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0))
+ p := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)
if uintptr(p) == _ENOMEM {
throw("runtime: out of memory")
}
for i := fb.cnt; i > 0; i-- {
f := &fb.fin[i-1]
- framesz := unsafe.Sizeof((interface{})(nil)) + uintptr(f.nret)
+ framesz := unsafe.Sizeof((interface{})(nil)) + f.nret
if framecap < framesz {
// The frame does not contain pointers interesting for GC,
// all not yet finalized objects are stored in finq.
throw("runtime.SetFinalizer: second argument is " + ftyp._string + ", not a function")
}
ft := (*functype)(unsafe.Pointer(ftyp))
- if ft.dotdotdot || len(ft.in) != 1 {
+ if ft.dotdotdot() {
+ throw("runtime.SetFinalizer: cannot pass " + etyp._string + " to finalizer " + ftyp._string + " because dotdotdot")
+ }
+ if ft.dotdotdot() || ft.inCount != 1 {
throw("runtime.SetFinalizer: cannot pass " + etyp._string + " to finalizer " + ftyp._string)
}
- fint := ft.in[0]
+ fint := ft.in()[0]
switch {
case fint == etyp:
// ok - same type
goto okarg
case fint.kind&kindMask == kindPtr:
- if (fint.x == nil || etyp.x == nil) && (*ptrtype)(unsafe.Pointer(fint)).elem == ot.elem {
+ if (fint.uncommon() == nil || etyp.uncommon() == nil) && (*ptrtype)(unsafe.Pointer(fint)).elem == ot.elem {
// ok - not same type, but both pointers,
// one or the other is unnamed, and same element type, so assignable.
goto okarg
okarg:
// compute size needed for return parameters
nret := uintptr(0)
- for _, t := range ft.out {
+ for _, t := range ft.out() {
nret = round(nret, uintptr(t.align)) + uintptr(t.size)
}
nret = round(nret, sys.PtrSize)
return
}
- n = uintptr(s.elemsize)
+ n = s.elemsize
if s.sizeclass != 0 {
x = add(x, (uintptr(v)-uintptr(x))/n*n)
}
// the cycle.
idleMarkTime int64
- // bgMarkStartTime is the absolute start time in nanoseconds
- // that the background mark phase started.
- bgMarkStartTime int64
-
- // assistTime is the absolute start time in nanoseconds that
- // mutator assists were enabled.
- assistStartTime int64
+ // markStartTime is the absolute start time in nanoseconds
+ // that assists and background mark workers started.
+ markStartTime int64
// heapGoal is the goal memstats.heap_live for when this cycle
// ends. This is computed at the beginning of each cycle.
// technically isn't comparable to the trigger ratio.
goalGrowthRatio := float64(gcpercent) / 100
actualGrowthRatio := float64(memstats.heap_live)/float64(memstats.heap_marked) - 1
- assistDuration := nanotime() - c.assistStartTime
+ assistDuration := nanotime() - c.markStartTime
// Assume background mark hit its utilization goal.
utilization := gcGoalUtilization
// TODO(austin): Shorter preemption interval for mark
// worker to improve fairness and give this
// finer-grained control over schedule?
- now := nanotime() - gcController.bgMarkStartTime
+ now := nanotime() - gcController.markStartTime
then := now + gcForcePreemptNS
timeUsed := c.fractionalMarkTime + gcForcePreemptNS
if then > 0 && float64(timeUsed)/float64(then) > c.fractionalUtilizationGoal {
if mode == gcBackgroundMode {
gcBgMarkStartWorkers()
}
+
+ gcResetMarkState()
+
now := nanotime()
work.stwprocs, work.maxprocs = gcprocs(), gomaxprocs
work.tSweepTerm = now
// reclaimed until the next GC cycle.
clearpools()
- gcResetMarkState()
-
- work.finalizersDone = false
-
if mode == gcBackgroundMode { // Do as much work concurrently as possible
gcController.startCycle()
work.heapGoal = gcController.heapGoal
// Assists and workers can start the moment we start
// the world.
- gcController.assistStartTime = now
- gcController.bgMarkStartTime = now
+ gcController.markStartTime = now
// Concurrent mark.
systemstack(startTheWorldWithSema)
gcDrain(gcw, gcDrainBlock)
gcw.dispose()
- gcMarkRootCheck()
+ // TODO: Re-enable once this is cheap.
+ //gcMarkRootCheck()
if work.full != 0 {
throw("work.full != 0")
}
}
// gcResetMarkState resets global state prior to marking (concurrent
-// or STW) and resets the stack scan state of all Gs. Any Gs created
-// after this will also be in the reset state.
+// or STW) and resets the stack scan state of all Gs.
+//
+// This is safe to do without the world stopped because any Gs created
+// during or after this will start out in the reset state.
func gcResetMarkState() {
// This may be called during a concurrent phase, so make sure
// allgs doesn't change.
work.bytesMarked = 0
work.initialHeapLive = memstats.heap_live
+ work.finalizersDone = false
}
// Hooks for other packages
print(" ...\n")
skipped = false
}
- print(" *(", label, "+", i, ") = ", hex(*(*uintptr)(unsafe.Pointer(obj + uintptr(i)))))
+ print(" *(", label, "+", i, ") = ", hex(*(*uintptr)(unsafe.Pointer(obj + i))))
if i == off {
print(" <==")
}
}
}
h_allspans = new
- h.allspans = (**mspan)(unsafe.Pointer(sp.array))
+ h.allspans = (**mspan)(sp.array)
}
h_allspans = append(h_allspans, s)
h.nspan = uint32(len(h_allspans))
n := s.elemsize
if base != nil {
- i := (uintptr(v) - uintptr(p)) / n
+ i := (v - p) / n
*base = p + i*n
}
if size != nil {
lock(&proflock)
for b := mbuckets; b != nil; b = b.allnext {
mp := b.mp()
- fn(b, uintptr(b.nstk), &b.stk()[0], b.size, mp.allocs, mp.frees)
+ fn(b, b.nstk, &b.stk()[0], b.size, mp.allocs, mp.frees)
}
unlock(&proflock)
}
for b := bbuckets; b != nil; b = b.allnext {
bp := b.bp()
r := &p[0]
- r.Count = int64(bp.count)
- r.Cycles = int64(bp.cycles)
+ r.Count = bp.count
+ r.Cycles = bp.cycles
i := copy(r.Stack0[:], b.stk())
for ; i < len(r.Stack0); i++ {
r.Stack0[i] = 0
memstats.nfree += mheap_.nsmallfree[i]
memstats.by_size[i].nfree = mheap_.nsmallfree[i]
memstats.by_size[i].nmalloc += mheap_.nsmallfree[i]
- smallfree += uint64(mheap_.nsmallfree[i]) * uint64(class_to_size[i])
+ smallfree += mheap_.nsmallfree[i] * uint64(class_to_size[i])
}
memstats.nfree += memstats.tinyallocs
memstats.nmalloc += memstats.nfree
// Calculate derived stats.
- memstats.total_alloc = uint64(memstats.alloc) + uint64(mheap_.largefree) + smallfree
+ memstats.total_alloc = memstats.alloc + mheap_.largefree + smallfree
memstats.heap_alloc = memstats.alloc
memstats.heap_objects = memstats.nmalloc - memstats.nfree
}
if pd.rg != 0 && pd.rg != pdReady {
throw("netpollClose: blocked read on closing descriptor")
}
- netpollclose(uintptr(pd.fd))
+ netpollclose(pd.fd)
pollcache.free(pd)
}
var iocphandle uintptr = _INVALID_HANDLE_VALUE // completion port io handle
func netpollinit() {
- iocphandle = uintptr(stdcall4(_CreateIoCompletionPort, _INVALID_HANDLE_VALUE, 0, 0, _DWORD_MAX))
+ iocphandle = stdcall4(_CreateIoCompletionPort, _INVALID_HANDLE_VALUE, 0, 0, _DWORD_MAX)
if iocphandle == 0 {
println("netpoll: failed to create iocp handle (errno=", getlasterror(), ")")
throw("netpoll: failed to create iocp handle")
func goexitsall(status *byte) {
var buf [_ERRMAX]byte
+ getg().m.locks++
n := copy(buf[:], goexits)
n = copy(buf[n:], gostringnocopy(status))
pid := getpid()
postnote(mp.procid, buf[:])
}
}
+ getg().m.locks--
}
var procdir = []byte("/proc/")
gp.sig = uint32(sig)
gp.sigpc = c.pc()
- pc := uintptr(c.pc())
- sp := uintptr(c.sp())
+ pc := c.pc()
+ sp := c.sp()
// If we don't recognize the PC as code
// but we do recognize the top pointer on the stack as code,
pc = 0
}
- // Only push sigpanic if PC != 0.
- //
+	// If LR exists, sigpanictramp must save it to the stack
+ // before entry to sigpanic so that panics in leaf
+ // functions are correctly handled. This will smash
+ // the stack frame but we're not going back there
+ // anyway.
+ if usesLR {
+ c.savelr(c.lr())
+ }
+
// If PC == 0, probably panicked because of a call to a nil func.
- // Not pushing that onto SP will make the trace look like a call
+ // Not faking that as the return address will make the trace look like a call
// to sigpanic instead. (Otherwise the trace will end at
// sigpanic and we won't get to see who faulted).
if pc != 0 {
- if sys.RegSize > sys.PtrSize {
+ if usesLR {
+ c.setlr(pc)
+ } else {
+ if sys.RegSize > sys.PtrSize {
+ sp -= sys.PtrSize
+ *(*uintptr)(unsafe.Pointer(sp)) = 0
+ }
sp -= sys.PtrSize
- *(*uintptr)(unsafe.Pointer(sp)) = 0
+ *(*uintptr)(unsafe.Pointer(sp)) = pc
+ c.setsp(sp)
}
- sp -= sys.PtrSize
- *(*uintptr)(unsafe.Pointer(sp)) = pc
- c.setsp(sp)
}
- c.setpc(funcPC(sigpanic))
+ if usesLR {
+ c.setpc(funcPC(sigpanictramp))
+ } else {
+ c.setpc(funcPC(sigpanic))
+ }
return _NCONT
}
if flags&_SigNotify != 0 {
level, _, docrash = gotraceback()
if level > 0 {
goroutineheader(gp)
- tracebacktrap(c.pc(), c.sp(), 0, gp)
+ tracebacktrap(c.pc(), c.sp(), c.lr(), gp)
tracebackothers(gp)
print("\n")
dumpregs(_ureg)
note := gostringnocopy((*byte)(unsafe.Pointer(g.m.notesig)))
switch g.sig {
case _SIGRFAULT, _SIGWFAULT:
- addr := note[index(note, "addr=")+5:]
+ i := index(note, "addr=")
+ if i >= 0 {
+ i += 5
+ } else if i = index(note, "va="); i >= 0 {
+ i += 3
+ } else {
+ panicmem()
+ }
+ addr := note[i:]
g.sigcode1 = uintptr(atolwhex(addr))
if g.sigcode1 < 0x1000 || g.paniconfault {
panicmem()
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+func checkgoarm() {
+ return // TODO(minux)
+}
+
+//go:nosplit
+func cputicks() int64 {
+ // Currently cputicks() is used in blocking profiler and to seed runtime·fastrand1().
+ // runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
+ // TODO: need more entropy to better seed fastrand1.
+ return nanotime()
+}
"unsafe"
)
+// Calling panic with one of the errors below will call errorString.Error
+// which will call mallocgc to concatenate strings. That will fail if
+// malloc is locked, causing a confusing error message. Throw a better
+// error message instead.
+func panicCheckMalloc(err error) {
+ gp := getg()
+ if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
+ throw(string(err.(errorString)))
+ }
+}
+
var indexError = error(errorString("index out of range"))
func panicindex() {
+ panicCheckMalloc(indexError)
panic(indexError)
}
var sliceError = error(errorString("slice bounds out of range"))
func panicslice() {
+ panicCheckMalloc(sliceError)
panic(sliceError)
}
var divideError = error(errorString("integer divide by zero"))
func panicdivide() {
+ panicCheckMalloc(divideError)
panic(divideError)
}
var overflowError = error(errorString("integer overflow"))
func panicoverflow() {
+ panicCheckMalloc(overflowError)
panic(overflowError)
}
var floatError = error(errorString("floating point error"))
func panicfloat() {
+ panicCheckMalloc(floatError)
panic(floatError)
}
var memoryError = error(errorString("invalid memory address or nil pointer dereference"))
func panicmem() {
+ panicCheckMalloc(memoryError)
panic(memoryError)
}
}
return buf.String()
}
- m := map[string]int{}
+ count := map[string]int{}
+ index := map[string]int{}
+ var keys []string
n := p.Len()
for i := 0; i < n; i++ {
- m[key(p.Stack(i))]++
+ k := key(p.Stack(i))
+ if count[k] == 0 {
+ index[k] = i
+ keys = append(keys, k)
+ }
+ count[k]++
}
- // Print stacks, listing count on first occurrence of a unique stack.
- for i := 0; i < n; i++ {
- stk := p.Stack(i)
- s := key(stk)
- if count := m[s]; count != 0 {
- fmt.Fprintf(w, "%d %s\n", count, s)
- if debug > 0 {
- printStackRecord(w, stk, false)
- }
- delete(m, s)
+ sort.Sort(&keysByCount{keys, count})
+
+ for _, k := range keys {
+ fmt.Fprintf(w, "%d %s\n", count[k], k)
+ if debug > 0 {
+ printStackRecord(w, p.Stack(index[k]), false)
}
}
return b.Flush()
}
+// keysByCount sorts keys with higher counts first, breaking ties by key string order.
+type keysByCount struct {
+ keys []string
+ count map[string]int
+}
+
+func (x *keysByCount) Len() int { return len(x.keys) }
+func (x *keysByCount) Swap(i, j int) { x.keys[i], x.keys[j] = x.keys[j], x.keys[i] }
+func (x *keysByCount) Less(i, j int) bool {
+ ki, kj := x.keys[i], x.keys[j]
+ ci, cj := x.count[ki], x.count[kj]
+ if ci != cj {
+ return ci > cj
+ }
+ return ki < kj
+}
+
// printStackRecord prints the function + source line information
// for a single stack trace.
func printStackRecord(w io.Writer, stk []uintptr, allFrames bool) {
fmt.Fprintf(w, "# NextGC = %d\n", s.NextGC)
fmt.Fprintf(w, "# PauseNs = %d\n", s.PauseNs)
fmt.Fprintf(w, "# NumGC = %d\n", s.NumGC)
- fmt.Fprintf(w, "# EnableGC = %v\n", s.EnableGC)
fmt.Fprintf(w, "# DebugGC = %v\n", s.DebugGC)
if tw != nil {
c.Wait()
mu.Unlock()
}
+
+func func1(c chan int) { <-c }
+func func2(c chan int) { <-c }
+func func3(c chan int) { <-c }
+func func4(c chan int) { <-c }
+
+func TestGoroutineCounts(t *testing.T) {
+ c := make(chan int)
+ for i := 0; i < 100; i++ {
+ if i%10 == 0 {
+ go func1(c)
+ continue
+ }
+ if i%2 == 0 {
+ go func2(c)
+ continue
+ }
+ go func3(c)
+ }
+ time.Sleep(10 * time.Millisecond) // let goroutines block on channel
+
+ var w bytes.Buffer
+ Lookup("goroutine").WriteTo(&w, 1)
+ prof := w.String()
+
+ if !containsInOrder(prof, "\n50 @ ", "\n40 @", "\n10 @", "\n1 @") {
+ t.Errorf("expected sorted goroutine counts:\n%s", prof)
+ }
+
+ close(c)
+
+ time.Sleep(10 * time.Millisecond) // let goroutines exit
+}
+
+func containsInOrder(s string, all ...string) bool {
+ for _, t := range all {
+ i := strings.Index(s, t)
+ if i < 0 {
+ return false
+ }
+ s = s[i+len(t):]
+ }
+ return true
+}
func printslice(s []byte) {
sp := (*slice)(unsafe.Pointer(&s))
print("[", len(s), "/", cap(s), "]")
- printpointer(unsafe.Pointer(sp.array))
+ printpointer(sp.array)
}
func printeface(e eface) {
}
if netpoll {
	// Enable netpoller, affects scheduler behavior.
- ln, err := net.Listen("tcp", "localhost:0")
+ laddr := "localhost:0"
+ if runtime.GOOS == "android" {
+ // On some Android devices, there are no records for localhost,
+ // see https://golang.org/issues/14486.
+ // Don't use 127.0.0.1 for every case, it won't work on IPv6-only systems.
+ laddr = "127.0.0.1:0"
+ }
+ ln, err := net.Listen("tcp", laddr)
if err != nil {
defer ln.Close() // yup, defer in a loop
}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// In Plan 9, argc is at the top of the stack, followed by ptrs to the arguments.
+
+TEXT _rt0_arm_plan9(SB),NOSPLIT,$-4
+ MOVW R0, _tos(SB)
+ MOVW 0(R13), R0
+ MOVW $4(R13), R1
+ MOVW.W R1, -4(R13)
+ MOVW.W R0, -4(R13)
+ B runtime·rt0_go(SB)
+
+GLOBL _tos(SB), NOPTR, $4
k = unsafe.Pointer(uintptr(0xfedcb123))
if sys.PtrSize == 8 {
- k = unsafe.Pointer(uintptr(unsafe.Pointer(k)) << 10)
+ k = unsafe.Pointer(uintptr(k) << 10)
}
if casp(&k, nil, nil) {
throw("casp1")
}
}
if s.releasetime > 0 {
- blockevent(int64(s.releasetime)-t0, 3)
+ blockevent(s.releasetime-t0, 3)
}
releaseSudog(s)
}
s.tail = w
goparkunlock(&s.lock, "semacquire", traceEvGoBlockCond, 3)
if t0 != 0 {
- blockevent(int64(w.releasetime)-t0, 2)
+ blockevent(w.releasetime-t0, 2)
}
releaseSudog(w)
}
c.set_sp(sp)
*(*uint32)(unsafe.Pointer(uintptr(sp))) = c.lr()
- pc := uintptr(gp.sigpc)
+ pc := gp.sigpc
// If we don't recognize the PC as code
// but we do recognize the link register as code,
c.set_sp(sp)
*(*uint64)(unsafe.Pointer(uintptr(sp))) = c.lr()
- pc := uintptr(gp.sigpc)
+ pc := gp.sigpc
// If we don't recognize the PC as code
// but we do recognize the link register as code,
func (c *sigctxt) r15() uint64 { return c.regs().mc_r15 }
func (c *sigctxt) rip() uint64 { return c.regs().mc_rip }
func (c *sigctxt) rflags() uint64 { return c.regs().mc_rflags }
-func (c *sigctxt) cs() uint64 { return uint64(c.regs().mc_cs) }
-func (c *sigctxt) fs() uint64 { return uint64(c.regs().mc_ss) }
-func (c *sigctxt) gs() uint64 { return uint64(c.regs().mc_ss) }
+func (c *sigctxt) cs() uint64 { return c.regs().mc_cs }
+func (c *sigctxt) fs() uint64 { return c.regs().mc_ss }
+func (c *sigctxt) gs() uint64 { return c.regs().mc_ss }
func (c *sigctxt) sigcode() uint64 { return uint64(c.info.si_code) }
-func (c *sigctxt) sigaddr() uint64 { return uint64(c.info.si_addr) }
+func (c *sigctxt) sigaddr() uint64 { return c.info.si_addr }
func (c *sigctxt) set_rip(x uint64) { c.regs().mc_rip = x }
func (c *sigctxt) set_rsp(x uint64) { c.regs().mc_rsp = x }
func (c *sigctxt) esp() uint32 { return c.regs().mc_esp }
func (c *sigctxt) eip() uint32 { return c.regs().mc_eip }
func (c *sigctxt) eflags() uint32 { return c.regs().mc_eflags }
-func (c *sigctxt) cs() uint32 { return uint32(c.regs().mc_cs) }
-func (c *sigctxt) fs() uint32 { return uint32(c.regs().mc_fs) }
-func (c *sigctxt) gs() uint32 { return uint32(c.regs().mc_gs) }
+func (c *sigctxt) cs() uint32 { return c.regs().mc_cs }
+func (c *sigctxt) fs() uint32 { return c.regs().mc_fs }
+func (c *sigctxt) gs() uint32 { return c.regs().mc_gs }
func (c *sigctxt) sigcode() uint32 { return uint32(c.info.si_code) }
func (c *sigctxt) sigaddr() uint32 { return uint32(c.info.si_addr) }
func (c *sigctxt) r15() uint64 { return c.regs().mc_r15 }
func (c *sigctxt) rip() uint64 { return c.regs().mc_rip }
func (c *sigctxt) rflags() uint64 { return c.regs().mc_rflags }
-func (c *sigctxt) cs() uint64 { return uint64(c.regs().mc_cs) }
+func (c *sigctxt) cs() uint64 { return c.regs().mc_cs }
func (c *sigctxt) fs() uint64 { return uint64(c.regs().mc_fs) }
func (c *sigctxt) gs() uint64 { return uint64(c.regs().mc_gs) }
func (c *sigctxt) sigcode() uint64 { return uint64(c.info.si_code) }
-func (c *sigctxt) sigaddr() uint64 { return uint64(c.info.si_addr) }
+func (c *sigctxt) sigaddr() uint64 { return c.info.si_addr }
func (c *sigctxt) set_rip(x uint64) { c.regs().mc_rip = x }
func (c *sigctxt) set_rsp(x uint64) { c.regs().mc_rsp = x }
c.set_sp(sp)
*(*uint64)(unsafe.Pointer(uintptr(sp))) = c.link()
- pc := uintptr(gp.sigpc)
+ pc := gp.sigpc
// If we don't recognize the PC as code
// but we do recognize the link register as code,
func (c *sigctxt) esp() uint32 { return c.regs().__gregs[_REG_UESP] }
func (c *sigctxt) eip() uint32 { return c.regs().__gregs[_REG_EIP] }
func (c *sigctxt) eflags() uint32 { return c.regs().__gregs[_REG_EFL] }
-func (c *sigctxt) cs() uint32 { return uint32(c.regs().__gregs[_REG_CS]) }
-func (c *sigctxt) fs() uint32 { return uint32(c.regs().__gregs[_REG_FS]) }
-func (c *sigctxt) gs() uint32 { return uint32(c.regs().__gregs[_REG_GS]) }
+func (c *sigctxt) cs() uint32 { return c.regs().__gregs[_REG_CS] }
+func (c *sigctxt) fs() uint32 { return c.regs().__gregs[_REG_FS] }
+func (c *sigctxt) gs() uint32 { return c.regs().__gregs[_REG_GS] }
func (c *sigctxt) sigcode() uint32 { return uint32(c.info._code) }
func (c *sigctxt) sigaddr() uint32 {
- return uint32(*(*uint32)(unsafe.Pointer(&c.info._reason[0])))
+ return *(*uint32)(unsafe.Pointer(&c.info._reason[0]))
}
func (c *sigctxt) set_eip(x uint32) { c.regs().__gregs[_REG_EIP] = x }
func (c *sigctxt) gs() uint64 { return c.regs().__gregs[_REG_GS] }
func (c *sigctxt) sigcode() uint64 { return uint64(c.info._code) }
func (c *sigctxt) sigaddr() uint64 {
- return uint64(*(*uint64)(unsafe.Pointer(&c.info._reason[0])))
+ return *(*uint64)(unsafe.Pointer(&c.info._reason[0]))
}
func (c *sigctxt) set_rip(x uint64) { c.regs().__gregs[_REG_RIP] = x }
{_SigThrow, "sys: trap: invalid opcode"},
// We can recover from some memory errors in runtime·sigpanic.
- {_SigPanic, "sys: trap: fault read addr"}, // SIGRFAULT
- {_SigPanic, "sys: trap: fault write addr"}, // SIGWFAULT
+ {_SigPanic, "sys: trap: fault read"}, // SIGRFAULT
+ {_SigPanic, "sys: trap: fault write"}, // SIGWFAULT
// We can also recover from math errors.
{_SigPanic, "sys: trap: divide error"}, // SIGINTDIV
c.set_sp(sp)
*(*uint64)(unsafe.Pointer(uintptr(sp))) = c.link()
- pc := uintptr(gp.sigpc)
+ pc := gp.sigpc
// If we don't recognize the PC as code
// but we do recognize the link register as code,
// won't get to see who faulted.)
if r.ip() != 0 {
sp := unsafe.Pointer(r.sp())
- sp = add(sp, ^uintptr(unsafe.Sizeof(uintptr(0))-1)) // sp--
+ sp = add(sp, ^(unsafe.Sizeof(uintptr(0)) - 1)) // sp--
*((*uintptr)(sp)) = r.ip()
r.setsp(uintptr(sp))
}
throw("unexpected signal during runtime execution")
}
- switch uint32(g.sig) {
+ switch g.sig {
case _EXCEPTION_ACCESS_VIOLATION:
if g.sigcode1 < 0x1000 || g.paniconfault {
panicmem()
// but since the cap is only being supplied implicitly, saying len is clearer.
// See issue 4085.
len := int(len64)
- if len64 < 0 || int64(len) != len64 || t.elem.size > 0 && uintptr(len) > _MaxMem/uintptr(t.elem.size) {
+ if len64 < 0 || int64(len) != len64 || t.elem.size > 0 && uintptr(len) > _MaxMem/t.elem.size {
panic(errorString("makeslice: len out of range"))
}
cap := int(cap64)
- if cap < len || int64(cap) != cap64 || t.elem.size > 0 && uintptr(cap) > _MaxMem/uintptr(t.elem.size) {
+ if cap < len || int64(cap) != cap64 || t.elem.size > 0 && uintptr(cap) > _MaxMem/t.elem.size {
panic(errorString("makeslice: cap out of range"))
}
p := newarray(t.elem, uintptr(cap))
// and it returns a new slice with at least that capacity, with the old data
// copied into it.
func growslice(t *slicetype, old slice, cap int) slice {
- if cap < old.cap || t.elem.size > 0 && uintptr(cap) > _MaxMem/uintptr(t.elem.size) {
+ if cap < old.cap || t.elem.size > 0 && uintptr(cap) > _MaxMem/t.elem.size {
panic(errorString("growslice: cap out of range"))
}
}
}
- if uintptr(newcap) >= _MaxMem/uintptr(et.size) {
+ if uintptr(newcap) >= _MaxMem/et.size {
panic(errorString("growslice: cap out of range"))
}
- lenmem := uintptr(old.len) * uintptr(et.size)
- capmem := roundupsize(uintptr(newcap) * uintptr(et.size))
- newcap = int(capmem / uintptr(et.size))
+ lenmem := uintptr(old.len) * et.size
+ capmem := roundupsize(uintptr(newcap) * et.size)
+ newcap = int(capmem / et.size)
var p unsafe.Pointer
if et.kind&kindNoPointers != 0 {
p = rawmem(capmem)
} else {
memmove(to.array, fm.array, size)
}
- return int(n)
+ return n
}
func slicestringcopy(to []byte, fm string) int {
msanwrite(unsafe.Pointer(&to[0]), uintptr(n))
}
- memmove(unsafe.Pointer(&to[0]), unsafe.Pointer(stringStructOf(&fm).str), uintptr(n))
+ memmove(unsafe.Pointer(&to[0]), stringStructOf(&fm).str, uintptr(n))
return n
}
pc = uint32(funcPC(_sfloatpanic))
break
}
- pc += 4 * uint32(skip)
+ pc += 4 * skip
}
if first {
print("sfloat2 ", pc, " ", hex(*(*uint32)(unsafe.Pointer(uintptr(pc)))), "\n")
minp := adjinfo.old.lo
maxp := adjinfo.old.hi
delta := adjinfo.delta
- num := uintptr(bv.n)
+ num := bv.n
for i := uintptr(0); i < num; i++ {
if stackDebug >= 4 {
print(" ", add(scanp, i*sys.PtrSize), ":", ptrnames[ptrbit(&bv, i)], ":", hex(*(*uintptr)(add(scanp, i*sys.PtrSize))), " # ", i, " ", bv.bytedata[i/8], "\n")
} else {
stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
if stackmap == nil || stackmap.n <= 0 {
- print("runtime: frame ", funcname(f), " untyped args ", frame.argp, "+", uintptr(frame.arglen), "\n")
+ print("runtime: frame ", funcname(f), " untyped args ", frame.argp, "+", frame.arglen, "\n")
throw("missing stackmap")
}
if pcdata < 0 || pcdata >= stackmap.n {
// for i, c := range []byte(str)
str := stringStructOf(&s)
- ret := slice{array: unsafe.Pointer(str.str), len: str.len, cap: str.len}
+ ret := slice{array: str.str, len: str.len, cap: str.len}
return *(*[]byte)(unsafe.Pointer(&ret))
}
for {
ms := maxstring
- if uintptr(size) <= uintptr(ms) || atomic.Casuintptr((*uintptr)(unsafe.Pointer(&maxstring)), uintptr(ms), uintptr(size)) {
+ if uintptr(size) <= ms || atomic.Casuintptr((*uintptr)(unsafe.Pointer(&maxstring)), ms, uintptr(size)) {
return
}
}
func findmoduledatap(pc uintptr) *moduledata {
for datap := &firstmoduledata; datap != nil; datap = datap.next {
- if datap.minpc <= pc && pc <= datap.maxpc {
+ if datap.minpc <= pc && pc < datap.maxpc {
return datap
}
}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "go_tls.h"
+#include "textflag.h"
+
+// from ../syscall/zsysnum_plan9.go
+
+#define SYS_SYSR1 0
+#define SYS_BIND 2
+#define SYS_CHDIR 3
+#define SYS_CLOSE 4
+#define SYS_DUP 5
+#define SYS_ALARM 6
+#define SYS_EXEC 7
+#define SYS_EXITS 8
+#define SYS_FAUTH 10
+#define SYS_SEGBRK 12
+#define SYS_OPEN 14
+#define SYS_OSEEK 16
+#define SYS_SLEEP 17
+#define SYS_RFORK 19
+#define SYS_PIPE 21
+#define SYS_CREATE 22
+#define SYS_FD2PATH 23
+#define SYS_BRK_ 24
+#define SYS_REMOVE 25
+#define SYS_NOTIFY 28
+#define SYS_NOTED 29
+#define SYS_SEGATTACH 30
+#define SYS_SEGDETACH 31
+#define SYS_SEGFREE 32
+#define SYS_SEGFLUSH 33
+#define SYS_RENDEZVOUS 34
+#define SYS_UNMOUNT 35
+#define SYS_SEMACQUIRE 37
+#define SYS_SEMRELEASE 38
+#define SYS_SEEK 39
+#define SYS_FVERSION 40
+#define SYS_ERRSTR 41
+#define SYS_STAT 42
+#define SYS_FSTAT 43
+#define SYS_WSTAT 44
+#define SYS_FWSTAT 45
+#define SYS_MOUNT 46
+#define SYS_AWAIT 47
+#define SYS_PREAD 50
+#define SYS_PWRITE 51
+#define SYS_TSEMACQUIRE 52
+#define SYS_NSEC 53
+
+//func open(name *byte, mode, perm int32) int32
+TEXT runtime·open(SB),NOSPLIT,$0-16
+ MOVW $SYS_OPEN, R0
+ SWI 0
+ MOVW R0, ret+12(FP)
+ RET
+
+//func pread(fd int32, buf unsafe.Pointer, nbytes int32, offset int64) int32
+TEXT runtime·pread(SB),NOSPLIT,$0-24
+ MOVW $SYS_PREAD, R0
+ SWI 0
+ MOVW R0, ret+20(FP)
+ RET
+
+//func pwrite(fd int32, buf unsafe.Pointer, nbytes int32, offset int64) int32
+TEXT runtime·pwrite(SB),NOSPLIT,$0-24
+ MOVW $SYS_PWRITE, R0
+ SWI 0
+ MOVW R0, ret+20(FP)
+ RET
+
+//func seek(fd int32, offset int64, whence int32) int64
+TEXT runtime·seek(SB),NOSPLIT,$0-24
+ MOVW $ret_lo+16(FP), R0
+ MOVW 0(R13), R1
+ MOVW R0, 0(R13)
+ MOVW.W R1, -4(R13)
+ MOVW $SYS_SEEK, R0
+ SWI 0
+ MOVW.W R1, 4(R13)
+ CMP $-1, R0
+ MOVW.EQ R0, ret_lo+16(FP)
+ MOVW.EQ R0, ret_hi+20(FP)
+ RET
+
+//func closefd(fd int32) int32
+TEXT runtime·closefd(SB),NOSPLIT,$0-8
+ MOVW $SYS_CLOSE, R0
+ SWI 0
+ MOVW R0, ret+4(FP)
+ RET
+
+//func exits(msg *byte)
+TEXT runtime·exits(SB),NOSPLIT,$0-4
+ MOVW $SYS_EXITS, R0
+ SWI 0
+ RET
+
+//func brk_(addr unsafe.Pointer) int32
+TEXT runtime·brk_(SB),NOSPLIT,$0-8
+ MOVW $SYS_BRK_, R0
+ SWI 0
+ MOVW R0, ret+4(FP)
+ RET
+
+//func sleep(ms int32) int32
+TEXT runtime·sleep(SB),NOSPLIT,$0-8
+ MOVW $SYS_SLEEP, R0
+ SWI 0
+ MOVW R0, ret+4(FP)
+ RET
+
+//func plan9_semacquire(addr *uint32, block int32) int32
+TEXT runtime·plan9_semacquire(SB),NOSPLIT,$0-12
+ MOVW $SYS_SEMACQUIRE, R0
+ SWI 0
+ MOVW R0, ret+8(FP)
+ RET
+
+//func plan9_tsemacquire(addr *uint32, ms int32) int32
+TEXT runtime·plan9_tsemacquire(SB),NOSPLIT,$0-12
+ MOVW $SYS_TSEMACQUIRE, R0
+ SWI 0
+ MOVW R0, ret+8(FP)
+ RET
+
+//func nsec(*int64) int64
+TEXT runtime·nsec(SB),NOSPLIT,$-4-12
+ MOVW $SYS_NSEC, R0
+ SWI 0
+ MOVW unnamed+0(FP), R1
+ MOVW 0(R1), R0
+ MOVW R0, ret_lo+4(FP)
+ MOVW 4(R1), R0
+ MOVW R0, ret_hi+8(FP)
+ RET
+
+// time.now() (sec int64, nsec int32)
+TEXT time·now(SB),NOSPLIT,$12-12
+ // use nsec system call to get current time in nanoseconds
+ MOVW $sysnsec_lo-8(SP), R0 // destination addr
+ MOVW R0,res-12(SP)
+ MOVW $SYS_NSEC, R0
+ SWI 0
+ MOVW sysnsec_lo-8(SP), R1 // R1:R2 = nsec
+ MOVW sysnsec_hi-4(SP), R2
+
+ // multiply nanoseconds by reciprocal of 10**9 (scaled by 2**61)
+ // to get seconds (96 bit scaled result)
+ MOVW $0x89705f41, R3 // 2**61 * 10**-9
+ MULLU R1,R3,(R6,R5) // R5:R6:R7 = R1:R2 * R3
+ MOVW $0,R7
+ MULALU R2,R3,(R7,R6)
+
+ // unscale by discarding low 32 bits, shifting the rest by 29
+ MOVW R6>>29,R6 // R6:R7 = (R5:R6:R7 >> 61)
+ ORR R7<<3,R6
+ MOVW R7>>29,R7
+
+ // subtract (10**9 * sec) from nsec to get nanosecond remainder
+ MOVW $1000000000, R5 // 10**9
+ MULLU R6,R5,(R9,R8) // R8:R9 = R6:R7 * R5
+ MULA R7,R5,R9,R9
+ SUB.S R8,R1 // R1:R2 -= R8:R9
+ SBC R9,R2
+
+ // because reciprocal was a truncated repeating fraction, quotient
+ // may be slightly too small -- adjust to make remainder < 10**9
+ CMP R5,R1 // if remainder > 10**9
+ SUB.HS R5,R1 // remainder -= 10**9
+ ADD.HS $1,R6 // sec += 1
+
+ MOVW R6,sec_lo+0(FP)
+ MOVW R7,sec_hi+4(FP)
+ MOVW R1,nsec+8(FP)
+ RET
+
+//func notify(fn unsafe.Pointer) int32
+TEXT runtime·notify(SB),NOSPLIT,$0-8
+ MOVW $SYS_NOTIFY, R0
+ SWI 0
+ MOVW R0, ret+4(FP)
+ RET
+
+//func noted(mode int32) int32
+TEXT runtime·noted(SB),NOSPLIT,$0-8
+ MOVW $SYS_NOTED, R0
+ SWI 0
+ MOVW R0, ret+4(FP)
+ RET
+
+//func plan9_semrelease(addr *uint32, count int32) int32
+TEXT runtime·plan9_semrelease(SB),NOSPLIT,$0-12
+ MOVW $SYS_SEMRELEASE, R0
+ SWI 0
+ MOVW R0, ret+8(FP)
+ RET
+
+//func rfork(flags int32) int32
+TEXT runtime·rfork(SB),NOSPLIT,$0-8
+ MOVW $SYS_RFORK, R0
+ SWI 0
+ MOVW R0, ret+4(FP)
+ RET
+
+//func tstart_plan9(newm *m)
+TEXT runtime·tstart_plan9(SB),NOSPLIT,$0-4
+ MOVW newm+0(FP), R1
+ MOVW m_g0(R1), g
+
+ // Layout new m scheduler stack on os stack.
+ MOVW R13, R0
+ MOVW R0, g_stack+stack_hi(g)
+ SUB $(64*1024), R0
+ MOVW R0, (g_stack+stack_lo)(g)
+ MOVW R0, g_stackguard0(g)
+ MOVW R0, g_stackguard1(g)
+
+ // Initialize procid from TOS struct.
+ MOVW _tos(SB), R0
+ MOVW 48(R0), R0
+ MOVW R0, m_procid(R1) // save pid as m->procid
+
+ BL runtime·mstart(SB)
+
+ MOVW $0x1234, R0
+ MOVW R0, 0(R0) // not reached
+ RET
+
+//func sigtramp(ureg, msg unsafe.Pointer)
+TEXT runtime·sigtramp(SB),NOSPLIT,$0-8
+ // check that g and m exist
+ CMP $0, g
+ BEQ 4(PC)
+ MOVW g_m(g), R0
+ CMP $0, R0
+ BNE 2(PC)
+ BL runtime·badsignal2(SB) // will exit
+
+ // save args
+ MOVW ureg+0(FP), R1
+ MOVW msg+4(FP), R2
+
+ // change stack
+ MOVW m_gsignal(R0), R3
+ MOVW (g_stack+stack_hi)(R3), R13
+
+ // make room for args, retval and g
+ SUB $24, R13
+
+ // save g
+ MOVW g, R3
+ MOVW R3, 20(R13)
+
+ // g = m->gsignal
+ MOVW m_gsignal(R0), g
+
+ // load args and call sighandler
+ ADD $4,R13,R5
+ MOVM.IA [R1-R3], (R5)
+ BL runtime·sighandler(SB)
+ MOVW 16(R13), R0 // retval
+
+ // restore g
+ MOVW 20(R13), g
+
+ // call noted(R0)
+ MOVW R0, 4(R13)
+ BL runtime·noted(SB)
+ RET
+
+//func sigpanictramp()
+TEXT runtime·sigpanictramp(SB),NOSPLIT,$0-0
+ MOVW.W R0, -4(R13)
+ B runtime·sigpanic(SB)
+
+//func setfpmasks()
+// Only used by the 64-bit runtime.
+TEXT runtime·setfpmasks(SB),NOSPLIT,$0
+ RET
+
+#define ERRMAX 128 /* from os_plan9.h */
+
+// func errstr() string
+// Only used by package syscall.
+// Grab error string due to a syscall made
+// in entersyscall mode, without going
+// through the allocator (issue 4994).
+// See ../syscall/asm_plan9_arm.s:/·Syscall/
+TEXT runtime·errstr(SB),NOSPLIT,$0-8
+ MOVW g_m(g), R0
+ MOVW (m_mOS+mOS_errstr)(R0), R1
+ MOVW R1, ret_base+0(FP)
+ MOVW $ERRMAX, R2
+ MOVW R2, ret_len+4(FP)
+ MOVW $SYS_ERRSTR, R0
+ SWI 0
+ MOVW R1, R2
+ MOVBU 0(R2), R0
+ CMP $0, R0
+ BEQ 3(PC)
+ ADD $1, R2
+ B -4(PC)
+ SUB R1, R2
+ MOVW R2, ret_len+4(FP)
+ RET
+
+TEXT ·publicationBarrier(SB),NOSPLIT,$-4-0
+ B runtime·armPublicationBarrier(SB)
+
+// never called (cgo not supported)
+TEXT runtime·read_tls_fallback(SB),NOSPLIT,$-4
+ MOVW $0, R0
+ MOVW R0, (R0)
+ RET
panic("compileCallback: not a function")
}
ft := (*functype)(unsafe.Pointer(fn._type))
- if len(ft.out) != 1 {
+ if len(ft.out()) != 1 {
panic("compileCallback: function must have one output parameter")
}
uintptrSize := unsafe.Sizeof(uintptr(0))
- if ft.out[0].size != uintptrSize {
+ if ft.out()[0].size != uintptrSize {
panic("compileCallback: output parameter size is wrong")
}
argsize := uintptr(0)
- for _, t := range ft.in {
+ for _, t := range ft.in() {
if t.size > uintptrSize {
panic("compileCallback: input parameter size is wrong")
}
--- /dev/null
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+// Test that SIGPROF received in C code does not crash the process
+// looking for the C code's func pointer.
+
+// The test fails when the function is the first C function.
+// The exported functions are the first C functions, so we use that.
+
+// extern void GoNop();
+import "C"
+
+import (
+ "bytes"
+ "fmt"
+ "runtime/pprof"
+)
+
+func init() {
+ register("CgoCCodeSIGPROF", CgoCCodeSIGPROF)
+}
+
+//export GoNop
+func GoNop() {}
+
+func CgoCCodeSIGPROF() {
+ c := make(chan bool)
+ go func() {
+ for {
+ <-c
+ for i := 0; i < 1e7; i++ {
+ C.GoNop()
+ }
+ c <- true
+ }
+ }()
+
+ var buf bytes.Buffer
+ pprof.StartCPUProfile(&buf)
+ c <- true
+ <-c
+ pprof.StopCPUProfile()
+
+ fmt.Println("OK")
+}
want = append(want, []eventDesc{
{trace.EvGoBlockNet, []frame{
{"net.(*netFD).accept", 0},
- {"net.(*TCPListener).AcceptTCP", 0},
+ {"net.(*TCPListener).accept", 0},
{"net.(*TCPListener).Accept", 0},
{"runtime/trace_test.TestTraceSymbolize.func10", 86},
}},
frame.arglen = 0
frame.argmap = nil
} else {
- frame.pc = uintptr(fn.fn)
+ frame.pc = fn.fn
f := findfunc(frame.pc)
if f == nil {
print("runtime: unknown pc in defer ", hex(frame.pc), "\n")
printing := pcbuf == nil && callback == nil
_defer := gp._defer
- for _defer != nil && uintptr(_defer.sp) == _NoArgs {
+ for _defer != nil && _defer.sp == _NoArgs {
_defer = _defer.link
}
func callers(skip int, pcbuf []uintptr) int {
sp := getcallersp(unsafe.Pointer(&skip))
- pc := uintptr(getcallerpc(unsafe.Pointer(&skip)))
+ pc := getcallerpc(unsafe.Pointer(&skip))
gp := getg()
var n int
systemstack(func() {
import "unsafe"
+// tflag is documented in ../reflect/type.go.
+type tflag uint8
+
+const tflagUncommon tflag = 1
+
// Needs to be in sync with ../cmd/compile/internal/ld/decodesym.go:/^func.commonsize,
// ../cmd/compile/internal/gc/reflect.go:/^func.dcommontype and
// ../reflect/type.go:/^type.rtype.
size uintptr
ptrdata uintptr // size of memory prefix holding all pointers
hash uint32
- _unused uint8
+ tflag tflag
align uint8
fieldalign uint8
kind uint8
// Otherwise it is a ptrmask bitmap. See mbitmap.go for details.
gcdata *byte
_string string
- x *uncommontype
+}
+
+func (t *_type) uncommon() *uncommontype {
+ if t.tflag&tflagUncommon == 0 {
+ return nil
+ }
+ switch t.kind & kindMask {
+ case kindStruct:
+ type u struct {
+ structtype
+ u uncommontype
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ case kindPtr:
+ type u struct {
+ ptrtype
+ u uncommontype
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ case kindFunc:
+ type u struct {
+ functype
+ u uncommontype
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ case kindSlice:
+ type u struct {
+ slicetype
+ u uncommontype
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ case kindArray:
+ type u struct {
+ arraytype
+ u uncommontype
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ case kindChan:
+ type u struct {
+ chantype
+ u uncommontype
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ case kindMap:
+ type u struct {
+ maptype
+ u uncommontype
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ case kindInterface:
+ type u struct {
+ interfacetype
+ u uncommontype
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ default:
+ type u struct {
+ _type
+ u uncommontype
+ }
+ return &(*u)(unsafe.Pointer(t)).u
+ }
}
func hasPrefix(s, prefix string) bool {
if hasPrefix(t._string, "chan ") {
return ""
}
+ if hasPrefix(t._string, "chan<-") {
+ return ""
+ }
if hasPrefix(t._string, "func(") {
return ""
}
- if t._string[0] == '[' || t._string[0] == '*' {
+ switch t._string[0] {
+ case '[', '*', '<':
return ""
}
i := len(t._string) - 1
return t._string[i+1:]
}
+func (t *functype) in() []*_type {
+ // See funcType in reflect/type.go for details on data layout.
+ uadd := uintptr(unsafe.Sizeof(functype{}))
+ if t.typ.tflag&tflagUncommon != 0 {
+ uadd += unsafe.Sizeof(uncommontype{})
+ }
+ return (*[1 << 20]*_type)(add(unsafe.Pointer(t), uadd))[:t.inCount]
+}
+
+func (t *functype) out() []*_type {
+ // See funcType in reflect/type.go for details on data layout.
+ uadd := uintptr(unsafe.Sizeof(functype{}))
+ if t.typ.tflag&tflagUncommon != 0 {
+ uadd += unsafe.Sizeof(uncommontype{})
+ }
+ outCount := t.outCount & (1<<15 - 1)
+ return (*[1 << 20]*_type)(add(unsafe.Pointer(t), uadd))[t.inCount : t.inCount+outCount]
+}
+
+func (t *functype) dotdotdot() bool {
+ return t.outCount&(1<<15) != 0
+}
+
type method struct {
name *string
pkgpath *string
}
type functype struct {
- typ _type
- dotdotdot bool
- in []*_type
- out []*_type
+ typ _type
+ inCount uint16
+ outCount uint16
}
type ptrtype struct {
// license that can be found in the LICENSE file.
#include "textflag.h"
+#include "funcdata.h"
#define SYS_SEEK 39 /* from zsysnum_plan9.go */
RET
//func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err ErrorString)
-TEXT ·Syscall(SB),NOSPLIT,$0
+TEXT ·Syscall(SB),NOSPLIT,$0-32
BL runtime·entersyscall(SB)
MOVW trap+0(FP), R0 // syscall num
MOVM.IA.W (R13),[R1-R2] // pop LR and caller's LR
//func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err ErrorString)
// Actually Syscall5 but the rest of the code expects it to be named Syscall6.
-TEXT ·Syscall6(SB),NOSPLIT,$0
+TEXT ·Syscall6(SB),NOSPLIT,$0-44
BL runtime·entersyscall(SB)
MOVW trap+0(FP), R0 // syscall num
MOVM.IA.W (R13),[R1-R2] // pop LR and caller's LR
RET
//func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr)
-TEXT ·RawSyscall(SB),NOSPLIT,$0
+TEXT ·RawSyscall(SB),NOSPLIT,$0-28
MOVW trap+0(FP), R0 // syscall num
MOVM.IA.W (R13),[R1] // pop caller's LR
SWI 0
//func RawSyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr)
// Actually RawSyscall5 but the rest of the code expects it to be named RawSyscall6.
-TEXT ·RawSyscall6(SB),NOSPLIT,$0
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
MOVW trap+0(FP), R0 // syscall num
MOVM.IA.W (R13),[R1] // pop caller's LR
SWI 0
RET
//func seek(placeholder uintptr, fd int, offset int64, whence int) (newoffset int64, err string)
-TEXT ·seek(SB),NOSPLIT,$0
+TEXT ·seek(SB),NOSPLIT,$0-36
MOVW $newoffset_lo+20(FP), R5
MOVW R5, placeholder+0(FP) //placeholder = dest for return value
MOVW $SYS_SEEK, R0 // syscall num
//func exit(code int)
// Import runtime·exit for cleanly exiting.
-TEXT ·exit(SB),NOSPLIT,$4
+TEXT ·exit(SB),NOSPLIT,$4-4
+ NO_LOCAL_POINTERS
MOVW code+0(FP), R0
MOVW R0, e-4(SP)
BL runtime·exit(SB)
if os.IsNotExist(err) {
t.Skip("kernel doesn't support user namespaces")
}
+ if os.IsPermission(err) {
+ t.Skip("unable to test user namespaces due to permissions")
+ }
t.Fatalf("Failed to stat /proc/self/ns/user: %v", err)
}
if isChrooted(t) {
"fmt"
"io"
"os"
+ "sort"
"strings"
"time"
)
type InternalExample struct {
- Name string
- F func()
- Output string
+ Name string
+ F func()
+ Output string
+ Unordered bool
}
func RunExamples(matchString func(pat, str string) (bool, error), examples []InternalExample) (ok bool) {
return
}
+func sortLines(output string) string {
+ lines := strings.Split(output, "\n")
+ sort.Strings(lines)
+ return strings.Join(lines, "\n")
+}
+
func runExample(eg InternalExample) (ok bool) {
if *chatty {
fmt.Printf("=== RUN %s\n", eg.Name)
var fail string
err := recover()
- if g, e := strings.TrimSpace(out), strings.TrimSpace(eg.Output); g != e && err == nil {
- fail = fmt.Sprintf("got:\n%s\nwant:\n%s\n", g, e)
+ got := strings.TrimSpace(out)
+ want := strings.TrimSpace(eg.Output)
+ if eg.Unordered {
+ if sortLines(got) != sortLines(want) && err == nil {
+ fail = fmt.Sprintf("got:\n%s\nwant (unordered):\n%s\n", out, eg.Output)
+ }
+ } else {
+ if got != want && err == nil {
+ fail = fmt.Sprintf("got:\n%s\nwant:\n%s\n", got, want)
+ }
}
if fail != "" || err != nil {
fmt.Printf("--- FAIL: %s (%s)\n%s", eg.Name, dstr, fail)
--- /dev/null
+// errorcheck
+
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Issue 12525: confusing error trying to increment boolean value
+
+package main
+
+func main() {
+ var i int
+ i++
+
+ var f float64
+ f++
+
+ var c complex128
+ c++
+
+ var b bool
+ b++ // ERROR "invalid operation: b\+\+ \(non-numeric type bool\)"
+
+ var s string
+ s-- // ERROR "invalid operation: s-- \(non-numeric type string\)"
+}
--- /dev/null
+// run
+
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This test checks if the compiler's internal constant
+// arithmetic correctly rounds denormal float32 values.
+
+package main
+
+import (
+ "fmt"
+ "math"
+)
+
+func main() {
+ for _, t := range []struct {
+ value float32
+ bits uint32
+ }{
+ {0e+00, 0x00000000},
+ {1e-45, 0x00000000},
+ {2e-45, 0x00000001},
+ {3e-45, 0x00000002},
+ {4e-45, 0x00000003},
+ {5e-45, 0x00000004},
+ {6e-45, 0x00000004},
+ {7e-45, 0x00000005},
+ {8e-45, 0x00000006},
+ {9e-45, 0x00000006},
+ {1.0e-44, 0x00000007},
+ {1.1e-44, 0x00000008},
+ {1.2e-44, 0x00000009},
+ } {
+ got := math.Float32bits(t.value)
+ want := t.bits
+ if got != want {
+ panic(fmt.Sprintf("bits(%g) = 0x%08x; want 0x%08x", t.value, got, want))
+ }
+ }
+}
--- /dev/null
+// +build !nacl,!android,!darwin darwin,!arm
+// run
+
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "bytes"
+ "log"
+ "os/exec"
+ "strings"
+)
+
+func main() {
+ checkLinkOutput("", "-B argument must start with 0x")
+ checkLinkOutput("0", "-B argument must start with 0x")
+ checkLinkOutput("0x", "usage")
+ checkLinkOutput("0x0", "-B argument must have even number of digits")
+ checkLinkOutput("0x00", "usage")
+ checkLinkOutput("0xYZ", "-B argument contains invalid hex digit")
+ checkLinkOutput("0x"+strings.Repeat("00", 32), "usage")
+ checkLinkOutput("0x"+strings.Repeat("00", 33), "-B option too long (max 32 digits)")
+}
+
+func checkLinkOutput(buildid string, message string) {
+ cmd := exec.Command("go", "tool", "link", "-B", buildid)
+ out, err := cmd.CombinedOutput()
+ if err == nil {
+ log.Fatalf("expected cmd/link to fail")
+ }
+
+ firstLine := string(bytes.SplitN(out, []byte("\n"), 2)[0])
+ if strings.HasPrefix(firstLine, "panic") {
+ log.Fatalf("cmd/link panicked:\n%s", out)
+ }
+
+ if !strings.Contains(firstLine, message) {
+ log.Fatalf("cmd/link output did not include expected message %q: %s", message, firstLine)
+ }
+}
--- /dev/null
+// run
+
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "runtime"
+
+func main() {
+ var file string
+ var line int
+ func() {
+ defer func() {
+ _, file, line, _ = runtime.Caller(1)
+ }()
+ }() // this is the expected line
+ const EXPECTED = 18
+ if line != EXPECTED {
+ println("Expected line =", EXPECTED, "but got line =", line, "and file =", file)
+ }
+}
--- /dev/null
+// errorcheck
+
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+var x any // ERROR "undefined: any"