bEnd := s.f.NewBlock(ssa.BlockPlain)
aux := &ssa.ExternSymbol{Types[TBOOL], syslook("writeBarrier", 0).Sym}
- flagaddr := s.newValue1A(ssa.OpAddr, Ptrto(Types[TBOOL]), aux, s.sb)
+ flagaddr := s.newValue1A(ssa.OpAddr, Ptrto(Types[TUINT32]), aux, s.sb)
// TODO: select the .enabled field. It is currently first, so not needed for now.
- flag := s.newValue2(ssa.OpLoad, Types[TBOOL], flagaddr, s.mem())
+ // Load word, test byte, avoiding partial register write from load byte.
+ flag := s.newValue2(ssa.OpLoad, Types[TUINT32], flagaddr, s.mem())
+ flag = s.newValue1(ssa.OpTrunc64to8, Types[TBOOL], flag)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.Likely = ssa.BranchUnlikely
bEnd := s.f.NewBlock(ssa.BlockPlain)
aux := &ssa.ExternSymbol{Types[TBOOL], syslook("writeBarrier", 0).Sym}
- flagaddr := s.newValue1A(ssa.OpAddr, Ptrto(Types[TBOOL]), aux, s.sb)
+ flagaddr := s.newValue1A(ssa.OpAddr, Ptrto(Types[TUINT32]), aux, s.sb)
// TODO: select the .enabled field. It is currently first, so not needed for now.
- flag := s.newValue2(ssa.OpLoad, Types[TBOOL], flagaddr, s.mem())
+ // Load word, test byte, avoiding partial register write from load byte.
+ flag := s.newValue2(ssa.OpLoad, Types[TUINT32], flagaddr, s.mem())
+ flag = s.newValue1(ssa.OpTrunc64to8, Types[TBOOL], flag)
b := s.endBlock()
b.Kind = ssa.BlockIf
b.Likely = ssa.BranchUnlikely
if t.IsFloat() {
// Moving the whole sse2 register is faster
// than moving just the correct low portion of it.
- return x86.AMOVAPD
+ // There is no xmm->xmm move with a 1-byte opcode,
+ // so use movups, which has a 2-byte opcode.
+ return x86.AMOVUPS
} else {
switch t.Size() {
case 1:
-------------------------------
- Smaller Value.Type (int32 or ptr)? Get rid of types altogether?
- OpStore uses 3 args. Increase the size of Value.argstorage to 3?
-- Constant cache
+- Use a constant cache for OpConstNil, OpConstInterface, OpConstSlice, maybe OpConstString
- Handle signed division overflow and sign extension earlier
- Implement 64 bit const division with high multiply, maybe in the frontend?
- Add bit widths to complex ops
{name: "opt deadcode", fn: deadcode}, // remove any blocks orphaned during opt
{name: "generic cse", fn: cse},
{name: "nilcheckelim", fn: nilcheckelim},
+ {name: "prove", fn: prove},
{name: "generic deadcode", fn: deadcode},
{name: "fuse", fn: fuse},
{name: "dse", fn: dse},
}
var passOrder = [...]constraint{
+ // prove relies on common-subexpression elimination for maximum benefit.
+ {"generic cse", "prove"},
+ // deadcode after prove to eliminate all new dead blocks.
+ {"prove", "generic deadcode"},
// common-subexpression before dead-store elim, so that we recognize
// when two address expressions are the same.
{"generic cse", "dse"},
freeValues *Value // free Values linked by argstorage[0]. All other fields except ID are 0/nil.
freeBlocks *Block // free Blocks linked by succstorage[0]. All other fields except ID are 0/nil.
+
+ constants map[int64][]*Value // constants cache, keyed by constant value; users must check value's Op and Type
}
// NumBlocks returns an integer larger than the id of any Block in the Func.
return v
}
+// constVal returns a constant Value for c with the given op and type,
+// reusing a cached value from the entry block when one exists.
+func (f *Func) constVal(line int32, op Op, t Type, c int64) *Value {
+ if f.constants == nil {
+ f.constants = make(map[int64][]*Value)
+ }
+ vv := f.constants[c]
+ for _, v := range vv {
+ if v.Op == op && v.Type.Equal(t) {
+ return v
+ }
+ }
+ v := f.Entry.NewValue0I(line, op, t, c)
+ f.constants[c] = append(vv, v)
+ return v
+}
+
// ConstInt returns an int constant representing its argument.
func (f *Func) ConstBool(line int32, t Type, c bool) *Value {
- // TODO: cache?
i := int64(0)
if c {
i = 1
}
- return f.Entry.NewValue0I(line, OpConstBool, t, i)
+ return f.constVal(line, OpConstBool, t, i)
}
func (f *Func) ConstInt8(line int32, t Type, c int8) *Value {
- // TODO: cache?
- return f.Entry.NewValue0I(line, OpConst8, t, int64(c))
+ return f.constVal(line, OpConst8, t, int64(c))
}
func (f *Func) ConstInt16(line int32, t Type, c int16) *Value {
- // TODO: cache?
- return f.Entry.NewValue0I(line, OpConst16, t, int64(c))
+ return f.constVal(line, OpConst16, t, int64(c))
}
func (f *Func) ConstInt32(line int32, t Type, c int32) *Value {
- // TODO: cache?
- return f.Entry.NewValue0I(line, OpConst32, t, int64(c))
+ return f.constVal(line, OpConst32, t, int64(c))
}
func (f *Func) ConstInt64(line int32, t Type, c int64) *Value {
- // TODO: cache?
- return f.Entry.NewValue0I(line, OpConst64, t, c)
+ return f.constVal(line, OpConst64, t, c)
}
func (f *Func) ConstFloat32(line int32, t Type, c float64) *Value {
- // TODO: cache?
- return f.Entry.NewValue0I(line, OpConst32F, t, int64(math.Float64bits(c)))
+ return f.constVal(line, OpConst32F, t, int64(math.Float64bits(c)))
}
func (f *Func) ConstFloat64(line int32, t Type, c float64) *Value {
- // TODO: cache?
- return f.Entry.NewValue0I(line, OpConst64F, t, int64(math.Float64bits(c)))
+ return f.constVal(line, OpConst64F, t, int64(math.Float64bits(c)))
}
func (f *Func) Logf(msg string, args ...interface{}) { f.Config.Logf(msg, args...) }
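A standalone sketch of the caching shape used by constVal above: the map is keyed by the raw int64, and each key holds a small candidate list that is disambiguated by op and type (the same bits may encode, say, an int64 and the representation of a float). The program below is only an illustration; the type and function names are invented and it is not part of the ssa package.

package main

import "fmt"

type op uint8

type val struct {
	op  op
	typ string
	c   int64
}

type fn struct {
	constants map[int64][]*val // one key may back several ops/types
}

func (f *fn) constVal(o op, typ string, c int64) *val {
	if f.constants == nil {
		f.constants = make(map[int64][]*val)
	}
	for _, v := range f.constants[c] {
		if v.op == o && v.typ == typ {
			return v // reuse the cached constant
		}
	}
	v := &val{o, typ, c}
	f.constants[c] = append(f.constants[c], v)
	return v
}

func main() {
	var f fn
	a := f.constVal(1, "int64", 0)
	b := f.constVal(1, "int64", 0)
	fmt.Println(a == b) // true: the second request hits the cache
}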
(MOVLQSX (MOVLload [off] {sym} ptr mem)) -> @v.Args[0].Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
(MOVLQZX (MOVLload [off] {sym} ptr mem)) -> @v.Args[0].Block (MOVLQZXload <v.Type> [off] {sym} ptr mem)
+// Replace a load from the same location as an immediately preceding store with a copy of the stored value.
+(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
+(MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
+(MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
+(MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
+
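A hedged example of the Go code these new rules target (the type and function names below are invented for illustration): the reload of p.x immediately after the store can be replaced by the stored value, so the second memory access disappears.

package demo

type point struct{ x int64 }

// setAndUse stores v and immediately reloads the same location;
// with the MOVQload rule above, the reload becomes a copy of v.
func setAndUse(p *point, v int64) int64 {
	p.x = v
	return p.x
}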
// Fold extensions and ANDs together.
(MOVBQZX (ANDBconst [c] x)) -> (ANDQconst [c & 0xff] x)
(MOVWQZX (ANDWconst [c] x)) -> (ANDQconst [c & 0xffff] x)
genResult0(w, arch, result, new(int), true, move)
}
func genResult0(w io.Writer, arch arch, result string, alloc *int, top, move bool) string {
+ // TODO: when generating a constant result, use f.constVal to avoid
+ // introducing copies just to clean them up again.
if result[0] != '(' {
// variable
if top {
--- /dev/null
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// rangeMask represents the possible relations between a pair of variables.
+type rangeMask uint
+
+const (
+ lt rangeMask = 1 << iota
+ eq
+ gt
+)
+
+// typeMask represents the universe of a variable pair in which
+// a set of relations is known.
+// For example, information learned for unsigned pairs cannot
+// be transferred to signed pairs because the same bit representation
+// can mean something else.
+type typeMask uint
+
+const (
+ signed typeMask = 1 << iota
+ unsigned
+ pointer
+)
+
+type typeRange struct {
+ t typeMask
+ r rangeMask
+}
+
+type control struct {
+ tm typeMask
+ a0, a1 ID
+}
+
+var (
+ reverseBits = [...]rangeMask{0, 4, 2, 6, 1, 5, 3, 7}
+
+ // typeRangeTable maps a comparison op to what we learn when its
+ // positive branch is taken. For example, the entry
+ //      OpLess8: {signed, lt}
+ // means that for v1 = (OpLess8 v2 v3), if the v1 branch is taken
+ // then the rangeMask for (v2, v3) can be at most lt.
+ typeRangeTable = map[Op]typeRange{
+ OpEq8: {signed | unsigned, eq},
+ OpEq16: {signed | unsigned, eq},
+ OpEq32: {signed | unsigned, eq},
+ OpEq64: {signed | unsigned, eq},
+ OpEqPtr: {pointer, eq},
+
+ OpNeq8: {signed | unsigned, lt | gt},
+ OpNeq16: {signed | unsigned, lt | gt},
+ OpNeq32: {signed | unsigned, lt | gt},
+ OpNeq64: {signed | unsigned, lt | gt},
+ OpNeqPtr: {pointer, lt | gt},
+
+ OpLess8: {signed, lt},
+ OpLess8U: {unsigned, lt},
+ OpLess16: {signed, lt},
+ OpLess16U: {unsigned, lt},
+ OpLess32: {signed, lt},
+ OpLess32U: {unsigned, lt},
+ OpLess64: {signed, lt},
+ OpLess64U: {unsigned, lt},
+
+ OpLeq8: {signed, lt | eq},
+ OpLeq8U: {unsigned, lt | eq},
+ OpLeq16: {signed, lt | eq},
+ OpLeq16U: {unsigned, lt | eq},
+ OpLeq32: {signed, lt | eq},
+ OpLeq32U: {unsigned, lt | eq},
+ OpLeq64: {signed, lt | eq},
+ OpLeq64U: {unsigned, lt | eq},
+
+ OpGeq8: {signed, eq | gt},
+ OpGeq8U: {unsigned, eq | gt},
+ OpGeq16: {signed, eq | gt},
+ OpGeq16U: {unsigned, eq | gt},
+ OpGeq32: {signed, eq | gt},
+ OpGeq32U: {unsigned, eq | gt},
+ OpGeq64: {signed, eq | gt},
+ OpGeq64U: {unsigned, eq | gt},
+
+ OpGreater8: {signed, gt},
+ OpGreater8U: {unsigned, gt},
+ OpGreater16: {signed, gt},
+ OpGreater16U: {unsigned, gt},
+ OpGreater32: {signed, gt},
+ OpGreater32U: {unsigned, gt},
+ OpGreater64: {signed, gt},
+ OpGreater64U: {unsigned, gt},
+
+ // TODO: OpIsInBounds actually tests 0 <= a < b. This means
+ // that the positive branch learns signed/LT and unsigned/LT
+ // but the negative branch only learns unsigned/GE.
+ OpIsInBounds: {unsigned, lt},
+ OpIsSliceInBounds: {unsigned, lt | eq},
+ }
+)
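As a toy illustration of how these masks compose (a standalone program, not part of the pass): taking a branch intersects the known relation set for a pair of values, and a later dominated comparison is proved or disproved by checking whether the remaining set is contained in that comparison's relation set (see simplifyBlock below).

package main

import "fmt"

const (
	lt uint = 1 << iota
	eq
	gt
)

func main() {
	known := lt | eq | gt // nothing known about the pair (a, b) yet
	known &= lt           // the true branch of "if a < b" was taken

	test := eq // a dominated "if a == b" has relation set eq
	fmt.Println(known != 0 && test&known == known)              // false: the positive branch cannot be proved
	fmt.Println(known != 0 && ((lt|eq|gt)^test)&known == known) // true: the negative branch is proved, Eq is disproved
}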
+
+// prove removes redundant BlockIf controls that can be inferred in a straight line.
+//
+// By far, the most common redundant controls are generated by bounds checking.
+// For example for the code:
+//
+// a[i] = 4
+// foo(a[i])
+//
+// The compiler will generate the following code:
+//
+// if i >= len(a) {
+// panic("not in bounds")
+// }
+// a[i] = 4
+// if i >= len(a) {
+// panic("not in bounds")
+// }
+// foo(a[i])
+//
+// The second comparison i >= len(a) is clearly redundant because if the
+// else branch of the first comparison is executed, we already know that i < len(a).
+// The code for the second panic can be removed.
+func prove(f *Func) {
+ idom := dominators(f)
+ sdom := newSparseTree(f, idom)
+
+ // current node state
+ type walkState int
+ const (
+ descend walkState = iota
+ simplify
+ )
+ // work maintains the DFS stack.
+ type bp struct {
+ block *Block // block currently being processed
+ state walkState // next action to take for this block
+ saved []typeRange // save previous map entries modified by node
+ }
+ work := make([]bp, 0, 256)
+ work = append(work, bp{
+ block: f.Entry,
+ state: descend,
+ })
+
+ // mask keeps track of restrictions for each pair of values,
+ // accumulated from the dominators of the current node.
+ // Invariant: a0.ID <= a1.ID
+ // For example {unsigned, a0, a1} -> eq|gt means that from
+ // predecessors we know that a0 must be greater than or equal
+ // to a1.
+ mask := make(map[control]rangeMask)
+
+ // DFS on the dominator tree.
+ for len(work) > 0 {
+ node := work[len(work)-1]
+ work = work[:len(work)-1]
+
+ switch node.state {
+ case descend:
+ parent := idom[node.block.ID]
+ tr := getRestrict(sdom, parent, node.block)
+ saved := updateRestrictions(mask, parent, tr)
+
+ work = append(work, bp{
+ block: node.block,
+ state: simplify,
+ saved: saved,
+ })
+
+ for s := sdom.Child(node.block); s != nil; s = sdom.Sibling(s) {
+ work = append(work, bp{
+ block: s,
+ state: descend,
+ })
+ }
+
+ case simplify:
+ simplifyBlock(mask, node.block)
+ restoreRestrictions(mask, idom[node.block.ID], node.saved)
+ }
+ }
+}
+
+// getRestrict returns the range restrictions added by p
+// when reaching b. p is the immediate dominator of b.
+func getRestrict(sdom sparseTree, p *Block, b *Block) typeRange {
+ if p == nil || p.Kind != BlockIf {
+ return typeRange{}
+ }
+ tr, has := typeRangeTable[p.Control.Op]
+ if !has {
+ return typeRange{}
+ }
+ // If p and p.Succs[0] both dominate b, every path from entry
+ // to b passes through p and p.Succs[0]. What matters is that
+ // no path from entry to b passes through p.Succs[1]. If p.Succs[0]
+ // has a single predecessor then (apart from the degenerate case)
+ // no path from entry can reach b through p.Succs[1].
+ // TODO: how about p->yes->b->yes, i.e. a loop in yes.
+ if sdom.isAncestorEq(p.Succs[0], b) && len(p.Succs[0].Preds) == 1 {
+ return tr
+ } else if sdom.isAncestorEq(p.Succs[1], b) && len(p.Succs[1].Preds) == 1 {
+ tr.r = (lt | eq | gt) ^ tr.r
+ return tr
+ }
+ return typeRange{}
+}
+
+// updateRestrictions updates restrictions from the previous block (p) based on tr.
+// Normally tr is computed by getRestrict.
+func updateRestrictions(mask map[control]rangeMask, p *Block, tr typeRange) []typeRange {
+ if tr.t == 0 {
+ return nil
+ }
+
+ // p modifies the restrictions for (a0, a1).
+ // save and return the previous state.
+ a0 := p.Control.Args[0]
+ a1 := p.Control.Args[1]
+ if a0.ID > a1.ID {
+ tr.r = reverseBits[tr.r]
+ a0, a1 = a1, a0
+ }
+
+ saved := make([]typeRange, 0, 2)
+ for t := typeMask(1); t <= tr.t; t <<= 1 {
+ if t&tr.t == 0 {
+ continue
+ }
+
+ i := control{t, a0.ID, a1.ID}
+ oldRange, ok := mask[i]
+ if !ok {
+ if a1 != a0 {
+ oldRange = lt | eq | gt
+ } else { // sometimes happens after cse
+ oldRange = eq
+ }
+ }
+ // If i was not already in the map, we save the full range
+ // so that when we restore it we properly keep track of it.
+ saved = append(saved, typeRange{t, oldRange})
+ // mask[i] contains the possible relations between a0 and a1.
+ // When we branched from parent we learned that the possible
+ // relations cannot be more than tr.r. We compute the new set of
+ // relations as the intersection between the old and the new set.
+ mask[i] = oldRange & tr.r
+ }
+ return saved
+}
+
+func restoreRestrictions(mask map[control]rangeMask, p *Block, saved []typeRange) {
+ if p == nil || p.Kind != BlockIf || len(saved) == 0 {
+ return
+ }
+
+ a0 := p.Control.Args[0].ID
+ a1 := p.Control.Args[1].ID
+ if a0 > a1 {
+ a0, a1 = a1, a0
+ }
+
+ for _, tr := range saved {
+ i := control{tr.t, a0, a1}
+ if tr.r != lt|eq|gt {
+ mask[i] = tr.r
+ } else {
+ delete(mask, i)
+ }
+ }
+}
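updateRestrictions and restoreRestrictions together implement a simple scoping discipline for the DFS over the dominator tree: entries changed while visiting a subtree are saved and put back when the walk leaves it, so siblings never see facts learned in another branch. A minimal standalone sketch of that discipline, with made-up keys:

package main

import "fmt"

func main() {
	facts := map[string]uint{}

	// Entering the true branch of "a < b": remember the old entry, then narrow it.
	old, had := facts["a,b"]
	facts["a,b"] = 1 // lt

	fmt.Println(facts) // map[a,b:1], visible to every dominated block

	// Leaving the subtree: restore whatever was there before.
	if had {
		facts["a,b"] = old
	} else {
		delete(facts, "a,b")
	}
	fmt.Println(facts) // map[], sibling subtrees see the original state
}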
+
+// simplifyBlock simplifies block b given the restrictions in mask.
+func simplifyBlock(mask map[control]rangeMask, b *Block) {
+ if b.Kind != BlockIf {
+ return
+ }
+
+ tr, has := typeRangeTable[b.Control.Op]
+ if !has {
+ return
+ }
+
+ succ := -1
+ a0 := b.Control.Args[0].ID
+ a1 := b.Control.Args[1].ID
+ if a0 > a1 {
+ tr.r = reverseBits[tr.r]
+ a0, a1 = a1, a0
+ }
+
+ for t := typeMask(1); t <= tr.t; t <<= 1 {
+ if t&tr.t == 0 {
+ continue
+ }
+
+ // tr.r represents the cases in which the positive branch is taken.
+ // m represents which cases are still possible given previous relations.
+ // If the set of possible relations m is included in the set of relations
+ // needed to take the positive (or negative) branch, then that branch will
+ // always be taken.
+ // As a shortcut, if m == 0 then this block is dead code.
+ i := control{t, a0, a1}
+ m := mask[i]
+ if m != 0 && tr.r&m == m {
+ if b.Func.pass.debug > 0 {
+ b.Func.Config.Warnl(int(b.Line), "Proved %s", b.Control.Op)
+ }
+ b.Logf("proved positive branch of %s, block %s in %s\n", b.Control, b, b.Func.Name)
+ succ = 0
+ break
+ }
+ if m != 0 && ((lt|eq|gt)^tr.r)&m == m {
+ if b.Func.pass.debug > 0 {
+ b.Func.Config.Warnl(int(b.Line), "Disproved %s", b.Control.Op)
+ }
+ b.Logf("proved negative branch of %s, block %s in %s\n", b.Control, b, b.Func.Name)
+ succ = 1
+ break
+ }
+ }
+
+ if succ == -1 {
+ // HACK: If the first argument of IsInBounds or IsSliceInBounds
+ // is a constant and we already know that constant is smaller than (or equal
+ // to) the upper bound, then the check is proven. Most useful in cases such as:
+ // if len(a) <= 1 { return }
+ // do something with a[1]
+ c := b.Control
+ if (c.Op == OpIsInBounds || c.Op == OpIsSliceInBounds) &&
+ c.Args[0].Op == OpConst64 && c.Args[0].AuxInt >= 0 {
+ m := mask[control{signed, a0, a1}]
+ if m != 0 && tr.r&m == m {
+ if b.Func.pass.debug > 0 {
+ b.Func.Config.Warnl(int(b.Line), "Proved constant %s", c.Op)
+ }
+ succ = 0
+ }
+ }
+ }
+
+ if succ != -1 {
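+ // A BlockFirst block has no control value and always transfers to
+ // Succs[0], so put the proven successor there; the now-unreachable
+ // edge is left for the deadcode pass to remove.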
+ b.Kind = BlockFirst
+ b.Control = nil
+ b.Succs[0], b.Succs[1] = b.Succs[succ], b.Succs[1-succ]
+ }
+}
func rewriteValueAMD64_OpAMD64MOVBload(v *Value, config *Config) bool {
b := v.Block
_ = b
+ // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ if v.Args[1].Op != OpAMD64MOVBstore {
+ break
+ }
+ off2 := v.Args[1].AuxInt
+ sym2 := v.Args[1].Aux
+ ptr2 := v.Args[1].Args[0]
+ x := v.Args[1].Args[1]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
// match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem)
// cond:
// result: (MOVBload [addOff(off1, off2)] {sym} ptr mem)
func rewriteValueAMD64_OpAMD64MOVLload(v *Value, config *Config) bool {
b := v.Block
_ = b
+ // match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ if v.Args[1].Op != OpAMD64MOVLstore {
+ break
+ }
+ off2 := v.Args[1].AuxInt
+ sym2 := v.Args[1].Aux
+ ptr2 := v.Args[1].Args[0]
+ x := v.Args[1].Args[1]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
// match: (MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem)
// cond:
// result: (MOVLload [addOff(off1, off2)] {sym} ptr mem)
func rewriteValueAMD64_OpAMD64MOVQload(v *Value, config *Config) bool {
b := v.Block
_ = b
+ // match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ if v.Args[1].Op != OpAMD64MOVQstore {
+ break
+ }
+ off2 := v.Args[1].AuxInt
+ sym2 := v.Args[1].Aux
+ ptr2 := v.Args[1].Args[0]
+ x := v.Args[1].Args[1]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
// match: (MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem)
// cond:
// result: (MOVQload [addOff(off1, off2)] {sym} ptr mem)
func rewriteValueAMD64_OpAMD64MOVWload(v *Value, config *Config) bool {
b := v.Block
_ = b
+ // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ off := v.AuxInt
+ sym := v.Aux
+ ptr := v.Args[0]
+ if v.Args[1].Op != OpAMD64MOVWstore {
+ break
+ }
+ off2 := v.Args[1].AuxInt
+ sym2 := v.Args[1].Aux
+ ptr2 := v.Args[1].Args[0]
+ x := v.Args[1].Args[1]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpCopy)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
// match: (MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem)
// cond:
// result: (MOVWload [addOff(off1, off2)] {sym} ptr mem)
package ssa
type sparseTreeNode struct {
- block *Block
child *Block
sibling *Block
parent *Block
t := make(sparseTree, f.NumBlocks())
for _, b := range f.Blocks {
n := &t[b.ID]
- n.block = b
if p := parentOf[b.ID]; p != nil {
n.parent = p
n.sibling = t[p.ID].child
return n + 2
}
+// Sibling returns a sibling of x in the dominator tree (i.e.,
+// a node with the same immediate dominator) or nil if there
+// are no remaining siblings in the arbitrary but repeatable
+// order chosen. Because the Child-Sibling order is used
+// to assign entry and exit numbers in the treewalk, those
+// numbers are also consistent with this order (i.e.,
+// Sibling(x) has entry number larger than x's exit number).
+func (t sparseTree) Sibling(x *Block) *Block {
+ return t[x.ID].sibling
+}
+
+// Child returns a child of x in the dominator tree, or
+// nil if there are none. The choice of first child is
+// arbitrary but repeatable.
+func (t sparseTree) Child(x *Block) *Block {
+ return t[x.ID].child
+}
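+
+// Child and Sibling together enumerate the blocks immediately dominated
+// by x, e.g.: for c := t.Child(x); c != nil; c = t.Sibling(c) { ... }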
+
// isAncestorEq reports whether x is an ancestor of or equal to y.
func (t sparseTree) isAncestorEq(x, y *Block) bool {
xx := &t[x.ID]
// The compiler knows about this variable.
// If you change it, you must change the compiler too.
var writeBarrier struct {
- enabled bool // compiler emits a check of this before calling write barrier
- needed bool // whether we need a write barrier for current GC phase
- cgo bool // whether we need a write barrier for a cgo check
+ enabled bool // compiler emits a check of this before calling write barrier
+ needed bool // whether we need a write barrier for current GC phase
+ cgo bool // whether we need a write barrier for a cgo check
+ alignme uint64 // guarantee alignment so that compiler can use a 32 or 64-bit load
}
// gcBlackenEnabled is 1 if mutator assists and background mark
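A quick way to see the effect of alignme (a hedged, standalone illustration, not runtime code): adding the uint64 field raises the struct's alignment from 1 to 8, so the leading flag bytes always sit in an aligned word that the compiler can load as a 32- or 64-bit unit.

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	var without struct{ enabled, needed, cgo bool }
	var with struct {
		enabled, needed, cgo bool
		alignme              uint64
	}
	fmt.Println(unsafe.Alignof(without), unsafe.Alignof(with)) // prints: 1 8
}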
--- /dev/null
+// +build amd64
+// errorcheck -0 -d=ssa/prove/debug=3
+
+package main
+
+func f0(a []int) int {
+ a[0] = 1
+ a[0] = 1 // ERROR "Proved IsInBounds$"
+ a[6] = 1
+ a[6] = 1 // ERROR "Proved IsInBounds$"
+ a[5] = 1
+ a[5] = 1 // ERROR "Proved IsInBounds$"
+ return 13
+}
+
+func f1(a []int) int {
+ if len(a) <= 5 {
+ return 18
+ }
+ a[0] = 1
+ a[0] = 1 // ERROR "Proved IsInBounds$"
+ a[6] = 1
+ a[6] = 1 // ERROR "Proved IsInBounds$"
+ a[5] = 1 // ERROR "Proved constant IsInBounds$"
+ a[5] = 1 // ERROR "Proved IsInBounds$"
+ return 26
+}
+
+func f2(a []int) int {
+ for i := range a {
+ a[i] = i
+ a[i] = i // ERROR "Proved IsInBounds$"
+ }
+ return 34
+}
+
+func f3(a []uint) int {
+ for i := uint(0); i < uint(len(a)); i++ {
+ a[i] = i // ERROR "Proved IsInBounds$"
+ }
+ return 41
+}
+
+func f4a(a, b, c int) int {
+ if a < b {
+ if a == b { // ERROR "Disproved Eq64$"
+ return 47
+ }
+ if a > b { // ERROR "Disproved Greater64$"
+ return 50
+ }
+ if a < b { // ERROR "Proved Less64$"
+ return 53
+ }
+ if a == b { // ERROR "Disproved Eq64$"
+ return 56
+ }
+ if a > b {
+ return 59
+ }
+ return 61
+ }
+ return 63
+}
+
+func f4b(a, b, c int) int {
+ if a <= b {
+ if a >= b {
+ if a == b { // ERROR "Proved Eq64$"
+ return 70
+ }
+ return 75
+ }
+ return 77
+ }
+ return 79
+}
+
+func f4c(a, b, c int) int {
+ if a <= b {
+ if a >= b {
+ if a != b { // ERROR "Disproved Neq64$"
+ return 73
+ }
+ return 75
+ }
+ return 77
+ }
+ return 79
+}
+
+func f4d(a, b, c int) int {
+ if a < b {
+ if a < c {
+ if a < b { // ERROR "Proved Less64$"
+ if a < c { // ERROR "Proved Less64$"
+ return 87
+ }
+ return 89
+ }
+ return 91
+ }
+ return 93
+ }
+ return 95
+}
+
+func f4e(a, b, c int) int {
+ if a < b {
+ if b > a { // ERROR "Proved Greater64$"
+ return 101
+ }
+ return 103
+ }
+ return 105
+}
+
+func f4f(a, b, c int) int {
+ if a <= b {
+ if b > a {
+ if b == a { // ERROR "Disproved Eq64$"
+ return 112
+ }
+ return 114
+ }
+ if b >= a { // ERROR "Proved Geq64$"
+ if b == a { // ERROR "Proved Eq64$"
+ return 118
+ }
+ return 120
+ }
+ return 122
+ }
+ return 124
+}
+
+func f5(a, b uint) int {
+ if a == b {
+ if a <= b { // ERROR "Proved Leq64U$"
+ return 130
+ }
+ return 132
+ }
+ return 134
+}
+
+// These comparisons are compile time constants.
+func f6a(a uint8) int {
+ if a < a { // ERROR "Disproved Less8U$"
+ return 140
+ }
+ return 151
+}
+
+func f6b(a uint8) int {
+ if a < a { // ERROR "Disproved Less8U$"
+ return 140
+ }
+ return 151
+}
+
+func f6x(a uint8) int {
+ if a > a { // ERROR "Disproved Greater8U$"
+ return 143
+ }
+ return 151
+}
+
+func f6d(a uint8) int {
+ if a <= a { // ERROR "Proved Leq8U$"
+ return 146
+ }
+ return 151
+}
+
+func f6e(a uint8) int {
+ if a >= a { // ERROR "Proved Geq8U$"
+ return 149
+ }
+ return 151
+}
+
+func f7(a []int, b int) int {
+ if b < len(a) {
+ a[b] = 3
+ if b < len(a) { // ERROR "Proved Less64$"
+ a[b] = 5 // ERROR "Proved IsInBounds$"
+ }
+ }
+ return 161
+}
+
+func f8(a, b uint) int {
+ if a == b {
+ return 166
+ }
+ if a > b {
+ return 169
+ }
+ if a < b { // ERROR "Proved Less64U$"
+ return 172
+ }
+ return 174
+}
+
+func main() {
+}