1 // Copyright 2015 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
7 // nilcheckelim eliminates unnecessary nil checks.
8 // runs on machine-independent code.
9 func nilcheckelim(f *Func) {
// NOTE(review): the embedded original line numbers in this excerpt are not
// contiguous — several declarations, loop headers, and closing braces are
// elided. Comments below describe only the visible code; anything about the
// elided lines is hedged and should be confirmed against the full source.
10 // A nil check is redundant if the same nil check was successful in a
11 // dominating block. The efficacy of this pass depends heavily on the
12 // efficacy of the cse pass.
15 // TODO: Eliminate more nil checks.
16 // We can recursively remove any chain of fixed offset calculations,
17 // i.e. struct fields and array elements, even with non-constant
18 // indices: x is non-nil iff x.a.b[i].c is.
// Walk states for the explicit-stack DFS below (the enclosing
// `type walkState int` / `const (...)` wrappers are elided here).
22 Work walkState = iota // process nil checks and traverse to dominees
23 ClearPtr // forget the fact that ptr is nil
// Fields of the work-queue entry type `bp` (struct header elided):
// each entry is either a block to process (Work) or an undo marker (ClearPtr).
27 block *Block // block, or nil in ClearPtr state
28 ptr *Value // if non-nil, ptr that is to be cleared in ClearPtr state
// LIFO work queue seeded with the function entry block; capacity 256 is a
// heuristic to avoid early reallocations on typical functions.
32 work := make([]bp, 0, 256)
33 work = append(work, bp{block: f.Entry})
35 // map from value ID to bool indicating if value is known to be non-nil
36 // in the current dominator path being walked. This slice is updated by
37 // walkStates to maintain the known non-nil values.
38 nonNilValues := make([]bool, f.NumValues())
40 // make an initial pass identifying any non-nil values
41 for _, b := range f.Blocks {
42 // a value resulting from taking the address of a
43 // value, or a value constructed from an offset of a
44 // non-nil ptr (OpAddPtr) implies it is non-nil
45 for _, v := range b.Values {
46 if v.Op == OpAddr || v.Op == OpAddPtr {
47 nonNilValues[v.ID] = true
48 } else if v.Op == OpPhi {
49 // phis whose arguments are all non-nil
// NOTE(review): the "anyPhis"/fixed-point bookkeeping around this scan is
// elided; only the all-args-non-nil test and the marking line are visible.
52 for _, a := range v.Args {
53 if !nonNilValues[a.ID] {
// Reached only when no argument failed the check above (the intervening
// control flow is elided): the phi itself is then known non-nil.
58 nonNilValues[v.ID] = true
64 // perform a depth first walk of the dominee tree
// Pop the next work item off the stack (the enclosing `for len(work) > 0`
// loop header and the `node.op` switch are elided in this excerpt).
66 node := work[len(work)-1]
67 work = work[:len(work)-1]
73 // First, see if we're dominated by an explicit nil check.
74 if len(b.Preds) == 1 {
// NOTE(review): the assignment of `p` (presumably the single predecessor
// block of b) is elided — confirm against the full source.
76 if p.Kind == BlockIf && p.Control.Op == OpIsNonNil && p.Succs[0].b == b {
77 ptr := p.Control.Args[0]
78 if !nonNilValues[ptr.ID] {
// Record the fact for the duration of this dominator subtree, and push a
// ClearPtr marker so the fact is undone when the subtree is finished.
79 nonNilValues[ptr.ID] = true
80 work = append(work, bp{op: ClearPtr, ptr: ptr})
85 // Next, process values in the block.
87 for _, v := range b.Values {
// NOTE(review): the per-op switch and the extraction of `ptr` from v are
// elided; the two `nonNilValues[ptr.ID]` tests below appear to correspond
// to explicit (OpIsNonNil) and implicit (OpNilCheck) checks respectively.
93 if nonNilValues[ptr.ID] {
94 // This is a redundant explicit nil check.
100 if nonNilValues[ptr.ID] {
101 // This is a redundant implicit nil check.
102 // Logging in the style of the former compiler -- and omit line 1,
103 // which is usually in generated code.
104 if f.Config.Debug_checknil() && v.Pos.Line() > 1 {
105 f.Config.Warnl(v.Pos, "removed nil check")
111 // Record the fact that we know ptr is non nil, and remember to
112 // undo that information when this dominator subtree is done.
113 nonNilValues[ptr.ID] = true
114 work = append(work, bp{op: ClearPtr, ptr: ptr})
// Compact b.Values: the loop below nils out the dropped tail (its body is
// elided) so the garbage collector does not retain removed values.
117 for j := i; j < len(b.Values); j++ {
120 b.Values = b.Values[:i]
122 // Add all dominated blocks to the work list.
123 for w := sdom[node.block.ID].child; w != nil; w = sdom[w.ID].sibling {
124 work = append(work, bp{op: Work, block: w})
// ClearPtr state: we are leaving the dominator subtree in which ptr was
// proven non-nil, so forget that fact.
128 nonNilValues[node.ptr.ID] = false
134 // All platforms are guaranteed to fault if we load/store to anything smaller than this address.
// Used by nilcheckelim2 to bound aux offsets: a load/store at ptr+off with
// 0 <= off < minZeroPage still faults when ptr is nil, so it can subsume an
// explicit nil check on ptr.
135 const minZeroPage = 4096
137 // nilcheckelim2 eliminates unnecessary nil checks.
138 // Runs after lowering and scheduling.
139 func nilcheckelim2(f *Func) {
// NOTE(review): as in nilcheckelim above, the embedded original line numbers
// are not contiguous — some statements (e.g. `v := b.Values[i]`, the
// OpUnknown clobbering of removed checks, `ptrs := ptrstore[:0]`, several
// case labels and closing braces) are elided from this excerpt.
140 unnecessary := f.newSparseSet(f.NumValues())
141 defer f.retSparseSet(unnecessary)
142 for _, b := range f.Blocks {
143 // Walk the block backwards. Find instructions that will fault if their
144 // input pointer is nil. Remove nil checks on those pointers, as the
145 // faulting instruction effectively does the nil check for free.
147 for i := len(b.Values) - 1; i >= 0; i-- {
// A nil-check op whose pointer is already known to be checked by a later
// faulting instruction is redundant (`v` is presumably b.Values[i]; the
// assignment is elided).
149 if opcodeTable[v.Op].nilCheck && unnecessary.contains(v.Args[0].ID) {
150 if f.Config.Debug_checknil() && v.Pos.Line() > 1 {
151 f.Config.Warnl(v.Pos, "removed nil check")
// Memory-writing ops invalidate the "faults later" facts collected so far,
// except for the pseudo-ops below which only annotate liveness.
156 if v.Type.IsMemory() || v.Type.IsTuple() && v.Type.FieldType(1).IsMemory() {
157 if v.Op == OpVarDef || v.Op == OpVarKill || v.Op == OpVarLive {
158 // These ops don't really change memory.
161 // This op changes memory. Any faulting instruction after v that
162 // we've recorded in the unnecessary map is now obsolete.
166 // Find any pointers that this op is guaranteed to fault on if nil.
// ptrstore is stack scratch space; presumably sliced as `ptrs := ptrstore[:0]`
// (elided) so the append calls below do not allocate.
167 var ptrstore [2]*Value
169 if opcodeTable[v.Op].faultOnNilArg0 {
170 ptrs = append(ptrs, v.Args[0])
172 if opcodeTable[v.Op].faultOnNilArg1 {
173 ptrs = append(ptrs, v.Args[1])
175 for _, ptr := range ptrs {
176 // Check to make sure the offset is small.
// Only offsets in [0, minZeroPage) are guaranteed to fault on a nil base;
// a symbolic aux or a large/negative offset disqualifies the instruction
// (the `continue` statements inside these cases are elided).
177 switch opcodeTable[v.Op].auxType {
179 if v.Aux != nil || v.AuxInt < 0 || v.AuxInt >= minZeroPage {
182 case auxSymValAndOff:
183 off := ValAndOff(v.AuxInt).Off()
184 if v.Aux != nil || off < 0 || off >= minZeroPage {
188 // Mips uses this auxType for atomic add constant. It does not affect the effective address.
190 // ARM uses this auxType for duffcopy/duffzero/alignment info.
191 // It does not affect the effective address.
// Unknown aux kinds are a compiler bug, not a user error — fail loudly.
195 v.Fatalf("can't handle aux %s (type %d) yet\n", v.auxString(), int(opcodeTable[v.Op].auxType))
197 // This instruction is guaranteed to fault if ptr is nil.
198 // Any previous nil check op is unnecessary.
199 unnecessary.add(ptr.ID)
202 // Remove values we've clobbered with OpUnknown.
// Same compaction idiom as nilcheckelim: survivors are kept (the copy and
// index bookkeeping are elided), the dropped tail is nil'd so the GC does
// not retain removed values, then the slice is truncated.
204 for _, v := range b.Values {
205 if v.Op != OpUnknown {
210 for j := i; j < len(b.Values); j++ {
213 b.Values = b.Values[:i]
215 // TODO: if b.Kind == BlockPlain, start the analysis in the subsequent block to find
216 // more unnecessary nil checks. Would fix test/nilptr3_ssa.go:157.