// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
10 "cmd/compile/internal/base"
11 "cmd/compile/internal/ir"
12 "cmd/compile/internal/reflectdata"
13 "cmd/compile/internal/ssagen"
14 "cmd/compile/internal/typecheck"
15 "cmd/compile/internal/types"
func cheapComputableIndex(width int64) bool {
	switch ssagen.Arch.LinkArch.Family {
	// MIPS does not have R+R addressing.
	// ARM64 may lack the ability to generate this code in our assembler,
	// but the architecture supports it.
	case sys.PPC64, sys.S390X:
		return width == 1
	case sys.AMD64, sys.I386, sys.ARM64, sys.ARM:
		return true
	}
	return false
}

// walkRange transforms various forms of ORANGE into
// simpler forms. The result must be assigned back to n.
// Node n may also be modified in place, and may also be
// the returned node.
func walkRange(nrange *ir.RangeStmt) ir.Node {
	if isMapClear(nrange) {
		m := nrange.X
		lno := ir.SetPos(m)
		n := mapClear(m)
		base.Pos = lno
		return n
	}

	nfor := ir.NewForStmt(nrange.Pos(), nil, nil, nil, nil)
	nfor.SetInit(nrange.Init())
	nfor.Label = nrange.Label

	// variable name conventions:
	//	ohv1, hv1, hv2: hidden (old) val 1, 2
	//	ha, hit: hidden aggregate, iterator
	//	hn, hp: hidden len, pointer
	//	hb: hidden bool
	//	a, v1, v2: not hidden aggregate, val 1, 2

	a := nrange.X
	t := typecheck.RangeExprType(a.Type())
	lno := ir.SetPos(a)

	v1, v2 := nrange.Key, nrange.Value

	if ir.IsBlank(v2) {
		v2 = nil
	}

	if ir.IsBlank(v1) && v2 == nil {
		v1 = nil
	}

	if v1 == nil && v2 != nil {
		base.Fatalf("walkRange: v2 != nil while v1 == nil")
	}

	var ifGuard *ir.IfStmt

	var body []ir.Node
	var init []ir.Node
	switch t.Kind() {
	default:
		base.Fatalf("walkRange")

	case types.TARRAY, types.TSLICE:
		if nn := arrayClear(nrange, v1, v2, a); nn != nil {
			base.Pos = lno
			return nn
		}

		// order.stmt arranged for a copy of the array/slice variable if needed.
		ha := a

		hv1 := typecheck.Temp(types.Types[types.TINT])
		hn := typecheck.Temp(types.Types[types.TINT])

		init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil))
		init = append(init, ir.NewAssignStmt(base.Pos, hn, ir.NewUnaryExpr(base.Pos, ir.OLEN, ha)))

		nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv1, hn)
		nfor.Post = ir.NewAssignStmt(base.Pos, hv1, ir.NewBinaryExpr(base.Pos, ir.OADD, hv1, ir.NewInt(1)))
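
		// The generated loop is roughly (a sketch, not the literal output):
		//
		//	hv1 := 0
		//	hn := len(ha)
		//	for ; hv1 < hn; hv1 = hv1 + 1 {
		//		... assign v1 (and v2) from hv1 and ha[hv1], chosen below ...
		//		body
		//	}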

		// for range ha { body }
		if v1 == nil {
			break
		}

		// for v1 := range ha { body }
		if v2 == nil {
			body = []ir.Node{ir.NewAssignStmt(base.Pos, v1, hv1)}
			break
		}

		// for v1, v2 := range ha { body }
		if cheapComputableIndex(t.Elem().Width) {
			// v1, v2 = hv1, ha[hv1]
			tmp := ir.NewIndexExpr(base.Pos, ha, hv1)
			tmp.SetBounded(true)
			// Use OAS2 to correctly handle assignments
			// of the form "v1, a[v1] := range".
			a := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
			a.Lhs = []ir.Node{v1, v2}
			a.Rhs = []ir.Node{hv1, tmp}
			body = []ir.Node{a}
			break
		}

		// TODO(austin): OFORUNTIL is a strange beast, but is
		// necessary for expressing the control flow we need
		// while also making "break" and "continue" work. It
		// would be nice to just lower ORANGE during SSA, but
		// racewalk needs to see many of the operations
		// involved in ORANGE's implementation. If racewalk
		// moves into SSA, consider moving ORANGE into SSA and
		// eliminating OFORUNTIL.

		// TODO(austin): OFORUNTIL inhibits bounds-check
		// elimination on the index variable (see #20711).
		// Enhance the prove pass to understand this.
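
		// When the index is not cheap to recompute, the loop is guarded and
		// carries an element pointer instead; roughly (a sketch, not the
		// literal output):
		//
		//	if hv1 < hn {
		//		hp := &ha[0]
		//		for {
		//			v1, v2 = hv1, *hp
		//			body
		//			hv1 = hv1 + 1
		//			if hv1 >= hn {
		//				break
		//			}
		//			hp = pointer to the next element // see addptr below
		//		}
		//	}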
		ifGuard = ir.NewIfStmt(base.Pos, nil, nil, nil)
		ifGuard.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv1, hn)
		nfor.SetOp(ir.OFORUNTIL)

		hp := typecheck.Temp(types.NewPtr(t.Elem()))
		tmp := ir.NewIndexExpr(base.Pos, ha, ir.NewInt(0))
		tmp.SetBounded(true)
		init = append(init, ir.NewAssignStmt(base.Pos, hp, typecheck.NodAddr(tmp)))

		// Use OAS2 to correctly handle assignments
		// of the form "v1, a[v1] := range".
		a := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
		a.Lhs = []ir.Node{v1, v2}
		a.Rhs = []ir.Node{hv1, ir.NewStarExpr(base.Pos, hp)}
		body = append(body, a)

		// Advance pointer as part of the late increment.
		//
		// This runs *after* the condition check, so we know
		// advancing the pointer is safe and won't go past the
		// end of the allocation.
		as := ir.NewAssignStmt(base.Pos, hp, addptr(hp, t.Elem().Width))
		nfor.Late = []ir.Node{typecheck.Stmt(as)}

	case types.TMAP:
		// order.stmt allocated the iterator for us.
		// We only use a once, so no copy is needed.
		ha := a

		hit := nrange.Prealloc
		th := hit.Type()
		// depends on layout of iterator struct.
		// See cmd/compile/internal/reflectdata/reflect.go:MapIterType
		keysym := th.Field(0).Sym
		elemsym := th.Field(1).Sym // ditto
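
		// The generated loop is roughly (a sketch, not the literal output):
		//
		//	mapiterinit(maptype, ha, &hit)
		//	for ; hit.key != nil; mapiternext(&hit) {
		//		v1, v2 = *hit.key, *hit.elem
		//		body
		//	}
		//
		// using the key/elem fields selected above.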

		fn := typecheck.LookupRuntime("mapiterinit")

		fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem(), th)
		init = append(init, mkcallstmt1(fn, reflectdata.TypePtr(t), ha, typecheck.NodAddr(hit)))
		nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, keysym), typecheck.NodNil())

		fn = typecheck.LookupRuntime("mapiternext")
		fn = typecheck.SubstArgTypes(fn, th)
		nfor.Post = mkcallstmt1(fn, typecheck.NodAddr(hit))

		key := ir.NewStarExpr(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, keysym))
		if v1 == nil {
			body = nil
		} else if v2 == nil {
			body = []ir.Node{ir.NewAssignStmt(base.Pos, v1, key)}
		} else {
			elem := ir.NewStarExpr(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, elemsym))
			a := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
			a.Lhs = []ir.Node{v1, v2}
			a.Rhs = []ir.Node{key, elem}
			body = []ir.Node{a}
		}

	case types.TCHAN:
		// order.stmt arranged for a copy of the channel variable.
		ha := a
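
		// The generated loop is roughly (a sketch, not the literal output):
		//
		//	for {
		//		hv1, hb = <-ha
		//		if !hb {
		//			break
		//		}
		//		v1 = hv1
		//		hv1 = zero value // drop the reference before the next receive
		//		body
		//	}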

		hv1 := typecheck.Temp(t.Elem())
		if t.Elem().HasPointers() {
			init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil))
		}
		hb := typecheck.Temp(types.Types[types.TBOOL])

		nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, hb, ir.NewBool(false))
		a := ir.NewAssignListStmt(base.Pos, ir.OAS2RECV, nil, nil)
		a.Lhs = []ir.Node{hv1, hb}
		a.Rhs = []ir.Node{ir.NewUnaryExpr(base.Pos, ir.ORECV, ha)}
		nfor.Cond = ir.InitExpr([]ir.Node{a}, nfor.Cond)
		if v1 == nil {
			body = nil
		} else {
			body = []ir.Node{ir.NewAssignStmt(base.Pos, v1, hv1)}
		}

		// Zero hv1. This prevents hv1 from being the sole, inaccessible
		// reference to an otherwise GC-able value during the next channel receive.
		body = append(body, ir.NewAssignStmt(base.Pos, hv1, nil))

	case types.TSTRING:
		// Transform string range statements like "for v1, v2 = range a" into
		//
		// ha := a
		// for hv1 := 0; hv1 < len(ha); {
		//   hv1t := hv1
		//   hv2 := rune(ha[hv1])
		//   if hv2 < utf8.RuneSelf {
		//      hv1++
		//   } else {
		//      hv2, hv1 = decoderune(ha, hv1)
		//   }
		//   v1, v2 = hv1t, hv2
		//   // original body
		// }

		// order.stmt arranged for a copy of the string variable.
		ha := a

		hv1 := typecheck.Temp(types.Types[types.TINT])
		hv1t := typecheck.Temp(types.Types[types.TINT])
		hv2 := typecheck.Temp(types.RuneType)

		// hv1 := 0
		init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil))

		// hv1 < len(ha)
		nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv1, ir.NewUnaryExpr(base.Pos, ir.OLEN, ha))

		if v1 != nil {
			// hv1t = hv1
			body = append(body, ir.NewAssignStmt(base.Pos, hv1t, hv1))
		}

		// hv2 := rune(ha[hv1])
		nind := ir.NewIndexExpr(base.Pos, ha, hv1)
		nind.SetBounded(true)
		body = append(body, ir.NewAssignStmt(base.Pos, hv2, typecheck.Conv(nind, types.RuneType)))

		// if hv2 < utf8.RuneSelf
		nif := ir.NewIfStmt(base.Pos, nil, nil, nil)
		nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv2, ir.NewInt(utf8.RuneSelf))

		// hv1++
		nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, hv1, ir.NewBinaryExpr(base.Pos, ir.OADD, hv1, ir.NewInt(1)))}
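
		// Otherwise fall back to the runtime: decoderune decodes the
		// non-ASCII rune starting at ha[hv1] and returns the index just past
		// it, so hv1 advances by the encoded width (invalid UTF-8 yields
		// utf8.RuneError and advances by one byte).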

		eif := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)

		// hv2, hv1 = decoderune(ha, hv1)
		eif.Lhs = []ir.Node{hv2, hv1}
		fn := typecheck.LookupRuntime("decoderune")
		var fnInit ir.Nodes
		eif.Rhs = []ir.Node{mkcall1(fn, fn.Type().Results(), &fnInit, ha, hv1)}
		fnInit.Append(eif)
		nif.Else = fnInit

		body = append(body, nif)

		if v1 != nil {
			if v2 != nil {
				// v1, v2 = hv1t, hv2
				a := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
				a.Lhs = []ir.Node{v1, v2}
				a.Rhs = []ir.Node{hv1t, hv2}
				body = append(body, a)
			} else {
				// v1 = hv1t
				body = append(body, ir.NewAssignStmt(base.Pos, v1, hv1t))
			}
		}
	}

	typecheck.Stmts(init)

	if ifGuard != nil {
		ifGuard.PtrInit().Append(init...)
		ifGuard = typecheck.Stmt(ifGuard).(*ir.IfStmt)
	} else {
		nfor.PtrInit().Append(init...)
	}

	typecheck.Stmts(nfor.Cond.Init())

	nfor.Cond = typecheck.Expr(nfor.Cond)
	nfor.Cond = typecheck.DefaultLit(nfor.Cond, nil)
	nfor.Post = typecheck.Stmt(nfor.Post)
	typecheck.Stmts(body)
	nfor.Body.Append(body...)
	nfor.Body.Append(nrange.Body...)

	var n ir.Node = nfor
	if ifGuard != nil {
		ifGuard.Body = []ir.Node{n}
		n = ifGuard
	}

	n = walkStmt(n)

	base.Pos = lno
	return n
}

// isMapClear checks if n is of the form:
//
// for k := range m {
//   delete(m, k)
// }
//
// where == for keys of map m is reflexive.
func isMapClear(n *ir.RangeStmt) bool {
	if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting {
		return false
	}

	t := n.Type()
	if n.Op() != ir.ORANGE || t.Kind() != types.TMAP || n.Key == nil || n.Value != nil {
		return false
	}

	k := n.Key
	// Require k to be a new variable name.
	if !ir.DeclaredBy(k, n) {
		return false
	}

	if len(n.Body) != 1 {
		return false
	}

	stmt := n.Body[0] // only stmt in body
	if stmt == nil || stmt.Op() != ir.ODELETE {
		return false
	}

	m := n.X
	if delete := stmt.(*ir.CallExpr); !ir.SameSafeExpr(delete.Args[0], m) || !ir.SameSafeExpr(delete.Args[1], k) {
		return false
	}

	// Keys where equality is not reflexive cannot be deleted from maps.
	if !types.IsReflexive(t.Key()) {
		return false
	}

	return true
}

// mapClear constructs a call to runtime.mapclear for the map m.
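// The recognized "for k := range m { delete(m, k) }" loop is thus replaced
// by a single runtime call, roughly mapclear(t, m), where t is the runtime
// type descriptor of the map.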
func mapClear(m ir.Node) ir.Node {
	t := m.Type()

	// instantiate mapclear(typ *type, hmap map[any]any)
	fn := typecheck.LookupRuntime("mapclear")
	fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem())
	n := mkcallstmt1(fn, reflectdata.TypePtr(t), m)
	return walkStmt(typecheck.Stmt(n))
}

// arrayClear lowers a range loop into a call of runtime·memclr if possible,
// for fast zeroing of slices and arrays (issue 5373).
// It looks for instances of
//
// for i := range a {
// 	a[i] = zero
// }
//
// in which the evaluation of a is side-effect-free.
//
// Parameters are as in walkRange: "for v1, v2 = range a".
func arrayClear(loop *ir.RangeStmt, v1, v2, a ir.Node) ir.Node {
	if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting {
		return nil
	}

	if v1 == nil || v2 != nil {
		return nil
	}

	if len(loop.Body) != 1 || loop.Body[0] == nil {
		return nil
	}

	stmt1 := loop.Body[0] // only stmt in body
	if stmt1.Op() != ir.OAS {
		return nil
	}
	stmt := stmt1.(*ir.AssignStmt)
	if stmt.X.Op() != ir.OINDEX {
		return nil
	}
	lhs := stmt.X.(*ir.IndexExpr)

	if !ir.SameSafeExpr(lhs.X, a) || !ir.SameSafeExpr(lhs.Index, v1) {
		return nil
	}

	elemsize := typecheck.RangeExprType(loop.X.Type()).Elem().Width
	if elemsize <= 0 || !ir.IsZero(stmt.Y) {
		return nil
	}

	// Convert to
	// if len(a) != 0 {
	// 	hp = &a[0]
	// 	hn = len(a)*sizeof(elem(a))
	// 	memclr{NoHeap,Has}Pointers(hp, hn)
	// 	i = len(a) - 1
	// }
	n := ir.NewIfStmt(base.Pos, nil, nil, nil)

	n.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), ir.NewInt(0))

	// hp = &a[0]
	hp := typecheck.Temp(types.Types[types.TUNSAFEPTR])

	ix := ir.NewIndexExpr(base.Pos, a, ir.NewInt(0))
	ix.SetBounded(true)
	addr := typecheck.ConvNop(typecheck.NodAddr(ix), types.Types[types.TUNSAFEPTR])
	n.Body.Append(ir.NewAssignStmt(base.Pos, hp, addr))

	// hn = len(a) * sizeof(elem(a))
	hn := typecheck.Temp(types.Types[types.TUINTPTR])
	mul := typecheck.Conv(ir.NewBinaryExpr(base.Pos, ir.OMUL, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), ir.NewInt(elemsize)), types.Types[types.TUINTPTR])
	n.Body.Append(ir.NewAssignStmt(base.Pos, hn, mul))

	var fn ir.Node
	if a.Type().Elem().HasPointers() {
		// memclrHasPointers(hp, hn)
		ir.CurFunc.SetWBPos(stmt.Pos())
		fn = mkcallstmt("memclrHasPointers", hp, hn)
	} else {
		// memclrNoHeapPointers(hp, hn)
		fn = mkcallstmt("memclrNoHeapPointers", hp, hn)
	}

	n.Body.Append(fn)

	// i = len(a) - 1
	v1 = ir.NewAssignStmt(base.Pos, v1, ir.NewBinaryExpr(base.Pos, ir.OSUB, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), ir.NewInt(1)))

	n.Body.Append(v1)

	n.Cond = typecheck.Expr(n.Cond)
	n.Cond = typecheck.DefaultLit(n.Cond, nil)
	typecheck.Stmts(n.Body)
	return walkStmt(n)
}

// addptr returns (*T)(uintptr(p) + n).
func addptr(p ir.Node, n int64) ir.Node {
	t := p.Type()

	p = ir.NewConvExpr(base.Pos, ir.OCONVNOP, nil, p)
	p.SetType(types.Types[types.TUINTPTR])

	p = ir.NewBinaryExpr(base.Pos, ir.OADD, p, ir.NewInt(n))

	p = ir.NewConvExpr(base.Pos, ir.OCONVNOP, nil, p)
	p.SetType(t)

	return p
}