1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
11 "cmd/compile/internal/base"
12 "cmd/compile/internal/ir"
13 "cmd/compile/internal/reflectdata"
14 "cmd/compile/internal/ssagen"
15 "cmd/compile/internal/typecheck"
16 "cmd/compile/internal/types"
// The constant is known to runtime.
// NOTE(review): presumably the size in bytes of the stack scratch
// buffer shared with the runtime for temporary strings — confirm
// against the runtime source.
const tmpstringbufsize = 32

// zeroValSize is the size of the runtime's shared all-zero region
// (used as the zero value returned for missing map entries).
const zeroValSize = 1024 // must match value of runtime/map.go:maxZero
// Walk walks the statements of fn's body (ir.CurFunc.Body), dumping
// the IR before and after the walk (the dump guards are elided in
// this extract) and returning early when new type errors appear.
// NOTE(review): lines were elided from this extract; several guards,
// statements, and closing braces of the original are missing.
func Walk(fn *ir.Func) {
errorsBefore := base.Errors() // snapshot to detect errors introduced below
if base.Errors() > errorsBefore {
s := fmt.Sprintf("\nbefore walk %v", ir.CurFunc.Sym())
ir.DumpList(s, ir.CurFunc.Body) // debug dump: IR before walking
if base.Errors() > errorsBefore {
walkStmtList(ir.CurFunc.Body) // the walk pass itself
s := fmt.Sprintf("after walk %v", ir.CurFunc.Sym())
ir.DumpList(s, ir.CurFunc.Body) // debug dump: IR after walking
if base.Flag.Cfg.Instrumenting {
54 // walkRecv walks an ORECV node.
55 func walkRecv(n *ir.UnaryExpr) ir.Node {
56 if n.Typecheck() == 0 {
57 base.Fatalf("missing typecheck: %+v", n)
59 init := ir.TakeInit(n)
61 n.X = walkExpr(n.X, &init)
62 call := walkExpr(mkcall1(chanfn("chanrecv1", 2, n.X.Type()), nil, &init, n.X, typecheck.NodNil()), &init)
63 return ir.InitExpr(init, call)
66 func convas(n *ir.AssignStmt, init *ir.Nodes) *ir.AssignStmt {
68 base.Fatalf("convas: not OAS %v", n.Op())
72 if n.X == nil || n.Y == nil {
78 if lt == nil || rt == nil {
83 n.Y = typecheck.DefaultLit(n.Y, nil)
87 if !types.Identical(lt, rt) {
88 n.Y = typecheck.AssignConv(n.Y, lt, "assignment")
89 n.Y = walkExpr(n.Y, init)
91 types.CalcSize(n.Y.Type())
// stop is a sentinel error used to abort traversals early.
// NOTE(review): its users are outside this extract — confirm before
// relying on this description.
var stop = errors.New("stop")
98 func vmkcall(fn ir.Node, t *types.Type, init *ir.Nodes, va []ir.Node) *ir.CallExpr {
100 base.Fatalf("mkcall with nil init: %v", fn)
102 if fn.Type() == nil || fn.Type().Kind() != types.TFUNC {
103 base.Fatalf("mkcall %v %v", fn, fn.Type())
106 n := fn.Type().NumParams()
108 base.Fatalf("vmkcall %v needs %v args got %v", fn, n, len(va))
111 call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, va)
114 return walkExpr(call, init).(*ir.CallExpr)
117 func mkcall(name string, t *types.Type, init *ir.Nodes, args ...ir.Node) *ir.CallExpr {
118 return vmkcall(typecheck.LookupRuntime(name), t, init, args)
121 func mkcallstmt(name string, args ...ir.Node) ir.Node {
122 return mkcallstmt1(typecheck.LookupRuntime(name), args...)
125 func mkcall1(fn ir.Node, t *types.Type, init *ir.Nodes, args ...ir.Node) *ir.CallExpr {
126 return vmkcall(fn, t, init, args)
129 func mkcallstmt1(fn ir.Node, args ...ir.Node) ir.Node {
131 n := vmkcall(fn, nil, &init, args)
136 return ir.NewBlockStmt(n.Pos(), init)
139 func chanfn(name string, n int, t *types.Type) ir.Node {
141 base.Fatalf("chanfn %v", t)
143 fn := typecheck.LookupRuntime(name)
146 base.Fatalf("chanfn %d", n)
148 fn = typecheck.SubstArgTypes(fn, t.Elem())
150 fn = typecheck.SubstArgTypes(fn, t.Elem(), t.Elem())
155 func mapfn(name string, t *types.Type) ir.Node {
157 base.Fatalf("mapfn %v", t)
159 fn := typecheck.LookupRuntime(name)
160 fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem(), t.Key(), t.Elem())
164 func mapfndel(name string, t *types.Type) ir.Node {
166 base.Fatalf("mapfn %v", t)
168 fn := typecheck.LookupRuntime(name)
169 fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem(), t.Key())
// mapnames holds one runtime function name per map fast-path variant,
// indexed by the mapfast* kind constants (nmapfast is the count).
type mapnames [nmapfast]string
185 func mkmapnames(base string, ptr string) mapnames {
186 return mapnames{base, base + "_fast32", base + "_fast32" + ptr, base + "_fast64", base + "_fast64" + ptr, base + "_faststr"}
// Runtime function name tables for map accesses, assignments, and
// deletes, indexed by the mapfast* fast-path kind (see mapfast).
var mapaccess1 = mkmapnames("mapaccess1", "")
var mapaccess2 = mkmapnames("mapaccess2", "")
var mapassign = mkmapnames("mapassign", "ptr")
var mapdelete = mkmapnames("mapdelete", "")
194 func mapfast(t *types.Type) int {
195 // Check runtime/map.go:maxElemSize before changing.
196 if t.Elem().Width > 128 {
199 switch reflectdata.AlgType(t.Key()) {
201 if !t.Key().HasPointers() {
204 if types.PtrSize == 4 {
207 base.Fatalf("small pointer %v", t.Key())
209 if !t.Key().HasPointers() {
212 if types.PtrSize == 8 {
215 // Two-word object, at least one of which is a pointer.
216 // Use the slow path.
223 func walkAppendArgs(n *ir.CallExpr, init *ir.Nodes) {
224 walkExprListSafe(n.Args, init)
226 // walkExprListSafe will leave OINDEX (s[n]) alone if both s
227 // and n are name or literal, but those may index the slice we're
228 // modifying here. Fix explicitly.
230 for i1, n1 := range ls {
231 ls[i1] = cheapExpr(n1, init)
// Rewrite
//	go builtin(x, y, z)
// into
//	go func(a1, a2, a3) {
//		builtin(a1, a2, a3)
//	}(x, y, z)
// for print, println, and delete.
//
// Rewrite
//	go f(x, y, uintptr(unsafe.Pointer(z)))
// into
//	go func(a1, a2, a3) {
//		builtin(a1, a2, uintptr(a3))
//	}(x, y, unsafe.Pointer(z))
// for function contains unsafe-uintptr arguments.

// wrapCall_prgen numbers the generated wrapper functions so each gets
// a distinct name.
// NOTE(review): the wrapCall function this counter serves is outside
// this extract; the underscore name matches upstream and is kept so
// its users elsewhere in the file still compile.
var wrapCall_prgen int
253 // appendWalkStmt typechecks and walks stmt and then appends it to init.
254 func appendWalkStmt(init *ir.Nodes, stmt ir.Node) {
256 n := typecheck.Stmt(stmt)
257 if op == ir.OAS || op == ir.OAS2 {
258 // If the assignment has side effects, walkExpr will append them
259 // directly to init for us, while walkStmt will wrap it in an OBLOCK.
260 // We need to append them directly.
261 // TODO(rsc): Clean this up.
262 n = walkExpr(n, init)
// The max number of defers in a function using open-coded defers. We enforce this
// limit because the deferBits bitmask is currently a single byte (to minimize code size).
const maxOpenDefers = 8
273 // backingArrayPtrLen extracts the pointer and length from a slice or string.
274 // This constructs two nodes referring to n, so n must be a cheapExpr.
275 func backingArrayPtrLen(n ir.Node) (ptr, length ir.Node) {
277 c := cheapExpr(n, &init)
278 if c != n || len(init) != 0 {
279 base.Fatalf("backingArrayPtrLen not cheap: %v", n)
281 ptr = ir.NewUnaryExpr(base.Pos, ir.OSPTR, n)
282 if n.Type().IsString() {
283 ptr.SetType(types.Types[types.TUINT8].PtrTo())
285 ptr.SetType(n.Type().Elem().PtrTo())
287 length = ir.NewUnaryExpr(base.Pos, ir.OLEN, n)
288 length.SetType(types.Types[types.TINT])
// mayCall reports whether evaluating expression n may require
// function calls, which could clobber function call arguments/results
// currently on the stack.
// NOTE(review): lines were elided from this extract; the switch
// scaffolding and several return statements of the original are
// missing here.
func mayCall(n ir.Node) bool {
// When instrumenting, any expression might require function calls.
if base.Flag.Cfg.Instrumenting {
// Soft-float targets rewrite float/complex arithmetic into runtime
// calls; the checks below look for those operand types.
isSoftFloat := func(typ *types.Type) bool {
return types.IsFloat[typ.Kind()] || types.IsComplex[typ.Kind()]
// Scan every subexpression of n for an operation that can call.
return ir.Any(n, func(n ir.Node) bool {
// walk should have already moved any Init blocks off of
if len(n.Init()) != 0 {
base.FatalfAt(n.Pos(), "mayCall %+v", n)
base.FatalfAt(n.Pos(), "mayCall %+v", n)
case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
case ir.OINDEX, ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR, ir.OSLICESTR,
ir.ODEREF, ir.ODOTPTR, ir.ODOTTYPE, ir.ODIV, ir.OMOD:
// These ops might panic, make sure they are done
// before we start marshaling args for a call. See issue 16760.
case ir.OANDAND, ir.OOROR:
n := n.(*ir.LogicalExpr)
// The RHS expression may have init statements that
// should only execute conditionally, and so cannot be
// pulled out to the top-level init list. We could try
// to be more precise here.
return len(n.Y.Init()) != 0
// When using soft-float, these ops might be rewritten to function calls
// so we ensure they are evaluated first.
case ir.OADD, ir.OSUB, ir.OMUL, ir.ONEG:
return ssagen.Arch.SoftFloat && isSoftFloat(n.Type())
case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT:
n := n.(*ir.BinaryExpr)
return ssagen.Arch.SoftFloat && isSoftFloat(n.X.Type())
n := n.(*ir.ConvExpr)
return ssagen.Arch.SoftFloat && (isSoftFloat(n.Type()) || isSoftFloat(n.X.Type()))
case ir.OLITERAL, ir.ONIL, ir.ONAME, ir.OLINKSYMOFFSET, ir.OMETHEXPR,
ir.OAND, ir.OANDNOT, ir.OLSH, ir.OOR, ir.ORSH, ir.OXOR, ir.OCOMPLEX, ir.OEFACE,
ir.OADDR, ir.OBITNOT, ir.ONOT, ir.OPLUS,
ir.OCAP, ir.OIMAG, ir.OLEN, ir.OREAL,
ir.OCONVNOP, ir.ODOT,
ir.OCFUNC, ir.OIDATA, ir.OITAB, ir.OSPTR,
ir.OBYTES2STRTMP, ir.OGETG, ir.OSLICEHEADER:
// ok: operations that don't require function calls.
359 // itabType loads the _type field from a runtime.itab struct.
360 func itabType(itab ir.Node) ir.Node {
361 if itabTypeField == nil {
362 // runtime.itab's _type field
363 itabTypeField = runtimeField("_type", int64(types.PtrSize), types.NewPtr(types.Types[types.TUINT8]))
365 return boundedDotPtr(base.Pos, itab, itabTypeField)
// itabTypeField caches the synthesized runtime.itab._type field
// descriptor; built on first use by itabType.
var itabTypeField *types.Field
370 // boundedDotPtr returns a selector expression representing ptr.field
371 // and omits nil-pointer checks for ptr.
372 func boundedDotPtr(pos src.XPos, ptr ir.Node, field *types.Field) *ir.SelectorExpr {
373 sel := ir.NewSelectorExpr(pos, ir.ODOTPTR, ptr, field.Sym)
374 sel.Selection = field
375 sel.SetType(field.Type)
377 sel.SetBounded(true) // guaranteed not to fault
381 func runtimeField(name string, offset int64, typ *types.Type) *types.Field {
382 f := types.NewField(src.NoXPos, ir.Pkgs.Runtime.Lookup(name), typ)
// ifaceData loads the data field from an interface.
// The concrete type must be known to have type t.
// It follows the pointer if !IsDirectIface(t).
// NOTE(review): this definition continues past the end of this
// extract; only the visible portion is annotated here.
func ifaceData(pos src.XPos, n ir.Node, t *types.Type) ir.Node {
base.Fatalf("ifaceData interface: %v", t)
ptr := ir.NewUnaryExpr(pos, ir.OIDATA, n) // raw data word of the interface
if types.IsDirectIface(t) {
ptr.SetType(types.NewPtr(t)) // indirect case: the data word points at a t
ind := ir.NewStarExpr(pos, ptr) // load through the pointer