// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
13 "cmd/compile/internal/base"
14 "cmd/compile/internal/escape"
15 "cmd/compile/internal/ir"
16 "cmd/compile/internal/reflectdata"
17 "cmd/compile/internal/typecheck"
18 "cmd/compile/internal/types"
// Rewrite append(src, x, y, z) so that any side effects in
// x, y, z (including runtime panics) are evaluated in
// initialization statements before the append.
// For normal code generation, stop there and leave the
// rest to cgen_append.
//
// For race detector, expand append(src, a [, b]* ) to
//
//	init {
//	  s := src
//	  const argc = len(args) - 1
//	  if cap(s) - len(s) < argc {
//	    s = growslice(s, len(s)+argc)
//	  }
//	  n := len(s)
//	  s = s[:n+argc]
//	  s[n] = a
//	  s[n+1] = b
//	  ...
//	}
//	s
func walkAppend(n *ir.CallExpr, init *ir.Nodes, dst ir.Node) ir.Node {
	if !ir.SameSafeExpr(dst, n.Args[0]) {
		n.Args[0] = safeExpr(n.Args[0], init)
		n.Args[0] = walkExpr(n.Args[0], init)
	}
	walkExprListSafe(n.Args[1:], init)

	nsrc := n.Args[0]
	// walkExprListSafe will leave OINDEX (s[n]) alone if both s
	// and n are name or literal, but those may index the slice we're
	// modifying here. Fix explicitly.
	// Using cheapExpr also makes sure that the evaluation
	// of all arguments (and especially any panics) happen
	// before we begin to modify the slice in a visible way.
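	// For example (a sketch of the hazard the comment above describes):
	//
	//	s = append(s, s[0])
	//
	// Here s[0] must be loaded before the append may grow or overwrite s,
	// so cheapExpr copies such arguments into temporaries first.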
	ls := n.Args[1:]
	for i, n := range ls {
		n = cheapExpr(n, init)
		if !types.Identical(n.Type(), nsrc.Type().Elem()) {
			n = typecheck.AssignConv(n, nsrc.Type().Elem(), "append")
			n = walkExpr(n, init)
		}
		ls[i] = n
	}
	argc := len(n.Args) - 1
	if argc < 1 {
		return nsrc
	}

	// General case, with no function calls left as arguments.
	// Leave for gen, except that instrumentation requires the old form.
	if !base.Flag.Cfg.Instrumenting || base.Flag.CompilingRuntime {
		return n
	}
	var l []ir.Node

	ns := typecheck.Temp(nsrc.Type())
	l = append(l, ir.NewAssignStmt(base.Pos, ns, nsrc)) // s = src
	na := ir.NewInt(int64(argc))                 // const argc
	nif := ir.NewIfStmt(base.Pos, nil, nil, nil) // if cap(s) - len(s) < argc
	nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, ir.NewBinaryExpr(base.Pos, ir.OSUB, ir.NewUnaryExpr(base.Pos, ir.OCAP, ns), ir.NewUnaryExpr(base.Pos, ir.OLEN, ns)), na)
	fn := typecheck.LookupRuntime("growslice") // growslice(<type>, old []T, mincap int) (ret []T)
	fn = typecheck.SubstArgTypes(fn, ns.Type().Elem(), ns.Type().Elem())

	nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, ns, mkcall1(fn, ns.Type(), nif.PtrInit(), reflectdata.TypePtr(ns.Type().Elem()), ns,
		ir.NewBinaryExpr(base.Pos, ir.OADD, ir.NewUnaryExpr(base.Pos, ir.OLEN, ns), na)))}
	l = append(l, nif)
	nn := typecheck.Temp(types.Types[types.TINT])
	l = append(l, ir.NewAssignStmt(base.Pos, nn, ir.NewUnaryExpr(base.Pos, ir.OLEN, ns))) // n = len(s)

	slice := ir.NewSliceExpr(base.Pos, ir.OSLICE, ns, nil, ir.NewBinaryExpr(base.Pos, ir.OADD, nn, na), nil) // ...s[:n+argc]
	slice.SetBounded(true)
	l = append(l, ir.NewAssignStmt(base.Pos, ns, slice)) // s = s[:n+argc]
	ls = n.Args[1:]
	for i, n := range ls {
		ix := ir.NewIndexExpr(base.Pos, ns, nn) // s[n] ...
		ix.SetBounded(true)
		l = append(l, ir.NewAssignStmt(base.Pos, ix, n)) // s[n] = arg
		if i+1 < len(ls) {
			l = append(l, ir.NewAssignStmt(base.Pos, nn, ir.NewBinaryExpr(base.Pos, ir.OADD, nn, ir.NewInt(1)))) // n = n + 1
		}
	}

	typecheck.Stmts(l)
	walkStmtList(l)
	init.Append(l...)
	return ns
}
// walkClose walks an OCLOSE node.
func walkClose(n *ir.UnaryExpr, init *ir.Nodes) ir.Node {
	// cannot use chanfn - closechan takes any, not chan any
	fn := typecheck.LookupRuntime("closechan")
	fn = typecheck.SubstArgTypes(fn, n.X.Type())
	return mkcall1(fn, nil, init, n.X)
}
// Lower copy(a, b) to a memmove call or a runtime call.
//
//	init {
//	  n := len(a)
//	  if n > len(b) { n = len(b) }
//	  if a.ptr != b.ptr { memmove(a.ptr, b.ptr, n*sizeof(elem(a))) }
//	}
//	n;
//
// Also works if b is a string.
func walkCopy(n *ir.BinaryExpr, init *ir.Nodes, runtimecall bool) ir.Node {
	if n.X.Type().Elem().HasPointers() {
		ir.CurFunc.SetWBPos(n.Pos())
		fn := writebarrierfn("typedslicecopy", n.X.Type().Elem(), n.Y.Type().Elem())
		n.X = cheapExpr(n.X, init)
		ptrL, lenL := backingArrayPtrLen(n.X)
		n.Y = cheapExpr(n.Y, init)
		ptrR, lenR := backingArrayPtrLen(n.Y)
		return mkcall1(fn, n.Type(), init, reflectdata.TypePtr(n.X.Type().Elem()), ptrL, lenL, ptrR, lenR)
	}

	if runtimecall {
		// rely on runtime to instrument:
		//  copy(n.Left, n.Right)
		// n.Right can be a slice or string.

		n.X = cheapExpr(n.X, init)
		ptrL, lenL := backingArrayPtrLen(n.X)
		n.Y = cheapExpr(n.Y, init)
		ptrR, lenR := backingArrayPtrLen(n.Y)

		fn := typecheck.LookupRuntime("slicecopy")
		fn = typecheck.SubstArgTypes(fn, ptrL.Type().Elem(), ptrR.Type().Elem())

		return mkcall1(fn, n.Type(), init, ptrL, lenL, ptrR, lenR, ir.NewInt(n.X.Type().Elem().Width))
	}
	n.X = walkExpr(n.X, init)
	n.Y = walkExpr(n.Y, init)
	nl := typecheck.Temp(n.X.Type())
	nr := typecheck.Temp(n.Y.Type())
	var l []ir.Node
	l = append(l, ir.NewAssignStmt(base.Pos, nl, n.X))
	l = append(l, ir.NewAssignStmt(base.Pos, nr, n.Y))
	nfrm := ir.NewUnaryExpr(base.Pos, ir.OSPTR, nr)
	nto := ir.NewUnaryExpr(base.Pos, ir.OSPTR, nl)

	nlen := typecheck.Temp(types.Types[types.TINT])

	// n = len(to)
	l = append(l, ir.NewAssignStmt(base.Pos, nlen, ir.NewUnaryExpr(base.Pos, ir.OLEN, nl)))
	// if n > len(frm) { n = len(frm) }
	nif := ir.NewIfStmt(base.Pos, nil, nil, nil)

	nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OGT, nlen, ir.NewUnaryExpr(base.Pos, ir.OLEN, nr))
	nif.Body.Append(ir.NewAssignStmt(base.Pos, nlen, ir.NewUnaryExpr(base.Pos, ir.OLEN, nr)))
	l = append(l, nif)
	// if to.ptr != frm.ptr { memmove( ... ) }
	ne := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.ONE, nto, nfrm), nil, nil)
	ne.Likely = true
	l = append(l, ne)

	fn := typecheck.LookupRuntime("memmove")
	fn = typecheck.SubstArgTypes(fn, nl.Type().Elem(), nl.Type().Elem())
	nwid := ir.Node(typecheck.Temp(types.Types[types.TUINTPTR]))
	setwid := ir.NewAssignStmt(base.Pos, nwid, typecheck.Conv(nlen, types.Types[types.TUINTPTR]))
	ne.Body.Append(setwid)
	nwid = ir.NewBinaryExpr(base.Pos, ir.OMUL, nwid, ir.NewInt(nl.Type().Elem().Width))
	call := mkcall1(fn, nil, init, nto, nfrm, nwid)
	ne.Body.Append(call)

	typecheck.Stmts(l)
	walkStmtList(l)
	init.Append(l...)
	return nlen
}
// walkDelete walks an ODELETE node.
func walkDelete(init *ir.Nodes, n *ir.CallExpr) ir.Node {
	init.Append(ir.TakeInit(n)...)
	map_ := n.Args[0]
	key := n.Args[1]
	map_ = walkExpr(map_, init)
	key = walkExpr(key, init)
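	// A note on the lowering below (sketch): mapfast picks a specialized
	// runtime helper keyed on the map's key type, e.g. mapdelete_fast32,
	// mapdelete_fast64, or mapdelete_faststr, falling back to the generic
	// mapdelete when no fast variant applies.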
	t := map_.Type()
	fast := mapfast(t)
	key = mapKeyArg(fast, n, key)
	return mkcall1(mapfndel(mapdelete[fast], t), nil, init, reflectdata.TypePtr(t), map_, key)
}
// walkLenCap walks an OLEN or OCAP node.
func walkLenCap(n *ir.UnaryExpr, init *ir.Nodes) ir.Node {
	if isRuneCount(n) {
		// Replace len([]rune(string)) with runtime.countrunes(string).
		return mkcall("countrunes", n.Type(), init, typecheck.Conv(n.X.(*ir.ConvExpr).X, types.Types[types.TSTRING]))
	}

	n.X = walkExpr(n.X, init)

	// replace len(*[10]int) with 10.
	// delayed until now to preserve side effects.
	t := n.X.Type()
	if t.IsPtr() {
		t = t.Elem()
	}
	if t.IsArray() {
		safeExpr(n.X, init)
		con := typecheck.OrigInt(n, t.NumElem())
		con.SetTypecheck(1)
		return con
	}
	return n
}
// walkMakeChan walks an OMAKECHAN node.
func walkMakeChan(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
	// When size fits into int, use makechan instead of
	// makechan64; makechan is faster and shorter on 32-bit platforms.
	size := n.Len
	fnname := "makechan64"
	argtype := types.Types[types.TINT64]
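	// A sketch of the lowering, for make(chan T, n):
	//
	//	makechan(chantype, int(n))     // when n's type fits in int
	//	makechan64(chantype, int64(n)) // otherwise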
	// Type checking guarantees that TIDEAL size is positive and fits in an int.
	// The case of size overflow when converting TUINT or TUINTPTR to TINT
	// will be handled by the negative range checks in makechan during runtime.
	if size.Type().IsKind(types.TIDEAL) || size.Type().Size() <= types.Types[types.TUINT].Size() {
		fnname = "makechan"
		argtype = types.Types[types.TINT]
	}

	return mkcall1(chanfn(fnname, 1, n.Type()), n.Type(), init, reflectdata.TypePtr(n.Type()), typecheck.Conv(size, argtype))
}
// walkMakeMap walks an OMAKEMAP node.
func walkMakeMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
	t := n.Type()
	hmapType := reflectdata.MapType(t)
	hint := n.Len

	// var h *hmap
	var h ir.Node
	if n.Esc() == ir.EscNone {
		// Allocate hmap on stack.

		// var hv hmap
		// h = &hv
		h = stackTempAddr(init, hmapType)
		// Allocate one bucket pointed to by hmap.buckets on stack if hint
		// is not larger than BUCKETSIZE. In case hint is larger than
		// BUCKETSIZE runtime.makemap will allocate the buckets on the heap.
		// Maximum key and elem size is 128 bytes, larger objects
		// are stored with an indirection. So max bucket size is 2048+eps.
		if !ir.IsConst(hint, constant.Int) ||
			constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(reflectdata.BUCKETSIZE)) {
			// In case hint is larger than BUCKETSIZE runtime.makemap
			// will allocate the buckets on the heap, see #20184
			//
			// if hint <= BUCKETSIZE {
			//     var bv bmap
			//     b = &bv
			//     h.buckets = b
			// }

			nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLE, hint, ir.NewInt(reflectdata.BUCKETSIZE)), nil, nil)
			nif.Likely = true
			// var bv bmap
			// b = &bv
			b := stackTempAddr(&nif.Body, reflectdata.MapBucketType(t))

			// h.buckets = b
			bsym := hmapType.Field(5).Sym // hmap.buckets see reflect.go:hmap
			na := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, bsym), b)
			nif.Body.Append(na)
			appendWalkStmt(init, nif)
		}
	}
	if ir.IsConst(hint, constant.Int) && constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(reflectdata.BUCKETSIZE)) {
		// Handling make(map[any]any) and
		// make(map[any]any, hint) where hint <= BUCKETSIZE
		// specially allows for faster map initialization and
		// improves binary size by using calls with fewer arguments.
		// For hint <= BUCKETSIZE overLoadFactor(hint, 0) is false
		// and no buckets will be allocated by makemap. Therefore,
		// no buckets need to be allocated in this code path.
		if n.Esc() == ir.EscNone {
			// Only need to initialize h.hash0 since
			// hmap h has been allocated on the stack already.
			// h.hash0 = fastrand()
			rand := mkcall("fastrand", types.Types[types.TUINT32], init)
			hashsym := hmapType.Field(4).Sym // hmap.hash0 see reflect.go:hmap
			appendWalkStmt(init, ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, hashsym), rand))
			return typecheck.ConvNop(h, t)
		}
		// Call runtime.makemap_small to allocate an
		// hmap on the heap and initialize hmap's hash0 field.
		fn := typecheck.LookupRuntime("makemap_small")
		fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem())
		return mkcall1(fn, n.Type(), init)
	}
	if n.Esc() != ir.EscNone {
		h = typecheck.NodNil()
	}
	// Map initialization with a variable or large hint is
	// more complicated. We therefore generate a call to
	// runtime.makemap to initialize hmap and allocate the
	// map buckets.
	// When hint fits into int, use makemap instead of
	// makemap64; makemap is faster and shorter on 32-bit platforms.
	fnname := "makemap64"
	argtype := types.Types[types.TINT64]
	// Type checking guarantees that TIDEAL hint is positive and fits in an int.
	// See checkmake call in TMAP case of OMAKE case in OpSwitch in typecheck1 function.
	// The case of hint overflow when converting TUINT or TUINTPTR to TINT
	// will be handled by the negative range checks in makemap during runtime.
	if hint.Type().IsKind(types.TIDEAL) || hint.Type().Size() <= types.Types[types.TUINT].Size() {
		fnname = "makemap"
		argtype = types.Types[types.TINT]
	}
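	// A sketch of the resulting call for the general case:
	//
	//	makemap(maptype, int(hint), h)     // or
	//	makemap64(maptype, int64(hint), h) // when hint may not fit in int
	//
	// where h is nil if the map escapes, or points to the stack
	// temporary allocated above.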
	fn := typecheck.LookupRuntime(fnname)
	fn = typecheck.SubstArgTypes(fn, hmapType, t.Key(), t.Elem())
	return mkcall1(fn, n.Type(), init, reflectdata.TypePtr(n.Type()), typecheck.Conv(hint, argtype), h)
}
// walkMakeSlice walks an OMAKESLICE node.
func walkMakeSlice(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
	l := n.Len
	r := n.Cap
	if r == nil {
		r = safeExpr(l, init)
		l = r
	}
	t := n.Type()
	if t.Elem().NotInHeap() {
		base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem())
	}
	if n.Esc() == ir.EscNone {
		if why := escape.HeapAllocReason(n); why != "" {
			base.Fatalf("%v has EscNone, but %v", n, why)
		}
		// var arr [r]T
		// n = arr[:l]
		i := typecheck.IndexConst(r)
		if i < 0 {
			base.Fatalf("walkExpr: invalid index %v", r)
		}
		// cap is constrained to [0,2^31) or [0,2^63) depending on whether
		// we're on a 32-bit or 64-bit system. So it's safe to do:
		//
		//	if uint64(len) > cap {
		//	    if len < 0 { panicmakeslicelen() }
		//	    panicmakeslicecap()
		//	}

		nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OGT, typecheck.Conv(l, types.Types[types.TUINT64]), ir.NewInt(i)), nil, nil)
		niflen := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLT, l, ir.NewInt(0)), nil, nil)
		niflen.Body = []ir.Node{mkcall("panicmakeslicelen", nil, init)}
		nif.Body.Append(niflen, mkcall("panicmakeslicecap", nil, init))
		init.Append(typecheck.Stmt(nif))
		t = types.NewArray(t.Elem(), i) // [r]T
		var_ := typecheck.Temp(t)
		appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, nil))  // zero temp
		r := ir.NewSliceExpr(base.Pos, ir.OSLICE, var_, nil, l, nil) // arr[:l]
		// The conv is necessary in case n.Type is named.
		return walkExpr(typecheck.Expr(typecheck.Conv(r, n.Type())), init)
	}
	// n escapes; set up a call to makeslice.
	// When len and cap can fit into int, use makeslice instead of
	// makeslice64; makeslice is faster and shorter on 32-bit platforms.
	len, cap := l, r

	fnname := "makeslice64"
	argtype := types.Types[types.TINT64]
	// Type checking guarantees that TIDEAL len/cap are positive and fit in an int.
	// The case of len or cap overflow when converting TUINT or TUINTPTR to TINT
	// will be handled by the negative range checks in makeslice during runtime.
	if (len.Type().IsKind(types.TIDEAL) || len.Type().Size() <= types.Types[types.TUINT].Size()) &&
		(cap.Type().IsKind(types.TIDEAL) || cap.Type().Size() <= types.Types[types.TUINT].Size()) {
		fnname = "makeslice"
		argtype = types.Types[types.TINT]
	}
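	// A sketch of this escaping case, for make([]T, len, cap):
	//
	//	p := makeslice(elemtype, len, cap) // returns unsafe.Pointer; panics on bad len/cap
	//	s := sliceheader{p, len, cap}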
	fn := typecheck.LookupRuntime(fnname)
	ptr := mkcall1(fn, types.Types[types.TUNSAFEPTR], init, reflectdata.TypePtr(t.Elem()), typecheck.Conv(len, argtype), typecheck.Conv(cap, argtype))
	ptr.MarkNonNil()
	len = typecheck.Conv(len, types.Types[types.TINT])
	cap = typecheck.Conv(cap, types.Types[types.TINT])
	sh := ir.NewSliceHeaderExpr(base.Pos, t, ptr, len, cap)
	return walkExpr(typecheck.Expr(sh), init)
}
// walkMakeSliceCopy walks an OMAKESLICECOPY node.
func walkMakeSliceCopy(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
	if n.Esc() == ir.EscNone {
		base.Fatalf("OMAKESLICECOPY with EscNone: %v", n)
	}

	t := n.Type()
	if t.Elem().NotInHeap() {
		base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem())
	}

	length := typecheck.Conv(n.Len, types.Types[types.TINT])
	copylen := ir.NewUnaryExpr(base.Pos, ir.OLEN, n.Cap)
	copyptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, n.Cap)
	if !t.Elem().HasPointers() && n.Bounded() {
		// When len(to)==len(from) and elements have no pointers:
		// replace make+copy with runtime.mallocgc+runtime.memmove.

		// We do not check for overflow of len(to)*elem.Width here
		// since len(from) is an existing checked slice capacity
		// with the same elem.Width for the from slice.
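		// A sketch of the code generated on this path, for
		// s := make([]T, n); copy(s, from) with pointer-free T:
		//
		//	p := mallocgc(n*sizeof(T), nil, false)
		//	s := sliceheader{p, n, n}
		//	memmove(s.ptr, from.ptr, n*sizeof(T))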
		size := ir.NewBinaryExpr(base.Pos, ir.OMUL, typecheck.Conv(length, types.Types[types.TUINTPTR]), typecheck.Conv(ir.NewInt(t.Elem().Width), types.Types[types.TUINTPTR]))

		// instantiate mallocgc(size uintptr, typ *byte, needszero bool) unsafe.Pointer
		fn := typecheck.LookupRuntime("mallocgc")
		ptr := mkcall1(fn, types.Types[types.TUNSAFEPTR], init, size, typecheck.NodNil(), ir.NewBool(false))
		ptr.MarkNonNil()
		sh := ir.NewSliceHeaderExpr(base.Pos, t, ptr, length, length)

		s := typecheck.Temp(t)
		r := typecheck.Stmt(ir.NewAssignStmt(base.Pos, s, sh))
		r = walkExpr(r, init)
		init.Append(r)
		// instantiate memmove(to *any, frm *any, size uintptr)
		fn = typecheck.LookupRuntime("memmove")
		fn = typecheck.SubstArgTypes(fn, t.Elem(), t.Elem())
		ncopy := mkcall1(fn, nil, init, ir.NewUnaryExpr(base.Pos, ir.OSPTR, s), copyptr, size)
		init.Append(walkExpr(typecheck.Stmt(ncopy), init))

		return s
	}
	// Replace make+copy with runtime.makeslicecopy.
	// instantiate makeslicecopy(typ *byte, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer
	fn := typecheck.LookupRuntime("makeslicecopy")
	ptr := mkcall1(fn, types.Types[types.TUNSAFEPTR], init, reflectdata.TypePtr(t.Elem()), length, copylen, typecheck.Conv(copyptr, types.Types[types.TUNSAFEPTR]))
	ptr.MarkNonNil()
	sh := ir.NewSliceHeaderExpr(base.Pos, t, ptr, length, length)
	return walkExpr(typecheck.Expr(sh), init)
}
// walkNew walks an ONEW node.
func walkNew(n *ir.UnaryExpr, init *ir.Nodes) ir.Node {
	t := n.Type().Elem()
	if t.NotInHeap() {
		base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", n.Type().Elem())
	}
	if n.Esc() == ir.EscNone {
		if t.Size() > ir.MaxImplicitStackVarSize {
			base.Fatalf("large ONEW with EscNone: %v", n)
		}
		return stackTempAddr(init, t)
	}
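	// A non-escaping new(T) is thus lowered to the address of a zeroed
	// stack temporary, roughly:
	//
	//	var tmp T
	//	... &tmp ...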
// walkPrint walks an OPRINT or OPRINTN node and generates code for print.
func walkPrint(nn *ir.CallExpr, init *ir.Nodes) ir.Node {
	// Hoist all the argument evaluation up before the lock.
	walkExprListCheap(nn.Args, init)
	// For println, add " " between elements and "\n" at the end.
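	// For example, println(a, b) becomes print(a, " ", b, "\n").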
	if nn.Op() == ir.OPRINTN {
		s := nn.Args
		t := make([]ir.Node, 0, len(s)*2)
		for i, n := range s {
			if i != 0 {
				t = append(t, ir.NewString(" "))
			}
			t = append(t, n)
		}
		t = append(t, ir.NewString("\n"))
		nn.Args = t
	}
	// Collapse runs of constant strings.
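	// For example, print("ab", "cd", x) becomes print("abcd", x).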
	s := nn.Args
	t := make([]ir.Node, 0, len(s))
	for i := 0; i < len(s); {
		var strs []string
		for i < len(s) && ir.IsConst(s[i], constant.String) {
			strs = append(strs, ir.StringVal(s[i]))
			i++
		}
		if len(strs) > 0 {
			t = append(t, ir.NewString(strings.Join(strs, "")))
		}
		if i < len(s) {
			t = append(t, s[i])
			i++
		}
	}
	nn.Args = t
	calls := []ir.Node{mkcall("printlock", nil, init)}
	for i, n := range nn.Args {
		if n.Op() == ir.OLITERAL {
			if n.Type() == types.UntypedRune {
				n = typecheck.DefaultLit(n, types.RuneType)
			}

			switch n.Val().Kind() {
			case constant.Int:
				n = typecheck.DefaultLit(n, types.Types[types.TINT64])

			case constant.Float:
				n = typecheck.DefaultLit(n, types.Types[types.TFLOAT64])
			}
		}
		if n.Op() != ir.OLITERAL && n.Type() != nil && n.Type().Kind() == types.TIDEAL {
			n = typecheck.DefaultLit(n, types.Types[types.TINT64])
		}
		n = typecheck.DefaultLit(n, nil)
		nn.Args[i] = n
		if n.Type() == nil || n.Type().Kind() == types.TFORW {
			continue
		}

		var on *ir.Name
		switch n.Type().Kind() {
		case types.TINTER:
			if n.Type().IsEmptyInterface() {
				on = typecheck.LookupRuntime("printeface")
			} else {
				on = typecheck.LookupRuntime("printiface")
			}
			on = typecheck.SubstArgTypes(on, n.Type()) // any-1
		case types.TPTR:
			if n.Type().Elem().NotInHeap() {
				on = typecheck.LookupRuntime("printuintptr")
				n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n)
				n.SetType(types.Types[types.TUNSAFEPTR])
				n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n)
				n.SetType(types.Types[types.TUINTPTR])
				break
			}
			fallthrough
		case types.TCHAN, types.TMAP, types.TFUNC, types.TUNSAFEPTR:
			on = typecheck.LookupRuntime("printpointer")
			on = typecheck.SubstArgTypes(on, n.Type()) // any-1
588 on = typecheck.LookupRuntime("printslice")
589 on = typecheck.SubstArgTypes(on, n.Type()) // any-1
		case types.TUINT, types.TUINT8, types.TUINT16, types.TUINT32, types.TUINT64, types.TUINTPTR:
			if types.IsRuntimePkg(n.Type().Sym().Pkg) && n.Type().Sym().Name == "hex" {
				on = typecheck.LookupRuntime("printhex")
			} else {
				on = typecheck.LookupRuntime("printuint")
			}
		case types.TINT, types.TINT8, types.TINT16, types.TINT32, types.TINT64:
			on = typecheck.LookupRuntime("printint")
		case types.TFLOAT32, types.TFLOAT64:
			on = typecheck.LookupRuntime("printfloat")
		case types.TCOMPLEX64, types.TCOMPLEX128:
			on = typecheck.LookupRuntime("printcomplex")
		case types.TBOOL:
			on = typecheck.LookupRuntime("printbool")
		case types.TSTRING:
			cs := ""
			if ir.IsConst(n, constant.String) {
				cs = ir.StringVal(n)
			}
			switch cs {
			case " ":
				on = typecheck.LookupRuntime("printsp")
			case "\n":
				on = typecheck.LookupRuntime("printnl")
			default:
				on = typecheck.LookupRuntime("printstring")
			}
		default:
			badtype(ir.OPRINT, n.Type(), nil)
			continue
		}
		r := ir.NewCallExpr(base.Pos, ir.OCALL, on, nil)
		if params := on.Type().Params().FieldSlice(); len(params) > 0 {
			t := params[0].Type
			if !types.Identical(t, n.Type()) {
				n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n)
				n.SetType(t)
			}
			r.Args.Append(n)
		}
		calls = append(calls, r)
	}
	calls = append(calls, mkcall("printunlock", nil, init))

	typecheck.Stmts(calls)
	walkExprList(calls, init)
	r := ir.NewBlockStmt(base.Pos, nil)
	r.List = calls
	return walkStmt(typecheck.Stmt(r))
}
// walkRecoverFP walks an ORECOVERFP node.
func walkRecoverFP(nn *ir.CallExpr, init *ir.Nodes) ir.Node {
	return mkcall("gorecover", nn.Type(), init, walkExpr(nn.Args[0], init))
}
func walkUnsafeSlice(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
	ptr := safeExpr(n.X, init)
	len := safeExpr(n.Y, init)

	fnname := "unsafeslice64"
	lenType := types.Types[types.TINT64]
	// Type checking guarantees that TIDEAL len is positive and fits in an int.
	// The case of len overflow when converting TUINT or TUINTPTR to TINT
	// will be handled by the negative range checks in unsafeslice during runtime.
	if ir.ShouldCheckPtr(ir.CurFunc, 1) {
		fnname = "unsafeslicecheckptr"
		// for simplicity, unsafeslicecheckptr always uses int64
	} else if len.Type().IsKind(types.TIDEAL) || len.Type().Size() <= types.Types[types.TUINT].Size() {
		fnname = "unsafeslice"
		lenType = types.Types[types.TINT]
	}
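	// A sketch of the lowering, for unsafe.Slice(ptr, len):
	//
	//	unsafeslice(elemtype, unsafe.Pointer(ptr), len) // panics if ptr/len are invalid
	//	s := sliceheader{unsafe.Pointer(ptr), len, len}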
	t := n.Type()

	// Call runtime.unsafeslice{,64,checkptr} to check ptr and len.
	fn := typecheck.LookupRuntime(fnname)
	init.Append(mkcall1(fn, nil, init, reflectdata.TypePtr(t.Elem()), typecheck.Conv(ptr, types.Types[types.TUNSAFEPTR]), typecheck.Conv(len, lenType)))

	h := ir.NewSliceHeaderExpr(n.Pos(), t,
		typecheck.Conv(ptr, types.Types[types.TUNSAFEPTR]),
		typecheck.Conv(len, types.Types[types.TINT]),
		typecheck.Conv(len, types.Types[types.TINT]))
	return walkExpr(typecheck.Expr(h), init)
}
func badtype(op ir.Op, tl, tr *types.Type) {
	var s string
	if tl != nil {
		s += fmt.Sprintf("\n\t%v", tl)
	}
	if tr != nil {
		s += fmt.Sprintf("\n\t%v", tr)
	}

	// common mistake: *struct and *interface.
	if tl != nil && tr != nil && tl.IsPtr() && tr.IsPtr() {
		if tl.Elem().IsStruct() && tr.Elem().IsInterface() {
			s += "\n\t(*struct vs *interface)"
		} else if tl.Elem().IsInterface() && tr.Elem().IsStruct() {
			s += "\n\t(*interface vs *struct)"
		}
	}

	base.Errorf("illegal types for operand: %v%s", op, s)
}
func writebarrierfn(name string, l *types.Type, r *types.Type) ir.Node {
	fn := typecheck.LookupRuntime(name)
	fn = typecheck.SubstArgTypes(fn, l, r)
	return fn
}
// isRuneCount reports whether n is of the form len([]rune(string)).
// These are optimized into a call to runtime.countrunes.
func isRuneCount(n ir.Node) bool {
	return base.Flag.N == 0 && !base.Flag.Cfg.Instrumenting && n.Op() == ir.OLEN && n.(*ir.UnaryExpr).X.Op() == ir.OSTR2RUNES
}