1 // Copyright 2011 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
15 "cmd/compile/internal/base"
16 "cmd/compile/internal/ir"
17 "cmd/compile/internal/objw"
18 "cmd/compile/internal/ssa"
19 "cmd/compile/internal/types"
// cmpstackvarlt reports whether the stack variable a sorts before b.
// Sort the list of stack variables. Autos after anything else,
// within autos, unused after used, within used, things with
// pointers first, zeroed things first, and then decreasing size.
// Because autos are laid out in decreasing addresses
// on the stack, pointers first, zeroed things first and decreasing size
// really means, in memory, things with pointers needing zeroing at
// the top of the stack and increasing in size.
// Non-autos sort on offset.
func cmpstackvarlt(a, b *ir.Name) bool {
	// Frame-allocated variables (see needAlloc) sort after everything else.
	if needAlloc(a) != needAlloc(b) {
	// Neither needs frame allocation: order by the offset already assigned.
	return a.FrameOffset() < b.FrameOffset()
	// Used variables sort before unused ones.
	if a.Used() != b.Used() {
	// Variables whose type contains pointers sort first.
	ap := a.Type().HasPointers()
	bp := b.Type().HasPointers()
	// Bigger variables sort first (decreasing size).
	if a.Type().Width != b.Type().Width {
	return a.Type().Width > b.Type().Width
	// Final tie-break on symbol name, for deterministic layout.
	return a.Sym().Name < b.Sym().Name
67 // byStackvar implements sort.Interface for []*Node using cmpstackvarlt.
68 type byStackVar []*ir.Name
70 func (s byStackVar) Len() int { return len(s) }
71 func (s byStackVar) Less(i, j int) bool { return cmpstackvarlt(s[i], s[j]) }
72 func (s byStackVar) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// needAlloc reports whether n is within the current frame, for which we need to
// allocate space. In particular, it excludes arguments and results, which are in
func needAlloc(n *ir.Name) bool {
	// && binds tighter than ||: true for every PAUTO, or for a PPARAMOUT that
	// is passed in registers (which therefore needs a frame slot of its own).
	return n.Class == ir.PAUTO || n.Class == ir.PPARAMOUT && n.IsOutputParamInRegisters()
// AllocFrame lays out f's stack frame: it marks which declared locals are
// actually used, sorts fn.Dcl with cmpstackvarlt, and assigns frame offsets
// while accumulating s.stksize / s.stkptrsize.
// NOTE(review): fn is presumably s.curfn — its declaration is elided in this view.
func (s *ssafn) AllocFrame(f *ssa.Func) {
	// Mark the PAUTO's unused.
	for _, ln := range fn.Dcl {
	// Any slot that register allocation spilled to is in use.
	for _, l := range f.RegAlloc {
	if ls, ok := l.(ssa.LocalSlot); ok {
	// Scan every value for references to names; such names are used.
	for _, b := range f.Blocks {
	for _, v := range b.Values {
	if n, ok := v.Aux.(*ir.Name); ok {
	if n.IsOutputParamInRegisters() && v.Op == ssa.OpVarDef {
	// ignore VarDef, look for "real" uses.
	// TODO: maybe do this for PAUTO as well?
	case ir.PPARAM, ir.PAUTO:
	// Order frame-allocated variables per cmpstackvarlt's layout rules.
	sort.Sort(byStackVar(fn.Dcl))
	// Reassign stack offsets of the locals that are used.
	for i, n := range fn.Dcl {
	if n.Op() != ir.ONAME || n.Class != ir.PAUTO && !(n.Class == ir.PPARAMOUT && n.IsOutputParamInRegisters()) {
	// i.e., stack assign if AUTO, or if PARAMOUT in registers (which has no predefined spill locations)
	types.CalcSize(n.Type())
	// Sanity-check the computed width before using it in layout math.
	if w >= types.MaxWidth || w < 0 {
	base.Fatalf("bad width")
	if w == 0 && lastHasPtr {
	// Pad between a pointer-containing object and a zero-sized object.
	// This prevents a pointer to the zero-sized object from being interpreted
	// as a pointer to the pointer-containing object (and causing it
	// to be scanned when it shouldn't be). See issue 24993.
	// Align the variable's slot within the frame.
	s.stksize = types.Rnd(s.stksize, int64(n.Type().Align))
	// Pointer-containing slots are laid out first, so stkptrsize tracks the
	// high-water mark of the pointer region.
	if n.Type().HasPointers() {
	s.stkptrsize = s.stksize
	// Autos grow toward lower addresses: offsets are negative from the frame base.
	n.SetFrameOffset(-s.stksize)
	// Round the final frame and pointer-region sizes up to the register size.
	s.stksize = types.Rnd(s.stksize, int64(types.RegSize))
	s.stkptrsize = types.Rnd(s.stkptrsize, int64(types.RegSize))
// maxStackSize (1GB) bounds both the locals region and the argument area of a
// frame; frames at or beyond it are recorded in largeStackFrames and reported
// as "stack frame too large (>1GB)" by CheckLargeStacks.
const maxStackSize = 1 << 30
// Compile builds an SSA backend function,
// uses it to generate a plist,
// and flushes that plist to machine code.
// worker indicates which of the backend workers is doing the processing.
func Compile(fn *ir.Func, worker int) {
	f := buildssa(fn, worker)
	// Note: check arg size to fix issue 25507.
	if f.Frontend().(*ssafn).stksize >= maxStackSize || f.OwnAux.ArgWidth() >= maxStackSize {
	// Record the oversized frame under the shared lock; Compile may run on
	// multiple backend workers concurrently. Reported later by CheckLargeStacks.
	largeStackFramesMu.Lock()
	largeStackFrames = append(largeStackFrames, largeStack{locals: f.Frontend().(*ssafn).stksize, args: f.OwnAux.ArgWidth(), pos: fn.Pos()})
	largeStackFramesMu.Unlock()
	pp := objw.NewProgs(fn, worker)
	// Check frame size again.
	// The check above included only the space needed for local variables.
	// After genssa, the space needed includes local variables and the callee arg region.
	// We must do this check prior to calling pp.Flush.
	// If there are any oversized stack frames,
	// the assembler may emit inscrutable complaints about invalid instructions.
	if pp.Text.To.Offset >= maxStackSize {
	largeStackFramesMu.Lock()
	locals := f.Frontend().(*ssafn).stksize
	// callee is whatever of the total frame is not locals (the callee arg region).
	largeStackFrames = append(largeStackFrames, largeStack{locals: locals, args: f.OwnAux.ArgWidth(), callee: pp.Text.To.Offset - locals, pos: fn.Pos()})
	largeStackFramesMu.Unlock()
	pp.Flush() // assemble, fill in boilerplate, etc.
	// fieldtrack must be called after pp.Flush. See issue 20014.
	fieldtrack(pp.Text.From.Sym, fn.FieldTrack)
// NOTE(review): the enclosing function is not visible in this view; this
// statement seeds math/rand from the wall clock (pre-math/rand/v2 style).
rand.Seed(time.Now().UnixNano())
// StackOffset returns the stack location of a LocalSlot relative to the
// stack pointer, suitable for use in a DWARF location entry. This has nothing
// to do with its offset in the user variable.
func StackOffset(slot ssa.LocalSlot) int32 {
	case ir.PPARAM, ir.PPARAMOUT:
	if !n.IsOutputParamInRegisters() {
	// Stack-passed params/results live above the frame: add the fixed frame size.
	off = n.FrameOffset() + base.Ctxt.FixedFrameSize()
	fallthrough // PPARAMOUT in registers allocates like an AUTO
	off = n.FrameOffset()
	// NOTE(review): presumably adjusts for the return address pushed on the
	// stack when the architecture has no fixed frame header — confirm.
	if base.Ctxt.FixedFrameSize() == 0 {
	off -= int64(types.PtrSize)
	// The saved frame pointer occupies one more word below the locals.
	if buildcfg.FramePointerEnabled {
	off -= int64(types.PtrSize)
	// slot.Off is the slot's offset within the variable n.
	return int32(off + slot.Off)
// fieldtrack adds R_USEFIELD relocations to fnsym to record any
// struct fields that it used.
func fieldtrack(fnsym *obj.LSym, tracked map[*obj.LSym]struct{}) {
	// Nothing to do unless the fieldtrack experiment is on and fields were tracked.
	if !buildcfg.Experiment.FieldTrack || len(tracked) == 0 {
	// Sort the tracked symbols by name so relocation output is deterministic
	// (map iteration order is random).
	trackSyms := make([]*obj.LSym, 0, len(tracked))
	for sym := range tracked {
	trackSyms = append(trackSyms, sym)
	sort.Slice(trackSyms, func(i, j int) bool { return trackSyms[i].Name < trackSyms[j].Name })
	// Emit one R_USEFIELD relocation per tracked field symbol.
	for _, sym := range trackSyms {
	r := obj.Addrel(fnsym)
	r.Type = objabi.R_USEFIELD
// largeStack is info about a function whose stack frame is too large (rare).
type largeStack struct {
// Package-level accumulator of oversized frames; appended to under
// largeStackFramesMu by Compile and drained by CheckLargeStacks.
largeStackFramesMu sync.Mutex // protects largeStackFrames
largeStackFrames []largeStack
// CheckLargeStacks reports a compile error for every function recorded in
// largeStackFrames, in source-position order.
func CheckLargeStacks() {
	// Check whether any of the functions we have compiled have gigantic stack frames.
	// Sort by position so errors come out in a stable, source-ordered sequence.
	sort.Slice(largeStackFrames, func(i, j int) bool {
	return largeStackFrames[i].pos.Before(largeStackFrames[j].pos)
	for _, large := range largeStackFrames {
	// callee is nonzero only when the post-genssa check fired (callee arg region known).
	if large.callee != 0 {
	base.ErrorfAt(large.pos, "stack frame too large (>1GB): %d MB locals + %d MB args + %d MB callee", large.locals>>20, large.args>>20, large.callee>>20)
	base.ErrorfAt(large.pos, "stack frame too large (>1GB): %d MB locals + %d MB args", large.locals>>20, large.args>>20)