1 // Copyright 2022 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
7 // mklockrank records the static rank graph of the locks in the
8 // runtime and generates the rank checking structures in lockrank.go.
23 // ranks describes the lock rank graph. See "go doc internal/dag" for
26 // "a < b" means a must be acquired before b if both are held
27 // (or, if b is held, a cannot be acquired).
29 // "NONE < a" means no locks may be held when a is acquired.
31 // If a lock is not given a rank, then it is assumed to be a leaf
32 // lock, which means no other lock can be acquired while it is held.
33 // Therefore, leaf locks do not need to be given an explicit rank.
35 // Ranks in all caps are pseudo-nodes that help define order, but do
36 // not actually define a rank.
38 // TODO: It's often hard to correlate rank names to locks. Change
39 // these to be more consistent with the locks they label.
55 # Scheduler, timers, netpoll
56 NONE < pollDesc, cpuprof;
60 pollDesc, # pollDesc can interact with timers, which can lock sched.
70 scavenge, sweep < hchan;
72 hchan, notifyList < sudog;
76 rwmutexW, sysmon < rwmutexR;
87 NONE < userArenaState;
89 # Tracing without a P uses a global trace buffer.
91 # Above TRACEGLOBAL can emit a trace event without a P.
93 # Below TRACEGLOBAL manages the global tracing buffer.
94 # Note that traceBuf eventually chains to MALLOC, but we never get that far
95 # in the situation where there's no P.
97 # Starting/stopping tracing traces strings.
98 traceBuf < traceStrings;
108 # Above MALLOC are things that can allocate memory.
110 # Below MALLOC is the malloc implementation.
118 MPROF < profInsert, profBlock, profMemActive;
119 profMemActive < profMemFuture;
121 # Stack allocation and copying
130 # Anything that can grow the stack can acquire STACKGROW.
131 # (Most higher layers imply STACKGROW, like MALLOC.)
133 # Below STACKGROW is the stack allocator/copying implementation.
135 gscan, rwmutexR < stackpool;
137 # Generally, hchan must be acquired before gscan. But in one case,
138 # where we suspend a G and then shrink its stack, syncadjustsudogs
139 # can acquire hchan locks while holding gscan. To allow this case,
140 # we use hchanLeaf instead of hchan.
148 # Anything that can have write barriers can acquire WB.
149 # Above WB, we can have write barriers.
151 # Below WB is the write barrier implementation.
158 # Above mheap is anything that can call the span allocator.
160 # Below mheap is the span allocator implementation.
162 # Specials: we're allowed to allocate a special while holding
163 # an mspanSpecial lock, and they're part of the malloc implementation.
164 # Pinner bits might be freed by the span allocator.
165 mheap, mspanSpecial < mheapSpecial;
166 mheap, mheapSpecial < globalAlloc;
168 # Execution tracer events (with a P)
176 # Above TRACE is anything that can create a trace event
181 # panic is handled specially. It is implicitly below all other locks.
183 # deadlock is not acquired while holding panic, but it also needs to be
184 # below all other locks.
186 # raceFini is only held while exiting.
190 // cyclicRanks lists lock ranks that allow multiple locks of the same
191 // rank to be acquired simultaneously. The runtime enforces ordering
192 // within these ranks using a separate mechanism.
193 var cyclicRanks = map[string]bool{
194 // Multiple timers are locked simultaneously in destroy().
196 // Multiple hchans are acquired in hchan.sortkey() order in
// select.
199 // Multiple hchanLeafs are acquired in hchan.sortkey() order in
200 // syncadjustsudogs().
202 // The point of the deadlock lock is to deadlock.
// (body of main, partial view) Parse flags, build and transform the
// rank graph, then emit either Go source or a Graphviz rendering.
207 flagO := flag.String("o", "", "write to `file` instead of stdout")
208 flagDot := flag.Bool("dot", false, "emit graphviz output instead of Go")
// mklockrank takes no positional arguments.
210 if flag.NArg() != 0 {
// NOTE(review): no trailing newline and no format verbs here;
// fmt.Fprintln(os.Stderr, ...) would be the conventional form.
211 fmt.Fprintf(os.Stderr, "too many arguments")
// Parse the lock-rank DAG described by the ranks string above.
215 g, err := dag.Parse(ranks)
// For -dot output only: reduce to the minimal edge set, then re-add
// the self-cycle edges so they remain visible in the drawing.
223 g.TransitiveReduction()
224 // Add cyclic edges for visualization.
225 for k := range cyclicRanks {
228 // Reverse the graph. It's much easier to read this as
229 // a "<" partial order than a ">" partial order. This
230 // way, locks are acquired from the top going down
231 // and time moves forward over the edges instead of
// Gofmt the generated source before writing it out.
239 out, err = format.Source(b.Bytes())
// Write to the -o file if given, otherwise to stdout.
246 err = os.WriteFile(*flagO, out, 0666)
248 _, err = os.Stdout.Write(out)
// generateGo writes the body of lockrank.go to w: the lockRank
// constants, the lockNames table, a String method, and the
// lockPartialOrder table, all derived from graph g.
255 func generateGo(w io.Writer, g *dag.Graph) {
256 fmt.Fprintf(w, `// Code generated by mklockrank.go; DO NOT EDIT.
264 // Create numeric ranks.
// Reverse the topological order in place so that locks which must be
// acquired earliest receive the smallest numeric rank values.
266 for i, j := 0, len(topo)-1; i < j; i, j = i+1, j-1 {
267 topo[i], topo[j] = topo[j], topo[i]
270 // Constants representing the ranks of all non-leaf runtime locks, in rank order.
271 // Locks with lower rank must be taken before locks with higher rank,
272 // in addition to satisfying the partial order in lockPartialOrder.
273 // A few ranks allow self-cycles, which are specified in lockPartialOrder.
275 lockRankUnknown lockRank = iota
// Emit one constant per rank, each preceded by a comment naming it.
278 for _, rank := range topo {
280 fmt.Fprintf(w, "\t// %s\n", rank)
282 fmt.Fprintf(w, "\t%s\n", cname(rank))
287 // lockRankLeafRank is the rank of lock that does not have a declared rank,
288 // and hence is a leaf lock.
289 const lockRankLeafRank lockRank = 1000
292 // Create string table mapping each rank constant back to its label.
294 // lockNames gives the names associated with each of the above ranks.
295 var lockNames = []string{
297 for _, rank := range topo {
299 fmt.Fprintf(w, "\t%s: %q,\n", cname(rank), rank)
304 func (rank lockRank) String() string {
308 if rank == lockRankLeafRank {
311 if rank < 0 || int(rank) >= len(lockNames) {
314 return lockNames[rank]
318 // Create partial order structure (one row per rank).
320 // lockPartialOrder is the transitive closure of the lock rank graph.
321 // An entry for rank X lists all of the ranks that can already be held
322 // when rank X is acquired.
324 // Lock ranks that allow self-cycles list themselves.
325 var lockPartialOrder [][]lockRank = [][]lockRank{
327 for _, rank := range topo {
// Collect the (non-pseudo) ranks that may already be held when this
// rank is acquired.
332 for _, before := range g.Edges(rank) {
333 if !isPseudo(before) {
334 list = append(list, cname(before))
// A rank that allows self-cycles lists itself in its own row.
337 if cyclicRanks[rank] {
338 list = append(list, cname(rank))
341 fmt.Fprintf(w, "\t%s: {%s},\n", cname(rank), strings.Join(list, ", "))
343 fmt.Fprintf(w, "}\n")
346 // cname returns the Go const name for the given lock rank label.
// NOTE(review): label[:1] assumes label is non-empty (it would panic
// on ""); labels come from graph node names — confirm they are never
// empty.
347 func cname(label string) string {
348 return "lockRank" + strings.ToUpper(label[:1]) + label[1:]
// isPseudo reports whether label names a pseudo-node: per the header
// comment, ranks written in all caps only help define order and do
// not define an actual rank. A label with no lower-case letters is
// equal to its upper-casing.
351 func isPseudo(label string) bool {
352 return strings.ToUpper(label) == label
355 // generateDot emits a Graphviz dot representation of g to w.
356 func generateDot(w io.Writer, g *dag.Graph) {
357 fmt.Fprintf(w, "digraph g {\n")
// Declare every node up front, so nodes with no edges still appear
// in the drawing.
360 for _, node := range g.Nodes {
361 fmt.Fprintf(w, "%q;\n", node)
// Then emit one directed edge per ordered pair in the graph.
365 for _, node := range g.Nodes {
366 for _, to := range g.Edges(node) {
367 fmt.Fprintf(w, "%q -> %q;\n", node, to)
371 fmt.Fprintf(w, "}\n")