Source file src/runtime/traceruntime.go

     1  // Copyright 2023 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Runtime -> tracer API.
     6  
     7  package runtime
     8  
     9  import (
    10  	"internal/runtime/atomic"
    11  	_ "unsafe" // for go:linkname
    12  )
    13  
// gTraceState is per-G state for the tracer.
type gTraceState struct {
	// traceSchedResourceState carries the per-generation sequence numbers
	// and status-traced state shared with pTraceState.
	traceSchedResourceState
}
    18  
// reset resets the gTraceState for a new goroutine.
func (s *gTraceState) reset() {
	// Zero the per-generation sequence counters.
	s.seq = [2]uint64{}
	// N.B. s.statusTraced is managed and cleared separately.
}
    24  
// mTraceState is per-M state for the tracer.
type mTraceState struct {
	seqlock atomic.Uintptr // seqlock indicating that this M is writing to a trace buffer. Odd value means a write is in progress.
	buf     [2]*traceBuf   // Per-M traceBuf for writing. Indexed by trace.gen%2.
	link    *m             // Snapshot of alllink or freelink.
}
    31  
// pTraceState is per-P state for the tracer.
type pTraceState struct {
	traceSchedResourceState

	// mSyscallID is the ID of the M this was bound to before entering a syscall.
	// Set by GoSysCall and cleared (set to -1) by GoSysExit or ProcSteal.
	mSyscallID int64

	// maySweep indicates that sweep events should be traced.
	// This is used to defer the sweep start event until a span
	// has actually been swept.
	maySweep bool

	// inSweep indicates that at least one sweep event has been traced.
	inSweep bool

	// swept and reclaimed track the number of bytes swept and reclaimed
	// by sweeping in the current sweep loop (while maySweep was true).
	swept, reclaimed uintptr
}
    51  
// traceLockInit initializes global trace locks.
//
// The [0]/[1] pairs are the per-generation halves of each table,
// indexed by gen%2.
func traceLockInit() {
	// Sharing a lock rank here is fine because they should never be accessed
	// together. If they are, we want to find out immediately.
	lockInit(&trace.stringTab[0].lock, lockRankTraceStrings)
	lockInit(&trace.stringTab[0].tab.mem.lock, lockRankTraceStrings)
	lockInit(&trace.stringTab[1].lock, lockRankTraceStrings)
	lockInit(&trace.stringTab[1].tab.mem.lock, lockRankTraceStrings)
	lockInit(&trace.stackTab[0].tab.mem.lock, lockRankTraceStackTab)
	lockInit(&trace.stackTab[1].tab.mem.lock, lockRankTraceStackTab)
	lockInit(&trace.typeTab[0].tab.mem.lock, lockRankTraceTypeTab)
	lockInit(&trace.typeTab[1].tab.mem.lock, lockRankTraceTypeTab)
	lockInit(&trace.lock, lockRankTrace)
}
    66  
// lockRankMayTraceFlush records the lock ranking effects of a
// potential call to traceFlush.
//
// It only records the lock edge (trace.lock may be acquired); it does
// not actually take the lock.
//
// nosplit because traceAcquire is nosplit.
//
//go:nosplit
func lockRankMayTraceFlush() {
	lockWithRankMayAcquire(&trace.lock, getLockRank(&trace.lock))
}
    76  
// traceBlockReason is an enumeration of reasons a goroutine might block.
// This is the interface the rest of the runtime uses to tell the
// tracer why a goroutine blocked. The tracer then propagates this information
// into the trace however it sees fit.
//
// Note that traceBlockReasons should not be compared, since reasons that are
// distinct by name may *not* be distinct by value.
type traceBlockReason uint8

// Each reason indexes traceBlockReasonStrings for its human-readable form.
const (
	traceBlockGeneric traceBlockReason = iota
	traceBlockForever
	traceBlockNet
	traceBlockSelect
	traceBlockCondWait
	traceBlockSync
	traceBlockChanSend
	traceBlockChanRecv
	traceBlockGCMarkAssist
	traceBlockGCSweep
	traceBlockSystemGoroutine
	traceBlockPreempted
	traceBlockDebugCall
	traceBlockUntilGCEnds
	traceBlockSleep
)
   103  
// traceBlockReasonStrings maps each traceBlockReason to the string that
// appears in the trace for it.
var traceBlockReasonStrings = [...]string{
	traceBlockGeneric:         "unspecified",
	traceBlockForever:         "forever",
	traceBlockNet:             "network",
	traceBlockSelect:          "select",
	traceBlockCondWait:        "sync.(*Cond).Wait",
	traceBlockSync:            "sync",
	traceBlockChanSend:        "chan send",
	traceBlockChanRecv:        "chan receive",
	traceBlockGCMarkAssist:    "GC mark assist wait for work",
	traceBlockGCSweep:         "GC background sweeper wait",
	traceBlockSystemGoroutine: "system goroutine wait",
	traceBlockPreempted:       "preempted",
	traceBlockDebugCall:       "wait for debug call",
	traceBlockUntilGCEnds:     "wait until GC ends",
	traceBlockSleep:           "sleep",
}
   121  
// traceGoStopReason is an enumeration of reasons a goroutine might yield.
//
// Note that traceGoStopReasons should not be compared, since reasons that are
// distinct by name may *not* be distinct by value.
type traceGoStopReason uint8

// Each reason indexes traceGoStopReasonStrings for its human-readable form.
const (
	traceGoStopGeneric traceGoStopReason = iota
	traceGoStopGoSched
	traceGoStopPreempted
)
   133  
// traceGoStopReasonStrings maps each traceGoStopReason to the string that
// appears in the trace for it.
var traceGoStopReasonStrings = [...]string{
	traceGoStopGeneric:   "unspecified",
	traceGoStopGoSched:   "runtime.Gosched",
	traceGoStopPreempted: "preempted",
}
   139  
// traceEnabled returns true if the trace is currently enabled.
//
// Kept trivial so it inlines into traceAcquire's fast path.
//
//go:nosplit
func traceEnabled() bool {
	return trace.enabled
}
   146  
// traceAllocFreeEnabled returns true if the trace is currently enabled
// and alloc/free events are also enabled.
//
//go:nosplit
func traceAllocFreeEnabled() bool {
	return trace.enabledWithAllocFree
}
   154  
// traceShuttingDown returns true if the trace is currently shutting down.
func traceShuttingDown() bool {
	return trace.shutdown.Load()
}
   159  
// traceLocker represents an M writing trace events. While a traceLocker value
// is valid, the tracer observes all operations on the G/M/P or trace events being
// written as happening atomically.
type traceLocker struct {
	mp  *m      // The M this locker holds (with preemption disabled).
	gen uintptr // The trace generation events are written against; 0 means invalid.
}
   167  
// debugTraceReentrancy enables reentrancy checks on the trace seqlock.
//
// This is optional because throwing in a function makes it instantly
// not inlineable, and we want traceAcquire to be inlineable for
// low overhead when the trace is disabled.
const debugTraceReentrancy = false
   174  
// traceAcquire prepares this M for writing one or more trace events.
//
// Returns an invalid traceLocker (ok() == false) if tracing is disabled.
// Must be paired with traceRelease when the result is valid.
//
// nosplit because it's called on the syscall path when stack movement is forbidden.
//
//go:nosplit
func traceAcquire() traceLocker {
	if !traceEnabled() {
		return traceLocker{}
	}
	return traceAcquireEnabled()
}
   186  
// traceTryAcquire is like traceAcquire, but may return an invalid traceLocker even
// if tracing is enabled. For example, it will return !ok if traceAcquire is being
// called with an active traceAcquire on the M (reentrant locking). This exists for
// optimistically emitting events in the few contexts where tracing is not allowed.
//
// nosplit for alignment with traceAcquire, so it can be used in the
// same contexts.
//
//go:nosplit
func traceTryAcquire() traceLocker {
	if !traceEnabled() {
		return traceLocker{}
	}
	return traceTryAcquireEnabled()
}
   202  
// traceAcquireEnabled is the traceEnabled path for traceAcquire. It's explicitly
// broken out to make traceAcquire inlineable to keep the overhead of the tracer
// when it's disabled low.
//
// nosplit because it's called by traceAcquire, which is nosplit.
//
//go:nosplit
func traceAcquireEnabled() traceLocker {
	// Any time we acquire a traceLocker, we may flush a trace buffer. But
	// buffer flushes are rare. Record the lock edge even if it doesn't happen
	// this time.
	lockRankMayTraceFlush()

	// Prevent preemption.
	mp := acquirem()

	// Acquire the trace seqlock. This prevents traceAdvance from moving forward
	// until all Ms are observed to be outside of their seqlock critical section.
	//
	// Note: The seqlock is mutated here and also in traceCPUSample. If you update
	// usage of the seqlock here, make sure to also look at what traceCPUSample is
	// doing.
	seq := mp.trace.seqlock.Add(1)
	if debugTraceReentrancy && seq%2 != 1 {
		// An even result here means the seqlock was already held:
		// either misuse or a reentrant acquire.
		throw("bad use of trace.seqlock or tracer is reentrant")
	}

	// N.B. This load of gen appears redundant with the one in traceEnabled.
	// However, it's very important that the gen we use for writing to the trace
	// is acquired under a traceLocker so traceAdvance can make sure no stale
	// gen values are being used.
	//
	// Because we're doing this load again, it also means that the trace
	// might end up being disabled when we load it. In that case we need to undo
	// what we did and bail.
	gen := trace.gen.Load()
	if gen == 0 {
		mp.trace.seqlock.Add(1)
		releasem(mp)
		return traceLocker{}
	}
	return traceLocker{mp, gen}
}
   246  
// traceTryAcquireEnabled is like traceAcquireEnabled but may return an invalid
// traceLocker under some conditions. See traceTryAcquire for more details.
//
// nosplit for alignment with traceAcquireEnabled, so it can be used in the
// same contexts.
//
//go:nosplit
func traceTryAcquireEnabled() traceLocker {
	// Any time we acquire a traceLocker, we may flush a trace buffer. But
	// buffer flushes are rare. Record the lock edge even if it doesn't happen
	// this time.
	lockRankMayTraceFlush()

	// Check if we're already locked. An odd seqlock value means an event is
	// already being written on this M. If so, return an invalid traceLocker.
	if getg().m.trace.seqlock.Load()%2 == 1 {
		return traceLocker{}
	}
	return traceAcquireEnabled()
}
   266  
// ok returns true if the traceLocker is valid (i.e. tracing is enabled).
//
// nosplit because it's called on the syscall path when stack movement is forbidden.
//
//go:nosplit
func (tl traceLocker) ok() bool {
	return tl.gen != 0
}
   275  
// traceRelease indicates that this M is done writing trace events.
//
// Must be paired with a valid traceLocker from traceAcquire.
//
// nosplit because it's called on the syscall path when stack movement is forbidden.
//
//go:nosplit
func traceRelease(tl traceLocker) {
	seq := tl.mp.trace.seqlock.Add(1)
	if debugTraceReentrancy && seq%2 != 0 {
		// The matching traceAcquire made seq odd; release must make it even.
		print("runtime: seq=", seq, "\n")
		throw("bad use of trace.seqlock")
	}
	releasem(tl.mp)
}
   289  
// traceExitingSyscall marks a goroutine as exiting the syscall slow path.
//
// Must be paired with a traceExitedSyscall call.
func traceExitingSyscall() {
	trace.exitingSyscall.Add(1)
}
   296  
// traceExitedSyscall marks a goroutine as having exited the syscall slow path.
func traceExitedSyscall() {
	trace.exitingSyscall.Add(-1)
}
   301  
// Gomaxprocs emits a ProcsChange event recording the new procs value
// along with the call stack.
func (tl traceLocker) Gomaxprocs(procs int32) {
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvProcsChange, traceArg(procs), tl.stack(1))
}
   306  
// ProcStart traces a ProcStart event.
//
// Must be called with a valid P.
func (tl traceLocker) ProcStart() {
	pp := tl.mp.p.ptr()
	// Procs are typically started within the scheduler when there is no user goroutine. If there is a user goroutine,
	// it must be in _Gsyscall because the only time a goroutine is allowed to have its Proc moved around from under it
	// is during a syscall.
	tl.eventWriter(traceGoSyscall, traceProcIdle).commit(traceEvProcStart, traceArg(pp.id), pp.trace.nextSeq(tl.gen))
}
   317  
// ProcStop traces a ProcStop event.
//
// Note: pp is accepted for interface symmetry but not read here; the
// event carries no explicit proc argument.
func (tl traceLocker) ProcStop(pp *p) {
	// The only time a goroutine is allowed to have its Proc moved around
	// from under it is during a syscall.
	tl.eventWriter(traceGoSyscall, traceProcRunning).commit(traceEvProcStop)
}
   324  
// GCActive traces a GCActive event.
//
// Must be emitted by an actively running goroutine on an active P. This restriction can be changed
// easily and only depends on where it's currently called.
func (tl traceLocker) GCActive() {
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCActive, traceArg(trace.seqGC))
	// N.B. Only one GC can be running at a time, so this is naturally
	// serialized by the caller.
	trace.seqGC++
}
   335  
// GCStart traces a GCBegin event.
//
// Must be emitted by an actively running goroutine on an active P. This restriction can be changed
// easily and only depends on where it's currently called.
func (tl traceLocker) GCStart() {
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCBegin, traceArg(trace.seqGC), tl.stack(3))
	// N.B. Only one GC can be running at a time, so this is naturally
	// serialized by the caller.
	trace.seqGC++
}
   346  
// GCDone traces a GCEnd event.
//
// Must be emitted by an actively running goroutine on an active P. This restriction can be changed
// easily and only depends on where it's currently called.
func (tl traceLocker) GCDone() {
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCEnd, traceArg(trace.seqGC))
	// N.B. Only one GC can be running at a time, so this is naturally
	// serialized by the caller.
	trace.seqGC++
}
   357  
// STWStart traces a STWBegin event, recording the reason and call stack.
func (tl traceLocker) STWStart(reason stwReason) {
	// Although the current P may be in _Pgcstop here, we model the P as running during the STW. This deviates from the
	// runtime's state tracking, but it's more accurate and doesn't result in any loss of information.
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvSTWBegin, tl.string(reason.String()), tl.stack(2))
}
   364  
// STWDone traces a STWEnd event.
func (tl traceLocker) STWDone() {
	// Although the current P may be in _Pgcstop here, we model the P as running during the STW. This deviates from the
	// runtime's state tracking, but it's more accurate and doesn't result in any loss of information.
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvSTWEnd)
}
   371  
// GCSweepStart prepares to trace a sweep loop. This does not
// emit any events until traceGCSweepSpan is called.
//
// GCSweepStart must be paired with traceGCSweepDone and there
// must be no preemption points between these two calls.
//
// Must be called with a valid P.
func (tl traceLocker) GCSweepStart() {
	// Delay the actual GCSweepBegin event until the first span
	// sweep. If we don't sweep anything, don't emit any events.
	pp := tl.mp.p.ptr()
	if pp.trace.maySweep {
		// Two GCSweepStarts without an intervening GCSweepDone.
		throw("double traceGCSweepStart")
	}
	pp.trace.maySweep, pp.trace.swept, pp.trace.reclaimed = true, 0, 0
}
   388  
   389  // GCSweepSpan traces the sweep of a single span. If this is
   390  // the first span swept since traceGCSweepStart was called, this
   391  // will emit a GCSweepBegin event.
   392  //
   393  // This may be called outside a traceGCSweepStart/traceGCSweepDone
   394  // pair; however, it will not emit any trace events in this case.
   395  //
   396  // Must be called with a valid P.
   397  func (tl traceLocker) GCSweepSpan(bytesSwept uintptr) {
   398  	pp := tl.mp.p.ptr()
   399  	if pp.trace.maySweep {
   400  		if pp.trace.swept == 0 {
   401  			tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCSweepBegin, tl.stack(1))
   402  			pp.trace.inSweep = true
   403  		}
   404  		pp.trace.swept += bytesSwept
   405  	}
   406  }
   407  
// GCSweepDone finishes tracing a sweep loop. If any memory was
// swept (i.e. traceGCSweepSpan emitted an event) then this will emit
// a GCSweepEnd event.
//
// Must be called with a valid P.
func (tl traceLocker) GCSweepDone() {
	pp := tl.mp.p.ptr()
	if !pp.trace.maySweep {
		// GCSweepDone without a matching GCSweepStart.
		throw("missing traceGCSweepStart")
	}
	if pp.trace.inSweep {
		// A begin event was emitted, so balance it with an end event
		// carrying the accumulated totals.
		tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCSweepEnd, traceArg(pp.trace.swept), traceArg(pp.trace.reclaimed))
		pp.trace.inSweep = false
	}
	pp.trace.maySweep = false
}
   424  
// GCMarkAssistStart emits a MarkAssistBegin event with the call stack.
func (tl traceLocker) GCMarkAssistStart() {
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCMarkAssistBegin, tl.stack(1))
}
   429  
// GCMarkAssistDone emits a MarkAssistEnd event.
func (tl traceLocker) GCMarkAssistDone() {
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGCMarkAssistEnd)
}
   434  
   435  // GoCreate emits a GoCreate event.
   436  func (tl traceLocker) GoCreate(newg *g, pc uintptr, blocked bool) {
   437  	newg.trace.setStatusTraced(tl.gen)
   438  	ev := traceEvGoCreate
   439  	if blocked {
   440  		ev = traceEvGoCreateBlocked
   441  	}
   442  	tl.eventWriter(traceGoRunning, traceProcRunning).commit(ev, traceArg(newg.goid), tl.startPC(pc), tl.stack(2))
   443  }
   444  
// GoStart emits a GoStart event.
//
// Must be called with a valid P.
func (tl traceLocker) GoStart() {
	gp := getg().m.curg
	pp := gp.m.p
	w := tl.eventWriter(traceGoRunnable, traceProcRunning)
	w = w.write(traceEvGoStart, traceArg(gp.goid), gp.trace.nextSeq(tl.gen))
	// If this P is running a GC mark worker, follow up with a GoLabel
	// event identifying the worker mode.
	if pp.ptr().gcMarkWorkerMode != gcMarkWorkerNotWorker {
		w = w.write(traceEvGoLabel, trace.markWorkerLabels[tl.gen%2][pp.ptr().gcMarkWorkerMode])
	}
	w.end()
}
   458  
// GoEnd emits a GoDestroy event.
//
// TODO(mknyszek): Rename this to GoDestroy.
func (tl traceLocker) GoEnd() {
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoDestroy)
}
   465  
// GoSched emits a GoStop event with a GoSched reason.
func (tl traceLocker) GoSched() {
	tl.GoStop(traceGoStopGoSched)
}
   470  
// GoPreempt emits a GoStop event with a GoPreempted reason.
func (tl traceLocker) GoPreempt() {
	tl.GoStop(traceGoStopPreempted)
}
   475  
// GoStop emits a GoStop event with the provided reason.
//
// The reason is translated through the per-generation reason table.
func (tl traceLocker) GoStop(reason traceGoStopReason) {
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoStop, traceArg(trace.goStopReasons[tl.gen%2][reason]), tl.stack(1))
}
   480  
// GoPark emits a GoBlock event with the provided reason.
//
// skip is the number of stack frames to omit from the recorded stack.
//
// TODO(mknyszek): Replace traceBlockReason with waitReason. It's silly
// that we have both, and waitReason is way more descriptive.
func (tl traceLocker) GoPark(reason traceBlockReason, skip int) {
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoBlock, traceArg(trace.goBlockReasons[tl.gen%2][reason]), tl.stack(skip))
}
   488  
// GoUnpark emits a GoUnblock event for gp.
//
// skip is the number of stack frames to omit from the recorded stack.
func (tl traceLocker) GoUnpark(gp *g, skip int) {
	// Emit a GoWaiting status if necessary for the unblocked goroutine.
	w := tl.eventWriter(traceGoRunning, traceProcRunning)
	// Careful: don't use the event writer. We never want status or in-progress events
	// to trigger more in-progress events.
	w.w = emitUnblockStatus(w.w, gp, tl.gen)
	w.commit(traceEvGoUnblock, traceArg(gp.goid), gp.trace.nextSeq(tl.gen), tl.stack(skip))
}
   498  
// GoSwitch emits a GoSwitch event. If destroy is true, the calling goroutine
// is simultaneously being destroyed.
func (tl traceLocker) GoSwitch(nextg *g, destroy bool) {
	// Emit a GoWaiting status if necessary for the unblocked goroutine.
	w := tl.eventWriter(traceGoRunning, traceProcRunning)
	// Careful: don't use the event writer. We never want status or in-progress events
	// to trigger more in-progress events.
	w.w = emitUnblockStatus(w.w, nextg, tl.gen)
	ev := traceEvGoSwitch
	if destroy {
		ev = traceEvGoSwitchDestroy
	}
	w.commit(ev, traceArg(nextg.goid), nextg.trace.nextSeq(tl.gen))
}
   513  
// emitUnblockStatus emits a GoStatus GoWaiting event for a goroutine about to be
// unblocked to the trace writer, but only if its status hasn't already been
// traced this generation.
func emitUnblockStatus(w traceWriter, gp *g, gen uintptr) traceWriter {
	if !gp.trace.statusWasTraced(gen) && gp.trace.acquireStatus(gen) {
		// TODO(go.dev/issue/65634): Although it would be nice to add a stack trace here of gp,
		// we cannot safely do so. gp is in _Gwaiting and so we don't have ownership of its stack.
		// We can fix this by acquiring the goroutine's scan bit.
		w = w.writeGoStatus(gp.goid, -1, traceGoWaiting, gp.inMarkAssist, 0)
	}
	return w
}
   525  
// GoSysCall emits a GoSyscallBegin event.
//
// Must be called with a valid P.
func (tl traceLocker) GoSysCall() {
	// Scribble down the M that the P is currently attached to, so
	// ProcSteal can later report which M the P was stolen from.
	pp := tl.mp.p.ptr()
	pp.trace.mSyscallID = int64(tl.mp.procid)
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoSyscallBegin, pp.trace.nextSeq(tl.gen), tl.stack(1))
}
   535  
   536  // GoSysExit emits a GoSyscallEnd event, possibly along with a GoSyscallBlocked event
   537  // if lostP is true.
   538  //
   539  // lostP must be true in all cases that a goroutine loses its P during a syscall.
   540  // This means it's not sufficient to check if it has no P. In particular, it needs to be
   541  // true in the following cases:
   542  // - The goroutine lost its P, it ran some other code, and then got it back. It's now running with that P.
   543  // - The goroutine lost its P and was unable to reacquire it, and is now running without a P.
   544  // - The goroutine lost its P and acquired a different one, and is now running with that P.
   545  func (tl traceLocker) GoSysExit(lostP bool) {
   546  	ev := traceEvGoSyscallEnd
   547  	procStatus := traceProcSyscall // Procs implicitly enter traceProcSyscall on GoSyscallBegin.
   548  	if lostP {
   549  		ev = traceEvGoSyscallEndBlocked
   550  		procStatus = traceProcRunning // If a G has a P when emitting this event, it reacquired a P and is indeed running.
   551  	} else {
   552  		tl.mp.p.ptr().trace.mSyscallID = -1
   553  	}
   554  	tl.eventWriter(traceGoSyscall, procStatus).commit(ev)
   555  }
   556  
// ProcSteal indicates that our current M stole a P from another M.
//
// inSyscall indicates that we're stealing the P from a syscall context.
//
// The caller must have ownership of pp.
func (tl traceLocker) ProcSteal(pp *p, inSyscall bool) {
	// Grab the M ID we stole from (recorded by GoSysCall) and clear it.
	mStolenFrom := pp.trace.mSyscallID
	pp.trace.mSyscallID = -1

	// The status of the proc and goroutine, if we need to emit one here, is not evident from the
	// context of just emitting this event alone. There are two cases. Either we're trying to steal
	// the P just to get its attention (e.g. STW or sysmon retake) or we're trying to steal a P for
	// ourselves specifically to keep running. The two contexts look different, but can be summarized
	// fairly succinctly. In the former, we're a regular running goroutine and proc, if we have either.
	// In the latter, we're a goroutine in a syscall.
	goStatus := traceGoRunning
	procStatus := traceProcRunning
	if inSyscall {
		goStatus = traceGoSyscall
		procStatus = traceProcSyscallAbandoned
	}
	w := tl.eventWriter(goStatus, procStatus)

	// Emit the status of the P we're stealing. We may have *just* done this when creating the event
	// writer but it's not guaranteed, even if inSyscall is true. Although it might seem like from a
	// syscall context we're always stealing a P for ourselves, we may have not wired it up yet (so
	// it wouldn't be visible to eventWriter) or we may not even intend to wire it up to ourselves
	// at all (e.g. entersyscall_gcwait).
	if !pp.trace.statusWasTraced(tl.gen) && pp.trace.acquireStatus(tl.gen) {
		// Careful: don't use the event writer. We never want status or in-progress events
		// to trigger more in-progress events.
		w.w = w.w.writeProcStatus(uint64(pp.id), traceProcSyscallAbandoned, pp.trace.inSweep)
	}
	w.commit(traceEvProcSteal, traceArg(pp.id), pp.trace.nextSeq(tl.gen), traceArg(mStolenFrom))
}
   593  
// HeapAlloc emits a HeapAlloc event carrying the current live heap bytes.
func (tl traceLocker) HeapAlloc(live uint64) {
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvHeapAlloc, traceArg(live))
}
   598  
// HeapGoal reads the current heap goal and emits a HeapGoal event.
func (tl traceLocker) HeapGoal() {
	heapGoal := gcController.heapGoal()
	// ^uint64(0) is the sentinel gcController uses for "no goal".
	if heapGoal == ^uint64(0) {
		// Heap-based triggering is disabled.
		heapGoal = 0
	}
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvHeapGoal, traceArg(heapGoal))
}
   608  
// GoCreateSyscall indicates that a goroutine has transitioned from dead to GoSyscall.
//
// Unlike GoCreate, the caller must be running on gp.
//
// This occurs when C code calls into Go. On pthread platforms it occurs only when
// a C thread calls into Go code for the first time.
func (tl traceLocker) GoCreateSyscall(gp *g) {
	// N.B. We should never trace a status for this goroutine (which we're currently running on),
	// since we want this to appear like goroutine creation.
	gp.trace.setStatusTraced(tl.gen)
	tl.eventWriter(traceGoBad, traceProcBad).commit(traceEvGoCreateSyscall, traceArg(gp.goid))
}
   621  
// GoDestroySyscall indicates that a goroutine has transitioned from GoSyscall to dead.
//
// Must not have a P.
//
// This occurs when Go code returns back to C. On pthread platforms it occurs only when
// the C thread is destroyed.
func (tl traceLocker) GoDestroySyscall() {
	// N.B. If we trace a status here, we must never have a P, and we must be on a goroutine
	// that is in the syscall state.
	tl.eventWriter(traceGoSyscall, traceProcBad).commit(traceEvGoDestroySyscall)
}
   633  
// To access runtime functions from runtime/trace.
// See runtime/trace/annotation.go

// trace_userTaskCreate emits a UserTaskBegin event for a user task.
//
//go:linkname trace_userTaskCreate runtime/trace.userTaskCreate
func trace_userTaskCreate(id, parentID uint64, taskType string) {
	tl := traceAcquire()
	if !tl.ok() {
		// Need to do this check because the caller won't have it.
		return
	}
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvUserTaskBegin, traceArg(id), traceArg(parentID), tl.string(taskType), tl.stack(3))
	traceRelease(tl)
}
   649  
// trace_userTaskEnd emits a UserTaskEnd event for a user task.
//
//go:linkname trace_userTaskEnd runtime/trace.userTaskEnd
func trace_userTaskEnd(id uint64) {
	tl := traceAcquire()
	if !tl.ok() {
		// Need to do this check because the caller won't have it.
		return
	}
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvUserTaskEnd, traceArg(id), tl.stack(2))
	traceRelease(tl)
}
   662  
   663  // trace_userTaskEnd emits a UserRegionBegin or UserRegionEnd event,
   664  // depending on mode (0 == Begin, 1 == End).
   665  //
   666  // TODO(mknyszek): Just make this two functions.
   667  //
   668  //go:linkname trace_userRegion runtime/trace.userRegion
   669  func trace_userRegion(id, mode uint64, name string) {
   670  	tl := traceAcquire()
   671  	if !tl.ok() {
   672  		// Need to do this check because the caller won't have it.
   673  		return
   674  	}
   675  	var ev traceEv
   676  	switch mode {
   677  	case 0:
   678  		ev = traceEvUserRegionBegin
   679  	case 1:
   680  		ev = traceEvUserRegionEnd
   681  	default:
   682  		return
   683  	}
   684  	tl.eventWriter(traceGoRunning, traceProcRunning).commit(ev, traceArg(id), tl.string(name), tl.stack(3))
   685  	traceRelease(tl)
   686  }
   687  
// trace_userLog emits a UserLog event carrying the task id, category,
// and message.
//
//go:linkname trace_userLog runtime/trace.userLog
func trace_userLog(id uint64, category, message string) {
	tl := traceAcquire()
	if !tl.ok() {
		// Need to do this check because the caller won't have it.
		return
	}
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvUserLog, traceArg(id), tl.string(category), tl.uniqueString(message), tl.stack(3))
	traceRelease(tl)
}
   700  
// traceThreadDestroy is called when a thread is removed from
// sched.freem.
//
// mp must not be able to emit trace events anymore.
//
// sched.lock must be held to synchronize with traceAdvance.
func traceThreadDestroy(mp *m) {
	assertLockHeld(&sched.lock)

	// Flush all outstanding buffers to maintain the invariant
	// that an M only has active buffers while on sched.freem
	// or allm.
	//
	// Perform a traceAcquire/traceRelease on behalf of mp to
	// synchronize with the tracer trying to flush our buffer
	// as well. We manipulate mp's seqlock directly since mp may
	// not be the current M.
	seq := mp.trace.seqlock.Add(1)
	if debugTraceReentrancy && seq%2 != 1 {
		throw("bad use of trace.seqlock or tracer is reentrant")
	}
	systemstack(func() {
		lock(&trace.lock)
		for i := range mp.trace.buf {
			if mp.trace.buf[i] != nil {
				// N.B. traceBufFlush accepts a generation, but it
				// really just cares about gen%2.
				traceBufFlush(mp.trace.buf[i], uintptr(i))
				mp.trace.buf[i] = nil
			}
		}
		unlock(&trace.lock)
	})
	// Release the seqlock and verify nothing else touched it meanwhile.
	seq1 := mp.trace.seqlock.Add(1)
	if seq1 != seq+1 {
		print("runtime: seq1=", seq1, "\n")
		throw("bad use of trace.seqlock")
	}
}
   739  

View as plain text