Source file src/runtime/traceallocfree.go

     1  // Copyright 2024 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Runtime -> tracer API for memory events.
     6  
     7  package runtime
     8  
     9  import (
    10  	"internal/abi"
    11  	"runtime/internal/sys"
    12  )
    13  
// Batch type values for the alloc/free experiment. The batch type is
// written as the leading byte of an experimental batch so the trace
// parser knows how to interpret the batch's contents (see
// traceSnapshotMemory, which emits an info batch).
const (
	traceAllocFreeTypesBatch = iota // Contains types. [{id, address, size, ptrspan, name length, name string} ...]
	traceAllocFreeInfoBatch         // Contains info for interpreting events. [min heap addr, page size, min heap align, min stack align]
)
    19  
// traceSnapshotMemory takes a snapshot of all runtime memory that there are events for
// (heap spans, heap objects, goroutine stacks, etc.) and writes out events for them.
//
// The world must be stopped and tracing must be enabled when this function is called.
func traceSnapshotMemory(gen uintptr) {
	assertWorldStopped()

	// Write a batch containing information that'll be necessary to
	// interpret the events.
	var flushed bool
	w := unsafeTraceExpWriter(gen, nil, traceExperimentAllocFree)
	w, flushed = w.ensure(1 + 4*traceBytesPerNumber)
	if flushed {
		// Annotate the batch as containing additional info.
		// This byte is only written when ensure started a fresh batch;
		// it must be the batch's first byte.
		w.byte(byte(traceAllocFreeInfoBatch))
	}

	// Emit info, in the order documented on traceAllocFreeInfoBatch:
	// min heap addr, page size, min heap align, min stack align.
	w.varint(uint64(trace.minPageHeapAddr))
	w.varint(uint64(pageSize))
	w.varint(uint64(minHeapAlign))
	w.varint(uint64(fixedStack))

	// Finish writing the batch.
	w.flush().end()

	// Start tracing. Note that this local shadows the package-level
	// trace variable for the rest of the function: every event below
	// is emitted through the traceLocker.
	trace := traceAcquire()
	if !trace.ok() {
		throw("traceSnapshotMemory: tracing is not enabled")
	}

	// Write out all the heap spans and heap objects.
	for _, s := range mheap_.allspans {
		if s.state.get() == mSpanDead {
			continue
		}
		// It's some kind of span, so trace that it exists.
		trace.SpanExists(s)

		// Write out allocated objects if it's a heap span.
		if s.state.get() != mSpanInUse {
			continue
		}

		// Find all allocated objects.
		abits := s.allocBitsForIndex(0)
		for i := uintptr(0); i < uintptr(s.nelems); i++ {
			// A slot counts as allocated if it sits below freeindex or
			// its alloc bit is set — presumably mirroring the span's own
			// object-liveness rule; confirm against mspan.isFree.
			if abits.index < uintptr(s.freeindex) || abits.isMarked() {
				x := s.base() + i*s.elemsize
				trace.HeapObjectExists(x, s.typePointersOfUnchecked(x).typ)
			}
			abits.advance()
		}
	}

	// Write out all the goroutine stacks.
	forEachGRace(func(gp *g) {
		trace.GoroutineStackExists(gp.stack.lo, gp.stack.hi-gp.stack.lo)
	})
	traceRelease(trace)
}
    82  
    83  func traceSpanTypeAndClass(s *mspan) traceArg {
    84  	if s.state.get() == mSpanInUse {
    85  		return traceArg(s.spanclass) << 1
    86  	}
    87  	return traceArg(1)
    88  }
    89  
    90  // SpanExists records an event indicating that the span exists.
    91  func (tl traceLocker) SpanExists(s *mspan) {
    92  	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvSpan, traceSpanID(s), traceArg(s.npages), traceSpanTypeAndClass(s))
    93  }
    94  
    95  // SpanAlloc records an event indicating that the span has just been allocated.
    96  func (tl traceLocker) SpanAlloc(s *mspan) {
    97  	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvSpanAlloc, traceSpanID(s), traceArg(s.npages), traceSpanTypeAndClass(s))
    98  }
    99  
   100  // SpanFree records an event indicating that the span is about to be freed.
   101  func (tl traceLocker) SpanFree(s *mspan) {
   102  	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvSpanFree, traceSpanID(s))
   103  }
   104  
   105  // traceSpanID creates a trace ID for the span s for the trace.
   106  func traceSpanID(s *mspan) traceArg {
   107  	return traceArg(uint64(s.base())-trace.minPageHeapAddr) / pageSize
   108  }
   109  
   110  // HeapObjectExists records that an object already exists at addr with the provided type.
   111  // The type is optional, and the size of the slot occupied the object is inferred from the
   112  // span containing it.
   113  func (tl traceLocker) HeapObjectExists(addr uintptr, typ *abi.Type) {
   114  	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvHeapObject, traceHeapObjectID(addr), tl.rtype(typ))
   115  }
   116  
   117  // HeapObjectAlloc records that an object was newly allocated at addr with the provided type.
   118  // The type is optional, and the size of the slot occupied the object is inferred from the
   119  // span containing it.
   120  func (tl traceLocker) HeapObjectAlloc(addr uintptr, typ *abi.Type) {
   121  	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvHeapObjectAlloc, traceHeapObjectID(addr), tl.rtype(typ))
   122  }
   123  
   124  // HeapObjectFree records that an object at addr is about to be freed.
   125  func (tl traceLocker) HeapObjectFree(addr uintptr) {
   126  	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvHeapObjectFree, traceHeapObjectID(addr))
   127  }
   128  
   129  // traceHeapObjectID creates a trace ID for a heap object at address addr.
   130  func traceHeapObjectID(addr uintptr) traceArg {
   131  	return traceArg(uint64(addr)-trace.minPageHeapAddr) / minHeapAlign
   132  }
   133  
   134  // GoroutineStackExists records that a goroutine stack already exists at address base with the provided size.
   135  func (tl traceLocker) GoroutineStackExists(base, size uintptr) {
   136  	order := traceCompressStackSize(size)
   137  	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoroutineStack, traceGoroutineStackID(base), order)
   138  }
   139  
   140  // GoroutineStackAlloc records that a goroutine stack was newly allocated at address base with the provided size..
   141  func (tl traceLocker) GoroutineStackAlloc(base, size uintptr) {
   142  	order := traceCompressStackSize(size)
   143  	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoroutineStackAlloc, traceGoroutineStackID(base), order)
   144  }
   145  
   146  // GoroutineStackFree records that a goroutine stack at address base is about to be freed.
   147  func (tl traceLocker) GoroutineStackFree(base uintptr) {
   148  	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvGoroutineStackFree, traceGoroutineStackID(base))
   149  }
   150  
   151  // traceGoroutineStackID creates a trace ID for the goroutine stack from its base address.
   152  func traceGoroutineStackID(base uintptr) traceArg {
   153  	return traceArg(uint64(base)-trace.minPageHeapAddr) / fixedStack
   154  }
   155  
   156  // traceCompressStackSize assumes size is a power of 2 and returns log2(size).
   157  func traceCompressStackSize(size uintptr) traceArg {
   158  	if size&(size-1) != 0 {
   159  		throw("goroutine stack size is not a power of 2")
   160  	}
   161  	return traceArg(sys.Len64(uint64(size)))
   162  }
   163  

View as plain text