1 // Copyright 2018 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 // Garbage collector: stack objects and stack tracing 6 // See the design doc at https://docs.google.com/document/d/1un-Jn47yByHL7I0aVIP_uVCMxjdM5mpelJhiKlIqxkE/edit?usp=sharing 7 // Also see issue 22350. 8 9 // Stack tracing solves the problem of determining which parts of the 10 // stack are live and should be scanned. It runs as part of scanning 11 // a single goroutine stack. 12 // 13 // Normally determining which parts of the stack are live is easy to 14 // do statically, as user code has explicit references (reads and 15 // writes) to stack variables. The compiler can do a simple dataflow 16 // analysis to determine liveness of stack variables at every point in 17 // the code. See cmd/compile/internal/gc/plive.go for that analysis. 18 // 19 // However, when we take the address of a stack variable, determining 20 // whether that variable is still live is less clear. We can still 21 // look for static accesses, but accesses through a pointer to the 22 // variable are difficult in general to track statically. That pointer 23 // can be passed among functions on the stack, conditionally retained, 24 // etc. 25 // 26 // Instead, we will track pointers to stack variables dynamically. 27 // All pointers to stack-allocated variables will themselves be on the 28 // stack somewhere (or in associated locations, like defer records), so 29 // we can find them all efficiently. 30 // 31 // Stack tracing is organized as a mini garbage collection tracing 32 // pass. The objects in this garbage collection are all the variables 33 // on the stack whose address is taken, and which themselves contain a 34 // pointer. We call these variables "stack objects". 35 // 36 // We begin by determining all the stack objects on the stack and all 37 // the statically live pointers that may point into the stack. 
We then 38 // process each pointer to see if it points to a stack object. If it 39 // does, we scan that stack object. It may contain pointers into the 40 // heap, in which case those pointers are passed to the main garbage 41 // collection. It may also contain pointers into the stack, in which 42 // case we add them to our set of stack pointers. 43 // 44 // Once we're done processing all the pointers (including the ones we 45 // added during processing), we've found all the stack objects that 46 // are live. Any dead stack objects are not scanned and their contents 47 // will not keep heap objects live. Unlike the main garbage 48 // collection, we can't sweep the dead stack objects; they live on in 49 // a moribund state until the stack frame that contains them is 50 // popped. 51 // 52 // A stack can look like this: 53 // 54 // +----------+ 55 // | foo() | 56 // | +------+ | 57 // | | A | | <---\ 58 // | +------+ | | 59 // | | | 60 // | +------+ | | 61 // | | B | | | 62 // | +------+ | | 63 // | | | 64 // +----------+ | 65 // | bar() | | 66 // | +------+ | | 67 // | | C | | <-\ | 68 // | +----|-+ | | | 69 // | | | | | 70 // | +----v-+ | | | 71 // | | D ---------/ 72 // | +------+ | | 73 // | | | 74 // +----------+ | 75 // | baz() | | 76 // | +------+ | | 77 // | | E -------/ 78 // | +------+ | 79 // | ^ | 80 // | F: --/ | 81 // | | 82 // +----------+ 83 // 84 // foo() calls bar() calls baz(). Each has a frame on the stack. 85 // foo() has stack objects A and B. 86 // bar() has stack objects C and D, with C pointing to D and D pointing to A. 87 // baz() has a stack object E pointing to C, and a local variable F pointing to E. 88 // 89 // Starting from the pointer in local variable F, we will eventually 90 // scan all of E, C, D, and A (in that order). B is never scanned 91 // because there is no live pointer to it. 
// If B is also statically
// dead (meaning that foo() never accesses B again after it calls
// bar()), then B's pointers into the heap are not considered live.

package runtime

import (
	"internal/goarch"
	"unsafe"
)

// stackTraceDebug enables extra debugging of the stack-tracing code.
const stackTraceDebug = false

// Buffer for pointers found during stack tracing.
// Must be smaller than or equal to workbuf, because values of this
// type are created by reinterpreting a workbuf (see the getempty
// cast in putPtr); init below checks this at startup.
//
//go:notinheap
type stackWorkBuf struct {
	stackWorkBufHdr
	// obj holds as many uintptr slots as fit in the rest of a workbuf.
	obj [(_WorkbufSize - unsafe.Sizeof(stackWorkBufHdr{})) / goarch.PtrSize]uintptr
}

// Header declaration must come after the buf declaration above, because of issue #14620.
//
//go:notinheap
type stackWorkBufHdr struct {
	workbufhdr
	next *stackWorkBuf // linked list of workbufs
	// Note: we could theoretically repurpose lfnode.next as this next pointer.
	// It would save 1 word, but that probably isn't worth busting open
	// the lfnode API.
}

// Buffer for stack objects found on a goroutine stack.
// Must be smaller than or equal to workbuf, for the same reason as
// stackWorkBuf (see the getempty cast in addObject).
//
//go:notinheap
type stackObjectBuf struct {
	stackObjectBufHdr
	// obj holds as many stackObjects as fit in the rest of a workbuf.
	obj [(_WorkbufSize - unsafe.Sizeof(stackObjectBufHdr{})) / unsafe.Sizeof(stackObject{})]stackObject
}

//go:notinheap
type stackObjectBufHdr struct {
	workbufhdr
	next *stackObjectBuf // linked list of stack object buffers
}

// init verifies at startup that both buffer types actually fit inside
// a workbuf, since they are allocated by reinterpreting workbufs.
func init() {
	if unsafe.Sizeof(stackWorkBuf{}) > unsafe.Sizeof(workbuf{}) {
		panic("stackWorkBuf too big")
	}
	if unsafe.Sizeof(stackObjectBuf{}) > unsafe.Sizeof(workbuf{}) {
		panic("stackObjectBuf too big")
	}
}

// A stackObject represents a variable on the stack that has had
// its address taken.
//
//go:notinheap
type stackObject struct {
	off   uint32             // offset above stack.lo
	size  uint32             // size of object
	r     *stackObjectRecord // info of the object (for ptr/nonptr bits). nil if object has been scanned.
	left  *stackObject       // objects with lower addresses
	right *stackObject       // objects with higher addresses
}

// obj.r = r, but with no write barrier.
//
//go:nowritebarrier
func (obj *stackObject) setRecord(r *stackObjectRecord) {
	// Types of stack objects are always in read-only memory, not the heap.
	// So not using a write barrier is ok.
	*(*uintptr)(unsafe.Pointer(&obj.r)) = uintptr(unsafe.Pointer(r))
}

// A stackScanState keeps track of the state used during the GC walk
// of a goroutine.
type stackScanState struct {
	cache pcvalueCache

	// stack limits
	stack stack

	// conservative indicates that the next frame must be scanned conservatively.
	// This applies only to the innermost frame at an async safe-point.
	conservative bool

	// buf contains the set of possible pointers to stack objects.
	// Organized as a LIFO linked list of buffers.
	// All buffers except possibly the head buffer are full.
	buf     *stackWorkBuf
	freeBuf *stackWorkBuf // keep around one free buffer for allocation hysteresis

	// cbuf contains conservative pointers to stack objects. If
	// all pointers to a stack object are obtained via
	// conservative scanning, then the stack object may be dead
	// and may contain dead pointers, so it must be scanned
	// defensively.
	cbuf *stackWorkBuf

	// list of stack objects
	// Objects are in increasing address order.
	head  *stackObjectBuf
	tail  *stackObjectBuf
	nobjs int

	// root of binary tree for fast object lookup by address
	// Initialized by buildIndex.
	root *stackObject
}

// Add p as a potential pointer to a stack object.
// p must be a stack address.
207 func (s *stackScanState) putPtr(p uintptr, conservative bool) { 208 if p < s.stack.lo || p >= s.stack.hi { 209 throw("address not a stack address") 210 } 211 head := &s.buf 212 if conservative { 213 head = &s.cbuf 214 } 215 buf := *head 216 if buf == nil { 217 // Initial setup. 218 buf = (*stackWorkBuf)(unsafe.Pointer(getempty())) 219 buf.nobj = 0 220 buf.next = nil 221 *head = buf 222 } else if buf.nobj == len(buf.obj) { 223 if s.freeBuf != nil { 224 buf = s.freeBuf 225 s.freeBuf = nil 226 } else { 227 buf = (*stackWorkBuf)(unsafe.Pointer(getempty())) 228 } 229 buf.nobj = 0 230 buf.next = *head 231 *head = buf 232 } 233 buf.obj[buf.nobj] = p 234 buf.nobj++ 235 } 236 237 // Remove and return a potential pointer to a stack object. 238 // Returns 0 if there are no more pointers available. 239 // 240 // This prefers non-conservative pointers so we scan stack objects 241 // precisely if there are any non-conservative pointers to them. 242 func (s *stackScanState) getPtr() (p uintptr, conservative bool) { 243 for _, head := range []**stackWorkBuf{&s.buf, &s.cbuf} { 244 buf := *head 245 if buf == nil { 246 // Never had any data. 247 continue 248 } 249 if buf.nobj == 0 { 250 if s.freeBuf != nil { 251 // Free old freeBuf. 252 putempty((*workbuf)(unsafe.Pointer(s.freeBuf))) 253 } 254 // Move buf to the freeBuf. 255 s.freeBuf = buf 256 buf = buf.next 257 *head = buf 258 if buf == nil { 259 // No more data in this list. 260 continue 261 } 262 } 263 buf.nobj-- 264 return buf.obj[buf.nobj], head == &s.cbuf 265 } 266 // No more data in either list. 267 if s.freeBuf != nil { 268 putempty((*workbuf)(unsafe.Pointer(s.freeBuf))) 269 s.freeBuf = nil 270 } 271 return 0, false 272 } 273 274 // addObject adds a stack object at addr of type typ to the set of stack objects. 
func (s *stackScanState) addObject(addr uintptr, r *stackObjectRecord) {
	x := s.tail
	if x == nil {
		// initial setup: allocate the first buffer of the list
		x = (*stackObjectBuf)(unsafe.Pointer(getempty()))
		x.next = nil
		s.head = x
		s.tail = x
	}
	// Sanity check: the new object must start at or after the end of
	// the previously added object (offsets are relative to stack.lo).
	if x.nobj > 0 && uint32(addr-s.stack.lo) < x.obj[x.nobj-1].off+x.obj[x.nobj-1].size {
		throw("objects added out of order or overlapping")
	}
	if x.nobj == len(x.obj) {
		// full buffer - allocate a new buffer, add to end of linked list
		y := (*stackObjectBuf)(unsafe.Pointer(getempty()))
		y.next = nil
		x.next = y
		s.tail = y
		x = y
	}
	obj := &x.obj[x.nobj]
	x.nobj++
	obj.off = uint32(addr - s.stack.lo)
	obj.size = uint32(r.size)
	obj.setRecord(r)
	// obj.left and obj.right will be initialized by buildIndex before use.
	s.nobjs++
}

// buildIndex initializes s.root to a binary search tree.
// It should be called after all addObject calls but before
// any call of findObject.
func (s *stackScanState) buildIndex() {
	s.root, _, _ = binarySearchTree(s.head, 0, s.nobjs)
}

// Build a binary search tree with the n objects in the list
// x.obj[idx], x.obj[idx+1], ..., x.next.obj[0], ...
// Returns the root of that tree, and the buf+idx of the nth object after x.obj[idx].
// (The first object that was not included in the binary search tree.)
// If n == 0, returns nil, x.
func binarySearchTree(x *stackObjectBuf, idx int, n int) (root *stackObject, restBuf *stackObjectBuf, restIdx int) {
	if n == 0 {
		return nil, x, idx
	}
	var left, right *stackObject
	// Recursively build the left subtree from the first n/2 objects;
	// x/idx advance past the objects consumed.
	left, x, idx = binarySearchTree(x, idx, n/2)
	// The middle object becomes the root of this subtree.
	root = &x.obj[idx]
	idx++
	// Step to the next buffer if this one has been fully consumed.
	if idx == len(x.obj) {
		x = x.next
		idx = 0
	}
	// The remaining n-n/2-1 objects form the right subtree.
	right, x, idx = binarySearchTree(x, idx, n-n/2-1)
	root.left = left
	root.right = right
	return root, x, idx
}

// findObject returns the stack object containing address a, if any.
335 // Must have called buildIndex previously. 336 func (s *stackScanState) findObject(a uintptr) *stackObject { 337 off := uint32(a - s.stack.lo) 338 obj := s.root 339 for { 340 if obj == nil { 341 return nil 342 } 343 if off < obj.off { 344 obj = obj.left 345 continue 346 } 347 if off >= obj.off+obj.size { 348 obj = obj.right 349 continue 350 } 351 return obj 352 } 353 } 354