// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sync

import (
	"sync/atomic"
	"unsafe"
)

// Map is like a Go map[interface{}]interface{} but is safe for concurrent use
// by multiple goroutines without additional locking or coordination.
// Loads, stores, and deletes run in amortized constant time.
//
// The Map type is specialized. Most code should use a plain Go map instead,
// with separate locking or coordination, for better type safety and to make it
// easier to maintain other invariants along with the map content.
//
// The Map type is optimized for two common use cases: (1) when the entry for a given
// key is only ever written once but read many times, as in caches that only grow,
// or (2) when multiple goroutines read, write, and overwrite entries for disjoint
// sets of keys. In these two cases, use of a Map may significantly reduce lock
// contention compared to a Go map paired with a separate Mutex or RWMutex.
//
// The zero Map is empty and ready for use. A Map must not be copied after first use.
//
// In the terminology of the Go memory model, Map arranges that a write operation
// “synchronizes before” any read operation that observes the effect of the write, where
// read and write operations are defined as follows.
// Load, LoadAndDelete, and LoadOrStore are read operations;
// Delete, LoadAndDelete, and Store are write operations;
// and LoadOrStore is a write operation when it returns loaded set to false.
type Map struct {
	mu Mutex

	// read contains the portion of the map's contents that are safe for
	// concurrent access (with or without mu held).
	//
	// The read field itself is always safe to load, but must only be stored with
	// mu held.
	//
	// Entries stored in read may be updated concurrently without mu, but updating
	// a previously-expunged entry requires that the entry be copied to the dirty
	// map and unexpunged with mu held.
	read atomic.Value // readOnly

	// dirty contains the portion of the map's contents that require mu to be
	// held. To ensure that the dirty map can be promoted to the read map quickly,
	// it also includes all of the non-expunged entries in the read map.
	//
	// Expunged entries are not stored in the dirty map. An expunged entry in the
	// clean map must be unexpunged and added to the dirty map before a new value
	// can be stored to it.
	//
	// If the dirty map is nil, the next write to the map will initialize it by
	// making a shallow copy of the clean map, omitting stale entries.
	dirty map[any]*entry

	// misses counts the number of loads since the read map was last updated that
	// needed to lock mu to determine whether the key was present.
	//
	// Once enough misses have occurred to cover the cost of copying the dirty
	// map, the dirty map will be promoted to the read map (in the unamended
	// state) and the next store to the map will make a new dirty copy.
	misses int
}

// readOnly is an immutable struct stored atomically in the Map.read field.
type readOnly struct {
	m       map[any]*entry
	amended bool // true if the dirty map contains some key not in m.
}
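
// exampleZeroValueUse is a hypothetical sketch (the function and its scenario
// are illustrative additions, not part of the package) showing the contract
// documented on Map above: the zero value is ready for use, and Load reports
// presence through its ok result.
func exampleZeroValueUse() {
	var m Map // zero value, usable without initialization
	m.Store("greeting", "hello")
	if v, ok := m.Load("greeting"); ok {
		_ = v // "hello"
	}
	if _, ok := m.Load("missing"); !ok {
		// Absent keys report ok == false; Load never panics on a miss.
	}
}
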
// expunged is an arbitrary pointer that marks entries which have been deleted
// from the dirty map.
var expunged = unsafe.Pointer(new(any))

// An entry is a slot in the map corresponding to a particular key.
type entry struct {
	// p points to the interface{} value stored for the entry.
	//
	// If p == nil, the entry has been deleted, and either m.dirty == nil or
	// m.dirty[key] is e.
	//
	// If p == expunged, the entry has been deleted, m.dirty != nil, and the entry
	// is missing from m.dirty.
	//
	// Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty
	// != nil, in m.dirty[key].
	//
	// An entry can be deleted by atomic replacement with nil: when m.dirty is
	// next created, it will atomically replace nil with expunged and leave
	// m.dirty[key] unset.
	//
	// An entry's associated value can be updated by atomic replacement, provided
	// p != expunged. If p == expunged, an entry's associated value can be updated
	// only after first setting m.dirty[key] = e so that lookups using the dirty
	// map find the entry.
	p unsafe.Pointer // *interface{}
}

func newEntry(i any) *entry {
	return &entry{p: unsafe.Pointer(&i)}
}

// Load returns the value stored in the map for a key, or nil if no
// value is present.
// The ok result indicates whether value was found in the map.
func (m *Map) Load(key any) (value any, ok bool) {
	read, _ := m.read.Load().(readOnly)
	e, ok := read.m[key]
	if !ok && read.amended {
		m.mu.Lock()
		// Avoid reporting a spurious miss if m.dirty got promoted while we were
		// blocked on m.mu. (If further loads of the same key will not miss, it's
		// not worth copying the dirty map for this key.)
		read, _ = m.read.Load().(readOnly)
		e, ok = read.m[key]
		if !ok && read.amended {
			e, ok = m.dirty[key]
			// Regardless of whether the entry was present, record a miss: this key
			// will take the slow path until the dirty map is promoted to the read
			// map.
			m.missLocked()
		}
		m.mu.Unlock()
	}
	if !ok {
		return nil, false
	}
	return e.load()
}

func (e *entry) load() (value any, ok bool) {
	p := atomic.LoadPointer(&e.p)
	if p == nil || p == expunged {
		return nil, false
	}
	return *(*any)(p), true
}

// Store sets the value for a key.
func (m *Map) Store(key, value any) {
	read, _ := m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok && e.tryStore(&value) {
		return
	}

	m.mu.Lock()
	read, _ = m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok {
		if e.unexpungeLocked() {
			// The entry was previously expunged, which implies that there is a
			// non-nil dirty map and this entry is not in it.
			m.dirty[key] = e
		}
		e.storeLocked(&value)
	} else if e, ok := m.dirty[key]; ok {
		e.storeLocked(&value)
	} else {
		if !read.amended {
			// We're adding the first new key to the dirty map.
			// Make sure it is allocated and mark the read-only map as incomplete.
			m.dirtyLocked()
			m.read.Store(readOnly{m: read.m, amended: true})
		}
		m.dirty[key] = newEntry(value)
	}
	m.mu.Unlock()
}
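
// exampleDisjointWriters is a hypothetical sketch (illustrative only) of use
// case (2) from the Map documentation: goroutines overwriting disjoint key
// sets. Once a key's entry has been promoted into the read map, each
// overwrite succeeds on tryStore's lock-free CAS fast path at the top of
// Store, so the goroutines rarely contend on m.mu.
func exampleDisjointWriters() {
	var m Map
	var wg WaitGroup
	for g := 0; g < 4; g++ {
		wg.Add(1)
		go func(g int) {
			defer wg.Done()
			for i := 0; i < 100; i++ {
				m.Store(g, i) // each goroutine writes only its own key
			}
		}(g)
	}
	wg.Wait()
}
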
// tryStore stores a value if the entry has not been expunged.
//
// If the entry is expunged, tryStore returns false and leaves the entry
// unchanged.
func (e *entry) tryStore(i *any) bool {
	for {
		p := atomic.LoadPointer(&e.p)
		if p == expunged {
			return false
		}
		if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) {
			return true
		}
	}
}

// unexpungeLocked ensures that the entry is not marked as expunged.
//
// If the entry was previously expunged, it must be added to the dirty map
// before m.mu is unlocked.
func (e *entry) unexpungeLocked() (wasExpunged bool) {
	return atomic.CompareAndSwapPointer(&e.p, expunged, nil)
}

// storeLocked unconditionally stores a value to the entry.
//
// The entry must be known not to be expunged.
func (e *entry) storeLocked(i *any) {
	atomic.StorePointer(&e.p, unsafe.Pointer(i))
}

// LoadOrStore returns the existing value for the key if present.
// Otherwise, it stores and returns the given value.
// The loaded result is true if the value was loaded, false if stored.
func (m *Map) LoadOrStore(key, value any) (actual any, loaded bool) {
	// Avoid locking if it's a clean hit.
	read, _ := m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok {
		actual, loaded, ok := e.tryLoadOrStore(value)
		if ok {
			return actual, loaded
		}
	}

	m.mu.Lock()
	read, _ = m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok {
		if e.unexpungeLocked() {
			m.dirty[key] = e
		}
		actual, loaded, _ = e.tryLoadOrStore(value)
	} else if e, ok := m.dirty[key]; ok {
		actual, loaded, _ = e.tryLoadOrStore(value)
		m.missLocked()
	} else {
		if !read.amended {
			// We're adding the first new key to the dirty map.
			// Make sure it is allocated and mark the read-only map as incomplete.
			m.dirtyLocked()
			m.read.Store(readOnly{m: read.m, amended: true})
		}
		m.dirty[key] = newEntry(value)
		actual, loaded = value, false
	}
	m.mu.Unlock()

	return actual, loaded
}

// tryLoadOrStore atomically loads or stores a value if the entry is not
// expunged.
//
// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and
// returns with ok==false.
func (e *entry) tryLoadOrStore(i any) (actual any, loaded, ok bool) {
	p := atomic.LoadPointer(&e.p)
	if p == expunged {
		return nil, false, false
	}
	if p != nil {
		return *(*any)(p), true, true
	}

	// Copy the interface after the first load to make this method more amenable
	// to escape analysis: if we hit the "load" path or the entry is expunged, we
	// shouldn't bother heap-allocating.
	ic := i
	for {
		if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) {
			return i, false, true
		}
		p = atomic.LoadPointer(&e.p)
		if p == expunged {
			return nil, false, false
		}
		if p != nil {
			return *(*any)(p), true, true
		}
	}
}
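
// exampleGrowOnlyCache is a hypothetical sketch (the function and parameters
// are illustrative) of use case (1) from the Map documentation: a cache that
// only grows. Hits take the lock-free Load path; on a miss, LoadOrStore
// publishes a computed value, and if another goroutine raced us its value
// wins, so all callers observe a single cached result per key.
func exampleGrowOnlyCache(m *Map, key string, compute func(string) any) any {
	if v, ok := m.Load(key); ok {
		return v // common case: lock-free hit in the read map
	}
	v, _ := m.LoadOrStore(key, compute(key))
	return v
}
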
// LoadAndDelete deletes the value for a key, returning the previous value if any.
// The loaded result reports whether the key was present.
func (m *Map) LoadAndDelete(key any) (value any, loaded bool) {
	read, _ := m.read.Load().(readOnly)
	e, ok := read.m[key]
	if !ok && read.amended {
		m.mu.Lock()
		read, _ = m.read.Load().(readOnly)
		e, ok = read.m[key]
		if !ok && read.amended {
			e, ok = m.dirty[key]
			delete(m.dirty, key)
			// Regardless of whether the entry was present, record a miss: this key
			// will take the slow path until the dirty map is promoted to the read
			// map.
			m.missLocked()
		}
		m.mu.Unlock()
	}
	if ok {
		return e.delete()
	}
	return nil, false
}

// Delete deletes the value for a key.
func (m *Map) Delete(key any) {
	m.LoadAndDelete(key)
}

func (e *entry) delete() (value any, ok bool) {
	for {
		p := atomic.LoadPointer(&e.p)
		if p == nil || p == expunged {
			return nil, false
		}
		if atomic.CompareAndSwapPointer(&e.p, p, nil) {
			return *(*any)(p), true
		}
	}
}

// Range calls f sequentially for each key and value present in the map.
// If f returns false, range stops the iteration.
//
// Range does not necessarily correspond to any consistent snapshot of the Map's
// contents: no key will be visited more than once, but if the value for any key
// is stored or deleted concurrently (including by f), Range may reflect any
// mapping for that key from any point during the Range call. Range does not
// block other methods on the receiver; even f itself may call any method on m.
//
// Range may be O(N) with the number of elements in the map even if f returns
// false after a constant number of calls.
func (m *Map) Range(f func(key, value any) bool) {
	// We need to be able to iterate over all of the keys that were already
	// present at the start of the call to Range.
	// If read.amended is false, then read.m satisfies that property without
	// requiring us to hold m.mu for a long time.
	read, _ := m.read.Load().(readOnly)
	if read.amended {
		// m.dirty contains keys not in read.m. Fortunately, Range is already O(N)
		// (assuming the caller does not break out early), so a call to Range
		// amortizes an entire copy of the map: we can promote the dirty copy
		// immediately!
		m.mu.Lock()
		read, _ = m.read.Load().(readOnly)
		if read.amended {
			read = readOnly{m: m.dirty}
			m.read.Store(read)
			m.dirty = nil
			m.misses = 0
		}
		m.mu.Unlock()
	}

	for k, e := range read.m {
		v, ok := e.load()
		if !ok {
			continue
		}
		if !f(k, v) {
			break
		}
	}
}

// missLocked records a load that had to consult the dirty map and, once the
// miss count reaches the size of the dirty map, promotes the dirty map to
// the read map.
func (m *Map) missLocked() {
	m.misses++
	if m.misses < len(m.dirty) {
		return
	}
	m.read.Store(readOnly{m: m.dirty})
	m.dirty = nil
	m.misses = 0
}

// dirtyLocked initializes the dirty map, if it is nil, as a copy of the read
// map, expunging and omitting entries whose values have been deleted.
func (m *Map) dirtyLocked() {
	if m.dirty != nil {
		return
	}

	read, _ := m.read.Load().(readOnly)
	m.dirty = make(map[any]*entry, len(read.m))
	for k, e := range read.m {
		if !e.tryExpungeLocked() {
			m.dirty[k] = e
		}
	}
}

func (e *entry) tryExpungeLocked() (isExpunged bool) {
	p := atomic.LoadPointer(&e.p)
	for p == nil {
		if atomic.CompareAndSwapPointer(&e.p, nil, expunged) {
			return true
		}
		p = atomic.LoadPointer(&e.p)
	}
	return p == expunged
}
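
// examplePromotion is a hypothetical sketch (illustrative only) tracing the
// read/dirty lifecycle implemented by dirtyLocked and missLocked above.
func examplePromotion() {
	var m Map
	m.Store("k", 1) // new key: dirtyLocked allocates m.dirty; read becomes amended
	m.Load("k")     // miss under mu; misses reaches len(m.dirty), so missLocked promotes
	m.Load("k")     // now served lock-free from the promoted read map
}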