// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package syncmap provides a concurrent map implementation.
// It is a prototype for a proposed addition to the sync package
// in the standard library.
// (https://golang.org/issue/18177)
package syncmap

import (
	"sync"
	"sync/atomic"
	"unsafe"
)

// Map is a concurrent map with amortized-constant-time loads, stores, and deletes.
// It is safe for multiple goroutines to call a Map's methods concurrently.
//
// The zero Map is valid and empty.
//
// A Map must not be copied after first use.
type Map struct {
	mu sync.Mutex

	// read contains the portion of the map's contents that are safe for
	// concurrent access (with or without mu held).
	//
	// The read field itself is always safe to load, but must only be stored with
	// mu held.
	//
	// Entries stored in read may be updated concurrently without mu, but updating
	// a previously-expunged entry requires that the entry be copied to the dirty
	// map and unexpunged with mu held.
	read atomic.Value // readOnly

	// dirty contains the portion of the map's contents that require mu to be
	// held. To ensure that the dirty map can be promoted to the read map quickly,
	// it also includes all of the non-expunged entries in the read map.
	//
	// Expunged entries are not stored in the dirty map. An expunged entry in the
	// clean map must be unexpunged and added to the dirty map before a new value
	// can be stored to it.
	//
	// If the dirty map is nil, the next write to the map will initialize it by
	// making a shallow copy of the clean map, omitting stale entries.
	dirty map[interface{}]*entry

	// misses counts the number of loads since the read map was last updated that
	// needed to lock mu to determine whether the key was present.
	//
	// Once enough misses have occurred to cover the cost of copying the dirty
	// map, the dirty map will be promoted to the read map (in the unamended
	// state) and the next store to the map will make a new dirty copy.
	misses int
}
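
// A minimal usage sketch (illustrative, assuming this package is imported as
// golang.org/x/sync/syncmap): the zero Map is ready to use, keys and values
// are interface{}, and methods may be called from multiple goroutines without
// extra locking.
//
//	var m syncmap.Map
//	m.Store("answer", 42)
//	if v, ok := m.Load("answer"); ok {
//		_ = v.(int) // values come back as interface{} and must be type-asserted
//	}
//	m.Delete("answer")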

// readOnly is an immutable struct stored atomically in the Map.read field.
type readOnly struct {
	m       map[interface{}]*entry
	amended bool // true if the dirty map contains some key not in m.
}

// expunged is an arbitrary pointer that marks entries which have been deleted
// from the dirty map.
var expunged = unsafe.Pointer(new(interface{}))

// An entry is a slot in the map corresponding to a particular key.
type entry struct {
	// p points to the interface{} value stored for the entry.
	//
	// If p == nil, the entry has been deleted and m.dirty == nil.
	//
	// If p == expunged, the entry has been deleted, m.dirty != nil, and the entry
	// is missing from m.dirty.
	//
	// Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty
	// != nil, in m.dirty[key].
	//
	// An entry can be deleted by atomic replacement with nil: when m.dirty is
	// next created, it will atomically replace nil with expunged and leave
	// m.dirty[key] unset.
	//
	// An entry's associated value can be updated by atomic replacement, provided
	// p != expunged. If p == expunged, an entry's associated value can be updated
	// only after first setting m.dirty[key] = e so that lookups using the dirty
	// map find the entry.
	p unsafe.Pointer // *interface{}
}

func newEntry(i interface{}) *entry {
	return &entry{p: unsafe.Pointer(&i)}
}

// Load returns the value stored in the map for a key, or nil if no
// value is present.
// The ok result indicates whether value was found in the map.
func (m *Map) Load(key interface{}) (value interface{}, ok bool) {
	read, _ := m.read.Load().(readOnly)
	e, ok := read.m[key]
	if !ok && read.amended {
		m.mu.Lock()
		// Avoid reporting a spurious miss if m.dirty got promoted while we were
		// blocked on m.mu. (If further loads of the same key will not miss, it's
		// not worth copying the dirty map for this key.)
		read, _ = m.read.Load().(readOnly)
		e, ok = read.m[key]
		if !ok && read.amended {
			e, ok = m.dirty[key]
			// Regardless of whether the entry was present, record a miss: this key
			// will take the slow path until the dirty map is promoted to the read
			// map.
			m.missLocked()
		}
		m.mu.Unlock()
	}
	if !ok {
		return nil, false
	}
	return e.load()
}
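
// Illustrative sketch (not part of the original file): Load is safe to call
// concurrently with Store from other goroutines, and hits on the read-only
// map take no lock.
//
//	var m Map
//	var wg sync.WaitGroup
//	for i := 0; i < 4; i++ {
//		wg.Add(1)
//		go func(i int) {
//			defer wg.Done()
//			m.Store(i, i*i)
//			if v, ok := m.Load(i); ok {
//				_ = v.(int)
//			}
//		}(i)
//	}
//	wg.Wait()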

func (e *entry) load() (value interface{}, ok bool) {
	p := atomic.LoadPointer(&e.p)
	if p == nil || p == expunged {
		return nil, false
	}
	return *(*interface{})(p), true
}

// Store sets the value for a key.
func (m *Map) Store(key, value interface{}) {
	read, _ := m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok && e.tryStore(&value) {
		return
	}

	m.mu.Lock()
	read, _ = m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok {
		if e.unexpungeLocked() {
			// The entry was previously expunged, which implies that there is a
			// non-nil dirty map and this entry is not in it.
			m.dirty[key] = e
		}
		e.storeLocked(&value)
	} else if e, ok := m.dirty[key]; ok {
		e.storeLocked(&value)
	} else {
		if !read.amended {
			// We're adding the first new key to the dirty map.
			// Make sure it is allocated and mark the read-only map as incomplete.
			m.dirtyLocked()
			m.read.Store(readOnly{m: read.m, amended: true})
		}
		m.dirty[key] = newEntry(value)
	}
	m.mu.Unlock()
}

// tryStore stores a value if the entry has not been expunged.
//
// If the entry is expunged, tryStore returns false and leaves the entry
// unchanged.
func (e *entry) tryStore(i *interface{}) bool {
	p := atomic.LoadPointer(&e.p)
	if p == expunged {
		return false
	}
	for {
		if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) {
			return true
		}
		p = atomic.LoadPointer(&e.p)
		if p == expunged {
			return false
		}
	}
}

// unexpungeLocked ensures that the entry is not marked as expunged.
//
// If the entry was previously expunged, it must be added to the dirty map
// before m.mu is unlocked.
func (e *entry) unexpungeLocked() (wasExpunged bool) {
	return atomic.CompareAndSwapPointer(&e.p, expunged, nil)
}

// storeLocked unconditionally stores a value to the entry.
//
// The entry must be known not to be expunged.
func (e *entry) storeLocked(i *interface{}) {
	atomic.StorePointer(&e.p, unsafe.Pointer(i))
}

// LoadOrStore returns the existing value for the key if present.
// Otherwise, it stores and returns the given value.
// The loaded result is true if the value was loaded, false if stored.
func (m *Map) LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) {
	// Avoid locking if it's a clean hit.
	read, _ := m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok {
		actual, loaded, ok := e.tryLoadOrStore(value)
		if ok {
			return actual, loaded
		}
	}

	m.mu.Lock()
	read, _ = m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok {
		if e.unexpungeLocked() {
			m.dirty[key] = e
		}
		actual, loaded, _ = e.tryLoadOrStore(value)
	} else if e, ok := m.dirty[key]; ok {
		actual, loaded, _ = e.tryLoadOrStore(value)
		m.missLocked()
	} else {
		if !read.amended {
			// We're adding the first new key to the dirty map.
			// Make sure it is allocated and mark the read-only map as incomplete.
			m.dirtyLocked()
			m.read.Store(readOnly{m: read.m, amended: true})
		}
		m.dirty[key] = newEntry(value)
		actual, loaded = value, false
	}
	m.mu.Unlock()

	return actual, loaded
}
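
// Illustrative sketch (not part of the original file): LoadOrStore is the
// building block for store-once-per-key patterns. Whichever caller stores
// first wins, and every caller observes that same value. Note that the value
// argument is evaluated even when an existing value is loaded; newValue here
// is a hypothetical constructor.
//
//	var m Map
//	actual, loaded := m.LoadOrStore("config", newValue())
//	if loaded {
//		// Some caller already stored a value for "config"; actual is that value.
//	} else {
//		// Our value was stored; actual is the value we passed in.
//	}
//	_ = actual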

// tryLoadOrStore atomically loads or stores a value if the entry is not
// expunged.
//
// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and
// returns with ok==false.
func (e *entry) tryLoadOrStore(i interface{}) (actual interface{}, loaded, ok bool) {
	p := atomic.LoadPointer(&e.p)
	if p == expunged {
		return nil, false, false
	}
	if p != nil {
		return *(*interface{})(p), true, true
	}

	// Copy the interface after the first load to make this method more amenable
	// to escape analysis: if we hit the "load" path or the entry is expunged, we
	// shouldn't bother heap-allocating.
	ic := i
	for {
		if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) {
			return i, false, true
		}
		p = atomic.LoadPointer(&e.p)
		if p == expunged {
			return nil, false, false
		}
		if p != nil {
			return *(*interface{})(p), true, true
		}
	}
}

// Delete deletes the value for a key.
func (m *Map) Delete(key interface{}) {
	read, _ := m.read.Load().(readOnly)
	e, ok := read.m[key]
	if !ok && read.amended {
		m.mu.Lock()
		read, _ = m.read.Load().(readOnly)
		e, ok = read.m[key]
		if !ok && read.amended {
			delete(m.dirty, key)
		}
		m.mu.Unlock()
	}
	if ok {
		e.delete()
	}
}

func (e *entry) delete() (hadValue bool) {
	for {
		p := atomic.LoadPointer(&e.p)
		if p == nil || p == expunged {
			return false
		}
		if atomic.CompareAndSwapPointer(&e.p, p, nil) {
			return true
		}
	}
}

// Range calls f sequentially for each key and value present in the map.
// If f returns false, range stops the iteration.
//
// Range does not necessarily correspond to any consistent snapshot of the Map's
// contents: no key will be visited more than once, but if the value for any key
// is stored or deleted concurrently, Range may reflect any mapping for that key
// from any point during the Range call.
//
// Range may be O(N) with the number of elements in the map even if f returns
// false after a constant number of calls.
func (m *Map) Range(f func(key, value interface{}) bool) {
	// We need to be able to iterate over all of the keys that were already
	// present at the start of the call to Range.
	// If read.amended is false, then read.m satisfies that property without
	// requiring us to hold m.mu for a long time.
	read, _ := m.read.Load().(readOnly)
	if read.amended {
		// m.dirty contains keys not in read.m. Fortunately, Range is already O(N)
		// (assuming the caller does not break out early), so a call to Range
		// amortizes an entire copy of the map: we can promote the dirty copy
		// immediately!
		m.mu.Lock()
		read, _ = m.read.Load().(readOnly)
		if read.amended {
			read = readOnly{m: m.dirty}
			m.read.Store(read)
			m.dirty = nil
			m.misses = 0
		}
		m.mu.Unlock()
	}

	for k, e := range read.m {
		v, ok := e.load()
		if !ok {
			continue
		}
		if !f(k, v) {
			break
		}
	}
}
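
// Illustrative sketch (not part of the original file): Range visits each key
// at most once and stops as soon as the callback returns false; the
// interface{} arguments are type-asserted by the caller as needed.
//
//	m.Range(func(key, value interface{}) bool {
//		k := key.(string)
//		v := value.(int)
//		_, _ = k, v
//		return true // return false to stop the iteration early
//	})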

func (m *Map) missLocked() {
	m.misses++
	if m.misses < len(m.dirty) {
		return
	}
	m.read.Store(readOnly{m: m.dirty})
	m.dirty = nil
	m.misses = 0
}

func (m *Map) dirtyLocked() {
	if m.dirty != nil {
		return
	}

	read, _ := m.read.Load().(readOnly)
	m.dirty = make(map[interface{}]*entry, len(read.m))
	for k, e := range read.m {
		if !e.tryExpungeLocked() {
			m.dirty[k] = e
		}
	}
}

func (e *entry) tryExpungeLocked() (isExpunged bool) {
	p := atomic.LoadPointer(&e.p)
	for p == nil {
		if atomic.CompareAndSwapPointer(&e.p, nil, expunged) {
			return true
		}
		p = atomic.LoadPointer(&e.p)
	}
	return p == expunged
}