6 #ifndef XENIUM_VYUKOV_HASH_MAP_IMPL
7 #error "This is an impl file and must not be included directly!"
10 #include <xenium/acquire_guard.hpp>
11 #include <xenium/backoff.hpp>
12 #include <xenium/parameter.hpp>
13 #include <xenium/policy.hpp>
20 #pragma warning(disable: 26495)
21 #pragma warning(disable: 4324)
// bucket_state packs a bucket's lock flag, item count, delete marker and
// version counter into a single 32-bit word so all of them can be read and
// updated with one atomic operation on bucket::state:
//   bit 0                                -> lock flag (lock == 1)
//   bits [1 .. 1+item_counter_bits)      -> item count
//   bits [delete_marker_shift .. )       -> delete marker (same width as count)
//   bits [version_shift .. 32)           -> version counter
// NOTE(review): the embedded original line numbers are non-contiguous — some
// statements (e.g. `return result;` lines, closing braces) are elided in this
// extract. Code is kept byte-identical.
26 template <
class Key,
class Value,
class... Policies>
27 struct vyukov_hash_map<Key, Value, Policies...>::bucket_state {
// default state: unlocked, zero items, no delete marker, version 0
28 bucket_state() noexcept : value() {}
// copy of this state with the lock bit set
30 bucket_state locked() const noexcept {
return value | lock; }
// presumably a copy with the lock bit cleared — body elided in this extract
31 bucket_state clear_lock()
const {
// copy of this state with the version counter advanced by one
35 bucket_state new_version() const noexcept {
return value + version_inc; }
// copy with the item count incremented (caller must ensure capacity is left)
36 bucket_state inc_item_count()
const {
37 assert(item_count() < bucket_item_count);
38 bucket_state result(value + item_count_inc);
39 assert(result.item_count() == item_count() + 1);
// copy with the item count decremented (count must be non-zero)
42 bucket_state dec_item_count()
const {
43 assert(item_count() > 0);
44 bucket_state result(value - item_count_inc);
45 assert(result.item_count() == item_count() - 1);
// copy with the delete marker set; callers pass slot index + 1, and no
// marker may be set yet
48 bucket_state set_delete_marker(std::uint32_t marker)
const {
49 assert(delete_marker() == 0);
50 bucket_state result(value | (marker << delete_marker_shift));
51 assert(result.delete_marker() == marker);
// equality is plain comparison of the packed word
55 bool operator==(bucket_state r)
const noexcept {
return this->value == r.value; }
56 bool operator!=(bucket_state r)
const noexcept {
return this->value != r.value; }
// accessors decode the individual fields from the packed word
58 std::uint32_t item_count() const noexcept {
return (value >> 1) & item_count_mask; }
59 std::uint32_t delete_marker() const noexcept {
return (value >> delete_marker_shift) & item_count_mask; }
60 std::uint32_t version() const noexcept {
return value >> version_shift; }
61 bool is_locked() const noexcept {
return (value & lock) != 0; }
// construction from the raw packed value (used by the arithmetic above)
64 bucket_state(std::uint32_t value) noexcept : value(value) {}
// bit-layout constants derived from the bucket capacity
85 static constexpr std::size_t item_counter_bits = utils::find_last_bit_set(bucket_item_count);
87 static constexpr std::size_t item_count_shift = 1;
88 static constexpr std::size_t delete_marker_shift = item_count_shift + item_counter_bits;
89 static constexpr std::size_t version_shift = delete_marker_shift + item_counter_bits;
91 static constexpr std::uint32_t lock = 1;
92 static constexpr std::uint32_t version_inc = 1 << version_shift;
93 static constexpr std::uint32_t item_count_inc = 1 << item_count_shift;
95 static constexpr std::uint32_t item_count_mask = (1 << item_counter_bits) - 1;
// A hash bucket: up to bucket_item_count key/value slots stored inline,
// a packed state word (lock/count/marker/version), and `head` — an overflow
// list of extension_items for entries that do not fit inline.
98 template <
class Key,
class Value,
class... Policies>
99 struct vyukov_hash_map<Key, Value, Policies...>::bucket {
100 std::atomic<bucket_state> state;
101 std::atomic<extension_item*> head;
102 typename traits::storage_key_type key[bucket_item_count];
103 typename traits::storage_value_type value[bucket_item_count];
// Overflow entry for a bucket: one key/value pair plus an intrusive
// next pointer (used both for a bucket's overflow chain and for the
// extension_bucket free list).
106 template <
class Key,
class Value,
class... Policies>
107 struct vyukov_hash_map<Key, Value, Policies...>::extension_item {
108 typename traits::storage_key_type key;
109 typename traits::storage_value_type value;
110 std::atomic<extension_item*> next;
// A pool of extension_items guarded by a simple spin lock; `head` is the
// free list threaded through the `items` array.
// NOTE(review): the spin/backoff loop structure of acquire_lock is partially
// elided in this extract (original lines are missing); kept byte-identical.
113 template <
class Key,
class Value,
class... Policies>
114 struct vyukov_hash_map<Key, Value, Policies...>::extension_bucket {
115 std::atomic<std::uint32_t> lock;
116 std::atomic<extension_item*> head;
117 extension_item items[extension_item_count];
119 void acquire_lock() {
// spin while the lock is held (relaxed read), then attempt to take it;
// exchange with acquire order makes the critical section visible
122 while (lock.load(std::memory_order_relaxed))
125 if (lock.exchange(1, std::memory_order_acquire) == 0)
130 void release_lock() {
// release order publishes all writes done while holding the lock
132 lock.store(0, std::memory_order_release);
136 template <
class Key,
class Value,
class... Policies>
137 struct alignas(64) vyukov_hash_map<Key, Value, Policies...>::block :
138 reclaimer::template enable_concurrent_ptr<block>
141 std::uint32_t bucket_count;
142 std::uint32_t extension_bucket_count;
143 extension_bucket* extension_buckets;
146 std::uint32_t index(
const key_type& key)
const {
return static_cast<std::uint32_t
>(key & mask); }
147 bucket* buckets() {
return reinterpret_cast<bucket*
>(
this+1); }
149 void operator delete(
void* p) { ::operator
delete(p, cacheline_size); }
152 template <
class Key,
class Value,
class... Policies>
153 struct vyukov_hash_map<Key, Value, Policies...>::unlocker {
154 unlocker(bucket& locked_bucket, bucket_state state) : state(state), locked_bucket(locked_bucket) {}
157 assert(locked_bucket.state.load().is_locked());
158 locked_bucket.state.store(state, std::memory_order_relaxed);
161 void unlock(bucket_state new_state, std::memory_order order) {
163 assert(locked_bucket.state.load().is_locked());
164 locked_bucket.state.store(new_state, order);
167 void disable() { enabled =
false; }
171 bucket& locked_bucket;
174 template <
class Key,
class Value,
class... Policies>
175 vyukov_hash_map<Key, Value, Policies...>::vyukov_hash_map(std::size_t initial_capacity) :
178 auto b = allocate_block(
static_cast<std::uint32_t
>(utils::next_power_of_two(initial_capacity)));
180 throw std::bad_alloc();
181 data_block.store(b, std::memory_order_relaxed);
184 template <
class Key,
class Value,
class... Policies>
185 vyukov_hash_map<Key, Value, Policies...>::~vyukov_hash_map() {
192 delete data_block.load().get();
// (signature line elided in this extract — presumably bool emplace(key, value))
// Delegates to do_get_or_emplace<false>: the factory moves the captured
// value into place; the callback is a no-op since no accessor is returned.
195 template <
class Key,
class Value,
class... Policies>
197 return do_get_or_emplace<false>(
199 [v = std::move(value)]()
mutable {
return std::move(v); },
200 [](accessor&&,
auto&) {});
// (signature line elided — get_or_emplace(key, Args&&... args))
// Returns {accessor to the entry, true if newly inserted}. The value is
// constructed eagerly from args; the callback captures the accessor.
203 template <
class Key,
class Value,
class... Policies>
204 template <
class... Args>
206 -> std::pair<accessor, bool>
208 std::pair<accessor, bool> result;
209 result.second = do_get_or_emplace<true>(
211 [&]{
return value_type(std::forward<Args>(args)...); },
212 [&result](accessor&& acc,
auto&) {
213 result.first = std::move(acc);
// Like get_or_emplace, but the value is produced by `factory`, which is
// invoked only if the key is not already present.
218 template <
class Key,
class Value,
class... Policies>
219 template <
class Factory>
220 auto vyukov_hash_map<Key, Value, Policies...>::get_or_emplace_lazy(key_type key, Factory&& factory)
221 -> std::pair<accessor, bool>
223 std::pair<accessor, bool> result;
224 result.second = do_get_or_emplace<true>(
226 std::forward<Factory>(factory),
227 [&result](accessor&& acc,
auto&) {
228 result.first = std::move(acc);
// Core insert/lookup: locks the key's bucket, then
//   1) scans the inline slots — if found, invoke callback and unlock unchanged;
//   2) otherwise, if an inline slot is free, store there and unlock with
//      incremented item count (release publishes the new item);
//   3) otherwise scans the overflow chain — if found, callback + unlock;
//   4) otherwise allocates an extension_item, stores the new entry, and
//      pushes it at the head of the overflow chain (release store publishes).
// Returns true on insertion, false if the key already existed (return
// statements are elided in this extract).
// NOTE(review): the allocation-failure path (extension == nullptr) and the
// exception-cleanup context around free_extension_item are elided here —
// presumably grow-and-retry resp. a try/catch; verify against full source.
233 template <
class Key,
class Value,
class... Policies>
234 template <
bool AcquireAccessor,
class Factory,
class Callback>
235 bool vyukov_hash_map<Key, Value, Policies...>::do_get_or_emplace(Key&& key, Factory&& factory, Callback&& callback) {
236 const hash_t h = hash{}(key);
242 bucket& bucket = lock_bucket(h, b, state);
244 unlocker unlocker(bucket, state);
246 std::uint32_t item_count = state.item_count();
// 1) inline slots
248 for (std::uint32_t i = 0; i != item_count; ++i)
249 if (traits::template compare_key<AcquireAccessor>(bucket.key[i], bucket.value[i], key, h, acc)) {
250 callback(std::move(acc), bucket.value[i]);
251 unlocker.unlock(state, std::memory_order_relaxed);
// 2) free inline slot
255 if (item_count < bucket_item_count) {
256 traits::template store_item<AcquireAccessor>(bucket.key[item_count], bucket.value[item_count], h,
257 std::move(key), factory(), std::memory_order_relaxed, acc);
258 callback(std::move(acc), bucket.value[item_count]);
261 unlocker.unlock(state.inc_item_count(), std::memory_order_release);
// 3) overflow chain
266 for (extension_item* extension = bucket.head.load(std::memory_order_relaxed);
267 extension !=
nullptr;
268 extension = extension->next.load(std::memory_order_relaxed)) {
269 if (traits::template compare_key<AcquireAccessor>(extension->key, extension->value, key, h, acc)) {
270 callback(std::move(acc), extension->value);
271 unlocker.unlock(state, std::memory_order_relaxed);
// 4) new extension item
276 extension_item* extension = allocate_extension_item(b.get(), h);
277 if (extension ==
nullptr) {
283 traits::template store_item<AcquireAccessor>(extension->key, extension->value, h,
284 std::move(key), factory(), std::memory_order_relaxed, acc);
286 free_extension_item(extension);
289 callback(std::move(acc), extension->value);
290 auto old_head = bucket.head.load(std::memory_order_relaxed);
291 extension->next.store(old_head, std::memory_order_relaxed);
293 bucket.head.store(extension, std::memory_order_release);
296 unlocker.unlock(state, std::memory_order_release);
// (signature elided — presumably bool erase(const key_type&))
// Removes the entry via do_extract and hands the detached value to the
// traits' reclamation path.
301 template <
class Key,
class Value,
class... Policies>
304 bool result = do_extract(key, acc);
305 traits::reclaim(acc);
// (signature elided — companion removal overload; differs from the one above
// only in using the internal reclamation path)
309 template <
class Key,
class Value,
class... Policies>
311 bool result = do_extract(key, acc);
312 traits::reclaim_internal(acc);
// Removes the entry for `key` (if any), leaving an accessor to the removed
// value in `result`. Locks the bucket via CAS (retry path elided), then:
//  - inline hit with an overflow chain present: set the delete marker for
//    slot i, move the chain head's key/value into slot i, bump the version,
//    unlink the chain head and free it;
//  - inline hit without overflow: fill the hole by moving the last inline
//    item down (unless removing the last one), then unlock with new version
//    and decremented count;
//  - otherwise walk the overflow chain, unlink the matching item, unlock
//    with a new version, and free the item;
//  - no match: unlock with the state unchanged.
// The delete marker + version dance lets lock-free readers (try_get_value)
// detect a concurrent removal of the slot they are reading.
316 template <
class Key,
class Value,
class... Policies>
318 const hash_t h =
hash{}(key);
324 b.acquire(data_block, std::memory_order_acquire);
325 const std::size_t bucket_idx = h & b->mask;
326 bucket& bucket = b->buckets()[bucket_idx];
327 bucket_state state = bucket.state.load(std::memory_order_relaxed);
328 std::uint32_t item_count = state.item_count();
// back off / retry while another thread holds the bucket lock
333 if (state.is_locked())
336 auto locked_state = state.locked();
338 if (!bucket.state.compare_exchange_strong(state, locked_state, std::memory_order_acquire,
339 std::memory_order_relaxed)) {
344 unlocker unlocker(bucket, state);
// scan inline slots
348 for (std::uint32_t i = 0; i != item_count; ++i) {
349 if (traits::template compare_key<true>(bucket.key[i], bucket.value[i], key, h, result)) {
350 extension_item* extension = bucket.head.load(std::memory_order_relaxed);
// case: refill slot i from the overflow chain head
353 bucket.state.store(locked_state.set_delete_marker(i + 1), std::memory_order_relaxed);
355 auto k = extension->key.load(std::memory_order_relaxed);
356 auto v = extension->value.load(std::memory_order_relaxed);
357 bucket.key[i].store(k, std::memory_order_relaxed);
359 bucket.value[i].store(v, std::memory_order_release);
361 locked_state = locked_state.new_version();
364 bucket.state.store(locked_state, std::memory_order_release);
366 extension_item* extension_next = extension->next.load(std::memory_order_relaxed);
368 bucket.head.store(extension_next, std::memory_order_release);
372 unlocker.unlock(locked_state.new_version().clear_lock(), std::memory_order_release);
374 free_extension_item(extension);
// case: no overflow chain — move the last inline item into the hole
376 if (i != item_count - 1) {
378 bucket.state.store(locked_state.set_delete_marker(i + 1), std::memory_order_relaxed);
380 auto k = bucket.key[item_count - 1].load(std::memory_order_relaxed);
381 auto v = bucket.value[item_count - 1].load(std::memory_order_relaxed);
382 bucket.key[i].store(k, std::memory_order_relaxed);
384 bucket.value[i].store(v, std::memory_order_release);
390 unlocker.unlock(state.new_version().dec_item_count(), std::memory_order_release);
// scan the overflow chain
396 auto extension_prev = &bucket.head;
397 extension_item* extension = extension_prev->load(std::memory_order_relaxed);
399 if (traits::template compare_key<true>(extension->key, extension->value, key, h, result)) {
400 extension_item* extension_next = extension->next.load(std::memory_order_relaxed);
401 extension_prev->store(extension_next, std::memory_order_relaxed);
405 unlocker.unlock(state.new_version(), std::memory_order_release);
407 free_extension_item(extension);
410 extension_prev = &extension->next;
411 extension = extension_prev->load(std::memory_order_relaxed);
// key not found — unlock with the original (version-unchanged) state
417 unlocker.unlock(state, std::memory_order_relaxed);
// (signature elided — erase(iterator&): removes the element the iterator
// currently points to; the iterator holds the bucket lock.)
// Mirrors do_extract's removal strategies:
//  - iterator on an extension item: unlink it, bump the version (lock kept);
//  - iterator on an inline slot with an overflow chain: delete-marker the
//    slot, refill it from the chain head, unlink and free the head;
//  - inline slot without chain: move the last inline item into the hole and
//    store a new version with decremented count (still locked);
// afterwards the iterator is advanced/fixed up, moving to the next bucket
// when the current one is exhausted.
422 template <
class Key,
class Value,
class... Policies>
// branch: current element is an extension item
426 auto next = it.extension->next.load(std::memory_order_relaxed);
427 it.prev->store(next, std::memory_order_relaxed);
428 auto new_state = it.current_bucket_state.locked().new_version();
430 it.current_bucket->state.store(new_state, std::memory_order_release);
432 free_extension_item(it.extension);
435 it.move_to_next_bucket();
// branch: inline slot, overflow chain non-empty — refill from chain head
441 auto extension = it.current_bucket->head.load(std::memory_order_relaxed);
443 auto locked_state = it.current_bucket_state.locked();
444 auto marked_state = locked_state.set_delete_marker(it.index + 1);
445 it.current_bucket->state.store(marked_state, std::memory_order_relaxed);
446 assert(it.current_bucket->state.load().is_locked());
448 auto k = extension->key.load(std::memory_order_relaxed);
449 auto v = extension->value.load(std::memory_order_relaxed);
450 it.current_bucket->key[it.index].store(k, std::memory_order_relaxed);
452 it.current_bucket->value[it.index].store(v, std::memory_order_release);
455 locked_state = locked_state.new_version();
457 it.current_bucket->state.store(locked_state, std::memory_order_release);
458 assert(it.current_bucket->state.load().is_locked());
460 auto next = extension->next.load(std::memory_order_relaxed);
462 it.current_bucket->head.store(next, std::memory_order_release);
466 it.current_bucket->state.store(locked_state.new_version(), std::memory_order_release);
467 assert(it.current_bucket->state.load().is_locked());
468 free_extension_item(extension);
// branch: inline slot, no chain — fill hole with the last inline item
470 auto max_index = it.current_bucket_state.item_count() - 1;
471 if (it.index != max_index) {
473 auto locked_state = it.current_bucket_state.locked();
474 auto marked_state = locked_state.set_delete_marker(it.index + 1);
475 it.current_bucket->state.store(marked_state, std::memory_order_relaxed);
476 assert(it.current_bucket->state.load().is_locked());
478 auto k = it.current_bucket->key[max_index].load(std::memory_order_relaxed);
479 auto v = it.current_bucket->value[max_index].load(std::memory_order_relaxed);
480 it.current_bucket->key[it.index].store(k, std::memory_order_relaxed);
482 it.current_bucket->value[it.index].store(v, std::memory_order_release);
485 auto new_state = it.current_bucket_state.new_version().dec_item_count();
486 it.current_bucket_state = new_state;
// keep the lock bit set — the iterator still owns the bucket
489 it.current_bucket->state.store(new_state.locked(), std::memory_order_release);
490 assert(it.current_bucket->state.load().is_locked());
491 if (it.index == new_state.item_count()) {
492 it.prev = &it.current_bucket->head;
493 it.extension = it.current_bucket->head.load(std::memory_order_relaxed);
494 if (it.extension ==
nullptr)
495 it.move_to_next_bucket();
// (signature elided — presumably bool try_get_value(const key_type&, accessor&))
// Lock-free optimistic read, seqlock-style: read the bucket state, scan
// inline slots and the overflow chain, and after acquiring an accessor
// re-read the state — if the version changed (or the delete marker targets
// the slot being read) the read raced with a writer and must be redone
// (the retry control flow is elided in this extract).
500 template <
class Key,
class Value,
class... Policies>
502 const hash_t h = hash{}(key);
505 guarded_block b = acquire_guard(data_block, std::memory_order_acquire);
506 const std::size_t bucket_idx = h & b->mask;
507 bucket& bucket = b->buckets()[bucket_idx];
515 bucket_state state = bucket.state.load(std::memory_order_acquire);
517 std::uint32_t item_count = state.item_count();
518 for (std::uint32_t i = 0; i != item_count; ++i) {
519 if (traits::compare_trivial_key(bucket.key[i], key, h)) {
524 accessor acc = traits::acquire(bucket.value[i], std::memory_order_acquire);
// validate: a version bump means a writer touched this bucket meanwhile
527 const auto state2 = bucket.state.load(std::memory_order_relaxed);
528 if (state.version() != state2.version()) {
// a delete marker of i+1 means exactly this slot is being removed
534 const auto delete_marker = i + 1;
535 if (state2.delete_marker() == delete_marker) {
548 if (!traits::compare_nontrivial_key(acc, key))
551 value = std::move(acc);
// walk the overflow chain with the same validation after each step
557 extension_item* extension = bucket.head.load(std::memory_order_acquire);
559 if (traits::compare_trivial_key(extension->key, key, h)) {
564 accessor acc = traits::acquire(extension->value, std::memory_order_acquire);
566 auto state2 = bucket.state.load(std::memory_order_relaxed);
567 if (state.version() != state2.version()) {
573 if (!traits::compare_nontrivial_key(acc, key))
576 value = std::move(acc);
581 extension = extension->next.load(std::memory_order_acquire);
582 auto state2 = bucket.state.load(std::memory_order_relaxed);
583 if (state.version() != state2.version()) {
// final validation when the key was not found anywhere
590 auto state2 = bucket.state.load(std::memory_order_relaxed);
591 if (state.version() != state2.version()) {
// (signature elided — presumably void grow(bucket&, bucket_state))
// Tries to take the resize lock; the caller's bucket lock is released first
// by restoring its state. If another thread is already resizing, wait for
// it to finish instead of resizing again; otherwise perform the resize
// (the do_grow call is elided in this extract).
601 template <
class Key,
class Value,
class... Policies>
605 const int already_resizing = resize_lock.exchange(1, std::memory_order_relaxed);
// release the bucket lock before resizing/waiting
608 bucket.state.store(state, std::memory_order_relaxed);
614 if (already_resizing) {
// spin until the active resizer clears the lock (acquire pairs with
// its release store so the new block is visible)
618 while (resize_lock.load(std::memory_order_acquire) != 0)
627 template <
class Key,
class Value,
class... Policies>
628 void vyukov_hash_map<Key, Value, Policies...>::do_grow()
632 auto old_block = data_block.load(std::memory_order_acquire);
633 const auto bucket_count = old_block->bucket_count;
634 block* new_block = allocate_block(bucket_count * 2);
635 if (new_block ==
nullptr) {
636 resize_lock.store(0, std::memory_order_relaxed);
637 throw std::bad_alloc();
641 auto old_buckets = old_block->buckets();
642 for (std::uint32_t i = 0; i != bucket_count; ++i) {
643 auto& bucket = old_buckets[i];
646 auto st = bucket.state.load(std::memory_order_relaxed);
651 if (bucket.state.compare_exchange_strong(st, st.locked(),
652 std::memory_order_acquire, std::memory_order_relaxed))
659 auto new_buckets = new_block->buckets();
660 for (std::uint32_t bucket_idx = 0; bucket_idx != bucket_count; ++bucket_idx) {
661 auto& old_bucket = old_buckets[bucket_idx];
662 const std::uint32_t item_count = old_bucket.state.load(std::memory_order_relaxed).item_count();
663 for (std::uint32_t i = 0; i != item_count; ++i) {
664 auto k = old_bucket.key[i].load(std::memory_order_relaxed);
665 hash_t h = traits::template rehash<hash>(k);
666 auto& new_bucket = new_buckets[h & new_block->mask];
667 auto new_bucket_state = new_bucket.state.load(std::memory_order_relaxed);
668 auto new_bucket_count = new_bucket_state.item_count();
669 auto v = old_bucket.value[i].load(std::memory_order_relaxed);
670 new_bucket.key[new_bucket_count].store(k, std::memory_order_relaxed);
671 new_bucket.value[new_bucket_count].store(v, std::memory_order_relaxed);
672 new_bucket.state.store(new_bucket_state.inc_item_count(), std::memory_order_relaxed);
676 for (extension_item* extension = old_bucket.head;
677 extension !=
nullptr;
678 extension = extension->next.load(std::memory_order_relaxed))
680 auto k = extension->key.load(std::memory_order_relaxed);
681 hash_t h = traits::template rehash<hash>(k);
682 auto& new_bucket = new_buckets[h & new_block->mask];
683 auto new_bucket_state = new_bucket.state.load(std::memory_order_relaxed);
684 auto new_bucket_count = new_bucket_state.item_count();
685 auto v = extension->value.load(std::memory_order_relaxed);
686 if (new_bucket_count < bucket_item_count) {
687 new_bucket.key[new_bucket_count].store(k, std::memory_order_relaxed);
688 new_bucket.value[new_bucket_count].store(v, std::memory_order_relaxed);
689 new_bucket.state.store(new_bucket_state.inc_item_count(), std::memory_order_relaxed);
691 extension_item* new_extension = allocate_extension_item(new_block, h);
692 assert(new_extension);
693 new_extension->key.store(k, std::memory_order_relaxed);
694 new_extension->value.store(v, std::memory_order_relaxed);
695 new_extension->next.store(new_bucket.head, std::memory_order_relaxed);
696 new_bucket.head = new_extension;
701 data_block.store(new_block, std::memory_order_release);
703 resize_lock.store(0, std::memory_order_release);
706 guarded_block g(old_block);
710 template <
class Key,
class Value,
class... Policies>
711 auto vyukov_hash_map<Key, Value, Policies...>::allocate_block(std::uint32_t bucket_count) -> block* {
712 std::uint32_t extension_bucket_count = bucket_count / bucket_to_extension_ratio;
713 std::size_t size =
sizeof(block) +
714 sizeof(bucket) * bucket_count +
715 sizeof(extension_bucket) * (
static_cast<size_t>(extension_bucket_count) + 1);
717 void* mem = ::operator
new(size, cacheline_size, std::nothrow);
721 std::memset(mem, 0, size);
722 block* b =
new (mem) block;
723 b->mask = bucket_count - 1;
724 b->bucket_count = bucket_count;
725 b->extension_bucket_count = extension_bucket_count;
727 std::size_t extension_bucket_addr =
728 reinterpret_cast<std::size_t
>(b) +
sizeof(block) +
sizeof(bucket) * bucket_count;
729 if (extension_bucket_addr %
sizeof(extension_bucket) != 0) {
730 extension_bucket_addr +=
sizeof(extension_bucket) - (extension_bucket_addr %
sizeof(extension_bucket));
732 b->extension_buckets =
reinterpret_cast<extension_bucket*
>(extension_bucket_addr);
734 for (std::uint32_t i = 0; i != extension_bucket_count; ++i) {
735 auto& bucket = b->extension_buckets[i];
736 extension_item* head =
nullptr;
737 for (std::size_t j = 0; j != extension_item_count; ++j) {
738 bucket.items[j].next.store(head, std::memory_order_relaxed);
739 head = &bucket.items[j];
741 bucket.head.store(head, std::memory_order_relaxed);
// (signature elided — a lookup that returns an iterator positioned on the
// matching entry; the iterator keeps the bucket locked.)
// Locks the key's bucket, scans inline slots, then the overflow chain,
// recording the position (index resp. extension pointer) in the result.
747 template <
class Key,
class Value,
class... Policies>
749 const auto h = hash{}(key);
752 result.current_bucket = &lock_bucket(h, result.block, result.current_bucket_state);
753 auto& bucket = *result.current_bucket;
756 auto item_count = result.current_bucket_state.item_count();
758 for (std::uint32_t i = 0; i != item_count; ++i) {
758 if (traits::template compare_key<false>(bucket.key[i], bucket.value[i], key, h, acc)) {
764 auto extension = bucket.head.load(std::memory_order_relaxed);
766 if (traits::template compare_key<false>(extension->key, extension->value, key, h, acc)) {
767 result.extension = extension;
770 extension = extension->next.load(std::memory_order_relaxed);
// (signature elided — presumably iterator begin())
// Locks the first bucket and, if it is empty, advances to the first
// non-empty bucket.
776 template <
class Key,
class Value,
class... Policies>
779 result.current_bucket = &lock_bucket(0, result.block, result.current_bucket_state);
780 if (result.current_bucket_state.item_count() == 0)
781 result.move_to_next_bucket();
// (signature elided — returns a reference to the locked bucket for `hash`,
// filling in the guarded block and the pre-lock bucket state.)
// Acquires the current data block, indexes the bucket and CAS-locks its
// state (the retry/backoff loop around the CAS is elided in this extract).
785 template <
class Key,
class Value,
class... Policies>
792 block.acquire(data_block, std::memory_order_acquire);
793 const std::size_t bucket_idx =
hash & block->mask;
794 auto& bucket = block->buckets()[bucket_idx];
795 bucket_state st = bucket.state.load(std::memory_order_relaxed);
800 if (bucket.state.compare_exchange_strong(st, st.locked(), std::memory_order_acquire,
801 std::memory_order_relaxed)) {
809 template <
class Key,
class Value,
class... Policies>
810 auto vyukov_hash_map<Key, Value, Policies...>::allocate_extension_item(block* b, hash_t hash)
813 const std::size_t extension_bucket_count = b->extension_bucket_count;
814 const std::size_t mod_mask = extension_bucket_count - 1;
815 for (std::size_t iter = 0; iter != 2; ++iter) {
816 for (std::size_t idx = 0; idx != extension_bucket_count; ++idx) {
817 const std::size_t extension_bucket_idx = (hash + idx) & mod_mask;
818 extension_bucket& extension_bucket = b->extension_buckets[extension_bucket_idx];
820 if (extension_bucket.head.load(std::memory_order_relaxed) ==
nullptr)
823 extension_bucket.acquire_lock();
824 auto item = extension_bucket.head.load(std::memory_order_relaxed);
826 extension_bucket.head.store(item->next, std::memory_order_relaxed);
827 extension_bucket.release_lock();
830 extension_bucket.release_lock();
836 template <
class Key,
class Value,
class... Policies>
837 void vyukov_hash_map<Key, Value, Policies...>::free_extension_item(extension_item* item) {
838 std::size_t item_addr =
reinterpret_cast<std::size_t
>(item);
839 auto bucket =
reinterpret_cast<extension_bucket*
>(item_addr - item_addr %
sizeof(extension_bucket));
841 bucket->acquire_lock();
842 auto head = bucket->head.load(std::memory_order_relaxed);
844 item->next.store(head, std::memory_order_release);
847 bucket->head.store(item, std::memory_order_relaxed);
848 bucket->release_lock();
851 template <
class Key,
class Value,
class... Policies>
852 vyukov_hash_map<Key, Value, Policies...>::iterator::iterator() :
855 current_bucket_state(),
861 template <
class Key,
class Value,
class... Policies>
862 vyukov_hash_map<Key, Value, Policies...>::iterator::iterator(iterator&& other) :
863 block(std::move(other.block)),
864 current_bucket(std::move(other.current_bucket)),
865 current_bucket_state(std::move(other.current_bucket_state)),
866 index(std::move(other.index)),
867 extension(std::move(other.extension)),
868 prev(std::move(other.prev))
871 other.current_bucket =
nullptr;
872 other.current_bucket_state = bucket_state();
874 other.extension =
nullptr;
875 other.prev =
nullptr;
878 template <
class Key,
class Value,
class... Policies>
879 auto vyukov_hash_map<Key, Value, Policies...>::iterator::operator=(iterator&& other) -> iterator& {
880 block = std::move(other.block);
881 current_bucket = std::move(other.current_bucket);
882 current_bucket_state = std::move(other.current_bucket_state);
883 index = std::move(other.index);
884 extension = std::move(other.extension);
885 prev = std::move(other.prev);
888 other.current_bucket =
nullptr;
889 other.current_bucket_state = bucket_state();
891 other.extension =
nullptr;
892 other.prev =
nullptr;
897 template <
class Key,
class Value,
class... Policies>
898 vyukov_hash_map<Key, Value, Policies...>::iterator::~iterator() {
902 template <
class Key,
class Value,
class... Policies>
905 if (current_bucket) {
906 assert(current_bucket->state.load().is_locked());
908 current_bucket->state.store(current_bucket_state, std::memory_order_release);
912 current_bucket =
nullptr;
913 current_bucket_state = bucket_state();
919 template <
class Key,
class Value,
class... Policies>
921 return block == r.block &&
922 current_bucket == r.current_bucket &&
923 current_bucket_state == r.current_bucket_state &&
925 extension == r.extension;
928 template <
class Key,
class Value,
class... Policies>
930 return !(*
this == r);
933 template <
class Key,
class Value,
class... Policies>
934 auto vyukov_hash_map<Key, Value, Policies...>::iterator::operator++() -> iterator& {
936 prev = &extension->next;
937 extension = extension->next.load(std::memory_order_relaxed);
938 if (extension ==
nullptr)
939 move_to_next_bucket();
944 if (index == current_bucket_state.item_count()) {
945 prev = ¤t_bucket->head;
946 extension = current_bucket->head.load(std::memory_order_relaxed);
947 if (extension ==
nullptr)
948 move_to_next_bucket();
// operator->: only available when deref_iterator yields a real reference
// (non-trivial key types). For trivial keys operator* builds a temporary
// std::pair (key and value are stored separately), so taking its address
// here would dangle — hence the compile-time guard.
// Fix: corrected the typo "dereferenciation" -> "dereferencing" in the
// static_assert diagnostic.
953 template <
class Key,
class Value,
class... Policies>
954 auto vyukov_hash_map<Key, Value, Policies...>::iterator::operator->() -> pointer {
955 static_assert(std::is_reference<reference>::value,
956 "operator-> is only available for instantiations with non-trivial key types. Use explicit "
957 "dereferencing instead (operator*). The reason is that all other instantiations create "
958 "temporary std::pair<> instances since key and value are stored separately.");
959 return &this->operator*();
962 template <
class Key,
class Value,
class... Policies>
963 auto vyukov_hash_map<Key, Value, Policies...>::iterator::operator*() -> reference {
965 return traits::deref_iterator(extension->key, extension->value);
967 return traits::deref_iterator(current_bucket->key[index], current_bucket->value[index]);
970 template <
class Key,
class Value,
class... Policies>
971 void vyukov_hash_map<Key, Value, Policies...>::iterator::move_to_next_bucket() {
972 assert(current_bucket !=
nullptr);
973 if (current_bucket == block->buckets() + block->bucket_count - 1) {
979 auto old_bucket = current_bucket;
980 auto old_bucket_state = current_bucket_state;
987 auto st = current_bucket->state.load(std::memory_order_relaxed);
992 if (current_bucket->state.compare_exchange_strong(st, st.locked(), std::memory_order_acquire,
993 std::memory_order_relaxed))
995 current_bucket_state = st;
1002 old_bucket->state.store(old_bucket_state, std::memory_order_release);
1005 extension =
nullptr;
1007 if (current_bucket_state.item_count() == 0)
1008 move_to_next_bucket();
1014 #pragma warning(pop)
A ForwardIterator to safely iterate vyukov_hash_map.
Definition: vyukov_hash_map.hpp:370
void reset()
Releases the bucket lock and resets the iterator.
Definition: vyukov_hash_map.hpp:903
Slim wrapper around std::hash with specialization for pointer types.
Definition: hash.hpp:25
A concurrent hash-map that uses fine-grained locking.
Definition: vyukov_hash_map.hpp:143