51 #ifndef KOKKOS_UNORDERED_MAP_HPP
52 #define KOKKOS_UNORDERED_MAP_HPP
54 #include <Kokkos_Core.hpp>
55 #include <Kokkos_Functional.hpp>
57 #include <Kokkos_Bitset.hpp>
59 #include <impl/Kokkos_Traits.hpp>
60 #include <impl/Kokkos_UnorderedMap_impl.hpp>
// Sentinel index (~0u) returned by find()/insert() when no valid slot exists.
// NOTE(review): the leading decimal prefixes on lines in this file ("69",
// "87", ...) are extraction artifacts -- original source line numbers fused
// into the text -- not program tokens.
69 enum :
unsigned { UnorderedMapInvalidIndex = ~0u };
// Bit-flag status codes packed into a single uint32_t (m_status); the bits
// not covered by the named flags count the hash-chain traversal length
// (LIST_LENGTH_MASK).
87 enum Status : uint32_t {
// NOTE(review): the SUCCESS and EXISTING enumerators are not visible in this
// chunk -- presumably the two bits above FREED_EXISTING; confirm upstream.
90 FREED_EXISTING = 1u << 29,
91 LIST_LENGTH_MASK = ~(SUCCESS | EXISTING | FREED_EXISTING)
96 KOKKOS_FORCEINLINE_FUNCTION
97 bool success()
const {
return (m_status & SUCCESS); }
100 KOKKOS_FORCEINLINE_FUNCTION
101 bool existing()
const {
return (m_status & EXISTING); }
104 KOKKOS_FORCEINLINE_FUNCTION
105 bool failed()
const {
return m_index == UnorderedMapInvalidIndex; }
// NOTE(review): the function declarations these inline qualifiers belong to
// (presumably freed_existing() and list_position(), per the member
// descriptions at the end of this file) are missing from this chunk.
109 KOKKOS_FORCEINLINE_FUNCTION
114 KOKKOS_FORCEINLINE_FUNCTION
118 KOKKOS_FORCEINLINE_FUNCTION
119 uint32_t
index()
const {
return m_index; }
// Internal state-transition helpers for the insert-result struct.
// NOTE(review): bodies are only partially visible in this chunk (assignment
// targets, closing braces and set_success's body are missing).
121 KOKKOS_FORCEINLINE_FUNCTION
124 KOKKOS_FORCEINLINE_FUNCTION
// Bumps the chain-length counter kept in the low bits of m_status.
125 void increment_list_position() {
129 KOKKOS_FORCEINLINE_FUNCTION
// Record that the key was found at slot i; arg_freed_existing notes whether
// a speculatively-claimed index had to be released.
130 void set_existing(uint32_t i,
bool arg_freed_existing) {
133 EXISTING | (arg_freed_existing ? FREED_EXISTING : 0u) |
list_position();
136 KOKKOS_FORCEINLINE_FUNCTION
// Record a successful insert at slot i.
137 void set_success(uint32_t i) {
// Template head and nested type aliases of the UnorderedMap class.
// NOTE(review): the `class UnorderedMap` declaration line itself and several
// alias right-hand sides are missing from this chunk.
202 template <
typename Key,
typename Value,
203 typename Device = Kokkos::DefaultExecutionSpace,
204 typename Hasher = pod_hash<typename std::remove_const<Key>::type>,
206 pod_equal_to<typename std::remove_const<Key>::type> >
209 using host_mirror_space =
210 typename ViewTraits<Key, Device, void, void>::host_mirror_space;
// Key/value aliases in declared (possibly const) and stripped forms.
217 using declared_key_type = Key;
218 using key_type =
typename std::remove_const<declared_key_type>::type;
219 using const_key_type =
typename std::add_const<key_type>::type;
222 using declared_value_type = Value;
223 using value_type =
typename std::remove_const<declared_value_type>::type;
224 using const_value_type =
typename std::add_const<value_type>::type;
226 using device_type = Device;
227 using execution_space =
typename Device::execution_space;
228 using hasher_type = Hasher;
229 using equal_to_type = EqualTo;
230 using size_type = uint32_t;
// Map variants used for copies/mirrors (some alias names missing here).
234 UnorderedMap<declared_key_type, declared_value_type, device_type,
235 hasher_type, equal_to_type>;
237 hasher_type, equal_to_type>;
239 UnorderedMap<const_key_type, value_type, device_type, hasher_type,
242 device_type, hasher_type, equal_to_type>;
// Compile-time capability flags derived from the const-ness of Key/Value.
244 static const bool is_set = std::is_same<void, value_type>::value;
245 static const bool has_const_key =
246 std::is_same<const_key_type, declared_key_type>::value;
247 static const bool has_const_value =
248 is_set || std::is_same<const_value_type, declared_value_type>::value;
250 static const bool is_insertable_map =
251 !has_const_key && (is_set || !has_const_value);
252 static const bool is_modifiable_map = has_const_key && !has_const_value;
253 static const bool is_const_map = has_const_key && has_const_value;
260 using histogram_type = Impl::UnorderedMapHistogram<const_map_type>;
// Sentinel slot index: all bits set.
265 enum : size_type { invalid_index = ~static_cast<size_type>(0) };
// Sets store a dummy int value instead of a real payload.
267 using impl_value_type = std::conditional_t<is_set, int, declared_value_type>;
269 using key_type_view = std::conditional_t<
273 using value_type_view = std::conditional_t<
274 is_insertable_map || is_modifiable_map,
278 using size_type_view = std::conditional_t<
283 std::conditional_t<is_insertable_map, Bitset<execution_space>,
// Slots in m_scalars: modified / erasable / failed-insert flags.
286 enum { modified_idx = 0, erasable_idx = 1, failed_insert_idx = 2 };
287 enum { num_scalars = 3 };
// Constructor: allocates the bitset, hash lists, next-index chain, key and
// value views sized from capacity_hint, then initializes the chains to
// invalid_index. Throws if instantiated on a non-insertable specialization.
// NOTE(review): several member-initializer lines (m_hasher, m_size, view
// extents) and the closing brace are missing from this chunk.
300 UnorderedMap(size_type capacity_hint = 0, hasher_type hasher = hasher_type(),
301 equal_to_type equal_to = equal_to_type())
302 : m_bounded_insert(true),
304 m_equal_to(equal_to),
306 m_available_indexes(calculate_capacity(capacity_hint)),
307 m_hash_lists(view_alloc(WithoutInitializing,
"UnorderedMap hash list"),
309 m_next_index(view_alloc(WithoutInitializing,
"UnorderedMap next index"),
// Slot 0 is reserved, hence capacity() + 1 entries.
313 m_keys(
"UnorderedMap keys",
capacity() + 1),
// Sets need only a single dummy value slot.
314 m_values(
"UnorderedMap values", (is_set ? 1 :
capacity() + 1)),
315 m_scalars(
"UnorderedMap scalars") {
316 if (!is_insertable_map) {
317 throw std::runtime_error(
318 "Cannot construct a non-insertable (i.e. const key_type) "
// Mark every hash list and chain link as empty.
322 Kokkos::deep_copy(m_hash_lists, invalid_index);
323 Kokkos::deep_copy(m_next_index, invalid_index);
326 void reset_failed_insert_flag() { reset_flag(failed_insert_idx); }
328 histogram_type get_histogram() {
return histogram_type(*
this); }
// Body of clear(): resets the occupancy bitset, re-marks every hash list and
// chain link as empty, zeroes keys/values and the scalar flags.
// NOTE(review): the `void clear() {` signature line and several scoping
// braces are missing from this chunk.
332 m_bounded_insert =
true;
336 m_available_indexes.clear();
338 Kokkos::deep_copy(m_hash_lists, invalid_index);
339 Kokkos::deep_copy(m_next_index, invalid_index);
// Value-initialize all keys...
341 const key_type tmp = key_type();
342 Kokkos::deep_copy(m_keys, tmp);
// ...and all values.
345 const impl_value_type tmp = impl_value_type();
346 Kokkos::deep_copy(m_values, tmp);
348 { Kokkos::deep_copy(m_scalars, 0); }
// True when the key, value and scalar views all hold live allocations.
// NOTE(review): closing brace missing from this chunk.
351 KOKKOS_INLINE_FUNCTION constexpr
bool is_allocated()
const {
352 return (m_keys.is_allocated() && m_values.is_allocated() &&
353 m_scalars.is_allocated());
// rehash(): grow/shrink by inserting all current entries into a temporary
// map and swapping it in. Insertion is unbounded while repopulating so no
// entry is lost. NOTE(review): several lines (the functor dispatch, swap,
// return statements, size() signature, begin/end_erase signatures, closing
// braces) are missing from this chunk.
366 bool rehash(size_type requested_capacity = 0) {
// Bounded insert only makes sense for an empty map.
367 const bool bounded_insert = (
capacity() == 0) || (
size() == 0u);
368 return rehash(requested_capacity, bounded_insert);
371 bool rehash(size_type requested_capacity,
bool bounded_insert) {
372 if (!is_insertable_map)
return false;
// Never shrink below the current number of entries.
374 const size_type curr_size =
size();
376 (requested_capacity < curr_size) ? curr_size : requested_capacity;
378 insertable_map_type tmp(requested_capacity, m_hasher, m_equal_to);
381 tmp.m_bounded_insert =
false;
382 Impl::UnorderedMapRehash<insertable_map_type> f(tmp, *
this);
385 tmp.m_bounded_insert = bounded_insert;
// Body of size(): recount occupied slots, then clear the modified flag.
402 m_size = m_available_indexes.count();
403 reset_flag(modified_idx);
// erasable(): only an insertable map can enter the erasable state.
415 bool erasable()
const {
416 return is_insertable_map ? get_flag(erasable_idx) : false;
// Body of begin_erase(): fence, then raise the erasable flag.
420 bool result = !erasable();
421 if (is_insertable_map && result) {
422 execution_space().fence();
423 set_flag(erasable_idx);
424 execution_space().fence();
// Body of end_erase(): run the erase functor, then clear the flag.
430 bool result = erasable();
431 if (is_insertable_map && result) {
432 execution_space().fence();
433 Impl::UnorderedMapErase<declared_map_type> f(*
this);
435 execution_space().fence();
436 reset_flag(erasable_idx);
445 KOKKOS_FORCEINLINE_FUNCTION
446 size_type
capacity()
const {
return m_available_indexes.size(); }
// Thread-safe insert of (k, v): walk the hash chain for k; if found, report
// existing (freeing any speculatively-claimed slot); otherwise claim a free
// slot via the bitset, write key/value, and CAS the slot into the chain.
// NOTE(review): this chunk is missing many original lines (hash_capacity()
// body, the insert signature's key parameter, the `result` declaration, the
// do/while loop structure, early returns, closing braces) -- partial listing.
458 KOKKOS_INLINE_FUNCTION
472 KOKKOS_INLINE_FUNCTION
474 impl_value_type
const &v = impl_value_type())
const {
// Bail out while an erase epoch is in progress or the map is unusable.
477 if (!is_insertable_map ||
capacity() == 0u ||
478 m_scalars((
int)erasable_idx)) {
482 if (!m_scalars((
int)modified_idx)) {
483 m_scalars((
int)modified_idx) =
true;
486 int volatile &failed_insert_ref = m_scalars((
int)failed_insert_idx);
488 const size_type hash_value = m_hasher(k);
489 const size_type hash_list = hash_value % m_hash_lists.extent(0);
491 size_type *curr_ptr = &m_hash_lists[hash_list];
492 size_type new_index = invalid_index;
// Start the free-slot search near this list's proportional position.
495 size_type index_hint =
static_cast<size_type
>(
496 (
static_cast<double>(hash_list) *
capacity()) / m_hash_lists.extent(0));
498 size_type find_attempts = 0;
// Bounded-insert mode gives up after a fixed number of probe attempts.
500 enum :
unsigned { bounded_find_attempts = 32u };
501 const size_type max_attempts =
503 (bounded_find_attempts < m_available_indexes.max_hint()))
504 ? bounded_find_attempts
505 : m_available_indexes.max_hint();
507 bool not_done =
true;
516 size_type curr = volatile_load(curr_ptr);
518 KOKKOS_NONTEMPORAL_PREFETCH_LOAD(
519 &m_keys[curr != invalid_index ? curr : 0]);
// Walk the chain looking for an equal key.
523 while (curr != invalid_index &&
524 !m_equal_to(volatile_load(&m_keys[curr]), k)) {
525 result.increment_list_position();
527 curr_ptr = &m_next_index[curr];
528 curr = volatile_load(curr_ptr);
529 KOKKOS_NONTEMPORAL_PREFETCH_LOAD(
530 &m_keys[curr != invalid_index ? curr : 0]);
// Key already present: release any slot we claimed speculatively.
535 if (curr != invalid_index) {
536 const bool free_existing = new_index != invalid_index;
540 if (!m_available_indexes.reset(new_index)) {
541 KOKKOS_IMPL_DO_NOT_USE_PRINTF(
"Unable to free existing\n");
545 result.set_existing(curr, free_existing);
// Key absent: claim a free slot and stage the key/value.
554 if (new_index == invalid_index) {
558 m_available_indexes.find_any_unset_near(index_hint, hash_list);
561 if (!found && ++find_attempts >= max_attempts) {
562 failed_insert_ref =
true;
564 }
else if (m_available_indexes.set(index_hint)) {
565 new_index = index_hint;
567 KOKKOS_NONTEMPORAL_PREFETCH_STORE(&m_keys[new_index]);
568 m_keys[new_index] = k;
571 KOKKOS_NONTEMPORAL_PREFETCH_STORE(&m_values[new_index]);
572 m_values[new_index] = v;
578 }
else if (failed_insert_ref) {
// Publish the staged slot into the chain; success only if the CAS wins.
585 if (new_index != invalid_index &&
586 curr == atomic_compare_exchange(
587 curr_ptr,
static_cast<size_type
>(invalid_index),
590 result.set_success(new_index);
// Mark key k's slot for erasure (only valid inside a begin_erase/end_erase
// epoch). NOTE(review): the return statements and closing braces are missing
// from this chunk.
599 KOKKOS_INLINE_FUNCTION
600 bool erase(key_type
const &k)
const {
// Erase is only legal while the erasable flag is raised.
603 if (is_insertable_map && 0u <
capacity() && m_scalars((
int)erasable_idx)) {
604 if (!m_scalars((
int)modified_idx)) {
605 m_scalars((
int)modified_idx) =
true;
608 size_type index =
find(k);
// Releasing the bitset bit marks the slot dead; end_erase compacts chains.
609 if (valid_at(index)) {
610 m_available_indexes.reset(index);
// Find key k by walking its hash chain; returns the slot index, or
// invalid_index when absent. NOTE(review): the conditional's first operand,
// the return statement and closing braces are missing from this chunk.
625 KOKKOS_INLINE_FUNCTION
626 size_type
find(
const key_type &k)
const {
628 ? m_hash_lists(m_hasher(k) % m_hash_lists.extent(0))
631 KOKKOS_NONTEMPORAL_PREFETCH_LOAD(&m_keys[curr != invalid_index ? curr : 0]);
// Chain walk: stop at end-of-chain or on key equality.
632 while (curr != invalid_index && !m_equal_to(m_keys[curr], k)) {
633 KOKKOS_NONTEMPORAL_PREFETCH_LOAD(
634 &m_keys[curr != invalid_index ? curr : 0]);
635 curr = m_next_index[curr];
645 KOKKOS_INLINE_FUNCTION
646 bool exists(
const key_type &k)
const {
return valid_at(
find(k)); }
// value_at(i): returns by value for sets/const-value maps, by reference
// otherwise. NOTE(review): the bodies of value_at() and key_at() are missing
// from this chunk.
656 KOKKOS_FORCEINLINE_FUNCTION
657 std::conditional_t<(is_set || has_const_value), impl_value_type,
669 KOKKOS_FORCEINLINE_FUNCTION
674 KOKKOS_FORCEINLINE_FUNCTION
675 bool valid_at(size_type i)
const {
return m_available_indexes.test(i); }
// Shallow-copy constructor and assignment from a compatible map
// specialization (e.g. non-const -> const view of the same data), gated by
// Impl::UnorderedMapCanAssign. All views alias the source's allocations.
// NOTE(review): the constructor name line, m_size member copies, the
// `return *this;` and closing braces are missing from this chunk.
677 template <
typename SKey,
typename SValue>
679 UnorderedMap<SKey, SValue, Device, Hasher, EqualTo>
const &src,
680 typename std::enable_if<
681 Impl::UnorderedMapCanAssign<declared_key_type, declared_value_type,
682 SKey, SValue>::value,
684 : m_bounded_insert(src.m_bounded_insert),
685 m_hasher(src.m_hasher),
686 m_equal_to(src.m_equal_to),
688 m_available_indexes(src.m_available_indexes),
689 m_hash_lists(src.m_hash_lists),
690 m_next_index(src.m_next_index),
692 m_values(src.m_values),
693 m_scalars(src.m_scalars) {}
// Assignment counterpart of the shallow-copy constructor above.
695 template <
typename SKey,
typename SValue>
696 typename std::enable_if<
697 Impl::UnorderedMapCanAssign<declared_key_type, declared_value_type, SKey,
699 declared_map_type &>::type
700 operator=(UnorderedMap<SKey, SValue, Device, Hasher, EqualTo>
const &src) {
701 m_bounded_insert = src.m_bounded_insert;
702 m_hasher = src.m_hasher;
703 m_equal_to = src.m_equal_to;
705 m_available_indexes = src.m_available_indexes;
706 m_hash_lists = src.m_hash_lists;
707 m_next_index = src.m_next_index;
709 m_values = src.m_values;
710 m_scalars = src.m_scalars;
// Deep-copy the contents of a same-key/value map that may live on a
// different device: allocate fresh views sized like the source, then raw
// byte copies across memory spaces. No-op when both maps alias the same
// storage. NOTE(review): the function name line, the m_keys assignment
// target, the final swap/assign and closing braces are missing here.
714 template <
typename SKey,
typename SValue,
typename SDevice>
715 typename std::enable_if<
716 std::is_same<typename std::remove_const<SKey>::type, key_type>::value &&
717 std::is_same<typename std::remove_const<SValue>::type,
718 value_type>::value>::type
720 UnorderedMap<SKey, SValue, SDevice, Hasher, EqualTo>
const &src,
// Self-copy guard: identical data pointers mean nothing to do.
721 if (m_hash_lists.data() != src.m_hash_lists.data()) {
722 insertable_map_type tmp;
724 tmp.m_bounded_insert = src.m_bounded_insert;
725 tmp.m_hasher = src.m_hasher;
726 tmp.m_equal_to = src.m_equal_to;
727 tmp.m_size = src.size();
728 tmp.m_available_indexes = bitset_type(src.capacity());
729 tmp.m_hash_lists = size_type_view(
730 view_alloc(WithoutInitializing,
"UnorderedMap hash list"),
731 src.m_hash_lists.extent(0));
732 tmp.m_next_index = size_type_view(
733 view_alloc(WithoutInitializing,
"UnorderedMap next index"),
734 src.m_next_index.extent(0));
736 key_type_view(view_alloc(WithoutInitializing,
"UnorderedMap keys"),
737 src.m_keys.extent(0));
738 tmp.m_values = value_type_view(
739 view_alloc(WithoutInitializing,
"UnorderedMap values"),
740 src.m_values.extent(0));
741 tmp.m_scalars = scalars_view(
"UnorderedMap scalars");
743 Kokkos::deep_copy(tmp.m_available_indexes, src.m_available_indexes);
// Cross-memory-space byte copies of every backing array.
745 using raw_deep_copy =
746 Kokkos::Impl::DeepCopy<
typename device_type::memory_space,
747 typename SDevice::memory_space>;
749 raw_deep_copy(tmp.m_hash_lists.data(), src.m_hash_lists.data(),
750 sizeof(size_type) * src.m_hash_lists.extent(0));
751 raw_deep_copy(tmp.m_next_index.data(), src.m_next_index.data(),
752 sizeof(size_type) * src.m_next_index.extent(0));
753 raw_deep_copy(tmp.m_keys.data(), src.m_keys.data(),
754 sizeof(key_type) * src.m_keys.extent(0));
756 raw_deep_copy(tmp.m_values.data(), src.m_values.data(),
757 sizeof(impl_value_type) * src.m_values.extent(0));
759 raw_deep_copy(tmp.m_scalars.data(), src.m_scalars.data(),
760 sizeof(
int) * num_scalars);
768 bool modified()
const {
return get_flag(modified_idx); }
// Host-side helpers that read/write the device-resident scalar flags via
// raw cross-space copies. NOTE(review): the source memory-space template
// argument, `result` declaration/return, the full calculate_capacity body
// and closing braces are missing from this chunk.
770 void set_flag(
int flag)
const {
771 using raw_deep_copy =
772 Kokkos::Impl::DeepCopy<
typename device_type::memory_space,
774 const int true_ =
true;
775 raw_deep_copy(m_scalars.data() + flag, &true_,
sizeof(
int));
778 void reset_flag(
int flag)
const {
779 using raw_deep_copy =
780 Kokkos::Impl::DeepCopy<
typename device_type::memory_space,
782 const int false_ =
false;
783 raw_deep_copy(m_scalars.data() + flag, &false_,
sizeof(
int));
786 bool get_flag(
int flag)
const {
787 using raw_deep_copy =
789 typename device_type::memory_space>;
791 raw_deep_copy(&result, m_scalars.data() + flag,
sizeof(
int));
// Round the requested capacity up (with ~17% headroom) for chain lengths.
795 static uint32_t calculate_capacity(uint32_t capacity_hint) {
798 ? ((
static_cast<uint32_t
>(7ull * capacity_hint / 6u) + 127u) /
// Data members and friend declarations.
805 bool m_bounded_insert;
806 hasher_type m_hasher;
807 equal_to_type m_equal_to;
// Cached entry count; updated by size() from the occupancy bitset.
808 mutable size_type m_size;
// One bit per slot: set = occupied.
809 bitset_type m_available_indexes;
// Head index per hash bucket, invalid_index when empty.
810 size_type_view m_hash_lists;
// Chain link per slot, invalid_index at end of chain.
811 size_type_view m_next_index;
812 key_type_view m_keys;
813 value_type_view m_values;
// Device-resident modified/erasable/failed-insert flags.
814 scalars_view m_scalars;
// All other UnorderedMap specializations may access our internals.
816 template <
typename KKey,
typename VValue,
typename DDevice,
typename HHash,
818 friend class UnorderedMap;
820 template <
typename UMap>
821 friend struct Impl::UnorderedMapErase;
823 template <
typename UMap>
824 friend struct Impl::UnorderedMapHistogram;
826 template <
typename UMap>
827 friend struct Impl::UnorderedMapPrint;
// Free-function deep_copy overload for UnorderedMap; delegates to the
// destination map's create_copy_view. NOTE(review): the closing brace is
// missing from this chunk.
831 template <
typename DKey,
typename DT,
typename DDevice,
typename SKey,
832 typename ST,
typename SDevice,
typename Hasher,
typename EqualTo>
833 inline void deep_copy(
834 UnorderedMap<DKey, DT, DDevice, Hasher, EqualTo> &dst,
835 const UnorderedMap<SKey, ST, SDevice, Hasher, EqualTo> &src) {
836 dst.create_copy_view(src);
KOKKOS_FORCEINLINE_FUNCTION pair< T1 &, T2 & > tie(T1 &x, T2 &y)
Return a pair of references to the input arguments.
Memory management for host memory.
First element of the return value of UnorderedMap::insert().
KOKKOS_FORCEINLINE_FUNCTION bool failed() const
Did the map fail to insert the key due to insufficient capacity.
KOKKOS_FORCEINLINE_FUNCTION bool success() const
Did the map successfully insert the key/value pair.
KOKKOS_FORCEINLINE_FUNCTION bool freed_existing() const
KOKKOS_FORCEINLINE_FUNCTION bool existing() const
Was the key already present in the map.
KOKKOS_FORCEINLINE_FUNCTION uint32_t list_position() const
KOKKOS_FORCEINLINE_FUNCTION uint32_t index() const
Index where the key can be found as long as the insert did not fail.
Thread-safe, performance-portable lookup table.
bool failed_insert() const
The current number of failed insert() calls.
KOKKOS_FORCEINLINE_FUNCTION key_type key_at(size_type i) const
Get the key with i as its direct index.
KOKKOS_INLINE_FUNCTION size_type hash_capacity() const
The number of hash table "buckets".
KOKKOS_INLINE_FUNCTION bool exists(const key_type &k) const
Does the key exist in the map.
KOKKOS_INLINE_FUNCTION size_type find(const key_type &k) const
Find the given key k, if it exists in the table.
UnorderedMap(size_type capacity_hint=0, hasher_type hasher=hasher_type(), equal_to_type equal_to=equal_to_type())
Constructor.
KOKKOS_FORCEINLINE_FUNCTION std::conditional_t<(is_set||has_const_value), impl_value_type, impl_value_type & > value_at(size_type i) const
Get the value with i as its direct index.
void clear()
Clear all entries in the table.
KOKKOS_FORCEINLINE_FUNCTION size_type capacity() const
The maximum number of entries that the table can hold.
size_type size() const
The number of entries in the table.
bool rehash(size_type requested_capacity=0)
Change the capacity of the map.
KOKKOS_INLINE_FUNCTION insert_result insert(key_type const &k, impl_value_type const &v=impl_value_type()) const
View to an array of data.