Kokkos Core Kernels Package  Version of the Day
Kokkos_UnorderedMap.hpp
Go to the documentation of this file.
1 /*
2 //@HEADER
3 // ************************************************************************
4 //
5 // Kokkos v. 2.0
6 // Copyright (2014) Sandia Corporation
7 //
8 // Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
9 // the U.S. Government retains certain rights in this software.
10 //
11 // Redistribution and use in source and binary forms, with or without
12 // modification, are permitted provided that the following conditions are
13 // met:
14 //
15 // 1. Redistributions of source code must retain the above copyright
16 // notice, this list of conditions and the following disclaimer.
17 //
18 // 2. Redistributions in binary form must reproduce the above copyright
19 // notice, this list of conditions and the following disclaimer in the
20 // documentation and/or other materials provided with the distribution.
21 //
22 // 3. Neither the name of the Corporation nor the names of the
23 // contributors may be used to endorse or promote products derived from
24 // this software without specific prior written permission.
25 //
26 // THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
27 // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
30 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
31 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
32 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
33 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
34 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
35 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
36 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 //
38 // Questions? Contact H. Carter Edwards (hcedwar@sandia.gov)
39 //
40 // ************************************************************************
41 //@HEADER
42 */
43 
49 
50 #ifndef KOKKOS_UNORDERED_MAP_HPP
51 #define KOKKOS_UNORDERED_MAP_HPP
52 
53 #include <Kokkos_Core.hpp>
54 #include <Kokkos_Functional.hpp>
55 
56 #include <Kokkos_Bitset.hpp>
57 
58 #include <impl/Kokkos_Traits.hpp>
59 #include <impl/Kokkos_UnorderedMap_impl.hpp>
60 
61 
62 #include <iostream>
63 
64 #include <cstdint>
65 #include <stdexcept>
66 
67 
68 namespace Kokkos {
69 
70 enum { UnorderedMapInvalidIndex = ~0u };
71 
85 
// First element of the return value of UnorderedMap::insert().
// NOTE(review): the `class UnorderedMapInsertResult` declaration line was
// lost in the extraction of this file; the class body below is intact.
{
private:
  // Status bit layout: the top three bits are flags, and the remaining
  // 29 bits (LIST_LENGTH_MASK) count the chain position probed during insert.
  enum Status{
      SUCCESS = 1u << 31
    , EXISTING = 1u << 30
    , FREED_EXISTING = 1u << 29
    , LIST_LENGTH_MASK = ~(SUCCESS | EXISTING | FREED_EXISTING)
  };

public:
  /// Did the map successfully insert the key/value pair?
  KOKKOS_FORCEINLINE_FUNCTION
  bool success() const { return (m_status & SUCCESS); }

  /// Was the key already present in the map?
  KOKKOS_FORCEINLINE_FUNCTION
  bool existing() const { return (m_status & EXISTING); }

  /// Did the insert fail (index left at the invalid sentinel)?
  KOKKOS_FORCEINLINE_FUNCTION
  bool failed() const { return m_index == UnorderedMapInvalidIndex; }

  /// Did this thread claim an unused entry and then release it because the
  /// key turned out to already be present?
  KOKKOS_FORCEINLINE_FUNCTION
  bool freed_existing() const { return (m_status & FREED_EXISTING); }

  /// Position reached in the bucket chain while probing during insert.
  KOKKOS_FORCEINLINE_FUNCTION
  uint32_t list_position() const { return (m_status & LIST_LENGTH_MASK); }

  /// Index where the key can be found, as long as the insert did not fail.
  KOKKOS_FORCEINLINE_FUNCTION
  uint32_t index() const { return m_index; }

  KOKKOS_FORCEINLINE_FUNCTION
  // NOTE(review): the default-constructor name line was lost in extraction;
  // this initializer list puts the result into the "failed" state.
  : m_index(UnorderedMapInvalidIndex)
  , m_status(0)
  {}

  KOKKOS_FORCEINLINE_FUNCTION
  // Bump the probe-position counter, saturating at LIST_LENGTH_MASK so the
  // count never spills into the flag bits.
  void increment_list_position()
  {
  m_status += (list_position() < LIST_LENGTH_MASK) ? 1u : 0u;
  }

  KOKKOS_FORCEINLINE_FUNCTION
  // Record that the key already existed at index i, preserving the probe count.
  void set_existing(uint32_t i, bool arg_freed_existing)
  {
  m_index = i;
  m_status = EXISTING | (arg_freed_existing ? FREED_EXISTING : 0u) | list_position();
  }

  KOKKOS_FORCEINLINE_FUNCTION
  // Record a successful insert at index i, preserving the probe count.
  void set_success(uint32_t i)
  {
  m_index = i;
  m_status = SUCCESS | list_position();
  }

private:
  uint32_t m_index;   // slot index, or UnorderedMapInvalidIndex on failure
  uint32_t m_status;  // flag bits plus chain position (see Status)
};
153 
// Thread-safe, performance-portable lookup table.
// NOTE(review): the `class UnorderedMap` declaration line following this
// template header was lost in extraction; several typedef target types
// below were likewise stripped (they appeared as hyperlinks in the source
// page) and are flagged individually.
template < typename Key
  , typename Value
  , typename Device = Kokkos::DefaultExecutionSpace
  , typename Hasher = pod_hash<typename Impl::remove_const<Key>::type>
  , typename EqualTo = pod_equal_to<typename Impl::remove_const<Key>::type>
  >
{
private:
  typedef typename ViewTraits<Key,Device,void,void>::host_mirror_space host_mirror_space ;
public:

  //key_types
  typedef Key declared_key_type;
  typedef typename Impl::remove_const<declared_key_type>::type key_type;
  typedef typename Impl::add_const<key_type>::type const_key_type;

  //value_types
  typedef Value declared_value_type;
  typedef typename Impl::remove_const<declared_value_type>::type value_type;
  typedef typename Impl::add_const<value_type>::type const_value_type;

  typedef Device device_type;
  typedef typename Device::execution_space execution_space;
  typedef Hasher hasher_type;
  typedef EqualTo equal_to_type;
  typedef uint32_t size_type;

  //map_types
  // NOTE(review): the declared_map_type / insertable_map_type /
  // modifiable_map_type / const_map_type typedef lines were lost in
  // extraction (they are referenced below and by member functions).

  // A "set" stores keys only (value_type is void).
  static const bool is_set = std::is_same<void,value_type>::value;
  static const bool has_const_key = std::is_same<const_key_type,declared_key_type>::value;
  static const bool has_const_value = is_set || std::is_same<const_value_type,declared_value_type>::value;

  static const bool is_insertable_map = !has_const_key && (is_set || !has_const_value);
  static const bool is_modifiable_map = has_const_key && !has_const_value;
  static const bool is_const_map = has_const_key && has_const_value;


  // NOTE(review): insert_result typedef line lost in extraction.

  typedef Impl::UnorderedMapHistogram<const_map_type> histogram_type;

private:
  // Sentinel marking an empty bucket head or end of a chain.
  enum { invalid_index = ~static_cast<size_type>(0) };

  // Sets store a single dummy int value instead of real values.
  typedef typename Impl::if_c< is_set, int, declared_value_type>::type impl_value_type;

  // NOTE(review): the second/third template arguments of the if_c selections
  // below (the concrete View types) were lost in extraction.
  typedef typename Impl::if_c< is_insertable_map
  >::type key_type_view;

  typedef typename Impl::if_c< is_insertable_map || is_modifiable_map
  >::type value_type_view;

  typedef typename Impl::if_c< is_insertable_map
  >::type size_type_view;

  typedef typename Impl::if_c< is_insertable_map
  >::type bitset_type;

  // Indexes into m_scalars: device-side bookkeeping flags.
  enum { modified_idx = 0, erasable_idx = 1, failed_insert_idx = 2 };
  enum { num_scalars = 3 };
291 public:
293 
294 
295  UnorderedMap()
296  : m_bounded_insert()
297  , m_hasher()
298  , m_equal_to()
299  , m_size()
300  , m_available_indexes()
301  , m_hash_lists()
302  , m_next_index()
303  , m_keys()
304  , m_values()
305  , m_scalars()
306  {}
307 
313  UnorderedMap( size_type capacity_hint, hasher_type hasher = hasher_type(), equal_to_type equal_to = equal_to_type() )
314  : m_bounded_insert(true)
315  , m_hasher(hasher)
316  , m_equal_to(equal_to)
317  , m_size()
318  , m_available_indexes(calculate_capacity(capacity_hint))
319  , m_hash_lists(ViewAllocateWithoutInitializing("UnorderedMap hash list"), Impl::find_hash_size(capacity()))
320  , m_next_index(ViewAllocateWithoutInitializing("UnorderedMap next index"), capacity()+1) // +1 so that the *_at functions can always return a valid reference
321  , m_keys("UnorderedMap keys",capacity()+1)
322  , m_values("UnorderedMap values",(is_set? 1 : capacity()+1))
323  , m_scalars("UnorderedMap scalars")
324  {
325  if (!is_insertable_map) {
326  throw std::runtime_error("Cannot construct a non-insertable (i.e. const key_type) unordered_map");
327  }
328 
329  Kokkos::deep_copy(m_hash_lists, invalid_index);
330  Kokkos::deep_copy(m_next_index, invalid_index);
331  }
332 
333  void reset_failed_insert_flag()
334  {
335  reset_flag(failed_insert_idx);
336  }
337 
338  histogram_type get_histogram()
339  {
340  return histogram_type(*this);
341  }
342 
344  void clear()
345  {
346  m_bounded_insert = true;
347 
348  if (capacity() == 0) return;
349 
350  m_available_indexes.clear();
351 
352  Kokkos::deep_copy(m_hash_lists, invalid_index);
353  Kokkos::deep_copy(m_next_index, invalid_index);
354  {
355  const key_type tmp = key_type();
356  Kokkos::deep_copy(m_keys,tmp);
357  }
358  if (is_set){
359  const impl_value_type tmp = impl_value_type();
360  Kokkos::deep_copy(m_values,tmp);
361  }
362  {
363  Kokkos::deep_copy(m_scalars, 0);
364  }
365  }
366 
377  bool rehash(size_type requested_capacity = 0)
378  {
379  const bool bounded_insert = (capacity() == 0) || (size() == 0u);
380  return rehash(requested_capacity, bounded_insert );
381  }
382 
383  bool rehash(size_type requested_capacity, bool bounded_insert)
384  {
385  if(!is_insertable_map) return false;
386 
387  const size_type curr_size = size();
388  requested_capacity = (requested_capacity < curr_size) ? curr_size : requested_capacity;
389 
390  insertable_map_type tmp(requested_capacity, m_hasher, m_equal_to);
391 
392  if (curr_size) {
393  tmp.m_bounded_insert = false;
394  Impl::UnorderedMapRehash<insertable_map_type> f(tmp,*this);
395  f.apply();
396  }
397  tmp.m_bounded_insert = bounded_insert;
398 
399  *this = tmp;
400 
401  return true;
402  }
403 
411  size_type size() const
412  {
413  if( capacity() == 0u ) return 0u;
414  if (modified()) {
415  m_size = m_available_indexes.count();
416  reset_flag(modified_idx);
417  }
418  return m_size;
419  }
420 
426  bool failed_insert() const
427  {
428  return get_flag(failed_insert_idx);
429  }
430 
431  bool erasable() const
432  {
433  return is_insertable_map ? get_flag(erasable_idx) : false;
434  }
435 
  /// \brief Enter the erasable phase so device-side erase() calls take effect.
  /// \return true when the transition happened (map was not already erasable).
  bool begin_erase()
  {
  bool result = !erasable();
  if (is_insertable_map && result) {
  // Fence on both sides of the flag flip so no in-flight device work
  // straddles the phase change.
  execution_space::fence();
  set_flag(erasable_idx);
  execution_space::fence();
  }
  return result;
  }
446 
  /// \brief Leave the erasable phase, physically removing erased entries.
  /// \return true when the transition happened (map was erasable).
  bool end_erase()
  {
  bool result = erasable();
  if (is_insertable_map && result) {
  execution_space::fence();
  // Unlink entries whose occupancy bits were reset by erase().
  Impl::UnorderedMapErase<declared_map_type> f(*this);
  f.apply();
  execution_space::fence();
  reset_flag(erasable_idx);
  }
  return result;
  }
459 
464  KOKKOS_FORCEINLINE_FUNCTION
465  size_type capacity() const
466  { return m_available_indexes.size(); }
467 
478  KOKKOS_INLINE_FUNCTION
479  size_type hash_capacity() const
480  { return m_hash_lists.dimension_0(); }
481 
482  //---------------------------------------------------------------------------
483  //---------------------------------------------------------------------------
484 
485 
  /// \brief Attempt to insert key k with value v (device callable, thread safe).
  ///
  /// Lock-free protocol: walk the bucket chain looking for k; if absent,
  /// claim a free slot from the occupancy bitset, write the key/value, and
  /// CAS the slot onto the end of the chain.  Returns an insert_result that
  /// reports success / existing / failed plus the chain position probed.
  /// A default (failed) result is returned immediately when the map is not
  /// insertable, has zero capacity, or is in the erasable phase.
  KOKKOS_INLINE_FUNCTION
  insert_result insert(key_type const& k, impl_value_type const&v = impl_value_type()) const
  {
  insert_result result;

  if ( !is_insertable_map || capacity() == 0u || m_scalars((int)erasable_idx) ) {
  return result;
  }

  // Mark the cached size stale (see size()).
  if ( !m_scalars((int)modified_idx) ) {
  m_scalars((int)modified_idx) = true;
  }

  // Volatile so concurrent writers' failure reports are observed.
  int volatile & failed_insert_ref = m_scalars((int)failed_insert_idx) ;

  const size_type hash_value = m_hasher(k);
  const size_type hash_list = hash_value % m_hash_lists.dimension_0();

  size_type * curr_ptr = & m_hash_lists[ hash_list ];
  size_type new_index = invalid_index ;

  // Start the free-slot search near this bucket's proportional position.
  // Force integer multiply to long
  size_type index_hint = static_cast<size_type>( (static_cast<double>(hash_list) * capacity()) / m_hash_lists.dimension_0());

  size_type find_attempts = 0;

  // Bound the free-slot search when bounded insertion is enabled.
  enum { bounded_find_attempts = 32u };
  const size_type max_attempts = (m_bounded_insert && (bounded_find_attempts < m_available_indexes.max_hint()) ) ?
  bounded_find_attempts :
  m_available_indexes.max_hint();

  bool not_done = true ;

#if defined( __MIC__ )
  #pragma noprefetch
#endif
  while ( not_done ) {

  // Continue searching the unordered list for this key,
  // list will only be appended during insert phase.
  // Need volatile_load as other threads may be appending.
  size_type curr = volatile_load(curr_ptr);

  KOKKOS_NONTEMPORAL_PREFETCH_LOAD(&m_keys[curr != invalid_index ? curr : 0]);
#if defined( __MIC__ )
  #pragma noprefetch
#endif
  while ( curr != invalid_index && ! m_equal_to( volatile_load(&m_keys[curr]), k) ) {
  result.increment_list_position();
  index_hint = curr;
  curr_ptr = &m_next_index[curr];
  curr = volatile_load(curr_ptr);
  KOKKOS_NONTEMPORAL_PREFETCH_LOAD(&m_keys[curr != invalid_index ? curr : 0]);
  }

  //------------------------------------------------------------
  // If key already present then return that index.
  if ( curr != invalid_index ) {

  const bool free_existing = new_index != invalid_index;
  if ( free_existing ) {
  // Previously claimed an unused entry that was not inserted.
  // Release this unused entry immediately.
  if (!m_available_indexes.reset(new_index) ) {
  printf("Unable to free existing\n");
  }

  }

  result.set_existing(curr, free_existing);
  not_done = false ;
  }
  //------------------------------------------------------------
  // Key is not currently in the map.
  // If the thread has claimed an entry try to insert now.
  else {

  //------------------------------------------------------------
  // If have not already claimed an unused entry then do so now.
  if (new_index == invalid_index) {

  bool found = false;
  // use the hash_list as the flag for the search direction
  Kokkos::tie(found, index_hint) = m_available_indexes.find_any_unset_near( index_hint, hash_list );

  // Give up when no free slot was found within the attempt budget.
  if ( !found && ++find_attempts >= max_attempts ) {
  failed_insert_ref = true;
  not_done = false ;
  }
  // Found an index and this thread atomically claimed it.
  else if (m_available_indexes.set(index_hint) ) {
  new_index = index_hint;
  // Set key and value
  KOKKOS_NONTEMPORAL_PREFETCH_STORE(&m_keys[new_index]);
  m_keys[new_index] = k ;

  if (!is_set) {
  KOKKOS_NONTEMPORAL_PREFETCH_STORE(&m_values[new_index]);
  m_values[new_index] = v ;
  }

  // Do not proceed until key and value are updated in global memory
  memory_fence();
  }
  }
  // Another thread already reported failure; stop retrying.
  else if (failed_insert_ref) {
  not_done = false;
  }

  // Attempt to append claimed entry into the list.
  // Another thread may also be trying to append the same list so protect with atomic.
  if ( new_index != invalid_index &&
  curr == atomic_compare_exchange(curr_ptr, static_cast<size_type>(invalid_index), new_index) ) {
  // Succeeded in appending
  result.set_success(new_index);
  not_done = false ;
  }
  }
  } // while ( not_done )

  return result ;
  }
616 
617  KOKKOS_INLINE_FUNCTION
618  bool erase(key_type const& k) const
619  {
620  bool result = false;
621 
622  if(is_insertable_map && 0u < capacity() && m_scalars((int)erasable_idx)) {
623 
624  if ( ! m_scalars((int)modified_idx) ) {
625  m_scalars((int)modified_idx) = true;
626  }
627 
628  size_type index = find(k);
629  if (valid_at(index)) {
630  m_available_indexes.reset(index);
631  result = true;
632  }
633  }
634 
635  return result;
636  }
637 
  /// \brief Find the given key k, if it exists in the table (device callable).
  /// \return the slot index of the entry, or invalid_index when absent.
  KOKKOS_INLINE_FUNCTION
  size_type find( const key_type & k) const
  {
  // Start at the head of the bucket chain selected by k's hash.
  size_type curr = 0u < capacity() ? m_hash_lists( m_hasher(k) % m_hash_lists.dimension_0() ) : invalid_index ;

  KOKKOS_NONTEMPORAL_PREFETCH_LOAD(&m_keys[curr != invalid_index ? curr : 0]);
  // Walk the chain until the key matches or the chain ends; prefetch the
  // key before following the next link.
  while (curr != invalid_index && !m_equal_to( m_keys[curr], k) ) {
  KOKKOS_NONTEMPORAL_PREFETCH_LOAD(&m_keys[curr != invalid_index ? curr : 0]);
  curr = m_next_index[curr];
  }

  return curr;
  }
658 
663  KOKKOS_INLINE_FUNCTION
664  bool exists( const key_type & k) const
665  {
666  return valid_at(find(k));
667  }
668 
669 
678  KOKKOS_FORCEINLINE_FUNCTION
679  typename Impl::if_c< (is_set || has_const_value), impl_value_type, impl_value_type &>::type
680  value_at(size_type i) const
681  {
682  return m_values[ is_set ? 0 : (i < capacity() ? i : capacity()) ];
683  }
684 
691  KOKKOS_FORCEINLINE_FUNCTION
692  key_type key_at(size_type i) const
693  {
694  return m_keys[ i < capacity() ? i : capacity() ];
695  }
696 
697  KOKKOS_FORCEINLINE_FUNCTION
698  bool valid_at(size_type i) const
699  {
700  return m_available_indexes.test(i);
701  }
702 
  // Shallow-copy construction from a compatible map (e.g. non-const to
  // const view of the same data).  Participates in overload resolution only
  // when UnorderedMapCanAssign allows the conversion.
  template <typename SKey, typename SValue>
  // NOTE(review): the constructor name and source-parameter line
  // (`UnorderedMap( UnorderedMap<SKey,SValue,...> const& src, ...`) was lost
  // in extraction; only the trailing enable_if parameter survives below.
  typename Impl::enable_if< Impl::UnorderedMapCanAssign<declared_key_type,declared_value_type,SKey,SValue>::value,int>::type = 0
  )
  : m_bounded_insert(src.m_bounded_insert)
  , m_hasher(src.m_hasher)
  , m_equal_to(src.m_equal_to)
  , m_size(src.m_size)
  , m_available_indexes(src.m_available_indexes)
  , m_hash_lists(src.m_hash_lists)
  , m_next_index(src.m_next_index)
  , m_keys(src.m_keys)
  , m_values(src.m_values)
  , m_scalars(src.m_scalars)
  {}


  // Shallow-copy assignment from a compatible map; all views alias src.
  template <typename SKey, typename SValue>
  typename Impl::enable_if< Impl::UnorderedMapCanAssign<declared_key_type,declared_value_type,SKey,SValue>::value
  ,declared_map_type & >::type
  // NOTE(review): the `operator=( UnorderedMap<...> const& src )` signature
  // line was lost in extraction.
  {
  m_bounded_insert = src.m_bounded_insert;
  m_hasher = src.m_hasher;
  m_equal_to = src.m_equal_to;
  m_size = src.m_size;
  m_available_indexes = src.m_available_indexes;
  m_hash_lists = src.m_hash_lists;
  m_next_index = src.m_next_index;
  m_keys = src.m_keys;
  m_values = src.m_values;
  m_scalars = src.m_scalars;
  return *this;
  }
737 
  // Deep-copy the contents of src (possibly on another device) into this map.
  // Allocates fresh storage, then raw-copies each backing array across memory
  // spaces; a no-op when this map already shares storage with src.
  template <typename SKey, typename SValue, typename SDevice>
  typename Impl::enable_if< std::is_same< typename Impl::remove_const<SKey>::type, key_type>::value &&
  std::is_same< typename Impl::remove_const<SValue>::type, value_type>::value
  >::type
  create_copy_view( UnorderedMap<SKey, SValue, SDevice, Hasher,EqualTo> const& src)
  {
  // Pointer equality detects self/aliased copies.
  if (m_hash_lists.ptr_on_device() != src.m_hash_lists.ptr_on_device()) {

  insertable_map_type tmp;

  tmp.m_bounded_insert = src.m_bounded_insert;
  tmp.m_hasher = src.m_hasher;
  tmp.m_equal_to = src.m_equal_to;
  tmp.m_size = src.size();
  tmp.m_available_indexes = bitset_type( src.capacity() );
  tmp.m_hash_lists = size_type_view( ViewAllocateWithoutInitializing("UnorderedMap hash list"), src.m_hash_lists.dimension_0() );
  tmp.m_next_index = size_type_view( ViewAllocateWithoutInitializing("UnorderedMap next index"), src.m_next_index.dimension_0() );
  tmp.m_keys = key_type_view( ViewAllocateWithoutInitializing("UnorderedMap keys"), src.m_keys.dimension_0() );
  tmp.m_values = value_type_view( ViewAllocateWithoutInitializing("UnorderedMap values"), src.m_values.dimension_0() );
  tmp.m_scalars = scalars_view("UnorderedMap scalars");

  Kokkos::deep_copy(tmp.m_available_indexes, src.m_available_indexes);

  // Byte-wise copies between the two memory spaces.
  typedef Kokkos::Impl::DeepCopy< typename device_type::memory_space, typename SDevice::memory_space > raw_deep_copy;

  raw_deep_copy(tmp.m_hash_lists.ptr_on_device(), src.m_hash_lists.ptr_on_device(), sizeof(size_type)*src.m_hash_lists.dimension_0());
  raw_deep_copy(tmp.m_next_index.ptr_on_device(), src.m_next_index.ptr_on_device(), sizeof(size_type)*src.m_next_index.dimension_0());
  raw_deep_copy(tmp.m_keys.ptr_on_device(), src.m_keys.ptr_on_device(), sizeof(key_type)*src.m_keys.dimension_0());
  // Sets have no real values to copy (m_values is a one-element dummy).
  if (!is_set) {
  raw_deep_copy(tmp.m_values.ptr_on_device(), src.m_values.ptr_on_device(), sizeof(impl_value_type)*src.m_values.dimension_0());
  }
  raw_deep_copy(tmp.m_scalars.ptr_on_device(), src.m_scalars.ptr_on_device(), sizeof(int)*num_scalars );

  *this = tmp;
  }
  }
774 
776 private: // private member functions
777 
778  bool modified() const
779  {
780  return get_flag(modified_idx);
781  }
782 
783  void set_flag(int flag) const
784  {
785  typedef Kokkos::Impl::DeepCopy< typename device_type::memory_space, Kokkos::HostSpace > raw_deep_copy;
786  const int true_ = true;
787  raw_deep_copy(m_scalars.ptr_on_device() + flag, &true_, sizeof(int));
788  }
789 
790  void reset_flag(int flag) const
791  {
792  typedef Kokkos::Impl::DeepCopy< typename device_type::memory_space, Kokkos::HostSpace > raw_deep_copy;
793  const int false_ = false;
794  raw_deep_copy(m_scalars.ptr_on_device() + flag, &false_, sizeof(int));
795  }
796 
797  bool get_flag(int flag) const
798  {
799  typedef Kokkos::Impl::DeepCopy< Kokkos::HostSpace, typename device_type::memory_space > raw_deep_copy;
800  int result = false;
801  raw_deep_copy(&result, m_scalars.ptr_on_device() + flag, sizeof(int));
802  return result;
803  }
804 
805  static uint32_t calculate_capacity(uint32_t capacity_hint)
806  {
807  // increase by 16% and round to nears multiple of 128
808  return capacity_hint ? ((static_cast<uint32_t>(7ull*capacity_hint/6u) + 127u)/128u)*128u : 128u;
809  }
810 
private: // private members
  bool m_bounded_insert;            // limit free-slot probing during insert (see insert())
  hasher_type m_hasher;             // hash functor
  equal_to_type m_equal_to;         // key-equality functor
  mutable size_type m_size;         // cached entry count, refreshed lazily by size()
  bitset_type m_available_indexes;  // one bit per slot: set == occupied
  size_type_view m_hash_lists;      // bucket heads (index of first chain entry)
  size_type_view m_next_index;      // per-slot link to the next chain entry
  key_type_view m_keys;             // slot keys (+1 sentinel slot at capacity())
  value_type_view m_values;         // slot values (one-element dummy for sets)
  scalars_view m_scalars;           // device flags: modified / erasable / failed_insert

  // Other instantiations (e.g. const versions) need access for shallow copies.
  template <typename KKey, typename VValue, typename DDevice, typename HHash, typename EEqualTo>
  friend class UnorderedMap;

  template <typename UMap>
  friend struct Impl::UnorderedMapErase;

  template <typename UMap>
  friend struct Impl::UnorderedMapHistogram;

  template <typename UMap>
  friend struct Impl::UnorderedMapPrint;
};
835 
// Specialization of deep_copy for two UnorderedMap objects.
template < typename DKey, typename DT, typename DDevice
  , typename SKey, typename ST, typename SDevice
  , typename Hasher, typename EqualTo >
// NOTE(review): the function signature line (an inline deep_copy overload
// taking dst and src UnorderedMap references) was lost in extraction; the
// body delegates to UnorderedMap::create_copy_view.
{
  dst.create_copy_view(src);
}
845 
846 
847 } // namespace Kokkos
848 
849 #endif //KOKKOS_UNORDERED_MAP_HPP
850 
A thread safe view to a bitset.
KOKKOS_FORCEINLINE_FUNCTION key_type key_at(size_type i) const
Get the key with i as its direct index.
KOKKOS_FORCEINLINE_FUNCTION bool success() const
Did the map successfully insert the key/value pair.
bool failed_insert() const
The current number of failed insert() calls.
UnorderedMap(size_type capacity_hint, hasher_type hasher=hasher_type(), equal_to_type equal_to=equal_to_type())
Constructor.
KOKKOS_FORCEINLINE_FUNCTION bool existing() const
Was the key already present in the map.
KOKKOS_FORCEINLINE_FUNCTION bool failed() const
Did the map fail to insert the key due to insufficient capacity.
KOKKOS_INLINE_FUNCTION size_type find(const key_type &k) const
Find the given key k, if it exists in the table.
KOKKOS_FORCEINLINE_FUNCTION size_type capacity() const
The maximum number of entries that the table can hold.
KOKKOS_FORCEINLINE_FUNCTION Impl::if_c<(is_set||has_const_value), impl_value_type, impl_value_type & >::type value_at(size_type i) const
Get the value with i as its direct index.
void clear()
Clear all entries in the table.
View to an array of data.
Memory space for main process and CPU execution spaces.
First element of the return value of UnorderedMap::insert().
bool rehash(size_type requested_capacity=0)
Change the capacity of the map.
size_type size() const
The number of entries in the table.
KOKKOS_INLINE_FUNCTION insert_result insert(key_type const &k, impl_value_type const &v=impl_value_type()) const
void deep_copy(const View< DT, DP... > &dst, typename ViewTraits< DT, DP... >::const_value_type &value, typename std::enable_if< std::is_same< typename ViewTraits< DT, DP... >::specialize, void >::value >::type *=0)
Deep copy a value from Host memory into a view.
KOKKOS_INLINE_FUNCTION size_type hash_capacity() const
The number of hash table "buckets".
KOKKOS_FORCEINLINE_FUNCTION uint32_t list_position() const
KOKKOS_FORCEINLINE_FUNCTION uint32_t index() const
Index where the key can be found as long as the insert did not fail.
KOKKOS_INLINE_FUNCTION bool exists(const key_type &k) const
Does the key exist in the map.
Thread-safe, performance-portable lookup table.
KOKKOS_FORCEINLINE_FUNCTION pair< T1 &, T2 & > tie(T1 &x, T2 &y)
Return a pair of references to the input arguments.
KOKKOS_FORCEINLINE_FUNCTION bool freed_existing() const