// NOTE(review): extraction-damaged header of Kokkos_UnorderedMap.hpp — the
// original source line numbers are fused into the text and many lines are
// missing; restore formatting from the upstream Kokkos source before compiling.
50 #ifndef KOKKOS_UNORDERED_MAP_HPP 51 #define KOKKOS_UNORDERED_MAP_HPP 53 #include <Kokkos_Core.hpp> 54 #include <Kokkos_Functional.hpp> 56 #include <Kokkos_Bitset.hpp> 58 #include <impl/Kokkos_Traits.hpp> 59 #include <impl/Kokkos_UnorderedMap_impl.hpp> 70 enum { UnorderedMapInvalidIndex = ~0u };
// Status-word bit flags of the insert result: FREED_EXISTING marks that a
// speculatively claimed slot was released again; LIST_LENGTH_MASK selects the
// remaining (list-position) bits. NOTE(review): the SUCCESS and EXISTING
// enumerators referenced here are missing from this extraction — verify
// against upstream.
92 , FREED_EXISTING = 1u << 29
93 , LIST_LENGTH_MASK = ~(SUCCESS | EXISTING | FREED_EXISTING)
98 KOKKOS_FORCEINLINE_FUNCTION
99 bool success()
const {
return (m_status & SUCCESS); }
102 KOKKOS_FORCEINLINE_FUNCTION
103 bool existing()
const {
return (m_status & EXISTING); }
106 KOKKOS_FORCEINLINE_FUNCTION
107 bool failed()
const {
return m_index == UnorderedMapInvalidIndex; }
// NOTE(review): two inline-function markers whose declarations/bodies were
// dropped by the extraction (the trailing doxygen text suggests these are
// freed_existing() and list_position()) — restore from upstream.
111 KOKKOS_FORCEINLINE_FUNCTION
116 KOKKOS_FORCEINLINE_FUNCTION
120 KOKKOS_FORCEINLINE_FUNCTION
121 uint32_t
index()
const {
return m_index; }
// Default-constructor fragment: the result starts with an invalid index
// (no slot claimed). The m_status initializer line is missing here.
123 KOKKOS_FORCEINLINE_FUNCTION
125 : m_index(UnorderedMapInvalidIndex)
// Bump the traversed-collision-list counter kept in the low status bits
// (body missing in this extraction — verify against upstream).
129 KOKKOS_FORCEINLINE_FUNCTION
130 void increment_list_position()
// Record that the key was already present; arg_freed_existing notes whether
// a speculatively claimed slot had to be released. The m_index assignment
// line is missing from this view.
135 KOKKOS_FORCEINLINE_FUNCTION
136 void set_existing(uint32_t i,
bool arg_freed_existing)
139 m_status = EXISTING | (arg_freed_existing ? FREED_EXISTING : 0u) |
list_position();
// Record a successful insert at slot i (body missing in this extraction).
142 KOKKOS_FORCEINLINE_FUNCTION
143 void set_success(uint32_t i)
// Thread-safe, performance-portable lookup table (per the trailing doxygen
// text). Template parameters: Key, Value (its declaration line is missing
// from this extraction), Device, plus Hasher/EqualTo functors defaulting to
// POD hash/equality on the non-const key type.
209 template <
typename Key
211 ,
typename Device = Kokkos::DefaultExecutionSpace
212 ,
typename Hasher = pod_hash<typename Impl::remove_const<Key>::type>
213 ,
typename EqualTo = pod_equal_to<typename Impl::remove_const<Key>::type>
// Public typedefs: declared vs. const-stripped vs. const-added key/value types.
218 typedef typename ViewTraits<Key,Device,void,void>::host_mirror_space host_mirror_space ;
224 typedef Key declared_key_type;
225 typedef typename Impl::remove_const<declared_key_type>::type key_type;
226 typedef typename Impl::add_const<key_type>::type const_key_type;
229 typedef Value declared_value_type;
230 typedef typename Impl::remove_const<declared_value_type>::type value_type;
231 typedef typename Impl::add_const<value_type>::type const_value_type;
234 typedef Hasher hasher_type;
235 typedef EqualTo equal_to_type;
236 typedef uint32_t size_type;
// Compile-time capability flags derived from the const-ness of Key/Value:
// a void value type makes this a set; only non-const keys allow insertion.
244 static const bool is_set = std::is_same<void,value_type>::value;
245 static const bool has_const_key = std::is_same<const_key_type,declared_key_type>::value;
246 static const bool has_const_value = is_set || std::is_same<const_value_type,declared_value_type>::value;
248 static const bool is_insertable_map = !has_const_key && (is_set || !has_const_value);
249 static const bool is_modifiable_map = has_const_key && !has_const_value;
250 static const bool is_const_map = has_const_key && has_const_value;
257 typedef Impl::UnorderedMapHistogram<const_map_type> histogram_type;
// Sentinel slot index: all bits set in size_type.
262 enum { invalid_index = ~static_cast<size_type>(0) };
// Sets store a dummy int internally instead of a value array per entry.
264 typedef typename Impl::if_c< is_set, int, declared_value_type>::type impl_value_type;
// Internal view typedefs; the if_c alternative branches are missing from
// this extraction.
266 typedef typename Impl::if_c< is_insertable_map
269 >::type key_type_view;
271 typedef typename Impl::if_c< is_insertable_map || is_modifiable_map
274 >::type value_type_view;
276 typedef typename Impl::if_c< is_insertable_map
279 >::type size_type_view;
281 typedef typename Impl::if_c< is_insertable_map
// Indices into the m_scalars flag array (device-side int flags).
286 enum { modified_idx = 0, erasable_idx = 1, failed_insert_idx = 2 };
287 enum { num_scalars = 3 };
// Fragment of the default constructor's initializer list.
299 , m_available_indexes()
// Main constructor: sizes the occupancy bitset from
// calculate_capacity(capacity_hint), then allocates the hash-list heads,
// next-index links, and key/value storage; value storage collapses to a
// single element when this is a set.
312 UnorderedMap( size_type capacity_hint, hasher_type hasher = hasher_type(), equal_to_type equal_to = equal_to_type() )
313 : m_bounded_insert(true)
315 , m_equal_to(equal_to)
317 , m_available_indexes(calculate_capacity(capacity_hint))
318 , m_hash_lists(ViewAllocateWithoutInitializing(
"UnorderedMap hash list"), Impl::find_hash_size(
capacity()))
319 , m_next_index(ViewAllocateWithoutInitializing(
"UnorderedMap next index"),
capacity()+1)
320 , m_keys(
"UnorderedMap keys",
capacity()+1)
321 , m_values(
"UnorderedMap values",(is_set? 1 :
capacity()+1))
322 , m_scalars(
"UnorderedMap scalars")
// Constructing a map with a const key type is rejected at run time.
324 if (!is_insertable_map) {
325 throw std::runtime_error(
"Cannot construct a non-insertable (i.e. const key_type) unordered_map");
// Host-side utilities (fragments): flag reset, histogram, clear(), rehash(),
// size(), and the erasable-state protocol.
332 void reset_failed_insert_flag()
334 reset_flag(failed_insert_idx);
337 histogram_type get_histogram()
339 return histogram_type(*
this);
// clear() fragment: re-enable bounded insert, clear the occupancy bitset,
// and (below) value-initialize key/value storage via temporaries.
345 m_bounded_insert =
true;
349 m_available_indexes.clear();
354 const key_type tmp = key_type();
358 const impl_value_type tmp = impl_value_type();
// rehash(): an empty or zero-capacity map is rehashed with bounded insert.
376 bool rehash(size_type requested_capacity = 0)
378 const bool bounded_insert = (
capacity() == 0) || (
size() == 0u);
379 return rehash(requested_capacity, bounded_insert );
// rehash(cap, bounded): copy into a temporary map of at least curr_size
// capacity via Impl::UnorderedMapRehash, then adopt it. Returns false
// without work for non-insertable maps.
382 bool rehash(size_type requested_capacity,
bool bounded_insert)
384 if(!is_insertable_map)
return false;
386 const size_type curr_size =
size();
387 requested_capacity = (requested_capacity < curr_size) ? curr_size : requested_capacity;
389 insertable_map_type tmp(requested_capacity, m_hasher, m_equal_to);
392 tmp.m_bounded_insert =
false;
393 Impl::UnorderedMapRehash<insertable_map_type> f(tmp,*
this);
396 tmp.m_bounded_insert = bounded_insert;
// size() fragment: recount occupied slots, then clear the modified flag.
414 m_size = m_available_indexes.count();
415 reset_flag(modified_idx);
// failed_insert() fragment: report the device-side failed-insert flag.
427 return get_flag(failed_insert_idx);
430 bool erasable()
const 432 return is_insertable_map ? get_flag(erasable_idx) : false;
// begin_erase() fragment: fence, mark the map erasable, fence again.
437 bool result = !erasable();
438 if (is_insertable_map && result) {
439 execution_space::fence();
440 set_flag(erasable_idx);
441 execution_space::fence();
// end_erase() fragment: run the parallel erase functor, then clear the flag.
448 bool result = erasable();
449 if (is_insertable_map && result) {
450 execution_space::fence();
451 Impl::UnorderedMapErase<declared_map_type> f(*
this);
453 execution_space::fence();
454 reset_flag(erasable_idx);
// capacity() fragment: total slots = size of the occupancy bitset
// (the function signature line is missing from this extraction).
463 KOKKOS_FORCEINLINE_FUNCTION
465 {
return m_available_indexes.size(); }
// hash_capacity() fragment: number of hash-list buckets.
477 KOKKOS_INLINE_FUNCTION
479 {
return m_hash_lists.dimension_0(); }
// insert(k, v): thread-safe device-side insert. Bails out for non-insertable,
// zero-capacity, or erasable-state maps; otherwise walks the collision list
// of bucket hash(k) % #buckets, reusing an existing entry or claiming a free
// slot from the bitset and publishing it with an atomic compare-exchange.
// NOTE(review): many lines of this function are missing in this extraction —
// treat this commentary as a sketch and verify against upstream.
493 KOKKOS_INLINE_FUNCTION
498 if ( !is_insertable_map ||
capacity() == 0u || m_scalars((
int)erasable_idx) ) {
// Lazily mark the map modified on the first device-side mutation.
502 if ( !m_scalars((
int)modified_idx) ) {
503 m_scalars((
int)modified_idx) =
true;
506 int volatile & failed_insert_ref = m_scalars((
int)failed_insert_idx) ;
508 const size_type hash_value = m_hasher(k);
509 const size_type hash_list = hash_value % m_hash_lists.dimension_0();
511 size_type * curr_ptr = & m_hash_lists[ hash_list ];
512 size_type new_index = invalid_index ;
// Start the free-slot search near the bucket's proportional position in
// the capacity range.
515 size_type index_hint =
static_cast<size_type
>( (
static_cast<double>(hash_list) *
capacity()) / m_hash_lists.dimension_0());
517 size_type find_attempts = 0;
// Bounded inserts give up after a fixed number of failed probes.
519 enum { bounded_find_attempts = 32u };
520 const size_type max_attempts = (m_bounded_insert && (bounded_find_attempts < m_available_indexes.max_hint()) ) ?
521 bounded_find_attempts :
522 m_available_indexes.max_hint();
524 bool not_done = true ;
// Walk the collision list until the key matches or the list ends,
// counting the traversed positions in the result.
526 #if defined( __MIC__ ) 534 size_type curr = volatile_load(curr_ptr);
536 KOKKOS_NONTEMPORAL_PREFETCH_LOAD(&m_keys[curr != invalid_index ? curr : 0]);
537 #if defined( __MIC__ ) 540 while ( curr != invalid_index && ! m_equal_to( volatile_load(&m_keys[curr]), k) ) {
541 result.increment_list_position();
543 curr_ptr = &m_next_index[curr];
544 curr = volatile_load(curr_ptr);
545 KOKKOS_NONTEMPORAL_PREFETCH_LOAD(&m_keys[curr != invalid_index ? curr : 0]);
// Key already present: release any slot we had speculatively claimed.
550 if ( curr != invalid_index ) {
552 const bool free_existing = new_index != invalid_index;
553 if ( free_existing ) {
556 if (!m_available_indexes.reset(new_index) ) {
557 printf(
"Unable to free existing\n");
562 result.set_existing(curr, free_existing);
// Key not found: claim a free slot near index_hint and write the key
// (and the value, for maps).
572 if (new_index == invalid_index) {
576 Kokkos::tie(found, index_hint) = m_available_indexes.find_any_unset_near( index_hint, hash_list );
579 if ( !found && ++find_attempts >= max_attempts ) {
580 failed_insert_ref =
true;
583 else if (m_available_indexes.set(index_hint) ) {
584 new_index = index_hint;
586 KOKKOS_NONTEMPORAL_PREFETCH_STORE(&m_keys[new_index]);
587 m_keys[new_index] = k ;
590 KOKKOS_NONTEMPORAL_PREFETCH_STORE(&m_values[new_index]);
591 m_values[new_index] = v ;
// Another thread already recorded a failed insert — give up.
598 else if (failed_insert_ref) {
// Publish the new slot by atomically linking it at the list tail; on
// success the result carries the new index.
604 if ( new_index != invalid_index &&
605 curr == atomic_compare_exchange(curr_ptr, static_cast<size_type>(invalid_index), new_index) ) {
607 result.set_success(new_index);
// erase(k): only permitted while the map is in the erasable state; marks
// the map modified, then releases the slot located by find(k) in the
// occupancy bitset. Trailing lines (return value) are missing here.
616 KOKKOS_INLINE_FUNCTION
617 bool erase(key_type
const& k)
const 621 if(is_insertable_map && 0u <
capacity() && m_scalars((
int)erasable_idx)) {
623 if ( ! m_scalars((
int)modified_idx) ) {
624 m_scalars((
int)modified_idx) =
true;
627 size_type index =
find(k);
628 if (valid_at(index)) {
629 m_available_indexes.reset(index);
// find(k): follow the collision list of k's bucket until the key matches;
// starts at invalid_index for an empty map. The trailing return statement
// is missing from this extraction.
644 KOKKOS_INLINE_FUNCTION
645 size_type
find(
const key_type & k)
const 647 size_type curr = 0u <
capacity() ? m_hash_lists( m_hasher(k) % m_hash_lists.dimension_0() ) : invalid_index ;
649 KOKKOS_NONTEMPORAL_PREFETCH_LOAD(&m_keys[curr != invalid_index ? curr : 0]);
650 while (curr != invalid_index && !m_equal_to( m_keys[curr], k) ) {
651 KOKKOS_NONTEMPORAL_PREFETCH_LOAD(&m_keys[curr != invalid_index ? curr : 0]);
652 curr = m_next_index[curr];
// exists(k): a key exists iff find(k) lands on an occupied slot.
662 KOKKOS_INLINE_FUNCTION
665 return valid_at(
find(k));
// value_at(i)/key_at(i) fragments: direct-index accessors. Sets and
// const-value maps return the value by copy, otherwise by reference; the
// bodies are missing from this extraction.
677 KOKKOS_FORCEINLINE_FUNCTION
678 typename Impl::if_c< (is_set || has_const_value), impl_value_type, impl_value_type &>::type
690 KOKKOS_FORCEINLINE_FUNCTION
// valid_at(i): slot i holds a live entry iff its occupancy bit is set.
696 KOKKOS_FORCEINLINE_FUNCTION
697 bool valid_at(size_type i)
const 699 return m_available_indexes.test(i);
// Shallow converting copy constructor: shares all views with src; enabled
// only for assignable key/value combinations (e.g. adding const) via
// Impl::UnorderedMapCanAssign.
702 template <
typename SKey,
typename SValue>
703 UnorderedMap( UnorderedMap<SKey,SValue,Device,Hasher,EqualTo>
const& src,
704 typename Impl::enable_if< Impl::UnorderedMapCanAssign<declared_key_type,declared_value_type,SKey,SValue>::value,
int>::type = 0
706 : m_bounded_insert(src.m_bounded_insert)
707 , m_hasher(src.m_hasher)
708 , m_equal_to(src.m_equal_to)
710 , m_available_indexes(src.m_available_indexes)
711 , m_hash_lists(src.m_hash_lists)
712 , m_next_index(src.m_next_index)
714 , m_values(src.m_values)
715 , m_scalars(src.m_scalars)
// Shallow converting assignment: same enable_if gate as the converting
// constructor; after assignment all views alias src's storage.
719 template <
typename SKey,
typename SValue>
720 typename Impl::enable_if< Impl::UnorderedMapCanAssign<declared_key_type,declared_value_type,SKey,SValue>::value
721 ,declared_map_type & >::type
722 operator=( UnorderedMap<SKey,SValue,Device,Hasher,EqualTo>
const& src)
724 m_bounded_insert = src.m_bounded_insert;
725 m_hasher = src.m_hasher;
726 m_equal_to = src.m_equal_to;
728 m_available_indexes = src.m_available_indexes;
729 m_hash_lists = src.m_hash_lists;
730 m_next_index = src.m_next_index;
732 m_values = src.m_values;
733 m_scalars = src.m_scalars;
// create_copy_view(src): deep copy from a map on a possibly different
// device. Skipped when both maps already share storage (same hash-list
// pointer); otherwise allocates fresh views sized like src's and byte-copies
// each array across memory spaces.
737 template <
typename SKey,
typename SValue,
typename SDevice>
738 typename Impl::enable_if< std::is_same< typename Impl::remove_const<SKey>::type, key_type>::value &&
739 std::is_same< typename Impl::remove_const<SValue>::type, value_type>::value
741 create_copy_view( UnorderedMap<SKey, SValue, SDevice, Hasher,EqualTo>
const& src)
743 if (m_hash_lists.ptr_on_device() != src.m_hash_lists.ptr_on_device()) {
745 insertable_map_type tmp;
747 tmp.m_bounded_insert = src.m_bounded_insert;
748 tmp.m_hasher = src.m_hasher;
749 tmp.m_equal_to = src.m_equal_to;
750 tmp.m_size = src.size();
751 tmp.m_available_indexes = bitset_type( src.capacity() );
752 tmp.m_hash_lists = size_type_view( ViewAllocateWithoutInitializing(
"UnorderedMap hash list"), src.m_hash_lists.dimension_0() );
753 tmp.m_next_index = size_type_view( ViewAllocateWithoutInitializing(
"UnorderedMap next index"), src.m_next_index.dimension_0() );
754 tmp.m_keys = key_type_view( ViewAllocateWithoutInitializing(
"UnorderedMap keys"), src.m_keys.dimension_0() );
755 tmp.m_values = value_type_view( ViewAllocateWithoutInitializing(
"UnorderedMap values"), src.m_values.dimension_0() );
756 tmp.m_scalars = scalars_view(
"UnorderedMap scalars");
// Raw byte-wise copies between the destination and source memory spaces.
760 typedef Kokkos::Impl::DeepCopy< typename execution_space::memory_space, typename SDevice::memory_space > raw_deep_copy;
762 raw_deep_copy(tmp.m_hash_lists.ptr_on_device(), src.m_hash_lists.ptr_on_device(),
sizeof(size_type)*src.m_hash_lists.dimension_0());
763 raw_deep_copy(tmp.m_next_index.ptr_on_device(), src.m_next_index.ptr_on_device(),
sizeof(size_type)*src.m_next_index.dimension_0());
764 raw_deep_copy(tmp.m_keys.ptr_on_device(), src.m_keys.ptr_on_device(),
sizeof(key_type)*src.m_keys.dimension_0());
766 raw_deep_copy(tmp.m_values.ptr_on_device(), src.m_values.ptr_on_device(),
sizeof(impl_value_type)*src.m_values.dimension_0());
768 raw_deep_copy(tmp.m_scalars.ptr_on_device(), src.m_scalars.ptr_on_device(),
sizeof(int)*num_scalars );
// Host-side flag helpers: each reads or writes one int of the device-side
// m_scalars array through a raw DeepCopy between host and device memory.
777 bool modified()
const 779 return get_flag(modified_idx);
// set_flag(flag): write 1 into m_scalars[flag] on the device.
782 void set_flag(
int flag)
const 784 typedef Kokkos::Impl::DeepCopy< typename execution_space::memory_space, Kokkos::HostSpace > raw_deep_copy;
785 const int true_ =
true;
786 raw_deep_copy(m_scalars.ptr_on_device() + flag, &true_,
sizeof(int));
// reset_flag(flag): write 0 into m_scalars[flag] on the device.
789 void reset_flag(
int flag)
const 791 typedef Kokkos::Impl::DeepCopy< typename execution_space::memory_space, Kokkos::HostSpace > raw_deep_copy;
792 const int false_ =
false;
793 raw_deep_copy(m_scalars.ptr_on_device() + flag, &false_,
sizeof(int));
// get_flag(flag): copy m_scalars[flag] back to the host (declaration of
// `result` and the return statement are missing from this extraction).
796 bool get_flag(
int flag)
const 798 typedef Kokkos::Impl::DeepCopy< Kokkos::HostSpace, typename execution_space::memory_space > raw_deep_copy;
800 raw_deep_copy(&result, m_scalars.ptr_on_device() + flag,
sizeof(int));
// Map a user capacity hint to the actual allocation size: pad the hint by
// roughly 7/6 for slack, then round up to a multiple of 128. A hint of zero
// yields the minimum capacity of 128.
static uint32_t calculate_capacity(uint32_t capacity_hint)
{
  if (capacity_hint == 0u) return 128u;
  const uint32_t padded = static_cast<uint32_t>(7ull * capacity_hint / 6u);
  return ((padded + 127u) / 128u) * 128u;
}
// Data members: insert-bounding flag, hash/equality functors, cached entry
// count, occupancy bitset, hash-list heads, collision links, key/value
// storage, and the three device-side scalar flags.
811 bool m_bounded_insert;
812 hasher_type m_hasher;
813 equal_to_type m_equal_to;
814 mutable size_type m_size;
815 bitset_type m_available_indexes;
816 size_type_view m_hash_lists;
817 size_type_view m_next_index;
818 key_type_view m_keys;
819 value_type_view m_values;
820 scalars_view m_scalars;
// Other UnorderedMap instantiations and the Impl functors need direct
// access to the private views.
822 template <
typename KKey,
typename VValue,
typename DDevice,
typename HHash,
typename EEqualTo>
823 friend class UnorderedMap;
825 template <
typename UMap>
826 friend struct Impl::UnorderedMapErase;
828 template <
typename UMap>
829 friend struct Impl::UnorderedMapHistogram;
831 template <
typename UMap>
832 friend struct Impl::UnorderedMapPrint;
// Free-function deep_copy between maps on (possibly) different devices;
// delegates to dst.create_copy_view(src).
836 template <
typename DKey,
typename DT,
typename DDevice
837 ,
typename SKey,
typename ST,
typename SDevice
838 ,
typename Hasher,
typename EqualTo >
839 inline void deep_copy( UnorderedMap<DKey, DT, DDevice, Hasher, EqualTo> & dst
840 ,
const UnorderedMap<SKey, ST, SDevice, Hasher, EqualTo> & src )
842 dst.create_copy_view(src);
848 #endif //KOKKOS_UNORDERED_MAP_HPP A thread safe view to a bitset.
KOKKOS_FORCEINLINE_FUNCTION key_type key_at(size_type i) const
Get the key with i as its direct index.
KOKKOS_FORCEINLINE_FUNCTION bool success() const
Did the map successfully insert the key/value pair.
bool failed_insert() const
The current number of failed insert() calls.
UnorderedMap(size_type capacity_hint, hasher_type hasher=hasher_type(), equal_to_type equal_to=equal_to_type())
Constructor.
KOKKOS_FORCEINLINE_FUNCTION bool existing() const
Was the key already present in the map.
KOKKOS_FORCEINLINE_FUNCTION bool failed() const
Did the map fail to insert the key due to insufficient capacity.
KOKKOS_INLINE_FUNCTION size_type find(const key_type &k) const
Find the given key k, if it exists in the table.
KOKKOS_FORCEINLINE_FUNCTION size_type capacity() const
The maximum number of entries that the table can hold.
KOKKOS_FORCEINLINE_FUNCTION Impl::if_c<(is_set||has_const_value), impl_value_type, impl_value_type & >::type value_at(size_type i) const
Get the value with i as its direct index.
void clear()
Clear all entries in the table.
View to an array of data.
Memory space for main process and CPU execution spaces.
First element of the return value of UnorderedMap::insert().
bool rehash(size_type requested_capacity=0)
Change the capacity of the map.
size_type size() const
The number of entries in the table.
KOKKOS_INLINE_FUNCTION insert_result insert(key_type const &k, impl_value_type const &v=impl_value_type()) const
void deep_copy(const View< DT, DP... > &dst, typename ViewTraits< DT, DP... >::const_value_type &value, typename std::enable_if< std::is_same< typename ViewTraits< DT, DP... >::specialize, void >::value >::type *=0)
Deep copy a value from Host memory into a view.
KOKKOS_INLINE_FUNCTION size_type hash_capacity() const
The number of hash table "buckets".
KOKKOS_FORCEINLINE_FUNCTION uint32_t list_position() const
KOKKOS_FORCEINLINE_FUNCTION uint32_t index() const
Index where the key can be found as long as the insert did not fail.
KOKKOS_INLINE_FUNCTION bool exists(const key_type &k) const
Does the key exist in the map.
Thread-safe, performance-portable lookup table.
KOKKOS_FORCEINLINE_FUNCTION pair< T1 &, T2 & > tie(T1 &x, T2 &y)
Return a pair of references to the input arguments.
KOKKOS_FORCEINLINE_FUNCTION bool freed_existing() const