#ifndef __TBB_concurrent_lru_cache_H
#define __TBB_concurrent_lru_cache_H

#if ! TBB_PREVIEW_CONCURRENT_LRU_CACHE
    #error Set TBB_PREVIEW_CONCURRENT_LRU_CACHE to include concurrent_lru_cache.h
#endif

#include "tbb_stddef.h"

#include <map>
#include <list>
#include <algorithm>

#include "internal/_aggregator_impl.h"

namespace tbb{
namespace interface6 {
39 template <
typename key_type,
typename value_type,
typename value_functor_type = value_type (*)(key_type) >
43 typedef value_functor_type value_function_type;
44 typedef std::size_t ref_counter_type;
45 struct map_value_type;
46 typedef std::map<key_type, map_value_type> map_storage_type;
47 typedef std::list<typename map_storage_type::iterator> lru_list_type;
48 struct map_value_type {
50 ref_counter_type my_ref_counter;
51 typename lru_list_type::iterator my_lru_list_iterator;
54 map_value_type (value_type
const& a_value, ref_counter_type a_ref_counter,
typename lru_list_type::iterator a_lru_list_iterator,
bool a_is_ready)
55 : my_value(a_value), my_ref_counter(a_ref_counter), my_lru_list_iterator (a_lru_list_iterator), my_is_ready(a_is_ready)
61 struct aggregator_operation;
62 typedef aggregator_operation aggregated_operation_type;
68 value_function_type my_value_function;
69 std::size_t
const my_number_of_lru_history_items;
70 map_storage_type my_map_storage;
71 lru_list_type my_lru_list;
72 aggregator_type my_aggregator;
75 typedef handle_object handle;
79 : my_value_function(f),my_number_of_lru_history_items(number_of_lru_history_items)
81 my_aggregator.initialize_handler(aggregator_function_type(
this));
84 handle_object operator[](key_type k){
85 retrieve_aggregator_operation op(k);
86 my_aggregator.execute(&op);
87 if (op.is_new_value_needed()){
88 op.result().second.my_value = my_value_function(k);
89 __TBB_store_with_release(op.result().second.my_is_ready,
true);
91 tbb::internal::spin_wait_while_eq(op.result().second.my_is_ready,
false);
93 return handle_object(*
this,op.result());
96 void signal_end_of_usage(
typename map_storage_type::reference value_ref){
97 signal_end_of_usage_aggregator_operation op(value_ref);
98 my_aggregator.execute(&op);
102 struct handle_move_t:no_assign{
104 typename map_storage_type::reference my_map_record_ref;
105 handle_move_t(
concurrent_lru_cache & cache_ref,
typename map_storage_type::reference value_ref):my_cache_ref(cache_ref),my_map_record_ref(value_ref) {};
107 class handle_object {
109 typename map_storage_type::reference my_map_record_ref;
111 handle_object(
concurrent_lru_cache & cache_ref,
typename map_storage_type::reference value_ref):my_cache_pointer(&cache_ref), my_map_record_ref(value_ref) {}
112 handle_object(handle_move_t m):my_cache_pointer(&m.my_cache_ref), my_map_record_ref(m.my_map_record_ref){}
113 operator handle_move_t(){
return move(*
this);}
115 __TBB_ASSERT(my_cache_pointer,
"get value from moved from object?");
116 return my_map_record_ref.second.my_value;
119 if (my_cache_pointer){
120 my_cache_pointer->signal_end_of_usage(my_map_record_ref);
124 friend handle_move_t move(handle_object& h){
125 return handle_object::move(h);
127 static handle_move_t move(handle_object& h){
128 __TBB_ASSERT(h.my_cache_pointer,
"move from the same object twice ?");
130 h.my_cache_pointer = NULL;
131 return handle_move_t(*cache_pointer,h.my_map_record_ref);
134 void operator=(handle_object&);
140 handle_object(handle_object &);
145 enum e_op_type {op_retive, op_signal_end_of_usage};
148 e_op_type my_operation_type;
149 aggregator_operation(e_op_type operation_type): my_operation_type(operation_type) {}
150 void cast_and_handle(self_type& container ){
151 if (my_operation_type==op_retive){
152 static_cast<retrieve_aggregator_operation*
>(
this)->handle(container);
154 static_cast<signal_end_of_usage_aggregator_operation*
>(
this)->handle(container);
158 struct retrieve_aggregator_operation : aggregator_operation,
private internal::no_assign {
160 typename map_storage_type::pointer my_result_map_record_pointer;
161 bool my_is_new_value_needed;
162 retrieve_aggregator_operation(key_type key):aggregator_operation(aggregator_operation::op_retive),my_key(key),my_is_new_value_needed(
false){}
163 void handle(self_type& container ){
164 my_result_map_record_pointer = & container.retrieve_serial(my_key,my_is_new_value_needed);
166 typename map_storage_type::reference result(){
return * my_result_map_record_pointer; }
167 bool is_new_value_needed(){
return my_is_new_value_needed;}
169 struct signal_end_of_usage_aggregator_operation : aggregator_operation,
private internal::no_assign {
170 typename map_storage_type::reference my_map_record_ref;
171 signal_end_of_usage_aggregator_operation(
typename map_storage_type::reference map_record_ref):aggregator_operation(aggregator_operation::op_signal_end_of_usage),my_map_record_ref(map_record_ref){}
172 void handle(self_type& container ){
173 container.signal_end_of_usage_serial(my_map_record_ref);
178 void handle_operations(aggregator_operation* op_list){
180 op_list->cast_and_handle(*
this);
181 aggregator_operation* tmp = op_list;
182 op_list=op_list->next;
183 tbb::internal::itt_store_word_with_release(tmp->status, uintptr_t(1));
188 typename map_storage_type::reference retrieve_serial(key_type k,
bool& is_new_value_needed){
189 typename map_storage_type::iterator it = my_map_storage.find(k);
190 if (it == my_map_storage.end()){
191 it = my_map_storage.insert(it,std::make_pair(k,map_value_type(value_type(),0,my_lru_list.end(),
false)));
192 is_new_value_needed =
true;
194 typename lru_list_type::iterator list_it = it->second.my_lru_list_iterator;
195 if (list_it!=my_lru_list.end()) {
196 __TBB_ASSERT(!it->second.my_ref_counter,
"item to be evicted should not have a live references");
199 my_lru_list.erase(list_it);
200 it->second.my_lru_list_iterator= my_lru_list.end();
203 ++(it->second.my_ref_counter);
207 void signal_end_of_usage_serial(
typename map_storage_type::reference map_record_ref){
208 typename map_storage_type::iterator it = my_map_storage.find(map_record_ref.first);
209 __TBB_ASSERT(it!=my_map_storage.end(),
"cache should not return past-end iterators to outer world");
210 __TBB_ASSERT(&(*it) == &map_record_ref,
"dangling reference has been returned to outside world? data race ?");
211 __TBB_ASSERT( my_lru_list.end()== std::find(my_lru_list.begin(),my_lru_list.end(),it),
212 "object in use should not be in list of unused objects ");
213 if (! --(it->second.my_ref_counter)){
215 if (my_lru_list.size()>=my_number_of_lru_history_items){
217 size_t number_of_elements_to_evict = 1 + my_lru_list.size() - my_number_of_lru_history_items;
218 for (
size_t i=0; i<number_of_elements_to_evict; ++i){
219 typename map_storage_type::iterator it_to_evict = my_lru_list.back();
220 __TBB_ASSERT(!it_to_evict->second.my_ref_counter,
"item to be evicted should not have a live references");
221 my_lru_list.pop_back();
222 my_map_storage.erase(it_to_evict);
225 my_lru_list.push_front(it);
226 it->second.my_lru_list_iterator = my_lru_list.begin();
235 #endif //__TBB_concurrent_lru_cache_H Definition: _aggregator_impl.h:160
Definition: concurrent_lru_cache.h:40
aggregated_operation base class
Definition: _aggregator_impl.h:37
Definition: _aggregator_impl.h:144
The namespace tbb contains all components of the library.
Definition: parallel_for.h:44