// BRE12 (extraction artifact — original page/tag identifier)
// partitioner.h — Intel Threading Building Blocks partitioner header
1 /*
2  Copyright 2005-2016 Intel Corporation. All Rights Reserved.
3 
4  This file is part of Threading Building Blocks. Threading Building Blocks is free software;
5  you can redistribute it and/or modify it under the terms of the GNU General Public License
6  version 2 as published by the Free Software Foundation. Threading Building Blocks is
7  distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
8  implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
9  See the GNU General Public License for more details. You should have received a copy of
10  the GNU General Public License along with Threading Building Blocks; if not, write to the
11  Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
12 
13  As a special exception, you may use this file as part of a free software library without
14  restriction. Specifically, if other files instantiate templates or use macros or inline
15  functions from this file, or you compile this file and link it with other files to produce
16  an executable, this file does not by itself cause the resulting executable to be covered
17  by the GNU General Public License. This exception does not however invalidate any other
18  reasons why the executable file might be covered by the GNU General Public License.
19 */
20 
21 #ifndef __TBB_partitioner_H
22 #define __TBB_partitioner_H
23 
24 #ifndef __TBB_INITIAL_CHUNKS
25 // initial task divisions per thread
26 #define __TBB_INITIAL_CHUNKS 2
27 #endif
28 #ifndef __TBB_RANGE_POOL_CAPACITY
29 // maximum number of elements in range pool
30 #define __TBB_RANGE_POOL_CAPACITY 8
31 #endif
32 #ifndef __TBB_INIT_DEPTH
33 // initial value for depth of range pool
34 #define __TBB_INIT_DEPTH 5
35 #endif
36 #ifndef __TBB_DEMAND_DEPTH_ADD
37 // when imbalance is found range splits this value times more
38 #define __TBB_DEMAND_DEPTH_ADD 1
39 #endif
40 #ifndef __TBB_STATIC_THRESHOLD
41 // necessary number of clocks for the work to be distributed among all tasks
42 #define __TBB_STATIC_THRESHOLD 40000
43 #endif
44 #if __TBB_DEFINE_MIC
45 #define __TBB_NONUNIFORM_TASK_CREATION 1
46 #ifdef __TBB_time_stamp
47 #define __TBB_USE_MACHINE_TIME_STAMPS 1
48 #define __TBB_task_duration() __TBB_STATIC_THRESHOLD
49 #endif // __TBB_machine_time_stamp
50 #endif // __TBB_DEFINE_MIC
51 
52 #include "task.h"
53 #include "aligned_space.h"
54 #include "atomic.h"
55 #include "internal/_template_helpers.h"
56 
57 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
58  // Workaround for overzealous compiler warnings
59  #pragma warning (push)
60  #pragma warning (disable: 4244)
61 #endif
62 
63 namespace tbb {
64 
65 class auto_partitioner;
66 class simple_partitioner;
67 #if TBB_PREVIEW_STATIC_PARTITIONER
68 class static_partitioner;
69 #endif
70 class affinity_partitioner;
71 
72 namespace interface9 {
73  namespace internal {
74  class affinity_partition_type;
75  }
76 }
77 
78 namespace internal { //< @cond INTERNAL
79 size_t __TBB_EXPORTED_FUNC get_initial_auto_partitioner_divisor();
80 
82 class affinity_partitioner_base_v3: no_copy {
83  friend class tbb::affinity_partitioner;
84  friend class tbb::interface9::internal::affinity_partition_type;
86 
87  affinity_id* my_array;
89  size_t my_size;
91  affinity_partitioner_base_v3() : my_array(NULL), my_size(0) {}
93  ~affinity_partitioner_base_v3() {resize(0);}
95 
96  void __TBB_EXPORTED_METHOD resize( unsigned factor );
97 };
98 
100 class partition_type_base {
101 public:
102  void set_affinity( task & ) {}
103  void note_affinity( task::affinity_id ) {}
104  task* continue_after_execute_range() {return NULL;}
105  bool decide_whether_to_delay() {return false;}
106  void spawn_or_delay( bool, task& b ) {
107  task::spawn(b);
108  }
109 };
110 
111 template<typename Range, typename Body, typename Partitioner> class start_scan;
112 
113 } //< namespace internal @endcond
114 
115 namespace serial {
116 namespace interface9 {
117 template<typename Range, typename Body, typename Partitioner> class start_for;
118 }
119 }
120 
121 namespace interface9 {
123 namespace internal {
124 using namespace tbb::internal;
125 template<typename Range, typename Body, typename Partitioner> class start_for;
126 template<typename Range, typename Body, typename Partitioner> class start_reduce;
127 
129 class flag_task: public task {
130 public:
131  tbb::atomic<bool> my_child_stolen;
132  flag_task() { my_child_stolen = false; }
133  task* execute() { return NULL; }
134  static void mark_task_stolen(task &t) {
135  tbb::atomic<bool> &flag = static_cast<flag_task*>(t.parent())->my_child_stolen;
136 #if TBB_USE_THREADING_TOOLS
137  // Threading tools respect lock prefix but report false-positive data-race via plain store
138  flag.fetch_and_store<release>(true);
139 #else
140  flag = true;
141 #endif //TBB_USE_THREADING_TOOLS
142  }
143  static bool is_peer_stolen(task &t) {
144  return static_cast<flag_task*>(t.parent())->my_child_stolen;
145  }
146 };
147 
151 typedef unsigned char depth_t;
152 
154 template <typename T, depth_t MaxCapacity>
155 class range_vector {
156  depth_t my_head;
157  depth_t my_tail;
158  depth_t my_size;
159  depth_t my_depth[MaxCapacity]; // relative depths of stored ranges
161 
162 public:
164  range_vector(const T& elem) : my_head(0), my_tail(0), my_size(1) {
165  my_depth[0] = 0;
166  new( static_cast<void *>(my_pool.begin()) ) T(elem);//TODO: std::move?
167  }
168  ~range_vector() {
169  while( !empty() ) pop_back();
170  }
171  bool empty() const { return my_size == 0; }
172  depth_t size() const { return my_size; }
175  void split_to_fill(depth_t max_depth) {
176  while( my_size < MaxCapacity && is_divisible(max_depth) ) {
177  depth_t prev = my_head;
178  my_head = (my_head + 1) % MaxCapacity;
179  new(my_pool.begin()+my_head) T(my_pool.begin()[prev]); // copy TODO: std::move?
180  my_pool.begin()[prev].~T(); // instead of assignment
181  new(my_pool.begin()+prev) T(my_pool.begin()[my_head], split()); // do 'inverse' split
182  my_depth[my_head] = ++my_depth[prev];
183  my_size++;
184  }
185  }
186  void pop_back() {
187  __TBB_ASSERT(my_size > 0, "range_vector::pop_back() with empty size");
188  my_pool.begin()[my_head].~T();
189  my_size--;
190  my_head = (my_head + MaxCapacity - 1) % MaxCapacity;
191  }
192  void pop_front() {
193  __TBB_ASSERT(my_size > 0, "range_vector::pop_front() with empty size");
194  my_pool.begin()[my_tail].~T();
195  my_size--;
196  my_tail = (my_tail + 1) % MaxCapacity;
197  }
198  T& back() {
199  __TBB_ASSERT(my_size > 0, "range_vector::back() with empty size");
200  return my_pool.begin()[my_head];
201  }
202  T& front() {
203  __TBB_ASSERT(my_size > 0, "range_vector::front() with empty size");
204  return my_pool.begin()[my_tail];
205  }
207  depth_t front_depth() {
208  __TBB_ASSERT(my_size > 0, "range_vector::front_depth() with empty size");
209  return my_depth[my_tail];
210  }
211  depth_t back_depth() {
212  __TBB_ASSERT(my_size > 0, "range_vector::back_depth() with empty size");
213  return my_depth[my_head];
214  }
215  bool is_divisible(depth_t max_depth) {
216  return back_depth() < max_depth && back().is_divisible();
217  }
218 };
219 
221 template <typename Partition>
222 struct partition_type_base {
223  typedef split split_type;
224  // decision makers
225  void set_affinity( task & ) {}
226  void note_affinity( task::affinity_id ) {}
227  bool check_being_stolen(task &) { return false; } // part of old should_execute_range()
228  bool check_for_demand(task &) { return false; }
229  bool is_divisible() { return true; } // part of old should_execute_range()
230  depth_t max_depth() { return 0; }
231  void align_depth(depth_t) { }
232  template <typename Range> split_type get_split() { return split(); }
233  Partition& self() { return *static_cast<Partition*>(this); } // CRTP helper
234 
235  template<typename StartType, typename Range>
236  void work_balance(StartType &start, Range &range) {
237  start.run_body( range ); // simple partitioner goes always here
238  }
239 
240  template<typename StartType, typename Range>
241  void execute(StartType &start, Range &range) {
242  // The algorithm in a few words ([]-denotes calls to decision methods of partitioner):
243  // [If this task is stolen, adjust depth and divisions if necessary, set flag].
244  // If range is divisible {
245  // Spread the work while [initial divisions left];
246  // Create trap task [if necessary];
247  // }
248  // If not divisible or [max depth is reached], execute, else do the range pool part
249  if ( range.is_divisible() ) {
250  if ( self().is_divisible() ) {
251  do { // split until is divisible
252  typename Partition::split_type split_obj = self().template get_split<Range>();
253  start.offer_work( split_obj );
254  } while ( range.is_divisible() && self().is_divisible() );
255  }
256  }
257  self().work_balance(start, range);
258  }
259 };
260 
262 template <typename Partition>
263 struct adaptive_mode : partition_type_base<Partition> {
264  typedef Partition my_partition;
265  using partition_type_base<Partition>::self; // CRTP helper to get access to derived classes
266  size_t my_divisor;
267  // For affinity_partitioner, my_divisor indicates the number of affinity array indices the task reserves.
268  // A task which has only one index must produce the right split without reserved index in order to avoid
269  // it to be overwritten in note_affinity() of the created (right) task.
270  // I.e. a task created deeper than the affinity array can remember must not save its affinity (LIFO order)
271  static const unsigned factor = 1;
272  adaptive_mode() : my_divisor(tbb::internal::get_initial_auto_partitioner_divisor() / 4 * my_partition::factor) {}
273  adaptive_mode(adaptive_mode &src, split) : my_divisor(do_split(src, split())) {}
274  adaptive_mode(adaptive_mode &src, const proportional_split& split_obj) : my_divisor(do_split(src, split_obj)) {}
276  size_t do_split(adaptive_mode &src, split) {
277  return src.my_divisor /= 2u;
278  }
279  size_t do_split(adaptive_mode &src, const proportional_split& split_obj) {
280 #if __TBB_ENABLE_RANGE_FEEDBACK
281  size_t portion = size_t(float(src.my_divisor) * float(split_obj.right())
282  / float(split_obj.left() + split_obj.right()) + 0.5f);
283 #else
284  size_t portion = split_obj.right() * my_partition::factor;
285 #endif
286  portion = (portion + my_partition::factor/2) & (0ul - my_partition::factor);
287 #if __TBB_ENABLE_RANGE_FEEDBACK
288 
289  if (!portion)
290  portion = my_partition::factor;
291  else if (portion == src.my_divisor)
292  portion = src.my_divisor - my_partition::factor;
293 #endif
294  src.my_divisor -= portion;
295  return portion;
296  }
297  bool is_divisible() { // part of old should_execute_range()
298  return my_divisor > my_partition::factor;
299  }
300 };
301 
303 template <typename Partition>
304 struct linear_affinity_mode : adaptive_mode<Partition> {
305  using adaptive_mode<Partition>::my_divisor;
306  size_t my_head;
307  using adaptive_mode<Partition>::self;
308  linear_affinity_mode() : adaptive_mode<Partition>(), my_head(0) {}
309  linear_affinity_mode(linear_affinity_mode &src, split) : adaptive_mode<Partition>(src, split())
310  , my_head(src.my_head + src.my_divisor) {}
311  linear_affinity_mode(linear_affinity_mode &src, const proportional_split& split_obj) : adaptive_mode<Partition>(src, split_obj)
312  , my_head(src.my_head + src.my_divisor) {}
313  void set_affinity( task &t ) {
314  if( my_divisor )
315  t.set_affinity( affinity_id(my_head) + 1 );
316  }
317 };
318 
321 
324 template <typename Range>
325 class is_splittable_in_proportion {
326 private:
327  typedef char yes[1];
328  typedef char no [2];
329 
330  template <typename range_type> static yes& decide(typename enable_if<range_type::is_splittable_in_proportion>::type *);
331  template <typename range_type> static no& decide(...);
332 public:
333  // equals to 'true' if and only if static const variable 'is_splittable_in_proportion' of template parameter
334  // initialized with the value of 'true'
335  static const bool value = (sizeof(decide<Range>(0)) == sizeof(yes));
336 };
337 
339 template<class Mode>
340 struct unbalancing_partition_type : Mode {
341  using Mode::self;
342  unbalancing_partition_type() : Mode() {}
343  unbalancing_partition_type(unbalancing_partition_type& p, split) : Mode(p, split()) {}
344  unbalancing_partition_type(unbalancing_partition_type& p, const proportional_split& split_obj) : Mode(p, split_obj) {}
345 #if _MSC_VER && !defined(__INTEL_COMPILER)
346  // Suppress "conditional expression is constant" warning.
347  #pragma warning( push )
348  #pragma warning( disable: 4127 )
349 #endif
350  template <typename Range>
351  proportional_split get_split() {
352  if (is_splittable_in_proportion<Range>::value) {
353  size_t size = self().my_divisor / Mode::my_partition::factor;
354 #if __TBB_NONUNIFORM_TASK_CREATION
355  size_t right = (size + 2) / 3;
356 #else
357  size_t right = size / 2;
358 #endif
359  size_t left = size - right;
360  return proportional_split(left, right);
361  } else {
362  return proportional_split(1, 1);
363  }
364  }
365 #if _MSC_VER && !defined(__INTEL_COMPILER)
366  #pragma warning( pop )
367 #endif // warning 4127 is back
368 };
369 
371 template<class Mode>
372 struct balancing_partition_type : unbalancing_partition_type<Mode> {
373  using Mode::self;
374 #ifdef __TBB_USE_MACHINE_TIME_STAMPS
375  tbb::internal::machine_tsc_t my_dst_tsc;
376 #endif
377  enum {
378  begin = 0,
379  run,
380  pass
381  } my_delay;
382  depth_t my_max_depth;
383  static const unsigned range_pool_size = __TBB_RANGE_POOL_CAPACITY;
384  balancing_partition_type(): unbalancing_partition_type<Mode>()
385 #ifdef __TBB_USE_MACHINE_TIME_STAMPS
386  , my_dst_tsc(0)
387 #endif
388  , my_delay(begin)
389  , my_max_depth(__TBB_INIT_DEPTH) {}
390  balancing_partition_type(balancing_partition_type& p, split)
391  : unbalancing_partition_type<Mode>(p, split())
392 #ifdef __TBB_USE_MACHINE_TIME_STAMPS
393  , my_dst_tsc(0)
394 #endif
395  , my_delay(pass)
396  , my_max_depth(p.my_max_depth) {}
397  balancing_partition_type(balancing_partition_type& p, const proportional_split& split_obj)
398  : unbalancing_partition_type<Mode>(p, split_obj)
399 #ifdef __TBB_USE_MACHINE_TIME_STAMPS
400  , my_dst_tsc(0)
401 #endif
402  , my_delay(begin)
403  , my_max_depth(p.my_max_depth) {}
404  bool check_being_stolen( task &t) { // part of old should_execute_range()
405  if( !(self().my_divisor / Mode::my_partition::factor) ) { // if not from the top P tasks of binary tree
406  self().my_divisor = 1; // TODO: replace by on-stack flag (partition_state's member)?
407  if( t.is_stolen_task() && t.parent()->ref_count() >= 2 ) { // runs concurrently with the left task
408 #if __TBB_USE_OPTIONAL_RTTI
409  // RTTI is available, check whether the cast is valid
410  __TBB_ASSERT(dynamic_cast<flag_task*>(t.parent()), 0);
411  // correctness of the cast relies on avoiding the root task for which:
412  // - initial value of my_divisor != 0 (protected by separate assertion)
413  // - is_stolen_task() always returns false for the root task.
414 #endif
415  flag_task::mark_task_stolen(t);
416  if( !my_max_depth ) my_max_depth++;
417  my_max_depth += __TBB_DEMAND_DEPTH_ADD;
418  return true;
419  }
420  }
421  return false;
422  }
423  depth_t max_depth() { return my_max_depth; }
424  void align_depth(depth_t base) {
425  __TBB_ASSERT(base <= my_max_depth, 0);
426  my_max_depth -= base;
427  }
428  template<typename StartType, typename Range>
429  void work_balance(StartType &start, Range &range) {
430  if( !range.is_divisible() || !self().max_depth() ) {
431  start.run_body( range ); // simple partitioner goes always here
432  }
433  else { // do range pool
434  internal::range_vector<Range, range_pool_size> range_pool(range);
435  do {
436  range_pool.split_to_fill(self().max_depth()); // fill range pool
437  if( self().check_for_demand( start ) ) {
438  if( range_pool.size() > 1 ) {
439  start.offer_work( range_pool.front(), range_pool.front_depth() );
440  range_pool.pop_front();
441  continue;
442  }
443  if( range_pool.is_divisible(self().max_depth()) ) // was not enough depth to fork a task
444  continue; // note: next split_to_fill() should split range at least once
445  }
446  start.run_body( range_pool.back() );
447  range_pool.pop_back();
448  } while( !range_pool.empty() && !start.is_cancelled() );
449  }
450  }
451  bool check_for_demand( task &t ) {
452  if( pass == my_delay ) {
453  if( self().my_divisor > 1 ) // produce affinitized tasks while they have slot in array
454  return true; // do not do my_max_depth++ here, but be sure range_pool is splittable once more
455  else if( self().my_divisor && my_max_depth ) { // make balancing task
456  self().my_divisor = 0; // once for each task; depth will be decreased in align_depth()
457  return true;
458  }
459  else if( flag_task::is_peer_stolen(t) ) {
460  my_max_depth += __TBB_DEMAND_DEPTH_ADD;
461  return true;
462  }
463  } else if( begin == my_delay ) {
464 #ifndef __TBB_USE_MACHINE_TIME_STAMPS
465  my_delay = pass;
466 #else
467  my_dst_tsc = __TBB_time_stamp() + __TBB_task_duration();
468  my_delay = run;
469  } else if( run == my_delay ) {
470  if( __TBB_time_stamp() < my_dst_tsc ) {
471  __TBB_ASSERT(my_max_depth > 0, NULL);
472  my_max_depth--; // increase granularity since tasks seem having too small work
473  return false;
474  }
475  my_delay = pass;
476  return true;
477 #endif // __TBB_USE_MACHINE_TIME_STAMPS
478  }
479  return false;
480  }
481 };
482 
483 class auto_partition_type: public balancing_partition_type<adaptive_mode<auto_partition_type> > {
484 public:
485  auto_partition_type( const auto_partitioner& )
486  : balancing_partition_type<adaptive_mode<auto_partition_type> >() {
487  my_divisor *= __TBB_INITIAL_CHUNKS;
488  }
489  auto_partition_type( auto_partition_type& src, split)
490  : balancing_partition_type<adaptive_mode<auto_partition_type> >(src, split()) {}
491  bool is_divisible() { // part of old should_execute_range()
492  if( my_divisor > 1 ) return true;
493  if( my_divisor && my_max_depth ) { // can split the task. TODO: on-stack flag instead
494  // keep same fragmentation while splitting for the local task pool
495  my_max_depth--;
496  my_divisor = 0; // decrease max_depth once per task
497  return true;
498  } else return false;
499  }
500  bool check_for_demand(task &t) {
501  if( flag_task::is_peer_stolen(t) ) {
502  my_max_depth += __TBB_DEMAND_DEPTH_ADD;
503  return true;
504  } else return false;
505  }
506 };
507 
508 class simple_partition_type: public partition_type_base<simple_partition_type> {
509 public:
510  simple_partition_type( const simple_partitioner& ) {}
511  simple_partition_type( const simple_partition_type&, split ) {}
513  template<typename StartType, typename Range>
514  void execute(StartType &start, Range &range) {
515  split_type split_obj = split(); // start.offer_work accepts split_type as reference
516  while( range.is_divisible() )
517  start.offer_work( split_obj );
518  start.run_body( range );
519  }
520 };
521 
#if TBB_PREVIEW_STATIC_PARTITIONER
//! Backend for static_partitioner: proportional splits with linear affinity,
//! no dynamic balancing (base type is overridable for experiments).
#ifndef __TBB_STATIC_PARTITIONER_BASE_TYPE
#define __TBB_STATIC_PARTITIONER_BASE_TYPE unbalancing_partition_type
#endif
class static_partition_type : public __TBB_STATIC_PARTITIONER_BASE_TYPE<linear_affinity_mode<static_partition_type> > {
public:
    typedef proportional_split split_type;
    static_partition_type( const static_partitioner& )
        : __TBB_STATIC_PARTITIONER_BASE_TYPE<linear_affinity_mode<static_partition_type> >() {}
    static_partition_type( static_partition_type& p, split )
        : __TBB_STATIC_PARTITIONER_BASE_TYPE<linear_affinity_mode<static_partition_type> >(p, split()) {}
    static_partition_type( static_partition_type& p, const proportional_split& split_obj )
        : __TBB_STATIC_PARTITIONER_BASE_TYPE<linear_affinity_mode<static_partition_type> >(p, split_obj) {}
};
#undef __TBB_STATIC_PARTITIONER_BASE_TYPE
#endif

539 class affinity_partition_type : public balancing_partition_type<linear_affinity_mode<affinity_partition_type> > {
540  static const unsigned factor_power = 4; // TODO: get a unified formula based on number of computing units
541  tbb::internal::affinity_id* my_array;
542 public:
543  static const unsigned factor = 1 << factor_power; // number of slots in affinity array per task
544  typedef proportional_split split_type;
545  affinity_partition_type( tbb::internal::affinity_partitioner_base_v3& ap )
546  : balancing_partition_type<linear_affinity_mode<affinity_partition_type> >() {
547  __TBB_ASSERT( (factor&(factor-1))==0, "factor must be power of two" );
548  ap.resize(factor);
549  my_array = ap.my_array;
550  my_max_depth = factor_power + 1;
551  __TBB_ASSERT( my_max_depth < __TBB_RANGE_POOL_CAPACITY, 0 );
552  }
553  affinity_partition_type(affinity_partition_type& p, split)
554  : balancing_partition_type<linear_affinity_mode<affinity_partition_type> >(p, split())
555  , my_array(p.my_array) {}
556  affinity_partition_type(affinity_partition_type& p, const proportional_split& split_obj)
557  : balancing_partition_type<linear_affinity_mode<affinity_partition_type> >(p, split_obj)
558  , my_array(p.my_array) {}
559  void set_affinity( task &t ) {
560  if( my_divisor ) {
561  if( !my_array[my_head] )
562  // TODO: consider new ideas with my_array for both affinity and static partitioner's, then code reuse
563  t.set_affinity( affinity_id(my_head / factor + 1) );
564  else
565  t.set_affinity( my_array[my_head] );
566  }
567  }
568  void note_affinity( task::affinity_id id ) {
569  if( my_divisor )
570  my_array[my_head] = id;
571  }
572 };
573 
575 class old_auto_partition_type: public tbb::internal::partition_type_base {
576  size_t num_chunks;
577  static const size_t VICTIM_CHUNKS = 4;
578 public:
579  bool should_execute_range(const task &t) {
580  if( num_chunks<VICTIM_CHUNKS && t.is_stolen_task() )
581  num_chunks = VICTIM_CHUNKS;
582  return num_chunks==1;
583  }
584  old_auto_partition_type( const auto_partitioner& )
585  : num_chunks(internal::get_initial_auto_partitioner_divisor()*__TBB_INITIAL_CHUNKS/4) {}
586  old_auto_partition_type( const affinity_partitioner& )
587  : num_chunks(internal::get_initial_auto_partitioner_divisor()*__TBB_INITIAL_CHUNKS/4) {}
588  old_auto_partition_type( old_auto_partition_type& pt, split ) {
589  num_chunks = pt.num_chunks = (pt.num_chunks+1u) / 2u;
590  }
591 };
592 
593 } // namespace interfaceX::internal
595 } // namespace interfaceX
596 
598 
600 class simple_partitioner {
601 public:
602  simple_partitioner() {}
603 private:
604  template<typename Range, typename Body, typename Partitioner> friend class serial::interface9::start_for;
605  template<typename Range, typename Body, typename Partitioner> friend class interface9::internal::start_for;
606  template<typename Range, typename Body, typename Partitioner> friend class interface9::internal::start_reduce;
607  template<typename Range, typename Body, typename Partitioner> friend class internal::start_scan;
608  // backward compatibility
609  class partition_type: public internal::partition_type_base {
610  public:
611  bool should_execute_range(const task& ) {return false;}
612  partition_type( const simple_partitioner& ) {}
613  partition_type( const partition_type&, split ) {}
614  };
615  // new implementation just extends existing interface
616  typedef interface9::internal::simple_partition_type task_partition_type;
617 
618  // TODO: consider to make split_type public
619  typedef interface9::internal::simple_partition_type::split_type split_type;
620 };
621 
623 
626 class auto_partitioner {
627 public:
628  auto_partitioner() {}
629 
630 private:
631  template<typename Range, typename Body, typename Partitioner> friend class serial::interface9::start_for;
632  template<typename Range, typename Body, typename Partitioner> friend class interface9::internal::start_for;
633  template<typename Range, typename Body, typename Partitioner> friend class interface9::internal::start_reduce;
634  template<typename Range, typename Body, typename Partitioner> friend class internal::start_scan;
635  // backward compatibility
636  typedef interface9::internal::old_auto_partition_type partition_type;
637  // new implementation just extends existing interface
638  typedef interface9::internal::auto_partition_type task_partition_type;
639 
640  // TODO: consider to make split_type public
641  typedef interface9::internal::auto_partition_type::split_type split_type;
642 };
643 
#if TBB_PREVIEW_STATIC_PARTITIONER
//! A static partitioner
/** Divides the range proportionally among threads up front, without dynamic balancing. */
class static_partitioner {
public:
    static_partitioner() {}
private:
    template<typename Range, typename Body, typename Partitioner> friend class serial::interface9::start_for;
    template<typename Range, typename Body, typename Partitioner> friend class interface9::internal::start_for;
    template<typename Range, typename Body, typename Partitioner> friend class interface9::internal::start_reduce;
    template<typename Range, typename Body, typename Partitioner> friend class internal::start_scan;
    // backward compatibility
    typedef interface9::internal::old_auto_partition_type partition_type;
    // new implementation just extends existing interface
    typedef interface9::internal::static_partition_type task_partition_type;

    // TODO: consider to make split_type public
    typedef interface9::internal::static_partition_type::split_type split_type;
};
#endif

665 class affinity_partitioner: internal::affinity_partitioner_base_v3 {
666 public:
667  affinity_partitioner() {}
668 
669 private:
670  template<typename Range, typename Body, typename Partitioner> friend class serial::interface9::start_for;
671  template<typename Range, typename Body, typename Partitioner> friend class interface9::internal::start_for;
672  template<typename Range, typename Body, typename Partitioner> friend class interface9::internal::start_reduce;
673  template<typename Range, typename Body, typename Partitioner> friend class internal::start_scan;
674  // backward compatibility - for parallel_scan only
675  typedef interface9::internal::old_auto_partition_type partition_type;
676  // new implementation just extends existing interface
677  typedef interface9::internal::affinity_partition_type task_partition_type;
678 
679  // TODO: consider to make split_type public
680  typedef interface9::internal::affinity_partition_type::split_type split_type;
681 };
682 
683 } // namespace tbb
684 
685 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
686  #pragma warning (pop)
687 #endif // warning 4244 is back
688 #undef __TBB_INITIAL_CHUNKS
689 #undef __TBB_RANGE_POOL_CAPACITY
690 #undef __TBB_INIT_DEPTH
691 #endif /* __TBB_partitioner_H */
// --- Doxygen extraction residue (cross-reference captions from the generated
// --- documentation, not part of the original header); kept for provenance:
// "Enables one or the other code branches." — Definition: _template_helpers.h:29
// Definition: atomic.h:535
// "Block of space aligned sufficiently to construct an array T with N elements."
//   — Definition: aligned_space.h:33
// Definition: material.h:665
// "T * begin() — Pointer to beginning of array." — Definition: aligned_space.h:39
// Definition: _flow_graph_async_msg_impl.h:32
// "Release." — Definition: atomic.h:49
// "The namespace tbb contains all components of the library."
//   — Definition: parallel_for.h:44