// task.h
1 /*
2  Copyright 2005-2016 Intel Corporation. All Rights Reserved.
3 
4  This file is part of Threading Building Blocks. Threading Building Blocks is free software;
5  you can redistribute it and/or modify it under the terms of the GNU General Public License
6  version 2 as published by the Free Software Foundation. Threading Building Blocks is
7  distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
8  implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
9  See the GNU General Public License for more details. You should have received a copy of
10  the GNU General Public License along with Threading Building Blocks; if not, write to the
11  Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
12 
13  As a special exception, you may use this file as part of a free software library without
14  restriction. Specifically, if other files instantiate templates or use macros or inline
15  functions from this file, or you compile this file and link it with other files to produce
16  an executable, this file does not by itself cause the resulting executable to be covered
17  by the GNU General Public License. This exception does not however invalidate any other
18  reasons why the executable file might be covered by the GNU General Public License.
19 */
20 
21 #ifndef __TBB_task_H
22 #define __TBB_task_H
23 
24 #include "tbb_stddef.h"
25 #include "tbb_machine.h"
26 #include "tbb_profiling.h"
27 #include <climits>
28 
29 typedef struct ___itt_caller *__itt_caller;
30 
31 namespace tbb {
32 
33 class task;
34 class task_list;
35 class task_group_context;
36 
37 // MSVC does not allow taking the address of a member that was defined
38 // privately in task_base and made public in class task via a using declaration.
39 #if _MSC_VER || (__GNUC__==3 && __GNUC_MINOR__<3)
40 #define __TBB_TASK_BASE_ACCESS public
41 #else
42 #define __TBB_TASK_BASE_ACCESS private
43 #endif
44 
45 namespace internal { //< @cond INTERNAL
46 
    //! Proxy whose overloaded operator new allocates a task as an additional child of `parent`.
    /** Returned by interface5::internal::task_base::allocate_additional_child_of().
        The actual allocation is done out-of-line by the exported methods. */
    class allocate_additional_child_of_proxy: no_assign {
        //! Always NULL here — presumably retained for binary-layout compatibility; TODO confirm.
        task* self;
        task& parent;
    public:
        explicit allocate_additional_child_of_proxy( task& parent_ ) : self(NULL), parent(parent_) {}
        //! Allocate storage of the given size for a task that becomes a child of `parent`.
        task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
        //! Return a task obtained from allocate() back to the library's allocator.
        void __TBB_EXPORTED_METHOD free( task& ) const;
    };
56 
    //! Opaque storage block sized to hold a 64-bit value, expressed as an int array.
    /** Used by task_group_context::my_cpu_ctl_env (when __TBB_FP_CONTEXT is enabled)
        to hold captured CPU floating-point control settings. */
    struct cpu_ctl_env_space { int space[sizeof(internal::uint64_t)/sizeof(int)]; };
58 } //< namespace internal @endcond
59 
60 namespace interface5 {
61  namespace internal {
63 
    //! Base class holding methods that class task re-exports as its own static members.
    /** Access is public or private depending on __TBB_TASK_BASE_ACCESS (see above):
        some compilers cannot take the address of a member made public via a
        using-declaration if it was declared privately here. */
    class task_base: tbb::internal::no_copy {
    __TBB_TASK_BASE_ACCESS:
        friend class tbb::task;

        //! Schedule task t for execution (defined inline near the bottom of this header).
        static void spawn( task& t );

        //! Spawn every task in list and empty the list.
        static void spawn( task_list& list );

        //! Returns proxy whose overloaded operator new allocates an additional child of t.
        static tbb::internal::allocate_additional_child_of_proxy allocate_additional_child_of( task& t ) {
            return tbb::internal::allocate_additional_child_of_proxy(t);
        }

        //! Destroy victim; out-of-line exported implementation.
        static void __TBB_EXPORTED_FUNC destroy( task& victim );
    };
92  } // internal
93 } // interface5
94 
96 namespace internal {
97 
    //! Abstract interface to the scheduler, as seen by class task.
    /** All operations dispatch through task_prefix::owner, which points at an
        implementation of this interface. For internal use only. */
    class scheduler: no_copy {
    public:
        //! Spawn the chain of tasks starting at `first`; `next` links the chain.
        virtual void spawn( task& first, task*& next ) = 0;

        //! Block until parent's outstanding work (optionally starting with child) completes.
        virtual void wait_for_all( task& parent, task* child ) = 0;

        //! Spawn a chain of root tasks and wait for all of them to finish.
        virtual void spawn_root_and_wait( task& first, task*& next ) = 0;

        //! Pure virtual destructor.
        // Have to have it just to shut up overzealous compilation warnings
        virtual ~scheduler() = 0;

        //! Put task t into the scheduler's starvation-resistant queue. `reserved` carries
        //! extra data (a priority_t cast to void* in task::enqueue with priority).
        virtual void enqueue( task& t, void* reserved ) = 0;
    };
116 
118 
    //! A reference count. Signed so that underflow can be detected by assertion.
    typedef intptr_t reference_count;

    //! An id as used for specifying affinity.
    typedef unsigned short affinity_id;
123 
124 #if __TBB_TASK_GROUP_CONTEXT
125  class generic_scheduler;
126 
    //! Intrusive doubly-linked-list node used to chain task_group_context objects.
    /** Embedded as task_group_context::my_node; field order is part of the binary layout. */
    struct context_list_node_t {
        context_list_node_t *my_prev,
                            *my_next;
    };
131 
    //! Proxy whose overloaded operator new allocates a root task bound to a given context.
    /** Returned by task::allocate_root(task_group_context&). */
    class allocate_root_with_context_proxy: no_assign {
        task_group_context& my_context;
    public:
        allocate_root_with_context_proxy ( task_group_context& ctx ) : my_context(ctx) {}
        //! Allocate storage for a root task associated with my_context.
        task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
        //! Return a task obtained from allocate() back to the library's allocator.
        void __TBB_EXPORTED_METHOD free( task& ) const;
    };
139 #endif /* __TBB_TASK_GROUP_CONTEXT */
140 
    //! Proxy whose overloaded operator new allocates a root task.
    /** Returned by task::allocate_root(); allocation is static — no per-proxy state. */
    class allocate_root_proxy: no_assign {
    public:
        static task& __TBB_EXPORTED_FUNC allocate( size_t size );
        static void __TBB_EXPORTED_FUNC free( task& );
    };
146 
    //! Proxy whose overloaded operator new allocates a continuation task.
    /** Obtained by reinterpreting the allocating task itself (see task::allocate_continuation). */
    class allocate_continuation_proxy: no_assign {
    public:
        task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
        void __TBB_EXPORTED_METHOD free( task& ) const;
    };
152 
    //! Proxy whose overloaded operator new allocates a child task.
    /** Obtained by reinterpreting the allocating task itself (see task::allocate_child). */
    class allocate_child_proxy: no_assign {
    public:
        task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
        void __TBB_EXPORTED_METHOD free( task& ) const;
    };
158 
160 
    //! Memory prefix to a task object.
    /** This class is internal to the library.
        A task_prefix is laid out immediately before the task object it describes:
        task::prefix() returns `this[-1]` and task_prefix::task() returns `this+1`.
        Field order and sizes are therefore part of the binary layout — do not reorder. */
    class task_prefix {
    private:
        friend class tbb::task;
        friend class tbb::interface5::internal::task_base;
        friend class tbb::task_list;
        friend class internal::scheduler;
        friend class internal::allocate_root_proxy;
        friend class internal::allocate_child_proxy;
        friend class internal::allocate_continuation_proxy;
        friend class internal::allocate_additional_child_of_proxy;

#if __TBB_TASK_GROUP_CONTEXT
        //! Task group context this task belongs to.
        /** Copied from the new parent by task::recycle_as_child_of. */
        task_group_context *context;
#endif /* __TBB_TASK_GROUP_CONTEXT */

        //! Scheduler associated with the task's origin — assumption from the name; TODO confirm.
        scheduler* origin;

#if __TBB_TASK_PRIORITY
        union {
#endif /* __TBB_TASK_PRIORITY */

        //! Scheduler that the task's operations dispatch through (spawn, wait_for_all, enqueue).
        scheduler* owner;

#if __TBB_TASK_PRIORITY
        //! Next task in a list of offloaded tasks; overlaps `owner` when priorities are enabled.
        task* next_offloaded;
        };
#endif /* __TBB_TASK_PRIORITY */

        //! The task whose reference count this task contributes to; NULL for a detached task.
        tbb::task* parent;

        //! Reference count, manipulated atomically (see task::add_ref_count et al.).
        __TBB_atomic reference_count ref_count;

        //! Scheduling depth — no longer read by code visible here; presumably kept for layout.
        int depth;

        //! A task::state_type value, stored as a byte.
        unsigned char state;

        //! Miscellaneous flag bits. Bit 0x80 marks a stolen task (task::is_stolen_task);
        //! the task constructor sets the value to 1.
        unsigned char extra_state;

        affinity_id affinity;

        //! "next" field for chaining tasks in lists (task_list, spawn chains).
        tbb::task* next;

        //! The task corresponding to this task_prefix (laid out immediately after it).
        tbb::task& task() {return *reinterpret_cast<tbb::task*>(this+1);}
    };
252 
253 } // namespace internal
255 
256 #if __TBB_TASK_GROUP_CONTEXT
257 
258 #if __TBB_TASK_PRIORITY
namespace internal {
    //! Spacing between adjacent priority levels; INT_MAX/4 leaves headroom below low and above high.
    static const int priority_stride_v4 = INT_MAX / 4;
}

//! Task group priority levels, evenly spaced around priority_normal by priority_stride_v4.
enum priority_t {
    priority_normal = internal::priority_stride_v4 * 2,
    priority_low = priority_normal - internal::priority_stride_v4,
    priority_high = priority_normal + internal::priority_stride_v4
};
268 
269 #endif /* __TBB_TASK_PRIORITY */
270 
271 #if TBB_USE_CAPTURED_EXCEPTION
272  class tbb_exception;
273 #else
274  namespace internal {
275  class tbb_exception_ptr;
276  }
277 #endif /* !TBB_USE_CAPTURED_EXCEPTION */
278 
279 class task_scheduler_init;
280 namespace interface7 { class task_arena; }
281 
283 
//! Used to form groups of tasks.
/** The context carries the group's cancellation state, the exception being
    propagated (if any), optionally captured FP settings, and (when enabled) the
    group's priority.
    NOTE(review): the leading/trailing padding arrays are sized so the object's
    sections fill whole cache lines (internal::NFS_MaxLineSize); adding, removing,
    or reordering fields breaks both the padding arithmetic and the binary layout. */
class task_group_context : internal::no_copy {
private:
    friend class internal::generic_scheduler;
    friend class task_scheduler_init;
    friend class interface7::task_arena;

#if TBB_USE_CAPTURED_EXCEPTION
    typedef tbb_exception exception_container_type;
#else
    typedef internal::tbb_exception_ptr exception_container_type;
#endif

    //! Layout of my_version_and_traits: low 16 bits hold the version, high 16 bits the traits.
    enum version_traits_word_layout {
        traits_offset = 16,
        version_mask = 0xFFFF,
        traits_mask = 0xFFFFul << traits_offset
    };

public:
    //! Relation of the context to its parent: isolated, or bound into the parent's group tree.
    enum kind_type {
        isolated,
        bound
    };

    //! Trait flags, stored in the high half of my_version_and_traits.
    enum traits_type {
        exact_exception = 0x0001ul << traits_offset,
#if __TBB_FP_CONTEXT
        fp_settings = 0x0002ul << traits_offset,
#endif
        concurrent_wait = 0x0004ul << traits_offset,
#if TBB_USE_CAPTURED_EXCEPTION
        default_traits = 0
#else
        default_traits = exact_exception
#endif /* !TBB_USE_CAPTURED_EXCEPTION */
    };

private:
    //! Bits stored in my_state.
    enum state {
        may_have_children = 1,
        // the following enumerations must be the last, new 2^x values must go above
        next_state_value, low_unused_state_bit = (next_state_value-1)*2
    };

    //! Union pads my_kind out to a full pointer-sized word.
    union {
        // TODO: describe asynchronous use, and whether any memory semantics are needed
        __TBB_atomic kind_type my_kind;
        uintptr_t _my_kind_aligner;
    };

    //! Context of the parent cancellation group.
    task_group_context *my_parent;

    //! Node linking this context into a list of contexts (see context_list_node_t).
    internal::context_list_node_t my_node;

    //! Caller handle for the ITT profiling/tracing tools (see tbb_profiling.h).
    __itt_caller itt_caller;

    //! Leading padding.
    /** Sized so that the fields above plus this array occupy exactly NFS_MaxLineSize bytes. */
    char _leading_padding[internal::NFS_MaxLineSize
                          - 2 * sizeof(uintptr_t)- sizeof(void*) - sizeof(internal::context_list_node_t)
                          - sizeof(__itt_caller)
#if __TBB_FP_CONTEXT
                          - sizeof(internal::cpu_ctl_env_space)
#endif
                          ];

#if __TBB_FP_CONTEXT
    //! Captured CPU floating-point control settings (filled by capture_fp_settings()).
    internal::cpu_ctl_env_space my_cpu_ctl_env;
#endif

    //! Nonzero when cancellation of this group has been requested.
    uintptr_t my_cancellation_requested;

    //! Version word combined with trait flags (see version_traits_word_layout).
    /** The constructor stores (2 | t): interface version 2 in the low half. */
    uintptr_t my_version_and_traits;

    //! Container holding an exception being propagated through this group, or NULL.
    exception_container_type *my_exception;

    //! Scheduler instance this context is registered with.
    internal::generic_scheduler *my_owner;

    //! Internal state flags (enum state above).
    uintptr_t my_state;

#if __TBB_TASK_PRIORITY
    //! Priority level of the task group (a priority_t value stored as intptr_t).
    intptr_t my_priority;
#endif /* __TBB_TASK_PRIORITY */

    //! Trailing padding, sizing the tail section of the object to a whole cache line.
    char _trailing_padding[internal::NFS_MaxLineSize - 2 * sizeof(uintptr_t) - 2 * sizeof(void*)
#if __TBB_TASK_PRIORITY
                           - sizeof(intptr_t)
#endif /* __TBB_TASK_PRIORITY */
                           ];

public:
    //! Constructor.
    /** `relation_with_parent` selects isolated vs bound; `t` is a bitmask of traits_type
        flags. The low half of my_version_and_traits is set to version 2; the out-of-line
        init() completes construction. */
    task_group_context ( kind_type relation_with_parent = bound,
                         uintptr_t t = default_traits )
        : my_kind(relation_with_parent)
        , my_version_and_traits(2 | t)
    {
        init();
    }

    // Do not introduce standalone unbind method since it will break state propagation assumptions
    __TBB_EXPORTED_METHOD ~task_group_context ();

    //! Forcefully reinitializes the context (e.g. for reuse after cancellation).
    void __TBB_EXPORTED_METHOD reset ();

    //! Initiates cancellation of all tasks in this group.
    /** Return value presumably indicates whether this call actually initiated the
        cancellation (vs. it already being requested) — TODO confirm in implementation. */
    bool __TBB_EXPORTED_METHOD cancel_group_execution ();

    //! Returns true if cancellation of this group has been requested.
    bool __TBB_EXPORTED_METHOD is_group_execution_cancelled () const;

    //! Records the exception currently in flight so it can be propagated through the group.
    void __TBB_EXPORTED_METHOD register_pending_exception ();

#if __TBB_FP_CONTEXT
    //! Captures the current CPU floating-point control settings into my_cpu_ctl_env.
    void __TBB_EXPORTED_METHOD capture_fp_settings ();
#endif

#if __TBB_TASK_PRIORITY
    //! Changes the priority of the task group.
    void set_priority ( priority_t );

    //! Retrieves the current priority of the task group.
    priority_t priority () const;
#endif /* __TBB_TASK_PRIORITY */

    //! Returns the context's trait flags (high half of my_version_and_traits).
    uintptr_t traits() const { return my_version_and_traits & traits_mask; }

protected:
    //! Out-of-line part of the constructor.
    void __TBB_EXPORTED_METHOD init ();

private:
    friend class task;
    friend class internal::allocate_root_with_context_proxy;

    //! Additional life-cycle values of my_kind beyond the public isolated/bound pair.
    static const kind_type binding_required = bound;
    static const kind_type binding_completed = kind_type(bound+1);
    static const kind_type detached = kind_type(binding_completed+1);
    static const kind_type dying = kind_type(detached+1);

    //! Propagates a state change (given as pointer-to-member) from src into this context.
    template <typename T>
    void propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state );

    //! Binds this context to its parent group and associates it with local_sched.
    void bind_to ( internal::generic_scheduler *local_sched );

    //! Registers this context with the given scheduler instance.
    void register_with ( internal::generic_scheduler *local_sched );

#if __TBB_FP_CONTEXT
    // TODO: Consider adding #else stub in order to omit #if sections in other code
    //! Copies the captured FP settings from src into this context.
    void copy_fp_settings( const task_group_context &src );
#endif /* __TBB_FP_CONTEXT */
}; // class task_group_context
541 
542 #endif /* __TBB_TASK_GROUP_CONTEXT */
543 
545 
//! Base class for user-defined tasks.
/** A task object is always preceded in memory by an internal::task_prefix
    (see prefix() at the bottom); most methods here just manipulate that prefix
    or dispatch through the owning scheduler. */
class task: __TBB_TASK_BASE_ACCESS interface5::internal::task_base {

    //! Out-of-line version of set_ref_count, used when threading tools or assertions are enabled.
    void __TBB_EXPORTED_METHOD internal_set_ref_count( int count );

    //! Out-of-line checked decrement of the reference count; returns the resulting value.
    internal::reference_count __TBB_EXPORTED_METHOD internal_decrement_ref_count();

protected:
    //! Default constructor.
    //  Sets extra_state to 1; the flag bits are internal (bit 0x80 = stolen, see is_stolen_task).
    task() {prefix().extra_state=1;}

public:
    //! Destructor.
    virtual ~task() {}

    //! Should be overridden by derived classes to do the task's work.
    /** Returns the next task to execute, or NULL. */
    virtual task* execute() = 0;

    //! Enumeration of task states that the scheduler considers.
    enum state_type {
        //! task is running; will normally be destroyed after execute() completes (unless recycled).
        executing,
        //! task is to be rescheduled after the current execute() returns (recycle_to_reexecute).
        reexecute,
        //! task is in, headed to, or just taken from the ready pool.
        ready,
        //! task object is freshly allocated, or recycled back to usable state.
        allocated,
        //! task object is on the free list, or in transit to/from it.
        freed,
        //! task recycled as a safe continuation (recycle_as_safe_continuation).
        recycle
#if __TBB_RECYCLE_TO_ENQUEUE
        //! task recycled for enqueueing (recycle_to_enqueue).
        ,to_enqueue
#endif
    };

    //------------------------------------------------------------------------
    // Allocating tasks
    //------------------------------------------------------------------------

    //! Returns proxy for overloaded new that allocates a root task.
    static internal::allocate_root_proxy allocate_root() {
        return internal::allocate_root_proxy();
    }

#if __TBB_TASK_GROUP_CONTEXT
    //! Returns proxy for overloaded new that allocates a root task associated with ctx.
    static internal::allocate_root_with_context_proxy allocate_root( task_group_context& ctx ) {
        return internal::allocate_root_with_context_proxy(ctx);
    }
#endif /* __TBB_TASK_GROUP_CONTEXT */

    //! Returns proxy for overloaded new that allocates a continuation task of *this.
    /** The proxy is just *this reinterpreted; the allocator reads fields from the prefix. */
    internal::allocate_continuation_proxy& allocate_continuation() {
        return *reinterpret_cast<internal::allocate_continuation_proxy*>(this);
    }

    //! Returns proxy for overloaded new that allocates a child task of *this.
    internal::allocate_child_proxy& allocate_child() {
        return *reinterpret_cast<internal::allocate_child_proxy*>(this);
    }

    //! Static form imported from task_base (see allocate_additional_child_of_proxy).
    using task_base::allocate_additional_child_of;

#if __TBB_DEPRECATED_TASK_INTERFACE
    //! Destroy a task (member form kept for the deprecated interface).
    void __TBB_EXPORTED_METHOD destroy( task& t );
#else /* !__TBB_DEPRECATED_TASK_INTERFACE */
    //! Static form imported from task_base.
    using task_base::destroy;
#endif /* !__TBB_DEPRECATED_TASK_INTERFACE */

    //------------------------------------------------------------------------
    // Recycling of tasks
    //------------------------------------------------------------------------

    //! Change this to be a continuation of its former self.
    /** Must be called while execute() is running; flips state back to `allocated`. */
    void recycle_as_continuation() {
        __TBB_ASSERT( prefix().state==executing, "execute not running?" );
        prefix().state = allocated;
    }

    //! Recycle as continuation via the `recycle` state.
    /** Unlike recycle_as_continuation(), leaves the task in state `recycle`
        until the scheduler transitions it — safe when children may still complete
        concurrently; TODO(review): confirm exact guarantee against scheduler code. */
    void recycle_as_safe_continuation() {
        __TBB_ASSERT( prefix().state==executing, "execute not running?" );
        prefix().state = recycle;
    }

    //! Change this to be a child of new_parent, inheriting new_parent's context.
    void recycle_as_child_of( task& new_parent ) {
        internal::task_prefix& p = prefix();
        __TBB_ASSERT( prefix().state==executing||prefix().state==allocated, "execute not running, or already recycled" );
        __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled as a child" );
        __TBB_ASSERT( p.parent==NULL, "parent must be null" );
        __TBB_ASSERT( new_parent.prefix().state<=recycle, "corrupt parent's state" );
        __TBB_ASSERT( new_parent.prefix().state!=freed, "parent already freed" );
        p.state = allocated;
        p.parent = &new_parent;
#if __TBB_TASK_GROUP_CONTEXT
        // A recycled child must run in the same group as its new parent.
        p.context = new_parent.prefix().context;
#endif /* __TBB_TASK_GROUP_CONTEXT */
    }

    //! Schedule this task for re-execution after the current execute() returns.
    void recycle_to_reexecute() {
        __TBB_ASSERT( prefix().state==executing, "execute not running, or already recycled" );
        __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled for reexecution" );
        prefix().state = reexecute;
    }

#if __TBB_RECYCLE_TO_ENQUEUE
    //! Schedule this task to be enqueued after the current execute() returns.
    void recycle_to_enqueue() {
        __TBB_ASSERT( prefix().state==executing, "execute not running, or already recycled" );
        prefix().state = to_enqueue;
    }
#endif /* __TBB_RECYCLE_TO_ENQUEUE */

    //------------------------------------------------------------------------
    // Spawning and blocking
    //------------------------------------------------------------------------

    //! Set reference count.
    void set_ref_count( int count ) {
#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
        // Checked out-of-line version also cooperates with threading tools.
        internal_set_ref_count(count);
#else
        prefix().ref_count = count;
#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */
    }

    //! Atomically increment reference count (acquire-fenced fetch-and-increment).
    void increment_ref_count() {
        __TBB_FetchAndIncrementWacquire( &prefix().ref_count );
    }

    //! Atomically add `count` to reference count and return the new value.
    /** Asserts against underflow; emits ITT release/acquire notifications around
        a transition through zero. */
    int add_ref_count( int count ) {
        internal::call_itt_notify( internal::releasing, &prefix().ref_count );
        internal::reference_count k = count+__TBB_FetchAndAddW( &prefix().ref_count, count );
        __TBB_ASSERT( k>=0, "task's reference count underflowed" );
        if( k==0 )
            internal::call_itt_notify( internal::acquired, &prefix().ref_count );
        return int(k);
    }

    //! Atomically decrement reference count and return the new value.
    int decrement_ref_count() {
#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
        return int(internal_decrement_ref_count());
#else
        // Release-fenced fetch-and-decrement returns the OLD value, hence the -1.
        return int(__TBB_FetchAndDecrementWrelease( &prefix().ref_count ))-1;
#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */
    }

    //! Static spawn forms imported from task_base.
    using task_base::spawn;

    //! Equivalent to spawn(child) followed by wait_for_all(), dispatched in one scheduler call.
    void spawn_and_wait_for_all( task& child ) {
        prefix().owner->wait_for_all( *this, &child );
    }

    //! List form of spawn_and_wait_for_all; out-of-line exported implementation.
    void __TBB_EXPORTED_METHOD spawn_and_wait_for_all( task_list& list );

    //! Spawn root task and wait for it to complete.
    static void spawn_root_and_wait( task& root ) {
        root.prefix().owner->spawn_root_and_wait( root, root.prefix().next );
    }

    //! Spawn all root tasks on root_list and wait for all of them to finish.
    static void spawn_root_and_wait( task_list& root_list );

    //! Wait for this task's outstanding children to complete.
    /** Dispatches through the owner scheduler, which may run other tasks while waiting. */
    void wait_for_all() {
        prefix().owner->wait_for_all( *this, NULL );
    }

    //! Enqueue task for starvation-resistant execution.
#if __TBB_TASK_PRIORITY
    /** No priority argument: the scheduler receives NULL as the reserved parameter. */
#endif /* __TBB_TASK_PRIORITY */
    static void enqueue( task& t ) {
        t.prefix().owner->enqueue( t, NULL );
    }

#if __TBB_TASK_PRIORITY
    //! Enqueue task at the specified priority level.
    static void enqueue( task& t, priority_t p ) {
        __TBB_ASSERT( p == priority_low || p == priority_normal || p == priority_high, "Invalid priority level value" );
        t.prefix().owner->enqueue( t, (void*)p );
    }
#endif /* __TBB_TASK_PRIORITY */

    //! The task executed by the current thread (out-of-line exported implementation).
    static task& __TBB_EXPORTED_FUNC self();

    //! Task on whose behalf this task is working, or NULL.
    task* parent() const {return prefix().parent;}

    //! Sets the parent task pointer.
    /** Both tasks must belong to the same context (asserted when contexts are enabled). */
    void set_parent(task* p) {
#if __TBB_TASK_GROUP_CONTEXT
        __TBB_ASSERT(!p || prefix().context == p->prefix().context, "The tasks must be in the same context");
#endif
        prefix().parent = p;
    }

#if __TBB_TASK_GROUP_CONTEXT
    //! Task group context of this task (same value as group()).
    task_group_context* context() {return prefix().context;}

    //! Pointer to the task group descriptor this task belongs to.
    task_group_context* group () { return prefix().context; }
#endif /* __TBB_TASK_GROUP_CONTEXT */

    //! True if task was stolen from another thread's pool (bit 0x80 of extra_state).
    bool is_stolen_task() const {
        return (prefix().extra_state & 0x80)!=0;
    }

    //------------------------------------------------------------------------
    // Debugging
    //------------------------------------------------------------------------

    //! Current execution state.
    state_type state() const {return state_type(prefix().state);}

    //! The internal reference count.
    int ref_count() const {
#if TBB_USE_ASSERT
        // Guard against the intptr_t count not fitting in int.
        internal::reference_count ref_count_ = prefix().ref_count;
        __TBB_ASSERT( ref_count_==int(ref_count_), "integer overflow error");
#endif
        return int(prefix().ref_count);
    }

    //! Ownership query — TODO(review): semantics defined out-of-line, not visible in this header.
    bool __TBB_EXPORTED_METHOD is_owned_by_current_thread() const;

    //------------------------------------------------------------------------
    // Affinity
    //------------------------------------------------------------------------

    //! An id as used for specifying affinity.
    /** Integral type; 0 presumably means "no affinity" — TODO confirm against scheduler. */
    typedef internal::affinity_id affinity_id;

    //! Set affinity for this task.
    void set_affinity( affinity_id id ) {prefix().affinity = id;}

    //! Current affinity of this task.
    affinity_id affinity() const {return prefix().affinity;}

    //! Invoked by the scheduler to notify the task about its placement.
    /** Default action is defined out-of-line; derived classes may override. */
    virtual void __TBB_EXPORTED_METHOD note_affinity( affinity_id id );

#if __TBB_TASK_GROUP_CONTEXT
    //! Moves this task from its present group into the one associated with ctx.
    void __TBB_EXPORTED_METHOD change_group ( task_group_context& ctx );

    //! Requests cancellation of the task group this task belongs to (see task_group_context).
    bool cancel_group_execution () { return prefix().context->cancel_group_execution(); }

    //! Returns true if this task's group has received a cancellation request.
    bool is_cancelled () const { return prefix().context->is_group_execution_cancelled(); }
#else
    //! Without context support, tasks are never considered cancelled.
    bool is_cancelled () const { return false; }
#endif /* __TBB_TASK_GROUP_CONTEXT */

#if __TBB_TASK_PRIORITY
    //! Changes priority of the task group this task belongs to.
    void set_group_priority ( priority_t p ) { prefix().context->set_priority(p); }

    //! Retrieves current priority of the task group this task belongs to.
    priority_t group_priority () const { return prefix().context->priority(); }

#endif /* __TBB_TASK_PRIORITY */

private:
    friend class interface5::internal::task_base;
    friend class task_list;
    friend class internal::scheduler;
    friend class internal::allocate_root_proxy;
#if __TBB_TASK_GROUP_CONTEXT
    friend class internal::allocate_root_with_context_proxy;
#endif /* __TBB_TASK_GROUP_CONTEXT */
    friend class internal::allocate_continuation_proxy;
    friend class internal::allocate_child_proxy;
    friend class internal::allocate_additional_child_of_proxy;

    //! Get reference to the corresponding task_prefix.
    /** The prefix is laid out in memory immediately before the task object. */
    internal::task_prefix& prefix( internal::version_tag* = NULL ) const {
        return reinterpret_cast<internal::task_prefix*>(const_cast<task*>(this))[-1];
    }
}; // class task
894 
896 
//! task that does nothing. Useful as a continuation or synchronization point.
class empty_task: public task {
    /*override*/ task* execute() {
        return NULL;
    }
};
902 
namespace internal {
    //! Task that invokes a stored copy of functor F exactly once in execute().
    template<typename F>
    class function_task : public task {
#if __TBB_ALLOW_MUTABLE_FUNCTORS
        F my_func;
#else
        // Functor is immutable unless the library is configured to allow mutable functors.
        const F my_func;
#endif
        /*override*/ task* execute() {
            my_func();
            return NULL;
        }
    public:
        //! Copies f; the task owns its own functor instance.
        function_task( const F& f ) : my_func(f) {}
    };
} // namespace internal
921 
923 
925 class task_list: internal::no_copy {
926 private:
927  task* first;
928  task** next_ptr;
929  friend class task;
930  friend class interface5::internal::task_base;
931 public:
933  task_list() : first(NULL), next_ptr(&first) {}
934 
936  ~task_list() {}
937 
939  bool empty() const {return !first;}
940 
942  void push_back( task& task ) {
943  task.prefix().next = NULL;
944  *next_ptr = &task;
945  next_ptr = &task.prefix().next;
946  }
947 #if __TBB_TODO
948  // TODO: add this method and implement&document the local execution ordering. See more in generic_scheduler::local_spawn
950  void push_front( task& task ) {
951  if( empty() ) {
952  push_back(task);
953  } else {
954  task.prefix().next = first;
955  first = &task;
956  }
957  }
958 #endif
959  task& pop_front() {
961  __TBB_ASSERT( !empty(), "attempt to pop item from empty task_list" );
962  task* result = first;
963  first = result->prefix().next;
964  if( !first ) next_ptr = &first;
965  return *result;
966  }
967 
969  void clear() {
970  first=NULL;
971  next_ptr=&first;
972  }
973 };
974 
//! Spawn a single task: dispatch through the scheduler recorded in t's prefix.
inline void interface5::internal::task_base::spawn( task& t ) {
    t.prefix().owner->spawn( t, t.prefix().next );
}
978 
//! Spawn the whole chain of tasks in list (linked via task_prefix::next), then empty the list.
/** No-op for an empty list. */
inline void interface5::internal::task_base::spawn( task_list& list ) {
    if( task* t = list.first ) {
        t->prefix().owner->spawn( *t, *list.next_ptr );
        list.clear();
    }
}
985 
//! Spawn the chain of root tasks in root_list, wait for all of them, then empty the list.
/** No-op for an empty list. */
inline void task::spawn_root_and_wait( task_list& root_list ) {
    if( task* t = root_list.first ) {
        t->prefix().owner->spawn_root_and_wait( *t, *root_list.next_ptr );
        root_list.clear();
    }
}
992 
993 } // namespace tbb
994 
//! Placement new used with task::allocate_root(); delegates to the proxy's static allocator.
inline void *operator new( size_t bytes, const tbb::internal::allocate_root_proxy& ) {
    return &tbb::internal::allocate_root_proxy::allocate(bytes);
}

//! Matching placement delete; the language invokes it only if the task constructor throws.
inline void operator delete( void* task, const tbb::internal::allocate_root_proxy& ) {
    tbb::internal::allocate_root_proxy::free( *static_cast<tbb::task*>(task) );
}
1002 
#if __TBB_TASK_GROUP_CONTEXT
//! Placement new used with task::allocate_root(ctx); delegates to the proxy.
inline void *operator new( size_t bytes, const tbb::internal::allocate_root_with_context_proxy& p ) {
    return &p.allocate(bytes);
}

//! Matching placement delete; invoked only if the task constructor throws.
inline void operator delete( void* task, const tbb::internal::allocate_root_with_context_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}
#endif /* __TBB_TASK_GROUP_CONTEXT */
1012 
//! Placement new used with task::allocate_continuation(); delegates to the proxy.
inline void *operator new( size_t bytes, const tbb::internal::allocate_continuation_proxy& p ) {
    return &p.allocate(bytes);
}

//! Matching placement delete; invoked only if the task constructor throws.
inline void operator delete( void* task, const tbb::internal::allocate_continuation_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}
1020 
//! Placement new used with task::allocate_child(); delegates to the proxy.
inline void *operator new( size_t bytes, const tbb::internal::allocate_child_proxy& p ) {
    return &p.allocate(bytes);
}

//! Matching placement delete; invoked only if the task constructor throws.
inline void operator delete( void* task, const tbb::internal::allocate_child_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}
1028 
//! Placement new used with allocate_additional_child_of(); delegates to the proxy.
inline void *operator new( size_t bytes, const tbb::internal::allocate_additional_child_of_proxy& p ) {
    return &p.allocate(bytes);
}

//! Matching placement delete; invoked only if the task constructor throws.
inline void operator delete( void* task, const tbb::internal::allocate_additional_child_of_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}
1036 
1037 #endif /* __TBB_task_H */