24 #include "tbb_stddef.h" 25 #include "tbb_machine.h" 26 #include "tbb_profiling.h" 29 typedef struct ___itt_caller *__itt_caller;
35 class task_group_context;
// NOTE(review): this file is a Doxygen-extracted listing of TBB's task.h; the
// original line numbers are fused into the text and some interior lines are
// missing from the extract.
// Access workaround: MSVC and GCC < 3.3 mishandle friend access through a
// private base, so task_base members are made public on those compilers.
39 #if _MSC_VER || (__GNUC__==3 && __GNUC_MINOR__<3) 40 #define __TBB_TASK_BASE_ACCESS public 42 #define __TBB_TASK_BASE_ACCESS private 47 class allocate_additional_child_of_proxy: no_assign {
// Proxy consumed by the placement operator new/delete pair (defined at the
// bottom of this file); remembers the parent the new child will belong to.
52 explicit allocate_additional_child_of_proxy( task& parent_ ) : self(NULL), parent(parent_) {}
// Exported into the TBB runtime: allocate `size` bytes for a task that becomes
// an additional child of `parent`.
53 task& __TBB_EXPORTED_METHOD allocate(
size_t size )
const;
// Exported counterpart used by the matching placement delete on failure.
54 void __TBB_EXPORTED_METHOD free( task& )
const;
// Opaque storage for a captured CPU control environment (presumably FPU
// control word / MXCSR state -- TODO confirm against the runtime); sized in
// ints to hold exactly one 64-bit value.
57 struct cpu_ctl_env_space {
int space[
sizeof(internal::uint64_t)/
sizeof(
int)]; };
60 namespace interface5 {
// Base class of tbb::task hosting the spawn/destroy entry points. Its access
// level is controlled by __TBB_TASK_BASE_ACCESS (compiler workaround above).
68 class task_base: tbb::internal::no_copy {
69 __TBB_TASK_BASE_ACCESS:
70 friend class tbb::task;
// Schedule a single task for execution (defined inline near end of file).
73 static void spawn( task& t );
// Schedule every task in `list` for execution.
76 static void spawn( task_list& list );
// Returns the proxy consumed by operator new to allocate a new child of `t`;
// unlike allocate_child(), `t` need not be the innermost running task.
81 static tbb::internal::allocate_additional_child_of_proxy allocate_additional_child_of( task& t ) {
82 return tbb::internal::allocate_additional_child_of_proxy(t);
// Destroy a victim task; exported so the logic lives in the runtime library.
90 static void __TBB_EXPORTED_FUNC destroy( task& victim );
// Abstract per-thread scheduler interface; implemented by the runtime's
// generic_scheduler (declared below).
98 class scheduler: no_copy {
// Schedule task `first`; `next` chains further tasks of the same batch.
101 virtual void spawn( task& first, task*& next ) = 0;
// Wait until `parent` has no outstanding children; may run `child` first.
104 virtual void wait_for_all( task& parent, task* child ) = 0;
// Schedule a batch of root tasks and wait for all of them to finish.
107 virtual void spawn_root_and_wait( task& first, task*& next ) = 0;
// Pure virtual destructor keeps the class abstract while still allowing
// destruction through this interface.
111 virtual ~scheduler() = 0;
// FIFO-enqueue a task; `reserved` carries implementation-private data
// (the priority overload of task::enqueue passes the priority through it).
114 virtual void enqueue( task& t,
void* reserved ) = 0;
// Signed pointer-sized type: task reference counts may transiently hold
// values updated concurrently via the fetch-and-add primitives used below.
119 typedef intptr_t reference_count;
// Compact identifier for task affinity (see task::set_affinity below).
122 typedef unsigned short affinity_id;
124 #if __TBB_TASK_GROUP_CONTEXT 125 class generic_scheduler;
// Intrusive doubly-linked list node embedded in task_group_context (my_node);
// the my_next pointer is on a line missing from this extract.
127 struct context_list_node_t {
128 context_list_node_t *my_prev,
// Proxy for allocating a root task bound to an explicit task_group_context;
// consumed by the matching placement operator new at the end of this file.
132 class allocate_root_with_context_proxy: no_assign {
133 task_group_context& my_context;
135 allocate_root_with_context_proxy ( task_group_context& ctx ) : my_context(ctx) {}
136 task& __TBB_EXPORTED_METHOD allocate(
size_t size )
const;
// Called by the matching placement delete if construction throws.
137 void __TBB_EXPORTED_METHOD free( task& )
const;
// Proxy for allocating a root task in the default context; static because no
// per-call state is needed.
141 class allocate_root_proxy: no_assign {
143 static task& __TBB_EXPORTED_FUNC allocate(
size_t size );
144 static void __TBB_EXPORTED_FUNC free( task& );
// Proxy returned by task::allocate_continuation(); `this` is reinterpreted as
// the proxy there, so the allocating task itself carries the needed state.
147 class allocate_continuation_proxy: no_assign {
149 task& __TBB_EXPORTED_METHOD allocate(
size_t size )
const;
150 void __TBB_EXPORTED_METHOD free( task& )
const;
// Proxy returned by task::allocate_child(); same reinterpret-this scheme.
153 class allocate_child_proxy: no_assign {
155 task& __TBB_EXPORTED_METHOD allocate(
size_t size )
const;
156 void __TBB_EXPORTED_METHOD free( task& )
const;
// ----- Interior of internal::task_prefix (the class header is not present in
// this extract): per-task bookkeeping stored immediately BEFORE each task
// object in memory; friends below are the only types allowed to touch it. -----
173 friend class tbb::task;
174 friend class tbb::interface5::internal::task_base;
175 friend class tbb::task_list;
176 friend class internal::scheduler;
177 friend class internal::allocate_root_proxy;
178 friend class internal::allocate_child_proxy;
179 friend class internal::allocate_continuation_proxy;
180 friend class internal::allocate_additional_child_of_proxy;
// Context the task runs in (only when task-group-context support is on).
182 #if __TBB_TASK_GROUP_CONTEXT 187 task_group_context *context;
// Chain link for priority-offloaded tasks (priority builds only).
198 #if __TBB_TASK_PRIORITY 206 #if __TBB_TASK_PRIORITY 209 task* next_offloaded;
// Outstanding children (+1 for a waiting parent); updated atomically.
224 __TBB_atomic reference_count ref_count;
// Miscellaneous scheduler-private flag bits (bit 0x80 = stolen, per
// task::is_stolen_task below; bit set to 1 by the task constructor).
242 unsigned char extra_state;
244 affinity_id affinity;
// The task object is laid out directly after its prefix, hence `this+1`.
250 tbb::task& task() {
return *
reinterpret_cast<tbb::task*
>(
this+1);}
// Priority levels are spaced INT_MAX/4 apart: low/normal/high are one stride
// below/at/above 2*stride, leaving headroom at both ends of the int range.
256 #if __TBB_TASK_GROUP_CONTEXT 258 #if __TBB_TASK_PRIORITY 260 static const int priority_stride_v4 = INT_MAX / 4;
264 priority_normal = internal::priority_stride_v4 * 2,
265 priority_low = priority_normal - internal::priority_stride_v4,
266 priority_high = priority_normal + internal::priority_stride_v4
// Exception propagation strategy selects the container type used below.
271 #if TBB_USE_CAPTURED_EXCEPTION 275 class tbb_exception_ptr;
279 class task_scheduler_init;
280 namespace interface7 {
class task_arena; }
// Cancellation/exception/priority domain shared by a tree of tasks; contexts
// form a parent chain (my_parent) along which state is propagated.
303 class task_group_context : internal::no_copy {
305 friend class internal::generic_scheduler;
306 friend class task_scheduler_init;
307 friend class interface7::task_arena;
// Exception container: whole captured exceptions vs. exact std::exception_ptr
// propagation, chosen by TBB_USE_CAPTURED_EXCEPTION.
309 #if TBB_USE_CAPTURED_EXCEPTION 310 typedef tbb_exception exception_container_type;
312 typedef internal::tbb_exception_ptr exception_container_type;
// my_version_and_traits packs a 16-bit version and trait bits above it.
315 enum version_traits_word_layout {
317 version_mask = 0xFFFF,
318 traits_mask = 0xFFFFul << traits_offset
// Trait bits: exact exception propagation, captured FP settings, and
// concurrent-wait semantics.
328 exact_exception = 0x0001ul << traits_offset,
330 fp_settings = 0x0002ul << traits_offset,
332 concurrent_wait = 0x0004ul << traits_offset,
333 #if TBB_USE_CAPTURED_EXCEPTION 336 default_traits = exact_exception
// State bits (internal): set once this context may have child contexts.
342 may_have_children = 1,
344 next_state_value, low_unused_state_bit = (next_state_value-1)*2
// Binding kind; _my_kind_aligner pads my_kind out to word size.
350 __TBB_atomic kind_type my_kind;
351 uintptr_t _my_kind_aligner;
355 task_group_context *my_parent;
// Link into the owner thread's list of contexts (for state propagation).
360 internal::context_list_node_t my_node;
// ITT (profiling tools) stack identifier for this context.
363 __itt_caller itt_caller;
// Padding computed so the frequently-written fields below land on their own
// cache line (NFS_MaxLineSize); must account for every member above it.
369 char _leading_padding[internal::NFS_MaxLineSize
370 - 2 *
sizeof(uintptr_t)-
sizeof(
void*) -
sizeof(internal::context_list_node_t)
371 -
sizeof(__itt_caller)
373 -
sizeof(internal::cpu_ctl_env_space)
// Captured FP environment applied when the fp_settings trait is on.
381 internal::cpu_ctl_env_space my_cpu_ctl_env;
// Nonzero once cancellation has been requested for this group.
385 uintptr_t my_cancellation_requested;
391 uintptr_t my_version_and_traits;
// First exception registered in this group (others are discarded).
394 exception_container_type *my_exception;
397 internal::generic_scheduler *my_owner;
402 #if __TBB_TASK_PRIORITY 403 intptr_t my_priority;
// Trailing padding mirrors the leading one to isolate the cache line.
409 char _trailing_padding[internal::NFS_MaxLineSize - 2 *
sizeof(uintptr_t) - 2 *
sizeof(
void*)
// Constructor: `bound` contexts attach lazily to the parent of the first task
// allocated in them; `2 | t` stores interface version 2 alongside the traits.
410 #if __TBB_TASK_PRIORITY 445 task_group_context ( kind_type relation_with_parent = bound,
446 uintptr_t t = default_traits )
447 : my_kind(relation_with_parent)
448 , my_version_and_traits(2 | t)
454 __TBB_EXPORTED_METHOD ~task_group_context ();
// Reinitialize for reuse after cancellation/exception.
464 void __TBB_EXPORTED_METHOD reset ();
// Request cancellation of this group; returns whether this call initiated it.
474 bool __TBB_EXPORTED_METHOD cancel_group_execution ();
477 bool __TBB_EXPORTED_METHOD is_group_execution_cancelled ()
const;
// Record the currently flying exception in this context (first one wins).
486 void __TBB_EXPORTED_METHOD register_pending_exception ();
// Snapshot the caller's FP settings into my_cpu_ctl_env.
497 void __TBB_EXPORTED_METHOD capture_fp_settings ();
500 #if __TBB_TASK_PRIORITY 501 void set_priority ( priority_t );
505 priority_t priority ()
const;
// Trait bits of my_version_and_traits (version bits masked off).
509 uintptr_t traits()
const {
return my_version_and_traits & traits_mask; }
// Out-of-line initialization shared by the constructors.
514 void __TBB_EXPORTED_METHOD init ();
518 friend class internal::allocate_root_with_context_proxy;
// Extra kind values beyond the public enum, derived by arithmetic from
// `bound` so they stay distinct across versions.
520 static const kind_type binding_required = bound;
521 static const kind_type binding_completed = kind_type(bound+1);
522 static const kind_type detached = kind_type(binding_completed+1);
523 static const kind_type dying = kind_type(detached+1);
// Propagate a state field (e.g. cancellation) from `src` down this subtree
// via a pointer-to-member, so one routine serves multiple fields.
526 template <
typename T>
527 void propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state );
// Bind this context to its parent within `local_sched` (lazy binding).
530 void bind_to ( internal::generic_scheduler *local_sched );
// Enlist this context in `local_sched`'s context list.
533 void register_with ( internal::generic_scheduler *local_sched );
// Copy captured FP settings from `src` (fp_settings trait).
538 void copy_fp_settings(
const task_group_context &src );
// Base class for all user tasks; derives from task_base with the access level
// forced by the compiler workaround macro at the top of the file.
546 class task: __TBB_TASK_BASE_ACCESS interface5::internal::task_base {
// Exported slow paths used when threading tools / assertions are enabled.
549 void __TBB_EXPORTED_METHOD internal_set_ref_count(
int count );
552 internal::reference_count __TBB_EXPORTED_METHOD internal_decrement_ref_count();
// Constructor marks extra_state; the exact meaning of the value 1 is
// scheduler-internal (not shown in this extract).
556 task() {prefix().extra_state=1;}
// User-supplied work; may return the next task to run (scheduler bypass).
563 virtual task* execute() = 0;
// Root-task allocation in the default context: returns the proxy consumed by
// the placement operator new at the end of this file.
579 #if __TBB_RECYCLE_TO_ENQUEUE 590 static internal::allocate_root_proxy allocate_root() {
591 return internal::allocate_root_proxy();
// Root-task allocation bound to an explicit context.
594 #if __TBB_TASK_GROUP_CONTEXT 595 static internal::allocate_root_with_context_proxy allocate_root( task_group_context& ctx ) {
597 return internal::allocate_root_with_context_proxy(ctx);
// The continuation/child proxies carry no state of their own: `this` task is
// reinterpreted as the proxy so the allocator knows the requesting task.
603 internal::allocate_continuation_proxy& allocate_continuation() {
604 return *
reinterpret_cast<internal::allocate_continuation_proxy*
>(
this);
608 internal::allocate_child_proxy& allocate_child() {
609 return *
reinterpret_cast<internal::allocate_child_proxy*
>(
this);
613 using task_base::allocate_additional_child_of;
// destroy() is a member in the deprecated interface, otherwise inherited.
615 #if __TBB_DEPRECATED_TASK_INTERFACE 621 void __TBB_EXPORTED_METHOD destroy( task& t );
623 using task_base::destroy;
// --- Recycling: reuse this task object instead of freeing it. All variants
// require the task to be currently executing (asserted). ---
// Recycle as a continuation to run when its children complete.
638 void recycle_as_continuation() {
639 __TBB_ASSERT( prefix().state==executing,
"execute not running?" );
640 prefix().state = allocated;
// Safe variant: state `recycle` defers reuse until children are done, so the
// task may spawn children that complete before execute() returns.
646 void recycle_as_safe_continuation() {
647 __TBB_ASSERT( prefix().state==executing,
"execute not running?" );
648 prefix().state = recycle;
// Recycle as a child of `new_parent`; requires no outstanding children and a
// currently-null parent, and adopts the new parent's context.
652 void recycle_as_child_of( task& new_parent ) {
653 internal::task_prefix& p = prefix();
654 __TBB_ASSERT( prefix().state==executing||prefix().state==allocated,
"execute not running, or already recycled" );
655 __TBB_ASSERT( prefix().ref_count==0,
"no child tasks allowed when recycled as a child" );
656 __TBB_ASSERT( p.parent==NULL,
"parent must be null" );
657 __TBB_ASSERT( new_parent.prefix().state<=recycle,
"corrupt parent's state" );
658 __TBB_ASSERT( new_parent.prefix().state!=freed,
"parent already freed" );
660 p.parent = &new_parent;
661 #if __TBB_TASK_GROUP_CONTEXT 662 p.context = new_parent.prefix().context;
// Mark for immediate re-execution after execute() returns.
668 void recycle_to_reexecute() {
669 __TBB_ASSERT( prefix().state==executing,
"execute not running, or already recycled" );
670 __TBB_ASSERT( prefix().ref_count==0,
"no child tasks allowed when recycled for reexecution" );
671 prefix().state = reexecute;
// Optional variant: recycle into the FIFO enqueue path.
674 #if __TBB_RECYCLE_TO_ENQUEUE 677 void recycle_to_enqueue() {
678 __TBB_ASSERT( prefix().state==executing,
"execute not running, or already recycled" );
679 prefix().state = to_enqueue;
// Set the reference count; routed through the exported checked path when
// threading tools or assertions are enabled, otherwise a plain store.
688 void set_ref_count(
int count ) {
689 #if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT 690 internal_set_ref_count(count);
692 prefix().ref_count = count;
// Atomic increment with acquire fence.
698 void increment_ref_count() {
699 __TBB_FetchAndIncrementWacquire( &prefix().ref_count );
// Atomic add; ITT notify calls bracket the update so race detectors see the
// release/acquire pairing. Returns the new value (fetch-and-add returns old,
// hence the `count +` correction); asserts against underflow.
704 int add_ref_count(
int count ) {
705 internal::call_itt_notify( internal::releasing, &prefix().ref_count );
706 internal::reference_count k = count+__TBB_FetchAndAddW( &prefix().ref_count, count );
707 __TBB_ASSERT( k>=0,
"task's reference count underflowed" );
709 internal::call_itt_notify( internal::acquired, &prefix().ref_count );
// Atomic decrement with release fence; returns the new value (old - 1).
715 int decrement_ref_count() {
716 #if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT 717 return int(internal_decrement_ref_count());
719 return int(__TBB_FetchAndDecrementWrelease( &prefix().ref_count ))-1;
724 using task_base::spawn;
// Spawn `child` and block until this task's ref_count reaches the wait
// threshold; delegates to the owning scheduler.
727 void spawn_and_wait_for_all( task& child ) {
728 prefix().owner->wait_for_all( *
this, &child );
732 void __TBB_EXPORTED_METHOD spawn_and_wait_for_all( task_list& list );
// Run a root task to completion on its owner's scheduler.
735 static void spawn_root_and_wait( task& root ) {
736 root.prefix().owner->spawn_root_and_wait( root, root.prefix().next );
742 static void spawn_root_and_wait( task_list& root_list );
// Wait for children only (no task spawned here).
746 void wait_for_all() {
747 prefix().owner->wait_for_all( *
this, NULL );
// FIFO enqueue (fairness-oriented, unlike LIFO spawn); the priority overload
// smuggles the priority through the scheduler's `reserved` pointer argument.
751 #if __TBB_TASK_PRIORITY 763 static void enqueue( task& t ) {
764 t.prefix().owner->enqueue( t, NULL );
767 #if __TBB_TASK_PRIORITY 768 static void enqueue( task& t, priority_t p ) {
770 __TBB_ASSERT( p == priority_low || p == priority_normal || p == priority_high,
"Invalid priority level value" );
771 t.prefix().owner->enqueue( t, (
void*)p );
// The innermost task being executed by the calling thread.
776 static task& __TBB_EXPORTED_FUNC
self();
// Parent task that will be decremented when this task completes (may be NULL).
779 task* parent()
const {
return prefix().parent;}
// Reparent; both tasks must share a context when contexts are enabled.
782 void set_parent(task* p) {
783 #if __TBB_TASK_GROUP_CONTEXT 784 __TBB_ASSERT(!p || prefix().context == p->prefix().context,
"The tasks must be in the same context");
// context()/group(): the task_group_context this task runs in.
789 #if __TBB_TASK_GROUP_CONTEXT 792 task_group_context* context() {
return prefix().context;}
795 task_group_context* group () {
return prefix().context; }
// High bit of extra_state marks a task stolen from another thread's pool.
799 bool is_stolen_task()
const {
800 return (prefix().extra_state & 0x80)!=0;
808 state_type state()
const {
return state_type(prefix().state);}
// Ref count narrowed to int; asserts that the wide value fits first.
811 int ref_count()
const {
813 internal::reference_count ref_count_ = prefix().ref_count;
814 __TBB_ASSERT( ref_count_==
int(ref_count_),
"integer overflow error");
816 return int(prefix().ref_count);
820 bool __TBB_EXPORTED_METHOD is_owned_by_current_thread()
const;
// --- Affinity: a hint telling the scheduler which worker should run this
// task; 0 (the default) means no preference. ---
828 typedef internal::affinity_id affinity_id;
831 void set_affinity( affinity_id
id ) {prefix().affinity = id;}
834 affinity_id affinity()
const {
return prefix().affinity;}
// Invoked by the scheduler when the task runs on a thread other than the one
// its affinity requested; default (runtime) implementation does nothing.
841 virtual void __TBB_EXPORTED_METHOD note_affinity( affinity_id
id );
// Move this task into a different context.
843 #if __TBB_TASK_GROUP_CONTEXT 855 void __TBB_EXPORTED_METHOD change_group ( task_group_context& ctx );
// Cancellation delegates to the task's context; without context support,
// is_cancelled() is a constant false.
859 bool cancel_group_execution () {
return prefix().context->cancel_group_execution(); }
862 bool is_cancelled ()
const {
return prefix().context->is_group_execution_cancelled(); }
864 bool is_cancelled ()
const {
return false; }
// Priority also delegates to the context (priority builds only).
867 #if __TBB_TASK_PRIORITY 868 void set_group_priority ( priority_t p ) { prefix().context->set_priority(p); }
872 priority_t group_priority ()
const {
return prefix().context->priority(); }
// Friends that need access to the task's hidden prefix.
877 friend class interface5::internal::task_base;
878 friend class task_list;
879 friend class internal::scheduler;
880 friend class internal::allocate_root_proxy;
881 #if __TBB_TASK_GROUP_CONTEXT 882 friend class internal::allocate_root_with_context_proxy;
884 friend class internal::allocate_continuation_proxy;
885 friend class internal::allocate_child_proxy;
886 friend class internal::allocate_additional_child_of_proxy;
// The task_prefix sits in memory directly BEFORE the task object, so index
// [-1] from `this` reaches it; const_cast because the prefix is mutable
// bookkeeping even for const tasks. The version_tag parameter only selects
// the layout version via overload resolution.
890 internal::task_prefix& prefix( internal::version_tag* = NULL )
const {
891 return reinterpret_cast<internal::task_prefix*
>(
const_cast<task*
>(
this))[-1];
// Task whose execute() does nothing (body not shown in this extract);
// typically used as a dummy parent/continuation to wait on.
897 class empty_task:
public task {
// Adapter wrapping a functor F as a task; mutability of the stored functor
// depends on __TBB_ALLOW_MUTABLE_FUNCTORS.
906 class function_task :
public task {
907 #if __TBB_ALLOW_MUTABLE_FUNCTORS 917 function_task(
const F& f ) : my_func(f) {}
// Singly-linked list of tasks, threaded through each task's prefix().next;
// next_ptr caches the tail's next-pointer for O(1) push_back.
925 class task_list: internal::no_copy {
930 friend class interface5::internal::task_base;
// Empty list: first == NULL and next_ptr points back at `first`.
933 task_list() : first(NULL), next_ptr(&first) {}
939 bool empty()
const {
return !first;}
// Append: terminate the task's link, splice at tail, advance tail cache.
942 void push_back( task& task ) {
943 task.prefix().next = NULL;
945 next_ptr = &task.prefix().next;
// Prepend: link the task ahead of the current head.
950 void push_front( task& task ) {
954 task.prefix().next = first;
// (pop_front header line missing from this extract) Remove and return the
// head; resets the tail cache when the list becomes empty.
961 __TBB_ASSERT( !empty(),
"attempt to pop item from empty task_list" );
962 task* result = first;
963 first = result->prefix().next;
964 if( !first ) next_ptr = &first;
// Out-of-line spawn definitions, placed after task so prefix() is visible.
// Single-task spawn: hand the task to its owning scheduler.
975 inline void interface5::internal::task_base::spawn( task& t ) {
976 t.prefix().owner->spawn( t, t.prefix().next );
// List spawn: pass the head and the cached tail pointer so the scheduler can
// consume the whole chain.
979 inline void interface5::internal::task_base::spawn( task_list& list ) {
980 if( task* t = list.first ) {
981 t->prefix().owner->spawn( *t, *list.next_ptr );
// Same pattern for running a list of root tasks to completion.
986 inline void task::spawn_root_and_wait( task_list& root_list ) {
987 if( task* t = root_list.first ) {
988 t->prefix().owner->spawn_root_and_wait( *t, *root_list.next_ptr );
// Placement operator new/delete pairs, one per allocation proxy. Usage is
// `new( proxy ) T(...)`; the matching delete runs only if T's constructor
// throws, returning the storage via the proxy's free().
// Root task, default context.
995 inline void *
operator new(
size_t bytes,
const tbb::internal::allocate_root_proxy& ) {
996 return &tbb::internal::allocate_root_proxy::allocate(bytes);
999 inline void operator delete(
void* task,
const tbb::internal::allocate_root_proxy& ) {
1000 tbb::internal::allocate_root_proxy::free( *static_cast<tbb::task*>(task) );
// Root task bound to an explicit task_group_context.
1003 #if __TBB_TASK_GROUP_CONTEXT 1004 inline void *
operator new(
size_t bytes,
const tbb::internal::allocate_root_with_context_proxy& p ) {
1005 return &p.allocate(bytes);
1008 inline void operator delete(
void* task,
const tbb::internal::allocate_root_with_context_proxy& p ) {
1009 p.free( *static_cast<tbb::task*>(task) );
// Continuation of the task that produced the proxy.
1013 inline void *
operator new(
size_t bytes,
const tbb::internal::allocate_continuation_proxy& p ) {
1014 return &p.allocate(bytes);
1017 inline void operator delete(
void* task,
const tbb::internal::allocate_continuation_proxy& p ) {
1018 p.free( *static_cast<tbb::task*>(task) );
// Child of the task that produced the proxy.
1021 inline void *
operator new(
size_t bytes,
const tbb::internal::allocate_child_proxy& p ) {
1022 return &p.allocate(bytes);
1025 inline void operator delete(
void* task,
const tbb::internal::allocate_child_proxy& p ) {
1026 p.free( *static_cast<tbb::task*>(task) );
// Additional child of an arbitrary designated parent task.
1029 inline void *
operator new(
size_t bytes,
const tbb::internal::allocate_additional_child_of_proxy& p ) {
1030 return &p.allocate(bytes);
1033 inline void operator delete(
void* task,
const tbb::internal::allocate_additional_child_of_proxy& p ) {
1034 p.free( *static_cast<tbb::task*>(task) );
*/
Definition: material.h:665
Definition: _flow_graph_async_msg_impl.h:32
The namespace tbb contains all components of the library.
Definition: parallel_for.h:44