// task_arena.h
1 /*
2  Copyright 2005-2016 Intel Corporation. All Rights Reserved.
3 
4  This file is part of Threading Building Blocks. Threading Building Blocks is free software;
5  you can redistribute it and/or modify it under the terms of the GNU General Public License
6  version 2 as published by the Free Software Foundation. Threading Building Blocks is
7  distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
8  implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
9  See the GNU General Public License for more details. You should have received a copy of
10  the GNU General Public License along with Threading Building Blocks; if not, write to the
11  Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
12 
13  As a special exception, you may use this file as part of a free software library without
14  restriction. Specifically, if other files instantiate templates or use macros or inline
15  functions from this file, or you compile this file and link it with other files to produce
16  an executable, this file does not by itself cause the resulting executable to be covered
17  by the GNU General Public License. This exception does not however invalidate any other
18  reasons why the executable file might be covered by the GNU General Public License.
19 */
20 
21 #ifndef __TBB_task_arena_H
22 #define __TBB_task_arena_H
23 
24 #include "task.h"
25 #include "tbb_exception.h"
26 #if TBB_USE_THREADING_TOOLS
27 #include "atomic.h" // for as_atomic
28 #endif
29 
30 namespace tbb {
31 
namespace internal {
    //! Forward declarations for types defined in the TBB runtime library;
    //! only pointers/references to them appear in this header.
    class arena;
    class task_scheduler_observer_v3;
} // namespace internal
40 
41 namespace interface7 {
43 namespace internal {
44 using namespace tbb::internal; //e.g. function_task from task.h
45 
//! Abstract, non-assignable callable used to pass an arbitrary functor across
//! the header/runtime boundary (see task_arena::execute and internal_execute).
class delegate_base : no_assign {
public:
    //! Invokes the wrapped callable.
    virtual void operator()() const = 0;
    //! Virtual destructor: instances are handled polymorphically via delegate_base&.
    virtual ~delegate_base() {}
};
51 
52 template<typename F>
53 class delegated_function : public delegate_base {
54  F &my_func;
55  /*override*/ void operator()() const {
56  my_func();
57  }
58 public:
59  delegated_function ( F& f ) : my_func(f) {}
60 };
61 
//! Settings holder and runtime-call surface shared by task_arena.
/** Holds the deferred-initialization settings and declares the exported entry
    points into the TBB runtime library. Not used directly by clients. */
class task_arena_base {
protected:
    //! Pointer to the internal arena representation; 0 until initialized.
    internal::arena* my_arena;

#if __TBB_TASK_GROUP_CONTEXT
    //! Default task group context of the arena; 0 until initialized.
    task_group_context *my_context;
#endif

    //! Requested concurrency level, stored for deferred initialization.
    int my_max_concurrency;

    //! Number of arena slots reserved for master threads.
    unsigned my_master_slots;

    //! Version and trait flags (see the enum below) passed to the runtime.
    intptr_t my_version_and_traits;

    enum {
        default_flags = 0
#if __TBB_TASK_GROUP_CONTEXT
        | (task_group_context::default_traits & task_group_context::exact_exception) // 0 or 1 << 16
        , exact_exception_flag = task_group_context::exact_exception // used to specify flag for context directly
#endif
    };

    //! Records settings only; no runtime resources are acquired here.
    task_arena_base(int max_concurrency, unsigned reserved_for_masters)
        : my_arena(0)
#if __TBB_TASK_GROUP_CONTEXT
        , my_context(0)
#endif
        , my_max_concurrency(max_concurrency)
        , my_master_slots(reserved_for_masters)
        , my_version_and_traits(default_flags)
    {}

    // Entry points implemented by the TBB runtime library (exported symbols).
    void __TBB_EXPORTED_METHOD internal_initialize();
    void __TBB_EXPORTED_METHOD internal_terminate();
    void __TBB_EXPORTED_METHOD internal_attach();
    void __TBB_EXPORTED_METHOD internal_enqueue( task&, intptr_t ) const;
    void __TBB_EXPORTED_METHOD internal_execute( delegate_base& ) const;
    void __TBB_EXPORTED_METHOD internal_wait() const;
    static int __TBB_EXPORTED_FUNC internal_current_slot();
public:
    //! Sentinel concurrency value meaning "let the library decide".
    static const int automatic = -1; // any value < 1 means 'automatic'

};
111 
112 } // namespace internal
114 
120 class task_arena : public internal::task_arena_base {
122  bool my_initialized;
123  void mark_initialized() {
124  __TBB_ASSERT( my_arena, "task_arena initialization is incomplete" );
125 #if __TBB_TASK_GROUP_CONTEXT
126  __TBB_ASSERT( my_context, "task_arena initialization is incomplete" );
127 #endif
128 #if TBB_USE_THREADING_TOOLS
129  // Actual synchronization happens in internal_initialize & internal_attach.
130  // The race on setting my_initialized is benign, but should be hidden from Intel(R) Inspector
131  internal::as_atomic(my_initialized).fetch_and_store<release>(true);
132 #else
133  my_initialized = true;
134 #endif
135  }
136 
137 public:
139 
144  task_arena(int max_concurrency = automatic, unsigned reserved_for_masters = 1)
145  : task_arena_base(max_concurrency, reserved_for_masters)
146  , my_initialized(false)
147  {}
148 
150  task_arena(const task_arena &s) // copy settings but not the reference or instance
151  : task_arena_base(s.my_max_concurrency, s.my_master_slots)
152  , my_initialized(false)
153  {}
154 
156  struct attach {};
157 
160  : task_arena_base(automatic, 1) // use default settings if attach fails
161  , my_initialized(false)
162  {
163  internal_attach();
164  if( my_arena ) my_initialized = true;
165  }
166 
168  inline void initialize() {
169  if( !my_initialized ) {
170  internal_initialize();
171  mark_initialized();
172  }
173  }
174 
176  inline void initialize(int max_concurrency, unsigned reserved_for_masters = 1) {
177  // TODO: decide if this call must be thread-safe
178  __TBB_ASSERT( !my_arena, "Impossible to modify settings of an already initialized task_arena");
179  if( !my_initialized ) {
180  my_max_concurrency = max_concurrency;
181  my_master_slots = reserved_for_masters;
182  initialize();
183  }
184  }
185 
187  inline void initialize(attach) {
188  // TODO: decide if this call must be thread-safe
189  __TBB_ASSERT( !my_arena, "Impossible to modify settings of an already initialized task_arena");
190  if( !my_initialized ) {
191  internal_attach();
192  if( !my_arena ) internal_initialize();
193  mark_initialized();
194  }
195  }
196 
199  inline void terminate() {
200  if( my_initialized ) {
201  internal_terminate();
202  my_initialized = false;
203  }
204  }
205 
209  terminate();
210  }
211 
214  bool is_active() const { return my_initialized; }
215 
218  template<typename F>
219  void enqueue( const F& f ) {
220  initialize();
221 #if __TBB_TASK_GROUP_CONTEXT
222  internal_enqueue( *new( task::allocate_root(*my_context) ) internal::function_task<F>(f), 0 );
223 #else
224  internal_enqueue( *new( task::allocate_root() ) internal::function_task<F>(f), 0 );
225 #endif
226  }
227 
228 #if __TBB_TASK_PRIORITY
229  template<typename F>
232  void enqueue( const F& f, priority_t p ) {
233  __TBB_ASSERT( p == priority_low || p == priority_normal || p == priority_high, "Invalid priority level value" );
234  initialize();
235 #if __TBB_TASK_GROUP_CONTEXT
236  internal_enqueue( *new( task::allocate_root(*my_context) ) internal::function_task<F>(f), (intptr_t)p );
237 #else
238  internal_enqueue( *new( task::allocate_root() ) internal::function_task<F>(f), (intptr_t)p );
239 #endif
240  }
241 #endif// __TBB_TASK_PRIORITY
242 
246  template<typename F>
247  void execute(F& f) {
248  initialize();
249  internal::delegated_function<F> d(f);
250  internal_execute( d );
251  }
252 
256  template<typename F>
257  void execute(const F& f) {
258  initialize();
259  internal::delegated_function<const F> d(f);
260  internal_execute( d );
261  }
262 
263 #if __TBB_EXTRA_DEBUG
264  void debug_wait_until_empty() {
268  initialize();
269  internal_wait();
270  }
271 #endif //__TBB_EXTRA_DEBUG
272 
274  inline static int current_thread_index() {
275  return internal_current_slot();
276  }
277 };
278 
279 } // namespace interfaceX
280 
282 
283 } // namespace tbb
284 
285 #endif /* __TBB_task_arena_H */
static int current_thread_index()
Returns the index, aka slot number, of the calling thread in its current arena.
Definition: task_arena.h:274
Definition: atomic.h:535
void enqueue(const F &f)
Enqueues a task into the arena to process a functor, and immediately returns.
Definition: task_arena.h:219
task_arena(int max_concurrency=automatic, unsigned reserved_for_masters=1)
Creates task_arena with certain concurrency limits.
Definition: task_arena.h:144
void initialize()
Forces allocation of the resources for the task_arena as specified in constructor arguments...
Definition: task_arena.h:168
bool is_active() const
Returns true if the arena is active (initialized); false otherwise.
Definition: task_arena.h:214
task_arena(const task_arena &s)
Copies settings from another task_arena.
Definition: task_arena.h:150
task_arena(attach)
Creates an instance of task_arena attached to the current arena of the thread.
Definition: task_arena.h:159
~task_arena()
Removes the reference to the internal arena representation, and destroys the external object...
Definition: task_arena.h:208
*/
Definition: material.h:665
void terminate()
Removes the reference to the internal arena representation.
Definition: task_arena.h:199
Definition: _flow_graph_async_msg_impl.h:32
void execute(F &f)
Joins the arena and executes a functor, then returns. If not possible to join, wraps the functor into ...
Definition: task_arena.h:247
Release.
Definition: atomic.h:49
void enqueue(const F &f, priority_t p)
Enqueues a task with priority p into the arena to process a functor f, and immediately returns...
Definition: task_arena.h:232
void initialize(attach)
Attaches this instance to the current arena of the thread.
Definition: task_arena.h:187
The namespace tbb contains all components of the library.
Definition: parallel_for.h:44
1-to-1 proxy representation class of scheduler's arena. Constructors set up settings only...
Definition: task_arena.h:120
void execute(const F &f)
Joins the arena and executes a functor, then returns. If not possible to join, wraps the functor into ...
Definition: task_arena.h:257
Tag class used to indicate the "attaching" constructor.
Definition: task_arena.h:156
Definition: task_scheduler_observer.h:40
void initialize(int max_concurrency, unsigned reserved_for_masters=1)
Overrides concurrency level and forces initialization of internal representation. ...
Definition: task_arena.h:176