#ifndef EIGEN_CXX11_THREADPOOL_EVENTCOUNT_H_
#define EIGEN_CXX11_THREADPOOL_EVENTCOUNT_H_

namespace Eigen {

// EventCount allows waiting for arbitrary predicates in non-blocking
// algorithms: like a condition variable, but the wait predicate does not
// need to be protected by a mutex. The protocol resembles Dekker's algorithm;
// the seq_cst fences below ensure that a notification cannot be lost.
class EventCount {
 public:
  class Waiter;

  EventCount(MaxSizeVector<Waiter>& waiters) : waiters_(waiters) {
    eigen_assert(waiters.size() < (1 << kWaiterBits) - 1);
    // Initialize the epoch close to its overflow point to exercise wraparound.
    state_ = kStackMask | (kEpochMask - kEpochInc * waiters.size() * 2);
  }

  ~EventCount() {
    // Ensure there are no waiters left.
    eigen_assert((state_.load() & (kStackMask | kWaiterMask)) == kStackMask);
  }

  // Prewait prepares for waiting. After it returns, the thread must re-check
  // the wait predicate and call either CancelWait or CommitWait, passing the
  // same Waiter object.
  void Prewait(Waiter* w) {
    w->epoch = state_.fetch_add(kWaiterInc, std::memory_order_relaxed);
    std::atomic_thread_fence(std::memory_order_seq_cst);
  }

  // CommitWait commits waiting; the calling thread blocks until notified.
  void CommitWait(Waiter* w) {
    w->state = Waiter::kNotSignaled;
    // Modification epoch of this waiter.
    uint64_t epoch =
        (w->epoch & kEpochMask) +
        (((w->epoch & kWaiterMask) >> kWaiterShift) << kEpochShift);
    uint64_t state = state_.load(std::memory_order_seq_cst);
    for (;;) {
      if (int64_t((state & kEpochMask) - epoch) < 0) {
        // The preceding waiter has not decided on its fate yet; spin.
        EIGEN_THREAD_YIELD();
        state = state_.load(std::memory_order_seq_cst);
        continue;
      }
      // We have already been notified.
      if (int64_t((state & kEpochMask) - epoch) > 0) return;
      // Move this thread from the pre-wait counter onto the waiter stack.
      eigen_assert((state & kWaiterMask) != 0);
      uint64_t newstate = state - kWaiterInc + kEpochInc;
      newstate = (newstate & ~kStackMask) | (w - &waiters_[0]);
      if ((state & kStackMask) == kStackMask)
        w->next.store(nullptr, std::memory_order_relaxed);
      else
        w->next.store(&waiters_[state & kStackMask], std::memory_order_relaxed);
      if (state_.compare_exchange_weak(state, newstate,
                                       std::memory_order_release))
        break;
    }
    Park(w);
  }

  // CancelWait cancels the effects of the previous Prewait call.
  void CancelWait(Waiter* w) {
    uint64_t epoch =
        (w->epoch & kEpochMask) +
        (((w->epoch & kWaiterMask) >> kWaiterShift) << kEpochShift);
    uint64_t state = state_.load(std::memory_order_relaxed);
    for (;;) {
      if (int64_t((state & kEpochMask) - epoch) < 0) {
        // The preceding waiter has not decided on its fate yet; spin.
        EIGEN_THREAD_YIELD();
        state = state_.load(std::memory_order_relaxed);
        continue;
      }
      // We have already been notified.
      if (int64_t((state & kEpochMask) - epoch) > 0) return;
      // Remove this thread from the pre-wait counter.
      eigen_assert((state & kWaiterMask) != 0);
      if (state_.compare_exchange_weak(state, state - kWaiterInc + kEpochInc,
                                       std::memory_order_relaxed))
        return;
    }
  }

  // Notify wakes one (all == false) or all (all == true) waiting threads.
  // Must be called after changing the associated wait predicate.
  void Notify(bool all) {
    std::atomic_thread_fence(std::memory_order_seq_cst);
    uint64_t state = state_.load(std::memory_order_acquire);
    for (;;) {
      // Easy case: no waiters.
      if ((state & kStackMask) == kStackMask && (state & kWaiterMask) == 0)
        return;
      uint64_t waiters = (state & kWaiterMask) >> kWaiterShift;
      uint64_t newstate;
      if (all) {
        // Reset the pre-wait counter and empty the wait stack.
        newstate = (state & kEpochMask) + (kEpochInc * waiters) + kStackMask;
      } else if (waiters) {
        // There is a thread in pre-wait state; unblock it.
        newstate = state + kEpochInc - kWaiterInc;
      } else {
        // Pop a waiter off the stack and unpark it.
        Waiter* w = &waiters_[state & kStackMask];
        Waiter* wnext = w->next.load(std::memory_order_relaxed);
        uint64_t next = kStackMask;
        if (wnext != nullptr) next = wnext - &waiters_[0];
        // No kEpochInc here: ABA on the stack cannot happen because a waiter
        // is re-pushed only after passing through pre-wait, which bumps the epoch.
        newstate = (state & kEpochMask) + next;
      }
      if (state_.compare_exchange_weak(state, newstate,
                                       std::memory_order_acquire)) {
        if (!all && waiters) return;  // unblocked a pre-wait thread
        if ((state & kStackMask) == kStackMask) return;
        Waiter* w = &waiters_[state & kStackMask];
        if (!all) w->next.store(nullptr, std::memory_order_relaxed);
        Unpark(w);
        return;
      }
    }
  }

  class Waiter {
    friend class EventCount;
    // Align to a 128-byte boundary to prevent false sharing between Waiters.
    EIGEN_ALIGN_TO_BOUNDARY(128) std::atomic<Waiter*> next;
    std::mutex mu;
    std::condition_variable cv;
    uint64_t epoch;
    unsigned state;
    enum {
      kNotSignaled,
      kWaiting,
      kSignaled,
    };
  };

 private:
  // state_ layout: low kStackBits = stack of committed waiters,
  // next kWaiterBits = count of pre-wait threads, high kEpochBits = epoch.
  static const uint64_t kStackBits = 16;
  static const uint64_t kStackMask = (1ull << kStackBits) - 1;
  static const uint64_t kWaiterBits = 16;
  static const uint64_t kWaiterShift = 16;
  static const uint64_t kWaiterMask = ((1ull << kWaiterBits) - 1)
                                      << kWaiterShift;
  static const uint64_t kWaiterInc = 1ull << kWaiterBits;
  static const uint64_t kEpochBits = 32;
  static const uint64_t kEpochShift = 32;
  static const uint64_t kEpochMask = ((1ull << kEpochBits) - 1) << kEpochShift;
  static const uint64_t kEpochInc = 1ull << kEpochShift;
  std::atomic<uint64_t> state_;
  MaxSizeVector<Waiter>& waiters_;
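
  // Illustrative decode of a packed state_ value (this example is not part of
  // the original header): for state == 0x0000000500020003ull,
  //   state & kStackMask                    == 0x3 -> committed-waiter stack
  //                                                   starts at waiters_[3]
  //   (state & kWaiterMask) >> kWaiterShift == 0x2 -> two threads in pre-wait
  //   (state & kEpochMask) >> kEpochShift   == 0x5 -> modification epoch 5
  // A stack field equal to kStackMask itself means the stack is empty.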

  void Park(Waiter* w) {
    std::unique_lock<std::mutex> lock(w->mu);
    while (w->state != Waiter::kSignaled) {
      w->state = Waiter::kWaiting;
      w->cv.wait(lock);
    }
  }

  void Unpark(Waiter* waiters) {
    Waiter* next = nullptr;
    for (Waiter* w = waiters; w; w = next) {
      next = w->next.load(std::memory_order_relaxed);
      unsigned state;
      {
        std::unique_lock<std::mutex> lock(w->mu);
        state = w->state;
        w->state = Waiter::kSignaled;
      }
      // Avoid notifying if the waiter was never actually blocked on cv.
      if (state == Waiter::kWaiting) w->cv.notify_one();
    }
  }

  EventCount(const EventCount&) = delete;
  void operator=(const EventCount&) = delete;
};

}  // namespace Eigen

#endif  // EIGEN_CXX11_THREADPOOL_EVENTCOUNT_H_
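
A minimal usage sketch of the protocol above (illustrative only; ec, waiters, my_index, predicate, and act() are placeholders supplied by the caller, not part of the header):

    // Waiting thread:
    if (predicate) return act();
    EventCount::Waiter& w = waiters[my_index];
    ec.Prewait(&w);
    if (predicate) return ec.CancelWait(&w);  // predicate changed: don't block
    ec.CommitWait(&w);                        // blocks until notified

    // Notifying thread:
    predicate = true;
    ec.Notify(true);  // Notify(false) wakes at most one waiter

Notify is cheap when there are no waiters; Prewait/CommitWait are heavier but only run after a predicate check has already failed.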