#ifndef EIGEN_CXX11_THREADPOOL_EVENTCOUNT_H
#define EIGEN_CXX11_THREADPOOL_EVENTCOUNT_H
// Prewait: publish the bump of the pre-wait counter; seq_cst here pairs
// with the fence at the top of Notify so a notifier cannot miss this waiter.
if (state_.compare_exchange_weak(state, newstate,
                                 std::memory_order_seq_cst))
  return;
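// The line above is the exit of a standard compare-exchange retry loop:
// the thread loads state_, derives newstate, and retries until the CAS
// lands. A minimal self-contained sketch of the same pattern, with
// illustrative names (`counter`, `bump`) that are not part of this header:

#include <atomic>
#include <cstdint>

std::atomic<uint64_t> counter{0};

void bump(uint64_t delta) {
  uint64_t state = counter.load(std::memory_order_relaxed);
  for (;;) {
    uint64_t newstate = state + delta;
    // compare_exchange_weak may fail spuriously, so it lives in a loop;
    // on failure it reloads the freshly observed value into `state`.
    if (counter.compare_exchange_weak(state, newstate,
                                      std::memory_order_seq_cst))
      return;
  }
}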
// CommitWait: link this waiter into the intrusive stack (the next field
// reuses the stack-index and epoch bits of the observed state), then publish.
w->next.store(state & (kStackMask | kEpochMask), std::memory_order_relaxed);
if (state_.compare_exchange_weak(state, newstate,
                                 std::memory_order_acq_rel)) {
  // ... park this thread unless a pending signal was consumed ...
}
// CancelWait: withdraw the pre-wait bump made by Prewait.
if (state_.compare_exchange_weak(state, newstate,
                                 std::memory_order_acq_rel))
  return;
// Notify: the full fence orders the caller's predicate update before the
// state_ read that follows; it pairs with the seq_cst CAS in Prewait.
std::atomic_thread_fence(std::memory_order_seq_cst);
// ... (loop head elided: load state_, extract `waiters` and `signals`,
//      return on the no-waiter fast path, handle the notifyAll case) ...
} else if (signals < waiters) {
  // A thread is in pre-wait state; unblock it with a signal.
  newstate = state + kSignalInc;
} else {
  // Pop a waiter from the intrusive stack and unpark it.
  Waiter* w = &waiters_[state & kStackMask];
  uint64_t next = w->next.load(std::memory_order_relaxed);
  newstate = (state & (kWaiterMask | kSignalMask)) | next;
}
if (state_.compare_exchange_weak(state, newstate,
                                 std::memory_order_acq_rel)) {
  if (!notifyAll && (signals < waiters)) return;  // woke a pre-wait thread
  if ((state & kStackMask) == kStackMask) return;  // stack was empty
  Waiter* w = &waiters_[state & kStackMask];
  if (!notifyAll) w->next.store(kStackMask, std::memory_order_relaxed);
  Unpark(w);
}
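// Why the fence at the top of Notify: once the caller flips its wait
// predicate, either the waiter's re-check sees the predicate, or the
// notifier's state_ read sees the waiter -- never neither. A hedged,
// simplified sketch of that handshake using all-seq_cst operations
// (`predicate` and `waiter_count` are illustrative stand-ins; Eigen gets
// the same ordering from the fence plus the seq_cst CAS in Prewait):

#include <atomic>

std::atomic<bool> predicate{false};
std::atomic<int> waiter_count{0};

void notifier() {
  predicate.store(true, std::memory_order_seq_cst);
  if (waiter_count.load(std::memory_order_seq_cst) != 0) {
    // wake waiters here
  }
}

void waiter() {
  waiter_count.fetch_add(1, std::memory_order_seq_cst);
  if (predicate.load(std::memory_order_seq_cst)) {
    waiter_count.fetch_sub(1, std::memory_order_seq_cst);
    return;  // condition already true; no need to block
  }
  // block until the notifier wakes us
}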
static_assert(kEpochBits >= 20, "not enough bits to prevent ABA problem");
// Unpark: walk the popped list; an index equal to kStackMask ends it.
next = wnext == kStackMask
           ? nullptr
           : &waiters_[internal::convert_index<size_t>(wnext)];
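// The stack index, both counters, and the epoch all share one 64-bit
// state_ word. An illustrative re-derivation of the layout behind the
// masks listed below, assuming the 14-bit fields used by current Eigen
// (the helpers `waiters_of`/`signals_of` are not part of the header):

#include <cstdint>

constexpr uint64_t kWaiterBits = 14;
constexpr uint64_t kStackMask = (1ull << kWaiterBits) - 1;  // bits 0..13: waiter-stack head
constexpr uint64_t kWaiterShift = kWaiterBits;              // bits 14..27: pre-wait count
constexpr uint64_t kWaiterMask = kStackMask << kWaiterShift;
constexpr uint64_t kSignalShift = 2 * kWaiterBits;          // bits 28..41: signal count
constexpr uint64_t kSignalMask = kStackMask << kSignalShift;
constexpr uint64_t kEpochShift = 3 * kWaiterBits;           // bits 42..63: ABA epoch

constexpr uint64_t waiters_of(uint64_t state) {
  return (state & kWaiterMask) >> kWaiterShift;
}
constexpr uint64_t signals_of(uint64_t state) {
  return (state & kSignalMask) >> kSignalShift;
}

static_assert(64 - kEpochShift >= 20, "epoch field too narrow");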
#define eigen_plain_assert(condition)

EventCount(MaxSizeVector<Waiter>& waiters)
EventCount(const EventCount&) = delete
void operator=(const EventCount&) = delete
void CommitWait(Waiter* w)
void Notify(bool notifyAll)
static void CheckState(uint64_t state, bool waiter = false)

std::atomic<uint64_t> next
std::atomic<uint64_t> state_
MaxSizeVector<Waiter>& waiters_

static const uint64_t kWaiterBits
static const uint64_t kWaiterShift
static const uint64_t kWaiterMask
static const uint64_t kWaiterInc
static const uint64_t kSignalShift
static const uint64_t kSignalMask
static const uint64_t kSignalInc
static const uint64_t kEpochShift
static const uint64_t kEpochBits
static const uint64_t kEpochMask
static const uint64_t kEpochInc
static const uint64_t kStackMask
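// Hedged usage sketch of the EventCount protocol, assuming the
// Prewait()/CancelWait() members of current Eigen (not shown in the
// listing above) and a caller-owned Waiter slot; `work_available`,
// `consumer`, and `producer` are illustrative names:

#include <atomic>

std::atomic<bool> work_available{false};

void consumer(Eigen::EventCount& ec, Eigen::EventCount::Waiter* w) {
  for (;;) {
    if (work_available.exchange(false)) return;  // predicate true, no wait
    ec.Prewait();                                // announce intent to wait
    if (work_available.exchange(false)) {
      ec.CancelWait();                           // predicate flipped; back out
      return;
    }
    ec.CommitWait(w);                            // block until a Notify
  }
}

void producer(Eigen::EventCount& ec) {
  work_available.store(true);  // change the predicate first...
  ec.Notify(false);            // ...then wake one waiter
}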