Eigen::ThreadPoolTempl< Environment > Class Template Reference
Inheritance diagram for Eigen::ThreadPoolTempl< Environment >: inherits Eigen::ThreadPoolInterface.

Classes

struct  PerThread
 
struct  ThreadData
 

Public Types

typedef RunQueue< Task, 1024 > Queue
 
typedef Environment::Task Task
 

Public Member Functions

void Cancel () EIGEN_OVERRIDE
 
int CurrentThreadId () const EIGEN_FINAL
 
int NumThreads () const EIGEN_FINAL
 
void Schedule (std::function< void()> fn) EIGEN_OVERRIDE
 
void ScheduleWithHint (std::function< void()> fn, int start, int limit) override
 
void SetStealPartitions (const std::vector< std::pair< unsigned, unsigned >> &partitions)
 
 ThreadPoolTempl (int num_threads, bool allow_spinning, Environment env=Environment())
 
 ThreadPoolTempl (int num_threads, Environment env=Environment())
 
 ~ThreadPoolTempl ()
 
Public Member Functions inherited from Eigen::ThreadPoolInterface
virtual ~ThreadPoolInterface ()
 

Private Types

typedef Environment::EnvThread Thread
 

Private Member Functions

void AssertBounds (int start, int end)
 
void ComputeCoprimes (int N, MaxSizeVector< unsigned > *coprimes)
 
void DecodePartition (unsigned val, unsigned *start, unsigned *limit)
 
unsigned EncodePartition (unsigned start, unsigned limit)
 
PerThread * GetPerThread ()
 
unsigned GetStealPartition (int i)
 
Task GlobalSteal ()
 
Task LocalSteal ()
 
int NonEmptyQueueIndex ()
 
void SetStealPartition (size_t i, unsigned val)
 
Task Steal (unsigned start, unsigned limit)
 
bool WaitForWork (EventCount::Waiter *waiter, Task *t)
 
void WorkerLoop (int thread_id)
 

Static Private Member Functions

static uint64_t GlobalThreadIdHash ()
 
static unsigned Rand (uint64_t *state)
 

Private Attributes

MaxSizeVector< MaxSizeVector< unsigned > > all_coprimes_
 
const bool allow_spinning_
 
std::atomic< unsigned > blocked_
 
std::atomic< bool > cancelled_
 
std::atomic< bool > done_
 
EventCount ec_
 
Environment env_
 
unsigned global_steal_partition_
 
std::unique_ptr< Barrier > init_barrier_
 
const int num_threads_
 
std::unordered_map< uint64_t, std::unique_ptr< PerThread > > per_thread_map_
 
EIGEN_MUTEX per_thread_map_mutex_
 
std::atomic< bool > spinning_
 
MaxSizeVector< ThreadData > thread_data_
 
MaxSizeVector< EventCount::Waiter > waiters_
 

Static Private Attributes

static const int kMaxPartitionBits
 
static const int kMaxThreads
 

Detailed Description

template<typename Environment>
class Eigen::ThreadPoolTempl< Environment >

Definition at line 18 of file NonBlockingThreadPool.h.
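
A minimal usage sketch. It assumes the Eigen::ThreadPool alias (ThreadPoolTempl< StlThreadEnvironment >) and the Eigen::Barrier helper from the same ThreadPool module; neither is documented on this page.

#include <unsupported/Eigen/CXX11/ThreadPool>
#include <atomic>
#include <cstdio>

int main() {
  Eigen::ThreadPool pool(/*num_threads=*/4);
  std::atomic<int> counter(0);
  Eigen::Barrier barrier(8);  // wait until all 8 tasks have run
  for (int i = 0; i < 8; ++i) {
    pool.Schedule([&]() {
      counter.fetch_add(1);
      barrier.Notify();
    });
  }
  barrier.Wait();
  std::printf("ran %d tasks on %d threads\n", counter.load(), pool.NumThreads());
}  // ~ThreadPoolTempl() joins the workers here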

Member Typedef Documentation

◆ Queue

template<typename Environment >
typedef RunQueue<Task, 1024> Eigen::ThreadPoolTempl< Environment >::Queue

Definition at line 21 of file NonBlockingThreadPool.h.

◆ Task

template<typename Environment >
typedef Environment::Task Eigen::ThreadPoolTempl< Environment >::Task

Definition at line 20 of file NonBlockingThreadPool.h.

◆ Thread

template<typename Environment >
typedef Environment::EnvThread Eigen::ThreadPoolTempl< Environment >::Thread
private

Definition at line 217 of file NonBlockingThreadPool.h.

Constructor & Destructor Documentation

◆ ThreadPoolTempl() [1/2]

template<typename Environment >
Eigen::ThreadPoolTempl< Environment >::ThreadPoolTempl ( int num_threads, Environment env = Environment() )
inline

Definition at line 23 of file NonBlockingThreadPool.h.

    : ThreadPoolTempl(num_threads, true, env) {}

◆ ThreadPoolTempl() [2/2]

template<typename Environment >
Eigen::ThreadPoolTempl< Environment >::ThreadPoolTempl ( int num_threads, bool allow_spinning, Environment env = Environment() )
inline

Definition at line 26 of file NonBlockingThreadPool.h.

    : env_(env),
      num_threads_(num_threads),
      allow_spinning_(allow_spinning),
      thread_data_(num_threads),
      all_coprimes_(num_threads),
      waiters_(num_threads),
      global_steal_partition_(EncodePartition(0, num_threads_)),
      blocked_(0),
      spinning_(0),
      done_(false),
      cancelled_(false),
      ec_(waiters_) {
  waiters_.resize(num_threads_);
  // Calculate coprimes of all numbers [1, num_threads].
  // Coprimes are used for random walks over all threads in Steal
  // and NonEmptyQueueIndex. Iteration is based on the fact that if we take
  // a random starting thread index t and calculate num_threads - 1 subsequent
  // indices as (t + coprime) % num_threads, we will cover all threads without
  // repetitions (effectively getting a pseudo-random permutation of thread
  // indices).
  eigen_plain_assert(num_threads_ < kMaxThreads);
  for (int i = 1; i <= num_threads_; ++i) {
    all_coprimes_.emplace_back(i);
    ComputeCoprimes(i, &all_coprimes_.back());
  }
#ifndef EIGEN_THREAD_LOCAL
  init_barrier_.reset(new Barrier(num_threads_));
#endif
  thread_data_.resize(num_threads_);
  for (int i = 0; i < num_threads_; i++) {
    SetStealPartition(i, EncodePartition(0, num_threads_));
    thread_data_[i].thread.reset(
        env_.CreateThread([this, i]() { WorkerLoop(i); }));
  }
#ifndef EIGEN_THREAD_LOCAL
  // Wait for workers to initialize per_thread_map_. Otherwise we might race
  // with them in Schedule or CurrentThreadId.
  init_barrier_->Wait();
#endif
}
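
The comment above relies on a basic number-theory fact: stepping through indices with a stride that is coprime with num_threads visits every index exactly once. A standalone sketch of that walk (illustration only, not Eigen code):

#include <cstdio>

int main() {
  const unsigned num_threads = 6;
  const unsigned inc = 5;  // gcd(5, 6) == 1, so 5 is a valid stride
  unsigned t = 2;          // arbitrary random starting index
  for (unsigned i = 0; i < num_threads; ++i) {
    std::printf("%u ", t);  // prints 2 1 0 5 4 3: each index exactly once
    t = (t + inc) % num_threads;
  }
}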

◆ ~ThreadPoolTempl()

template<typename Environment >
Eigen::ThreadPoolTempl< Environment >::~ThreadPoolTempl ( )
inline

Definition at line 69 of file NonBlockingThreadPool.h.

{
  done_ = true;

  // Now if all threads block without work, they will start exiting.
  // But note that threads can continue to work arbitrarily long,
  // block, submit new work, unblock, and otherwise live a full life.
  if (!cancelled_) {
    ec_.Notify(true);
  } else {
    // Since we were cancelled, there might be entries in the queues.
    // Empty them to prevent their destructor from asserting.
    for (size_t i = 0; i < thread_data_.size(); i++) {
      thread_data_[i].queue.Flush();
    }
  }
  // Join threads explicitly (by destroying) to avoid destruction order issues
  // within this class.
  for (size_t i = 0; i < thread_data_.size(); ++i)
    thread_data_[i].thread.reset();
}

Member Function Documentation

◆ AssertBounds()

template<typename Environment >
void Eigen::ThreadPoolTempl< Environment >::AssertBounds ( int start, int end )
inline private

Definition at line 187 of file NonBlockingThreadPool.h.

{
  eigen_plain_assert(start >= 0);
  eigen_plain_assert(start < end);  // non-zero sized partition
  eigen_plain_assert(end <= num_threads_);
}

◆ Cancel()

template<typename Environment >
void Eigen::ThreadPoolTempl< Environment >::Cancel ( )
inline virtual

Reimplemented from Eigen::ThreadPoolInterface.

Definition at line 140 of file NonBlockingThreadPool.h.

{
  cancelled_ = true;
  done_ = true;

  // Let each thread know it's been cancelled.
#ifdef EIGEN_THREAD_ENV_SUPPORTS_CANCELLATION
  for (size_t i = 0; i < thread_data_.size(); i++) {
    thread_data_[i].thread->OnCancel();
  }
#endif

  // Wake up the threads without work to let them exit on their own.
  ec_.Notify(true);
}

◆ ComputeCoprimes()

template<typename Environment >
void Eigen::ThreadPoolTempl< Environment >::ComputeCoprimes ( int N, MaxSizeVector< unsigned > * coprimes )
inline private

Definition at line 201 of file NonBlockingThreadPool.h.

{
  for (int i = 1; i <= N; i++) {
    unsigned a = i;
    unsigned b = N;
    // If GCD(a, b) == 1, then a and b are coprime.
    while (b != 0) {
      unsigned tmp = a;
      a = b;
      b = tmp % b;
    }
    if (a == 1) {
      coprimes->push_back(i);
    }
  }
}
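
A standalone check of the same Euclid loop (illustration only, not Eigen code); for N = 10 it collects {1, 3, 7, 9}:

#include <cstdio>
#include <vector>

int main() {
  const int N = 10;
  std::vector<unsigned> coprimes;
  for (int i = 1; i <= N; i++) {
    unsigned a = i, b = N;
    while (b != 0) {  // Euclid's algorithm: a ends up as GCD(i, N)
      unsigned tmp = a;
      a = b;
      b = tmp % b;
    }
    if (a == 1) coprimes.push_back(i);
  }
  for (unsigned c : coprimes) std::printf("%u ", c);  // prints: 1 3 7 9
}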

◆ CurrentThreadId()

template<typename Environment >
int Eigen::ThreadPoolTempl< Environment >::CurrentThreadId ( ) const
inline virtual

Implements Eigen::ThreadPoolInterface.

Definition at line 157 of file NonBlockingThreadPool.h.

{
  const PerThread* pt = const_cast<ThreadPoolTempl*>(this)->GetPerThread();
  if (pt->pool == this) {
    return pt->thread_id;
  } else {
    return -1;
  }
}
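
A hedged usage sketch (same Eigen::ThreadPool / Eigen::Barrier assumptions as the example in the Detailed Description): a worker of this pool sees its own index, while any other thread gets -1.

#include <unsupported/Eigen/CXX11/ThreadPool>
#include <cstdio>

int main() {
  Eigen::ThreadPool pool(2);
  std::printf("%d\n", pool.CurrentThreadId());    // -1: caller is not a worker
  Eigen::Barrier done(1);
  pool.Schedule([&]() {
    std::printf("%d\n", pool.CurrentThreadId());  // 0 or 1: a worker thread
    done.Notify();
  });
  done.Wait();
}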

◆ DecodePartition()

template<typename Environment >
void Eigen::ThreadPoolTempl< Environment >::DecodePartition ( unsigned val, unsigned * start, unsigned * limit )
inline private

Definition at line 181 of file NonBlockingThreadPool.h.

{
  *limit = val & (kMaxThreads - 1);
  val >>= kMaxPartitionBits;
  *start = val;
}

◆ EncodePartition()

template<typename Environment >
unsigned Eigen::ThreadPoolTempl< Environment >::EncodePartition ( unsigned start, unsigned limit )
inline private

Definition at line 177 of file NonBlockingThreadPool.h.

{
  return (start << kMaxPartitionBits) | limit;
}
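
A round-trip sketch of the packing scheme. The constant values are assumptions (kMaxPartitionBits == 16 and kMaxThreads == 1 << kMaxPartitionBits, as in the Eigen source); this page does not show them.

#include <cassert>

static const int kMaxPartitionBits = 16;                // assumed value
static const int kMaxThreads = 1 << kMaxPartitionBits;  // assumed value

unsigned Encode(unsigned start, unsigned limit) {
  return (start << kMaxPartitionBits) | limit;  // start in the high bits
}

void Decode(unsigned val, unsigned* start, unsigned* limit) {
  *limit = val & (kMaxThreads - 1);   // low 16 bits
  *start = val >> kMaxPartitionBits;  // high bits
}

int main() {
  unsigned s, l;
  Decode(Encode(3, 12), &s, &l);
  assert(s == 3 && l == 12);  // both fields survive the round trip
}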

◆ GetPerThread()

template<typename Environment >
PerThread* Eigen::ThreadPoolTempl< Environment >::GetPerThread ( )
inline private

Definition at line 458 of file NonBlockingThreadPool.h.

{
#ifndef EIGEN_THREAD_LOCAL
  static PerThread dummy;
  auto it = per_thread_map_.find(GlobalThreadIdHash());
  if (it == per_thread_map_.end()) {
    return &dummy;
  } else {
    return it->second.get();
  }
#else
  EIGEN_THREAD_LOCAL PerThread per_thread_;
  PerThread* pt = &per_thread_;
  return pt;
#endif
}

◆ GetStealPartition()

template<typename Environment >
unsigned Eigen::ThreadPoolTempl< Environment >::GetStealPartition ( int  i)
inline private

Definition at line 197 of file NonBlockingThreadPool.h.

{
  return thread_data_[i].steal_partition.load(std::memory_order_relaxed);
}

◆ GlobalSteal()

template<typename Environment >
Task Eigen::ThreadPoolTempl< Environment >::GlobalSteal ( )
inline private

Definition at line 378 of file NonBlockingThreadPool.h.

{
  return Steal(0, num_threads_);
}

◆ GlobalThreadIdHash()

template<typename Environment >
static uint64_t Eigen::ThreadPoolTempl< Environment >::GlobalThreadIdHash ( )
inline static private

Definition at line 454 of file NonBlockingThreadPool.h.

{
  return std::hash<std::thread::id>()(std::this_thread::get_id());
}

◆ LocalSteal()

template<typename Environment >
Task Eigen::ThreadPoolTempl< Environment >::LocalSteal ( )
inline private

Definition at line 364 of file NonBlockingThreadPool.h.

{
  PerThread* pt = GetPerThread();
  unsigned partition = GetStealPartition(pt->thread_id);
  // If the thread's steal partition is the same as the global partition,
  // there is no need to go through the steal loop twice.
  if (global_steal_partition_ == partition) return Task();
  unsigned start, limit;
  DecodePartition(partition, &start, &limit);
  AssertBounds(start, limit);

  return Steal(start, limit);
}

◆ NonEmptyQueueIndex()

template<typename Environment >
int Eigen::ThreadPoolTempl< Environment >::NonEmptyQueueIndex ( )
inline private

Definition at line 433 of file NonBlockingThreadPool.h.

{
  PerThread* pt = GetPerThread();
  // We intentionally design NonEmptyQueueIndex to steal work from
  // anywhere in the queue so threads don't block in WaitForWork() forever
  // when all threads in their partition go to sleep. Steal is still local.
  const size_t size = thread_data_.size();
  unsigned r = Rand(&pt->rand);
  unsigned inc = all_coprimes_[size - 1][r % all_coprimes_[size - 1].size()];
  unsigned victim = r % size;
  for (unsigned i = 0; i < size; i++) {
    if (!thread_data_[victim].queue.Empty()) {
      return victim;
    }
    victim += inc;
    if (victim >= size) {
      victim -= size;
    }
  }
  return -1;
}

◆ NumThreads()

template<typename Environment >
int Eigen::ThreadPoolTempl< Environment >::NumThreads ( ) const
inline virtual

Implements Eigen::ThreadPoolInterface.

Definition at line 155 of file NonBlockingThreadPool.h.

{ return num_threads_; }

◆ Rand()

template<typename Environment >
static unsigned Eigen::ThreadPoolTempl< Environment >::Rand ( uint64_t *  state)
inline static private

Definition at line 474 of file NonBlockingThreadPool.h.

{
  uint64_t current = *state;
  // Update the internal state
  *state = current * 6364136223846793005ULL + 0xda3e39cb94b95bdbULL;
  // Generate the random output (using the PCG-XSH-RS scheme)
  return static_cast<unsigned>((current ^ (current >> 22)) >>
                               (22 + (current >> 61)));
}
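
The same PCG-XSH-RS step as a standalone function with an arbitrary seed (in the pool, each worker seeds its state from GlobalThreadIdHash()):

#include <cstdint>
#include <cstdio>

static unsigned Rand(std::uint64_t* state) {
  std::uint64_t current = *state;
  // LCG state update; the output permutes the previous state.
  *state = current * 6364136223846793005ULL + 0xda3e39cb94b95bdbULL;
  return static_cast<unsigned>((current ^ (current >> 22)) >>
                               (22 + (current >> 61)));
}

int main() {
  std::uint64_t state = 0x853c49e6748fea9bULL;  // arbitrary seed
  for (int i = 0; i < 4; ++i) std::printf("%u\n", Rand(&state));
}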

◆ Schedule()

template<typename Environment >
void Eigen::ThreadPoolTempl< Environment >::Schedule ( std::function< void()>  fn)
inline virtual

Implements Eigen::ThreadPoolInterface.

Definition at line 103 of file NonBlockingThreadPool.h.

{
  ScheduleWithHint(std::move(fn), 0, num_threads_);
}

◆ ScheduleWithHint()

template<typename Environment >
void Eigen::ThreadPoolTempl< Environment >::ScheduleWithHint ( std::function< void()> fn, int start, int limit )
inline override virtual

Reimplemented from Eigen::ThreadPoolInterface.

Definition at line 107 of file NonBlockingThreadPool.h.

{
  Task t = env_.CreateTask(std::move(fn));
  PerThread* pt = GetPerThread();
  if (pt->pool == this) {
    // Worker thread of this pool, push onto the thread's queue.
    Queue& q = thread_data_[pt->thread_id].queue;
    t = q.PushFront(std::move(t));
  } else {
    // A free-standing thread (or worker of another pool), push onto a random
    // queue.
    eigen_plain_assert(start < limit);
    eigen_plain_assert(limit <= num_threads_);
    int num_queues = limit - start;
    int rnd = Rand(&pt->rand) % num_queues;
    eigen_plain_assert(start + rnd < limit);
    Queue& q = thread_data_[start + rnd].queue;
    t = q.PushBack(std::move(t));
  }
  // Note: below we touch this after making t available to worker threads.
  // Strictly speaking, this can lead to a racy use-after-free. Consider that
  // Schedule is called from a thread that is neither the main thread nor a
  // worker thread of this pool. Then, execution of t directly or indirectly
  // completes the overall computation, which in turn leads to destruction of
  // this. We expect that such a scenario is prevented by the program, that is,
  // this is kept alive while any threads can potentially be in Schedule.
  if (!t.f) {
    ec_.Notify(false);
  } else {
    env_.ExecuteTask(t);  // Push failed, execute directly.
  }
}
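
An illustrative call with hypothetical values (the start/limit hint only matters when the caller is not a worker of this pool; limit is exclusive):

#include <unsupported/Eigen/CXX11/ThreadPool>

int main() {
  Eigen::ThreadPool pool(8);
  // Push the task onto a random queue among workers 2..5.
  pool.ScheduleWithHint([]() { /* work */ }, /*start=*/2, /*limit=*/6);
}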

◆ SetStealPartition()

template<typename Environment >
void Eigen::ThreadPoolTempl< Environment >::SetStealPartition ( size_t i, unsigned val )
inline private

Definition at line 193 of file NonBlockingThreadPool.h.

{
  thread_data_[i].steal_partition.store(val, std::memory_order_relaxed);
}

◆ SetStealPartitions()

template<typename Environment >
void Eigen::ThreadPoolTempl< Environment >::SetStealPartitions ( const std::vector< std::pair< unsigned, unsigned >> &  partitions)
inline

Definition at line 90 of file NonBlockingThreadPool.h.

{
  eigen_plain_assert(partitions.size() == static_cast<std::size_t>(num_threads_));

  // Pass this information to each thread queue.
  for (int i = 0; i < num_threads_; i++) {
    const auto& pair = partitions[i];
    unsigned start = pair.first, end = pair.second;
    AssertBounds(start, end);
    unsigned val = EncodePartition(start, end);
    SetStealPartition(i, val);
  }
}
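
A hypothetical partitioning of an 8-thread pool into two steal domains, one [start, limit) pair per thread (threads 0-3 steal only among themselves, and likewise 4-7):

#include <unsupported/Eigen/CXX11/ThreadPool>
#include <utility>
#include <vector>

int main() {
  Eigen::ThreadPool pool(8);
  std::vector<std::pair<unsigned, unsigned>> partitions;
  for (unsigned i = 0; i < 8; ++i) {
    partitions.push_back(i < 4 ? std::make_pair(0u, 4u)
                               : std::make_pair(4u, 8u));
  }
  pool.SetStealPartitions(partitions);
}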

◆ Steal()

template<typename Environment >
Task Eigen::ThreadPoolTempl< Environment >::Steal ( unsigned start, unsigned limit )
inline private

Definition at line 338 of file NonBlockingThreadPool.h.

{
  PerThread* pt = GetPerThread();
  const size_t size = limit - start;
  unsigned r = Rand(&pt->rand);
  // Reduce r into the [0, size) range; this uses the trick from
  // https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
  eigen_plain_assert(all_coprimes_[size - 1].size() < (1 << 30));
  unsigned victim = ((uint64_t)r * (uint64_t)size) >> 32;
  unsigned index = ((uint64_t)all_coprimes_[size - 1].size() * (uint64_t)r) >> 32;
  unsigned inc = all_coprimes_[size - 1][index];

  for (unsigned i = 0; i < size; i++) {
    eigen_plain_assert(start + victim < limit);
    Task t = thread_data_[start + victim].queue.PopBack();
    if (t.f) {
      return t;
    }
    victim += inc;
    if (victim >= size) {
      victim -= size;
    }
  }
  return Task();
}
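
A sketch of the multiply-shift reduction used above: (r * size) / 2^32 maps a 32-bit random value into [0, size) without a modulo (see the linked Lemire post):

#include <cstdint>
#include <cstdio>

int main() {
  const std::uint32_t size = 12;
  const std::uint32_t samples[] = {0u, 0x40000000u, 0x80000000u, 0xffffffffu};
  for (std::uint32_t r : samples) {
    std::uint32_t reduced =
        static_cast<std::uint32_t>(((std::uint64_t)r * size) >> 32);
    std::printf("r = %10u -> %2u\n", r, reduced);  // 0, 3, 6, 11
  }
}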

◆ WaitForWork()

template<typename Environment >
bool Eigen::ThreadPoolTempl< Environment >::WaitForWork ( EventCount::Waiter * waiter, Task * t )
inline private

Definition at line 386 of file NonBlockingThreadPool.h.

{
  eigen_plain_assert(!t->f);
  // We already did a best-effort emptiness check in Steal, so prepare for
  // blocking.
  ec_.Prewait();
  // Now do a reliable emptiness check.
  int victim = NonEmptyQueueIndex();
  if (victim != -1) {
    ec_.CancelWait();
    if (cancelled_) {
      return false;
    } else {
      *t = thread_data_[victim].queue.PopBack();
      return true;
    }
  }
  // The number of blocked threads is used as the termination condition.
  // If we are shutting down and all worker threads are blocked without work,
  // then we are done.
  blocked_++;
  // TODO is blocked_ required to be unsigned?
  if (done_ && blocked_ == static_cast<unsigned>(num_threads_)) {
    ec_.CancelWait();
    // Almost done, but we need to re-check the queues.
    // Consider that all queues are empty and all worker threads are preempted
    // right after incrementing blocked_ above. Now a free-standing thread
    // submits work and calls destructor (which sets done_). If we don't
    // re-check queues, we will exit leaving the work unexecuted.
    if (NonEmptyQueueIndex() != -1) {
      // Note: we must not pop from queues before we decrement blocked_,
      // otherwise the following scenario is possible. Consider that instead
      // of checking for emptiness we popped the only element from queues.
      // Now other worker threads can start exiting, which is bad if the
      // work item submits other work. So we just check emptiness here,
      // which ensures that all worker threads exit at the same time.
      blocked_--;
      return true;
    }
    // Reached stable termination state.
    ec_.Notify(true);
    return false;
  }
  ec_.CommitWait(waiter);
  blocked_--;
  return true;
}

◆ WorkerLoop()

template<typename Environment >
void Eigen::ThreadPoolTempl< Environment >::WorkerLoop ( int  thread_id)
inline private

Definition at line 256 of file NonBlockingThreadPool.h.

{
#ifndef EIGEN_THREAD_LOCAL
  std::unique_ptr<PerThread> new_pt(new PerThread());
  per_thread_map_mutex_.lock();
  bool insertOK = per_thread_map_.emplace(GlobalThreadIdHash(), std::move(new_pt)).second;
  eigen_plain_assert(insertOK);
  EIGEN_UNUSED_VARIABLE(insertOK);
  per_thread_map_mutex_.unlock();
  init_barrier_->Notify();
  init_barrier_->Wait();
#endif
  PerThread* pt = GetPerThread();
  pt->pool = this;
  pt->rand = GlobalThreadIdHash();
  pt->thread_id = thread_id;
  Queue& q = thread_data_[thread_id].queue;
  EventCount::Waiter* waiter = &waiters_[thread_id];
  // TODO(dvyukov,rmlarsen): The time spent in NonEmptyQueueIndex() is
  // proportional to num_threads_ and we assume that new work is scheduled at
  // a constant rate, so we set spin_count to 5000 / num_threads_. The
  // constant was picked based on a fair dice roll, tune it.
  const int spin_count =
      allow_spinning_ && num_threads_ > 0 ? 5000 / num_threads_ : 0;
  if (num_threads_ == 1) {
    // For num_threads_ == 1 there is no point in going through the expensive
    // steal loop. Moreover, since NonEmptyQueueIndex() calls PopBack() on the
    // victim queues it might reverse the order in which ops are executed
    // compared to the order in which they are scheduled, which tends to be
    // counter-productive for the types of I/O workloads that single-thread
    // pools tend to be used for.
    while (!cancelled_) {
      Task t = q.PopFront();
      for (int i = 0; i < spin_count && !t.f; i++) {
        if (!cancelled_.load(std::memory_order_relaxed)) {
          t = q.PopFront();
        }
      }
      if (!t.f) {
        if (!WaitForWork(waiter, &t)) {
          return;
        }
      }
      if (t.f) {
        env_.ExecuteTask(t);
      }
    }
  } else {
    while (!cancelled_) {
      Task t = q.PopFront();
      if (!t.f) {
        t = LocalSteal();
        if (!t.f) {
          t = GlobalSteal();
          if (!t.f) {
            // Leave one thread spinning. This reduces latency.
            if (allow_spinning_ && !spinning_ && !spinning_.exchange(true)) {
              for (int i = 0; i < spin_count && !t.f; i++) {
                if (!cancelled_.load(std::memory_order_relaxed)) {
                  t = GlobalSteal();
                } else {
                  return;
                }
              }
              spinning_ = false;
            }
            if (!t.f) {
              if (!WaitForWork(waiter, &t)) {
                return;
              }
            }
          }
        }
      }
      if (t.f) {
        env_.ExecuteTask(t);
      }
    }
  }
}

Member Data Documentation

◆ all_coprimes_

template<typename Environment >
MaxSizeVector<MaxSizeVector<unsigned> > Eigen::ThreadPoolTempl< Environment >::all_coprimes_
private

Definition at line 241 of file NonBlockingThreadPool.h.

◆ allow_spinning_

template<typename Environment >
const bool Eigen::ThreadPoolTempl< Environment >::allow_spinning_
private

Definition at line 239 of file NonBlockingThreadPool.h.

◆ blocked_

template<typename Environment >
std::atomic<unsigned> Eigen::ThreadPoolTempl< Environment >::blocked_
private

Definition at line 244 of file NonBlockingThreadPool.h.

◆ cancelled_

template<typename Environment >
std::atomic<bool> Eigen::ThreadPoolTempl< Environment >::cancelled_
private

Definition at line 247 of file NonBlockingThreadPool.h.

◆ done_

template<typename Environment >
std::atomic<bool> Eigen::ThreadPoolTempl< Environment >::done_
private

Definition at line 246 of file NonBlockingThreadPool.h.

◆ ec_

template<typename Environment >
EventCount Eigen::ThreadPoolTempl< Environment >::ec_
private

Definition at line 248 of file NonBlockingThreadPool.h.

◆ env_

template<typename Environment >
Environment Eigen::ThreadPoolTempl< Environment >::env_
private

Definition at line 237 of file NonBlockingThreadPool.h.

◆ global_steal_partition_

template<typename Environment >
unsigned Eigen::ThreadPoolTempl< Environment >::global_steal_partition_
private

Definition at line 243 of file NonBlockingThreadPool.h.

◆ init_barrier_

template<typename Environment >
std::unique_ptr<Barrier> Eigen::ThreadPoolTempl< Environment >::init_barrier_
private

Definition at line 250 of file NonBlockingThreadPool.h.

◆ kMaxPartitionBits

template<typename Environment >
const int Eigen::ThreadPoolTempl< Environment >::kMaxPartitionBits
static private

Definition at line 174 of file NonBlockingThreadPool.h.

◆ kMaxThreads

template<typename Environment >
const int Eigen::ThreadPoolTempl< Environment >::kMaxThreads
static private

Definition at line 175 of file NonBlockingThreadPool.h.

◆ num_threads_

template<typename Environment >
const int Eigen::ThreadPoolTempl< Environment >::num_threads_
private

Definition at line 238 of file NonBlockingThreadPool.h.

◆ per_thread_map_

template<typename Environment >
std::unordered_map<uint64_t, std::unique_ptr<PerThread> > Eigen::ThreadPoolTempl< Environment >::per_thread_map_
private

Definition at line 252 of file NonBlockingThreadPool.h.

◆ per_thread_map_mutex_

template<typename Environment >
EIGEN_MUTEX Eigen::ThreadPoolTempl< Environment >::per_thread_map_mutex_
private

Definition at line 251 of file NonBlockingThreadPool.h.

◆ spinning_

template<typename Environment >
std::atomic<bool> Eigen::ThreadPoolTempl< Environment >::spinning_
private

Definition at line 245 of file NonBlockingThreadPool.h.

◆ thread_data_

template<typename Environment >
MaxSizeVector<ThreadData> Eigen::ThreadPoolTempl< Environment >::thread_data_
private

Definition at line 240 of file NonBlockingThreadPool.h.

◆ waiters_

template<typename Environment >
MaxSizeVector<EventCount::Waiter> Eigen::ThreadPoolTempl< Environment >::waiters_
private

Definition at line 242 of file NonBlockingThreadPool.h.


The documentation for this class was generated from the following file: NonBlockingThreadPool.h