// TensorIntDiv.h  (Doxygen page-navigation text removed; not part of the source.)
1 // This file is part of Eigen, a lightweight C++ template library
2 // for linear algebra.
3 //
4 // Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
5 //
6 // This Source Code Form is subject to the terms of the Mozilla
7 // Public License v. 2.0. If a copy of the MPL was not distributed
8 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9 
10 #ifndef EIGEN_CXX11_TENSOR_TENSOR_INTDIV_H
11 #define EIGEN_CXX11_TENSOR_TENSOR_INTDIV_H
12 
13 
14 #include "./InternalHeaderCheck.h"
15 
16 namespace Eigen {
17 
31 namespace internal {
32 
33  // Note: result is undefined if val == 0
34  template <typename T>
36  std::enable_if_t<sizeof(T)==4,int> count_leading_zeros(const T val)
37  {
38 #ifdef EIGEN_GPU_COMPILE_PHASE
39  return __clz(val);
40 #elif defined(SYCL_DEVICE_ONLY)
41  return cl::sycl::clz(val);
42 #elif EIGEN_COMP_MSVC
43  unsigned long index;
44  _BitScanReverse(&index, val);
45  return 31 - index;
46 #else
47  EIGEN_STATIC_ASSERT(sizeof(unsigned long long) == 8, YOU_MADE_A_PROGRAMMING_MISTAKE);
48  return __builtin_clz(static_cast<uint32_t>(val));
49 #endif
50  }
51 
52  template <typename T>
54  std::enable_if_t<sizeof(T)==8,int> count_leading_zeros(const T val)
55  {
56 #ifdef EIGEN_GPU_COMPILE_PHASE
57  return __clzll(val);
58 #elif defined(SYCL_DEVICE_ONLY)
59  return static_cast<int>(cl::sycl::clz(val));
60 #elif EIGEN_COMP_MSVC && EIGEN_ARCH_x86_64
61  unsigned long index;
62  _BitScanReverse64(&index, val);
63  return 63 - index;
64 #elif EIGEN_COMP_MSVC
65  // MSVC's _BitScanReverse64 is not available for 32bits builds.
66  unsigned int lo = (unsigned int)(val&0xffffffff);
67  unsigned int hi = (unsigned int)((val>>32)&0xffffffff);
68  int n;
69  if(hi==0)
70  n = 32 + count_leading_zeros<unsigned int>(lo);
71  else
72  n = count_leading_zeros<unsigned int>(hi);
73  return n;
74 #else
75  EIGEN_STATIC_ASSERT(sizeof(unsigned long long) == 8, YOU_MADE_A_PROGRAMMING_MISTAKE);
76  return __builtin_clzll(static_cast<uint64_t>(val));
77 #endif
78  }
79 
// Maps any 8-byte index type to uint64_t and every narrower type to
// uint32_t, giving the divider math below an unsigned type of known width.
template <typename T>
struct UnsignedTraits {
  typedef std::conditional_t<sizeof(T) != 8, uint32_t, uint64_t> type;
};
84 
85  template <typename T>
86  struct DividerTraits {
87  typedef typename UnsignedTraits<T>::type type;
88  static constexpr int N = sizeof(T) * 8;
89  };
90 
91  template <typename T>
93 #if defined(EIGEN_GPU_COMPILE_PHASE)
94  return __umulhi(a, b);
95 #elif defined(SYCL_DEVICE_ONLY)
96  return cl::sycl::mul_hi(a, static_cast<uint32_t>(b));
97 #else
98  return (static_cast<uint64_t>(a) * b) >> 32;
99 #endif
100  }
101 
102  template <typename T>
104 #if defined(EIGEN_GPU_COMPILE_PHASE)
105  return __umul64hi(a, b);
106 #elif defined(SYCL_DEVICE_ONLY)
107  return cl::sycl::mul_hi(a, static_cast<uint64_t>(b));
108 #elif EIGEN_HAS_BUILTIN_INT128
109  __uint128_t v = static_cast<__uint128_t>(a) * static_cast<__uint128_t>(b);
110  return static_cast<uint64_t>(v >> 64);
111 #else
112  return (TensorUInt128<static_val<0>, uint64_t>(a) * TensorUInt128<static_val<0>, uint64_t>(b)).upper();
113 #endif
114  }
115 
116  template <int N, typename T>
117  struct DividerHelper {
118  static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint32_t computeMultiplier(const int log_div, const T divider) {
119  EIGEN_STATIC_ASSERT(N == 32, YOU_MADE_A_PROGRAMMING_MISTAKE);
120  return static_cast<uint32_t>((static_cast<uint64_t>(1) << (N+log_div)) / divider - (static_cast<uint64_t>(1) << N) + 1);
121  }
122  };
123 
// 64-bit specialization: the multiplier floor(2^(64+log_div)/divider)-2^64+1
// needs 128-bit intermediates.  Uses the compiler's __uint128_t when
// available on the host; otherwise falls back to Eigen's emulated
// TensorUInt128 arithmetic.
template <typename T>
struct DividerHelper<64, T> {
  static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint64_t computeMultiplier(const int log_div, const T divider) {
#if EIGEN_HAS_BUILTIN_INT128 && !defined(EIGEN_GPU_COMPILE_PHASE) && !defined(SYCL_DEVICE_ONLY)
    return static_cast<uint64_t>((static_cast<__uint128_t>(1) << (64+log_div)) / static_cast<__uint128_t>(divider) - (static_cast<__uint128_t>(1) << 64) + 1);
#else
    // Emulated path: 2^(64+log_div) is TensorUInt128(shift, 0), i.e.
    // (1 << log_div) in the upper limb; divide by the divider, subtract
    // 2^64 (upper limb 1), then add 1 — mirroring the __uint128_t formula.
    const uint64_t shift = 1ULL << log_div;
    TensorUInt128<uint64_t, uint64_t> result = TensorUInt128<uint64_t, static_val<0> >(shift, 0) / TensorUInt128<static_val<0>, uint64_t>(divider)
                                             - TensorUInt128<static_val<1>, static_val<0> >(1, 0)
                                             + TensorUInt128<static_val<0>, static_val<1> >(1);
    // Truncates to the low 64 bits of the emulated 128-bit result.
    return static_cast<uint64_t>(result);
#endif
  }
};
138 
// Divides integers by a divisor that is fixed at construction time, using a
// precomputed multiplier plus two shifts instead of a hardware divide
// (multiply-and-shift scheme; cf. Hacker's Delight ch. 10).
template <typename T, bool div_gt_one = false>
struct TensorIntDivisor {
 public:
  // Default state is all-zero magic values; construct with a real divider
  // before calling divide().
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorIntDivisor() {
    multiplier = 0;
    shift1 = 0;
    shift2 = 0;
  }

  // Must have 0 < divider < 2^31. This is relaxed to
  // 0 < divider < 2^63 when using 64-bit indices on platforms that support
  // the __uint128_t type.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorIntDivisor(const T divider) {
    // N = bit width of the unsigned working type (32 or 64).
    const int N = DividerTraits<T>::N;
    eigen_assert(static_cast<typename UnsignedTraits<T>::type>(divider) < NumTraits<UnsignedType>::highest()/2);
    eigen_assert(divider > 0);

    // fast ln2
    // N - clz(divider) gives ceil(log2(divider)) for non-powers of two.
    const int leading_zeros = count_leading_zeros(static_cast<UnsignedType>(divider));
    int log_div = N - leading_zeros;
    // if divider is a power of two then log_div is 1 more than it should be.
    if ((static_cast<typename UnsignedTraits<T>::type>(1) << (log_div-1)) == static_cast<typename UnsignedTraits<T>::type>(divider))
      log_div--;

    // Magic multiplier and the two shift amounts consumed by divide().
    multiplier = DividerHelper<N, T>::computeMultiplier(log_div, divider);
    shift1 = log_div > 1 ? 1 : log_div;
    shift2 = log_div > 1 ? log_div-1 : 0;
  }

  // Must have 0 <= numerator. On platforms that don't support the __uint128_t
  // type numerator should also be less than 2^32-1.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T divide(const T numerator) const {
    eigen_assert(static_cast<typename UnsignedTraits<T>::type>(numerator) < NumTraits<UnsignedType>::highest()/2);
    //eigen_assert(numerator >= 0); // this is implicitly asserted by the line above

    // quotient = (t1 + ((n - t1) >> shift1)) >> shift2, where t1 is the
    // high half of multiplier * numerator.
    UnsignedType t1 = muluh(multiplier, numerator);
    UnsignedType t = (static_cast<UnsignedType>(numerator) - t1) >> shift1;
    return (t1 + t) >> shift2;
  }

 private:
  typedef typename DividerTraits<T>::type UnsignedType;
  UnsignedType multiplier;  // magic multiplier computed by the constructor
  int32_t shift1;           // pre-shift: 0 or 1
  int32_t shift2;           // post-shift: log_div - 1 when log_div > 1, else 0
};
185 
186 
// Optimized version for signed 32 bit integers.
// Derived from Hacker's Delight.
// Only works for divisors strictly greater than one
template <>
class TensorIntDivisor<int32_t, true> {
 public:
  // Default state is zero magic/shift; construct with a real divider
  // (>= 2) before calling divide().
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorIntDivisor() {
    magic = 0;
    shift = 0;
  }
  // Must have 2 <= divider
  EIGEN_DEVICE_FUNC TensorIntDivisor(int32_t divider) {
    eigen_assert(divider >= 2);
    calcMagic(divider);
  }

  // Returns n / divider via one high-multiply and one shift.
  // NOTE(review): n is declared int32_t but multiplied as unsigned;
  // presumably callers only pass non-negative n — confirm at call sites.
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE int divide(const int32_t n) const {
#ifdef EIGEN_GPU_COMPILE_PHASE
    return (__umulhi(magic, n) >> shift);
#elif defined(SYCL_DEVICE_ONLY)
    return (cl::sycl::mul_hi(magic, static_cast<uint32_t>(n)) >> shift);
#else
    // Portable fallback: 64-bit product, keep the high 32 bits, then shift.
    uint64_t v = static_cast<uint64_t>(magic) * static_cast<uint64_t>(n);
    return (static_cast<uint32_t>(v >> 32) >> shift);
#endif
  }

 private:
  // Compute the magic numbers. See Hacker's Delight section 10 for an in
  // depth explanation.
  EIGEN_DEVICE_FUNC void calcMagic(int32_t d) {
    const unsigned two31 = 0x80000000;  // 2**31.
    unsigned ad = d;
    unsigned t = two31 + (ad >> 31);
    unsigned anc = t - 1 - t%ad;   // Absolute value of nc.
    int p = 31;                    // Init. p.
    unsigned q1 = two31/anc;       // Init. q1 = 2**p/|nc|.
    unsigned r1 = two31 - q1*anc;  // Init. r1 = rem(2**p, |nc|).
    unsigned q2 = two31/ad;        // Init. q2 = 2**p/|d|.
    unsigned r2 = two31 - q2*ad;   // Init. r2 = rem(2**p, |d|).
    unsigned delta = 0;
    // Increase p until the magic number is exact for every dividend.
    do {
      p = p + 1;
      q1 = 2*q1;        // Update q1 = 2**p/|nc|.
      r1 = 2*r1;        // Update r1 = rem(2**p, |nc|).
      if (r1 >= anc) {  // (Must be an unsigned
        q1 = q1 + 1;    // comparison here).
        r1 = r1 - anc;}
      q2 = 2*q2;        // Update q2 = 2**p/|d|.
      r2 = 2*r2;        // Update r2 = rem(2**p, |d|).
      if (r2 >= ad) {   // (Must be an unsigned
        q2 = q2 + 1;    // comparison here).
        r2 = r2 - ad;}
      delta = ad - r2;
    } while (q1 < delta || (q1 == delta && r1 == 0));

    magic = (unsigned)(q2 + 1);
    shift = p - 32;  // Shift applied after the high-multiply in divide().
  }

  uint32_t magic;  // Magic multiplier.
  int32_t shift;   // Post-multiply shift amount.
};
250 
251 
252 template <typename T, bool div_gt_one>
253 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T operator / (const T& numerator, const TensorIntDivisor<T, div_gt_one>& divisor) {
254  return divisor.divide(numerator);
255 }
256 
257 
258 } // end namespace internal
259 } // end namespace Eigen
260 
261 #endif // EIGEN_CXX11_TENSOR_TENSOR_INTDIV_H
// NOTE: Doxygen cross-reference residue removed here (member-index entries
// such as "ArrayXXi a", empty macro stubs, and std type-alias listings).
// These lines were navigation artifacts of the generated documentation and
// are not part of the original TensorIntDiv.h source.