#ifndef EIGEN_CXX11_TENSOR_TENSOR_H
#define EIGEN_CXX11_TENSOR_TENSOR_H
template<typename Scalar_, int NumIndices_, int Options_, typename IndexType_>
class Tensor : public TensorBase<Tensor<Scalar_, NumIndices_, Options_, IndexType_> >
{
  public:
    typedef Tensor<Scalar_, NumIndices_, Options_, IndexType_> Self;
    typedef TensorBase<Tensor<Scalar_, NumIndices_, Options_, IndexType_> > Base;
    typedef typename Eigen::internal::nested<Self>::type Nested;
    typedef typename internal::traits<Self>::StorageKind StorageKind;
    typedef typename internal::traits<Self>::Index Index;
    typedef Scalar_ Scalar;
    typedef DSizes<Index, NumIndices_> Dimensions;

    static constexpr int NumIndices = NumIndices_;
    // Tells whether an index collection is a plain array<Index, NumIndices>
    // (or a single integer), as opposed to a custom index container.
    template<typename CustomIndices>
    struct isOfNormalIndex
    {
      static const bool is_array = internal::is_base_of<array<Index, NumIndices>, CustomIndices>::value;
      static const bool is_int   = NumTraits<CustomIndices>::IsInteger;
      static const bool value    = is_array | is_int;
    };
    // Coefficient read access through a variadic index list.
    template<typename... IndexTypes>
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const;
    // Coefficient read access through a custom index container.
    template<typename CustomIndices,
             EIGEN_SFINAE_ENABLE_IF(!(isOfNormalIndex<CustomIndices>::value))>
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& coeff(CustomIndices& indices) const
    {
      return coeff(internal::customIndices2Array<Index, NumIndices>(indices));
    }
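    // Usage sketch (comment only, not part of the header): any container with
    // operator[], e.g. an Eigen vector, can serve as a custom index type when
    // SFINAE support is available.
    //
    //   Eigen::Tensor<float, 4> t(2, 3, 5, 7);
    //   t.setRandom();
    //   Eigen::Matrix<unsigned int, 4, 1> idx;
    //   idx << 1, 2, 4, 1;
    //   float v = t.coeff(idx);  // converted by internal::customIndices2Array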
    // Coefficient write access through a variadic index list.
    template<typename... IndexTypes>
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index firstIndex, Index secondIndex, IndexTypes... otherIndices);
    // Coefficient write access through a custom index container.
    template<typename CustomIndices,
             EIGEN_SFINAE_ENABLE_IF(!(isOfNormalIndex<CustomIndices>::value))>
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(CustomIndices& indices)
    {
      return coeffRef(internal::customIndices2Array<Index, NumIndices>(indices));
    }
    // operator() read access, variadic index list.
    template<typename... IndexTypes>
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const;
    // operator() read access, custom index container...
    template<typename CustomIndices,
             EIGEN_SFINAE_ENABLE_IF(!(isOfNormalIndex<CustomIndices>::value))>
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(CustomIndices& indices) const
    {
      return coeff(internal::customIndices2Array<Index, NumIndices>(indices));
    }

    // ...and normal index array.
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Scalar& operator()(const array<Index, NumIndices>& indices) const
    {
      return coeff(indices);
    }
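    // Usage sketch (comment only, not part of the header): the equivalent
    // read paths through operator() and coeff().
    //
    //   Eigen::Tensor<double, 3> t(2, 3, 4);
    //   t.setZero();
    //   double a = t(1, 2, 3);                           // variadic indices
    //   Eigen::array<Eigen::Index, 3> idx = {{1, 2, 3}};
    //   double b = t(idx);                               // array of indices
    //   double c = t.coeff(idx);                         // same, via coeff()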
    // operator() write access, variadic index list.
    template<typename... IndexTypes>
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices);
    // operator() write access, custom index container.
    template<typename CustomIndices,
             EIGEN_SFINAE_ENABLE_IF(!(isOfNormalIndex<CustomIndices>::value))>
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& operator()(CustomIndices& indices)
    {
      return coeffRef(internal::customIndices2Array<Index, NumIndices>(indices));
    }
    // Construct from a variadic list of dimension sizes.
    template<typename... IndexTypes>
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(Index firstDimension, IndexTypes... otherDimensions)
      : m_storage(firstDimension, otherDimensions...)
    {
      // The number of dimensions used to construct a tensor must equal the rank of the tensor.
      EIGEN_STATIC_ASSERT(sizeof...(otherDimensions) + 1 == NumIndices, YOU_MADE_A_PROGRAMMING_MISTAKE)
    }
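    // Usage sketch (comment only, not part of the header): the static
    // assertion rejects a size list whose length differs from the rank.
    //
    //   Eigen::Tensor<float, 3> ok(2, 3, 4);   // rank 3, three sizes
    //   Eigen::Tensor<float, 3> bad(2, 3);     // compile error: two sizes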
    // Construct by evaluating a read-only tensor expression.
    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(const TensorBase<OtherDerived, ReadOnlyAccessors>& other)
    {
      typedef TensorAssignOp<Tensor, const OtherDerived> Assign;
      Assign assign(*this, other.derived());
      resize(TensorEvaluator<const Assign, DefaultDevice>(assign, DefaultDevice()).dimensions());
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
    }
    // Construct by evaluating a writable tensor expression.
    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor(const TensorBase<OtherDerived, WriteAccessors>& other)
    {
      typedef TensorAssignOp<Tensor, const OtherDerived> Assign;
      Assign assign(*this, other.derived());
      resize(TensorEvaluator<const Assign, DefaultDevice>(assign, DefaultDevice()).dimensions());
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
    }
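    // Usage sketch (comment only, not part of the header): constructing from
    // an expression evaluates it immediately, sizing the new tensor to match.
    //
    //   Eigen::Tensor<float, 2> a(3, 3), b(3, 3);
    //   a.setRandom(); b.setRandom();
    //   Eigen::Tensor<float, 2> c(a + b);      // resized to 3x3, then filled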
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor& operator=(const Tensor& other)
    {
      typedef TensorAssignOp<Tensor, const Tensor> Assign;
      Assign assign(*this, other);
      resize(TensorEvaluator<const Assign, DefaultDevice>(assign, DefaultDevice()).dimensions());
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
      return *this;
    }
    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Tensor& operator=(const OtherDerived& other)
    {
      typedef TensorAssignOp<Tensor, const OtherDerived> Assign;
      Assign assign(*this, other);
      resize(TensorEvaluator<const Assign, DefaultDevice>(assign, DefaultDevice()).dimensions());
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
      return *this;
    }
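    // Usage sketch (comment only, not part of the header): assignment resizes
    // the left-hand side to the expression's dimensions before evaluating.
    //
    //   Eigen::Tensor<float, 2> a(3, 3), d(1, 1);
    //   a.setRandom();
    //   d = a + a;                             // d becomes 3x3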
    EIGEN_DEVICE_FUNC void resize(const array<Index, NumIndices>& dimensions)
    {
      Index size = Index(1);
      for (int i = 0; i < NumIndices; i++) {
        internal::check_rows_cols_for_overflow<Dynamic, Dynamic, Dynamic>::run(size, dimensions[i]);
        size *= dimensions[i];
      }
      #ifdef EIGEN_INITIALIZE_COEFFS
        bool size_changed = size != this->size();
        m_storage.resize(size, dimensions);
        if (size_changed) EIGEN_INITIALIZE_COEFFS_IF_THAT_OPTION_IS_ENABLED
      #else
        m_storage.resize(size, dimensions);
      #endif
    }
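    // Usage sketch (comment only, not part of the header): resize reallocates
    // storage; unlike Matrix::conservativeResize it does not preserve
    // coefficients, and new ones stay uninitialized unless
    // EIGEN_INITIALIZE_COEFFS is defined.
    //
    //   Eigen::Tensor<int, 2> m(2, 2);
    //   m.setConstant(7);
    //   m.resize(5, 7);                        // contents now unspecified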
    template <typename FirstType, typename... OtherTypes>
    EIGEN_DEVICE_FUNC void resize(const Eigen::IndexList<FirstType, OtherTypes...>& dimensions);
    // Resize through a custom dimension container.
    template<typename CustomDimension,
             EIGEN_SFINAE_ENABLE_IF(!(isOfNormalIndex<CustomDimension>::value))>
    EIGEN_DEVICE_FUNC void resize(CustomDimension& dimensions);
#ifndef EIGEN_EMULATE_CXX11_META_H
    template <typename std::ptrdiff_t... Indices>
    EIGEN_DEVICE_FUNC void resize(const Sizes<Indices...>& dimensions);
#else
    template <std::size_t V1, std::size_t V2, std::size_t V3, std::size_t V4, std::size_t V5>
    EIGEN_DEVICE_FUNC void resize(const Sizes<V1, V2, V3, V4, V5>& dimensions);
#endif
    #ifdef EIGEN_TENSOR_PLUGIN
    #include EIGEN_TENSOR_PLUGIN
    #endif
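    // Usage sketch (comment only, not part of the header): the plugin hook
    // splices a user-supplied header into the class body. The member shown is
    // hypothetical, not an Eigen API.
    //
    //   // my_tensor_plugin.h
    //   EIGEN_DEVICE_FUNC Scalar firstCoeff() const { return *this->data(); }
    //
    //   // before including the tensor module:
    //   #define EIGEN_TENSOR_PLUGIN "my_tensor_plugin.h"
    //   #include <unsupported/Eigen/CXX11/Tensor>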
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool checkIndexRange(const array<Index, NumIndices>& indices) const
    {
      using internal::greater_equal_zero_op;
      using internal::logical_and_op;
      using internal::lesser_op;
      return
        // every index must be non-negative...
        internal::array_apply_and_reduce<logical_and_op, greater_equal_zero_op>(indices) &&
        // ...and strictly smaller than the matching dimension
        internal::array_zip_and_reduce<logical_and_op, lesser_op>(indices, m_storage.dimensions());
    }
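    // Equivalent user-side check (sketch, hypothetical helper): every index
    // must be non-negative and strictly smaller than its dimension.
    //
    //   template <typename T, int N>
    //   bool inRange(const Eigen::Tensor<T, N>& t,
    //                const Eigen::array<Eigen::Index, N>& idx) {
    //     for (int i = 0; i < N; ++i)
    //       if (idx[i] < 0 || idx[i] >= t.dimension(i)) return false;
    //     return true;
    //   }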
};

#endif // EIGEN_CXX11_TENSOR_TENSOR_H
// ---------------------------------------------------------------------------
// Member summary (class reference)
// ---------------------------------------------------------------------------

// Types and compile-time constants
Tensor<Scalar_, NumIndices_, Options_, IndexType_>              Self
TensorBase<Tensor<Scalar_, NumIndices_, Options_, IndexType_> > Base
Eigen::internal::nested<Self>::type                             Nested
internal::traits<Self>::StorageKind                             StorageKind
internal::traits<Self>::Index                                   Index
NumTraits<Scalar>::Real                                         RealScalar
Base::CoeffReturnType                                           CoeffReturnType
DSizes<Index, NumIndices_>                                      Dimensions
static constexpr int NumIndices
static constexpr int Options
static constexpr int Layout

// Constructors
Tensor(Index firstDimension, IndexTypes... otherDimensions)
Tensor(const array<Index, NumIndices>& dimensions)
Tensor(const Self& other)
Tensor(const TensorBase<OtherDerived, ReadOnlyAccessors>& other)
Tensor(const TensorBase<OtherDerived, WriteAccessors>& other)

// Assignment
Tensor& operator=(const Tensor& other)
Tensor& operator=(Self&& other)
Tensor& operator=(const OtherDerived& other)

// Geometry and raw data
const Dimensions& dimensions() const
Index dimension(std::size_t n) const
const Scalar* data() const
const Self& base() const

// Coefficient read access
const Scalar& coeff() const                      // rank-0 tensors
const Scalar& coeff(Index index) const
const Scalar& coeff(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const
const Scalar& coeff(const array<Index, NumIndices>& indices) const
const Scalar& coeff(CustomIndices& indices) const
const Scalar& operator()() const                 // rank-0 tensors
const Scalar& operator()(Index index) const
const Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices) const
const Scalar& operator()(const array<Index, NumIndices>& indices) const
const Scalar& operator()(CustomIndices& indices) const
const Scalar& operator[](Index index) const

// Coefficient write access
Scalar& coeffRef(Index index)
Scalar& coeffRef(Index firstIndex, Index secondIndex, IndexTypes... otherIndices)
Scalar& coeffRef(const array<Index, NumIndices>& indices)
Scalar& coeffRef(CustomIndices& indices)
Scalar& operator()(Index index)
Scalar& operator()(Index firstIndex, Index secondIndex, IndexTypes... otherIndices)
Scalar& operator()(const array<Index, NumIndices>& indices)
Scalar& operator()(CustomIndices& indices)
Scalar& operator[](Index index)

// Resizing (existing coefficients are not preserved)
void resize(Index firstDimension, IndexTypes... otherDimensions)
void resize(const array<Index, NumIndices>& dimensions)
void resize(const DSizes<Index, NumIndices>& dimensions)
void resize(const Eigen::IndexList<FirstType, OtherTypes...>& dimensions)
void resize(CustomDimension& dimensions)
void resize(const Sizes<Indices...>& dimensions)

// Internals
bool checkIndexRange(const array<Index, NumIndices>& indices) const
Index linearizedIndex(const array<Index, NumIndices>& indices) const
TensorStorage<Scalar, Dimensions, Options> m_storage
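A minimal end-to-end sketch of the interface summarized above, assuming the
unsupported tensor module header <unsupported/Eigen/CXX11/Tensor> is available:

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  // Rank-3 float tensor with dimensions 2x3x4 (variadic constructor).
  Eigen::Tensor<float, 3> a(2, 3, 4);
  a.setRandom();

  // Expression assignment: the right-hand side is evaluated by
  // TensorExecutor after resizing b to the expression's dimensions.
  Eigen::Tensor<float, 3> b(1, 1, 1);
  b = a + a.constant(1.0f);

  // Coefficient access: variadic indices or an array of indices.
  Eigen::array<Eigen::Index, 3> idx = {{1, 2, 3}};
  std::cout << b(1, 2, 3) << " == " << b(idx) << '\n';

  // resize() reallocates; previous coefficients are not preserved.
  b.resize(4, 4, 4);
  std::cout << "size after resize: " << b.size() << '\n';
  return 0;
}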