10 #ifndef EIGEN_CXX11_TENSOR_TENSOR_FORWARD_DECLARATIONS_H
11 #define EIGEN_CXX11_TENSOR_TENSOR_FORWARD_DECLARATIONS_H
// Strip const from a pointer.
// Only the `return` statement survived the extraction; the signature is
// reconstructed (it also appears verbatim in the trailing index snippets
// of this extract: `T * constCast(const T *data)`).
// NOTE(review): upstream may carry a device-function qualifier macro on
// this function — confirm against the original header.
template <typename T>
inline T* constCast(const T* data) {
  return const_cast<T*>(data);
}
// Compile-time trait: whether a pointer to A can be promoted to a pointer
// to B. Primary template: distinct types do not promote.
// (Fix: fused line numbers removed; missing closing brace restored.)
template <typename A, typename B>
struct Pointer_type_promotion {
  static const bool val = false;
};
45 template<
typename A>
struct Pointer_type_promotion<
A,
A> {
46 static const bool val =
true;
// NOTE(review): extraction artifact — only the opening of this definition is
// visible; the body of TypeConversion (original lines after 48) was elided.
// Do not infer its contents from here; restore from the upstream header.
48 template<
typename A,
typename B>
struct TypeConversion {
54 template<
typename PlainObjectType,
int Options_ = Unaligned,
template <
class>
class MakePointer_ = MakePointer>
class TensorMap;
55 template<
typename Scalar_,
int NumIndices_,
int Options_ = 0,
typename IndexType = DenseIndex>
class Tensor;
56 template<
typename Scalar_,
typename Dimensions,
int Options_ = 0,
typename IndexType = DenseIndex>
class TensorFixedSize;
/// Forward declaration: a reference to a tensor expression, allowing
/// coefficient-level access without full evaluation.
template <typename PlainObjectType>
class TensorRef;
/// Forward declaration: the CRTP base class providing the tensor expression
/// API to all tensor types.
template <typename Derived, int AccessLevel>
class TensorBase;
/// Forward declaration: coefficient-wise binary expression.
template <typename BinaryOp, typename LeftXprType, typename RightXprType>
class TensorCwiseBinaryOp;
/// Forward declaration: coefficient-wise ternary expression.
template <typename TernaryOp, typename Arg1XprType, typename Arg2XprType,
          typename Arg3XprType>
class TensorCwiseTernaryOp;
/// Forward declaration: coefficient-wise select (ternary ?:) expression.
template <typename IfXprType, typename ThenXprType, typename ElseXprType>
class TensorSelectOp;
/// Forward declaration: tensor contraction expression (generalized matrix
/// product over the given contraction Dimensions).
template <typename Dimensions, typename LeftXprType, typename RightXprType,
          typename OutputKernelType>
class TensorContractionOp;
/// Forward declaration: tensor convolution expression.
template <typename Dimensions, typename InputXprType, typename KernelXprType>
class TensorConvolutionOp;
/// Forward declaration: FFT expression (data type and direction selected
/// via the integer template parameters).
template <typename FFT, typename XprType, int FFTDataType, int FFTDirection>
class TensorFFTOp;
/// Forward declaration: patch-extraction expression.
template <typename PatchDim, typename XprType>
class TensorPatchOp;
75 template<DenseIndex Planes, DenseIndex Rows, DenseIndex Cols,
typename XprType>
class TensorVolumePatchOp;
/// Forward declaration: slicing expression (a view of a sub-tensor given
/// start indices and sizes).
template <typename StartIndices, typename Sizes, typename XprType>
class TensorSlicingOp;
/// Forward declaration: dimension-reversal expression.
template <typename ReverseDimensions, typename XprType>
class TensorReverseOp;
/// Forward declaration: padding expression.
template <typename PaddingDimensions, typename XprType>
class TensorPaddingOp;
/// Forward declaration: strided slicing expression (start/stop/stride per
/// dimension). (Fix: the `Strides` parameter name was split across lines
/// by the extraction — "Str/ides" — and has been rejoined.)
template <typename StartIndices, typename StopIndices, typename Strides,
          typename XprType>
class TensorStridingSlicingOp;
/// Forward declaration: assignment expression (lhs = rhs).
template <typename LeftXprType, typename RightXprType>
class TensorAssignOp;
/// Forward declaration: scan (prefix reduction) expression.
template <typename Op, typename XprType>
class TensorScanOp;
/// Forward declaration: trace expression over the given dimensions.
template <typename Dims, typename XprType>
class TensorTraceOp;
/// Forward declaration: user-supplied binary operation on two tensor
/// expressions.
template <typename CustomBinaryFunc, typename LhsXprType, typename RhsXprType>
class TensorCustomBinaryOp;
/// Forward declaration: pseudo-expression whose operator= evaluates its
/// argument on the specified device.
template <typename ExpressionType, typename DeviceType>
class TensorDevice;
/// Forward declaration: pseudo-expression whose operator= evaluates its
/// argument asynchronously on the specified device, invoking DoneCallback
/// on completion.
template <typename ExpressionType, typename DeviceType, typename DoneCallback>
class TensorAsyncDevice;
// Forward declaration: multi-threaded CPU device (defined elsewhere in the
// module). (Fix: fused line number removed.)
struct ThreadPoolDevice;
// NOTE(review): SYCL-only forward declaration. The closing brace(s) of the
// TensorSycl namespace and the matching #endif were elided by the
// extraction (original line numbers jump 110 -> 112); restore from upstream.
109 #ifdef EIGEN_USE_SYCL
110 namespace TensorSycl {
112 template <
typename Evaluator,
typename Op>
class GenericNondeterministicReducer;
// NOTE(review): extraction artifact — the bodies of the IsVectorizable
// primary template (original lines 134-136) and of its GpuDevice
// specialization (original lines after 138) were elided; only the opening
// lines are visible. Do not infer the trait's value from this extract.
132 template <
typename Device,
typename Expression>
133 struct IsVectorizable {
137 template <
typename Expression>
138 struct IsVectorizable<GpuDevice, Expression> {
// NOTE(review): extraction artifact — fragment of a Device/Expression trait
// (original lines 149-154); the struct header between the template parameter
// list and the `BlockAccess` member (original lines 150-153), and everything
// after `BlockAccess =`, were elided. Presumably this is the IsTileable
// trait — TODO confirm against the upstream header.
149 template <
typename Device,
typename Expression>
154 static constexpr
bool BlockAccess =
// NOTE(review): extraction artifact — forward declarations of the executor
// classes. For the first template, the trailing default parameter(s)
// (original lines 164-166) and the `class TensorExecutor;` line itself were
// elided; for TensorAsyncExecutor the parameter on original line 169 is
// missing. Restore both declarations from the upstream header.
162 template <
typename Expression,
typename Device,
163 bool Vectorizable = IsVectorizable<Device, Expression>::value,
167 template <
typename Expression,
typename Device,
typename DoneCallback,
168 bool Vectorizable = IsVectorizable<Device, Expression>::value,
170 class TensorAsyncExecutor;
// --- NOTE(review): the lines below are documentation-index artifacts
// --- (search-result snippets) accidentally appended by the extraction.
// --- They are not part of this header; commented out, verbatim, to keep
// --- the file well-formed without deleting the text.
// SparseMatrix< double > A(n, n)
// #define EIGEN_DEVICE_FUNC
// Pseudo expression providing an operator = that will evaluate its argument asynchronously on the speci...
// Tensor concatenation class.
// Tensor conversion class. This class makes it possible to vectorize type casting operations when the n...
// Pseudo expression providing an operator = that will evaluate its argument on the specified computing ...
// The tensor executor class.
// : TensorContractionSycl.h, provides various tensor contraction kernel for SYCL backend
// T * constCast(const T *data)
// A cost model used to limit the number of threads used for evaluating tensor expression.