#ifndef EIGEN_CXX11_TENSOR_TENSOR_MORPHING_H
#define EIGEN_CXX11_TENSOR_TENSOR_MORPHING_H

template<typename NewDimensions, typename XprType>
struct traits<TensorReshapingOp<NewDimensions, XprType> > : public traits<XprType>
{
  typedef typename XprType::Scalar Scalar;
  typedef traits<XprType> XprTraits;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprTraits::Index Index;
  typedef typename XprType::Nested Nested;
  static const int Layout = XprTraits::Layout;
};
template<typename NewDimensions, typename XprType>
struct eval<TensorReshapingOp<NewDimensions, XprType>, Eigen::Dense>
{
  typedef const TensorReshapingOp<NewDimensions, XprType>& type;
};

template<typename NewDimensions, typename XprType>
struct nested<TensorReshapingOp<NewDimensions, XprType>, 1, typename eval<TensorReshapingOp<NewDimensions, XprType> >::type>
{
  typedef TensorReshapingOp<NewDimensions, XprType> type;
};
template<typename NewDimensions, typename XprType>
class TensorReshapingOp : public TensorBase<TensorReshapingOp<NewDimensions, XprType>, WriteAccessors>
{
  public:
    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorReshapingOp(const XprType& expr, const NewDimensions& dims)
        : m_xpr(expr), m_dims(dims) {}

    EIGEN_DEVICE_FUNC
    const NewDimensions& dimensions() const { return m_dims; }

    EIGEN_DEVICE_FUNC
    const typename internal::remove_all<typename XprType::Nested>::type&
    expression() const { return m_xpr; }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE TensorReshapingOp& operator = (const TensorReshapingOp& other)
    {
      typedef TensorAssignOp<TensorReshapingOp, const TensorReshapingOp> Assign;
      Assign assign(*this, other);
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
      return *this;
    }

    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE TensorReshapingOp& operator = (const OtherDerived& other)
    {
      typedef TensorAssignOp<TensorReshapingOp, const OtherDerived> Assign;
      Assign assign(*this, other);
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
      return *this;
    }

  protected:
    typename XprType::Nested m_xpr;
    const NewDimensions m_dims;
};
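/** Usage sketch (illustrative only, not part of the original header; the tensor
  * names and shapes below are made up):
  * \code
  * Eigen::Tensor<float, 2> input(20, 30);
  * input.setRandom();
  * Eigen::array<Eigen::Index, 3> new_dims{{10, 2, 30}};
  * // reshape() builds a TensorReshapingOp expression; assigning it to a Tensor
  * // evaluates it through the TensorEvaluator defined below. The total number
  * // of coefficients must match (see the eigen_assert in the evaluator).
  * Eigen::Tensor<float, 3> reshaped = input.reshape(new_dims);
  * \endcode
  */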
template<typename NewDimensions, typename ArgType, typename Device>
struct TensorEvaluator<const TensorReshapingOp<NewDimensions, ArgType>, Device>
{
  typedef TensorReshapingOp<NewDimensions, ArgType> XprType;
  typedef NewDimensions Dimensions;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_impl(op.expression(), device), m_dimensions(op.dimensions())
  {
    // The total size of the reshaped tensor must match the total size of the input.
    eigen_assert(internal::array_prod(m_impl.dimensions()) == internal::array_prod(op.dimensions()));
  }

  typedef typename XprType::Index Index;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType* data) {
    return m_impl.evalSubExprsIfNeeded(data);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
    m_impl.cleanup();
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
  {
    return m_impl.coeff(index);
  }

  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
  {
    return m_impl.template packet<LoadMode>(index);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
    return m_impl.costPerCoeff(vectorized);
  }

  EIGEN_DEVICE_FUNC Scalar* data() const { return const_cast<Scalar*>(m_impl.data()); }

 protected:
  TensorEvaluator<ArgType, Device> m_impl;
  NewDimensions m_dimensions;
};
template<typename NewDimensions, typename ArgType, typename Device>
struct TensorEvaluator<TensorReshapingOp<NewDimensions, ArgType>, Device>
  : public TensorEvaluator<const TensorReshapingOp<NewDimensions, ArgType>, Device>
{
  typedef TensorEvaluator<const TensorReshapingOp<NewDimensions, ArgType>, Device> Base;
  typedef TensorReshapingOp<NewDimensions, ArgType> XprType;
  typedef NewDimensions Dimensions;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : Base(op, device)
  { }

  typedef typename XprType::Index Index;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index)
  {
    return this->m_impl.coeffRef(index);
  }

  template <int StoreMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  void writePacket(Index index, const PacketReturnType& x)
  {
    this->m_impl.template writePacket<StoreMode>(index, x);
  }
};
template<typename StartIndices, typename Sizes, typename XprType>
struct traits<TensorSlicingOp<StartIndices, Sizes, XprType> > : public traits<XprType>
{
  typedef typename XprType::Scalar Scalar;
  typedef traits<XprType> XprTraits;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprTraits::Index Index;
  typedef typename XprType::Nested Nested;
  static const int Layout = XprTraits::Layout;
};
template<typename StartIndices, typename Sizes, typename XprType>
struct eval<TensorSlicingOp<StartIndices, Sizes, XprType>, Eigen::Dense>
{
  typedef const TensorSlicingOp<StartIndices, Sizes, XprType>& type;
};

template<typename StartIndices, typename Sizes, typename XprType>
struct nested<TensorSlicingOp<StartIndices, Sizes, XprType>, 1, typename eval<TensorSlicingOp<StartIndices, Sizes, XprType> >::type>
{
  typedef TensorSlicingOp<StartIndices, Sizes, XprType> type;
};
template<typename StartIndices, typename Sizes, typename XprType>
class TensorSlicingOp : public TensorBase<TensorSlicingOp<StartIndices, Sizes, XprType> >
{
  public:
    typedef typename XprType::CoeffReturnType CoeffReturnType;

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorSlicingOp(const XprType& expr, const StartIndices& indices, const Sizes& sizes)
        : m_xpr(expr), m_indices(indices), m_sizes(sizes) {}

    EIGEN_DEVICE_FUNC
    const StartIndices& startIndices() const { return m_indices; }
    EIGEN_DEVICE_FUNC
    const Sizes& sizes() const { return m_sizes; }

    EIGEN_DEVICE_FUNC
    const typename internal::remove_all<typename XprType::Nested>::type&
    expression() const { return m_xpr; }

    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE TensorSlicingOp& operator = (const OtherDerived& other)
    {
      typedef TensorAssignOp<TensorSlicingOp, const OtherDerived> Assign;
      Assign assign(*this, other);
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
      return *this;
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE TensorSlicingOp& operator = (const TensorSlicingOp& other)
    {
      typedef TensorAssignOp<TensorSlicingOp, const TensorSlicingOp> Assign;
      Assign assign(*this, other);
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
      return *this;
    }

  protected:
    typename XprType::Nested m_xpr;
    const StartIndices m_indices;
    const Sizes m_sizes;
};
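/** Usage sketch (illustrative only, not part of the original header):
  * \code
  * Eigen::Tensor<float, 2> input(7, 11);
  * input.setRandom();
  * Eigen::array<Eigen::Index, 2> offsets{{1, 2}};  // start index in each dimension
  * Eigen::array<Eigen::Index, 2> extents{{3, 4}};  // size of the slice in each dimension
  * // slice() builds a TensorSlicingOp; the evaluator below remaps every output
  * // coefficient back into the input via srcCoeff().
  * Eigen::Tensor<float, 2> patch = input.slice(offsets, extents);
  *
  * // Slices are also assignable, which goes through the non-const evaluator:
  * Eigen::Tensor<float, 2> zeros(3, 4);
  * zeros.setZero();
  * input.slice(offsets, extents) = zeros;
  * \endcode
  */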
// Slicing can sometimes be implemented as a plain memcpy of contiguous runs of
// coefficients. This helper decides whether a copy is large enough to be worth
// it on a given device.
template <typename Index, typename Device> struct MemcpyTriggerForSlicing {
  EIGEN_DEVICE_FUNC MemcpyTriggerForSlicing(const Device& device) : threshold_(2 * device.numThreads()) { }
  EIGEN_DEVICE_FUNC bool operator ()(Index val) const { return val > threshold_; }

 private:
  Index threshold_;
};

// It is very expensive to launch a memcpy kernel on a GPU: only use it for
// large copies (> 4 MB).
template <typename Index> struct MemcpyTriggerForSlicing<Index, GpuDevice> {
  EIGEN_DEVICE_FUNC MemcpyTriggerForSlicing(const GpuDevice&) { }
  EIGEN_DEVICE_FUNC bool operator ()(Index val) const { return val > 4*1024*1024; }
};
template<typename StartIndices, typename Sizes, typename ArgType, typename Device>
struct TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, ArgType>, Device>
{
  typedef TensorSlicingOp<StartIndices, Sizes, ArgType> XprType;
  static const int NumDims = internal::array_size<Sizes>::value;

  enum {
    // Alignment can't be guaranteed at compile time since it depends on the
    // slice offsets and sizes.
    IsAligned = false,
    PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
    Layout = TensorEvaluator<ArgType, Device>::Layout,
    RawAccess = false
  };

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_impl(op.expression(), device), m_device(device), m_dimensions(op.sizes()), m_offsets(op.startIndices())
  {
    for (std::size_t i = 0; i < internal::array_size<Dimensions>::value; ++i) {
      eigen_assert(m_impl.dimensions()[i] >= op.sizes()[i] + op.startIndices()[i]);
    }

    const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
    const Sizes& output_dims = op.sizes();
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      m_inputStrides[0] = 1;
      for (int i = 1; i < NumDims; ++i) {
        m_inputStrides[i] = m_inputStrides[i-1] * input_dims[i-1];
      }

      m_outputStrides[0] = 1;
      for (int i = 1; i < NumDims; ++i) {
        m_outputStrides[i] = m_outputStrides[i-1] * output_dims[i-1];
        m_fastOutputStrides[i] = internal::TensorIntDivisor<Index>(m_outputStrides[i]);
      }
    } else {
      m_inputStrides[NumDims-1] = 1;
      for (int i = NumDims - 2; i >= 0; --i) {
        m_inputStrides[i] = m_inputStrides[i+1] * input_dims[i+1];
      }

      m_outputStrides[NumDims-1] = 1;
      for (int i = NumDims - 2; i >= 0; --i) {
        m_outputStrides[i] = m_outputStrides[i+1] * output_dims[i+1];
        m_fastOutputStrides[i] = internal::TensorIntDivisor<Index>(m_outputStrides[i]);
      }
    }
  }
  typedef typename XprType::Index Index;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  typedef Sizes Dimensions;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType* data) {
    m_impl.evalSubExprsIfNeeded(NULL);
    if (!NumTraits<typename internal::remove_const<Scalar>::type>::RequireInitialization
        && data && m_impl.data()) {
      Index contiguous_values = 1;
      if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
        for (int i = 0; i < NumDims; ++i) {
          contiguous_values *= dimensions()[i];
          if (dimensions()[i] != m_impl.dimensions()[i]) {
            break;
          }
        }
      } else {
        for (int i = NumDims-1; i >= 0; --i) {
          contiguous_values *= dimensions()[i];
          if (dimensions()[i] != m_impl.dimensions()[i]) {
            break;
          }
        }
      }
      // Use memcpy if it's going to be faster than the regular coefficient-wise evaluation.
      const MemcpyTriggerForSlicing<Index, Device> trigger(m_device);
      if (trigger(contiguous_values)) {
        Scalar* src = (Scalar*)m_impl.data();
        for (int i = 0; i < internal::array_prod(dimensions()); i += contiguous_values) {
          Index offset = srcCoeff(i);
          m_device.memcpy((void*)(data+i), src+offset, contiguous_values * sizeof(Scalar));
        }
        return false;
      }
    }
    return true;
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
    m_impl.cleanup();
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
  {
    return m_impl.coeff(srcCoeff(index));
  }
  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
  {
    const int packetSize = internal::unpacket_traits<PacketReturnType>::size;
    EIGEN_STATIC_ASSERT((packetSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
    eigen_assert(index+packetSize-1 < internal::array_prod(dimensions()));

    Index inputIndices[] = {0, 0};
    Index indices[] = {index, index + packetSize - 1};
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      for (int i = NumDims - 1; i > 0; --i) {
        const Index idx0 = indices[0] / m_fastOutputStrides[i];
        const Index idx1 = indices[1] / m_fastOutputStrides[i];
        inputIndices[0] += (idx0 + m_offsets[i]) * m_inputStrides[i];
        inputIndices[1] += (idx1 + m_offsets[i]) * m_inputStrides[i];
        indices[0] -= idx0 * m_outputStrides[i];
        indices[1] -= idx1 * m_outputStrides[i];
      }
      inputIndices[0] += (indices[0] + m_offsets[0]);
      inputIndices[1] += (indices[1] + m_offsets[0]);
    } else {
      for (int i = 0; i < NumDims - 1; ++i) {
        const Index idx0 = indices[0] / m_fastOutputStrides[i];
        const Index idx1 = indices[1] / m_fastOutputStrides[i];
        inputIndices[0] += (idx0 + m_offsets[i]) * m_inputStrides[i];
        inputIndices[1] += (idx1 + m_offsets[i]) * m_inputStrides[i];
        indices[0] -= idx0 * m_outputStrides[i];
        indices[1] -= idx1 * m_outputStrides[i];
      }
      inputIndices[0] += (indices[0] + m_offsets[NumDims-1]);
      inputIndices[1] += (indices[1] + m_offsets[NumDims-1]);
    }
    if (inputIndices[1] - inputIndices[0] == packetSize - 1) {
      // The requested coefficients are contiguous in the input: load a whole packet.
      PacketReturnType rslt = m_impl.template packet<Unaligned>(inputIndices[0]);
      return rslt;
    } else {
      // Gather the coefficients one by one and assemble the packet.
      EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[packetSize];
      values[0] = m_impl.coeff(inputIndices[0]);
      values[packetSize-1] = m_impl.coeff(inputIndices[1]);
      for (int i = 1; i < packetSize-1; ++i) {
        values[i] = coeff(index+i);
      }
      PacketReturnType rslt = internal::pload<PacketReturnType>(values);
      return rslt;
    }
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
    return m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, NumDims);
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar* data() const {
    Scalar* result = m_impl.data();
    if (result) {
      Index offset = 0;
      // Direct access is only possible when the slice maps to a contiguous
      // block of the input.
      if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
        for (int i = 0; i < NumDims; ++i) {
          if (m_dimensions[i] != m_impl.dimensions()[i]) {
            offset += m_offsets[i] * m_inputStrides[i];
            for (int j = i+1; j < NumDims; ++j) {
              if (m_dimensions[j] > 1) {
                return NULL;
              }
              offset += m_offsets[j] * m_inputStrides[j];
            }
            break;
          }
        }
      } else {
        for (int i = NumDims - 1; i >= 0; --i) {
          if (m_dimensions[i] != m_impl.dimensions()[i]) {
            offset += m_offsets[i] * m_inputStrides[i];
            for (int j = i-1; j >= 0; --j) {
              if (m_dimensions[j] > 1) {
                return NULL;
              }
              offset += m_offsets[j] * m_inputStrides[j];
            }
            break;
          }
        }
      }
      return result + offset;
    }
    return NULL;
  }
 protected:
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index) const
  {
    Index inputIndex = 0;
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      for (int i = NumDims - 1; i > 0; --i) {
        const Index idx = index / m_fastOutputStrides[i];
        inputIndex += (idx + m_offsets[i]) * m_inputStrides[i];
        index -= idx * m_outputStrides[i];
      }
      inputIndex += (index + m_offsets[0]);
    } else {
      for (int i = 0; i < NumDims - 1; ++i) {
        const Index idx = index / m_fastOutputStrides[i];
        inputIndex += (idx + m_offsets[i]) * m_inputStrides[i];
        index -= idx * m_outputStrides[i];
      }
      inputIndex += (index + m_offsets[NumDims-1]);
    }
    return inputIndex;
  }
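  /** Worked example of the column-major remapping in srcCoeff() above
    * (illustrative only, not part of the original header). Take a 2x2 slice of
    * a 5x4 ColMajor input starting at offsets (1, 2):
    * \code
    * // input strides: {1, 5}, output strides: {1, 2}
    * // output index 3, i.e. coordinate (1, 1) of the slice:
    * //   i = 1: idx = 3 / 2 = 1  ->  inputIndex += (1 + 2) * 5 = 15; index -= 1 * 2 -> 1
    * //   i = 0:                      inputIndex += (1 + 1)     = 2
    * // srcCoeff(3) = 17, which is input coordinate (2, 3) = (1, 1) + offsets (1, 2).
    * \endcode
    */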
  array<Index, NumDims> m_outputStrides;
  array<internal::TensorIntDivisor<Index>, NumDims> m_fastOutputStrides;
  array<Index, NumDims> m_inputStrides;
  TensorEvaluator<ArgType, Device> m_impl;
  const Device& m_device;
  Dimensions m_dimensions;
  const StartIndices m_offsets;
};
template<typename StartIndices, typename Sizes, typename ArgType, typename Device>
struct TensorEvaluator<TensorSlicingOp<StartIndices, Sizes, ArgType>, Device>
  : public TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, ArgType>, Device>
{
  typedef TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, ArgType>, Device> Base;
  typedef TensorSlicingOp<StartIndices, Sizes, ArgType> XprType;
  static const int NumDims = internal::array_size<Sizes>::value;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : Base(op, device)
  { }

  typedef typename XprType::Index Index;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  typedef Sizes Dimensions;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index)
  {
    return this->m_impl.coeffRef(this->srcCoeff(index));
  }
  template <int StoreMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  void writePacket(Index index, const PacketReturnType& x)
  {
    const int packetSize = internal::unpacket_traits<PacketReturnType>::size;
    Index inputIndices[] = {0, 0};
    Index indices[] = {index, index + packetSize - 1};
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      for (int i = NumDims - 1; i > 0; --i) {
        const Index idx0 = indices[0] / this->m_fastOutputStrides[i];
        const Index idx1 = indices[1] / this->m_fastOutputStrides[i];
        inputIndices[0] += (idx0 + this->m_offsets[i]) * this->m_inputStrides[i];
        inputIndices[1] += (idx1 + this->m_offsets[i]) * this->m_inputStrides[i];
        indices[0] -= idx0 * this->m_outputStrides[i];
        indices[1] -= idx1 * this->m_outputStrides[i];
      }
      inputIndices[0] += (indices[0] + this->m_offsets[0]);
      inputIndices[1] += (indices[1] + this->m_offsets[0]);
    } else {
      for (int i = 0; i < NumDims - 1; ++i) {
        const Index idx0 = indices[0] / this->m_fastOutputStrides[i];
        const Index idx1 = indices[1] / this->m_fastOutputStrides[i];
        inputIndices[0] += (idx0 + this->m_offsets[i]) * this->m_inputStrides[i];
        inputIndices[1] += (idx1 + this->m_offsets[i]) * this->m_inputStrides[i];
        indices[0] -= idx0 * this->m_outputStrides[i];
        indices[1] -= idx1 * this->m_outputStrides[i];
      }
      inputIndices[0] += (indices[0] + this->m_offsets[NumDims-1]);
      inputIndices[1] += (indices[1] + this->m_offsets[NumDims-1]);
    }
    if (inputIndices[1] - inputIndices[0] == packetSize - 1) {
      // The destination coefficients are contiguous: store the whole packet at once.
      this->m_impl.template writePacket<StoreMode>(inputIndices[0], x);
    } else {
      // Scatter the packet coefficient by coefficient.
      EIGEN_ALIGN_MAX CoeffReturnType values[packetSize];
      internal::pstore<CoeffReturnType, PacketReturnType>(values, x);
      this->m_impl.coeffRef(inputIndices[0]) = values[0];
      this->m_impl.coeffRef(inputIndices[1]) = values[packetSize-1];
      for (int i = 1; i < packetSize-1; ++i) {
        this->coeffRef(index+i) = values[i];
      }
    }
  }
};
template<typename StartIndices, typename StopIndices, typename Strides, typename XprType>
struct traits<TensorStridingSlicingOp<StartIndices, StopIndices, Strides, XprType> > : public traits<XprType>
{
  typedef typename XprType::Scalar Scalar;
  typedef traits<XprType> XprTraits;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprTraits::Index Index;
  typedef typename XprType::Nested Nested;
  static const int Layout = XprTraits::Layout;
};
template<typename StartIndices, typename StopIndices, typename Strides, typename XprType>
struct eval<TensorStridingSlicingOp<StartIndices, StopIndices, Strides, XprType>, Eigen::Dense>
{
  typedef const TensorStridingSlicingOp<StartIndices, StopIndices, Strides, XprType>& type;
};

template<typename StartIndices, typename StopIndices, typename Strides, typename XprType>
struct nested<TensorStridingSlicingOp<StartIndices, StopIndices, Strides, XprType>, 1, typename eval<TensorStridingSlicingOp<StartIndices, StopIndices, Strides, XprType> >::type>
{
  typedef TensorStridingSlicingOp<StartIndices, StopIndices, Strides, XprType> type;
};
template<typename StartIndices, typename StopIndices, typename Strides, typename XprType>
class TensorStridingSlicingOp : public TensorBase<TensorStridingSlicingOp<StartIndices, StopIndices, Strides, XprType> >
{
  public:
    typedef typename XprType::CoeffReturnType CoeffReturnType;

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorStridingSlicingOp(
        const XprType& expr, const StartIndices& startIndices,
        const StopIndices& stopIndices, const Strides& strides)
        : m_xpr(expr), m_startIndices(startIndices), m_stopIndices(stopIndices),
          m_strides(strides) {}

    EIGEN_DEVICE_FUNC
    const StartIndices& startIndices() const { return m_startIndices; }
    EIGEN_DEVICE_FUNC
    const StopIndices& stopIndices() const { return m_stopIndices; }
    EIGEN_DEVICE_FUNC
    const Strides& strides() const { return m_strides; }

    EIGEN_DEVICE_FUNC
    const typename internal::remove_all<typename XprType::Nested>::type&
    expression() const { return m_xpr; }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE TensorStridingSlicingOp& operator = (const TensorStridingSlicingOp& other)
    {
      typedef TensorAssignOp<TensorStridingSlicingOp, const TensorStridingSlicingOp> Assign;
      Assign assign(*this, other);
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
      return *this;
    }

    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE TensorStridingSlicingOp& operator = (const OtherDerived& other)
    {
      typedef TensorAssignOp<TensorStridingSlicingOp, const OtherDerived> Assign;
      Assign assign(*this, other);
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
      return *this;
    }

  protected:
    typename XprType::Nested m_xpr;
    const StartIndices m_startIndices;
    const StopIndices m_stopIndices;
    const Strides m_strides;
};
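/** Usage sketch (illustrative only, not part of the original header):
  * \code
  * Eigen::Tensor<float, 1> input(10);
  * input.setRandom();
  * Eigen::array<Eigen::Index, 1> start{{7}};
  * Eigen::array<Eigen::Index, 1> stop{{1}};
  * Eigen::array<Eigen::Index, 1> strides{{-2}};
  * // stridedSlice() builds a TensorStridingSlicingOp that selects elements
  * // 7, 5 and 3: the stop index is exclusive and negative strides walk the
  * // dimension backwards (see the clamping logic in the evaluator below).
  * Eigen::Tensor<float, 1> picked = input.stridedSlice(start, stop, strides);
  * \endcode
  */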
template<typename StartIndices, typename StopIndices, typename Strides, typename ArgType, typename Device>
struct TensorEvaluator<const TensorStridingSlicingOp<StartIndices, StopIndices, Strides, ArgType>, Device>
{
  typedef TensorStridingSlicingOp<StartIndices, StopIndices, Strides, ArgType> XprType;
  static const int NumDims = internal::array_size<Strides>::value;

  enum {
    // Alignment can't be guaranteed at compile time since it depends on the
    // slice offsets and sizes.
    IsAligned = false,
    PacketAccess = false,
    Layout = TensorEvaluator<ArgType, Device>::Layout,
    RawAccess = false
  };

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : m_impl(op.expression(), device), m_device(device), m_strides(op.strides())
  {
    // Clamp the start and stop indices to the input dimensions, handling
    // negative strides (which walk the dimension backwards).
    DSizes<Index, NumDims> startIndicesClamped, stopIndicesClamped;
    for (size_t i = 0; i < internal::array_size<Dimensions>::value; ++i) {
      eigen_assert(m_strides[i] != 0 && "0 stride is invalid");
      if (m_strides[i] > 0) {
        startIndicesClamped[i] = clamp(op.startIndices()[i], 0, m_impl.dimensions()[i]);
        stopIndicesClamped[i] = clamp(op.stopIndices()[i], 0, m_impl.dimensions()[i]);
      } else {
        // m_strides[i] < 0, guaranteed by the assert above.
        startIndicesClamped[i] = clamp(op.startIndices()[i], -1, m_impl.dimensions()[i] - 1);
        stopIndicesClamped[i] = clamp(op.stopIndices()[i], -1, m_impl.dimensions()[i] - 1);
      }
      m_startIndices[i] = startIndicesClamped[i];
    }

    // Check for degenerate intervals and compute the output tensor shape.
    bool degenerate = false;
    for (int i = 0; i < NumDims; i++) {
      Index interval = stopIndicesClamped[i] - startIndicesClamped[i];
      if (interval == 0 || ((interval < 0) != (m_strides[i] < 0))) {
        m_dimensions[i] = 0;
        degenerate = true;
      } else {
        m_dimensions[i] = interval / m_strides[i]
                        + (interval % m_strides[i] != 0 ? 1 : 0);
        eigen_assert(m_dimensions[i] >= 0);
      }
    }

    Strides output_dims = m_dimensions;
    const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();

    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      m_inputStrides[0] = m_strides[0];
      m_offsets[0] = startIndicesClamped[0];
      Index previousDimProduct = 1;
      for (int i = 1; i < NumDims; ++i) {
        previousDimProduct *= input_dims[i-1];
        m_inputStrides[i] = previousDimProduct * m_strides[i];
        m_offsets[i] = startIndicesClamped[i] * previousDimProduct;
      }

      m_outputStrides[0] = 1;
      m_fastOutputStrides[0] = internal::TensorIntDivisor<Index>(1);
      for (int i = 1; i < NumDims; ++i) {
        m_outputStrides[i] = m_outputStrides[i-1] * output_dims[i-1];
        // If the tensor is degenerate, pass 1 to avoid a division by zero in TensorIntDivisor.
        m_fastOutputStrides[i] = internal::TensorIntDivisor<Index>(degenerate ? 1 : m_outputStrides[i]);
      }
    } else {
      m_inputStrides[NumDims-1] = m_strides[NumDims-1];
      m_offsets[NumDims-1] = startIndicesClamped[NumDims-1];
      Index previousDimProduct = 1;
      for (int i = NumDims - 2; i >= 0; --i) {
        previousDimProduct *= input_dims[i+1];
        m_inputStrides[i] = previousDimProduct * m_strides[i];
        m_offsets[i] = startIndicesClamped[i] * previousDimProduct;
      }

      m_outputStrides[NumDims-1] = 1;
      m_fastOutputStrides[NumDims-1] = internal::TensorIntDivisor<Index>(1);
      for (int i = NumDims - 2; i >= 0; --i) {
        m_outputStrides[i] = m_outputStrides[i+1] * output_dims[i+1];
        m_fastOutputStrides[i] = internal::TensorIntDivisor<Index>(degenerate ? 1 : m_outputStrides[i]);
      }
    }
    m_block_total_size_max = numext::maxi(static_cast<std::size_t>(1),
                                          device.lastLevelCacheSize() /
                                          sizeof(Scalar));
  }
  typedef typename XprType::Index Index;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  typedef Strides Dimensions;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType*) {
    m_impl.evalSubExprsIfNeeded(NULL);
    return true;
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
    m_impl.cleanup();
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
  {
    return m_impl.coeff(srcCoeff(index));
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
    return m_impl.costPerCoeff(vectorized) + TensorOpCost(0, 0, NumDims);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar* data() const {
    // Strided slices are not contiguous in memory, so raw data access is not available.
    return NULL;
  }
 protected:
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index) const
  {
    Index inputIndex = 0;
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      for (int i = NumDims - 1; i >= 0; --i) {
        const Index idx = index / m_fastOutputStrides[i];
        inputIndex += idx * m_inputStrides[i] + m_offsets[i];
        index -= idx * m_outputStrides[i];
      }
    } else {
      for (int i = 0; i < NumDims; ++i) {
        const Index idx = index / m_fastOutputStrides[i];
        inputIndex += idx * m_inputStrides[i] + m_offsets[i];
        index -= idx * m_outputStrides[i];
      }
    }
    return inputIndex;
  }

  static EIGEN_STRONG_INLINE Index clamp(Index value, Index min, Index max) {
    return numext::maxi(min, numext::mini(max, value));
  }

  array<Index, NumDims> m_outputStrides;
  array<internal::TensorIntDivisor<Index>, NumDims> m_fastOutputStrides;
  array<Index, NumDims> m_inputStrides;
  TensorEvaluator<ArgType, Device> m_impl;
  const Device& m_device;
  DSizes<Index, NumDims> m_startIndices;  // clamped start indices
  DSizes<Index, NumDims> m_dimensions;
  DSizes<Index, NumDims> m_offsets;       // offsets into the flattened input
  const Strides m_strides;
  std::size_t m_block_total_size_max;
};
template<typename StartIndices, typename StopIndices, typename Strides, typename ArgType, typename Device>
struct TensorEvaluator<TensorStridingSlicingOp<StartIndices, StopIndices, Strides, ArgType>, Device>
  : public TensorEvaluator<const TensorStridingSlicingOp<StartIndices, StopIndices, Strides, ArgType>, Device>
{
  typedef TensorEvaluator<const TensorStridingSlicingOp<StartIndices, StopIndices, Strides, ArgType>, Device> Base;
  typedef TensorStridingSlicingOp<StartIndices, StopIndices, Strides, ArgType> XprType;
  static const int NumDims = internal::array_size<Strides>::value;

  enum {
    IsAligned = false,
    PacketAccess = false,
    Layout = TensorEvaluator<ArgType, Device>::Layout,
    RawAccess = false
  };

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
      : Base(op, device)
  { }

  typedef typename XprType::Index Index;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  typedef Strides Dimensions;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(Index index)
  {
    return this->m_impl.coeffRef(this->srcCoeff(index));
  }
};
#endif // EIGEN_CXX11_TENSOR_TENSOR_MORPHING_H