CoreEvaluators.h
1 // This file is part of Eigen, a lightweight C++ template library
2 // for linear algebra.
3 //
4 // Copyright (C) 2011 Benoit Jacob <jacob.benoit.1@gmail.com>
5 // Copyright (C) 2011-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
6 // Copyright (C) 2011-2012 Jitse Niesen <jitse@maths.leeds.ac.uk>
7 //
8 // This Source Code Form is subject to the terms of the Mozilla
9 // Public License v. 2.0. If a copy of the MPL was not distributed
10 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
11 
12 
13 #ifndef EIGEN_COREEVALUATORS_H
14 #define EIGEN_COREEVALUATORS_H
15 
16 namespace Eigen {
17 
18 namespace internal {
19 
20 // This class returns the evaluator kind from the expression storage kind.
21 // Default assumes index based accessors
22 template<typename StorageKind>
23 struct storage_kind_to_evaluator_kind {
24  typedef IndexBased Kind;
25 };
26 
27 // This class returns the evaluator shape from the expression storage kind.
28 // It can be Dense, Sparse, Triangular, Diagonal, SelfAdjoint, Band, etc.
29 template<typename StorageKind> struct storage_kind_to_shape;
30 
31 template<> struct storage_kind_to_shape<Dense> { typedef DenseShape Shape; };
32 template<> struct storage_kind_to_shape<SolverStorage> { typedef SolverShape Shape; };
33 template<> struct storage_kind_to_shape<PermutationStorage> { typedef PermutationShape Shape; };
34 template<> struct storage_kind_to_shape<TranspositionsStorage> { typedef TranspositionsShape Shape; };
35 
36 // Evaluators have to be specialized with respect to various criteria such as:
37 // - storage/structure/shape
38 // - scalar type
39 // - etc.
40 // Therefore, we need specializations of evaluator providing additional template arguments for each kind of evaluator.
41 // We currently distinguish the following kinds of evaluators:
42 // - unary_evaluator for expressions taking only one argument (CwiseUnaryOp, CwiseUnaryView, Transpose, MatrixWrapper, ArrayWrapper, Reverse, Replicate)
43 // - binary_evaluator for expressions taking two arguments (CwiseBinaryOp)
44 // - ternary_evaluator for expressions taking three arguments (CwiseTernaryOp)
45 // - product_evaluator for linear algebra products (Product); special case of binary_evaluator because it requires additional tags for dispatching.
46 // - mapbase_evaluator for Map, Block, Ref
47 // - block_evaluator for Block (special dispatching to a mapbase_evaluator or unary_evaluator)
48 
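// Illustrative usage sketch (not part of the original header): this is roughly how an
// expression is consumed through its evaluator; `a` and `b` are placeholder matrices.
//
//   Eigen::MatrixXd a(3,3), b(3,3);
//   a.setRandom(); b.setRandom();
//   auto xpr = a + b;                              // CwiseBinaryOp expression, nothing computed yet
//   internal::evaluator<decltype(xpr)> eval(xpr);  // dispatched to binary_evaluator via the traits below
//   double v = eval.coeff(1,2);                    // computes a(1,2) + b(1,2) on demand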
49 template< typename T,
50  typename Arg1Kind = typename evaluator_traits<typename T::Arg1>::Kind,
51  typename Arg2Kind = typename evaluator_traits<typename T::Arg2>::Kind,
52  typename Arg3Kind = typename evaluator_traits<typename T::Arg3>::Kind,
53  typename Arg1Scalar = typename traits<typename T::Arg1>::Scalar,
54  typename Arg2Scalar = typename traits<typename T::Arg2>::Scalar,
55  typename Arg3Scalar = typename traits<typename T::Arg3>::Scalar> struct ternary_evaluator;
56 
57 template< typename T,
58  typename LhsKind = typename evaluator_traits<typename T::Lhs>::Kind,
59  typename RhsKind = typename evaluator_traits<typename T::Rhs>::Kind,
60  typename LhsScalar = typename traits<typename T::Lhs>::Scalar,
61  typename RhsScalar = typename traits<typename T::Rhs>::Scalar> struct binary_evaluator;
62 
63 template< typename T,
65  typename Scalar = typename T::Scalar> struct unary_evaluator;
66 
67 // evaluator_traits<T> contains traits for evaluator<T>
68 
69 template<typename T>
70 struct evaluator_traits_base
71 {
72  // by default, get evaluator kind and shape from storage
73  typedef typename storage_kind_to_evaluator_kind<typename traits<T>::StorageKind>::Kind Kind;
74  typedef typename storage_kind_to_shape<typename traits<T>::StorageKind>::Shape Shape;
75 };
76 
77 // Default evaluator traits
78 template<typename T>
79 struct evaluator_traits : public evaluator_traits_base<T>
80 {
81 };
82 
83 template<typename T, typename Shape = typename evaluator_traits<T>::Shape >
84 struct evaluator_assume_aliasing {
85  static const bool value = false;
86 };
87 
88 // By default, we assume a unary expression:
89 template<typename T>
90 struct evaluator : public unary_evaluator<T>
91 {
92  typedef unary_evaluator<T> Base;
93  EIGEN_DEVICE_FUNC explicit evaluator(const T& xpr) : Base(xpr) {}
94 };
95 
96 
97 // TODO: Think about const-correctness
98 template<typename T>
99 struct evaluator<const T>
100  : evaluator<T>
101 {
102  EIGEN_DEVICE_FUNC
103  explicit evaluator(const T& xpr) : evaluator<T>(xpr) {}
104 };
105 
106 // ---------- base class for all evaluators ----------
107 
108 template<typename ExpressionType>
109 struct evaluator_base : public noncopyable
110 {
111  // TODO that's not very nice to have to propagate all these traits. They are currently only needed to handle outer,inner indices.
112  typedef traits<ExpressionType> ExpressionTraits;
113 
114  enum {
115  Alignment = 0
116  };
117 };
118 
119 // -------------------- Matrix and Array --------------------
120 //
121 // evaluator<PlainObjectBase> is a common base class for the
122 // Matrix and Array evaluators.
123 // Here we directly specialize evaluator. This is not really a unary expression, and it is, by definition, dense,
124 // so no need for more sophisticated dispatching.
125 
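// Worked example (illustrative): for a column-major 4x3 MatrixXd, the evaluator below keeps
// m_data = m.data() and m_outerStride = 4, so coeff(1,2) resolves to m_data[1 + 2*4] = m_data[9];
// a row-major 4x3 matrix of the same size would instead read m_data[1*3 + 2].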
126 template<typename Derived>
127 struct evaluator<PlainObjectBase<Derived> >
128  : evaluator_base<Derived>
129 {
130  typedef PlainObjectBase<Derived> PlainObjectType;
131  typedef typename PlainObjectType::Scalar Scalar;
132  typedef typename PlainObjectType::CoeffReturnType CoeffReturnType;
133 
134  enum {
135  IsRowMajor = PlainObjectType::IsRowMajor,
136  IsVectorAtCompileTime = PlainObjectType::IsVectorAtCompileTime,
137  RowsAtCompileTime = PlainObjectType::RowsAtCompileTime,
138  ColsAtCompileTime = PlainObjectType::ColsAtCompileTime,
139 
140  CoeffReadCost = NumTraits<Scalar>::ReadCost,
141  Flags = traits<Derived>::EvaluatorFlags,
142  Alignment = traits<Derived>::Alignment
143  };
144 
145  EIGEN_DEVICE_FUNC evaluator()
146  : m_data(0),
147  m_outerStride(IsVectorAtCompileTime ? 0
148  : int(IsRowMajor) ? ColsAtCompileTime
149  : RowsAtCompileTime)
150  {
151  EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
152  }
153 
154  EIGEN_DEVICE_FUNC explicit evaluator(const PlainObjectType& m)
155  : m_data(m.data()), m_outerStride(IsVectorAtCompileTime ? 0 : m.outerStride())
156  {
157  EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
158  }
159 
160  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
161  CoeffReturnType coeff(Index row, Index col) const
162  {
163  if (IsRowMajor)
164  return m_data[row * m_outerStride.value() + col];
165  else
166  return m_data[row + col * m_outerStride.value()];
167  }
168 
169  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
170  CoeffReturnType coeff(Index index) const
171  {
172  return m_data[index];
173  }
174 
175  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
176  Scalar& coeffRef(Index row, Index col)
177  {
178  if (IsRowMajor)
179  return const_cast<Scalar*>(m_data)[row * m_outerStride.value() + col];
180  else
181  return const_cast<Scalar*>(m_data)[row + col * m_outerStride.value()];
182  }
183 
184  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
185  Scalar& coeffRef(Index index)
186  {
187  return const_cast<Scalar*>(m_data)[index];
188  }
189 
190  template<int LoadMode, typename PacketType>
191  EIGEN_STRONG_INLINE
192  PacketType packet(Index row, Index col) const
193  {
194  if (IsRowMajor)
195  return ploadt<PacketType, LoadMode>(m_data + row * m_outerStride.value() + col);
196  else
197  return ploadt<PacketType, LoadMode>(m_data + row + col * m_outerStride.value());
198  }
199 
200  template<int LoadMode, typename PacketType>
201  EIGEN_STRONG_INLINE
202  PacketType packet(Index index) const
203  {
204  return ploadt<PacketType, LoadMode>(m_data + index);
205  }
206 
207  template<int StoreMode,typename PacketType>
208  EIGEN_STRONG_INLINE
209  void writePacket(Index row, Index col, const PacketType& x)
210  {
211  if (IsRowMajor)
212  return pstoret<Scalar, PacketType, StoreMode>
213  (const_cast<Scalar*>(m_data) + row * m_outerStride.value() + col, x);
214  else
215  return pstoret<Scalar, PacketType, StoreMode>
216  (const_cast<Scalar*>(m_data) + row + col * m_outerStride.value(), x);
217  }
218 
219  template<int StoreMode, typename PacketType>
220  EIGEN_STRONG_INLINE
221  void writePacket(Index index, const PacketType& x)
222  {
223  return pstoret<Scalar, PacketType, StoreMode>(const_cast<Scalar*>(m_data) + index, x);
224  }
225 
226 protected:
227  const Scalar *m_data;
228 
229  // We do not need to know the outer stride for vectors
230  variable_if_dynamic<Index, IsVectorAtCompileTime ? 0
231  : int(IsRowMajor) ? ColsAtCompileTime
232  : RowsAtCompileTime> m_outerStride;
233 };
234 
235 template<typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols>
236 struct evaluator<Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> >
237  : evaluator<PlainObjectBase<Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> > >
238 {
239  typedef Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> XprType;
240 
241  EIGEN_DEVICE_FUNC evaluator() {}
242 
243  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& m)
244  : evaluator<PlainObjectBase<XprType> >(m)
245  { }
246 };
247 
248 template<typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols>
249 struct evaluator<Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> >
250  : evaluator<PlainObjectBase<Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> > >
251 {
252  typedef Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> XprType;
253 
254  EIGEN_DEVICE_FUNC evaluator() {}
255 
256  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& m)
257  : evaluator<PlainObjectBase<XprType> >(m)
258  { }
259 };
260 
261 // -------------------- Transpose --------------------
262 
263 template<typename ArgType>
264 struct unary_evaluator<Transpose<ArgType>, IndexBased>
265  : evaluator_base<Transpose<ArgType> >
266 {
267  typedef Transpose<ArgType> XprType;
268 
269  enum {
270  CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
271  Flags = evaluator<ArgType>::Flags ^ RowMajorBit,
272  Alignment = evaluator<ArgType>::Alignment
273  };
274 
275  EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& t) : m_argImpl(t.nestedExpression()) {}
276 
277  typedef typename XprType::Scalar Scalar;
278  typedef typename XprType::CoeffReturnType CoeffReturnType;
279 
280  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
281  CoeffReturnType coeff(Index row, Index col) const
282  {
283  return m_argImpl.coeff(col, row);
284  }
285 
286  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
287  CoeffReturnType coeff(Index index) const
288  {
289  return m_argImpl.coeff(index);
290  }
291 
292  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
293  Scalar& coeffRef(Index row, Index col)
294  {
295  return m_argImpl.coeffRef(col, row);
296  }
297 
298  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
299  typename XprType::Scalar& coeffRef(Index index)
300  {
301  return m_argImpl.coeffRef(index);
302  }
303 
304  template<int LoadMode, typename PacketType>
305  EIGEN_STRONG_INLINE
306  PacketType packet(Index row, Index col) const
307  {
308  return m_argImpl.template packet<LoadMode,PacketType>(col, row);
309  }
310 
311  template<int LoadMode, typename PacketType>
312  EIGEN_STRONG_INLINE
313  PacketType packet(Index index) const
314  {
315  return m_argImpl.template packet<LoadMode,PacketType>(index);
316  }
317 
318  template<int StoreMode, typename PacketType>
319  EIGEN_STRONG_INLINE
320  void writePacket(Index row, Index col, const PacketType& x)
321  {
322  m_argImpl.template writePacket<StoreMode,PacketType>(col, row, x);
323  }
324 
325  template<int StoreMode, typename PacketType>
326  EIGEN_STRONG_INLINE
327  void writePacket(Index index, const PacketType& x)
328  {
329  m_argImpl.template writePacket<StoreMode,PacketType>(index, x);
330  }
331 
332 protected:
333  evaluator<ArgType> m_argImpl;
334 };
335 
336 // -------------------- CwiseNullaryOp --------------------
337 // Like Matrix and Array, this is not really a unary expression, so we directly specialize evaluator.
338 // Likewise, there is no need for more sophisticated dispatching here.
339 
340 template<typename Scalar,typename NullaryOp,
341  bool has_nullary = has_nullary_operator<NullaryOp>::value,
342  bool has_unary = has_unary_operator<NullaryOp>::value,
343  bool has_binary = has_binary_operator<NullaryOp>::value>
344 struct nullary_wrapper
345 {
346  template <typename IndexType>
347  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const { return op(i,j); }
348  template <typename IndexType>
349  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const { return op(i); }
350 
351  template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const { return op.template packetOp<T>(i,j); }
352  template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const { return op.template packetOp<T>(i); }
353 };
354 
355 template<typename Scalar,typename NullaryOp>
356 struct nullary_wrapper<Scalar,NullaryOp,true,false,false>
357 {
358  template <typename IndexType>
359  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType=0, IndexType=0) const { return op(); }
360  template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType=0, IndexType=0) const { return op.template packetOp<T>(); }
361 };
362 
363 template<typename Scalar,typename NullaryOp>
364 struct nullary_wrapper<Scalar,NullaryOp,false,false,true>
365 {
366  template <typename IndexType>
367  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j=0) const { return op(i,j); }
368  template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j=0) const { return op.template packetOp<T>(i,j); }
369 };
370 
371 // We need the following specialization for vector-only functors assigned to a runtime vector,
372 // for instance, using linspace and assigning a RowVectorXd to a MatrixXd or even a row of a MatrixXd.
373 // In this case, i==0 and j is used for the actual iteration.
374 template<typename Scalar,typename NullaryOp>
375 struct nullary_wrapper<Scalar,NullaryOp,false,true,false>
376 {
377  template <typename IndexType>
378  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const {
379  eigen_assert(i==0 || j==0);
380  return op(i+j);
381  }
382  template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const {
383  eigen_assert(i==0 || j==0);
384  return op.template packetOp<T>(i+j);
385  }
386 
387  template <typename IndexType>
388  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const { return op(i); }
389  template <typename T, typename IndexType>
390  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const { return op.template packetOp<T>(i); }
391 };
392 
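// Example of the case handled by the specialization above (sketch): assigning a LinSpaced
// expression to one row of a matrix iterates with i==0 while j runs along the row, so
// op(i+j) == op(j) reaches the unary functor:
//
//   Eigen::MatrixXd M(3,4);
//   M.row(1) = Eigen::RowVectorXd::LinSpaced(4, 0.0, 1.0);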
393 template<typename Scalar,typename NullaryOp>
394 struct nullary_wrapper<Scalar,NullaryOp,false,false,false> {};
395 
396 #if 0 && EIGEN_COMP_MSVC>0
397 // Disable this ugly workaround. This is now handled in traits<Ref>::match,
398 // but this piece of code might still become handy if some other weird compilation
399 // errors pop up again.
400 
401 // MSVC exhibits a weird compilation error when
402 // compiling:
403 // Eigen::MatrixXf A = MatrixXf::Random(3,3);
404 // Ref<const MatrixXf> R = 2.f*A;
405 // and that has_*ary_operator<scalar_constant_op<float>> have not been instantiated yet.
406 // The "problem" is that evaluator<2.f*A> is instantiated by traits<Ref>::match<2.f*A>
407 // and at that time has_*ary_operator<T> returns true regardless of T.
408 // Then nullary_wrapper is badly instantiated as nullary_wrapper<.,.,true,true,true>.
409 // The trick is thus to defer the proper instantiation of nullary_wrapper when coeff(),
410 // and packet() are really instantiated as implemented below:
411 
412 // This is a simple wrapper around Index to enforce the re-instantiation of
413 // has_*ary_operator when needed.
414 template<typename T> struct nullary_wrapper_workaround_msvc {
415  nullary_wrapper_workaround_msvc(const T&);
416  operator T()const;
417 };
418 
419 template<typename Scalar,typename NullaryOp>
420 struct nullary_wrapper<Scalar,NullaryOp,true,true,true>
421 {
422  template <typename IndexType>
423  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const {
424  return nullary_wrapper<Scalar,NullaryOp,
425  has_nullary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
426  has_unary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
427  has_binary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value>()(op,i,j);
428  }
429  template <typename IndexType>
430  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const {
431  return nullary_wrapper<Scalar,NullaryOp,
432  has_nullary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
433  has_unary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
434  has_binary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value>()(op,i);
435  }
436 
437  template <typename T, typename IndexType>
438  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const {
439  return nullary_wrapper<Scalar,NullaryOp,
440  has_nullary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
441  has_unary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
442  has_binary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value>().template packetOp<T>(op,i,j);
443  }
444  template <typename T, typename IndexType>
445  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const {
446  return nullary_wrapper<Scalar,NullaryOp,
447  has_nullary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
448  has_unary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
449  has_binary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value>().template packetOp<T>(op,i);
450  }
451 };
452 #endif // MSVC workaround
453 
454 template<typename NullaryOp, typename PlainObjectType>
455 struct evaluator<CwiseNullaryOp<NullaryOp,PlainObjectType> >
456  : evaluator_base<CwiseNullaryOp<NullaryOp,PlainObjectType> >
457 {
458  typedef CwiseNullaryOp<NullaryOp,PlainObjectType> XprType;
459  typedef typename internal::remove_all<PlainObjectType>::type PlainObjectTypeCleaned;
460 
461  enum {
463 
465  & ( HereditaryBits
469  Alignment = AlignedMax
470  };
471 
472  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& n)
473  : m_functor(n.functor()), m_wrapper()
474  {
475  EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
476  }
477 
478  typedef typename XprType::CoeffReturnType CoeffReturnType;
479 
480  template <typename IndexType>
481  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
482  CoeffReturnType coeff(IndexType row, IndexType col) const
483  {
484  return m_wrapper(m_functor, row, col);
485  }
486 
487  template <typename IndexType>
488  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
489  CoeffReturnType coeff(IndexType index) const
490  {
491  return m_wrapper(m_functor,index);
492  }
493 
494  template<int LoadMode, typename PacketType, typename IndexType>
495  EIGEN_STRONG_INLINE
496  PacketType packet(IndexType row, IndexType col) const
497  {
498  return m_wrapper.template packetOp<PacketType>(m_functor, row, col);
499  }
500 
501  template<int LoadMode, typename PacketType, typename IndexType>
502  EIGEN_STRONG_INLINE
503  PacketType packet(IndexType index) const
504  {
505  return m_wrapper.template packetOp<PacketType>(m_functor, index);
506  }
507 
508 protected:
509  const NullaryOp m_functor;
510  const internal::nullary_wrapper<CoeffReturnType,NullaryOp> m_wrapper;
511 };
512 
513 // -------------------- CwiseUnaryOp --------------------
514 
515 template<typename UnaryOp, typename ArgType>
516 struct unary_evaluator<CwiseUnaryOp<UnaryOp, ArgType>, IndexBased >
517  : evaluator_base<CwiseUnaryOp<UnaryOp, ArgType> >
518 {
519  typedef CwiseUnaryOp<UnaryOp, ArgType> XprType;
520 
521  enum {
523 
527  };
528 
529  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
530  explicit unary_evaluator(const XprType& op)
531  : m_functor(op.functor()),
532  m_argImpl(op.nestedExpression())
533  {
534  EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<UnaryOp>::Cost);
535  EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
536  }
537 
538  typedef typename XprType::CoeffReturnType CoeffReturnType;
539 
540  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
541  CoeffReturnType coeff(Index row, Index col) const
542  {
543  return m_functor(m_argImpl.coeff(row, col));
544  }
545 
546  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
547  CoeffReturnType coeff(Index index) const
548  {
549  return m_functor(m_argImpl.coeff(index));
550  }
551 
552  template<int LoadMode, typename PacketType>
553  EIGEN_STRONG_INLINE
554  PacketType packet(Index row, Index col) const
555  {
556  return m_functor.packetOp(m_argImpl.template packet<LoadMode, PacketType>(row, col));
557  }
558 
559  template<int LoadMode, typename PacketType>
560  EIGEN_STRONG_INLINE
561  PacketType packet(Index index) const
562  {
563  return m_functor.packetOp(m_argImpl.template packet<LoadMode, PacketType>(index));
564  }
565 
566 protected:
567  const UnaryOp m_functor;
568  evaluator<ArgType> m_argImpl;
569 };
570 
571 // -------------------- CwiseTernaryOp --------------------
572 
573 // this is a ternary expression
574 template<typename TernaryOp, typename Arg1, typename Arg2, typename Arg3>
575 struct evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> >
576  : public ternary_evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> >
577 {
578  typedef CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> XprType;
579  typedef ternary_evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> > Base;
580 
581  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr) : Base(xpr) {}
582 };
583 
584 template<typename TernaryOp, typename Arg1, typename Arg2, typename Arg3>
585 struct ternary_evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3>, IndexBased, IndexBased>
586  : evaluator_base<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> >
587 {
588  typedef CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> XprType;
589 
590  enum {
592 
593  Arg1Flags = evaluator<Arg1>::Flags,
594  Arg2Flags = evaluator<Arg2>::Flags,
595  Arg3Flags = evaluator<Arg3>::Flags,
596  SameType = is_same<typename Arg1::Scalar,typename Arg2::Scalar>::value && is_same<typename Arg1::Scalar,typename Arg3::Scalar>::value,
597  StorageOrdersAgree = (int(Arg1Flags)&RowMajorBit)==(int(Arg2Flags)&RowMajorBit) && (int(Arg1Flags)&RowMajorBit)==(int(Arg3Flags)&RowMajorBit),
598  Flags0 = (int(Arg1Flags) | int(Arg2Flags) | int(Arg3Flags)) & (
599  HereditaryBits
600  | (int(Arg1Flags) & int(Arg2Flags) & int(Arg3Flags) &
601  ( (StorageOrdersAgree ? LinearAccessBit : 0)
602  | (functor_traits<TernaryOp>::PacketAccess && StorageOrdersAgree && SameType ? PacketAccessBit : 0)
603  )
604  )
605  ),
606  Flags = (Flags0 & ~RowMajorBit) | (Arg1Flags & RowMajorBit),
607  Alignment = EIGEN_PLAIN_ENUM_MIN(
608  EIGEN_PLAIN_ENUM_MIN(evaluator<Arg1>::Alignment, evaluator<Arg2>::Alignment),
609  evaluator<Arg3>::Alignment)
610  };
611 
612  EIGEN_DEVICE_FUNC explicit ternary_evaluator(const XprType& xpr)
613  : m_functor(xpr.functor()),
614  m_arg1Impl(xpr.arg1()),
615  m_arg2Impl(xpr.arg2()),
616  m_arg3Impl(xpr.arg3())
617  {
618  EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<TernaryOp>::Cost);
619  EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
620  }
621 
622  typedef typename XprType::CoeffReturnType CoeffReturnType;
623 
624  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
625  CoeffReturnType coeff(Index row, Index col) const
626  {
627  return m_functor(m_arg1Impl.coeff(row, col), m_arg2Impl.coeff(row, col), m_arg3Impl.coeff(row, col));
628  }
629 
630  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
631  CoeffReturnType coeff(Index index) const
632  {
633  return m_functor(m_arg1Impl.coeff(index), m_arg2Impl.coeff(index), m_arg3Impl.coeff(index));
634  }
635 
636  template<int LoadMode, typename PacketType>
637  EIGEN_STRONG_INLINE
638  PacketType packet(Index row, Index col) const
639  {
640  return m_functor.packetOp(m_arg1Impl.template packet<LoadMode,PacketType>(row, col),
641  m_arg2Impl.template packet<LoadMode,PacketType>(row, col),
642  m_arg3Impl.template packet<LoadMode,PacketType>(row, col));
643  }
644 
645  template<int LoadMode, typename PacketType>
646  EIGEN_STRONG_INLINE
647  PacketType packet(Index index) const
648  {
649  return m_functor.packetOp(m_arg1Impl.template packet<LoadMode,PacketType>(index),
650  m_arg2Impl.template packet<LoadMode,PacketType>(index),
651  m_arg3Impl.template packet<LoadMode,PacketType>(index));
652  }
653 
654 protected:
655  const TernaryOp m_functor;
656  evaluator<Arg1> m_arg1Impl;
657  evaluator<Arg2> m_arg2Impl;
658  evaluator<Arg3> m_arg3Impl;
659 };
660 
661 // -------------------- CwiseBinaryOp --------------------
662 
663 // this is a binary expression
664 template<typename BinaryOp, typename Lhs, typename Rhs>
665 struct evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
666  : public binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
667 {
668  typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType;
669  typedef binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs> > Base;
670 
671  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr) : Base(xpr) {}
672 };
673 
674 template<typename BinaryOp, typename Lhs, typename Rhs>
675 struct binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs>, IndexBased, IndexBased>
676  : evaluator_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
677 {
678  typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType;
679 
680  enum {
682 
683  LhsFlags = evaluator<Lhs>::Flags,
684  RhsFlags = evaluator<Rhs>::Flags,
685  SameType = is_same<typename Lhs::Scalar,typename Rhs::Scalar>::value,
686  StorageOrdersAgree = (int(LhsFlags)&RowMajorBit)==(int(RhsFlags)&RowMajorBit),
687  Flags0 = (int(LhsFlags) | int(RhsFlags)) & (
688  HereditaryBits
689  | (int(LhsFlags) & int(RhsFlags) &
690  ( (StorageOrdersAgree ? LinearAccessBit : 0)
691  | (functor_traits<BinaryOp>::PacketAccess && StorageOrdersAgree && SameType ? PacketAccessBit : 0)
692  )
693  )
694  ),
695  Flags = (Flags0 & ~RowMajorBit) | (LhsFlags & RowMajorBit),
696  Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator<Lhs>::Alignment,evaluator<Rhs>::Alignment)
697  };
698 
699  EIGEN_DEVICE_FUNC explicit binary_evaluator(const XprType& xpr)
700  : m_functor(xpr.functor()),
701  m_lhsImpl(xpr.lhs()),
702  m_rhsImpl(xpr.rhs())
703  {
704  EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<BinaryOp>::Cost);
705  EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
706  }
707 
708  typedef typename XprType::CoeffReturnType CoeffReturnType;
709 
710  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
711  CoeffReturnType coeff(Index row, Index col) const
712  {
713  return m_functor(m_lhsImpl.coeff(row, col), m_rhsImpl.coeff(row, col));
714  }
715 
716  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
717  CoeffReturnType coeff(Index index) const
718  {
719  return m_functor(m_lhsImpl.coeff(index), m_rhsImpl.coeff(index));
720  }
721 
722  template<int LoadMode, typename PacketType>
723  EIGEN_STRONG_INLINE
724  PacketType packet(Index row, Index col) const
725  {
726  return m_functor.packetOp(m_lhsImpl.template packet<LoadMode,PacketType>(row, col),
727  m_rhsImpl.template packet<LoadMode,PacketType>(row, col));
728  }
729 
730  template<int LoadMode, typename PacketType>
731  EIGEN_STRONG_INLINE
732  PacketType packet(Index index) const
733  {
734  return m_functor.packetOp(m_lhsImpl.template packet<LoadMode,PacketType>(index),
735  m_rhsImpl.template packet<LoadMode,PacketType>(index));
736  }
737 
738 protected:
739  const BinaryOp m_functor;
740  evaluator<Lhs> m_lhsImpl;
741  evaluator<Rhs> m_rhsImpl;
742 };
743 
744 // -------------------- CwiseUnaryView --------------------
745 
746 template<typename UnaryOp, typename ArgType>
747 struct unary_evaluator<CwiseUnaryView<UnaryOp, ArgType>, IndexBased>
748  : evaluator_base<CwiseUnaryView<UnaryOp, ArgType> >
749 {
750  typedef CwiseUnaryView<UnaryOp, ArgType> XprType;
751 
752  enum {
753  CoeffReadCost = evaluator<ArgType>::CoeffReadCost + functor_traits<UnaryOp>::Cost,
754 
755  Flags = (evaluator<ArgType>::Flags & (HereditaryBits | LinearAccessBit | DirectAccessBit)),
756 
757  Alignment = 0 // FIXME it is not very clear why alignment is necessarily lost...
758  };
759 
760  EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& op)
761  : m_unaryOp(op.functor()),
762  m_argImpl(op.nestedExpression())
763  {
764  EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<UnaryOp>::Cost);
765  EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
766  }
767 
768  typedef typename XprType::Scalar Scalar;
769  typedef typename XprType::CoeffReturnType CoeffReturnType;
770 
771  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
772  CoeffReturnType coeff(Index row, Index col) const
773  {
774  return m_unaryOp(m_argImpl.coeff(row, col));
775  }
776 
777  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
778  CoeffReturnType coeff(Index index) const
779  {
780  return m_unaryOp(m_argImpl.coeff(index));
781  }
782 
783  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
784  Scalar& coeffRef(Index row, Index col)
785  {
786  return m_unaryOp(m_argImpl.coeffRef(row, col));
787  }
788 
789  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
790  Scalar& coeffRef(Index index)
791  {
792  return m_unaryOp(m_argImpl.coeffRef(index));
793  }
794 
795 protected:
796  const UnaryOp m_unaryOp;
797  evaluator<ArgType> m_argImpl;
798 };
799 
800 // -------------------- Map --------------------
801 
802 // FIXME perhaps the PlainObjectType could be provided by Derived::PlainObject ?
803 // but that might complicate template specialization
804 template<typename Derived, typename PlainObjectType>
805 struct mapbase_evaluator;
806 
807 template<typename Derived, typename PlainObjectType>
808 struct mapbase_evaluator : evaluator_base<Derived>
809 {
810  typedef Derived XprType;
811  typedef typename XprType::PointerType PointerType;
812  typedef typename XprType::Scalar Scalar;
813  typedef typename XprType::CoeffReturnType CoeffReturnType;
814 
815  enum {
816  IsRowMajor = XprType::RowsAtCompileTime,
817  ColsAtCompileTime = XprType::ColsAtCompileTime,
818  CoeffReadCost = NumTraits<Scalar>::ReadCost
819  };
820 
821  EIGEN_DEVICE_FUNC explicit mapbase_evaluator(const XprType& map)
822  : m_data(const_cast<PointerType>(map.data())),
823  m_innerStride(map.innerStride()),
824  m_outerStride(map.outerStride())
825  {
826  EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(evaluator<Derived>::Flags&PacketAccessBit, internal::inner_stride_at_compile_time<Derived>::ret==1),
827  PACKET_ACCESS_REQUIRES_TO_HAVE_INNER_STRIDE_FIXED_TO_1);
828  EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
829  }
830 
831  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
832  CoeffReturnType coeff(Index row, Index col) const
833  {
834  return m_data[col * colStride() + row * rowStride()];
835  }
836 
837  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
838  CoeffReturnType coeff(Index index) const
839  {
840  return m_data[index * m_innerStride.value()];
841  }
842 
843  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
844  Scalar& coeffRef(Index row, Index col)
845  {
846  return m_data[col * colStride() + row * rowStride()];
847  }
848 
849  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
850  Scalar& coeffRef(Index index)
851  {
852  return m_data[index * m_innerStride.value()];
853  }
854 
855  template<int LoadMode, typename PacketType>
856  EIGEN_STRONG_INLINE
857  PacketType packet(Index row, Index col) const
858  {
859  PointerType ptr = m_data + row * rowStride() + col * colStride();
860  return internal::ploadt<PacketType, LoadMode>(ptr);
861  }
862 
863  template<int LoadMode, typename PacketType>
864  EIGEN_STRONG_INLINE
865  PacketType packet(Index index) const
866  {
867  return internal::ploadt<PacketType, LoadMode>(m_data + index * m_innerStride.value());
868  }
869 
870  template<int StoreMode, typename PacketType>
871  EIGEN_STRONG_INLINE
872  void writePacket(Index row, Index col, const PacketType& x)
873  {
874  PointerType ptr = m_data + row * rowStride() + col * colStride();
875  return internal::pstoret<Scalar, PacketType, StoreMode>(ptr, x);
876  }
877 
878  template<int StoreMode, typename PacketType>
879  EIGEN_STRONG_INLINE
880  void writePacket(Index index, const PacketType& x)
881  {
882  internal::pstoret<Scalar, PacketType, StoreMode>(m_data + index * m_innerStride.value(), x);
883  }
884 protected:
885  EIGEN_DEVICE_FUNC
886  inline Index rowStride() const { return XprType::IsRowMajor ? m_outerStride.value() : m_innerStride.value(); }
887  EIGEN_DEVICE_FUNC
888  inline Index colStride() const { return XprType::IsRowMajor ? m_innerStride.value() : m_outerStride.value(); }
889 
890  PointerType m_data;
891  variable_if_dynamic<Index, XprType::InnerStrideAtCompileTime> m_innerStride;
892  variable_if_dynamic<Index, XprType::OuterStrideAtCompileTime> m_outerStride;
893 };
894 
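// Usage sketch: for a column-major Map with an outer stride, the addressing in
// mapbase_evaluator resolves coeff(i,j) to m_data[j*outerStride + i]. For instance
// (illustrative; `buf`, `rows`, `cols` and `ld` are placeholders):
//
//   Eigen::Map<Eigen::MatrixXd, 0, Eigen::OuterStride<> >
//       m(buf, rows, cols, Eigen::OuterStride<>(ld));
//   // m(i,j) reads buf[j*ld + i]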
895 template<typename PlainObjectType, int MapOptions, typename StrideType>
896 struct evaluator<Map<PlainObjectType, MapOptions, StrideType> >
897  : public mapbase_evaluator<Map<PlainObjectType, MapOptions, StrideType>, PlainObjectType>
898 {
899  typedef Map<PlainObjectType, MapOptions, StrideType> XprType;
900  typedef typename XprType::Scalar Scalar;
901  // TODO: should check for smaller packet types once we can handle multi-sized packet types
902  typedef typename packet_traits<Scalar>::type PacketScalar;
903 
904  enum {
905  InnerStrideAtCompileTime = StrideType::InnerStrideAtCompileTime == 0
906  ? int(PlainObjectType::InnerStrideAtCompileTime)
907  : int(StrideType::InnerStrideAtCompileTime),
908  OuterStrideAtCompileTime = StrideType::OuterStrideAtCompileTime == 0
909  ? int(PlainObjectType::OuterStrideAtCompileTime)
910  : int(StrideType::OuterStrideAtCompileTime),
911  HasNoInnerStride = InnerStrideAtCompileTime == 1,
912  HasNoOuterStride = StrideType::OuterStrideAtCompileTime == 0,
913  HasNoStride = HasNoInnerStride && HasNoOuterStride,
914  IsDynamicSize = PlainObjectType::SizeAtCompileTime==Dynamic,
915 
916  PacketAccessMask = bool(HasNoInnerStride) ? ~int(0) : ~int(PacketAccessBit),
917  LinearAccessMask = bool(HasNoStride) || bool(PlainObjectType::IsVectorAtCompileTime) ? ~int(0) : ~int(LinearAccessBit),
918  Flags = int( evaluator<PlainObjectType>::Flags) & (LinearAccessMask&PacketAccessMask),
919 
920  Alignment = int(MapOptions)&int(AlignedMask)
921  };
922 
923  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& map)
924  : mapbase_evaluator<XprType, PlainObjectType>(map)
925  { }
926 };
927 
928 // -------------------- Ref --------------------
929 
930 template<typename PlainObjectType, int RefOptions, typename StrideType>
931 struct evaluator<Ref<PlainObjectType, RefOptions, StrideType> >
932  : public mapbase_evaluator<Ref<PlainObjectType, RefOptions, StrideType>, PlainObjectType>
933 {
934  typedef Ref<PlainObjectType, RefOptions, StrideType> XprType;
935 
936  enum {
937  Flags = evaluator<Map<PlainObjectType, RefOptions, StrideType> >::Flags,
938  Alignment = evaluator<Map<PlainObjectType, RefOptions, StrideType> >::Alignment
939  };
940 
941  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& ref)
942  : mapbase_evaluator<XprType, PlainObjectType>(ref)
943  { }
944 };
945 
946 // -------------------- Block --------------------
947 
948 template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel,
949  bool HasDirectAccess = internal::has_direct_access<ArgType>::ret> struct block_evaluator;
950 
951 template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
952 struct evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel> >
953  : block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel>
954 {
955  typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType;
956  typedef typename XprType::Scalar Scalar;
957  // TODO: should check for smaller packet types once we can handle multi-sized packet types
958  typedef typename packet_traits<Scalar>::type PacketScalar;
959 
960  enum {
961  CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
962 
963  RowsAtCompileTime = traits<XprType>::RowsAtCompileTime,
964  ColsAtCompileTime = traits<XprType>::ColsAtCompileTime,
965  MaxRowsAtCompileTime = traits<XprType>::MaxRowsAtCompileTime,
966  MaxColsAtCompileTime = traits<XprType>::MaxColsAtCompileTime,
967 
968  ArgTypeIsRowMajor = (int(evaluator<ArgType>::Flags)&RowMajorBit) != 0,
969  IsRowMajor = (MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1) ? 1
970  : (MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1) ? 0
971  : ArgTypeIsRowMajor,
972  HasSameStorageOrderAsArgType = (IsRowMajor == ArgTypeIsRowMajor),
973  InnerSize = IsRowMajor ? int(ColsAtCompileTime) : int(RowsAtCompileTime),
974  InnerStrideAtCompileTime = HasSameStorageOrderAsArgType
975  ? int(inner_stride_at_compile_time<ArgType>::ret)
976  : int(outer_stride_at_compile_time<ArgType>::ret),
977  OuterStrideAtCompileTime = HasSameStorageOrderAsArgType
978  ? int(outer_stride_at_compile_time<ArgType>::ret)
979  : int(inner_stride_at_compile_time<ArgType>::ret),
980  MaskPacketAccessBit = (InnerStrideAtCompileTime == 1) ? PacketAccessBit : 0,
981 
982  FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1 || (InnerPanel && (evaluator<ArgType>::Flags&LinearAccessBit))) ? LinearAccessBit : 0,
983  FlagsRowMajorBit = XprType::Flags&RowMajorBit,
984  Flags0 = evaluator<ArgType>::Flags & ( (HereditaryBits & ~RowMajorBit) |
985  DirectAccessBit |
986  MaskPacketAccessBit),
987  Flags = Flags0 | FlagsLinearAccessBit | FlagsRowMajorBit,
988 
989  PacketAlignment = unpacket_traits<PacketScalar>::alignment,
990  Alignment0 = (InnerPanel && (OuterStrideAtCompileTime!=Dynamic) && (((OuterStrideAtCompileTime * int(sizeof(Scalar))) % int(PacketAlignment)) == 0)) ? int(PacketAlignment) : 0,
991  Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator<ArgType>::Alignment, Alignment0)
992  };
993  typedef block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel> block_evaluator_type;
994  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& block) : block_evaluator_type(block)
995  {
996  EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
997  }
998 };
999 
1000 // no direct-access => dispatch to a unary evaluator
1001 template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
1002 struct block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel, /*HasDirectAccess*/ false>
1003  : unary_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel> >
1004 {
1005  typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType;
1006 
1007  EIGEN_DEVICE_FUNC explicit block_evaluator(const XprType& block)
1008  : unary_evaluator<XprType>(block)
1009  {}
1010 };
1011 
1012 template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
1013 struct unary_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel>, IndexBased>
1014  : evaluator_base<Block<ArgType, BlockRows, BlockCols, InnerPanel> >
1015 {
1016  typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType;
1017 
1018  EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& block)
1019  : m_argImpl(block.nestedExpression()),
1020  m_startRow(block.startRow()),
1021  m_startCol(block.startCol())
1022  { }
1023 
1024  typedef typename XprType::Scalar Scalar;
1025  typedef typename XprType::CoeffReturnType CoeffReturnType;
1026 
1027  enum {
1028  RowsAtCompileTime = XprType::RowsAtCompileTime
1029  };
1030 
1031  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1032  CoeffReturnType coeff(Index row, Index col) const
1033  {
1034  return m_argImpl.coeff(m_startRow.value() + row, m_startCol.value() + col);
1035  }
1036 
1037  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1038  CoeffReturnType coeff(Index index) const
1039  {
1040  return coeff(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? index : 0);
1041  }
1042 
1043  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1044  Scalar& coeffRef(Index row, Index col)
1045  {
1046  return m_argImpl.coeffRef(m_startRow.value() + row, m_startCol.value() + col);
1047  }
1048 
1049  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1050  Scalar& coeffRef(Index index)
1051  {
1052  return coeffRef(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? index : 0);
1053  }
1054 
1055  template<int LoadMode, typename PacketType>
1056  EIGEN_STRONG_INLINE
1057  PacketType packet(Index row, Index col) const
1058  {
1059  return m_argImpl.template packet<LoadMode,PacketType>(m_startRow.value() + row, m_startCol.value() + col);
1060  }
1061 
1062  template<int LoadMode, typename PacketType>
1063  EIGEN_STRONG_INLINE
1064  PacketType packet(Index index) const
1065  {
1066  return packet<LoadMode,PacketType>(RowsAtCompileTime == 1 ? 0 : index,
1067  RowsAtCompileTime == 1 ? index : 0);
1068  }
1069 
1070  template<int StoreMode, typename PacketType>
1071  EIGEN_STRONG_INLINE
1072  void writePacket(Index row, Index col, const PacketType& x)
1073  {
1074  return m_argImpl.template writePacket<StoreMode,PacketType>(m_startRow.value() + row, m_startCol.value() + col, x);
1075  }
1076 
1077  template<int StoreMode, typename PacketType>
1078  EIGEN_STRONG_INLINE
1079  void writePacket(Index index, const PacketType& x)
1080  {
1081  return writePacket<StoreMode,PacketType>(RowsAtCompileTime == 1 ? 0 : index,
1082  RowsAtCompileTime == 1 ? index : 0,
1083  x);
1084  }
1085 
1086 protected:
1087  evaluator<ArgType> m_argImpl;
1090 };
1091 
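// In other words (illustrative sketch): for a block that takes this index-shifting path,
// coefficients are simply offset into the argument's evaluator.
//
//   Eigen::MatrixXd A(8,8);
//   auto B = A.block(2,3,4,4);   // m_startRow==2, m_startCol==3
//   // B(1,1) is read as A(3,4)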
1092 // TODO: This evaluator does not actually use the child evaluator;
1093 // all action is via the data() as returned by the Block expression.
1094 
1095 template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
1096 struct block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel, /* HasDirectAccess */ true>
1097  : mapbase_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel>,
1098  typename Block<ArgType, BlockRows, BlockCols, InnerPanel>::PlainObject>
1099 {
1100  typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType;
1101  typedef typename XprType::Scalar Scalar;
1102 
1103  EIGEN_DEVICE_FUNC explicit block_evaluator(const XprType& block)
1104  : mapbase_evaluator<XprType, typename XprType::PlainObject>(block)
1105  {
1106  // TODO: for the 3.3 release, this should be turned to an internal assertion, but let's keep it as is for the beta lifetime
1107  eigen_assert(((internal::UIntPtr(block.data()) % EIGEN_PLAIN_ENUM_MAX(1,evaluator<XprType>::Alignment)) == 0) && "data is not aligned");
1108  }
1109 };
1110 
1111 
1112 // -------------------- Select --------------------
1113 // NOTE shall we introduce a ternary_evaluator?
1114 
1115 // TODO enable vectorization for Select
1116 template<typename ConditionMatrixType, typename ThenMatrixType, typename ElseMatrixType>
1117 struct evaluator<Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> >
1118  : evaluator_base<Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> >
1119 {
1120  typedef Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> XprType;
1121  enum {
1122  CoeffReadCost = evaluator<ConditionMatrixType>::CoeffReadCost
1123  + EIGEN_PLAIN_ENUM_MAX(evaluator<ThenMatrixType>::CoeffReadCost,
1124  evaluator<ElseMatrixType>::CoeffReadCost),
1125 
1126  Flags = (unsigned int)evaluator<ThenMatrixType>::Flags & evaluator<ElseMatrixType>::Flags & HereditaryBits,
1127 
1128  Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator<ThenMatrixType>::Alignment, evaluator<ElseMatrixType>::Alignment)
1129  };
1130 
1131  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& select)
1132  : m_conditionImpl(select.conditionMatrix()),
1133  m_thenImpl(select.thenMatrix()),
1134  m_elseImpl(select.elseMatrix())
1135  {
1136  EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
1137  }
1138 
1139  typedef typename XprType::CoeffReturnType CoeffReturnType;
1140 
1141  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1142  CoeffReturnType coeff(Index row, Index col) const
1143  {
1144  if (m_conditionImpl.coeff(row, col))
1145  return m_thenImpl.coeff(row, col);
1146  else
1147  return m_elseImpl.coeff(row, col);
1148  }
1149 
1150  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1151  CoeffReturnType coeff(Index index) const
1152  {
1153  if (m_conditionImpl.coeff(index))
1154  return m_thenImpl.coeff(index);
1155  else
1156  return m_elseImpl.coeff(index);
1157  }
1158 
1159 protected:
1160  evaluator<ConditionMatrixType> m_conditionImpl;
1161  evaluator<ThenMatrixType> m_thenImpl;
1162  evaluator<ElseMatrixType> m_elseImpl;
1163 };
1164 
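// Usage sketch: a Select expression is what (condition).select(then, otherwise) builds,
// e.g. (illustrative):
//
//   Eigen::ArrayXd x(5);
//   x.setRandom();
//   Eigen::ArrayXd y = (x > 0).select(x, 0.0);   // y(i) = x(i) > 0 ? x(i) : 0.0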
1165 
1166 // -------------------- Replicate --------------------
1167 
1168 template<typename ArgType, int RowFactor, int ColFactor>
1169 struct unary_evaluator<Replicate<ArgType, RowFactor, ColFactor> >
1170  : evaluator_base<Replicate<ArgType, RowFactor, ColFactor> >
1171 {
1172  typedef Replicate<ArgType, RowFactor, ColFactor> XprType;
1173  typedef typename XprType::CoeffReturnType CoeffReturnType;
1174  enum {
1175  Factor = (RowFactor==Dynamic || ColFactor==Dynamic) ? Dynamic : RowFactor*ColFactor
1176  };
1177  typedef typename internal::nested_eval<ArgType,Factor>::type ArgTypeNested;
1178  typedef typename internal::remove_all<ArgTypeNested>::type ArgTypeNestedCleaned;
1179 
1180  enum {
1181  CoeffReadCost = evaluator<ArgTypeNestedCleaned>::CoeffReadCost,
1182  LinearAccessMask = XprType::IsVectorAtCompileTime ? LinearAccessBit : 0,
1183  Flags = (evaluator<ArgTypeNestedCleaned>::Flags & (HereditaryBits|LinearAccessMask) & ~RowMajorBit) | (traits<XprType>::Flags & RowMajorBit),
1184 
1185  Alignment = evaluator<ArgTypeNestedCleaned>::Alignment
1186  };
1187 
1188  EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& replicate)
1189  : m_arg(replicate.nestedExpression()),
1190  m_argImpl(m_arg),
1191  m_rows(replicate.nestedExpression().rows()),
1192  m_cols(replicate.nestedExpression().cols())
1193  {}
1194 
1195  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1196  CoeffReturnType coeff(Index row, Index col) const
1197  {
1198  // try to avoid using modulo; this is a pure optimization strategy
1199  const Index actual_row = internal::traits<XprType>::RowsAtCompileTime==1 ? 0
1200  : RowFactor==1 ? row
1201  : row % m_rows.value();
1202  const Index actual_col = internal::traits<XprType>::ColsAtCompileTime==1 ? 0
1203  : ColFactor==1 ? col
1204  : col % m_cols.value();
1205 
1206  return m_argImpl.coeff(actual_row, actual_col);
1207  }
1208 
1209  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1210  CoeffReturnType coeff(Index index) const
1211  {
1212  // try to avoid using modulo; this is a pure optimization strategy
1213  const Index actual_index = internal::traits<XprType>::RowsAtCompileTime==1
1214  ? (ColFactor==1 ? index : index%m_cols.value())
1215  : (RowFactor==1 ? index : index%m_rows.value());
1216 
1217  return m_argImpl.coeff(actual_index);
1218  }
1219 
1220  template<int LoadMode, typename PacketType>
1221  EIGEN_STRONG_INLINE
1222  PacketType packet(Index row, Index col) const
1223  {
1224  const Index actual_row = internal::traits<XprType>::RowsAtCompileTime==1 ? 0
1225  : RowFactor==1 ? row
1226  : row % m_rows.value();
1227  const Index actual_col = internal::traits<XprType>::ColsAtCompileTime==1 ? 0
1228  : ColFactor==1 ? col
1229  : col % m_cols.value();
1230 
1231  return m_argImpl.template packet<LoadMode,PacketType>(actual_row, actual_col);
1232  }
1233 
1234  template<int LoadMode, typename PacketType>
1235  EIGEN_STRONG_INLINE
1236  PacketType packet(Index index) const
1237  {
1238  const Index actual_index = internal::traits<XprType>::RowsAtCompileTime==1
1239  ? (ColFactor==1 ? index : index%m_cols.value())
1240  : (RowFactor==1 ? index : index%m_rows.value());
1241 
1242  return m_argImpl.template packet<LoadMode,PacketType>(actual_index);
1243  }
1244 
1245 protected:
1246  const ArgTypeNested m_arg;
1247  evaluator<ArgTypeNestedCleaned> m_argImpl;
1248  const variable_if_dynamic<Index, ArgType::RowsAtCompileTime> m_rows;
1249  const variable_if_dynamic<Index, ArgType::ColsAtCompileTime> m_cols;
1250 };
1251 
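// Index mapping sketch: for R = v.replicate(2,3) with v a length-4 VectorXd, the evaluator
// above reads R(i,j) from v(i % 4); the compile-time special cases (a factor of 1, or a
// compile-time vector argument) skip the modulo.
//
//   Eigen::VectorXd v(4);
//   v.setRandom();
//   Eigen::MatrixXd R = v.replicate(2,3);   // 8x3; R(5,2) == v(1)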
1252 
1253 // -------------------- PartialReduxExpr --------------------
1254 
1255 template< typename ArgType, typename MemberOp, int Direction>
1256 struct evaluator<PartialReduxExpr<ArgType, MemberOp, Direction> >
1257  : evaluator_base<PartialReduxExpr<ArgType, MemberOp, Direction> >
1258 {
1259  typedef PartialReduxExpr<ArgType, MemberOp, Direction> XprType;
1260  typedef typename internal::nested_eval<ArgType,1>::type ArgTypeNested;
1261  typedef typename internal::remove_all<ArgTypeNested>::type ArgTypeNestedCleaned;
1262  typedef typename ArgType::Scalar InputScalar;
1263  typedef typename XprType::Scalar Scalar;
1264  enum {
1265  TraversalSize = Direction==int(Vertical) ? int(ArgType::RowsAtCompileTime) : int(ArgType::ColsAtCompileTime)
1266  };
1267  typedef typename MemberOp::template Cost<InputScalar,int(TraversalSize)> CostOpType;
1268  enum {
1269  CoeffReadCost = TraversalSize==Dynamic ? HugeCost
1270  : TraversalSize * evaluator<ArgType>::CoeffReadCost + int(CostOpType::value),
1271 
1273 
1274  Alignment = 0 // FIXME this will need to be improved once PartialReduxExpr is vectorized
1275  };
1276 
1277  EIGEN_DEVICE_FUNC explicit evaluator(const XprType xpr)
1278  : m_arg(xpr.nestedExpression()), m_functor(xpr.functor())
1279  {
1280  EIGEN_INTERNAL_CHECK_COST_VALUE(TraversalSize==Dynamic ? HugeCost : int(CostOpType::value));
1281  EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
1282  }
1283 
1284  typedef typename XprType::CoeffReturnType CoeffReturnType;
1285 
1286  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1287  const Scalar coeff(Index i, Index j) const
1288  {
1289  if (Direction==Vertical)
1290  return m_functor(m_arg.col(j));
1291  else
1292  return m_functor(m_arg.row(i));
1293  }
1294 
1295  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1296  const Scalar coeff(Index index) const
1297  {
1298  if (Direction==Vertical)
1299  return m_functor(m_arg.col(index));
1300  else
1301  return m_functor(m_arg.row(index));
1302  }
1303 
1304 protected:
1305  typename internal::add_const_on_value_type<ArgTypeNested>::type m_arg;
1306  const MemberOp m_functor;
1307 };
1308 
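// Usage sketch: partial reductions such as colwise()/rowwise() produce a PartialReduxExpr;
// each coefficient re-runs the member functor on one column (Vertical) or one row
// (Horizontal) of the argument, as coded above.
//
//   Eigen::MatrixXd M(3,4);
//   M.setRandom();
//   Eigen::RowVectorXd s = M.colwise().sum();   // s(j) comes from m_functor(m_arg.col(j))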
1309 
1310 // -------------------- MatrixWrapper and ArrayWrapper --------------------
1311 //
1312 // evaluator_wrapper_base<T> is a common base class for the
1313 // MatrixWrapper and ArrayWrapper evaluators.
1314 
1315 template<typename XprType>
1316 struct evaluator_wrapper_base
1317  : evaluator_base<XprType>
1318 {
1319  typedef typename remove_all<typename XprType::NestedExpressionType>::type ArgType;
1320  enum {
1321  CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
1322  Flags = evaluator<ArgType>::Flags,
1323  Alignment = evaluator<ArgType>::Alignment
1324  };
1325 
1326  EIGEN_DEVICE_FUNC explicit evaluator_wrapper_base(const ArgType& arg) : m_argImpl(arg) {}
1327 
1328  typedef typename ArgType::Scalar Scalar;
1329  typedef typename ArgType::CoeffReturnType CoeffReturnType;
1330 
1331  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1332  CoeffReturnType coeff(Index row, Index col) const
1333  {
1334  return m_argImpl.coeff(row, col);
1335  }
1336 
1337  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1338  CoeffReturnType coeff(Index index) const
1339  {
1340  return m_argImpl.coeff(index);
1341  }
1342 
1343  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1344  Scalar& coeffRef(Index row, Index col)
1345  {
1346  return m_argImpl.coeffRef(row, col);
1347  }
1348 
1349  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1350  Scalar& coeffRef(Index index)
1351  {
1352  return m_argImpl.coeffRef(index);
1353  }
1354 
1355  template<int LoadMode, typename PacketType>
1356  EIGEN_STRONG_INLINE
1357  PacketType packet(Index row, Index col) const
1358  {
1359  return m_argImpl.template packet<LoadMode,PacketType>(row, col);
1360  }
1361 
1362  template<int LoadMode, typename PacketType>
1363  EIGEN_STRONG_INLINE
1364  PacketType packet(Index index) const
1365  {
1366  return m_argImpl.template packet<LoadMode,PacketType>(index);
1367  }
1368 
1369  template<int StoreMode, typename PacketType>
1370  EIGEN_STRONG_INLINE
1371  void writePacket(Index row, Index col, const PacketType& x)
1372  {
1373  m_argImpl.template writePacket<StoreMode>(row, col, x);
1374  }
1375 
1376  template<int StoreMode, typename PacketType>
1377  EIGEN_STRONG_INLINE
1378  void writePacket(Index index, const PacketType& x)
1379  {
1380  m_argImpl.template writePacket<StoreMode>(index, x);
1381  }
1382 
1383 protected:
1384  evaluator<ArgType> m_argImpl;
1385 };
1386 
1387 template<typename TArgType>
1388 struct unary_evaluator<MatrixWrapper<TArgType> >
1389  : evaluator_wrapper_base<MatrixWrapper<TArgType> >
1390 {
1391  typedef MatrixWrapper<TArgType> XprType;
1392 
1393  EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& wrapper)
1394  : evaluator_wrapper_base<MatrixWrapper<TArgType> >(wrapper.nestedExpression())
1395  { }
1396 };
1397 
1398 template<typename TArgType>
1399 struct unary_evaluator<ArrayWrapper<TArgType> >
1400  : evaluator_wrapper_base<ArrayWrapper<TArgType> >
1401 {
1402  typedef ArrayWrapper<TArgType> XprType;
1403 
1404  EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& wrapper)
1405  : evaluator_wrapper_base<ArrayWrapper<TArgType> >(wrapper.nestedExpression())
1406  { }
1407 };
1408 
1409 
1410 // -------------------- Reverse --------------------
1411 
1412 // defined in Reverse.h:
1413 template<typename PacketType, bool ReversePacket> struct reverse_packet_cond;
1414 
1415 template<typename ArgType, int Direction>
1416 struct unary_evaluator<Reverse<ArgType, Direction> >
1417  : evaluator_base<Reverse<ArgType, Direction> >
1418 {
1419  typedef Reverse<ArgType, Direction> XprType;
1420  typedef typename XprType::Scalar Scalar;
1421  typedef typename XprType::CoeffReturnType CoeffReturnType;
1422 
1423  enum {
1424  IsRowMajor = XprType::IsRowMajor,
1425  IsColMajor = !IsRowMajor,
1426  ReverseRow = (Direction == Vertical) || (Direction == BothDirections),
1427  ReverseCol = (Direction == Horizontal) || (Direction == BothDirections),
1428  ReversePacket = (Direction == BothDirections)
1429  || ((Direction == Vertical) && IsColMajor)
1430  || ((Direction == Horizontal) && IsRowMajor),
1431 
1432  CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
1433 
1434  // let's enable LinearAccess only with vectorization because of the product overhead
1435  // FIXME enable DirectAccess with negative strides?
1436  Flags0 = evaluator<ArgType>::Flags,
1437  LinearAccess = ( (Direction==BothDirections) && (int(Flags0)&PacketAccessBit) )
1438  || ((ReverseRow && XprType::ColsAtCompileTime==1) || (ReverseCol && XprType::RowsAtCompileTime==1))
1439  ? LinearAccessBit : 0,
1440 
1441  Flags = int(Flags0) & (HereditaryBits | PacketAccessBit | LinearAccess),
1442 
1443  Alignment = 0 // FIXME in some rare cases, Alignment could be preserved, like a Vector4f.
1444  };
1445 
1446  EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& reverse)
1447  : m_argImpl(reverse.nestedExpression()),
1448  m_rows(ReverseRow ? reverse.nestedExpression().rows() : 1),
1449  m_cols(ReverseCol ? reverse.nestedExpression().cols() : 1)
1450  { }
1451 
1452  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1453  CoeffReturnType coeff(Index row, Index col) const
1454  {
1455  return m_argImpl.coeff(ReverseRow ? m_rows.value() - row - 1 : row,
1456  ReverseCol ? m_cols.value() - col - 1 : col);
1457  }
1458 
1459  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1460  CoeffReturnType coeff(Index index) const
1461  {
1462  return m_argImpl.coeff(m_rows.value() * m_cols.value() - index - 1);
1463  }
1464 
1465  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1466  Scalar& coeffRef(Index row, Index col)
1467  {
1468  return m_argImpl.coeffRef(ReverseRow ? m_rows.value() - row - 1 : row,
1469  ReverseCol ? m_cols.value() - col - 1 : col);
1470  }
1471 
1472  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1473  Scalar& coeffRef(Index index)
1474  {
1475  return m_argImpl.coeffRef(m_rows.value() * m_cols.value() - index - 1);
1476  }
1477 
1478  template<int LoadMode, typename PacketType>
1479  EIGEN_STRONG_INLINE
1480  PacketType packet(Index row, Index col) const
1481  {
1482  enum {
1483  PacketSize = unpacket_traits<PacketType>::size,
1484  OffsetRow = ReverseRow && IsColMajor ? PacketSize : 1,
1485  OffsetCol = ReverseCol && IsRowMajor ? PacketSize : 1
1486  };
1487  typedef internal::reverse_packet_cond<PacketType,ReversePacket> reverse_packet;
1488  return reverse_packet::run(m_argImpl.template packet<LoadMode,PacketType>(
1489  ReverseRow ? m_rows.value() - row - OffsetRow : row,
1490  ReverseCol ? m_cols.value() - col - OffsetCol : col));
1491  }
1492 
1493  template<int LoadMode, typename PacketType>
1494  EIGEN_STRONG_INLINE
1495  PacketType packet(Index index) const
1496  {
1497  enum { PacketSize = unpacket_traits<PacketType>::size };
1498  return preverse(m_argImpl.template packet<LoadMode,PacketType>(m_rows.value() * m_cols.value() - index - PacketSize));
1499  }
1500 
1501  template<int LoadMode, typename PacketType>
1502  EIGEN_STRONG_INLINE
1503  void writePacket(Index row, Index col, const PacketType& x)
1504  {
1505  // FIXME we could factorize some code with packet(i,j)
1506  enum {
1507  PacketSize = unpacket_traits<PacketType>::size,
1508  OffsetRow = ReverseRow && IsColMajor ? PacketSize : 1,
1509  OffsetCol = ReverseCol && IsRowMajor ? PacketSize : 1
1510  };
1511  typedef internal::reverse_packet_cond<PacketType,ReversePacket> reverse_packet;
1512  m_argImpl.template writePacket<LoadMode>(
1513  ReverseRow ? m_rows.value() - row - OffsetRow : row,
1514  ReverseCol ? m_cols.value() - col - OffsetCol : col,
1515  reverse_packet::run(x));
1516  }
1517 
1518  template<int LoadMode, typename PacketType>
1519  EIGEN_STRONG_INLINE
1520  void writePacket(Index index, const PacketType& x)
1521  {
1522  enum { PacketSize = unpacket_traits<PacketType>::size };
1523  m_argImpl.template writePacket<LoadMode>
1524  (m_rows.value() * m_cols.value() - index - PacketSize, preverse(x));
1525  }
1526 
1527 protected:
1528  evaluator<ArgType> m_argImpl;
1529 
1530  // If we do not reverse rows, then we do not need to know the number of rows; same for columns
1531  // Nonetheless, in this case it is important to set them to 1 so that the coeff(index) method works fine for vectors.
1532  const variable_if_dynamic<Index, ReverseRow ? ArgType::RowsAtCompileTime : 1> m_rows;
1533  const variable_if_dynamic<Index, ReverseCol ? ArgType::ColsAtCompileTime : 1> m_cols;
1534 };
1535 
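// Index mapping sketch: for R = A.reverse() (both directions) the evaluator above reads
// R(i,j) from A(rows-1-i, cols-1-j); A.colwise().reverse() flips only the row index and
// A.rowwise().reverse() only the column index.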
1536 
1537 // -------------------- Diagonal --------------------
1538 
1539 template<typename ArgType, int DiagIndex>
1540 struct evaluator<Diagonal<ArgType, DiagIndex> >
1541  : evaluator_base<Diagonal<ArgType, DiagIndex> >
1542 {
1543  typedef Diagonal<ArgType, DiagIndex> XprType;
1544 
1545  enum {
1546  CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
1547 
1548  Flags = (unsigned int)(evaluator<ArgType>::Flags & (HereditaryBits | DirectAccessBit) & ~RowMajorBit) | LinearAccessBit,
1549 
1550  Alignment = 0
1551  };
1552 
1553  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& diagonal)
1554  : m_argImpl(diagonal.nestedExpression()),
1555  m_index(diagonal.index())
1556  { }
1557 
1558  typedef typename XprType::Scalar Scalar;
1559  // FIXME having to check whether ArgType is sparse here is not very nice.
1560  typedef typename internal::conditional<!internal::is_same<typename ArgType::StorageKind,Sparse>::value,
1561  typename XprType::CoeffReturnType,Scalar>::type CoeffReturnType;
1562 
1563  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1564  CoeffReturnType coeff(Index row, Index) const
1565  {
1566  return m_argImpl.coeff(row + rowOffset(), row + colOffset());
1567  }
1568 
1569  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1570  CoeffReturnType coeff(Index index) const
1571  {
1572  return m_argImpl.coeff(index + rowOffset(), index + colOffset());
1573  }
1574 
1575  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1576  Scalar& coeffRef(Index row, Index)
1577  {
1578  return m_argImpl.coeffRef(row + rowOffset(), row + colOffset());
1579  }
1580 
1581  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1582  Scalar& coeffRef(Index index)
1583  {
1584  return m_argImpl.coeffRef(index + rowOffset(), index + colOffset());
1585  }
1586 
1587 protected:
1588  evaluator<ArgType> m_argImpl;
1589  const internal::variable_if_dynamicindex<Index, XprType::DiagIndex> m_index;
1590 
1591 private:
1592  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rowOffset() const { return m_index.value() > 0 ? 0 : -m_index.value(); }
1593  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index colOffset() const { return m_index.value() > 0 ? m_index.value() : 0; }
1594 };
1595 
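// Offset sketch: for D = A.diagonal<1>() (first super-diagonal) m_index is 1, so
// rowOffset()==0, colOffset()==1 and D.coeff(i) reads A(i, i+1); for A.diagonal<-1>()
// the offsets swap and D.coeff(i) reads A(i+1, i).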
1596 
1597 //----------------------------------------------------------------------
1598 // deprecated code
1599 //----------------------------------------------------------------------
1600 
1601 // -------------------- EvalToTemp --------------------
1602 
1603 // expression class for evaluating nested expression to a temporary
1604 
1605 template<typename ArgType> class EvalToTemp;
1606 
1607 template<typename ArgType>
1608 struct traits<EvalToTemp<ArgType> >
1609  : public traits<ArgType>
1610 { };
1611 
1612 template<typename ArgType>
1613 class EvalToTemp
1614  : public dense_xpr_base<EvalToTemp<ArgType> >::type
1615 {
1616  public:
1617 
1618  typedef typename dense_xpr_base<EvalToTemp>::type Base;
1619  EIGEN_GENERIC_PUBLIC_INTERFACE(EvalToTemp)
1620 
1621  explicit EvalToTemp(const ArgType& arg)
1622  : m_arg(arg)
1623  { }
1624 
1625  const ArgType& arg() const
1626  {
1627  return m_arg;
1628  }
1629 
1630  Index rows() const
1631  {
1632  return m_arg.rows();
1633  }
1634 
1635  Index cols() const
1636  {
1637  return m_arg.cols();
1638  }
1639 
1640  private:
1641  const ArgType& m_arg;
1642 };
1643 
1644 template<typename ArgType>
1645 struct evaluator<EvalToTemp<ArgType> >
1646  : public evaluator<typename ArgType::PlainObject>
1647 {
1648  typedef EvalToTemp<ArgType> XprType;
1649  typedef typename ArgType::PlainObject PlainObject;
1650  typedef evaluator<PlainObject> Base;
1651 
1652  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr)
1653  : m_result(xpr.arg())
1654  {
1655  ::new (static_cast<Base*>(this)) Base(m_result);
1656  }
1657 
1658  // This constructor is used when nesting an EvalTo evaluator in another evaluator
1659  EIGEN_DEVICE_FUNC evaluator(const ArgType& arg)
1660  : m_result(arg)
1661  {
1662  ::new (static_cast<Base*>(this)) Base(m_result);
1663  }
1664 
1665 protected:
1666  PlainObject m_result;
1667 };
1668 
1669 } // namespace internal
1670 
1671 } // end namespace Eigen
1672 
1673 #endif // EIGEN_COREEVALUATORS_H