TensorReverse.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Navdeep Jaitly <ndjaitly@google.com>
//                    Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CXX11_TENSOR_TENSOR_REVERSE_H
#define EIGEN_CXX11_TENSOR_TENSOR_REVERSE_H
namespace Eigen {

/** \class TensorReverse
  * \ingroup CXX11_Tensor_Module
  *
  * \brief Tensor reverse elements class.
  *
  */
namespace internal {
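
// Expression-template plumbing: the traits, eval and nested specializations
// below let TensorReverseOp participate in Eigen's tensor expression machinery.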
template<typename ReverseDimensions, typename XprType>
struct traits<TensorReverseOp<ReverseDimensions,
                              XprType> > : public traits<XprType>
{
  typedef typename XprType::Scalar Scalar;
  typedef traits<XprType> XprTraits;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprTraits::Index Index;
  typedef typename XprType::Nested Nested;
  typedef typename remove_reference<Nested>::type _Nested;
  static const int NumDimensions = XprTraits::NumDimensions;
  static const int Layout = XprTraits::Layout;
};

template<typename ReverseDimensions, typename XprType>
struct eval<TensorReverseOp<ReverseDimensions, XprType>, Eigen::Dense>
{
  typedef const TensorReverseOp<ReverseDimensions, XprType>& type;
};

template<typename ReverseDimensions, typename XprType>
struct nested<TensorReverseOp<ReverseDimensions, XprType>, 1,
              typename eval<TensorReverseOp<ReverseDimensions, XprType> >::type>
{
  typedef TensorReverseOp<ReverseDimensions, XprType> type;
};

}  // end namespace internal

template<typename ReverseDimensions, typename XprType>
class TensorReverseOp : public TensorBase<TensorReverseOp<ReverseDimensions,
                                          XprType>, WriteAccessors>
{
  public:
    typedef typename Eigen::internal::traits<TensorReverseOp>::Scalar Scalar;
    typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
    typedef typename XprType::CoeffReturnType CoeffReturnType;
    typedef typename Eigen::internal::nested<TensorReverseOp>::type Nested;
    typedef typename Eigen::internal::traits<TensorReverseOp>::StorageKind
                                                                  StorageKind;
    typedef typename Eigen::internal::traits<TensorReverseOp>::Index Index;

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorReverseOp(
        const XprType& expr, const ReverseDimensions& reverse_dims)
        : m_xpr(expr), m_reverse_dims(reverse_dims) { }

    EIGEN_DEVICE_FUNC
    const ReverseDimensions& reverse() const { return m_reverse_dims; }

    EIGEN_DEVICE_FUNC
    const typename internal::remove_all<typename XprType::Nested>::type&
    expression() const { return m_xpr; }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE TensorReverseOp& operator = (const TensorReverseOp& other)
    {
      typedef TensorAssignOp<TensorReverseOp, const TensorReverseOp> Assign;
      Assign assign(*this, other);
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
      return *this;
    }

    template<typename OtherDerived>
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE TensorReverseOp& operator = (const OtherDerived& other)
    {
      typedef TensorAssignOp<TensorReverseOp, const OtherDerived> Assign;
      Assign assign(*this, other);
      internal::TensorExecutor<const Assign, DefaultDevice>::run(assign, DefaultDevice());
      return *this;
    }

  protected:
    typename XprType::Nested m_xpr;
    const ReverseDimensions m_reverse_dims;
};
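
// Usage sketch (illustrative only, not part of the original header): the op is
// normally created through TensorBase::reverse(), which takes a boolean array
// with one entry per dimension; dimensions flagged true are reversed. The
// tensor sizes and variable names below are hypothetical.
//
//   Eigen::Tensor<float, 3> input(2, 3, 7);
//   input.setRandom();
//   Eigen::array<bool, 3> rev{{true, false, true}};   // reverse dims 0 and 2
//   Eigen::Tensor<float, 3> output = input.reverse(rev);
//   // output(i, j, k) == input(1 - i, j, 6 - k)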

// Eval as rvalue
template<typename ReverseDimensions, typename ArgType, typename Device>
struct TensorEvaluator<const TensorReverseOp<ReverseDimensions, ArgType>, Device>
{
  typedef TensorReverseOp<ReverseDimensions, ArgType> XprType;
  typedef typename XprType::Index Index;
  static const int NumDims = internal::array_size<ReverseDimensions>::value;
  typedef DSizes<Index, NumDims> Dimensions;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;

  enum {
    IsAligned = false,
    PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
    Layout = TensorEvaluator<ArgType, Device>::Layout,
    CoordAccess = false,  // to be implemented
    RawAccess = false
  };

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op,
                                                         const Device& device)
      : m_impl(op.expression(), device), m_reverse(op.reverse())
  {
    // Reversing a scalar isn't supported yet. It would be a no-op anyway.
    EIGEN_STATIC_ASSERT((NumDims > 0), YOU_MADE_A_PROGRAMMING_MISTAKE);

    // Compute strides
    m_dimensions = m_impl.dimensions();
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      m_strides[0] = 1;
      for (int i = 1; i < NumDims; ++i) {
        m_strides[i] = m_strides[i-1] * m_dimensions[i-1];
      }
    } else {
      m_strides[NumDims-1] = 1;
      for (int i = NumDims - 2; i >= 0; --i) {
        m_strides[i] = m_strides[i+1] * m_dimensions[i+1];
      }
    }
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  const Dimensions& dimensions() const { return m_dimensions; }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar*) {
    m_impl.evalSubExprsIfNeeded(NULL);
    return true;
  }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
    m_impl.cleanup();
  }

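  // Maps a coefficient index of the reversed (output) tensor to the matching
  // index of the input tensor by peeling off one dimension at a time and
  // mirroring the coordinate whenever that dimension is flagged for reversal.
  // Illustrative example (assumed 2x3 column-major tensor, dim 0 reversed):
  // output index 4 decomposes into coordinates (0, 2); dim 0 mirrors to
  // 2 - 1 - 0 = 1, so the input index is 1 + 2 * 2 = 5.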
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index reverseIndex(
      Index index) const {
    eigen_assert(index < dimensions().TotalSize());
    Index inputIndex = 0;
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      for (int i = NumDims - 1; i > 0; --i) {
        Index idx = index / m_strides[i];
        index -= idx * m_strides[i];
        if (m_reverse[i]) {
          idx = m_dimensions[i] - idx - 1;
        }
        inputIndex += idx * m_strides[i];
      }
      if (m_reverse[0]) {
        inputIndex += (m_dimensions[0] - index - 1);
      } else {
        inputIndex += index;
      }
    } else {
      for (int i = 0; i < NumDims - 1; ++i) {
        Index idx = index / m_strides[i];
        index -= idx * m_strides[i];
        if (m_reverse[i]) {
          idx = m_dimensions[i] - idx - 1;
        }
        inputIndex += idx * m_strides[i];
      }
      if (m_reverse[NumDims-1]) {
        inputIndex += (m_dimensions[NumDims-1] - index - 1);
      } else {
        inputIndex += index;
      }
    }
    return inputIndex;
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(
      Index index) const {
    return m_impl.coeff(reverseIndex(index));
  }

  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  PacketReturnType packet(Index index) const
  {
    EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
    eigen_assert(index+PacketSize-1 < dimensions().TotalSize());

    // TODO(ndjaitly): write a better packing routine that uses
    // local structure.
    EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type
                                                          values[PacketSize];
    for (int i = 0; i < PacketSize; ++i) {
      values[i] = coeff(index+i);
    }
    PacketReturnType rslt = internal::pload<PacketReturnType>(values);
    return rslt;
  }

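  // Per-coefficient cost estimate: each dimension contributes one division,
  // two multiplications and two additions to decompose the index, and every
  // reversed dimension adds two more additions for the mirroring; the cost of
  // evaluating the wrapped expression is added on top.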
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
    double compute_cost = NumDims * (2 * TensorOpCost::AddCost<Index>() +
                                     2 * TensorOpCost::MulCost<Index>() +
                                     TensorOpCost::DivCost<Index>());
    for (int i = 0; i < NumDims; ++i) {
      if (m_reverse[i]) {
        compute_cost += 2 * TensorOpCost::AddCost<Index>();
      }
    }
    return m_impl.costPerCoeff(vectorized) +
           TensorOpCost(0, 0, compute_cost, false /* vectorized */, PacketSize);
  }

  EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }

 protected:
  Dimensions m_dimensions;
  array<Index, NumDims> m_strides;
  TensorEvaluator<ArgType, Device> m_impl;
  ReverseDimensions m_reverse;
};

// Eval as lvalue

template <typename ReverseDimensions, typename ArgType, typename Device>
struct TensorEvaluator<TensorReverseOp<ReverseDimensions, ArgType>, Device>
    : public TensorEvaluator<const TensorReverseOp<ReverseDimensions, ArgType>,
                             Device> {
  typedef TensorEvaluator<const TensorReverseOp<ReverseDimensions, ArgType>,
                          Device> Base;
  typedef TensorReverseOp<ReverseDimensions, ArgType> XprType;
  typedef typename XprType::Index Index;
  static const int NumDims = internal::array_size<ReverseDimensions>::value;
  typedef DSizes<Index, NumDims> Dimensions;

  enum {
    IsAligned = false,
    PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
    Layout = TensorEvaluator<ArgType, Device>::Layout,
    CoordAccess = false,  // to be implemented
    RawAccess = false
  };
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op,
                                                         const Device& device)
      : Base(op, device) {}

  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  static const int PacketSize = internal::unpacket_traits<PacketReturnType>::size;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  const Dimensions& dimensions() const { return this->m_dimensions; }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index) {
    return this->m_impl.coeffRef(this->reverseIndex(index));
  }

  template <int StoreMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  void writePacket(Index index, const PacketReturnType& x) {
    EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
    eigen_assert(index+PacketSize-1 < dimensions().TotalSize());

    // This code is pilfered from TensorMorphing.h
    EIGEN_ALIGN_MAX CoeffReturnType values[PacketSize];
    internal::pstore<CoeffReturnType, PacketReturnType>(values, x);
    for (int i = 0; i < PacketSize; ++i) {
      this->coeffRef(index+i) = values[i];
    }
  }
};
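
// Usage sketch for the lvalue path (illustrative only): because TensorReverseOp
// has write accessors, a reverse view can also appear on the left-hand side of
// an assignment. The sizes and names below are hypothetical.
//
//   Eigen::Tensor<float, 2> dst(4, 5);
//   Eigen::Tensor<float, 2> src(4, 5);
//   src.setRandom();
//   Eigen::array<bool, 2> rev{{false, true}};
//   dst.reverse(rev) = src;   // writes src into dst with dim 1 reversed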

}  // end namespace Eigen

#endif // EIGEN_CXX11_TENSOR_TENSOR_REVERSE_H