// Eigen/src/SparseCore/SparseSelfAdjointView.h
1 // This file is part of Eigen, a lightweight C++ template library
2 // for linear algebra.
3 //
4 // Copyright (C) 2009-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
5 //
6 // This Source Code Form is subject to the terms of the Mozilla
7 // Public License v. 2.0. If a copy of the MPL was not distributed
8 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9 
10 #ifndef EIGEN_SPARSE_SELFADJOINTVIEW_H
11 #define EIGEN_SPARSE_SELFADJOINTVIEW_H
12 
13 namespace Eigen {
14 
namespace internal {

// The selfadjoint view inherits all compile-time traits (Scalar, StorageIndex,
// flags, sizes, ...) from the matrix expression it wraps.
template<typename MatrixType, unsigned int Mode>
struct traits<SparseSelfAdjointView<MatrixType,Mode> > : traits<MatrixType> {
};

// Copies the SrcMode triangle of mat into the DstMode triangle of _dest,
// applying the symmetric permutation perm (a null pointer means identity).
template<int SrcMode,int DstMode,typename MatrixType,int DestOrder>
void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm = 0);

// Expands the Mode triangle of mat into the full (both triangles) sparse matrix
// _dest, applying the symmetric permutation perm (a null pointer means identity).
template<int Mode,typename MatrixType,int DestOrder>
void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm = 0);

}
42 
43 template<typename MatrixType, unsigned int _Mode> class SparseSelfAdjointView
44  : public EigenBase<SparseSelfAdjointView<MatrixType,_Mode> >
45 {
46  public:
47 
48  enum {
49  Mode = _Mode,
52  };
53 
55  typedef typename MatrixType::Scalar Scalar;
56  typedef typename MatrixType::StorageIndex StorageIndex;
60 
61  explicit inline SparseSelfAdjointView(MatrixType& matrix) : m_matrix(matrix)
62  {
63  eigen_assert(rows()==cols() && "SelfAdjointView is only for squared matrices");
64  }
65 
66  inline Index rows() const { return m_matrix.rows(); }
67  inline Index cols() const { return m_matrix.cols(); }
68 
70  const _MatrixTypeNested& matrix() const { return m_matrix; }
71  typename internal::remove_reference<MatrixTypeNested>::type& matrix() { return m_matrix; }
72 
78  template<typename OtherDerived>
81  {
82  return Product<SparseSelfAdjointView, OtherDerived>(*this, rhs.derived());
83  }
84 
90  template<typename OtherDerived> friend
93  {
94  return Product<OtherDerived, SparseSelfAdjointView>(lhs.derived(), rhs);
95  }
96 
98  template<typename OtherDerived>
101  {
102  return Product<SparseSelfAdjointView,OtherDerived>(*this, rhs.derived());
103  }
104 
106  template<typename OtherDerived> friend
109  {
110  return Product<OtherDerived,SparseSelfAdjointView>(lhs.derived(), rhs);
111  }
112 
121  template<typename DerivedU>
122  SparseSelfAdjointView& rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha = Scalar(1));
123 
125  // TODO implement twists in a more evaluator friendly fashion
127  {
129  }
130 
131  template<typename SrcMatrixType,int SrcMode>
133  {
134  internal::call_assignment_no_alias_no_transpose(*this, permutedMatrix);
135  return *this;
136  }
137 
138  SparseSelfAdjointView& operator=(const SparseSelfAdjointView& src)
139  {
141  return *this = src.twistedBy(pnull);
142  }
143 
144  template<typename SrcMatrixType,unsigned int SrcMode>
146  {
148  return *this = src.twistedBy(pnull);
149  }
150 
151  void resize(Index rows, Index cols)
152  {
153  EIGEN_ONLY_USED_FOR_DEBUG(rows);
154  EIGEN_ONLY_USED_FOR_DEBUG(cols);
155  eigen_assert(rows == this->rows() && cols == this->cols()
156  && "SparseSelfadjointView::resize() does not actually allow to resize.");
157  }
158 
159  protected:
160 
161  MatrixTypeNested m_matrix;
162  //mutable VectorI m_countPerRow;
163  //mutable VectorI m_countPerCol;
164  private:
165  template<typename Dest> void evalTo(Dest &) const;
166 };
167 
168 /***************************************************************************
169 * Implementation of SparseMatrixBase methods
170 ***************************************************************************/
171 
172 template<typename Derived>
173 template<unsigned int UpLo>
174 typename SparseMatrixBase<Derived>::template ConstSelfAdjointViewReturnType<UpLo>::Type SparseMatrixBase<Derived>::selfadjointView() const
175 {
177 }
178 
179 template<typename Derived>
180 template<unsigned int UpLo>
181 typename SparseMatrixBase<Derived>::template SelfAdjointViewReturnType<UpLo>::Type SparseMatrixBase<Derived>::selfadjointView()
182 {
183  return SparseSelfAdjointView<Derived, UpLo>(derived());
184 }
185 
186 /***************************************************************************
187 * Implementation of SparseSelfAdjointView methods
188 ***************************************************************************/
189 
190 template<typename MatrixType, unsigned int Mode>
191 template<typename DerivedU>
194 {
196  if(alpha==Scalar(0))
197  m_matrix = tmp.template triangularView<Mode>();
198  else
199  m_matrix += alpha * tmp.template triangularView<Mode>();
200 
201  return *this;
202 }
203 
204 namespace internal {
205 
206 // TODO currently a selfadjoint expression has the form SelfAdjointView<.,.>
207 // in the future selfadjoint-ness should be defined by the expression traits
208 // such that Transpose<SelfAdjointView<.,.> > is valid. (currently TriangularBase::transpose() is overloaded to make it work)
209 template<typename MatrixType, unsigned int Mode>
210 struct evaluator_traits<SparseSelfAdjointView<MatrixType,Mode> >
211 {
214 };
215 
217 
220 
221 template< typename DstXprType, typename SrcXprType, typename Functor>
222 struct Assignment<DstXprType, SrcXprType, Functor, SparseSelfAdjoint2Sparse>
223 {
224  typedef typename DstXprType::StorageIndex StorageIndex;
225  template<typename DestScalar,int StorageOrder>
227  {
228  internal::permute_symm_to_fullsymm<SrcXprType::Mode>(src.matrix(), dst);
229  }
230 
231  template<typename DestScalar>
233  {
234  // TODO directly evaluate into dst;
235  SparseMatrix<DestScalar,ColMajor,StorageIndex> tmp(dst.rows(),dst.cols());
236  internal::permute_symm_to_fullsymm<SrcXprType::Mode>(src.matrix(), tmp);
237  dst = tmp;
238  }
239 };
240 
241 } // end namespace internal
242 
243 /***************************************************************************
244 * Implementation of sparse self-adjoint time dense matrix
245 ***************************************************************************/
246 
247 namespace internal {
248 
249 template<int Mode, typename SparseLhsType, typename DenseRhsType, typename DenseResType, typename AlphaType>
250 inline void sparse_selfadjoint_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
251 {
252  EIGEN_ONLY_USED_FOR_DEBUG(alpha);
253 
255  typedef typename internal::remove_all<SparseLhsTypeNested>::type SparseLhsTypeNestedCleaned;
257  typedef typename LhsEval::InnerIterator LhsIterator;
258  typedef typename SparseLhsType::Scalar LhsScalar;
259 
260  enum {
261  LhsIsRowMajor = (LhsEval::Flags&RowMajorBit)==RowMajorBit,
262  ProcessFirstHalf =
263  ((Mode&(Upper|Lower))==(Upper|Lower))
264  || ( (Mode&Upper) && !LhsIsRowMajor)
265  || ( (Mode&Lower) && LhsIsRowMajor),
266  ProcessSecondHalf = !ProcessFirstHalf
267  };
268 
269  SparseLhsTypeNested lhs_nested(lhs);
270  LhsEval lhsEval(lhs_nested);
271 
272  // work on one column at once
273  for (Index k=0; k<rhs.cols(); ++k)
274  {
275  for (Index j=0; j<lhs.outerSize(); ++j)
276  {
277  LhsIterator i(lhsEval,j);
278  // handle diagonal coeff
279  if (ProcessSecondHalf)
280  {
281  while (i && i.index()<j) ++i;
282  if(i && i.index()==j)
283  {
284  res(j,k) += alpha * i.value() * rhs(j,k);
285  ++i;
286  }
287  }
288 
289  // premultiplied rhs for scatters
291  // accumulator for partial scalar product
292  typename DenseResType::Scalar res_j(0);
293  for(; (ProcessFirstHalf ? i && i.index() < j : i) ; ++i)
294  {
295  LhsScalar lhs_ij = i.value();
296  if(!LhsIsRowMajor) lhs_ij = numext::conj(lhs_ij);
297  res_j += lhs_ij * rhs(i.index(),k);
298  res(i.index(),k) += numext::conj(lhs_ij) * rhs_j;
299  }
300  res(j,k) += alpha * res_j;
301 
302  // handle diagonal coeff
303  if (ProcessFirstHalf && i && (i.index()==j))
304  res(j,k) += alpha * i.value() * rhs(j,k);
305  }
306  }
307 }
308 
309 
310 template<typename LhsView, typename Rhs, int ProductType>
311 struct generic_product_impl<LhsView, Rhs, SparseSelfAdjointShape, DenseShape, ProductType>
312 : generic_product_impl_base<LhsView, Rhs, generic_product_impl<LhsView, Rhs, SparseSelfAdjointShape, DenseShape, ProductType> >
313 {
314  template<typename Dest>
315  static void scaleAndAddTo(Dest& dst, const LhsView& lhsView, const Rhs& rhs, const typename Dest::Scalar& alpha)
316  {
317  typedef typename LhsView::_MatrixTypeNested Lhs;
318  typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
319  typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
320  LhsNested lhsNested(lhsView.matrix());
321  RhsNested rhsNested(rhs);
322 
323  internal::sparse_selfadjoint_time_dense_product<LhsView::Mode>(lhsNested, rhsNested, dst, alpha);
324  }
325 };
326 
327 template<typename Lhs, typename RhsView, int ProductType>
328 struct generic_product_impl<Lhs, RhsView, DenseShape, SparseSelfAdjointShape, ProductType>
329 : generic_product_impl_base<Lhs, RhsView, generic_product_impl<Lhs, RhsView, DenseShape, SparseSelfAdjointShape, ProductType> >
330 {
331  template<typename Dest>
332  static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const RhsView& rhsView, const typename Dest::Scalar& alpha)
333  {
334  typedef typename RhsView::_MatrixTypeNested Rhs;
335  typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
336  typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
337  LhsNested lhsNested(lhs);
338  RhsNested rhsNested(rhsView.matrix());
339 
340  // transpose everything
341  Transpose<Dest> dstT(dst);
342  internal::sparse_selfadjoint_time_dense_product<RhsView::Mode>(rhsNested.transpose(), lhsNested.transpose(), dstT, alpha);
343  }
344 };
345 
346 // NOTE: these two overloads are needed to evaluate the sparse selfadjoint view into a full sparse matrix
347 // TODO: maybe the copy could be handled by generic_product_impl so that these overloads would not be needed anymore
348 
349 template<typename LhsView, typename Rhs, int ProductTag>
350 struct product_evaluator<Product<LhsView, Rhs, DefaultProduct>, ProductTag, SparseSelfAdjointShape, SparseShape>
351  : public evaluator<typename Product<typename Rhs::PlainObject, Rhs, DefaultProduct>::PlainObject>
352 {
354  typedef typename XprType::PlainObject PlainObject;
356 
357  product_evaluator(const XprType& xpr)
358  : m_lhs(xpr.lhs()), m_result(xpr.rows(), xpr.cols())
359  {
360  ::new (static_cast<Base*>(this)) Base(m_result);
362  }
363 
364 protected:
365  typename Rhs::PlainObject m_lhs;
366  PlainObject m_result;
367 };
368 
369 template<typename Lhs, typename RhsView, int ProductTag>
370 struct product_evaluator<Product<Lhs, RhsView, DefaultProduct>, ProductTag, SparseShape, SparseSelfAdjointShape>
371  : public evaluator<typename Product<Lhs, typename Lhs::PlainObject, DefaultProduct>::PlainObject>
372 {
374  typedef typename XprType::PlainObject PlainObject;
376 
377  product_evaluator(const XprType& xpr)
378  : m_rhs(xpr.rhs()), m_result(xpr.rows(), xpr.cols())
379  {
380  ::new (static_cast<Base*>(this)) Base(m_result);
382  }
383 
384 protected:
385  typename Lhs::PlainObject m_rhs;
386  PlainObject m_result;
387 };
388 
389 } // namespace internal
390 
391 /***************************************************************************
392 * Implementation of symmetric copies and permutations
393 ***************************************************************************/
394 namespace internal {
395 
396 template<int Mode,typename MatrixType,int DestOrder>
397 void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm)
398 {
399  typedef typename MatrixType::StorageIndex StorageIndex;
400  typedef typename MatrixType::Scalar Scalar;
402  typedef Matrix<StorageIndex,Dynamic,1> VectorI;
403  typedef evaluator<MatrixType> MatEval;
404  typedef typename evaluator<MatrixType>::InnerIterator MatIterator;
405 
406  MatEval matEval(mat);
407  Dest& dest(_dest.derived());
408  enum {
409  StorageOrderMatch = int(Dest::IsRowMajor) == int(MatrixType::IsRowMajor)
410  };
411 
412  Index size = mat.rows();
413  VectorI count;
414  count.resize(size);
415  count.setZero();
416  dest.resize(size,size);
417  for(Index j = 0; j<size; ++j)
418  {
419  Index jp = perm ? perm[j] : j;
420  for(MatIterator it(matEval,j); it; ++it)
421  {
422  Index i = it.index();
423  Index r = it.row();
424  Index c = it.col();
425  Index ip = perm ? perm[i] : i;
426  if(Mode==(Upper|Lower))
427  count[StorageOrderMatch ? jp : ip]++;
428  else if(r==c)
429  count[ip]++;
430  else if(( Mode==Lower && r>c) || ( Mode==Upper && r<c))
431  {
432  count[ip]++;
433  count[jp]++;
434  }
435  }
436  }
437  Index nnz = count.sum();
438 
439  // reserve space
440  dest.resizeNonZeros(nnz);
441  dest.outerIndexPtr()[0] = 0;
442  for(Index j=0; j<size; ++j)
443  dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];
444  for(Index j=0; j<size; ++j)
445  count[j] = dest.outerIndexPtr()[j];
446 
447  // copy data
448  for(StorageIndex j = 0; j<size; ++j)
449  {
450  for(MatIterator it(matEval,j); it; ++it)
451  {
452  StorageIndex i = internal::convert_index<StorageIndex>(it.index());
453  Index r = it.row();
454  Index c = it.col();
455 
456  StorageIndex jp = perm ? perm[j] : j;
457  StorageIndex ip = perm ? perm[i] : i;
458 
459  if(Mode==(Upper|Lower))
460  {
461  Index k = count[StorageOrderMatch ? jp : ip]++;
462  dest.innerIndexPtr()[k] = StorageOrderMatch ? ip : jp;
463  dest.valuePtr()[k] = it.value();
464  }
465  else if(r==c)
466  {
467  Index k = count[ip]++;
468  dest.innerIndexPtr()[k] = ip;
469  dest.valuePtr()[k] = it.value();
470  }
471  else if(( (Mode&Lower)==Lower && r>c) || ( (Mode&Upper)==Upper && r<c))
472  {
473  if(!StorageOrderMatch)
474  std::swap(ip,jp);
475  Index k = count[jp]++;
476  dest.innerIndexPtr()[k] = ip;
477  dest.valuePtr()[k] = it.value();
478  k = count[ip]++;
479  dest.innerIndexPtr()[k] = jp;
480  dest.valuePtr()[k] = numext::conj(it.value());
481  }
482  }
483  }
484 }
485 
/** \internal
  * Copies the _SrcMode triangle of \a mat into the _DstMode triangle of \a _dest,
  * applying the symmetric permutation \a perm (a null pointer means identity).
  * Like permute_symm_to_fullsymm, this runs in two passes: entry counting, then filling.
  */
template<int _SrcMode,int _DstMode,typename MatrixType,int DstOrder>
void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DstOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm)
{
  typedef typename MatrixType::StorageIndex StorageIndex;
  typedef typename MatrixType::Scalar Scalar;
  SparseMatrix<Scalar,DstOrder,StorageIndex>& dest(_dest.derived());
  typedef Matrix<StorageIndex,Dynamic,1> VectorI;
  typedef evaluator<MatrixType> MatEval;
  typedef typename evaluator<MatrixType>::InnerIterator MatIterator;

  enum {
    SrcOrder = MatrixType::IsRowMajor ? RowMajor : ColMajor,
    StorageOrderMatch = int(SrcOrder) == int(DstOrder),
    // A row-major triangle is traversed as the transposed (opposite) triangle of a
    // column-major matrix, so Upper/Lower are swapped for row-major orders.
    DstMode = DstOrder==RowMajor ? (_DstMode==Upper ? Lower : Upper) : _DstMode,
    SrcMode = SrcOrder==RowMajor ? (_SrcMode==Upper ? Lower : Upper) : _SrcMode
  };

  MatEval matEval(mat);

  Index size = mat.rows();
  VectorI count(size);
  count.setZero();
  dest.resize(size,size);
  // pass 1: count the entries landing in each destination outer vector
  for(StorageIndex j = 0; j<size; ++j)
  {
    StorageIndex jp = perm ? perm[j] : j;
    for(MatIterator it(matEval,j); it; ++it)
    {
      StorageIndex i = it.index();
      // skip entries that do not belong to the source triangle
      if((int(SrcMode)==int(Lower) && i<j) || (int(SrcMode)==int(Upper) && i>j))
        continue;

      StorageIndex ip = perm ? perm[i] : i;
      count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
    }
  }
  // prefix-sum the counts into the outer index array
  dest.outerIndexPtr()[0] = 0;
  for(Index j=0; j<size; ++j)
    dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];
  dest.resizeNonZeros(dest.outerIndexPtr()[size]);
  // reuse count[] as the running insertion cursor of each outer vector
  for(Index j=0; j<size; ++j)
    count[j] = dest.outerIndexPtr()[j];

  // pass 2: scatter the kept entries into the destination buffers
  for(StorageIndex j = 0; j<size; ++j)
  {

    for(MatIterator it(matEval,j); it; ++it)
    {
      StorageIndex i = it.index();
      if((int(SrcMode)==int(Lower) && i<j) || (int(SrcMode)==int(Upper) && i>j))
        continue;

      StorageIndex jp = perm ? perm[j] : j;
      StorageIndex ip = perm? perm[i] : i;

      Index k = count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
      dest.innerIndexPtr()[k] = int(DstMode)==int(Lower) ? (std::max)(ip,jp) : (std::min)(ip,jp);

      if(!StorageOrderMatch) std::swap(ip,jp);
      // conjugate when the entry effectively moved to the opposite triangle
      if( ((int(DstMode)==int(Lower) && ip<jp) || (int(DstMode)==int(Upper) && ip>jp)))
        dest.valuePtr()[k] = numext::conj(it.value());
      else
        dest.valuePtr()[k] = it.value();
    }
  }
}
552 
553 }
554 
555 // TODO implement twists in a more evaluator friendly fashion
556 
namespace internal {

// The permutation-product expression inherits every compile-time trait from the nested matrix.
template<typename MatrixType, int Mode>
struct traits<SparseSymmetricPermutationProduct<MatrixType,Mode> > : traits<MatrixType> {
};

}
564 
565 template<typename MatrixType,int Mode>
567  : public EigenBase<SparseSymmetricPermutationProduct<MatrixType,Mode> >
568 {
569  public:
570  typedef typename MatrixType::Scalar Scalar;
571  typedef typename MatrixType::StorageIndex StorageIndex;
572  enum {
575  };
576  protected:
578  public:
580  typedef typename MatrixType::Nested MatrixTypeNested;
582 
583  SparseSymmetricPermutationProduct(const MatrixType& mat, const Perm& perm)
584  : m_matrix(mat), m_perm(perm)
585  {}
586 
587  inline Index rows() const { return m_matrix.rows(); }
588  inline Index cols() const { return m_matrix.cols(); }
589 
590  const NestedExpression& matrix() const { return m_matrix; }
591  const Perm& perm() const { return m_perm; }
592 
593  protected:
594  MatrixTypeNested m_matrix;
595  const Perm& m_perm;
596 
597 };
598 
599 namespace internal {
600 
601 template<typename DstXprType, typename MatrixType, int Mode, typename Scalar>
602 struct Assignment<DstXprType, SparseSymmetricPermutationProduct<MatrixType,Mode>, internal::assign_op<Scalar,typename MatrixType::Scalar>, Sparse2Sparse>
603 {
605  typedef typename DstXprType::StorageIndex DstIndex;
606  template<int Options>
607  static void run(SparseMatrix<Scalar,Options,DstIndex> &dst, const SrcXprType &src, const internal::assign_op<Scalar,typename MatrixType::Scalar> &)
608  {
609  // internal::permute_symm_to_fullsymm<Mode>(m_matrix,_dest,m_perm.indices().data());
611  internal::permute_symm_to_fullsymm<Mode>(src.matrix(),tmp,src.perm().indices().data());
612  dst = tmp;
613  }
614 
615  template<typename DestType,unsigned int DestMode>
617  {
618  internal::permute_symm_to_symm<Mode,DestMode>(src.matrix(),dst.matrix(),src.perm().indices().data());
619  }
620 };
621 
622 } // end namespace internal
623 
624 } // end namespace Eigen
625 
626 #endif // EIGEN_SPARSE_SELFADJOINTVIEW_H
Definition: NonLinearOptimization.cpp:108
Product< SparseSelfAdjointView, OtherDerived > operator*(const SparseMatrixBase< OtherDerived > &rhs) const
Definition: SparseSelfAdjointView.h:80
Storage order is column major (see TopicStorageOrders).
Definition: Constants.h:320
Definition: Constants.h:526
Expression of the product of two arbitrary matrices or vectors.
Definition: Product.h:71
Definition: ForwardDeclarations.h:162
A versatible sparse matrix representation.
Definition: SparseMatrix.h:92
Definition: SparseSelfAdjointView.h:566
Definition: SparseSelfAdjointView.h:216
Expression of the transpose of a matrix.
Definition: Transpose.h:52
friend Product< OtherDerived, SparseSelfAdjointView > operator*(const SparseMatrixBase< OtherDerived > &lhs, const SparseSelfAdjointView &rhs)
Definition: SparseSelfAdjointView.h:92
friend Product< OtherDerived, SparseSelfAdjointView > operator*(const MatrixBase< OtherDerived > &lhs, const SparseSelfAdjointView &rhs)
Efficient dense vector/matrix times sparse self-adjoint matrix product.
Definition: SparseSelfAdjointView.h:108
Definition: CoreEvaluators.h:90
Namespace containing all symbols from the Eigen library.
Definition: bench_norm.cpp:85
Pseudo expression to manipulate a triangular sparse matrix as a selfadjoint matrix.
Definition: SparseSelfAdjointView.h:43
Definition: Constants.h:521
Eigen::Index Index
The interface type of indices.
Definition: EigenBase.h:37
const unsigned int RowMajorBit
for a matrix, this means that the storage order is row-major.
Definition: Constants.h:61
Definition: Constants.h:512
Definition: ProductEvaluators.h:343
Definition: AssignmentFunctors.h:21
Definition: AssignEvaluator.h:753
Common base class for all classes T such that MatrixBase has an operator=(T) and a constructor Matrix...
Definition: EigenBase.h:28
Base class of any sparse matrices or sparse expressions.
Definition: ForwardDeclarations.h:281
View matrix as a lower triangular matrix.
Definition: Constants.h:204
Definition: ProductEvaluators.h:86
const IndicesType & indices() const
const version of indices().
Definition: PermutationMatrix.h:388
EIGEN_DEFAULT_DENSE_INDEX_TYPE Index
The Index type as used for the API.
Definition: Meta.h:33
A sparse matrix class designed for matrix assembly purpose.
Definition: SparseUtil.h:53
View matrix as an upper triangular matrix.
Definition: Constants.h:206
Product< SparseSelfAdjointView, OtherDerived > operator*(const MatrixBase< OtherDerived > &rhs) const
Efficient sparse self-adjoint matrix times dense vector/matrix product.
Definition: SparseSelfAdjointView.h:100
Definition: SparseUtil.h:138
Definition: BandTriangularSolver.h:13
SparseSelfAdjointView & rankUpdate(const SparseMatrixBase< DerivedU > &u, const Scalar &alpha=Scalar(1))
Perform a symmetric rank K update of the selfadjoint matrix *this: where u is a vector or matrix...
Definition: CoreEvaluators.h:79
SparseSymmetricPermutationProduct< _MatrixTypeNested, Mode > twistedBy(const PermutationMatrix< Dynamic, Dynamic, StorageIndex > &perm) const
Definition: SparseSelfAdjointView.h:126
Definition: AssignEvaluator.h:740
Storage order is row major (see TopicStorageOrders).
Definition: Constants.h:322
Determines whether the given binary operation of two numeric types is allowed and what the scalar ret...
Definition: XprHelper.h:757
Generic expression where a coefficient-wise unary operator is applied to an expression.
Definition: CwiseUnaryOp.h:55
Base class for all dense matrices, vectors, and expressions.
Definition: MatrixBase.h:48
Definition: ForwardDeclarations.h:17
Definition: SparseAssign.h:61