SparseMatrix.h
1 // This file is part of Eigen, a lightweight C++ template library
2 // for linear algebra.
3 //
4 // Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
5 //
6 // This Source Code Form is subject to the terms of the Mozilla
7 // Public License v. 2.0. If a copy of the MPL was not distributed
8 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9 
10 #ifndef EIGEN_SPARSEMATRIX_H
11 #define EIGEN_SPARSEMATRIX_H
12 
13 namespace Eigen {
14 
41 namespace internal {
42 template<typename _Scalar, int _Options, typename _Index>
43 struct traits<SparseMatrix<_Scalar, _Options, _Index> >
44 {
45  typedef _Scalar Scalar;
46  typedef _Index StorageIndex;
47  typedef Sparse StorageKind;
48  typedef MatrixXpr XprKind;
49  enum {
50  RowsAtCompileTime = Dynamic,
51  ColsAtCompileTime = Dynamic,
52  MaxRowsAtCompileTime = Dynamic,
53  MaxColsAtCompileTime = Dynamic,
54  Flags = _Options | NestByRefBit | LvalueBit | CompressedAccessBit,
55  SupportedAccessPatterns = InnerRandomAccessPattern
56  };
57 };
58 
59 template<typename _Scalar, int _Options, typename _Index, int DiagIndex>
60 struct traits<Diagonal<SparseMatrix<_Scalar, _Options, _Index>, DiagIndex> >
61 {
 62  typedef SparseMatrix<_Scalar, _Options, _Index> MatrixType;
 63  typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
 64  typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
65 
66  typedef _Scalar Scalar;
67  typedef Dense StorageKind;
68  typedef _Index StorageIndex;
69  typedef MatrixXpr XprKind;
70 
71  enum {
72  RowsAtCompileTime = Dynamic,
73  ColsAtCompileTime = 1,
74  MaxRowsAtCompileTime = Dynamic,
75  MaxColsAtCompileTime = 1,
76  Flags = LvalueBit
77  };
78 };
79 
80 template<typename _Scalar, int _Options, typename _Index, int DiagIndex>
81 struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _Index>, DiagIndex> >
82  : public traits<Diagonal<SparseMatrix<_Scalar, _Options, _Index>, DiagIndex> >
83 {
84  enum {
85  Flags = 0
86  };
87 };
88 
89 } // end namespace internal
90 
91 template<typename _Scalar, int _Options, typename _Index>
 92 class SparseMatrix
 93  : public SparseCompressedBase<SparseMatrix<_Scalar, _Options, _Index> >
94 {
 95  typedef SparseCompressedBase<SparseMatrix> Base;
 96  using Base::convert_index;
97  friend class SparseVector<_Scalar,0,_Index>;
98  public:
99  using Base::isCompressed;
100  using Base::nonZeros;
101  EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
102  using Base::operator+=;
103  using Base::operator-=;
104 
108  typedef typename Base::InnerIterator InnerIterator;
110 
111 
112  using Base::IsRowMajor;
 113  typedef internal::CompressedStorage<Scalar,StorageIndex> Storage;
 114  enum {
115  Options = _Options
116  };
117 
118  typedef typename Base::IndexVector IndexVector;
119  typedef typename Base::ScalarVector ScalarVector;
120  protected:
122 
123  Index m_outerSize;
124  Index m_innerSize;
125  StorageIndex* m_outerIndex;
126  StorageIndex* m_innerNonZeros; // optional, if null then the data is compressed
127  Storage m_data;
128 
129  public:
130 
132  inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
134  inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
135 
137  inline Index innerSize() const { return m_innerSize; }
139  inline Index outerSize() const { return m_outerSize; }
140 
144  inline const Scalar* valuePtr() const { return m_data.valuePtr(); }
148  inline Scalar* valuePtr() { return m_data.valuePtr(); }
149 
153  inline const StorageIndex* innerIndexPtr() const { return m_data.indexPtr(); }
157  inline StorageIndex* innerIndexPtr() { return m_data.indexPtr(); }
158 
162  inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; }
166  inline StorageIndex* outerIndexPtr() { return m_outerIndex; }
167 
171  inline const StorageIndex* innerNonZeroPtr() const { return m_innerNonZeros; }
175  inline StorageIndex* innerNonZeroPtr() { return m_innerNonZeros; }
176 
178  inline Storage& data() { return m_data; }
180  inline const Storage& data() const { return m_data; }
181 
184  inline Scalar coeff(Index row, Index col) const
185  {
186  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
187 
188  const Index outer = IsRowMajor ? row : col;
189  const Index inner = IsRowMajor ? col : row;
190  Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
191  return m_data.atInRange(m_outerIndex[outer], end, StorageIndex(inner));
192  }
193 
202  inline Scalar& coeffRef(Index row, Index col)
203  {
204  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
205 
206  const Index outer = IsRowMajor ? row : col;
207  const Index inner = IsRowMajor ? col : row;
208 
209  Index start = m_outerIndex[outer];
210  Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
211  eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
212  if(end<=start)
213  return insert(row,col);
214  const Index p = m_data.searchLowerIndex(start,end-1,StorageIndex(inner));
215  if((p<end) && (m_data.index(p)==inner))
216  return m_data.value(p);
217  else
218  return insert(row,col);
219  }
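// A minimal usage sketch for coeff()/coeffRef(); the matrix type, sizes and values below
// are illustrative assumptions:
// \code
//   Eigen::SparseMatrix<double> A(4,4);   // empty, column-major by default
//   A.coeffRef(1,2) = 3.0;                // entry absent -> falls back to insert(1,2)
//   A.coeffRef(1,2) += 1.0;               // entry present -> binary search, update in place
//   double x = A.coeff(1,2);              // 4.0
//   double y = A.coeff(0,0);              // 0.0; reading never inserts
// \endcode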
220 
236  Scalar& insert(Index row, Index col);
237 
238  public:
239 
247  inline void setZero()
248  {
249  m_data.clear();
250  memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
251  if(m_innerNonZeros)
252  memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
253  }
254 
258  inline void reserve(Index reserveSize)
259  {
260  eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
261  m_data.reserve(reserveSize);
262  }
263 
264  #ifdef EIGEN_PARSED_BY_DOXYGEN
265 
277  template<class SizesType>
278  inline void reserve(const SizesType& reserveSizes);
279  #else
280  template<class SizesType>
281  inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif =
282  #if (!EIGEN_COMP_MSVC) || (EIGEN_COMP_MSVC>=1500) // MSVC 2005 fails to compile with this typename
283  typename
284  #endif
285  SizesType::value_type())
286  {
287  EIGEN_UNUSED_VARIABLE(enableif);
288  reserveInnerVectors(reserveSizes);
289  }
290  #endif // EIGEN_PARSED_BY_DOXYGEN
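// A sketch of the random-insertion pattern built on the per inner-vector reserve()
// overload; the sizes and the estimate of 6 nonzeros per column are assumptions:
// \code
//   Eigen::SparseMatrix<double> A(1000,1000);
//   A.reserve(Eigen::VectorXi::Constant(A.cols(), 6)); // ~6 nonzeros per column
//   // ... A.insert(i,j) = v_ij; for each nonzero, in any order ...
//   A.makeCompressed();                                 // squeeze back to compressed form
// \endcode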
291  protected:
292  template<class SizesType>
293  inline void reserveInnerVectors(const SizesType& reserveSizes)
294  {
295  if(isCompressed())
296  {
297  Index totalReserveSize = 0;
298  // turn the matrix into non-compressed mode
299  m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
300  if (!m_innerNonZeros) internal::throw_std_bad_alloc();
301 
302  // temporarily use m_innerSizes to hold the new starting points.
303  StorageIndex* newOuterIndex = m_innerNonZeros;
304 
305  StorageIndex count = 0;
306  for(Index j=0; j<m_outerSize; ++j)
307  {
308  newOuterIndex[j] = count;
309  count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]);
310  totalReserveSize += reserveSizes[j];
311  }
312  m_data.reserve(totalReserveSize);
313  StorageIndex previousOuterIndex = m_outerIndex[m_outerSize];
314  for(Index j=m_outerSize-1; j>=0; --j)
315  {
316  StorageIndex innerNNZ = previousOuterIndex - m_outerIndex[j];
317  for(Index i=innerNNZ-1; i>=0; --i)
318  {
319  m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
320  m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
321  }
322  previousOuterIndex = m_outerIndex[j];
323  m_outerIndex[j] = newOuterIndex[j];
324  m_innerNonZeros[j] = innerNNZ;
325  }
326  m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];
327 
328  m_data.resize(m_outerIndex[m_outerSize]);
329  }
330  else
331  {
332  StorageIndex* newOuterIndex = static_cast<StorageIndex*>(std::malloc((m_outerSize+1)*sizeof(StorageIndex)));
333  if (!newOuterIndex) internal::throw_std_bad_alloc();
334 
335  StorageIndex count = 0;
336  for(Index j=0; j<m_outerSize; ++j)
337  {
338  newOuterIndex[j] = count;
339  StorageIndex alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
340  StorageIndex toReserve = std::max<StorageIndex>(reserveSizes[j], alreadyReserved);
341  count += toReserve + m_innerNonZeros[j];
342  }
343  newOuterIndex[m_outerSize] = count;
344 
345  m_data.resize(count);
346  for(Index j=m_outerSize-1; j>=0; --j)
347  {
348  Index offset = newOuterIndex[j] - m_outerIndex[j];
349  if(offset>0)
350  {
351  StorageIndex innerNNZ = m_innerNonZeros[j];
352  for(Index i=innerNNZ-1; i>=0; --i)
353  {
354  m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
355  m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
356  }
357  }
358  }
359 
360  std::swap(m_outerIndex, newOuterIndex);
361  std::free(newOuterIndex);
362  }
363 
364  }
365  public:
366 
367  //--- low level purely coherent filling ---
368 
379  inline Scalar& insertBack(Index row, Index col)
380  {
381  return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
382  }
383 
386  inline Scalar& insertBackByOuterInner(Index outer, Index inner)
387  {
388  eigen_assert(Index(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
389  eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
390  Index p = m_outerIndex[outer+1];
391  ++m_outerIndex[outer+1];
392  m_data.append(Scalar(0), inner);
393  return m_data.value(p);
394  }
395 
398  inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
399  {
400  Index p = m_outerIndex[outer+1];
401  ++m_outerIndex[outer+1];
402  m_data.append(Scalar(0), inner);
403  return m_data.value(p);
404  }
405 
408  inline void startVec(Index outer)
409  {
410  eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && "You must call startVec for each inner vector sequentially");
411  eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
412  m_outerIndex[outer+1] = m_outerIndex[outer];
413  }
414 
418  inline void finalize()
419  {
420  if(isCompressed())
421  {
422  StorageIndex size = internal::convert_index<StorageIndex>(m_data.size());
423  Index i = m_outerSize;
424  // find the last filled column
425  while (i>=0 && m_outerIndex[i]==0)
426  --i;
427  ++i;
428  while (i<=m_outerSize)
429  {
430  m_outerIndex[i] = size;
431  ++i;
432  }
433  }
434  }
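// The low-level "purely coherent" filling API above (startVec / insertBack / finalize)
// requires the outer vectors to be visited in order and the inner indices to be strictly
// increasing; a sketch with assumed sizes and values:
// \code
//   Eigen::SparseMatrix<double> A(rows,cols);  // column-major: outer index = column
//   A.reserve(estimated_nnz);
//   for (int j = 0; j < cols; ++j) {           // each column, in order
//     A.startVec(j);
//     // for each nonzero (i, v) of column j, with increasing row index i:
//     //   A.insertBack(i,j) = v;
//   }
//   A.finalize();                              // fill the trailing outer indices
// \endcode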
435 
436  //---
437 
438  template<typename InputIterators>
439  void setFromTriplets(const InputIterators& begin, const InputIterators& end);
440 
441  template<typename InputIterators,typename DupFunctor>
442  void setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);
443 
444  void sumupDuplicates() { collapseDuplicates(internal::scalar_sum_op<Scalar,Scalar>()); }
445 
446  template<typename DupFunctor>
447  void collapseDuplicates(DupFunctor dup_func = DupFunctor());
448 
449  //---
450 
453  Scalar& insertByOuterInner(Index j, Index i)
454  {
455  return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
456  }
457 
 460  void makeCompressed()
 461  {
462  if(isCompressed())
463  return;
464 
465  eigen_internal_assert(m_outerIndex!=0 && m_outerSize>0);
466 
467  Index oldStart = m_outerIndex[1];
468  m_outerIndex[1] = m_innerNonZeros[0];
469  for(Index j=1; j<m_outerSize; ++j)
470  {
471  Index nextOldStart = m_outerIndex[j+1];
472  Index offset = oldStart - m_outerIndex[j];
473  if(offset>0)
474  {
475  for(Index k=0; k<m_innerNonZeros[j]; ++k)
476  {
477  m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k);
478  m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k);
479  }
480  }
481  m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
482  oldStart = nextOldStart;
483  }
484  std::free(m_innerNonZeros);
485  m_innerNonZeros = 0;
486  m_data.resize(m_outerIndex[m_outerSize]);
487  m_data.squeeze();
488  }
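// A sketch contrasting the two storage modes (sizes and values are assumptions): random
// insertions switch the matrix to uncompressed mode, makeCompressed() removes the holes
// and restores plain compressed column/row storage:
// \code
//   Eigen::SparseMatrix<double> A(10,10);
//   A.insert(3,1) = 1.0;          // switches to uncompressed mode
//   bool c1 = A.isCompressed();   // false, innerNonZeroPtr() is allocated
//   A.makeCompressed();
//   bool c2 = A.isCompressed();   // true, innerNonZeroPtr() is null again
// \endcode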
489 
491  void uncompress()
492  {
493  if(m_innerNonZeros != 0)
494  return;
495  m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
496  for (Index i = 0; i < m_outerSize; i++)
497  {
498  m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
499  }
500  }
501 
503  void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
504  {
505  prune(default_prunning_func(reference,epsilon));
506  }
507 
515  template<typename KeepFunc>
516  void prune(const KeepFunc& keep = KeepFunc())
517  {
518  // TODO optimize the uncompressed mode to avoid moving and allocating the data twice
519  makeCompressed();
520 
521  StorageIndex k = 0;
522  for(Index j=0; j<m_outerSize; ++j)
523  {
524  Index previousStart = m_outerIndex[j];
525  m_outerIndex[j] = k;
526  Index end = m_outerIndex[j+1];
527  for(Index i=previousStart; i<end; ++i)
528  {
529  if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
530  {
531  m_data.value(k) = m_data.value(i);
532  m_data.index(k) = m_data.index(i);
533  ++k;
534  }
535  }
536  }
537  m_outerIndex[m_outerSize] = k;
538  m_data.resize(k,0);
539  }
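// A pruning sketch, assuming a SparseMatrix<double> A and a C++11 lambda; the functor
// overload keeps an entry (row, col, value) only when the predicate returns true:
// \code
//   A.prune(1.0, 1e-12);   // drop entries with |value| <= 1e-12 * 1.0
//   A.prune([](Eigen::Index, Eigen::Index, const double& v) {
//     return std::abs(v) > 1e-12;   // keep only sufficiently large entries
//   });
// \endcode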
540 
549  void conservativeResize(Index rows, Index cols)
550  {
551  // No change
552  if (this->rows() == rows && this->cols() == cols) return;
553 
554  // If one dimension is null, then there is nothing to be preserved
555  if(rows==0 || cols==0) return resize(rows,cols);
556 
557  Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows();
558  Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols();
559  StorageIndex newInnerSize = convert_index(IsRowMajor ? cols : rows);
560 
561  // Deals with inner non zeros
562  if (m_innerNonZeros)
563  {
564  // Resize m_innerNonZeros
565  StorageIndex *newInnerNonZeros = static_cast<StorageIndex*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(StorageIndex)));
566  if (!newInnerNonZeros) internal::throw_std_bad_alloc();
567  m_innerNonZeros = newInnerNonZeros;
568 
569  for(Index i=m_outerSize; i<m_outerSize+outerChange; i++)
570  m_innerNonZeros[i] = 0;
571  }
572  else if (innerChange < 0)
573  {
574  // Inner size decreased: allocate a new m_innerNonZeros
575  m_innerNonZeros = static_cast<StorageIndex*>(std::malloc((m_outerSize+outerChange+1) * sizeof(StorageIndex)));
576  if (!m_innerNonZeros) internal::throw_std_bad_alloc();
577  for(Index i = 0; i < m_outerSize; i++)
578  m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
579  }
580 
581  // Change the m_innerNonZeros in case of a decrease of inner size
582  if (m_innerNonZeros && innerChange < 0)
583  {
584  for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
585  {
586  StorageIndex &n = m_innerNonZeros[i];
587  StorageIndex start = m_outerIndex[i];
588  while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n;
589  }
590  }
591 
592  m_innerSize = newInnerSize;
593 
594  // Re-allocate outer index structure if necessary
595  if (outerChange == 0)
596  return;
597 
598  StorageIndex *newOuterIndex = static_cast<StorageIndex*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(StorageIndex)));
599  if (!newOuterIndex) internal::throw_std_bad_alloc();
600  m_outerIndex = newOuterIndex;
601  if (outerChange > 0)
602  {
603  StorageIndex last = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
604  for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
605  m_outerIndex[i] = last;
606  }
607  m_outerSize += outerChange;
608  }
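// conservativeResize() keeps the stored entries that still fit in the new bounds, whereas
// resize() below discards everything; a sketch with assumed sizes:
// \code
//   Eigen::SparseMatrix<double> A(4,4);
//   A.insert(1,1) = 2.0;
//   A.conservativeResize(6,6);   // (1,1) is preserved, the new rows/columns are empty
//   A.resize(3,3);               // all entries are discarded, A is an empty 3x3 matrix
// \endcode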
609 
617  void resize(Index rows, Index cols)
618  {
619  const Index outerSize = IsRowMajor ? rows : cols;
620  m_innerSize = IsRowMajor ? cols : rows;
621  m_data.clear();
622  if (m_outerSize != outerSize || m_outerSize==0)
623  {
624  std::free(m_outerIndex);
625  m_outerIndex = static_cast<StorageIndex*>(std::malloc((outerSize + 1) * sizeof(StorageIndex)));
626  if (!m_outerIndex) internal::throw_std_bad_alloc();
627 
628  m_outerSize = outerSize;
629  }
630  if(m_innerNonZeros)
631  {
632  std::free(m_innerNonZeros);
633  m_innerNonZeros = 0;
634  }
635  memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
636  }
637 
640  void resizeNonZeros(Index size)
641  {
642  m_data.resize(size);
643  }
644 
646  const ConstDiagonalReturnType diagonal() const { return ConstDiagonalReturnType(*this); }
647 
652  DiagonalReturnType diagonal() { return DiagonalReturnType(*this); }
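// The diagonal() expression reads as a dense column vector (zeros where no diagonal entry
// is stored); a sketch, assuming a SparseMatrix<double> A:
// \code
//   Eigen::VectorXd d = A.diagonal();
// \endcode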
653 
655  inline SparseMatrix()
656  : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
657  {
658  check_template_parameters();
659  resize(0, 0);
660  }
661 
663  inline SparseMatrix(Index rows, Index cols)
664  : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
665  {
666  check_template_parameters();
667  resize(rows, cols);
668  }
669 
671  template<typename OtherDerived>
 672  inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
 673  : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
674  {
 675  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
 676  YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
677  check_template_parameters();
678  const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
679  if (needToTranspose)
680  *this = other.derived();
681  else
682  {
683  #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
684  EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
685  #endif
686  internal::call_assignment_no_alias(*this, other.derived());
687  }
688  }
689 
691  template<typename OtherDerived, unsigned int UpLo>
 692  inline SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other)
 693  : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
694  {
695  check_template_parameters();
696  Base::operator=(other);
697  }
698 
700  inline SparseMatrix(const SparseMatrix& other)
701  : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
702  {
703  check_template_parameters();
704  *this = other.derived();
705  }
706 
708  template<typename OtherDerived>
 709  SparseMatrix(const ReturnByValue<OtherDerived>& other)
 710  : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
711  {
712  check_template_parameters();
713  initAssignment(other);
714  other.evalTo(*this);
715  }
716 
718  template<typename OtherDerived>
 719  explicit SparseMatrix(const DiagonalBase<OtherDerived>& other)
 720  : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
721  {
722  check_template_parameters();
723  *this = other.derived();
724  }
725 
728  inline void swap(SparseMatrix& other)
729  {
730  //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
731  std::swap(m_outerIndex, other.m_outerIndex);
732  std::swap(m_innerSize, other.m_innerSize);
733  std::swap(m_outerSize, other.m_outerSize);
734  std::swap(m_innerNonZeros, other.m_innerNonZeros);
735  m_data.swap(other.m_data);
736  }
737 
740  inline void setIdentity()
741  {
742  eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES");
743  this->m_data.resize(rows());
744  Eigen::Map<IndexVector>(this->m_data.indexPtr(), rows()).setLinSpaced(0, StorageIndex(rows()-1));
745  Eigen::Map<ScalarVector>(this->m_data.valuePtr(), rows()).setOnes();
746  Eigen::Map<IndexVector>(this->m_outerIndex, rows()+1).setLinSpaced(0, StorageIndex(rows()));
747  std::free(m_innerNonZeros);
748  m_innerNonZeros = 0;
749  }
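// setIdentity() requires a square matrix and overwrites any previous content; a sketch:
// \code
//   Eigen::SparseMatrix<double> I(5,5);
//   I.setIdentity();   // 5 stored entries, one per column, compressed
// \endcode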
750  inline SparseMatrix& operator=(const SparseMatrix& other)
751  {
752  if (other.isRValue())
753  {
754  swap(other.const_cast_derived());
755  }
756  else if(this!=&other)
757  {
758  #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
759  EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
760  #endif
761  initAssignment(other);
762  if(other.isCompressed())
763  {
764  internal::smart_copy(other.m_outerIndex, other.m_outerIndex + m_outerSize + 1, m_outerIndex);
765  m_data = other.m_data;
766  }
767  else
768  {
769  Base::operator=(other);
770  }
771  }
772  return *this;
773  }
774 
775 #ifndef EIGEN_PARSED_BY_DOXYGEN
776  template<typename OtherDerived>
777  inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
778  { return Base::operator=(other.derived()); }
779 #endif // EIGEN_PARSED_BY_DOXYGEN
780 
781  template<typename OtherDerived>
782  EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);
783 
784  friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
785  {
786  EIGEN_DBG_SPARSE(
787  s << "Nonzero entries:\n";
788  if(m.isCompressed())
789  {
790  for (Index i=0; i<m.nonZeros(); ++i)
791  s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
792  }
793  else
794  {
795  for (Index i=0; i<m.outerSize(); ++i)
796  {
797  Index p = m.m_outerIndex[i];
798  Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
799  Index k=p;
800  for (; k<pe; ++k) {
801  s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
802  }
803  for (; k<m.m_outerIndex[i+1]; ++k) {
804  s << "(_,_) ";
805  }
806  }
807  }
808  s << std::endl;
809  s << std::endl;
810  s << "Outer pointers:\n";
811  for (Index i=0; i<m.outerSize(); ++i) {
812  s << m.m_outerIndex[i] << " ";
813  }
814  s << " $" << std::endl;
815  if(!m.isCompressed())
816  {
817  s << "Inner non zeros:\n";
818  for (Index i=0; i<m.outerSize(); ++i) {
819  s << m.m_innerNonZeros[i] << " ";
820  }
821  s << " $" << std::endl;
822  }
823  s << std::endl;
824  );
825  s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
826  return s;
827  }
828 
830  inline ~SparseMatrix()
831  {
832  std::free(m_outerIndex);
833  std::free(m_innerNonZeros);
834  }
835 
837  Scalar sum() const;
838 
839 # ifdef EIGEN_SPARSEMATRIX_PLUGIN
840 # include EIGEN_SPARSEMATRIX_PLUGIN
841 # endif
842 
843 protected:
844 
845  template<typename Other>
846  void initAssignment(const Other& other)
847  {
848  resize(other.rows(), other.cols());
849  if(m_innerNonZeros)
850  {
851  std::free(m_innerNonZeros);
852  m_innerNonZeros = 0;
853  }
854  }
855 
858  EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col);
859 
 862  class SingletonVector
 863  {
864  StorageIndex m_index;
865  StorageIndex m_value;
866  public:
867  typedef StorageIndex value_type;
 868  SingletonVector(Index i, Index v)
 869  : m_index(convert_index(i)), m_value(convert_index(v))
870  {}
871 
872  StorageIndex operator[](Index i) const { return i==m_index ? m_value : 0; }
873  };
874 
877  EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col);
878 
879 public:
882  EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col)
883  {
884  const Index outer = IsRowMajor ? row : col;
885  const Index inner = IsRowMajor ? col : row;
886 
887  eigen_assert(!isCompressed());
888  eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));
889 
890  Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
891  m_data.index(p) = convert_index(inner);
892  return (m_data.value(p) = 0);
893  }
894 
895 private:
896  static void check_template_parameters()
897  {
898  EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
899  EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
900  }
901 
902  struct default_prunning_func {
903  default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
904  inline bool operator() (const Index&, const Index&, const Scalar& value) const
905  {
906  return !internal::isMuchSmallerThan(value, reference, epsilon);
907  }
908  Scalar reference;
909  RealScalar epsilon;
910  };
911 };
912 
913 namespace internal {
914 
915 template<typename InputIterator, typename SparseMatrixType, typename DupFunctor>
916 void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, DupFunctor dup_func)
917 {
918  enum { IsRowMajor = SparseMatrixType::IsRowMajor };
919  typedef typename SparseMatrixType::Scalar Scalar;
920  typedef typename SparseMatrixType::StorageIndex StorageIndex;
 921  SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,StorageIndex> trMat(mat.rows(),mat.cols());
 922 
923  if(begin!=end)
924  {
925  // pass 1: count the nnz per inner-vector
926  typename SparseMatrixType::IndexVector wi(trMat.outerSize());
927  wi.setZero();
928  for(InputIterator it(begin); it!=end; ++it)
929  {
930  eigen_assert(it->row()>=0 && it->row()<mat.rows() && it->col()>=0 && it->col()<mat.cols());
931  wi(IsRowMajor ? it->col() : it->row())++;
932  }
933 
934  // pass 2: insert all the elements into trMat
935  trMat.reserve(wi);
936  for(InputIterator it(begin); it!=end; ++it)
937  trMat.insertBackUncompressed(it->row(),it->col()) = it->value();
938 
939  // pass 3:
940  trMat.collapseDuplicates(dup_func);
941  }
942 
943  // pass 4: transposed copy -> implicit sorting
944  mat = trMat;
945 }
946 
947 }
948 
949 
987 template<typename Scalar, int _Options, typename _Index>
988 template<typename InputIterators>
989 void SparseMatrix<Scalar,_Options,_Index>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
990 {
991  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_Index> >(begin, end, *this, internal::scalar_sum_op<Scalar,Scalar>());
992 }
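// The standard triplet-based assembly; duplicates are summed by this overload. The triplet
// list below is an assumed example:
// \code
//   std::vector<Eigen::Triplet<double> > triplets;
//   triplets.push_back(Eigen::Triplet<double>(0,0, 1.0));
//   triplets.push_back(Eigen::Triplet<double>(2,1, 3.0));
//   triplets.push_back(Eigen::Triplet<double>(2,1, 0.5));  // duplicate: combined into 3.5
//   Eigen::SparseMatrix<double> A(3,3);
//   A.setFromTriplets(triplets.begin(), triplets.end());   // A is compressed afterwards
// \endcode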
993 
1003 template<typename Scalar, int _Options, typename _Index>
1004 template<typename InputIterators,typename DupFunctor>
1005 void SparseMatrix<Scalar,_Options,_Index>::setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func)
1006 {
1007  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_Index>, DupFunctor>(begin, end, *this, dup_func);
1008 }
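// With the functor overload the rule used to combine duplicates can be changed, e.g.
// keeping the last value instead of summing (a sketch, C++11 lambda assumed):
// \code
//   A.setFromTriplets(triplets.begin(), triplets.end(),
//                     [](const double&, const double& b) { return b; });
// \endcode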
1009 
1011 template<typename Scalar, int _Options, typename _Index>
1012 template<typename DupFunctor>
1013 void SparseMatrix<Scalar,_Options,_Index>::collapseDuplicates(DupFunctor dup_func)
1014 {
1015  eigen_assert(!isCompressed());
1016  // TODO, in practice we should be able to use m_innerNonZeros for that task
1017  IndexVector wi(innerSize());
1018  wi.fill(-1);
1019  StorageIndex count = 0;
1020  // for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers
1021  for(Index j=0; j<outerSize(); ++j)
1022  {
1023  StorageIndex start = count;
1024  Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j];
1025  for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
1026  {
1027  Index i = m_data.index(k);
1028  if(wi(i)>=start)
1029  {
1030  // we already meet this entry => accumulate it
1031  m_data.value(wi(i)) = dup_func(m_data.value(wi(i)), m_data.value(k));
1032  }
1033  else
1034  {
1035  m_data.value(count) = m_data.value(k);
1036  m_data.index(count) = m_data.index(k);
1037  wi(i) = count;
1038  ++count;
1039  }
1040  }
1041  m_outerIndex[j] = start;
1042  }
1043  m_outerIndex[m_outerSize] = count;
1044 
1045  // turn the matrix into compressed form
1046  std::free(m_innerNonZeros);
1047  m_innerNonZeros = 0;
1048  m_data.resize(m_outerIndex[m_outerSize]);
1049 }
1050 
1051 template<typename Scalar, int _Options, typename _Index>
1052 template<typename OtherDerived>
1053 EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_Index>& SparseMatrix<Scalar,_Options,_Index>::operator=(const SparseMatrixBase<OtherDerived>& other)
1054 {
1055  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
1056  YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
1057 
1058  #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
1059  EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
1060  #endif
1061 
1062  const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
1063  if (needToTranspose)
1064  {
1065  #ifdef EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
1066  EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
1067  #endif
1068  // two passes algorithm:
1069  // 1 - compute the number of coeffs per dest inner vector
1070  // 2 - do the actual copy/eval
1071  // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
1072  typedef typename internal::nested_eval<OtherDerived,2,typename internal::plain_matrix_type<OtherDerived>::type >::type OtherCopy;
1073  typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
1074  typedef internal::evaluator<_OtherCopy> OtherCopyEval;
1075  OtherCopy otherCopy(other.derived());
1076  OtherCopyEval otherCopyEval(otherCopy);
1077 
1078  SparseMatrix dest(other.rows(),other.cols());
1079  Eigen::Map<IndexVector> (dest.m_outerIndex,dest.outerSize()).setZero();
1080 
1081  // pass 1
1082  // FIXME the above copy could be merged with that pass
1083  for (Index j=0; j<otherCopy.outerSize(); ++j)
1084  for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
1085  ++dest.m_outerIndex[it.index()];
1086 
1087  // prefix sum
1088  StorageIndex count = 0;
1089  IndexVector positions(dest.outerSize());
1090  for (Index j=0; j<dest.outerSize(); ++j)
1091  {
1092  StorageIndex tmp = dest.m_outerIndex[j];
1093  dest.m_outerIndex[j] = count;
1094  positions[j] = count;
1095  count += tmp;
1096  }
1097  dest.m_outerIndex[dest.outerSize()] = count;
1098  // alloc
1099  dest.m_data.resize(count);
1100  // pass 2
1101  for (StorageIndex j=0; j<otherCopy.outerSize(); ++j)
1102  {
1103  for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
1104  {
1105  Index pos = positions[it.index()]++;
1106  dest.m_data.index(pos) = j;
1107  dest.m_data.value(pos) = it.value();
1108  }
1109  }
1110  this->swap(dest);
1111  return *this;
1112  }
1113  else
1114  {
1115  if(other.isRValue())
1116  {
1117  initAssignment(other.derived());
1118  }
1119  // there is no special optimization
1120  return Base::operator=(other.derived());
1121  }
1122 }
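// Assigning across storage orders takes the transposed-copy branch above (count pass,
// copy pass, then swap); same-order assignment falls back to the generic path. A sketch
// with assumed sizes:
// \code
//   Eigen::SparseMatrix<double> A(3,3);                 // column-major
//   A.insert(0,1) = 1.0;
//   Eigen::SparseMatrix<double,Eigen::RowMajor> B;
//   B = A;   // needToTranspose == true: two-pass transposed copy into B
// \endcode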
1123 
1124 template<typename _Scalar, int _Options, typename _Index>
1125 typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insert(Index row, Index col)
1126 {
1127  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
1128 
1129  const Index outer = IsRowMajor ? row : col;
1130  const Index inner = IsRowMajor ? col : row;
1131 
1132  if(isCompressed())
1133  {
1134  if(nonZeros()==0)
1135  {
1136  // reserve space if not already done
1137  if(m_data.allocatedSize()==0)
1138  m_data.reserve(2*m_innerSize);
1139 
1140  // turn the matrix into non-compressed mode
1141  m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
1142  if(!m_innerNonZeros) internal::throw_std_bad_alloc();
1143 
1144  memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
1145 
1146  // pack all inner-vectors to the end of the pre-allocated space
1147  // and allocate the entire free-space to the first inner-vector
1148  StorageIndex end = convert_index(m_data.allocatedSize());
1149  for(Index j=1; j<=m_outerSize; ++j)
1150  m_outerIndex[j] = end;
1151  }
1152  else
1153  {
1154  // turn the matrix into non-compressed mode
1155  m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
1156  if(!m_innerNonZeros) internal::throw_std_bad_alloc();
1157  for(Index j=0; j<m_outerSize; ++j)
1158  m_innerNonZeros[j] = m_outerIndex[j+1]-m_outerIndex[j];
1159  }
1160  }
1161 
1162  // check whether we can do a fast "push back" insertion
1163  Index data_end = m_data.allocatedSize();
1164 
1165  // First case: we are filling a new inner vector which is packed at the end.
1166  // We assume that all remaining inner-vectors are also empty and packed to the end.
1167  if(m_outerIndex[outer]==data_end)
1168  {
1169  eigen_internal_assert(m_innerNonZeros[outer]==0);
1170 
1171  // pack previous empty inner-vectors to end of the used-space
1172  // and allocate the entire free-space to the current inner-vector.
1173  StorageIndex p = convert_index(m_data.size());
1174  Index j = outer;
1175  while(j>=0 && m_innerNonZeros[j]==0)
1176  m_outerIndex[j--] = p;
1177 
1178  // push back the new element
1179  ++m_innerNonZeros[outer];
1180  m_data.append(Scalar(0), inner);
1181 
1182  // check for reallocation
1183  if(data_end != m_data.allocatedSize())
1184  {
1185  // m_data has been reallocated
1186  // -> move remaining inner-vectors back to the end of the free-space
1187  // so that the entire free-space is allocated to the current inner-vector.
1188  eigen_internal_assert(data_end < m_data.allocatedSize());
1189  StorageIndex new_end = convert_index(m_data.allocatedSize());
1190  for(Index k=outer+1; k<=m_outerSize; ++k)
1191  if(m_outerIndex[k]==data_end)
1192  m_outerIndex[k] = new_end;
1193  }
1194  return m_data.value(p);
1195  }
1196 
1197  // Second case: the next inner-vector is packed to the end
1198  // and the current inner-vector end match the used-space.
1199  if(m_outerIndex[outer+1]==data_end && m_outerIndex[outer]+m_innerNonZeros[outer]==m_data.size())
1200  {
1201  eigen_internal_assert(outer+1==m_outerSize || m_innerNonZeros[outer+1]==0);
1202 
1203  // add space for the new element
1204  ++m_innerNonZeros[outer];
1205  m_data.resize(m_data.size()+1);
1206 
1207  // check for reallocation
1208  if(data_end != m_data.allocatedSize())
1209  {
1210  // m_data has been reallocated
1211  // -> move remaining inner-vectors back to the end of the free-space
1212  // so that the entire free-space is allocated to the current inner-vector.
1213  eigen_internal_assert(data_end < m_data.allocatedSize());
1214  StorageIndex new_end = convert_index(m_data.allocatedSize());
1215  for(Index k=outer+1; k<=m_outerSize; ++k)
1216  if(m_outerIndex[k]==data_end)
1217  m_outerIndex[k] = new_end;
1218  }
1219 
1220  // and insert it at the right position (sorted insertion)
1221  Index startId = m_outerIndex[outer];
1222  Index p = m_outerIndex[outer]+m_innerNonZeros[outer]-1;
1223  while ( (p > startId) && (m_data.index(p-1) > inner) )
1224  {
1225  m_data.index(p) = m_data.index(p-1);
1226  m_data.value(p) = m_data.value(p-1);
1227  --p;
1228  }
1229 
1230  m_data.index(p) = convert_index(inner);
1231  return (m_data.value(p) = 0);
1232  }
1233 
1234  if(m_data.size() != m_data.allocatedSize())
1235  {
1236  // make sure the matrix is compatible to random un-compressed insertion:
1237  m_data.resize(m_data.allocatedSize());
1238  this->reserveInnerVectors(Array<StorageIndex,Dynamic,1>::Constant(m_outerSize, 2));
1239  }
1240 
1241  return insertUncompressed(row,col);
1242 }
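// insert() assumes the coefficient does not exist yet (otherwise coeffRef() must be used),
// and it is cheapest when enough room has been reserved per inner vector beforehand; a
// sketch of the intended pattern, with assumed sizes:
// \code
//   Eigen::SparseMatrix<double> A(100,100);
//   A.reserve(Eigen::VectorXi::Constant(100, 4));  // ~4 nonzeros per column
//   A.insert(7,3) = 1.0;                           // must not already exist
//   A.coeffRef(7,3) += 0.5;                        // works whether or not the entry exists
//   A.makeCompressed();
// \endcode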
1243 
1244 template<typename _Scalar, int _Options, typename _Index>
1245 EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insertUncompressed(Index row, Index col)
1246 {
1247  eigen_assert(!isCompressed());
1248 
1249  const Index outer = IsRowMajor ? row : col;
1250  const StorageIndex inner = convert_index(IsRowMajor ? col : row);
1251 
1252  Index room = m_outerIndex[outer+1] - m_outerIndex[outer];
1253  StorageIndex innerNNZ = m_innerNonZeros[outer];
1254  if(innerNNZ>=room)
1255  {
1256  // this inner vector is full, we need to reallocate the whole buffer :(
1257  reserve(SingletonVector(outer,std::max<StorageIndex>(2,innerNNZ)));
1258  }
1259 
1260  Index startId = m_outerIndex[outer];
1261  Index p = startId + m_innerNonZeros[outer];
1262  while ( (p > startId) && (m_data.index(p-1) > inner) )
1263  {
1264  m_data.index(p) = m_data.index(p-1);
1265  m_data.value(p) = m_data.value(p-1);
1266  --p;
1267  }
1268  eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exists, you must call coeffRef to this end");
1269 
1270  m_innerNonZeros[outer]++;
1271 
1272  m_data.index(p) = inner;
1273  return (m_data.value(p) = 0);
1274 }
1275 
1276 template<typename _Scalar, int _Options, typename _Index>
1277 EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insertCompressed(Index row, Index col)
1278 {
1279  eigen_assert(isCompressed());
1280 
1281  const Index outer = IsRowMajor ? row : col;
1282  const Index inner = IsRowMajor ? col : row;
1283 
1284  Index previousOuter = outer;
1285  if (m_outerIndex[outer+1]==0)
1286  {
1287  // we start a new inner vector
1288  while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
1289  {
1290  m_outerIndex[previousOuter] = convert_index(m_data.size());
1291  --previousOuter;
1292  }
1293  m_outerIndex[outer+1] = m_outerIndex[outer];
1294  }
1295 
1296  // here we have to handle the tricky case where the outerIndex array
1297  // starts with: [ 0 0 0 0 0 1 ...] and we are inserted in, e.g.,
1298  // the 2nd inner vector...
1299  bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
1300  && (size_t(m_outerIndex[outer+1]) == m_data.size());
1301 
1302  size_t startId = m_outerIndex[outer];
1303  // FIXME let's make sure sizeof(long int) == sizeof(size_t)
1304  size_t p = m_outerIndex[outer+1];
1305  ++m_outerIndex[outer+1];
1306 
1307  double reallocRatio = 1;
1308  if (m_data.allocatedSize()<=m_data.size())
1309  {
1310  // if there is no preallocated memory, let's reserve a minimum of 32 elements
1311  if (m_data.size()==0)
1312  {
1313  m_data.reserve(32);
1314  }
1315  else
1316  {
1317  // we need to reallocate the data, to reduce multiple reallocations
1318  // we use a smart resize algorithm based on the current filling ratio
1319  // in addition, we use double to avoid integers overflows
1320  double nnzEstimate = double(m_outerIndex[outer])*double(m_outerSize)/double(outer+1);
1321  reallocRatio = (nnzEstimate-double(m_data.size()))/double(m_data.size());
1322  // furthermore we bound the realloc ratio to:
1323  // 1) reduce multiple minor realloc when the matrix is almost filled
1324  // 2) avoid to allocate too much memory when the matrix is almost empty
1325  reallocRatio = (std::min)((std::max)(reallocRatio,1.5),8.);
1326  }
1327  }
1328  m_data.resize(m_data.size()+1,reallocRatio);
1329 
1330  if (!isLastVec)
1331  {
1332  if (previousOuter==-1)
1333  {
1334  // oops wrong guess.
1335  // let's correct the outer offsets
1336  for (Index k=0; k<=(outer+1); ++k)
1337  m_outerIndex[k] = 0;
1338  Index k=outer+1;
1339  while(m_outerIndex[k]==0)
1340  m_outerIndex[k++] = 1;
1341  while (k<=m_outerSize && m_outerIndex[k]!=0)
1342  m_outerIndex[k++]++;
1343  p = 0;
1344  --k;
1345  k = m_outerIndex[k]-1;
1346  while (k>0)
1347  {
1348  m_data.index(k) = m_data.index(k-1);
1349  m_data.value(k) = m_data.value(k-1);
1350  k--;
1351  }
1352  }
1353  else
1354  {
1355  // we are not inserting into the last inner vec
1356  // update outer indices:
1357  Index j = outer+2;
1358  while (j<=m_outerSize && m_outerIndex[j]!=0)
1359  m_outerIndex[j++]++;
1360  --j;
1361  // shift data of last vecs:
1362  Index k = m_outerIndex[j]-1;
1363  while (k>=Index(p))
1364  {
1365  m_data.index(k) = m_data.index(k-1);
1366  m_data.value(k) = m_data.value(k-1);
1367  k--;
1368  }
1369  }
1370  }
1371 
1372  while ( (p > startId) && (m_data.index(p-1) > inner) )
1373  {
1374  m_data.index(p) = m_data.index(p-1);
1375  m_data.value(p) = m_data.value(p-1);
1376  --p;
1377  }
1378 
1379  m_data.index(p) = inner;
1380  return (m_data.value(p) = 0);
1381 }
1382 
1383 namespace internal {
1384 
1385 template<typename _Scalar, int _Options, typename _Index>
1386 struct evaluator<SparseMatrix<_Scalar,_Options,_Index> >
1387  : evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_Index> > >
1388 {
1389  typedef evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_Index> > > Base;
1390  typedef SparseMatrix<_Scalar,_Options,_Index> SparseMatrixType;
1391  evaluator() : Base() {}
1392  explicit evaluator(const SparseMatrixType &mat) : Base(mat) {}
1393 };
1394 
1395 }
1396 
1397 } // end namespace Eigen
1398 
1399 #endif // EIGEN_SPARSEMATRIX_H