init repo.
378  3party/eigen/Eigen/src/SparseCore/AmbiVector.h  Normal file
@@ -0,0 +1,378 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_AMBIVECTOR_H
#define EIGEN_AMBIVECTOR_H

namespace Eigen {

namespace internal {

/** \internal
  * Hybrid sparse/dense vector class designed for intensive read-write operations.
  *
  * See BasicSparseLLT and SparseProduct for usage examples.
  */
template<typename _Scalar, typename _StorageIndex>
class AmbiVector
{
  public:
    typedef _Scalar Scalar;
    typedef _StorageIndex StorageIndex;
    typedef typename NumTraits<Scalar>::Real RealScalar;

    explicit AmbiVector(Index size)
      : m_buffer(0), m_zero(0), m_size(0), m_end(0), m_allocatedSize(0), m_allocatedElements(0), m_mode(-1)
    {
      resize(size);
    }

    void init(double estimatedDensity);
    void init(int mode);

    Index nonZeros() const;

    /** Specifies a sub-vector to work on */
    void setBounds(Index start, Index end) { m_start = convert_index(start); m_end = convert_index(end); }

    void setZero();

    void restart();
    Scalar& coeffRef(Index i);
    Scalar& coeff(Index i);

    class Iterator;

    ~AmbiVector() { delete[] m_buffer; }

    void resize(Index size)
    {
      if (m_allocatedSize < size)
        reallocate(size);
      m_size = convert_index(size);
    }

    StorageIndex size() const { return m_size; }

  protected:
    StorageIndex convert_index(Index idx)
    {
      return internal::convert_index<StorageIndex>(idx);
    }

    void reallocate(Index size)
    {
      // if the size of the matrix is not too large, let's allocate a bit more than needed such
      // that we can handle dense vectors even in sparse mode.
      delete[] m_buffer;
      if (size<1000)
      {
        Index allocSize = (size * sizeof(ListEl) + sizeof(Scalar) - 1)/sizeof(Scalar);
        m_allocatedElements = convert_index((allocSize*sizeof(Scalar))/sizeof(ListEl));
        m_buffer = new Scalar[allocSize];
      }
      else
      {
        m_allocatedElements = convert_index((size*sizeof(Scalar))/sizeof(ListEl));
        m_buffer = new Scalar[size];
      }
      m_size = convert_index(size);
      m_start = 0;
      m_end = m_size;
    }

    void reallocateSparse()
    {
      Index copyElements = m_allocatedElements;
      m_allocatedElements = (std::min)(StorageIndex(m_allocatedElements*1.5),m_size);
      Index allocSize = m_allocatedElements * sizeof(ListEl);
      allocSize = (allocSize + sizeof(Scalar) - 1)/sizeof(Scalar);
      Scalar* newBuffer = new Scalar[allocSize];
      std::memcpy(newBuffer, m_buffer, copyElements * sizeof(ListEl));
      delete[] m_buffer;
      m_buffer = newBuffer;
    }

  protected:
    // element type of the linked list
    struct ListEl
    {
      StorageIndex next;
      StorageIndex index;
      Scalar value;
    };

    // used to store data in both modes
    Scalar* m_buffer;
    Scalar m_zero;
    StorageIndex m_size;
    StorageIndex m_start;
    StorageIndex m_end;
    StorageIndex m_allocatedSize;
    StorageIndex m_allocatedElements;
    StorageIndex m_mode;

    // linked list mode
    StorageIndex m_llStart;
    StorageIndex m_llCurrent;
    StorageIndex m_llSize;
};

/** \returns the number of non zeros in the current sub vector */
template<typename _Scalar,typename _StorageIndex>
Index AmbiVector<_Scalar,_StorageIndex>::nonZeros() const
{
  if (m_mode==IsSparse)
    return m_llSize;
  else
    return m_end - m_start;
}

template<typename _Scalar,typename _StorageIndex>
void AmbiVector<_Scalar,_StorageIndex>::init(double estimatedDensity)
{
  if (estimatedDensity>0.1)
    init(IsDense);
  else
    init(IsSparse);
}

template<typename _Scalar,typename _StorageIndex>
void AmbiVector<_Scalar,_StorageIndex>::init(int mode)
{
  m_mode = mode;
  // This is only necessary in sparse mode, but we set these unconditionally to avoid some maybe-uninitialized warnings
  // if (m_mode==IsSparse)
  {
    m_llSize = 0;
    m_llStart = -1;
  }
}

/** Must be called whenever we might perform a write access
  * with an index smaller than the previous one.
  *
  * Don't worry, this function is extremely cheap.
  */
template<typename _Scalar,typename _StorageIndex>
void AmbiVector<_Scalar,_StorageIndex>::restart()
{
  m_llCurrent = m_llStart;
}

/** Set all coefficients of current subvector to zero */
template<typename _Scalar,typename _StorageIndex>
void AmbiVector<_Scalar,_StorageIndex>::setZero()
{
  if (m_mode==IsDense)
  {
    for (Index i=m_start; i<m_end; ++i)
      m_buffer[i] = Scalar(0);
  }
  else
  {
    eigen_assert(m_mode==IsSparse);
    m_llSize = 0;
    m_llStart = -1;
  }
}

template<typename _Scalar,typename _StorageIndex>
_Scalar& AmbiVector<_Scalar,_StorageIndex>::coeffRef(Index i)
{
  if (m_mode==IsDense)
    return m_buffer[i];
  else
  {
    ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_buffer);
    // TODO factorize the following code to reduce code generation
    eigen_assert(m_mode==IsSparse);
    if (m_llSize==0)
    {
      // this is the first element
      m_llStart = 0;
      m_llCurrent = 0;
      ++m_llSize;
      llElements[0].value = Scalar(0);
      llElements[0].index = convert_index(i);
      llElements[0].next = -1;
      return llElements[0].value;
    }
    else if (i<llElements[m_llStart].index)
    {
      // this is going to be the new first element of the list
      ListEl& el = llElements[m_llSize];
      el.value = Scalar(0);
      el.index = convert_index(i);
      el.next = m_llStart;
      m_llStart = m_llSize;
      ++m_llSize;
      m_llCurrent = m_llStart;
      return el.value;
    }
    else
    {
      StorageIndex nextel = llElements[m_llCurrent].next;
      eigen_assert(i>=llElements[m_llCurrent].index && "you must call restart() before inserting an element with lower or equal index");
      while (nextel >= 0 && llElements[nextel].index<=i)
      {
        m_llCurrent = nextel;
        nextel = llElements[nextel].next;
      }

      if (llElements[m_llCurrent].index==i)
      {
        // the coefficient already exists and we found it!
        return llElements[m_llCurrent].value;
      }
      else
      {
        if (m_llSize>=m_allocatedElements)
        {
          reallocateSparse();
          llElements = reinterpret_cast<ListEl*>(m_buffer);
        }
        eigen_internal_assert(m_llSize<m_allocatedElements && "internal error: overflow in sparse mode");
        // let's insert a new coefficient
        ListEl& el = llElements[m_llSize];
        el.value = Scalar(0);
        el.index = convert_index(i);
        el.next = llElements[m_llCurrent].next;
        llElements[m_llCurrent].next = m_llSize;
        ++m_llSize;
        return el.value;
      }
    }
  }
}

template<typename _Scalar,typename _StorageIndex>
_Scalar& AmbiVector<_Scalar,_StorageIndex>::coeff(Index i)
{
  if (m_mode==IsDense)
    return m_buffer[i];
  else
  {
    ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_buffer);
    eigen_assert(m_mode==IsSparse);
    if ((m_llSize==0) || (i<llElements[m_llStart].index))
    {
      return m_zero;
    }
    else
    {
      Index elid = m_llStart;
      while (elid >= 0 && llElements[elid].index<i)
        elid = llElements[elid].next;

      if (llElements[elid].index==i)
        return llElements[m_llCurrent].value;
      else
        return m_zero;
    }
  }
}

/** Iterator over the nonzero coefficients */
template<typename _Scalar,typename _StorageIndex>
class AmbiVector<_Scalar,_StorageIndex>::Iterator
{
  public:
    typedef _Scalar Scalar;
    typedef typename NumTraits<Scalar>::Real RealScalar;

    /** Default constructor
      * \param vec the vector on which we iterate
      * \param epsilon the minimal value used to prune zero coefficients.
      * In practice, all coefficients having a magnitude smaller than \a epsilon
      * are skipped.
      */
    explicit Iterator(const AmbiVector& vec, const RealScalar& epsilon = 0)
      : m_vector(vec)
    {
      using std::abs;
      m_epsilon = epsilon;
      m_isDense = m_vector.m_mode==IsDense;
      if (m_isDense)
      {
        m_currentEl = 0;   // this is to avoid a compilation warning
        m_cachedValue = 0; // this is to avoid a compilation warning
        m_cachedIndex = m_vector.m_start-1;
        ++(*this);
      }
      else
      {
        ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_vector.m_buffer);
        m_currentEl = m_vector.m_llStart;
        while (m_currentEl>=0 && abs(llElements[m_currentEl].value)<=m_epsilon)
          m_currentEl = llElements[m_currentEl].next;
        if (m_currentEl<0)
        {
          m_cachedValue = 0; // this is to avoid a compilation warning
          m_cachedIndex = -1;
        }
        else
        {
          m_cachedIndex = llElements[m_currentEl].index;
          m_cachedValue = llElements[m_currentEl].value;
        }
      }
    }

    StorageIndex index() const { return m_cachedIndex; }
    Scalar value() const { return m_cachedValue; }

    operator bool() const { return m_cachedIndex>=0; }

    Iterator& operator++()
    {
      using std::abs;
      if (m_isDense)
      {
        do {
          ++m_cachedIndex;
        } while (m_cachedIndex<m_vector.m_end && abs(m_vector.m_buffer[m_cachedIndex])<=m_epsilon);
        if (m_cachedIndex<m_vector.m_end)
          m_cachedValue = m_vector.m_buffer[m_cachedIndex];
        else
          m_cachedIndex=-1;
      }
      else
      {
        ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_vector.m_buffer);
        do {
          m_currentEl = llElements[m_currentEl].next;
        } while (m_currentEl>=0 && abs(llElements[m_currentEl].value)<=m_epsilon);
        if (m_currentEl<0)
        {
          m_cachedIndex = -1;
        }
        else
        {
          m_cachedIndex = llElements[m_currentEl].index;
          m_cachedValue = llElements[m_currentEl].value;
        }
      }
      return *this;
    }

  protected:
    const AmbiVector& m_vector; // the target vector
    StorageIndex m_currentEl;   // the current element in sparse/linked-list mode
    RealScalar m_epsilon;       // epsilon used to prune zero coefficients
    StorageIndex m_cachedIndex; // current coordinate
    Scalar m_cachedValue;       // current value
    bool m_isDense;             // mode of the vector
};

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_AMBIVECTOR_H
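AmbiVector is an internal workhorse rather than public API, so the following is only a minimal sketch of how its dense/sparse switch is driven; it assumes <Eigen/SparseCore> has been included and that reaching into Eigen::internal is acceptable for illustration:

  #include <Eigen/SparseCore>
  #include <iostream>

  int main()
  {
    using Eigen::internal::AmbiVector;
    AmbiVector<double,int> v(100);  // workspace for one column/row
    v.init(0.01);                   // estimated density <= 0.1 selects linked-list (sparse) mode
    v.setZero();
    v.restart();                    // required before writing a smaller index again
    v.coeffRef(3)  = 1.0;           // within one pass, indices must be non-decreasing
    v.coeffRef(42) = 2.0;
    for (AmbiVector<double,int>::Iterator it(v); it; ++it)
      std::cout << it.index() << " -> " << it.value() << "\n";
    return 0;
  }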
274  3party/eigen/Eigen/src/SparseCore/CompressedStorage.h  Normal file
@@ -0,0 +1,274 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_COMPRESSED_STORAGE_H
#define EIGEN_COMPRESSED_STORAGE_H

namespace Eigen {

namespace internal {

/** \internal
  * Stores a sparse set of values as a list of values and a list of indices.
  *
  */
template<typename _Scalar,typename _StorageIndex>
class CompressedStorage
{
  public:

    typedef _Scalar Scalar;
    typedef _StorageIndex StorageIndex;

  protected:

    typedef typename NumTraits<Scalar>::Real RealScalar;

  public:

    CompressedStorage()
      : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
    {}

    explicit CompressedStorage(Index size)
      : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
    {
      resize(size);
    }

    CompressedStorage(const CompressedStorage& other)
      : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
    {
      *this = other;
    }

    CompressedStorage& operator=(const CompressedStorage& other)
    {
      resize(other.size());
      if(other.size()>0)
      {
        internal::smart_copy(other.m_values,  other.m_values  + m_size, m_values);
        internal::smart_copy(other.m_indices, other.m_indices + m_size, m_indices);
      }
      return *this;
    }

    void swap(CompressedStorage& other)
    {
      std::swap(m_values, other.m_values);
      std::swap(m_indices, other.m_indices);
      std::swap(m_size, other.m_size);
      std::swap(m_allocatedSize, other.m_allocatedSize);
    }

    ~CompressedStorage()
    {
      delete[] m_values;
      delete[] m_indices;
    }

    void reserve(Index size)
    {
      Index newAllocatedSize = m_size + size;
      if (newAllocatedSize > m_allocatedSize)
        reallocate(newAllocatedSize);
    }

    void squeeze()
    {
      if (m_allocatedSize>m_size)
        reallocate(m_size);
    }

    void resize(Index size, double reserveSizeFactor = 0)
    {
      if (m_allocatedSize<size)
      {
        Index realloc_size = (std::min<Index>)(NumTraits<StorageIndex>::highest(), size + Index(reserveSizeFactor*double(size)));
        if(realloc_size<size)
          internal::throw_std_bad_alloc();
        reallocate(realloc_size);
      }
      m_size = size;
    }

    void append(const Scalar& v, Index i)
    {
      Index id = m_size;
      resize(m_size+1, 1);
      m_values[id] = v;
      m_indices[id] = internal::convert_index<StorageIndex>(i);
    }

    inline Index size() const { return m_size; }
    inline Index allocatedSize() const { return m_allocatedSize; }
    inline void clear() { m_size = 0; }

    const Scalar* valuePtr() const { return m_values; }
    Scalar* valuePtr() { return m_values; }
    const StorageIndex* indexPtr() const { return m_indices; }
    StorageIndex* indexPtr() { return m_indices; }

    inline Scalar& value(Index i) { eigen_internal_assert(m_values!=0); return m_values[i]; }
    inline const Scalar& value(Index i) const { eigen_internal_assert(m_values!=0); return m_values[i]; }

    inline StorageIndex& index(Index i) { eigen_internal_assert(m_indices!=0); return m_indices[i]; }
    inline const StorageIndex& index(Index i) const { eigen_internal_assert(m_indices!=0); return m_indices[i]; }

    /** \returns the largest \c k such that for all \c j in [0,k) index[\c j]\<\a key */
    inline Index searchLowerIndex(Index key) const
    {
      return searchLowerIndex(0, m_size, key);
    }

    /** \returns the largest \c k in [start,end) such that for all \c j in [start,k) index[\c j]\<\a key */
    inline Index searchLowerIndex(Index start, Index end, Index key) const
    {
      while(end>start)
      {
        Index mid = (end+start)>>1;
        if (m_indices[mid]<key)
          start = mid+1;
        else
          end = mid;
      }
      return start;
    }

    /** \returns the stored value at index \a key
      * If the value does not exist, then the value \a defaultValue is returned without any insertion. */
    inline Scalar at(Index key, const Scalar& defaultValue = Scalar(0)) const
    {
      if (m_size==0)
        return defaultValue;
      else if (key==m_indices[m_size-1])
        return m_values[m_size-1];
      // ^^  optimization: let's first check if it is the last coefficient
      // (very common in high level algorithms)
      const Index id = searchLowerIndex(0,m_size-1,key);
      return ((id<m_size) && (m_indices[id]==key)) ? m_values[id] : defaultValue;
    }

    /** Like at(), but the search is performed in the range [start,end) */
    inline Scalar atInRange(Index start, Index end, Index key, const Scalar &defaultValue = Scalar(0)) const
    {
      if (start>=end)
        return defaultValue;
      else if (end>start && key==m_indices[end-1])
        return m_values[end-1];
      // ^^  optimization: let's first check if it is the last coefficient
      // (very common in high level algorithms)
      const Index id = searchLowerIndex(start,end-1,key);
      return ((id<end) && (m_indices[id]==key)) ? m_values[id] : defaultValue;
    }

    /** \returns a reference to the value at index \a key
      * If the value does not exist, then the value \a defaultValue is inserted
      * such that the keys are sorted. */
    inline Scalar& atWithInsertion(Index key, const Scalar& defaultValue = Scalar(0))
    {
      Index id = searchLowerIndex(0,m_size,key);
      if (id>=m_size || m_indices[id]!=key)
      {
        if (m_allocatedSize<m_size+1)
        {
          m_allocatedSize = 2*(m_size+1);
          internal::scoped_array<Scalar> newValues(m_allocatedSize);
          internal::scoped_array<StorageIndex> newIndices(m_allocatedSize);

          // copy first chunk
          internal::smart_copy(m_values,  m_values +id, newValues.ptr());
          internal::smart_copy(m_indices, m_indices+id, newIndices.ptr());

          // copy the rest
          if(m_size>id)
          {
            internal::smart_copy(m_values +id, m_values +m_size, newValues.ptr() +id+1);
            internal::smart_copy(m_indices+id, m_indices+m_size, newIndices.ptr()+id+1);
          }
          std::swap(m_values,newValues.ptr());
          std::swap(m_indices,newIndices.ptr());
        }
        else if(m_size>id)
        {
          internal::smart_memmove(m_values +id, m_values +m_size, m_values +id+1);
          internal::smart_memmove(m_indices+id, m_indices+m_size, m_indices+id+1);
        }
        m_size++;
        m_indices[id] = internal::convert_index<StorageIndex>(key);
        m_values[id] = defaultValue;
      }
      return m_values[id];
    }

    void moveChunk(Index from, Index to, Index chunkSize)
    {
      eigen_internal_assert(to+chunkSize <= m_size);
      if(to>from && from+chunkSize>to)
      {
        // move backward
        internal::smart_memmove(m_values+from,  m_values+from+chunkSize,  m_values+to);
        internal::smart_memmove(m_indices+from, m_indices+from+chunkSize, m_indices+to);
      }
      else
      {
        internal::smart_copy(m_values+from,  m_values+from+chunkSize,  m_values+to);
        internal::smart_copy(m_indices+from, m_indices+from+chunkSize, m_indices+to);
      }
    }

    void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
    {
      Index k = 0;
      Index n = size();
      for (Index i=0; i<n; ++i)
      {
        if (!internal::isMuchSmallerThan(value(i), reference, epsilon))
        {
          value(k) = value(i);
          index(k) = index(i);
          ++k;
        }
      }
      resize(k,0);
    }

  protected:

    inline void reallocate(Index size)
    {
      #ifdef EIGEN_SPARSE_COMPRESSED_STORAGE_REALLOCATE_PLUGIN
        EIGEN_SPARSE_COMPRESSED_STORAGE_REALLOCATE_PLUGIN
      #endif
      eigen_internal_assert(size!=m_allocatedSize);
      internal::scoped_array<Scalar> newValues(size);
      internal::scoped_array<StorageIndex> newIndices(size);
      Index copySize = (std::min)(size, m_size);
      if (copySize>0) {
        internal::smart_copy(m_values, m_values+copySize, newValues.ptr());
        internal::smart_copy(m_indices, m_indices+copySize, newIndices.ptr());
      }
      std::swap(m_values,newValues.ptr());
      std::swap(m_indices,newIndices.ptr());
      m_allocatedSize = size;
    }

  protected:
    Scalar* m_values;
    StorageIndex* m_indices;
    Index m_size;
    Index m_allocatedSize;

};

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_COMPRESSED_STORAGE_H
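A short sketch of the search/insert semantics described above (again poking at the internal class directly, which ordinary client code would not do; the index values here are arbitrary):

  #include <Eigen/SparseCore>
  #include <cassert>

  int main()
  {
    using Eigen::internal::CompressedStorage;
    CompressedStorage<double,int> s;
    // append() trusts the caller to keep indices sorted:
    s.append(1.0, 2);
    s.append(3.0, 7);
    assert(s.at(7) == 3.0);             // binary search, hit
    assert(s.at(5) == 0.0);             // miss: defaultValue, no insertion
    assert(s.searchLowerIndex(7) == 1); // first position whose index is >= 7
    s.atWithInsertion(5) = 2.5;         // miss: shifts the tail and inserts at position 1
    assert(s.at(5) == 2.5 && s.size() == 3);
    return 0;
  }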
352  3party/eigen/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h  Normal file
@@ -0,0 +1,352 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2015 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CONSERVATIVESPARSESPARSEPRODUCT_H
#define EIGEN_CONSERVATIVESPARSESPARSEPRODUCT_H

namespace Eigen {

namespace internal {

template<typename Lhs, typename Rhs, typename ResultType>
static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res, bool sortedInsertion = false)
{
  typedef typename remove_all<Lhs>::type::Scalar LhsScalar;
  typedef typename remove_all<Rhs>::type::Scalar RhsScalar;
  typedef typename remove_all<ResultType>::type::Scalar ResScalar;

  // make sure to call innerSize/outerSize since we fake the storage order.
  Index rows = lhs.innerSize();
  Index cols = rhs.outerSize();
  eigen_assert(lhs.outerSize() == rhs.innerSize());

  ei_declare_aligned_stack_constructed_variable(bool,      mask,    rows, 0);
  ei_declare_aligned_stack_constructed_variable(ResScalar, values,  rows, 0);
  ei_declare_aligned_stack_constructed_variable(Index,     indices, rows, 0);

  std::memset(mask,0,sizeof(bool)*rows);

  evaluator<Lhs> lhsEval(lhs);
  evaluator<Rhs> rhsEval(rhs);

  // estimate the number of non zero entries
  // given a rhs column containing Y non zeros, we assume that the respective Y columns
  // of the lhs differ on average by one non zero, thus the number of non zeros for
  // the product of a rhs column with the lhs is X+Y where X is the average number of
  // non zeros per column of the lhs.
  // Therefore, we have nnz(lhs*rhs) = nnz(lhs) + nnz(rhs)
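  // For instance, if both operands hold on average 5 non zeros per column, a
  // result column built from one rhs column is expected to touch about 5+5
  // entries, and summing this over all columns gives the nnz(lhs) + nnz(rhs)
  // reservation used below.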
  Index estimated_nnz_prod = lhsEval.nonZerosEstimate() + rhsEval.nonZerosEstimate();

  res.setZero();
  res.reserve(Index(estimated_nnz_prod));
  // we compute each column of the result, one after the other
  for (Index j=0; j<cols; ++j)
  {

    res.startVec(j);
    Index nnz = 0;
    for (typename evaluator<Rhs>::InnerIterator rhsIt(rhsEval, j); rhsIt; ++rhsIt)
    {
      RhsScalar y = rhsIt.value();
      Index k = rhsIt.index();
      for (typename evaluator<Lhs>::InnerIterator lhsIt(lhsEval, k); lhsIt; ++lhsIt)
      {
        Index i = lhsIt.index();
        LhsScalar x = lhsIt.value();
        if(!mask[i])
        {
          mask[i] = true;
          values[i] = x * y;
          indices[nnz] = i;
          ++nnz;
        }
        else
          values[i] += x * y;
      }
    }
    if(!sortedInsertion)
    {
      // unordered insertion
      for(Index k=0; k<nnz; ++k)
      {
        Index i = indices[k];
        res.insertBackByOuterInnerUnordered(j,i) = values[i];
        mask[i] = false;
      }
    }
    else
    {
      // alternative ordered insertion code:
      const Index t200 = rows/11; // 11 == (log2(200)*1.39)
      const Index t = (rows*100)/139;

      // FIXME reserve nnz non zeros
      // FIXME implement faster sorting algorithms for very small nnz
      // if the result is sparse enough => use a quick sort
      // otherwise => loop through the entire vector
      // In order to avoid performing an expensive log2 when the
      // result is clearly very sparse we use a linear bound up to 200.
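      // For instance, with rows==1000 we get t200==90 and t==719: nnz==50 sorts
      // via the linear bound, nnz==95 still sorts since 95*log2(95) ~= 624 < 719,
      // while nnz==150 (150*log2(150) ~= 1084 >= 719) takes the dense path below.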
      if((nnz<200 && nnz<t200) || nnz * numext::log2(int(nnz)) < t)
      {
        if(nnz>1) std::sort(indices,indices+nnz);
        for(Index k=0; k<nnz; ++k)
        {
          Index i = indices[k];
          res.insertBackByOuterInner(j,i) = values[i];
          mask[i] = false;
        }
      }
      else
      {
        // dense path
        for(Index i=0; i<rows; ++i)
        {
          if(mask[i])
          {
            mask[i] = false;
            res.insertBackByOuterInner(j,i) = values[i];
          }
        }
      }
    }
  }
  res.finalize();
}


} // end namespace internal

namespace internal {

template<typename Lhs, typename Rhs, typename ResultType,
  int LhsStorageOrder = (traits<Lhs>::Flags&RowMajorBit) ? RowMajor : ColMajor,
  int RhsStorageOrder = (traits<Rhs>::Flags&RowMajorBit) ? RowMajor : ColMajor,
  int ResStorageOrder = (traits<ResultType>::Flags&RowMajorBit) ? RowMajor : ColMajor>
struct conservative_sparse_sparse_product_selector;

template<typename Lhs, typename Rhs, typename ResultType>
struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,ColMajor>
{
  typedef typename remove_all<Lhs>::type LhsCleaned;
  typedef typename LhsCleaned::Scalar Scalar;

  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
  {
    typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorMatrix;
    typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorMatrixAux;
    typedef typename sparse_eval<ColMajorMatrixAux,ResultType::RowsAtCompileTime,ResultType::ColsAtCompileTime,ColMajorMatrixAux::Flags>::type ColMajorMatrix;

    // If the result is tall and thin (in the extreme case a column vector)
    // then it is faster to sort the coefficients inplace instead of transposing twice.
    // FIXME, the following heuristic is probably not very good.
    if(lhs.rows()>rhs.cols())
    {
      ColMajorMatrix resCol(lhs.rows(),rhs.cols());
      // perform sorted insertion
      internal::conservative_sparse_sparse_product_impl<Lhs,Rhs,ColMajorMatrix>(lhs, rhs, resCol, true);
      res = resCol.markAsRValue();
    }
    else
    {
      ColMajorMatrixAux resCol(lhs.rows(),rhs.cols());
      // resort to transpose to sort the entries
      internal::conservative_sparse_sparse_product_impl<Lhs,Rhs,ColMajorMatrixAux>(lhs, rhs, resCol, false);
      RowMajorMatrix resRow(resCol);
      res = resRow.markAsRValue();
    }
  }
};

template<typename Lhs, typename Rhs, typename ResultType>
struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,ColMajor,ColMajor>
{
  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
  {
    typedef SparseMatrix<typename Rhs::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorRhs;
    typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorRes;
    RowMajorRhs rhsRow = rhs;
    RowMajorRes resRow(lhs.rows(), rhs.cols());
    internal::conservative_sparse_sparse_product_impl<RowMajorRhs,Lhs,RowMajorRes>(rhsRow, lhs, resRow);
    res = resRow;
  }
};

template<typename Lhs, typename Rhs, typename ResultType>
struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,RowMajor,ColMajor>
{
  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
  {
    typedef SparseMatrix<typename Lhs::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorLhs;
    typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorRes;
    RowMajorLhs lhsRow = lhs;
    RowMajorRes resRow(lhs.rows(), rhs.cols());
    internal::conservative_sparse_sparse_product_impl<Rhs,RowMajorLhs,RowMajorRes>(rhs, lhsRow, resRow);
    res = resRow;
  }
};

template<typename Lhs, typename Rhs, typename ResultType>
struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,ColMajor>
{
  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
  {
    typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorMatrix;
    RowMajorMatrix resRow(lhs.rows(), rhs.cols());
    internal::conservative_sparse_sparse_product_impl<Rhs,Lhs,RowMajorMatrix>(rhs, lhs, resRow);
    res = resRow;
  }
};


template<typename Lhs, typename Rhs, typename ResultType>
struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,RowMajor>
{
  typedef typename traits<typename remove_all<Lhs>::type>::Scalar Scalar;

  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
  {
    typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorMatrix;
    ColMajorMatrix resCol(lhs.rows(), rhs.cols());
    internal::conservative_sparse_sparse_product_impl<Lhs,Rhs,ColMajorMatrix>(lhs, rhs, resCol);
    res = resCol;
  }
};

template<typename Lhs, typename Rhs, typename ResultType>
struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,ColMajor,RowMajor>
{
  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
  {
    typedef SparseMatrix<typename Lhs::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorLhs;
    typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorRes;
    ColMajorLhs lhsCol = lhs;
    ColMajorRes resCol(lhs.rows(), rhs.cols());
    internal::conservative_sparse_sparse_product_impl<ColMajorLhs,Rhs,ColMajorRes>(lhsCol, rhs, resCol);
    res = resCol;
  }
};

template<typename Lhs, typename Rhs, typename ResultType>
struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,RowMajor,RowMajor>
{
  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
  {
    typedef SparseMatrix<typename Rhs::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorRhs;
    typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorRes;
    ColMajorRhs rhsCol = rhs;
    ColMajorRes resCol(lhs.rows(), rhs.cols());
    internal::conservative_sparse_sparse_product_impl<Lhs,ColMajorRhs,ColMajorRes>(lhs, rhsCol, resCol);
    res = resCol;
  }
};

template<typename Lhs, typename Rhs, typename ResultType>
struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,RowMajor>
{
  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
  {
    typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::StorageIndex> RowMajorMatrix;
    typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorMatrix;
    RowMajorMatrix resRow(lhs.rows(),rhs.cols());
    internal::conservative_sparse_sparse_product_impl<Rhs,Lhs,RowMajorMatrix>(rhs, lhs, resRow);
    // sort the non zeros:
    ColMajorMatrix resCol(resRow);
    res = resCol;
  }
};

} // end namespace internal


namespace internal {

template<typename Lhs, typename Rhs, typename ResultType>
static void sparse_sparse_to_dense_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res)
{
  typedef typename remove_all<Lhs>::type::Scalar LhsScalar;
  typedef typename remove_all<Rhs>::type::Scalar RhsScalar;
  Index cols = rhs.outerSize();
  eigen_assert(lhs.outerSize() == rhs.innerSize());

  evaluator<Lhs> lhsEval(lhs);
  evaluator<Rhs> rhsEval(rhs);

  for (Index j=0; j<cols; ++j)
  {
    for (typename evaluator<Rhs>::InnerIterator rhsIt(rhsEval, j); rhsIt; ++rhsIt)
    {
      RhsScalar y = rhsIt.value();
      Index k = rhsIt.index();
      for (typename evaluator<Lhs>::InnerIterator lhsIt(lhsEval, k); lhsIt; ++lhsIt)
      {
        Index i = lhsIt.index();
        LhsScalar x = lhsIt.value();
        res.coeffRef(i,j) += x * y;
      }
    }
  }
}


} // end namespace internal

namespace internal {

template<typename Lhs, typename Rhs, typename ResultType,
  int LhsStorageOrder = (traits<Lhs>::Flags&RowMajorBit) ? RowMajor : ColMajor,
  int RhsStorageOrder = (traits<Rhs>::Flags&RowMajorBit) ? RowMajor : ColMajor>
struct sparse_sparse_to_dense_product_selector;

template<typename Lhs, typename Rhs, typename ResultType>
struct sparse_sparse_to_dense_product_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor>
{
  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
  {
    internal::sparse_sparse_to_dense_product_impl<Lhs,Rhs,ResultType>(lhs, rhs, res);
  }
};

template<typename Lhs, typename Rhs, typename ResultType>
struct sparse_sparse_to_dense_product_selector<Lhs,Rhs,ResultType,RowMajor,ColMajor>
{
  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
  {
    typedef SparseMatrix<typename Lhs::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorLhs;
    ColMajorLhs lhsCol(lhs);
    internal::sparse_sparse_to_dense_product_impl<ColMajorLhs,Rhs,ResultType>(lhsCol, rhs, res);
  }
};

template<typename Lhs, typename Rhs, typename ResultType>
struct sparse_sparse_to_dense_product_selector<Lhs,Rhs,ResultType,ColMajor,RowMajor>
{
  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
  {
    typedef SparseMatrix<typename Rhs::Scalar,ColMajor,typename ResultType::StorageIndex> ColMajorRhs;
    ColMajorRhs rhsCol(rhs);
    internal::sparse_sparse_to_dense_product_impl<Lhs,ColMajorRhs,ResultType>(lhs, rhsCol, res);
  }
};

template<typename Lhs, typename Rhs, typename ResultType>
struct sparse_sparse_to_dense_product_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor>
{
  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
  {
    Transpose<ResultType> trRes(res);
    internal::sparse_sparse_to_dense_product_impl<Rhs,Lhs,Transpose<ResultType> >(rhs, lhs, trRes);
  }
};


} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_CONSERVATIVESPARSESPARSEPRODUCT_H
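For reference, these selectors are what a plain sparse-times-sparse product resolves to through the public API; a minimal sketch (the values and sizes are arbitrary):

  #include <Eigen/SparseCore>

  int main()
  {
    typedef Eigen::SparseMatrix<double> SpMat;   // ColMajor by default
    SpMat A(1000,1000), B(1000,1000);
    A.insert(3,5) = 1.0;  B.insert(5,7) = 2.0;
    A.makeCompressed();   B.makeCompressed();
    // operator* routes to conservative_sparse_sparse_product_selector, which
    // picks sorted vs. unsorted insertion based on the storage orders involved.
    SpMat C = A * B;              // C(3,7) == 2.0
    // (A*B).pruned() instead goes through the pruning-based product, which
    // additionally drops numerical zeros from the result.
    SpMat Cp = (A * B).pruned();
    return 0;
  }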
67  3party/eigen/Eigen/src/SparseCore/MappedSparseMatrix.h  Normal file
@@ -0,0 +1,67 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_MAPPED_SPARSEMATRIX_H
#define EIGEN_MAPPED_SPARSEMATRIX_H

namespace Eigen {

/** \deprecated Use Map<SparseMatrix<> >
  * \class MappedSparseMatrix
  *
  * \brief Sparse matrix
  *
  * \param _Scalar the scalar type, i.e. the type of the coefficients
  *
  * See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme.
  *
  */
namespace internal {
template<typename _Scalar, int _Flags, typename _StorageIndex>
struct traits<MappedSparseMatrix<_Scalar, _Flags, _StorageIndex> > : traits<SparseMatrix<_Scalar, _Flags, _StorageIndex> >
{};
} // end namespace internal

template<typename _Scalar, int _Flags, typename _StorageIndex>
class MappedSparseMatrix
  : public Map<SparseMatrix<_Scalar, _Flags, _StorageIndex> >
{
    typedef Map<SparseMatrix<_Scalar, _Flags, _StorageIndex> > Base;

  public:

    typedef typename Base::StorageIndex StorageIndex;
    typedef typename Base::Scalar Scalar;

    inline MappedSparseMatrix(Index rows, Index cols, Index nnz, StorageIndex* outerIndexPtr, StorageIndex* innerIndexPtr, Scalar* valuePtr, StorageIndex* innerNonZeroPtr = 0)
      : Base(rows, cols, nnz, outerIndexPtr, innerIndexPtr, valuePtr, innerNonZeroPtr)
    {}

    /** Empty destructor */
    inline ~MappedSparseMatrix() {}
};

namespace internal {

template<typename _Scalar, int _Options, typename _StorageIndex>
struct evaluator<MappedSparseMatrix<_Scalar,_Options,_StorageIndex> >
  : evaluator<SparseCompressedBase<MappedSparseMatrix<_Scalar,_Options,_StorageIndex> > >
{
  typedef MappedSparseMatrix<_Scalar,_Options,_StorageIndex> XprType;
  typedef evaluator<SparseCompressedBase<XprType> > Base;

  evaluator() : Base() {}
  explicit evaluator(const XprType &mat) : Base(mat) {}
};

}

} // end namespace Eigen

#endif // EIGEN_MAPPED_SPARSEMATRIX_H
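As the \deprecated tag above says, new code should use Map<SparseMatrix<> > directly; a minimal sketch wrapping externally owned compressed-column (CSC) arrays:

  #include <Eigen/SparseCore>
  #include <iostream>

  int main()
  {
    // 3x3 CSC matrix with 2 non zeros: (0,0)=1 and (2,1)=2
    int    outer[4] = {0, 1, 2, 2};   // column start offsets (outerSize+1 entries)
    int    inner[2] = {0, 2};         // row indices
    double vals[2]  = {1.0, 2.0};
    Eigen::Map<Eigen::SparseMatrix<double> > m(3, 3, 2, outer, inner, vals);
    std::cout << m.coeff(2,1) << "\n"; // prints 2
    return 0;
  }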
270  3party/eigen/Eigen/src/SparseCore/SparseAssign.h  Normal file
@@ -0,0 +1,270 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSEASSIGN_H
#define EIGEN_SPARSEASSIGN_H

namespace Eigen {

template<typename Derived>
template<typename OtherDerived>
Derived& SparseMatrixBase<Derived>::operator=(const EigenBase<OtherDerived> &other)
{
  internal::call_assignment_no_alias(derived(), other.derived());
  return derived();
}

template<typename Derived>
template<typename OtherDerived>
Derived& SparseMatrixBase<Derived>::operator=(const ReturnByValue<OtherDerived>& other)
{
  // TODO use the evaluator mechanism
  other.evalTo(derived());
  return derived();
}

template<typename Derived>
template<typename OtherDerived>
inline Derived& SparseMatrixBase<Derived>::operator=(const SparseMatrixBase<OtherDerived>& other)
{
  // by default sparse evaluations do not alias, so we can safely bypass the generic call_assignment routine
  internal::Assignment<Derived,OtherDerived,internal::assign_op<Scalar,typename OtherDerived::Scalar> >
          ::run(derived(), other.derived(), internal::assign_op<Scalar,typename OtherDerived::Scalar>());
  return derived();
}

template<typename Derived>
inline Derived& SparseMatrixBase<Derived>::operator=(const Derived& other)
{
  internal::call_assignment_no_alias(derived(), other.derived());
  return derived();
}

namespace internal {

template<>
struct storage_kind_to_evaluator_kind<Sparse> {
  typedef IteratorBased Kind;
};

template<>
struct storage_kind_to_shape<Sparse> {
  typedef SparseShape Shape;
};

struct Sparse2Sparse {};
struct Sparse2Dense  {};

template<> struct AssignmentKind<SparseShape, SparseShape>           { typedef Sparse2Sparse Kind; };
template<> struct AssignmentKind<SparseShape, SparseTriangularShape> { typedef Sparse2Sparse Kind; };
template<> struct AssignmentKind<DenseShape,  SparseShape>           { typedef Sparse2Dense  Kind; };
template<> struct AssignmentKind<DenseShape,  SparseTriangularShape> { typedef Sparse2Dense  Kind; };


template<typename DstXprType, typename SrcXprType>
void assign_sparse_to_sparse(DstXprType &dst, const SrcXprType &src)
{
  typedef typename DstXprType::Scalar Scalar;
  typedef internal::evaluator<DstXprType> DstEvaluatorType;
  typedef internal::evaluator<SrcXprType> SrcEvaluatorType;

  SrcEvaluatorType srcEvaluator(src);

  const bool transpose = (DstEvaluatorType::Flags & RowMajorBit) != (SrcEvaluatorType::Flags & RowMajorBit);
  const Index outerEvaluationSize = (SrcEvaluatorType::Flags&RowMajorBit) ? src.rows() : src.cols();
  if ((!transpose) && src.isRValue())
  {
    // eval without temporary
    dst.resize(src.rows(), src.cols());
    dst.setZero();
    dst.reserve((std::min)(src.rows()*src.cols(), (std::max)(src.rows(),src.cols())*2));
    for (Index j=0; j<outerEvaluationSize; ++j)
    {
      dst.startVec(j);
      for (typename SrcEvaluatorType::InnerIterator it(srcEvaluator, j); it; ++it)
      {
        Scalar v = it.value();
        dst.insertBackByOuterInner(j,it.index()) = v;
      }
    }
    dst.finalize();
  }
  else
  {
    // eval through a temporary
    eigen_assert(( ((internal::traits<DstXprType>::SupportedAccessPatterns & OuterRandomAccessPattern)==OuterRandomAccessPattern) ||
              (!((DstEvaluatorType::Flags & RowMajorBit) != (SrcEvaluatorType::Flags & RowMajorBit)))) &&
              "the transpose operation is supposed to be handled in SparseMatrix::operator=");

    enum { Flip = (DstEvaluatorType::Flags & RowMajorBit) != (SrcEvaluatorType::Flags & RowMajorBit) };


    DstXprType temp(src.rows(), src.cols());

    temp.reserve((std::min)(src.rows()*src.cols(), (std::max)(src.rows(),src.cols())*2));
    for (Index j=0; j<outerEvaluationSize; ++j)
    {
      temp.startVec(j);
      for (typename SrcEvaluatorType::InnerIterator it(srcEvaluator, j); it; ++it)
      {
        Scalar v = it.value();
        temp.insertBackByOuterInner(Flip?it.index():j,Flip?j:it.index()) = v;
      }
    }
    temp.finalize();

    dst = temp.markAsRValue();
  }
}

// Generic Sparse to Sparse assignment
template< typename DstXprType, typename SrcXprType, typename Functor>
struct Assignment<DstXprType, SrcXprType, Functor, Sparse2Sparse>
{
  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
  {
    assign_sparse_to_sparse(dst.derived(), src.derived());
  }
};

// Generic Sparse to Dense assignment
template< typename DstXprType, typename SrcXprType, typename Functor, typename Weak>
struct Assignment<DstXprType, SrcXprType, Functor, Sparse2Dense, Weak>
{
  static void run(DstXprType &dst, const SrcXprType &src, const Functor &func)
  {
    if(internal::is_same<Functor,internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> >::value)
      dst.setZero();

    internal::evaluator<SrcXprType> srcEval(src);
    resize_if_allowed(dst, src, func);
    internal::evaluator<DstXprType> dstEval(dst);

    const Index outerEvaluationSize = (internal::evaluator<SrcXprType>::Flags&RowMajorBit) ? src.rows() : src.cols();
    for (Index j=0; j<outerEvaluationSize; ++j)
      for (typename internal::evaluator<SrcXprType>::InnerIterator i(srcEval,j); i; ++i)
        func.assignCoeff(dstEval.coeffRef(i.row(),i.col()), i.value());
  }
};

// Specialization for dense ?= dense +/- sparse and dense ?= sparse +/- dense
template<typename DstXprType, typename Func1, typename Func2>
struct assignment_from_dense_op_sparse
{
  template<typename SrcXprType, typename InitialFunc>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  void run(DstXprType &dst, const SrcXprType &src, const InitialFunc& /*func*/)
  {
    #ifdef EIGEN_SPARSE_ASSIGNMENT_FROM_DENSE_OP_SPARSE_PLUGIN
    EIGEN_SPARSE_ASSIGNMENT_FROM_DENSE_OP_SPARSE_PLUGIN
    #endif

    call_assignment_no_alias(dst, src.lhs(), Func1());
    call_assignment_no_alias(dst, src.rhs(), Func2());
  }

  // Specialization for dense1 = sparse + dense2; -> dense1 = dense2; dense1 += sparse;
  template<typename Lhs, typename Rhs, typename Scalar>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  typename internal::enable_if<internal::is_same<typename internal::evaluator_traits<Rhs>::Shape,DenseShape>::value>::type
  run(DstXprType &dst, const CwiseBinaryOp<internal::scalar_sum_op<Scalar,Scalar>, const Lhs, const Rhs> &src,
      const internal::assign_op<typename DstXprType::Scalar,Scalar>& /*func*/)
  {
    #ifdef EIGEN_SPARSE_ASSIGNMENT_FROM_SPARSE_ADD_DENSE_PLUGIN
    EIGEN_SPARSE_ASSIGNMENT_FROM_SPARSE_ADD_DENSE_PLUGIN
    #endif

    // Apply the dense matrix first, then the sparse one.
    call_assignment_no_alias(dst, src.rhs(), Func1());
    call_assignment_no_alias(dst, src.lhs(), Func2());
  }

  // Specialization for dense1 = sparse - dense2; -> dense1 = -dense2; dense1 += sparse;
  template<typename Lhs, typename Rhs, typename Scalar>
  static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  typename internal::enable_if<internal::is_same<typename internal::evaluator_traits<Rhs>::Shape,DenseShape>::value>::type
  run(DstXprType &dst, const CwiseBinaryOp<internal::scalar_difference_op<Scalar,Scalar>, const Lhs, const Rhs> &src,
      const internal::assign_op<typename DstXprType::Scalar,Scalar>& /*func*/)
  {
    #ifdef EIGEN_SPARSE_ASSIGNMENT_FROM_SPARSE_SUB_DENSE_PLUGIN
    EIGEN_SPARSE_ASSIGNMENT_FROM_SPARSE_SUB_DENSE_PLUGIN
    #endif

    // Apply the dense matrix first, then the sparse one.
    call_assignment_no_alias(dst, -src.rhs(), Func1());
    call_assignment_no_alias(dst,  src.lhs(), add_assign_op<typename DstXprType::Scalar,typename Lhs::Scalar>());
  }
};

#define EIGEN_CATCH_ASSIGN_DENSE_OP_SPARSE(ASSIGN_OP,BINOP,ASSIGN_OP2) \
  template< typename DstXprType, typename Lhs, typename Rhs, typename Scalar> \
  struct Assignment<DstXprType, CwiseBinaryOp<internal::BINOP<Scalar,Scalar>, const Lhs, const Rhs>, internal::ASSIGN_OP<typename DstXprType::Scalar,Scalar>, \
                    Sparse2Dense, \
                    typename internal::enable_if<   internal::is_same<typename internal::evaluator_traits<Lhs>::Shape,DenseShape>::value \
                                                 || internal::is_same<typename internal::evaluator_traits<Rhs>::Shape,DenseShape>::value>::type> \
    : assignment_from_dense_op_sparse<DstXprType, internal::ASSIGN_OP<typename DstXprType::Scalar,typename Lhs::Scalar>, internal::ASSIGN_OP2<typename DstXprType::Scalar,typename Rhs::Scalar> > \
  {}

EIGEN_CATCH_ASSIGN_DENSE_OP_SPARSE(assign_op,    scalar_sum_op,add_assign_op);
EIGEN_CATCH_ASSIGN_DENSE_OP_SPARSE(add_assign_op,scalar_sum_op,add_assign_op);
EIGEN_CATCH_ASSIGN_DENSE_OP_SPARSE(sub_assign_op,scalar_sum_op,sub_assign_op);

EIGEN_CATCH_ASSIGN_DENSE_OP_SPARSE(assign_op,    scalar_difference_op,sub_assign_op);
EIGEN_CATCH_ASSIGN_DENSE_OP_SPARSE(add_assign_op,scalar_difference_op,sub_assign_op);
EIGEN_CATCH_ASSIGN_DENSE_OP_SPARSE(sub_assign_op,scalar_difference_op,add_assign_op);


// Specialization for "dst = dec.solve(rhs)"
// NOTE we need to specialize it for Sparse2Sparse to avoid ambiguous specialization error
template<typename DstXprType, typename DecType, typename RhsType, typename Scalar>
struct Assignment<DstXprType, Solve<DecType,RhsType>, internal::assign_op<Scalar,Scalar>, Sparse2Sparse>
{
  typedef Solve<DecType,RhsType> SrcXprType;
  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &)
  {
    Index dstRows = src.rows();
    Index dstCols = src.cols();
    if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
      dst.resize(dstRows, dstCols);

    src.dec()._solve_impl(src.rhs(), dst);
  }
};

struct Diagonal2Sparse {};

template<> struct AssignmentKind<SparseShape,DiagonalShape> { typedef Diagonal2Sparse Kind; };

template< typename DstXprType, typename SrcXprType, typename Functor>
struct Assignment<DstXprType, SrcXprType, Functor, Diagonal2Sparse>
{
  typedef typename DstXprType::StorageIndex StorageIndex;
  typedef typename DstXprType::Scalar Scalar;

  template<int Options, typename AssignFunc>
  static void run(SparseMatrix<Scalar,Options,StorageIndex> &dst, const SrcXprType &src, const AssignFunc &func)
  { dst.assignDiagonal(src.diagonal(), func); }

  template<typename DstDerived>
  static void run(SparseMatrixBase<DstDerived> &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
  { dst.derived().diagonal() = src.diagonal(); }

  template<typename DstDerived>
  static void run(SparseMatrixBase<DstDerived> &dst, const SrcXprType &src, const internal::add_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
  { dst.derived().diagonal() += src.diagonal(); }

  template<typename DstDerived>
  static void run(SparseMatrixBase<DstDerived> &dst, const SrcXprType &src, const internal::sub_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &/*func*/)
  { dst.derived().diagonal() -= src.diagonal(); }
};
} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_SPARSEASSIGN_H
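A small sketch of the assignment kinds this file dispatches, using only public API:

  #include <Eigen/Dense>
  #include <Eigen/SparseCore>

  int main()
  {
    Eigen::SparseMatrix<double> S(4,4);
    S.insert(1,2) = 5.0;
    S.makeCompressed();

    Eigen::MatrixXd D = Eigen::MatrixXd::Ones(4,4);

    Eigen::MatrixXd A = S;     // Sparse2Dense: zero-fill, then copy the stored entries
    D += S;                    // dense ?= sparse: add_assign visits only stored entries
    Eigen::MatrixXd B = D + S; // dense = dense + sparse: dense part first, then sparse
    Eigen::SparseMatrix<double,Eigen::RowMajor> T = S; // Sparse2Sparse across storage orders
    return 0;
  }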
571  3party/eigen/Eigen/src/SparseCore/SparseBlock.h  Normal file
@@ -0,0 +1,571 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSE_BLOCK_H
#define EIGEN_SPARSE_BLOCK_H

namespace Eigen {

// Subset of columns or rows
template<typename XprType, int BlockRows, int BlockCols>
class BlockImpl<XprType,BlockRows,BlockCols,true,Sparse>
  : public SparseMatrixBase<Block<XprType,BlockRows,BlockCols,true> >
{
    typedef typename internal::remove_all<typename XprType::Nested>::type _MatrixTypeNested;
    typedef Block<XprType, BlockRows, BlockCols, true> BlockType;
  public:
    enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
  protected:
    enum { OuterSize = IsRowMajor ? BlockRows : BlockCols };
    typedef SparseMatrixBase<BlockType> Base;
    using Base::convert_index;
  public:
    EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)

    inline BlockImpl(XprType& xpr, Index i)
      : m_matrix(xpr), m_outerStart(convert_index(i)), m_outerSize(OuterSize)
    {}

    inline BlockImpl(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
      : m_matrix(xpr), m_outerStart(convert_index(IsRowMajor ? startRow : startCol)), m_outerSize(convert_index(IsRowMajor ? blockRows : blockCols))
    {}

    EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
    EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }

    Index nonZeros() const
    {
      typedef internal::evaluator<XprType> EvaluatorType;
      EvaluatorType matEval(m_matrix);
      Index nnz = 0;
      Index end = m_outerStart + m_outerSize.value();
      for(Index j=m_outerStart; j<end; ++j)
        for(typename EvaluatorType::InnerIterator it(matEval, j); it; ++it)
          ++nnz;
      return nnz;
    }

    inline const Scalar coeff(Index row, Index col) const
    {
      return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
    }

    inline const Scalar coeff(Index index) const
    {
      return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart);
    }

    inline const XprType& nestedExpression() const { return m_matrix; }
    inline XprType& nestedExpression() { return m_matrix; }
    Index startRow() const { return IsRowMajor ? m_outerStart : 0; }
    Index startCol() const { return IsRowMajor ? 0 : m_outerStart; }
    Index blockRows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
    Index blockCols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }

  protected:

    typename internal::ref_selector<XprType>::non_const_type m_matrix;
    Index m_outerStart;
    const internal::variable_if_dynamic<Index, OuterSize> m_outerSize;

  protected:
    // Disable assignment with clear error message.
    // Note that simply removing operator= yields compilation errors with ICC+MSVC
    template<typename T>
    BlockImpl& operator=(const T&)
    {
      EIGEN_STATIC_ASSERT(sizeof(T)==0, THIS_SPARSE_BLOCK_SUBEXPRESSION_IS_READ_ONLY);
      return *this;
    }
};
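// For example, an outer panel of a generic sparse expression, such as
// (A*A).middleCols(0,2) for a column-major A, resolves to the read-only
// specialization above: coefficients can be read and nonZeros() counted, but
// assigning into the block trips the static assertion. An outer panel of a
// plain SparseMatrix, e.g. A.middleCols(0,2), instead uses the writable
// sparse_matrix_block_impl specialization below:
//
//   Eigen::SparseMatrix<double> A(10,10), B(10,2);
//   A.middleCols(0,2) = B;   // handled by sparse_matrix_block_impl::operator=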

/***************************************************************************
* specialization for SparseMatrix
***************************************************************************/

namespace internal {

template<typename SparseMatrixType, int BlockRows, int BlockCols>
class sparse_matrix_block_impl
  : public SparseCompressedBase<Block<SparseMatrixType,BlockRows,BlockCols,true> >
{
    typedef typename internal::remove_all<typename SparseMatrixType::Nested>::type _MatrixTypeNested;
    typedef Block<SparseMatrixType, BlockRows, BlockCols, true> BlockType;
    typedef SparseCompressedBase<Block<SparseMatrixType,BlockRows,BlockCols,true> > Base;
    using Base::convert_index;
  public:
    enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
    EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
  protected:
    typedef typename Base::IndexVector IndexVector;
    enum { OuterSize = IsRowMajor ? BlockRows : BlockCols };
  public:

    inline sparse_matrix_block_impl(SparseMatrixType& xpr, Index i)
      : m_matrix(xpr), m_outerStart(convert_index(i)), m_outerSize(OuterSize)
    {}

    inline sparse_matrix_block_impl(SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
      : m_matrix(xpr), m_outerStart(convert_index(IsRowMajor ? startRow : startCol)), m_outerSize(convert_index(IsRowMajor ? blockRows : blockCols))
    {}

    template<typename OtherDerived>
    inline BlockType& operator=(const SparseMatrixBase<OtherDerived>& other)
    {
      typedef typename internal::remove_all<typename SparseMatrixType::Nested>::type _NestedMatrixType;
      _NestedMatrixType& matrix = m_matrix;
      // This assignment is slow if this vector set is not empty
      // and/or it is not at the end of the nonzeros of the underlying matrix.

      // 1 - eval to a temporary to avoid transposition and/or aliasing issues
      Ref<const SparseMatrix<Scalar, IsRowMajor ? RowMajor : ColMajor, StorageIndex> > tmp(other.derived());
      eigen_internal_assert(tmp.outerSize()==m_outerSize.value());

      // 2 - let's check whether there is enough allocated memory
      Index nnz        = tmp.nonZeros();
      Index start      = m_outerStart==0 ? 0 : m_matrix.outerIndexPtr()[m_outerStart]; // starting position of the current block
      Index end        = m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()];   // ending position of the current block
      Index block_size = end - start;                                                  // available room in the current block
      Index tail_size  = m_matrix.outerIndexPtr()[m_matrix.outerSize()] - end;

      Index free_size  = m_matrix.isCompressed()
                       ? Index(matrix.data().allocatedSize()) + block_size
                       : block_size;

      Index tmp_start = tmp.outerIndexPtr()[0];

      bool update_trailing_pointers = false;
      if(nnz>free_size)
      {
        // realloc manually to reduce copies
        typename SparseMatrixType::Storage newdata(m_matrix.data().allocatedSize() - block_size + nnz);

        internal::smart_copy(m_matrix.valuePtr(),      m_matrix.valuePtr() + start,      newdata.valuePtr());
        internal::smart_copy(m_matrix.innerIndexPtr(), m_matrix.innerIndexPtr() + start, newdata.indexPtr());

        internal::smart_copy(tmp.valuePtr() + tmp_start,      tmp.valuePtr() + tmp_start + nnz,      newdata.valuePtr() + start);
        internal::smart_copy(tmp.innerIndexPtr() + tmp_start, tmp.innerIndexPtr() + tmp_start + nnz, newdata.indexPtr() + start);

        internal::smart_copy(matrix.valuePtr()+end,      matrix.valuePtr()+end + tail_size,      newdata.valuePtr()+start+nnz);
        internal::smart_copy(matrix.innerIndexPtr()+end, matrix.innerIndexPtr()+end + tail_size, newdata.indexPtr()+start+nnz);

        newdata.resize(m_matrix.outerIndexPtr()[m_matrix.outerSize()] - block_size + nnz);

        matrix.data().swap(newdata);

        update_trailing_pointers = true;
      }
      else
      {
        if(m_matrix.isCompressed() && nnz!=block_size)
        {
          // no need to realloc, simply copy the tail at its respective position and insert tmp
          matrix.data().resize(start + nnz + tail_size);

          internal::smart_memmove(matrix.valuePtr()+end,      matrix.valuePtr() + end+tail_size,      matrix.valuePtr() + start+nnz);
          internal::smart_memmove(matrix.innerIndexPtr()+end, matrix.innerIndexPtr() + end+tail_size, matrix.innerIndexPtr() + start+nnz);

          update_trailing_pointers = true;
        }

        internal::smart_copy(tmp.valuePtr() + tmp_start,      tmp.valuePtr() + tmp_start + nnz,      matrix.valuePtr() + start);
        internal::smart_copy(tmp.innerIndexPtr() + tmp_start, tmp.innerIndexPtr() + tmp_start + nnz, matrix.innerIndexPtr() + start);
      }

      // update outer index pointers and innerNonZeros
      if(IsVectorAtCompileTime)
      {
        if(!m_matrix.isCompressed())
          matrix.innerNonZeroPtr()[m_outerStart] = StorageIndex(nnz);
|
||||
matrix.outerIndexPtr()[m_outerStart] = StorageIndex(start);
|
||||
}
|
||||
else
|
||||
{
|
||||
StorageIndex p = StorageIndex(start);
|
||||
for(Index k=0; k<m_outerSize.value(); ++k)
|
||||
{
|
||||
StorageIndex nnz_k = internal::convert_index<StorageIndex>(tmp.innerVector(k).nonZeros());
|
||||
if(!m_matrix.isCompressed())
|
||||
matrix.innerNonZeroPtr()[m_outerStart+k] = nnz_k;
|
||||
matrix.outerIndexPtr()[m_outerStart+k] = p;
|
||||
p += nnz_k;
|
||||
}
|
||||
}
|
||||
|
||||
if(update_trailing_pointers)
|
||||
{
|
||||
StorageIndex offset = internal::convert_index<StorageIndex>(nnz - block_size);
|
||||
for(Index k = m_outerStart + m_outerSize.value(); k<=matrix.outerSize(); ++k)
|
||||
{
|
||||
matrix.outerIndexPtr()[k] += offset;
|
||||
}
|
||||
}
|
||||
|
||||
return derived();
|
||||
}
|
||||
|
||||
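    // Illustrative usage of the assignment above (a sketch, not from the
    // original sources; assumes a compressed column-major SparseMatrix<double> A):
    //
    //   Eigen::SparseMatrix<double> A(10,10);
    //   // ... fill A ...
    //   A.middleCols(1,3) = 2.0 * A.middleCols(5,3);
    //
    // The right-hand side is first evaluated into a temporary (step 1 above),
    // then the nonzeros of columns 1..3 are replaced in place, and the outer
    // index pointers (and innerNonZeros, in uncompressed mode) of the trailing
    // columns are shifted accordingly.
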
    inline BlockType& operator=(const BlockType& other)
    {
      return operator=<BlockType>(other);
    }

    inline const Scalar* valuePtr() const
    { return m_matrix.valuePtr(); }
    inline Scalar* valuePtr()
    { return m_matrix.valuePtr(); }

    inline const StorageIndex* innerIndexPtr() const
    { return m_matrix.innerIndexPtr(); }
    inline StorageIndex* innerIndexPtr()
    { return m_matrix.innerIndexPtr(); }

    inline const StorageIndex* outerIndexPtr() const
    { return m_matrix.outerIndexPtr() + m_outerStart; }
    inline StorageIndex* outerIndexPtr()
    { return m_matrix.outerIndexPtr() + m_outerStart; }

    inline const StorageIndex* innerNonZeroPtr() const
    { return isCompressed() ? 0 : (m_matrix.innerNonZeroPtr()+m_outerStart); }
    inline StorageIndex* innerNonZeroPtr()
    { return isCompressed() ? 0 : (m_matrix.innerNonZeroPtr()+m_outerStart); }

    bool isCompressed() const { return m_matrix.innerNonZeroPtr()==0; }

    inline Scalar& coeffRef(Index row, Index col)
    {
      return m_matrix.coeffRef(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
    }

    inline const Scalar coeff(Index row, Index col) const
    {
      return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
    }

    inline const Scalar coeff(Index index) const
    {
      return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart);
    }

    const Scalar& lastCoeff() const
    {
      EIGEN_STATIC_ASSERT_VECTOR_ONLY(sparse_matrix_block_impl);
      eigen_assert(Base::nonZeros()>0);
      if(m_matrix.isCompressed())
        return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart+1]-1];
      else
        return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart]+m_matrix.innerNonZeroPtr()[m_outerStart]-1];
    }

    EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
    EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }

    inline const SparseMatrixType& nestedExpression() const { return m_matrix; }
    inline SparseMatrixType& nestedExpression() { return m_matrix; }
    Index startRow() const { return IsRowMajor ? m_outerStart : 0; }
    Index startCol() const { return IsRowMajor ? 0 : m_outerStart; }
    Index blockRows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
    Index blockCols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }

  protected:

    typename internal::ref_selector<SparseMatrixType>::non_const_type m_matrix;
    Index m_outerStart;
    const internal::variable_if_dynamic<Index, OuterSize> m_outerSize;

};

} // namespace internal

template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
class BlockImpl<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true,Sparse>
  : public internal::sparse_matrix_block_impl<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols>
{
  public:
    typedef _StorageIndex StorageIndex;
    typedef SparseMatrix<_Scalar, _Options, _StorageIndex> SparseMatrixType;
    typedef internal::sparse_matrix_block_impl<SparseMatrixType,BlockRows,BlockCols> Base;
    inline BlockImpl(SparseMatrixType& xpr, Index i)
      : Base(xpr, i)
    {}

    inline BlockImpl(SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
      : Base(xpr, startRow, startCol, blockRows, blockCols)
    {}

    using Base::operator=;
};

template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
class BlockImpl<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true,Sparse>
  : public internal::sparse_matrix_block_impl<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols>
{
  public:
    typedef _StorageIndex StorageIndex;
    typedef const SparseMatrix<_Scalar, _Options, _StorageIndex> SparseMatrixType;
    typedef internal::sparse_matrix_block_impl<SparseMatrixType,BlockRows,BlockCols> Base;
    inline BlockImpl(SparseMatrixType& xpr, Index i)
      : Base(xpr, i)
    {}

    inline BlockImpl(SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
      : Base(xpr, startRow, startCol, blockRows, blockCols)
    {}

    using Base::operator=;
  private:
    template<typename Derived> BlockImpl(const SparseMatrixBase<Derived>& xpr, Index i);
    template<typename Derived> BlockImpl(const SparseMatrixBase<Derived>& xpr);
};

//----------

/** Generic implementation of sparse Block expression.
  * Read-only.
  */
template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel>
class BlockImpl<XprType,BlockRows,BlockCols,InnerPanel,Sparse>
  : public SparseMatrixBase<Block<XprType,BlockRows,BlockCols,InnerPanel> >, internal::no_assignment_operator
{
    typedef Block<XprType, BlockRows, BlockCols, InnerPanel> BlockType;
    typedef SparseMatrixBase<BlockType> Base;
    using Base::convert_index;
  public:
    enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
    EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)

    typedef typename internal::remove_all<typename XprType::Nested>::type _MatrixTypeNested;

    /** Column or Row constructor
      */
    inline BlockImpl(XprType& xpr, Index i)
      : m_matrix(xpr),
        m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? convert_index(i) : 0),
        m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? convert_index(i) : 0),
        m_blockRows(BlockRows==1 ? 1 : xpr.rows()),
        m_blockCols(BlockCols==1 ? 1 : xpr.cols())
    {}

    /** Dynamic-size constructor
      */
    inline BlockImpl(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
      : m_matrix(xpr), m_startRow(convert_index(startRow)), m_startCol(convert_index(startCol)), m_blockRows(convert_index(blockRows)), m_blockCols(convert_index(blockCols))
    {}

    inline Index rows() const { return m_blockRows.value(); }
    inline Index cols() const { return m_blockCols.value(); }

    inline Scalar& coeffRef(Index row, Index col)
    {
      return m_matrix.coeffRef(row + m_startRow.value(), col + m_startCol.value());
    }

    inline const Scalar coeff(Index row, Index col) const
    {
      return m_matrix.coeff(row + m_startRow.value(), col + m_startCol.value());
    }

    inline Scalar& coeffRef(Index index)
    {
      return m_matrix.coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
                               m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
    }

    inline const Scalar coeff(Index index) const
    {
      return m_matrix.coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
                            m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
    }

    inline const XprType& nestedExpression() const { return m_matrix; }
    inline XprType& nestedExpression() { return m_matrix; }
    Index startRow() const { return m_startRow.value(); }
    Index startCol() const { return m_startCol.value(); }
    Index blockRows() const { return m_blockRows.value(); }
    Index blockCols() const { return m_blockCols.value(); }

  protected:
    // friend class internal::GenericSparseBlockInnerIteratorImpl<XprType,BlockRows,BlockCols,InnerPanel>;
    friend struct internal::unary_evaluator<Block<XprType,BlockRows,BlockCols,InnerPanel>, internal::IteratorBased, Scalar >;

    Index nonZeros() const { return Dynamic; }

    typename internal::ref_selector<XprType>::non_const_type m_matrix;
    const internal::variable_if_dynamic<Index, XprType::RowsAtCompileTime == 1 ? 0 : Dynamic> m_startRow;
    const internal::variable_if_dynamic<Index, XprType::ColsAtCompileTime == 1 ? 0 : Dynamic> m_startCol;
    const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_blockRows;
    const internal::variable_if_dynamic<Index, ColsAtCompileTime> m_blockCols;

  protected:
    // Disable assignment with clear error message.
    // Note that simply removing operator= yields compilation errors with ICC+MSVC
    template<typename T>
    BlockImpl& operator=(const T&)
    {
      EIGEN_STATIC_ASSERT(sizeof(T)==0, THIS_SPARSE_BLOCK_SUBEXPRESSION_IS_READ_ONLY);
      return *this;
    }

};

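// Illustrative consequence (a sketch, not from the original sources): such a
// generic sparse sub-block can be read and copied, but not assigned to, e.g.:
//
//   Eigen::SparseMatrix<double> S(10,10), B;
//   B = S.block(1,1,3,3);      // OK: read-only evaluation into a new matrix
//   // S.block(1,1,3,3) = B;   // compile error:
//   //   THIS_SPARSE_BLOCK_SUBEXPRESSION_IS_READ_ONLY
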
namespace internal {

template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
struct unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased >
 : public evaluator_base<Block<ArgType,BlockRows,BlockCols,InnerPanel> >
{
    class InnerVectorInnerIterator;
    class OuterVectorInnerIterator;
  public:
    typedef Block<ArgType,BlockRows,BlockCols,InnerPanel> XprType;
    typedef typename XprType::StorageIndex StorageIndex;
    typedef typename XprType::Scalar Scalar;

    enum {
      IsRowMajor = XprType::IsRowMajor,

      OuterVector =  (BlockCols==1 && ArgType::IsRowMajor)
                    | // FIXME | instead of || to please GCC 4.4.0 stupid warning "suggest parentheses around &&".
                      // revert to || as soon as not needed anymore.
                     (BlockRows==1 && !ArgType::IsRowMajor),

      CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
      Flags = XprType::Flags
    };

    typedef typename internal::conditional<OuterVector,OuterVectorInnerIterator,InnerVectorInnerIterator>::type InnerIterator;

    explicit unary_evaluator(const XprType& op)
      : m_argImpl(op.nestedExpression()), m_block(op)
    {}

    inline Index nonZerosEstimate() const {
      const Index nnz = m_block.nonZeros();
      if(nnz < 0) {
        // Scale the non-zero estimate for the underlying expression linearly with block size.
        // Return zero if the underlying block is empty.
        const Index nested_sz = m_block.nestedExpression().size();
        return nested_sz == 0 ? 0 : m_argImpl.nonZerosEstimate() * m_block.size() / nested_sz;
      }
      return nnz;
    }

  protected:
    typedef typename evaluator<ArgType>::InnerIterator EvalIterator;

    evaluator<ArgType> m_argImpl;
    const XprType &m_block;
};

template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
class unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased>::InnerVectorInnerIterator
 : public EvalIterator
{
  // NOTE MSVC fails to compile if we don't explicitly "import" IsRowMajor from unary_evaluator
  //      because the base class EvalIterator has a private IsRowMajor enum too. (bug #1786)
  // NOTE We cannot call it IsRowMajor because it would shadow unary_evaluator::IsRowMajor
  enum { XprIsRowMajor = unary_evaluator::IsRowMajor };
  const XprType& m_block;
  Index m_end;
public:

  EIGEN_STRONG_INLINE InnerVectorInnerIterator(const unary_evaluator& aEval, Index outer)
    : EvalIterator(aEval.m_argImpl, outer + (XprIsRowMajor ? aEval.m_block.startRow() : aEval.m_block.startCol())),
      m_block(aEval.m_block),
      m_end(XprIsRowMajor ? aEval.m_block.startCol()+aEval.m_block.blockCols() : aEval.m_block.startRow()+aEval.m_block.blockRows())
  {
    while( (EvalIterator::operator bool()) && (EvalIterator::index() < (XprIsRowMajor ? m_block.startCol() : m_block.startRow())) )
      EvalIterator::operator++();
  }

  inline StorageIndex index() const { return EvalIterator::index() - convert_index<StorageIndex>(XprIsRowMajor ? m_block.startCol() : m_block.startRow()); }
  inline Index outer() const { return EvalIterator::outer() - (XprIsRowMajor ? m_block.startRow() : m_block.startCol()); }
  inline Index row() const { return EvalIterator::row() - m_block.startRow(); }
  inline Index col() const { return EvalIterator::col() - m_block.startCol(); }

  inline operator bool() const { return EvalIterator::operator bool() && EvalIterator::index() < m_end; }
};

template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
class unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased>::OuterVectorInnerIterator
{
  // NOTE see above
  enum { XprIsRowMajor = unary_evaluator::IsRowMajor };
  const unary_evaluator& m_eval;
  Index m_outerPos;
  const Index m_innerIndex;
  Index m_end;
  EvalIterator m_it;
public:

  EIGEN_STRONG_INLINE OuterVectorInnerIterator(const unary_evaluator& aEval, Index outer)
    : m_eval(aEval),
      m_outerPos( (XprIsRowMajor ? aEval.m_block.startCol() : aEval.m_block.startRow()) ),
      m_innerIndex(XprIsRowMajor ? aEval.m_block.startRow() : aEval.m_block.startCol()),
      m_end(XprIsRowMajor ? aEval.m_block.startCol()+aEval.m_block.blockCols() : aEval.m_block.startRow()+aEval.m_block.blockRows()),
      m_it(m_eval.m_argImpl, m_outerPos)
  {
    EIGEN_UNUSED_VARIABLE(outer);
    eigen_assert(outer==0);

    while(m_it && m_it.index() < m_innerIndex) ++m_it;
    if((!m_it) || (m_it.index()!=m_innerIndex))
      ++(*this);
  }

  inline StorageIndex index() const { return convert_index<StorageIndex>(m_outerPos - (XprIsRowMajor ? m_eval.m_block.startCol() : m_eval.m_block.startRow())); }
  inline Index outer() const { return 0; }
  inline Index row() const { return XprIsRowMajor ? 0 : index(); }
  inline Index col() const { return XprIsRowMajor ? index() : 0; }

  inline Scalar value() const { return m_it.value(); }
  inline Scalar& valueRef() { return m_it.valueRef(); }

  inline OuterVectorInnerIterator& operator++()
  {
    // search next non-zero entry
    while(++m_outerPos<m_end)
    {
      // Restart iterator at the next inner-vector:
      m_it.~EvalIterator();
      ::new (&m_it) EvalIterator(m_eval.m_argImpl, m_outerPos);
      // search for the key m_innerIndex in the current outer-vector
      while(m_it && m_it.index() < m_innerIndex) ++m_it;
      if(m_it && m_it.index()==m_innerIndex) break;
    }
    return *this;
  }

  inline operator bool() const { return m_outerPos < m_end; }
};

template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
struct unary_evaluator<Block<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true>, IteratorBased>
  : evaluator<SparseCompressedBase<Block<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> > >
{
  typedef Block<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> XprType;
  typedef evaluator<SparseCompressedBase<XprType> > Base;
  explicit unary_evaluator(const XprType &xpr) : Base(xpr) {}
};

template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
struct unary_evaluator<Block<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true>, IteratorBased>
  : evaluator<SparseCompressedBase<Block<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> > >
{
  typedef Block<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> XprType;
  typedef evaluator<SparseCompressedBase<XprType> > Base;
  explicit unary_evaluator(const XprType &xpr) : Base(xpr) {}
};

} // end namespace internal


} // end namespace Eigen

#endif // EIGEN_SPARSE_BLOCK_H
206
3party/eigen/Eigen/src/SparseCore/SparseColEtree.h
Normal file
@@ -0,0 +1,206 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.


/*

 * NOTE: This file is the modified version of sp_coletree.c file in SuperLU

 * -- SuperLU routine (version 3.1) --
 * Univ. of California Berkeley, Xerox Palo Alto Research Center,
 * and Lawrence Berkeley National Lab.
 * August 1, 2008
 *
 * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
 * EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program for any
 * purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is
 * granted, provided the above notices are retained, and a notice that
 * the code was modified is included with the above copyright notice.
 */
#ifndef SPARSE_COLETREE_H
#define SPARSE_COLETREE_H

namespace Eigen {

namespace internal {

/** Find the root of the tree/set containing the vertex i, using path halving */
template<typename Index, typename IndexVector>
Index etree_find (Index i, IndexVector& pp)
{
  Index p = pp(i); // Parent
  Index gp = pp(p); // Grand parent
  while (gp != p)
  {
    pp(i) = gp; // Parent pointer on find path is changed to former grand parent
    i = gp;
    p = pp(i);
    gp = pp(p);
  }
  return p;
}

/** Compute the column elimination tree of a sparse matrix
  * \param mat The matrix in column-major format.
  * \param parent The elimination tree
  * \param firstRowElt The column index of the first element in each row
  * \param perm The permutation to apply to the column of \b mat
  */
template <typename MatrixType, typename IndexVector>
int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowElt, typename MatrixType::StorageIndex *perm=0)
{
  typedef typename MatrixType::StorageIndex StorageIndex;
  StorageIndex nc = convert_index<StorageIndex>(mat.cols()); // Number of columns
  StorageIndex m = convert_index<StorageIndex>(mat.rows());
  StorageIndex diagSize = (std::min)(nc,m);
  IndexVector root(nc); // root of subtree of etree
  root.setZero();
  IndexVector pp(nc); // disjoint sets
  pp.setZero(); // Initialize disjoint sets
  parent.resize(mat.cols());
  // Compute first nonzero column in each row
  firstRowElt.resize(m);
  firstRowElt.setConstant(nc);
  firstRowElt.segment(0, diagSize).setLinSpaced(diagSize, 0, diagSize-1);
  bool found_diag;
  for (StorageIndex col = 0; col < nc; col++)
  {
    StorageIndex pcol = col;
    if(perm) pcol = perm[col];
    for (typename MatrixType::InnerIterator it(mat, pcol); it; ++it)
    {
      Index row = it.row();
      firstRowElt(row) = (std::min)(firstRowElt(row), col);
    }
  }
  /* Compute etree by Liu's algorithm for symmetric matrices,
     except use (firstRowElt[r],c) in place of an edge (r,c) of A.
     Thus each row clique in A'*A is replaced by a star
     centered at its first vertex, which has the same fill. */
  StorageIndex rset, cset, rroot;
  for (StorageIndex col = 0; col < nc; col++)
  {
    found_diag = col>=m;
    pp(col) = col;
    cset = col;
    root(cset) = col;
    parent(col) = nc;
    /* The diagonal element is treated here even if it does not exist in the matrix
     * hence the loop is executed once more */
    StorageIndex pcol = col;
    if(perm) pcol = perm[col];
    for (typename MatrixType::InnerIterator it(mat, pcol); it||!found_diag; ++it)
    { // A sequence of interleaved find and union is performed
      Index i = col;
      if(it) i = it.index();
      if (i == col) found_diag = true;

      StorageIndex row = firstRowElt(i);
      if (row >= col) continue;
      rset = internal::etree_find(row, pp); // Find the name of the set containing row
      rroot = root(rset);
      if (rroot != col)
      {
        parent(rroot) = col;
        pp(cset) = rset;
        cset = rset;
        root(cset) = col;
      }
    }
  }
  return 0;
}

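// Illustrative usage (a sketch, not from the original sources; assumes a
// compressed, column-major SparseMatrix<double> A):
//
//   typedef Eigen::Matrix<int, Eigen::Dynamic, 1> IndexVector;
//   IndexVector parent, firstRowElt, post;
//   Eigen::internal::coletree(A, parent, firstRowElt);           // parent(c): etree parent of column c, nc meaning "no parent"
//   Eigen::internal::treePostorder(int(A.cols()), parent, post); // postorder the tree (see treePostorder below)
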
/**
  * Depth-first search from vertex n. No recursion.
  * This routine was contributed by Cédric Doucet, CEDRAT Group, Meylan, France.
  */
template <typename IndexVector>
void nr_etdfs (typename IndexVector::Scalar n, IndexVector& parent, IndexVector& first_kid, IndexVector& next_kid, IndexVector& post, typename IndexVector::Scalar postnum)
{
  typedef typename IndexVector::Scalar StorageIndex;
  StorageIndex current = n, first, next;
  while (postnum != n)
  {
    // Get the first kid of the current node
    first = first_kid(current);

    // no kid for the current node
    if (first == -1)
    {
      // Numbering this node because it has no kid
      post(current) = postnum++;

      // looking for the next kid
      next = next_kid(current);
      while (next == -1)
      {
        // No more kids : back to the parent node
        current = parent(current);
        // numbering the parent node
        post(current) = postnum++;

        // Get the next kid
        next = next_kid(current);
      }
      // stopping criterion
      if (postnum == n+1) return;

      // Updating current node
      current = next;
    }
    else
    {
      current = first;
    }
  }
}


/**
  * \brief Post order a tree
  * \param n the number of nodes
  * \param parent Input tree
  * \param post postordered tree
  */
template <typename IndexVector>
void treePostorder(typename IndexVector::Scalar n, IndexVector& parent, IndexVector& post)
{
  typedef typename IndexVector::Scalar StorageIndex;
  IndexVector first_kid, next_kid; // Linked list of children
  StorageIndex postnum;
  // Allocate storage for working arrays and results
  first_kid.resize(n+1);
  next_kid.setZero(n+1);
  post.setZero(n+1);

  // Set up structure describing children
  first_kid.setConstant(-1);
  for (StorageIndex v = n-1; v >= 0; v--)
  {
    StorageIndex dad = parent(v);
    next_kid(v) = first_kid(dad);
    first_kid(dad) = v;
  }

  // Depth-first search from dummy root vertex #n
  postnum = 0;
  internal::nr_etdfs(n, parent, first_kid, next_kid, post, postnum);
}

} // end namespace internal

} // end namespace Eigen

#endif // SPARSE_COLETREE_H
370
3party/eigen/Eigen/src/SparseCore/SparseCompressedBase.h
Normal file
@@ -0,0 +1,370 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2015 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSE_COMPRESSED_BASE_H
#define EIGEN_SPARSE_COMPRESSED_BASE_H

namespace Eigen {

template<typename Derived> class SparseCompressedBase;

namespace internal {

template<typename Derived>
struct traits<SparseCompressedBase<Derived> > : traits<Derived>
{};

} // end namespace internal

/** \ingroup SparseCore_Module
  * \class SparseCompressedBase
  * \brief Common base class for sparse [compressed]-{row|column}-storage format.
  *
  * This class defines the common interface for all derived classes implementing the compressed sparse storage format, such as:
  *  - SparseMatrix
  *  - Ref<SparseMatrixType,Options>
  *  - Map<SparseMatrixType>
  *
  */
template<typename Derived>
class SparseCompressedBase
  : public SparseMatrixBase<Derived>
{
  public:
    typedef SparseMatrixBase<Derived> Base;
    EIGEN_SPARSE_PUBLIC_INTERFACE(SparseCompressedBase)
    using Base::operator=;
    using Base::IsRowMajor;

    class InnerIterator;
    class ReverseInnerIterator;

  protected:
    typedef typename Base::IndexVector IndexVector;
    Eigen::Map<IndexVector> innerNonZeros() { return Eigen::Map<IndexVector>(innerNonZeroPtr(), isCompressed()?0:derived().outerSize()); }
    const Eigen::Map<const IndexVector> innerNonZeros() const { return Eigen::Map<const IndexVector>(innerNonZeroPtr(), isCompressed()?0:derived().outerSize()); }

  public:

    /** \returns the number of non zero coefficients */
    inline Index nonZeros() const
    {
      if(Derived::IsVectorAtCompileTime && outerIndexPtr()==0)
        return derived().nonZeros();
      else if(isCompressed())
        return outerIndexPtr()[derived().outerSize()]-outerIndexPtr()[0];
      else if(derived().outerSize()==0)
        return 0;
      else
        return innerNonZeros().sum();
    }

    /** \returns a const pointer to the array of values.
      * This function is aimed at interoperability with other libraries.
      * \sa innerIndexPtr(), outerIndexPtr() */
    inline const Scalar* valuePtr() const { return derived().valuePtr(); }
    /** \returns a non-const pointer to the array of values.
      * This function is aimed at interoperability with other libraries.
      * \sa innerIndexPtr(), outerIndexPtr() */
    inline Scalar* valuePtr() { return derived().valuePtr(); }

    /** \returns a const pointer to the array of inner indices.
      * This function is aimed at interoperability with other libraries.
      * \sa valuePtr(), outerIndexPtr() */
    inline const StorageIndex* innerIndexPtr() const { return derived().innerIndexPtr(); }
    /** \returns a non-const pointer to the array of inner indices.
      * This function is aimed at interoperability with other libraries.
      * \sa valuePtr(), outerIndexPtr() */
    inline StorageIndex* innerIndexPtr() { return derived().innerIndexPtr(); }

    /** \returns a const pointer to the array of the starting positions of the inner vectors.
      * This function is aimed at interoperability with other libraries.
      * \warning it returns the null pointer 0 for SparseVector
      * \sa valuePtr(), innerIndexPtr() */
    inline const StorageIndex* outerIndexPtr() const { return derived().outerIndexPtr(); }
    /** \returns a non-const pointer to the array of the starting positions of the inner vectors.
      * This function is aimed at interoperability with other libraries.
      * \warning it returns the null pointer 0 for SparseVector
      * \sa valuePtr(), innerIndexPtr() */
    inline StorageIndex* outerIndexPtr() { return derived().outerIndexPtr(); }

    /** \returns a const pointer to the array of the number of non zeros of the inner vectors.
      * This function is aimed at interoperability with other libraries.
      * \warning it returns the null pointer 0 in compressed mode */
    inline const StorageIndex* innerNonZeroPtr() const { return derived().innerNonZeroPtr(); }
    /** \returns a non-const pointer to the array of the number of non zeros of the inner vectors.
      * This function is aimed at interoperability with other libraries.
      * \warning it returns the null pointer 0 in compressed mode */
    inline StorageIndex* innerNonZeroPtr() { return derived().innerNonZeroPtr(); }

    /** \returns whether \c *this is in compressed form. */
    inline bool isCompressed() const { return innerNonZeroPtr()==0; }

    /** \returns a read-only view of the stored coefficients as a 1D array expression.
      *
      * \warning this method is for \b compressed \b storage \b only, and it will trigger an assertion otherwise.
      *
      * \sa valuePtr(), isCompressed() */
    const Map<const Array<Scalar,Dynamic,1> > coeffs() const { eigen_assert(isCompressed()); return Array<Scalar,Dynamic,1>::Map(valuePtr(),nonZeros()); }

    /** \returns a read-write view of the stored coefficients as a 1D array expression
      *
      * \warning this method is for \b compressed \b storage \b only, and it will trigger an assertion otherwise.
      *
      * Here is an example:
      * \include SparseMatrix_coeffs.cpp
      * and the output is:
      * \include SparseMatrix_coeffs.out
      *
      * \sa valuePtr(), isCompressed() */
    Map<Array<Scalar,Dynamic,1> > coeffs() { eigen_assert(isCompressed()); return Array<Scalar,Dynamic,1>::Map(valuePtr(),nonZeros()); }

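    // Illustrative example (a sketch, not from the original sources; assumes a
    // compressed SparseMatrix<double> m): coeffs() exposes the stored values as
    // a dense 1D array expression, so all explicitly stored coefficients can be
    // transformed at once:
    //
    //   m.coeffs() += 1;               // shift every stored coefficient
    //   m.coeffs() = m.coeffs().abs(); // absolute values, in place
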
  protected:
    /** Default constructor. Do nothing. */
    SparseCompressedBase() {}

    /** \internal return the index of the coeff at (row,col) or just before if it does not exist.
      * This is an analogue of std::lower_bound.
      */
    internal::LowerBoundIndex lower_bound(Index row, Index col) const
    {
      eigen_internal_assert(row>=0 && row<this->rows() && col>=0 && col<this->cols());

      const Index outer = Derived::IsRowMajor ? row : col;
      const Index inner = Derived::IsRowMajor ? col : row;

      Index start = this->outerIndexPtr()[outer];
      Index end = this->isCompressed() ? this->outerIndexPtr()[outer+1] : this->outerIndexPtr()[outer] + this->innerNonZeroPtr()[outer];
      eigen_assert(end>=start && "you are using a non finalized sparse matrix or written coefficient does not exist");
      internal::LowerBoundIndex p;
      p.value = std::lower_bound(this->innerIndexPtr()+start, this->innerIndexPtr()+end, inner) - this->innerIndexPtr();
      p.found = (p.value<end) && (this->innerIndexPtr()[p.value]==inner);
      return p;
    }

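    // Illustrative behavior (a sketch, not from the original sources): for a
    // compressed row-major matrix whose row r stores the inner indices {1,4,7},
    // lower_bound(r,4) yields the position of index 4 with found==true, while
    // lower_bound(r,5) yields the position of index 7 (the first stored index
    // not less than 5) with found==false.
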
    friend struct internal::evaluator<SparseCompressedBase<Derived> >;

  private:
    template<typename OtherDerived> explicit SparseCompressedBase(const SparseCompressedBase<OtherDerived>&);
};

template<typename Derived>
class SparseCompressedBase<Derived>::InnerIterator
{
  public:
    InnerIterator()
      : m_values(0), m_indices(0), m_outer(0), m_id(0), m_end(0)
    {}

    InnerIterator(const InnerIterator& other)
      : m_values(other.m_values), m_indices(other.m_indices), m_outer(other.m_outer), m_id(other.m_id), m_end(other.m_end)
    {}

    InnerIterator& operator=(const InnerIterator& other)
    {
      m_values = other.m_values;
      m_indices = other.m_indices;
      const_cast<OuterType&>(m_outer).setValue(other.m_outer.value());
      m_id = other.m_id;
      m_end = other.m_end;
      return *this;
    }

    InnerIterator(const SparseCompressedBase& mat, Index outer)
      : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer)
    {
      if(Derived::IsVectorAtCompileTime && mat.outerIndexPtr()==0)
      {
        m_id = 0;
        m_end = mat.nonZeros();
      }
      else
      {
        m_id = mat.outerIndexPtr()[outer];
        if(mat.isCompressed())
          m_end = mat.outerIndexPtr()[outer+1];
        else
          m_end = m_id + mat.innerNonZeroPtr()[outer];
      }
    }

    explicit InnerIterator(const SparseCompressedBase& mat)
      : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(0), m_id(0), m_end(mat.nonZeros())
    {
      EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
    }

    explicit InnerIterator(const internal::CompressedStorage<Scalar,StorageIndex>& data)
      : m_values(data.valuePtr()), m_indices(data.indexPtr()), m_outer(0), m_id(0), m_end(data.size())
    {
      EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
    }

    inline InnerIterator& operator++() { m_id++; return *this; }
    inline InnerIterator& operator+=(Index i) { m_id += i ; return *this; }

    inline InnerIterator operator+(Index i)
    {
      InnerIterator result = *this;
      result += i;
      return result;
    }

    inline const Scalar& value() const { return m_values[m_id]; }
    inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id]); }

    inline StorageIndex index() const { return m_indices[m_id]; }
    inline Index outer() const { return m_outer.value(); }
    inline Index row() const { return IsRowMajor ? m_outer.value() : index(); }
    inline Index col() const { return IsRowMajor ? index() : m_outer.value(); }

    inline operator bool() const { return (m_id < m_end); }

  protected:
    const Scalar* m_values;
    const StorageIndex* m_indices;
    typedef internal::variable_if_dynamic<Index,Derived::IsVectorAtCompileTime?0:Dynamic> OuterType;
    const OuterType m_outer;
    Index m_id;
    Index m_end;
  private:
    // If you get here, then you're not using the right InnerIterator type, e.g.:
    //   SparseMatrix<double,RowMajor> A;
    //   SparseMatrix<double>::InnerIterator it(A,0);
    template<typename T> InnerIterator(const SparseMatrixBase<T>&, Index outer);
};

template<typename Derived>
class SparseCompressedBase<Derived>::ReverseInnerIterator
{
  public:
    ReverseInnerIterator(const SparseCompressedBase& mat, Index outer)
      : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer)
    {
      if(Derived::IsVectorAtCompileTime && mat.outerIndexPtr()==0)
      {
        m_start = 0;
        m_id = mat.nonZeros();
      }
      else
      {
        m_start = mat.outerIndexPtr()[outer];
        if(mat.isCompressed())
          m_id = mat.outerIndexPtr()[outer+1];
        else
          m_id = m_start + mat.innerNonZeroPtr()[outer];
      }
    }

    explicit ReverseInnerIterator(const SparseCompressedBase& mat)
      : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(0), m_start(0), m_id(mat.nonZeros())
    {
      EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
    }

    explicit ReverseInnerIterator(const internal::CompressedStorage<Scalar,StorageIndex>& data)
      : m_values(data.valuePtr()), m_indices(data.indexPtr()), m_outer(0), m_start(0), m_id(data.size())
    {
      EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived);
    }

    inline ReverseInnerIterator& operator--() { --m_id; return *this; }
    inline ReverseInnerIterator& operator-=(Index i) { m_id -= i; return *this; }

    inline ReverseInnerIterator operator-(Index i)
    {
      ReverseInnerIterator result = *this;
      result -= i;
      return result;
    }

    inline const Scalar& value() const { return m_values[m_id-1]; }
    inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id-1]); }

    inline StorageIndex index() const { return m_indices[m_id-1]; }
    inline Index outer() const { return m_outer.value(); }
    inline Index row() const { return IsRowMajor ? m_outer.value() : index(); }
    inline Index col() const { return IsRowMajor ? index() : m_outer.value(); }

    inline operator bool() const { return (m_id > m_start); }

  protected:
    const Scalar* m_values;
    const StorageIndex* m_indices;
    typedef internal::variable_if_dynamic<Index,Derived::IsVectorAtCompileTime?0:Dynamic> OuterType;
    const OuterType m_outer;
    Index m_start;
    Index m_id;
};

namespace internal {

template<typename Derived>
struct evaluator<SparseCompressedBase<Derived> >
  : evaluator_base<Derived>
{
  typedef typename Derived::Scalar Scalar;
  typedef typename Derived::InnerIterator InnerIterator;

  enum {
    CoeffReadCost = NumTraits<Scalar>::ReadCost,
    Flags = Derived::Flags
  };

  evaluator() : m_matrix(0), m_zero(0)
  {
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }
  explicit evaluator(const Derived &mat) : m_matrix(&mat), m_zero(0)
  {
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

  inline Index nonZerosEstimate() const {
    return m_matrix->nonZeros();
  }

  operator Derived&() { return m_matrix->const_cast_derived(); }
  operator const Derived&() const { return *m_matrix; }

  typedef typename DenseCoeffsBase<Derived,ReadOnlyAccessors>::CoeffReturnType CoeffReturnType;
  const Scalar& coeff(Index row, Index col) const
  {
    Index p = find(row,col);

    if(p==Dynamic)
      return m_zero;
    else
      return m_matrix->const_cast_derived().valuePtr()[p];
  }

  Scalar& coeffRef(Index row, Index col)
  {
    Index p = find(row,col);
    eigen_assert(p!=Dynamic && "written coefficient does not exist");
    return m_matrix->const_cast_derived().valuePtr()[p];
  }

protected:

  Index find(Index row, Index col) const
  {
    internal::LowerBoundIndex p = m_matrix->lower_bound(row,col);
    return p.found ? p.value : Dynamic;
  }

  const Derived *m_matrix;
  const Scalar m_zero;
};

}

} // end namespace Eigen

#endif // EIGEN_SPARSE_COMPRESSED_BASE_H
722
3party/eigen/Eigen/src/SparseCore/SparseCwiseBinaryOp.h
Normal file
@@ -0,0 +1,722 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSE_CWISE_BINARY_OP_H
#define EIGEN_SPARSE_CWISE_BINARY_OP_H

namespace Eigen {

// Here we have to handle 3 cases:
//  1 - sparse op dense
//  2 - dense op sparse
//  3 - sparse op sparse
// We also need to implement a 4th iterator for:
//  4 - dense op dense
// Finally, we also need to distinguish between the product and other operations:
//  configuration            returned  mode
//  1 - sparse op dense      product   sparse
//                           generic   dense
//  2 - dense op sparse      product   sparse
//                           generic   dense
//  3 - sparse op sparse     product   sparse
//                           generic   sparse
//  4 - dense op dense       product   dense
//                           generic   dense
//
// TODO to ease compiler job, we could specialize product/quotient with a scalar
// and fallback to cwise-unary evaluator using bind1st_op and bind2nd_op.

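// Illustrative examples of the sparse-relevant cases above (a sketch, not from
// the original sources; assumes SparseMatrix<double> S and MatrixXd D of
// matching sizes):
//
//   Eigen::SparseMatrix<double> P = S.cwiseProduct(D); // case 1/2, product: stays sparse
//   Eigen::MatrixXd             G = S + D;             // case 1/2, generic op: yields dense
//   Eigen::SparseMatrix<double> Q = S + S;             // case 3: sparse op sparse stays sparse
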
template<typename BinaryOp, typename Lhs, typename Rhs>
|
||||
class CwiseBinaryOpImpl<BinaryOp, Lhs, Rhs, Sparse>
|
||||
: public SparseMatrixBase<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
|
||||
{
|
||||
public:
|
||||
typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> Derived;
|
||||
typedef SparseMatrixBase<Derived> Base;
|
||||
EIGEN_SPARSE_PUBLIC_INTERFACE(Derived)
|
||||
CwiseBinaryOpImpl()
|
||||
{
|
||||
EIGEN_STATIC_ASSERT((
|
||||
(!internal::is_same<typename internal::traits<Lhs>::StorageKind,
|
||||
typename internal::traits<Rhs>::StorageKind>::value)
|
||||
|| ((internal::evaluator<Lhs>::Flags&RowMajorBit) == (internal::evaluator<Rhs>::Flags&RowMajorBit))),
|
||||
THE_STORAGE_ORDER_OF_BOTH_SIDES_MUST_MATCH);
|
||||
}
|
||||
};
|
||||
|
||||
namespace internal {
|
||||
|
||||
|
||||
// Generic "sparse OP sparse"
|
||||
template<typename XprType> struct binary_sparse_evaluator;
|
||||
|
||||
template<typename BinaryOp, typename Lhs, typename Rhs>
|
||||
struct binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs>, IteratorBased, IteratorBased>
|
||||
: evaluator_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
|
||||
{
|
||||
protected:
|
||||
typedef typename evaluator<Lhs>::InnerIterator LhsIterator;
|
||||
typedef typename evaluator<Rhs>::InnerIterator RhsIterator;
|
||||
typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType;
|
||||
typedef typename traits<XprType>::Scalar Scalar;
|
||||
typedef typename XprType::StorageIndex StorageIndex;
|
||||
public:
|
||||
|
||||
class InnerIterator
|
||||
{
|
||||
public:
|
||||
|
||||
EIGEN_STRONG_INLINE InnerIterator(const binary_evaluator& aEval, Index outer)
|
||||
: m_lhsIter(aEval.m_lhsImpl,outer), m_rhsIter(aEval.m_rhsImpl,outer), m_functor(aEval.m_functor)
|
||||
{
|
||||
this->operator++();
|
||||
}
|
||||
|
||||
EIGEN_STRONG_INLINE InnerIterator& operator++()
|
||||
{
|
||||
if (m_lhsIter && m_rhsIter && (m_lhsIter.index() == m_rhsIter.index()))
|
||||
{
|
||||
m_id = m_lhsIter.index();
|
||||
m_value = m_functor(m_lhsIter.value(), m_rhsIter.value());
|
||||
++m_lhsIter;
|
||||
++m_rhsIter;
|
||||
}
|
||||
else if (m_lhsIter && (!m_rhsIter || (m_lhsIter.index() < m_rhsIter.index())))
|
||||
{
|
||||
m_id = m_lhsIter.index();
|
||||
m_value = m_functor(m_lhsIter.value(), Scalar(0));
|
||||
++m_lhsIter;
|
||||
}
|
||||
else if (m_rhsIter && (!m_lhsIter || (m_lhsIter.index() > m_rhsIter.index())))
|
||||
{
|
||||
m_id = m_rhsIter.index();
|
||||
m_value = m_functor(Scalar(0), m_rhsIter.value());
|
||||
++m_rhsIter;
|
||||
}
|
||||
else
|
||||
{
|
||||
m_value = Scalar(0); // this is to avoid a compilation warning
|
||||
m_id = -1;
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
|
||||
EIGEN_STRONG_INLINE Scalar value() const { return m_value; }
|
||||
|
||||
EIGEN_STRONG_INLINE StorageIndex index() const { return m_id; }
|
||||
EIGEN_STRONG_INLINE Index outer() const { return m_lhsIter.outer(); }
|
||||
EIGEN_STRONG_INLINE Index row() const { return Lhs::IsRowMajor ? m_lhsIter.row() : index(); }
|
||||
EIGEN_STRONG_INLINE Index col() const { return Lhs::IsRowMajor ? index() : m_lhsIter.col(); }
|
||||
|
||||
EIGEN_STRONG_INLINE operator bool() const { return m_id>=0; }
|
||||
|
||||
protected:
|
||||
LhsIterator m_lhsIter;
|
||||
RhsIterator m_rhsIter;
|
||||
const BinaryOp& m_functor;
|
||||
Scalar m_value;
|
||||
StorageIndex m_id;
|
||||
};
|
||||
|
||||
|
||||
enum {
|
||||
CoeffReadCost = int(evaluator<Lhs>::CoeffReadCost) + int(evaluator<Rhs>::CoeffReadCost) + int(functor_traits<BinaryOp>::Cost),
|
||||
Flags = XprType::Flags
|
||||
};
|
||||
|
||||
explicit binary_evaluator(const XprType& xpr)
|
||||
: m_functor(xpr.functor()),
|
||||
m_lhsImpl(xpr.lhs()),
|
||||
m_rhsImpl(xpr.rhs())
|
||||
{
|
||||
EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<BinaryOp>::Cost);
|
||||
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
|
||||
}
|
||||
|
||||
inline Index nonZerosEstimate() const {
|
||||
return m_lhsImpl.nonZerosEstimate() + m_rhsImpl.nonZerosEstimate();
|
||||
}
|
||||
|
||||
protected:
|
||||
const BinaryOp m_functor;
|
||||
evaluator<Lhs> m_lhsImpl;
|
||||
evaluator<Rhs> m_rhsImpl;
|
||||
};
|
||||
|
||||
// dense op sparse
|
||||
template<typename BinaryOp, typename Lhs, typename Rhs>
|
||||
struct binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs>, IndexBased, IteratorBased>
|
||||
: evaluator_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
|
||||
{
|
||||
protected:
|
||||
typedef typename evaluator<Rhs>::InnerIterator RhsIterator;
|
||||
typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType;
|
||||
typedef typename traits<XprType>::Scalar Scalar;
|
||||
typedef typename XprType::StorageIndex StorageIndex;
|
||||
public:
|
||||
|
||||
class InnerIterator
|
||||
{
|
||||
enum { IsRowMajor = (int(Rhs::Flags)&RowMajorBit)==RowMajorBit };
|
||||
public:
|
||||
|
||||
EIGEN_STRONG_INLINE InnerIterator(const binary_evaluator& aEval, Index outer)
|
||||
: m_lhsEval(aEval.m_lhsImpl), m_rhsIter(aEval.m_rhsImpl,outer), m_functor(aEval.m_functor), m_value(0), m_id(-1), m_innerSize(aEval.m_expr.rhs().innerSize())
|
||||
{
|
||||
this->operator++();
|
||||
}
|
||||
|
||||
EIGEN_STRONG_INLINE InnerIterator& operator++()
|
||||
{
|
||||
++m_id;
|
||||
if(m_id<m_innerSize)
|
||||
{
|
||||
Scalar lhsVal = m_lhsEval.coeff(IsRowMajor?m_rhsIter.outer():m_id,
|
||||
IsRowMajor?m_id:m_rhsIter.outer());
|
||||
if(m_rhsIter && m_rhsIter.index()==m_id)
|
||||
{
|
||||
m_value = m_functor(lhsVal, m_rhsIter.value());
|
||||
++m_rhsIter;
|
||||
}
|
||||
else
|
||||
m_value = m_functor(lhsVal, Scalar(0));
|
||||
}
|
||||
|
||||
return *this;
|
||||
}
|
||||
|
||||
EIGEN_STRONG_INLINE Scalar value() const { eigen_internal_assert(m_id<m_innerSize); return m_value; }
|
||||
|
||||
EIGEN_STRONG_INLINE StorageIndex index() const { return m_id; }
|
||||
EIGEN_STRONG_INLINE Index outer() const { return m_rhsIter.outer(); }
|
||||
EIGEN_STRONG_INLINE Index row() const { return IsRowMajor ? m_rhsIter.outer() : m_id; }
|
||||
EIGEN_STRONG_INLINE Index col() const { return IsRowMajor ? m_id : m_rhsIter.outer(); }
|
||||
|
||||
EIGEN_STRONG_INLINE operator bool() const { return m_id<m_innerSize; }
|
||||
|
||||
protected:
|
||||
const evaluator<Lhs> &m_lhsEval;
|
||||
RhsIterator m_rhsIter;
|
||||
const BinaryOp& m_functor;
|
||||
Scalar m_value;
|
||||
StorageIndex m_id;
|
||||
StorageIndex m_innerSize;
|
||||
};
|
||||
|
||||
|
||||
enum {
|
||||
CoeffReadCost = int(evaluator<Lhs>::CoeffReadCost) + int(evaluator<Rhs>::CoeffReadCost) + int(functor_traits<BinaryOp>::Cost),
|
||||
Flags = XprType::Flags
|
||||
};
|
||||
|
||||
explicit binary_evaluator(const XprType& xpr)
|
||||
: m_functor(xpr.functor()),
|
||||
m_lhsImpl(xpr.lhs()),
|
||||
m_rhsImpl(xpr.rhs()),
|
||||
m_expr(xpr)
|
||||
{
|
||||
EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<BinaryOp>::Cost);
|
||||
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
|
||||
}
|
||||
|
||||
inline Index nonZerosEstimate() const {
|
||||
return m_expr.size();
|
||||
}
|
||||
|
||||
protected:
|
||||
const BinaryOp m_functor;
|
||||
evaluator<Lhs> m_lhsImpl;
|
||||
evaluator<Rhs> m_rhsImpl;
|
||||
const XprType &m_expr;
|
||||
};
|
||||
|
||||
// sparse op dense
|
||||
template<typename BinaryOp, typename Lhs, typename Rhs>
|
||||
struct binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs>, IteratorBased, IndexBased>
|
||||
: evaluator_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
|
||||
{
|
||||
protected:
|
||||
typedef typename evaluator<Lhs>::InnerIterator LhsIterator;
|
||||
typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType;
|
||||
typedef typename traits<XprType>::Scalar Scalar;
|
||||
typedef typename XprType::StorageIndex StorageIndex;
|
||||
public:
|
||||
|
||||
class InnerIterator
|
||||
{
|
||||
enum { IsRowMajor = (int(Lhs::Flags)&RowMajorBit)==RowMajorBit };
|
||||
public:
|
||||
|
||||
EIGEN_STRONG_INLINE InnerIterator(const binary_evaluator& aEval, Index outer)
|
||||
: m_lhsIter(aEval.m_lhsImpl,outer), m_rhsEval(aEval.m_rhsImpl), m_functor(aEval.m_functor), m_value(0), m_id(-1), m_innerSize(aEval.m_expr.lhs().innerSize())
|
||||
{
|
||||
this->operator++();
|
||||
}
|
||||
|
||||
EIGEN_STRONG_INLINE InnerIterator& operator++()
|
||||
{
|
||||
++m_id;
|
||||
if(m_id<m_innerSize)
|
||||
{
|
||||
Scalar rhsVal = m_rhsEval.coeff(IsRowMajor?m_lhsIter.outer():m_id,
|
||||
IsRowMajor?m_id:m_lhsIter.outer());
|
||||
if(m_lhsIter && m_lhsIter.index()==m_id)
|
||||
{
|
||||
m_value = m_functor(m_lhsIter.value(), rhsVal);
|
||||
++m_lhsIter;
|
||||
}
|
||||
else
|
||||
m_value = m_functor(Scalar(0),rhsVal);
|
||||
}
|
||||
|
||||
return *this;
|
||||
}
|
||||
|
||||
      EIGEN_STRONG_INLINE Scalar value() const { eigen_internal_assert(m_id<m_innerSize); return m_value; }

      EIGEN_STRONG_INLINE StorageIndex index() const { return m_id; }
      EIGEN_STRONG_INLINE Index outer() const { return m_lhsIter.outer(); }
      EIGEN_STRONG_INLINE Index row() const { return IsRowMajor ? m_lhsIter.outer() : m_id; }
      EIGEN_STRONG_INLINE Index col() const { return IsRowMajor ? m_id : m_lhsIter.outer(); }

      EIGEN_STRONG_INLINE operator bool() const { return m_id<m_innerSize; }

    protected:
      LhsIterator m_lhsIter;
      const evaluator<Rhs> &m_rhsEval;
      const BinaryOp& m_functor;
      Scalar m_value;
      StorageIndex m_id;
      StorageIndex m_innerSize;
  };


  enum {
    CoeffReadCost = int(evaluator<Lhs>::CoeffReadCost) + int(evaluator<Rhs>::CoeffReadCost) + int(functor_traits<BinaryOp>::Cost),
    Flags = XprType::Flags
  };

  explicit binary_evaluator(const XprType& xpr)
    : m_functor(xpr.functor()),
      m_lhsImpl(xpr.lhs()),
      m_rhsImpl(xpr.rhs()),
      m_expr(xpr)
  {
    EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<BinaryOp>::Cost);
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

  inline Index nonZerosEstimate() const {
    return m_expr.size();
  }

protected:
  const BinaryOp m_functor;
  evaluator<Lhs> m_lhsImpl;
  evaluator<Rhs> m_rhsImpl;
  const XprType &m_expr;
};


template<typename T,
         typename LhsKind   = typename evaluator_traits<typename T::Lhs>::Kind,
         typename RhsKind   = typename evaluator_traits<typename T::Rhs>::Kind,
         typename LhsScalar = typename traits<typename T::Lhs>::Scalar,
         typename RhsScalar = typename traits<typename T::Rhs>::Scalar> struct sparse_conjunction_evaluator;

// "sparse .* sparse"
template<typename T1, typename T2, typename Lhs, typename Rhs>
struct binary_evaluator<CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs>, IteratorBased, IteratorBased>
  : sparse_conjunction_evaluator<CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs> >
{
  typedef CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs> XprType;
  typedef sparse_conjunction_evaluator<XprType> Base;
  explicit binary_evaluator(const XprType& xpr) : Base(xpr) {}
};
// "dense .* sparse"
template<typename T1, typename T2, typename Lhs, typename Rhs>
struct binary_evaluator<CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs>, IndexBased, IteratorBased>
  : sparse_conjunction_evaluator<CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs> >
{
  typedef CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs> XprType;
  typedef sparse_conjunction_evaluator<XprType> Base;
  explicit binary_evaluator(const XprType& xpr) : Base(xpr) {}
};
// "sparse .* dense"
template<typename T1, typename T2, typename Lhs, typename Rhs>
struct binary_evaluator<CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs>, IteratorBased, IndexBased>
  : sparse_conjunction_evaluator<CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs> >
{
  typedef CwiseBinaryOp<scalar_product_op<T1,T2>, Lhs, Rhs> XprType;
  typedef sparse_conjunction_evaluator<XprType> Base;
  explicit binary_evaluator(const XprType& xpr) : Base(xpr) {}
};

// "sparse ./ dense"
template<typename T1, typename T2, typename Lhs, typename Rhs>
struct binary_evaluator<CwiseBinaryOp<scalar_quotient_op<T1,T2>, Lhs, Rhs>, IteratorBased, IndexBased>
  : sparse_conjunction_evaluator<CwiseBinaryOp<scalar_quotient_op<T1,T2>, Lhs, Rhs> >
{
  typedef CwiseBinaryOp<scalar_quotient_op<T1,T2>, Lhs, Rhs> XprType;
  typedef sparse_conjunction_evaluator<XprType> Base;
  explicit binary_evaluator(const XprType& xpr) : Base(xpr) {}
};

// "sparse && sparse"
template<typename Lhs, typename Rhs>
struct binary_evaluator<CwiseBinaryOp<scalar_boolean_and_op, Lhs, Rhs>, IteratorBased, IteratorBased>
  : sparse_conjunction_evaluator<CwiseBinaryOp<scalar_boolean_and_op, Lhs, Rhs> >
{
  typedef CwiseBinaryOp<scalar_boolean_and_op, Lhs, Rhs> XprType;
  typedef sparse_conjunction_evaluator<XprType> Base;
  explicit binary_evaluator(const XprType& xpr) : Base(xpr) {}
};
// "dense && sparse"
template<typename Lhs, typename Rhs>
struct binary_evaluator<CwiseBinaryOp<scalar_boolean_and_op, Lhs, Rhs>, IndexBased, IteratorBased>
  : sparse_conjunction_evaluator<CwiseBinaryOp<scalar_boolean_and_op, Lhs, Rhs> >
{
  typedef CwiseBinaryOp<scalar_boolean_and_op, Lhs, Rhs> XprType;
  typedef sparse_conjunction_evaluator<XprType> Base;
  explicit binary_evaluator(const XprType& xpr) : Base(xpr) {}
};
// "sparse && dense"
template<typename Lhs, typename Rhs>
struct binary_evaluator<CwiseBinaryOp<scalar_boolean_and_op, Lhs, Rhs>, IteratorBased, IndexBased>
  : sparse_conjunction_evaluator<CwiseBinaryOp<scalar_boolean_and_op, Lhs, Rhs> >
{
  typedef CwiseBinaryOp<scalar_boolean_and_op, Lhs, Rhs> XprType;
  typedef sparse_conjunction_evaluator<XprType> Base;
  explicit binary_evaluator(const XprType& xpr) : Base(xpr) {}
};

// "sparse ^ sparse"
template<typename XprType>
struct sparse_conjunction_evaluator<XprType, IteratorBased, IteratorBased>
  : evaluator_base<XprType>
{
protected:
  typedef typename XprType::Functor BinaryOp;
  typedef typename XprType::Lhs LhsArg;
  typedef typename XprType::Rhs RhsArg;
  typedef typename evaluator<LhsArg>::InnerIterator LhsIterator;
  typedef typename evaluator<RhsArg>::InnerIterator RhsIterator;
  typedef typename XprType::StorageIndex StorageIndex;
  typedef typename traits<XprType>::Scalar Scalar;
public:

  class InnerIterator
  {
  public:

    EIGEN_STRONG_INLINE InnerIterator(const sparse_conjunction_evaluator& aEval, Index outer)
      : m_lhsIter(aEval.m_lhsImpl,outer), m_rhsIter(aEval.m_rhsImpl,outer), m_functor(aEval.m_functor)
    {
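      // Advance whichever side lags behind until both iterators point at the
      // same inner index, i.e. walk the sorted-merge intersection of the two
      // sparse inner vectors.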
      while (m_lhsIter && m_rhsIter && (m_lhsIter.index() != m_rhsIter.index()))
      {
        if (m_lhsIter.index() < m_rhsIter.index())
          ++m_lhsIter;
        else
          ++m_rhsIter;
      }
    }

    EIGEN_STRONG_INLINE InnerIterator& operator++()
    {
      ++m_lhsIter;
      ++m_rhsIter;
      while (m_lhsIter && m_rhsIter && (m_lhsIter.index() != m_rhsIter.index()))
      {
        if (m_lhsIter.index() < m_rhsIter.index())
          ++m_lhsIter;
        else
          ++m_rhsIter;
      }
      return *this;
    }

    EIGEN_STRONG_INLINE Scalar value() const { return m_functor(m_lhsIter.value(), m_rhsIter.value()); }

    EIGEN_STRONG_INLINE StorageIndex index() const { return m_lhsIter.index(); }
    EIGEN_STRONG_INLINE Index outer() const { return m_lhsIter.outer(); }
    EIGEN_STRONG_INLINE Index row() const { return m_lhsIter.row(); }
    EIGEN_STRONG_INLINE Index col() const { return m_lhsIter.col(); }

    EIGEN_STRONG_INLINE operator bool() const { return (m_lhsIter && m_rhsIter); }

  protected:
    LhsIterator m_lhsIter;
    RhsIterator m_rhsIter;
    const BinaryOp& m_functor;
  };


  enum {
    CoeffReadCost = int(evaluator<LhsArg>::CoeffReadCost) + int(evaluator<RhsArg>::CoeffReadCost) + int(functor_traits<BinaryOp>::Cost),
    Flags = XprType::Flags
  };

  explicit sparse_conjunction_evaluator(const XprType& xpr)
    : m_functor(xpr.functor()),
      m_lhsImpl(xpr.lhs()),
      m_rhsImpl(xpr.rhs())
  {
    EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<BinaryOp>::Cost);
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

  inline Index nonZerosEstimate() const {
    return (std::min)(m_lhsImpl.nonZerosEstimate(), m_rhsImpl.nonZerosEstimate());
  }

protected:
  const BinaryOp m_functor;
  evaluator<LhsArg> m_lhsImpl;
  evaluator<RhsArg> m_rhsImpl;
};
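
// Illustrative usage (editor's sketch, not part of the library source): the
// conjunction evaluator above is what a coefficient-wise product of two
// sparse operands dispatches to. Assuming <Eigen/Sparse> is included:
//
//   Eigen::SparseMatrix<double> A(4,4), B(4,4);
//   A.insert(0,0) = 1.0; A.insert(2,1) = 3.0;
//   B.insert(2,1) = 2.0; B.insert(3,3) = 5.0;
//   Eigen::SparseMatrix<double> C = A.cwiseProduct(B);  // only (2,1) survives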

// "dense ^ sparse"
template<typename XprType>
struct sparse_conjunction_evaluator<XprType, IndexBased, IteratorBased>
  : evaluator_base<XprType>
{
protected:
  typedef typename XprType::Functor BinaryOp;
  typedef typename XprType::Lhs LhsArg;
  typedef typename XprType::Rhs RhsArg;
  typedef evaluator<LhsArg> LhsEvaluator;
  typedef typename evaluator<RhsArg>::InnerIterator RhsIterator;
  typedef typename XprType::StorageIndex StorageIndex;
  typedef typename traits<XprType>::Scalar Scalar;
public:

  class InnerIterator
  {
    enum { IsRowMajor = (int(RhsArg::Flags)&RowMajorBit)==RowMajorBit };

  public:

    EIGEN_STRONG_INLINE InnerIterator(const sparse_conjunction_evaluator& aEval, Index outer)
      : m_lhsEval(aEval.m_lhsImpl), m_rhsIter(aEval.m_rhsImpl,outer), m_functor(aEval.m_functor), m_outer(outer)
    {}

    EIGEN_STRONG_INLINE InnerIterator& operator++()
    {
      ++m_rhsIter;
      return *this;
    }

    EIGEN_STRONG_INLINE Scalar value() const
    { return m_functor(m_lhsEval.coeff(IsRowMajor?m_outer:m_rhsIter.index(),IsRowMajor?m_rhsIter.index():m_outer), m_rhsIter.value()); }

    EIGEN_STRONG_INLINE StorageIndex index() const { return m_rhsIter.index(); }
    EIGEN_STRONG_INLINE Index outer() const { return m_rhsIter.outer(); }
    EIGEN_STRONG_INLINE Index row() const { return m_rhsIter.row(); }
    EIGEN_STRONG_INLINE Index col() const { return m_rhsIter.col(); }

    EIGEN_STRONG_INLINE operator bool() const { return m_rhsIter; }

  protected:
    const LhsEvaluator &m_lhsEval;
    RhsIterator m_rhsIter;
    const BinaryOp& m_functor;
    const Index m_outer;
  };


  enum {
    CoeffReadCost = int(evaluator<LhsArg>::CoeffReadCost) + int(evaluator<RhsArg>::CoeffReadCost) + int(functor_traits<BinaryOp>::Cost),
    Flags = XprType::Flags
  };

  explicit sparse_conjunction_evaluator(const XprType& xpr)
    : m_functor(xpr.functor()),
      m_lhsImpl(xpr.lhs()),
      m_rhsImpl(xpr.rhs())
  {
    EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<BinaryOp>::Cost);
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

  inline Index nonZerosEstimate() const {
    return m_rhsImpl.nonZerosEstimate();
  }

protected:
  const BinaryOp m_functor;
  evaluator<LhsArg> m_lhsImpl;
  evaluator<RhsArg> m_rhsImpl;
};

// "sparse ^ dense"
template<typename XprType>
struct sparse_conjunction_evaluator<XprType, IteratorBased, IndexBased>
  : evaluator_base<XprType>
{
protected:
  typedef typename XprType::Functor BinaryOp;
  typedef typename XprType::Lhs LhsArg;
  typedef typename XprType::Rhs RhsArg;
  typedef typename evaluator<LhsArg>::InnerIterator LhsIterator;
  typedef evaluator<RhsArg> RhsEvaluator;
  typedef typename XprType::StorageIndex StorageIndex;
  typedef typename traits<XprType>::Scalar Scalar;
public:

  class InnerIterator
  {
    enum { IsRowMajor = (int(LhsArg::Flags)&RowMajorBit)==RowMajorBit };

  public:

    EIGEN_STRONG_INLINE InnerIterator(const sparse_conjunction_evaluator& aEval, Index outer)
      : m_lhsIter(aEval.m_lhsImpl,outer), m_rhsEval(aEval.m_rhsImpl), m_functor(aEval.m_functor), m_outer(outer)
    {}

    EIGEN_STRONG_INLINE InnerIterator& operator++()
    {
      ++m_lhsIter;
      return *this;
    }

    EIGEN_STRONG_INLINE Scalar value() const
    { return m_functor(m_lhsIter.value(),
                       m_rhsEval.coeff(IsRowMajor?m_outer:m_lhsIter.index(),IsRowMajor?m_lhsIter.index():m_outer)); }

    EIGEN_STRONG_INLINE StorageIndex index() const { return m_lhsIter.index(); }
    EIGEN_STRONG_INLINE Index outer() const { return m_lhsIter.outer(); }
    EIGEN_STRONG_INLINE Index row() const { return m_lhsIter.row(); }
    EIGEN_STRONG_INLINE Index col() const { return m_lhsIter.col(); }

    EIGEN_STRONG_INLINE operator bool() const { return m_lhsIter; }

  protected:
    LhsIterator m_lhsIter;
    const evaluator<RhsArg> &m_rhsEval;
    const BinaryOp& m_functor;
    const Index m_outer;
  };


  enum {
    CoeffReadCost = int(evaluator<LhsArg>::CoeffReadCost) + int(evaluator<RhsArg>::CoeffReadCost) + int(functor_traits<BinaryOp>::Cost),
    Flags = XprType::Flags
  };

  explicit sparse_conjunction_evaluator(const XprType& xpr)
    : m_functor(xpr.functor()),
      m_lhsImpl(xpr.lhs()),
      m_rhsImpl(xpr.rhs())
  {
    EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<BinaryOp>::Cost);
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

  inline Index nonZerosEstimate() const {
    return m_lhsImpl.nonZerosEstimate();
  }

protected:
  const BinaryOp m_functor;
  evaluator<LhsArg> m_lhsImpl;
  evaluator<RhsArg> m_rhsImpl;
};

}

/***************************************************************************
* Implementation of SparseMatrixBase and SparseCwise functions/operators
***************************************************************************/

template<typename Derived>
template<typename OtherDerived>
Derived& SparseMatrixBase<Derived>::operator+=(const EigenBase<OtherDerived> &other)
{
  call_assignment(derived(), other.derived(), internal::add_assign_op<Scalar,typename OtherDerived::Scalar>());
  return derived();
}

template<typename Derived>
template<typename OtherDerived>
Derived& SparseMatrixBase<Derived>::operator-=(const EigenBase<OtherDerived> &other)
{
  call_assignment(derived(), other.derived(), internal::sub_assign_op<Scalar,typename OtherDerived::Scalar>());
  return derived();
}

template<typename Derived>
template<typename OtherDerived>
EIGEN_STRONG_INLINE Derived &
SparseMatrixBase<Derived>::operator-=(const SparseMatrixBase<OtherDerived> &other)
{
  return derived() = derived() - other.derived();
}

template<typename Derived>
template<typename OtherDerived>
EIGEN_STRONG_INLINE Derived &
SparseMatrixBase<Derived>::operator+=(const SparseMatrixBase<OtherDerived>& other)
{
  return derived() = derived() + other.derived();
}

template<typename Derived>
template<typename OtherDerived>
Derived& SparseMatrixBase<Derived>::operator+=(const DiagonalBase<OtherDerived>& other)
{
  call_assignment_no_alias(derived(), other.derived(), internal::add_assign_op<Scalar,typename OtherDerived::Scalar>());
  return derived();
}

template<typename Derived>
template<typename OtherDerived>
Derived& SparseMatrixBase<Derived>::operator-=(const DiagonalBase<OtherDerived>& other)
{
  call_assignment_no_alias(derived(), other.derived(), internal::sub_assign_op<Scalar,typename OtherDerived::Scalar>());
  return derived();
}

template<typename Derived>
template<typename OtherDerived>
EIGEN_STRONG_INLINE const typename SparseMatrixBase<Derived>::template CwiseProductDenseReturnType<OtherDerived>::Type
SparseMatrixBase<Derived>::cwiseProduct(const MatrixBase<OtherDerived> &other) const
{
  return typename CwiseProductDenseReturnType<OtherDerived>::Type(derived(), other.derived());
}
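
// Illustrative usage (editor's sketch, not part of the library source):
// cwiseProduct with a dense argument keeps the sparse iteration pattern, so
// the result has at most the non-zeros of the sparse operand:
//
//   Eigen::SparseMatrix<double> A(3,3);
//   A.insert(0,0) = 2.0; A.insert(1,2) = 4.0;
//   Eigen::MatrixXd D = Eigen::MatrixXd::Constant(3,3, 0.5);
//   Eigen::SparseMatrix<double> R = A.cwiseProduct(D);  // same pattern as A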

template<typename DenseDerived, typename SparseDerived>
EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_sum_op<typename DenseDerived::Scalar,typename SparseDerived::Scalar>, const DenseDerived, const SparseDerived>
operator+(const MatrixBase<DenseDerived> &a, const SparseMatrixBase<SparseDerived> &b)
{
  return CwiseBinaryOp<internal::scalar_sum_op<typename DenseDerived::Scalar,typename SparseDerived::Scalar>, const DenseDerived, const SparseDerived>(a.derived(), b.derived());
}

template<typename SparseDerived, typename DenseDerived>
EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_sum_op<typename SparseDerived::Scalar,typename DenseDerived::Scalar>, const SparseDerived, const DenseDerived>
operator+(const SparseMatrixBase<SparseDerived> &a, const MatrixBase<DenseDerived> &b)
{
  return CwiseBinaryOp<internal::scalar_sum_op<typename SparseDerived::Scalar,typename DenseDerived::Scalar>, const SparseDerived, const DenseDerived>(a.derived(), b.derived());
}

template<typename DenseDerived, typename SparseDerived>
EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_difference_op<typename DenseDerived::Scalar,typename SparseDerived::Scalar>, const DenseDerived, const SparseDerived>
operator-(const MatrixBase<DenseDerived> &a, const SparseMatrixBase<SparseDerived> &b)
{
  return CwiseBinaryOp<internal::scalar_difference_op<typename DenseDerived::Scalar,typename SparseDerived::Scalar>, const DenseDerived, const SparseDerived>(a.derived(), b.derived());
}

template<typename SparseDerived, typename DenseDerived>
EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_difference_op<typename SparseDerived::Scalar,typename DenseDerived::Scalar>, const SparseDerived, const DenseDerived>
operator-(const SparseMatrixBase<SparseDerived> &a, const MatrixBase<DenseDerived> &b)
{
  return CwiseBinaryOp<internal::scalar_difference_op<typename SparseDerived::Scalar,typename DenseDerived::Scalar>, const SparseDerived, const DenseDerived>(a.derived(), b.derived());
}

} // end namespace Eigen

#endif // EIGEN_SPARSE_CWISE_BINARY_OP_H
150
3party/eigen/Eigen/src/SparseCore/SparseCwiseUnaryOp.h
Normal file
@ -0,0 +1,150 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2015 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSE_CWISE_UNARY_OP_H
#define EIGEN_SPARSE_CWISE_UNARY_OP_H

namespace Eigen {

namespace internal {

template<typename UnaryOp, typename ArgType>
struct unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>
  : public evaluator_base<CwiseUnaryOp<UnaryOp,ArgType> >
{
  public:
    typedef CwiseUnaryOp<UnaryOp, ArgType> XprType;

    class InnerIterator;

    enum {
      CoeffReadCost = int(evaluator<ArgType>::CoeffReadCost) + int(functor_traits<UnaryOp>::Cost),
      Flags = XprType::Flags
    };

    explicit unary_evaluator(const XprType& op) : m_functor(op.functor()), m_argImpl(op.nestedExpression())
    {
      EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<UnaryOp>::Cost);
      EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
    }

    inline Index nonZerosEstimate() const {
      return m_argImpl.nonZerosEstimate();
    }

  protected:
    typedef typename evaluator<ArgType>::InnerIterator EvalIterator;

    const UnaryOp m_functor;
    evaluator<ArgType> m_argImpl;
};

template<typename UnaryOp, typename ArgType>
class unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>::InnerIterator
  : public unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>::EvalIterator
{
  protected:
    typedef typename XprType::Scalar Scalar;
    typedef typename unary_evaluator<CwiseUnaryOp<UnaryOp,ArgType>, IteratorBased>::EvalIterator Base;
  public:

    EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& unaryOp, Index outer)
      : Base(unaryOp.m_argImpl,outer), m_functor(unaryOp.m_functor)
    {}

    EIGEN_STRONG_INLINE InnerIterator& operator++()
    { Base::operator++(); return *this; }

    EIGEN_STRONG_INLINE Scalar value() const { return m_functor(Base::value()); }

  protected:
    const UnaryOp m_functor;
  private:
    Scalar& valueRef();
};

template<typename ViewOp, typename ArgType>
struct unary_evaluator<CwiseUnaryView<ViewOp,ArgType>, IteratorBased>
  : public evaluator_base<CwiseUnaryView<ViewOp,ArgType> >
{
  public:
    typedef CwiseUnaryView<ViewOp, ArgType> XprType;

    class InnerIterator;

    enum {
      CoeffReadCost = int(evaluator<ArgType>::CoeffReadCost) + int(functor_traits<ViewOp>::Cost),
      Flags = XprType::Flags
    };

    explicit unary_evaluator(const XprType& op) : m_functor(op.functor()), m_argImpl(op.nestedExpression())
    {
      EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<ViewOp>::Cost);
      EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
    }

  protected:
    typedef typename evaluator<ArgType>::InnerIterator EvalIterator;

    const ViewOp m_functor;
    evaluator<ArgType> m_argImpl;
};

template<typename ViewOp, typename ArgType>
class unary_evaluator<CwiseUnaryView<ViewOp,ArgType>, IteratorBased>::InnerIterator
  : public unary_evaluator<CwiseUnaryView<ViewOp,ArgType>, IteratorBased>::EvalIterator
{
  protected:
    typedef typename XprType::Scalar Scalar;
    typedef typename unary_evaluator<CwiseUnaryView<ViewOp,ArgType>, IteratorBased>::EvalIterator Base;
  public:

    EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& unaryOp, Index outer)
      : Base(unaryOp.m_argImpl,outer), m_functor(unaryOp.m_functor)
    {}

    EIGEN_STRONG_INLINE InnerIterator& operator++()
    { Base::operator++(); return *this; }

    EIGEN_STRONG_INLINE Scalar value() const { return m_functor(Base::value()); }
    EIGEN_STRONG_INLINE Scalar& valueRef() { return m_functor(Base::valueRef()); }

  protected:
    const ViewOp m_functor;
};

} // end namespace internal

template<typename Derived>
EIGEN_STRONG_INLINE Derived&
SparseMatrixBase<Derived>::operator*=(const Scalar& other)
{
  typedef typename internal::evaluator<Derived>::InnerIterator EvalIterator;
  internal::evaluator<Derived> thisEval(derived());
  for (Index j=0; j<outerSize(); ++j)
    for (EvalIterator i(thisEval,j); i; ++i)
      i.valueRef() *= other;
  return derived();
}
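
// Illustrative usage (editor's sketch, not part of the library source):
// operator*= above visits only the stored coefficients through an evaluator:
//
//   Eigen::SparseMatrix<double> A(3,3);
//   A.insert(1,1) = 4.0;
//   A *= 0.5;  // stored value becomes 2.0; the sparsity pattern is unchanged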

template<typename Derived>
EIGEN_STRONG_INLINE Derived&
SparseMatrixBase<Derived>::operator/=(const Scalar& other)
{
  typedef typename internal::evaluator<Derived>::InnerIterator EvalIterator;
  internal::evaluator<Derived> thisEval(derived());
  for (Index j=0; j<outerSize(); ++j)
    for (EvalIterator i(thisEval,j); i; ++i)
      i.valueRef() /= other;
  return derived();
}

} // end namespace Eigen

#endif // EIGEN_SPARSE_CWISE_UNARY_OP_H
342
3party/eigen/Eigen/src/SparseCore/SparseDenseProduct.h
Normal file
@ -0,0 +1,342 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2015 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSEDENSEPRODUCT_H
#define EIGEN_SPARSEDENSEPRODUCT_H

namespace Eigen {

namespace internal {

template <> struct product_promote_storage_type<Sparse,Dense, OuterProduct> { typedef Sparse ret; };
template <> struct product_promote_storage_type<Dense,Sparse, OuterProduct> { typedef Sparse ret; };

template<typename SparseLhsType, typename DenseRhsType, typename DenseResType,
         typename AlphaType,
         int LhsStorageOrder = ((SparseLhsType::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor,
         bool ColPerCol = ((DenseRhsType::Flags&RowMajorBit)==0) || DenseRhsType::ColsAtCompileTime==1>
struct sparse_time_dense_product_impl;

template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, RowMajor, true>
{
  typedef typename internal::remove_all<SparseLhsType>::type Lhs;
  typedef typename internal::remove_all<DenseRhsType>::type Rhs;
  typedef typename internal::remove_all<DenseResType>::type Res;
  typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
  typedef evaluator<Lhs> LhsEval;
  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
  {
    LhsEval lhsEval(lhs);

    Index n = lhs.outerSize();
#ifdef EIGEN_HAS_OPENMP
    Eigen::initParallel();
    Index threads = Eigen::nbThreads();
#endif

    for(Index c=0; c<rhs.cols(); ++c)
    {
#ifdef EIGEN_HAS_OPENMP
      // This 20000 threshold has been found experimentally on 2D and 3D Poisson problems.
      // It basically represents the minimal amount of work to be done to be worth it.
      if(threads>1 && lhsEval.nonZerosEstimate() > 20000)
      {
        #pragma omp parallel for schedule(dynamic,(n+threads*4-1)/(threads*4)) num_threads(threads)
        for(Index i=0; i<n; ++i)
          processRow(lhsEval,rhs,res,alpha,i,c);
      }
      else
#endif
      {
        for(Index i=0; i<n; ++i)
          processRow(lhsEval,rhs,res,alpha,i,c);
      }
    }
  }

  static void processRow(const LhsEval& lhsEval, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha, Index i, Index col)
  {
    typename Res::Scalar tmp(0);
    for(LhsInnerIterator it(lhsEval,i); it ;++it)
      tmp += it.value() * rhs.coeff(it.index(),col);
    res.coeffRef(i,col) += alpha * tmp;
  }

};

// FIXME: what is the purpose of the following specialization? Is it for the BlockedSparse format?
// -> let's disable it for now as it is conflicting with generic scalar*matrix and matrix*scalar operators
// template<typename T1, typename T2/*, int _Options, typename _StrideType*/>
// struct ScalarBinaryOpTraits<T1, Ref<T2/*, _Options, _StrideType*/> >
// {
//   enum {
//     Defined = 1
//   };
//   typedef typename CwiseUnaryOp<scalar_multiple2_op<T1, typename T2::Scalar>, T2>::PlainObject ReturnType;
// };

template<typename SparseLhsType, typename DenseRhsType, typename DenseResType, typename AlphaType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, AlphaType, ColMajor, true>
{
  typedef typename internal::remove_all<SparseLhsType>::type Lhs;
  typedef typename internal::remove_all<DenseRhsType>::type Rhs;
  typedef typename internal::remove_all<DenseResType>::type Res;
  typedef evaluator<Lhs> LhsEval;
  typedef typename LhsEval::InnerIterator LhsInnerIterator;
  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
  {
    LhsEval lhsEval(lhs);
    for(Index c=0; c<rhs.cols(); ++c)
    {
      for(Index j=0; j<lhs.outerSize(); ++j)
      {
        // typename Res::Scalar rhs_j = alpha * rhs.coeff(j,c);
        typename ScalarBinaryOpTraits<AlphaType, typename Rhs::Scalar>::ReturnType rhs_j(alpha * rhs.coeff(j,c));
        for(LhsInnerIterator it(lhsEval,j); it ;++it)
          res.coeffRef(it.index(),c) += it.value() * rhs_j;
      }
    }
  }
};

template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, RowMajor, false>
{
  typedef typename internal::remove_all<SparseLhsType>::type Lhs;
  typedef typename internal::remove_all<DenseRhsType>::type Rhs;
  typedef typename internal::remove_all<DenseResType>::type Res;
  typedef evaluator<Lhs> LhsEval;
  typedef typename LhsEval::InnerIterator LhsInnerIterator;
  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
  {
    Index n = lhs.rows();
    LhsEval lhsEval(lhs);

#ifdef EIGEN_HAS_OPENMP
    Eigen::initParallel();
    Index threads = Eigen::nbThreads();
    // This 20000 threshold has been found experimentally on 2D and 3D Poisson problems.
    // It basically represents the minimal amount of work to be done to be worth it.
    if(threads>1 && lhsEval.nonZerosEstimate()*rhs.cols() > 20000)
    {
      #pragma omp parallel for schedule(dynamic,(n+threads*4-1)/(threads*4)) num_threads(threads)
      for(Index i=0; i<n; ++i)
        processRow(lhsEval,rhs,res,alpha,i);
    }
    else
#endif
    {
      for(Index i=0; i<n; ++i)
        processRow(lhsEval, rhs, res, alpha, i);
    }
  }

  static void processRow(const LhsEval& lhsEval, const DenseRhsType& rhs, Res& res, const typename Res::Scalar& alpha, Index i)
  {
    typename Res::RowXpr res_i(res.row(i));
    for(LhsInnerIterator it(lhsEval,i); it ;++it)
      res_i += (alpha*it.value()) * rhs.row(it.index());
  }
};

template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, ColMajor, false>
{
  typedef typename internal::remove_all<SparseLhsType>::type Lhs;
  typedef typename internal::remove_all<DenseRhsType>::type Rhs;
  typedef typename internal::remove_all<DenseResType>::type Res;
  typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
  {
    evaluator<Lhs> lhsEval(lhs);
    for(Index j=0; j<lhs.outerSize(); ++j)
    {
      typename Rhs::ConstRowXpr rhs_j(rhs.row(j));
      for(LhsInnerIterator it(lhsEval,j); it ;++it)
        res.row(it.index()) += (alpha*it.value()) * rhs_j;
    }
  }
};

template<typename SparseLhsType, typename DenseRhsType, typename DenseResType,typename AlphaType>
inline void sparse_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
{
  sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, AlphaType>::run(lhs, rhs, res, alpha);
}
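
// Illustrative usage (editor's sketch, not part of the library source): the
// helper above is the entry point reached by sparse*dense product expressions:
//
//   Eigen::SparseMatrix<double> A(100,100);          // fill A as needed
//   Eigen::VectorXd x = Eigen::VectorXd::Ones(100);
//   Eigen::VectorXd y = A * x;  // evaluates via sparse_time_dense_product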

} // end namespace internal

namespace internal {

template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, SparseShape, DenseShape, ProductType>
  : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,SparseShape,DenseShape,ProductType> >
{
  typedef typename Product<Lhs,Rhs>::Scalar Scalar;

  template<typename Dest>
  static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
  {
    typedef typename nested_eval<Lhs,((Rhs::Flags&RowMajorBit)==0) ? 1 : Rhs::ColsAtCompileTime>::type LhsNested;
    typedef typename nested_eval<Rhs,((Lhs::Flags&RowMajorBit)==0) ? 1 : Dynamic>::type RhsNested;
    LhsNested lhsNested(lhs);
    RhsNested rhsNested(rhs);
    internal::sparse_time_dense_product(lhsNested, rhsNested, dst, alpha);
  }
};

template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, SparseTriangularShape, DenseShape, ProductType>
  : generic_product_impl<Lhs, Rhs, SparseShape, DenseShape, ProductType>
{};

template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, DenseShape, SparseShape, ProductType>
  : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,SparseShape,ProductType> >
{
  typedef typename Product<Lhs,Rhs>::Scalar Scalar;

  template<typename Dst>
  static void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
  {
    typedef typename nested_eval<Lhs,((Rhs::Flags&RowMajorBit)==0) ? Dynamic : 1>::type LhsNested;
    typedef typename nested_eval<Rhs,((Lhs::Flags&RowMajorBit)==RowMajorBit) ? 1 : Lhs::RowsAtCompileTime>::type RhsNested;
    LhsNested lhsNested(lhs);
    RhsNested rhsNested(rhs);

    // transpose everything
    Transpose<Dst> dstT(dst);
    internal::sparse_time_dense_product(rhsNested.transpose(), lhsNested.transpose(), dstT, alpha);
  }
};

template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, DenseShape, SparseTriangularShape, ProductType>
  : generic_product_impl<Lhs, Rhs, DenseShape, SparseShape, ProductType>
{};

template<typename LhsT, typename RhsT, bool NeedToTranspose>
struct sparse_dense_outer_product_evaluator
{
protected:
  typedef typename conditional<NeedToTranspose,RhsT,LhsT>::type Lhs1;
  typedef typename conditional<NeedToTranspose,LhsT,RhsT>::type ActualRhs;
  typedef Product<LhsT,RhsT,DefaultProduct> ProdXprType;

  // if the actual left-hand side is a dense vector,
  // then build a sparse-view so that we can seamlessly iterate over it.
  typedef typename conditional<is_same<typename internal::traits<Lhs1>::StorageKind,Sparse>::value,
            Lhs1, SparseView<Lhs1> >::type ActualLhs;
  typedef typename conditional<is_same<typename internal::traits<Lhs1>::StorageKind,Sparse>::value,
            Lhs1 const&, SparseView<Lhs1> >::type LhsArg;

  typedef evaluator<ActualLhs> LhsEval;
  typedef evaluator<ActualRhs> RhsEval;
  typedef typename evaluator<ActualLhs>::InnerIterator LhsIterator;
  typedef typename ProdXprType::Scalar Scalar;

public:
  enum {
    Flags = NeedToTranspose ? RowMajorBit : 0,
    CoeffReadCost = HugeCost
  };

  class InnerIterator : public LhsIterator
  {
  public:
    InnerIterator(const sparse_dense_outer_product_evaluator &xprEval, Index outer)
      : LhsIterator(xprEval.m_lhsXprImpl, 0),
        m_outer(outer),
        m_empty(false),
        m_factor(get(xprEval.m_rhsXprImpl, outer, typename internal::traits<ActualRhs>::StorageKind() ))
    {}

    EIGEN_STRONG_INLINE Index outer() const { return m_outer; }
    EIGEN_STRONG_INLINE Index row() const { return NeedToTranspose ? m_outer : LhsIterator::index(); }
    EIGEN_STRONG_INLINE Index col() const { return NeedToTranspose ? LhsIterator::index() : m_outer; }

    EIGEN_STRONG_INLINE Scalar value() const { return LhsIterator::value() * m_factor; }
    EIGEN_STRONG_INLINE operator bool() const { return LhsIterator::operator bool() && (!m_empty); }

  protected:
    Scalar get(const RhsEval &rhs, Index outer, Dense = Dense()) const
    {
      return rhs.coeff(outer);
    }

    Scalar get(const RhsEval &rhs, Index outer, Sparse = Sparse())
    {
      typename RhsEval::InnerIterator it(rhs, outer);
      if (it && it.index()==0 && it.value()!=Scalar(0))
        return it.value();
      m_empty = true;
      return Scalar(0);
    }

    Index m_outer;
    bool m_empty;
    Scalar m_factor;
  };

  sparse_dense_outer_product_evaluator(const Lhs1 &lhs, const ActualRhs &rhs)
    : m_lhs(lhs), m_lhsXprImpl(m_lhs), m_rhsXprImpl(rhs)
  {
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

  // transpose case
  sparse_dense_outer_product_evaluator(const ActualRhs &rhs, const Lhs1 &lhs)
    : m_lhs(lhs), m_lhsXprImpl(m_lhs), m_rhsXprImpl(rhs)
  {
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

protected:
  const LhsArg m_lhs;
  evaluator<ActualLhs> m_lhsXprImpl;
  evaluator<ActualRhs> m_rhsXprImpl;
};
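
// Illustrative usage (editor's sketch, not part of the library source): the
// evaluator above backs outer products with one sparse operand, e.g.:
//
//   Eigen::SparseVector<double> u(5);
//   u.insert(2) = 3.0;
//   Eigen::RowVectorXd v = Eigen::RowVectorXd::Ones(5);
//   Eigen::SparseMatrix<double> M = u * v;  // only row 2 of M is non-empty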

// sparse * dense outer product
template<typename Lhs, typename Rhs>
struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, OuterProduct, SparseShape, DenseShape>
  : sparse_dense_outer_product_evaluator<Lhs,Rhs, Lhs::IsRowMajor>
{
  typedef sparse_dense_outer_product_evaluator<Lhs,Rhs, Lhs::IsRowMajor> Base;

  typedef Product<Lhs, Rhs> XprType;
  typedef typename XprType::PlainObject PlainObject;

  explicit product_evaluator(const XprType& xpr)
    : Base(xpr.lhs(), xpr.rhs())
  {}

};

template<typename Lhs, typename Rhs>
struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, OuterProduct, DenseShape, SparseShape>
  : sparse_dense_outer_product_evaluator<Lhs,Rhs, Rhs::IsRowMajor>
{
  typedef sparse_dense_outer_product_evaluator<Lhs,Rhs, Rhs::IsRowMajor> Base;

  typedef Product<Lhs, Rhs> XprType;
  typedef typename XprType::PlainObject PlainObject;

  explicit product_evaluator(const XprType& xpr)
    : Base(xpr.lhs(), xpr.rhs())
  {}

};

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_SPARSEDENSEPRODUCT_H
138
3party/eigen/Eigen/src/SparseCore/SparseDiagonalProduct.h
Normal file
@ -0,0 +1,138 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009-2015 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSE_DIAGONAL_PRODUCT_H
#define EIGEN_SPARSE_DIAGONAL_PRODUCT_H

namespace Eigen {

// The product of a diagonal matrix with a sparse matrix can be easily
// implemented using expression templates.
// We have to consider two very different cases:
// 1 - diag * row-major sparse
//     => each inner vector <=> scalar * sparse vector product
//     => so we can reuse CwiseUnaryOp::InnerIterator
// 2 - diag * col-major sparse
//     => each inner vector <=> dense vector * sparse vector cwise product
//     => again, we can reuse the specialization of CwiseBinaryOp::InnerIterator
//        for that particular case
// The two other cases are symmetric.
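//
// Illustrative usage (editor's sketch, not part of the library source),
// covering both cases described above:
//
//   Eigen::VectorXd d = Eigen::VectorXd::LinSpaced(3, 1.0, 3.0);
//   Eigen::SparseMatrix<double> A(3,3);                   // fill A as needed
//   Eigen::SparseMatrix<double> L = d.asDiagonal() * A;   // scales the rows
//   Eigen::SparseMatrix<double> R = A * d.asDiagonal();   // scales the columns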

namespace internal {

enum {
  SDP_AsScalarProduct,
  SDP_AsCwiseProduct
};

template<typename SparseXprType, typename DiagonalCoeffType, int SDP_Tag>
struct sparse_diagonal_product_evaluator;

template<typename Lhs, typename Rhs, int ProductTag>
struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, ProductTag, DiagonalShape, SparseShape>
  : public sparse_diagonal_product_evaluator<Rhs, typename Lhs::DiagonalVectorType, Rhs::Flags&RowMajorBit?SDP_AsScalarProduct:SDP_AsCwiseProduct>
{
  typedef Product<Lhs, Rhs, DefaultProduct> XprType;
  enum { CoeffReadCost = HugeCost, Flags = Rhs::Flags&RowMajorBit, Alignment = 0 }; // FIXME CoeffReadCost & Flags

  typedef sparse_diagonal_product_evaluator<Rhs, typename Lhs::DiagonalVectorType, Rhs::Flags&RowMajorBit?SDP_AsScalarProduct:SDP_AsCwiseProduct> Base;
  explicit product_evaluator(const XprType& xpr) : Base(xpr.rhs(), xpr.lhs().diagonal()) {}
};

template<typename Lhs, typename Rhs, int ProductTag>
struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, ProductTag, SparseShape, DiagonalShape>
  : public sparse_diagonal_product_evaluator<Lhs, Transpose<const typename Rhs::DiagonalVectorType>, Lhs::Flags&RowMajorBit?SDP_AsCwiseProduct:SDP_AsScalarProduct>
{
  typedef Product<Lhs, Rhs, DefaultProduct> XprType;
  enum { CoeffReadCost = HugeCost, Flags = Lhs::Flags&RowMajorBit, Alignment = 0 }; // FIXME CoeffReadCost & Flags

  typedef sparse_diagonal_product_evaluator<Lhs, Transpose<const typename Rhs::DiagonalVectorType>, Lhs::Flags&RowMajorBit?SDP_AsCwiseProduct:SDP_AsScalarProduct> Base;
  explicit product_evaluator(const XprType& xpr) : Base(xpr.lhs(), xpr.rhs().diagonal().transpose()) {}
};

template<typename SparseXprType, typename DiagonalCoeffType>
struct sparse_diagonal_product_evaluator<SparseXprType, DiagonalCoeffType, SDP_AsScalarProduct>
{
protected:
  typedef typename evaluator<SparseXprType>::InnerIterator SparseXprInnerIterator;
  typedef typename SparseXprType::Scalar Scalar;

public:
  class InnerIterator : public SparseXprInnerIterator
  {
  public:
    InnerIterator(const sparse_diagonal_product_evaluator &xprEval, Index outer)
      : SparseXprInnerIterator(xprEval.m_sparseXprImpl, outer),
        m_coeff(xprEval.m_diagCoeffImpl.coeff(outer))
    {}

    EIGEN_STRONG_INLINE Scalar value() const { return m_coeff * SparseXprInnerIterator::value(); }
  protected:
    typename DiagonalCoeffType::Scalar m_coeff;
  };

  sparse_diagonal_product_evaluator(const SparseXprType &sparseXpr, const DiagonalCoeffType &diagCoeff)
    : m_sparseXprImpl(sparseXpr), m_diagCoeffImpl(diagCoeff)
  {}

  Index nonZerosEstimate() const { return m_sparseXprImpl.nonZerosEstimate(); }

protected:
  evaluator<SparseXprType> m_sparseXprImpl;
  evaluator<DiagonalCoeffType> m_diagCoeffImpl;
};


template<typename SparseXprType, typename DiagCoeffType>
struct sparse_diagonal_product_evaluator<SparseXprType, DiagCoeffType, SDP_AsCwiseProduct>
{
  typedef typename SparseXprType::Scalar Scalar;
  typedef typename SparseXprType::StorageIndex StorageIndex;

  typedef typename nested_eval<DiagCoeffType,SparseXprType::IsRowMajor ? SparseXprType::RowsAtCompileTime
                                                                       : SparseXprType::ColsAtCompileTime>::type DiagCoeffNested;

  class InnerIterator
  {
    typedef typename evaluator<SparseXprType>::InnerIterator SparseXprIter;
  public:
    InnerIterator(const sparse_diagonal_product_evaluator &xprEval, Index outer)
      : m_sparseIter(xprEval.m_sparseXprEval, outer), m_diagCoeffNested(xprEval.m_diagCoeffNested)
    {}

    inline Scalar value() const { return m_sparseIter.value() * m_diagCoeffNested.coeff(index()); }
    inline StorageIndex index() const { return m_sparseIter.index(); }
    inline Index outer() const { return m_sparseIter.outer(); }
    inline Index col() const { return SparseXprType::IsRowMajor ? m_sparseIter.index() : m_sparseIter.outer(); }
    inline Index row() const { return SparseXprType::IsRowMajor ? m_sparseIter.outer() : m_sparseIter.index(); }

    EIGEN_STRONG_INLINE InnerIterator& operator++() { ++m_sparseIter; return *this; }
    inline operator bool() const { return m_sparseIter; }

  protected:
    SparseXprIter m_sparseIter;
    DiagCoeffNested m_diagCoeffNested;
  };

  sparse_diagonal_product_evaluator(const SparseXprType &sparseXpr, const DiagCoeffType &diagCoeff)
    : m_sparseXprEval(sparseXpr), m_diagCoeffNested(diagCoeff)
  {}

  Index nonZerosEstimate() const { return m_sparseXprEval.nonZerosEstimate(); }

protected:
  evaluator<SparseXprType> m_sparseXprEval;
  DiagCoeffNested m_diagCoeffNested;
};

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_SPARSE_DIAGONAL_PRODUCT_H
98
3party/eigen/Eigen/src/SparseCore/SparseDot.h
Normal file
@ -0,0 +1,98 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSE_DOT_H
#define EIGEN_SPARSE_DOT_H

namespace Eigen {

template<typename Derived>
template<typename OtherDerived>
typename internal::traits<Derived>::Scalar
SparseMatrixBase<Derived>::dot(const MatrixBase<OtherDerived>& other) const
{
  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
  EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
  EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived)
  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
    YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)

  eigen_assert(size() == other.size());
  eigen_assert(other.size()>0 && "you are using an uninitialized vector");

  internal::evaluator<Derived> thisEval(derived());
  typename internal::evaluator<Derived>::InnerIterator i(thisEval, 0);
  Scalar res(0);
  while (i)
  {
    res += numext::conj(i.value()) * other.coeff(i.index());
    ++i;
  }
  return res;
}

template<typename Derived>
template<typename OtherDerived>
typename internal::traits<Derived>::Scalar
SparseMatrixBase<Derived>::dot(const SparseMatrixBase<OtherDerived>& other) const
{
  EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
  EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
  EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived)
  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
    YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)

  eigen_assert(size() == other.size());

  internal::evaluator<Derived> thisEval(derived());
  typename internal::evaluator<Derived>::InnerIterator i(thisEval, 0);

  internal::evaluator<OtherDerived> otherEval(other.derived());
  typename internal::evaluator<OtherDerived>::InnerIterator j(otherEval, 0);

  Scalar res(0);
  while (i && j)
  {
    if (i.index()==j.index())
    {
      res += numext::conj(i.value()) * j.value();
      ++i; ++j;
    }
    else if (i.index()<j.index())
      ++i;
    else
      ++j;
  }
  return res;
}
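
// Illustrative usage (editor's sketch, not part of the library source): the
// sparse-sparse overload above intersects the two sorted index sequences:
//
//   Eigen::SparseVector<double> u(4), w(4);
//   u.insert(1) = 2.0;
//   w.insert(1) = 3.0; w.insert(2) = 7.0;
//   double d = u.dot(w);  // 6.0: index 1 is the only shared entry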

template<typename Derived>
inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
SparseMatrixBase<Derived>::squaredNorm() const
{
  return numext::real((*this).cwiseAbs2().sum());
}

template<typename Derived>
inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
SparseMatrixBase<Derived>::norm() const
{
  using std::sqrt;
  return sqrt(squaredNorm());
}

template<typename Derived>
inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
SparseMatrixBase<Derived>::blueNorm() const
{
  return internal::blueNorm_impl(*this);
}
} // end namespace Eigen

#endif // EIGEN_SPARSE_DOT_H
29
3party/eigen/Eigen/src/SparseCore/SparseFuzzy.h
Normal file
@ -0,0 +1,29 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSE_FUZZY_H
#define EIGEN_SPARSE_FUZZY_H

namespace Eigen {

template<typename Derived>
template<typename OtherDerived>
bool SparseMatrixBase<Derived>::isApprox(const SparseMatrixBase<OtherDerived>& other, const RealScalar &prec) const
{
  const typename internal::nested_eval<Derived,2,PlainObject>::type actualA(derived());
  typename internal::conditional<bool(IsRowMajor)==bool(OtherDerived::IsRowMajor),
    const typename internal::nested_eval<OtherDerived,2,PlainObject>::type,
    const PlainObject>::type actualB(other.derived());

  return (actualA - actualB).squaredNorm() <= prec * prec * numext::mini(actualA.squaredNorm(), actualB.squaredNorm());
}
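
// Illustrative usage (editor's sketch, not part of the library source):
//
//   Eigen::SparseMatrix<double> A(2,2), B(2,2);
//   A.insert(0,0) = 1.0;
//   B.insert(0,0) = 1.0 + 1e-14;
//   bool close = A.isApprox(B);  // true under the default precision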

} // end namespace Eigen

#endif // EIGEN_SPARSE_FUZZY_H
305
3party/eigen/Eigen/src/SparseCore/SparseMap.h
Normal file
@ -0,0 +1,305 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2015 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSE_MAP_H
#define EIGEN_SPARSE_MAP_H

namespace Eigen {

namespace internal {

template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
struct traits<Map<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
  : public traits<SparseMatrix<MatScalar,MatOptions,MatIndex> >
{
  typedef SparseMatrix<MatScalar,MatOptions,MatIndex> PlainObjectType;
  typedef traits<PlainObjectType> TraitsBase;
  enum {
    Flags = TraitsBase::Flags & (~NestByRefBit)
  };
};

template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
struct traits<Map<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
  : public traits<SparseMatrix<MatScalar,MatOptions,MatIndex> >
{
  typedef SparseMatrix<MatScalar,MatOptions,MatIndex> PlainObjectType;
  typedef traits<PlainObjectType> TraitsBase;
  enum {
    Flags = TraitsBase::Flags & (~ (NestByRefBit | LvalueBit))
  };
};

} // end namespace internal

template<typename Derived,
         int Level = internal::accessors_level<Derived>::has_write_access ? WriteAccessors : ReadOnlyAccessors
> class SparseMapBase;

/** \ingroup SparseCore_Module
  * class SparseMapBase
  * \brief Common base class for Map and Ref instance of sparse matrix and vector.
  */
template<typename Derived>
class SparseMapBase<Derived,ReadOnlyAccessors>
  : public SparseCompressedBase<Derived>
{
  public:
    typedef SparseCompressedBase<Derived> Base;
    typedef typename Base::Scalar Scalar;
    typedef typename Base::StorageIndex StorageIndex;
    enum { IsRowMajor = Base::IsRowMajor };
    using Base::operator=;
  protected:

    typedef typename internal::conditional<
                       bool(internal::is_lvalue<Derived>::value),
                       Scalar *, const Scalar *>::type ScalarPointer;
    typedef typename internal::conditional<
                       bool(internal::is_lvalue<Derived>::value),
                       StorageIndex *, const StorageIndex *>::type IndexPointer;

    Index   m_outerSize;
    Index   m_innerSize;
    Array<StorageIndex,2,1>  m_zero_nnz;
    IndexPointer  m_outerIndex;
    IndexPointer  m_innerIndices;
    ScalarPointer m_values;
    IndexPointer  m_innerNonZeros;

  public:

    /** \copydoc SparseMatrixBase::rows() */
    inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
    /** \copydoc SparseMatrixBase::cols() */
    inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
    /** \copydoc SparseMatrixBase::innerSize() */
    inline Index innerSize() const { return m_innerSize; }
    /** \copydoc SparseMatrixBase::outerSize() */
    inline Index outerSize() const { return m_outerSize; }
    /** \copydoc SparseCompressedBase::nonZeros */
    inline Index nonZeros() const { return m_zero_nnz[1]; }

    /** \copydoc SparseCompressedBase::isCompressed */
    bool isCompressed() const { return m_innerNonZeros==0; }

    //----------------------------------------
    // direct access interface
    /** \copydoc SparseMatrix::valuePtr */
    inline const Scalar* valuePtr() const { return m_values; }
    /** \copydoc SparseMatrix::innerIndexPtr */
    inline const StorageIndex* innerIndexPtr() const { return m_innerIndices; }
    /** \copydoc SparseMatrix::outerIndexPtr */
    inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; }
    /** \copydoc SparseMatrix::innerNonZeroPtr */
    inline const StorageIndex* innerNonZeroPtr() const { return m_innerNonZeros; }
    //----------------------------------------

    /** \copydoc SparseMatrix::coeff */
    inline Scalar coeff(Index row, Index col) const
    {
      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;

      Index start = m_outerIndex[outer];
      Index end = isCompressed() ? m_outerIndex[outer+1] : start + m_innerNonZeros[outer];
      if (start==end)
        return Scalar(0);
      else if (end>0 && inner==m_innerIndices[end-1])
        return m_values[end-1];
      // ^^ optimization: let's first check if it is the last coefficient
      // (very common in high level algorithms)
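      // Otherwise fall back to a binary search over the sorted inner indices
      // of this outer vector.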

      const StorageIndex* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end-1],inner);
      const Index id = r-&m_innerIndices[0];
      return ((*r==inner) && (id<end)) ? m_values[id] : Scalar(0);
    }

    inline SparseMapBase(Index rows, Index cols, Index nnz, IndexPointer outerIndexPtr, IndexPointer innerIndexPtr,
                         ScalarPointer valuePtr, IndexPointer innerNonZerosPtr = 0)
      : m_outerSize(IsRowMajor?rows:cols), m_innerSize(IsRowMajor?cols:rows), m_zero_nnz(0,internal::convert_index<StorageIndex>(nnz)), m_outerIndex(outerIndexPtr),
        m_innerIndices(innerIndexPtr), m_values(valuePtr), m_innerNonZeros(innerNonZerosPtr)
    {}

    // for vectors
    inline SparseMapBase(Index size, Index nnz, IndexPointer innerIndexPtr, ScalarPointer valuePtr)
      : m_outerSize(1), m_innerSize(size), m_zero_nnz(0,internal::convert_index<StorageIndex>(nnz)), m_outerIndex(m_zero_nnz.data()),
        m_innerIndices(innerIndexPtr), m_values(valuePtr), m_innerNonZeros(0)
    {}

    /** Empty destructor */
    inline ~SparseMapBase() {}

  protected:
    inline SparseMapBase() {}
};

/** \ingroup SparseCore_Module
  * class SparseMapBase
  * \brief Common base class for writable Map and Ref instance of sparse matrix and vector.
  */
template<typename Derived>
class SparseMapBase<Derived,WriteAccessors>
  : public SparseMapBase<Derived,ReadOnlyAccessors>
{
    typedef MapBase<Derived, ReadOnlyAccessors> ReadOnlyMapBase;

  public:
    typedef SparseMapBase<Derived, ReadOnlyAccessors> Base;
    typedef typename Base::Scalar Scalar;
    typedef typename Base::StorageIndex StorageIndex;
    enum { IsRowMajor = Base::IsRowMajor };

    using Base::operator=;

  public:

    //----------------------------------------
    // direct access interface
    using Base::valuePtr;
    using Base::innerIndexPtr;
    using Base::outerIndexPtr;
    using Base::innerNonZeroPtr;
    /** \copydoc SparseMatrix::valuePtr */
    inline Scalar* valuePtr() { return Base::m_values; }
    /** \copydoc SparseMatrix::innerIndexPtr */
    inline StorageIndex* innerIndexPtr() { return Base::m_innerIndices; }
    /** \copydoc SparseMatrix::outerIndexPtr */
    inline StorageIndex* outerIndexPtr() { return Base::m_outerIndex; }
    /** \copydoc SparseMatrix::innerNonZeroPtr */
    inline StorageIndex* innerNonZeroPtr() { return Base::m_innerNonZeros; }
    //----------------------------------------

    /** \copydoc SparseMatrix::coeffRef */
    inline Scalar& coeffRef(Index row, Index col)
    {
      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;

      Index start = Base::m_outerIndex[outer];
      Index end = Base::isCompressed() ? Base::m_outerIndex[outer+1] : start + Base::m_innerNonZeros[outer];
      eigen_assert(end>=start && "you probably called coeffRef on a non-finalized matrix");
      eigen_assert(end>start && "coeffRef cannot be called on a zero coefficient");
      StorageIndex* r = std::lower_bound(&Base::m_innerIndices[start],&Base::m_innerIndices[end],inner);
      const Index id = r - &Base::m_innerIndices[0];
      eigen_assert((*r==inner) && (id<end) && "coeffRef cannot be called on a zero coefficient");
      return const_cast<Scalar*>(Base::m_values)[id];
    }

    inline SparseMapBase(Index rows, Index cols, Index nnz, StorageIndex* outerIndexPtr, StorageIndex* innerIndexPtr,
                         Scalar* valuePtr, StorageIndex* innerNonZerosPtr = 0)
      : Base(rows, cols, nnz, outerIndexPtr, innerIndexPtr, valuePtr, innerNonZerosPtr)
    {}

    // for vectors
    inline SparseMapBase(Index size, Index nnz, StorageIndex* innerIndexPtr, Scalar* valuePtr)
      : Base(size, nnz, innerIndexPtr, valuePtr)
    {}

    /** Empty destructor */
    inline ~SparseMapBase() {}

  protected:
    inline SparseMapBase() {}
};

/** \ingroup SparseCore_Module
  *
  * \brief Specialization of class Map for SparseMatrix-like storage.
  *
  * \tparam SparseMatrixType the equivalent sparse matrix type of the referenced data, it must be a template instance of class SparseMatrix.
  *
  * \sa class Map, class SparseMatrix, class Ref<SparseMatrixType,Options>
  */
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
class Map<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType>
  : public SparseMapBase<Map<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
#else
template<typename SparseMatrixType>
class Map<SparseMatrixType>
  : public SparseMapBase<Derived,WriteAccessors>
#endif
{
  public:
    typedef SparseMapBase<Map> Base;
    EIGEN_SPARSE_PUBLIC_INTERFACE(Map)
    enum { IsRowMajor = Base::IsRowMajor };

  public:

    /** Constructs a read-write Map to a sparse matrix of size \a rows x \a cols, containing \a nnz non-zero coefficients,
      * stored as a sparse format as defined by the pointers \a outerIndexPtr, \a innerIndexPtr, and \a valuePtr.
      * If the optional parameter \a innerNonZerosPtr is the null pointer, then a standard compressed format is assumed.
      *
      * This constructor is available only if \c SparseMatrixType is non-const.
      *
      * More details on the expected storage schemes are given in the \ref TutorialSparse "manual pages".
      */
    inline Map(Index rows, Index cols, Index nnz, StorageIndex* outerIndexPtr,
               StorageIndex* innerIndexPtr, Scalar* valuePtr, StorageIndex* innerNonZerosPtr = 0)
      : Base(rows, cols, nnz, outerIndexPtr, innerIndexPtr, valuePtr, innerNonZerosPtr)
    {}
#ifndef EIGEN_PARSED_BY_DOXYGEN
    /** Empty destructor */
    inline ~Map() {}
};
|
||||
|
||||
template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
|
||||
class Map<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType>
|
||||
: public SparseMapBase<Map<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
|
||||
{
|
||||
public:
|
||||
typedef SparseMapBase<Map> Base;
|
||||
EIGEN_SPARSE_PUBLIC_INTERFACE(Map)
|
||||
enum { IsRowMajor = Base::IsRowMajor };
|
||||
|
||||
public:
|
||||
#endif
|
||||
/** This is the const version of the above constructor.
|
||||
*
|
||||
* This constructor is available only if \c SparseMatrixType is const, e.g.:
|
||||
* \code Map<const SparseMatrix<double> > \endcode
|
||||
*/
|
||||
inline Map(Index rows, Index cols, Index nnz, const StorageIndex* outerIndexPtr,
|
||||
const StorageIndex* innerIndexPtr, const Scalar* valuePtr, const StorageIndex* innerNonZerosPtr = 0)
|
||||
: Base(rows, cols, nnz, outerIndexPtr, innerIndexPtr, valuePtr, innerNonZerosPtr)
|
||||
{}
|
||||
|
||||
/** Empty destructor */
|
||||
inline ~Map() {}
|
||||
};
|
||||
|
||||
namespace internal {
|
||||
|
||||
template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
|
||||
struct evaluator<Map<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
|
||||
: evaluator<SparseCompressedBase<Map<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > >
|
||||
{
|
||||
typedef evaluator<SparseCompressedBase<Map<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > > Base;
|
||||
typedef Map<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> XprType;
|
||||
evaluator() : Base() {}
|
||||
explicit evaluator(const XprType &mat) : Base(mat) {}
|
||||
};
|
||||
|
||||
template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
|
||||
struct evaluator<Map<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
|
||||
: evaluator<SparseCompressedBase<Map<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > >
|
||||
{
|
||||
typedef evaluator<SparseCompressedBase<Map<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > > Base;
|
||||
typedef Map<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> XprType;
|
||||
evaluator() : Base() {}
|
||||
explicit evaluator(const XprType &mat) : Base(mat) {}
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_SPARSE_MAP_H
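// Illustrative usage sketch: Map<SparseMatrix> wrapping externally owned arrays
// in standard compressed column format (buffer names and values are hypothetical).
//
//   int    outer[4]  = {0, 1, 2, 2};   // column start offsets (cols+1 entries)
//   int    inner[2]  = {1, 0};         // row index of each stored coefficient
//   double values[2] = {7.0, 3.0};     // the stored coefficients
//   Eigen::Map<Eigen::SparseMatrix<double> > m(3, 3, 2, outer, inner, values);
//   double v = m.coeff(1, 0);          // reads 7.0 directly from the buffers, no copy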
1518
3party/eigen/Eigen/src/SparseCore/SparseMatrix.h
Normal file
File diff suppressed because it is too large
398
3party/eigen/Eigen/src/SparseCore/SparseMatrixBase.h
Normal file
@ -0,0 +1,398 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSEMATRIXBASE_H
#define EIGEN_SPARSEMATRIXBASE_H

namespace Eigen {

/** \ingroup SparseCore_Module
  *
  * \class SparseMatrixBase
  *
  * \brief Base class of any sparse matrices or sparse expressions
  *
  * \tparam Derived is the derived type, e.g. a sparse matrix type, or an expression, etc.
  *
  * This class can be extended with the help of the plugin mechanism described on the page
  * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_SPARSEMATRIXBASE_PLUGIN.
  */
template<typename Derived> class SparseMatrixBase
  : public EigenBase<Derived>
{
  public:

    typedef typename internal::traits<Derived>::Scalar Scalar;

    /** The numeric type of the expression's coefficients, e.g. float, double, int or std::complex<float>, etc.
      *
      * It is an alias for the Scalar type */
    typedef Scalar value_type;

    typedef typename internal::packet_traits<Scalar>::type PacketScalar;
    typedef typename internal::traits<Derived>::StorageKind StorageKind;

    /** The integer type used to \b store indices within a SparseMatrix.
      * For a \c SparseMatrix<Scalar,Options,IndexType> it is an alias of the third template parameter \c IndexType. */
    typedef typename internal::traits<Derived>::StorageIndex StorageIndex;

    typedef typename internal::add_const_on_value_type_if_arithmetic<
                        typename internal::packet_traits<Scalar>::type
                     >::type PacketReturnType;

    typedef SparseMatrixBase StorageBaseType;

    typedef Matrix<StorageIndex,Dynamic,1> IndexVector;
    typedef Matrix<Scalar,Dynamic,1> ScalarVector;

    template<typename OtherDerived>
    Derived& operator=(const EigenBase<OtherDerived> &other);

    enum {

      RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime,
        /**< The number of rows at compile-time. This is just a copy of the value provided
          * by the \a Derived type. If a value is not known at compile-time,
          * it is set to the \a Dynamic constant.
          * \sa MatrixBase::rows(), MatrixBase::cols(), ColsAtCompileTime, SizeAtCompileTime */

      ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,
        /**< The number of columns at compile-time. This is just a copy of the value provided
          * by the \a Derived type. If a value is not known at compile-time,
          * it is set to the \a Dynamic constant.
          * \sa MatrixBase::rows(), MatrixBase::cols(), RowsAtCompileTime, SizeAtCompileTime */

      SizeAtCompileTime = (internal::size_at_compile_time<internal::traits<Derived>::RowsAtCompileTime,
                                                          internal::traits<Derived>::ColsAtCompileTime>::ret),
        /**< This is equal to the number of coefficients, i.e. the number of
          * rows times the number of columns, or to \a Dynamic if this is not
          * known at compile-time. \sa RowsAtCompileTime, ColsAtCompileTime */

      MaxRowsAtCompileTime = RowsAtCompileTime,
      MaxColsAtCompileTime = ColsAtCompileTime,

      MaxSizeAtCompileTime = (internal::size_at_compile_time<MaxRowsAtCompileTime,
                                                             MaxColsAtCompileTime>::ret),

      IsVectorAtCompileTime = RowsAtCompileTime == 1 || ColsAtCompileTime == 1,
        /**< This is set to true if either the number of rows or the number of
          * columns is known at compile-time to be equal to 1. Indeed, in that case,
          * we are dealing with a column-vector (if there is only one column) or with
          * a row-vector (if there is only one row). */

      NumDimensions = int(MaxSizeAtCompileTime) == 1 ? 0 : bool(IsVectorAtCompileTime) ? 1 : 2,
        /**< This value is equal to Tensor::NumDimensions, i.e. 0 for scalars, 1 for vectors,
          * and 2 for matrices.
          */

      Flags = internal::traits<Derived>::Flags,
        /**< This stores expression \ref flags flags which may or may not be inherited by new expressions
          * constructed from this one. See the \ref flags "list of flags".
          */

      IsRowMajor = Flags&RowMajorBit ? 1 : 0,

      InnerSizeAtCompileTime = int(IsVectorAtCompileTime) ? int(SizeAtCompileTime)
                             : int(IsRowMajor) ? int(ColsAtCompileTime) : int(RowsAtCompileTime),

#ifndef EIGEN_PARSED_BY_DOXYGEN
      _HasDirectAccess = (int(Flags)&DirectAccessBit) ? 1 : 0 // workaround sunCC
#endif
    };

    /** \internal the return type of MatrixBase::adjoint() */
    typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,
                        CwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, Eigen::Transpose<const Derived> >,
                        Transpose<const Derived>
                     >::type AdjointReturnType;
    typedef Transpose<Derived> TransposeReturnType;
    typedef typename internal::add_const<Transpose<const Derived> >::type ConstTransposeReturnType;

    // FIXME storage order do not match evaluator storage order
    typedef SparseMatrix<Scalar, Flags&RowMajorBit ? RowMajor : ColMajor, StorageIndex> PlainObject;

#ifndef EIGEN_PARSED_BY_DOXYGEN
    /** This is the "real scalar" type; if the \a Scalar type is already real numbers
      * (e.g. int, float or double) then \a RealScalar is just the same as \a Scalar. If
      * \a Scalar is \a std::complex<T> then RealScalar is \a T.
      *
      * \sa class NumTraits
      */
    typedef typename NumTraits<Scalar>::Real RealScalar;

    /** \internal the return type of coeff()
      */
    typedef typename internal::conditional<_HasDirectAccess, const Scalar&, Scalar>::type CoeffReturnType;

    /** \internal Represents a matrix with all coefficients equal to one another*/
    typedef CwiseNullaryOp<internal::scalar_constant_op<Scalar>,Matrix<Scalar,Dynamic,Dynamic> > ConstantReturnType;

    /** type of the equivalent dense matrix */
    typedef Matrix<Scalar,RowsAtCompileTime,ColsAtCompileTime> DenseMatrixType;
    /** type of the equivalent square matrix */
    typedef Matrix<Scalar,EIGEN_SIZE_MAX(RowsAtCompileTime,ColsAtCompileTime),
                          EIGEN_SIZE_MAX(RowsAtCompileTime,ColsAtCompileTime)> SquareMatrixType;

    inline const Derived& derived() const { return *static_cast<const Derived*>(this); }
    inline Derived& derived() { return *static_cast<Derived*>(this); }
    inline Derived& const_cast_derived() const
    { return *static_cast<Derived*>(const_cast<SparseMatrixBase*>(this)); }

    typedef EigenBase<Derived> Base;

#endif // not EIGEN_PARSED_BY_DOXYGEN

#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::SparseMatrixBase
#ifdef EIGEN_PARSED_BY_DOXYGEN
#define EIGEN_DOC_UNARY_ADDONS(METHOD,OP) /** <p>This method does not change the sparsity of \c *this: the OP is applied to explicitly stored coefficients only. \sa SparseCompressedBase::coeffs() </p> */
#define EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL /** <p> \warning This method returns a read-only expression for any sparse matrices. \sa \ref TutorialSparse_SubMatrices "Sparse block operations" </p> */
#define EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(COND) /** <p> \warning This method returns a read-write expression for COND sparse matrices only. Otherwise, the returned expression is read-only. \sa \ref TutorialSparse_SubMatrices "Sparse block operations" </p> */
#else
#define EIGEN_DOC_UNARY_ADDONS(X,Y)
#define EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
#define EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF(COND)
#endif
#   include "../plugins/CommonCwiseUnaryOps.h"
#   include "../plugins/CommonCwiseBinaryOps.h"
#   include "../plugins/MatrixCwiseUnaryOps.h"
#   include "../plugins/MatrixCwiseBinaryOps.h"
#   include "../plugins/BlockMethods.h"
#   ifdef EIGEN_SPARSEMATRIXBASE_PLUGIN
#     include EIGEN_SPARSEMATRIXBASE_PLUGIN
#   endif
#undef EIGEN_CURRENT_STORAGE_BASE_CLASS
#undef EIGEN_DOC_UNARY_ADDONS
#undef EIGEN_DOC_BLOCK_ADDONS_NOT_INNER_PANEL
#undef EIGEN_DOC_BLOCK_ADDONS_INNER_PANEL_IF

    /** \returns the number of rows. \sa cols() */
    inline Index rows() const { return derived().rows(); }
    /** \returns the number of columns. \sa rows() */
    inline Index cols() const { return derived().cols(); }
    /** \returns the number of coefficients, which is \a rows()*cols().
      * \sa rows(), cols(). */
    inline Index size() const { return rows() * cols(); }
    /** \returns true if either the number of rows or the number of columns is equal to 1.
      * In other words, this function returns
      * \code rows()==1 || cols()==1 \endcode
      * \sa rows(), cols(), IsVectorAtCompileTime. */
    inline bool isVector() const { return rows()==1 || cols()==1; }
    /** \returns the size of the storage major dimension,
      * i.e., the number of columns for a column-major matrix, and the number of rows otherwise */
    Index outerSize() const { return (int(Flags)&RowMajorBit) ? this->rows() : this->cols(); }
    /** \returns the size of the inner dimension according to the storage order,
      * i.e., the number of rows for a column-major matrix, and the number of cols otherwise */
    Index innerSize() const { return (int(Flags)&RowMajorBit) ? this->cols() : this->rows(); }

    bool isRValue() const { return m_isRValue; }
    Derived& markAsRValue() { m_isRValue = true; return derived(); }

    SparseMatrixBase() : m_isRValue(false) { /* TODO check flags */ }

    template<typename OtherDerived>
    Derived& operator=(const ReturnByValue<OtherDerived>& other);

    template<typename OtherDerived>
    inline Derived& operator=(const SparseMatrixBase<OtherDerived>& other);

    inline Derived& operator=(const Derived& other);

  protected:

    template<typename OtherDerived>
    inline Derived& assign(const OtherDerived& other);

    template<typename OtherDerived>
    inline void assignGeneric(const OtherDerived& other);

  public:

    friend std::ostream & operator << (std::ostream & s, const SparseMatrixBase& m)
    {
      typedef typename Derived::Nested Nested;
      typedef typename internal::remove_all<Nested>::type NestedCleaned;

      if (Flags&RowMajorBit)
      {
        Nested nm(m.derived());
        internal::evaluator<NestedCleaned> thisEval(nm);
        for (Index row=0; row<nm.outerSize(); ++row)
        {
          Index col = 0;
          for (typename internal::evaluator<NestedCleaned>::InnerIterator it(thisEval, row); it; ++it)
          {
            for ( ; col<it.index(); ++col)
              s << "0 ";
            s << it.value() << " ";
            ++col;
          }
          for ( ; col<m.cols(); ++col)
            s << "0 ";
          s << std::endl;
        }
      }
      else
      {
        Nested nm(m.derived());
        internal::evaluator<NestedCleaned> thisEval(nm);
        if (m.cols() == 1) {
          Index row = 0;
          for (typename internal::evaluator<NestedCleaned>::InnerIterator it(thisEval, 0); it; ++it)
          {
            for ( ; row<it.index(); ++row)
              s << "0" << std::endl;
            s << it.value() << std::endl;
            ++row;
          }
          for ( ; row<m.rows(); ++row)
            s << "0" << std::endl;
        }
        else
        {
          SparseMatrix<Scalar, RowMajorBit, StorageIndex> trans = m;
          s << static_cast<const SparseMatrixBase<SparseMatrix<Scalar, RowMajorBit, StorageIndex> >&>(trans);
        }
      }
      return s;
    }
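    // Illustrative sketch: the streaming operator above prints the matrix densely,
    // materializing the implicit zeros (values are hypothetical):
    //
    //   Eigen::SparseMatrix<double> A(2, 3);
    //   A.insert(0, 1) = 5.0;
    //   std::cout << A;   // prints "0 5 0" on the first row and "0 0 0" on the second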

    template<typename OtherDerived>
    Derived& operator+=(const SparseMatrixBase<OtherDerived>& other);
    template<typename OtherDerived>
    Derived& operator-=(const SparseMatrixBase<OtherDerived>& other);

    template<typename OtherDerived>
    Derived& operator+=(const DiagonalBase<OtherDerived>& other);
    template<typename OtherDerived>
    Derived& operator-=(const DiagonalBase<OtherDerived>& other);

    template<typename OtherDerived>
    Derived& operator+=(const EigenBase<OtherDerived> &other);
    template<typename OtherDerived>
    Derived& operator-=(const EigenBase<OtherDerived> &other);

    Derived& operator*=(const Scalar& other);
    Derived& operator/=(const Scalar& other);

    template<typename OtherDerived> struct CwiseProductDenseReturnType {
      typedef CwiseBinaryOp<internal::scalar_product_op<typename ScalarBinaryOpTraits<
                              typename internal::traits<Derived>::Scalar,
                              typename internal::traits<OtherDerived>::Scalar
                            >::ReturnType>,
                            const Derived,
                            const OtherDerived
                          > Type;
    };

    template<typename OtherDerived>
    EIGEN_STRONG_INLINE const typename CwiseProductDenseReturnType<OtherDerived>::Type
    cwiseProduct(const MatrixBase<OtherDerived> &other) const;

    // sparse * diagonal
    template<typename OtherDerived>
    const Product<Derived,OtherDerived>
    operator*(const DiagonalBase<OtherDerived> &other) const
    { return Product<Derived,OtherDerived>(derived(), other.derived()); }

    // diagonal * sparse
    template<typename OtherDerived> friend
    const Product<OtherDerived,Derived>
    operator*(const DiagonalBase<OtherDerived> &lhs, const SparseMatrixBase& rhs)
    { return Product<OtherDerived,Derived>(lhs.derived(), rhs.derived()); }

    // sparse * sparse
    template<typename OtherDerived>
    const Product<Derived,OtherDerived,AliasFreeProduct>
    operator*(const SparseMatrixBase<OtherDerived> &other) const;

    // sparse * dense
    template<typename OtherDerived>
    const Product<Derived,OtherDerived>
    operator*(const MatrixBase<OtherDerived> &other) const
    { return Product<Derived,OtherDerived>(derived(), other.derived()); }

    // dense * sparse
    template<typename OtherDerived> friend
    const Product<OtherDerived,Derived>
    operator*(const MatrixBase<OtherDerived> &lhs, const SparseMatrixBase& rhs)
    { return Product<OtherDerived,Derived>(lhs.derived(), rhs.derived()); }

    /** \returns an expression of P H P^-1 where H is the matrix represented by \c *this */
    SparseSymmetricPermutationProduct<Derived,Upper|Lower> twistedBy(const PermutationMatrix<Dynamic,Dynamic,StorageIndex>& perm) const
    {
      return SparseSymmetricPermutationProduct<Derived,Upper|Lower>(derived(), perm);
    }
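    // Illustrative sketch for twistedBy(), assuming A is a SparseMatrix<double>
    // holding a symmetric matrix and P a permutation of matching size and index type:
    //
    //   Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic, int> P(A.rows());
    //   P.setIdentity();                 // or any other permutation
    //   Eigen::SparseMatrix<double> B;
    //   B = A.twistedBy(P);              // evaluates P * A * P^-1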

    template<typename OtherDerived>
    Derived& operator*=(const SparseMatrixBase<OtherDerived>& other);

    template<int Mode>
    inline const TriangularView<const Derived, Mode> triangularView() const;

    template<unsigned int UpLo> struct SelfAdjointViewReturnType { typedef SparseSelfAdjointView<Derived, UpLo> Type; };
    template<unsigned int UpLo> struct ConstSelfAdjointViewReturnType { typedef const SparseSelfAdjointView<const Derived, UpLo> Type; };

    template<unsigned int UpLo> inline
    typename ConstSelfAdjointViewReturnType<UpLo>::Type selfadjointView() const;
    template<unsigned int UpLo> inline
    typename SelfAdjointViewReturnType<UpLo>::Type selfadjointView();

    template<typename OtherDerived> Scalar dot(const MatrixBase<OtherDerived>& other) const;
    template<typename OtherDerived> Scalar dot(const SparseMatrixBase<OtherDerived>& other) const;
    RealScalar squaredNorm() const;
    RealScalar norm() const;
    RealScalar blueNorm() const;

    TransposeReturnType transpose() { return TransposeReturnType(derived()); }
    const ConstTransposeReturnType transpose() const { return ConstTransposeReturnType(derived()); }
    const AdjointReturnType adjoint() const { return AdjointReturnType(transpose()); }

    DenseMatrixType toDense() const
    {
      return DenseMatrixType(derived());
    }

    template<typename OtherDerived>
    bool isApprox(const SparseMatrixBase<OtherDerived>& other,
                  const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const;

    template<typename OtherDerived>
    bool isApprox(const MatrixBase<OtherDerived>& other,
                  const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const
    { return toDense().isApprox(other,prec); }

    /** \returns the matrix or vector obtained by evaluating this expression.
      *
      * Notice that in the case of a plain matrix or vector (not an expression) this function just returns
      * a const reference, in order to avoid a useless copy.
      */
    inline const typename internal::eval<Derived>::type eval() const
    { return typename internal::eval<Derived>::type(derived()); }

    Scalar sum() const;

    inline const SparseView<Derived>
    pruned(const Scalar& reference = Scalar(0), const RealScalar& epsilon = NumTraits<Scalar>::dummy_precision()) const;

  protected:

    bool m_isRValue;

    static inline StorageIndex convert_index(const Index idx) {
      return internal::convert_index<StorageIndex>(idx);
    }
  private:
    template<typename Dest> void evalTo(Dest &) const;
};

} // end namespace Eigen

#endif // EIGEN_SPARSEMATRIXBASE_H
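// Illustrative sketch of the outerSize()/innerSize() convention defined above:
//
//   Eigen::SparseMatrix<double> colMajor(4, 6);                  // column-major by default
//   // colMajor.outerSize() == 6 (columns), colMajor.innerSize() == 4 (rows)
//   Eigen::SparseMatrix<double, Eigen::RowMajor> rowMajor(4, 6);
//   // rowMajor.outerSize() == 4 (rows),    rowMajor.innerSize() == 6 (columns)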
178
3party/eigen/Eigen/src/SparseCore/SparsePermutation.h
Normal file
@ -0,0 +1,178 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSE_PERMUTATION_H
#define EIGEN_SPARSE_PERMUTATION_H

// This file implements sparse * permutation products

namespace Eigen {

namespace internal {

template<typename ExpressionType, int Side, bool Transposed>
struct permutation_matrix_product<ExpressionType, Side, Transposed, SparseShape>
{
  typedef typename nested_eval<ExpressionType, 1>::type MatrixType;
  typedef typename remove_all<MatrixType>::type MatrixTypeCleaned;

  typedef typename MatrixTypeCleaned::Scalar Scalar;
  typedef typename MatrixTypeCleaned::StorageIndex StorageIndex;

  enum {
    SrcStorageOrder = MatrixTypeCleaned::Flags&RowMajorBit ? RowMajor : ColMajor,
    MoveOuter = SrcStorageOrder==RowMajor ? Side==OnTheLeft : Side==OnTheRight
  };

  typedef typename internal::conditional<MoveOuter,
        SparseMatrix<Scalar,SrcStorageOrder,StorageIndex>,
        SparseMatrix<Scalar,int(SrcStorageOrder)==RowMajor?ColMajor:RowMajor,StorageIndex> >::type ReturnType;

  template<typename Dest,typename PermutationType>
  static inline void run(Dest& dst, const PermutationType& perm, const ExpressionType& xpr)
  {
    MatrixType mat(xpr);
    if(MoveOuter)
    {
      // the permutation moves whole outer vectors: count, reserve, then copy each inner vector
      SparseMatrix<Scalar,SrcStorageOrder,StorageIndex> tmp(mat.rows(), mat.cols());
      Matrix<StorageIndex,Dynamic,1> sizes(mat.outerSize());
      for(Index j=0; j<mat.outerSize(); ++j)
      {
        Index jp = perm.indices().coeff(j);
        sizes[((Side==OnTheLeft) ^ Transposed) ? jp : j] = StorageIndex(mat.innerVector(((Side==OnTheRight) ^ Transposed) ? jp : j).nonZeros());
      }
      tmp.reserve(sizes);
      for(Index j=0; j<mat.outerSize(); ++j)
      {
        Index jp = perm.indices().coeff(j);
        Index jsrc = ((Side==OnTheRight) ^ Transposed) ? jp : j;
        Index jdst = ((Side==OnTheLeft) ^ Transposed) ? jp : j;
        for(typename MatrixTypeCleaned::InnerIterator it(mat,jsrc); it; ++it)
          tmp.insertByOuterInner(jdst,it.index()) = it.value();
      }
      dst = tmp;
    }
    else
    {
      // the permutation acts on the inner indices: switch storage order and scatter
      SparseMatrix<Scalar,int(SrcStorageOrder)==RowMajor?ColMajor:RowMajor,StorageIndex> tmp(mat.rows(), mat.cols());
      Matrix<StorageIndex,Dynamic,1> sizes(tmp.outerSize());
      sizes.setZero();
      PermutationMatrix<Dynamic,Dynamic,StorageIndex> perm_cpy;
      if((Side==OnTheLeft) ^ Transposed)
        perm_cpy = perm;
      else
        perm_cpy = perm.transpose();

      for(Index j=0; j<mat.outerSize(); ++j)
        for(typename MatrixTypeCleaned::InnerIterator it(mat,j); it; ++it)
          sizes[perm_cpy.indices().coeff(it.index())]++;
      tmp.reserve(sizes);
      for(Index j=0; j<mat.outerSize(); ++j)
        for(typename MatrixTypeCleaned::InnerIterator it(mat,j); it; ++it)
          tmp.insertByOuterInner(perm_cpy.indices().coeff(it.index()),j) = it.value();
      dst = tmp;
    }
  }
};

} // end namespace internal

namespace internal {

template <int ProductTag> struct product_promote_storage_type<Sparse, PermutationStorage, ProductTag> { typedef Sparse ret; };
template <int ProductTag> struct product_promote_storage_type<PermutationStorage, Sparse, ProductTag> { typedef Sparse ret; };

// TODO, the following two overloads are only needed to define the right temporary type through
// typename traits<permutation_sparse_matrix_product<Rhs,Lhs,OnTheRight,false> >::ReturnType
// whereas it should be correctly handled by traits<Product<> >::PlainObject

template<typename Lhs, typename Rhs, int ProductTag>
struct product_evaluator<Product<Lhs, Rhs, AliasFreeProduct>, ProductTag, PermutationShape, SparseShape>
  : public evaluator<typename permutation_matrix_product<Rhs,OnTheLeft,false,SparseShape>::ReturnType>
{
  typedef Product<Lhs, Rhs, AliasFreeProduct> XprType;
  typedef typename permutation_matrix_product<Rhs,OnTheLeft,false,SparseShape>::ReturnType PlainObject;
  typedef evaluator<PlainObject> Base;

  enum {
    Flags = Base::Flags | EvalBeforeNestingBit
  };

  explicit product_evaluator(const XprType& xpr)
    : m_result(xpr.rows(), xpr.cols())
  {
    ::new (static_cast<Base*>(this)) Base(m_result);
    generic_product_impl<Lhs, Rhs, PermutationShape, SparseShape, ProductTag>::evalTo(m_result, xpr.lhs(), xpr.rhs());
  }

protected:
  PlainObject m_result;
};

template<typename Lhs, typename Rhs, int ProductTag>
struct product_evaluator<Product<Lhs, Rhs, AliasFreeProduct>, ProductTag, SparseShape, PermutationShape >
  : public evaluator<typename permutation_matrix_product<Lhs,OnTheRight,false,SparseShape>::ReturnType>
{
  typedef Product<Lhs, Rhs, AliasFreeProduct> XprType;
  typedef typename permutation_matrix_product<Lhs,OnTheRight,false,SparseShape>::ReturnType PlainObject;
  typedef evaluator<PlainObject> Base;

  enum {
    Flags = Base::Flags | EvalBeforeNestingBit
  };

  explicit product_evaluator(const XprType& xpr)
    : m_result(xpr.rows(), xpr.cols())
  {
    ::new (static_cast<Base*>(this)) Base(m_result);
    generic_product_impl<Lhs, Rhs, SparseShape, PermutationShape, ProductTag>::evalTo(m_result, xpr.lhs(), xpr.rhs());
  }

protected:
  PlainObject m_result;
};

} // end namespace internal

/** \returns the matrix with the permutation applied to the columns
  */
template<typename SparseDerived, typename PermDerived>
inline const Product<SparseDerived, PermDerived, AliasFreeProduct>
operator*(const SparseMatrixBase<SparseDerived>& matrix, const PermutationBase<PermDerived>& perm)
{ return Product<SparseDerived, PermDerived, AliasFreeProduct>(matrix.derived(), perm.derived()); }

/** \returns the matrix with the permutation applied to the rows
  */
template<typename SparseDerived, typename PermDerived>
inline const Product<PermDerived, SparseDerived, AliasFreeProduct>
operator*( const PermutationBase<PermDerived>& perm, const SparseMatrixBase<SparseDerived>& matrix)
{ return Product<PermDerived, SparseDerived, AliasFreeProduct>(perm.derived(), matrix.derived()); }


/** \returns the matrix with the inverse permutation applied to the columns.
  */
template<typename SparseDerived, typename PermutationType>
inline const Product<SparseDerived, Inverse<PermutationType>, AliasFreeProduct>
operator*(const SparseMatrixBase<SparseDerived>& matrix, const InverseImpl<PermutationType, PermutationStorage>& tperm)
{
  return Product<SparseDerived, Inverse<PermutationType>, AliasFreeProduct>(matrix.derived(), tperm.derived());
}

/** \returns the matrix with the inverse permutation applied to the rows.
  */
template<typename SparseDerived, typename PermutationType>
inline const Product<Inverse<PermutationType>, SparseDerived, AliasFreeProduct>
operator*(const InverseImpl<PermutationType,PermutationStorage>& tperm, const SparseMatrixBase<SparseDerived>& matrix)
{
  return Product<Inverse<PermutationType>, SparseDerived, AliasFreeProduct>(tperm.derived(), matrix.derived());
}

} // end namespace Eigen

#endif // EIGEN_SPARSE_PERMUTATION_H
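// Illustrative usage sketch for the operator* overloads above (contents hypothetical):
//
//   Eigen::SparseMatrix<double> A(3, 3);                            // filled elsewhere
//   Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic> P(3);
//   P.setIdentity();
//   Eigen::SparseMatrix<double> B = P * A;            // permutation applied to the rows
//   Eigen::SparseMatrix<double> C = A * P.inverse();  // inverse permutation on the columns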
181
3party/eigen/Eigen/src/SparseCore/SparseProduct.h
Normal file
@ -0,0 +1,181 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2015 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSEPRODUCT_H
#define EIGEN_SPARSEPRODUCT_H

namespace Eigen {

/** \returns an expression of the product of two sparse matrices.
  * By default, a conservative product preserving the symbolic non-zeros is performed.
  * Automatic pruning of small values can be achieved by calling the pruned() function,
  * in which case a totally different product algorithm is employed:
  * \code
  * C = (A*B).pruned();             // suppress numerical zeros (exact)
  * C = (A*B).pruned(ref);
  * C = (A*B).pruned(ref,epsilon);
  * \endcode
  * where \c ref is a meaningful non-zero reference value.
  * */
template<typename Derived>
template<typename OtherDerived>
inline const Product<Derived,OtherDerived,AliasFreeProduct>
SparseMatrixBase<Derived>::operator*(const SparseMatrixBase<OtherDerived> &other) const
{
  return Product<Derived,OtherDerived,AliasFreeProduct>(derived(), other.derived());
}

namespace internal {

// sparse * sparse
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, SparseShape, SparseShape, ProductType>
{
  template<typename Dest>
  static void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs)
  {
    evalTo(dst, lhs, rhs, typename evaluator_traits<Dest>::Shape());
  }

  // dense += sparse * sparse
  template<typename Dest,typename ActualLhs>
  static void addTo(Dest& dst, const ActualLhs& lhs, const Rhs& rhs, typename enable_if<is_same<typename evaluator_traits<Dest>::Shape,DenseShape>::value,int*>::type* = 0)
  {
    typedef typename nested_eval<ActualLhs,Dynamic>::type LhsNested;
    typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
    LhsNested lhsNested(lhs);
    RhsNested rhsNested(rhs);
    internal::sparse_sparse_to_dense_product_selector<typename remove_all<LhsNested>::type,
                                                      typename remove_all<RhsNested>::type, Dest>::run(lhsNested,rhsNested,dst);
  }

  // dense -= sparse * sparse
  template<typename Dest>
  static void subTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, typename enable_if<is_same<typename evaluator_traits<Dest>::Shape,DenseShape>::value,int*>::type* = 0)
  {
    addTo(dst, -lhs, rhs);
  }

protected:

  // sparse = sparse * sparse
  template<typename Dest>
  static void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, SparseShape)
  {
    typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
    typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
    LhsNested lhsNested(lhs);
    RhsNested rhsNested(rhs);
    internal::conservative_sparse_sparse_product_selector<typename remove_all<LhsNested>::type,
                                                          typename remove_all<RhsNested>::type, Dest>::run(lhsNested,rhsNested,dst);
  }

  // dense = sparse * sparse
  template<typename Dest>
  static void evalTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, DenseShape)
  {
    dst.setZero();
    addTo(dst, lhs, rhs);
  }
};

// sparse * sparse-triangular
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, SparseShape, SparseTriangularShape, ProductType>
 : public generic_product_impl<Lhs, Rhs, SparseShape, SparseShape, ProductType>
{};

// sparse-triangular * sparse
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, SparseTriangularShape, SparseShape, ProductType>
 : public generic_product_impl<Lhs, Rhs, SparseShape, SparseShape, ProductType>
{};

// dense = sparse-product (can be sparse*sparse, sparse*perm, etc.)
template< typename DstXprType, typename Lhs, typename Rhs>
struct Assignment<DstXprType, Product<Lhs,Rhs,AliasFreeProduct>, internal::assign_op<typename DstXprType::Scalar,typename Product<Lhs,Rhs,AliasFreeProduct>::Scalar>, Sparse2Dense>
{
  typedef Product<Lhs,Rhs,AliasFreeProduct> SrcXprType;
  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &)
  {
    Index dstRows = src.rows();
    Index dstCols = src.cols();
    if((dst.rows()!=dstRows) || (dst.cols()!=dstCols))
      dst.resize(dstRows, dstCols);

    generic_product_impl<Lhs, Rhs>::evalTo(dst,src.lhs(),src.rhs());
  }
};

// dense += sparse-product (can be sparse*sparse, sparse*perm, etc.)
template< typename DstXprType, typename Lhs, typename Rhs>
struct Assignment<DstXprType, Product<Lhs,Rhs,AliasFreeProduct>, internal::add_assign_op<typename DstXprType::Scalar,typename Product<Lhs,Rhs,AliasFreeProduct>::Scalar>, Sparse2Dense>
{
  typedef Product<Lhs,Rhs,AliasFreeProduct> SrcXprType;
  static void run(DstXprType &dst, const SrcXprType &src, const internal::add_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &)
  {
    generic_product_impl<Lhs, Rhs>::addTo(dst,src.lhs(),src.rhs());
  }
};

// dense -= sparse-product (can be sparse*sparse, sparse*perm, etc.)
template< typename DstXprType, typename Lhs, typename Rhs>
struct Assignment<DstXprType, Product<Lhs,Rhs,AliasFreeProduct>, internal::sub_assign_op<typename DstXprType::Scalar,typename Product<Lhs,Rhs,AliasFreeProduct>::Scalar>, Sparse2Dense>
{
  typedef Product<Lhs,Rhs,AliasFreeProduct> SrcXprType;
  static void run(DstXprType &dst, const SrcXprType &src, const internal::sub_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> &)
  {
    generic_product_impl<Lhs, Rhs>::subTo(dst,src.lhs(),src.rhs());
  }
};

template<typename Lhs, typename Rhs, int Options>
struct unary_evaluator<SparseView<Product<Lhs, Rhs, Options> >, IteratorBased>
 : public evaluator<typename Product<Lhs, Rhs, DefaultProduct>::PlainObject>
{
  typedef SparseView<Product<Lhs, Rhs, Options> > XprType;
  typedef typename XprType::PlainObject PlainObject;
  typedef evaluator<PlainObject> Base;

  explicit unary_evaluator(const XprType& xpr)
    : m_result(xpr.rows(), xpr.cols())
  {
    using std::abs;
    ::new (static_cast<Base*>(this)) Base(m_result);
    typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
    typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
    LhsNested lhsNested(xpr.nestedExpression().lhs());
    RhsNested rhsNested(xpr.nestedExpression().rhs());

    internal::sparse_sparse_product_with_pruning_selector<typename remove_all<LhsNested>::type,
                                                          typename remove_all<RhsNested>::type, PlainObject>::run(lhsNested,rhsNested,m_result,
                                                                                                                  abs(xpr.reference())*xpr.epsilon());
  }

protected:
  PlainObject m_result;
};

} // end namespace internal

// sparse matrix = sparse-product (can be sparse*sparse, sparse*perm, etc.)
template<typename Scalar, int _Options, typename _StorageIndex>
template<typename Lhs, typename Rhs>
SparseMatrix<Scalar,_Options,_StorageIndex>& SparseMatrix<Scalar,_Options,_StorageIndex>::operator=(const Product<Lhs,Rhs,AliasFreeProduct>& src)
{
  // std::cout << "in Assignment : " << DstOptions << "\n";
  SparseMatrix dst(src.rows(),src.cols());
  internal::generic_product_impl<Lhs, Rhs>::evalTo(dst,src.lhs(),src.rhs());
  this->swap(dst);
  return *this;
}

} // end namespace Eigen

#endif // EIGEN_SPARSEPRODUCT_H
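// Illustrative sketch contrasting the two product paths documented above:
//
//   Eigen::SparseMatrix<double> A(100, 100), B(100, 100), C;
//   // ... fill A and B ...
//   C = A * B;             // conservative product: keeps symbolic non-zeros, even
//                          // coefficients that happen to cancel to exactly zero
//   C = (A * B).pruned();  // pruning product: numerical zeros are dropped, routed
//                          // through the unary_evaluator<SparseView<Product...> > above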
49
3party/eigen/Eigen/src/SparseCore/SparseRedux.h
Normal file
@ -0,0 +1,49 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSEREDUX_H
#define EIGEN_SPARSEREDUX_H

namespace Eigen {

template<typename Derived>
typename internal::traits<Derived>::Scalar
SparseMatrixBase<Derived>::sum() const
{
  eigen_assert(rows()>0 && cols()>0 && "you are using a non initialized matrix");
  Scalar res(0);
  internal::evaluator<Derived> thisEval(derived());
  for (Index j=0; j<outerSize(); ++j)
    for (typename internal::evaluator<Derived>::InnerIterator iter(thisEval,j); iter; ++iter)
      res += iter.value();
  return res;
}

template<typename _Scalar, int _Options, typename _Index>
typename internal::traits<SparseMatrix<_Scalar,_Options,_Index> >::Scalar
SparseMatrix<_Scalar,_Options,_Index>::sum() const
{
  eigen_assert(rows()>0 && cols()>0 && "you are using a non initialized matrix");
  if(this->isCompressed())
    return Matrix<Scalar,1,Dynamic>::Map(m_data.valuePtr(), m_data.size()).sum();
  else
    return Base::sum();
}

template<typename _Scalar, int _Options, typename _Index>
typename internal::traits<SparseVector<_Scalar,_Options, _Index> >::Scalar
SparseVector<_Scalar,_Options,_Index>::sum() const
{
  eigen_assert(rows()>0 && cols()>0 && "you are using a non initialized matrix");
  return Matrix<Scalar,1,Dynamic>::Map(m_data.valuePtr(), m_data.size()).sum();
}

} // end namespace Eigen

#endif // EIGEN_SPARSEREDUX_H
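// Illustrative sketch: sum() reduces the explicitly stored coefficients, and for a
// compressed matrix it is a dense reduction over the value array, as implemented above.
//
//   Eigen::SparseMatrix<double> A(2, 2);
//   A.insert(0, 0) = 1.5;
//   A.insert(1, 1) = 2.5;
//   A.makeCompressed();
//   double total = A.sum();   // 4.0, computed via the Map over valuePtr()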
397
3party/eigen/Eigen/src/SparseCore/SparseRef.h
Normal file
@ -0,0 +1,397 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2015 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSE_REF_H
#define EIGEN_SPARSE_REF_H

namespace Eigen {

enum {
  StandardCompressedFormat = 2 /**< used by Ref<SparseMatrix> to specify whether the input storage must be in standard compressed form */
};

namespace internal {

template<typename Derived> class SparseRefBase;

template<typename MatScalar, int MatOptions, typename MatIndex, int _Options, typename _StrideType>
struct traits<Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, _Options, _StrideType> >
  : public traits<SparseMatrix<MatScalar,MatOptions,MatIndex> >
{
  typedef SparseMatrix<MatScalar,MatOptions,MatIndex> PlainObjectType;
  enum {
    Options = _Options,
    Flags = traits<PlainObjectType>::Flags | CompressedAccessBit | NestByRefBit
  };

  template<typename Derived> struct match {
    enum {
      StorageOrderMatch = PlainObjectType::IsVectorAtCompileTime || Derived::IsVectorAtCompileTime || ((PlainObjectType::Flags&RowMajorBit)==(Derived::Flags&RowMajorBit)),
      MatchAtCompileTime = (Derived::Flags&CompressedAccessBit) && StorageOrderMatch
    };
    typedef typename internal::conditional<MatchAtCompileTime,internal::true_type,internal::false_type>::type type;
  };

};

template<typename MatScalar, int MatOptions, typename MatIndex, int _Options, typename _StrideType>
struct traits<Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, _Options, _StrideType> >
  : public traits<Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, _Options, _StrideType> >
{
  enum {
    Flags = (traits<SparseMatrix<MatScalar,MatOptions,MatIndex> >::Flags | CompressedAccessBit | NestByRefBit) & ~LvalueBit
  };
};

template<typename MatScalar, int MatOptions, typename MatIndex, int _Options, typename _StrideType>
struct traits<Ref<SparseVector<MatScalar,MatOptions,MatIndex>, _Options, _StrideType> >
  : public traits<SparseVector<MatScalar,MatOptions,MatIndex> >
{
  typedef SparseVector<MatScalar,MatOptions,MatIndex> PlainObjectType;
  enum {
    Options = _Options,
    Flags = traits<PlainObjectType>::Flags | CompressedAccessBit | NestByRefBit
  };

  template<typename Derived> struct match {
    enum {
      MatchAtCompileTime = (Derived::Flags&CompressedAccessBit) && Derived::IsVectorAtCompileTime
    };
    typedef typename internal::conditional<MatchAtCompileTime,internal::true_type,internal::false_type>::type type;
  };

};

template<typename MatScalar, int MatOptions, typename MatIndex, int _Options, typename _StrideType>
struct traits<Ref<const SparseVector<MatScalar,MatOptions,MatIndex>, _Options, _StrideType> >
  : public traits<Ref<SparseVector<MatScalar,MatOptions,MatIndex>, _Options, _StrideType> >
{
  enum {
    Flags = (traits<SparseVector<MatScalar,MatOptions,MatIndex> >::Flags | CompressedAccessBit | NestByRefBit) & ~LvalueBit
  };
};

template<typename Derived>
struct traits<SparseRefBase<Derived> > : public traits<Derived> {};

template<typename Derived> class SparseRefBase
  : public SparseMapBase<Derived>
{
  public:

    typedef SparseMapBase<Derived> Base;
    EIGEN_SPARSE_PUBLIC_INTERFACE(SparseRefBase)

    SparseRefBase()
      : Base(RowsAtCompileTime==Dynamic?0:RowsAtCompileTime,ColsAtCompileTime==Dynamic?0:ColsAtCompileTime, 0, 0, 0, 0, 0)
    {}

  protected:

    template<typename Expression>
    void construct(Expression& expr)
    {
      if(expr.outerIndexPtr()==0)
        ::new (static_cast<Base*>(this)) Base(expr.size(), expr.nonZeros(), expr.innerIndexPtr(), expr.valuePtr());
      else
        ::new (static_cast<Base*>(this)) Base(expr.rows(), expr.cols(), expr.nonZeros(), expr.outerIndexPtr(), expr.innerIndexPtr(), expr.valuePtr(), expr.innerNonZeroPtr());
    }
};

} // namespace internal


/**
  * \ingroup SparseCore_Module
  *
  * \brief A sparse matrix expression referencing an existing sparse expression
  *
  * \tparam SparseMatrixType the equivalent sparse matrix type of the referenced data; it must be a template instance of class SparseMatrix.
  * \tparam Options specifies whether a standard compressed format is required; \c Options is either \c #StandardCompressedFormat or \c 0.
  *                 The default is \c 0.
  *
  * \sa class Ref
  */
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
class Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType >
  : public internal::SparseRefBase<Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType > >
#else
template<typename SparseMatrixType, int Options>
class Ref<SparseMatrixType, Options>
  : public SparseMapBase<Derived,WriteAccessors> // yes, that's weird to use Derived here, but that works!
#endif
{
    typedef SparseMatrix<MatScalar,MatOptions,MatIndex> PlainObjectType;
    typedef internal::traits<Ref> Traits;
    template<int OtherOptions>
    inline Ref(const SparseMatrix<MatScalar,OtherOptions,MatIndex>& expr);
    template<int OtherOptions>
    inline Ref(const MappedSparseMatrix<MatScalar,OtherOptions,MatIndex>& expr);
  public:

    typedef internal::SparseRefBase<Ref> Base;
    EIGEN_SPARSE_PUBLIC_INTERFACE(Ref)


#ifndef EIGEN_PARSED_BY_DOXYGEN
    template<int OtherOptions>
    inline Ref(SparseMatrix<MatScalar,OtherOptions,MatIndex>& expr)
    {
      EIGEN_STATIC_ASSERT(bool(Traits::template match<SparseMatrix<MatScalar,OtherOptions,MatIndex> >::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);
      eigen_assert( ((Options & int(StandardCompressedFormat))==0) || (expr.isCompressed()) );
      Base::construct(expr.derived());
    }

    template<int OtherOptions>
    inline Ref(MappedSparseMatrix<MatScalar,OtherOptions,MatIndex>& expr)
    {
      EIGEN_STATIC_ASSERT(bool(Traits::template match<SparseMatrix<MatScalar,OtherOptions,MatIndex> >::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);
      eigen_assert( ((Options & int(StandardCompressedFormat))==0) || (expr.isCompressed()) );
      Base::construct(expr.derived());
    }

    template<typename Derived>
    inline Ref(const SparseCompressedBase<Derived>& expr)
#else
    /** Implicit constructor from any sparse expression (2D matrix or 1D vector) */
    template<typename Derived>
    inline Ref(SparseCompressedBase<Derived>& expr)
#endif
    {
      EIGEN_STATIC_ASSERT(bool(internal::is_lvalue<Derived>::value), THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY);
      EIGEN_STATIC_ASSERT(bool(Traits::template match<Derived>::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);
      eigen_assert( ((Options & int(StandardCompressedFormat))==0) || (expr.isCompressed()) );
      Base::construct(expr.const_cast_derived());
    }
};

// this is the const ref version
template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
class Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType>
  : public internal::SparseRefBase<Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
{
    typedef SparseMatrix<MatScalar,MatOptions,MatIndex> TPlainObjectType;
    typedef internal::traits<Ref> Traits;
  public:

    typedef internal::SparseRefBase<Ref> Base;
    EIGEN_SPARSE_PUBLIC_INTERFACE(Ref)

    template<typename Derived>
    inline Ref(const SparseMatrixBase<Derived>& expr) : m_hasCopy(false)
    {
      construct(expr.derived(), typename Traits::template match<Derived>::type());
    }

    inline Ref(const Ref& other) : Base(other), m_hasCopy(false) {
      // copy constructor shall not copy the m_object, to avoid unnecessary malloc and copy
    }

    template<typename OtherRef>
    inline Ref(const RefBase<OtherRef>& other) : m_hasCopy(false) {
      construct(other.derived(), typename Traits::template match<OtherRef>::type());
    }

    ~Ref() {
      if(m_hasCopy) {
        TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(&m_storage);
        obj->~TPlainObjectType();
      }
    }

  protected:

    template<typename Expression>
    void construct(const Expression& expr,internal::true_type)
    {
      if((Options & int(StandardCompressedFormat)) && (!expr.isCompressed()))
      {
        // the input is not compressed while the Ref requires it: bind to an internal compressed copy
        TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(&m_storage);
        ::new (obj) TPlainObjectType(expr);
        m_hasCopy = true;
        Base::construct(*obj);
      }
      else
      {
        Base::construct(expr);
      }
    }

    template<typename Expression>
    void construct(const Expression& expr, internal::false_type)
    {
      TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(&m_storage);
      ::new (obj) TPlainObjectType(expr);
      m_hasCopy = true;
      Base::construct(*obj);
    }

  protected:
    typename internal::aligned_storage<sizeof(TPlainObjectType), EIGEN_ALIGNOF(TPlainObjectType)>::type m_storage;
    bool m_hasCopy;
};



/**
  * \ingroup SparseCore_Module
  *
  * \brief A sparse vector expression referencing an existing sparse vector expression
  *
  * \tparam SparseVectorType the equivalent sparse vector type of the referenced data; it must be a template instance of class SparseVector.
  *
  * \sa class Ref
  */
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
class Ref<SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType >
  : public internal::SparseRefBase<Ref<SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType > >
#else
template<typename SparseVectorType>
class Ref<SparseVectorType>
  : public SparseMapBase<Derived,WriteAccessors>
#endif
{
    typedef SparseVector<MatScalar,MatOptions,MatIndex> PlainObjectType;
    typedef internal::traits<Ref> Traits;
    template<int OtherOptions>
    inline Ref(const SparseVector<MatScalar,OtherOptions,MatIndex>& expr);
  public:

    typedef internal::SparseRefBase<Ref> Base;
    EIGEN_SPARSE_PUBLIC_INTERFACE(Ref)

#ifndef EIGEN_PARSED_BY_DOXYGEN
    template<int OtherOptions>
    inline Ref(SparseVector<MatScalar,OtherOptions,MatIndex>& expr)
    {
      EIGEN_STATIC_ASSERT(bool(Traits::template match<SparseVector<MatScalar,OtherOptions,MatIndex> >::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);
      Base::construct(expr.derived());
    }

    template<typename Derived>
    inline Ref(const SparseCompressedBase<Derived>& expr)
#else
    /** Implicit constructor from any 1D sparse vector expression */
    template<typename Derived>
    inline Ref(SparseCompressedBase<Derived>& expr)
#endif
    {
      EIGEN_STATIC_ASSERT(bool(internal::is_lvalue<Derived>::value), THIS_EXPRESSION_IS_NOT_A_LVALUE__IT_IS_READ_ONLY);
      EIGEN_STATIC_ASSERT(bool(Traits::template match<Derived>::MatchAtCompileTime), STORAGE_LAYOUT_DOES_NOT_MATCH);
      Base::construct(expr.const_cast_derived());
    }
};

// this is the const ref version
template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
class Ref<const SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType>
  : public internal::SparseRefBase<Ref<const SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
{
    typedef SparseVector<MatScalar,MatOptions,MatIndex> TPlainObjectType;
    typedef internal::traits<Ref> Traits;
  public:

    typedef internal::SparseRefBase<Ref> Base;
    EIGEN_SPARSE_PUBLIC_INTERFACE(Ref)

    template<typename Derived>
    inline Ref(const SparseMatrixBase<Derived>& expr) : m_hasCopy(false)
    {
      construct(expr.derived(), typename Traits::template match<Derived>::type());
    }

    inline Ref(const Ref& other) : Base(other), m_hasCopy(false) {
      // copy constructor shall not copy the m_object, to avoid unnecessary malloc and copy
    }

    template<typename OtherRef>
    inline Ref(const RefBase<OtherRef>& other) : m_hasCopy(false) {
      construct(other.derived(), typename Traits::template match<OtherRef>::type());
    }

    ~Ref() {
      if(m_hasCopy) {
        TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(&m_storage);
        obj->~TPlainObjectType();
      }
    }

  protected:

    template<typename Expression>
    void construct(const Expression& expr,internal::true_type)
    {
      Base::construct(expr);
    }

    template<typename Expression>
    void construct(const Expression& expr, internal::false_type)
    {
      TPlainObjectType* obj = reinterpret_cast<TPlainObjectType*>(&m_storage);
      ::new (obj) TPlainObjectType(expr);
      m_hasCopy = true;
      Base::construct(*obj);
    }

  protected:
    typename internal::aligned_storage<sizeof(TPlainObjectType), EIGEN_ALIGNOF(TPlainObjectType)>::type m_storage;
    bool m_hasCopy;
};

namespace internal {

// FIXME shall we introduce a general evaluator_ref that we can specialize for any sparse object once, and thus remove this copy-pasta thing...

template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
struct evaluator<Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
  : evaluator<SparseCompressedBase<Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > >
{
  typedef evaluator<SparseCompressedBase<Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > > Base;
  typedef Ref<SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> XprType;
  evaluator() : Base() {}
  explicit evaluator(const XprType &mat) : Base(mat) {}
};

template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
struct evaluator<Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
  : evaluator<SparseCompressedBase<Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > >
{
  typedef evaluator<SparseCompressedBase<Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> > > Base;
  typedef Ref<const SparseMatrix<MatScalar,MatOptions,MatIndex>, Options, StrideType> XprType;
  evaluator() : Base() {}
  explicit evaluator(const XprType &mat) : Base(mat) {}
};

template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
struct evaluator<Ref<SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
  : evaluator<SparseCompressedBase<Ref<SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType> > >
{
  typedef evaluator<SparseCompressedBase<Ref<SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType> > > Base;
  typedef Ref<SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType> XprType;
  evaluator() : Base() {}
  explicit evaluator(const XprType &mat) : Base(mat) {}
};

template<typename MatScalar, int MatOptions, typename MatIndex, int Options, typename StrideType>
struct evaluator<Ref<const SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType> >
  : evaluator<SparseCompressedBase<Ref<const SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType> > >
{
  typedef evaluator<SparseCompressedBase<Ref<const SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType> > > Base;
  typedef Ref<const SparseVector<MatScalar,MatOptions,MatIndex>, Options, StrideType> XprType;
  evaluator() : Base() {}
  explicit evaluator(const XprType &mat) : Base(mat) {}
};

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_SPARSE_REF_H
|
659
3party/eigen/Eigen/src/SparseCore/SparseSelfAdjointView.h
Normal file
@ -0,0 +1,659 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSE_SELFADJOINTVIEW_H
#define EIGEN_SPARSE_SELFADJOINTVIEW_H

namespace Eigen {

/** \ingroup SparseCore_Module
  * \class SparseSelfAdjointView
  *
  * \brief Pseudo expression to manipulate a triangular sparse matrix as a selfadjoint matrix.
  *
  * \param MatrixType the type of the sparse matrix storing the coefficients
  * \param Mode can be either \c #Lower or \c #Upper
  *
  * This class is an expression of a selfadjoint matrix built from a triangular part of a matrix
  * with the given storage of the coefficients. It is the return type of SparseMatrixBase::selfadjointView(),
  * and most of the time this is the only way it is used.
  *
  * \sa SparseMatrixBase::selfadjointView()
  */
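// A minimal usage sketch (added for illustration; the names and sizes below are
// hypothetical, not part of the original sources):
//
//   SparseMatrix<double> A(n,n);                 // only one triangle of A is stored
//   VectorXd x(n), y(n);
//   y = A.selfadjointView<Lower>() * x;          // symmetric sparse * dense product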
namespace internal {

template<typename MatrixType, unsigned int Mode>
struct traits<SparseSelfAdjointView<MatrixType,Mode> > : traits<MatrixType> {
};

template<int SrcMode,int DstMode,typename MatrixType,int DestOrder>
void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm = 0);

template<int Mode,typename MatrixType,int DestOrder>
void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm = 0);

} // end namespace internal

template<typename MatrixType, unsigned int _Mode> class SparseSelfAdjointView
  : public EigenBase<SparseSelfAdjointView<MatrixType,_Mode> >
{
  public:

    enum {
      Mode = _Mode,
      TransposeMode = ((Mode & Upper) ? Lower : 0) | ((Mode & Lower) ? Upper : 0),
      RowsAtCompileTime = internal::traits<SparseSelfAdjointView>::RowsAtCompileTime,
      ColsAtCompileTime = internal::traits<SparseSelfAdjointView>::ColsAtCompileTime
    };

    typedef EigenBase<SparseSelfAdjointView> Base;
    typedef typename MatrixType::Scalar Scalar;
    typedef typename MatrixType::StorageIndex StorageIndex;
    typedef Matrix<StorageIndex,Dynamic,1> VectorI;
    typedef typename internal::ref_selector<MatrixType>::non_const_type MatrixTypeNested;
    typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;

    explicit inline SparseSelfAdjointView(MatrixType& matrix) : m_matrix(matrix)
    {
      eigen_assert(rows()==cols() && "SelfAdjointView is only for square matrices");
    }

    inline Index rows() const { return m_matrix.rows(); }
    inline Index cols() const { return m_matrix.cols(); }

    /** \internal \returns a reference to the nested matrix */
    const _MatrixTypeNested& matrix() const { return m_matrix; }
    typename internal::remove_reference<MatrixTypeNested>::type& matrix() { return m_matrix; }

    /** \returns an expression of the matrix product between a sparse self-adjoint matrix \c *this and a sparse matrix \a rhs.
      *
      * Note that there is no algorithmic advantage of performing such a product compared to a general sparse-sparse matrix product.
      * Indeed, the SparseSelfadjointView operand is first copied into a temporary SparseMatrix before computing the product.
      */
    template<typename OtherDerived>
    Product<SparseSelfAdjointView, OtherDerived>
    operator*(const SparseMatrixBase<OtherDerived>& rhs) const
    {
      return Product<SparseSelfAdjointView, OtherDerived>(*this, rhs.derived());
    }

    /** \returns an expression of the matrix product between a sparse matrix \a lhs and a sparse self-adjoint matrix \a rhs.
      *
      * Note that there is no algorithmic advantage of performing such a product compared to a general sparse-sparse matrix product.
      * Indeed, the SparseSelfadjointView operand is first copied into a temporary SparseMatrix before computing the product.
      */
    template<typename OtherDerived> friend
    Product<OtherDerived, SparseSelfAdjointView>
    operator*(const SparseMatrixBase<OtherDerived>& lhs, const SparseSelfAdjointView& rhs)
    {
      return Product<OtherDerived, SparseSelfAdjointView>(lhs.derived(), rhs);
    }

    /** Efficient sparse self-adjoint matrix times dense vector/matrix product */
    template<typename OtherDerived>
    Product<SparseSelfAdjointView,OtherDerived>
    operator*(const MatrixBase<OtherDerived>& rhs) const
    {
      return Product<SparseSelfAdjointView,OtherDerived>(*this, rhs.derived());
    }

    /** Efficient dense vector/matrix times sparse self-adjoint matrix product */
    template<typename OtherDerived> friend
    Product<OtherDerived,SparseSelfAdjointView>
    operator*(const MatrixBase<OtherDerived>& lhs, const SparseSelfAdjointView& rhs)
    {
      return Product<OtherDerived,SparseSelfAdjointView>(lhs.derived(), rhs);
    }

    /** Perform a symmetric rank K update of the selfadjoint matrix \c *this:
      * \f$ this = this + \alpha ( u u^* ) \f$ where \a u is a vector or matrix.
      *
      * \returns a reference to \c *this
      *
      * To perform \f$ this = this + \alpha ( u^* u ) \f$ you can simply
      * call this function with u.adjoint().
      */
    template<typename DerivedU>
    SparseSelfAdjointView& rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha = Scalar(1));
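    // A minimal rankUpdate sketch (added for illustration; u, A and the sizes are
    // hypothetical):
    //
    //   SparseMatrix<double> A(n,n);
    //   SparseMatrix<double> u(n,k);
    //   A.selfadjointView<Lower>().rankUpdate(u, 2.0);  // A := A + 2 u u^* on the stored triangle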

    /** \returns an expression of P H P^-1 */
    // TODO implement twists in a more evaluator friendly fashion
    SparseSymmetricPermutationProduct<_MatrixTypeNested,Mode> twistedBy(const PermutationMatrix<Dynamic,Dynamic,StorageIndex>& perm) const
    {
      return SparseSymmetricPermutationProduct<_MatrixTypeNested,Mode>(m_matrix, perm);
    }

    template<typename SrcMatrixType,int SrcMode>
    SparseSelfAdjointView& operator=(const SparseSymmetricPermutationProduct<SrcMatrixType,SrcMode>& permutedMatrix)
    {
      internal::call_assignment_no_alias_no_transpose(*this, permutedMatrix);
      return *this;
    }

    SparseSelfAdjointView& operator=(const SparseSelfAdjointView& src)
    {
      PermutationMatrix<Dynamic,Dynamic,StorageIndex> pnull;
      return *this = src.twistedBy(pnull);
    }

    // Since we override the copy-assignment operator, we need to explicitly re-declare the copy-constructor
    EIGEN_DEFAULT_COPY_CONSTRUCTOR(SparseSelfAdjointView)

    template<typename SrcMatrixType,unsigned int SrcMode>
    SparseSelfAdjointView& operator=(const SparseSelfAdjointView<SrcMatrixType,SrcMode>& src)
    {
      PermutationMatrix<Dynamic,Dynamic,StorageIndex> pnull;
      return *this = src.twistedBy(pnull);
    }

    void resize(Index rows, Index cols)
    {
      EIGEN_ONLY_USED_FOR_DEBUG(rows);
      EIGEN_ONLY_USED_FOR_DEBUG(cols);
      eigen_assert(rows == this->rows() && cols == this->cols()
                && "SparseSelfadjointView::resize() does not actually allow one to resize.");
    }

  protected:

    MatrixTypeNested m_matrix;
    //mutable VectorI m_countPerRow;
    //mutable VectorI m_countPerCol;
  private:
    template<typename Dest> void evalTo(Dest &) const;
};

/***************************************************************************
* Implementation of SparseMatrixBase methods
***************************************************************************/

template<typename Derived>
template<unsigned int UpLo>
typename SparseMatrixBase<Derived>::template ConstSelfAdjointViewReturnType<UpLo>::Type SparseMatrixBase<Derived>::selfadjointView() const
{
  return SparseSelfAdjointView<const Derived, UpLo>(derived());
}

template<typename Derived>
template<unsigned int UpLo>
typename SparseMatrixBase<Derived>::template SelfAdjointViewReturnType<UpLo>::Type SparseMatrixBase<Derived>::selfadjointView()
{
  return SparseSelfAdjointView<Derived, UpLo>(derived());
}

/***************************************************************************
* Implementation of SparseSelfAdjointView methods
***************************************************************************/

template<typename MatrixType, unsigned int Mode>
template<typename DerivedU>
SparseSelfAdjointView<MatrixType,Mode>&
SparseSelfAdjointView<MatrixType,Mode>::rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha)
{
  SparseMatrix<Scalar,(MatrixType::Flags&RowMajorBit)?RowMajor:ColMajor> tmp = u * u.adjoint();
  if(alpha==Scalar(0))
    m_matrix = tmp.template triangularView<Mode>();
  else
    m_matrix += alpha * tmp.template triangularView<Mode>();

  return *this;
}

namespace internal {

// TODO currently a selfadjoint expression has the form SelfAdjointView<.,.>
// in the future selfadjoint-ness should be defined by the expression traits
// such that Transpose<SelfAdjointView<.,.> > is valid. (currently TriangularBase::transpose() is overloaded to make it work)
template<typename MatrixType, unsigned int Mode>
struct evaluator_traits<SparseSelfAdjointView<MatrixType,Mode> >
{
  typedef typename storage_kind_to_evaluator_kind<typename MatrixType::StorageKind>::Kind Kind;
  typedef SparseSelfAdjointShape Shape;
};

struct SparseSelfAdjoint2Sparse {};

template<> struct AssignmentKind<SparseShape,SparseSelfAdjointShape> { typedef SparseSelfAdjoint2Sparse Kind; };
template<> struct AssignmentKind<SparseSelfAdjointShape,SparseShape> { typedef Sparse2Sparse Kind; };

template< typename DstXprType, typename SrcXprType, typename Functor>
struct Assignment<DstXprType, SrcXprType, Functor, SparseSelfAdjoint2Sparse>
{
  typedef typename DstXprType::StorageIndex StorageIndex;
  typedef internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> AssignOpType;

  template<typename DestScalar,int StorageOrder>
  static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src, const AssignOpType&/*func*/)
  {
    internal::permute_symm_to_fullsymm<SrcXprType::Mode>(src.matrix(), dst);
  }

  // FIXME: the handling of += and -= in sparse matrices should be cleaned up so that the next two overloads could be reduced to this single generic one:
  template<typename DestScalar,int StorageOrder,typename AssignFunc>
  static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src, const AssignFunc& func)
  {
    SparseMatrix<DestScalar,StorageOrder,StorageIndex> tmp(src.rows(),src.cols());
    run(tmp, src, AssignOpType());
    call_assignment_no_alias_no_transpose(dst, tmp, func);
  }

  template<typename DestScalar,int StorageOrder>
  static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src,
                  const internal::add_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar>& /* func */)
  {
    SparseMatrix<DestScalar,StorageOrder,StorageIndex> tmp(src.rows(),src.cols());
    run(tmp, src, AssignOpType());
    dst += tmp;
  }

  template<typename DestScalar,int StorageOrder>
  static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src,
                  const internal::sub_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar>& /* func */)
  {
    SparseMatrix<DestScalar,StorageOrder,StorageIndex> tmp(src.rows(),src.cols());
    run(tmp, src, AssignOpType());
    dst -= tmp;
  }

  template<typename DestScalar>
  static void run(DynamicSparseMatrix<DestScalar,ColMajor,StorageIndex>& dst, const SrcXprType &src, const AssignOpType&/*func*/)
  {
    // TODO directly evaluate into dst;
    SparseMatrix<DestScalar,ColMajor,StorageIndex> tmp(dst.rows(),dst.cols());
    internal::permute_symm_to_fullsymm<SrcXprType::Mode>(src.matrix(), tmp);
    dst = tmp;
  }
};
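// Illustration of the assignment kind above (added; hypothetical names): assigning a
// selfadjoint view to a plain SparseMatrix goes through the SparseSelfAdjoint2Sparse
// path and expands the stored triangle into a full symmetric matrix:
//
//   SparseMatrix<double> lower(n,n);                             // lower triangle only
//   SparseMatrix<double> full = lower.selfadjointView<Lower>();  // full symmetric copy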

} // end namespace internal

/***************************************************************************
* Implementation of sparse self-adjoint times dense matrix
***************************************************************************/

namespace internal {

template<int Mode, typename SparseLhsType, typename DenseRhsType, typename DenseResType, typename AlphaType>
inline void sparse_selfadjoint_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
{
  EIGEN_ONLY_USED_FOR_DEBUG(alpha);

  typedef typename internal::nested_eval<SparseLhsType,DenseRhsType::MaxColsAtCompileTime>::type SparseLhsTypeNested;
  typedef typename internal::remove_all<SparseLhsTypeNested>::type SparseLhsTypeNestedCleaned;
  typedef evaluator<SparseLhsTypeNestedCleaned> LhsEval;
  typedef typename LhsEval::InnerIterator LhsIterator;
  typedef typename SparseLhsType::Scalar LhsScalar;

  enum {
    LhsIsRowMajor = (LhsEval::Flags&RowMajorBit)==RowMajorBit,
    ProcessFirstHalf =
             ((Mode&(Upper|Lower))==(Upper|Lower))
          || ( (Mode&Upper) && !LhsIsRowMajor)
          || ( (Mode&Lower) && LhsIsRowMajor),
    ProcessSecondHalf = !ProcessFirstHalf
  };
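  // Clarifying note (added): within inner vector j, stored entries with index < j precede
  // the diagonal. ProcessFirstHalf is thus true exactly when the stored strict triangle is
  // visited before the diagonal (e.g. Upper with column-major storage), letting the loop
  // below mirror each stored entry to the other half on the fly.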

  SparseLhsTypeNested lhs_nested(lhs);
  LhsEval lhsEval(lhs_nested);

  // work on one column at once
  for (Index k=0; k<rhs.cols(); ++k)
  {
    for (Index j=0; j<lhs.outerSize(); ++j)
    {
      LhsIterator i(lhsEval,j);
      // handle diagonal coeff
      if (ProcessSecondHalf)
      {
        while (i && i.index()<j) ++i;
        if(i && i.index()==j)
        {
          res.coeffRef(j,k) += alpha * i.value() * rhs.coeff(j,k);
          ++i;
        }
      }

      // premultiplied rhs for scatters
      typename ScalarBinaryOpTraits<AlphaType, typename DenseRhsType::Scalar>::ReturnType rhs_j(alpha*rhs(j,k));
      // accumulator for partial scalar product
      typename DenseResType::Scalar res_j(0);
      for(; (ProcessFirstHalf ? i && i.index() < j : i) ; ++i)
      {
        LhsScalar lhs_ij = i.value();
        if(!LhsIsRowMajor) lhs_ij = numext::conj(lhs_ij);
        res_j += lhs_ij * rhs.coeff(i.index(),k);
        res(i.index(),k) += numext::conj(lhs_ij) * rhs_j;
      }
      res.coeffRef(j,k) += alpha * res_j;

      // handle diagonal coeff
      if (ProcessFirstHalf && i && (i.index()==j))
        res.coeffRef(j,k) += alpha * i.value() * rhs.coeff(j,k);
    }
  }
}


template<typename LhsView, typename Rhs, int ProductType>
struct generic_product_impl<LhsView, Rhs, SparseSelfAdjointShape, DenseShape, ProductType>
: generic_product_impl_base<LhsView, Rhs, generic_product_impl<LhsView, Rhs, SparseSelfAdjointShape, DenseShape, ProductType> >
{
  template<typename Dest>
  static void scaleAndAddTo(Dest& dst, const LhsView& lhsView, const Rhs& rhs, const typename Dest::Scalar& alpha)
  {
    typedef typename LhsView::_MatrixTypeNested Lhs;
    typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
    typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
    LhsNested lhsNested(lhsView.matrix());
    RhsNested rhsNested(rhs);

    internal::sparse_selfadjoint_time_dense_product<LhsView::Mode>(lhsNested, rhsNested, dst, alpha);
  }
};

template<typename Lhs, typename RhsView, int ProductType>
struct generic_product_impl<Lhs, RhsView, DenseShape, SparseSelfAdjointShape, ProductType>
: generic_product_impl_base<Lhs, RhsView, generic_product_impl<Lhs, RhsView, DenseShape, SparseSelfAdjointShape, ProductType> >
{
  template<typename Dest>
  static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const RhsView& rhsView, const typename Dest::Scalar& alpha)
  {
    typedef typename RhsView::_MatrixTypeNested Rhs;
    typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
    typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
    LhsNested lhsNested(lhs);
    RhsNested rhsNested(rhsView.matrix());

    // transpose everything
    Transpose<Dest> dstT(dst);
    internal::sparse_selfadjoint_time_dense_product<RhsView::TransposeMode>(rhsNested.transpose(), lhsNested.transpose(), dstT, alpha);
  }
};

// NOTE: these two overloads are needed to evaluate the sparse selfadjoint view into a full sparse matrix
// TODO: maybe the copy could be handled by generic_product_impl so that these overloads would not be needed anymore

template<typename LhsView, typename Rhs, int ProductTag>
struct product_evaluator<Product<LhsView, Rhs, DefaultProduct>, ProductTag, SparseSelfAdjointShape, SparseShape>
  : public evaluator<typename Product<typename Rhs::PlainObject, Rhs, DefaultProduct>::PlainObject>
{
  typedef Product<LhsView, Rhs, DefaultProduct> XprType;
  typedef typename XprType::PlainObject PlainObject;
  typedef evaluator<PlainObject> Base;

  product_evaluator(const XprType& xpr)
    : m_lhs(xpr.lhs()), m_result(xpr.rows(), xpr.cols())
  {
    ::new (static_cast<Base*>(this)) Base(m_result);
    generic_product_impl<typename Rhs::PlainObject, Rhs, SparseShape, SparseShape, ProductTag>::evalTo(m_result, m_lhs, xpr.rhs());
  }

protected:
  typename Rhs::PlainObject m_lhs;
  PlainObject m_result;
};

template<typename Lhs, typename RhsView, int ProductTag>
struct product_evaluator<Product<Lhs, RhsView, DefaultProduct>, ProductTag, SparseShape, SparseSelfAdjointShape>
  : public evaluator<typename Product<Lhs, typename Lhs::PlainObject, DefaultProduct>::PlainObject>
{
  typedef Product<Lhs, RhsView, DefaultProduct> XprType;
  typedef typename XprType::PlainObject PlainObject;
  typedef evaluator<PlainObject> Base;

  product_evaluator(const XprType& xpr)
    : m_rhs(xpr.rhs()), m_result(xpr.rows(), xpr.cols())
  {
    ::new (static_cast<Base*>(this)) Base(m_result);
    generic_product_impl<Lhs, typename Lhs::PlainObject, SparseShape, SparseShape, ProductTag>::evalTo(m_result, xpr.lhs(), m_rhs);
  }

protected:
  typename Lhs::PlainObject m_rhs;
  PlainObject m_result;
};

} // namespace internal

/***************************************************************************
* Implementation of symmetric copies and permutations
***************************************************************************/
namespace internal {

template<int Mode,typename MatrixType,int DestOrder>
void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm)
{
  typedef typename MatrixType::StorageIndex StorageIndex;
  typedef typename MatrixType::Scalar Scalar;
  typedef SparseMatrix<Scalar,DestOrder,StorageIndex> Dest;
  typedef Matrix<StorageIndex,Dynamic,1> VectorI;
  typedef evaluator<MatrixType> MatEval;
  typedef typename evaluator<MatrixType>::InnerIterator MatIterator;

  MatEval matEval(mat);
  Dest& dest(_dest.derived());
  enum {
    StorageOrderMatch = int(Dest::IsRowMajor) == int(MatrixType::IsRowMajor)
  };

  Index size = mat.rows();
  VectorI count;
  count.resize(size);
  count.setZero();
  dest.resize(size,size);
  // first pass: count the number of entries per inner vector of the destination
  for(Index j = 0; j<size; ++j)
  {
    Index jp = perm ? perm[j] : j;
    for(MatIterator it(matEval,j); it; ++it)
    {
      Index i = it.index();
      Index r = it.row();
      Index c = it.col();
      Index ip = perm ? perm[i] : i;
      if(Mode==int(Upper|Lower))
        count[StorageOrderMatch ? jp : ip]++;
      else if(r==c)
        count[ip]++;
      else if(( Mode==Lower && r>c) || ( Mode==Upper && r<c))
      {
        count[ip]++;
        count[jp]++;
      }
    }
  }
  Index nnz = count.sum();

  // reserve space
  dest.resizeNonZeros(nnz);
  dest.outerIndexPtr()[0] = 0;
  for(Index j=0; j<size; ++j)
    dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];
  for(Index j=0; j<size; ++j)
    count[j] = dest.outerIndexPtr()[j];

  // second pass: copy the data
  for(StorageIndex j = 0; j<size; ++j)
  {
    for(MatIterator it(matEval,j); it; ++it)
    {
      StorageIndex i = internal::convert_index<StorageIndex>(it.index());
      Index r = it.row();
      Index c = it.col();

      StorageIndex jp = perm ? perm[j] : j;
      StorageIndex ip = perm ? perm[i] : i;

      if(Mode==int(Upper|Lower))
      {
        Index k = count[StorageOrderMatch ? jp : ip]++;
        dest.innerIndexPtr()[k] = StorageOrderMatch ? ip : jp;
        dest.valuePtr()[k] = it.value();
      }
      else if(r==c)
      {
        Index k = count[ip]++;
        dest.innerIndexPtr()[k] = ip;
        dest.valuePtr()[k] = it.value();
      }
      else if(( (Mode&Lower)==Lower && r>c) || ( (Mode&Upper)==Upper && r<c))
      {
        if(!StorageOrderMatch)
          std::swap(ip,jp);
        Index k = count[jp]++;
        dest.innerIndexPtr()[k] = ip;
        dest.valuePtr()[k] = it.value();
        k = count[ip]++;
        dest.innerIndexPtr()[k] = jp;
        dest.valuePtr()[k] = numext::conj(it.value());
      }
    }
  }
}
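// Note (added): the routine above is a classic two-pass compressed fill: the first pass
// counts entries per destination inner vector and prefix-sums the counts into
// outerIndexPtr(), the second pass reuses 'count' as per-vector write cursors. Each
// off-diagonal entry of the stored triangle is emitted twice (once conjugated), each
// diagonal entry once.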

template<int _SrcMode,int _DstMode,typename MatrixType,int DstOrder>
void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DstOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm)
{
  typedef typename MatrixType::StorageIndex StorageIndex;
  typedef typename MatrixType::Scalar Scalar;
  SparseMatrix<Scalar,DstOrder,StorageIndex>& dest(_dest.derived());
  typedef Matrix<StorageIndex,Dynamic,1> VectorI;
  typedef evaluator<MatrixType> MatEval;
  typedef typename evaluator<MatrixType>::InnerIterator MatIterator;

  enum {
    SrcOrder = MatrixType::IsRowMajor ? RowMajor : ColMajor,
    StorageOrderMatch = int(SrcOrder) == int(DstOrder),
    DstMode = DstOrder==RowMajor ? (_DstMode==Upper ? Lower : Upper) : _DstMode,
    SrcMode = SrcOrder==RowMajor ? (_SrcMode==Upper ? Lower : Upper) : _SrcMode
  };

  MatEval matEval(mat);

  Index size = mat.rows();
  VectorI count(size);
  count.setZero();
  dest.resize(size,size);
  for(StorageIndex j = 0; j<size; ++j)
  {
    StorageIndex jp = perm ? perm[j] : j;
    for(MatIterator it(matEval,j); it; ++it)
    {
      StorageIndex i = it.index();
      if((int(SrcMode)==int(Lower) && i<j) || (int(SrcMode)==int(Upper) && i>j))
        continue;

      StorageIndex ip = perm ? perm[i] : i;
      count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
    }
  }
  dest.outerIndexPtr()[0] = 0;
  for(Index j=0; j<size; ++j)
    dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];
  dest.resizeNonZeros(dest.outerIndexPtr()[size]);
  for(Index j=0; j<size; ++j)
    count[j] = dest.outerIndexPtr()[j];

  for(StorageIndex j = 0; j<size; ++j)
  {
    for(MatIterator it(matEval,j); it; ++it)
    {
      StorageIndex i = it.index();
      if((int(SrcMode)==int(Lower) && i<j) || (int(SrcMode)==int(Upper) && i>j))
        continue;

      StorageIndex jp = perm ? perm[j] : j;
      StorageIndex ip = perm ? perm[i] : i;

      Index k = count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
      dest.innerIndexPtr()[k] = int(DstMode)==int(Lower) ? (std::max)(ip,jp) : (std::min)(ip,jp);

      if(!StorageOrderMatch) std::swap(ip,jp);
      if( ((int(DstMode)==int(Lower) && ip<jp) || (int(DstMode)==int(Upper) && ip>jp)))
        dest.valuePtr()[k] = numext::conj(it.value());
      else
        dest.valuePtr()[k] = it.value();
    }
  }
}

} // end namespace internal

// TODO implement twists in a more evaluator friendly fashion

namespace internal {

template<typename MatrixType, int Mode>
struct traits<SparseSymmetricPermutationProduct<MatrixType,Mode> > : traits<MatrixType> {
};

} // end namespace internal

template<typename MatrixType,int Mode>
class SparseSymmetricPermutationProduct
  : public EigenBase<SparseSymmetricPermutationProduct<MatrixType,Mode> >
{
  public:
    typedef typename MatrixType::Scalar Scalar;
    typedef typename MatrixType::StorageIndex StorageIndex;
    enum {
      RowsAtCompileTime = internal::traits<SparseSymmetricPermutationProduct>::RowsAtCompileTime,
      ColsAtCompileTime = internal::traits<SparseSymmetricPermutationProduct>::ColsAtCompileTime
    };
  protected:
    typedef PermutationMatrix<Dynamic,Dynamic,StorageIndex> Perm;
  public:
    typedef Matrix<StorageIndex,Dynamic,1> VectorI;
    typedef typename MatrixType::Nested MatrixTypeNested;
    typedef typename internal::remove_all<MatrixTypeNested>::type NestedExpression;

    SparseSymmetricPermutationProduct(const MatrixType& mat, const Perm& perm)
      : m_matrix(mat), m_perm(perm)
    {}

    inline Index rows() const { return m_matrix.rows(); }
    inline Index cols() const { return m_matrix.cols(); }

    const NestedExpression& matrix() const { return m_matrix; }
    const Perm& perm() const { return m_perm; }

  protected:
    MatrixTypeNested m_matrix;
    const Perm& m_perm;

};

namespace internal {

template<typename DstXprType, typename MatrixType, int Mode, typename Scalar>
struct Assignment<DstXprType, SparseSymmetricPermutationProduct<MatrixType,Mode>, internal::assign_op<Scalar,typename MatrixType::Scalar>, Sparse2Sparse>
{
  typedef SparseSymmetricPermutationProduct<MatrixType,Mode> SrcXprType;
  typedef typename DstXprType::StorageIndex DstIndex;
  template<int Options>
  static void run(SparseMatrix<Scalar,Options,DstIndex> &dst, const SrcXprType &src, const internal::assign_op<Scalar,typename MatrixType::Scalar> &)
  {
    // internal::permute_symm_to_fullsymm<Mode>(m_matrix,_dest,m_perm.indices().data());
    SparseMatrix<Scalar,(Options&RowMajor)==RowMajor ? ColMajor : RowMajor, DstIndex> tmp;
    internal::permute_symm_to_fullsymm<Mode>(src.matrix(),tmp,src.perm().indices().data());
    dst = tmp;
  }

  template<typename DestType,unsigned int DestMode>
  static void run(SparseSelfAdjointView<DestType,DestMode>& dst, const SrcXprType &src, const internal::assign_op<Scalar,typename MatrixType::Scalar> &)
  {
    internal::permute_symm_to_symm<Mode,DestMode>(src.matrix(),dst.matrix(),src.perm().indices().data());
  }
};

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_SPARSE_SELFADJOINTVIEW_H
124
3party/eigen/Eigen/src/SparseCore/SparseSolverBase.h
Normal file
@ -0,0 +1,124 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSESOLVERBASE_H
#define EIGEN_SPARSESOLVERBASE_H

namespace Eigen {

namespace internal {

  /** \internal
  * Helper functions to solve with a sparse right-hand-side and result.
  * The rhs is decomposed into small vertical panels which are solved through dense temporaries.
  */
template<typename Decomposition, typename Rhs, typename Dest>
typename enable_if<Rhs::ColsAtCompileTime!=1 && Dest::ColsAtCompileTime!=1>::type
solve_sparse_through_dense_panels(const Decomposition &dec, const Rhs& rhs, Dest &dest)
{
  EIGEN_STATIC_ASSERT((Dest::Flags&RowMajorBit)==0,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
  typedef typename Dest::Scalar DestScalar;
  // we process the sparse rhs per block of NbColsAtOnce columns temporarily stored into a dense matrix.
  static const Index NbColsAtOnce = 4;
  Index rhsCols = rhs.cols();
  Index size = rhs.rows();
  // the temporary matrices do not need more columns than NbColsAtOnce:
  Index tmpCols = (std::min)(rhsCols, NbColsAtOnce);
  Eigen::Matrix<DestScalar,Dynamic,Dynamic> tmp(size,tmpCols);
  Eigen::Matrix<DestScalar,Dynamic,Dynamic> tmpX(size,tmpCols);
  for(Index k=0; k<rhsCols; k+=NbColsAtOnce)
  {
    Index actualCols = std::min<Index>(rhsCols-k, NbColsAtOnce);
    tmp.leftCols(actualCols) = rhs.middleCols(k,actualCols);
    tmpX.leftCols(actualCols) = dec.solve(tmp.leftCols(actualCols));
    dest.middleCols(k,actualCols) = tmpX.leftCols(actualCols).sparseView();
  }
}

// Overload for vector as rhs
template<typename Decomposition, typename Rhs, typename Dest>
typename enable_if<Rhs::ColsAtCompileTime==1 || Dest::ColsAtCompileTime==1>::type
solve_sparse_through_dense_panels(const Decomposition &dec, const Rhs& rhs, Dest &dest)
{
  typedef typename Dest::Scalar DestScalar;
  Index size = rhs.rows();
  Eigen::Matrix<DestScalar,Dynamic,1> rhs_dense(rhs);
  Eigen::Matrix<DestScalar,Dynamic,1> dest_dense(size);
  dest_dense = dec.solve(rhs_dense);
  dest = dest_dense.sparseView();
}

} // end namespace internal

/** \class SparseSolverBase
  * \ingroup SparseCore_Module
  * \brief A base class for sparse solvers
  *
  * \tparam Derived the actual type of the solver.
  *
  */
template<typename Derived>
class SparseSolverBase : internal::noncopyable
{
  public:

    /** Default constructor */
    SparseSolverBase()
      : m_isInitialized(false)
    {}

    ~SparseSolverBase()
    {}

    Derived& derived() { return *static_cast<Derived*>(this); }
    const Derived& derived() const { return *static_cast<const Derived*>(this); }

    /** \returns an expression of the solution x of \f$ A x = b \f$ using the current decomposition of A,
      * where \a b is a dense vector or matrix.
      *
      * \sa compute()
      */
    template<typename Rhs>
    inline const Solve<Derived, Rhs>
    solve(const MatrixBase<Rhs>& b) const
    {
      eigen_assert(m_isInitialized && "Solver is not initialized.");
      eigen_assert(derived().rows()==b.rows() && "solve(): invalid number of rows of the right hand side matrix b");
      return Solve<Derived, Rhs>(derived(), b.derived());
    }

    /** \returns an expression of the solution x of \f$ A x = b \f$ using the current decomposition of A,
      * where \a b is a sparse vector or matrix.
      *
      * \sa compute()
      */
    template<typename Rhs>
    inline const Solve<Derived, Rhs>
    solve(const SparseMatrixBase<Rhs>& b) const
    {
      eigen_assert(m_isInitialized && "Solver is not initialized.");
      eigen_assert(derived().rows()==b.rows() && "solve(): invalid number of rows of the right hand side matrix b");
      return Solve<Derived, Rhs>(derived(), b.derived());
    }
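    // A minimal usage sketch (added for illustration; SimplicialLLT is one concrete
    // solver deriving from SparseSolverBase, and the names below are hypothetical):
    //
    //   SparseMatrix<double> A(n,n), B(n,m), X(n,m);
    //   SimplicialLLT<SparseMatrix<double> > llt(A);
    //   X = llt.solve(B);   // sparse rhs: handled panel-wise by _solve_impl below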

#ifndef EIGEN_PARSED_BY_DOXYGEN
    /** \internal default implementation of solving with a sparse rhs */
    template<typename Rhs,typename Dest>
    void _solve_impl(const SparseMatrixBase<Rhs> &b, SparseMatrixBase<Dest> &dest) const
    {
      internal::solve_sparse_through_dense_panels(derived(), b.derived(), dest.derived());
    }
#endif // EIGEN_PARSED_BY_DOXYGEN

  protected:

    mutable bool m_isInitialized;
};

} // end namespace Eigen

#endif // EIGEN_SPARSESOLVERBASE_H
@ -0,0 +1,198 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSESPARSEPRODUCTWITHPRUNING_H
#define EIGEN_SPARSESPARSEPRODUCTWITHPRUNING_H

namespace Eigen {

namespace internal {


// perform a pseudo in-place sparse * sparse product assuming all matrices are col major
template<typename Lhs, typename Rhs, typename ResultType>
static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res, const typename ResultType::RealScalar& tolerance)
{
  // return sparse_sparse_product_with_pruning_impl2(lhs,rhs,res);

  typedef typename remove_all<Rhs>::type::Scalar RhsScalar;
  typedef typename remove_all<ResultType>::type::Scalar ResScalar;
  typedef typename remove_all<Lhs>::type::StorageIndex StorageIndex;

  // make sure to call innerSize/outerSize since we fake the storage order.
  Index rows = lhs.innerSize();
  Index cols = rhs.outerSize();
  //Index size = lhs.outerSize();
  eigen_assert(lhs.outerSize() == rhs.innerSize());

  // allocate a temporary buffer
  AmbiVector<ResScalar,StorageIndex> tempVector(rows);

  // mimics a resizeByInnerOuter:
  if(ResultType::IsRowMajor)
    res.resize(cols, rows);
  else
    res.resize(rows, cols);

  evaluator<Lhs> lhsEval(lhs);
  evaluator<Rhs> rhsEval(rhs);

  // estimate the number of non-zero entries:
  // given a rhs column containing Y non-zeros, we assume that the respective Y columns
  // of the lhs differ on average by one non-zero, thus the number of non-zeros of the
  // product of a rhs column with the lhs is X+Y where X is the average number of non-zeros
  // per column of the lhs.
  // Therefore, we have nnz(lhs*rhs) = nnz(lhs) + nnz(rhs)
  Index estimated_nnz_prod = lhsEval.nonZerosEstimate() + rhsEval.nonZerosEstimate();

  res.reserve(estimated_nnz_prod);
  double ratioColRes = double(estimated_nnz_prod)/(double(lhs.rows())*double(rhs.cols()));
  for (Index j=0; j<cols; ++j)
  {
    // FIXME:
    //double ratioColRes = (double(rhs.innerVector(j).nonZeros()) + double(lhs.nonZeros())/double(lhs.cols()))/double(lhs.rows());
    // let's do a more accurate determination of the nnz ratio for the current column j of res
    tempVector.init(ratioColRes);
    tempVector.setZero();
    for (typename evaluator<Rhs>::InnerIterator rhsIt(rhsEval, j); rhsIt; ++rhsIt)
    {
      // FIXME should be written like this: tmp += rhsIt.value() * lhs.col(rhsIt.index())
      tempVector.restart();
      RhsScalar x = rhsIt.value();
      for (typename evaluator<Lhs>::InnerIterator lhsIt(lhsEval, rhsIt.index()); lhsIt; ++lhsIt)
      {
        tempVector.coeffRef(lhsIt.index()) += lhsIt.value() * x;
      }
    }
    res.startVec(j);
    for (typename AmbiVector<ResScalar,StorageIndex>::Iterator it(tempVector,tolerance); it; ++it)
      res.insertBackByOuterInner(j,it.index()) = it.value();
  }
  res.finalize();
}
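// A minimal usage sketch (added for illustration; hypothetical names). This kernel
// backs pruning products such as:
//
//   SparseMatrix<double> A(n,n), B(n,n);
//   SparseMatrix<double> C = (A * B).pruned();   // entries below the tolerance are dropped
//
// Each column of the result is accumulated into the AmbiVector, and only the entries
// passing the tolerance test are inserted into the compressed result.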

template<typename Lhs, typename Rhs, typename ResultType,
  int LhsStorageOrder = traits<Lhs>::Flags&RowMajorBit,
  int RhsStorageOrder = traits<Rhs>::Flags&RowMajorBit,
  int ResStorageOrder = traits<ResultType>::Flags&RowMajorBit>
struct sparse_sparse_product_with_pruning_selector;

template<typename Lhs, typename Rhs, typename ResultType>
struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,ColMajor>
{
  typedef typename ResultType::RealScalar RealScalar;

  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
  {
    typename remove_all<ResultType>::type _res(res.rows(), res.cols());
    internal::sparse_sparse_product_with_pruning_impl<Lhs,Rhs,ResultType>(lhs, rhs, _res, tolerance);
    res.swap(_res);
  }
};

template<typename Lhs, typename Rhs, typename ResultType>
struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,RowMajor>
{
  typedef typename ResultType::RealScalar RealScalar;
  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
  {
    // we need a col-major matrix to hold the result
    typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::StorageIndex> SparseTemporaryType;
    SparseTemporaryType _res(res.rows(), res.cols());
    internal::sparse_sparse_product_with_pruning_impl<Lhs,Rhs,SparseTemporaryType>(lhs, rhs, _res, tolerance);
    res = _res;
  }
};

template<typename Lhs, typename Rhs, typename ResultType>
struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,RowMajor>
{
  typedef typename ResultType::RealScalar RealScalar;
  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
  {
    // let's transpose the product to get a column x column product
    typename remove_all<ResultType>::type _res(res.rows(), res.cols());
    internal::sparse_sparse_product_with_pruning_impl<Rhs,Lhs,ResultType>(rhs, lhs, _res, tolerance);
    res.swap(_res);
  }
};

template<typename Lhs, typename Rhs, typename ResultType>
struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,ColMajor>
{
  typedef typename ResultType::RealScalar RealScalar;
  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
  {
    typedef SparseMatrix<typename Lhs::Scalar,ColMajor,typename Lhs::StorageIndex> ColMajorMatrixLhs;
    typedef SparseMatrix<typename Rhs::Scalar,ColMajor,typename Lhs::StorageIndex> ColMajorMatrixRhs;
    ColMajorMatrixLhs colLhs(lhs);
    ColMajorMatrixRhs colRhs(rhs);
    internal::sparse_sparse_product_with_pruning_impl<ColMajorMatrixLhs,ColMajorMatrixRhs,ResultType>(colLhs, colRhs, res, tolerance);

    // let's transpose the product to get a column x column product
//     typedef SparseMatrix<typename ResultType::Scalar> SparseTemporaryType;
//     SparseTemporaryType _res(res.cols(), res.rows());
//     sparse_sparse_product_with_pruning_impl<Rhs,Lhs,SparseTemporaryType>(rhs, lhs, _res);
//     res = _res.transpose();
  }
};

template<typename Lhs, typename Rhs, typename ResultType>
struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,RowMajor,RowMajor>
{
  typedef typename ResultType::RealScalar RealScalar;
  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
  {
    typedef SparseMatrix<typename Lhs::Scalar,RowMajor,typename Lhs::StorageIndex> RowMajorMatrixLhs;
    RowMajorMatrixLhs rowLhs(lhs);
    // fixed (added ::run): the original invoked the selector type like a function
    sparse_sparse_product_with_pruning_selector<RowMajorMatrixLhs,Rhs,ResultType,RowMajor,RowMajor>::run(rowLhs,rhs,res,tolerance);
  }
};

template<typename Lhs, typename Rhs, typename ResultType>
struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,ColMajor,RowMajor>
{
  typedef typename ResultType::RealScalar RealScalar;
  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
  {
    typedef SparseMatrix<typename Rhs::Scalar,RowMajor,typename Lhs::StorageIndex> RowMajorMatrixRhs;
    RowMajorMatrixRhs rowRhs(rhs);
    // fixed (added ::run): the original invoked the selector type like a function
    sparse_sparse_product_with_pruning_selector<Lhs,RowMajorMatrixRhs,ResultType,RowMajor,RowMajor,RowMajor>::run(lhs,rowRhs,res,tolerance);
  }
};

template<typename Lhs, typename Rhs, typename ResultType>
struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,RowMajor,ColMajor>
{
  typedef typename ResultType::RealScalar RealScalar;
  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
  {
    typedef SparseMatrix<typename Rhs::Scalar,ColMajor,typename Lhs::StorageIndex> ColMajorMatrixRhs;
    ColMajorMatrixRhs colRhs(rhs);
    internal::sparse_sparse_product_with_pruning_impl<Lhs,ColMajorMatrixRhs,ResultType>(lhs, colRhs, res, tolerance);
  }
};

template<typename Lhs, typename Rhs, typename ResultType>
struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,ColMajor,ColMajor>
{
  typedef typename ResultType::RealScalar RealScalar;
  static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance)
  {
    typedef SparseMatrix<typename Lhs::Scalar,ColMajor,typename Lhs::StorageIndex> ColMajorMatrixLhs;
    ColMajorMatrixLhs colLhs(lhs);
    internal::sparse_sparse_product_with_pruning_impl<ColMajorMatrixLhs,Rhs,ResultType>(colLhs, rhs, res, tolerance);
  }
};

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_SPARSESPARSEPRODUCTWITHPRUNING_H
92
3party/eigen/Eigen/src/SparseCore/SparseTranspose.h
Normal file
@ -0,0 +1,92 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2015 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSETRANSPOSE_H
#define EIGEN_SPARSETRANSPOSE_H

namespace Eigen {

namespace internal {
  template<typename MatrixType,int CompressedAccess=int(MatrixType::Flags&CompressedAccessBit)>
  class SparseTransposeImpl
    : public SparseMatrixBase<Transpose<MatrixType> >
  {};

  template<typename MatrixType>
  class SparseTransposeImpl<MatrixType,CompressedAccessBit>
    : public SparseCompressedBase<Transpose<MatrixType> >
  {
    typedef SparseCompressedBase<Transpose<MatrixType> > Base;
  public:
    using Base::derived;
    typedef typename Base::Scalar Scalar;
    typedef typename Base::StorageIndex StorageIndex;

    inline Index nonZeros() const { return derived().nestedExpression().nonZeros(); }

    inline const Scalar* valuePtr() const { return derived().nestedExpression().valuePtr(); }
    inline const StorageIndex* innerIndexPtr() const { return derived().nestedExpression().innerIndexPtr(); }
    inline const StorageIndex* outerIndexPtr() const { return derived().nestedExpression().outerIndexPtr(); }
    inline const StorageIndex* innerNonZeroPtr() const { return derived().nestedExpression().innerNonZeroPtr(); }

    inline Scalar* valuePtr() { return derived().nestedExpression().valuePtr(); }
    inline StorageIndex* innerIndexPtr() { return derived().nestedExpression().innerIndexPtr(); }
    inline StorageIndex* outerIndexPtr() { return derived().nestedExpression().outerIndexPtr(); }
    inline StorageIndex* innerNonZeroPtr() { return derived().nestedExpression().innerNonZeroPtr(); }
  };
} // end namespace internal

template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>
  : public internal::SparseTransposeImpl<MatrixType>
{
  protected:
    typedef internal::SparseTransposeImpl<MatrixType> Base;
};
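// Clarifying note (added): for a compressed matrix, the transpose is a zero-copy view;
// the accessors above simply re-export the nested matrix's value/index arrays, so the
// inner and outer dimensions merely swap roles. E.g. (hypothetical name):
//
//   SparseMatrix<double> A(rows,cols);  // column-major storage
//   ... A.transpose() ...               // row-major view of the very same buffers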

namespace internal {

template<typename ArgType>
struct unary_evaluator<Transpose<ArgType>, IteratorBased>
  : public evaluator_base<Transpose<ArgType> >
{
    typedef typename evaluator<ArgType>::InnerIterator EvalIterator;
  public:
    typedef Transpose<ArgType> XprType;

    inline Index nonZerosEstimate() const {
      return m_argImpl.nonZerosEstimate();
    }

    class InnerIterator : public EvalIterator
    {
      public:
        EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& unaryOp, Index outer)
          : EvalIterator(unaryOp.m_argImpl,outer)
        {}

        // report swapped coordinates: an entry at (r,c) of the argument is exposed as (c,r)
        Index row() const { return EvalIterator::col(); }
        Index col() const { return EvalIterator::row(); }
    };

    enum {
      CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
      Flags = XprType::Flags
    };

    explicit unary_evaluator(const XprType& op) : m_argImpl(op.nestedExpression()) {}

  protected:
    evaluator<ArgType> m_argImpl;
};

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_SPARSETRANSPOSE_H
189
3party/eigen/Eigen/src/SparseCore/SparseTriangularView.h
Normal file
@ -0,0 +1,189 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009-2015 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSE_TRIANGULARVIEW_H
#define EIGEN_SPARSE_TRIANGULARVIEW_H

namespace Eigen {

/** \ingroup SparseCore_Module
  *
  * \brief Base class for a triangular part in a \b sparse matrix
  *
  * This class is an abstract base class of class TriangularView, and objects of type TriangularViewImpl cannot be instantiated.
  * It extends class TriangularView with additional methods which are available for sparse expressions only.
  *
  * \sa class TriangularView, SparseMatrixBase::triangularView()
  */
template<typename MatrixType, unsigned int Mode> class TriangularViewImpl<MatrixType,Mode,Sparse>
  : public SparseMatrixBase<TriangularView<MatrixType,Mode> >
{
    enum { SkipFirst = ((Mode&Lower) && !(MatrixType::Flags&RowMajorBit))
                    || ((Mode&Upper) &&  (MatrixType::Flags&RowMajorBit)),
           SkipLast = !SkipFirst,
           SkipDiag = (Mode&ZeroDiag) ? 1 : 0,
           HasUnitDiag = (Mode&UnitDiag) ? 1 : 0
    };

    typedef TriangularView<MatrixType,Mode> TriangularViewType;

  protected:
    // dummy solve function to make TriangularView happy.
    void solve() const;

    typedef SparseMatrixBase<TriangularViewType> Base;
  public:

    EIGEN_SPARSE_PUBLIC_INTERFACE(TriangularViewType)

    typedef typename MatrixType::Nested MatrixTypeNested;
    typedef typename internal::remove_reference<MatrixTypeNested>::type MatrixTypeNestedNonRef;
    typedef typename internal::remove_all<MatrixTypeNested>::type MatrixTypeNestedCleaned;

    template<typename RhsType, typename DstType>
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE void _solve_impl(const RhsType &rhs, DstType &dst) const {
      if(!(internal::is_same<RhsType,DstType>::value && internal::extract_data(dst) == internal::extract_data(rhs)))
        dst = rhs;
      this->solveInPlace(dst);
    }

    /** Applies the inverse of \c *this to the dense vector or matrix \a other, "in-place" */
    template<typename OtherDerived> void solveInPlace(MatrixBase<OtherDerived>& other) const;

    /** Applies the inverse of \c *this to the sparse vector or matrix \a other, "in-place" */
    template<typename OtherDerived> void solveInPlace(SparseMatrixBase<OtherDerived>& other) const;

};
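// A minimal usage sketch (added for illustration; hypothetical names):
//
//   SparseMatrix<double> A(n,n);   // matrix whose lower part holds a triangular factor
//   VectorXd b(n);
//   A.triangularView<Lower>().solveInPlace(b);   // b <- L^{-1} b by forward substitution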

namespace internal {

template<typename ArgType, unsigned int Mode>
struct unary_evaluator<TriangularView<ArgType,Mode>, IteratorBased>
 : evaluator_base<TriangularView<ArgType,Mode> >
{
  typedef TriangularView<ArgType,Mode> XprType;

protected:

  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::StorageIndex StorageIndex;
  typedef typename evaluator<ArgType>::InnerIterator EvalIterator;

  enum { SkipFirst = ((Mode&Lower) && !(ArgType::Flags&RowMajorBit))
                  || ((Mode&Upper) &&  (ArgType::Flags&RowMajorBit)),
         SkipLast = !SkipFirst,
         SkipDiag = (Mode&ZeroDiag) ? 1 : 0,
         HasUnitDiag = (Mode&UnitDiag) ? 1 : 0
  };

public:

  enum {
    CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
    Flags = XprType::Flags
  };

  explicit unary_evaluator(const XprType &xpr) : m_argImpl(xpr.nestedExpression()), m_arg(xpr.nestedExpression()) {}

  inline Index nonZerosEstimate() const {
    return m_argImpl.nonZerosEstimate();
  }

  class InnerIterator : public EvalIterator
  {
      typedef EvalIterator Base;
    public:

      EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& xprEval, Index outer)
        : Base(xprEval.m_argImpl,outer), m_returnOne(false), m_containsDiag(Base::outer()<xprEval.m_arg.innerSize())
      {
        // m_returnOne is set when a unit-diagonal coefficient has to be injected by the iterator
        if(SkipFirst)
        {
          while((*this) && ((HasUnitDiag||SkipDiag) ? this->index()<=outer : this->index()<outer))
            Base::operator++();
          if(HasUnitDiag)
            m_returnOne = m_containsDiag;
        }
        else if(HasUnitDiag && ((!Base::operator bool()) || Base::index()>=Base::outer()))
        {
          if((!SkipFirst) && Base::operator bool())
            Base::operator++();
          m_returnOne = m_containsDiag;
        }
      }

      EIGEN_STRONG_INLINE InnerIterator& operator++()
      {
        if(HasUnitDiag && m_returnOne)
          m_returnOne = false;
        else
        {
          Base::operator++();
          if(HasUnitDiag && (!SkipFirst) && ((!Base::operator bool()) || Base::index()>=Base::outer()))
          {
            if((!SkipFirst) && Base::operator bool())
              Base::operator++();
            m_returnOne = m_containsDiag;
          }
        }
        return *this;
      }

      EIGEN_STRONG_INLINE operator bool() const
      {
        if(HasUnitDiag && m_returnOne)
          return true;
        if(SkipFirst) return Base::operator bool();
        else
        {
          if (SkipDiag) return (Base::operator bool() && this->index() <  this->outer());
          else          return (Base::operator bool() && this->index() <= this->outer());
        }
      }

//      inline Index row() const { return (ArgType::Flags&RowMajorBit ? Base::outer() : this->index()); }
//      inline Index col() const { return (ArgType::Flags&RowMajorBit ? this->index() : Base::outer()); }
      inline StorageIndex index() const
      {
        if(HasUnitDiag && m_returnOne)  return internal::convert_index<StorageIndex>(Base::outer());
        else                            return Base::index();
      }
      inline Scalar value() const
      {
        if(HasUnitDiag && m_returnOne)  return Scalar(1);
        else                            return Base::value();
      }

    protected:
      bool m_returnOne;
      bool m_containsDiag;
    private:
      Scalar& valueRef();
  };

protected:
  evaluator<ArgType> m_argImpl;
  const ArgType& m_arg;
};

} // end namespace internal

template<typename Derived>
template<int Mode>
inline const TriangularView<const Derived, Mode>
SparseMatrixBase<Derived>::triangularView() const
{
  return TriangularView<const Derived, Mode>(derived());
}

} // end namespace Eigen

#endif // EIGEN_SPARSE_TRIANGULARVIEW_H
186
3party/eigen/Eigen/src/SparseCore/SparseUtil.h
Normal file
@ -0,0 +1,186 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSEUTIL_H
#define EIGEN_SPARSEUTIL_H

namespace Eigen {

#ifdef NDEBUG
#define EIGEN_DBG_SPARSE(X)
#else
#define EIGEN_DBG_SPARSE(X) X
#endif

#define EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, Op) \
template<typename OtherDerived> \
EIGEN_STRONG_INLINE Derived& operator Op(const Eigen::SparseMatrixBase<OtherDerived>& other) \
{ \
  return Base::operator Op(other.derived()); \
} \
EIGEN_STRONG_INLINE Derived& operator Op(const Derived& other) \
{ \
  return Base::operator Op(other); \
}

#define EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, Op) \
template<typename Other> \
EIGEN_STRONG_INLINE Derived& operator Op(const Other& scalar) \
{ \
  return Base::operator Op(scalar); \
}

#define EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATORS(Derived) \
EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, =)


#define EIGEN_SPARSE_PUBLIC_INTERFACE(Derived) \
  EIGEN_GENERIC_PUBLIC_INTERFACE(Derived)


const int CoherentAccessPattern     = 0x1;
const int InnerRandomAccessPattern  = 0x2 | CoherentAccessPattern;
const int OuterRandomAccessPattern  = 0x4 | CoherentAccessPattern;
const int RandomAccessPattern       = 0x8 | OuterRandomAccessPattern | InnerRandomAccessPattern;
|
||||
template<typename _Scalar, int _Flags = 0, typename _StorageIndex = int> class SparseMatrix;
|
||||
template<typename _Scalar, int _Flags = 0, typename _StorageIndex = int> class DynamicSparseMatrix;
|
||||
template<typename _Scalar, int _Flags = 0, typename _StorageIndex = int> class SparseVector;
|
||||
template<typename _Scalar, int _Flags = 0, typename _StorageIndex = int> class MappedSparseMatrix;
|
||||
|
||||
template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView;
|
||||
template<typename Lhs, typename Rhs> class SparseDiagonalProduct;
|
||||
template<typename MatrixType> class SparseView;
|
||||
|
||||
template<typename Lhs, typename Rhs> class SparseSparseProduct;
|
||||
template<typename Lhs, typename Rhs> class SparseTimeDenseProduct;
|
||||
template<typename Lhs, typename Rhs> class DenseTimeSparseProduct;
|
||||
template<typename Lhs, typename Rhs, bool Transpose> class SparseDenseOuterProduct;
|
||||
|
||||
template<typename Lhs, typename Rhs> struct SparseSparseProductReturnType;
|
||||
template<typename Lhs, typename Rhs,
|
||||
int InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(internal::traits<Lhs>::ColsAtCompileTime,internal::traits<Rhs>::RowsAtCompileTime)> struct DenseSparseProductReturnType;
|
||||
|
||||
template<typename Lhs, typename Rhs,
|
||||
int InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(internal::traits<Lhs>::ColsAtCompileTime,internal::traits<Rhs>::RowsAtCompileTime)> struct SparseDenseProductReturnType;
|
||||
template<typename MatrixType,int UpLo> class SparseSymmetricPermutationProduct;
|
||||
|
||||
namespace internal {
|
||||
|
||||
template<typename T,int Rows,int Cols,int Flags> struct sparse_eval;
|
||||
|
||||
template<typename T> struct eval<T,Sparse>
|
||||
: sparse_eval<T, traits<T>::RowsAtCompileTime,traits<T>::ColsAtCompileTime,traits<T>::Flags>
|
||||
{};
|
||||
|
||||
template<typename T,int Cols,int Flags> struct sparse_eval<T,1,Cols,Flags> {
|
||||
typedef typename traits<T>::Scalar _Scalar;
|
||||
typedef typename traits<T>::StorageIndex _StorageIndex;
|
||||
public:
|
||||
typedef SparseVector<_Scalar, RowMajor, _StorageIndex> type;
|
||||
};
|
||||
|
||||
template<typename T,int Rows,int Flags> struct sparse_eval<T,Rows,1,Flags> {
|
||||
typedef typename traits<T>::Scalar _Scalar;
|
||||
typedef typename traits<T>::StorageIndex _StorageIndex;
|
||||
public:
|
||||
typedef SparseVector<_Scalar, ColMajor, _StorageIndex> type;
|
||||
};
|
||||
|
||||
// TODO this seems almost identical to plain_matrix_type<T, Sparse>
|
||||
template<typename T,int Rows,int Cols,int Flags> struct sparse_eval {
|
||||
typedef typename traits<T>::Scalar _Scalar;
|
||||
typedef typename traits<T>::StorageIndex _StorageIndex;
|
||||
enum { _Options = ((Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor };
|
||||
public:
|
||||
typedef SparseMatrix<_Scalar, _Options, _StorageIndex> type;
|
||||
};
|
||||
|
||||
template<typename T,int Flags> struct sparse_eval<T,1,1,Flags> {
|
||||
typedef typename traits<T>::Scalar _Scalar;
|
||||
public:
|
||||
typedef Matrix<_Scalar, 1, 1> type;
|
||||
};
|
||||
|
||||
template<typename T> struct plain_matrix_type<T,Sparse>
|
||||
{
|
||||
typedef typename traits<T>::Scalar _Scalar;
|
||||
typedef typename traits<T>::StorageIndex _StorageIndex;
|
||||
enum { _Options = ((evaluator<T>::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor };
|
||||
public:
|
||||
typedef SparseMatrix<_Scalar, _Options, _StorageIndex> type;
|
||||
};
|
||||
|
||||
template<typename T>
|
||||
struct plain_object_eval<T,Sparse>
|
||||
: sparse_eval<T, traits<T>::RowsAtCompileTime,traits<T>::ColsAtCompileTime, evaluator<T>::Flags>
|
||||
{};
|
||||
|
||||
template<typename Decomposition, typename RhsType>
|
||||
struct solve_traits<Decomposition,RhsType,Sparse>
|
||||
{
|
||||
typedef typename sparse_eval<RhsType, RhsType::RowsAtCompileTime, RhsType::ColsAtCompileTime,traits<RhsType>::Flags>::type PlainObject;
|
||||
};
|
||||
|
||||
template<typename Derived>
|
||||
struct generic_xpr_base<Derived, MatrixXpr, Sparse>
|
||||
{
|
||||
typedef SparseMatrixBase<Derived> type;
|
||||
};
|
||||
|
||||
struct SparseTriangularShape { static std::string debugName() { return "SparseTriangularShape"; } };
|
||||
struct SparseSelfAdjointShape { static std::string debugName() { return "SparseSelfAdjointShape"; } };
|
||||
|
||||
template<> struct glue_shapes<SparseShape,SelfAdjointShape> { typedef SparseSelfAdjointShape type; };
|
||||
template<> struct glue_shapes<SparseShape,TriangularShape > { typedef SparseTriangularShape type; };
|
||||
|
||||
// return type of SparseCompressedBase::lower_bound;
|
||||
struct LowerBoundIndex {
|
||||
LowerBoundIndex() : value(-1), found(false) {}
|
||||
LowerBoundIndex(Index val, bool ok) : value(val), found(ok) {}
|
||||
Index value;
|
||||
bool found;
|
||||
};
|
||||
|
||||
} // end namespace internal
|
||||
|
||||
/** \ingroup SparseCore_Module
|
||||
*
|
||||
* \class Triplet
|
||||
*
|
||||
* \brief A small structure to hold a non zero as a triplet (i,j,value).
|
||||
*
|
||||
* \sa SparseMatrix::setFromTriplets()
|
||||
*/
|
||||
template<typename Scalar, typename StorageIndex=typename SparseMatrix<Scalar>::StorageIndex >
|
||||
class Triplet
|
||||
{
|
||||
public:
|
||||
Triplet() : m_row(0), m_col(0), m_value(0) {}
|
||||
|
||||
Triplet(const StorageIndex& i, const StorageIndex& j, const Scalar& v = Scalar(0))
|
||||
: m_row(i), m_col(j), m_value(v)
|
||||
{}
|
||||
|
||||
/** \returns the row index of the element */
|
||||
const StorageIndex& row() const { return m_row; }
|
||||
|
||||
/** \returns the column index of the element */
|
||||
const StorageIndex& col() const { return m_col; }
|
||||
|
||||
/** \returns the value of the element */
|
||||
const Scalar& value() const { return m_value; }
|
||||
protected:
|
||||
StorageIndex m_row, m_col;
|
||||
Scalar m_value;
|
||||
};
|
||||
|
||||
} // end namespace Eigen
|
||||
|
||||
#endif // EIGEN_SPARSEUTIL_H
|
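Editor's note: the Triplet class above pairs with SparseMatrix::setFromTriplets(). A minimal standalone sketch, not part of the Eigen file above:

#include <Eigen/SparseCore>
#include <vector>

int main()
{
  std::vector<Eigen::Triplet<double>> triplets;
  triplets.emplace_back(0, 0, 4.0);  // (row, col, value)
  triplets.emplace_back(1, 2, -1.5);
  triplets.emplace_back(1, 2,  0.5); // duplicates are summed: (1,2) ends up -1.0

  Eigen::SparseMatrix<double> A(2, 3);
  A.setFromTriplets(triplets.begin(), triplets.end());
  return 0;
}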
478
3party/eigen/Eigen/src/SparseCore/SparseVector.h
Normal file
@ -0,0 +1,478 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2015 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSEVECTOR_H
#define EIGEN_SPARSEVECTOR_H

namespace Eigen {

/** \ingroup SparseCore_Module
  * \class SparseVector
  *
  * \brief a sparse vector class
  *
  * \tparam _Scalar the scalar type, i.e. the type of the coefficients
  *
  * See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme.
  *
  * This class can be extended with the help of the plugin mechanism described on the page
  * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_SPARSEVECTOR_PLUGIN.
  */

namespace internal {
template<typename _Scalar, int _Options, typename _StorageIndex>
struct traits<SparseVector<_Scalar, _Options, _StorageIndex> >
{
  typedef _Scalar Scalar;
  typedef _StorageIndex StorageIndex;
  typedef Sparse StorageKind;
  typedef MatrixXpr XprKind;
  enum {
    IsColVector = (_Options & RowMajorBit) ? 0 : 1,

    RowsAtCompileTime = IsColVector ? Dynamic : 1,
    ColsAtCompileTime = IsColVector ? 1 : Dynamic,
    MaxRowsAtCompileTime = RowsAtCompileTime,
    MaxColsAtCompileTime = ColsAtCompileTime,
    Flags = _Options | NestByRefBit | LvalueBit | (IsColVector ? 0 : RowMajorBit) | CompressedAccessBit,
    SupportedAccessPatterns = InnerRandomAccessPattern
  };
};

// Sparse-Vector-Assignment kinds:
enum {
  SVA_RuntimeSwitch,
  SVA_Inner,
  SVA_Outer
};

template< typename Dest, typename Src,
          int AssignmentKind = !bool(Src::IsVectorAtCompileTime) ? SVA_RuntimeSwitch
                             : Src::InnerSizeAtCompileTime==1 ? SVA_Outer
                             : SVA_Inner>
struct sparse_vector_assign_selector;

}

template<typename _Scalar, int _Options, typename _StorageIndex>
class SparseVector
  : public SparseCompressedBase<SparseVector<_Scalar, _Options, _StorageIndex> >
{
    typedef SparseCompressedBase<SparseVector> Base;
    using Base::convert_index;
  public:
    EIGEN_SPARSE_PUBLIC_INTERFACE(SparseVector)
    EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, +=)
    EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, -=)

    typedef internal::CompressedStorage<Scalar,StorageIndex> Storage;
    enum { IsColVector = internal::traits<SparseVector>::IsColVector };

    enum {
      Options = _Options
    };

    EIGEN_STRONG_INLINE Index rows() const { return IsColVector ? m_size : 1; }
    EIGEN_STRONG_INLINE Index cols() const { return IsColVector ? 1 : m_size; }
    EIGEN_STRONG_INLINE Index innerSize() const { return m_size; }
    EIGEN_STRONG_INLINE Index outerSize() const { return 1; }

    EIGEN_STRONG_INLINE const Scalar* valuePtr() const { return m_data.valuePtr(); }
    EIGEN_STRONG_INLINE Scalar* valuePtr() { return m_data.valuePtr(); }

    EIGEN_STRONG_INLINE const StorageIndex* innerIndexPtr() const { return m_data.indexPtr(); }
    EIGEN_STRONG_INLINE StorageIndex* innerIndexPtr() { return m_data.indexPtr(); }

    inline const StorageIndex* outerIndexPtr() const { return 0; }
    inline StorageIndex* outerIndexPtr() { return 0; }
    inline const StorageIndex* innerNonZeroPtr() const { return 0; }
    inline StorageIndex* innerNonZeroPtr() { return 0; }

    /** \internal */
    inline Storage& data() { return m_data; }
    /** \internal */
    inline const Storage& data() const { return m_data; }

    inline Scalar coeff(Index row, Index col) const
    {
      eigen_assert(IsColVector ? (col==0 && row>=0 && row<m_size) : (row==0 && col>=0 && col<m_size));
      return coeff(IsColVector ? row : col);
    }
    inline Scalar coeff(Index i) const
    {
      eigen_assert(i>=0 && i<m_size);
      return m_data.at(StorageIndex(i));
    }

    inline Scalar& coeffRef(Index row, Index col)
    {
      eigen_assert(IsColVector ? (col==0 && row>=0 && row<m_size) : (row==0 && col>=0 && col<m_size));
      return coeffRef(IsColVector ? row : col);
    }

    /** \returns a reference to the coefficient value at given index \a i
      * This operation involves a log(rho*size) binary search. If the coefficient does not
      * exist yet, then a sorted insertion into a sequential buffer is performed.
      *
      * This insertion might be very costly if the number of nonzeros above \a i is large.
      */
    inline Scalar& coeffRef(Index i)
    {
      eigen_assert(i>=0 && i<m_size);

      return m_data.atWithInsertion(StorageIndex(i));
    }

  public:

    typedef typename Base::InnerIterator InnerIterator;
    typedef typename Base::ReverseInnerIterator ReverseInnerIterator;

    inline void setZero() { m_data.clear(); }

    /** \returns the number of non zero coefficients */
    inline Index nonZeros() const { return m_data.size(); }

    inline void startVec(Index outer)
    {
      EIGEN_UNUSED_VARIABLE(outer);
      eigen_assert(outer==0);
    }

    inline Scalar& insertBackByOuterInner(Index outer, Index inner)
    {
      EIGEN_UNUSED_VARIABLE(outer);
      eigen_assert(outer==0);
      return insertBack(inner);
    }
    inline Scalar& insertBack(Index i)
    {
      m_data.append(0, i);
      return m_data.value(m_data.size()-1);
    }

    Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
    {
      EIGEN_UNUSED_VARIABLE(outer);
      eigen_assert(outer==0);
      return insertBackUnordered(inner);
    }
    inline Scalar& insertBackUnordered(Index i)
    {
      m_data.append(0, i);
      return m_data.value(m_data.size()-1);
    }

    inline Scalar& insert(Index row, Index col)
    {
      eigen_assert(IsColVector ? (col==0 && row>=0 && row<m_size) : (row==0 && col>=0 && col<m_size));

      Index inner = IsColVector ? row : col;
      Index outer = IsColVector ? col : row;
      EIGEN_ONLY_USED_FOR_DEBUG(outer);
      eigen_assert(outer==0);
      return insert(inner);
    }
    Scalar& insert(Index i)
    {
      eigen_assert(i>=0 && i<m_size);

      Index startId = 0;
      Index p = Index(m_data.size()) - 1;
      // TODO smart realloc
      m_data.resize(p+2,1);

      while ( (p >= startId) && (m_data.index(p) > i) )
      {
        m_data.index(p+1) = m_data.index(p);
        m_data.value(p+1) = m_data.value(p);
        --p;
      }
      m_data.index(p+1) = convert_index(i);
      m_data.value(p+1) = 0;
      return m_data.value(p+1);
    }

    /**
      */
    inline void reserve(Index reserveSize) { m_data.reserve(reserveSize); }


    inline void finalize() {}

    /** \copydoc SparseMatrix::prune(const Scalar&,const RealScalar&) */
    void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
    {
      m_data.prune(reference,epsilon);
    }

    /** Resizes the sparse vector to \a rows x \a cols
      *
      * This method is provided for compatibility with matrices.
      * For a column vector, \a cols must be equal to 1.
      * For a row vector, \a rows must be equal to 1.
      *
      * \sa resize(Index)
      */
    void resize(Index rows, Index cols)
    {
      eigen_assert((IsColVector ? cols : rows)==1 && "Outer dimension must equal 1");
      resize(IsColVector ? rows : cols);
    }

    /** Resizes the sparse vector to \a newSize
      * This method deletes all entries, thus leaving an empty sparse vector
      *
      * \sa conservativeResize(), setZero() */
    void resize(Index newSize)
    {
      m_size = newSize;
      m_data.clear();
    }

    /** Resizes the sparse vector to \a newSize, while leaving old values untouched.
      *
      * If the size of the vector is decreased, then the storage of the out-of-bounds coefficients is kept and reserved.
      * Call .data().squeeze() to free extra memory.
      *
      * \sa reserve(), setZero()
      */
    void conservativeResize(Index newSize)
    {
      if (newSize < m_size)
      {
        Index i = 0;
        while (i<m_data.size() && m_data.index(i)<newSize) ++i;
        m_data.resize(i);
      }
      m_size = newSize;
    }

    void resizeNonZeros(Index size) { m_data.resize(size); }

    inline SparseVector() : m_size(0) { check_template_parameters(); resize(0); }

    explicit inline SparseVector(Index size) : m_size(0) { check_template_parameters(); resize(size); }

    inline SparseVector(Index rows, Index cols) : m_size(0) { check_template_parameters(); resize(rows,cols); }

    template<typename OtherDerived>
    inline SparseVector(const SparseMatrixBase<OtherDerived>& other)
      : m_size(0)
    {
      #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
        EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
      #endif
      check_template_parameters();
      *this = other.derived();
    }

    inline SparseVector(const SparseVector& other)
      : Base(other), m_size(0)
    {
      check_template_parameters();
      *this = other.derived();
    }

    /** Swaps the values of \c *this and \a other.
      * Overloaded for performance: this version performs a \em shallow swap by swapping pointers and attributes only.
      * \sa SparseMatrixBase::swap()
      */
    inline void swap(SparseVector& other)
    {
      std::swap(m_size, other.m_size);
      m_data.swap(other.m_data);
    }

    template<int OtherOptions>
    inline void swap(SparseMatrix<Scalar,OtherOptions,StorageIndex>& other)
    {
      eigen_assert(other.outerSize()==1);
      std::swap(m_size, other.m_innerSize);
      m_data.swap(other.m_data);
    }

    inline SparseVector& operator=(const SparseVector& other)
    {
      if (other.isRValue())
      {
        swap(other.const_cast_derived());
      }
      else
      {
        resize(other.size());
        m_data = other.m_data;
      }
      return *this;
    }

    template<typename OtherDerived>
    inline SparseVector& operator=(const SparseMatrixBase<OtherDerived>& other)
    {
      SparseVector tmp(other.size());
      internal::sparse_vector_assign_selector<SparseVector,OtherDerived>::run(tmp,other.derived());
      this->swap(tmp);
      return *this;
    }

    #ifndef EIGEN_PARSED_BY_DOXYGEN
    template<typename Lhs, typename Rhs>
    inline SparseVector& operator=(const SparseSparseProduct<Lhs,Rhs>& product)
    {
      return Base::operator=(product);
    }
    #endif

    friend std::ostream & operator << (std::ostream & s, const SparseVector& m)
    {
      for (Index i=0; i<m.nonZeros(); ++i)
        s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
      s << std::endl;
      return s;
    }

    /** Destructor */
    inline ~SparseVector() {}

    /** Overloaded for performance */
    Scalar sum() const;

  public:

    /** \internal \deprecated use setZero() and reserve() */
    EIGEN_DEPRECATED void startFill(Index reserve)
    {
      setZero();
      m_data.reserve(reserve);
    }

    /** \internal \deprecated use insertBack(Index,Index) */
    EIGEN_DEPRECATED Scalar& fill(Index r, Index c)
    {
      eigen_assert(r==0 || c==0);
      return fill(IsColVector ? r : c);
    }

    /** \internal \deprecated use insertBack(Index) */
    EIGEN_DEPRECATED Scalar& fill(Index i)
    {
      m_data.append(0, i);
      return m_data.value(m_data.size()-1);
    }

    /** \internal \deprecated use insert(Index,Index) */
    EIGEN_DEPRECATED Scalar& fillrand(Index r, Index c)
    {
      eigen_assert(r==0 || c==0);
      return fillrand(IsColVector ? r : c);
    }

    /** \internal \deprecated use insert(Index) */
    EIGEN_DEPRECATED Scalar& fillrand(Index i)
    {
      return insert(i);
    }

    /** \internal \deprecated use finalize() */
    EIGEN_DEPRECATED void endFill() {}

    // These two functions were here in the 3.1 release, so let's keep them in case some code relies on them.
    /** \internal \deprecated use data() */
    EIGEN_DEPRECATED Storage& _data() { return m_data; }
    /** \internal \deprecated use data() */
    EIGEN_DEPRECATED const Storage& _data() const { return m_data; }

#   ifdef EIGEN_SPARSEVECTOR_PLUGIN
#     include EIGEN_SPARSEVECTOR_PLUGIN
#   endif

protected:

    static void check_template_parameters()
    {
      EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
      EIGEN_STATIC_ASSERT((_Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
    }

    Storage m_data;
    Index m_size;
};

namespace internal {

template<typename _Scalar, int _Options, typename _Index>
struct evaluator<SparseVector<_Scalar,_Options,_Index> >
  : evaluator_base<SparseVector<_Scalar,_Options,_Index> >
{
  typedef SparseVector<_Scalar,_Options,_Index> SparseVectorType;
  typedef evaluator_base<SparseVectorType> Base;
  typedef typename SparseVectorType::InnerIterator InnerIterator;
  typedef typename SparseVectorType::ReverseInnerIterator ReverseInnerIterator;

  enum {
    CoeffReadCost = NumTraits<_Scalar>::ReadCost,
    Flags = SparseVectorType::Flags
  };

  evaluator() : Base() {}

  explicit evaluator(const SparseVectorType &mat) : m_matrix(&mat)
  {
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

  inline Index nonZerosEstimate() const {
    return m_matrix->nonZeros();
  }

  operator SparseVectorType&() { return m_matrix->const_cast_derived(); }
  operator const SparseVectorType&() const { return *m_matrix; }

  const SparseVectorType *m_matrix;
};

template< typename Dest, typename Src>
struct sparse_vector_assign_selector<Dest,Src,SVA_Inner> {
  static void run(Dest& dst, const Src& src) {
    eigen_internal_assert(src.innerSize()==src.size());
    typedef internal::evaluator<Src> SrcEvaluatorType;
    SrcEvaluatorType srcEval(src);
    for(typename SrcEvaluatorType::InnerIterator it(srcEval, 0); it; ++it)
      dst.insert(it.index()) = it.value();
  }
};

template< typename Dest, typename Src>
struct sparse_vector_assign_selector<Dest,Src,SVA_Outer> {
  static void run(Dest& dst, const Src& src) {
    eigen_internal_assert(src.outerSize()==src.size());
    typedef internal::evaluator<Src> SrcEvaluatorType;
    SrcEvaluatorType srcEval(src);
    for(Index i=0; i<src.size(); ++i)
    {
      typename SrcEvaluatorType::InnerIterator it(srcEval, i);
      if(it)
        dst.insert(i) = it.value();
    }
  }
};

template< typename Dest, typename Src>
struct sparse_vector_assign_selector<Dest,Src,SVA_RuntimeSwitch> {
  static void run(Dest& dst, const Src& src) {
    if(src.outerSize()==1)  sparse_vector_assign_selector<Dest,Src,SVA_Inner>::run(dst, src);
    else                    sparse_vector_assign_selector<Dest,Src,SVA_Outer>::run(dst, src);
  }
};

}

} // end namespace Eigen

#endif // EIGEN_SPARSEVECTOR_H
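Editor's note: the coeffRef() documentation above warns that random insertion can be costly because entries above the insertion point are shifted; filling in strictly increasing index order via reserve() and insertBack() avoids that. A minimal standalone sketch, not part of the Eigen file above:

#include <Eigen/SparseCore>

int main()
{
  Eigen::SparseVector<double> v(1000);
  v.reserve(3);                // avoid reallocations while filling
  v.insertBack(10) = 1.0;      // insertBack() requires strictly increasing indices
  v.insertBack(42) = 2.0;
  v.insertBack(99) = 3.0;

  double x = v.coeff(42);      // read-only binary search: 2.0
  v.coeffRef(500) = 4.0;       // arbitrary-position insertion (may shift entries)
  v.conservativeResize(600);   // shrink, keeping in-range entries
  return 0;
}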
254
3party/eigen/Eigen/src/SparseCore/SparseView.h
Normal file
@ -0,0 +1,254 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2011-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2010 Daniel Lowengrub <lowdanie@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSEVIEW_H
#define EIGEN_SPARSEVIEW_H

namespace Eigen {

namespace internal {

template<typename MatrixType>
struct traits<SparseView<MatrixType> > : traits<MatrixType>
{
  typedef typename MatrixType::StorageIndex StorageIndex;
  typedef Sparse StorageKind;
  enum {
    Flags = int(traits<MatrixType>::Flags) & (RowMajorBit)
  };
};

} // end namespace internal

/** \ingroup SparseCore_Module
  * \class SparseView
  *
  * \brief Expression of a dense or sparse matrix with zero or too small values removed
  *
  * \tparam MatrixType the type of the object of which we are removing the small entries
  *
  * This class represents an expression of a given dense or sparse matrix from which
  * entries smaller than \c reference * \c epsilon have been removed.
  * It is the return type of MatrixBase::sparseView() and SparseMatrixBase::pruned()
  * and most of the time this is the only way it is used.
  *
  * \sa MatrixBase::sparseView(), SparseMatrixBase::pruned()
  */
template<typename MatrixType>
class SparseView : public SparseMatrixBase<SparseView<MatrixType> >
{
  typedef typename MatrixType::Nested MatrixTypeNested;
  typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;
  typedef SparseMatrixBase<SparseView > Base;
public:
  EIGEN_SPARSE_PUBLIC_INTERFACE(SparseView)
  typedef typename internal::remove_all<MatrixType>::type NestedExpression;

  explicit SparseView(const MatrixType& mat, const Scalar& reference = Scalar(0),
                      const RealScalar &epsilon = NumTraits<Scalar>::dummy_precision())
    : m_matrix(mat), m_reference(reference), m_epsilon(epsilon) {}

  inline Index rows() const { return m_matrix.rows(); }
  inline Index cols() const { return m_matrix.cols(); }

  inline Index innerSize() const { return m_matrix.innerSize(); }
  inline Index outerSize() const { return m_matrix.outerSize(); }

  /** \returns the nested expression */
  const typename internal::remove_all<MatrixTypeNested>::type&
  nestedExpression() const { return m_matrix; }

  Scalar reference() const { return m_reference; }
  RealScalar epsilon() const { return m_epsilon; }

protected:
  MatrixTypeNested m_matrix;
  Scalar m_reference;
  RealScalar m_epsilon;
};

namespace internal {

// TODO find a way to unify the two following variants
// This is tricky because implementing an inner iterator on top of an IndexBased evaluator is
// not easy because the evaluators do not expose the sizes of the underlying expression.

template<typename ArgType>
struct unary_evaluator<SparseView<ArgType>, IteratorBased>
  : public evaluator_base<SparseView<ArgType> >
{
    typedef typename evaluator<ArgType>::InnerIterator EvalIterator;
  public:
    typedef SparseView<ArgType> XprType;

    class InnerIterator : public EvalIterator
    {
      protected:
        typedef typename XprType::Scalar Scalar;
      public:

        EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& sve, Index outer)
          : EvalIterator(sve.m_argImpl,outer), m_view(sve.m_view)
        {
          incrementToNonZero();
        }

        EIGEN_STRONG_INLINE InnerIterator& operator++()
        {
          EvalIterator::operator++();
          incrementToNonZero();
          return *this;
        }

        using EvalIterator::value;

      protected:
        const XprType &m_view;

      private:
        void incrementToNonZero()
        {
          while((bool(*this)) && internal::isMuchSmallerThan(value(), m_view.reference(), m_view.epsilon()))
          {
            EvalIterator::operator++();
          }
        }
    };

    enum {
      CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
      Flags = XprType::Flags
    };

    explicit unary_evaluator(const XprType& xpr) : m_argImpl(xpr.nestedExpression()), m_view(xpr) {}

  protected:
    evaluator<ArgType> m_argImpl;
    const XprType &m_view;
};

template<typename ArgType>
struct unary_evaluator<SparseView<ArgType>, IndexBased>
  : public evaluator_base<SparseView<ArgType> >
{
  public:
    typedef SparseView<ArgType> XprType;
  protected:
    enum { IsRowMajor = (XprType::Flags&RowMajorBit)==RowMajorBit };
    typedef typename XprType::Scalar Scalar;
    typedef typename XprType::StorageIndex StorageIndex;
  public:

    class InnerIterator
    {
      public:

        EIGEN_STRONG_INLINE InnerIterator(const unary_evaluator& sve, Index outer)
          : m_sve(sve), m_inner(0), m_outer(outer), m_end(sve.m_view.innerSize())
        {
          incrementToNonZero();
        }

        EIGEN_STRONG_INLINE InnerIterator& operator++()
        {
          m_inner++;
          incrementToNonZero();
          return *this;
        }

        EIGEN_STRONG_INLINE Scalar value() const
        {
          return (IsRowMajor) ? m_sve.m_argImpl.coeff(m_outer, m_inner)
                              : m_sve.m_argImpl.coeff(m_inner, m_outer);
        }

        EIGEN_STRONG_INLINE StorageIndex index() const { return m_inner; }
        inline Index row() const { return IsRowMajor ? m_outer : index(); }
        inline Index col() const { return IsRowMajor ? index() : m_outer; }

        EIGEN_STRONG_INLINE operator bool() const { return m_inner < m_end && m_inner>=0; }

      protected:
        const unary_evaluator &m_sve;
        Index m_inner;
        const Index m_outer;
        const Index m_end;

      private:
        void incrementToNonZero()
        {
          while((bool(*this)) && internal::isMuchSmallerThan(value(), m_sve.m_view.reference(), m_sve.m_view.epsilon()))
          {
            m_inner++;
          }
        }
    };

    enum {
      CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
      Flags = XprType::Flags
    };

    explicit unary_evaluator(const XprType& xpr) : m_argImpl(xpr.nestedExpression()), m_view(xpr) {}

  protected:
    evaluator<ArgType> m_argImpl;
    const XprType &m_view;
};

} // end namespace internal

/** \ingroup SparseCore_Module
  *
  * \returns a sparse expression of the dense expression \c *this with values smaller than
  * \a reference * \a epsilon removed.
  *
  * This method is typically used when prototyping to convert a quickly assembled dense Matrix \c D to a SparseMatrix \c S:
  * \code
  * MatrixXd D(n,m);
  * SparseMatrix<double> S;
  * S = D.sparseView();             // suppress numerical zeros (exact)
  * S = D.sparseView(reference);
  * S = D.sparseView(reference,epsilon);
  * \endcode
  * where \a reference is a meaningful non zero reference value,
  * and \a epsilon is a tolerance factor defaulting to NumTraits<Scalar>::dummy_precision().
  *
  * \sa SparseMatrixBase::pruned(), class SparseView */
template<typename Derived>
const SparseView<Derived> MatrixBase<Derived>::sparseView(const Scalar& reference,
                                                          const typename NumTraits<Scalar>::Real& epsilon) const
{
  return SparseView<Derived>(derived(), reference, epsilon);
}

/** \returns an expression of \c *this with values smaller than
  * \a reference * \a epsilon removed.
  *
  * This method is typically used in conjunction with the product of two sparse matrices
  * to automatically prune the smallest values as follows:
  * \code
  * C = (A*B).pruned();             // suppress numerical zeros (exact)
  * C = (A*B).pruned(ref);
  * C = (A*B).pruned(ref,epsilon);
  * \endcode
  * where \c ref is a meaningful non zero reference value.
  * */
template<typename Derived>
const SparseView<Derived>
SparseMatrixBase<Derived>::pruned(const Scalar& reference,
                                  const RealScalar& epsilon) const
{
  return SparseView<Derived>(derived(), reference, epsilon);
}

} // end namespace Eigen

#endif
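Editor's note: the \code snippets in the sparseView() documentation above can be tried end to end. A minimal standalone sketch, not part of the Eigen file above, where the chosen reference value of 1.0 is just an illustrative magnitude scale:

#include <Eigen/Dense>
#include <Eigen/SparseCore>
#include <iostream>

int main()
{
  Eigen::MatrixXd D(2,2);
  D << 1.0,   0.0,
       1e-14, 2.0;

  // Drop exact zeros and entries negligible relative to reference = 1.0.
  Eigen::SparseMatrix<double> S = D.sparseView(1.0);
  std::cout << S.nonZeros() << std::endl; // 2: only 1.0 and 2.0 survive
  return 0;
}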
315
3party/eigen/Eigen/src/SparseCore/TriangularSolver.h
Normal file
@ -0,0 +1,315 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSETRIANGULARSOLVER_H
#define EIGEN_SPARSETRIANGULARSOLVER_H

namespace Eigen {

namespace internal {

template<typename Lhs, typename Rhs, int Mode,
  int UpLo = (Mode & Lower)
           ? Lower
           : (Mode & Upper)
           ? Upper
           : -1,
  int StorageOrder = int(traits<Lhs>::Flags) & RowMajorBit>
struct sparse_solve_triangular_selector;

// forward substitution, row-major
template<typename Lhs, typename Rhs, int Mode>
struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Lower,RowMajor>
{
  typedef typename Rhs::Scalar Scalar;
  typedef evaluator<Lhs> LhsEval;
  typedef typename evaluator<Lhs>::InnerIterator LhsIterator;
  static void run(const Lhs& lhs, Rhs& other)
  {
    LhsEval lhsEval(lhs);
    for(Index col=0 ; col<other.cols() ; ++col)
    {
      for(Index i=0; i<lhs.rows(); ++i)
      {
        Scalar tmp = other.coeff(i,col);
        Scalar lastVal(0);
        Index lastIndex = 0;
        for(LhsIterator it(lhsEval, i); it; ++it)
        {
          lastVal = it.value();
          lastIndex = it.index();
          if(lastIndex==i)
            break;
          tmp -= lastVal * other.coeff(lastIndex,col);
        }
        if (Mode & UnitDiag)
          other.coeffRef(i,col) = tmp;
        else
        {
          eigen_assert(lastIndex==i);
          other.coeffRef(i,col) = tmp/lastVal;
        }
      }
    }
  }
};

// backward substitution, row-major
template<typename Lhs, typename Rhs, int Mode>
struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Upper,RowMajor>
{
  typedef typename Rhs::Scalar Scalar;
  typedef evaluator<Lhs> LhsEval;
  typedef typename evaluator<Lhs>::InnerIterator LhsIterator;
  static void run(const Lhs& lhs, Rhs& other)
  {
    LhsEval lhsEval(lhs);
    for(Index col=0 ; col<other.cols() ; ++col)
    {
      for(Index i=lhs.rows()-1 ; i>=0 ; --i)
      {
        Scalar tmp = other.coeff(i,col);
        Scalar l_ii(0);
        LhsIterator it(lhsEval, i);
        while(it && it.index()<i)
          ++it;
        if(!(Mode & UnitDiag))
        {
          eigen_assert(it && it.index()==i);
          l_ii = it.value();
          ++it;
        }
        else if (it && it.index() == i)
          ++it;
        for(; it; ++it)
        {
          tmp -= it.value() * other.coeff(it.index(),col);
        }

        if (Mode & UnitDiag) other.coeffRef(i,col) = tmp;
        else                 other.coeffRef(i,col) = tmp/l_ii;
      }
    }
  }
};

// forward substitution, col-major
template<typename Lhs, typename Rhs, int Mode>
struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Lower,ColMajor>
{
  typedef typename Rhs::Scalar Scalar;
  typedef evaluator<Lhs> LhsEval;
  typedef typename evaluator<Lhs>::InnerIterator LhsIterator;
  static void run(const Lhs& lhs, Rhs& other)
  {
    LhsEval lhsEval(lhs);
    for(Index col=0 ; col<other.cols() ; ++col)
    {
      for(Index i=0; i<lhs.cols(); ++i)
      {
        Scalar& tmp = other.coeffRef(i,col);
        if (tmp!=Scalar(0)) // optimization when other is actually sparse
        {
          LhsIterator it(lhsEval, i);
          while(it && it.index()<i)
            ++it;
          if(!(Mode & UnitDiag))
          {
            eigen_assert(it && it.index()==i);
            tmp /= it.value();
          }
          if (it && it.index()==i)
            ++it;
          for(; it; ++it)
            other.coeffRef(it.index(), col) -= tmp * it.value();
        }
      }
    }
  }
};

// backward substitution, col-major
template<typename Lhs, typename Rhs, int Mode>
struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Upper,ColMajor>
{
  typedef typename Rhs::Scalar Scalar;
  typedef evaluator<Lhs> LhsEval;
  typedef typename evaluator<Lhs>::InnerIterator LhsIterator;
  static void run(const Lhs& lhs, Rhs& other)
  {
    LhsEval lhsEval(lhs);
    for(Index col=0 ; col<other.cols() ; ++col)
    {
      for(Index i=lhs.cols()-1; i>=0; --i)
      {
        Scalar& tmp = other.coeffRef(i,col);
        if (tmp!=Scalar(0)) // optimization when other is actually sparse
        {
          if(!(Mode & UnitDiag))
          {
            // TODO replace this by a binary search. make sure the binary search is safe for partially sorted elements
            LhsIterator it(lhsEval, i);
            while(it && it.index()!=i)
              ++it;
            eigen_assert(it && it.index()==i);
            other.coeffRef(i,col) /= it.value();
          }
          LhsIterator it(lhsEval, i);
          for(; it && it.index()<i; ++it)
            other.coeffRef(it.index(), col) -= tmp * it.value();
        }
      }
    }
  }
};

} // end namespace internal

#ifndef EIGEN_PARSED_BY_DOXYGEN

template<typename ExpressionType,unsigned int Mode>
template<typename OtherDerived>
void TriangularViewImpl<ExpressionType,Mode,Sparse>::solveInPlace(MatrixBase<OtherDerived>& other) const
{
  eigen_assert(derived().cols() == derived().rows() && derived().cols() == other.rows());
  eigen_assert((!(Mode & ZeroDiag)) && bool(Mode & (Upper|Lower)));

  enum { copy = internal::traits<OtherDerived>::Flags & RowMajorBit };

  typedef typename internal::conditional<copy,
    typename internal::plain_matrix_type_column_major<OtherDerived>::type, OtherDerived&>::type OtherCopy;
  OtherCopy otherCopy(other.derived());

  internal::sparse_solve_triangular_selector<ExpressionType, typename internal::remove_reference<OtherCopy>::type, Mode>::run(derived().nestedExpression(), otherCopy);

  if (copy)
    other = otherCopy;
}
#endif

// pure sparse path

namespace internal {

template<typename Lhs, typename Rhs, int Mode,
  int UpLo = (Mode & Lower)
           ? Lower
           : (Mode & Upper)
           ? Upper
           : -1,
  int StorageOrder = int(Lhs::Flags) & (RowMajorBit)>
struct sparse_solve_triangular_sparse_selector;

// forward substitution, col-major
template<typename Lhs, typename Rhs, int Mode, int UpLo>
struct sparse_solve_triangular_sparse_selector<Lhs,Rhs,Mode,UpLo,ColMajor>
{
  typedef typename Rhs::Scalar Scalar;
  typedef typename promote_index_type<typename traits<Lhs>::StorageIndex,
                                      typename traits<Rhs>::StorageIndex>::type StorageIndex;
  static void run(const Lhs& lhs, Rhs& other)
  {
    const bool IsLower = (UpLo==Lower);
    AmbiVector<Scalar,StorageIndex> tempVector(other.rows()*2);
    tempVector.setBounds(0,other.rows());

    Rhs res(other.rows(), other.cols());
    res.reserve(other.nonZeros());

    for(Index col=0 ; col<other.cols() ; ++col)
    {
      // FIXME estimate number of non zeros
      tempVector.init(.99/*float(other.col(col).nonZeros())/float(other.rows())*/);
      tempVector.setZero();
      tempVector.restart();
      for (typename Rhs::InnerIterator rhsIt(other, col); rhsIt; ++rhsIt)
      {
        tempVector.coeffRef(rhsIt.index()) = rhsIt.value();
      }

      for(Index i=IsLower?0:lhs.cols()-1;
          IsLower?i<lhs.cols():i>=0;
          i+=IsLower?1:-1)
      {
        tempVector.restart();
        Scalar& ci = tempVector.coeffRef(i);
        if (ci!=Scalar(0))
        {
          // find
          typename Lhs::InnerIterator it(lhs, i);
          if(!(Mode & UnitDiag))
          {
            if (IsLower)
            {
              eigen_assert(it.index()==i);
              ci /= it.value();
            }
            else
              ci /= lhs.coeff(i,i);
          }
          tempVector.restart();
          if (IsLower)
          {
            if (it.index()==i)
              ++it;
            for(; it; ++it)
              tempVector.coeffRef(it.index()) -= ci * it.value();
          }
          else
          {
            for(; it && it.index()<i; ++it)
              tempVector.coeffRef(it.index()) -= ci * it.value();
          }
        }
      }


      Index count = 0;
      // FIXME compute a reference value to filter zeros
      for (typename AmbiVector<Scalar,StorageIndex>::Iterator it(tempVector/*,1e-12*/); it; ++it)
      {
        ++ count;
//        std::cerr << "fill " << it.index() << ", " << col << "\n";
//        std::cout << it.value() << "  ";
        // FIXME use insertBack
        res.insert(it.index(), col) = it.value();
      }
//      std::cout << "tempVector.nonZeros() == " << int(count) << " / " << (other.rows()) << "\n";
    }
    res.finalize();
    other = res.markAsRValue();
  }
};

} // end namespace internal

#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename ExpressionType,unsigned int Mode>
template<typename OtherDerived>
void TriangularViewImpl<ExpressionType,Mode,Sparse>::solveInPlace(SparseMatrixBase<OtherDerived>& other) const
{
  eigen_assert(derived().cols() == derived().rows() && derived().cols() == other.rows());
  eigen_assert( (!(Mode & ZeroDiag)) && bool(Mode & (Upper|Lower)));

//   enum { copy = internal::traits<OtherDerived>::Flags & RowMajorBit };

//   typedef typename internal::conditional<copy,
//     typename internal::plain_matrix_type_column_major<OtherDerived>::type, OtherDerived&>::type OtherCopy;
//   OtherCopy otherCopy(other.derived());

  internal::sparse_solve_triangular_sparse_selector<ExpressionType, OtherDerived, Mode>::run(derived().nestedExpression(), other.derived());

//   if (copy)
//     other = otherCopy;
}
#endif

} // end namespace Eigen

#endif // EIGEN_SPARSETRIANGULARSOLVER_H
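Editor's note: the second solveInPlace() overload above takes a sparse right-hand side and uses the AmbiVector-based pure-sparse path. A minimal standalone sketch, not part of the Eigen file above, solving a small lower-triangular system with a sparse right-hand side:

#include <Eigen/SparseCore>

int main()
{
  // Lower-triangular L:
  //   [1 0]
  //   [2 1]
  Eigen::SparseMatrix<double> L(2,2);
  L.insert(0,0) = 1.0;
  L.insert(1,0) = 2.0;
  L.insert(1,1) = 1.0;
  L.makeCompressed();

  // Sparse right-hand side B, overwritten in place: B <- L^{-1} B.
  Eigen::SparseMatrix<double> B(2,1);
  B.insert(0,0) = 1.0;
  B.insert(1,0) = 3.0;
  B.makeCompressed();

  L.triangularView<Eigen::Lower>().solveInPlace(B); // B becomes (1, 1)^T
  return 0;
}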