init repo.

tqcq committed 2024-02-23 18:07:37 +08:00
commit 1a9e41d167
512 changed files with 191774 additions and 0 deletions

@@ -0,0 +1,812 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
// Copyright (C) 2013-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_JACOBISVD_H
#define EIGEN_JACOBISVD_H
namespace Eigen {
namespace internal {
// forward declaration (needed by ICC)
// the empty body is required by MSVC
template<typename MatrixType, int QRPreconditioner,
bool IsComplex = NumTraits<typename MatrixType::Scalar>::IsComplex>
struct svd_precondition_2x2_block_to_be_real {};
/*** QR preconditioners (R-SVD)
***
*** Their role is to reduce the problem of computing the SVD to the case of a square matrix.
*** This approach, known as R-SVD, is an optimization for rectangular-enough matrices, and is a requirement for
*** JacobiSVD which by itself is only able to work on square matrices.
***/
enum { PreconditionIfMoreColsThanRows, PreconditionIfMoreRowsThanCols };
template<typename MatrixType, int QRPreconditioner, int Case>
struct qr_preconditioner_should_do_anything
{
enum { a = MatrixType::RowsAtCompileTime != Dynamic &&
MatrixType::ColsAtCompileTime != Dynamic &&
MatrixType::ColsAtCompileTime <= MatrixType::RowsAtCompileTime,
b = MatrixType::RowsAtCompileTime != Dynamic &&
MatrixType::ColsAtCompileTime != Dynamic &&
MatrixType::RowsAtCompileTime <= MatrixType::ColsAtCompileTime,
ret = !( (QRPreconditioner == NoQRPreconditioner) ||
(Case == PreconditionIfMoreColsThanRows && bool(a)) ||
(Case == PreconditionIfMoreRowsThanCols && bool(b)) )
};
};
template<typename MatrixType, int QRPreconditioner, int Case,
bool DoAnything = qr_preconditioner_should_do_anything<MatrixType, QRPreconditioner, Case>::ret
> struct qr_preconditioner_impl {};
template<typename MatrixType, int QRPreconditioner, int Case>
class qr_preconditioner_impl<MatrixType, QRPreconditioner, Case, false>
{
public:
void allocate(const JacobiSVD<MatrixType, QRPreconditioner>&) {}
bool run(JacobiSVD<MatrixType, QRPreconditioner>&, const MatrixType&)
{
return false;
}
};
/*** preconditioner using FullPivHouseholderQR ***/
template<typename MatrixType>
class qr_preconditioner_impl<MatrixType, FullPivHouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>
{
public:
typedef typename MatrixType::Scalar Scalar;
enum
{
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime
};
typedef Matrix<Scalar, 1, RowsAtCompileTime, RowMajor, 1, MaxRowsAtCompileTime> WorkspaceType;
void allocate(const JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd)
{
if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
{
m_qr.~QRType();
::new (&m_qr) QRType(svd.rows(), svd.cols());
}
if (svd.m_computeFullU) m_workspace.resize(svd.rows());
}
bool run(JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
{
if(matrix.rows() > matrix.cols())
{
m_qr.compute(matrix);
svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();
if(svd.m_computeFullU) m_qr.matrixQ().evalTo(svd.m_matrixU, m_workspace);
if(svd.computeV()) svd.m_matrixV = m_qr.colsPermutation();
return true;
}
return false;
}
private:
typedef FullPivHouseholderQR<MatrixType> QRType;
QRType m_qr;
WorkspaceType m_workspace;
};
template<typename MatrixType>
class qr_preconditioner_impl<MatrixType, FullPivHouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>
{
public:
typedef typename MatrixType::Scalar Scalar;
enum
{
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
Options = MatrixType::Options
};
typedef typename internal::make_proper_matrix_type<
Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime
>::type TransposeTypeWithSameStorageOrder;
void allocate(const JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd)
{
if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
{
m_qr.~QRType();
::new (&m_qr) QRType(svd.cols(), svd.rows());
}
m_adjoint.resize(svd.cols(), svd.rows());
if (svd.m_computeFullV) m_workspace.resize(svd.cols());
}
bool run(JacobiSVD<MatrixType, FullPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
{
if(matrix.cols() > matrix.rows())
{
m_adjoint = matrix.adjoint();
m_qr.compute(m_adjoint);
svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();
if(svd.m_computeFullV) m_qr.matrixQ().evalTo(svd.m_matrixV, m_workspace);
if(svd.computeU()) svd.m_matrixU = m_qr.colsPermutation();
return true;
}
else return false;
}
private:
typedef FullPivHouseholderQR<TransposeTypeWithSameStorageOrder> QRType;
QRType m_qr;
TransposeTypeWithSameStorageOrder m_adjoint;
typename internal::plain_row_type<MatrixType>::type m_workspace;
};
/*** preconditioner using ColPivHouseholderQR ***/
template<typename MatrixType>
class qr_preconditioner_impl<MatrixType, ColPivHouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>
{
public:
void allocate(const JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd)
{
if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
{
m_qr.~QRType();
::new (&m_qr) QRType(svd.rows(), svd.cols());
}
if (svd.m_computeFullU) m_workspace.resize(svd.rows());
else if (svd.m_computeThinU) m_workspace.resize(svd.cols());
}
bool run(JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
{
if(matrix.rows() > matrix.cols())
{
m_qr.compute(matrix);
svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();
if(svd.m_computeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace);
else if(svd.m_computeThinU)
{
svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols());
m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixU, m_workspace);
}
if(svd.computeV()) svd.m_matrixV = m_qr.colsPermutation();
return true;
}
return false;
}
private:
typedef ColPivHouseholderQR<MatrixType> QRType;
QRType m_qr;
typename internal::plain_col_type<MatrixType>::type m_workspace;
};
template<typename MatrixType>
class qr_preconditioner_impl<MatrixType, ColPivHouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>
{
public:
typedef typename MatrixType::Scalar Scalar;
enum
{
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
Options = MatrixType::Options
};
typedef typename internal::make_proper_matrix_type<
Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime
>::type TransposeTypeWithSameStorageOrder;
void allocate(const JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd)
{
if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
{
m_qr.~QRType();
::new (&m_qr) QRType(svd.cols(), svd.rows());
}
if (svd.m_computeFullV) m_workspace.resize(svd.cols());
else if (svd.m_computeThinV) m_workspace.resize(svd.rows());
m_adjoint.resize(svd.cols(), svd.rows());
}
bool run(JacobiSVD<MatrixType, ColPivHouseholderQRPreconditioner>& svd, const MatrixType& matrix)
{
if(matrix.cols() > matrix.rows())
{
m_adjoint = matrix.adjoint();
m_qr.compute(m_adjoint);
svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();
if(svd.m_computeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace);
else if(svd.m_computeThinV)
{
svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows());
m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixV, m_workspace);
}
if(svd.computeU()) svd.m_matrixU = m_qr.colsPermutation();
return true;
}
else return false;
}
private:
typedef ColPivHouseholderQR<TransposeTypeWithSameStorageOrder> QRType;
QRType m_qr;
TransposeTypeWithSameStorageOrder m_adjoint;
typename internal::plain_row_type<MatrixType>::type m_workspace;
};
/*** preconditioner using HouseholderQR ***/
template<typename MatrixType>
class qr_preconditioner_impl<MatrixType, HouseholderQRPreconditioner, PreconditionIfMoreRowsThanCols, true>
{
public:
void allocate(const JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd)
{
if (svd.rows() != m_qr.rows() || svd.cols() != m_qr.cols())
{
m_qr.~QRType();
::new (&m_qr) QRType(svd.rows(), svd.cols());
}
if (svd.m_computeFullU) m_workspace.resize(svd.rows());
else if (svd.m_computeThinU) m_workspace.resize(svd.cols());
}
bool run(JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd, const MatrixType& matrix)
{
if(matrix.rows() > matrix.cols())
{
m_qr.compute(matrix);
svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.cols(),matrix.cols()).template triangularView<Upper>();
if(svd.m_computeFullU) m_qr.householderQ().evalTo(svd.m_matrixU, m_workspace);
else if(svd.m_computeThinU)
{
svd.m_matrixU.setIdentity(matrix.rows(), matrix.cols());
m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixU, m_workspace);
}
if(svd.computeV()) svd.m_matrixV.setIdentity(matrix.cols(), matrix.cols());
return true;
}
return false;
}
private:
typedef HouseholderQR<MatrixType> QRType;
QRType m_qr;
typename internal::plain_col_type<MatrixType>::type m_workspace;
};
template<typename MatrixType>
class qr_preconditioner_impl<MatrixType, HouseholderQRPreconditioner, PreconditionIfMoreColsThanRows, true>
{
public:
typedef typename MatrixType::Scalar Scalar;
enum
{
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
Options = MatrixType::Options
};
typedef typename internal::make_proper_matrix_type<
Scalar, ColsAtCompileTime, RowsAtCompileTime, Options, MaxColsAtCompileTime, MaxRowsAtCompileTime
>::type TransposeTypeWithSameStorageOrder;
void allocate(const JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd)
{
if (svd.cols() != m_qr.rows() || svd.rows() != m_qr.cols())
{
m_qr.~QRType();
::new (&m_qr) QRType(svd.cols(), svd.rows());
}
if (svd.m_computeFullV) m_workspace.resize(svd.cols());
else if (svd.m_computeThinV) m_workspace.resize(svd.rows());
m_adjoint.resize(svd.cols(), svd.rows());
}
bool run(JacobiSVD<MatrixType, HouseholderQRPreconditioner>& svd, const MatrixType& matrix)
{
if(matrix.cols() > matrix.rows())
{
m_adjoint = matrix.adjoint();
m_qr.compute(m_adjoint);
svd.m_workMatrix = m_qr.matrixQR().block(0,0,matrix.rows(),matrix.rows()).template triangularView<Upper>().adjoint();
if(svd.m_computeFullV) m_qr.householderQ().evalTo(svd.m_matrixV, m_workspace);
else if(svd.m_computeThinV)
{
svd.m_matrixV.setIdentity(matrix.cols(), matrix.rows());
m_qr.householderQ().applyThisOnTheLeft(svd.m_matrixV, m_workspace);
}
if(svd.computeU()) svd.m_matrixU.setIdentity(matrix.rows(), matrix.rows());
return true;
}
else return false;
}
private:
typedef HouseholderQR<TransposeTypeWithSameStorageOrder> QRType;
QRType m_qr;
TransposeTypeWithSameStorageOrder m_adjoint;
typename internal::plain_row_type<MatrixType>::type m_workspace;
};
/*** 2x2 SVD implementation
***
*** JacobiSVD consists of solving a series of 2x2 SVD subproblems
***/
template<typename MatrixType, int QRPreconditioner>
struct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, false>
{
typedef JacobiSVD<MatrixType, QRPreconditioner> SVD;
typedef typename MatrixType::RealScalar RealScalar;
static bool run(typename SVD::WorkMatrixType&, SVD&, Index, Index, RealScalar&) { return true; }
};
template<typename MatrixType, int QRPreconditioner>
struct svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner, true>
{
typedef JacobiSVD<MatrixType, QRPreconditioner> SVD;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
static bool run(typename SVD::WorkMatrixType& work_matrix, SVD& svd, Index p, Index q, RealScalar& maxDiagEntry)
{
using std::sqrt;
using std::abs;
Scalar z;
JacobiRotation<Scalar> rot;
RealScalar n = sqrt(numext::abs2(work_matrix.coeff(p,p)) + numext::abs2(work_matrix.coeff(q,p)));
const RealScalar considerAsZero = (std::numeric_limits<RealScalar>::min)();
const RealScalar precision = NumTraits<Scalar>::epsilon();
if(n==0)
{
// make sure first column is zero
work_matrix.coeffRef(p,p) = work_matrix.coeffRef(q,p) = Scalar(0);
if(abs(numext::imag(work_matrix.coeff(p,q)))>considerAsZero)
{
// work_matrix.coeff(p,q) can be zero if work_matrix.coeff(q,p) is not zero but small enough to underflow when computing n
z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q);
work_matrix.row(p) *= z;
if(svd.computeU()) svd.m_matrixU.col(p) *= conj(z);
}
if(abs(numext::imag(work_matrix.coeff(q,q)))>considerAsZero)
{
z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q);
work_matrix.row(q) *= z;
if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z);
}
// otherwise the second row is already zero, so we have nothing to do.
}
else
{
rot.c() = conj(work_matrix.coeff(p,p)) / n;
rot.s() = work_matrix.coeff(q,p) / n;
work_matrix.applyOnTheLeft(p,q,rot);
if(svd.computeU()) svd.m_matrixU.applyOnTheRight(p,q,rot.adjoint());
if(abs(numext::imag(work_matrix.coeff(p,q)))>considerAsZero)
{
z = abs(work_matrix.coeff(p,q)) / work_matrix.coeff(p,q);
work_matrix.col(q) *= z;
if(svd.computeV()) svd.m_matrixV.col(q) *= z;
}
if(abs(numext::imag(work_matrix.coeff(q,q)))>considerAsZero)
{
z = abs(work_matrix.coeff(q,q)) / work_matrix.coeff(q,q);
work_matrix.row(q) *= z;
if(svd.computeU()) svd.m_matrixU.col(q) *= conj(z);
}
}
// update largest diagonal entry
maxDiagEntry = numext::maxi<RealScalar>(maxDiagEntry,numext::maxi<RealScalar>(abs(work_matrix.coeff(p,p)), abs(work_matrix.coeff(q,q))));
// and check whether the 2x2 block is already diagonal
RealScalar threshold = numext::maxi<RealScalar>(considerAsZero, precision * maxDiagEntry);
return abs(work_matrix.coeff(p,q))>threshold || abs(work_matrix.coeff(q,p)) > threshold;
}
};
template<typename _MatrixType, int QRPreconditioner>
struct traits<JacobiSVD<_MatrixType,QRPreconditioner> >
: traits<_MatrixType>
{
typedef _MatrixType MatrixType;
};
} // end namespace internal
/** \ingroup SVD_Module
*
*
* \class JacobiSVD
*
* \brief Two-sided Jacobi SVD decomposition of a rectangular matrix
*
* \tparam _MatrixType the type of the matrix of which we are computing the SVD decomposition
* \tparam QRPreconditioner this optional parameter allows you to specify the type of QR decomposition that will be used internally
* for the R-SVD step for non-square matrices. See discussion of possible values below.
*
* SVD decomposition consists in decomposing any n-by-p matrix \a A as a product
* \f[ A = U S V^* \f]
* where \a U is a n-by-n unitary, \a V is a p-by-p unitary, and \a S is a n-by-p real positive matrix which is zero outside of its main diagonal;
* the diagonal entries of S are known as the \em singular \em values of \a A and the columns of \a U and \a V are known as the left
* and right \em singular \em vectors of \a A respectively.
*
* Singular values are always sorted in decreasing order.
*
* This JacobiSVD decomposition computes only the singular values by default. If you want \a U or \a V, you need to ask for them explicitly.
*
* You can ask for only \em thin \a U or \a V to be computed, meaning the following. In case of a rectangular n-by-p matrix, letting \a m be the
* smaller value among \a n and \a p, there are only \a m singular vectors; the remaining columns of \a U and \a V do not correspond to actual
* singular vectors. Asking for \em thin \a U or \a V means asking for only their \a m first columns to be formed. So \a U is then a n-by-m matrix,
* and \a V is then a p-by-m matrix. Notice that thin \a U and \a V are all you need for (least squares) solving.
*
* Here's an example demonstrating basic usage:
* \include JacobiSVD_basic.cpp
* Output: \verbinclude JacobiSVD_basic.out
*
* This JacobiSVD class is a two-sided Jacobi R-SVD decomposition, ensuring optimal reliability and accuracy. The downside is that it's slower than
* bidiagonalizing SVD algorithms for large square matrices; however its complexity is still \f$ O(n^2p) \f$ where \a n is the smaller dimension and
* \a p is the greater dimension, meaning that it is still of the same order of complexity as the faster bidiagonalizing R-SVD algorithms.
* In particular, like any R-SVD, it takes advantage of non-squareness in that its complexity is only linear in the greater dimension.
*
* If the input matrix has inf or nan coefficients, the result of the computation is undefined, but the computation is guaranteed to
* terminate in finite (and reasonable) time.
*
* The possible values for QRPreconditioner are:
* \li ColPivHouseholderQRPreconditioner is the default. In practice it's very safe. It uses column-pivoting QR.
* \li FullPivHouseholderQRPreconditioner is the safest and slowest. It uses full-pivoting QR.
* Contrary to other QRs, it doesn't allow computing thin unitaries.
* \li HouseholderQRPreconditioner is the fastest, but less safe and accurate than the pivoting variants. It uses non-pivoting QR.
* This is very similar in safety and accuracy to the bidiagonalization process used by bidiagonalizing SVD algorithms (since bidiagonalization
* is inherently non-pivoting). However, the resulting SVD is still more reliable than bidiagonalizing SVDs because the Jacobi-based iterative
* process is more reliable than the optimized bidiagonal SVD iterations.
* \li NoQRPreconditioner disables the QR preconditioner entirely. This is useful if you know that you will only be computing
* JacobiSVD decompositions of square matrices. Non-square matrices require a QR preconditioner. Using this option will result in
* faster compilation and smaller executable code. It won't significantly speed up computation, since JacobiSVD is always checking
* if QR preconditioning is needed before applying it anyway.
*
* \sa MatrixBase::jacobiSvd()
*/
template<typename _MatrixType, int QRPreconditioner> class JacobiSVD
: public SVDBase<JacobiSVD<_MatrixType,QRPreconditioner> >
{
typedef SVDBase<JacobiSVD> Base;
public:
typedef _MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
enum {
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime),
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime,MaxColsAtCompileTime),
MatrixOptions = MatrixType::Options
};
typedef typename Base::MatrixUType MatrixUType;
typedef typename Base::MatrixVType MatrixVType;
typedef typename Base::SingularValuesType SingularValuesType;
typedef typename internal::plain_row_type<MatrixType>::type RowType;
typedef typename internal::plain_col_type<MatrixType>::type ColType;
typedef Matrix<Scalar, DiagSizeAtCompileTime, DiagSizeAtCompileTime,
MatrixOptions, MaxDiagSizeAtCompileTime, MaxDiagSizeAtCompileTime>
WorkMatrixType;
/** \brief Default Constructor.
*
* The default constructor is useful in cases in which the user intends to
* perform decompositions via JacobiSVD::compute(const MatrixType&).
*/
JacobiSVD()
{}
/** \brief Default Constructor with memory preallocation
*
* Like the default constructor but with preallocation of the internal data
* according to the specified problem size.
* \sa JacobiSVD()
*/
JacobiSVD(Index rows, Index cols, unsigned int computationOptions = 0)
{
allocate(rows, cols, computationOptions);
}
/** \brief Constructor performing the decomposition of given matrix.
*
* \param matrix the matrix to decompose
* \param computationOptions optional parameter allowing you to specify whether you want full or thin U or V unitaries to be computed.
* By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU,
* #ComputeFullV, #ComputeThinV.
*
* Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not
* available with the (non-default) FullPivHouseholderQR preconditioner.
*/
explicit JacobiSVD(const MatrixType& matrix, unsigned int computationOptions = 0)
{
compute(matrix, computationOptions);
}
/** \brief Method performing the decomposition of given matrix using custom options.
*
* \param matrix the matrix to decompose
* \param computationOptions optional parameter allowing you to specify whether you want full or thin U or V unitaries to be computed.
* By default, none is computed. This is a bit-field, the possible bits are #ComputeFullU, #ComputeThinU,
* #ComputeFullV, #ComputeThinV.
*
* Thin unitaries are only available if your matrix type has a Dynamic number of columns (for example MatrixXf). They also are not
* available with the (non-default) FullPivHouseholderQR preconditioner.
*/
JacobiSVD& compute(const MatrixType& matrix, unsigned int computationOptions);
/** \brief Method performing the decomposition of given matrix using current options.
*
* \param matrix the matrix to decompose
*
* This method uses the current \a computationOptions, as already passed to the constructor or to compute(const MatrixType&, unsigned int).
*/
JacobiSVD& compute(const MatrixType& matrix)
{
return compute(matrix, m_computationOptions);
}
using Base::computeU;
using Base::computeV;
using Base::rows;
using Base::cols;
using Base::rank;
private:
void allocate(Index rows, Index cols, unsigned int computationOptions);
protected:
using Base::m_matrixU;
using Base::m_matrixV;
using Base::m_singularValues;
using Base::m_info;
using Base::m_isInitialized;
using Base::m_isAllocated;
using Base::m_usePrescribedThreshold;
using Base::m_computeFullU;
using Base::m_computeThinU;
using Base::m_computeFullV;
using Base::m_computeThinV;
using Base::m_computationOptions;
using Base::m_nonzeroSingularValues;
using Base::m_rows;
using Base::m_cols;
using Base::m_diagSize;
using Base::m_prescribedThreshold;
WorkMatrixType m_workMatrix;
template<typename __MatrixType, int _QRPreconditioner, bool _IsComplex>
friend struct internal::svd_precondition_2x2_block_to_be_real;
template<typename __MatrixType, int _QRPreconditioner, int _Case, bool _DoAnything>
friend struct internal::qr_preconditioner_impl;
internal::qr_preconditioner_impl<MatrixType, QRPreconditioner, internal::PreconditionIfMoreColsThanRows> m_qr_precond_morecols;
internal::qr_preconditioner_impl<MatrixType, QRPreconditioner, internal::PreconditionIfMoreRowsThanCols> m_qr_precond_morerows;
MatrixType m_scaledMatrix;
};
template<typename MatrixType, int QRPreconditioner>
void JacobiSVD<MatrixType, QRPreconditioner>::allocate(Eigen::Index rows, Eigen::Index cols, unsigned int computationOptions)
{
eigen_assert(rows >= 0 && cols >= 0);
if (m_isAllocated &&
rows == m_rows &&
cols == m_cols &&
computationOptions == m_computationOptions)
{
return;
}
m_rows = rows;
m_cols = cols;
m_info = Success;
m_isInitialized = false;
m_isAllocated = true;
m_computationOptions = computationOptions;
m_computeFullU = (computationOptions & ComputeFullU) != 0;
m_computeThinU = (computationOptions & ComputeThinU) != 0;
m_computeFullV = (computationOptions & ComputeFullV) != 0;
m_computeThinV = (computationOptions & ComputeThinV) != 0;
eigen_assert(!(m_computeFullU && m_computeThinU) && "JacobiSVD: you can't ask for both full and thin U");
eigen_assert(!(m_computeFullV && m_computeThinV) && "JacobiSVD: you can't ask for both full and thin V");
eigen_assert(EIGEN_IMPLIES(m_computeThinU || m_computeThinV, MatrixType::ColsAtCompileTime==Dynamic) &&
"JacobiSVD: thin U and V are only available when your matrix has a dynamic number of columns.");
if (QRPreconditioner == FullPivHouseholderQRPreconditioner)
{
eigen_assert(!(m_computeThinU || m_computeThinV) &&
"JacobiSVD: can't compute thin U or thin V with the FullPivHouseholderQR preconditioner. "
"Use the ColPivHouseholderQR preconditioner instead.");
}
m_diagSize = (std::min)(m_rows, m_cols);
m_singularValues.resize(m_diagSize);
if(RowsAtCompileTime==Dynamic)
m_matrixU.resize(m_rows, m_computeFullU ? m_rows
: m_computeThinU ? m_diagSize
: 0);
if(ColsAtCompileTime==Dynamic)
m_matrixV.resize(m_cols, m_computeFullV ? m_cols
: m_computeThinV ? m_diagSize
: 0);
m_workMatrix.resize(m_diagSize, m_diagSize);
if(m_cols>m_rows) m_qr_precond_morecols.allocate(*this);
if(m_rows>m_cols) m_qr_precond_morerows.allocate(*this);
if(m_rows!=m_cols) m_scaledMatrix.resize(rows,cols);
}
template<typename MatrixType, int QRPreconditioner>
JacobiSVD<MatrixType, QRPreconditioner>&
JacobiSVD<MatrixType, QRPreconditioner>::compute(const MatrixType& matrix, unsigned int computationOptions)
{
using std::abs;
allocate(matrix.rows(), matrix.cols(), computationOptions);
// currently we stop when we reach precision 2*epsilon as the last bit of precision can require an unreasonable number of iterations,
// only worsening the precision of U and V as we accumulate more rotations
const RealScalar precision = RealScalar(2) * NumTraits<Scalar>::epsilon();
// limit for denormal numbers to be considered zero in order to avoid infinite loops (see bug 286)
const RealScalar considerAsZero = (std::numeric_limits<RealScalar>::min)();
// Scaling factor to reduce over/under-flows
RealScalar scale = matrix.cwiseAbs().template maxCoeff<PropagateNaN>();
if (!(numext::isfinite)(scale)) {
m_isInitialized = true;
m_info = InvalidInput;
return *this;
}
if(scale==RealScalar(0)) scale = RealScalar(1);
/*** step 1. The R-SVD step: we use a QR decomposition to reduce to the case of a square matrix */
if(m_rows!=m_cols)
{
m_scaledMatrix = matrix / scale;
m_qr_precond_morecols.run(*this, m_scaledMatrix);
m_qr_precond_morerows.run(*this, m_scaledMatrix);
}
else
{
m_workMatrix = matrix.block(0,0,m_diagSize,m_diagSize) / scale;
if(m_computeFullU) m_matrixU.setIdentity(m_rows,m_rows);
if(m_computeThinU) m_matrixU.setIdentity(m_rows,m_diagSize);
if(m_computeFullV) m_matrixV.setIdentity(m_cols,m_cols);
if(m_computeThinV) m_matrixV.setIdentity(m_cols, m_diagSize);
}
/*** step 2. The main Jacobi SVD iteration. ***/
RealScalar maxDiagEntry = m_workMatrix.cwiseAbs().diagonal().maxCoeff();
bool finished = false;
while(!finished)
{
finished = true;
// do a sweep: for all index pairs (p,q), perform SVD of the corresponding 2x2 sub-matrix
for(Index p = 1; p < m_diagSize; ++p)
{
for(Index q = 0; q < p; ++q)
{
// if this 2x2 sub-matrix is not diagonal already...
// notice that this comparison will evaluate to false if any NaN is involved, ensuring that NaN's don't
// keep us iterating forever. Similarly, small denormal numbers are considered zero.
RealScalar threshold = numext::maxi<RealScalar>(considerAsZero, precision * maxDiagEntry);
if(abs(m_workMatrix.coeff(p,q))>threshold || abs(m_workMatrix.coeff(q,p)) > threshold)
{
finished = false;
// perform SVD decomposition of 2x2 sub-matrix corresponding to indices p,q to make it diagonal
// the complex to real operation returns true if the updated 2x2 block is not already diagonal
if(internal::svd_precondition_2x2_block_to_be_real<MatrixType, QRPreconditioner>::run(m_workMatrix, *this, p, q, maxDiagEntry))
{
JacobiRotation<RealScalar> j_left, j_right;
internal::real_2x2_jacobi_svd(m_workMatrix, p, q, &j_left, &j_right);
// accumulate resulting Jacobi rotations
m_workMatrix.applyOnTheLeft(p,q,j_left);
if(computeU()) m_matrixU.applyOnTheRight(p,q,j_left.transpose());
m_workMatrix.applyOnTheRight(p,q,j_right);
if(computeV()) m_matrixV.applyOnTheRight(p,q,j_right);
// keep track of the largest diagonal coefficient
maxDiagEntry = numext::maxi<RealScalar>(maxDiagEntry,numext::maxi<RealScalar>(abs(m_workMatrix.coeff(p,p)), abs(m_workMatrix.coeff(q,q))));
}
}
}
}
}
/*** step 3. The work matrix is now diagonal, so make its diagonal entries non-negative so that they are the singular values ***/
for(Index i = 0; i < m_diagSize; ++i)
{
// For a complex matrix, some diagonal coefficients might not have been
// treated by svd_precondition_2x2_block_to_be_real, and the imaginary part
// of some diagonal entry might not be zero.
if(NumTraits<Scalar>::IsComplex && abs(numext::imag(m_workMatrix.coeff(i,i)))>considerAsZero)
{
RealScalar a = abs(m_workMatrix.coeff(i,i));
m_singularValues.coeffRef(i) = abs(a);
if(computeU()) m_matrixU.col(i) *= m_workMatrix.coeff(i,i)/a;
}
else
{
// m_workMatrix.coeff(i,i) is already real, no difficulty:
RealScalar a = numext::real(m_workMatrix.coeff(i,i));
m_singularValues.coeffRef(i) = abs(a);
if(computeU() && (a<RealScalar(0))) m_matrixU.col(i) = -m_matrixU.col(i);
}
}
m_singularValues *= scale;
/*** step 4. Sort singular values in descending order and compute the number of nonzero singular values ***/
m_nonzeroSingularValues = m_diagSize;
for(Index i = 0; i < m_diagSize; i++)
{
Index pos;
RealScalar maxRemainingSingularValue = m_singularValues.tail(m_diagSize-i).maxCoeff(&pos);
if(maxRemainingSingularValue == RealScalar(0))
{
m_nonzeroSingularValues = i;
break;
}
if(pos)
{
pos += i;
std::swap(m_singularValues.coeffRef(i), m_singularValues.coeffRef(pos));
if(computeU()) m_matrixU.col(pos).swap(m_matrixU.col(i));
if(computeV()) m_matrixV.col(pos).swap(m_matrixV.col(i));
}
}
m_isInitialized = true;
return *this;
}
/** \svd_module
*
* \return the singular value decomposition of \c *this computed by two-sided
* Jacobi transformations.
*
* \sa class JacobiSVD
*/
template<typename Derived>
JacobiSVD<typename MatrixBase<Derived>::PlainObject>
MatrixBase<Derived>::jacobiSvd(unsigned int computationOptions) const
{
return JacobiSVD<PlainObject>(*this, computationOptions);
}
} // end namespace Eigen
#endif // EIGEN_JACOBISVD_H
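
A minimal usage sketch for the JacobiSVD class defined above, assuming only that Eigen's <Eigen/Dense> header is on the include path. It requests thin U and V, which is all that least-squares solving needs, and shows that the optional QRPreconditioner template parameter can be selected explicitly:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXf A = Eigen::MatrixXf::Random(5, 3);
  Eigen::VectorXf b = Eigen::VectorXf::Random(5);
  // Thin U and V suffice for (least squares) solving.
  Eigen::JacobiSVD<Eigen::MatrixXf> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
  std::cout << "singular values: " << svd.singularValues().transpose() << "\n";
  Eigen::VectorXf x = svd.solve(b);   // least-squares solution of A x = b
  std::cout << "least-squares solution:\n" << x << "\n";
  // The preconditioner can be chosen explicitly; FullPivHouseholderQRPreconditioner
  // is the safest but does not support thin unitaries, hence full U and V here.
  Eigen::JacobiSVD<Eigen::MatrixXf, Eigen::FullPivHouseholderQRPreconditioner>
      fullSvd(A, Eigen::ComputeFullU | Eigen::ComputeFullV);
  return 0;
}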

@@ -0,0 +1,91 @@
/*
Copyright (c) 2011, Intel Corporation. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
********************************************************************************
* Content : Eigen bindings to LAPACKe
* Singular Value Decomposition - SVD.
********************************************************************************
*/
#ifndef EIGEN_JACOBISVD_LAPACKE_H
#define EIGEN_JACOBISVD_LAPACKE_H
namespace Eigen {
/** \internal Specialization for the data types supported by LAPACKe */
#define EIGEN_LAPACKE_SVD(EIGTYPE, LAPACKE_TYPE, LAPACKE_RTYPE, LAPACKE_PREFIX, EIGCOLROW, LAPACKE_COLROW) \
template<> inline \
JacobiSVD<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic>, ColPivHouseholderQRPreconditioner>& \
JacobiSVD<Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic>, ColPivHouseholderQRPreconditioner>::compute(const Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic>& matrix, unsigned int computationOptions) \
{ \
typedef Matrix<EIGTYPE, Dynamic, Dynamic, EIGCOLROW, Dynamic, Dynamic> MatrixType; \
/*typedef MatrixType::Scalar Scalar;*/ \
/*typedef MatrixType::RealScalar RealScalar;*/ \
allocate(matrix.rows(), matrix.cols(), computationOptions); \
\
/*const RealScalar precision = RealScalar(2) * NumTraits<Scalar>::epsilon();*/ \
m_nonzeroSingularValues = m_diagSize; \
\
lapack_int lda = internal::convert_index<lapack_int>(matrix.outerStride()), ldu, ldvt; \
lapack_int matrix_order = LAPACKE_COLROW; \
char jobu, jobvt; \
LAPACKE_TYPE *u, *vt, dummy; \
jobu = (m_computeFullU) ? 'A' : (m_computeThinU) ? 'S' : 'N'; \
jobvt = (m_computeFullV) ? 'A' : (m_computeThinV) ? 'S' : 'N'; \
if (computeU()) { \
ldu = internal::convert_index<lapack_int>(m_matrixU.outerStride()); \
u = (LAPACKE_TYPE*)m_matrixU.data(); \
} else { ldu=1; u=&dummy; }\
MatrixType localV; \
lapack_int vt_rows = (m_computeFullV) ? internal::convert_index<lapack_int>(m_cols) : (m_computeThinV) ? internal::convert_index<lapack_int>(m_diagSize) : 1; \
if (computeV()) { \
localV.resize(vt_rows, m_cols); \
ldvt = internal::convert_index<lapack_int>(localV.outerStride()); \
vt = (LAPACKE_TYPE*)localV.data(); \
} else { ldvt=1; vt=&dummy; }\
Matrix<LAPACKE_RTYPE, Dynamic, Dynamic> superb; superb.resize(m_diagSize, 1); \
MatrixType m_temp; m_temp = matrix; \
LAPACKE_##LAPACKE_PREFIX##gesvd( matrix_order, jobu, jobvt, internal::convert_index<lapack_int>(m_rows), internal::convert_index<lapack_int>(m_cols), (LAPACKE_TYPE*)m_temp.data(), lda, (LAPACKE_RTYPE*)m_singularValues.data(), u, ldu, vt, ldvt, superb.data()); \
if (computeV()) m_matrixV = localV.adjoint(); \
/* for(int i=0;i<m_diagSize;i++) if (m_singularValues.coeffRef(i) < precision) { m_nonzeroSingularValues--; m_singularValues.coeffRef(i)=RealScalar(0);}*/ \
m_isInitialized = true; \
return *this; \
}
EIGEN_LAPACKE_SVD(double, double, double, d, ColMajor, LAPACK_COL_MAJOR)
EIGEN_LAPACKE_SVD(float, float, float , s, ColMajor, LAPACK_COL_MAJOR)
EIGEN_LAPACKE_SVD(dcomplex, lapack_complex_double, double, z, ColMajor, LAPACK_COL_MAJOR)
EIGEN_LAPACKE_SVD(scomplex, lapack_complex_float, float , c, ColMajor, LAPACK_COL_MAJOR)
EIGEN_LAPACKE_SVD(double, double, double, d, RowMajor, LAPACK_ROW_MAJOR)
EIGEN_LAPACKE_SVD(float, float, float , s, RowMajor, LAPACK_ROW_MAJOR)
EIGEN_LAPACKE_SVD(dcomplex, lapack_complex_double, double, z, RowMajor, LAPACK_ROW_MAJOR)
EIGEN_LAPACKE_SVD(scomplex, lapack_complex_float, float , c, RowMajor, LAPACK_ROW_MAJOR)
} // end namespace Eigen
#endif // EIGEN_JACOBISVD_LAPACKE_H
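
The LAPACKe bindings above replace JacobiSVD::compute() with a call to LAPACKE_?gesvd for dynamic-size matrices using the default ColPivHouseholderQR preconditioner. A hedged build sketch, assuming Eigen is used with EIGEN_USE_LAPACKE defined and the program is linked against a LAPACKE/BLAS implementation:

#define EIGEN_USE_LAPACKE
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(6, 4);
  // With the macro and link flags above, this compute() call is routed through
  // the LAPACKE_dgesvd specialization instead of the generic Jacobi iteration.
  Eigen::JacobiSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
  std::cout << svd.singularValues().transpose() << std::endl;
  return 0;
}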

@@ -0,0 +1,376 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009-2010 Benoit Jacob <jacob.benoit.1@gmail.com>
// Copyright (C) 2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// Copyright (C) 2013 Gauthier Brun <brun.gauthier@gmail.com>
// Copyright (C) 2013 Nicolas Carre <nicolas.carre@ensimag.fr>
// Copyright (C) 2013 Jean Ceccato <jean.ceccato@ensimag.fr>
// Copyright (C) 2013 Pierre Zoppitelli <pierre.zoppitelli@ensimag.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_SVDBASE_H
#define EIGEN_SVDBASE_H
namespace Eigen {
namespace internal {
template<typename Derived> struct traits<SVDBase<Derived> >
: traits<Derived>
{
typedef MatrixXpr XprKind;
typedef SolverStorage StorageKind;
typedef int StorageIndex;
enum { Flags = 0 };
};
}
/** \ingroup SVD_Module
*
*
* \class SVDBase
*
* \brief Base class of SVD algorithms
*
* \tparam Derived the type of the actual SVD decomposition
*
* SVD decomposition consists in decomposing any n-by-p matrix \a A as a product
* \f[ A = U S V^* \f]
* where \a U is a n-by-n unitary, \a V is a p-by-p unitary, and \a S is a n-by-p real positive matrix which is zero outside of its main diagonal;
* the diagonal entries of S are known as the \em singular \em values of \a A and the columns of \a U and \a V are known as the left
* and right \em singular \em vectors of \a A respectively.
*
* Singular values are always sorted in decreasing order.
*
*
* You can ask for only \em thin \a U or \a V to be computed, meaning the following. In case of a rectangular n-by-p matrix, letting \a m be the
* smaller value among \a n and \a p, there are only \a m singular vectors; the remaining columns of \a U and \a V do not correspond to actual
* singular vectors. Asking for \em thin \a U or \a V means asking for only their \a m first columns to be formed. So \a U is then a n-by-m matrix,
* and \a V is then a p-by-m matrix. Notice that thin \a U and \a V are all you need for (least squares) solving.
*
* The status of the computation can be retrieved using the \a info() method. Unless \a info() returns \a Success, the results should not be
* considered well defined.
*
* If the input matrix has inf or nan coefficients, the result of the computation is undefined, and \a info() will return \a InvalidInput, but the computation is guaranteed to
* terminate in finite (and reasonable) time.
* \sa class BDCSVD, class JacobiSVD
*/
template<typename Derived> class SVDBase
: public SolverBase<SVDBase<Derived> >
{
public:
template<typename Derived_>
friend struct internal::solve_assertion;
typedef typename internal::traits<Derived>::MatrixType MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
typedef typename Eigen::internal::traits<SVDBase>::StorageIndex StorageIndex;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
enum {
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime,ColsAtCompileTime),
MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime,
MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime,
MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime,MaxColsAtCompileTime),
MatrixOptions = MatrixType::Options
};
typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime, MatrixOptions, MaxRowsAtCompileTime, MaxRowsAtCompileTime> MatrixUType;
typedef Matrix<Scalar, ColsAtCompileTime, ColsAtCompileTime, MatrixOptions, MaxColsAtCompileTime, MaxColsAtCompileTime> MatrixVType;
typedef typename internal::plain_diag_type<MatrixType, RealScalar>::type SingularValuesType;
Derived& derived() { return *static_cast<Derived*>(this); }
const Derived& derived() const { return *static_cast<const Derived*>(this); }
/** \returns the \a U matrix.
*
* For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p,
* the U matrix is n-by-n if you asked for \link Eigen::ComputeFullU ComputeFullU \endlink, and is n-by-m if you asked for \link Eigen::ComputeThinU ComputeThinU \endlink.
*
* The \a m first columns of \a U are the left singular vectors of the matrix being decomposed.
*
* This method asserts that you asked for \a U to be computed.
*/
const MatrixUType& matrixU() const
{
_check_compute_assertions();
eigen_assert(computeU() && "This SVD decomposition didn't compute U. Did you ask for it?");
return m_matrixU;
}
/** \returns the \a V matrix.
*
* For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p,
* the V matrix is p-by-p if you asked for \link Eigen::ComputeFullV ComputeFullV \endlink, and is p-by-m if you asked for \link Eigen::ComputeThinV ComputeThinV \endlink.
*
* The \a m first columns of \a V are the right singular vectors of the matrix being decomposed.
*
* This method asserts that you asked for \a V to be computed.
*/
const MatrixVType& matrixV() const
{
_check_compute_assertions();
eigen_assert(computeV() && "This SVD decomposition didn't compute V. Did you ask for it?");
return m_matrixV;
}
/** \returns the vector of singular values.
*
* For the SVD decomposition of a n-by-p matrix, letting \a m be the minimum of \a n and \a p, the
* returned vector has size \a m. Singular values are always sorted in decreasing order.
*/
const SingularValuesType& singularValues() const
{
_check_compute_assertions();
return m_singularValues;
}
/** \returns the number of singular values that are not exactly 0 */
Index nonzeroSingularValues() const
{
_check_compute_assertions();
return m_nonzeroSingularValues;
}
/** \returns the rank of the matrix of which \c *this is the SVD.
*
* \note This method has to determine which singular values should be considered nonzero.
* For that, it uses the threshold value that you can control by calling
* setThreshold(const RealScalar&).
*/
inline Index rank() const
{
using std::abs;
_check_compute_assertions();
if(m_singularValues.size()==0) return 0;
RealScalar premultiplied_threshold = numext::maxi<RealScalar>(m_singularValues.coeff(0) * threshold(), (std::numeric_limits<RealScalar>::min)());
Index i = m_nonzeroSingularValues-1;
while(i>=0 && m_singularValues.coeff(i) < premultiplied_threshold) --i;
return i+1;
}
/** Allows you to prescribe a threshold to be used by certain methods, such as rank() and solve(),
* which need to determine when singular values are to be considered nonzero.
* This is not used for the SVD decomposition itself.
*
* When it needs to get the threshold value, Eigen calls threshold().
* The default is \c NumTraits<Scalar>::epsilon()
*
* \param threshold The new value to use as the threshold.
*
* A singular value will be considered nonzero if it satisfies
* \f$ \vert singular value \vert \geqslant threshold \times \vert max singular value \vert \f$.
*
* If you want to come back to the default behavior, call setThreshold(Default_t)
*/
Derived& setThreshold(const RealScalar& threshold)
{
m_usePrescribedThreshold = true;
m_prescribedThreshold = threshold;
return derived();
}
/** Allows you to come back to the default behavior, letting Eigen use its default formula for
* determining the threshold.
*
* You should pass the special object Eigen::Default as parameter here.
* \code svd.setThreshold(Eigen::Default); \endcode
*
* See the documentation of setThreshold(const RealScalar&).
*/
Derived& setThreshold(Default_t)
{
m_usePrescribedThreshold = false;
return derived();
}
/** Returns the threshold that will be used by certain methods such as rank().
*
* See the documentation of setThreshold(const RealScalar&).
*/
RealScalar threshold() const
{
eigen_assert(m_isInitialized || m_usePrescribedThreshold);
// this temporary is needed to work around an MSVC issue
Index diagSize = (std::max<Index>)(1,m_diagSize);
return m_usePrescribedThreshold ? m_prescribedThreshold
: RealScalar(diagSize)*NumTraits<Scalar>::epsilon();
}
/** \returns true if \a U (full or thin) is asked for in this SVD decomposition */
inline bool computeU() const { return m_computeFullU || m_computeThinU; }
/** \returns true if \a V (full or thin) is asked for in this SVD decomposition */
inline bool computeV() const { return m_computeFullV || m_computeThinV; }
inline Index rows() const { return m_rows; }
inline Index cols() const { return m_cols; }
#ifdef EIGEN_PARSED_BY_DOXYGEN
/** \returns a (least squares) solution of \f$ A x = b \f$ using the current SVD decomposition of A.
*
* \param b the right-hand-side of the equation to solve.
*
* \note Solving requires both U and V to be computed. Thin U and V are enough, there is no need for full U or V.
*
* \note SVD solving is implicitly least-squares. Thus, this method serves both purposes of exact solving and least-squares solving.
* In other words, the returned solution is guaranteed to minimize the Euclidean norm \f$ \Vert A x - b \Vert \f$.
*/
template<typename Rhs>
inline const Solve<Derived, Rhs>
solve(const MatrixBase<Rhs>& b) const;
#endif
/** \brief Reports whether previous computation was successful.
*
* \returns \c Success if computation was successful.
*/
EIGEN_DEVICE_FUNC
ComputationInfo info() const
{
eigen_assert(m_isInitialized && "SVD is not initialized.");
return m_info;
}
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename RhsType, typename DstType>
void _solve_impl(const RhsType &rhs, DstType &dst) const;
template<bool Conjugate, typename RhsType, typename DstType>
void _solve_impl_transposed(const RhsType &rhs, DstType &dst) const;
#endif
protected:
static void check_template_parameters()
{
EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
}
void _check_compute_assertions() const {
eigen_assert(m_isInitialized && "SVD is not initialized.");
}
template<bool Transpose_, typename Rhs>
void _check_solve_assertion(const Rhs& b) const {
EIGEN_ONLY_USED_FOR_DEBUG(b);
_check_compute_assertions();
eigen_assert(computeU() && computeV() && "SVDBase::solve(): Both unitaries U and V are required to be computed (thin unitaries suffice).");
eigen_assert((Transpose_?cols():rows())==b.rows() && "SVDBase::solve(): invalid number of rows of the right hand side matrix b");
}
// return true if already allocated
bool allocate(Index rows, Index cols, unsigned int computationOptions) ;
MatrixUType m_matrixU;
MatrixVType m_matrixV;
SingularValuesType m_singularValues;
ComputationInfo m_info;
bool m_isInitialized, m_isAllocated, m_usePrescribedThreshold;
bool m_computeFullU, m_computeThinU;
bool m_computeFullV, m_computeThinV;
unsigned int m_computationOptions;
Index m_nonzeroSingularValues, m_rows, m_cols, m_diagSize;
RealScalar m_prescribedThreshold;
/** \brief Default Constructor.
*
* Default constructor of SVDBase
*/
SVDBase()
: m_info(Success),
m_isInitialized(false),
m_isAllocated(false),
m_usePrescribedThreshold(false),
m_computeFullU(false),
m_computeThinU(false),
m_computeFullV(false),
m_computeThinV(false),
m_computationOptions(0),
m_rows(-1), m_cols(-1), m_diagSize(0)
{
check_template_parameters();
}
};
#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename Derived>
template<typename RhsType, typename DstType>
void SVDBase<Derived>::_solve_impl(const RhsType &rhs, DstType &dst) const
{
// A = U S V^*
// So A^{-1} = V S^{-1} U^*
Matrix<typename RhsType::Scalar, Dynamic, RhsType::ColsAtCompileTime, 0, MatrixType::MaxRowsAtCompileTime, RhsType::MaxColsAtCompileTime> tmp;
Index l_rank = rank();
tmp.noalias() = m_matrixU.leftCols(l_rank).adjoint() * rhs;
tmp = m_singularValues.head(l_rank).asDiagonal().inverse() * tmp;
dst = m_matrixV.leftCols(l_rank) * tmp;
}
template<typename Derived>
template<bool Conjugate, typename RhsType, typename DstType>
void SVDBase<Derived>::_solve_impl_transposed(const RhsType &rhs, DstType &dst) const
{
// A = U S V^*
// So A^{-*} = U S^{-1} V^*
// And A^{-T} = U_conj S^{-1} V^T
Matrix<typename RhsType::Scalar, Dynamic, RhsType::ColsAtCompileTime, 0, MatrixType::MaxRowsAtCompileTime, RhsType::MaxColsAtCompileTime> tmp;
Index l_rank = rank();
tmp.noalias() = m_matrixV.leftCols(l_rank).transpose().template conjugateIf<Conjugate>() * rhs;
tmp = m_singularValues.head(l_rank).asDiagonal().inverse() * tmp;
dst = m_matrixU.template conjugateIf<!Conjugate>().leftCols(l_rank) * tmp;
}
#endif
template<typename MatrixType>
bool SVDBase<MatrixType>::allocate(Index rows, Index cols, unsigned int computationOptions)
{
eigen_assert(rows >= 0 && cols >= 0);
if (m_isAllocated &&
rows == m_rows &&
cols == m_cols &&
computationOptions == m_computationOptions)
{
return true;
}
m_rows = rows;
m_cols = cols;
m_info = Success;
m_isInitialized = false;
m_isAllocated = true;
m_computationOptions = computationOptions;
m_computeFullU = (computationOptions & ComputeFullU) != 0;
m_computeThinU = (computationOptions & ComputeThinU) != 0;
m_computeFullV = (computationOptions & ComputeFullV) != 0;
m_computeThinV = (computationOptions & ComputeThinV) != 0;
eigen_assert(!(m_computeFullU && m_computeThinU) && "SVDBase: you can't ask for both full and thin U");
eigen_assert(!(m_computeFullV && m_computeThinV) && "SVDBase: you can't ask for both full and thin V");
eigen_assert(EIGEN_IMPLIES(m_computeThinU || m_computeThinV, MatrixType::ColsAtCompileTime==Dynamic) &&
"SVDBase: thin U and V are only available when your matrix has a dynamic number of columns.");
m_diagSize = (std::min)(m_rows, m_cols);
m_singularValues.resize(m_diagSize);
if(RowsAtCompileTime==Dynamic)
m_matrixU.resize(m_rows, m_computeFullU ? m_rows : m_computeThinU ? m_diagSize : 0);
if(ColsAtCompileTime==Dynamic)
m_matrixV.resize(m_cols, m_computeFullV ? m_cols : m_computeThinV ? m_diagSize : 0);
return false;
}
}// end namespace
#endif // EIGEN_SVDBASE_H
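
A short sketch of the SVDBase API documented above (the info() status check, rank determination, and the prescribable threshold), shown through JacobiSVD as the concrete subclass; the 1e-8 threshold value is only an illustration:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  // A 4x4 matrix of rank 2.
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(4, 2) * Eigen::MatrixXd::Random(2, 4);
  Eigen::JacobiSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
  if (svd.info() != Eigen::Success) return 1;   // computation status
  std::cout << "rank (default threshold): " << svd.rank() << "\n";
  // A prescribed threshold affects rank() and solve() only, not the decomposition.
  svd.setThreshold(1e-8);
  std::cout << "rank (threshold 1e-8): " << svd.rank() << "\n";
  svd.setThreshold(Eigen::Default);             // restore the default formula
  return 0;
}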

@@ -0,0 +1,414 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2010 Benoit Jacob <jacob.benoit.1@gmail.com>
// Copyright (C) 2013-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_BIDIAGONALIZATION_H
#define EIGEN_BIDIAGONALIZATION_H
namespace Eigen {
namespace internal {
// UpperBidiagonalization will probably be replaced by a Bidiagonalization class, so we don't want to make it a stable API.
// At the same time, it's useful to keep for now as it's about the only thing that is testing the BandMatrix class.
template<typename _MatrixType> class UpperBidiagonalization
{
public:
typedef _MatrixType MatrixType;
enum {
RowsAtCompileTime = MatrixType::RowsAtCompileTime,
ColsAtCompileTime = MatrixType::ColsAtCompileTime,
ColsAtCompileTimeMinusOne = internal::decrement_size<ColsAtCompileTime>::ret
};
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
typedef Matrix<Scalar, 1, ColsAtCompileTime> RowVectorType;
typedef Matrix<Scalar, RowsAtCompileTime, 1> ColVectorType;
typedef BandMatrix<RealScalar, ColsAtCompileTime, ColsAtCompileTime, 1, 0, RowMajor> BidiagonalType;
typedef Matrix<Scalar, ColsAtCompileTime, 1> DiagVectorType;
typedef Matrix<Scalar, ColsAtCompileTimeMinusOne, 1> SuperDiagVectorType;
typedef HouseholderSequence<
const MatrixType,
const typename internal::remove_all<typename Diagonal<const MatrixType,0>::ConjugateReturnType>::type
> HouseholderUSequenceType;
typedef HouseholderSequence<
const typename internal::remove_all<typename MatrixType::ConjugateReturnType>::type,
Diagonal<const MatrixType,1>,
OnTheRight
> HouseholderVSequenceType;
/**
* \brief Default Constructor.
*
* The default constructor is useful in cases in which the user intends to
* perform decompositions via Bidiagonalization::compute(const MatrixType&).
*/
UpperBidiagonalization() : m_householder(), m_bidiagonal(), m_isInitialized(false) {}
explicit UpperBidiagonalization(const MatrixType& matrix)
: m_householder(matrix.rows(), matrix.cols()),
m_bidiagonal(matrix.cols(), matrix.cols()),
m_isInitialized(false)
{
compute(matrix);
}
UpperBidiagonalization& compute(const MatrixType& matrix);
UpperBidiagonalization& computeUnblocked(const MatrixType& matrix);
const MatrixType& householder() const { return m_householder; }
const BidiagonalType& bidiagonal() const { return m_bidiagonal; }
const HouseholderUSequenceType householderU() const
{
eigen_assert(m_isInitialized && "UpperBidiagonalization is not initialized.");
return HouseholderUSequenceType(m_householder, m_householder.diagonal().conjugate());
}
const HouseholderVSequenceType householderV() // const here gives nasty errors and i'm lazy
{
eigen_assert(m_isInitialized && "UpperBidiagonalization is not initialized.");
return HouseholderVSequenceType(m_householder.conjugate(), m_householder.const_derived().template diagonal<1>())
.setLength(m_householder.cols()-1)
.setShift(1);
}
protected:
MatrixType m_householder;
BidiagonalType m_bidiagonal;
bool m_isInitialized;
};
// Standard upper bidiagonalization without fancy optimizations
// This version should be faster for small matrix size
template<typename MatrixType>
void upperbidiagonalization_inplace_unblocked(MatrixType& mat,
typename MatrixType::RealScalar *diagonal,
typename MatrixType::RealScalar *upper_diagonal,
typename MatrixType::Scalar* tempData = 0)
{
typedef typename MatrixType::Scalar Scalar;
Index rows = mat.rows();
Index cols = mat.cols();
typedef Matrix<Scalar,Dynamic,1,ColMajor,MatrixType::MaxRowsAtCompileTime,1> TempType;
TempType tempVector;
if(tempData==0)
{
tempVector.resize(rows);
tempData = tempVector.data();
}
for (Index k = 0; /* breaks at k==cols-1 below */ ; ++k)
{
Index remainingRows = rows - k;
Index remainingCols = cols - k - 1;
// construct left householder transform in-place in A
mat.col(k).tail(remainingRows)
.makeHouseholderInPlace(mat.coeffRef(k,k), diagonal[k]);
// apply householder transform to remaining part of A on the left
mat.bottomRightCorner(remainingRows, remainingCols)
.applyHouseholderOnTheLeft(mat.col(k).tail(remainingRows-1), mat.coeff(k,k), tempData);
if(k == cols-1) break;
// construct right householder transform in-place in mat
mat.row(k).tail(remainingCols)
.makeHouseholderInPlace(mat.coeffRef(k,k+1), upper_diagonal[k]);
// apply householder transform to remaining part of mat on the right
mat.bottomRightCorner(remainingRows-1, remainingCols)
.applyHouseholderOnTheRight(mat.row(k).tail(remainingCols-1).adjoint(), mat.coeff(k,k+1), tempData);
}
}
/** \internal
* Helper routine for the block reduction to upper bidiagonal form.
*
* Let's partition the matrix A:
*
* | A00 A01 |
* A = | |
* | A10 A11 |
*
* This function reduces to bidiagonal form the left \c rows x \a blockSize vertical panel [A00/A10]
* and the \a blockSize x \c cols horizontal panel [A00 A01] of the matrix \a A. The bottom-right block A11
* is updated using matrix-matrix products:
*   A11 -= V * Y^T + X * U^T
* where V and U contain the left and right Householder vectors. V and U are stored in A10 and A01
* respectively, and the update matrices X and Y are computed during the reduction.
*
*/
template<typename MatrixType>
void upperbidiagonalization_blocked_helper(MatrixType& A,
typename MatrixType::RealScalar *diagonal,
typename MatrixType::RealScalar *upper_diagonal,
Index bs,
Ref<Matrix<typename MatrixType::Scalar, Dynamic, Dynamic,
traits<MatrixType>::Flags & RowMajorBit> > X,
Ref<Matrix<typename MatrixType::Scalar, Dynamic, Dynamic,
traits<MatrixType>::Flags & RowMajorBit> > Y)
{
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::RealScalar RealScalar;
typedef typename NumTraits<RealScalar>::Literal Literal;
enum { StorageOrder = traits<MatrixType>::Flags & RowMajorBit };
typedef InnerStride<int(StorageOrder) == int(ColMajor) ? 1 : Dynamic> ColInnerStride;
typedef InnerStride<int(StorageOrder) == int(ColMajor) ? Dynamic : 1> RowInnerStride;
typedef Ref<Matrix<Scalar, Dynamic, 1>, 0, ColInnerStride> SubColumnType;
typedef Ref<Matrix<Scalar, 1, Dynamic>, 0, RowInnerStride> SubRowType;
typedef Ref<Matrix<Scalar, Dynamic, Dynamic, StorageOrder > > SubMatType;
Index brows = A.rows();
Index bcols = A.cols();
Scalar tau_u, tau_u_prev(0), tau_v;
for(Index k = 0; k < bs; ++k)
{
Index remainingRows = brows - k;
Index remainingCols = bcols - k - 1;
SubMatType X_k1( X.block(k,0, remainingRows,k) );
SubMatType V_k1( A.block(k,0, remainingRows,k) );
// 1 - update the k-th column of A
SubColumnType v_k = A.col(k).tail(remainingRows);
v_k -= V_k1 * Y.row(k).head(k).adjoint();
if(k) v_k -= X_k1 * A.col(k).head(k);
// 2 - construct left Householder transform in-place
v_k.makeHouseholderInPlace(tau_v, diagonal[k]);
if(k+1<bcols)
{
SubMatType Y_k ( Y.block(k+1,0, remainingCols, k+1) );
SubMatType U_k1 ( A.block(0,k+1, k,remainingCols) );
// this eases the application of Householder transformations
// A(k,k) will store tau_v later
A(k,k) = Scalar(1);
// 3 - Compute y_k^T = tau_v * ( A^T*v_k - Y_k-1*V_k-1^T*v_k - U_k-1*X_k-1^T*v_k )
{
SubColumnType y_k( Y.col(k).tail(remainingCols) );
// let's use the beginning of column k of Y as a temporary vector
SubColumnType tmp( Y.col(k).head(k) );
y_k.noalias() = A.block(k,k+1, remainingRows,remainingCols).adjoint() * v_k; // bottleneck
tmp.noalias() = V_k1.adjoint() * v_k;
y_k.noalias() -= Y_k.leftCols(k) * tmp;
tmp.noalias() = X_k1.adjoint() * v_k;
y_k.noalias() -= U_k1.adjoint() * tmp;
y_k *= numext::conj(tau_v);
}
// 4 - update k-th row of A (it will become u_k)
SubRowType u_k( A.row(k).tail(remainingCols) );
u_k = u_k.conjugate();
{
u_k -= Y_k * A.row(k).head(k+1).adjoint();
if(k) u_k -= U_k1.adjoint() * X.row(k).head(k).adjoint();
}
// 5 - construct right Householder transform in-place
u_k.makeHouseholderInPlace(tau_u, upper_diagonal[k]);
// this eases the application of Householder transformations
// A(k,k+1) will store tau_u later
A(k,k+1) = Scalar(1);
// 6 - Compute x_k = tau_u * ( A*u_k - X_k-1*U_k-1^T*u_k - V_k*Y_k^T*u_k )
{
SubColumnType x_k ( X.col(k).tail(remainingRows-1) );
// let's use the beginning of column k of X as temporary vectors
// note that tmp0 and tmp1 overlap
SubColumnType tmp0 ( X.col(k).head(k) ),
tmp1 ( X.col(k).head(k+1) );
x_k.noalias() = A.block(k+1,k+1, remainingRows-1,remainingCols) * u_k.transpose(); // bottleneck
tmp0.noalias() = U_k1 * u_k.transpose();
x_k.noalias() -= X_k1.bottomRows(remainingRows-1) * tmp0;
tmp1.noalias() = Y_k.adjoint() * u_k.transpose();
x_k.noalias() -= A.block(k+1,0, remainingRows-1,k+1) * tmp1;
x_k *= numext::conj(tau_u);
tau_u = numext::conj(tau_u);
u_k = u_k.conjugate();
}
if(k>0) A.coeffRef(k-1,k) = tau_u_prev;
tau_u_prev = tau_u;
}
else
A.coeffRef(k-1,k) = tau_u_prev;
A.coeffRef(k,k) = tau_v;
}
if(bs<bcols)
A.coeffRef(bs-1,bs) = tau_u_prev;
// update A22
if(bcols>bs && brows>bs)
{
SubMatType A11( A.bottomRightCorner(brows-bs,bcols-bs) );
SubMatType A10( A.block(bs,0, brows-bs,bs) );
SubMatType A01( A.block(0,bs, bs,bcols-bs) );
Scalar tmp = A01(bs-1,0);
A01(bs-1,0) = Literal(1);
A11.noalias() -= A10 * Y.topLeftCorner(bcols,bs).bottomRows(bcols-bs).adjoint();
A11.noalias() -= X.topLeftCorner(brows,bs).bottomRows(brows-bs) * A01;
A01(bs-1,0) = tmp;
}
}
/** \internal
*
* Implementation of a block-bidiagonal reduction.
* It is based on the following paper:
* The Design of a Parallel Dense Linear Algebra Software Library: Reduction to Hessenberg, Tridiagonal, and Bidiagonal Form.
* by Jaeyoung Choi, Jack J. Dongarra, David W. Walker. (1995)
* section 3.3
*/
template<typename MatrixType, typename BidiagType>
void upperbidiagonalization_inplace_blocked(MatrixType& A, BidiagType& bidiagonal,
Index maxBlockSize=32,
typename MatrixType::Scalar* /*tempData*/ = 0)
{
typedef typename MatrixType::Scalar Scalar;
typedef Block<MatrixType,Dynamic,Dynamic> BlockType;
Index rows = A.rows();
Index cols = A.cols();
Index size = (std::min)(rows, cols);
// X and Y are work space
enum { StorageOrder = traits<MatrixType>::Flags & RowMajorBit };
Matrix<Scalar,
MatrixType::RowsAtCompileTime,
Dynamic,
StorageOrder,
MatrixType::MaxRowsAtCompileTime> X(rows,maxBlockSize);
Matrix<Scalar,
MatrixType::ColsAtCompileTime,
Dynamic,
StorageOrder,
MatrixType::MaxColsAtCompileTime> Y(cols,maxBlockSize);
Index blockSize = (std::min)(maxBlockSize,size);
Index k = 0;
for(k = 0; k < size; k += blockSize)
{
Index bs = (std::min)(size-k,blockSize); // actual size of the block
Index brows = rows - k; // rows of the block
Index bcols = cols - k; // columns of the block
// partition the matrix A:
//
// | A00 A01 A02 |
// | |
// A = | A10 A11 A12 |
// | |
// | A20 A21 A22 |
//
// where A11 is a bs x bs diagonal block,
// and let:
// | A11 A12 |
// B = | |
// | A21 A22 |
BlockType B = A.block(k,k,brows,bcols);
// This stage performs the bidiagonalization of A11, A21, A12, and the update of A22.
// The algorithm then continues on the updated A22.
//
// However, if B is too small, or A22 is empty, then let's use an unblocked strategy
if(k+bs==cols || bcols<48) // somewhat arbitrary threshold
{
upperbidiagonalization_inplace_unblocked(B,
&(bidiagonal.template diagonal<0>().coeffRef(k)),
&(bidiagonal.template diagonal<1>().coeffRef(k)),
X.data()
);
break; // We're done
}
else
{
upperbidiagonalization_blocked_helper<BlockType>( B,
&(bidiagonal.template diagonal<0>().coeffRef(k)),
&(bidiagonal.template diagonal<1>().coeffRef(k)),
bs,
X.topLeftCorner(brows,bs),
Y.topLeftCorner(bcols,bs)
);
}
}
}
template<typename _MatrixType>
UpperBidiagonalization<_MatrixType>& UpperBidiagonalization<_MatrixType>::computeUnblocked(const _MatrixType& matrix)
{
Index rows = matrix.rows();
Index cols = matrix.cols();
EIGEN_ONLY_USED_FOR_DEBUG(cols);
eigen_assert(rows >= cols && "UpperBidiagonalization is only for Arices satisfying rows>=cols.");
m_householder = matrix;
ColVectorType temp(rows);
upperbidiagonalization_inplace_unblocked(m_householder,
&(m_bidiagonal.template diagonal<0>().coeffRef(0)),
&(m_bidiagonal.template diagonal<1>().coeffRef(0)),
temp.data());
m_isInitialized = true;
return *this;
}
template<typename _MatrixType>
UpperBidiagonalization<_MatrixType>& UpperBidiagonalization<_MatrixType>::compute(const _MatrixType& matrix)
{
Index rows = matrix.rows();
Index cols = matrix.cols();
EIGEN_ONLY_USED_FOR_DEBUG(rows);
EIGEN_ONLY_USED_FOR_DEBUG(cols);
eigen_assert(rows >= cols && "UpperBidiagonalization is only for Arices satisfying rows>=cols.");
m_householder = matrix;
upperbidiagonalization_inplace_blocked(m_householder, m_bidiagonal);
m_isInitialized = true;
return *this;
}
#if 0
/** \return the Householder QR decomposition of \c *this.
*
* \sa class Bidiagonalization
*/
template<typename Derived>
const UpperBidiagonalization<typename MatrixBase<Derived>::PlainObject>
MatrixBase<Derived>::bidiagonalization() const
{
return UpperBidiagonalization<PlainObject>(eval());
}
#endif
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_BIDIAGONALIZATION_H
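
For illustration only: internal::UpperBidiagonalization is an internal class and not part of Eigen's stable API. The following sketch, modeled on how Eigen's own test suite exercises the class, reconstructs A from the two Householder sequences and the bidiagonal factor:

#include <Eigen/Dense>
#include <iostream>

int main()
{
  const int rows = 6, cols = 4;   // the class requires rows >= cols
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(rows, cols);
  Eigen::internal::UpperBidiagonalization<Eigen::MatrixXd> ubd(A);
  // Embed the cols x cols upper-bidiagonal band into a rows x cols factor B.
  Eigen::MatrixXd B = Eigen::MatrixXd::Zero(rows, cols);
  B.block(0, 0, cols, cols) = ubd.bidiagonal().toDenseMatrix();
  // A should equal householderU() * B * householderV()^*.
  Eigen::MatrixXd A2 = ubd.householderU() * B * ubd.householderV().adjoint();
  std::cout << "reconstruction error: " << (A - A2).norm() << std::endl;
  return 0;
}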